author    Luca Coelho <luciano.coelho@intel.com>  2019-04-24 10:16:46 +0300
committer Luca Coelho <luciano.coelho@intel.com>  2019-04-24 10:17:03 +0300
commit    17aa1cecf8cfd0f71b5689a4acacbcdba2575ba7 (patch)
tree      fb50328f2bf3b3027420bc1152b1fd935601649e
parent    60baf7f5ccbaa47d2976b3a36762aff7d93f3cd7 (diff)
parent    2e7bd078f16ab5d0794d96e29d7539b5dbce20f9 (diff)
download  chromeos-4.4__release/core43-116.tar.gz
Merge remote-tracking branch 'upstream/chromeos-4.4' into chromeos-4.4__release/core43-116 (chromeos-4.4__release/core43-2019-04-24, chromeos-4.4__release/core43-116)
Change-Id: I8f76c95de7fe65ddf505abca3064c70e78ec3100

Diffstat
-rw-r--r--COMMIT-QUEUE.ini9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats122
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb17
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu1
-rw-r--r--Documentation/Changes17
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml20
-rw-r--r--Documentation/Makefile3
-rw-r--r--Documentation/PCI/pcieaer-howto.txt5
-rw-r--r--Documentation/acpi/ssdt-overlays.txt91
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/sx9310.txt61
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/sx932x.txt34
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ak7375.txt8
-rw-r--r--Documentation/devicetree/bindings/misc/throttler.txt13
-rw-r--r--Documentation/devicetree/bindings/net/btusb.txt3
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt1
-rw-r--r--Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt3
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt36
-rw-r--r--Documentation/devicetree/bindings/sound/dmic.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/max98373.txt40
-rw-r--r--Documentation/filesystems/proc.txt4
-rw-r--r--Documentation/hwmon/ina2xx2
-rw-r--r--Documentation/input/event-codes.txt21
-rw-r--r--Documentation/kernel-parameters.txt115
-rw-r--r--Documentation/lzo.txt35
-rw-r--r--Documentation/networking/Makefile1
-rw-r--r--Documentation/networking/ip-sysctl.txt13
-rw-r--r--Documentation/networking/netdev-FAQ.txt9
-rw-r--r--Documentation/networking/timestamping/Makefile14
-rw-r--r--Documentation/printk-formats.txt3
-rw-r--r--Documentation/spec_ctrl.txt94
-rw-r--r--Documentation/sysctl/fs.txt36
-rw-r--r--Documentation/usb/authorization.txt4
-rw-r--r--Documentation/virtual/kvm/api.txt16
-rw-r--r--Documentation/vm/unevictable-lru.txt6
-rw-r--r--MAINTAINERS44
-rw-r--r--Makefile31
-rw-r--r--arch/Kconfig19
-rw-r--r--arch/alpha/include/asm/irq.h6
-rw-r--r--arch/alpha/include/asm/termios.h8
-rw-r--r--arch/alpha/include/asm/thread_info.h27
-rw-r--r--arch/alpha/include/uapi/asm/ioctls.h5
-rw-r--r--arch/alpha/include/uapi/asm/mman.h3
-rw-r--r--arch/alpha/include/uapi/asm/termbits.h17
-rw-r--r--arch/alpha/kernel/osf_sys.c64
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/Makefile31
-rw-r--r--arch/arc/configs/axs101_defconfig3
-rw-r--r--arch/arc/configs/axs103_defconfig2
-rw-r--r--arch/arc/configs/axs103_smp_defconfig2
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig2
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/tb10x_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_defconfig1
-rw-r--r--arch/arc/configs/vdk_hs38_smp_defconfig1
-rw-r--r--arch/arc/include/asm/bitops.h6
-rw-r--r--arch/arc/include/asm/delay.h3
-rw-r--r--arch/arc/include/asm/io.h72
-rw-r--r--arch/arc/include/asm/mach_desc.h2
-rw-r--r--arch/arc/include/asm/page.h2
-rw-r--r--arch/arc/include/asm/perf_event.h3
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arc/include/asm/uaccess.h8
-rw-r--r--arch/arc/kernel/head.S10
-rw-r--r--arch/arc/kernel/irq.c2
-rw-r--r--arch/arc/kernel/process.c20
-rw-r--r--arch/arc/mm/cache.c7
-rw-r--r--arch/arm/Kconfig8
-rw-r--r--arch/arm/boot/dts/am3517.dtsi5
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts2
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi4
-rw-r--r--arch/arm/boot/dts/da850-evm.dts2
-rw-r--r--arch/arm/boot/dts/da850.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi25
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi10
-rw-r--r--arch/arm/boot/dts/imx53-qsb-common.dtsi11
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi2
-rw-r--r--arch/arm/boot/dts/kirkwood-dnskw.dtsi4
-rw-r--r--arch/arm/boot/dts/mmp2.dtsi9
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts1
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi3
-rw-r--r--arch/arm/boot/dts/sama5d3_emac.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi1
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig2
-rw-r--r--arch/arm/include/asm/arch_gicv3.h1
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/include/asm/irq.h1
-rw-r--r--arch/arm/include/asm/kgdb.h2
-rw-r--r--arch/arm/include/asm/uaccess.h13
-rw-r--r--arch/arm/kernel/irq.c62
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kernel/smp.c17
-rw-r--r--arch/arm/kernel/vmlinux.lds.S4
-rw-r--r--arch/arm/kvm/mmio.c21
-rw-r--r--arch/arm/kvm/mmu.c42
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c2
-rw-r--r--arch/arm/mach-exynos/suspend.c1
-rw-r--r--arch/arm/mach-hisi/hotplug.c41
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6q.c27
-rw-r--r--arch/arm/mach-imx/cpuidle-imx6sx.c2
-rw-r--r--arch/arm/mach-integrator/impd1.c6
-rw-r--r--arch/arm/mach-iop32x/n2100.c3
-rw-r--r--arch/arm/mach-mvebu/pmsu.c6
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c3
-rw-r--r--arch/arm/mach-omap2/display.c7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c6
-rw-r--r--arch/arm/mach-omap2/prm44xx.c2
-rw-r--r--arch/arm/mach-pxa/cm-x300.c2
-rw-r--r--arch/arm/mach-pxa/irq.c4
-rw-r--r--arch/arm/mach-pxa/littleton.c2
-rw-r--r--arch/arm/mach-pxa/zeus.c2
-rw-r--r--arch/arm/mach-s3c24xx/mach-osiris-dvs.c8
-rw-r--r--arch/arm/mm/cache-v7.S8
-rw-r--r--arch/arm/mm/init.c9
-rw-r--r--arch/arm/mm/ioremap.c2
-rw-r--r--arch/arm/mm/mmu.c2
-rw-r--r--arch/arm/plat-pxa/ssp.c3
-rw-r--r--arch/arm/vdso/vdso.S3
-rw-r--r--arch/arm64/Kconfig8
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi2
-rw-r--r--arch/arm64/boot/dts/rockchip/Makefile1
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r3-sku7.dts10
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r4-sku6.dts24
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r5-sku0.dts74
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi68
-rw-r--r--arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi7
-rw-r--r--arch/arm64/crypto/aes-ce-ccm-core.S5
-rw-r--r--arch/arm64/include/asm/Kbuild1
-rw-r--r--arch/arm64/include/asm/assembler.h22
-rw-r--r--arch/arm64/include/asm/atomic_lse.h14
-rw-r--r--arch/arm64/include/asm/cacheflush.h4
-rw-r--r--arch/arm64/include/asm/cpu.h1
-rw-r--r--arch/arm64/include/asm/cpufeature.h42
-rw-r--r--arch/arm64/include/asm/cputype.h14
-rw-r--r--arch/arm64/include/asm/current.h29
-rw-r--r--arch/arm64/include/asm/jump_label.h4
-rw-r--r--arch/arm64/include/asm/kvm_arm.h3
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/memory.h6
-rw-r--r--arch/arm64/include/asm/page.h2
-rw-r--r--arch/arm64/include/asm/percpu.h5
-rw-r--r--arch/arm64/include/asm/perf_event.h2
-rw-r--r--arch/arm64/include/asm/shmparam.h2
-rw-r--r--arch/arm64/include/asm/smp.h23
-rw-r--r--arch/arm64/include/asm/stack_pointer.h9
-rw-r--r--arch/arm64/include/asm/string.h5
-rw-r--r--arch/arm64/include/asm/suspend.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h30
-rw-r--r--arch/arm64/include/asm/thread_info.h23
-rw-r--r--arch/arm64/include/asm/uaccess.h29
-rw-r--r--arch/arm64/include/uapi/asm/hwcap.h2
-rw-r--r--arch/arm64/kernel/arm64ksyms.c4
-rw-r--r--arch/arm64/kernel/armv8_deprecated.c6
-rw-r--r--arch/arm64/kernel/asm-offsets.c12
-rw-r--r--arch/arm64/kernel/cpu_errata.c33
-rw-r--r--arch/arm64/kernel/cpufeature.c452
-rw-r--r--arch/arm64/kernel/cpuinfo.c40
-rw-r--r--arch/arm64/kernel/entry-ftrace.S1
-rw-r--r--arch/arm64/kernel/entry.S35
-rw-r--r--arch/arm64/kernel/fpsimd.c2
-rw-r--r--arch/arm64/kernel/head.S18
-rw-r--r--arch/arm64/kernel/hyp-stub.S2
-rw-r--r--arch/arm64/kernel/image.h40
-rw-r--r--arch/arm64/kernel/perf_event.c1
-rw-r--r--arch/arm64/kernel/process.c36
-rw-r--r--arch/arm64/kernel/return_address.c1
-rw-r--r--arch/arm64/kernel/setup.c2
-rw-r--r--arch/arm64/kernel/smp.c38
-rw-r--r--arch/arm64/kernel/stacktrace.c7
-rw-r--r--arch/arm64/kernel/suspend.c2
-rw-r--r--arch/arm64/kernel/traps.c27
-rw-r--r--arch/arm64/kernel/vdso.c10
-rw-r--r--arch/arm64/kernel/vmlinux.lds.S3
-rw-r--r--arch/arm64/kvm/guest.c55
-rw-r--r--arch/arm64/kvm/hyp.S2
-rw-r--r--arch/arm64/lib/copy_from_user.S4
-rw-r--r--arch/arm64/lib/copy_to_user.S4
-rw-r--r--arch/arm64/mm/context.c36
-rw-r--r--arch/arm64/mm/dma-mapping.c4
-rw-r--r--arch/arm64/mm/fault.c15
-rw-r--r--arch/arm64/mm/flush.c4
-rw-r--r--arch/arm64/mm/init.c12
-rw-r--r--arch/arm64/mm/mmu.c7
-rw-r--r--arch/arm64/mm/proc.S10
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c4
-rw-r--r--arch/hexagon/include/asm/bitops.h4
-rw-r--r--arch/hexagon/kernel/dma.c2
-rw-r--r--arch/ia64/Kconfig2
-rw-r--r--arch/ia64/include/asm/thread_info.h36
-rw-r--r--arch/ia64/kernel/err_inject.c2
-rw-r--r--arch/ia64/kernel/init_task.c1
-rw-r--r--arch/m68k/Makefile5
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h4
-rw-r--r--arch/m68k/mm/kmap.c3
-rw-r--r--arch/microblaze/boot/Makefile10
-rw-r--r--arch/microblaze/include/asm/thread_info.h27
-rw-r--r--arch/mips/Kconfig4
-rw-r--r--arch/mips/ath79/common.c2
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c7
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/dec/int-handler.S34
-rw-r--r--arch/mips/include/asm/fpu_emulator.h13
-rw-r--r--arch/mips/include/asm/io.h10
-rw-r--r--arch/mips/include/asm/jump_label.h8
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h1
-rw-r--r--arch/mips/include/asm/mach-loongson64/irq.h2
-rw-r--r--arch/mips/include/asm/pci.h2
-rw-r--r--arch/mips/include/asm/pgtable-64.h5
-rw-r--r--arch/mips/include/asm/processor.h2
-rw-r--r--arch/mips/include/asm/switch_to.h18
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/include/uapi/asm/inst.h4
-rw-r--r--arch/mips/include/uapi/asm/mman.h3
-rw-r--r--arch/mips/jazz/jazzdma.c5
-rw-r--r--arch/mips/jz4740/Platform2
-rw-r--r--arch/mips/kernel/crash.c3
-rw-r--r--arch/mips/kernel/irq.c4
-rw-r--r--arch/mips/kernel/machine_kexec.c3
-rw-r--r--arch/mips/kernel/mcount.S27
-rw-r--r--arch/mips/kernel/mips-cm.c2
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c10
-rw-r--r--arch/mips/kernel/process.c50
-rw-r--r--arch/mips/kernel/ptrace.c11
-rw-r--r--arch/mips/kernel/ptrace32.c4
-rw-r--r--arch/mips/kernel/traps.c73
-rw-r--r--arch/mips/kernel/vdso.c20
-rw-r--r--arch/mips/lib/multi3.c6
-rw-r--r--arch/mips/loongson64/common/cs5536/cs5536_ohci.c2
-rw-r--r--arch/mips/loongson64/lemote-2f/irq.c2
-rw-r--r--arch/mips/loongson64/loongson-3/irq.c56
-rw-r--r--arch/mips/mm/c-r4k.c6
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/mips/mm/ioremap.c37
-rw-r--r--arch/mips/pci/msi-octeon.c4
-rw-r--r--arch/mips/pci/pci-octeon.c10
-rw-r--r--arch/mips/ralink/mt7620.c2
-rw-r--r--arch/mips/sibyte/common/Makefile1
-rw-r--r--arch/mips/sibyte/common/dma.c14
-rw-r--r--arch/mips/vdso/Makefile4
-rw-r--r--arch/mn10300/include/asm/thread_info.h2
-rw-r--r--arch/mn10300/kernel/kgdb.c3
-rw-r--r--arch/nios2/kernel/prom.c3
-rw-r--r--arch/openrisc/kernel/process.c2
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/include/asm/barrier.h32
-rw-r--r--arch/parisc/include/asm/cache.h3
-rw-r--r--arch/parisc/include/asm/cacheflush.h4
-rw-r--r--arch/parisc/include/asm/spinlock.h8
-rw-r--r--arch/parisc/include/uapi/asm/mman.h3
-rw-r--r--arch/parisc/kernel/entry.S4
-rw-r--r--arch/parisc/kernel/pacache.S1
-rw-r--r--arch/parisc/kernel/syscall.S28
-rw-r--r--arch/parisc/kernel/traps.c3
-rw-r--r--arch/parisc/mm/init.c8
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/boot/Makefile3
-rw-r--r--arch/powerpc/boot/crt0.S8
-rw-r--r--arch/powerpc/include/asm/epapr_hcalls.h12
-rw-r--r--arch/powerpc/include/asm/fadump.h3
-rw-r--r--arch/powerpc/include/asm/mpic.h7
-rw-r--r--arch/powerpc/include/asm/thread_info.h25
-rw-r--r--arch/powerpc/include/asm/uaccess.h2
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S4
-rw-r--r--arch/powerpc/kernel/entry_32.S9
-rw-r--r--arch/powerpc/kernel/entry_64.S1
-rw-r--r--arch/powerpc/kernel/fadump.c118
-rw-r--r--arch/powerpc/kernel/head_8xx.S2
-rw-r--r--arch/powerpc/kernel/hw_breakpoint.c4
-rw-r--r--arch/powerpc/kernel/machine_kexec.c7
-rw-r--r--arch/powerpc/kernel/msi.c7
-rw-r--r--arch/powerpc/kernel/pci_32.c1
-rw-r--r--arch/powerpc/kernel/prom_init.c3
-rw-r--r--arch/powerpc/kernel/ptrace.c1
-rw-r--r--arch/powerpc/kernel/tm.S20
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c2
-rw-r--r--arch/powerpc/kvm/trace.h8
-rw-r--r--arch/powerpc/kvm/trace_booke.h9
-rw-r--r--arch/powerpc/kvm/trace_hv.h9
-rw-r--r--arch/powerpc/kvm/trace_pr.h9
-rw-r--r--arch/powerpc/mm/numa.c2
-rw-r--r--arch/powerpc/mm/slb.c8
-rw-r--r--arch/powerpc/mm/tlb_nohash.c3
-rw-r--r--arch/powerpc/platforms/83xx/suspend-asm.S34
-rw-r--r--arch/powerpc/platforms/chrp/time.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c5
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c4
-rw-r--r--arch/powerpc/platforms/powermac/bootx_init.c4
-rw-r--r--arch/powerpc/platforms/powermac/setup.c1
-rw-r--r--arch/powerpc/platforms/powernv/opal-msglog.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c4
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c2
-rw-r--r--arch/s390/appldata/appldata_mem.c9
-rw-r--r--arch/s390/include/asm/cpu_mf.h6
-rw-r--r--arch/s390/include/asm/qdio.h1
-rw-r--r--arch/s390/kernel/early.c4
-rw-r--r--arch/s390/kernel/entry.S4
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/setup.c2
-rw-r--r--arch/s390/kernel/smp.c12
-rw-r--r--arch/s390/kernel/vdso32/Makefile6
-rw-r--r--arch/s390/kernel/vdso64/Makefile6
-rw-r--r--arch/s390/lib/mem.S9
-rw-r--r--arch/s390/mm/extmem.c4
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/mm/gup.c2
-rw-r--r--arch/s390/mm/pgtable.c2
-rw-r--r--arch/s390/net/bpf_jit_comp.c2
-rw-r--r--arch/s390/numa/numa.c1
-rw-r--r--arch/s390/pci/pci.c2
-rw-r--r--arch/sh/include/asm/thread_info.h26
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c3
-rw-r--r--arch/sh/lib/ashlsi3.S35
-rw-r--r--arch/sh/lib/ashrsi3.S33
-rw-r--r--arch/sh/lib/lshrsi3.S34
-rw-r--r--arch/sh/mm/gup.c3
-rw-r--r--arch/sparc/include/asm/page_64.h1
-rw-r--r--arch/sparc/include/asm/thread_info_64.h24
-rw-r--r--arch/sparc/kernel/ds.c2
-rw-r--r--arch/sparc/kernel/pci.c29
-rw-r--r--arch/sparc/kernel/perf_event.c17
-rw-r--r--arch/sparc/kernel/sys_sparc_32.c22
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c20
-rw-r--r--arch/sparc/lib/U3memcpy.S4
-rw-r--r--arch/sparc/mm/fault_64.c1
-rw-r--r--arch/sparc/mm/gup.c3
-rw-r--r--arch/sparc/mm/tlb.c35
-rw-r--r--arch/sparc/mm/tsb.c18
-rw-r--r--arch/tile/include/asm/thread_info.h29
-rw-r--r--arch/tile/kernel/process.c3
-rw-r--r--arch/um/Makefile4
-rw-r--r--arch/um/include/asm/pgtable.h9
-rw-r--r--arch/um/os-Linux/skas/process.c5
-rw-r--r--arch/x86/Kconfig32
-rw-r--r--arch/x86/Kconfig.debug28
-rw-r--r--arch/x86/Makefile17
-rw-r--r--arch/x86/boot/compressed/aslr.c5
-rw-r--r--arch/x86/boot/compressed/misc.c7
-rw-r--r--arch/x86/boot/cpuflags.h2
-rw-r--r--arch/x86/boot/mkcpustr.c2
-rw-r--r--arch/x86/boot/tools/build.c7
-rw-r--r--arch/x86/crypto/chacha20_glue.c3
-rw-r--r--arch/x86/crypto/crc32-pclmul_glue.c2
-rw-r--r--arch/x86/crypto/crc32c-intel_glue.c26
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c2
-rw-r--r--arch/x86/entry/common.c31
-rw-r--r--arch/x86/entry/entry_32.S79
-rw-r--r--arch/x86/entry/entry_64.S106
-rw-r--r--arch/x86/entry/entry_64_compat.S75
-rw-r--r--arch/x86/entry/vdso/vclock_gettime.c26
-rw-r--r--arch/x86/entry/vdso/vdso2c.h2
-rw-r--r--arch/x86/entry/vdso/vdso32-setup.c1
-rw-r--r--arch/x86/entry/vdso/vdso32/system_call.S2
-rw-r--r--arch/x86/entry/vdso/vma.c3
-rw-r--r--arch/x86/entry/vsyscall/vsyscall_64.c10
-rw-r--r--arch/x86/ia32/ia32_aout.c6
-rw-r--r--arch/x86/include/asm/alternative.h6
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/apm.h6
-rw-r--r--arch/x86/include/asm/arch_hweight.h2
-rw-r--r--arch/x86/include/asm/asm.h59
-rw-r--r--arch/x86/include/asm/atomic.h1
-rw-r--r--arch/x86/include/asm/atomic64_32.h1
-rw-r--r--arch/x86/include/asm/barrier.h4
-rw-r--r--arch/x86/include/asm/cacheflush.h6
-rw-r--r--arch/x86/include/asm/checksum_32.h3
-rw-r--r--arch/x86/include/asm/cmpxchg.h1
-rw-r--r--arch/x86/include/asm/cmpxchg_32.h2
-rw-r--r--arch/x86/include/asm/cmpxchg_64.h2
-rw-r--r--arch/x86/include/asm/compat.h4
-rw-r--r--arch/x86/include/asm/cpu.h1
-rw-r--r--arch/x86/include/asm/cpufeature.h584
-rw-r--r--arch/x86/include/asm/cpufeatures.h336
-rw-r--r--arch/x86/include/asm/desc.h2
-rw-r--r--arch/x86/include/asm/disabled-features.h18
-rw-r--r--arch/x86/include/asm/efi.h7
-rw-r--r--arch/x86/include/asm/fpu/internal.h224
-rw-r--r--arch/x86/include/asm/fpu/types.h34
-rw-r--r--arch/x86/include/asm/fpu/xstate.h5
-rw-r--r--arch/x86/include/asm/intel-family.h10
-rw-r--r--arch/x86/include/asm/io.h6
-rw-r--r--arch/x86/include/asm/irq_work.h2
-rw-r--r--arch/x86/include/asm/irqflags.h7
-rw-r--r--arch/x86/include/asm/kprobes.h11
-rw-r--r--arch/x86/include/asm/kvm_emulate.h6
-rw-r--r--arch/x86/include/asm/kvm_host.h5
-rw-r--r--arch/x86/include/asm/kvm_para.h7
-rw-r--r--arch/x86/include/asm/mmu.h15
-rw-r--r--arch/x86/include/asm/mmu_context.h24
-rw-r--r--arch/x86/include/asm/msr-index.h22
-rw-r--r--arch/x86/include/asm/mwait.h2
-rw-r--r--arch/x86/include/asm/nospec-branch.h56
-rw-r--r--arch/x86/include/asm/page_32_types.h9
-rw-r--r--arch/x86/include/asm/page_64_types.h4
-rw-r--r--arch/x86/include/asm/percpu.h8
-rw-r--r--arch/x86/include/asm/pgtable-2level.h17
-rw-r--r--arch/x86/include/asm/pgtable-3level.h44
-rw-r--r--arch/x86/include/asm/pgtable-invert.h41
-rw-r--r--arch/x86/include/asm/pgtable.h97
-rw-r--r--arch/x86/include/asm/pgtable_64.h54
-rw-r--r--arch/x86/include/asm/pgtable_types.h10
-rw-r--r--arch/x86/include/asm/processor.h45
-rw-r--r--arch/x86/include/asm/required-features.h10
-rw-r--r--arch/x86/include/asm/sections.h2
-rw-r--r--arch/x86/include/asm/smap.h2
-rw-r--r--arch/x86/include/asm/smp.h16
-rw-r--r--arch/x86/include/asm/spec-ctrl.h80
-rw-r--r--arch/x86/include/asm/stacktrace.h10
-rw-r--r--arch/x86/include/asm/string_32.h9
-rw-r--r--arch/x86/include/asm/string_64.h7
-rw-r--r--arch/x86/include/asm/switch_to.h151
-rw-r--r--arch/x86/include/asm/syscall.h23
-rw-r--r--arch/x86/include/asm/thread_info.h141
-rw-r--r--arch/x86/include/asm/tlbflush.h13
-rw-r--r--arch/x86/include/asm/uaccess.h116
-rw-r--r--arch/x86/include/asm/uaccess_32.h28
-rw-r--r--arch/x86/include/asm/uaccess_64.h98
-rw-r--r--arch/x86/include/asm/xor_32.h2
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c6
-rw-r--r--arch/x86/kernel/apic/apic_noop.c2
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c4
-rw-r--r--arch/x86/kernel/apic/bigsmp_32.c2
-rw-r--r--arch/x86/kernel/apic/msi.c2
-rw-r--r--arch/x86/kernel/apic/probe_32.c4
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_phys.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c2
-rw-r--r--arch/x86/kernel/asm-offsets.c19
-rw-r--r--arch/x86/kernel/asm-offsets_32.c5
-rw-r--r--arch/x86/kernel/asm-offsets_64.c5
-rw-r--r--arch/x86/kernel/check.c15
-rw-r--r--arch/x86/kernel/cpu/Makefile2
-rw-r--r--arch/x86/kernel/cpu/amd.c40
-rw-r--r--arch/x86/kernel/cpu/bugs.c543
-rw-r--r--arch/x86/kernel/cpu/centaur.c4
-rw-r--r--arch/x86/kernel/cpu/common.c235
-rw-r--r--arch/x86/kernel/cpu/cpu.h3
-rw-r--r--arch/x86/kernel/cpu/cyrix.c1
-rw-r--r--arch/x86/kernel/cpu/intel.c81
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c8
-rw-r--r--arch/x86/kernel/cpu/match.c2
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c55
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh6
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c2
-rw-r--r--arch/x86/kernel/cpu/mtrr/main.c8
-rw-r--r--arch/x86/kernel/cpu/mtrr/mtrr.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c4
-rw-r--r--arch/x86/kernel/cpu/scattered.c20
-rw-r--r--arch/x86/kernel/cpu/transmeta.c6
-rw-r--r--arch/x86/kernel/dumpstack.c22
-rw-r--r--arch/x86/kernel/dumpstack_32.c4
-rw-r--r--arch/x86/kernel/dumpstack_64.c8
-rw-r--r--arch/x86/kernel/e820.c1
-rw-r--r--arch/x86/kernel/fpu/core.c47
-rw-r--r--arch/x86/kernel/fpu/init.c172
-rw-r--r--arch/x86/kernel/fpu/signal.c9
-rw-r--r--arch/x86/kernel/fpu/xstate.c3
-rw-r--r--arch/x86/kernel/ftrace.c6
-rw-r--r--arch/x86/kernel/head_32.S2
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/hpet.c1
-rw-r--r--arch/x86/kernel/hw_breakpoint.c6
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/irq_64.c3
-rw-r--r--arch/x86/kernel/irqflags.S26
-rw-r--r--arch/x86/kernel/kexec-bzimage64.c3
-rw-r--r--arch/x86/kernel/kgdb.c14
-rw-r--r--arch/x86/kernel/kprobes/core.c46
-rw-r--r--arch/x86/kernel/ksysfs.c2
-rw-r--r--arch/x86/kernel/kvmclock.c2
-rw-r--r--arch/x86/kernel/ldt.c4
-rw-r--r--arch/x86/kernel/livepatch.c1
-rw-r--r--arch/x86/kernel/msr.c2
-rw-r--r--arch/x86/kernel/paravirt.c16
-rw-r--r--arch/x86/kernel/process.c233
-rw-r--r--arch/x86/kernel/process_32.c9
-rw-r--r--arch/x86/kernel/process_64.c16
-rw-r--r--arch/x86/kernel/ptrace.c23
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/setup_percpu.c2
-rw-r--r--arch/x86/kernel/signal.c35
-rw-r--r--arch/x86/kernel/smpboot.c9
-rw-r--r--arch/x86/kernel/test_nx.c2
-rw-r--r--arch/x86/kernel/test_rodata.c2
-rw-r--r--arch/x86/kernel/time.c2
-rw-r--r--arch/x86/kernel/traps.c1
-rw-r--r--arch/x86/kernel/tsc_msr.c1
-rw-r--r--arch/x86/kernel/uprobes.c4
-rw-r--r--arch/x86/kernel/verify_cpu.S2
-rw-r--r--arch/x86/kernel/vm86_32.c4
-rw-r--r--arch/x86/kernel/vmlinux.lds.S36
-rw-r--r--arch/x86/kernel/x86_init.c6
-rw-r--r--arch/x86/kvm/cpuid.c36
-rw-r--r--arch/x86/kvm/cpuid.h40
-rw-r--r--arch/x86/kvm/emulate.c72
-rw-r--r--arch/x86/kvm/lapic.c30
-rw-r--r--arch/x86/kvm/mmu.c27
-rw-r--r--arch/x86/kvm/svm.c162
-rw-r--r--arch/x86/kvm/vmx.c827
-rw-r--r--arch/x86/kvm/x86.c89
-rw-r--r--arch/x86/kvm/x86.h4
-rw-r--r--arch/x86/lib/clear_page_64.S2
-rw-r--r--arch/x86/lib/cmdline.c34
-rw-r--r--arch/x86/lib/copy_page_64.S2
-rw-r--r--arch/x86/lib/copy_user_64.S10
-rw-r--r--arch/x86/lib/csum-wrappers_64.c1
-rw-r--r--arch/x86/lib/getuser.S20
-rw-r--r--arch/x86/lib/memcpy_32.c2
-rw-r--r--arch/x86/lib/memcpy_64.S2
-rw-r--r--arch/x86/lib/memmove_64.S2
-rw-r--r--arch/x86/lib/memset_64.S2
-rw-r--r--arch/x86/lib/putuser.S10
-rw-r--r--arch/x86/lib/retpoline.S2
-rw-r--r--arch/x86/lib/usercopy_32.c20
-rw-r--r--arch/x86/lib/usercopy_64.c2
-rw-r--r--arch/x86/mm/extable.c3
-rw-r--r--arch/x86/mm/fault.c4
-rw-r--r--arch/x86/mm/gup.c2
-rw-r--r--arch/x86/mm/init.c25
-rw-r--r--arch/x86/mm/init_32.c3
-rw-r--r--arch/x86/mm/init_64.c3
-rw-r--r--arch/x86/mm/kmmio.c25
-rw-r--r--arch/x86/mm/mmap.c21
-rw-r--r--arch/x86/mm/mpx.c3
-rw-r--r--arch/x86/mm/numa_emulation.c2
-rw-r--r--arch/x86/mm/pageattr.c37
-rw-r--r--arch/x86/mm/pat.c14
-rw-r--r--arch/x86/mm/pgtable.c61
-rw-r--r--arch/x86/mm/setup_nx.c5
-rw-r--r--arch/x86/mm/tlb.c33
-rw-r--r--arch/x86/oprofile/op_model_amd.c1
-rw-r--r--arch/x86/pci/broadcom_bus.c4
-rw-r--r--arch/x86/pci/fixup.c7
-rw-r--r--arch/x86/pci/pcbios.c7
-rw-r--r--arch/x86/platform/efi/early_printk.c2
-rw-r--r--arch/x86/platform/efi/efi_64.c3
-rw-r--r--arch/x86/um/asm/barrier.h2
-rw-r--r--arch/x86/um/ptrace_32.c8
-rw-r--r--arch/x86/um/setjmp_32.S16
-rw-r--r--arch/x86/um/setjmp_64.S16
-rw-r--r--arch/x86/xen/enlighten.c16
-rw-r--r--arch/x86/xen/pmu.c2
-rw-r--r--arch/x86/xen/smp.c5
-rw-r--r--arch/x86/xen/spinlock.c35
-rw-r--r--arch/x86/xen/suspend.c16
-rw-r--r--arch/xtensa/boot/Makefile2
-rw-r--r--arch/xtensa/configs/smp_lx200_defconfig1
-rw-r--r--arch/xtensa/include/asm/processor.h6
-rw-r--r--arch/xtensa/include/uapi/asm/mman.h3
-rw-r--r--arch/xtensa/kernel/asm-offsets.c16
-rw-r--r--arch/xtensa/kernel/head.S12
-rw-r--r--arch/xtensa/kernel/process.c5
-rw-r--r--arch/xtensa/kernel/smp.c41
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--arch/xtensa/kernel/traps.c2
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S1
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-core.c10
-rw-r--r--block/cfq-iosched.c6
-rw-r--r--block/partitions/aix.c13
-rw-r--r--chromeos/config/arm64/chromiumos-arm64.flavour.config8
-rw-r--r--chromeos/config/arm64/chromiumos-mediatek.flavour.config5
-rw-r--r--chromeos/config/arm64/chromiumos-rockchip64.flavour.config25
-rw-r--r--chromeos/config/arm64/common.config263
-rw-r--r--chromeos/config/armel/chromiumos-arm.flavour.config1
-rw-r--r--chromeos/config/armel/chromiumos-rockchip.flavour.config5
-rw-r--r--chromeos/config/armel/common.config260
-rw-r--r--chromeos/config/base.config330
-rw-r--r--chromeos/config/i386/chromeos-pinetrail-i386.flavour.config57
-rw-r--r--chromeos/config/i386/chromiumos-i386.flavour.config149
-rw-r--r--chromeos/config/i386/common.config1345
-rw-r--r--chromeos/config/x86_64/chromeos-amd-stoneyridge.flavour.config2
-rw-r--r--chromeos/config/x86_64/chromeos-intel-pineview.flavour.config8
-rw-r--r--chromeos/config/x86_64/chromiumos-x86_64.flavour.config4
-rw-r--r--chromeos/config/x86_64/common.config277
-rwxr-xr-xchromeos/scripts/kernelconfig2
-rw-r--r--crypto/ablkcipher.c59
-rw-r--r--crypto/af_alg.c4
-rw-r--r--crypto/ahash.c14
-rw-r--r--crypto/authenc.c15
-rw-r--r--crypto/authencesn.c3
-rw-r--r--crypto/blkcipher.c55
-rw-r--r--crypto/cts.c8
-rw-r--r--crypto/lrw.c7
-rw-r--r--crypto/pcbc.c14
-rw-r--r--crypto/shash.c2
-rw-r--r--crypto/vmac.c412
-rw-r--r--drivers/acpi/acpi_lpss.c4
-rw-r--r--drivers/acpi/acpi_platform.c1
-rw-r--r--drivers/acpi/bus.c9
-rw-r--r--drivers/acpi/device_sysfs.c6
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/pmic/tps68470_pmic.c10
-rw-r--r--drivers/acpi/power.c22
-rw-r--r--drivers/acpi/processor_perflib.c10
-rw-r--r--drivers/acpi/scan.c81
-rw-r--r--drivers/acpi/sleep.c27
-rw-r--r--drivers/acpi/sysfs.c6
-rw-r--r--drivers/ata/ahci.c64
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-core.c7
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/ata/libata-zpodd.c4
-rw-r--r--drivers/ata/sata_rcar.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/zatm.c6
-rw-r--r--drivers/base/bus.c7
-rw-r--r--drivers/base/core.c16
-rw-r--r--drivers/base/cpu.c16
-rw-r--r--drivers/base/dd.c8
-rw-r--r--drivers/base/power/main.c5
-rw-r--r--drivers/base/power/opp/core.c5
-rw-r--r--drivers/base/power/opp/cpu.c4
-rw-r--r--drivers/base/power/sysfs.c2
-rw-r--r--drivers/base/power/wakeup.c8
-rw-r--r--drivers/block/ataflop.c25
-rw-r--r--drivers/block/drbd/drbd_nl.c15
-rw-r--r--drivers/block/drbd/drbd_receiver.c13
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/floppy.c3
-rw-r--r--drivers/block/loop.c140
-rw-r--r--drivers/block/loop.h2
-rw-r--r--drivers/block/sunvdc.c5
-rw-r--r--drivers/block/swim.c13
-rw-r--r--drivers/block/swim3.c6
-rw-r--r--drivers/block/zram/zram_drv.c26
-rw-r--r--drivers/bluetooth/btbcm.c1
-rw-r--r--drivers/bluetooth/btusb.c146
-rw-r--r--drivers/bluetooth/hci_intel.c15
-rw-r--r--drivers/bluetooth/hci_qca.c8
-rw-r--r--drivers/cdrom/cdrom.c4
-rw-r--r--drivers/cdrom/gdrom.c1
-rw-r--r--drivers/char/applicom.c35
-rw-r--r--drivers/char/hw_random/via-rng.c5
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c3
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c35
-rw-r--r--drivers/char/mwave/mwavedd.c7
-rw-r--r--drivers/char/random.c10
-rw-r--r--drivers/char/tpm/cr50_i2c.c14
-rw-r--r--drivers/char/tpm/cr50_spi.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c13
-rw-r--r--drivers/char/tpm/tpm-dev.c43
-rw-r--r--drivers/char/tpm/tpm-interface.c10
-rw-r--r--drivers/char/tpm/tpm.h1
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/clk/clk-s2mps11.c30
-rw-r--r--drivers/clk/imx/clk-imx6q.c6
-rw-r--r--drivers/clk/imx/clk-imx6sl.c6
-rw-r--r--drivers/clk/imx/clk-imx6ul.c1
-rw-r--r--drivers/clk/ingenic/cgu.c10
-rw-r--r--drivers/clk/mmp/clk.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c1
-rw-r--r--drivers/clk/tegra/clk-tegra30.c11
-rw-r--r--drivers/clocksource/exynos_mct.c23
-rw-r--r--drivers/clocksource/i8253.c14
-rw-r--r--drivers/clocksource/timer-ti-32k.c3
-rw-r--r--drivers/cpufreq/cpufreq.c20
-rw-r--r--drivers/cpufreq/cpufreq_governor.h10
-rw-r--r--drivers/cpufreq/cpufreq_interactive.c8
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c7
-rw-r--r--drivers/cpufreq/intel_pstate.c14
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra124-cpufreq.c2
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c7
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c32
-rw-r--r--drivers/cpuidle/cpuidle-pseries.c8
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c23
-rw-r--r--drivers/crypto/caam/caamalg.c1
-rw-r--r--drivers/crypto/mxs-dcp.c53
-rw-r--r--drivers/crypto/padlock-aes.c10
-rw-r--r--drivers/crypto/padlock-sha.c2
-rw-r--r--drivers/crypto/sahara.c4
-rw-r--r--drivers/crypto/ux500/cryp/cryp_core.c4
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c2
-rw-r--r--drivers/crypto/vmx/aes.c2
-rw-r--r--drivers/crypto/vmx/aes_cbc.c32
-rw-r--r--drivers/crypto/vmx/aes_ctr.c2
-rw-r--r--drivers/crypto/vmx/ghash.c2
-rw-r--r--drivers/devfreq/devfreq.c222
-rw-r--r--drivers/devfreq/event/rockchip-dfi.c25
-rw-r--r--drivers/devfreq/governor.h6
-rw-r--r--drivers/devfreq/governor_performance.c5
-rw-r--r--drivers/devfreq/governor_powersave.c2
-rw-r--r--drivers/devfreq/governor_simpleondemand.c12
-rw-r--r--drivers/devfreq/governor_userspace.c16
-rw-r--r--drivers/devfreq/tegra-devfreq.c6
-rw-r--r--drivers/dma/at_hdmac.c10
-rw-r--r--drivers/dma/at_xdmac.c19
-rw-r--r--drivers/dma/dma-jz4780.c5
-rw-r--r--drivers/dma/dmatest.c28
-rw-r--r--drivers/dma/imx-dma.c8
-rw-r--r--drivers/dma/ioat/init.c9
-rw-r--r--drivers/dma/k3dma.c2
-rw-r--r--drivers/dma/pl330.c5
-rw-r--r--drivers/dma/pxa_dma.c2
-rw-r--r--drivers/dma/sh/usb-dmac.c6
-rw-r--r--drivers/edac/i7core_edac.c22
-rw-r--r--drivers/extcon/extcon-usb-gpio.c3
-rw-r--r--drivers/firmware/efi/libstub/Makefile4
-rw-r--r--drivers/firmware/iscsi_ibft.c1
-rw-r--r--drivers/gpio/gpio-adnp.c6
-rw-r--r--drivers/gpio/gpio-adp5588.c24
-rw-r--r--drivers/gpio/gpio-max7301.c12
-rw-r--r--drivers/gpio/gpio-ml-ioh.c3
-rw-r--r--drivers/gpio/gpio-msic.c4
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-tps68470.c10
-rw-r--r--drivers/gpio/gpio-vf610.c5
-rw-r--r--drivers/gpio/gpiolib.c9
-rw-r--r--drivers/gpio/gpiolib.h2
-rw-r--r--drivers/gpu/arm/midgard/Kbuild33
-rw-r--r--drivers/gpu/arm/midgard/Kconfig64
-rw-r--r--drivers/gpu/arm/midgard/Makefile4
-rw-r--r--drivers/gpu/arm/midgard/Mconfig32
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/Kbuild8
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c4
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c4
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c51
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c28
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h18
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c53
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c90
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c28
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c16
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h14
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c177
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h22
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c618
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h10
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c308
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h134
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c9
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c99
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c8
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h15
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c32
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c132
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c134
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c70
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h45
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c18
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h12
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h226
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c20
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h8
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c189
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h20
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c191
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c397
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h173
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c34
-rw-r--r--drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h8
-rw-r--r--drivers/gpu/arm/midgard/build.bp69
-rw-r--r--drivers/gpu/arm/midgard/docs/Doxyfile2
-rw-r--r--drivers/gpu/arm/midgard/ipa/Kbuild12
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c126
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h111
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c22
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c20
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c307
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h121
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c256
-rw-r--r--drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c340
-rw-r--r--drivers/gpu/arm/midgard/mali_base_hwconfig_features.h8
-rw-r--r--drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h32
-rw-r--r--drivers/gpu/arm/midgard/mali_base_kernel.h200
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase.h79
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c18
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h11
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c24
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_cache_policy.c12
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_config_defaults.h42
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_context.c78
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_context.h30
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_core_linux.c455
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c8
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h49
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c10
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_defs.h1402
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_device.c99
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_event.c4
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_fence.h2
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_fence_defs.h7
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_api.c20
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h2
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h296
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h2
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h2
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h60
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gpu_id.h86
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c4
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gpuprops.c19
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h5
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_gwt.c66
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_hw.c1
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h16
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h8
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h22
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_ioctl.h131
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_jd.c54
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_jm.c12
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_js.c266
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_js.h29
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_js_defs.h62
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem.c1427
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem.h494
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem_linux.c743
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem_linux.h210
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h74
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem_pool.c256
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h5
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mmu.c933
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h14
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c64
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c76
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_pm.c53
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_pm.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_replay.c7
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_softjobs.c440
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_sync.h6
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_sync_common.c3
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_sync_file.c12
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_tlstream.c75
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_tlstream.h22
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_trace_defs.h12
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c241
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h368
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h145
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_utility.h26
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_vinstr.c441
-rw-r--r--drivers/gpu/arm/midgard/mali_kbase_vinstr.h60
-rw-r--r--drivers/gpu/arm/midgard/mali_linux_kbase_trace.h9
-rw-r--r--drivers/gpu/arm/midgard/mali_midg_regmap.h27
-rw-r--r--drivers/gpu/arm/midgard/mali_timeline.h401
-rw-r--r--drivers/gpu/arm/midgard/mali_uk.h4
-rw-r--r--[-rwxr-xr-x]drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c2
-rw-r--r--drivers/gpu/arm/midgard/sconscript8
-rw-r--r--drivers/gpu/arm/midgard/tests/Mconfig27
-rw-r--r--drivers/gpu/arm/midgard/tests/build.bp36
-rw-r--r--drivers/gpu/arm/midgard/tests/kutf/build.bp31
-rw-r--r--drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile8
-rw-r--r--drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp30
-rw-r--r--drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c9
-rw-r--r--drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript4
-rw-r--r--drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c92
-rw-r--r--drivers/gpu/drm/Kconfig9
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c2
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h1
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c30
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c21
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c39
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c7
-rw-r--r--drivers/gpu/drm/drm_atomic.c126
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c76
-rw-r--r--drivers/gpu/drm/drm_bufs.c3
-rw-r--r--drivers/gpu/drm/drm_connector.c47
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c26
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c358
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c7
-rw-r--r--drivers/gpu/drm/drm_fops.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c36
-rw-r--r--drivers/gpu/drm/drm_ioctl.c20
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/evdi/Kconfig4
-rw-r--r--drivers/gpu/drm/evdi/evdi_connector.c6
-rw-r--r--drivers/gpu/drm/evdi/evdi_debug.c24
-rw-r--r--drivers/gpu/drm/evdi/evdi_debug.h2
-rw-r--r--drivers/gpu/drm/evdi/evdi_drv.c1
-rw-r--r--drivers/gpu/drm/evdi/evdi_drv.h7
-rw-r--r--drivers/gpu/drm/evdi/evdi_fb.c8
-rw-r--r--drivers/gpu/drm/evdi/evdi_main.c27
-rw-r--r--drivers/gpu/drm/evdi/evdi_painter.c33
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c29
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h1
-rw-r--r--drivers/gpu/drm/gma500/Kconfig4
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c12
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c277
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h286
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c43
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c9
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c93
-rw-r--r--drivers/gpu/drm/i915/i915_params.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c5
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h65
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c17
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c132
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c40
-rw-r--r--drivers/gpu/drm/i915/intel_color.c30
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c236
-rw-r--r--drivers/gpu/drm/i915/intel_display.c724
-rw-r--r--drivers/gpu/drm/i915/intel_display.h313
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c521
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c54
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c202
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h147
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c15
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c10
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c91
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c1143
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c11
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c47
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c71
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h333
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/Makefile15
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/allocmem.c455
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/allocmem.h176
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/cache_km.c3523
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/cache_km.h174
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/cache_ops.h55
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_cache_bridge.h88
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_cache_direct_bridge.c141
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_bridge.h112
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_direct_bridge.c206
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_htbuffer_bridge.h78
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_htbuffer_direct_bridge.c124
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_mm_bridge.h248
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_mm_direct_bridge.c805
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pdump_bridge.h96
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pdump_direct_bridge.c166
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_bridge.h74
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_direct_bridge.c114
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_bridge.h119
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_direct_bridge.c258
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pvrtl_bridge.h99
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_pvrtl_direct_bridge.c214
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_bridge.h64
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_direct_bridge.c79
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_ri_bridge.h97
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_ri_direct_bridge.c217
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_sync_bridge.h166
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_sync_direct_bridge.c525
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_synctracking_bridge.h70
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/client_synctracking_direct_bridge.c97
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_breakpoint_bridge.h155
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_cache_bridge.h135
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_cmm_bridge.h118
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_debugmisc_bridge.h173
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_devicememhistory_bridge.h190
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_dmabuf_bridge.h131
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_htbuffer_bridge.h125
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_mm_bridge.h782
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_pdump_bridge.h160
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_pdumpctrl_bridge.h138
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_pdumpmm_bridge.h248
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_pvrtl_bridge.h224
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_regconfig_bridge.h152
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxcmp_bridge.h217
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxhwperf_bridge.h159
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxkicksync_bridge.h134
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxpdump_bridge.h96
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxray_bridge.h291
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxsignals_bridge.h79
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxta3d_bridge.h482
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxtq2_bridge.h181
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_rgxtq_bridge.h165
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_ri_bridge.h235
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_srvcore_bridge.h368
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_sync_bridge.h480
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_synctracking_bridge.h101
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/common_timerquery_bridge.h135
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/config_kernel.h158
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/config_kernel.mk47
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/configs/rgxconfig_km_4.V.2.51.h80
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/connection_server.c509
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/connection_server.h123
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/cores/rgxcore_km_4.40.2.51.h78
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dbgdriv.c1562
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dbgdriv.h122
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dbgdriv_handle.c141
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dbgdriv_ioctl.h58
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dbgdrvif_srv5.h265
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/debugmisc_server.c301
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/debugmisc_server.h108
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/device.h435
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/device_connection.h79
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem.c2960
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem.h683
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.c136
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.h163
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.c1910
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.h154
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.c334
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.h346
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_server.c1760
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_server.h573
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_server_utils.h204
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_typedefs.h141
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_utils.c1065
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicemem_utils.h457
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicememx.h176
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.c79
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.h81
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dllist.h278
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/dma_fence_sync_native_server.c188
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/env_connection.h93
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/event.c436
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/event.h54
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/fwtrace_string.h57
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/handle.c2480
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/handle.h202
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/handle_idr.c439
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/handle_impl.h89
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/handle_types.h90
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/hash.c681
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/hash.h229
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/hostfunc.c216
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/hostfunc.h105
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htb_debug.c1180
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htb_debug.h64
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbserver.c753
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbserver.h247
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbuffer.c195
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbuffer.h133
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbuffer_init.h115
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbuffer_sf.h220
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/htbuffer_types.h124
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/img_3dtypes.h247
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/img_defs.h452
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/img_types.h298
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/info_page.h88
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/info_page_defs.h78
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/info_page_km.c134
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/ioctl.c315
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/kernel_compatibility.h365
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/kernel_nospec.h71
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/kernel_types.h138
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_defs_km.h323
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_table_km.h376
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km/rgx_cr_defs_km.h5608
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km/rgxdefs_km.h335
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km/rgxmhdefs_km.h380
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km/rgxmmudefs_km.h396
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km_apphint.c1430
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km_apphint.h100
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/km_apphint_defs.h320
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/linkage.h55
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/linux_sw_sync.h61
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/linuxsrv.h56
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/lists.c60
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/lists.h355
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/lock.h352
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/lock_types.h103
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/log2.h414
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/main.c248
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mem_utils.c313
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mmu_common.c4263
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mmu_common.h755
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/module_common.c569
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/module_common.h67
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mt8173/Makefile5
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.c350
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.h63
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_sysconfig.c521
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/mt8173/sysinfo.h47
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/opaque_types.h56
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/os_cpu_cache.h73
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/os_srvinit_param.h71
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osconnection_server.c155
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osconnection_server.h120
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osfunc.c1885
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osfunc.h1829
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osfunc_arm64.c293
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osfunc_x86.c158
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/oskm_apphint.h176
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osmmap.h123
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/osmmap_stub.c133
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump.c471
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump.h152
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_common.c4083
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_km.h926
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_mmu.c1326
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_mmu.h189
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_osfunc.h369
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_physmem.c634
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_physmem.h242
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdump_symbolicaddr.h55
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdumpdefs.h213
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pdumpdesc.h142
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physheap.c349
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physheap.h160
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem.c639
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem.h239
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.c1161
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.h114
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.c145
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.h54
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_lma.c1688
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_lma.h86
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_osmem.h124
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.c3837
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.h49
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.c586
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.h84
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pmr.c3522
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pmr.h1105
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pmr_impl.h522
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pmr_os.c617
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pmr_os.h62
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/power.c1024
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/power.h124
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/powervr/buffer_attribs.h90
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/powervr/mem_types.h64
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/powervr/pvrsrv_sync_ext.h56
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/powervr/sync_external.h86
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/private_data.h53
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/proc_stats.h129
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/process_stats.c3658
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/process_stats.h214
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_bridge.h469
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.c637
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.h78
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.c577
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.h123
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync_shared.h52
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.c280
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.h66
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_debug.c1672
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_debug.h580
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.c1170
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.h118
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_drm.c314
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_drm.h93
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_drm_core.h78
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_drv.h90
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_dvfs.h147
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.c595
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.h58
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_fd_sync_kernel.h77
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_fence.c1089
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_fence.h234
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_fence_trace.h224
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.c264
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.h130
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_intrinsics.h70
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_linux_fence.h105
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_notifier.c511
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_notifier.h248
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_platform_drv.c306
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_ricommon.h71
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.c201
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.h62
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_sync.h168
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_sync_file.c776
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvr_uaccess.h91
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrmodule.h48
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv.c3876
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv.h490
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_apphint.h66
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.c515
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.h57
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_cleanup.h158
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_device.h328
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_device_types.h56
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_devmem.h912
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_error.h61
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_errors.h383
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_memallocflags.h622
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.c266
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.h135
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_km.h74
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_server.h78
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlcommon.h240
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlstreams.h62
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrsrvkm.mk166
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/pvrversion.h72
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/ra.c1388
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/ra.h206
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_bridge.h235
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_common.h219
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.c218
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.h136
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_firmware_processor.h92
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif.h562
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_alignchecks.h194
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_hwperf.h242
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_km.h1090
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_resetframework.h74
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sf.h724
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_shared.h641
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sig.h168
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_heaps.h188
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_hwperf.h1265
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.c609
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.h111
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_memallocflags.h49
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_meta.h456
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_mips.h478
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_options.h226
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_pdump_panics.h68
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgx_tq_shared.h63
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxapi_km.h320
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.c347
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.h141
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxbvnc.c581
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxbvnc.h80
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxccb.c2286
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxccb.h261
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxcompute.c1037
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxcompute.h178
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxdebug.c5269
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxdebug.h236
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxdevice.h648
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfw_log_helper.h76
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.c1010
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.h122
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfwload.c317
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfwload.h154
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfwutils.c6256
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxfwutils.h1174
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxheapconfig.h173
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxhwperf.c4309
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxhwperf.h387
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxinit.c4648
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxinit.h275
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxkicksync.c745
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxkicksync.h125
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxlayer.h740
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.c1186
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.h68
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxmem.c721
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxmem.h135
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.c940
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.h94
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.c1076
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.h60
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxpdump.c439
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxpdump.h141
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxpower.c969
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxpower.h245
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxray.c3764
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxray.h368
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxregconfig.c327
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxregconfig.h130
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxsignals.c95
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxsignals.h71
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxsrvinit.c1678
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxstartstop.c1150
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxstartstop.h84
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.c175
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.h86
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxta3d.c5183
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxta3d.h452
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.c1168
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.h118
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.c555
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.h204
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.c255
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.h135
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtransfer.c1688
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxtransfer.h152
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxutils.c249
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rgxutils.h185
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/ri_server.c2131
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/ri_server.h106
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/ri_typedefs.h53
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/rogue_trace_events.h455
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_breakpoint_bridge.c457
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_cache_bridge.c504
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_cmm_bridge.c478
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_debugmisc_bridge.c314
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_devicememhistory_bridge.c814
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_dmabuf_bridge.c485
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_htbuffer_bridge.c437
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_mm_bridge.c3435
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_pdump_bridge.c561
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_pdumpctrl_bridge.c254
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_pdumpmm_bridge.c957
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_pvrtl_bridge.c916
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_regconfig_bridge.c293
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxcmp_bridge.c1199
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxhwperf_bridge.c477
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxkicksync_bridge.c721
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxpdump_bridge.c172
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxray_bridge.c1915
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxsignals_bridge.c193
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxta3d_bridge.c2823
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxtq2_bridge.c1111
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_rgxtq_bridge.c1418
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_ri_bridge.c853
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_srvcore_bridge.c1098
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_sync_bridge.c2204
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_synctracking_bridge.c341
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/server_timerquery_bridge.c244
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/services_kernel_client.h346
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/services_km.h160
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/servicesext.h172
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sofunc_pvr.h94
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sofunc_rgx.h95
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/srvcore.c1336
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/srvcore.h213
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/srvinit.h68
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/srvkm.h141
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync.c2090
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync.h400
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.c2593
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.h596
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_external.h80
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_init.h82
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal.h255
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal_fw.h63
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_fallback_server.h178
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_internal.h129
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_server.c2620
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/sync_server.h437
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/dma_support.c538
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/dma_support.h126
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/syscommon.h124
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/sysvalidation.h63
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_impl.h283
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.c335
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.h143
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_common.h60
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.c324
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.h205
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vmm_type_stub.c226
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_physheap.h267
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_common.c563
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_generic.c410
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_support.c342
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_support.h126
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_vm.h61
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.c114
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.h85
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_vm.c243
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlclient.c506
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlclient.h256
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlintern.c436
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlintern.h320
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlserver.c714
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlserver.h98
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlstream.c1338
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/tlstream.h500
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/trace_events.c231
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/trace_events.h154
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.c244
-rw-r--r--drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.h86
-rw-r--r--drivers/gpu/drm/img-rogue/1.9/module_common.c8
-rw-r--r--drivers/gpu/drm/img-rogue/1.9/pvr_dvfs_device.c5
-rw-r--r--drivers/gpu/drm/img-rogue/Kconfig24
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c5
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c7
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/rockchip/Makefile3
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c79
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.h3
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-link-training.c420
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c100
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.h52
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/udl/Kconfig5
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c8
-rw-r--r--drivers/gpu/drm/udl/udl_main.c35
-rw-r--r--drivers/gpu/drm/via/via_dmablit.c4
-rw-r--r--drivers/gpu/drm/virtio/Kconfig10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/host1x/dev.c4
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c8
-rw-r--r--drivers/hid/Kconfig8
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-apple.c11
-rw-r--r--drivers/hid/hid-core.c221
-rw-r--r--drivers/hid/hid-debug.c116
-rw-r--r--drivers/hid/hid-google-hammer.c5
-rw-r--r--drivers/hid/hid-google-hammer.h12
-rw-r--r--drivers/hid/hid-google-whiskers.c447
-rw-r--r--drivers/hid/hid-ids.h11
-rw-r--r--drivers/hid/hid-input.c112
-rw-r--r--drivers/hid/hid-lenovo.c10
-rw-r--r--drivers/hid/hid-logitech-hidpp.c392
-rw-r--r--drivers/hid/hid-magicmouse.c148
-rw-r--r--drivers/hid/hid-multitouch.c18
-rw-r--r--drivers/hid/hid-ntrig.c2
-rw-r--r--drivers/hid/hid-plantronics.c6
-rw-r--r--drivers/hid/hid-rmi.c17
-rw-r--r--drivers/hid/hid-sony.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c10
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig17
-rw-r--r--drivers/hid/intel-ish-hid/Makefile22
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h228
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h88
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c976
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c342
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c988
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c283
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h190
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c783
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h114
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c257
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c1047
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h182
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/dma-if.c175
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.c1024
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/hbm.h321
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/init.c114
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h278
-rw-r--r--drivers/hid/uhid.c13
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c29
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_kvp.c40
-rw-r--r--drivers/hv/hv_snapshot.c4
-rw-r--r--drivers/hv/hv_util.c1
-rw-r--r--drivers/hv/hyperv_vmbus.h5
-rw-r--r--drivers/hv/vmbus_drv.c20
-rw-r--r--drivers/hwmon/adt7475.c14
-rw-r--r--drivers/hwmon/ibmpowernv.c7
-rw-r--r--drivers/hwmon/ina2xx.c15
-rw-r--r--drivers/hwmon/lm80.c28
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c5
-rw-r--r--drivers/hwmon/w83795.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c13
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator-qcom.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c16
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c17
-rw-r--r--drivers/hwtracing/coresight/coresight.c62
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c2
-rw-r--r--drivers/hwtracing/intel_th/gth.c4
-rw-r--r--drivers/hwtracing/intel_th/msu.c3
-rw-r--r--drivers/hwtracing/stm/Kconfig4
-rw-r--r--drivers/hwtracing/stm/core.c166
-rw-r--r--drivers/hwtracing/stm/policy.c25
-rw-r--r--drivers/hwtracing/stm/stm.h2
-rw-r--r--drivers/i2c/busses/i2c-axxia.c58
-rw-r--r--drivers/i2c/busses/i2c-cadence.c9
-rw-r--r--drivers/i2c/busses/i2c-cros-ec-tunnel.c10
-rw-r--r--drivers/i2c/busses/i2c-davinci.c8
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c5
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c320
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h25
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c117
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c189
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/i2c/busses/i2c-imx.c11
-rw-r--r--drivers/i2c/busses/i2c-ismt.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c199
-rw-r--r--drivers/i2c/busses/i2c-scmi.c11
-rw-r--r--drivers/i2c/busses/i2c-tegra.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c7
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c7
-rw-r--r--drivers/i2c/busses/i2c-xiic.c4
-rw-r--r--drivers/i2c/i2c-core.c313
-rw-r--r--drivers/i2c/i2c-dev.c6
-rw-r--r--drivers/ide/pmac.c1
-rw-r--r--drivers/iio/adc/at91_adc.c6
-rw-r--r--drivers/iio/buffer/kfifo_buf.c11
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c13
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_ring.c559
-rw-r--r--drivers/iio/frequency/ad9523.c4
-rw-r--r--drivers/iio/proximity/Kconfig28
-rw-r--r--drivers/iio/proximity/Makefile2
-rw-r--r--drivers/iio/proximity/sx9310.c1163
-rw-r--r--drivers/iio/proximity/sx932x.c1470
-rw-r--r--drivers/infiniband/Kconfig12
-rw-r--r--drivers/infiniband/core/Makefile4
-rw-r--r--drivers/infiniband/core/cma.c25
-rw-r--r--drivers/infiniband/core/mad.c11
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/ucma.c17
-rw-r--r--drivers/infiniband/core/umem.c17
-rw-r--r--drivers/infiniband/core/umem_odp.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c1
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c50
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c4
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c2
-rw-r--r--drivers/infiniband/hw/qib/qib.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_user_pages.c23
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c5
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c7
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c10
-rw-r--r--drivers/input/joystick/xpad.c809
-rw-r--r--drivers/input/keyboard/atakbd.c74
-rw-r--r--drivers/input/keyboard/cros_ec_keyb.c33
-rw-r--r--drivers/input/keyboard/matrix_keypad.c25
-rw-r--r--drivers/input/keyboard/omap4-keypad.c34
-rw-r--r--drivers/input/keyboard/st-keyscan.c4
-rw-r--r--drivers/input/misc/bma150.c9
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c12
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c32
-rw-r--r--drivers/input/mouse/elantech.c22
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c4
-rw-r--r--drivers/input/touchscreen/goodix.c1
-rw-r--r--drivers/iommu/amd_iommu.c15
-rw-r--r--drivers/iommu/arm-smmu-v3.c9
-rw-r--r--drivers/iommu/dmar.c6
-rw-r--r--drivers/iommu/intel-iommu.c24
-rw-r--r--drivers/iommu/intel-svm.c2
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/ipmmu-vmsa.c12
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c59
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-mmp.c6
-rw-r--r--drivers/isdn/capi/kcapi.c4
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c9
-rw-r--r--drivers/isdn/hardware/avm/b1.c2
-rw-r--r--drivers/isdn/hardware/eicon/diva.c22
-rw-r--r--drivers/isdn/hardware/eicon/diva.h5
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c18
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c3
-rw-r--r--drivers/isdn/hisax/hfc_pci.c2
-rw-r--r--drivers/isdn/i4l/isdn_common.c8
-rw-r--r--drivers/isdn/i4l/isdn_tty.c8
-rw-r--r--drivers/isdn/mISDN/timerdev.c2
-rw-r--r--drivers/leds/leds-gpio.c4
-rw-r--r--drivers/leds/leds-lp5523.c4
-rw-r--r--drivers/leds/leds-pwm.c1
-rw-r--r--drivers/macintosh/via-pmu.c9
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/writeback.c4
-rw-r--r--drivers/md/dm-bufio.c3
-rw-r--r--drivers/md/dm-cache-metadata.c3
-rw-r--r--drivers/md/dm-cache-target.c9
-rw-r--r--drivers/md/dm-ioctl.c18
-rw-r--r--drivers/md/dm-kcopyd.c21
-rw-r--r--drivers/md/dm-snap.c22
-rw-r--r--drivers/md/dm-table.c28
-rw-r--r--drivers/md/dm-thin-metadata.c34
-rw-r--r--drivers/md/dm-thin.c139
-rw-r--r--drivers/md/dm-verity-chromeos.c22
-rw-r--r--drivers/md/md-cluster.c19
-rw-r--r--drivers/md/md.c7
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid10.c16
-rw-r--r--drivers/md/raid5.c8
-rw-r--r--drivers/media/cec/cec-notifier.c11
-rw-r--r--drivers/media/common/siano/smsendian.c14
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c23
-rw-r--r--drivers/media/dvb-frontends/ascot2e.c4
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c4
-rw-r--r--drivers/media/dvb-frontends/horus3a.c4
-rw-r--r--drivers/media/dvb-frontends/itd1000.c5
-rw-r--r--drivers/media/dvb-frontends/mt312.c5
-rw-r--r--drivers/media/dvb-frontends/stb0899_drv.c3
-rw-r--r--drivers/media/dvb-frontends/stb6100.c6
-rw-r--r--drivers/media/dvb-frontends/stv0367.c4
-rw-r--r--drivers/media/dvb-frontends/stv090x.c4
-rw-r--r--drivers/media/dvb-frontends/stv6110x.c4
-rw-r--r--drivers/media/dvb-frontends/zl10039.c4
-rw-r--r--drivers/media/firewire/firedtv-avc.c6
-rw-r--r--drivers/media/firewire/firedtv.h6
-rw-r--r--drivers/media/i2c/Kconfig47
-rw-r--r--drivers/media/i2c/Makefile6
-rw-r--r--drivers/media/i2c/ak7375.c292
-rw-r--r--drivers/media/i2c/cx25840/cx25840-core.c28
-rw-r--r--drivers/media/i2c/dw9714.c57
-rw-r--r--drivers/media/i2c/imx208.c1098
-rw-r--r--drivers/media/i2c/imx258.c88
-rw-r--r--drivers/media/i2c/imx319.c2557
-rw-r--r--drivers/media/i2c/imx355.c1859
-rw-r--r--drivers/media/i2c/ov13858.c21
-rw-r--r--drivers/media/i2c/ov2685.c7
-rw-r--r--drivers/media/i2c/ov5670.c15
-rw-r--r--drivers/media/i2c/ov5695.c7
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c11
-rw-r--r--drivers/media/i2c/soc_camera/ov772x.c2
-rw-r--r--drivers/media/i2c/tvp5150.c14
-rw-r--r--drivers/media/pci/cx23885/altera-ci.c10
-rw-r--r--drivers/media/pci/intel/ipu3/Kconfig12
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-abi.h1269
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.c52
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h4
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css-fw.c12
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css-fw.h18
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css-params.c357
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css-params.h9
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css-pool.c83
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css-pool.h35
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css.c896
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-css.h74
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-dmamap.c49
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-dmamap.h14
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-mmu.c11
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-mmu.h15
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-tables.c2
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-tables.h10
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-v4l2.c817
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3.c328
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3.h61
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c3
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c8
-rw-r--r--drivers/media/pci/saa7164/saa7164-fw.c3
-rw-r--r--drivers/media/platform/Kconfig19
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/cros-ec-cec/Makefile1
-rw-r--r--drivers/media/platform/cros-ec-cec/cros-ec-cec.c347
-rw-r--r--drivers/media/platform/davinci/vpbe.c7
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp-video.c11
-rw-r--r--drivers/media/platform/fsl-viu.c38
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap3isp/isp.c9
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/rockchip-vpu/Makefile1
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_h264d.c3
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_vp9d.c17
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vdec_regs.h2
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vpu_hw.c19
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_h264e.c26
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_jpege.c158
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_vp8e.c32
-rw-r--r--drivers/media/platform/rockchip-vpu/rk3399_vpu_regs.h2
-rw-r--r--drivers/media/platform/rockchip-vpu/rockchip_vpu.c8
-rw-r--r--drivers/media/platform/rockchip-vpu/rockchip_vpu_common.h10
-rw-r--r--drivers/media/platform/rockchip-vpu/rockchip_vpu_dec.c57
-rw-r--r--drivers/media/platform/rockchip-vpu/rockchip_vpu_enc.c28
-rw-r--r--drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.c5
-rw-r--r--drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.h6
-rw-r--r--drivers/media/platform/rockchip/isp1/capture.c95
-rw-r--r--drivers/media/platform/rockchip/isp1/dev.c3
-rw-r--r--drivers/media/platform/rockchip/isp1/isp_params.c63
-rw-r--r--drivers/media/platform/rockchip/isp1/rkisp1.c6
-rw-r--r--drivers/media/platform/s3c-camif/camif-capture.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c15
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-cap.c5
-rw-r--r--drivers/media/platform/vivid/vivid-kthread-out.c5
-rw-r--r--drivers/media/platform/vivid/vivid-vid-cap.c2
-rw-r--r--drivers/media/platform/vivid/vivid-vid-common.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c3
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c6
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-dvb.c3
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c8
-rw-r--r--drivers/media/usb/tm6000/tm6000-dvb.c5
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c7
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c14
-rw-r--r--drivers/media/usb/uvc/uvc_video.c32
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c44
-rw-r--r--drivers/media/v4l2-core/v4l2-event.c63
-rw-r--r--drivers/media/v4l2-core/v4l2-fh.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c1
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c7
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c19
-rw-r--r--drivers/media/v4l2-core/videobuf2-memops.c6
-rw-r--r--drivers/memory/tegra/mc.c22
-rw-r--r--drivers/memory/tegra/mc.h9
-rw-r--r--drivers/memory/tegra/tegra114.c2
-rw-r--r--drivers/memory/tegra/tegra124.c6
-rw-r--r--drivers/memory/tegra/tegra210.c3
-rw-r--r--drivers/memory/tegra/tegra30.c2
-rw-r--r--drivers/memstick/core/memstick.c3
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/mfd/ab8500-debugfs.c1
-rw-r--r--drivers/mfd/as3722.c30
-rw-r--r--drivers/mfd/cros_ec.c91
-rw-r--r--drivers/mfd/cros_ec_spi.c3
-rw-r--r--drivers/mfd/db8500-prcmu.c7
-rw-r--r--drivers/mfd/intel-lpss.c4
-rw-r--r--drivers/mfd/mc13xxx-core.c4
-rw-r--r--drivers/mfd/omap-usb-host.c11
-rw-r--r--drivers/mfd/qcom_rpm.c4
-rw-r--r--drivers/mfd/sm501.c1
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c8
-rw-r--r--drivers/mfd/tps6586x.c24
-rw-r--r--drivers/mfd/tps68470.c10
-rw-r--r--drivers/mfd/twl-core.c4
-rw-r--r--drivers/mfd/wm5110-tables.c2
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/atmel-ssc.c2
-rw-r--r--drivers/misc/eeprom/at24.c18
-rw-r--r--drivers/misc/genwqe/card_base.h2
-rw-r--r--drivers/misc/genwqe/card_dev.c9
-rw-r--r--drivers/misc/genwqe/card_utils.c2
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c27
-rw-r--r--drivers/misc/lkdtm.c40
-rw-r--r--drivers/misc/mei/bus-fixup.c2
-rw-r--r--drivers/misc/mei/pci-me.c5
-rw-r--r--drivers/misc/mic/scif/scif_api.c20
-rw-r--r--drivers/misc/mic/scif/scif_rma.c5
-rw-r--r--drivers/misc/sgi-gru/grufault.c2
-rw-r--r--drivers/misc/sgi-gru/grukdump.c4
-rw-r--r--drivers/misc/throttler/Kconfig34
-rw-r--r--drivers/misc/throttler/Makefile2
-rw-r--r--drivers/misc/throttler/core.c697
-rw-r--r--drivers/misc/throttler/cros_ec_throttler.c111
-rw-r--r--drivers/misc/ti-st/st_kim.c4
-rw-r--r--drivers/misc/tsl2550.c2
-rw-r--r--drivers/misc/vexpress-syscfg.c2
-rw-r--r--drivers/misc/vmw_balloon.c95
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c4
-rw-r--r--drivers/mmc/card/Kconfig8
-rw-r--r--drivers/mmc/card/block.c6
-rw-r--r--drivers/mmc/card/mmc_test.c146
-rw-r--r--drivers/mmc/core/Makefile1
-rw-r--r--drivers/mmc/core/core.c134
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/ffu.c465
-rw-r--r--drivers/mmc/core/mmc.c78
-rw-r--r--drivers/mmc/core/pwrseq_simple.c22
-rw-r--r--drivers/mmc/host/atmel-mci.c3
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/omap.c11
-rw-r--r--drivers/mmc/host/omap_hsmmc.c12
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c6
-rw-r--r--drivers/mmc/host/sdhci-iproc.c5
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c3
-rw-r--r--drivers/mmc/host/sdhci.c118
-rw-r--r--drivers/mmc/host/sdhci.h12
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c8
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c51
-rw-r--r--drivers/mtd/devices/Kconfig2
-rw-r--r--drivers/mtd/maps/solutionengine.c6
-rw-r--r--drivers/mtd/mtdchar.c10
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c17
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c15
-rw-r--r--drivers/mtd/nand/mxc_nand.c5
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c11
-rw-r--r--drivers/mtd/ubi/attach.c139
-rw-r--r--drivers/mtd/ubi/build.c3
-rw-r--r--drivers/mtd/ubi/eba.c96
-rw-r--r--drivers/mtd/ubi/fastmap-wl.c6
-rw-r--r--drivers/mtd/ubi/fastmap.c51
-rw-r--r--drivers/mtd/ubi/ubi.h46
-rw-r--r--drivers/mtd/ubi/wl.c126
-rw-r--r--drivers/net/appletalk/ipddp.c8
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_main.c45
-rw-r--r--drivers/net/bonding/bond_options.c1
-rw-r--r--drivers/net/can/dev.c47
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c5
-rw-r--r--drivers/net/can/rcar_can.c5
-rw-r--r--drivers/net/can/usb/ems_usb.c1
-rw-r--r--drivers/net/can/xilinx_can.c323
-rw-r--r--drivers/net/dsa/mv88e6060.c10
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/altera/altera_msgdma.c3
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amd/Kconfig4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c34
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c51
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c23
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h3
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c27
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c10
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c19
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c93
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c7
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c16
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c11
-rw-r--r--drivers/net/ethernet/intel/e1000e/ptp.c13
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c11
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c7
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c1
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c10
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c75
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c30
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c23
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c1
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c23
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c21
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c5
-rw-r--r--drivers/net/ethernet/realtek/r8169.c19
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker.c24
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sungem.c22
-rw-r--r--drivers/net/ethernet/ti/cpsw.c49
-rw-r--r--drivers/net/ethernet/ti/cpsw.h1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c5
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/hamradio/bpqether.c8
-rw-r--r--drivers/net/ieee802154/at86rf230.c15
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c9
-rw-r--r--drivers/net/irda/w83977af_ir.c4
-rw-r--r--drivers/net/phy/bcm-cygnus.c6
-rw-r--r--drivers/net/phy/bcm-phy-lib.h7
-rw-r--r--drivers/net/phy/bcm7xxx.c4
-rw-r--r--drivers/net/phy/dp83640.c13
-rw-r--r--drivers/net/phy/mdio_bus.c1
-rw-r--r--drivers/net/phy/micrel.c14
-rw-r--r--drivers/net/phy/phy_device.c22
-rw-r--r--drivers/net/ppp/pppoe.c1
-rw-r--r--drivers/net/ppp/pptp.c1
-rw-r--r--drivers/net/rionet.c2
-rw-r--r--drivers/net/team/team.c35
-rw-r--r--drivers/net/team/team_mode_loadbalance.c15
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/asix_common.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c3
-rw-r--r--drivers/net/usb/cdc_mbim.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/usb/ch9200.c9
-rw-r--r--drivers/net/usb/hso.c18
-rw-r--r--drivers/net/usb/ipheth.c10
-rw-r--r--drivers/net/usb/kaweth.c18
-rw-r--r--drivers/net/usb/lan78xx.c19
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c6
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/smsc75xx.c66
-rw-r--r--drivers/net/usb/smsc95xx.c15
-rw-r--r--drivers/net/usb/sr9800.c3
-rw-r--r--drivers/net/vxlan.c30
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/ahb.c23
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/bmi.c23
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/bmi.h31
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/ce.c60
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/ce.h14
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/core.c203
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/core.h56
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/coredump.c954
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/coredump.h225
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/debug.c246
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/debug.h10
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/hw.c192
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/hw.h24
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/mac.c43
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/mac.h2
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/pci.c441
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/pci.h8
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/wmi-tlv.c4
-rw-r--r--drivers/net/wireless/ar10k/ath/ath10k/wow.c2
-rw-r--r--drivers/net/wireless/ar10k/hdrs/mac80211-exp.h1
-rw-r--r--drivers/net/wireless/ar10k/hdrs/mac80211.h5
-rw-r--r--drivers/net/wireless/ar10k/mac80211/main.c12
-rw-r--r--drivers/net/wireless/ar10k/mac80211/tx.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c10
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c10
-rw-r--r--drivers/net/wireless/ath/regd.h5
-rw-r--r--drivers/net/wireless/ath/regd_common.h13
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_common.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c28
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c72
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c115
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_rx.c13
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c1
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c2
-rw-r--r--drivers/net/wireless/st/cw1200/scan.c13
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c5
-rw-r--r--drivers/net/xen-netback/netback.c10
-rw-r--r--drivers/net/xen-netfront.c44
-rw-r--r--drivers/nfc/nfcmrvl/uart.c5
-rw-r--r--drivers/nfc/nxp-nci/firmware.c2
-rw-r--r--drivers/nfc/nxp-nci/i2c.c2
-rw-r--r--drivers/nvdimm/bus.c18
-rw-r--r--drivers/nvme/host/nvme.h16
-rw-r--r--drivers/nvme/host/pci.c211
-rw-r--r--drivers/of/base.c25
-rw-r--r--drivers/of/unittest.c36
-rw-r--r--drivers/parport/parport_pc.c2
-rw-r--r--drivers/parport/parport_sunbpp.c8
-rw-r--r--drivers/pci/host/pci-mvebu.c2
-rw-r--r--drivers/pci/host/pcie-altera.c201
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c9
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c9
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c18
-rw-r--r--drivers/pci/pci-acpi.c2
-rw-r--r--drivers/pci/pci-sysfs.c18
-rw-r--r--drivers/pci/pci.c27
-rw-r--r--drivers/pci/pci.h9
-rw-r--r--drivers/pci/pcie/aer/Makefile2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h10
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_errprint.c5
-rw-r--r--drivers/pci/pcie/aer/aerdrv_stats.c200
-rw-r--r--drivers/pci/probe.c8
-rw-r--r--drivers/pci/quirks.c4
-rw-r--r--drivers/pcmcia/ricoh.h35
-rw-r--r--drivers/pcmcia/yenta_socket.c3
-rw-r--r--drivers/phy/phy-rockchip-typec.c308
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c2
-rw-r--r--drivers/pinctrl/meson/pinctrl-meson8b.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c23
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c32
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c27
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c28
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c2
-rw-r--r--drivers/platform/Kconfig2
-rw-r--r--drivers/platform/chrome/Kconfig8
-rw-r--r--drivers/platform/chrome/Makefile1
-rw-r--r--drivers/platform/chrome/cros_ec_dev.c56
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c24
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c10
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs_usb.c95
-rw-r--r--drivers/platform/goldfish/Kconfig18
-rw-r--r--drivers/platform/goldfish/Makefile2
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c176
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/acerhdf.c1
-rw-r--r--drivers/platform/x86/alienware-wmi.c1
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c3
-rw-r--r--drivers/platform/x86/intel_pmc_core.c4
-rw-r--r--drivers/platform/x86/toshiba_acpi.c3
-rw-r--r--drivers/pnp/isapnp/proc.c2
-rw-r--r--drivers/power/olpc_battery.c4
-rw-r--r--drivers/power/reset/vexpress-poweroff.c12
-rw-r--r--drivers/ptp/ptp_chardev.c10
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c2
-rw-r--r--drivers/regulator/core.c4
-rw-r--r--drivers/regulator/pfuze100-regulator.c1
-rw-r--r--drivers/regulator/s2mpa01.c10
-rw-r--r--drivers/regulator/s2mps11.c6
-rw-r--r--drivers/rtc/hctosys.c4
-rw-r--r--drivers/rtc/interface.c5
-rw-r--r--drivers/rtc/rtc-bq4802.c4
-rw-r--r--drivers/rtc/rtc-cmos.c255
-rw-r--r--drivers/rtc/rtc-lib.c6
-rw-r--r--drivers/rtc/rtc-snvs.c104
-rw-r--r--drivers/s390/block/dasd_eckd.c15
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/cio/qdio_main.c5
-rw-r--r--drivers/s390/net/qeth_core_main.c45
-rw-r--r--drivers/s390/net/qeth_core_sys.c1
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c6
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c40
-rw-r--r--drivers/s390/scsi/zfcp_erp.c140
-rw-r--r--drivers/s390/scsi/zfcp_ext.h7
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c22
-rw-r--r--drivers/s390/virtio/virtio_ccw.c21
-rw-r--r--drivers/sbus/char/display7seg.c1
-rw-r--r--drivers/sbus/char/envctrl.c2
-rw-r--r--drivers/scsi/3w-9xxx.c11
-rw-r--r--drivers/scsi/3w-sas.c3
-rw-r--r--drivers/scsi/3w-xxxx.c5
-rw-r--r--drivers/scsi/aacraid/linit.c2
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c4
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c8
-rw-r--r--drivers/scsi/bfa/bfa_fcs.c78
-rw-r--r--drivers/scsi/bfa/bfa_fcs_lport.c62
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c2
-rw-r--r--drivers/scsi/bfa/bfa_svc.c4
-rw-r--r--drivers/scsi/bfa/bfad.c20
-rw-r--r--drivers/scsi/bfa/bfad_attr.c2
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c6
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/csiostor/csio_attr.c2
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c43
-rw-r--r--drivers/scsi/esp_scsi.c1
-rw-r--r--drivers/scsi/esp_scsi.h2
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c4
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c4
-rw-r--r--drivers/scsi/isci/init.c14
-rw-r--r--drivers/scsi/libfc/fc_lport.c6
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libsas/sas_expander.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c7
-rw-r--r--drivers/scsi/mac_esp.c2
-rw-r--r--drivers/scsi/megaraid.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c22
-rw-r--r--drivers/scsi/scsi_dh.c5
-rw-r--r--drivers/scsi/scsi_scan.c5
-rw-r--r--drivers/scsi/scsi_sysfs.c28
-rw-r--r--drivers/scsi/scsi_transport_srp.c22
-rw-r--r--drivers/scsi/sd.c25
-rw-r--r--drivers/scsi/sg.c43
-rw-r--r--drivers/scsi/sr.c29
-rw-r--r--drivers/scsi/sr_ioctl.c21
-rw-r--r--drivers/scsi/st.c5
-rw-r--r--drivers/scsi/ufs/ufs.h3
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c5
-rw-r--r--drivers/scsi/ufs/ufshcd.c77
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/scsi/vmw_pvscsi.c15
-rw-r--r--drivers/scsi/xen-scsifront.c33
-rw-r--r--drivers/soc/tegra/common.c6
-rw-r--r--drivers/soc/tegra/pmc.c2
-rw-r--r--drivers/spi/spi-bcm2835.c16
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c4
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-rspi.c34
-rw-r--r--drivers/spi/spi-sh-msiof.c28
-rw-r--r--drivers/spi/spi-tegra20-slink.c31
-rw-r--r--drivers/spi/spi-xlp.c4
-rw-r--r--drivers/spi/spi.c10
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/android/ashmem.c6
-rwxr-xr-xdrivers/staging/android/ion/ion.c63
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/gasket/Kconfig23
-rw-r--r--drivers/staging/gasket/Makefile9
-rw-r--r--drivers/staging/gasket/TODO9
-rw-r--r--drivers/staging/gasket/apex.h30
-rw-r--r--drivers/staging/gasket/apex_driver.c690
-rw-r--r--drivers/staging/gasket/gasket.h122
-rw-r--r--drivers/staging/gasket/gasket_constants.h47
-rw-r--r--drivers/staging/gasket/gasket_core.c1895
-rw-r--r--drivers/staging/gasket/gasket_core.h707
-rw-r--r--drivers/staging/gasket/gasket_interrupt.c550
-rw-r--r--drivers/staging/gasket/gasket_interrupt.h117
-rw-r--r--drivers/staging/gasket/gasket_ioctl.c391
-rw-r--r--drivers/staging/gasket/gasket_ioctl.h28
-rw-r--r--drivers/staging/gasket/gasket_page_table.c1381
-rw-r--r--drivers/staging/gasket/gasket_page_table.h249
-rw-r--r--drivers/staging/gasket/gasket_sysfs.c401
-rw-r--r--drivers/staging/gasket/gasket_sysfs.h179
-rw-r--r--drivers/staging/goldfish/goldfish_audio.c1
-rw-r--r--drivers/staging/iio/adc/ad7280a.c17
-rw-r--r--drivers/staging/iio/adc/ad7780.c6
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c7
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c9
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c14
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h1
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c6
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c3
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c3
-rw-r--r--drivers/staging/rdma/hfi1/ud.c1
-rw-r--r--drivers/staging/rdma/hfi1/user_pages.c2
-rw-r--r--drivers/staging/rdma/ipath/ipath_user_pages.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c2
-rw-r--r--drivers/staging/rts5208/sd.c8
-rw-r--r--drivers/staging/rts5208/xd.c2
-rw-r--r--drivers/staging/speakup/kobjects.c4
-rw-r--r--drivers/staging/vt6655/device_main.c11
-rw-r--r--drivers/target/iscsi/iscsi_target.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c35
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_spc.c17
-rw-r--r--drivers/target/target_core_transport.c5
-rw-r--r--drivers/tc/tc.c8
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c2
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c1
-rw-r--r--drivers/thermal/thermal_hwmon.h4
-rw-r--r--drivers/tty/hvc/hvc_opal.c1
-rw-r--r--drivers/tty/n_hdlc.c1
-rw-r--r--drivers/tty/n_tty.c74
-rw-r--r--drivers/tty/pty.c5
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c16
-rw-r--r--drivers/tty/serial/8250/8250_pci.c141
-rw-r--r--drivers/tty/serial/8250/serial_cs.c6
-rw-r--r--drivers/tty/serial/atmel_serial.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c10
-rw-r--r--drivers/tty/serial/fsl_lpuart.c6
-rw-r--r--drivers/tty/serial/imx.c8
-rw-r--r--drivers/tty/serial/kgdboc.c44
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/samsung.c10
-rw-r--r--drivers/tty/serial/sc16is7xx.c19
-rw-r--r--drivers/tty/serial/sh-sci.c20
-rw-r--r--drivers/tty/serial/sprd_serial.c10
-rw-r--r--drivers/tty/serial/suncore.c1
-rw-r--r--drivers/tty/tty_buffer.c2
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/tty/tty_ioctl.c4
-rw-r--r--drivers/tty/tty_ldsem.c10
-rw-r--r--drivers/tty/vt/vt.c1
-rw-r--r--drivers/tty/vt/vt_ioctl.c4
-rw-r--r--drivers/uio/uio.c13
-rw-r--r--drivers/usb/chipidea/otg.h3
-rw-r--r--drivers/usb/class/cdc-acm.c40
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/core/devio.c24
-rw-r--r--drivers/usb/core/driver.c38
-rw-r--r--drivers/usb/core/generic.c22
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c51
-rw-r--r--drivers/usb/core/hub.c130
-rw-r--r--drivers/usb/core/hub.h1
-rw-r--r--drivers/usb/core/message.c11
-rw-r--r--drivers/usb/core/port.c10
-rw-r--r--drivers/usb/core/quirks.c227
-rw-r--r--drivers/usb/core/usb-acpi.c163
-rw-r--r--drivers/usb/core/usb.c42
-rw-r--r--drivers/usb/core/usb.h2
-rw-r--r--drivers/usb/dwc2/gadget.c7
-rw-r--r--drivers/usb/dwc2/hcd.c1
-rw-r--r--drivers/usb/dwc2/hcd_intr.c3
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c4
-rw-r--r--drivers/usb/dwc3/gadget.c1
-rw-r--r--drivers/usb/gadget/composite.c5
-rw-r--r--drivers/usb/gadget/configfs.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c3
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c2
-rw-r--r--drivers/usb/gadget/function/f_uac2.c20
-rw-r--r--drivers/usb/gadget/function/rndis.c3
-rw-r--r--drivers/usb/gadget/function/u_serial.c9
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c9
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c15
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c16
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c87
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/host/ehci-hcd.c1
-rw-r--r--drivers/usb/host/ehci-hub.c8
-rw-r--r--drivers/usb/host/ehci-omap.c4
-rw-r--r--drivers/usb/host/hwa-hc.c2
-rw-r--r--drivers/usb/host/imx21-hcd.c6
-rw-r--r--drivers/usb/host/r8a66597-hcd.c5
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/xhci-hub.c98
-rw-r--r--drivers/usb/host/xhci-mem.c2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci-ring.c11
-rw-r--r--drivers/usb/host/xhci.c67
-rw-r--r--drivers/usb/host/xhci.h9
-rw-r--r--drivers/usb/misc/appledisplay.c2
-rw-r--r--drivers/usb/misc/uss720.c4
-rw-r--r--drivers/usb/misc/yurex.c29
-rw-r--r--drivers/usb/musb/musb_host.c5
-rw-r--r--drivers/usb/musb/musb_host.h7
-rw-r--r--drivers/usb/musb/musb_virthub.c25
-rw-r--r--drivers/usb/phy/phy-am335x.c5
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c5
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/cp210x.c26
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h10
-rw-r--r--drivers/usb/serial/io_ti.h2
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/kobil_sct.c12
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/usb/serial/option.c39
-rw-r--r--drivers/usb/serial/pl2303.c6
-rw-r--r--drivers/usb/serial/pl2303.h7
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h2
-rw-r--r--drivers/usb/serial/usb-serial-simple.c4
-rw-r--r--drivers/usb/storage/scsiglue.c17
-rw-r--r--drivers/usb/storage/transport.c14
-rw-r--r--drivers/usb/storage/unusual_devs.h19
-rw-r--r--drivers/usb/storage/unusual_realtek.h10
-rw-r--r--drivers/usb/wusbcore/security.c2
-rw-r--r--drivers/uwb/hwa-rc.c1
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--drivers/vhost/vsock.c71
-rw-r--r--drivers/video/backlight/as3711_bl.c33
-rw-r--r--drivers/video/backlight/max8925_bl.c4
-rw-r--r--drivers/video/backlight/tps65217_bl.c4
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/fbdev/aty/atyfb.h3
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c7
-rw-r--r--drivers/video/fbdev/aty/mach64_accel.c28
-rw-r--r--drivers/video/fbdev/aty/mach64_ct.c10
-rw-r--r--drivers/video/fbdev/clps711x-fb.c5
-rw-r--r--drivers/video/fbdev/core/fbmem.c46
-rw-r--r--drivers/video/fbdev/core/modedb.c41
-rw-r--r--drivers/video/fbdev/goldfishfb.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c7
-rw-r--r--drivers/video/fbdev/pvr2fb.c2
-rw-r--r--drivers/video/fbdev/pxa168fb.c6
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/uvesafb.c3
-rw-r--r--drivers/video/fbdev/via/viafbdev.c3
-rw-r--r--drivers/virt/fsl_hypervisor.c4
-rw-r--r--drivers/virtio/virtio_balloon.c7
-rw-r--r--drivers/w1/masters/mxc_w1.c20
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/watchdog/booke_wdt.c2
-rw-r--r--drivers/watchdog/pika_wdt.c2
-rw-r--r--drivers/xen/cpu_hotplug.c15
-rw-r--r--drivers/xen/events/events_base.c4
-rw-r--r--drivers/xen/manage.c6
-rw-r--r--drivers/xen/swiotlb-xen.c6
-rw-r--r--drivers/xen/xlate_mmu.c1
-rw-r--r--fs/9p/cache.c8
-rw-r--r--fs/9p/v9fs.h2
-rw-r--r--fs/9p/v9fs_vfs.h23
-rw-r--r--fs/9p/vfs_dir.c11
-rw-r--r--fs/9p/vfs_file.c22
-rw-r--r--fs/9p/vfs_inode.c25
-rw-r--r--fs/9p/vfs_inode_dotl.c27
-rw-r--r--fs/9p/vfs_super.c4
-rw-r--r--fs/9p/xattr.c6
-rw-r--r--fs/aio.c5
-rw-r--r--fs/autofs4/autofs_i.h4
-rw-r--r--fs/autofs4/expire.c3
-rw-r--r--fs/autofs4/inode.c5
-rw-r--r--fs/bfs/inode.c9
-rw-r--r--fs/binfmt_elf.c48
-rw-r--r--fs/binfmt_misc.c12
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/ctree.c14
-rw-r--r--fs/btrfs/ctree.h141
-rw-r--r--fs/btrfs/dev-replace.c8
-rw-r--r--fs/btrfs/disk-io.c133
-rw-r--r--fs/btrfs/disk-io.h2
-rw-r--r--fs/btrfs/extent-tree.c174
-rw-r--r--fs/btrfs/extent_io.c47
-rw-r--r--fs/btrfs/extent_io.h19
-rw-r--r--fs/btrfs/extent_map.c2
-rw-r--r--fs/btrfs/extent_map.h10
-rw-r--r--fs/btrfs/free-space-cache.c34
-rw-r--r--fs/btrfs/inode.c34
-rw-r--r--fs/btrfs/ioctl.c35
-rw-r--r--fs/btrfs/qgroup.c20
-rw-r--r--fs/btrfs/raid56.c3
-rw-r--r--fs/btrfs/relocation.c23
-rw-r--r--fs/btrfs/root-tree.c27
-rw-r--r--fs/btrfs/scrub.c4
-rw-r--r--fs/btrfs/send.c11
-rw-r--r--fs/btrfs/struct-funcs.c9
-rw-r--r--fs/btrfs/super.c1
-rw-r--r--fs/btrfs/transaction.c6
-rw-r--r--fs/btrfs/tree-checker.c649
-rw-r--r--fs/btrfs/tree-checker.h38
-rw-r--r--fs/btrfs/tree-log.c51
-rw-r--r--fs/btrfs/volumes.c139
-rw-r--r--fs/btrfs/volumes.h2
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/cachefiles/rdwr.c20
-rw-r--r--fs/ceph/caps.c1
-rw-r--r--fs/ceph/snap.c3
-rw-r--r--fs/cifs/cifs_debug.c41
-rw-r--r--fs/cifs/cifs_spnego.c6
-rw-r--r--fs/cifs/cifs_unicode.c3
-rw-r--r--fs/cifs/cifssmb.c21
-rw-r--r--fs/cifs/connect.c56
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/file.c28
-rw-r--r--fs/cifs/inode.c12
-rw-r--r--fs/cifs/link.c4
-rw-r--r--fs/cifs/misc.c8
-rw-r--r--fs/cifs/readdir.c20
-rw-r--r--fs/cifs/sess.c6
-rw-r--r--fs/cifs/smb2file.c8
-rw-r--r--fs/cifs/smb2inode.c2
-rw-r--r--fs/cifs/smb2maperror.c4
-rw-r--r--fs/cifs/smb2misc.c7
-rw-r--r--fs/cifs/smb2ops.c8
-rw-r--r--fs/cifs/smb2pdu.c49
-rw-r--r--fs/cifs/smb2pdu.h4
-rw-r--r--fs/cifs/transport.c2
-rw-r--r--fs/configfs/symlink.c2
-rw-r--r--fs/cramfs/inode.c3
-rw-r--r--fs/dcache.c25
-rw-r--r--fs/debugfs/inode.c7
-rw-r--r--fs/dlm/ast.c10
-rw-r--r--fs/dlm/lock.c17
-rw-r--r--fs/dlm/lockspace.c2
-rw-r--r--fs/drop_caches.c8
-rw-r--r--fs/esdfs/dentry.c8
-rw-r--r--fs/esdfs/derive.c72
-rw-r--r--fs/esdfs/esdfs.h79
-rw-r--r--fs/esdfs/inode.c18
-rw-r--r--fs/esdfs/lookup.c65
-rw-r--r--fs/esdfs/main.c124
-rw-r--r--fs/esdfs/super.c5
-rw-r--r--fs/eventpoll.c2
-rw-r--r--fs/exec.c16
-rw-r--r--fs/exofs/super.c5
-rw-r--r--fs/exportfs/expfs.c2
-rw-r--r--fs/ext2/super.c39
-rw-r--r--fs/ext2/xattr.c2
-rw-r--r--fs/ext4/balloc.c24
-rw-r--r--fs/ext4/dir.c20
-rw-r--r--fs/ext4/ext4.h9
-rw-r--r--fs/ext4/ext4_extents.h1
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ialloc.c22
-rw-r--r--fs/ext4/indirect.c12
-rw-r--r--fs/ext4/inline.c75
-rw-r--r--fs/ext4/inode.c59
-rw-r--r--fs/ext4/mballoc.c10
-rw-r--r--fs/ext4/mmp.c1
-rw-r--r--fs/ext4/move_extent.c8
-rw-r--r--fs/ext4/namei.c6
-rw-r--r--fs/ext4/resize.c58
-rw-r--r--fs/ext4/super.c95
-rw-r--r--fs/ext4/sysfs.c13
-rw-r--r--fs/ext4/xattr.c55
-rw-r--r--fs/f2fs/acl.c14
-rw-r--r--fs/f2fs/checkpoint.c168
-rw-r--r--fs/f2fs/data.c118
-rw-r--r--fs/f2fs/dir.c87
-rw-r--r--fs/f2fs/f2fs.h77
-rw-r--r--fs/f2fs/file.c24
-rw-r--r--fs/f2fs/inline.c115
-rw-r--r--fs/f2fs/inode.c68
-rw-r--r--fs/f2fs/node.c107
-rw-r--r--fs/f2fs/node.h77
-rw-r--r--fs/f2fs/recovery.c165
-rw-r--r--fs/f2fs/segment.c101
-rw-r--r--fs/f2fs/segment.h51
-rw-r--r--fs/f2fs/super.c159
-rw-r--r--fs/fat/cache.c19
-rw-r--r--fs/fat/fat.h5
-rw-r--r--fs/fat/fatent.c7
-rw-r--r--fs/fat/inode.c27
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/fscache/operation.c6
-rw-r--r--fs/fuse/control.c13
-rw-r--r--fs/fuse/dev.c73
-rw-r--r--fs/fuse/dir.c13
-rw-r--r--fs/fuse/file.c9
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c24
-rw-r--r--fs/gfs2/bmap.c2
-rw-r--r--fs/gfs2/ops_fstype.c5
-rw-r--r--fs/gfs2/rgrp.c3
-rw-r--r--fs/hfs/brec.c11
-rw-r--r--fs/hfs/btree.c3
-rw-r--r--fs/hfs/dir.c2
-rw-r--r--fs/hfsplus/brec.c4
-rw-r--r--fs/hfsplus/btree.c3
-rw-r--r--fs/hfsplus/dir.c4
-rw-r--r--fs/hfsplus/super.c4
-rw-r--r--fs/hugetlbfs/inode.c42
-rw-r--r--fs/inode.c12
-rw-r--r--fs/jbd2/checkpoint.c4
-rw-r--r--fs/jbd2/transaction.c42
-rw-r--r--fs/jffs2/super.c7
-rw-r--r--fs/jffs2/xattr.c6
-rw-r--r--fs/jfs/xattr.c10
-rw-r--r--fs/kernfs/symlink.c2
-rw-r--r--fs/lockd/host.c2
-rw-r--r--fs/namei.c53
-rw-r--r--fs/namespace.c50
-rw-r--r--fs/ncpfs/ioctl.c4
-rw-r--r--fs/ncpfs/mmap.c4
-rw-r--r--fs/nfs/blocklayout/dev.c2
-rw-r--r--fs/nfs/callback_xdr.c11
-rw-r--r--fs/nfs/direct.c15
-rw-r--r--fs/nfs/filelayout/filelayout.c17
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c25
-rw-r--r--fs/nfs/nfs4client.c16
-rw-r--r--fs/nfs/nfs4idmap.c5
-rw-r--r--fs/nfs/pagelist.c12
-rw-r--r--fs/nfs/pnfs.c24
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/super.c8
-rw-r--r--fs/nfsd/nfs3proc.c16
-rw-r--r--fs/nfsd/nfs3xdr.c1
-rw-r--r--fs/nfsd/nfs4proc.c1
-rw-r--r--fs/nfsd/nfs4xdr.c7
-rw-r--r--fs/nfsd/nfsctl.c2
-rw-r--r--fs/ocfs2/buffer_head_io.c3
-rw-r--r--fs/ocfs2/cluster/nodemanager.c63
-rw-r--r--fs/ocfs2/dir.c3
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c4
-rw-r--r--fs/ocfs2/export.c2
-rw-r--r--fs/ocfs2/localalloc.c9
-rw-r--r--fs/ocfs2/move_extents.c47
-rw-r--r--fs/overlayfs/copy_up.c26
-rw-r--r--fs/overlayfs/dir.c67
-rw-r--r--fs/overlayfs/overlayfs.h4
-rw-r--r--fs/overlayfs/readdir.c130
-rw-r--r--fs/overlayfs/super.c40
-rw-r--r--fs/proc/array.c26
-rw-r--r--fs/proc/base.c33
-rw-r--r--fs/proc/kcore.c31
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/pstore/ram_core.c22
-rw-r--r--fs/quota/quota.c2
-rw-r--r--fs/read_write.c4
-rw-r--r--fs/reiserfs/reiserfs.h2
-rw-r--r--fs/reiserfs/xattr.c11
-rw-r--r--fs/signalfd.c7
-rw-r--r--fs/squashfs/block.c2
-rw-r--r--fs/squashfs/cache.c3
-rw-r--r--fs/squashfs/file.c8
-rw-r--r--fs/squashfs/fragment.c17
-rw-r--r--fs/squashfs/squashfs_fs.h6
-rw-r--r--fs/squashfs/squashfs_fs_sb.h1
-rw-r--r--fs/squashfs/super.c5
-rw-r--r--fs/super.c30
-rw-r--r--fs/sysfs/file.c44
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/ubifs/journal.c16
-rw-r--r--fs/ubifs/lprops.c8
-rw-r--r--fs/ubifs/super.c3
-rw-r--r--fs/udf/directory.c3
-rw-r--r--fs/udf/inode.c6
-rw-r--r--fs/udf/truncate.c3
-rw-r--r--fs/xattr.c2
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c94
-rw-r--r--fs/xfs/libxfs/xfs_attr.c9
-rw-r--r--fs/xfs/xfs_log.c7
-rw-r--r--fs/xfs/xfs_mount.h1
-rw-r--r--fs/xfs/xfs_trace.h9
-rw-r--r--include/acpi/processor.h2
-rw-r--r--include/asm-generic/fixmap.h12
-rw-r--r--include/asm-generic/pgtable.h20
-rw-r--r--include/asm-generic/vmlinux.lds.h9
-rw-r--r--include/crypto/vmac.h63
-rw-r--r--include/drm/drm_atomic.h231
-rw-r--r--include/drm/drm_atomic_helper.h4
-rw-r--r--include/drm/drm_connector.h19
-rw-r--r--include/drm/drm_dp_helper.h75
-rw-r--r--include/drm/drm_dp_mst_helper.h45
-rw-r--r--include/drm/drm_mode_config.h5
-rw-r--r--include/drm/drm_modeset_helper_vtables.h34
-rw-r--r--include/keys/user-type.h2
-rw-r--r--include/linux/acpi.h36
-rw-r--r--include/linux/backing-dev-defs.h8
-rw-r--r--include/linux/bitfield.h93
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/bpf.h5
-rw-r--r--include/linux/bug.h3
-rw-r--r--include/linux/cache.h14
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/ceph/libceph.h8
-rw-r--r--include/linux/compiler-clang.h8
-rw-r--r--include/linux/compiler-gcc.h35
-rw-r--r--include/linux/compiler.h2
-rw-r--r--include/linux/cpu.h4
-rw-r--r--include/linux/cpufreq.h15
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/devfreq.h113
-rw-r--r--include/linux/device-mapper.h2
-rw-r--r--include/linux/dma-iommu.h1
-rw-r--r--include/linux/f2fs_fs.h3
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/fsl/guts.h1
-rw-r--r--include/linux/genl_magic_struct.h5
-rw-r--r--include/linux/hid-debug.h9
-rw-r--r--include/linux/hid.h19
-rw-r--r--include/linux/hugetlb.h14
-rw-r--r--include/linux/hyperv.h1
-rw-r--r--include/linux/i2c.h9
-rw-r--r--include/linux/i8253.h1
-rw-r--r--include/linux/iio/buffer.h6
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/init_task.h11
-rw-r--r--include/linux/intel-iommu.h8
-rw-r--r--include/linux/io.h22
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kobject.h17
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/low-mem-notify.h130
-rw-r--r--include/linux/lzo.h2
-rw-r--r--include/linux/memcontrol.h18
-rw-r--r--include/linux/mfd/cros_ec.h15
-rw-r--r--include/linux/mfd/cros_ec_commands.h1865
-rw-r--r--include/linux/mfd/tps68470.h17
-rw-r--r--include/linux/mm.h44
-rw-r--r--include/linux/mm_metrics.h84
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmc/card.h5
-rw-r--r--include/linux/mmc/core.h11
-rw-r--r--include/linux/mmc/ffu.h56
-rw-r--r--include/linux/mmc/mmc.h7
-rw-r--r--include/linux/mmc/sdio_ids.h1
-rw-r--r--include/linux/mmzone.h2
-rw-r--r--include/linux/netdev_features.h24
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/netfilter/ipset/ip_set_comment.h4
-rw-r--r--include/linux/netfilter/ipset/ip_set_timeout.h10
-rw-r--r--include/linux/netfilter_bridge/ebtables.h5
-rw-r--r--include/linux/nospec.h10
-rw-r--r--include/linux/nvme.h64
-rw-r--r--include/linux/of.h8
-rw-r--r--include/linux/pci.h4
-rw-r--r--include/linux/platform_data/ina2xx.h2
-rw-r--r--include/linux/pm_opp.h6
-rw-r--r--include/linux/posix-timers.h4
-rw-r--r--include/linux/radix-tree.h2
-rw-r--r--include/linux/rculist.h36
-rw-r--r--include/linux/restart_block.h51
-rw-r--r--include/linux/rhashtable.h143
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/sched.h135
-rw-r--r--include/linux/seccomp.h3
-rw-r--r--include/linux/seq_file.h14
-rw-r--r--include/linux/skbuff.h35
-rw-r--r--include/linux/slab.h12
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/string.h205
-rw-r--r--include/linux/sunrpc/svc.h5
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/swapfile.h2
-rw-r--r--include/linux/swapops.h61
-rw-r--r--include/linux/sysfs.h14
-rw-r--r--include/linux/tc.h1
-rw-r--r--include/linux/tcp.h9
-rw-r--r--include/linux/thread_info.h104
-rw-r--r--include/linux/throttler.h21
-rw-r--r--include/linux/uaccess.h10
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/usb/audio-v2.h7
-rw-r--r--include/linux/usb/audio-v3.h40
-rw-r--r--include/linux/usb/hcd.h11
-rw-r--r--include/linux/usb/quirks.h6
-rw-r--r--include/linux/vm_event_item.h3
-rw-r--r--include/linux/vmacache.h5
-rw-r--r--include/media/cec-notifier.h27
-rw-r--r--include/media/v4l2-ctrls.h26
-rw-r--r--include/media/v4l2-fh.h1
-rw-r--r--include/net/af_vsock.h4
-rw-r--r--include/net/ax25.h12
-rw-r--r--include/net/bluetooth/hci.h12
-rw-r--r--include/net/bluetooth/hci_core.h5
-rw-r--r--include/net/bluetooth/hci_le_splitter.h100
-rw-r--r--include/net/bluetooth/mgmt.h76
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/gro_cells.h13
-rw-r--r--include/net/icmp.h9
-rw-r--r--include/net/inet_ecn.h3
-rw-r--r--include/net/inet_frag.h133
-rw-r--r--include/net/inetpeer.h1
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/ipv6.h28
-rw-r--r--include/net/llc.h5
-rw-r--r--include/net/neighbour.h28
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/nfc/hci.h2
-rw-r--r--include/net/phonet/pep.h5
-rw-r--r--include/net/sctp/checksum.h2
-rw-r--r--include/net/sock.h49
-rw-r--r--include/net/tcp.h7
-rw-r--r--include/rdma/ib_verbs.h14
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/soc/rockchip/rockchip_phy_typec.h63
-rw-r--r--include/soc/tegra/mc.h2
-rw-r--r--include/sound/compress_driver.h6
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/sound/pcm_params.h4
-rw-r--r--include/trace/events/ext4.h20
-rw-r--r--include/trace/events/intel_ish.h30
-rw-r--r--include/uapi/asm-generic/mman-common.h3
-rw-r--r--include/uapi/drm/drm_mode.h3
-rw-r--r--include/uapi/linux/ethtool.h4
-rw-r--r--include/uapi/linux/if_ether.h7
-rw-r--r--include/uapi/linux/input-event-codes.h11
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/intel-ipu3.h2074
-rw-r--r--include/uapi/linux/nl80211.h2
-rw-r--r--include/uapi/linux/pci_regs.h20
-rw-r--r--include/uapi/linux/prctl.h12
-rw-r--r--include/uapi/linux/seccomp.h4
-rw-r--r--include/uapi/linux/signalfd.h6
-rw-r--r--include/uapi/linux/snmp.h1
-rw-r--r--include/uapi/linux/usb/audio.h19
-rw-r--r--include/uapi/linux/v4l2-controls.h32
-rw-r--r--include/uapi/linux/videodev2.h3
-rw-r--r--include/video/udlfb.h2
-rw-r--r--init/Kconfig25
-rw-r--r--init/init_task.c7
-rw-r--r--init/main.c31
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/audit_watch.c12
-rw-r--r--kernel/auditfilter.c2
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bounds.c4
-rw-r--r--kernel/bpf/arraymap.c10
-rw-r--r--kernel/bpf/hashtab.c2
-rw-r--r--kernel/bpf/syscall.c13
-rw-r--r--kernel/bpf/verifier.c102
-rw-r--r--kernel/cgroup.c6
-rw-r--r--kernel/cpu.c11
-rw-r--r--kernel/debug/kdb/kdb_bp.c4
-rw-r--r--kernel/debug/kdb/kdb_io.c15
-rw-r--r--kernel/debug/kdb/kdb_private.h2
-rw-r--r--kernel/debug/kdb/kdb_support.c14
-rw-r--r--kernel/events/core.c5
-rw-r--r--kernel/events/ring_buffer.c50
-rw-r--r--kernel/events/uprobes.c16
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/fork.c97
-rw-r--r--kernel/futex.c11
-rw-r--r--kernel/hung_task.c8
-rw-r--r--kernel/irq/chip.c8
-rw-r--r--kernel/irq/manage.c17
-rw-r--r--kernel/kexec_file.c14
-rw-r--r--kernel/kmod.c14
-rw-r--r--kernel/kprobes.c31
-rw-r--r--kernel/ksysfs.c2
-rw-r--r--kernel/kthread.c16
-rw-r--r--kernel/locking/lockdep.c19
-rw-r--r--kernel/locking/osq_lock.c13
-rw-r--r--kernel/locking/rtmutex.c52
-rw-r--r--kernel/locking/rtmutex_common.h8
-rw-r--r--kernel/locking/rwsem-xadd.c27
-rw-r--r--kernel/memremap.c11
-rw-r--r--kernel/module.c6
-rw-r--r--kernel/power/Kconfig1
-rw-r--r--kernel/power/swap.c18
-rw-r--r--kernel/power/user.c5
-rw-r--r--kernel/printk/printk.c12
-rw-r--r--kernel/rcu/tree.c22
-rw-r--r--kernel/sched/core.c8
-rw-r--r--kernel/sched/fair.c59
-rw-r--r--kernel/sched/idle.c2
-rw-r--r--kernel/sched/sched.h8
-rw-r--r--kernel/seccomp.c21
-rw-r--r--kernel/signal.c63
-rw-r--r--kernel/sys.c116
-rw-r--r--kernel/sysctl.c32
-rw-r--r--kernel/test_module.c26
-rw-r--r--kernel/time/alarmtimer.c3
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/time/posix-timers.c29
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c6
-rw-r--r--kernel/time/timekeeping.c4
-rw-r--r--kernel/time/timer_list.c2
-rw-r--r--kernel/trace/blktrace.c4
-rw-r--r--kernel/trace/bpf_trace.c10
-rw-r--r--kernel/trace/ftrace.c1
-rw-r--r--kernel/trace/ring_buffer.c18
-rw-r--r--kernel/trace/trace.c35
-rw-r--r--kernel/trace/trace_events_trigger.c29
-rw-r--r--kernel/trace/trace_functions_graph.c5
-rw-r--r--kernel/trace/trace_kprobe.c15
-rw-r--r--kernel/trace/trace_uprobe.c11
-rw-r--r--kernel/user_namespace.c39
-rw-r--r--kernel/utsname_sysctl.c41
-rw-r--r--lib/Kconfig.debug8
-rw-r--r--lib/assoc_array.c8
-rw-r--r--lib/atomic64_test.c4
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/debug_locks.c2
-rw-r--r--lib/debugobjects.c10
-rw-r--r--lib/int_sqrt.c3
-rw-r--r--lib/interval_tree_test.c93
-rw-r--r--lib/ioremap.c4
-rw-r--r--lib/klist.c10
-rw-r--r--lib/kobject.c2
-rw-r--r--lib/lzo/lzo1x_compress.c107
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c93
-rw-r--r--lib/lzo/lzodefs.h26
-rw-r--r--lib/radix-tree.c96
-rw-r--r--lib/raid6/test/Makefile4
-rw-r--r--lib/rbtree_test.c80
-rw-r--r--lib/rhashtable.c32
-rw-r--r--lib/seq_buf.c6
-rw-r--r--lib/string.c7
-rw-r--r--lib/strncpy_from_user.c15
-rw-r--r--lib/strnlen_user.c21
-rw-r--r--lib/swiotlb.c20
-rw-r--r--lib/test-hexdump.c2
-rw-r--r--lib/vsprintf.c3
-rw-r--r--mm/Kconfig12
-rw-r--r--mm/Makefile5
-rw-r--r--mm/debug.c4
-rw-r--r--mm/fadvise.c8
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/frame_vector.c9
-rw-r--r--mm/gup.c48
-rw-r--r--mm/huge_memory.c8
-rw-r--r--mm/hugetlb.c93
-rw-r--r--mm/kasan/kasan.c5
-rw-r--r--mm/low-mem-notify.c129
-rw-r--r--mm/madvise.c15
-rw-r--r--mm/memcontrol.c5
-rw-r--r--mm/memory.c108
-rw-r--r--mm/memory_hotplug.c22
-rw-r--r--mm/mempolicy.c40
-rw-r--r--mm/metrics.c314
-rw-r--r--mm/migrate.c18
-rw-r--r--mm/mlock.c5
-rw-r--r--mm/mmap.c54
-rw-r--r--mm/mprotect.c49
-rw-r--r--mm/mremap.c21
-rw-r--r--mm/nommu.c42
-rw-r--r--mm/oom_kill.c9
-rw-r--r--mm/page-writeback.c35
-rw-r--r--mm/page_alloc.c14
-rw-r--r--mm/percpu.c36
-rw-r--r--mm/process_vm_access.c6
-rw-r--r--mm/rmap.c58
-rw-r--r--mm/shmem.c81
-rw-r--r--mm/slab.c36
-rw-r--r--mm/slab_common.c7
-rw-r--r--mm/slub.c148
-rw-r--r--mm/swap_state.c2
-rw-r--r--mm/swapfile.c67
-rw-r--r--mm/truncate.c8
-rw-r--r--mm/usercopy.c280
-rw-r--r--mm/util.c2
-rw-r--r--mm/vmacache.c38
-rw-r--r--mm/vmalloc.c5
-rw-r--r--mm/vmscan.c32
-rw-r--r--mm/vmstat.c10
-rw-r--r--mm/zswap.c9
-rw-r--r--net/6lowpan/iphc.c1
-rw-r--r--net/9p/client.c23
-rw-r--r--net/9p/protocol.c5
-rw-r--r--net/9p/trans_fd.c7
-rw-r--r--net/9p/trans_rdma.c3
-rw-r--r--net/9p/trans_virtio.c16
-rw-r--r--net/ax25/af_ax25.c11
-rw-r--r--net/ax25/ax25_dev.c2
-rw-r--r--net/ax25/ax25_ip.c4
-rw-r--r--net/ax25/ax25_route.c19
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/batman-adv/hard-interface.c5
-rw-r--r--net/batman-adv/soft-interface.c4
-rw-r--r--net/bluetooth/Kconfig15
-rw-r--r--net/bluetooth/Makefile1
-rw-r--r--net/bluetooth/af_bluetooth.c5
-rw-r--r--net/bluetooth/hci_conn.c29
-rw-r--r--net/bluetooth/hci_core.c55
-rw-r--r--net/bluetooth/hci_debugfs.c193
-rw-r--r--net/bluetooth/hci_event.c89
-rw-r--r--net/bluetooth/hci_le_splitter.c1095
-rw-r--r--net/bluetooth/hci_request.c39
-rw-r--r--net/bluetooth/hidp/core.c6
-rw-r--r--net/bluetooth/l2cap_core.c85
-rw-r--r--net/bluetooth/mgmt.c75
-rw-r--r--net/bluetooth/sco.c3
-rw-r--r--net/bluetooth/smp.c29
-rw-r--r--net/bluetooth/smp.h3
-rw-r--r--net/bridge/br_forward.c7
-rw-r--r--net/bridge/br_if.c11
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c3
-rw-r--r--net/bridge/netfilter/ebtables.c22
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c1
-rw-r--r--net/caif/caif_dev.c4
-rw-r--r--net/can/bcm.c27
-rw-r--r--net/can/gw.c30
-rw-r--r--net/ceph/messenger.c17
-rw-r--r--net/ceph/pagevec.c2
-rw-r--r--net/compat.c15
-rw-r--r--net/core/dev.c40
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/flow_dissector.c4
-rw-r--r--net/core/neighbour.c13
-rw-r--r--net/core/net-sysfs.c31
-rw-r--r--net/core/rtnetlink.c36
-rw-r--r--net/core/skbuff.c46
-rw-r--r--net/core/sock.c4
-rw-r--r--net/dcb/dcbnl.c11
-rw-r--r--net/dccp/ccid.h4
-rw-r--r--net/dccp/ccids/ccid2.c6
-rw-r--r--net/dccp/ccids/ccid3.c16
-rw-r--r--net/dccp/ipv6.c4
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dns_resolver/dns_key.c28
-rw-r--r--net/dsa/slave.c18
-rw-r--r--net/hsr/hsr_device.c18
-rw-r--r--net/hsr/hsr_framereg.c12
-rw-r--r--net/hsr/hsr_framereg.h1
-rw-r--r--net/ieee802154/6lowpan/6lowpan_i.h26
-rw-r--r--net/ieee802154/6lowpan/reassembly.c148
-rw-r--r--net/ieee802154/6lowpan/tx.c24
-rw-r--r--net/ipv4/Kconfig1
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/cipso_ipv4.c40
-rw-r--r--net/ipv4/fib_frontend.c21
-rw-r--r--net/ipv4/fib_semantics.c52
-rw-r--r--net/ipv4/fib_trie.c14
-rw-r--r--net/ipv4/fou.c16
-rw-r--r--net/ipv4/icmp.c7
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_fragment.c385
-rw-r--r--net/ipv4/inetpeer.c1
-rw-r--r--net/ipv4/ip_fragment.c583
-rw-r--r--net/ipv4/ip_input.c1
-rw-r--r--net/ipv4/ip_options.c22
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c14
-rw-r--r--net/ipv4/ip_tunnel.c13
-rw-r--r--net/ipv4/ip_tunnel_core.c2
-rw-r--r--net/ipv4/ip_vti.c50
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ip_tables.c4
-rw-r--r--net/ipv4/proc.c7
-rw-r--r--net/ipv4/route.c11
-rw-r--r--net/ipv4/syncookies.c7
-rw-r--r--net/ipv4/sysctl_net_ipv4.c23
-rw-r--r--net/ipv4/tcp.c9
-rw-r--r--net/ipv4/tcp_dctcp.c75
-rw-r--r--net/ipv4/tcp_input.c509
-rw-r--r--net/ipv4/tcp_ipv4.c14
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/tcp_output.c49
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/udplite.c2
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/addrconf.c13
-rw-r--r--net/ipv6/af_inet6.c17
-rw-r--r--net/ipv6/datagram.c8
-rw-r--r--net/ipv6/ip6_offload.c1
-rw-r--r--net/ipv6/ip6_output.c46
-rw-r--r--net/ipv6/ip6_tunnel.c13
-rw-r--r--net/ipv6/ip6_udp_tunnel.c3
-rw-r--r--net/ipv6/ip6_vti.c44
-rw-r--r--net/ipv6/ip6mr.c15
-rw-r--r--net/ipv6/mcast.c25
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/ip6_tables.c4
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c107
-rw-r--r--net/ipv6/proc.c5
-rw-r--r--net/ipv6/reassembly.c209
-rw-r--r--net/ipv6/route.c10
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/tcp_ipv6.c13
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/ipv6/xfrm6_output.c2
-rw-r--r--net/ipv6/xfrm6_policy.c2
-rw-r--r--net/ipv6/xfrm6_tunnel.c3
-rw-r--r--net/irda/af_irda.c13
-rw-r--r--net/iucv/af_iucv.c8
-rw-r--r--net/key/af_key.c45
-rw-r--r--net/l2tp/l2tp_core.c45
-rw-r--r--net/l2tp/l2tp_core.h31
-rw-r--r--net/l2tp/l2tp_ip.c19
-rw-r--r--net/l2tp/l2tp_ip6.c23
-rw-r--r--net/llc/af_llc.c11
-rw-r--r--net/llc/llc_core.c4
-rw-r--r--net/mac80211/cfg.c8
-rw-r--r--net/mac80211/ibss.c22
-rw-r--r--net/mac80211/iface.c2
-rw-r--r--net/mac80211/key.c24
-rw-r--r--net/mac80211/main.c26
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mlme.c56
-rw-r--r--net/mac80211/rx.c10
-rw-r--r--net/mac80211/status.c9
-rw-r--r--net/mac80211/tx.c16
-rw-r--r--net/mac80211/util.c3
-rw-r--r--net/mac802154/tx.c15
-rw-r--r--net/netfilter/ipset/ip_set_hash_netportnet.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c15
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c33
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c8
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nf_tables_api.c1
-rw-r--r--net/netfilter/nf_tables_core.c3
-rw-r--r--net/netfilter/nfnetlink_acct.c3
-rw-r--r--net/netfilter/nfnetlink_log.c2
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/x_tables.c4
-rw-r--r--net/netfilter/xt_IDLETIMER.c20
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlink/af_netlink.c7
-rw-r--r--net/netrom/af_netrom.c15
-rw-r--r--net/netrom/nr_timer.c20
-rw-r--r--net/nfc/hci/core.c10
-rw-r--r--net/nfc/llcp_commands.c29
-rw-r--r--net/nfc/llcp_core.c24
-rw-r--r--net/openvswitch/flow_netlink.c2
-rw-r--r--net/packet/af_packet.c28
-rw-r--r--net/phonet/pep.c32
-rw-r--r--net/rds/loop.c1
-rw-r--r--net/rds/rds.h5
-rw-r--r--net/rds/recv.c5
-rw-r--r--net/rose/rose_route.c5
-rw-r--r--net/rose/rose_subr.c21
-rw-r--r--net/sched/cls_tcindex.c8
-rw-r--r--net/sched/sch_api.c3
-rw-r--r--net/sched/sch_blackhole.c2
-rw-r--r--net/sched/sch_gred.c2
-rw-r--r--net/sched/sch_hhf.c3
-rw-r--r--net/sched/sch_htb.c5
-rw-r--r--net/sched/sch_multiq.c9
-rw-r--r--net/sched/sch_netem.c7
-rw-r--r--net/sched/sch_red.c4
-rw-r--r--net/sched/sch_tbf.c5
-rw-r--r--net/sctp/associola.c10
-rw-r--r--net/sctp/ipv6.c4
-rw-r--r--net/sctp/protocol.c4
-rw-r--r--net/sctp/socket.c5
-rw-r--r--net/socket.c56
-rw-r--r--net/sunrpc/auth_generic.c8
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c4
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c8
-rw-r--r--net/sunrpc/cache.c10
-rw-r--r--net/sunrpc/rpcb_clnt.c8
-rw-r--r--net/sunrpc/svc.c10
-rw-r--r--net/sunrpc/svc_xprt.c7
-rw-r--r--net/sunrpc/svcsock.c4
-rw-r--r--net/sunrpc/xdr.c7
-rw-r--r--net/sunrpc/xprt.c11
-rw-r--r--net/tipc/netlink_compat.c50
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/unix/af_unix.c53
-rw-r--r--net/unix/diag.c3
-rw-r--r--net/vmw_vsock/af_vsock.c15
-rw-r--r--net/vmw_vsock/vmci_transport.c74
-rw-r--r--net/wireless/nl80211.c18
-rw-r--r--net/wireless/reg.c12
-rw-r--r--net/wireless/util.c13
-rw-r--r--net/x25/af_x25.c26
-rw-r--r--net/xfrm/xfrm_policy.c13
-rw-r--r--net/xfrm/xfrm_state.c7
-rw-r--r--net/xfrm/xfrm_sysctl.c10
-rw-r--r--net/xfrm/xfrm_user.c42
-rw-r--r--scripts/Kbuild.include105
-rw-r--r--scripts/Makefile.extrawarn3
-rwxr-xr-xscripts/checkstack.pl4
-rwxr-xr-xscripts/decode_stacktrace.sh2
-rwxr-xr-xscripts/depmod.sh8
-rw-r--r--scripts/gdb/linux/tasks.py2
-rw-r--r--scripts/kconfig/conf.c25
-rw-r--r--scripts/kconfig/confdata.c2
-rw-r--r--scripts/kconfig/zconf.l4
-rw-r--r--scripts/mod/modpost.c58
-rwxr-xr-xscripts/setlocalversion2
-rw-r--r--scripts/unifdef.c4
-rw-r--r--security/Kconfig81
-rw-r--r--security/Makefile2
-rw-r--r--security/chromiumos/Kconfig3
-rw-r--r--security/chromiumos/alt-syscall.c1280
-rw-r--r--security/chromiumos/alt-syscall.h386
-rw-r--r--security/chromiumos/android_whitelists.h727
-rw-r--r--security/chromiumos/complete_whitelists.h402
-rw-r--r--security/chromiumos/inode_mark.c16
-rw-r--r--security/chromiumos/inode_mark.h2
-rw-r--r--security/chromiumos/lsm.c599
-rw-r--r--security/chromiumos/process_management.h38
-rw-r--r--security/chromiumos/read_write_test_whitelists.h56
-rw-r--r--security/chromiumos/securityfs.c160
-rw-r--r--security/chromiumos/third_party_whitelists.h261
-rw-r--r--security/chromiumos/utils.c3
-rw-r--r--security/integrity/ima/ima_appraise.c8
-rw-r--r--security/integrity/ima/ima_fs.c6
-rw-r--r--security/keys/key.c4
-rw-r--r--security/keys/keyring.c4
-rw-r--r--security/keys/proc.c11
-rw-r--r--security/keys/process_keys.c7
-rw-r--r--security/keys/request_key.c1
-rw-r--r--security/keys/request_key_auth.c2
-rw-r--r--security/lsm_audit.c10
-rw-r--r--security/security.c7
-rw-r--r--security/selinux/avc.c14
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/selinux/ss/services.c2
-rw-r--r--security/smack/smack_lsm.c13
-rw-r--r--security/tomoyo/domain.c3
-rw-r--r--security/yama/yama_lsm.c4
-rw-r--r--sound/aoa/core/gpio-feature.c4
-rw-r--r--sound/core/compress_offload.c16
-rw-r--r--sound/core/control.c80
-rw-r--r--sound/core/memalloc.c8
-rw-r--r--sound/core/oss/pcm_oss.c43
-rw-r--r--sound/core/pcm.c2
-rw-r--r--sound/core/pcm_lib.c16
-rw-r--r--sound/core/pcm_native.c29
-rw-r--r--sound/core/rawmidi.c22
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c7
-rw-r--r--sound/core/seq/seq_virmidi.c10
-rw-r--r--sound/core/timer.c14
-rw-r--r--sound/firewire/bebob/bebob.c16
-rw-r--r--sound/firewire/bebob/bebob_maudio.c24
-rw-r--r--sound/hda/hdac_controller.c8
-rw-r--r--sound/isa/msnd/msnd_pinnacle.c4
-rw-r--r--sound/isa/wavefront/wavefront_synth.c9
-rw-r--r--sound/isa/wss/wss_lib.c2
-rw-r--r--sound/pci/ac97/ac97_codec.c2
-rw-r--r--sound/pci/ca0106/ca0106.h2
-rw-r--r--sound/pci/cs46xx/dsp_spos.c3
-rw-r--r--sound/pci/cs5535audio/cs5535audio.h6
-rw-r--r--sound/pci/cs5535audio/cs5535audio_pcm.c4
-rw-r--r--sound/pci/emu10k1/emufx.c7
-rw-r--r--sound/pci/emu10k1/emupcm.c4
-rw-r--r--sound/pci/emu10k1/memory.c6
-rw-r--r--sound/pci/fm801.c16
-rw-r--r--sound/pci/hda/hda_bind.c3
-rw-r--r--sound/pci/hda/hda_codec.c60
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/hda_controller.c4
-rw-r--r--sound/pci/hda/hda_controller.h1
-rw-r--r--sound/pci/hda/hda_intel.c22
-rw-r--r--sound/pci/hda/hda_tegra.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c8
-rw-r--r--sound/pci/hda/patch_conexant.c9
-rw-r--r--sound/pci/hda/patch_realtek.c18
-rw-r--r--sound/pci/rme9652/hdsp.c10
-rw-r--r--sound/pci/trident/trident.c2
-rw-r--r--sound/pci/vx222/vx222_ops.c8
-rw-r--r--sound/pcmcia/vx/vxp_ops.c10
-rw-r--r--sound/soc/cirrus/edb93xx.c2
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c26
-rw-r--r--sound/soc/cirrus/snappercl15.c2
-rw-r--r--sound/soc/codecs/Kconfig5
-rw-r--r--sound/soc/codecs/Makefile2
-rw-r--r--sound/soc/codecs/ak4613.c1
-rw-r--r--sound/soc/codecs/cs4265.c4
-rw-r--r--sound/soc/codecs/da7219-aad.c5
-rw-r--r--sound/soc/codecs/da7219.c44
-rw-r--r--sound/soc/codecs/da7219.h8
-rw-r--r--sound/soc/codecs/dmic.c35
-rw-r--r--sound/soc/codecs/max98373.c980
-rw-r--r--sound/soc/codecs/max98373.h213
-rw-r--r--sound/soc/codecs/rt5514.c6
-rw-r--r--sound/soc/codecs/rt5663.c7
-rw-r--r--sound/soc/codecs/sigmadsp.c3
-rw-r--r--sound/soc/codecs/wm8804-i2c.c15
-rw-r--r--sound/soc/codecs/wm8940.c1
-rw-r--r--sound/soc/codecs/wm8994.c1
-rw-r--r--sound/soc/fsl/Kconfig2
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/fsl/imx-audmux.c24
-rw-r--r--sound/soc/intel/Kconfig30
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c8
-rw-r--r--sound/soc/intel/atom/sst/sst_loader.c8
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-pcm.c3
-rw-r--r--sound/soc/intel/boards/Makefile4
-rw-r--r--sound/soc/intel/boards/broadwell.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c56
-rw-r--r--sound/soc/intel/boards/haswell.c2
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98357a.c20
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98373.c960
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c1026
-rw-r--r--sound/soc/intel/common/sst-firmware.c2
-rw-r--r--sound/soc/intel/skylake/skl.c34
-rw-r--r--sound/soc/omap/omap-dmic.c9
-rw-r--r--sound/soc/omap/omap-mcpdm.c43
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c1
-rw-r--r--sound/soc/pxa/mmp-pcm.c1
-rw-r--r--sound/soc/pxa/mmp-sspa.c1
-rw-r--r--sound/soc/pxa/palm27x.c1
-rw-r--r--sound/soc/pxa/pxa-ssp.c1
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c1
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c1
-rw-r--r--sound/soc/rockchip/rk3399_gru_sound.c19
-rw-r--r--sound/soc/sirf/sirf-usp.c7
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/soc-dapm.c19
-rw-r--r--sound/soc/soc-pcm.c14
-rw-r--r--sound/soc/soc-topology.c8
-rw-r--r--sound/soc/spear/spdif_in.c6
-rw-r--r--sound/sparc/cs4231.c8
-rw-r--r--sound/synth/emux/emux_hwdep.c7
-rw-r--r--sound/usb/card.c5
-rw-r--r--sound/usb/mixer.c225
-rw-r--r--sound/usb/mixer.h1
-rw-r--r--sound/usb/mixer_quirks.c23
-rw-r--r--sound/usb/pcm.c11
-rw-r--r--sound/usb/quirks-table.h6
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h336
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h65
-rw-r--r--tools/arch/x86/include/asm/required-features.h106
-rw-r--r--tools/arch/x86/include/asm/unistd_32.h9
-rw-r--r--tools/arch/x86/include/asm/unistd_64.h9
-rw-r--r--tools/arch/x86/lib/memcpy_64.S179
-rw-r--r--tools/arch/x86/lib/memset_64.S138
-rw-r--r--tools/build/Build.include9
-rw-r--r--tools/hv/hv_kvp_daemon.c2
-rw-r--r--tools/include/asm/alternative-asm.h (renamed from tools/perf/util/include/asm/alternative-asm.h)4
-rw-r--r--tools/perf/MANIFEST8
-rw-r--r--tools/perf/Makefile4
-rw-r--r--tools/perf/Makefile.perf15
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c12
-rw-r--r--tools/perf/arch/powerpc/util/sym-handling.c4
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c11
-rw-r--r--tools/perf/arch/x86/util/kvm-stat.c2
-rw-r--r--tools/perf/bench/mem-memcpy-x86-64-asm.S2
-rw-r--r--tools/perf/bench/mem-memset-x86-64-asm.S2
-rw-r--r--tools/perf/config/Makefile1
-rw-r--r--tools/perf/perf-sys.h18
-rw-r--r--tools/perf/perf.h2
-rw-r--r--tools/perf/scripts/python/export-to-postgresql.py9
-rw-r--r--tools/perf/tests/evsel-tp-sched.c2
-rw-r--r--tools/perf/tests/topology.c1
-rw-r--r--tools/perf/util/auxtrace.c7
-rw-r--r--tools/perf/util/auxtrace.h3
-rw-r--r--tools/perf/util/cpumap.c11
-rw-r--r--tools/perf/util/dso.c2
-rw-r--r--tools/perf/util/include/asm/unistd_32.h1
-rw-r--r--tools/perf/util/include/asm/unistd_64.h1
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.c80
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-decoder.h9
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c2
-rw-r--r--tools/perf/util/intel-pt.c5
-rw-r--r--tools/perf/util/llvm-utils.c6
-rw-r--r--tools/perf/util/parse-events.c2
-rw-r--r--tools/perf/util/pmu.c8
-rw-r--r--tools/perf/util/svghelper.c2
-rw-r--r--tools/perf/util/symbol-elf.c9
-rw-r--r--tools/perf/util/trace-event-info.c2
-rw-r--r--tools/perf/util/trace-event-read.c5
-rw-r--r--tools/perf/util/unwind-libdw.c4
-rw-r--r--tools/power/x86/turbostat/turbostat.c8
-rw-r--r--tools/scripts/Makefile.include2
-rw-r--r--tools/testing/selftests/efivarfs/config1
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc28
-rw-r--r--tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc80
-rw-r--r--tools/testing/selftests/networking/timestamping/.gitignore (renamed from Documentation/networking/timestamping/.gitignore)0
-rw-r--r--tools/testing/selftests/networking/timestamping/Makefile8
-rw-r--r--tools/testing/selftests/networking/timestamping/hwtstamp_config.c (renamed from Documentation/networking/timestamping/hwtstamp_config.c)0
-rw-r--r--tools/testing/selftests/networking/timestamping/timestamping.c (renamed from Documentation/networking/timestamping/timestamping.c)0
-rw-r--r--tools/testing/selftests/networking/timestamping/txtimestamp.c (renamed from Documentation/networking/timestamping/txtimestamp.c)0
-rw-r--r--tools/testing/selftests/powerpc/harness.c18
-rwxr-xr-xtools/testing/selftests/pstore/pstore_post_reboot_tests5
-rw-r--r--tools/testing/selftests/seccomp/seccomp_bpf.c98
-rwxr-xr-xtools/testing/selftests/static_keys/test_static_keys.sh13
-rw-r--r--tools/testing/selftests/sync/config4
-rw-r--r--tools/testing/selftests/timers/raw_skew.c5
-rwxr-xr-xtools/testing/selftests/user/test_user_copy.sh7
-rw-r--r--tools/testing/selftests/x86/sigreturn.c46
-rwxr-xr-xtools/testing/selftests/zram/zram.sh5
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh5
-rw-r--r--tools/usb/ffs-test.c19
-rw-r--r--tools/usb/usbip/src/usbip_detach.c9
-rw-r--r--tools/vm/page-types.c6
-rw-r--r--tools/vm/slabinfo.c4
-rw-r--r--virt/kvm/arm/vgic.c7
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/eventfd.c17
-rw-r--r--virt/kvm/kvm_main.c17
3293 files changed, 318030 insertions, 31641 deletions
diff --git a/COMMIT-QUEUE.ini b/COMMIT-QUEUE.ini
index 883502401e60be..99c67f12c780b7 100644
--- a/COMMIT-QUEUE.ini
+++ b/COMMIT-QUEUE.ini
@@ -7,8 +7,9 @@
[GENERAL]
-pre-cq-configs: amd64-generic-pre-cq
- arm-generic-pre-cq
- gru-pre-cq
+pre-cq-configs: amd64-generic-v4_4-pre-cq
+ arm-generic-v4_4-pre-cq
+ arm64-generic-v4_4-pre-cq
+ kevin-pre-cq
reef-pre-cq
-subsystem:all
+ fizz-no-vmtest-pre-cq
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
new file mode 100644
index 00000000000000..4b0318c99507f4
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats
@@ -0,0 +1,122 @@
+==========================
+PCIe Device AER statistics
+==========================
+These attributes show up under all the devices that are AER capable. These
+statistical counters indicate the errors "as seen/reported by the device".
+Note that this may mean that if an endpoint is causing problems, the AER
+counters may increment at its link partner (e.g. root port) because the
+errors may be "seen" / reported by the link partner and not the
+problematic endpoint itself (which may report all counters as 0 as it never
+saw any problems).
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_correctable
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of correctable errors seen and reported by this
+ PCI device using ERR_COR. Note that since multiple errors may
+ be reported using a single ERR_COR message, the
+ TOTAL_ERR_COR at the end of the file may not match the actual
+ total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_correctable
+Receiver Error 2
+Bad TLP 0
+Bad DLLP 0
+RELAY_NUM Rollover 0
+Replay Timer Timeout 0
+Advisory Non-Fatal 0
+Corrected Internal Error 0
+Header Log Overflow 0
+TOTAL_ERR_COR 2
+-------------------------------------------------------------------------
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_fatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of uncorrectable fatal errors seen and reported by this
+ PCI device using ERR_FATAL. Note that since multiple errors may
+ be reported using a single ERR_FATAL message, the
+ TOTAL_ERR_FATAL at the end of the file may not match the actual
+ total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_fatal
+Undefined 0
+Data Link Protocol 0
+Surprise Down Error 0
+Poisoned TLP 0
+Flow Control Protocol 0
+Completion Timeout 0
+Completer Abort 0
+Unexpected Completion 0
+Receiver Overflow 0
+Malformed TLP 0
+ECRC 0
+Unsupported Request 0
+ACS Violation 0
+Uncorrectable Internal Error 0
+MC Blocked TLP 0
+AtomicOp Egress Blocked 0
+TLP Prefix Blocked Error 0
+TOTAL_ERR_FATAL 0
+-------------------------------------------------------------------------
+
+Where: /sys/bus/pci/devices/<dev>/aer_dev_nonfatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: List of uncorrectable nonfatal errors seen and reported by this
+ PCI device using ERR_NONFATAL. Note that since multiple errors
+ may be reported using a single ERR_NONFATAL message, the
+ TOTAL_ERR_NONFATAL at the end of the file may not match the
+ actual total of all the errors in the file. Sample output:
+-------------------------------------------------------------------------
+localhost /sys/devices/pci0000:00/0000:00:1c.0 # cat aer_dev_nonfatal
+Undefined 0
+Data Link Protocol 0
+Surprise Down Error 0
+Poisoned TLP 0
+Flow Control Protocol 0
+Completion Timeout 0
+Completer Abort 0
+Unexpected Completion 0
+Receiver Overflow 0
+Malformed TLP 0
+ECRC 0
+Unsupported Request 0
+ACS Violation 0
+Uncorrectable Internal Error 0
+MC Blocked TLP 0
+AtomicOp Egress Blocked 0
+TLP Prefix Blocked Error 0
+TOTAL_ERR_NONFATAL 0
+-------------------------------------------------------------------------
+
+============================
+PCIe Rootport AER statistics
+============================
+These attributes show up only under the rootports (or root complex event
+collectors) that are AER capable. They indicate the number of error messages as
+"reported to" the rootport. Please note that the rootports also transmit
+(internally) the ERR_* messages for errors seen by the internal rootport PCI
+device, so these counters include them and are thus cumulative of all the error
+messages on the PCI hierarchy originating at that root port.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_cor
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: Total number of ERR_COR messages reported to rootport.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_fatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: Total number of ERR_FATAL messages reported to rootport.
+
+Where: /sys/bus/pci/devices/<dev>/aer_stats/aer_rootport_total_err_nonfatal
+Date: July 2018
+Kernel Version: 4.19.0
+Contact: linux-pci@vger.kernel.org, rajatja@google.com
+Description: Total number of ERR_NONFATAL messages reported to rootport.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 48fd30fa00d29a..8677f9c041a36c 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -189,6 +189,23 @@ Description:
The file will read "hotplug", "wired" and "not used" if the
information is available, and "unknown" otherwise.
+What: /sys/bus/usb/devices/.../(hub interface)/portX/over_current_count
+Date: February 2018
+Contact: Richard Leitner <richard.leitner@skidata.com>
+Description:
+ Most hubs are able to detect over-current situations on their
+ ports and report them to the kernel. This attribute exposes the
+ number of over-current situations that have occurred on a specific
+ port to user space. The file contains an unsigned 32-bit value
+ which wraps to 0 after its maximum is reached, and it supports
+ poll() for monitoring changes to the value from user space.
+
+ Any time this value changes, the corresponding hub device will send a
+ udev event with the following attributes:
+
+ OVER_CURRENT_PORT=/sys/bus/usb/devices/.../(hub interface)/portX
+ OVER_CURRENT_COUNT=[current value of this sysfs attribute]
+
What: /sys/bus/usb/devices/.../(hub interface)/portX/quirks
Date: May 2018
Contact: Nicolas Boichat <drinkcat@chromium.org>
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index ea6a043f5beb8c..50f95689ab387f 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -276,6 +276,7 @@ What: /sys/devices/system/cpu/vulnerabilities
/sys/devices/system/cpu/vulnerabilities/meltdown
/sys/devices/system/cpu/vulnerabilities/spectre_v1
/sys/devices/system/cpu/vulnerabilities/spectre_v2
+ /sys/devices/system/cpu/vulnerabilities/spec_store_bypass
Date: January 2018
Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description: Information about CPU vulnerabilities
diff --git a/Documentation/Changes b/Documentation/Changes
index ec97b77c8b007c..f25649ffb89296 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -25,7 +25,7 @@ o GNU C 3.2 # gcc --version
o GNU make 3.80 # make --version
o binutils 2.12 # ld -v
o util-linux 2.10o # fdformat --version
-o module-init-tools 0.9.10 # depmod -V
+o kmod 13 # depmod -V
o e2fsprogs 1.41.4 # e2fsck -V
o jfsutils 1.1.3 # fsck.jfs -V
o reiserfsprogs 3.6.3 # reiserfsck -V
@@ -132,12 +132,6 @@ is not build with CONFIG_KALLSYMS and you have no way to rebuild and
reproduce the Oops with that option, then you can still decode that Oops
with ksymoops.
-Module-Init-Tools
------------------
-
-A new module loader is now in the kernel that requires module-init-tools
-to use. It is backward compatible with the 2.4.x series kernels.
-
Mkinitrd
--------
@@ -319,14 +313,15 @@ Util-linux
----------
o <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>
+Kmod
+----
+o <https://www.kernel.org/pub/linux/utils/kernel/kmod/>
+o <https://git.kernel.org/pub/scm/utils/kernel/kmod/kmod.git>
+
Ksymoops
--------
o <ftp://ftp.kernel.org/pub/linux/utils/kernel/ksymoops/v2.4/>
-Module-Init-Tools
------------------
-o <ftp://ftp.kernel.org/pub/linux/kernel/people/rusty/modules/>
-
Mkinitrd
--------
o <https://code.launchpad.net/initrd-tools/main>
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index a62ea7bab20614..6c8fa0318949b4 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -4336,7 +4336,7 @@ interface and may change in the future.</para>
<table pgwide="1" frame="none" id="flash-control-id">
<title>Flash Control IDs</title>
-
+
<tgroup cols="4">
<colspec colname="c1" colwidth="1*" />
<colspec colname="c2" colwidth="6*" />
@@ -4731,6 +4731,24 @@ interface and may change in the future.</para>
</tbody>
</entrytbl>
</row>
+ <row id="jpeg-luminance-quantization-matrix-control">
+ <entry spanname="id"><constant>V4L2_CID_JPEG_LUMA_QUANTIZATION</constant></entry>
+ <entry>__u8 matrix</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Applications can configure the luminance quantization table
+ directly through the V4L2_CID_JPEG_LUMA_QUANTIZATION control.
+ </entry>
+ </row>
+ <row id="jpeg-chrominance-quantization-matrix-control">
+ <entry spanname="id"><constant>V4L2_CID_JPEG_CHROMA_QUANTIZATION</constant></entry>
+ <entry>__u8 matrix</entry>
+ </row>
+ <row>
+ <entry spanname="descr">Applications can configure the chrominance quantization table
+ directly through the V4L2_CID_JPEG_CHROMA_QUANTIZATION control.
+ </entry>
+ </row>
<row><entry></entry></row>
</tbody>
</tgroup>
diff --git a/Documentation/Makefile b/Documentation/Makefile
index fc759598c4c9db..59d516b7afcb2f 100644
--- a/Documentation/Makefile
+++ b/Documentation/Makefile
@@ -1,4 +1,3 @@
subdir-y := accounting auxdisplay blackfin connector \
filesystems filesystems ia64 laptops misc-devices \
- networking pcmcia prctl ptp spi timers vDSO video4linux \
- watchdog
+ pcmcia prctl ptp spi timers vDSO video4linux watchdog
diff --git a/Documentation/PCI/pcieaer-howto.txt b/Documentation/PCI/pcieaer-howto.txt
index b4987c0bcb20fd..1c11ccbbbd70c7 100644
--- a/Documentation/PCI/pcieaer-howto.txt
+++ b/Documentation/PCI/pcieaer-howto.txt
@@ -81,6 +81,11 @@ In the example, 'Requester ID' means the ID of the device who sends
the error message to root port. Pls. refer to pci express specs for
other fields.
+2.4 AER Statistics / Counters
+
+When PCIe AER errors are captured, the counters / statistics are also exposed
+as sysfs attributes, which are documented in
+Documentation/ABI/testing/sysfs-bus-pci-devices-aer_stats.
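+
+As a rough illustration (the root port address below is hypothetical), the
+cumulative root port counters can be read from user space like any other
+sysfs attribute:
+
+#include <stdio.h>
+
+int main(void)
+{
+    static const char *names[] = {
+        "aer_rootport_total_err_cor",
+        "aer_rootport_total_err_fatal",
+        "aer_rootport_total_err_nonfatal",
+    };
+    char path[128], buf[32];
+    int i;
+
+    for (i = 0; i < 3; i++) {
+        FILE *f;
+
+        /* hypothetical root port at 0000:00:1c.0 */
+        snprintf(path, sizeof(path),
+                 "/sys/bus/pci/devices/0000:00:1c.0/aer_stats/%s",
+                 names[i]);
+        f = fopen(path, "r");
+        if (f && fgets(buf, sizeof(buf), f))
+            printf("%s: %s", names[i], buf);
+        if (f)
+            fclose(f);
+    }
+    return 0;
+}
+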
3. Developer Guide
diff --git a/Documentation/acpi/ssdt-overlays.txt b/Documentation/acpi/ssdt-overlays.txt
new file mode 100644
index 00000000000000..80502594024868
--- /dev/null
+++ b/Documentation/acpi/ssdt-overlays.txt
@@ -0,0 +1,91 @@
+
+In order to support ACPI open-ended hardware configurations (e.g. development
+boards) we need a way to augment the ACPI configuration provided by the firmware
+image. A common example is connecting sensors on I2C / SPI buses on development
+boards.
+
+Although this can be accomplished by creating a kernel platform driver or
+recompiling the firmware image with updated ACPI tables, neither is practical:
+the former proliferates board specific kernel code while the latter requires
+access to firmware tools which are often not publicly available.
+
+Because ACPI supports external references in AML code, a more practical
+way to augment firmware ACPI configuration is by dynamically loading
+user defined SSDT tables that contain the board specific information.
+
+For example, to enumerate a Bosch BMA222E accelerometer on the I2C bus of the
+Minnowboard MAX development board exposed via the LSE connector [1], the
+following ASL code can be used:
+
+DefinitionBlock ("minnowmax.aml", "SSDT", 1, "Vendor", "Accel", 0x00000003)
+{
+ External (\_SB.I2C6, DeviceObj)
+
+ Scope (\_SB.I2C6)
+ {
+ Device (STAC)
+ {
+ Name (_ADR, Zero)
+ Name (_HID, "BMA222E")
+
+ Method (_CRS, 0, Serialized)
+ {
+ Name (RBUF, ResourceTemplate ()
+ {
+ I2cSerialBus (0x0018, ControllerInitiated, 0x00061A80,
+ AddressingMode7Bit, "\\_SB.I2C6", 0x00,
+ ResourceConsumer, ,)
+ GpioInt (Edge, ActiveHigh, Exclusive, PullDown, 0x0000,
+ "\\_SB.GPO2", 0x00, ResourceConsumer, , )
+ { // Pin list
+ 0
+ }
+ })
+ Return (RBUF)
+ }
+ }
+ }
+}
+
+which can then be compiled to AML binary format:
+
+$ iasl minnowmax.asl
+
+Intel ACPI Component Architecture
+ASL Optimizing Compiler version 20140214-64 [Mar 29 2014]
+Copyright (c) 2000 - 2014 Intel Corporation
+
+ASL Input: minnowmax.asl - 30 lines, 614 bytes, 7 keywords
+AML Output: minnowmax.aml - 165 bytes, 6 named objects, 1 executable opcodes
+
+[1] http://wiki.minnowboard.org/MinnowBoard_MAX#Low_Speed_Expansion_Connector_.28Top.29
+
+The resulting AML code can then be loaded by the kernel using the method
+described below.
+
+== Loading ACPI SSDTs from initrd ==
+
+This option allows loading of user defined SSDTs from initrd and it is useful
+when the system does not support EFI or when there is not enough EFI storage.
+
+It works in a similar way to the initrd-based ACPI table override/upgrade: the
+SSDT AML code must be placed in the first, uncompressed, initrd under the
+"kernel/firmware/acpi" path. Multiple files can be used, and each one will be
+loaded as a separate table. Only SSDT and OEM tables are allowed. See
+initrd_table_override.txt for more details.
+
+Here is an example:
+
+# Add the raw ACPI tables to an uncompressed cpio archive.
+# They must be put into a /kernel/firmware/acpi directory inside the
+# cpio archive.
+# The uncompressed cpio archive must come first.
+# Other, typically compressed, cpio archives must be
+# concatenated on top of the uncompressed one.
+mkdir -p kernel/firmware/acpi
+cp ssdt.aml kernel/firmware/acpi
+
+# Create the uncompressed cpio archive and concatenate the original initrd
+# on top:
+find kernel | cpio -H newc --create > /boot/instrumented_initrd
+cat /boot/initrd >>/boot/instrumented_initrd
diff --git a/Documentation/devicetree/bindings/iio/proximity/sx9310.txt b/Documentation/devicetree/bindings/iio/proximity/sx9310.txt
new file mode 100644
index 00000000000000..7ce0256cbc067f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/sx9310.txt
@@ -0,0 +1,61 @@
+Semtech's SX9310 capacitive proximity button device driver
+
+Required properties:
+ - compatible: must be "semtech,sx9310"
+ - reg: i2c address where to find the device
+ - interrupt-parent : should be the phandle for the interrupt controller
+ - interrupts : the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic
+ interrupt client node bindings.
+
+Example:
+
+sx9310@28 {
+ compatible = "semtech,sx9310";
+ reg = <0x28>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+};
+
+The driver also supports ACPI bindings, which can store the register
+configuration obtained from per-model SAR sensor calibration. Refer to the
+data sheet for the meaning of each register.
+
+ACPI name is STH9310.
+
+Example:
+
+ACPI:
+ device pci 15.1 on
+ chip drivers/i2c/sx9310
+ register "desc" = ""Left SAR Proximity Sensor""
+ register "irq" = "ACPI_IRQ_LEVEL_LOW(GPP_D9_IRQ)"
+ register "speed" = "I2C_SPEED_FAST_PLUS"
+ register "uid" = "1"
+ register "reg_prox_ctrl0" = "0x10"
+ register "reg_prox_ctrl1" = "0x00"
+ register "reg_prox_ctrl2" = "0x84"
+ register "reg_prox_ctrl3" = "0x0e"
+ register "reg_prox_ctrl4" = "0x07"
+ register "reg_prox_ctrl5" = "0xc6"
+ register "reg_prox_ctrl6" = "0x20"
+ register "reg_prox_ctrl7" = "0x0d"
+ register "reg_prox_ctrl8" = "0x8d"
+ register "reg_prox_ctrl9" = "0x43"
+ register "reg_prox_ctrl10" = "0x11"
+ register "reg_prox_ctrl11" = "0x00"
+ register "reg_prox_ctrl12" = "0x00"
+ register "reg_prox_ctrl13" = "0x00"
+ register "reg_prox_ctrl14" = "0x00"
+ register "reg_prox_ctrl15" = "0x00"
+ register "reg_prox_ctrl16" = "0x00"
+ register "reg_prox_ctrl17" = "0x00"
+ register "reg_prox_ctrl18" = "0x00"
+ register "reg_prox_ctrl19" = "0x00"
+ register "reg_sar_ctrl0" = "0x50"
+ register "reg_sar_ctrl1" = "0x8a"
+ register "reg_sar_ctrl2" = "0x3c"
+ device i2c 28 on end
+ end
+ end # I2C #1
+
diff --git a/Documentation/devicetree/bindings/iio/proximity/sx932x.txt b/Documentation/devicetree/bindings/iio/proximity/sx932x.txt
new file mode 100644
index 00000000000000..48c66386ea3af0
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/proximity/sx932x.txt
@@ -0,0 +1,34 @@
+Semtech's SX9320/SX9321 capacitive proximity button device driver
+
+Required properties:
+ - compatible: must be "semtech,sx9320" or "semtech,sx9321"
+ - reg: i2c address where to find the device
+ - interrupt-parent : should be the phandle for the interrupt controller
+ - interrupts : the sole interrupt generated by the device
+
+ Refer to interrupt-controller/interrupts.txt for generic
+ interrupt client node bindings.
+
+Example:
+
+sx9320@28 {
+ compatible = "semtech,sx9320";
+ reg = <0x28>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <16 IRQ_TYPE_LEVEL_LOW>;
+};
+
+The driver also supports ACPI bindings, which can store the register
+configuration obtained from per-model SAR sensor calibration. Refer to the
+data sheet for the meaning of each register.
+
+ACPI name is STH9320 or STH9321.
+
+Example:
+ chip drivers/i2c/generic
+ register "hid" = ""STH9321""
+ register "name" = ""SEMTECH SX9321""
+ register "desc" = ""SAR Proximity Sensor""
+ register "irq" = "ACPI_IRQ_LEVEL_LOW(GPP_A18_IRQ)"
+ register "device_present_gpio" = "GPP_B20"
+ device i2c 28 on end
+ end
diff --git a/Documentation/devicetree/bindings/media/i2c/ak7375.txt b/Documentation/devicetree/bindings/media/i2c/ak7375.txt
new file mode 100644
index 00000000000000..aa3e24b4124104
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ak7375.txt
@@ -0,0 +1,8 @@
+Asahi Kasei Microdevices AK7375 voice coil lens driver
+
+AK7375 is a camera voice coil lens.
+
+Mandatory properties:
+
+- compatible: "asahi-kasei,ak7375"
+- reg: I2C slave address
diff --git a/Documentation/devicetree/bindings/misc/throttler.txt b/Documentation/devicetree/bindings/misc/throttler.txt
new file mode 100644
index 00000000000000..2ea80c62dbe15c
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/throttler.txt
@@ -0,0 +1,13 @@
+Throttler driver
+
+The Throttler is used for non-thermal throttling of system components like
+CPUs or devfreq devices.
+
+Required properties:
+- throttler-opps Array of OPP-v2 phandles with the OPPs used for
+ throttling.
+
+Example:
+ throttler {
+ throttler-opps = <&cpu0_opp03>, <&cpu1_opp02>, <&gpu_opp03>;
+ };
diff --git a/Documentation/devicetree/bindings/net/btusb.txt b/Documentation/devicetree/bindings/net/btusb.txt
index 01fa2d4188d4a9..228eb3a6b87828 100644
--- a/Documentation/devicetree/bindings/net/btusb.txt
+++ b/Documentation/devicetree/bindings/net/btusb.txt
@@ -9,6 +9,9 @@ Required properties:
(more may be added later) are:
"usb1286,204e" (Marvell 8997)
+ "usbcf3,e300" (Qualcomm QCA6174A)
+ "usb4ca,301a" (Qualcomm QCA6174A (Lite-On))
+
Also, vendors that use btusb may have device additional properties, e.g:
Documentation/devicetree/bindings/net/marvell-bt-8xxx.txt
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index b5d79761ac970a..410c044166e204 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -8,6 +8,7 @@ Required properties:
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+ Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index edefc26c62042b..01ed527886dce9 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -7,6 +7,9 @@ Required properties:
-compatible : Should be "qcom,ath10k"
Optional properties:
+- qcom,ath10k-calibration-variant: string to search for in the board-2.bin
+ variant list with the same bus and device
+ specific ids
- qcom,ath10k-calibration-data : calibration data as an array, the
length can vary between hw versions
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
index 960da7fcaa9e4d..40d5e7a67de413 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt
@@ -17,7 +17,11 @@ Required properties:
Optional properties:
- extcon : extcon specifier for the Power Delivery
-
+ - rockchip,phy-config : A list of voltage swing (mV) and pre-emphasis
+ (dB) pairs. They are 3 blocks of 4 entries and
+ correspond to the s0p0 ~ s0p3, s1p0 ~ s1p3, and
+ s2p0 ~ s2p3 swing and pre-emphasis values.
Required nodes : a sub-node is required for each port the phy provides.
The sub-node name is used to identify dp or usb3 port,
and shall be the following entries:
@@ -50,6 +54,21 @@ Example:
<&cru SRST_P_UPHY0_TCPHY>;
reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,phy-config = <0x2a 0x00>,
+ <0x1f 0x15>,
+ <0x14 0x22>,
+ <0x02 0x2b>,
+
+ <0x21 0x00>,
+ <0x12 0x15>,
+ <0x02 0x22>,
+ <0 0>,
+
+ <0x15 0x00>,
+ <0x00 0x15>,
+ <0 0>,
+ <0 0>;
+
tcphy0_dp: dp-port {
#phy-cells = <0>;
};
@@ -74,6 +93,21 @@ Example:
<&cru SRST_P_UPHY1_TCPHY>;
reset-names = "uphy", "uphy-pipe", "uphy-tcphy";
+ rockchip,phy-config = <0x2a 0x00>,
+ <0x1f 0x15>,
+ <0x14 0x22>,
+ <0x02 0x2b>,
+
+ <0x21 0x00>,
+ <0x12 0x15>,
+ <0x02 0x22>,
+ <0 0>,
+
+ <0x15 0x00>,
+ <0x00 0x15>,
+ <0 0>,
+ <0 0>;
+
tcphy1_dp: dp-port {
#phy-cells = <0>;
};
diff --git a/Documentation/devicetree/bindings/sound/dmic.txt b/Documentation/devicetree/bindings/sound/dmic.txt
index e957b41367160b..32e8710372696d 100644
--- a/Documentation/devicetree/bindings/sound/dmic.txt
+++ b/Documentation/devicetree/bindings/sound/dmic.txt
@@ -9,6 +9,7 @@ Optional properties:
- dmicen-gpios: GPIO specifier for dmic to control start and stop
- num-channels: Number of microphones on this DAI
- wakeup-delay-ms: Delay (in ms) after enabling the DMIC
+ - modeswitch-delay-ms: Delay (in ms) to complete DMIC mode switch
Example node:
@@ -17,4 +18,5 @@ Example node:
dmicen-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>;
num-channels = <1>;
	wakeup-delay-ms = <50>;
+	modeswitch-delay-ms = <35>;
};
diff --git a/Documentation/devicetree/bindings/sound/max98373.txt b/Documentation/devicetree/bindings/sound/max98373.txt
new file mode 100644
index 00000000000000..456cb1c59353d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/max98373.txt
@@ -0,0 +1,40 @@
+Maxim Integrated MAX98373 Speaker Amplifier
+
+This device supports I2C.
+
+Required properties:
+
+ - compatible : "maxim,max98373"
+
+ - reg : the I2C address of the device.
+
+Optional properties:
+
+ - maxim,vmon-slot-no : slot number used to send voltage information
+                       or, in interleave mode, this will be used as the
+                       interleave slot.
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,imon-slot-no : slot number used to send current information
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,spkfb-slot-no : slot number used to send speaker feedback information
+ slot range : 0 ~ 15, Default : 0
+
+ - maxim,interleave-mode : For cases where a single combined channel
+ for the I/V sense data is not sufficient, the device can also be configured
+ to share a single data output channel on alternating frames.
+ In this configuration, the current and voltage data will be frame interleaved
+ on a single output channel.
+ Boolean, define to enable the interleave mode, Default : false
+
+Example:
+
+codec: max98373@31 {
+ compatible = "maxim,max98373";
+ reg = <0x31>;
+ maxim,vmon-slot-no = <0>;
+ maxim,imon-slot-no = <1>;
+ maxim,spkfb-slot-no = <2>;
+ maxim,interleave-mode;
+};
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 21e8bb5bcfed1f..22ddb43ee91bce 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -471,7 +471,9 @@ manner. The codes are the following:
Note that there is no guarantee that every flag and associated mnemonic will
be present in all further kernel releases. Things get changed, the flags may
-be vanished or the reverse -- new added.
+vanish or, the reverse, new ones may be added. Interpretation of their
+meaning might change in the future as well. So each consumer of these
+flags has to follow the specific kernel version for the exact semantics.
The "Name" field will only be present on a mapping that has been named by
userspace, and will show the name passed in by userspace.
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index cfd31d94c87272..f8bf14055c2f38 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@ Supported chips:
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
Description
-----------
diff --git a/Documentation/input/event-codes.txt b/Documentation/input/event-codes.txt
index 3f0f5ce3338b63..0bbe24c5ed6c36 100644
--- a/Documentation/input/event-codes.txt
+++ b/Documentation/input/event-codes.txt
@@ -158,7 +158,26 @@ A few EV_REL codes have special meanings:
* REL_WHEEL, REL_HWHEEL:
- These codes are used for vertical and horizontal scroll wheels,
- respectively.
+ respectively. The value is the number of detents moved on the wheel, the
+ physical size of which varies by device. For high-resolution wheels
+ this may be an approximation based on the high-resolution scroll events,
+ see REL_WHEEL_HI_RES. These event codes are legacy codes and
+ REL_WHEEL_HI_RES and REL_HWHEEL_HI_RES should be preferred where
+ available.
+
+* REL_WHEEL_HI_RES, REL_HWHEEL_HI_RES:
+
+  - High-resolution scroll wheel data. An accumulated value of 120 represents
+ movement by one detent. For devices that do not provide high-resolution
+ scrolling, the value is always a multiple of 120. For devices with
+ high-resolution scrolling, the value may be a fraction of 120.
+
+ If a vertical scroll wheel supports high-resolution scrolling, this code
+ will be emitted in addition to REL_WHEEL or REL_HWHEEL. The REL_WHEEL
+    and REL_HWHEEL values may be an approximation based on the high-resolution
+ scroll events. There is no guarantee that the high-resolution data
+ is a multiple of 120 at the time of an emulated REL_WHEEL or REL_HWHEEL
+ event.
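+
+  A consumer might fold the high-resolution values back into whole detents
+  with logic along these lines (a minimal sketch; wheel_acc and
+  wheel_detents() are illustrative names, not part of the kernel API):
+
+    static int wheel_acc;  /* accumulated REL_WHEEL_HI_RES units */
+
+    /* returns the number of whole detents to scroll for this event */
+    int wheel_detents(int value)
+    {
+        int detents;
+
+        wheel_acc += value;
+        detents = wheel_acc / 120;   /* 120 units == one detent */
+        wheel_acc -= detents * 120;  /* carry the remainder over */
+        return detents;
+    }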
EV_ABS:
----------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7745ac6ac52d24..7d5fbed97741f4 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -653,7 +653,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
clearcpuid=BITNUM [X86]
Disable CPUID feature X for the kernel. See
- arch/x86/include/asm/cpufeature.h for the valid bit
+ arch/x86/include/asm/cpufeatures.h for the valid bit
numbers. Note the Linux specific bits are not necessarily
stable over kernel options, but the vendor specific
ones should be.
@@ -972,11 +972,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
See Documentation/x86/intel_mpx.txt for more
information about the feature.
- eagerfpu= [X86]
- on enable eager fpu restore
- off disable eager fpu restore
- auto selects the default scheme, which automatically
- enables eagerfpu restore for xsaveopt.
module.async_probe [KNL]
Enable asynchronous probe on this module.
@@ -2471,6 +2466,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
allow data leaks with this option, which is equivalent
to spectre_v2=off.
+ nospec_store_bypass_disable
+ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
noxsave [BUGS=X86] Disables x86 extended register state save
and restore using xsave. The kernel will fallback to
enabling legacy floating-point and sse state.
@@ -3466,6 +3464,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
debug-uart get routed to the D+ and D- pins of the usb
port and the regular usb controller gets disabled.
+ rodata= [KNL]
+ on Mark read-only kernel memory as read-only (default).
+ off Leave read-only kernel memory writable for debugging.
+
root= [KNL] Root filesystem
See name_to_dev_t comment in init/do_mounts.c.
@@ -3644,6 +3646,48 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Not specifying this option is equivalent to
spectre_v2=auto.
+ spec_store_bypass_disable=
+ [HW] Control Speculative Store Bypass (SSB) Disable mitigation
+ (Speculative Store Bypass vulnerability)
+
+ Certain CPUs are vulnerable to an exploit against a
+		common industry-wide performance optimization known
+ as "Speculative Store Bypass" in which recent stores
+ to the same memory location may not be observed by
+ later loads during speculative execution. The idea
+ is that such stores are unlikely and that they can
+ be detected prior to instruction retirement at the
+		end of a particular speculative execution window.
+
+ In vulnerable processors, the speculatively forwarded
+ store can be used in a cache side channel attack, for
+ example to read memory to which the attacker does not
+ directly have access (e.g. inside sandboxed code).
+
+ This parameter controls whether the Speculative Store
+ Bypass optimization is used.
+
+ on - Unconditionally disable Speculative Store Bypass
+ off - Unconditionally enable Speculative Store Bypass
+ auto - Kernel detects whether the CPU model contains an
+ implementation of Speculative Store Bypass and
+ picks the most appropriate mitigation. If the
+ CPU is not vulnerable, "off" is selected. If the
+ CPU is vulnerable the default mitigation is
+ architecture and Kconfig dependent. See below.
+ prctl - Control Speculative Store Bypass per thread
+ via prctl. Speculative Store Bypass is enabled
+ for a process by default. The state of the control
+ is inherited on fork.
+ seccomp - Same as "prctl" above, but all seccomp threads
+ will disable SSB unless they explicitly opt out.
+
+ Not specifying this option is equivalent to
+ spec_store_bypass_disable=auto.
+
+ Default mitigations:
+ X86: If CONFIG_SECCOMP=y "seccomp", otherwise "prctl"
+
spia_io_base= [HW,MTD]
spia_fio_base=
spia_pedr=
@@ -3949,7 +3993,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
usbcore.authorized_default=
[USB] Default USB device authorization:
(default -1 = authorized except for wireless USB,
- 0 = not authorized, 1 = authorized)
+ 0 = not authorized, 1 = authorized, 2 = authorized
+ if device connected to internal port)
usbcore.autosuspend=
[USB] The autosuspend time delay (in seconds) used
@@ -3981,6 +4026,62 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
USB_REQ_GET_DESCRIPTOR request in milliseconds
(default 5000 = 5.0 seconds).
+ usbcore.quirks=
+ [USB] A list of quirk entries to augment the built-in
+ usb core quirk list. List entries are separated by
+ commas. Each entry has the form
+ VendorID:ProductID:Flags. The IDs are 4-digit hex
+ numbers and Flags is a set of letters. Each letter
+ will change the built-in quirk; setting it if it is
+ clear and clearing it if it is set. The letters have
+ the following meanings:
+ a = USB_QUIRK_STRING_FETCH_255 (string
+ descriptors must not be fetched using
+ a 255-byte read);
+ b = USB_QUIRK_RESET_RESUME (device can't resume
+ correctly so reset it instead);
+ c = USB_QUIRK_NO_SET_INTF (device can't handle
+ Set-Interface requests);
+ d = USB_QUIRK_CONFIG_INTF_STRINGS (device can't
+ handle its Configuration or Interface
+ strings);
+ e = USB_QUIRK_RESET (device can't be reset
+			(e.g. morph devices), don't use reset);
+ f = USB_QUIRK_HONOR_BNUMINTERFACES (device has
+ more interface descriptions than the
+ bNumInterfaces count, and can't handle
+ talking to these interfaces);
+ g = USB_QUIRK_DELAY_INIT (device needs a pause
+ during initialization, after we read
+ the device descriptor);
+ h = USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL (For
+ high speed and super speed interrupt
+ endpoints, the USB 2.0 and USB 3.0 spec
+ require the interval in microframes (1
+ microframe = 125 microseconds) to be
+ calculated as interval = 2 ^
+ (bInterval-1).
+ Devices with this quirk report their
+ bInterval as the result of this
+ calculation instead of the exponent
+ variable used in the calculation);
+ i = USB_QUIRK_DEVICE_QUALIFIER (device can't
+ handle device_qualifier descriptor
+ requests);
+ j = USB_QUIRK_IGNORE_REMOTE_WAKEUP (device
+ generates spurious wakeup, ignore
+ remote wakeup capability);
+ k = USB_QUIRK_NO_LPM (device can't handle Link
+ Power Management);
+ l = USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL
+ (Device reports its bInterval as linear
+ frames instead of the USB 2.0
+ calculation);
+ m = USB_QUIRK_DISCONNECT_SUSPEND (Device needs
+ to be disconnected before suspend to
+ prevent spurious wakeup)
+ Example: quirks=0781:5580:bk,0a5c:5834:gij
+
usbhid.mousepoll=
[USBHID] The interval which mice are to be polled at.
diff --git a/Documentation/lzo.txt b/Documentation/lzo.txt
index ea45dd3901e3bf..1a0f946d7b8c79 100644
--- a/Documentation/lzo.txt
+++ b/Documentation/lzo.txt
@@ -73,15 +73,29 @@ Description
They just have to "refill" this credit if they consume extra bytes. This is
an implementation design choice independent of the algorithm or encoding.
+Versions
+
+0: Original version
+1: LZO-RLE
+
+Version 1 of LZO implements an extension to encode runs of zeros using run
+length encoding. This improves speed for data with many zeros, which is a
+common case for zram. This modifies the bitstream in a backwards compatible way
+(v1 can correctly decompress v0 compressed data, but v0 cannot read v1 data).
+
Byte sequences
First byte encoding :
- 0..17 : follow regular instruction encoding, see below. It is worth
- noting that codes 16 and 17 will represent a block copy from
- the dictionary which is empty, and that they will always be
+ 0..16 : follow regular instruction encoding, see below. It is worth
+ noting that code 16 will represent a block copy from the
+ dictionary which is empty, and that it will always be
invalid at this place.
+ 17 : bitstream version. If the first byte is 17, the next byte
+ gives the bitstream version. If the first byte is not 17,
+ the bitstream version is 0.
+
18..21 : copy 0..3 literals
state = (byte - 17) = 0..3 [ copy <state> literals ]
skip byte
@@ -134,6 +148,11 @@ Byte sequences
state = S (copy S literals after this block)
End of stream is reached if distance == 16384
+ In version 1, this instruction is also used to encode a run of zeros if
+ distance = 0xbfff, i.e. H = 1 and the D bits are all 1.
+ In this case, it is followed by a fourth byte, X.
+ run length = ((X << 3) | (0 0 0 0 0 L L L)) + 4.
+
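+  In C terms, with "insn" being the instruction byte and "x" the extra
+  byte that follows it, the computation is (a sketch; the names are
+  illustrative):
+
+    /* the low 3 bits of insn are the L bits shown above */
+    static unsigned int zero_run_length(unsigned char insn, unsigned char x)
+    {
+        return (((unsigned int)x << 3) | (insn & 0x07)) + 4;
+    }
+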
0 0 1 L L L L L (32..63)
Copy of small block within 16kB distance (preferably less than 34B)
length = 2 + (L ?: 31 + (zero_bytes * 255) + non_zero_byte)
@@ -158,7 +177,9 @@ Byte sequences
Authors
This document was written by Willy Tarreau <w@1wt.eu> on 2014/07/19 during an
- analysis of the decompression code available in Linux 3.16-rc5. The code is
- tricky, it is possible that this document contains mistakes or that a few
- corner cases were overlooked. In any case, please report any doubt, fix, or
- proposed updates to the author(s) so that the document can be updated.
+ analysis of the decompression code available in Linux 3.16-rc5, and updated
+ by Dave Rodgman <dave.rodgman@arm.com> on 2018/10/30 to introduce run-length
+  encoding. The code is tricky, and it is possible that this document contains
+  mistakes or that a few corner cases were overlooked. In any case, please
+  report any doubt, fix, or proposed update to the author(s) so that the
+ document can be updated.
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
deleted file mode 100644
index 4c5d7c48543956..00000000000000
--- a/Documentation/networking/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-subdir-y := timestamping
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 5f1ea84ed72b18..c3e905163637a0 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -112,14 +112,11 @@ min_adv_mss - INTEGER
IP Fragmentation:
-ipfrag_high_thresh - INTEGER
- Maximum memory used to reassemble IP fragments. When
- ipfrag_high_thresh bytes of memory is allocated for this purpose,
- the fragment handler will toss packets until ipfrag_low_thresh
- is reached. This also serves as a maximum limit to namespaces
- different from the initial one.
-
-ipfrag_low_thresh - INTEGER
+ipfrag_high_thresh - LONG INTEGER
+ Maximum memory used to reassemble IP fragments.
+
+ipfrag_low_thresh - LONG INTEGER
+ (Obsolete since linux-4.4.174, backported from linux-4.17)
Maximum memory used to reassemble IP fragments before the kernel
begins to remove incomplete fragment queues to free up resources.
The kernel still accepts new fragments for defragmentation.
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
index 0fe1c6e0dbcd58..bfc6b3e68cc406 100644
--- a/Documentation/networking/netdev-FAQ.txt
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -168,6 +168,15 @@ A: No. See above answer. In short, if you think it really belongs in
dash marker line as described in Documentation/SubmittingPatches to
temporarily embed that information into the patch that you send.
+Q: Are all networking bug fixes backported to all stable releases?
+
+A: Due to capacity, Dave can only take care of the backports for the last
+   2 stable releases. For earlier stable releases, each stable branch maintainer
+   is supposed to take care of them. If you find any patch is missing from an
+   earlier stable branch, please notify stable@vger.kernel.org with either a
+   commit ID or a formally backported patch, and CC Dave and other relevant
+ networking developers.
+
Q: Someone said that the comment style and coding convention is different
for the networking content. Is this true?
diff --git a/Documentation/networking/timestamping/Makefile b/Documentation/networking/timestamping/Makefile
deleted file mode 100644
index 8c20dfaa4d6ee9..00000000000000
--- a/Documentation/networking/timestamping/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# To compile, from the source root
-#
-# make headers_install
-# make M=documentation
-
-# List of programs to build
-hostprogs-y := hwtstamp_config timestamping txtimestamp
-
-# Tell kbuild to always build the programs
-always := $(hostprogs-y)
-
-HOSTCFLAGS_timestamping.o += -I$(objtree)/usr/include
-HOSTCFLAGS_txtimestamp.o += -I$(objtree)/usr/include
-HOSTCFLAGS_hwtstamp_config.o += -I$(objtree)/usr/include
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index b784c270105f40..ed6f6abaad5776 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -273,11 +273,10 @@ struct clk:
%pC pll1
%pCn pll1
- %pCr 1560000000
For printing struct clk structures. '%pC' and '%pCn' print the name
(Common Clock Framework) or address (legacy clock framework) of the
- structure; '%pCr' prints the current clock rate.
+ structure.
Passed by reference.
diff --git a/Documentation/spec_ctrl.txt b/Documentation/spec_ctrl.txt
new file mode 100644
index 00000000000000..32f3d55c54b75e
--- /dev/null
+++ b/Documentation/spec_ctrl.txt
@@ -0,0 +1,94 @@
+===================
+Speculation Control
+===================
+
+Quite a few CPUs have speculation-related misfeatures which are in
+fact vulnerabilities causing data leaks in various forms even across
+privilege domains.
+
+The kernel provides mitigation for such vulnerabilities in various
+forms. Some of these mitigations are compile-time configurable and some
+can be supplied on the kernel command line.
+
+There is also a class of mitigations which are very expensive, but they can
+be restricted to a certain set of processes or tasks in controlled
+environments. The mechanism to control these mitigations is via
+:manpage:`prctl(2)`.
+
+There are two prctl options which are related to this:
+
+ * PR_GET_SPECULATION_CTRL
+
+ * PR_SET_SPECULATION_CTRL
+
+PR_GET_SPECULATION_CTRL
+-----------------------
+
+PR_GET_SPECULATION_CTRL returns the state of the speculation misfeature
+which is selected with arg2 of prctl(2). The return value uses bits 0-3 with
+the following meaning:
+
+==== ===================== ===================================================
+Bit Define Description
+==== ===================== ===================================================
+0 PR_SPEC_PRCTL Mitigation can be controlled per task by
+ PR_SET_SPECULATION_CTRL.
+1 PR_SPEC_ENABLE The speculation feature is enabled, mitigation is
+ disabled.
+2 PR_SPEC_DISABLE The speculation feature is disabled, mitigation is
+ enabled.
+3 PR_SPEC_FORCE_DISABLE Same as PR_SPEC_DISABLE, but cannot be undone. A
+ subsequent prctl(..., PR_SPEC_ENABLE) will fail.
+==== ===================== ===================================================
+
+If all bits are 0 the CPU is not affected by the speculation misfeature.
+
+If PR_SPEC_PRCTL is set, then the per-task control of the mitigation is
+available. If not set, prctl(PR_SET_SPECULATION_CTRL) for the speculation
+misfeature will fail.
+
+PR_SET_SPECULATION_CTRL
+-----------------------
+
+PR_SET_SPECULATION_CTRL allows controlling the speculation misfeature, which
+is selected by arg2 of :manpage:`prctl(2)` per task. arg3 is used to hand
+in the control value, i.e. either PR_SPEC_ENABLE or PR_SPEC_DISABLE or
+PR_SPEC_FORCE_DISABLE.
+
+Common error codes
+------------------
+======= =================================================================
+Value Meaning
+======= =================================================================
+EINVAL The prctl is not implemented by the architecture or unused
+ prctl(2) arguments are not 0.
+
+ENODEV arg2 selects an unsupported speculation misfeature.
+======= =================================================================
+
+PR_SET_SPECULATION_CTRL error codes
+-----------------------------------
+======= =================================================================
+Value Meaning
+======= =================================================================
+0 Success
+
+ERANGE arg3 is incorrect, i.e. it's neither PR_SPEC_ENABLE nor
+ PR_SPEC_DISABLE nor PR_SPEC_FORCE_DISABLE.
+
+ENXIO Control of the selected speculation misfeature is not possible.
+ See PR_GET_SPECULATION_CTRL.
+
+EPERM Speculation was disabled with PR_SPEC_FORCE_DISABLE and the caller
+ tried to enable it again.
+======= =================================================================
+
+Speculation misfeature controls
+-------------------------------
+- PR_SPEC_STORE_BYPASS: Speculative Store Bypass
+
+ Invocations:
+ * prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_ENABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_DISABLE, 0, 0);
+ * prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, PR_SPEC_FORCE_DISABLE, 0, 0);
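+
+As an example, a task that wants the mitigation for itself and its future
+children (the state is inherited on fork) could do the following. This is
+a minimal sketch with error handling abbreviated; it assumes the PR_SPEC_*
+constants from linux/prctl.h are visible via sys/prctl.h:
+
+  #include <stdio.h>
+  #include <sys/prctl.h>
+
+  int main(void)
+  {
+      /* disable the misfeature, i.e. enable the mitigation */
+      if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+                PR_SPEC_DISABLE, 0, 0))
+          perror("PR_SET_SPECULATION_CTRL");
+
+      /* on success this returns a bitmask of the PR_SPEC_* bits above */
+      printf("state: %d\n",
+             (int)prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
+                        0, 0, 0));
+      return 0;
+  }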
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 35e17f748ca78a..af5859b2d0f91a 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -34,7 +34,9 @@ Currently, these files are in /proc/sys/fs:
- overflowgid
- pipe-user-pages-hard
- pipe-user-pages-soft
+- protected_fifos
- protected_hardlinks
+- protected_regular
- protected_symlinks
- suid_dumpable
- super-max
@@ -182,6 +184,24 @@ applied.
==============================================================
+protected_fifos:
+
+The intent of this protection is to avoid unintentional writes to
+an attacker-controlled FIFO where a program expects to create a regular
+file.
+
+When set to "0", writing to FIFOs is unrestricted.
+
+When set to "1" don't allow O_CREAT open on FIFOs that we don't own
+in world writable sticky directories, unless they are owned by the
+owner of the directory.
+
+When set to "2" it also applies to group writable sticky directories.
+
+This protection is based on the restrictions in Openwall.
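+
+For example, with protected_fifos set to "1", an open like the following
+fails (typically with EACCES) when "fifo" already exists in a world-writable
+sticky directory as a FIFO owned by neither the caller nor the directory
+owner (a sketch; the path is illustrative):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      /* rejected instead of unintentionally writing to the FIFO */
+      int fd = open("/tmp/fifo", O_CREAT | O_WRONLY, 0600);
+
+      if (fd < 0)
+          perror("open");
+      return 0;
+  }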
+
+==============================================================
+
protected_hardlinks:
A long-standing class of security issues is the hardlink-based
@@ -202,6 +222,22 @@ This protection is based on the restrictions in Openwall and grsecurity.
==============================================================
+protected_regular:
+
+This protection is similar to protected_fifos, but it
+avoids writes to an attacker-controlled regular file where a program
+expects to create one.
+
+When set to "0", writing to regular files is unrestricted.
+
+When set to "1" don't allow O_CREAT open on regular files that we
+don't own in world writable sticky directories, unless they are
+owned by the owner of the directory.
+
+When set to "2" it also applies to group writable sticky directories.
+
+==============================================================
+
protected_symlinks:
A long-standing class of security issues is the symlink-based
diff --git a/Documentation/usb/authorization.txt b/Documentation/usb/authorization.txt
index c7e985f05d8f9f..68c001aca78c09 100644
--- a/Documentation/usb/authorization.txt
+++ b/Documentation/usb/authorization.txt
@@ -34,7 +34,9 @@ $ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
By default, Wired USB devices are authorized by default to
connect. Wireless USB hosts deauthorize by default all new connected
devices (this is so because we need to do an authentication phase
-before authorizing).
+before authorizing). Writing "2" to the authorized_default attribute
+causes kernel to only authorize by default devices connected to internal
+USB ports.
Example system lockdown (lame)
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index df8ab4fc240a0c..496673adcb6be9 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
- VM ioctls: These query and set attributes that affect an entire virtual
machine, for example memory layout. In addition a VM ioctl is used to
- create virtual cpus (vcpus).
+ create virtual cpus (vcpus) and devices.
Only run VM ioctls from the same process (address space) that was used
to create the VM.
@@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
+ - device ioctls: These query and set attributes that control the operation
+ of a single device.
+
+ device ioctls must be issued from the same process (address space) that
+ was used to create the VM.
2. File descriptors
-------------------
@@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
handle will create a VM file descriptor which can be used to issue VM
-ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
-and return a file descriptor pointing to it. Finally, ioctls on a vcpu
-fd can be used to control the vcpu, including the important task of
-actually running guest code.
+ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
+create a virtual cpu or device and return a file descriptor pointing to
+the new resource. Finally, ioctls on a vcpu or device fd can be used
+to control the vcpu or device. For vcpus, this includes the important
+task of actually running guest code.
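+
+For instance, the descriptor hierarchy described above can be obtained
+with (a sketch; error handling omitted):
+
+  #include <fcntl.h>
+  #include <sys/ioctl.h>
+  #include <linux/kvm.h>
+
+  int main(void)
+  {
+      int kvm  = open("/dev/kvm", O_RDWR);
+      int vm   = ioctl(kvm, KVM_CREATE_VM, 0);    /* VM fd */
+      int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);   /* vcpu fd, id 0 */
+
+      /* vcpu ioctls such as KVM_RUN are then issued on the vcpu fd */
+      return vcpu < 0;
+  }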
In general file descriptors can be migrated among processes by means
of fork() and the SCM_RIGHTS facility of unix domain socket. These
diff --git a/Documentation/vm/unevictable-lru.txt b/Documentation/vm/unevictable-lru.txt
index fa3b527086fabd..7880f5cf9f0251 100644
--- a/Documentation/vm/unevictable-lru.txt
+++ b/Documentation/vm/unevictable-lru.txt
@@ -165,7 +165,7 @@ using a number of wrapper functions:
Query the address space, and return true if it is completely
unevictable.
-These are currently used in two places in the kernel:
+These are currently used in three places in the kernel:
(1) By ramfs to mark the address spaces of its inodes when they are created,
and this mark remains for the life of the inode.
@@ -176,6 +176,10 @@ These are currently used in two places in the kernel:
swapped out; the application must touch the pages manually if it wants to
ensure they're in memory.
+ (3) By the i915 driver to mark pinned address space until it's unpinned. The
+     amount of unevictable memory marked by the i915 driver is roughly the bounded
+ object size in debugfs/dri/0/i915_gem_objects.
+
DETECTING UNEVICTABLE PAGES
---------------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index 852a8127927e98..cb62f26e256046 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1781,6 +1781,14 @@ S: Maintained
F: drivers/media/i2c/as3645a.c
F: include/media/as3645a.h
+ASAHI KASEI AK7375 LENS VOICE COIL DRIVER
+M: Tianshu Qiu <tian.shu.qiu@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/ak7375.c
+F: Documentation/devicetree/bindings/media/i2c/ak7375.txt
+
ASC7621 HARDWARE MONITOR DRIVER
M: George Joseph <george.joseph@fairview5.com>
L: linux-hwmon@vger.kernel.org
@@ -4652,6 +4660,13 @@ L: linux-scsi@vger.kernel.org
S: Odd Fixes (e.g., new signatures)
F: drivers/scsi/fdomain.*
+GASKET DRIVER FRAMEWORK
+M: Rob Springer <rspringer@google.com>
+M: John Joseph <jnjoseph@google.com>
+M: Ben Chan <benchan@chromium.org>
+S: Maintained
+F: drivers/staging/gasket/
+
GCOV BASED KERNEL PROFILING
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
S: Maintained
@@ -10054,6 +10069,13 @@ S: Maintained
F: drivers/ssb/
F: include/linux/ssb/
+SONY IMX208 SENSOR DRIVER
+M: Sakari Ailus <sakari.ailus@linux.intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx208.c
+
SONY IMX258 SENSOR DRIVER
M: Sakari Ailus <sakari.ailus@linux.intel.com>
L: linux-media@vger.kernel.org
@@ -10061,6 +10083,20 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/i2c/imx258.c
+SONY IMX319 SENSOR DRIVER
+M: Bingbu Cao <bingbu.cao@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx319.c
+
+SONY IMX355 SENSOR DRIVER
+M: Tianshu Qiu <tian.shu.qiu@intel.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/i2c/imx355.c
+
SONY VAIO CONTROL DEVICE DRIVER
M: Mattia Dongili <malattia@linux.it>
L: platform-driver-x86@vger.kernel.org
@@ -10225,6 +10261,7 @@ F: arch/alpha/kernel/srm_env.c
STABLE BRANCH
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Sasha Levin <sashal@kernel.org>
L: stable@vger.kernel.org
S: Supported
F: Documentation/stable_kernel_rules.txt
@@ -10726,6 +10763,13 @@ T: git git://repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
S: Maintained
F: drivers/platform/x86/thinkpad_acpi.c
+THROTTLER DRIVERS
+M: Matthias Kaehlcke <mka@chromium.org>
+L: linux-pm@vger.kernel.org
+S: Maintained
+F: drivers/misc/throttler/
+F: include/linux/throttler.h
+
TI BANDGAP AND THERMAL DRIVER
M: Eduardo Valentin <edubezval@gmail.com>
L: linux-pm@vger.kernel.org
diff --git a/Makefile b/Makefile
index 3f2d749ad5446a..708cbf04370ec3 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 135
+SUBLEVEL = 178
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -608,6 +608,22 @@ endif # $(dot-config)
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
+ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
+CLANG_PREFIX := --prefix=$(GCC_TOOLCHAIN_DIR)
+GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) $(CLANG_PREFIX)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+endif
+
# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
# values of the respective KBUILD_* variables
ARCH_CPPFLAGS :=
@@ -621,9 +637,10 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
+KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
+KBUILD_CFLAGS += -Os
else
ifdef CONFIG_PROFILE_ALL_BRANCHES
KBUILD_CFLAGS += -O2
@@ -636,7 +653,7 @@ endif
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
# check for 'asm goto'
-ifeq ($(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
@@ -727,7 +744,7 @@ KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
else
# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.build)
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
endif
@@ -788,7 +805,7 @@ KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
endif
# arch Makefile may override CC so keep this after arch Makefile is included
-NOSTDINC_FLAGS += -nostdinc -isystem $(call shell-cached,$(CC) -print-file-name=include)
+NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
CHECKFLAGS += $(NOSTDINC_FLAGS)
ifdef CONFIG_ERROR_ON_WARNING
@@ -801,6 +818,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)
# disable pointer signed / unsigned warnings in gcc 4.0
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
+# disable stringop warnings in gcc 8+
+KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
+
# disable invalid "can't wrap" optimizations for signed / pointers
KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
@@ -1492,7 +1512,6 @@ clean: $(clean-dirs)
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
-o -name modules.builtin -o -name '.tmp_*.o.*' \
- -o -name .cache.mk \
-o -name '*.ll' \
-o -name '*.gcno' \) -type f -print | xargs rm -f
diff --git a/arch/Kconfig b/arch/Kconfig
index 37253e67dfbc75..5f5f3c5c10bc91 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -214,6 +214,12 @@ config GENERIC_SMP_IDLE_THREAD
config GENERIC_IDLE_POLL_SETUP
bool
+config ARCH_HAS_FORTIFY_SOURCE
+ bool
+ help
+ An architecture should select this when it can successfully
+ build and run with CONFIG_FORTIFY_SOURCE.
+
# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
bool
@@ -222,8 +228,8 @@ config ARCH_INIT_TASK
config ARCH_TASK_STRUCT_ALLOCATOR
bool
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
bool
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -420,6 +426,15 @@ config CC_STACKPROTECTOR_STRONG
endchoice
+config HAVE_ARCH_WITHIN_STACK_FRAMES
+ bool
+ help
+ An architecture should select this if it can walk the kernel stack
+ frames to determine if an object is part of either the arguments
+ or local variables (i.e. that it excludes saved return addresses,
+ and similar) by implementing an inline arch_within_stack_frames(),
+ which is used by CONFIG_HARDENED_USERCOPY.
+
config HAVE_CONTEXT_TRACKING
bool
help
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 06377400dc09b4..469642801a6838 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -55,15 +55,15 @@
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
- defined(CONFIG_ALPHA_SHARK) || \
- defined(CONFIG_ALPHA_EIGER)
+ defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS 80
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
- defined(CONFIG_ALPHA_TAKARA)
+ defined(CONFIG_ALPHA_TAKARA) || \
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h
index 7fde0f88da888b..51ed90be770a8f 100644
--- a/arch/alpha/include/asm/termios.h
+++ b/arch/alpha/include/asm/termios.h
@@ -72,9 +72,15 @@
})
#define user_termios_to_kernel_termios(k, u) \
- copy_from_user(k, u, sizeof(struct termios))
+ copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) \
+ copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+ copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
copy_to_user(u, k, sizeof(struct termios))
#endif /* _ALPHA_TERMIOS_H */
diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h
index 32e920a83ae57b..e9e90bfa2b50a6 100644
--- a/arch/alpha/include/asm/thread_info.h
+++ b/arch/alpha/include/asm/thread_info.h
@@ -86,33 +86,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
#define TS_UAC_NOPRINT 0x0001 /* ! Preserve the following three */
#define TS_UAC_NOFIX 0x0002 /* ! flags as they match */
#define TS_UAC_SIGBUS 0x0004 /* ! userspace part of 'osf_sysinfo' */
-#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif
#define SET_UNALIGN_CTL(task,value) ({ \
__u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h
index f30c94ae1bdb19..7ee8ab577e115c 100644
--- a/arch/alpha/include/uapi/asm/ioctls.h
+++ b/arch/alpha/include/uapi/asm/ioctls.h
@@ -31,6 +31,11 @@
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
diff --git a/arch/alpha/include/uapi/asm/mman.h b/arch/alpha/include/uapi/asm/mman.h
index f2f94967179817..cddb5a82a66f43 100644
--- a/arch/alpha/include/uapi/asm/mman.h
+++ b/arch/alpha/include/uapi/asm/mman.h
@@ -63,6 +63,9 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h
index 879dd35899218b..483c7ec2a8795f 100644
--- a/arch/alpha/include/uapi/asm/termbits.h
+++ b/arch/alpha/include/uapi/asm/termbits.h
@@ -25,6 +25,19 @@ struct termios {
speed_t c_ospeed; /* output speed */
};
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
/* Alpha has matching termios and ktermios */
struct ktermios {
@@ -147,6 +160,7 @@ struct ktermios {
#define B3000000 00034
#define B3500000 00035
#define B4000000 00036
+#define BOTHER 00037
#define CSIZE 00001400
#define CS5 00000000
@@ -164,6 +178,9 @@ struct ktermios {
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define CIBAUD 07600000
+#define IBSHIFT 16
+
/* c_lflag bits */
#define ISIG 0x00000080
#define ICANON 0x00000100
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 63f06a2b1f7f8f..bbc7cb9faa01ce 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -526,24 +526,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
SYSCALL_DEFINE1(osf_utsname, char __user *, name)
{
int error;
+ char tmp[5 * 32];
down_read(&uts_sem);
- error = -EFAULT;
- if (copy_to_user(name + 0, utsname()->sysname, 32))
- goto out;
- if (copy_to_user(name + 32, utsname()->nodename, 32))
- goto out;
- if (copy_to_user(name + 64, utsname()->release, 32))
- goto out;
- if (copy_to_user(name + 96, utsname()->version, 32))
- goto out;
- if (copy_to_user(name + 128, utsname()->machine, 32))
- goto out;
+ memcpy(tmp + 0 * 32, utsname()->sysname, 32);
+ memcpy(tmp + 1 * 32, utsname()->nodename, 32);
+ memcpy(tmp + 2 * 32, utsname()->release, 32);
+ memcpy(tmp + 3 * 32, utsname()->version, 32);
+ memcpy(tmp + 4 * 32, utsname()->machine, 32);
+ up_read(&uts_sem);
- error = 0;
- out:
- up_read(&uts_sem);
- return error;
+ if (copy_to_user(name, tmp, sizeof(tmp)))
+ return -EFAULT;
+ return 0;
}
SYSCALL_DEFINE0(getpagesize)
@@ -561,24 +556,22 @@ SYSCALL_DEFINE0(getdtablesize)
*/
SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
{
- unsigned len;
- int i;
+ int len, err = 0;
+ char *kname;
+ char tmp[32];
- if (!access_ok(VERIFY_WRITE, name, namelen))
- return -EFAULT;
-
- len = namelen;
- if (len > 32)
- len = 32;
+ if (namelen < 0 || namelen > 32)
+ namelen = 32;
down_read(&uts_sem);
- for (i = 0; i < len; ++i) {
- __put_user(utsname()->domainname[i], name + i);
- if (utsname()->domainname[i] == '\0')
- break;
- }
+ kname = utsname()->domainname;
+ len = strnlen(kname, namelen);
+ len = min(len + 1, namelen);
+ memcpy(tmp, kname, len);
up_read(&uts_sem);
+ if (copy_to_user(name, tmp, len))
+ return -EFAULT;
return 0;
}
@@ -741,13 +734,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
};
unsigned long offset;
const char *res;
- long len, err = -EINVAL;
+ long len;
+ char tmp[__NEW_UTS_LEN + 1];
offset = command-1;
if (offset >= ARRAY_SIZE(sysinfo_table)) {
/* Digital UNIX has a few unpublished interfaces here */
printk("sysinfo(%d)", command);
- goto out;
+ return -EINVAL;
}
down_read(&uts_sem);
@@ -755,13 +749,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
len = strlen(res)+1;
if ((unsigned long)len > (unsigned long)count)
len = count;
- if (copy_to_user(buf, res, len))
- err = -EFAULT;
- else
- err = 0;
+ memcpy(tmp, res, len);
up_read(&uts_sem);
- out:
- return err;
+ if (copy_to_user(buf, tmp, len))
+ return -EFAULT;
+ return 0;
}
SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4a905bd667e2ef..0f68f0de9b5e1c 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -77,7 +77,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
- (r) <= 18 ? (r)+8 : (r)-10])
+ (r) <= 18 ? (r)+10 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c4ee25e88a7b0e..e983f410135a97 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -96,7 +96,7 @@ endmenu
choice
prompt "ARC Instruction Set"
- default ISA_ARCOMPACT
+ default ISA_ARCV2
config ISA_ARCOMPACT
bool "ARCompact ISA"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index c05ea2b5427629..fffaff9c7b2cc5 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -12,26 +12,12 @@ ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := arc-linux-
endif
-KBUILD_DEFCONFIG := nsim_700_defconfig
+KBUILD_DEFCONFIG := nsim_hs_defconfig
-cflags-y += -fno-common -pipe -fno-builtin -D__linux__
+cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer reported bug where some code built in kernel was NOT using
@@ -137,16 +123,3 @@ dtbs: scripts
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index f1ac9818b751e1..3023f91c77c27d 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,6 +1,5 @@
CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -18,6 +17,7 @@ CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_MODULES=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_ARC_PLAT_AXS10X=y
@@ -98,6 +98,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 323486d6ee8341..f18107185f5309 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,6 +1,5 @@
CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -104,6 +103,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 66191cd0447eaa..6e1dd8521d2a16 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,6 +1,5 @@
CONFIG_CROSS_COMPILE="arc-linux-"
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
-# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
@@ -105,6 +104,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 138f9d8879570a..86e5a62556a8f0 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -16,6 +16,7 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 31e1d95764ff91..a4d7b919224a24 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -17,6 +17,7 @@ CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_KPROBES=y
CONFIG_MODULES=y
# CONFIG_LBDAF is not set
@@ -69,5 +70,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index fcae66683ca0bd..b3fb49c8bd145c 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -69,5 +69,6 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index b01b659168ea4a..710c167bbdd888 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -88,6 +88,7 @@ CONFIG_EXT2_FS_XATTR=y
CONFIG_TMPFS=y
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
CONFIG_FTRACE=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 3b4dc9cebcf152..7469b754ac77a9 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -19,6 +19,7 @@ CONFIG_KALLSYMS_ALL=y
# CONFIG_AIO is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_ISA_ARCOMPACT=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index a07f20de221ba1..772073e5ba0445 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -89,6 +89,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index f36c047b33cad0..50e215a163ff75 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -91,6 +91,7 @@ CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 0352fb8d21b998..9623ae002f5b48 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -286,7 +286,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@@ -346,9 +346,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
- int n;
+ unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index d5da2115d78a67..03d6bb0f4e13a2 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -17,8 +17,11 @@
#ifndef __ASM_ARC_UDELAY_H
#define __ASM_ARC_UDELAY_H
+#include <asm-generic/types.h>
#include <asm/param.h> /* HZ */
+extern unsigned long loops_per_jiffy;
+
static inline void __delay(unsigned long loops)
{
__asm__ __volatile__(
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
index cb69299a492e57..f120d823e8c24a 100644
--- a/arch/arc/include/asm/io.h
+++ b/arch/arc/include/asm/io.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/page.h>
+#include <asm/unaligned.h>
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
@@ -85,6 +86,42 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
return w;
}
+/*
+ * {read,write}s{b,w,l}() repeatedly access the same IO address in
+ * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
+ * @count times
+ */
+#define __raw_readsx(t,f) \
+static inline void __raw_reads##f(const volatile void __iomem *addr, \
+ void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+	/* Some ARC CPUs don't support unaligned accesses */		\
+ if (is_aligned) { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } else { \
+ do { \
+ u##t x = __raw_read##f(addr); \
+ put_unaligned(x, buf++); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_readsb __raw_readsb
+__raw_readsx(8, b)
+#define __raw_readsw __raw_readsw
+__raw_readsx(16, w)
+#define __raw_readsl __raw_readsl
+__raw_readsx(32, l)
+
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
@@ -117,6 +154,35 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
}
+#define __raw_writesx(t,f) \
+static inline void __raw_writes##f(volatile void __iomem *addr, \
+ const void *ptr, unsigned int count) \
+{ \
+ bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0; \
+ const u##t *buf = ptr; \
+ \
+ if (!count) \
+ return; \
+ \
+	/* Some ARC CPUs don't support unaligned accesses */		\
+ if (is_aligned) { \
+ do { \
+ __raw_write##f(*buf++, addr); \
+ } while (--count); \
+ } else { \
+ do { \
+ __raw_write##f(get_unaligned(buf++), addr); \
+ } while (--count); \
+ } \
+}
+
+#define __raw_writesb __raw_writesb
+__raw_writesx(8, b)
+#define __raw_writesw __raw_writesw
+__raw_writesx(16, w)
+#define __raw_writesl __raw_writesl
+__raw_writesx(32, l)
+
/*
* MMIO can also get buffered/optimized in micro-arch, so barriers needed
* Based on ARM model for the typical use case
@@ -132,10 +198,16 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define readsb(p,d,l) ({ __raw_readsb(p,d,l); __iormb(); })
+#define readsw(p,d,l) ({ __raw_readsw(p,d,l); __iormb(); })
+#define readsl(p,d,l) ({ __raw_readsl(p,d,l); __iormb(); })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c) ({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c) ({ __iowmb(); writel_relaxed(v,c); })
+#define writesb(p,d,l) ({ __iowmb(); __raw_writesb(p,d,l); })
+#define writesw(p,d,l) ({ __iowmb(); __raw_writesw(p,d,l); })
+#define writesl(p,d,l) ({ __iowmb(); __raw_writesl(p,d,l); })
/*
* Relaxed API for drivers which can handle barrier ordering themselves
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index c28e6c347b4900..871f3cb16af9f2 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -34,9 +34,7 @@ struct machine_desc {
const char *name;
const char **dt_compat;
void (*init_early)(void);
-#ifdef CONFIG_SMP
void (*init_per_cpu)(unsigned int);
-#endif
void (*init_machine)(void);
void (*init_late)(void);
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 429957f1c23655..8f1145ed0046f0 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -102,7 +102,7 @@ typedef pte_t * pgtable_t;
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define WANT_PAGE_VIRTUAL 1
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 5f071762fb1c72..6a2ae61748e4ce 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
/* counts condition */
[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+ /* All jump instructions that are taken */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
[PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
#ifdef CONFIG_ISA_ARCV2
[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index e5fec320f158e5..c07d7b0a40580e 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -372,7 +372,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* Decode a PTE containing swap "identifier "into constituents */
#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 57387b567f3422..f077a419cb5165 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -209,7 +209,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -438,7 +438,7 @@ __arc_copy_to_user(void __user *to, const void *from, unsigned long n)
*/
"=&r" (tmp), "+r" (to), "+r" (from)
:
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return n;
}
@@ -658,7 +658,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
" .previous \n"
: "+r"(d_char), "+r"(res)
: "i"(0)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
@@ -691,7 +691,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
" .previous \n"
: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
: "g"(-EFAULT), "r"(count)
- : "lp_count", "lp_start", "lp_end", "memory");
+ : "lp_count", "memory");
return res;
}
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 689dd867fdff53..cd64cb4ef7b065 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
+
+#ifdef CONFIG_ISA_ARCV2
+	; Unaligned access is disabled at reset, so enable it early, as
+	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned accesses
+	; by default
+ lr r5, [status32]
+ bset r5, r5, STATUS_AD_BIT
+ kflag r5
+#endif
.endm
.section .init.text, "ax",@progbits
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index ba17f85285cfe8..dd42c6feaba5e9 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -31,10 +31,10 @@ void __init init_IRQ(void)
/* an SMP H/w block could do an IPI IRQ request here */
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(smp_processor_id());
-#endif
}
/*
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index a3f750e76b683d..8f40c6c5d77e36 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -153,6 +153,26 @@ int copy_thread(unsigned long clone_flags,
task_thread_info(current)->thr_ptr;
}
+
+ /*
+	 * Set up the usermode thread pointer, part 1:
+	 * when the child is picked by the scheduler, __switch_to() uses
+	 * @c_callee to populate the usermode callee regs. This works (despite
+	 * being a kernel function) since the child's special return path,
+	 * @ret_from_fork(), keeps those regs intact all the way to the RTIE to usermode.
+ */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+	 * Set up the usermode thread pointer, part 2:
+	 * given this special use of r25 in the kernel, __switch_to() sets r25
+	 * for kernel needs, and only the final return path sets up the usermode
+	 * r25, from pt_regs->user_r25. So set that up as well.
+ */
+ c_regs->user_r25 = c_callee->r25;
+#endif
+
return 0;
}
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 9a84cbdd44b018..017fb440bba4b8 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -821,7 +821,7 @@ void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
- unsigned int paddr = pfn << PAGE_SHIFT;
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
@@ -841,8 +841,9 @@ void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping? */
- __flush_dcache_page(page_address(page), u_vaddr);
- __flush_dcache_page(page_address(page), page_address(page));
+ __flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
+ __flush_dcache_page((phys_addr_t)page_address(page),
+ (phys_addr_t)page_address(page));
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c7585f313fddb9..5757f3db7bd35f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -33,6 +33,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32
select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32
select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -1431,8 +1432,7 @@ config BIG_LITTLE
config BL_SWITCHER
bool "big.LITTLE switcher support"
- depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
- select ARM_CPU_SUSPEND
+ depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
select CPU_PM
help
The big.LITTLE "switcher" provides the core functionality to
@@ -1484,6 +1484,7 @@ config NR_CPUS
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
+ select GENERIC_IRQ_MIGRATION
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
@@ -2173,7 +2174,8 @@ config ARCH_SUSPEND_POSSIBLE
def_bool y
config ARM_CPU_SUSPEND
- def_bool PM_SLEEP
+ def_bool PM_SLEEP || BL_SWITCHER
+ depends on ARCH_SUSPEND_POSSIBLE
config ARCH_HIBERNATION_POSSIBLE
bool
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index 5e3f5e86ffcfef..cfcbf5baba4f3f 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -74,6 +74,11 @@
};
};
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+ status = "disabled";
+};
+
&iva {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 63de2a1b4315ef..648236c5281b9f 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -508,6 +508,8 @@
touchscreen-size-x = <480>;
touchscreen-size-y = <272>;
+
+ wakeup-source;
};
tlv320aic3106: tlv320aic3106@1b {
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 2778533502d9b7..5ce200860c8930 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -110,7 +110,7 @@
reg = <0x18008000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@@ -138,7 +138,7 @@
reg = <0x1800b000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 6881757b03e82d..67369f284b9127 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -147,7 +147,7 @@
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 0bd98cd00816c7..4ef5c3410fcce8 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -267,11 +267,7 @@
compatible = "ti,dm6441-gpio";
gpio-controller;
reg = <0x226000 0x1000>;
- interrupts = <42 IRQ_TYPE_EDGE_BOTH
- 43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
- 45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
- 47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
- 49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+ interrupts = <42 43 44 45 46 47 48 49 50>;
ti,ngpio = <144>;
ti,davinci-gpio-unbanked = <0>;
status = "disabled";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 02bd6312d1d94a..e6a3a94bac693f 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1549,7 +1549,7 @@
};
};
- dcan1: can@481cc000 {
+ dcan1: can@4ae3c000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;
@@ -1559,7 +1559,7 @@
status = "disabled";
};
- dcan2: can@481d0000 {
+ dcan2: can@48480000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x48480000 0x2000>;
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 2f30d632f1cca7..e81a27214188c6 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -150,6 +150,9 @@
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
+ clock-names = "clkout8";
+ clocks = <&cmu CLK_FIN_PLL>;
+ #clock-cells = <1>;
};
mipi_phy: video-phy@10020710 {
diff --git a/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
new file mode 100644
index 00000000000000..c8771c660550ba
--- /dev/null
+++ b/arch/arm/boot/dts/exynos5420-tmu-sensor-conf.dtsi
@@ -0,0 +1,25 @@
+/*
+ * Device tree sources for Exynos5420 TMU sensor configuration
+ *
+ * Copyright (c) 2014 Lukasz Majewski <l.majewski@samsung.com>
+ * Copyright (c) 2017 Krzysztof Kozlowski <krzk@kernel.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/thermal/thermal_exynos.h>
+
+#thermal-sensor-cells = <0>;
+samsung,tmu_gain = <8>;
+samsung,tmu_reference_voltage = <16>;
+samsung,tmu_noise_cancel_mode = <4>;
+samsung,tmu_efuse_value = <55>;
+samsung,tmu_min_efuse_value = <0>;
+samsung,tmu_max_efuse_value = <100>;
+samsung,tmu_first_point_trim = <25>;
+samsung,tmu_second_point_trim = <85>;
+samsung,tmu_default_temp_offset = <50>;
+samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 1b3d6c769a3cbb..d5edb77669422c 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -777,7 +777,7 @@
interrupts = <0 65 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu1: tmu@10064000 {
@@ -786,7 +786,7 @@
interrupts = <0 183 0>;
clocks = <&clock CLK_TMU>;
clock-names = "tmu_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu2: tmu@10068000 {
@@ -795,7 +795,7 @@
interrupts = <0 184 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_cpu3: tmu@1006c000 {
@@ -804,7 +804,7 @@
interrupts = <0 185 0>;
clocks = <&clock CLK_TMU>, <&clock CLK_TMU_GPU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
tmu_gpu: tmu@100a0000 {
@@ -813,7 +813,7 @@
interrupts = <0 215 0>;
clocks = <&clock CLK_TMU_GPU>, <&clock CLK_TMU>;
clock-names = "tmu_apbif", "tmu_triminfo_apbif";
- #include "exynos4412-tmu-sensor-conf.dtsi"
+ #include "exynos5420-tmu-sensor-conf.dtsi"
};
thermal-zones {
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index 53fd75c8ffcfd3..47894b41e4e2b4 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -130,6 +130,17 @@
};
};
+&cpu0 {
+ /* CPU rated to 1GHz, not 1.2GHz as per the default settings */
+ operating-points = <
+ /* kHz uV */
+ 166666 850000
+ 400000 900000
+ 800000 1050000
+ 1000000 1200000
+ >;
+};
+
&esdhc1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_esdhc1>;
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 399103b8e2c948..c81fb8fdc41ff0 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -95,7 +95,7 @@
clocks = <&clks IMX6Q_CLK_ECSPI5>,
<&clks IMX6Q_CLK_ECSPI5>;
clock-names = "ipg", "per";
- dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
+ dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 167f77b3bd4365..6963dff815dc88 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -1250,7 +1250,7 @@
/* non-prefetchable memory */
0x82000000 0 0x08000000 0x08000000 0 0x00f00000>;
num-lanes = <1>;
- interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_PCIE_REF_125M>,
<&clks IMX6SX_CLK_PCIE_AXI>,
<&clks IMX6SX_CLK_LVDS1_OUT>,
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index 113dcf056dcfb7..1b2dacfa613237 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -35,8 +35,8 @@
compatible = "gpio-fan";
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
pinctrl-names = "default";
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
- &gpio1 13 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0
3000 1
6000 2>;
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 766bbb8495b60d..47e5b63339d18f 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -220,12 +220,15 @@
status = "disabled";
};
- twsi2: i2c@d4025000 {
+ twsi2: i2c@d4031000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4025000 0x1000>;
- interrupts = <58>;
+ reg = <0xd4031000 0x1000>;
+ interrupt-parent = <&intcmux17>;
+ interrupts = <0>;
clocks = <&soc_clocks MMP2_CLK_TWSI1>;
resets = <&soc_clocks MMP2_CLK_TWSI1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index f0bdc41f8eff0c..235d1493f8aa77 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -33,6 +33,7 @@
gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
enable-active-high;
regulator-boot-on;
+ startup-delay-us = <25000>;
};
vbat: fixedregulator-vbat {
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index e00d50ef678fa7..3ff5ea16ebb375 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -577,7 +577,7 @@
};
sata0: sata@29000000 {
- compatible = "generic-ahci";
+ compatible = "qcom,apq8064-ahci", "generic-ahci";
status = "disabled";
reg = <0x29000000 0x180>;
interrupts = <GIC_SPI 209 IRQ_TYPE_NONE>;
@@ -599,6 +599,7 @@
phys = <&sata_phy0>;
phy-names = "sata-phy";
+ ports-implemented = <0x1>;
};
/* Temporary fixed regulator */
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235ef0fb6db..6e9e1c2f9def9f 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at91sam9260-macb", "cdns,macb";
+ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index bb1ca158273c8f..1922e7a93e4093 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -201,6 +201,7 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x70>;
+ reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index d3a8018639de22..f4a2d28936e1ee 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -145,9 +145,11 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 7da5503c059141..e08d151840567d 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -117,6 +117,7 @@ static inline u32 gic_read_iar(void)
u32 irqstat;
asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+ dsb(sy);
return irqstat;
}
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index d5525bfc7e3e61..9156fc303afd8d 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -491,7 +491,6 @@ static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
#endif
#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 1bd9510de1b9ce..cae4df39f02e42 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -24,7 +24,6 @@
#ifndef __ASSEMBLY__
struct irqaction;
struct pt_regs;
-extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
void handle_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 0a9d5dd932941a..6949c7d4481c46 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -76,7 +76,7 @@ extern int kgdb_fault_expected;
#define KGDB_MAX_NO_CPUS 1
#define BUFMAX 400
-#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
+#define NUMREGBYTES (GDB_MAX_REGS << 2)
#define NUMCRITREGBYTES (32 << 2)
#define _R0 0
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 35c9db857ebe9c..7665bd2f48718b 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -251,7 +251,7 @@ extern int __put_user_8(void *, unsigned long long);
({ \
unsigned long __limit = current_thread_info()->addr_limit - 1; \
const typeof(*(p)) __user *__tmp_p = (p); \
- register const typeof(*(p)) __r2 asm("r2") = (x); \
+ register typeof(*(p)) __r2 asm("r2") = (x); \
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
@@ -496,7 +496,10 @@ arm_copy_from_user(void *to, const void __user *from, unsigned long n);
static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(to, n, false);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_from_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
@@ -511,11 +514,15 @@ static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
#ifndef CONFIG_UACCESS_WITH_MEMCPY
- unsigned int __ua_flags = uaccess_save_and_enable();
+ unsigned int __ua_flags;
+
+ check_object_size(from, n, true);
+ __ua_flags = uaccess_save_and_enable();
n = arm_copy_to_user(to, from, n);
uaccess_restore(__ua_flags);
return n;
#else
+ check_object_size(from, n, true);
return arm_copy_to_user(to, from, n);
#endif
}
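check_object_size() is the hook that HAVE_ARCH_HARDENED_USERCOPY enables: before the actual copy, the kernel pointer and length are validated against the backing object. A hedged sketch of the failure class it catches; the function, buffer size, and names below are invented for illustration:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static long show_buf(char __user *ubuf, size_t n)
{
	char *kbuf = kmalloc(32, GFP_KERNEL);
	long ret = 0;

	if (!kbuf)
		return -ENOMEM;
	memset(kbuf, 0, 32);
	/* With hardened usercopy, an n that overruns the 32-byte slab
	 * object is caught by check_object_size() inside copy_to_user()
	 * before any bytes reach userspace.
	 */
	if (copy_to_user(ubuf, kbuf, n))
		ret = -EFAULT;
	kfree(kbuf);
	return ret;
}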
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 1d45320ee125d5..900c591913d5e2 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,7 +31,6 @@
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
-#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
@@ -119,64 +118,3 @@ int __init arch_probe_nr_irqs(void)
return nr_irqs;
}
#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
- const struct cpumask *affinity = irq_data_get_affinity_mask(d);
- struct irq_chip *c;
- bool ret = false;
-
- /*
- * If this is a per-CPU interrupt, or the affinity does not
- * include this CPU, then we have nothing to do.
- */
- if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
- return false;
-
- if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
- ret = true;
- }
-
- c = irq_data_get_irq_chip(d);
- if (!c->irq_set_affinity)
- pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
- cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
- return ret;
-}
-
-/*
- * The current CPU has been marked offline. Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
- unsigned int i;
- struct irq_desc *desc;
- unsigned long flags;
-
- local_irq_save(flags);
-
- for_each_irq_desc(i, desc) {
- bool affinity_broken;
-
- raw_spin_lock(&desc->lock);
- affinity_broken = migrate_one_irq(desc);
- raw_spin_unlock(&desc->lock);
-
- if (affinity_broken)
- pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
- i, smp_processor_id());
- }
-
- local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 72f396b30ff76f..22726bd2ada4aa 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -771,7 +771,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b26361355daeb3..08ce9e36dc5a85 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -218,7 +218,7 @@ int __cpu_disable(void)
/*
* OK - migrate IRQs away from this CPU
*/
- migrate_irqs();
+ irq_migrate_all_off_this_cpu();
/*
* Flush user cache and TLB mappings, and then remove this CPU
@@ -687,6 +687,21 @@ void smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
+/* If panic() is called at the same time on CPU1 and CPU2, and CPU1
+ * calls panic_smp_self_stop() before CPU2 issues crash_smp_send_stop(),
+ * CPU1 cannot receive the IPI IRQs from CPU2, stays marked online
+ * forever, and kdump fails. So split out panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+	pr_debug("CPU %u will stop doing anything useful since another CPU has panicked\n",
+ smp_processor_id());
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ cpu_relax();
+}
+
/*
* not supported here
*/
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 28b690fcec1cbf..b2e234468cb52d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -121,6 +121,8 @@ SECTIONS
#ifdef CONFIG_DEBUG_RODATA
. = ALIGN(1<<SECTION_SHIFT);
#endif
+ _etext = .; /* End of text section */
+
RO_DATA(PAGE_SIZE)
. = ALIGN(4);
@@ -151,8 +153,6 @@ SECTIONS
NOTES
- _etext = .; /* End of text and rodata section */
-
#ifndef CONFIG_XIP_KERNEL
# ifdef CONFIG_ARM_KERNMEM_PERMS
. = ALIGN(1<<SECTION_SHIFT);
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 387ee2a11e3607..ae61e2ea7255b6 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -87,11 +87,10 @@ static unsigned long mmio_read_buf(char *buf, unsigned int len)
/**
* kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
+ * or in-kernel IO emulation
+ *
* @vcpu: The VCPU pointer
* @run: The VCPU run struct containing the mmio data
- *
- * This should only be called after returning from userspace for MMIO load
- * emulation.
*/
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
@@ -118,6 +117,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
+ /*
+ * The MMIO instruction is emulated and should not be re-executed
+ * in the guest.
+ */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 0;
}
@@ -151,11 +156,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
- /*
- * The MMIO instruction is emulated and should not be re-executed
- * in the guest.
- */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 0;
}
@@ -206,14 +206,17 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
- memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
+ if (!is_write)
+ memcpy(run->mmio.data, data_buf, len);
kvm_handle_mmio_return(vcpu, run);
return 1;
}
+ if (is_write)
+ memcpy(run->mmio.data, data_buf, len);
run->exit_reason = KVM_EXIT_MMIO;
return 0;
}
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index e8835d4e173c9a..e0267532bd4e0f 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -892,19 +892,35 @@ static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
pmd = stage2_get_pmd(kvm, cache, addr);
VM_BUG_ON(!pmd);
- /*
- * Mapping in huge pages should only happen through a fault. If a
- * page is merged into a transparent huge page, the individual
- * subpages of that huge page should be unmapped through MMU
- * notifiers before we get here.
- *
- * Merging of CompoundPages is not supported; they should become
- * splitting first, unmapped, merged, and mapped back in on-demand.
- */
- VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
-
old_pmd = *pmd;
if (pmd_present(old_pmd)) {
+		 * Multiple vcpus faulting on the same PMD entry can
+ * Multiple vcpus faulting on the same PMD entry, can
+ * lead to them sequentially updating the PMD with the
+ * same value. Following the break-before-make
+ * (pmd_clear() followed by tlb_flush()) process can
+ * hinder forward progress due to refaults generated
+ * on missing translations.
+ *
+ * Skip updating the page table if the entry is
+ * unchanged.
+ */
+ if (pmd_val(old_pmd) == pmd_val(*new_pmd))
+ return 0;
+
+ /*
+ * Mapping in huge pages should only happen through a
+ * fault. If a page is merged into a transparent huge
+ * page, the individual subpages of that huge page
+ * should be unmapped through MMU notifiers before we
+ * get here.
+ *
+ * Merging of CompoundPages is not supported; they
+		 * should first be split, unmapped, merged, and
+		 * mapped back in on-demand.
+ */
+ VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
+
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
@@ -961,6 +977,10 @@ static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
/* Create 2nd stage page table mapping - Level 3 */
old_pte = *pte;
if (pte_present(old_pte)) {
+ /* Skip page table update if there is no change */
+ if (pte_val(old_pte) == pte_val(*new_pte))
+ return 0;
+
kvm_set_pte(pte, __pte(0));
kvm_tlb_flush_vmid_ipa(kvm, addr);
} else {
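The guards added above exist because stage-2 break-before-make (clear, TLB flush, rewrite) on a value that is not actually changing lets other vcpus refault on the transiently missing translation and stall forward progress. Stripped of kernel types, the shape of the logic is the following; the types and helpers are simplified stand-ins, not the real API:

typedef unsigned long entryval_t;

static void tlb_flush_entry(entryval_t *ep) { /* stand-in for the flush */ }

static void set_stage2_entry(entryval_t *ep, entryval_t new)
{
	entryval_t old = *ep;

	if (old) {			/* entry already present */
		if (old == new)
			return;		/* unchanged: skip break-before-make */
		*ep = 0;		/* break ... */
		tlb_flush_entry(ep);
	}
	*ep = new;			/* ... then make */
}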
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 318394ed5c7a97..5e11ad3164e089 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus,
} else /* remote PCI bus */
base = cnspci->cfg1_regs + ((busno & 0xf) << 20);
- return base + (where & 0xffc) + (devfn << 12);
+ return base + where + (devfn << 12);
}
static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c
index c169cc3049aa3b..e8adb428dddb41 100644
--- a/arch/arm/mach-exynos/suspend.c
+++ b/arch/arm/mach-exynos/suspend.c
@@ -260,6 +260,7 @@ static int __init exynos_pmu_irq_init(struct device_node *node,
NULL);
if (!domain) {
iounmap(pmu_base_addr);
+ pmu_base_addr = NULL;
return -ENOMEM;
}
diff --git a/arch/arm/mach-hisi/hotplug.c b/arch/arm/mach-hisi/hotplug.c
index a129aae7260286..909bb249378120 100644
--- a/arch/arm/mach-hisi/hotplug.c
+++ b/arch/arm/mach-hisi/hotplug.c
@@ -148,13 +148,20 @@ static int hi3xxx_hotplug_init(void)
struct device_node *node;
node = of_find_compatible_node(NULL, NULL, "hisilicon,sysctrl");
- if (node) {
- ctrl_base = of_iomap(node, 0);
- id = HI3620_CTRL;
- return 0;
+ if (!node) {
+ id = ERROR_CTRL;
+ return -ENOENT;
}
- id = ERROR_CTRL;
- return -ENOENT;
+
+ ctrl_base = of_iomap(node, 0);
+ of_node_put(node);
+ if (!ctrl_base) {
+ id = ERROR_CTRL;
+ return -ENOMEM;
+ }
+
+ id = HI3620_CTRL;
+ return 0;
}
void hi3xxx_set_cpu(int cpu, bool enable)
@@ -173,11 +180,15 @@ static bool hix5hd2_hotplug_init(void)
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "hisilicon,cpuctrl");
- if (np) {
- ctrl_base = of_iomap(np, 0);
- return true;
- }
- return false;
+ if (!np)
+ return false;
+
+ ctrl_base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!ctrl_base)
+ return false;
+
+ return true;
}
void hix5hd2_set_cpu(int cpu, bool enable)
@@ -219,10 +230,10 @@ void hip01_set_cpu(int cpu, bool enable)
if (!ctrl_base) {
np = of_find_compatible_node(NULL, NULL, "hisilicon,hip01-sysctrl");
- if (np)
- ctrl_base = of_iomap(np, 0);
- else
- BUG();
+ BUG_ON(!np);
+ ctrl_base = of_iomap(np, 0);
+ of_node_put(np);
+ BUG_ON(!ctrl_base);
}
if (enable) {
diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
index 353bb8774112d8..ec74c2812c1aa6 100644
--- a/arch/arm/mach-imx/cpuidle-imx6q.c
+++ b/arch/arm/mach-imx/cpuidle-imx6q.c
@@ -14,30 +14,23 @@
#include "cpuidle.h"
#include "hardware.h"
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
static int imx6q_enter_wait(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- if (atomic_inc_return(&master) == num_online_cpus()) {
- /*
- * With this lock, we prevent other cpu to exit and enter
- * this function again and become the master.
- */
- if (!spin_trylock(&master_lock))
- goto idle;
+ spin_lock(&cpuidle_lock);
+ if (++num_idle_cpus == num_online_cpus())
imx6_set_lpm(WAIT_UNCLOCKED);
- cpu_do_idle();
- imx6_set_lpm(WAIT_CLOCKED);
- spin_unlock(&master_lock);
- goto done;
- }
+ spin_unlock(&cpuidle_lock);
-idle:
cpu_do_idle();
-done:
- atomic_dec(&master);
+
+ spin_lock(&cpuidle_lock);
+ if (num_idle_cpus-- == num_online_cpus())
+ imx6_set_lpm(WAIT_CLOCKED);
+ spin_unlock(&cpuidle_lock);
return index;
}
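The rework replaces the atomic counter plus trylock dance with a single spinlock-protected count: the last CPU in switches the SoC to WAIT_UNCLOCKED, and the first CPU back out restores WAIT_CLOCKED. Reduced to its skeleton (set_low_power() is a hypothetical stand-in for imx6_set_lpm()):

#include <linux/cpumask.h>
#include <linux/spinlock.h>

static int nr_idle;
static DEFINE_SPINLOCK(idle_lock);

static void set_low_power(bool on) { /* stand-in for imx6_set_lpm() */ }

static void enter_wait(void)
{
	spin_lock(&idle_lock);
	if (++nr_idle == num_online_cpus())
		set_low_power(true);		/* WAIT_UNCLOCKED */
	spin_unlock(&idle_lock);

	cpu_do_idle();

	spin_lock(&idle_lock);
	if (nr_idle-- == num_online_cpus())
		set_low_power(false);		/* WAIT_CLOCKED */
	spin_unlock(&idle_lock);
}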
diff --git a/arch/arm/mach-imx/cpuidle-imx6sx.c b/arch/arm/mach-imx/cpuidle-imx6sx.c
index 3c6672b3796b24..7f5df89920082f 100644
--- a/arch/arm/mach-imx/cpuidle-imx6sx.c
+++ b/arch/arm/mach-imx/cpuidle-imx6sx.c
@@ -97,7 +97,7 @@ int __init imx6sx_cpuidle_init(void)
* except for power up sw2iso which need to be
* larger than LDO ramp up time.
*/
- imx_gpc_set_arm_power_up_timing(2, 1);
+ imx_gpc_set_arm_power_up_timing(0xf, 1);
imx_gpc_set_arm_power_down_timing(1, 1);
return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index 38b0da300dd547..423a88ff908ca6 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -394,7 +394,11 @@ static int __init_refok impd1_probe(struct lm_device *dev)
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
GFP_KERNEL);
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+ "lm%x:00700", dev->id);
+ if (!lookup || !chipname || !mmciname)
+ return -ENOMEM;
+
lookup->dev_id = mmciname;
/*
* Offsets on GPIO block 1:
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index c1cd80ecc21992..a904244264ce55 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
/*
* N2100 PCI.
*/
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index ed8fda4cd05584..45fd4b173dac10 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -117,8 +117,8 @@ void mvebu_pmsu_set_cpu_boot_addr(int hw_cpu, void *boot_addr)
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
/*
* This function sets up the boot address workaround needed for SMP
@@ -131,7 +131,7 @@ int mvebu_setup_boot_addr_wa(unsigned int crypto_eng_target,
phys_addr_t resume_addr_reg)
{
void __iomem *sram_virt_base;
- u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+ u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
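Declaring the linker-provided boundary symbols as incomplete arrays rather than single chars matters because newer compilers may assume two distinct extern objects cannot alias and fold or miscompute "&end - &start". The array form hands the compiler only addresses, which is all a symbol from assembly or a linker script really is. In miniature (symbol names invented):

extern unsigned char blob_start[];	/* provided by a .S file or ldscript */
extern unsigned char blob_end[];

static unsigned long blob_len(void)
{
	return blob_end - blob_start;	/* plain pointer arithmetic */
}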
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index a95499ea87064d..fa1d41edce68d2 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -511,6 +511,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old)
{
struct modem_private_data *priv = port->private_data;
+ if (!priv)
+ return;
+
if (IS_ERR(priv->regulator))
return;
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6ab13d18c63635..cde86d1199cf77 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -115,6 +115,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
u32 enable_mask, enable_shift;
u32 pipd_mask, pipd_shift;
u32 reg;
+ int ret;
if (dsi_id == 0) {
enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
@@ -130,7 +131,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
return -ENODEV;
}
- regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, &reg);
+ ret = regmap_read(omap4_dsi_mux_syscon,
+ OMAP4_DSIPHY_SYSCON_OFFSET,
+ &reg);
+ if (ret)
+ return ret;
reg &= ~enable_mask;
reg &= ~pipd_mask;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 147c90e70b2e04..36706d32d656d6 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2526,7 +2526,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
* a stub; implementing this properly requires iclk autoidle usecounting in
* the clock code. No return value.
*/
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
struct list_head *p;
@@ -2561,7 +2561,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
* reset. Returns 0 upon success or a negative error code upon
* failure.
*/
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
{
int r;
@@ -2622,7 +2622,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
*
* No return value.
*/
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
{
u8 postsetup_state;
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index 30768003f8543b..8c505284bc0c30 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -344,7 +344,7 @@ static void omap44xx_prm_reconfigure_io_chain(void)
* to occur, WAKEUPENABLE bits must be set in the pad mux registers, and
* omap44xx_prm_reconfigure_io_chain() must be called. No return value.
*/
-static void __init omap44xx_prm_enable_io_wakeup(void)
+static void omap44xx_prm_enable_io_wakeup(void)
{
s32 inst = omap4_prmst_get_prm_dev_inst();
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index a7dae60810e871..307fc18ededec2 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -547,7 +547,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
.exit = cm_x300_u2d_exit,
};
-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
{
pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
}
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 9c10248fadccc2..4e8c2116808ecf 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
{
int i;
- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
{
int i;
- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
__raw_writel(saved_icmr[i], base + ICMR);
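Plain division truncates, so on a part whose internal IRQ count is not a multiple of 32 (PXA27x has 34, for instance) the old loop never touched the last, partially used ICMR bank. The arithmetic, standalone:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	int nr = 34;	/* e.g. 34 internal IRQs */

	/* prints "1 vs 2": the second bank was skipped before the fix */
	printf("%d vs %d\n", nr / 32, DIV_ROUND_UP(nr, 32));
	return 0;
}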
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 5d665588c7eba5..05aa7071efd6c6 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -183,7 +183,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
.lcd_conn = LCD_COLOR_TFT_16BPP,
};
-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
{
pxa_set_fb_info(NULL, &littleton_lcd_info);
}
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index d757cfb5f8a68a..4da2458d7f32d7 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -558,7 +558,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
.flags = ENABLE_PORT_ALL | POWER_SENSE_LOW,
};
-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
{
/* Port 2 is shared between host and client interface. */
UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
diff --git a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
index ce2db235dbafbc..5e8a306163de94 100644
--- a/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
+++ b/arch/arm/mach-s3c24xx/mach-osiris-dvs.c
@@ -70,16 +70,16 @@ static int osiris_dvs_notify(struct notifier_block *nb,
switch (val) {
case CPUFREQ_PRECHANGE:
- if (old_dvs & !new_dvs ||
- cur_dvs & !new_dvs) {
+ if ((old_dvs && !new_dvs) ||
+ (cur_dvs && !new_dvs)) {
pr_debug("%s: exiting dvs\n", __func__);
cur_dvs = false;
gpio_set_value(OSIRIS_GPIO_DVS, 1);
}
break;
case CPUFREQ_POSTCHANGE:
- if (!old_dvs & new_dvs ||
- !cur_dvs & new_dvs) {
+ if ((!old_dvs && new_dvs) ||
+ (!cur_dvs && new_dvs)) {
pr_debug("entering dvs\n");
cur_dvs = true;
gpio_set_value(OSIRIS_GPIO_DVS, 0);
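The operator fix is worth dwelling on: "!" binds tighter than "&", and "&" is a bitwise AND, so for arbitrary nonzero "truthy" ints the old expression can evaluate to 0. (These particular variables are bool, where the two forms happen to agree, which is how the code survived; the parenthesized "&&" form says what is meant.) A standalone demonstration:

#include <stdio.h>

int main(void)
{
	int old_dvs = 2, new_dvs = 0;

	printf("bitwise: %d\n", old_dvs & !new_dvs);	/* 2 & 1 == 0 */
	printf("logical: %d\n", old_dvs && !new_dvs);	/* 1 */
	return 0;
}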
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a134d8a13d001e..11d699af30ed22 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -359,14 +359,16 @@ v7_dma_inv_range:
ALT_UP(W(nop))
#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
+ addne r0, r0, r2
tst r1, r3
bic r1, r1, r3
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line
-1:
- mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line
- add r0, r0, r2
cmp r0, r1
+1:
+ mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line
+ addlo r0, r0, r2
+ cmplo r0, r1
blo 1b
dsb st
ret lr
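Rearranged into C, the fixed flow is easier to follow: the partial line at each end of the buffer is cleaned-and-invalidated (so a neighbour's dirty data survives), only whole lines in between are invalidated outright, and the loop is now bounds-checked before its first iteration, so a short or unaligned range can no longer invalidate one line too many. This is a model of the assembly above, not kernel code; the helpers are stand-ins for the mcr instructions:

static void clean_and_inv_line(unsigned long addr) { /* c7, c14, 1 */ }
static void inv_line(unsigned long addr) { /* c7, c6, 1 */ }

static void dma_inv_range(unsigned long start, unsigned long end,
			  unsigned long linesz)
{
	unsigned long mask = linesz - 1;

	if (start & mask) {
		clean_and_inv_line(start & ~mask);
		start = (start & ~mask) + linesz;
	}
	if (end & mask)
		clean_and_inv_line(end & ~mask);
	end &= ~mask;

	for (; start < end; start += linesz)	/* strictly bounded */
		inv_line(start);
}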
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c29ad610311b45..a9f6705aea238b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -716,19 +716,28 @@ int __mark_rodata_ro(void *unused)
return 0;
}
+static int kernel_set_to_readonly __read_mostly;
+
void mark_rodata_ro(void)
{
+ kernel_set_to_readonly = 1;
stop_machine(__mark_rodata_ro, NULL, NULL);
}
void set_kernel_text_rw(void)
{
+ if (!kernel_set_to_readonly)
+ return;
+
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
current->active_mm);
}
void set_kernel_text_ro(void)
{
+ if (!kernel_set_to_readonly)
+ return;
+
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
current->active_mm);
}
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0c1230e17d3759..513ec5681d806a 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -460,7 +460,7 @@ void pci_ioremap_set_mem_type(int mem_type)
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
+ BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
PCI_IO_VIRT_BASE + offset + SZ_64K,
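Subtracting 1 makes the bound inclusive: what must fit under IO_SPACE_LIMIT is the last byte of the 64K window, not the first byte past it. With assumed values (a 1 MiB I/O space), the boundary case looks like this:

#include <stdio.h>

#define SZ_64K		0x10000UL
#define IO_SPACE_LIMIT	0xfffffUL	/* assumed: 1 MiB of I/O space - 1 */

int main(void)
{
	unsigned long offset = 0xf0000;	/* last valid 64K window */

	printf("old check rejects: %d\n", offset + SZ_64K > IO_SPACE_LIMIT);
	printf("new check rejects: %d\n", offset + SZ_64K - 1 > IO_SPACE_LIMIT);
	return 0;
}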
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e47cffd25c6cfd..aead23f15213ef 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -572,7 +572,7 @@ static void __init build_mem_type_table(void)
* in the Short-descriptor translation table format descriptors.
*/
if (cpu_arch == CPU_ARCH_ARMv7 &&
- (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
+ (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) >= 4) {
user_pmd_table |= PMD_PXNTABLE;
}
#endif
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index daa1a65f2eb799..6748827c2ec8b6 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -238,8 +238,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
if (ssp == NULL)
return -ENODEV;
- iounmap(ssp->mmio_base);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -249,7 +247,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
- kfree(ssp);
return 0;
}
diff --git a/arch/arm/vdso/vdso.S b/arch/arm/vdso/vdso.S
index b2b97e3e7babbb..a62a7b64f49c52 100644
--- a/arch/arm/vdso/vdso.S
+++ b/arch/arm/vdso/vdso.S
@@ -23,9 +23,8 @@
#include <linux/const.h>
#include <asm/page.h>
- __PAGE_ALIGNED_DATA
-
.globl vdso_start, vdso_end
+ .section .data..ro_after_init
.balign PAGE_SIZE
vdso_start:
.incbin "arch/arm/vdso/vdso.so"
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6464c9bc8fef4b..e1fab8f7032cb5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -6,6 +6,7 @@ config ARM64
select ARCH_HAS_ALT_SYSCALL
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV
select ARCH_HAS_SG_CHAIN
@@ -50,6 +51,7 @@ config ARM64
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_BITREVERSE
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
select HAVE_ARCH_KGDB
@@ -92,11 +94,11 @@ config ARM64
select PERF_USE_VMALLOC
select POWER_RESET
select POWER_SUPPLY
- select RTC_LIB
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
select HAVE_CONTEXT_TRACKING
select HAVE_ARM_SMCCC
+ select THREAD_INFO_IN_TASK
help
ARM 64-bit (AArch64) Linux support.
@@ -904,6 +906,10 @@ config SYSVIPC_COMPAT
def_bool y
depends on COMPAT && SYSVIPC
+config KEYS_COMPAT
+ def_bool y
+ depends on COMPAT && KEYS
+
endmenu
menu "Power management options"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index e676ffeb86a8a1..977b9723bb6d09 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=-p --no-undefined -X
+LDFLAGS_vmlinux :=--no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 445aa678f9141e..6a37101344aa4e 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -249,7 +249,7 @@
sysmgr: sysmgr@ffd12000 {
compatible = "altr,sys-mgr", "syscon";
- reg = <0xffd12000 0x1000>;
+ reg = <0xffd12000 0x228>;
};
/* Local timer */
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 6b8abbe6874622..3011c88bd2f34c 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -105,7 +105,7 @@
led@6 {
label = "apq8016-sbc:blue:bt";
gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
- linux,default-trigger = "bt";
+ linux,default-trigger = "bluetooth-power";
default-state = "off";
};
};
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
index 55fd984251fe3e..215b4ee2b1229b 100644
--- a/arch/arm64/boot/dts/rockchip/Makefile
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -16,6 +16,7 @@ dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-r1.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-r2.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-r3-sku7.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-r4-sku6.dtb
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-gru-scarlet-r5-sku0.dtb
dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3399-sapphire-excavator.dtb
always := $(dtb-y)
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r3-sku7.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r3-sku7.dts
index 3cf9be6b5511cd..aa080044ebfba5 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r3-sku7.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r3-sku7.dts
@@ -62,14 +62,4 @@
"google,scarlet-rev4-sku7", "google,scarlet-rev4",
"google,scarlet-rev3-sku7", "google,scarlet-rev3",
"google,scarlet", "google,gru", "rockchip,rk3399";
-
- firmware {
- coreboot {
- /*
- * HACK: put this here until firmware with this
- * property rolls out.
- */
- sku-id = <7>;
- };
- };
};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r4-sku6.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r4-sku6.dts
index f89a6cf9ee9f13..82dd7c6842613f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r4-sku6.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r4-sku6.dts
@@ -48,7 +48,19 @@
/ {
model = "Google Scarlet";
- compatible = "google,scarlet-rev15-sku6", "google,scarlet-rev15",
+ compatible = "google,scarlet-rev15-sku2", "google,scarlet-rev14-sku2",
+ "google,scarlet-rev13-sku2", "google,scarlet-rev12-sku2",
+ "google,scarlet-rev11-sku2", "google,scarlet-rev10-sku2",
+ "google,scarlet-rev9-sku2", "google,scarlet-rev8-sku2",
+ "google,scarlet-rev7-sku2", "google,scarlet-rev6-sku2",
+ "google,scarlet-rev5-sku2", "google,scarlet-rev4-sku2",
+ "google,scarlet-rev15-sku4", "google,scarlet-rev14-sku4",
+ "google,scarlet-rev13-sku4", "google,scarlet-rev12-sku4",
+ "google,scarlet-rev11-sku4", "google,scarlet-rev10-sku4",
+ "google,scarlet-rev9-sku4", "google,scarlet-rev8-sku4",
+ "google,scarlet-rev7-sku4", "google,scarlet-rev6-sku4",
+ "google,scarlet-rev5-sku4", "google,scarlet-rev4-sku4",
+ "google,scarlet-rev15-sku6", "google,scarlet-rev15",
"google,scarlet-rev14-sku6", "google,scarlet-rev14",
"google,scarlet-rev13-sku6", "google,scarlet-rev13",
"google,scarlet-rev12-sku6", "google,scarlet-rev12",
@@ -61,16 +73,6 @@
"google,scarlet-rev5-sku6", "google,scarlet-rev5",
"google,scarlet-rev4-sku6", "google,scarlet-rev4",
"google,scarlet", "google,gru", "rockchip,rk3399";
-
- firmware {
- coreboot {
- /*
- * HACK: put this here until firmware with this
- * property rolls out.
- */
- sku-id = <6>;
- };
- };
};
&mipi_panel {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r5-sku0.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r5-sku0.dts
new file mode 100644
index 00000000000000..16bbd34f68e857
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet-r5-sku0.dts
@@ -0,0 +1,74 @@
+/*
+ * Google Gru-Scarlet Rev5+ (SKU-0) board device tree source
+ *
+ * Copyright 2019 Google, Inc
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "rk3399-gru-scarlet.dtsi"
+
+/ {
+ model = "Google Scarlet";
+ compatible = "google,scarlet-rev15-sku0", "google,scarlet-rev15",
+ "google,scarlet-rev14-sku0", "google,scarlet-rev14",
+ "google,scarlet-rev13-sku0", "google,scarlet-rev13",
+ "google,scarlet-rev12-sku0", "google,scarlet-rev12",
+ "google,scarlet-rev11-sku0", "google,scarlet-rev11",
+ "google,scarlet-rev10-sku0", "google,scarlet-rev10",
+ "google,scarlet-rev9-sku0", "google,scarlet-rev9",
+ "google,scarlet-rev8-sku0", "google,scarlet-rev8",
+ "google,scarlet-rev7-sku0", "google,scarlet-rev7",
+ "google,scarlet-rev6-sku0", "google,scarlet-rev6",
+ "google,scarlet-rev5-sku0", "google,scarlet-rev5",
+ "google,scarlet", "google,gru", "rockchip,rk3399";
+};
+
+&mipi_panel {
+ compatible = "innolux,p097pfg";
+ /delete-property/ power-supply;
+ avdd-supply = <&ppvarp_lcd>;
+ avee-supply = <&ppvarn_lcd>;
+};
+
+&qca_wifi {
+ qcom,ath10k-calibration-variant = "GO_DUMO";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index 5d4239d526a2be..2fcabbbb1f342f 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -313,6 +313,9 @@ pp1800_pcie: &pp1800_s0 {
/delete-node/ &mvl_wifi;
+/* Wake-on-BT handled in USB node. */
+/delete-node/ &wake_on_bt;
+
ap_i2c_dig: &i2c2 {
status = "okay";
@@ -427,6 +430,23 @@ camera: &i2c7 {
&cros_ec {
interrupt-parent = <&gpio1>;
interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
+
+ throttler-opps = <&cl0_opp03 /* little cores: 1.008 GHz */
+ &cl1_opp05 /* big cores: 1.416 GHz */
+ &gpu_opp03>; /* GPU: 500 MHz */
+};
+
+/* add labels to OPPs used for throttling */
+&cluster0_opp {
+ cl0_opp03: opp03 {};
+};
+
+&cluster1_opp {
+ cl1_opp05: opp05 {};
+};
+
+&gpu_opp_table {
+ gpu_opp03: opp03 {};
};
&cru {
@@ -480,7 +500,7 @@ camera: &i2c7 {
&gpio_keys {
pinctrl-names = "default";
- pinctrl-0 = <&bt_host_wake_l>, <&pen_eject_odl>;
+ pinctrl-0 = <&pen_eject_odl>;
pen-insert {
label = "Pen Insert";
@@ -611,6 +631,14 @@ camera: &i2c7 {
vpcie1v8-supply = <&pp1800_pcie>;
};
+&pci_rootport {
+ qca_wifi: wifi@0,0 {
+ compatible = "qcom,ath10k";
+ reg = <0x83010000 0x0 0x00000000 0x0 0x00100000
+ 0x83010000 0x0 0x00100000 0x0 0x00100000>;
+ };
+};
+
&sdmmc {
cd-gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
};
@@ -644,6 +672,23 @@ camera: &i2c7 {
temperature = <71000>;
};
+&tcphy0 {
+ rockchip,phy-config = <0x2a 0x00>,
+ <0x1f 0x15>,
+ <0x14 0x22>,
+ <0x02 0x2b>,
+
+ <0x21 0x00>,
+ <0x12 0x15>,
+ <0x02 0x22>,
+ <0 0>,
+
+ <0x15 0x04>,
+ <0x00 0x20>,
+ <0 0>,
+ <0 0>;
+};
+
&tcphy1 {
status = "disabled";
};
@@ -660,6 +705,21 @@ camera: &i2c7 {
status = "disabled";
};
+&usb_host0_ohci {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qca_bt: bluetooth@1 {
+ compatible = "usbcf3,e300", "usb4ca,301a";
+ reg = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_host_wake_l>;
+ interrupt-parent = <&gpio1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "wakeup";
+ };
+};
+
&usb_host1_ehci {
status = "disabled";
};
@@ -680,10 +740,6 @@ camera: &i2c7 {
status = "disabled";
};
-&wake_on_bt {
- gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
-};
-
/* PINCTRL OVERRIDES */
&ap_fw_wp {
rockchip,pins = <0 13 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -694,7 +750,7 @@ camera: &i2c7 {
};
&bt_host_wake_l {
- rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ rockchip,pins = <1 2 RK_FUNC_GPIO &pcfg_pull_none>;
};
&ec_ap_int_l {
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
index dfc3b1ed9492f6..ac76d58f5f06f6 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru.dtsi
@@ -489,6 +489,13 @@
};
/* END REGULATORS */
+ ap_rtc_clk: ap-rtc-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ #clock-cells = <0>;
+ };
+
firmware: firmware {
chromeos {
pinctrl-names = "default";
diff --git a/arch/arm64/crypto/aes-ce-ccm-core.S b/arch/arm64/crypto/aes-ce-ccm-core.S
index 3363560c79b7e6..7bc459d9235cdb 100644
--- a/arch/arm64/crypto/aes-ce-ccm-core.S
+++ b/arch/arm64/crypto/aes-ce-ccm-core.S
@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
beq 10f
ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
b 7b
-8: mov w7, w8
+8: cbz w8, 91f
+ mov w7, w8
add w8, w8, #16
9: ext v1.16b, v1.16b, v1.16b, #1
adds w7, w7, #1
bne 9b
- eor v0.16b, v0.16b, v1.16b
+91: eor v0.16b, v0.16b, v1.16b
st1 {v0.16b}, [x0]
10: str w8, [x3]
ret
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 70fd9ffb58cfc0..c6a1cd4761f31a 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -5,7 +5,6 @@ generic-y += bugs.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
-generic-y += current.h
generic-y += delay.h
generic-y += div64.h
generic-y += dma.h
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 06c578c88610ca..553d0a1d7e8a42 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -231,6 +231,28 @@ lr .req x30 // link register
str \src, [\tmp, :lo12:\sym]
.endm
+ /*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+ .macro adr_this_cpu, dst, sym, tmp
+ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+ add \dst, \dst, \tmp
+ .endm
+
+ /*
+ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+ ldr \dst, [\dst, \tmp]
+ .endm
+
/*
* Annotate a function as position independent, i.e., safe to be called before
* the kernel virtual mapping is activated.
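For readers who do not think in assembler, a rough C analogue of adr_this_cpu (illustrative only): arm64 keeps the per-cpu offset in TPIDR_EL1, so "this CPU's instance" of a per-cpu symbol is the symbol's link-time address plus that register.

static inline void *this_cpu_addr(void *base)
{
	unsigned long off;

	asm("mrs %0, tpidr_el1" : "=r" (off));	/* per-cpu offset */
	return (char *)base + off;
}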
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index 197e06afbf7194..e3438c62359814 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -114,7 +114,7 @@ static inline void atomic_and(int i, atomic_t *v)
/* LSE atomics */
" mvn %w[i], %w[i]\n"
" stclr %w[i], %[v]")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : [i] "+&r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
: "x30");
}
@@ -131,7 +131,7 @@ static inline void atomic_sub(int i, atomic_t *v)
/* LSE atomics */
" neg %w[i], %w[i]\n"
" stadd %w[i], %[v]")
- : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : [i] "+&r" (w0), [v] "+Q" (v->counter)
: "r" (x1)
: "x30");
}
@@ -151,7 +151,7 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
" neg %w[i], %w[i]\n" \
" ldadd" #mb " %w[i], w30, %[v]\n" \
" add %w[i], %w[i], w30") \
- : [i] "+r" (w0), [v] "+Q" (v->counter) \
+ : [i] "+&r" (w0), [v] "+Q" (v->counter) \
: "r" (x1) \
: "x30" , ##cl); \
\
@@ -255,7 +255,7 @@ static inline void atomic64_and(long i, atomic64_t *v)
/* LSE atomics */
" mvn %[i], %[i]\n"
" stclr %[i], %[v]")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : [i] "+&r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
: "x30");
}
@@ -272,7 +272,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
/* LSE atomics */
" neg %[i], %[i]\n"
" stadd %[i], %[v]")
- : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : [i] "+&r" (x0), [v] "+Q" (v->counter)
: "r" (x1)
: "x30");
}
@@ -292,7 +292,7 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
" neg %[i], %[i]\n" \
" ldadd" #mb " %[i], x30, %[v]\n" \
" add %[i], %[i], x30") \
- : [i] "+r" (x0), [v] "+Q" (v->counter) \
+ : [i] "+&r" (x0), [v] "+Q" (v->counter) \
: "r" (x1) \
: "x30" , ##cl); \
\
@@ -412,7 +412,7 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
" eor %[old1], %[old1], %[oldval1]\n" \
" eor %[old2], %[old2], %[oldval2]\n" \
" orr %[old1], %[old1], %[old2]") \
- : [old1] "+r" (x0), [old2] "+r" (x1), \
+ : [old1] "+&r" (x0), [old2] "+&r" (x1), \
[v] "+Q" (*(unsigned long *)ptr) \
: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
[oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 54efedaf331fda..ca3b7841e1c6fa 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -155,8 +155,4 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
-#endif
-
#endif
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index b5e9cee4b5f81a..13a6103130cd70 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -36,6 +36,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64isar1;
u64 reg_id_aa64mmfr0;
u64 reg_id_aa64mmfr1;
+ u64 reg_id_aa64mmfr2;
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 77dd88f0d1fcb9..deed45cba16c73 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -33,8 +33,9 @@
#define ARM64_WORKAROUND_CAVIUM_27456 8
#define ARM64_HARDEN_BRANCH_PREDICTOR 9
#define ARM64_HAS_NO_HW_PREFETCH 10
+#define ARM64_HAS_32BIT_EL0 11
-#define ARM64_NCAPS 11
+#define ARM64_NCAPS 12
#ifndef __ASSEMBLY__
@@ -75,10 +76,17 @@ struct arm64_ftr_reg {
struct arm64_ftr_bits *ftr_bits;
};
+/* scope of capability check */
+enum {
+ SCOPE_SYSTEM,
+ SCOPE_LOCAL_CPU,
+};
+
struct arm64_cpu_capabilities {
const char *desc;
u16 capability;
- bool (*matches)(const struct arm64_cpu_capabilities *);
+ int def_scope; /* default scope */
+ bool (*matches)(const struct arm64_cpu_capabilities *, int scope);
int (*enable)(void *); /* Called on all active CPUs */
union {
struct { /* To be used for erratum handling only */
@@ -88,9 +96,10 @@ struct arm64_cpu_capabilities {
struct { /* Feature register checking */
u32 sys_reg;
- int field_pos;
- int min_field_value;
- int hwcap_type;
+ u8 field_pos;
+ u8 min_field_value;
+ u8 hwcap_type;
+ bool sign;
unsigned long hwcap;
};
};
@@ -98,6 +107,8 @@ struct arm64_cpu_capabilities {
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+bool this_cpu_has_cap(unsigned int cap);
+
static inline bool cpu_have_feature(unsigned int num)
{
return elf_hwcap & (1UL << num);
@@ -161,19 +172,23 @@ static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
cpuid_feature_extract_field(mmfr0, ID_AA64MMFR0_BIGENDEL0_SHIFT) == 0x1;
}
+static inline bool id_aa64pfr0_32bit_el0(u64 pfr0)
+{
+ u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
+
+ return val == ID_AA64PFR0_EL0_32BIT_64BIT;
+}
+
void __init setup_cpu_features(void);
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
+void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_errata(void);
+void __init enable_errata_workarounds(void);
-#ifdef CONFIG_HOTPLUG_CPU
+void verify_local_cpu_errata(void);
void verify_local_cpu_capabilities(void);
-#else
-static inline void verify_local_cpu_capabilities(void)
-{
-}
-#endif
u64 read_system_reg(u32 id);
@@ -182,6 +197,11 @@ static inline bool cpu_supports_mixed_endian_el0(void)
return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}
+static inline bool system_supports_32bit_el0(void)
+{
+ return cpus_have_cap(ARM64_HAS_32BIT_EL0);
+}
+
static inline bool system_supports_mixed_endian_el0(void)
{
return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 7ba337e9ad63c7..b793dafe8f7fd3 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -32,12 +32,6 @@
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
-#define read_cpuid(reg) ({ \
- u64 __val; \
- asm("mrs %0, " #reg : "=r" (__val)); \
- __val; \
-})
-
#define MIDR_REVISION_MASK 0xf
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
#define MIDR_PARTNUM_SHIFT 4
@@ -108,6 +102,14 @@
#ifndef __ASSEMBLY__
+#include <asm/sysreg.h>
+
+#define read_cpuid(reg) ({ \
+ u64 __val; \
+ asm("mrs_s %0, " __stringify(SYS_ ## reg) : "=r" (__val)); \
+ __val; \
+})
+
/*
* The CPU ID never changes at run time, so we might as well tell the
* compiler that it's constant. Use this function to read the CPU ID
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 00000000000000..b03490b04b84d3
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/*
+ * We don't use read_sysreg() as we want the compiler to cache the value where
+ * possible.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ unsigned long sp_el0;
+
+ asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+ return (struct task_struct *)sp_el0;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
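Because the mrs above is neither volatile nor given a memory clobber, repeated uses of current within one function can be folded into a single sp_el0 read. An illustrative caller (any task_struct fields would serve):

	static void current_sketch(void)
	{
		/* Both references below may share one 'mrs xN, sp_el0'. */
		if (current->flags & PF_KTHREAD)
			pr_debug("%s runs in a kthread\n", current->comm);
	}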
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e843c3af8..7e2b3e36008631 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm goto("1: nop\n\t"
+ asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@ l_yes:
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm goto("1: b %l[l_yes]\n\t"
+ asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index ef8e13d379cba2..d7e7cf56e8d6b9 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,8 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_API (UL(1) << 41)
+#define HCR_APK (UL(1) << 40)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
#define HCR_RW_SHIFT 31
@@ -81,6 +83,7 @@
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
+#define HCR_HOST_NVHE_FLAGS (HCR_RW | HCR_API | HCR_APK)
/* Hyp System Control Register (SCTLR_EL2) bits */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 25a40213bd9b87..0729a2f9448269 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 438db448996536..cb8b72dc57bf80 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -141,7 +141,11 @@ static inline void *phys_to_virt(phys_addr_t x)
#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
+ _virt_addr_valid(kaddr))
#endif
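The extra _virt_addr_is_linear() term makes virt_addr_valid() reject pointers that are not in the linear map (vmalloc space, module text), for which __pa() would compute a meaningless physical address. Expanded by hand, the new macro behaves roughly like this sketch:

	static inline bool virt_addr_valid_sketch(const void *kaddr)
	{
		if ((u64)kaddr < PAGE_OFFSET)	/* outside the linear mapping */
			return false;
		return pfn_valid(__pa(kaddr) >> PAGE_SHIFT);
	}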
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 9b2f5a9d019df4..fbafd0ad16df76 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,6 +19,8 @@
#ifndef __ASM_PAGE_H
#define __ASM_PAGE_H
+#include <linux/const.h>
+
/* PAGE_SHIFT determines the page size */
/* CONT_SHIFT determines the number of pages which can be tracked together */
#ifdef CONFIG_ARM64_64K_PAGES
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 8a336852eeba05..b8beb45b8a20dd 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/stack_pointer.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -84,6 +86,7 @@ static inline unsigned long __percpu_##op(void *ptr, \
: [val] "Ir" (val)); \
break; \
default: \
+ ret = 0; \
BUILD_BUG(); \
} \
\
@@ -113,6 +116,7 @@ static inline unsigned long __percpu_read(void *ptr, int size)
ret = ACCESS_ONCE(*(u64 *)ptr);
break;
default:
+ ret = 0;
BUILD_BUG();
}
@@ -182,6 +186,7 @@ static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
: [val] "r" (val));
break;
default:
+ ret = 0;
BUILD_BUG();
}
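The three 'ret = 0;' additions are dead stores: for any size the kernel actually uses, one of the real cases runs, and BUILD_BUG() rejects everything else at compile time. Some compiler versions nevertheless warn that ret may be used uninitialised via the impossible default: arm. The pattern, reduced:

	unsigned long ret;

	switch (size) {
	case 1: case 2: case 4: case 8:
		/* the real accessors assign ret here */
		break;
	default:
		ret = 0;	/* unreachable; placates -Wmaybe-uninitialized */
		BUILD_BUG();
	}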
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 7bd3cdb533ea80..91b6be092ce21a 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+#include <asm/stack_pointer.h>
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/shmparam.h b/arch/arm64/include/asm/shmparam.h
index 4df608a8459e27..e368a55ebd22d0 100644
--- a/arch/arm64/include/asm/shmparam.h
+++ b/arch/arm64/include/asm/shmparam.h
@@ -21,7 +21,7 @@
* alignment value. Since we don't have aliasing D-caches, the rest of
* the time we can safely use PAGE_SIZE.
*/
-#define COMPAT_SHMLBA 0x4000
+#define COMPAT_SHMLBA (4 * PAGE_SIZE)
#include <asm-generic/shmparam.h>
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index d9c3d6a6100ac5..ea4de789a6a130 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -16,11 +16,22 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+#include <asm/percpu.h>
+
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;
@@ -57,6 +68,7 @@ asmlinkage void secondary_start_kernel(void);
*/
struct secondary_data {
void *stack;
+ struct task_struct *task;
};
extern struct secondary_data secondary_data;
extern void secondary_entry(void);
@@ -68,5 +80,14 @@ extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
+extern void cpu_die_early(void);
+
+static inline void cpu_park_loop(void)
+{
+ for (;;) {
+ wfe();
+ wfi();
+ }
+}
#endif /* ifndef __ASM_SMP_H */
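raw_smp_processor_id() can no longer come from thread_info (its cpu field is removed later in this series), so each CPU's number is published in a per-cpu variable instead. The smp.c side of the series (not shown in this hunk) has to seed it once per CPU; a sketch of that initialisation, assuming it runs early in CPU bring-up:

	void __init cpu_number_init_sketch(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			per_cpu(cpu_number, cpu) = cpu;
	}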
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 00000000000000..ffcdf742cddf93
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
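current_stack_pointer moves here verbatim from thread_info.h (removed below) so that headers such as percpu.h and perf_event.h can use it without dragging in thread_info. For context, the old current_thread_info() derivation that this series retires was built on exactly this alias:

	#include <asm/stack_pointer.h>

	/* Historical idiom, shown for illustration only. */
	static inline unsigned long stack_base_sketch(void)
	{
		return current_stack_pointer & ~(THREAD_SIZE - 1);
	}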
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 2eb714c4639f56..d0aa42907569be 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -63,6 +63,11 @@ extern int memcmp(const void *, const void *, size_t);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
#endif
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 59a5b0f1e81c32..4d19a03d316ed0 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 13
/*
* struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 8373fa1c3036d4..fcb7b07e12cb58 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,8 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
+#include <linux/stringify.h>
+
#include <asm/opcodes.h>
/*
@@ -70,6 +72,7 @@
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
+#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
@@ -110,6 +113,7 @@
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
+#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
/* id_aa64mmfr0 */
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
@@ -136,6 +140,13 @@
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
#define ID_AA64MMFR1_HADBS_SHIFT 0
+/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_LVA_SHIFT 16
+#define ID_AA64MMFR2_IESB_SHIFT 12
+#define ID_AA64MMFR2_LSM_SHIFT 8
+#define ID_AA64MMFR2_UAO_SHIFT 4
+#define ID_AA64MMFR2_CNP_SHIFT 0
+
/* id_aa64dfr0 */
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
#define ID_AA64DFR0_WRPS_SHIFT 20
@@ -209,6 +220,8 @@
#else
+#include <linux/types.h>
+
asm(
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
" .equ __reg_num_x\\num, \\num\n"
@@ -233,6 +246,23 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
val |= set;
asm volatile("msr sctlr_el1, %0" : : "r" (val));
}
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+#define write_sysreg(v, r) do { \
+ u64 __val = (u64)v; \
+ asm volatile("msr " __stringify(r) ", %0" \
+ : : "r" (__val)); \
+} while (0)
+
#endif
#endif /* __ASM_SYSREG_H */
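A usage sketch for the new accessors; the volatile asm keeps every access in place, in contrast to read_cpuid(), which the comment above explicitly allows to be cached. Register and bit are chosen purely for illustration:

	static void sysreg_sketch(void)
	{
		u64 sctlr = read_sysreg(sctlr_el1);

		write_sysreg(sctlr | BIT(12), sctlr_el1);	/* illustrative bit */
		isb();		/* synchronise before relying on the new value */
	}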
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 7d097e741f423f..66a90003f0ca4d 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -36,6 +36,7 @@
struct task_struct;
+#include <asm/stack_pointer.h>
#include <asm/types.h>
#include <asm/unistd.h>
@@ -43,14 +44,11 @@ typedef unsigned long mm_segment_t;
/*
* low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
*/
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
#ifdef CONFIG_ALT_SYSCALL
unsigned int nr_syscalls;
const void *sys_call_table;
@@ -80,32 +78,13 @@ extern void * const compat_sys_call_table[];
#define INIT_THREAD_INFO(tsk) \
{ \
- .task = &tsk, \
- .flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
INIT_THREAD_INFO_SYSCALL \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
-/*
- * how to get the thread information struct from C
- */
-static inline struct thread_info *current_thread_info(void) __attribute_const__;
-
-static inline struct thread_info *current_thread_info(void)
-{
- return (struct thread_info *)
- (current_stack_pointer & ~(THREAD_SIZE - 1));
-}
-
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
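This is the arm64 half of THREAD_INFO_IN_TASK: flags, addr_limit and preempt_count stay in thread_info, while the task and cpu back-pointers go away because thread_info is now embedded at the start of task_struct and current is recovered from sp_el0 (see current.h above). The resulting relationship, as a sketch of the generic helper:

	static inline struct thread_info *current_thread_info_sketch(void)
	{
		return &current->thread_info;	/* current itself comes from sp_el0 */
	}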
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 73f02e39ab1ff8..b277cf320c8638 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -300,24 +300,39 @@ do { \
#define put_user __put_user
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ check_object_size(to, n, false);
+ return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ check_object_size(from, n, true);
+ return __arch_copy_to_user(to, from, n);
+}
+
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
- if (access_ok(VERIFY_READ, from, n))
- n = __copy_from_user(to, from, n);
- else /* security hole - plug it */
+ if (access_ok(VERIFY_READ, from, n)) {
+ check_object_size(to, n, false);
+ n = __arch_copy_from_user(to, from, n);
+ } else /* security hole - plug it */
memset(to, 0, n);
return n;
}
static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
- n = __copy_to_user(to, from, n);
+ if (access_ok(VERIFY_WRITE, to, n)) {
+ check_object_size(from, n, true);
+ n = __arch_copy_to_user(to, from, n);
+ }
return n;
}
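Renaming the assembly routines to __arch_copy_{from,to}_user leaves room for C wrappers that run check_object_size() first, which is what lets CONFIG_HARDENED_USERCOPY veto copies that would overrun a slab object or stack frame. Callers are unchanged:

	/* Unchanged caller; the bounds check now happens inside the wrapper. */
	static long read_sketch(const void __user *user_ptr)
	{
		char buf[64];

		if (copy_from_user(buf, user_ptr, sizeof(buf)))
			return -EFAULT;
		return 0;
	}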
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 361c8a8ef55f37..a739287ef6a3de 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -28,5 +28,7 @@
#define HWCAP_SHA2 (1 << 6)
#define HWCAP_CRC32 (1 << 7)
#define HWCAP_ATOMICS (1 << 8)
+#define HWCAP_FPHP (1 << 9)
+#define HWCAP_ASIMDHP (1 << 10)
#endif /* _UAPI__ASM_HWCAP_H */
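The two new bits advertise ARMv8.2 half-precision floating-point (scalar and Advanced SIMD) to userspace via the auxiliary vector. A userspace probe, for illustration:

	#include <sys/auxv.h>

	#ifndef HWCAP_FPHP		/* values as defined by this patch */
	#define HWCAP_FPHP	(1 << 9)
	#define HWCAP_ASIMDHP	(1 << 10)
	#endif

	int have_fp16(void)
	{
		unsigned long hwcaps = getauxval(AT_HWCAP);

		return (hwcaps & HWCAP_FPHP) && (hwcaps & HWCAP_ASIMDHP);
	}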
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 678f30b05a4558..2dc44406a7adb6 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -34,8 +34,8 @@ EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
/* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__copy_in_user);
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 478a00b9732b4c..3e456689dba292 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -62,7 +62,7 @@ struct insn_emulation {
};
static LIST_HEAD(insn_emulation);
-static int nr_insn_emulated;
+static int nr_insn_emulated __initdata;
static DEFINE_RAW_SPINLOCK(insn_emulation_lock);
static void register_emulation_hooks(struct insn_emulation_ops *ops)
@@ -173,7 +173,7 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
return ret;
}
-static void register_insn_emulation(struct insn_emulation_ops *ops)
+static void __init register_insn_emulation(struct insn_emulation_ops *ops)
{
unsigned long flags;
struct insn_emulation *insn;
@@ -237,7 +237,7 @@ static struct ctl_table ctl_abi[] = {
{ }
};
-static void register_insn_emulation_sysctl(struct ctl_table *table)
+static void __init register_insn_emulation_sysctl(struct ctl_table *table)
{
unsigned long flags;
int i = 0;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 724e96db670bf3..6fcddeb0745afd 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -34,11 +34,10 @@ int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
#ifdef CONFIG_ALT_SYSCALL
DEFINE(TI_NR_SYSCALLS, offsetof(struct thread_info, nr_syscalls));
DEFINE(TI_SYS_CALL_TABLE, offsetof(struct thread_info, sys_call_table));
@@ -113,6 +112,9 @@ int main(void)
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK();
+ DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
+ DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
+ BLANK();
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 734e817c0ca486..340c96bdcf0cf3 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -22,8 +22,9 @@
#include <asm/cpufeature.h>
static bool __maybe_unused
-is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
+is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
entry->midr_range_min,
entry->midr_range_max);
@@ -165,12 +166,14 @@ static int enable_smccc_arch_workaround_1(void *data)
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
#define MIDR_RANGE(model, min, max) \
+ .def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = min, \
.midr_range_max = max
#define MIDR_ALL_VERSIONS(model) \
+ .def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
.midr_model = model, \
.midr_range_min = 0, \
@@ -264,7 +267,35 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
}
};
+/*
+ * The CPU errata workarounds are detected and applied at boot time,
+ * and the related information is freed soon after. If a new CPU requires
+ * a workaround that was not detected at boot, fail this CPU.
+ */
+void verify_local_cpu_errata(void)
+{
+ const struct arm64_cpu_capabilities *caps = arm64_errata;
+
+ for (; caps->matches; caps++) {
+ if (cpus_have_cap(caps->capability)) {
+ if (caps->enable)
+ caps->enable((void *)caps);
+ } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ pr_crit("CPU%d: Requires work around for %s, not detected"
+ " at boot time\n",
+ smp_processor_id(),
+ caps->desc ? : "an erratum");
+ cpu_die_early();
+ }
+ }
+}
+
void check_local_cpu_errata(void)
{
update_cpu_capabilities(arm64_errata, "enabling workaround for");
}
+
+void __init enable_errata_workarounds(void)
+{
+ enable_cpu_capabilities(arm64_errata);
+}
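verify_local_cpu_errata() is the late-CPU counterpart of the boot-time pass: a hotplugged CPU either gets the already-enabled workarounds applied to it, or, if it needs one that no boot CPU exhibited, it is parked via cpu_die_early(). For reference, an erratum entry after this patch has roughly this shape; MIDR_RANGE() now also fills in .def_scope and .matches (capability and MIDR values below are invented for illustration):

	{
		.desc = "example erratum",
		.capability = ARM64_WORKAROUND_EXAMPLE,		/* invented */
		MIDR_RANGE(MIDR_EXAMPLE_PART, 0x00, 0x01),	/* invented */
	},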
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 2cf2f799455a9e..7747733bffd99b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -56,14 +56,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
.safe_val = SAFE_VAL, \
}
-/* Define a feature with signed values */
+/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
- __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
-
-/* Define a feature with unsigned value */
-#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+/* Define a feature with a signed value */
+#define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+ __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
#define ARM64_FTR_END \
{ \
.width = 0, \
@@ -86,8 +86,8 @@ static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
- ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
/* Linux doesn't care about the EL3 */
ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
@@ -99,8 +99,8 @@ static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
/* Linux shouldn't care about secure memory */
@@ -111,7 +111,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
* Differing PARange is fine as long as all peripherals and memory are mapped
* within the minimum PARange of all CPUs
*/
- U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
ARM64_FTR_END,
};
@@ -126,29 +126,38 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
ARM64_FTR_END,
};
+static struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
+ ARM64_FTR_END,
+};
+
static struct arm64_ftr_bits ftr_ctr[] = {
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RAO */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0), /* ERG */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
/*
* Linux can handle differing I-cache policies. Userspace JITs will
* make use of *minLine
*/
- U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */
+ ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0), /* L1Ip */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0), /* RAZ */
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_mmfr0[] = {
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0), /* InnerShr */
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf), /* InnerShr */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0), /* FCSE */
ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0), /* AuxReg */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0), /* TCM */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0), /* ShareLvl */
- ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0), /* OuterShr */
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf), /* OuterShr */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
ARM64_FTR_END,
@@ -156,12 +165,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
- U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
ARM64_FTR_END,
};
@@ -207,6 +216,18 @@ static struct arm64_ftr_bits ftr_id_pfr0[] = {
ARM64_FTR_END,
};
+static struct arm64_ftr_bits ftr_id_dfr0[] = {
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
+ S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
+ ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
+ ARM64_FTR_END,
+};
+
/*
* Common ftr bits for a 32bit register with all hidden, strict
* attributes, with 4bit feature fields and a default safe value of
@@ -252,7 +273,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
/* Op1 = 0, CRn = 0, CRm = 1 */
ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
- ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
+ ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
@@ -287,6 +308,7 @@ static struct arm64_ftr_reg arm64_ftr_regs[] = {
/* Op1 = 0, CRn = 0, CRm = 7 */
ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
+ ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
/* Op1 = 3, CRn = 0, CRm = 0 */
ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
@@ -411,24 +433,29 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
+ init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
- init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
- init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
- init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
- init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
- init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
- init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
- init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
- init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
- init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
- init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
- init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
- init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
- init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
- init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
- init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
- init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+ init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+ init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+ init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+ init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+ init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+ init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+ init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+ init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+ init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+ init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+ init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+ init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+ init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+ init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+ init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+ }
+
}
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
@@ -520,6 +547,8 @@ void update_cpu_features(int cpu,
info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
/*
* EL3 is not our concern.
@@ -531,47 +560,51 @@ void update_cpu_features(int cpu,
info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
/*
- * If we have AArch32, we care about 32-bit features for compat. These
- * registers should be RES0 otherwise.
+ * If we have AArch32, we care about 32-bit features for compat.
+ * If the system doesn't support AArch32, don't update them.
*/
- taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
+ if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
+ id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+
+ taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
info->reg_id_dfr0, boot->reg_id_dfr0);
- taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
info->reg_id_isar0, boot->reg_id_isar0);
- taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
info->reg_id_isar1, boot->reg_id_isar1);
- taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
info->reg_id_isar2, boot->reg_id_isar2);
- taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
info->reg_id_isar3, boot->reg_id_isar3);
- taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
info->reg_id_isar4, boot->reg_id_isar4);
- taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
info->reg_id_isar5, boot->reg_id_isar5);
- /*
- * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
- * ACTLR formats could differ across CPUs and therefore would have to
- * be trapped for virtualization anyway.
- */
- taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
+ /*
+ * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
+ * ACTLR formats could differ across CPUs and therefore would have to
+ * be trapped for virtualization anyway.
+ */
+ taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
info->reg_id_mmfr0, boot->reg_id_mmfr0);
- taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
info->reg_id_mmfr1, boot->reg_id_mmfr1);
- taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
info->reg_id_mmfr2, boot->reg_id_mmfr2);
- taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
info->reg_id_mmfr3, boot->reg_id_mmfr3);
- taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
info->reg_id_pfr0, boot->reg_id_pfr0);
- taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
info->reg_id_pfr1, boot->reg_id_pfr1);
- taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
info->reg_mvfr0, boot->reg_mvfr0);
- taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
info->reg_mvfr1, boot->reg_mvfr1);
- taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
+ taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
info->reg_mvfr2, boot->reg_mvfr2);
+ }
/*
* Mismatched CPU features are a recipe for disaster. Don't even
@@ -590,6 +623,49 @@ u64 read_system_reg(u32 id)
return regp->sys_val;
}
+/*
+ * __raw_read_system_reg() - Used by a STARTING CPU, before cpuinfo is
+ * populated, to read the given system register on the current CPU.
+ */
+static u64 __raw_read_system_reg(u32 sys_id)
+{
+ switch (sys_id) {
+ case SYS_ID_PFR0_EL1: return read_cpuid(ID_PFR0_EL1);
+ case SYS_ID_PFR1_EL1: return read_cpuid(ID_PFR1_EL1);
+ case SYS_ID_DFR0_EL1: return read_cpuid(ID_DFR0_EL1);
+ case SYS_ID_MMFR0_EL1: return read_cpuid(ID_MMFR0_EL1);
+ case SYS_ID_MMFR1_EL1: return read_cpuid(ID_MMFR1_EL1);
+ case SYS_ID_MMFR2_EL1: return read_cpuid(ID_MMFR2_EL1);
+ case SYS_ID_MMFR3_EL1: return read_cpuid(ID_MMFR3_EL1);
+ case SYS_ID_ISAR0_EL1: return read_cpuid(ID_ISAR0_EL1);
+ case SYS_ID_ISAR1_EL1: return read_cpuid(ID_ISAR1_EL1);
+ case SYS_ID_ISAR2_EL1: return read_cpuid(ID_ISAR2_EL1);
+ case SYS_ID_ISAR3_EL1: return read_cpuid(ID_ISAR3_EL1);
+ case SYS_ID_ISAR4_EL1: return read_cpuid(ID_ISAR4_EL1);
+ case SYS_ID_ISAR5_EL1: return read_cpuid(ID_ISAR5_EL1);
+ case SYS_MVFR0_EL1: return read_cpuid(MVFR0_EL1);
+ case SYS_MVFR1_EL1: return read_cpuid(MVFR1_EL1);
+ case SYS_MVFR2_EL1: return read_cpuid(MVFR2_EL1);
+
+ case SYS_ID_AA64PFR0_EL1: return read_cpuid(ID_AA64PFR0_EL1);
+ case SYS_ID_AA64PFR1_EL1: return read_cpuid(ID_AA64PFR1_EL1);
+ case SYS_ID_AA64DFR0_EL1: return read_cpuid(ID_AA64DFR0_EL1);
+ case SYS_ID_AA64DFR1_EL1: return read_cpuid(ID_AA64DFR1_EL1);
+ case SYS_ID_AA64MMFR0_EL1: return read_cpuid(ID_AA64MMFR0_EL1);
+ case SYS_ID_AA64MMFR1_EL1: return read_cpuid(ID_AA64MMFR1_EL1);
+ case SYS_ID_AA64MMFR2_EL1: return read_cpuid(ID_AA64MMFR2_EL1);
+ case SYS_ID_AA64ISAR0_EL1: return read_cpuid(ID_AA64ISAR0_EL1);
+ case SYS_ID_AA64ISAR1_EL1: return read_cpuid(ID_AA64ISAR1_EL1);
+
+ case SYS_CNTFRQ_EL0: return read_cpuid(CNTFRQ_EL0);
+ case SYS_CTR_EL0: return read_cpuid(CTR_EL0);
+ case SYS_DCZID_EL0: return read_cpuid(DCZID_EL0);
+ default:
+ BUG();
+ return 0;
+ }
+}
+
#include <linux/irqchip/arm-gic-v3.h>
static bool
@@ -601,19 +677,24 @@ feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
}
static bool
-has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
+has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
u64 val;
- val = read_system_reg(entry->sys_reg);
+ WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+ if (scope == SCOPE_SYSTEM)
+ val = read_system_reg(entry->sys_reg);
+ else
+ val = __raw_read_system_reg(entry->sys_reg);
+
return feature_matches(val, entry);
}
-static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
+static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
bool has_sre;
- if (!has_cpuid_feature(entry))
+ if (!has_cpuid_feature(entry, scope))
return false;
has_sre = gic_enable_sre();
@@ -624,7 +705,7 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
return has_sre;
}
-static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
u32 midr = read_cpuid_id();
u32 rv_min, rv_max;
@@ -640,18 +721,22 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
+ .def_scope = SCOPE_SYSTEM,
.matches = has_useable_gicv3_cpuif,
.sys_reg = SYS_ID_AA64PFR0_EL1,
.field_pos = ID_AA64PFR0_GIC_SHIFT,
+ .sign = FTR_UNSIGNED,
.min_field_value = 1,
},
#ifdef CONFIG_ARM64_PAN
{
.desc = "Privileged Access Never",
.capability = ARM64_HAS_PAN,
+ .def_scope = SCOPE_SYSTEM,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64MMFR1_EL1,
.field_pos = ID_AA64MMFR1_PAN_SHIFT,
+ .sign = FTR_UNSIGNED,
.min_field_value = 1,
.enable = cpu_enable_pan,
},
@@ -660,51 +745,72 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "LSE atomic instructions",
.capability = ARM64_HAS_LSE_ATOMICS,
+ .def_scope = SCOPE_SYSTEM,
.matches = has_cpuid_feature,
.sys_reg = SYS_ID_AA64ISAR0_EL1,
.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
+ .sign = FTR_UNSIGNED,
.min_field_value = 2,
},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
{
.desc = "Software prefetching using PRFM",
.capability = ARM64_HAS_NO_HW_PREFETCH,
+ .def_scope = SCOPE_SYSTEM,
.matches = has_no_hw_prefetch,
},
+ {
+ .desc = "32-bit EL0 Support",
+ .capability = ARM64_HAS_32BIT_EL0,
+ .def_scope = SCOPE_SYSTEM,
+ .matches = has_cpuid_feature,
+ .sys_reg = SYS_ID_AA64PFR0_EL1,
+ .sign = FTR_UNSIGNED,
+ .field_pos = ID_AA64PFR0_EL0_SHIFT,
+ .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
+ },
{},
};
-#define HWCAP_CAP(reg, field, min_value, type, cap) \
+#define HWCAP_CAP(reg, field, s, min_value, type, cap) \
{ \
.desc = #cap, \
+ .def_scope = SCOPE_SYSTEM, \
.matches = has_cpuid_feature, \
.sys_reg = reg, \
.field_pos = field, \
+ .sign = s, \
.min_field_value = min_value, \
.hwcap_type = type, \
.hwcap = cap, \
}
-static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 2, CAP_HWCAP, HWCAP_PMULL),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 1, CAP_HWCAP, HWCAP_AES),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 1, CAP_HWCAP, HWCAP_SHA1),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 1, CAP_HWCAP, HWCAP_SHA2),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 1, CAP_HWCAP, HWCAP_CRC32),
- HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 2, CAP_HWCAP, HWCAP_ATOMICS),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 0, CAP_HWCAP, HWCAP_FP),
- HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 0, CAP_HWCAP, HWCAP_ASIMD),
+static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
+ HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
+ HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
+ {},
+};
+
+static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
- HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
+ HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
{},
};
-static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
+static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
switch (cap->hwcap_type) {
case CAP_HWCAP:
@@ -725,7 +831,7 @@ static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
}
/* Check if we have a particular HWCAP enabled */
-static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
+static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
bool rc;
@@ -749,28 +855,23 @@ static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *
return rc;
}
-static void setup_cpu_hwcaps(void)
+static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
- int i;
- const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;
-
- for (i = 0; hwcaps[i].desc; i++)
- if (hwcaps[i].matches(&hwcaps[i]))
- cap_set_hwcap(&hwcaps[i]);
+ for (; hwcaps->matches; hwcaps++)
+ if (hwcaps->matches(hwcaps, hwcaps->def_scope))
+ cap_set_elf_hwcap(hwcaps);
}
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info)
{
- int i;
-
- for (i = 0; caps[i].desc; i++) {
- if (!caps[i].matches(&caps[i]))
+ for (; caps->matches; caps++) {
+ if (!caps->matches(caps, caps->def_scope))
continue;
- if (!cpus_have_cap(caps[i].capability))
- pr_info("%s %s\n", info, caps[i].desc);
- cpus_set_cap(caps[i].capability);
+ if (!cpus_have_cap(caps->capability) && caps->desc)
+ pr_info("%s %s\n", info, caps->desc);
+ cpus_set_cap(caps->capability);
}
}
@@ -778,23 +879,19 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
* Run through the enabled capabilities and enable() it on all active
* CPUs
*/
-static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
- int i;
-
- for (i = 0; caps[i].desc; i++)
- if (caps[i].enable && cpus_have_cap(caps[i].capability))
+ for (; caps->matches; caps++)
+ if (caps->enable && cpus_have_cap(caps->capability))
/*
* Use stop_machine() as it schedules the work allowing
* us to modify PSTATE, instead of on_each_cpu() which
* uses an IPI, giving us a PSTATE that disappears when
* we return.
*/
- stop_machine(caps[i].enable, NULL, cpu_online_mask);
+ stop_machine(caps->enable, NULL, cpu_online_mask);
}
-#ifdef CONFIG_HOTPLUG_CPU
-
/*
* Flag to indicate if we have computed the system wide
* capabilities based on the boot time active CPUs. This
@@ -810,67 +907,36 @@ static inline void set_sys_caps_initialised(void)
sys_caps_initialised = true;
}
-/*
- * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
- */
-static u64 __raw_read_system_reg(u32 sys_id)
+static void
+verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
- switch (sys_id) {
- case SYS_ID_PFR0_EL1: return (u64)read_cpuid(ID_PFR0_EL1);
- case SYS_ID_PFR1_EL1: return (u64)read_cpuid(ID_PFR1_EL1);
- case SYS_ID_DFR0_EL1: return (u64)read_cpuid(ID_DFR0_EL1);
- case SYS_ID_MMFR0_EL1: return (u64)read_cpuid(ID_MMFR0_EL1);
- case SYS_ID_MMFR1_EL1: return (u64)read_cpuid(ID_MMFR1_EL1);
- case SYS_ID_MMFR2_EL1: return (u64)read_cpuid(ID_MMFR2_EL1);
- case SYS_ID_MMFR3_EL1: return (u64)read_cpuid(ID_MMFR3_EL1);
- case SYS_ID_ISAR0_EL1: return (u64)read_cpuid(ID_ISAR0_EL1);
- case SYS_ID_ISAR1_EL1: return (u64)read_cpuid(ID_ISAR1_EL1);
- case SYS_ID_ISAR2_EL1: return (u64)read_cpuid(ID_ISAR2_EL1);
- case SYS_ID_ISAR3_EL1: return (u64)read_cpuid(ID_ISAR3_EL1);
- case SYS_ID_ISAR4_EL1: return (u64)read_cpuid(ID_ISAR4_EL1);
- case SYS_ID_ISAR5_EL1: return (u64)read_cpuid(ID_ISAR4_EL1);
- case SYS_MVFR0_EL1: return (u64)read_cpuid(MVFR0_EL1);
- case SYS_MVFR1_EL1: return (u64)read_cpuid(MVFR1_EL1);
- case SYS_MVFR2_EL1: return (u64)read_cpuid(MVFR2_EL1);
-
- case SYS_ID_AA64PFR0_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1);
- case SYS_ID_AA64PFR1_EL1: return (u64)read_cpuid(ID_AA64PFR0_EL1);
- case SYS_ID_AA64DFR0_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1);
- case SYS_ID_AA64DFR1_EL1: return (u64)read_cpuid(ID_AA64DFR0_EL1);
- case SYS_ID_AA64MMFR0_EL1: return (u64)read_cpuid(ID_AA64MMFR0_EL1);
- case SYS_ID_AA64MMFR1_EL1: return (u64)read_cpuid(ID_AA64MMFR1_EL1);
- case SYS_ID_AA64ISAR0_EL1: return (u64)read_cpuid(ID_AA64ISAR0_EL1);
- case SYS_ID_AA64ISAR1_EL1: return (u64)read_cpuid(ID_AA64ISAR1_EL1);
-
- case SYS_CNTFRQ_EL0: return (u64)read_cpuid(CNTFRQ_EL0);
- case SYS_CTR_EL0: return (u64)read_cpuid(CTR_EL0);
- case SYS_DCZID_EL0: return (u64)read_cpuid(DCZID_EL0);
- default:
- BUG();
- return 0;
- }
+
+ for (; caps->matches; caps++)
+ if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ pr_crit("CPU%d: missing HWCAP: %s\n",
+ smp_processor_id(), caps->desc);
+ cpu_die_early();
+ }
}
-/*
- * Park the CPU which doesn't have the capability as advertised
- * by the system.
- */
-static void fail_incapable_cpu(char *cap_type,
- const struct arm64_cpu_capabilities *cap)
+static void
+verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
{
- int cpu = smp_processor_id();
-
- pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc);
- /* Mark this CPU absent */
- set_cpu_present(cpu, 0);
-
- /* Check if we can park ourselves */
- if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
- cpu_ops[cpu]->cpu_die(cpu);
- asm(
- "1: wfe\n"
- " wfi\n"
- " b 1b");
+ for (; caps->matches; caps++) {
+ if (!cpus_have_cap(caps->capability))
+ continue;
+ /*
+ * If the new CPU misses an advertised feature, we cannot proceed
+ * further; park the CPU.
+ */
+ if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
+ pr_crit("CPU%d: missing feature: %s\n",
+ smp_processor_id(), caps->desc);
+ cpu_die_early();
+ }
+ if (caps->enable)
+ caps->enable(NULL);
+ }
}
/*
@@ -883,8 +949,6 @@ static void fail_incapable_cpu(char *cap_type,
*/
void verify_local_cpu_capabilities(void)
{
- int i;
- const struct arm64_cpu_capabilities *caps;
/*
* If we haven't computed the system capabilities, there is nothing
@@ -893,40 +957,34 @@ void verify_local_cpu_capabilities(void)
if (!sys_caps_initialised)
return;
- caps = arm64_features;
- for (i = 0; caps[i].desc; i++) {
- if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
- continue;
- /*
- * If the new CPU misses an advertised feature, we cannot proceed
- * further, park the cpu.
- */
- if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
- fail_incapable_cpu("arm64_features", &caps[i]);
- if (caps[i].enable)
- caps[i].enable(NULL);
- }
-
- for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
- if (!cpus_have_hwcap(&caps[i]))
- continue;
- if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
- fail_incapable_cpu("arm64_hwcaps", &caps[i]);
- }
+ verify_local_cpu_errata();
+ verify_local_cpu_features(arm64_features);
+ verify_local_elf_hwcaps(arm64_elf_hwcaps);
+ verify_local_elf_hwcaps(compat_elf_hwcaps);
}
-#else /* !CONFIG_HOTPLUG_CPU */
-
-static inline void set_sys_caps_initialised(void)
+static void __init setup_feature_capabilities(void)
{
+ update_cpu_capabilities(arm64_features, "detected feature:");
+ enable_cpu_capabilities(arm64_features);
}
-#endif /* CONFIG_HOTPLUG_CPU */
-
-static void setup_feature_capabilities(void)
+/*
+ * Check if the current CPU has a given feature capability.
+ * Should be called from non-preemptible context.
+ */
+bool this_cpu_has_cap(unsigned int cap)
{
- update_cpu_capabilities(arm64_features, "detected feature:");
- enable_cpu_capabilities(arm64_features);
+ const struct arm64_cpu_capabilities *caps;
+
+ if (WARN_ON(preemptible()))
+ return false;
+
+ for (caps = arm64_features; caps->desc; caps++)
+ if (caps->capability == cap && caps->matches)
+ return caps->matches(caps, SCOPE_LOCAL_CPU);
+
+ return false;
}
void __init setup_cpu_features(void)
@@ -936,7 +994,9 @@ void __init setup_cpu_features(void)
/* Set the CPU feature capabilities */
setup_feature_capabilities();
- setup_cpu_hwcaps();
+ enable_errata_workarounds();
+ setup_elf_hwcaps(arm64_elf_hwcaps);
+ setup_elf_hwcaps(compat_elf_hwcaps);
/* Advertise that we have computed the system capabilities */
set_sys_caps_initialised();
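this_cpu_has_cap() gives non-preemptible callers a purely local answer, independent of the system-wide capability state that read_system_reg() reflects. A caller sketch, honouring the non-preemptible requirement:

	static void local_cap_sketch(void)
	{
		preempt_disable();
		if (this_cpu_has_cap(ARM64_HAS_NO_HW_PREFETCH))
			pr_debug("CPU%d: no usable HW prefetcher\n",
				 smp_processor_id());
		preempt_enable();
	}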
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index c3c6f0ea503b24..f7fa5c48358a97 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -61,6 +61,8 @@ static const char *const hwcap_str[] = {
"sha2",
"crc32",
"atomics",
+ "fphp",
+ "asimdhp",
NULL
};
@@ -218,26 +220,30 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+ info->reg_id_aa64mmfr2 = read_cpuid(ID_AA64MMFR2_EL1);
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
- info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
- info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
- info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
- info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
- info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
- info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
- info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
- info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
- info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
- info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
- info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
- info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
- info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-
- info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
- info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
- info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+ /* Update the 32bit ID registers only if AArch32 is implemented */
+ if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+ info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+ info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+ info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+ info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+ info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+ info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+ info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+ info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+ info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+ info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+ info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+ info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+ }
cpuinfo_detect_icache_policy(info);
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 0f03a8fe23144e..d18d15810d196b 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -78,7 +78,6 @@
.macro mcount_get_lr reg
ldr \reg, [x29]
ldr \reg, [\reg, #8]
- mcount_adjust_addr \reg, \reg
.endm
.macro mcount_get_lr_addr reg
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9924548a2ca761..699b4289ceceff 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -90,17 +90,17 @@
.if \el == 0
mrs x21, sp_el0
- get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
- ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+ ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
disable_step_tsk x19, x20 // exceptions when scheduling.
.else
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
- ldr x20, [tsk, #TI_ADDR_LIMIT]
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #USER_DS
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
.endif /* \el == 0 */
mrs x22, elr_el1
mrs x23, spsr_el1
@@ -116,6 +116,13 @@
.endif
/*
+ * Set sp_el0 to current thread_info.
+ */
+ .if \el == 0
+ msr sp_el0, tsk
+ .endif
+
+ /*
* Registers that may be useful after this macro is invoked:
*
* x21 - aborted SP
@@ -128,7 +135,7 @@
.if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
- str x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
.endif
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
@@ -178,8 +185,7 @@ alternative_endif
.endm
.macro get_thread_info, rd
- mov \rd, sp
- and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
+ mrs \rd, sp_el0
.endm
/*
@@ -243,7 +249,7 @@ END(vectors)
* Invalid mode handlers
*/
.macro inv_entry, el, reason, regsize = 64
- kernel_entry el, \regsize
+ kernel_entry \el, \regsize
mov x0, sp
mov x1, #\reason
mrs x2, esr_el1
@@ -377,9 +383,9 @@ el1_irq:
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
1:
@@ -394,7 +400,7 @@ ENDPROC(el1_irq)
el1_preempt:
mov x24, lr
1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new task's TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
#endif
@@ -629,6 +635,7 @@ ENTRY(cpu_switch_to)
ldp x29, x9, [x8], #16
ldr lr, [x8]
mov sp, x9
+ msr sp_el0, x1
ret
ENDPROC(cpu_switch_to)
@@ -639,7 +646,7 @@ ENDPROC(cpu_switch_to)
ret_fast_syscall:
disable_irq // disable interrupts
str x0, [sp, #S_X0] // returned x0
- ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
@@ -674,7 +681,7 @@ work_resched:
*/
ret_to_user:
disable_irq // disable interrupts
- ldr x1, [tsk, #TI_FLAGS]
+ ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
enable_step_tsk x1, x2
@@ -713,7 +720,7 @@ el0_svc_naked: // compat entry point
enable_dbg_and_irq
ct_user_exit 1
- ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+ ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
cmp scno, sc_nr // check upper syscall limit
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 6638903f0cb9f9..f995dae1c8fda1 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -291,7 +291,7 @@ static struct notifier_block fpsimd_cpu_pm_notifier_block = {
.notifier_call = fpsimd_cpu_pm_notifier,
};
-static void fpsimd_pm_init(void)
+static void __init fpsimd_pm_init(void)
{
cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index d019c3a58cc281..59b6c0d05e3777 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -30,6 +30,7 @@
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -423,7 +424,10 @@ __mmap_switched:
str xzr, [x6], #8 // Clear BSS
b 1b
2:
- adr_l sp, initial_sp, x4
+ adrp x4, init_thread_union
+ add sp, x4, #THREAD_SIZE
+ adr_l x5, init_task
+ msr sp_el0, x5 // Save thread_info
str_l x21, __fdt_pointer, x5 // Save FDT pointer
str_l x24, memstart_addr, x6 // Save PHYS_OFFSET
mov x29, #0
@@ -464,7 +468,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
ret
/* Hyp configuration. */
-2: mov x0, #(1 << 31) // 64-bit EL1
+2: mov_q x0, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x0
/* Generic timers. */
@@ -477,8 +481,7 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
/* GICv3 system register access */
mrs x0, id_aa64pfr0_el1
ubfx x0, x0, #24, #4
- cmp x0, #1
- b.ne 3f
+ cbz x0, 3f
mrs_s x0, ICC_SRE_EL2
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
@@ -611,8 +614,11 @@ ENTRY(secondary_startup)
ENDPROC(secondary_startup)
ENTRY(__secondary_switched)
- ldr x0, [x21] // get secondary_data.stack
- mov sp, x0
+ adr_l x0, secondary_data
+ ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ mov sp, x1
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ msr sp_el0, x2
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index a272f335c289dc..096e957aecb071 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -26,6 +26,8 @@
#include <asm/virt.h>
.text
+ .pushsection .hyp.text, "ax"
+
.align 11
ENTRY(__hyp_stub_vectors)
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index bc2abb8b159957..999633bd7294aa 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -65,6 +65,16 @@
#ifdef CONFIG_EFI
/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym) ABSOLUTE(sym)
+
+/*
* The EFI stub has its own symbol namespace prefixed by __efistub_, to
* isolate it from the kernel proper. The following symbols are legally
* accessed by the stub, so provide some aliases to make them accessible.
@@ -73,25 +83,25 @@
* linked at. The routines below are all implemented in assembler in a
* position independent manner
*/
-__efistub_memcmp = __pi_memcmp;
-__efistub_memchr = __pi_memchr;
-__efistub_memcpy = __pi_memcpy;
-__efistub_memmove = __pi_memmove;
-__efistub_memset = __pi_memset;
-__efistub_strlen = __pi_strlen;
-__efistub_strcmp = __pi_strcmp;
-__efistub_strncmp = __pi_strncmp;
-__efistub___flush_dcache_area = __pi___flush_dcache_area;
+__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr = KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset = KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
#ifdef CONFIG_KASAN
-__efistub___memcpy = __pi_memcpy;
-__efistub___memmove = __pi_memmove;
-__efistub___memset = __pi_memset;
+__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove = KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset = KALLSYMS_HIDE(__pi_memset);
#endif
-__efistub__text = _text;
-__efistub__end = _end;
-__efistub__edata = _edata;
+__efistub__text = KALLSYMS_HIDE(_text);
+__efistub__end = KALLSYMS_HIDE(_end);
+__efistub__edata = KALLSYMS_HIDE(_edata);
#endif
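
With the KALLSYMS_HIDE() wrapper in place, any stub alias added later would follow the same pattern, e.g. (illustrative symbol name, not one defined in this patch):

	__efistub_strrchr = KALLSYMS_HIDE(__pi_strrchr);
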
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f7ab14c4d5df2c..1d3851278f3f8e 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -828,6 +828,7 @@ static struct platform_driver armv8_pmu_driver = {
.driver = {
.name = "armv8-pmu",
.of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv8_pmu_device_probe,
};
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 60a1fc33afc226..20c8c62c7e2e47 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,6 +45,7 @@
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
+#include <linux/percpu.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
@@ -384,6 +385,20 @@ static void tls_thread_switch(struct task_struct *next)
}
/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+ __this_cpu_write(__entry_task, next);
+}
+
+/*
* Thread switching.
*/
struct task_struct *__switch_to(struct task_struct *prev,
@@ -395,6 +410,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
+ entry_task_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -411,24 +427,32 @@ struct task_struct *__switch_to(struct task_struct *prev,
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
- unsigned long stack_page;
+ unsigned long stack_page, ret = 0;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
+ stack_page = (unsigned long)try_get_task_stack(p);
+ if (!stack_page)
+ return 0;
+
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
- stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
unwind_frame(&frame))
- return 0;
- if (!in_sched_functions(frame.pc))
- return frame.pc;
+ goto out;
+ if (!in_sched_functions(frame.pc)) {
+ ret = frame.pc;
+ goto out;
+ }
} while (count ++ < 16);
- return 0;
+
+out:
+ put_task_stack(p);
+ return ret;
}
unsigned long arch_align_stack(unsigned long sp)
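
The get_wchan() rework above follows a single lifetime rule: pin the target task's stack before walking it, and drop the reference on every exit path so a dead task's stack can actually be freed. A minimal sketch of that rule, using the try_get_task_stack()/put_task_stack() pair the hunk introduces:

	/*
	 * Sketch only: pin the stack, walk it, and balance the pin on
	 * every return path.
	 */
	static unsigned long walk_pinned_stack(struct task_struct *p)
	{
		unsigned long stack_page = (unsigned long)try_get_task_stack(p);
		unsigned long ret = 0;

		if (!stack_page)
			return 0;	/* stack already gone: nothing to walk */

		/* ... unwind within [stack_page, stack_page + THREAD_SIZE) ... */

		put_task_stack(p);	/* balance the pin taken above */
		return ret;
	}
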
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 6c4fd2810ecb35..47179fedaf97be 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
struct return_address_data {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 2e1dc416defa07..7fe0b995a6cfff 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -200,7 +200,7 @@ static void __init request_standard_resources(void)
struct resource *res;
kernel_code.start = virt_to_phys(_text);
- kernel_code.end = virt_to_phys(_etext - 1);
+ kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f3c3d8fee5bab2..7c2de6fc2c9f00 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -57,6 +57,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -94,6 +97,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* We need to tell the secondary core where to find its stack and the
* page tables.
*/
+ secondary_data.task = idle;
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -117,6 +121,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
+ secondary_data.task = NULL;
secondary_data.stack = NULL;
return ret;
@@ -131,10 +136,13 @@ static void smp_store_cpu_info(unsigned int cpuid)
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
/*
* All kernel threads share the same mm context; grab a
@@ -143,8 +151,6 @@ asmlinkage void secondary_start_kernel(void)
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -312,6 +318,28 @@ void cpu_die(void)
}
#endif
+/*
+ * Kill the calling secondary CPU, early in bringup before it is turned
+ * online.
+ */
+void cpu_die_early(void)
+{
+ int cpu = smp_processor_id();
+
+ pr_crit("CPU%d: will not boot\n", cpu);
+
+ /* Mark this CPU absent */
+ set_cpu_present(cpu, 0);
+
+#ifdef CONFIG_HOTPLUG_CPU
+ /* Check if we can park ourselves */
+ if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
+ cpu_ops[cpu]->cpu_die(cpu);
+#endif
+
+ cpu_park_loop();
+}
+
static void __init hyp_mode_check(void)
{
if (is_hyp_mode_available())
@@ -597,6 +625,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (max_cpus == 0)
break;
+ per_cpu(cpu_number, cpu) = cpu;
+
if (cpu == smp_processor_id())
continue;
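
The smp.c hunk above derives the CPU number from the idle task rather than smp_processor_id(), because smp_processor_id() itself depends on the per-cpu offset register, which has not been programmed yet at this point in bring-up. A sketch of the ordering it establishes (function name illustrative):

	/*
	 * The per-cpu offset must come from a plain memory read of the
	 * idle task before any per-cpu accessor can be trusted.
	 */
	void secondary_entry_sketch(void)
	{
		unsigned int cpu = task_cpu(current);	/* no per-cpu access here */

		set_my_cpu_offset(per_cpu_offset(cpu));	/* offset register now valid */
		/* ... from here on, smp_processor_id()/this_cpu ops work ... */
	}
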
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4f2369e360948a..27087a8e9e0ee5 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
@@ -66,7 +67,6 @@ void notrace walk_stackframe(struct stackframe *frame,
break;
}
}
-EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -98,6 +98,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
struct stack_trace_data data;
struct stackframe frame;
+ if (!try_get_task_stack(tsk))
+ return;
+
data.trace = trace;
data.skip = trace->skip;
@@ -116,6 +119,8 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+ put_task_stack(tsk);
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 00c1372bf57ba1..cbe0922edf638f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -110,7 +110,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
* Restore per-cpu offset before any kernel
* subsystem relying on it has a chance to run.
*/
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ set_my_cpu_offset(per_cpu_offset(task_cpu(current)));
/*
* PSTATE was not saved over suspend/resume, re-enable any
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5d270ca76aecaa..970eb226d0e0e6 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -38,6 +38,7 @@
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
@@ -152,6 +153,9 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
if (!tsk)
tsk = current;
+ if (!try_get_task_stack(tsk))
+ return;
+
if (regs) {
frame.fp = regs->regs[29];
frame.sp = regs->sp;
@@ -184,6 +188,8 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
dump_mem("", "Exception stack", stack,
stack + sizeof(struct pt_regs), false);
}
+
+ put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -199,10 +205,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#endif
#define S_SMP " SMP"
-static int __die(const char *str, int err, struct thread_info *thread,
- struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -217,7 +222,8 @@ static int __die(const char *str, int err, struct thread_info *thread,
print_modules();
__show_regs(regs);
pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->sp,
@@ -237,28 +243,31 @@ static DEFINE_RAW_SPINLOCK(die_lock);
*/
void die(const char *str, struct pt_regs *regs, int err)
{
- struct thread_info *thread = current_thread_info();
int ret;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&die_lock, flags);
oops_enter();
- raw_spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
- ret = __die(str, err, thread, regs);
+ ret = __die(str, err, regs);
- if (regs && kexec_should_crash(thread->task))
+ if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
+
+ raw_spin_unlock_irqrestore(&die_lock, flags);
+
if (ret != NOTIFY_STOP)
do_exit(SIGSEGV);
}
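
The die() rework above widens the die_lock critical section to span oops_enter() through the panic checks and switches to the irqsave variant, so die() is safe regardless of the interrupt state it is entered with. The generic pattern, as a sketch with an illustrative lock:

	static DEFINE_RAW_SPINLOCK(report_lock);	/* illustrative */

	static void serialized_report(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&report_lock, flags);	/* saves caller's IRQ state */
		/* ... the whole report runs with IRQs off and the lock held ... */
		raw_spin_unlock_irqrestore(&report_lock, flags);
	}
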
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 97bc68f4c689f2..908bc5ab94c11d 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -36,7 +36,7 @@
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
-extern char vdso_start, vdso_end;
+extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages;
static struct page **vdso_pagelist;
@@ -115,14 +115,14 @@ static int __init vdso_init(void)
{
int i;
- if (memcmp(&vdso_start, "\177ELF", 4)) {
+ if (memcmp(vdso_start, "\177ELF", 4)) {
pr_err("vDSO is not a valid ELF object!\n");
return -EINVAL;
}
- vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
- vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
+ vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
/* Allocate the vDSO pagelist, plus a page for the data. */
vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -135,7 +135,7 @@ static int __init vdso_init(void)
/* Grab the vDSO code pages. */
for (i = 0; i < vdso_pages; i++)
- vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+ vdso_pagelist[i + 1] = virt_to_page(vdso_start + i * PAGE_SIZE);
/* Populate the special mapping structures */
vdso_spec[0] = (struct vm_special_mapping) {
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index e3a2af68b3af16..cfd6387983d919 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -115,11 +115,12 @@ SECTIONS
}
ALIGN_DEBUG_RO
+ _etext = .; /* End of text section */
+
RO_DATA(PAGE_SIZE)
EXCEPTION_TABLE(8)
NOTES
ALIGN_DEBUG_RO
- _etext = .; /* End of text and rodata section */
ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
__init_begin = .;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3039f080e2d582..79705fde8cc8fb 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -48,6 +48,45 @@ static u64 core_reg_offset_from_id(u64 id)
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+ size = sizeof(__u64);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ size = sizeof(__uint128_t);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ size = sizeof(__u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (KVM_REG_SIZE(reg->id) == size &&
+ IS_ALIGNED(off, size / sizeof(__u32)))
+ return 0;
+
+ return -EINVAL;
+}
+
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@@ -67,6 +106,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -89,6 +131,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
@@ -98,17 +143,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR:
+ if (!system_supports_32bit_el0())
+ return -EINVAL;
+ break;
case COMPAT_PSR_MODE_FIQ:
case COMPAT_PSR_MODE_IRQ:
case COMPAT_PSR_MODE_SVC:
case COMPAT_PSR_MODE_ABT:
case COMPAT_PSR_MODE_UND:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
+ if (vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
break;
default:
err = -EINVAL;
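
validate_core_offset() above rejects KVM_GET_ONE_REG/KVM_SET_ONE_REG accesses whose encoded size does not match the register actually located at the requested offset, closing a window where a mis-sized access could read or clobber adjacent vcpu state. Its accept condition reduces to the following sketch:

	/* id's encoded size must match the field, and the offset (counted
	 * in 32-bit words) must be naturally aligned for that field. */
	static int core_reg_ok(u64 id, u64 off, int size)
	{
		return KVM_REG_SIZE(id) == size &&
		       IS_ALIGNED(off, size / sizeof(__u32));
	}
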
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 525da4446f1c3f..0c3ac7b5792c9b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -495,7 +495,7 @@
.endm
.macro deactivate_traps
- mov x2, #HCR_RW
+ mov_q x2, HCR_HOST_NVHE_FLAGS
msr hcr_el2, x2
msr hstr_el2, xzr
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4699cd74f87e4a..281e75db899aa0 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -66,7 +66,7 @@
.endm
end .req x5
-ENTRY(__copy_from_user)
+ENTRY(__arch_copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0 // Nothing to copy
ret
-ENDPROC(__copy_from_user)
+ENDPROC(__arch_copy_from_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7512bbbc07ac39..db4d187de61f2d 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -65,7 +65,7 @@
.endm
end .req x5
-ENTRY(__copy_to_user)
+ENTRY(__arch_copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
add end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
CONFIG_ARM64_PAN)
mov x0, #0
ret
-ENDPROC(__copy_to_user)
+ENDPROC(__arch_copy_to_user)
.section .fixup,"ax"
.align 2
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index ee04cc7fc3983b..02d435dba821db 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -41,6 +41,28 @@ static cpumask_t tlb_flush_pending;
#define ASID_FIRST_VERSION (1UL << asid_bits)
#define NUM_USER_ASIDS ASID_FIRST_VERSION
+/* Get the ASIDBits supported by the current CPU */
+static u32 get_cpu_asid_bits(void)
+{
+ u32 asid;
+ int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1),
+ ID_AA64MMFR0_ASID_SHIFT);
+
+ switch (fld) {
+ default:
+ pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
+ smp_processor_id(), fld);
+ /* Fallthrough */
+ case 0:
+ asid = 8;
+ break;
+ case 2:
+ asid = 16;
+ }
+
+ return asid;
+}
+
static void flush_context(unsigned int cpu)
{
int i;
@@ -198,19 +220,7 @@ asmlinkage void post_ttbr_update_workaround(void)
static int asids_init(void)
{
- int fld = cpuid_feature_extract_field(read_cpuid(ID_AA64MMFR0_EL1), 4);
-
- switch (fld) {
- default:
- pr_warn("Unknown ASID size (%d); assuming 8-bit\n", fld);
- /* Fallthrough */
- case 0:
- asid_bits = 8;
- break;
- case 2:
- asid_bits = 16;
- }
-
+ asid_bits = get_cpu_asid_bits();
/* If we end up with more CPUs than ASIDs, expect things to crash */
WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
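
The context.c hunk factors the ID_AA64MMFR0_EL1.ASIDBits decoding into get_cpu_asid_bits() so it can be reused per CPU. The decoding itself, as a sketch (field at bits [7:4]; per the ARM ARM, 0 means 8-bit ASIDs, 2 means 16-bit, and anything else is treated conservatively as 8-bit):

	static u32 asid_bits_sketch(u64 mmfr0)
	{
		unsigned int fld = (mmfr0 >> 4) & 0xf;	/* ID_AA64MMFR0_ASID_SHIFT == 4 */

		return fld == 2 ? 16 : 8;
	}
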
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 7bc27ba50b5ec4..f4c0cd1bc1c3b8 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -43,7 +43,7 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
static struct gen_pool *atomic_pool;
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
-static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
static int __init early_coherent_pool(char *p)
{
@@ -867,7 +867,7 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
return 0;
}
-static int register_iommu_dma_ops_notifier(struct bus_type *bus)
+static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
int ret;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index e7e279928d2544..1cda02c4810c02 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -107,26 +107,27 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
/* only preserve the access flags and write permission */
pte_val(entry) &= PTE_AF | PTE_WRITE | PTE_DIRTY;
- /*
- * PTE_RDONLY is cleared by default in the asm below, so set it in
- * back if necessary (read-only or clean PTE).
- */
+ /* set PTE_RDONLY if actual read-only or clean PTE */
if (!pte_write(entry) || !pte_sw_dirty(entry))
pte_val(entry) |= PTE_RDONLY;
/*
* Setting the flags must be done atomically to avoid racing with the
- * hardware update of the access/dirty state.
+ * hardware update of the access/dirty state. The PTE_RDONLY bit must
+ * be set to the most permissive (lowest value) of *ptep and entry
+ * (calculated as: a & b == ~(~a | ~b)).
*/
+ pte_val(entry) ^= PTE_RDONLY;
asm volatile("// ptep_set_access_flags\n"
" prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
- " and %0, %0, %3 // clear PTE_RDONLY\n"
+ " eor %0, %0, %3 // negate PTE_RDONLY in *ptep\n"
" orr %0, %0, %4 // set flags\n"
+ " eor %0, %0, %3 // negate final PTE_RDONLY\n"
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
- : "L" (~PTE_RDONLY), "r" (pte_val(entry)));
+ : "L" (PTE_RDONLY), "r" (pte_val(entry)));
flush_tlb_fix_spurious_fault(vma, address);
return 1;
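
A C model of the bit trick in the inline asm above may help: every flag is merged with OR, but PTE_RDONLY must be merged with AND (clear, i.e. writable, only if clear in both PTEs), via the identity a & b == ~(~a | ~b). Since only the PTE_RDONLY bit needs negating, XOR with the mask stands in for NOT:

	static unsigned long merge_pte_flags(unsigned long old, unsigned long entry,
					     unsigned long rdonly_mask)
	{
		entry ^= rdonly_mask;	/* pre-negate PTE_RDONLY in entry */
		old   ^= rdonly_mask;	/* negate PTE_RDONLY in *ptep */
		old   |= entry;		/* OR: PTE_RDONLY now holds ~a | ~b */
		old   ^= rdonly_mask;	/* negate back: PTE_RDONLY == a & b */
		return old;
	}
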
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index c26b804015e80c..a90615baa529d8 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -70,10 +70,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
struct page *page = pte_page(pte);
- /* no flushing needed for anonymous pages */
- if (!page_mapping(page))
- return;
-
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
__flush_dcache_area(page_address(page),
PAGE_SIZE << compound_order(page));
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad0f895748de20..04892f86717ccb 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -71,7 +71,7 @@ early_param("initrd", early_initrd);
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -122,17 +122,21 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
int pfn_valid(unsigned long pfn)
{
- return (pfn & PFN_MASK) == pfn && memblock_is_memory(pfn << PAGE_SHIFT);
+ phys_addr_t addr = pfn << PAGE_SHIFT;
+
+ if ((addr >> PAGE_SHIFT) != pfn)
+ return 0;
+ return memblock_is_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
{
}
#else
-static void arm64_memory_present(void)
+static void __init arm64_memory_present(void)
{
struct memblock_region *reg;
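
The pfn_valid() hunk above replaces the old PFN_MASK test with a round-trip check: if the pfn has bits that fall off the top of phys_addr_t when shifted, shifting back no longer reproduces it, and the pfn is rejected instead of silently aliasing a lower physical address. As a sketch:

	static int pfn_shift_ok(unsigned long pfn)
	{
		phys_addr_t addr = pfn << PAGE_SHIFT;

		return (addr >> PAGE_SHIFT) == pfn;	/* false if high bits were lost */
	}
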
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 51ac84e0812df8..fdc47290269ed9 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -337,7 +337,6 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
end - kernel_x_end,
PAGE_KERNEL);
}
-
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
@@ -425,7 +424,7 @@ static void __init fixup_executable(void)
void mark_rodata_ro(void)
{
create_mapping_late(__pa(_stext), (unsigned long)_stext,
- (unsigned long)_etext - (unsigned long)_stext,
+ (unsigned long)__init_begin - (unsigned long)_stext,
PAGE_KERNEL_ROX);
}
@@ -699,12 +698,12 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
}
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return pud_none(*pud);
}
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return pmd_none(*pmd);
}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 1103da256c42f0..1daf9577b4c6c9 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -74,12 +74,15 @@ ENTRY(cpu_do_suspend)
mrs x10, mdscr_el1
mrs x11, oslsr_el1
mrs x12, sctlr_el1
+ mrs x13, tpidr_el1
+ mrs x14, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
stp x6, x7, [x0, #32]
stp x8, x9, [x0, #48]
stp x10, x11, [x0, #64]
- str x12, [x0, #80]
+ stp x12, x13, [x0, #80]
+ str x14, [x0, #96]
ret
ENDPROC(cpu_do_suspend)
@@ -102,7 +105,8 @@ ENTRY(cpu_do_resume)
ldp x6, x7, [x0, #32]
ldp x8, x9, [x0, #48]
ldp x10, x11, [x0, #64]
- ldr x12, [x0, #80]
+ ldp x12, x13, [x0, #80]
+ ldr x14, [x0, #96]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -114,6 +118,8 @@ ENTRY(cpu_do_resume)
msr tcr_el1, x8
msr vbar_el1, x9
msr mdscr_el1, x10
+ msr tpidr_el1, x13
+ msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
*/
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 877da1908234b7..98e2a5dbcfda56 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -2724,7 +2724,6 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
(unsigned long int)(oper.indata + prev_ix),
noinpages,
0, /* read access only for in data */
- 0, /* no force */
inpages,
NULL);
@@ -2740,8 +2739,7 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
current->mm,
(unsigned long int)oper.cipher_outdata,
nooutpages,
- 1, /* write access for out data */
- 0, /* no force */
+ FOLL_WRITE, /* write access for out data */
outpages,
NULL);
up_read(&current->mm->mmap_sem);
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b3ec1bb0..2691a1857d203d 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@ static inline long ffz(int x)
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline long fls(int x)
+static inline int fls(int x)
{
int r;
@@ -232,7 +232,7 @@ static inline long fls(int x)
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static inline long ffs(int x)
+static inline int ffs(int x)
{
int r;
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index 9e3ddf792bd3e0..2704e0b8de4320 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -68,7 +68,7 @@ static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
- pfn_to_virt(max_low_pfn),
+ (unsigned long)pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index fb0515eb639b55..491dec06ed92c1 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -44,7 +44,7 @@ config IA64
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
- select ARCH_THREAD_INFO_ALLOCATOR
+ select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL_OLD
select SYSCTL_ARCH_UNALIGN_NO_WARN
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index aa995b67c3f51a..29bd59790d6c08 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,15 +48,15 @@ struct thread_info {
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node) \
- ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
-#define free_thread_info(ti) /* nothing */
+#define free_thread_stack(ti) /* nothing */
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
@@ -121,32 +121,4 @@ struct thread_info {
/* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
#define TIF_WORK_MASK (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
-#define TS_RESTORE_SIGMASK 2 /* restore signal mask in do_signal() */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif /* !__ASSEMBLY__ */
-
#endif /* _ASM_IA64_THREAD_INFO_H */
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 0c161ed6d18e6d..8205b456de7a66 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -143,7 +143,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
int ret;
ret = get_user_pages(current, current->mm, virt_addr,
- 1, VM_READ, 0, NULL, NULL);
+ 1, FOLL_WRITE, NULL, NULL);
if (ret<=0) {
#ifdef ERR_INJ_DEBUG
printk("Virtual address %lx is not existing.\n",virt_addr);
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index f9efe9739d3fe5..0eaa89f3defd5d 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
* handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
+#define init_stack init_task_mem.stack
union {
struct {
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index 0b29dcfef69f33..0c736ed58abdce 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -59,7 +59,10 @@ cpuflags-$(CONFIG_M5206e) := $(call cc-option,-mcpu=5206e,-m5200)
cpuflags-$(CONFIG_M5206) := $(call cc-option,-mcpu=5206,-m5200)
KBUILD_AFLAGS += $(cpuflags-y)
-KBUILD_CFLAGS += $(cpuflags-y) -pipe
+KBUILD_CFLAGS += $(cpuflags-y)
+
+KBUILD_CFLAGS += -pipe -ffreestanding
+
ifdef CONFIG_MMU
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -fno-strength-reduce -ffixed-a2
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index f9924fbcfe42b8..456e3f75ef3b7a 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -43,6 +43,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
@@ -73,8 +74,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return page;
}
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 6e4955bc542bfc..fcd52cefee2961 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -88,7 +88,8 @@ static inline void free_io_area(void *addr)
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
- __iounmap(tmp->addr, tmp->size);
+ /* remove gap added in get_io_area() */
+ __iounmap(tmp->addr, tmp->size - IO_SIZE);
kfree(tmp);
return;
}
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 91d2068da1b9bc..0f3fe6a151dce9 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -21,17 +21,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE
quiet_cmd_cp = CP $< $@$2
cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
-quiet_cmd_strip = STRIP $@
+quiet_cmd_strip = STRIP $< $@$2
cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
- -K _fdt_start vmlinux -o $@
+ -K _fdt_start $< -o $@$2
UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
+UIMAGE_IN = $@
+UIMAGE_OUT = $@.ub
$(obj)/simpleImage.%: vmlinux FORCE
$(call if_changed,cp,.unstrip)
$(call if_changed,objcopy)
$(call if_changed,uimage)
- $(call if_changed,strip)
- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
+ $(call if_changed,strip,.strip)
+ @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')'
clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 383f387b4eeeec..e7e8954e9815d0 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -148,33 +148,6 @@ static inline struct thread_info *current_thread_info(void)
*/
/* FPU was used by this task this quantum (SMP) */
#define TS_USEDFPU 0x0001
-#define TS_RESTORE_SIGMASK 0x0002
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif
#endif /* __KERNEL__ */
#endif /* _ASM_MICROBLAZE_THREAD_INFO_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5a15ce3704b65f..ef145809f89205 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -759,6 +759,7 @@ config SIBYTE_SWARM
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_LITTLESUR
bool "Sibyte BCM91250C2-LittleSur"
@@ -781,6 +782,7 @@ config SIBYTE_SENTOSA
select SYS_HAS_CPU_SB1
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SIBYTE_BIGSUR
bool "Sibyte BCM91480B-BigSur"
@@ -794,6 +796,7 @@ config SIBYTE_BIGSUR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_LITTLE_ENDIAN
select ZONE_DMA32 if 64BIT
+ select SWIOTLB if ARCH_DMA_ADDR_T_64BIT && PCI
config SNI_RM
bool "SNI RM200/300/400"
@@ -2971,6 +2974,7 @@ config MIPS32_O32
config MIPS32_N32
bool "Kernel support for n32 binaries"
depends on 64BIT
+ select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select COMPAT
select MIPS32_COMPAT
select SYSVIPC_COMPAT if SYSVIPC
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 8ae4067a5eda24..40ecb6e700cd85 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
void ath79_ddr_wb_flush(u32 reg)
{
- void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 8755d618e116ec..961c393c0f5584 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -44,6 +44,7 @@ static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
static void ath79_restart(char *command)
{
+ local_irq_disable();
ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
for (;;)
if (cpu_wait)
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c19a90f9..542c3ede97222f 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
int main(int argc, char *argv[])
{
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
/*
- * Align with 16 bytes: "greater than that used for any standard data
- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+ * which may be as large as 64KB depending on the kernel configuration.
*/
- vmlinuz_load_addr += (16 - vmlinux_size % 16);
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
printf("0x%llx\n", vmlinuz_load_addr);
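
A worked instance of the padding above, assuming a 64 KB-aligned vmlinux_load_addr of 0x80100000 and a vmlinux_size of 0x38004: the load address first becomes 0x80138004, then advances by 0x10000 - 0x8004 = 0x7ffc to 0x80140000, the next 64 KB boundary. Note that this form pads by a full 64 KB when the size is already aligned; a conventional round-up that avoids the extra padding would be:

	#define SZ_64K 0x10000ULL

	/* Round addr up to the next 64 KB boundary (no-op when already aligned). */
	static unsigned long long align_up_64k(unsigned long long addr)
	{
		return (addr + SZ_64K - 1) & ~(SZ_64K - 1);
	}
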
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 376701f41cc28e..692bbc1c5b79cd 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port,
void (*cvmx_override_ipd_port_setup) (int ipd_port);
/* Port count per interface */
-static int interface_port_count[5];
+static int interface_port_count[9];
/* Port last configured link info index by IPD/PKO port */
static cvmx_helper_link_info_t
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index d113c8ded6e2f3..6df3a4ea77fc55 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -349,6 +349,7 @@ static int __init octeon_ehci_device_init(void)
return 0;
pd = of_find_device_by_node(ehci_node);
+ of_node_put(ehci_node);
if (!pd)
return 0;
@@ -411,6 +412,7 @@ static int __init octeon_ohci_device_init(void)
return 0;
pd = of_find_device_by_node(ohci_node);
+ of_node_put(ohci_node);
if (!pd)
return 0;
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 134879c1310a0d..4ed369c0ec6a16 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -74,6 +74,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 554d1da97743d9..21f4a9fe82fafc 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -147,23 +147,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1, cpu_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, cpu_mask_nr_tbl
lui t1, %hi(cpu_mask_nr_tbl)
addiu t1, %lo(cpu_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, cpu_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(cpu_mask_nr_tbl)
- lui AT, %hi(cpu_mask_nr_tbl)
- daddiu t1, t1, %higher(cpu_mask_nr_tbl)
- daddiu AT, AT, %lo(cpu_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
1: lw t2,(t1)
nop
@@ -214,23 +203,12 @@
* Find irq with highest priority
*/
# open coded PTR_LA t1,asic_mask_nr_tbl
-#if (_MIPS_SZPTR == 32)
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
# open coded la t1, asic_mask_nr_tbl
lui t1, %hi(asic_mask_nr_tbl)
addiu t1, %lo(asic_mask_nr_tbl)
-
-#endif
-#if (_MIPS_SZPTR == 64)
- # open coded dla t1, asic_mask_nr_tbl
- .set push
- .set noat
- lui t1, %highest(asic_mask_nr_tbl)
- lui AT, %hi(asic_mask_nr_tbl)
- daddiu t1, t1, %higher(asic_mask_nr_tbl)
- daddiu AT, AT, %lo(asic_mask_nr_tbl)
- dsll t1, 32
- daddu t1, t1, AT
- .set pop
+#else
+#error GCC `-msym32' option required for 64-bit DECstation builds
#endif
2: lw t2,(t1)
nop
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 2f021cdfba4f8f..742223716fc87f 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -66,6 +66,8 @@ extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
@@ -92,4 +94,15 @@ static inline void fpu_emulator_init_fpu(void)
set_fpr64(&t->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
+/*
+ * Mask the FCSR Cause bits according to the Enable bits, observing
+ * that Unimplemented is always enabled.
+ */
+static inline unsigned long mask_fcr31_x(unsigned long fcr31)
+{
+ return fcr31 & (FPU_CSR_UNI_X |
+ ((fcr31 & FPU_CSR_ALL_E) <<
+ (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E))));
+}
+
#endif /* _ASM_FPU_EMULATOR_H */
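
Assuming the standard MIPS FCSR layout (Cause bits 17:12 with Unimplemented at bit 17, Enable bits 11:7), the shift ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) evaluates to 13 - 8 = 5, lining each Enable bit up under its Cause bit. A worked instance with raw constants rather than the kernel's macros:

	static unsigned long masked_cause_example(void)
	{
		/* Enable.Z (bit 10) set; Cause.Z (bit 15) and Cause.I (bit 12) raised. */
		unsigned long fcr31 = (1UL << 10) | (1UL << 15) | (1UL << 12);	/* 0x9400 */
		unsigned long mask  = (1UL << 17) | ((fcr31 & 0xf80) << 5);	/* 0x28000 */

		return fcr31 & mask;	/* 0x8000: only the enabled Cause.Z signals */
	}

The unenabled Inexact cause is dropped, while Unimplemented (bit 17) would always pass, matching the comment that it is always enabled.
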
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index d10fd80dbb7e96..ab1df19b09571a 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
/*
* ISA I/O bus memory addresses are 1:1 with the physical address.
*/
-static inline unsigned long isa_virt_to_bus(volatile void * address)
+static inline unsigned long isa_virt_to_bus(volatile void *address)
{
- return (unsigned long)address - PAGE_OFFSET;
+ return virt_to_phys(address);
}
-static inline void * isa_bus_to_virt(unsigned long address)
+static inline void *isa_bus_to_virt(unsigned long address)
{
- return (void *)(address + PAGE_OFFSET);
+ return phys_to_virt(address);
}
#define isa_page_to_bus page_to_phys
@@ -411,6 +411,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
__val = *__addr; \
slow; \
\
+ /* prevent prefetching of coherent DMA data prematurely */ \
+ rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
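
The rmb() added above keeps the CPU from speculatively loading coherent DMA data before the port read that reports the DMA as complete. A sketch of the ordering it provides (device, register, and buffer names are illustrative):

	static void consume_dma_example(void __iomem *status, const unsigned char *buf)
	{
		unsigned char done = readb(status);	/* device signals DMA completion */

		rmb();		/* loads from buf may not be hoisted above the read */

		if (done)
			(void)buf[0];	/* first touch of the DMA'd data */
	}
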
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e77672539e8ed8..e4456e450f946d 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -21,15 +21,15 @@
#endif
#ifdef CONFIG_CPU_MICROMIPS
-#define NOP_INSN "nop32"
+#define B_INSN "b32"
#else
-#define NOP_INSN "nop"
+#define B_INSN "b"
#endif
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm_volatile_goto("1:\t" NOP_INSN "\n\t"
- "nop\n\t"
+ asm_volatile_goto("1:\t" B_INSN " 2f\n\t"
+ "2:\tnop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
WORD_INSN " 1b, %l[l_yes], %0\n\t"
".popsection\n\t"
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 4eee221b0cf0bc..d2be8e4f7a35d1 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -133,6 +133,7 @@ static inline u32 ath79_pll_rr(unsigned reg)
static inline void ath79_reset_wr(unsigned reg, u32 val)
{
__raw_writel(val, ath79_reset_base + reg);
+ (void) __raw_readl(ath79_reset_base + reg); /* flush */
}
static inline u32 ath79_reset_rr(unsigned reg)
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index d18c45c7c39417..19ff9ce46c0201 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -9,7 +9,7 @@
#define MIPS_CPU_IRQ_BASE 56
#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
-#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
+#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
#define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 98c31e5d95793c..a7bc901819c822 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -89,7 +89,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
phys_addr_t size = resource_size(rsrc);
*start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
+ *end = rsrc->start + size - 1;
}
/*
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index cf661a2fb14113..16fade4f49dd31 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -189,6 +189,11 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 041153f5cf9343..41a8201d572ee8 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -131,7 +131,7 @@ struct mips_fpu_struct {
#define NUM_DSP_REGS 6
-typedef __u32 dspreg_t;
+typedef unsigned long dspreg_t;
struct mips_dsp_state {
dspreg_t dspr[NUM_DSP_REGS];
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index ebb5c0f2f90dae..c0ae27971e3108 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -76,6 +76,22 @@ do { if (cpu_has_rw_llb) { \
} while (0)
/*
+ * Check FCSR for any unmasked exceptions left pending via `ptrace',
+ * clear them and send a signal.
+ */
+#define __sanitize_fcr31(next) \
+do { \
+ unsigned long fcr31 = mask_fcr31_x(next->thread.fpu.fcr31); \
+ void __user *pc; \
+ \
+ if (unlikely(fcr31)) { \
+ pc = (void __user *)task_pt_regs(next)->cp0_epc; \
+ next->thread.fpu.fcr31 &= ~fcr31; \
+ force_fcr31_sig(fcr31, pc, next); \
+ } \
+} while (0)
+
+/*
* For newly created kernel threads switch_to() will return to
* ret_from_kernel_thread, newly created user threads to ret_from_fork.
* That is, everything following resume() will be skipped for new threads.
@@ -85,6 +101,8 @@ do { if (cpu_has_rw_llb) { \
do { \
__mips_mt_fpaff_switch_to(prev); \
lose_fpu_inatomic(1, prev); \
+ if (tsk_used_math(next)) \
+ __sanitize_fcr31(next); \
if (cpu_has_dsp) { \
__save_dsp(prev); \
__restore_dsp(next); \
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 47bc45a67e9ba1..032a497356f3d3 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -51,7 +51,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
- if (test_thread_flag(TIF_32BIT_REGS))
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
return get_user(*arg, (int *)usp + n);
else
#endif
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 9b44d5a816fa3e..9db764b51ffe67 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -290,8 +290,8 @@ enum mm_32a_minor_op {
mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040,
+ mm_srlv32_op = 0x050,
mm_sra_op = 0x080,
- mm_srlv32_op = 0x090,
mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118,
mm_addu32_op = 0x150,
@@ -846,7 +846,7 @@ struct mm16_r3_format { /* Load from global pointer format */
struct mm16_r5_format { /* Load/store from stack pointer format */
__BITFIELD_FIELD(unsigned int opcode : 6,
__BITFIELD_FIELD(unsigned int rt : 5,
- __BITFIELD_FIELD(signed int simmediate : 5,
+ __BITFIELD_FIELD(unsigned int imm : 5,
__BITFIELD_FIELD(unsigned int : 16, /* Ignored */
;))))
};
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 97c03f4689243b..efbc1501c1a7fb 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -90,6 +90,9 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index db6f5afff4ff1a..ea897912bc7125 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -71,14 +71,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
diff --git a/arch/mips/jz4740/Platform b/arch/mips/jz4740/Platform
index 28448d358c10d4..a2a5a85ea1f936 100644
--- a/arch/mips/jz4740/Platform
+++ b/arch/mips/jz4740/Platform
@@ -1,4 +1,4 @@
platform-$(CONFIG_MACH_INGENIC) += jz4740/
cflags-$(CONFIG_MACH_INGENIC) += -I$(srctree)/arch/mips/include/asm/mach-jz4740
load-$(CONFIG_MACH_INGENIC) += 0xffffffff80010000
-zload-$(CONFIG_MACH_INGENIC) += 0xffffffff80600000
+zload-$(CONFIG_MACH_INGENIC) += 0xffffffff81000000
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 610f0f3bdb3455..93c46c9cebb7ec 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -34,6 +34,9 @@ static void crash_shutdown_secondary(void *passed_regs)
if (!cpu_online(cpu))
return;
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index dc1180a8bfa163..66736397af9f7b 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -52,6 +52,7 @@ asmlinkage void spurious_interrupt(void)
void __init init_IRQ(void)
{
int i;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
@@ -59,8 +60,7 @@ void __init init_IRQ(void)
arch_init_irq();
for_each_possible_cpu(i) {
- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 50980bf3983ef3..92bc066e47a361 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -95,6 +95,9 @@ machine_kexec(struct kimage *image)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
+ /* Mark offline BEFORE disabling local irq. */
+ set_cpu_online(smp_processor_id(), false);
+
/*
* we do not want to be bothered.
*/
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 2f7c734771f4e0..0df911e772ae0f 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -116,10 +116,20 @@ ftrace_stub:
NESTED(_mcount, PT_SIZE, ra)
PTR_LA t1, ftrace_stub
PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
- bne t1, t2, static_trace
+ beq t1, t2, fgraph_trace
nop
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+
+fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_LA t1, ftrace_stub
PTR_L t3, ftrace_graph_return
bne t1, t3, ftrace_graph_caller
nop
@@ -128,24 +138,11 @@ NESTED(_mcount, PT_SIZE, ra)
bne t1, t3, ftrace_graph_caller
nop
#endif
- b ftrace_stub
-#ifdef CONFIG_32BIT
- addiu sp, sp, 8
-#else
- nop
-#endif
-static_trace:
- MCOUNT_SAVE_REGS
-
- move a0, ra /* arg1: self return address */
- jalr t2 /* (1) call *ftrace_trace_function */
- move a1, AT /* arg2: parent's return address */
-
- MCOUNT_RESTORE_REGS
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
+
.globl ftrace_stub
ftrace_stub:
RETURN_BACK
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f43d4e4a..76f18c56141cc9 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -424,5 +424,5 @@ void mips_cm_error_report(void)
}
/* reprime cause register */
- write_gcr_error_cause(0);
+ write_gcr_error_cause(cm_error);
}
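
Writing the latched cm_error value back, rather than 0, suggests the GCR error-cause register is write-1-to-clear: with the old code nothing was ever cleared, so only the first error would ever be reported. A hedged sketch of that idiom — the accessors below are hypothetical stand-ins, not the real read_gcr_error_cause()/write_gcr_error_cause():

extern u32 w1c_read(void);		/* hypothetical register read */
extern void w1c_write(u32 val);		/* hypothetical register write */

static void handle_error(void)
{
	u32 cause = w1c_read();

	/* ...decode and log the error described by 'cause'... */

	/* On a write-1-to-clear register, writing the set bits back
	 * clears them and re-arms reporting; writing 0 clears nothing,
	 * leaving the cause latched forever. */
	w1c_write(cause);
}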
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index cbe0f025856d47..7b887027dca272 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -900,7 +900,7 @@ static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
* mipsr2_decoder: Decode and emulate a MIPS R2 instruction
* @regs: Process register set
* @inst: Instruction to decode and emulate
- * @fcr31: Floating Point Control and Status Register returned
+ * @fcr31: Floating Point Control and Status Register Cause bits returned
*/
int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
{
@@ -1183,13 +1183,13 @@ fpu_emul:
err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- *fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
/*
* this is a tricky issue - lose_fpu() uses LL/SC atomics
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fcbc4e57d7656b..e6102775892d16 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -115,7 +115,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
struct thread_info *ti = task_thread_info(p);
struct pt_regs *childregs, *regs = current_pt_regs();
unsigned long childksp;
- p->set_child_tid = p->clear_child_tid = NULL;
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
@@ -208,7 +207,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
if (ip->mm16_r5_format.rt != 31)
return 0;
- *poff = ip->mm16_r5_format.simmediate;
+ *poff = ip->mm16_r5_format.imm;
*poff = (*poff << 2) / sizeof(ulong);
return 1;
@@ -340,8 +339,9 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
- union mips_instruction insn, *ip, *ip_end;
+ union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
+ unsigned int last_insn_size = 0;
unsigned int i;
info->pc_offset = -1;
@@ -351,17 +351,20 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
- ip_end = (void *)ip + info->func_size;
+ for (i = 0; i < max_insns; i++) {
+ ip = (void *)ip + last_insn_size;
- for (i = 0; i < max_insns && ip < ip_end; i++, ip++) {
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.halfword[0] = 0;
insn.halfword[1] = ip->halfword[0];
+ last_insn_size = 2;
} else if (is_mmips) {
insn.halfword[0] = ip->halfword[1];
insn.halfword[1] = ip->halfword[0];
+ last_insn_size = 4;
} else {
insn.word = ip->word;
+ last_insn_size = 4;
}
if (is_jump_ins(&insn))
@@ -383,8 +386,6 @@ static int get_frame_info(struct mips_frame_info *info)
tmp = (ip->halfword[0] >> 1);
info->frame_size = -(signed short)(tmp & 0xf);
}
- ip = (void *) &ip->halfword[1];
- ip--;
} else
#endif
info->frame_size = - ip->i_format.simmediate;
@@ -629,21 +630,48 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}
+static DEFINE_PER_CPU(struct call_single_data, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
static void arch_dump_stack(void *info)
{
struct pt_regs *regs;
+ static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ arch_spin_lock(&lock);
regs = get_irq_regs();
if (regs)
show_regs(regs);
+ else
+ dump_stack();
+ arch_spin_unlock(&lock);
- dump_stack();
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}
void arch_trigger_all_cpu_backtrace(bool include_self)
{
- smp_call_function(arch_dump_stack, NULL, 1);
+ struct call_single_data *csd;
+ int cpu;
+
+ for_each_cpu(cpu, cpu_online_mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
+
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = arch_dump_stack;
+ smp_call_function_single_async(cpu, csd);
+ }
}
int mips_get_process_fp_mode(struct task_struct *task)
@@ -680,6 +708,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
if (value & ~known_bits)
return -EOPNOTSUPP;
+ /* Setting FRE without FR is not supported. */
+ if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
+ return -EOPNOTSUPP;
+
/* Avoid inadvertently triggering emulation */
if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
!(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index a9958b4d919454..9d04392f7ef09a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -79,16 +79,15 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Poke at FCSR according to its mask. Don't set the cause bits as
- * this is currently not handled correctly in FP context restoration
- * and will cause an oops if a corresponding enable bit is set.
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
*/
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
u32 fcr31;
u32 mask;
- value &= ~FPU_CSR_ALL_X;
fcr31 = child->thread.fpu.fcr31;
mask = boot_cpu_data.fpu_msk31;
child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
@@ -841,7 +840,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
}
#endif
- tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -879,7 +878,7 @@ long arch_ptrace(struct task_struct *child, long request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index b4b7e02443e733..286ec2d24d47bd 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -107,7 +107,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
addr & 1);
break;
}
- tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
+ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -140,7 +140,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
goto out;
}
dregs = __get_dsp_regs(child);
- tmp = (unsigned long) (dregs[addr - DSP_BASE]);
+ tmp = dregs[addr - DSP_BASE];
break;
}
case DSP_CONTROL:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 31ca2edd7218f0..6abd6b41c13d27 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -344,6 +344,7 @@ static void __show_regs(const struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
+ dump_stack();
}
void show_registers(struct pt_regs *regs)
@@ -705,6 +706,32 @@ asmlinkage void do_ov(struct pt_regs *regs)
exception_exit(prev_state);
}
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si.si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si.si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si.si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si.si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si.si_code = FPE_FLTRES;
+ else
+ si.si_code = __SI_FAULT;
+ force_sig_info(SIGFPE, &si, tsk);
+}
+
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
@@ -714,27 +741,7 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
return 0;
case SIGFPE:
- si.si_addr = fault_addr;
- si.si_signo = sig;
- /*
- * Inexact can happen together with Overflow or Underflow.
- * Respect the mask to deliver the correct exception.
- */
- fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
- (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
- if (fcr31 & FPU_CSR_INV_X)
- si.si_code = FPE_FLTINV;
- else if (fcr31 & FPU_CSR_DIV_X)
- si.si_code = FPE_FLTDIV;
- else if (fcr31 & FPU_CSR_OVF_X)
- si.si_code = FPE_FLTOVF;
- else if (fcr31 & FPU_CSR_UDF_X)
- si.si_code = FPE_FLTUND;
- else if (fcr31 & FPU_CSR_INE_X)
- si.si_code = FPE_FLTRES;
- else
- si.si_code = __SI_FAULT;
- force_sig_info(sig, &si, current);
+ force_fcr31_sig(fcr31, fault_addr, current);
return 1;
case SIGBUS:
@@ -797,13 +804,13 @@ static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1);
@@ -829,7 +836,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
goto out;
/* Clear FCSR.Cause before enabling interrupts */
- write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
local_irq_enable();
die_if_kernel("FP exception in kernel code", regs);
@@ -851,13 +858,13 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
- * We can't allow the emulated instruction to leave any of
- * the cause bits set in $fcr31.
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
@@ -1430,13 +1437,13 @@ asmlinkage void do_cpu(struct pt_regs *regs)
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
&fault_addr);
- fcr31 = current->thread.fpu.fcr31;
/*
* We can't allow the emulated instruction to leave
- * any of the cause bits set in $fcr31.
+ * any enabled Cause bits set in $fcr31.
*/
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
/* Send a signal if required. */
if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
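
Several hunks in this file (and in mips-r2-to-r6-emul.c above) replace the same open-coded masking with a mask_fcr31_x() helper. Reconstructing it from the code removed in the old process_fpemu_return() gives a plausible sketch; the exact upstream body is an assumption and may additionally special-case the always-enabled Unimplemented bit:

/* Keep only those FCSR Cause bits whose Enable bit is also set.
 * The Cause field sits ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) bits
 * above the Enable field, so shifting the Enable bits up aligns the
 * two fields for the AND.  Sketch only; constants are the kernel's. */
static inline unsigned long mask_fcr31_x(unsigned long fcr31)
{
	return fcr31 & ((fcr31 & FPU_CSR_ALL_E) <<
			(ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E)));
}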
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 5649a9e429e0a5..aca06b18c43e53 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -14,12 +14,14 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/irqchip/mips-gic.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <asm/abi.h>
+#include <asm/page.h>
#include <asm/vdso.h>
/* Kernel-provided data used by the VDSO. */
@@ -118,12 +120,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
vvar_size = gic_size + PAGE_SIZE;
size = vvar_size + image->size;
+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
base = get_unmapped_area(NULL, 0, size, 0, 0);
if (IS_ERR_VALUE(base)) {
ret = base;
goto out;
}
+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
+ }
+
data_addr = base + gic_size;
vdso_addr = data_addr + PAGE_SIZE;
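
The two hunks above first over-allocate by one aliasing window (shm_align_mask + 1 bytes) and then slide the mapping inside that slack so the VDSO data page gets the same cache colour as the kernel's own mapping of it. The arithmetic in isolation, as a sketch with the gic_size detail folded into the kernel address:

/* Pick a user base whose data page shares the kernel's cache colour.
 * 'window' is the aliasing granularity mask (shm_align_mask). */
static unsigned long colour_match(unsigned long base, unsigned long window,
				  unsigned long kernel_addr)
{
	base = (base + window) & ~window;	/* round up to a boundary */
	return base + (kernel_addr & window);	/* add the kernel colour */
}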
diff --git a/arch/mips/lib/multi3.c b/arch/mips/lib/multi3.c
index 111ad475aa0cdd..4c2483f410c26b 100644
--- a/arch/mips/lib/multi3.c
+++ b/arch/mips/lib/multi3.c
@@ -4,12 +4,12 @@
#include "libgcc.h"
/*
- * GCC 7 suboptimally generates __multi3 calls for mips64r6, so for that
- * specific case only we'll implement it here.
+ * GCC 7 & older can suboptimally generate __multi3 calls for mips64r6, so for
+ * that specific case only we implement that intrinsic here.
*
* See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82981
*/
-#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ == 7)
+#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPSR6) && (__GNUC__ < 8)
/* multiply 64-bit values, low 64-bits returned */
static inline long long notrace dmulu(long long a, long long b)
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
index f7c905e50dc415..92dc6bafc12717 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_ohci.c
@@ -138,7 +138,7 @@ u32 pci_ohci_read_reg(int reg)
break;
case PCI_OHCI_INT_REG:
_rdmsr(DIVIL_MSR_REG(PIC_YSEL_LOW), &hi, &lo);
- if ((lo & 0x00000f00) == CS5536_USB_INTR)
+ if (((lo >> PIC_YSEL_LOW_USB_SHIFT) & 0xf) == CS5536_USB_INTR)
conf_data = 1;
break;
default:
diff --git a/arch/mips/loongson64/lemote-2f/irq.c b/arch/mips/loongson64/lemote-2f/irq.c
index cab5f43e0e29ca..d371f0294cbb9c 100644
--- a/arch/mips/loongson64/lemote-2f/irq.c
+++ b/arch/mips/loongson64/lemote-2f/irq.c
@@ -102,7 +102,7 @@ static struct irqaction ip6_irqaction = {
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
- .flags = IRQF_NO_THREAD,
+ .flags = IRQF_NO_THREAD | IRQF_NO_SUSPEND,
};
void __init mach_init_irq(void)
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
index 0f75b6b3d2184b..241cb88f9c037c 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/loongson-3/irq.c
@@ -42,51 +42,8 @@ void mach_irq_dispatch(unsigned int pending)
}
}
-static struct irqaction cascade_irqaction = {
- .handler = no_action,
- .flags = IRQF_NO_SUSPEND,
- .name = "cascade",
-};
-
-static inline void mask_loongson_irq(struct irq_data *d)
-{
- clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_disable_hazard();
-
- /* Workaround: UART IRQ may deliver to any core */
- if (d->irq == LOONGSON_UART_IRQ) {
- int cpu = smp_processor_id();
- int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
- int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
- u64 intenclr_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_INTENCLR);
- u64 introuter_lpc_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_LPC);
-
- *(volatile u32 *)intenclr_addr = 1 << 10;
- *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
- }
-}
-
-static inline void unmask_loongson_irq(struct irq_data *d)
-{
- /* Workaround: UART IRQ may deliver to any core */
- if (d->irq == LOONGSON_UART_IRQ) {
- int cpu = smp_processor_id();
- int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
- int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
- u64 intenset_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_INTENSET);
- u64 introuter_lpc_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_LPC);
-
- *(volatile u32 *)intenset_addr = 1 << 10;
- *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
- }
-
- set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_enable_hazard();
-}
+static inline void mask_loongson_irq(struct irq_data *d) { }
+static inline void unmask_loongson_irq(struct irq_data *d) { }
/* For MIPS IRQs which shared by all cores */
static struct irq_chip loongson_irq_chip = {
@@ -124,12 +81,11 @@ void __init mach_init_irq(void)
mips_cpu_irq_init();
init_i8259_irqs();
irq_set_chip_and_handler(LOONGSON_UART_IRQ,
- &loongson_irq_chip, handle_level_irq);
-
- /* setup HT1 irq */
- setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
+ &loongson_irq_chip, handle_percpu_irq);
+ irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
+ &loongson_irq_chip, handle_percpu_irq);
- set_c0_status(STATUSF_IP2 | STATUSF_IP6);
+ set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 5d3a25e1cfaea6..52e8c2026853ef 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -712,7 +712,8 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
@@ -745,7 +746,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
/* Catch bad driver code */
- BUG_ON(size == 0);
+ if (WARN_ON(size == 0))
+ return;
preempt_disable();
if (cpu_has_inclusive_pcaches) {
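
Downgrading BUG_ON(size == 0) to WARN_ON turns a zero-length range from a buggy driver into a stack trace plus an early return instead of a kernel panic. WARN_ON() evaluates to its condition, so it doubles as the test:

#include <linux/bug.h>

static void dma_cache_op(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code: complain loudly, keep running. */
	if (WARN_ON(size == 0))
		return;

	/* ...perform cache maintenance on [addr, addr + size)... */
}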
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 349995d19c7f2c..e596e0a1ceccff 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -303,7 +303,7 @@ slow_irqon:
ret = get_user_pages_unlocked(current, mm, start,
(end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 8d5008cbdc0f0a..a853a83f294493 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
+#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -97,6 +98,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
return error;
}
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Generic mapping function (not visible outside):
*/
@@ -115,8 +130,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
+ unsigned long offset, pfn, last_pfn;
struct vm_struct * area;
- unsigned long offset;
phys_addr_t last_addr;
void * addr;
@@ -136,18 +151,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
return (void __iomem *) CKSEG1ADDR(phys_addr);
/*
- * Don't allow anybody to remap normal RAM that we're using..
+ * Don't allow anybody to remap RAM that may be allocated by the page
+ * allocator, since that could lead to races & data clobbering.
*/
- if (phys_addr < virt_to_phys(high_memory)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = __va(phys_addr);
- t_end = t_addr + (size - 1);
-
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
- if(!PageReserved(page))
- return NULL;
+ pfn = PFN_DOWN(phys_addr);
+ last_pfn = PFN_DOWN(last_addr);
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
+ return NULL;
}
/*
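
The rewritten check consults the System RAM resource map instead of poking at struct pages below high_memory. walk_system_ram_range() calls the supplied function once per chunk of RAM inside the PFN range and stops at the first non-zero return; the hunk compares the result against 1 (rather than merely non-zero) because the walker appears to return an error value when the range contains no System RAM at all, which is exactly the case __ioremap() wants to allow. The callback contract as a sketch:

/* Return 1 if any page in this RAM chunk is ordinary allocatable
 * memory (valid and not reserved); non-zero stops the walk. */
static int check_ram_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; i++)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;
	return 0;	/* keep walking the remaining chunks */
}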
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 2a5bb849b10efa..288b58b00dc845 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -369,7 +369,9 @@ int __init octeon_msi_initialize(void)
int irq;
struct irq_chip *msi;
- if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
+ if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_INVALID) {
+ return 0;
+ } else if (octeon_dma_bar_type == OCTEON_DMA_BAR_TYPE_PCIE) {
msi_rcv_reg[0] = CVMX_PEXP_NPEI_MSI_RCV0;
msi_rcv_reg[1] = CVMX_PEXP_NPEI_MSI_RCV1;
msi_rcv_reg[2] = CVMX_PEXP_NPEI_MSI_RCV2;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index c258cd406fbbe3..b36bbda310587a 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -571,6 +571,11 @@ static int __init octeon_pci_setup(void)
if (octeon_has_feature(OCTEON_FEATURE_PCIE))
return 0;
+ if (!octeon_is_pci_host()) {
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
+ return 0;
+ }
+
/* Point pcibios_map_irq() to the PCI version of it */
octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@@ -582,11 +587,6 @@ static int __init octeon_pci_setup(void)
else
octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
- if (!octeon_is_pci_host()) {
- pr_notice("Not in host mode, PCI Controller not initialized\n");
- return 0;
- }
-
/* PCI I/O and PCI MEM values */
set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
ioport_resource.start = 0;
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index c5f45fc96c7438..9c19f5493b85ba 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -81,7 +81,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = {
};
static struct rt2880_pmx_func nd_sd_grp[] = {
FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
- FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
+ FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
};
static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
diff --git a/arch/mips/sibyte/common/Makefile b/arch/mips/sibyte/common/Makefile
index b3d6bf23a6620c..3ef3fb65813697 100644
--- a/arch/mips/sibyte/common/Makefile
+++ b/arch/mips/sibyte/common/Makefile
@@ -1,4 +1,5 @@
obj-y := cfe.o
+obj-$(CONFIG_SWIOTLB) += dma.o
obj-$(CONFIG_SIBYTE_BUS_WATCHER) += bus_watcher.o
obj-$(CONFIG_SIBYTE_CFE_CONSOLE) += cfe_console.o
obj-$(CONFIG_SIBYTE_TBPROF) += sb_tbprof.o
diff --git a/arch/mips/sibyte/common/dma.c b/arch/mips/sibyte/common/dma.c
new file mode 100644
index 00000000000000..eb47a94f3583ed
--- /dev/null
+++ b/arch/mips/sibyte/common/dma.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DMA support for Broadcom SiByte platforms.
+ *
+ * Copyright (c) 2018 Maciej W. Rozycki
+ */
+
+#include <linux/swiotlb.h>
+#include <asm/bootinfo.h>
+
+void __init plat_swiotlb_setup(void)
+{
+ swiotlb_init(1);
+}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 6c7d78546eeeb9..886005b1e87d45 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -107,7 +107,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
@@ -143,7 +143,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 4861a78c7160f7..f5f90bbf019d68 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c
index 99770823451a7e..2d7986c386fe59 100644
--- a/arch/mn10300/kernel/kgdb.c
+++ b/arch/mn10300/kernel/kgdb.c
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *ti = (void *)stack;
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index de73beb36910ad..3901b80d442021 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -104,8 +104,7 @@ static int __init early_init_dt_scan_serial(unsigned long node,
return 0;
#endif
- *addr64 = fdt_translate_address((const void *)initial_boot_params,
- node);
+ *addr64 = of_flat_dt_translate_address(node);
return *addr64 == OF_BAD_ADDR ? 0 : 1;
}
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index 7095dfe7666ba3..962372143fda9b 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -152,8 +152,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
top_of_kernel_stack = sp;
- p->set_child_tid = p->clear_child_tid = NULL;
-
/* Locate userspace context on stack... */
sp -= STACK_FRAME_OVERHEAD; /* redzone */
sp -= sizeof(struct pt_regs);
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 63d08a7aeb93f2..2a4596d64a3f83 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -178,7 +178,7 @@ config PREFETCH
config MLONGCALLS
bool "Enable the -mlong-calls compiler option for big kernels"
- def_bool y if (!MODULES)
+ default y
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/include/asm/barrier.h b/arch/parisc/include/asm/barrier.h
new file mode 100644
index 00000000000000..dbaaca84f27f34
--- /dev/null
+++ b/arch/parisc/include/asm/barrier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* The synchronize caches instruction executes as a nop on systems in
+ which all memory references are performed in order. */
+#define synchronize_caches() __asm__ __volatile__ ("sync" : : : "memory")
+
+#if defined(CONFIG_SMP)
+#define mb() do { synchronize_caches(); } while (0)
+#define rmb() mb()
+#define wmb() mb()
+#define dma_rmb() mb()
+#define dma_wmb() mb()
+#else
+#define mb() barrier()
+#define rmb() barrier()
+#define wmb() barrier()
+#define dma_rmb() barrier()
+#define dma_wmb() barrier()
+#endif
+
+#define __smp_mb() mb()
+#define __smp_rmb() mb()
+#define __smp_wmb() mb()
+
+#include <asm-generic/barrier.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __ASM_BARRIER_H */
diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
index 3d0e17bcc8e905..df0f52bd18b457 100644
--- a/arch/parisc/include/asm/cache.h
+++ b/arch/parisc/include/asm/cache.h
@@ -22,6 +22,9 @@
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+/* Read-only memory is marked before mark_rodata_ro() is called. */
+#define __ro_after_init __read_mostly
+
void parisc_cache_init(void); /* initializes cache-flushing */
void disable_sr_hashing_asm(int); /* low level support for above */
void disable_sr_hashing(void); /* turns off space register hashing */
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 845272ce9cc587..7bd69bd43a0185 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -121,10 +121,6 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
}
}
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
-#endif
-
#include <asm/kmap_types.h>
#define ARCH_HAS_KMAP
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 64f2992e439fcd..617efa845054e9 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -21,7 +21,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
{
volatile unsigned int *a;
- mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0)
@@ -31,16 +30,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
local_irq_disable();
} else
cpu_relax();
- mb();
}
static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
- mb();
+
a = __ldcw_align(x);
- *a = 1;
mb();
+ *a = 1;
}
static inline int arch_spin_trylock(arch_spinlock_t *x)
@@ -48,10 +46,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
volatile unsigned int *a;
int ret;
- mb();
a = __ldcw_align(x);
ret = __ldcw(a) != 0;
- mb();
return ret;
}
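
The unlock hunk is the crucial one here: the barrier moves from after the releasing store to before it, giving release semantics — every access made inside the critical section must be visible before another CPU can observe the lock word as free. A barrier after the store (the old placement) orders nothing that matters for the release. As pseudo-C:

static inline void spin_unlock_sketch(volatile unsigned int *lock)
{
	mb();		/* critical-section accesses complete first... */
	*lock = 1;	/* ...then publish the lock as free (1 == free
			 * in the PA-RISC ldcw locking convention) */
}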
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index dd4d1876a020c1..52dd01f19166b0 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -59,6 +59,9 @@
overrides the coredump filter bits */
#define MADV_DODUMP 70 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 71 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 72 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
#define MAP_VARIABLE 0
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 5dc831955de5d6..3b7b022384a073 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -185,7 +185,7 @@
bv,n 0(%r3)
nop
.word 0 /* checksum (will be patched) */
- .word PA(os_hpmc) /* address of handler */
+ .word 0 /* address of handler */
.word 0 /* length of handler */
.endm
@@ -482,6 +482,8 @@
.macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
+ sync
+ or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
.endm
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 16073f4721184b..b3434a7fd3c974 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -354,6 +354,7 @@ ENDPROC(flush_data_cache_local)
.macro tlb_unlock la,flags,tmp
#ifdef CONFIG_SMP
ldi 1,\tmp
+ sync
stw \tmp,0(\la)
mtsm \flags
#endif
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 9f22195b90ed0e..dd44022c3ae304 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -627,11 +627,12 @@ cas_action:
stw %r1, 4(%sr2,%r20)
#endif
/* The load and store could fail */
-1: ldw,ma 0(%r26), %r28
+1: ldw 0(%r26), %r28
sub,<> %r28, %r25, %r0
-2: stw,ma %r24, 0(%r26)
+2: stw %r24, 0(%r26)
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
/* Clear thread register indicator */
stw %r0, 4(%sr2,%r20)
@@ -645,6 +646,7 @@ cas_action:
3:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
#if ENABLE_LWS_DEBUG
stw %r0, 4(%sr2,%r20)
@@ -794,30 +796,30 @@ cas2_action:
ldo 1(%r0),%r28
/* 8bit CAS */
-13: ldb,ma 0(%r26), %r29
+13: ldb 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-14: stb,ma %r24, 0(%r26)
+14: stb %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 16bit CAS */
-15: ldh,ma 0(%r26), %r29
+15: ldh 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-16: sth,ma %r24, 0(%r26)
+16: sth %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
nop
/* 32bit CAS */
-17: ldw,ma 0(%r26), %r29
+17: ldw 0(%r26), %r29
sub,= %r29, %r25, %r0
b,n cas2_end
-18: stw,ma %r24, 0(%r26)
+18: stw %r24, 0(%r26)
b cas2_end
copy %r0, %r28
nop
@@ -825,10 +827,10 @@ cas2_action:
/* 64bit CAS */
#ifdef CONFIG_64BIT
-19: ldd,ma 0(%r26), %r29
+19: ldd 0(%r26), %r29
sub,*= %r29, %r25, %r0
b,n cas2_end
-20: std,ma %r24, 0(%r26)
+20: std %r24, 0(%r26)
copy %r0, %r28
#else
/* Compare first word */
@@ -846,7 +848,8 @@ cas2_action:
cas2_end:
/* Free lock */
- stw,ma %r20, 0(%sr2,%r20)
+ sync
+ stw %r20, 0(%sr2,%r20)
/* Enable interrupts */
ssm PSW_SM_I, %r0
/* Return to userspace, set no error */
@@ -856,6 +859,7 @@ cas2_end:
22:
/* Error occurred on load or store */
/* Free lock */
+ sync
stw %r20, 0(%sr2,%r20)
ssm PSW_SM_I, %r0
ldo 1(%r0),%r28
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 77e2262c97f644..6f61a17e2485af 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -829,7 +829,8 @@ void __init initialize_ivt(const void *iva)
for (i = 0; i < 8; i++)
*ivap++ = 0;
- /* Compute Checksum for HPMC handler */
+ /* Setup IVA and compute checksum for HPMC handler */
+ ivap[6] = (u32)__pa(os_hpmc);
length = os_hpmc_size;
ivap[7] = length;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 3c07d6b968772b..d19163454e2cdf 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -491,12 +491,8 @@ static void __init map_pages(unsigned long start_vaddr,
pte = pte_mkhuge(pte);
}
- if (address >= end_paddr) {
- if (force)
- break;
- else
- pte_val(pte) = 0;
- }
+ if (address >= end_paddr)
+ break;
set_pte(pg_table, pte);
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index b81a6bec952866..8100ebc5527153 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -88,6 +88,7 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK
config PPC
bool
default y
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select BINFMT_ELF
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 99e4487248ff35..57003d1bd24337 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -70,7 +70,8 @@ $(addprefix $(obj)/,$(zlib) cuboot-c2k.o gunzip_util.o main.o): \
libfdt := fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c
libfdtheader := fdt.h libfdt.h libfdt_internal.h
-$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o): \
+$(addprefix $(obj)/,$(libfdt) libfdt-wrapper.o simpleboot.o epapr.o \
+ treeboot-akebono.o treeboot-currituck.o treeboot-iss4xx.o): \
$(addprefix $(obj)/,$(libfdtheader))
src-wlib-y := string.S crt0.S crtsavres.S stdio.c main.c \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 12866ccb5694d3..a3550e8f1a77b6 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -15,7 +15,7 @@
RELA = 7
RELACOUNT = 0x6ffffff9
- .text
+ .data
/* A procedure descriptor used when booting this as a COFF file.
* When making COFF, this comes first in the link and we're
* linked at 0x500000.
@@ -23,6 +23,8 @@ RELACOUNT = 0x6ffffff9
.globl _zimage_start_opd
_zimage_start_opd:
.long 0x500000, 0, 0, 0
+ .text
+ b _zimage_start
#ifdef __powerpc64__
.balign 8
@@ -47,8 +49,10 @@ p_end: .long _end
p_pstack: .long _platform_stack_top
#endif
- .weak _zimage_start
.globl _zimage_start
+ /* Clang appears to require the .weak directive to be after the symbol
+ * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
+ .weak _zimage_start
_zimage_start:
.globl _zimage_start_lib
_zimage_start_lib:
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 334459ad145b4e..90863245df53b6 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -508,7 +508,7 @@ static unsigned long epapr_hypercall(unsigned long *in,
static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
unsigned long r;
@@ -520,7 +520,7 @@ static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2)
static inline long epapr_hypercall0(unsigned int nr)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
return epapr_hypercall(in, out, nr);
@@ -528,7 +528,7 @@ static inline long epapr_hypercall0(unsigned int nr)
static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@@ -538,7 +538,7 @@ static inline long epapr_hypercall1(unsigned int nr, unsigned long p1)
static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
unsigned long p2)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@@ -549,7 +549,7 @@ static inline long epapr_hypercall2(unsigned int nr, unsigned long p1,
static inline long epapr_hypercall3(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
@@ -562,7 +562,7 @@ static inline long epapr_hypercall4(unsigned int nr, unsigned long p1,
unsigned long p2, unsigned long p3,
unsigned long p4)
{
- unsigned long in[8];
+ unsigned long in[8] = {0};
unsigned long out[8];
in[0] = p1;
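
Every wrapper now zero-initializes its in[8] block. epapr_hypercall() hands all eight slots to the hypervisor regardless of how many parameters the specific call uses, so uninitialized slots would leak stale kernel stack contents across the guest/host boundary. The idiom, sketched for the two-argument case:

static long hypercall2_sketch(unsigned int nr,
			      unsigned long p1, unsigned long p2)
{
	/* = {0} zero-fills the entire array; the six unused slots
	 * therefore carry no stale stack data to the hypervisor. */
	unsigned long in[8] = {0};
	unsigned long out[8];

	in[0] = p1;
	in[1] = p2;
	return epapr_hypercall(in, out, nr);
}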
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 493e72f64b35fe..5768ec3c17818e 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -194,9 +194,6 @@ struct fadump_crash_info_header {
struct cpumask cpu_online_mask;
};
-/* Crash memory ranges */
-#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
-
struct fad_crash_memory_ranges {
unsigned long long base;
unsigned long long size;
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 98697611e7b31d..705f4dc5073b7f 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -392,7 +392,14 @@ extern struct bus_type mpic_subsys;
#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
/* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+ return 0;
+}
+#endif
/* Allocate the controller structure and setup the linux irq descs
 * for the range of interrupts passed in. No HW initialization is
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 7efee4a3240ba2..9463ce85e32596 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -132,40 +132,15 @@ static inline struct thread_info *current_thread_info(void)
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
#define TLF_NAPPING 0 /* idle thread enabled NAP mode */
#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
-#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
#define TLF_LAZY_MMU 3 /* tlb_batch is active */
#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
#define _TLF_NAPPING (1 << TLF_NAPPING)
#define _TLF_SLEEPING (1 << TLF_SLEEPING)
-#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->local_flags |= _TLF_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
- return false;
- ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
- return true;
-}
static inline bool test_thread_local_flags(unsigned int flags)
{
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index a5ffe0207c16f9..05f1389228d271 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -59,7 +59,7 @@
#endif
#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr), \
+ (__chk_user_ptr(addr), (void)(type), \
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
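
Adding (void)(type) makes the macro evaluate and discard its first argument. Previously type was never expanded at all, so a caller that computed the value into a local variable could trip -Wunused-but-set-variable; the comma operator keeps the whole thing usable in expression context. In isolation, with a stub in place of the real range check:

static inline int __access_ok_sketch(unsigned long addr, unsigned long size)
{
	return 1;	/* stub so the sketch compiles */
}

/* Evaluate 'type' so the argument counts as used, then discard it. */
#define access_ok_sketch(type, addr, size) \
	((void)(type), __access_ok_sketch((unsigned long)(addr), (size)))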
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 9c9b7411b28bbc..55eb3b752ca048 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -27,6 +27,7 @@ _GLOBAL(__setup_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
+ mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
bl __init_LPCR
bl __init_tlb_power7
@@ -40,6 +41,7 @@ _GLOBAL(__restore_cpu_power7)
beqlr
li r0,0
mtspr SPRN_LPID,r0
+ mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
bl __init_LPCR
bl __init_tlb_power7
@@ -55,6 +57,7 @@ _GLOBAL(__setup_cpu_power8)
beqlr
li r0,0
mtspr SPRN_LPID,r0
+ mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
bl __init_LPCR
@@ -74,6 +77,7 @@ _GLOBAL(__restore_cpu_power8)
beqlr
li r0,0
mtspr SPRN_LPID,r0
+ mtspr SPRN_PCR,r0
mfspr r3,SPRN_LPCR
ori r3, r3, LPCR_PECEDH
bl __init_LPCR
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 2405631e91a29d..3728e617e17ef6 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -685,6 +685,9 @@ fast_exception_return:
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ li r10, 0
+ stw r10, 8(r11)
REST_GPR(10, r11)
mtspr SPRN_SRR1,r9
mtspr SPRN_SRR0,r12
@@ -915,6 +918,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtcrf 0xFF,r10
mtlr r11
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ li r10, 0
+ stw r10, 8(r1)
/*
* Once we put values in SRR0 and SRR1, we are in a state
* where exceptions are not recoverable, since taking an
@@ -952,6 +958,9 @@ exc_exit_restart_end:
mtlr r11
lwz r10,_CCR(r1)
mtcrf 0xff,r10
+ /* Clear the exception_marker on the stack to avoid confusing stacktrace */
+ li r10, 0
+ stw r10, 8(r1)
REST_2GPRS(9, r1)
.globl exc_exit_restart
exc_exit_restart:
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2837232bbffb92..59be969173696e 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -574,6 +574,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
* actually hit this code path.
*/
+ isync
slbie r6
slbie r6 /* Workaround POWER5 < DD2.1 issue */
slbmte r7,r0
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 26d091a1a54cf5..ca3ad5ebcd41ab 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -35,6 +35,7 @@
#include <linux/crash_dump.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include <linux/slab.h>
#include <asm/page.h>
#include <asm/prom.h>
@@ -48,8 +49,10 @@ static struct fadump_mem_struct fdm;
static const struct fadump_mem_struct *fdm_active;
static DEFINE_MUTEX(fadump_mutex);
-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
+struct fad_crash_memory_ranges *crash_memory_ranges;
+int crash_memory_ranges_size;
int crash_mem_ranges;
+int max_crash_mem_ranges;
/* Scan the Firmware Assisted dump configuration details. */
int __init early_init_dt_scan_fw_dump(unsigned long node,
@@ -357,9 +360,9 @@ static int __init early_fadump_reserve_mem(char *p)
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
{
- int rc;
+ int rc, err;
unsigned int wait_time;
pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -376,7 +379,11 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
} while (wait_time);
+ err = -EIO;
switch (rc) {
+ default:
+ pr_err("Failed to register. Unknown Error(%d).\n", rc);
+ break;
case -1:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Hardware Error(%d).\n", rc);
@@ -384,18 +391,22 @@ static void register_fw_dump(struct fadump_mem_struct *fdm)
case -3:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Parameter Error(%d).\n", rc);
+ err = -EINVAL;
break;
case -9:
printk(KERN_ERR "firmware-assisted kernel dump is already "
" registered.");
fw_dump.dump_registered = 1;
+ err = -EEXIST;
break;
case 0:
printk(KERN_INFO "firmware-assisted kernel dump registration"
" is successful\n");
fw_dump.dump_registered = 1;
+ err = 0;
break;
}
+ return err;
}
void crash_fadump(struct pt_regs *regs, const char *str)
@@ -726,38 +737,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
return 0;
}
-static inline void fadump_add_crash_memory(unsigned long long base,
- unsigned long long end)
+static void free_crash_memory_ranges(void)
+{
+ kfree(crash_memory_ranges);
+ crash_memory_ranges = NULL;
+ crash_memory_ranges_size = 0;
+ max_crash_mem_ranges = 0;
+}
+
+/*
+ * Allocate or reallocate crash memory ranges array in incremental units
+ * of PAGE_SIZE.
+ */
+static int allocate_crash_memory_ranges(void)
+{
+ struct fad_crash_memory_ranges *new_array;
+ u64 new_size;
+
+ new_size = crash_memory_ranges_size + PAGE_SIZE;
+ pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
+ new_size);
+
+ new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
+ if (new_array == NULL) {
+ pr_err("Insufficient memory for setting up crash memory ranges\n");
+ free_crash_memory_ranges();
+ return -ENOMEM;
+ }
+
+ crash_memory_ranges = new_array;
+ crash_memory_ranges_size = new_size;
+ max_crash_mem_ranges = (new_size /
+ sizeof(struct fad_crash_memory_ranges));
+ return 0;
+}
+
+static inline int fadump_add_crash_memory(unsigned long long base,
+ unsigned long long end)
{
if (base == end)
- return;
+ return 0;
+
+ if (crash_mem_ranges == max_crash_mem_ranges) {
+ int ret;
+
+ ret = allocate_crash_memory_ranges();
+ if (ret)
+ return ret;
+ }
pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
crash_mem_ranges, base, end - 1, (end - base));
crash_memory_ranges[crash_mem_ranges].base = base;
crash_memory_ranges[crash_mem_ranges].size = end - base;
crash_mem_ranges++;
+ return 0;
}
-static void fadump_exclude_reserved_area(unsigned long long start,
+static int fadump_exclude_reserved_area(unsigned long long start,
unsigned long long end)
{
unsigned long long ra_start, ra_end;
+ int ret = 0;
ra_start = fw_dump.reserve_dump_area_start;
ra_end = ra_start + fw_dump.reserve_dump_area_size;
if ((ra_start < end) && (ra_end > start)) {
if ((start < ra_start) && (end > ra_end)) {
- fadump_add_crash_memory(start, ra_start);
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(start, ra_start);
+ if (ret)
+ return ret;
+
+ ret = fadump_add_crash_memory(ra_end, end);
} else if (start < ra_start) {
- fadump_add_crash_memory(start, ra_start);
+ ret = fadump_add_crash_memory(start, ra_start);
} else if (ra_end < end) {
- fadump_add_crash_memory(ra_end, end);
+ ret = fadump_add_crash_memory(ra_end, end);
}
} else
- fadump_add_crash_memory(start, end);
+ ret = fadump_add_crash_memory(start, end);
+
+ return ret;
}
static int fadump_init_elfcore_header(char *bufp)
@@ -793,10 +854,11 @@ static int fadump_init_elfcore_header(char *bufp)
* Traverse through memblock structure and setup crash memory ranges. These
 * ranges will be used to create PT_LOAD program headers in elfcore header.
*/
-static void fadump_setup_crash_memory_ranges(void)
+static int fadump_setup_crash_memory_ranges(void)
{
struct memblock_region *reg;
unsigned long long start, end;
+ int ret;
pr_debug("Setup crash memory ranges.\n");
crash_mem_ranges = 0;
@@ -807,7 +869,9 @@ static void fadump_setup_crash_memory_ranges(void)
* specified during fadump registration. We need to create a separate
* program header for this chunk with the correct offset.
*/
- fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
+ if (ret)
+ return ret;
for_each_memblock(memory, reg) {
start = (unsigned long long)reg->base;
@@ -816,8 +880,12 @@ static void fadump_setup_crash_memory_ranges(void)
start = fw_dump.boot_memory_size;
/* add this range excluding the reserved dump area. */
- fadump_exclude_reserved_area(start, end);
+ ret = fadump_exclude_reserved_area(start, end);
+ if (ret)
+ return ret;
}
+
+ return 0;
}
/*
@@ -937,19 +1005,22 @@ static unsigned long init_fadump_header(unsigned long addr)
return addr;
}
-static void register_fadump(void)
+static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
+ int ret;
/*
* If no memory is reserved then we can not register for firmware-
* assisted dump.
*/
if (!fw_dump.reserve_dump_area_size)
- return;
+ return -ENODEV;
- fadump_setup_crash_memory_ranges();
+ ret = fadump_setup_crash_memory_ranges();
+ if (ret)
+ return ret;
addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
/* Initialize fadump crash info header. */
@@ -960,7 +1031,7 @@ static void register_fadump(void)
fadump_create_elfcore_headers(vaddr);
/* register the future kernel dump with firmware. */
- register_fw_dump(&fdm);
+ return register_fw_dump(&fdm);
}
static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1025,6 +1096,10 @@ void fadump_cleanup(void)
init_fadump_mem_struct(&fdm,
be64_to_cpu(fdm_active->cpu_state_data.destination_address));
fadump_invalidate_dump(&fdm);
+ } else if (fw_dump.dump_registered) {
+ /* Un-register Firmware-assisted dump if it was registered. */
+ fadump_unregister_dump(&fdm);
+ free_crash_memory_ranges();
}
}
@@ -1141,7 +1216,6 @@ static ssize_t fadump_register_store(struct kobject *kobj,
switch (buf[0]) {
case '0':
if (fw_dump.dump_registered == 0) {
- ret = -EINVAL;
goto unlock_out;
}
/* Un-register Firmware-assisted dump */
@@ -1149,11 +1223,11 @@ static ssize_t fadump_register_store(struct kobject *kobj,
break;
case '1':
if (fw_dump.dump_registered == 1) {
- ret = -EINVAL;
+ ret = -EEXIST;
goto unlock_out;
}
/* Register Firmware-assisted dump */
- register_fadump();
+ ret = register_fadump();
break;
default:
ret = -EINVAL;
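
The fixed INIT_CRASHMEM_RANGES array becomes dynamically grown storage: whenever the range count reaches the current capacity, the array is krealloc()ed one PAGE_SIZE larger and the capacity recomputed from the new byte size. A stripped-down sketch of that growth pattern:

#include <linux/slab.h>

struct range { unsigned long long base, size; };

static struct range *ranges;
static unsigned long ranges_bytes;
static int nranges, max_ranges;

static int add_range(unsigned long long base, unsigned long long size)
{
	if (nranges == max_ranges) {
		/* Grow in PAGE_SIZE increments; krealloc() copies the
		 * existing entries and handles the initial NULL case. */
		struct range *tmp = krealloc(ranges,
					     ranges_bytes + PAGE_SIZE,
					     GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;
		ranges = tmp;
		ranges_bytes += PAGE_SIZE;
		max_ranges = ranges_bytes / sizeof(*ranges);
	}
	ranges[nranges].base = base;
	ranges[nranges].size = size;
	nranges++;
	return 0;
}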
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 78c1eba4c04a43..01e274e6907b1e 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -720,7 +720,7 @@ start_here:
tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
- stw r5, 0xf0(r0) /* Must match your Abatron config file */
+ stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
stw r6, 0(r5)
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index fdf48785d3e9f8..56e4571e3a023d 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -174,8 +174,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
if (cpu_has_feature(CPU_FTR_DAWR)) {
length_max = 512 ; /* 64 doublewords */
/* DAWR region can't cross 512 boundary */
- if ((bp->attr.bp_addr >> 10) !=
- ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
+ if ((bp->attr.bp_addr >> 9) !=
+ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
return -EINVAL;
}
if (info->len >
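
The shift count is the entire fix: a DAWR-watched region must not cross a 512-byte boundary, and 512 == 1 << 9, so comparing addr >> 9 for the first and last byte detects a crossing. The old >> 10 only caught 1024-byte crossings, accepting some invalid requests. Worked example:

#include <stdbool.h>

/* True if [addr, addr + len) crosses a 512-byte boundary. */
static bool crosses_512(unsigned long addr, unsigned long len)
{
	return (addr >> 9) != ((addr + len - 1) >> 9);
}

/* crosses_512(0x1f8, 8) == false: bytes 0x1f8..0x1ff stay in block 0.
 * crosses_512(0x1f9, 8) == true:  the last byte, 0x200, is in block 1.
 * With >> 10 both return false, so the second case slipped through. */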
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 015ae55c18686f..8dff2b3712190c 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -186,7 +186,12 @@ void __init reserve_crashkernel(void)
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
- memblock_reserve(crashk_res.start, crash_size);
+ if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+ memblock_reserve(crashk_res.start, crash_size)) {
+ pr_err("Failed to reserve memory for crashkernel!\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/msi.c b/arch/powerpc/kernel/msi.c
index dab616a33b8dbe..f2197654be0707 100644
--- a/arch/powerpc/kernel/msi.c
+++ b/arch/powerpc/kernel/msi.c
@@ -34,5 +34,10 @@ void arch_teardown_msi_irqs(struct pci_dev *dev)
{
struct pci_controller *phb = pci_bus_to_host(dev->bus);
- phb->controller_ops.teardown_msi_irqs(dev);
+ /*
+ * We can be called even when arch_setup_msi_irqs() returns -ENOSYS,
+ * so check the pointer again.
+ */
+ if (phb->controller_ops.teardown_msi_irqs)
+ phb->controller_ops.teardown_msi_irqs(dev);
}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1f7930037cb7df..d9e41b77dd13ec 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
+#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/of.h>
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 3139533640fc8e..8eda827c3e0aef 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -15,6 +15,9 @@
#undef DEBUG_PROM
+/* we cannot use FORTIFY as it brings in new symbols */
+#define __NO_FORTIFY
+
#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index b38fd081b22235..3b63655efa3c55 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1004,6 +1004,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
/* Create a new breakpoint request if one doesn't exist already */
hw_breakpoint_init(&attr);
attr.bp_addr = hw_brk.address;
+ attr.bp_len = 8;
arch_bp_generic_fields(hw_brk.type,
&attr.bp_type);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index b7019b559ddbff..2d2860711e07e6 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -199,13 +199,27 @@ dont_backup_fp:
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
- /* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+ /*
+ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+ * clobbered by an exception once we turn on MSR_RI below.
+ */
+ ld r11, PACATMSCRATCH(r13)
+ std r11, GPR1(r1)
+
+ /*
+ * Store r13 away so we can free up the scratch SPR for the SLB fault
+ * handler (needed once we start accessing the thread_struct).
+ */
+ GET_SCRATCH0(r11)
+ std r11, GPR13(r1)
+
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
+ /* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -230,11 +244,11 @@ dont_backup_fp:
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
- ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
- GET_SCRATCH0(8) /* user r13 */
+ ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index fb37290a57b410..366965ae37bd9c 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -314,7 +314,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
unsigned long pp, key;
unsigned long v, gr;
__be64 *hptep;
- int index;
+ long int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
/* Get SLB entry */
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index 2e0e67ef3544fd..e8cedf32345a01 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -5,8 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
/*
* Tracepoint for guest mode entry.
@@ -119,4 +117,10 @@ TRACE_EVENT(kvm_check_requests,
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
#include <trace/define_trace.h>
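
define_trace.h re-includes the tracepoint header named by TRACE_INCLUDE_PATH/TRACE_INCLUDE_FILE, so stale values left over from a previously included trace header would pull in the wrong file; the four KVM trace headers fixed here and below all move their definitions after an explicit #undef, immediately before the include. The tail of such a header ends up looking like this (file name hypothetical):

    /* This part must be outside protection */
    #undef TRACE_INCLUDE_PATH
    #undef TRACE_INCLUDE_FILE

    #define TRACE_INCLUDE_PATH .
    #define TRACE_INCLUDE_FILE trace_example

    #include <trace/define_trace.h>
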
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h
index 7ec534d1db9f4e..7eadbf449a1f1f 100644
--- a/arch/powerpc/kvm/trace_booke.h
+++ b/arch/powerpc/kvm/trace_booke.h
@@ -5,8 +5,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
#define kvm_trace_symbol_exit \
{0, "CRITICAL"}, \
@@ -217,4 +215,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
#endif
/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h
index 33d9daff57832e..d1bfe017d751d3 100644
--- a/arch/powerpc/kvm/trace_hv.h
+++ b/arch/powerpc/kvm/trace_hv.h
@@ -8,8 +8,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
#define kvm_trace_symbol_hcall \
{H_REMOVE, "H_REMOVE"}, \
@@ -474,4 +472,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
#endif /* _TRACE_KVM_HV_H */
/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
#include <trace/define_trace.h>
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 810507cb688aaa..1e40aa2aa5af21 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -7,8 +7,6 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
TRACE_EVENT(kvm_book3s_reenter,
TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -271,4 +269,11 @@ TRACE_EVENT(kvm_unmap_hva,
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
#include <trace/define_trace.h>
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3c4faa4c2742bb..bb3df222ae71f2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1319,7 +1319,7 @@ static long vphn_get_associativity(unsigned long cpu,
switch (rc) {
case H_FUNCTION:
- printk(KERN_INFO
+ printk_once(KERN_INFO
"VPHN is not supported. Disabling polling...\n");
stop_topology_update();
break;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 515730e499fe66..309027208f7c00 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -69,14 +69,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
* updating it. No write barriers are needed here, provided
* we only update the current CPU's SLB shadow buffer.
*/
- p->save_area[index].esid = 0;
- p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags));
- p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index));
+ WRITE_ONCE(p->save_area[index].esid, 0);
+ WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
+ WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}
static inline void slb_shadow_clear(enum slb_index index)
{
- get_slb_shadow()->save_area[index].esid = 0;
+ WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
}
static inline void create_shadowed_slbe(unsigned long ea, int ssize,
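
WRITE_ONCE() here keeps the compiler from tearing, merging, or reordering the three stores, so the hypervisor never sees a valid esid paired with a stale vsid; per the comment above, CPU-level barriers are not needed because only the local CPU updates its own shadow. A rough user-space analogue of the compiler-ordering half, using a volatile cast (a simplification, not the kernel's full definition):

    #include <stdint.h>

    #define STORE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

    struct shadow_entry { uint64_t esid, vsid; };

    static void shadow_update(struct shadow_entry *e, uint64_t esid, uint64_t vsid)
    {
        STORE_ONCE(e->esid, 0);      /* invalidate first...           */
        STORE_ONCE(e->vsid, vsid);   /* ...publish the new mapping... */
        STORE_ONCE(e->esid, esid);   /* ...and only then re-validate  */
    }
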
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index bb04e4df310080..1b784b8fd8b401 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -487,6 +487,9 @@ static void setup_page_sizes(void)
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
+ if (!def->shift)
+ continue;
+
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S
index 3d1ecd21177699..8137f77abad577 100644
--- a/arch/powerpc/platforms/83xx/suspend-asm.S
+++ b/arch/powerpc/platforms/83xx/suspend-asm.S
@@ -26,13 +26,13 @@
#define SS_MSR 0x74
#define SS_SDR1 0x78
#define SS_LR 0x7c
-#define SS_SPRG 0x80 /* 4 SPRGs */
-#define SS_DBAT 0x90 /* 8 DBATs */
-#define SS_IBAT 0xd0 /* 8 IBATs */
-#define SS_TB 0x110
-#define SS_CR 0x118
-#define SS_GPREG 0x11c /* r12-r31 */
-#define STATE_SAVE_SIZE 0x16c
+#define SS_SPRG 0x80 /* 8 SPRGs */
+#define SS_DBAT 0xa0 /* 8 DBATs */
+#define SS_IBAT 0xe0 /* 8 IBATs */
+#define SS_TB 0x120
+#define SS_CR 0x128
+#define SS_GPREG 0x12c /* r12-r31 */
+#define STATE_SAVE_SIZE 0x17c
.section .data
.align 5
@@ -103,6 +103,16 @@ _GLOBAL(mpc83xx_enter_deep_sleep)
stw r7, SS_SPRG+12(r3)
stw r8, SS_SDR1(r3)
+ mfspr r4, SPRN_SPRG4
+ mfspr r5, SPRN_SPRG5
+ mfspr r6, SPRN_SPRG6
+ mfspr r7, SPRN_SPRG7
+
+ stw r4, SS_SPRG+16(r3)
+ stw r5, SS_SPRG+20(r3)
+ stw r6, SS_SPRG+24(r3)
+ stw r7, SS_SPRG+28(r3)
+
mfspr r4, SPRN_DBAT0U
mfspr r5, SPRN_DBAT0L
mfspr r6, SPRN_DBAT1U
@@ -493,6 +503,16 @@ mpc83xx_deep_resume:
mtspr SPRN_IBAT7U, r6
mtspr SPRN_IBAT7L, r7
+ lwz r4, SS_SPRG+16(r3)
+ lwz r5, SS_SPRG+20(r3)
+ lwz r6, SS_SPRG+24(r3)
+ lwz r7, SS_SPRG+28(r3)
+
+ mtspr SPRN_SPRG4, r4
+ mtspr SPRN_SPRG5, r5
+ mtspr SPRN_SPRG6, r6
+ mtspr SPRN_SPRG7, r7
+
lwz r4, SS_SPRG+0(r3)
lwz r5, SS_SPRG+4(r3)
lwz r6, SS_SPRG+8(r3)
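
The constant changes at the top of this file are pure offset arithmetic: growing SS_SPRG from 4 to 8 saved SPRGs (32-bit each) pushes every later field up by 0x20. A hypothetical C mirror of the assembler constants, with compile-time checks of the new layout:

    #include <assert.h>

    enum {
        SS_SPRG  = 0x80,                  /* 8 SPRGs x 4 bytes       */
        SS_DBAT  = SS_SPRG + 8 * 4,      /* 8 DBAT pairs x 8 bytes  */
        SS_IBAT  = SS_DBAT + 8 * 8,      /* 8 IBAT pairs x 8 bytes  */
        SS_TB    = SS_IBAT + 8 * 8,      /* 64-bit timebase         */
        SS_CR    = SS_TB + 8,
        SS_GPREG = SS_CR + 4,            /* r12-r31: 20 x 4 bytes   */
        STATE_SAVE_SIZE = SS_GPREG + 20 * 4,
    };

    static_assert(SS_DBAT == 0xa0 && SS_IBAT == 0xe0, "BAT offsets");
    static_assert(SS_TB == 0x120 && SS_CR == 0x128 && SS_GPREG == 0x12c,
                  "tail offsets");
    static_assert(STATE_SAVE_SIZE == 0x17c, "total size");
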
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c
index f803f4b8ab6f21..8608e358217f38 100644
--- a/arch/powerpc/platforms/chrp/time.c
+++ b/arch/powerpc/platforms/chrp/time.c
@@ -27,6 +27,8 @@
#include <asm/sections.h>
#include <asm/time.h>
+#include <platforms/chrp/chrp.h>
+
extern spinlock_t rtc_lock;
#define NVRAM_AS0 0x74
@@ -62,7 +64,7 @@ long __init chrp_time_init(void)
return 0;
}
-int chrp_cmos_clock_read(int addr)
+static int chrp_cmos_clock_read(int addr)
{
if (nvram_as1 != 0)
outb(addr>>8, nvram_as1);
@@ -70,7 +72,7 @@ int chrp_cmos_clock_read(int addr)
return (inb(nvram_data));
}
-void chrp_cmos_clock_write(unsigned long val, int addr)
+static void chrp_cmos_clock_write(unsigned long val, int addr)
{
if (nvram_as1 != 0)
outb(addr>>8, nvram_as1);
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 9b7975706bfc7a..9485f1024d46c2 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -35,6 +35,8 @@
*/
#define HW_BROADWAY_ICR 0x00
#define HW_BROADWAY_IMR 0x04
+#define HW_STARLET_ICR 0x08
+#define HW_STARLET_IMR 0x0c
/*
@@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d)
void __iomem *io_base = irq_data_get_irq_chip_data(d);
setbits32(io_base + HW_BROADWAY_IMR, 1 << irq);
+
+ /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */
+ clrbits32(io_base + HW_STARLET_IMR, 1 << irq);
}
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 352592d3e44ee7..7fd19a480422b4 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -104,6 +104,10 @@ unsigned long __init wii_mmu_mapin_mem2(unsigned long top)
/* MEM2 64MB@0x10000000 */
delta = wii_hole_start + wii_hole_size;
size = top - delta;
+
+ if (__map_without_bats)
+ return delta;
+
for (bl = 128<<10; bl < max_size; bl <<= 1) {
if (bl * 2 > size)
break;
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 76f5013c35e5c1..89237b84b0962b 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -467,7 +467,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
boot_infos_t *bi = (boot_infos_t *) r4;
unsigned long hdr;
unsigned long space;
- unsigned long ptr, x;
+ unsigned long ptr;
char *model;
unsigned long offset = reloc_offset();
@@ -561,6 +561,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4)
* MMU switched OFF, so this should not be useful anymore.
*/
if (bi->version < 4) {
+ unsigned long x __maybe_unused;
+
bootx_printf("Touching pages...\n");
/*
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 8dd78f4e1af4a4..32fc56cf626182 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -359,6 +359,7 @@ static int pmac_late_init(void)
}
machine_late_initcall(powermac, pmac_late_init);
+void note_bootable_part(dev_t dev, int part, int goodness);
/*
* This is __init_refok because we check for "initializing" before
* touching any of the __init sensitive things and "initializing"
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 44ed78af1a0dd5..9021b72728895d 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -92,7 +92,7 @@ out:
}
static struct bin_attribute opal_msglog_attr = {
- .attr = {.name = "msglog", .mode = 0444},
+ .attr = {.name = "msglog", .mode = 0400},
.read = opal_msglog_read
};
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index e48826aa314c35..b40606051efe12 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -371,7 +371,7 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
/* Closed or other error drop */
if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
rc != OPAL_BUSY_EVENT) {
- written = total_len;
+ written += total_len;
break;
}
if (rc == OPAL_SUCCESS) {
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index eac3b7cc78c6f8..ab7b2594e0f6b1 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2270,7 +2270,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
- if ((level_shift - 3) * levels + page_shift >= 60)
+ if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;
/* Allocate TCE table */
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index 96536c969c9ce1..a8efed3b469163 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -280,6 +280,8 @@ int dlpar_detach_node(struct device_node *dn)
if (rc)
return rc;
+ of_node_put(dn);
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 3b6647e574b6d0..9795e52bab3d33 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -300,7 +300,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
}
savep = __va(regs->gpr[3]);
- regs->gpr[3] = savep[0]; /* restore original r3 */
+ regs->gpr[3] = be64_to_cpu(savep[0]); /* restore original r3 */
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
@@ -311,7 +311,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
int len, error_log_length;
error_log_length = 8 + rtas_error_extended_log_length(h);
- len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
+ len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf;
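
The max_t -> min_t change above is the classic copy-length clamp: the number of bytes copied must be the smaller of what the error log claims and what the destination buffer holds; max_t had it exactly backwards. In miniature:

    #include <string.h>

    #define LOG_MAX 2048   /* stand-in for RTAS_ERROR_LOG_MAX */

    static void copy_log(char *dst, const char *src, int claimed_len)
    {
        int len = claimed_len < LOG_MAX ? claimed_len : LOG_MAX;

        memset(dst, 0, LOG_MAX);
        memcpy(dst, src, len);   /* can no longer overrun dst */
    }
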
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 3f165d972a0eba..994fe73c2ed077 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
/* IO map the message register block. */
of_address_to_resource(np, 0, &rsrc);
- msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
+ msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
if (!msgr_block_addr) {
dev_err(&dev->dev, "Failed to iomap MPIC message registers");
return -EFAULT;
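
resource_size() exists because struct resource ranges are inclusive: [start, end] spans end - start + 1 bytes, so the removed end - start expression mapped one byte too few. The helper is just:

    struct res { unsigned long start, end; };

    static unsigned long res_size(const struct res *r)
    {
        return r->end - r->start + 1;   /* inclusive bounds */
    }
    /* e.g. {0x1000, 0x1fff} is 0x1000 bytes, not 0xfff */
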
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
index edcf2a70694204..87e555c7e42984 100644
--- a/arch/s390/appldata/appldata_mem.c
+++ b/arch/s390/appldata/appldata_mem.c
@@ -62,6 +62,9 @@ struct appldata_mem_data {
u64 pgalloc; /* page allocations */
u64 pgfault; /* page faults (major+minor) */
u64 pgmajfault; /* page faults (major only) */
+ u64 pgmajfault_s; /* shmem page faults (major only) */
+ u64 pgmajfault_a; /* anonymous page faults (major only) */
+ u64 pgmajfault_f; /* file page faults (major only) */
// <-- New in 2.6
} __packed;
@@ -93,7 +96,11 @@ static void appldata_get_mem_data(void *data)
mem_data->pgalloc = ev[PGALLOC_NORMAL];
mem_data->pgalloc += ev[PGALLOC_DMA];
mem_data->pgfault = ev[PGFAULT];
- mem_data->pgmajfault = ev[PGMAJFAULT];
+ mem_data->pgmajfault =
+ ev[PGMAJFAULT_S] + ev[PGMAJFAULT_A] + ev[PGMAJFAULT_F];
+ mem_data->pgmajfault_s = ev[PGMAJFAULT_S];
+ mem_data->pgmajfault_a = ev[PGMAJFAULT_A];
+ mem_data->pgmajfault_f = ev[PGMAJFAULT_F];
si_meminfo(&val);
mem_data->sharedram = val.sharedram;
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 9dd04b9e978212..b2f8c52b384012 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -113,7 +113,7 @@ struct hws_basic_entry {
struct hws_diag_entry {
unsigned int def:16; /* 0-15 Data Entry Format */
- unsigned int R:14; /* 16-19 and 20-30 reserved */
+ unsigned int R:15; /* 16-19 and 20-30 reserved */
unsigned int I:1; /* 31 entry valid or invalid */
u8 data[]; /* Machine-dependent sample data */
} __packed;
@@ -129,7 +129,9 @@ struct hws_trailer_entry {
unsigned int f:1; /* 0 - Block Full Indicator */
unsigned int a:1; /* 1 - Alert request control */
unsigned int t:1; /* 2 - Timestamp format */
- unsigned long long:61; /* 3 - 63: Reserved */
+ unsigned int :29; /* 3 - 31: Reserved */
+ unsigned int bsdes:16; /* 32-47: size of basic SDE */
+ unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */
};
unsigned long long flags; /* 0 - 63: All indicators */
};
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 998b61cd0e5694..4b39ba700d32ff 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -261,7 +261,6 @@ struct qdio_outbuf_state {
void *user;
};
-#define QDIO_OUTBUF_STATE_FLAG_NONE 0x00
#define QDIO_OUTBUF_STATE_FLAG_PENDING 0x01
#define CHSC_AC1_INITIATE_INPUTQ 0x80
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 8eccead675d4d3..cc7b450a7766f3 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -224,10 +224,10 @@ static noinline __init void detect_machine_type(void)
if (stsi(vmms, 3, 2, 2) || !vmms->count)
return;
- /* Running under KVM? If not we assume z/VM */
+ /* Detect known hypervisors */
if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
- else
+ else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
}
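
The magic byte strings are EBCDIC, the encoding of the control-program name in the STSI block: 0xd2 0xe5 0xd4 is "KVM" and 0xa9 0x61 0xe5 0xd4 is "z/VM". A tiny self-check (the translation table is trimmed to just the bytes used here):

    #include <stdio.h>

    static char ebcdic(unsigned char c)
    {
        switch (c) {
        case 0xd2: return 'K'; case 0xd4: return 'M';
        case 0xe5: return 'V'; case 0xa9: return 'z';
        case 0x61: return '/'; default:   return '?';
        }
    }

    int main(void)
    {
        const unsigned char cpi[] = { 0xa9, 0x61, 0xe5, 0xd4 };
        for (unsigned int i = 0; i < sizeof(cpi); i++)
            putchar(ebcdic(cpi[i]));
        putchar('\n');   /* prints "z/VM" */
        return 0;
    }
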
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5416d5d68308e1..4cad1adff16bf1 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -1170,7 +1170,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: BR_EX %r14
+0: BR_EX %r14,%r11
.align 8
.Lcleanup_table:
@@ -1200,7 +1200,7 @@ cleanup_critical:
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- BR_EX %r14
+ BR_EX %r14,%r11
#endif
.Lcleanup_system_call:
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 929c147e07b40c..1b69bfdf59f9ce 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -344,6 +344,8 @@ static int __hw_perf_event_init(struct perf_event *event)
break;
case PERF_TYPE_HARDWARE:
+ if (is_sampling_event(event)) /* No sampling support */
+ return -ENOENT;
ev = attr->config;
/* Count user space (problem-state) only */
if (!attr->exclude_user && attr->exclude_kernel) {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index be987cf7cd2165..e6aaffbd791bc2 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -833,6 +833,8 @@ void __init setup_arch(char **cmdline_p)
pr_info("Linux is running under KVM in 64-bit mode\n");
else if (MACHINE_IS_LPAR)
pr_info("Linux is running natively in 64-bit mode\n");
+ else
+ pr_info("Linux is running as a guest in 64-bit mode\n");
/* Have one command line that is parsed and saved in /proc/cmdline */
/* boot_command_line has been already set up in early.c */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 77f4f334a46593..29e5409c0d48d4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -360,9 +360,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
*/
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
+ struct _lowcore *lc = pcpu_devices->lowcore;
+
+ if (pcpu_devices[0].address == stap())
+ lc = &S390_lowcore;
+
pcpu_delegate(&pcpu_devices[0], func, data,
- pcpu_devices->lowcore->panic_stack -
- PANIC_FRAME_OFFSET + PAGE_SIZE);
+ lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
}
int smp_find_processor_id(u16 address)
@@ -1152,7 +1156,11 @@ static ssize_t __ref rescan_store(struct device *dev,
{
int rc;
+ rc = lock_device_hotplug_sysfs();
+ if (rc)
+ return rc;
rc = smp_rescan_cpus();
+ unlock_device_hotplug();
return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile
index ee8a18e50a250d..29b72c46284dc1 100644
--- a/arch/s390/kernel/vdso32/Makefile
+++ b/arch/s390/kernel/vdso32/Makefile
@@ -29,7 +29,7 @@ GCOV_PROFILE := n
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso32ld)
# strip rule for the .so file
@@ -38,12 +38,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile
index c4b03f9ed22828..1b083d80de50eb 100644
--- a/arch/s390/kernel/vdso64/Makefile
+++ b/arch/s390/kernel/vdso64/Makefile
@@ -29,7 +29,7 @@ GCOV_PROFILE := n
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
$(call if_changed,vdso64ld)
# strip rule for the .so file
@@ -38,12 +38,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index 16c5998b97922b..4254c477e8e05a 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -26,7 +26,7 @@
*/
ENTRY(memset)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemset_exit
ltgr %r3,%r3
jnz .Lmemset_fill
aghi %r4,-1
@@ -41,12 +41,13 @@ ENTRY(memset)
.Lmemset_clear_rest:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
+.Lmemset_exit:
BR_EX %r14
.Lmemset_fill:
stc %r3,0(%r2)
cghi %r4,1
lgr %r1,%r2
- ber %r14
+ je .Lmemset_fill_exit
aghi %r4,-2
srlg %r3,%r4,8
ltgr %r3,%r3
@@ -58,6 +59,7 @@ ENTRY(memset)
.Lmemset_fill_rest:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
+.Lmemset_fill_exit:
BR_EX %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
@@ -71,7 +73,7 @@ ENTRY(memset)
*/
ENTRY(memcpy)
ltgr %r4,%r4
- bzr %r14
+ jz .Lmemcpy_exit
aghi %r4,-1
srlg %r5,%r4,8
ltgr %r5,%r5
@@ -80,6 +82,7 @@ ENTRY(memcpy)
.Lmemcpy_rest:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
+.Lmemcpy_exit:
BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 18fccc303db7e5..bfd75be6d4151a 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -79,7 +79,7 @@ struct qin64 {
struct dcss_segment {
struct list_head list;
char dcss_name[8];
- char res_name[15];
+ char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
@@ -434,7 +434,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
+ strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
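
The array grows to 16 and strncat() becomes strlcat() because strncat(dst, src, n) appends up to n bytes and then always a NUL: an 8-character prefix plus the 7-character " (DCSS)" suffix plus the terminator needs 16 bytes, one more than the old array held. strlcat() instead takes the total buffer size and guarantees termination. Demonstrated with a minimal strlcat (BSD semantics assumed) for hosts without one:

    #include <stdio.h>
    #include <string.h>

    static size_t my_strlcat(char *dst, const char *src, size_t size)
    {
        size_t dlen = strlen(dst), slen = strlen(src), i;

        if (dlen >= size)
            return size + slen;
        for (i = 0; i < slen && dlen + i < size - 1; i++)
            dst[dlen + i] = src[i];
        dst[dlen + i] = '\0';
        return dlen + slen;
    }

    int main(void)
    {
        char name[16] = "SEGMENT0";            /* 8 chars + suffix + NUL */

        my_strlcat(name, " (DCSS)", sizeof(name));
        puts(name);                             /* "SEGMENT0 (DCSS)" */
        return 0;
    }
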
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index ec1a30d0d11ab4..7218689bd6ee8b 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -459,6 +459,8 @@ retry:
/* No reason to continue if interrupted by SIGKILL. */
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
fault = VM_FAULT_SIGNAL;
+ if (flags & FAULT_FLAG_RETRY_NOWAIT)
+ goto out_up;
goto out;
}
if (unlikely(fault & VM_FAULT_ERROR))
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 12bbf0e8478f84..7ad41be8b37369 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -242,7 +242,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
start += nr << PAGE_SHIFT;
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- nr_pages - nr, write, 0, pages);
+ nr_pages - nr, pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0)
ret = (ret < 0) ? nr : ret + nr;
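
This and the matching sh/sparc hunks below are one API migration: get_user_pages_unlocked() dropped its separate write/force ints in favor of a single gup_flags bitmask, so callers now translate write into FOLL_WRITE. The shape of the translation, with an illustrative wrapper and flag value rather than the kernel headers:

    #define FOLL_WRITE 0x01   /* illustrative value */

    static long new_gup(unsigned long start, unsigned long nr_pages,
                        void **pages, unsigned int gup_flags)
    {
        (void)start; (void)nr_pages; (void)pages; (void)gup_flags;
        return 0;   /* placeholder body */
    }

    static long old_gup(unsigned long start, unsigned long nr_pages,
                        int write, int force, void **pages)
    {
        (void)force;   /* the callers here always passed 0 */
        return new_gup(start, nr_pages, pages, write ? FOLL_WRITE : 0);
    }
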
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3cdfb5eab64317..7a13739d4f9357 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -637,6 +637,8 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
vmaddr |= gaddr & ~PMD_MASK;
/* Find vma in the parent mm */
vma = find_vma(gmap->mm, vmaddr);
+ if (!vma)
+ continue;
size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
zap_page_range(vma, vmaddr, size, NULL);
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index a26528afceb268..727693e283da23 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -522,8 +522,6 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
/* br %r1 */
_EMIT2(0x07f1);
} else {
- /* larl %r1,.+14 */
- EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
/* ex 0,S390_lowcore.br_r1_trampoline */
EMIT4_DISP(0x44000000, REG_0, REG_0,
offsetof(struct _lowcore, br_r1_trampoline));
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
index 43f32ce60aa3d9..734f56d774bbdd 100644
--- a/arch/s390/numa/numa.c
+++ b/arch/s390/numa/numa.c
@@ -47,6 +47,7 @@ int __node_distance(int a, int b)
{
return mode->distance ? mode->distance(a, b) : 0;
}
+EXPORT_SYMBOL(__node_distance);
int numa_debug_enabled;
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index ef0499b76c505c..9a5754d4ee87c8 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -412,6 +412,8 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
hwirq = 0;
for_each_pci_msi_entry(msi, pdev) {
rc = -EIO;
+ if (hwirq >= msi_vecs)
+ break;
irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
if (irq < 0)
goto out_msi;
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 2afa321157be6f..6c65dcd470abe2 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -151,19 +151,10 @@ extern void init_thread_xstate(void);
* ever touches our thread-synchronous status, so we don't
* have to worry about atomic accesses.
*/
-#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
#define TS_USEDFPU 0x0002 /* FPU used by this task this quantum */
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-
#define TI_FLAG_FAULT_CODE_SHIFT 24
/*
@@ -182,23 +173,6 @@ static inline unsigned int get_thread_fault_code(void)
return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index d77f2f6c7ff076..0b30b9dfc87f22 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -34,6 +34,9 @@ DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__lshrsi3);
DECLARE_EXPORT(__ashrsi3);
DECLARE_EXPORT(__ashlsi3);
+DECLARE_EXPORT(__lshrsi3_r0);
+DECLARE_EXPORT(__ashrsi3_r0);
+DECLARE_EXPORT(__ashlsi3_r0);
DECLARE_EXPORT(__ashiftrt_r4_6);
DECLARE_EXPORT(__ashiftrt_r4_7);
DECLARE_EXPORT(__ashiftrt_r4_8);
diff --git a/arch/sh/lib/ashlsi3.S b/arch/sh/lib/ashlsi3.S
index bd47e9b403a5bc..70a6434945ab8a 100644
--- a/arch/sh/lib/ashlsi3.S
+++ b/arch/sh/lib/ashlsi3.S
@@ -54,21 +54,38 @@ Boston, MA 02110-1301, USA. */
!
! (none)
!
+! __ashlsi3_r0
+!
+! Entry:
+!
+! r4: Value to shift
+! r0: Shifts
+!
+! Exit:
+!
+! r0: Result
+!
+! Destroys:
+!
+! (none)
+
+
.global __ashlsi3
+ .global __ashlsi3_r0
.align 2
__ashlsi3:
- mov #31,r0
- and r0,r5
+ mov r5,r0
+ .align 2
+__ashlsi3_r0:
+ and #31,r0
+ mov.l r4,@-r15
+ mov r0,r4
mova ashlsi3_table,r0
- mov.b @(r0,r5),r5
-#ifdef __sh1__
- add r5,r0
+ mov.b @(r0,r4),r4
+ add r4,r0
jmp @r0
-#else
- braf r5
-#endif
- mov r4,r0
+ mov.l @r15+,r0
.align 2
ashlsi3_table:
diff --git a/arch/sh/lib/ashrsi3.S b/arch/sh/lib/ashrsi3.S
index 6f3cf46b77c2c1..602599d8020911 100644
--- a/arch/sh/lib/ashrsi3.S
+++ b/arch/sh/lib/ashrsi3.S
@@ -54,22 +54,37 @@ Boston, MA 02110-1301, USA. */
!
! (none)
!
+! __ashrsi3_r0
+!
+! Entry:
+!
+! r4: Value to shift
+! r0: Shifts
+!
+! Exit:
+!
+! r0: Result
+!
+! Destroys:
+!
+! (none)
.global __ashrsi3
+ .global __ashrsi3_r0
.align 2
__ashrsi3:
- mov #31,r0
- and r0,r5
+ mov r5,r0
+ .align 2
+__ashrsi3_r0:
+ and #31,r0
+ mov.l r4,@-r15
+ mov r0,r4
mova ashrsi3_table,r0
- mov.b @(r0,r5),r5
-#ifdef __sh1__
- add r5,r0
+ mov.b @(r0,r4),r4
+ add r4,r0
jmp @r0
-#else
- braf r5
-#endif
- mov r4,r0
+ mov.l @r15+,r0
.align 2
ashrsi3_table:
diff --git a/arch/sh/lib/lshrsi3.S b/arch/sh/lib/lshrsi3.S
index 1e7aaa55713035..f2a6959f526d30 100644
--- a/arch/sh/lib/lshrsi3.S
+++ b/arch/sh/lib/lshrsi3.S
@@ -54,21 +54,37 @@ Boston, MA 02110-1301, USA. */
!
! (none)
!
+! __lshrsi3_r0
+!
+! Entry:
+!
+! r4: Value to shift
+! r0: Shifts
+!
+! Exit:
+!
+! r0: Result
+!
+! Destroys:
+!
+! (none)
+!
.global __lshrsi3
+ .global __lshrsi3_r0
.align 2
__lshrsi3:
- mov #31,r0
- and r0,r5
+ mov r5,r0
+ .align 2
+__lshrsi3_r0:
+ and #31,r0
+ mov.l r4,@-r15
+ mov r0,r4
mova lshrsi3_table,r0
- mov.b @(r0,r5),r5
-#ifdef __sh1__
- add r5,r0
+ mov.b @(r0,r4),r4
+ add r4,r0
jmp @r0
-#else
- braf r5
-#endif
- mov r4,r0
+ mov.l @r15+,r0
.align 2
lshrsi3_table:
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c
index e7af6a65baab91..8c51a0e94854e2 100644
--- a/arch/sh/mm/gup.c
+++ b/arch/sh/mm/gup.c
@@ -258,7 +258,8 @@ slow_irqon:
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index 8c2a8c937540ff..c1263fc390db6d 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -25,6 +25,7 @@
#define HPAGE_MASK (~(HPAGE_SIZE - 1UL))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT))
#endif
#ifndef __ASSEMBLY__
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index bde59825d06c8c..3d7b925f65168a 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -222,32 +222,8 @@ register struct thread_info *current_thread_info_reg asm("g6");
*
* Note that there are only 8 bits available.
*/
-#define TS_RESTORE_SIGMASK 0x0001 /* restore signal mask in do_signal() */
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
#define thread32_stack_is_64bit(__SP) (((__SP) & 0x1) != 0)
#define test_thread_64bit_stack(__SP) \
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index f87a55d7709469..9b3f2e212b3775 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -908,7 +908,7 @@ static int register_services(struct ds_info *dp)
pbuf.req.handle = cp->handle;
pbuf.req.major = 1;
pbuf.req.minor = 0;
- strcpy(pbuf.req.svc_id, cp->service_id);
+ strcpy(pbuf.id_buf, cp->service_id);
err = __ds_send(lp, &pbuf, msg_len);
if (err > 0)
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 9f9614df9e1e54..c2b202d763a16a 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
}
}
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+ void *stc, void *host_controller,
+ struct platform_device *op,
+ int numa_node)
+{
+ sd->iommu = iommu;
+ sd->stc = stc;
+ sd->host_controller = host_controller;
+ sd->op = op;
+ sd->numa_node = numa_node;
+}
+
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
if (!dev)
return NULL;
+ op = of_find_device_by_node(node);
sd = &dev->dev.archdata;
- sd->iommu = pbm->iommu;
- sd->stc = &pbm->stc;
- sd->host_controller = pbm;
- sd->op = op = of_find_device_by_node(node);
- sd->numa_node = pbm->numa_node;
-
+ pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+ pbm->numa_node);
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
@@ -1003,9 +1012,13 @@ int pcibios_add_device(struct pci_dev *dev)
* Copy dev_archdata from PF to VF
*/
if (dev->is_virtfn) {
+ struct dev_archdata *psd;
+
pdev = dev->physfn;
- memcpy(&dev->dev.archdata, &pdev->dev.archdata,
- sizeof(struct dev_archdata));
+ psd = &pdev->dev.archdata;
+ pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+ psd->stc, psd->host_controller, NULL,
+ psd->numa_node);
}
return 0;
}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6596f66ce1126f..a5d0c2f0811010 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -926,6 +926,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc)
sparc_perf_event_update(cp, &cp->hw,
cpuc->current_idx[i]);
cpuc->current_idx[i] = PIC_NO_INDEX;
+ if (cp->hw.state & PERF_HES_STOPPED)
+ cp->hw.state |= PERF_HES_ARCH;
}
}
}
@@ -958,10 +960,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc)
enc = perf_event_get_enc(cpuc->events[i]);
cpuc->pcr[0] &= ~mask_for_index(idx);
- if (hwc->state & PERF_HES_STOPPED)
+ if (hwc->state & PERF_HES_ARCH) {
cpuc->pcr[0] |= nop_for_index(idx);
- else
+ } else {
cpuc->pcr[0] |= event_encoding(enc, idx);
+ hwc->state = 0;
+ }
}
out:
cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
@@ -987,6 +991,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
cpuc->current_idx[i] = idx;
+ if (cp->hw.state & PERF_HES_ARCH)
+ continue;
+
sparc_pmu_start(cp, PERF_EF_RELOAD);
}
out:
@@ -1078,6 +1085,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
event->hw.state = 0;
sparc_pmu_enable_event(cpuc, &event->hw, idx);
+
+ perf_event_update_userpage(event);
}
static void sparc_pmu_stop(struct perf_event *event, int flags)
@@ -1370,9 +1379,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
cpuc->events[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PIC_NO_INDEX;
- event->hw.state = PERF_HES_UPTODATE;
+ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
if (!(ef_flags & PERF_EF_START))
- event->hw.state |= PERF_HES_STOPPED;
+ event->hw.state |= PERF_HES_ARCH;
/*
* If group events scheduling transaction was started,
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 646988d4c1a35a..740f43b9b54192 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -201,23 +201,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
asmlinkage long sys_getdomainname(char __user *name, int len)
{
- int nlen, err;
-
+ int nlen, err;
+ char tmp[__NEW_UTS_LEN + 1];
+
if (len < 0)
return -EINVAL;
- down_read(&uts_sem);
-
+ down_read(&uts_sem);
+
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
- goto out;
+ goto out_unlock;
+ memcpy(tmp, utsname()->domainname, nlen);
- err = -EFAULT;
- if (!copy_to_user(name, utsname()->domainname, nlen))
- err = 0;
+ up_read(&uts_sem);
-out:
+ if (copy_to_user(name, tmp, nlen))
+ return -EFAULT;
+ return 0;
+
+out_unlock:
up_read(&uts_sem);
return err;
}
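
Both getdomainname() reworks (this one and the 64-bit twin below) follow the same rule: snapshot the shared string into a stack buffer while holding uts_sem, drop the semaphore, and only then call copy_to_user(), which can fault and sleep. A pthread-based analogue of the pattern (names hypothetical):

    #include <pthread.h>
    #include <string.h>

    #define NAME_LEN 65
    static char domainname[NAME_LEN];
    static pthread_rwlock_t uts_lock = PTHREAD_RWLOCK_INITIALIZER;

    static int get_domainname(char *user_buf, int len)
    {
        char tmp[NAME_LEN];
        int nlen;

        pthread_rwlock_rdlock(&uts_lock);
        nlen = (int)strlen(domainname) + 1;
        if (nlen > len) {
            pthread_rwlock_unlock(&uts_lock);
            return -1;                      /* -EINVAL in the original */
        }
        memcpy(tmp, domainname, nlen);      /* snapshot under the lock */
        pthread_rwlock_unlock(&uts_lock);

        memcpy(user_buf, tmp, nlen);        /* copy_to_user() stand-in */
        return 0;
    }
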
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 98a5cf313d39a7..7301fa2091bcae 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -524,23 +524,27 @@ extern void check_pending(int signum);
SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
- int nlen, err;
+ int nlen, err;
+ char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
- down_read(&uts_sem);
-
+ down_read(&uts_sem);
+
nlen = strlen(utsname()->domainname) + 1;
err = -EINVAL;
if (nlen > len)
- goto out;
+ goto out_unlock;
+ memcpy(tmp, utsname()->domainname, nlen);
+
+ up_read(&uts_sem);
- err = -EFAULT;
- if (!copy_to_user(name, utsname()->domainname, nlen))
- err = 0;
+ if (copy_to_user(name, tmp, nlen))
+ return -EFAULT;
+ return 0;
-out:
+out_unlock:
up_read(&uts_sem);
return err;
}
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 54f98706b03b2f..5a8cb37f0a3b8e 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -145,13 +145,13 @@ ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
ENTRY(U3_retl_o2_and_7_plus_GS)
and %o2, 7, %o2
retl
- add %o2, GLOBAL_SPARE, %o2
+ add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS)
ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
add GLOBAL_SPARE, 8, GLOBAL_SPARE
and %o2, 7, %o2
retl
- add %o2, GLOBAL_SPARE, %o2
+ add %o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
#endif
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index e15f33715103a1..b01ec72522cbc9 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -487,6 +487,7 @@ good_area:
tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+ mm_rss *= REAL_HPAGE_PER_HPAGE;
if (unlikely(mm_rss >
mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 2e5c4fc2daa91e..150f48303fb0fd 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -250,7 +250,8 @@ slow:
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- (end - start) >> PAGE_SHIFT, write, 0, pages);
+ (end - start) >> PAGE_SHIFT, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index 3659d37b4d818e..c56a195c90719f 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -174,10 +174,25 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
return;
if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
- if (pmd_val(pmd) & _PAGE_PMD_HUGE)
- mm->context.thp_pte_count++;
- else
- mm->context.thp_pte_count--;
+ /*
+ * Note that this routine only sets pmds for THP pages.
+ * Hugetlb pages are handled elsewhere. We need to check
+ * for huge zero page. Huge zero pages are like hugetlb
+ * pages in that there is no RSS, but there is the need
+ * for TSB entries. So, huge zero page counts go into
+ * hugetlb_pte_count.
+ */
+ if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+ if (is_huge_zero_page(pmd_page(pmd)))
+ mm->context.hugetlb_pte_count++;
+ else
+ mm->context.thp_pte_count++;
+ } else {
+ if (is_huge_zero_page(pmd_page(orig)))
+ mm->context.hugetlb_pte_count--;
+ else
+ mm->context.thp_pte_count--;
+ }
/* Do not try to allocate the TSB hash table if we
* don't have one already. We have various locks held
@@ -204,6 +219,9 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
}
}
+/*
+ * This routine is only called when splitting a THP
+ */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
@@ -213,6 +231,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
set_pmd_at(vma->vm_mm, address, pmdp, entry);
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+ /*
+ * set_pmd_at() will not be called in a way to decrement
+ * thp_pte_count when splitting a THP, so do it now.
+ * Sanity check pmd before doing the actual decrement.
+ */
+ if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+ !is_huge_zero_page(pmd_page(entry)))
+ (vma->vm_mm)->context.thp_pte_count--;
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 2664112916341f..84cd593117a6da 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -489,8 +489,10 @@ retry_tsb_alloc:
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ unsigned long mm_rss = get_mm_rss(mm);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- unsigned long total_huge_pte_count;
+ unsigned long saved_hugetlb_pte_count;
+ unsigned long saved_thp_pte_count;
#endif
unsigned int i;
@@ -503,10 +505,12 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
* will re-increment the counters as the parent PTEs are
* copied into the child address space.
*/
- total_huge_pte_count = mm->context.hugetlb_pte_count +
- mm->context.thp_pte_count;
+ saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+ saved_thp_pte_count = mm->context.thp_pte_count;
mm->context.hugetlb_pte_count = 0;
mm->context.thp_pte_count = 0;
+
+ mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
#endif
/* copy_mm() copies over the parent's mm_struct before calling
@@ -519,11 +523,13 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
/* If this is fork, inherit the parent's TSB size. We would
* grow it to that size on the first page fault anyways.
*/
- tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));
+ tsb_grow(mm, MM_TSB_BASE, mm_rss);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (unlikely(total_huge_pte_count))
- tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count);
+ if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+ tsb_grow(mm, MM_TSB_HUGE,
+ (saved_hugetlb_pte_count + saved_thp_pte_count) *
+ REAL_HPAGE_PER_HPAGE);
#endif
if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
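
The RSS adjustments in this file and in fault_64.c above are unit conversions. Assuming the usual sparc64 values (8K base pages, 8M hugepages built from two 4M "real" hugepages), each THP counts as 1024 base pages in RSS but needs two huge-TSB entries:

    #include <assert.h>

    enum {
        PAGE_SHIFT       = 13,   /* 8K  */
        REAL_HPAGE_SHIFT = 22,   /* 4M  */
        HPAGE_SHIFT      = 23,   /* 8M  */
        REAL_HPAGE_PER_HPAGE = 1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT),
        PAGES_PER_HPAGE      = 1 << (HPAGE_SHIFT - PAGE_SHIFT),
    };

    static_assert(REAL_HPAGE_PER_HPAGE == 2, "two TSB entries per hugepage");
    static_assert(PAGES_PER_HPAGE == 1024, "RSS counts 1024 base pages");
    /* so: base-TSB sizing subtracts thp_count * 1024 from RSS, and the
     * huge TSB is sized for (hugetlb + thp) * 2 entries */
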
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index dc1fb28d963627..958629bb6993c1 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -78,7 +78,7 @@ struct thread_info {
#ifndef __ASSEMBLY__
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
@@ -162,32 +162,5 @@ extern void _cpu_idle(void);
#ifdef __tilegx__
#define TS_COMPAT 0x0001 /* 32-bit compatibility mode */
#endif
-#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal */
-
-#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-#endif /* !__ASSEMBLY__ */
#endif /* _ASM_TILE_THREAD_INFO_H */
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 7d5769310bef88..a97ab1a69a9093 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
/*
* Release a thread_info structure
*/
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *info = (void *)stack;
struct single_step_state *step_state = info->step_state;
if (step_state) {
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 9ccf462131c48f..d9cd7ed278346a 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -59,10 +59,14 @@ KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/um
# Same things for in6addr_loopback and mktime - found in libc. For these two we
# only get link-time error, luckily.
#
+# -Dlongjmp=kernel_longjmp prevents anything from referencing the libpthread.a
+# embedded copy of longjmp, same thing for setjmp.
+#
# These apply to USER_CFLAGS too.
KBUILD_CFLAGS += $(CFLAGS) $(CFLAGS-y) -D__arch_um__ \
$(ARCH_INCLUDE) $(MODE_INCLUDE) -Dvmap=kernel_vmap \
+ -Dlongjmp=kernel_longjmp -Dsetjmp=kernel_setjmp \
-Din6addr_loopback=kernel_in6addr_loopback \
-Din6addr_any=kernel_in6addr_any -Dstrrchr=kernel_strrchr
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 18eb9924dda382..aeb43021294737 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_wrprotect(pte_t pte)
{
- pte_clear_bits(pte, _PAGE_RW);
+ if (likely(pte_get_bits(pte, _PAGE_RW)))
+ pte_clear_bits(pte, _PAGE_RW);
+ else
+ return pte;
return(pte_mknewprot(pte));
}
static inline pte_t pte_mkread(pte_t pte)
{
+ if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+ return pte;
pte_set_bits(pte, _PAGE_USER);
return(pte_mknewprot(pte));
}
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
+ if (unlikely(pte_get_bits(pte, _PAGE_RW)))
+ return pte;
pte_set_bits(pte, _PAGE_RW);
return(pte_mknewprot(pte));
}
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index b856c66ebd3a2b..6dbf27ffafc847 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -585,6 +585,11 @@ int start_idle_thread(void *stack, jmp_buf *switch_buf)
fatal_sigsegv();
}
longjmp(*switch_buf, 1);
+
+ /* unreachable */
+ printk(UM_KERN_ERR "impossible long jump!");
+ fatal_sigsegv();
+ return 0;
}
void initial_thread_cb_skas(void (*proc)(void *), void *arg)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 0650b4ab9b79f7..1cf4212ced5818 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_PMEM_API if X86_64
@@ -42,7 +43,6 @@ config X86
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if X86_64
select ARCH_USE_QUEUED_RWLOCKS
- select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANT_FRAME_POINTERS
@@ -81,6 +81,7 @@ config X86
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_AOUT if X86_32
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
@@ -91,6 +92,7 @@ config X86
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select HAVE_BPF_JIT if X86_64
+ select HAVE_ARCH_WITHIN_STACK_FRAMES
select HAVE_EBPF_JIT if X86_64
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
@@ -152,11 +154,13 @@ config X86
select SPARSE_IRQ
select SRCU
select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
select USER_STACKTRACE_SUPPORT
select VIRT_TO_BUS
select X86_DEV_DMA_OPS if X86_64
select X86_FEATURE_NAMES if PROC_FS
select ARCH_HAS_ALT_SYSCALL
+ select ARCH_USES_HIGH_VMA_FLAGS if X86_INTEL_MEMORY_PROTECTION_KEYS
config INSTRUCTION_DECODER
def_bool y
@@ -303,6 +307,9 @@ config ARCH_SUPPORTS_UPROBES
config FIX_EARLYCON_MEM
def_bool y
+config DEBUG_RODATA
+ def_bool y
+
config PGTABLE_LEVELS
int
default 4 if X86_64
@@ -363,6 +370,17 @@ config X86_FEATURE_NAMES
If in doubt, say Y.
+config X86_FAST_FEATURE_TESTS
+ bool "Fast CPU feature tests" if EMBEDDED
+ default y
+ ---help---
+ Some fast-paths in the kernel depend on the capabilities of the CPU.
+ Say Y here for the kernel to patch in the appropriate code at runtime
+ based on the capabilities of the CPU. The infrastructure for patching
+ code at runtime takes up some additional space; space-constrained
+ embedded systems may wish to say N here to produce smaller, slightly
+ slower code.
+
config X86_X2APIC
bool "Support x2apic"
depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
@@ -1685,7 +1703,9 @@ config X86_SMAP
config X86_INTEL_MPX
prompt "Intel MPX (Memory Protection Extensions)"
def_bool n
- depends on CPU_SUP_INTEL
+ # Note: only available in 64-bit mode due to VMA flags shortage
+ depends on CPU_SUP_INTEL && X86_64
+ select ARCH_USES_HIGH_VMA_FLAGS
---help---
MPX provides hardware features that can be used in
conjunction with compiler-instrumented code to check
@@ -1977,14 +1997,8 @@ config PHYSICAL_ALIGN
Don't change this unless you know what you are doing.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
+ def_bool y
depends on SMP
- ---help---
- Say Y here to allow turning CPUs off and on. CPUs can be
- controlled through /sys/devices/system/cpu.
- ( Note: power management support will enable this option
- automatically on SMP systems. )
- Say N if you want to disable CPU hotplug.
config BOOTPARAM_HOTPLUG_CPU0
bool "Set default setting of cpu0_hotpluggable"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index da00fe1f48f4c0..3cb8e179f2f23f 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -91,28 +91,16 @@ config EFI_PGT_DUMP
issues with the mapping of the EFI runtime regions into that
table.
-config DEBUG_RODATA
- bool "Write protect kernel read-only data structures"
- default y
- depends on DEBUG_KERNEL
- ---help---
- Mark the kernel read-only data as write-protected in the pagetables,
- in order to catch accidental (and incorrect) writes to such const
- data. This is recommended so that we can catch kernel bugs sooner.
- If in doubt, say "Y".
-
config DEBUG_RODATA_TEST
- bool "Testcase for the DEBUG_RODATA feature"
- depends on DEBUG_RODATA
+ bool "Testcase for the marking rodata read-only"
default y
---help---
- This option enables a testcase for the DEBUG_RODATA
- feature as well as for the change_page_attr() infrastructure.
+ This option enables a testcase for setting rodata read-only
+ as well as for the change_page_attr() infrastructure.
If in doubt, say "N"
config DEBUG_WX
bool "Warn on W+X mappings at boot"
- depends on DEBUG_RODATA
select X86_PTDUMP_CORE
---help---
Generate a warning if any W+X mappings are found at boot.
@@ -367,16 +355,6 @@ config DEBUG_IMR_SELFTEST
If unsure say N here.
-config X86_DEBUG_STATIC_CPU_HAS
- bool "Debug alternatives"
- depends on DEBUG_KERNEL
- ---help---
- This option causes additional code to be generated which
- fails if static_cpu_has() is used before alternatives have
- run.
-
- If unsure, say N.
-
config X86_DEBUG_FPU
bool "Debug the x86 FPU code"
depends on DEBUG_KERNEL
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index e2551054135dfb..dc267f67a2b570 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -11,6 +11,16 @@ else
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
+# For gcc stack alignment is specified with -mpreferred-stack-boundary,
+# clang has the option -mstack-alignment for that purpose.
+ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
+ cc_stack_align4 := -mpreferred-stack-boundary=2
+ cc_stack_align8 := -mpreferred-stack-boundary=3
+else ifneq ($(call cc-option, -mstack-alignment=16),)
+ cc_stack_align4 := -mstack-alignment=4
+ cc_stack_align8 := -mstack-alignment=8
+endif
+
# How to compile the 16-bit code. Note we always compile for -march=i386;
# that way we can complain to the user if the CPU is insufficient.
#
@@ -28,8 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
-REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS),\
- -mpreferred-stack-boundary=2,-mstack-alignment=4)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
export REALMODE_CFLAGS
# BITS is used as extension for files which are available in a 32 bit
@@ -69,7 +78,7 @@ ifeq ($(CONFIG_X86_32),y)
# Align the stack to the register width instead of using the default
# alignment of 16 bytes. This reduces stack usage and the number of
# alignment instructions.
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2,-mstack-alignment=4)
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align4))
# Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
# a lot more stack due to the lack of sharing of stacklots:
@@ -108,7 +117,7 @@ else
# default alignment which keep the stack *mis*aligned.
# Furthermore an alignment to the register width reduces stack usage
# and the number of alignment instructions.
- KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3,-mstack-alignment=8)
+ KBUILD_CFLAGS += $(call cc-option,$(cc_stack_align8))
# Use -mskip-rax-setup if supported.
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 4a5fbd2da65806..21332b431f10b3 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -1,5 +1,6 @@
#include "misc.h"
+#include <asm/asm.h>
#include <asm/msr.h>
#include <asm/archrandom.h>
#include <asm/e820.h>
@@ -24,8 +25,8 @@ static inline u16 i8254(void)
u16 status, timer;
do {
- outb(I8254_PORT_CONTROL,
- I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+ I8254_PORT_CONTROL);
status = inb(I8254_PORT_COUNTER0);
timer = inb(I8254_PORT_COUNTER0);
timer |= inb(I8254_PORT_COUNTER0) << 8;
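
The aslr.c fix is a swapped-argument bug: Linux's port-I/O helpers take the value first and the port second (outb(value, port)), the opposite of the outb(port, value) convention seen elsewhere, and since both arguments are plain integers the compiler cannot object. One defensive option, sketched here, is to give the port a distinct type so a swap fails to compile:

    typedef struct { unsigned short port; } io_port_t;

    static inline void outb_checked(unsigned char value, io_port_t port)
    {
        /* real port write elided; the point is that swapping the
         * arguments is now a type error instead of a silent bug */
        (void)value; (void)port;
    }
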
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 16df89c30c201e..68cd61566bbac7 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -109,7 +109,7 @@
#define memzero(s, n) memset((s), 0, (n))
-static void error(char *m);
+static void error(char *m) __noreturn;
/*
* This is set up by the setup-routine at boot-time
@@ -464,3 +464,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
+
+void fortify_panic(const char *name)
+{
+ error("detected buffer overflow");
+}
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
index ea97697e51e40f..4cb404fd45ceaa 100644
--- a/arch/x86/boot/cpuflags.h
+++ b/arch/x86/boot/cpuflags.h
@@ -1,7 +1,7 @@
#ifndef BOOT_CPUFLAGS_H
#define BOOT_CPUFLAGS_H
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/processor-flags.h>
struct cpu_features {
diff --git a/arch/x86/boot/mkcpustr.c b/arch/x86/boot/mkcpustr.c
index 637097e66a62a6..f72498dc90d237 100644
--- a/arch/x86/boot/mkcpustr.c
+++ b/arch/x86/boot/mkcpustr.c
@@ -17,7 +17,7 @@
#include "../include/asm/required-features.h"
#include "../include/asm/disabled-features.h"
-#include "../include/asm/cpufeature.h"
+#include "../include/asm/cpufeatures.h"
#include "../kernel/cpu/capflags.c"
int main(void)
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index a7661c430cd98d..523db6ce88dd0c 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -391,6 +391,13 @@ int main(int argc, char ** argv)
die("Unable to mmap '%s': %m", argv[2]);
/* Number of 16-byte paragraphs, including space for a 4-byte CRC */
sys_size = (sz + 15 + 4) / 16;
+#ifdef CONFIG_EFI_STUB
+ /*
+ * COFF requires minimum 32-byte alignment of sections, and
+ * adding a signature is problematic without that alignment.
+ */
+ sys_size = (sys_size + 1) & ~1;
+#endif
/* Patch the setup code with the appropriate size parameters */
buf[0x1f1] = setup_sectors-1;
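
The alignment expression rounds the count of 16-byte paragraphs up to the next even value, which is exactly what 32-byte COFF section alignment means in paragraph units:

    #include <assert.h>

    #define ROUND_EVEN(n) (((n) + 1) & ~1)

    static_assert(ROUND_EVEN(6) == 6, "even counts are unchanged");
    static_assert(ROUND_EVEN(7) == 8, "odd counts round up");
    /* 8 paragraphs x 16 bytes = 128 bytes, a 32-byte multiple */
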
diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 722bacea040e71..75b9d43069f115 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -77,6 +77,7 @@ static int chacha20_simd(struct blkcipher_desc *desc, struct scatterlist *dst,
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, CHACHA20_BLOCK_SIZE);
+ desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
crypto_chacha20_init(state, crypto_blkcipher_ctx(desc->tfm), walk.iv);
@@ -125,7 +126,7 @@ static struct crypto_alg alg = {
static int __init chacha20_simd_mod_init(void)
{
- if (!cpu_has_ssse3)
+ if (!boot_cpu_has(X86_FEATURE_SSSE3))
return -ENODEV;
#ifdef CONFIG_AS_AVX2
diff --git a/arch/x86/crypto/crc32-pclmul_glue.c b/arch/x86/crypto/crc32-pclmul_glue.c
index 07d2c6c86a5483..27226df3f7d8aa 100644
--- a/arch/x86/crypto/crc32-pclmul_glue.c
+++ b/arch/x86/crypto/crc32-pclmul_glue.c
@@ -33,7 +33,7 @@
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index 81a595d75cf595..c194d5717ae51e 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -30,7 +30,7 @@
#include <linux/kernel.h>
#include <crypto/internal/hash.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/internal.h>
@@ -48,26 +48,13 @@
#ifdef CONFIG_X86_64
/*
* use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
* for fpu state save/restore overhead.
*/
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#if defined(X86_FEATURE_EAGER_FPU)
-#define set_pcl_breakeven_point() \
-do { \
- if (!use_eager_fpu()) \
- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
-#else
-#define set_pcl_breakeven_point() \
- (crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU)
-#endif
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -190,7 +177,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -202,7 +189,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
@@ -257,11 +244,10 @@ static int __init crc32c_intel_mod_init(void)
if (!x86_match_cpu(crc32c_cpu_id))
return -ENODEV;
#ifdef CONFIG_X86_64
- if (cpu_has_pclmulqdq) {
+ if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
- set_pcl_breakeven_point();
}
#endif
return crypto_register_shash(&alg);
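
With the eager-FPU mode gone, FPU save/restore cost no longer varies at run time, so the breakeven collapses to a single compile-time constant and the init-time set_pcl_breakeven_point() hook disappears. The resulting dispatch, reduced to a hedged sketch (crc32c_dispatch() is hypothetical; crc_pcl() and crc32c_intel_le_hw() come from the file above):

static u32 crc32c_dispatch(u32 crc, const u8 *data, unsigned int len)
{
        /* PCLMULQDQ only wins once len amortizes kernel_fpu_begin()/end() */
        if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                kernel_fpu_begin();
                crc = crc_pcl(data, len, crc);
                kernel_fpu_end();
                return crc;
        }
        return crc32c_intel_le_hw(crc, data, len);      /* CRC32 insn path */
}
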
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
index a3fcfc97a311d5..cd4df93225014b 100644
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ b/arch/x86/crypto/crct10dif-pclmul_glue.c
@@ -30,7 +30,7 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/fpu/api.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/cpu_device_id.h>
asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index f08c8fd418d715..1e2a1472512169 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -27,17 +27,11 @@
#include <asm/traps.h>
#include <asm/vdso.h>
#include <asm/uaccess.h>
+#include <asm/cpufeature.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
-static struct thread_info *pt_regs_to_thread_info(struct pt_regs *regs)
-{
- unsigned long top_of_stack =
- (unsigned long)(regs + 1) + TOP_OF_KERNEL_STACK_PADDING;
- return (struct thread_info *)(top_of_stack - THREAD_SIZE);
-}
-
#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible void enter_from_user_mode(void)
@@ -76,7 +70,7 @@ static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
*/
unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
{
- struct thread_info *ti = pt_regs_to_thread_info(regs);
+ struct thread_info *ti = current_thread_info();
unsigned long ret = 0;
u32 work;
@@ -165,7 +159,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
unsigned long phase1_result)
{
- struct thread_info *ti = pt_regs_to_thread_info(regs);
+ struct thread_info *ti = current_thread_info();
long ret = 0;
u32 work = ACCESS_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;
@@ -210,7 +204,7 @@ long syscall_trace_enter_phase2(struct pt_regs *regs, u32 arch,
long syscall_trace_enter(struct pt_regs *regs)
{
- u32 arch = is_ia32_task() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
+ u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
unsigned long phase1_result = syscall_trace_enter_phase1(regs, arch);
if (phase1_result == 0)
@@ -258,18 +252,17 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
/* Disable IRQs and retry */
local_irq_disable();
- cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags);
+ cached_flags = READ_ONCE(current_thread_info()->flags);
if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
break;
-
}
}
/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
- struct thread_info *ti = pt_regs_to_thread_info(regs);
+ struct thread_info *ti = current_thread_info();
u32 cached_flags;
if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
@@ -289,8 +282,12 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
* handling, because syscall restart has a fixup for compat
* syscalls. The fixup is exercised by the ptrace_syscall_32
* selftest.
+ *
+ * We also need to clear TS_REGS_POKED_I386: the 32-bit tracer
+ * special case only applies after poking regs and before the
+ * very next return to user mode.
*/
- ti->status &= ~TS_COMPAT;
+ current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif
user_enter();
@@ -328,7 +325,7 @@ static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
*/
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
- struct thread_info *ti = pt_regs_to_thread_info(regs);
+ struct thread_info *ti = current_thread_info();
u32 cached_flags = READ_ONCE(ti->flags);
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
@@ -364,11 +361,11 @@ static
#endif
__always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
- struct thread_info *ti = pt_regs_to_thread_info(regs);
+ struct thread_info *ti = current_thread_info();
unsigned int nr = (unsigned int)regs->orig_ax;
#ifdef CONFIG_IA32_EMULATION
- ti->status |= TS_COMPAT;
+ current->thread.status |= TS_COMPAT;
#endif
if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
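
pt_regs_to_thread_info() only worked while thread_info lived at the bottom of the kernel stack; this branch carries the rework that moves it into task_struct (note that TS_COMPAT now sits in current->thread.status), so current_thread_info() becomes the one correct lookup. The contrast, as a sketch (the _demo names are hypothetical):

/* old: derived from the stack the pt_regs lives on */
static struct thread_info *ti_old_demo(struct pt_regs *regs)
{
        unsigned long top = (unsigned long)(regs + 1) +
                            TOP_OF_KERNEL_STACK_PADDING;

        return (struct thread_info *)(top - THREAD_SIZE);
}

/* new: reachable only through the task, never through the stack */
static struct thread_info *ti_new_demo(void)
{
        return current_thread_info();
}
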
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d437f3871e5322..d60d0aa5b2aeca 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -40,7 +40,7 @@
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
@@ -205,13 +205,58 @@
POP_GS_EX
.endm
+/*
+ * %eax: prev task
+ * %edx: next task
+ */
+ENTRY(__switch_to_asm)
+ /*
+ * Save callee-saved registers
+ * This must match the order in struct inactive_task_frame
+ */
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+
+ /* switch stack */
+ movl %esp, TASK_threadsp(%eax)
+ movl TASK_threadsp(%edx), %esp
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+ movl TASK_stack_canary(%edx), %ebx
+ movl %ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
+#endif
+
+#ifdef CONFIG_RETPOLINE
+ /*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
+
+ /* restore callee-saved registers */
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+
+ jmp __switch_to
+END(__switch_to_asm)
+
+/*
+ * A newly forked process directly context switches into this address.
+ *
+ * eax: prev task we switched from
+ */
ENTRY(ret_from_fork)
pushl %eax
call schedule_tail
- GET_THREAD_INFO(%ebp)
popl %eax
- pushl $0x0202 # Reset kernel eflags
- popfl
/* When we fork, we trace the syscall return in the child, too. */
movl %esp, %eax
@@ -222,10 +267,7 @@ END(ret_from_fork)
ENTRY(ret_from_kernel_thread)
pushl %eax
call schedule_tail
- GET_THREAD_INFO(%ebp)
popl %eax
- pushl $0x0202 # Reset kernel eflags
- popfl
movl PT_EBP(%esp), %eax
movl PT_EBX(%esp), %edx
CALL_NOSPEC %edx
@@ -253,7 +295,6 @@ ENDPROC(ret_from_kernel_thread)
ret_from_exception:
preempt_stop(CLBR_ANY)
ret_from_intr:
- GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
movb PT_CS(%esp), %al
@@ -303,6 +344,23 @@ sysenter_past_esp:
SAVE_ALL pt_regs_ax=$-ENOSYS /* save rest */
/*
+ * SYSENTER doesn't filter flags, so we need to clear NT and AC
+ * ourselves. To save a few cycles, we can check whether
+ * either was set instead of doing an unconditional popfl.
+ * This needs to happen before enabling interrupts so that
+ * we don't get preempted with NT set.
+ *
+ * N.B.: .Lsysenter_fix_flags is a label with the code under it moved
+ * out-of-line as an optimization: NT is unlikely to be set in the
+ * majority of the cases and instead of polluting the I$ unnecessarily,
+ * we're keeping that code behind a branch which will predict as
+ * not-taken and therefore its instructions won't be fetched.
+ */
+ testl $X86_EFLAGS_NT|X86_EFLAGS_AC, PT_EFLAGS(%esp)
+ jnz .Lsysenter_fix_flags
+.Lsysenter_flags_fixed:
+
+ /*
* User mode is traced as though IRQs are on, and SYSENTER
* turned them off.
*/
@@ -339,6 +397,11 @@ sysenter_past_esp:
.popsection
_ASM_EXTABLE(1b, 2b)
PTGS_TO_GS_EX
+
+.Lsysenter_fix_flags:
+ pushl $X86_EFLAGS_FIXED
+ popfl
+ jmp .Lsysenter_flags_fixed
ENDPROC(entry_SYSENTER_32)
# system call handler stub
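
The push sequence in __switch_to_asm above is an ABI shared with C code: the scheduler treats the saved area as a structure, so the push order must match that layout exactly, with the last push at the lowest address. A hedged sketch of the 32-bit frame (simplified; the real definition belongs to the backported switch_to rework):

struct inactive_task_frame_32_demo {
        unsigned long si;               /* pushl %esi (last push) */
        unsigned long di;               /* pushl %edi */
        unsigned long bx;               /* pushl %ebx */
        unsigned long bp;               /* pushl %ebp (first push) */
        unsigned long ret_addr;         /* consumed by __switch_to's ret */
};
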
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 513ff4c305c663..19a136c376760a 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -90,7 +90,7 @@ ENDPROC(native_usergs_sysret64)
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@@ -174,12 +174,15 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r11 /* pt_regs->r11 */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
- testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ movq PER_CPU_VAR(current_task), %r11
+ testl $_TIF_WORK_SYSCALL_ENTRY, TASK_TI_flags(%r11)
jnz tracesys
+
entry_SYSCALL_64_fastpath:
#if __SYSCALL_MASK == ~0
# ifdef CONFIG_ALT_SYSCALL
- movl ASM_THREAD_INFO(TI_nr_syscalls, %rsp, SIZEOF_PTREGS), %r11d
+ movq PER_CPU_VAR(current_task), %r11
+ movl TASK_TI_nr_syscalls(%r11), %r11d
cmpq %r11, %rax
# else
cmpq $NR_syscalls, %rax
@@ -187,7 +190,9 @@ entry_SYSCALL_64_fastpath:
#else
andl $__SYSCALL_MASK, %eax
# ifdef CONFIG_ALT_SYSCALL
- cmpl ASM_THREAD_INFO(TI_nr_syscalls, %rsp, SIZEOF_PTREGS), %eax
+ movq PER_CPU_VAR(current_task), %r11
+ movl TASK_TI_nr_syscalls(%r11), %r11d
+ cmpl %r11d, %eax
# else
cmpl $NR_syscalls, %eax
# endif /* CONFIG_ALT_SYSCALL */
@@ -197,7 +201,8 @@ entry_SYSCALL_64_fastpath:
and %rcx, %rax
movq %r10, %rcx
#ifdef CONFIG_ALT_SYSCALL
- movq ASM_THREAD_INFO(TI_sys_call_table, %rsp, SIZEOF_PTREGS), %r11
+ movq PER_CPU_VAR(current_task), %r11
+ movq TASK_TI_sys_call_table(%r11), %r11
# ifdef CONFIG_RETPOLINE
movq 0(%r11, %rax, 8), %rax
call __x86_indirect_thunk_rax
@@ -234,7 +239,8 @@ entry_SYSCALL_64_fastpath:
* flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
* very bad.
*/
- testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+ movq PER_CPU_VAR(current_task), %r11
+ testl $_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
jnz int_ret_from_sys_call_irqs_off /* Go to the slow path */
movq RIP(%rsp), %rcx
@@ -298,7 +304,8 @@ tracesys_phase2:
RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
# ifdef CONFIG_ALT_SYSCALL
- movl ASM_THREAD_INFO(TI_nr_syscalls, %rsp, SIZEOF_PTREGS), %r11d
+ movq PER_CPU_VAR(current_task), %r11
+ movl TASK_TI_nr_syscalls(%r11), %r11d
cmpq %r11, %rax
# else
cmpq $NR_syscalls, %rax
@@ -306,7 +313,9 @@ tracesys_phase2:
#else
andl $__SYSCALL_MASK, %eax
# ifdef CONFIG_ALT_SYSCALL
- cmpl ASM_THREAD_INFO(TI_nr_syscalls, %rsp, SIZEOF_PTREGS), %eax
+ movq PER_CPU_VAR(current_task), %r11
+ movl TASK_TI_nr_syscalls(%r11), %r11d
+ cmpl %r11d, %eax
# else
cmpl $NR_syscalls, %eax
# endif /* CONFIG_ALT_SYSCALL */
@@ -316,7 +325,8 @@ tracesys_phase2:
and %rcx, %rax
movq %r10, %rcx /* fixup for C */
#ifdef CONFIG_ALT_SYSCALL
- movq ASM_THREAD_INFO(TI_sys_call_table, %rsp, SIZEOF_PTREGS), %r11
+ movq PER_CPU_VAR(current_task), %r11
+ movq TASK_TI_sys_call_table(%r11), %r11
# ifdef CONFIG_RETPOLINE
movq 0(%r11, %rax, 8), %rax
call __x86_indirect_thunk_rax
@@ -514,17 +524,59 @@ END(stub_x32_rt_sigreturn)
#endif
/*
- * A newly forked process directly context switches into this address.
- *
- * rdi: prev task we switched from
+ * %rdi: prev task
+ * %rsi: next task
*/
-ENTRY(ret_from_fork)
+ENTRY(__switch_to_asm)
+ /*
+ * Save callee-saved registers
+ * This must match the order in inactive_task_frame
+ */
+ pushq %rbp
+ pushq %rbx
+ pushq %r12
+ pushq %r13
+ pushq %r14
+ pushq %r15
+
+ /* switch stack */
+ movq %rsp, TASK_threadsp(%rdi)
+ movq TASK_threadsp(%rsi), %rsp
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+ movq TASK_stack_canary(%rsi), %rbx
+ movq %rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
+#endif
+
+#ifdef CONFIG_RETPOLINE
+ /*
+ * When switching from a shallower to a deeper call stack
+ * the RSB may either underflow or use entries populated
+ * with userspace addresses. On CPUs where those concerns
+ * exist, overwrite the RSB with entries which capture
+ * speculative execution to prevent attack.
+ */
+ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
+#endif
- LOCK ; btr $TIF_FORK, TI_flags(%r8)
+ /* restore callee-saved registers */
+ popq %r15
+ popq %r14
+ popq %r13
+ popq %r12
+ popq %rbx
+ popq %rbp
- pushq $0x0002
- popfq /* reset kernel eflags */
+ jmp __switch_to
+END(__switch_to_asm)
+/*
+ * A newly forked process directly context switches into this address.
+ *
+ * rax: prev task we switched from
+ */
+ENTRY(ret_from_fork)
+ movq %rax, %rdi
call schedule_tail /* rdi: 'prev' task parameter */
RESTORE_EXTRA_REGS
@@ -658,7 +710,7 @@ retint_kernel:
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
@@ -894,7 +946,7 @@ ENTRY(\sym)
call \do_sym
- jmp error_exit /* %ebx: no swapgs flag */
+ jmp error_exit
.endif
END(\sym)
.endm
@@ -1156,7 +1208,6 @@ END(paranoid_exit)
/*
* Save all registers in pt_regs, and switch gs if needed.
- * Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
cld
@@ -1169,7 +1220,6 @@ ENTRY(error_entry)
* the kernel CR3 here.
*/
SWITCH_KERNEL_CR3
- xorl %ebx, %ebx
testb $3, CS+8(%rsp)
jz .Lerror_kernelspace
@@ -1203,7 +1253,6 @@ ENTRY(error_entry)
* for these here too.
*/
.Lerror_kernelspace:
- incl %ebx
leaq native_irq_return_iret(%rip), %rcx
cmpq %rcx, RIP+8(%rsp)
je .Lerror_bad_iret
@@ -1234,28 +1283,19 @@ ENTRY(error_entry)
/*
* Pretend that the exception came from user mode: set up pt_regs
- * as if we faulted immediately after IRET and clear EBX so that
- * error_exit knows that we will be returning to user mode.
+ * as if we faulted immediately after IRET.
*/
mov %rsp, %rdi
call fixup_bad_iret
mov %rax, %rsp
- decl %ebx
jmp .Lerror_entry_from_usermode_after_swapgs
END(error_entry)
-
-/*
- * On entry, EBX is a "return to kernel mode" flag:
- * 1: already in kernel mode, don't need SWAPGS
- * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
- */
ENTRY(error_exit)
- movl %ebx, %eax
DISABLE_INTERRUPTS(CLBR_NONE)
TRACE_IRQS_OFF
- testl %eax, %eax
- jnz retint_kernel
+ testb $3, CS(%rsp)
+ jz retint_kernel
jmp retint_user
END(error_exit)
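
The error_entry/error_exit hunks retire the in-register protocol where %ebx carried a came-from-kernel flag across the C handlers; error_exit now rederives the answer from the saved CS selector, which nothing can clobber. The equivalent C-level test, as a sketch (came_from_user_mode_demo() is hypothetical):

/* low two CS bits are the RPL; nonzero means user mode was interrupted */
static inline bool came_from_user_mode_demo(const struct pt_regs *regs)
{
        return (regs->cs & 3) != 0;     /* mirrors: testb $3, CS(%rsp) */
}
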
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index d03bf0e28b8b36..48c27c3fdfdbd9 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -79,24 +79,33 @@ ENTRY(entry_SYSENTER_compat)
ASM_CLAC /* Clear AC after saving FLAGS */
pushq $__USER32_CS /* pt_regs->cs */
- xorq %r8,%r8
- pushq %r8 /* pt_regs->ip = 0 (placeholder) */
+ pushq $0 /* pt_regs->ip = 0 (placeholder) */
pushq %rax /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
- pushq %r8 /* pt_regs->r8 = 0 */
- pushq %r8 /* pt_regs->r9 = 0 */
- pushq %r8 /* pt_regs->r10 = 0 */
- pushq %r8 /* pt_regs->r11 = 0 */
+ pushq $0 /* pt_regs->r8 = 0 */
+ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
+ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
+ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp (will be overwritten) */
- pushq %r8 /* pt_regs->r12 = 0 */
- pushq %r8 /* pt_regs->r13 = 0 */
- pushq %r8 /* pt_regs->r14 = 0 */
- pushq %r8 /* pt_regs->r15 = 0 */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
+ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
+ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
+ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
+ xorq %r15, %r15 /* nospec r15 */
cld
/*
@@ -185,17 +194,26 @@ ENTRY(entry_SYSCALL_compat)
pushq %rdx /* pt_regs->dx */
pushq %rbp /* pt_regs->cx (stashed in bp) */
pushq $-ENOSYS /* pt_regs->ax */
- xorq %r8,%r8
- pushq %r8 /* pt_regs->r8 = 0 */
- pushq %r8 /* pt_regs->r9 = 0 */
- pushq %r8 /* pt_regs->r10 = 0 */
- pushq %r8 /* pt_regs->r11 = 0 */
+ pushq $0 /* pt_regs->r8 = 0 */
+ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
+ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
+ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp (will be overwritten) */
- pushq %r8 /* pt_regs->r12 = 0 */
- pushq %r8 /* pt_regs->r13 = 0 */
- pushq %r8 /* pt_regs->r14 = 0 */
- pushq %r8 /* pt_regs->r15 = 0 */
+ xorl %ebp, %ebp /* nospec rbp */
+ pushq $0 /* pt_regs->r12 = 0 */
+ xorq %r12, %r12 /* nospec r12 */
+ pushq $0 /* pt_regs->r13 = 0 */
+ xorq %r13, %r13 /* nospec r13 */
+ pushq $0 /* pt_regs->r14 = 0 */
+ xorq %r14, %r14 /* nospec r14 */
+ pushq $0 /* pt_regs->r15 = 0 */
+ xorq %r15, %r15 /* nospec r15 */
/*
* User mode is traced as though IRQs are on, and SYSENTER
@@ -292,17 +310,26 @@ ENTRY(entry_INT80_compat)
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
- xorq %r8,%r8
- pushq %r8 /* pt_regs->r8 = 0 */
- pushq %r8 /* pt_regs->r9 = 0 */
- pushq %r8 /* pt_regs->r10 = 0 */
- pushq %r8 /* pt_regs->r11 = 0 */
+ pushq $0 /* pt_regs->r8 = 0 */
+ xorq %r8, %r8 /* nospec r8 */
+ pushq $0 /* pt_regs->r9 = 0 */
+ xorq %r9, %r9 /* nospec r9 */
+ pushq $0 /* pt_regs->r10 = 0 */
+ xorq %r10, %r10 /* nospec r10 */
+ pushq $0 /* pt_regs->r11 = 0 */
+ xorq %r11, %r11 /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
pushq %rbp /* pt_regs->rbp */
+ xorl %ebp, %ebp /* nospec rbp */
pushq %r12 /* pt_regs->r12 */
+ xorq %r12, %r12 /* nospec r12 */
pushq %r13 /* pt_regs->r13 */
+ xorq %r13, %r13 /* nospec r13 */
pushq %r14 /* pt_regs->r14 */
+ xorq %r14, %r14 /* nospec r14 */
pushq %r15 /* pt_regs->r15 */
+ xorq %r15, %r15 /* nospec r15 */
cld
/*
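
Each pushq $0 / xor pair above does two distinct jobs: the push writes a literal zero into the pt_regs slot, and the xor kills the stale user-controlled value in the register itself so it cannot feed a speculative-execution gadget deeper in the kernel. The same idea in inline asm, as a hedged x86-64-only sketch:

static inline void zero_slot_and_reg_demo(unsigned long *slot)
{
        /* store a hard zero, then break the data dependency on %r8 */
        asm volatile("movq $0, %0\n\t"
                     "xorl %%r8d, %%r8d"        /* zero-extends to %r8 */
                     : "=m" (*slot) : : "r8");
}
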
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 5dd363d54348e3..049327ee88688a 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -51,8 +51,9 @@ extern u8 pvclock_page
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*ts) :
+ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+ "memory", "rcx", "r11");
return ret;
}
@@ -60,8 +61,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+ "memory", "rcx", "r11");
return ret;
}
@@ -143,13 +145,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[clock], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+ : "=a" (ret), "=m" (*ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
: "memory", "edx");
return ret;
}
@@ -158,13 +160,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[tv], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+ : "=a" (ret), "=m" (*tv), "=m" (*tz)
+ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
: "memory", "edx");
return ret;
}
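
Both fallbacks previously under-declared their side effects: SYSCALL itself destroys %rcx (return RIP) and %r11 (saved RFLAGS), and the kernel writes through the ts/tv/tz pointers, so without the "=m" outputs and the rcx/r11 clobbers the compiler was free to keep stale values cached across the call. A minimal wrapper showing the full clobber set (raw_syscall0_demo() is hypothetical):

static long raw_syscall0_demo(long nr)
{
        long ret;

        asm volatile("syscall"
                     : "=a" (ret)
                     : "0" (nr)
                     : "memory", "rcx", "r11"); /* SYSCALL clobbers these */
        return ret;
}

/* usage: long pid = raw_syscall0_demo(__NR_getpid); */
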
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
index 0224987556ce80..3f69326ed54571 100644
--- a/arch/x86/entry/vdso/vdso2c.h
+++ b/arch/x86/entry/vdso/vdso2c.h
@@ -140,7 +140,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
fprintf(outfile, "#include <asm/vdso.h>\n");
fprintf(outfile, "\n");
fprintf(outfile,
- "static unsigned char raw_data[%lu] __page_aligned_data = {",
+ "static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
mapping_size);
for (j = 0; j < stripped_len; j++) {
if (j % 10 == 0)
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index a7508d7e20b7ca..3f9d1a83891adf 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/mm_types.h>
-#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/vdso.h>
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 3a1d9297074bc5..0109ac6cb79cc7 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -3,7 +3,7 @@
*/
#include <asm/dwarf2.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
/*
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index b8f69e264ac414..6b46648588d88e 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -20,6 +20,7 @@
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>
+#include <asm/cpufeature.h>
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
@@ -254,7 +255,7 @@ static void vgetcpu_cpu_init(void *arg)
#ifdef CONFIG_NUMA
node = cpu_to_node(cpu);
#endif
- if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+ if (static_cpu_has(X86_FEATURE_RDTSCP))
write_rdtscp_aux((node << 12) | cpu);
/*
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 2d359991a27306..1f2b7ece6f1366 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -102,7 +102,7 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
{
/*
* XXX: if access_ok, get_user, and put_user handled
- * sig_on_uaccess_error, this could go away.
+ * sig_on_uaccess_err, this could go away.
*/
if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
@@ -131,7 +131,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
struct task_struct *tsk;
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
- int prev_sig_on_uaccess_error;
+ int prev_sig_on_uaccess_err;
long ret;
/*
@@ -227,8 +227,8 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
* With a real vsyscall, page faults cause SIGSEGV. We want to
* preserve that behavior to make writing exploits harder.
*/
- prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
- current_thread_info()->sig_on_uaccess_error = 1;
+ prev_sig_on_uaccess_err = current->thread.sig_on_uaccess_err;
+ current->thread.sig_on_uaccess_err = 1;
ret = -EFAULT;
switch (vsyscall_nr) {
@@ -249,7 +249,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
break;
}
- current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+ current->thread.sig_on_uaccess_err = prev_sig_on_uaccess_err;
check_fault:
if (ret == -EFAULT) {
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index ae6aad1d24f79e..b348c4641312d7 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -50,7 +50,7 @@ static unsigned long get_dr(int n)
/*
* fill in the user structure for a core dump..
*/
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
{
u32 fs, gs;
memset(dump, 0, sizeof(*dump));
@@ -156,10 +156,12 @@ static int aout_core_dump(struct coredump_params *cprm)
fs = get_fs();
set_fs(KERNEL_DS);
has_dumped = 1;
+
+ fill_dump(cprm->regs, &dump);
+
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
dump.u_ar0 = offsetof(struct user32, regs);
dump.signal = cprm->siginfo->si_signo;
- dump_thread32(cprm->regs, &dump);
/*
* If the size of the dump file exceeds the rlimit, then see
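
The reorder is the real fix here: fill_dump() begins with memset(dump, 0, sizeof(*dump)), so calling it last, as the old dump_thread32() call did, wiped the u_comm, u_ar0 and signal fields that had just been set. The safe ordering, as a sketch (build_aout_header_demo() is hypothetical):

static void build_aout_header_demo(struct coredump_params *cprm,
                                   struct user32 *dump)
{
        fill_dump(cprm->regs, dump);    /* memsets *dump, then fills regs */
        strncpy(dump->u_comm, current->comm, sizeof(current->comm));
        dump->u_ar0 = offsetof(struct user32, regs);
        dump->signal = cprm->siginfo->si_signo; /* no later memset wipes it */
}
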
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 215ea9214215ce..002fcd901f073d 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -154,12 +154,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
".popsection\n"
/*
- * This must be included *after* the definition of ALTERNATIVE due to
- * <asm/arch_hweight.h>
- */
-#include <asm/cpufeature.h>
-
-/*
* Alternative instructions for different CPU types or capabilities.
*
* This allows to use optimized instructions even on generic binary
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 163769d824754f..fd810a57ab1b12 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -6,7 +6,6 @@
#include <asm/alternative.h>
#include <asm/cpufeature.h>
-#include <asm/processor.h>
#include <asm/apicdef.h>
#include <linux/atomic.h>
#include <asm/fixmap.h>
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index 20370c6db74bb6..3d1ec41ae09abd 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -6,6 +6,8 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
+#include <asm/nospec-branch.h>
+
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
}
static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -55,6 +59,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
+ firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -67,6 +72,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
+ firmware_restrict_branch_speculation_end();
return error;
}
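
The new bracketing applies the Spectre v2 firmware rule: branch predictions planted by BIOS code must not be allowed to steer kernel speculation, so IBRS-style hardening is enabled for the duration of the call and dropped afterwards. The pattern generalizes to any firmware entry point (bios_service_demo() is hypothetical):

static int call_firmware_guarded_demo(u32 func)
{
        int ret;

        firmware_restrict_branch_speculation_start();
        ret = bios_service_demo(func);  /* hypothetical firmware call */
        firmware_restrict_branch_speculation_end();
        return ret;
}
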
diff --git a/arch/x86/include/asm/arch_hweight.h b/arch/x86/include/asm/arch_hweight.h
index 44f825c80ed55a..e7cd63175de443 100644
--- a/arch/x86/include/asm/arch_hweight.h
+++ b/arch/x86/include/asm/arch_hweight.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H
+#include <asm/cpufeatures.h>
+
#ifdef CONFIG_64BIT
/* popcnt %edi, %eax */
#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc7"
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 858f8c354cead0..f3d4f1edc9478a 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -45,6 +45,65 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1 _ASM_AX
+#define _ASM_ARG2 _ASM_DX
+#define _ASM_ARG3 _ASM_CX
+
+#define _ASM_ARG1L eax
+#define _ASM_ARG2L edx
+#define _ASM_ARG3L ecx
+
+#define _ASM_ARG1W ax
+#define _ASM_ARG2W dx
+#define _ASM_ARG3W cx
+
+#define _ASM_ARG1B al
+#define _ASM_ARG2B dl
+#define _ASM_ARG3B cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1 _ASM_DI
+#define _ASM_ARG2 _ASM_SI
+#define _ASM_ARG3 _ASM_DX
+#define _ASM_ARG4 _ASM_CX
+#define _ASM_ARG5 r8
+#define _ASM_ARG6 r9
+
+#define _ASM_ARG1Q rdi
+#define _ASM_ARG2Q rsi
+#define _ASM_ARG3Q rdx
+#define _ASM_ARG4Q rcx
+#define _ASM_ARG5Q r8
+#define _ASM_ARG6Q r9
+
+#define _ASM_ARG1L edi
+#define _ASM_ARG2L esi
+#define _ASM_ARG3L edx
+#define _ASM_ARG4L ecx
+#define _ASM_ARG5L r8d
+#define _ASM_ARG6L r9d
+
+#define _ASM_ARG1W di
+#define _ASM_ARG2W si
+#define _ASM_ARG3W dx
+#define _ASM_ARG4W cx
+#define _ASM_ARG5W r8w
+#define _ASM_ARG6W r9w
+
+#define _ASM_ARG1B dil
+#define _ASM_ARG2B sil
+#define _ASM_ARG3B dl
+#define _ASM_ARG4B cl
+#define _ASM_ARG5B r8b
+#define _ASM_ARG6B r9b
+
+#endif
+
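
The _ASM_ARGn families added above encode where the C calling convention places each argument: eax/edx/ecx under the 32-bit regparm(3) kernel ABI, rdi/rsi/rdx/rcx/r8/r9 under the 64-bit SysV ABI, with the Q/L/W/B suffixes selecting the sized subregisters. That lets one stub serve both bitnesses; a hedged sketch (demo_return_arg1() is hypothetical):

#include <asm/asm.h>

long demo_return_arg1(long x);          /* returns its first argument */
asm(".pushsection .text\n"
    ".globl demo_return_arg1\n"
    "demo_return_arg1:\n\t"
    "mov %" _ASM_ARG1 ", %" _ASM_AX "\n\t"  /* ABI-correct source register */
    "ret\n"
    ".popsection");
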
/* Exception table entry */
#ifdef __ASSEMBLY__
# define _ASM_EXTABLE(from,to) \
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index ae5fb83e6d91c9..3e867428819840 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -3,7 +3,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <asm/processor.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index a11c30b77fb57d..a984111135b16e 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -3,7 +3,6 @@
#include <linux/compiler.h>
#include <linux/types.h>
-#include <asm/processor.h>
//#include <asm/cmpxchg.h>
/* An 64bit atomic type */
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 814ef83c672033..7f5dcb64cedb22 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -38,9 +38,9 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
{
unsigned long mask;
- asm ("cmp %1,%2; sbb %0,%0;"
+ asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
- :"r"(size),"r" (index)
+ :"g"(size),"r" (index)
:"cc");
return mask;
}
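
array_index_mask_nospec() computes, without any branch, all-ones when index < size and zero otherwise: cmp sets the carry flag for an unsigned index below size, and sbb of a register with itself materializes -CF. The volatile stops the compiler from folding the sequence away, and the "g" constraint lets size arrive in a register, in memory, or as an immediate. Typical use, as a sketch (array_read_nospec_demo() is hypothetical):

static unsigned long array_read_nospec_demo(const unsigned long *arr,
                                            unsigned long index,
                                            unsigned long size)
{
        /* the caller has already bounds-checked architecturally; the
         * mask only stops the CPU from speculating past that check */
        unsigned long mask = array_index_mask_nospec(index, size);

        return arr[index & mask];       /* OOB index collapses to 0 */
}
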
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index e63aa38e85fb23..61518cf7943767 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -91,16 +91,10 @@ void clflush_cache_range(void *addr, unsigned int size);
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
-#else
-static inline void set_kernel_text_rw(void) { }
-static inline void set_kernel_text_ro(void) { }
-#endif
#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index f50de69517384b..2a2d66c39bf6f1 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -2,8 +2,7 @@
#define _ASM_X86_CHECKSUM_32_H
#include <linux/in6.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index ad19841eddfe14..9733361fed6f4f 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -2,6 +2,7 @@
#define ASM_X86_CMPXCHG_H
#include <linux/compiler.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
index f7e142926481b6..e4959d023af848 100644
--- a/arch/x86/include/asm/cmpxchg_32.h
+++ b/arch/x86/include/asm/cmpxchg_32.h
@@ -109,6 +109,6 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
#endif
-#define system_has_cmpxchg_double() cpu_has_cx8
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX8)
#endif /* _ASM_X86_CMPXCHG_32_H */
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
index 1af94697aae510..caa23a34c963ae 100644
--- a/arch/x86/include/asm/cmpxchg_64.h
+++ b/arch/x86/include/asm/cmpxchg_64.h
@@ -18,6 +18,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
cmpxchg_local((ptr), (o), (n)); \
})
-#define system_has_cmpxchg_double() cpu_has_cx16
+#define system_has_cmpxchg_double() boot_cpu_has(X86_FEATURE_CX16)
#endif /* _ASM_X86_CMPXCHG_64_H */
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index acdee09228b30e..3ce60fdcddd2a9 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -307,7 +307,7 @@ static inline void __user *arch_compat_alloc_user_space(long len)
return (void __user *)round_down(sp - len, 16);
}
-static inline bool is_x32_task(void)
+static inline bool in_x32_syscall(void)
{
#ifdef CONFIG_X86_X32_ABI
if (task_pt_regs(current)->orig_ax & __X32_SYSCALL_BIT)
@@ -318,7 +318,7 @@ static inline bool is_x32_task(void)
static inline bool is_compat_task(void)
{
- return is_ia32_task() || is_x32_task();
+ return in_ia32_syscall() || in_x32_syscall();
}
#endif /* _ASM_X86_COMPAT_H */
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index bf2caa1dedc5a3..bba27e62c44e5d 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -17,7 +17,6 @@ static inline void prefill_possible_map(void) {}
#define cpu_physical_id(cpu) boot_cpu_physical_apicid
#define safe_smp_processor_id() 0
-#define stack_smp_processor_id() 0
#endif /* CONFIG_SMP */
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 641f0f2c2982fb..d72c1db646791c 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -1,294 +1,36 @@
-/*
- * Defines x86 CPU feature bits
- */
#ifndef _ASM_X86_CPUFEATURE_H
#define _ASM_X86_CPUFEATURE_H
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#include <asm/required-features.h>
-#endif
-
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#include <asm/disabled-features.h>
-#endif
-
-#define NCAPINTS 14 /* N 32-bit words worth of info */
-#define NBUGINTS 1 /* N 32-bit bug flags */
-
-/*
- * Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name. If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
- */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
- /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
-
-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
-/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
-
-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
-
-/* Other features, Linux-defined mapping, word 3 */
-/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
-/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
-/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
-
-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
-
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
-
-/*
- * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc, word 7
- */
-#define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */
-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
-#define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */
-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-#define X86_FEATURE_HWP ( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOTIFY ( 7*32+ 11) /* Intel HWP_NOTIFY */
-#define X86_FEATURE_HWP_ACT_WINDOW ( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
-#define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
-#define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
-#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
-
-#define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
-/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
-#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
-
-/* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
-#define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */
-#define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */
-#define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */
-#define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */
-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
-
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
-
-/*
- * BUG word(s)
- */
-#define X86_BUG(x) (NCAPINTS*32 + (x))
-
-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
-#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
-#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
-#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#include <asm/processor.h>
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#include <asm/asm.h>
#include <linux/bitops.h>
+enum cpuid_leafs
+{
+ CPUID_1_EDX = 0,
+ CPUID_8000_0001_EDX,
+ CPUID_8086_0001_EDX,
+ CPUID_LNX_1,
+ CPUID_1_ECX,
+ CPUID_C000_0001_EDX,
+ CPUID_8000_0001_ECX,
+ CPUID_LNX_2,
+ CPUID_LNX_3,
+ CPUID_7_0_EBX,
+ CPUID_D_1_EAX,
+ CPUID_F_0_EDX,
+ CPUID_F_1_EDX,
+ CPUID_8000_0008_EBX,
+ CPUID_6_EAX,
+ CPUID_8000_000A_EDX,
+ CPUID_7_ECX,
+ CPUID_8000_0007_EBX,
+ CPUID_7_EDX,
+};
+
#ifdef CONFIG_X86_FEATURE_NAMES
extern const char * const x86_cap_flags[NCAPINTS*32];
extern const char * const x86_power_flags[32];
@@ -308,29 +50,61 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define test_cpu_cap(c, bit) \
test_bit(bit, (unsigned long *)((c)->x86_capability))
-#define REQUIRED_MASK_BIT_SET(bit) \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & REQUIRED_MASK0)) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & REQUIRED_MASK1)) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & REQUIRED_MASK2)) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & REQUIRED_MASK3)) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & REQUIRED_MASK4)) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & REQUIRED_MASK5)) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & REQUIRED_MASK6)) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & REQUIRED_MASK7)) || \
- (((bit)>>5)==8 && (1UL<<((bit)&31) & REQUIRED_MASK8)) || \
- (((bit)>>5)==9 && (1UL<<((bit)&31) & REQUIRED_MASK9)) )
-
-#define DISABLED_MASK_BIT_SET(bit) \
- ( (((bit)>>5)==0 && (1UL<<((bit)&31) & DISABLED_MASK0)) || \
- (((bit)>>5)==1 && (1UL<<((bit)&31) & DISABLED_MASK1)) || \
- (((bit)>>5)==2 && (1UL<<((bit)&31) & DISABLED_MASK2)) || \
- (((bit)>>5)==3 && (1UL<<((bit)&31) & DISABLED_MASK3)) || \
- (((bit)>>5)==4 && (1UL<<((bit)&31) & DISABLED_MASK4)) || \
- (((bit)>>5)==5 && (1UL<<((bit)&31) & DISABLED_MASK5)) || \
- (((bit)>>5)==6 && (1UL<<((bit)&31) & DISABLED_MASK6)) || \
- (((bit)>>5)==7 && (1UL<<((bit)&31) & DISABLED_MASK7)) || \
- (((bit)>>5)==8 && (1UL<<((bit)&31) & DISABLED_MASK8)) || \
- (((bit)>>5)==9 && (1UL<<((bit)&31) & DISABLED_MASK9)) )
+/*
+ * There are 32 bits/features in each mask word. The high bits
+ * (selected with (bit>>5)) give us the word number and the low 5
+ * bits give us the bit/feature number inside the word.
+ * (1UL<<((bit)&31)) gives us a mask for the feature_bit so we can
+ * see if it is set in the mask word.
+ */
+#define CHECK_BIT_IN_MASK_WORD(maskname, word, bit) \
+ (((bit)>>5)==(word) && (1UL<<((bit)&31) & maskname##word ))
+
+#define REQUIRED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 0, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 1, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 2, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 3, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 4, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 5, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 6, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 7, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 8, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 9, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 10, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 11, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 12, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 13, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 14, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 15, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \
+ REQUIRED_MASK_CHECK || \
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
+
+#define DISABLED_MASK_BIT_SET(feature_bit) \
+ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 1, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 2, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 3, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 4, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 5, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 6, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 7, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 8, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 9, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 10, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 11, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 12, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 13, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 14, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 15, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 16, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \
+ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \
+ DISABLED_MASK_CHECK || \
+ BUILD_BUG_ON_ZERO(NCAPINTS != 19))
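[The following is an illustrative user-space sketch of the word/bit arithmetic the comment above describes; the mask values are invented for the example and are not the kernel's REQUIRED/DISABLED masks.]

#include <stdio.h>

#define EXAMPLE_MASK0 0x00000010u  /* hypothetical word 0 mask */
#define EXAMPLE_MASK1 0x00000000u  /* hypothetical word 1 mask */

static int bit_set_in_masks(unsigned bit)
{
	unsigned word = bit >> 5;          /* which 32-bit mask word */
	unsigned mask = 1u << (bit & 31);  /* bit inside that word */

	switch (word) {
	case 0: return !!(EXAMPLE_MASK0 & mask);
	case 1: return !!(EXAMPLE_MASK1 & mask);
	default: return 0;
	}
}

int main(void)
{
	printf("bit 4 set?  %d\n", bit_set_in_masks(4));  /* 1: word 0, bit 4 */
	printf("bit 36 set? %d\n", bit_set_in_masks(36)); /* 0: word 1, bit 4 */
	return 0;
}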
#define cpu_has(c, bit) \
(__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 1 : \
@@ -349,8 +123,7 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
* is not relevant.
*/
#define cpu_feature_enabled(bit) \
- (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : \
- cpu_has(&boot_cpu_data, bit))
+ (__builtin_constant_p(bit) && DISABLED_MASK_BIT_SET(bit) ? 0 : static_cpu_has(bit))
#define boot_cpu_has(bit) cpu_has(&boot_cpu_data, bit)
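[A minimal stand-alone model of the three-way dispatch that cpu_has()/cpu_feature_enabled() perform above, assuming invented mask values: a feature the build configuration requires folds to 1 at compile time, a disabled one folds to 0, and anything else is a runtime capability test.]

#include <stdio.h>

#define REQUIRED_BITS 0x1u  /* assume bit 0 is build-time required */
#define DISABLED_BITS 0x2u  /* assume bit 1 is build-time disabled */

static unsigned runtime_caps = 0x4u; /* pretend the CPU reported bit 2 */

#define feature_enabled(bit) \
	(__builtin_constant_p(bit) && (REQUIRED_BITS & (1u << (bit))) ? 1 : \
	 __builtin_constant_p(bit) && (DISABLED_BITS & (1u << (bit))) ? 0 : \
	 !!(runtime_caps & (1u << (bit))))

int main(void)
{
	printf("%d %d %d\n",
	       feature_enabled(0),  /* 1, folded at compile time */
	       feature_enabled(1),  /* 0, folded at compile time */
	       feature_enabled(2)); /* 1, decided by the runtime test */
	return 0;
}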
@@ -368,155 +141,39 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)
#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
-#define cpu_has_de boot_cpu_has(X86_FEATURE_DE)
#define cpu_has_pse boot_cpu_has(X86_FEATURE_PSE)
#define cpu_has_tsc boot_cpu_has(X86_FEATURE_TSC)
#define cpu_has_pge boot_cpu_has(X86_FEATURE_PGE)
#define cpu_has_apic boot_cpu_has(X86_FEATURE_APIC)
-#define cpu_has_sep boot_cpu_has(X86_FEATURE_SEP)
-#define cpu_has_mtrr boot_cpu_has(X86_FEATURE_MTRR)
-#define cpu_has_mmx boot_cpu_has(X86_FEATURE_MMX)
#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
#define cpu_has_xmm2 boot_cpu_has(X86_FEATURE_XMM2)
-#define cpu_has_xmm3 boot_cpu_has(X86_FEATURE_XMM3)
-#define cpu_has_ssse3 boot_cpu_has(X86_FEATURE_SSSE3)
#define cpu_has_aes boot_cpu_has(X86_FEATURE_AES)
#define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX)
#define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2)
-#define cpu_has_ht boot_cpu_has(X86_FEATURE_HT)
-#define cpu_has_nx boot_cpu_has(X86_FEATURE_NX)
-#define cpu_has_xstore boot_cpu_has(X86_FEATURE_XSTORE)
-#define cpu_has_xstore_enabled boot_cpu_has(X86_FEATURE_XSTORE_EN)
-#define cpu_has_xcrypt boot_cpu_has(X86_FEATURE_XCRYPT)
-#define cpu_has_xcrypt_enabled boot_cpu_has(X86_FEATURE_XCRYPT_EN)
-#define cpu_has_ace2 boot_cpu_has(X86_FEATURE_ACE2)
-#define cpu_has_ace2_enabled boot_cpu_has(X86_FEATURE_ACE2_EN)
-#define cpu_has_phe boot_cpu_has(X86_FEATURE_PHE)
-#define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN)
-#define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM)
-#define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN)
-#define cpu_has_ds boot_cpu_has(X86_FEATURE_DS)
-#define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS)
#define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS)
#define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES)
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT)
-#define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1)
-#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC)
#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
-#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT)
#define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES)
#define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE)
#define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR)
-#define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ)
-#define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE)
-#define cpu_has_perfctr_nb boot_cpu_has(X86_FEATURE_PERFCTR_NB)
-#define cpu_has_perfctr_l2 boot_cpu_has(X86_FEATURE_PERFCTR_L2)
-#define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8)
-#define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16)
-#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU)
-#define cpu_has_topoext boot_cpu_has(X86_FEATURE_TOPOEXT)
-#define cpu_has_bpext boot_cpu_has(X86_FEATURE_BPEXT)
-
-#if __GNUC__ >= 4
-extern void warn_pre_alternatives(void);
-extern bool __static_cpu_has_safe(u16 bit);
+/*
+ * Do not add any more of those clumsy macros - use static_cpu_has() for
+ * fast paths and boot_cpu_has() otherwise!
+ */
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
/*
* Static testing of CPU features. Used the same as boot_cpu_has().
- * These are only valid after alternatives have run, but will statically
- * patch the target code for additional performance.
+ * These will statically patch the target code for additional
+ * performance.
*/
-static __always_inline __pure bool __static_cpu_has(u16 bit)
+static __always_inline __pure bool _static_cpu_has(u16 bit)
{
-#ifdef CC_HAVE_ASM_GOTO
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-
- /*
- * Catch too early usage of this before alternatives
- * have run.
- */
- asm_volatile_goto("1: jmp %l[t_warn]\n"
- "2:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n"
- " .long 0\n" /* no replacement */
- " .word %P0\n" /* 1: do replace */
- " .byte 2b - 1b\n" /* source len */
- " .byte 0\n" /* replacement len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- /* skipping size check since replacement size = 0 */
- : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
-
-#endif
-
- asm_volatile_goto("1: jmp %l[t_no]\n"
- "2:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n"
- " .long 0\n" /* no replacement */
- " .word %P0\n" /* feature bit */
- " .byte 2b - 1b\n" /* source len */
- " .byte 0\n" /* replacement len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- /* skipping size check since replacement size = 0 */
- : : "i" (bit) : : t_no);
- return true;
- t_no:
- return false;
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
- t_warn:
- warn_pre_alternatives();
- return false;
-#endif
-
-#else /* CC_HAVE_ASM_GOTO */
-
- u8 flag;
- /* Open-coded due to __stringify() in ALTERNATIVE() */
- asm volatile("1: movb $0,%0\n"
- "2:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n"
- " .long 3f - .\n"
- " .word %P1\n" /* feature bit */
- " .byte 2b - 1b\n" /* source len */
- " .byte 4f - 3f\n" /* replacement len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
- ".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
- "3: movb $1,%0\n"
- "4:\n"
- ".previous\n"
- : "=qm" (flag) : "i" (bit));
- return flag;
-
-#endif /* CC_HAVE_ASM_GOTO */
-}
-
-#define static_cpu_has(bit) \
-( \
- __builtin_constant_p(boot_cpu_has(bit)) ? \
- boot_cpu_has(bit) : \
- __builtin_constant_p(bit) ? \
- __static_cpu_has(bit) : \
- boot_cpu_has(bit) \
-)
-
-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
-{
-#ifdef CC_HAVE_ASM_GOTO
- asm_volatile_goto("1: jmp %l[t_dynamic]\n"
+ asm_volatile_goto("1: jmp 6f\n"
"2:\n"
".skip -(((5f-4f) - (2b-1b)) > 0) * "
"((5f-4f) - (2b-1b)),0x90\n"
@@ -541,66 +198,34 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
" .byte 0\n" /* repl len */
" .byte 0\n" /* pad len */
".previous\n"
- : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
- : : t_dynamic, t_no);
+ ".section .altinstr_aux,\"ax\"\n"
+ "6:\n"
+ " testb %[bitnum],%[cap_byte]\n"
+ " jnz %l[t_yes]\n"
+ " jmp %l[t_no]\n"
+ ".previous\n"
+ : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
+ [bitnum] "i" (1 << (bit & 7)),
+ [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+ : : t_yes, t_no);
+ t_yes:
return true;
t_no:
return false;
- t_dynamic:
- return __static_cpu_has_safe(bit);
-#else
- u8 flag;
- /* Open-coded due to __stringify() in ALTERNATIVE() */
- asm volatile("1: movb $2,%0\n"
- "2:\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 3f - .\n" /* repl offset */
- " .word %P2\n" /* always replace */
- " .byte 2b - 1b\n" /* source len */
- " .byte 4f - 3f\n" /* replacement len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
- ".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
- "3: movb $0,%0\n"
- "4:\n"
- ".previous\n"
- ".section .altinstructions,\"a\"\n"
- " .long 1b - .\n" /* src offset */
- " .long 5f - .\n" /* repl offset */
- " .word %P1\n" /* feature bit */
- " .byte 4b - 3b\n" /* src len */
- " .byte 6f - 5f\n" /* repl len */
- " .byte 0\n" /* pad len */
- ".previous\n"
- ".section .discard,\"aw\",@progbits\n"
- " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
- ".previous\n"
- ".section .altinstr_replacement,\"ax\"\n"
- "5: movb $1,%0\n"
- "6:\n"
- ".previous\n"
- : "=qm" (flag)
- : "i" (bit), "i" (X86_FEATURE_ALWAYS));
- return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
-#endif /* CC_HAVE_ASM_GOTO */
}
-#define static_cpu_has_safe(bit) \
+#define static_cpu_has(bit) \
( \
__builtin_constant_p(boot_cpu_has(bit)) ? \
boot_cpu_has(bit) : \
- _static_cpu_has_safe(bit) \
+ _static_cpu_has(bit) \
)
#else
/*
- * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ * Fall back to dynamic for gcc versions which don't support asm goto. Should be
+ * a minority now anyway.
*/
#define static_cpu_has(bit) boot_cpu_has(bit)
-#define static_cpu_has_safe(bit) boot_cpu_has(bit)
#endif
#define cpu_has_bug(c, bit) cpu_has(c, (bit))
@@ -608,7 +233,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
#define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit))
#define static_cpu_has_bug(bit) static_cpu_has((bit))
-#define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit))
#define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit))
#define MAX_CPU_FEATURES (NCAPINTS * 32)
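[The rewritten _static_cpu_has() above first jumps into .altinstr_aux and does a dynamic testb; once alternatives patching runs, the jump is replaced and every later execution takes a straight-line branch. A user-space analogy, with a cached flag standing in for the code patching and a stubbed capability probe, might look like this.]

#include <stdbool.h>
#include <stdio.h>

static bool cpu_reports_feature(void) { return true; } /* stand-in probe */

static bool feature_resolved;
static bool feature_present;

static inline bool model_static_cpu_has(void)
{
	if (!feature_resolved) {        /* the ".altinstr_aux" slow path */
		feature_present = cpu_reports_feature();
		feature_resolved = true;    /* the kernel patches code instead */
	}
	return feature_present;         /* afterwards: a plain branch */
}

int main(void)
{
	printf("%d\n", model_static_cpu_has()); /* probes once */
	printf("%d\n", model_static_cpu_has()); /* direct answer */
	return 0;
}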
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
new file mode 100644
index 00000000000000..a5fa3195a230f5
--- /dev/null
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -0,0 +1,336 @@
+#ifndef _ASM_X86_CPUFEATURES_H
+#define _ASM_X86_CPUFEATURES_H
+
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#include <asm/required-features.h>
+#endif
+
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
+/*
+ * Defines x86 CPU feature bits
+ */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
+#define NBUGINTS 1 /* N 32-bit bug flags */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name. If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
+ /* (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
+/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FDP */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
+#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+
+#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
+
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+
+/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+
+#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+
+/* Virtualization flags: Linux defined, word 8 */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
+#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+
+/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x) (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FDP */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+
+#endif /* _ASM_X86_CPUFEATURES_H */
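[Every X86_FEATURE_* constant above is encoded as word*32 + bit, so a single integer identifies both the capability word and the bit inside it. A short stand-alone demonstration of encoding and decoding, using X86_FEATURE_AES (word 4, bit 25) as the example:]

#include <stdio.h>

#define FEATURE(word, bit) ((word) * 32 + (bit))

int main(void)
{
	unsigned f = FEATURE(4, 25); /* X86_FEATURE_AES */

	/* prints: value=153 word=4 bit=25 */
	printf("value=%u word=%u bit=%u\n", f, f / 32, f % 32);
	return 0;
}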
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 880db91d945778..2ed5a2b3f8f7e8 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -36,7 +36,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
-extern struct desc_ptr debug_idt_descr;
+extern const struct desc_ptr debug_idt_descr;
extern gate_desc debug_idt_table[];
struct gdt_page {
diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
index 8b17c2ad1048f7..1f8cca459c6c8c 100644
--- a/arch/x86/include/asm/disabled-features.h
+++ b/arch/x86/include/asm/disabled-features.h
@@ -30,6 +30,14 @@
# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
#endif /* CONFIG_X86_64 */
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+# define DISABLE_PKU 0
+# define DISABLE_OSPKE 0
+#else
+# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
+# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
+#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
+
/*
* Make sure to add features to the correct mask
*/
@@ -43,5 +51,15 @@
#define DISABLED_MASK7 0
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK10 0
+#define DISABLED_MASK11 0
+#define DISABLED_MASK12 0
+#define DISABLED_MASK13 0
+#define DISABLED_MASK14 0
+#define DISABLED_MASK15 0
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17 0
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_DISABLED_FEATURES_H */
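[A condensed, compilable sketch of the config-dependent masking pattern this hunk extends: when a Kconfig option is off, the feature's bit lands in the DISABLED mask so cpu_feature_enabled() folds to 0 at compile time. The standalone program below models that with PKU (X86_FEATURE_PKU & 31 == 3); toggle the #define to model the option being enabled.]

#include <stdio.h>

/* Uncomment to model CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y */
/* #define CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

#define PKU_BIT 3 /* X86_FEATURE_PKU & 31 */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
# define DISABLE_PKU 0u
#else
# define DISABLE_PKU (1u << PKU_BIT)
#endif

int main(void)
{
	/* 0x8 with the option off, 0x0 with it on */
	printf("DISABLED mask word 16 (example): 0x%x\n", DISABLE_PKU);
	return 0;
}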
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0010c78c4998cf..7e5a2ffb69385a 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,6 +3,7 @@
#include <asm/fpu/api.h>
#include <asm/pgtable.h>
+#include <asm/nospec-branch.h>
/*
* We map the EFI regions needed for runtime services non-contiguously,
@@ -39,8 +40,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
({ \
efi_status_t __s; \
kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
__s = ((efi_##f##_t __attribute__((regparm(0)))*) \
efi.systab->runtime->f)(args); \
+ firmware_restrict_branch_speculation_end(); \
kernel_fpu_end(); \
__s; \
})
@@ -49,8 +52,10 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define __efi_call_virt(f, args...) \
({ \
kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
((efi_##f##_t __attribute__((regparm(0)))*) \
efi.systab->runtime->f)(args); \
+ firmware_restrict_branch_speculation_end(); \
kernel_fpu_end(); \
})
@@ -71,7 +76,9 @@ extern u64 asmlinkage efi_call(void *fp, ...);
efi_sync_low_kernel_mappings(); \
preempt_disable(); \
__kernel_fpu_begin(); \
+ firmware_restrict_branch_speculation_start(); \
__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ firmware_restrict_branch_speculation_end(); \
__kernel_fpu_end(); \
preempt_enable(); \
__s; \
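[The efi.h hunks above all follow one bracketing discipline: restrict branch speculation immediately before calling into firmware and lift the restriction symmetrically afterwards, inside the existing FPU begin/end pair. A user-space sketch of that discipline, with the helper bodies stubbed out for illustration:]

#include <stdio.h>

static void kernel_fpu_begin(void) { puts("fpu begin"); }
static void kernel_fpu_end(void)   { puts("fpu end"); }
static void firmware_restrict_branch_speculation_start(void) { puts("IBRS on"); }
static void firmware_restrict_branch_speculation_end(void)   { puts("IBRS off"); }
static void efi_runtime_service(void) { puts("firmware call"); }

int main(void)
{
	kernel_fpu_begin();
	firmware_restrict_branch_speculation_start(); /* just before firmware */
	efi_runtime_service();
	firmware_restrict_branch_speculation_end();   /* symmetric unwind */
	kernel_fpu_end();
	return 0;
}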
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 3c3550c3a4a3f0..66a5e60f60c416 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -17,6 +17,7 @@
#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
+#include <asm/cpufeature.h>
/*
* High level FPU state handling functions:
@@ -42,6 +43,7 @@ extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
+extern u64 fpu__get_supported_xfeatures_mask(void);
/*
* Debugging facility:
@@ -55,24 +57,19 @@ extern void fpu__resume_cpu(void);
/*
* FPU related CPU feature flag helper routines:
*/
-static __always_inline __pure bool use_eager_fpu(void)
-{
- return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
-}
-
static __always_inline __pure bool use_xsaveopt(void)
{
- return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+ return static_cpu_has(X86_FEATURE_XSAVEOPT);
}
static __always_inline __pure bool use_xsave(void)
{
- return static_cpu_has_safe(X86_FEATURE_XSAVE);
+ return static_cpu_has(X86_FEATURE_XSAVE);
}
static __always_inline __pure bool use_fxsr(void)
{
- return static_cpu_has_safe(X86_FEATURE_FXSR);
+ return static_cpu_has(X86_FEATURE_FXSR);
}
/*
@@ -97,6 +94,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...) \
({ \
int err; \
+ \
+ might_fault(); \
+ \
asm volatile(ASM_STAC "\n" \
"1:" #insn "\n\t" \
"2: " ASM_CLAC "\n" \
@@ -224,18 +224,67 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
#define XRSTOR ".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
-/* xstate instruction fault handler: */
-#define xstate_fault(__err) \
- \
- ".section .fixup,\"ax\"\n" \
- \
- "3: movl $-2,%[_err]\n" \
- " jmp 2b\n" \
- \
- ".previous\n" \
- \
- _ASM_EXTABLE(1b, 3b) \
- : [_err] "=r" (__err)
+#define XSTATE_OP(op, st, lmask, hmask, err) \
+ asm volatile("1:" op "\n\t" \
+ "xor %[err], %[err]\n" \
+ "2:\n\t" \
+ ".pushsection .fixup,\"ax\"\n\t" \
+ "3: movl $-2,%[err]\n\t" \
+ "jmp 2b\n\t" \
+ ".popsection\n\t" \
+ _ASM_EXTABLE(1b, 3b) \
+ : [err] "=r" (err) \
+ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : "memory")
+
+/*
+ * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
+ * format and supervisor states in addition to modified optimization in
+ * XSAVEOPT.
+ *
+ * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
+ * supports modified optimization which is not supported by XSAVE.
+ *
+ * We use XSAVE as a fallback.
+ *
+ * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * original instruction which gets replaced. We need to use it here as the
+ * address of the instruction at which an exception might occur.
+ */
+#define XSTATE_XSAVE(st, lmask, hmask, err) \
+ asm volatile(ALTERNATIVE_2(XSAVE, \
+ XSAVEOPT, X86_FEATURE_XSAVEOPT, \
+ XSAVES, X86_FEATURE_XSAVES) \
+ "\n" \
+ "xor %[err], %[err]\n" \
+ "3:\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "4: movl $-2, %[err]\n" \
+ "jmp 3b\n" \
+ ".popsection\n" \
+ _ASM_EXTABLE(661b, 4b) \
+ : [err] "=r" (err) \
+ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : "memory")
+
+/*
+ * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
+ * XSAVE area format.
+ */
+#define XSTATE_XRESTORE(st, lmask, hmask, err) \
+ asm volatile(ALTERNATIVE(XRSTOR, \
+ XRSTORS, X86_FEATURE_XSAVES) \
+ "\n" \
+ "xor %[err], %[err]\n" \
+ "3:\n" \
+ ".pushsection .fixup,\"ax\"\n" \
+ "4: movl $-2, %[err]\n" \
+ "jmp 3b\n" \
+ ".popsection\n" \
+ _ASM_EXTABLE(661b, 4b) \
+ : [err] "=r" (err) \
+ : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
+ : "memory")
/*
* This function is called only during boot time when x86 caps are not set
@@ -246,22 +295,14 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
u64 mask = -1;
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err = 0;
+ int err;
WARN_ON(system_state != SYSTEM_BOOTING);
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- asm volatile("1:"XSAVES"\n\t"
- "2:\n\t"
- xstate_fault(err)
- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
- : "memory");
+ if (static_cpu_has(X86_FEATURE_XSAVES))
+ XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
else
- asm volatile("1:"XSAVE"\n\t"
- "2:\n\t"
- xstate_fault(err)
- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
- : "memory");
+ XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
/* We should never fault when copying to a kernel buffer: */
WARN_ON_FPU(err);
@@ -276,22 +317,14 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
u64 mask = -1;
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err = 0;
+ int err;
WARN_ON(system_state != SYSTEM_BOOTING);
- if (boot_cpu_has(X86_FEATURE_XSAVES))
- asm volatile("1:"XRSTORS"\n\t"
- "2:\n\t"
- xstate_fault(err)
- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
- : "memory");
+ if (static_cpu_has(X86_FEATURE_XSAVES))
+ XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
else
- asm volatile("1:"XRSTOR"\n\t"
- "2:\n\t"
- xstate_fault(err)
- : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
- : "memory");
+ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
/* We should never fault when copying from a kernel buffer: */
WARN_ON_FPU(err);
@@ -305,33 +338,11 @@ static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
u64 mask = -1;
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err = 0;
+ int err;
WARN_ON(!alternatives_patched);
- /*
- * If xsaves is enabled, xsaves replaces xsaveopt because
- * it supports compact format and supervisor states in addition to
- * modified optimization in xsaveopt.
- *
- * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
- * because xsaveopt supports modified optimization which is not
- * supported by xsave.
- *
- * If none of xsaves and xsaveopt is enabled, use xsave.
- */
- alternative_input_2(
- "1:"XSAVE,
- XSAVEOPT,
- X86_FEATURE_XSAVEOPT,
- XSAVES,
- X86_FEATURE_XSAVES,
- [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
- "memory");
- asm volatile("2:\n\t"
- xstate_fault(err)
- : "0" (err)
- : "memory");
+ XSTATE_XSAVE(xstate, lmask, hmask, err);
/* We should never fault when copying to a kernel buffer: */
WARN_ON_FPU(err);
@@ -344,23 +355,9 @@ static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err = 0;
+ int err;
- /*
- * Use xrstors to restore context if it is enabled. xrstors supports
- * compacted format of xsave area which is not supported by xrstor.
- */
- alternative_input(
- "1: " XRSTOR,
- XRSTORS,
- X86_FEATURE_XSAVES,
- "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
- : "memory");
-
- asm volatile("2:\n"
- xstate_fault(err)
- : "0" (err)
- : "memory");
+ XSTATE_XRESTORE(xstate, lmask, hmask, err);
/* We should never fault when copying from a kernel buffer: */
WARN_ON_FPU(err);
@@ -388,12 +385,10 @@ static inline int copy_xregs_to_user(struct xregs_state __user *buf)
if (unlikely(err))
return -EFAULT;
- __asm__ __volatile__(ASM_STAC "\n"
- "1:"XSAVE"\n"
- "2: " ASM_CLAC "\n"
- xstate_fault(err)
- : "D" (buf), "a" (-1), "d" (-1), "0" (err)
- : "memory");
+ stac();
+ XSTATE_OP(XSAVE, buf, -1, -1, err);
+ clac();
+
return err;
}
@@ -405,14 +400,12 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
struct xregs_state *xstate = ((__force struct xregs_state *)buf);
u32 lmask = mask;
u32 hmask = mask >> 32;
- int err = 0;
-
- __asm__ __volatile__(ASM_STAC "\n"
- "1:"XRSTOR"\n"
- "2: " ASM_CLAC "\n"
- xstate_fault(err)
- : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
- : "memory"); /* memory required? */
+ int err;
+
+ stac();
+ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+ clac();
+
return err;
}
@@ -466,7 +459,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
* pending. Clear the x87 state here by setting it to fixed values.
* "m" is a random variable that should be in L1.
*/
- if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+ if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
asm volatile(
"fnclex\n\t"
"emms\n\t"
@@ -503,24 +496,6 @@ static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
}
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
- if (!use_eager_fpu())
- clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
- if (!use_eager_fpu())
- stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
@@ -529,7 +504,6 @@ static inline void __fpregs_deactivate(struct fpu *fpu)
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
@@ -554,22 +528,17 @@ static inline int fpregs_active(void)
}
/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
* These generally need preemption protection to work,
* do try to avoid using these on their own.
*/
static inline void fpregs_activate(struct fpu *fpu)
{
- __fpregs_activate_hw();
__fpregs_activate(fpu);
}
static inline void fpregs_deactivate(struct fpu *fpu)
{
__fpregs_deactivate(fpu);
- __fpregs_deactivate_hw();
}
/*
@@ -595,8 +564,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
* If the task has used the math, pre-load the FPU on xsave processors
* or if the past 5 consecutive context-switches used math.
*/
- fpu.preload = new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
+ fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+ new_fpu->fpstate_active;
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
@@ -609,17 +578,12 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
- new_fpu->counter++;
__fpregs_activate(new_fpu);
prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
}
} else {
- old_fpu->counter = 0;
old_fpu->last_cpu = -1;
if (fpu.preload) {
- new_fpu->counter++;
if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 1c6f6ac52ad0a2..0d81c7d6fe9618 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -303,17 +303,6 @@ struct fpu {
unsigned char fpregs_active;
/*
- * @counter:
- *
- * This counter contains the number of consecutive context switches
- * during which the FPU stays used. If this is over a threshold, the
- * lazy FPU restore logic becomes eager, to save the trap overhead.
- * This is an unsigned char so that after 256 iterations the counter
- * wraps and the context switch behavior turns lazy again; this is to
- * deal with bursty apps that only use the FPU for a short time:
- */
- unsigned char counter;
- /*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
@@ -321,29 +310,6 @@ struct fpu {
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index 3a6c89b7030757..df8b6317167bb1 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -22,7 +22,7 @@
#define XFEATURE_MASK_LAZY (XFEATURE_MASK_FP | \
XFEATURE_MASK_SSE | \
XFEATURE_MASK_YMM | \
- XFEATURE_MASK_OPMASK | \
+ XFEATURE_MASK_OPMASK | \
XFEATURE_MASK_ZMM_Hi256 | \
XFEATURE_MASK_Hi16_ZMM)
@@ -42,7 +42,8 @@ extern unsigned int xstate_size;
extern u64 xfeatures_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
-extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
+extern void __init update_regset_xstate_info(unsigned int size,
+ u64 xstate_mask);
void fpu__xstate_clear_all_cpu_caps(void);
void *get_xsave_addr(struct xregs_state *xsave, int xstate);
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 6999f7d01a0d12..e13ff5a1463316 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -12,6 +12,7 @@
*/
#define INTEL_FAM6_CORE_YONAH 0x0E
+
#define INTEL_FAM6_CORE2_MEROM 0x0F
#define INTEL_FAM6_CORE2_MEROM_L 0x16
#define INTEL_FAM6_CORE2_PENRYN 0x17
@@ -20,6 +21,7 @@
#define INTEL_FAM6_NEHALEM 0x1E
#define INTEL_FAM6_NEHALEM_EP 0x1A
#define INTEL_FAM6_NEHALEM_EX 0x2E
+
#define INTEL_FAM6_WESTMERE 0x25
#define INTEL_FAM6_WESTMERE2 0x1F
#define INTEL_FAM6_WESTMERE_EP 0x2C
@@ -36,9 +38,9 @@
#define INTEL_FAM6_HASWELL_GT3E 0x46
#define INTEL_FAM6_BROADWELL_CORE 0x3D
-#define INTEL_FAM6_BROADWELL_XEON_D 0x56
#define INTEL_FAM6_BROADWELL_GT3E 0x47
#define INTEL_FAM6_BROADWELL_X 0x4F
+#define INTEL_FAM6_BROADWELL_XEON_D 0x56
#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
@@ -56,13 +58,15 @@
#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
-#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
+#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
+#define INTEL_FAM6_ATOM_GEMINI_LAKE 0x7A
/* Xeon Phi */
#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
+#define INTEL_FAM6_XEON_PHI_KNM 0x85 /* Knights Mill */
#endif /* _ASM_X86_INTEL_FAMILY_H */
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 9016b4b7037538..6c5020163db0b3 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -351,4 +351,10 @@ extern void arch_phys_wc_del(int handle);
#define arch_phys_wc_add arch_phys_wc_add
#endif
+#ifdef CONFIG_X86_PAT
+extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
+extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
+#define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
+#endif
+
#endif /* _ASM_X86_IO_H */
diff --git a/arch/x86/include/asm/irq_work.h b/arch/x86/include/asm/irq_work.h
index 78162f8e248bdc..d0afb05c84fc1f 100644
--- a/arch/x86/include/asm/irq_work.h
+++ b/arch/x86/include/asm/irq_work.h
@@ -1,7 +1,7 @@
#ifndef _ASM_IRQ_WORK_H
#define _ASM_IRQ_WORK_H
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
static inline bool arch_irq_work_has_interrupt(void)
{
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index b77f5edb03b0c0..8afbdcd3032bfa 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -8,7 +8,9 @@
* Interrupt control:
*/
-static inline unsigned long native_save_fl(void)
+/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
+extern inline unsigned long native_save_fl(void);
+extern inline unsigned long native_save_fl(void)
{
unsigned long flags;
@@ -26,7 +28,8 @@ static inline unsigned long native_save_fl(void)
return flags;
}
-static inline void native_restore_fl(unsigned long flags)
+extern inline void native_restore_fl(unsigned long flags);
+extern inline void native_restore_fl(unsigned long flags)
{
asm volatile("push %0 ; popf"
: /* no output */
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 4421b5da409d64..d1d1e5094c2844 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
#define RELATIVECALL_OPCODE 0xe8
#define RELATIVE_ADDR_SIZE 4
#define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR) \
- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR))) \
- ? (MAX_STACK_SIZE) \
- : (((unsigned long)current_thread_info()) + \
- THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+ (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR) \
+ (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ? \
+ MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
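[The rewritten MIN_STACK_SIZE() clamps a copy length to whichever is smaller: the MAX_STACK_SIZE cap or the bytes remaining up to the top of the stack. A user-space model, with current_top_of_stack() stubbed as a made-up constant:]

#include <stdio.h>

#define MAX_STACK_SIZE 64

static unsigned long current_top_of_stack(void) { return 0x1000; } /* stub */

static unsigned long min_stack_size(unsigned long addr)
{
	unsigned long remaining = current_top_of_stack() - addr;

	return remaining < MAX_STACK_SIZE ? remaining : MAX_STACK_SIZE;
}

int main(void)
{
	printf("%lu\n", min_stack_size(0x0ff0)); /* 16: near the stack top */
	printf("%lu\n", min_stack_size(0x0e00)); /* 64: clamped to the cap */
	return 0;
}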
#define flush_insn_slot(p) do { } while (0)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index fc3c7e49c8e489..ae357d0afc91f3 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -105,11 +105,12 @@ struct x86_emulate_ops {
* @addr: [IN ] Linear address from which to read.
* @val: [OUT] Value read from memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to read from memory.
+ * @system:[IN ] Whether the access is forced to be at CPL0.
*/
int (*read_std)(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *val,
unsigned int bytes,
- struct x86_exception *fault);
+ struct x86_exception *fault, bool system);
/*
* read_phys: Read bytes of standard (non-emulated/special) memory.
@@ -127,10 +128,11 @@ struct x86_emulate_ops {
* @addr: [IN ] Linear address to which to write.
* @val: [OUT] Value write to memory, zero-extended to 'u_long'.
* @bytes: [IN ] Number of bytes to write to memory.
+ * @system:[IN ] Whether the access is forced to be at CPL0.
*/
int (*write_std)(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *val, unsigned int bytes,
- struct x86_exception *fault);
+ struct x86_exception *fault, bool system);
/*
* fetch: Read bytes of standard (non-emulated/special) memory.
* Used for instruction fetch.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74fda1a453bdb0..2cb49ac1b2b255 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -439,7 +439,6 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_page_header_cache;
struct fpu guest_fpu;
- bool eager_fpu;
u64 xcr0;
u64 guest_supported_xcr0;
u32 guest_xstate_size;
@@ -766,7 +765,7 @@ struct kvm_x86_ops {
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
- bool (*cpu_has_high_real_mode_segbase)(void);
+ bool (*has_emulated_msr)(int index);
void (*cpuid_update)(struct kvm_vcpu *vcpu);
/* Create, but do not attach this VCPU */
@@ -1201,7 +1200,7 @@ asmlinkage void kvm_spurious_fault(void);
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
- "call kvm_spurious_fault \n\t" \
+ "jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index c1adf33fdd0d6f..bc62e7cbf1b1f8 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -17,15 +17,8 @@ static inline bool kvm_check_and_clear_guest_paused(void)
}
#endif /* CONFIG_KVM_GUEST */
-#ifdef CONFIG_DEBUG_RODATA
#define KVM_HYPERCALL \
ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
-#else
-/* On AMD processors, vmcall will generate a trap that we will
- * then rewrite to the appropriate instruction.
- */
-#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
-#endif
/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
* instruction. The hypervisor may replace it with something else but only the
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 7680b76adafc09..3359dfedc7ee7d 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,18 @@
#include <linux/spinlock.h>
#include <linux/mutex.h>
+#include <linux/atomic.h>
/*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
*/
typedef struct {
+ /*
+ * ctx_id uniquely identifies this mm_struct. A ctx_id will never
+ * be reused, and zero is not a valid ctx_id.
+ */
+ u64 ctx_id;
+
#ifdef CONFIG_MODIFY_LDT_SYSCALL
struct ldt_struct *ldt;
#endif
@@ -24,6 +30,11 @@ typedef struct {
atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
} mm_context_t;
+#define INIT_MM_CONTEXT(mm) \
+ .context = { \
+ .ctx_id = 1, \
+ }
+
void leave_mm(int cpu);
#endif /* _ASM_X86_MMU_H */
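[A stand-alone model of the never-reused ctx_id the new comment describes: a 64-bit counter that starts above zero and only increments, so zero is never a valid ID and no ID is handed out twice. The kernel's actual allocation via atomic64_inc_return() appears in the mmu_context.h hunk below; this C11 sketch is illustrative only.]

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t last_ctx_id; /* zero is never handed out */

static uint64_t new_ctx_id(void)
{
	/* fetch_add returns the old value, so +1 mirrors atomic64_inc_return() */
	return atomic_fetch_add(&last_ctx_id, 1) + 1;
}

int main(void)
{
	printf("%llu %llu\n",
	       (unsigned long long)new_ctx_id(),  /* 1 */
	       (unsigned long long)new_ctx_id()); /* 2 */
	return 0;
}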
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 9bfc5fd7701566..d8d19fe99e45f8 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -11,6 +11,9 @@
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
@@ -52,15 +55,15 @@ struct ldt_struct {
/*
* Used for LDT copy/destruction.
*/
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void destroy_context(struct mm_struct *mm);
+int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context_ldt(struct mm_struct *mm);
#else /* CONFIG_MODIFY_LDT_SYSCALL */
-static inline int init_new_context(struct task_struct *tsk,
- struct mm_struct *mm)
+static inline int init_new_context_ldt(struct task_struct *tsk,
+ struct mm_struct *mm)
{
return 0;
}
-static inline void destroy_context(struct mm_struct *mm) {}
+static inline void destroy_context_ldt(struct mm_struct *mm) {}
#endif
static inline void load_mm_ldt(struct mm_struct *mm)
@@ -102,6 +105,17 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
}
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+ return init_new_context_ldt(tsk, mm);
+}
+static inline void destroy_context(struct mm_struct *mm)
+{
+ destroy_context_ldt(mm);
+}
+
extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index f9b9a1cb862684..6dce5c0f281b50 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -32,6 +32,15 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
+#define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */
+#define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */
+#define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */
+#define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */
+
+#define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */
+#define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */
+
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
@@ -45,6 +54,16 @@
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_MTRRcap 0x000000fe
+
+#define MSR_IA32_ARCH_CAPABILITIES 0x0000010a
+#define ARCH_CAP_RDCL_NO (1 << 0) /* Not susceptible to Meltdown */
+#define ARCH_CAP_IBRS_ALL (1 << 1) /* Enhanced IBRS support */
+#define ARCH_CAP_SSB_NO (1 << 4) /*
+ * Not susceptible to Speculative Store Bypass
+ * attack, so no Speculative Store Bypass
+ * control required.
+ */
+
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
@@ -132,6 +151,7 @@
/* DEBUGCTLMSR bits (others vary by model): */
#define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */
+#define DEBUGCTLMSR_BTF_SHIFT 1
#define DEBUGCTLMSR_BTF (1UL << 1) /* single-step on branches */
#define DEBUGCTLMSR_TR (1UL << 6)
#define DEBUGCTLMSR_BTS (1UL << 7)
@@ -318,6 +338,8 @@
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
/* Fam 16h MSRs */
#define MSR_F16H_L2I_PERF_CTL 0xc0010230
#define MSR_F16H_L2I_PERF_CTR 0xc0010231
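
For context, a hedged sketch of how the ARCH_CAPABILITIES bits above are consumed during bug enumeration, modeled on the shape of the kernel's cpu_set_bug_bits(); the X86_FEATURE_/X86_BUG_ constants are assumed to exist in this tree, and the real decision table is longer:

/* Sketch only: enumerate hardware immunity before forcing bug bits. */
static void __init sketch_cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 ia32_cap = 0;

	/* the MSR is only architecturally guaranteed when enumerated */
	if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);

	if (!(ia32_cap & ARCH_CAP_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	if (!(ia32_cap & ARCH_CAP_RDCL_NO)) {
		setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
		setup_force_cpu_bug(X86_BUG_L1TF);
	}
}
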
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index c70689b5e5aa4c..0deeb2d26df7cd 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -3,6 +3,8 @@
#include <linux/sched.h>
+#include <asm/cpufeature.h>
+
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 249f1c769f21fa..b4c74c24c8908a 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -5,7 +5,8 @@
#include <asm/alternative.h>
#include <asm/alternative-asm.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
/*
* Fill the CPU return stack buffer.
@@ -171,6 +172,14 @@ enum spectre_v2_mitigation {
SPECTRE_V2_IBRS,
};
+/* The Speculative Store Bypass disable variants */
+enum ssb_mitigation {
+ SPEC_STORE_BYPASS_NONE,
+ SPEC_STORE_BYPASS_DISABLE,
+ SPEC_STORE_BYPASS_PRCTL,
+ SPEC_STORE_BYPASS_SECCOMP,
+};
+
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
@@ -194,6 +203,51 @@ static inline void vmexit_fill_RSB(void)
#endif
}
+static __always_inline
+void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+{
+ asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
+ : : "c" (msr),
+ "a" ((u32)val),
+ "d" ((u32)(val >> 32)),
+ [feature] "i" (feature)
+ : "memory");
+}
+
+static inline void indirect_branch_prediction_barrier(void)
+{
+ u64 val = PRED_CMD_IBPB;
+
+ alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
+}
+
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
+/*
+ * With retpoline, we must use IBRS to restrict branch prediction
+ * before calling into firmware.
+ *
+ * (Implemented as CPP macros due to header hell.)
+ */
+#define firmware_restrict_branch_speculation_start() \
+do { \
+ u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS; \
+ \
+ preempt_disable(); \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+} while (0)
+
+#define firmware_restrict_branch_speculation_end() \
+do { \
+ u64 val = x86_spec_ctrl_base; \
+ \
+ alternative_msr_write(MSR_IA32_SPEC_CTRL, val, \
+ X86_FEATURE_USE_IBRS_FW); \
+ preempt_enable(); \
+} while (0)
+
#endif /* __ASSEMBLY__ */
/*
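
A hedged usage sketch for the firmware bracketing macros above; do_firmware_op() is a hypothetical stand-in for whatever EFI/APM entry point a caller wraps:

extern unsigned long do_firmware_op(void);

static unsigned long guarded_firmware_call(void)
{
	unsigned long ret;

	firmware_restrict_branch_speculation_start();	/* IBRS on, preempt off */
	ret = do_firmware_op();		/* untrusted firmware runs here */
	firmware_restrict_branch_speculation_end();	/* base SPEC_CTRL back, preempt on */

	return ret;
}
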
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index 3a52ee0e726d4c..bfceb5cc63478f 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -27,8 +27,13 @@
#define N_EXCEPTION_STACKS 1
#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT 44
+/*
+ * This is beyond the 44 bit limit imposed by the 32bit long pfns,
+ * but we need the full mask to make sure inverted PROT_NONE
+ * entries have all the host bits set in a guest.
+ * The real limit is still 44 bits.
+ */
+#define __PHYSICAL_MASK_SHIFT 52
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 4928cf0d5af0fe..fb1251946b45e5 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -2,7 +2,11 @@
#define _ASM_X86_PAGE_64_DEFS_H
#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
#define KASAN_STACK_ORDER 1
+#endif
#else
#define KASAN_STACK_ORDER 0
#endif
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index e0ba66ca68c6fa..f5e780bfa2b331 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -184,22 +184,22 @@ do { \
typeof(var) pfo_ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_arg(1)",%0" \
+ asm volatile(op "b "__percpu_arg(1)",%0"\
: "=q" (pfo_ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_arg(1)",%0" \
+ asm volatile(op "w "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_arg(1)",%0" \
+ asm volatile(op "l "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
case 8: \
- asm(op "q "__percpu_arg(1)",%0" \
+ asm volatile(op "q "__percpu_arg(1)",%0"\
: "=r" (pfo_ret__) \
: "m" (var)); \
break; \
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index fd74a11959de0d..89c50332a71eda 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -77,4 +77,21 @@ static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshi
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_low })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
+/* No inverted PFNs on 2 level page tables */
+
+static inline u64 protnone_mask(u64 val)
+{
+ return 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+ return val;
+}
+
+static inline bool __pte_needs_invert(u64 val)
+{
+ return false;
+}
+
#endif /* _ASM_X86_PGTABLE_2LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index cdaa58c9b39ed3..095dbc25122a3d 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_PGTABLE_3LEVEL_H
#define _ASM_X86_PGTABLE_3LEVEL_H
+#include <asm/atomic64_32.h>
+
/*
* Intel Physical Address Extension (PAE) Mode - three-level page
* tables on PPro+ CPUs.
@@ -142,10 +144,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
{
pte_t res;
- /* xchg acts as a barrier before the setting of the high bits */
- res.pte_low = xchg(&ptep->pte_low, 0);
- res.pte_high = ptep->pte_high;
- ptep->pte_high = 0;
+ res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
return res;
}
@@ -177,11 +176,44 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
#endif
/* Encode and de-code a swap entry */
+#define SWP_TYPE_BITS 5
+
+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)
+
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > 5)
#define __swp_type(x) (((x).val) & 0x1f)
#define __swp_offset(x) ((x).val >> 5)
#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
-#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
+
+/*
+ * Normally, __swp_entry() converts from arch-independent swp_entry_t to
+ * arch-dependent swp_entry_t, and __swp_entry_to_pte() just stores the result
+ * to pte. But here we have 32bit swp_entry_t and 64bit pte, and need to use the
+ * whole 64 bits. Thus, we shift the "real" arch-dependent conversion to
+ * __swp_entry_to_pte() through the following helper macro based on 64bit
+ * __swp_entry().
+ */
+#define __swp_pteval_entry(type, offset) ((pteval_t) { \
+ (~(pteval_t)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((pteval_t)(type) << (64 - SWP_TYPE_BITS)) })
+
+#define __swp_entry_to_pte(x) ((pte_t){ .pte = \
+ __swp_pteval_entry(__swp_type(x), __swp_offset(x)) })
+/*
+ * Analogously, __pte_to_swp_entry() doesn't just extract the arch-dependent
+ * swp_entry_t, but also has to convert it from 64bit to the 32bit
+ * intermediate representation, using the following macros based on 64bit
+ * __swp_type() and __swp_offset().
+ */
+#define __pteval_swp_type(x) ((unsigned long)((x).pte >> (64 - SWP_TYPE_BITS)))
+#define __pteval_swp_offset(x) ((unsigned long)(~((x).pte) << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT))
+
+#define __pte_to_swp_entry(pte) (__swp_entry(__pteval_swp_type(pte), \
+ __pteval_swp_offset(pte)))
+
+#include <asm/pgtable-invert.h>
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */
diff --git a/arch/x86/include/asm/pgtable-invert.h b/arch/x86/include/asm/pgtable-invert.h
new file mode 100644
index 00000000000000..a0c1525f1b6f41
--- /dev/null
+++ b/arch/x86/include/asm/pgtable-invert.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_PGTABLE_INVERT_H
+#define _ASM_PGTABLE_INVERT_H 1
+
+#ifndef __ASSEMBLY__
+
+/*
+ * A clear pte value is special, and doesn't get inverted.
+ *
+ * Note that even users that only pass a pgprot_t (rather
+ * than a full pte) won't trigger the special zero case,
+ * because even PAGE_NONE has _PAGE_PROTNONE | _PAGE_ACCESSED
+ * set. So the all zero case really is limited to just the
+ * cleared page table entry case.
+ */
+static inline bool __pte_needs_invert(u64 val)
+{
+ return val && !(val & _PAGE_PRESENT);
+}
+
+/* Get a mask to xor with the page table entry to get the correct pfn. */
+static inline u64 protnone_mask(u64 val)
+{
+ return __pte_needs_invert(val) ? ~0ull : 0;
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask)
+{
+ /*
+ * When a PTE transitions from NONE to !NONE or vice-versa
+ * invert the PFN part to stop speculation.
+ * pte_pfn undoes this when needed.
+ */
+ if (__pte_needs_invert(oldval) != __pte_needs_invert(val))
+ val = (val & ~mask) | (~val & mask);
+ return val;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
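
The inversion algebra above can be exercised in isolation. A user-space model follows; the PRESENT bit and PFN mask are simplified stand-ins for _PAGE_PRESENT and PTE_PFN_MASK, but the xor logic is the same:

/* Model of protnone_mask()/flip_protnone_guard(): invert on the way in,
 * invert again on the way out, PFN bits survive the round trip. */
#include <assert.h>
#include <stdint.h>

#define PRESENT		0x1ULL			/* stand-in for _PAGE_PRESENT */
#define PFN_MASK	0x000FFFFFFFFFF000ULL	/* stand-in for PTE_PFN_MASK */

static uint64_t needs_invert(uint64_t val)
{
	return val && !(val & PRESENT);
}

static uint64_t protnone_mask(uint64_t val)
{
	return needs_invert(val) ? ~0ULL : 0;
}

static uint64_t flip_guard(uint64_t oldval, uint64_t val, uint64_t mask)
{
	if (needs_invert(oldval) != needs_invert(val))
		val = (val & ~mask) | (~val & mask);
	return val;
}

int main(void)
{
	uint64_t pte = (0x1234ULL << 12) | PRESENT;

	/* present -> PROT_NONE: the stored PFN field is inverted... */
	uint64_t np = flip_guard(pte, pte & ~PRESENT, PFN_MASK);
	assert(((np ^ protnone_mask(np)) & PFN_MASK) == (0x1234ULL << 12));

	/* ...and flipping back to present restores the original PFN bits */
	uint64_t back = flip_guard(np, np | PRESENT, PFN_MASK);
	assert((back & PFN_MASK) == (0x1234ULL << 12));
	return 0;
}
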
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 84c62d9500232c..a67d7f210b7c8f 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -148,19 +148,34 @@ static inline int pte_special(pte_t pte)
return pte_flags(pte) & _PAGE_SPECIAL;
}
+/* Entries that were set to PROT_NONE are inverted */
+
+static inline u64 protnone_mask(u64 val);
+
static inline unsigned long pte_pfn(pte_t pte)
{
- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+ phys_addr_t pfn = pte_val(pte);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
- return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
+ phys_addr_t pfn = pmd_val(pmd);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
static inline unsigned long pud_pfn(pud_t pud)
{
- return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+ phys_addr_t pfn = pud_val(pud);
+ pfn ^= protnone_mask(pfn);
+ return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
+}
+
+static inline unsigned long pgd_pfn(pgd_t pgd)
+{
+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
@@ -305,11 +320,6 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_RW);
}
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
- return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
-}
-
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
@@ -359,19 +369,58 @@ static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
- massage_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PTE_PFN_MASK;
+ return __pte(pfn | massage_pgprot(pgprot));
}
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
- return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
- massage_pgprot(pgprot));
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PMD_PAGE_MASK;
+ return __pmd(pfn | massage_pgprot(pgprot));
+}
+
+static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
+{
+ phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
+ pfn ^= protnone_mask(pgprot_val(pgprot));
+ pfn &= PHYSICAL_PUD_PAGE_MASK;
+ return __pud(pfn | massage_pgprot(pgprot));
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+ return pfn_pmd(pmd_pfn(pmd),
+ __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
+}
+
+static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return __pud(v | set);
}
+static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
+{
+ pudval_t v = native_pud_val(pud);
+
+ return __pud(v & ~clear);
+}
+
+static inline pud_t pud_mkhuge(pud_t pud)
+{
+ return pud_set_flags(pud, _PAGE_PSE);
+}
+
+static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- pteval_t val = pte_val(pte);
+ pteval_t val = pte_val(pte), oldval = val;
/*
* Chop off the NX bit (if present), and add the NX portion of
@@ -379,17 +428,17 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
*/
val &= _PAGE_CHG_MASK;
val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
-
+ val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
return __pte(val);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmdval_t val = pmd_val(pmd);
+ pmdval_t val = pmd_val(pmd), oldval = val;
val &= _HPAGE_CHG_MASK;
val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
-
+ val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
return __pmd(val);
}
@@ -534,8 +583,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pmd_page(pmd) \
- pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
@@ -603,8 +651,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pud_page(pud) \
- pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
+#define pud_page(pud) pfn_to_page(pud_pfn(pud))
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -644,7 +691,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
@@ -926,6 +973,14 @@ static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
}
#endif
+#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
+extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return boot_cpu_has_bug(X86_BUG_L1TF);
+}
+
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index c810226e741a68..221a32ed13727d 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -163,18 +163,52 @@ static inline int pgd_large(pgd_t pgd) { return 0; }
#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
#define pte_unmap(pte) ((void)(pte))/* NOP */
-/* Encode and de-code a swap entry */
-#define SWP_TYPE_BITS 5
-#define SWP_OFFSET_SHIFT (_PAGE_BIT_PROTNONE + 1)
+/*
+ * Encode and de-code a swap entry
+ *
+ * |     ...            | 11| 10|  9|8|7|6|5| 4| 3|2| 1|0| <- bit number
+ * |     ...            |SW3|SW2|SW1|G|L|D|A|CD|WT|U| W|P| <- bit names
+ * | TYPE (59-63) | ~OFFSET (9-58)  |0|0|X|X| X| X|X|SD|0| <- swp entry
+ *
+ * G (8) is aliased and used as a PROT_NONE indicator for
+ * !present ptes. We need to start storing swap entries above
+ * there. We also need to avoid using A and D because of an
+ * erratum where they can be incorrectly set by hardware on
+ * non-present PTEs.
+ *
+ * SD (1) in swp entry is used to store soft dirty bit, which helps us
+ * remember soft dirty over page migration
+ *
+ * Bit 7 in swp entry should be 0 because pmd_present checks not only P,
+ * but also L and G.
+ *
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
+ */
+#define SWP_TYPE_BITS 5
+
+#define SWP_OFFSET_FIRST_BIT (_PAGE_BIT_PROTNONE + 1)
+
+/* We always extract/encode the offset by shifting it all the way up, and then down again */
+#define SWP_OFFSET_SHIFT (SWP_OFFSET_FIRST_BIT+SWP_TYPE_BITS)
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS)
-#define __swp_type(x) (((x).val >> (_PAGE_BIT_PRESENT + 1)) \
- & ((1U << SWP_TYPE_BITS) - 1))
-#define __swp_offset(x) ((x).val >> SWP_OFFSET_SHIFT)
-#define __swp_entry(type, offset) ((swp_entry_t) { \
- ((type) << (_PAGE_BIT_PRESENT + 1)) \
- | ((offset) << SWP_OFFSET_SHIFT) })
+/* Extract the high bits for type */
+#define __swp_type(x) ((x).val >> (64 - SWP_TYPE_BITS))
+
+/* Shift up (to get rid of type), then down to get value */
+#define __swp_offset(x) (~(x).val << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT)
+
+/*
+ * Shift the offset up "too far" by TYPE bits, then down again
+ * The offset is inverted by a binary not operation to make the high
+ * physical bits set.
+ */
+#define __swp_entry(type, offset) ((swp_entry_t) { \
+ (~(unsigned long)(offset) << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS) \
+ | ((unsigned long)(type) << (64-SWP_TYPE_BITS)) })
+
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) ((pte_t) { .pte = (x).val })
@@ -201,6 +235,8 @@ extern void cleanup_highmap(void);
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+#include <asm/pgtable-invert.h>
+
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_64_H */
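
The encode/decode comments above can be sanity-checked outside the kernel. A stand-alone model, assuming _PAGE_BIT_PROTNONE == 8 (so offsets start at bit 9, matching pgtable_types.h):

/* Model of the 64-bit swap entry encoding: type in the top 5 bits,
 * offset stored inverted in bits 9-58, low bits (incl. P) left clear. */
#include <assert.h>
#include <stdint.h>

#define SWP_TYPE_BITS		5
#define SWP_OFFSET_FIRST_BIT	9	/* _PAGE_BIT_PROTNONE + 1, assumed */
#define SWP_OFFSET_SHIFT	(SWP_OFFSET_FIRST_BIT + SWP_TYPE_BITS)

static uint64_t swp_entry(uint64_t type, uint64_t offset)
{
	/* shift up "too far" by TYPE bits, then down; offset goes in inverted */
	return (~offset << SWP_OFFSET_SHIFT >> SWP_TYPE_BITS)
	     | (type << (64 - SWP_TYPE_BITS));
}

int main(void)
{
	uint64_t e = swp_entry(7, 0xABCDE);

	assert((e >> (64 - SWP_TYPE_BITS)) == 7);	/* __swp_type() */
	assert((~e << SWP_TYPE_BITS >> SWP_OFFSET_SHIFT) == 0xABCDE); /* __swp_offset() */
	assert((e & 1) == 0);	/* P clear: can never look like a present pte */
	return 0;
}
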
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 8dba273da25a6e..7572ce32055eba 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -70,15 +70,15 @@
/*
* Tracking soft dirty bit when a page goes to a swap is tricky.
* We need a bit which can be stored in pte _and_ not conflict
- * with swap entry format. On x86 bits 6 and 7 are *not* involved
- * into swap entry computation, but bit 6 is used for nonlinear
- * file mapping, so we borrow bit 7 for soft dirty tracking.
+ * with swap entry format. On x86 bits 1-4 are *not* involved
+ * in swap entry computation, but bit 7 is used for thp migration,
+ * so we borrow bit 1 for soft dirty tracking.
*
* Please note that this bit must be treated as swap dirty page
- * mark if and only if the PTE has present bit clear!
+ * mark if and only if the PTE/PMD has present bit clear!
*/
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index fe3d9be6f84358..b04904353f9eb1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -13,7 +13,7 @@ struct vm86;
#include <asm/types.h>
#include <uapi/asm/sigcontext.h>
#include <asm/current.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/page.h>
#include <asm/pgtable_types.h>
#include <asm/percpu.h>
@@ -24,7 +24,6 @@ struct vm86;
#include <asm/fpu/types.h>
#include <linux/personality.h>
-#include <linux/cpumask.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
@@ -105,6 +104,8 @@ struct cpuinfo_x86 {
__u8 x86_phys_bits;
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
+
+ __u8 x86_cache_bits;
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */
@@ -173,6 +174,11 @@ extern const struct seq_operations cpuinfo_op;
extern void cpu_detect(struct cpuinfo_x86 *c);
+static inline unsigned long long l1tf_pfn_limit(void)
+{
+ return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+}
+
extern void early_cpu_init(void);
extern void identify_boot_cpu(void);
extern void identify_secondary_cpu(struct cpuinfo_x86 *);
@@ -369,6 +375,10 @@ extern unsigned int xstate_size;
struct perf_event;
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
struct thread_struct {
/* Cached TLS descriptors: */
struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
@@ -382,9 +392,9 @@ struct thread_struct {
unsigned short fsindex;
unsigned short gsindex;
#endif
-#ifdef CONFIG_X86_32
- unsigned long ip;
-#endif
+
+ u32 status; /* thread synchronous flags */
+
#ifdef CONFIG_X86_64
unsigned long fs;
#endif
@@ -410,6 +420,11 @@ struct thread_struct {
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
+ mm_segment_t addr_limit;
+
+ unsigned int sig_on_uaccess_err:1;
+ unsigned int uaccess_err:1; /* uaccess failed */
+
/* Floating point and extended processor state */
struct fpu fpu;
/*
@@ -419,6 +434,15 @@ struct thread_struct {
};
/*
+ * Thread-synchronous status.
+ *
+ * This is different from the flags in that nobody else
+ * ever touches our thread-synchronous status, so we don't
+ * have to worry about atomic accesses.
+ */
+#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT) */
+
+/*
* Set IOPL bits in EFLAGS from given mask
*/
static inline void native_set_iopl_mask(unsigned mask)
@@ -482,11 +506,6 @@ static inline void load_sp0(struct tss_struct *tss,
#define set_iopl_mask native_set_iopl_mask
#endif /* CONFIG_PARAVIRT */
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
@@ -708,6 +727,7 @@ static inline void spin_lock_prefetch(const void *x)
.sp0 = TOP_OF_INIT_STACK, \
.sysenter_cs = __KERNEL_CS, \
.io_bitmap_ptr = NULL, \
+ .addr_limit = KERNEL_DS, \
}
extern unsigned long thread_saved_pc(struct task_struct *tsk);
@@ -757,8 +777,9 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE_MAX
-#define INIT_THREAD { \
- .sp0 = TOP_OF_INIT_STACK \
+#define INIT_THREAD { \
+ .sp0 = TOP_OF_INIT_STACK, \
+ .addr_limit = KERNEL_DS, \
}
/*
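
A worked example for l1tf_pfn_limit() above, assuming x86_cache_bits == 36 (an illustrative value; the enumeration code derives the real one): half of the 64 GiB the L1D can tag is the 32 GiB mark, i.e. 2^23 four-KiB PFNs.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define BIT_ULL(n)	(1ULL << (n))

int main(void)
{
	unsigned int x86_cache_bits = 36;	/* assumed, for illustration */
	uint64_t limit = BIT_ULL(x86_cache_bits - 1 - PAGE_SHIFT);

	assert(limit == (1ULL << 23));			/* 2^23 PFNs */
	assert((limit << PAGE_SHIFT) == (32ULL << 30));	/* 32 GiB boundary */
	return 0;
}
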
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index 5c6e4fb370f5aa..6847d85400a8b7 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -92,5 +92,15 @@
#define REQUIRED_MASK7 0
#define REQUIRED_MASK8 0
#define REQUIRED_MASK9 0
+#define REQUIRED_MASK10 0
+#define REQUIRED_MASK11 0
+#define REQUIRED_MASK12 0
+#define REQUIRED_MASK13 0
+#define REQUIRED_MASK14 0
+#define REQUIRED_MASK15 0
+#define REQUIRED_MASK16 0
+#define REQUIRED_MASK17 0
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/arch/x86/include/asm/sections.h b/arch/x86/include/asm/sections.h
index 0a524242865904..13b6cdd0af5704 100644
--- a/arch/x86/include/asm/sections.h
+++ b/arch/x86/include/asm/sections.h
@@ -7,7 +7,7 @@
extern char __brk_base[], __brk_limit[];
extern struct exception_table_entry __stop___ex_table[];
-#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+#if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[];
#endif
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index ba665ebd17bb8f..db333300bd4be1 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -15,7 +15,7 @@
#include <linux/stringify.h>
#include <asm/nops.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
/* "Raw" instruction opcodes */
#define __ASM_CLAC .byte 0x0f,0x01,0xca
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 222a6a3ca2b5eb..7e201dba8e556b 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -16,20 +16,10 @@
#endif
#include <asm/thread_info.h>
#include <asm/cpumask.h>
-#include <asm/cpufeature.h>
extern int smp_num_siblings;
extern unsigned int num_processors;
-static inline bool cpu_has_ht_siblings(void)
-{
- bool has_siblings = false;
-#ifdef CONFIG_SMP
- has_siblings = cpu_has_ht && smp_num_siblings > 1;
-#endif
- return has_siblings;
-}
-
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
/* cpus sharing the last level cache: */
@@ -184,12 +174,6 @@ extern int safe_smp_processor_id(void);
#elif defined(CONFIG_X86_64_SMP)
#define raw_smp_processor_id() (this_cpu_read(cpu_number))
-#define stack_smp_processor_id() \
-({ \
- struct thread_info *ti; \
- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
- ti->cpu; \
-})
#define safe_smp_processor_id() smp_processor_id()
#endif
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h
new file mode 100644
index 00000000000000..ae7c2c5cd7f0e2
--- /dev/null
+++ b/arch/x86/include/asm/spec-ctrl.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_SPECCTRL_H_
+#define _ASM_X86_SPECCTRL_H_
+
+#include <linux/thread_info.h>
+#include <asm/nospec-branch.h>
+
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter and also
+ * the guest's version of VIRT_SPEC_CTRL, if emulated.
+ */
+extern void x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool guest);
+
+/**
+ * x86_spec_ctrl_set_guest - Set speculation control registers for the guest
+ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ * (may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, true);
+}
+
+/**
+ * x86_spec_ctrl_restore_host - Restore host speculation control registers
+ * @guest_spec_ctrl: The guest content of MSR_SPEC_CTRL
+ * @guest_virt_spec_ctrl: The guest controlled bits of MSR_VIRT_SPEC_CTRL
+ * (may get translated to MSR_AMD64_LS_CFG bits)
+ *
+ * Avoids writing to the MSR if the content/bits are the same
+ */
+static inline
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl)
+{
+ x86_virt_spec_ctrl(guest_spec_ctrl, guest_virt_spec_ctrl, false);
+}
+
+/* AMD specific Speculative Store Bypass MSR data */
+extern u64 x86_amd_ls_cfg_base;
+extern u64 x86_amd_ls_cfg_ssbd_mask;
+
+static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn)
+{
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+ return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl)
+{
+ BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT);
+ return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
+}
+
+static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn)
+{
+ return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL;
+}
+
+#ifdef CONFIG_SMP
+extern void speculative_store_bypass_ht_init(void);
+#else
+static inline void speculative_store_bypass_ht_init(void) { }
+#endif
+
+extern void speculative_store_bypass_update(unsigned long tif);
+
+static inline void speculative_store_bypass_update_current(void)
+{
+ speculative_store_bypass_update(current_thread_info()->flags);
+}
+
+#endif
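
The two TIF/SPEC_CTRL shift helpers above are exact inverses on the SSBD bit. With TIF_SSBD = 5 (thread_info.h) and SPEC_CTRL_SSBD_SHIFT = 2 (msr-index.h) as in this series, the conversion is a shift by 3, checked stand-alone below:

/* Model of ssbd_tif_to_spec_ctrl() / ssbd_spec_ctrl_to_tif(). */
#include <assert.h>
#include <stdint.h>

#define TIF_SSBD		5
#define _TIF_SSBD		(1UL << TIF_SSBD)
#define SPEC_CTRL_SSBD_SHIFT	2
#define SPEC_CTRL_SSBD		(1ULL << SPEC_CTRL_SSBD_SHIFT)

int main(void)
{
	uint64_t tifn = _TIF_SSBD | 0x3;	/* unrelated low TIF bits set */

	uint64_t msr = (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	assert(msr == SPEC_CTRL_SSBD);		/* only the SSBD bit survives */

	uint64_t back = (msr & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
	assert(back == _TIF_SSBD);		/* and it round-trips */
	return 0;
}
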
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index 32557fe5d78846..48eca27d2a1265 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -8,13 +8,14 @@
#include <linux/uaccess.h>
#include <linux/ptrace.h>
+#include <asm/switch_to.h>
extern int kstack_depth_to_print;
struct thread_info;
struct stacktrace_ops;
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
unsigned long *stack,
unsigned long bp,
const struct stacktrace_ops *ops,
@@ -23,13 +24,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
int *graph);
extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph);
@@ -70,8 +71,7 @@ stack_frame(struct task_struct *task, struct pt_regs *regs)
return bp;
}
- /* bp is the last reg pushed by switch_to */
- return *(unsigned long *)task->thread.sp;
+ return ((struct inactive_task_frame *)task->thread.sp)->bp;
}
#else
static inline unsigned long
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index 3d3e8353ee5c09..e9ee84873de50a 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -142,7 +142,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
}
#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *, const void *, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
@@ -195,11 +197,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len)
#endif
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t n);
+extern int memcmp(const void *, const void *, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#define memcmp __builtin_memcmp
+#endif
#define __HAVE_ARCH_MEMCHR
extern void *memchr(const void *cs, int c, size_t count);
@@ -321,6 +327,8 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
: __memset_generic((s), (c), (count)))
#define __HAVE_ARCH_MEMSET
+extern void *memset(void *, int, size_t);
+#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ >= 4)
#define memset(s, c, count) __builtin_memset(s, c, count)
#else
@@ -330,6 +338,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern,
(count)) \
: __memset((s), (c), (count)))
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
/*
* find the first occurrence of byte 'c', or 1 past the area if none
diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h
index ff8b9a17dc4b2d..e458a79c284482 100644
--- a/arch/x86/include/asm/string_64.h
+++ b/arch/x86/include/asm/string_64.h
@@ -30,6 +30,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);
+#ifndef CONFIG_FORTIFY_SOURCE
#ifndef CONFIG_KMEMCHECK
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len) \
@@ -50,6 +51,7 @@ extern void *__memcpy(void *to, const void *from, size_t len);
*/
#define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len))
#endif
+#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
@@ -76,6 +78,11 @@ int strcmp(const char *cs, const char *ct);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif
#endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 025ecfaba9c9ed..4ad9bdf1e53517 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -4,14 +4,16 @@
#include <asm/nospec-branch.h>
struct task_struct; /* one of the stranger aspects of C forward declarations */
+
+struct task_struct *__switch_to_asm(struct task_struct *prev,
+ struct task_struct *next);
+
__visible struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next);
+ struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss);
-#ifdef CONFIG_X86_32
-
#ifdef CONFIG_CC_STACKPROTECTOR
#define __switch_canary \
"movl %P[task_canary](%[next]), %%ebx\n\t" \
@@ -26,99 +28,26 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
#define __switch_canary_iparam
#endif /* CC_STACKPROTECTOR */
-#ifdef CONFIG_RETPOLINE
- /*
- * When switching from a shallower to a deeper call stack
- * the RSB may either underflow or use entries populated
- * with userspace addresses. On CPUs where those concerns
- * exist, overwrite the RSB with entries which capture
- * speculative execution to prevent attack.
- */
-#define __retpoline_fill_return_buffer \
- ALTERNATIVE("jmp 910f", \
- __stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
- X86_FEATURE_RSB_CTXSW) \
- "910:\n\t"
+/* data that is pointed to by thread.sp */
+struct inactive_task_frame {
+#ifdef CONFIG_X86_64
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
#else
-#define __retpoline_fill_return_buffer
+ unsigned long si;
+ unsigned long di;
#endif
+ unsigned long bx;
+ unsigned long bp;
+ unsigned long ret_addr;
+};
-/*
- * Saving eflags is important. It switches not only IOPL between tasks,
- * it also protects other tasks from NT leaking through sysenter etc.
- */
-#define switch_to(prev, next, last) \
-do { \
- /* \
- * Context-switching clobbers all registers, so we clobber \
- * them explicitly, via unused output variables. \
- * (EAX and EBP is not listed because EBP is saved/restored \
- * explicitly for wchan access and EAX is the return value of \
- * __switch_to()) \
- */ \
- unsigned long ebx, ecx, edx, esi, edi; \
- \
- asm volatile("pushfl\n\t" /* save flags */ \
- "pushl %%ebp\n\t" /* save EBP */ \
- "movl %%esp,%[prev_sp]\n\t" /* save ESP */ \
- "movl %[next_sp],%%esp\n\t" /* restore ESP */ \
- "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
- "pushl %[next_ip]\n\t" /* restore EIP */ \
- __switch_canary \
- __retpoline_fill_return_buffer \
- "jmp __switch_to\n" /* regparm call */ \
- "1:\t" \
- "popl %%ebp\n\t" /* restore EBP */ \
- "popfl\n" /* restore flags */ \
- \
- /* output parameters */ \
- : [prev_sp] "=m" (prev->thread.sp), \
- [prev_ip] "=m" (prev->thread.ip), \
- "=a" (last), \
- \
- /* clobbered output registers: */ \
- "=b" (ebx), "=c" (ecx), "=d" (edx), \
- "=S" (esi), "=D" (edi) \
- \
- __switch_canary_oparam \
- \
- /* input parameters: */ \
- : [next_sp] "m" (next->thread.sp), \
- [next_ip] "m" (next->thread.ip), \
- \
- /* regparm parameters for __switch_to(): */ \
- [prev] "a" (prev), \
- [next] "d" (next) \
- \
- __switch_canary_iparam \
- \
- : /* reloaded segment registers */ \
- "memory"); \
-} while (0)
-
-#else /* CONFIG_X86_32 */
-
-/* frame pointer must be last for get_wchan */
-#define SAVE_CONTEXT "pushq %%rbp ; movq %%rsi,%%rbp\n\t"
-#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp\t"
-
-#define __EXTRA_CLOBBER \
- , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
- "r12", "r13", "r14", "r15", "flags"
-
-#ifdef CONFIG_CC_STACKPROTECTOR
-#define __switch_canary \
- "movq %P[task_canary](%%rsi),%%r8\n\t" \
- "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
-#define __switch_canary_oparam \
- , [gs_canary] "=m" (irq_stack_union.stack_canary)
-#define __switch_canary_iparam \
- , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
-#else /* CC_STACKPROTECTOR */
-#define __switch_canary
-#define __switch_canary_oparam
-#define __switch_canary_iparam
-#endif /* CC_STACKPROTECTOR */
+struct fork_frame {
+ struct inactive_task_frame frame;
+ struct pt_regs regs;
+};
#ifdef CONFIG_RETPOLINE
/*
@@ -130,42 +59,16 @@ do { \
*/
#define __retpoline_fill_return_buffer \
ALTERNATIVE("jmp 910f", \
- __stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
+ __stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
X86_FEATURE_RSB_CTXSW) \
"910:\n\t"
#else
#define __retpoline_fill_return_buffer
#endif
-/*
- * There is no need to save or restore flags, because flags are always
- * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
- * has no effect.
- */
-#define switch_to(prev, next, last) \
- asm volatile(SAVE_CONTEXT \
- "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
- "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
- "call __switch_to\n\t" \
- "movq "__percpu_arg([current_task])",%%rsi\n\t" \
- __switch_canary \
- __retpoline_fill_return_buffer \
- "movq %P[thread_info](%%rsi),%%r8\n\t" \
- "movq %%rax,%%rdi\n\t" \
- "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
- "jnz ret_from_fork\n\t" \
- RESTORE_CONTEXT \
- : "=a" (last) \
- __switch_canary_oparam \
- : [next] "S" (next), [prev] "D" (prev), \
- [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
- [ti_flags] "i" (offsetof(struct thread_info, flags)), \
- [_tif_fork] "i" (_TIF_FORK), \
- [thread_info] "i" (offsetof(struct task_struct, stack)), \
- [current_task] "m" (current_task) \
- __switch_canary_iparam \
- : "memory", "cc" __EXTRA_CLOBBER)
-
-#endif /* CONFIG_X86_32 */
+#define switch_to(prev, next, last) \
+do { \
+ ((last) = __switch_to_asm((prev), (next))); \
+} while (0)
#endif /* _ASM_X86_SWITCH_TO_H */
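
The new switch_to() only works if a child's stack is primed so that __switch_to_asm pops a valid inactive_task_frame and "returns" to ret_from_fork. A hedged sketch of the copy_thread() side (shape follows the upstream rewrite; register setup and error handling are omitted):

asmlinkage extern void ret_from_fork(void);

static void sketch_prime_child_stack(struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);
	struct fork_frame *fork_frame =
		container_of(childregs, struct fork_frame, regs);
	struct inactive_task_frame *frame = &fork_frame->frame;

	frame->bp = 0;				/* terminate frame-pointer walks */
	frame->ret_addr = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)fork_frame;	/* what __switch_to_asm pops */
}
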
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 1d127abdd9e82b..3f2f06a5e9ea80 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -61,7 +61,7 @@ static inline long syscall_get_error(struct task_struct *task,
* TS_COMPAT is set for 32-bit syscall entries and then
* remains set until we return to user mode.
*/
- if (task_thread_info(task)->status & TS_COMPAT)
+ if (task->thread.status & (TS_COMPAT|TS_I386_REGS_POKED))
/*
* Sign-extend the value so (int)-EFOO becomes (long)-EFOO
* and will match correctly in comparisons.
@@ -117,7 +117,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long *args)
{
# ifdef CONFIG_IA32_EMULATION
- if (task_thread_info(task)->status & TS_COMPAT)
+ if (task->thread.status & TS_COMPAT)
switch (i) {
case 0:
if (!n--) break;
@@ -178,7 +178,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
const unsigned long *args)
{
# ifdef CONFIG_IA32_EMULATION
- if (task_thread_info(task)->status & TS_COMPAT)
+ if (task->thread.status & TS_COMPAT)
switch (i) {
case 0:
if (!n--) break;
@@ -235,21 +235,8 @@ static inline void syscall_set_arguments(struct task_struct *task,
static inline int syscall_get_arch(void)
{
-#ifdef CONFIG_IA32_EMULATION
- /*
- * TS_COMPAT is set for 32-bit syscall entry and then
- * remains set until we return to user mode.
- *
- * TIF_IA32 tasks should always have TS_COMPAT set at
- * system call time.
- *
- * x32 tasks should be considered AUDIT_ARCH_X86_64.
- */
- if (task_thread_info(current)->status & TS_COMPAT)
- return AUDIT_ARCH_I386;
-#endif
- /* Both x32 and x86_64 are considered "64-bit". */
- return AUDIT_ARCH_X86_64;
+ /* x32 tasks should be considered AUDIT_ARCH_X86_64. */
+ return in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;
}
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 7772991fe06453..e924704201bf9a 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -53,17 +53,13 @@ struct task_struct;
typedef asmlinkage long (*ti_sys_call_ptr_t)(unsigned long, unsigned long,
unsigned long, unsigned long,
unsigned long, unsigned long);
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <linux/atomic.h>
struct thread_info {
struct task_struct *task; /* main task structure */
__u32 flags; /* low level flags */
- __u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
- mm_segment_t addr_limit;
- unsigned int sig_on_uaccess_error:1;
- unsigned int uaccess_err:1; /* uaccess failed */
#ifdef CONFIG_ALT_SYSCALL
/*
* This uses nr_syscalls instead of nr_syscall_max because we want
@@ -101,11 +97,9 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
- .addr_limit = KERNEL_DS, \
INIT_THREAD_INFO_SYSCALL \
}
-#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
#else /* !__ASSEMBLY__ */
@@ -127,6 +121,7 @@ struct thread_info {
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
+#define TIF_SSBD 5 /* Reduced data speculation */
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -134,7 +129,6 @@ struct thread_info {
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_NOTSC 16 /* TSC is not accessible in userland */
#define TIF_IA32 17 /* IA32 compatibility process */
-#define TIF_FORK 18 /* ret_from_fork */
#define TIF_NOHZ 19 /* in adaptive nohz mode */
#define TIF_MEMDIE 20 /* is terminating due to OOM killer */
#define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
@@ -149,8 +143,9 @@ struct thread_info {
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
-#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
+#define _TIF_SSBD (1 << TIF_SSBD)
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -158,7 +153,6 @@ struct thread_info {
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_NOTSC (1 << TIF_NOTSC)
#define _TIF_IA32 (1 << TIF_IA32)
-#define _TIF_FORK (1 << TIF_FORK)
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
@@ -182,7 +176,7 @@ struct thread_info {
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
- (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP)
+ (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD)
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -196,9 +190,49 @@ struct thread_info {
*/
#ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
+
+/*
+ * Walks up the stack frames to make sure that the specified object is
+ * entirely contained by a single stack frame.
+ *
+ * Returns:
+ * 1 if within a frame
+ * -1 if placed across a frame boundary (or outside stack)
+ * 0 unable to determine (no frame pointers, etc)
+ */
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
{
- return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
+#if defined(CONFIG_FRAME_POINTER)
+ const void *frame = NULL;
+ const void *oldframe;
+
+ oldframe = __builtin_frame_address(1);
+ if (oldframe)
+ frame = __builtin_frame_address(2);
+ /*
+ * low ----------------------------------------------> high
+ * [saved bp][saved ip][args][local vars][saved bp][saved ip]
+ *                     ^----------------^
+ *                     allow copies only within here
+ */
+ while (stack <= frame && frame < stackend) {
+ /*
+ * If obj + len extends past the last frame, this
+ * check won't pass and the next frame will be 0,
+ * causing us to bail out and correctly report
+ * the copy as invalid.
+ */
+ if (obj + len <= frame)
+ return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
+ oldframe = frame;
+ frame = *(const void * const *)frame;
+ }
+ return -1;
+#else
+ return 0;
+#endif
}
#else /* !__ASSEMBLY__ */
@@ -207,86 +241,19 @@ static inline struct thread_info *current_thread_info(void)
# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
#endif
-/* Load thread_info address into "reg" */
-#define GET_THREAD_INFO(reg) \
- _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
- _ASM_SUB $(THREAD_SIZE),reg ;
-
-/*
- * ASM operand which evaluates to a 'thread_info' address of
- * the current task, if it is known that "reg" is exactly "off"
- * bytes below the top of the stack currently.
- *
- * ( The kernel stack's size is known at build time, it is usually
- * 2 or 4 pages, and the bottom of the kernel stack contains
- * the thread_info structure. So to access the thread_info very
- * quickly from assembly code we can calculate down from the
- * top of the kernel stack to the bottom, using constant,
- * build-time calculations only. )
- *
- * For example, to fetch the current thread_info->flags value into %eax
- * on x86-64 defconfig kernels, in syscall entry code where RSP is
- * currently at exactly SIZEOF_PTREGS bytes away from the top of the
- * stack:
- *
- * mov ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS), %eax
- *
- * will translate to:
- *
- * 8b 84 24 b8 c0 ff ff mov -0x3f48(%rsp), %eax
- *
- * which is below the current RSP by almost 16K.
- */
-#define ASM_THREAD_INFO(field, reg, off) ((field)+(off)-THREAD_SIZE)(reg)
-
#endif
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_COMPAT 0x0002 /* 32bit syscall active (64BIT)*/
-#define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */
-
+#ifdef CONFIG_COMPAT
+#define TS_I386_REGS_POKED 0x0004 /* regs poked by 32-bit ptracer */
+#endif
#ifndef __ASSEMBLY__
-#define HAVE_SET_RESTORE_SIGMASK 1
-static inline void set_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- ti->status |= TS_RESTORE_SIGMASK;
- WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
-}
-static inline void clear_restore_sigmask(void)
-{
- current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
-}
-static inline bool test_restore_sigmask(void)
-{
- return current_thread_info()->status & TS_RESTORE_SIGMASK;
-}
-static inline bool test_and_clear_restore_sigmask(void)
-{
- struct thread_info *ti = current_thread_info();
- if (!(ti->status & TS_RESTORE_SIGMASK))
- return false;
- ti->status &= ~TS_RESTORE_SIGMASK;
- return true;
-}
-static inline bool is_ia32_task(void)
-{
#ifdef CONFIG_X86_32
- return true;
-#endif
-#ifdef CONFIG_IA32_EMULATION
- if (current_thread_info()->status & TS_COMPAT)
- return true;
+#define in_ia32_syscall() true
+#else
+#define in_ia32_syscall() (IS_ENABLED(CONFIG_IA32_EMULATION) && \
+ current->thread.status & TS_COMPAT)
#endif
- return false;
-}
/*
* Force syscall return via IRET by making it look as if there was
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index a691b66cc40ac7..72cfe3e53af1aa 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -5,6 +5,7 @@
#include <linux/sched.h>
#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/special_insns.h>
#include <asm/smp.h>
@@ -67,6 +68,8 @@ static inline void invpcid_flush_all_nonglobals(void)
struct tlb_state {
struct mm_struct *active_mm;
int state;
+ /* last user mm's ctx id */
+ u64 last_ctx_id;
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
@@ -108,6 +111,16 @@ static inline void cr4_clear_bits(unsigned long mask)
}
}
+static inline void cr4_toggle_bits(unsigned long mask)
+{
+ unsigned long cr4;
+
+ cr4 = this_cpu_read(cpu_tlbstate.cr4);
+ cr4 ^= mask;
+ this_cpu_write(cpu_tlbstate.cr4, cr4);
+ __write_cr4(cr4);
+}
+
/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
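
last_ctx_id exists so the context-switch path can tell a genuine mm change from the same mm coming back before paying for a predictor barrier. A hedged sketch of the consumer (modeled on the corresponding upstream switch_mm() change; simplified):

static void sketch_switch_mm_ibpb(struct mm_struct *next,
				  struct task_struct *tsk)
{
	u64 last = this_cpu_read(cpu_tlbstate.last_ctx_id);

	/*
	 * Only stuff the branch predictor barrier when switching to a
	 * different, non-dumpable user mm: an equal ctx_id means the
	 * same mm is coming back, so there is nothing new to isolate.
	 */
	if (tsk && tsk->mm && tsk->mm->context.ctx_id != last &&
	    get_dumpable(tsk->mm) != SUID_DUMP_USER)
		indirect_branch_prediction_barrier();

	this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
}
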
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index d788b0cdc0adf1..6d6be45e754317 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -29,12 +29,12 @@
#define USER_DS MAKE_MM_SEG(TASK_SIZE_MAX)
#define get_ds() (KERNEL_DS)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
+#define get_fs() (current->thread.addr_limit)
+#define set_fs(x) (current->thread.addr_limit = (x))
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define user_addr_max() (current_thread_info()->addr_limit.seg)
+#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr) \
((unsigned long __force)(addr) < user_addr_max())
@@ -144,6 +144,14 @@ extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);
+#define __uaccess_begin() stac()
+#define __uaccess_end() clac()
+#define __uaccess_begin_nospec() \
+({ \
+ stac(); \
+ barrier_nospec(); \
+})
+
/*
* This is a type: either unsigned long, if the argument fits into
* that type, or otherwise unsigned long long.
@@ -203,10 +211,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %%eax,0(%2)\n" \
"2: movl %%edx,4(%2)\n" \
- "3: " ASM_CLAC "\n" \
+ "3:" \
".section .fixup,\"ax\"\n" \
"4: movl %3,%0\n" \
" jmp 3b\n" \
@@ -217,10 +225,10 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
: "A" (x), "r" (addr), "i" (errret), "0" (err))
#define __put_user_asm_ex_u64(x, addr) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: movl %%eax,0(%1)\n" \
"2: movl %%edx,4(%1)\n" \
- "3: " ASM_CLAC "\n" \
+ "3:" \
_ASM_EXTABLE_EX(1b, 2b) \
_ASM_EXTABLE_EX(2b, 3b) \
: : "A" (x), "r" (addr))
@@ -306,14 +314,17 @@ do { \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
- errret); \
+ __put_user_asm_u64(x, ptr, retval, errret); \
break; \
default: \
__put_user_bad(); \
} \
} while (0)
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
#define __put_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
@@ -368,9 +379,9 @@ do { \
} while (0)
#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: mov"itype" %2,%"rtype"1\n" \
- "2: " ASM_CLAC "\n" \
+ "2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" xor"itype" %"rtype"1,%"rtype"1\n" \
@@ -380,6 +391,10 @@ do { \
: "=r" (err), ltype(x) \
: "m" (__m(addr)), "i" (errret), "0" (err))
+/*
+ * This doesn't do __uaccess_begin/end - the exception handling
+ * around it must do that.
+ */
#define __get_user_size_ex(x, ptr, size) \
do { \
__chk_user_ptr(ptr); \
@@ -410,7 +425,11 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
- __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __typeof__(*(ptr)) __pu_val; \
+ __pu_val = x; \
+ __uaccess_begin(); \
+ __put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
+ __uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
@@ -418,7 +437,9 @@ do { \
({ \
int __gu_err; \
unsigned long __gu_val; \
+ __uaccess_begin_nospec(); \
__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+ __uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
})
@@ -433,9 +454,9 @@ struct __large_struct { unsigned long buf[100]; };
* aliasing issues.
*/
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
- asm volatile(ASM_STAC "\n" \
+ asm volatile("\n" \
"1: mov"itype" %"rtype"1,%2\n" \
- "2: " ASM_CLAC "\n" \
+ "2:\n" \
".section .fixup,\"ax\"\n" \
"3: mov %3,%0\n" \
" jmp 2b\n" \
@@ -454,13 +475,17 @@ struct __large_struct { unsigned long buf[100]; };
* uaccess_try and catch
*/
#define uaccess_try do { \
- current_thread_info()->uaccess_err = 0; \
- stac(); \
+ current->thread.uaccess_err = 0; \
+ __uaccess_begin(); \
barrier();
+#define uaccess_try_nospec do { \
+ current->thread.uaccess_err = 0; \
+ __uaccess_begin_nospec(); \
+
#define uaccess_catch(err) \
- clac(); \
- (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
+ __uaccess_end(); \
+ (err) |= (current->thread.uaccess_err ? -EFAULT : 0); \
} while (0)
/**
@@ -522,7 +547,7 @@ struct __large_struct { unsigned long buf[100]; };
* get_user_ex(...);
* } get_user_catch(err)
*/
-#define get_user_try uaccess_try
+#define get_user_try uaccess_try_nospec
#define get_user_catch(err) uaccess_catch(err)
#define get_user_ex(x, ptr) do { \
@@ -557,12 +582,13 @@ extern void __cmpxchg_wrong_size(void)
__typeof__(ptr) __uval = (uval); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
+ __uaccess_begin_nospec(); \
switch (size) { \
case 1: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -576,9 +602,9 @@ extern void __cmpxchg_wrong_size(void)
} \
case 2: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -592,9 +618,9 @@ extern void __cmpxchg_wrong_size(void)
} \
case 4: \
{ \
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -611,9 +637,9 @@ extern void __cmpxchg_wrong_size(void)
if (!IS_ENABLED(CONFIG_X86_64)) \
__cmpxchg_wrong_size(); \
\
- asm volatile("\t" ASM_STAC "\n" \
+ asm volatile("\n" \
"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
- "2:\t" ASM_CLAC "\n" \
+ "2:\n" \
"\t.section .fixup, \"ax\"\n" \
"3:\tmov %3, %0\n" \
"\tjmp 2b\n" \
@@ -628,6 +654,7 @@ extern void __cmpxchg_wrong_size(void)
default: \
__cmpxchg_wrong_size(); \
} \
+ __uaccess_end(); \
*__uval = __old; \
__ret; \
})
@@ -724,9 +751,10 @@ copy_from_user(void *to, const void __user *from, unsigned long n)
* case, and do only runtime checking for non-constant sizes.
*/
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
n = _copy_from_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_from_user_overflow();
else
__copy_from_user_overflow(sz, n);
@@ -742,9 +770,10 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
might_fault();
/* See the comment in copy_from_user() above. */
- if (likely(sz < 0 || sz >= n))
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
n = _copy_to_user(to, from, n);
- else if(__builtin_constant_p(n))
+ } else if (__builtin_constant_p(n))
copy_to_user_overflow();
else
__copy_to_user_overflow(sz, n);
@@ -755,5 +784,30 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
#undef __copy_from_user_overflow
#undef __copy_to_user_overflow
+/*
+ * The "unsafe" user accesses aren't really "unsafe", but the naming
+ * is a big fat warning: you have to not only do the access_ok()
+ * checking before using them, but you have to surround them with the
+ * user_access_begin/end() pair.
+ */
+#define user_access_begin() __uaccess_begin()
+#define user_access_end() __uaccess_end()
+
+#define unsafe_put_user(x, ptr, err_label) \
+do { \
+ int __pu_err; \
+ __put_user_size((x), (ptr), sizeof(*(ptr)), __pu_err, -EFAULT); \
+ if (unlikely(__pu_err)) goto err_label; \
+} while (0)
+
+#define unsafe_get_user(x, ptr, err_label) \
+do { \
+ int __gu_err; \
+ unsigned long __gu_val; \
+ __get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ if (unlikely(__gu_err)) goto err_label; \
+} while (0)
+
#endif /* _ASM_X86_UACCESS_H */
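As a usage sketch (illustrative only, not part of the patch; "uptr" and
"put_two_words" are hypothetical names), the pair is meant to bracket a run
of unsafe accesses after a single access_ok() check, with the error label
also closing the region on the fault path:

	static int put_two_words(u32 __user *uptr, u32 val)
	{
		if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
			return -EFAULT;
		user_access_begin();			/* STAC */
		unsafe_put_user(val, &uptr[0], efault);
		unsafe_put_user(val + 1, &uptr[1], efault);
		user_access_end();			/* CLAC */
		return 0;
	efault:
		user_access_end();	/* close the region on the fault path too */
		return -EFAULT;
	}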
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb5204dcd5b..d21eb05b16ebc1 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -43,25 +43,34 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
static __always_inline unsigned long __must_check
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
+ check_object_size(from, n, true);
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__put_user_size(*(u8 *)from, (u8 __user *)to,
1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__put_user_size(*(u16 *)from, (u16 __user *)to,
2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__put_user_size(*(u32 *)from, (u32 __user *)to,
4, ret, 4);
+ __uaccess_end();
return ret;
case 8:
+ __uaccess_begin_nospec();
__put_user_size(*(u64 *)from, (u64 __user *)to,
8, ret, 8);
+ __uaccess_end();
return ret;
}
}
@@ -103,13 +112,19 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -143,18 +158,25 @@ static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_fault();
+ check_object_size(to, n, false);
if (__builtin_constant_p(n)) {
unsigned long ret;
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -170,13 +192,19 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
switch (n) {
case 1:
+ __uaccess_begin_nospec();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin_nospec();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin_nospec();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index f2f9b39b274ab0..411b6156bfeb4e 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -8,7 +8,7 @@
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/page.h>
/*
@@ -53,38 +53,53 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
+ check_object_size(dst, size, false);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
+ case 1:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u8 *)dst, (u8 __user *)src,
ret, "b", "b", "=q", 1);
+ __uaccess_end();
return ret;
- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
+ case 2:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u16 *)dst, (u16 __user *)src,
ret, "w", "w", "=r", 2);
+ __uaccess_end();
return ret;
- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
+ case 4:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u32 *)dst, (u32 __user *)src,
ret, "l", "k", "=r", 4);
+ __uaccess_end();
return ret;
- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
+ case 8:
+ __uaccess_begin_nospec();
+ __get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 8);
+ __uaccess_end();
return ret;
case 10:
+ __uaccess_begin_nospec();
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 10);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u16 *)(8 + (char *)dst),
- (u16 __user *)(8 + (char __user *)src),
- ret, "w", "w", "=r", 2);
+ if (likely(!ret))
+ __get_user_asm(*(u16 *)(8 + (char *)dst),
+ (u16 __user *)(8 + (char __user *)src),
+ ret, "w", "w", "=r", 2);
+ __uaccess_end();
return ret;
case 16:
+ __uaccess_begin_nospec();
__get_user_asm(*(u64 *)dst, (u64 __user *)src,
ret, "q", "", "=r", 16);
- if (unlikely(ret))
- return ret;
- __get_user_asm(*(u64 *)(8 + (char *)dst),
- (u64 __user *)(8 + (char __user *)src),
- ret, "q", "", "=r", 8);
+ if (likely(!ret))
+ __get_user_asm(*(u64 *)(8 + (char *)dst),
+ (u64 __user *)(8 + (char __user *)src),
+ ret, "q", "", "=r", 8);
+ __uaccess_end();
return ret;
default:
return copy_user_generic(dst, (__force void *)src, size);
@@ -103,38 +118,55 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
+ check_object_size(src, size, true);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
+ case 1:
+ __uaccess_begin();
+ __put_user_asm(*(u8 *)src, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
+ __uaccess_end();
return ret;
- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
+ case 2:
+ __uaccess_begin();
+ __put_user_asm(*(u16 *)src, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
+ __uaccess_end();
return ret;
- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
+ case 4:
+ __uaccess_begin();
+ __put_user_asm(*(u32 *)src, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
+ __uaccess_end();
return ret;
- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
+ case 8:
+ __uaccess_begin();
+ __put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 8);
+ __uaccess_end();
return ret;
case 10:
+ __uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 10);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
- ret, "w", "w", "ir", 2);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
+ ret, "w", "w", "ir", 2);
+ }
+ __uaccess_end();
return ret;
case 16:
+ __uaccess_begin();
__put_user_asm(*(u64 *)src, (u64 __user *)dst,
ret, "q", "", "er", 16);
- if (unlikely(ret))
- return ret;
- asm("":::"memory");
- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
- ret, "q", "", "er", 8);
+ if (likely(!ret)) {
+ asm("":::"memory");
+ __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
+ ret, "q", "", "er", 8);
+ }
+ __uaccess_end();
return ret;
default:
return copy_user_generic((__force void *)dst, src, size);
@@ -160,39 +192,47 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
switch (size) {
case 1: {
u8 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u8 __user *)src,
ret, "b", "b", "=q", 1);
if (likely(!ret))
__put_user_asm(tmp, (u8 __user *)dst,
ret, "b", "b", "iq", 1);
+ __uaccess_end();
return ret;
}
case 2: {
u16 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u16 __user *)src,
ret, "w", "w", "=r", 2);
if (likely(!ret))
__put_user_asm(tmp, (u16 __user *)dst,
ret, "w", "w", "ir", 2);
+ __uaccess_end();
return ret;
}
case 4: {
u32 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u32 __user *)src,
ret, "l", "k", "=r", 4);
if (likely(!ret))
__put_user_asm(tmp, (u32 __user *)dst,
ret, "l", "k", "ir", 4);
+ __uaccess_end();
return ret;
}
case 8: {
u64 tmp;
+ __uaccess_begin_nospec();
__get_user_asm(tmp, (u64 __user *)src,
ret, "q", "", "=r", 8);
if (likely(!ret))
__put_user_asm(tmp, (u64 __user *)dst,
ret, "q", "", "er", 8);
+ __uaccess_end();
return ret;
}
default:
diff --git a/arch/x86/include/asm/xor_32.h b/arch/x86/include/asm/xor_32.h
index 5a08bc8bff3393..c54beb44c4c1f2 100644
--- a/arch/x86/include/asm/xor_32.h
+++ b/arch/x86/include/asm/xor_32.h
@@ -553,7 +553,7 @@ do { \
if (cpu_has_xmm) { \
xor_speed(&xor_block_pIII_sse); \
xor_speed(&xor_block_sse_pf64); \
- } else if (cpu_has_mmx) { \
+ } else if (boot_cpu_has(X86_FEATURE_MMX)) { \
xor_speed(&xor_block_pII_mmx); \
xor_speed(&xor_block_p5_mmx); \
} else { \
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index f7ad3932594054..5bdd05cff9e58c 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -52,6 +52,7 @@ obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
+obj-y += irqflags.o
obj-y += process.o
obj-y += fpu/
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index f92ab36979a207..15cfe1b320df6f 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -25,7 +25,7 @@
static struct apic apic_physflat;
static struct apic apic_flat;
-struct apic __read_mostly *apic = &apic_flat;
+struct apic *apic __ro_after_init = &apic_flat;
EXPORT_SYMBOL_GPL(apic);
static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
@@ -154,7 +154,7 @@ static int flat_probe(void)
return 1;
}
-static struct apic apic_flat = {
+static struct apic apic_flat __ro_after_init = {
.name = "flat",
.probe = flat_probe,
.acpi_madt_oem_check = flat_acpi_madt_oem_check,
@@ -259,7 +259,7 @@ static int physflat_probe(void)
return 0;
}
-static struct apic apic_physflat = {
+static struct apic apic_physflat __ro_after_init = {
.name = "physical flat",
.probe = physflat_probe,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index 0d96749cfcacf4..78428ee361f30b 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -108,7 +108,7 @@ static void noop_apic_write(u32 reg, u32 v)
WARN_ON_ONCE(cpu_has_apic && !disable_apic);
}
-struct apic apic_noop = {
+struct apic apic_noop __ro_after_init = {
.name = "noop",
.probe = noop_probe,
.acpi_madt_oem_check = NULL,
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 2bd2292a316d47..bac0805ea1d9d6 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
unsigned long value;
unsigned int id = (x >> 24) & 0xff;
- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
rdmsrl(MSR_FAM10H_NODE_ID, value);
id |= (value << 2) & 0xff00;
}
@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
this_cpu_write(cpu_llc_id, node);
/* Account for nodes per socket in multi-core-module processors */
- if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+ if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
rdmsrl(MSR_FAM10H_NODE_ID, val);
nodes = ((val >> 3) & 7) + 1;
}
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 971cf8875939d1..2e4d303c82b52e 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -147,7 +147,7 @@ static int probe_bigsmp(void)
return dmi_bigsmp;
}
-static struct apic apic_bigsmp = {
+static struct apic apic_bigsmp __ro_after_init = {
.name = "bigsmp",
.probe = probe_bigsmp,
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index 5f1feb6854afe9..98abe62884044b 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -267,7 +267,7 @@ static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
hpet_msi_write(irq_data_get_irq_handler_data(data), msg);
}
-static struct irq_chip hpet_msi_controller = {
+static struct irq_chip hpet_msi_controller __ro_after_init = {
.name = "HPET-MSI",
.irq_unmask = hpet_msi_unmask,
.irq_mask = hpet_msi_mask,
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 7694ae6c1199b3..2d8fd00371424c 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -72,7 +72,7 @@ static int probe_default(void)
return 1;
}
-static struct apic apic_default = {
+static struct apic apic_default __ro_after_init = {
.name = "default",
.probe = probe_default,
@@ -126,7 +126,7 @@ static struct apic apic_default = {
apic_driver(apic_default);
-struct apic *apic = &apic_default;
+struct apic *apic __ro_after_init = &apic_default;
EXPORT_SYMBOL_GPL(apic);
static int cmdline_apic __initdata;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index cc8311c4d29850..00a3bb99b5add4 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -234,7 +234,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
}
-static struct apic apic_x2apic_cluster = {
+static struct apic apic_x2apic_cluster __ro_after_init = {
.name = "cluster x2apic",
.probe = x2apic_cluster_probe,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 662e9150ea6f29..351659f0b27f90 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -90,7 +90,7 @@ static int x2apic_phys_probe(void)
return apic == &apic_x2apic_phys;
}
-static struct apic apic_x2apic_phys = {
+static struct apic apic_x2apic_phys __ro_after_init = {
.name = "physical x2apic",
.probe = x2apic_phys_probe,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 4a139465f1d4f0..a58bdfe9234a50 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -374,7 +374,7 @@ static int uv_probe(void)
return apic == &apic_x2apic_uv_x;
}
-static struct apic __refdata apic_x2apic_uv_x = {
+static struct apic apic_x2apic_uv_x __ro_after_init = {
.name = "UV large system",
.probe = uv_probe,
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 8f68465be175b9..371db2e21395c4 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -29,15 +29,20 @@
void common(void) {
BLANK();
- OFFSET(TI_flags, thread_info, flags);
- OFFSET(TI_status, thread_info, status);
- OFFSET(TI_addr_limit, thread_info, addr_limit);
+ OFFSET(TASK_threadsp, task_struct, thread.sp);
+#ifdef CONFIG_CC_STACKPROTECTOR
+ OFFSET(TASK_stack_canary, task_struct, stack_canary);
+#endif
+
+ BLANK();
+ OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
+ OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
#ifdef CONFIG_ALT_SYSCALL
- OFFSET(TI_nr_syscalls, thread_info, nr_syscalls);
- OFFSET(TI_sys_call_table, thread_info, sys_call_table);
+ OFFSET(TASK_TI_nr_syscalls, task_struct, thread_info.nr_syscalls);
+ OFFSET(TASK_TI_sys_call_table, task_struct, thread_info.sys_call_table);
# ifdef CONFIG_IA32_EMULATION
- OFFSET(TI_ia32_nr_syscalls, thread_info, ia32_nr_syscalls);
- OFFSET(TI_ia32_sys_call_table, thread_info, ia32_sys_call_table);
+ OFFSET(TASK_TI_ia32_nr_syscalls, task_struct, thread_info.ia32_nr_syscalls);
+ OFFSET(TASK_TI_ia32_sys_call_table, task_struct, thread_info.ia32_sys_call_table);
# endif
#endif
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 6ce39025f467fb..ae8590d6c4f786 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -52,6 +52,11 @@ void foo(void)
DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
offsetofend(struct tss_struct, SYSENTER_stack));
+#ifdef CONFIG_CC_STACKPROTECTOR
+ BLANK();
+ OFFSET(stack_canary_offset, stack_canary, canary);
+#endif
+
#if defined(CONFIG_LGUEST) || defined(CONFIG_LGUEST_GUEST) || defined(CONFIG_LGUEST_MODULE)
BLANK();
OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index d8f42f902a0f6a..85e1d134f25c26 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -63,6 +63,11 @@ int main(void)
OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
BLANK();
+#ifdef CONFIG_CC_STACKPROTECTOR
+ DEFINE(stack_canary_offset, offsetof(union irq_stack_union, stack_canary));
+ BLANK();
+#endif
+
DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
DEFINE(NR_syscalls, sizeof(syscalls_64));
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 145863d4d343c4..a8b215865636a1 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -30,6 +30,11 @@ static __init int set_corruption_check(char *arg)
ssize_t ret;
unsigned long val;
+ if (!arg) {
+ pr_err("memory_corruption_check config string not provided\n");
+ return -EINVAL;
+ }
+
ret = kstrtoul(arg, 10, &val);
if (ret)
return ret;
@@ -44,6 +49,11 @@ static __init int set_corruption_check_period(char *arg)
ssize_t ret;
unsigned long val;
+ if (!arg) {
+ pr_err("memory_corruption_check_period config string not provided\n");
+ return -EINVAL;
+ }
+
ret = kstrtoul(arg, 10, &val);
if (ret)
return ret;
@@ -58,6 +68,11 @@ static __init int set_corruption_check_size(char *arg)
char *end;
unsigned size;
+ if (!arg) {
+ pr_err("memory_corruption_check_size config string not provided\n");
+ return -EINVAL;
+ }
+
size = memparse(arg, &end);
if (*end == '\0')
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 606ebe494756cb..1e5184092ee6ac 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,7 +66,7 @@ ifdef CONFIG_X86_FEATURE_NAMES
quiet_cmd_mkcapflags = MKCAP $@
cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
-cpufeature = $(src)/../../include/asm/cpufeature.h
+cpufeature = $(src)/../../include/asm/cpufeatures.h
targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 4bf9e77f3e059a..e94e6f16172b38 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -9,6 +9,7 @@
#include <asm/processor.h>
#include <asm/apic.h>
#include <asm/cpu.h>
+#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/pci-direct.h>
#include <asm/delay.h>
@@ -304,7 +305,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
int cpu = smp_processor_id();
/* get information required for multi-node processors */
- if (cpu_has_topoext) {
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
u32 eax, ebx, ecx, edx;
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
@@ -519,6 +520,26 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_MWAITX))
use_mwaitx_delay();
+
+ if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+ unsigned int bit;
+
+ switch (c->x86) {
+ case 0x15: bit = 54; break;
+ case 0x16: bit = 33; break;
+ case 0x17: bit = 10; break;
+ default: return;
+ }
+ /*
+ * Try to cache the base value so further operations can
+ * avoid RMW. If that faults, do not enable SSBD.
+ */
+ if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
+ setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
+ setup_force_cpu_cap(X86_FEATURE_SSBD);
+ x86_amd_ls_cfg_ssbd_mask = 1ULL << bit;
+ }
+ }
}
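The point of caching the base value (a sketch, not part of the patch): with
the per-family bit chosen above, toggling SSBD later becomes a plain MSR
write instead of a read-modify-write cycle, exactly as x86_amd_ssb_disable()
does in bugs.c below:

	/* enable the mitigation: cached base plus the family-specific bit */
	wrmsrl(MSR_AMD64_LS_CFG, x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask);

	/* disable it again: restore the cached base */
	wrmsrl(MSR_AMD64_LS_CFG, x86_amd_ls_cfg_base);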
static void early_init_amd(struct cpuinfo_x86 *c)
@@ -692,6 +713,15 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
}
}
+static void init_amd_zn(struct cpuinfo_x86 *c)
+{
+ set_cpu_cap(c, X86_FEATURE_ZEN);
+
+ /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+ if (!cpu_has(c, X86_FEATURE_CPB))
+ set_cpu_cap(c, X86_FEATURE_CPB);
+}
+
static void init_amd(struct cpuinfo_x86 *c)
{
u32 dummy;
@@ -722,6 +752,7 @@ static void init_amd(struct cpuinfo_x86 *c)
case 0x10: init_amd_gh(c); break;
case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
+ case 0x17: init_amd_zn(c); break;
}
/* Enable workaround for FXSAVE leak */
@@ -791,8 +822,9 @@ static void init_amd(struct cpuinfo_x86 *c)
if (cpu_has(c, X86_FEATURE_3DNOW) || cpu_has(c, X86_FEATURE_LM))
set_cpu_cap(c, X86_FEATURE_3DNOWPREFETCH);
- /* AMD CPUs don't reset SS attributes on SYSRET */
- set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+ /* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
+ if (!cpu_has(c, X86_FEATURE_XENPV))
+ set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
}
#ifdef CONFIG_X86_32
@@ -954,7 +986,7 @@ static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
void set_dr_addr_mask(unsigned long mask, int dr)
{
- if (!cpu_has_bpext)
+ if (!boot_cpu_has(X86_FEATURE_BPEXT))
return;
switch (dr) {
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 2bbc74f8a4a831..621bc656118989 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -11,8 +11,10 @@
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
-#include <asm/nospec-branch.h>
+#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
@@ -24,8 +26,31 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
+#include <asm/e820.h>
static void __init spectre_v2_select_mitigation(void);
+static void __init ssb_select_mitigation(void);
+static void __init l1tf_select_mitigation(void);
+
+/*
+ * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
+ * writes to SPEC_CTRL contain whatever reserved bits have been set.
+ */
+u64 x86_spec_ctrl_base;
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+
+/*
+ * The vendor and possibly platform specific bits which can be modified in
+ * x86_spec_ctrl_base.
+ */
+static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
+
+/*
+ * AMD specific MSR info for Speculative Store Bypass control.
+ * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
+ */
+u64 x86_amd_ls_cfg_base;
+u64 x86_amd_ls_cfg_ssbd_mask;
void __init check_bugs(void)
{
@@ -36,9 +61,29 @@ void __init check_bugs(void)
print_cpu_info(&boot_cpu_data);
}
+ /*
+ * Read the SPEC_CTRL MSR to account for reserved bits which may
+ * have unknown values. The AMD64_LS_CFG MSR is cached in the early AMD
+ * init code, as it is not enumerated and depends on the family.
+ */
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ /* Allow STIBP in MSR_SPEC_CTRL if supported */
+ if (boot_cpu_has(X86_FEATURE_STIBP))
+ x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
+
/* Select the proper spectre mitigation before patching alternatives */
spectre_v2_select_mitigation();
+ /*
+ * Select proper mitigation for any exposure to the Speculative Store
+ * Bypass vulnerability.
+ */
+ ssb_select_mitigation();
+
+ l1tf_select_mitigation();
+
#ifdef CONFIG_X86_32
/*
* Check whether we are able to run this kernel safely on SMP.
@@ -94,6 +139,73 @@ static const char *spectre_v2_strings[] = {
static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
+void
+x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
+{
+ u64 msrval, guestval, hostval = x86_spec_ctrl_base;
+ struct thread_info *ti = current_thread_info();
+
+ /* Is MSR_SPEC_CTRL implemented ? */
+ if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+ /*
+ * Restrict guest_spec_ctrl to supported values. Clear the
+ * modifiable bits in the host base value and OR in the
+ * modifiable bits from the guest value.
+ */
+ guestval = hostval & ~x86_spec_ctrl_mask;
+ guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
+ /* SSBD controlled in MSR_SPEC_CTRL */
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
+
+ if (hostval != guestval) {
+ msrval = setguest ? guestval : hostval;
+ wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
+ }
+ }
+
+ /*
+ * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
+ * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
+ */
+ if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
+ !static_cpu_has(X86_FEATURE_VIRT_SSBD))
+ return;
+
+ /*
+ * If the host has SSBD mitigation enabled, force it in the host's
+ * virtual MSR value. If it's not permanently enabled, evaluate
+ * current's TIF_SSBD thread flag.
+ */
+ if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
+ hostval = SPEC_CTRL_SSBD;
+ else
+ hostval = ssbd_tif_to_spec_ctrl(ti->flags);
+
+ /* Sanitize the guest value */
+ guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
+
+ if (hostval != guestval) {
+ unsigned long tif;
+
+ tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
+ ssbd_spec_ctrl_to_tif(hostval);
+
+ speculative_store_bypass_update(tif);
+ }
+}
+EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
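The intended caller is the hypervisor's world-switch path; a KVM-style
sketch (illustrative; "guest_sc" and "guest_vsc" are hypothetical locals
holding the guest's SPEC_CTRL and VIRT_SPEC_CTRL images):

	/* before VM entry: switch to the guest's view of the MSRs */
	x86_virt_spec_ctrl(guest_sc, guest_vsc, true);

	/* ... enter and run the guest ... */

	/* after VM exit: switch back to the host's view */
	x86_virt_spec_ctrl(guest_sc, guest_vsc, false);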
+
+static void x86_amd_ssb_disable(void)
+{
+ u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
+
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
+ else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ wrmsrl(MSR_AMD64_LS_CFG, msrval);
+}
#ifdef RETPOLINE
static bool spectre_v2_bad_module;
@@ -162,8 +274,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
return SPECTRE_V2_CMD_NONE;
else {
- ret = cmdline_find_option(boot_command_line, "spectre_v2", arg,
- sizeof(arg));
+ ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
if (ret < 0)
return SPECTRE_V2_CMD_AUTO;
@@ -184,8 +295,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
!IS_ENABLED(CONFIG_RETPOLINE)) {
- pr_err("%s selected but not compiled in. Switching to AUTO select\n",
- mitigation_options[i].option);
+ pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
@@ -203,23 +313,6 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
return cmd;
}
-/* Check for Skylake-like CPUs (for RSB handling) */
-static bool __init is_skylake_era(void)
-{
- if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
- boot_cpu_data.x86 == 6) {
- switch (boot_cpu_data.x86_model) {
- case INTEL_FAM6_SKYLAKE_MOBILE:
- case INTEL_FAM6_SKYLAKE_DESKTOP:
- case INTEL_FAM6_SKYLAKE_X:
- case INTEL_FAM6_KABYLAKE_MOBILE:
- case INTEL_FAM6_KABYLAKE_DESKTOP:
- return true;
- }
- }
- return false;
-}
-
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -255,14 +348,14 @@ static void __init spectre_v2_select_mitigation(void)
goto retpoline_auto;
break;
}
- pr_err("kernel not compiled with retpoline; no mitigation available!");
+ pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
return;
retpoline_auto:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
retpoline_amd:
if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
- pr_err("LFENCE not serializing. Switching to generic retpoline\n");
+ pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
goto retpoline_generic;
}
mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
@@ -280,52 +373,396 @@ retpoline_auto:
pr_info("%s\n", spectre_v2_strings[mode]);
/*
- * If neither SMEP or KPTI are available, there is a risk of
- * hitting userspace addresses in the RSB after a context switch
- * from a shallow call stack to a deeper one. To prevent this fill
- * the entire RSB, even when using IBRS.
+ * If spectre v2 protection has been enabled, unconditionally fill
+ * RSB during a context switch; this protects against two independent
+ * issues:
*
- * Skylake era CPUs have a separate issue with *underflow* of the
- * RSB, when they will predict 'ret' targets from the generic BTB.
- * The proper mitigation for this is IBRS. If IBRS is not supported
- * or deactivated in favour of retpolines the RSB fill on context
- * switch is required.
+ * - RSB underflow (and switch to BTB) on Skylake+
+ * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
*/
- if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
- !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
- setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
- pr_info("Filling RSB on context switch\n");
+ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
+ pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
+
+ /* Initialize Indirect Branch Prediction Barrier if supported */
+ if (boot_cpu_has(X86_FEATURE_IBPB)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
+ }
+
+ /*
+ * Retpoline means the kernel is safe because it has no indirect
+ * branches. But firmware isn't, so use IBRS to protect that.
+ */
+ if (boot_cpu_has(X86_FEATURE_IBRS)) {
+ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
+ pr_info("Enabling Restricted Speculation for firmware calls\n");
}
}
#undef pr_fmt
+#define pr_fmt(fmt) "Speculative Store Bypass: " fmt
+
+static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;
+
+/* The kernel command line selection */
+enum ssb_mitigation_cmd {
+ SPEC_STORE_BYPASS_CMD_NONE,
+ SPEC_STORE_BYPASS_CMD_AUTO,
+ SPEC_STORE_BYPASS_CMD_ON,
+ SPEC_STORE_BYPASS_CMD_PRCTL,
+ SPEC_STORE_BYPASS_CMD_SECCOMP,
+};
+
+static const char *ssb_strings[] = {
+ [SPEC_STORE_BYPASS_NONE] = "Vulnerable",
+ [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled",
+ [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl",
+ [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
+};
+
+static const struct {
+ const char *option;
+ enum ssb_mitigation_cmd cmd;
+} ssb_mitigation_options[] = {
+ { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */
+ { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */
+ { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */
+ { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */
+ { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
+};
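For reference, the resulting boot-time usage these options map to (kernel
command line, not code):

	spec_store_bypass_disable={auto|on|off|prctl|seccomp}
	nospec_store_bypass_disable	(shorthand for "off")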
+
+static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
+{
+ enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
+ char arg[20];
+ int ret, i;
+
+ if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
+ return SPEC_STORE_BYPASS_CMD_NONE;
+ } else {
+ ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
+ arg, sizeof(arg));
+ if (ret < 0)
+ return SPEC_STORE_BYPASS_CMD_AUTO;
+
+ for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
+ if (!match_option(arg, ret, ssb_mitigation_options[i].option))
+ continue;
+
+ cmd = ssb_mitigation_options[i].cmd;
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
+ pr_err("unknown option (%s). Switching to AUTO select\n", arg);
+ return SPEC_STORE_BYPASS_CMD_AUTO;
+ }
+ }
+
+ return cmd;
+}
+
+static enum ssb_mitigation __init __ssb_select_mitigation(void)
+{
+ enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
+ enum ssb_mitigation_cmd cmd;
+
+ if (!boot_cpu_has(X86_FEATURE_SSBD))
+ return mode;
+
+ cmd = ssb_parse_cmdline();
+ if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
+ (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
+ cmd == SPEC_STORE_BYPASS_CMD_AUTO))
+ return mode;
+
+ switch (cmd) {
+ case SPEC_STORE_BYPASS_CMD_AUTO:
+ case SPEC_STORE_BYPASS_CMD_SECCOMP:
+ /*
+ * Choose prctl+seccomp as the default mode if seccomp is
+ * enabled.
+ */
+ if (IS_ENABLED(CONFIG_SECCOMP))
+ mode = SPEC_STORE_BYPASS_SECCOMP;
+ else
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
+ case SPEC_STORE_BYPASS_CMD_ON:
+ mode = SPEC_STORE_BYPASS_DISABLE;
+ break;
+ case SPEC_STORE_BYPASS_CMD_PRCTL:
+ mode = SPEC_STORE_BYPASS_PRCTL;
+ break;
+ case SPEC_STORE_BYPASS_CMD_NONE:
+ break;
+ }
+
+ /*
+ * We have three CPU feature flags that are in play here:
+ * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
+ * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
+ * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
+ */
+ if (mode == SPEC_STORE_BYPASS_DISABLE) {
+ setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
+ /*
+ * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
+ * a completely different MSR and bit dependent on family.
+ */
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
+ x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+ break;
+ case X86_VENDOR_AMD:
+ x86_amd_ssb_disable();
+ break;
+ }
+ }
+
+ return mode;
+}
+
+static void ssb_select_mitigation(void)
+{
+ ssb_mode = __ssb_select_mitigation();
+
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ pr_info("%s\n", ssb_strings[ssb_mode]);
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) "Speculation prctl: " fmt
+
+static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
+{
+ bool update;
+
+ if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
+ ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
+ return -ENXIO;
+
+ switch (ctrl) {
+ case PR_SPEC_ENABLE:
+ /* If speculation is force disabled, enable is not allowed */
+ if (task_spec_ssb_force_disable(task))
+ return -EPERM;
+ task_clear_spec_ssb_disable(task);
+ update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_DISABLE:
+ task_set_spec_ssb_disable(task);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ case PR_SPEC_FORCE_DISABLE:
+ task_set_spec_ssb_disable(task);
+ task_set_spec_ssb_force_disable(task);
+ update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /*
+ * If being set on non-current task, delay setting the CPU
+ * mitigation until it is next scheduled.
+ */
+ if (task == current && update)
+ speculative_store_bypass_update_current();
+
+ return 0;
+}
+
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_set(task, ctrl);
+ default:
+ return -ENODEV;
+ }
+}
+
+#ifdef CONFIG_SECCOMP
+void arch_seccomp_spec_mitigate(struct task_struct *task)
+{
+ if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+ ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+}
+#endif
+
+static int ssb_prctl_get(struct task_struct *task)
+{
+ switch (ssb_mode) {
+ case SPEC_STORE_BYPASS_DISABLE:
+ return PR_SPEC_DISABLE;
+ case SPEC_STORE_BYPASS_SECCOMP:
+ case SPEC_STORE_BYPASS_PRCTL:
+ if (task_spec_ssb_force_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ if (task_spec_ssb_disable(task))
+ return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+ default:
+ if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+ return PR_SPEC_ENABLE;
+ return PR_SPEC_NOT_AFFECTED;
+ }
+}
+
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+{
+ switch (which) {
+ case PR_SPEC_STORE_BYPASS:
+ return ssb_prctl_get(task);
+ default:
+ return -ENODEV;
+ }
+}
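From user space this surfaces through prctl(); a minimal sketch, assuming
the PR_SPEC_* constants from <linux/prctl.h>:

	#include <stdio.h>
	#include <sys/prctl.h>
	#include <linux/prctl.h>

	/* opt the current task out of speculative store bypass */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0) != 0) {
		/* ENXIO means neither the prctl nor the seccomp mode is active */
		perror("PR_SET_SPECULATION_CTRL");
	}

	/* query the state; returns the PR_SPEC_* bits built above */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
			   0, 0, 0);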
+
+void x86_spec_ctrl_setup_ap(void)
+{
+ if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+ if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
+ x86_amd_ssb_disable();
+}
+
+#undef pr_fmt
+#define pr_fmt(fmt) "L1TF: " fmt
+
+/*
+ * These CPUs all support 44 bits of physical address space internally in
+ * the cache, but CPUID can report a smaller number of physical address bits.
+ *
+ * The L1TF mitigation uses the topmost address bit for the inversion of
+ * non-present PTEs. When the installed memory reaches into the topmost
+ * address bit due to memory holes (observed on machines which report
+ * 36 physical address bits and have 32G RAM installed), the mitigation
+ * range check in l1tf_select_mitigation() triggers. This is a false
+ * positive, because the mitigation is still possible given that the
+ * cache uses 44 bits internally. Use the cache bits instead of the
+ * reported physical bits, and adjust them to 44 on the affected
+ * machines if the reported bits are less than 44.
+ */
+static void override_cache_bits(struct cpuinfo_x86 *c)
+{
+ if (c->x86 != 6)
+ return;
+
+ switch (c->x86_model) {
+ case INTEL_FAM6_NEHALEM:
+ case INTEL_FAM6_WESTMERE:
+ case INTEL_FAM6_SANDYBRIDGE:
+ case INTEL_FAM6_IVYBRIDGE:
+ case INTEL_FAM6_HASWELL_CORE:
+ case INTEL_FAM6_HASWELL_ULT:
+ case INTEL_FAM6_HASWELL_GT3E:
+ case INTEL_FAM6_BROADWELL_CORE:
+ case INTEL_FAM6_BROADWELL_GT3E:
+ case INTEL_FAM6_SKYLAKE_MOBILE:
+ case INTEL_FAM6_SKYLAKE_DESKTOP:
+ case INTEL_FAM6_KABYLAKE_MOBILE:
+ case INTEL_FAM6_KABYLAKE_DESKTOP:
+ if (c->x86_cache_bits < 44)
+ c->x86_cache_bits = 44;
+ break;
+ }
+}
+
+static void __init l1tf_select_mitigation(void)
+{
+ u64 half_pa;
+
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return;
+
+ override_cache_bits(&boot_cpu_data);
+
+#if CONFIG_PGTABLE_LEVELS == 2
+ pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
+ return;
+#endif
+
+ half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
+ if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
+ pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
+ pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
+ half_pa);
+ pr_info("However, doing so will make a part of your RAM unusable.\n");
+ pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
+ return;
+ }
+
+ setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
+}
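A worked example of the cutoff (assuming l1tf_pfn_limit() derives from
x86_cache_bits as roughly 2^(cache_bits - 1 - PAGE_SHIFT)):

	half_pa = l1tf_pfn_limit() << PAGE_SHIFT  ~  2^(cache_bits - 1)

	cache_bits = 36  ->  half_pa = 32 GB  (the false-positive case above)
	cache_bits = 44  ->  half_pa =  8 TB  (after override_cache_bits())

	So raising the reported 36 bits to 44 on the machines listed in
	override_cache_bits() moves the e820 range check from 32 GB up to
	8 TB and keeps the mitigation enabled on 32G boxes.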
+#undef pr_fmt
#ifdef CONFIG_SYSFS
-ssize_t cpu_show_meltdown(struct device *dev,
- struct device_attribute *attr, char *buf)
+
+static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
+ char *buf, unsigned int bug)
{
- if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+ if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");
- if (boot_cpu_has(X86_FEATURE_KAISER))
- return sprintf(buf, "Mitigation: PTI\n");
+
+ switch (bug) {
+ case X86_BUG_CPU_MELTDOWN:
+ if (boot_cpu_has(X86_FEATURE_KAISER))
+ return sprintf(buf, "Mitigation: PTI\n");
+
+ break;
+
+ case X86_BUG_SPECTRE_V1:
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+
+ case X86_BUG_SPECTRE_V2:
+ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
+ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ spectre_v2_module_string());
+
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
+
+ case X86_BUG_L1TF:
+ if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
+ return sprintf(buf, "Mitigation: Page Table Inversion\n");
+ break;
+
+ default:
+ break;
+ }
+
return sprintf(buf, "Vulnerable\n");
}
-ssize_t cpu_show_spectre_v1(struct device *dev,
- struct device_attribute *attr, char *buf)
+ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1))
- return sprintf(buf, "Not affected\n");
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}
-ssize_t cpu_show_spectre_v2(struct device *dev,
- struct device_attribute *attr, char *buf)
+ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
- if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
- return sprintf(buf, "Not affected\n");
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
+}
- return sprintf(buf, "%s%s\n", spectre_v2_strings[spectre_v2_enabled],
- spectre_v2_module_string());
+ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
+}
+
+ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
+}
+
+ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}
#endif
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index d8fba5c15fbd88..6608c03c212675 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,7 +1,7 @@
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
@@ -43,7 +43,7 @@ static void init_c3(struct cpuinfo_x86 *c)
/* store Centaur Extended Feature Flags as
* word 5 of the CPU capability bit array
*/
- c->x86_capability[5] = cpuid_edx(0xC0000001);
+ c->x86_capability[CPUID_C000_0001_EDX] = cpuid_edx(0xC0000001);
}
#ifdef CONFIG_X86_32
/* Cyrix III family needs CX8 & PGE explicitly enabled. */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 8d084cd7b5b120..ff7d20c92e5e3c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -44,6 +44,8 @@
#include <asm/pat.h>
#include <asm/microcode.h>
#include <asm/microcode_intel.h>
+#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/uv/uv.h>
@@ -675,52 +677,87 @@ static void apply_forced_caps(struct cpuinfo_x86 *c)
}
}
+static void init_speculation_control(struct cpuinfo_x86 *c)
+{
+ /*
+ * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
+ * and they also have a different bit for STIBP support. Also,
+ * a hypervisor might have set the individual AMD bits even on
+ * Intel CPUs, for finer-grained selection of what's available.
+ */
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
+
+ if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+
+ if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+ cpu_has(c, X86_FEATURE_VIRT_SSBD))
+ set_cpu_cap(c, X86_FEATURE_SSBD);
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
+ set_cpu_cap(c, X86_FEATURE_IBRS);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
+
+ if (cpu_has(c, X86_FEATURE_AMD_IBPB))
+ set_cpu_cap(c, X86_FEATURE_IBPB);
+
+ if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
+ set_cpu_cap(c, X86_FEATURE_STIBP);
+ set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
+ }
+}
+
void get_cpu_cap(struct cpuinfo_x86 *c)
{
- u32 tfms, xlvl;
- u32 ebx;
+ u32 eax, ebx, ecx, edx;
/* Intel-defined flags: level 0x00000001 */
if (c->cpuid_level >= 0x00000001) {
- u32 capability, excap;
+ cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
- cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
- c->x86_capability[0] = capability;
- c->x86_capability[4] = excap;
+ c->x86_capability[CPUID_1_ECX] = ecx;
+ c->x86_capability[CPUID_1_EDX] = edx;
}
+ /* Thermal and Power Management Leaf: level 0x00000006 (eax) */
+ if (c->cpuid_level >= 0x00000006)
+ c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);
+
/* Additional Intel-defined flags: level 0x00000007 */
if (c->cpuid_level >= 0x00000007) {
- u32 eax, ebx, ecx, edx;
-
cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
-
- c->x86_capability[9] = ebx;
+ c->x86_capability[CPUID_7_0_EBX] = ebx;
+ c->x86_capability[CPUID_7_ECX] = ecx;
+ c->x86_capability[CPUID_7_EDX] = edx;
}
/* Extended state features: level 0x0000000d */
if (c->cpuid_level >= 0x0000000d) {
- u32 eax, ebx, ecx, edx;
-
cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);
- c->x86_capability[10] = eax;
+ c->x86_capability[CPUID_D_1_EAX] = eax;
}
/* Additional Intel-defined flags: level 0x0000000F */
if (c->cpuid_level >= 0x0000000F) {
- u32 eax, ebx, ecx, edx;
/* QoS sub-leaf, EAX=0Fh, ECX=0 */
cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
- c->x86_capability[11] = edx;
+ c->x86_capability[CPUID_F_0_EDX] = edx;
+
if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
/* will be overridden if occupancy monitoring exists */
c->x86_cache_max_rmid = ebx;
/* QoS sub-leaf, EAX=0Fh, ECX=1 */
cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
- c->x86_capability[12] = edx;
+ c->x86_capability[CPUID_F_1_EDX] = edx;
+
if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) {
c->x86_cache_max_rmid = ecx;
c->x86_cache_occ_scale = ebx;
@@ -732,32 +769,51 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
}
/* AMD-defined flags: level 0x80000001 */
- xlvl = cpuid_eax(0x80000000);
- c->extended_cpuid_level = xlvl;
+ eax = cpuid_eax(0x80000000);
+ c->extended_cpuid_level = eax;
- if ((xlvl & 0xffff0000) == 0x80000000) {
- if (xlvl >= 0x80000001) {
- c->x86_capability[1] = cpuid_edx(0x80000001);
- c->x86_capability[6] = cpuid_ecx(0x80000001);
+ if ((eax & 0xffff0000) == 0x80000000) {
+ if (eax >= 0x80000001) {
+ cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
+
+ c->x86_capability[CPUID_8000_0001_ECX] = ecx;
+ c->x86_capability[CPUID_8000_0001_EDX] = edx;
}
}
+ if (c->extended_cpuid_level >= 0x80000007) {
+ cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
+
+ c->x86_capability[CPUID_8000_0007_EBX] = ebx;
+ c->x86_power = edx;
+ }
+
if (c->extended_cpuid_level >= 0x80000008) {
- u32 eax = cpuid_eax(0x80000008);
+ cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
- c->x86_capability[13] = cpuid_ebx(0x80000008);
+ c->x86_capability[CPUID_8000_0008_EBX] = ebx;
}
#ifdef CONFIG_X86_32
else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
c->x86_phys_bits = 36;
#endif
- if (c->extended_cpuid_level >= 0x80000007)
- c->x86_power = cpuid_edx(0x80000007);
+ c->x86_cache_bits = c->x86_phys_bits;
+
+ if (c->extended_cpuid_level >= 0x8000000a)
+ c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
init_scattered_cpuid_features(c);
+ init_speculation_control(c);
+
+ /*
+ * Clear/Set all flags overridden by options, after probe.
+ * This needs to happen each time we re-probe, which may happen
+ * several times during CPU initialization.
+ */
+ apply_forced_caps(c);
}
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
@@ -786,6 +842,95 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#endif
}
+static const __initconst struct x86_cpu_id cpu_no_speculation[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL, X86_FEATURE_ANY },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW, X86_FEATURE_ANY },
+ { X86_VENDOR_CENTAUR, 5 },
+ { X86_VENDOR_INTEL, 5 },
+ { X86_VENDOR_NSC, 5 },
+ { X86_VENDOR_ANY, 4 },
+ {}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_meltdown[] = {
+ { X86_VENDOR_AMD },
+ {}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_spec_store_bypass[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PINEVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_LINCROFT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_PENWELL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CLOVERVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_CEDARVIEW },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_CORE_YONAH },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ { X86_VENDOR_CENTAUR, 5, },
+ { X86_VENDOR_INTEL, 5, },
+ { X86_VENDOR_NSC, 5, },
+ { X86_VENDOR_AMD, 0x12, },
+ { X86_VENDOR_AMD, 0x11, },
+ { X86_VENDOR_AMD, 0x10, },
+ { X86_VENDOR_AMD, 0xf, },
+ { X86_VENDOR_ANY, 4, },
+ {}
+};
+
+static const __initconst struct x86_cpu_id cpu_no_l1tf[] = {
+ /* in addition to cpu_no_speculation */
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT1 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT2 },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MERRIFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_MOOREFIELD },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GEMINI_LAKE },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNL },
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_XEON_PHI_KNM },
+ {}
+};
+
+static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+{
+ u64 ia32_cap = 0;
+
+ if (cpu_has(c, X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, ia32_cap);
+
+ if (!x86_match_cpu(cpu_no_spec_store_bypass) &&
+ !(ia32_cap & ARCH_CAP_SSB_NO))
+ setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);
+
+ if (x86_match_cpu(cpu_no_speculation))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
+ setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+
+ if (x86_match_cpu(cpu_no_meltdown))
+ return;
+
+ /* Rogue Data Cache Load? No! */
+ if (ia32_cap & ARCH_CAP_RDCL_NO)
+ return;
+
+ setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
+
+ if (x86_match_cpu(cpu_no_l1tf))
+ return;
+
+ setup_force_cpu_bug(X86_BUG_L1TF);
+}
+
/*
* Do minimum CPU detection early.
* Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -832,11 +977,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
setup_force_cpu_cap(X86_FEATURE_ALWAYS);
- if (c->x86_vendor != X86_VENDOR_AMD)
- setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
-
- setup_force_cpu_bug(X86_BUG_SPECTRE_V1);
- setup_force_cpu_bug(X86_BUG_SPECTRE_V2);
+ cpu_set_bug_bits(c);
fpu__init_system(c);
@@ -1128,6 +1269,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
enable_sep_cpu();
#endif
mtrr_ap_init();
+ x86_spec_ctrl_setup_ap();
}
struct msr_range {
@@ -1232,9 +1374,14 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
-struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
- (unsigned long) debug_idt_table };
+struct desc_ptr idt_descr __ro_after_init = {
+ .size = NR_VECTORS * 16 - 1,
+ .address = (unsigned long) idt_table,
+};
+const struct desc_ptr debug_idt_descr = {
+ .size = NR_VECTORS * 16 - 1,
+ .address = (unsigned long) debug_idt_table,
+};
DEFINE_PER_CPU_FIRST(union irq_stack_union,
irq_stack_union) __aligned(PAGE_SIZE) __visible;
@@ -1508,7 +1655,7 @@ void cpu_init(void)
struct task_struct *me;
struct tss_struct *t;
unsigned long v;
- int cpu = stack_smp_processor_id();
+ int cpu = raw_smp_processor_id();
int i;
wait_for_master_cpu(cpu);
@@ -1632,7 +1779,9 @@ void cpu_init(void)
pr_info("Initializing CPU#%d\n", cpu);
- if (cpu_feature_enabled(X86_FEATURE_VME) || cpu_has_tsc || cpu_has_de)
+ if (cpu_feature_enabled(X86_FEATURE_VME) ||
+ cpu_has_tsc ||
+ boot_cpu_has(X86_FEATURE_DE))
cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
load_current_idt();
@@ -1665,20 +1814,6 @@ void cpu_init(void)
}
#endif
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-void warn_pre_alternatives(void)
-{
- WARN(1, "You're using static_cpu_has before alternatives have run!\n");
-}
-EXPORT_SYMBOL_GPL(warn_pre_alternatives);
-#endif
-
-inline bool __static_cpu_has_safe(u16 bit)
-{
- return boot_cpu_has(bit);
-}
-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
-
static void bsp_resume(void)
{
if (this_cpu->c_bsp_resume)
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index 2584265d474556..3b19d82f7932c0 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -46,4 +46,7 @@ extern const struct cpu_dev *const __x86_cpu_dev_start[],
extern void get_cpu_cap(struct cpuinfo_x86 *c);
extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);
+
+extern void x86_spec_ctrl_setup_ap(void);
+
#endif /* ARCH_X86_CPU_H */
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index aaf152e7963738..15e47c1cd41265 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -8,6 +8,7 @@
#include <linux/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>
+#include <asm/cpufeature.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 209ac1e7d1f036..b18fe3d245fed0 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -8,11 +8,12 @@
#include <linux/module.h>
#include <linux/uaccess.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
+#include <asm/intel-family.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -25,6 +26,65 @@
#include <asm/apic.h>
#endif
+/*
+ * Early microcode releases for the Spectre v2 mitigation were broken.
+ * Information taken from:
+ * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
+ * - https://kb.vmware.com/s/article/52345
+ * - Microcode revisions observed in the wild
+ * - Release note from 20180108 microcode release
+ */
+struct sku_microcode {
+ u8 model;
+ u8 stepping;
+ u32 microcode;
+};
+static const struct sku_microcode spectre_bad_microcodes[] = {
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x80 },
+ { INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
+ { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
+ { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
+ { INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
+ { INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
+ { INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
+ { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
+ { INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
+ { INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
+ { INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
+ { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
+ { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
+ { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
+ /* Observed in the wild */
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
+ { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
+};
+
+static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
+{
+ int i;
+
+ /*
+ * We know that hypervisors lie to us about the microcode version, so
+ * we may as well hope that the host is running the correct version.
+ */
+ if (cpu_has(c, X86_FEATURE_HYPERVISOR))
+ return false;
+
+ if (c->x86 != 6)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
+ if (c->x86_model == spectre_bad_microcodes[i].model &&
+ c->x86_mask == spectre_bad_microcodes[i].stepping)
+ return (c->microcode <= spectre_bad_microcodes[i].microcode);
+ }
+ return false;
+}
+
static void early_init_intel(struct cpuinfo_x86 *c)
{
u64 misc_enable;
@@ -51,6 +111,22 @@ static void early_init_intel(struct cpuinfo_x86 *c)
rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
}
+ /* Now if any of them are set, check the blacklist and clear the lot */
+ if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
+ cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
+ cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
+ cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
+ pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
+ setup_clear_cpu_cap(X86_FEATURE_IBRS);
+ setup_clear_cpu_cap(X86_FEATURE_IBPB);
+ setup_clear_cpu_cap(X86_FEATURE_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
+ setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
+ setup_clear_cpu_cap(X86_FEATURE_SSBD);
+ setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
+ }
+
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
*
@@ -445,7 +521,8 @@ static void init_intel(struct cpuinfo_x86 *c)
if (cpu_has_xmm2)
set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
- if (cpu_has_ds) {
+
+ if (boot_cpu_has(X86_FEATURE_DS)) {
unsigned int l1;
rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
if (!(l1 & (1<<11)))
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index b4ca91cf55b0d2..3557b3ceab147a 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -14,7 +14,7 @@
#include <linux/sysfs.h>
#include <linux/pci.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/amd_nb.h>
#include <asm/smp.h>
@@ -591,7 +591,7 @@ cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
unsigned edx;
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
- if (cpu_has_topoext)
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT))
cpuid_count(0x8000001d, index, &eax.full,
&ebx.full, &ecx.full, &edx);
else
@@ -637,7 +637,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
- if (cpu_has_topoext) {
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
num_cache_leaves = find_num_cache_leaves(c);
} else if (c->extended_cpuid_level >= 0x80000006) {
if (cpuid_edx(0x80000006) & 0xf000)
@@ -809,7 +809,7 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
struct cacheinfo *this_leaf;
int i, sibling;
- if (cpu_has_topoext) {
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
unsigned int apicid, nshared, first, last;
this_leaf = this_cpu_ci->info_list + index;
diff --git a/arch/x86/kernel/cpu/match.c b/arch/x86/kernel/cpu/match.c
index afa9f0d487ea07..fbb5e90557a525 100644
--- a/arch/x86/kernel/cpu/match.c
+++ b/arch/x86/kernel/cpu/match.c
@@ -1,5 +1,5 @@
#include <asm/cpu_device_id.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 5f3a30a5b04794..cfefa21cc45b66 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -670,6 +670,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
}
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ m->bank = i;
*msg = tmp;
ret = 1;
}
@@ -980,11 +981,12 @@ void do_machine_check(struct pt_regs *regs, long error_code)
int i;
int worst = 0;
int severity;
+
/*
* Establish sequential order between the CPUs entering the machine
* check handler.
*/
- int order;
+ int order = -1;
/*
* If no_way_out gets set, there is no safe way to recover from this
* MCE. If mca_cfg.tolerant is cranked up, we'll try anyway.
@@ -1000,7 +1002,12 @@ void do_machine_check(struct pt_regs *regs, long error_code)
char *msg = "Unknown";
u64 recover_paddr = ~0ull;
int flags = MF_ACTION_REQUIRED;
- int lmce = 0;
+
+ /*
+	 * MCEs are always local on AMD; on Intel, locality is indicated by
+	 * MCG_STATUS_LMCES.
+ */
+ int lmce = 1;
/* If this CPU is offline, just bail out. */
if (cpu_is_offline(smp_processor_id())) {
@@ -1039,17 +1046,23 @@ void do_machine_check(struct pt_regs *regs, long error_code)
kill_it = 1;
/*
- * Check if this MCE is signaled to only this logical processor
+ * Check if this MCE is signaled to only this logical processor,
+ * on Intel only.
*/
- if (m.mcgstatus & MCG_STATUS_LMCES)
- lmce = 1;
- else {
- /*
- * Go through all the banks in exclusion of the other CPUs.
- * This way we don't report duplicated events on shared banks
- * because the first one to see it will clear it.
- * If this is a Local MCE, then no need to perform rendezvous.
- */
+ if (m.cpuvendor == X86_VENDOR_INTEL)
+ lmce = m.mcgstatus & MCG_STATUS_LMCES;
+
+ /*
+	 * A local machine check may already know that we have to panic.
+	 * A broadcast machine check begins rendezvous in mce_start().
+ * Go through all banks in exclusion of the other CPUs. This way we
+ * don't report duplicated events on shared banks because the first one
+ * to see it will clear it.
+ */
+ if (lmce) {
+ if (no_way_out)
+ mce_panic("Fatal local machine check", &m, msg);
+ } else {
order = mce_start(&no_way_out);
}
@@ -1128,12 +1141,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
no_way_out = worst >= MCE_PANIC_SEVERITY;
} else {
/*
- * Local MCE skipped calling mce_reign()
- * If we found a fatal error, we need to panic here.
+ * If there was a fatal machine check we should have
+ * already called mce_panic earlier in this function.
+ * Since we re-read the banks, we might have found
+ * something new. Check again to see if we found a
+ * fatal error. We call "mce_severity()" again to
+ * make sure we have the right "msg".
*/
- if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
- mce_panic("Machine check from unknown source",
- NULL, NULL);
+ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+ mce_severity(&m, cfg->tolerant, &msg, true);
+ mce_panic("Local fatal machine check!", &m, msg);
+ }
}
/*
@@ -2252,9 +2270,6 @@ static ssize_t store_int_with_restart(struct device *s,
if (check_interval == old_check_interval)
return ret;
- if (check_interval < 1)
- check_interval = 1;
-
mutex_lock(&mce_sysfs_mutex);
mce_restart();
mutex_unlock(&mce_sysfs_mutex);
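The local-vs-broadcast decision above hinges on one architectural bit: AMD machine checks are always local, while Intel signals a local MCE through MCG_STATUS. A standalone sketch, with the bit position taken from the architectural definition and a hypothetical helper name:

#include <stdbool.h>
#include <stdint.h>

#define MCG_STATUS_LMCES	(1ULL << 3)	/* local MCE signaled */

static bool mce_is_local(bool is_intel, uint64_t mcgstatus)
{
	if (!is_intel)
		return true;	/* always local on AMD */
	return mcgstatus & MCG_STATUS_LMCES;
}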
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index 3f20710a5b23b7..6988c74409a825 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -1,6 +1,6 @@
#!/bin/sh
#
-# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h
+# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
#
IN=$1
@@ -49,8 +49,8 @@ dump_array()
trap 'rm "$OUT"' EXIT
(
- echo "#ifndef _ASM_X86_CPUFEATURE_H"
- echo "#include <asm/cpufeature.h>"
+ echo "#ifndef _ASM_X86_CPUFEATURES_H"
+ echo "#include <asm/cpufeatures.h>"
echo "#endif"
echo ""
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b5624fafa44a50..136ae86f4f5fd0 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -349,7 +349,7 @@ static void get_fixed_ranges(mtrr_type *frs)
void mtrr_save_fixed_ranges(void *info)
{
- if (cpu_has_mtrr)
+ if (boot_cpu_has(X86_FEATURE_MTRR))
get_fixed_ranges(mtrr_state.fixed_ranges);
}
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index d76f13d6d8d671..ec894bf5eeb01b 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -173,6 +173,8 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
struct mtrr_gentry gentry;
void __user *arg = (void __user *) __arg;
+ memset(&gentry, 0, sizeof(gentry));
+
switch (cmd) {
case MTRRIOC_ADD_ENTRY:
case MTRRIOC_SET_ENTRY:
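The memset() added to mtrr_ioctl() closes a kernel-stack information leak: for any given command only some mtrr_gentry fields are written, and compiler-inserted padding is never written at all, so uninitialized stack bytes would otherwise reach user space on the copy-out. The general pattern, sketched with a hypothetical struct:

#include <string.h>

struct reply {
	unsigned long base;
	unsigned int size;
	unsigned int regnum;
	unsigned int type;	/* tail padding may follow on LP64 */
};

static void fill_reply(struct reply *r)
{
	memset(r, 0, sizeof(*r));	/* clear padding and unused fields */
	r->base = 0x1000;
	r->size = 0x1000;
	r->type = 6;			/* write-back, for example */
}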
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index fa77ac8291f03d..e5a5a1e75edf45 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -47,7 +47,7 @@
#include <linux/smp.h>
#include <linux/syscore_ops.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/e820.h>
#include <asm/mtrr.h>
#include <asm/msr.h>
@@ -72,14 +72,14 @@ static DEFINE_MUTEX(mtrr_mutex);
u64 size_or_mask, size_and_mask;
static bool mtrr_aps_delayed_init;
-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
const struct mtrr_ops *mtrr_if;
static void set_mtrr(unsigned int reg, unsigned long base,
unsigned long size, mtrr_type type);
-void set_mtrr_ops(const struct mtrr_ops *ops)
+void __init set_mtrr_ops(const struct mtrr_ops *ops)
{
if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
mtrr_ops[ops->vendor] = ops;
@@ -682,7 +682,7 @@ void __init mtrr_bp_init(void)
phys_addr = 32;
- if (cpu_has_mtrr) {
+ if (boot_cpu_has(X86_FEATURE_MTRR)) {
mtrr_if = &generic_mtrr_ops;
size_or_mask = SIZE_OR_MASK_BITS(36);
size_and_mask = 0x00f00000;
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
index 6c7ced07d16d11..ad8bd763efa52b 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
@@ -54,7 +54,7 @@ void fill_mtrr_var_range(unsigned int index,
bool get_mtrr_state(void);
void mtrr_bp_pat_init(void);
-extern void set_mtrr_ops(const struct mtrr_ops *ops);
+extern void __init set_mtrr_ops(const struct mtrr_ops *ops);
extern u64 size_or_mask, size_and_mask;
extern const struct mtrr_ops *mtrr_if;
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 1cee5d2d7eceaf..3ea177cb736657 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -160,7 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
if (offset)
return offset;
- if (!cpu_has_perfctr_core)
+ if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
offset = index;
else
offset = index << 1;
@@ -652,7 +652,7 @@ static __initconst const struct x86_pmu amd_pmu = {
static int __init amd_core_pmu_init(void)
{
- if (!cpu_has_perfctr_core)
+ if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
return 0;
switch (boot_cpu_data.x86) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index cc6cedb8f25d5d..49742746a6c963 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -523,10 +523,10 @@ static int __init amd_uncore_init(void)
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
goto fail_nodev;
- if (!cpu_has_topoext)
+ if (!boot_cpu_has(X86_FEATURE_TOPOEXT))
goto fail_nodev;
- if (cpu_has_perfctr_nb) {
+ if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) {
amd_uncore_nb = alloc_percpu(struct amd_uncore *);
if (!amd_uncore_nb) {
ret = -ENOMEM;
@@ -540,7 +540,7 @@ static int __init amd_uncore_init(void)
ret = 0;
}
- if (cpu_has_perfctr_l2) {
+ if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) {
amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
if (!amd_uncore_l2) {
ret = -ENOMEM;
@@ -583,10 +583,11 @@ fail_online:
/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
amd_uncore_nb = amd_uncore_l2 = NULL;
- if (cpu_has_perfctr_l2)
+
+ if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
perf_pmu_unregister(&amd_l2_pmu);
fail_l2:
- if (cpu_has_perfctr_nb)
+ if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
perf_pmu_unregister(&amd_nb_pmu);
if (amd_uncore_l2)
free_percpu(amd_uncore_l2);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 61215a69b03d92..b22e9c4dd111fb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -229,7 +229,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e
u64 prev_count, new_count, delta;
int shift;
- if (event->hw.idx >= UNCORE_PMC_IDX_FIXED)
+ if (event->hw.idx == UNCORE_PMC_IDX_FIXED)
shift = 64 - uncore_fixed_ctr_bits(box);
else
shift = 64 - uncore_perf_ctr_bits(box);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
index 2749965afed0b0..83cadc2605a785 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
@@ -240,7 +240,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p
{
struct hw_perf_event *hwc = &event->hw;
- if (hwc->idx >= UNCORE_PMC_IDX_FIXED)
+ if (hwc->idx == UNCORE_PMC_IDX_FIXED)
wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0);
else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index f0f4fcba252e15..94757942586121 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -1081,6 +1081,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
.id_table = snbep_uncore_pci_ids,
};
+#define NODE_ID_MASK 0x7
+
/*
* build pci bus to socket mapping
*/
@@ -1102,7 +1104,7 @@ static int snbep_pci2phy_map_init(int devid)
err = pci_read_config_dword(ubox_dev, 0x40, &config);
if (err)
break;
- nodeid = config;
+ nodeid = config & NODE_ID_MASK;
/* get the Node ID mapping */
err = pci_read_config_dword(ubox_dev, 0x54, &config);
if (err)
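The NODE_ID_MASK fix matters because only the low three bits of the UBOX config register carry the node ID; without the mask, reserved upper bits leaked into the bus-to-socket mapping. In isolation:

#include <stdint.h>

#define NODE_ID_MASK	0x7	/* node ID lives in bits 2:0 */

static unsigned int node_id(uint32_t config)
{
	return config & NODE_ID_MASK;	/* drop reserved upper bits */
}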
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 608fb26c72544c..8cb57df9398d91 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -31,32 +31,12 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
const struct cpuid_bit *cb;
static const struct cpuid_bit cpuid_bits[] = {
- { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
- { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
- { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
- { X86_FEATURE_PLN, CR_EAX, 4, 0x00000006, 0 },
- { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 },
- { X86_FEATURE_HWP, CR_EAX, 7, 0x00000006, 0 },
- { X86_FEATURE_HWP_NOTIFY, CR_EAX, 8, 0x00000006, 0 },
- { X86_FEATURE_HWP_ACT_WINDOW, CR_EAX, 9, 0x00000006, 0 },
- { X86_FEATURE_HWP_EPP, CR_EAX,10, 0x00000006, 0 },
- { X86_FEATURE_HWP_PKG_REQ, CR_EAX,11, 0x00000006, 0 },
{ X86_FEATURE_INTEL_PT, CR_EBX,25, 0x00000007, 0 },
{ X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 },
{ X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 },
{ X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 },
- { X86_FEATURE_NPT, CR_EDX, 0, 0x8000000a, 0 },
- { X86_FEATURE_LBRV, CR_EDX, 1, 0x8000000a, 0 },
- { X86_FEATURE_SVML, CR_EDX, 2, 0x8000000a, 0 },
- { X86_FEATURE_NRIPS, CR_EDX, 3, 0x8000000a, 0 },
- { X86_FEATURE_TSCRATEMSR, CR_EDX, 4, 0x8000000a, 0 },
- { X86_FEATURE_VMCBCLEAN, CR_EDX, 5, 0x8000000a, 0 },
- { X86_FEATURE_FLUSHBYASID, CR_EDX, 6, 0x8000000a, 0 },
- { X86_FEATURE_DECODEASSISTS, CR_EDX, 7, 0x8000000a, 0 },
- { X86_FEATURE_PAUSEFILTER, CR_EDX,10, 0x8000000a, 0 },
- { X86_FEATURE_PFTHRESHOLD, CR_EDX,12, 0x8000000a, 0 },
{ 0, 0, 0, 0, 0 }
};
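Each row kept in the table above names a scattered feature bit: a CPUID leaf, an output register, and a bit index. Probing one such bit is a CPUID invocation plus a shift, roughly as below (GCC-style inline asm, x86 only; the helper name is illustrative). Leaf 0x6, ECX bit 0, for instance, corresponds to the X86_FEATURE_APERFMPERF row retained above.

#include <stdbool.h>
#include <stdint.h>

static bool cpuid_ecx_bit(uint32_t leaf, int bit)
{
	uint32_t eax, ebx, ecx, edx;

	asm volatile("cpuid"
		     : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
		     : "a" (leaf), "c" (0));
	return (ecx >> bit) & 1;
}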
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 3fa0e5ad86b445..a19a663282b57e 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -1,6 +1,6 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/msr.h>
#include "cpu.h"
@@ -12,7 +12,7 @@ static void early_init_transmeta(struct cpuinfo_x86 *c)
xlvl = cpuid_eax(0x80860000);
if ((xlvl & 0xffff0000) == 0x80860000) {
if (xlvl >= 0x80860001)
- c->x86_capability[2] = cpuid_edx(0x80860001);
+ c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001);
}
}
@@ -82,7 +82,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
/* Unhide possibly hidden capability flags */
rdmsr(0x80860004, cap_mask, uk);
wrmsr(0x80860004, ~0, uk);
- c->x86_capability[0] = cpuid_edx(0x00000001);
+ c->x86_capability[CPUID_1_EDX] = cpuid_edx(0x00000001);
wrmsr(0x80860004, cap_mask, uk);
/* All Transmeta CPUs have a constant TSC */
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 7d580bb07636fc..e87661fefd4d93 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
static void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{
- struct task_struct *task;
unsigned long ret_addr;
int index;
if (addr != (unsigned long)return_to_handler)
return;
- task = tinfo->task;
index = task->curr_ret_stack;
if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
static inline void
print_ftrace_graph_addr(unsigned long addr, void *data,
const struct stacktrace_ops *ops,
- struct thread_info *tinfo, int *graph)
+ struct task_struct *task, int *graph)
{ }
#endif
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
* severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
*/
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
void *p, unsigned int size, void *end)
{
- void *t = tinfo;
+ void *t = task_stack_page(task);
if (end) {
if (p < end && p >= (end-THREAD_SIZE))
return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
}
unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
{
struct stack_frame *frame = (struct stack_frame *)bp;
- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+ while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
unsigned long addr;
addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
} else {
ops->address(data, addr, 0);
}
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
stack++;
}
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
EXPORT_SYMBOL_GPL(print_context_stack);
unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
unsigned long *stack, unsigned long bp,
const struct stacktrace_ops *ops, void *data,
unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
struct stack_frame *frame = (struct stack_frame *)bp;
unsigned long *ret_addr = &frame->return_address;
- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+ while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
unsigned long addr = *ret_addr;
if (!__kernel_text_address(addr))
@@ -138,7 +136,7 @@ print_context_stack_bp(struct thread_info *tinfo,
ops->address(data, addr, 1);
frame = frame->next_frame;
ret_addr = &frame->return_address;
- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+ print_ftrace_graph_addr(addr, data, ops, task, graph);
}
return (unsigned long)frame;
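valid_stack_ptr() now derives the stack base from task_stack_page() rather than the thread_info pointer, but the bounds test is unchanged in spirit: the referenced object must lie entirely within one THREAD_SIZE region. A minimal sketch, with an illustrative THREAD_SIZE:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define THREAD_SIZE	(4 * 4096UL)	/* illustrative; arch-dependent */

static bool on_stack(uintptr_t base, uintptr_t p, size_t size)
{
	return p >= base && p + size <= base + THREAD_SIZE;
}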
diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
index 7717ea2bf12dd7..049de0c8a14c83 100644
--- a/arch/x86/kernel/dumpstack_32.c
+++ b/arch/x86/kernel/dumpstack_32.c
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
bp = stack_frame(task, regs);
for (;;) {
- struct thread_info *context;
void *end_stack;
end_stack = is_hardirq_stack(stack, cpu);
if (!end_stack)
end_stack = is_softirq_stack(stack, cpu);
- context = task_thread_info(task);
- bp = ops->walk_stack(context, stack, bp, ops, data,
+ bp = ops->walk_stack(task, stack, bp, ops, data,
end_stack, &graph);
/* Stop if not on irq stack */
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 97084ff674db88..6c390efbc917d2 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
const struct stacktrace_ops *ops, void *data)
{
const unsigned cpu = get_cpu();
- struct thread_info *tinfo;
unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
unsigned long dummy;
unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
* current stack address. If the stacks consist of nested
* exceptions
*/
- tinfo = task_thread_info(task);
while (!done) {
unsigned long *stack_end;
enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (ops->stack(data, id) < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp, ops,
+ bp = ops->walk_stack(task, stack, bp, ops,
data, stack_end, &graph);
ops->stack(data, "<EOE>");
/*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
if (ops->stack(data, "IRQ") < 0)
break;
- bp = ops->walk_stack(tinfo, stack, bp,
+ bp = ops->walk_stack(task, stack, bp,
ops, data, stack_end, &graph);
/*
* We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
/*
* This handles the process stack:
*/
- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+ bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
put_cpu();
}
EXPORT_SYMBOL(dump_trace);
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 52a2526c3fbe45..19bc19d5e17412 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -24,6 +24,7 @@
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/setup.h>
+#include <asm/cpufeature.h>
/*
* The e820 map is the map that gets modified e.g. with command line parameters
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d25097c3fc1d1a..b322325424bc02 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -53,27 +53,9 @@ static bool kernel_fpu_disabled(void)
return this_cpu_read(in_kernel_fpu);
}
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
static bool interrupted_kernel_fpu_idle(void)
{
- if (kernel_fpu_disabled())
- return false;
-
- if (use_eager_fpu())
- return true;
-
- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+ return !kernel_fpu_disabled();
}
/*
@@ -114,10 +96,13 @@ void __kernel_fpu_begin(void)
kernel_fpu_disable();
if (fpu->fpregs_active) {
+ /*
+ * Ignore return value -- we don't care if reg state
+ * is clobbered.
+ */
copy_fpregs_to_fpstate(fpu);
} else {
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
- __fpregs_activate_hw();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -128,8 +113,6 @@ void __kernel_fpu_end(void)
if (fpu->fpregs_active)
copy_kernel_to_fpregs(&fpu->state);
- else
- __fpregs_deactivate_hw();
kernel_fpu_enable();
}
@@ -189,8 +172,9 @@ void fpu__save(struct fpu *fpu)
preempt_disable();
if (fpu->fpregs_active) {
- if (!copy_fpregs_to_fpstate(fpu))
- fpregs_deactivate(fpu);
+ if (!copy_fpregs_to_fpstate(fpu)) {
+ copy_kernel_to_fpregs(&fpu->state);
+ }
}
preempt_enable();
}
@@ -237,8 +221,7 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
*/
- if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, xstate_size);
+ memset(&dst_fpu->state.xsave, 0, xstate_size);
/*
* Save current FPU registers directly into the child
@@ -259,14 +242,14 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
preempt_disable();
if (!copy_fpregs_to_fpstate(dst_fpu)) {
memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
- fpregs_deactivate(src_fpu);
+
+ copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
}
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
@@ -369,7 +352,6 @@ void fpu__restore(struct fpu *fpu)
kernel_fpu_disable();
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
- fpu->counter++;
kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);
@@ -386,7 +368,6 @@ EXPORT_SYMBOL_GPL(fpu__restore);
void fpu__drop(struct fpu *fpu)
{
preempt_disable();
- fpu->counter = 0;
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
@@ -409,8 +390,10 @@ static inline void copy_init_fpstate_to_fpregs(void)
{
if (use_xsave())
copy_kernel_to_xregs(&init_fpstate.xsave, -1);
- else
+ else if (static_cpu_has(X86_FEATURE_FXSR))
copy_kernel_to_fxregs(&init_fpstate.fxsave);
+ else
+ copy_kernel_to_fregs(&init_fpstate.fsave);
}
/*
@@ -423,7 +406,7 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
- if (!use_eager_fpu()) {
+ if (!static_cpu_has(X86_FEATURE_FPU)) {
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
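copy_init_fpstate_to_fpregs() above gains a three-way dispatch so CPUs without FXSR fall back to the legacy FRSTOR format rather than faulting. The shape of that dispatch, with the feature tests stubbed out (everything below is illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool have_xsave(void) { return false; }	/* stubbed feature tests */
static bool have_fxsr(void)  { return true; }

static void restore_init_state(void)
{
	if (have_xsave())
		puts("XRSTOR from init_fpstate.xsave");
	else if (have_fxsr())
		puts("FXRSTOR from init_fpstate.fxsave");
	else
		puts("FRSTOR from init_fpstate.fsave");	/* pre-FXSR CPUs */
}

int main(void)
{
	restore_init_state();
	return 0;
}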
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 1011c05b1bd5df..738519f5a69dce 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -3,8 +3,11 @@
*/
#include <asm/fpu/internal.h>
#include <asm/tlbflush.h>
+#include <asm/setup.h>
+#include <asm/cmdline.h>
#include <linux/sched.h>
+#include <linux/init.h>
/*
* Initialize the TS bit in CR0 according to the style of context-switches
@@ -12,10 +15,7 @@
*/
static void fpu__init_cpu_ctx_switch(void)
{
- if (!cpu_has_eager_fpu)
- stts();
- else
- clts();
+ clts();
}
/*
@@ -75,13 +75,15 @@ static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
write_cr0(cr0);
- asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
- : "+m" (fsw), "+m" (fcw));
+ if (!test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
+ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+ : "+m" (fsw), "+m" (fcw));
- if (fsw == 0 && (fcw & 0x103f) == 0x003f)
- set_cpu_cap(c, X86_FEATURE_FPU);
- else
- clear_cpu_cap(c, X86_FEATURE_FPU);
+ if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+ set_cpu_cap(c, X86_FEATURE_FPU);
+ else
+ clear_cpu_cap(c, X86_FEATURE_FPU);
+ }
#ifndef CONFIG_MATH_EMULATION
if (!cpu_has_fpu) {
@@ -130,7 +132,7 @@ static void __init fpu__init_system_generic(void)
* Set up the legacy init FPU context. (xstate init might overwrite this
* with a more modern format, if the CPU supports it.)
*/
- fpstate_init_fxstate(&init_fpstate.fxsave);
+ fpstate_init(&init_fpstate);
fpu__init_system_mxcsr();
}
@@ -230,53 +232,16 @@ static void __init fpu__init_system_xstate_size_legacy(void)
}
/*
- * FPU context switching strategies:
- *
- * Against popular belief, we don't do lazy FPU saves, due to the
- * task migration complications it brings on SMP - we only do
- * lazy FPU restores.
- *
- * 'lazy' is the traditional strategy, which is based on setting
- * CR0::TS to 1 during context-switch (instead of doing a full
- * restore of the FPU state), which causes the first FPU instruction
- * after the context switch (whenever it is executed) to fault - at
- * which point we lazily restore the FPU state into FPU registers.
- *
- * Tasks are of course under no obligation to execute FPU instructions,
- * so it can easily happen that another context-switch occurs without
- * a single FPU instruction being executed. If we eventually switch
- * back to the original task (that still owns the FPU) then we have
- * not only saved the restores along the way, but we also have the
- * FPU ready to be used for the original task.
- *
- * 'eager' switching is used on modern CPUs, there we switch the FPU
- * state during every context switch, regardless of whether the task
- * has used FPU instructions in that time slice or not. This is done
- * because modern FPU context saving instructions are able to optimize
- * state saving and restoration in hardware: they can detect both
- * unused and untouched FPU state and optimize accordingly.
- *
- * [ Note that even in 'lazy' mode we might optimize context switches
- * to use 'eager' restores, if we detect that a task is using the FPU
- * frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+ * Find supported xfeatures based on cpu features and command-line input.
+ * This must be called after fpu__init_parse_early_param() is called and
+ * xfeatures_mask is enumerated.
*/
-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
-
-static int __init eager_fpu_setup(char *s)
+u64 __init fpu__get_supported_xfeatures_mask(void)
{
- if (!strcmp(s, "on"))
- eagerfpu = ENABLE;
- else if (!strcmp(s, "off"))
- eagerfpu = DISABLE;
- else if (!strcmp(s, "auto"))
- eagerfpu = AUTO;
- return 1;
+ return XCNTXT_MASK;
}
-__setup("eagerfpu=", eager_fpu_setup);
-/*
- * Pick the FPU context switching strategy:
- */
+/* Legacy code to initialize eager fpu mode. */
static void __init fpu__init_system_ctx_switch(void)
{
static bool on_boot_cpu = 1;
@@ -285,26 +250,31 @@ static void __init fpu__init_system_ctx_switch(void)
on_boot_cpu = 0;
WARN_ON_FPU(current->thread.fpu.fpstate_active);
- current_thread_info()->status = 0;
-
- /* Auto enable eagerfpu for xsaveopt */
- if (cpu_has_xsaveopt && eagerfpu != DISABLE)
- eagerfpu = ENABLE;
-
- if (xfeatures_mask & XFEATURE_MASK_EAGER) {
- if (eagerfpu == DISABLE) {
- pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
- xfeatures_mask & XFEATURE_MASK_EAGER);
- xfeatures_mask &= ~XFEATURE_MASK_EAGER;
- } else {
- eagerfpu = ENABLE;
- }
+}
+
+/*
+ * We parse fpu parameters early because fpu__init_system() is executed
+ * before parse_early_param().
+ */
+static void __init fpu__init_parse_early_param(void)
+{
+ if (cmdline_find_option_bool(boot_command_line, "no387"))
+ setup_clear_cpu_cap(X86_FEATURE_FPU);
+
+ if (cmdline_find_option_bool(boot_command_line, "nofxsr")) {
+ setup_clear_cpu_cap(X86_FEATURE_FXSR);
+ setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
+ setup_clear_cpu_cap(X86_FEATURE_XMM);
}
- if (eagerfpu == ENABLE)
- setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+ if (cmdline_find_option_bool(boot_command_line, "noxsave"))
+ fpu__xstate_clear_all_cpu_caps();
- printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+ if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+
+ if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
+ setup_clear_cpu_cap(X86_FEATURE_XSAVES);
}
/*
@@ -313,6 +283,7 @@ static void __init fpu__init_system_ctx_switch(void)
*/
void __init fpu__init_system(struct cpuinfo_x86 *c)
{
+ fpu__init_parse_early_param();
fpu__init_system_early_generic(c);
/*
@@ -336,62 +307,3 @@ void __init fpu__init_system(struct cpuinfo_x86 *c)
fpu__init_system_ctx_switch();
}
-
-/*
- * Boot parameter to turn off FPU support and fall back to math-emu:
- */
-static int __init no_387(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_FPU);
- return 1;
-}
-__setup("no387", no_387);
-
-/*
- * Disable all xstate CPU features:
- */
-static int __init x86_noxsave_setup(char *s)
-{
- if (strlen(s))
- return 0;
-
- fpu__xstate_clear_all_cpu_caps();
-
- return 1;
-}
-__setup("noxsave", x86_noxsave_setup);
-
-/*
- * Disable the XSAVEOPT instruction specifically:
- */
-static int __init x86_noxsaveopt_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-
- return 1;
-}
-__setup("noxsaveopt", x86_noxsaveopt_setup);
-
-/*
- * Disable the XSAVES instruction:
- */
-static int __init x86_noxsaves_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-
- return 1;
-}
-__setup("noxsaves", x86_noxsaves_setup);
-
-/*
- * Disable FX save/restore and SSE support:
- */
-static int __init x86_nofxsr_setup(char *s)
-{
- setup_clear_cpu_cap(X86_FEATURE_FXSR);
- setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
- setup_clear_cpu_cap(X86_FEATURE_XMM);
-
- return 1;
-}
-__setup("nofxsr", x86_nofxsr_setup);
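The __setup() handlers removed at the end of this file run too late for the FPU init path, which is why fpu__init_parse_early_param() re-reads the raw boot command line via cmdline_find_option_bool(). That helper amounts to a whole-word scan along these lines (simplified: the real parser also honors quoting; the function name here is hypothetical):

#include <stdbool.h>
#include <string.h>

static bool cmdline_has_word(const char *cmdline, const char *word)
{
	size_t len = strlen(word);
	const char *p = cmdline;

	while ((p = strstr(p, word)) != NULL) {
		bool starts = (p == cmdline) || (p[-1] == ' ');
		bool ends = (p[len] == '\0') || (p[len] == ' ');

		if (starts && ends)
			return true;
		p += len;
	}
	return false;
}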
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3de077116218c7..31fad2cbd734bb 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -294,7 +294,6 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
* thread's fpu state, reconstruct fxstate from the fsave
* header. Sanitize the copied state etc.
*/
- struct fpu *fpu = &tsk->thread.fpu;
struct user_i387_ia32_struct env;
int err = 0;
@@ -319,11 +318,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
}
fpu->fpstate_active = 1;
- if (use_eager_fpu()) {
- preempt_disable();
- fpu__restore(fpu);
- preempt_enable();
- }
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
return err;
} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 70fc312221fc6b..3fa200ecca623b 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -632,8 +632,7 @@ void __init fpu__init_system_xstate(void)
BUG();
}
- /* Support only the state known to the OS: */
- xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
+ xfeatures_mask &= fpu__get_supported_xfeatures_mask();
/* Enable xstate instructions to be able to continue with initialization: */
fpu__init_cpu_xstate();
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index bfc587579dc3b0..1b96bfe09d42af 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -81,9 +81,9 @@ within(unsigned long addr, unsigned long start, unsigned long end)
static unsigned long text_ip_addr(unsigned long ip)
{
/*
- * On x86_64, kernel text mappings are mapped read-only with
- * CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead
- * of the kernel text mapping to modify the kernel text.
+ * On x86_64, kernel text mappings are mapped read-only, so we use
+ * the kernel identity mapping instead of the kernel text mapping
+ * to modify the kernel text.
*
* For 32bit kernels, these mappings are same and we can use
* kernel identity mapping to modify code.
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 70284d38fdc266..1c0b49fd636541 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -19,7 +19,7 @@
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include <asm/bootparam.h>
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 4034e905741a90..734ba1d0f6868a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -76,9 +76,7 @@ startup_64:
subq $_text - __START_KERNEL_map, %rbp
/* Is the address not 2M aligned? */
- movq %rbp, %rax
- andl $~PMD_PAGE_MASK, %eax
- testl %eax, %eax
+ testl $~PMD_PAGE_MASK, %ebp
jnz bad_address
/*
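The rewritten check folds three instructions into one: testing against the complement of PMD_PAGE_MASK inspects the low 21 bits of the load offset directly. In C terms:

#include <stdbool.h>
#include <stdint.h>

#define PMD_PAGE_SIZE	(1ULL << 21)		/* 2 MiB */
#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))

static bool is_2m_aligned(uint64_t phys_offset)
{
	return (phys_offset & ~PMD_PAGE_MASK) == 0;
}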
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f48eb8eeefe2b1..3fdc1e53aaac7b 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -12,6 +12,7 @@
#include <linux/pm.h>
#include <linux/io.h>
+#include <asm/cpufeature.h>
#include <asm/irqdomain.h>
#include <asm/fixmap.h>
#include <asm/hpet.h>
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 50a3fad5b89f1f..2bcfb5f2bc449c 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -300,6 +300,10 @@ static int arch_build_bp_info(struct perf_event *bp)
return -EINVAL;
if (bp->attr.bp_addr & (bp->attr.bp_len - 1))
return -EINVAL;
+
+ if (!boot_cpu_has(X86_FEATURE_BPEXT))
+ return -EOPNOTSUPP;
+
/*
* It's impossible to use a range breakpoint to fake out
* user vs kernel detection because bp_len - 1 can't
@@ -307,8 +311,6 @@ static int arch_build_bp_info(struct perf_event *bp)
* breakpoints, then we'll have to check for kprobe-blacklisted
* addresses anywhere in the range.
*/
- if (!cpu_has_bpext)
- return -EOPNOTSUPP;
info->mask = bp->attr.bp_len - 1;
info->len = X86_BREAKPOINT_LEN_1;
}
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 528b7aa1780d71..5fd358b913d0b3 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -131,11 +131,9 @@ void irq_ctx_init(int cpu)
void do_softirq_own_stack(void)
{
- struct thread_info *curstk;
struct irq_stack *irqstk;
u32 *isp, *prev_esp;
- curstk = current_stack();
irqstk = __this_cpu_read(softirq_stack);
/* build the stack frame on the softirq stack */
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 206d0b90a3ab1e..38f9f5678dc831 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -41,8 +41,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
if (user_mode(regs))
return;
- if (regs->sp >= curbase + sizeof(struct thread_info) +
- sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
+ if (regs->sp >= curbase + sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
regs->sp <= curbase + THREAD_SIZE)
return;
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644
index 00000000000000..3817eb748eb48d
--- /dev/null
+++ b/arch/x86/kernel/irqflags.S
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm-generic/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+ pushf
+ pop %_ASM_AX
+ ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+ push %_ASM_ARG1
+ popf
+ ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
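Before this file existed, both helpers lived as inline asm in a header; the out-of-line versions above perform the same flag save/restore through the stack. Their inline C shape is roughly as follows (GCC-style asm; a sketch, not the kernel header verbatim):

static inline unsigned long save_flags(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}

static inline void restore_flags(unsigned long flags)
{
	asm volatile("push %0 ; popf"
		     : /* no output */
		     : "g" (flags)
		     : "memory", "cc");
}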
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 0f8a6bbaaa443c..0bf17576dd2af6 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -168,6 +168,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
if (!current_ei->efi_memmap_size)
return 0;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 44256a62702b2c..f49bc1be626257 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -49,6 +49,7 @@
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/nmi.h>
+#include <asm/switch_to.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
@@ -165,13 +166,12 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs[GDB_DX] = 0;
gdb_regs[GDB_SI] = 0;
gdb_regs[GDB_DI] = 0;
- gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
+ gdb_regs[GDB_BP] = ((struct inactive_task_frame *)p->thread.sp)->bp;
#ifdef CONFIG_X86_32
gdb_regs[GDB_DS] = __KERNEL_DS;
gdb_regs[GDB_ES] = __KERNEL_DS;
gdb_regs[GDB_PS] = 0;
gdb_regs[GDB_CS] = __KERNEL_CS;
- gdb_regs[GDB_PC] = p->thread.ip;
gdb_regs[GDB_SS] = __KERNEL_DS;
gdb_regs[GDB_FS] = 0xFFFF;
gdb_regs[GDB_GS] = 0xFFFF;
@@ -179,7 +179,6 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
gdb_regs32[GDB_CS] = __KERNEL_CS;
gdb_regs32[GDB_SS] = __KERNEL_DS;
- gdb_regs[GDB_PC] = 0;
gdb_regs[GDB_R8] = 0;
gdb_regs[GDB_R9] = 0;
gdb_regs[GDB_R10] = 0;
@@ -189,6 +188,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs[GDB_R14] = 0;
gdb_regs[GDB_R15] = 0;
#endif
+ gdb_regs[GDB_PC] = 0;
gdb_regs[GDB_SP] = p->thread.sp;
}
@@ -750,9 +750,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
-#ifdef CONFIG_DEBUG_RODATA
char opc[BREAK_INSTR_SIZE];
-#endif /* CONFIG_DEBUG_RODATA */
bpt->type = BP_BREAKPOINT;
err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
@@ -761,7 +759,6 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
return err;
err = probe_kernel_write((char *)bpt->bpt_addr,
arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
-#ifdef CONFIG_DEBUG_RODATA
if (!err)
return err;
/*
@@ -778,13 +775,12 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
return -EINVAL;
bpt->type = BP_POKE_BREAKPOINT;
-#endif /* CONFIG_DEBUG_RODATA */
+
return err;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
-#ifdef CONFIG_DEBUG_RODATA
int err;
char opc[BREAK_INSTR_SIZE];
@@ -801,8 +797,8 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
goto knl_write;
return err;
+
knl_write:
-#endif /* CONFIG_DEBUG_RODATA */
return probe_kernel_write((char *)bpt->bpt_addr,
(char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index df9be5b912707c..c6f466d6cc5772 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -393,7 +393,6 @@ int __copy_instruction(u8 *dest, u8 *src)
newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
if ((s64) (s32) newdisp != newdisp) {
pr_err("Kprobes error: new displacement does not fit into s32 (%llx)\n", newdisp);
- pr_err("\tSrc: %p, Dest: %p, old disp: %x\n", src, dest, insn.displacement.value);
return 0;
}
disp = (u8 *) dest + insn_offset_displacement(&insn);
@@ -411,25 +410,38 @@ void free_insn_page(void *page)
module_memfree(page);
}
+/* Prepare a reljump right after the copied instruction to boost execution */
+static void prepare_boost(struct kprobe *p, int length)
+{
+ if (can_boost(p->ainsn.insn, p->addr) &&
+ MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) {
+ /*
+		 * The copied instruction can be executed directly if it
+		 * jumps back to the correct address.
+ */
+ synthesize_reljump(p->ainsn.insn + length, p->addr + length);
+ p->ainsn.boostable = 1;
+ } else {
+ p->ainsn.boostable = -1;
+ }
+}
+
static int arch_copy_kprobe(struct kprobe *p)
{
- int ret;
+ int len;
set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
/* Copy an instruction with recovering if other optprobe modifies it.*/
- ret = __copy_instruction(p->ainsn.insn, p->addr);
- if (!ret)
+ len = __copy_instruction(p->ainsn.insn, p->addr);
+ if (!len)
return -EINVAL;
/*
* __copy_instruction can modify the displacement of the instruction,
* but it doesn't affect boostable check.
*/
- if (can_boost(p->ainsn.insn, p->addr))
- p->ainsn.boostable = 0;
- else
- p->ainsn.boostable = -1;
+ prepare_boost(p, len);
set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
@@ -596,8 +608,7 @@ static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
* Raise a BUG or we'll continue in an endless reentering loop
* and eventually a stack overflow.
*/
- printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
- p->addr);
+ pr_err("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
default:
@@ -894,21 +905,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs,
break;
}
- if (p->ainsn.boostable == 0) {
- if ((regs->ip > copy_ip) &&
- (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
- /*
- * These instructions can be executed directly if it
- * jumps back to correct address.
- */
- synthesize_reljump((void *)regs->ip,
- (void *)orig_ip + (regs->ip - copy_ip));
- p->ainsn.boostable = 1;
- } else {
- p->ainsn.boostable = -1;
- }
- }
-
regs->ip += orig_ip - copy_ip;
no_change:
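prepare_boost() moves the reljump synthesis from single-step resume to probe-copy time; the space check simply asks whether a 5-byte jump still fits in the instruction slot after the copied instruction. The constants below mirror the x86 definitions:

#include <stdbool.h>

#define MAX_INSN_SIZE		16	/* per-slot copy buffer */
#define RELATIVEJUMP_SIZE	5	/* 0xe9 + rel32 */

static bool can_append_reljump(int insn_len)
{
	return MAX_INSN_SIZE - insn_len >= RELATIVEJUMP_SIZE;
}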
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
index c2bedaea11f7b9..4afc67f5facc49 100644
--- a/arch/x86/kernel/ksysfs.c
+++ b/arch/x86/kernel/ksysfs.c
@@ -184,7 +184,7 @@ out:
static struct kobj_attribute type_attr = __ATTR_RO(type);
-static struct bin_attribute data_attr = {
+static struct bin_attribute data_attr __ro_after_init = {
.attr = {
.name = "data",
.mode = S_IRUGO,
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ec1b06dc82d284..ca7ff89cf97fb8 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -29,7 +29,7 @@
#include <asm/x86_init.h>
#include <asm/reboot.h>
-static int kvmclock = 1;
+static int kvmclock __ro_after_init = 1;
static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
static cycle_t kvm_sched_clock_offset;
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index bc429365b72a96..8bc68cfc0d33fc 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -119,7 +119,7 @@ static void free_ldt_struct(struct ldt_struct *ldt)
* we do not have to muck with descriptors here, that is
* done in switch_mm() as needed.
*/
-int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
{
struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
@@ -160,7 +160,7 @@ out_unlock:
*
* 64bit: Don't touch the LDT register - we're already in the next thread.
*/
-void destroy_context(struct mm_struct *mm)
+void destroy_context_ldt(struct mm_struct *mm)
{
free_ldt_struct(mm->context.ldt);
mm->context.ldt = NULL;
diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c
index d1d35ccffed3ca..579f8f813ce00a 100644
--- a/arch/x86/kernel/livepatch.c
+++ b/arch/x86/kernel/livepatch.c
@@ -58,6 +58,7 @@ int klp_write_module_reloc(struct module *mod, unsigned long type,
val = (s32)value;
break;
case R_X86_64_PC32:
+ case R_X86_64_PLT32:
val = (u32)(value - loc);
break;
default:
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index b9c2687b0811ff..eb3bb9a4a967c1 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -41,7 +41,7 @@
#include <linux/gfp.h>
#include <linux/ratelimit.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
#include <asm/msr.h>
static struct class *msr_class;
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f534a0e3af5358..7ca45d49584016 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -97,10 +97,12 @@ unsigned paravirt_patch_call(void *insnbuf,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (tgt_clobbers & ~site_clobbers)
- return len; /* target would clobber too much for this site */
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+		pr_warn_once("Failing to patch indirect CALL in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe8; /* call */
b->delta = delta;
@@ -115,8 +117,12 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
struct branch *b = insnbuf;
unsigned long delta = (unsigned long)target - (addr+5);
- if (len < 5)
+ if (len < 5) {
+#ifdef CONFIG_RETPOLINE
+		pr_warn_once("Failing to patch indirect JMP in %ps\n", (void *)addr);
+#endif
return len; /* call too long for patch site */
+ }
b->opcode = 0xe9; /* jmp */
b->delta = delta;
@@ -417,7 +423,7 @@ struct pv_apic_ops pv_apic_ops = {
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif
-struct pv_mmu_ops pv_mmu_ops = {
+struct pv_mmu_ops pv_mmu_ops __ro_after_init = {
.read_cr2 = native_read_cr2,
.write_cr2 = native_write_cr2,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index c1b21d61b769ca..6970f8ac0baa69 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -31,6 +31,8 @@
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
+#include <asm/switch_to.h>
+#include <asm/spec-ctrl.h>
/*
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -117,11 +119,6 @@ void flush_thread(void)
fpu__clear(&tsk->thread.fpu);
}
-static void hard_disable_TSC(void)
-{
- cr4_set_bits(X86_CR4_TSD);
-}
-
void disable_TSC(void)
{
preempt_disable();
@@ -130,15 +127,10 @@ void disable_TSC(void)
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
- hard_disable_TSC();
+ cr4_set_bits(X86_CR4_TSD);
preempt_enable();
}
-static void hard_enable_TSC(void)
-{
- cr4_clear_bits(X86_CR4_TSD);
-}
-
static void enable_TSC(void)
{
preempt_disable();
@@ -147,7 +139,7 @@ static void enable_TSC(void)
* Must flip the CPU state synchronously with
* TIF_NOTSC in the current running context.
*/
- hard_enable_TSC();
+ cr4_clear_bits(X86_CR4_TSD);
preempt_enable();
}
@@ -175,48 +167,199 @@ int set_tsc_mode(unsigned int val)
return 0;
}
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
- struct tss_struct *tss)
+static inline void switch_to_bitmap(struct tss_struct *tss,
+ struct thread_struct *prev,
+ struct thread_struct *next,
+ unsigned long tifp, unsigned long tifn)
{
- struct thread_struct *prev, *next;
-
- prev = &prev_p->thread;
- next = &next_p->thread;
-
- if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
- test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
- unsigned long debugctl = get_debugctlmsr();
-
- debugctl &= ~DEBUGCTLMSR_BTF;
- if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
- debugctl |= DEBUGCTLMSR_BTF;
-
- update_debugctlmsr(debugctl);
- }
-
- if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
- test_tsk_thread_flag(next_p, TIF_NOTSC)) {
- /* prev and next are different */
- if (test_tsk_thread_flag(next_p, TIF_NOTSC))
- hard_disable_TSC();
- else
- hard_enable_TSC();
- }
-
- if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+ if (tifn & _TIF_IO_BITMAP) {
/*
* Copy the relevant range of the IO bitmap.
* Normally this is 128 bytes or less:
*/
memcpy(tss->io_bitmap, next->io_bitmap_ptr,
max(prev->io_bitmap_max, next->io_bitmap_max));
- } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+ } else if (tifp & _TIF_IO_BITMAP) {
/*
* Clear any possible leftover bits:
*/
memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
}
+}
+
+#ifdef CONFIG_SMP
+
+struct ssb_state {
+ struct ssb_state *shared_state;
+ raw_spinlock_t lock;
+ unsigned int disable_state;
+ unsigned long local_state;
+};
+
+#define LSTATE_SSB 0
+
+static DEFINE_PER_CPU(struct ssb_state, ssb_state);
+
+void speculative_store_bypass_ht_init(void)
+{
+ struct ssb_state *st = this_cpu_ptr(&ssb_state);
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
+
+ st->local_state = 0;
+
+ /*
+ * Shared state setup happens once on the first bringup
+ * of the CPU. It's not destroyed on CPU hotunplug.
+ */
+ if (st->shared_state)
+ return;
+
+ raw_spin_lock_init(&st->lock);
+
+ /*
+ * Go over HT siblings and check whether one of them has set up the
+ * shared state pointer already.
+ */
+ for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
+ if (cpu == this_cpu)
+ continue;
+
+ if (!per_cpu(ssb_state, cpu).shared_state)
+ continue;
+
+ /* Link it to the state of the sibling: */
+ st->shared_state = per_cpu(ssb_state, cpu).shared_state;
+ return;
+ }
+
+ /*
+ * First HT sibling to come up on the core. Link shared state of
+ * the first HT sibling to itself. The siblings on the same core
+ * which come up later will see the shared state pointer and link
+	 * themselves to the state of this CPU.
+ */
+ st->shared_state = st;
+}
+
+/*
+ * The logic is: the first HT sibling to enable SSBD enables it for both
+ * siblings in the core, and the last sibling to disable it disables it for
+ * the whole core. This is how MSR_SPEC_CTRL works in "hardware":
+ *
+ * CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
+ */
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+ struct ssb_state *st = this_cpu_ptr(&ssb_state);
+ u64 msr = x86_amd_ls_cfg_base;
+
+ if (!static_cpu_has(X86_FEATURE_ZEN)) {
+ msr |= ssbd_tif_to_amd_ls_cfg(tifn);
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ return;
+ }
+
+ if (tifn & _TIF_SSBD) {
+ /*
+ * Since this can race with prctl(), block reentry on the
+ * same CPU.
+ */
+ if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
+ return;
+
+ msr |= x86_amd_ls_cfg_ssbd_mask;
+
+ raw_spin_lock(&st->shared_state->lock);
+ /* First sibling enables SSBD: */
+ if (!st->shared_state->disable_state)
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ st->shared_state->disable_state++;
+ raw_spin_unlock(&st->shared_state->lock);
+ } else {
+ if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
+ return;
+
+ raw_spin_lock(&st->shared_state->lock);
+ st->shared_state->disable_state--;
+ if (!st->shared_state->disable_state)
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+ raw_spin_unlock(&st->shared_state->lock);
+ }
+}
+#else
+static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
+{
+ u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+
+ wrmsrl(MSR_AMD64_LS_CFG, msr);
+}
+#endif
+
+static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
+{
+ /*
+ * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
+ * so ssbd_tif_to_spec_ctrl() just works.
+ */
+ wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
+}
+
+static __always_inline void intel_set_ssb_state(unsigned long tifn)
+{
+ u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+
+ wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+}
+
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+ if (static_cpu_has(X86_FEATURE_VIRT_SSBD))
+ amd_set_ssb_virt_state(tifn);
+ else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ amd_set_core_ssb_state(tifn);
+ else
+ intel_set_ssb_state(tifn);
+}
+
+void speculative_store_bypass_update(unsigned long tif)
+{
+ preempt_disable();
+ __speculative_store_bypass_update(tif);
+ preempt_enable();
+}
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+ struct tss_struct *tss)
+{
+ struct thread_struct *prev, *next;
+ unsigned long tifp, tifn;
+
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+ tifn = READ_ONCE(task_thread_info(next_p)->flags);
+ tifp = READ_ONCE(task_thread_info(prev_p)->flags);
+ switch_to_bitmap(tss, prev, next, tifp, tifn);
+
propagate_user_return_notify(prev_p, next_p);
+
+ if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
+ arch_has_block_step()) {
+ unsigned long debugctl, msk;
+
+ rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ debugctl &= ~DEBUGCTLMSR_BTF;
+ msk = tifn & _TIF_BLOCKSTEP;
+ debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
+ wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
+ }
+
+ if ((tifp ^ tifn) & _TIF_NOTSC)
+ cr4_toggle_bits(X86_CR4_TSD);
+
+ if ((tifp ^ tifn) & _TIF_SSBD)
+ __speculative_store_bypass_update(tifn);
}
/*
@@ -521,9 +664,7 @@ unsigned long get_wchan(struct task_struct *p)
* PADDING
* ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
* stack
- * ----------- bottom = start + sizeof(thread_info)
- * thread_info
- * ----------- start
+ * ----------- bottom = start
*
* The tasks stack pointer points at the location where the
* framepointer is stored. The data on the stack is:
@@ -534,13 +675,13 @@ unsigned long get_wchan(struct task_struct *p)
*/
top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
top -= 2 * sizeof(unsigned long);
- bottom = start + sizeof(struct thread_info);
+ bottom = start;
sp = READ_ONCE(p->thread.sp);
if (sp < bottom || sp > top)
return 0;
- fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
+ fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
do {
if (fp < bottom || fp > top)
return 0;
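The rewritten __switch_to_xtra() reads each task's flag word once and then uses XOR so that work is done only for flags that actually differ between the outgoing and incoming task. The idiom in isolation (the bit value is illustrative, not the kernel's):

#include <stdbool.h>

#define EXAMPLE_TIF_NOTSC	(1UL << 16)	/* illustrative bit */

static bool flag_changed(unsigned long tifp, unsigned long tifn,
			 unsigned long mask)
{
	return (tifp ^ tifn) & mask;	/* set in exactly one of the two */
}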
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 9f950917528b33..81a82f52db5ae9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -133,17 +133,20 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
unsigned long arg, struct task_struct *p, unsigned long tls)
{
struct pt_regs *childregs = task_pt_regs(p);
+ struct fork_frame *fork_frame = container_of(childregs, struct fork_frame, regs);
+ struct inactive_task_frame *frame = &fork_frame->frame;
struct task_struct *tsk;
int err;
- p->thread.sp = (unsigned long) childregs;
+ frame->bp = 0;
+ p->thread.sp = (unsigned long) fork_frame;
p->thread.sp0 = (unsigned long) (childregs+1);
memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
if (unlikely(p->flags & PF_KTHREAD)) {
/* kernel thread */
memset(childregs, 0, sizeof(struct pt_regs));
- p->thread.ip = (unsigned long) ret_from_kernel_thread;
+ frame->ret_addr = (unsigned long) ret_from_kernel_thread;
task_user_gs(p) = __KERNEL_STACK_CANARY;
childregs->ds = __USER_DS;
childregs->es = __USER_DS;
@@ -161,7 +164,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
if (sp)
childregs->sp = sp;
- p->thread.ip = (unsigned long) ret_from_fork;
+ frame->ret_addr = (unsigned long) ret_from_fork;
task_user_gs(p) = get_user_gs(current_pt_regs());
p->thread.io_bitmap_ptr = NULL;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4cbb60fbff3e36..9d04cc66911fb0 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -158,12 +158,17 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
{
int err;
struct pt_regs *childregs;
+ struct fork_frame *fork_frame;
+ struct inactive_task_frame *frame;
struct task_struct *me = current;
p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
childregs = task_pt_regs(p);
- p->thread.sp = (unsigned long) childregs;
- set_tsk_thread_flag(p, TIF_FORK);
+ fork_frame = container_of(childregs, struct fork_frame, regs);
+ frame = &fork_frame->frame;
+ frame->bp = 0;
+ frame->ret_addr = (unsigned long) ret_from_fork;
+ p->thread.sp = (unsigned long) fork_frame;
p->thread.io_bitmap_ptr = NULL;
savesegment(gs, p->thread.gsindex);
@@ -208,7 +213,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
*/
if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
- if (is_ia32_task())
+ if (in_ia32_syscall())
err = do_set_thread_area(p, -1,
(struct user_desc __user *)tls, 0);
else
@@ -250,6 +255,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
start_thread_common(regs, new_ip, new_sp,
__USER_CS, __USER_DS, 0);
}
+EXPORT_SYMBOL_GPL(start_thread);
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
@@ -490,7 +496,7 @@ void set_personality_ia32(bool x32)
current->personality &= ~READ_IMPLIES_EXEC;
/* is_compat_task() uses the presence of the x32
syscall bit flag to determine compat status */
- current_thread_info()->status &= ~TS_COMPAT;
+ current->thread.status &= ~TS_COMPAT;
} else {
set_thread_flag(TIF_IA32);
clear_thread_flag(TIF_X32);
@@ -498,7 +504,7 @@ void set_personality_ia32(bool x32)
current->mm->context.ia32_compat = TIF_IA32;
current->personality |= force_personality32;
/* Prepare the first "return" to user space */
- current_thread_info()->status |= TS_COMPAT;
+ current->thread.status |= TS_COMPAT;
}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
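copy_thread_tls() above recovers the enclosing fork_frame from the childregs pointer via container_of(), i.e. arithmetic back from a member to its containing struct. A self-contained sketch of that relationship (struct names are illustrative, not the kernel's):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct regs { long ax; };

struct frame {
	long bp;
	long ret_addr;
	struct regs regs;	/* pt_regs sits at the end of the frame */
};

static struct frame *frame_of(struct regs *childregs)
{
	return container_of(childregs, struct frame, regs);
}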
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 479a409ddac8ea..fbd84e6c1143c9 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -970,15 +970,18 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 value)
case offsetof(struct user32, regs.orig_eax):
/*
- * A 32-bit debugger setting orig_eax means to restore
- * the state of the task restarting a 32-bit syscall.
- * Make sure we interpret the -ERESTART* codes correctly
- * in case the task is not actually still sitting at the
- * exit from a 32-bit syscall with TS_COMPAT still set.
+ * Warning: bizarre corner case fixup here. A 32-bit
+ * debugger setting orig_eax to -1 wants to disable
+ * syscall restart. Make sure that the syscall
+ * restart code sign-extends orig_ax. Also make sure
+ * we interpret the -ERESTART* codes correctly if
+ * loaded into regs->ax in case the task is not
+ * actually still sitting at the exit from a 32-bit
+ * syscall with TS_COMPAT still set.
*/
regs->orig_ax = value;
if (syscall_get_nr(child, regs) >= 0)
- task_thread_info(child)->status |= TS_COMPAT;
+ child->thread.status |= TS_I386_REGS_POKED;
break;
case offsetof(struct user32, regs.eflags):
@@ -1281,7 +1284,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
compat_ulong_t caddr, compat_ulong_t cdata)
{
#ifdef CONFIG_X86_X32_ABI
- if (!is_ia32_task())
+ if (!in_ia32_syscall())
return x32_arch_ptrace(child, request, caddr, cdata);
#endif
#ifdef CONFIG_IA32_EMULATION
@@ -1294,7 +1297,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
#ifdef CONFIG_X86_64
-static struct user_regset x86_64_regsets[] __read_mostly = {
+static struct user_regset x86_64_regsets[] __ro_after_init = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct) / sizeof(long),
@@ -1335,7 +1338,7 @@ static const struct user_regset_view user_x86_64_view = {
#endif /* CONFIG_X86_64 */
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-static struct user_regset x86_32_regsets[] __read_mostly = {
+static struct user_regset x86_32_regsets[] __ro_after_init = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = sizeof(struct user_regs_struct32) / sizeof(u32),
@@ -1388,7 +1391,7 @@ static const struct user_regset_view user_x86_32_view = {
*/
u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
{
#ifdef CONFIG_X86_64
x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index b6b437a15cbfcb..e4df1bd33fa9a5 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -680,7 +680,7 @@ static void native_machine_power_off(void)
tboot_shutdown(TB_SHUTDOWN_HALT);
}
-struct machine_ops machine_ops = {
+struct machine_ops machine_ops __ro_after_init = {
.power_off = native_machine_power_off,
.shutdown = native_machine_shutdown,
.emergency_restart = native_machine_emergency_restart,
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bbaae4cf9e8edd..3df65ec4a1189e 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -209,9 +209,9 @@ EXPORT_SYMBOL(boot_cpu_data);
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-__visible unsigned long mmu_cr4_features;
+__visible unsigned long mmu_cr4_features __ro_after_init;
#else
-__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
+__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
@@ -851,6 +851,12 @@ void __init setup_arch(char **cmdline_p)
memblock_reserve(__pa_symbol(_text),
(unsigned long)__bss_stop - (unsigned long)_text);
+ /*
+ * Make sure page 0 is always reserved because on systems with
+ * L1TF its contents can be leaked to user processes.
+ */
+ memblock_reserve(0, PAGE_SIZE);
+
early_reserve_initrd();
/*
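
The setup.c hunk above unconditionally reserves physical page 0, since on L1TF-affected systems its contents can be speculatively leaked through not-present PTEs whose frame bits are zero. A toy model of the reservation bookkeeping only; it assumes nothing about the real memblock API:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Toy reservation map: one flag per low physical page. */
static int reserved[16];

static void reserve(uint64_t base, uint64_t size)
{
	for (uint64_t p = base / PAGE_SIZE; p < (base + size) / PAGE_SIZE; p++)
		reserved[p] = 1;
}

int main(void)
{
	/* Mirror of the patch: page 0 is always taken out of the allocator. */
	reserve(0, PAGE_SIZE);
	printf("page 0 reserved: %d\n", reserved[0]);
	return 0;
}
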
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e4fcb87ba7a61b..8ce03678166e5c 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -33,7 +33,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_number);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+unsigned long __per_cpu_offset[NR_CPUS] __ro_after_init = {
[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index cb6282c3638ffb..d27e3620b0e66d 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -692,12 +692,37 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
{
-#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+ /*
+ * This function is fundamentally broken as currently
+ * implemented.
+ *
+ * The idea is that we want to trigger a call to the
+ * restart_block() syscall and that we want in_ia32_syscall(),
+ * in_x32_syscall(), etc. to match whatever they were in the
+ * syscall being restarted. We assume that the syscall
+ * instruction at (regs->ip - 2) matches whatever syscall
+ * instruction we used to enter in the first place.
+ *
+ * The problem is that we can get here when ptrace pokes
+ * syscall-like values into regs even if we're not in a syscall
+ * at all.
+ *
+ * For now, we maintain historical behavior and guess based on
+ * stored state. We could do better by saving the actual
+ * syscall arch in restart_block or (with caveats on x32) by
+ * checking if regs->ip points to 'int $0x80'. The current
+ * behavior is incorrect if a tracer has a different bitness
+ * than the tracee.
+ */
+#ifdef CONFIG_IA32_EMULATION
+ if (current->thread.status & (TS_COMPAT|TS_I386_REGS_POKED))
+ return __NR_ia32_restart_syscall;
+#endif
+#ifdef CONFIG_X86_X32_ABI
+ return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#else
return __NR_restart_syscall;
-#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
- return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
- __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
-#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+#endif
}
/*
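
A condensed, user-space model of the decision the rewritten get_nr_restart_syscall() encodes; the constants below are placeholders (the real syscall numbers come from the unistd headers), and the thread-status flags stand in for TS_COMPAT and TS_I386_REGS_POKED:

#include <stdint.h>
#include <stdio.h>

#define TS_COMPAT            0x0002   /* placeholder flag values */
#define TS_I386_REGS_POKED   0x0004
#define X32_SYSCALL_BIT      0x40000000UL
#define NR_RESTART           219      /* illustrative, not the real numbers */
#define NR_IA32_RESTART      0

static unsigned long restart_nr(unsigned thread_status, unsigned long orig_ax)
{
	/* 32-bit (compat) tasks, or tasks a 32-bit debugger poked, restart
	 * via the ia32 syscall number. */
	if (thread_status & (TS_COMPAT | TS_I386_REGS_POKED))
		return NR_IA32_RESTART;
	/* x32 callers keep the x32 marker bit from the original syscall nr. */
	return NR_RESTART | (orig_ax & X32_SYSCALL_BIT);
}

int main(void)
{
	printf("%lu\n", restart_nr(TS_COMPAT, 0));
	printf("%#lx\n", restart_nr(0, X32_SYSCALL_BIT | 39));
	return 0;
}
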
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 48ca93242bfdd5..5497bfced85141 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -75,6 +75,7 @@
#include <asm/i8259.h>
#include <asm/realmode.h>
#include <asm/misc.h>
+#include <asm/spec-ctrl.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -217,6 +218,8 @@ static void notrace start_secondary(void *unused)
*/
check_tsc_sync_target();
+ speculative_store_bypass_ht_init();
+
/*
* Lock vector_lock and initialize the vectors on this cpu
* before setting the cpu online. We must set it online with
@@ -295,7 +298,7 @@ do { \
static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
- if (cpu_has_topoext) {
+ if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
if (c->phys_proc_id == o->phys_proc_id &&
@@ -808,7 +811,6 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
per_cpu(cpu_current_top_of_stack, cpu) =
(unsigned long)task_stack_page(idle) + THREAD_SIZE;
#else
- clear_tsk_thread_flag(idle, TIF_FORK);
initial_gs = per_cpu_offset(cpu);
#endif
}
@@ -1165,7 +1167,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
cpumask_copy(cpu_callin_mask, cpumask_of(0));
mb();
- current_thread_info()->cpu = 0; /* needed? */
for_each_possible_cpu(i) {
zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
@@ -1209,6 +1210,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
set_mtrr_aps_delayed_init();
smp_quirk_init_udelay();
+
+ speculative_store_bypass_ht_init();
}
void arch_enable_nonboot_cpus_begin(void)
diff --git a/arch/x86/kernel/test_nx.c b/arch/x86/kernel/test_nx.c
index 3f92ce07e525fd..27538f183c3b15 100644
--- a/arch/x86/kernel/test_nx.c
+++ b/arch/x86/kernel/test_nx.c
@@ -142,7 +142,6 @@ static int test_NX(void)
* by the error message
*/
-#ifdef CONFIG_DEBUG_RODATA
/* Test 3: Check if the .rodata section is executable */
if (rodata_test_data != 0xC3) {
printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
@@ -151,7 +150,6 @@ static int test_NX(void)
printk(KERN_ERR "test_nx: .rodata section is executable\n");
ret = -ENODEV;
}
-#endif
#if 0
/* Test 4: Check if the .data section of a module is executable */
diff --git a/arch/x86/kernel/test_rodata.c b/arch/x86/kernel/test_rodata.c
index 5ecbfe5099dad6..cb4a01b41e2778 100644
--- a/arch/x86/kernel/test_rodata.c
+++ b/arch/x86/kernel/test_rodata.c
@@ -76,5 +76,5 @@ int rodata_test(void)
}
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Testcase for the DEBUG_RODATA infrastructure");
+MODULE_DESCRIPTION("Testcase for marking rodata as read-only");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index d39c09119db6d2..590c8fd2ed9b14 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -23,7 +23,7 @@
#include <asm/time.h>
#ifdef CONFIG_X86_64
-__visible volatile unsigned long jiffies __cacheline_aligned = INITIAL_JIFFIES;
+__visible volatile unsigned long jiffies __cacheline_aligned_in_smp = INITIAL_JIFFIES;
#endif
unsigned long profile_pc(struct pt_regs *regs)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 37753b75f66f09..a590f631dafff2 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -742,7 +742,6 @@ dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
- BUG_ON(use_eager_fpu());
#ifdef CONFIG_MATH_EMULATION
if (read_cr0() & X86_CR0_EM) {
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 6aa0f4d9eea681..0e37e369b3a0cd 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -21,6 +21,7 @@
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/param.h>
+#include <asm/tsc.h>
/* CPU reference clock frequency: in KHz */
#define FREQ_83 83200
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index c6aace2bbe083e..95a362be51a394 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -290,7 +290,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
/* has the side-effect of processing the entire instruction */
insn_get_length(insn);
- if (WARN_ON_ONCE(!insn_complete(insn)))
+ if (!insn_complete(insn))
return -ENOEXEC;
if (is_prefix_bad(insn))
@@ -516,7 +516,7 @@ struct uprobe_xol_ops {
static inline int sizeof_long(void)
{
- return is_ia32_task() ? 4 : 8;
+ return in_ia32_syscall() ? 4 : 8;
}
static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 4cf401f581e7e7..b7c9db5deebe54 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -30,7 +30,7 @@
* appropriately. Either display a message or halt.
*/
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
verify_cpu:
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index af57736a03096e..7f4839ef3608b4 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -357,8 +357,10 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
tss = &per_cpu(cpu_tss, get_cpu());
/* make room for real-mode segments */
tsk->thread.sp0 += 16;
- if (cpu_has_sep)
+
+ if (static_cpu_has(X86_FEATURE_SEP))
tsk->thread.sysenter_cs = 0;
+
load_sp0(tss, &tsk->thread);
put_cpu();
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index a7871d1d36d558..3611136b1455cc 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -41,29 +41,28 @@ ENTRY(phys_startup_64)
jiffies_64 = jiffies;
#endif
-#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+#if defined(CONFIG_X86_64)
/*
- * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA
- * we retain large page mappings for boundaries spanning kernel text, rodata
- * and data sections.
+ * On 64-bit, align RODATA to 2MB so we retain large page mappings for
+ * boundaries spanning kernel text, rodata and data sections.
*
* However, kernel identity mappings will have different RWX permissions
* for the pages mapping the text section and for the padding pages (which
* are freed) after it. Hence kernel identity mappings will be broken into smaller
* pages. For 64-bit, kernel text and kernel identity mappings are different,
- * so we can enable protection checks that come with CONFIG_DEBUG_RODATA,
- * as well as retain 2MB large page mappings for kernel text.
+ * so we can enable protection checks as well as retain 2MB large page
+ * mappings for kernel text.
*/
-#define X64_ALIGN_DEBUG_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
+#define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
-#define X64_ALIGN_DEBUG_RODATA_END \
+#define X64_ALIGN_RODATA_END \
. = ALIGN(HPAGE_SIZE); \
__end_rodata_hpage_align = .;
#else
-#define X64_ALIGN_DEBUG_RODATA_BEGIN
-#define X64_ALIGN_DEBUG_RODATA_END
+#define X64_ALIGN_RODATA_BEGIN
+#define X64_ALIGN_RODATA_END
#endif
@@ -120,13 +119,11 @@ SECTIONS
EXCEPTION_TABLE(16) :text = 0x9090
-#if defined(CONFIG_DEBUG_RODATA)
/* .text should occupy whole number of pages */
. = ALIGN(PAGE_SIZE);
-#endif
- X64_ALIGN_DEBUG_RODATA_BEGIN
+ X64_ALIGN_RODATA_BEGIN
RO_DATA(PAGE_SIZE)
- X64_ALIGN_DEBUG_RODATA_END
+ X64_ALIGN_RODATA_END
/* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) {
@@ -203,6 +200,17 @@ SECTIONS
:init
#endif
+ /*
+ * Section for code used exclusively before alternatives are run. All
+ * references to such code must be patched out by alternatives, normally
+ * by using the X86_FEATURE_ALWAYS CPU feature bit.
+ *
+ * See static_cpu_has() for an example.
+ */
+ .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
+ *(.altinstr_aux)
+ }
+
INIT_DATA_SECTION(16)
.x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index f5f549853b1211..6bff58acf9f215 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -91,7 +91,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
static void default_nmi_init(void) { };
-struct x86_platform_ops x86_platform = {
+struct x86_platform_ops x86_platform __ro_after_init = {
.calibrate_tsc = native_calibrate_tsc,
.get_wallclock = mach_get_cmos_time,
.set_wallclock = mach_set_rtc_mmss,
@@ -106,7 +106,7 @@ struct x86_platform_ops x86_platform = {
EXPORT_SYMBOL_GPL(x86_platform);
#if defined(CONFIG_PCI_MSI)
-struct x86_msi_ops x86_msi = {
+struct x86_msi_ops x86_msi __ro_after_init = {
.setup_msi_irqs = native_setup_msi_irqs,
.teardown_msi_irq = native_teardown_msi_irq,
.teardown_msi_irqs = default_teardown_msi_irqs,
@@ -135,7 +135,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
}
#endif
-struct x86_io_apic_ops x86_io_apic_ops = {
+struct x86_io_apic_ops x86_io_apic_ops __ro_after_init = {
.read = native_io_apic_read,
.disable = native_disable_io_apic,
};
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 83d6369c45f59f..b857bb9f6f236f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
@@ -104,9 +103,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- vcpu->arch.eager_fpu = use_eager_fpu();
- if (vcpu->arch.eager_fpu)
- kvm_x86_ops->fpu_activate(vcpu);
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
@@ -344,6 +341,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM);
+ /* cpuid 0x80000008.ebx */
+ const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+ F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);
+
/* cpuid 0xC0000001.edx */
const u32 kvm_supported_word5_x86_features =
F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
@@ -361,6 +362,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
const u32 kvm_supported_word10_x86_features =
F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;
+ /* cpuid 7.0.edx */
+ const u32 kvm_cpuid_7_0_edx_x86_features =
+ F(SPEC_CTRL) | F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);
+
/* all calls to cpuid_count() should be made on the same cpu */
get_cpu();
@@ -438,11 +443,14 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
cpuid_mask(&entry->ebx, 9);
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
- } else
+ entry->edx &= kvm_cpuid_7_0_edx_x86_features;
+ cpuid_mask(&entry->edx, CPUID_7_EDX);
+ } else {
entry->ebx = 0;
+ entry->edx = 0;
+ }
entry->eax = 0;
entry->ecx = 0;
- entry->edx = 0;
break;
}
case 9:
@@ -586,7 +594,21 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
if (!g_phys_as)
g_phys_as = phys_as;
entry->eax = g_phys_as | (virt_as << 8);
- entry->ebx = entry->edx = 0;
+ entry->edx = 0;
+ /*
+ * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+ * hardware cpuid
+ */
+ if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
+ entry->ebx |= F(AMD_IBPB);
+ if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
+ entry->ebx |= F(AMD_IBRS);
+ if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
+ entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
+ cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+ entry->ebx |= F(VIRT_SSBD);
break;
}
case 0x80000019:
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index d1534feefcfeb4..72f159f4d45639 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -159,6 +159,46 @@ static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
return best && (best->edx & bit(X86_FEATURE_RDTSCP));
}
+static inline bool guest_cpuid_has_ibpb(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBPB)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_SPEC_CTRL));
+}
+
+static inline bool guest_cpuid_has_spec_ctrl(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ if (best && (best->ebx & bit(X86_FEATURE_AMD_IBRS)))
+ return true;
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & (bit(X86_FEATURE_SPEC_CTRL) | bit(X86_FEATURE_SPEC_CTRL_SSBD)));
+}
+
+static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
+}
+
+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+ return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
/*
* NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
*/
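
All of the guest_cpuid_has_*() helpers added above share one shape: find the CPUID leaf that was synthesized for the guest and test a feature bit, with the IBPB/IBRS variants accepting either the AMD leaf (0x80000008 EBX) or the Intel one (7.0 EDX). A self-contained sketch of that pattern with made-up bit positions; only the lookup-and-fallback structure mirrors the diff:

#include <stdint.h>
#include <stdio.h>

struct cpuid_entry { uint32_t fn, eax, ebx, ecx, edx; };

static struct cpuid_entry guest_cpuid[] = {
	{ .fn = 0x7,        .edx = 1u << 26 },   /* pretend SPEC_CTRL bit */
	{ .fn = 0x80000008, .ebx = 1u << 12 },   /* pretend AMD_IBPB bit  */
};

static struct cpuid_entry *find_entry(uint32_t fn)
{
	for (unsigned i = 0; i < sizeof(guest_cpuid)/sizeof(*guest_cpuid); i++)
		if (guest_cpuid[i].fn == fn)
			return &guest_cpuid[i];
	return 0;
}

static int guest_has_ibpb(void)
{
	struct cpuid_entry *best = find_entry(0x80000008);
	if (best && (best->ebx & (1u << 12)))    /* AMD-style bit */
		return 1;
	best = find_entry(0x7);                  /* Intel-style fallback */
	return best && (best->edx & (1u << 26));
}

int main(void) { printf("ibpb: %d\n", guest_has_ibpb()); return 0; }
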
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 7ee65b02ccb183..b10a055c01ca0a 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -792,6 +792,19 @@ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
return assign_eip_near(ctxt, ctxt->_eip + rel);
}
+static int linear_read_system(struct x86_emulate_ctxt *ctxt, ulong linear,
+ void *data, unsigned size)
+{
+ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
+static int linear_write_system(struct x86_emulate_ctxt *ctxt,
+ ulong linear, void *data,
+ unsigned int size)
+{
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, true);
+}
+
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
@@ -803,7 +816,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+ return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception, false);
}
static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
@@ -817,7 +830,7 @@ static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
- return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception, false);
}
/*
@@ -1490,8 +1503,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
- return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
- &ctxt->exception);
+ return linear_read_system(ctxt, addr, desc, sizeof *desc);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
@@ -1554,8 +1566,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE)
return rc;
- return ctxt->ops->read_std(ctxt, *desc_addr_p, desc, sizeof(*desc),
- &ctxt->exception);
+ return linear_read_system(ctxt, *desc_addr_p, desc, sizeof(*desc));
}
/* allowed just for 8 bytes segments */
@@ -1569,8 +1580,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
if (rc != X86EMUL_CONTINUE)
return rc;
- return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
- &ctxt->exception);
+ return linear_write_system(ctxt, addr, desc, sizeof *desc);
}
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
@@ -1731,8 +1741,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
return ret;
}
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
- ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
- sizeof(base3), &ctxt->exception);
+ ret = linear_read_system(ctxt, desc_addr+8, &base3, sizeof(base3));
if (ret != X86EMUL_CONTINUE)
return ret;
if (is_noncanonical_address(get_desc_base(&seg_desc) |
@@ -2045,11 +2054,11 @@ static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
- rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
+ rc = linear_read_system(ctxt, cs_addr, &cs, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
+ rc = linear_read_system(ctxt, eip_addr, &eip, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
@@ -2893,12 +2902,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
- r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
+ r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL, true);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
- r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
+ r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL, true);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
@@ -3027,35 +3036,30 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
- const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss16(ctxt, &tss_seg);
- ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_write_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
- ret = ops->write_std(ctxt, new_tss_base,
- &tss_seg.prev_task_link,
- sizeof tss_seg.prev_task_link,
- &ctxt->exception);
+ ret = linear_write_system(ctxt, new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link);
if (ret != X86EMUL_CONTINUE)
return ret;
}
@@ -3171,38 +3175,34 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
- const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
- ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, old_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;
save_state_to_tss32(ctxt, &tss_seg);
/* Only GP registers and segment selectors are saved */
- ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
- ldt_sel_offset - eip_offset, &ctxt->exception);
+ ret = linear_write_system(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
+ ldt_sel_offset - eip_offset);
if (ret != X86EMUL_CONTINUE)
return ret;
- ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
- &ctxt->exception);
+ ret = linear_read_system(ctxt, new_tss_base, &tss_seg, sizeof tss_seg);
if (ret != X86EMUL_CONTINUE)
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
- ret = ops->write_std(ctxt, new_tss_base,
- &tss_seg.prev_task_link,
- sizeof tss_seg.prev_task_link,
- &ctxt->exception);
+ ret = linear_write_system(ctxt, new_tss_base,
+ &tss_seg.prev_task_link,
+ sizeof tss_seg.prev_task_link);
if (ret != X86EMUL_CONTINUE)
return ret;
}
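
The emulate.c conversions above look mechanical, but the new boolean is the point: descriptor-table and TSS fetches are supervisor data accesses, so they must not be subjected to the user-level permission checks that ordinary guest reads get. The linear_read_system()/linear_write_system() wrappers pin that flag at the call site. A sketch of the wrapper idea with simplified signatures (the real ops also thread an exception pointer through):

#include <stdio.h>

/* 'system' selects supervisor-style access checks instead of user ones. */
static int read_std(unsigned long lin, void *buf, unsigned len, int system)
{
	(void)buf;
	printf("read %u bytes @%#lx as %s\n", len, lin,
	       system ? "system" : "user");
	return 0;
}

/* Wrapper so every descriptor/TSS fetch is unmistakably a system access. */
static int linear_read_system(unsigned long lin, void *buf, unsigned len)
{
	return read_std(lin, buf, len, 1);
}

int main(void)
{
	char desc[8];
	return linear_read_system(0x1000, desc, sizeof(desc));
}
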
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a1afd80a68aace..b46663a656af9a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -56,7 +56,7 @@
#define APIC_BUS_CYCLE_NS 1
/* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
-#define apic_debug(fmt, arg...)
+#define apic_debug(fmt, arg...) do {} while (0)
#define APIC_LVT_NUM 6
/* 14 is the version for Xeon and Pentium 8.4.8 */
@@ -1189,14 +1189,40 @@ static void update_divide_count(struct kvm_lapic *apic)
apic->divide_count);
}
+static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
+{
+ /*
+ * Do not allow the guest to program periodic timers with small
+ * interval, since the hrtimers are not throttled by the host
+ * scheduler.
+ */
+ if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
+ s64 min_period = min_timer_period_us * 1000LL;
+
+ if (apic->lapic_timer.period < min_period) {
+ pr_info_ratelimited(
+ "kvm: vcpu %i: requested %lld ns "
+ "lapic timer period limited to %lld ns\n",
+ apic->vcpu->vcpu_id,
+ apic->lapic_timer.period, min_period);
+ apic->lapic_timer.period = min_period;
+ }
+ }
+}
+
static void apic_update_lvtt(struct kvm_lapic *apic)
{
u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) &
apic->lapic_timer.timer_mode_mask;
if (apic->lapic_timer.timer_mode != timer_mode) {
+ if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
+ APIC_LVT_TIMER_TSCDEADLINE)) {
+ apic_set_reg(apic, APIC_TMICT, 0);
+ hrtimer_cancel(&apic->lapic_timer.timer);
+ }
apic->lapic_timer.timer_mode = timer_mode;
- hrtimer_cancel(&apic->lapic_timer.timer);
+ limit_periodic_timer_frequency(apic);
}
}
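
limit_periodic_timer_frequency() above exists because host hrtimers are not throttled: a guest programming, say, a 50 ns periodic LAPIC timer would otherwise monopolize a host CPU. A standalone sketch of the clamp; min_timer_period_us is a module parameter in the real code, and 500 us is an assumed value here:

#include <stdint.h>
#include <stdio.h>

static const int64_t min_timer_period_us = 500;   /* assumed default */

static int64_t clamp_period_ns(int64_t requested_ns)
{
	int64_t min_ns = min_timer_period_us * 1000;

	if (requested_ns && requested_ns < min_ns) {
		fprintf(stderr, "requested %lld ns, limited to %lld ns\n",
			(long long)requested_ns, (long long)min_ns);
		return min_ns;
	}
	return requested_ns;
}

int main(void)
{
	printf("%lld\n", (long long)clamp_period_ns(50));      /* clamped */
	printf("%lld\n", (long long)clamp_period_ns(1000000)); /* kept    */
	return 0;
}
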
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 2b71f2c03b9eb1..a5b533aea958e1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4174,9 +4174,9 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
}
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
- const u8 *new, int *bytes)
+ int *bytes)
{
- u64 gentry;
+ u64 gentry = 0;
int r;
/*
@@ -4188,22 +4188,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
*gpa &= ~(gpa_t)7;
*bytes = 8;
- r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8);
- if (r)
- gentry = 0;
- new = (const u8 *)&gentry;
}
- switch (*bytes) {
- case 4:
- gentry = *(const u32 *)new;
- break;
- case 8:
- gentry = *(const u64 *)new;
- break;
- default:
- gentry = 0;
- break;
+ if (*bytes == 4 || *bytes == 8) {
+ r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes);
+ if (r)
+ gentry = 0;
}
return gentry;
@@ -4313,8 +4303,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
- gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);
-
/*
* No need to care whether the memory allocation is successful
* or not, since pte prefetch is skipped if it does not have
@@ -4323,6 +4311,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mmu_topup_memory_caches(vcpu);
spin_lock(&vcpu->kvm->mmu_lock);
+
+ gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes);
+
++vcpu->kvm->stat.mmu_pte_write;
kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 42654375b73f0f..757e5dc79231c2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -37,7 +37,8 @@
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
-#include <asm/nospec-branch.h>
+#include <asm/microcode.h>
+#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include "trace.h"
@@ -147,6 +148,14 @@ struct vcpu_svm {
u64 gs_base;
} host;
+ u64 spec_ctrl;
+ /*
+ * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+ * translated into the appropriate L2_CFG bits on the host to
+ * perform speculative control.
+ */
+ u64 virt_spec_ctrl;
+
u32 *msrpm;
ulong nmi_iret_rip;
@@ -182,6 +191,8 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_CSTAR, .always = true },
{ .index = MSR_SYSCALL_MASK, .always = true },
#endif
+ { .index = MSR_IA32_SPEC_CTRL, .always = false },
+ { .index = MSR_IA32_PRED_CMD, .always = false },
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
@@ -411,6 +422,7 @@ struct svm_cpu_data {
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
+ struct vmcb *current_vmcb;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
@@ -762,6 +774,25 @@ static bool valid_msr_intercept(u32 index)
return false;
}
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
+{
+ u8 bit_write;
+ unsigned long tmp;
+ u32 offset;
+ u32 *msrpm;
+
+ msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
+ to_svm(vcpu)->msrpm;
+
+ offset = svm_msrpm_offset(msr);
+ bit_write = 2 * (msr & 0x0f) + 1;
+ tmp = msrpm[offset];
+
+ BUG_ON(offset == MSR_INVALID);
+
+ return !!test_bit(bit_write, &tmp);
+}
+
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
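
The new msr_write_intercepted() reads the same SVM MSR permission bitmap that set_msr_interception() writes: every MSR owns two adjacent bits (read intercept, then write intercept), so the write bit sits at 2 * (msr & 0x0f) + 1 inside the u32 that svm_msrpm_offset() selects. A toy model of that encoding, collapsed to a single permission word:

#include <stdint.h>
#include <stdio.h>

/* One permission word covering MSRs 0..15: bit 2n = read, 2n+1 = write. */
static uint32_t msrpm_word;

static void set_intercept(unsigned msr, int read, int write)
{
	unsigned bit = 2 * (msr & 0x0f);

	if (read)  msrpm_word |=  (1u << bit);
	else       msrpm_word &= ~(1u << bit);
	if (write) msrpm_word |=  (1u << (bit + 1));
	else       msrpm_word &= ~(1u << (bit + 1));
}

static int write_intercepted(unsigned msr)
{
	return !!(msrpm_word & (1u << (2 * (msr & 0x0f) + 1)));
}

int main(void)
{
	set_intercept(5, 1, 1);
	printf("msr 5 write intercepted: %d\n", write_intercepted(5));
	set_intercept(5, 1, 0);          /* pass writes through */
	printf("msr 5 write intercepted: %d\n", write_intercepted(5));
	return 0;
}
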
@@ -1120,6 +1151,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
u32 dummy;
u32 eax = 1;
+ svm->spec_ctrl = 0;
+ svm->virt_spec_ctrl = 0;
+
if (!init_event) {
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
@@ -1210,11 +1244,17 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
+ /*
+ * The vmcb page can be recycled, causing a false negative in
+ * svm_vcpu_load(). So do a full IBPB now.
+ */
+ indirect_branch_prediction_barrier();
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
@@ -1239,6 +1279,10 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
}
}
+ if (sd->current_vmcb != svm->vmcb) {
+ sd->current_vmcb = svm->vmcb;
+ indirect_branch_prediction_barrier();
+ }
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
@@ -2344,6 +2388,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
+ /*
+ * Drop what we picked up for L2 via svm_complete_interrupts() so it
+ * doesn't end up in L1.
+ */
+ svm->vcpu.arch.nmi_injected = false;
+ kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(&svm->vcpu);
+
return 0;
}
@@ -3051,6 +3103,20 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_VM_CR:
msr_info->data = svm->nested.vm_cr_msr;
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = svm->spec_ctrl;
+ break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_virt_ssbd(vcpu))
+ return 1;
+
+ msr_info->data = svm->virt_spec_ctrl;
+ break;
case MSR_IA32_UCODE_REV:
msr_info->data = 0x01000065;
break;
@@ -3125,6 +3191,59 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
+ return 1;
+
+ svm->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_svm_vmrun_msrpm.
+ * We update the L1 MSR bit as well since it will end up
+ * touching the MSR anyway now.
+ */
+ set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_ibpb(vcpu))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+ if (is_guest_mode(vcpu))
+ break;
+ set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
+ break;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ if (!msr->host_initiated &&
+ !guest_cpuid_has_virt_ssbd(vcpu))
+ return 1;
+
+ if (data & ~SPEC_CTRL_SSBD)
+ return 1;
+
+ svm->virt_spec_ctrl = data;
+ break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
@@ -3811,6 +3930,14 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
local_irq_enable();
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
+
asm volatile (
"push %%" _ASM_BP "; \n\t"
"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
@@ -3915,6 +4042,26 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
#endif
#endif
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+ * it to the atomic save/restore list, especially as that facility
+ * (saving guest MSRs on vmexit) doesn't even exist in KVM.
+ *
+ * For non-nested case:
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ *
+ * For nested case:
+ * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+ svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
+
reload_tss(vcpu);
local_irq_disable();
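
The vmexit-side hunk above only pays for an rdmsr of SPEC_CTRL when the guest was allowed to write the MSR directly (write intercept disabled); otherwise the cached svm->spec_ctrl is already authoritative. A condensed model of that save/restore flow, with the MSR faked by a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_spec_ctrl;           /* stands in for the real MSR */

static void run_guest(uint64_t *guest_val, int guest_owns_msr)
{
	/* entry: install the guest's value only if it is non-zero */
	if (*guest_val)
		hw_spec_ctrl = *guest_val;

	if (guest_owns_msr)
		hw_spec_ctrl = 0x1;     /* guest wrote IBRS while running */

	/* exit: only re-read the MSR if the guest could have changed it */
	if (guest_owns_msr)
		*guest_val = hw_spec_ctrl;

	hw_spec_ctrl = 0;               /* restore the host value */
}

int main(void)
{
	uint64_t guest = 0;
	run_guest(&guest, 1);
	printf("saved guest SPEC_CTRL: %#llx\n", (unsigned long long)guest);
	return 0;
}
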
@@ -4015,8 +4162,15 @@ static bool svm_cpu_has_accelerated_tpr(void)
return false;
}
-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
{
+ switch (index) {
+ case MSR_IA32_MCG_EXT_CTL:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
@@ -4290,7 +4444,7 @@ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}
-static struct kvm_x86_ops svm_x86_ops = {
+static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
@@ -4299,7 +4453,7 @@ static struct kvm_x86_ops svm_x86_ops = {
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
- .cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+ .has_emulated_msr = svm_has_emulated_msr,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a750fc7c745830..8ab9437f6e0d16 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -48,7 +48,8 @@
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
-#include <asm/nospec-branch.h>
+#include <asm/microcode.h>
+#include <asm/spec-ctrl.h>
#include "trace.h"
#include "pmu.h"
@@ -109,6 +110,14 @@ static u64 __read_mostly host_xss;
static bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
+#define MSR_TYPE_R 1
+#define MSR_TYPE_W 2
+#define MSR_TYPE_RW 3
+
+#define MSR_BITMAP_MODE_X2APIC 1
+#define MSR_BITMAP_MODE_X2APIC_APICV 2
+#define MSR_BITMAP_MODE_LM 4
+
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
@@ -172,7 +181,6 @@ module_param(ple_window_max, int, S_IRUGO);
extern const ulong vmx_return;
#define NR_AUTOLOAD_MSRS 8
-#define VMCS02_POOL_SIZE 1
struct vmcs {
u32 revision_id;
@@ -189,6 +197,7 @@ struct loaded_vmcs {
struct vmcs *vmcs;
int cpu;
int launched;
+ unsigned long *msr_bitmap;
struct list_head loaded_vmcss_on_cpu_link;
};
@@ -205,7 +214,7 @@ struct shared_msr_entry {
* stored in guest memory specified by VMPTRLD, but is opaque to the guest,
* which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
* More than one of these structures may exist, if L1 runs multiple L2 guests.
- * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
+ * nested_vmx_run() will use the data here to build the vmcs02: a VMCS for the
* underlying hardware which will be used to run L2.
* This structure is packed to ensure that its layout is identical across
* machines (necessary for live migration).
@@ -384,13 +393,6 @@ struct __packed vmcs12 {
*/
#define VMCS12_SIZE 0x1000
-/* Used to remember the last vmcs02 used for some recently used vmcs12s */
-struct vmcs02_list {
- struct list_head list;
- gpa_t vmptr;
- struct loaded_vmcs vmcs02;
-};
-
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
@@ -412,16 +414,16 @@ struct nested_vmx {
*/
bool sync_shadow_vmcs;
- /* vmcs02_list cache of VMCSs recently used to run L2 guests */
- struct list_head vmcs02_pool;
- int vmcs02_num;
u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
+
+ struct loaded_vmcs vmcs02;
+
/*
- * Guest pages referred to in vmcs02 with host-physical pointers, so
- * we must keep them pinned while L2 runs.
+ * Guest pages referred to in the vmcs02 with host-physical
+ * pointers, so we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
struct page *virtual_apic_page;
@@ -429,7 +431,6 @@ struct nested_vmx {
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
- u64 msr_ia32_feature_control;
struct hrtimer preemption_timer;
bool preemption_timer_expired;
@@ -531,6 +532,7 @@ struct vcpu_vmx {
unsigned long host_rsp;
u8 fail;
bool nmi_known_unmasked;
+ u8 msr_bitmap_mode;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
@@ -542,6 +544,10 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
+
+ u64 arch_capabilities;
+ u64 spec_ctrl;
+
u32 vm_entry_controls_shadow;
u32 vm_exit_controls_shadow;
/*
@@ -606,6 +612,8 @@ struct vcpu_vmx {
struct page *pml_pg;
u64 current_tsc_ratio;
+
+ u64 msr_ia32_feature_control;
};
enum segment_cache_field {
@@ -889,6 +897,9 @@ static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);
+static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -908,11 +919,6 @@ static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
-static unsigned long *vmx_msr_bitmap_legacy;
-static unsigned long *vmx_msr_bitmap_longmode;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
@@ -1689,6 +1695,52 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}
+/*
+ * Check if MSR is intercepted for currently loaded MSR bitmap.
+ */
+static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;
+
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
+/*
+ * Check if MSR is intercepted for L01 MSR bitmap.
+ */
+static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
+{
+ unsigned long *msr_bitmap;
+ int f = sizeof(unsigned long);
+
+ if (!cpu_has_vmx_msr_bitmap())
+ return true;
+
+ msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;
+
+ if (msr <= 0x1fff) {
+ return !!test_bit(msr, msr_bitmap + 0x800 / f);
+ } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+ msr &= 0x1fff;
+ return !!test_bit(msr, msr_bitmap + 0xc00 / f);
+ }
+
+ return true;
+}
+
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit)
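
Both msr_write_intercepted() variants above decode the fixed VMX MSR-bitmap layout: one 4 KiB page where write-intercept bits for low MSRs (0..0x1fff) begin at byte offset 0x800 and those for high MSRs (0xc0000000..0xc0001fff) at 0xc00, indexed by the low 13 bits of the MSR. A user-space sketch of the same test, with test_bit() modeled by hand:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
static unsigned long bitmap[4096 / sizeof(unsigned long)];

static int test_bit(unsigned nr, const unsigned long *addr)
{
	return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
}

static int write_intercepted(uint32_t msr)
{
	int f = sizeof(unsigned long);

	if (msr <= 0x1fff)
		return test_bit(msr, bitmap + 0x800 / f);
	if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		return test_bit(msr & 0x1fff, bitmap + 0xc00 / f);
	return 1;   /* out-of-range MSRs are always intercepted */
}

int main(void)
{
	memset(bitmap, 0xff, sizeof(bitmap));         /* intercept all */
	printf("%d\n", write_intercepted(0x48));      /* SPEC_CTRL -> 1 */
	return 0;
}
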
{
@@ -2074,6 +2126,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
+ indirect_branch_prediction_barrier();
}
if (vmx->loaded_vmcs->cpu != cpu) {
@@ -2353,27 +2406,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
vmx->guest_msrs[from] = tmp;
}
-static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
-{
- unsigned long *msr_bitmap;
-
- if (is_guest_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_nested;
- else if (vcpu->arch.apic_base & X2APIC_ENABLE) {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
- else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
- } else {
- if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode;
- else
- msr_bitmap = vmx_msr_bitmap_legacy;
- }
-
- vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
-}
-
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
@@ -2414,7 +2446,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx->save_nmsrs = save_nmsrs;
if (cpu_has_vmx_msr_bitmap())
- vmx_set_msr_bitmap(&vmx->vcpu);
+ vmx_update_msr_bitmap(&vmx->vcpu);
}
/*
@@ -2828,6 +2860,19 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
msr_info->data = guest_read_tsc(vcpu);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ msr_info->data = to_vmx(vcpu)->spec_ctrl;
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_arch_capabilities(vcpu))
+ return 1;
+ msr_info->data = to_vmx(vcpu)->arch_capabilities;
+ break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
@@ -2844,9 +2889,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
case MSR_IA32_FEATURE_CONTROL:
- if (!nested_vmx_allowed(vcpu))
- return 1;
- msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+ msr_info->data = to_vmx(vcpu)->msr_ia32_feature_control;
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!nested_vmx_allowed(vcpu))
@@ -2927,6 +2970,68 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
+ case MSR_IA32_SPEC_CTRL:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_spec_ctrl(vcpu))
+ return 1;
+
+ /* The STIBP bit doesn't fault even if it's not advertised */
+ if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
+ return 1;
+
+ vmx->spec_ctrl = data;
+
+ if (!data)
+ break;
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging. We update the vmcs01 here for L1 as well
+ * since it will end up touching the MSR anyway now.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_RW);
+ break;
+ case MSR_IA32_PRED_CMD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has_ibpb(vcpu))
+ return 1;
+
+ if (data & ~PRED_CMD_IBPB)
+ return 1;
+
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+
+ /*
+ * For non-nested:
+ * When it's written (to non-zero) for the first time, pass
+ * it through.
+ *
+ * For nested:
+ * The handling of the MSR bitmap for L2 guests is done in
+ * nested_vmx_merge_msr_bitmap. We should not touch the
+ * vmcs02.msr_bitmap here since it gets completely overwritten
+ * in the merging.
+ */
+ vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+ break;
+ case MSR_IA32_ARCH_CAPABILITIES:
+ if (!msr_info->host_initiated)
+ return 1;
+ vmx->arch_capabilities = data;
+ break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
@@ -2942,10 +3047,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_IA32_FEATURE_CONTROL:
if (!nested_vmx_allowed(vcpu) ||
- (to_vmx(vcpu)->nested.msr_ia32_feature_control &
+ (to_vmx(vcpu)->msr_ia32_feature_control &
FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
return 1;
- vmx->nested.msr_ia32_feature_control = data;
+ vmx->msr_ia32_feature_control = data;
if (msr_info->host_initiated && data == 0)
vmx_leave_nested(vcpu);
break;
@@ -3352,11 +3457,6 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
return vmcs;
}
-static struct vmcs *alloc_vmcs(void)
-{
- return alloc_vmcs_cpu(raw_smp_processor_id());
-}
-
static void free_vmcs(struct vmcs *vmcs)
{
free_pages((unsigned long)vmcs, vmcs_config.order);
@@ -3372,6 +3472,34 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
+ if (loaded_vmcs->msr_bitmap)
+ free_page((unsigned long)loaded_vmcs->msr_bitmap);
+}
+
+static struct vmcs *alloc_vmcs(void)
+{
+ return alloc_vmcs_cpu(raw_smp_processor_id());
+}
+
+static int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
+{
+ loaded_vmcs->vmcs = alloc_vmcs();
+ if (!loaded_vmcs->vmcs)
+ return -ENOMEM;
+
+ loaded_vmcs_init(loaded_vmcs);
+
+ if (cpu_has_vmx_msr_bitmap()) {
+ loaded_vmcs->msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
+ if (!loaded_vmcs->msr_bitmap)
+ goto out_vmcs;
+ memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE);
+ }
+ return 0;
+
+out_vmcs:
+ free_loaded_vmcs(loaded_vmcs);
+ return -ENOMEM;
}
static void free_kvm_area(void)
@@ -4370,10 +4498,8 @@ static void free_vpid(int vpid)
spin_unlock(&vmx_vpid_lock);
}
-#define MSR_TYPE_R 1
-#define MSR_TYPE_W 2
-static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
+static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);
@@ -4407,8 +4533,8 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
}
}
-static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
- u32 msr, int type)
+static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type)
{
int f = sizeof(unsigned long);
@@ -4488,37 +4614,78 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
}
}
-static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
+static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap,
+ u32 msr, int type, bool value)
{
- if (!longmode_only)
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
- msr, MSR_TYPE_R | MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
- msr, MSR_TYPE_R | MSR_TYPE_W);
+ if (value)
+ vmx_enable_intercept_for_msr(msr_bitmap, msr, type);
+ else
+ vmx_disable_intercept_for_msr(msr_bitmap, msr, type);
}
-static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
{
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ u8 mode = 0;
+
+ if (cpu_has_secondary_exec_ctrls() &&
+ (vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
+ mode |= MSR_BITMAP_MODE_X2APIC;
+ if (enable_apicv)
+ mode |= MSR_BITMAP_MODE_X2APIC_APICV;
+ }
+
+ if (is_long_mode(vcpu))
+ mode |= MSR_BITMAP_MODE_LM;
+
+ return mode;
}
-static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
+
+static void vmx_update_msr_bitmap_x2apic(unsigned long *msr_bitmap,
+ u8 mode)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_R);
+ int msr;
+
+ for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
+ unsigned word = msr / BITS_PER_LONG;
+ msr_bitmap[word] = (mode & MSR_BITMAP_MODE_X2APIC_APICV) ? 0 : ~0;
+ msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
+ }
+
+ if (mode & MSR_BITMAP_MODE_X2APIC) {
+ /*
+ * TPR reads and writes can be virtualized even if virtual interrupt
+ * delivery is not in use.
+ */
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TASKPRI), MSR_TYPE_RW);
+ if (mode & MSR_BITMAP_MODE_X2APIC_APICV) {
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_ID), MSR_TYPE_R);
+ vmx_enable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_TMCCT), MSR_TYPE_R);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_EOI), MSR_TYPE_W);
+ vmx_disable_intercept_for_msr(msr_bitmap, X2APIC_MSR(APIC_SELF_IPI), MSR_TYPE_W);
+ }
+ }
}
-static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
{
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
- msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
- msr, MSR_TYPE_W);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap;
+ u8 mode = vmx_msr_bitmap_mode(vcpu);
+ u8 changed = mode ^ vmx->msr_bitmap_mode;
+
+ if (!changed)
+ return;
+
+ vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
+ !(mode & MSR_BITMAP_MODE_LM));
+
+ if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
+ vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);
+
+ vmx->msr_bitmap_mode = mode;
}
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
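
vmx_update_msr_bitmap() above replaces the five global bitmaps with one bitmap per VMCS plus a small mode word, redoing only the regions whose mode bits actually changed. A sketch of that changed-bits pattern, reusing the MSR_BITMAP_MODE_* values introduced earlier in the diff (bitmap writes reduced to prints):

#include <stdio.h>

#define MODE_X2APIC        1
#define MODE_X2APIC_APICV  2
#define MODE_LM            4

static unsigned char cur_mode;

static void update_bitmap(unsigned char new_mode)
{
	unsigned char changed = new_mode ^ cur_mode;

	if (!changed)
		return;                       /* nothing to redo */
	if (changed & MODE_LM)
		printf("retoggle KERNEL_GS_BASE intercept\n");
	if (changed & (MODE_X2APIC | MODE_X2APIC_APICV))
		printf("rewrite x2apic MSR range\n");
	cur_mode = new_mode;
}

int main(void)
{
	update_bitmap(MODE_LM);               /* long mode entered */
	update_bitmap(MODE_LM | MODE_X2APIC); /* x2apic enabled    */
	update_bitmap(MODE_LM | MODE_X2APIC); /* no-op             */
	return 0;
}
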
@@ -4526,6 +4693,28 @@ static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
return enable_apicv && lapic_in_kernel(vcpu);
}
+static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ gfn_t gfn;
+
+ /*
+ * Don't need to mark the APIC access page dirty; it is never
+ * written to by the CPU during APIC virtualization.
+ */
+
+ if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
+ gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+
+ if (nested_cpu_has_posted_intr(vmcs12)) {
+ gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
+ kvm_vcpu_mark_page_dirty(vcpu, gfn);
+ }
+}
+
+
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -4533,18 +4722,15 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
void *vapic_page;
u16 status;
- if (vmx->nested.pi_desc &&
- vmx->nested.pi_pending) {
- vmx->nested.pi_pending = false;
- if (!pi_test_and_clear_on(vmx->nested.pi_desc))
- return;
-
- max_irr = find_last_bit(
- (unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
+ return;
- if (max_irr == 256)
- return;
+ vmx->nested.pi_pending = false;
+ if (!pi_test_and_clear_on(vmx->nested.pi_desc))
+ return;
+ max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
+ if (max_irr != 256) {
vapic_page = kmap(vmx->nested.virtual_apic_page);
__kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
kunmap(vmx->nested.virtual_apic_page);
@@ -4556,6 +4742,8 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
vmcs_write16(GUEST_INTR_STATUS, status);
}
}
+
+ nested_mark_vmcs12_pages_dirty(vcpu);
}
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
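
The reworked vmx_complete_nested_posted_interrupt() flattens the nesting but keeps the same core step: scan the 256-bit posted-interrupt request (PIR) bitmap for the highest pending vector and, if one exists, merge it into the virtual APIC page. A toy version of the max-IRR scan, reproducing find_last_bit()'s return convention by hand:

#include <stdio.h>

#define PIR_BITS 256
#define BPL (8 * sizeof(unsigned long))

/* Returns the highest set bit index, or PIR_BITS if the bitmap is empty,
 * matching find_last_bit()'s convention. */
static unsigned max_irr(const unsigned long *pir)
{
	for (int i = PIR_BITS / BPL - 1; i >= 0; i--)
		if (pir[i])
			for (int b = BPL - 1; b >= 0; b--)
				if (pir[i] & (1UL << b))
					return i * BPL + b;
	return PIR_BITS;
}

int main(void)
{
	unsigned long pir[PIR_BITS / BPL] = { 0 };
	pir[2] = 1UL << 3;                    /* vector 131 pending */
	printf("max_irr = %u\n", max_irr(pir));
	return 0;
}
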
@@ -4818,7 +5006,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
}
if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
+ vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap));
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
@@ -4890,6 +5078,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
@@ -4918,6 +5108,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
u64 cr0;
vmx->rmode.vm86_active = 0;
+ vmx->spec_ctrl = 0;
vmx->soft_vnmi_blocked = 0;
@@ -5382,6 +5573,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu)
static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
return 0;
}
@@ -5973,9 +6165,24 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
- skip_emulated_instruction(vcpu);
trace_kvm_fast_mmio(gpa);
- return 1;
+ /*
+ * Doing kvm_skip_emulated_instruction() depends on undefined
+ * behavior: Intel's manual doesn't mandate
+ * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG
+ * occurs. While on real hardware it was observed to be set,
+ * other hypervisors (namely Hyper-V) don't set it, so we would
+ * end up advancing IP with some random value. Disable fast mmio
+ * when running nested and keep it for real hardware in the hope
+ * that VM_EXIT_INSTRUCTION_LEN will always be set correctly.
+ */
+ if (!static_cpu_has(X86_FEATURE_HYPERVISOR)) {
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ else
+ return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
+ NULL, 0) == EMULATE_DONE;
}
ret = handle_mmio_page_fault(vcpu, gpa, true);
@@ -6159,7 +6366,7 @@ static void wakeup_handler(void)
static __init int hardware_setup(void)
{
- int r = -ENOMEM, i, msr;
+ int r = -ENOMEM, i;
rdmsrl_safe(MSR_EFER, &host_efer);
@@ -6174,38 +6381,13 @@ static __init int hardware_setup(void)
if (!vmx_io_bitmap_b)
goto out;
- vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy)
- goto out1;
-
- vmx_msr_bitmap_legacy_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
- goto out2;
-
- vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode)
- goto out3;
-
- vmx_msr_bitmap_longmode_x2apic =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
- goto out4;
-
- if (nested) {
- vmx_msr_bitmap_nested =
- (unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_nested)
- goto out5;
- }
-
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
- goto out6;
+ goto out1;
vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmwrite_bitmap)
- goto out7;
+ goto out2;
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -6214,14 +6396,9 @@ static __init int hardware_setup(void)
memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
- memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- if (nested)
- memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
-
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
- goto out8;
+ goto out3;
}
if (boot_cpu_has(X86_FEATURE_NX))
@@ -6287,38 +6464,8 @@ static __init int hardware_setup(void)
kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
}
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
@@ -6349,21 +6496,10 @@ static __init int hardware_setup(void)
return alloc_kvm_area();
-out8:
- free_page((unsigned long)vmx_vmwrite_bitmap);
-out7:
- free_page((unsigned long)vmx_vmread_bitmap);
-out6:
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
-out5:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
-out4:
- free_page((unsigned long)vmx_msr_bitmap_longmode);
out3:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+ free_page((unsigned long)vmx_vmwrite_bitmap);
out2:
- free_page((unsigned long)vmx_msr_bitmap_legacy);
+ free_page((unsigned long)vmx_vmread_bitmap);
out1:
free_page((unsigned long)vmx_io_bitmap_b);
out:
@@ -6374,16 +6510,10 @@ out:
static __exit void hardware_unsetup(void)
{
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy);
- free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
- if (nested)
- free_page((unsigned long)vmx_msr_bitmap_nested);
free_kvm_area();
}
@@ -6427,93 +6557,6 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
}
/*
- * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
- * We could reuse a single VMCS for all the L2 guests, but we also want the
- * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
- * allows keeping them loaded on the processor, and in the future will allow
- * optimizations where prepare_vmcs02 doesn't need to set all the fields on
- * every entry if they never change.
- * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
- * (>=0) with a vmcs02 for each recently loaded vmcs12s, most recent first.
- *
- * The following functions allocate and free a vmcs02 in this pool.
- */
-
-/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
-static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
-{
- struct vmcs02_list *item;
- list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
- if (item->vmptr == vmx->nested.current_vmptr) {
- list_move(&item->list, &vmx->nested.vmcs02_pool);
- return &item->vmcs02;
- }
-
- if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
- /* Recycle the least recently used VMCS. */
- item = list_entry(vmx->nested.vmcs02_pool.prev,
- struct vmcs02_list, list);
- item->vmptr = vmx->nested.current_vmptr;
- list_move(&item->list, &vmx->nested.vmcs02_pool);
- return &item->vmcs02;
- }
-
- /* Create a new VMCS */
- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
- if (!item)
- return NULL;
- item->vmcs02.vmcs = alloc_vmcs();
- if (!item->vmcs02.vmcs) {
- kfree(item);
- return NULL;
- }
- loaded_vmcs_init(&item->vmcs02);
- item->vmptr = vmx->nested.current_vmptr;
- list_add(&(item->list), &(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num++;
- return &item->vmcs02;
-}
-
-/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
-static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
-{
- struct vmcs02_list *item;
- list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
- if (item->vmptr == vmptr) {
- free_loaded_vmcs(&item->vmcs02);
- list_del(&item->list);
- kfree(item);
- vmx->nested.vmcs02_num--;
- return;
- }
-}
-
-/*
- * Free all VMCSs saved for this vcpu, except the one pointed by
- * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
- * must be &vmx->vmcs01.
- */
-static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
-{
- struct vmcs02_list *item, *n;
-
- WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
- list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
- /*
- * Something will leak if the above WARN triggers. Better than
- * a use-after-free.
- */
- if (vmx->loaded_vmcs == &item->vmcs02)
- continue;
-
- free_loaded_vmcs(&item->vmcs02);
- list_del(&item->list);
- kfree(item);
- vmx->nested.vmcs02_num--;
- }
-}
-
-/*
* The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
* set the success or error code of an emulated VMX instruction, as specified
* by Vol 2B, VMX Instruction Reference, "Conventions".
@@ -6613,6 +6656,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
off = exit_qualification; /* holds the displacement */
+ if (addr_size == 1)
+ off = (gva_t)sign_extend64(off, 31);
+ else if (addr_size == 0)
+ off = (gva_t)sign_extend64(off, 15);
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
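The sign_extend64() calls added above matter because the exit qualification delivers the displacement zero-extended; for 16- and 32-bit address sizes a negative displacement would otherwise become a huge positive offset. A quick illustration with a made-up value:

	u64 off = 0xfffffff0;	/* -16 as a raw 32-bit displacement */

	/* Without the fix, base + 0xfffffff0 overshoots by almost 4 GiB. */
	gva_t fixed = (gva_t)sign_extend64(off, 31);	/* bit 31 is the sign bit; == -16 */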
@@ -6655,10 +6702,16 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
- /* Protected mode: #GP(0)/#SS(0) if the memory
- * operand is outside the segment limit.
+
+ /*
+ * Protected mode: #GP(0)/#SS(0) if the memory operand is
+ * outside the segment limit. All CPUs that support VMX ignore
+ * limit checks for flat segments, i.e. segments with base==0,
+ * limit==0xffffffff and of type expand-up data or code.
*/
- exn = exn || (off + sizeof(u64) > s.limit);
+ if (!(s.base == 0 && s.limit == 0xffffffff &&
+ ((s.type & 8) || !(s.type & 4))))
+ exn = exn || (off + sizeof(u64) > s.limit);
}
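The type test packed into that condition is easy to misread, so here is the same predicate unpacked into a sketch; the helper name is hypothetical, and the bit meanings are the AR-byte type field from the SDM (bit 3: code vs. data; bit 2 of a data segment: expand-down):

	static bool is_flat_expand_up_segment(const struct kvm_segment *s)
	{
		bool code	 = s->type & 8;	/* bit 3 set: code segment */
		bool expand_down = s->type & 4;	/* bit 2 (data only): expand-down */

		/* Flat means base 0, limit 4 GiB, and not expand-down data. */
		return s->base == 0 && s->limit == 0xffffffff &&
		       (code || !expand_down);
	}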
if (exn) {
kvm_queue_exception_e(vcpu,
@@ -6692,8 +6745,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
- sizeof(vmptr), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &vmptr, sizeof(vmptr), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -6787,6 +6839,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
struct vmcs *shadow_vmcs;
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+ int r;
/* The Intel VMX Instruction Reference lists a bunch of bits that
* are prerequisite to running VMXON, most notably cr4.VMXE must be
@@ -6820,16 +6873,20 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
+ if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
!= VMXON_NEEDED_FEATURES) {
kvm_inject_gp(vcpu, 0);
return 1;
}
+ r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
+ if (r < 0)
+ goto out_vmcs02;
+
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
if (!shadow_vmcs)
- return -ENOMEM;
+ goto out_shadow_vmcs;
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
@@ -6837,18 +6894,23 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.current_shadow_vmcs = shadow_vmcs;
}
- INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
- vmx->nested.vmcs02_num = 0;
-
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
+ vmx->nested.vpid02 = allocate_vpid();
+
vmx->nested.vmxon = true;
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
+
+out_shadow_vmcs:
+ free_loaded_vmcs(&vmx->nested.vmcs02);
+
+out_vmcs02:
+ return -ENOMEM;
}
/*
@@ -6915,12 +6977,13 @@ static void free_nested(struct vcpu_vmx *vmx)
if (!vmx->nested.vmxon)
return;
+ hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
- /* Unpin physical memory we referred to in current vmcs02 */
+ /* Unpin physical memory we referred to in the vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
@@ -6936,7 +6999,7 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.pi_desc = NULL;
}
- nested_free_all_saved_vmcss(vmx);
+ free_loaded_vmcs(&vmx->nested.vmcs02);
}
/* Emulate the VMXOFF instruction */
@@ -6970,8 +7033,6 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
vmptr + offsetof(struct vmcs12, launch_state),
&zero, sizeof(zero));
- nested_free_vmcs02(vmx, vmptr);
-
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
@@ -7211,8 +7272,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
vmx_instruction_info, true, &gva))
return 1;
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
- kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
- &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
+ kvm_write_guest_virt_system(vcpu, gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4), NULL);
}
nested_vmx_succeed(vcpu);
@@ -7247,8 +7308,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
- &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &field_value,
+ (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7338,9 +7399,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
vmx_instruction_info, true, &vmcs_gva))
return 1;
/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
- if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
- (void *)&to_vmx(vcpu)->nested.current_vmptr,
- sizeof(u64), &e)) {
+ if (kvm_write_guest_virt_system(vcpu, vmcs_gva,
+ (void *)&to_vmx(vcpu)->nested.current_vmptr,
+ sizeof(u64), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7394,8 +7455,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
- sizeof(operand), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7454,8 +7514,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
- if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
- sizeof(u32), &e)) {
+ if (kvm_read_guest_virt(vcpu, gva, &vpid, sizeof(u32), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
@@ -7758,6 +7817,19 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
+ /*
+ * The host physical addresses of some pages of guest memory
+ * are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
+ * Page). The CPU may write to these pages via their host
+ * physical address while L2 is running, bypassing any
+ * address-translation-based dirty tracking (e.g. EPT write
+ * protection).
+ *
+ * Mark them dirty on every exit from L2 to prevent them from
+ * getting out of sync with dirty tracking.
+ */
+ nested_mark_vmcs12_pages_dirty(vcpu);
+
if (vmx->nested.nested_run_pending)
return false;
@@ -8245,7 +8317,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
- vmx_set_msr_bitmap(vcpu);
+ vmx_update_msr_bitmap(vcpu);
}
static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
@@ -8414,9 +8486,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
local_irq_enable();
}
-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
{
- return enable_unrestricted_guest || emulate_invalid_guest_state;
+ switch (index) {
+ case MSR_IA32_SMBASE:
+ /*
+ * We cannot do SMM unless we can run the guest in big
+ * real mode.
+ */
+ return enable_unrestricted_guest || emulate_invalid_guest_state;
+ case MSR_AMD64_VIRT_SPEC_CTRL:
+ /* This is AMD only. */
+ return false;
+ default:
+ return true;
+ }
}
static bool vmx_mpx_supported(void)
@@ -8608,7 +8692,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
+ /*
+ * If this vCPU has touched SPEC_CTRL, restore the guest's value if
+ * it's non-zero. Since vmentry is serialising on affected CPUs, there
+ * is no need to worry about the conditional branch over the wrmsr
+ * being speculatively taken.
+ */
+ x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);
+
vmx->__launched = vmx->loaded_vmcs->launched;
+
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
@@ -8726,6 +8819,26 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
#endif
);
+ /*
+ * We do not use IBRS in the kernel. If this vCPU has used the
+ * SPEC_CTRL MSR it may have left it on; save the value and
+ * turn it off. This is much more efficient than blindly adding
+ * it to the atomic save/restore list, especially as the save half
+ * of that mechanism (saving guest MSRs on vmexit) doesn't even
+ * exist in KVM.
+ *
+ * For the non-nested case:
+ * If the L01 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ *
+ * For the nested case:
+ * If the L02 MSR bitmap does not intercept the MSR, then we need to
+ * save it.
+ */
+ if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
+ vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
+
+ x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);
+
/* Eliminate branch target predictions from guest mode */
vmexit_fill_RSB();
@@ -8825,6 +8938,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+ unsigned long *msr_bitmap;
int cpu;
if (!vmx)
@@ -8857,16 +8971,24 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx->guest_msrs)
goto free_pml;
- vmx->loaded_vmcs = &vmx->vmcs01;
- vmx->loaded_vmcs->vmcs = alloc_vmcs();
- if (!vmx->loaded_vmcs->vmcs)
- goto free_msrs;
if (!vmm_exclusive)
kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
- loaded_vmcs_init(vmx->loaded_vmcs);
+ err = alloc_loaded_vmcs(&vmx->vmcs01);
if (!vmm_exclusive)
kvm_cpu_vmxoff();
+ if (err < 0)
+ goto free_msrs;
+
+ msr_bitmap = vmx->vmcs01.msr_bitmap;
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_FS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
+ vmx_disable_intercept_for_msr(msr_bitmap, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
+ vmx->msr_bitmap_mode = 0;
+ vmx->loaded_vmcs = &vmx->vmcs01;
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
vmx->vcpu.cpu = cpu;
@@ -8890,10 +9012,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
goto free_vmcs;
}
- if (nested) {
+ if (nested)
nested_vmx_setup_ctls_msrs(vmx);
- vmx->nested.vpid02 = allocate_vpid();
- }
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
@@ -8902,7 +9022,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
return &vmx->vcpu;
free_vmcs:
- free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
@@ -9252,9 +9371,26 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
{
int msr;
struct page *page;
- unsigned long *msr_bitmap;
+ unsigned long *msr_bitmap_l1;
+ unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
+ /*
+ * pred_cmd & spec_ctrl are trying to verify two things:
+ *
+ * 1. L0 gave L1 permission to actually pass through the MSR. This
+ * ensures that we do not accidentally generate an L02 MSR bitmap
+ * from the L12 MSR bitmap that is too permissive.
+ * 2. L1 or its L2s have actually used the MSR. This avoids
+ * unnecessary merging of the bitmap if the MSR is unused. This
+ * works properly because we only update the L01 MSR bitmap lazily.
+ * So even if L0 should pass these MSRs to L1, the L01 bitmap is
+ * only updated to reflect this when L1 (or its L2s) actually
+ * writes to the MSR.
+ */
+ bool pred_cmd = msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
+ bool spec_ctrl = msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
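msr_write_intercepted_l01() itself is outside this excerpt. A plausible sketch, assuming it simply tests the write half of vmcs01's MSR bitmap (offsets 0x800/0xc00 follow the VMX bitmap layout: read-low, read-high, write-low, write-high in 1 KiB slots):

	static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
	{
		unsigned long *msr_bitmap;
		int f = sizeof(unsigned long);

		if (!cpu_has_vmx_msr_bitmap())
			return true;	/* no bitmap: everything intercepted */

		msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

		if (msr <= 0x1fff)
			return !!test_bit(msr, msr_bitmap + 0x800 / f);
		if (msr >= 0xc0000000 && msr <= 0xc0001fff)
			return !!test_bit(msr & 0x1fff, msr_bitmap + 0xc00 / f);

		return true;
	}

msr_write_intercepted(), used after vmexit earlier in this patch, presumably performs the same test against the currently loaded VMCS's bitmap instead of vmcs01's.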
- if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
+ if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
+ !pred_cmd && !spec_ctrl)
return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap);
@@ -9262,59 +9398,46 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
WARN_ON(1);
return false;
}
- msr_bitmap = (unsigned long *)kmap(page);
+ msr_bitmap_l1 = (unsigned long *)kmap(page);
+
+ memset(msr_bitmap_l0, 0xff, PAGE_SIZE);
if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12))
for (msr = 0x800; msr <= 0x8ff; msr++)
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
msr, MSR_TYPE_R);
- /* TPR is allowed */
- nested_vmx_disable_intercept_for_msr(msr_bitmap,
- vmx_msr_bitmap_nested,
+
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_R | MSR_TYPE_W);
+
if (nested_cpu_has_vid(vmcs12)) {
- /* EOI and self-IPI are allowed */
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
- msr_bitmap,
- vmx_msr_bitmap_nested,
+ msr_bitmap_l1, msr_bitmap_l0,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
- } else {
- /*
- * Enable reading intercept of all the x2apic
- * MSRs. We should not rely on vmcs12 to do any
- * optimizations here, it may have been modified
- * by L1.
- */
- for (msr = 0x800; msr <= 0x8ff; msr++)
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- msr,
- MSR_TYPE_R);
-
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_TASKPRI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_EOI >> 4),
- MSR_TYPE_W);
- __vmx_enable_intercept_for_msr(
- vmx_msr_bitmap_nested,
- APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
- MSR_TYPE_W);
}
+
+ if (spec_ctrl)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_SPEC_CTRL,
+ MSR_TYPE_R | MSR_TYPE_W);
+
+ if (pred_cmd)
+ nested_vmx_disable_intercept_for_msr(
+ msr_bitmap_l1, msr_bitmap_l0,
+ MSR_IA32_PRED_CMD,
+ MSR_TYPE_W);
+
kunmap(page);
nested_release_page_clean(page);
@@ -9733,10 +9856,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
if (cpu_has_vmx_msr_bitmap() &&
- exec_control & CPU_BASED_USE_MSR_BITMAPS) {
- nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
- /* MSR_BITMAP will be set by following vmx_set_efer. */
- } else
+ exec_control & CPU_BASED_USE_MSR_BITMAPS &&
+ nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
+ ; /* MSR_BITMAP will be set by following vmx_set_efer. */
+ else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
/*
@@ -9788,6 +9911,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
+
if (enable_vpid) {
/*
* There is no direct mapping between vpid02 and vpid12, the
@@ -9880,7 +10006,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
- struct loaded_vmcs *vmcs02;
bool ia32e;
u32 msr_entry_idx;
@@ -10020,10 +10145,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* the nested entry.
*/
- vmcs02 = nested_get_current_vmcs02(vmx);
- if (!vmcs02)
- return -ENOMEM;
-
enter_guest_mode(vcpu);
vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
@@ -10032,7 +10153,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
cpu = get_cpu();
- vmx->loaded_vmcs = vmcs02;
+ vmx->loaded_vmcs = &vmx->nested.vmcs02;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
@@ -10493,7 +10614,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
if (cpu_has_vmx_msr_bitmap())
- vmx_set_msr_bitmap(vcpu);
+ vmx_update_msr_bitmap(vcpu);
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vmcs12->vm_exit_msr_load_count))
@@ -10544,10 +10665,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
vmx_segment_cache_clear(vmx);
- /* if no vmcs02 cache requested, remove the one we used */
- if (VMCS02_POOL_SIZE == 0)
- nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
-
load_vmcs12_host_state(vcpu, vmcs12);
/* Update TSC_OFFSET if TSC was changed while L2 ran */
@@ -10866,7 +10983,7 @@ out:
return ret;
}
-static struct kvm_x86_ops vmx_x86_ops = {
+static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
@@ -10875,7 +10992,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority,
- .cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+ .has_emulated_msr = vmx_has_emulated_msr,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9cea09597d66cc..706c5d63a53fc7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -961,6 +961,7 @@ static u32 msrs_to_save[] = {
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
MSR_IA32_FEATURE_CONTROL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
+ MSR_IA32_SPEC_CTRL, MSR_IA32_ARCH_CAPABILITIES
};
static unsigned num_msrs_to_save;
@@ -984,6 +985,7 @@ static u32 emulated_msrs[] = {
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
MSR_IA32_SMBASE,
+ MSR_AMD64_VIRT_SPEC_CTRL,
};
static unsigned num_emulated_msrs;
@@ -2583,7 +2585,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* fringe case that is not enabled except via specific settings
* of the module parameters.
*/
- r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+ r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
@@ -4072,14 +4074,8 @@ static void kvm_init_msr_list(void)
num_msrs_to_save = j;
for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
- switch (emulated_msrs[i]) {
- case MSR_IA32_SMBASE:
- if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
- continue;
- break;
- default:
- break;
- }
+ if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+ continue;
if (j < i)
emulated_msrs[j] = emulated_msrs[i];
@@ -4245,24 +4241,35 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
return X86EMUL_CONTINUE;
}
-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
-static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
- gva_t addr, void *val, unsigned int bytes,
- struct x86_exception *exception)
+static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
+ gva_t addr, void *val, unsigned int bytes,
+ struct x86_exception *exception, bool system)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
- return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
+ u32 access = 0;
+
+ if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+ access |= PFERR_USER_MASK;
+
+ return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
}
static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
@@ -4274,18 +4281,16 @@ static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
}
-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
- gva_t addr, void *val,
- unsigned int bytes,
- struct x86_exception *exception)
+static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+ struct kvm_vcpu *vcpu, u32 access,
+ struct x86_exception *exception)
{
- struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
- PFERR_WRITE_MASK,
+ access,
exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
@@ -4306,6 +4311,27 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
out:
return r;
}
+
+static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
+ unsigned int bytes, struct x86_exception *exception,
+ bool system)
+{
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ u32 access = PFERR_WRITE_MASK;
+
+ if (!system && kvm_x86_ops->get_cpl(vcpu) == 3)
+ access |= PFERR_USER_MASK;
+
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+ access, exception);
+}
+
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
+ unsigned int bytes, struct x86_exception *exception)
+{
+ return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
+ PFERR_WRITE_MASK, exception);
+}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
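The split above is the point of this hunk: the emulator callbacks now honor CPL, while the _system variants keep kernel privilege but take an explicit vcpu so they can only be used deliberately. A summary of the resulting access semantics, using the names introduced in this patch:

	/*
	 * emulator_read_std()/emulator_write_std(): add PFERR_USER_MASK
	 *	when CPL == 3 and 'system' is false, so guest user mode
	 *	cannot touch supervisor-only pages through emulation.
	 * kvm_read_guest_virt(): CPL-honoring reads for VMX instruction
	 *	operands; also zeroes *exception so callers that blindly
	 *	inject a page fault do not leak stack contents.
	 * kvm_write_guest_virt_system(): PFERR_WRITE_MASK only, i.e.
	 *	kernel privilege; callers such as the VMREAD/VMPTRST
	 *	handlers have already verified cpl == 0.
	 */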
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
@@ -5025,8 +5051,8 @@ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_fla
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
- .read_std = kvm_read_guest_virt_system,
- .write_std = kvm_write_guest_virt_system,
+ .read_std = emulator_read_std,
+ .write_std = emulator_write_std,
.read_phys = kvm_read_guest_phys_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
@@ -5417,7 +5443,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
* handle watchpoints yet, those would be handled in
* the emulate_ops.
*/
- if (kvm_vcpu_check_breakpoint(vcpu, &r))
+ if (!(emulation_type & EMULTYPE_SKIP) &&
+ kvm_vcpu_check_breakpoint(vcpu, &r))
return r;
ctxt->interruptibility = 0;
@@ -5504,8 +5531,7 @@ restart:
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
- if (r == EMULATE_DONE &&
- (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
+ if (r == EMULATE_DONE && ctxt->tf)
kvm_vcpu_do_singlestep(vcpu, &r);
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP)
@@ -6452,6 +6478,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
+ vcpu->mmio_needed = 0;
r = 0;
goto out;
}
@@ -7296,16 +7323,6 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
- /*
- * If using eager FPU mode, or if the guest is a frequent user
- * of the FPU, just leave the FPU active for next time.
- * Every 255 times fpu_counter rolls over to 0; a guest that uses
- * the FPU in bursts will revert to loading it on demand.
- */
- if (!vcpu->arch.eager_fpu) {
- if (++vcpu->fpu_counter < 5)
- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
- }
trace_kvm_fpu(0);
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index f2afa5fe48a6dc..53a750a10598d0 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -164,11 +164,11 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
-int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
+int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);
-int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
+int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception);
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index a2fe51b00ccefc..65be7cfaf94722 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -1,5 +1,5 @@
#include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
/*
diff --git a/arch/x86/lib/cmdline.c b/arch/x86/lib/cmdline.c
index a744506856b1d1..88ce150186c641 100644
--- a/arch/x86/lib/cmdline.c
+++ b/arch/x86/lib/cmdline.c
@@ -21,12 +21,14 @@ static inline int myisspace(u8 c)
* @option: option string to look for
*
* Returns the position of that @option (starts counting with 1)
- * or 0 on not found.
+ * or 0 if not found. @option is matched only as an entire
+ * word in @cmdline. For instance, if @option="car" then a
+ * cmdline which contains "cart" will not match.
*/
int cmdline_find_option_bool(const char *cmdline, const char *option)
{
char c;
- int len, pos = 0, wstart = 0;
+ int pos = 0, wstart = 0;
const char *opptr = NULL;
enum {
st_wordstart = 0, /* Start of word/after whitespace */
@@ -37,11 +39,14 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
if (!cmdline)
return -1; /* No command line */
- len = min_t(int, strlen(cmdline), COMMAND_LINE_SIZE);
- if (!len)
+ if (!strlen(cmdline))
return 0;
- while (len--) {
+ /*
+ * This 'pos' check ensures we do not overrun
+ * a non-NULL-terminated 'cmdline'
+ */
+ while (pos < COMMAND_LINE_SIZE) {
c = *(char *)cmdline++;
pos++;
@@ -58,17 +63,26 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
/* fall through */
case st_wordcmp:
- if (!*opptr)
+ if (!*opptr) {
+ /*
+ * We matched all the way to the end of the
+ * option we were looking for. If the
+ * command-line has a space _or_ ends, then
+ * we matched!
+ */
if (!c || myisspace(c))
return wstart;
else
state = st_wordskip;
- else if (!c)
+ } else if (!c) {
+ /*
+ * Hit the NULL terminator on the end of
+ * cmdline.
+ */
return 0;
- else if (c != *opptr++)
+ } else if (c != *opptr++) {
state = st_wordskip;
- else if (!len) /* last word and is matching */
- return wstart;
+ }
break;
case st_wordskip:
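With the whole-word rule documented above, a hedged usage sketch (the command line is made up; positions are 1-based, as documented):

	const char *cl = "quiet cart root=/dev/sda1";

	cmdline_find_option_bool(cl, "car");	/* 0: "cart" is not the word "car" */
	cmdline_find_option_bool(cl, "cart");	/* 7: 1-based start of the word */
	cmdline_find_option_bool(cl, "quiet");	/* 1: matches at the start */
	cmdline_find_option_bool(NULL, "x");	/* -1: no command line at all */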
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index 009f98216b7eb3..24ef1c2104d422 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -1,7 +1,7 @@
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
#include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
/*
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 423644c230e76e..f11fb99f7f6c7c 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -10,18 +10,18 @@
#include <asm/current.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
/* Standard copy_to_user with segment limit checking */
ENTRY(_copy_to_user)
- GET_THREAD_INFO(%rax)
+ mov PER_CPU_VAR(current_task), %rax
movq %rdi,%rcx
addq %rdx,%rcx
jc bad_to_user
- cmpq TI_addr_limit(%rax),%rcx
+ cmpq TASK_addr_limit(%rax),%rcx
ja bad_to_user
ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
"jmp copy_user_generic_string", \
@@ -32,11 +32,11 @@ ENDPROC(_copy_to_user)
/* Standard copy_from_user with segment limit checking */
ENTRY(_copy_from_user)
- GET_THREAD_INFO(%rax)
+ mov PER_CPU_VAR(current_task), %rax
movq %rsi,%rcx
addq %rdx,%rcx
jc bad_from_user
- cmpq TI_addr_limit(%rax),%rcx
+ cmpq TASK_addr_limit(%rax),%rcx
ja bad_from_user
ALTERNATIVE_2 "jmp copy_user_generic_unrolled", \
"jmp copy_user_generic_string", \
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 1318f75d56e4f0..503d4b51a51c9e 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -6,6 +6,7 @@
*/
#include <asm/checksum.h>
#include <linux/module.h>
+#include <linux/uaccess.h>
#include <asm/smap.h>
/**
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 490b2ee4e4bb88..6eddb5561dea03 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -35,8 +35,8 @@
.text
ENTRY(__get_user_1)
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
@@ -50,8 +50,8 @@ ENDPROC(__get_user_1)
ENTRY(__get_user_2)
add $1,%_ASM_AX
jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
@@ -65,8 +65,8 @@ ENDPROC(__get_user_2)
ENTRY(__get_user_4)
add $3,%_ASM_AX
jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
@@ -81,8 +81,8 @@ ENTRY(__get_user_8)
#ifdef CONFIG_X86_64
add $7,%_ASM_AX
jc bad_get_user
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
@@ -94,8 +94,8 @@ ENTRY(__get_user_8)
#else
add $7,%_ASM_AX
jc bad_get_user_8
- GET_THREAD_INFO(%_ASM_DX)
- cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ mov PER_CPU_VAR(current_task), %_ASM_DX
+ cmp TASK_addr_limit(%_ASM_DX),%_ASM_AX
jae bad_get_user_8
sbb %_ASM_DX, %_ASM_DX /* array_index_mask_nospec() */
and %_ASM_DX, %_ASM_AX
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index a404b4b7553319..6bf6c1ecd28437 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -6,7 +6,7 @@
__visible void *memcpy(void *to, const void *from, size_t n)
{
-#ifdef CONFIG_X86_USE_3DNOW
+#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE)
return __memcpy3d(to, from, n);
#else
return __memcpy(to, from, n);
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 16698bba87deb9..a0de849435ad69 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -1,7 +1,7 @@
/* Copyright 2002 Andi Kleen */
#include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
/*
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index ca2afdd6d98ed2..90ce01bee00c17 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -6,7 +6,7 @@
* - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
*/
#include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#undef memmove
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 2661fad0582716..c9c81227ea37d1 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -1,7 +1,7 @@
/* Copyright 2002 Andi Kleen, SuSE Labs */
#include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
.weak memset
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index e0817a12d32362..c891ece81e5b11 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -29,14 +29,14 @@
* as they get called from within inline assembly.
*/
-#define ENTER GET_THREAD_INFO(%_ASM_BX)
+#define ENTER mov PER_CPU_VAR(current_task), %_ASM_BX
#define EXIT ASM_CLAC ; \
ret
.text
ENTRY(__put_user_1)
ENTER
- cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
+ cmp TASK_addr_limit(%_ASM_BX),%_ASM_CX
jae bad_put_user
ASM_STAC
1: movb %al,(%_ASM_CX)
@@ -46,7 +46,7 @@ ENDPROC(__put_user_1)
ENTRY(__put_user_2)
ENTER
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $1,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae bad_put_user
@@ -58,7 +58,7 @@ ENDPROC(__put_user_2)
ENTRY(__put_user_4)
ENTER
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $3,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae bad_put_user
@@ -70,7 +70,7 @@ ENDPROC(__put_user_4)
ENTRY(__put_user_8)
ENTER
- mov TI_addr_limit(%_ASM_BX),%_ASM_BX
+ mov TASK_addr_limit(%_ASM_BX),%_ASM_BX
sub $7,%_ASM_BX
cmp %_ASM_BX,%_ASM_CX
jae bad_put_user
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 3d06b482ebc7e2..7bbb853e36bd42 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -3,7 +3,7 @@
#include <linux/stringify.h>
#include <linux/linkage.h>
#include <asm/dwarf2.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm-generic/export.h>
#include <asm/nospec-branch.h>
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 91d93b95bd8685..0a6fcae404f8b9 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -570,12 +570,12 @@ do { \
unsigned long __copy_to_user_ll(void __user *to, const void *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_intel(to, from, n);
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_to_user_ll);
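Every stac()/clac() pair in this file becomes __uaccess_begin_nospec()/__uaccess_end(). The begin-nospec variant is defined elsewhere in this series; presumably it puts a speculation barrier ahead of STAC so a mispredicted access_ok() check cannot speculatively dereference a user-controlled pointer — roughly:

	/* Sketch, assuming the definition used elsewhere in this series. */
	#define __uaccess_begin_nospec()	\
	({					\
		barrier_nospec();		\
		stac();				\
	})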
@@ -583,12 +583,12 @@ EXPORT_SYMBOL(__copy_to_user_ll);
unsigned long __copy_from_user_ll(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
if (movsl_is_ok(to, from, n))
__copy_user_zeroing(to, from, n);
else
n = __copy_user_zeroing_intel(to, from, n);
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll);
@@ -596,13 +596,13 @@ EXPORT_SYMBOL(__copy_from_user_ll);
unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
if (movsl_is_ok(to, from, n))
__copy_user(to, from, n);
else
n = __copy_user_intel((void __user *)to,
(const void *)from, n);
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nozero);
@@ -610,7 +610,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nozero);
unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
if (n > 64 && cpu_has_xmm2)
n = __copy_user_zeroing_intel_nocache(to, from, n);
@@ -619,7 +619,7 @@ unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from,
#else
__copy_user_zeroing(to, from, n);
#endif
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache);
@@ -627,7 +627,7 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache);
unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
unsigned long n)
{
- stac();
+ __uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
if (n > 64 && cpu_has_xmm2)
n = __copy_user_intel_nocache(to, from, n);
@@ -636,7 +636,7 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
#else
__copy_user(to, from, n);
#endif
- clac();
+ __uaccess_end();
return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0a42327a59d71e..9f760cdcaf40b8 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -6,7 +6,7 @@
* Copyright 2002 Andi Kleen <ak@suse.de>
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
/*
* Zero Userspace
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 903ec1e9c3263f..73523cf59e52ad 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -2,6 +2,7 @@
#include <linux/spinlock.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
+#include <linux/sched.h>
static inline unsigned long
ex_insn_addr(const struct exception_table_entry *x)
@@ -39,7 +40,7 @@ int fixup_exception(struct pt_regs *regs)
if (fixup->fixup - fixup->insn >= 0x7ffffff0 - 4) {
/* Special hack for uaccess_err */
- current_thread_info()->uaccess_err = 1;
+ current->thread.uaccess_err = 1;
new_ip -= 0x7ffffff0;
}
regs->ip = new_ip;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 333feba7a17466..97d4386864214b 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -273,8 +273,6 @@ static noinline int vmalloc_fault(unsigned long address)
if (!(address >= VMALLOC_START && address < VMALLOC_END))
return -1;
- WARN_ON_ONCE(in_nmi());
-
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
@@ -678,7 +676,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
* In this case we need to make sure we're not recursively
* faulting through the emulate_vsyscall() logic.
*/
- if (current_thread_info()->sig_on_uaccess_error && signal) {
+ if (current->thread.sig_on_uaccess_err && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | PF_USER;
tsk->thread.cr2 = address;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index ae9a37bf137114..7d2542ad346aa5 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -388,7 +388,7 @@ slow_irqon:
ret = get_user_pages_unlocked(current, mm, start,
(end - start) >> PAGE_SHIFT,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 151fd33e904332..f00eb52c16a6a1 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -4,6 +4,8 @@
#include <linux/swap.h>
#include <linux/memblock.h>
#include <linux/bootmem.h> /* for max_low_pfn */
+#include <linux/swapfile.h>
+#include <linux/swapops.h>
#include <asm/cacheflush.h>
#include <asm/e820.h>
@@ -767,3 +769,26 @@ void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
__cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
__pte2cachemode_tbl[entry] = cache;
}
+
+#ifdef CONFIG_SWAP
+unsigned long max_swapfile_size(void)
+{
+ unsigned long pages;
+
+ pages = generic_max_swapfile_size();
+
+ if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ /* Limit the swap file size to MAX_PA/2 for L1TF workaround */
+ unsigned long long l1tf_limit = l1tf_pfn_limit();
+ /*
+ * Swap offsets are also encoded with 3 bits below those for the
+ * pfn, which makes the usable limit higher.
+ */
+#if CONFIG_PGTABLE_LEVELS > 2
+ l1tf_limit <<= PAGE_SHIFT - SWP_OFFSET_FIRST_BIT;
+#endif
+ pages = min_t(unsigned long long, l1tf_limit, pages);
+ }
+ return pages;
+}
+#endif
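To make the 3-bit shift concrete, a worked example under stated assumptions (MAX_PA = 46 bits, PAGE_SHIFT = 12, SWP_OFFSET_FIRST_BIT = 9, consistent with the "3 bits" in the comment, and l1tf_pfn_limit() returning the pfn of MAX_PA/2):

	/*
	 *   l1tf_limit = 2^45 / 2^12 = 2^33 pfns        (MAX_PA/2 in pages)
	 *   l1tf_limit <<= 3         = 2^36 swap slots  (256 TiB of swap)
	 *
	 * The shift is legitimate because swap offsets are stored 3 bits
	 * below the pfn field in a swap PTE, so the same physical-address
	 * cutoff permits 8x more page-sized swap slots.
	 */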
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index cb4ef3de61f9ae..2ebfbaf611424b 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -871,7 +871,6 @@ static noinline int do_test_wp_bit(void)
return flag;
}
-#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
@@ -960,5 +959,3 @@ void mark_rodata_ro(void)
if (__supported_pte_mask & _PAGE_NX)
debug_checkwx();
}
-#endif
-
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d76ec9348cff4c..97b6b0164dcbf2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1072,7 +1072,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
}
-#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);
@@ -1164,8 +1163,6 @@ void mark_rodata_ro(void)
debug_checkwx();
}
-#endif
-
int kern_addr_valid(unsigned long addr)
{
unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 76604c8a2a4878..7bf14e74fc8f17 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -125,24 +125,29 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
static void clear_pmd_presence(pmd_t *pmd, bool clear, pmdval_t *old)
{
+ pmd_t new_pmd;
pmdval_t v = pmd_val(*pmd);
if (clear) {
- *old = v & _PAGE_PRESENT;
- v &= ~_PAGE_PRESENT;
- } else /* presume this has been called with clear==true previously */
- v |= *old;
- set_pmd(pmd, __pmd(v));
+ *old = v;
+ new_pmd = pmd_mknotpresent(*pmd);
+ } else {
+ /* Presume this has been called with clear==true previously */
+ new_pmd = __pmd(*old);
+ }
+ set_pmd(pmd, new_pmd);
}
static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
{
pteval_t v = pte_val(*pte);
if (clear) {
- *old = v & _PAGE_PRESENT;
- v &= ~_PAGE_PRESENT;
- } else /* presume this has been called with clear==true previously */
- v |= *old;
- set_pte_atomic(pte, __pte(v));
+ *old = v;
+ /* Nothing should care about address */
+ pte_clear(&init_mm, 0, pte);
+ } else {
+ /* Presume this has been called with clear==true previously */
+ set_pte_atomic(pte, __pte(*old));
+ }
}
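A note on why the save/restore became whole-value — inferred from the L1TF work elsewhere in this merge, so treat it as an assumption:

	/*
	 * With PTE inversion, a non-present entry stores an inverted pfn,
	 * so masking _PAGE_PRESENT in and out no longer round-trips; only
	 * pte_clear()/pmd_mknotpresent() apply the inversion correctly,
	 * and only the saved full value restores the original entry.
	 */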
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index d2dc0438d654a8..74609a957c491b 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -121,3 +121,24 @@ const char *arch_vma_name(struct vm_area_struct *vma)
return "[mpx]";
return NULL;
}
+
+/*
+ * Only allow root to set high MMIO mappings to PROT_NONE.
+ * This prevents an unprivileged user from setting them to PROT_NONE
+ * and inverting them, so that they point at valid memory for L1TF
+ * speculation.
+ *
+ * Note: locked-down kernels may want to disable the root override.
+ */
+bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ if (!boot_cpu_has_bug(X86_BUG_L1TF))
+ return true;
+ if (!__pte_needs_invert(pgprot_val(prot)))
+ return true;
+ /* If it's real memory always allow */
+ if (pfn_valid(pfn))
+ return true;
+ if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
+ return false;
+ return true;
+}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7ed47b1e6f42d0..7e94fc6f608ad2 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -536,10 +536,9 @@ static int mpx_resolve_fault(long __user *addr, int write)
{
long gup_ret;
int nr_pages = 1;
- int force = 0;
gup_ret = get_user_pages(current, current->mm, (unsigned long)addr,
- nr_pages, write, force, NULL, NULL);
+ nr_pages, write ? FOLL_WRITE : 0, NULL, NULL);
/*
* get_user_pages() returns number of pages gotten.
* 0 means we failed to fault in and get anything,
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index a8f90ce3dedff5..dc6d99017f3f0a 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -60,7 +60,7 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
- emu_nid_to_phys[nid] = nid;
+ emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 79377e2a7bcd62..c0b65e8da1aa40 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -273,13 +273,15 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
/*
* The .rodata section needs to be read-only. Using the pfn
- * catches all aliases.
+ * catches all aliases. This also includes __ro_after_init,
+ * so do not enforce until kernel_set_to_readonly is true.
*/
- if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
+ if (kernel_set_to_readonly &&
+ within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
__pa_symbol(__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
-#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
+#if defined(CONFIG_X86_64)
/*
* Once the kernel maps the text as RO (kernel_set_to_readonly is set),
* kernel text mappings for the large page aligned text, rodata sections
@@ -955,11 +957,11 @@ static void populate_pte(struct cpa_data *cpa,
}
}
-static int populate_pmd(struct cpa_data *cpa,
- unsigned long start, unsigned long end,
- unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
- unsigned int cur_pages = 0;
+ long cur_pages = 0;
pmd_t *pmd;
pgprot_t pmd_pgprot;
@@ -1006,8 +1008,8 @@ static int populate_pmd(struct cpa_data *cpa,
pmd = pmd_offset(pud, start);
- set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
- massage_pgprot(pmd_pgprot)));
+ set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn >> PAGE_SHIFT,
+ canon_pgprot(pmd_pgprot))));
start += PMD_SIZE;
cpa->pfn += PMD_SIZE;
@@ -1029,12 +1031,12 @@ static int populate_pmd(struct cpa_data *cpa,
return num_pages;
}
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
- pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+ pgprot_t pgprot)
{
pud_t *pud;
unsigned long end;
- int cur_pages = 0;
+ long cur_pages = 0;
pgprot_t pud_pgprot;
end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1079,8 +1081,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
* Map everything starting from the Gb boundary, possibly with 1G pages
*/
while (end - start >= PUD_SIZE) {
- set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
- massage_pgprot(pud_pgprot)));
+ set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn >> PAGE_SHIFT,
+ canon_pgprot(pud_pgprot))));
start += PUD_SIZE;
cpa->pfn += PUD_SIZE;
@@ -1090,7 +1092,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
/* Map trailing leftover */
if (start < end) {
- int tmp;
+ long tmp;
pud = pud_offset(pgd, start);
if (pud_none(*pud))
@@ -1116,7 +1118,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
pud_t *pud = NULL; /* shut up gcc */
pgd_t *pgd_entry;
- int ret;
+ long ret;
pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1351,7 +1353,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
- int ret, numpages = cpa->numpages;
+ unsigned long numpages = cpa->numpages;
+ int ret;
while (numpages) {
/*
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index cadf80c07e9ec9..9b64a360d1b97a 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -726,6 +726,20 @@ void io_free_memtype(resource_size_t start, resource_size_t end)
free_memtype(start, end);
}
+int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ enum page_cache_mode type = _PAGE_CACHE_MODE_WC;
+
+ return io_reserve_memtype(start, start + size, &type);
+}
+EXPORT_SYMBOL(arch_io_reserve_memtype_wc);
+
+void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size)
+{
+ io_free_memtype(start, start + size);
+}
+EXPORT_SYMBOL(arch_io_free_memtype_wc);
+
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 08e94b6139ab66..55c7446311a7ba 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -676,28 +676,50 @@ int pmd_clear_huge(pmd_t *pmd)
return 0;
}
+#ifdef CONFIG_X86_64
/**
* pud_free_pmd_page - Clear pud entry and free pmd page.
* @pud: Pointer to a PUD.
+ * @addr: Virtual address associated with pud.
*
- * Context: The pud range has been unmaped and TLB purged.
+ * Context: The pud range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
+ *
+ * NOTE: Callers must allow a single page allocation.
*/
-int pud_free_pmd_page(pud_t *pud)
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd;
+ pmd_t *pmd, *pmd_sv;
+ pte_t *pte;
int i;
if (pud_none(*pud))
return 1;
pmd = (pmd_t *)pud_page_vaddr(*pud);
+ pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
+ if (!pmd_sv)
+ return 0;
- for (i = 0; i < PTRS_PER_PMD; i++)
- if (!pmd_free_pte_page(&pmd[i]))
- return 0;
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ pmd_sv[i] = pmd[i];
+ if (!pmd_none(pmd[i]))
+ pmd_clear(&pmd[i]);
+ }
pud_clear(pud);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
+ for (i = 0; i < PTRS_PER_PMD; i++) {
+ if (!pmd_none(pmd_sv[i])) {
+ pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
+ free_page((unsigned long)pte);
+ }
+ }
+
+ free_page((unsigned long)pmd_sv);
free_page((unsigned long)pmd);
return 1;
@@ -706,11 +728,12 @@ int pud_free_pmd_page(pud_t *pud)
/**
* pmd_free_pte_page - Clear pmd entry and free pte page.
* @pmd: Pointer to a PMD.
+ * @addr: Virtual address associated with pmd.
*
- * Context: The pmd range has been unmaped and TLB purged.
+ * Context: The pmd range has been unmapped and TLB purged.
* Return: 1 if clearing the entry succeeded. 0 otherwise.
*/
-int pmd_free_pte_page(pmd_t *pmd)
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
pte_t *pte;
@@ -719,8 +742,30 @@ int pmd_free_pte_page(pmd_t *pmd)
pte = (pte_t *)pmd_page_vaddr(*pmd);
pmd_clear(pmd);
+
+ /* INVLPG to clear all paging-structure caches */
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
+
free_page((unsigned long)pte);
return 1;
}
+
+#else /* !CONFIG_X86_64 */
+
+int pud_free_pmd_page(pud_t *pud, unsigned long addr)
+{
+ return pud_none(*pud);
+}
+
+/*
+ * Disable free page handling on x86-PAE. This ensures that ioremap()
+ * does not update synced pmd entries. See vmalloc_sync_one().
+ */
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
+{
+ return pmd_none(*pmd);
+}
+
+#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 90555bf60aa45d..f65a33f505b683 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -4,6 +4,7 @@
#include <asm/pgtable.h>
#include <asm/proto.h>
+#include <asm/cpufeature.h>
static int disable_nx;
@@ -31,7 +32,7 @@ early_param("noexec", noexec_setup);
void x86_configure_nx(void)
{
- if (cpu_has_nx && !disable_nx)
+ if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
__supported_pte_mask |= _PAGE_NX;
else
__supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +40,7 @@ void x86_configure_nx(void)
void __init x86_report_nx(void)
{
- if (!cpu_has_nx) {
+ if (!boot_cpu_has(X86_FEATURE_NX)) {
printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
"missing in CPU!\n");
} else {
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 7cad01af6dcd2e..6d683bbb35021d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -10,6 +10,7 @@
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
+#include <asm/nospec-branch.h>
#include <asm/cache.h>
#include <asm/apic.h>
#include <asm/uv/uv.h>
@@ -29,6 +30,8 @@
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
*/
+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
struct flush_tlb_info {
struct mm_struct *flush_mm;
unsigned long flush_start;
@@ -104,6 +107,36 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
unsigned cpu = smp_processor_id();
if (likely(prev != next)) {
+ u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id);
+
+ /*
+ * Avoid user/user BTB poisoning by flushing the branch
+ * predictor when switching between processes. This stops
+ * one process from doing Spectre-v2 attacks on another.
+ *
+ * As an optimization, flush indirect branches only when
+ * switching into processes that disable dumping. This
+ * protects high value processes like gpg, without having
+ * too high performance overhead. IBPB is *expensive*!
+ *
+ * This will not flush branches when switching into kernel
+ * threads. It will also not flush if we switch to idle
+ * thread and back to the same process. It will flush if we
+ * switch to a different non-dumpable process.
+ */
+ if (tsk && tsk->mm &&
+ tsk->mm->context.ctx_id != last_ctx_id &&
+ get_dumpable(tsk->mm) != SUID_DUMP_USER)
+ indirect_branch_prediction_barrier();
+
+ /*
+ * Record last user mm's context id, so we can avoid
+ * flushing branch buffer with IBPB if we switch back
+ * to the same user.
+ */
+ if (next != &init_mm)
+ this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id);
+
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
this_cpu_write(cpu_tlbstate.active_mm, next);
cpumask_set_cpu(cpu, mm_cpumask(next));
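
The comment block above encodes the whole policy: IBPB fires only when the incoming mm really differs from the last user mm that ran on this CPU and the target has disabled dumping (the heuristic for a high-value process). A standalone sketch of that decision, with ibpb() and the ctx_id bookkeeping as illustrative stand-ins rather than the kernel API; the real code additionally skips the last_ctx_id update when switching to init_mm:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mm { uint64_t ctx_id; bool dumpable; };

static uint64_t last_ctx_id;		/* per-CPU in the real code */

static void ibpb(void)
{
	puts("IBPB (expensive barrier)");
}

static void switch_to(const struct mm *next)
{
	/*
	 * Barrier only when (a) this really is a different user mm than
	 * the one that last ran here, and (b) the target opted out of
	 * dumping, i.e. it is worth the cost of the flush.
	 */
	if (next->ctx_id != last_ctx_id && !next->dumpable)
		ibpb();

	last_ctx_id = next->ctx_id;	/* remember for the next switch */
}

int main(void)
{
	struct mm gpg   = { .ctx_id = 1, .dumpable = false };
	struct mm shell = { .ctx_id = 2, .dumpable = true  };

	switch_to(&gpg);	/* different + non-dumpable: barrier */
	switch_to(&gpg);	/* same mm again: no barrier */
	switch_to(&shell);	/* dumpable: no barrier */
	switch_to(&gpg);	/* different + non-dumpable: barrier */
	return 0;
}
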
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 50d86c0e9ba497..660a83c8287b61 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -24,7 +24,6 @@
#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/processor.h>
-#include <asm/cpufeature.h>
#include "op_x86_model.h"
#include "op_counter.h"
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 526536c81ddc41..ca1e8e6dccc8af 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
word1 = read_pci_config_16(bus, slot, func, 0xc0);
word2 = read_pci_config_16(bus, slot, func, 0xc2);
if (word1 != word2) {
- res.start = (word1 << 16) | 0x0000;
- res.end = (word2 << 16) | 0xffff;
+ res.start = ((resource_size_t) word1 << 16) | 0x0000;
+ res.end = ((resource_size_t) word2 << 16) | 0xffff;
res.flags = IORESOURCE_MEM;
update_res(info, res.start, res.end, res.flags, 0);
}
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 0ae7e9fa348dfa..89f90549c6a857 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -541,9 +541,16 @@ static void twinhead_reserve_killing_zone(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
+/*
+ * Broadwell EP Home Agent BARs erroneously return non-zero values when read.
+ *
+ * See http://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
+ * entry BDF2.
+ */
static void pci_bdwep_bar(struct pci_dev *dev)
{
dev->non_compliant_bars = 1;
}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_bdwep_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_bdwep_bar);
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index 9b83b9051ae7bb..2a23e79d42b5eb 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -120,9 +120,12 @@ static unsigned long __init bios32_service(unsigned long service)
static struct {
unsigned long address;
unsigned short segment;
-} pci_indirect = { 0, __KERNEL_CS };
+} pci_indirect __ro_after_init = {
+ .address = 0,
+ .segment = __KERNEL_CS,
+};
-static int pci_bios_present;
+static int pci_bios_present __ro_after_init;
static int __init check_pcibios(void)
{
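
__ro_after_init places write-once data in a section the kernel remaps read-only once init completes, so a later stray write to the BIOS32 entry point faults instead of silently redirecting indirect calls. Userspace has no such attribute, but mprotect() reaches the same end state; a sketch assuming POSIX and a 4 KiB page:

#include <stdio.h>
#include <sys/mman.h>

struct pci_indirect { unsigned long address; unsigned short segment; };

int main(void)
{
	/* Back the write-once data with its own page so it can be sealed. */
	struct pci_indirect *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	p->address = 0xf0000;		/* one-time initialization */
	p->segment = 0x10;

	/* The __ro_after_init moment: drop write access after setup. */
	if (mprotect(p, 4096, PROT_READ) != 0) {
		perror("mprotect");
		return 1;
	}

	printf("sealed: %#lx:%#hx\n", p->address, p->segment);
	/* p->address = 0; would now fault instead of corrupting state. */
	return 0;
}
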
diff --git a/arch/x86/platform/efi/early_printk.c b/arch/x86/platform/efi/early_printk.c
index 52414211729689..82324fc25d5e6f 100644
--- a/arch/x86/platform/efi/early_printk.c
+++ b/arch/x86/platform/efi/early_printk.c
@@ -179,7 +179,7 @@ early_efi_write(struct console *con, const char *str, unsigned int num)
num--;
}
- if (efi_x >= si->lfb_width) {
+ if (efi_x + font->width > si->lfb_width) {
efi_x = 0;
efi_y += font->height;
}
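
The old condition above wrapped only after the cursor was already at or past the right edge, so the final glyph of a row could be drawn clipped; the new one wraps as soon as the next glyph would not fit. A tiny sketch of where the two tests diverge, with made-up dimensions:

#include <stdio.h>

int main(void)
{
	unsigned int lfb_width = 100, font_width = 8;
	unsigned int x;

	for (x = 88; x <= 104; x += font_width) {
		int old_wrap = (x >= lfb_width);		/* too late */
		int new_wrap = (x + font_width > lfb_width);	/* in time  */

		printf("x=%3u  old=%d  new=%d\n", x, old_wrap, new_wrap);
	}
	return 0;
}

At x = 96 the glyph would span columns 96..103 of a 100-pixel row: only the new test wraps before drawing it.
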
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index a0ac0f9c307f66..f5a8cd96bae48f 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -40,6 +40,7 @@
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/time.h>
+#include <asm/nospec-branch.h>
/*
* We allocate runtime services regions bottom-up, starting from -4G, i.e.
@@ -347,6 +348,7 @@ extern efi_status_t efi64_thunk(u32, ...);
\
efi_sync_low_kernel_mappings(); \
local_irq_save(flags); \
+ firmware_restrict_branch_speculation_start(); \
\
efi_scratch.prev_cr3 = read_cr3(); \
write_cr3((unsigned long)efi_scratch.efi_pgt); \
@@ -357,6 +359,7 @@ extern efi_status_t efi64_thunk(u32, ...);
\
write_cr3(efi_scratch.prev_cr3); \
__flush_tlb_all(); \
+ firmware_restrict_branch_speculation_end(); \
local_irq_restore(flags); \
\
__s; \
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 755481f14d90f2..764ac2fc53fe84 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -3,7 +3,7 @@
#include <asm/asm.h>
#include <asm/segment.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
#include <asm/cmpxchg.h>
#include <asm/nops.h>
diff --git a/arch/x86/um/ptrace_32.c b/arch/x86/um/ptrace_32.c
index a29756f2d9402e..c081c84daad395 100644
--- a/arch/x86/um/ptrace_32.c
+++ b/arch/x86/um/ptrace_32.c
@@ -193,7 +193,7 @@ int peek_user(struct task_struct *child, long addr, long data)
static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
- int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
+ int err, n, cpu = task_cpu(child);
struct user_i387_struct fpregs;
err = save_fp_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
@@ -209,7 +209,7 @@ static int get_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *child)
{
- int n, cpu = ((struct thread_info *) child->stack)->cpu;
+ int n, cpu = task_cpu(child);
struct user_i387_struct fpregs;
n = copy_from_user(&fpregs, buf, sizeof(fpregs));
@@ -222,7 +222,7 @@ static int set_fpregs(struct user_i387_struct __user *buf, struct task_struct *c
static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
{
- int err, n, cpu = ((struct thread_info *) child->stack)->cpu;
+ int err, n, cpu = task_cpu(child);
struct user_fxsr_struct fpregs;
err = save_fpx_registers(userspace_pid[cpu], (unsigned long *) &fpregs);
@@ -238,7 +238,7 @@ static int get_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *
static int set_fpxregs(struct user_fxsr_struct __user *buf, struct task_struct *child)
{
- int n, cpu = ((struct thread_info *) child->stack)->cpu;
+ int n, cpu = task_cpu(child);
struct user_fxsr_struct fpregs;
n = copy_from_user(&fpregs, buf, sizeof(fpregs));
diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S
index b766792c99335a..39053192918da8 100644
--- a/arch/x86/um/setjmp_32.S
+++ b/arch/x86/um/setjmp_32.S
@@ -16,9 +16,9 @@
.text
.align 4
- .globl setjmp
- .type setjmp, @function
-setjmp:
+ .globl kernel_setjmp
+ .type kernel_setjmp, @function
+kernel_setjmp:
#ifdef _REGPARM
movl %eax,%edx
#else
@@ -35,13 +35,13 @@ setjmp:
movl %ecx,20(%edx) # Return address
ret
- .size setjmp,.-setjmp
+ .size kernel_setjmp,.-kernel_setjmp
.text
.align 4
- .globl longjmp
- .type longjmp, @function
-longjmp:
+ .globl kernel_longjmp
+ .type kernel_longjmp, @function
+kernel_longjmp:
#ifdef _REGPARM
xchgl %eax,%edx
#else
@@ -55,4 +55,4 @@ longjmp:
movl 16(%edx),%edi
jmp *20(%edx)
- .size longjmp,.-longjmp
+ .size kernel_longjmp,.-kernel_longjmp
diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S
index 45f547b4043eed..c56942e1a38cac 100644
--- a/arch/x86/um/setjmp_64.S
+++ b/arch/x86/um/setjmp_64.S
@@ -18,9 +18,9 @@
.text
.align 4
- .globl setjmp
- .type setjmp, @function
-setjmp:
+ .globl kernel_setjmp
+ .type kernel_setjmp, @function
+kernel_setjmp:
pop %rsi # Return address, and adjust the stack
xorl %eax,%eax # Return value
movq %rbx,(%rdi)
@@ -34,13 +34,13 @@ setjmp:
movq %rsi,56(%rdi) # Return address
ret
- .size setjmp,.-setjmp
+ .size kernel_setjmp,.-kernel_setjmp
.text
.align 4
- .globl longjmp
- .type longjmp, @function
-longjmp:
+ .globl kernel_longjmp
+ .type kernel_longjmp, @function
+kernel_longjmp:
movl %esi,%eax # Return value (int)
movq (%rdi),%rbx
movq 8(%rdi),%rsp
@@ -51,4 +51,4 @@ longjmp:
movq 48(%rdi),%r15
jmp *56(%rdi)
- .size longjmp,.-longjmp
+ .size kernel_longjmp,.-kernel_longjmp
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 65028bafde6f22..67f39a310d5f71 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -460,6 +460,12 @@ static void __init xen_init_cpuid_mask(void)
cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}
+static void __init xen_init_capabilities(void)
+{
+ if (xen_pv_domain())
+ setup_force_cpu_cap(X86_FEATURE_XENPV);
+}
+
static void xen_set_debugreg(int reg, unsigned long val)
{
HYPERVISOR_set_debugreg(reg, val);
@@ -1589,6 +1595,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_init_irq_ops();
xen_init_cpuid_mask();
+ xen_init_capabilities();
#ifdef CONFIG_X86_LOCAL_APIC
/*
@@ -1887,14 +1894,6 @@ bool xen_hvm_need_lapic(void)
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
-static void xen_set_cpu_features(struct cpuinfo_x86 *c)
-{
- if (xen_pv_domain()) {
- clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
- set_cpu_cap(c, X86_FEATURE_XENPV);
- }
-}
-
const struct hypervisor_x86 x86_hyper_xen = {
.name = "Xen",
.detect = xen_platform,
@@ -1902,7 +1901,6 @@ const struct hypervisor_x86 x86_hyper_xen = {
.init_platform = xen_hvm_guest_init,
#endif
.x2apic_available = xen_x2apic_para_available,
- .set_cpu_features = xen_set_cpu_features,
};
EXPORT_SYMBOL(x86_hyper_xen);
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index 724a08740a04b4..9c7358110d32a3 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -477,7 +477,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
- struct pt_regs regs;
+ struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3f4ebf0261f286..29e50d1229bc03 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -28,6 +28,7 @@
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>
+#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
@@ -87,6 +88,8 @@ static void cpu_bringup(void)
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
+ speculative_store_bypass_ht_init();
+
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
@@ -357,6 +360,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
}
set_cpu_sibling_map(0);
+ speculative_store_bypass_ht_init();
+
xen_pmu_init(0);
if (xen_smp_intr_init(0))
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index f42e78de1e107d..85872a08994a1a 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -8,6 +8,7 @@
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/atomic.h>
#include <asm/paravirt.h>
@@ -19,6 +20,7 @@
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;
#ifdef CONFIG_QUEUED_SPINLOCKS
@@ -42,33 +44,24 @@ static void xen_qlock_kick(int cpu)
static void xen_qlock_wait(u8 *byte, u8 val)
{
int irq = __this_cpu_read(lock_kicker_irq);
+ atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
/* If kicker interrupts not initialized yet, just spin */
- if (irq == -1)
+ if (irq == -1 || in_nmi())
return;
- /* clear pending */
- xen_clear_irq_pending(irq);
- barrier();
-
- /*
- * We check the byte value after clearing pending IRQ to make sure
- * that we won't miss a wakeup event because of the clearing.
- *
- * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
- * So it is effectively a memory barrier for x86.
- */
- if (READ_ONCE(*byte) != val)
- return;
+ /* Detect reentry. */
+ atomic_inc(nest_cnt);
- /*
- * If an interrupt happens here, it will leave the wakeup irq
- * pending, which will cause xen_poll_irq() to return
- * immediately.
- */
+ /* If irq pending already and no nested call clear it. */
+ if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
+ xen_clear_irq_pending(irq);
+ } else if (READ_ONCE(*byte) == val) {
+ /* Block until irq becomes pending (or a spurious wakeup) */
+ xen_poll_irq(irq);
+ }
- /* Block until irq becomes pending (or perhaps a spurious wakeup) */
- xen_poll_irq(irq);
+ atomic_dec(nest_cnt);
}
#else /* CONFIG_QUEUED_SPINLOCKS */
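
The rework above exists because the unconditional xen_clear_irq_pending() in the old code could eat a wakeup that an interrupted (e.g. NMI-time) outer invocation was still waiting for; the per-CPU nest_cnt lets only the outermost level consume the pending event. A standalone sketch of that reentrancy guard, assuming C11 atomics; the pending flag and helpers are illustrative stand-ins, and the real code additionally rechecks the lock byte and polls the event channel:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nest_cnt;		/* per-CPU in the real code */
static atomic_bool irq_pending;

static void qlock_wait(void)
{
	/* Detect reentry before touching the shared wakeup state. */
	atomic_fetch_add(&nest_cnt, 1);

	if (atomic_load(&nest_cnt) == 1 &&
	    atomic_exchange(&irq_pending, false)) {
		/* Outermost level only: consume the pending wakeup. */
		puts("outer: consumed pending irq");
	} else {
		/* Nested (or nothing pending): leave the flag alone. */
		puts("nested or idle: pending flag untouched");
	}

	atomic_fetch_sub(&nest_cnt, 1);
}

int main(void)
{
	atomic_store(&irq_pending, true);
	qlock_wait();			/* outer call may clear the event */

	atomic_store(&irq_pending, true);
	atomic_fetch_add(&nest_cnt, 1);	/* simulate entry from an NMI */
	qlock_wait();			/* nested: must not clear it */
	atomic_fetch_sub(&nest_cnt, 1);

	printf("pending survived nesting: %d\n",
	       (int)atomic_load(&irq_pending));
	return 0;
}
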
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 7f664c416faf55..4ecd0de08557e8 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -1,11 +1,14 @@
#include <linux/types.h>
#include <linux/tick.h>
+#include <linux/percpu-defs.h>
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>
+#include <asm/cpufeatures.h>
+#include <asm/msr-index.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/fixmap.h>
@@ -68,6 +71,8 @@ static void xen_pv_post_suspend(int suspend_cancelled)
xen_mm_unpin_all();
}
+static DEFINE_PER_CPU(u64, spec_ctrl);
+
void xen_arch_pre_suspend(void)
{
if (xen_pv_domain())
@@ -84,6 +89,9 @@ void xen_arch_post_suspend(int cancelled)
static void xen_vcpu_notify_restore(void *data)
{
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL))
+ wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl));
+
/* Boot processor notified via generic timekeeping_resume() */
if (smp_processor_id() == 0)
return;
@@ -93,7 +101,15 @@ static void xen_vcpu_notify_restore(void *data)
static void xen_vcpu_notify_suspend(void *data)
{
+ u64 tmp;
+
tick_suspend_local();
+
+ if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) {
+ rdmsrl(MSR_IA32_SPEC_CTRL, tmp);
+ this_cpu_write(spec_ctrl, tmp);
+ wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ }
}
void xen_arch_resume(void)
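
Across suspend the guest saves the live MSR_IA32_SPEC_CTRL value per vCPU, parks the register at zero, and writes the saved value back on resume. The shape is a plain save/clear/restore bracket; a sketch with rdmsr()/wrmsr() as illustrative stand-ins for the privileged accessors:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr = 0x5;	/* pretend SPEC_CTRL has IBRS|SSBD set */
static uint64_t saved;		/* the DEFINE_PER_CPU(u64, spec_ctrl) slot */

static uint64_t rdmsr(void) { return fake_msr; }
static void wrmsr(uint64_t v) { fake_msr = v; }

static void notify_suspend(void)
{
	saved = rdmsr();	/* remember the live value */
	wrmsr(0);		/* park the register in a known state */
}

static void notify_restore(void)
{
	wrmsr(saved);		/* bring the saved value back */
}

int main(void)
{
	printf("before suspend: %#llx\n", (unsigned long long)fake_msr);
	notify_suspend();
	printf("suspended:      %#llx\n", (unsigned long long)fake_msr);
	notify_restore();
	printf("after resume:   %#llx\n", (unsigned long long)fake_msr);
	return 0;
}
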
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index ca20a892021bb6..6c6877d628efb1 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -31,7 +31,7 @@ $(bootdir-y): $(addprefix $(obj)/,$(subdir-y)) \
$(addprefix $(obj)/,$(host-progs))
$(Q)$(MAKE) $(build)=$(obj)/$@ $(MAKECMDGOALS)
-OBJCOPYFLAGS = --strip-all -R .comment -R .note.gnu.build-id -O binary
+OBJCOPYFLAGS = --strip-all -R .comment -R .notes -O binary
vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 22eeacba37ccd4..199e05f85e892f 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -35,6 +35,7 @@ CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 83e2e4bc01ba24..d3ac00fcb15ccb 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -24,7 +24,11 @@
# error Linux requires the Xtensa Windowed Registers Option.
#endif
-#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
/*
* User space process size: 1 GB.
diff --git a/arch/xtensa/include/uapi/asm/mman.h b/arch/xtensa/include/uapi/asm/mman.h
index 360944e1da52a0..b3727f37ae2c29 100644
--- a/arch/xtensa/include/uapi/asm/mman.h
+++ b/arch/xtensa/include/uapi/asm/mman.h
@@ -102,6 +102,9 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c
index b123ace3b67c76..cbefed1800c145 100644
--- a/arch/xtensa/kernel/asm-offsets.c
+++ b/arch/xtensa/kernel/asm-offsets.c
@@ -90,14 +90,14 @@ int main(void)
DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XTENSA_HAVE_COPROCESSORS
- DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
- DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
+ DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
+ DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));
+ DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2));
+ DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3));
+ DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4));
+ DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5));
+ DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6));
+ DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7));
#endif
DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 05e1df943856fb..e3823b4f9d0869 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -88,9 +88,12 @@ _SetupMMU:
initialize_mmu
#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
rsr a2, excsave1
- movi a3, 0x08000000
+ movi a3, XCHAL_KSEG_PADDR
+ bltu a2, a3, 1f
+ sub a2, a2, a3
+ movi a3, XCHAL_KSEG_SIZE
bgeu a2, a3, 1f
- movi a3, 0xd0000000
+ movi a3, XCHAL_KSEG_CACHED_VADDR
add a2, a2, a3
wsr a2, excsave1
1:
@@ -283,12 +286,13 @@ should_never_return:
movi a2, cpu_start_ccount
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
- memw
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@@ -325,11 +329,13 @@ ENTRY(cpu_restart)
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
+ memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
+ memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 1c85323f01d7de..df70d47d14abad 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -83,18 +83,21 @@ void coprocessor_release_all(struct thread_info *ti)
void coprocessor_flush_all(struct thread_info *ti)
{
- unsigned long cpenable;
+ unsigned long cpenable, old_cpenable;
int i;
preempt_disable();
+ RSR_CPENABLE(old_cpenable);
cpenable = ti->cpenable;
+ WSR_CPENABLE(cpenable);
for (i = 0; i < XCHAL_CP_MAX; i++) {
if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
coprocessor_flush(ti, i);
cpenable >>= 1;
}
+ WSR_CPENABLE(old_cpenable);
preempt_enable();
}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 4d02e38514f546..54bb8e0473a063 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -80,7 +80,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned i;
- for (i = 0; i < max_cpus; ++i)
+ for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@@ -93,6 +93,11 @@ void __init smp_init_cpus(void)
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
+ if (ncpus > NR_CPUS) {
+ ncpus = NR_CPUS;
+ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+ }
+
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@@ -192,9 +197,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
int i;
#ifdef CONFIG_HOTPLUG_CPU
- cpu_start_id = cpu;
- system_flush_invalidate_dcache_range(
- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+ WRITE_ONCE(cpu_start_id, cpu);
+ /* Pairs with the third memw in the cpu_restart */
+ mb();
+ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@@ -203,18 +210,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
ccount = get_ccount();
while (!ccount);
- cpu_start_ccount = ccount;
+ WRITE_ONCE(cpu_start_ccount, ccount);
- while (time_before(jiffies, timeout)) {
+ do {
+ /*
+ * Pairs with the first two memws in the
+ * .Lboot_secondary.
+ */
mb();
- if (!cpu_start_ccount)
- break;
- }
+ ccount = READ_ONCE(cpu_start_ccount);
+ } while (ccount && time_before(jiffies, timeout));
- if (cpu_start_ccount) {
+ if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
- (void *)cpu, 1);
- cpu_start_ccount = 0;
+ (void *)cpu, 1);
+ WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@@ -234,6 +244,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
+ init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@@ -295,8 +306,10 @@ void __cpu_die(unsigned int cpu)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
- sizeof(cpu_start_id));
- if (cpu_start_id == -cpu) {
+ sizeof(cpu_start_id));
+ /* Pairs with the second memw in the cpu_restart */
+ mb();
+ if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}
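
boot_secondary() now publishes cpu_start_ccount with WRITE_ONCE() and polls it back with READ_ONCE() plus an explicit mb(), pairing with the memw instructions added to head.S, rather than hoping the compiler rereads a plain global. A sketch of the same publish/consume/acknowledge handshake using C11 acquire/release atomics in place of mb()/memw, with a thread standing in for the starting CPU (compile with -pthread); the real loop is additionally bounded by a jiffies timeout:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint cpu_start_ccount;	/* the handshake word */

/* Starting CPU (the head.S side): wait, consume, acknowledge with 0. */
static void *secondary(void *arg)
{
	unsigned int v;

	(void)arg;
	do {	/* the memw + l32i polling loop */
		v = atomic_load_explicit(&cpu_start_ccount,
					 memory_order_acquire);
	} while (v == 0);
	printf("secondary: ccount = %u\n", v);

	atomic_store_explicit(&cpu_start_ccount, 0, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned int v;

	pthread_create(&t, NULL, secondary, NULL);

	/* boot_secondary(): publish, then poll for the acknowledgement. */
	atomic_store_explicit(&cpu_start_ccount, 42, memory_order_release);
	do {	/* the mb() + READ_ONCE() loop in the patch */
		v = atomic_load_explicit(&cpu_start_ccount,
					 memory_order_acquire);
	} while (v != 0);

	puts("boot: secondary consumed the ccount");
	pthread_join(t, NULL);
	return 0;
}
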
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index b9ad9feadc2d1a..a992cb6a47db39 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -87,7 +87,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
- disable_irq(evt->irq);
+ disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 42d441f7898b94..1edce040f470af 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -309,7 +309,7 @@ do_unaligned_user (struct pt_regs *regs)
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *) regs->excvaddr;
- force_sig_info(SIGSEGV, &info, current);
+ force_sig_info(SIGBUS, &info, current);
}
#endif
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index c417cbe4ec878b..bdfeda5a913c50 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -110,6 +110,7 @@ SECTIONS
.fixup : { *(.fixup) }
EXCEPTION_TABLE(16)
+ NOTES
/* Data section */
_sdata = .;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 46ba2402c8f974..987361113ecd8c 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -185,7 +185,8 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
}
wb_congested = wb_congested_get_create(&q->backing_dev_info,
- blkcg->css.id, GFP_NOWAIT);
+ blkcg->css.id,
+ GFP_NOWAIT | __GFP_NOWARN);
if (!wb_congested) {
ret = -ENOMEM;
goto err_put_css;
@@ -193,7 +194,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */
if (!new_blkg) {
- new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
+ new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_congested;
@@ -1022,7 +1023,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
}
spin_lock_init(&blkcg->lock);
- INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
+ INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
@@ -1238,7 +1239,7 @@ pd_prealloc:
if (blkg->pd[pol->plid])
continue;
- pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
+ pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
if (!pd)
swap(pd, pd_prealloc);
if (!pd) {
diff --git a/block/blk-core.c b/block/blk-core.c
index f5f1a55703ae21..50d77c90070dac 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -651,21 +651,17 @@ EXPORT_SYMBOL(blk_alloc_queue);
int blk_queue_enter(struct request_queue *q, gfp_t gfp)
{
while (true) {
- int ret;
-
if (percpu_ref_tryget_live(&q->q_usage_counter))
return 0;
if (!gfpflags_allow_blocking(gfp))
return -EBUSY;
- ret = wait_event_interruptible(q->mq_freeze_wq,
- !atomic_read(&q->mq_freeze_depth) ||
- blk_queue_dying(q));
+ wait_event(q->mq_freeze_wq,
+ !atomic_read(&q->mq_freeze_depth) ||
+ blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
- if (ret)
- return ret;
}
}
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e04a7b8492cf20..4e1f49434bbecd 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -2905,7 +2905,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* for devices that support queuing, otherwise we still have a problem
* with sync vs async workloads.
*/
- if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
+ if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
+ !cfqd->cfq_group_idle)
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -3810,7 +3811,8 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
goto out;
}
- cfqq = kmem_cache_alloc_node(cfq_pool, GFP_NOWAIT | __GFP_ZERO,
+ cfqq = kmem_cache_alloc_node(cfq_pool,
+ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
cfqd->queue->node);
if (!cfqq) {
cfqq = &cfqd->oom_cfqq;
diff --git a/block/partitions/aix.c b/block/partitions/aix.c
index f3ed7b2d89bf5e..8e7d358e022636 100644
--- a/block/partitions/aix.c
+++ b/block/partitions/aix.c
@@ -177,7 +177,7 @@ int aix_partition(struct parsed_partitions *state)
u32 vgda_sector = 0;
u32 vgda_len = 0;
int numlvs = 0;
- struct pvd *pvd;
+ struct pvd *pvd = NULL;
struct lv_info {
unsigned short pps_per_lv;
unsigned short pps_found;
@@ -231,10 +231,11 @@ int aix_partition(struct parsed_partitions *state)
if (lvip[i].pps_per_lv)
foundlvs += 1;
}
+ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
+ pvd = alloc_pvd(state, vgda_sector + 17);
}
put_dev_sector(sect);
}
- pvd = alloc_pvd(state, vgda_sector + 17);
if (pvd) {
int numpps = be16_to_cpu(pvd->pp_count);
int psn_part1 = be32_to_cpu(pvd->psn_part1);
@@ -281,10 +282,14 @@ int aix_partition(struct parsed_partitions *state)
next_lp_ix += 1;
}
for (i = 0; i < state->limit; i += 1)
- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
+ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
+ char tmp[sizeof(n[i].name) + 1]; // null char
+
+ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
pr_warn("partition %s (%u pp's found) is "
"not contiguous\n",
- n[i].name, lvip[i].pps_found);
+ tmp, lvip[i].pps_found);
+ }
kfree(pvd);
}
kfree(n);
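
The on-disk LVM name field is fixed-width and not guaranteed to be NUL-terminated, so the fix above prints it through a bounded copy into a buffer one byte larger. A standalone sketch of the same idea; the kernel code copies with snprintf("%s") bounded by the destination size, while the sketch also bounds the source read with a %.*s precision, since handing an unterminated array to plain "%s" is undefined in standard C:

#include <stdio.h>
#include <string.h>

struct lvname { char name[8]; };	/* fixed width, maybe unterminated */

int main(void)
{
	struct lvname n;
	char tmp[sizeof(n.name) + 1];	/* + 1 for the terminating NUL */

	memcpy(n.name, "rootvg01", sizeof(n.name));	/* no NUL fits */

	/* Bounded copy: never read or write past the fixed-width field. */
	snprintf(tmp, sizeof(tmp), "%.*s", (int)sizeof(n.name), n.name);

	printf("partition %s is not contiguous\n", tmp);
	return 0;
}
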
diff --git a/chromeos/config/arm64/chromiumos-arm64.flavour.config b/chromeos/config/arm64/chromiumos-arm64.flavour.config
index c0f18ba0257054..050eb24275d424 100644
--- a/chromeos/config/arm64/chromiumos-arm64.flavour.config
+++ b/chromeos/config/arm64/chromiumos-arm64.flavour.config
@@ -76,6 +76,7 @@ CONFIG_COMMON_CLK_XGENE=y
CONFIG_CPUFREQ_DT=y
CONFIG_CPUFREQ_DT_PLATDEV=y
# CONFIG_CPU_FREQ_GOV_SCHED is not set
+CONFIG_CROS_EC_THROTTLER=y
CONFIG_CRYPTO_DEV_ROCKCHIP=y
CONFIG_DEVPORT=y
# CONFIG_DGNC is not set
@@ -102,7 +103,8 @@ CONFIG_DRM_PANEL_BOE_TV097QXM_NU0=y
CONFIG_DRM_PANEL_INNOLUX_P079ZCA=y
CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04=y
CONFIG_DRM_PARADE_PS8640=y
-CONFIG_DRM_POWERVR_ROGUE_1_9=y
+CONFIG_DRM_POWERVR_ROGUE_1_10=y
+CONFIG_DRM_POWERVR_ROGUE_1_9=m
# CONFIG_DRM_POWERVR_ROGUE_DEBUG is not set
# CONFIG_DRM_POWERVR_ROGUE_PDUMP is not set
# CONFIG_DRM_QXL is not set
@@ -636,6 +638,7 @@ CONFIG_SPI_GPIO=y
CONFIG_SPI_MT65XX=y
# CONFIG_SPI_PXA2XX is not set
CONFIG_SPI_ROCKCHIP=y
+# CONFIG_STAGING_GASKET_FRAMEWORK is not set
# CONFIG_STMMAC_PCI is not set
# CONFIG_SUNDANCE is not set
# CONFIG_SUNGEM is not set
@@ -647,6 +650,8 @@ CONFIG_TCG_CR50_SPI=y
CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
# CONFIG_THERMAL_WRITABLE_TRIPS is not set
+CONFIG_THROTTLER=y
+# CONFIG_THROTTLER_DEBUG is not set
# CONFIG_THUNDERBOLT is not set
# CONFIG_THUNDER_NIC_BGX is not set
# CONFIG_THUNDER_NIC_PF is not set
@@ -673,7 +678,6 @@ CONFIG_V4L2_MEM2MEM_DEV=y
# CONFIG_V4L_PLATFORM_DRIVERS is not set
CONFIG_VGA_ARB=y
CONFIG_VGA_ARB_MAX_GPUS=16
-CONFIG_VIDEOBUF2_VMALLOC=m
CONFIG_VIDEO_MEDIATEK_JPEG=y
CONFIG_VIDEO_MEDIATEK_MDP=y
CONFIG_VIDEO_MEDIATEK_VCODEC=y
diff --git a/chromeos/config/arm64/chromiumos-mediatek.flavour.config b/chromeos/config/arm64/chromiumos-mediatek.flavour.config
index 07cfaa9989150c..fded54622ec0ea 100644
--- a/chromeos/config/arm64/chromiumos-mediatek.flavour.config
+++ b/chromeos/config/arm64/chromiumos-mediatek.flavour.config
@@ -31,7 +31,8 @@ CONFIG_DRM_PANEL_BOE_TV097QXM_NU0=y
# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set
# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set
CONFIG_DRM_PARADE_PS8640=y
-CONFIG_DRM_POWERVR_ROGUE_1_9=y
+CONFIG_DRM_POWERVR_ROGUE_1_10=y
+CONFIG_DRM_POWERVR_ROGUE_1_9=m
# CONFIG_DRM_POWERVR_ROGUE_DEBUG is not set
# CONFIG_DRM_POWERVR_ROGUE_PDUMP is not set
# CONFIG_DWMAC_ROCKCHIP is not set
@@ -124,12 +125,12 @@ CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR=y
# CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE is not set
# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
# CONFIG_THERMAL_WRITABLE_TRIPS is not set
+# CONFIG_THROTTLER is not set
CONFIG_TOUCHSCREEN_MELFAS_MIP4=y
CONFIG_UCS2_STRING=y
CONFIG_USB_XHCI_MTK=y
CONFIG_V4L2_MEM2MEM_DEV=y
# CONFIG_V4L_PLATFORM_DRIVERS is not set
-CONFIG_VIDEOBUF2_VMALLOC=m
CONFIG_VIDEO_MEDIATEK_JPEG=y
CONFIG_VIDEO_MEDIATEK_MDP=y
CONFIG_VIDEO_MEDIATEK_VCODEC=y
diff --git a/chromeos/config/arm64/chromiumos-rockchip64.flavour.config b/chromeos/config/arm64/chromiumos-rockchip64.flavour.config
index cef002692b18c4..9d14fc666f705d 100644
--- a/chromeos/config/arm64/chromiumos-rockchip64.flavour.config
+++ b/chromeos/config/arm64/chromiumos-rockchip64.flavour.config
@@ -66,6 +66,7 @@ CONFIG_COMMON_CLK_RK808=y
CONFIG_CPUFREQ_DT=y
CONFIG_CPUFREQ_DT_PLATDEV=y
CONFIG_CPU_FREQ_GOV_SCHED=y
+CONFIG_CROS_EC_THROTTLER=y
CONFIG_CRYPTO_DEV_ROCKCHIP=y
CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI=y
CONFIG_DEVPORT=y
@@ -87,6 +88,7 @@ CONFIG_DRM_DW_MIPI_DSI=y
CONFIG_DRM_PANEL_INNOLUX_P079ZCA=y
CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04=y
# CONFIG_DRM_PARADE_PS8640 is not set
+# CONFIG_DRM_POWERVR_ROGUE_1_10 is not set
# CONFIG_DRM_POWERVR_ROGUE_1_9 is not set
# CONFIG_DRM_QXL is not set
# CONFIG_DRM_R128 is not set
@@ -204,18 +206,15 @@ CONFIG_I2C_RK3X=y
# CONFIG_LPC_SCH is not set
# CONFIG_MALI_2MB_ALLOC is not set
# CONFIG_MALI_CORESTACK is not set
-# CONFIG_MALI_DEBUG is not set
CONFIG_MALI_DEVFREQ=y
CONFIG_MALI_DMA_FENCE=y
CONFIG_MALI_EXPERT=y
# CONFIG_MALI_FENCE_DEBUG is not set
# CONFIG_MALI_GATOR_SUPPORT is not set
-# CONFIG_MALI_JOB_DUMPING is not set
+# CONFIG_MALI_JOB_DUMP is not set
CONFIG_MALI_MIDGARD=y
-# CONFIG_MALI_MIDGARD_ENABLE_TRACE is not set
CONFIG_MALI_PLATFORM_NAME="rk"
# CONFIG_MALI_PRFCNT_SET_SECONDARY is not set
-# CONFIG_MALI_TRACE_TIMELINE is not set
CONFIG_MEDIA_CONTROLLER=y
# CONFIG_MEDIA_PCI_SUPPORT is not set
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
@@ -520,6 +519,7 @@ CONFIG_SPI_BITBANG=m
# CONFIG_SPI_GPIO is not set
# CONFIG_SPI_PXA2XX is not set
CONFIG_SPI_ROCKCHIP=y
+# CONFIG_STAGING_GASKET_FRAMEWORK is not set
# CONFIG_STMMAC_PCI is not set
# CONFIG_SUNDANCE is not set
# CONFIG_SUNGEM is not set
@@ -531,6 +531,8 @@ CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR=y
# CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE is not set
CONFIG_THERMAL_GOV_FAIR_SHARE=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THROTTLER=y
+# CONFIG_THROTTLER_DEBUG is not set
# CONFIG_THUNDERBOLT is not set
# CONFIG_THUNDER_NIC_BGX is not set
# CONFIG_THUNDER_NIC_PF is not set
@@ -551,11 +553,10 @@ CONFIG_USB_EHCI_PCI=y
CONFIG_USB_OHCI_HCD_PCI=y
# CONFIG_USB_UHCI_HCD is not set
CONFIG_USB_XHCI_PCI=y
-CONFIG_V4L2_FWNODE=y
+CONFIG_V4L2_FWNODE=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_VGA_ARB=y
CONFIG_VGA_ARB_MAX_GPUS=16
-CONFIG_VIDEOBUF2_VMALLOC=y
# CONFIG_VIDEO_AD9389B is not set
# CONFIG_VIDEO_ADP1653 is not set
# CONFIG_VIDEO_ADV7170 is not set
@@ -567,6 +568,7 @@ CONFIG_VIDEOBUF2_VMALLOC=y
# CONFIG_VIDEO_ADV7511 is not set
# CONFIG_VIDEO_ADV7604 is not set
# CONFIG_VIDEO_ADV7842 is not set
+# CONFIG_VIDEO_AK7375 is not set
# CONFIG_VIDEO_AK881X is not set
# CONFIG_VIDEO_AS3645A is not set
# CONFIG_VIDEO_BT819 is not set
@@ -578,7 +580,10 @@ CONFIG_VIDEOBUF2_VMALLOC=y
# CONFIG_VIDEO_CX25840 is not set
# CONFIG_VIDEO_DW9714 is not set
# CONFIG_VIDEO_DW9807 is not set
+# CONFIG_VIDEO_IMX208 is not set
# CONFIG_VIDEO_IMX258 is not set
+# CONFIG_VIDEO_IMX319 is not set
+# CONFIG_VIDEO_IMX355 is not set
# CONFIG_VIDEO_KS0127 is not set
# CONFIG_VIDEO_LM3560 is not set
# CONFIG_VIDEO_LM3646 is not set
@@ -594,14 +599,14 @@ CONFIG_VIDEOBUF2_VMALLOC=y
# CONFIG_VIDEO_NOON010PC30 is not set
# CONFIG_VIDEO_OV13858 is not set
# CONFIG_VIDEO_OV2659 is not set
-CONFIG_VIDEO_OV2685=y
+CONFIG_VIDEO_OV2685=m
# CONFIG_VIDEO_OV5670 is not set
-CONFIG_VIDEO_OV5695=y
+CONFIG_VIDEO_OV5695=m
# CONFIG_VIDEO_OV7640 is not set
# CONFIG_VIDEO_OV7670 is not set
# CONFIG_VIDEO_OV9650 is not set
-CONFIG_VIDEO_ROCKCHIP_ISP1=y
-CONFIG_VIDEO_ROCKCHIP_ISP_DPHY_SY=y
+CONFIG_VIDEO_ROCKCHIP_ISP1=m
+CONFIG_VIDEO_ROCKCHIP_ISP_DPHY_SY=m
CONFIG_VIDEO_ROCKCHIP_VPU=y
# CONFIG_VIDEO_S5C73M3 is not set
# CONFIG_VIDEO_S5K4ECGX is not set
diff --git a/chromeos/config/arm64/common.config b/chromeos/config/arm64/common.config
index 104cca8a92433e..d228121c381a78 100644
--- a/chromeos/config/arm64/common.config
+++ b/chromeos/config/arm64/common.config
@@ -14,6 +14,7 @@ CONFIG_ARCH_DMA_ADDR_T_64BIT=y
# CONFIG_ARCH_EXYNOS7 is not set
CONFIG_ARCH_HAS_ALT_SYSCALL=y
CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y
CONFIG_ARCH_HAS_KCOV=y
CONFIG_ARCH_HAS_TICK_BROADCAST=y
@@ -36,14 +37,12 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
# CONFIG_ARCH_STRATIX10 is not set
# CONFIG_ARCH_TEGRA is not set
# CONFIG_ARCH_THUNDER is not set
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
# CONFIG_ARCH_VEXPRESS is not set
CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
CONFIG_ARCH_WANT_FRAME_POINTERS=y
CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
# CONFIG_ARCH_XGENE is not set
# CONFIG_ARCH_ZYNQMP is not set
-# CONFIG_ARC_EMAC is not set
CONFIG_ARM64=y
# CONFIG_ARM64_16K_PAGES is not set
CONFIG_ARM64_4K_PAGES=y
@@ -78,7 +77,6 @@ CONFIG_ARM_PSCI_FW=y
# CONFIG_ARM_SMMU is not set
# CONFIG_ARM_SP805_WATCHDOG is not set
# CONFIG_ARM_TIMER_SP804 is not set
-# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_ATH6KL is not set
# CONFIG_ATH9K is not set
# CONFIG_ATH_DEBUG is not set
@@ -96,9 +94,7 @@ CONFIG_BATTERY_SBS=y
CONFIG_BLOCK_COMPAT=y
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=1
-# CONFIG_BPF_JIT is not set
# CONFIG_BRCMFMAC is not set
-# CONFIG_BROADCOM_PHY is not set
# CONFIG_BT_ATH3K is not set
# CONFIG_BT_HCIBCM203X is not set
# CONFIG_BT_HCIBPA10X is not set
@@ -114,6 +110,7 @@ CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_LL=y
# CONFIG_BT_HCIUART_MRVL is not set
# CONFIG_BT_HCIUART_QCA is not set
+# CONFIG_BT_HCI_LE_SPLITTER is not set
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
# CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE is not set
@@ -125,28 +122,18 @@ CONFIG_CHARGER_GPIO=y
# CONFIG_CHARGER_MANAGER is not set
# CONFIG_CHARGER_TPS65090 is not set
CONFIG_CHROMEOS_OF_FIRMWARE=y
-CONFIG_CLKDEV_LOOKUP=y
CONFIG_CLKSRC_OF=y
CONFIG_CLKSRC_PROBE=y
# CONFIG_CLK_QORIQ is not set
# CONFIG_CLOCK_THERMAL is not set
CONFIG_CLONE_BACKWARDS=y
CONFIG_CMDLINE=""
-CONFIG_COMMON_CLK=y
-# CONFIG_COMMON_CLK_CDCE706 is not set
-# CONFIG_COMMON_CLK_CDCE925 is not set
# CONFIG_COMMON_CLK_PWM is not set
-# CONFIG_COMMON_CLK_PXA is not set
-# CONFIG_COMMON_CLK_SI514 is not set
-# CONFIG_COMMON_CLK_SI5351 is not set
-# CONFIG_COMMON_CLK_SI570 is not set
# CONFIG_COMMON_CLK_VERSATILE is not set
-# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
CONFIG_COMPAT=y
CONFIG_COMPAT_BINFMT_ELF=y
CONFIG_COMPAT_NETLINK_MESSAGES=y
CONFIG_COMPAT_OLD_SIGACTION=y
-# CONFIG_CORDIC is not set
# CONFIG_CORESIGHT is not set
CONFIG_CP15_BARRIER_EMULATION=y
# CONFIG_CPU_BIG_ENDIAN is not set
@@ -159,11 +146,7 @@ CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_PM=y
CONFIG_CPU_THERMAL=y
-# CONFIG_CRC8 is not set
CONFIG_CRC_T10DIF=m
-CONFIG_CROS_EC_CHARDEV=y
-CONFIG_CROS_EC_PROTO=y
-# CONFIG_CROS_EC_SYSFS_USB is not set
CONFIG_CRYPTO_ABLK_HELPER=y
CONFIG_CRYPTO_AES_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
@@ -177,29 +160,17 @@ CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_DEBUG_ALIGN_RODATA=y
-# CONFIG_DEBUG_PINCTRL is not set
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
CONFIG_DEVFREQ_THERMAL=y
-CONFIG_DEV_COREDUMP=y
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-# CONFIG_DMATEST is not set
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_OF=y
# CONFIG_DRM_AMD_ACP is not set
CONFIG_DRM_DMA_SYNC=y
# CONFIG_DRM_HDLCD is not set
CONFIG_DRM_MIPI_DSI=y
-# CONFIG_DRM_NXP_PTN3460 is not set
-# CONFIG_DRM_PANEL_LG_LG4573 is not set
-# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
-# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set
CONFIG_DRM_PANEL_SIMPLE=y
-# CONFIG_DRM_PARADE_PS8622 is not set
CONFIG_DTC=y
CONFIG_DT_IDLE_STATES=y
CONFIG_DUMMY_CONSOLE_COLUMNS=80
@@ -223,20 +194,17 @@ CONFIG_EXTCON_CROS_EC=y
# CONFIG_EXTCON_RT8973A is not set
# CONFIG_EXTCON_SM5502 is not set
# CONFIG_EXTCON_USB_GPIO is not set
-# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set
# CONFIG_FB_ARMCLCD is not set
# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_SSD1307 is not set
# CONFIG_FIQ_DEBUGGER is not set
# CONFIG_FIRMWARE_MEMMAP is not set
CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_FORTIFY_SOURCE=y
CONFIG_FRAME_WARN=2048
-# CONFIG_FSL_EDMA is not set
# CONFIG_FSL_MC_BUS is not set
# CONFIG_FS_DAX is not set
# CONFIG_FTL is not set
# CONFIG_FTRACE_SYSCALLS is not set
-CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
CONFIG_GENERIC_CPU_AUTOPROBE=y
CONFIG_GENERIC_CSUM=y
@@ -245,39 +213,26 @@ CONFIG_GENERIC_IDLE_POLL_SETUP=y
CONFIG_GENERIC_IRQ_MIGRATION=y
CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PINCONF=y
CONFIG_GENERIC_SCHED_CLOCK=y
CONFIG_GOOGLE_COREBOOT_TABLE_OF=y
-# CONFIG_GPIO_74X164 is not set
-# CONFIG_GPIO_74XX_MMIO is not set
-# CONFIG_GPIO_ADNP is not set
-# CONFIG_GPIO_ALTERA is not set
-# CONFIG_GPIO_GRGPIO is not set
# CONFIG_GPIO_PL061 is not set
# CONFIG_GPIO_SYSCON is not set
# CONFIG_GPIO_TPS6586X is not set
-# CONFIG_GPIO_WATCHDOG is not set
# CONFIG_GPIO_XGENE is not set
-# CONFIG_GPIO_XILINX is not set
CONFIG_GPU_TRACEPOINTS=y
CONFIG_HANDLE_DOMAIN_IRQ=y
CONFIG_HARDEN_BRANCH_PREDICTOR=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU=y
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-# CONFIG_HAVE_AOUT is not set
CONFIG_HAVE_ARCH_BITREVERSE=y
CONFIG_HAVE_ARCH_KASAN=y
CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
CONFIG_HAVE_ARCH_PFN_VALID=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
CONFIG_HAVE_ARM_SMCCC=y
-CONFIG_HAVE_BPF_JIT=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_CLK_PREPARE=y
CONFIG_HAVE_CMPXCHG_DOUBLE=y
CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
CONFIG_HAVE_DEBUG_BUGVERBOSE=y
CONFIG_HAVE_EBPF_JIT=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
@@ -285,7 +240,6 @@ CONFIG_HAVE_GENERIC_RCU_GUP=y
CONFIG_HAVE_MEMORY_PRESENT=y
CONFIG_HAVE_PATA_PLATFORM=y
CONFIG_HAVE_RCU_TABLE_FREE=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
CONFIG_HDMI_NOTIFIERS=y
# CONFIG_HID_GOOGLE_HAMMER is not set
# CONFIG_HID_PID is not set
@@ -298,18 +252,12 @@ CONFIG_HID_RMI=m
# CONFIG_HUGETLBFS is not set
# CONFIG_HVC_DCC is not set
CONFIG_HW_PERF_EVENTS=y
-# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
# CONFIG_I2C_CADENCE is not set
CONFIG_I2C_CROS_EC_TUNNEL=y
# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
-# CONFIG_I2C_EMEV2 is not set
-CONFIG_I2C_MUX=y
# CONFIG_I2C_MUX_PINCTRL is not set
# CONFIG_I2C_NOMADIK is not set
CONFIG_IIO=y
-CONFIG_IIO_BUFFER=y
-# CONFIG_IIO_BUFFER_CB is not set
-CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
CONFIG_IIO_CROS_EC=y
# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set
# CONFIG_IIO_CROS_EC_ACTIVITY is not set
@@ -319,16 +267,12 @@ CONFIG_IIO_CROS_EC_SENSORS=y
CONFIG_IIO_CROS_EC_SENSORS_CORE=y
CONFIG_IIO_CROS_EC_SENSORS_RING=y
CONFIG_IIO_CROS_EC_SENSORS_SYNC=y
-# CONFIG_IIO_INTERRUPT_TRIGGER is not set
CONFIG_IIO_KFIFO_BUF=y
-# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
CONFIG_IIO_SYSFS_TRIGGER=y
-CONFIG_IIO_TRIGGER=y
CONFIG_IIO_TRIGGERED_BUFFER=y
CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
# CONFIG_IMX_THERMAL is not set
# CONFIG_INFTL is not set
-CONFIG_INPUT_MATRIXKMAP=y
# CONFIG_INPUT_POLLDEV is not set
# CONFIG_INPUT_PWM_BEEPER is not set
# CONFIG_INPUT_REGULATOR_HAPTIC is not set
@@ -338,51 +282,30 @@ CONFIG_IOMMU_API=y
CONFIG_IOMMU_DMA=y
CONFIG_IOMMU_IOVA=y
# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
-CONFIG_IRQCHIP=y
# CONFIG_JFFS2_FS is not set
# CONFIG_KASAN is not set
# CONFIG_KCOV is not set
CONFIG_KERNEL_MODE_NEON=y
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_BCM is not set
-# CONFIG_KEYBOARD_CAP11XX is not set
-CONFIG_KEYBOARD_CROS_EC=y
# CONFIG_KEYBOARD_GPIO_POLLED is not set
-# CONFIG_KEYBOARD_OMAP4 is not set
-# CONFIG_KEYBOARD_SAMSUNG is not set
CONFIG_KEYS_COMPAT=y
-# CONFIG_KS8842 is not set
-# CONFIG_LEDS_BCM6328 is not set
-# CONFIG_LEDS_BCM6358 is not set
# CONFIG_LEDS_GPIO is not set
# CONFIG_LEDS_PWM is not set
# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_SYSCON is not set
-# CONFIG_LIBERTAS is not set
# CONFIG_LIBERTAS_THINFIRM_USB is not set
CONFIG_LIBFDT=y
# CONFIG_LIBNVDIMM is not set
CONFIG_MAILBOX=y
# CONFIG_MAILBOX_TEST is not set
-# CONFIG_MAX5821 is not set
-# CONFIG_MDIO_BUS_MUX_GPIO is not set
-# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
# CONFIG_MDIO_OCTEON is not set
+# CONFIG_MEDIA_CEC_SUPPORT is not set
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEMORY=y
CONFIG_MFD_AS3722=y
-# CONFIG_MFD_ATMEL_FLEXCOM is not set
-# CONFIG_MFD_ATMEL_HLCDC is not set
-CONFIG_MFD_CROS_EC=y
# CONFIG_MFD_CROS_EC_I2C is not set
-CONFIG_MFD_CROS_EC_PD_UPDATE=y
-CONFIG_MFD_CROS_EC_SPI=y
-# CONFIG_MFD_HI6421_PMIC is not set
-# CONFIG_MFD_MAX77686 is not set
CONFIG_MFD_MT6397=y
-# CONFIG_MFD_STMPE is not set
CONFIG_MFD_SYSCON=y
-# CONFIG_MFD_TC3589X is not set
CONFIG_MFD_TPS65090=y
CONFIG_MFD_TPS6586X=y
# CONFIG_MMC_ARMMMCI is not set
@@ -395,9 +318,6 @@ CONFIG_MODULES_USE_ELF_RELA=y
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_CENTROIDING is not set
-CONFIG_MOUSE_ELAN_I2C=y
-CONFIG_MOUSE_ELAN_I2C_I2C=y
-# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SYNAPTICS_USB is not set
CONFIG_MTD=y
@@ -443,45 +363,28 @@ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
# CONFIG_MTD_SWAP is not set
# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_UBI is not set
-CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
-# CONFIG_MWIFIEX_USB is not set
CONFIG_NET_VENDOR_AMD=y
CONFIG_NET_VENDOR_HISILICON=y
# CONFIG_NFTL is not set
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-# CONFIG_NO_HZ_FULL is not set
CONFIG_NR_CPUS=8
CONFIG_NVMEM=y
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
CONFIG_OF_EARLY_FLATTREE=y
CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
CONFIG_OF_IOMMU=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_MDIO=y
CONFIG_OF_MTD=y
-CONFIG_OF_NET=y
-# CONFIG_OF_OVERLAY is not set
CONFIG_OF_RESERVED_MEM=y
-# CONFIG_OF_UNITTEST is not set
CONFIG_PARTITION_PERCPU=y
CONFIG_PERF_USE_VMALLOC=y
CONFIG_PGTABLE_LEVELS=3
CONFIG_PHYS_ADDR_T_64BIT=y
# CONFIG_PHY_XGENE is not set
# CONFIG_PID_IN_CONTEXTIDR is not set
-CONFIG_PINCONF=y
-CONFIG_PINCTRL=y
# CONFIG_PINCTRL_AMD is not set
# CONFIG_PINCTRL_AS3722 is not set
-# CONFIG_PINCTRL_SINGLE is not set
CONFIG_PINMUX=y
# CONFIG_PL320_MBOX is not set
CONFIG_PL330_DMA=y
-CONFIG_PM_CLK=y
CONFIG_PM_DEVFREQ=y
CONFIG_PM_GENERIC_DOMAINS=y
CONFIG_PM_GENERIC_DOMAINS_OF=y
@@ -505,13 +408,9 @@ CONFIG_PWM_CROS_EC=y
# CONFIG_PWM_FSL_FTM is not set
# CONFIG_PWM_PCA9685 is not set
CONFIG_PWM_SYSFS=y
-# CONFIG_QCA7000 is not set
CONFIG_RAS=y
-CONFIG_RATIONAL=y
-CONFIG_REGMAP_I2C=y
CONFIG_REGMAP_IRQ=y
CONFIG_REGMAP_MMIO=y
-CONFIG_REGMAP_SPI=y
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_ACT8865 is not set
# CONFIG_REGULATOR_AD5398 is not set
@@ -546,24 +445,21 @@ CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
CONFIG_RESET_CONTROLLER=y
# CONFIG_RFD_FTL is not set
# CONFIG_RFKILL_REGULATOR is not set
-# CONFIG_RT2500USB is not set
-# CONFIG_RT73USB is not set
CONFIG_RTC_DRV_CROS_EC=y
-# CONFIG_RTC_DRV_HYM8563 is not set
# CONFIG_RTC_DRV_PL030 is not set
# CONFIG_RTC_DRV_PL031 is not set
-# CONFIG_RTC_DRV_SNVS is not set
-# CONFIG_RTC_DRV_ZYNQMP is not set
CONFIG_RTC_HCTOSYS=y
CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
# CONFIG_RTL8187 is not set
# CONFIG_RTL8192CU is not set
# CONFIG_RTLLIB is not set
-# CONFIG_SCA3000 is not set
CONFIG_SCHED_MC=y
# CONFIG_SCHED_SMT is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set
+CONFIG_SECURITY_CHROMIUMOS=y
+CONFIG_SECURITY_CHROMIUMOS_NO_SYMLINK_MOUNT=y
+CONFIG_SECURITY_CHROMIUMOS_NO_UNPRIVILEGED_UNSAFE_MOUNTS=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_SENSORS_LIS3_SPI is not set
# CONFIG_SENSORS_PWM_FAN is not set
@@ -577,122 +473,45 @@ CONFIG_SERIAL_8250_NR_UARTS=4
# CONFIG_SERIAL_8250_RT288X is not set
CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# CONFIG_SERIAL_AMBA_PL010 is not set
-# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_EARLYCON=y
# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set
CONFIG_SERIAL_OF_PLATFORM=y
-# CONFIG_SERIAL_XILINX_PS_UART is not set
# CONFIG_SERIO is not set
CONFIG_SETEND_EMULATION=y
# CONFIG_SGETMASK_SYSCALL is not set
# CONFIG_SMC91X is not set
# CONFIG_SM_FTL is not set
-# CONFIG_SND_ATMEL_SOC is not set
# CONFIG_SND_DESIGNWARE_I2S is not set
-CONFIG_SND_PCM=y
CONFIG_SND_PCM_ELD=y
CONFIG_SND_PCM_IEC958=y
CONFIG_SND_SIMPLE_CARD=y
-CONFIG_SND_SOC=y
-# CONFIG_SND_SOC_AC97_CODEC is not set
-# CONFIG_SND_SOC_ADAU1701 is not set
-# CONFIG_SND_SOC_AK4104 is not set
-# CONFIG_SND_SOC_AK4554 is not set
-# CONFIG_SND_SOC_AK4613 is not set
-# CONFIG_SND_SOC_AK4642 is not set
-# CONFIG_SND_SOC_AK5386 is not set
-# CONFIG_SND_SOC_ALC5623 is not set
# CONFIG_SND_SOC_AMD_ACP is not set
-# CONFIG_SND_SOC_CS35L32 is not set
-# CONFIG_SND_SOC_CS4265 is not set
-# CONFIG_SND_SOC_CS4270 is not set
-# CONFIG_SND_SOC_CS4271_I2C is not set
-# CONFIG_SND_SOC_CS4271_SPI is not set
-# CONFIG_SND_SOC_CS42L51_I2C is not set
-# CONFIG_SND_SOC_CS42L52 is not set
-# CONFIG_SND_SOC_CS42L56 is not set
-# CONFIG_SND_SOC_CS42L73 is not set
-# CONFIG_SND_SOC_CS42XX8_I2C is not set
-# CONFIG_SND_SOC_CS4349 is not set
-# CONFIG_SND_SOC_ES8328 is not set
-# CONFIG_SND_SOC_FSL_ASRC is not set
-# CONFIG_SND_SOC_FSL_ESAI is not set
-# CONFIG_SND_SOC_FSL_SAI is not set
-# CONFIG_SND_SOC_FSL_SPDIF is not set
-# CONFIG_SND_SOC_FSL_SSI is not set
-# CONFIG_SND_SOC_GTM601 is not set
CONFIG_SND_SOC_HDMI_CODEC=y
-CONFIG_SND_SOC_I2C_AND_SPI=y
-# CONFIG_SND_SOC_IMX_AUDMUX is not set
-# CONFIG_SND_SOC_INNO_RK3036 is not set
+# CONFIG_SND_SOC_MAX98373 is not set
# CONFIG_SND_SOC_MAX98927 is not set
-# CONFIG_SND_SOC_PCM1681 is not set
-# CONFIG_SND_SOC_PCM1792A is not set
-# CONFIG_SND_SOC_PCM512x_I2C is not set
-# CONFIG_SND_SOC_PCM512x_SPI is not set
CONFIG_SND_SOC_RL6231=y
CONFIG_SND_SOC_RT5514=y
-# CONFIG_SND_SOC_RT5631 is not set
CONFIG_SND_SOC_RT5645=y
-# CONFIG_SND_SOC_SGTL5000 is not set
-# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set
-# CONFIG_SND_SOC_SPDIF is not set
-# CONFIG_SND_SOC_SSM2602_I2C is not set
-# CONFIG_SND_SOC_SSM2602_SPI is not set
CONFIG_SND_SOC_SSM4567=m
-# CONFIG_SND_SOC_STA32X is not set
-# CONFIG_SND_SOC_STA350 is not set
-# CONFIG_SND_SOC_STI_SAS is not set
-# CONFIG_SND_SOC_TAS2552 is not set
-# CONFIG_SND_SOC_TAS5086 is not set
-# CONFIG_SND_SOC_TAS571X is not set
-# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
-# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
-# CONFIG_SND_SOC_TLV320AIC31XX is not set
-# CONFIG_SND_SOC_TLV320AIC3X is not set
-# CONFIG_SND_SOC_TPA6130A2 is not set
CONFIG_SND_SOC_TS3A227E=y
-# CONFIG_SND_SOC_WM8510 is not set
-# CONFIG_SND_SOC_WM8523 is not set
-# CONFIG_SND_SOC_WM8580 is not set
-# CONFIG_SND_SOC_WM8711 is not set
-# CONFIG_SND_SOC_WM8728 is not set
-# CONFIG_SND_SOC_WM8731 is not set
-# CONFIG_SND_SOC_WM8737 is not set
-# CONFIG_SND_SOC_WM8741 is not set
-# CONFIG_SND_SOC_WM8750 is not set
# CONFIG_SND_SOC_WM8753 is not set
-# CONFIG_SND_SOC_WM8770 is not set
-# CONFIG_SND_SOC_WM8776 is not set
-# CONFIG_SND_SOC_WM8804_I2C is not set
-# CONFIG_SND_SOC_WM8804_SPI is not set
# CONFIG_SND_SOC_WM8903 is not set
-# CONFIG_SND_SOC_WM8962 is not set
-# CONFIG_SND_SOC_WM8978 is not set
-# CONFIG_SND_SOC_XTFPGA_I2S is not set
-# CONFIG_SND_SUN4I_CODEC is not set
-CONFIG_SND_TIMER=y
CONFIG_SPARSEMEM=y
CONFIG_SPARSEMEM_EXTREME=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM_VMEMMAP=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
-# CONFIG_SPI_FSL_SPI is not set
# CONFIG_SPI_PL022 is not set
# CONFIG_SPI_PXA2XX_PCI is not set
# CONFIG_SSB is not set
# CONFIG_SSFDC is not set
-# CONFIG_STAGING_BOARD is not set
-# CONFIG_STK8BA50 is not set
CONFIG_STMMAC_ETH=m
CONFIG_STMMAC_PLATFORM=m
CONFIG_SWP_EMULATION=y
-# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set
CONFIG_SYSCTL_EXCEPTION_TRACE=y
-# CONFIG_SYSTEMPORT is not set
CONFIG_SYSVIPC_COMPAT=y
CONFIG_SYS_SUPPORTS_HUGETLBFS=y
CONFIG_TCG_CR50=y
@@ -702,40 +521,10 @@ CONFIG_TCG_TIS_SPI=y
# CONFIG_TEGRA_AHB is not set
# CONFIG_THERMAL_GOV_BANG_BANG is not set
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
-CONFIG_THERMAL_OF=y
-# CONFIG_TOUCHSCREEN_AR1021_I2C is not set
-# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set
-# CONFIG_TOUCHSCREEN_EGALAX is not set
-CONFIG_TOUCHSCREEN_ELAN=y
-# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set
+CONFIG_THREAD_INFO_IN_TASK=y
# CONFIG_TOUCHSCREEN_RM_TS is not set
# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
# CONFIG_TRANSPARENT_HUGEPAGE is not set
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_BDC_UDC is not set
-# CONFIG_USB_CDC_COMPOSITE is not set
-CONFIG_USB_CONFIGFS=m
-# CONFIG_USB_CONFIGFS_ACM is not set
-# CONFIG_USB_CONFIGFS_ECM is not set
-# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set
-# CONFIG_USB_CONFIGFS_EEM is not set
-# CONFIG_USB_CONFIGFS_F_ACC is not set
-CONFIG_USB_CONFIGFS_F_FS=y
-# CONFIG_USB_CONFIGFS_F_HID is not set
-# CONFIG_USB_CONFIGFS_F_LB_SS is not set
-# CONFIG_USB_CONFIGFS_F_MIDI is not set
-# CONFIG_USB_CONFIGFS_F_MTP is not set
-# CONFIG_USB_CONFIGFS_F_PRINTER is not set
-# CONFIG_USB_CONFIGFS_F_UAC1 is not set
-# CONFIG_USB_CONFIGFS_F_UAC2 is not set
-# CONFIG_USB_CONFIGFS_F_UVC is not set
-# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set
-# CONFIG_USB_CONFIGFS_NCM is not set
-# CONFIG_USB_CONFIGFS_OBEX is not set
-# CONFIG_USB_CONFIGFS_RNDIS is not set
-# CONFIG_USB_CONFIGFS_SERIAL is not set
-CONFIG_USB_CONFIGFS_UEVENT=y
-# CONFIG_USB_DUMMY_HCD is not set
# CONFIG_USB_DWC2 is not set
CONFIG_USB_DWC3=y
CONFIG_USB_DWC3_DUAL_ROLE=y
@@ -743,48 +532,17 @@ CONFIG_USB_DWC3_DUAL_ROLE=y
# CONFIG_USB_DWC3_HOST is not set
CONFIG_USB_DWC3_OF_SIMPLE=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
-# CONFIG_USB_ETH is not set
-# CONFIG_USB_FOTG210_UDC is not set
-# CONFIG_USB_FUNCTIONFS is not set
-CONFIG_USB_F_FS=m
CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGETFS is not set
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-CONFIG_USB_GADGET_VBUS_DRAW=2
-# CONFIG_USB_GADGET_XILINX is not set
-# CONFIG_USB_GR_UDC is not set
-# CONFIG_USB_G_ACM_MS is not set
-# CONFIG_USB_G_DBGP is not set
-# CONFIG_USB_G_HID is not set
-# CONFIG_USB_G_MULTI is not set
-# CONFIG_USB_G_NCM is not set
-# CONFIG_USB_G_PRINTER is not set
-# CONFIG_USB_G_SERIAL is not set
-# CONFIG_USB_G_WEBCAM is not set
-CONFIG_USB_LIBCOMPOSITE=m
-# CONFIG_USB_M66592 is not set
-# CONFIG_USB_MASS_STORAGE is not set
-# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_USB_MV_U3D is not set
-# CONFIG_USB_MV_UDC is not set
-# CONFIG_USB_NET2272 is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
# CONFIG_USB_PHY is not set
-# CONFIG_USB_PXA27X is not set
-# CONFIG_USB_R8A66597 is not set
# CONFIG_USB_STORAGE_REALTEK is not set
CONFIG_USB_ULPI=y
CONFIG_USB_ULPI_VIEWPORT=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
-# CONFIG_USB_ZERO is not set
CONFIG_V4L_MEM2MEM_DRIVERS=y
# CONFIG_VEXPRESS_CONFIG is not set
-# CONFIG_VF610_ADC is not set
# CONFIG_VFIO is not set
CONFIG_VIDEOBUF2_CORE=y
CONFIG_VIDEOBUF2_DMA_CONTIG=y
@@ -795,14 +553,11 @@ CONFIG_VIDEO_DEV=y
# CONFIG_VIDEO_SH_VEU is not set
CONFIG_VIDEO_V4L2=y
# CONFIG_VIRTUALIZATION is not set
-# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
# CONFIG_WAKELOCK is not set
-CONFIG_WANT_DEV_COREDUMP=y
# CONFIG_XEN is not set
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_BCJ is not set
# CONFIG_XZ_DEC_X86 is not set
-# CONFIG_ZD1211RW is not set
CONFIG_ZONE_DMA=y
CONFIG_ZONE_DMA_FLAG=1
diff --git a/chromeos/config/armel/chromiumos-arm.flavour.config b/chromeos/config/armel/chromiumos-arm.flavour.config
index c3e651b7cf30c7..fb6147edbfe5f4 100644
--- a/chromeos/config/armel/chromiumos-arm.flavour.config
+++ b/chromeos/config/armel/chromiumos-arm.flavour.config
@@ -59,7 +59,6 @@ CONFIG_HID_RMI=m
# CONFIG_HUGETLBFS is not set
CONFIG_I2C_MUX_PINCTRL=y
CONFIG_I2C_TEGRA=y
-# CONFIG_ION_TEGRA is not set
CONFIG_KEYBOARD_NVEC=y
CONFIG_KEYBOARD_TEGRA=y
# CONFIG_LIBNVDIMM is not set
diff --git a/chromeos/config/armel/chromiumos-rockchip.flavour.config b/chromeos/config/armel/chromiumos-rockchip.flavour.config
index 716ddaef75cdd4..583735d14e9ca5 100644
--- a/chromeos/config/armel/chromiumos-rockchip.flavour.config
+++ b/chromeos/config/armel/chromiumos-rockchip.flavour.config
@@ -23,18 +23,15 @@ CONFIG_HID_RMI=y
# CONFIG_MAILBOX is not set
# CONFIG_MALI_2MB_ALLOC is not set
# CONFIG_MALI_CORESTACK is not set
-# CONFIG_MALI_DEBUG is not set
CONFIG_MALI_DEVFREQ=y
CONFIG_MALI_DMA_FENCE=y
CONFIG_MALI_EXPERT=y
# CONFIG_MALI_FENCE_DEBUG is not set
# CONFIG_MALI_GATOR_SUPPORT is not set
-# CONFIG_MALI_JOB_DUMPING is not set
+# CONFIG_MALI_JOB_DUMP is not set
CONFIG_MALI_MIDGARD=y
-# CONFIG_MALI_MIDGARD_ENABLE_TRACE is not set
CONFIG_MALI_PLATFORM_NAME="rk"
# CONFIG_MALI_PRFCNT_SET_SECONDARY is not set
-# CONFIG_MALI_TRACE_TIMELINE is not set
CONFIG_MEDIA_SUPPORT=y
# CONFIG_MFD_AS3722 is not set
# CONFIG_MFD_CROS_EC_I2C is not set
diff --git a/chromeos/config/armel/common.config b/chromeos/config/armel/common.config
index 47c28ab7a99971..4517c6d76cd7a5 100644
--- a/chromeos/config/armel/common.config
+++ b/chromeos/config/armel/common.config
@@ -76,7 +76,6 @@ CONFIG_ARCH_SUPPORTS_UPROBES=y
# CONFIG_ARCH_U8500 is not set
# CONFIG_ARCH_UNIPHIER is not set
CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
# CONFIG_ARCH_VERSATILE is not set
# CONFIG_ARCH_VEXPRESS is not set
# CONFIG_ARCH_VIRT is not set
@@ -85,7 +84,6 @@ CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
# CONFIG_ARCH_WM8850 is not set
# CONFIG_ARCH_ZX is not set
# CONFIG_ARCH_ZYNQ is not set
-# CONFIG_ARC_EMAC is not set
CONFIG_ARM=y
CONFIG_ARM_AMBA=y
# CONFIG_ARM_APPENDED_DTB is not set
@@ -129,7 +127,6 @@ CONFIG_ARM_THUMBEE=y
# CONFIG_ARM_TIMER_SP804 is not set
CONFIG_ARM_UNWIND=y
CONFIG_ARM_VIRT_EXT=y
-# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_ATA is not set
CONFIG_ATAGS=y
# CONFIG_ATH9K is not set
@@ -150,7 +147,6 @@ CONFIG_BCMA=m
# CONFIG_BCMA_HOST_SOC is not set
# CONFIG_BIG_LITTLE is not set
# CONFIG_BLK_DEV_INTEGRITY is not set
-# CONFIG_BPF_JIT is not set
# CONFIG_BRCMDBG is not set
CONFIG_BRCMFMAC=m
CONFIG_BRCMFMAC_PROTO_BCDC=y
@@ -159,7 +155,6 @@ CONFIG_BRCMFMAC_SDIO=y
# CONFIG_BRCMSTB_GISB_ARB is not set
CONFIG_BRCMUTIL=m
# CONFIG_BRCM_TRACING is not set
-# CONFIG_BROADCOM_PHY is not set
# CONFIG_BT_ATH3K is not set
# CONFIG_BT_HCIBCM203X is not set
# CONFIG_BT_HCIBPA10X is not set
@@ -175,6 +170,7 @@ CONFIG_BT_HCIUART_H4=y
CONFIG_BT_HCIUART_LL=y
# CONFIG_BT_HCIUART_MRVL is not set
# CONFIG_BT_HCIUART_QCA is not set
+# CONFIG_BT_HCI_LE_SPLITTER is not set
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
# CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE is not set
@@ -185,7 +181,6 @@ CONFIG_CHARGER_BQ24735=y
CONFIG_CHARGER_GPIO=y
# CONFIG_CHARGER_MANAGER is not set
CONFIG_CHROMEOS_OF_FIRMWARE=y
-CONFIG_CLKDEV_LOOKUP=y
CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK=y
CONFIG_CLKSRC_OF=y
CONFIG_CLKSRC_PROBE=y
@@ -193,17 +188,8 @@ CONFIG_CLKSRC_PROBE=y
# CONFIG_CLOCK_THERMAL is not set
CONFIG_CLONE_BACKWARDS=y
CONFIG_CMDLINE=""
-CONFIG_COMMON_CLK=y
-# CONFIG_COMMON_CLK_CDCE706 is not set
-# CONFIG_COMMON_CLK_CDCE925 is not set
# CONFIG_COMMON_CLK_PWM is not set
-# CONFIG_COMMON_CLK_PXA is not set
CONFIG_COMMON_CLK_RK808=y
-# CONFIG_COMMON_CLK_SI514 is not set
-# CONFIG_COMMON_CLK_SI5351 is not set
-# CONFIG_COMMON_CLK_SI570 is not set
-# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
-# CONFIG_CORDIC is not set
# CONFIG_CORESIGHT is not set
CONFIG_CPUFREQ_DT=y
CONFIG_CPUFREQ_DT_PLATDEV=y
@@ -232,11 +218,7 @@ CONFIG_CPU_THERMAL=y
CONFIG_CPU_TLB_V7=y
CONFIG_CPU_V7=y
# CONFIG_CRASH_DUMP is not set
-# CONFIG_CRC8 is not set
CONFIG_CRC_T10DIF=m
-CONFIG_CROS_EC_CHARDEV=y
-CONFIG_CROS_EC_PROTO=y
-# CONFIG_CROS_EC_SYSFS_USB is not set
CONFIG_CRYPTO_ABLK_HELPER=y
CONFIG_CRYPTO_AES_ARM=y
CONFIG_CRYPTO_AES_ARM_BS=y
@@ -256,7 +238,6 @@ CONFIG_CRYPTO_SHA512_ARM=y
# CONFIG_DEBUG_HIGHMEM is not set
# CONFIG_DEBUG_LL is not set
CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S"
-# CONFIG_DEBUG_PINCTRL is not set
# CONFIG_DEBUG_UART_8250 is not set
# CONFIG_DEBUG_USER is not set
# CONFIG_DEPRECATED_PARAM_STRUCT is not set
@@ -265,13 +246,7 @@ CONFIG_DEVFREQ_GOV_POWERSAVE=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_USERSPACE=y
# CONFIG_DEVFREQ_THERMAL is not set
-CONFIG_DEV_COREDUMP=y
# CONFIG_DM9000 is not set
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-# CONFIG_DMATEST is not set
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_OF=y
# CONFIG_DRM_AMD_ACP is not set
# CONFIG_DRM_ANALOGIX_ANX7688 is not set
# CONFIG_DRM_ANALOGIX_ANX78XX is not set
@@ -284,16 +259,11 @@ CONFIG_DRM_DW_MIPI_DSI=y
# CONFIG_DRM_GENERIC_GPIO_MUX is not set
# CONFIG_DRM_HDLCD is not set
CONFIG_DRM_MIPI_DSI=y
-# CONFIG_DRM_NXP_PTN3460 is not set
# CONFIG_DRM_PANEL_BOE_TV097QXM_NU0 is not set
# CONFIG_DRM_PANEL_INNOLUX_P079ZCA is not set
# CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04 is not set
-# CONFIG_DRM_PANEL_LG_LG4573 is not set
-# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
-# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
# CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set
CONFIG_DRM_PANEL_SIMPLE=y
-# CONFIG_DRM_PARADE_PS8622 is not set
# CONFIG_DRM_PARADE_PS8640 is not set
CONFIG_DRM_ROCKCHIP=y
# CONFIG_DRM_STI is not set
@@ -316,10 +286,8 @@ CONFIG_EDAC_ATOMIC_SCRUB=y
# CONFIG_EMAC_ROCKCHIP is not set
CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_EXTCON is not set
-# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set
# CONFIG_FB_ARMCLCD is not set
# CONFIG_FB_BACKLIGHT is not set
-# CONFIG_FB_SSD1307 is not set
# CONFIG_FIQ_DEBUGGER is not set
# CONFIG_FIRMWARE_MEMMAP is not set
CONFIG_FLATMEM=y
@@ -327,45 +295,31 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_FORCE_MAX_ZONEORDER=11
# CONFIG_FPC1020 is not set
CONFIG_FRAME_WARN=1024
-# CONFIG_FSL_EDMA is not set
# CONFIG_FTGMAC100 is not set
# CONFIG_FTL is not set
# CONFIG_FTMAC100 is not set
# CONFIG_FTRACE_SYSCALLS is not set
-CONFIG_FUNCTION_GRAPH_TRACER=y
CONFIG_GENERIC_IDLE_POLL_SETUP=y
CONFIG_GENERIC_IRQ_CHIP=y
+CONFIG_GENERIC_IRQ_MIGRATION=y
CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
CONFIG_GENERIC_PHY=y
-CONFIG_GENERIC_PINCONF=y
CONFIG_GENERIC_SCHED_CLOCK=y
CONFIG_GOOGLE_COREBOOT_TABLE_OF=y
-# CONFIG_GPIO_74X164 is not set
-# CONFIG_GPIO_74XX_MMIO is not set
-# CONFIG_GPIO_ADNP is not set
-# CONFIG_GPIO_ALTERA is not set
# CONFIG_GPIO_EM is not set
-# CONFIG_GPIO_GRGPIO is not set
# CONFIG_GPIO_PL061 is not set
# CONFIG_GPIO_SYSCON is not set
-# CONFIG_GPIO_WATCHDOG is not set
-# CONFIG_GPIO_XILINX is not set
# CONFIG_GPIO_ZEVIO is not set
CONFIG_HANDLE_DOMAIN_IRQ=y
CONFIG_HARDIRQS_SW_RESEND=y
CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU=y
CONFIG_HAS_IOPORT_MAP=y
-# CONFIG_HAVE_AOUT is not set
CONFIG_HAVE_ARCH_BITREVERSE=y
CONFIG_HAVE_ARCH_PFN_VALID=y
CONFIG_HAVE_ARM_ARCH_TIMER=y
CONFIG_HAVE_ARM_SCU=y
CONFIG_HAVE_ARM_SMCCC=y
CONFIG_HAVE_ARM_TWD=y
-CONFIG_HAVE_BPF_JIT=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_CLK_PREPARE=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
CONFIG_HAVE_GENERIC_DMA_COHERENT=y
CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
CONFIG_HAVE_KERNEL_GZIP=y
@@ -381,7 +335,6 @@ CONFIG_HAVE_OPTPROBES=y
CONFIG_HAVE_PROC_CPU=y
CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_SMP=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
# CONFIG_HIBERNATION is not set
# CONFIG_HID_GOOGLE_HAMMER is not set
# CONFIG_HID_PID is not set
@@ -398,30 +351,20 @@ CONFIG_HW_PERF_EVENTS=y
# CONFIG_HZ_200 is not set
# CONFIG_HZ_500 is not set
CONFIG_HZ_FIXED=0
-# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
CONFIG_I2C_CHARDEV=m
CONFIG_I2C_CROS_EC_TUNNEL=y
# CONFIG_I2C_DESIGNWARE_PLATFORM is not set
-# CONFIG_I2C_EMEV2 is not set
CONFIG_I2C_HID=m
-CONFIG_I2C_MUX=y
# CONFIG_I2C_NOMADIK is not set
CONFIG_I2C_RK3X=y
CONFIG_IIO=y
-CONFIG_IIO_BUFFER=y
-# CONFIG_IIO_BUFFER_CB is not set
-CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
# CONFIG_IIO_CROS_EC is not set
# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set
-# CONFIG_IIO_INTERRUPT_TRIGGER is not set
CONFIG_IIO_KFIFO_BUF=m
-# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
CONFIG_IIO_SYSFS_TRIGGER=m
-CONFIG_IIO_TRIGGER=y
# CONFIG_IMX_IPUV3_CORE is not set
# CONFIG_IMX_THERMAL is not set
# CONFIG_INFTL is not set
-CONFIG_INPUT_MATRIXKMAP=y
CONFIG_INPUT_POLLDEV=y
# CONFIG_INPUT_PWM_BEEPER is not set
# CONFIG_INPUT_REGULATOR_HAPTIC is not set
@@ -430,7 +373,6 @@ CONFIG_INPUT_POLLDEV=y
CONFIG_IOMMU_API=y
# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set
# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set
-CONFIG_IRQCHIP=y
# CONFIG_IRQ_TIME_ACCOUNTING is not set
# CONFIG_JFFS2_FS is not set
CONFIG_KERNEL_GZIP=y
@@ -441,48 +383,28 @@ CONFIG_KERNEL_MODE_NEON=y
# CONFIG_KERNEL_XZ is not set
# CONFIG_KEXEC is not set
# CONFIG_KEYBOARD_ATKBD is not set
-# CONFIG_KEYBOARD_BCM is not set
-# CONFIG_KEYBOARD_CAP11XX is not set
-CONFIG_KEYBOARD_CROS_EC=y
CONFIG_KEYBOARD_GPIO_POLLED=y
-# CONFIG_KEYBOARD_OMAP4 is not set
-# CONFIG_KEYBOARD_SAMSUNG is not set
# CONFIG_KPROBES is not set
-# CONFIG_KS8842 is not set
CONFIG_KUSER_HELPERS=y
CONFIG_LBDAF=y
-# CONFIG_LEDS_BCM6328 is not set
-# CONFIG_LEDS_BCM6358 is not set
CONFIG_LEDS_GPIO=y
# CONFIG_LEDS_PWM is not set
# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_SYSCON is not set
-# CONFIG_LIBERTAS is not set
# CONFIG_LIBERTAS_THINFIRM_USB is not set
CONFIG_LIBFDT=y
-# CONFIG_MAX5821 is not set
# CONFIG_MCPM is not set
-# CONFIG_MDIO_BUS_MUX_GPIO is not set
-# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+# CONFIG_MEDIA_CEC_SUPPORT is not set
# CONFIG_MEDIA_CONTROLLER is not set
CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
CONFIG_MEMORY=y
# CONFIG_MFD_ASIC3 is not set
-# CONFIG_MFD_ATMEL_FLEXCOM is not set
-# CONFIG_MFD_ATMEL_HLCDC is not set
-CONFIG_MFD_CROS_EC=y
-CONFIG_MFD_CROS_EC_PD_UPDATE=y
-CONFIG_MFD_CROS_EC_SPI=y
-# CONFIG_MFD_HI6421_PMIC is not set
-# CONFIG_MFD_MAX77686 is not set
# CONFIG_MFD_MT6397 is not set
# CONFIG_MFD_PALMAS is not set
# CONFIG_MFD_PM8921_CORE is not set
CONFIG_MFD_RK808=y
-# CONFIG_MFD_STMPE is not set
CONFIG_MFD_SYSCON=y
# CONFIG_MFD_T7L66XB is not set
-# CONFIG_MFD_TC3589X is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_MFD_TC6393XB is not set
CONFIG_MFD_TPS6586X=y
@@ -504,9 +426,6 @@ CONFIG_MODULES_USE_ELF_REL=y
# CONFIG_MOUSE_APPLETOUCH is not set
# CONFIG_MOUSE_BCM5974 is not set
# CONFIG_MOUSE_CENTROIDING is not set
-CONFIG_MOUSE_ELAN_I2C=y
-CONFIG_MOUSE_ELAN_I2C_I2C=y
-# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
# CONFIG_MOUSE_PS2 is not set
# CONFIG_MOUSE_SYNAPTICS_USB is not set
CONFIG_MTD=y
@@ -556,34 +475,21 @@ CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y
# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_UBI is not set
CONFIG_MULTI_IRQ_HANDLER=y
-CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
-# CONFIG_MWIFIEX_USB is not set
# CONFIG_NBPFAXI_DMA is not set
CONFIG_NEON=y
CONFIG_NET_VENDOR_CIRRUS=y
CONFIG_NET_VENDOR_FARADAY=y
CONFIG_NET_VENDOR_HISILICON=y
# CONFIG_NFTL is not set
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-# CONFIG_NO_HZ_FULL is not set
CONFIG_NR_CPUS=4
# CONFIG_NVMEM is not set
# CONFIG_OABI_COMPAT is not set
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
CONFIG_OF_EARLY_FLATTREE=y
CONFIG_OF_FLATTREE=y
-CONFIG_OF_GPIO=y
CONFIG_OF_IOMMU=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_MDIO=y
CONFIG_OF_MTD=y
-CONFIG_OF_NET=y
-# CONFIG_OF_OVERLAY is not set
CONFIG_OF_RESERVED_MEM=y
-# CONFIG_OF_UNITTEST is not set
CONFIG_OLD_MCOUNT=y
CONFIG_OLD_SIGACTION=y
# CONFIG_OPROFILE is not set
@@ -603,17 +509,13 @@ CONFIG_PHY_ROCKCHIP_DP=y
CONFIG_PHY_ROCKCHIP_USB=y
# CONFIG_PHY_SAMSUNG_USB2 is not set
# CONFIG_PID_IN_CONTEXTIDR is not set
-CONFIG_PINCONF=y
-CONFIG_PINCTRL=y
# CONFIG_PINCTRL_AMD is not set
CONFIG_PINCTRL_ROCKCHIP=y
-# CONFIG_PINCTRL_SINGLE is not set
CONFIG_PINMUX=y
# CONFIG_PL310_ERRATA_588369 is not set
# CONFIG_PL310_ERRATA_753970 is not set
CONFIG_PL330_DMA=y
# CONFIG_PLAT_SPEAR is not set
-CONFIG_PM_CLK=y
CONFIG_PM_DEVFREQ=y
# CONFIG_PM_DEVFREQ_EVENT is not set
CONFIG_PM_OPP=y
@@ -635,14 +537,10 @@ CONFIG_PWM=y
# CONFIG_PWM_PCA9685 is not set
CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SYSFS=y
-# CONFIG_QCA7000 is not set
# CONFIG_QORIQ_CPUFREQ is not set
# CONFIG_RAS is not set
-CONFIG_RATIONAL=y
-CONFIG_REGMAP_I2C=y
CONFIG_REGMAP_IRQ=y
CONFIG_REGMAP_MMIO=y
-CONFIG_REGMAP_SPI=y
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_ACT8865 is not set
# CONFIG_REGULATOR_AD5398 is not set
@@ -690,23 +588,17 @@ CONFIG_ROCKCHIP_IOMMU=y
CONFIG_ROCKCHIP_SARADC=m
CONFIG_ROCKCHIP_THERMAL=y
CONFIG_ROCKCHIP_TIMER=y
-# CONFIG_RT2500USB is not set
-# CONFIG_RT73USB is not set
# CONFIG_RTC_DRV_CMOS is not set
# CONFIG_RTC_DRV_CROS_EC is not set
-# CONFIG_RTC_DRV_HYM8563 is not set
# CONFIG_RTC_DRV_PL030 is not set
# CONFIG_RTC_DRV_PL031 is not set
CONFIG_RTC_DRV_RK808=y
-# CONFIG_RTC_DRV_SNVS is not set
# CONFIG_RTC_DRV_TPS6586X is not set
-# CONFIG_RTC_DRV_ZYNQMP is not set
CONFIG_RTC_HCTOSYS=y
CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
# CONFIG_RTL8187 is not set
# CONFIG_RTL8192CU is not set
# CONFIG_RTLLIB is not set
-# CONFIG_SCA3000 is not set
# CONFIG_SCHED_MC is not set
# CONFIG_SCHED_SMT is not set
# CONFIG_SCHED_TUNE is not set
@@ -732,7 +624,6 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4
# CONFIG_SERIAL_AMBA_PL010 is not set
# CONFIG_SERIAL_AMBA_PL011 is not set
# CONFIG_SERIAL_BCM63XX is not set
-# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_EARLYCON=y
@@ -740,7 +631,6 @@ CONFIG_SERIAL_EARLYCON=y
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_SERIAL_STM32 is not set
# CONFIG_SERIAL_ST_ASC is not set
-# CONFIG_SERIAL_XILINX_PS_UART is not set
# CONFIG_SERIO is not set
# CONFIG_SGETMASK_SYSCALL is not set
# CONFIG_SMC911X is not set
@@ -748,118 +638,43 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SMP_ON_UP=y
# CONFIG_SM_FTL is not set
# CONFIG_SND_ARM is not set
-# CONFIG_SND_ATMEL_SOC is not set
# CONFIG_SND_DESIGNWARE_I2S is not set
CONFIG_SND_DMAENGINE_PCM=y
-CONFIG_SND_PCM=y
CONFIG_SND_PCM_ELD=y
CONFIG_SND_PCM_IEC958=y
# CONFIG_SND_SIMPLE_CARD is not set
-CONFIG_SND_SOC=y
-# CONFIG_SND_SOC_AC97_CODEC is not set
-# CONFIG_SND_SOC_ADAU1701 is not set
-# CONFIG_SND_SOC_AK4104 is not set
-# CONFIG_SND_SOC_AK4554 is not set
-# CONFIG_SND_SOC_AK4613 is not set
-# CONFIG_SND_SOC_AK4642 is not set
-# CONFIG_SND_SOC_AK5386 is not set
-# CONFIG_SND_SOC_ALC5623 is not set
# CONFIG_SND_SOC_AMD_ACP is not set
-# CONFIG_SND_SOC_CS35L32 is not set
-# CONFIG_SND_SOC_CS4265 is not set
-# CONFIG_SND_SOC_CS4270 is not set
-# CONFIG_SND_SOC_CS4271_I2C is not set
-# CONFIG_SND_SOC_CS4271_SPI is not set
-# CONFIG_SND_SOC_CS42L51_I2C is not set
-# CONFIG_SND_SOC_CS42L52 is not set
-# CONFIG_SND_SOC_CS42L56 is not set
-# CONFIG_SND_SOC_CS42L73 is not set
-# CONFIG_SND_SOC_CS42XX8_I2C is not set
-# CONFIG_SND_SOC_CS4349 is not set
-# CONFIG_SND_SOC_ES8328 is not set
-# CONFIG_SND_SOC_FSL_ASRC is not set
-# CONFIG_SND_SOC_FSL_ESAI is not set
-# CONFIG_SND_SOC_FSL_SAI is not set
-# CONFIG_SND_SOC_FSL_SPDIF is not set
-# CONFIG_SND_SOC_FSL_SSI is not set
CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
-# CONFIG_SND_SOC_GTM601 is not set
-CONFIG_SND_SOC_I2C_AND_SPI=y
-# CONFIG_SND_SOC_IMX_AUDMUX is not set
-# CONFIG_SND_SOC_INNO_RK3036 is not set
CONFIG_SND_SOC_MAX98090=y
+# CONFIG_SND_SOC_MAX98373 is not set
# CONFIG_SND_SOC_MAX98927 is not set
-# CONFIG_SND_SOC_PCM1681 is not set
-# CONFIG_SND_SOC_PCM1792A is not set
-# CONFIG_SND_SOC_PCM512x_I2C is not set
-# CONFIG_SND_SOC_PCM512x_SPI is not set
# CONFIG_SND_SOC_RK3399_GRU_SOUND is not set
CONFIG_SND_SOC_ROCKCHIP=y
CONFIG_SND_SOC_ROCKCHIP_I2S=y
CONFIG_SND_SOC_ROCKCHIP_MAX98090=y
# CONFIG_SND_SOC_ROCKCHIP_RT5645 is not set
# CONFIG_SND_SOC_ROCKCHIP_SPDIF is not set
-# CONFIG_SND_SOC_RT5631 is not set
# CONFIG_SND_SOC_RT5677_SPI is not set
-# CONFIG_SND_SOC_SGTL5000 is not set
-# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set
-# CONFIG_SND_SOC_SPDIF is not set
-# CONFIG_SND_SOC_SSM2602_I2C is not set
-# CONFIG_SND_SOC_SSM2602_SPI is not set
# CONFIG_SND_SOC_SSM4567 is not set
-# CONFIG_SND_SOC_STA32X is not set
-# CONFIG_SND_SOC_STA350 is not set
-# CONFIG_SND_SOC_STI_SAS is not set
-# CONFIG_SND_SOC_TAS2552 is not set
-# CONFIG_SND_SOC_TAS5086 is not set
-# CONFIG_SND_SOC_TAS571X is not set
-# CONFIG_SND_SOC_TFA9879 is not set
CONFIG_SND_SOC_TLV320AIC23=y
CONFIG_SND_SOC_TLV320AIC23_I2C=y
-# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
-# CONFIG_SND_SOC_TLV320AIC31XX is not set
-# CONFIG_SND_SOC_TLV320AIC3X is not set
-# CONFIG_SND_SOC_TPA6130A2 is not set
CONFIG_SND_SOC_TS3A227E=y
-# CONFIG_SND_SOC_WM8510 is not set
-# CONFIG_SND_SOC_WM8523 is not set
-# CONFIG_SND_SOC_WM8580 is not set
-# CONFIG_SND_SOC_WM8711 is not set
-# CONFIG_SND_SOC_WM8728 is not set
-# CONFIG_SND_SOC_WM8731 is not set
-# CONFIG_SND_SOC_WM8737 is not set
-# CONFIG_SND_SOC_WM8741 is not set
-# CONFIG_SND_SOC_WM8750 is not set
CONFIG_SND_SOC_WM8753=y
-# CONFIG_SND_SOC_WM8770 is not set
-# CONFIG_SND_SOC_WM8776 is not set
-# CONFIG_SND_SOC_WM8804_I2C is not set
-# CONFIG_SND_SOC_WM8804_SPI is not set
CONFIG_SND_SOC_WM8903=y
-# CONFIG_SND_SOC_WM8962 is not set
-# CONFIG_SND_SOC_WM8978 is not set
-# CONFIG_SND_SOC_XTFPGA_I2S is not set
-# CONFIG_SND_SUN4I_CODEC is not set
-CONFIG_SND_TIMER=y
# CONFIG_SOC_AM33XX is not set
# CONFIG_SOC_AM43XX is not set
# CONFIG_SOC_BRCMSTB is not set
# CONFIG_SOC_DRA7XX is not set
# CONFIG_SOC_OMAP5 is not set
CONFIG_SPI_BITBANG=m
-# CONFIG_SPI_FSL_SPI is not set
# CONFIG_SPI_GPIO is not set
# CONFIG_SPI_PL022 is not set
# CONFIG_SPI_PXA2XX_PCI is not set
CONFIG_SPI_ROCKCHIP=y
# CONFIG_SSFDC is not set
-# CONFIG_STAGING_BOARD is not set
-# CONFIG_STK8BA50 is not set
CONFIG_STMMAC_ETH=m
CONFIG_STMMAC_PLATFORM=m
CONFIG_SWP_EMULATE=y
-# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set
-# CONFIG_SYSTEMPORT is not set
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
# CONFIG_TCG_ATMEL is not set
# CONFIG_TCG_CR50_I2C is not set
@@ -872,110 +687,46 @@ CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
# CONFIG_THERMAL_GOV_BANG_BANG is not set
# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_OF=y
# CONFIG_THERMAL_WRITABLE_TRIPS is not set
+# CONFIG_THROTTLER is not set
# CONFIG_THUMB2_KERNEL is not set
-# CONFIG_TOUCHSCREEN_AR1021_I2C is not set
-# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set
-# CONFIG_TOUCHSCREEN_EGALAX is not set
-CONFIG_TOUCHSCREEN_ELAN=y
-# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set
# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
# CONFIG_TOUCHSCREEN_RM_TS is not set
# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
# CONFIG_UACCESS_WITH_MEMCPY is not set
CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h"
# CONFIG_UPROBE_EVENT is not set
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_BDC_UDC is not set
-# CONFIG_USB_CDC_COMPOSITE is not set
-CONFIG_USB_CONFIGFS=m
-# CONFIG_USB_CONFIGFS_ACM is not set
-# CONFIG_USB_CONFIGFS_ECM is not set
-# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set
-# CONFIG_USB_CONFIGFS_EEM is not set
-# CONFIG_USB_CONFIGFS_F_ACC is not set
-CONFIG_USB_CONFIGFS_F_FS=y
-# CONFIG_USB_CONFIGFS_F_HID is not set
-# CONFIG_USB_CONFIGFS_F_LB_SS is not set
-# CONFIG_USB_CONFIGFS_F_MIDI is not set
-# CONFIG_USB_CONFIGFS_F_MTP is not set
-# CONFIG_USB_CONFIGFS_F_PRINTER is not set
-# CONFIG_USB_CONFIGFS_F_UAC1 is not set
-# CONFIG_USB_CONFIGFS_F_UAC2 is not set
-# CONFIG_USB_CONFIGFS_F_UVC is not set
-# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set
-# CONFIG_USB_CONFIGFS_NCM is not set
-# CONFIG_USB_CONFIGFS_OBEX is not set
-# CONFIG_USB_CONFIGFS_RNDIS is not set
-# CONFIG_USB_CONFIGFS_SERIAL is not set
-CONFIG_USB_CONFIGFS_UEVENT=y
-# CONFIG_USB_DUMMY_HCD is not set
CONFIG_USB_DWC2=y
# CONFIG_USB_DWC2_DEBUG is not set
CONFIG_USB_DWC2_HOST=y
# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
# CONFIG_USB_DWC3 is not set
CONFIG_USB_EHCI_HCD_PLATFORM=y
-# CONFIG_USB_ETH is not set
-# CONFIG_USB_FOTG210_UDC is not set
-# CONFIG_USB_FUNCTIONFS is not set
-CONFIG_USB_F_FS=m
CONFIG_USB_GADGET=m
-# CONFIG_USB_GADGETFS is not set
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-CONFIG_USB_GADGET_VBUS_DRAW=2
-# CONFIG_USB_GADGET_XILINX is not set
-# CONFIG_USB_GR_UDC is not set
-# CONFIG_USB_G_ACM_MS is not set
-# CONFIG_USB_G_DBGP is not set
-# CONFIG_USB_G_HID is not set
-# CONFIG_USB_G_MULTI is not set
-# CONFIG_USB_G_NCM is not set
-# CONFIG_USB_G_PRINTER is not set
-# CONFIG_USB_G_SERIAL is not set
-# CONFIG_USB_G_WEBCAM is not set
# CONFIG_USB_HCD_BCMA is not set
-CONFIG_USB_LIBCOMPOSITE=m
-# CONFIG_USB_M66592 is not set
-# CONFIG_USB_MASS_STORAGE is not set
-# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_USB_MV_U3D is not set
-# CONFIG_USB_MV_UDC is not set
-# CONFIG_USB_NET2272 is not set
-# CONFIG_USB_PXA27X is not set
-# CONFIG_USB_R8A66597 is not set
# CONFIG_USB_STORAGE_REALTEK is not set
-# CONFIG_USB_ZERO is not set
CONFIG_USE_OF=y
CONFIG_V4L_MEM2MEM_DRIVERS=y
# CONFIG_V4L_PLATFORM_DRIVERS is not set
CONFIG_VDSO=y
CONFIG_VECTORS_BASE=0xffff0000
# CONFIG_VEXPRESS_CONFIG is not set
-# CONFIG_VF610_ADC is not set
# CONFIG_VFIO is not set
CONFIG_VFP=y
CONFIG_VFPv3=y
CONFIG_VIDEOBUF2_CORE=m
CONFIG_VIDEOBUF2_MEMOPS=m
-CONFIG_VIDEOBUF2_VMALLOC=m
CONFIG_VIDEOMODE_HELPERS=y
# CONFIG_VIDEO_MEM2MEM_DEINTERLACE is not set
# CONFIG_VIDEO_ROCKCHIP_VPU is not set
# CONFIG_VIDEO_SH_VEU is not set
# CONFIG_VIRTIO_MMIO is not set
# CONFIG_VIRTUALIZATION is not set
-# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
# CONFIG_VMSPLIT_1G is not set
# CONFIG_VMSPLIT_2G is not set
CONFIG_VMSPLIT_3G=y
# CONFIG_VMSPLIT_3G_OPT is not set
# CONFIG_WAKELOCK is not set
-CONFIG_WANT_DEV_COREDUMP=y
# CONFIG_XEN is not set
CONFIG_XZ_DEC_ARM=y
CONFIG_XZ_DEC_ARMTHUMB=y
@@ -983,5 +734,4 @@ CONFIG_XZ_DEC_BCJ=y
# CONFIG_XZ_DEC_X86 is not set
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ZBOOT_ROM_TEXT=0x0
-# CONFIG_ZD1211RW is not set
CONFIG_ZONE_DMA_FLAG=0
diff --git a/chromeos/config/base.config b/chromeos/config/base.config
index daad27f58ada2c..43752a0f8fc882 100644
--- a/chromeos/config/base.config
+++ b/chromeos/config/base.config
@@ -98,12 +98,15 @@ CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
CONFIG_ARCH_HAS_SG_CHAIN=y
CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+# CONFIG_ARC_EMAC is not set
# CONFIG_AS3935 is not set
CONFIG_ASHMEM=y
CONFIG_ASSOCIATIVE_ARRAY=y
# CONFIG_ASYMMETRIC_KEY_TYPE is not set
+# CONFIG_ASYNC_TX_DMA is not set
# CONFIG_AT76C50X_USB is not set
# CONFIG_AT803X_PHY is not set
# CONFIG_ATALK is not set
@@ -198,6 +201,7 @@ CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1
# CONFIG_BOOT_PRINTK_DELAY is not set
CONFIG_BOUNCE=y
CONFIG_BPF=y
+# CONFIG_BPF_JIT is not set
# CONFIG_BPF_SYSCALL is not set
CONFIG_BQL=y
CONFIG_BRANCH_PROFILE_NONE=y
@@ -206,6 +210,7 @@ CONFIG_BRIDGE=m
CONFIG_BRIDGE_IGMP_SNOOPING=y
# CONFIG_BRIDGE_NETFILTER is not set
# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_BROADCOM_PHY is not set
# CONFIG_BSD_DISKLABEL is not set
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_BT=m
@@ -214,12 +219,14 @@ CONFIG_BT_BCM=m
# CONFIG_BT_BNEP is not set
CONFIG_BT_BREDR=y
CONFIG_BT_DEBUGFS=y
+CONFIG_BT_ENFORCE_CLASSIC_SECURITY=y
# CONFIG_BT_EVE_HACKS is not set
CONFIG_BT_HCIBFUSB=m
CONFIG_BT_HCIBTUSB=m
CONFIG_BT_HCIBTUSB_BCM=y
CONFIG_BT_HCIBTUSB_RTL=y
CONFIG_BT_HCIVHCI=m
+# CONFIG_BT_HCI_LE_SPLITTER is not set
CONFIG_BT_HIDP=m
CONFIG_BT_HS=y
CONFIG_BT_INTEL=m
@@ -237,6 +244,7 @@ CONFIG_BUILD_BIN2C=y
# CONFIG_CAIF is not set
# CONFIG_CAN is not set
# CONFIG_CARL9170 is not set
+# CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_NONE is not set
@@ -283,6 +291,7 @@ CONFIG_CHROME_PLATFORMS=y
# CONFIG_CICADA_PHY is not set
# CONFIG_CIFS is not set
# CONFIG_CLEANCACHE is not set
+CONFIG_CLKDEV_LOOKUP=y
CONFIG_CLS_U32_MARK=y
# CONFIG_CLS_U32_PERF is not set
# CONFIG_CM32181 is not set
@@ -293,6 +302,14 @@ CONFIG_CLS_U32_MARK=y
# CONFIG_CMDLINE_PARTITION is not set
# CONFIG_CODA_FS is not set
# CONFIG_COMEDI is not set
+CONFIG_COMMON_CLK=y
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CDCE925 is not set
+# CONFIG_COMMON_CLK_PXA is not set
+# CONFIG_COMMON_CLK_SI514 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI570 is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
CONFIG_COMPACTION=y
# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPILE_TEST is not set
@@ -300,6 +317,7 @@ CONFIG_CONFIGFS_FS=y
CONFIG_CONNECTOR=y
CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_CONTEXT_SWITCH_TRACER=y
+# CONFIG_CORDIC is not set
CONFIG_COREDUMP=y
CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
CONFIG_CPUSETS=y
@@ -327,10 +345,13 @@ CONFIG_CRC32=y
# CONFIG_CRC32_SLICEBY4 is not set
CONFIG_CRC32_SLICEBY8=y
CONFIG_CRC7=m
+# CONFIG_CRC8 is not set
CONFIG_CRC_CCITT=y
CONFIG_CRC_ITU_T=y
CONFIG_CROSS_COMPILE=""
CONFIG_CROSS_MEMORY_ATTACH=y
+CONFIG_CROS_EC_CHARDEV=y
+CONFIG_CROS_EC_PROTO=y
CONFIG_CRYPTO=y
# CONFIG_CRYPTO_842 is not set
CONFIG_CRYPTO_AEAD=y
@@ -367,11 +388,11 @@ CONFIG_CRYPTO_DRBG=y
CONFIG_CRYPTO_DRBG_HMAC=y
CONFIG_CRYPTO_DRBG_MENU=y
CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ECHAINIV=y
# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_GF128MUL=y
-CONFIG_CRYPTO_GHASH=m
+CONFIG_CRYPTO_GHASH=y
CONFIG_CRYPTO_HASH=y
CONFIG_CRYPTO_HASH2=y
CONFIG_CRYPTO_HMAC=y
@@ -462,6 +483,7 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_PAGEALLOC is not set
# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_DEBUG_PINCTRL is not set
# CONFIG_DEBUG_PI_LIST is not set
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_RODATA=y
@@ -500,12 +522,18 @@ CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DEVTMPFS_SAFE=y
+CONFIG_DEV_COREDUMP=y
# CONFIG_DGAP is not set
# CONFIG_DHT11 is not set
# CONFIG_DISK_BASED_SWAP is not set
# CONFIG_DLM is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+# CONFIG_DMATEST is not set
# CONFIG_DMA_API_DEBUG is not set
+CONFIG_DMA_ENGINE=y
# CONFIG_DMA_FENCE_TRACE is not set
+CONFIG_DMA_OF=y
CONFIG_DMA_SHARED_BUFFER=y
CONFIG_DM_BIO_PRISON=y
CONFIG_DM_BUFIO=y
@@ -547,12 +575,16 @@ CONFIG_DRM_EVDI=m
# CONFIG_DRM_I2C_CH7006 is not set
# CONFIG_DRM_I2C_NXP_TDA998X is not set
# CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_KMS_FB_HELPER=y
CONFIG_DRM_KMS_HELPER=y
# CONFIG_DRM_LIB_RANDOM is not set
# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
CONFIG_DRM_PANEL=y
CONFIG_DRM_PANEL_BRIDGE=y
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
CONFIG_DRM_UDL=y
CONFIG_DRM_VGEM=y
# CONFIG_DS1682 is not set
@@ -604,10 +636,12 @@ CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_USE_FOR_EXT2=y
CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set
# CONFIG_EZX_PCAP is not set
# CONFIG_F2FS_FS is not set
CONFIG_FAIR_GROUP_SCHED=y
-# CONFIG_FANOTIFY is not set
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_FAT_FS=m
@@ -616,13 +650,12 @@ CONFIG_FB=y
# CONFIG_FB_AUO_K190X is not set
# CONFIG_FB_BOOT_VESA_SUPPORT is not set
# CONFIG_FB_BROADSHEET is not set
-CONFIG_FB_CFB_COPYAREA=y
-CONFIG_FB_CFB_FILLRECT=y
-CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_COPYAREA is not set
+# CONFIG_FB_CFB_FILLRECT is not set
+# CONFIG_FB_CFB_IMAGEBLIT is not set
# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
CONFIG_FB_CMDLINE=y
# CONFIG_FB_DDC is not set
-CONFIG_FB_DEFERRED_IO=y
# CONFIG_FB_FOREIGN_ENDIAN is not set
# CONFIG_FB_IBM_GXT4500 is not set
# CONFIG_FB_MACMODES is not set
@@ -632,11 +665,12 @@ CONFIG_FB_MODE_HELPERS=y
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_SIMPLE is not set
# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_SSD1307 is not set
# CONFIG_FB_SVGALIB is not set
-CONFIG_FB_SYS_COPYAREA=y
-CONFIG_FB_SYS_FILLRECT=y
-CONFIG_FB_SYS_FOPS=y
-CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
# CONFIG_FB_TFT is not set
# CONFIG_FB_TILEBLITTING is not set
# CONFIG_FB_UDL is not set
@@ -664,12 +698,14 @@ CONFIG_FRAME_VECTOR=y
CONFIG_FREEZER=y
# CONFIG_FRONTSWAP is not set
# CONFIG_FSCACHE is not set
+# CONFIG_FSL_EDMA is not set
CONFIG_FSNOTIFY=y
CONFIG_FS_MBCACHE=y
CONFIG_FS_POSIX_ACL=y
CONFIG_FTRACE=y
CONFIG_FTRACE_MCOUNT_RECORD=y
# CONFIG_FTRACE_STARTUP_TEST is not set
+CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_FUNCTION_PROFILER is not set
CONFIG_FUNCTION_TRACER=y
CONFIG_FUSE_FS=m
@@ -693,13 +729,13 @@ CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_GENERIC_IRQ_SHOW=y
CONFIG_GENERIC_NET_UTILS=y
CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_PINCONF=y
CONFIG_GENERIC_SMP_IDLE_THREAD=y
CONFIG_GENERIC_STRNCPY_FROM_USER=y
CONFIG_GENERIC_STRNLEN_USER=y
CONFIG_GENERIC_TIME_VSYSCALL=y
CONFIG_GENERIC_TRACER=y
# CONFIG_GFS2_FS is not set
-# CONFIG_GOLDFISH is not set
CONFIG_GOOGLE_COREBOOT_TABLE=y
CONFIG_GOOGLE_FIRMWARE=y
CONFIG_GOOGLE_MEMCONSOLE=y
@@ -707,10 +743,15 @@ CONFIG_GOOGLE_MEMCONSOLE_COREBOOT=y
CONFIG_GOOGLE_VPD=y
# CONFIG_GP2AP020A00F is not set
CONFIG_GPIOLIB=y
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_74XX_MMIO is not set
+# CONFIG_GPIO_ADNP is not set
# CONFIG_GPIO_ADP5588 is not set
+# CONFIG_GPIO_ALTERA is not set
CONFIG_GPIO_DEVRES=y
# CONFIG_GPIO_DWAPB is not set
# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_GRGPIO is not set
# CONFIG_GPIO_MAX7300 is not set
# CONFIG_GPIO_MAX7301 is not set
# CONFIG_GPIO_MAX732X is not set
@@ -720,22 +761,32 @@ CONFIG_GPIO_DEVRES=y
# CONFIG_GPIO_PCF857X is not set
# CONFIG_GPIO_SX150X is not set
CONFIG_GPIO_SYSFS=y
+# CONFIG_GPIO_WATCHDOG is not set
+# CONFIG_GPIO_XILINX is not set
# CONFIG_GPIO_ZX is not set
CONFIG_GRACE_PERIOD=m
# CONFIG_GS_FPGABOOT is not set
# CONFIG_HAMRADIO is not set
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_PAGESPAN is not set
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_HAS_DMA=y
CONFIG_HAS_IOMEM=y
# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+# CONFIG_HAVE_AOUT is not set
CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_HAVE_ARCH_HARDENED_USERCOPY=y
CONFIG_HAVE_ARCH_JUMP_LABEL=y
CONFIG_HAVE_ARCH_KGDB=y
CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
CONFIG_HAVE_ARCH_TRACEHOOK=y
# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_HAVE_BPF_JIT=y
CONFIG_HAVE_CC_STACKPROTECTOR=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_HAVE_CONTEXT_TRACKING=y
CONFIG_HAVE_C_RECORDMCOUNT=y
CONFIG_HAVE_DEBUG_KMEMLEAK=y
CONFIG_HAVE_DMA_API_DEBUG=y
@@ -745,6 +796,7 @@ CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y
CONFIG_HAVE_HW_BREAKPOINT=y
CONFIG_HAVE_MEMBLOCK=y
CONFIG_HAVE_NET_DSA=y
@@ -753,6 +805,7 @@ CONFIG_HAVE_PERF_REGS=y
CONFIG_HAVE_PERF_USER_STACK_DUMP=y
CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
CONFIG_HAVE_UID16=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
# CONFIG_HDC100X is not set
CONFIG_HDMI=y
# CONFIG_HEADERS_CHECK is not set
@@ -860,6 +913,7 @@ CONFIG_HZ_1000=y
# CONFIG_HZ_PERIODIC is not set
CONFIG_I2C=y
CONFIG_I2C_ALGOBIT=y
+# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
CONFIG_I2C_BOARDINFO=y
# CONFIG_I2C_CBUS_GPIO is not set
CONFIG_I2C_COMPAT=y
@@ -867,8 +921,10 @@ CONFIG_I2C_COMPAT=y
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_EMEV2 is not set
# CONFIG_I2C_GPIO is not set
CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_MUX=y
# CONFIG_I2C_MUX_GPIO is not set
# CONFIG_I2C_MUX_PCA9541 is not set
# CONFIG_I2C_MUX_PCA954x is not set
@@ -889,12 +945,18 @@ CONFIG_I2C_STUB=m
# CONFIG_IDLE_PAGE_TRACKING is not set
# CONFIG_IEEE802154 is not set
# CONFIG_IFB is not set
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
+# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
# CONFIG_IIO_SIMPLE_DUMMY is not set
# CONFIG_IIO_SSP_SENSORHUB is not set
# CONFIG_IIO_ST_ACCEL_3AXIS is not set
# CONFIG_IIO_ST_GYRO_3AXIS is not set
# CONFIG_IIO_ST_MAGN_3AXIS is not set
# CONFIG_IIO_ST_PRESS is not set
+CONFIG_IIO_TRIGGER=y
CONFIG_IKCONFIG=m
CONFIG_IKCONFIG_PROC=y
# CONFIG_IMA is not set
@@ -953,6 +1015,7 @@ CONFIG_INPUT_KEYRESET=y
# CONFIG_INPUT_KEYSPAN_REMOTE is not set
# CONFIG_INPUT_KXTJ9 is not set
CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_MATRIXKMAP=y
CONFIG_INPUT_MISC=y
# CONFIG_INPUT_MMA8450 is not set
CONFIG_INPUT_MOUSE=y
@@ -974,9 +1037,7 @@ CONFIG_INTEGRITY_AUDIT=y
# CONFIG_INV_MPU6050_IIO is not set
CONFIG_IOMMU_HELPER=y
CONFIG_IOMMU_SUPPORT=y
-CONFIG_ION=y
-CONFIG_ION_DUMMY=y
-# CONFIG_ION_TEST is not set
+# CONFIG_ION is not set
CONFIG_IOSCHED_CFQ=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_NOOP=y
@@ -1055,6 +1116,7 @@ CONFIG_IP_ROUTE_VERBOSE=y
# CONFIG_IP_SET is not set
# CONFIG_IP_VS is not set
# CONFIG_IRDA is not set
+CONFIG_IRQCHIP=y
# CONFIG_IRQSOFF_TRACER is not set
CONFIG_IRQ_DOMAIN=y
# CONFIG_IRQ_DOMAIN_DEBUG is not set
@@ -1106,6 +1168,9 @@ CONFIG_KALLSYMS=y
CONFIG_KERNFS=y
# CONFIG_KEYBOARD_ADP5588 is not set
# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_BCM is not set
+# CONFIG_KEYBOARD_CAP11XX is not set
+CONFIG_KEYBOARD_CROS_EC=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_KEYBOARD_LKKBD is not set
# CONFIG_KEYBOARD_LM8323 is not set
@@ -1115,9 +1180,11 @@ CONFIG_KEYBOARD_GPIO=y
# CONFIG_KEYBOARD_MCS is not set
# CONFIG_KEYBOARD_MPR121 is not set
# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OMAP4 is not set
# CONFIG_KEYBOARD_OPENCORES is not set
# CONFIG_KEYBOARD_QT1070 is not set
# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
# CONFIG_KEYBOARD_STOWAWAY is not set
# CONFIG_KEYBOARD_SUNKBD is not set
# CONFIG_KEYBOARD_TCA6416 is not set
@@ -1126,6 +1193,7 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYS=y
# CONFIG_KGDB is not set
# CONFIG_KMX61 is not set
+# CONFIG_KS8842 is not set
# CONFIG_KS8851 is not set
# CONFIG_KS8851_MLL is not set
# CONFIG_KSM is not set
@@ -1136,6 +1204,8 @@ CONFIG_KEYS=y
# CONFIG_LATTICE_ECP3_CONFIG is not set
# CONFIG_LCD_CLASS_DEVICE is not set
# CONFIG_LDM_PARTITION is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
# CONFIG_LEDS_BD2802 is not set
# CONFIG_LEDS_BLINKM is not set
CONFIG_LEDS_CLASS=y
@@ -1173,6 +1243,7 @@ CONFIG_LIB80211_CRYPT_TKIP=m
CONFIG_LIB80211_CRYPT_WEP=m
# CONFIG_LIB80211_DEBUG is not set
CONFIG_LIBCRC32C=y
+# CONFIG_LIBERTAS is not set
CONFIG_LIBERTAS_THINFIRM=m
# CONFIG_LIBERTAS_THINFIRM_DEBUG is not set
# CONFIG_LIDAR_LITE_V2 is not set
@@ -1238,10 +1309,12 @@ CONFIG_MAC_PARTITION=y
# CONFIG_MAG3110 is not set
CONFIG_MAGIC_SYSRQ=y
CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
+CONFIG_MALI_KUTF=m
# CONFIG_MARVELL_PHY is not set
# CONFIG_MAX1027 is not set
# CONFIG_MAX1363 is not set
# CONFIG_MAX517 is not set
+# CONFIG_MAX5821 is not set
# CONFIG_MAX63XX_WATCHDOG is not set
# CONFIG_MCB is not set
# CONFIG_MCP320X is not set
@@ -1252,9 +1325,10 @@ CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0
CONFIG_MD=y
# CONFIG_MDIO_BCM_UNIMAC is not set
# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
CONFIG_MEDIA_CAMERA_SUPPORT=y
-# CONFIG_MEDIA_CEC_SUPPORT is not set
# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
# CONFIG_MEDIA_RADIO_SUPPORT is not set
# CONFIG_MEDIA_RC_SUPPORT is not set
@@ -1273,9 +1347,14 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
# CONFIG_MFD_ARIZONA_I2C is not set
# CONFIG_MFD_ARIZONA_SPI is not set
# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
# CONFIG_MFD_AXP20X is not set
# CONFIG_MFD_BCM590XX is not set
CONFIG_MFD_CORE=y
+CONFIG_MFD_CROS_EC=y
+CONFIG_MFD_CROS_EC_PD_UPDATE=y
+CONFIG_MFD_CROS_EC_SPI=y
# CONFIG_MFD_DA9052_I2C is not set
# CONFIG_MFD_DA9052_SPI is not set
# CONFIG_MFD_DA9055 is not set
@@ -1283,11 +1362,13 @@ CONFIG_MFD_CORE=y
# CONFIG_MFD_DA9063 is not set
# CONFIG_MFD_DA9150 is not set
# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
# CONFIG_MFD_KEMPLD is not set
# CONFIG_MFD_LM3533 is not set
# CONFIG_MFD_LP3943 is not set
# CONFIG_MFD_LP8788 is not set
# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77686 is not set
# CONFIG_MFD_MAX77693 is not set
# CONFIG_MFD_MAX77843 is not set
# CONFIG_MFD_MAX8907 is not set
@@ -1308,6 +1389,8 @@ CONFIG_MFD_CORE=y
# CONFIG_MFD_SKY81452 is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_MFD_SMSC is not set
+# CONFIG_MFD_STMPE is not set
+# CONFIG_MFD_TC3589X is not set
# CONFIG_MFD_TI_AM335X_TSCADC is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_MFD_TPS65217 is not set
@@ -1344,6 +1427,7 @@ CONFIG_MMC_BLOCK_BOUNCE=y
CONFIG_MMC_BLOCK_MINORS=16
# CONFIG_MMC_DEBUG is not set
# CONFIG_MMC_EMBEDDED_SDIO is not set
+CONFIG_MMC_FFU=y
# CONFIG_MMC_PARANOID_SD_INIT is not set
CONFIG_MMC_SDHCI=y
# CONFIG_MMC_SIMULATE_MAX_SPEED is not set
@@ -1352,6 +1436,7 @@ CONFIG_MMC_TEST=m
# CONFIG_MMC_USHC is not set
# CONFIG_MMC_VUB300 is not set
CONFIG_MMU=y
+# CONFIG_MM_METRICS is not set
CONFIG_MODULES=y
CONFIG_MODULES_TREE_LOOKUP=y
# CONFIG_MODULE_COMPRESS is not set
@@ -1363,6 +1448,9 @@ CONFIG_MODULE_UNLOAD=y
# CONFIG_MODVERSIONS is not set
# CONFIG_MOST is not set
CONFIG_MOUSE_CYAPA=y
+CONFIG_MOUSE_ELAN_I2C=y
+CONFIG_MOUSE_ELAN_I2C_I2C=y
+# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
# CONFIG_MOUSE_GPIO is not set
# CONFIG_MOUSE_SERIAL is not set
# CONFIG_MOUSE_SYNAPTICS_I2C is not set
@@ -1378,6 +1466,8 @@ CONFIG_MSDOS_PARTITION=y
CONFIG_MULTIUSER=y
CONFIG_MUTEX_SPIN_ON_OWNER=y
# CONFIG_MVMDIO is not set
+CONFIG_MWIFIEX=m
+# CONFIG_MWIFIEX_USB is not set
# CONFIG_MXC4005 is not set
CONFIG_NAMESPACES=y
# CONFIG_NATIONAL_PHY is not set
@@ -1594,7 +1684,7 @@ CONFIG_NFS_V4=m
CONFIG_NF_CONNTRACK=y
# CONFIG_NF_CONNTRACK_AMANDA is not set
CONFIG_NF_CONNTRACK_EVENTS=y
-# CONFIG_NF_CONNTRACK_FTP is not set
+CONFIG_NF_CONNTRACK_FTP=m
# CONFIG_NF_CONNTRACK_H323 is not set
CONFIG_NF_CONNTRACK_IPV4=y
CONFIG_NF_CONNTRACK_IPV6=y
@@ -1602,11 +1692,13 @@ CONFIG_NF_CONNTRACK_IPV6=y
CONFIG_NF_CONNTRACK_MARK=y
# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
# CONFIG_NF_CONNTRACK_SANE is not set
CONFIG_NF_CONNTRACK_SECMARK=y
# CONFIG_NF_CONNTRACK_SIP is not set
# CONFIG_NF_CONNTRACK_SNMP is not set
-# CONFIG_NF_CONNTRACK_TFTP is not set
+CONFIG_NF_CONNTRACK_TFTP=m
CONFIG_NF_CONNTRACK_TIMEOUT=y
# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
# CONFIG_NF_CONNTRACK_ZONES is not set
@@ -1626,7 +1718,7 @@ CONFIG_NF_DEFRAG_IPV6=y
# CONFIG_NF_LOG_IPV6 is not set
CONFIG_NF_NAT=y
# CONFIG_NF_NAT_AMANDA is not set
-# CONFIG_NF_NAT_FTP is not set
+CONFIG_NF_NAT_FTP=m
# CONFIG_NF_NAT_H323 is not set
CONFIG_NF_NAT_IPV4=y
CONFIG_NF_NAT_IPV6=m
@@ -1638,7 +1730,7 @@ CONFIG_NF_NAT_PPTP=y
CONFIG_NF_NAT_PROTO_GRE=y
CONFIG_NF_NAT_REDIRECT=m
# CONFIG_NF_NAT_SIP is not set
-# CONFIG_NF_NAT_TFTP is not set
+CONFIG_NF_NAT_TFTP=m
CONFIG_NF_REJECT_IPV4=y
CONFIG_NF_REJECT_IPV6=m
# CONFIG_NF_TABLES is not set
@@ -1703,11 +1795,20 @@ CONFIG_NOP_TRACER=y
CONFIG_NO_BOOTMEM=y
CONFIG_NO_HZ=y
CONFIG_NO_HZ_COMMON=y
+# CONFIG_NO_HZ_FULL is not set
CONFIG_NO_HZ_IDLE=y
# CONFIG_NTFS_FS is not set
# CONFIG_NVM is not set
# CONFIG_N_GSM is not set
# CONFIG_OCFS2_FS is not set
+CONFIG_OF=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_GPIO=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_MDIO=y
+CONFIG_OF_NET=y
+# CONFIG_OF_OVERLAY is not set
+# CONFIG_OF_UNITTEST is not set
CONFIG_OID_REGISTRY=m
CONFIG_OLD_SIGSUSPEND3=y
# CONFIG_OMFS_FS is not set
@@ -1721,10 +1822,10 @@ CONFIG_PACKET=y
# CONFIG_PACKET_DIAG is not set
# CONFIG_PAGE_EXTENSION is not set
# CONFIG_PAGE_OWNER is not set
-# CONFIG_PANIC_ON_OOPS is not set
-CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_ON_OOPS_VALUE=1
# CONFIG_PANIC_ON_RT_THROTTLING is not set
-CONFIG_PANIC_TIMEOUT=0
+CONFIG_PANIC_TIMEOUT=-1
# CONFIG_PARPORT is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_PDA_POWER is not set
@@ -1737,6 +1838,9 @@ CONFIG_PHYLIB=y
# CONFIG_PHY_PXA_28NM_HSIC is not set
# CONFIG_PHY_PXA_28NM_USB2 is not set
CONFIG_PID_NS=y
+CONFIG_PINCONF=y
+CONFIG_PINCTRL=y
+# CONFIG_PINCTRL_SINGLE is not set
CONFIG_PKGLIST=y
# CONFIG_PKGLIST_NO_CONFIG is not set
CONFIG_PKGLIST_USE_CONFIGFS=y
@@ -1746,6 +1850,7 @@ CONFIG_PM=y
# CONFIG_PMIC_DA903X is not set
CONFIG_PM_ADVANCED_DEBUG=y
# CONFIG_PM_AUTOSLEEP is not set
+CONFIG_PM_CLK=y
CONFIG_PM_DEBUG=y
CONFIG_PM_SLEEP=y
CONFIG_PM_SLEEP_DEBUG=y
@@ -1805,6 +1910,7 @@ CONFIG_PSTORE_CONSOLE=y
CONFIG_PSTORE_RAM=y
CONFIG_PSTORE_ZLIB_COMPRESS=y
CONFIG_PTP_1588_CLOCK=m
+# CONFIG_QCA7000 is not set
# CONFIG_QFMT_V1 is not set
CONFIG_QFMT_V2=y
# CONFIG_QNX4FS_FS is not set
@@ -1821,6 +1927,7 @@ CONFIG_QUOTA_TREE=y
# CONFIG_R8723AU is not set
# CONFIG_RAID_ATTRS is not set
# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_RATIONAL=y
# CONFIG_RAW_DRIVER is not set
# CONFIG_RBTREE_TEST is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
@@ -1843,6 +1950,8 @@ CONFIG_REED_SOLOMON=y
CONFIG_REED_SOLOMON_DEC8=y
CONFIG_REED_SOLOMON_ENC8=y
CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_SPI=y
# CONFIG_REISERFS_FS is not set
CONFIG_RELAY=y
CONFIG_RFKILL=y
@@ -1859,13 +1968,14 @@ CONFIG_RING_BUFFER=y
# CONFIG_RPR0521 is not set
CONFIG_RPS=y
# CONFIG_RSI_91X is not set
+# CONFIG_RT2500USB is not set
CONFIG_RT2800USB=m
CONFIG_RT2800USB_RT33XX=y
-# CONFIG_RT2800USB_RT3573 is not set
+CONFIG_RT2800USB_RT3573=y
CONFIG_RT2800USB_RT35XX=y
-# CONFIG_RT2800USB_RT53XX is not set
-# CONFIG_RT2800USB_RT55XX is not set
-# CONFIG_RT2800USB_UNKNOWN is not set
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
CONFIG_RT2800_LIB=m
CONFIG_RT2X00=m
# CONFIG_RT2X00_DEBUG is not set
@@ -1875,6 +1985,7 @@ CONFIG_RT2X00_LIB_CRYPTO=y
CONFIG_RT2X00_LIB_FIRMWARE=y
CONFIG_RT2X00_LIB_LEDS=y
CONFIG_RT2X00_LIB_USB=m
+# CONFIG_RT73USB is not set
CONFIG_RTC_CLASS=y
# CONFIG_RTC_DEBUG is not set
# CONFIG_RTC_DRV_ABB5ZES3 is not set
@@ -1899,6 +2010,7 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_EM3027 is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_RTC_DRV_HYM8563 is not set
# CONFIG_RTC_DRV_ISL12022 is not set
# CONFIG_RTC_DRV_ISL12057 is not set
# CONFIG_RTC_DRV_ISL1208 is not set
@@ -1928,10 +2040,12 @@ CONFIG_RTC_CLASS=y
# CONFIG_RTC_DRV_RX8025 is not set
# CONFIG_RTC_DRV_RX8581 is not set
# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_SNVS is not set
# CONFIG_RTC_DRV_STK17TA8 is not set
# CONFIG_RTC_DRV_TEST is not set
# CONFIG_RTC_DRV_V3020 is not set
# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_ZYNQMP is not set
CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
CONFIG_RTC_INTF_PROC=y
@@ -1946,6 +2060,7 @@ CONFIG_RT_MUTEXES=y
CONFIG_RWSEM_SPIN_ON_OWNER=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_SAMPLES is not set
+# CONFIG_SCA3000 is not set
CONFIG_SCHEDSTATS=y
# CONFIG_SCHED_AUTOGROUP is not set
CONFIG_SCHED_DEBUG=y
@@ -1978,10 +2093,7 @@ CONFIG_SECCOMP_FILTER=y
CONFIG_SECURITY=y
CONFIG_SECURITYFS=y
# CONFIG_SECURITY_APPARMOR is not set
-CONFIG_SECURITY_CHROMIUMOS=y
CONFIG_SECURITY_CHROMIUMOS_DEVICE_JAIL=y
-CONFIG_SECURITY_CHROMIUMOS_NO_SYMLINK_MOUNT=y
-CONFIG_SECURITY_CHROMIUMOS_NO_UNPRIVILEGED_UNSAFE_MOUNTS=y
CONFIG_SECURITY_CHROMIUMOS_READONLY_PROC_SELF_MEM=y
# CONFIG_SECURITY_DMESG_RESTRICT is not set
CONFIG_SECURITY_NETWORK=y
@@ -2125,6 +2237,7 @@ CONFIG_SENSORS_TSL2563=m
# CONFIG_SERIAL_ALTERA_JTAGUART is not set
# CONFIG_SERIAL_ALTERA_UART is not set
# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
# CONFIG_SERIAL_FSL_LPUART is not set
# CONFIG_SERIAL_IFX6X60 is not set
# CONFIG_SERIAL_MAX3100 is not set
@@ -2133,6 +2246,7 @@ CONFIG_SENSORS_TSL2563=m
# CONFIG_SERIAL_SC16IS7XX is not set
# CONFIG_SERIAL_SCCNXP is not set
# CONFIG_SERIAL_UARTLITE is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
# CONFIG_SGI_PARTITION is not set
# CONFIG_SG_SPLIT is not set
CONFIG_SHMEM=y
@@ -2158,6 +2272,7 @@ CONFIG_SMP=y
# CONFIG_SMSC_PHY is not set
CONFIG_SND=y
# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_ATMEL_SOC is not set
# CONFIG_SND_BCD2000 is not set
# CONFIG_SND_DEBUG is not set
CONFIG_SND_DRIVERS=y
@@ -2174,6 +2289,7 @@ CONFIG_SND_MAX_CARDS=32
# CONFIG_SND_MTPAV is not set
# CONFIG_SND_OPL3_LIB_SEQ is not set
# CONFIG_SND_OPL4_LIB_SEQ is not set
+CONFIG_SND_PCM=y
# CONFIG_SND_PCM_OSS is not set
CONFIG_SND_PCM_TIMER=y
CONFIG_SND_PROC_FS=y
@@ -2185,8 +2301,77 @@ CONFIG_SND_SEQUENCER=m
CONFIG_SND_SEQ_DUMMY=m
CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
# CONFIG_SND_SERIAL_U16550 is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_SOC_AC97_CODEC is not set
+# CONFIG_SND_SOC_ADAU1701 is not set
+# CONFIG_SND_SOC_AK4104 is not set
+# CONFIG_SND_SOC_AK4554 is not set
+# CONFIG_SND_SOC_AK4613 is not set
+# CONFIG_SND_SOC_AK4642 is not set
+# CONFIG_SND_SOC_AK5386 is not set
+# CONFIG_SND_SOC_ALC5623 is not set
+# CONFIG_SND_SOC_CS35L32 is not set
+# CONFIG_SND_SOC_CS4265 is not set
+# CONFIG_SND_SOC_CS4270 is not set
+# CONFIG_SND_SOC_CS4271_I2C is not set
+# CONFIG_SND_SOC_CS4271_SPI is not set
+# CONFIG_SND_SOC_CS42L51_I2C is not set
+# CONFIG_SND_SOC_CS42L52 is not set
+# CONFIG_SND_SOC_CS42L56 is not set
+# CONFIG_SND_SOC_CS42L73 is not set
+# CONFIG_SND_SOC_CS42XX8_I2C is not set
+# CONFIG_SND_SOC_CS4349 is not set
+# CONFIG_SND_SOC_ES8328 is not set
+# CONFIG_SND_SOC_FSL_ASRC is not set
+# CONFIG_SND_SOC_FSL_ESAI is not set
+# CONFIG_SND_SOC_FSL_SAI is not set
+# CONFIG_SND_SOC_FSL_SPDIF is not set
+# CONFIG_SND_SOC_FSL_SSI is not set
+# CONFIG_SND_SOC_GTM601 is not set
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_IMX_AUDMUX is not set
+# CONFIG_SND_SOC_INNO_RK3036 is not set
+# CONFIG_SND_SOC_PCM1681 is not set
+# CONFIG_SND_SOC_PCM1792A is not set
+# CONFIG_SND_SOC_PCM512x_I2C is not set
+# CONFIG_SND_SOC_PCM512x_SPI is not set
+# CONFIG_SND_SOC_RT5631 is not set
+# CONFIG_SND_SOC_SGTL5000 is not set
+# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set
+# CONFIG_SND_SOC_SPDIF is not set
+# CONFIG_SND_SOC_SSM2602_I2C is not set
+# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_STA32X is not set
+# CONFIG_SND_SOC_STA350 is not set
+# CONFIG_SND_SOC_STI_SAS is not set
+# CONFIG_SND_SOC_TAS2552 is not set
+# CONFIG_SND_SOC_TAS5086 is not set
+# CONFIG_SND_SOC_TAS571X is not set
+# CONFIG_SND_SOC_TFA9879 is not set
+# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC31XX is not set
+# CONFIG_SND_SOC_TLV320AIC3X is not set
+# CONFIG_SND_SOC_TPA6130A2 is not set
+# CONFIG_SND_SOC_WM8510 is not set
+# CONFIG_SND_SOC_WM8523 is not set
+# CONFIG_SND_SOC_WM8580 is not set
+# CONFIG_SND_SOC_WM8711 is not set
+# CONFIG_SND_SOC_WM8728 is not set
+# CONFIG_SND_SOC_WM8731 is not set
+# CONFIG_SND_SOC_WM8737 is not set
+# CONFIG_SND_SOC_WM8741 is not set
+# CONFIG_SND_SOC_WM8750 is not set
+# CONFIG_SND_SOC_WM8770 is not set
+# CONFIG_SND_SOC_WM8776 is not set
+# CONFIG_SND_SOC_WM8804_I2C is not set
+# CONFIG_SND_SOC_WM8804_SPI is not set
+# CONFIG_SND_SOC_WM8962 is not set
+# CONFIG_SND_SOC_WM8978 is not set
+# CONFIG_SND_SOC_XTFPGA_I2S is not set
# CONFIG_SND_SPI is not set
+# CONFIG_SND_SUN4I_CODEC is not set
CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_TIMER=y
CONFIG_SND_USB=y
# CONFIG_SND_USB_6FIRE is not set
CONFIG_SND_USB_AUDIO=m
@@ -2215,6 +2400,7 @@ CONFIG_SPI=y
# CONFIG_SPI_CADENCE is not set
# CONFIG_SPI_DEBUG is not set
# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_FSL_SPI is not set
CONFIG_SPI_MASTER=y
# CONFIG_SPI_OC_TINY is not set
# CONFIG_SPI_SC18IS602 is not set
@@ -2246,12 +2432,16 @@ CONFIG_STACKTRACE=y
CONFIG_STACKTRACE_SUPPORT=y
# CONFIG_STACK_TRACER is not set
CONFIG_STAGING=y
+# CONFIG_STAGING_BOARD is not set
# CONFIG_STAGING_MEDIA is not set
CONFIG_STANDALONE=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH="/sbin/usermode-helper"
# CONFIG_STE10XP is not set
# CONFIG_STE_MODEM_RPROC is not set
# CONFIG_STK3310 is not set
# CONFIG_STK8312 is not set
+# CONFIG_STK8BA50 is not set
# CONFIG_STM is not set
# CONFIG_STM_DUMMY is not set
# CONFIG_STM_SOURCE_CONSOLE is not set
@@ -2272,12 +2462,14 @@ CONFIG_SW_SYNC=y
# CONFIG_SX9500 is not set
# CONFIG_SXGBE_ETH is not set
CONFIG_SYNC_FILE=y
+# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set
CONFIG_SYN_COOKIES=y
CONFIG_SYSCTL=y
# CONFIG_SYSCTL_SYSCALL is not set
CONFIG_SYSFS=y
# CONFIG_SYSFS_DEPRECATED is not set
CONFIG_SYSFS_SYSCALL=y
+# CONFIG_SYSTEMPORT is not set
# CONFIG_SYSTEM_DATA_VERIFICATION is not set
# CONFIG_SYSTEM_TRUSTED_KEYRING is not set
# CONFIG_SYSV68_PARTITION is not set
@@ -2293,8 +2485,11 @@ CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_TABLET_USB_HANWANG is not set
# CONFIG_TABLET_USB_KBTAB is not set
# CONFIG_TARGET_CORE is not set
-# CONFIG_TASKSTATS is not set
+CONFIG_TASKSTATS=y
# CONFIG_TASKS_RCU is not set
+# CONFIG_TASK_DELAY_ACCT is not set
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
# CONFIG_TCG_TIS_I2C_ATMEL is not set
# CONFIG_TCG_TIS_I2C_NUVOTON is not set
# CONFIG_TCG_TIS_ST33ZP24_I2C is not set
@@ -2326,8 +2521,7 @@ CONFIG_TEST_FIRMWARE=m
# CONFIG_TEST_HEXDUMP is not set
# CONFIG_TEST_KSTRTOX is not set
# CONFIG_TEST_LIST_SORT is not set
-# CONFIG_TEST_LKM is not set
-CONFIG_TEST_MODULE=m
+CONFIG_TEST_LKM=m
# CONFIG_TEST_POWER is not set
# CONFIG_TEST_PRINTF is not set
# CONFIG_TEST_RHASHTABLE is not set
@@ -2342,6 +2536,7 @@ CONFIG_THERMAL=y
CONFIG_THERMAL_GOV_STEP_WISE=y
CONFIG_THERMAL_GOV_USER_SPACE=y
CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
CONFIG_TICK_CPU_ACCOUNTING=y
CONFIG_TICK_ONESHOT=y
CONFIG_TIMERFD=y
@@ -2359,15 +2554,19 @@ CONFIG_TMPFS_XATTR=y
# CONFIG_TOUCHSCREEN_AD7877 is not set
# CONFIG_TOUCHSCREEN_AD7879 is not set
# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AR1021_I2C is not set
CONFIG_TOUCHSCREEN_ATMEL_MXT=y
# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set
# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
# CONFIG_TOUCHSCREEN_DYNAPRO is not set
# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_EGALAX is not set
+CONFIG_TOUCHSCREEN_ELAN=y
# CONFIG_TOUCHSCREEN_ELO is not set
# CONFIG_TOUCHSCREEN_FT6236 is not set
# CONFIG_TOUCHSCREEN_FUJITSU is not set
@@ -2375,6 +2574,7 @@ CONFIG_TOUCHSCREEN_ATMEL_MXT=y
# CONFIG_TOUCHSCREEN_GUNZE is not set
# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set
# CONFIG_TOUCHSCREEN_INEXIO is not set
# CONFIG_TOUCHSCREEN_MAX11801 is not set
# CONFIG_TOUCHSCREEN_MCS5000 is not set
@@ -2471,14 +2671,39 @@ CONFIG_USB_ACM=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
# CONFIG_USB_APPLEDISPLAY is not set
CONFIG_USB_ARCH_HAS_HCD=y
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_BDC_UDC is not set
# CONFIG_USB_C67X00_HCD is not set
# CONFIG_USB_CATC is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
# CONFIG_USB_CHAOSKEY is not set
# CONFIG_USB_CHIPIDEA is not set
CONFIG_USB_COMMON=y
+CONFIG_USB_CONFIGFS=m
+# CONFIG_USB_CONFIGFS_ACM is not set
+# CONFIG_USB_CONFIGFS_ECM is not set
+# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set
+# CONFIG_USB_CONFIGFS_EEM is not set
+# CONFIG_USB_CONFIGFS_F_ACC is not set
+CONFIG_USB_CONFIGFS_F_FS=y
+# CONFIG_USB_CONFIGFS_F_HID is not set
+# CONFIG_USB_CONFIGFS_F_LB_SS is not set
+# CONFIG_USB_CONFIGFS_F_MIDI is not set
+# CONFIG_USB_CONFIGFS_F_MTP is not set
+# CONFIG_USB_CONFIGFS_F_PRINTER is not set
+# CONFIG_USB_CONFIGFS_F_UAC1 is not set
+# CONFIG_USB_CONFIGFS_F_UAC2 is not set
+# CONFIG_USB_CONFIGFS_F_UVC is not set
+# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set
+# CONFIG_USB_CONFIGFS_NCM is not set
+# CONFIG_USB_CONFIGFS_OBEX is not set
+# CONFIG_USB_CONFIGFS_RNDIS is not set
+# CONFIG_USB_CONFIGFS_SERIAL is not set
+CONFIG_USB_CONFIGFS_UEVENT=y
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
# CONFIG_USB_DEFAULT_PERSIST is not set
+# CONFIG_USB_DUMMY_HCD is not set
# CONFIG_USB_DYNAMIC_MINORS is not set
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
@@ -2486,11 +2711,31 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
# CONFIG_USB_EHSET_TEST_FIXTURE is not set
# CONFIG_USB_EMI26 is not set
# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_ETH is not set
CONFIG_USB_EZUSB_FX2=m
# CONFIG_USB_FOTG210_HCD is not set
+# CONFIG_USB_FOTG210_UDC is not set
# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_FUNCTIONFS is not set
+CONFIG_USB_F_FS=m
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+CONFIG_USB_GADGET_VBUS_DRAW=2
+# CONFIG_USB_GADGET_XILINX is not set
# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_GR_UDC is not set
# CONFIG_USB_GSPCA is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_G_WEBCAM is not set
# CONFIG_USB_HCD_TEST_MODE is not set
CONFIG_USB_HID=y
CONFIG_USB_HIDDEV=y
@@ -2511,12 +2756,19 @@ CONFIG_USB_HIDDEV=y
# CONFIG_USB_LED is not set
# CONFIG_USB_LED_TRIG is not set
# CONFIG_USB_LEGOTOWER is not set
+CONFIG_USB_LIBCOMPOSITE=m
# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_MASS_STORAGE is not set
# CONFIG_USB_MAX3421_HCD is not set
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MIDI_GADGET is not set
CONFIG_USB_MON=y
# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_NET2272 is not set
CONFIG_USB_NET_AX88179_178A=m
CONFIG_USB_NET_AX8817X=m
CONFIG_USB_NET_CDCETHER=m
@@ -2551,6 +2803,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
CONFIG_USB_PEGASUS=m
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_PWC is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_R8A66597 is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_RIO500 is not set
CONFIG_USB_RTL8150=m
@@ -2655,6 +2909,7 @@ CONFIG_USB_VIDEO_CLASS=m
# CONFIG_USB_WUSB_CBAF is not set
# CONFIG_USB_YUREX is not set
# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_ZERO is not set
# CONFIG_USB_ZR364XX is not set
# CONFIG_USELIB is not set
# CONFIG_USERFAULTFD is not set
@@ -2664,10 +2919,12 @@ CONFIG_UTS_NS=y
CONFIG_V4L_TEST_DRIVERS=y
# CONFIG_VCNL4000 is not set
CONFIG_VETH=m
+# CONFIG_VF610_ADC is not set
CONFIG_VFAT_FS=m
# CONFIG_VGASTATE is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_VIA_VELOCITY is not set
+CONFIG_VIDEOBUF2_VMALLOC=m
# CONFIG_VIDEO_ADV_DEBUG is not set
# CONFIG_VIDEO_CPIA2 is not set
# CONFIG_VIDEO_EM28XX is not set
@@ -2675,6 +2932,7 @@ CONFIG_VFAT_FS=m
# CONFIG_VIDEO_USBTV is not set
# CONFIG_VIDEO_VIM2M is not set
# CONFIG_VIDEO_VIVID is not set
+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
# CONFIG_VIRT_DRIVERS is not set
# CONFIG_VITESSE_PHY is not set
# CONFIG_VLAN_8021Q is not set
@@ -2690,6 +2948,7 @@ CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_VZ89X is not set
# CONFIG_W1 is not set
# CONFIG_WAN is not set
+CONFIG_WANT_DEV_COREDUMP=y
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_CORE=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -2744,6 +3003,7 @@ CONFIG_XZ_DEC=y
# CONFIG_XZ_DEC_SPARC is not set
# CONFIG_XZ_DEC_TEST is not set
# CONFIG_ZBUD is not set
+# CONFIG_ZD1211RW is not set
CONFIG_ZISOFS=y
CONFIG_ZLIB_DEFLATE=y
CONFIG_ZLIB_INFLATE=y
diff --git a/chromeos/config/i386/chromeos-pinetrail-i386.flavour.config b/chromeos/config/i386/chromeos-pinetrail-i386.flavour.config
deleted file mode 100644
index afee03041f880e..00000000000000
--- a/chromeos/config/i386/chromeos-pinetrail-i386.flavour.config
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Config options generated by splitconfig
-#
-# CONFIG_8139CP is not set
-# CONFIG_8139TOO is not set
-# CONFIG_ACERHDF is not set
-CONFIG_ACPI_WMI=m
-# CONFIG_B43 is not set
-# CONFIG_BCMA is not set
-# CONFIG_BRCMFMAC is not set
-# CONFIG_BROADCOM_PHY is not set
-# CONFIG_CORDIC is not set
-# CONFIG_CRC8 is not set
-CONFIG_DMAR_TABLE=y
-CONFIG_DRM_CIRRUS_QEMU=m
-# CONFIG_DRM_GMA500 is not set
-# CONFIG_DRM_NOUVEAU is not set
-# CONFIG_DRM_RADEON is not set
-CONFIG_DRM_TTM=m
-# CONFIG_FB_BACKLIGHT is not set
-CONFIG_HAVE_INTEL_TXT=y
-CONFIG_INTEL_IOMMU=y
-CONFIG_INTEL_IOMMU_DEFAULT_ON=y
-CONFIG_INTEL_IOMMU_FLOPPY_WA=y
-# CONFIG_INTEL_IOMMU_SVM is not set
-# CONFIG_INTEL_TXT is not set
-CONFIG_IOMMU_API=y
-CONFIG_IOMMU_IOVA=y
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
-# CONFIG_IWL3945 is not set
-# CONFIG_IWL4965 is not set
-# CONFIG_IWLWIFI is not set
-# CONFIG_LIBERTAS is not set
-# CONFIG_MOUSE_BCM5974 is not set
-# CONFIG_MOUSE_SYNAPTICS_USB is not set
-# CONFIG_MWIFIEX is not set
-# CONFIG_MXM_WMI is not set
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-# CONFIG_RT2500USB is not set
-# CONFIG_RT73USB is not set
-# CONFIG_RTL8187 is not set
-# CONFIG_RTL8192CU is not set
-# CONFIG_RTL8192DE is not set
-# CONFIG_RTL8192SE is not set
-# CONFIG_RTLLIB is not set
-CONFIG_RTLWIFI_DEBUG=y
-# CONFIG_SKGE is not set
-# CONFIG_SKY2 is not set
-# CONFIG_SSB is not set
-# CONFIG_THERMAL_GOV_BANG_BANG is not set
-# CONFIG_VFIO is not set
-# CONFIG_X86_POWERNOW_K6 is not set
-# CONFIG_X86_POWERNOW_K7 is not set
-# CONFIG_X86_POWERNOW_K8 is not set
-# CONFIG_ZD1211RW is not set
diff --git a/chromeos/config/i386/chromiumos-i386.flavour.config b/chromeos/config/i386/chromiumos-i386.flavour.config
deleted file mode 100644
index d6bc13e0db54b5..00000000000000
--- a/chromeos/config/i386/chromiumos-i386.flavour.config
+++ /dev/null
@@ -1,149 +0,0 @@
-#
-# Config options generated by splitconfig
-#
-CONFIG_8139CP=m
-CONFIG_8139TOO=m
-# CONFIG_8139TOO_8129 is not set
-CONFIG_8139TOO_PIO=y
-# CONFIG_8139TOO_TUNE_TWISTER is not set
-# CONFIG_8139_OLD_RX_RESET is not set
-CONFIG_ACERHDF=m
-CONFIG_ACPI_WMI=y
-CONFIG_B43=m
-CONFIG_B43_BCMA=y
-CONFIG_B43_BCMA_PIO=y
-# CONFIG_B43_BUSES_BCMA is not set
-CONFIG_B43_BUSES_BCMA_AND_SSB=y
-# CONFIG_B43_BUSES_SSB is not set
-# CONFIG_B43_DEBUG is not set
-CONFIG_B43_HWRNG=y
-CONFIG_B43_LEDS=y
-CONFIG_B43_PCICORE_AUTOSELECT=y
-CONFIG_B43_PCI_AUTOSELECT=y
-CONFIG_B43_PHY_G=y
-CONFIG_B43_PHY_HT=y
-CONFIG_B43_PHY_LP=y
-CONFIG_B43_PHY_N=y
-CONFIG_B43_PIO=y
-CONFIG_B43_SDIO=y
-CONFIG_B43_SSB=y
-CONFIG_BCMA=m
-CONFIG_BCMA_BLOCKIO=y
-# CONFIG_BCMA_DEBUG is not set
-# CONFIG_BCMA_DRIVER_GMAC_CMN is not set
-# CONFIG_BCMA_DRIVER_GPIO is not set
-CONFIG_BCMA_DRIVER_PCI=y
-CONFIG_BCMA_HOST_PCI=y
-CONFIG_BCMA_HOST_PCI_POSSIBLE=y
-# CONFIG_BCMA_HOST_SOC is not set
-CONFIG_BCM_NET_PHYLIB=y
-# CONFIG_BRCMDBG is not set
-CONFIG_BRCMFMAC=m
-# CONFIG_BRCMFMAC_PCIE is not set
-CONFIG_BRCMFMAC_PROTO_BCDC=y
-CONFIG_BRCMFMAC_SDIO=y
-CONFIG_BRCMFMAC_USB=y
-CONFIG_BRCMUTIL=m
-# CONFIG_BRCM_TRACING is not set
-CONFIG_BROADCOM_PHY=y
-CONFIG_CFG80211_WEXT_EXPORT=y
-CONFIG_CORDIC=m
-CONFIG_CRC8=m
-CONFIG_DEV_COREDUMP=y
-# CONFIG_DRM_CIRRUS_QEMU is not set
-CONFIG_DRM_GMA3600=y
-CONFIG_DRM_GMA500=y
-CONFIG_DRM_GMA600=y
-CONFIG_DRM_NOUVEAU=y
-CONFIG_DRM_NOUVEAU_BACKLIGHT=y
-CONFIG_DRM_RADEON=m
-# CONFIG_DRM_RADEON_UMS is not set
-# CONFIG_DRM_RADEON_USERPTR is not set
-CONFIG_DRM_TTM=y
-CONFIG_FB_BACKLIGHT=y
-# CONFIG_INTEL_IOMMU is not set
-CONFIG_IPW2100=m
-# CONFIG_IPW2100_DEBUG is not set
-CONFIG_IPW2100_MONITOR=y
-CONFIG_IPW2200=m
-# CONFIG_IPW2200_DEBUG is not set
-CONFIG_IPW2200_MONITOR=y
-CONFIG_IPW2200_PROMISCUOUS=y
-# CONFIG_IPW2200_QOS is not set
-CONFIG_IPW2200_RADIOTAP=y
-CONFIG_IWL3945=m
-CONFIG_IWL4965=m
-CONFIG_IWLDVM=m
-CONFIG_IWLEGACY=m
-# CONFIG_IWLEGACY_DEBUG is not set
-# CONFIG_IWLEGACY_DEBUGFS is not set
-CONFIG_IWLMVM=m
-CONFIG_IWLWIFI=m
-# CONFIG_IWLWIFI_BCAST_FILTERING is not set
-# CONFIG_IWLWIFI_DEBUG is not set
-# CONFIG_IWLWIFI_DEBUGFS is not set
-# CONFIG_IWLWIFI_DEVICE_TRACING is not set
-CONFIG_IWLWIFI_LEDS=y
-CONFIG_IWLWIFI_OPMODE_MODULAR=y
-# CONFIG_IWLWIFI_UAPSD is not set
-CONFIG_LIBERTAS=m
-# CONFIG_LIBERTAS_DEBUG is not set
-# CONFIG_LIBERTAS_MESH is not set
-# CONFIG_LIBERTAS_SDIO is not set
-# CONFIG_LIBERTAS_SPI is not set
-# CONFIG_LIBERTAS_USB is not set
-CONFIG_LIBIPW=m
-# CONFIG_LIBIPW_DEBUG is not set
-CONFIG_MOUSE_BCM5974=m
-CONFIG_MOUSE_SYNAPTICS_USB=m
-CONFIG_MWIFIEX=m
-CONFIG_MWIFIEX_PCIE=m
-# CONFIG_MWIFIEX_SDIO is not set
-# CONFIG_MWIFIEX_USB is not set
-CONFIG_MXM_WMI=y
-# CONFIG_NF_CONNTRACK_PROCFS is not set
-CONFIG_NOUVEAU_DEBUG=5
-CONFIG_NOUVEAU_DEBUG_DEFAULT=3
-CONFIG_RT2500USB=m
-CONFIG_RT73USB=m
-CONFIG_RTL8187=m
-CONFIG_RTL8187_LEDS=y
-CONFIG_RTL8192CU=m
-CONFIG_RTL8192DE=m
-# CONFIG_RTL8192E is not set
-CONFIG_RTL8192SE=m
-CONFIG_RTLLIB=m
-CONFIG_RTLLIB_CRYPTO_CCMP=m
-CONFIG_RTLLIB_CRYPTO_TKIP=m
-CONFIG_RTLLIB_CRYPTO_WEP=m
-# CONFIG_RTLWIFI_DEBUG is not set
-CONFIG_RTLWIFI_USB=m
-CONFIG_SKGE=m
-# CONFIG_SKGE_DEBUG is not set
-# CONFIG_SKGE_GENESIS is not set
-CONFIG_SKY2=m
-# CONFIG_SKY2_DEBUG is not set
-CONFIG_SSB=m
-CONFIG_SSB_B43_PCI_BRIDGE=y
-CONFIG_SSB_BLOCKIO=y
-# CONFIG_SSB_DEBUG is not set
-# CONFIG_SSB_DRIVER_GPIO is not set
-CONFIG_SSB_DRIVER_PCICORE=y
-CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y
-# CONFIG_SSB_HOST_SOC is not set
-CONFIG_SSB_PCIHOST=y
-CONFIG_SSB_PCIHOST_POSSIBLE=y
-CONFIG_SSB_SDIOHOST=y
-CONFIG_SSB_SDIOHOST_POSSIBLE=y
-# CONFIG_SSB_SILENT is not set
-CONFIG_SSB_SPROM=y
-CONFIG_THERMAL_GOV_BANG_BANG=y
-# CONFIG_USB_HCD_BCMA is not set
-# CONFIG_USB_HCD_SSB is not set
-CONFIG_WANT_DEV_COREDUMP=y
-CONFIG_X86_POWERNOW_K6=y
-CONFIG_X86_POWERNOW_K7=y
-CONFIG_X86_POWERNOW_K7_ACPI=y
-CONFIG_X86_POWERNOW_K8=y
-CONFIG_ZD1211RW=m
-# CONFIG_ZD1211RW_DEBUG is not set
diff --git a/chromeos/config/i386/common.config b/chromeos/config/i386/common.config
deleted file mode 100644
index a4d5fef71eb817..00000000000000
--- a/chromeos/config/i386/common.config
+++ /dev/null
@@ -1,1345 +0,0 @@
-#
-# Config options generated by splitconfig
-#
-# CONFIG_60XX_WDT is not set
-# CONFIG_64BIT is not set
-# CONFIG_ACENIC is not set
-# CONFIG_ACER_WMI is not set
-CONFIG_ACPI=y
-CONFIG_ACPI_AC=y
-# CONFIG_ACPI_ALS is not set
-# CONFIG_ACPI_APEI is not set
-CONFIG_ACPI_BATTERY=y
-# CONFIG_ACPI_BGRT is not set
-CONFIG_ACPI_BUTTON=y
-CONFIG_ACPI_CHROMEOS=y
-# CONFIG_ACPI_CMPC is not set
-CONFIG_ACPI_CONTAINER=y
-CONFIG_ACPI_CPU_FREQ_PSS=y
-# CONFIG_ACPI_CUSTOM_DSDT is not set
-# CONFIG_ACPI_CUSTOM_METHOD is not set
-# CONFIG_ACPI_DEBUG is not set
-# CONFIG_ACPI_DEBUGGER is not set
-CONFIG_ACPI_DOCK=y
-# CONFIG_ACPI_EC_DEBUGFS is not set
-# CONFIG_ACPI_EXTLOG is not set
-CONFIG_ACPI_FAN=y
-# CONFIG_ACPI_HED is not set
-CONFIG_ACPI_HOTPLUG_CPU=y
-CONFIG_ACPI_HOTPLUG_IOAPIC=y
-CONFIG_ACPI_I2C_OPREGION=y
-# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set
-CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
-# CONFIG_ACPI_NFIT is not set
-# CONFIG_ACPI_PCI_SLOT is not set
-CONFIG_ACPI_PROCESSOR=y
-# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set
-CONFIG_ACPI_PROCESSOR_IDLE=y
-# CONFIG_ACPI_PROCFS_POWER is not set
-# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set
-CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
-# CONFIG_ACPI_SBS is not set
-CONFIG_ACPI_SLEEP=y
-CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
-CONFIG_ACPI_THERMAL=y
-# CONFIG_ACPI_TOSHIBA is not set
-CONFIG_ACPI_VIDEO=y
-# CONFIG_ACQUIRE_WDT is not set
-# CONFIG_ADAPTEC_STARFIRE is not set
-# CONFIG_ADM8211 is not set
-# CONFIG_ADVANTECH_WDT is not set
-CONFIG_AGP=y
-# CONFIG_AGP_ALI is not set
-# CONFIG_AGP_AMD is not set
-# CONFIG_AGP_AMD64 is not set
-# CONFIG_AGP_ATI is not set
-# CONFIG_AGP_EFFICEON is not set
-CONFIG_AGP_INTEL=y
-# CONFIG_AGP_NVIDIA is not set
-# CONFIG_AGP_SIS is not set
-# CONFIG_AGP_SWORKS is not set
-# CONFIG_AGP_VIA is not set
-# CONFIG_AIRO is not set
-# CONFIG_AK8975 is not set
-# CONFIG_ALIENWARE_WMI is not set
-# CONFIG_ALIM1535_WDT is not set
-# CONFIG_ALIM7101_WDT is not set
-# CONFIG_ALIX is not set
-CONFIG_ALT_SYSCALL=y
-CONFIG_ALT_SYSCALL_CHROMIUMOS=y
-CONFIG_ALX=m
-# CONFIG_AMD8111_ETH is not set
-CONFIG_AMD_NB=y
-# CONFIG_AMILO_RFKILL is not set
-CONFIG_ANDROID_BINDER_IPC_32BIT=y
-# CONFIG_APM is not set
-# CONFIG_APPLE_GMUX is not set
-# CONFIG_APPLICOM is not set
-# CONFIG_AR10K is not set
-CONFIG_ARCH_CLOCKSOURCE_DATA=y
-CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-CONFIG_ARCH_DISCARD_MEMBLOCK=y
-CONFIG_ARCH_DMA_ADDR_T_64BIT=y
-CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
-CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
-CONFIG_ARCH_FLATMEM_ENABLE=y
-CONFIG_ARCH_HAS_ALT_SYSCALL=y
-CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
-CONFIG_ARCH_HAS_CPU_RELAX=y
-CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
-CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
-CONFIG_ARCH_HAS_MMIO_FLUSH=y
-CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
-CONFIG_ARCH_HIBERNATION_POSSIBLE=y
-CONFIG_ARCH_MAY_HAVE_PC_FDC=y
-CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
-CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
-CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
-CONFIG_ARCH_MMAP_RND_BITS=8
-CONFIG_ARCH_MMAP_RND_BITS_MAX=16
-CONFIG_ARCH_MMAP_RND_BITS_MIN=8
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8
-# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set
-CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
-CONFIG_ARCH_RANDOM=y
-CONFIG_ARCH_SELECT_MEMORY_MODEL=y
-CONFIG_ARCH_SPARSEMEM_ENABLE=y
-CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
-CONFIG_ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT=y
-CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
-CONFIG_ARCH_SUPPORTS_UPROBES=y
-CONFIG_ARCH_USES_PG_UNCACHED=y
-CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
-CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
-CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
-CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
-CONFIG_ARCH_WANT_FRAME_POINTERS=y
-CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
-CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
-# CONFIG_ARCNET is not set
-# CONFIG_ASUS_LAPTOP is not set
-# CONFIG_ASUS_WMI is not set
-CONFIG_ATA=y
-CONFIG_ATA_ACPI=y
-CONFIG_ATA_BMDMA=y
-CONFIG_ATA_GENERIC=y
-# CONFIG_ATA_NONSTANDARD is not set
-CONFIG_ATA_PIIX=y
-CONFIG_ATA_SFF=y
-CONFIG_ATA_VERBOSE_ERROR=y
-CONFIG_ATH5K=m
-# CONFIG_ATH5K_DEBUG is not set
-CONFIG_ATH5K_PCI=y
-# CONFIG_ATH5K_TRACER is not set
-# CONFIG_ATH6KL is not set
-CONFIG_ATH9K=m
-# CONFIG_ATH9K_AHB is not set
-CONFIG_ATH9K_BTCOEX_SUPPORT=y
-# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
-CONFIG_ATH9K_COMMON=m
-CONFIG_ATH9K_DEBUGFS=y
-# CONFIG_ATH9K_DYNACK is not set
-CONFIG_ATH9K_HW=m
-CONFIG_ATH9K_PCI=y
-CONFIG_ATH9K_PCOEM=y
-# CONFIG_ATH9K_RFKILL is not set
-# CONFIG_ATH9K_STATION_STATISTICS is not set
-# CONFIG_ATH9K_WOW is not set
-CONFIG_ATH_COMMON=m
-CONFIG_ATH_DEBUG=y
-# CONFIG_ATH_TRACEPOINTS is not set
-# CONFIG_ATL1 is not set
-# CONFIG_ATL1C is not set
-# CONFIG_ATL1E is not set
-# CONFIG_ATL2 is not set
-# CONFIG_ATMEL is not set
-# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set
-CONFIG_AUDIT_GENERIC=y
-# CONFIG_BACKLIGHT_APPLE is not set
-# CONFIG_BACKLIGHT_ARCXCNN is not set
-# CONFIG_BACKLIGHT_SAHARA is not set
-# CONFIG_BATTERY_SBS is not set
-# CONFIG_BE2ISCSI is not set
-# CONFIG_BE2NET is not set
-# CONFIG_BINFMT_AOUT is not set
-# CONFIG_BLK_CPQ_CISS_DA is not set
-# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
-# CONFIG_BLK_DEV_DAC960 is not set
-# CONFIG_BLK_DEV_FD is not set
-# CONFIG_BLK_DEV_HD is not set
-CONFIG_BLK_DEV_INTEGRITY=y
-# CONFIG_BLK_DEV_NVME is not set
-# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
-# CONFIG_BLK_DEV_RSXX is not set
-# CONFIG_BLK_DEV_SX8 is not set
-# CONFIG_BLK_DEV_UMEM is not set
-# CONFIG_BNA is not set
-# CONFIG_BNX2 is not set
-# CONFIG_BNX2X is not set
-# CONFIG_BNXT is not set
-# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
-CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
-# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
-CONFIG_BT_ATH3K=m
-CONFIG_BT_HCIBCM203X=m
-# CONFIG_BT_HCIBTSDIO is not set
-# CONFIG_BT_HCIUART is not set
-# CONFIG_BT_MRVL is not set
-# CONFIG_CASSINI is not set
-# CONFIG_CB710_CORE is not set
-# CONFIG_CHARGER_BQ24735 is not set
-# CONFIG_CHARGER_GPIO is not set
-CONFIG_CHECK_SIGNATURE=y
-# CONFIG_CHELSIO_T1 is not set
-# CONFIG_CHELSIO_T3 is not set
-# CONFIG_CHELSIO_T4 is not set
-# CONFIG_CHELSIO_T4VF is not set
-CONFIG_CHROMEOS_LAPTOP=y
-CONFIG_CHROMEOS_PSTORE=y
-# CONFIG_CHROMEOS_TBMC is not set
-CONFIG_CLKBLD_I8253=y
-CONFIG_CLKEVT_I8253=y
-CONFIG_CLKSRC_I8253=y
-CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
-CONFIG_CLOCKSOURCE_WATCHDOG=y
-CONFIG_CLONE_BACKWARDS=y
-# CONFIG_CMDLINE_BOOL is not set
-# CONFIG_CNIC is not set
-# CONFIG_COMPAL_LAPTOP is not set
-# CONFIG_COMPAT_VDSO is not set
-# CONFIG_CPA_DEBUG is not set
-# CONFIG_CPU5_WDT is not set
-# CONFIG_CPU_BOOST is not set
-# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
-# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
-# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
-# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
-# CONFIG_CPU_FREQ_GOV_SCHED is not set
-CONFIG_CPU_SUP_AMD=y
-CONFIG_CPU_SUP_CENTAUR=y
-CONFIG_CPU_SUP_CYRIX_32=y
-CONFIG_CPU_SUP_INTEL=y
-CONFIG_CPU_SUP_TRANSMETA_32=y
-CONFIG_CPU_SUP_UMC_32=y
-# CONFIG_CRASH_DUMP is not set
-CONFIG_CRC_T10DIF=y
-# CONFIG_CROS_KBD_LED_BACKLIGHT is not set
-CONFIG_CRYPTO_ABLK_HELPER=y
-CONFIG_CRYPTO_AES_586=y
-CONFIG_CRYPTO_AES_NI_INTEL=y
-# CONFIG_CRYPTO_CRC32C_INTEL is not set
-# CONFIG_CRYPTO_CRC32_PCLMUL is not set
-CONFIG_CRYPTO_CRCT10DIF=y
-# CONFIG_CRYPTO_DEV_CCP is not set
-# CONFIG_CRYPTO_DEV_GEODE is not set
-# CONFIG_CRYPTO_DEV_PADLOCK is not set
-# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set
-# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set
-CONFIG_CRYPTO_LRW=y
-# CONFIG_CRYPTO_SALSA20_586 is not set
-# CONFIG_CRYPTO_SERPENT_SSE2_586 is not set
-# CONFIG_CRYPTO_TWOFISH_586 is not set
-# CONFIG_CX_ECAT is not set
-# CONFIG_DCDBAS is not set
-CONFIG_DEBUG_BOOT_PARAMS=y
-# CONFIG_DEBUG_ENTRY is not set
-# CONFIG_DEBUG_HIGHMEM is not set
-# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
-# CONFIG_DEBUG_NMI_SELFTEST is not set
-CONFIG_DEBUG_NX_TEST=m
-# CONFIG_DEBUG_RODATA_TEST is not set
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
-# CONFIG_DEBUG_TLBFLUSH is not set
-# CONFIG_DEBUG_VIRTUAL is not set
-# CONFIG_DEBUG_WX is not set
-CONFIG_DEFAULT_IO_DELAY_TYPE=1
-# CONFIG_DELL_RBTN is not set
-# CONFIG_DELL_RBU is not set
-# CONFIG_DELL_SMO8800 is not set
-CONFIG_DELL_WMI=m
-# CONFIG_DELL_WMI_AIO is not set
-CONFIG_DEVPORT=y
-# CONFIG_DGNC is not set
-# CONFIG_DL2K is not set
-# CONFIG_DMADEVICES is not set
-CONFIG_DMI=y
-CONFIG_DMIID=y
-CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
-# CONFIG_DMI_SYSFS is not set
-CONFIG_DOUBLEFAULT=y
-# CONFIG_DRM_AMDGPU is not set
-# CONFIG_DRM_AMD_ACP is not set
-# CONFIG_DRM_ANALOGIX_ANX7688 is not set
-# CONFIG_DRM_ANALOGIX_ANX78XX is not set
-# CONFIG_DRM_AST is not set
-# CONFIG_DRM_BOCHS is not set
-# CONFIG_DRM_DMA_SYNC is not set
-CONFIG_DRM_I915=y
-# CONFIG_DRM_I915_ALPHA_SUPPORT is not set
-# CONFIG_DRM_I915_DEBUG is not set
-# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set
-# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set
-CONFIG_DRM_I915_USERPTR=y
-# CONFIG_DRM_I915_WERROR is not set
-# CONFIG_DRM_MGA is not set
-# CONFIG_DRM_MGAG200 is not set
-CONFIG_DRM_MIPI_DSI=y
-# CONFIG_DRM_POWERVR_ROGUE_1_9 is not set
-# CONFIG_DRM_QXL is not set
-# CONFIG_DRM_R128 is not set
-# CONFIG_DRM_SAVAGE is not set
-# CONFIG_DRM_SIS is not set
-# CONFIG_DRM_TDFX is not set
-# CONFIG_DRM_VIA is not set
-# CONFIG_DRM_VIRTIO_GPU is not set
-# CONFIG_DRM_VMWGFX is not set
-CONFIG_DUMMY_CONSOLE_COLUMNS=80
-CONFIG_DUMMY_CONSOLE_ROWS=25
-# CONFIG_DW_WATCHDOG is not set
-CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_E100=m
-CONFIG_E1000=m
-CONFIG_E1000E=m
-CONFIG_EARLY_PRINTK=y
-CONFIG_EARLY_PRINTK_DBGP=y
-# CONFIG_EARLY_PRINTK_EFI is not set
-CONFIG_EDAC_ATOMIC_SCRUB=y
-# CONFIG_EDD is not set
-# CONFIG_EEEPC_LAPTOP is not set
-CONFIG_EEPROM_93CX6=m
-# CONFIG_EEPROM_AT24 is not set
-CONFIG_EFI=y
-# CONFIG_EFIVAR_FS is not set
-CONFIG_EFI_ESRT=y
-# CONFIG_EFI_FAKE_MEMMAP is not set
-# CONFIG_EFI_PGT_DUMP is not set
-CONFIG_EFI_RUNTIME_WRAPPERS=y
-# CONFIG_EFI_STUB is not set
-CONFIG_EFI_VARS=y
-CONFIG_EFI_VARS_PSTORE=y
-# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENIC is not set
-# CONFIG_EPIC100 is not set
-# CONFIG_ET131X is not set
-# CONFIG_EUROTECH_WDT is not set
-# CONFIG_EXTCON is not set
-# CONFIG_F71808E_WDT is not set
-# CONFIG_FB_3DFX is not set
-# CONFIG_FB_ARC is not set
-# CONFIG_FB_ARK is not set
-# CONFIG_FB_ASILIANT is not set
-# CONFIG_FB_ATY is not set
-# CONFIG_FB_ATY128 is not set
-# CONFIG_FB_CARMINE is not set
-# CONFIG_FB_CIRRUS is not set
-# CONFIG_FB_CYBER2000 is not set
-# CONFIG_FB_EFI is not set
-# CONFIG_FB_GEODE is not set
-# CONFIG_FB_HGA is not set
-# CONFIG_FB_I740 is not set
-# CONFIG_FB_I810 is not set
-# CONFIG_FB_IMSTT is not set
-# CONFIG_FB_KYRO is not set
-# CONFIG_FB_LE80578 is not set
-# CONFIG_FB_MATROX is not set
-# CONFIG_FB_MB862XX is not set
-# CONFIG_FB_N411 is not set
-# CONFIG_FB_NEOMAGIC is not set
-# CONFIG_FB_NVIDIA is not set
-# CONFIG_FB_PM2 is not set
-# CONFIG_FB_PM3 is not set
-# CONFIG_FB_RADEON is not set
-# CONFIG_FB_RIVA is not set
-# CONFIG_FB_S3 is not set
-# CONFIG_FB_SAVAGE is not set
-# CONFIG_FB_SIS is not set
-# CONFIG_FB_SM712 is not set
-# CONFIG_FB_SM750 is not set
-# CONFIG_FB_TRIDENT is not set
-# CONFIG_FB_VESA is not set
-# CONFIG_FB_VGA16 is not set
-# CONFIG_FB_VIA is not set
-# CONFIG_FB_VOODOO1 is not set
-# CONFIG_FB_VT8623 is not set
-# CONFIG_FB_XGI is not set
-# CONFIG_FDDI is not set
-# CONFIG_FEALNX is not set
-# CONFIG_FIREWIRE is not set
-# CONFIG_FIREWIRE_NOSY is not set
-CONFIG_FIRMWARE_MEMMAP=y
-# CONFIG_FLATMEM_MANUAL is not set
-# CONFIG_FM10K is not set
-# CONFIG_FORCEDETH is not set
-# CONFIG_FPC1020 is not set
-CONFIG_FRAME_WARN=2048
-# CONFIG_FS_DAX is not set
-CONFIG_FTRACE_SYSCALLS=y
-# CONFIG_FUJITSU_ES is not set
-# CONFIG_FUJITSU_LAPTOP is not set
-# CONFIG_FUJITSU_TABLET is not set
-# CONFIG_FUSION is not set
-CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y
-CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_GENERIC_CPU_AUTOPROBE=y
-CONFIG_GENERIC_CPU_VULNERABILITIES=y
-CONFIG_GENERIC_EARLY_IOREMAP=y
-CONFIG_GENERIC_FIND_FIRST_BIT=y
-CONFIG_GENERIC_IOMAP=y
-CONFIG_GENERIC_ISA_DMA=y
-CONFIG_GENERIC_MSI_IRQ=y
-CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
-CONFIG_GENERIC_PENDING_IRQ=y
-# CONFIG_GENERIC_PHY is not set
-# CONFIG_GEOS is not set
-CONFIG_GLOB=y
-# CONFIG_GLOB_SELFTEST is not set
-CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=y
-# CONFIG_GOOGLE_MEMCONSOLE_X86_LEGACY is not set
-CONFIG_GOOGLE_SMI=y
-# CONFIG_GPIO_104_IDIO_16 is not set
-CONFIG_GPIO_ACPI=y
-# CONFIG_GPIO_AMD8111 is not set
-# CONFIG_GPIO_AMDPT is not set
-# CONFIG_GPIO_BT8XX is not set
-# CONFIG_GPIO_F7188X is not set
-# CONFIG_GPIO_ICH is not set
-# CONFIG_GPIO_INTEL_MID is not set
-# CONFIG_GPIO_IT87 is not set
-# CONFIG_GPIO_LYNXPOINT is not set
-# CONFIG_GPIO_ML_IOH is not set
-# CONFIG_GPIO_PCH is not set
-# CONFIG_GPIO_RDC321X is not set
-# CONFIG_GPIO_SCH is not set
-# CONFIG_GPIO_SCH311X is not set
-# CONFIG_GPIO_VX855 is not set
-# CONFIG_HANGCHECK_TIMER is not set
-# CONFIG_HAPPYMEAL is not set
-CONFIG_HARDLOCKUP_DETECTOR_NMI=y
-CONFIG_HAS_IOPORT_MAP=y
-CONFIG_HAVE_ACPI_APEI=y
-CONFIG_HAVE_ACPI_APEI_NMI=y
-CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-CONFIG_HAVE_AOUT=y
-# CONFIG_HAVE_ARCH_BITREVERSE is not set
-CONFIG_HAVE_ARCH_HUGE_VMAP=y
-CONFIG_HAVE_ARCH_KMEMCHECK=y
-CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_ATOMIC_IOMAP=y
-CONFIG_HAVE_CMPXCHG_DOUBLE=y
-CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_COPY_THREAD_TLS=y
-CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
-CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
-CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y
-CONFIG_HAVE_GENERIC_DMA_COHERENT=y
-CONFIG_HAVE_IDE=y
-CONFIG_HAVE_IOREMAP_PROT=y
-CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
-CONFIG_HAVE_KERNEL_BZIP2=y
-CONFIG_HAVE_KERNEL_GZIP=y
-CONFIG_HAVE_KERNEL_LZ4=y
-CONFIG_HAVE_KERNEL_LZMA=y
-CONFIG_HAVE_KERNEL_LZO=y
-CONFIG_HAVE_KERNEL_XZ=y
-CONFIG_HAVE_KPROBES=y
-CONFIG_HAVE_KPROBES_ON_FTRACE=y
-CONFIG_HAVE_KRETPROBES=y
-CONFIG_HAVE_KVM=y
-CONFIG_HAVE_LATENCYTOP_SUPPORT=y
-CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
-CONFIG_HAVE_MEMORY_PRESENT=y
-CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
-CONFIG_HAVE_MMIOTRACE_SUPPORT=y
-CONFIG_HAVE_OPROFILE=y
-CONFIG_HAVE_OPTPROBES=y
-CONFIG_HAVE_PCSPKR_PLATFORM=y
-CONFIG_HAVE_PERF_EVENTS_NMI=y
-CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
-CONFIG_HAVE_SETUP_PER_CPU_AREA=y
-CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
-CONFIG_HAVE_USER_RETURN_NOTIFIER=y
-# CONFIG_HERMES is not set
-# CONFIG_HIBERNATION is not set
-# CONFIG_HID_GOOGLE_HAMMER is not set
-CONFIG_HID_PID=y
-CONFIG_HID_RMI=m
-CONFIG_HIGHMEM=y
-# CONFIG_HIGHMEM4G is not set
-CONFIG_HIGHMEM64G=y
-# CONFIG_HIGHPTE is not set
-# CONFIG_HIPPI is not set
-CONFIG_HOSTAP_PCI=m
-CONFIG_HOSTAP_PLX=m
-CONFIG_HOTPLUG_PCI=y
-# CONFIG_HOTPLUG_PCI_ACPI is not set
-# CONFIG_HOTPLUG_PCI_COMPAQ is not set
-# CONFIG_HOTPLUG_PCI_CPCI is not set
-# CONFIG_HOTPLUG_PCI_IBM is not set
-CONFIG_HOTPLUG_PCI_PCIE=y
-# CONFIG_HOTPLUG_PCI_SHPC is not set
-# CONFIG_HP100 is not set
-CONFIG_HPET=y
-CONFIG_HPET_EMULATE_RTC=y
-# CONFIG_HPET_MMAP is not set
-CONFIG_HPET_TIMER=y
-# CONFIG_HP_ACCEL is not set
-# CONFIG_HP_ILO is not set
-# CONFIG_HP_WATCHDOG is not set
-# CONFIG_HP_WIRELESS is not set
-CONFIG_HP_WMI=m
-CONFIG_HT_IRQ=y
-# CONFIG_HUGETLBFS is not set
-# CONFIG_HW_RANDOM_AMD is not set
-# CONFIG_HW_RANDOM_GEODE is not set
-CONFIG_HW_RANDOM_INTEL=y
-# CONFIG_HW_RANDOM_VIA is not set
-# CONFIG_HW_RANDOM_VIRTIO is not set
-# CONFIG_HYPERVISOR_GUEST is not set
-# CONFIG_I2C_ALI1535 is not set
-# CONFIG_I2C_ALI1563 is not set
-# CONFIG_I2C_ALI15X3 is not set
-# CONFIG_I2C_AMD756 is not set
-# CONFIG_I2C_AMD8111 is not set
-CONFIG_I2C_CHARDEV=m
-# CONFIG_I2C_DESIGNWARE_PCI is not set
-# CONFIG_I2C_EG20T is not set
-CONFIG_I2C_HID=m
-CONFIG_I2C_I801=y
-# CONFIG_I2C_ISCH is not set
-# CONFIG_I2C_ISMT is not set
-CONFIG_I2C_MUX=m
-# CONFIG_I2C_NFORCE2 is not set
-CONFIG_I2C_PIIX4=m
-# CONFIG_I2C_SCMI is not set
-# CONFIG_I2C_SIS5595 is not set
-# CONFIG_I2C_SIS630 is not set
-# CONFIG_I2C_SIS96X is not set
-# CONFIG_I2C_VIA is not set
-# CONFIG_I2C_VIAPRO is not set
-# CONFIG_I40E is not set
-# CONFIG_I40EVF is not set
-# CONFIG_I6300ESB_WDT is not set
-# CONFIG_I8K is not set
-# CONFIG_IB700_WDT is not set
-# CONFIG_IBMASR is not set
-# CONFIG_IBM_ASM is not set
-# CONFIG_IBM_RTL is not set
-# CONFIG_IDE is not set
-# CONFIG_IDEAPAD_LAPTOP is not set
-# CONFIG_IE6XX_WDT is not set
-# CONFIG_IGB is not set
-CONFIG_IGBVF=m
-CONFIG_IIO=m
-# CONFIG_IIO_BUFFER is not set
-# CONFIG_IIO_CROS_EC_ACCEL_LEGACY is not set
-# CONFIG_IIO_TRIGGER is not set
-CONFIG_ILLEGAL_POINTER_VALUE=0
-# CONFIG_INFINIBAND is not set
-# CONFIG_INPUT_APANEL is not set
-# CONFIG_INPUT_ATLAS_BTNS is not set
-# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
-# CONFIG_INPUT_MATRIXKMAP is not set
-# CONFIG_INPUT_POLLDEV is not set
-CONFIG_INPUT_SPARSEKMAP=m
-# CONFIG_INPUT_WISTRON_BTNS is not set
-CONFIG_INSTRUCTION_DECODER=y
-# CONFIG_INT340X_THERMAL is not set
-CONFIG_INTEL_GTT=y
-CONFIG_INTEL_IDLE=y
-# CONFIG_INTEL_IPS is not set
-# CONFIG_INTEL_MEI is not set
-# CONFIG_INTEL_MEI_ME is not set
-# CONFIG_INTEL_MEI_TXE is not set
-CONFIG_INTEL_MENLOW=m
-# CONFIG_INTEL_OAKTRAIL is not set
-# CONFIG_INTEL_PCH_THERMAL is not set
-CONFIG_INTEL_PMC_CORE=y
-# CONFIG_INTEL_PMC_IPC is not set
-# CONFIG_INTEL_POWERCLAMP is not set
-# CONFIG_INTEL_PUNIT_IPC is not set
-# CONFIG_INTEL_RST is not set
-# CONFIG_INTEL_SMARTCONNECT is not set
-CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
-CONFIG_INTEL_SOC_DTS_THERMAL=m
-# CONFIG_INTEL_VBTN is not set
-CONFIG_INTERVAL_TREE=y
-# CONFIG_IOMMU_STRESS is not set
-CONFIG_ION_POOL_CACHE_POLICY=y
-CONFIG_IOSF_MBI=m
-# CONFIG_IOSF_MBI_DEBUG is not set
-# CONFIG_IO_DELAY_0X80 is not set
-CONFIG_IO_DELAY_0XED=y
-# CONFIG_IO_DELAY_NONE is not set
-CONFIG_IO_DELAY_TYPE_0X80=0
-CONFIG_IO_DELAY_TYPE_0XED=1
-CONFIG_IO_DELAY_TYPE_NONE=3
-CONFIG_IO_DELAY_TYPE_UDELAY=2
-# CONFIG_IO_DELAY_UDELAY is not set
-# CONFIG_IRQ_TIME_ACCOUNTING is not set
-# CONFIG_ISA is not set
-CONFIG_ISA_DMA_API=y
-# CONFIG_ISCSI_IBFT_FIND is not set
-# CONFIG_IT8712F_WDT is not set
-# CONFIG_IT87_WDT is not set
-CONFIG_ITCO_VENDOR_SUPPORT=y
-CONFIG_ITCO_WDT=y
-# CONFIG_IWL7000 is not set
-# CONFIG_IXGB is not set
-# CONFIG_IXGBE is not set
-# CONFIG_IXGBEVF is not set
-CONFIG_JME=m
-# CONFIG_JOYSTICK_IFORCE_232 is not set
-# CONFIG_KERNEL_BZIP2 is not set
-CONFIG_KERNEL_GZIP=y
-# CONFIG_KERNEL_LZ4 is not set
-# CONFIG_KERNEL_LZMA is not set
-# CONFIG_KERNEL_LZO is not set
-# CONFIG_KERNEL_XZ is not set
-# CONFIG_KEXEC is not set
-CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_GPIO_POLLED is not set
-# CONFIG_KPROBES is not set
-# CONFIG_KSZ884X_PCI is not set
-# CONFIG_KVM is not set
-# CONFIG_LATENCYTOP is not set
-CONFIG_LBDAF=y
-# CONFIG_LEDS_CLEVO_MAIL is not set
-# CONFIG_LEDS_DELL_NETBOOKS is not set
-# CONFIG_LEDS_GPIO is not set
-# CONFIG_LEDS_INTEL_SS4200 is not set
-# CONFIG_LEDS_OT200 is not set
-# CONFIG_LGUEST is not set
-CONFIG_LIBERTAS_THINFIRM_USB=m
-# CONFIG_LIBNVDIMM is not set
-CONFIG_LPC_ICH=y
-CONFIG_LPC_SCH=m
-# CONFIG_M486 is not set
-# CONFIG_M586 is not set
-# CONFIG_M586MMX is not set
-# CONFIG_M586TSC is not set
-CONFIG_M686=y
-# CONFIG_MACHZ_WDT is not set
-# CONFIG_MACINTOSH_DRIVERS is not set
-# CONFIG_MAILBOX is not set
-# CONFIG_MATH_EMULATION is not set
-# CONFIG_MATOM is not set
-# CONFIG_MCORE2 is not set
-# CONFIG_MCRUSOE is not set
-# CONFIG_MCYRIXIII is not set
-CONFIG_MDIO=m
-# CONFIG_MEDIA_CONTROLLER is not set
-# CONFIG_MEDIA_PCI_SUPPORT is not set
-CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
-CONFIG_MEDIA_SUPPORT=m
-# CONFIG_MEFFICEON is not set
-# CONFIG_MEGARAID_LEGACY is not set
-# CONFIG_MEGARAID_NEWGEN is not set
-# CONFIG_MEGARAID_SAS is not set
-# CONFIG_MELAN is not set
-# CONFIG_MEMORY is not set
-# CONFIG_MEMORY_HOTPLUG is not set
-# CONFIG_MFD_CROS_EC is not set
-# CONFIG_MFD_CS5535 is not set
-# CONFIG_MFD_INTEL_LPSS_ACPI is not set
-# CONFIG_MFD_INTEL_LPSS_PCI is not set
-# CONFIG_MFD_JANZ_CMODIO is not set
-# CONFIG_MFD_MT6397 is not set
-# CONFIG_MFD_PALMAS is not set
-# CONFIG_MFD_RDC321X is not set
-CONFIG_MFD_RTSX_PCI=m
-# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TIMBERDALE is not set
-# CONFIG_MFD_TPS65090 is not set
-# CONFIG_MFD_TPS6586X is not set
-# CONFIG_MFD_TPS68470 is not set
-# CONFIG_MFD_VX855 is not set
-# CONFIG_MGEODEGX1 is not set
-# CONFIG_MGEODE_LX is not set
-CONFIG_MICROCODE=y
-CONFIG_MICROCODE_AMD=y
-CONFIG_MICROCODE_INTEL=y
-CONFIG_MICROCODE_OLD_INTERFACE=y
-# CONFIG_MK6 is not set
-# CONFIG_MK7 is not set
-# CONFIG_MK8 is not set
-# CONFIG_MLX4_CORE is not set
-# CONFIG_MLX4_EN is not set
-# CONFIG_MLX5_CORE is not set
-# CONFIG_MLXSW_CORE is not set
-# CONFIG_MMC_CB710 is not set
-# CONFIG_MMC_MTK is not set
-CONFIG_MMC_REALTEK_PCI=m
-# CONFIG_MMC_RICOH_MMC is not set
-# CONFIG_MMC_SDHCI_ACPI is not set
-CONFIG_MMC_SDHCI_PCI=y
-# CONFIG_MMC_SDHCI_PLTFM is not set
-# CONFIG_MMC_TIFM_SD is not set
-# CONFIG_MMC_TOSHIBA_PCI is not set
-# CONFIG_MMC_VIA_SDMMC is not set
-# CONFIG_MMC_WBSD is not set
-# CONFIG_MMIOTRACE is not set
-CONFIG_MMU_NOTIFIER=y
-CONFIG_MODIFY_LDT_SYSCALL=y
-CONFIG_MODULES_USE_ELF_REL=y
-CONFIG_MOUSE_APPLETOUCH=m
-# CONFIG_MOUSE_CENTROIDING is not set
-# CONFIG_MOUSE_ELAN_I2C is not set
-CONFIG_MOUSE_PS2=y
-# CONFIG_MOUSE_PS2_ALPS is not set
-# CONFIG_MOUSE_PS2_CYPRESS is not set
-# CONFIG_MOUSE_PS2_ELANTECH is not set
-# CONFIG_MOUSE_PS2_FOCALTECH is not set
-# CONFIG_MOUSE_PS2_LIFEBOOK is not set
-# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
-# CONFIG_MOUSE_PS2_SENTELIC is not set
-# CONFIG_MOUSE_PS2_SYNAPTICS is not set
-# CONFIG_MOUSE_PS2_TOUCHKIT is not set
-# CONFIG_MOUSE_PS2_TRACKPOINT is not set
-# CONFIG_MPENTIUM4 is not set
-# CONFIG_MPENTIUMII is not set
-# CONFIG_MPENTIUMIII is not set
-# CONFIG_MPENTIUMM is not set
-# CONFIG_MSI_LAPTOP is not set
-# CONFIG_MSI_WMI is not set
-# CONFIG_MTD is not set
-CONFIG_MTRR=y
-# CONFIG_MTRR_SANITIZER is not set
-# CONFIG_MVIAC3_2 is not set
-# CONFIG_MVIAC7 is not set
-# CONFIG_MWAVE is not set
-# CONFIG_MWINCHIP3D is not set
-# CONFIG_MWINCHIPC6 is not set
-# CONFIG_MWL8K is not set
-# CONFIG_MYRI10GE is not set
-# CONFIG_NATSEMI is not set
-# CONFIG_NE2K_PCI is not set
-CONFIG_NEED_NODE_MEMMAP_SIZE=y
-CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
-CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
-# CONFIG_NET5501 is not set
-# CONFIG_NETXEN_NIC is not set
-# CONFIG_NET_FC is not set
-# CONFIG_NET_PACKET_ENGINE is not set
-# CONFIG_NET_SB1000 is not set
-# CONFIG_NET_TULIP is not set
-CONFIG_NET_VENDOR_3COM=y
-CONFIG_NET_VENDOR_ADAPTEC=y
-CONFIG_NET_VENDOR_AGERE=y
-CONFIG_NET_VENDOR_ALTEON=y
-CONFIG_NET_VENDOR_AMD=y
-CONFIG_NET_VENDOR_ATHEROS=y
-CONFIG_NET_VENDOR_BROCADE=y
-CONFIG_NET_VENDOR_CAVIUM=y
-CONFIG_NET_VENDOR_CHELSIO=y
-CONFIG_NET_VENDOR_CISCO=y
-CONFIG_NET_VENDOR_DEC=y
-CONFIG_NET_VENDOR_DLINK=y
-CONFIG_NET_VENDOR_EMULEX=y
-CONFIG_NET_VENDOR_EXAR=y
-CONFIG_NET_VENDOR_HP=y
-CONFIG_NET_VENDOR_MELLANOX=y
-CONFIG_NET_VENDOR_MYRI=y
-CONFIG_NET_VENDOR_NVIDIA=y
-CONFIG_NET_VENDOR_OKI=y
-CONFIG_NET_VENDOR_QLOGIC=y
-CONFIG_NET_VENDOR_RDC=y
-CONFIG_NET_VENDOR_REALTEK=y
-CONFIG_NET_VENDOR_SILAN=y
-CONFIG_NET_VENDOR_SIS=y
-CONFIG_NET_VENDOR_SUN=y
-CONFIG_NET_VENDOR_TEHUTI=y
-CONFIG_NET_VENDOR_TI=y
-# CONFIG_NIU is not set
-# CONFIG_NOHIGHMEM is not set
-# CONFIG_NOZOMI is not set
-CONFIG_NR_CPUS=4
-# CONFIG_NS83820 is not set
-# CONFIG_NSC_GPIO is not set
-# CONFIG_NTB is not set
-# CONFIG_NVMEM is not set
-CONFIG_NVRAM=y
-# CONFIG_NV_TCO is not set
-# CONFIG_OF is not set
-CONFIG_OLD_SIGACTION=y
-# CONFIG_OPROFILE is not set
-CONFIG_OPROFILE_NMI_TIMER=y
-CONFIG_OPTIMIZE_INLINING=y
-CONFIG_OUTPUT_FORMAT="elf32-i386"
-CONFIG_PAGE_OFFSET=0x80000000
-# CONFIG_PANASONIC_LAPTOP is not set
-# CONFIG_PATA_ACPI is not set
-# CONFIG_PATA_ALI is not set
-# CONFIG_PATA_AMD is not set
-# CONFIG_PATA_ARTOP is not set
-# CONFIG_PATA_ATIIXP is not set
-# CONFIG_PATA_ATP867X is not set
-# CONFIG_PATA_CMD640_PCI is not set
-# CONFIG_PATA_CMD64X is not set
-# CONFIG_PATA_CS5520 is not set
-# CONFIG_PATA_CS5530 is not set
-# CONFIG_PATA_CS5535 is not set
-# CONFIG_PATA_CS5536 is not set
-# CONFIG_PATA_CYPRESS is not set
-# CONFIG_PATA_EFAR is not set
-# CONFIG_PATA_HPT366 is not set
-# CONFIG_PATA_HPT37X is not set
-# CONFIG_PATA_HPT3X2N is not set
-# CONFIG_PATA_HPT3X3 is not set
-# CONFIG_PATA_IT8213 is not set
-# CONFIG_PATA_IT821X is not set
-# CONFIG_PATA_JMICRON is not set
-# CONFIG_PATA_LEGACY is not set
-# CONFIG_PATA_MARVELL is not set
-# CONFIG_PATA_MPIIX is not set
-# CONFIG_PATA_NETCELL is not set
-# CONFIG_PATA_NINJA32 is not set
-# CONFIG_PATA_NS87410 is not set
-# CONFIG_PATA_NS87415 is not set
-# CONFIG_PATA_OLDPIIX is not set
-# CONFIG_PATA_OPTI is not set
-# CONFIG_PATA_OPTIDMA is not set
-# CONFIG_PATA_PDC2027X is not set
-# CONFIG_PATA_PDC_OLD is not set
-# CONFIG_PATA_PLATFORM is not set
-# CONFIG_PATA_RADISYS is not set
-# CONFIG_PATA_RDC is not set
-# CONFIG_PATA_RZ1000 is not set
-# CONFIG_PATA_SC1200 is not set
-# CONFIG_PATA_SCH is not set
-# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_SIL680 is not set
-# CONFIG_PATA_SIS is not set
-# CONFIG_PATA_TOSHIBA is not set
-# CONFIG_PATA_TRIFLEX is not set
-# CONFIG_PATA_VIA is not set
-# CONFIG_PATA_WINBOND is not set
-# CONFIG_PC8736x_GPIO is not set
-# CONFIG_PC87413_WDT is not set
-# CONFIG_PCCARD is not set
-# CONFIG_PCH_GBE is not set
-# CONFIG_PCH_PHUB is not set
-CONFIG_PCI=y
-CONFIG_PCIEAER=y
-# CONFIG_PCIEAER_INJECT is not set
-CONFIG_PCIEASPM=y
-# CONFIG_PCIEASPM_DEBUG is not set
-CONFIG_PCIEASPM_DEFAULT=y
-# CONFIG_PCIEASPM_PERFORMANCE is not set
-# CONFIG_PCIEASPM_POWERSAVE is not set
-CONFIG_PCIEPORTBUS=y
-# CONFIG_PCIE_ECRC is not set
-CONFIG_PCIE_PME=y
-# CONFIG_PCIPCWATCHDOG is not set
-CONFIG_PCI_BIOS=y
-CONFIG_PCI_BUS_ADDR_T_64BIT=y
-# CONFIG_PCI_CNB20LE_QUIRK is not set
-# CONFIG_PCI_DEBUG is not set
-CONFIG_PCI_DIRECT=y
-CONFIG_PCI_DOMAINS=y
-CONFIG_PCI_GOANY=y
-# CONFIG_PCI_GOBIOS is not set
-# CONFIG_PCI_GODIRECT is not set
-# CONFIG_PCI_GOMMCONFIG is not set
-# CONFIG_PCI_IOV is not set
-CONFIG_PCI_LABEL=y
-CONFIG_PCI_MMCONFIG=y
-CONFIG_PCI_MSI=y
-CONFIG_PCI_MSI_IRQ_DOMAIN=y
-# CONFIG_PCI_PASID is not set
-# CONFIG_PCI_PRI is not set
-CONFIG_PCI_QUIRKS=y
-# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
-# CONFIG_PCI_STUB is not set
-# CONFIG_PCNET32 is not set
-# CONFIG_PCSPKR_PLATFORM is not set
-# CONFIG_PDC_ADMA is not set
-CONFIG_PERF_EVENTS_INTEL_UNCORE=y
-CONFIG_PGTABLE_LEVELS=3
-# CONFIG_PHANTOM is not set
-CONFIG_PHYSICAL_ALIGN=0x200000
-CONFIG_PHYSICAL_START=0x1000000
-CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_PMC_ATOM=y
-# CONFIG_PMIC_OPREGION is not set
-# CONFIG_PM_DEVFREQ is not set
-CONFIG_PM_TRACE=y
-CONFIG_PM_TRACE_RTC=y
-CONFIG_PNP=y
-CONFIG_PNPACPI=y
-CONFIG_PNP_DEBUG_MESSAGES=y
-# CONFIG_POWERCAP is not set
-# CONFIG_POWER_AVS is not set
-# CONFIG_POWER_RESET is not set
-CONFIG_PPS=m
-# CONFIG_PRISM54 is not set
-# CONFIG_PROCESSOR_SELECT is not set
-CONFIG_PROC_KCORE=y
-CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
-# CONFIG_PTP_1588_CLOCK_PCH is not set
-# CONFIG_PUNIT_ATOM_DEBUG is not set
-# CONFIG_PVPANIC is not set
-# CONFIG_PWM is not set
-# CONFIG_QED is not set
-# CONFIG_QLA3XXX is not set
-# CONFIG_QLCNIC is not set
-# CONFIG_QLGE is not set
-CONFIG_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
-# CONFIG_R6040 is not set
-CONFIG_R8169=m
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x20000000
-# CONFIG_RAPIDIO is not set
-CONFIG_RAS=y
-CONFIG_REALTEK_AUTOPM=y
-CONFIG_REGMAP_I2C=m
-# CONFIG_REGULATOR is not set
-CONFIG_RELOCATABLE=y
-# CONFIG_RESET_CONTROLLER is not set
-CONFIG_RETPOLINE=y
-# CONFIG_RT2400PCI is not set
-# CONFIG_RT2500PCI is not set
-CONFIG_RT2800PCI=m
-CONFIG_RT2800PCI_RT3290=y
-CONFIG_RT2800PCI_RT33XX=y
-CONFIG_RT2800PCI_RT35XX=y
-CONFIG_RT2800PCI_RT53XX=y
-CONFIG_RT2800_LIB_MMIO=m
-CONFIG_RT2X00_LIB_MMIO=m
-CONFIG_RT2X00_LIB_PCI=m
-# CONFIG_RT61PCI is not set
-CONFIG_RTC_DRV_CMOS=y
-# CONFIG_RTC_HCTOSYS is not set
-# CONFIG_RTL8180 is not set
-# CONFIG_RTL8188EE is not set
-CONFIG_RTL8192CE=m
-CONFIG_RTL8192C_COMMON=m
-# CONFIG_RTL8192EE is not set
-# CONFIG_RTL8192U is not set
-# CONFIG_RTL8723AE is not set
-# CONFIG_RTL8723BE is not set
-# CONFIG_RTL8821AE is not set
-CONFIG_RTLWIFI=m
-CONFIG_RTLWIFI_PCI=m
-# CONFIG_RTS5208 is not set
-# CONFIG_S2IO is not set
-# CONFIG_SAMSUNG_LAPTOP is not set
-# CONFIG_SAMSUNG_Q10 is not set
-# CONFIG_SATA_ACARD_AHCI is not set
-CONFIG_SATA_AHCI=y
-# CONFIG_SATA_AHCI_PLATFORM is not set
-# CONFIG_SATA_INIC162X is not set
-# CONFIG_SATA_MV is not set
-# CONFIG_SATA_NV is not set
-# CONFIG_SATA_PMP is not set
-# CONFIG_SATA_PROMISE is not set
-# CONFIG_SATA_QSTOR is not set
-# CONFIG_SATA_SIL is not set
-# CONFIG_SATA_SIL24 is not set
-# CONFIG_SATA_SIS is not set
-# CONFIG_SATA_SVW is not set
-# CONFIG_SATA_SX4 is not set
-# CONFIG_SATA_ULI is not set
-# CONFIG_SATA_VIA is not set
-# CONFIG_SATA_VITESSE is not set
-# CONFIG_SATA_ZPODD is not set
-# CONFIG_SBC7240_WDT is not set
-# CONFIG_SBC8360_WDT is not set
-# CONFIG_SBC_EPX_C3_WATCHDOG is not set
-# CONFIG_SBC_FITPC2_WATCHDOG is not set
-# CONFIG_SC1200_WDT is not set
-# CONFIG_SC92031 is not set
-CONFIG_SCHED_MC=y
-CONFIG_SCHED_OMIT_FRAME_POINTER=y
-CONFIG_SCHED_SMT=y
-# CONFIG_SCHED_TUNE is not set
-# CONFIG_SCHED_WALT is not set
-# CONFIG_SCSI_3W_9XXX is not set
-# CONFIG_SCSI_3W_SAS is not set
-# CONFIG_SCSI_AACRAID is not set
-# CONFIG_SCSI_ACARD is not set
-# CONFIG_SCSI_ADVANSYS is not set
-# CONFIG_SCSI_AIC79XX is not set
-# CONFIG_SCSI_AIC7XXX is not set
-# CONFIG_SCSI_AIC94XX is not set
-# CONFIG_SCSI_AM53C974 is not set
-# CONFIG_SCSI_ARCMSR is not set
-# CONFIG_SCSI_BNX2_ISCSI is not set
-# CONFIG_SCSI_BUSLOGIC is not set
-CONFIG_SCSI_CONSTANTS=y
-# CONFIG_SCSI_CXGB3_ISCSI is not set
-# CONFIG_SCSI_CXGB4_ISCSI is not set
-# CONFIG_SCSI_DC395x is not set
-# CONFIG_SCSI_DMX3191D is not set
-# CONFIG_SCSI_DPT_I2O is not set
-# CONFIG_SCSI_EATA is not set
-# CONFIG_SCSI_ESAS2R is not set
-# CONFIG_SCSI_FUTURE_DOMAIN is not set
-# CONFIG_SCSI_GDTH is not set
-# CONFIG_SCSI_HPSA is not set
-# CONFIG_SCSI_HPTIOP is not set
-# CONFIG_SCSI_INIA100 is not set
-# CONFIG_SCSI_INITIO is not set
-# CONFIG_SCSI_IPR is not set
-# CONFIG_SCSI_IPS is not set
-# CONFIG_SCSI_ISCI is not set
-# CONFIG_SCSI_MPT2SAS is not set
-# CONFIG_SCSI_MPT3SAS is not set
-# CONFIG_SCSI_MVSAS is not set
-# CONFIG_SCSI_MVUMI is not set
-# CONFIG_SCSI_NSP32 is not set
-# CONFIG_SCSI_PM8001 is not set
-# CONFIG_SCSI_PMCRAID is not set
-# CONFIG_SCSI_QLA_ISCSI is not set
-# CONFIG_SCSI_QLOGIC_1280 is not set
-# CONFIG_SCSI_SCAN_ASYNC is not set
-# CONFIG_SCSI_SNIC is not set
-# CONFIG_SCSI_STEX is not set
-# CONFIG_SCSI_SYM53C8XX_2 is not set
-CONFIG_SCSI_VIRTIO=y
-# CONFIG_SCSI_WD719X is not set
-# CONFIG_SCx200 is not set
-# CONFIG_SCx200_ACB is not set
-CONFIG_SELECT_MEMORY_MODEL=y
-# CONFIG_SENSORS_ABITUGURU is not set
-# CONFIG_SENSORS_ABITUGURU3 is not set
-# CONFIG_SENSORS_ACPI_POWER is not set
-# CONFIG_SENSORS_APPLESMC is not set
-# CONFIG_SENSORS_ASB100 is not set
-# CONFIG_SENSORS_ATK0110 is not set
-CONFIG_SENSORS_CORETEMP=y
-# CONFIG_SENSORS_DELL_SMM is not set
-# CONFIG_SENSORS_FAM15H_POWER is not set
-# CONFIG_SENSORS_FSCHMD is not set
-# CONFIG_SENSORS_HDAPS is not set
-# CONFIG_SENSORS_I5500 is not set
-# CONFIG_SENSORS_I5K_AMB is not set
-# CONFIG_SENSORS_K10TEMP is not set
-# CONFIG_SENSORS_K8TEMP is not set
-# CONFIG_SENSORS_LM90 is not set
-# CONFIG_SENSORS_NTC_THERMISTOR is not set
-# CONFIG_SENSORS_SIS5595 is not set
-# CONFIG_SENSORS_TMP401 is not set
-# CONFIG_SENSORS_VIA686A is not set
-# CONFIG_SENSORS_VIA_CPUTEMP is not set
-# CONFIG_SENSORS_VT8231 is not set
-# CONFIG_SERIAL_8250 is not set
-# CONFIG_SERIAL_JSM is not set
-# CONFIG_SERIAL_PCH_UART is not set
-# CONFIG_SERIAL_RP2 is not set
-# CONFIG_SERIAL_TIMBERDALE is not set
-CONFIG_SERIO=y
-# CONFIG_SERIO_ALTERA_PS2 is not set
-# CONFIG_SERIO_ARC_PS2 is not set
-# CONFIG_SERIO_CT82C710 is not set
-CONFIG_SERIO_I8042=y
-CONFIG_SERIO_LIBPS2=y
-# CONFIG_SERIO_PCIPS2 is not set
-# CONFIG_SERIO_PS2MULT is not set
-# CONFIG_SERIO_RAW is not set
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_SFC is not set
-# CONFIG_SFI is not set
-CONFIG_SGETMASK_SYSCALL=y
-# CONFIG_SGI_IOC4 is not set
-# CONFIG_SIS190 is not set
-# CONFIG_SIS900 is not set
-# CONFIG_SLICOSS is not set
-# CONFIG_SMSC37B787_WDT is not set
-# CONFIG_SMSC9420 is not set
-# CONFIG_SMSC_SCH311X_WDT is not set
-# CONFIG_SND_AD1889 is not set
-# CONFIG_SND_ALI5451 is not set
-# CONFIG_SND_ALS300 is not set
-# CONFIG_SND_ALS4000 is not set
-# CONFIG_SND_ASIHPI is not set
-# CONFIG_SND_ATIIXP is not set
-# CONFIG_SND_ATIIXP_MODEM is not set
-# CONFIG_SND_AU8810 is not set
-# CONFIG_SND_AU8820 is not set
-# CONFIG_SND_AU8830 is not set
-# CONFIG_SND_AW2 is not set
-# CONFIG_SND_AZT3328 is not set
-# CONFIG_SND_BT87X is not set
-# CONFIG_SND_CA0106 is not set
-# CONFIG_SND_CMIPCI is not set
-# CONFIG_SND_CS4281 is not set
-# CONFIG_SND_CS46XX is not set
-# CONFIG_SND_CS5530 is not set
-# CONFIG_SND_CS5535AUDIO is not set
-# CONFIG_SND_CTXFI is not set
-# CONFIG_SND_DARLA20 is not set
-# CONFIG_SND_DARLA24 is not set
-CONFIG_SND_DMA_SGBUF=y
-# CONFIG_SND_ECHO3G is not set
-# CONFIG_SND_EMU10K1 is not set
-# CONFIG_SND_EMU10K1X is not set
-# CONFIG_SND_ENS1370 is not set
-# CONFIG_SND_ENS1371 is not set
-# CONFIG_SND_ES1938 is not set
-# CONFIG_SND_ES1968 is not set
-# CONFIG_SND_FM801 is not set
-# CONFIG_SND_GINA20 is not set
-# CONFIG_SND_GINA24 is not set
-CONFIG_SND_HDA=m
-CONFIG_SND_HDA_CODEC_ANALOG=m
-CONFIG_SND_HDA_CODEC_CA0110=m
-# CONFIG_SND_HDA_CODEC_CA0132 is not set
-CONFIG_SND_HDA_CODEC_CIRRUS=m
-CONFIG_SND_HDA_CODEC_CMEDIA=m
-CONFIG_SND_HDA_CODEC_CONEXANT=m
-CONFIG_SND_HDA_CODEC_HDMI=m
-CONFIG_SND_HDA_CODEC_REALTEK=m
-CONFIG_SND_HDA_CODEC_SI3054=m
-CONFIG_SND_HDA_CODEC_SIGMATEL=m
-CONFIG_SND_HDA_CODEC_VIA=m
-CONFIG_SND_HDA_CORE=m
-CONFIG_SND_HDA_GENERIC=m
-CONFIG_SND_HDA_HWDEP=y
-CONFIG_SND_HDA_I915=y
-# CONFIG_SND_HDA_INPUT_BEEP is not set
-CONFIG_SND_HDA_INTEL=m
-CONFIG_SND_HDA_PATCH_LOADER=y
-CONFIG_SND_HDA_POWER_SAVE_DEFAULT=15
-CONFIG_SND_HDA_RECONFIG=y
-# CONFIG_SND_HDSP is not set
-# CONFIG_SND_HDSPM is not set
-# CONFIG_SND_ICE1712 is not set
-# CONFIG_SND_ICE1724 is not set
-# CONFIG_SND_INDIGO is not set
-# CONFIG_SND_INDIGODJ is not set
-# CONFIG_SND_INDIGODJX is not set
-# CONFIG_SND_INDIGOIO is not set
-# CONFIG_SND_INDIGOIOX is not set
-# CONFIG_SND_INTEL8X0 is not set
-# CONFIG_SND_INTEL8X0M is not set
-# CONFIG_SND_KORG1212 is not set
-# CONFIG_SND_LAYLA20 is not set
-# CONFIG_SND_LAYLA24 is not set
-# CONFIG_SND_LOLA is not set
-# CONFIG_SND_LX6464ES is not set
-# CONFIG_SND_MAESTRO3 is not set
-# CONFIG_SND_MIA is not set
-# CONFIG_SND_MIXART is not set
-# CONFIG_SND_MONA is not set
-# CONFIG_SND_NM256 is not set
-# CONFIG_SND_OXYGEN is not set
-CONFIG_SND_PCI=y
-CONFIG_SND_PCM=m
-# CONFIG_SND_PCXHR is not set
-# CONFIG_SND_RIPTIDE is not set
-# CONFIG_SND_RME32 is not set
-# CONFIG_SND_RME96 is not set
-# CONFIG_SND_RME9652 is not set
-# CONFIG_SND_SE6X is not set
-# CONFIG_SND_SIS7019 is not set
-# CONFIG_SND_SOC is not set
-# CONFIG_SND_SONICVIBES is not set
-CONFIG_SND_TIMER=m
-# CONFIG_SND_TRIDENT is not set
-# CONFIG_SND_USB_US122L is not set
-# CONFIG_SND_USB_USX2Y is not set
-# CONFIG_SND_VIA82XX is not set
-# CONFIG_SND_VIA82XX_MODEM is not set
-# CONFIG_SND_VIRTUOSO is not set
-CONFIG_SND_VMASTER=y
-# CONFIG_SND_VX222 is not set
-# CONFIG_SND_YMFPCI is not set
-# CONFIG_SONYPI is not set
-# CONFIG_SONY_LAPTOP is not set
-# CONFIG_SP5100_TCO is not set
-CONFIG_SPARSEMEM=y
-CONFIG_SPARSEMEM_MANUAL=y
-CONFIG_SPARSEMEM_STATIC=y
-CONFIG_SPI_BITBANG=m
-# CONFIG_SPI_GPIO is not set
-# CONFIG_SPI_PXA2XX is not set
-# CONFIG_SPI_PXA2XX_PCI is not set
-# CONFIG_SPI_TOPCLIFF_PCH is not set
-# CONFIG_STMMAC_ETH is not set
-# CONFIG_SUNDANCE is not set
-# CONFIG_SUNGEM is not set
-# CONFIG_SURFACE_PRO3_BUTTON is not set
-CONFIG_SYSCTL_EXCEPTION_TRACE=y
-# CONFIG_TC1100_WMI is not set
-# CONFIG_TCG_ATMEL is not set
-# CONFIG_TCG_CR50_I2C is not set
-# CONFIG_TCG_CR50_SPI is not set
-# CONFIG_TCG_CRB is not set
-# CONFIG_TCG_INFINEON is not set
-# CONFIG_TCG_NSC is not set
-CONFIG_TCG_TIS=y
-CONFIG_TCG_TIS_CORE=y
-# CONFIG_TCG_TIS_I2C_INFINEON is not set
-# CONFIG_TCG_TIS_SPI is not set
-# CONFIG_TEHUTI is not set
-# CONFIG_TELCLOCK is not set
-# CONFIG_THERMAL_DEFAULT_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
-# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
-# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_WRITABLE_TRIPS=y
-# CONFIG_THINKPAD_ACPI is not set
-# CONFIG_THUNDERBOLT is not set
-# CONFIG_TIFM_CORE is not set
-CONFIG_TIGON3=m
-# CONFIG_TI_CPSW_ALE is not set
-# CONFIG_TLAN is not set
-# CONFIG_TOPSTAR_LAPTOP is not set
-# CONFIG_TOSHIBA is not set
-# CONFIG_TOSHIBA_BT_RFKILL is not set
-# CONFIG_TOSHIBA_HAPS is not set
-# CONFIG_TOSHIBA_WMI is not set
-# CONFIG_TOUCHSCREEN_ELAN is not set
-# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
-# CONFIG_TOUCHSCREEN_RM_TS is not set
-# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
-# CONFIG_TRANSPARENT_HUGEPAGE is not set
-# CONFIG_TYPHOON is not set
-CONFIG_UCS2_STRING=y
-# CONFIG_UPROBE_EVENT is not set
-# CONFIG_USB_DWC2 is not set
-# CONFIG_USB_DWC3 is not set
-# CONFIG_USB_EHCI_HCD_PLATFORM is not set
-CONFIG_USB_EHCI_PCI=y
-# CONFIG_USB_GADGET is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PCI=y
-# CONFIG_USB_OHCI_HCD_PLATFORM is not set
-# CONFIG_USB_PHY is not set
-CONFIG_USB_STORAGE_REALTEK=y
-CONFIG_USB_UHCI_HCD=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PCI=y
-# CONFIG_USB_XHCI_PLATFORM is not set
-# CONFIG_USERIO is not set
-CONFIG_USER_STACKTRACE_SUPPORT=y
-# CONFIG_V4L_MEM2MEM_DRIVERS is not set
-# CONFIG_V4L_PLATFORM_DRIVERS is not set
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
-# CONFIG_VGA_ARB is not set
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGA_SWITCHEROO is not set
-# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
-# CONFIG_VHOST_NET is not set
-# CONFIG_VIA_WDT is not set
-CONFIG_VIDEOBUF2_CORE=m
-CONFIG_VIDEOBUF2_MEMOPS=m
-CONFIG_VIDEOBUF2_VMALLOC=m
-CONFIG_VIDEO_DEV=m
-CONFIG_VIDEO_V4L2=m
-CONFIG_VIRTIO=y
-# CONFIG_VIRTIO_BALLOON is not set
-CONFIG_VIRTIO_BLK=m
-# CONFIG_VIRTIO_CONSOLE is not set
-# CONFIG_VIRTIO_INPUT is not set
-# CONFIG_VIRTIO_MMIO is not set
-CONFIG_VIRTIO_NET=m
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_VIRT_TO_BUS=y
-# CONFIG_VM86 is not set
-# CONFIG_VME_BUS is not set
-# CONFIG_VMSPLIT_1G is not set
-CONFIG_VMSPLIT_2G=y
-# CONFIG_VMSPLIT_3G is not set
-# CONFIG_VMWARE_PVSCSI is not set
-# CONFIG_VMWARE_VMCI is not set
-# CONFIG_VMXNET3 is not set
-# CONFIG_VORTEX is not set
-# CONFIG_VT6655 is not set
-# CONFIG_VXGE is not set
-# CONFIG_W83627HF_WDT is not set
-# CONFIG_W83877F_WDT is not set
-# CONFIG_W83977F_WDT is not set
-# CONFIG_WAFER_WDT is not set
-# CONFIG_WAKELOCK is not set
-# CONFIG_WDTPCI is not set
-# CONFIG_WIL6210 is not set
-CONFIG_X86=y
-CONFIG_X86_16BIT=y
-CONFIG_X86_32=y
-# CONFIG_X86_32_IRIS is not set
-# CONFIG_X86_32_NON_STANDARD is not set
-CONFIG_X86_32_SMP=y
-CONFIG_X86_ACPI_CPUFREQ=y
-CONFIG_X86_ACPI_CPUFREQ_CPB=y
-# CONFIG_X86_AMD_FREQ_SENSITIVITY is not set
-# CONFIG_X86_AMD_PLATFORM_DEVICE is not set
-# CONFIG_X86_ANCIENT_MCE is not set
-# CONFIG_X86_BIGSMP is not set
-CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
-CONFIG_X86_CHECK_BIOS_CORRUPTION=y
-CONFIG_X86_CMOV=y
-CONFIG_X86_CMPXCHG64=y
-# CONFIG_X86_CPUFREQ_NFORCE2 is not set
-CONFIG_X86_CPUID=y
-CONFIG_X86_DEBUGCTLMSR=y
-CONFIG_X86_DEBUG_FPU=y
-# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set
-CONFIG_X86_ESPFIX32=y
-CONFIG_X86_EXTENDED_PLATFORM=y
-# CONFIG_X86_E_POWERSAVER is not set
-CONFIG_X86_FEATURE_NAMES=y
-# CONFIG_X86_GENERIC is not set
-# CONFIG_X86_GOLDFISH is not set
-# CONFIG_X86_GX_SUSPMOD is not set
-# CONFIG_X86_INTEL_LPSS is not set
-# CONFIG_X86_INTEL_MID is not set
-# CONFIG_X86_INTEL_MPX is not set
-CONFIG_X86_INTEL_PSTATE=y
-# CONFIG_X86_INTEL_QUARK is not set
-CONFIG_X86_INTERNODE_CACHE_SHIFT=5
-CONFIG_X86_IO_APIC=y
-CONFIG_X86_L1_CACHE_SHIFT=5
-# CONFIG_X86_LEGACY_VM86 is not set
-CONFIG_X86_LOCAL_APIC=y
-# CONFIG_X86_LONGHAUL is not set
-# CONFIG_X86_LONGRUN is not set
-CONFIG_X86_MCE=y
-# CONFIG_X86_MCE_AMD is not set
-# CONFIG_X86_MCE_INJECT is not set
-CONFIG_X86_MCE_INTEL=y
-CONFIG_X86_MCE_THRESHOLD=y
-CONFIG_X86_MINIMUM_CPU_FAMILY=5
-CONFIG_X86_MPPARSE=y
-CONFIG_X86_MSR=y
-CONFIG_X86_NEED_RELOCS=y
-# CONFIG_X86_P4_CLOCKMOD is not set
-CONFIG_X86_PAE=y
-CONFIG_X86_PAT=y
-# CONFIG_X86_PCC_CPUFREQ is not set
-CONFIG_X86_PKG_TEMP_THERMAL=m
-CONFIG_X86_PLATFORM_DEVICES=y
-# CONFIG_X86_PMEM_LEGACY is not set
-CONFIG_X86_PM_TIMER=y
-# CONFIG_X86_PPRO_FENCE is not set
-# CONFIG_X86_PTDUMP is not set
-# CONFIG_X86_PTDUMP_CORE is not set
-# CONFIG_X86_RDC321X is not set
-# CONFIG_X86_REBOOTFIXUPS is not set
-CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
-CONFIG_X86_RESERVE_LOW=64
-CONFIG_X86_SMAP=y
-# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
-# CONFIG_X86_SPEEDSTEP_ICH is not set
-# CONFIG_X86_SPEEDSTEP_LIB is not set
-# CONFIG_X86_SPEEDSTEP_SMI is not set
-# CONFIG_X86_SYSFB is not set
-CONFIG_X86_THERMAL_VECTOR=y
-CONFIG_X86_TSC=y
-CONFIG_X86_USE_PPRO_CHECKSUM=y
-CONFIG_X86_VERBOSE_BOOTUP=y
-# CONFIG_XZ_DEC_ARM is not set
-# CONFIG_XZ_DEC_ARMTHUMB is not set
-CONFIG_XZ_DEC_BCJ=y
-CONFIG_XZ_DEC_X86=y
-CONFIG_ZONE_DMA=y
-CONFIG_ZONE_DMA_FLAG=1
diff --git a/chromeos/config/x86_64/chromeos-amd-stoneyridge.flavour.config b/chromeos/config/x86_64/chromeos-amd-stoneyridge.flavour.config
index b164d206457b15..b4256ad1d45543 100644
--- a/chromeos/config/x86_64/chromeos-amd-stoneyridge.flavour.config
+++ b/chromeos/config/x86_64/chromeos-amd-stoneyridge.flavour.config
@@ -13,6 +13,7 @@ CONFIG_BCMA_DRIVER_PCI=y
CONFIG_BCMA_HOST_PCI=y
CONFIG_BCMA_HOST_PCI_POSSIBLE=y
# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_CEC_CORE=m
# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
@@ -97,6 +98,7 @@ CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM=y
# CONFIG_SND_SOC_INTEL_BXT_DA7219_MAX98357A_MACH is not set
# CONFIG_SND_SOC_INTEL_BYTCR_RT5640_MACH is not set
# CONFIG_SND_SOC_INTEL_SKL_RT286_MACH is not set
+# CONFIG_SND_SOC_MAX98373 is not set
# CONFIG_SND_SOC_MAX98927 is not set
# CONFIG_SND_SOC_SSM4567 is not set
# CONFIG_SND_SOC_TS3A227E is not set
diff --git a/chromeos/config/x86_64/chromeos-intel-pineview.flavour.config b/chromeos/config/x86_64/chromeos-intel-pineview.flavour.config
index 211570b44472bf..1bd328a32d4596 100644
--- a/chromeos/config/x86_64/chromeos-intel-pineview.flavour.config
+++ b/chromeos/config/x86_64/chromeos-intel-pineview.flavour.config
@@ -6,6 +6,7 @@ CONFIG_ACPI_WMI=m
CONFIG_AGP_INTEL=y
# CONFIG_B43 is not set
# CONFIG_BCMA is not set
+CONFIG_CEC_CORE=y
CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
@@ -140,6 +141,8 @@ CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
# CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH is not set
# CONFIG_SND_SOC_INTEL_HASWELL_MACH is not set
CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m
+CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98373_MACH=m
+CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH=m
@@ -152,6 +155,7 @@ CONFIG_SND_SOC_INTEL_SST_ACPI=m
CONFIG_SND_SOC_INTEL_SST_MATCH=m
CONFIG_SND_SOC_MAX98090=m
CONFIG_SND_SOC_MAX98357A=m
+CONFIG_SND_SOC_MAX98373=m
CONFIG_SND_SOC_MAX98927=m
CONFIG_SND_SOC_NAU8825=m
CONFIG_SND_SOC_RL6347A=m
@@ -186,6 +190,7 @@ CONFIG_VIDEOBUF2_DMA_SG=m
# CONFIG_VIDEO_ADV7511 is not set
# CONFIG_VIDEO_ADV7604 is not set
# CONFIG_VIDEO_ADV7842 is not set
+CONFIG_VIDEO_AK7375=m
# CONFIG_VIDEO_AK881X is not set
# CONFIG_VIDEO_AS3645A is not set
# CONFIG_VIDEO_BT819 is not set
@@ -196,7 +201,10 @@ CONFIG_VIDEOBUF2_DMA_SG=m
# CONFIG_VIDEO_CX25840 is not set
CONFIG_VIDEO_DW9714=m
CONFIG_VIDEO_DW9807=m
+CONFIG_VIDEO_IMX208=m
CONFIG_VIDEO_IMX258=m
+CONFIG_VIDEO_IMX319=m
+CONFIG_VIDEO_IMX355=m
CONFIG_VIDEO_IPU3_CIO2=m
CONFIG_VIDEO_IPU3_IMGU=m
# CONFIG_VIDEO_KS0127 is not set
diff --git a/chromeos/config/x86_64/chromiumos-x86_64.flavour.config b/chromeos/config/x86_64/chromiumos-x86_64.flavour.config
index e2b594eebaba51..bd45bc71635eb5 100644
--- a/chromeos/config/x86_64/chromiumos-x86_64.flavour.config
+++ b/chromeos/config/x86_64/chromiumos-x86_64.flavour.config
@@ -31,6 +31,7 @@ CONFIG_BCMA_DRIVER_PCI=y
CONFIG_BCMA_HOST_PCI=y
CONFIG_BCMA_HOST_PCI_POSSIBLE=y
# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_CEC_CORE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
@@ -180,6 +181,8 @@ CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH=m
CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH=m
# CONFIG_SND_SOC_INTEL_HASWELL_MACH is not set
CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH=m
+CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98373_MACH=m
+CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH=m
CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH=m
CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH=m
# CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH is not set
@@ -192,6 +195,7 @@ CONFIG_SND_SOC_INTEL_SST_ACPI=m
CONFIG_SND_SOC_INTEL_SST_MATCH=m
CONFIG_SND_SOC_MAX98090=m
CONFIG_SND_SOC_MAX98357A=m
+CONFIG_SND_SOC_MAX98373=m
CONFIG_SND_SOC_MAX98927=m
CONFIG_SND_SOC_RT5514=m
CONFIG_SND_SOC_RT5514_SPI=m
diff --git a/chromeos/config/x86_64/common.config b/chromeos/config/x86_64/common.config
index 4bd94e1b17577c..f66733895b78a9 100644
--- a/chromeos/config/x86_64/common.config
+++ b/chromeos/config/x86_64/common.config
@@ -83,6 +83,7 @@ CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
CONFIG_ARCH_HAS_CPU_RELAX=y
CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y
CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
CONFIG_ARCH_HAS_KCOV=y
CONFIG_ARCH_HAS_MMIO_FLUSH=y
CONFIG_ARCH_HAS_PMEM_API=y
@@ -114,9 +115,7 @@ CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
CONFIG_ARCH_SUPPORTS_UPROBES=y
CONFIG_ARCH_USES_PG_UNCACHED=y
CONFIG_ARCH_USE_BUILTIN_BSWAP=y
-CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
-CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
@@ -124,10 +123,8 @@ CONFIG_ARCH_WANT_FRAME_POINTERS=y
CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
# CONFIG_ARCNET is not set
-# CONFIG_ARC_EMAC is not set
# CONFIG_ASUS_LAPTOP is not set
# CONFIG_ASUS_WMI is not set
-# CONFIG_ASYNC_TX_DMA is not set
CONFIG_ATA=y
CONFIG_ATA_ACPI=y
CONFIG_ATA_BMDMA=y
@@ -190,7 +187,6 @@ CONFIG_BLOCK_COMPAT=y
# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0
# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set
-# CONFIG_BPF_JIT is not set
# CONFIG_BRCMDBG is not set
CONFIG_BRCMFMAC=m
CONFIG_BRCMFMAC_PCIE=y
@@ -200,15 +196,17 @@ CONFIG_BRCMFMAC_SDIO=y
CONFIG_BRCMFMAC_USB=y
CONFIG_BRCMUTIL=m
# CONFIG_BRCM_TRACING is not set
-# CONFIG_BROADCOM_PHY is not set
CONFIG_BT_ATH3K=m
CONFIG_BT_HCIBCM203X=m
# CONFIG_BT_HCIBTSDIO is not set
# CONFIG_BT_HCIUART is not set
+CONFIG_BT_HCI_LE_SPLITTER=y
# CONFIG_BT_MRVL is not set
# CONFIG_CALGARY_IOMMU is not set
# CONFIG_CASSINI is not set
# CONFIG_CB710_CORE is not set
+CONFIG_CEC_NOTIFIER=y
+CONFIG_CEC_PLATFORM_DRIVERS=y
# CONFIG_CHARGER_BQ24735 is not set
CONFIG_CHARGER_CROS_USB_PD=y
# CONFIG_CHARGER_GPIO is not set
@@ -222,20 +220,11 @@ CONFIG_CHROMEOS_LAPTOP=y
CONFIG_CHROMEOS_PSTORE=y
CONFIG_CHROMEOS_TBMC=y
CONFIG_CLKBLD_I8253=y
-CONFIG_CLKDEV_LOOKUP=y
CONFIG_CLKEVT_I8253=y
CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y
CONFIG_CLOCKSOURCE_WATCHDOG=y
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_CNIC is not set
-CONFIG_COMMON_CLK=y
-# CONFIG_COMMON_CLK_CDCE706 is not set
-# CONFIG_COMMON_CLK_CDCE925 is not set
-# CONFIG_COMMON_CLK_PXA is not set
-# CONFIG_COMMON_CLK_SI514 is not set
-# CONFIG_COMMON_CLK_SI5351 is not set
-# CONFIG_COMMON_CLK_SI570 is not set
-# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
# CONFIG_COMPAL_LAPTOP is not set
CONFIG_COMPAT=y
CONFIG_COMPAT_BINFMT_ELF=y
@@ -243,7 +232,6 @@ CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
CONFIG_COMPAT_NETLINK_MESSAGES=y
CONFIG_COMPAT_OLD_SIGACTION=y
# CONFIG_COMPAT_VDSO is not set
-# CONFIG_CORDIC is not set
# CONFIG_CPA_DEBUG is not set
# CONFIG_CPU5_WDT is not set
# CONFIG_CPUFREQ_DT is not set
@@ -255,13 +243,9 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_SUP_AMD=y
# CONFIG_CPU_THERMAL is not set
# CONFIG_CRASH_DUMP is not set
-# CONFIG_CRC8 is not set
CONFIG_CRC_T10DIF=y
-CONFIG_CROS_EC_CHARDEV=y
CONFIG_CROS_EC_LPC=y
# CONFIG_CROS_EC_LPC_MEC is not set
-CONFIG_CROS_EC_PROTO=y
-# CONFIG_CROS_EC_SYSFS_USB is not set
CONFIG_CROS_KBD_LED_BACKLIGHT=y
CONFIG_CRYPTO_AES_X86_64=y
# CONFIG_CRYPTO_BLOWFISH_X86_64 is not set
@@ -301,7 +285,6 @@ CONFIG_DEBUG_BOOT_PARAMS=y
# CONFIG_DEBUG_HOTPLUG_CPU0 is not set
# CONFIG_DEBUG_NMI_SELFTEST is not set
CONFIG_DEBUG_NX_TEST=m
-# CONFIG_DEBUG_PINCTRL is not set
# CONFIG_DEBUG_RODATA_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set
@@ -315,15 +298,9 @@ CONFIG_DEFAULT_IO_DELAY_TYPE=1
CONFIG_DELL_WMI=m
# CONFIG_DELL_WMI_AIO is not set
CONFIG_DEVPORT=y
-CONFIG_DEV_COREDUMP=y
# CONFIG_DGNC is not set
# CONFIG_DL2K is not set
-CONFIG_DMADEVICES=y
-# CONFIG_DMADEVICES_DEBUG is not set
-# CONFIG_DMATEST is not set
CONFIG_DMA_ACPI=y
-CONFIG_DMA_ENGINE=y
-CONFIG_DMA_OF=y
CONFIG_DMI=y
CONFIG_DMIID=y
CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
@@ -337,12 +314,7 @@ CONFIG_DOUBLEFAULT=y
# CONFIG_DRM_GENERIC_GPIO_MUX is not set
# CONFIG_DRM_MGA is not set
# CONFIG_DRM_MGAG200 is not set
-# CONFIG_DRM_NXP_PTN3460 is not set
-# CONFIG_DRM_PANEL_LG_LG4573 is not set
-# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
-# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
# CONFIG_DRM_PANEL_SIMPLE is not set
-# CONFIG_DRM_PARADE_PS8622 is not set
# CONFIG_DRM_PARADE_PS8640 is not set
# CONFIG_DRM_POWERVR_ROGUE_1_9 is not set
# CONFIG_DRM_QXL is not set
@@ -383,7 +355,6 @@ CONFIG_EFI_VARS_PSTORE=y
# CONFIG_ET131X is not set
# CONFIG_EUROTECH_WDT is not set
# CONFIG_EXTCON is not set
-# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set
# CONFIG_F71808E_WDT is not set
# CONFIG_FB_3DFX is not set
# CONFIG_FB_ARC is not set
@@ -414,7 +385,6 @@ CONFIG_EFI_VARS_PSTORE=y
# CONFIG_FB_SIS is not set
# CONFIG_FB_SM712 is not set
# CONFIG_FB_SM750 is not set
-# CONFIG_FB_SSD1307 is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_VESA is not set
# CONFIG_FB_VGA16 is not set
@@ -429,15 +399,14 @@ CONFIG_EFI_VARS_PSTORE=y
CONFIG_FIRMWARE_MEMMAP=y
# CONFIG_FM10K is not set
# CONFIG_FORCEDETH is not set
+CONFIG_FORTIFY_SOURCE=y
CONFIG_FPC1020=m
CONFIG_FRAME_WARN=2048
-# CONFIG_FSL_EDMA is not set
# CONFIG_FS_DAX is not set
CONFIG_FTRACE_SYSCALLS=y
# CONFIG_FUJITSU_ES is not set
# CONFIG_FUJITSU_LAPTOP is not set
# CONFIG_FUJITSU_TABLET is not set
-CONFIG_FUNCTION_GRAPH_TRACER=y
# CONFIG_FUSION is not set
CONFIG_GART_IOMMU=y
CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
@@ -454,7 +423,6 @@ CONFIG_GENERIC_MSI_IRQ=y
CONFIG_GENERIC_MSI_IRQ_DOMAIN=y
CONFIG_GENERIC_PENDING_IRQ=y
# CONFIG_GENERIC_PHY is not set
-CONFIG_GENERIC_PINCONF=y
# CONFIG_GENWQE is not set
CONFIG_GLOB=y
# CONFIG_GLOB_SELFTEST is not set
@@ -464,16 +432,11 @@ CONFIG_GOOGLE_COREBOOT_TABLE_ACPI=y
CONFIG_GOOGLE_SMI=y
CONFIG_GPIOLIB_IRQCHIP=y
# CONFIG_GPIO_104_IDIO_16 is not set
-# CONFIG_GPIO_74X164 is not set
-# CONFIG_GPIO_74XX_MMIO is not set
CONFIG_GPIO_ACPI=y
-# CONFIG_GPIO_ADNP is not set
-# CONFIG_GPIO_ALTERA is not set
# CONFIG_GPIO_AMD8111 is not set
# CONFIG_GPIO_AMDPT is not set
# CONFIG_GPIO_BT8XX is not set
# CONFIG_GPIO_F7188X is not set
-# CONFIG_GPIO_GRGPIO is not set
# CONFIG_GPIO_ICH is not set
# CONFIG_GPIO_INTEL_MID is not set
# CONFIG_GPIO_IT87 is not set
@@ -484,8 +447,6 @@ CONFIG_GPIO_LYNXPOINT=y
# CONFIG_GPIO_SCH311X is not set
# CONFIG_GPIO_SODAVILLE is not set
# CONFIG_GPIO_VX855 is not set
-# CONFIG_GPIO_WATCHDOG is not set
-# CONFIG_GPIO_XILINX is not set
# CONFIG_HANGCHECK_TIMER is not set
# CONFIG_HAPPYMEAL is not set
CONFIG_HARDLOCKUP_DETECTOR_NMI=y
@@ -493,7 +454,6 @@ CONFIG_HAS_IOPORT_MAP=y
CONFIG_HAVE_ACPI_APEI=y
CONFIG_HAVE_ACPI_APEI_NMI=y
CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
-# CONFIG_HAVE_AOUT is not set
# CONFIG_HAVE_ARCH_BITREVERSE is not set
CONFIG_HAVE_ARCH_HUGE_VMAP=y
CONFIG_HAVE_ARCH_KASAN=y
@@ -501,12 +461,9 @@ CONFIG_HAVE_ARCH_KMEMCHECK=y
CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
CONFIG_HAVE_ARCH_SOFT_DIRTY=y
CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
-CONFIG_HAVE_BPF_JIT=y
-CONFIG_HAVE_CLK=y
-CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
CONFIG_HAVE_CMPXCHG_DOUBLE=y
CONFIG_HAVE_CMPXCHG_LOCAL=y
-CONFIG_HAVE_CONTEXT_TRACKING=y
CONFIG_HAVE_COPY_THREAD_TLS=y
CONFIG_HAVE_DEBUG_STACKOVERFLOW=y
CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y
@@ -542,10 +499,10 @@ CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
CONFIG_HAVE_SETUP_PER_CPU_AREA=y
CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
CONFIG_HAVE_USER_RETURN_NOTIFIER=y
-CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
# CONFIG_HERMES is not set
# CONFIG_HIBERNATION is not set
CONFIG_HID_GOOGLE_HAMMER=m
+CONFIG_HID_GOOGLE_WHISKERS=m
CONFIG_HID_PID=y
CONFIG_HID_RMI=m
# CONFIG_HIPPI is not set
@@ -576,19 +533,16 @@ CONFIG_HT_IRQ=y
# CONFIG_I2C_ALI15X3 is not set
# CONFIG_I2C_AMD756 is not set
# CONFIG_I2C_AMD8111 is not set
-# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_CROS_EC_TUNNEL is not set
# CONFIG_I2C_DESIGNWARE_BAYTRAIL is not set
CONFIG_I2C_DESIGNWARE_CORE=y
CONFIG_I2C_DESIGNWARE_PCI=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
-# CONFIG_I2C_EMEV2 is not set
CONFIG_I2C_HID=y
CONFIG_I2C_I801=y
# CONFIG_I2C_ISCH is not set
# CONFIG_I2C_ISMT is not set
-CONFIG_I2C_MUX=y
# CONFIG_I2C_MUX_PINCTRL is not set
# CONFIG_I2C_NFORCE2 is not set
CONFIG_I2C_PIIX4=m
@@ -616,9 +570,6 @@ CONFIG_IA32_EMULATION=y
# CONFIG_IGB is not set
CONFIG_IGBVF=m
CONFIG_IIO=m
-CONFIG_IIO_BUFFER=y
-# CONFIG_IIO_BUFFER_CB is not set
-CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
CONFIG_IIO_CROS_EC=y
CONFIG_IIO_CROS_EC_ACCEL_LEGACY=m
CONFIG_IIO_CROS_EC_ACTIVITY=m
@@ -628,18 +579,14 @@ CONFIG_IIO_CROS_EC_SENSORS=m
CONFIG_IIO_CROS_EC_SENSORS_CORE=m
CONFIG_IIO_CROS_EC_SENSORS_RING=m
CONFIG_IIO_CROS_EC_SENSORS_SYNC=m
-# CONFIG_IIO_INTERRUPT_TRIGGER is not set
CONFIG_IIO_KFIFO_BUF=m
-# CONFIG_IIO_PERIODIC_RTC_TRIGGER is not set
CONFIG_IIO_SYSFS_TRIGGER=m
-CONFIG_IIO_TRIGGER=y
CONFIG_IIO_TRIGGERED_BUFFER=m
CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
# CONFIG_INFINIBAND is not set
# CONFIG_INPUT_APANEL is not set
# CONFIG_INPUT_ATLAS_BTNS is not set
# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
-CONFIG_INPUT_MATRIXKMAP=y
# CONFIG_INPUT_POLLDEV is not set
CONFIG_INPUT_SPARSEKMAP=m
CONFIG_INSTRUCTION_DECODER=y
@@ -647,6 +594,7 @@ CONFIG_INT340X_THERMAL=y
# CONFIG_INTEL_IOATDMA is not set
# CONFIG_INTEL_IOMMU is not set
# CONFIG_INTEL_IPS is not set
+CONFIG_INTEL_ISH_HID=m
# CONFIG_INTEL_MEI is not set
# CONFIG_INTEL_MEI_ME is not set
# CONFIG_INTEL_MEI_TXE is not set
@@ -660,7 +608,6 @@ CONFIG_INTEL_VBTN=m
CONFIG_INTERVAL_TREE=y
# CONFIG_IOMMU_DEBUG is not set
# CONFIG_IOMMU_STRESS is not set
-CONFIG_ION_POOL_CACHE_POLICY=y
CONFIG_IOSF_MBI=y
# CONFIG_IOSF_MBI_DEBUG is not set
# CONFIG_IO_DELAY_0X80 is not set
@@ -673,7 +620,6 @@ CONFIG_IO_DELAY_TYPE_UDELAY=2
# CONFIG_IO_DELAY_UDELAY is not set
# CONFIG_IPW2100 is not set
# CONFIG_IPW2200 is not set
-CONFIG_IRQCHIP=y
# CONFIG_IRQ_REMAP is not set
# CONFIG_IRQ_TIME_ACCOUNTING is not set
CONFIG_ISA_DMA_API=y
@@ -698,20 +644,12 @@ CONFIG_KERNEL_GZIP=y
# CONFIG_KEXEC is not set
# CONFIG_KEXEC_FILE is not set
CONFIG_KEYBOARD_ATKBD=y
-# CONFIG_KEYBOARD_BCM is not set
-# CONFIG_KEYBOARD_CAP11XX is not set
-CONFIG_KEYBOARD_CROS_EC=y
# CONFIG_KEYBOARD_GPIO_POLLED is not set
-# CONFIG_KEYBOARD_OMAP4 is not set
-# CONFIG_KEYBOARD_SAMSUNG is not set
CONFIG_KEYS_COMPAT=y
# CONFIG_KPROBES is not set
-# CONFIG_KS8842 is not set
# CONFIG_KSZ884X_PCI is not set
# CONFIG_KVM is not set
# CONFIG_LATENCYTOP is not set
-# CONFIG_LEDS_BCM6328 is not set
-# CONFIG_LEDS_BCM6358 is not set
# CONFIG_LEDS_CLEVO_MAIL is not set
# CONFIG_LEDS_DELL_NETBOOKS is not set
# CONFIG_LEDS_GPIO is not set
@@ -719,7 +657,6 @@ CONFIG_KEYS_COMPAT=y
CONFIG_LEGACY_VSYSCALL_EMULATE=y
# CONFIG_LEGACY_VSYSCALL_NATIVE is not set
# CONFIG_LEGACY_VSYSCALL_NONE is not set
-# CONFIG_LIBERTAS is not set
CONFIG_LIBERTAS_THINFIRM_USB=m
# CONFIG_LIBNVDIMM is not set
# CONFIG_LIQUIDIO is not set
@@ -729,13 +666,11 @@ CONFIG_LPC_SCH=y
# CONFIG_MACINTOSH_DRIVERS is not set
# CONFIG_MAILBOX is not set
# CONFIG_MATOM is not set
-# CONFIG_MAX5821 is not set
# CONFIG_MAXSMP is not set
# CONFIG_MCORE2 is not set
CONFIG_MDIO=m
-# CONFIG_MDIO_BUS_MUX_GPIO is not set
-# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
# CONFIG_MDIO_OCTEON is not set
+CONFIG_MEDIA_CEC_SUPPORT=y
CONFIG_MEDIA_PCI_SUPPORT=y
CONFIG_MEDIA_SUPPORT=y
# CONFIG_MEGARAID_LEGACY is not set
@@ -745,24 +680,15 @@ CONFIG_MEDIA_SUPPORT=y
# CONFIG_MEMORY_FAILURE is not set
# CONFIG_MEMORY_HOTPLUG is not set
# CONFIG_MFD_AS3722 is not set
-# CONFIG_MFD_ATMEL_FLEXCOM is not set
-# CONFIG_MFD_ATMEL_HLCDC is not set
-CONFIG_MFD_CROS_EC=y
CONFIG_MFD_CROS_EC_I2C=y
-CONFIG_MFD_CROS_EC_PD_UPDATE=y
-CONFIG_MFD_CROS_EC_SPI=y
-# CONFIG_MFD_HI6421_PMIC is not set
# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
# CONFIG_MFD_JANZ_CMODIO is not set
-# CONFIG_MFD_MAX77686 is not set
# CONFIG_MFD_MT6397 is not set
# CONFIG_MFD_PALMAS is not set
# CONFIG_MFD_RDC321X is not set
# CONFIG_MFD_RK808 is not set
CONFIG_MFD_RTSX_PCI=m
-# CONFIG_MFD_STMPE is not set
# CONFIG_MFD_SYSCON is not set
-# CONFIG_MFD_TC3589X is not set
# CONFIG_MFD_TPS65090 is not set
# CONFIG_MFD_TPS6586X is not set
# CONFIG_MFD_VX855 is not set
@@ -791,9 +717,6 @@ CONFIG_MODIFY_LDT_SYSCALL=y
CONFIG_MODULES_USE_ELF_RELA=y
CONFIG_MOUSE_APPLETOUCH=m
CONFIG_MOUSE_CENTROIDING=m
-CONFIG_MOUSE_ELAN_I2C=y
-CONFIG_MOUSE_ELAN_I2C_I2C=y
-# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
CONFIG_MOUSE_PS2=y
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_CYPRESS is not set
@@ -812,10 +735,8 @@ CONFIG_MOUSE_PS2=y
CONFIG_MTRR=y
# CONFIG_MTRR_SANITIZER is not set
# CONFIG_MWAVE is not set
-CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_PCIE=m
# CONFIG_MWIFIEX_SDIO is not set
-# CONFIG_MWIFIEX_USB is not set
# CONFIG_MWL8K is not set
# CONFIG_MYRI10GE is not set
# CONFIG_NATSEMI is not set
@@ -854,28 +775,17 @@ CONFIG_NET_VENDOR_SIS=y
CONFIG_NET_VENDOR_SUN=y
CONFIG_NET_VENDOR_TEHUTI=y
CONFIG_NET_VENDOR_TI=y
-CONFIG_NF_CONNTRACK_PROCFS=y
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
# CONFIG_NIU is not set
# CONFIG_NOZOMI is not set
-# CONFIG_NO_HZ_FULL is not set
# CONFIG_NS83820 is not set
# CONFIG_NTB is not set
# CONFIG_NUMA is not set
# CONFIG_NVMEM is not set
CONFIG_NVRAM=y
# CONFIG_NV_TCO is not set
-CONFIG_OF=y
-CONFIG_OF_ADDRESS=y
CONFIG_OF_ADDRESS_PCI=y
-CONFIG_OF_GPIO=y
-CONFIG_OF_IRQ=y
-CONFIG_OF_MDIO=y
-CONFIG_OF_NET=y
-# CONFIG_OF_OVERLAY is not set
CONFIG_OF_PCI=y
CONFIG_OF_PCI_IRQ=y
-# CONFIG_OF_UNITTEST is not set
# CONFIG_OPROFILE is not set
CONFIG_OPROFILE_NMI_TIMER=y
CONFIG_OPTIMIZE_INLINING=y
@@ -960,13 +870,9 @@ CONFIG_PGTABLE_LEVELS=4
CONFIG_PHYSICAL_ALIGN=0x200000
CONFIG_PHYSICAL_START=0x1000000
CONFIG_PHYS_ADDR_T_64BIT=y
-CONFIG_PINCONF=y
-CONFIG_PINCTRL=y
CONFIG_PINCTRL_AMD=y
-# CONFIG_PINCTRL_SINGLE is not set
CONFIG_PMC_ATOM=y
# CONFIG_PMIC_OPREGION is not set
-CONFIG_PM_CLK=y
# CONFIG_PM_DEVFREQ is not set
CONFIG_PM_TRACE=y
CONFIG_PM_TRACE_RTC=y
@@ -983,13 +889,11 @@ CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
CONFIG_PUNIT_ATOM_DEBUG=y
# CONFIG_PVPANIC is not set
# CONFIG_PWM is not set
-# CONFIG_QCA7000 is not set
# CONFIG_QED is not set
# CONFIG_QLA3XXX is not set
# CONFIG_QLCNIC is not set
# CONFIG_QLGE is not set
CONFIG_QUEUED_RWLOCKS=y
-CONFIG_QUEUED_SPINLOCKS=y
CONFIG_QUOTACTL_COMPAT=y
# CONFIG_R6040 is not set
CONFIG_R8169=m
@@ -997,29 +901,21 @@ CONFIG_RANDOMIZE_BASE=y
CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x40000000
# CONFIG_RAPIDIO is not set
CONFIG_RAS=y
-CONFIG_RATIONAL=y
CONFIG_REALTEK_AUTOPM=y
-CONFIG_REGMAP_I2C=y
-CONFIG_REGMAP_SPI=y
# CONFIG_REGULATOR is not set
CONFIG_RELOCATABLE=y
# CONFIG_RESET_CONTROLLER is not set
CONFIG_RETPOLINE=y
# CONFIG_RT2400PCI is not set
# CONFIG_RT2500PCI is not set
-# CONFIG_RT2500USB is not set
CONFIG_RT2800PCI=m
CONFIG_RT2800PCI_RT3290=y
CONFIG_RT2800_LIB_MMIO=m
CONFIG_RT2X00_LIB_MMIO=m
CONFIG_RT2X00_LIB_PCI=m
# CONFIG_RT61PCI is not set
-# CONFIG_RT73USB is not set
CONFIG_RTC_DRV_CMOS=y
# CONFIG_RTC_DRV_CROS_EC is not set
-# CONFIG_RTC_DRV_HYM8563 is not set
-# CONFIG_RTC_DRV_SNVS is not set
-# CONFIG_RTC_DRV_ZYNQMP is not set
# CONFIG_RTC_HCTOSYS is not set
# CONFIG_RTL8180 is not set
# CONFIG_RTL8188EE is not set
@@ -1059,7 +955,6 @@ CONFIG_SATA_AHCI=y
# CONFIG_SBC_FITPC2_WATCHDOG is not set
# CONFIG_SC1200_WDT is not set
# CONFIG_SC92031 is not set
-# CONFIG_SCA3000 is not set
CONFIG_SCHED_MC=y
CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_SCHED_SMT=y
@@ -1109,6 +1004,9 @@ CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_SYM53C8XX_2 is not set
CONFIG_SCSI_VIRTIO=y
# CONFIG_SCSI_WD719X is not set
+CONFIG_SECURITY_CHROMIUMOS=y
+CONFIG_SECURITY_CHROMIUMOS_NO_SYMLINK_MOUNT=y
+CONFIG_SECURITY_CHROMIUMOS_NO_UNPRIVILEGED_UNSAFE_MOUNTS=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_SENSORS_ABITUGURU is not set
# CONFIG_SENSORS_ABITUGURU3 is not set
@@ -1133,10 +1031,8 @@ CONFIG_SENSORS_CORETEMP=y
# CONFIG_SENSORS_VIA_CPUTEMP is not set
# CONFIG_SENSORS_VT8231 is not set
# CONFIG_SERIAL_8250 is not set
-# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
# CONFIG_SERIAL_JSM is not set
# CONFIG_SERIAL_RP2 is not set
-# CONFIG_SERIAL_XILINX_PS_UART is not set
CONFIG_SERIO=y
# CONFIG_SERIO_ALTERA_PS2 is not set
# CONFIG_SERIO_APBPS2 is not set
@@ -1165,7 +1061,6 @@ CONFIG_SGETMASK_SYSCALL=y
# CONFIG_SND_ASIHPI is not set
# CONFIG_SND_ATIIXP is not set
# CONFIG_SND_ATIIXP_MODEM is not set
-# CONFIG_SND_ATMEL_SOC is not set
# CONFIG_SND_AU8810 is not set
# CONFIG_SND_AU8820 is not set
# CONFIG_SND_AU8830 is not set
@@ -1213,7 +1108,6 @@ CONFIG_SND_DMA_SGBUF=y
# CONFIG_SND_NM256 is not set
# CONFIG_SND_OXYGEN is not set
CONFIG_SND_PCI=y
-CONFIG_SND_PCM=y
# CONFIG_SND_PCXHR is not set
# CONFIG_SND_RIPTIDE is not set
# CONFIG_SND_RME32 is not set
@@ -1221,83 +1115,14 @@ CONFIG_SND_PCM=y
# CONFIG_SND_RME9652 is not set
# CONFIG_SND_SE6X is not set
# CONFIG_SND_SIMPLE_CARD is not set
-CONFIG_SND_SOC=y
-# CONFIG_SND_SOC_AC97_CODEC is not set
-# CONFIG_SND_SOC_ADAU1701 is not set
-# CONFIG_SND_SOC_AK4104 is not set
-# CONFIG_SND_SOC_AK4554 is not set
-# CONFIG_SND_SOC_AK4613 is not set
-# CONFIG_SND_SOC_AK4642 is not set
-# CONFIG_SND_SOC_AK5386 is not set
-# CONFIG_SND_SOC_ALC5623 is not set
-# CONFIG_SND_SOC_CS35L32 is not set
-# CONFIG_SND_SOC_CS4265 is not set
-# CONFIG_SND_SOC_CS4270 is not set
-# CONFIG_SND_SOC_CS4271_I2C is not set
-# CONFIG_SND_SOC_CS4271_SPI is not set
-# CONFIG_SND_SOC_CS42L51_I2C is not set
-# CONFIG_SND_SOC_CS42L52 is not set
-# CONFIG_SND_SOC_CS42L56 is not set
-# CONFIG_SND_SOC_CS42L73 is not set
-# CONFIG_SND_SOC_CS42XX8_I2C is not set
-# CONFIG_SND_SOC_CS4349 is not set
-# CONFIG_SND_SOC_ES8328 is not set
-# CONFIG_SND_SOC_FSL_ASRC is not set
-# CONFIG_SND_SOC_FSL_ESAI is not set
-# CONFIG_SND_SOC_FSL_SAI is not set
-# CONFIG_SND_SOC_FSL_SPDIF is not set
-# CONFIG_SND_SOC_FSL_SSI is not set
-# CONFIG_SND_SOC_GTM601 is not set
-CONFIG_SND_SOC_I2C_AND_SPI=y
-# CONFIG_SND_SOC_IMX_AUDMUX is not set
-# CONFIG_SND_SOC_INNO_RK3036 is not set
# CONFIG_SND_SOC_INTEL_BXT_RT298_MACH is not set
-# CONFIG_SND_SOC_PCM1681 is not set
-# CONFIG_SND_SOC_PCM1792A is not set
-# CONFIG_SND_SOC_PCM512x_I2C is not set
-# CONFIG_SND_SOC_PCM512x_SPI is not set
CONFIG_SND_SOC_RL6231=m
-# CONFIG_SND_SOC_RT5631 is not set
CONFIG_SND_SOC_RT5645=m
# CONFIG_SND_SOC_RT5677_SPI is not set
-# CONFIG_SND_SOC_SGTL5000 is not set
-# CONFIG_SND_SOC_SIRF_AUDIO_CODEC is not set
-# CONFIG_SND_SOC_SPDIF is not set
-# CONFIG_SND_SOC_SSM2602_I2C is not set
-# CONFIG_SND_SOC_SSM2602_SPI is not set
-# CONFIG_SND_SOC_STA32X is not set
-# CONFIG_SND_SOC_STA350 is not set
-# CONFIG_SND_SOC_STI_SAS is not set
-# CONFIG_SND_SOC_TAS2552 is not set
-# CONFIG_SND_SOC_TAS5086 is not set
-# CONFIG_SND_SOC_TAS571X is not set
-# CONFIG_SND_SOC_TFA9879 is not set
# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
-# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
-# CONFIG_SND_SOC_TLV320AIC31XX is not set
-# CONFIG_SND_SOC_TLV320AIC3X is not set
-# CONFIG_SND_SOC_TPA6130A2 is not set
-# CONFIG_SND_SOC_WM8510 is not set
-# CONFIG_SND_SOC_WM8523 is not set
-# CONFIG_SND_SOC_WM8580 is not set
-# CONFIG_SND_SOC_WM8711 is not set
-# CONFIG_SND_SOC_WM8728 is not set
-# CONFIG_SND_SOC_WM8731 is not set
-# CONFIG_SND_SOC_WM8737 is not set
-# CONFIG_SND_SOC_WM8741 is not set
-# CONFIG_SND_SOC_WM8750 is not set
# CONFIG_SND_SOC_WM8753 is not set
-# CONFIG_SND_SOC_WM8770 is not set
-# CONFIG_SND_SOC_WM8776 is not set
-# CONFIG_SND_SOC_WM8804_I2C is not set
-# CONFIG_SND_SOC_WM8804_SPI is not set
# CONFIG_SND_SOC_WM8903 is not set
-# CONFIG_SND_SOC_WM8962 is not set
-# CONFIG_SND_SOC_WM8978 is not set
-# CONFIG_SND_SOC_XTFPGA_I2S is not set
# CONFIG_SND_SONICVIBES is not set
-# CONFIG_SND_SUN4I_CODEC is not set
-CONFIG_SND_TIMER=y
# CONFIG_SND_TRIDENT is not set
# CONFIG_SND_USB_US122L is not set
# CONFIG_SND_USB_USX2Y is not set
@@ -1315,19 +1140,17 @@ CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM_VMEMMAP=y
CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
CONFIG_SPI_BITBANG=m
-# CONFIG_SPI_FSL_SPI is not set
# CONFIG_SPI_GPIO is not set
CONFIG_SPI_PXA2XX=y
CONFIG_SPI_PXA2XX_PCI=y
-# CONFIG_STAGING_BOARD is not set
-# CONFIG_STK8BA50 is not set
+# CONFIG_STAGING_GASKET_FRAMEWORK is not set
# CONFIG_STMMAC_ETH is not set
# CONFIG_SUNDANCE is not set
# CONFIG_SUNGEM is not set
# CONFIG_SURFACE_PRO3_BUTTON is not set
-# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set
+CONFIG_SX9310=m
+CONFIG_SX932X=m
CONFIG_SYSCTL_EXCEPTION_TRACE=y
-# CONFIG_SYSTEMPORT is not set
CONFIG_SYSVIPC_COMPAT=y
# CONFIG_TCG_ATMEL is not set
# CONFIG_TCG_CRB is not set
@@ -1343,9 +1166,9 @@ CONFIG_TCG_TIS_CORE=y
CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
# CONFIG_THERMAL_GOV_FAIR_SHARE is not set
# CONFIG_THERMAL_GOV_POWER_ALLOCATOR is not set
-CONFIG_THERMAL_OF=y
CONFIG_THERMAL_WRITABLE_TRIPS=y
# CONFIG_THINKPAD_ACPI is not set
+CONFIG_THREAD_INFO_IN_TASK=y
# CONFIG_THUNDERBOLT is not set
# CONFIG_THUNDER_NIC_BGX is not set
# CONFIG_THUNDER_NIC_PF is not set
@@ -1358,11 +1181,6 @@ CONFIG_TIGON3=m
# CONFIG_TOSHIBA_BT_RFKILL is not set
# CONFIG_TOSHIBA_HAPS is not set
# CONFIG_TOSHIBA_WMI is not set
-# CONFIG_TOUCHSCREEN_AR1021_I2C is not set
-# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set
-# CONFIG_TOUCHSCREEN_EGALAX is not set
-CONFIG_TOUCHSCREEN_ELAN=y
-# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set
CONFIG_TOUCHSCREEN_MELFAS_MIP4=y
CONFIG_TOUCHSCREEN_RM_TS=y
CONFIG_TOUCHSCREEN_WDT87XX_I2C=y
@@ -1372,31 +1190,6 @@ CONFIG_UCS2_STRING=y
# CONFIG_UNISYSSPAR is not set
# CONFIG_UPROBE_EVENT is not set
# CONFIG_USB_AMD5536UDC is not set
-# CONFIG_USB_AUDIO is not set
-# CONFIG_USB_BDC_UDC is not set
-# CONFIG_USB_CDC_COMPOSITE is not set
-CONFIG_USB_CONFIGFS=m
-# CONFIG_USB_CONFIGFS_ACM is not set
-# CONFIG_USB_CONFIGFS_ECM is not set
-# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set
-# CONFIG_USB_CONFIGFS_EEM is not set
-# CONFIG_USB_CONFIGFS_F_ACC is not set
-CONFIG_USB_CONFIGFS_F_FS=y
-# CONFIG_USB_CONFIGFS_F_HID is not set
-# CONFIG_USB_CONFIGFS_F_LB_SS is not set
-# CONFIG_USB_CONFIGFS_F_MIDI is not set
-# CONFIG_USB_CONFIGFS_F_MTP is not set
-# CONFIG_USB_CONFIGFS_F_PRINTER is not set
-# CONFIG_USB_CONFIGFS_F_UAC1 is not set
-# CONFIG_USB_CONFIGFS_F_UAC2 is not set
-# CONFIG_USB_CONFIGFS_F_UVC is not set
-# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set
-# CONFIG_USB_CONFIGFS_NCM is not set
-# CONFIG_USB_CONFIGFS_OBEX is not set
-# CONFIG_USB_CONFIGFS_RNDIS is not set
-# CONFIG_USB_CONFIGFS_SERIAL is not set
-CONFIG_USB_CONFIGFS_UEVENT=y
-# CONFIG_USB_DUMMY_HCD is not set
# CONFIG_USB_DWC2 is not set
CONFIG_USB_DWC3=m
# CONFIG_USB_DWC3_DUAL_ROLE is not set
@@ -1407,53 +1200,22 @@ CONFIG_USB_DWC3_PCI=m
# CONFIG_USB_EG20T is not set
# CONFIG_USB_EHCI_HCD_PLATFORM is not set
CONFIG_USB_EHCI_PCI=y
-# CONFIG_USB_ETH is not set
-# CONFIG_USB_FOTG210_UDC is not set
-# CONFIG_USB_FUNCTIONFS is not set
-CONFIG_USB_F_FS=m
CONFIG_USB_GADGET=m
-# CONFIG_USB_GADGETFS is not set
-# CONFIG_USB_GADGET_DEBUG is not set
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_DEBUG_FS is not set
-CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
-CONFIG_USB_GADGET_VBUS_DRAW=2
-# CONFIG_USB_GADGET_XILINX is not set
# CONFIG_USB_GOKU is not set
-# CONFIG_USB_GR_UDC is not set
-# CONFIG_USB_G_ACM_MS is not set
-# CONFIG_USB_G_DBGP is not set
-# CONFIG_USB_G_HID is not set
-# CONFIG_USB_G_MULTI is not set
-# CONFIG_USB_G_NCM is not set
-# CONFIG_USB_G_PRINTER is not set
-# CONFIG_USB_G_SERIAL is not set
-# CONFIG_USB_G_WEBCAM is not set
-CONFIG_USB_LIBCOMPOSITE=m
-# CONFIG_USB_M66592 is not set
-# CONFIG_USB_MASS_STORAGE is not set
-# CONFIG_USB_MIDI_GADGET is not set
-# CONFIG_USB_MV_U3D is not set
-# CONFIG_USB_MV_UDC is not set
-# CONFIG_USB_NET2272 is not set
# CONFIG_USB_NET2280 is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PCI=y
# CONFIG_USB_OHCI_HCD_PLATFORM is not set
# CONFIG_USB_PHY is not set
-# CONFIG_USB_PXA27X is not set
-# CONFIG_USB_R8A66597 is not set
CONFIG_USB_STORAGE_REALTEK=y
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PCI=y
CONFIG_USB_XHCI_PLATFORM=m
-# CONFIG_USB_ZERO is not set
# CONFIG_USERIO is not set
CONFIG_USER_STACKTRACE_SUPPORT=y
# CONFIG_V4L_MEM2MEM_DRIVERS is not set
# CONFIG_V4L_PLATFORM_DRIVERS is not set
-# CONFIG_VF610_ADC is not set
# CONFIG_VGACON_SOFT_SCROLLBACK is not set
# CONFIG_VGA_ARB is not set
CONFIG_VGA_CONSOLE=y
@@ -1463,7 +1225,7 @@ CONFIG_VGA_CONSOLE=y
# CONFIG_VIA_WDT is not set
CONFIG_VIDEOBUF2_CORE=m
CONFIG_VIDEOBUF2_MEMOPS=m
-CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEO_CROS_EC_CEC=m
CONFIG_VIDEO_DEV=y
# CONFIG_VIDEO_SOLO6X10 is not set
# CONFIG_VIDEO_TW68 is not set
@@ -1479,7 +1241,6 @@ CONFIG_VIRTIO_NET=m
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
CONFIG_VIRTUALIZATION=y
-# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
CONFIG_VIRT_TO_BUS=y
# CONFIG_VM86 is not set
# CONFIG_VME_BUS is not set
@@ -1493,7 +1254,6 @@ CONFIG_VIRT_TO_BUS=y
# CONFIG_W83877F_WDT is not set
# CONFIG_W83977F_WDT is not set
# CONFIG_WAFER_WDT is not set
-CONFIG_WANT_DEV_COREDUMP=y
# CONFIG_WDTPCI is not set
# CONFIG_WIL6210 is not set
CONFIG_X86=y
@@ -1511,11 +1271,11 @@ CONFIG_X86_CMPXCHG64=y
CONFIG_X86_CPUID=y
CONFIG_X86_DEBUGCTLMSR=y
CONFIG_X86_DEBUG_FPU=y
-# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set
CONFIG_X86_DEV_DMA_OPS=y
CONFIG_X86_DIRECT_GBPAGES=y
CONFIG_X86_ESPFIX64=y
CONFIG_X86_EXTENDED_PLATFORM=y
+CONFIG_X86_FAST_FEATURE_TESTS=y
CONFIG_X86_FEATURE_NAMES=y
# CONFIG_X86_GOLDFISH is not set
CONFIG_X86_INTERNODE_CACHE_SHIFT=6
@@ -1554,7 +1314,6 @@ CONFIG_X86_VSYSCALL_EMULATION=y
# CONFIG_XZ_DEC_ARMTHUMB is not set
CONFIG_XZ_DEC_BCJ=y
CONFIG_XZ_DEC_X86=y
-# CONFIG_ZD1211RW is not set
CONFIG_ZONE_DMA=y
CONFIG_ZONE_DMA32=y
CONFIG_ZONE_DMA_FLAG=1
diff --git a/chromeos/scripts/kernelconfig b/chromeos/scripts/kernelconfig
index 102b43c66d3184..cd6d3f1e23f4ba 100755
--- a/chromeos/scripts/kernelconfig
+++ b/chromeos/scripts/kernelconfig
@@ -173,7 +173,7 @@ main() {
# Set up variables the build func expects.
local kerneldir=$(pwd)
local confdir="${kerneldir}/chromeos/config"
- local archs=( x86_64 i386 armel arm64 )
+ local archs=( x86_64 armel arm64 )
local bindir="${kerneldir}/chromeos/scripts"
local base_conf="${confdir}/base.config"
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index e5b5721809e21d..b524f702e658cb 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -73,11 +73,9 @@ static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
- unsigned int bsize)
+static inline void ablkcipher_done_slow(struct ablkcipher_walk *walk,
+ unsigned int n)
{
- unsigned int n = bsize;
-
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(&walk->out);
@@ -89,17 +87,13 @@ static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
n -= len_this_page;
scatterwalk_start(&walk->out, sg_next(walk->out.sg));
}
-
- return bsize;
}
-static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
- unsigned int n)
+static inline void ablkcipher_done_fast(struct ablkcipher_walk *walk,
+ unsigned int n)
{
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
static int ablkcipher_walk_next(struct ablkcipher_request *req,
@@ -109,39 +103,40 @@ int ablkcipher_walk_done(struct ablkcipher_request *req,
struct ablkcipher_walk *walk, int err)
{
struct crypto_tfm *tfm = req->base.tfm;
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
- n = ablkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = ablkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW))) {
+ ablkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ ablkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(req->base.flags);
return ablkcipher_walk_next(req, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != req->info)
memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
kfree(walk->iv_buffer);
-
return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);
@@ -389,6 +384,7 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -470,6 +466,7 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
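
The rewritten ablkcipher_walk_done() above (and its twin blkcipher_walk_done() later in this series) replaces the old habit of threading a residual byte count through walk->total with two explicit quantities: n, the bytes processed in this step, and more, whether anything remains. A minimal userspace sketch of just that accounting, with the crypto machinery stubbed out and all names invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for the walk state; field names mimic the kernel's. */
    struct toy_walk {
        unsigned int nbytes;    /* bytes mapped for this step */
        unsigned int total;     /* bytes left in the whole request */
    };

    /*
     * err >= 0 means "err bytes were left unprocessed in this step";
     * returns true when another step is needed (walk->total != 0).
     */
    static bool toy_walk_done(struct toy_walk *walk, int err)
    {
        unsigned int n = walk->nbytes - err;    /* bytes processed */

        walk->total -= n;
        return walk->total != 0;
    }

    int main(void)
    {
        struct toy_walk w = { .nbytes = 64, .total = 100 };

        while (1) {
            bool more = toy_walk_done(&w, 0);

            printf("remaining: %u\n", w.total);
            if (!more)
                break;
            w.nbytes = w.total < 64 ? w.total : 64; /* next step */
        }
        return 0;
    }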
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index b5953f1d1a1848..9f80295d673925 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -121,8 +121,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
int af_alg_release(struct socket *sock)
{
- if (sock->sk)
+ if (sock->sk) {
sock_put(sock->sk);
+ sock->sk = NULL;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);
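
Nulling sock->sk makes the release idempotent: a second af_alg_release() on the same socket now finds no reference to drop instead of calling sock_put() twice. The same pattern in miniature (the refcount and type names below are illustrative, not the kernel API):

    #include <assert.h>
    #include <stddef.h>

    struct toy_sk { int refcnt; };
    struct toy_socket { struct toy_sk *sk; };

    static void toy_sock_put(struct toy_sk *sk)
    {
        assert(sk->refcnt > 0); /* a second put would underflow */
        sk->refcnt--;
    }

    static int toy_release(struct toy_socket *sock)
    {
        if (sock->sk) {
            toy_sock_put(sock->sk);
            sock->sk = NULL;    /* the fix: forget the reference */
        }
        return 0;
    }

    int main(void)
    {
        struct toy_sk sk = { .refcnt = 1 };
        struct toy_socket sock = { .sk = &sk };

        toy_release(&sock);
        toy_release(&sock); /* harmless now; a double put before the fix */
        return sk.refcnt;   /* 0 */
    }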
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 6978ad86e516a3..595c4f3657ffd0 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -85,17 +85,17 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
unsigned int alignmask = walk->alignmask;
- unsigned int nbytes = walk->entrylen;
walk->data -= walk->offset;
- if (nbytes && walk->offset & alignmask && !err) {
- walk->offset = ALIGN(walk->offset, alignmask + 1);
- nbytes = min(nbytes,
- ((unsigned int)(PAGE_SIZE)) - walk->offset);
- walk->entrylen -= nbytes;
+ if (walk->entrylen && (walk->offset & alignmask) && !err) {
+ unsigned int nbytes;
+ walk->offset = ALIGN(walk->offset, alignmask + 1);
+ nbytes = min(walk->entrylen,
+ (unsigned int)(PAGE_SIZE - walk->offset));
if (nbytes) {
+ walk->entrylen -= nbytes;
walk->data += walk->offset;
return nbytes;
}
@@ -115,7 +115,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
if (err)
return err;
- if (nbytes) {
+ if (walk->entrylen) {
walk->offset = 0;
walk->pg++;
return hash_walk_next(walk);
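
The reworked crypto_hash_walk_done() rounds the offset up to the alignment boundary and then clamps the step to what is left of the page, reading the remaining length from walk->entrylen rather than a stale local. The arithmetic on its own, as a hedged sketch (PAGE_SIZE and the variable names only mimic the kernel's):

    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned int alignmask = 7;     /* 8-byte alignment */
        unsigned int offset = 13;       /* misaligned offset into the page */
        unsigned int entrylen = 10000;  /* bytes left in this sg entry */
        unsigned int nbytes;

        offset = ALIGN(offset, alignmask + 1);      /* 13 -> 16 */
        nbytes = entrylen < PAGE_SIZE - offset ?
                 entrylen : PAGE_SIZE - offset;     /* clamp to the page */

        printf("offset=%u nbytes=%u\n", offset, nbytes); /* 16, 4080 */
        return 0;
    }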
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 55a354d572513b..5c25005ff39801 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -58,14 +58,22 @@ int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key,
return -EINVAL;
if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
return -EINVAL;
- if (RTA_PAYLOAD(rta) < sizeof(*param))
+
+ /*
+ * RTA_OK() didn't align the rtattr's payload when validating that it
+ * fits in the buffer. Yet, the keys should start on the next 4-byte
+ * aligned boundary. To avoid confusion, require that the rtattr
+ * payload be exactly the param struct, which has a 4-byte aligned size.
+ */
+ if (RTA_PAYLOAD(rta) != sizeof(*param))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(*param) % RTA_ALIGNTO);
param = RTA_DATA(rta);
keys->enckeylen = be32_to_cpu(param->enckeylen);
- key += RTA_ALIGN(rta->rta_len);
- keylen -= RTA_ALIGN(rta->rta_len);
+ key += rta->rta_len;
+ keylen -= rta->rta_len;
if (keylen < keys->enckeylen)
return -EINVAL;
@@ -108,6 +116,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
CRYPTO_TFM_RES_MASK);
out:
+ memzero_explicit(&keys, sizeof(keys));
return err;
badkey:
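
crypto_authenc_extractkeys() parses a key blob laid out as an rtattr header, a 4-byte big-endian enckeylen parameter, the authentication key, and finally the encryption key; because sizeof(*param) is already 4-byte aligned, requiring the payload to be exactly that size lets the code step past rta->rta_len without RTA_ALIGN(). A hedged sketch of the layout and offset arithmetic (hand-rolled structs stand in for the rtnetlink macros):

    #include <arpa/inet.h>  /* htonl(), ntohl() */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Mimics struct rtattr + crypto_authenc_key_param; illustrative only. */
    struct toy_rta { uint16_t rta_len; uint16_t rta_type; };
    struct toy_param { uint32_t enckeylen; };   /* big-endian on the wire */

    int main(void)
    {
        uint8_t blob[64];
        struct toy_rta rta = {
            .rta_len = sizeof(struct toy_rta) + sizeof(struct toy_param),
            .rta_type = 1,  /* CRYPTO_AUTHENC_KEYA_PARAM */
        };
        uint32_t enckeylen_be = htonl(16);
        const uint8_t *keys;

        memcpy(blob, &rta, sizeof(rta));
        memcpy(blob + sizeof(rta), &enckeylen_be, sizeof(enckeylen_be));

        /*
         * Payload is exactly sizeof(param), and that size is 4-byte
         * aligned, so the keys start right after it with no padding.
         */
        keys = blob + rta.rta_len;
        printf("keys start at offset %td; last %u bytes are the enc key\n",
               keys - blob, ntohl(enckeylen_be));
        return 0;
    }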
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 52154ef21b5e84..5fdf3e532310e2 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 *
CRYPTO_TFM_RES_MASK);
out:
+ memzero_explicit(&keys, sizeof(keys));
return err;
badkey:
@@ -275,7 +276,7 @@ static void authenc_esn_verify_ahash_done(struct crypto_async_request *areq,
struct aead_request *req = areq->data;
err = err ?: crypto_authenc_esn_decrypt_tail(req, 0);
- aead_request_complete(req, err);
+ authenc_esn_request_complete(req, err);
}
static int crypto_authenc_esn_decrypt(struct aead_request *req)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index dca7bc87dad9d3..d524f838eb100a 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -71,19 +71,18 @@ static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
return max(start, end_page);
}
-static inline unsigned int blkcipher_done_slow(struct blkcipher_walk *walk,
- unsigned int bsize)
+static inline void blkcipher_done_slow(struct blkcipher_walk *walk,
+ unsigned int bsize)
{
u8 *addr;
addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
addr = blkcipher_get_spot(addr, bsize);
scatterwalk_copychunks(addr, &walk->out, bsize, 1);
- return bsize;
}
-static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
- unsigned int n)
+static inline void blkcipher_done_fast(struct blkcipher_walk *walk,
+ unsigned int n)
{
if (walk->flags & BLKCIPHER_WALK_COPY) {
blkcipher_map_dst(walk);
@@ -97,49 +96,48 @@ static inline unsigned int blkcipher_done_fast(struct blkcipher_walk *walk,
scatterwalk_advance(&walk->in, n);
scatterwalk_advance(&walk->out, n);
-
- return n;
}
int blkcipher_walk_done(struct blkcipher_desc *desc,
struct blkcipher_walk *walk, int err)
{
- unsigned int nbytes = 0;
+ unsigned int n; /* bytes processed */
+ bool more;
- if (likely(err >= 0)) {
- unsigned int n = walk->nbytes - err;
+ if (unlikely(err < 0))
+ goto finish;
- if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
- n = blkcipher_done_fast(walk, n);
- else if (WARN_ON(err)) {
- err = -EINVAL;
- goto err;
- } else
- n = blkcipher_done_slow(walk, n);
+ n = walk->nbytes - err;
+ walk->total -= n;
+ more = (walk->total != 0);
- nbytes = walk->total - n;
- err = 0;
+ if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW))) {
+ blkcipher_done_fast(walk, n);
+ } else {
+ if (WARN_ON(err)) {
+ /* unexpected case; didn't process all bytes */
+ err = -EINVAL;
+ goto finish;
+ }
+ blkcipher_done_slow(walk, n);
}
- scatterwalk_done(&walk->in, 0, nbytes);
- scatterwalk_done(&walk->out, 1, nbytes);
-
-err:
- walk->total = nbytes;
- walk->nbytes = nbytes;
+ scatterwalk_done(&walk->in, 0, more);
+ scatterwalk_done(&walk->out, 1, more);
- if (nbytes) {
+ if (more) {
crypto_yield(desc->flags);
return blkcipher_walk_next(desc, walk);
}
-
+ err = 0;
+finish:
+ walk->nbytes = 0;
if (walk->iv != desc->info)
memcpy(desc->info, walk->iv, walk->ivsize);
if (walk->buffer != walk->page)
kfree(walk->buffer);
if (walk->page)
free_page((unsigned long)walk->page);
-
return err;
}
EXPORT_SYMBOL_GPL(blkcipher_walk_done);
@@ -517,6 +515,7 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
diff --git a/crypto/cts.c b/crypto/cts.c
index e467ec0acf9f09..e65688d6a4caa2 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -137,8 +137,8 @@ static int crypto_cts_encrypt(struct blkcipher_desc *desc,
lcldesc.info = desc->info;
lcldesc.flags = desc->flags;
- if (tot_blocks == 1) {
- err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, bsize);
+ if (tot_blocks <= 1) {
+ err = crypto_blkcipher_encrypt_iv(&lcldesc, dst, src, nbytes);
} else if (nbytes <= bsize * 2) {
err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes);
} else {
@@ -232,8 +232,8 @@ static int crypto_cts_decrypt(struct blkcipher_desc *desc,
lcldesc.info = desc->info;
lcldesc.flags = desc->flags;
- if (tot_blocks == 1) {
- err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, bsize);
+ if (tot_blocks <= 1) {
+ err = crypto_blkcipher_decrypt_iv(&lcldesc, dst, src, nbytes);
} else if (nbytes <= bsize * 2) {
err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes);
} else {
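
With the dispatch changed to tot_blocks <= 1 and the true nbytes passed through, a request of exactly one block is encrypted as plain CBC over just those bytes instead of a hard-coded bsize. A sketch of the three-way dispatch, assuming tot_blocks is the round-up block count as computed earlier in these functions (not shown in this hunk):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    static const char *cts_dispatch(unsigned int nbytes, unsigned int bsize)
    {
        unsigned int tot_blocks = DIV_ROUND_UP(nbytes, bsize);

        if (tot_blocks <= 1)        /* was: tot_blocks == 1 */
            return "plain CBC over nbytes";
        if (nbytes <= bsize * 2)
            return "two-block ciphertext-stealing path";
        return "CBC bulk + two-block CTS tail";
    }

    int main(void)
    {
        unsigned int bsize = 16;
        unsigned int cases[] = { 16, 17, 32, 100 };

        for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
            printf("nbytes=%3u -> %s\n", cases[i],
                   cts_dispatch(cases[i], bsize));
        return 0;
    }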
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 6f9908a7ebcbe1..d38a382b09eb6c 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -132,7 +132,12 @@ static inline int get_index128(be128 *block)
return x + ffz(val);
}
- return x;
+ /*
+ * If we get here, then x == 128 and we are incrementing the counter
+ * from all ones to all zeros. This means we must return index 127, i.e.
+ * the one corresponding to key2*{ 1,...,1 }.
+ */
+ return 127;
}
static int crypt(struct blkcipher_desc *d,
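
get_index128() returns the position of the lowest clear bit of the big-endian counter, i.e. the index of the tweak component that changes on increment. When the counter is all ones, incrementing wraps every bit to zero, and the right answer is the top index, 127, not the byte count. A standalone sketch over a 16-byte counter (helper name invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Index of the lowest zero bit of a 16-byte big-endian counter. */
    static int toy_get_index128(const uint8_t ctr[16])
    {
        int x = 0;

        /* Scan from the least significant byte (big-endian: byte 15). */
        for (int i = 15; i >= 0; i--) {
            if (ctr[i] != 0xff) {
                uint8_t v = ctr[i];
                int bit = 0;

                while (v & 1) { /* find the first zero bit */
                    v >>= 1;
                    bit++;
                }
                return x + bit;
            }
            x += 8;
        }
        return 127; /* all ones: wraps to zero, index 127 */
    }

    int main(void)
    {
        uint8_t ctr[16];

        memset(ctr, 0, sizeof(ctr));
        printf("%d\n", toy_get_index128(ctr)); /* 0 */
        memset(ctr, 0xff, sizeof(ctr));
        printf("%d\n", toy_get_index128(ctr)); /* 127, the fixed case */
        return 0;
    }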
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
index f654965f09338d..de81f716cf2646 100644
--- a/crypto/pcbc.c
+++ b/crypto/pcbc.c
@@ -52,7 +52,7 @@ static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
@@ -76,7 +76,7 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
u8 tmpbuf[bsize];
do {
@@ -89,8 +89,6 @@ static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
@@ -130,7 +128,7 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
do {
fn(crypto_cipher_tfm(tfm), dst, src);
@@ -142,8 +140,6 @@ static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
@@ -156,7 +152,7 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 *iv = walk->iv;
+ u8 * const iv = walk->iv;
u8 tmpbuf[bsize];
do {
@@ -169,8 +165,6 @@ static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
src += bsize;
} while ((nbytes -= bsize) >= bsize);
- memcpy(walk->iv, iv, bsize);
-
return nbytes;
}
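
Since iv aliases walk->iv and only its contents are updated, declaring it u8 *const documents that the chaining state lands in walk->iv directly, which is why the trailing memcpy(walk->iv, iv, bsize) was a self-copy and could go. In miniature:

    #include <stdio.h>

    int main(void)
    {
        unsigned char walk_iv[4] = { 0, 0, 0, 0 };
        unsigned char *const iv = walk_iv; /* alias; the pointer is fixed */

        iv[0] ^= 0x55;  /* chaining update lands in walk_iv directly */

        /* memcpy(walk_iv, iv, 4) would be a self-copy: nothing to do. */
        printf("%02x\n", walk_iv[0]);   /* 55 */
        return 0;
    }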
diff --git a/crypto/shash.c b/crypto/shash.c
index 5444b429e35d0e..4f89f78031e2b6 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
int err;
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
- buffer = kmalloc(absize, GFP_KERNEL);
+ buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
diff --git a/crypto/vmac.c b/crypto/vmac.c
index df76a816cfb22f..bb2fc787d61568 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -1,6 +1,10 @@
/*
- * Modified to interface to the Linux kernel
+ * VMAC: Message Authentication Code using Universal Hashing
+ *
+ * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
+ *
* Copyright (c) 2009, Intel Corporation.
+ * Copyright (c) 2018, Google Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -16,14 +20,15 @@
* Place - Suite 330, Boston, MA 02111-1307 USA.
*/
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
+/*
+ * Derived from:
+ * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
+ * This implementation is hereby placed in the public domain.
+ * The authors offer no warranty. Use at your own risk.
+ * Last modified: 17 APR 08, 1700 PDT
+ */
+#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
@@ -31,10 +36,36 @@
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
-#include <crypto/vmac.h>
#include <crypto/internal/hash.h>
/*
+ * User definable settings.
+ */
+#define VMAC_TAG_LEN 64
+#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
+#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
+#define VMAC_NHBYTES	128 /* Must be 2^i for some 3 < i < 13; standard = 128 */
+
+/* per-transform (per-key) context */
+struct vmac_tfm_ctx {
+ struct crypto_cipher *cipher;
+ u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
+ u64 polykey[2*VMAC_TAG_LEN/64];
+ u64 l3key[2*VMAC_TAG_LEN/64];
+};
+
+/* per-request context */
+struct vmac_desc_ctx {
+ union {
+ u8 partial[VMAC_NHBYTES]; /* partial block */
+ __le64 partial_words[VMAC_NHBYTES / 8];
+ };
+ unsigned int partial_size; /* size of the partial block */
+ bool first_block_processed;
+ u64 polytmp[2*VMAC_TAG_LEN/64]; /* running total of L2-hash */
+};
+
+/*
* Constants and masks
*/
#define UINT64_C(x) x##ULL
@@ -318,13 +349,6 @@ static void poly_step_func(u64 *ahi, u64 *alo,
} while (0)
#endif
-static void vhash_abort(struct vmac_ctx *ctx)
-{
- ctx->polytmp[0] = ctx->polykey[0] ;
- ctx->polytmp[1] = ctx->polykey[1] ;
- ctx->first_block_processed = 0;
-}
-
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
u64 rh, rl, t, z = 0;
@@ -364,280 +388,209 @@ static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
return rl;
}
-static void vhash_update(const unsigned char *m,
- unsigned int mbytes, /* Pos multiple of VMAC_NHBYTES */
- struct vmac_ctx *ctx)
+/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
+static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx,
+ const __le64 *mptr, unsigned int blocks)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- if (!mbytes)
- return;
-
- BUG_ON(mbytes % VMAC_NHBYTES);
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES; /* Must be non-zero */
-
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
-
- if (!ctx->first_block_processed) {
- ctx->first_block_processed = 1;
+ const u64 *kptr = tctx->nhkey;
+ const u64 pkh = tctx->polykey[0];
+ const u64 pkl = tctx->polykey[1];
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+ u64 rh, rl;
+
+ if (!dctx->first_block_processed) {
+ dctx->first_block_processed = true;
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
ADD128(ch, cl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
+ blocks--;
}
- while (i--) {
+ while (blocks--) {
nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
rh &= m62;
poly_step(ch, cl, pkh, pkl, rh, rl);
mptr += (VMAC_NHBYTES/sizeof(u64));
}
- ctx->polytmp[0] = ch;
- ctx->polytmp[1] = cl;
+ dctx->polytmp[0] = ch;
+ dctx->polytmp[1] = cl;
}
-static u64 vhash(unsigned char m[], unsigned int mbytes,
- u64 *tagl, struct vmac_ctx *ctx)
+static int vmac_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen)
{
- u64 rh, rl, *mptr;
- const u64 *kptr = (u64 *)ctx->nhkey;
- int i, remaining;
- u64 ch, cl;
- u64 pkh = ctx->polykey[0];
- u64 pkl = ctx->polykey[1];
-
- mptr = (u64 *)m;
- i = mbytes / VMAC_NHBYTES;
- remaining = mbytes % VMAC_NHBYTES;
-
- if (ctx->first_block_processed) {
- ch = ctx->polytmp[0];
- cl = ctx->polytmp[1];
- } else if (i) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- i--;
- } else if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), ch, cl);
- ch &= m62;
- ADD128(ch, cl, pkh, pkl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- goto do_l3;
- } else {/* Empty String */
- ch = pkh; cl = pkl;
- goto do_l3;
- }
-
- while (i--) {
- nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- mptr += (VMAC_NHBYTES/sizeof(u64));
- }
- if (remaining) {
- nh_16(mptr, kptr, 2*((remaining+15)/16), rh, rl);
- rh &= m62;
- poly_step(ch, cl, pkh, pkl, rh, rl);
- }
-
-do_l3:
- vhash_abort(ctx);
- remaining *= 8;
- return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining);
-}
+ struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+ __be64 out[2];
+ u8 in[16] = { 0 };
+ unsigned int i;
+ int err;
-static u64 vmac(unsigned char m[], unsigned int mbytes,
- const unsigned char n[16], u64 *tagl,
- struct vmac_ctx_t *ctx)
-{
- u64 *in_n, *out_p;
- u64 p, h;
- int i;
-
- in_n = ctx->__vmac_ctx.cached_nonce;
- out_p = ctx->__vmac_ctx.cached_aes;
-
- i = n[15] & 1;
- if ((*(u64 *)(n+8) != in_n[1]) || (*(u64 *)(n) != in_n[0])) {
- in_n[0] = *(u64 *)(n);
- in_n[1] = *(u64 *)(n+8);
- ((unsigned char *)in_n)[15] &= 0xFE;
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out_p, (unsigned char *)in_n);
-
- ((unsigned char *)in_n)[15] |= (unsigned char)(1-i);
+ if (keylen != VMAC_KEY_LEN) {
+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
}
- p = be64_to_cpup(out_p + i);
- h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx);
- return le64_to_cpu(p + h);
-}
-static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx)
-{
- u64 in[2] = {0}, out[2];
- unsigned i;
- int err = 0;
-
- err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN);
+ err = crypto_cipher_setkey(tctx->cipher, key, keylen);
if (err)
return err;
/* Fill nh key */
- ((unsigned char *)in)[0] = 0x80;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0x80;
+ for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->nhkey[i] = be64_to_cpu(out[0]);
+ tctx->nhkey[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
}
/* Fill poly key */
- ((unsigned char *)in)[0] = 0xC0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.polytmp[i] =
- ctx->__vmac_ctx.polykey[i] =
- be64_to_cpup(out) & mpoly;
- ctx->__vmac_ctx.polytmp[i+1] =
- ctx->__vmac_ctx.polykey[i+1] =
- be64_to_cpup(out+1) & mpoly;
- ((unsigned char *)in)[15] += 1;
+ in[0] = 0xC0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
+ tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
+ in[15]++;
}
/* Fill ip key */
- ((unsigned char *)in)[0] = 0xE0;
- in[1] = 0;
- for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) {
+ in[0] = 0xE0;
+ in[15] = 0;
+ for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
do {
- crypto_cipher_encrypt_one(ctx->child,
- (unsigned char *)out, (unsigned char *)in);
- ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out);
- ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1);
- ((unsigned char *)in)[15] += 1;
- } while (ctx->__vmac_ctx.l3key[i] >= p64
- || ctx->__vmac_ctx.l3key[i+1] >= p64);
+ crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
+ tctx->l3key[i] = be64_to_cpu(out[0]);
+ tctx->l3key[i+1] = be64_to_cpu(out[1]);
+ in[15]++;
+ } while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
}
- /* Invalidate nonce/aes cache and reset other elements */
- ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */
- ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */
- ctx->__vmac_ctx.first_block_processed = 0;
-
- return err;
+ return 0;
}
-static int vmac_setkey(struct crypto_shash *parent,
- const u8 *key, unsigned int keylen)
+static int vmac_init(struct shash_desc *desc)
{
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
- if (keylen != VMAC_KEY_LEN) {
- crypto_shash_set_flags(parent, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
-
- return vmac_set_key((u8 *)key, ctx);
-}
-
-static int vmac_init(struct shash_desc *pdesc)
-{
+ dctx->partial_size = 0;
+ dctx->first_block_processed = false;
+ memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
return 0;
}
-static int vmac_update(struct shash_desc *pdesc, const u8 *p,
- unsigned int len)
+static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- int expand;
- int min;
-
- expand = VMAC_NHBYTES - ctx->partial_size > 0 ?
- VMAC_NHBYTES - ctx->partial_size : 0;
-
- min = len < expand ? len : expand;
-
- memcpy(ctx->partial + ctx->partial_size, p, min);
- ctx->partial_size += min;
-
- if (len < expand)
- return 0;
-
- vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx);
- ctx->partial_size = 0;
-
- len -= expand;
- p += expand;
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ unsigned int n;
+
+ if (dctx->partial_size) {
+ n = min(len, VMAC_NHBYTES - dctx->partial_size);
+ memcpy(&dctx->partial[dctx->partial_size], p, n);
+ dctx->partial_size += n;
+ p += n;
+ len -= n;
+ if (dctx->partial_size == VMAC_NHBYTES) {
+ vhash_blocks(tctx, dctx, dctx->partial_words, 1);
+ dctx->partial_size = 0;
+ }
+ }
- if (len % VMAC_NHBYTES) {
- memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES),
- len % VMAC_NHBYTES);
- ctx->partial_size = len % VMAC_NHBYTES;
+ if (len >= VMAC_NHBYTES) {
+ n = round_down(len, VMAC_NHBYTES);
+ /* TODO: 'p' may be misaligned here */
+ vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
+ p += n;
+ len -= n;
}
- vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx);
+ if (len) {
+ memcpy(dctx->partial, p, len);
+ dctx->partial_size = len;
+ }
return 0;
}
-static int vmac_final(struct shash_desc *pdesc, u8 *out)
+static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
+ struct vmac_desc_ctx *dctx)
{
- struct crypto_shash *parent = pdesc->tfm;
- struct vmac_ctx_t *ctx = crypto_shash_ctx(parent);
- vmac_t mac;
- u8 nonce[16] = {};
-
- /* vmac() ends up accessing outside the array bounds that
- * we specify. In appears to access up to the next 2-word
- * boundary. We'll just be uber cautious and zero the
- * unwritten bytes in the buffer.
- */
- if (ctx->partial_size) {
- memset(ctx->partial + ctx->partial_size, 0,
- VMAC_NHBYTES - ctx->partial_size);
+ unsigned int partial = dctx->partial_size;
+ u64 ch = dctx->polytmp[0];
+ u64 cl = dctx->polytmp[1];
+
+ /* L1 and L2-hash the final block if needed */
+ if (partial) {
+ /* Zero-pad to next 128-bit boundary */
+ unsigned int n = round_up(partial, 16);
+ u64 rh, rl;
+
+ memset(&dctx->partial[partial], 0, n - partial);
+ nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
+ rh &= m62;
+ if (dctx->first_block_processed)
+ poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
+ rh, rl);
+ else
+ ADD128(ch, cl, rh, rl);
}
- mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
- memcpy(out, &mac, sizeof(vmac_t));
- memzero_explicit(&mac, sizeof(vmac_t));
- memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
- ctx->partial_size = 0;
+
+ /* L3-hash the 128-bit output of L2-hash */
+ return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
+}
+
+static int vmac_final(struct shash_desc *desc, u8 *out)
+{
+ const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+ struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
+ static const u8 nonce[16] = {}; /* TODO: this is insecure */
+ union {
+ u8 bytes[16];
+ __be64 pads[2];
+ } block;
+ int index;
+ u64 hash, pad;
+
+ /* Finish calculating the VHASH of the message */
+ hash = vhash_final(tctx, dctx);
+
+ /* Generate pseudorandom pad by encrypting the nonce */
+ memcpy(&block, nonce, 16);
+ index = block.bytes[15] & 1;
+ block.bytes[15] &= ~1;
+ crypto_cipher_encrypt_one(tctx->cipher, block.bytes, block.bytes);
+ pad = be64_to_cpu(block.pads[index]);
+
+ /* The VMAC is the sum of VHASH and the pseudorandom pad */
+ put_unaligned_le64(hash + pad, out);
return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
- struct crypto_cipher *cipher;
- struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = cipher;
+ tctx->cipher = cipher;
return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
- struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm);
- crypto_free_cipher(ctx->child);
+ struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
@@ -655,6 +608,10 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
if (IS_ERR(alg))
return PTR_ERR(alg);
+ err = -EINVAL;
+ if (alg->cra_blocksize != 16)
+ goto out_put_alg;
+
inst = shash_alloc_instance("vmac", alg);
err = PTR_ERR(inst);
if (IS_ERR(inst))
@@ -670,11 +627,12 @@ static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
- inst->alg.digestsize = sizeof(vmac_t);
- inst->alg.base.cra_ctxsize = sizeof(struct vmac_ctx_t);
+ inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
inst->alg.base.cra_init = vmac_init_tfm;
inst->alg.base.cra_exit = vmac_exit_tfm;
+ inst->alg.descsize = sizeof(struct vmac_desc_ctx);
+ inst->alg.digestsize = VMAC_TAG_LEN / 8;
inst->alg.init = vmac_init;
inst->alg.update = vmac_update;
inst->alg.final = vmac_final;
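
Taken together, the new vmac_final() computes the tag as the 64-bit sum of the VHASH of the message and a pseudorandom pad: the nonce's low bit selects a half of the cipher output and is cleared before encryption. A hedged sketch of only that final combination, with the block cipher stubbed as the identity permutation:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stub cipher: identity permutation, purely for illustration. */
    static void toy_encrypt_block(uint8_t out[16], const uint8_t in[16])
    {
        memcpy(out, in, 16);
    }

    static uint64_t be64(const uint8_t *p)
    {
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    /* tag = (VHASH(message) + pad(nonce)) mod 2^64 */
    static uint64_t toy_vmac_tag(uint64_t vhash, const uint8_t nonce[16])
    {
        uint8_t block[16], pads[16];
        int index = nonce[15] & 1;  /* selects an output half */

        memcpy(block, nonce, 16);
        block[15] &= ~1;            /* clear the selector bit */
        toy_encrypt_block(pads, block);
        return vhash + be64(pads + 8 * index);  /* wraps mod 2^64 */
    }

    int main(void)
    {
        uint8_t nonce[16] = { [15] = 1 };   /* odd: use the second half */
        uint64_t tag = toy_vmac_tag(0x1234, nonce);

        printf("tag = %016llx\n", (unsigned long long)tag);
        return 0;
    }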
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 0d0de47a65519e..91611486a62950 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -155,10 +155,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
static const struct lpss_device_desc byt_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX,
+ .prv_offset = 0x800,
};
static const struct lpss_device_desc bsw_pwm_dev_desc = {
.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+ .prv_offset = 0x800,
};
static const struct lpss_device_desc byt_uart_dev_desc = {
@@ -234,9 +236,11 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "INT33FC", },
/* Braswell LPSS devices */
+ { "80862286", LPSS_ADDR(lpss_dma_desc) },
{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
+ { "808622C0", LPSS_ADDR(lpss_dma_desc) },
{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },
/* Broadwell LPSS devices */
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index fdb80b4e405778..e5c8788191faec 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
{"PNP0200", 0}, /* AT DMA Controller */
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
+ {"SMB0001", 0}, /* ACPI SMBUS virtual device */
{"", 0},
};
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3919751a917a06..1cd1a377da15ea 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -987,6 +987,13 @@ void __init acpi_subsystem_init(void)
}
}
+static acpi_status acpi_bus_table_handler(u32 event, void *table, void *context)
+{
+ acpi_scan_table_handler(event, table, context);
+
+ return acpi_sysfs_table_handler(event, table, context);
+}
+
static int __init acpi_bus_init(void)
{
int result;
@@ -1031,6 +1038,8 @@ static int __init acpi_bus_init(void)
* _PDC control method may load dynamic SSDT tables,
* and we need to install the table handler before that.
*/
+ status = acpi_install_table_handler(acpi_bus_table_handler, NULL);
+
acpi_sysfs_init();
acpi_early_processor_set_pdc();
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 1396aeb5b23423..97927d162c7b58 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -202,11 +202,15 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
const union acpi_object *of_compatible, *obj;
+ acpi_status status;
int len, count;
int i, nval;
char *c;
- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
/* DT strings are all in lower case */
for (c = buf.pointer; *c != '\0'; c++)
*c = tolower(*c);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 9c11b50c19b9cc..1b9949c0a2667d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -80,6 +80,9 @@ bool acpi_queue_hotplug_work(struct work_struct *work);
void acpi_device_hotplug(struct acpi_device *adev, u32 src);
bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent);
+acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context);
+void acpi_scan_table_handler(u32 event, void *table, void *context);
+
/* --------------------------------------------------------------------------
Device Node Initialization / Removal
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index ae3fe4e642035b..3b0b4bd67b71b4 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
}
control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL
- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL
| OSC_PCI_EXPRESS_PME_CONTROL;
+ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
+ control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL;
+
if (pci_aer_available()) {
if (aer_acpi_firmware_first())
dev_info(&device->dev,
diff --git a/drivers/acpi/pmic/tps68470_pmic.c b/drivers/acpi/pmic/tps68470_pmic.c
index 7f3c567e816861..a083de507009e6 100644
--- a/drivers/acpi/pmic/tps68470_pmic.c
+++ b/drivers/acpi/pmic/tps68470_pmic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TI TPS68470 PMIC operation region driver
*
@@ -5,15 +6,6 @@
*
* Author: Rajmohan Mani <rajmohan.mani@intel.com>
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Based on drivers/acpi/pmic/intel_pmic* drivers
*/
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 1c2b846c577604..f28b4949cb9db1 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -131,6 +131,23 @@ void acpi_power_resources_list_free(struct list_head *list)
}
}
+static bool acpi_power_resource_is_dup(union acpi_object *package,
+ unsigned int start, unsigned int i)
+{
+ acpi_handle rhandle, dup;
+ unsigned int j;
+
+ /* The caller is expected to check the package element types */
+ rhandle = package->package.elements[i].reference.handle;
+ for (j = start; j < i; j++) {
+ dup = package->package.elements[j].reference.handle;
+ if (dup == rhandle)
+ return true;
+ }
+
+ return false;
+}
+
int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
struct list_head *list)
{
@@ -150,6 +167,11 @@ int acpi_extract_power_resources(union acpi_object *package, unsigned int start,
err = -ENODEV;
break;
}
+
+ /* Some ACPI tables contain duplicate power resource references */
+ if (acpi_power_resource_is_dup(package, start, i))
+ continue;
+
err = acpi_add_power_resource(rhandle);
if (err)
break;
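
acpi_power_resource_is_dup() is a quadratic scan back over the handles already taken from the package, which is cheap for the handful of references a power-resource package carries. The same dedup-by-handle loop in miniature, with ACPI handles reduced to plain pointers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Has handles[i] already appeared in handles[start..i-1]? */
    static bool is_dup(void *const *handles, unsigned int start,
                       unsigned int i)
    {
        for (unsigned int j = start; j < i; j++)
            if (handles[j] == handles[i])
                return true;
        return false;
    }

    int main(void)
    {
        int a, b;
        void *pkg[] = { &a, &b, &a }; /* third entry duplicates the first */
        unsigned int kept = 0;

        for (unsigned int i = 0; i < 3; i++) {
            if (is_dup(pkg, 0, i))
                continue;   /* skip, as the ACPI fix does */
            kept++;
        }
        printf("kept %u of 3 references\n", kept); /* 2 */
        return 0;
    }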
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 9825780a1cd258..fdc8dd282a0682 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -157,7 +157,7 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
status, NULL);
}
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
{
int ret;
@@ -168,7 +168,7 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
*/
if (event_flag)
acpi_processor_ppc_ost(pr->handle, 1);
- return 0;
+ return;
}
ret = acpi_processor_get_platform_limit(pr);
@@ -182,10 +182,8 @@ int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
else
acpi_processor_ppc_ost(pr->handle, 0);
}
- if (ret < 0)
- return (ret);
- else
- return cpufreq_update_policy(pr->id);
+ if (ret >= 0)
+ cpufreq_update_policy(pr->id);
}
int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index c6990b4ff3ac30..a9ed0aaf921061 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -480,6 +480,8 @@ static void acpi_device_del(struct acpi_device *device)
device_del(&device->dev);
}
+static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
+
static LIST_HEAD(acpi_device_del_list);
static DEFINE_MUTEX(acpi_device_del_lock);
@@ -500,6 +502,9 @@ static void acpi_device_del_work_fn(struct work_struct *work_not_used)
mutex_unlock(&acpi_device_del_lock);
+ blocking_notifier_call_chain(&acpi_reconfig_chain,
+ ACPI_RECONFIG_DEVICE_REMOVE, adev);
+
acpi_device_del(adev);
/*
* Drop references to all power resources that might have been
@@ -1433,7 +1438,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
acpi_bus_get_flags(device);
device->flags.match_driver = false;
device->flags.initialized = true;
- device->flags.visited = false;
+ acpi_device_clear_enumerated(device);
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
@@ -1703,15 +1708,20 @@ static void acpi_default_enumeration(struct acpi_device *device)
bool is_spi_i2c_slave = false;
/*
- * Do not enemerate SPI/I2C slaves as they will be enuerated by their
+ * Do not enumerate SPI/I2C slaves as they will be enumerated by their
* respective parents.
*/
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
acpi_dev_free_resource_list(&resource_list);
- if (!is_spi_i2c_slave)
+ if (!is_spi_i2c_slave) {
acpi_create_platform_device(device);
+ acpi_device_set_enumerated(device);
+ } else {
+ blocking_notifier_call_chain(&acpi_reconfig_chain,
+ ACPI_RECONFIG_DEVICE_ADD, device);
+ }
}
static const struct acpi_device_id generic_device_ids[] = {
@@ -1778,7 +1788,7 @@ static void acpi_bus_attach(struct acpi_device *device)
acpi_bus_get_status(device);
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
- device->flags.visited = false;
+ acpi_device_clear_enumerated(device);
device->flags.power_manageable = 0;
return;
}
@@ -1793,7 +1803,7 @@ static void acpi_bus_attach(struct acpi_device *device)
device->flags.initialized = true;
}
- device->flags.visited = false;
+
ret = acpi_scan_attach_handler(device);
if (ret < 0)
return;
@@ -1807,7 +1817,6 @@ static void acpi_bus_attach(struct acpi_device *device)
if (!ret && device->pnp.type.platform_id)
acpi_default_enumeration(device);
}
- device->flags.visited = true;
ok:
list_for_each_entry(child, &device->children, node)
@@ -1899,7 +1908,7 @@ void acpi_bus_trim(struct acpi_device *adev)
*/
acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
adev->flags.initialized = false;
- adev->flags.visited = false;
+ acpi_device_clear_enumerated(adev);
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
@@ -1943,6 +1952,8 @@ static int acpi_bus_scan_fixed(void)
return result < 0 ? result : 0;
}
+static bool acpi_scan_initialized;
+
int __init acpi_scan_init(void)
{
int result;
@@ -1986,6 +1997,8 @@ int __init acpi_scan_init(void)
acpi_update_all_gpes();
+ acpi_scan_initialized = true;
+
out:
mutex_unlock(&acpi_scan_lock);
return result;
@@ -2029,3 +2042,57 @@ int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
return count;
}
+
+struct acpi_table_events_work {
+ struct work_struct work;
+ void *table;
+ u32 event;
+};
+
+static void acpi_table_events_fn(struct work_struct *work)
+{
+ struct acpi_table_events_work *tew;
+
+ tew = container_of(work, struct acpi_table_events_work, work);
+
+ if (tew->event == ACPI_TABLE_EVENT_LOAD) {
+ acpi_scan_lock_acquire();
+ acpi_bus_scan(ACPI_ROOT_OBJECT);
+ acpi_scan_lock_release();
+ }
+
+ kfree(tew);
+}
+
+void acpi_scan_table_handler(u32 event, void *table, void *context)
+{
+ struct acpi_table_events_work *tew;
+
+ if (!acpi_scan_initialized)
+ return;
+
+ if (event != ACPI_TABLE_EVENT_LOAD)
+ return;
+
+ tew = kmalloc(sizeof(*tew), GFP_KERNEL);
+ if (!tew)
+ return;
+
+ INIT_WORK(&tew->work, acpi_table_events_fn);
+ tew->table = table;
+ tew->event = event;
+
+ schedule_work(&tew->work);
+}
+
+int acpi_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
+}
+EXPORT_SYMBOL(acpi_reconfig_notifier_register);
+
+int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
+}
+EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
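
For context on how the new chain is meant to be consumed, here is a minimal sketch of a client driver reacting to the ACPI_RECONFIG_* events exported above. Only acpi_reconfig_notifier_register()/unregister() and the event codes come from this patch; demo_acpi_reconfig_cb and demo_nb are hypothetical names.

#include <linux/acpi.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int demo_acpi_reconfig_cb(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct acpi_device *adev = data;

	switch (event) {
	case ACPI_RECONFIG_DEVICE_ADD:
		pr_info("demo: ACPI device %s added\n", dev_name(&adev->dev));
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		pr_info("demo: ACPI device %s removed\n", dev_name(&adev->dev));
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_acpi_reconfig_cb,
};

/* probe:  acpi_reconfig_notifier_register(&demo_nb);   */
/* remove: acpi_reconfig_notifier_unregister(&demo_nb); */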
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 02a7088f953c63..7d5658e7dd2ce3 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -124,6 +124,12 @@ void __init acpi_nvs_nosave_s3(void)
nvs_nosave_s3 = true;
}
+static int __init init_nvs_save_s3(const struct dmi_system_id *d)
+{
+ nvs_nosave_s3 = false;
+ return 0;
+}
+
/*
* ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
* user to request that behavior by using the 'acpi_old_suspend_ordering'
@@ -318,6 +324,27 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
},
},
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Asus 1025C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
+ },
+ },
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=189431
+ * Lenovo G50-45 is a platform later than 2012, but needs nvs memory
+ * saving during S3.
+ */
+ {
+ .callback = init_nvs_save_s3,
+ .ident = "Lenovo G50-45",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
+ },
+ },
{},
};
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 4b3a9e27f1b611..358165e9f5b8dd 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -378,8 +378,7 @@ static void acpi_table_attr_init(struct acpi_table_attr *table_attr,
return;
}
-static acpi_status
-acpi_sysfs_table_handler(u32 event, void *table, void *context)
+acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
{
struct acpi_table_attr *table_attr;
@@ -452,9 +451,8 @@ static int acpi_tables_sysfs_init(void)
kobject_uevent(tables_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
- status = acpi_install_table_handler(acpi_sysfs_table_handler, NULL);
- return ACPI_FAILURE(status) ? -EINVAL : 0;
+ return 0;
err_dynamic_tables:
kobject_put(tables_kobj);
err:
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5a6a011354704b..5f1f049063dd21 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -619,8 +619,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
static int ahci_pci_reset_controller(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host->dev);
+ int rc;
- ahci_reset_controller(host);
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
struct ahci_host_priv *hpriv = host->private_data;
@@ -1229,6 +1232,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
return strcmp(buf, dmi->driver_data) < 0;
}
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ /* Various Lenovo 50 series have LPM issues with older BIOSen */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+ },
+ .driver_data = "20180406", /* 1.31 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+ },
+ .driver_data = "20180420", /* 1.28 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+ },
+ .driver_data = "20180315", /* 1.33 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+ },
+ /*
+ * Note date based on release notes, 2.35 has been
+ * reported to be good, but I've been unable to get
+ * a hold of the reporter to get the DMI BIOS date.
+ * TODO: fix this.
+ */
+ .driver_data = "20180310", /* 2.35 */
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ int year, month, date;
+ char buf[9];
+
+ if (!dmi)
+ return false;
+
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+ return strcmp(buf, dmi->driver_data) < 0;
+}
+
static bool ahci_broken_online(struct pci_dev *pdev)
{
#define ENCODE_BUSDEVFN(bus, slot, func) \
@@ -1588,6 +1644,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"quirky BIOS, skipping spindown on poweroff\n");
}
+ if (ahci_broken_lpm(pdev)) {
+ pi.flags |= ATA_FLAG_NO_LPM;
+ dev_warn(&pdev->dev,
+ "BIOS update required for Link Power Management support\n");
+ }
+
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
dev_warn(&pdev->dev,
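
The ahci_broken_lpm() helper added above relies on DMI BIOS dates rendered as zero-padded YYYYMMDD strings, which compare correctly with strcmp(). A minimal sketch of that cutoff test, assuming dmi_get_date() from <linux/dmi.h> (bios_older_than is a hypothetical helper name):

#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/string.h>

static bool bios_older_than(const char *cutoff /* "YYYYMMDD" */)
{
	int year, month, day;
	char buf[9];

	dmi_get_date(DMI_BIOS_DATE, &year, &month, &day);
	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, day);
	/* zero-padded fixed-width dates sort lexicographically */
	return strcmp(buf, cutoff) < 0;
}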
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 9628fa13175788..8116cb2fef2dad 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2113,6 +2113,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
deto = 20;
}
+ /* Clear the dito, mdat and deto fields before setting new values */
+ devslp &= ~GENMASK_ULL(24, 2);
devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
(mdat << PORT_DEVSLP_MDAT_OFFSET) |
(deto << PORT_DEVSLP_DETO_OFFSET) |
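
The one-line libahci fix above is a classic read-modify-write repair: the DITO/MDAT/DETO field (bits 24:2 of the DEVSLP register) must be cleared before new values are OR-ed in, otherwise stale bits from a previous configuration survive. A sketch of the resulting pattern, assuming the PORT_DEVSLP_*_OFFSET constants from drivers/ata/ahci.h (the patch itself uses GENMASK_ULL; devslp_update is a hypothetical name):

#include <linux/bitops.h>

static u32 devslp_update(u32 devslp, u32 dito, u32 mdat, u32 deto)
{
	devslp &= ~GENMASK(24, 2);	/* wipe the old field contents */
	devslp |= (dito << PORT_DEVSLP_DITO_OFFSET) |
		  (mdat << PORT_DEVSLP_MDAT_OFFSET) |
		  (deto << PORT_DEVSLP_DETO_OFFSET);
	return devslp;
}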
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 8b77e51db8f2e0..4dd800f7404c56 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2209,6 +2209,9 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->horkage |= ATA_HORKAGE_NOLPM;
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
if (dev->horkage & ATA_HORKAGE_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
@@ -4241,9 +4244,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
- /* Sandisk devices which are known to not handle LPM well */
- { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
-
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4291,6 +4291,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SAMSUNG*MZ7KM*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
{ "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 75cced210b2a0b..7db76b5c7ada3e 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2198,12 +2198,16 @@ static void ata_eh_link_autopsy(struct ata_link *link)
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
- /* SENSE_VALID trumps dev/unknown error and revalidation */
+ /*
+ * SENSE_VALID trumps dev/unknown error and revalidation. Upper
+ * layers will determine whether the command is worth retrying
+ * based on the sense data and device class/type. Otherwise,
+ * determine directly if the command is worth retrying using its
+ * error mask and flags.
+ */
if (qc->flags & ATA_QCFLAG_SENSE_VALID)
qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
-
- /* determine whether the command is worth retrying */
- if (ata_eh_worth_retry(qc))
+ else if (ata_eh_worth_retry(qc))
qc->flags |= ATA_QCFLAG_RETRY;
/* accumulate error info */
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index f3a65a3140d3c7..0ad96c647541a9 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -34,7 +34,7 @@ struct zpodd {
static int eject_tray(struct ata_device *dev)
{
struct ata_taskfile tf;
- const char cdb[] = { GPCMD_START_STOP_UNIT,
+ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_START_STOP_UNIT,
0, 0, 0,
0x02, /* LoEj */
0, 0, 0, 0, 0, 0, 0,
@@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc = (void *)(buf + 8);
struct ata_taskfile tf;
- char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 8804127b108c0f..21b80f5ee09207 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -875,7 +875,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
int ret = 0;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
+ return irq;
+ if (!irq)
return -EINVAL;
priv = devm_kzalloc(&pdev->dev, sizeof(struct sata_rcar_priv),
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 0f5cb37636bcc1..010581e8bee05b 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 6eab52b92e0173..94712e1c5cf9ad 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1149,8 +1149,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
}
-static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
- int offset, int swap)
+static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
+ int swap)
{
unsigned char buf[ZEPROM_SIZE];
struct zatm_dev *zatm_dev;
@@ -1481,6 +1481,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
if (copy_from_user(&info,
&((struct zatm_pool_req __user *) arg)->info,
sizeof(info))) return -EFAULT;
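
The zatm hunk above shows the standard Spectre-v1 hardening sequence: bounds-check a user-controlled index, then clamp it with array_index_nospec() so the CPU cannot use a speculated out-of-range value. A minimal sketch (demo_clamp_pool is a hypothetical name):

#include <linux/nospec.h>

static int demo_clamp_pool(int pool /* user-controlled */)
{
	if (pool < 0 || pool > ZATM_LAST_POOL)
		return -EINVAL;
	/* clamp under speculation; architecturally a no-op here */
	pool = array_index_nospec(pool, ZATM_LAST_POOL + 1);
	return pool;	/* now safe to use as an array index */
}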
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 0346e46e2871e2..ecca4ae248e067 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -33,6 +33,9 @@ static struct kset *system_kset;
#define to_drv_attr(_attr) container_of(_attr, struct driver_attribute, attr)
+#define DRIVER_ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store) \
+ struct driver_attribute driver_attr_##_name = \
+ __ATTR_IGNORE_LOCKDEP(_name, _mode, _show, _store)
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
@@ -198,7 +201,7 @@ static ssize_t unbind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
-static DRIVER_ATTR_WO(unbind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(unbind, S_IWUSR, NULL, unbind_store);
/*
* Manually attach a device to a driver.
@@ -234,7 +237,7 @@ static ssize_t bind_store(struct device_driver *drv, const char *buf,
bus_put(bus);
return err;
}
-static DRIVER_ATTR_WO(bind);
+static DRIVER_ATTR_IGNORE_LOCKDEP(bind, S_IWUSR, NULL, bind_store);
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
{
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 4a7b1b65ba100c..7f6c44e80aed79 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1328,7 +1328,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
- return NULL;
+ return ERR_PTR(-ENOMEM);
dir->class = class;
kobject_init(&dir->kobj, &class_dir_ktype);
@@ -1338,7 +1338,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name);
if (retval < 0) {
kobject_put(&dir->kobj);
- return NULL;
+ return ERR_PTR(retval);
}
return &dir->kobj;
}
@@ -1431,6 +1431,8 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
return;
mutex_lock(&gdp_mutex);
+ if (!kobject_has_children(glue_dir))
+ kobject_del(glue_dir);
kobject_put(glue_dir);
mutex_unlock(&gdp_mutex);
}
@@ -1645,6 +1647,10 @@ int device_add(struct device *dev)
parent = get_device(dev->parent);
kobj = get_device_parent(dev, parent);
+ if (IS_ERR(kobj)) {
+ error = PTR_ERR(kobj);
+ goto parent_error;
+ }
if (kobj)
dev->kobj.parent = kobj;
@@ -1743,6 +1749,7 @@ done:
kobject_del(&dev->kobj);
Error:
cleanup_glue_dir(dev, glue_dir);
+parent_error:
put_device(parent);
name_error:
kfree(dev->p);
@@ -2561,6 +2568,11 @@ int device_move(struct device *dev, struct device *new_parent,
device_pm_lock();
new_parent = get_device(new_parent);
new_parent_kobj = get_device_parent(dev, new_parent);
+ if (IS_ERR(new_parent_kobj)) {
+ error = PTR_ERR(new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
__func__, new_parent ? dev_name(new_parent) : "<NULL>");
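
The driver-core change above switches class_dir_create_and_add() from returning NULL on failure to returning ERR_PTR(), which lets callers distinguish "no glue directory needed" (still NULL) from a real error. A sketch of the caller-side convention, assuming the static get_device_parent() from this file (demo_attach is a hypothetical wrapper):

#include <linux/err.h>

static int demo_attach(struct device *dev, struct device *parent)
{
	struct kobject *kobj = get_device_parent(dev, parent);

	if (IS_ERR(kobj))
		return PTR_ERR(kobj);	/* -ENOMEM or kobject_add() error */
	if (kobj)			/* NULL still means "no glue dir" */
		dev->kobj.parent = kobj;
	return 0;
}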
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 3db71afbba93c5..41090ef5facbe7 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -518,14 +518,30 @@ ssize_t __weak cpu_show_spectre_v2(struct device *dev,
return sprintf(buf, "Not affected\n");
}
+ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Not affected\n");
+}
+
static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
+static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static struct attribute *cpu_root_vulnerabilities_attrs[] = {
&dev_attr_meltdown.attr,
&dev_attr_spectre_v1.attr,
&dev_attr_spectre_v2.attr,
+ &dev_attr_spec_store_bypass.attr,
+ &dev_attr_l1tf.attr,
NULL
};
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 477a353e9710e2..909d54ed626637 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -333,14 +333,6 @@ static int really_probe(struct device *dev, struct device_driver *drv)
goto probe_failed;
}
- /*
- * Ensure devices are listed in devices_kset in correct order
- * It's important to move Dev to the end of devices_kset before
- * calling .probe, because it could be recursive and parent Dev
- * should always go first
- */
- devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5c0d2c052034e0..33612694d41e85 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1433,8 +1433,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_wait_for_subordinate(dev, async);
- if (async_error)
+ if (async_error) {
+ dev->power.direct_complete = false;
goto Complete;
+ }
/*
* If a device configured to wake up the system from sleep states
@@ -1449,6 +1451,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
+ dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 5b373d488884e6..cda80028adcd6a 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1549,7 +1549,7 @@ unlock:
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *_of_get_opp_desc_node(struct device *dev)
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
{
/*
* TODO: Support for multiple OPP tables.
@@ -1560,6 +1560,7 @@ struct device_node *_of_get_opp_desc_node(struct device *dev)
return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
/* Initializes OPP tables based on new bindings */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
@@ -1688,7 +1689,7 @@ int dev_pm_opp_of_add_table(struct device *dev)
* OPPs have two version of bindings now. The older one is deprecated,
* try for the new binding first.
*/
- opp_np = _of_get_opp_desc_node(dev);
+ opp_np = dev_pm_opp_of_get_opp_desc_node(dev);
if (!opp_np) {
/*
* Try old-deprecated bindings for backward compatibility with
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 9f0c15570f64c4..55bf12d75ef1a6 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -223,7 +223,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask
int cpu, ret = 0;
/* Get OPP descriptor node */
- np = _of_get_opp_desc_node(cpu_dev);
+ np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
if (!np) {
dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
return -ENOENT;
@@ -248,7 +248,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask
}
/* Get OPP descriptor node */
- tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
if (!tmp_np) {
dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
__func__);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 8ad2be953b6fe3..22267935ff848c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -388,7 +388,7 @@ static ssize_t wakeup_count_show(struct device *dev,
spin_lock_irq(&dev->power.lock);
if (dev->power.wakeup) {
- count = dev->power.wakeup->event_count;
+ count = dev->power.wakeup->wakeup_count;
enabled = true;
}
spin_unlock_irq(&dev->power.lock);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 6c63d179c20a47..42114d6d773b09 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -120,7 +120,6 @@ void wakeup_source_drop(struct wakeup_source *ws)
if (!ws)
return;
- del_timer_sync(&ws->timer);
__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);
@@ -208,6 +207,13 @@ void wakeup_source_remove(struct wakeup_source *ws)
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
synchronize_srcu(&wakeup_srcu);
+
+ del_timer_sync(&ws->timer);
+ /*
+ * Clear timer.function to make wakeup_source_not_registered() treat
+ * this wakeup source as not registered.
+ */
+ ws->timer.function = NULL;
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 2104b1b4ccda27..9ab759bcebd541 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1933,6 +1933,11 @@ static int __init atari_floppy_init (void)
unit[i].disk = alloc_disk(1);
if (!unit[i].disk)
goto Enomem;
+
+ unit[i].disk->queue = blk_init_queue(do_fd_request,
+ &ataflop_lock);
+ if (!unit[i].disk->queue)
+ goto Enomem;
}
if (UseTrackbuffer < 0)
@@ -1964,10 +1969,6 @@ static int __init atari_floppy_init (void)
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
unit[i].disk->private_data = &unit[i];
- unit[i].disk->queue = blk_init_queue(do_fd_request,
- &ataflop_lock);
- if (!unit[i].disk->queue)
- goto Enomem;
set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
add_disk(unit[i].disk);
}
@@ -1982,13 +1983,17 @@ static int __init atari_floppy_init (void)
return 0;
Enomem:
- while (i--) {
- struct request_queue *q = unit[i].disk->queue;
+ do {
+ struct gendisk *disk = unit[i].disk;
- put_disk(unit[i].disk);
- if (q)
- blk_cleanup_queue(q);
- }
+ if (disk) {
+ if (disk->queue) {
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ }
+ put_disk(unit[i].disk);
+ }
+ } while (i--);
unregister_blkdev(FLOPPY_MAJOR, "fd");
return -ENOMEM;
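
The reworked ataflop error path (and the matching swim.c hunk further below) follows one unwind shape: walk back from the unit that failed, tolerate units whose disk or queue was never allocated, and NULL the queue pointer so put_disk() cannot release it a second time. Restated as a standalone fragment:

/* i is the index at which initialization failed */
do {
	struct gendisk *disk = unit[i].disk;

	if (disk) {
		if (disk->queue) {
			blk_cleanup_queue(disk->queue);
			disk->queue = NULL;	/* avoid double release */
		}
		put_disk(disk);
	}
} while (i--);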
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index e80cbefbc2b548..27e1abcf571005 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -632,14 +632,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- int timeo;
- rcu_read_lock();
- nc = rcu_dereference(connection->net_conf);
- timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
- rcu_read_unlock();
- schedule_timeout_interruptible(timeo);
- if (try < max_tries)
+ if (try < max_tries) {
+ int timeo;
try = max_tries - 1;
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
+ }
continue;
}
if (rv < SS_SUCCESS) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index b4b5680ac6adb1..b1ee358edd3b4d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3126,7 +3126,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
struct net_conf *nc;
- int hg, rule_nr, rr_conflict, tentative;
+ int hg, rule_nr, rr_conflict, tentative, always_asbp;
mydisk = device->state.disk;
if (mydisk == D_NEGOTIATING)
@@ -3168,8 +3168,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
+ always_asbp = nc->always_asbp;
+ rr_conflict = nc->rr_conflict;
+ tentative = nc->tentative;
+ rcu_read_unlock();
- if (hg == 100 || (hg == -100 && nc->always_asbp)) {
+ if (hg == 100 || (hg == -100 && always_asbp)) {
int pcount = (device->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
@@ -3208,9 +3212,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
- rr_conflict = nc->rr_conflict;
- tentative = nc->tentative;
- rcu_read_unlock();
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
@@ -3889,7 +3890,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
kfree(device->p_uuid);
device->p_uuid = p_uuid;
- if (device->state.conn < C_CONNECTED &&
+ if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
device->state.disk < D_INCONSISTENT &&
device->state.role == R_PRIMARY &&
(device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
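
The drbd_sync_handshake() hunk fixes an RCU usage bug: rr_conflict and tentative were read from net_conf after rcu_read_unlock(), when the object may already have been freed. The corrected shape, restated as a fragment, snapshots every needed field inside the read-side critical section:

int always_asbp, rr_conflict, tentative;
struct net_conf *nc;

rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
always_asbp = nc->always_asbp;
rr_conflict = nc->rr_conflict;
tentative   = nc->tentative;
rcu_read_unlock();
/* nc must not be dereferenced past this point */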
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 5578c1477ba661..8bfd4fd7e9ec02 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -256,8 +256,8 @@ void drbd_request_endio(struct bio *bio)
} else
what = COMPLETED_OK;
- bio_put(req->private_bio);
req->private_bio = ERR_PTR(bio->bi_error);
+ bio_put(bio);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 331363e7de0f1d..2daa5b84abbc0f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3459,6 +3459,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
(struct floppy_struct **)&outparam);
if (ret)
return ret;
+ memcpy(&inparam.g, outparam,
+ offsetof(struct floppy_struct, name));
+ outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index e8165ec55e6f5a..c73e0eead77fc2 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -82,6 +82,7 @@
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_index_mutex);
+static DEFINE_MUTEX(loop_ctl_mutex);
static int max_part;
static int part_shift;
@@ -651,6 +652,36 @@ static void loop_reread_partitions(struct loop_device *lo,
__func__, lo->lo_number, lo->lo_file_name, rc);
}
+static inline int is_loop_device(struct file *file)
+{
+ struct inode *i = file->f_mapping->host;
+
+ return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+}
+
+static int loop_validate_file(struct file *file, struct block_device *bdev)
+{
+ struct inode *inode = file->f_mapping->host;
+ struct file *f = file;
+
+ /* Avoid recursion */
+ while (is_loop_device(f)) {
+ struct loop_device *l;
+
+ if (f->f_mapping->host->i_bdev == bdev)
+ return -EBADF;
+
+ l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+ if (l->lo_state == Lo_unbound) {
+ return -EINVAL;
+ }
+ f = l->lo_backing_file;
+ }
+ if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+ return -EINVAL;
+ return 0;
+}
+
/*
* loop_change_fd switched the backing store of a loopback device to
* a new file. This is useful for operating system installers to free up
@@ -680,14 +711,15 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
if (!file)
goto out;
+ error = loop_validate_file(file, bdev);
+ if (error)
+ goto out_putf;
+
inode = file->f_mapping->host;
old_file = lo->lo_backing_file;
error = -EINVAL;
- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
- goto out_putf;
-
/* size of the new backing store needs to be the same */
if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
goto out_putf;
@@ -708,13 +740,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
return error;
}
-static inline int is_loop_device(struct file *file)
-{
- struct inode *i = file->f_mapping->host;
-
- return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
-}
-
/* loop sysfs attributes */
static ssize_t loop_attr_show(struct device *dev, char *page,
@@ -811,16 +836,17 @@ static struct attribute_group loop_attribute_group = {
.attrs= loop_attrs,
};
-static int loop_sysfs_init(struct loop_device *lo)
+static void loop_sysfs_init(struct loop_device *lo)
{
- return sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
- &loop_attribute_group);
+ lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
+ &loop_attribute_group);
}
static void loop_sysfs_exit(struct loop_device *lo)
{
- sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
- &loop_attribute_group);
+ if (lo->sysfs_inited)
+ sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
+ &loop_attribute_group);
}
static void loop_config_discard(struct loop_device *lo)
@@ -869,10 +895,28 @@ static int loop_prepare_queue(struct loop_device *lo)
return 0;
}
+static void loop_update_rotational(struct loop_device *lo)
+{
+ struct file *file = lo->lo_backing_file;
+ struct inode *file_inode = file->f_mapping->host;
+ struct block_device *file_bdev = file_inode->i_sb->s_bdev;
+ struct request_queue *q = lo->lo_queue;
+ bool nonrot = true;
+
+ /* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
+ if (file_bdev)
+ nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));
+
+ if (nonrot)
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+ else
+ queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+}
+
static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct block_device *bdev, unsigned int arg)
{
- struct file *file, *f;
+ struct file *file;
struct inode *inode;
struct address_space *mapping;
unsigned lo_blocksize;
@@ -892,29 +936,13 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (lo->lo_state != Lo_unbound)
goto out_putf;
- /* Avoid recursion */
- f = file;
- while (is_loop_device(f)) {
- struct loop_device *l;
-
- if (f->f_mapping->host->i_bdev == bdev)
- goto out_putf;
-
- l = f->f_mapping->host->i_bdev->bd_disk->private_data;
- if (l->lo_state == Lo_unbound) {
- error = -EINVAL;
- goto out_putf;
- }
- f = l->lo_backing_file;
- }
+ error = loop_validate_file(file, bdev);
+ if (error)
+ goto out_putf;
mapping = file->f_mapping;
inode = mapping->host;
- error = -EINVAL;
- if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
- goto out_putf;
-
if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
!file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
@@ -948,6 +976,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
blk_queue_flush(lo->lo_queue, REQ_FLUSH);
+ loop_update_rotational(lo);
loop_update_dio(lo);
set_capacity(lo->lo_disk, size);
bd_set_size(bdev, size << 9);
@@ -1035,7 +1064,7 @@ static int loop_clr_fd(struct loop_device *lo)
*/
if (atomic_read(&lo->lo_refcnt) > 1) {
lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return 0;
}
@@ -1084,12 +1113,12 @@ static int loop_clr_fd(struct loop_device *lo)
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
loop_unprepare_queue(lo);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
/*
- * Need not hold lo_ctl_mutex to fput backing file.
- * Calling fput holding lo_ctl_mutex triggers a circular
+ * Need not hold loop_ctl_mutex to fput backing file.
+ * Calling fput holding loop_ctl_mutex triggers a circular
* lock dependency possibility warning as fput can take
- * bd_mutex which is usually taken before lo_ctl_mutex.
+ * bd_mutex which is usually taken before loop_ctl_mutex.
*/
fput(filp);
return 0;
@@ -1352,7 +1381,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
struct loop_device *lo = bdev->bd_disk->private_data;
int err;
- mutex_lock_nested(&lo->lo_ctl_mutex, 1);
+ mutex_lock_nested(&loop_ctl_mutex, 1);
switch (cmd) {
case LOOP_SET_FD:
err = loop_set_fd(lo, mode, bdev, arg);
@@ -1361,7 +1390,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
err = loop_change_fd(lo, bdev, arg);
break;
case LOOP_CLR_FD:
- /* loop_clr_fd would have unlocked lo_ctl_mutex on success */
+ /* loop_clr_fd would have unlocked loop_ctl_mutex on success */
err = loop_clr_fd(lo);
if (!err)
goto out_unlocked;
@@ -1397,7 +1426,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
out_unlocked:
return err;
@@ -1530,16 +1559,16 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
switch(cmd) {
case LOOP_SET_STATUS:
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
err = loop_set_status_compat(
lo, (const struct compat_loop_info __user *) arg);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
break;
case LOOP_GET_STATUS:
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
err = loop_get_status_compat(
lo, (struct compat_loop_info __user *) arg);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
break;
case LOOP_SET_CAPACITY:
case LOOP_CLR_FD:
@@ -1583,7 +1612,7 @@ static void __lo_release(struct loop_device *lo)
if (atomic_dec_return(&lo->lo_refcnt))
return;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
/*
* In autoclear mode, stop the loop thread
@@ -1600,7 +1629,7 @@ static void __lo_release(struct loop_device *lo)
loop_flush(lo);
}
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
}
static void lo_release(struct gendisk *disk, fmode_t mode)
@@ -1646,10 +1675,10 @@ static int unregister_transfer_cb(int id, void *ptr, void *data)
struct loop_device *lo = ptr;
struct loop_func_table *xfer = data;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
if (lo->lo_encryption == xfer)
loop_release_xfer(lo);
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
return 0;
}
@@ -1811,7 +1840,6 @@ static int loop_add(struct loop_device **l, int i)
if (!part_shift)
disk->flags |= GENHD_FL_NO_PART_SCAN;
disk->flags |= GENHD_FL_EXT_DEVT;
- mutex_init(&lo->lo_ctl_mutex);
atomic_set(&lo->lo_refcnt, 0);
lo->lo_number = i;
spin_lock_init(&lo->lo_lock);
@@ -1924,19 +1952,19 @@ static long loop_control_ioctl(struct file *file, unsigned int cmd,
ret = loop_lookup(&lo, parm);
if (ret < 0)
break;
- mutex_lock(&lo->lo_ctl_mutex);
+ mutex_lock(&loop_ctl_mutex);
if (lo->lo_state != Lo_unbound) {
ret = -EBUSY;
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
break;
}
if (atomic_read(&lo->lo_refcnt) > 0) {
ret = -EBUSY;
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
break;
}
lo->lo_disk->private_data = NULL;
- mutex_unlock(&lo->lo_ctl_mutex);
+ mutex_unlock(&loop_ctl_mutex);
idr_remove(&loop_index_idr, lo->lo_number);
loop_remove(lo);
break;
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index fb2237c73e618e..a923e74495cec1 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -55,10 +55,10 @@ struct loop_device {
spinlock_t lo_lock;
int lo_state;
- struct mutex lo_ctl_mutex;
struct kthread_worker worker;
struct task_struct *worker_task;
bool use_dio;
+ bool sysfs_inited;
struct request_queue *lo_queue;
struct blk_mq_tag_set tag_set;
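
The new sysfs_inited flag in struct loop_device pairs creation and removal of the attribute group: removal only happens if creation actually succeeded, so a failed sysfs_create_group() no longer triggers a bogus sysfs_remove_group() warning at teardown. A minimal sketch of the pairing (demo_* names are hypothetical):

static bool demo_sysfs_inited;

static void demo_sysfs_init(struct kobject *kobj)
{
	/* record success so teardown can be conditional */
	demo_sysfs_inited = !sysfs_create_group(kobj, &loop_attribute_group);
}

static void demo_sysfs_exit(struct kobject *kobj)
{
	if (demo_sysfs_inited)
		sysfs_remove_group(kobj, &loop_attribute_group);
}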
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 4b911ed96ea3e8..31219fb9e7f4e6 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -40,6 +40,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
+#define VDC_MAX_RETRIES 10
+
static struct workqueue_struct *sunvdc_wq;
struct vdc_req_entry {
@@ -419,6 +421,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
.end_idx = dr->prod,
};
int err, delay;
+ int retries = 0;
hdr.seq = dr->snd_nxt;
delay = 1;
@@ -431,6 +434,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
udelay(delay);
if ((delay <<= 1) > 128)
delay = 128;
+ if (retries++ > VDC_MAX_RETRIES)
+ break;
} while (err == -EAGAIN);
if (err == -ENOTCONN)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index b5afd495d482e8..eec6e393c124d1 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -868,8 +868,17 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
- while (drive--)
- put_disk(swd->unit[drive].disk);
+ do {
+ struct gendisk *disk = swd->unit[drive].disk;
+
+ if (disk) {
+ if (disk->queue) {
+ blk_cleanup_queue(disk->queue);
+ disk->queue = NULL;
+ }
+ put_disk(disk);
+ }
+ } while (drive--);
return err;
}
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index c264f2d284a7be..2e0a9e2531cb61 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1027,7 +1027,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
struct swim3 __iomem *sw = fs->swim3;
mutex_lock(&swim3_mutex);
- if (fs->ref_count > 0 && --fs->ref_count == 0) {
+ if (fs->ref_count > 0)
+ --fs->ref_count;
+ else if (fs->ref_count == -1)
+ fs->ref_count = 0;
+ if (fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, 0xff);
swim3_select(fs, RELAX);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 6f9c36a4967c8d..79f9597eb716cb 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1182,6 +1182,11 @@ static struct attribute_group zram_disk_attr_group = {
.attrs = zram_disk_attrs,
};
+static const struct attribute_group *zram_disk_attr_groups[] = {
+ &zram_disk_attr_group,
+ NULL,
+};
+
/*
* Allocate and initialize a new zram device. The function returns
* '>= 0' device_id upon success, and negative value otherwise.
@@ -1262,24 +1267,15 @@ static int zram_add(void)
zram->disk->queue->limits.discard_zeroes_data = 0;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+ disk_to_dev(zram->disk)->groups = zram_disk_attr_groups;
add_disk(zram->disk);
- ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
- if (ret < 0) {
- pr_err("Error creating sysfs group for device %d\n",
- device_id);
- goto out_free_disk;
- }
strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
zram->meta = NULL;
pr_info("Added device: %s\n", zram->disk->disk_name);
return device_id;
-out_free_disk:
- del_gendisk(zram->disk);
- put_disk(zram->disk);
out_free_queue:
blk_cleanup_queue(queue);
out_free_idr:
@@ -1307,16 +1303,6 @@ static int zram_remove(struct zram *zram)
zram->claim = true;
mutex_unlock(&bdev->bd_mutex);
- /*
- * Remove sysfs first, so no one will perform a disksize
- * store while we destroy the devices. This also helps during
- * hot_remove -- zram_reset_device() is the last holder of
- * ->init_lock, no later/concurrent disksize_store() or any
- * other sysfs handlers are possible.
- */
- sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
- &zram_disk_attr_group);
-
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
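
The zram conversion above replaces manual sysfs_create_group()/sysfs_remove_group() calls with a NULL-terminated attribute-group array handed to the driver core before add_disk(), so the attributes are created and destroyed atomically with the device. A sketch of the handover (demo_disk_attr_groups is a hypothetical name):

static const struct attribute_group *demo_disk_attr_groups[] = {
	&zram_disk_attr_group,
	NULL,				/* array must be NULL-terminated */
};

/* in the add path, before add_disk(): */
disk_to_dev(zram->disk)->groups = demo_disk_attr_groups;
add_disk(zram->disk);		/* core now owns the attribute lifetime */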
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index fdb44829ab6ff7..475f25c2451d30 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -270,6 +270,7 @@ static const struct {
{ 0x4103, "BCM4330B1" }, /* 002.001.003 */
{ 0x410e, "BCM43341B0" }, /* 002.001.014 */
{ 0x4406, "BCM4324B3" }, /* 002.004.006 */
+ { 0x6109, "BCM4335C0" }, /* 003.001.009 */
{ 0x610c, "BCM4354" }, /* 003.001.012 */
{ }
};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 198dfad6b6cb55..7f092e6e12d64f 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -29,6 +29,7 @@
#include <linux/suspend.h>
#include <asm/unaligned.h>
#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -325,11 +326,13 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },
/* Intel Bluetooth devices */
+ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
+ { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -354,6 +357,10 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8723BU Bluetooth devices */
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8723DE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
@@ -421,6 +428,8 @@ struct btusb_data {
struct usb_endpoint_descriptor *diag_tx_ep;
struct usb_endpoint_descriptor *diag_rx_ep;
+ struct gpio_desc *reset_gpio;
+
__u8 cmdreq_type;
__u8 cmdreq;
@@ -436,6 +445,26 @@ struct btusb_data {
int oob_wake_irq; /* irq for out-of-band wake-on-bt */
};
+
+static void btusb_hw_reset(struct hci_dev *hdev)
+{
+ struct btusb_data *data = hci_get_drvdata(hdev);
+ struct gpio_desc *reset_gpio = data->reset_gpio;
+
+ /*
+ * Toggle the hard reset line if the platform provides one. The reset
+ * is going to yank the device off the USB bus and then replug it, so
+ * doing it once is enough. The cleanup is handled correctly on the way
+ * out (standard USB disconnect), and the new device is detected cleanly
+ * and bound to the driver again as it should be.
+ */
+ bt_dev_dbg(hdev, "%s: Initiating HW reset via gpio", __func__);
+ clear_bit(HCI_QUIRK_HW_RESET_ON_TIMEOUT, &hdev->quirks);
+ gpiod_set_value(reset_gpio, 1);
+ mdelay(100);
+ gpiod_set_value(reset_gpio, 0);
+}
+
static inline void btusb_free_frags(struct btusb_data *data)
{
unsigned long flags;
@@ -1682,6 +1711,16 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_DBG("%s", hdev->name);
+ /* A race condition has been observed during the controller recovery
+ * mechanism that results in the controller not responding to the
+ * reset command.
+ *
+ * To avoid this race, add a 30ms delay right after USB re-enumeration
+ * and before sending the Reset command, which allows the controller to
+ * completely recover and process the Reset command.
+ */
+ BT_DBG("Delay 30ms to avoid race condition");
+ mdelay(30);
+
/* The controller has a bug with the first HCI command sent to it
* returning number of completed commands as zero. This would stall the
* command processing in the Bluetooth core.
@@ -2040,13 +2079,19 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return -EINVAL;
}
- /* At the moment the iBT 3.0 hardware variants 0x0b (LnP/SfP)
- * and 0x0c (WsP) are supported by this firmware loading method.
+ /* Check for supported iBT hardware variants of this firmware
+ * loading method.
*
* This check has been put in place to ensure correct forward
* compatibility options when newer hardware variants come along.
*/
- if (ver.hw_variant != 0x0b && ver.hw_variant != 0x0c) {
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ break;
+ default:
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
hdev->name, ver.hw_variant);
return -EINVAL;
@@ -2139,17 +2184,44 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
}
/* With this Intel bootloader only the hardware variant and device
- * revision information are used to select the right firmware.
+ * revision information are used to select the right firmware for SfP
+ * and WsP.
*
* The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi.
*
* Currently the supported hardware variants are:
* 11 (0x0b) for iBT3.0 (LnP/SfP)
* 12 (0x0c) for iBT3.5 (WsP)
+ *
+ * For ThP/JfP and for future SKU's, the FW name varies based on HW
+ * variant, HW revision and FW revision, as these are dependent on CNVi
+ * and RF Combination.
+ *
+ * 17 (0x11) for iBT3.5 (JfP)
+ * 18 (0x12) for iBT3.5 (ThP)
+ *
+ * The firmware file name for these will be
+ * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi.
+ *
*/
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params->dev_revid));
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ return -EINVAL;
+ }
err = request_firmware(&fw, fwname, &hdev->dev);
if (err < 0) {
@@ -2164,9 +2236,24 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Save the DDC file name for later use to apply once the firmware
* downloading is done.
*/
- snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params->dev_revid));
+ switch (ver.hw_variant) {
+ case 0x0b: /* SfP */
+ case 0x0c: /* WsP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(params->dev_revid));
+ break;
+ case 0x11: /* JfP */
+ case 0x12: /* ThP */
+ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
+ le16_to_cpu(ver.hw_variant),
+ le16_to_cpu(ver.hw_revision),
+ le16_to_cpu(ver.fw_revision));
+ break;
+ default:
+ BT_ERR("%s: Unsupported Intel firmware naming", hdev->name);
+ return -EINVAL;
+ }
kfree_skb(skb);
@@ -2352,6 +2439,22 @@ static int btusb_shutdown_intel(struct hci_dev *hdev)
struct sk_buff *skb;
long ret;
+ /* In the shutdown sequence where Bluetooth is turned off followed
+ * by WiFi being turned off, turning WiFi back on causes issues with
+ * the RF calibration.
+ *
+ * To ensure that any RF activity has been stopped, issue an HCI Reset
+ * command to clear all ongoing activity, including advertising and
+ * scanning.
+ */
+ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "HCI reset during shutdown failed");
+ return ret;
+ }
+ kfree_skb(skb);
+
/* Some platforms have an issue with the BT LED when the interface is
* down or the BT radio is turned off: it takes 5 seconds for the BT
* LED to go off. This command turns off the BT LED immediately.
@@ -2820,6 +2923,8 @@ static irqreturn_t btusb_oob_wake_handler(int irq, void *priv)
static const struct of_device_id btusb_match_table[] = {
{ .compatible = "usb1286,204e" },
+ { .compatible = "usbcf3,e300" }, /* QCA6174A */
+ { .compatible = "usb4ca,301a" }, /* QCA6174A (Lite-On) */
{ }
};
MODULE_DEVICE_TABLE(of, btusb_match_table);
@@ -2843,6 +2948,7 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
return 0;
}
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler,
0, "OOB Wake-on-BT", data);
if (ret) {
@@ -2857,7 +2963,6 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
}
data->oob_wake_irq = irq;
- disable_irq(irq);
bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq);
return 0;
}
@@ -2867,6 +2972,7 @@ static int btusb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_endpoint_descriptor *ep_desc;
+ struct gpio_desc *reset_gpio;
struct btusb_data *data;
struct hci_dev *hdev;
unsigned ifnum_base;
@@ -2978,6 +3084,17 @@ static int btusb_probe(struct usb_interface *intf,
SET_HCIDEV_DEV(hdev, &intf->dev);
+ reset_gpio = gpiod_get_optional(&data->udev->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio)) {
+ err = PTR_ERR(reset_gpio);
+ goto out_free_dev;
+ } else if (reset_gpio) {
+ data->reset_gpio = reset_gpio;
+ dev_set_drvdata(&data->udev->dev, reset_gpio);
+ hdev->hw_reset = btusb_hw_reset;
+ }
+
hdev->open = btusb_open;
hdev->close = btusb_close;
hdev->flush = btusb_flush;
@@ -3042,6 +3159,7 @@ static int btusb_probe(struct usb_interface *intf,
#endif
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ set_bit(HCI_QUIRK_HW_RESET_ON_TIMEOUT, &hdev->quirks);
}
if (id->driver_info & BTUSB_INTEL_NEW) {
@@ -3059,6 +3177,7 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
#endif
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ set_bit(HCI_QUIRK_HW_RESET_ON_TIMEOUT, &hdev->quirks);
}
if (id->driver_info & BTUSB_MARVELL)
@@ -3182,6 +3301,8 @@ static int btusb_probe(struct usb_interface *intf,
return 0;
out_free_dev:
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
hci_free_dev(hdev);
return err;
}
@@ -3225,6 +3346,9 @@ static void btusb_disconnect(struct usb_interface *intf)
if (data->oob_wake_irq)
device_init_wakeup(&data->udev->dev, false);
+ if (data->reset_gpio)
+ gpiod_put(data->reset_gpio);
+
hci_free_dev(hdev);
}
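
The btusb probe change uses the optional-GPIO idiom: gpiod_get_optional() returns NULL when the platform describes no "reset" line, an ERR_PTR() (such as -EPROBE_DEFER) on real errors, and a valid descriptor otherwise. A minimal sketch (demo_get_reset is a hypothetical helper):

#include <linux/gpio/consumer.h>

static int demo_get_reset(struct device *dev, struct gpio_desc **out)
{
	struct gpio_desc *gpio;

	gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);	/* e.g. -EPROBE_DEFER */
	*out = gpio;			/* may be NULL: no reset line */
	return 0;
}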
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 73306384af6cc0..f63f62583d2bda 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -607,12 +607,17 @@ static int intel_setup(struct hci_uart *hu)
return -EINVAL;
}
- /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is
- * supported by this firmware loading method. This check has been
- * put in place to ensure correct forward compatibility options
- * when newer hardware variants come along.
+ /* Check for supported iBT hardware variants of this firmware
+ * loading method.
+ *
+ * This check has been put in place to ensure correct forward
+ * compatibility options when newer hardware variants come along.
*/
- if (ver.hw_variant != 0x0b) {
+ switch (ver.hw_variant) {
+ case 0x0b: /* LnP */
+ case 0x0c: /* WsP */
+ break;
+ default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
ver.hw_variant);
return -EINVAL;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 74b2f4a1464304..0986c324459fc6 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -884,7 +884,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
*/
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_RUNNING);
return 0;
}
@@ -939,6 +939,12 @@ static int qca_setup(struct hci_uart *hu)
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
ret = 0;
+ } else if (ret == -EAGAIN) {
+ /*
+ * Userspace firmware loader will return -EAGAIN in case no
+ * patch/nvm-config is found, so run with original fw/config.
+ */
+ ret = 0;
}
/* Setup bdaddr */
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 0151039bff0522..d203940203b62e 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2425,7 +2425,7 @@ static int cdrom_ioctl_select_disc(struct cdrom_device_info *cdi,
return -ENOSYS;
if (arg != CDSL_CURRENT && arg != CDSL_NONE) {
- if ((int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
}
@@ -2526,7 +2526,7 @@ static int cdrom_ioctl_drive_status(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) ||
(arg == CDSL_CURRENT || arg == CDSL_NONE))
return cdi->ops->drive_status(cdi, CDSL_CURRENT);
- if (((int)arg >= cdi->capacity))
+ if (arg >= cdi->capacity)
return -EINVAL;
return cdrom_slot_status(cdi, arg);
}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index e2808fefbb78b2..1852d19d0d7b08 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -882,6 +882,7 @@ static void __exit exit_gdrom(void)
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
kfree(gd.toc);
+ kfree(gd.cd_info);
}
module_init(init_gdrom);
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 14790304b84b23..9fcd51095d1373 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/nospec.h>
#include <asm/io.h>
#include <asm/uaccess.h>
@@ -386,7 +387,11 @@ static ssize_t ac_write(struct file *file, const char __user *buf, size_t count,
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+ if (IndexCard >= MAX_BOARD)
+ return -EINVAL;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
@@ -697,6 +702,7 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
+ static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
- static int warncount = 10;
- if (warncount) {
- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
- warncount--;
- }
- kfree(adgl);
- mutex_unlock(&ac_mutex);
- return -EINVAL;
- }
+ if (cmd != 6 && IndexCard >= MAX_BOARD)
+ goto err;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (cmd != 6 && !apbs[IndexCard].RamIO)
+ goto err;
switch (cmd) {
@@ -838,5 +840,16 @@ static long ac_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
kfree(adgl);
mutex_unlock(&ac_mutex);
return 0;
+
+err:
+ if (warncount) {
+ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+ (int)IndexCard + 1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+
}
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index 0c98a9d51a2494..44ce8060694447 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -140,7 +140,7 @@ static int via_rng_init(struct hwrng *rng)
* RNG configuration like it used to be the case in this
* register */
if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
- if (!cpu_has_xstore_enabled) {
+ if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
pr_err(PFX "can't enable hardware RNG "
"if XSTORE is not enabled\n");
return -ENODEV;
@@ -200,8 +200,9 @@ static int __init mod_init(void)
{
int err;
- if (!cpu_has_xstore)
+ if (!boot_cpu_has(X86_FEATURE_XSTORE))
return -ENODEV;
+
pr_info("VIA RNG detected\n");
err = hwrng_register(&via_rng);
if (err) {
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index feafdab734ae20..4835b588b7833f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -522,11 +522,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
BT_CONTROL(BT_H_BUSY);
+ bt->timeout = bt->BT_CAP_req2rsp;
+
/* Read BT capabilities if it hasn't been done yet */
if (!bt->BT_CAP_outreqs)
BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
SI_SM_CALL_WITHOUT_DELAY);
- bt->timeout = bt->BT_CAP_req2rsp;
BT_SI_SM_RETURN(SI_SM_IDLE);
case BT_STATE_XACTION_START:
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index d6d166fe49a3ca..b2da2382d544cb 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -613,8 +613,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->waiting_alert = true;
ssif_info->rtc_us_timer = SSIF_MSG_USEC;
- mod_timer(&ssif_info->retry_timer,
- jiffies + SSIF_MSG_JIFFIES);
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_JIFFIES);
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -636,8 +637,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
/* Remove the multi-part read marker. */
len -= 2;
+ data += 2;
for (i = 0; i < len; i++)
- ssif_info->data[i] = data[i+2];
+ ssif_info->data[i] = data[i];
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
@@ -665,8 +667,19 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
blocknum = data[0];
+ len--;
+ data++;
+
+ if (blocknum != 0xff && len != 31) {
+ /* All blocks but the last must have 31 data bytes. */
+ result = -EIO;
+ if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
+ pr_info("Received middle message <31\n");
- if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
+ goto continue_op;
+ }
+
+ if (ssif_info->multi_len + len > IPMI_MAX_MSG_LENGTH) {
/* Received message too big, abort the operation. */
result = -E2BIG;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -675,16 +688,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
goto continue_op;
}
- /* Remove the blocknum from the data. */
- len--;
for (i = 0; i < len; i++)
- ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
+ ssif_info->data[i + ssif_info->multi_len] = data[i];
ssif_info->multi_len += len;
if (blocknum == 0xff) {
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if (blocknum + 1 != ssif_info->multi_pos) {
+ } else if (blocknum != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
@@ -712,6 +723,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
}
+ continue_op:
if (result < 0) {
ssif_inc_stat(ssif_info, receive_errors);
} else {
@@ -719,8 +731,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, received_message_parts);
}
-
- continue_op:
if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
pr_info(PFX "DONE 1: state = %d, result=%d.\n",
ssif_info->ssif_state, result);
@@ -951,8 +961,9 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
- mod_timer(&ssif_info->retry_timer,
- jiffies + SSIF_MSG_PART_JIFFIES);
+ if (!ssif_info->stopping)
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
ipmi_ssif_unlock_cond(ssif_info, flags);
}
}
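
Taken together, the ipmi_ssif hunks above tighten the multi-part read state machine: the block number is stripped before any length check, middle blocks must carry exactly 31 data bytes, and the sequence test compares the raw block number. A standalone sketch of the resulting reassembly rule (the names and the 272-byte cap are illustrative, not the driver's):

    #include <string.h>

    #define MAX_MSG 272

    struct reasm {
            unsigned char buf[MAX_MSG];
            size_t len;
            unsigned int expect;            /* next expected middle-block number */
    };

    /* Returns 1 when the final (0xff) block arrives, 0 to continue, -1 on error. */
    static int feed_block(struct reasm *r, const unsigned char *data, size_t len)
    {
            unsigned int blocknum = data[0];

            data++;                         /* strip the block number ... */
            len--;                          /* ... before any length check */

            if (blocknum != 0xff && len != 31)
                    return -1;              /* short middle block */
            if (r->len + len > MAX_MSG)
                    return -1;              /* would overflow the buffer */
            if (blocknum != 0xff && blocknum != r->expect)
                    return -1;              /* out-of-sequence block */

            memcpy(r->buf + r->len, data, len);
            r->len += len;
            r->expect++;
            return blocknum == 0xff;
    }
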
diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
index 164544afd6809b..618f3df6c3b9b0 100644
--- a/drivers/char/mwave/mwavedd.c
+++ b/drivers/char/mwave/mwavedd.c
@@ -59,6 +59,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/serial_8250.h>
+#include <linux/nospec.h>
#include "smapi.h"
#include "mwavedd.h"
#include "3780i.h"
@@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
" ipcnum %x entry usIntCount %x\n",
@@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
" Invalid ipcnum %x\n", ipcnum);
return -EINVAL;
}
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
PRINTK_3(TRACE_MWAVE,
"mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
" ipcnum %x, usIntCount %x\n",
@@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
ipcnum);
return -EINVAL;
}
+ ipcnum = array_index_nospec(ipcnum,
+ ARRAY_SIZE(pDrvData->IPCs));
mutex_lock(&mwave_mutex);
if (pDrvData->IPCs[ipcnum].bIsEnabled == TRUE) {
pDrvData->IPCs[ipcnum].bIsEnabled = FALSE;
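
All three mwave hunks apply the same Spectre-v1 idiom: first reject an out-of-range, user-controlled index, then clamp it with array_index_nospec() so a mispredicted bounds check cannot be used to speculatively index past the array. The pattern in isolation (the table is a made-up example):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/nospec.h>

    static struct { int value; } table[16];

    static int table_lookup(unsigned int idx)
    {
            if (idx >= ARRAY_SIZE(table))
                    return -EINVAL;                 /* architectural bounds check */
            idx = array_index_nospec(idx, ARRAY_SIZE(table));
            return table[idx].value;                /* safe even under speculation */
    }
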
diff --git a/drivers/char/random.c b/drivers/char/random.c
index dffd06a3bb7611..2916d08ee30e29 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1503,14 +1503,22 @@ static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 buf[16];
+ __u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
+ int b, i = 0;
+
bytes = min(count, sizeof(buf));
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
+ for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+ if (!arch_get_random_int(&t))
+ break;
+ buf[i] ^= t;
+ }
+
count -= bytes;
p += bytes;
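
The write_pool() change folds hardware randomness into whatever userspace feeds the pool; since XOR with an independent value can only add entropy, a hostile writer gains nothing. A userspace analogue of the mixing loop (get_hw_random() is a hypothetical stand-in for arch_get_random_int()):

    #include <stdint.h>
    #include <stdlib.h>

    static int get_hw_random(uint32_t *out)
    {
            *out = (uint32_t)random();      /* stand-in for a hardware RNG */
            return 1;                       /* 0 would mean "no arch RNG" */
    }

    static void mix_pool_words(uint32_t *buf, size_t nwords)
    {
            uint32_t t;

            for (size_t i = 0; i < nwords; i++) {
                    if (!get_hw_random(&t))
                            break;          /* keep the caller's bytes as-is */
                    buf[i] ^= t;
            }
    }
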
diff --git a/drivers/char/tpm/cr50_i2c.c b/drivers/char/tpm/cr50_i2c.c
index 7cd98af082c07e..dd5a12b5ad3d67 100644
--- a/drivers/char/tpm/cr50_i2c.c
+++ b/drivers/char/tpm/cr50_i2c.c
@@ -93,8 +93,7 @@ static int cr50_i2c_wait_tpm_ready(struct tpm_chip *chip)
msecs_to_jiffies(chip->timeout_a));
if (rc == 0) {
- dev_err(&chip->dev, "Timeout waiting for TPM ready\n");
- return -ETIMEDOUT;
+ dev_warn(&chip->dev, "Timeout waiting for TPM ready\n");
}
return (int)rc;
}
@@ -248,8 +247,8 @@ static int cr50_i2c_write(struct tpm_chip *chip, u8 addr, u8 *buffer,
if (rc <= 0)
goto out;
- /* Wait for TPM to be ready */
- rc = cr50_i2c_wait_tpm_ready(chip);
+ /* Wait for TPM to be ready, ignore timeout */
+ cr50_i2c_wait_tpm_ready(chip);
out:
cr50_i2c_disable_tpm_irq(chip);
@@ -418,9 +417,10 @@ static int cr50_i2c_tis_recv(struct tpm_chip *chip, u8 *buf, size_t buf_len)
if (rc < 0)
goto out_err;
- if (burstcnt > buf_len) {
- dev_err(&chip->dev, "Burstcnt too large: %zu > %zu\n",
- burstcnt, buf_len);
+ if (burstcnt > buf_len || burstcnt < TPM_HEADER_SIZE) {
+ dev_err(&chip->dev,
+ "Unexpected burstcnt: %zu (max=%zu, min=%d)\n",
+ burstcnt, buf_len, TPM_HEADER_SIZE);
rc = -EIO;
goto out_err;
}
diff --git a/drivers/char/tpm/cr50_spi.c b/drivers/char/tpm/cr50_spi.c
index 9d5dce6469c9a4..af1f26ffbd0f17 100644
--- a/drivers/char/tpm/cr50_spi.c
+++ b/drivers/char/tpm/cr50_spi.c
@@ -30,7 +30,7 @@
*/
#define CR50_SLEEP_DELAY_MSEC 1000
#define CR50_WAKE_START_DELAY_MSEC 1
-#define CR50_NOIRQ_ACCESS_DELAY_MSEC 10
+#define CR50_NOIRQ_ACCESS_DELAY_MSEC 2
#define CR50_READY_IRQ_TIMEOUT_MSEC TPM2_TIMEOUT_A
#define CR50_FLOW_CONTROL_MSEC TPM2_TIMEOUT_A
#define MAX_IRQ_CONFIRMATION_ATTEMPTS 3
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 4723872a02711f..78ee5dd1e097ee 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include <linux/major.h>
+#include <linux/of.h>
#include "tpm.h"
#include "tpm_eventlog.h"
@@ -351,8 +352,20 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip)
*/
int tpm_chip_register(struct tpm_chip *chip)
{
+#ifdef CONFIG_OF
+ struct device_node *np;
+#endif
int rc;
+#ifdef CONFIG_OF
+ np = of_find_node_by_name(NULL, "vtpm");
+ if (np) {
+ if (of_property_read_bool(np, "powered-while-suspended"))
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+ }
+ of_node_put(np);
+#endif
+
if (chip->ops->flags & TPM_OPS_AUTO_STARTUP) {
if (chip->flags & TPM_CHIP_FLAG_TPM2)
rc = tpm2_auto_startup(chip);
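
The tpm_chip_register() addition is an instance of a common optional-DT lookup: find a node by name, read a boolean property, drop the reference. of_node_put(NULL) is a no-op, so the put can sit outside the NULL check. A generic sketch with made-up names:

    #include <linux/of.h>

    static bool dt_bool_flag(const char *node_name, const char *prop)
    {
            struct device_node *np = of_find_node_by_name(NULL, node_name);
            bool val = np && of_property_read_bool(np, prop);

            of_node_put(np);        /* safe even when np is NULL */
            return val;
    }
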
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index db2eba38104271..61db61410097c9 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -25,7 +25,7 @@ struct file_priv {
struct tpm_chip *chip;
/* Data passed to and from the tpm via the read/write calls */
- atomic_t data_pending;
+ size_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
@@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
struct file_priv *priv = container_of(work, struct file_priv, work);
mutex_lock(&priv->buffer_mutex);
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
}
@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
}
priv->chip = chip;
- atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
setup_timer(&priv->user_read_timer, user_reader_timeout,
(unsigned long)priv);
@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- ssize_t ret_size;
+ ssize_t ret_size = 0;
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
- ret_size = atomic_read(&priv->data_pending);
- if (ret_size > 0) { /* relay data */
- ssize_t orig_ret_size = ret_size;
- if (size < ret_size)
- ret_size = size;
+ mutex_lock(&priv->buffer_mutex);
- mutex_lock(&priv->buffer_mutex);
+ if (priv->data_pending) {
+ ret_size = min_t(ssize_t, size, priv->data_pending);
rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, orig_ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
if (rc)
ret_size = -EFAULT;
- mutex_unlock(&priv->buffer_mutex);
+ priv->data_pending = 0;
}
- atomic_set(&priv->data_pending, 0);
-
+ mutex_unlock(&priv->buffer_mutex);
return ret_size;
}
@@ -118,13 +113,6 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
size_t in_size = size;
ssize_t out_size;
- /* cannot perform a write until the read has cleared
- either via tpm_read or a user_read_timer timeout.
- This also prevents splitted buffered writes from blocking here.
- */
- if (atomic_read(&priv->data_pending) != 0)
- return -EBUSY;
-
tpm_resume_if_needed(priv->chip);
if (in_size > TPM_BUFSIZE)
@@ -132,6 +120,15 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
mutex_lock(&priv->buffer_mutex);
+ /* Cannot perform a write until the read has cleared either via
+ * tpm_read or a user_read_timer timeout. This also prevents split
+ * buffered writes from blocking here.
+ */
+ if (priv->data_pending != 0) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EBUSY;
+ }
+
if (copy_from_user
(priv->data_buffer, (void __user *) buf, in_size)) {
mutex_unlock(&priv->buffer_mutex);
@@ -155,7 +152,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
return out_size;
}
- atomic_set(&priv->data_pending, out_size);
+ priv->data_pending = out_size;
mutex_unlock(&priv->buffer_mutex);
/* Set a timeout by which the reader must come claim the result */
@@ -174,7 +171,7 @@ static int tpm_release(struct inode *inode, struct file *file)
del_singleshot_timer_sync(&priv->user_read_timer);
flush_work(&priv->work);
file->private_data = NULL;
- atomic_set(&priv->data_pending, 0);
+ priv->data_pending = 0;
clear_bit(0, &priv->chip->is_open);
kfree(priv);
return 0;
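
The net effect of the tpm-dev.c hunks: data_pending stops pretending an atomic_t makes it race-free and becomes a plain size_t that is only ever touched under buffer_mutex, which also guards the buffer it describes. A condensed sketch of the resulting write path (error handling trimmed):

    static ssize_t sketch_write(struct file_priv *priv, size_t in_size)
    {
            mutex_lock(&priv->buffer_mutex);

            if (priv->data_pending != 0) {          /* previous result unread */
                    mutex_unlock(&priv->buffer_mutex);
                    return -EBUSY;
            }

            /* ... copy the request in and run the command ... */

            priv->data_pending = in_size;           /* publish under the lock */
            mutex_unlock(&priv->buffer_mutex);
            return in_size;
    }
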
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 0e9f36bc3a616c..66e7fbeabef3c6 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -462,7 +462,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, const void *cmd,
header = cmd;
err = be32_to_cpu(header->return_code);
- if (err != 0 && desc)
+ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+ && desc)
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
@@ -857,6 +858,10 @@ int tpm_do_selftest(struct tpm_chip *chip)
loops = jiffies_to_msecs(duration) / delay_msec;
rc = tpm_continue_selftest(chip);
+ if (rc == TPM_ERR_INVALID_POSTINIT) {
+ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
+ dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
+ }
/* This may fail if there was no TPM driver during a suspend/resume
* cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
*/
@@ -1025,6 +1030,9 @@ int tpm_pm_suspend(struct device *dev)
if (chip == NULL)
return -ENODEV;
+ if (chip->flags & TPM_CHIP_FLAG_ALWAYS_POWERED)
+ return 0;
+
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
tpm2_shutdown(chip, TPM2_SU_STATE);
set_bit(0, &chip->is_suspended);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index bc2d09a2274ba5..6974f1840243ff 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -173,6 +173,7 @@ enum tpm_chip_flags {
TPM_CHIP_FLAG_TPM2 = BIT(1),
TPM_CHIP_FLAG_IRQ = BIT(2),
TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
};
struct tpm_chip {
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index a2ab00831df1cc..97b3e312903dc6 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -203,7 +203,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
return -ENOMEM;
}
- rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
+ rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
if (rv < 0)
return rv;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index d266299dfdb1a2..785864893f9a62 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -297,6 +297,36 @@ static const struct platform_device_id s2mps11_clk_id[] = {
};
MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
+#ifdef CONFIG_OF
+/*
+ * Device is instantiated through parent MFD device and device matching is done
+ * through platform_device_id.
+ *
+ * However, if the device's DT node contains a proper clock compatible and
+ * the driver is built as a module, then *module* matching will be done
+ * through DT aliases. This requires an of_device_id table. At the same
+ * time, this does not change the actual *device* matching, so do not add
+ * .of_match_table.
+ */
+static const struct of_device_id s2mps11_dt_match[] = {
+ {
+ .compatible = "samsung,s2mps11-clk",
+ .data = (void *)S2MPS11X,
+ }, {
+ .compatible = "samsung,s2mps13-clk",
+ .data = (void *)S2MPS13X,
+ }, {
+ .compatible = "samsung,s2mps14-clk",
+ .data = (void *)S2MPS14X,
+ }, {
+ .compatible = "samsung,s5m8767-clk",
+ .data = (void *)S5M8767X,
+ }, {
+ /* Sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, s2mps11_dt_match);
+#endif
+
static struct platform_driver s2mps11_clk_driver = {
.driver = {
.name = "s2mps11-clk",
diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
index a0df83e6b84b73..46c05c9a93541f 100644
--- a/drivers/clk/imx/clk-imx6q.c
+++ b/drivers/clk/imx/clk-imx6q.c
@@ -239,8 +239,12 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
* lvds1_gate and lvds2_gate are pseudo-gates. Both can be
* independently configured as clock inputs or outputs. We treat
* the "output_enable" bit as a gate, even though it's really just
- * enabling clock output.
+ * enabling clock output. The gate bits are cleared here at init;
+ * otherwise the exclusive configuration stays locked to whatever
+ * setup ran before the clock driver, with no way to change it.
*/
+ writel(readl(base + 0x160) & ~0x3c00, base + 0x160);
clk[IMX6QDL_CLK_LVDS1_GATE] = imx_clk_gate_exclusive("lvds1_gate", "lvds1_sel", base + 0x160, 10, BIT(12));
clk[IMX6QDL_CLK_LVDS2_GATE] = imx_clk_gate_exclusive("lvds2_gate", "lvds2_sel", base + 0x160, 11, BIT(13));
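
The mask in the added write is the union of the bits the two gate definitions below it use: select bits 10 and 11 plus output-enable bits 12 and 13, i.e. 0x400|0x800|0x1000|0x2000 = 0x3c00. The read-modify-write spelled out (a sketch, same register offset as above):

    u32 val = readl(base + 0x160);

    val &= ~0x3c00;                 /* clear bits 10-13: selects and gates */
    writel(val, base + 0x160);
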
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index 1be6230a07af0f..8b6306dc5fc64f 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -17,6 +17,8 @@
#include "clk.h"
+#define CCDR 0x4
+#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17)
#define CCSR 0xc
#define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
#define CACRR 0x10
@@ -414,6 +416,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
+ /* Ensure the MMDC CH0 handshake is bypassed */
+ writel_relaxed(readl_relaxed(base + CCDR) |
+ BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
imx_check_clocks(clks, ARRAY_SIZE(clks));
clk_data.clks = clks;
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 01718d05e95221..9e8f0e255de2dc 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -120,6 +120,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-anatop");
base = of_iomap(np, 0);
+ of_node_put(np);
WARN_ON(!base);
clks[IMX6UL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index 7cfb7b2a2ed65b..8878efb8062037 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -355,16 +355,16 @@ ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate,
struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
struct ingenic_cgu *cgu = ingenic_clk->cgu;
const struct ingenic_cgu_clk_info *clk_info;
- long rate = *parent_rate;
+ unsigned int div = 1;
clk_info = &cgu->clock_info[ingenic_clk->idx];
if (clk_info->type & CGU_CLK_DIV)
- rate /= ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
+ div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate);
else if (clk_info->type & CGU_CLK_FIXDIV)
- rate /= clk_info->fixdiv.div;
+ div = clk_info->fixdiv.div;
- return rate;
+ return DIV_ROUND_UP(*parent_rate, div);
}
static int
@@ -384,7 +384,7 @@ ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
if (clk_info->type & CGU_CLK_DIV) {
div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
- rate = parent_rate / div;
+ rate = DIV_ROUND_UP(parent_rate, div);
if (rate != req_rate)
return -EINVAL;
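
Why the rounding direction matters here: round_rate() and set_rate() must report the same value or clk_set_rate() bails with -EINVAL, as the hunk above shows. With a 48 MHz parent and a divider of 7, truncation and round-up disagree by 1 Hz; a standalone check:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long parent = 48000000, div = 7;

            printf("truncated: %lu\n", parent / div);              /* 6857142 */
            printf("round-up:  %lu\n", DIV_ROUND_UP(parent, div)); /* 6857143 */
            return 0;
    }
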
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
index 61893fe73251b4..18b6c9b55b951a 100644
--- a/drivers/clk/mmp/clk.c
+++ b/drivers/clk/mmp/clk.c
@@ -182,7 +182,7 @@ void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
pr_err("CLK %d has invalid pointer %p\n", id, clk);
return;
}
- if (id > unit->nr_clks) {
+ if (id >= unit->nr_clks) {
pr_err("CLK %d is invalid\n", id);
return;
}
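
The one-character mmp fix is the classic fencepost: an array of nr_clks entries has valid indices 0 through nr_clks-1, so equality must be rejected too. A standalone restatement:

    #include <assert.h>

    #define NR_CLKS 8

    static int id_valid(unsigned int id)
    {
            return id < NR_CLKS;    /* same as rejecting id >= NR_CLKS */
    }

    int main(void)
    {
            assert(id_valid(NR_CLKS - 1));
            assert(!id_valid(NR_CLKS));     /* the old '>' test let this through */
            return 0;
    }
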
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 389af3c15ec450..bde71b07f15e73 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -273,6 +273,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
{ .offset = GATE_BUS_TOP, .value = 0xffffffff, },
{ .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
{ .offset = GATE_IP_PERIC, .value = 0xffffffff, },
+ { .offset = GATE_IP_PERIS, .value = 0xffffffff, },
};
static int exynos5420_clk_suspend(void)
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 8c41c6fcb9ee5c..acf83569f86fe7 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -333,11 +333,11 @@ static struct pdiv_map pllu_p[] = {
};
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
- { 12000000, 480000000, 960, 12, 0, 12},
- { 13000000, 480000000, 960, 13, 0, 12},
- { 16800000, 480000000, 400, 7, 0, 5},
- { 19200000, 480000000, 200, 4, 0, 3},
- { 26000000, 480000000, 960, 26, 0, 12},
+ { 12000000, 480000000, 960, 12, 2, 12 },
+ { 13000000, 480000000, 960, 13, 2, 12 },
+ { 16800000, 480000000, 400, 7, 2, 5 },
+ { 19200000, 480000000, 200, 4, 2, 3 },
+ { 26000000, 480000000, 960, 26, 2, 12 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -1372,6 +1372,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{TEGRA30_CLK_GR2D, TEGRA30_CLK_PLL_C, 300000000, 0},
{TEGRA30_CLK_GR3D, TEGRA30_CLK_PLL_C, 300000000, 0},
{TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0},
+ { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
{TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0}, /* This MUST be the last entry. */
};
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 47f8aafe33441a..d65a6036d61053 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -379,6 +379,13 @@ static void exynos4_mct_tick_start(unsigned long cycles,
exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+{
+ /* Clear the MCT tick interrupt */
+ if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
+}
+
static int exynos4_tick_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
@@ -395,6 +402,7 @@ static int set_state_shutdown(struct clock_event_device *evt)
mevt = container_of(evt, struct mct_clock_event_device, evt);
exynos4_mct_tick_stop(mevt);
+ exynos4_mct_tick_clear(mevt);
return 0;
}
@@ -411,8 +419,11 @@ static int set_state_periodic(struct clock_event_device *evt)
return 0;
}
-static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
+ struct mct_clock_event_device *mevt = dev_id;
+ struct clock_event_device *evt = &mevt->evt;
+
/*
* This is for supporting oneshot mode.
* MCT would generate interrupts periodically
@@ -421,16 +432,6 @@ static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
if (!clockevent_state_periodic(&mevt->evt))
exynos4_mct_tick_stop(mevt);
- /* Clear the MCT tick interrupt */
- if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
- exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
-}
-
-static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
-{
- struct mct_clock_event_device *mevt = dev_id;
- struct clock_event_device *evt = &mevt->evt;
-
exynos4_mct_tick_clear(mevt);
evt->event_handler(evt);
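
After the exynos_mct reshuffle, the ISR follows the usual stop/ack/handle order, so an event handler that immediately re-arms the timer cannot have its fresh interrupt cleared out from under it. The resulting ISR, condensed from the hunks above:

    static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
    {
            struct mct_clock_event_device *mevt = dev_id;
            struct clock_event_device *evt = &mevt->evt;

            if (!clockevent_state_periodic(evt))
                    exynos4_mct_tick_stop(mevt);    /* oneshot: stop first */

            exynos4_mct_tick_clear(mevt);           /* ack before handling */
            evt->event_handler(evt);
            return IRQ_HANDLED;
    }
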
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 0efd36e483ab41..60c8a9bd562d96 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -19,6 +19,13 @@
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown = true;
+
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, it's not very useful
@@ -108,8 +115,11 @@ static int pit_shutdown(struct clock_event_device *evt)
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
+
+ if (i8253_clear_counter_on_shutdown) {
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
raw_spin_unlock(&i8253_lock);
return 0;
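
With the new global in place, a platform whose PIT restarts when the counter is zeroed opts out once during early setup, before the clockevent can ever be shut down. A hypothetical platform hook (assuming the flag is declared in linux/i8253.h, as in the upstream version of this change):

    #include <linux/i8253.h>

    static void __init quirky_pit_setup(void)
    {
            i8253_clear_counter_on_shutdown = false;
    }
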
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 8518d9dfba5c40..73c990867c019c 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -98,6 +98,9 @@ static void __init ti_32k_timer_init(struct device_node *np)
return;
}
+ if (!of_machine_is_compatible("ti,am43"))
+ ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
ti_32k_timer.counter = ti_32k_timer.base;
/*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e302a43d43e637..0be3dbfa3dee70 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -538,13 +538,13 @@ EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret, enable;
@@ -667,6 +667,8 @@ static ssize_t store_##file_name \
struct cpufreq_policy new_policy; \
\
memcpy(&new_policy, policy, sizeof(*policy)); \
+ new_policy.min = policy->user_policy.min; \
+ new_policy.max = policy->user_policy.max; \
\
ret = sscanf(buf, "%u", &new_policy.object); \
if (ret != 1) \
@@ -2275,14 +2277,13 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
* Useful for policy notifiers which have different necessities
* at different times.
*/
-int cpufreq_update_policy(unsigned int cpu)
+void cpufreq_update_policy(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
struct cpufreq_policy new_policy;
- int ret;
if (!policy)
- return -ENODEV;
+ return;
down_write(&policy->rwsem);
@@ -2297,10 +2298,8 @@ int cpufreq_update_policy(unsigned int cpu)
*/
if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
new_policy.cur = cpufreq_driver->get(cpu);
- if (WARN_ON(!new_policy.cur)) {
- ret = -EIO;
+ if (WARN_ON(!new_policy.cur))
goto unlock;
- }
if (!policy->cur) {
pr_debug("Driver did not initialize current freq\n");
@@ -2311,13 +2310,12 @@ int cpufreq_update_policy(unsigned int cpu)
}
}
- ret = cpufreq_set_policy(policy, &new_policy);
+ cpufreq_set_policy(policy, &new_policy);
unlock:
up_write(&policy->rwsem);
cpufreq_cpu_put(policy);
- return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
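
The two lines added to the store_##file_name macro seed the scratch policy from the user's last requested bounds instead of the currently active (possibly notifier-clamped) ones, so writing scaling_max_freq can raise the limit again after, say, a thermal cap is lifted. The macro body after the change, condensed into a sketch with comments:

    memcpy(&new_policy, policy, sizeof(*policy));
    new_policy.min = policy->user_policy.min;   /* user intent, not the live clamp */
    new_policy.max = policy->user_policy.max;

    /* ... sscanf() buf into new_policy.min or .max, then ... */
    ret = cpufreq_set_policy(policy, &new_policy);
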
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 5621bb03e874e9..f7b340c27ff285 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -48,11 +48,11 @@ enum {OD_NORMAL_SAMPLE, OD_SUB_SAMPLE};
/* Create attributes */
#define gov_sys_attr_ro(_name) \
-static struct global_attr _name##_gov_sys = \
+static struct kobj_attribute _name##_gov_sys = \
__ATTR(_name, 0444, show_##_name##_gov_sys, NULL)
#define gov_sys_attr_rw(_name) \
-static struct global_attr _name##_gov_sys = \
+static struct kobj_attribute _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
#define gov_pol_attr_ro(_name) \
@@ -74,7 +74,7 @@ __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
/* Create show/store routines */
#define show_one(_gov, file_name) \
static ssize_t show_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
+(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
struct _gov##_dbs_tuners *tuners = _gov##_dbs_cdata.gdbs_data->tuners; \
return sprintf(buf, "%u\n", tuners->file_name); \
@@ -90,7 +90,7 @@ static ssize_t show_##file_name##_gov_pol \
#define store_one(_gov, file_name) \
static ssize_t store_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) \
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) \
{ \
struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
return store_##file_name(dbs_data, buf, count); \
@@ -254,7 +254,7 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate)
#define declare_show_sampling_rate_min(_gov) \
static ssize_t show_sampling_rate_min_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
+(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
struct dbs_data *dbs_data = _gov##_dbs_cdata.gdbs_data; \
return sprintf(buf, "%u\n", dbs_data->min_sampling_rate); \
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index 1e102da866a7cf..06bbb3e8297c1f 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -1100,7 +1100,7 @@ static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
*/
#define show_gov_pol_sys(file_name) \
static ssize_t show_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, char *buf) \
+(struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return show_##file_name(common_tunables, buf); \
} \
@@ -1113,7 +1113,7 @@ static ssize_t show_##file_name##_gov_pol \
#define store_gov_pol_sys(file_name) \
static ssize_t store_##file_name##_gov_sys \
-(struct kobject *kobj, struct attribute *attr, const char *buf, \
+(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, \
size_t count) \
{ \
return store_##file_name(common_tunables, buf, count); \
@@ -1142,7 +1142,7 @@ show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);
#define gov_sys_attr_rw(_name) \
-static struct global_attr _name##_gov_sys = \
+static struct kobj_attribute _name##_gov_sys = \
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
#define gov_pol_attr_rw(_name) \
@@ -1164,7 +1164,7 @@ gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);
-static struct global_attr boostpulse_gov_sys =
+static struct kobj_attribute boostpulse_gov_sys =
__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
static struct freq_attr boostpulse_gov_pol =
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index ef1fa8145419cd..fa86946d12aa0a 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -130,8 +130,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
/* Ensure the arm clock divider is what we expect */
ret = clk_set_rate(arm_clk, new_freq * 1000);
if (ret) {
+ int ret1;
+
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
- regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+ if (ret1)
+ dev_warn(cpu_dev,
+ "failed to restore vddarm voltage: %d\n", ret1);
return ret;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 598be6b255c6f5..56161837f70131 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -365,13 +365,13 @@ static void __init intel_pstate_debug_expose_params(void)
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
- (struct kobject *kobj, struct attribute *attr, char *buf) \
+ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", limits->object); \
}
static ssize_t show_turbo_pct(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
@@ -387,7 +387,7 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
}
static ssize_t show_num_pstates(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
@@ -398,7 +398,7 @@ static ssize_t show_num_pstates(struct kobject *kobj,
}
static ssize_t show_no_turbo(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -411,7 +411,7 @@ static ssize_t show_no_turbo(struct kobject *kobj,
return ret;
}
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -435,7 +435,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -460,7 +460,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
return count;
}
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index 09637723274765..cd0333418d157c 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return ret;
}
-static void __init pxa_cpufreq_init_voltages(void)
+static void pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
@@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
return 0;
}
-static void __init pxa_cpufreq_init_voltages(void) { }
+static void pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
diff --git a/drivers/cpufreq/tegra124-cpufreq.c b/drivers/cpufreq/tegra124-cpufreq.c
index 43530254201a8b..4bb154f6c54cdc 100644
--- a/drivers/cpufreq/tegra124-cpufreq.c
+++ b/drivers/cpufreq/tegra124-cpufreq.c
@@ -134,6 +134,8 @@ static int tegra124_cpufreq_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
+ of_node_put(np);
+
return 0;
out_switch_to_pllx:
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index db2ede565f1aab..b44476a1b7ad8a 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
{
int ret;
struct device_node *root = of_find_node_by_path("/");
+ const struct of_device_id *match_id;
if (!root)
return -ENODEV;
@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
/*
* Initialize the driver just for a compliant set of machines
*/
- if (!of_match_node(compatible_machine_match, root))
+ match_id = of_match_node(compatible_machine_match, root);
+
+ of_node_put(root);
+
+ if (!match_id)
return -ENODEV;
if (!mcpm_is_available())
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index c44a843cb405f0..44ebda8bbc848d 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -29,9 +29,31 @@ struct cpuidle_driver powernv_idle_driver = {
static int max_idle_state;
static struct cpuidle_state *cpuidle_state_table;
-static u64 snooze_timeout;
+static u64 default_snooze_timeout;
static bool snooze_timeout_en;
+static u64 get_snooze_timeout(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+{
+ int i;
+
+ if (unlikely(!snooze_timeout_en))
+ return default_snooze_timeout;
+
+ for (i = index + 1; i < drv->state_count; i++) {
+ struct cpuidle_state *s = &drv->states[i];
+ struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+ if (s->disabled || su->disable)
+ continue;
+
+ return s->target_residency * tb_ticks_per_usec;
+ }
+
+ return default_snooze_timeout;
+}
+
static int snooze_loop(struct cpuidle_device *dev,
struct cpuidle_driver *drv,
int index)
@@ -41,7 +63,7 @@ static int snooze_loop(struct cpuidle_device *dev,
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
- snooze_exit_time = get_tb() + snooze_timeout;
+ snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
ppc64_runlatch_off();
while (!need_resched()) {
HMT_low();
@@ -286,11 +308,9 @@ static int powernv_idle_probe(void)
cpuidle_state_table = powernv_states;
/* Device tree can indicate more idle states */
max_idle_state = powernv_add_idle_states();
- if (max_idle_state > 1) {
+ default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
+ if (max_idle_state > 1)
snooze_timeout_en = true;
- snooze_timeout = powernv_states[1].target_residency *
- tb_ticks_per_usec;
- }
} else
return -ENODEV;
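
get_snooze_timeout() encodes a simple rule: polling in snooze is only worthwhile up to the target residency of the next enabled deeper state; past that point the deeper state wins. The selection logic restated standalone (the arrays are hypothetical inputs):

    #include <stddef.h>
    #include <stdint.h>

    static uint64_t snooze_timeout(const int *disabled,
                                   const uint64_t *residency_us,
                                   size_t count, uint64_t ticks_per_us,
                                   uint64_t fallback)
    {
            for (size_t i = 1; i < count; i++)      /* state 0 is snooze itself */
                    if (!disabled[i])
                            return residency_us[i] * ticks_per_us;

            return fallback;                        /* everything deeper disabled */
    }
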
diff --git a/drivers/cpuidle/cpuidle-pseries.c b/drivers/cpuidle/cpuidle-pseries.c
index 07135e009d8b9c..601a6c3acc7f66 100644
--- a/drivers/cpuidle/cpuidle-pseries.c
+++ b/drivers/cpuidle/cpuidle-pseries.c
@@ -240,7 +240,13 @@ static int pseries_idle_probe(void)
return -ENODEV;
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- if (lppaca_shared_proc(get_lppaca())) {
+ /*
+ * Use local_paca instead of get_lppaca() because preemption is
+ * not disabled here, and it is not required anyway: lppaca_ptr
+ * does not need to be the value associated with the current CPU;
+ * it can be from any CPU.
+ */
+ if (lppaca_shared_proc(local_paca->lppaca_ptr)) {
cpuidle_state_table = shared_states;
max_idle_state = ARRAY_SIZE(shared_states);
} else {
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 58a630e55d5d60..78d0722feacb59 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -207,7 +207,7 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
dev->pdr_pa);
return -ENOMEM;
}
- memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
+ memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
256 * PPC4XX_NUM_PD,
&dev->shadow_sa_pool_pa,
@@ -240,13 +240,15 @@ static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
- if (dev->pdr != NULL)
+ if (dev->pdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_pd) * PPC4XX_NUM_PD,
dev->pdr, dev->pdr_pa);
+
if (dev->shadow_sa_pool)
dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
+
if (dev->shadow_sr_pool)
dma_free_coherent(dev->core_dev->device,
sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
@@ -416,12 +418,12 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
- if (dev->sdr != NULL)
+ if (dev->sdr)
dma_free_coherent(dev->core_dev->device,
sizeof(struct ce_sd) * PPC4XX_NUM_SD,
dev->sdr, dev->sdr_pa);
- if (dev->scatter_buffer_va != NULL)
+ if (dev->scatter_buffer_va)
dma_free_coherent(dev->core_dev->device,
dev->scatter_buffer_size * PPC4XX_NUM_SD,
dev->scatter_buffer_va,
@@ -1029,12 +1031,10 @@ int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
break;
}
- if (rc) {
- list_del(&alg->entry);
+ if (rc)
kfree(alg);
- } else {
+ else
list_add_tail(&alg->entry, &sec_dev->alg_list);
- }
}
return 0;
@@ -1188,7 +1188,7 @@ static int crypto4xx_probe(struct platform_device *ofdev)
rc = crypto4xx_build_gdr(core_dev->dev);
if (rc)
- goto err_build_gdr;
+ goto err_build_pdr;
rc = crypto4xx_build_sdr(core_dev->dev);
if (rc)
@@ -1230,12 +1230,11 @@ err_iomap:
err_request_irq:
irq_dispose_mapping(core_dev->irq);
tasklet_kill(&core_dev->tasklet);
- crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
+ crypto4xx_destroy_sdr(core_dev->dev);
crypto4xx_destroy_gdr(core_dev->dev);
-err_build_gdr:
- crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
+ crypto4xx_destroy_pdr(core_dev->dev);
kfree(core_dev->dev);
err_alloc_dev:
kfree(core_dev);
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index f3307fc38e7905..f2d1fea23fbfaf 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2081,6 +2081,7 @@ static void init_aead_job(struct aead_request *req,
if (unlikely(req->src != req->dst)) {
if (!edesc->dst_nents) {
dst_dma = sg_dma_address(req->dst);
+ out_options = 0;
} else {
dst_dma = edesc->sec4_sg_dma +
sec4_sg_index *
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 59ed54e464a96e..fe8cfe24c518f0 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@ struct dcp {
struct dcp_coherent_block *coh;
struct completion completion[DCP_MAX_CHANS];
- struct mutex mutex[DCP_MAX_CHANS];
+ spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
};
@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)
int ret;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
arq->complete(arq, ret);
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -407,9 +411,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
@@ -645,13 +649,20 @@ static int dcp_chan_thread_sha(void *data)
struct ahash_request *req;
int ret, fini;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -663,12 +674,8 @@ static int dcp_chan_thread_sha(void *data)
ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
- if (!fini)
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -726,9 +733,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
rctx->init = 1;
}
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
@@ -984,7 +991,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdcp);
for (i = 0; i < DCP_MAX_CHANS; i++) {
- mutex_init(&sdcp->mutex[i]);
+ spin_lock_init(&sdcp->lock[i]);
init_completion(&sdcp->completion[i]);
crypto_init_queue(&sdcp->queue[i], 50);
}
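
Both mxs-dcp worker loops now follow the canonical lost-wakeup-free pattern: set TASK_INTERRUPTIBLE before checking the queue, so a wake_up_process() racing with the check either finds the task already running or arrives after the state change and turns the upcoming schedule() into a no-op. The skeleton, with queue_lock, dequeue() and process() as hypothetical stand-ins:

    static int worker_thread(void *data)
    {
            while (!kthread_should_stop()) {
                    void *work;

                    set_current_state(TASK_INTERRUPTIBLE);

                    spin_lock(&queue_lock);
                    work = dequeue();               /* hypothetical helper */
                    spin_unlock(&queue_lock);

                    if (!work) {
                            schedule();             /* race-free: state set first */
                            continue;
                    }

                    set_current_state(TASK_RUNNING);
                    process(work);                  /* hypothetical helper */
            }

            return 0;
    }
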
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index da2d6777bd092f..047ef69b7e6531 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
return;
}
+ count -= initial;
+
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
@@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
: "+S"(input), "+D"(output)
- : "d"(control_word), "b"(key), "c"(count - initial));
+ : "d"(control_word), "b"(key), "c"(count));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
@@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
if (count < cbc_fetch_blocks)
return cbc_crypt(input, output, key, iv, control_word, count);
+ count -= initial;
+
if (initial)
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
@@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
: "+S" (input), "+D" (output), "+a" (iv)
- : "d" (control_word), "b" (key), "c" (count-initial));
+ : "d" (control_word), "b" (key), "c" (count));
return iv;
}
@@ -515,7 +519,7 @@ static int __init padlock_init(void)
if (!x86_match_cpu(padlock_cpu_id))
return -ENODEV;
- if (!cpu_has_xcrypt_enabled) {
+ if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
return -ENODEV;
}
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 4e154c9b92064b..8c5f90647b7a77 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -540,7 +540,7 @@ static int __init padlock_init(void)
struct shash_alg *sha1;
struct shash_alg *sha256;
- if (!x86_match_cpu(padlock_sha_ids) || !cpu_has_phe_enabled)
+ if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
return -ENODEV;
/* Register the newly added algorithm module if on *
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index f68c24a98277a0..dedfc96acc66b6 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1363,7 +1363,7 @@ err_sha_v4_algs:
err_sha_v3_algs:
for (j = 0; j < k; j++)
- crypto_unregister_ahash(&sha_v4_algs[j]);
+ crypto_unregister_ahash(&sha_v3_algs[j]);
err_aes_algs:
for (j = 0; j < i; j++)
@@ -1379,7 +1379,7 @@ static void sahara_unregister_algs(struct sahara_dev *dev)
for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
crypto_unregister_alg(&aes_algs[i]);
- for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
+ for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
crypto_unregister_ahash(&sha_v3_algs[i]);
if (dev->version > SAHARA_VERSION_3)
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 790f7cadc1ed87..efebc484e371b1 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -555,7 +555,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_src,
ctx->device->dma.sg_src_len,
- direction, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
break;
case DMA_FROM_DEVICE:
@@ -579,7 +579,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_dst,
ctx->device->dma.sg_dst_len,
- direction,
+ DMA_DEV_TO_MEM,
DMA_CTRL_ACK |
DMA_PREP_INTERRUPT);
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index cd439849849508..bca6b701c06792 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -181,7 +181,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
__func__);
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg, ctx->device->dma.sg_len,
- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c
index 263af709e53604..b907e4b1bbe22e 100644
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -53,8 +53,6 @@ static int p8_aes_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
crypto_cipher_set_flags(fallback,
crypto_cipher_get_flags((struct
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index 3f8bb9a40df126..d8ef1147b34425 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -55,8 +55,6 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
crypto_blkcipher_set_flags(
fallback,
@@ -113,24 +111,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
nbytes);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ enable_kernel_altivec();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->enc_key, walk.iv, 1);
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- pagefault_enable();
- preempt_enable();
}
return ret;
@@ -154,24 +151,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
nbytes);
} else {
- preempt_disable();
- pagefault_disable();
- enable_kernel_altivec();
- enable_kernel_vsx();
-
blkcipher_walk_init(&walk, dst, src, nbytes);
ret = blkcipher_walk_virt(desc, &walk);
while ((nbytes = walk.nbytes)) {
+ preempt_disable();
+ pagefault_disable();
+ enable_kernel_vsx();
+ enable_kernel_altivec();
aes_p8_cbc_encrypt(walk.src.virt.addr,
walk.dst.virt.addr,
nbytes & AES_BLOCK_MASK,
&ctx->dec_key, walk.iv, 0);
+ pagefault_enable();
+ preempt_enable();
+
nbytes &= AES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, &walk, nbytes);
}
-
- pagefault_enable();
- preempt_enable();
}
return ret;
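
The aes_cbc hunks shrink the no-preempt/no-pagefault window to just the vector-unit cipher call: blkcipher_walk_done() can fault or sleep, so it must run outside the window, and the VSX/AltiVec enables must be redone after every potential reschedule. The loop they converge on, condensed:

    while ((nbytes = walk.nbytes)) {
            preempt_disable();
            pagefault_disable();
            enable_kernel_vsx();
            enable_kernel_altivec();

            aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                               nbytes & AES_BLOCK_MASK,
                               &ctx->enc_key, walk.iv, 1);

            pagefault_enable();
            preempt_enable();

            nbytes &= AES_BLOCK_SIZE - 1;           /* leftover partial block */
            ret = blkcipher_walk_done(desc, &walk, nbytes);
    }
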
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index d83ab4bac8b125..7d070201b3d30f 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -53,8 +53,6 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
crypto_blkcipher_set_flags(
fallback,
diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c
index 9cb3a0b715e231..84b9389bf1edb9 100644
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -64,8 +64,6 @@ static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
alg, PTR_ERR(fallback));
return PTR_ERR(fallback);
}
- printk(KERN_INFO "Using '%s' as fallback implementation.\n",
- crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
crypto_shash_set_flags(fallback,
crypto_shash_get_flags((struct crypto_shash
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 38031191bcd5e7..031c2281bea09e 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -31,7 +31,8 @@
#define MAX(a,b) ((a > b) ? a : b)
#define MIN(a,b) ((a < b) ? a : b)
-static struct class *devfreq_class;
+struct class *devfreq_class;
+EXPORT_SYMBOL_GPL(devfreq_class);
/*
* devfreq core provides delayed work based load monitoring helper
@@ -72,6 +73,21 @@ static struct devfreq *find_device_devfreq(struct device *dev)
return ERR_PTR(-ENODEV);
}
+/**
+ * dev_to_devfreq() - find devfreq struct using device pointer
+ * @dev: device pointer used to lookup device devfreq.
+ */
+struct devfreq *dev_to_devfreq(struct device *dev)
+{
+ struct devfreq *devfreq;
+
+ mutex_lock(&devfreq_list_lock);
+ devfreq = find_device_devfreq(dev);
+ mutex_unlock(&devfreq_list_lock);
+
+ return devfreq;
+}
+
static unsigned long find_available_min_freq(struct devfreq *devfreq)
{
struct dev_pm_opp *opp;
@@ -258,6 +274,7 @@ static int devfreq_notify_transition(struct devfreq *devfreq,
*/
int update_devfreq(struct devfreq *devfreq)
{
+ struct devfreq_policy *policy = &devfreq->policy;
struct devfreq_freqs freqs;
unsigned long freq, cur_freq, min_freq, max_freq;
int err = 0;
@@ -268,29 +285,30 @@ int update_devfreq(struct devfreq *devfreq)
return -EINVAL;
}
- if (!devfreq->governor)
+ if (!policy->governor)
return -EINVAL;
+ policy->min = policy->devinfo.min_freq;
+ policy->max = policy->devinfo.max_freq;
+
+ srcu_notifier_call_chain(&devfreq->policy_notifier_list,
+ DEVFREQ_ADJUST, policy);
+
/* Reevaluate the proper frequency */
- err = devfreq->governor->get_target_freq(devfreq, &freq);
+ err = policy->governor->get_target_freq(devfreq, &freq);
if (err)
return err;
- /*
- * Adjust the frequency with user freq, QoS and available freq.
- *
- * List from the highest priority
- * max_freq
- * min_freq
- */
- max_freq = MIN(devfreq->scaling_max_freq, devfreq->max_freq);
- min_freq = MAX(devfreq->scaling_min_freq, devfreq->min_freq);
+ /* Adjust the frequency */
+
+ max_freq = MIN(policy->max, policy->user.max_freq);
+ min_freq = MAX(policy->min, policy->user.min_freq);
- if (min_freq && freq < min_freq) {
+ if (freq < min_freq) {
freq = min_freq;
flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
}
- if (max_freq && freq > max_freq) {
+ if (freq > max_freq) {
freq = max_freq;
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
}
@@ -498,18 +516,19 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
void *devp)
{
struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
+ struct devfreq_policy *policy = &devfreq->policy;
int ret;
mutex_lock(&devfreq->lock);
- devfreq->scaling_min_freq = find_available_min_freq(devfreq);
- if (!devfreq->scaling_min_freq) {
+ policy->devinfo.min_freq = find_available_min_freq(devfreq);
+ if (!policy->devinfo.min_freq) {
mutex_unlock(&devfreq->lock);
return -EINVAL;
}
- devfreq->scaling_max_freq = find_available_max_freq(devfreq);
- if (!devfreq->scaling_max_freq) {
+ policy->devinfo.max_freq = find_available_max_freq(devfreq);
+ if (!policy->devinfo.max_freq) {
mutex_unlock(&devfreq->lock);
return -EINVAL;
}
@@ -529,6 +548,7 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
static void devfreq_dev_release(struct device *dev)
{
struct devfreq *devfreq = to_devfreq(dev);
+ struct devfreq_policy *policy = &devfreq->policy;
mutex_lock(&devfreq_list_lock);
if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
@@ -539,9 +559,9 @@ static void devfreq_dev_release(struct device *dev)
list_del(&devfreq->node);
mutex_unlock(&devfreq_list_lock);
- if (devfreq->governor)
- devfreq->governor->event_handler(devfreq,
- DEVFREQ_GOV_STOP, NULL);
+ if (policy->governor)
+ policy->governor->event_handler(devfreq,
+ DEVFREQ_GOV_STOP, NULL);
if (devfreq->profile->exit)
devfreq->profile->exit(devfreq->dev.parent);
@@ -564,6 +584,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
void *data)
{
struct devfreq *devfreq;
+ struct devfreq_policy *policy;
struct devfreq_governor *governor;
static atomic_t devfreq_no = ATOMIC_INIT(-1);
int err = 0;
@@ -591,13 +612,14 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_out;
}
+ policy = &devfreq->policy;
mutex_init(&devfreq->lock);
mutex_lock(&devfreq->lock);
devfreq->dev.parent = dev;
devfreq->dev.class = devfreq_class;
devfreq->dev.release = devfreq_dev_release;
devfreq->profile = profile;
- strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
+ strncpy(policy->governor_name, governor_name, DEVFREQ_NAME_LEN);
devfreq->previous_freq = profile->initial_freq;
devfreq->last_status.current_frequency = profile->initial_freq;
devfreq->data = data;
@@ -615,21 +637,21 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- devfreq->min_freq = find_available_min_freq(devfreq);
- if (!devfreq->min_freq) {
+ policy->devinfo.min_freq = find_available_min_freq(devfreq);
+ if (!policy->devinfo.min_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_min_freq = devfreq->min_freq;
+ policy->user.min_freq = policy->devinfo.min_freq;
- devfreq->max_freq = find_available_max_freq(devfreq);
- if (!devfreq->max_freq) {
+ policy->devinfo.max_freq = find_available_max_freq(devfreq);
+ if (!policy->devinfo.max_freq) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->scaling_max_freq = devfreq->max_freq;
+ policy->user.max_freq = policy->devinfo.max_freq;
dev_set_name(&devfreq->dev, "devfreq%d",
atomic_inc_return(&devfreq_no));
@@ -650,13 +672,14 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->last_stat_updated = jiffies;
srcu_init_notifier_head(&devfreq->transition_notifier_list);
+ srcu_init_notifier_head(&devfreq->policy_notifier_list);
mutex_unlock(&devfreq->lock);
mutex_lock(&devfreq_list_lock);
list_add(&devfreq->node, &devfreq_list);
- governor = find_devfreq_governor(devfreq->governor_name);
+ governor = find_devfreq_governor(policy->governor_name);
if (IS_ERR(governor)) {
dev_err(dev, "%s: Unable to find governor for the device\n",
__func__);
@@ -664,9 +687,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_init;
}
- devfreq->governor = governor;
- err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
- NULL);
+ policy->governor = governor;
+ err = policy->governor->event_handler(devfreq, DEVFREQ_GOV_START,
+ NULL);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
@@ -827,10 +850,10 @@ int devfreq_suspend_device(struct devfreq *devfreq)
if (!devfreq)
return -EINVAL;
- if (!devfreq->governor)
+ if (!devfreq->policy.governor)
return 0;
- return devfreq->governor->event_handler(devfreq,
+ return devfreq->policy.governor->event_handler(devfreq,
DEVFREQ_GOV_SUSPEND, NULL);
}
EXPORT_SYMBOL(devfreq_suspend_device);
@@ -848,10 +871,10 @@ int devfreq_resume_device(struct devfreq *devfreq)
if (!devfreq)
return -EINVAL;
- if (!devfreq->governor)
+ if (!devfreq->policy.governor)
return 0;
- return devfreq->governor->event_handler(devfreq,
+ return devfreq->policy.governor->event_handler(devfreq,
DEVFREQ_GOV_RESUME, NULL);
}
EXPORT_SYMBOL(devfreq_resume_device);
@@ -885,30 +908,31 @@ int devfreq_add_governor(struct devfreq_governor *governor)
list_for_each_entry(devfreq, &devfreq_list, node) {
int ret = 0;
struct device *dev = devfreq->dev.parent;
+ struct devfreq_policy *policy = &devfreq->policy;
- if (!strncmp(devfreq->governor_name, governor->name,
+ if (!strncmp(policy->governor_name, governor->name,
DEVFREQ_NAME_LEN)) {
/* The following should never occur */
- if (devfreq->governor) {
+ if (policy->governor) {
dev_warn(dev,
"%s: Governor %s already present\n",
- __func__, devfreq->governor->name);
- ret = devfreq->governor->event_handler(devfreq,
+ __func__, policy->governor->name);
+ ret = policy->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
dev_warn(dev,
"%s: Governor %s stop = %d\n",
__func__,
- devfreq->governor->name, ret);
+ policy->governor->name, ret);
}
/* Fall through */
}
- devfreq->governor = governor;
- ret = devfreq->governor->event_handler(devfreq,
+ policy->governor = governor;
+ ret = policy->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
if (ret) {
dev_warn(dev, "%s: Governor %s start=%d\n",
- __func__, devfreq->governor->name,
+ __func__, policy->governor->name,
ret);
}
}
@@ -947,24 +971,25 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
list_for_each_entry(devfreq, &devfreq_list, node) {
int ret;
struct device *dev = devfreq->dev.parent;
+ struct devfreq_policy *policy = &devfreq->policy;
- if (!strncmp(devfreq->governor_name, governor->name,
+ if (!strncmp(policy->governor_name, governor->name,
DEVFREQ_NAME_LEN)) {
/* we should have a devfreq governor! */
- if (!devfreq->governor) {
+ if (!policy->governor) {
dev_warn(dev, "%s: Governor %s NOT present\n",
__func__, governor->name);
continue;
/* Fall through */
}
- ret = devfreq->governor->event_handler(devfreq,
+ ret = policy->governor->event_handler(devfreq,
DEVFREQ_GOV_STOP, NULL);
if (ret) {
dev_warn(dev, "%s: Governor %s stop=%d\n",
- __func__, devfreq->governor->name,
+ __func__, policy->governor->name,
ret);
}
- devfreq->governor = NULL;
+ policy->governor = NULL;
}
}
@@ -979,16 +1004,17 @@ EXPORT_SYMBOL(devfreq_remove_governor);
static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!to_devfreq(dev)->governor)
+ if (!to_devfreq(dev)->policy.governor)
return -EINVAL;
- return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
+ return sprintf(buf, "%s\n", to_devfreq(dev)->policy.governor->name);
}
static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct devfreq *df = to_devfreq(dev);
+ struct devfreq_policy *policy = &df->policy;
int ret;
char str_governor[DEVFREQ_NAME_LEN + 1];
struct devfreq_governor *governor;
@@ -1003,29 +1029,30 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
ret = PTR_ERR(governor);
goto out;
}
- if (df->governor == governor) {
+ if (policy->governor == governor) {
ret = 0;
goto out;
- } else if ((df->governor && df->governor->immutable) ||
+ } else if ((policy->governor && policy->governor->immutable) ||
governor->immutable) {
ret = -EINVAL;
goto out;
}
- if (df->governor) {
- ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+ if (policy->governor) {
+ ret = policy->governor->event_handler(df, DEVFREQ_GOV_STOP,
+ NULL);
if (ret) {
dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
- __func__, df->governor->name, ret);
+ __func__, policy->governor->name, ret);
goto out;
}
}
- df->governor = governor;
- strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
- ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
+ policy->governor = governor;
+ strncpy(policy->governor_name, governor->name, DEVFREQ_NAME_LEN);
+ ret = policy->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
if (ret)
dev_warn(dev, "%s: Governor %s not started(%d)\n",
- __func__, df->governor->name, ret);
+ __func__, policy->governor->name, ret);
out:
mutex_unlock(&devfreq_list_lock);
@@ -1040,6 +1067,7 @@ static ssize_t available_governors_show(struct device *d,
char *buf)
{
struct devfreq *df = to_devfreq(d);
+ struct devfreq_policy *policy = &df->policy;
ssize_t count = 0;
mutex_lock(&devfreq_list_lock);
@@ -1048,9 +1076,9 @@ static ssize_t available_governors_show(struct device *d,
* The devfreq with immutable governor (e.g., passive) shows
* only own governor.
*/
- if (df->governor->immutable) {
+ if (policy->governor->immutable) {
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
- "%s ", df->governor_name);
+ "%s ", policy->governor_name);
/*
* The devfreq device shows the registered governor except for
* immutable governors such as the passive governor.
@@ -1110,17 +1138,18 @@ static ssize_t polling_interval_store(struct device *dev,
const char *buf, size_t count)
{
struct devfreq *df = to_devfreq(dev);
+ struct devfreq_policy *policy = &df->policy;
unsigned int value;
int ret;
- if (!df->governor)
+ if (!policy->governor)
return -EINVAL;
ret = sscanf(buf, "%u", &value);
if (ret != 1)
return -EINVAL;
- df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
+ policy->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
ret = count;
return ret;
@@ -1132,21 +1161,30 @@ static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
{
struct devfreq *df = to_devfreq(dev);
unsigned long value;
+ unsigned long *freq_table;
int ret;
- unsigned long max;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
mutex_lock(&df->lock);
- max = df->max_freq;
- if (value && max && value > max) {
- ret = -EINVAL;
- goto unlock;
+
+ if (value) {
+ if (value > df->policy.user.max_freq) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ } else {
+ freq_table = df->profile->freq_table;
+ /* typical order is ascending, some drivers use descending */
+ if (freq_table[0] < freq_table[df->profile->max_state - 1])
+ value = freq_table[0];
+ else
+ value = freq_table[df->profile->max_state - 1];
}
- df->min_freq = value;
+ df->policy.user.min_freq = value;
update_devfreq(df);
ret = count;
unlock:
@@ -1157,9 +1195,10 @@ unlock:
static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct devfreq *df = to_devfreq(dev);
+ struct devfreq_policy *policy = &to_devfreq(dev)->policy;
- return sprintf(buf, "%lu\n", MAX(df->scaling_min_freq, df->min_freq));
+ return sprintf(buf, "%lu\n",
+ MAX(policy->devinfo.min_freq, policy->user.min_freq));
}
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
@@ -1167,21 +1206,30 @@ static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
{
struct devfreq *df = to_devfreq(dev);
unsigned long value;
+ unsigned long *freq_table;
int ret;
- unsigned long min;
ret = sscanf(buf, "%lu", &value);
if (ret != 1)
return -EINVAL;
mutex_lock(&df->lock);
- min = df->min_freq;
- if (value && min && value < min) {
- ret = -EINVAL;
- goto unlock;
+
+ if (value) {
+ if (value < df->policy.user.min_freq) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ } else {
+ freq_table = df->profile->freq_table;
+ /* typical order is ascending, some drivers use descending */
+ if (freq_table[0] < freq_table[df->profile->max_state - 1])
+ value = freq_table[df->profile->max_state - 1];
+ else
+ value = freq_table[0];
}
- df->max_freq = value;
+ df->policy.user.max_freq = value;
update_devfreq(df);
ret = count;
unlock:
@@ -1193,9 +1241,10 @@ static DEVICE_ATTR_RW(min_freq);
static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct devfreq *df = to_devfreq(dev);
+ struct devfreq_policy *policy = &to_devfreq(dev)->policy;
- return sprintf(buf, "%lu\n", MIN(df->scaling_max_freq, df->max_freq));
+ return sprintf(buf, "%lu\n",
+ MIN(policy->devinfo.max_freq, policy->user.max_freq));
}
static DEVICE_ATTR_RW(max_freq);
@@ -1461,7 +1510,7 @@ EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);
* devfreq_register_notifier() - Register a driver with devfreq
* @devfreq: The devfreq object.
* @nb: The notifier block to register.
- * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER or DEVFREQ_POLICY_NOTIFIER.
*/
int devfreq_register_notifier(struct devfreq *devfreq,
struct notifier_block *nb,
@@ -1477,6 +1526,10 @@ int devfreq_register_notifier(struct devfreq *devfreq,
ret = srcu_notifier_chain_register(
&devfreq->transition_notifier_list, nb);
break;
+ case DEVFREQ_POLICY_NOTIFIER:
+ ret = srcu_notifier_chain_register(
+ &devfreq->policy_notifier_list, nb);
+ break;
default:
ret = -EINVAL;
}
@@ -1489,7 +1542,7 @@ EXPORT_SYMBOL(devfreq_register_notifier);
* devfreq_unregister_notifier() - Unregister a driver with devfreq
* @devfreq: The devfreq object.
* @nb: The notifier block to be unregistered.
- * @list: DEVFREQ_TRANSITION_NOTIFIER.
+ * @list: DEVFREQ_TRANSITION_NOTIFIER or DEVFREQ_POLICY_NOTIFIER.
*/
int devfreq_unregister_notifier(struct devfreq *devfreq,
struct notifier_block *nb,
@@ -1505,6 +1558,11 @@ int devfreq_unregister_notifier(struct devfreq *devfreq,
ret = srcu_notifier_chain_unregister(
&devfreq->transition_notifier_list, nb);
break;
+ case DEVFREQ_POLICY_NOTIFIER:
+ ret = srcu_notifier_chain_unregister(
+ &devfreq->policy_notifier_list, nb);
+ break;
+
default:
ret = -EINVAL;
}
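The two notifier hunks above add a second SRCU chain next to the existing
transition chain. As a minimal sketch of how a consumer could subscribe to
it, using this patch's DEVFREQ_POLICY_NOTIFIER constant; the my_* names and
the callback body are placeholders, not part of the patch:

    #include <linux/devfreq.h>
    #include <linux/notifier.h>

    static int my_policy_cb(struct notifier_block *nb,
                            unsigned long event, void *data)
    {
            /* react to min/max policy changes here */
            return NOTIFY_OK;
    }

    static struct notifier_block my_policy_nb = {
            .notifier_call = my_policy_cb,
    };

    static int my_subscribe(struct devfreq *df)
    {
            /* subscribe to the policy chain introduced above */
            return devfreq_register_notifier(df, &my_policy_nb,
                                             DEVFREQ_POLICY_NOTIFIER);
    }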
diff --git a/drivers/devfreq/event/rockchip-dfi.c b/drivers/devfreq/event/rockchip-dfi.c
index 309a5976afaa99..8d5cefc99822c6 100644
--- a/drivers/devfreq/event/rockchip-dfi.c
+++ b/drivers/devfreq/event/rockchip-dfi.c
@@ -103,10 +103,9 @@ struct rockchip_dfi {
struct regmap *regmap_pmu;
struct clk *clk;
struct devfreq *devfreq;
+ struct mutex devfreq_lock;
unsigned int top;
unsigned int floor;
- bool enabled;
- struct mutex lock;
};
static unsigned int rockchip_dfi_calc_threshold_num(unsigned long rate,
@@ -239,14 +238,9 @@ static int rockchip_dfi_disable(struct devfreq_event_dev *edev)
{
struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
- mutex_lock(&info->lock);
-
- info->enabled = false;
rockchip_dfi_stop_hardware_counter(edev);
clk_disable_unprepare(info->clk);
- mutex_unlock(&info->lock);
-
return 0;
}
@@ -255,8 +249,6 @@ static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
int ret;
- mutex_lock(&info->lock);
-
ret = clk_prepare_enable(info->clk);
if (ret) {
dev_err(&edev->dev, "failed to enable dfi clk: %d\n", ret);
@@ -264,9 +256,6 @@ static int rockchip_dfi_enable(struct devfreq_event_dev *edev)
}
rockchip_dfi_start_hardware_counter(edev);
- info->enabled = true;
-
- mutex_unlock(&info->lock);
return 0;
}
@@ -296,9 +285,9 @@ static irqreturn_t ddrmon_thread_isr(int irq, void *data)
struct rockchip_dfi *info = data;
struct devfreq *devfreq;
- mutex_lock(&info->lock);
+ mutex_lock(&info->devfreq_lock);
- if (!info->enabled || !info->devfreq)
+ if (!info->devfreq)
goto out;
devfreq = info->devfreq;
@@ -307,7 +296,7 @@ static irqreturn_t ddrmon_thread_isr(int irq, void *data)
mutex_unlock(&devfreq->lock);
out:
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->devfreq_lock);
return IRQ_HANDLED;
}
@@ -335,9 +324,9 @@ void rockchip_dfi_set_devfreq(struct devfreq_event_dev *edev,
{
struct rockchip_dfi *info = devfreq_event_get_drvdata(edev);
- mutex_lock(&info->lock);
+ mutex_lock(&info->devfreq_lock);
info->devfreq = devfreq;
- mutex_unlock(&info->lock);
+ mutex_unlock(&info->devfreq_lock);
}
EXPORT_SYMBOL_GPL(rockchip_dfi_set_devfreq);
@@ -385,7 +374,7 @@ static int rockchip_dfi_probe(struct platform_device *pdev)
return PTR_ERR(data->regmap_pmu);
}
data->dev = dev;
- mutex_init(&data->lock);
+ mutex_init(&data->devfreq_lock);
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index 002ff301af455a..d1acbacf5b68db 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -25,6 +25,9 @@
#define DEVFREQ_GOV_SUSPEND 0x4
#define DEVFREQ_GOV_RESUME 0x5
+#define DEVFREQ_MIN_FREQ 0
+#define DEVFREQ_MAX_FREQ ULONG_MAX
+
/**
* struct devfreq_governor - Devfreq policy governor
* @node: list node - contains registered devfreq governors
@@ -54,9 +57,6 @@ struct devfreq_governor {
unsigned int event, void *data);
};
-/* Caution: devfreq->lock must be locked before calling update_devfreq */
-extern int update_devfreq(struct devfreq *devfreq);
-
extern void devfreq_monitor_start(struct devfreq *devfreq);
extern void devfreq_monitor_stop(struct devfreq *devfreq);
extern void devfreq_monitor_suspend(struct devfreq *devfreq);
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index c72f942f30a85a..94a91f4c994be0 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -20,10 +20,7 @@ static int devfreq_performance_func(struct devfreq *df,
* target callback should be able to get floor value as
* said in devfreq.h
*/
- if (!df->max_freq)
- *freq = UINT_MAX;
- else
- *freq = df->max_freq;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
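With DEVFREQ_MIN_FREQ and DEVFREQ_MAX_FREQ defined in governor.h, governors
now return sentinel frequencies and leave all clamping to the core. A sketch
of the clamp the core is expected to apply against the new policy fields;
devfreq_clamp_freq() is an illustrative helper, not a symbol added by this
patch:

    #include <linux/kernel.h>

    static unsigned long devfreq_clamp_freq(struct devfreq_policy *policy,
                                            unsigned long freq)
    {
            /* effective limits: tighter of device info and user request */
            unsigned long lo = max(policy->devinfo.min_freq,
                                   policy->user.min_freq);
            unsigned long hi = min(policy->devinfo.max_freq,
                                   policy->user.max_freq);

            return clamp(freq, lo, hi);
    }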
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 0c6bed567e6d4a..4395634da31927 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -20,7 +20,7 @@ static int devfreq_powersave_func(struct devfreq *df,
* target callback should be able to get ceiling value as
* said in devfreq.h
*/
- *freq = df->min_freq;
+ *freq = DEVFREQ_MIN_FREQ;
return 0;
}
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index b82f08968b2514..5833ddcc6aae00 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -27,7 +27,6 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
- unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
/*
* if devfreq in suspend status and have suspend_freq,
@@ -56,7 +55,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
/* Assume MAX if it is going to be divided by zero */
if (stat->total_time == 0) {
- *freq = max;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
@@ -69,13 +68,13 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
/* Set MAX if it's busy enough */
if (stat->busy_time * 100 >
stat->total_time * dfso_upthreshold) {
- *freq = max;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
/* Set MAX if we do not know the initial frequency */
if (stat->current_frequency == 0) {
- *freq = max;
+ *freq = DEVFREQ_MAX_FREQ;
return 0;
}
@@ -94,11 +93,6 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
*freq = (unsigned long) b;
- if (df->min_freq && *freq < df->min_freq)
- *freq = df->min_freq;
- if (df->max_freq && *freq > df->max_freq)
- *freq = df->max_freq;
-
return 0;
}
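For reference, the surviving ondemand math above, worked through with the
default thresholds (upthreshold 90, downdifferential 5) and made-up sample
numbers:

    /*
     * busy_time = 40, total_time = 100, current_frequency = 400 MHz:
     *
     *   b = 40 * 400 MHz / 100         = 160 MHz  (load-proportional)
     *   b = 160 MHz * 100 / (90 - 5/2) = 160 * 100 / 88 ~= 181 MHz
     *
     * The raw target is no longer clamped here; with the removed lines
     * gone, the core applies the policy min/max limits instead.
     */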
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 35de6e83c1febe..b90134f15fd0ab 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -26,19 +26,11 @@ static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
{
struct userspace_data *data = df->data;
- if (data->valid) {
- unsigned long adjusted_freq = data->user_frequency;
-
- if (df->max_freq && adjusted_freq > df->max_freq)
- adjusted_freq = df->max_freq;
-
- if (df->min_freq && adjusted_freq < df->min_freq)
- adjusted_freq = df->min_freq;
-
- *freq = adjusted_freq;
- } else {
+ if (data->valid)
+ *freq = data->user_frequency;
+ else
*freq = df->previous_freq; /* No user freq specified yet */
- }
+
return 0;
}
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930fd8..64a2e02b87d78c 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -688,9 +688,9 @@ static int tegra_devfreq_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq <= 0) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- return -ENODEV;
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
}
platform_set_drvdata(pdev, tegra);
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index be26f625bb3ee7..941ace0521306c 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1781,6 +1781,12 @@ static void atc_free_chan_resources(struct dma_chan *chan)
atchan->descs_allocated = 0;
atchan->status = 0;
+ /*
+ * Free atslave allocated in at_dma_xlate()
+ */
+ kfree(chan->private);
+ chan->private = NULL;
+
dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
@@ -1815,7 +1821,7 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL);
+ atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
if (!atslave)
return NULL;
@@ -2146,6 +2152,8 @@ static int at_dma_remove(struct platform_device *pdev)
struct resource *io;
at_dma_off(atdma);
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&atdma->dma_common);
dma_pool_destroy(atdma->memset_pool);
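The kzalloc()/kfree() hunks above pair up: chan->private now has channel
lifetime instead of device-managed lifetime, so it must be freed when the
channel releases its resources. The ownership shape, with my_* names as
placeholders:

    #include <linux/dmaengine.h>
    #include <linux/slab.h>

    static int my_attach_slave(struct dma_chan *chan, size_t size)
    {
            void *slave = kzalloc(size, GFP_KERNEL); /* not devm_kzalloc() */

            if (!slave)
                    return -ENOMEM;
            chan->private = slave;  /* lifetime now follows the channel */
            return 0;
    }

    static void my_free_chan_resources(struct dma_chan *chan)
    {
            kfree(chan->private);   /* paired release from the hunk above */
            chan->private = NULL;
    }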
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 82a7c89caae2aa..af24c5bf32d69e 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@ struct at_xdmac_chan {
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
+ u32 irq_status;
unsigned long status;
struct tasklet_struct tasklet;
struct dma_slave_config sconfig;
@@ -1582,8 +1583,8 @@ static void at_xdmac_tasklet(unsigned long data)
struct at_xdmac_desc *desc;
u32 error_mask;
- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
- __func__, atchan->status);
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+ __func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS
@@ -1591,15 +1592,15 @@ static void at_xdmac_tasklet(unsigned long data)
if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan);
- } else if ((atchan->status & AT_XDMAC_CIS_LIS)
- || (atchan->status & error_mask)) {
+ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+ || (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
- if (atchan->status & AT_XDMAC_CIS_RBEIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
- if (atchan->status & AT_XDMAC_CIS_WBEIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
- if (atchan->status & AT_XDMAC_CIS_ROIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock_bh(&atchan->lock);
@@ -1654,7 +1655,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
- atchan->status = chan_status & chan_imr;
+ atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status);
@@ -1668,7 +1669,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet);
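The new irq_status field separates what the hard IRQ actually saw from live
channel state the tasklet could otherwise re-read after a newer interrupt.
A minimal sketch of the latch-then-schedule pattern; struct my_chan and the
MY_* register offsets are stand-ins:

    #include <linux/interrupt.h>
    #include <linux/io.h>

    struct my_chan {
            void __iomem *base;
            u32 irq_status;         /* written in hard IRQ context only */
            struct tasklet_struct tasklet;
    };

    static irqreturn_t my_irq(int irq, void *dev_id)
    {
            struct my_chan *c = dev_id;

            /* latch the masked status once, then defer */
            c->irq_status = readl(c->base + MY_CIS) &
                            readl(c->base + MY_CIM);
            tasklet_schedule(&c->tasklet);
            return IRQ_HANDLED;
    }

    static void my_tasklet(unsigned long data)
    {
            struct my_chan *c = (struct my_chan *)data;

            /* act on the snapshot, never on a re-read of live state */
            if (c->irq_status & MY_ERROR_MASK)
                    pr_err("bus error\n");
    }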
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index dade7c47ff1845..8344b7c91fe356 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -750,6 +750,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
struct resource *res;
int i, ret;
+ if (!dev->of_node) {
+ dev_err(dev, "This driver must be probed from devicetree\n");
+ return -EINVAL;
+ }
+
jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 6796eb1a8a4cbd..884aecebb249b5 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -563,11 +563,9 @@ static int dmatest_func(void *data)
srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
- dmaengine_unmap_put(um);
result("src mapping error", total_tests,
src_off, dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
um->to_cnt++;
}
@@ -582,11 +580,9 @@ static int dmatest_func(void *data)
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
- dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
src_off, dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
um->bidi_cnt++;
}
@@ -611,12 +607,10 @@ static int dmatest_func(void *data)
}
if (!tx) {
- dmaengine_unmap_put(um);
result("prep error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
done->done = false;
@@ -625,12 +619,10 @@ static int dmatest_func(void *data)
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- dmaengine_unmap_put(um);
result("submit error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
dma_async_issue_pending(chan);
@@ -643,16 +635,14 @@ static int dmatest_func(void *data)
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
dmaengine_unmap_put(um);
result(status == DMA_ERROR ?
"completion error status" :
"completion busy status", total_tests, src_off,
dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
dmaengine_unmap_put(um);
@@ -691,6 +681,12 @@ static int dmatest_func(void *data)
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
}
+
+ continue;
+
+error_unmap_continue:
+ dmaengine_unmap_put(um);
+ failed_tests++;
}
runtime = ktime_us_delta(ktime_get(), ktime);
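All five duplicated failure paths in dmatest_func() now funnel into one
label at the bottom of the loop body, so the unmap call and the failure
count live in exactly one place. The control-flow shape, with the step
helpers as placeholders:

    for (i = 0; i < iterations; i++) {
            um = get_unmap_data();          /* placeholder acquire */

            if (map_buffers(um))
                    goto error_unmap_continue;
            if (submit_and_wait(um))
                    goto error_unmap_continue;

            record_success();
            continue;                       /* success skips the label */

    error_unmap_continue:
            dmaengine_unmap_put(um);        /* single unmap site */
            failed_tests++;
    }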
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 48d85f8b95fe1e..dfa337ae06fcce 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -619,7 +619,7 @@ static void imxdma_tasklet(unsigned long data)
{
struct imxdma_channel *imxdmac = (void *)data;
struct imxdma_engine *imxdma = imxdmac->imxdma;
- struct imxdma_desc *desc;
+ struct imxdma_desc *desc, *next_desc;
unsigned long flags;
spin_lock_irqsave(&imxdma->lock, flags);
@@ -649,10 +649,10 @@ static void imxdma_tasklet(unsigned long data)
list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
if (!list_empty(&imxdmac->ld_queue)) {
- desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
- node);
+ next_desc = list_first_entry(&imxdmac->ld_queue,
+ struct imxdma_desc, node);
list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
- if (imxdma_xfer_desc(desc) < 0)
+ if (imxdma_xfer_desc(next_desc) < 0)
dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
__func__, imxdmac->channel);
}
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index ac8c28968422e4..106fa9b327d926 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -1210,8 +1210,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- del_timer_sync(&ioat_chan->timer);
spin_unlock_bh(&ioat_chan->prep_lock);
+ /*
+ * Synchronization rule for del_timer_sync():
+ * - The caller must not hold locks which would prevent
+ * completion of the timer's handler.
+ * So prep_lock cannot be held before calling it.
+ */
+ del_timer_sync(&ioat_chan->timer);
+
/* this should quiesce then reset */
ioat_reset_hw(ioat_chan);
}
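The reorder above applies the del_timer_sync() rule spelled out in the new
comment: waiting for the handler while holding a lock the handler takes is
a deadlock. In miniature:

    spin_lock_bh(&ioat_chan->prep_lock);
    set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
    spin_unlock_bh(&ioat_chan->prep_lock);  /* drop the lock first... */

    /* ...then wait: the timer handler may itself take prep_lock */
    del_timer_sync(&ioat_chan->timer);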
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 1ba2fd73852d27..0f0c06ab414b21 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -660,7 +660,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct k3_dma_dev *d = ofdma->of_dma_data;
unsigned int request = dma_spec->args[0];
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
return dma_get_slave_channel(&(d->chans[request].vc.chan));
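The one-character k3dma change is an off-by-one guard: d->chans[] holds
d->dma_requests entries, so valid indices run from 0 through
dma_requests - 1:

    /* '>' used to admit request == d->dma_requests, one past the end */
    if (request >= d->dma_requests)
            return NULL;
    return dma_get_slave_channel(&d->chans[request].vc.chan);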
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 228e14f957ddee..8a3ddf0dd89fc9 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2169,13 +2169,14 @@ static int pl330_terminate_all(struct dma_chan *chan)
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
+
spin_lock(&pl330->lock);
_stop(pch->thread);
- spin_unlock(&pl330->lock);
-
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
+ spin_unlock(&pl330->lock);
+
power_down = pch->active;
pch->active = false;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 55f5d33f6dc70c..4251e9ac0373c6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1321,7 +1321,7 @@ static int pxad_init_phys(struct platform_device *op,
return 0;
}
-static const struct of_device_id const pxad_dt_ids[] = {
+static const struct of_device_id pxad_dt_ids[] = {
{ .compatible = "marvell,pdma-1.0", },
{}
};
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 56410ea75ac50e..cc8fc601ed477b 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -448,7 +448,7 @@ usb_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
{
struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
- struct usb_dmac_desc *desc;
+ struct usb_dmac_desc *desc, *_desc;
unsigned long flags;
LIST_HEAD(head);
LIST_HEAD(list);
@@ -459,7 +459,7 @@ static int usb_dmac_chan_terminate_all(struct dma_chan *chan)
if (uchan->desc)
uchan->desc = NULL;
list_splice_init(&uchan->desc_got, &list);
- list_for_each_entry(desc, &list, node)
+ list_for_each_entry_safe(desc, _desc, &list, node)
list_move_tail(&desc->node, &uchan->desc_freed);
spin_unlock_irqrestore(&uchan->vc.lock, flags);
vchan_dma_desc_free_list(&uchan->vc, &head);
@@ -700,6 +700,8 @@ static int usb_dmac_runtime_resume(struct device *dev)
#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
NULL)
};
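list_move_tail() rewrites the links of the node being visited, so the plain
iterator above would follow a pointer into desc_freed mid-walk; the _safe
variant caches the next entry before each move. The idiom in isolation:

    struct usb_dmac_desc *desc, *_desc;

    /* _desc holds the successor before desc is spliced away */
    list_for_each_entry_safe(desc, _desc, &list, node)
            list_move_tail(&desc->node, &uchan->desc_freed);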
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 792bdae2b91dfc..d14c8ffea9108a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1187,15 +1187,14 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
- return rc;
+ goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
- put_device(pvt->addrmatch_dev);
- device_del(pvt->addrmatch_dev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1209,9 +1208,18 @@ static int i7core_create_sysfs_devices(struct mem_ctl_info *mci)
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
- return rc;
+ goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+ put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+ device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+ put_device(pvt->addrmatch_dev);
+
+ return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1221,11 +1229,11 @@ static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
edac_dbg(1, "\n");
if (!pvt->is_registered) {
- put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+ put_device(pvt->chancounts_dev);
}
- put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+ put_device(pvt->addrmatch_dev);
}
/****************************************************************************
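The swapped calls above restore the documented teardown order for a device
registered with device_add(): unregister from the driver core first, drop
the final reference second. The same discipline drives the new error
labels:

    /* mirror of: device_initialize(); device_add(); */
    device_del(pvt->addrmatch_dev);   /* unregister from the core */
    put_device(pvt->addrmatch_dev);   /* then release the reference */

    /* a device that was initialized but never added is only put */
    put_device(pvt->chancounts_dev);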
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 2b2fecffb1ad64..c6a7c9ddf0ac82 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -192,6 +192,9 @@ static int usb_extcon_resume(struct device *dev)
}
enable_irq(info->id_irq);
+ if (!device_may_wakeup(dev))
+ queue_delayed_work(system_power_efficient_wq,
+ &info->wq_detcable, 0);
return ret;
}
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index be75740caf724f..dacb0b9297efa1 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -8,7 +8,7 @@ cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
cflags-$(CONFIG_X86) += -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 \
-fPIC -fno-strict-aliasing -mno-red-zone \
- -mno-mmx -mno-sse -DDISABLE_BRANCH_PROFILING
+ -mno-mmx -mno-sse
cflags-$(CONFIG_ARM64) := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie
cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
@@ -16,7 +16,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \
cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt
-KBUILD_CFLAGS := $(cflags-y) \
+KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \
$(call cc-option,-ffreestanding) \
$(call cc-option,-fno-stack-protector)
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 72791232e46ba4..437c8ef90643b7 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -513,6 +513,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_CHAP_TYPE:
rc = S_IRUGO;
+ break;
case ISCSI_BOOT_TGT_NAME:
if (tgt->tgt_name_len)
rc = S_IRUGO;
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index b34a62a5a7e128..47de63ae20daa8 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -137,8 +137,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
if (err < 0)
goto out;
- if (err & BIT(pos))
- err = -EACCES;
+ if (value & BIT(pos)) {
+ err = -EPERM;
+ goto out;
+ }
err = 0;
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 984186ee58a099..f5f7b5368da6fb 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@ struct adp5588_gpio {
uint8_t int_en[3];
uint8_t irq_mask[3];
uint8_t irq_stat[3];
+ uint8_t int_input_en[3];
+ uint8_t int_lvl_cached[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -177,12 +179,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+ if (dev->int_input_en[i]) {
+ mutex_lock(&dev->lock);
+ dev->dir[i] &= ~dev->int_input_en[i];
+ dev->int_input_en[i] = 0;
+ adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+ dev->dir[i]);
+ mutex_unlock(&dev->lock);
+ }
+
+ if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+ dev->int_lvl_cached[i] = dev->int_lvl[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+ dev->int_lvl[i]);
+ }
+
if (dev->int_en[i] ^ dev->irq_mask[i]) {
dev->int_en[i] = dev->irq_mask[i];
adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
dev->int_en[i]);
}
+ }
mutex_unlock(&dev->irq_lock);
}
@@ -225,9 +243,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
else
return -EINVAL;
- adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
- adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
- dev->int_lvl[bank]);
+ dev->int_input_en[bank] |= bit;
return 0;
}
diff --git a/drivers/gpio/gpio-max7301.c b/drivers/gpio/gpio-max7301.c
index 05813fbf3daf25..647dfbbc4e1cf4 100644
--- a/drivers/gpio/gpio-max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -25,7 +25,7 @@ static int max7301_spi_write(struct device *dev, unsigned int reg,
struct spi_device *spi = to_spi_device(dev);
u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
- return spi_write(spi, (const u8 *)&word, sizeof(word));
+ return spi_write_then_read(spi, &word, sizeof(word), NULL, 0);
}
/* A read from the MAX7301 means two transfers; here, one message each */
@@ -37,14 +37,8 @@ static int max7301_spi_read(struct device *dev, unsigned int reg)
struct spi_device *spi = to_spi_device(dev);
word = 0x8000 | (reg << 8);
- ret = spi_write(spi, (const u8 *)&word, sizeof(word));
- if (ret)
- return ret;
- /*
- * This relies on the fact, that a transfer with NULL tx_buf shifts out
- * zero bytes (=NOOP for MAX7301)
- */
- ret = spi_read(spi, (u8 *)&word, sizeof(word));
+ ret = spi_write_then_read(spi, &word, sizeof(word), &word,
+ sizeof(word));
if (ret)
return ret;
return word & 0xff;
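Both max7301 conversions rely on spi_write_then_read() copying through its
own DMA-safe bounce buffer, so passing the address of an on-stack u16 is
legal; spi_write()/spi_read() hand the caller's buffer straight to the
controller, which may DMA from the stack. The resulting read path in full:

    u16 word = 0x8000 | (reg << 8);
    int ret;

    /* tx and rx may share one stack variable: the core bounces both */
    ret = spi_write_then_read(spi, &word, sizeof(word),
                              &word, sizeof(word));
    if (ret)
            return ret;
    return word & 0xff;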
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 5536108aa9db5c..fe21734bbe5c4d 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -495,9 +495,10 @@ err_irq_alloc_descs:
chip = chip_save;
err_gpiochip_add:
+ chip = chip_save;
while (--i >= 0) {
- chip--;
gpiochip_remove(&chip->gpio);
+ chip++;
}
kfree(chip_save);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index fe9ef2bc981a98..6ae34279594f14 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -266,8 +266,8 @@ static int platform_msic_gpio_probe(struct platform_device *pdev)
int i;
if (irq < 0) {
- dev_err(dev, "no IRQ line\n");
- return -EINVAL;
+ dev_err(dev, "no IRQ line: %d\n", irq);
+ return irq;
}
if (!pdata || !pdata->gpio_base) {
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 896bf29776b093..fb2c1df4f58853 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -591,4 +591,4 @@ static int __init tegra_gpio_init(void)
{
return platform_driver_register(&tegra_gpio_driver);
}
-postcore_initcall(tegra_gpio_init);
+subsys_initcall(tegra_gpio_init);
diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c
index fa2662f8b02685..aff6e504c6668f 100644
--- a/drivers/gpio/gpio-tps68470.c
+++ b/drivers/gpio/gpio-tps68470.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO driver for TPS68470 PMIC
*
@@ -8,15 +9,6 @@
* Tianshu Qiu <tian.shu.qiu@intel.com>
* Jian Xu Zheng <jian.xu.zheng@intel.com>
* Yuning Pu <yuning.pu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/gpio/driver.h>
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 9031e60c815c23..6af57f210fa3a3 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -227,6 +227,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
+ int i;
int ret;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -265,6 +266,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ /* Mask all GPIO interrupts */
+ for (i = 0; i < gc->ngpio; i++)
+ vf610_gpio_writel(0, port->base + PORT_PCR(i));
+
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index e3cf40056f2d7b..65ea3f9a8fba55 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2208,6 +2208,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
struct gpio_desc *desc = NULL;
int status;
enum gpio_lookup_flags lookupflags = 0;
+ /* Maybe we have a device name, maybe not */
+ const char *devname = dev ? dev_name(dev) : "?";
dev_dbg(dev, "GPIO lookup for consumer %s\n", con_id);
@@ -2236,8 +2238,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}
- /* If a connection label was passed use that, else use the device name as label */
- status = gpiod_request(desc, con_id ? con_id : dev_name(dev));
+ /*
+ * If a connection label was passed use that, else attempt to use
+ * the device name as label
+ */
+ status = gpiod_request(desc, con_id ? con_id : devname);
if (status < 0)
return ERR_PTR(status);
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 98ab08c0aa2d2d..07541c5670e6f6 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -30,7 +30,7 @@ struct acpi_gpio_info {
};
/* gpio suffixes used for ACPI and device tree lookup */
-static const char * const gpio_suffixes[] = { "gpios", "gpio" };
+static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
#ifdef CONFIG_ACPI
void acpi_gpiochip_add(struct gpio_chip *chip);
diff --git a/drivers/gpu/arm/midgard/Kbuild b/drivers/gpu/arm/midgard/Kbuild
index 8e3d44cf35d7ea..454756f5dcda95 100644
--- a/drivers/gpu/arm/midgard/Kbuild
+++ b/drivers/gpu/arm/midgard/Kbuild
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2012-2016, 2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -21,47 +21,31 @@
# Driver version string which is returned to userspace via an ioctl
-MALI_RELEASE_NAME ?= "r22p0-01rel0"
+MALI_RELEASE_NAME ?= "r26p0-01rel0"
# Paths required for build
KBASE_PATH = $(src)
KBASE_PLATFORM_PATH = $(KBASE_PATH)/platform_dummy
UMP_PATH = $(src)/../../../base
-ifeq ($(CONFIG_MALI_ERROR_INJECT),y)
-MALI_ERROR_INJECT_ON = 1
-endif
-
# Set up defaults if not defined by build system
MALI_CUSTOMER_RELEASE ?= 1
+MALI_USE_CSF ?= 0
MALI_UNIT_TEST ?= 0
MALI_KERNEL_TEST_API ?= 0
-MALI_ERROR_INJECT_ON ?= 0
MALI_MOCK_TEST ?= 0
MALI_COVERAGE ?= 0
CONFIG_MALI_PLATFORM_NAME ?= "devicetree"
-# This workaround is for what seems to be a compiler bug we observed in
-# GCC 4.7 on AOSP 4.3. The bug caused an intermittent failure compiling
-# the "_Pragma" syntax, where an error message is returned:
-#
-# "internal compiler error: unspellable token PRAGMA"
-#
-# This regression has thus far only been seen on the GCC 4.7 compiler bundled
-# with AOSP 4.3.0. So this makefile, intended for in-tree kernel builds
-# which are not known to be used with AOSP, is hardcoded to disable the
-# workaround, i.e. set the define to 0.
-MALI_GCC_WORKAROUND_MIDCOM_4598 ?= 0
# Set up our defines, which will be passed to gcc
DEFINES = \
-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+ -DMALI_USE_CSF=$(MALI_USE_CSF) \
-DMALI_KERNEL_TEST_API=$(MALI_KERNEL_TEST_API) \
-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
- -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
-DMALI_MOCK_TEST=$(MALI_MOCK_TEST) \
-DMALI_COVERAGE=$(MALI_COVERAGE) \
- -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\" \
- -DMALI_GCC_WORKAROUND_MIDCOM_4598=$(MALI_GCC_WORKAROUND_MIDCOM_4598)
+ -DMALI_RELEASE_NAME=\"$(MALI_RELEASE_NAME)\"
ifeq ($(KBUILD_EXTMOD),)
# in-tree
@@ -73,6 +57,8 @@ endif
DEFINES += -I$(srctree)/drivers/staging/android
+DEFINES += -DMALI_KBASE_BUILD
+
# Use our defines when compiling
ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
subdir-ccflags-y += $(DEFINES) -I$(KBASE_PATH) -I$(KBASE_PLATFORM_PATH) -I$(OSK_PATH) -I$(UMP_PATH) -I$(srctree)/include/linux
@@ -99,7 +85,6 @@ SRC := \
mali_kbase_hw.c \
mali_kbase_utility.c \
mali_kbase_debug.c \
- mali_kbase_trace_timeline.c \
mali_kbase_gpu_memory_debugfs.c \
mali_kbase_mem_linux.c \
mali_kbase_core_linux.c \
@@ -154,6 +139,10 @@ ifeq ($(CONFIG_MALI_DEVFREQ),y)
endif
endif
+ifeq ($(MALI_USE_CSF),1)
+ include $(src)/csf/Kbuild
+endif
+
mali_kbase-$(CONFIG_MALI_DMA_FENCE) += \
mali_kbase_dma_fence.o \
mali_kbase_fence.o
diff --git a/drivers/gpu/arm/midgard/Kconfig b/drivers/gpu/arm/midgard/Kconfig
index 281b93402a78e6..d6bdb9f6c6b0e0 100644
--- a/drivers/gpu/arm/midgard/Kconfig
+++ b/drivers/gpu/arm/midgard/Kconfig
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -45,6 +45,7 @@ config MALI_GATOR_SUPPORT
config MALI_MIDGARD_DVFS
bool "Enable legacy DVFS"
depends on MALI_MIDGARD && !MALI_DEVFREQ
+ depends on BROKEN
default n
help
Choose this option to enable legacy DVFS in the Mali Midgard DDK.
@@ -52,6 +53,7 @@ config MALI_MIDGARD_DVFS
config MALI_MIDGARD_ENABLE_TRACE
bool "Enable kbase tracing"
depends on MALI_MIDGARD
+ depends on BROKEN
default n
help
Enables tracing in kbase. Trace log available through
@@ -60,6 +62,7 @@ config MALI_MIDGARD_ENABLE_TRACE
config MALI_DEVFREQ
bool "devfreq support for Mali"
depends on MALI_MIDGARD && PM_DEVFREQ
+ default y
help
Support devfreq for Mali.
@@ -109,22 +112,10 @@ config MALI_CORESTACK
If unsure, say N.
-config MALI_PRFCNT_SET_SECONDARY
- bool "Use secondary set of performance counters"
- depends on MALI_MIDGARD && MALI_EXPERT
- default n
- help
- Select this option to use secondary set of performance counters. Kernel
- features that depend on an access to the primary set of counters may
- become unavailable. Enabling this option will prevent power management
- from working optimally and may cause instrumentation tools to return
- bogus results.
-
- If unsure, say N.
-
config MALI_DEBUG
bool "Debug build"
depends on MALI_MIDGARD && MALI_EXPERT
+ depends on BROKEN
default n
help
Select this option for increased checking and reporting of errors.
@@ -166,13 +157,6 @@ config MALI_ERROR_INJECT
help
Enables insertion of errors to test module failure and recovery mechanisms.
-config MALI_TRACE_TIMELINE
- bool "Timeline tracing"
- depends on MALI_MIDGARD && MALI_EXPERT
- default n
- help
- Enables timeline tracing through the kernel tracepoint system.
-
config MALI_SYSTEM_TRACE
bool "Enable system event tracing support"
depends on MALI_MIDGARD && MALI_EXPERT
@@ -180,17 +164,7 @@ config MALI_SYSTEM_TRACE
default n
help
Choose this option to enable system trace events for each
- kbase event. This is typically used for debugging but has
- minimal overhead when not in use. Enable only if you know what
- you are doing.
-
-config MALI_JOB_DUMPING
- bool "Enable system level support needed for job dumping"
- depends on MALI_MIDGARD && MALI_EXPERT
- default n
- help
- Choose this option to enable system level support needed for
- job dumping. This is typically used for instrumentation but has
+ kbase event. This is typically used for debugging but has
minimal overhead when not in use. Enable only if you know what
you are doing.
@@ -218,4 +192,30 @@ config MALI_PWRSOFT_765
If using kernel >= v4.10 then say N, otherwise if devfreq cooling
changes have been backported say Y to avoid compilation errors.
+# Instrumentation options.
+
+config MALI_JOB_DUMP
+ bool "Enable system level support needed for job dumping"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Choose this option to enable system level support needed for
+ job dumping. This is typically used for instrumentation but has
+ minimal overhead when not in use. Enable only if you know what
+ you are doing.
+
+config MALI_PRFCNT_SET_SECONDARY
+ bool "Use secondary set of performance counters"
+ depends on MALI_MIDGARD && MALI_EXPERT
+ default n
+ help
+ Select this option to use secondary set of performance counters. Kernel
+ features that depend on an access to the primary set of counters may
+ become unavailable. Enabling this option will prevent power management
+ from working optimally and may cause instrumentation tools to return
+ bogus results.
+
+ If unsure, say N.
+
source "drivers/gpu/arm/midgard/platform/Kconfig"
+source "drivers/gpu/arm/midgard/tests/Kconfig"
diff --git a/drivers/gpu/arm/midgard/Makefile b/drivers/gpu/arm/midgard/Makefile
index cfe6fc34ad2918..13af9f473890c5 100644
--- a/drivers/gpu/arm/midgard/Makefile
+++ b/drivers/gpu/arm/midgard/Makefile
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2010-2016, 2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -23,9 +23,7 @@
KDIR ?= /lib/modules/$(shell uname -r)/build
BUSLOG_PATH_RELATIVE = $(CURDIR)/../../../..
-UMP_PATH_RELATIVE = $(CURDIR)/../../../base/ump
KBASE_PATH_RELATIVE = $(CURDIR)
-EXTRA_SYMBOLS = $(UMP_PATH_RELATIVE)/src/Module.symvers
ifeq ($(MALI_UNIT_TEST), 1)
EXTRA_SYMBOLS += $(KBASE_PATH_RELATIVE)/tests/internal/src/kernel_assert_module/linux/Module.symvers
diff --git a/drivers/gpu/arm/midgard/Mconfig b/drivers/gpu/arm/midgard/Mconfig
index 9cfa368b1e2bda..583dec320d8bce 100644
--- a/drivers/gpu/arm/midgard/Mconfig
+++ b/drivers/gpu/arm/midgard/Mconfig
@@ -24,7 +24,7 @@ menuconfig MALI_MIDGARD
config MALI_GATOR_SUPPORT
bool "Streamline support via Gator"
- depends on MALI_MIDGARD
+ depends on MALI_MIDGARD && !BACKEND_USER
default y if INSTRUMENTATION_STREAMLINE_OLD
default n
help
@@ -84,6 +84,9 @@ config MALI_PLATFORM_NAME
include in the build. 'platform/$(MALI_PLATFORM_NAME)/Kbuild' must
exist.
+ When PLATFORM_CUSTOM is set, this needs to be set manually to
+ pick up the desired platform files.
+
config MALI_MOCK_TEST
bool
depends on MALI_MIDGARD && !RELEASE
@@ -112,19 +115,6 @@ config MALI_CORESTACK
If unsure, say N.
-config MALI_PRFCNT_SET_SECONDARY
- bool "Use secondary set of performance counters"
- depends on MALI_MIDGARD && MALI_EXPERT
- default n
- help
- Select this option to use secondary set of performance counters. Kernel
- features that depend on an access to the primary set of counters may
- become unavailable. Enabling this option will prevent power management
- from working optimally and may cause instrumentation tools to return
- bogus results.
-
- If unsure, say N.
-
config MALI_DEBUG
bool "Debug build"
depends on MALI_MIDGARD && MALI_EXPERT
@@ -164,13 +154,6 @@ config MALI_ERROR_INJECT_RANDOM
help
Injected errors are random, rather than user-driven.
-config MALI_TRACE_TIMELINE
- bool "Timeline tracing"
- depends on MALI_MIDGARD && MALI_EXPERT
- default n
- help
- Enables timeline tracing through the kernel tracepoint system.
-
config MALI_SYSTEM_TRACE
bool "Enable system event tracing support"
depends on MALI_MIDGARD && MALI_EXPERT
@@ -205,3 +188,10 @@ config MALI_PWRSOFT_765
PWRSOFT-765 fixes devfreq cooling devices issues. However, they are
not merged in mainline kernel yet. So this define helps to guard those
parts of the code.
+
+# Instrumentation options.
+
+# config MALI_JOB_DUMP exists in the Kernel Kconfig but is configured using CINSTR_JOB_DUMP in Mconfig.
+# config MALI_PRFCNT_SET_SECONDARY exists in the Kernel Kconfig but is configured using CINSTR_SECONDARY_HWC in Mconfig.
+
+source "kernel/drivers/gpu/arm/midgard/tests/Mconfig"
diff --git a/drivers/gpu/arm/midgard/backend/gpu/Kbuild b/drivers/gpu/arm/midgard/backend/gpu/Kbuild
index bdf4c5ad53bdd1..dcd8ca4ce4f43e 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/Kbuild
+++ b/drivers/gpu/arm/midgard/backend/gpu/Kbuild
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2014,2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -30,14 +30,12 @@ BACKEND += \
backend/gpu/mali_kbase_jm_as.c \
backend/gpu/mali_kbase_jm_hw.c \
backend/gpu/mali_kbase_jm_rb.c \
- backend/gpu/mali_kbase_js_affinity.c \
backend/gpu/mali_kbase_js_backend.c \
backend/gpu/mali_kbase_mmu_hw_direct.c \
backend/gpu/mali_kbase_pm_backend.c \
backend/gpu/mali_kbase_pm_driver.c \
backend/gpu/mali_kbase_pm_metrics.c \
backend/gpu/mali_kbase_pm_ca.c \
- backend/gpu/mali_kbase_pm_ca_fixed.c \
backend/gpu/mali_kbase_pm_always_on.c \
backend/gpu/mali_kbase_pm_coarse_demand.c \
backend/gpu/mali_kbase_pm_demand.c \
@@ -46,15 +44,13 @@ BACKEND += \
ifeq ($(MALI_CUSTOMER_RELEASE),0)
BACKEND += \
- backend/gpu/mali_kbase_pm_ca_random.c \
backend/gpu/mali_kbase_pm_demand_always_powered.c \
backend/gpu/mali_kbase_pm_fast_start.c
endif
ifeq ($(CONFIG_MALI_DEVFREQ),y)
BACKEND += \
- backend/gpu/mali_kbase_devfreq.c \
- backend/gpu/mali_kbase_pm_ca_devfreq.c
+ backend/gpu/mali_kbase_devfreq.c
endif
ifeq ($(CONFIG_MALI_NO_MALI),y)
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
index 49567f785d2c9e..7378bfd7b39798 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_cache_policy_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2016,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,6 +29,6 @@ void kbase_cache_set_coherency_mode(struct kbase_device *kbdev,
kbdev->current_gpu_coherency_mode = mode;
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_COHERENCY_REG))
- kbase_reg_write(kbdev, COHERENCY_ENABLE, mode, NULL);
+ kbase_reg_write(kbdev, COHERENCY_ENABLE, mode);
}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
index c9c463eb458d00..450f6e750a0c7e 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_debug_job_fault_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -152,7 +152,7 @@ bool kbase_job_fault_get_reg_snapshot(struct kbase_context *kctx)
while (kctx->reg_dump[offset] != REGISTER_DUMP_TERMINATION_FLAG) {
kctx->reg_dump[offset+1] =
kbase_reg_read(kctx->kbdev,
- kctx->reg_dump[offset], NULL);
+ kctx->reg_dump[offset]);
offset += 2;
}
return true;
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
index 9c9a0b3dc2a9f4..683a24cfa6c671 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_devfreq.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -92,14 +92,21 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
freq = *target_freq;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
rcu_read_lock();
+#endif
opp = devfreq_recommended_opp(dev, &freq, flags);
voltage = dev_pm_opp_get_voltage(opp);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
rcu_read_unlock();
+#endif
if (IS_ERR_OR_NULL(opp)) {
dev_err(dev, "Failed to get opp (%ld)\n", PTR_ERR(opp));
return PTR_ERR(opp);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)
+ dev_pm_opp_put(opp);
+#endif
nominal_freq = freq;
@@ -141,9 +148,7 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
}
#endif
- if (kbdev->pm.backend.ca_current_policy->id ==
- KBASE_PM_CA_POLICY_ID_DEVFREQ)
- kbase_devfreq_set_core_mask(kbdev, core_mask);
+ kbase_devfreq_set_core_mask(kbdev, core_mask);
*target_freq = nominal_freq;
kbdev->current_voltage = voltage;
@@ -153,8 +158,6 @@ kbase_devfreq_target(struct device *dev, unsigned long *target_freq, u32 flags)
KBASE_TLSTREAM_AUX_DEVFREQ_TARGET((u64)nominal_freq);
- kbase_pm_reset_dvfs_utilisation(kbdev);
-
return err;
}
@@ -172,12 +175,13 @@ static int
kbase_devfreq_status(struct device *dev, struct devfreq_dev_status *stat)
{
struct kbase_device *kbdev = dev_get_drvdata(dev);
+ struct kbasep_pm_metrics diff;
- stat->current_frequency = kbdev->current_nominal_freq;
-
- kbase_pm_get_dvfs_utilisation(kbdev,
- &stat->total_time, &stat->busy_time);
+ kbase_pm_get_dvfs_metrics(kbdev, &kbdev->last_devfreq_metrics, &diff);
+ stat->busy_time = diff.time_busy;
+ stat->total_time = diff.time_busy + diff.time_idle;
+ stat->current_frequency = kbdev->current_nominal_freq;
stat->private_data = NULL;
return 0;
@@ -191,20 +195,24 @@ static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
unsigned long freq;
struct dev_pm_opp *opp;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
rcu_read_lock();
+#endif
count = dev_pm_opp_get_opp_count(kbdev->dev);
- if (count < 0) {
- rcu_read_unlock();
- return count;
- }
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
rcu_read_unlock();
+#endif
+ if (count < 0)
+ return count;
dp->freq_table = kmalloc_array(count, sizeof(dp->freq_table[0]),
GFP_KERNEL);
if (!dp->freq_table)
return -ENOMEM;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
rcu_read_lock();
+#endif
for (i = 0, freq = ULONG_MAX; i < count; i++, freq--) {
opp = dev_pm_opp_find_freq_floor(kbdev->dev, &freq);
if (IS_ERR(opp))
@@ -215,7 +223,9 @@ static int kbase_devfreq_init_freq_table(struct kbase_device *kbdev,
dp->freq_table[i] = freq;
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
rcu_read_unlock();
+#endif
if (count != i)
dev_warn(kbdev->dev, "Unable to enumerate all OPPs (%d!=%d)\n",
@@ -247,6 +257,7 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
struct device_node *node;
int i = 0;
int count;
+ u64 shader_present = kbdev->gpu_props.props.raw_props.shader_present;
if (!opp_node)
return 0;
@@ -271,8 +282,14 @@ static int kbase_devfreq_init_core_mask_table(struct kbase_device *kbdev)
if (of_property_read_u64(node, "opp-hz-real", &real_freq))
real_freq = opp_freq;
if (of_property_read_u64(node, "opp-core-mask", &core_mask))
- core_mask =
- kbdev->gpu_props.props.raw_props.shader_present;
+ core_mask = shader_present;
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_11056) &&
+ core_mask != shader_present) {
+ dev_warn(kbdev->dev, "Ignoring OPP %llu - Dynamic Core Scaling not supported on this GPU\n",
+ opp_freq);
+ continue;
+ }
+
core_count_p = of_get_property(node, "opp-core-count", NULL);
if (core_count_p) {
u64 remaining_core_mask =
@@ -352,7 +369,7 @@ int kbase_devfreq_init(struct kbase_device *kbdev)
kbdev->devfreq = devfreq_add_device(kbdev->dev, dp,
"simple_ondemand", NULL);
if (IS_ERR(kbdev->devfreq)) {
- kbase_devfreq_term_freq_table(kbdev);
+ kfree(dp->freq_table);
return PTR_ERR(kbdev->devfreq);
}
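The LINUX_VERSION_CODE guards added throughout this file encode one OPP API
shift: before 4.11 a lookup had to run under rcu_read_lock(), from 4.11 the
returned entry is reference-counted and must be dropped with
dev_pm_opp_put(). Condensed, with the error check hoisted before the
voltage read as a precaution:

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
            rcu_read_lock();
            opp = devfreq_recommended_opp(dev, &freq, flags);
            if (!IS_ERR_OR_NULL(opp))
                    voltage = dev_pm_opp_get_voltage(opp);
            rcu_read_unlock();      /* opp is only valid under RCU */
    #else
            opp = devfreq_recommended_opp(dev, &freq, flags);
            if (!IS_ERR_OR_NULL(opp)) {
                    voltage = dev_pm_opp_get_voltage(opp);
                    dev_pm_opp_put(opp);    /* drop the reference */
            }
    #endif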
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
index a0dfd81a808906..ebc3022299b582 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_hw.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -154,11 +154,9 @@ void kbase_io_history_dump(struct kbase_device *kbdev)
#endif /* CONFIG_DEBUG_FS */
-void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
- struct kbase_context *kctx)
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value)
{
KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
- KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
writel(value, kbdev->reg + offset);
@@ -168,21 +166,15 @@ void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
value, 1);
#endif /* CONFIG_DEBUG_FS */
- dev_dbg(kbdev->dev, "w: reg %04x val %08x", offset, value);
-
- if (kctx && kctx->jctx.tb)
- kbase_device_trace_register_access(kctx, REG_WRITE, offset,
- value);
+ dev_dbg(kbdev->dev, "w: reg %08x val %08x", offset, value);
}
KBASE_EXPORT_TEST_API(kbase_reg_write);
-u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
- struct kbase_context *kctx)
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset)
{
u32 val;
KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
- KBASE_DEBUG_ASSERT(kctx == NULL || kctx->as_nr != KBASEP_AS_NR_INVALID);
KBASE_DEBUG_ASSERT(kbdev->dev != NULL);
val = readl(kbdev->reg + offset);
@@ -192,10 +184,8 @@ u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
kbase_io_history_add(&kbdev->io_history, kbdev->reg + offset,
val, 0);
#endif /* CONFIG_DEBUG_FS */
- dev_dbg(kbdev->dev, "r: reg %04x val %08x", offset, val);
+ dev_dbg(kbdev->dev, "r: reg %08x val %08x", offset, val);
- if (kctx && kctx->jctx.tb)
- kbase_device_trace_register_access(kctx, REG_READ, offset, val);
return val;
}
@@ -216,11 +206,11 @@ static void kbase_report_gpu_fault(struct kbase_device *kbdev, int multiple)
u32 status;
u64 address;
- status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL);
+ status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS));
address = (u64) kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_FAULTADDRESS_HI), NULL) << 32;
+ GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
address |= kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_FAULTADDRESS_LO), NULL);
+ GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
dev_warn(kbdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx",
status & 0xFF,
@@ -246,7 +236,7 @@ void kbase_gpu_interrupt(struct kbase_device *kbdev, u32 val)
kbase_clean_caches_done(kbdev);
KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u, val);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
/* kbase_pm_check_transitions must be called after the IRQ has been
* cleared. This is because it might trigger further power transitions
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
index 729256ec6ce317..928efe950260c4 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_device_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -34,29 +34,21 @@
* @kbdev: Kbase device pointer
* @offset: Offset of register
* @value: Value to write
- * @kctx: Kbase context pointer. May be NULL
*
- * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
- * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
- * != KBASEP_AS_NR_INVALID).
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
*/
-void kbase_reg_write(struct kbase_device *kbdev, u16 offset, u32 value,
- struct kbase_context *kctx);
+void kbase_reg_write(struct kbase_device *kbdev, u32 offset, u32 value);
/**
* kbase_reg_read - read from GPU register
* @kbdev: Kbase device pointer
* @offset: Offset of register
- * @kctx: Kbase context pointer. May be NULL
*
- * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false). If
- * @kctx is not NULL then the caller must ensure it is scheduled (@kctx->as_nr
- * != KBASEP_AS_NR_INVALID).
+ * Caller must ensure the GPU is powered (@kbdev->pm.gpu_powered != false).
*
* Return: Value in desired register
*/
-u32 kbase_reg_read(struct kbase_device *kbdev, u16 offset,
- struct kbase_context *kctx);
+u32 kbase_reg_read(struct kbase_device *kbdev, u32 offset);
/**
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
index 02dc1ea0061f60..39773e6e63aa44 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_gpuprops_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,60 +37,61 @@ void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
int i;
/* Fill regdump with the content of the relevant registers */
- regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID), NULL);
+ regdump->gpu_id = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_ID));
regdump->l2_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_FEATURES), NULL);
- regdump->suspend_size = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SUSPEND_SIZE), NULL);
+ GPU_CONTROL_REG(L2_FEATURES));
+ regdump->core_features = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CORE_FEATURES));
regdump->tiler_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_FEATURES), NULL);
+ GPU_CONTROL_REG(TILER_FEATURES));
regdump->mem_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(MEM_FEATURES), NULL);
+ GPU_CONTROL_REG(MEM_FEATURES));
regdump->mmu_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(MMU_FEATURES), NULL);
+ GPU_CONTROL_REG(MMU_FEATURES));
regdump->as_present = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(AS_PRESENT), NULL);
+ GPU_CONTROL_REG(AS_PRESENT));
regdump->js_present = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(JS_PRESENT), NULL);
+ GPU_CONTROL_REG(JS_PRESENT));
for (i = 0; i < GPU_MAX_JOB_SLOTS; i++)
regdump->js_features[i] = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(JS_FEATURES_REG(i)), NULL);
+ GPU_CONTROL_REG(JS_FEATURES_REG(i)));
for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
regdump->texture_features[i] = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)), NULL);
+ GPU_CONTROL_REG(TEXTURE_FEATURES_REG(i)));
regdump->thread_max_threads = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(THREAD_MAX_THREADS), NULL);
+ GPU_CONTROL_REG(THREAD_MAX_THREADS));
regdump->thread_max_workgroup_size = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE),
- NULL);
+ GPU_CONTROL_REG(THREAD_MAX_WORKGROUP_SIZE));
regdump->thread_max_barrier_size = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE), NULL);
+ GPU_CONTROL_REG(THREAD_MAX_BARRIER_SIZE));
regdump->thread_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(THREAD_FEATURES), NULL);
+ GPU_CONTROL_REG(THREAD_FEATURES));
+ regdump->thread_tls_alloc = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(THREAD_TLS_ALLOC));
regdump->shader_present_lo = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SHADER_PRESENT_LO), NULL);
+ GPU_CONTROL_REG(SHADER_PRESENT_LO));
regdump->shader_present_hi = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SHADER_PRESENT_HI), NULL);
+ GPU_CONTROL_REG(SHADER_PRESENT_HI));
regdump->tiler_present_lo = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_PRESENT_LO), NULL);
+ GPU_CONTROL_REG(TILER_PRESENT_LO));
regdump->tiler_present_hi = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_PRESENT_HI), NULL);
+ GPU_CONTROL_REG(TILER_PRESENT_HI));
regdump->l2_present_lo = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_PRESENT_LO), NULL);
+ GPU_CONTROL_REG(L2_PRESENT_LO));
regdump->l2_present_hi = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_PRESENT_HI), NULL);
+ GPU_CONTROL_REG(L2_PRESENT_HI));
regdump->stack_present_lo = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(STACK_PRESENT_LO), NULL);
+ GPU_CONTROL_REG(STACK_PRESENT_LO));
regdump->stack_present_hi = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(STACK_PRESENT_HI), NULL);
+ GPU_CONTROL_REG(STACK_PRESENT_HI));
}
void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
@@ -101,7 +102,7 @@ void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
kbase_pm_register_access_enable(kbdev);
regdump->coherency_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);
+ GPU_CONTROL_REG(COHERENCY_FEATURES));
/* We're done accessing the GPU registers for now. */
kbase_pm_register_access_disable(kbdev);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
index 3cbfb441d33811..6c69132a297c40 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_instr_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -51,16 +51,16 @@ static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
/* Enable interrupt */
spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
- irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
- irq_mask | CLEAN_CACHES_COMPLETED, NULL);
+ irq_mask | CLEAN_CACHES_COMPLETED);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
/* clean&invalidate the caches so we're sure the mmu tables for the dump
 * buffer are valid */
KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+ GPU_COMMAND_CLEAN_INV_CACHES);
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_CLEANING;
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
@@ -68,20 +68,16 @@ static void kbasep_instr_hwcnt_cacheclean(struct kbase_device *kbdev)
int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
struct kbase_context *kctx,
- struct kbase_uk_hwcnt_setup *setup)
+ struct kbase_ioctl_hwcnt_enable *enable)
{
unsigned long flags, pm_flags;
int err = -EINVAL;
u32 irq_mask;
int ret;
- u64 shader_cores_needed;
u32 prfcnt_config;
- shader_cores_needed = kbase_pm_get_present_cores(kbdev,
- KBASE_PM_CORE_SHADER);
-
/* alignment failure */
- if ((setup->dump_buffer == 0ULL) || (setup->dump_buffer & (2048 - 1)))
+ if ((enable->dump_buffer == 0ULL) || (enable->dump_buffer & (2048 - 1)))
goto out_err;
/* Override core availability policy to ensure all cores are available
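
The renamed parameter above (setup becomes enable, matching the new kbase_ioctl_hwcnt_enable struct) keeps the same validity test: the dump buffer must be non-NULL and 2048-byte aligned. (2048 - 1) is 0x7FF, so any set low bit means misalignment. A small sketch of the check, assuming 2048 remains the required alignment:

#include <stdbool.h>
#include <stdint.h>

static bool dump_buffer_ok(uint64_t addr)
{
	/* Power-of-two alignment test via mask: aligned iff low 11 bits clear. */
	return addr != 0 && (addr & (2048 - 1)) == 0;
}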
@@ -90,7 +86,7 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
/* Request the cores early on synchronously - we'll release them on any
* errors (e.g. instrumentation already active) */
- kbase_pm_request_cores_sync(kbdev, true, shader_cores_needed);
+ kbase_pm_request_cores_sync(kbdev, true, true);
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
@@ -102,15 +98,15 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
/* Enable interrupt */
spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
- irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), irq_mask |
- PRFCNT_SAMPLE_COMPLETED, NULL);
+ PRFCNT_SAMPLE_COMPLETED);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
/* In use, this context is the owner */
kbdev->hwcnt.kctx = kctx;
/* Remember the dump address so we can reprogram it later */
- kbdev->hwcnt.addr = setup->dump_buffer;
+ kbdev->hwcnt.addr = enable->dump_buffer;
/* Request the clean */
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
@@ -147,35 +143,34 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
#endif
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
- prfcnt_config | PRFCNT_CONFIG_MODE_OFF, kctx);
+ prfcnt_config | PRFCNT_CONFIG_MODE_OFF);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
- setup->dump_buffer & 0xFFFFFFFF, kctx);
+ enable->dump_buffer & 0xFFFFFFFF);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
- setup->dump_buffer >> 32, kctx);
+ enable->dump_buffer >> 32);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_JM_EN),
- setup->jm_bm, kctx);
+ enable->jm_bm);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_SHADER_EN),
- setup->shader_bm, kctx);
+ enable->shader_bm);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_MMU_L2_EN),
- setup->mmu_l2_bm, kctx);
+ enable->mmu_l2_bm);
/* Due to PRLAM-8186 we need to disable the Tiler before we enable the
* HW counter dump. */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
- kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0,
- kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN), 0);
else
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
- setup->tiler_bm, kctx);
+ enable->tiler_bm);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG),
- prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL, kctx);
+ prfcnt_config | PRFCNT_CONFIG_MODE_MANUAL);
/* If HW has PRLAM-8186 we can now re-enable the tiler HW counters dump
*/
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8186))
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_TILER_EN),
- setup->tiler_bm, kctx);
+ enable->tiler_bm);
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
@@ -191,7 +186,7 @@ int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
return err;
out_unrequest_cores:
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_pm_unrequest_cores(kbdev, true, shader_cores_needed);
+ kbase_pm_release_cores(kbdev, true, true);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
out_err:
return err;
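
The hunks above replace the computed shader-core mask with a simple boolean request/release pair: cores are requested synchronously up front and released on the error path (out_unrequest_cores now calls kbase_pm_release_cores). A compilable sketch of that acquire/undo shape, with hypothetical stand-in helpers:

static void request_cores(void) { /* acquire */ }
static void release_cores(void) { /* undo */ }
static int program_counters(void) { return -1; /* force the error path */ }

static int enable_counters(void)
{
	int err;

	request_cores();
	err = program_counters();
	if (err)
		goto out_release;
	return 0;

out_release:
	release_cores();	/* every early exit undoes the request */
	return err;
}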
@@ -234,20 +229,19 @@ int kbase_instr_hwcnt_disable_internal(struct kbase_context *kctx)
/* Disable interrupt */
spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
- irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
- irq_mask & ~PRFCNT_SAMPLE_COMPLETED, NULL);
+ irq_mask & ~PRFCNT_SAMPLE_COMPLETED);
/* Disable the counters */
- kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0, kctx);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_CONFIG), 0);
kbdev->hwcnt.kctx = NULL;
kbdev->hwcnt.addr = 0ULL;
kbase_pm_ca_instr_disable(kbdev);
- kbase_pm_unrequest_cores(kbdev, true,
- kbase_pm_get_present_cores(kbdev, KBASE_PM_CORE_SHADER));
+ kbase_pm_release_cores(kbdev, true, true);
kbase_pm_release_l2_caches(kbdev);
@@ -290,15 +284,15 @@ int kbase_instr_hwcnt_request_dump(struct kbase_context *kctx)
/* Reconfigure the dump address */
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_LO),
- kbdev->hwcnt.addr & 0xFFFFFFFF, NULL);
+ kbdev->hwcnt.addr & 0xFFFFFFFF);
kbase_reg_write(kbdev, GPU_CONTROL_REG(PRFCNT_BASE_HI),
- kbdev->hwcnt.addr >> 32, NULL);
+ kbdev->hwcnt.addr >> 32);
/* Start dumping */
KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_SAMPLE, NULL, NULL,
kbdev->hwcnt.addr, 0);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_PRFCNT_SAMPLE, kctx);
+ GPU_COMMAND_PRFCNT_SAMPLE);
dev_dbg(kbdev->dev, "HW counters dumping done for context %p", kctx);
@@ -376,13 +370,20 @@ void kbase_instr_hwcnt_sample_done(struct kbase_device *kbdev)
kbdev->hwcnt.backend.triggered = 1;
wake_up(&kbdev->hwcnt.backend.wait);
} else if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING) {
- int ret;
- /* Always clean and invalidate the cache after a successful dump
- */
- kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
- ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
- &kbdev->hwcnt.backend.cache_clean_work);
- KBASE_DEBUG_ASSERT(ret);
+ if (kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+ /* All finished and idle */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_IDLE;
+ kbdev->hwcnt.backend.triggered = 1;
+ wake_up(&kbdev->hwcnt.backend.wait);
+ } else {
+ int ret;
+ /* Always clean and invalidate the cache after a successful dump
+ */
+ kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_REQUEST_CLEAN;
+ ret = queue_work(kbdev->hwcnt.backend.cache_clean_wq,
+ &kbdev->hwcnt.backend.cache_clean_work);
+ KBASE_DEBUG_ASSERT(ret);
+ }
}
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
@@ -399,10 +400,9 @@ void kbase_clean_caches_done(struct kbase_device *kbdev)
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
/* Disable interrupt */
spin_lock_irqsave(&kbdev->hwaccess_lock, pm_flags);
- irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
- NULL);
+ irq_mask = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK));
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK),
- irq_mask & ~CLEAN_CACHES_COMPLETED, NULL);
+ irq_mask & ~CLEAN_CACHES_COMPLETED);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, pm_flags);
/* Wakeup... */
@@ -460,7 +460,7 @@ int kbase_instr_hwcnt_clear(struct kbase_context *kctx)
/* Clear the counters */
KBASE_TRACE_ADD(kbdev, CORE_GPU_PRFCNT_CLEAR, NULL, NULL, 0u, 0);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_PRFCNT_CLEAR, kctx);
+ GPU_COMMAND_PRFCNT_CLEAR);
err = 0;
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
index 95bebf85463733..dd0279a03abc3a 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_irq_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -58,7 +58,7 @@ static irqreturn_t kbase_job_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
#ifdef CONFIG_MALI_DEBUG
if (!kbdev->pm.backend.driver_ready_for_irqs)
@@ -96,7 +96,7 @@ static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
atomic_inc(&kbdev->faults_pending);
- val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS));
#ifdef CONFIG_MALI_DEBUG
if (!kbdev->pm.backend.driver_ready_for_irqs)
@@ -134,7 +134,7 @@ static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
return IRQ_NONE;
}
- val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
+ val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
#ifdef CONFIG_MALI_DEBUG
if (!kbdev->pm.backend.driver_ready_for_irqs)
@@ -239,7 +239,7 @@ static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
return IRQ_NONE;
}
- val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
+ val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
@@ -251,7 +251,7 @@ static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
kbasep_irq_test_data.triggered = 1;
wake_up(&kbasep_irq_test_data.wait);
- kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val);
return IRQ_HANDLED;
}
@@ -271,7 +271,7 @@ static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
return IRQ_NONE;
}
- val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
+ val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS));
spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
@@ -283,7 +283,7 @@ static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
kbasep_irq_test_data.triggered = 1;
wake_up(&kbasep_irq_test_data.wait);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val);
return IRQ_HANDLED;
}
@@ -327,9 +327,9 @@ static int kbasep_common_test_interrupt(
}
/* store old mask */
- old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
+ old_mask_val = kbase_reg_read(kbdev, mask_offset);
/* mask interrupts */
- kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
+ kbase_reg_write(kbdev, mask_offset, 0x0);
if (kbdev->irqs[tag].irq) {
/* release original handler and install test handler */
@@ -343,8 +343,8 @@ static int kbasep_common_test_interrupt(
kbasep_test_interrupt_timeout;
/* trigger interrupt */
- kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
- kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);
+ kbase_reg_write(kbdev, mask_offset, 0x1);
+ kbase_reg_write(kbdev, rawstat_offset, 0x1);
hrtimer_start(&kbasep_irq_test_data.timer,
HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
@@ -366,7 +366,7 @@ static int kbasep_common_test_interrupt(
kbasep_irq_test_data.triggered = 0;
/* mask interrupts */
- kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
+ kbase_reg_write(kbdev, mask_offset, 0x0);
/* release test handler */
free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
@@ -382,7 +382,7 @@ static int kbasep_common_test_interrupt(
}
}
/* restore old mask */
- kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);
+ kbase_reg_write(kbdev, mask_offset, old_mask_val);
return err;
}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
index 4c991522eaf18e..c8153ba4c12162 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_as.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -68,11 +68,12 @@ static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
}
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
- struct kbase_context *kctx)
+ struct kbase_context *kctx,
+ int js)
{
int i;
- if (kbdev->hwaccess.active_kctx == kctx) {
+ if (kbdev->hwaccess.active_kctx[js] == kctx) {
/* Context is already active */
return true;
}
@@ -213,12 +214,15 @@ bool kbase_backend_use_ctx(struct kbase_device *kbdev,
{
struct kbasep_js_device_data *js_devdata;
struct kbase_as *new_address_space = NULL;
+ int js;
js_devdata = &kbdev->js_data;
- if (kbdev->hwaccess.active_kctx == kctx) {
- WARN(1, "Context is already scheduled in\n");
- return false;
+ for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+ if (kbdev->hwaccess.active_kctx[js] == kctx) {
+ WARN(1, "Context is already scheduled in\n");
+ return false;
+ }
}
new_address_space = &kbdev->as[as_nr];
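
With this patch hwaccess.active_kctx becomes an array indexed by job slot, so "context already scheduled" now means a match in any slot — hence the new loop above. A sketch of the lookup; MAX_SLOTS is a stand-in for BASE_JM_MAX_NR_SLOTS:

#include <stdbool.h>
#include <stddef.h>

#define MAX_SLOTS 3	/* illustrative stand-in for BASE_JM_MAX_NR_SLOTS */

static bool ctx_is_active(void *const active[MAX_SLOTS], const void *kctx)
{
	for (size_t js = 0; js < MAX_SLOTS; js++)
		if (active[js] == kctx)
			return true;
	return false;
}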
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
index 27a6ca0d871b90..b4d2ae1cc4e824 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -113,16 +113,4 @@ struct kbase_backend_data {
bool timeouts_updated;
};
-/**
- * struct kbase_jd_atom_backend - GPU backend specific katom data
- */
-struct kbase_jd_atom_backend {
-};
-
-/**
- * struct kbase_context_backend - GPU backend specific context data
- */
-struct kbase_context_backend {
-};
-
#endif /* _KBASE_HWACCESS_GPU_DEFS_H_ */
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
index 331f6ee3ad3a4d..fee19aa803afdb 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_hw.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,7 +37,6 @@
#include <mali_kbase_ctx_sched.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
-#include <backend/gpu/mali_kbase_js_affinity.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#define beenthere(kctx, f, a...) \
@@ -52,7 +51,54 @@ static enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *timer);
static inline int kbasep_jm_is_js_free(struct kbase_device *kbdev, int js,
struct kbase_context *kctx)
{
- return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT), kctx);
+ return !kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT));
+}
+
+static u64 kbase_job_write_affinity(struct kbase_device *kbdev,
+ base_jd_core_req core_req,
+ int js)
+{
+ u64 affinity;
+
+ if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
+ BASE_JD_REQ_T) {
+ /* Tiler-only atom */
+ /* If the hardware supports XAFFINITY then we'll only enable
+ * the tiler (which is the default so this is a no-op),
+ * otherwise enable shader core 0.
+ */
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+ affinity = 1;
+ else
+ affinity = 0;
+ } else if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
+ BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
+ unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
+ struct mali_base_gpu_coherent_group_info *coherency_info =
+ &kbdev->gpu_props.props.coherency_info;
+
+ affinity = kbase_pm_ca_get_core_mask(kbdev) &
+ kbdev->pm.debug_core_mask[js];
+
+ /* JS2 on a dual core group system targets core group 1. All
+ * other cases target core group 0.
+ */
+ if (js == 2 && num_core_groups > 1)
+ affinity &= coherency_info->group[1].core_mask;
+ else
+ affinity &= coherency_info->group[0].core_mask;
+ } else {
+ /* Use all cores */
+ affinity = kbase_pm_ca_get_core_mask(kbdev) &
+ kbdev->pm.debug_core_mask[js];
+ }
+
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
+ affinity & 0xFFFFFFFF);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
+ affinity >> 32);
+
+ return affinity;
}
void kbase_job_hw_submit(struct kbase_device *kbdev,
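
The new kbase_job_write_affinity above replaces the per-atom affinity plumbing: the mask is derived at submit time from the available-core mask, the per-slot debug mask and (for coherent-group atoms) one core group's mask, then split across the LO/HI affinity registers. A sketch of the mask arithmetic, with illustrative inputs:

#include <stdint.h>

static uint64_t pick_affinity(uint64_t available, uint64_t debug_mask,
			      uint64_t group_mask)
{
	/* Usable cores are the intersection of all three constraints. */
	return available & debug_mask & group_mask;
}

/* The 64-bit result is programmed as two 32-bit halves, mirroring the
 * JS_AFFINITY_NEXT_LO/HI writes above. */
static void split_mask(uint64_t m, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(m & 0xFFFFFFFF);
	*hi = (uint32_t)(m >> 32);
}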
@@ -62,6 +108,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
struct kbase_context *kctx;
u32 cfg;
u64 jc_head = katom->jc;
+ u64 affinity;
KBASE_DEBUG_ASSERT(kbdev);
KBASE_DEBUG_ASSERT(katom);
@@ -70,20 +117,13 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
/* Command register must be available */
KBASE_DEBUG_ASSERT(kbasep_jm_is_js_free(kbdev, js, kctx));
- /* Affinity is not violating */
- kbase_js_debug_log_current_affinities(kbdev);
- KBASE_DEBUG_ASSERT(!kbase_js_affinity_would_violate(kbdev, js,
- katom->affinity));
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO),
- jc_head & 0xFFFFFFFF, kctx);
+ jc_head & 0xFFFFFFFF);
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI),
- jc_head >> 32, kctx);
+ jc_head >> 32);
- kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_LO),
- katom->affinity & 0xFFFFFFFF, kctx);
- kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_AFFINITY_NEXT_HI),
- katom->affinity >> 32, kctx);
+ affinity = kbase_job_write_affinity(kbdev, katom->core_req, js);
/* start MMU, medium priority, cache clean/flush on end, clean/flush on
* start */
@@ -127,11 +167,11 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
}
}
- kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_CONFIG_NEXT), cfg);
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_FLUSH_ID_NEXT),
- katom->flush_id, kctx);
+ katom->flush_id);
/* Write an approximate start timestamp.
* It's approximate because there might be a job in the HEAD register.
@@ -139,11 +179,11 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
katom->start_timestamp = ktime_get();
/* GO ! */
- dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx, affinity=0x%llx",
- katom, kctx, js, jc_head, katom->affinity);
+ dev_dbg(kbdev->dev, "JS: Submitting atom %p from ctx %p to js[%d] with head=0x%llx",
+ katom, kctx, js, jc_head);
KBASE_TRACE_ADD_SLOT_INFO(kbdev, JM_SUBMIT, kctx, katom, jc_head, js,
- (u32) katom->affinity);
+ (u32)affinity);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
kbase_trace_mali_job_slots_event(
@@ -151,7 +191,7 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
kctx, kbase_jd_atom_id(kctx, katom));
#endif
KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG(katom, jc_head,
- katom->affinity, cfg);
+ affinity, cfg);
KBASE_TLSTREAM_TL_RET_CTX_LPU(
kctx,
&kbdev->gpu_props.props.raw_props.js_features[
@@ -174,10 +214,8 @@ void kbase_job_hw_submit(struct kbase_device *kbdev,
kbdev->hwaccess.backend.slot_rb[js].last_context = katom->kctx;
}
#endif
- kbase_timeline_job_slot_submit(kbdev, kctx, katom, js);
-
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
- JS_COMMAND_START, katom->kctx);
+ JS_COMMAND_START);
}
/**
@@ -245,9 +283,6 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
KBASE_TRACE_ADD(kbdev, JM_IRQ, NULL, NULL, 0, done);
- memset(&kbdev->slot_submit_count_irq[0], 0,
- sizeof(kbdev->slot_submit_count_irq));
-
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
while (done) {
@@ -272,10 +307,9 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
/* read out the job slot status code if the job
* slot reported failure */
completion_code = kbase_reg_read(kbdev,
- JOB_SLOT_REG(i, JS_STATUS), NULL);
+ JOB_SLOT_REG(i, JS_STATUS));
- switch (completion_code) {
- case BASE_JD_EVENT_STOPPED:
+ if (completion_code == BASE_JD_EVENT_STOPPED) {
#if defined(CONFIG_MALI_GATOR_SUPPORT)
kbase_trace_mali_job_slots_event(
GATOR_MAKE_EVENT(
@@ -290,37 +324,27 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
* JS<n>_TAIL so that the job chain can
* be resumed */
job_tail = (u64)kbase_reg_read(kbdev,
- JOB_SLOT_REG(i, JS_TAIL_LO),
- NULL) |
+ JOB_SLOT_REG(i, JS_TAIL_LO)) |
((u64)kbase_reg_read(kbdev,
- JOB_SLOT_REG(i, JS_TAIL_HI),
- NULL) << 32);
- break;
- case BASE_JD_EVENT_NOT_STARTED:
+ JOB_SLOT_REG(i, JS_TAIL_HI))
+ << 32);
+ } else if (completion_code ==
+ BASE_JD_EVENT_NOT_STARTED) {
/* PRLAM-10673 can cause a TERMINATED
* job to come back as NOT_STARTED, but
* the error interrupt helps us detect
* it */
completion_code =
BASE_JD_EVENT_TERMINATED;
- /* fall through */
- default:
- dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
- i, completion_code,
- kbase_exception_name
- (kbdev,
- completion_code));
}
- kbase_gpu_irq_evict(kbdev, i);
+ kbase_gpu_irq_evict(kbdev, i, completion_code);
}
kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR),
- done & ((1 << i) | (1 << (i + 16))),
- NULL);
+ done & ((1 << i) | (1 << (i + 16))));
active = kbase_reg_read(kbdev,
- JOB_CONTROL_REG(JOB_IRQ_JS_STATE),
- NULL);
+ JOB_CONTROL_REG(JOB_IRQ_JS_STATE));
if (((active >> i) & 1) == 0 &&
(((done >> (i + 16)) & 1) == 0)) {
@@ -365,7 +389,7 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
* execution.
*/
u32 rawstat = kbase_reg_read(kbdev,
- JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+ JOB_CONTROL_REG(JOB_IRQ_RAWSTAT));
if ((rawstat >> (i + 16)) & 1) {
/* There is a failed job that we've
@@ -415,7 +439,7 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
}
spurious:
done = kbase_reg_read(kbdev,
- JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL);
+ JOB_CONTROL_REG(JOB_IRQ_RAWSTAT));
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10883)) {
/* Workaround for missing interrupt caused by
@@ -423,7 +447,7 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
if (((active >> i) & 1) && (0 ==
kbase_reg_read(kbdev,
JOB_SLOT_REG(i,
- JS_STATUS), NULL))) {
+ JS_STATUS)))) {
/* Force job slot to be processed again
*/
done |= (1u << i);
@@ -487,7 +511,6 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
base_jd_core_req core_reqs,
struct kbase_jd_atom *target_katom)
{
- struct kbase_context *kctx = target_katom->kctx;
#if KBASE_TRACE_ENABLE
u32 status_reg_before;
u64 job_in_head_before;
@@ -497,12 +520,11 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
/* Check the head pointer */
job_in_head_before = ((u64) kbase_reg_read(kbdev,
- JOB_SLOT_REG(js, JS_HEAD_LO), NULL))
+ JOB_SLOT_REG(js, JS_HEAD_LO)))
| (((u64) kbase_reg_read(kbdev,
- JOB_SLOT_REG(js, JS_HEAD_HI), NULL))
+ JOB_SLOT_REG(js, JS_HEAD_HI)))
<< 32);
- status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
- NULL);
+ status_reg_before = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS));
#endif
if (action == JS_COMMAND_SOFT_STOP) {
@@ -606,11 +628,10 @@ void kbasep_job_slot_soft_or_hard_stop_do_action(struct kbase_device *kbdev,
}
}
- kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action, kctx);
+ kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND), action);
#if KBASE_TRACE_ENABLE
- status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS),
- NULL);
+ status_reg_after = kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_STATUS));
if (status_reg_after == BASE_JD_EVENT_ACTIVE) {
struct kbase_jd_atom *head;
struct kbase_context *head_kctx;
@@ -745,7 +766,9 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
if (!katom)
continue;
- if (katom->kctx != kctx)
+ if ((kbdev->js_ctx_scheduling_mode ==
+ KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE) &&
+ (katom->kctx != kctx))
continue;
if (katom->sched_priority > priority) {
@@ -813,7 +836,7 @@ u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
mutex_lock(&kbdev->pm.lock);
if (kbdev->pm.backend.gpu_powered)
flush_id = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(LATEST_FLUSH), NULL);
+ GPU_CONTROL_REG(LATEST_FLUSH));
mutex_unlock(&kbdev->pm.lock);
}
@@ -1072,34 +1095,32 @@ static void kbase_debug_dump_registers(struct kbase_device *kbdev)
dev_err(kbdev->dev, "Register state:");
dev_err(kbdev->dev, " GPU_IRQ_RAWSTAT=0x%08x GPU_STATUS=0x%08x",
- kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL),
- kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL));
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS)));
dev_err(kbdev->dev, " JOB_IRQ_RAWSTAT=0x%08x JOB_IRQ_JS_STATE=0x%08x",
- kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT), NULL),
- kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE), NULL));
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_RAWSTAT)),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_JS_STATE)));
for (i = 0; i < 3; i++) {
dev_err(kbdev->dev, " JS%d_STATUS=0x%08x JS%d_HEAD_LO=0x%08x",
- i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS),
- NULL),
- i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO),
- NULL));
+ i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_STATUS)),
+ i, kbase_reg_read(kbdev, JOB_SLOT_REG(i, JS_HEAD_LO)));
}
dev_err(kbdev->dev, " MMU_IRQ_RAWSTAT=0x%08x GPU_FAULTSTATUS=0x%08x",
- kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT), NULL),
- kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS), NULL));
+ kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_RAWSTAT)),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_FAULTSTATUS)));
dev_err(kbdev->dev, " GPU_IRQ_MASK=0x%08x JOB_IRQ_MASK=0x%08x MMU_IRQ_MASK=0x%08x",
- kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), NULL),
- kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), NULL),
- kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL));
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK)),
+ kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK)),
+ kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)));
dev_err(kbdev->dev, " PWR_OVERRIDE0=0x%08x PWR_OVERRIDE1=0x%08x",
- kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0), NULL),
- kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1), NULL));
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE0)),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(PWR_OVERRIDE1)));
dev_err(kbdev->dev, " SHADER_CONFIG=0x%08x L2_MMU_CONFIG=0x%08x",
- kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG), NULL),
- kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG), NULL));
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_CONFIG)),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG)));
dev_err(kbdev->dev, " TILER_CONFIG=0x%08x JM_CONFIG=0x%08x",
- kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG), NULL),
- kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG), NULL));
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(TILER_CONFIG)),
+ kbase_reg_read(kbdev, GPU_CONTROL_REG(JM_CONFIG)));
}
static void kbasep_reset_timeout_worker(struct work_struct *data)
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
index d71a9edab94fea..831491e956a137 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -166,4 +166,24 @@ void kbase_job_slot_term(struct kbase_device *kbdev);
*/
void kbase_gpu_cacheclean(struct kbase_device *kbdev);
+static inline bool kbase_atom_needs_tiler(struct kbase_device *kbdev,
+ base_jd_core_req core_req)
+{
+ return core_req & BASE_JD_REQ_T;
+}
+
+static inline bool kbase_atom_needs_shaders(struct kbase_device *kbdev,
+ base_jd_core_req core_req)
+{
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
+ return true;
+ if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
+ BASE_JD_REQ_T) {
+		/* Tiler-only atom */
+ return false;
+ }
+
+ return true;
+}
+
#endif /* _KBASE_JM_HWACCESS_H_ */
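
The helpers added above encode the XAFFINITY rules; the non-obvious part is the tiler-only test, which masks three request bits and compares against BASE_JD_REQ_T alone — i.e. "T set and FS/CS both clear", not merely "T set". A runnable check with illustrative bit values (not the driver's actual constants):

#include <assert.h>

#define REQ_FS 0x1	/* hypothetical values, for illustration only */
#define REQ_CS 0x2
#define REQ_T  0x4

static int tiler_only(unsigned int req)
{
	return (req & (REQ_FS | REQ_CS | REQ_T)) == REQ_T;
}

int main(void)
{
	assert(tiler_only(REQ_T));
	assert(!tiler_only(REQ_T | REQ_FS));	/* fragment+tiler is not tiler-only */
	assert(!tiler_only(0));
	return 0;
}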
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
index ee93d4eb952243..79777b78740393 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -34,7 +34,6 @@
#include <backend/gpu/mali_kbase_cache_policy_backend.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
-#include <backend/gpu/mali_kbase_js_affinity.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
/* Return whether the specified ringbuffer is empty. HW access lock must be
@@ -104,8 +103,6 @@ static struct kbase_jd_atom *kbase_gpu_dequeue_atom(struct kbase_device *kbdev,
katom->gpu_rb_state = KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB;
- kbase_js_debug_log_current_affinities(kbdev);
-
return katom;
}
@@ -122,12 +119,6 @@ struct kbase_jd_atom *kbase_gpu_inspect(struct kbase_device *kbdev, int js,
return rb->entries[(rb->read_idx + idx) & SLOT_RB_MASK].katom;
}
-struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
- int js)
-{
- return kbase_gpu_inspect(kbdev, js, 0);
-}
-
struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
int js)
{
@@ -312,221 +303,58 @@ static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
int js,
struct kbase_jd_atom *katom)
{
- /* The most recently checked affinity. Having this at this scope allows
- * us to guarantee that we've checked the affinity in this function
- * call.
- */
- u64 recently_chosen_affinity = 0;
- bool chosen_affinity = false;
- bool retry;
-
- do {
- retry = false;
-
- /* NOTE: The following uses a number of FALLTHROUGHs to optimize
- * the calls to this function. Ending of the function is
- * indicated by BREAK OUT */
- switch (katom->coreref_state) {
- /* State when job is first attempted to be run */
- case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
- KBASE_DEBUG_ASSERT(katom->affinity == 0);
-
- /* Compute affinity */
- if (false == kbase_js_choose_affinity(
- &recently_chosen_affinity, kbdev, katom,
- js)) {
- /* No cores are currently available */
- /* *** BREAK OUT: No state transition *** */
- break;
- }
-
- chosen_affinity = true;
-
- /* Request the cores */
- kbase_pm_request_cores(kbdev,
- katom->core_req & BASE_JD_REQ_T,
- recently_chosen_affinity);
-
- katom->affinity = recently_chosen_affinity;
-
- /* Proceed to next state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
- {
- enum kbase_pm_cores_ready cores_ready;
-
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
-
- cores_ready = kbase_pm_register_inuse_cores(
- kbdev,
- katom->core_req & BASE_JD_REQ_T,
- katom->affinity);
- if (cores_ready == KBASE_NEW_AFFINITY) {
- /* Affinity no longer valid - return to
- * previous state */
- kbasep_js_job_check_deref_cores(kbdev,
- katom);
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_INUSE_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: Return to previous
- * state, retry *** */
- retry = true;
- break;
- }
- if (cores_ready == KBASE_CORES_NOT_READY) {
- /* Stay in this state and return, to
- * retry at this state later */
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_INUSE_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: No state transition
- * *** */
- break;
- }
- /* Proceed to next state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
- }
+ base_jd_core_req core_req = katom->core_req;
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
-
- case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
-
- /* Optimize out choosing the affinity twice in the same
- * function call */
- if (chosen_affinity == false) {
- /* See if the affinity changed since a previous
- * call. */
- if (false == kbase_js_choose_affinity(
- &recently_chosen_affinity,
- kbdev, katom, js)) {
- /* No cores are currently available */
- kbasep_js_job_check_deref_cores(kbdev,
- katom);
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REQUEST_ON_RECHECK_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) recently_chosen_affinity);
- /* *** BREAK OUT: Transition to lower
- * state *** */
- break;
- }
- chosen_affinity = true;
- }
+ /* NOTE: The following uses a number of FALLTHROUGHs to optimize the
+ * calls to this function. Ending of the function is indicated by BREAK
+ * OUT.
+ */
+ switch (katom->coreref_state) {
+ /* State when job is first attempted to be run */
+ case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
+ /* Request the cores */
+ kbase_pm_request_cores(kbdev,
+ kbase_atom_needs_tiler(kbdev, core_req),
+ kbase_atom_needs_shaders(kbdev, core_req));
- /* Now see if this requires a different set of cores */
- if (recently_chosen_affinity != katom->affinity) {
- enum kbase_pm_cores_ready cores_ready;
+ /* Proceed to next state */
+ katom->coreref_state =
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
- kbase_pm_request_cores(kbdev,
- katom->core_req & BASE_JD_REQ_T,
- recently_chosen_affinity);
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- /* Register new cores whilst we still hold the
- * old ones, to minimize power transitions */
- cores_ready =
- kbase_pm_register_inuse_cores(kbdev,
- katom->core_req & BASE_JD_REQ_T,
- recently_chosen_affinity);
- kbasep_js_job_check_deref_cores(kbdev, katom);
+ case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
+ {
+ bool cores_ready;
- /* Fixup the state that was reduced by
- * deref_cores: */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
- katom->affinity = recently_chosen_affinity;
- if (cores_ready == KBASE_NEW_AFFINITY) {
- /* Affinity no longer valid - return to
- * previous state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
-
- kbasep_js_job_check_deref_cores(kbdev,
- katom);
+ cores_ready = kbase_pm_cores_requested(kbdev,
+ kbase_atom_needs_tiler(kbdev, core_req),
+ kbase_atom_needs_shaders(kbdev, core_req));
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_INUSE_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: Return to previous
- * state, retry *** */
- retry = true;
- break;
- }
- /* Now might be waiting for powerup again, with
- * a new affinity */
- if (cores_ready == KBASE_CORES_NOT_READY) {
- /* Return to previous state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
- KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_REGISTER_ON_RECHECK_FAILED,
- katom->kctx, katom,
- katom->jc, js,
- (u32) katom->affinity);
- /* *** BREAK OUT: Transition to lower
- * state *** */
- break;
- }
- }
- /* Proceed to next state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
-
- /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- case KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS:
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
- KBASE_DEBUG_ASSERT(katom->affinity ==
- recently_chosen_affinity);
-
- /* Note: this is where the caller must've taken the
- * hwaccess_lock */
-
- /* Check for affinity violations - if there are any,
- * then we just ask the caller to requeue and try again
- * later */
- if (kbase_js_affinity_would_violate(kbdev, js,
- katom->affinity) != false) {
- /* Return to previous state */
- katom->coreref_state =
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
- /* *** BREAK OUT: Transition to lower state ***
+ if (!cores_ready) {
+ /* Stay in this state and return, to retry at
+ * this state later.
*/
KBASE_TRACE_ADD_SLOT_INFO(kbdev,
- JS_CORE_REF_AFFINITY_WOULD_VIOLATE,
- katom->kctx, katom, katom->jc, js,
- (u32) katom->affinity);
+ JS_CORE_REF_REGISTER_INUSE_FAILED,
+ katom->kctx, katom,
+ katom->jc, js,
+ (u32) 0);
+ /* *** BREAK OUT: No state transition *** */
break;
}
-
- /* No affinity violations would result, so the cores are
- * ready */
+ /* Proceed to next state */
katom->coreref_state = KBASE_ATOM_COREREF_STATE_READY;
/* *** BREAK OUT: Cores Ready *** */
break;
-
- default:
- KBASE_DEBUG_ASSERT_MSG(false,
- "Unhandled kbase_atom_coreref_state %d",
- katom->coreref_state);
- break;
}
- } while (retry != false);
+
+ default:
+ KBASE_DEBUG_ASSERT_MSG(false,
+ "Unhandled kbase_atom_coreref_state %d",
+ katom->coreref_state);
+ break;
+ }
return (katom->coreref_state == KBASE_ATOM_COREREF_STATE_READY);
}
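
The large hunk above collapses the five-state affinity/coreref machine into three effective states: request cores once, wait until the PM core reports the request satisfied, then READY. A compact sketch of the reduced machine; the enum names and callback are illustrative:

#include <stdbool.h>

enum coreref { NO_CORES_REQUESTED, WAITING_FOR_CORES, READY };

static enum coreref coreref_step(enum coreref s, bool (*cores_requested)(void))
{
	switch (s) {
	case NO_CORES_REQUESTED:
		/* request cores here, then fall through to the wait check */
		/* fall through */
	case WAITING_FOR_CORES:
		if (!cores_requested())
			return WAITING_FOR_CORES;	/* retry later, no transition */
		return READY;
	default:
		return s;
	}
}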
@@ -534,6 +362,8 @@ static bool kbasep_js_job_check_ref_cores(struct kbase_device *kbdev,
static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
struct kbase_jd_atom *katom)
{
+ base_jd_core_req core_req = katom->core_req;
+
KBASE_DEBUG_ASSERT(kbdev != NULL);
KBASE_DEBUG_ASSERT(katom != NULL);
@@ -541,31 +371,18 @@ static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
case KBASE_ATOM_COREREF_STATE_READY:
/* State where atom was submitted to the HW - just proceed to
* power-down */
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
/* *** FALLTHROUGH *** */
- case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
- /* State where cores were registered */
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
- kbase_pm_release_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
- katom->affinity);
-
- break;
-
case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
- /* State where cores were requested, but not registered */
- KBASE_DEBUG_ASSERT(katom->affinity != 0 ||
- (katom->core_req & BASE_JD_REQ_T));
- kbase_pm_unrequest_cores(kbdev, katom->core_req & BASE_JD_REQ_T,
- katom->affinity);
+ /* State where cores were requested */
+ kbase_pm_release_cores(kbdev,
+ kbase_atom_needs_tiler(kbdev, core_req),
+ kbase_atom_needs_shaders(kbdev, core_req));
break;
case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
/* Initial state - nothing required */
- KBASE_DEBUG_ASSERT(katom->affinity == 0);
break;
default:
@@ -575,12 +392,11 @@ static void kbasep_js_job_check_deref_cores(struct kbase_device *kbdev,
break;
}
- katom->affinity = 0;
katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
}
static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
- base_jd_core_req core_req, u64 affinity,
+ base_jd_core_req core_req,
enum kbase_atom_coreref_state coreref_state)
{
KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -589,31 +405,18 @@ static void kbasep_js_job_check_deref_cores_nokatom(struct kbase_device *kbdev,
case KBASE_ATOM_COREREF_STATE_READY:
/* State where atom was submitted to the HW - just proceed to
* power-down */
- KBASE_DEBUG_ASSERT(affinity != 0 ||
- (core_req & BASE_JD_REQ_T));
/* *** FALLTHROUGH *** */
- case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
- /* State where cores were registered */
- KBASE_DEBUG_ASSERT(affinity != 0 ||
- (core_req & BASE_JD_REQ_T));
- kbase_pm_release_cores(kbdev, core_req & BASE_JD_REQ_T,
- affinity);
-
- break;
-
case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
- /* State where cores were requested, but not registered */
- KBASE_DEBUG_ASSERT(affinity != 0 ||
- (core_req & BASE_JD_REQ_T));
- kbase_pm_unrequest_cores(kbdev, core_req & BASE_JD_REQ_T,
- affinity);
+ /* State where cores were requested */
+ kbase_pm_release_cores(kbdev,
+ kbase_atom_needs_tiler(kbdev, core_req),
+ kbase_atom_needs_shaders(kbdev, core_req));
break;
case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
/* Initial state - nothing required */
- KBASE_DEBUG_ASSERT(affinity == 0);
break;
default:
@@ -659,8 +462,6 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
case KBASE_ATOM_GPU_RB_WAITING_AFFINITY:
- kbase_js_affinity_release_slot_cores(kbdev, katom->slot_nr,
- katom->affinity);
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
case KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE:
@@ -674,14 +475,23 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev,
kbdev->protected_mode_transition = false;
if (kbase_jd_katom_is_protected(katom) &&
- (katom->protected_state.enter ==
- KBASE_ATOM_ENTER_PROTECTED_IDLE_L2)) {
+ ((katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_IDLE_L2) ||
+ (katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY) ||
+ (katom->protected_state.enter ==
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED))) {
kbase_vinstr_resume(kbdev->vinstr_ctx);
-
- /* Go back to configured model for IPA */
- kbase_ipa_model_use_configured_locked(kbdev);
}
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+ if (katom->atom_flags &
+ KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+ kbdev->l2_users_count--;
+ katom->atom_flags &=
+ ~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+ }
+ }
/* ***FALLTHROUGH: TRANSITION TO LOWER STATE*** */
@@ -769,15 +579,10 @@ static inline bool kbase_gpu_in_protected_mode(struct kbase_device *kbdev)
return kbdev->protected_mode;
}
-static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+static void kbase_gpu_disable_coherent(struct kbase_device *kbdev)
{
- int err = -EINVAL;
-
lockdep_assert_held(&kbdev->hwaccess_lock);
- WARN_ONCE(!kbdev->protected_ops,
- "Cannot enter protected mode: protected callbacks not specified.\n");
-
/*
* When entering into protected mode, we must ensure that the
* GPU is not operating in coherent mode as well. This is to
@@ -785,17 +590,29 @@ static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
*/
if (kbdev->system_coherency == COHERENCY_ACE)
kbase_cache_set_coherency_mode(kbdev, COHERENCY_ACE_LITE);
+}
+
+static int kbase_gpu_protected_mode_enter(struct kbase_device *kbdev)
+{
+ int err = -EINVAL;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ WARN_ONCE(!kbdev->protected_ops,
+ "Cannot enter protected mode: protected callbacks not specified.\n");
if (kbdev->protected_ops) {
/* Switch GPU to protected mode */
err = kbdev->protected_ops->protected_mode_enable(
kbdev->protected_dev);
- if (err)
+ if (err) {
dev_warn(kbdev->dev, "Failed to enable protected mode: %d\n",
err);
- else
+ } else {
kbdev->protected_mode = true;
+ kbase_ipa_protection_mode_switch_event(kbdev);
+ }
}
return err;
@@ -818,6 +635,58 @@ static int kbase_gpu_protected_mode_reset(struct kbase_device *kbdev)
return 0;
}
+static int kbase_jm_protected_entry(struct kbase_device *kbdev,
+ struct kbase_jd_atom **katom, int idx, int js)
+{
+ int err = 0;
+
+ err = kbase_gpu_protected_mode_enter(kbdev);
+
+ /*
+ * Regardless of result before this call, we are no longer
+ * transitioning the GPU.
+ */
+
+ kbdev->protected_mode_transition = false;
+
+ KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev);
+ if (err) {
+ /*
+ * Failed to switch into protected mode, resume
+ * vinstr core and fail atom.
+ */
+ kbase_vinstr_resume(kbdev->vinstr_ctx);
+ katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
+ kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
+ /*
+ * Only return if head atom or previous atom
+ * already removed - as atoms must be returned
+ * in order.
+ */
+ if (idx == 0 || katom[0]->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
+ kbase_gpu_dequeue_atom(kbdev, js, NULL);
+ kbase_jm_return_atom_to_js(kbdev, katom[idx]);
+ }
+
+ return -EINVAL;
+ }
+
+ /*
+ * Protected mode sanity checks.
+ */
+ KBASE_DEBUG_ASSERT_MSG(
+ kbase_jd_katom_is_protected(katom[idx]) ==
+ kbase_gpu_in_protected_mode(kbdev),
+ "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
+ kbase_jd_katom_is_protected(katom[idx]),
+ kbase_gpu_in_protected_mode(kbdev));
+ katom[idx]->gpu_rb_state =
+ KBASE_ATOM_GPU_RB_READY;
+
+ return err;
+}
+
static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
struct kbase_jd_atom **katom, int idx, int js)
{
@@ -848,9 +717,6 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
return -EAGAIN;
}
- /* Use generic model for IPA in protected mode */
- kbase_ipa_model_use_fallback_locked(kbdev);
-
/* Once reaching this point GPU must be
* switched to protected mode or vinstr
* re-enabled. */
@@ -873,61 +739,82 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev,
if (kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2) ||
kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2)) {
/*
- * The L2 is still powered, wait for all the users to
- * finish with it before doing the actual reset.
- */
+ * The L2 is still powered, wait for all the users to
+ * finish with it before doing the actual reset.
+ */
return -EAGAIN;
}
}
katom[idx]->protected_state.enter =
- KBASE_ATOM_ENTER_PROTECTED_FINISHED;
+ KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY;
/* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
- case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
-
- /* No jobs running, so we can switch GPU mode right now. */
- err = kbase_gpu_protected_mode_enter(kbdev);
-
+ case KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY:
/*
- * Regardless of result, we are no longer transitioning
- * the GPU.
+ * When entering into protected mode, we must ensure that the
+ * GPU is not operating in coherent mode as well. This is to
+ * ensure that no protected memory can be leaked.
*/
- kbdev->protected_mode_transition = false;
- KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END(kbdev);
- if (err) {
+ kbase_gpu_disable_coherent(kbdev);
+
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
/*
- * Failed to switch into protected mode, resume
- * vinstr core and fail atom.
+ * Power on L2 caches; this will also result in the
+			 * correct value written to the coherency enable register.
*/
- kbase_vinstr_resume(kbdev->vinstr_ctx);
- katom[idx]->event_code = BASE_JD_EVENT_JOB_INVALID;
- kbase_gpu_mark_atom_for_return(kbdev, katom[idx]);
- /* Only return if head atom or previous atom
- * already removed - as atoms must be returned
- * in order. */
- if (idx == 0 || katom[0]->gpu_rb_state ==
- KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB) {
- kbase_gpu_dequeue_atom(kbdev, js, NULL);
- kbase_jm_return_atom_to_js(kbdev, katom[idx]);
- }
+ kbase_pm_request_l2_caches_nolock(kbdev);
+ /*
+ * Set the flag on the atom that additional
+ * L2 references are taken.
+ */
+ katom[idx]->atom_flags |=
+ KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+ }
- /* Go back to configured model for IPA */
- kbase_ipa_model_use_configured_locked(kbdev);
+ katom[idx]->protected_state.enter =
+ KBASE_ATOM_ENTER_PROTECTED_FINISHED;
- return -EINVAL;
- }
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234))
+ return -EAGAIN;
- /* Protected mode sanity checks. */
- KBASE_DEBUG_ASSERT_MSG(
- kbase_jd_katom_is_protected(katom[idx]) ==
- kbase_gpu_in_protected_mode(kbdev),
- "Protected mode of atom (%d) doesn't match protected mode of GPU (%d)",
- kbase_jd_katom_is_protected(katom[idx]),
- kbase_gpu_in_protected_mode(kbdev));
- katom[idx]->gpu_rb_state =
- KBASE_ATOM_GPU_RB_READY;
+ /* ***FALLTHROUGH: TRANSITION TO HIGHER STATE*** */
+
+ case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
+ if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
+ /*
+ * Check that L2 caches are powered and, if so,
+ * enter protected mode.
+ */
+ if (kbdev->pm.backend.l2_powered != 0) {
+ /*
+ * Remove additional L2 reference and reset
+ * the atom flag which denotes it.
+ */
+ if (katom[idx]->atom_flags &
+ KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
+ kbdev->l2_users_count--;
+ katom[idx]->atom_flags &=
+ ~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
+ }
+
+ err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+ if (err)
+ return err;
+ } else {
+ /*
+ * still waiting for L2 caches to power up
+ */
+ return -EAGAIN;
+ }
+ } else {
+ err = kbase_jm_protected_entry(kbdev, katom, idx, js);
+
+ if (err)
+ return err;
+ }
}
return 0;
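
The rewritten entry path above adds a workaround for BASE_HW_ISSUE_TGOX_R1_1234: take an extra L2 user reference while the caches power up, record it in a per-atom flag, and drop it exactly once (on entry completion or in kbase_gpu_release_atom). A sketch of that flag-guarded refcounting; the types are illustrative:

#include <stdbool.h>

struct l2_state { int users; };

static void l2_ref_take(struct l2_state *l2, bool *holding)
{
	if (!*holding) {	/* the flag makes the take idempotent */
		l2->users++;
		*holding = true;
	}
}

static void l2_ref_drop(struct l2_state *l2, bool *holding)
{
	if (*holding) {		/* ...and the drop single-shot */
		l2->users--;
		*holding = false;
	}
}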
@@ -995,9 +882,6 @@ static int kbase_jm_exit_protected_mode(struct kbase_device *kbdev,
kbase_vinstr_resume(kbdev->vinstr_ctx);
- /* Use generic model for IPA in protected mode */
- kbase_ipa_model_use_fallback_locked(kbdev);
-
return -EINVAL;
}
@@ -1144,8 +1028,6 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
if (!cores_ready)
break;
- kbase_js_affinity_retain_slot_cores(kbdev, js,
- katom[idx]->affinity);
katom[idx]->gpu_rb_state =
KBASE_ATOM_GPU_RB_WAITING_AFFINITY;
@@ -1247,7 +1129,8 @@ void kbase_backend_run_atom(struct kbase_device *kbdev,
#define HAS_DEP(katom) (katom->pre_dep || katom->atom_flags & \
(KBASE_KATOM_FLAG_X_DEP_BLOCKED | KBASE_KATOM_FLAG_FAIL_BLOCKER))
-bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js)
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+ u32 completion_code)
{
struct kbase_jd_atom *katom;
struct kbase_jd_atom *next_katom;
@@ -1259,23 +1142,29 @@ bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js)
if (next_katom && katom->kctx == next_katom->kctx &&
next_katom->gpu_rb_state == KBASE_ATOM_GPU_RB_SUBMITTED &&
- HAS_DEP(next_katom) &&
- (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO), NULL)
+ (HAS_DEP(next_katom) || next_katom->sched_priority ==
+ katom->sched_priority) &&
+ (kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_LO))
!= 0 ||
- kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI), NULL)
+ kbase_reg_read(kbdev, JOB_SLOT_REG(js, JS_HEAD_NEXT_HI))
!= 0)) {
kbase_reg_write(kbdev, JOB_SLOT_REG(js, JS_COMMAND_NEXT),
- JS_COMMAND_NOP, NULL);
+ JS_COMMAND_NOP);
next_katom->gpu_rb_state = KBASE_ATOM_GPU_RB_READY;
- KBASE_TLSTREAM_TL_NRET_ATOM_LPU(katom,
+ if (completion_code == BASE_JD_EVENT_STOPPED) {
+ KBASE_TLSTREAM_TL_NRET_ATOM_LPU(next_katom,
&kbdev->gpu_props.props.raw_props.js_features
- [katom->slot_nr]);
- KBASE_TLSTREAM_TL_NRET_ATOM_AS(katom, &kbdev->as
- [katom->kctx->as_nr]);
- KBASE_TLSTREAM_TL_NRET_CTX_LPU(katom->kctx,
+ [next_katom->slot_nr]);
+ KBASE_TLSTREAM_TL_NRET_ATOM_AS(next_katom, &kbdev->as
+ [next_katom->kctx->as_nr]);
+ KBASE_TLSTREAM_TL_NRET_CTX_LPU(next_katom->kctx,
&kbdev->gpu_props.props.raw_props.js_features
- [katom->slot_nr]);
+ [next_katom->slot_nr]);
+ }
+
+ if (next_katom->core_req & BASE_JD_REQ_PERMON)
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
return true;
}
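
The hunk above widens the eviction test: the NEXT atom is now evicted not only when it depends on the failing atom but also when it merely shares its scheduling priority, and a cycle-counter reference is released for BASE_JD_REQ_PERMON atoms. A hedged model of the resulting predicate follows; the flattened atom_view struct is an assumption for illustration, where the real code reads katom fields and the JS_HEAD_NEXT_LO/HI registers directly.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, flattened view of an atom. */
struct atom_view {
	const void *kctx;
	int sched_priority;
	bool submitted;		/* gpu_rb_state == ..._SUBMITTED */
	bool has_dep;		/* HAS_DEP(katom) */
};

/* Modeled on the widened test: evict the NEXT atom when it shares
 * the failing atom's context, is submitted, and either depends on it
 * or runs at the same priority, provided the NEXT head registers
 * still hold a job. */
static bool should_evict_next(const struct atom_view *cur,
			      const struct atom_view *next,
			      uint32_t head_next_lo, uint32_t head_next_hi)
{
	if (!next || cur->kctx != next->kctx || !next->submitted)
		return false;
	if (!next->has_dep && next->sched_priority != cur->sched_priority)
		return false;
	return head_next_lo != 0 || head_next_hi != 0;
}

int main(void)
{
	struct atom_view cur = { "ctx", 2, true, false };
	struct atom_view next = { "ctx", 2, true, false };

	/* Same context and priority, job still in NEXT: evict. */
	printf("%d\n", should_evict_next(&cur, &next, 0x1000, 0));
	return 0;
}
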
@@ -1314,26 +1203,24 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
* flushed. To prevent future evictions causing possible memory
* corruption we need to flush the cache manually before any
* affected memory gets reused. */
- katom->need_cache_flush_cores_retained = katom->affinity;
- kbase_pm_request_cores(kbdev, false, katom->affinity);
+ katom->need_cache_flush_cores_retained = true;
+ kbase_pm_request_cores(kbdev,
+ kbase_atom_needs_tiler(kbdev, katom->core_req),
+ kbase_atom_needs_shaders(kbdev,
+ katom->core_req));
} else if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_10676)) {
if (kbdev->gpu_props.num_core_groups > 1 &&
- !(katom->affinity &
- kbdev->gpu_props.props.coherency_info.group[0].core_mask
- ) &&
- (katom->affinity &
- kbdev->gpu_props.props.coherency_info.group[1].core_mask
- )) {
+ katom->device_nr >= 1) {
dev_info(kbdev->dev, "JD: Flushing cache due to PRLAM-10676\n");
- katom->need_cache_flush_cores_retained =
- katom->affinity;
- kbase_pm_request_cores(kbdev, false,
- katom->affinity);
+ katom->need_cache_flush_cores_retained = true;
+ kbase_pm_request_cores(kbdev,
+ kbase_atom_needs_tiler(kbdev, katom->core_req),
+ kbase_atom_needs_shaders(kbdev,
+ katom->core_req));
}
}
katom = kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
- kbase_timeline_job_slot_done(kbdev, katom->kctx, katom, js, 0);
if (completion_code == BASE_JD_EVENT_STOPPED) {
struct kbase_jd_atom *next_katom = kbase_gpu_inspect(kbdev, js,
@@ -1348,6 +1235,8 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
if (next_katom && katom->kctx == next_katom->kctx &&
next_katom->sched_priority ==
katom->sched_priority) {
+ WARN_ON(next_katom->gpu_rb_state ==
+ KBASE_ATOM_GPU_RB_SUBMITTED);
kbase_gpu_dequeue_atom(kbdev, js, end_timestamp);
kbase_jm_return_atom_to_js(kbdev, next_katom);
}
@@ -1355,6 +1244,13 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
int i;
+ if (!kbase_ctx_flag(katom->kctx, KCTX_DYING))
+ dev_warn(kbdev->dev, "error detected from slot %d, job status 0x%08x (%s)",
+ js, completion_code,
+ kbase_exception_name(kbdev, completion_code));
+
#if KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR != 0
KBASE_TRACE_DUMP(kbdev);
#endif
@@ -1428,10 +1324,6 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, int js,
if (katom->event_code != BASE_JD_EVENT_JOB_CANCELLED)
katom->event_code = (base_jd_event_code)completion_code;
- kbase_device_trace_register_access(kctx, REG_WRITE,
- JOB_CONTROL_REG(JOB_IRQ_CLEAR),
- 1 << js);
-
/* Complete the job, and start new ones
*
* Also defer remaining work onto the workqueue:
@@ -1515,8 +1407,7 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
if (!katom)
break;
if (katom->protected_state.exit ==
- KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT)
- {
+ KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT) {
KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END(kbdev);
kbase_vinstr_resume(kbdev->vinstr_ctx);
@@ -1544,7 +1435,6 @@ void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp)
if (keep_in_jm_rb) {
kbasep_js_job_check_deref_cores(kbdev, katom);
katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
- katom->affinity = 0;
katom->protected_state.exit = KBASE_ATOM_EXIT_PROTECTED_CHECK;
/* As the atom was not removed, increment the
* index so that we read the correct atom in the
@@ -1607,12 +1497,6 @@ static int should_stop_x_dep_slot(struct kbase_jd_atom *katom)
return -1;
}
-static void kbase_job_evicted(struct kbase_jd_atom *katom)
-{
- kbase_timeline_job_slot_done(katom->kctx->kbdev, katom->kctx, katom,
- katom->slot_nr, KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT);
-}
-
bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
struct kbase_context *kctx,
int js,
@@ -1683,12 +1567,12 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
katom_idx0->kctx->blocked_js[js][prio_idx0] = true;
} else {
/* katom_idx0 is on GPU */
- if (katom_idx1 && katom_idx1->gpu_rb_state ==
+ if (katom_idx1_valid && katom_idx1->gpu_rb_state ==
KBASE_ATOM_GPU_RB_SUBMITTED) {
/* katom_idx0 and katom_idx1 are on GPU */
if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_COMMAND_NEXT), NULL) == 0) {
+ JS_COMMAND_NEXT)) == 0) {
/* idx0 has already completed - stop
* idx1 if needed*/
if (katom_idx1_valid) {
@@ -1703,19 +1587,18 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
kbase_reg_write(kbdev,
JOB_SLOT_REG(js,
JS_COMMAND_NEXT),
- JS_COMMAND_NOP, NULL);
+ JS_COMMAND_NOP);
if (kbase_reg_read(kbdev,
JOB_SLOT_REG(js,
- JS_HEAD_NEXT_LO), NULL)
+ JS_HEAD_NEXT_LO))
!= 0 ||
kbase_reg_read(kbdev,
JOB_SLOT_REG(js,
- JS_HEAD_NEXT_HI), NULL)
+ JS_HEAD_NEXT_HI))
!= 0) {
/* idx1 removed successfully,
* will be handled in IRQ */
- kbase_job_evicted(katom_idx1);
kbase_gpu_remove_atom(kbdev,
katom_idx1,
action, true);
@@ -1769,7 +1652,7 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
} else {
/* idx1 is on GPU */
if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_COMMAND_NEXT), NULL) == 0) {
+ JS_COMMAND_NEXT)) == 0) {
/* idx0 has already completed - stop idx1 */
kbase_gpu_stop_atom(kbdev, js, katom_idx1,
action);
@@ -1779,15 +1662,14 @@ bool kbase_backend_soft_hard_stop_slot(struct kbase_device *kbdev,
* remove */
kbase_reg_write(kbdev, JOB_SLOT_REG(js,
JS_COMMAND_NEXT),
- JS_COMMAND_NOP, NULL);
+ JS_COMMAND_NOP);
if (kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_HEAD_NEXT_LO), NULL) != 0 ||
+ JS_HEAD_NEXT_LO)) != 0 ||
kbase_reg_read(kbdev, JOB_SLOT_REG(js,
- JS_HEAD_NEXT_HI), NULL) != 0) {
+ JS_HEAD_NEXT_HI)) != 0) {
/* idx1 removed successfully, will be
* handled in IRQ once idx0 completes */
- kbase_job_evicted(katom_idx1);
kbase_gpu_remove_atom(kbdev, katom_idx1,
action,
false);
@@ -1827,11 +1709,11 @@ void kbase_gpu_cacheclean(struct kbase_device *kbdev)
/* clean & invalidate the caches */
KBASE_TRACE_ADD(kbdev, CORE_GPU_CLEAN_INV_CACHES, NULL, NULL, 0u, 0);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES, NULL);
+ GPU_COMMAND_CLEAN_INV_CACHES);
/* wait for cache flush to complete before continuing */
while (--max_loops &&
- (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
+ (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) &
CLEAN_CACHES_COMPLETED) == 0)
;
@@ -1839,7 +1721,7 @@ void kbase_gpu_cacheclean(struct kbase_device *kbdev)
KBASE_TRACE_ADD(kbdev, CORE_GPU_IRQ_CLEAR, NULL, NULL, 0u,
CLEAN_CACHES_COMPLETED);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR),
- CLEAN_CACHES_COMPLETED, NULL);
+ CLEAN_CACHES_COMPLETED);
KBASE_DEBUG_ASSERT_MSG(kbdev->hwcnt.backend.state !=
KBASE_INSTR_STATE_CLEANING,
"Instrumentation code was cleaning caches, but Job Management code cleared their IRQ - Instrumentation code will now hang.");
@@ -1856,10 +1738,12 @@ void kbase_backend_cacheclean(struct kbase_device *kbdev,
kbase_gpu_cacheclean(kbdev);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_pm_unrequest_cores(kbdev, false,
- katom->need_cache_flush_cores_retained);
+ kbase_pm_release_cores(kbdev,
+ kbase_atom_needs_tiler(kbdev, katom->core_req),
+ kbase_atom_needs_shaders(kbdev,
+ katom->core_req));
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- katom->need_cache_flush_cores_retained = 0;
+ katom->need_cache_flush_cores_retained = false;
}
}
@@ -1895,18 +1779,16 @@ void kbase_backend_complete_wq(struct kbase_device *kbdev,
* this is not done, then if the atom is re-scheduled (following a soft
* stop) then the core reference would not be retaken. */
katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
- katom->affinity = 0;
}
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
- base_jd_core_req core_req, u64 affinity,
+ base_jd_core_req core_req,
enum kbase_atom_coreref_state coreref_state)
{
unsigned long flags;
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, affinity,
- coreref_state);
+ kbasep_js_job_check_deref_cores_nokatom(kbdev, core_req, coreref_state);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
if (!kbdev->pm.active_count) {
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
index 456700814ee9a4..c3b9f2d855369a 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_jm_rb.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -33,15 +33,17 @@
/**
* kbase_gpu_irq_evict - Evict an atom from a NEXT slot
*
- * @kbdev: Device pointer
- * @js: Job slot to evict from
+ * @kbdev: Device pointer
+ * @js: Job slot to evict from
+ * @completion_code: Event code from job that was run.
*
* Evict the atom in the NEXT slot for the specified job slot. This function is
* called from the job complete IRQ handler when the previous job has failed.
*
* Return: true if job evicted from NEXT registers, false otherwise
*/
-bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js);
+bool kbase_gpu_irq_evict(struct kbase_device *kbdev, int js,
+ u32 completion_code);
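
Callers now forward the completion code of the failed job, so the evict path can restrict its timeline "release" events to soft stops. A hedged sketch of the new calling shape is below; the event value and the stub body are assumptions for illustration only, and the real function also writes JS_COMMAND_NOP to JS_COMMAND_NEXT.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BASE_JD_EVENT_STOPPED 0x03u	/* assumed value, for the sketch only */

/* Stub standing in for kbase_gpu_irq_evict(): here we only show that
 * timeline release events are tied to soft stops. */
static bool irq_evict_stub(int js, uint32_t completion_code)
{
	return completion_code == BASE_JD_EVENT_STOPPED;
}

int main(void)
{
	int js = 1;

	if (irq_evict_stub(js, BASE_JD_EVENT_STOPPED))
		printf("slot %d: NEXT atom evicted, timeline events sent\n", js);
	return 0;
}
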
/**
* kbase_gpu_complete_hw - Complete an atom on job slot js
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c
deleted file mode 100644
index c937eca8c166da..00000000000000
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.c
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-/*
- * Base kernel affinity manager APIs
- */
-
-#include <mali_kbase.h>
-#include "mali_kbase_js_affinity.h"
-#include "mali_kbase_hw.h"
-
-#include <backend/gpu/mali_kbase_pm_internal.h>
-
-
-bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev,
- int js)
-{
- /*
- * Here are the reasons for using job slot 2:
- * - BASE_HW_ISSUE_8987 (which is entirely used for that purpose)
- * - In absence of the above, then:
- * - Atoms with BASE_JD_REQ_COHERENT_GROUP
- * - But, only when there aren't contexts with
- * KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES, because the atoms that run on
- * all cores on slot 1 could be blocked by those using a coherent group
- * on slot 2
- * - And, only when you actually have 2 or more coregroups - if you
- * only have 1 coregroup, then having jobs for slot 2 implies they'd
- * also be for slot 1, meaning you'll get interference from them. Jobs
- * able to run on slot 2 could also block jobs that can only run on
- * slot 1 (tiler jobs)
- */
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
- return true;
-
- if (js != 2)
- return true;
-
- /* Only deal with js==2 now: */
- if (kbdev->gpu_props.num_core_groups > 1) {
- /* Only use slot 2 in the 2+ coregroup case */
- if (kbasep_js_ctx_attr_is_attr_on_runpool(kbdev,
- KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) ==
- false) {
- /* ...But only when we *don't* have atoms that run on
- * all cores */
-
- /* No specific check for BASE_JD_REQ_COHERENT_GROUP
- * atoms - the policy will sort that out */
- return true;
- }
- }
-
- /* Above checks failed mean we shouldn't use slot 2 */
- return false;
-}
-
-/*
- * Until a deeper redesign of the job scheduler, power manager and
- * affinity manager is implemented, this function is just an
- * intermediate step that assumes:
- * - all working cores will be powered on when this is called.
- * - largest current configuration is 2 core groups.
- * - It has been decided not to have hardcoded values, so the low
- *   and high cores in a core split will be evenly distributed.
- * - Odd combinations of core requirements have been filtered out
- *   and do not get to this function (e.g. CS+T+NSS is not
- *   supported here).
- * - This function is frequently called and can be optimized
- *   (see notes in loops), but as the functionality will likely
- *   be modified, optimization has not been addressed.
- */
-bool kbase_js_choose_affinity(u64 * const affinity,
- struct kbase_device *kbdev,
- struct kbase_jd_atom *katom, int js)
-{
- base_jd_core_req core_req = katom->core_req;
- unsigned int num_core_groups = kbdev->gpu_props.num_core_groups;
- u64 core_availability_mask;
-
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- core_availability_mask = kbase_pm_ca_get_core_mask(kbdev);
-
- /*
- * If no cores are currently available (core availability policy is
- * transitioning) then fail.
- */
- if (0 == core_availability_mask) {
- *affinity = 0;
- return false;
- }
-
- KBASE_DEBUG_ASSERT(js >= 0);
-
- if ((core_req & (BASE_JD_REQ_FS | BASE_JD_REQ_CS | BASE_JD_REQ_T)) ==
- BASE_JD_REQ_T) {
- /* If the hardware supports XAFFINITY then we'll only enable
- * the tiler (which is the default so this is a no-op),
- * otherwise enable shader core 0. */
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
- *affinity = 1;
- else
- *affinity = 0;
-
- return true;
- }
-
- if (1 == kbdev->gpu_props.num_cores) {
- /* trivial case only one core, nothing to do */
- *affinity = core_availability_mask &
- kbdev->pm.debug_core_mask[js];
- } else {
- if ((core_req & (BASE_JD_REQ_COHERENT_GROUP |
- BASE_JD_REQ_SPECIFIC_COHERENT_GROUP))) {
- if (js == 0 || num_core_groups == 1) {
- /* js[0] and single-core-group systems just get
- * the first core group */
- *affinity =
- kbdev->gpu_props.props.coherency_info.group[0].core_mask
- & core_availability_mask &
- kbdev->pm.debug_core_mask[js];
- } else {
- /* js[1], js[2] use core groups 0, 1 for
- * dual-core-group systems */
- u32 core_group_idx = ((u32) js) - 1;
-
- KBASE_DEBUG_ASSERT(core_group_idx <
- num_core_groups);
- *affinity =
- kbdev->gpu_props.props.coherency_info.group[core_group_idx].core_mask
- & core_availability_mask &
- kbdev->pm.debug_core_mask[js];
-
- /* If the job is specifically targeting core
- * group 1 and the core availability policy is
- * keeping that core group off, then fail */
- if (*affinity == 0 && core_group_idx == 1 &&
- kbdev->pm.backend.cg1_disabled
- == true)
- katom->event_code =
- BASE_JD_EVENT_PM_EVENT;
- }
- } else {
- /* All cores are available when no core split is
- * required */
- *affinity = core_availability_mask &
- kbdev->pm.debug_core_mask[js];
- }
- }
-
- /*
- * If no cores are currently available in the desired core group(s)
- * (core availability policy is transitioning) then fail.
- */
- if (*affinity == 0)
- return false;
-
- /* Enable core 0 if tiler required for hardware without XAFFINITY
- * support (notes above) */
- if (core_req & BASE_JD_REQ_T) {
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY))
- *affinity = *affinity | 1;
- }
-
- return true;
-}
-
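
Stripped of the policy plumbing, the affinity choice deleted above is three masks ANDed together, with a zero result meaning the core availability policy is mid-transition and placement must fail (the removed code returned false). A minimal sketch with made-up mask values:

#include <stdint.h>
#include <stdio.h>

/* Condensed model of the removed computation: AND together the
 * chosen core group's mask, the availability mask and the per-slot
 * debug mask. */
static uint64_t choose_affinity(uint64_t group_mask, uint64_t available,
				uint64_t debug_mask)
{
	return group_mask & available & debug_mask;
}

int main(void)
{
	uint64_t group0 = 0x0Full;	/* cores 0-3 */
	uint64_t available = 0x07ull;	/* core 3 unavailable */
	uint64_t debug = ~0ull;

	uint64_t affinity = choose_affinity(group0, available, debug);

	/* Zero here would mean the job cannot be placed yet. */
	printf("affinity = 0x%llx\n", (unsigned long long)affinity);
	return 0;
}
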
-static inline bool kbase_js_affinity_is_violating(
- struct kbase_device *kbdev,
- u64 *affinities)
-{
- /* This implementation checks whether the two slots involved in Generic
- * thread creation have intersecting affinity. This is due to micro-
- * architectural issues where a job in slot A targeting cores used by
- * slot B could prevent the job in slot B from making progress until the
- * job in slot A has completed.
- */
- u64 affinity_set_left;
- u64 affinity_set_right;
- u64 intersection;
-
- KBASE_DEBUG_ASSERT(affinities != NULL);
-
- affinity_set_left = affinities[1];
-
- affinity_set_right = affinities[2];
-
- /* A violation occurs when any bit in the left_set is also in the
- * right_set */
- intersection = affinity_set_left & affinity_set_right;
-
- return (bool) (intersection != (u64) 0u);
-}
-
-bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
- u64 affinity)
-{
- struct kbasep_js_device_data *js_devdata;
- u64 new_affinities[BASE_JM_MAX_NR_SLOTS];
-
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
- js_devdata = &kbdev->js_data;
-
- memcpy(new_affinities, js_devdata->runpool_irq.slot_affinities,
- sizeof(js_devdata->runpool_irq.slot_affinities));
-
- new_affinities[js] |= affinity;
-
- return kbase_js_affinity_is_violating(kbdev, new_affinities);
-}
-
-void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
- u64 affinity)
-{
- struct kbasep_js_device_data *js_devdata;
- u64 cores;
-
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
- js_devdata = &kbdev->js_data;
-
- KBASE_DEBUG_ASSERT(kbase_js_affinity_would_violate(kbdev, js, affinity)
- == false);
-
- cores = affinity;
- while (cores) {
- int bitnum = fls64(cores) - 1;
- u64 bit = 1ULL << bitnum;
- s8 cnt;
-
- cnt =
- ++(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
-
- if (cnt == 1)
- js_devdata->runpool_irq.slot_affinities[js] |= bit;
-
- cores &= ~bit;
- }
-}
-
-void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
- u64 affinity)
-{
- struct kbasep_js_device_data *js_devdata;
- u64 cores;
-
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(js < BASE_JM_MAX_NR_SLOTS);
- js_devdata = &kbdev->js_data;
-
- cores = affinity;
- while (cores) {
- int bitnum = fls64(cores) - 1;
- u64 bit = 1ULL << bitnum;
- s8 cnt;
-
- KBASE_DEBUG_ASSERT(
- js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum] > 0);
-
- cnt =
- --(js_devdata->runpool_irq.slot_affinity_refcount[js][bitnum]);
-
- if (0 == cnt)
- js_devdata->runpool_irq.slot_affinities[js] &= ~bit;
-
- cores &= ~bit;
- }
-}
-
-#if KBASE_TRACE_ENABLE
-void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
-{
- struct kbasep_js_device_data *js_devdata;
- int slot_nr;
-
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- js_devdata = &kbdev->js_data;
-
- for (slot_nr = 0; slot_nr < 3; ++slot_nr)
- KBASE_TRACE_ADD_SLOT_INFO(kbdev, JS_AFFINITY_CURRENT, NULL,
- NULL, 0u, slot_nr,
- (u32) js_devdata->runpool_irq.slot_affinities[slot_nr]);
-}
-#endif /* KBASE_TRACE_ENABLE */
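
The retain/release pair in the file deleted above is a per-core reference count behind a summary bitmask: a slot's affinity bit is set when the first reference to a core is taken and cleared when the last one drops. A compilable user-space model of that bookkeeping follows, with GCC/Clang's __builtin_clzll standing in for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

#define MAX_CORES 64

static int8_t refcount[MAX_CORES];	/* per-core reference counts */
static uint64_t slot_affinity;		/* summary bitmask */

static void retain_cores(uint64_t cores)
{
	while (cores) {
		int bit = 63 - __builtin_clzll(cores);	/* fls64() - 1 */

		if (++refcount[bit] == 1)
			slot_affinity |= 1ull << bit;
		cores &= ~(1ull << bit);
	}
}

static void release_cores(uint64_t cores)
{
	while (cores) {
		int bit = 63 - __builtin_clzll(cores);

		if (--refcount[bit] == 0)
			slot_affinity &= ~(1ull << bit);
		cores &= ~(1ull << bit);
	}
}

int main(void)
{
	retain_cores(0x5);	/* cores 0 and 2 */
	retain_cores(0x4);	/* core 2 again */
	release_cores(0x4);	/* core 2 still referenced once */
	printf("mask = 0x%llx\n", (unsigned long long)slot_affinity); /* 0x5 */
	return 0;
}
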
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h
deleted file mode 100644
index dbabd94564c788..00000000000000
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_affinity.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-/*
- * Affinity Manager internal APIs.
- */
-
-#ifndef _KBASE_JS_AFFINITY_H_
-#define _KBASE_JS_AFFINITY_H_
-
-/**
- * kbase_js_can_run_job_on_slot_no_lock - Decide whether it is possible to
- * submit a job to a particular job slot in the current status
- *
- * @kbdev: The kbase device structure of the device
- * @js: Job slot number to check for allowance
- *
- * Will check if submitting to the given job slot is allowed in the current
- * status. For example using job slot 2 while in soft-stoppable state and only
- * having 1 coregroup is not allowed by the policy. This function should be
- * called prior to submitting a job to a slot to make sure policy rules are not
- * violated.
- *
- * The following locking conditions are made on the caller
- * - it must hold hwaccess_lock
- */
-bool kbase_js_can_run_job_on_slot_no_lock(struct kbase_device *kbdev, int js);
-
-/**
- * kbase_js_choose_affinity - Compute affinity for a given job.
- *
- * @affinity: Affinity bitmap computed
- * @kbdev: The kbase device structure of the device
- * @katom: Job chain of which affinity is going to be found
- * @js: Slot the job chain is being submitted
- *
- * Currently assumes an all-on/all-off power management policy.
- * Also assumes there is at least one core with tiler available.
- *
- * Returns true if a valid affinity was chosen, false if
- * no cores were available.
- */
-bool kbase_js_choose_affinity(u64 * const affinity,
- struct kbase_device *kbdev,
- struct kbase_jd_atom *katom,
- int js);
-
-/**
- * kbase_js_affinity_would_violate - Determine whether a proposed affinity on
- * job slot @js would cause a violation of affinity restrictions.
- *
- * @kbdev: Kbase device structure
- * @js: The job slot to test
- * @affinity: The affinity mask to test
- *
- * The following locks must be held by the caller
- * - hwaccess_lock
- *
- * Return: true if the affinity would violate the restrictions
- */
-bool kbase_js_affinity_would_violate(struct kbase_device *kbdev, int js,
- u64 affinity);
-
-/**
- * kbase_js_affinity_retain_slot_cores - Affinity tracking: retain cores used by
- * a slot
- *
- * @kbdev: Kbase device structure
- * @js: The job slot retaining the cores
- * @affinity: The cores to retain
- *
- * The following locks must be held by the caller
- * - hwaccess_lock
- */
-void kbase_js_affinity_retain_slot_cores(struct kbase_device *kbdev, int js,
- u64 affinity);
-
-/**
- * kbase_js_affinity_release_slot_cores - Affinity tracking: release cores used
- * by a slot
- *
- * @kbdev: Kbase device structure
- * @js: Job slot
- * @affinity: Bit mask of core to be released
- *
- * Cores must be released as soon as a job is dequeued from a slot's 'submit
- * slots', and before another job is submitted to those slots. Otherwise, the
- * refcount could exceed the maximum number submittable to a slot,
- * %BASE_JM_SUBMIT_SLOTS.
- *
- * The following locks must be held by the caller
- * - hwaccess_lock
- */
-void kbase_js_affinity_release_slot_cores(struct kbase_device *kbdev, int js,
- u64 affinity);
-
-/**
- * kbase_js_debug_log_current_affinities - log the current affinities
- *
- * @kbdev: Kbase device structure
- *
- * Output to the Trace log the current tracked affinities on all slots
- */
-#if KBASE_TRACE_ENABLE
-void kbase_js_debug_log_current_affinities(struct kbase_device *kbdev);
-#else /* KBASE_TRACE_ENABLE */
-static inline void
-kbase_js_debug_log_current_affinities(struct kbase_device *kbdev)
-{
-}
-#endif /* KBASE_TRACE_ENABLE */
-
-#endif /* _KBASE_JS_AFFINITY_H_ */
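
The affinity-violation check removed with these files boils down to: after merging the proposed mask into the slot's current one, slots 1 and 2 must not target intersecting core sets, due to the micro-architectural restriction noted in the deleted comment. A small model with illustrative masks:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 3

/* Model of the removed test: copy the tracked per-slot affinities,
 * merge in the proposed mask, and check slots 1 and 2 for overlap. */
static bool would_violate(const uint64_t affinities[MAX_SLOTS],
			  int js, uint64_t new_affinity)
{
	uint64_t merged[MAX_SLOTS];

	for (int i = 0; i < MAX_SLOTS; i++)
		merged[i] = affinities[i];
	merged[js] |= new_affinity;

	return (merged[1] & merged[2]) != 0;
}

int main(void)
{
	uint64_t affinities[MAX_SLOTS] = { 0x1, 0x2, 0x4 };

	/* Adding core 2 (mask 0x4) to slot 1 collides with slot 2. */
	printf("%d\n", would_violate(affinities, 1, 0x4));	/* 1 */
	return 0;
}
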
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
index 2dc97859ed5f5f..df2dd5ec0526bf 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_js_backend.c
@@ -147,16 +147,17 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
/* Job is Soft-Stoppable */
if (ticks == soft_stop_ticks) {
- int disjoint_threshold =
- KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
- u32 softstop_flags = 0u;
/* Job has been scheduled for at least
* js_devdata->soft_stop_ticks ticks.
* Soft stop the slot so we can run
* other jobs.
*/
- dev_dbg(kbdev->dev, "Soft-stop");
#if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
+ int disjoint_threshold =
+ KBASE_DISJOINT_STATE_INTERLEAVED_CONTEXT_COUNT_THRESHOLD;
+ u32 softstop_flags = 0u;
+
+ dev_dbg(kbdev->dev, "Soft-stop");
/* nr_user_contexts_running is updated
* with the runpool_mutex, but we can't
* take that here.
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
index ad27202c8f0879..3e9af7770e1a67 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_mmu_hw_direct.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -66,15 +66,15 @@ static inline u64 lock_region(struct kbase_device *kbdev, u64 pfn,
}
static int wait_ready(struct kbase_device *kbdev,
- unsigned int as_nr, struct kbase_context *kctx)
+ unsigned int as_nr)
{
unsigned int max_loops = KBASE_AS_INACTIVE_MAX_LOOPS;
- u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+ u32 val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending. Do not log remaining register accesses. */
while (--max_loops && (val & AS_STATUS_AS_ACTIVE))
- val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), NULL);
+ val = kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
if (max_loops == 0) {
dev_err(kbdev->dev, "AS_ACTIVE bit stuck\n");
@@ -83,27 +83,24 @@ static int wait_ready(struct kbase_device *kbdev,
/* If waiting in loop was performed, log last read value. */
if (KBASE_AS_INACTIVE_MAX_LOOPS - 1 > max_loops)
- kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS), kctx);
+ kbase_reg_read(kbdev, MMU_AS_REG(as_nr, AS_STATUS));
return 0;
}
-static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd,
- struct kbase_context *kctx)
+static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
{
int status;
/* write AS_COMMAND when MMU is ready to accept another command */
- status = wait_ready(kbdev, as_nr, kctx);
+ status = wait_ready(kbdev, as_nr);
if (status == 0)
- kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd,
- kctx);
+ kbase_reg_write(kbdev, MMU_AS_REG(as_nr, AS_COMMAND), cmd);
return status;
}
-static void validate_protected_page_fault(struct kbase_device *kbdev,
- struct kbase_context *kctx)
+static void validate_protected_page_fault(struct kbase_device *kbdev)
{
/* GPUs which support (native) protected mode shall not report page
* fault addresses unless they have protected debug mode and protected
@@ -115,8 +112,7 @@ static void validate_protected_page_fault(struct kbase_device *kbdev,
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
protected_debug_mode = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_STATUS),
- kctx) & GPU_DBGEN;
+ GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
}
if (!protected_debug_mode) {
@@ -145,9 +141,9 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* remember current mask */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+ new_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
/* mask interrupts for now */
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
while (bf_bits | pf_bits) {
@@ -170,25 +166,21 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
*/
kctx = kbasep_js_runpool_lookup_ctx(kbdev, as_no);
-
/* find faulting address */
as->fault_addr = kbase_reg_read(kbdev,
MMU_AS_REG(as_no,
- AS_FAULTADDRESS_HI),
- kctx);
+ AS_FAULTADDRESS_HI));
as->fault_addr <<= 32;
as->fault_addr |= kbase_reg_read(kbdev,
MMU_AS_REG(as_no,
- AS_FAULTADDRESS_LO),
- kctx);
+ AS_FAULTADDRESS_LO));
/* Mark the fault protected or not */
as->protected_mode = kbdev->protected_mode;
- if (kbdev->protected_mode && as->fault_addr)
- {
+ if (kbdev->protected_mode && as->fault_addr) {
/* check if address reporting is allowed */
- validate_protected_page_fault(kbdev, kctx);
+ validate_protected_page_fault(kbdev);
}
/* report the fault to debugfs */
@@ -197,8 +189,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* record the fault status */
as->fault_status = kbase_reg_read(kbdev,
MMU_AS_REG(as_no,
- AS_FAULTSTATUS),
- kctx);
+ AS_FAULTSTATUS));
/* find the fault type */
as->fault_type = (bf_bits & (1 << as_no)) ?
@@ -207,12 +198,10 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_AARCH64_MMU)) {
as->fault_extra_addr = kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_HI),
- kctx);
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_HI));
as->fault_extra_addr <<= 32;
as->fault_extra_addr |= kbase_reg_read(kbdev,
- MMU_AS_REG(as_no, AS_FAULTEXTRA_LO),
- kctx);
+ MMU_AS_REG(as_no, AS_FAULTEXTRA_LO));
}
if (kbase_as_has_bus_fault(as)) {
@@ -241,14 +230,13 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* reenable interrupts */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), NULL);
+ tmp = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK));
new_mask |= tmp;
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), new_mask);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
-void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx)
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
{
struct kbase_mmu_setup *current_setup = &as->current_setup;
u32 transcfg = 0;
@@ -271,35 +259,34 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as,
}
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_LO),
- transcfg, kctx);
+ transcfg);
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSCFG_HI),
- (current_setup->transcfg >> 32) & 0xFFFFFFFFUL,
- kctx);
+ (current_setup->transcfg >> 32) & 0xFFFFFFFFUL);
} else {
if (kbdev->system_coherency == COHERENCY_ACE)
current_setup->transtab |= AS_TRANSTAB_LPAE_SHARE_OUTER;
}
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_LO),
- current_setup->transtab & 0xFFFFFFFFUL, kctx);
+ current_setup->transtab & 0xFFFFFFFFUL);
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_TRANSTAB_HI),
- (current_setup->transtab >> 32) & 0xFFFFFFFFUL, kctx);
+ (current_setup->transtab >> 32) & 0xFFFFFFFFUL);
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_LO),
- current_setup->memattr & 0xFFFFFFFFUL, kctx);
+ current_setup->memattr & 0xFFFFFFFFUL);
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_MEMATTR_HI),
- (current_setup->memattr >> 32) & 0xFFFFFFFFUL, kctx);
+ (current_setup->memattr >> 32) & 0xFFFFFFFFUL);
KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(as,
current_setup->transtab,
current_setup->memattr,
transcfg);
- write_cmd(kbdev, as->number, AS_COMMAND_UPDATE, kctx);
+ write_cmd(kbdev, as->number, AS_COMMAND_UPDATE);
}
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx, u64 vpfn, u32 nr, u32 op,
+ u64 vpfn, u32 nr, u32 op,
unsigned int handling_irq)
{
int ret;
@@ -308,22 +295,22 @@ int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
if (op == AS_COMMAND_UNLOCK) {
/* Unlock doesn't require a lock first */
- ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
} else {
u64 lock_addr = lock_region(kbdev, vpfn, nr);
/* Lock the region that needs to be updated */
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_LO),
- lock_addr & 0xFFFFFFFFUL, kctx);
+ lock_addr & 0xFFFFFFFFUL);
kbase_reg_write(kbdev, MMU_AS_REG(as->number, AS_LOCKADDR_HI),
- (lock_addr >> 32) & 0xFFFFFFFFUL, kctx);
- write_cmd(kbdev, as->number, AS_COMMAND_LOCK, kctx);
+ (lock_addr >> 32) & 0xFFFFFFFFUL);
+ write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
/* Run the MMU operation */
- write_cmd(kbdev, as->number, op, kctx);
+ write_cmd(kbdev, as->number, op);
/* Wait for the flush to complete */
- ret = wait_ready(kbdev, as->number, kctx);
+ ret = wait_ready(kbdev, as->number);
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9630)) {
/* Issue an UNLOCK command to ensure that valid page
@@ -340,8 +327,8 @@ int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
commands in order to flush the MMU/uTLB,
see PRLAM-8812.
*/
- write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
- write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK, kctx);
+ write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+ write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
}
}
@@ -349,7 +336,7 @@ int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
}
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+ enum kbase_mmu_fault_type type)
{
unsigned long flags;
u32 pf_bf_mask;
@@ -369,14 +356,14 @@ void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
pf_bf_mask |= MMU_BUS_ERROR(as->number);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask, kctx);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), pf_bf_mask);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx, enum kbase_mmu_fault_type type)
+ enum kbase_mmu_fault_type type)
{
unsigned long flags;
u32 irq_mask;
@@ -392,14 +379,14 @@ void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
if (kbdev->irq_reset_flush)
goto unlock;
- irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK), kctx) |
+ irq_mask = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_MASK)) |
MMU_PAGE_FAULT(as->number);
if (type == KBASE_MMU_FAULT_TYPE_BUS ||
type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
irq_mask |= MMU_BUS_ERROR(as->number);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask, kctx);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), irq_mask);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
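
With the kctx parameter gone, the lock/operate/wait sequence in kbase_mmu_hw_do_operation() reads more plainly. The sketch below models only that ordering, including the double UNLOCK issued on hardware with BASE_HW_ISSUE_9630 (see PRLAM-8812); the command codes are illustrative and the accessors are print stubs, not the kbase_reg_* API.

#include <stdbool.h>
#include <stdio.h>

enum { CMD_UNLOCK = 1, CMD_LOCK = 2, CMD_FLUSH_MEM = 8 };	/* illustrative */

static void write_cmd(int as, int cmd)
{
	printf("AS%d: AS_COMMAND <- %d\n", as, cmd);	/* stub register write */
}

static int wait_ready(int as)
{
	/* The real loop polls AS_STATUS until AS_ACTIVE clears. */
	(void)as;
	return 0;
}

static int do_operation(int as, int op, bool has_issue_9630)
{
	int ret;

	write_cmd(as, CMD_LOCK);	/* lock the region to be updated */
	write_cmd(as, op);		/* run the MMU operation */
	ret = wait_ready(as);		/* wait for the flush to finish */

	if (has_issue_9630) {
		/* Two UNLOCKs flush the MMU/uTLB (PRLAM-8812). */
		write_cmd(as, CMD_UNLOCK);
		write_cmd(as, CMD_UNLOCK);
	}
	return ret;
}

int main(void)
{
	return do_operation(0, CMD_FLUSH_MEM, true);
}
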
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
index 2ed7dfdde6cc2e..51a10a231df08a 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,9 +29,9 @@
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
-static u64 always_on_get_core_mask(struct kbase_device *kbdev)
+static bool always_on_shaders_needed(struct kbase_device *kbdev)
{
- return kbdev->gpu_props.props.raw_props.shader_present;
+ return true;
}
static bool always_on_get_core_active(struct kbase_device *kbdev)
@@ -59,7 +59,7 @@ const struct kbase_pm_policy kbase_pm_always_on_policy_ops = {
"always_on", /* name */
always_on_init, /* init */
always_on_term, /* term */
- always_on_get_core_mask, /* get_core_mask */
+ always_on_shaders_needed, /* shaders_needed */
always_on_get_core_active, /* get_core_active */
0u, /* flags */
KBASE_PM_POLICY_ID_ALWAYS_ON, /* id */
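
The vtable change above swaps the u64-returning get_core_mask callback for a boolean shaders_needed one. A hedged sketch of a policy definition under the new contract follows; the struct layout here is a simplification, as the real struct also carries flags and an id.

#include <stdbool.h>
#include <stdio.h>

struct kbase_device;	/* opaque in this sketch */

/* Simplified vtable: shaders_needed answers a yes/no question where
 * get_core_mask used to return a core bitmask. */
struct pm_policy_sketch {
	const char *name;
	void (*init)(struct kbase_device *);
	void (*term)(struct kbase_device *);
	bool (*shaders_needed)(struct kbase_device *);
	bool (*get_core_active)(struct kbase_device *);
};

static void nop(struct kbase_device *kbdev) { (void)kbdev; }
static bool yes(struct kbase_device *kbdev) { (void)kbdev; return true; }

/* An always_on-style policy under the new interface. */
static const struct pm_policy_sketch always_on_sketch = {
	.name = "always_on",
	.init = nop,
	.term = nop,
	.shaders_needed = yes,
	.get_core_active = yes,
};

int main(void)
{
	printf("policy %s: shaders_needed=%d\n", always_on_sketch.name,
	       always_on_sketch.shaders_needed(NULL));
	return 0;
}
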
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
index d61d0d0e3640f4..e7927cf82e5a71 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_always_on.h
@@ -1,7 +1,6 @@
-
/*
*
- * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,13 +36,13 @@
*
* - When KBase indicates that the GPU will be powered up, but we don't yet
* know which Job Chains are to be run:
- * All Shader Cores are powered up, regardless of whether or not they will
- * be needed later.
+ * Shader Cores are powered up, regardless of whether or not they will be
+ * needed later.
*
- * - When KBase indicates that a set of Shader Cores are needed to submit the
- * currently queued Job Chains:
- * All Shader Cores are kept powered, regardless of whether or not they will
- * be needed
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ * queued Job Chains:
+ * Shader Cores are kept powered, regardless of whether or not they will be
+ * needed
*
* - When KBase indicates that the GPU need not be powered:
* The Shader Cores are kept powered, regardless of whether or not they will
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
index 0d899ccef1b4e6..a448a3b680b8f0 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_backend.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -179,11 +179,7 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
kbase_pm_clock_on(kbdev, is_resume);
/* Update core status as required by the policy */
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START);
kbase_pm_update_cores_state(kbdev);
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END);
/* NOTE: We don't wait to reach the desired state, since running atoms
* will wait for that state to be reached anyway */
@@ -201,11 +197,7 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
#if !PLATFORM_POWER_DOWN_ONLY
/* Wait for power transitions to complete. We do this with no locks held
* so that we don't deadlock with any pending workqueues */
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START);
kbase_pm_check_transitions_sync(kbdev);
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END);
#endif /* !PLATFORM_POWER_DOWN_ONLY */
mutex_lock(&js_devdata->runpool_mutex);
@@ -233,10 +225,6 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#endif /* !PLATFORM_POWER_DOWN_ONLY */
- /* Consume any change-state events */
- kbase_timeline_pm_check_handle_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
-
/* Disable interrupts and turn the clock off */
if (!kbase_pm_clock_off(kbdev, backend->poweroff_is_suspend)) {
/*
@@ -252,7 +240,10 @@ static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
/* Turn off clock now that faults have been handled. We
* dropped locks so poweron_required may have changed -
- * power back on if this is the case.*/
+ * power back on if this is the case (effectively only
+ * the interrupts are re-enabled in that case, as the
+ * clocks to the GPU were not withdrawn yet).
+ */
if (backend->poweron_required)
kbase_pm_clock_on(kbdev, false);
else
@@ -422,21 +413,12 @@ void kbase_pm_power_changed(struct kbase_device *kbdev)
bool cores_are_available;
unsigned long flags;
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END);
-
- if (cores_are_available) {
- /* Log timelining information that a change in state has
- * completed */
- kbase_timeline_pm_handle_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
+ if (cores_are_available)
kbase_backend_slot_update(kbdev);
- }
+
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
index 5b369fb1b51566..d4e8e42c6360f8 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,145 +28,65 @@
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
-static const struct kbase_pm_ca_policy *const policy_list[] = {
- &kbase_pm_ca_fixed_policy_ops,
-#ifdef CONFIG_MALI_DEVFREQ
- &kbase_pm_ca_devfreq_policy_ops,
-#endif
-#if !MALI_CUSTOMER_RELEASE
- &kbase_pm_ca_random_policy_ops
-#endif
-};
-
-/**
- * POLICY_COUNT - The number of policies available in the system.
- *
- * This is derived from the number of functions listed in policy_list.
- */
-#define POLICY_COUNT (sizeof(policy_list)/sizeof(*policy_list))
-
int kbase_pm_ca_init(struct kbase_device *kbdev)
{
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
- kbdev->pm.backend.ca_current_policy = policy_list[0];
-
- kbdev->pm.backend.ca_current_policy->init(kbdev);
+ struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+#ifdef CONFIG_MALI_DEVFREQ
+ if (kbdev->current_core_mask)
+ pm_backend->ca_cores_enabled = kbdev->current_core_mask;
+ else
+ pm_backend->ca_cores_enabled =
+ kbdev->gpu_props.props.raw_props.shader_present;
+#endif
+ pm_backend->ca_in_transition = false;
return 0;
}
void kbase_pm_ca_term(struct kbase_device *kbdev)
{
- kbdev->pm.backend.ca_current_policy->term(kbdev);
}
-int kbase_pm_ca_list_policies(const struct kbase_pm_ca_policy * const **list)
-{
- if (!list)
- return POLICY_COUNT;
-
- *list = policy_list;
-
- return POLICY_COUNT;
-}
-
-KBASE_EXPORT_TEST_API(kbase_pm_ca_list_policies);
-
-const struct kbase_pm_ca_policy
-*kbase_pm_ca_get_policy(struct kbase_device *kbdev)
-{
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
- return kbdev->pm.backend.ca_current_policy;
-}
-
-KBASE_EXPORT_TEST_API(kbase_pm_ca_get_policy);
-
-void kbase_pm_ca_set_policy(struct kbase_device *kbdev,
- const struct kbase_pm_ca_policy *new_policy)
+#ifdef CONFIG_MALI_DEVFREQ
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
{
- const struct kbase_pm_ca_policy *old_policy;
+ struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
unsigned long flags;
- KBASE_DEBUG_ASSERT(kbdev != NULL);
- KBASE_DEBUG_ASSERT(new_policy != NULL);
-
- KBASE_TRACE_ADD(kbdev, PM_CA_SET_POLICY, NULL, NULL, 0u,
- new_policy->id);
-
- /* During a policy change we pretend the GPU is active */
- /* A suspend won't happen here, because we're in a syscall from a
- * userspace thread */
- kbase_pm_context_active(kbdev);
-
- mutex_lock(&kbdev->pm.lock);
-
- /* Remove the policy to prevent IRQ handlers from working on it */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- old_policy = kbdev->pm.backend.ca_current_policy;
- kbdev->pm.backend.ca_current_policy = NULL;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- if (old_policy->term)
- old_policy->term(kbdev);
- if (new_policy->init)
- new_policy->init(kbdev);
+ pm_backend->ca_cores_enabled = core_mask;
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbdev->pm.backend.ca_current_policy = new_policy;
-
- /* If any core power state changes were previously attempted, but
- * couldn't be made because the policy was changing (current_policy was
- * NULL), then re-try them here. */
kbase_pm_update_cores_state_nolock(kbdev);
- kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
- kbdev->shader_ready_bitmap,
- kbdev->shader_transitioning_bitmap);
-
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- mutex_unlock(&kbdev->pm.lock);
-
- /* Now the policy change is finished, we release our fake context active
- * reference */
- kbase_pm_context_idle(kbdev);
+ dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n",
+ pm_backend->ca_cores_enabled);
}
-
-KBASE_EXPORT_TEST_API(kbase_pm_ca_set_policy);
+#endif
u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
{
+ struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
+
lockdep_assert_held(&kbdev->hwaccess_lock);
/* All cores must be enabled when instrumentation is in use */
- if (kbdev->pm.backend.instr_enabled)
- return kbdev->gpu_props.props.raw_props.shader_present &
- kbdev->pm.debug_core_mask_all;
-
- if (kbdev->pm.backend.ca_current_policy == NULL)
+ if (pm_backend->instr_enabled)
return kbdev->gpu_props.props.raw_props.shader_present &
kbdev->pm.debug_core_mask_all;
- return kbdev->pm.backend.ca_current_policy->get_core_mask(kbdev) &
- kbdev->pm.debug_core_mask_all;
+#ifdef CONFIG_MALI_DEVFREQ
+ return pm_backend->ca_cores_enabled & kbdev->pm.debug_core_mask_all;
+#else
+ return kbdev->gpu_props.props.raw_props.shader_present &
+ kbdev->pm.debug_core_mask_all;
+#endif
}
KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);
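
With the CA policy layer removed, the effective shader mask reduces to the masking shown above: the devfreq-selected mask (or shader_present when devfreq is not configured) ANDed with the debug mask, overridden to all present cores while instrumentation runs. A compact model with illustrative values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of the simplified kbase_pm_ca_get_core_mask() logic. */
static uint64_t effective_core_mask(uint64_t shader_present,
				    uint64_t ca_cores_enabled,
				    uint64_t debug_mask,
				    bool instr_enabled)
{
	if (instr_enabled)	/* all cores while instrumentation runs */
		return shader_present & debug_mask;
	return ca_cores_enabled & debug_mask;
}

int main(void)
{
	uint64_t present = 0xFull, enabled = 0x3ull, debug = ~0ull;

	printf("mask = 0x%llx\n", (unsigned long long)
	       effective_core_mask(present, enabled, debug, false)); /* 0x3 */
	return 0;
}
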
-void kbase_pm_ca_update_core_status(struct kbase_device *kbdev, u64 cores_ready,
- u64 cores_transitioning)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- if (kbdev->pm.backend.ca_current_policy != NULL)
- kbdev->pm.backend.ca_current_policy->update_core_status(kbdev,
- cores_ready,
- cores_transitioning);
-}
-
void kbase_pm_ca_instr_enable(struct kbase_device *kbdev)
{
unsigned long flags;
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c
deleted file mode 100644
index 4bb4c400efe794..00000000000000
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_devfreq.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-/*
- * A core availability policy implementing core mask selection from devfreq OPPs
- *
- */
-
-#include <mali_kbase.h>
-#include <mali_kbase_pm.h>
-#include <backend/gpu/mali_kbase_pm_internal.h>
-#include <linux/version.h>
-
-void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
-{
- struct kbasep_pm_ca_policy_devfreq *data =
- &kbdev->pm.backend.ca_policy_data.devfreq;
- unsigned long flags;
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
-
- data->cores_desired = core_mask;
-
- /* Disable any cores that are now unwanted */
- data->cores_enabled &= data->cores_desired;
-
- kbdev->pm.backend.ca_in_transition = true;
-
- /* If there are no cores to be powered off then power on desired cores
- */
- if (!(data->cores_used & ~data->cores_desired)) {
- data->cores_enabled = data->cores_desired;
- kbdev->pm.backend.ca_in_transition = false;
- }
-
- kbase_pm_update_cores_state_nolock(kbdev);
-
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX %llX\n",
- data->cores_desired, data->cores_enabled);
-}
-
-static void devfreq_init(struct kbase_device *kbdev)
-{
- struct kbasep_pm_ca_policy_devfreq *data =
- &kbdev->pm.backend.ca_policy_data.devfreq;
-
- if (kbdev->current_core_mask) {
- data->cores_enabled = kbdev->current_core_mask;
- data->cores_desired = kbdev->current_core_mask;
- } else {
- data->cores_enabled =
- kbdev->gpu_props.props.raw_props.shader_present;
- data->cores_desired =
- kbdev->gpu_props.props.raw_props.shader_present;
- }
- data->cores_used = 0;
- kbdev->pm.backend.ca_in_transition = false;
-}
-
-static void devfreq_term(struct kbase_device *kbdev)
-{
-}
-
-static u64 devfreq_get_core_mask(struct kbase_device *kbdev)
-{
- return kbdev->pm.backend.ca_policy_data.devfreq.cores_enabled;
-}
-
-static void devfreq_update_core_status(struct kbase_device *kbdev,
- u64 cores_ready,
- u64 cores_transitioning)
-{
- struct kbasep_pm_ca_policy_devfreq *data =
- &kbdev->pm.backend.ca_policy_data.devfreq;
-
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- data->cores_used = cores_ready | cores_transitioning;
-
- /* If in desired state then clear transition flag */
- if (data->cores_enabled == data->cores_desired)
- kbdev->pm.backend.ca_in_transition = false;
-
- /* If all undesired cores are now off then power on desired cores.
- * The direct comparison against cores_enabled limits potential
- * recursion to one level */
- if (!(data->cores_used & ~data->cores_desired) &&
- data->cores_enabled != data->cores_desired) {
- data->cores_enabled = data->cores_desired;
-
- kbase_pm_update_cores_state_nolock(kbdev);
-
- kbdev->pm.backend.ca_in_transition = false;
- }
-}
-
-/*
- * The struct kbase_pm_ca_policy structure for the devfreq core availability
- * policy.
- *
- * This is the static structure that defines the devfreq core availability power
- * policy's callback and name.
- */
-const struct kbase_pm_ca_policy kbase_pm_ca_devfreq_policy_ops = {
- "devfreq", /* name */
- devfreq_init, /* init */
- devfreq_term, /* term */
- devfreq_get_core_mask, /* get_core_mask */
- devfreq_update_core_status, /* update_core_status */
- 0u, /* flags */
- KBASE_PM_CA_POLICY_ID_DEVFREQ, /* id */
-};
-
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c
deleted file mode 100644
index 1eea7e877f61c9..00000000000000
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-/*
- * A power policy implementing fixed core availability
- */
-
-#include <mali_kbase.h>
-#include <mali_kbase_pm.h>
-
-static void fixed_init(struct kbase_device *kbdev)
-{
- kbdev->pm.backend.ca_in_transition = false;
-}
-
-static void fixed_term(struct kbase_device *kbdev)
-{
- CSTD_UNUSED(kbdev);
-}
-
-static u64 fixed_get_core_mask(struct kbase_device *kbdev)
-{
- return kbdev->gpu_props.props.raw_props.shader_present;
-}
-
-static void fixed_update_core_status(struct kbase_device *kbdev,
- u64 cores_ready,
- u64 cores_transitioning)
-{
- CSTD_UNUSED(kbdev);
- CSTD_UNUSED(cores_ready);
- CSTD_UNUSED(cores_transitioning);
-}
-
-/*
- * The struct kbase_pm_policy structure for the fixed power policy.
- *
- * This is the static structure that defines the fixed power policy's callback
- * and name.
- */
-const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops = {
- "fixed", /* name */
- fixed_init, /* init */
- fixed_term, /* term */
- fixed_get_core_mask, /* get_core_mask */
- fixed_update_core_status, /* update_core_status */
- 0u, /* flags */
- KBASE_PM_CA_POLICY_ID_FIXED, /* id */
-};
-
-KBASE_EXPORT_TEST_API(kbase_pm_ca_fixed_policy_ops);
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h
deleted file mode 100644
index 68a2eac4a12177..00000000000000
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_ca_fixed.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2013-2015 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-/*
- * A power policy implementing fixed core availability
- */
-
-#ifndef MALI_KBASE_PM_CA_FIXED_H
-#define MALI_KBASE_PM_CA_FIXED_H
-
-/**
- * struct kbasep_pm_ca_policy_fixed - Private structure for policy instance data
- *
- * @dummy: Dummy member - no state is needed
- *
- * This contains data that is private to the particular power policy that is
- * active.
- */
-struct kbasep_pm_ca_policy_fixed {
- int dummy;
-};
-
-extern const struct kbase_pm_ca_policy kbase_pm_ca_fixed_policy_ops;
-
-#endif /* MALI_KBASE_PM_CA_FIXED_H */
-
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
index 602e175dbbb9c5..e90c44def25e6a 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,22 +29,14 @@
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
-static u64 coarse_demand_get_core_mask(struct kbase_device *kbdev)
+static bool coarse_demand_shaders_needed(struct kbase_device *kbdev)
{
- if (kbdev->pm.active_count == 0)
- return 0;
-
- return kbdev->gpu_props.props.raw_props.shader_present;
+ return kbase_pm_is_active(kbdev);
}
static bool coarse_demand_get_core_active(struct kbase_device *kbdev)
{
- if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
- kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
- && !kbdev->tiler_inuse_cnt)
- return false;
-
- return true;
+ return kbase_pm_is_active(kbdev);
}
static void coarse_demand_init(struct kbase_device *kbdev)
@@ -66,7 +58,7 @@ const struct kbase_pm_policy kbase_pm_coarse_demand_policy_ops = {
"coarse_demand", /* name */
coarse_demand_init, /* init */
coarse_demand_term, /* term */
- coarse_demand_get_core_mask, /* get_core_mask */
+ coarse_demand_shaders_needed, /* shaders_needed */
coarse_demand_get_core_active, /* get_core_active */
0u, /* flags */
KBASE_PM_POLICY_ID_COARSE_DEMAND, /* id */
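
For reference, the ops tables in this file now populate a boolean shaders_needed callback where a u64 get_core_mask callback used to sit; per-core selection moves out of the policies entirely. A minimal stand-alone sketch of the new vtable shape (simplified: the real struct kbase_pm_policy in mali_kbase_pm_defs.h also carries init/term hooks, flags and an ID):

#include <stdbool.h>
#include <stdio.h>

struct kbase_device;            /* opaque in this sketch */

/* Trimmed-down policy vtable. Before this series, the second member
 * was `u64 (*get_core_mask)(struct kbase_device *)`; policies now
 * only answer yes/no and the core-availability code picks the mask. */
struct kbase_pm_policy {
        const char *name;
        bool (*shaders_needed)(struct kbase_device *kbdev);
        bool (*get_core_active)(struct kbase_device *kbdev);
};

static bool demo_active(struct kbase_device *kbdev)
{
        (void)kbdev;            /* a real policy would inspect kbdev */
        return true;
}

static const struct kbase_pm_policy demo_policy_ops = {
        .name            = "demo",
        .shaders_needed  = demo_active,
        .get_core_active = demo_active,
};

int main(void)
{
        printf("policy %s: shaders_needed=%d\n", demo_policy_ops.name,
               demo_policy_ops.shaders_needed(NULL));
        return 0;
}
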
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
index f2b49eb4bcacb2..304e5d7fa32da8 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_coarse_demand.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,11 +35,11 @@
* characteristics:
* - When KBase indicates that the GPU will be powered up, but we don't yet
* know which Job Chains are to be run:
- * - All Shader Cores are powered up, regardless of whether or not they will
- * be needed later.
- * - When KBase indicates that a set of Shader Cores are needed to submit the
- * currently queued Job Chains:
- * - All Shader Cores are kept powered, regardless of whether or not they will
+ * - Shader Cores are powered up, regardless of whether or not they will be
+ * needed later.
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ * queued Job Chains:
+ * - Shader Cores are kept powered, regardless of whether or not they will
* be needed
* - When KBase indicates that the GPU need not be powered:
* - The Shader Cores are powered off, and the GPU itself is powered off too.
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
index 6dddb07d980dee..7fe8eb3cc3a2c9 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,12 +27,6 @@
#ifndef _KBASE_PM_HWACCESS_DEFS_H_
#define _KBASE_PM_HWACCESS_DEFS_H_
-#include "mali_kbase_pm_ca_fixed.h"
-#include "mali_kbase_pm_ca_devfreq.h"
-#if !MALI_CUSTOMER_RELEASE
-#include "mali_kbase_pm_ca_random.h"
-#endif
-
#include "mali_kbase_pm_always_on.h"
#include "mali_kbase_pm_coarse_demand.h"
#include "mali_kbase_pm_demand.h"
@@ -71,58 +65,67 @@ enum kbase_pm_core_type {
};
/**
- * struct kbasep_pm_metrics_data - Metrics data collected for use by the power
- * management framework.
+ * struct kbasep_pm_metrics - Metrics data collected for use by the power
+ * management framework.
*
- * @time_period_start: time at which busy/idle measurements started
* @time_busy: number of ns the GPU was busy executing jobs since the
* @time_period_start timestamp.
* @time_idle: number of ns the GPU was not executing jobs since the
* @time_period_start timestamp.
- * @prev_busy: busy time in ns of previous time period.
- * Updated when metrics are reset.
- * @prev_idle: idle time in ns of previous time period
- * Updated when metrics are reset.
- * @gpu_active: true when the GPU is executing jobs. false when
- * not. Updated when the job scheduler informs us a job in submitted
- * or removed from a GPU slot.
* @busy_cl: number of ns the GPU was busy executing CL jobs. Note that
* if two CL jobs were active for 400ns, this value would be updated
* with 800.
* @busy_gl: number of ns the GPU was busy executing GL jobs. Note that
* if two GL jobs were active for 400ns, this value would be updated
* with 800.
+ */
+struct kbasep_pm_metrics {
+ u32 time_busy;
+ u32 time_idle;
+ u32 busy_cl[2];
+ u32 busy_gl;
+};
+
+/**
+ * struct kbasep_pm_metrics_state - State required to collect the metrics in
+ * struct kbasep_pm_metrics
+ * @time_period_start: time at which busy/idle measurements started
+ * @gpu_active: true when the GPU is executing jobs. false when
+ * not. Updated when the job scheduler informs us a job is submitted
+ * to or removed from a GPU slot.
* @active_cl_ctx: number of CL jobs active on the GPU. Array is per-device.
* @active_gl_ctx: number of GL jobs active on the GPU. Array is per-slot. As
* GL jobs never run on slot 2, this slot is not recorded.
* @lock: spinlock protecting the kbasep_pm_metrics_state structure
+ * @platform_data: pointer to data controlled by platform specific code
+ * @kbdev: pointer to kbase device for which metrics are collected
+ * @values: The current values of the power management metrics. The
+ * kbase_pm_get_dvfs_metrics() function is used to compare these
+ * current values with the saved values from a previous invocation.
* @timer: timer to regularly make DVFS decisions based on the power
* management metrics.
* @timer_active: boolean indicating @timer is running
- * @platform_data: pointer to data controlled by platform specific code
- * @kbdev: pointer to kbase device for which metrics are collected
- *
+ * @dvfs_last: values of the PM metrics from the last DVFS tick
+ * @dvfs_diff: difference between the current and previous PM metrics.
*/
-struct kbasep_pm_metrics_data {
+struct kbasep_pm_metrics_state {
ktime_t time_period_start;
- u32 time_busy;
- u32 time_idle;
- u32 prev_busy;
- u32 prev_idle;
bool gpu_active;
- u32 busy_cl[2];
- u32 busy_gl;
u32 active_cl_ctx[2];
u32 active_gl_ctx[2]; /* GL jobs can only run on 2 of the 3 job slots */
spinlock_t lock;
+ void *platform_data;
+ struct kbase_device *kbdev;
+
+ struct kbasep_pm_metrics values;
+
#ifdef CONFIG_MALI_MIDGARD_DVFS
struct hrtimer timer;
bool timer_active;
+ struct kbasep_pm_metrics dvfs_last;
+ struct kbasep_pm_metrics dvfs_diff;
#endif
-
- void *platform_data;
- struct kbase_device *kbdev;
};
union kbase_pm_policy_data {
@@ -135,25 +138,14 @@ union kbase_pm_policy_data {
#endif
};
-union kbase_pm_ca_policy_data {
- struct kbasep_pm_ca_policy_fixed fixed;
- struct kbasep_pm_ca_policy_devfreq devfreq;
-#if !MALI_CUSTOMER_RELEASE
- struct kbasep_pm_ca_policy_random random;
-#endif
-};
-
/**
* struct kbase_pm_backend_data - Data stored per device for power management.
*
* This structure contains data for the power management framework. There is one
* instance of this structure per device in the system.
*
- * @ca_current_policy: The policy that is currently actively controlling core
- * availability.
* @pm_current_policy: The policy that is currently actively controlling the
* power state.
- * @ca_policy_data: Private data for current CA policy
* @pm_policy_data: Private data for current PM policy
* @ca_in_transition: Flag indicating when core availability policy is
* transitioning cores. The core availability policy must
@@ -243,20 +235,17 @@ union kbase_pm_ca_policy_data {
* &struct kbase_pm_callback_conf
* @callback_power_runtime_idle: Optional callback when the GPU may be idle. See
* &struct kbase_pm_callback_conf
+ * @ca_cores_enabled: Cores that are currently available
*
* Note:
- * During an IRQ, @ca_current_policy or @pm_current_policy can be NULL when the
- * policy is being changed with kbase_pm_ca_set_policy() or
- * kbase_pm_set_policy(). The change is protected under
- * kbase_device.pm.power_change_lock. Direct access to this
- * from IRQ context must therefore check for NULL. If NULL, then
- * kbase_pm_ca_set_policy() or kbase_pm_set_policy() will re-issue the policy
- * functions that would have been done under IRQ.
+ * During an IRQ, @pm_current_policy can be NULL when the policy is being
+ * changed with kbase_pm_set_policy(). The change is protected under
+ * kbase_device.pm.power_change_lock. Direct access to this from IRQ context
+ * must therefore check for NULL. If NULL, then kbase_pm_set_policy() will
+ * re-issue the policy functions that would have been done under IRQ.
*/
struct kbase_pm_backend_data {
- const struct kbase_pm_ca_policy *ca_current_policy;
const struct kbase_pm_policy *pm_current_policy;
- union kbase_pm_ca_policy_data ca_policy_data;
union kbase_pm_policy_data pm_policy_data;
bool ca_in_transition;
bool reset_done;
@@ -291,7 +280,7 @@ struct kbase_pm_backend_data {
spinlock_t gpu_powered_lock;
- struct kbasep_pm_metrics_data metrics;
+ struct kbasep_pm_metrics_state metrics;
int gpu_poweroff_pending;
int shader_poweroff_pending_time;
@@ -322,6 +311,10 @@ struct kbase_pm_backend_data {
int (*callback_power_runtime_on)(struct kbase_device *kbdev);
void (*callback_power_runtime_off)(struct kbase_device *kbdev);
int (*callback_power_runtime_idle)(struct kbase_device *kbdev);
+
+#ifdef CONFIG_MALI_DEVFREQ
+ u64 ca_cores_enabled;
+#endif
};
@@ -347,7 +340,7 @@ typedef u32 kbase_pm_policy_flags;
* @name: The name of this policy
* @init: Function called when the policy is selected
* @term: Function called when the policy is unselected
- * @get_core_mask: Function called to get the current shader core mask
+ * @shaders_needed: Function called to find out if shader cores are needed
* @get_core_active: Function called to get the current overall GPU power
* state
* @flags: Field indicating flags for this policy
@@ -382,26 +375,28 @@ struct kbase_pm_policy {
void (*term)(struct kbase_device *kbdev);
/**
- * Function called to get the current shader core mask
+ * Function called to find out if shader cores are needed
+ *
+ * This needs to at least satisfy kbdev->shader_needed_cnt, and so must
+ * never return false when kbdev->shader_needed_cnt > 0.
*
- * The returned mask should meet or exceed (kbdev->shader_needed_bitmap
- * | kbdev->shader_inuse_bitmap).
+ * Note that kbdev->pm.active_count being 0 is not a good indicator
+ * that kbdev->shader_needed_cnt is also 0 - refer to the documentation
+ * on the active_count member in struct kbase_pm_device_data and
+ * kbase_pm_is_active().
*
* @kbdev: The kbase device structure for the device (must be a
* valid pointer)
*
- * Return: The mask of shader cores to be powered
+ * Return: true if shader cores are needed, false otherwise
*/
- u64 (*get_core_mask)(struct kbase_device *kbdev);
+ bool (*shaders_needed)(struct kbase_device *kbdev);
/**
* Function called to get the current overall GPU power state
*
- * This function should consider the state of kbdev->pm.active_count. If
- * this count is greater than 0 then there is at least one active
- * context on the device and the GPU should be powered. If it is equal
- * to 0 then there are no active contexts and the GPU could be powered
- * off if desired.
+ * This function must meet or exceed the requirements for power
+ * indicated by kbase_pm_is_active().
*
* @kbdev: The kbase device structure for the device (must be a
* valid pointer)
@@ -414,111 +409,4 @@ struct kbase_pm_policy {
enum kbase_pm_policy_id id;
};
-
-enum kbase_pm_ca_policy_id {
- KBASE_PM_CA_POLICY_ID_FIXED = 1,
- KBASE_PM_CA_POLICY_ID_DEVFREQ,
- KBASE_PM_CA_POLICY_ID_RANDOM
-};
-
-typedef u32 kbase_pm_ca_policy_flags;
-
-/**
- * Maximum length of a CA policy names
- */
-#define KBASE_PM_CA_MAX_POLICY_NAME_LEN 15
-
-/**
- * struct kbase_pm_ca_policy - Core availability policy structure.
- *
- * Each core availability policy exposes a (static) instance of this structure
- * which contains function pointers to the policy's methods.
- *
- * @name: The name of this policy
- * @init: Function called when the policy is selected
- * @term: Function called when the policy is unselected
- * @get_core_mask: Function called to get the current shader core
- * availability mask
- * @update_core_status: Function called to update the current core status
- * @flags: Field indicating flags for this policy
- * @id: Field indicating an ID for this policy. This is not
- * necessarily the same as its index in the list returned
- * by kbase_pm_list_policies().
- * It is used purely for debugging.
- */
-struct kbase_pm_ca_policy {
- char name[KBASE_PM_CA_MAX_POLICY_NAME_LEN + 1];
-
- /**
- * Function called when the policy is selected
- *
- * This should initialize the kbdev->pm.ca_policy_data structure. It
- * should not attempt to make any changes to hardware state.
- *
- * It is undefined what state the cores are in when the function is
- * called.
- *
- * @kbdev The kbase device structure for the device (must be a
- * valid pointer)
- */
- void (*init)(struct kbase_device *kbdev);
-
- /**
- * Function called when the policy is unselected.
- *
- * @kbdev The kbase device structure for the device (must be a
- * valid pointer)
- */
- void (*term)(struct kbase_device *kbdev);
-
- /**
- * Function called to get the current shader core availability mask
- *
- * When a change in core availability is occurring, the policy must set
- * kbdev->pm.ca_in_transition to true. This is to indicate that
- * reporting changes in power state cannot be optimized out, even if
- * kbdev->pm.desired_shader_state remains unchanged. This must be done
- * by any functions internal to the Core Availability Policy that change
- * the return value of kbase_pm_ca_policy::get_core_mask.
- *
- * @kbdev The kbase device structure for the device (must be a
- * valid pointer)
- *
- * Return: The current core availability mask
- */
- u64 (*get_core_mask)(struct kbase_device *kbdev);
-
- /**
- * Function called to update the current core status
- *
- * If none of the cores in core group 0 are ready or transitioning, then
- * the policy must ensure that the next call to get_core_mask does not
- * return 0 for all cores in core group 0. It is an error to disable
- * core group 0 through the core availability policy.
- *
- * When a change in core availability has finished, the policy must set
- * kbdev->pm.ca_in_transition to false. This is to indicate that
- * changes in power state can once again be optimized out when
- * kbdev->pm.desired_shader_state is unchanged.
- *
- * @kbdev: The kbase device structure for the device
- * (must be a valid pointer)
- * @cores_ready: The mask of cores currently powered and
- * ready to run jobs
- * @cores_transitioning: The mask of cores currently transitioning
- * power state
- */
- void (*update_core_status)(struct kbase_device *kbdev, u64 cores_ready,
- u64 cores_transitioning);
-
- kbase_pm_ca_policy_flags flags;
-
- /**
- * Field indicating an ID for this policy. This is not necessarily the
- * same as its index in the list returned by kbase_pm_list_policies().
- * It is used purely for debugging.
- */
- enum kbase_pm_ca_policy_id id;
-};
-
#endif /* _KBASE_PM_HWACCESS_DEFS_H_ */
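
The struct split above is the core of the metrics rework: kbasep_pm_metrics is now a plain block of cumulative counters that can be snapshotted and diffed, while kbasep_pm_metrics_state keeps the collection machinery (lock, timer, timestamps). A stand-alone sketch of the snapshot-and-diff pattern this enables (field names follow the struct above; the sample values are made up):

#include <stdint.h>
#include <stdio.h>

/* Cumulative counters, in (ns >> KBASE_PM_TIME_SHIFT) units, matching
 * the layout of struct kbasep_pm_metrics above. */
struct kbasep_pm_metrics {
        uint32_t time_busy;
        uint32_t time_idle;
        uint32_t busy_cl[2];
        uint32_t busy_gl;
};

/* Per-period delta: unsigned subtraction stays correct even when a
 * counter wraps, so the counters never need resetting. */
static void metrics_diff(const struct kbasep_pm_metrics *cur,
                         struct kbasep_pm_metrics *last,
                         struct kbasep_pm_metrics *diff)
{
        diff->time_busy  = cur->time_busy  - last->time_busy;
        diff->time_idle  = cur->time_idle  - last->time_idle;
        diff->busy_cl[0] = cur->busy_cl[0] - last->busy_cl[0];
        diff->busy_cl[1] = cur->busy_cl[1] - last->busy_cl[1];
        diff->busy_gl    = cur->busy_gl    - last->busy_gl;
        *last = *cur;   /* the next call measures from here */
}

int main(void)
{
        struct kbasep_pm_metrics cur  = { .time_busy = 900, .time_idle = 100 };
        struct kbasep_pm_metrics last = { 0 };
        struct kbasep_pm_metrics diff;

        metrics_diff(&cur, &last, &diff);
        printf("period: busy=%u idle=%u\n", diff.time_busy, diff.time_idle);
        return 0;
}

This is why prev_busy/prev_idle and the 100 ms reset window disappear later in the series: each consumer keeps its own "last" snapshot instead of sharing reset state.
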
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
index e0edddc2504d4a..01727d698a4c24 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,24 +29,14 @@
#include <mali_kbase.h>
#include <mali_kbase_pm.h>
-static u64 demand_get_core_mask(struct kbase_device *kbdev)
+static bool demand_shaders_needed(struct kbase_device *kbdev)
{
- u64 desired = kbdev->shader_needed_bitmap | kbdev->shader_inuse_bitmap;
-
- if (0 == kbdev->pm.active_count)
- return 0;
-
- return desired;
+ return (kbdev->shader_needed_cnt > 0);
}
static bool demand_get_core_active(struct kbase_device *kbdev)
{
- if (0 == kbdev->pm.active_count && !(kbdev->shader_needed_bitmap |
- kbdev->shader_inuse_bitmap) && !kbdev->tiler_needed_cnt
- && !kbdev->tiler_inuse_cnt)
- return false;
-
- return true;
+ return kbase_pm_is_active(kbdev);
}
static void demand_init(struct kbase_device *kbdev)
@@ -69,7 +59,7 @@ const struct kbase_pm_policy kbase_pm_demand_policy_ops = {
"demand", /* name */
demand_init, /* init */
demand_term, /* term */
- demand_get_core_mask, /* get_core_mask */
+ demand_shaders_needed, /* shaders_needed */
demand_get_core_active, /* get_core_active */
0u, /* flags */
KBASE_PM_POLICY_ID_DEMAND, /* id */
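
Side by side, the two rewritten predicates differ only in what they test, and that is the whole demand/coarse_demand distinction after the rework. A toy comparison (the two fields are simplified stand-ins for kbdev->pm.active_count and kbdev->shader_needed_cnt, not the real kbase structures):

#include <stdbool.h>
#include <stdio.h>

struct kbase_device {
        int pm_active_count;    /* contexts holding a PM reference */
        int shader_needed_cnt;  /* atoms that actually need shaders */
};

/* demand: shaders come on only once an atom needs them. */
static bool demand_shaders_needed(struct kbase_device *kbdev)
{
        return kbdev->shader_needed_cnt > 0;
}

/* coarse_demand: shaders come on as soon as the GPU is active. */
static bool coarse_demand_shaders_needed(struct kbase_device *kbdev)
{
        return kbdev->pm_active_count > 0;
}

int main(void)
{
        /* Active GPU with no shader work queued yet: the policies
         * disagree, which is exactly the power/latency trade-off. */
        struct kbase_device dev = { .pm_active_count = 1 };

        printf("demand=%d coarse_demand=%d\n",
               demand_shaders_needed(&dev),
               coarse_demand_shaders_needed(&dev));
        return 0;
}
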
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h
index 5ee182463bd4af..4b05e6d8a63d59 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_demand.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,9 +37,9 @@
* know which Job Chains are to be run:
* - The Shader Cores are not powered up
*
- * - When KBase indicates that a set of Shader Cores are needed to submit the
- * currently queued Job Chains:
- * - Only those Shader Cores are powered up
+ * - When KBase indicates that Shader Cores are needed to submit the currently
+ * queued Job Chains:
+ * - Shader Cores are powered up
*
* - When KBase indicates that the GPU need not be powered:
* - The Shader Cores are powered off, and the GPU itself is powered off too.
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
index 44803abee57421..cdd5cf741c5679 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_driver.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -135,19 +135,16 @@ static void mali_cci_flush_l2(struct kbase_device *kbdev)
kbase_reg_write(kbdev,
GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAN_INV_CACHES,
- NULL);
+ GPU_COMMAND_CLEAN_INV_CACHES);
raw = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
- NULL);
+ GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
/* Wait for cache flush to complete before continuing, exit on
* gpu resets or loop expiry. */
while (((raw & mask) == 0) && --loops) {
raw = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
- NULL);
+ GPU_CONTROL_REG(GPU_IRQ_RAWSTAT));
}
}
#endif
@@ -238,10 +235,10 @@ static void kbase_pm_invoke(struct kbase_device *kbdev,
}
if (lo != 0)
- kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg), lo);
if (hi != 0)
- kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi, NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(reg + 4), hi);
}
/**
@@ -269,24 +266,20 @@ static u64 kbase_pm_get_state(struct kbase_device *kbdev,
KBASE_DEBUG_ASSERT(reg);
- lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg), NULL);
- hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4), NULL);
+ lo = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg));
+ hi = kbase_reg_read(kbdev, GPU_CONTROL_REG(reg + 4));
return (((u64) hi) << 32) | ((u64) lo);
}
void kbasep_pm_init_core_use_bitmaps(struct kbase_device *kbdev)
{
- kbdev->shader_inuse_bitmap = 0;
- kbdev->shader_needed_bitmap = 0;
kbdev->shader_available_bitmap = 0;
kbdev->tiler_available_bitmap = 0;
kbdev->l2_users_count = 0;
kbdev->l2_available_bitmap = 0;
kbdev->tiler_needed_cnt = 0;
- kbdev->tiler_inuse_cnt = 0;
-
- memset(kbdev->shader_needed_cnt, 0, sizeof(kbdev->shader_needed_cnt));
+ kbdev->shader_needed_cnt = 0;
}
/**
@@ -438,19 +431,21 @@ static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
present = kbase_pm_get_present_cores(kbdev, type);
trans = kbase_pm_get_trans_cores(kbdev, type);
ready = kbase_pm_get_ready_cores(kbdev, type);
+
/* mask off ready from trans in case transitions finished between the
* register reads */
trans &= ~ready;
- if (trans) /* Do not progress if any cores are transitioning */
- return false;
-
powering_on_trans = trans & *powering_on;
- *powering_on = powering_on_trans;
if (available != NULL)
*available = (ready | powering_on_trans) & desired_state;
+ if (trans) /* Do not progress if any cores are transitioning */
+ return false;
+
+ *powering_on = powering_on_trans;
+
/* Update desired state to include the in-use cores. These have to be
* kept powered up because there are jobs running or about to run on
* these cores
@@ -632,15 +627,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
return false;
}
- /* Trace that a change-state is being requested, and that it took
- * (effectively) no time to start it. This is useful for counting how
- * many state changes occurred, in a way that's backwards-compatible
- * with processing the trace data */
- kbase_timeline_pm_send_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
- kbase_timeline_pm_handle_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE);
-
/* If any cores are already powered then, we must keep the caches on */
shader_transitioning_bitmap = kbase_pm_get_trans_cores(kbdev,
KBASE_PM_CORE_SHADER);
@@ -689,9 +675,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
&l2_available_bitmap,
&kbdev->pm.backend.powering_on_l2_state);
- if (kbdev->l2_available_bitmap != l2_available_bitmap)
- KBASE_TIMELINE_POWER_L2(kbdev, l2_available_bitmap);
-
kbdev->l2_available_bitmap = l2_available_bitmap;
@@ -713,27 +696,20 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
in_desired_state &= kbase_pm_transition_core_type(kbdev,
KBASE_PM_CORE_SHADER,
kbdev->pm.backend.desired_shader_state,
- kbdev->shader_inuse_bitmap,
- &shader_available_bitmap,
+ 0, &shader_available_bitmap,
&kbdev->pm.backend.powering_on_shader_state);
- if (kbdev->shader_available_bitmap != shader_available_bitmap) {
+ if (kbdev->shader_available_bitmap != shader_available_bitmap)
KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
NULL, 0u,
(u32) shader_available_bitmap);
- KBASE_TIMELINE_POWER_SHADER(kbdev,
- shader_available_bitmap);
- }
kbdev->shader_available_bitmap = shader_available_bitmap;
- if (kbdev->tiler_available_bitmap != tiler_available_bitmap) {
+ if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE_TILER,
NULL, NULL, 0u,
(u32) tiler_available_bitmap);
- KBASE_TIMELINE_POWER_TILER(kbdev,
- tiler_available_bitmap);
- }
kbdev->tiler_available_bitmap = tiler_available_bitmap;
@@ -742,10 +718,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
kbdev->gpu_props.props.raw_props.tiler_present) {
tiler_available_bitmap = 0;
- if (kbdev->tiler_available_bitmap != tiler_available_bitmap)
- KBASE_TIMELINE_POWER_TILER(kbdev,
- tiler_available_bitmap);
-
kbdev->tiler_available_bitmap = tiler_available_bitmap;
}
@@ -774,13 +746,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
KBASE_TRACE_ADD(kbdev, PM_CORES_AVAILABLE_TILER, NULL, NULL, 0u,
(u32)(kbdev->tiler_available_bitmap &
kbdev->pm.backend.desired_tiler_state));
-
- /* Log timelining information about handling events that power
- * up cores, to match up either with immediate submission either
- * because cores already available, or from PM IRQ */
- if (!in_desired_state)
- kbase_timeline_pm_send_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
}
if (in_desired_state) {
@@ -830,9 +795,6 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
KBASE_TRACE_ADD(kbdev, PM_DESIRED_REACHED_TILER, NULL, NULL, 0u,
(u32)kbdev->pm.backend.desired_tiler_state);
- /* Log timelining information for synchronous waiters */
- kbase_timeline_pm_send_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
/* Wake slow-path waiters. Job scheduler does not use this. */
KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
@@ -841,19 +803,8 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
spin_unlock(&kbdev->pm.backend.gpu_powered_lock);
- /* kbase_pm_ca_update_core_status can cause one-level recursion into
- * this function, so it must only be called once all changes to kbdev
- * have been committed, and after the gpu_powered_lock has been
- * dropped. */
- if (kbdev->shader_ready_bitmap != shader_ready_bitmap ||
- kbdev->shader_transitioning_bitmap != shader_transitioning_bitmap) {
- kbdev->shader_ready_bitmap = shader_ready_bitmap;
- kbdev->shader_transitioning_bitmap =
- shader_transitioning_bitmap;
-
- kbase_pm_ca_update_core_status(kbdev, shader_ready_bitmap,
- shader_transitioning_bitmap);
- }
+ kbdev->shader_ready_bitmap = shader_ready_bitmap;
+ kbdev->shader_transitioning_bitmap = shader_transitioning_bitmap;
/* The core availability policy is not allowed to keep core group 0
* turned off (unless it was changing the l2 power state) */
@@ -916,46 +867,40 @@ void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
dev_err(kbdev->dev, "Current state :\n");
dev_err(kbdev->dev, "\tShader=%08x%08x\n",
kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SHADER_READY_HI), NULL),
+ GPU_CONTROL_REG(SHADER_READY_HI)),
kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SHADER_READY_LO),
- NULL));
+ GPU_CONTROL_REG(SHADER_READY_LO)));
dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_READY_HI), NULL),
+ GPU_CONTROL_REG(TILER_READY_HI)),
kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_READY_LO), NULL));
+ GPU_CONTROL_REG(TILER_READY_LO)));
dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_READY_HI), NULL),
+ GPU_CONTROL_REG(L2_READY_HI)),
kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_READY_LO), NULL));
+ GPU_CONTROL_REG(L2_READY_LO)));
dev_err(kbdev->dev, "Cores transitioning :\n");
dev_err(kbdev->dev, "\tShader=%08x%08x\n",
kbase_reg_read(kbdev, GPU_CONTROL_REG(
- SHADER_PWRTRANS_HI), NULL),
+ SHADER_PWRTRANS_HI)),
kbase_reg_read(kbdev, GPU_CONTROL_REG(
- SHADER_PWRTRANS_LO), NULL));
+ SHADER_PWRTRANS_LO)));
dev_err(kbdev->dev, "\tTiler =%08x%08x\n",
kbase_reg_read(kbdev, GPU_CONTROL_REG(
- TILER_PWRTRANS_HI), NULL),
+ TILER_PWRTRANS_HI)),
kbase_reg_read(kbdev, GPU_CONTROL_REG(
- TILER_PWRTRANS_LO), NULL));
+ TILER_PWRTRANS_LO)));
dev_err(kbdev->dev, "\tL2 =%08x%08x\n",
kbase_reg_read(kbdev, GPU_CONTROL_REG(
- L2_PWRTRANS_HI), NULL),
+ L2_PWRTRANS_HI)),
kbase_reg_read(kbdev, GPU_CONTROL_REG(
- L2_PWRTRANS_LO), NULL));
+ L2_PWRTRANS_LO)));
#if KBASE_GPU_RESET_EN
dev_err(kbdev->dev, "Sending reset to GPU - all running jobs will be lost\n");
if (kbase_prepare_to_reset_gpu(kbdev))
kbase_reset_gpu(kbdev);
#endif /* KBASE_GPU_RESET_EN */
- } else {
- /* Log timelining information that a change in state has
- * completed */
- kbase_timeline_pm_handle_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
}
}
KBASE_EXPORT_TEST_API(kbase_pm_check_transitions_sync);
@@ -970,18 +915,15 @@ void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
* and unmask them all.
*/
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
- NULL);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL,
- NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
- NULL);
- kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0xFFFFFFFF);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0xFFFFFFFF);
}
KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
@@ -995,15 +937,13 @@ void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
*/
lockdep_assert_held(&kbdev->hwaccess_lock);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
- NULL);
- kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
- kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
- NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0);
+ kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0, NULL);
- kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_MASK), 0);
+ kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF);
}
void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
@@ -1027,11 +967,10 @@ KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
{
bool reset_required = is_resume;
- struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
unsigned long flags;
KBASE_DEBUG_ASSERT(NULL != kbdev);
- lockdep_assert_held(&js_devdata->runpool_mutex);
+ lockdep_assert_held(&kbdev->js_data.runpool_mutex);
lockdep_assert_held(&kbdev->pm.lock);
if (kbdev->pm.backend.gpu_powered) {
@@ -1219,10 +1158,10 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
if (!kbdev->hw_quirks_sc)
kbdev->hw_quirks_sc = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(SHADER_CONFIG), NULL);
+ GPU_CONTROL_REG(SHADER_CONFIG));
kbdev->hw_quirks_tiler = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TILER_CONFIG), NULL);
+ GPU_CONTROL_REG(TILER_CONFIG));
/* Set tiler clock gate override if required */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
@@ -1230,7 +1169,7 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
/* Limit the GPU bus bandwidth if the platform needs this. */
kbdev->hw_quirks_mmu = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(L2_MMU_CONFIG), NULL);
+ GPU_CONTROL_REG(L2_MMU_CONFIG));
/* Limit read & write ID width for AXI */
@@ -1297,7 +1236,7 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
u32 coherency_features;
coherency_features = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(COHERENCY_FEATURES), NULL);
+ GPU_CONTROL_REG(COHERENCY_FEATURES));
/* (COHERENCY_ACE_LITE | COHERENCY_ACE) was incorrectly
* documented for tMIx so force correct value here.
@@ -1315,7 +1254,7 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
if (!kbdev->hw_quirks_jm)
kbdev->hw_quirks_jm = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(JM_CONFIG), NULL);
+ GPU_CONTROL_REG(JM_CONFIG));
#ifdef CONFIG_MALI_CORESTACK
#define MANUAL_POWER_CONTROL ((u32)(1 << 8))
@@ -1326,16 +1265,16 @@ static void kbase_pm_hw_issues_detect(struct kbase_device *kbdev)
static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
{
kbase_reg_write(kbdev, GPU_CONTROL_REG(SHADER_CONFIG),
- kbdev->hw_quirks_sc, NULL);
+ kbdev->hw_quirks_sc);
kbase_reg_write(kbdev, GPU_CONTROL_REG(TILER_CONFIG),
- kbdev->hw_quirks_tiler, NULL);
+ kbdev->hw_quirks_tiler);
kbase_reg_write(kbdev, GPU_CONTROL_REG(L2_MMU_CONFIG),
- kbdev->hw_quirks_mmu, NULL);
+ kbdev->hw_quirks_mmu);
kbase_reg_write(kbdev, GPU_CONTROL_REG(JM_CONFIG),
- kbdev->hw_quirks_jm, NULL);
+ kbdev->hw_quirks_jm);
}
@@ -1375,11 +1314,10 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
KBASE_TLSTREAM_JD_GPU_SOFT_RESET(kbdev);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_SOFT_RESET, NULL);
+ GPU_COMMAND_SOFT_RESET);
/* Unmask the reset complete interrupt only */
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED,
- NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), RESET_COMPLETED);
/* Initialize a structure for tracking the status of the reset */
rtdata.kbdev = kbdev;
@@ -1404,7 +1342,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
/* No interrupt has been received - check if the RAWSTAT register says
* the reset has completed */
- if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT), NULL) &
+ if (kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT)) &
RESET_COMPLETED) {
/* The interrupt is set in the RAWSTAT; this suggests that the
* interrupts are not getting to the CPU */
@@ -1420,7 +1358,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
RESET_TIMEOUT);
KBASE_TRACE_ADD(kbdev, CORE_GPU_HARD_RESET, NULL, NULL, 0u, 0);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_HARD_RESET, NULL);
+ GPU_COMMAND_HARD_RESET);
/* Restart the timer to wait for the hard reset to complete */
rtdata.timed_out = 0;
@@ -1451,7 +1389,7 @@ static int kbasep_protected_mode_enable(struct protected_mode_device *pdev)
struct kbase_device *kbdev = pdev->data;
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_SET_PROTECTED_MODE, NULL);
+ GPU_COMMAND_SET_PROTECTED_MODE);
return 0;
}
@@ -1522,7 +1460,6 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
if (kbdev->protected_mode)
resume_vinstr = true;
kbdev->protected_mode = false;
- kbase_ipa_model_use_configured_locked(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
@@ -1538,7 +1475,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
/* Sanity check protected mode was left after reset */
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_MODE)) {
u32 gpu_status = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_STATUS), NULL);
+ GPU_CONTROL_REG(GPU_STATUS));
WARN_ON(gpu_status & GPU_STATUS_PROTECTED_MODE_ACTIVE);
}
@@ -1557,7 +1494,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
irq_flags);
if (kbdev->pm.backend.gpu_cycle_counter_requests)
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CYCLE_COUNT_START, NULL);
+ GPU_COMMAND_CYCLE_COUNT_START);
spin_unlock_irqrestore(
&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
irq_flags);
@@ -1608,7 +1545,7 @@ kbase_pm_request_gpu_cycle_counter_do_request(struct kbase_device *kbdev)
if (1 == kbdev->pm.backend.gpu_cycle_counter_requests)
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CYCLE_COUNT_START, NULL);
+ GPU_COMMAND_CYCLE_COUNT_START);
spin_unlock_irqrestore(
&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
@@ -1664,7 +1601,7 @@ void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
if (0 == kbdev->pm.backend.gpu_cycle_counter_requests)
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CYCLE_COUNT_STOP, NULL);
+ GPU_COMMAND_CYCLE_COUNT_STOP);
spin_unlock_irqrestore(
&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
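
Most of the churn in this file is mechanical: kbase_reg_read() and kbase_reg_write() lose their trailing struct kbase_context pointer, which every call site here passed as NULL. A stand-alone sketch of the new calling convention (the fake register window stands in for the real MMIO accessors):

#include <stdint.h>
#include <stdio.h>

struct kbase_device {
        uint32_t regs[64];      /* fake MMIO window for this sketch */
};

/* New-style accessors: device and offset only, no context argument. */
static uint32_t kbase_reg_read(struct kbase_device *kbdev, uint32_t offset)
{
        return kbdev->regs[offset / 4];
}

static void kbase_reg_write(struct kbase_device *kbdev, uint32_t offset,
                            uint32_t value)
{
        kbdev->regs[offset / 4] = value;
}

int main(void)
{
        struct kbase_device dev = { { 0 } };

        /* Old call sites read:
         *     kbase_reg_write(kbdev, offset, value, NULL);
         * the patch simply drops the final NULL everywhere. */
        kbase_reg_write(&dev, 0x20, 0xffffffffu);
        printf("reg 0x20 = 0x%08x\n", kbase_reg_read(&dev, 0x20));
        return 0;
}
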
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
index 831971b61527ac..0d3599ae5da812 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_internal.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -507,9 +507,9 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume);
void kbase_pm_do_poweroff(struct kbase_device *kbdev, bool is_suspend);
#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
-void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
- unsigned long *total, unsigned long *busy);
-void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev);
+void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+ struct kbasep_pm_metrics *last,
+ struct kbasep_pm_metrics *diff);
#endif /* defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS) */
#ifdef CONFIG_MALI_MIDGARD_DVFS
@@ -565,4 +565,16 @@ void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev);
*/
void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev);
+#ifdef CONFIG_MALI_DEVFREQ
+/**
+ * kbase_devfreq_set_core_mask - Set devfreq core mask
+ * @kbdev: Device pointer
+ * @core_mask: New core mask
+ *
+ * This function is used by devfreq to change the available core mask as
+ * required by Dynamic Core Scaling.
+ */
+void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask);
+#endif
+
#endif /* _KBASE_BACKEND_PM_INTERNAL_H_ */
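
The core mask passed to kbase_devfreq_set_core_mask() is a plain bitmap, bit N for shader core N, and Dynamic Core Scaling shrinks it at low operating points. A purely illustrative helper for building such a mask; the function name and the calling governor are hypothetical, and the real kbase_devfreq_set_core_mask() also kicks the PM state machine under the proper locks, which is omitted here:

#include <stdint.h>
#include <stdio.h>

/* Build a mask selecting the first n cores out of those physically
 * present; a hypothetical devfreq governor might compute this before
 * handing the result to kbase_devfreq_set_core_mask(). */
static uint64_t core_mask_first_n(uint64_t present, unsigned int n)
{
        uint64_t mask = 0;
        unsigned int bit;

        for (bit = 0; bit < 64 && n > 0; bit++) {
                if (present & (1ull << bit)) {
                        mask |= 1ull << bit;
                        n--;
                }
        }
        return mask;
}

int main(void)
{
        uint64_t present = 0xf;         /* 4-core GPU: cores 0-3 */

        /* e.g. only two cores are worth powering at a low OPP */
        printf("core_mask = 0x%llx\n",
               (unsigned long long)core_mask_first_n(present, 2));
        return 0;
}
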
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
index a8020b668e0c81..6b9b6862cc9b6d 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_metrics.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,6 +30,7 @@
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_jm_rb.h>
+#include <backend/gpu/mali_kbase_pm_defs.h>
/* When VSync is being hit aim for utilisation between 70-90% */
#define KBASE_PM_VSYNC_MIN_UTILISATION 70
@@ -43,19 +44,15 @@
* under 11s. Exceeding this will cause overflow */
#define KBASE_PM_TIME_SHIFT 8
-/* Maximum time between sampling of utilization data, without resetting the
- * counters. */
-#define MALI_UTILIZATION_MAX_PERIOD 100000 /* ns = 100ms */
-
#ifdef CONFIG_MALI_MIDGARD_DVFS
static enum hrtimer_restart dvfs_callback(struct hrtimer *timer)
{
unsigned long flags;
- struct kbasep_pm_metrics_data *metrics;
+ struct kbasep_pm_metrics_state *metrics;
KBASE_DEBUG_ASSERT(timer != NULL);
- metrics = container_of(timer, struct kbasep_pm_metrics_data, timer);
+ metrics = container_of(timer, struct kbasep_pm_metrics_state, timer);
kbase_pm_get_dvfs_action(metrics->kbdev);
spin_lock_irqsave(&metrics->lock, flags);
@@ -78,18 +75,17 @@ int kbasep_pm_metrics_init(struct kbase_device *kbdev)
kbdev->pm.backend.metrics.kbdev = kbdev;
kbdev->pm.backend.metrics.time_period_start = ktime_get();
- kbdev->pm.backend.metrics.time_busy = 0;
- kbdev->pm.backend.metrics.time_idle = 0;
- kbdev->pm.backend.metrics.prev_busy = 0;
- kbdev->pm.backend.metrics.prev_idle = 0;
kbdev->pm.backend.metrics.gpu_active = false;
kbdev->pm.backend.metrics.active_cl_ctx[0] = 0;
kbdev->pm.backend.metrics.active_cl_ctx[1] = 0;
kbdev->pm.backend.metrics.active_gl_ctx[0] = 0;
kbdev->pm.backend.metrics.active_gl_ctx[1] = 0;
- kbdev->pm.backend.metrics.busy_cl[0] = 0;
- kbdev->pm.backend.metrics.busy_cl[1] = 0;
- kbdev->pm.backend.metrics.busy_gl = 0;
+
+ kbdev->pm.backend.metrics.values.time_busy = 0;
+ kbdev->pm.backend.metrics.values.time_idle = 0;
+ kbdev->pm.backend.metrics.values.busy_cl[0] = 0;
+ kbdev->pm.backend.metrics.values.busy_cl[1] = 0;
+ kbdev->pm.backend.metrics.values.busy_gl = 0;
spin_lock_init(&kbdev->pm.backend.metrics.lock);
@@ -143,17 +139,17 @@ static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
if (kbdev->pm.backend.metrics.gpu_active) {
u32 ns_time = (u32) (ktime_to_ns(diff) >> KBASE_PM_TIME_SHIFT);
- kbdev->pm.backend.metrics.time_busy += ns_time;
+ kbdev->pm.backend.metrics.values.time_busy += ns_time;
if (kbdev->pm.backend.metrics.active_cl_ctx[0])
- kbdev->pm.backend.metrics.busy_cl[0] += ns_time;
+ kbdev->pm.backend.metrics.values.busy_cl[0] += ns_time;
if (kbdev->pm.backend.metrics.active_cl_ctx[1])
- kbdev->pm.backend.metrics.busy_cl[1] += ns_time;
+ kbdev->pm.backend.metrics.values.busy_cl[1] += ns_time;
if (kbdev->pm.backend.metrics.active_gl_ctx[0])
- kbdev->pm.backend.metrics.busy_gl += ns_time;
+ kbdev->pm.backend.metrics.values.busy_gl += ns_time;
if (kbdev->pm.backend.metrics.active_gl_ctx[1])
- kbdev->pm.backend.metrics.busy_gl += ns_time;
+ kbdev->pm.backend.metrics.values.busy_gl += ns_time;
} else {
- kbdev->pm.backend.metrics.time_idle += (u32) (ktime_to_ns(diff)
+ kbdev->pm.backend.metrics.values.time_idle += (u32) (ktime_to_ns(diff)
>> KBASE_PM_TIME_SHIFT);
}
@@ -161,160 +157,53 @@ static void kbase_pm_get_dvfs_utilisation_calc(struct kbase_device *kbdev,
}
#if defined(CONFIG_MALI_DEVFREQ) || defined(CONFIG_MALI_MIDGARD_DVFS)
-/* Caller needs to hold kbdev->pm.backend.metrics.lock before calling this
- * function.
- */
-static void kbase_pm_reset_dvfs_utilisation_unlocked(struct kbase_device *kbdev,
- ktime_t now)
-{
- /* Store previous value */
- kbdev->pm.backend.metrics.prev_idle =
- kbdev->pm.backend.metrics.time_idle;
- kbdev->pm.backend.metrics.prev_busy =
- kbdev->pm.backend.metrics.time_busy;
-
- /* Reset current values */
- kbdev->pm.backend.metrics.time_period_start = now;
- kbdev->pm.backend.metrics.time_idle = 0;
- kbdev->pm.backend.metrics.time_busy = 0;
- kbdev->pm.backend.metrics.busy_cl[0] = 0;
- kbdev->pm.backend.metrics.busy_cl[1] = 0;
- kbdev->pm.backend.metrics.busy_gl = 0;
-}
-
-void kbase_pm_reset_dvfs_utilisation(struct kbase_device *kbdev)
+void kbase_pm_get_dvfs_metrics(struct kbase_device *kbdev,
+ struct kbasep_pm_metrics *last,
+ struct kbasep_pm_metrics *diff)
{
+ struct kbasep_pm_metrics *cur = &kbdev->pm.backend.metrics.values;
unsigned long flags;
spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
- kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, ktime_get());
- spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
-}
+ kbase_pm_get_dvfs_utilisation_calc(kbdev, ktime_get());
-void kbase_pm_get_dvfs_utilisation(struct kbase_device *kbdev,
- unsigned long *total_out, unsigned long *busy_out)
-{
- ktime_t now = ktime_get();
- unsigned long flags, busy, total;
+ memset(diff, 0, sizeof(*diff));
+ diff->time_busy = cur->time_busy - last->time_busy;
+ diff->time_idle = cur->time_idle - last->time_idle;
+ diff->busy_cl[0] = cur->busy_cl[0] - last->busy_cl[0];
+ diff->busy_cl[1] = cur->busy_cl[1] - last->busy_cl[1];
+ diff->busy_gl = cur->busy_gl - last->busy_gl;
- spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
- kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
-
- busy = kbdev->pm.backend.metrics.time_busy;
- total = busy + kbdev->pm.backend.metrics.time_idle;
-
- /* Reset stats if older than MALI_UTILIZATION_MAX_PERIOD (default
- * 100ms) */
- if (total >= MALI_UTILIZATION_MAX_PERIOD) {
- kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
- } else if (total < (MALI_UTILIZATION_MAX_PERIOD / 2)) {
- total += kbdev->pm.backend.metrics.prev_idle +
- kbdev->pm.backend.metrics.prev_busy;
- busy += kbdev->pm.backend.metrics.prev_busy;
- }
+ *last = *cur;
- *total_out = total;
- *busy_out = busy;
spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
}
+KBASE_EXPORT_TEST_API(kbase_pm_get_dvfs_metrics);
#endif
#ifdef CONFIG_MALI_MIDGARD_DVFS
-
-/* caller needs to hold kbdev->pm.backend.metrics.lock before calling this
- * function
- */
-int kbase_pm_get_dvfs_utilisation_old(struct kbase_device *kbdev,
- int *util_gl_share,
- int util_cl_share[2],
- ktime_t now)
-{
- int utilisation;
- int busy;
-
- kbase_pm_get_dvfs_utilisation_calc(kbdev, now);
-
- if (kbdev->pm.backend.metrics.time_idle +
- kbdev->pm.backend.metrics.time_busy == 0) {
- /* No data - so we return NOP */
- utilisation = -1;
- if (util_gl_share)
- *util_gl_share = -1;
- if (util_cl_share) {
- util_cl_share[0] = -1;
- util_cl_share[1] = -1;
- }
- goto out;
- }
-
- utilisation = (100 * kbdev->pm.backend.metrics.time_busy) /
- (kbdev->pm.backend.metrics.time_idle +
- kbdev->pm.backend.metrics.time_busy);
-
- busy = kbdev->pm.backend.metrics.busy_gl +
- kbdev->pm.backend.metrics.busy_cl[0] +
- kbdev->pm.backend.metrics.busy_cl[1];
-
- if (busy != 0) {
- if (util_gl_share)
- *util_gl_share =
- (100 * kbdev->pm.backend.metrics.busy_gl) /
- busy;
- if (util_cl_share) {
- util_cl_share[0] =
- (100 * kbdev->pm.backend.metrics.busy_cl[0]) /
- busy;
- util_cl_share[1] =
- (100 * kbdev->pm.backend.metrics.busy_cl[1]) /
- busy;
- }
- } else {
- if (util_gl_share)
- *util_gl_share = -1;
- if (util_cl_share) {
- util_cl_share[0] = -1;
- util_cl_share[1] = -1;
- }
- }
-
-out:
- return utilisation;
-}
-
void kbase_pm_get_dvfs_action(struct kbase_device *kbdev)
{
- unsigned long flags;
int utilisation, util_gl_share;
int util_cl_share[2];
- ktime_t now;
+ int busy;
+ struct kbasep_pm_metrics *diff;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- spin_lock_irqsave(&kbdev->pm.backend.metrics.lock, flags);
-
- now = ktime_get();
+ diff = &kbdev->pm.backend.metrics.dvfs_diff;
- utilisation = kbase_pm_get_dvfs_utilisation_old(kbdev, &util_gl_share,
- util_cl_share, now);
+ kbase_pm_get_dvfs_metrics(kbdev, &kbdev->pm.backend.metrics.dvfs_last, diff);
- if (utilisation < 0 || util_gl_share < 0 || util_cl_share[0] < 0 ||
- util_cl_share[1] < 0) {
- utilisation = 0;
- util_gl_share = 0;
- util_cl_share[0] = 0;
- util_cl_share[1] = 0;
- goto out;
- }
+ utilisation = (100 * diff->time_busy) /
+ max(diff->time_busy + diff->time_idle, 1u);
-out:
-#ifdef CONFIG_MALI_MIDGARD_DVFS
- kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share,
- util_cl_share);
-#endif /*CONFIG_MALI_MIDGARD_DVFS */
+ busy = max(diff->busy_gl + diff->busy_cl[0] + diff->busy_cl[1], 1u);
+ util_gl_share = (100 * diff->busy_gl) / busy;
+ util_cl_share[0] = (100 * diff->busy_cl[0]) / busy;
+ util_cl_share[1] = (100 * diff->busy_cl[1]) / busy;
- kbase_pm_reset_dvfs_utilisation_unlocked(kbdev, now);
-
- spin_unlock_irqrestore(&kbdev->pm.backend.metrics.lock, flags);
+ kbase_platform_dvfs_event(kbdev, utilisation, util_gl_share, util_cl_share);
}
bool kbase_pm_metrics_is_active(struct kbase_device *kbdev)
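
The rewritten kbase_pm_get_dvfs_action() above computes the utilisation shares straight from the per-period diff, using max(..., 1) to guard the divisions instead of the old negative-sentinel-then-clamp path. The same arithmetic as a checkable stand-alone snippet (the sample counter values are invented):

#include <stdint.h>
#include <stdio.h>

static uint32_t max_u32(uint32_t a, uint32_t b)
{
        return a > b ? a : b;
}

int main(void)
{
        /* Invented per-period deltas, in shifted-ns units. */
        uint32_t time_busy = 750, time_idle = 250;
        uint32_t busy_gl = 600, busy_cl0 = 150, busy_cl1 = 0;

        /* max(..., 1) keeps both divisors nonzero for an empty
         * period, so no special "no data" case is needed. */
        uint32_t utilisation = 100 * time_busy /
                               max_u32(time_busy + time_idle, 1);
        uint32_t busy = max_u32(busy_gl + busy_cl0 + busy_cl1, 1);

        printf("utilisation=%u%% gl=%u%% cl0=%u%% cl1=%u%%\n",
               utilisation,
               100 * busy_gl / busy,
               100 * busy_cl0 / busy,
               100 * busy_cl1 / busy);
        return 0;
}

This prints utilisation=75% gl=80% cl0=20% cl1=0% for the values above, matching the shares the driver would pass to kbase_platform_dvfs_event().
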
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
index f1e73a1c47b392..6dd00a92864ca7 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -89,72 +89,6 @@ enum {
};
typedef u32 kbase_pm_change_state;
-
-#ifdef CONFIG_MALI_TRACE_TIMELINE
-/* Timeline Trace code lookups for each function */
-static u32 kbase_pm_change_state_trace_code[KBASE_PM_FUNC_ID_COUNT]
- [KBASE_PM_CHANGE_STATE_COUNT] = {
- /* kbase_pm_request_cores */
- [KBASE_PM_FUNC_ID_REQUEST_CORES_START][0] = 0,
- [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
- SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START,
- [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START,
- [KBASE_PM_FUNC_ID_REQUEST_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
- KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START,
-
- [KBASE_PM_FUNC_ID_REQUEST_CORES_END][0] = 0,
- [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
- SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END,
- [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END,
- [KBASE_PM_FUNC_ID_REQUEST_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
- KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END,
-
- /* kbase_pm_release_cores */
- [KBASE_PM_FUNC_ID_RELEASE_CORES_START][0] = 0,
- [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER] =
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START,
- [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START,
- [KBASE_PM_FUNC_ID_RELEASE_CORES_START][KBASE_PM_CHANGE_STATE_SHADER |
- KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START,
-
- [KBASE_PM_FUNC_ID_RELEASE_CORES_END][0] = 0,
- [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER] =
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END,
- [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END,
- [KBASE_PM_FUNC_ID_RELEASE_CORES_END][KBASE_PM_CHANGE_STATE_SHADER |
- KBASE_PM_CHANGE_STATE_TILER] =
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END
-};
-
-static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
- enum kbase_pm_func_id func_id,
- kbase_pm_change_state state)
-{
- int trace_code;
-
- KBASE_DEBUG_ASSERT(func_id >= 0 && func_id < KBASE_PM_FUNC_ID_COUNT);
- KBASE_DEBUG_ASSERT(state != 0 && (state & KBASE_PM_CHANGE_STATE_MASK) ==
- state);
-
- trace_code = kbase_pm_change_state_trace_code[func_id][state];
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code);
-}
-
-#else /* CONFIG_MALI_TRACE_TIMELINE */
-static inline void kbase_timeline_pm_cores_func(struct kbase_device *kbdev,
- enum kbase_pm_func_id func_id, kbase_pm_change_state state)
-{
-}
-
-#endif /* CONFIG_MALI_TRACE_TIMELINE */
-
/**
* kbasep_pm_do_poweroff_cores - Process a poweroff request and power down any
* requested shader cores
@@ -181,11 +115,7 @@ static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
kbdev->pm.backend.ca_in_transition) {
bool cores_are_available;
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START);
cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
- KBASE_TIMELINE_PM_CHECKTRANS(kbdev,
- SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END);
/* Don't need 'cores_are_available',
* because we don't return anything */
@@ -356,6 +286,9 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
active = backend->pm_current_policy->get_core_active(kbdev);
+ WARN((kbase_pm_is_active(kbdev) && !active),
+ "GPU is active but policy '%s' is indicating that it can be powered off",
+ kbdev->pm.backend.pm_current_policy->name);
if (active) {
if (backend->gpu_poweroff_pending) {
@@ -383,6 +316,7 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
/* Power on the GPU and any cores requested by the policy */
if (pm->backend.poweroff_wait_in_progress) {
+ KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
pm->backend.poweron_required = true;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
} else {
@@ -435,6 +369,42 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
}
}
+/**
+ * get_desired_shader_bitmap - Get the desired shader bitmap, based on the
+ * current power policy
+ *
+ * @kbdev: The kbase device structure for the device
+ *
+ * Queries the current power policy to determine if shader cores will be
+ * required in the current state, and apply any HW workarounds.
+ *
+ * Return: bitmap of desired shader cores
+ */
+
+static u64 get_desired_shader_bitmap(struct kbase_device *kbdev)
+{
+ u64 desired_bitmap = 0u;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev))
+ desired_bitmap = kbase_pm_ca_get_core_mask(kbdev);
+
+ WARN(!desired_bitmap && kbdev->shader_needed_cnt,
+ "Shader cores are needed but policy '%s' did not make them needed",
+ kbdev->pm.backend.pm_current_policy->name);
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
+ /* Unless XAFFINITY is supported, enable core 0 if tiler
+ * required, regardless of core availability
+ */
+ if (kbdev->tiler_needed_cnt > 0)
+ desired_bitmap |= 1;
+ }
+
+ return desired_bitmap;
+}
+
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
u64 desired_bitmap;
@@ -449,30 +419,19 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
if (kbdev->pm.backend.poweroff_wait_in_progress)
return;
- if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
- !kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
- && !kbdev->tiler_inuse_cnt) {
+ if (kbdev->protected_mode_transition && !kbdev->shader_needed_cnt &&
+ !kbdev->tiler_needed_cnt) {
/* We are trying to change in/out of protected mode - force all
* cores off so that the L2 powers down */
desired_bitmap = 0;
desired_tiler_bitmap = 0;
} else {
- desired_bitmap =
- kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
- desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
+ desired_bitmap = get_desired_shader_bitmap(kbdev);
- if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
+ if (kbdev->tiler_needed_cnt > 0)
desired_tiler_bitmap = 1;
else
desired_tiler_bitmap = 0;
-
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
- /* Unless XAFFINITY is supported, enable core 0 if tiler
- * required, regardless of core availability */
- if (kbdev->tiler_needed_cnt > 0 ||
- kbdev->tiler_inuse_cnt > 0)
- desired_bitmap |= 1;
- }
}
if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
@@ -649,55 +608,28 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
KBASE_EXPORT_TEST_API(kbase_pm_set_policy);
-/* Check whether a state change has finished, and trace it as completed */
-static void
-kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
-{
- if ((kbdev->shader_available_bitmap &
- kbdev->pm.backend.desired_shader_state)
- == kbdev->pm.backend.desired_shader_state &&
- (kbdev->tiler_available_bitmap &
- kbdev->pm.backend.desired_tiler_state)
- == kbdev->pm.backend.desired_tiler_state)
- kbase_timeline_pm_check_handle_event(kbdev,
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
-}
-
void kbase_pm_request_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores)
+ bool tiler_required, bool shader_required)
{
- u64 cores;
-
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
lockdep_assert_held(&kbdev->hwaccess_lock);
- cores = shader_cores;
- while (cores) {
- int bitnum = fls64(cores) - 1;
- u64 bit = 1ULL << bitnum;
+ if (shader_required) {
+ int cnt = ++kbdev->shader_needed_cnt;
- /* It should be almost impossible for this to overflow. It would
- * require 2^32 atoms to request a particular core, which would
- * require 2^24 contexts to submit. This would require an amount
- * of memory that is impossible on a 32-bit system and extremely
- * unlikely on a 64-bit system. */
- int cnt = ++kbdev->shader_needed_cnt[bitnum];
-
- if (1 == cnt) {
- kbdev->shader_needed_bitmap |= bit;
+ if (cnt == 1)
change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
- }
- cores &= ~bit;
+ KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt != 0);
}
if (tiler_required) {
int cnt = ++kbdev->tiler_needed_cnt;
- if (1 == cnt)
+ if (cnt == 1)
change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt != 0);
@@ -705,22 +637,18 @@ void kbase_pm_request_cores(struct kbase_device *kbdev,
if (change_gpu_state) {
KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_SHADER_NEEDED, NULL,
- NULL, 0u, (u32) kbdev->shader_needed_bitmap);
+ NULL, 0u, kbdev->shader_needed_cnt);
+ KBASE_TRACE_ADD(kbdev, PM_REQUEST_CHANGE_TILER_NEEDED, NULL,
+ NULL, 0u, kbdev->tiler_needed_cnt);
- kbase_timeline_pm_cores_func(kbdev,
- KBASE_PM_FUNC_ID_REQUEST_CORES_START,
- change_gpu_state);
kbase_pm_update_cores_state_nolock(kbdev);
- kbase_timeline_pm_cores_func(kbdev,
- KBASE_PM_FUNC_ID_REQUEST_CORES_END,
- change_gpu_state);
}
}
KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
-void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores)
+void kbase_pm_release_cores(struct kbase_device *kbdev,
+ bool tiler_required, bool shader_required)
{
kbase_pm_change_state change_gpu_state = 0u;
@@ -728,22 +656,16 @@ void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
lockdep_assert_held(&kbdev->hwaccess_lock);
- while (shader_cores) {
- int bitnum = fls64(shader_cores) - 1;
- u64 bit = 1ULL << bitnum;
+ if (shader_required) {
int cnt;
- KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);
+ KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt > 0);
- cnt = --kbdev->shader_needed_cnt[bitnum];
+ cnt = --kbdev->shader_needed_cnt;
if (0 == cnt) {
- kbdev->shader_needed_bitmap &= ~bit;
-
change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
}
-
- shader_cores &= ~bit;
}
if (tiler_required) {
@@ -758,165 +680,26 @@ void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
}
if (change_gpu_state) {
- KBASE_TRACE_ADD(kbdev, PM_UNREQUEST_CHANGE_SHADER_NEEDED, NULL,
- NULL, 0u, (u32) kbdev->shader_needed_bitmap);
-
- kbase_pm_update_cores_state_nolock(kbdev);
-
- /* Trace that any state change effectively completes immediately
- * - no-one will wait on the state change */
- kbase_pm_trace_check_and_finish_state_change(kbdev);
- }
-}
-
-KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
-
-enum kbase_pm_cores_ready
-kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores)
-{
- u64 prev_shader_needed; /* Just for tracing */
- u64 prev_shader_inuse; /* Just for tracing */
-
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- prev_shader_needed = kbdev->shader_needed_bitmap;
- prev_shader_inuse = kbdev->shader_inuse_bitmap;
-
- /* If desired_shader_state does not contain the requested cores, then
- * power management is not attempting to powering those cores (most
- * likely due to core availability policy) and a new job affinity must
- * be chosen */
- if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
- shader_cores) {
- return (kbdev->pm.backend.poweroff_wait_in_progress ||
- kbdev->pm.backend.pm_current_policy == NULL) ?
- KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
- }
-
- if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
- (tiler_required && !kbdev->tiler_available_bitmap)) {
- /* Trace ongoing core transition */
- kbase_timeline_pm_l2_transition_start(kbdev);
- return KBASE_CORES_NOT_READY;
- }
-
- /* If we started to trace a state change, then trace it has being
- * finished by now, at the very latest */
- kbase_pm_trace_check_and_finish_state_change(kbdev);
- /* Trace core transition done */
- kbase_timeline_pm_l2_transition_done(kbdev);
-
- while (shader_cores) {
- int bitnum = fls64(shader_cores) - 1;
- u64 bit = 1ULL << bitnum;
- int cnt;
-
- KBASE_DEBUG_ASSERT(kbdev->shader_needed_cnt[bitnum] > 0);
-
- cnt = --kbdev->shader_needed_cnt[bitnum];
-
- if (0 == cnt)
- kbdev->shader_needed_bitmap &= ~bit;
-
- /* shader_inuse_cnt should not overflow because there can only
- * be a very limited number of jobs on the h/w at one time */
-
- kbdev->shader_inuse_cnt[bitnum]++;
- kbdev->shader_inuse_bitmap |= bit;
-
- shader_cores &= ~bit;
- }
-
- if (tiler_required) {
- KBASE_DEBUG_ASSERT(kbdev->tiler_needed_cnt > 0);
-
- --kbdev->tiler_needed_cnt;
-
- kbdev->tiler_inuse_cnt++;
-
- KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt != 0);
- }
-
- if (prev_shader_needed != kbdev->shader_needed_bitmap)
- KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_NEEDED, NULL,
- NULL, 0u, (u32) kbdev->shader_needed_bitmap);
-
- if (prev_shader_inuse != kbdev->shader_inuse_bitmap)
- KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
- NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
-
- return KBASE_CORES_READY;
-}
-
-KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores);
-
-void kbase_pm_release_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores)
-{
- kbase_pm_change_state change_gpu_state = 0u;
-
- KBASE_DEBUG_ASSERT(kbdev != NULL);
-
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- while (shader_cores) {
- int bitnum = fls64(shader_cores) - 1;
- u64 bit = 1ULL << bitnum;
- int cnt;
-
- KBASE_DEBUG_ASSERT(kbdev->shader_inuse_cnt[bitnum] > 0);
-
- cnt = --kbdev->shader_inuse_cnt[bitnum];
-
- if (0 == cnt) {
- kbdev->shader_inuse_bitmap &= ~bit;
- change_gpu_state |= KBASE_PM_CHANGE_STATE_SHADER;
- }
-
- shader_cores &= ~bit;
- }
-
- if (tiler_required) {
- int cnt;
-
- KBASE_DEBUG_ASSERT(kbdev->tiler_inuse_cnt > 0);
-
- cnt = --kbdev->tiler_inuse_cnt;
-
- if (0 == cnt)
- change_gpu_state |= KBASE_PM_CHANGE_STATE_TILER;
- }
-
- if (change_gpu_state) {
- KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_INUSE, NULL,
- NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
+ KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_SHADER_NEEDED, NULL,
+ NULL, 0u, kbdev->shader_needed_cnt);
+ KBASE_TRACE_ADD(kbdev, PM_RELEASE_CHANGE_TILER_NEEDED, NULL,
+ NULL, 0u, kbdev->tiler_needed_cnt);
- kbase_timeline_pm_cores_func(kbdev,
- KBASE_PM_FUNC_ID_RELEASE_CORES_START,
- change_gpu_state);
kbase_pm_update_cores_state_nolock(kbdev);
- kbase_timeline_pm_cores_func(kbdev,
- KBASE_PM_FUNC_ID_RELEASE_CORES_END,
- change_gpu_state);
-
- /* Trace that any state change completed immediately */
- kbase_pm_trace_check_and_finish_state_change(kbdev);
}
}
KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
- bool tiler_required,
- u64 shader_cores)
+ bool tiler_required, bool shader_required)
{
unsigned long flags;
kbase_pm_wait_for_poweroff_complete(kbdev);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
+ kbase_pm_request_cores(kbdev, tiler_required, shader_required);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
kbase_pm_check_transitions_sync(kbdev);
@@ -924,33 +707,52 @@ void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
KBASE_EXPORT_TEST_API(kbase_pm_request_cores_sync);
+static void kbase_pm_l2_caches_ref(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ kbdev->l2_users_count++;
+
+ KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);
+
+	/* Check for the required L2 transitions.
+	 * The caller will block until the L2 caches of all core groups are
+	 * powered on, so the HW must be told here to power up all of them.
+	 * A previously non-zero l2_users_count is not enough to skip the
+	 * transition check: the count can be non-zero while some L2
+	 * instances are still off, because L2 power status is not tracked
+	 * separately for each core group. Likewise, if the GPU is reset
+	 * while the L2 is on, the L2 ends up off but the count stays
+	 * non-zero.
+	 */
+ kbase_pm_check_transitions_nolock(kbdev);
+}
+
void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
{
unsigned long flags;
- u32 prior_l2_users_count;
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- prior_l2_users_count = kbdev->l2_users_count++;
-
- KBASE_DEBUG_ASSERT(kbdev->l2_users_count != 0);
-
- /* if the GPU is reset while the l2 is on, l2 will be off but
- * prior_l2_users_count will be > 0. l2_available_bitmap will have been
- * set to 0 though by kbase_pm_init_hw */
- if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
- kbase_pm_check_transitions_nolock(kbdev);
+ /* Take the reference on l2_users_count and check core transitions.
+ */
+ kbase_pm_l2_caches_ref(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
wait_event(kbdev->pm.backend.l2_powered_wait,
kbdev->pm.backend.l2_powered == 1);
-
- /* Trace that any state change completed immediately */
- kbase_pm_trace_check_and_finish_state_change(kbdev);
}
KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches);
+void kbase_pm_request_l2_caches_nolock(struct kbase_device *kbdev)
+{
+ /* Take the reference on l2_users_count and check core transitions.
+ */
+ kbase_pm_l2_caches_ref(kbdev);
+}
+
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
@@ -968,11 +770,8 @@ void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
--kbdev->l2_users_count;
- if (!kbdev->l2_users_count) {
+ if (!kbdev->l2_users_count)
kbase_pm_check_transitions_nolock(kbdev);
- /* Trace that any state change completed immediately */
- kbase_pm_trace_check_and_finish_state_change(kbdev);
- }
}
KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);
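The blocking variant above amounts to: take a reference under the lock, kick
the transition check, then sleep until the power-up completes. A minimal
pthread analogue of that wait_event() pattern (hypothetical names, not driver
code):

    #include <pthread.h>

    struct l2_pm {
        pthread_mutex_t lock;
        pthread_cond_t powered_cond;
        int users_count;
        int powered;    /* set by the (elided) transition handler */
    };

    static void request_l2(struct l2_pm *pm)
    {
        pthread_mutex_lock(&pm->lock);
        /* Always re-check transitions: the count can be non-zero while
         * the L2 is off, e.g. after a GPU reset. */
        pm->users_count++;
        /* ...a check_transitions() equivalent would run here... */
        pthread_mutex_unlock(&pm->lock);

        /* Block until the transition handler reports power-up. */
        pthread_mutex_lock(&pm->lock);
        while (!pm->powered)
            pthread_cond_wait(&pm->powered_cond, &pm->lock);
        pthread_mutex_unlock(&pm->lock);
    }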
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
index 852fedd346ba37..2e86929c7e42f3 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_pm_policy.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -73,11 +73,25 @@ enum kbase_pm_cores_ready {
/**
- * kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores()
+ * kbase_pm_request_cores - Request the desired cores to be powered up.
+ * @kbdev: Kbase device
+ * @tiler_required: true if tiler is required
+ * @shader_required: true if shaders are required
*
- * @kbdev: The kbase device structure for the device
- * @tiler_required: true if the tiler is required, false otherwise
- * @shader_cores: A bitmask of shader cores which are necessary for the job
+ * Called by the scheduler to request power to the desired cores.
+ *
+ * There is no guarantee that the HW will be powered up on return. Use
+ * kbase_pm_cores_requested()/kbase_pm_cores_ready() to verify that cores are
+ * now powered, or instead call kbase_pm_request_cores_sync().
+ */
+void kbase_pm_request_cores(struct kbase_device *kbdev, bool tiler_required,
+ bool shader_required);
+
+/**
+ * kbase_pm_request_cores_sync - Synchronous variant of kbase_pm_request_cores()
+ * @kbdev: Kbase device
+ * @tiler_required: true if tiler is required
+ * @shader_required: true if shaders are required
*
 * When this function returns, the requested cores will be in the READY state.
*
@@ -87,98 +101,79 @@ enum kbase_pm_cores_ready {
* is made.
*/
void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores);
+ bool tiler_required, bool shader_required);
/**
- * kbase_pm_request_cores - Mark one or more cores as being required
- * for jobs to be submitted
- *
- * @kbdev: The kbase device structure for the device
- * @tiler_required: true if the tiler is required, false otherwise
- * @shader_cores: A bitmask of shader cores which are necessary for the job
+ * kbase_pm_release_cores - Request the desired cores to be powered down.
+ * @kbdev: Kbase device
+ * @tiler_required: true if tiler is required
+ * @shader_required: true if shaders are required
*
- * This function is called by the job scheduler to mark one or more cores as
- * being required to submit jobs that are ready to run.
- *
- * The cores requested are reference counted and a subsequent call to
- * kbase_pm_register_inuse_cores() or kbase_pm_unrequest_cores() should be
- * made to dereference the cores as being 'needed'.
- *
- * The active power policy will meet or exceed the requirements of the
- * requested cores in the system. Any core transitions needed will be begun
- * immediately, but they might not complete/the cores might not be available
- * until a Power Management IRQ.
- *
- * Return: 0 if the cores were successfully requested, or -errno otherwise.
+ * Called by the scheduler to release its power reference on the desired cores.
*/
-void kbase_pm_request_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores);
+void kbase_pm_release_cores(struct kbase_device *kbdev, bool tiler_required,
+ bool shader_required);
/**
- * kbase_pm_unrequest_cores - Unmark one or more cores as being required for
- * jobs to be submitted.
+ * kbase_pm_cores_requested - Check that a power request has been locked into
+ * the HW.
+ * @kbdev: Kbase device
+ * @tiler_required: true if tiler is required
+ * @shader_required: true if shaders are required
+ *
+ * Called by the scheduler to check if a power on request has been locked into
+ * the HW.
*
- * @kbdev: The kbase device structure for the device
- * @tiler_required: true if the tiler is required, false otherwise
- * @shader_cores: A bitmask of shader cores (as given to
- * kbase_pm_request_cores() )
+ * Note that there is no guarantee that the cores are actually ready;
+ * however, once the request has been locked into the HW it is safe to
+ * submit work, since the HW will wait for the transition to ready.
*
- * This function undoes the effect of kbase_pm_request_cores(). It should be
- * used when a job is not going to be submitted to the hardware (e.g. the job is
- * cancelled before it is enqueued).
+ * A reference must be taken before making this call.
*
- * The active power policy will meet or exceed the requirements of the
- * requested cores in the system. Any core transitions needed will be begun
- * immediately, but they might not complete until a Power Management IRQ.
+ * Caller must hold the hwaccess_lock.
*
- * The policy may use this as an indication that it can power down cores.
+ * Return: true if the request has been locked into the HW, false if it
+ * is still pending.
*/
-void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores);
+static inline bool kbase_pm_cores_requested(struct kbase_device *kbdev,
+ bool tiler_required, bool shader_required)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if ((shader_required && !kbdev->shader_available_bitmap) ||
+ (tiler_required && !kbdev->tiler_available_bitmap))
+ return false;
+
+ return true;
+}
/**
- * kbase_pm_register_inuse_cores - Register a set of cores as in use by a job
- *
- * @kbdev: The kbase device structure for the device
- * @tiler_required: true if the tiler is required, false otherwise
- * @shader_cores: A bitmask of shader cores (as given to
- * kbase_pm_request_cores() )
+ * kbase_pm_cores_ready - Check that the required cores have been powered on by
+ * the HW.
+ * @kbdev: Kbase device
+ * @tiler_required: true if tiler is required
+ * @shader_required: true if shaders are required
*
- * This function should be called after kbase_pm_request_cores() when the job
- * is about to be submitted to the hardware. It will check that the necessary
- * cores are available and if so update the 'needed' and 'inuse' bitmasks to
- * reflect that the job is now committed to being run.
+ * Called by the scheduler to check if cores are ready.
*
- * If the necessary cores are not currently available then the function will
- * return %KBASE_CORES_NOT_READY and have no effect.
+ * Note that the caller must have requested cores before calling this
+ * function.
*
- * Return: %KBASE_CORES_NOT_READY if the cores are not immediately ready,
+ * Caller must hold the hwaccess_lock.
*
- * %KBASE_NEW_AFFINITY if the affinity requested is not allowed,
- *
- * %KBASE_CORES_READY if the cores requested are already available
+ * Return: true if the cores are ready.
*/
-enum kbase_pm_cores_ready kbase_pm_register_inuse_cores(
- struct kbase_device *kbdev,
- bool tiler_required,
- u64 shader_cores);
+static inline bool kbase_pm_cores_ready(struct kbase_device *kbdev,
+ bool tiler_required, bool shader_required)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
-/**
- * kbase_pm_release_cores - Release cores after a job has run
- *
- * @kbdev: The kbase device structure for the device
- * @tiler_required: true if the tiler is required, false otherwise
- * @shader_cores: A bitmask of shader cores (as given to
- * kbase_pm_register_inuse_cores() )
- *
- * This function should be called when a job has finished running on the
- * hardware. A call to kbase_pm_register_inuse_cores() must have previously
- * occurred. The reference counts of the specified cores will be decremented
- * which may cause the bitmask of 'inuse' cores to be reduced. The power policy
- * may then turn off any cores which are no longer 'inuse'.
- */
-void kbase_pm_release_cores(struct kbase_device *kbdev,
- bool tiler_required, u64 shader_cores);
+ if ((shader_required && !kbdev->shader_ready_bitmap) ||
+ (tiler_required && !kbdev->tiler_available_bitmap))
+ return false;
+
+ return true;
+}
/**
* kbase_pm_request_l2_caches - Request l2 caches
@@ -200,6 +195,26 @@ void kbase_pm_release_cores(struct kbase_device *kbdev,
void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
/**
+ * kbase_pm_request_l2_caches_nolock - Request l2 caches, nolock version
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * Request the use of the L2 caches for all core groups and start the
+ * power-up without waiting for the power manager to complete it. The
+ * caller runs in atomic context, so it cannot block; whether the L2
+ * caches have actually powered up is checked at a later stage.
+ * The reference taken on the L2 caches is dropped when the protected
+ * mode atom is released, so no matching call to release_l2_caches()
+ * is needed.
+ *
+ * This function is used specifically for the case when l2 caches are
+ * to be powered up as part of the sequence for entering protected mode.
+ *
+ * This should only be used when power management is active.
+ */
+void kbase_pm_request_l2_caches_nolock(struct kbase_device *kbdev);
+
+/**
* kbase_pm_request_l2_caches_l2_is_on - Request l2 caches but don't power on
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
@@ -212,7 +227,7 @@ void kbase_pm_request_l2_caches(struct kbase_device *kbdev);
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev);
/**
- * kbase_pm_request_l2_caches - Release l2 caches
+ * kbase_pm_release_l2_caches - Release l2 caches
*
* @kbdev: The kbase device structure for the device (must be a valid pointer)
*
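Putting the new header API together, a hedged usage sketch from a
scheduler-like context; submit_job_now() is a hypothetical helper, the other
calls follow the declarations above:

    unsigned long flags;

    spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
    kbase_pm_request_cores(kbdev, true, true);      /* take a reference */

    if (kbase_pm_cores_requested(kbdev, true, true)) {
        /* Request locked into the HW: safe to submit, the HW will
         * wait for the ready transition. */
        if (kbase_pm_cores_ready(kbdev, true, true))
            submit_job_now(kbdev);                  /* hypothetical */
    } else {
        /* Not locked in yet: drop the reference and retry later. */
        kbase_pm_release_cores(kbdev, true, true);
    }
    spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);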
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
index cef07455cbaefd..5e1b761cf43c19 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2016,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,24 +35,20 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
/* Read hi, lo, hi to ensure that overflow from lo to hi is handled
* correctly */
do {
- hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
- NULL);
+ hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI));
*cycle_counter = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
- hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI),
- NULL);
+ GPU_CONTROL_REG(CYCLE_COUNT_LO));
+ hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(CYCLE_COUNT_HI));
*cycle_counter |= (((u64) hi1) << 32);
} while (hi1 != hi2);
/* Read hi, lo, hi to ensure that overflow from lo to hi is handled
* correctly */
do {
- hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
- NULL);
+ hi1 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI));
*system_time = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(TIMESTAMP_LO), NULL);
- hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI),
- NULL);
+ GPU_CONTROL_REG(TIMESTAMP_LO));
+ hi2 = kbase_reg_read(kbdev, GPU_CONTROL_REG(TIMESTAMP_HI));
*system_time |= (((u64) hi1) << 32);
} while (hi1 != hi2);
@@ -64,7 +60,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
/**
* kbase_wait_write_flush - Wait for GPU write flush
- * @kctx: Context pointer
+ * @kbdev: Kbase device
*
* Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
* its write buffer.
@@ -75,7 +71,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
* not be as expected.
*/
#ifndef CONFIG_MALI_NO_MALI
-void kbase_wait_write_flush(struct kbase_context *kctx)
+void kbase_wait_write_flush(struct kbase_device *kbdev)
{
u32 base_count = 0;
@@ -83,14 +79,14 @@ void kbase_wait_write_flush(struct kbase_context *kctx)
* The caller must be holding onto the kctx or the call is from
* userspace.
*/
- kbase_pm_context_active(kctx->kbdev);
- kbase_pm_request_gpu_cycle_counter(kctx->kbdev);
+ kbase_pm_context_active(kbdev);
+ kbase_pm_request_gpu_cycle_counter(kbdev);
while (true) {
u32 new_count;
- new_count = kbase_reg_read(kctx->kbdev,
- GPU_CONTROL_REG(CYCLE_COUNT_LO), NULL);
+ new_count = kbase_reg_read(kbdev,
+ GPU_CONTROL_REG(CYCLE_COUNT_LO));
/* First time around, just store the count. */
if (base_count == 0) {
base_count = new_count;
@@ -102,7 +98,7 @@ void kbase_wait_write_flush(struct kbase_context *kctx)
break;
}
- kbase_pm_release_gpu_cycle_counter(kctx->kbdev);
- kbase_pm_context_idle(kctx->kbdev);
+ kbase_pm_release_gpu_cycle_counter(kbdev);
+ kbase_pm_context_idle(kbdev);
}
#endif /* CONFIG_MALI_NO_MALI */
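The hi/lo/hi loop above generalizes to any 64-bit counter exposed as two
32-bit registers; a standalone sketch, where read_reg() is a hypothetical
register accessor:

    #include <stdint.h>

    extern uint32_t read_reg(int reg);  /* hypothetical register read */

    static uint64_t read_counter64(int hi_reg, int lo_reg)
    {
        uint32_t hi1, hi2, lo;

        do {
            hi1 = read_reg(hi_reg);
            lo  = read_reg(lo_reg);
            hi2 = read_reg(hi_reg);
            /* If hi changed, lo wrapped between the reads: retry. */
        } while (hi1 != hi2);

        return ((uint64_t)hi1 << 32) | lo;
    }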
diff --git a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h
index e1bd2632b2d625..ece70092b48ad3 100644
--- a/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h
+++ b/drivers/gpu/arm/midgard/backend/gpu/mali_kbase_time.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -36,7 +36,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
/**
* kbase_wait_write_flush() - Wait for GPU write flush
- * @kctx: Context pointer
+ * @kbdev: Kbase device
*
* Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
* its write buffer.
@@ -47,11 +47,11 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
* This function is only in use for BASE_HW_ISSUE_6367
*/
#ifdef CONFIG_MALI_NO_MALI
-static inline void kbase_wait_write_flush(struct kbase_context *kctx)
+static inline void kbase_wait_write_flush(struct kbase_device *kbdev)
{
}
#else
-void kbase_wait_write_flush(struct kbase_context *kctx);
+void kbase_wait_write_flush(struct kbase_device *kbdev);
#endif
#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/drivers/gpu/arm/midgard/build.bp b/drivers/gpu/arm/midgard/build.bp
index 4517b46217d7b3..ada69207c3d309 100644
--- a/drivers/gpu/arm/midgard/build.bp
+++ b/drivers/gpu/arm/midgard/build.bp
@@ -3,13 +3,17 @@
* ----------------------------------------------------------------------------
* This confidential and proprietary software may be used only as authorized
* by a licensing agreement from ARM Limited.
- * (C) COPYRIGHT 2017 ARM Limited, ALL RIGHTS RESERVED
+ * (C) COPYRIGHT 2017-2018 ARM Limited, ALL RIGHTS RESERVED
* The entire notice above must be reproduced on all authorized copies and
* copies may only be made to the extent permitted by a licensing agreement
* from ARM Limited.
* ----------------------------------------------------------------------------
*/
+/* Kernel-side tests may include mali_kbase's headers. Therefore any config
+ * options which affect the sizes of any structs (e.g. adding extra members)
+ * must be included in these defaults, so that the structs are consistent in
+ * both mali_kbase and the test modules. */
bob_defaults {
name: "mali_kbase_shared_config_defaults",
no_mali: {
@@ -21,7 +25,33 @@ bob_defaults {
mali_devfreq: {
kbuild_options: ["CONFIG_MALI_DEVFREQ=y"],
},
-
+ mali_midgard_dvfs: {
+ kbuild_options: ["CONFIG_MALI_MIDGARD_DVFS=y"],
+ },
+ mali_debug: {
+ kbuild_options: ["CONFIG_MALI_DEBUG=y"],
+ },
+ mali_fpga_bus_logger: {
+ kbuild_options: ["CONFIG_MALI_FPGA_BUS_LOGGER=y"],
+ },
+ cinstr_job_dump: {
+ kbuild_options: ["CONFIG_MALI_JOB_DUMP=y"],
+ },
+ mali_gator_support: {
+ kbuild_options: ["CONFIG_MALI_GATOR_SUPPORT=y"],
+ },
+ mali_system_trace: {
+ kbuild_options: ["CONFIG_MALI_SYSTEM_TRACE=y"],
+ },
+ mali_pwrsoft_765: {
+ kbuild_options: ["CONFIG_MALI_PWRSOFT_765=y"],
+ },
+ kbuild_options: [
+ "MALI_UNIT_TEST={{.unit_test_code}}",
+ "MALI_CUSTOMER_RELEASE={{.release}}",
+ "MALI_USE_CSF={{.gpu_has_csf}}",
+ "MALI_KERNEL_TEST_API={{.debug}}",
+ ],
defaults: ["kernel_defaults"],
}
@@ -48,48 +78,31 @@ bob_kernel_module {
"CONFIG_MALI_MIDGARD=m",
"CONFIG_MALI_NO_MALI_DEFAULT_GPU={{.gpu}}",
"CONFIG_MALI_PLATFORM_NAME={{.mali_platform_name}}",
- "MALI_KERNEL_TEST_API={{.unit_test_code}}",
"MALI_MOCK_TEST={{.mali_mock_test}}",
- "MALI_UNIT_TEST={{.unit_test_code}}",
],
- cinstr_job_dump: {
- kbuild_options: ["CONFIG_MALI_JOB_DUMP=y"],
- },
- mali_debug: {
- kbuild_options: ["CONFIG_MALI_DEBUG=y"],
- },
- mali_gator_support: {
- kbuild_options: ["CONFIG_MALI_GATOR_SUPPORT=y"],
- },
- mali_system_trace: {
- kbuild_options: ["CONFIG_MALI_SYSTEM_TRACE=y"],
- },
mali_error_inject: {
kbuild_options: ["CONFIG_MALI_ERROR_INJECT=y"],
},
mali_error_inject_random: {
kbuild_options: ["CONFIG_MALI_ERROR_INJECT_RANDOM=y"],
},
- mali_trace_timeline: {
- kbuild_options: ["CONFIG_MALI_TRACE_TIMELINE=y"],
- },
- mali_prfcnt_set_secondary: {
+ cinstr_secondary_hwc: {
kbuild_options: ["CONFIG_MALI_PRFCNT_SET_SECONDARY=y"],
},
- mali_fpga_bus_logger: {
- kbuild_options: ["CONFIG_MALI_FPGA_BUS_LOGGER=y"],
- },
- mali_midgard_dvfs: {
- kbuild_options: ["CONFIG_MALI_MIDGARD_DVFS=y"],
- },
mali_2mb_alloc: {
kbuild_options: ["CONFIG_MALI_2MB_ALLOC=y"],
},
mali_mock_test: {
srcs: ["tests/internal/src/mock/mali_kbase_pm_driver_mock.c"],
},
- ump: {
- extra_symbols: ["ump"],
+ gpu_has_csf: {
+ srcs: [
+ "csf/*.c",
+ "csf/*.h",
+ "csf/Kbuild",
+ ],
},
defaults: ["mali_kbase_shared_config_defaults"],
}
+
+optional_subdirs = ["tests"]
diff --git a/drivers/gpu/arm/midgard/docs/Doxyfile b/drivers/gpu/arm/midgard/docs/Doxyfile
index cea7bd9a1730c2..6498dcbc1840d5 100644
--- a/drivers/gpu/arm/midgard/docs/Doxyfile
+++ b/drivers/gpu/arm/midgard/docs/Doxyfile
@@ -38,7 +38,7 @@
# directories like "/usr/src/myproject". Separate the files or directories
# with spaces.
-INPUT += ../../kernel/drivers/gpu/arm/midgard/
+INPUT += ../../kernel/drivers/gpu/arm/midgard/
##############################################################################
# Everything below here is optional, and in most cases not required
diff --git a/drivers/gpu/arm/midgard/ipa/Kbuild b/drivers/gpu/arm/midgard/ipa/Kbuild
index b3d0bcba412079..3d9cf8006b808a 100644
--- a/drivers/gpu/arm/midgard/ipa/Kbuild
+++ b/drivers/gpu/arm/midgard/ipa/Kbuild
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -21,10 +21,8 @@
mali_kbase-y += \
ipa/mali_kbase_ipa_simple.o \
- ipa/mali_kbase_ipa.o
-
-mali_kbase-$(CONFIG_DEBUG_FS) += ipa/mali_kbase_ipa_debugfs.o
-
-mali_kbase-y += \
- ipa/mali_kbase_ipa_vinstr_g71.o \
+ ipa/mali_kbase_ipa.o \
+ ipa/mali_kbase_ipa_vinstr_g7x.o \
ipa/mali_kbase_ipa_vinstr_common.o
+
+mali_kbase-$(CONFIG_DEBUG_FS) += ipa/mali_kbase_ipa_debugfs.o
\ No newline at end of file
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
index d16069f6ee4e54..15566f63c6e871 100644
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -26,6 +26,7 @@
#include "mali_kbase_ipa.h"
#include "mali_kbase_ipa_debugfs.h"
#include "mali_kbase_ipa_simple.h"
+#include "backend/gpu/mali_kbase_pm_internal.h"
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
#include <linux/pm_opp.h>
@@ -38,10 +39,15 @@
#define KBASE_IPA_FALLBACK_MODEL_NAME "mali-simple-power-model"
#define KBASE_IPA_G71_MODEL_NAME "mali-g71-power-model"
+#define KBASE_IPA_G72_MODEL_NAME "mali-g72-power-model"
+#define KBASE_IPA_TNOX_MODEL_NAME "mali-tnox-power-model"
+#define KBASE_IPA_TGOX_R1_MODEL_NAME "mali-tgox_r1-power-model"
static struct kbase_ipa_model_ops *kbase_ipa_all_model_ops[] = {
&kbase_simple_ipa_model_ops,
- &kbase_g71_ipa_model_ops
+ &kbase_g71_ipa_model_ops,
+ &kbase_g72_ipa_model_ops,
+ &kbase_tnox_ipa_model_ops
};
int kbase_ipa_model_recalculate(struct kbase_ipa_model *model)
@@ -79,16 +85,6 @@ static struct kbase_ipa_model_ops *kbase_ipa_model_ops_find(struct kbase_device
return NULL;
}
-void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev)
-{
- atomic_set(&kbdev->ipa_use_configured_model, false);
-}
-
-void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev)
-{
- atomic_set(&kbdev->ipa_use_configured_model, true);
-}
-
const char *kbase_ipa_model_name_from_id(u32 gpu_id)
{
const u32 prod_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
@@ -98,6 +94,17 @@ const char *kbase_ipa_model_name_from_id(u32 gpu_id)
switch (GPU_ID2_MODEL_MATCH_VALUE(prod_id)) {
case GPU_ID2_PRODUCT_TMIX:
return KBASE_IPA_G71_MODEL_NAME;
+ case GPU_ID2_PRODUCT_THEX:
+ return KBASE_IPA_G72_MODEL_NAME;
+ case GPU_ID2_PRODUCT_TNOX:
+ return KBASE_IPA_TNOX_MODEL_NAME;
+ case GPU_ID2_PRODUCT_TGOX:
+ if ((gpu_id & GPU_ID2_VERSION_MAJOR) ==
+ (0 << GPU_ID2_VERSION_MAJOR_SHIFT))
+ /* TGOX r0 shares a power model with TNOX */
+ return KBASE_IPA_TNOX_MODEL_NAME;
+ else
+ return KBASE_IPA_TGOX_R1_MODEL_NAME;
default:
return KBASE_IPA_FALLBACK_MODEL_NAME;
}
@@ -305,14 +312,6 @@ int kbase_ipa_init(struct kbase_device *kbdev)
 /* The simple IPA model must *always* be present. */
ops = kbase_ipa_model_ops_find(kbdev, KBASE_IPA_FALLBACK_MODEL_NAME);
- if (!ops->do_utilization_scaling_in_framework) {
- dev_err(kbdev->dev,
- "Fallback IPA model %s should not account for utilization\n",
- ops->name);
- err = -EINVAL;
- goto end;
- }
-
default_model = kbase_ipa_init_model(kbdev, ops);
if (!default_model) {
err = -EINVAL;
@@ -353,8 +352,6 @@ int kbase_ipa_init(struct kbase_device *kbdev)
kbdev->ipa.configured_model = default_model;
}
- kbase_ipa_model_use_configured_locked(kbdev);
-
end:
if (err)
kbase_ipa_term_locked(kbdev);
@@ -443,14 +440,40 @@ u32 kbase_scale_static_power(const u32 c, const u32 voltage)
return div_u64(v3c_big, 1000000);
}
+void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev)
+{
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Record the event of GPU entering protected mode. */
+ kbdev->ipa_protection_mode_switched = true;
+}
+
static struct kbase_ipa_model *get_current_model(struct kbase_device *kbdev)
{
+ struct kbase_ipa_model *model;
+ unsigned long flags;
+
lockdep_assert_held(&kbdev->ipa.lock);
- if (atomic_read(&kbdev->ipa_use_configured_model))
- return kbdev->ipa.configured_model;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (kbdev->ipa_protection_mode_switched)
+ model = kbdev->ipa.fallback_model;
else
- return kbdev->ipa.fallback_model;
+ model = kbdev->ipa.configured_model;
+
+	/*
+	 * The GPU's earlier entry into protected mode has been taken into
+	 * account above, so the event can now be reset (provided the GPU
+	 * is not currently in protected mode) and the configured model
+	 * will be used for the next sample.
+	 */
+ if (!kbdev->protected_mode)
+ kbdev->ipa_protection_mode_switched = false;
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+
+ return model;
}
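The selection logic reduces to a sticky flag set on protected-mode entry and
cleared only after exit, so the fallback model covers every sample that
overlaps the protected window. A compilable model of just that logic (all
names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    struct model { const char *name; };

    struct dev_state {
        pthread_mutex_t hw_lock;
        bool prot_mode_switched;   /* sticky: set on entry event */
        bool in_protected_mode;    /* live HW state */
        const struct model *configured, *fallback;
    };

    static const struct model *pick_model(struct dev_state *s)
    {
        const struct model *m;

        pthread_mutex_lock(&s->hw_lock);
        m = s->prot_mode_switched ? s->fallback : s->configured;
        /* Reset only once protected mode has been exited, so the
         * fallback is used for at least one full sample. */
        if (!s->in_protected_mode)
            s->prot_mode_switched = false;
        pthread_mutex_unlock(&s->hw_lock);
        return m;
    }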
static u32 get_static_power_locked(struct kbase_device *kbdev,
@@ -532,7 +555,7 @@ static unsigned long kbase_get_dynamic_power(unsigned long freq,
model = kbdev->ipa.fallback_model;
- err = model->ops->get_dynamic_coeff(model, &power_coeff, freq);
+ err = model->ops->get_dynamic_coeff(model, &power_coeff);
if (!err)
power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
@@ -551,48 +574,63 @@ static unsigned long kbase_get_dynamic_power(unsigned long freq,
return power;
}
-int kbase_get_real_power(struct devfreq *df, u32 *power,
+int kbase_get_real_power_locked(struct kbase_device *kbdev, u32 *power,
unsigned long freq,
unsigned long voltage)
{
struct kbase_ipa_model *model;
u32 power_coeff = 0;
int err = 0;
- struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+ struct kbasep_pm_metrics diff;
+ u64 total_time;
- mutex_lock(&kbdev->ipa.lock);
+ lockdep_assert_held(&kbdev->ipa.lock);
+
+ kbase_pm_get_dvfs_metrics(kbdev, &kbdev->ipa.last_metrics, &diff);
model = get_current_model(kbdev);
- err = model->ops->get_dynamic_coeff(model, &power_coeff, freq);
+ err = model->ops->get_dynamic_coeff(model, &power_coeff);
- /* If we switch to protected model between get_current_model() and
- * get_dynamic_coeff(), counter reading could fail. If that happens
- * (unlikely, but possible), revert to the fallback model. */
+ /* If the counter model returns an error (e.g. switching back to
+ * protected mode and failing to read counters, or a counter sample
+ * with too few cycles), revert to the fallback model.
+ */
if (err && model != kbdev->ipa.fallback_model) {
model = kbdev->ipa.fallback_model;
- err = model->ops->get_dynamic_coeff(model, &power_coeff, freq);
+ err = model->ops->get_dynamic_coeff(model, &power_coeff);
}
if (err)
- goto exit_unlock;
+ return err;
*power = kbase_scale_dynamic_power(power_coeff, freq, voltage);
- if (model->ops->do_utilization_scaling_in_framework) {
- struct devfreq_dev_status *status = &df->last_status;
- unsigned long total_time = max(status->total_time, 1ul);
- u64 busy_time = min(status->busy_time, total_time);
-
- *power = div_u64((u64) *power * (u64) busy_time, total_time);
- }
+ /* time_busy / total_time cannot be >1, so assigning the 64-bit
+ * result of div_u64 to *power cannot overflow.
+ */
+ total_time = diff.time_busy + (u64) diff.time_idle;
+ *power = div_u64(*power * (u64) diff.time_busy,
+ max(total_time, 1ull));
*power += get_static_power_locked(kbdev, model, voltage);
-exit_unlock:
+ return err;
+}
+KBASE_EXPORT_TEST_API(kbase_get_real_power_locked);
+
+int kbase_get_real_power(struct devfreq *df, u32 *power,
+ unsigned long freq,
+ unsigned long voltage)
+{
+ int ret;
+ struct kbase_device *kbdev = dev_get_drvdata(&df->dev);
+
+ mutex_lock(&kbdev->ipa.lock);
+ ret = kbase_get_real_power_locked(kbdev, power, freq, voltage);
mutex_unlock(&kbdev->ipa.lock);
- return err;
+ return ret;
}
KBASE_EXPORT_TEST_API(kbase_get_real_power);
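The utilization scaling that moved into the IPA core is plain fixed-point
arithmetic: derate dynamic power by busy/total, then add static power. A
sketch with illustrative names:

    #include <stdint.h>

    static uint32_t scale_power(uint32_t dynamic_pw, uint32_t static_pw,
                                uint64_t time_busy, uint64_t time_idle)
    {
        uint64_t total = time_busy + time_idle;

        if (total == 0)
            total = 1;                /* guard against divide-by-zero */
        /* busy/total <= 1, so the scaled value fits in 32 bits. */
        return (uint32_t)(((uint64_t)dynamic_pw * time_busy) / total)
               + static_pw;
    }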
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
index 736399a6682cbf..4656ded0f0ee3b 100644
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,8 +27,17 @@
struct devfreq;
+/**
+ * struct kbase_ipa_model - Object describing a particular IPA model.
+ * @kbdev: pointer to kbase device
+ * @model_data: opaque pointer to model specific data, accessed
+ * only by model specific methods.
+ * @ops: pointer to object containing model specific methods.
+ * @params: head of the list of debugfs params added for model
+ * @missing_dt_node_warning: flag to limit the matching power model DT not found
+ * warning to once.
+ */
struct kbase_ipa_model {
- struct list_head link;
struct kbase_device *kbdev;
void *model_data;
struct kbase_ipa_model_ops *ops;
@@ -87,8 +96,6 @@ struct kbase_ipa_model_ops {
* get_dynamic_coeff() - calculate dynamic power coefficient
* @model: pointer to model
* @coeffp: pointer to return value location
- * @current_freq: frequency the GPU has been running at for the
- * previous sampling period.
*
* Calculate a dynamic power coefficient, with units pW/(Hz V^2), which
* is then scaled by the IPA framework according to the current OPP's
@@ -96,8 +103,7 @@ struct kbase_ipa_model_ops {
*
* Return: 0 on success, or an error code.
*/
- int (*get_dynamic_coeff)(struct kbase_ipa_model *model, u32 *coeffp,
- u32 current_freq);
+ int (*get_dynamic_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
/*
* get_static_coeff() - calculate static power coefficient
* @model: pointer to model
@@ -109,31 +115,79 @@ struct kbase_ipa_model_ops {
* Return: 0 on success, or an error code.
*/
int (*get_static_coeff)(struct kbase_ipa_model *model, u32 *coeffp);
- /* If false, the model's get_dynamic_coeff() method accounts for how
- * long the GPU was active over the sample period. If true, the
- * framework will scale the calculated power according to the
- * utilization stats recorded by devfreq in get_real_power(). */
- bool do_utilization_scaling_in_framework;
};
-/* Models can be registered only in the platform's platform_init_func call */
-int kbase_ipa_model_ops_register(struct kbase_device *kbdev,
- struct kbase_ipa_model_ops *new_model_ops);
-struct kbase_ipa_model *kbase_ipa_get_model(struct kbase_device *kbdev,
- const char *name);
-
+/**
+ * kbase_ipa_init - Initialize the IPA feature
+ * @kbdev: pointer to kbase device
+ *
+ * The simple IPA power model is initialized as the fallback model; if
+ * that initialization fails, IPA is not used.
+ * The name of the IPA model to use is then read from the device tree via
+ * the "ipa-model" property string. If that model is supported it is
+ * initialized; if its initialization fails, the simple model is used
+ * instead.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
int kbase_ipa_init(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_term - Terminate the IPA feature
+ * @kbdev: pointer to kbase device
+ *
+ * Both the simple IPA power model and the model selected via the device
+ * tree are terminated.
+ */
void kbase_ipa_term(struct kbase_device *kbdev);
-void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev);
-void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev);
+
+/**
+ * kbase_ipa_model_recalculate - Recalculate the model coefficients
+ * @model: pointer to the IPA model object, already initialized
+ *
+ * It must be called immediately after the model has been initialized,
+ * or whenever a model parameter changes, so that any coefficients
+ * derived from the parameters can be recalculated.
+ * It is a wrapper for the model-specific recalculate() method.
+ *
+ * Return: 0 on success, negative -errno on error
+ */
int kbase_ipa_model_recalculate(struct kbase_ipa_model *model);
+
+/**
+ * kbase_ipa_init_model - Initialize a particular IPA model
+ * @kbdev: pointer to kbase device
+ * @ops: pointer to object containing model specific methods.
+ *
+ * Initialize the model corresponding to the @ops pointer passed in.
+ * The init() method specified in @ops is called.
+ *
+ * Return: pointer to kbase_ipa_model on success, NULL on error
+ */
struct kbase_ipa_model *kbase_ipa_init_model(struct kbase_device *kbdev,
struct kbase_ipa_model_ops *ops);
+/**
+ * kbase_ipa_term_model - Terminate the particular IPA model
+ * @model: pointer to the IPA model object, already initialized
+ *
+ * Terminate the model using its term() method.
+ * Model-specific parameters are freed.
+ */
void kbase_ipa_term_model(struct kbase_ipa_model *model);
+/**
+ * kbase_ipa_protection_mode_switch_event - Inform IPA of the GPU's entry into
+ * protected mode
+ * @kbdev: pointer to kbase device
+ *
+ * Makes IPA aware of the GPU switching to protected mode.
+ */
+void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev);
+
extern struct kbase_ipa_model_ops kbase_g71_ipa_model_ops;
+extern struct kbase_ipa_model_ops kbase_g72_ipa_model_ops;
+extern struct kbase_ipa_model_ops kbase_tnox_ipa_model_ops;
+extern struct kbase_ipa_model_ops kbase_tgox_r1_ipa_model_ops;
-#if MALI_UNIT_TEST
/**
* kbase_get_real_power() - get the real power consumption of the GPU
* @df: dynamic voltage and frequency scaling information for the GPU.
@@ -141,14 +195,22 @@ extern struct kbase_ipa_model_ops kbase_g71_ipa_model_ops;
* @freq: a frequency, in HZ.
* @voltage: a voltage, in mV.
*
- * This function is only exposed for use by unit tests. The returned value
- * incorporates both static and dynamic power consumption.
+ * The returned value incorporates both static and dynamic power consumption.
*
* Return: 0 on success, or an error code.
*/
int kbase_get_real_power(struct devfreq *df, u32 *power,
unsigned long freq,
unsigned long voltage);
+
+#if MALI_UNIT_TEST
+/* Called by kbase_get_real_power() to invoke the power models.
+ * Must be called with kbdev->ipa.lock held.
+ * This function is only exposed for use by unit tests.
+ */
+int kbase_get_real_power_locked(struct kbase_device *kbdev, u32 *power,
+ unsigned long freq,
+ unsigned long voltage);
#endif /* MALI_UNIT_TEST */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
@@ -159,10 +221,7 @@ extern struct devfreq_cooling_power kbase_ipa_power_model_ops;
#else /* !(defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
-static inline void kbase_ipa_model_use_fallback_locked(struct kbase_device *kbdev)
-{ }
-
-static inline void kbase_ipa_model_use_configured_locked(struct kbase_device *kbdev)
+static inline void kbase_ipa_protection_mode_switch_event(struct kbase_device *kbdev)
{ }
#endif /* (defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)) */
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
index 1fe745b720ca9e..8437edc598febf 100644
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -188,6 +188,23 @@ void kbase_ipa_model_param_free_all(struct kbase_ipa_model *model)
}
}
+static int current_power_get(void *data, u64 *val)
+{
+ struct kbase_device *kbdev = data;
+ struct devfreq *df = kbdev->devfreq;
+ u32 power;
+
+ kbase_pm_context_active(kbdev);
+ kbase_get_real_power(df, &power,
+ kbdev->current_nominal_freq, (kbdev->current_voltage / 1000));
+ kbase_pm_context_idle(kbdev);
+
+ *val = power;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(current_power, current_power_get, NULL, "%llu\n");
+
static void kbase_ipa_model_debugfs_init(struct kbase_ipa_model *model)
{
struct list_head *it;
@@ -264,5 +281,8 @@ void kbase_ipa_debugfs_init(struct kbase_device *kbdev)
kbase_ipa_model_debugfs_init(kbdev->ipa.configured_model);
kbase_ipa_model_debugfs_init(kbdev->ipa.fallback_model);
+ debugfs_create_file("ipa_current_power", 0444,
+ kbdev->mali_debugfs_directory, kbdev, &current_power);
+
mutex_unlock(&kbdev->ipa.lock);
}
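The power query is exported through the standard read-only debugfs attribute
pattern; a minimal sketch of that pattern in isolation (the my_* names are
illustrative):

    #include <linux/debugfs.h>

    struct my_dev { u64 some_counter; };

    static int my_stat_get(void *data, u64 *val)
    {
        struct my_dev *dev = data;

        *val = dev->some_counter;
        return 0;
    }
    /* NULL setter makes the attribute read-only; "%llu\n" formats it. */
    DEFINE_DEBUGFS_ATTRIBUTE(my_stat_fops, my_stat_get, NULL, "%llu\n");

    static void my_debugfs_init(struct my_dev *dev, struct dentry *parent)
    {
        /* Mode 0444 matches the read-only semantics. */
        debugfs_create_file("my_stat", 0444, parent, dev, &my_stat_fops);
    }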
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
index 639ade266e1485..e684df4a6662c2 100644
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_simple.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -43,7 +43,7 @@ static int kbase_simple_power_model_get_dummy_temp(
struct thermal_zone_device *tz,
unsigned long *temp)
{
- *temp = ACCESS_ONCE(dummy_temp);
+ *temp = READ_ONCE(dummy_temp);
return 0;
}
@@ -54,7 +54,7 @@ static int kbase_simple_power_model_get_dummy_temp(
struct thermal_zone_device *tz,
int *temp)
{
- *temp = ACCESS_ONCE(dummy_temp);
+ *temp = READ_ONCE(dummy_temp);
return 0;
}
#endif
@@ -68,7 +68,7 @@ static int kbase_simple_power_model_get_dummy_temp(
void kbase_simple_power_model_set_dummy_temp(int temp)
{
- ACCESS_ONCE(dummy_temp) = temp;
+ WRITE_ONCE(dummy_temp, temp);
}
KBASE_EXPORT_TEST_API(kbase_simple_power_model_set_dummy_temp);
@@ -155,7 +155,7 @@ static int poll_temperature(void *data)
#endif
while (!kthread_should_stop()) {
- struct thermal_zone_device *tz = ACCESS_ONCE(model_data->gpu_tz);
+ struct thermal_zone_device *tz = READ_ONCE(model_data->gpu_tz);
if (tz) {
int ret;
@@ -170,9 +170,9 @@ static int poll_temperature(void *data)
temp = FALLBACK_STATIC_TEMPERATURE;
}
- ACCESS_ONCE(model_data->current_temperature) = temp;
+ WRITE_ONCE(model_data->current_temperature, temp);
- msleep_interruptible(ACCESS_ONCE(model_data->temperature_poll_interval_ms));
+ msleep_interruptible(READ_ONCE(model_data->temperature_poll_interval_ms));
}
return 0;
@@ -186,7 +186,7 @@ static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp)
u64 coeff_big;
int temp;
- temp = ACCESS_ONCE(model_data->current_temperature);
+ temp = READ_ONCE(model_data->current_temperature);
/* Range: 0 <= temp_scaling_factor < 2^24 */
temp_scaling_factor = calculate_temp_scaling_factor(model_data->ts,
@@ -203,8 +203,7 @@ static int model_static_coeff(struct kbase_ipa_model *model, u32 *coeffp)
return 0;
}
-static int model_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
- u32 current_freq)
+static int model_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp)
{
struct kbase_ipa_model_simple_data *model_data =
(struct kbase_ipa_model_simple_data *) model->model_data;
@@ -347,6 +346,5 @@ struct kbase_ipa_model_ops kbase_simple_ipa_model_ops = {
.term = &kbase_simple_power_model_term,
.get_dynamic_coeff = &model_dynamic_coeff,
.get_static_coeff = &model_static_coeff,
- .do_utilization_scaling_in_framework = true,
};
KBASE_EXPORT_TEST_API(kbase_simple_ipa_model_ops);
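The ACCESS_ONCE to READ_ONCE/WRITE_ONCE conversion above keeps the same
single-variable publish/consume pattern; in kernel style it looks roughly
like this (sketch only, the poller names are hypothetical):

    #include <linux/compiler.h>
    #include <linux/delay.h>
    #include <linux/kthread.h>

    static int shared_value;
    static int poll_interval_ms = 100;

    /* Writer: publish a new value without store tearing. */
    static void set_value(int v)
    {
        WRITE_ONCE(shared_value, v);
    }

    /* Reader: each iteration takes one coherent snapshot. */
    static int poll_thread(void *data)
    {
        while (!kthread_should_stop()) {
            int v = READ_ONCE(shared_value);

            (void)v;    /* consume the sample here */
            msleep_interruptible(READ_ONCE(poll_interval_ms));
        }
        return 0;
    }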
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
index b9a9e573607ee3..699252d0735b3c 100644
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,35 +21,15 @@
*/
#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase_ipa_debugfs.h"
-#if MALI_UNIT_TEST
-static ktime_t dummy_time;
+#define DEFAULT_SCALING_FACTOR 5
-/* Intercept calls to the kernel function using a macro */
-#ifdef ktime_get
-#undef ktime_get
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
-#define ktime_get() (ACCESS_ONCE(dummy_time))
-
-void kbase_ipa_set_dummy_time(ktime_t t)
-{
- ACCESS_ONCE(dummy_time) = t;
-}
-KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time);
-#else
-#define ktime_get() (READ_ONCE(dummy_time))
-
-void kbase_ipa_set_dummy_time(ktime_t t)
-{
- WRITE_ONCE(dummy_time, t);
-}
-KBASE_EXPORT_TEST_API(kbase_ipa_set_dummy_time);
-
-#endif
-
-#endif /* MALI_UNIT_TEST */
+/* If the value of GPU_ACTIVE is below this, use the simple model
+ * instead, to avoid extrapolating small amounts of counter data across
+ * large sample periods.
+ */
+#define DEFAULT_MIN_SAMPLE_CYCLES 10000
/**
* read_hwcnt() - read a counter value
@@ -99,10 +79,32 @@ s64 kbase_ipa_sum_all_shader_cores(
core_mask >>= 1;
}
- /* Range: -2^54 < ret < 2^54 */
- ret *= coeff;
+ /* Range: -2^54 < ret * coeff < 2^54 */
+ return ret * coeff;
+}
- return div_s64(ret, 1000000);
+s64 kbase_ipa_sum_all_memsys_blocks(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff, u32 counter)
+{
+ struct kbase_device *kbdev = model_data->kbdev;
+ const u32 num_blocks = kbdev->gpu_props.props.l2_props.num_l2_slices;
+ u32 base = 0;
+ s64 ret = 0;
+ u32 i;
+
+ for (i = 0; i < num_blocks; i++) {
+ /* 0 < counter_value < 2^27 */
+ u32 counter_value = kbase_ipa_read_hwcnt(model_data,
+ base + counter);
+
+ /* 0 < ret < 2^27 * max_num_memsys_blocks = 2^29 */
+ ret = kbase_ipa_add_saturate(ret, counter_value);
+ base += KBASE_IPA_NR_BYTES_PER_BLOCK;
+ }
+
+ /* Range: -2^51 < ret * coeff < 2^51 */
+ return ret * coeff;
}
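The accumulation relies on a saturating add so that a corrupt counter cannot
wrap the running total; kbase_ipa_add_saturate() is a saturating s64 addition
of roughly this shape (a sketch, not the driver's exact implementation):

    #include <stdint.h>

    /* Saturating s64 addition: clamp instead of wrapping on overflow. */
    static int64_t add_saturate(int64_t a, int64_t b)
    {
        if (a > 0 && b > 0 && a > INT64_MAX - b)
            return INT64_MAX;
        if (a < 0 && b < 0 && a < INT64_MIN - b)
            return INT64_MIN;
        return a + b;
    }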
s64 kbase_ipa_single_counter(
@@ -113,16 +115,49 @@ s64 kbase_ipa_single_counter(
const u32 counter_value = kbase_ipa_read_hwcnt(model_data, counter);
/* Range: -2^49 < ret < 2^49 */
- const s64 multiplied = (s64) counter_value * (s64) coeff;
+ return counter_value * (s64) coeff;
+}
+
+/**
+ * kbase_ipa_gpu_active - Inform IPA that GPU is now active
+ * @model_data: Pointer to model data
+ *
+ * This function may cause vinstr to become active.
+ */
+static void kbase_ipa_gpu_active(struct kbase_ipa_model_vinstr_data *model_data)
+{
+ struct kbase_device *kbdev = model_data->kbdev;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ if (!kbdev->ipa.vinstr_active) {
+ kbdev->ipa.vinstr_active = true;
+ kbase_vinstr_resume_client(model_data->vinstr_cli);
+ }
+}
+
+/**
+ * kbase_ipa_gpu_idle - Inform IPA that GPU is now idle
+ * @model_data: Pointer to model data
+ *
+ * This function may cause vinstr to become idle.
+ */
+static void kbase_ipa_gpu_idle(struct kbase_ipa_model_vinstr_data *model_data)
+{
+ struct kbase_device *kbdev = model_data->kbdev;
- /* Range: -2^29 < return < 2^29 */
- return div_s64(multiplied, 1000000);
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ if (kbdev->ipa.vinstr_active) {
+ kbase_vinstr_suspend_client(model_data->vinstr_cli);
+ kbdev->ipa.vinstr_active = false;
+ }
}
int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
{
struct kbase_device *kbdev = model_data->kbdev;
- struct kbase_uk_hwcnt_reader_setup setup;
+ struct kbase_ioctl_hwcnt_reader_setup setup;
size_t dump_size;
dump_size = kbase_vinstr_dump_size(kbdev);
@@ -145,90 +180,208 @@ int kbase_ipa_attach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
return -1;
}
- model_data->last_sample_read_time = ktime_get();
kbase_vinstr_hwc_clear(model_data->vinstr_cli);
+ kbdev->ipa.gpu_active_callback = kbase_ipa_gpu_active;
+ kbdev->ipa.gpu_idle_callback = kbase_ipa_gpu_idle;
+ kbdev->ipa.model_data = model_data;
+ kbdev->ipa.vinstr_active = false;
+	/* Suspend vinstr to ensure that the GPU stays powered off until
+	 * there is something to execute.
+	 */
+ kbase_vinstr_suspend_client(model_data->vinstr_cli);
+
return 0;
}
void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data)
{
+ struct kbase_device *kbdev = model_data->kbdev;
+
+ kbdev->ipa.gpu_active_callback = NULL;
+ kbdev->ipa.gpu_idle_callback = NULL;
+ kbdev->ipa.model_data = NULL;
+ kbdev->ipa.vinstr_active = false;
+
if (model_data->vinstr_cli)
kbase_vinstr_detach_client(model_data->vinstr_cli);
+
model_data->vinstr_cli = NULL;
kfree(model_data->vinstr_buffer);
model_data->vinstr_buffer = NULL;
}
-int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
- u32 current_freq)
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp)
{
struct kbase_ipa_model_vinstr_data *model_data =
(struct kbase_ipa_model_vinstr_data *)model->model_data;
+ struct kbase_device *kbdev = model_data->kbdev;
s64 energy = 0;
size_t i;
- ktime_t now = ktime_get();
- ktime_t time_since_last_sample =
- ktime_sub(now, model_data->last_sample_read_time);
- /* Range: 2^0 < time_since_last_sample_ms < 2^10 (1-1000ms) */
- s64 time_since_last_sample_ms = ktime_to_ms(time_since_last_sample);
- u64 coeff = 0;
- u64 num_cycles;
+ u64 coeff = 0, coeff_mul = 0;
+ u32 active_cycles;
int err = 0;
+ if (!kbdev->ipa.vinstr_active) {
+ err = -ENODATA;
+ goto err0; /* GPU powered off - no counters to collect */
+ }
+
err = kbase_vinstr_hwc_dump(model_data->vinstr_cli,
BASE_HWCNT_READER_EVENT_MANUAL);
if (err)
goto err0;
- model_data->last_sample_read_time = now;
+	/* Range: from 0 (GPU not used at all) up to the maximum sampling
+	 * interval (say 1 s) multiplied by the maximum GPU frequency (GPU
+	 * 100% utilized):
+	 * 0 <= active_cycles <= 1 s * ~2 GHz, i.e. 0 <= active_cycles < 2^31
+	 */
+ active_cycles = model_data->get_active_cycles(model_data);
+
+ if (active_cycles < (u32) max(model_data->min_sample_cycles, 0)) {
+ err = -ENODATA;
+ goto err0;
+ }
- /* Range of 'energy' is +/- 2^34 * number of IPA groups, so around
- * -2^38 < energy < 2^38 */
+ /* Range: 1 <= active_cycles < 2^31 */
+ active_cycles = max(1u, active_cycles);
+
+ /* Range of 'energy' is +/- 2^54 * number of IPA groups (~8), so around
+ * -2^57 < energy < 2^57
+ */
for (i = 0; i < model_data->groups_def_num; i++) {
const struct kbase_ipa_group *group = &model_data->groups_def[i];
- s32 coeff, group_energy;
-
- coeff = model_data->group_values[i];
- group_energy = group->op(model_data, coeff, group->counter_block_offset);
+ s32 coeff = model_data->group_values[i];
+ s64 group_energy = group->op(model_data, coeff,
+ group->counter_block_offset);
energy = kbase_ipa_add_saturate(energy, group_energy);
}
- /* Range: 0 <= coeff < 2^38 */
+ /* Range: 0 <= coeff < 2^57 */
if (energy > 0)
coeff = energy;
- /* Scale by user-specified factor and divide by 1000. But actually
- * cancel the division out, because we want the num_cycles in KHz and
- * don't want to lose precision. */
+ /* Range: 0 <= coeff < 2^57 (because active_cycles >= 1). However, this
+ * can be constrained further: Counter values can only be increased by
+ * a theoretical maximum of about 64k per clock cycle. Beyond this,
+ * we'd have to sample every 1ms to avoid them overflowing at the
+ * lowest clock frequency (say 100MHz). Therefore, we can write the
+ * range of 'coeff' in terms of active_cycles:
+ *
+ * coeff = SUM(coeffN * counterN * num_cores_for_counterN)
+ * coeff <= SUM(coeffN * counterN) * max_num_cores
+ * coeff <= num_IPA_groups * max_coeff * max_counter * max_num_cores
+ * (substitute max_counter = 2^16 * active_cycles)
+ * coeff <= num_IPA_groups * max_coeff * 2^16 * active_cycles * max_num_cores
+ * coeff <= 2^3 * 2^22 * 2^16 * active_cycles * 2^5
+ * coeff <= 2^46 * active_cycles
+ *
+ * So after the division: 0 <= coeff <= 2^46
+ */
+ coeff = div_u64(coeff, active_cycles);
+
+ /* Not all models were derived at the same reference voltage. Voltage
+ * scaling is done by multiplying by V^2, so we need to *divide* by
+ * Vref^2 here.
+ * Range: 0 <= coeff <= 2^49
+ */
+ coeff = div_u64(coeff * 1000, max(model_data->reference_voltage, 1));
+ /* Range: 0 <= coeff <= 2^52 */
+ coeff = div_u64(coeff * 1000, max(model_data->reference_voltage, 1));
+
+ /* Scale by user-specified integer factor.
+ * Range: 0 <= coeff_mul < 2^57
+ */
+ coeff_mul = coeff * model_data->scaling_factor;
+
+ /* The power models have results with units
+ * mW/(MHz V^2), i.e. nW/(Hz V^2). With precision of 1/1000000, this
+ * becomes fW/(Hz V^2), which are the units of coeff_mul. However,
+ * kbase_scale_dynamic_power() expects units of pW/(Hz V^2), so divide
+ * by 1000.
+ * Range: 0 <= coeff_mul < 2^47
+ */
+ coeff_mul = div_u64(coeff_mul, 1000u);
- /* Range: 0 < coeff < 2^53 */
- coeff = coeff * model_data->scaling_factor;
+err0:
+ /* Clamp to a sensible range - 2^16 gives about 14W at 400MHz/750mV */
+ *coeffp = clamp(coeff_mul, (u64) 0, (u64) 1 << 16);
+ return err;
+}
- if (time_since_last_sample_ms == 0) {
- time_since_last_sample_ms = 1;
- } else if (time_since_last_sample_ms < 0) {
- err = -ERANGE;
- goto err0;
+int kbase_ipa_vinstr_common_model_init(struct kbase_ipa_model *model,
+ const struct kbase_ipa_group *ipa_groups_def,
+ size_t ipa_group_size,
+ kbase_ipa_get_active_cycles_callback get_active_cycles,
+ s32 reference_voltage)
+{
+ int err = 0;
+ size_t i;
+ struct kbase_ipa_model_vinstr_data *model_data;
+
+ if (!model || !ipa_groups_def || !ipa_group_size || !get_active_cycles)
+ return -EINVAL;
+
+ model_data = kzalloc(sizeof(*model_data), GFP_KERNEL);
+ if (!model_data)
+ return -ENOMEM;
+
+ model_data->kbdev = model->kbdev;
+ model_data->groups_def = ipa_groups_def;
+ model_data->groups_def_num = ipa_group_size;
+ model_data->get_active_cycles = get_active_cycles;
+
+ model->model_data = (void *) model_data;
+
+ for (i = 0; i < model_data->groups_def_num; ++i) {
+ const struct kbase_ipa_group *group = &model_data->groups_def[i];
+
+ model_data->group_values[i] = group->default_value;
+ err = kbase_ipa_model_add_param_s32(model, group->name,
+ &model_data->group_values[i],
+ 1, false);
+ if (err)
+ goto exit;
}
- /* Range: 2^20 < num_cycles < 2^40 mCycles */
- num_cycles = (u64) current_freq * (u64) time_since_last_sample_ms;
- /* Range: 2^10 < num_cycles < 2^30 Cycles */
- num_cycles = div_u64(num_cycles, 1000000);
+ model_data->scaling_factor = DEFAULT_SCALING_FACTOR;
+ err = kbase_ipa_model_add_param_s32(model, "scale",
+ &model_data->scaling_factor,
+ 1, false);
+ if (err)
+ goto exit;
+
+ model_data->min_sample_cycles = DEFAULT_MIN_SAMPLE_CYCLES;
+ err = kbase_ipa_model_add_param_s32(model, "min_sample_cycles",
+ &model_data->min_sample_cycles,
+ 1, false);
+ if (err)
+ goto exit;
- /* num_cycles should never be 0 in _normal_ usage (because we expect
- * frequencies on the order of MHz and >10ms polling intervals), but
- * protect against divide-by-zero anyway. */
- if (num_cycles == 0)
- num_cycles = 1;
+ model_data->reference_voltage = reference_voltage;
+ err = kbase_ipa_model_add_param_s32(model, "reference_voltage",
+ &model_data->reference_voltage,
+ 1, false);
+ if (err)
+ goto exit;
- /* Range: 0 < coeff < 2^43 */
- coeff = div_u64(coeff, num_cycles);
+ err = kbase_ipa_attach_vinstr(model_data);
-err0:
- /* Clamp to a sensible range - 2^16 gives about 14W at 400MHz/750mV */
- *coeffp = clamp(coeff, (u64) 0, (u64) 1 << 16);
+exit:
+ if (err) {
+ kbase_ipa_model_param_free_all(model);
+ kfree(model_data);
+ }
return err;
}
+
+void kbase_ipa_vinstr_common_model_term(struct kbase_ipa_model *model)
+{
+ struct kbase_ipa_model_vinstr_data *model_data =
+ (struct kbase_ipa_model_vinstr_data *)model->model_data;
+
+ kbase_ipa_detach_vinstr(model_data);
+ kfree(model_data);
+}
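
To make the fixed-point bookkeeping in kbase_ipa_vinstr_dynamic_coeff() above
easier to follow, here is a minimal userspace sketch of the same unit
conversion. The sample values are invented for illustration; the driver
performs the equivalent steps with div_u64() on u64 arithmetic and clamps the
result exactly as in the hunk above.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t coeff = 1ULL << 44;       /* summed group energy (example) */
        uint32_t active_cycles = 10000000; /* GPU_ACTIVE cycles (example) */
        uint32_t vref_mv = 800;            /* reference voltage, in mV */
        uint32_t scale = 5;                /* user "scale" parameter */
        uint64_t coeff_mul;

        coeff = coeff / active_cycles;     /* energy -> energy per cycle */
        coeff = coeff * 1000 / vref_mv;    /* divide by Vref ... */
        coeff = coeff * 1000 / vref_mv;    /* ... twice, i.e. by Vref^2 */
        coeff_mul = coeff * scale;         /* user-specified scaling */
        coeff_mul /= 1000;                 /* fW/(Hz V^2) -> pW/(Hz V^2) */
        if (coeff_mul > (1ULL << 16))      /* clamp, as the driver does */
            coeff_mul = 1ULL << 16;

        printf("dynamic coeff = %llu pW/(Hz V^2)\n",
               (unsigned long long)coeff_mul);
        return 0;
    }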
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
index 7233642add7802..0deafaebbc72cd 100644
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_common.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,9 +25,6 @@
#include "mali_kbase.h"
-/* Maximum length for the name of an IPA group. */
-#define KBASE_IPA_MAX_GROUP_NAME_LEN 15
-
/* Maximum number of IPA groups for an IPA model. */
#define KBASE_IPA_MAX_GROUP_DEF_NUM 16
@@ -41,30 +38,45 @@
#define KBASE_IPA_NR_BYTES_PER_BLOCK \
(KBASE_IPA_NR_CNT_PER_BLOCK * KBASE_IPA_NR_BYTES_PER_CNT)
+struct kbase_ipa_model_vinstr_data;
+typedef u32 (*kbase_ipa_get_active_cycles_callback)(struct kbase_ipa_model_vinstr_data *);
/**
* struct kbase_ipa_model_vinstr_data - IPA context per device
* @kbdev: pointer to kbase device
* @groups_def: Array of IPA groups.
* @groups_def_num: Number of elements in the array of IPA groups.
+ * @get_active_cycles: Callback to return number of active cycles during
+ * counter sample period
* @vinstr_cli: vinstr client handle
* @vinstr_buffer: buffer to dump hardware counters onto
- * @last_sample_read_time: timestamp of last vinstr buffer read
- * @scaling_factor: user-specified power scaling factor. This is
- * interpreted as a fraction where the denominator is
- * 1000. Range approx 0.0-32.0:
- * 0 < scaling_factor < 2^15
+ * @reference_voltage: voltage, in mV, of the operating point used when
+ * deriving the power model coefficients. Range approx
+ * 0.1V - 5V (upper bound 2^13 mV ~= 8V): 2^7 <= reference_voltage <= 2^13
+ * @scaling_factor: User-specified power scaling factor. This is an
+ * integer, which is multiplied by the power coefficient
+ * just before OPP scaling.
+ * Range approx 0-32: 0 < scaling_factor < 2^5
+ * @min_sample_cycles: If the value of the GPU_ACTIVE counter (the number of
+ * cycles the GPU was working) is less than
+ * min_sample_cycles, the counter model will return an
+ * error, causing the IPA framework to approximate using
+ * the cached simple model results instead. This may be
+ * more accurate than extrapolating using a very small
+ * counter dump.
*/
struct kbase_ipa_model_vinstr_data {
struct kbase_device *kbdev;
s32 group_values[KBASE_IPA_MAX_GROUP_DEF_NUM];
const struct kbase_ipa_group *groups_def;
size_t groups_def_num;
+ kbase_ipa_get_active_cycles_callback get_active_cycles;
struct kbase_vinstr_client *vinstr_cli;
void *vinstr_buffer;
- ktime_t last_sample_read_time;
+ s32 reference_voltage;
s32 scaling_factor;
+ s32 min_sample_cycles;
};
/**
@@ -77,38 +89,57 @@ struct kbase_ipa_model_vinstr_data {
* @counter_block_offset: block offset in bytes of the counter used to calculate energy for IPA group
*/
struct kbase_ipa_group {
- char name[KBASE_IPA_MAX_GROUP_NAME_LEN + 1];
+ const char *name;
s32 default_value;
s64 (*op)(struct kbase_ipa_model_vinstr_data *, s32, u32);
u32 counter_block_offset;
};
/**
- * sum_all_shader_cores() - sum a counter over all cores
- * @model_data pointer to model data
- * @coeff model coefficient. Unity is ~2^20, so range approx
- * +/- 4.0: -2^22 < coeff < 2^22
- * @counter offset in bytes of the counter used to calculate energy for IPA group
+ * kbase_ipa_sum_all_shader_cores() - sum a counter over all cores
+ * @model_data: pointer to model data
+ * @coeff: model coefficient. Unity is ~2^20, so range approx
+ * +/- 4.0: -2^22 < coeff < 2^22
+ * @counter: offset in bytes of the counter used to calculate energy
+ * for IPA group
*
* Calculate energy estimation based on hardware counter `counter'
* across all shader cores.
*
- * Return: Sum of counter values. Range: -2^34 < ret < 2^34
+ * Return: Sum of counter values. Range: -2^54 < ret < 2^54
*/
s64 kbase_ipa_sum_all_shader_cores(
struct kbase_ipa_model_vinstr_data *model_data,
s32 coeff, u32 counter);
/**
- * sum_single_counter() - sum a single counter
- * @model_data pointer to model data
- * @coeff model coefficient. Unity is ~2^20, so range approx
- * +/- 4.0: -2^22 < coeff < 2^22
- * @counter offset in bytes of the counter used to calculate energy for IPA group
+ * kbase_ipa_sum_all_memsys_blocks() - sum a counter over all mem system blocks
+ * @model_data: pointer to model data
+ * @coeff: model coefficient. Unity is ~2^20, so range approx
+ * +/- 4.0: -2^22 < coeff < 2^22
+ * @counter: offset in bytes of the counter used to calculate energy
+ * for IPA group
+ *
+ * Calculate energy estimation based on hardware counter `counter' across all
+ * memory system blocks.
+ *
+ * Return: Sum of counter values. Range: -2^51 < ret < 2^51
+ */
+s64 kbase_ipa_sum_all_memsys_blocks(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff, u32 counter);
+
+/**
+ * kbase_ipa_single_counter() - sum a single counter
+ * @model_data: pointer to model data
+ * @coeff: model coefficient. Unity is ~2^20, so range approx
+ * +/- 4.0: -2^22 < coeff < 2^22
+ * @counter: offset in bytes of the counter used to calculate energy
+ * for IPA group
*
* Calculate energy estimation based on hardware counter `counter'.
*
- * Return: Counter value. Range: -2^34 < ret < 2^34
+ * Return: Counter value. Range: -2^49 < ret < 2^49
*/
s64 kbase_ipa_single_counter(
struct kbase_ipa_model_vinstr_data *model_data,
@@ -139,9 +170,6 @@ void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
* @model: pointer to instantiated model
* @coeffp: pointer to location where calculated power, in
* pW/(Hz V^2), is stored.
- * @current_freq: frequency the GPU has been running at over the sample
- * period. In Hz. Range: 10 MHz < 1GHz,
- * 2^20 < current_freq < 2^30
*
* This is a GPU-agnostic implementation of the get_dynamic_coeff()
* function of an IPA model. It relies on the model being populated
@@ -149,20 +177,39 @@ void kbase_ipa_detach_vinstr(struct kbase_ipa_model_vinstr_data *model_data);
*
* Return: 0 on success, or an error code.
*/
-int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp,
- u32 current_freq);
+int kbase_ipa_vinstr_dynamic_coeff(struct kbase_ipa_model *model, u32 *coeffp);
+
+/**
+ * kbase_ipa_vinstr_common_model_init() - initialize ipa power model
+ * @model: ipa power model to initialize
+ * @ipa_groups_def: array of ipa groups which sets coefficients for
+ * the corresponding counters used in the ipa model
+ * @ipa_group_size: number of elements in the array @ipa_groups_def
+ * @get_active_cycles: callback to return the number of cycles the GPU was
+ * active during the counter sample period.
+ * @reference_voltage: voltage, in mV, of the operating point used when
+ * deriving the power model coefficients.
+ *
+ * This initialization function performs initialization steps common
+ * for ipa models based on counter values. In each call, the model
+ * passes its specific coefficient values per ipa counter group via
+ * @ipa_groups_def array.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int kbase_ipa_vinstr_common_model_init(struct kbase_ipa_model *model,
+ const struct kbase_ipa_group *ipa_groups_def,
+ size_t ipa_group_size,
+ kbase_ipa_get_active_cycles_callback get_active_cycles,
+ s32 reference_voltage);
-#if MALI_UNIT_TEST
/**
- * kbase_ipa_set_dummy_time() - set a dummy monotonic time value
- * @t: a monotonic time value
+ * kbase_ipa_vinstr_common_model_term() - terminate ipa power model
+ * @model: ipa power model to terminate
*
- * This is only intended for use in unit tests, to ensure that the kernel time
- * values used by a power model are predictable. Deterministic behavior is
- * necessary to allow validation of the dynamic power values computed by the
- * model.
+ * This function performs all necessary steps to terminate ipa power model
+ * including clean up of resources allocated to hold model data.
*/
-void kbase_ipa_set_dummy_time(ktime_t t);
-#endif /* MALI_UNIT_TEST */
+void kbase_ipa_vinstr_common_model_term(struct kbase_ipa_model *model);
#endif /* _KBASE_IPA_VINSTR_COMMON_H_ */
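
As a usage sketch of the new common-init API: a hypothetical extra model could
register itself roughly as below. The group name, coefficient value and choice
of helpers are invented for illustration, and since the kbase_g7x_* helpers
are file-static, such a model would have to live alongside them in
mali_kbase_ipa_vinstr_g7x.c.

    /* Hypothetical model: one IPA group driven by the GPU_ACTIVE counter. */
    static const struct kbase_ipa_group example_groups[] = {
        {
            .name = "gpu_active",
            .default_value = 100000,  /* ~0.1 in the ~2^20 fixed point */
            .op = kbase_g7x_jm_single_counter,
            .counter_block_offset = JM_GPU_ACTIVE,
        },
    };

    static int example_power_model_init(struct kbase_ipa_model *model)
    {
        BUILD_BUG_ON(ARRAY_SIZE(example_groups) >
                     KBASE_IPA_MAX_GROUP_DEF_NUM);
        return kbase_ipa_vinstr_common_model_init(model, example_groups,
                ARRAY_SIZE(example_groups),
                kbase_g7x_get_active_cycles,
                800 /* reference voltage, mV */);
    }

On the teardown side, .term simply points at
kbase_ipa_vinstr_common_model_term, which detaches vinstr and frees the model
data, so per-GPU models no longer carry their own cleanup code.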
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c
deleted file mode 100644
index d07fb36d901eda..00000000000000
--- a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g71.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-#include <linux/thermal.h>
-
-#include "mali_kbase_ipa_vinstr_common.h"
-#include "mali_kbase.h"
-#include "mali_kbase_ipa_debugfs.h"
-
-
-/* Performance counter blocks base offsets */
-#define JM_BASE (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define TILER_BASE (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define MEMSYS_BASE (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define SC0_BASE_ONE_MEMSYS (3 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-#define SC0_BASE_TWO_MEMSYS (4 * KBASE_IPA_NR_BYTES_PER_BLOCK)
-
-/* JM counter block offsets */
-#define JM_GPU_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 6)
-
-/* Tiler counter block offsets */
-#define TILER_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 45)
-
-/* MEMSYS counter block offsets */
-#define MEMSYS_L2_ANY_LOOKUP (KBASE_IPA_NR_BYTES_PER_CNT * 25)
-
-/* SC counter block offsets */
-#define SC_FRAG_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 4)
-#define SC_EXEC_CORE_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 26)
-#define SC_EXEC_INSTR_COUNT (KBASE_IPA_NR_BYTES_PER_CNT * 28)
-#define SC_TEX_COORD_ISSUE (KBASE_IPA_NR_BYTES_PER_CNT * 40)
-#define SC_VARY_SLOT_32 (KBASE_IPA_NR_BYTES_PER_CNT * 50)
-#define SC_VARY_SLOT_16 (KBASE_IPA_NR_BYTES_PER_CNT * 51)
-#define SC_BEATS_RD_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 56)
-#define SC_BEATS_WR_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 61)
-#define SC_BEATS_WR_TIB (KBASE_IPA_NR_BYTES_PER_CNT * 62)
-
-/** Maximum number of cores for which a single Memory System block of performance counters is present. */
-#define KBASE_G71_SINGLE_MEMSYS_MAX_NUM_CORES ((u8)4)
-
-
-/**
- * get_jm_counter() - get performance counter offset inside the Job Manager block
- * @model_data: pointer to GPU model data.
- * @counter_block_offset: offset in bytes of the performance counter inside the Job Manager block.
- *
- * Return: Block offset in bytes of the required performance counter.
- */
-static u32 kbase_g71_power_model_get_jm_counter(struct kbase_ipa_model_vinstr_data *model_data,
- u32 counter_block_offset)
-{
- return JM_BASE + counter_block_offset;
-}
-
-/**
- * get_memsys_counter() - get peformance counter offset inside the Memory System block
- * @model_data: pointer to GPU model data.
- * @counter_block_offset: offset in bytes of the performance counter inside the (first) Memory System block.
- *
- * Return: Block offset in bytes of the required performance counter.
- */
-static u32 kbase_g71_power_model_get_memsys_counter(struct kbase_ipa_model_vinstr_data *model_data,
- u32 counter_block_offset)
-{
- /* The base address of Memory System performance counters is always the same, although their number
- * may vary based on the number of cores. For the moment it's ok to return a constant.
- */
- return MEMSYS_BASE + counter_block_offset;
-}
-
-/**
- * get_sc_counter() - get performance counter offset inside the Shader Cores block
- * @model_data: pointer to GPU model data.
- * @counter_block_offset: offset in bytes of the performance counter inside the (first) Shader Cores block.
- *
- * Return: Block offset in bytes of the required performance counter.
- */
-static u32 kbase_g71_power_model_get_sc_counter(struct kbase_ipa_model_vinstr_data *model_data,
- u32 counter_block_offset)
-{
- const u32 sc_base = model_data->kbdev->gpu_props.num_cores <= KBASE_G71_SINGLE_MEMSYS_MAX_NUM_CORES ?
- SC0_BASE_ONE_MEMSYS :
- SC0_BASE_TWO_MEMSYS;
-
- return sc_base + counter_block_offset;
-}
-
-/**
- * memsys_single_counter() - calculate energy for a single Memory System performance counter.
- * @model_data: pointer to GPU model data.
- * @coeff: default value of coefficient for IPA group.
- * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
- *
- * Return: Energy estimation for a single Memory System performance counter.
- */
-static s64 kbase_g71_memsys_single_counter(
- struct kbase_ipa_model_vinstr_data *model_data,
- s32 coeff,
- u32 counter_block_offset)
-{
- return kbase_ipa_single_counter(model_data, coeff,
- kbase_g71_power_model_get_memsys_counter(model_data, counter_block_offset));
-}
-
-/**
- * sum_all_shader_cores() - calculate energy for a Shader Cores performance counter for all cores.
- * @model_data: pointer to GPU model data.
- * @coeff: default value of coefficient for IPA group.
- * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
- *
- * Return: Energy estimation for a Shader Cores performance counter for all cores.
- */
-static s64 kbase_g71_sum_all_shader_cores(
- struct kbase_ipa_model_vinstr_data *model_data,
- s32 coeff,
- u32 counter_block_offset)
-{
- return kbase_ipa_sum_all_shader_cores(model_data, coeff,
- kbase_g71_power_model_get_sc_counter(model_data, counter_block_offset));
-}
-
-/**
- * jm_single_counter() - calculate energy for a single Job Manager performance counter.
- * @model_data: pointer to GPU model data.
- * @coeff: default value of coefficient for IPA group.
- * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
- *
- * Return: Energy estimation for a single Job Manager performance counter.
- */
-static s64 kbase_g71_jm_single_counter(
- struct kbase_ipa_model_vinstr_data *model_data,
- s32 coeff,
- u32 counter_block_offset)
-{
- return kbase_ipa_single_counter(model_data, coeff,
- kbase_g71_power_model_get_jm_counter(model_data, counter_block_offset));
-}
-
-/** Table of IPA group definitions.
- *
- * For each IPA group, this table defines a function to access the given performance block counter (or counters,
- * if the operation needs to be iterated on multiple blocks) and calculate energy estimation.
- */
-static const struct kbase_ipa_group ipa_groups_def[] = {
- {
- .name = "l2_access",
- .default_value = 526300,
- .op = kbase_g71_memsys_single_counter,
- .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
- },
- {
- .name = "exec_instr_count",
- .default_value = 301100,
- .op = kbase_g71_sum_all_shader_cores,
- .counter_block_offset = SC_EXEC_INSTR_COUNT,
- },
- {
- .name = "tex_issue",
- .default_value = 197400,
- .op = kbase_g71_sum_all_shader_cores,
- .counter_block_offset = SC_TEX_COORD_ISSUE,
- },
- {
- .name = "tile_wb",
- .default_value = -156400,
- .op = kbase_g71_sum_all_shader_cores,
- .counter_block_offset = SC_BEATS_WR_TIB,
- },
- {
- .name = "gpu_active",
- .default_value = 115800,
- .op = kbase_g71_jm_single_counter,
- .counter_block_offset = JM_GPU_ACTIVE,
- },
-};
-
-static int kbase_g71_power_model_init(struct kbase_ipa_model *model)
-{
- int i, err = 0;
- struct kbase_ipa_model_vinstr_data *model_data;
-
- model_data = kzalloc(sizeof(*model_data), GFP_KERNEL);
- if (!model_data)
- return -ENOMEM;
-
- model_data->kbdev = model->kbdev;
- model_data->groups_def = ipa_groups_def;
- BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def) > KBASE_IPA_MAX_GROUP_DEF_NUM);
- model_data->groups_def_num = ARRAY_SIZE(ipa_groups_def);
-
- model->model_data = (void *) model_data;
-
- for (i = 0; i < ARRAY_SIZE(ipa_groups_def); ++i) {
- const struct kbase_ipa_group *group = &ipa_groups_def[i];
-
- model_data->group_values[i] = group->default_value;
- err = kbase_ipa_model_add_param_s32(model, group->name,
- &model_data->group_values[i],
- 1, false);
- if (err)
- goto exit;
- }
-
- model_data->scaling_factor = 5;
- err = kbase_ipa_model_add_param_s32(model, "scale",
- &model_data->scaling_factor,
- 1, false);
- if (err)
- goto exit;
-
- err = kbase_ipa_attach_vinstr(model_data);
-
-exit:
- if (err) {
- kbase_ipa_model_param_free_all(model);
- kfree(model_data);
- }
- return err;
-}
-
-static void kbase_g71_power_model_term(struct kbase_ipa_model *model)
-{
- struct kbase_ipa_model_vinstr_data *model_data =
- (struct kbase_ipa_model_vinstr_data *)model->model_data;
-
- kbase_ipa_detach_vinstr(model_data);
- kfree(model_data);
-}
-
-
-struct kbase_ipa_model_ops kbase_g71_ipa_model_ops = {
- .name = "mali-g71-power-model",
- .init = kbase_g71_power_model_init,
- .term = kbase_g71_power_model_term,
- .get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff,
- .do_utilization_scaling_in_framework = false,
-};
-KBASE_EXPORT_TEST_API(kbase_g71_ipa_model_ops);
diff --git a/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c
new file mode 100644
index 00000000000000..83660334d0834b
--- /dev/null
+++ b/drivers/gpu/arm/midgard/ipa/mali_kbase_ipa_vinstr_g7x.c
@@ -0,0 +1,340 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+#include <linux/thermal.h>
+
+#include "mali_kbase_ipa_vinstr_common.h"
+#include "mali_kbase.h"
+#include "mali_kbase_ipa_debugfs.h"
+
+
+/* Performance counter blocks base offsets */
+#define JM_BASE (0 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define TILER_BASE (1 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+#define MEMSYS_BASE (2 * KBASE_IPA_NR_BYTES_PER_BLOCK)
+
+/* JM counter block offsets */
+#define JM_GPU_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 6)
+
+/* Tiler counter block offsets */
+#define TILER_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 45)
+
+/* MEMSYS counter block offsets */
+#define MEMSYS_L2_ANY_LOOKUP (KBASE_IPA_NR_BYTES_PER_CNT * 25)
+
+/* SC counter block offsets */
+#define SC_FRAG_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 4)
+#define SC_EXEC_CORE_ACTIVE (KBASE_IPA_NR_BYTES_PER_CNT * 26)
+#define SC_EXEC_INSTR_COUNT (KBASE_IPA_NR_BYTES_PER_CNT * 28)
+#define SC_TEX_COORD_ISSUE (KBASE_IPA_NR_BYTES_PER_CNT * 40)
+#define SC_TEX_TFCH_NUM_OPERATIONS (KBASE_IPA_NR_BYTES_PER_CNT * 42)
+#define SC_VARY_INSTR (KBASE_IPA_NR_BYTES_PER_CNT * 49)
+#define SC_VARY_SLOT_32 (KBASE_IPA_NR_BYTES_PER_CNT * 50)
+#define SC_VARY_SLOT_16 (KBASE_IPA_NR_BYTES_PER_CNT * 51)
+#define SC_BEATS_RD_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 56)
+#define SC_BEATS_WR_LSC (KBASE_IPA_NR_BYTES_PER_CNT * 61)
+#define SC_BEATS_WR_TIB (KBASE_IPA_NR_BYTES_PER_CNT * 62)
+
+/**
+ * kbase_g7x_power_model_get_jm_counter() - get performance counter offset inside the Job Manager block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the Job Manager block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_jm_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ return JM_BASE + counter_block_offset;
+}
+
+/**
+ * kbase_g7x_power_model_get_memsys_counter() - get performance counter offset inside the Memory System block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the (first) Memory System block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_memsys_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ /* The base address of Memory System performance counters is always the same, although their number
+ * may vary based on the number of cores. For the moment it's ok to return a constant.
+ */
+ return MEMSYS_BASE + counter_block_offset;
+}
+
+/**
+ * kbase_g7x_power_model_get_sc_counter() - get performance counter offset inside the Shader Cores block
+ * @model_data: pointer to GPU model data.
+ * @counter_block_offset: offset in bytes of the performance counter inside the (first) Shader Cores block.
+ *
+ * Return: Block offset in bytes of the required performance counter.
+ */
+static u32 kbase_g7x_power_model_get_sc_counter(struct kbase_ipa_model_vinstr_data *model_data,
+ u32 counter_block_offset)
+{
+ const u32 sc_base = MEMSYS_BASE +
+ (model_data->kbdev->gpu_props.props.l2_props.num_l2_slices *
+ KBASE_IPA_NR_BYTES_PER_BLOCK);
+
+ return sc_base + counter_block_offset;
+}
+
+/**
+ * kbase_g7x_sum_all_memsys_blocks() - calculate energy for a Memory System performance counter across all blocks.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @offset: offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for the Memory System performance counter summed across all memory system blocks.
+ */
+static s64 kbase_g7x_sum_all_memsys_blocks(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff,
+ u32 offset)
+{
+ u32 counter;
+
+ counter = kbase_g7x_power_model_get_memsys_counter(model_data, offset);
+ return kbase_ipa_sum_all_memsys_blocks(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_sum_all_shader_cores() - calculate energy for a Shader Cores performance counter for all cores.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a Shader Cores performance counter for all cores.
+ */
+static s64 kbase_g7x_sum_all_shader_cores(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff,
+ u32 counter_block_offset)
+{
+ u32 counter;
+
+ counter = kbase_g7x_power_model_get_sc_counter(model_data,
+ counter_block_offset);
+ return kbase_ipa_sum_all_shader_cores(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_jm_single_counter() - calculate energy for a single Job Manager performance counter.
+ * @model_data: pointer to GPU model data.
+ * @coeff: default value of coefficient for IPA group.
+ * @counter_block_offset: offset in bytes of the counter inside the block it belongs to.
+ *
+ * Return: Energy estimation for a single Job Manager performance counter.
+ */
+static s64 kbase_g7x_jm_single_counter(
+ struct kbase_ipa_model_vinstr_data *model_data,
+ s32 coeff,
+ u32 counter_block_offset)
+{
+ u32 counter;
+
+ counter = kbase_g7x_power_model_get_jm_counter(model_data,
+ counter_block_offset);
+ return kbase_ipa_single_counter(model_data, coeff, counter);
+}
+
+/**
+ * kbase_g7x_get_active_cycles() - return the GPU_ACTIVE counter
+ * @model_data: pointer to GPU model data.
+ *
+ * Return: the number of cycles the GPU was active during the counter sampling
+ * period.
+ */
+static u32 kbase_g7x_get_active_cycles(
+ struct kbase_ipa_model_vinstr_data *model_data)
+{
+ u32 counter = kbase_g7x_power_model_get_jm_counter(model_data, JM_GPU_ACTIVE);
+
+ /* Counters are only 32-bit, so we can safely multiply by 1 then cast
+ * the 64-bit result back to a u32.
+ */
+ return kbase_ipa_single_counter(model_data, 1, counter);
+}
+
+/** Table of IPA group definitions.
+ *
+ * For each IPA group, this table defines a function to access the given performance block counter (or counters,
+ * if the operation needs to be iterated on multiple blocks) and calculate energy estimation.
+ */
+static const struct kbase_ipa_group ipa_groups_def_g71[] = {
+ {
+ .name = "l2_access",
+ .default_value = 526300,
+ .op = kbase_g7x_sum_all_memsys_blocks,
+ .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+ },
+ {
+ .name = "exec_instr_count",
+ .default_value = 301100,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_COUNT,
+ },
+ {
+ .name = "tex_issue",
+ .default_value = 197400,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_TEX_COORD_ISSUE,
+ },
+ {
+ .name = "tile_wb",
+ .default_value = -156400,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_BEATS_WR_TIB,
+ },
+ {
+ .name = "gpu_active",
+ .default_value = 115800,
+ .op = kbase_g7x_jm_single_counter,
+ .counter_block_offset = JM_GPU_ACTIVE,
+ },
+};
+
+static const struct kbase_ipa_group ipa_groups_def_g72[] = {
+ {
+ .name = "l2_access",
+ .default_value = 393000,
+ .op = kbase_g7x_sum_all_memsys_blocks,
+ .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+ },
+ {
+ .name = "exec_instr_count",
+ .default_value = 227000,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_COUNT,
+ },
+ {
+ .name = "tex_issue",
+ .default_value = 181900,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_TEX_COORD_ISSUE,
+ },
+ {
+ .name = "tile_wb",
+ .default_value = -120200,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_BEATS_WR_TIB,
+ },
+ {
+ .name = "gpu_active",
+ .default_value = 133100,
+ .op = kbase_g7x_jm_single_counter,
+ .counter_block_offset = JM_GPU_ACTIVE,
+ },
+};
+
+static const struct kbase_ipa_group ipa_groups_def_tnox[] = {
+ {
+ .name = "gpu_active",
+ .default_value = 122000,
+ .op = kbase_g7x_jm_single_counter,
+ .counter_block_offset = JM_GPU_ACTIVE,
+ },
+ {
+ .name = "exec_instr_count",
+ .default_value = 488900,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_COUNT,
+ },
+ {
+ .name = "vary_instr",
+ .default_value = 212100,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_VARY_INSTR,
+ },
+ {
+ .name = "tex_tfch_num_operations",
+ .default_value = 288000,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+ },
+ {
+ .name = "l2_access",
+ .default_value = 378100,
+ .op = kbase_g7x_sum_all_memsys_blocks,
+ .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+ },
+};
+
+static const struct kbase_ipa_group ipa_groups_def_tgox_r1[] = {
+ {
+ .name = "gpu_active",
+ .default_value = 224200,
+ .op = kbase_g7x_jm_single_counter,
+ .counter_block_offset = JM_GPU_ACTIVE,
+ },
+ {
+ .name = "exec_instr_count",
+ .default_value = 384700,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_EXEC_INSTR_COUNT,
+ },
+ {
+ .name = "vary_instr",
+ .default_value = 271900,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_VARY_INSTR,
+ },
+ {
+ .name = "tex_tfch_num_operations",
+ .default_value = 477700,
+ .op = kbase_g7x_sum_all_shader_cores,
+ .counter_block_offset = SC_TEX_TFCH_NUM_OPERATIONS,
+ },
+ {
+ .name = "l2_access",
+ .default_value = 551400,
+ .op = kbase_g7x_sum_all_memsys_blocks,
+ .counter_block_offset = MEMSYS_L2_ANY_LOOKUP,
+ },
+};
+
+#define STANDARD_POWER_MODEL(gpu, reference_voltage) \
+ static int kbase_ ## gpu ## _power_model_init(\
+ struct kbase_ipa_model *model) \
+ { \
+ BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def_ ## gpu) > \
+ KBASE_IPA_MAX_GROUP_DEF_NUM); \
+ return kbase_ipa_vinstr_common_model_init(model, \
+ ipa_groups_def_ ## gpu, \
+ ARRAY_SIZE(ipa_groups_def_ ## gpu), \
+ kbase_g7x_get_active_cycles, \
+ (reference_voltage)); \
+ } \
+ struct kbase_ipa_model_ops kbase_ ## gpu ## _ipa_model_ops = { \
+ .name = "mali-" #gpu "-power-model", \
+ .init = kbase_ ## gpu ## _power_model_init, \
+ .term = kbase_ipa_vinstr_common_model_term, \
+ .get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff, \
+ }; \
+ KBASE_EXPORT_TEST_API(kbase_ ## gpu ## _ipa_model_ops)
+
+STANDARD_POWER_MODEL(g71, 800);
+STANDARD_POWER_MODEL(g72, 800);
+STANDARD_POWER_MODEL(tnox, 800);
+STANDARD_POWER_MODEL(tgox_r1, 1000);
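
For reference, STANDARD_POWER_MODEL(g71, 800) above expands to approximately
the following (whitespace rearranged for readability):

    static int kbase_g71_power_model_init(struct kbase_ipa_model *model)
    {
        BUILD_BUG_ON(ARRAY_SIZE(ipa_groups_def_g71) >
                     KBASE_IPA_MAX_GROUP_DEF_NUM);
        return kbase_ipa_vinstr_common_model_init(model,
                ipa_groups_def_g71, ARRAY_SIZE(ipa_groups_def_g71),
                kbase_g7x_get_active_cycles, (800));
    }

    struct kbase_ipa_model_ops kbase_g71_ipa_model_ops = {
        .name = "mali-g71-power-model",
        .init = kbase_g71_power_model_init,
        .term = kbase_ipa_vinstr_common_model_term,
        .get_dynamic_coeff = kbase_ipa_vinstr_dynamic_coeff,
    };
    KBASE_EXPORT_TEST_API(kbase_g71_ipa_model_ops);

Note that, unlike the deleted g71-specific file, the generated ops no longer
set .do_utilization_scaling_in_framework, and each per-GPU init function
reduces to a groups table, a cycle-count callback and a reference voltage.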
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
index e0eebd872eb9d5..10da0c58e9ebd3 100644
--- a/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_features.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -31,7 +31,6 @@
enum base_hw_feature {
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
- BASE_HW_FEATURE_33BIT_VA,
BASE_HW_FEATURE_XAFFINITY,
BASE_HW_FEATURE_OUT_OF_ORDER_EXEC,
BASE_HW_FEATURE_MRT,
@@ -85,7 +84,6 @@ static const enum base_hw_feature base_hw_features_t62x[] = {
};
static const enum base_hw_feature base_hw_features_t72x[] = {
- BASE_HW_FEATURE_33BIT_VA,
BASE_HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
BASE_HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
BASE_HW_FEATURE_INTERPIPE_REG_ALIASING,
@@ -139,7 +137,6 @@ static const enum base_hw_feature base_hw_features_tFxx[] = {
};
static const enum base_hw_feature base_hw_features_t83x[] = {
- BASE_HW_FEATURE_33BIT_VA,
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
BASE_HW_FEATURE_XAFFINITY,
@@ -162,7 +159,6 @@ static const enum base_hw_feature base_hw_features_t83x[] = {
};
static const enum base_hw_feature base_hw_features_t82x[] = {
- BASE_HW_FEATURE_33BIT_VA,
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
BASE_HW_FEATURE_XAFFINITY,
@@ -238,7 +234,6 @@ static const enum base_hw_feature base_hw_features_tHEx[] = {
};
static const enum base_hw_feature base_hw_features_tSIx[] = {
- BASE_HW_FEATURE_33BIT_VA,
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
BASE_HW_FEATURE_XAFFINITY,
@@ -266,7 +261,6 @@ static const enum base_hw_feature base_hw_features_tSIx[] = {
};
static const enum base_hw_feature base_hw_features_tDVx[] = {
- BASE_HW_FEATURE_33BIT_VA,
BASE_HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
BASE_HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
BASE_HW_FEATURE_XAFFINITY,
diff --git a/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
index 7b70e7a82b6f6e..19ffd6996ba757 100644
--- a/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
+++ b/drivers/gpu/arm/midgard/mali_base_hwconfig_issues.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -97,6 +97,7 @@ enum base_hw_issue {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T720_1386,
BASE_HW_ISSUE_T76X_26,
BASE_HW_ISSUE_T76X_1909,
@@ -111,6 +112,7 @@ enum base_hw_issue {
BASE_HW_ISSUE_T76X_3964,
BASE_HW_ISSUE_T76X_3966,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_7940,
BASE_HW_ISSUE_TMIX_8042,
@@ -123,6 +125,8 @@ enum base_hw_issue {
GPUCORE_1619,
BASE_HW_ISSUE_TSIX_1116,
BASE_HW_ISSUE_TMIX_8438,
+ BASE_HW_ISSUE_TNOX_1194,
+ BASE_HW_ISSUE_TGOX_R1_1234,
BASE_HW_ISSUE_END
};
@@ -189,6 +193,7 @@ static const enum base_hw_issue base_hw_issues_t60x_r0p0_15dev0[] = {
BASE_HW_ISSUE_11035,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_3964,
GPUCORE_1619,
@@ -230,6 +235,7 @@ static const enum base_hw_issue base_hw_issues_t60x_r0p0_eac[] = {
BASE_HW_ISSUE_11035,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_3964,
BASE_HW_ISSUE_TMIX_8438,
@@ -267,6 +273,7 @@ static const enum base_hw_issue base_hw_issues_t60x_r0p1[] = {
BASE_HW_ISSUE_11035,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
BASE_HW_ISSUE_T76X_3964,
@@ -302,6 +309,7 @@ static const enum base_hw_issue base_hw_issues_t62x_r0p1[] = {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
BASE_HW_ISSUE_TMIX_8438,
@@ -326,6 +334,7 @@ static const enum base_hw_issue base_hw_issues_t62x_r1p0[] = {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
BASE_HW_ISSUE_T76X_3964,
@@ -349,6 +358,7 @@ static const enum base_hw_issue base_hw_issues_t62x_r1p1[] = {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
BASE_HW_ISSUE_TMIX_8438,
@@ -527,6 +537,7 @@ static const enum base_hw_issue base_hw_issues_t72x_r0p0[] = {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
BASE_HW_ISSUE_T76X_3964,
@@ -547,6 +558,7 @@ static const enum base_hw_issue base_hw_issues_t72x_r1p0[] = {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T720_1386,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
@@ -568,6 +580,7 @@ static const enum base_hw_issue base_hw_issues_t72x_r1p1[] = {
BASE_HW_ISSUE_11042,
BASE_HW_ISSUE_11051,
BASE_HW_ISSUE_11054,
+ BASE_HW_ISSUE_11056,
BASE_HW_ISSUE_T720_1386,
BASE_HW_ISSUE_T76X_1909,
BASE_HW_ISSUE_T76X_1963,
@@ -834,6 +847,7 @@ static const enum base_hw_issue base_hw_issues_t83x_r0p1[] = {
BASE_HW_ISSUE_T76X_3953,
BASE_HW_ISSUE_T76X_3960,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_END
@@ -854,6 +868,7 @@ static const enum base_hw_issue base_hw_issues_t83x_r1p0[] = {
BASE_HW_ISSUE_T76X_3953,
BASE_HW_ISSUE_T76X_3960,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_END
@@ -869,6 +884,7 @@ static const enum base_hw_issue base_hw_issues_model_t83x[] = {
BASE_HW_ISSUE_T76X_3793,
BASE_HW_ISSUE_T76X_3964,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
GPUCORE_1619,
BASE_HW_ISSUE_TMIX_8438,
@@ -892,6 +908,7 @@ static const enum base_hw_issue base_hw_issues_t82x_r0p0[] = {
BASE_HW_ISSUE_T76X_3960,
BASE_HW_ISSUE_T76X_3964,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_END
@@ -913,6 +930,7 @@ static const enum base_hw_issue base_hw_issues_t82x_r0p1[] = {
BASE_HW_ISSUE_T76X_3953,
BASE_HW_ISSUE_T76X_3960,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_END
@@ -933,6 +951,7 @@ static const enum base_hw_issue base_hw_issues_t82x_r1p0[] = {
BASE_HW_ISSUE_T76X_3953,
BASE_HW_ISSUE_T76X_3960,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
BASE_HW_ISSUE_TMIX_8438,
BASE_HW_ISSUE_END
@@ -947,6 +966,7 @@ static const enum base_hw_issue base_hw_issues_model_t82x[] = {
BASE_HW_ISSUE_T76X_3700,
BASE_HW_ISSUE_T76X_3793,
BASE_HW_ISSUE_T76X_3979,
+ BASE_HW_ISSUE_T83X_817,
BASE_HW_ISSUE_TMIX_7891,
GPUCORE_1619,
BASE_HW_ISSUE_END
@@ -1123,6 +1143,7 @@ static const enum base_hw_issue base_hw_issues_tNOx_r0p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_TNOX_1194,
BASE_HW_ISSUE_END
};
@@ -1138,6 +1159,15 @@ static const enum base_hw_issue base_hw_issues_tGOx_r0p0[] = {
BASE_HW_ISSUE_9435,
BASE_HW_ISSUE_TMIX_8133,
BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_TNOX_1194,
+ BASE_HW_ISSUE_END
+};
+
+static const enum base_hw_issue base_hw_issues_tGOx_r1p0[] = {
+ BASE_HW_ISSUE_9435,
+ BASE_HW_ISSUE_TMIX_8133,
+ BASE_HW_ISSUE_TSIX_1116,
+ BASE_HW_ISSUE_TGOX_R1_1234,
BASE_HW_ISSUE_END
};
diff --git a/drivers/gpu/arm/midgard/mali_base_kernel.h b/drivers/gpu/arm/midgard/mali_base_kernel.h
index e6b568fba520a1..cc44ff225fa8e3 100644
--- a/drivers/gpu/arm/midgard/mali_base_kernel.h
+++ b/drivers/gpu/arm/midgard/mali_base_kernel.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -127,18 +127,19 @@ typedef u32 base_mem_alloc_flags;
*/
#define BASE_MEM_PROT_GPU_EX ((base_mem_alloc_flags)1 << 4)
- /* BASE_MEM_HINT flags have been removed, but their values are reserved
- * for backwards compatibility with older user-space drivers. The values
- * can be re-used once support for r5p0 user-space drivers is removed,
- * presumably in r7p0.
- *
- * RESERVED: (1U << 5)
- * RESERVED: (1U << 6)
- * RESERVED: (1U << 7)
- * RESERVED: (1U << 8)
- */
-#define BASE_MEM_RESERVED_BIT_5 ((base_mem_alloc_flags)1 << 5)
-#define BASE_MEM_RESERVED_BIT_6 ((base_mem_alloc_flags)1 << 6)
+/* Will be permanently mapped in kernel space.
+ * Flag is only allowed on allocations originating from kbase.
+ */
+#define BASE_MEM_PERMANENT_KERNEL_MAPPING ((base_mem_alloc_flags)1 << 5)
+
+/* The allocation will completely reside within the same 4GB chunk in the GPU
+ * virtual space.
+ * Since this flag is primarily required only for the TLS memory which will
+ * not be used to contain executable code and also not used for Tiler heap,
+ * it can't be used along with BASE_MEM_PROT_GPU_EX and TILER_ALIGN_TOP flags.
+ */
+#define BASE_MEM_GPU_VA_SAME_4GB_PAGE ((base_mem_alloc_flags)1 << 6)
+
#define BASE_MEM_RESERVED_BIT_7 ((base_mem_alloc_flags)1 << 7)
#define BASE_MEM_RESERVED_BIT_8 ((base_mem_alloc_flags)1 << 8)
@@ -192,6 +193,7 @@ typedef u32 base_mem_alloc_flags;
* Do not remove, use the next unreserved bit for new flags
*/
#define BASE_MEM_RESERVED_BIT_19 ((base_mem_alloc_flags)1 << 19)
+#define BASE_MEM_MAYBE_RESERVED_BIT_19 BASE_MEM_RESERVED_BIT_19
/**
* Memory starting from the end of the initial commit is aligned to 'extent'
@@ -200,11 +202,20 @@ typedef u32 base_mem_alloc_flags;
*/
#define BASE_MEM_TILER_ALIGN_TOP ((base_mem_alloc_flags)1 << 20)
+/* Should be uncached on the GPU. Works only for GPUs using AARCH64 mmu mode.
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ * The 3 shareability flags will be ignored for GPU uncached memory.
+ * If used while importing USER_BUFFER type memory, then the import will fail
+ * if the memory is not aligned to GPU and CPU cache line width.
+ */
+#define BASE_MEM_UNCACHED_GPU ((base_mem_alloc_flags)1 << 21)
+
/* Number of bits used as flags for base memory management
*
* Must be kept in sync with the base_mem_alloc_flags flags
*/
-#define BASE_MEM_FLAGS_NR_BITS 21
+#define BASE_MEM_FLAGS_NR_BITS 22
/* A mask for all output bits, excluding IN/OUT bits.
*/
@@ -226,9 +237,13 @@ typedef u32 base_mem_alloc_flags;
/* A mask of all currently reserved flags
*/
#define BASE_MEM_FLAGS_RESERVED \
- (BASE_MEM_RESERVED_BIT_5 | BASE_MEM_RESERVED_BIT_6 | \
- BASE_MEM_RESERVED_BIT_7 | BASE_MEM_RESERVED_BIT_8 | \
- BASE_MEM_RESERVED_BIT_19)
+ (BASE_MEM_RESERVED_BIT_7 | BASE_MEM_RESERVED_BIT_8 | \
+ BASE_MEM_MAYBE_RESERVED_BIT_19)
+
+/* A mask of all the flags which are only valid for allocations within kbase,
+ * and may not be passed from user space.
+ */
+#define BASE_MEM_FLAGS_KERNEL_ONLY (BASE_MEM_PERMANENT_KERNEL_MAPPING)
/* A mask of all the flags that can be returned via the base_mem_get_flags()
* interface.
@@ -236,13 +251,13 @@ typedef u32 base_mem_alloc_flags;
#define BASE_MEM_FLAGS_QUERYABLE \
(BASE_MEM_FLAGS_INPUT_MASK & ~(BASE_MEM_SAME_VA | \
BASE_MEM_COHERENT_SYSTEM_REQUIRED | BASE_MEM_DONT_NEED | \
- BASE_MEM_IMPORT_SHARED | BASE_MEM_FLAGS_RESERVED))
+ BASE_MEM_IMPORT_SHARED | BASE_MEM_FLAGS_RESERVED | \
+ BASE_MEM_FLAGS_KERNEL_ONLY))
/**
* enum base_mem_import_type - Memory types supported by @a base_mem_import
*
* @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
- * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id.
* @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
* @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
* base_mem_import_user_buffer
@@ -257,7 +272,9 @@ typedef u32 base_mem_alloc_flags;
*/
typedef enum base_mem_import_type {
BASE_MEM_IMPORT_TYPE_INVALID = 0,
- BASE_MEM_IMPORT_TYPE_UMP = 1,
+ /**
+ * Import type with value 1 is deprecated.
+ */
BASE_MEM_IMPORT_TYPE_UMM = 2,
BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
} base_mem_import_type;
@@ -303,13 +320,15 @@ struct base_mem_import_user_buffer {
#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
-/* reserved handles ..-64<<PAGE_SHIFT> for future special handles */
+/* reserved handles ..-48<<PAGE_SHIFT> for future special handles */
#define BASE_MEM_COOKIE_BASE (64ul << 12)
#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
BASE_MEM_COOKIE_BASE)
/* Mask to detect 4GB boundary alignment */
#define BASE_MEM_MASK_4GB 0xfffff000UL
+/* Mask to detect 4GB boundary (in page units) alignment */
+#define BASE_MEM_PFN_MASK_4GB (BASE_MEM_MASK_4GB >> LOCAL_PAGE_SHIFT)
/**
* Limit on the 'extent' parameter for an allocation with the
@@ -317,12 +336,17 @@ struct base_mem_import_user_buffer {
*
* This is the same as the maximum limit for a Buffer Descriptor's chunk size
*/
+#define BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES_LOG2 \
+ (21u - (LOCAL_PAGE_SHIFT))
#define BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES \
- ((2ull * 1024ull * 1024ull) >> (LOCAL_PAGE_SHIFT))
+ (1ull << (BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES_LOG2))
/* Bit mask of cookies used for for memory allocation setup */
#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
+/* Maximum size allowed in a single KBASE_IOCTL_MEM_ALLOC call */
+#define KBASE_MEM_ALLOC_MAX_SIZE ((8ull << 30) >> PAGE_SHIFT) /* 8 GB */
+
/**
* @brief Result codes of changing the size of the backing store allocated to a tmem region
@@ -434,6 +458,13 @@ struct base_mem_aliasing_info {
};
/**
+ * Similar to BASE_MEM_TILER_ALIGN_TOP, memory starting from the end of the
+ * initial commit is aligned to 'extent' pages, where 'extent' must be a power
+ * of 2 and no more than BASE_MEM_TILER_ALIGN_TOP_EXTENT_MAX_PAGES
+ */
+#define BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP (1 << 0)
+
+/**
* struct base_jit_alloc_info - Structure which describes a JIT allocation
* request.
* @gpu_alloc_addr: The GPU virtual address to write the JIT
@@ -446,6 +477,18 @@ struct base_mem_aliasing_info {
* @id: Unique ID provided by the caller, this is used
* to pair allocation and free requests.
* Zero is not a valid value.
+ * @bin_id: The JIT allocation bin, used in conjunction with
+ * @max_allocations to limit the number of each
+ * type of JIT allocation.
+ * @max_allocations: The maximum number of allocations allowed within
+ * the bin specified by @bin_id. Should be the same
+ * for all JIT allocations within the same bin.
+ * @flags: flags specifying the special requirements for
+ * the JIT allocation.
+ * @padding: Expansion space - should be initialised to zero
+ * @usage_id: A hint about which allocation should be reused.
+ * The kernel should attempt to use a previous
+ * allocation with the same usage_id.
*/
struct base_jit_alloc_info {
u64 gpu_alloc_addr;
@@ -453,6 +496,11 @@ struct base_jit_alloc_info {
u64 commit_pages;
u64 extent;
u8 id;
+ u8 bin_id;
+ u8 max_allocations;
+ u8 flags;
+ u8 padding[2];
+ u16 usage_id;
};
/**
@@ -616,9 +664,10 @@ typedef u32 base_jd_core_req;
/**
* SW only requirement: Just In Time allocation
*
- * This job requests a JIT allocation based on the request in the
- * @base_jit_alloc_info structure which is passed via the jc element of
- * the atom.
+ * This job requests one or more JIT allocations through a list
+ * of @base_jit_alloc_info structures which is passed via the jc element of
+ * the atom. The number of @base_jit_alloc_info structures present in the
+ * list is passed via the nr_extres element of the atom.
*
* It should be noted that the id entry in @base_jit_alloc_info must not
* be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
@@ -632,9 +681,9 @@ typedef u32 base_jd_core_req;
/**
* SW only requirement: Just In Time free
*
- * This job requests a JIT allocation created by @BASE_JD_REQ_SOFT_JIT_ALLOC
- * to be freed. The ID of the JIT allocation is passed via the jc element of
- * the atom.
+ * This job requests one or more JIT allocations created by
+ * @BASE_JD_REQ_SOFT_JIT_ALLOC to be freed. The ID list of the JIT
+ * allocations is passed via the jc element of the atom.
*
* The job will complete immediately.
*/
@@ -750,41 +799,20 @@ typedef u32 base_jd_core_req;
(core_req & BASE_JD_REQ_ATOM_TYPE) == BASE_JD_REQ_DEP)
/**
- * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which
- * handles retaining cores for power management and affinity management.
+ * enum kbase_atom_coreref_state - States to model state machine processed by
+ * kbasep_js_job_check_ref_cores(), which handles retaining cores for power
+ * management.
*
- * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
- * where lots of atoms could be submitted before powerup, and each has an
- * affinity chosen that causes other atoms to have an affinity
- * violation. Whilst the affinity was not causing violations at the time it
- * was chosen, it could cause violations thereafter. For example, 1000 jobs
- * could have had their affinity chosen during the powerup time, so any of
- * those 1000 jobs could cause an affinity violation later on.
- *
- * The attack would otherwise occur because other atoms/contexts have to wait for:
- * -# the currently running atoms (which are causing the violation) to
- * finish
- * -# and, the atoms that had their affinity chosen during powerup to
- * finish. These are run preferentially because they don't cause a
- * violation, but instead continue to cause the violation in others.
- * -# or, the attacker is scheduled out (which might not happen for just 2
- * contexts)
- *
- * By re-choosing the affinity (which is designed to avoid violations at the
- * time it's chosen), we break condition (2) of the wait, which minimizes the
- * problem to just waiting for current jobs to finish (which can be bounded if
- * the Job Scheduling Policy has a timer).
+ * @KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED: Starting state: Cores must be
+ * requested.
+ * @KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES: Cores requested, but
+ * waiting for them to be powered
+ * @KBASE_ATOM_COREREF_STATE_READY: Cores are powered, atom can be submitted to
+ * HW
*/
enum kbase_atom_coreref_state {
- /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
- /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
- /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
- KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
- /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
- KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
- /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
KBASE_ATOM_COREREF_STATE_READY
};
@@ -862,7 +890,7 @@ typedef struct base_jd_atom_v2 {
u64 jc; /**< job-chain GPU address */
struct base_jd_udata udata; /**< user data */
u64 extres_list; /**< list of external resources */
- u16 nr_extres; /**< nr of external resources */
+ u16 nr_extres; /**< nr of external resources or JIT allocations */
u16 compat_core_req; /**< core requirements which correspond to the legacy support for UK 10.2 */
struct base_dependency pre_dep[2]; /**< pre-dependencies, one need to use SETTER function to assign this field,
this is done in order to reduce possibility of improper assigment of a dependency field */
@@ -1412,6 +1440,11 @@ struct mali_base_gpu_core_props {
* client will not be expecting to allocate anywhere near this value.
*/
u64 gpu_available_memory_size;
+
+ /**
+ * The number of execution engines.
+ */
+ u8 num_exec_engines;
};
/**
@@ -1442,7 +1475,10 @@ struct mali_base_gpu_thread_props {
u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
- u8 padding[7];
+ u8 padding[3];
+ u32 tls_alloc; /* Number of threads per core that TLS must
+ * be allocated for
+ */
};
/**
@@ -1524,7 +1560,7 @@ struct gpu_raw_gpu_props {
u64 stack_present;
u32 l2_features;
- u32 suspend_size; /* API 8.2+ */
+ u32 core_features;
u32 mem_features;
u32 mmu_features;
@@ -1547,6 +1583,8 @@ struct gpu_raw_gpu_props {
* available modes as exposed in the coherency_features register.
*/
u32 coherency_mode;
+
+ u32 thread_tls_alloc;
};
/**
@@ -1582,39 +1620,44 @@ typedef struct base_gpu_props {
*/
/**
- * \enum base_context_create_flags
- *
* Flags to pass to ::base_context_init.
* Flags can be ORed together to enable multiple things.
*
* These share the same space as BASEP_CONTEXT_FLAG_*, and so must
* not collide with them.
*/
-enum base_context_create_flags {
- /** No flags set */
- BASE_CONTEXT_CREATE_FLAG_NONE = 0,
+typedef u32 base_context_create_flags;
- /** Base context is embedded in a cctx object (flag used for CINSTR software counter macros) */
- BASE_CONTEXT_CCTX_EMBEDDED = (1u << 0),
+/** No flags set */
+#define BASE_CONTEXT_CREATE_FLAG_NONE ((base_context_create_flags)0)
+
+/** Base context is embedded in a cctx object (flag used for CINSTR
+ * software counter macros)
+ */
+#define BASE_CONTEXT_CCTX_EMBEDDED ((base_context_create_flags)1 << 0)
+
+/** Base context is a 'System Monitor' context for Hardware counters.
+ *
+ * One important side effect of this is that job submission is disabled.
+ */
+#define BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED \
+ ((base_context_create_flags)1 << 1)
- /** Base context is a 'System Monitor' context for Hardware counters.
- *
- * One important side effect of this is that job submission is disabled. */
- BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1)
-};
/**
- * Bitpattern describing the ::base_context_create_flags that can be passed to base_context_init()
+ * Bitpattern describing the ::base_context_create_flags that can be
+ * passed to base_context_init()
*/
#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
- (((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \
- ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED))
+ (BASE_CONTEXT_CCTX_EMBEDDED | \
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
/**
- * Bitpattern describing the ::base_context_create_flags that can be passed to the kernel
+ * Bitpattern describing the ::base_context_create_flags that can be
+ * passed to the kernel
*/
#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
- ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED
/*
* Private flags used on the base context
@@ -1739,4 +1782,5 @@ typedef struct base_profiling_controls {
#define BASE_TLSTREAM_FLAGS_MASK (BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS | \
BASE_TLSTREAM_JOB_DUMPING_ENABLED)
+
#endif /* _BASE_KERNEL_H_ */
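
To illustrate the new BASE_MEM_PFN_MASK_4GB definition: a minimal sketch (the
helper name is invented; the driver's real placement checks live in the kbase
allocator) of testing whether a BASE_MEM_GPU_VA_SAME_4GB_PAGE allocation of
nr_pages pages starting at start_pfn stays within one 4GB chunk of GPU
virtual space:

    /* Assumes nr_pages >= 1 and LOCAL_PAGE_SHIFT == 12 (4kB pages), so a
     * 4GB chunk holds 2^20 pages and BASE_MEM_PFN_MASK_4GB covers the low
     * 20 bits of a PFN.
     */
    static bool example_same_4gb_chunk(u64 start_pfn, u64 nr_pages)
    {
        u64 last_pfn = start_pfn + nr_pages - 1;

        /* Two PFNs share a 4GB chunk iff the bits above the mask agree. */
        return (start_pfn & ~(u64)BASE_MEM_PFN_MASK_4GB) ==
               (last_pfn & ~(u64)BASE_MEM_PFN_MASK_4GB);
    }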
diff --git a/drivers/gpu/arm/midgard/mali_kbase.h b/drivers/gpu/arm/midgard/mali_kbase.h
index 7174ef2ba3d2ee..dc0d5f1173e00e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase.h
+++ b/drivers/gpu/arm/midgard/mali_kbase.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -59,10 +59,9 @@
#include "mali_kbase_context.h"
#include "mali_kbase_strings.h"
#include "mali_kbase_mem_lowlevel.h"
-#include "mali_kbase_trace_timeline.h"
#include "mali_kbase_js.h"
-#include "mali_kbase_mem.h"
#include "mali_kbase_utility.h"
+#include "mali_kbase_mem.h"
#include "mali_kbase_gpu_memory_debugfs.h"
#include "mali_kbase_mem_profile_debugfs.h"
#include "mali_kbase_debug_job_fault.h"
@@ -77,6 +76,7 @@
#include <trace/events/gpu.h>
#endif
+
#ifndef u64_to_user_ptr
/* Introduced in Linux v4.6 */
#define u64_to_user_ptr(x) ((void __user *)(uintptr_t)x)
@@ -111,10 +111,6 @@ void kbase_release_device(struct kbase_device *kbdev);
void kbase_set_profiling_control(struct kbase_device *kbdev, u32 control, u32 value);
-struct kbase_context *
-kbase_create_context(struct kbase_device *kbdev, bool is_compat);
-void kbase_destroy_context(struct kbase_context *kctx);
-
/**
* kbase_get_unmapped_area() - get an address range which is currently
@@ -243,6 +239,44 @@ void kbase_event_close(struct kbase_context *kctx);
void kbase_event_cleanup(struct kbase_context *kctx);
void kbase_event_wakeup(struct kbase_context *kctx);
+/**
+ * kbasep_jit_alloc_validate() - Validate the JIT allocation info.
+ *
+ * @kctx: Pointer to the kbase context within which the JIT
+ * allocation is to be validated.
+ * @info: Pointer to struct @base_jit_alloc_info
+ * which is to be validated.
+ *
+ * Return: 0 if the JIT allocation info is valid; negative error code otherwise
+ */
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info);
+/**
+ * kbase_mem_copy_from_extres_page() - Copy pages from external resources.
+ *
+ * @kctx: kbase context within which the copying is to take place.
+ * @extres_page: Pointer to the page which corresponds to the external
+ * resource from which the copying will take place.
+ * @pages: Pointer to the pages to which the content is to be
+ * copied from the provided external resources.
+ * @nr_pages: Number of pages to copy.
+ * @target_page_nr: Number of target pages which will be used for copying.
+ * @offset: Offset into the target pages from which the copying
+ * is to be performed.
+ * @to_copy: Size of the chunk to be copied, in bytes.
+ */
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+ void *extres_page, struct page **pages, unsigned int nr_pages,
+ unsigned int *target_page_nr, size_t offset, size_t *to_copy);
+/**
+ * kbase_mem_copy_from_extres() - Copy from external resources.
+ *
+ * @kctx: kbase context within which the copying is to take place.
+ * @buf_data: Pointer to the information about external resources:
+ * pages pertaining to the external resource, number of
+ * pages to copy.
+ */
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+ struct kbase_debug_copy_buffer *buf_data);
int kbase_process_soft_job(struct kbase_jd_atom *katom);
int kbase_prepare_soft_job(struct kbase_jd_atom *katom);
void kbase_finish_soft_job(struct kbase_jd_atom *katom);
@@ -258,15 +292,9 @@ int kbase_soft_event_update(struct kbase_context *kctx,
bool kbase_replay_process(struct kbase_jd_atom *katom);
-void kbasep_soft_job_timeout_worker(unsigned long data);
+void kbasep_soft_job_timeout_worker(struct timer_list *timer);
void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt);
-/* api used internally for register access. Contains validation and tracing */
-void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value);
-int kbase_device_trace_buffer_install(
- struct kbase_context *kctx, u32 *tb, size_t size);
-void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx);
-
void kbasep_as_do_poke(struct work_struct *work);
/** Returns the name associated with a Mali exception code
@@ -297,6 +325,29 @@ static inline bool kbase_pm_is_suspending(struct kbase_device *kbdev)
}
/**
+ * kbase_pm_is_active - Determine whether the GPU is active
+ *
+ * @kbdev: The kbase device structure for the device (must be a valid pointer)
+ *
+ * This takes into account the following
+ *
+ * - whether there is an active context reference
+ *
+ * - whether any of the shader cores or the tiler are needed
+ *
+ * It should generally be preferred over checking just
+ * kbdev->pm.active_count on its own, because some code paths drop their
+ * reference on this whilst still having the shader cores/tiler in use.
+ *
+ * Return: true if the GPU is active, false otherwise
+ */
+static inline bool kbase_pm_is_active(struct kbase_device *kbdev)
+{
+ return (kbdev->pm.active_count > 0 || kbdev->shader_needed_cnt ||
+ kbdev->tiler_needed_cnt);
+}
+
+/**
* Return the atom's ID, as was originally supplied by userspace in
* base_jd_atom_v2::atom_number
*/
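
The helper above folds the device's PM activity test into a single predicate. As a rough standalone model of that logic — using hypothetical fake_* names rather than the driver's real kbase_device layout — consider:

    #include <stdbool.h>
    #include <stdio.h>

    /* Standalone model of the three counters the helper consults; the
     * real kbase_device layout is more involved. */
    struct fake_kbase_device {
        int pm_active_count;   /* stands in for kbdev->pm.active_count */
        int shader_needed_cnt; /* stands in for kbdev->shader_needed_cnt */
        int tiler_needed_cnt;  /* stands in for kbdev->tiler_needed_cnt */
    };

    static bool fake_pm_is_active(const struct fake_kbase_device *kbdev)
    {
        /* Active if any context holds a PM reference, or any shader
         * core or the tiler is still needed. */
        return kbdev->pm_active_count > 0 || kbdev->shader_needed_cnt ||
               kbdev->tiler_needed_cnt;
    }

    int main(void)
    {
        struct fake_kbase_device d = { .pm_active_count = 0,
                                       .shader_needed_cnt = 1,
                                       .tiler_needed_cnt = 0 };

        /* Counts as active: a core is needed even with no PM ref. */
        printf("active: %d\n", fake_pm_is_active(&d));
        return 0;
    }

The point of the combined check is visible in the example: a needed shader core keeps the device active even after the last PM reference is dropped.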
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
index e0e40a9292e811..118511abe05346 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2015,2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2015,2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -23,11 +23,6 @@
#include <mali_kbase.h>
#include <mali_kbase_10969_workaround.h>
-/* This function is used to solve an HW issue with single iterator GPUs.
- * If a fragment job is soft-stopped on the edge of its bounding box, can happen that the
- * restart index is out of bounds and the rerun causes a tile range fault. If this happens
- * we try to clamp the restart index to a correct value and rerun the job.
- */
/* Mask of X and Y coordinates for the coordinates words in the descriptors*/
#define X_COORDINATE_MASK 0x00000FFF
#define Y_COORDINATE_MASK 0x0FFF0000
@@ -70,7 +65,7 @@ int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
kbase_gpu_vm_lock(katom->kctx);
region = kbase_region_tracker_find_region_enclosing_address(katom->kctx,
katom->jc);
- if (!region || (region->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(region))
goto out_unlock;
page_array = kbase_get_cpu_phy_pages(region);
@@ -79,7 +74,7 @@ int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
page_index = (katom->jc >> PAGE_SHIFT) - region->start_pfn;
- p = phys_to_page(as_phys_addr_t(page_array[page_index]));
+ p = as_page(page_array[page_index]);
/* we need the first 10 words of the fragment shader job descriptor.
 * We need to check that the offset + 10 words is less than the page
@@ -103,7 +98,7 @@ int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
/* The data needed overflows the page dimension,
* need to map the subsequent page */
if (copy_size < JOB_HEADER_SIZE) {
- p = phys_to_page(as_phys_addr_t(page_array[page_index + 1]));
+ p = as_page(page_array[page_index + 1]);
page_2 = kmap_atomic(p);
kbase_sync_single_for_cpu(katom->kctx->kbdev,
@@ -186,7 +181,7 @@ int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
/* Flush CPU cache to update memory for future GPU reads*/
memcpy(page_1, dst, copy_size);
- p = phys_to_page(as_phys_addr_t(page_array[page_index]));
+ p = as_page(page_array[page_index]);
kbase_sync_single_for_device(katom->kctx->kbdev,
kbase_dma_addr(p) + offset,
@@ -195,8 +190,7 @@ int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom)
if (copy_size < JOB_HEADER_SIZE) {
memcpy(page_2, dst + copy_size,
JOB_HEADER_SIZE - copy_size);
- p = phys_to_page(as_phys_addr_t(page_array[page_index +
- 1]));
+ p = as_page(page_array[page_index + 1]);
kbase_sync_single_for_device(katom->kctx->kbdev,
kbase_dma_addr(p),
diff --git a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
index 624dc4a86b529b..379a05a1a12807 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_10969_workaround.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2014, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -23,6 +23,15 @@
#ifndef _KBASE_10969_WORKAROUND_
#define _KBASE_10969_WORKAROUND_
+/**
+ * kbasep_10969_workaround_clamp_coordinates - Apply the WA to clamp the restart indices
+ * @katom: atom representing the fragment job for which the WA has to be applied
+ *
+ * This workaround is used to solve an HW issue with single iterator GPUs.
+ * If a fragment job is soft-stopped on the edge of its bounding box, it can happen
+ * that the restart index is out of bounds and the rerun causes a tile range
+ * fault. If this happens we try to clamp the restart index to a correct value.
+ */
int kbasep_10969_workaround_clamp_coordinates(struct kbase_jd_atom *katom);
#endif /* _KBASE_10969_WORKAROUND_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
index 1dee5cb254c7b2..2e99a4d8ab1ce3 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_as_fault_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -41,13 +41,14 @@ static int kbase_as_fault_read(struct seq_file *sfile, void *data)
list_for_each(entry, kbdev_list) {
kbdev = list_entry(entry, struct kbase_device, entry);
- if(kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) {
+ if (kbdev->debugfs_as_read_bitmap & (1ULL << as_no)) {
/* don't show this one again until another fault occurs */
kbdev->debugfs_as_read_bitmap &= ~(1ULL << as_no);
/* output the last page fault addr */
- seq_printf(sfile, "%llu\n", (u64) kbdev->as[as_no].fault_addr);
+ seq_printf(sfile, "%llu\n",
+ (u64) kbdev->as[as_no].fault_addr);
}
}
@@ -59,7 +60,7 @@ static int kbase_as_fault_read(struct seq_file *sfile, void *data)
static int kbase_as_fault_debugfs_open(struct inode *in, struct file *file)
{
- return single_open(file, kbase_as_fault_read , in->i_private);
+ return single_open(file, kbase_as_fault_read, in->i_private);
}
static const struct file_operations as_fault_fops = {
@@ -89,17 +90,20 @@ void kbase_as_fault_debugfs_init(struct kbase_device *kbdev)
KBASE_DEBUG_ASSERT(sizeof(kbdev->as[0].fault_addr) == sizeof(u64));
debugfs_directory = debugfs_create_dir("address_spaces",
- kbdev->mali_debugfs_directory);
+ kbdev->mali_debugfs_directory);
- if(debugfs_directory) {
- for(i = 0; i < kbdev->nr_hw_address_spaces; i++) {
+ if (debugfs_directory) {
+ for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
snprintf(as_name, ARRAY_SIZE(as_name), "as%u", i);
debugfs_create_file(as_name, S_IRUGO,
- debugfs_directory, (void*) ((uintptr_t) i), &as_fault_fops);
+ debugfs_directory,
+ (void *)(uintptr_t)i,
+ &as_fault_fops);
}
+ } else {
+ dev_warn(kbdev->dev,
+ "unable to create address_spaces debugfs directory");
}
- else
- dev_warn(kbdev->dev, "unable to create address_spaces debugfs directory");
#endif /* CONFIG_MALI_DEBUG */
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
index 18444b8a9c6355..27a03cf021380e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_cache_policy.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -30,7 +30,12 @@
/*
* The output flags should be a combination of the following values:
- * KBASE_REG_CPU_CACHED: CPU cache should be enabled.
+ * KBASE_REG_CPU_CACHED: CPU cache should be enabled
+ * KBASE_REG_GPU_CACHED: GPU cache should be enabled
+ *
+ * NOTE: Some components within the GPU might only be able to access memory
+ * that is KBASE_REG_GPU_CACHED. Refer to the specific GPU implementation for
+ * more details.
*/
u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
{
@@ -38,6 +43,9 @@ u32 kbase_cache_enabled(u32 flags, u32 nr_pages)
CSTD_UNUSED(nr_pages);
+ if (!(flags & BASE_MEM_UNCACHED_GPU))
+ cache_flags |= KBASE_REG_GPU_CACHED;
+
if (flags & BASE_MEM_CACHED_CPU)
cache_flags |= KBASE_REG_CPU_CACHED;
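
The change above makes GPU caching the default: the output gains KBASE_REG_GPU_CACHED unless the caller explicitly passed BASE_MEM_UNCACHED_GPU, while CPU caching stays opt-in. A minimal userspace sketch of that flag translation, with made-up flag values standing in for the driver's definitions:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical flag values for illustration only; the driver
     * defines its own. */
    #define FAKE_BASE_MEM_CACHED_CPU   (1u << 0)
    #define FAKE_BASE_MEM_UNCACHED_GPU (1u << 1)
    #define FAKE_REG_CPU_CACHED        (1u << 8)
    #define FAKE_REG_GPU_CACHED        (1u << 9)

    static uint32_t fake_cache_enabled(uint32_t flags)
    {
        uint32_t cache_flags = 0;

        /* GPU caching is the default; only an explicit UNCACHED_GPU
         * request turns it off. */
        if (!(flags & FAKE_BASE_MEM_UNCACHED_GPU))
            cache_flags |= FAKE_REG_GPU_CACHED;

        /* CPU caching remains opt-in. */
        if (flags & FAKE_BASE_MEM_CACHED_CPU)
            cache_flags |= FAKE_REG_CPU_CACHED;

        return cache_flags;
    }

    int main(void)
    {
        printf("0x%x\n", fake_cache_enabled(FAKE_BASE_MEM_CACHED_CPU));
        return 0;
    }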
diff --git a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
index 684bf5dd8ffc6d..376a94bb8b8ab9 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_config_defaults.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -151,17 +151,11 @@ enum {
#define DEFAULT_3BIT_AWID_LIMIT KBASE_3BIT_AID_32
/**
- * Default UMP device mapping. A UMP_DEVICE_<device>_SHIFT value which
- * defines which UMP device this GPU should be mapped to.
- */
-#define DEFAULT_UMP_GPU_DEVICE_SHIFT UMP_DEVICE_Z_SHIFT
-
-/*
* Default period for DVFS sampling
*/
#define DEFAULT_PM_DVFS_PERIOD 100 /* 100ms */
-/*
+/**
 * Power Management poweroff tick granularity. This is in nanoseconds to
* allow HR timer support.
*
@@ -171,22 +165,22 @@ enum {
*/
#define DEFAULT_PM_GPU_POWEROFF_TICK_NS (400000) /* 400us */
-/*
+/**
* Power Manager number of ticks before shader cores are powered off
*/
#define DEFAULT_PM_POWEROFF_TICK_SHADER (2) /* 400-800us */
-/*
+/**
* Power Manager number of ticks before GPU is powered off
*/
#define DEFAULT_PM_POWEROFF_TICK_GPU (2) /* 400-800us */
-/*
+/**
 * Default scheduling tick granularity
*/
#define DEFAULT_JS_SCHEDULING_PERIOD_NS (100000000u) /* 100ms */
-/*
+/**
* Default minimum number of scheduling ticks before jobs are soft-stopped.
*
* This defines the time-slice for a job (which may be different from that of a
@@ -194,60 +188,60 @@ enum {
*/
#define DEFAULT_JS_SOFT_STOP_TICKS (1) /* 100ms-200ms */
-/*
+/**
* Default minimum number of scheduling ticks before CL jobs are soft-stopped.
*/
#define DEFAULT_JS_SOFT_STOP_TICKS_CL (1) /* 100ms-200ms */
-/*
+/**
* Default minimum number of scheduling ticks before jobs are hard-stopped
*/
#define DEFAULT_JS_HARD_STOP_TICKS_SS (50) /* 5s */
#define DEFAULT_JS_HARD_STOP_TICKS_SS_8408 (300) /* 30s */
-/*
+/**
* Default minimum number of scheduling ticks before CL jobs are hard-stopped.
*/
#define DEFAULT_JS_HARD_STOP_TICKS_CL (50) /* 5s */
-/*
+/**
* Default minimum number of scheduling ticks before jobs are hard-stopped
* during dumping
*/
#define DEFAULT_JS_HARD_STOP_TICKS_DUMPING (15000) /* 1500s */
-/*
+/**
* Default timeout for some software jobs, after which the software event wait
* jobs will be cancelled.
*/
#define DEFAULT_JS_SOFT_JOB_TIMEOUT (3000) /* 3s */
-/*
+/**
* Default minimum number of scheduling ticks before the GPU is reset to clear a
* "stuck" job
*/
#define DEFAULT_JS_RESET_TICKS_SS (55) /* 5.5s */
#define DEFAULT_JS_RESET_TICKS_SS_8408 (450) /* 45s */
-/*
+/**
* Default minimum number of scheduling ticks before the GPU is reset to clear a
* "stuck" CL job.
*/
#define DEFAULT_JS_RESET_TICKS_CL (55) /* 5.5s */
-/*
+/**
* Default minimum number of scheduling ticks before the GPU is reset to clear a
* "stuck" job during dumping.
*/
#define DEFAULT_JS_RESET_TICKS_DUMPING (15020) /* 1502s */
-/*
+/**
* Default number of milliseconds given for other jobs on the GPU to be
* soft-stopped when the GPU needs to be reset.
*/
#define DEFAULT_RESET_TIMEOUT_MS (3000) /* 3s */
-/*
+/**
* Default timeslice that a context is scheduled in for, in nanoseconds.
*
* When a context has used up this amount of time across its jobs, it is
@@ -258,7 +252,7 @@ enum {
*/
#define DEFAULT_JS_CTX_TIMESLICE_NS (50000000) /* 50ms */
-/*
+/**
* Perform GPU power down using only platform specific code, skipping DDK power
* management.
*
@@ -272,7 +266,7 @@ enum {
*/
#define PLATFORM_POWER_DOWN_ONLY (1)
-/*
+/**
* Maximum frequency (in kHz) that the GPU can be clocked. For some platforms
* this isn't available, so we simply define a dummy value here. If devfreq
* is enabled the value will be read from there, otherwise this should be
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.c b/drivers/gpu/arm/midgard/mali_kbase_context.c
index 868442a9c9bcbd..970be895e671c3 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_context.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -32,15 +32,6 @@
#include <mali_kbase_dma_fence.h>
#include <mali_kbase_ctx_sched.h>
-/**
- * kbase_create_context() - Create a kernel base context.
- * @kbdev: Kbase device
- * @is_compat: Force creation of a 32-bit context
- *
- * Allocate and init a kernel base context.
- *
- * Return: new kbase context
- */
struct kbase_context *
kbase_create_context(struct kbase_device *kbdev, bool is_compat)
{
@@ -69,9 +60,6 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
kbase_ctx_flag_set(kctx, KCTX_FORCE_SAME_VA);
#endif /* !defined(CONFIG_64BIT) */
-#ifdef CONFIG_MALI_TRACE_TIMELINE
- kctx->timeline.owner_tgid = task_tgid_nr(current);
-#endif
atomic_set(&kctx->setup_complete, 0);
atomic_set(&kctx->setup_in_progress, 0);
spin_lock_init(&kctx->mm_update_lock);
@@ -115,11 +103,12 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
if (err)
goto free_jd;
+
atomic_set(&kctx->drain_pending, 0);
mutex_init(&kctx->reg_lock);
- mutex_init(&kctx->mem_partials_lock);
+ spin_lock_init(&kctx->mem_partials_lock);
INIT_LIST_HEAD(&kctx->mem_partials);
INIT_LIST_HEAD(&kctx->waiting_soft_jobs);
@@ -128,21 +117,10 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
if (err)
goto free_event;
- err = kbase_mmu_init(kctx);
+ err = kbase_mmu_init(kbdev, &kctx->mmu, kctx);
if (err)
goto term_dma_fence;
- do {
- err = kbase_mem_pool_grow(&kctx->mem_pool,
- MIDGARD_MMU_BOTTOMLEVEL);
- if (err)
- goto pgd_no_mem;
-
- mutex_lock(&kctx->mmu_lock);
- kctx->pgd = kbase_mmu_alloc_pgd(kctx);
- mutex_unlock(&kctx->mmu_lock);
- } while (!kctx->pgd);
-
p = kbase_mem_alloc_page(&kctx->mem_pool);
if (!p)
goto no_sink_page;
@@ -152,6 +130,7 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
kctx->cookies = KBASE_COOKIE_MASK;
+
/* Make sure page 0 is not used... */
err = kbase_region_tracker_init(kctx);
if (err)
@@ -167,17 +146,13 @@ kbase_create_context(struct kbase_device *kbdev, bool is_compat)
#ifdef CONFIG_GPU_TRACEPOINTS
atomic_set(&kctx->jctx.work_id, 0);
#endif
-#ifdef CONFIG_MALI_TRACE_TIMELINE
- atomic_set(&kctx->timeline.jd_atoms_in_flight, 0);
-#endif
kctx->id = atomic_add_return(1, &(kbdev->ctx_num)) - 1;
mutex_init(&kctx->vinstr_cli_lock);
- setup_timer(&kctx->soft_job_timeout,
- kbasep_soft_job_timeout_worker,
- (uintptr_t)kctx);
+ kbase_timer_setup(&kctx->soft_job_timeout,
+ kbasep_soft_job_timeout_worker);
return kctx;
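
The setup_timer() call above is replaced with kbase_timer_setup(), which appears to be a compatibility wrapper over the timer_list-based callback API introduced around Linux 4.15: the callback now receives the struct timer_list pointer and recovers its enclosing object via container_of() (what the kernel's from_timer() does), instead of a casted uintptr_t payload. A standalone model of that recovery, with hypothetical fake_* types:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct fake_timer_list { int dummy; };

    /* Model of a context embedding its timer, as kbase_context embeds
     * soft_job_timeout. */
    struct fake_context {
        int id;
        struct fake_timer_list soft_job_timeout;
    };

    /* New-style callback: receives the timer and derives the owner,
     * instead of being handed a casted integer payload. */
    static void fake_timeout_worker(struct fake_timer_list *timer)
    {
        struct fake_context *kctx =
            container_of(timer, struct fake_context, soft_job_timeout);

        printf("timeout for context %d\n", kctx->id);
    }

    int main(void)
    {
        struct fake_context kctx = { .id = 42 };

        fake_timeout_worker(&kctx.soft_job_timeout);
        return 0;
    }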
@@ -190,12 +165,7 @@ no_sticky:
no_region_tracker:
kbase_mem_pool_free(&kctx->mem_pool, p, false);
no_sink_page:
- /* VM lock needed for the call to kbase_mmu_free_pgd */
- kbase_gpu_vm_lock(kctx);
- kbase_mmu_free_pgd(kctx);
- kbase_gpu_vm_unlock(kctx);
-pgd_no_mem:
- kbase_mmu_term(kctx);
+ kbase_mmu_term(kbdev, &kctx->mmu);
term_dma_fence:
kbase_dma_fence_term(kctx);
free_event:
@@ -217,21 +187,15 @@ out:
}
KBASE_EXPORT_SYMBOL(kbase_create_context);
-static void kbase_reg_pending_dtor(struct kbase_va_region *reg)
+static void kbase_reg_pending_dtor(struct kbase_device *kbdev,
+ struct kbase_va_region *reg)
{
- dev_dbg(reg->kctx->kbdev->dev, "Freeing pending unmapped region\n");
+ dev_dbg(kbdev->dev, "Freeing pending unmapped region\n");
kbase_mem_phy_alloc_put(reg->cpu_alloc);
kbase_mem_phy_alloc_put(reg->gpu_alloc);
kfree(reg);
}
-/**
- * kbase_destroy_context - Destroy a kernel base context.
- * @kctx: Context to destroy
- *
- * Calls kbase_destroy_os_context() to free OS specific structures.
- * Will release all outstanding regions.
- */
void kbase_destroy_context(struct kbase_context *kctx)
{
struct kbase_device *kbdev;
@@ -252,6 +216,8 @@ void kbase_destroy_context(struct kbase_context *kctx)
* thread. */
kbase_pm_context_active(kbdev);
+ kbase_mem_pool_mark_dying(&kctx->mem_pool);
+
kbase_jd_zap_context(kctx);
#ifdef CONFIG_DEBUG_FS
@@ -263,6 +229,7 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_event_cleanup(kctx);
+
/*
* JIT must be terminated before the code below as it must be called
* without the region lock being held.
@@ -275,11 +242,8 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_sticky_resource_term(kctx);
- /* MMU is disabled as part of scheduling out the context */
- kbase_mmu_free_pgd(kctx);
-
/* drop the aliasing sink page now that it can't be mapped anymore */
- p = phys_to_page(as_phys_addr_t(kctx->aliasing_sink_page));
+ p = as_page(kctx->aliasing_sink_page);
kbase_mem_pool_free(&kctx->mem_pool, p, false);
/* free pending region setups */
@@ -289,7 +253,7 @@ void kbase_destroy_context(struct kbase_context *kctx)
BUG_ON(!kctx->pending_regions[cookie]);
- kbase_reg_pending_dtor(kctx->pending_regions[cookie]);
+ kbase_reg_pending_dtor(kbdev, kctx->pending_regions[cookie]);
kctx->pending_regions[cookie] = NULL;
pending_regions_to_clean &= ~(1UL << cookie);
@@ -298,6 +262,7 @@ void kbase_destroy_context(struct kbase_context *kctx)
kbase_region_tracker_term(kctx);
kbase_gpu_vm_unlock(kctx);
+
/* Safe to call this one even when it wasn't initialized (assuming kctx was sufficiently zeroed) */
kbasep_js_kctx_term(kctx);
@@ -311,7 +276,7 @@ void kbase_destroy_context(struct kbase_context *kctx)
spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
mutex_unlock(&kbdev->mmu_hw_mutex);
- kbase_mmu_term(kctx);
+ kbase_mmu_term(kbdev, &kctx->mmu);
pages = atomic_read(&kctx->used_pages);
if (pages != 0)
@@ -328,13 +293,6 @@ void kbase_destroy_context(struct kbase_context *kctx)
}
KBASE_EXPORT_SYMBOL(kbase_destroy_context);
-/**
- * kbase_context_set_create_flags - Set creation flags on a context
- * @kctx: Kbase context
- * @flags: Flags to set
- *
- * Return: 0 on success
- */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags)
{
int err = 0;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_context.h b/drivers/gpu/arm/midgard/mali_kbase_context.h
index 431f9e5aa6de3a..30b0f649806bbe 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_context.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_context.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2016, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,7 +25,35 @@
#include <linux/atomic.h>
+/**
+ * kbase_create_context() - Create a kernel base context.
+ * @kbdev: Kbase device
+ * @is_compat: Force creation of a 32-bit context
+ *
+ * Allocate and init a kernel base context.
+ *
+ * Return: new kbase context
+ */
+struct kbase_context *
+kbase_create_context(struct kbase_device *kbdev, bool is_compat);
+/**
+ * kbase_destroy_context - Destroy a kernel base context.
+ * @kctx: Context to destroy
+ *
+ * Calls kbase_destroy_os_context() to free OS specific structures.
+ * Will release all outstanding regions.
+ */
+void kbase_destroy_context(struct kbase_context *kctx);
+
+/**
+ * kbase_context_set_create_flags - Set creation flags on a context
+ * @kctx: Kbase context
+ * @flags: Flags to set, which shall be one of the flags of
+ * BASE_CONTEXT_CREATE_KERNEL_FLAGS.
+ *
+ * Return: 0 on success, -EINVAL otherwise when an invalid flag is specified.
+ */
int kbase_context_set_create_flags(struct kbase_context *kctx, u32 flags);
/**
diff --git a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
index 22c995a22196ed..d44ebd90d8ac06 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_core_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -34,6 +34,7 @@
#endif /* CONFIG_MALI_DEVFREQ */
#ifdef CONFIG_MALI_NO_MALI
#include "mali_kbase_model_linux.h"
+#include <backend/gpu/mali_kbase_model_dummy.h>
#endif /* CONFIG_MALI_NO_MALI */
#include "mali_kbase_mem_profile_debugfs_buf_size.h"
#include "mali_kbase_debug_mem_view.h"
@@ -68,7 +69,7 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
-#include <linux/compat.h> /* is_compat_task */
+#include <linux/compat.h> /* is_compat_task/in_compat_syscall */
#include <linux/mman.h>
#include <linux/version.h>
#include <mali_kbase_hw.h>
@@ -82,7 +83,7 @@
#include <mali_kbase_config.h>
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+#if (KERNEL_VERSION(3, 13, 0) <= LINUX_VERSION_CODE)
#include <linux/pm_opp.h>
#else
#include <linux/opp.h>
@@ -105,19 +106,20 @@ static LIST_HEAD(kbase_dev_list);
#define KERNEL_SIDE_DDK_VERSION_STRING "K:" MALI_RELEASE_NAME "(GPL)"
static int kbase_api_handshake(struct kbase_context *kctx,
- struct kbase_ioctl_version_check *version)
+ struct kbase_ioctl_version_check *version)
{
switch (version->major) {
case BASE_UK_VERSION_MAJOR:
/* set minor to be the lowest common */
version->minor = min_t(int, BASE_UK_VERSION_MINOR,
- (int)version->minor);
+ (int)version->minor);
break;
default:
/* We return our actual version regardless if it
* matches the version returned by userspace -
* userspace can bail if it can't handle this
- * version */
+ * version
+ */
version->major = BASE_UK_VERSION_MAJOR;
version->minor = BASE_UK_VERSION_MINOR;
break;
@@ -162,7 +164,6 @@ enum {
inited_backend_late = (1u << 6),
inited_device = (1u << 7),
inited_vinstr = (1u << 8),
-
inited_job_fault = (1u << 10),
inited_sysfs_group = (1u << 11),
inited_misc_register = (1u << 12),
@@ -280,9 +281,9 @@ EXPORT_SYMBOL(kbase_release_device);
*/
static int kstrtobool_from_user(const char __user *s, size_t count, bool *res)
{
- char buf[32];
+ char buf[4];
- count = min(sizeof(buf), count);
+ count = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, s, count))
return -EFAULT;
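
The kstrtobool_from_user() fix above is about NUL termination: the old code could copy sizeof(buf) bytes and leave no room for a terminator, while the new bound of sizeof(buf) - 1 always reserves one byte (4 bytes is enough for inputs like "1\n" or "y\n"). A sketch of the pattern, with memcpy standing in for copy_from_user and a deliberately simplified parse:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static int fake_strtobool_from_user(const char *s, size_t count,
                                        bool *res)
    {
        char buf[4];

        /* Reserve one byte for the terminator, whatever count says. */
        count = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
        memcpy(buf, s, count);
        buf[count] = '\0';

        if (buf[0] == '1' || buf[0] == 'y' || buf[0] == 'Y') {
            *res = true;
            return 0;
        }
        if (buf[0] == '0' || buf[0] == 'n' || buf[0] == 'N') {
            *res = false;
            return 0;
        }
        return -22; /* -EINVAL */
    }

    int main(void)
    {
        bool v;

        if (!fake_strtobool_from_user("yes\n", 4, &v))
            printf("parsed: %d\n", v);
        return 0;
    }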
@@ -393,7 +394,11 @@ static int kbase_open(struct inode *inode, struct file *filp)
if (!kbdev)
return -ENODEV;
+#if (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE)
+ kctx = kbase_create_context(kbdev, in_compat_syscall());
+#else
kctx = kbase_create_context(kbdev, is_compat_task());
+#endif /* (KERNEL_VERSION(4, 6, 0) <= LINUX_VERSION_CODE) */
if (!kctx) {
ret = -ENOMEM;
goto out;
@@ -493,11 +498,11 @@ static int kbase_release(struct inode *inode, struct file *filp)
/* If this client was performing hwcnt dumping and did not explicitly
* detach itself, remove it from the vinstr core now */
if (kctx->vinstr_cli) {
- struct kbase_uk_hwcnt_setup setup;
+ struct kbase_ioctl_hwcnt_enable enable;
- setup.dump_buffer = 0llu;
+ enable.dump_buffer = 0llu;
kbase_vinstr_legacy_hwc_setup(
- kbdev->vinstr_ctx, &kctx->vinstr_cli, &setup);
+ kbdev->vinstr_ctx, &kctx->vinstr_cli, &enable);
}
mutex_unlock(&kctx->vinstr_cli_lock);
@@ -573,12 +578,27 @@ static int kbase_api_mem_alloc(struct kbase_context *kctx,
u64 flags = alloc->in.flags;
u64 gpu_va;
+ rcu_read_lock();
+ /* Don't allow memory allocation until user space has set up the
+ * tracking page (which sets kctx->process_mm). Also catches when we've
+ * forked.
+ */
+ if (rcu_dereference(kctx->process_mm) != current->mm) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ if (flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+ return -ENOMEM;
+
if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA)) {
/* force SAME_VA if a 64-bit client */
flags |= BASE_MEM_SAME_VA;
}
+
reg = kbase_mem_alloc(kctx, alloc->in.va_pages,
alloc->in.commit_pages,
alloc->in.extent,
@@ -610,38 +630,22 @@ static int kbase_api_hwcnt_reader_setup(struct kbase_context *kctx,
struct kbase_ioctl_hwcnt_reader_setup *setup)
{
int ret;
- struct kbase_uk_hwcnt_reader_setup args = {
- .buffer_count = setup->buffer_count,
- .jm_bm = setup->jm_bm,
- .shader_bm = setup->shader_bm,
- .tiler_bm = setup->tiler_bm,
- .mmu_l2_bm = setup->mmu_l2_bm
- };
mutex_lock(&kctx->vinstr_cli_lock);
- ret = kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, &args);
+ ret = kbase_vinstr_hwcnt_reader_setup(kctx->kbdev->vinstr_ctx, setup);
mutex_unlock(&kctx->vinstr_cli_lock);
- if (ret)
- return ret;
- return args.fd;
+ return ret;
}
static int kbase_api_hwcnt_enable(struct kbase_context *kctx,
struct kbase_ioctl_hwcnt_enable *enable)
{
int ret;
- struct kbase_uk_hwcnt_setup args = {
- .dump_buffer = enable->dump_buffer,
- .jm_bm = enable->jm_bm,
- .shader_bm = enable->shader_bm,
- .tiler_bm = enable->tiler_bm,
- .mmu_l2_bm = enable->mmu_l2_bm
- };
mutex_lock(&kctx->vinstr_cli_lock);
ret = kbase_vinstr_legacy_hwc_setup(kctx->kbdev->vinstr_ctx,
- &kctx->vinstr_cli, &args);
+ &kctx->vinstr_cli, enable);
mutex_unlock(&kctx->vinstr_cli_lock);
return ret;
@@ -670,6 +674,18 @@ static int kbase_api_hwcnt_clear(struct kbase_context *kctx)
return ret;
}
+#ifdef CONFIG_MALI_NO_MALI
+static int kbase_api_hwcnt_set(struct kbase_context *kctx,
+ struct kbase_ioctl_hwcnt_values *values)
+{
+ gpu_model_set_dummy_prfcnt_sample(
+ (u32 __user *)(uintptr_t)values->data,
+ values->size);
+
+ return 0;
+}
+#endif
+
static int kbase_api_disjoint_query(struct kbase_context *kctx,
struct kbase_ioctl_disjoint_query *query)
{
@@ -700,10 +716,37 @@ static int kbase_api_get_ddk_version(struct kbase_context *kctx,
return len;
}
+/* Defaults for legacy JIT init ioctl */
+#define DEFAULT_MAX_JIT_ALLOCATIONS 255
+#define JIT_LEGACY_TRIM_LEVEL (0) /* No trimming */
+
+static int kbase_api_mem_jit_init_old(struct kbase_context *kctx,
+ struct kbase_ioctl_mem_jit_init_old *jit_init)
+{
+ kctx->jit_version = 1;
+
+ return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
+ DEFAULT_MAX_JIT_ALLOCATIONS,
+ JIT_LEGACY_TRIM_LEVEL);
+}
+
static int kbase_api_mem_jit_init(struct kbase_context *kctx,
struct kbase_ioctl_mem_jit_init *jit_init)
{
- return kbase_region_tracker_init_jit(kctx, jit_init->va_pages);
+ int i;
+
+ kctx->jit_version = 2;
+
+ for (i = 0; i < sizeof(jit_init->padding); i++) {
+ /* Ensure all padding bytes are 0 for potential future
+ * extension
+ */
+ if (jit_init->padding[i])
+ return -EINVAL;
+ }
+
+ return kbase_region_tracker_init_jit(kctx, jit_init->va_pages,
+ jit_init->max_allocations, jit_init->trim_level);
}
static int kbase_api_mem_sync(struct kbase_context *kctx,
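
kbase_api_mem_jit_init() above rejects any non-zero padding byte so those bytes can safely acquire meaning in a future version of the ioctl. The pattern, sketched against a hypothetical payload struct (the real kbase_ioctl_mem_jit_init layout may differ):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical ioctl payload for illustration. */
    struct fake_jit_init {
        uint64_t va_pages;
        uint8_t max_allocations;
        uint8_t trim_level;
        uint8_t padding[6];
    };

    static int fake_validate_padding(const struct fake_jit_init *ji)
    {
        size_t i;

        /* Every padding byte must be zero so a future version can
         * give it meaning without ambiguity. */
        for (i = 0; i < sizeof(ji->padding); i++)
            if (ji->padding[i])
                return -22; /* -EINVAL */
        return 0;
    }

    int main(void)
    {
        struct fake_jit_init ji;

        memset(&ji, 0, sizeof(ji));
        ji.va_pages = 256;
        printf("padding ok: %d\n", fake_validate_padding(&ji) == 0);
        return 0;
    }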
@@ -793,6 +836,10 @@ static int kbase_api_mem_alias(struct kbase_context *kctx,
}
flags = alias->in.flags;
+ if (flags & BASE_MEM_FLAGS_KERNEL_ONLY) {
+ vfree(ai);
+ return -EINVAL;
+ }
alias->out.gpu_va = kbase_mem_alias(kctx, &flags,
alias->in.stride, alias->in.nents,
@@ -814,6 +861,9 @@ static int kbase_api_mem_import(struct kbase_context *kctx,
int ret;
u64 flags = import->in.flags;
+ if (flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+ return -ENOMEM;
+
ret = kbase_mem_import(kctx,
import->in.type,
u64_to_user_ptr(import->in.phandle),
@@ -830,6 +880,9 @@ static int kbase_api_mem_import(struct kbase_context *kctx,
static int kbase_api_mem_flags_change(struct kbase_context *kctx,
struct kbase_ioctl_mem_flags_change *change)
{
+ if (change->flags & BASE_MEM_FLAGS_KERNEL_ONLY)
+ return -ENOMEM;
+
return kbase_mem_flags_change(kctx, change->gpu_va,
change->flags, change->mask);
}
@@ -1010,15 +1063,14 @@ static int kbase_api_tlstream_stats(struct kbase_context *kctx,
}
#endif /* MALI_UNIT_TEST */
+
#define KBASE_HANDLE_IOCTL(cmd, function) \
- case cmd: \
do { \
BUILD_BUG_ON(_IOC_DIR(cmd) != _IOC_NONE); \
return function(kctx); \
} while (0)
#define KBASE_HANDLE_IOCTL_IN(cmd, function, type) \
- case cmd: \
do { \
type param; \
int err; \
@@ -1031,7 +1083,6 @@ static int kbase_api_tlstream_stats(struct kbase_context *kctx,
} while (0)
#define KBASE_HANDLE_IOCTL_OUT(cmd, function, type) \
- case cmd: \
do { \
type param; \
int ret, err; \
@@ -1045,7 +1096,6 @@ static int kbase_api_tlstream_stats(struct kbase_context *kctx,
} while (0)
#define KBASE_HANDLE_IOCTL_INOUT(cmd, function, type) \
- case cmd: \
do { \
type param; \
int ret, err; \
@@ -1069,12 +1119,17 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* Only these ioctls are available until setup is complete */
switch (cmd) {
+ case KBASE_IOCTL_VERSION_CHECK:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_VERSION_CHECK,
kbase_api_handshake,
struct kbase_ioctl_version_check);
+ break;
+
+ case KBASE_IOCTL_SET_FLAGS:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SET_FLAGS,
kbase_api_set_flags,
struct kbase_ioctl_set_flags);
+ break;
}
/* Block call until version handshake and setup is complete */
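
The dispatch rework above hoists the case labels out of the KBASE_HANDLE_IOCTL* macros, so each entry now reads case CMD: MACRO(...); break; and new commands are harder to misplace. The macros keep their do { } while (0) wrapper so a multi-statement body still behaves as a single statement after the label. A toy dispatcher showing the shape (names are illustrative):

    #include <stdio.h>

    #define CMD_PING 1
    #define CMD_ADD  2

    /* do { } while (0) keeps the multi-statement body behaving as one
     * statement after the explicit case label. The cmd argument is
     * unused here; the driver uses it for compile-time checks. */
    #define HANDLE_IOCTL(cmd, fn) \
        do {                      \
            return fn();          \
        } while (0)

    static int api_ping(void) { printf("ping\n"); return 0; }
    static int api_add(void)  { printf("add\n");  return 0; }

    static int dispatch(unsigned int cmd)
    {
        switch (cmd) {
        case CMD_PING:
            HANDLE_IOCTL(CMD_PING, api_ping);
            break; /* unreachable, but mirrors the driver's layout */
        case CMD_ADD:
            HANDLE_IOCTL(CMD_ADD, api_add);
            break;
        }
        return -25; /* -ENOTTY: unknown command */
    }

    int main(void)
    {
        return dispatch(CMD_PING);
    }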
@@ -1083,109 +1138,192 @@ static long kbase_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* Normal ioctls */
switch (cmd) {
+ case KBASE_IOCTL_JOB_SUBMIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_JOB_SUBMIT,
kbase_api_job_submit,
struct kbase_ioctl_job_submit);
+ break;
+ case KBASE_IOCTL_GET_GPUPROPS:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_GPUPROPS,
kbase_api_get_gpuprops,
struct kbase_ioctl_get_gpuprops);
+ break;
+ case KBASE_IOCTL_POST_TERM:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_POST_TERM,
kbase_api_post_term);
+ break;
+ case KBASE_IOCTL_MEM_ALLOC:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALLOC,
kbase_api_mem_alloc,
union kbase_ioctl_mem_alloc);
+ break;
+ case KBASE_IOCTL_MEM_QUERY:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_QUERY,
kbase_api_mem_query,
union kbase_ioctl_mem_query);
+ break;
+ case KBASE_IOCTL_MEM_FREE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FREE,
kbase_api_mem_free,
struct kbase_ioctl_mem_free);
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
- kbase_api_hwcnt_reader_setup,
- struct kbase_ioctl_hwcnt_reader_setup);
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
- kbase_api_hwcnt_enable,
- struct kbase_ioctl_hwcnt_enable);
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
- kbase_api_hwcnt_dump);
- KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
- kbase_api_hwcnt_clear);
+ break;
+ case KBASE_IOCTL_DISJOINT_QUERY:
KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_DISJOINT_QUERY,
kbase_api_disjoint_query,
struct kbase_ioctl_disjoint_query);
+ break;
+ case KBASE_IOCTL_GET_DDK_VERSION:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_DDK_VERSION,
kbase_api_get_ddk_version,
struct kbase_ioctl_get_ddk_version);
+ break;
+ case KBASE_IOCTL_MEM_JIT_INIT_OLD:
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT_OLD,
+ kbase_api_mem_jit_init_old,
+ struct kbase_ioctl_mem_jit_init_old);
+ break;
+ case KBASE_IOCTL_MEM_JIT_INIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_JIT_INIT,
kbase_api_mem_jit_init,
struct kbase_ioctl_mem_jit_init);
+ break;
+ case KBASE_IOCTL_MEM_SYNC:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_SYNC,
kbase_api_mem_sync,
struct kbase_ioctl_mem_sync);
+ break;
+ case KBASE_IOCTL_MEM_FIND_CPU_OFFSET:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_CPU_OFFSET,
kbase_api_mem_find_cpu_offset,
union kbase_ioctl_mem_find_cpu_offset);
+ break;
+ case KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET,
kbase_api_mem_find_gpu_start_and_offset,
union kbase_ioctl_mem_find_gpu_start_and_offset);
+ break;
+ case KBASE_IOCTL_GET_CONTEXT_ID:
KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_GET_CONTEXT_ID,
kbase_api_get_context_id,
struct kbase_ioctl_get_context_id);
+ break;
+ case KBASE_IOCTL_TLSTREAM_ACQUIRE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_ACQUIRE,
kbase_api_tlstream_acquire,
struct kbase_ioctl_tlstream_acquire);
+ break;
+ case KBASE_IOCTL_TLSTREAM_FLUSH:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_TLSTREAM_FLUSH,
kbase_api_tlstream_flush);
+ break;
+ case KBASE_IOCTL_MEM_COMMIT:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_COMMIT,
kbase_api_mem_commit,
struct kbase_ioctl_mem_commit);
+ break;
+ case KBASE_IOCTL_MEM_ALIAS:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_ALIAS,
kbase_api_mem_alias,
union kbase_ioctl_mem_alias);
+ break;
+ case KBASE_IOCTL_MEM_IMPORT:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_MEM_IMPORT,
kbase_api_mem_import,
union kbase_ioctl_mem_import);
+ break;
+ case KBASE_IOCTL_MEM_FLAGS_CHANGE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_FLAGS_CHANGE,
kbase_api_mem_flags_change,
struct kbase_ioctl_mem_flags_change);
+ break;
+ case KBASE_IOCTL_STREAM_CREATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STREAM_CREATE,
kbase_api_stream_create,
struct kbase_ioctl_stream_create);
+ break;
+ case KBASE_IOCTL_FENCE_VALIDATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_FENCE_VALIDATE,
kbase_api_fence_validate,
struct kbase_ioctl_fence_validate);
+ break;
+ case KBASE_IOCTL_GET_PROFILING_CONTROLS:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_GET_PROFILING_CONTROLS,
kbase_api_get_profiling_controls,
struct kbase_ioctl_get_profiling_controls);
+ break;
+ case KBASE_IOCTL_MEM_PROFILE_ADD:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_MEM_PROFILE_ADD,
kbase_api_mem_profile_add,
struct kbase_ioctl_mem_profile_add);
+ break;
+ case KBASE_IOCTL_SOFT_EVENT_UPDATE:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_SOFT_EVENT_UPDATE,
kbase_api_soft_event_update,
struct kbase_ioctl_soft_event_update);
+ break;
+ case KBASE_IOCTL_STICKY_RESOURCE_MAP:
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
+ kbase_api_sticky_resource_map,
+ struct kbase_ioctl_sticky_resource_map);
+ break;
+ case KBASE_IOCTL_STICKY_RESOURCE_UNMAP:
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
+ kbase_api_sticky_resource_unmap,
+ struct kbase_ioctl_sticky_resource_unmap);
+ break;
+
+ /* Instrumentation. */
+ case KBASE_IOCTL_HWCNT_READER_SETUP:
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_READER_SETUP,
+ kbase_api_hwcnt_reader_setup,
+ struct kbase_ioctl_hwcnt_reader_setup);
+ break;
+ case KBASE_IOCTL_HWCNT_ENABLE:
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_ENABLE,
+ kbase_api_hwcnt_enable,
+ struct kbase_ioctl_hwcnt_enable);
+ break;
+ case KBASE_IOCTL_HWCNT_DUMP:
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_DUMP,
+ kbase_api_hwcnt_dump);
+ break;
+ case KBASE_IOCTL_HWCNT_CLEAR:
+ KBASE_HANDLE_IOCTL(KBASE_IOCTL_HWCNT_CLEAR,
+ kbase_api_hwcnt_clear);
+ break;
+#ifdef CONFIG_MALI_NO_MALI
+ case KBASE_IOCTL_HWCNT_SET:
+ KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_HWCNT_SET,
+ kbase_api_hwcnt_set,
+ struct kbase_ioctl_hwcnt_values);
+ break;
+#endif
#ifdef CONFIG_MALI_JOB_DUMP
+ case KBASE_IOCTL_CINSTR_GWT_START:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_START,
kbase_gpu_gwt_start);
+ break;
+ case KBASE_IOCTL_CINSTR_GWT_STOP:
KBASE_HANDLE_IOCTL(KBASE_IOCTL_CINSTR_GWT_STOP,
kbase_gpu_gwt_stop);
+ break;
+ case KBASE_IOCTL_CINSTR_GWT_DUMP:
KBASE_HANDLE_IOCTL_INOUT(KBASE_IOCTL_CINSTR_GWT_DUMP,
kbase_gpu_gwt_dump,
union kbase_ioctl_cinstr_gwt_dump);
+ break;
#endif
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_MAP,
- kbase_api_sticky_resource_map,
- struct kbase_ioctl_sticky_resource_map);
- KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_STICKY_RESOURCE_UNMAP,
- kbase_api_sticky_resource_unmap,
- struct kbase_ioctl_sticky_resource_unmap);
-
#if MALI_UNIT_TEST
+ case KBASE_IOCTL_TLSTREAM_TEST:
KBASE_HANDLE_IOCTL_IN(KBASE_IOCTL_TLSTREAM_TEST,
kbase_api_tlstream_test,
struct kbase_ioctl_tlstream_test);
+ break;
+ case KBASE_IOCTL_TLSTREAM_STATS:
KBASE_HANDLE_IOCTL_OUT(KBASE_IOCTL_TLSTREAM_STATS,
kbase_api_tlstream_stats,
struct kbase_ioctl_tlstream_stats);
+ break;
#endif
}
@@ -1382,111 +1520,6 @@ static ssize_t set_policy(struct device *dev, struct device_attribute *attr, con
*/
static DEVICE_ATTR(power_policy, S_IRUGO | S_IWUSR, show_policy, set_policy);
-/**
- * show_ca_policy - Show callback for the core_availability_policy sysfs file.
- *
- * This function is called to get the contents of the core_availability_policy
- * sysfs file. This is a list of the available policies with the currently
- * active one surrounded by square brackets.
- *
- * @dev: The device this sysfs file is for
- * @attr: The attributes of the sysfs file
- * @buf: The output buffer for the sysfs file contents
- *
- * Return: The number of bytes output to @buf.
- */
-static ssize_t show_ca_policy(struct device *dev, struct device_attribute *attr, char * const buf)
-{
- struct kbase_device *kbdev;
- const struct kbase_pm_ca_policy *current_policy;
- const struct kbase_pm_ca_policy *const *policy_list;
- int policy_count;
- int i;
- ssize_t ret = 0;
-
- kbdev = to_kbase_device(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- current_policy = kbase_pm_ca_get_policy(kbdev);
-
- policy_count = kbase_pm_ca_list_policies(&policy_list);
-
- for (i = 0; i < policy_count && ret < PAGE_SIZE; i++) {
- if (policy_list[i] == current_policy)
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "[%s] ", policy_list[i]->name);
- else
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s ", policy_list[i]->name);
- }
-
- if (ret < PAGE_SIZE - 1) {
- ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
- } else {
- buf[PAGE_SIZE - 2] = '\n';
- buf[PAGE_SIZE - 1] = '\0';
- ret = PAGE_SIZE - 1;
- }
-
- return ret;
-}
-
-/**
- * set_ca_policy - Store callback for the core_availability_policy sysfs file.
- *
- * This function is called when the core_availability_policy sysfs file is
- * written to. It matches the requested policy against the available policies
- * and if a matching policy is found calls kbase_pm_set_policy() to change
- * the policy.
- *
- * @dev: The device with sysfs file is for
- * @attr: The attributes of the sysfs file
- * @buf: The value written to the sysfs file
- * @count: The number of bytes written to the sysfs file
- *
- * Return: @count if the function succeeded. An error code on failure.
- */
-static ssize_t set_ca_policy(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct kbase_device *kbdev;
- const struct kbase_pm_ca_policy *new_policy = NULL;
- const struct kbase_pm_ca_policy *const *policy_list;
- int policy_count;
- int i;
-
- kbdev = to_kbase_device(dev);
-
- if (!kbdev)
- return -ENODEV;
-
- policy_count = kbase_pm_ca_list_policies(&policy_list);
-
- for (i = 0; i < policy_count; i++) {
- if (sysfs_streq(policy_list[i]->name, buf)) {
- new_policy = policy_list[i];
- break;
- }
- }
-
- if (!new_policy) {
- dev_err(dev, "core_availability_policy: policy not found\n");
- return -EINVAL;
- }
-
- kbase_pm_ca_set_policy(kbdev, new_policy);
-
- return count;
-}
-
-/*
- * The sysfs file core_availability_policy
- *
- * This is used for obtaining information about the available policies,
- * determining which policy is currently active, and changing the active
- * policy.
- */
-static DEVICE_ATTR(core_availability_policy, S_IRUGO | S_IWUSR, show_ca_policy, set_ca_policy);
-
/*
* show_core_mask - Show callback for the core_mask sysfs file.
*
@@ -2297,7 +2330,11 @@ static ssize_t kbase_show_gpuinfo(struct device *dev,
{ .id = GPU_ID2_PRODUCT_TSIX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
.name = "Mali-G51" },
{ .id = GPU_ID2_PRODUCT_TNOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
- .name = "Mali-TNOx" },
+ .name = "Mali-G76" },
+ { .id = GPU_ID2_PRODUCT_TDVX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-G31" },
+ { .id = GPU_ID2_PRODUCT_TGOX >> GPU_ID_VERSION_PRODUCT_ID_SHIFT,
+ .name = "Mali-G52" },
};
const char *product_name = "(Unknown Mali GPU)";
struct kbase_device *kbdev;
@@ -2737,6 +2774,88 @@ static ssize_t set_lp_mem_pool_max_size(struct device *dev,
static DEVICE_ATTR(lp_mem_pool_max_size, S_IRUGO | S_IWUSR, show_lp_mem_pool_max_size,
set_lp_mem_pool_max_size);
+/**
+ * show_js_ctx_scheduling_mode - Show callback for js_ctx_scheduling_mode sysfs
+ * entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The output buffer to receive the context scheduling mode information.
+ *
+ * This function is called to get the context scheduling mode being used by JS.
+ *
+ * Return: The number of bytes output to @buf.
+ */
+static ssize_t show_js_ctx_scheduling_mode(struct device *dev,
+ struct device_attribute *attr, char * const buf)
+{
+ struct kbase_device *kbdev;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", kbdev->js_ctx_scheduling_mode);
+}
+
+/**
+ * set_js_ctx_scheduling_mode - Set callback for js_ctx_scheduling_mode sysfs
+ * entry.
+ * @dev: The device this sysfs file is for.
+ * @attr: The attributes of the sysfs file.
+ * @buf: The value written to the sysfs file.
+ * @count: The number of bytes written to the sysfs file.
+ *
+ * This function is called when the js_ctx_scheduling_mode sysfs file is written
+ * to. It checks the data written, and if valid updates the ctx scheduling mode
+ * being used by JS.
+ *
+ * Return: @count if the function succeeded. An error code on failure.
+ */
+static ssize_t set_js_ctx_scheduling_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kbasep_kctx_list_element *element;
+ u32 new_js_ctx_scheduling_mode;
+ struct kbase_device *kbdev;
+ unsigned long flags;
+ int ret;
+
+ kbdev = to_kbase_device(dev);
+ if (!kbdev)
+ return -ENODEV;
+
+ ret = kstrtouint(buf, 0, &new_js_ctx_scheduling_mode);
+ if (ret || new_js_ctx_scheduling_mode >= KBASE_JS_PRIORITY_MODE_COUNT) {
+ dev_err(kbdev->dev, "Couldn't process js_ctx_scheduling_mode"
+ " write operation.\n"
+ "Use format <js_ctx_scheduling_mode>\n");
+ return -EINVAL;
+ }
+
+ if (new_js_ctx_scheduling_mode == kbdev->js_ctx_scheduling_mode)
+ return count;
+
+ mutex_lock(&kbdev->kctx_list_lock);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ /* Update the context priority mode */
+ kbdev->js_ctx_scheduling_mode = new_js_ctx_scheduling_mode;
+
+ /* Adjust priority of all the contexts as per the new mode */
+ list_for_each_entry(element, &kbdev->kctx_list, link)
+ kbase_js_update_ctx_priority(element->kctx);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ mutex_unlock(&kbdev->kctx_list_lock);
+
+ dev_dbg(kbdev->dev, "JS ctx scheduling mode: %u\n", new_js_ctx_scheduling_mode);
+
+ return count;
+}
+
+static DEVICE_ATTR(js_ctx_scheduling_mode, S_IRUGO | S_IWUSR,
+ show_js_ctx_scheduling_mode,
+ set_js_ctx_scheduling_mode);
#ifdef CONFIG_DEBUG_FS
/* Number of entries in serialize_jobs_settings[] */
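
The new js_ctx_scheduling_mode store callback follows the usual sysfs write pattern: parse with kstrtouint(), range-check against KBASE_JS_PRIORITY_MODE_COUNT, return early when the value is unchanged, and only then update shared state under the appropriate locks. A userspace model of the parse-and-validate phase (locking and the context list walk omitted):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define FAKE_PRIORITY_MODE_COUNT 2u

    /* Model of the store callback's parse/validate phase; kstrtouint
     * and the device locking are out of scope here. */
    static int fake_set_scheduling_mode(const char *buf, unsigned int *mode)
    {
        char *end;
        unsigned long v = strtoul(buf, &end, 0);

        if (end == buf || v >= FAKE_PRIORITY_MODE_COUNT)
            return -EINVAL;

        if (v == *mode)
            return 0; /* nothing to do, as in the driver */

        *mode = (unsigned int)v;
        return 0;
    }

    int main(void)
    {
        unsigned int mode = 0;
        int rc = fake_set_scheduling_mode("1", &mode);

        printf("rc=%d mode=%u\n", rc, mode);
        return 0;
    }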
@@ -2996,6 +3115,7 @@ static int registers_map(struct kbase_device * const kbdev)
kbdev->reg_start = reg_res->start;
kbdev->reg_size = resource_size(reg_res);
+
err = kbase_common_reg_map(kbdev);
if (err) {
dev_err(kbdev->dev, "Failed to map registers\n");
@@ -3111,6 +3231,7 @@ static void power_control_term(struct kbase_device *kbdev)
#endif /* LINUX_VERSION_CODE >= 3, 12, 0 */
}
+#ifdef MALI_KBASE_BUILD
#ifdef CONFIG_DEBUG_FS
#if KBASE_GPU_RESET_EN
@@ -3171,7 +3292,7 @@ static ssize_t debugfs_protected_debug_mode_read(struct file *file,
ssize_t ret_val;
kbase_pm_context_active(kbdev);
- gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS), NULL);
+ gpu_status = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_STATUS));
kbase_pm_context_idle(kbdev);
if (gpu_status & GPU_DBGEN)
@@ -3265,10 +3386,6 @@ static int kbase_device_debugfs_init(struct kbase_device *kbdev)
kbasep_trace_debugfs_init(kbdev);
#endif /* KBASE_TRACE_ENABLE */
-#ifdef CONFIG_MALI_TRACE_TIMELINE
- kbasep_trace_timeline_debugfs_init(kbdev);
-#endif /* CONFIG_MALI_TRACE_TIMELINE */
-
#ifdef CONFIG_MALI_DEVFREQ
#ifdef CONFIG_DEVFREQ_THERMAL
if (kbdev->inited_subsys & inited_devfreq)
@@ -3302,6 +3419,7 @@ static inline int kbase_device_debugfs_init(struct kbase_device *kbdev)
static inline void kbase_device_debugfs_term(struct kbase_device *kbdev) { }
#endif /* CONFIG_DEBUG_FS */
+#endif /* MALI_KBASE_BUILD */
static void kbase_device_coherency_init(struct kbase_device *kbdev,
unsigned prod_id)
@@ -3390,12 +3508,12 @@ static struct attribute *kbase_attrs[] = {
&dev_attr_reset_timeout.attr,
&dev_attr_js_scheduling_period.attr,
&dev_attr_power_policy.attr,
- &dev_attr_core_availability_policy.attr,
&dev_attr_core_mask.attr,
&dev_attr_mem_pool_size.attr,
&dev_attr_mem_pool_max_size.attr,
&dev_attr_lp_mem_pool_size.attr,
&dev_attr_lp_mem_pool_max_size.attr,
+ &dev_attr_js_ctx_scheduling_mode.attr,
NULL
};
@@ -3443,10 +3561,12 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
kbdev->inited_subsys &= ~inited_get_device;
}
+#ifdef MALI_KBASE_BUILD
if (kbdev->inited_subsys & inited_debugfs) {
kbase_device_debugfs_term(kbdev);
kbdev->inited_subsys &= ~inited_debugfs;
}
+#endif
if (kbdev->inited_subsys & inited_job_fault) {
kbase_debug_job_fault_dev_term(kbdev);
@@ -3460,6 +3580,7 @@ static int kbase_platform_device_remove(struct platform_device *pdev)
}
#endif
+
if (kbdev->inited_subsys & inited_vinstr) {
kbase_vinstr_term(kbdev->vinstr_ctx);
kbdev->inited_subsys &= ~inited_vinstr;
@@ -3713,6 +3834,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
}
kbdev->inited_subsys |= inited_vinstr;
+
#ifdef CONFIG_MALI_DEVFREQ
/* Devfreq uses vinstr, so must be initialized after it. */
err = kbase_devfreq_init(kbdev);
@@ -3722,6 +3844,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
dev_err(kbdev->dev, "Continuing without devfreq\n");
#endif /* CONFIG_MALI_DEVFREQ */
+#ifdef MALI_KBASE_BUILD
err = kbase_debug_job_fault_dev_init(kbdev);
if (err) {
dev_err(kbdev->dev, "Job fault debug initialization failed\n");
@@ -3742,6 +3865,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
kbdev->mdev.name = kbdev->devname;
kbdev->mdev.fops = &kbase_fops;
kbdev->mdev.parent = get_device(kbdev->dev);
+ kbdev->mdev.mode = 0666;
kbdev->inited_subsys |= inited_get_device;
/* This needs to happen before registering the device with misc_register(),
@@ -3799,6 +3923,7 @@ static int kbase_platform_device_probe(struct platform_device *pdev)
"Probed as %s\n", dev_name(kbdev->mdev.this_device));
kbase_dev_nr++;
+#endif /* MALI_KBASE_BUILD */
return err;
}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
index 85a6afdb4ef345..bda05602de5e47 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -121,7 +121,8 @@ int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx)
kctx->as_nr = free_as;
kbdev->as_to_kctx[free_as] = kctx;
- kbase_mmu_update(kctx);
+ kbase_mmu_update(kbdev, &kctx->mmu,
+ kctx->as_nr);
}
} else {
atomic_dec(&kctx->refcount);
@@ -193,7 +194,8 @@ void kbase_ctx_sched_restore_all_as(struct kbase_device *kbdev)
if (atomic_read(&kctx->refcount)) {
WARN_ON(kctx->as_nr != i);
- kbase_mmu_update(kctx);
+ kbase_mmu_update(kbdev, &kctx->mmu,
+ kctx->as_nr);
} else {
/* This context might have been assigned an
* AS before, clear it.
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
index 400ee623055d3a..ab57a0dc1ca8b0 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_ctx_sched.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -25,7 +25,8 @@
#include <mali_kbase.h>
-/* The Context Scheduler manages address space assignment and reference
+/**
+ * The Context Scheduler manages address space assignment and reference
* counting to kbase_context. The interface has been designed to minimise
* interactions between the Job Scheduler and Power Management/MMU to support
* the existing Job Scheduler interface.
@@ -39,35 +40,30 @@
* code.
*/
-/* base_ctx_sched_init - Initialise the context scheduler
+/**
+ * kbase_ctx_sched_init - Initialise the context scheduler
+ * @kbdev: The device for which the context scheduler needs to be initialised
*
- * @kbdev: The device for which the context scheduler needs to be
- * initialised
+ * This must be called during device initialisation. The number of hardware
+ * address spaces must already be established before calling this function.
*
* Return: 0 for success, otherwise failure
- *
- * This must be called during device initilisation. The number of hardware
- * address spaces must already be established before calling this function.
*/
int kbase_ctx_sched_init(struct kbase_device *kbdev);
-/* base_ctx_sched_term - Terminate the context scheduler
- *
- * @kbdev: The device for which the context scheduler needs to be
- * terminated
+/**
+ * kbase_ctx_sched_term - Terminate the context scheduler
+ * @kbdev: The device for which the context scheduler needs to be terminated
*
* This must be called during device termination after all contexts have been
* destroyed.
*/
void kbase_ctx_sched_term(struct kbase_device *kbdev);
-/* kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
- *
+/**
+ * kbase_ctx_sched_retain_ctx - Retain a reference to the @ref kbase_context
* @kctx: The context to which to retain a reference
*
- * Return: The address space that the context has been assigned to or
- * KBASEP_AS_NR_INVALID if no address space was available.
- *
* This function should be called whenever an address space should be assigned
* to a context and programmed onto the MMU. It should typically be called
* when jobs are ready to be submitted to the GPU.
@@ -77,11 +73,14 @@ void kbase_ctx_sched_term(struct kbase_device *kbdev);
*
* The kbase_device::mmu_hw_mutex and kbase_device::hwaccess_lock locks must be
* held whilst calling this function.
+ *
+ * Return: The address space that the context has been assigned to or
+ * KBASEP_AS_NR_INVALID if no address space was available.
*/
int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
-/* kbase_ctx_sched_retain_ctx_refcount
- *
+/**
+ * kbase_ctx_sched_retain_ctx_refcount
* @kctx: The context to which to retain a reference
*
* This function only retains a reference to the context. It must be called
@@ -95,8 +94,8 @@ int kbase_ctx_sched_retain_ctx(struct kbase_context *kctx);
*/
void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
-/* kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
- *
+/**
+ * kbase_ctx_sched_release_ctx - Release a reference to the @ref kbase_context
* @kctx: The context from which to release a reference
*
* This function should be called whenever an address space could be unassigned
@@ -108,8 +107,8 @@ void kbase_ctx_sched_retain_ctx_refcount(struct kbase_context *kctx);
*/
void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
-/* kbase_ctx_sched_remove_ctx - Unassign previously assigned address space
- *
+/**
+ * kbase_ctx_sched_remove_ctx - Unassign previously assigned address space
* @kctx: The context to be removed
*
* This function should be called when a context is being destroyed. The
@@ -121,8 +120,8 @@ void kbase_ctx_sched_release_ctx(struct kbase_context *kctx);
*/
void kbase_ctx_sched_remove_ctx(struct kbase_context *kctx);
-/* kbase_ctx_sched_restore_all_as - Reprogram all address spaces
- *
+/**
+ * kbase_ctx_sched_restore_all_as - Reprogram all address spaces
* @kbdev: The device for which address spaces to be reprogrammed
*
* This function shall reprogram all address spaces previously assigned to
diff --git a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
index d2c57cab117773..ee4552913b7afa 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_debug_mem_view.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2013-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -130,7 +130,7 @@ static int debug_mem_show(struct seq_file *m, void *v)
if (!(map->flags & KBASE_REG_CPU_CACHED))
prot = pgprot_writecombine(prot);
- page = phys_to_page(as_phys_addr_t(map->alloc->pages[data->offset]));
+ page = as_page(map->alloc->pages[data->offset]);
mapping = vmap(&page, 1, VM_MAP, prot);
if (!mapping)
goto out;
@@ -223,12 +223,6 @@ static int debug_mem_open(struct inode *i, struct file *file)
goto out;
}
- ret = debug_mem_zone_open(&kctx->reg_rbtree_exec, mem_data);
- if (0 != ret) {
- kbase_gpu_vm_unlock(kctx);
- goto out;
- }
-
ret = debug_mem_zone_open(&kctx->reg_rbtree_custom, mem_data);
if (0 != ret) {
kbase_gpu_vm_unlock(kctx);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_defs.h b/drivers/gpu/arm/midgard/mali_kbase_defs.h
index 6ac98bce1f4aef..e2110a14b4c161 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -42,6 +42,7 @@
#include <mali_kbase_gpuprops_types.h>
#include <protected_mode_switcher.h>
+
#include <linux/atomic.h>
#include <linux/mempool.h>
#include <linux/slab.h>
@@ -52,7 +53,6 @@
#include <linux/bus_logger.h>
#endif
-
#if defined(CONFIG_SYNC)
#include <sync.h>
#else
@@ -147,11 +147,7 @@
#define MIDGARD_MMU_LEVEL(x) (x)
-#if MIDGARD_MMU_VA_BITS > 39
#define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(0)
-#else
-#define MIDGARD_MMU_TOPLEVEL MIDGARD_MMU_LEVEL(1)
-#endif
#define MIDGARD_MMU_BOTTOMLEVEL MIDGARD_MMU_LEVEL(3)
@@ -175,10 +171,20 @@
/* Maximum force replay limit when randomization is enabled */
#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
+/* Maximum number of pages of memory that require a permanent mapping, per
+ * kbase_context
+ */
+#define KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES ((1024ul * 1024ul) >> \
+ PAGE_SHIFT)
+
+
/** Atom has been previously soft-stoppped */
#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
/** Atom has been previously retried to execute */
#define KBASE_KATOM_FLAGS_RERUN (1<<2)
+/* Atom submitted with JOB_CHAIN_FLAG bit set in JS_CONFIG_NEXT register; helps to
+ * disambiguate short-running job chains during soft/hard stopping of jobs
+ */
#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
/** Atom has been previously hard-stopped. */
#define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
@@ -196,6 +202,8 @@
#define KBASE_KATOM_FLAG_PROTECTED (1<<11)
/* Atom has been stored in runnable_tree */
#define KBASE_KATOM_FLAG_JSCTX_IN_TREE (1<<12)
+/* Atom is waiting for L2 caches to power up in order to enter protected mode */
+#define KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT (1<<13)
/* SW related flags about types of JS_COMMAND action
* NOTE: These must be masked off by JS_COMMAND_MASK */
@@ -229,8 +237,27 @@ struct kbase_context;
struct kbase_device;
struct kbase_as;
struct kbase_mmu_setup;
+struct kbase_ipa_model_vinstr_data;
#ifdef CONFIG_DEBUG_FS
+/**
+ * struct base_job_fault_event - keeps track of the atom which faulted or which
+ * completed after the faulty atom but before the
+ * debug data for faulty atom was dumped.
+ *
+ * @event_code: event code for the atom; expected to differ from
+ * BASE_JD_EVENT_DONE for the atom which faulted.
+ * @katom: pointer to the atom for which job fault occurred or which completed
+ * after the faulty atom.
+ * @job_fault_work: work item, queued only for the faulty atom, which waits for
+ * the dumping to get completed and then does the bottom half
+ * of job done for the atoms which followed the faulty atom.
+ * @head: List head used to store the atom in the global list of faulty
+ * atoms or context specific list of atoms which got completed
+ * during the dump.
+ * @reg_offset: offset of the register to be dumped next, only applicable for
+ * the faulty atom.
+ */
struct base_job_fault_event {
u32 event_code;
@@ -242,6 +269,12 @@ struct base_job_fault_event {
#endif
+/**
+ * struct kbase_jd_atom_dependency - Contains the dependency info for an atom.
+ * @atom: pointer to the dependee atom.
+ * @dep_type: type of dependency on the dependee @atom, i.e. order or data
+ * dependency. BASE_JD_DEP_TYPE_INVALID indicates no dependency.
+ */
struct kbase_jd_atom_dependency {
struct kbase_jd_atom *atom;
u8 dep_type;
@@ -281,14 +314,14 @@ struct kbase_io_history {
};
/**
- * @brief The function retrieves a read-only reference to the atom field from
- * the kbase_jd_atom_dependency structure
- *
- * @param[in] dep kbase jd atom dependency.
+ * kbase_jd_katom_dep_atom - Retrieves a read-only reference to the
+ * dependee atom.
+ * @dep: pointer to the dependency info structure.
*
- * @return readonly reference to dependent ATOM.
+ * Return: readonly reference to dependee atom.
*/
-static inline const struct kbase_jd_atom * kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
+static inline const struct kbase_jd_atom *
+kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency *dep)
{
LOCAL_ASSERT(dep != NULL);
@@ -296,12 +329,11 @@ static inline const struct kbase_jd_atom * kbase_jd_katom_dep_atom(const struct
}
/**
- * @brief The function retrieves a read-only reference to the dependency type field from
- * the kbase_jd_atom_dependency structure
+ * kbase_jd_katom_dep_type - Retrieves the dependency type info
*
- * @param[in] dep kbase jd atom dependency.
+ * @dep: pointer to the dependency info structure.
*
- * @return A dependency type value.
+ * Return: the type of dependency there is on the dependee atom.
*/
static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *dep)
{
@@ -311,12 +343,11 @@ static inline u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency *
}
/**
- * @brief Setter macro for dep_atom array entry in kbase_jd_atom
- *
- * @param[in] dep The kbase jd atom dependency.
- * @param[in] a The ATOM to be set as a dependency.
- * @param type The ATOM dependency type to be set.
- *
+ * kbase_jd_katom_dep_set - sets up the dependency info structure
+ * as per the values passed.
+ * @const_dep: pointer to the dependency info structure to be set up.
+ * @a: pointer to the dependee atom.
+ * @type: type of dependency there is on the dependee atom.
*/
static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency *const_dep,
struct kbase_jd_atom *a, u8 type)
@@ -332,10 +363,9 @@ static inline void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency
}
/**
- * @brief Setter macro for dep_atom array entry in kbase_jd_atom
- *
- * @param[in] dep The kbase jd atom dependency to be cleared.
+ * kbase_jd_katom_dep_clear - resets the dependency info structure
*
+ * @const_dep: pointer to the dependency info structure to be cleared.
*/
static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency *const_dep)
{
@@ -349,74 +379,216 @@ static inline void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependenc
dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
}
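/*
 * Hedged usage sketch (hypothetical helper, not part of this patch): a
 * dependency slot carries a real dependency only when a dependee atom is
 * set. BASE_JD_DEP_TYPE_DATA is assumed to be defined alongside
 * BASE_JD_DEP_TYPE_INVALID in the base headers. A caller holding a katom
 * would invoke it as kbase_jd_dep_is_data(&katom->dep[0]).
 */
static inline bool
kbase_jd_dep_is_data(const struct kbase_jd_atom_dependency *dep)
{
	return kbase_jd_katom_dep_atom(dep) != NULL &&
	       kbase_jd_katom_dep_type(dep) == BASE_JD_DEP_TYPE_DATA;
}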
+/**
+ * enum kbase_atom_gpu_rb_state - The state of an atom, pertinent after it becomes
+ * runnable, with respect to job slot ringbuffer/fifo.
+ * @KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB: Atom not currently present in slot fifo, which
+ * implies that either the atom has not yet become runnable
+ * due to a dependency or it has completed execution on the
+ * GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_BLOCKED: Atom has been added to slot fifo but is blocked
+ * due to cross slot dependency, can't be submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV: Atom has been added to slot fifo but
+ * is waiting for the completion of previously added atoms
+ * in current & other slots, as their protected mode
+ * requirements do not match those of the current atom.
+ * @KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION: Atom is in slot fifo and is
+ * waiting for completion of protected mode transition,
+ * needed before the atom is submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE: Atom is in slot fifo but is waiting
+ * for the cores, which are needed to execute the job
+ * chain represented by the atom, to become available.
+ * @KBASE_ATOM_GPU_RB_WAITING_AFFINITY: Atom is in slot fifo but is blocked on
+ * affinity due to rmu workaround for Hw issue 8987.
+ * @KBASE_ATOM_GPU_RB_READY: Atom is in slot fifo and can be submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_SUBMITTED: Atom is in slot fifo and has been submitted to GPU.
+ * @KBASE_ATOM_GPU_RB_RETURN_TO_JS: Atom must be returned to JS due to some failure,
+ * but only after the previously added atoms in fifo
+ * have completed or have also been returned to JS.
+ */
enum kbase_atom_gpu_rb_state {
- /* Atom is not currently present in slot ringbuffer */
KBASE_ATOM_GPU_RB_NOT_IN_SLOT_RB,
- /* Atom is in slot ringbuffer but is blocked on a previous atom */
KBASE_ATOM_GPU_RB_WAITING_BLOCKED,
- /* Atom is in slot ringbuffer but is waiting for a previous protected
- * mode transition to complete */
KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_PREV,
- /* Atom is in slot ringbuffer but is waiting for proected mode
- * transition */
KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION,
- /* Atom is in slot ringbuffer but is waiting for cores to become
- * available */
KBASE_ATOM_GPU_RB_WAITING_FOR_CORE_AVAILABLE,
- /* Atom is in slot ringbuffer but is blocked on affinity */
KBASE_ATOM_GPU_RB_WAITING_AFFINITY,
- /* Atom is in slot ringbuffer and ready to run */
KBASE_ATOM_GPU_RB_READY,
- /* Atom is in slot ringbuffer and has been submitted to the GPU */
KBASE_ATOM_GPU_RB_SUBMITTED,
- /* Atom must be returned to JS as soon as it reaches the head of the
- * ringbuffer due to a previous failure */
KBASE_ATOM_GPU_RB_RETURN_TO_JS = -1
};
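/*
 * Minimal sketch under the semantics documented above (hypothetical helper,
 * not part of this patch): only atoms whose ringbuffer state has advanced
 * to KBASE_ATOM_GPU_RB_READY may be handed to the hardware.
 */
static inline bool kbase_gpu_rb_state_is_ready(enum kbase_atom_gpu_rb_state s)
{
	return s == KBASE_ATOM_GPU_RB_READY;
}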
+/**
+ * enum kbase_atom_enter_protected_state - The state of an atom with respect to the
+ * preparation for GPU's entry into protected mode, becomes
+ * pertinent only after atom's state with respect to slot
+ * ringbuffer is KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @KBASE_ATOM_ENTER_PROTECTED_CHECK: Starting state. Check if there are any atoms
+ * currently submitted to GPU and protected mode transition is
+ * not already in progress.
+ * @KBASE_ATOM_ENTER_PROTECTED_VINSTR: Wait for vinstr to suspend before entry into
+ * protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_IDLE_L2: Wait for the L2 to become idle in preparation
+ * for the coherency change. L2 shall be powered down and GPU shall
+ * come out of fully coherent mode before entering protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY: Prepare coherency change;
+ * for BASE_HW_ISSUE_TGOX_R1_1234 also request L2 power on so that
+ * coherency register contains correct value when GPU enters
+ * protected mode.
+ * @KBASE_ATOM_ENTER_PROTECTED_FINISHED: End state; for BASE_HW_ISSUE_TGOX_R1_1234 check
+ * that L2 is powered up and switch GPU to protected mode.
+ */
enum kbase_atom_enter_protected_state {
- /*
- * Starting state:
- * Check if a transition into protected mode is required.
- *
- * NOTE: The integer value of this must
- * match KBASE_ATOM_EXIT_PROTECTED_CHECK.
+ /**
+ * NOTE: The integer value of this must match KBASE_ATOM_EXIT_PROTECTED_CHECK.
*/
KBASE_ATOM_ENTER_PROTECTED_CHECK = 0,
- /* Wait for vinstr to suspend. */
KBASE_ATOM_ENTER_PROTECTED_VINSTR,
- /* Wait for the L2 to become idle in preparation for
- * the coherency change. */
KBASE_ATOM_ENTER_PROTECTED_IDLE_L2,
- /* End state;
- * Prepare coherency change. */
+ KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY,
KBASE_ATOM_ENTER_PROTECTED_FINISHED,
};
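/*
 * Hedged sketch of the happy path through the enum above (hypothetical
 * helper, not part of this patch; the real sequencing, including aborts,
 * lives in the job manager backend):
 */
static inline enum kbase_atom_enter_protected_state
kbase_enter_protected_next(enum kbase_atom_enter_protected_state s)
{
	switch (s) {
	case KBASE_ATOM_ENTER_PROTECTED_CHECK:
		return KBASE_ATOM_ENTER_PROTECTED_VINSTR;
	case KBASE_ATOM_ENTER_PROTECTED_VINSTR:
		return KBASE_ATOM_ENTER_PROTECTED_IDLE_L2;
	case KBASE_ATOM_ENTER_PROTECTED_IDLE_L2:
		return KBASE_ATOM_ENTER_PROTECTED_SET_COHERENCY;
	default:
		return KBASE_ATOM_ENTER_PROTECTED_FINISHED;
	}
}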
+/**
+ * enum kbase_atom_exit_protected_state - The state of an atom with respect to the
+ * preparation for GPU's exit from protected mode, becomes
+ * pertinent only after atom's state with respect to slot
+ * ringbuffer is KBASE_ATOM_GPU_RB_WAITING_PROTECTED_MODE_TRANSITION
+ * @KBASE_ATOM_EXIT_PROTECTED_CHECK: Starting state. Check if there are any atoms
+ * currently submitted to GPU and protected mode transition is
+ * not already in progress.
+ * @KBASE_ATOM_EXIT_PROTECTED_IDLE_L2: Wait for the L2 to become idle in preparation
+ * for the reset, as exiting protected mode requires a reset.
+ * @KBASE_ATOM_EXIT_PROTECTED_RESET: Issue the reset to trigger exit from protected mode.
+ * @KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT: End state; wait for the reset to complete.
+ */
enum kbase_atom_exit_protected_state {
- /*
- * Starting state:
- * Check if a transition out of protected mode is required.
- *
- * NOTE: The integer value of this must
- * match KBASE_ATOM_ENTER_PROTECTED_CHECK.
+ /**
+ * NOTE: The integer value of this must match KBASE_ATOM_ENTER_PROTECTED_CHECK.
*/
KBASE_ATOM_EXIT_PROTECTED_CHECK = 0,
- /* Wait for the L2 to become idle in preparation
- * for the reset. */
KBASE_ATOM_EXIT_PROTECTED_IDLE_L2,
- /* Issue the protected reset. */
KBASE_ATOM_EXIT_PROTECTED_RESET,
- /* End state;
- * Wait for the reset to complete. */
KBASE_ATOM_EXIT_PROTECTED_RESET_WAIT,
};
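/*
 * The two CHECK states must share an integer value because struct
 * kbase_jd_atom (below) keeps them in a union. A build-time check in this
 * spirit would catch accidental renumbering (sketch only, not in this
 * patch; BUILD_BUG_ON comes from <linux/bug.h>):
 */
static inline void kbase_protected_check_states_match(void)
{
	BUILD_BUG_ON((int)KBASE_ATOM_ENTER_PROTECTED_CHECK !=
		     (int)KBASE_ATOM_EXIT_PROTECTED_CHECK);
}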
+/**
+ * struct kbase_ext_res - Contains the info for external resources referred
+ * by an atom, which have been mapped on GPU side.
+ * @gpu_address: Start address of the memory region allocated for
+ * the resource from GPU virtual address space.
+ * @alloc: pointer to physical pages tracking object, set on
+ * mapping the external resource on GPU side.
+ */
struct kbase_ext_res {
u64 gpu_address;
struct kbase_mem_phy_alloc *alloc;
};
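/*
 * Usage sketch: an atom's mapped external resources (see @nr_extres and
 * @extres in struct kbase_jd_atom below) would be walked as, e.g.:
 *
 *	for (i = 0; i < katom->nr_extres; i++)
 *		use(katom->extres[i].gpu_address);
 *
 * where use() stands in for whatever the caller does with the mapping.
 */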
+/**
+ * struct kbase_jd_atom - object representing the atom, containing the complete
+ * state and attributes of an atom.
+ * @work: work item for the bottom half processing of the atom,
+ * by JD or JS, after it got executed on GPU or the input
+ * fence got signaled
+ * @start_timestamp: time at which the atom was submitted to the GPU, by
+ * updating the JS_HEAD_NEXTn register.
+ * @udata: copy of the user data sent for the atom in base_jd_submit.
+ * @kctx: Pointer to the base context with which the atom is associated.
+ * @dep_head: Array of 2 list heads, pointing to the two list of atoms
+ * which are blocked due to dependency on this atom.
+ * @dep_item: Array of 2 list heads, used to store the atom in the list of
+ * other atoms depending on the same dependee atom.
+ * @dep: Array containing the dependency info for the 2 atoms on which
+ * the atom depends upon.
+ * @jd_item: List head used during job dispatch job_done processing - as
+ * dependencies may not be entirely resolved at this point,
+ * we need to use a separate list head.
+ * @in_jd_list: flag set to true if atom's @jd_item is currently on a list,
+ * prevents atom being processed twice.
+ * @nr_extres: number of external resources referenced by the atom.
+ * @extres: pointer to the location containing info about @nr_extres
+ * external resources referenced by the atom.
+ * @device_nr: indicates the coregroup with which the atom is associated,
+ * when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified.
+ * @jc: GPU address of the job-chain.
+ * @softjob_data: Copy of data read from the user space buffer that @jc
+ * points to.
+ * @coreref_state: state of the atom with respect to retention of shader
+ * cores for affinity & power management.
+ * @fence: Stores either an input or output sync fence, depending
+ * on soft-job type
+ * @sync_waiter: Pointer to the sync fence waiter structure passed to the
+ * callback function on signaling of the input fence.
+ * @dma_fence: object containing pointers to both input & output fences
+ * and other related members used for explicit sync through
+ * soft jobs and for the implicit synchronization required
+ * on access to external resources.
+ * @event_code: Event code for the job chain represented by the atom, both
+ * HW and low-level SW events are represented by event codes.
+ * @core_req: bitmask of BASE_JD_REQ_* flags specifying either Hw or Sw
+ * requirements for the job chain represented by the atom.
+ * @ticks: Number of scheduling ticks for which atom has been running
+ * on the GPU.
+ * @sched_priority: Priority of the atom for Job scheduling, as per the
+ * KBASE_JS_ATOM_SCHED_PRIO_*.
+ * @poking: Indicates whether poking of MMU is ongoing for the atom,
+ * as a WA for the issue HW_ISSUE_8316.
+ * @completed: Wait queue to wait upon for the completion of atom.
+ * @status: Indicates at a high level what stage the atom is at,
+ * as per KBASE_JD_ATOM_STATE_*: whether it is not in use,
+ * queued in JD, given to JS, submitted to Hw, or has
+ * completed execution on Hw.
+ * @work_id: used for GPU tracepoints; it is a snapshot of the 'work_id'
+ * counter in kbase_jd_context which is incremented on
+ * every call to base_jd_submit.
+ * @slot_nr: Job slot chosen for the atom.
+ * @atom_flags: bitmask of KBASE_KATOM_FLAG* flags capturing the exact
+ * low level state of the atom.
+ * @retry_count: Number of times this atom has been retried. Used by replay
+ * soft job.
+ * @gpu_rb_state: bitmask of KBASE_ATOM_GPU_RB_* flags, precisely tracking
+ * atom's state after it has entered Job scheduler on becoming
+ * runnable. Atom could be blocked due to cross slot dependency
+ * or waiting for the shader cores to become available or
+ * waiting for protected mode transitions to complete.
+ * @need_cache_flush_cores_retained: flag indicating that manual flush of GPU
+ * cache is needed for the atom and the shader cores used
+ * for the atom have been kept on.
+ * @blocked: flag indicating that atom's resubmission to GPU is
+ * blocked till the work item is scheduled to return the
+ * atom to JS.
+ * @pre_dep: Pointer to atom that this atom has same-slot dependency on
+ * @post_dep: Pointer to atom that has same-slot dependency on this atom
+ * @x_pre_dep: Pointer to atom that this atom has cross-slot dependency on
+ * @x_post_dep: Pointer to atom that has cross-slot dependency on this atom
+ * @flush_id: The GPU's flush count recorded at the time of submission,
+ * used for the cache flush optimisation
+ * @fault_event: Info for dumping the debug data on Job fault.
+ * @queue: List head used for 4 different purposes:
+ * Adds atom to the list of dma-buf fence waiting atoms.
+ * Adds atom to the list of atoms blocked due to cross
+ * slot dependency.
+ * Adds atom to the list of softjob atoms for which JIT
+ * allocation has been deferred
+ * Adds atom to the list of softjob atoms waiting for the
+ * signaling of fence.
+ * @jit_node: Used to keep track of all JIT free/alloc jobs in submission order
+ * @jit_blocked: Flag indicating that JIT allocation requested through
+ * softjob atom will be reattempted after the impending
+ * free of other active JIT allocations.
+ * @will_fail_event_code: If non-zero, this indicates that the atom will fail
+ * with the set event_code when the atom is processed.
+ * Used for special handling of atoms, which have a data
+ * dependency on the failed atoms.
+ * @protected_state: State of the atom, as per KBASE_ATOM_(ENTER|EXIT)_PROTECTED_*,
+ * when transitioning into or out of protected mode. Atom will
+ * be either entering or exiting the protected mode.
+ * @runnable_tree_node: The node added to context's job slot specific rb tree
+ * when the atom becomes runnable.
+ * @age: Age of atom relative to other atoms in the context, is
+ * snapshot of the age_count counter in kbase context.
+ */
struct kbase_jd_atom {
struct work_struct work;
ktime_t start_timestamp;
@@ -427,25 +599,17 @@ struct kbase_jd_atom {
struct list_head dep_head[2];
struct list_head dep_item[2];
const struct kbase_jd_atom_dependency dep[2];
- /* List head used during job dispatch job_done processing - as
- * dependencies may not be entirely resolved at this point, we need to
- * use a separate list head. */
struct list_head jd_item;
- /* true if atom's jd_item is currently on a list. Prevents atom being
- * processed twice. */
bool in_jd_list;
u16 nr_extres;
struct kbase_ext_res *extres;
u32 device_nr;
- u64 affinity;
u64 jc;
- /* Copy of data read from the user space buffer that jc points to */
void *softjob_data;
enum kbase_atom_coreref_state coreref_state;
#if defined(CONFIG_SYNC)
- /* Stores either an input or output fence, depending on soft-job type */
struct sync_fence *fence;
struct sync_fence_waiter sync_waiter;
#endif /* CONFIG_SYNC */
@@ -519,73 +683,49 @@ struct kbase_jd_atom {
/* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
enum base_jd_event_code event_code;
- base_jd_core_req core_req; /**< core requirements */
+ base_jd_core_req core_req;
u32 ticks;
- /* JS atom priority with respect to other atoms on its kctx. */
int sched_priority;
- int poking; /* BASE_HW_ISSUE_8316 */
+ int poking;
wait_queue_head_t completed;
enum kbase_jd_atom_state status;
#ifdef CONFIG_GPU_TRACEPOINTS
int work_id;
#endif
- /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */
int slot_nr;
u32 atom_flags;
- /* Number of times this atom has been retried. Used by replay soft job.
- */
int retry_count;
enum kbase_atom_gpu_rb_state gpu_rb_state;
- u64 need_cache_flush_cores_retained;
+ bool need_cache_flush_cores_retained;
atomic_t blocked;
- /* Pointer to atom that this atom has same-slot dependency on */
struct kbase_jd_atom *pre_dep;
- /* Pointer to atom that has same-slot dependency on this atom */
struct kbase_jd_atom *post_dep;
- /* Pointer to atom that this atom has cross-slot dependency on */
struct kbase_jd_atom *x_pre_dep;
- /* Pointer to atom that has cross-slot dependency on this atom */
struct kbase_jd_atom *x_post_dep;
- /* The GPU's flush count recorded at the time of submission, used for
- * the cache flush optimisation */
u32 flush_id;
- struct kbase_jd_atom_backend backend;
#ifdef CONFIG_DEBUG_FS
struct base_job_fault_event fault_event;
#endif
- /* List head used for three different purposes:
- * 1. Overflow list for JS ring buffers. If an atom is ready to run,
- * but there is no room in the JS ring buffer, then the atom is put
- * on the ring buffer's overflow list using this list node.
- * 2. List of waiting soft jobs.
- */
struct list_head queue;
- /* Used to keep track of all JIT free/alloc jobs in submission order
- */
struct list_head jit_node;
bool jit_blocked;
- /* If non-zero, this indicates that the atom will fail with the set
- * event_code when the atom is processed. */
enum base_jd_event_code will_fail_event_code;
- /* Atoms will only ever be transitioning into, or out of
- * protected mode so we do not need two separate fields.
- */
union {
enum kbase_atom_enter_protected_state enter;
enum kbase_atom_exit_protected_state exit;
@@ -593,10 +733,33 @@ struct kbase_jd_atom {
struct rb_node runnable_tree_node;
- /* 'Age' of atom relative to other atoms in the context. */
u32 age;
};
+/**
+ * struct kbase_debug_copy_buffer - information about the buffer to be copied.
+ *
+ * @size: size of the buffer in bytes
+ * @pages: pointer to an array of pointers to the pages which contain
+ * the buffer
+ * @nr_pages: number of pages
+ * @offset: offset into the pages
+ * @gpu_alloc: pointer to physical memory allocated by the GPU
+ * @extres_pages: array of pointers to the pages containing external resources
+ * for this buffer
+ * @nr_extres_pages: number of pages in @extres_pages
+ */
+struct kbase_debug_copy_buffer {
+ size_t size;
+ struct page **pages;
+ int nr_pages;
+ size_t offset;
+ struct kbase_mem_phy_alloc *gpu_alloc;
+
+ struct page **extres_pages;
+ int nr_extres_pages;
+};
+
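/*
 * Worked example (assumption: @offset is the byte offset into the first
 * entry of @pages): the page span of such a buffer is
 * DIV_ROUND_UP(offset + size, PAGE_SIZE); e.g. size = 10000 and
 * offset = 100 with 4 KiB pages spans 3 pages. Hypothetical helper:
 */
static inline int
kbase_debug_copy_buffer_page_span(size_t size, size_t offset)
{
	return (int)DIV_ROUND_UP(offset + size, PAGE_SIZE);
}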
static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom)
{
return (bool)(katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED);
@@ -612,38 +775,57 @@ static inline bool kbase_jd_katom_is_protected(const struct kbase_jd_atom *katom
#define KBASE_JD_DEP_QUEUE_SIZE 256
+/**
+ * struct kbase_jd_context - per context object encapsulating all the Job dispatcher
+ * related state.
+ * @lock: lock to serialize the updates made to the Job dispatcher
+ * state and kbase_jd_atom objects.
+ * @sched_info: Structure encapsulating all the Job scheduling info.
+ * @atoms: Array of the objects representing atoms, containing
+ * the complete state and attributes of an atom.
+ * @job_nr: Tracks the number of atoms being processed by the
+ * kbase. This includes atoms that are not tracked by
+ * scheduler: 'not ready to run' & 'dependency-only' jobs.
+ * @zero_jobs_wait: Waitq that reflects whether there are no jobs
+ * (including SW-only dependency jobs). This is set
+ * when no jobs are present on the ctx, and clear when
+ * there are jobs.
+ * This must be updated atomically with @job_nr.
+ * note: Job Dispatcher knows about more jobs than the
+ * Job Scheduler as it is unaware of jobs that are
+ * blocked on dependencies and SW-only dependency jobs.
+ * This waitq can be waited upon to find out when the
+ * context jobs are all done/cancelled (including those
+ * that might've been blocked on dependencies) - and so,
+ * whether it can be terminated. However, it should only
+ * be terminated once it is not present in the run-pool.
+ * Since the waitq is only set under @lock, the waiter
+ * should also briefly obtain and drop @lock to guarantee
+ * that the setter has completed its work on the kbase_context.
+ * @job_done_wq: Workqueue to which the per atom work item is queued
+ * for bottom half processing when the atom completes
+ * execution on GPU or the input fence get signaled.
+ * @tb_lock: Lock to serialize the write accesses made to @tb to
+ * store the register access trace messages.
+ * @tb: Pointer to the Userspace accessible buffer storing
+ * the trace messages for register read/write accesses
+ * made by the Kbase. The buffer is filled in circular
+ * fashion.
+ * @tb_wrap_offset: Offset to the end location in the trace buffer; the
+ * write pointer is moved to the beginning on reaching
+ * this offset.
+ * @work_id: atomic variable used for GPU tracepoints, incremented
+ * on every call to base_jd_submit.
+ */
struct kbase_jd_context {
struct mutex lock;
struct kbasep_js_kctx_info sched_info;
struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
- /** Tracks all job-dispatch jobs. This includes those not tracked by
- * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
u32 job_nr;
- /** Waitq that reflects whether there are no jobs (including SW-only
- * dependency jobs). This is set when no jobs are present on the ctx,
- * and clear when there are jobs.
- *
- * @note: Job Dispatcher knows about more jobs than the Job Scheduler:
- * the Job Scheduler is unaware of jobs that are blocked on dependencies,
- * and SW-only dependency jobs.
- *
- * This waitq can be waited upon to find out when the context jobs are all
- * done/cancelled (including those that might've been blocked on
- * dependencies) - and so, whether it can be terminated. However, it should
- * only be terminated once it is not present in the run-pool (see
- * kbasep_js_kctx_info::ctx::is_scheduled).
- *
- * Since the waitq is only set under kbase_jd_context::lock,
- * the waiter should also briefly obtain and drop kbase_jd_context::lock to
- * guarentee that the setter has completed its work on the kbase_context
- *
- * This must be updated atomically with:
- * - kbase_jd_context::job_nr */
wait_queue_head_t zero_jobs_wait;
- /** Job Done workqueue. */
struct workqueue_struct *job_done_wq;
spinlock_t tb_lock;
@@ -675,15 +857,34 @@ struct kbase_mmu_setup {
};
/**
- * Important: Our code makes assumptions that a struct kbase_as structure is always at
- * kbase_device->as[number]. This is used to recover the containing
- * struct kbase_device from a struct kbase_as structure.
- *
- * Therefore, struct kbase_as structures must not be allocated anywhere else.
+ * struct kbase_as - object representing an address space of GPU.
+ * @number: Index at which this address space structure is present
+ * in an array of address space structures embedded inside the
+ * struct kbase_device.
+ * @pf_wq: Workqueue for processing work items related to Bus fault
+ * and Page fault handling.
+ * @work_pagefault: Work item for the Page fault handling.
+ * @work_busfault: Work item for the Bus fault handling.
+ * @fault_type: Type of fault which occurred for this address space,
+ * regular/unexpected Bus or Page fault.
+ * @protected_mode: Flag indicating whether the fault occurred in protected
+ * mode or not.
+ * @fault_status: Records the fault status as reported by Hw.
+ * @fault_addr: Records the faulting address.
+ * @fault_extra_addr: Records the secondary fault address.
+ * @current_setup: Stores the MMU configuration for this address space.
+ * @poke_wq: Workqueue to process the work items queue for poking the
+ * MMU as a WA for BASE_HW_ISSUE_8316.
+ * @poke_work: Work item to do the poking of MMU for this address space.
+ * @poke_refcount: Refcount for the need of poking MMU. While the refcount is
+ * non-zero the poking of MMU will continue.
+ * Protected by hwaccess_lock.
+ * @poke_state: State indicating whether poking is in progress or it has
+ * been stopped. Protected by hwaccess_lock.
+ * @poke_timer: Timer used to schedule the poking at regular intervals.
*/
struct kbase_as {
int number;
-
struct workqueue_struct *pf_wq;
struct work_struct work_pagefault;
struct work_struct work_busfault;
@@ -692,19 +893,36 @@ struct kbase_as {
u32 fault_status;
u64 fault_addr;
u64 fault_extra_addr;
-
struct kbase_mmu_setup current_setup;
-
- /* BASE_HW_ISSUE_8316 */
struct workqueue_struct *poke_wq;
struct work_struct poke_work;
- /** Protected by hwaccess_lock */
int poke_refcount;
- /** Protected by hwaccess_lock */
kbase_as_poke_state poke_state;
struct hrtimer poke_timer;
};
+/**
+ * struct kbase_mmu_table - object representing a set of GPU page tables
+ * @mmu_teardown_pages: Buffer of 4 pages in size, used to cache the entries
+ * of top & intermediate level page tables to avoid
+ * repeated calls to kmap_atomic during the MMU teardown.
+ * @mmu_lock: Lock to serialize the accesses made to multi level GPU
+ * page tables
+ * @pgd: Physical address of the page allocated for the top
+ * level page table of the context, this is used for
+ * MMU HW programming as the address translation will
+ * start from the top level page table.
+ * @kctx: If this set of MMU tables belongs to a context then
+ * this is a back-reference to the context, otherwise
+ * it is NULL.
+ */
+struct kbase_mmu_table {
+ u64 *mmu_teardown_pages;
+ struct mutex mmu_lock;
+ phys_addr_t pgd;
+ struct kbase_context *kctx;
+};
+
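/*
 * Hedged sketch (hypothetical helper): with the @kctx back-reference a
 * fault handler can tell context-owned page tables from device-global
 * ones without any extra bookkeeping.
 */
static inline bool kbase_mmu_table_has_ctx(struct kbase_mmu_table *mmut)
{
	return mmut->kctx != NULL;
}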
static inline int kbase_as_has_bus_fault(struct kbase_as *as)
{
return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
@@ -738,6 +956,37 @@ enum kbase_trace_code {
#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
+/**
+ * struct kbase_trace - object representing a trace message added to trace buffer
+ * kbase_device::trace_rbuf
+ * @timestamp: CPU timestamp at which the trace message was added.
+ * @thread_id: id of the thread in the context of which trace message
+ * was added.
+ * @cpu: indicates which CPU the @thread_id was scheduled on when
+ * the trace message was added.
+ * @ctx: Pointer to the kbase context for which the trace message
+ * was added. Will be NULL for certain trace messages like
+ * for traces added corresponding to power management events.
+ * Will point to the appropriate context corresponding to
+ * job-slot & context's reference count related events.
+ * @katom: indicates if the trace message has atom related info.
+ * @atom_number: id of the atom for which trace message was added.
+ * Only valid if @katom is true.
+ * @atom_udata: Copy of the user data sent for the atom in base_jd_submit.
+ * Only valid if @katom is true.
+ * @gpu_addr: GPU address of the job-chain represented by atom. Could
+ * be valid even if @katom is false.
+ * @info_val: value specific to the type of event being traced. For the
+ * case where @katom is true, will be set to atom's affinity,
+ * i.e. bitmask of shader cores chosen for atom's execution.
+ * @code: Identifies the event, refer enum kbase_trace_code.
+ * @jobslot: job-slot for which trace message was added, valid only for
+ * job-slot management events.
+ * @refcount: reference count for the context, valid for certain events
+ * related to scheduler core and policy.
+ * @flags: indicates if info related to @jobslot & @refcount is present
+ * in the trace message, used during dumping of the message.
+ */
struct kbase_trace {
struct timespec timestamp;
u32 thread_id;
@@ -754,88 +1003,6 @@ struct kbase_trace {
u8 flags;
};
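/*
 * Hedged dump-side sketch: per the @flags documentation above, the
 * optional fields are only meaningful when the matching bit is set, e.g.:
 *
 *	if (trace->flags & KBASE_TRACE_FLAG_JOBSLOT)
 *		pr_cont(" slot=%u", trace->jobslot);
 *	if (trace->flags & KBASE_TRACE_FLAG_REFCOUNT)
 *		pr_cont(" refcount=%u", trace->refcount);
 */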
-/** Event IDs for the power management framework.
- *
- * Any of these events might be missed, so they should not be relied upon to
- * find the precise state of the GPU at a particular time in the
- * trace. Overall, we should get a high percentage of these events for
- * statisical purposes, and so a few missing should not be a problem */
-enum kbase_timeline_pm_event {
- /* helper for tests */
- KBASEP_TIMELINE_PM_EVENT_FIRST,
-
- /** Event reserved for backwards compatibility with 'init' events */
- KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,
-
- /** The power state of the device has changed.
- *
- * Specifically, the device has reached a desired or available state.
- */
- KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,
-
- /** The GPU is becoming active.
- *
- * This event is sent when the first context is about to use the GPU.
- */
- KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,
-
- /** The GPU is becoming idle.
- *
- * This event is sent when the last context has finished using the GPU.
- */
- KBASE_TIMELINE_PM_EVENT_GPU_IDLE,
-
- /** Event reserved for backwards compatibility with 'policy_change'
- * events */
- KBASE_TIMELINE_PM_EVENT_RESERVED_4,
-
- /** Event reserved for backwards compatibility with 'system_suspend'
- * events */
- KBASE_TIMELINE_PM_EVENT_RESERVED_5,
-
- /** Event reserved for backwards compatibility with 'system_resume'
- * events */
- KBASE_TIMELINE_PM_EVENT_RESERVED_6,
-
- /** The job scheduler is requesting to power up/down cores.
- *
- * This event is sent when:
- * - powered down cores are needed to complete a job
- * - powered up cores are not needed anymore
- */
- KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
-
- KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
-};
-
-#ifdef CONFIG_MALI_TRACE_TIMELINE
-struct kbase_trace_kctx_timeline {
- atomic_t jd_atoms_in_flight;
- u32 owner_tgid;
-};
-
-struct kbase_trace_kbdev_timeline {
- /* Note: strictly speaking, not needed, because it's in sync with
- * kbase_device::jm_slots[]::submitted_nr
- *
- * But it's kept as an example of how to add global timeline tracking
- * information
- *
- * The caller must hold hwaccess_lock when accessing this */
- u8 slot_atoms_submitted[BASE_JM_MAX_NR_SLOTS];
-
- /* Last UID for each PM event */
- atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
- /* Counter for generating PM event UIDs */
- atomic_t pm_event_uid_counter;
- /*
- * L2 transition state - true indicates that the transition is ongoing
- * Expected to be protected by hwaccess_lock */
- bool l2_transitioning;
-};
-#endif /* CONFIG_MALI_TRACE_TIMELINE */
-
-
struct kbasep_kctx_list_element {
struct list_head link;
struct kbase_context *kctx;
@@ -857,7 +1024,11 @@ struct kbase_pm_device_data {
*/
struct mutex lock;
- /** The reference count of active contexts on this device. */
+ /**
+ * The reference count of active contexts on this device. Note that
+ * some code paths keep shaders/the tiler powered whilst this is 0. Use
+ * kbase_pm_is_active() instead to check for such cases.
+ */
int active_count;
/** Flag indicating suspending/suspended */
bool suspending;
@@ -904,19 +1075,23 @@ struct kbase_pm_device_data {
/**
* struct kbase_mem_pool - Page based memory pool for kctx/kbdev
- * @kbdev: Kbase device where memory is used
- * @cur_size: Number of free pages currently in the pool (may exceed @max_size
- * in some corner cases)
- * @max_size: Maximum number of free pages in the pool
- * @order: order = 0 refers to a pool of 4 KB pages
- * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
- * @pool_lock: Lock protecting the pool - must be held when modifying @cur_size
- * and @page_list
- * @page_list: List of free pages in the pool
- * @reclaim: Shrinker for kernel reclaim of free pages
- * @next_pool: Pointer to next pool where pages can be allocated when this pool
- * is empty. Pages will spill over to the next pool when this pool
- * is full. Can be NULL if there is no next pool.
+ * @kbdev: Kbase device where memory is used
+ * @cur_size: Number of free pages currently in the pool (may exceed
+ * @max_size in some corner cases)
+ * @max_size: Maximum number of free pages in the pool
+ * @order: order = 0 refers to a pool of 4 KB pages
+ * order = 9 refers to a pool of 2 MB pages (2^9 * 4KB = 2 MB)
+ * @pool_lock: Lock protecting the pool - must be held when modifying
+ * @cur_size and @page_list
+ * @page_list: List of free pages in the pool
+ * @reclaim: Shrinker for kernel reclaim of free pages
+ * @next_pool: Pointer to next pool where pages can be allocated when this
+ * pool is empty. Pages will spill over to the next pool when
+ * this pool is full. Can be NULL if there is no next pool.
+ * @dying: true if the pool is being terminated, and any ongoing
+ * operations should be abandoned
+ * @dont_reclaim: true if the shrinker is forbidden from reclaiming memory from
+ * this pool, e.g. during a grow operation
*/
struct kbase_mem_pool {
struct kbase_device *kbdev;
@@ -928,6 +1103,9 @@ struct kbase_mem_pool {
struct shrinker reclaim;
struct kbase_mem_pool *next_pool;
+
+ bool dying;
+ bool dont_reclaim;
};
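/*
 * Sketch only (hypothetical helper; field types follow the kernel-doc
 * above): readers of @cur_size must hold @pool_lock, and a dying pool
 * should be treated as empty so ongoing operations back off.
 */
static inline size_t kbase_mem_pool_size_locked(struct kbase_mem_pool *pool)
{
	lockdep_assert_held(&pool->pool_lock);
	return pool->dying ? 0 : pool->cur_size;
}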
/**
@@ -943,9 +1121,33 @@ struct kbase_devfreq_opp {
u64 core_mask;
};
+/* MMU mode flags */
+#define KBASE_MMU_MODE_HAS_NON_CACHEABLE (1ul << 0) /* Has NON_CACHEABLE MEMATTR */
+
+/**
+ * struct kbase_mmu_mode - object containing pointer to methods invoked for
+ * programming the MMU, as per the MMU mode supported
+ * by Hw.
+ * @update: enable & setup/configure one of the GPU address space.
+ * @get_as_setup: retrieve the configuration of one of the GPU address space.
+ * @disable_as: disable one of the GPU address space.
+ * @pte_to_phy_addr: retrieve the physical address encoded in the page table entry.
+ * @ate_is_valid: check if the pte is a valid address translation entry
+ * encoding the physical address of the actual mapped page.
+ * @pte_is_valid: check if the pte is a valid entry encoding the physical
+ * address of the next lower level page table.
+ * @entry_set_ate: program the pte to be a valid address translation entry to
+ * encode the physical address of the actual page being mapped.
+ * @entry_set_pte: program the pte to be a valid entry to encode the physical
+ * address of the next lower level page table.
+ * @entry_invalidate: clear out or invalidate the pte.
+ * @flags: bitmask of MMU mode flags. Refer to KBASE_MMU_MODE_ constants.
+ */
struct kbase_mmu_mode {
- void (*update)(struct kbase_context *kctx);
- void (*get_as_setup)(struct kbase_context *kctx,
+ void (*update)(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ int as_nr);
+ void (*get_as_setup)(struct kbase_mmu_table *mmut,
struct kbase_mmu_setup * const setup);
void (*disable_as)(struct kbase_device *kbdev, int as_nr);
phys_addr_t (*pte_to_phy_addr)(u64 entry);
@@ -955,6 +1157,7 @@ struct kbase_mmu_mode {
unsigned long flags, unsigned int level);
void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
void (*entry_invalidate)(u64 *entry);
+ unsigned long flags;
};
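/*
 * Usage sketch for the new @flags word (hypothetical helper, not part of
 * this patch): mode capabilities become simple bitmask tests.
 */
static inline bool kbase_mmu_mode_has_non_cacheable(
		const struct kbase_mmu_mode *mode)
{
	return !!(mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE);
}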
struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void);
@@ -963,9 +1166,250 @@ struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void);
#define DEVNAME_SIZE 16
-struct kbase_device {
- s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
+/**
+ * struct kbase_device - Object representing an instance of GPU platform device,
+ * allocated from the probe method of the mali driver.
+ * @hw_quirks_sc: Configuration to be used for the shader cores as per
+ * the HW issues present in the GPU.
+ * @hw_quirks_tiler: Configuration to be used for the Tiler as per the HW
+ * issues present in the GPU.
+ * @hw_quirks_mmu: Configuration to be used for the MMU as per the HW
+ * issues present in the GPU.
+ * @hw_quirks_jm: Configuration to be used for the Job Manager as per
+ * the HW issues present in the GPU.
+ * @entry: Links the device instance to the global list of GPU
+ * devices. The list would have as many entries as there
+ * are GPU device instances.
+ * @dev: Pointer to the kernel's generic/base representation
+ * of the GPU platform device.
+ * @mdev: Pointer to the miscellaneous device registered to
+ * provide Userspace access to kernel driver through the
+ * device file /dev/malixx.
+ * @reg_start: Base address of the region in physical address space
+ * where GPU registers have been mapped.
+ * @reg_size: Size of the region containing GPU registers
+ * @reg: Kernel virtual address of the region containing GPU
+ * registers, using which Driver will access the registers.
+ * @irqs: Array containing IRQ resource info for 3 types of
+ * interrupts : Job scheduling, MMU & GPU events (like
+ * power management, cache etc.)
+ * @clock: Pointer to the input clock resource (having an id of 0),
+ * referenced by the GPU device node.
+ * @regulator: Pointer to the struct corresponding to the regulator
+ * for GPU device
+ * @devname: string containing the name used for GPU device instance,
+ * miscellaneous device is registered using the same name.
+ * @model: Pointer, valid only when Driver is compiled to not access
+ * the real GPU Hw, to the dummy model which tries to mimic
+ * to some extent the state & behavior of GPU Hw in response
+ * to the register accesses made by the Driver.
+ * @irq_slab: slab cache for allocating the work items queued when
+ * model mimics raising of IRQ to cause an interrupt on CPU.
+ * @irq_workq: workqueue for processing the irq work items.
+ * @serving_job_irq: function to execute work items queued when model mimics
+ * the raising of JS irq, mimics the interrupt handler
+ * processing JS interrupts.
+ * @serving_gpu_irq: function to execute work items queued when model mimics
+ * the raising of GPU irq, mimics the interrupt handler
+ * processing GPU interrupts.
+ * @serving_mmu_irq: function to execute work items queued when model mimics
+ * the raising of MMU irq, mimics the interrupt handler
+ * processing MMU interrupts.
+ * @reg_op_lock: lock used by model to serialize the handling of register
+ * accesses made by the driver.
+ * @pm: Per device object for storing data for power management
+ * framework.
+ * @js_data: Per device object encapsulating the current context of
+ * Job Scheduler, which is global to the device and is not
+ * tied to any particular struct kbase_context running on
+ * the device
+ * @mem_pool: Object containing the state for global pool of 4KB size
+ * physical pages which can be used by all the contexts.
+ * @lp_mem_pool: Object containing the state for global pool of 2MB size
+ * physical pages which can be used by all the contexts.
+ * @memdev: keeps track of the in use physical pages allocated by
+ * the Driver.
+ * @mmu_mode: Pointer to the object containing methods for programming
+ * the MMU, depending on the type of MMU supported by Hw.
+ * @as: Array of objects representing address spaces of GPU.
+ * @as_free: Bitpattern of free/available GPU address spaces.
+ * @as_to_kctx: Array of pointers to struct kbase_context, having
+ * GPU address spaces assigned to them.
+ * @mmu_mask_change: Lock to serialize the access to MMU interrupt mask
+ * register used in the handling of Bus & Page faults.
+ * @gpu_props: Object containing complete information about the
+ * configuration/properties of GPU HW device in use.
+ * @hw_issues_mask: List of SW workarounds for HW issues
+ * @hw_features_mask: List of available HW features.
+ * @shader_needed_cnt: Count for the 64 shader cores, incremented when
+ * shaders are requested for use and decremented later
+ * when they are no longer required.
+ * @tiler_needed_cnt: Count for the Tiler block, incremented when the
+ * Tiler is requested for use and decremented later
+ * when the Tiler is no longer required.
+ * @disjoint_event: struct for keeping track of the disjoint information,
+ * that whether the GPU is in a disjoint state and the
+ * number of disjoint events that have occurred on GPU.
+ * @l2_users_count: Refcount for tracking users of the l2 cache, e.g.
+ * when using hardware counter instrumentation.
+ * @shader_available_bitmap: Bitmap of shader cores that are currently available,
+ * powered up and the power policy is happy for jobs
+ * to be submitted to these cores. These are updated
+ * by the power management code. The job scheduler
+ * should avoid submitting new jobs to any cores
+ * that are not marked as available.
+ * @tiler_available_bitmap: Bitmap of tiler units that are currently available.
+ * @l2_available_bitmap: Bitmap of the currently available Level 2 caches.
+ * @stack_available_bitmap: Bitmap of the currently available Core stacks.
+ * @shader_ready_bitmap: Bitmap of shader cores that are ready (powered on)
+ * @shader_transitioning_bitmap: Bitmap of shader cores that are currently changing
+ * power state.
+ * @nr_hw_address_spaces: Number of address spaces actually available in the
+ * GPU, remains constant after driver initialisation.
+ * @nr_user_address_spaces: Number of address spaces available to user contexts
+ * @hwcnt: Structure used for instrumentation and HW counters
+ * dumping
+ * @vinstr_ctx: vinstr context created per device
+ * @trace_lock: Lock to serialize the access to trace buffer.
+ * @trace_first_out: Index/offset in the trace buffer at which the first
+ * unread message is present.
+ * @trace_next_in: Index/offset in the trace buffer at which the new
+ * message will be written.
+ * @trace_rbuf: Pointer to the buffer storing debug messages/prints
+ * tracing the various events in Driver.
+ * The buffer is filled in circular fashion.
+ * @reset_timeout_ms: Number of milliseconds to wait for the soft stop to
+ * complete for the GPU jobs before proceeding with the
+ * GPU reset.
+ * @cacheclean_lock: Lock to serialize the clean & invalidation of GPU caches,
+ * between Job Manager backend & Instrumentation code.
+ * @platform_context: Platform specific private data to be accessed by
+ * platform specific config files only.
+ * @kctx_list: List of kbase_contexts created for the device, including
+ * the kbase_context created for vinstr_ctx.
+ * @kctx_list_lock: Lock protecting concurrent accesses to @kctx_list.
+ * @devfreq_profile: Describes devfreq profile for the Mali GPU device, passed
+ * to devfreq_add_device() to add devfreq feature to Mali
+ * GPU device.
+ * @devfreq: Pointer to devfreq structure for Mali GPU device,
+ * returned on the call to devfreq_add_device().
+ * @current_freq: The real frequency, corresponding to @current_nominal_freq,
+ * at which the Mali GPU device is currently operating, as
+ * retrieved from @opp_table in the target callback of
+ * @devfreq_profile.
+ * @current_nominal_freq: The nominal frequency currently used for the Mali GPU
+ * device as retrieved through devfreq_recommended_opp()
+ * using the freq value passed as an argument to target
+ * callback of @devfreq_profile
+ * @current_voltage: The voltage corresponding to @current_nominal_freq, as
+ * retrieved through dev_pm_opp_get_voltage().
+ * @current_core_mask: bitmask of shader cores that are currently desired &
+ * enabled, corresponding to @current_nominal_freq as
+ * retrieved from @opp_table in the target callback of
+ * @devfreq_profile.
+ * @opp_table: Pointer to the lookup table for converting between nominal
+ * OPP (operating performance point) frequency, and real
+ * frequency and core mask. This table is constructed according
+ * to operating-points-v2-mali table in devicetree.
+ * @num_opps: Number of operating performance points available for the Mali
+ * GPU device.
+ * @devfreq_cooling: Pointer returned on registering devfreq cooling device
+ * corresponding to @devfreq.
+ * @ipa_protection_mode_switched: is set to TRUE when GPU is put into protected
+ * mode. It is a sticky flag which is cleared by IPA
+ * once it has made use of information that GPU had
+ * previously entered protected mode.
+ * @ipa: Top level structure for IPA, containing pointers to both
+ * configured & fallback models.
+ * @timeline: Stores the global timeline tracking information.
+ * @job_fault_debug: Flag to control the dumping of debug data for job faults,
+ * set when the 'job_fault' debugfs file is opened.
+ * @mali_debugfs_directory: Root directory for the debugfs files created by the driver
+ * @debugfs_ctx_directory: Directory inside the @mali_debugfs_directory containing
+ * a sub-directory for every context.
+ * @debugfs_as_read_bitmap: bitmap of address spaces for which the bus or page fault
+ * has occurred.
+ * @job_fault_wq: Waitqueue to block the job fault dumping daemon till the
+ * occurrence of a job fault.
+ * @job_fault_resume_wq: Waitqueue on which every context with a faulty job waits
+ * for the job fault dumping to complete before they can
+ * do bottom half of job done for the atoms which followed
+ * the faulty atom.
+ * @job_fault_resume_workq: workqueue to process the work items queued for the faulty
+ * atoms, whereby the work item function waits for the dumping
+ * to get completed.
+ * @job_fault_event_list: List of atoms, each belonging to a different context, which
+ * generated a job fault.
+ * @job_fault_event_lock: Lock to protect concurrent accesses to @job_fault_event_list
+ * @regs_dump_debugfs_data: Contains the offset of register to be read through debugfs
+ * file "read_register".
+ * @kbase_profiling_controls: Profiling controls set by gator to control frame buffer
+ * dumping and s/w counter reporting.
+ * @force_replay_limit: Number of gpu jobs, having replay atoms associated with them,
+ * that are run before a job is forced to fail and replay.
+ * Set to 0 to disable forced failures.
+ * @force_replay_count: Count of gpu jobs, having replay atoms associated with them,
+ * between forced failures. Incremented on each gpu job which
+ * has replay atoms dependent on it. A gpu job is forced to
+ * fail once this is greater than or equal to @force_replay_limit
+ * @force_replay_core_req: Core requirements, set through the sysfs file, for the replay
+ * job atoms to consider the associated gpu job for forceful
+ * failure and replay. May be zero
+ * @force_replay_random: Set to 1 to randomize the @force_replay_limit, in the
+ * range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
+ * @ctx_num: Total number of contexts created for the device.
+ * @io_history: Pointer to an object keeping a track of all recent
+ * register accesses. The history of register accesses
+ * can be read through "regs_history" debugfs file.
+ * @hwaccess: Contains a pointer to active kbase context and GPU
+ * backend specific data for HW access layer.
+ * @faults_pending: Count of page/bus faults waiting for bottom half processing
+ * via workqueues.
+ * @poweroff_pending: Set when power off operation for GPU is started, reset when
+ * power on for GPU is started.
+ * @infinite_cache_active_default: Set to enable using infinite cache for all the
+ * allocations of a new context.
+ * @mem_pool_max_size_default: Initial/default value for the maximum size of both
+ * types of pool created for a new context.
+ * @current_gpu_coherency_mode: coherency mode in use, which can be different
+ * from @system_coherency, when using protected mode.
+ * @system_coherency: coherency mode as retrieved from the device tree.
+ * @cci_snoop_enabled: Flag to track when CCI snoops have been enabled.
+ * @snoop_enable_smc: SMC function ID to call into Trusted firmware to
+ * enable cache snooping. Value of 0 indicates that it
+ * is not used.
+ * @snoop_disable_smc: SMC function ID to call disable cache snooping.
+ * @protected_ops: Pointer to the methods for switching in or out of the
+ * protected mode, as per the @protected_dev being used.
+ * @protected_dev: Pointer to the protected mode switcher device attached
+ * to the GPU device retrieved through device tree if
+ * the GPU does not support protected mode switching natively.
+ * @protected_mode: set to TRUE when GPU is put into protected mode
+ * @protected_mode_transition: set to TRUE when GPU is transitioning into or
+ * out of protected mode.
+ * @protected_mode_support: set to true if protected mode is supported.
+ * @buslogger: Pointer to the structure required for interfacing
+ * with the bus logger module to set the size of buffer
+ * used by the module for capturing bus logs.
+ * @irq_reset_flush: Flag to indicate that GPU reset is in-flight and flush of
+ * IRQ + bottom half is being done, to prevent the writes
+ * to MMU_IRQ_CLEAR & MMU_IRQ_MASK registers.
+ * @inited_subsys: Bitmap of inited sub systems at the time of device probe.
+ * Used during device remove or for handling error in probe.
+ * @hwaccess_lock: Lock, which can be taken from IRQ context, to serialize
+ * the updates made to Job dispatcher + scheduler states.
+ * @mmu_hw_mutex: Protects access to MMU operations and address space
+ * related state.
+ * @serialize_jobs: Currently used mode for serialization of jobs, both
+ * intra & inter slots serialization is supported.
+ * @backup_serialize_jobs: Copy of the original value of @serialize_jobs taken
+ * when GWT is enabled. Used to restore the original value
+ * on disabling of GWT.
+ * @js_ctx_scheduling_mode: Context scheduling mode currently being used by
+ * Job Scheduler
+ */
+struct kbase_device {
u32 hw_quirks_sc;
u32 hw_quirks_tiler;
u32 hw_quirks_mmu;
@@ -1007,66 +1451,26 @@ struct kbase_device {
struct kbase_mmu_mode const *mmu_mode;
struct kbase_as as[BASE_MAX_NR_AS];
- /* The below variables (as_free and as_to_kctx) are managed by the
- * Context Scheduler. The kbasep_js_device_data::runpool_irq::lock must
- * be held whilst accessing these.
- */
u16 as_free; /* Bitpattern of free Address Spaces */
- /* Mapping from active Address Spaces to kbase_context */
struct kbase_context *as_to_kctx[BASE_MAX_NR_AS];
-
spinlock_t mmu_mask_change;
struct kbase_gpu_props gpu_props;
- /** List of SW workarounds for HW issues */
unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
- /** List of features available */
unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
- /* Bitmaps of cores that are currently in use (running jobs).
- * These should be kept up to date by the job scheduler.
- *
- * pm.power_change_lock should be held when accessing these members.
- *
- * kbase_pm_check_transitions_nolock() should be called when bits are
- * cleared to update the power management system and allow transitions to
- * occur. */
- u64 shader_inuse_bitmap;
-
- /* Refcount for cores in use */
- u32 shader_inuse_cnt[64];
-
- /* Bitmaps of cores the JS needs for jobs ready to run */
- u64 shader_needed_bitmap;
-
- /* Refcount for cores needed */
- u32 shader_needed_cnt[64];
-
- u32 tiler_inuse_cnt;
-
u32 tiler_needed_cnt;
+ u32 shader_needed_cnt;
- /* struct for keeping track of the disjoint information
- *
- * The state is > 0 if the GPU is in a disjoint state. Otherwise 0
- * The count is the number of disjoint events that have occurred on the GPU
- */
struct {
atomic_t count;
atomic_t state;
} disjoint_event;
- /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
u32 l2_users_count;
- /* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs to be
- * submitted to these cores. These are updated by the power management code. The job scheduler should avoid
- * submitting new jobs to any cores that are not marked as available.
- *
- * pm.power_change_lock should be held when accessing these members.
- */
u64 shader_available_bitmap;
u64 tiler_available_bitmap;
u64 l2_available_bitmap;
@@ -1075,10 +1479,9 @@ struct kbase_device {
u64 shader_ready_bitmap;
u64 shader_transitioning_bitmap;
- s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */
- s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */
+ s8 nr_hw_address_spaces;
+ s8 nr_user_address_spaces;
- /* Structure used for instrumentation and HW counters dumping */
struct kbase_hwcnt {
/* The lock should be used when accessing any of the following members */
spinlock_t lock;
@@ -1102,10 +1505,8 @@ struct kbase_device {
struct mutex cacheclean_lock;
- /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
void *platform_context;
- /* List of kbase_contexts created */
struct list_head kctx_list;
struct mutex kctx_list_lock;
@@ -1118,101 +1519,95 @@ struct kbase_device {
u64 current_core_mask;
struct kbase_devfreq_opp *opp_table;
int num_opps;
+ struct kbasep_pm_metrics last_devfreq_metrics;
#ifdef CONFIG_DEVFREQ_THERMAL
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
struct devfreq_cooling_device *devfreq_cooling;
#else
struct thermal_cooling_device *devfreq_cooling;
#endif
- /* Current IPA model - true for configured model, false for fallback */
- atomic_t ipa_use_configured_model;
+ bool ipa_protection_mode_switched;
struct {
/* Access to this struct must be with ipa.lock held */
struct mutex lock;
struct kbase_ipa_model *configured_model;
struct kbase_ipa_model *fallback_model;
+
+ /* Values of the PM utilization metrics from last time the
+ * power model was invoked. The utilization is calculated as
+ * the difference between last_metrics and the current values.
+ */
+ struct kbasep_pm_metrics last_metrics;
+
+ /*
+ * gpu_active_callback - Inform IPA that GPU is now active
+ * @model_data: Pointer to model data
+ */
+ void (*gpu_active_callback)(
+ struct kbase_ipa_model_vinstr_data *model_data);
+
+ /*
+ * gpu_idle_callback - Inform IPA that GPU is now idle
+ * @model_data: Pointer to model data
+ */
+ void (*gpu_idle_callback)(
+ struct kbase_ipa_model_vinstr_data *model_data);
+
+ /* Model data to pass to ipa_gpu_active/idle() */
+ struct kbase_ipa_model_vinstr_data *model_data;
+
+ /* true if IPA is currently using vinstr */
+ bool vinstr_active;
} ipa;
#endif /* CONFIG_DEVFREQ_THERMAL */
#endif /* CONFIG_MALI_DEVFREQ */
-
-#ifdef CONFIG_MALI_TRACE_TIMELINE
- struct kbase_trace_kbdev_timeline timeline;
-#endif
-
- /*
- * Control for enabling job dump on failure, set when control debugfs
- * is opened.
- */
bool job_fault_debug;
#ifdef CONFIG_DEBUG_FS
- /* directory for debugfs entries */
struct dentry *mali_debugfs_directory;
- /* Root directory for per context entry */
struct dentry *debugfs_ctx_directory;
#ifdef CONFIG_MALI_DEBUG
- /* bit for each as, set if there is new data to report */
u64 debugfs_as_read_bitmap;
#endif /* CONFIG_MALI_DEBUG */
- /* failed job dump, used for separate debug process */
wait_queue_head_t job_fault_wq;
wait_queue_head_t job_fault_resume_wq;
struct workqueue_struct *job_fault_resume_workq;
struct list_head job_fault_event_list;
spinlock_t job_fault_event_lock;
- struct kbase_context *kctx_fault;
#if !MALI_CUSTOMER_RELEASE
- /* Per-device data for register dumping interface */
struct {
- u16 reg_offset; /* Offset of a GPU_CONTROL register to be
- dumped upon request */
+ u16 reg_offset;
} regs_dump_debugfs_data;
#endif /* !MALI_CUSTOMER_RELEASE */
#endif /* CONFIG_DEBUG_FS */
- /* fbdump profiling controls set by gator */
u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX];
#if MALI_CUSTOMER_RELEASE == 0
- /* Number of jobs that are run before a job is forced to fail and
- * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced
- * failures. */
int force_replay_limit;
- /* Count of jobs between forced failures. Incremented on each job. A
- * job is forced to fail once this is greater than or equal to
- * force_replay_limit. */
int force_replay_count;
- /* Core requirement for jobs to be failed and replayed. May be zero. */
base_jd_core_req force_replay_core_req;
- /* true if force_replay_limit should be randomized. The random
- * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
- */
bool force_replay_random;
#endif
- /* Total number of created contexts */
atomic_t ctx_num;
#ifdef CONFIG_DEBUG_FS
- /* Holds the most recent register accesses */
struct kbase_io_history io_history;
#endif /* CONFIG_DEBUG_FS */
struct kbase_hwaccess_data hwaccess;
- /* Count of page/bus faults waiting for workqueues to process */
atomic_t faults_pending;
- /* true if GPU is powered off or power off operation is in progress */
bool poweroff_pending;
- /* defaults for new context created for this device */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
bool infinite_cache_active_default;
#else
@@ -1220,73 +1615,46 @@ struct kbase_device {
#endif
size_t mem_pool_max_size_default;
- /* current gpu coherency mode */
u32 current_gpu_coherency_mode;
- /* system coherency mode */
u32 system_coherency;
- /* Flag to track when cci snoops have been enabled on the interface */
+
bool cci_snoop_enabled;
- /* SMC function IDs to call into Trusted firmware to enable/disable
- * cache snooping. Value of 0 indicates that they are not used
- */
u32 snoop_enable_smc;
u32 snoop_disable_smc;
- /* Protected mode operations */
struct protected_mode_ops *protected_ops;
- /* Protected device attached to this kbase device */
struct protected_mode_device *protected_dev;
- /*
- * true when GPU is put into protected mode
- */
bool protected_mode;
- /*
- * true when GPU is transitioning into or out of protected mode
- */
bool protected_mode_transition;
- /*
- * true if protected mode is supported
- */
bool protected_mode_support;
-
-#ifdef CONFIG_MALI_DEBUG
- wait_queue_head_t driver_inactive_wait;
- bool driver_inactive;
-#endif /* CONFIG_MALI_DEBUG */
-
#ifdef CONFIG_MALI_FPGA_BUS_LOGGER
- /*
- * Bus logger integration.
- */
struct bus_logger_client *buslogger;
#endif
- /* Boolean indicating if an IRQ flush during reset is in progress. */
+
bool irq_reset_flush;
- /* list of inited sub systems. Used during terminate/error recovery */
u32 inited_subsys;
spinlock_t hwaccess_lock;
- /* Protects access to MMU operations */
struct mutex mmu_hw_mutex;
- /* Current serialization mode. See KBASE_SERIALIZE_* for details */
+ /* See KBASE_SERIALIZE_* for details */
u8 serialize_jobs;
#ifdef CONFIG_MALI_JOB_DUMP
- /* Used to backup status of job serialization mode
- * when we use GWT and restore when GWT is disabled.
- * GWT uses full serialization mode.
- */
u8 backup_serialize_jobs;
#endif
+
+ /* See KBASE_JS_*_PRIORITY_MODE for details. */
+ u32 js_ctx_scheduling_mode;
+
};
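The new js_ctx_scheduling_mode field selects among the KBASE_JS_*_PRIORITY_MODE policies referenced in its comment. A hedged sketch of a validating setter; the KBASE_JS_PRIORITY_MODE_COUNT sentinel is an assumption about how that enumeration is terminated.

/* Illustrative only: validate and store a context scheduling mode. */
static int example_set_js_ctx_scheduling_mode(struct kbase_device *kbdev,
					      u32 mode)
{
	if (mode >= KBASE_JS_PRIORITY_MODE_COUNT)	/* assumed sentinel */
		return -EINVAL;

	kbdev->js_ctx_scheduling_mode = mode;
	return 0;
}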
/**
@@ -1351,6 +1719,18 @@ struct jsctx_queue {
* allocation mechanism. However, the 64-bit user-space client must still
* reserve a JIT region using KBASE_IOCTL_MEM_JIT_INIT
*
+ * @KCTX_PULLED_SINCE_ACTIVE_JS0: Set when the context has had an atom pulled
+ * from it for job slot 0. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS1: Set when the context has had an atom pulled
+ * from it for job slot 1. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
+ * @KCTX_PULLED_SINCE_ACTIVE_JS2: Set when the context has had an atom pulled
+ * from it for job slot 2. This is reset when the context first goes active or
+ * is re-activated on that slot.
+ *
* All members need to be separate bits. This enum is intended for use in a
* bitmask where multiple values get OR-ed together.
*/
@@ -1367,6 +1747,9 @@ enum kbase_context_flags {
KCTX_DYING = 1U << 9,
KCTX_NO_IMPLICIT_SYNC = 1U << 10,
KCTX_FORCE_SAME_VA = 1U << 11,
+ KCTX_PULLED_SINCE_ACTIVE_JS0 = 1U << 12,
+ KCTX_PULLED_SINCE_ACTIVE_JS1 = 1U << 13,
+ KCTX_PULLED_SINCE_ACTIVE_JS2 = 1U << 14,
};
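Because these flags are single bits meant to be OR-ed together, and the three new KCTX_PULLED_SINCE_ACTIVE_JSn values occupy consecutive bit positions, the flag for a given job slot can be derived by shifting the JS0 flag. A sketch, assuming the kbase_ctx_flag_set() helper the driver uses for atomic flag updates:

/* Illustrative: mark that an atom has been pulled from context 'kctx' on
 * job slot 'js' (0..2). Relies on the three flags being consecutive bits.
 */
static void example_mark_pulled_since_active(struct kbase_context *kctx,
					     int js)
{
	kbase_ctx_flag_set(kctx,
		(enum kbase_context_flags)(KCTX_PULLED_SINCE_ACTIVE_JS0 << js));
}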
struct kbase_sub_alloc {
@@ -1375,12 +1758,233 @@ struct kbase_sub_alloc {
DECLARE_BITMAP(sub_pages, SZ_2M / SZ_4K);
};
+
+/**
+ * struct kbase_context - Object representing an entity, among which the GPU
+ * is scheduled and which gets its own GPU address space.
+ * Created when the device file /dev/malixx is opened.
+ * @filp: Pointer to the struct file corresponding to device file
+ * /dev/malixx instance, passed to the file's open method.
+ * @kbdev: Pointer to the Kbase device for which the context is created.
+ * @mmu: Structure holding details of the MMU tables for this
+ * context
+ * @id: Unique identifier for the context, indicates the number of
+ * contexts which have been created for the device so far.
+ * @api_version: contains the version number for User/kernel interface,
+ * used for compatibility check.
+ * @event_list: list of posted events about completed atoms, to be sent to
+ * event handling thread of Userspace.
+ * @event_coalesce_list: list containing events corresponding to successive atoms
+ * which have requested deferred delivery of the completion
+ * events to Userspace.
+ * @event_mutex: Lock to protect the concurrent access to @event_list &
+ * @event_coalesce_list.
+ * @event_closed: Flag set through POST_TERM ioctl, indicates that Driver
+ * should stop posting events and also inform event handling
+ * thread that context termination is in progress.
+ * @event_workq: Workqueue for processing work items corresponding to atoms
+ * that do not return an event to Userspace or have to perform
+ * a replay job
+ * @event_count: Count of the posted events to be consumed by Userspace.
+ * @event_coalesce_count: Count of the events present in @event_coalesce_list.
+ * @flags: bitmap of enums from kbase_context_flags, indicating the
+ * state & attributes for the context.
+ * @setup_complete: Indicates if the setup for context has completed, i.e.
+ * flags have been set for the context. Driver allows only
+ * 2 ioctls until the setup is done. Valid only for
+ * @api_version value 0.
+ * @setup_in_progress: Indicates if the context's setup is in progress and other
+ * setup calls made in the interim shall be rejected.
+ * @aliasing_sink_page: Special page used for KBASE_MEM_TYPE_ALIAS allocations,
+ * which can alias a number of memory regions. The page
+ * represents a region where it is mapped with a write-alloc
+ * cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ * @mem_partials_lock: Lock for protecting the operations done on the elements
+ * added to @mem_partials list.
+ * @mem_partials: List head for the list of large pages, 2MB in size,
+ * which have been split into 4 KB pages and are used
+ * partially for allocations >= 2 MB in size.
+ * @reg_lock: Lock used for GPU virtual address space management operations,
+ * like adding/freeing a memory region in the address space.
+ * Could possibly be converted to a rwlock in the future.
+ * @reg_rbtree_same: RB tree of the memory regions allocated from the SAME_VA
+ * zone of the GPU virtual address space. Used for allocations
+ * having the same value for GPU & CPU virtual address.
+ * @reg_rbtree_custom: RB tree of the memory regions allocated from the CUSTOM_VA
+ * zone of the GPU virtual address space.
+ * @cookies: Bitmask of BITS_PER_LONG bits, used mainly for
+ * SAME_VA allocations to defer the reservation of memory region
+ * (from the GPU virtual address space) from base_mem_alloc
+ * ioctl to mmap system call. This helps returning unique
+ * handles, disguised as GPU VA, to Userspace from base_mem_alloc
+ * and later retrieving the pointer to memory region structure
+ * in the mmap handler.
+ * @pending_regions: Array containing pointers to memory region structures,
+ * used in conjunction with @cookies bitmask mainly for
+ * providing a mechanism to have the same value for CPU &
+ * GPU virtual address.
+ * @event_queue: Wait queue used for blocking the thread, which consumes
+ * the base_jd_event corresponding to an atom, when there
+ * are no more posted events.
+ * @tgid: thread group id of the process, whose thread opened the
+ * device file /dev/malixx instance to create a context.
+ * @pid: id of the thread, corresponding to process @tgid,
+ * which actually opened the device file.
+ * @jctx: object encapsulating all the Job dispatcher related state,
+ * including the array of atoms.
+ * @used_pages: Keeps track of the number of 4KB physical pages in use
+ * for the context.
+ * @nonmapped_pages: Updated in the same way as @used_pages, except for the case
+ * when special tracking page is freed by userspace where it
+ * is reset to 0.
+ * @permanent_mapped_pages: Usage count of permanently mapped memory
+ * @mem_pool: Object containing the state for the context specific pool of
+ * 4KB size physical pages.
+ * @lp_mem_pool: Object containing the state for the context specific pool of
+ * 2MB size physical pages.
+ * @reclaim: Shrinker object registered with the kernel containing
+ * the pointer to callback function which is invoked under
+ * low memory conditions. In the callback function Driver
+ * frees up the memory for allocations marked as
+ * evictable/reclaimable.
+ * @evict_list: List head for the list containing the allocations which
+ * can be evicted or freed up in the shrinker callback.
+ * @waiting_soft_jobs: List head for the list containing softjob atoms, which
+ * are either waiting for the event set operation, or waiting
+ * for the signaling of input fence or waiting for the GPU
+ * device to be powered on so as to dump the CPU/GPU timestamps.
+ * @waiting_soft_jobs_lock: Lock to protect @waiting_soft_jobs list from concurrent
+ * accesses.
+ * @dma_fence: Object containing list head for the list of dma-buf fence
+ * waiting atoms and the waitqueue to process the work item
+ * queued for the atoms blocked on the signaling of dma-buf
+ * fences.
+ * @as_nr: id of the address space being used for the scheduled in
+ * context. This is effectively part of the Run Pool, because
+ * it only has a valid setting (!=KBASEP_AS_NR_INVALID) whilst
+ * the context is scheduled in. The hwaccess_lock must be held
+ * whilst accessing this.
+ * If the context relating to this value of as_nr is required,
+ * then the context must be retained to ensure that it doesn't
+ * disappear whilst it is being used. Alternatively, hwaccess_lock
+ * can be held to ensure the context doesn't disappear (but this
+ * has restrictions on what other locks can be taken simultaneously).
+ * @refcount: Keeps track of the number of users of this context. A user
+ * can be a job that is available for execution, instrumentation
+ * needing to 'pin' a context for counter collection, etc.
+ * If the refcount reaches 0 then this context is considered
+ * inactive and the previously programmed AS might be cleared
+ * at any point.
+ * Generally the reference count is incremented when the context
+ * is scheduled in and an atom is pulled from the context's per
+ * slot runnable tree.
+ * @mm_update_lock: lock used for handling of special tracking page.
+ * @process_mm: Pointer to the memory descriptor of the process which
+ * created the context. Used for accounting the physical
+ * pages used for GPU allocations, done for the context,
+ * to the memory consumed by the process.
+ * @same_va_end: End address of the SAME_VA zone (in 4KB page units)
+ * @timeline: Object tracking the number of atoms currently in flight for
+ * the context and thread group id of the process, i.e. @tgid.
+ * @mem_profile_data: Buffer containing the profiling information provided by
+ * Userspace, can be read through the mem_profile debugfs file.
+ * @mem_profile_size: Size of the @mem_profile_data.
+ * @mem_profile_lock: Lock to serialize the operations related to mem_profile
+ * debugfs file.
+ * @kctx_dentry: Pointer to the debugfs directory created for every context,
+ * inside kbase_device::debugfs_ctx_directory, containing
+ * context specific files.
+ * @reg_dump: Buffer containing a register offset & value pair, used
+ * for dumping job fault debug info.
+ * @job_fault_count: Indicates that a job fault occurred for the context and
+ * dumping of its debug info is in progress.
+ * @job_fault_resume_event_list: List containing atoms completed after the faulty
+ * atom but before the debug data for faulty atom was dumped.
+ * @jsctx_queue: Per slot & priority arrays of objects containing the root
+ * of RB-tree holding currently runnable atoms on the job slot
+ * and the head item of the linked list of atoms blocked on
+ * cross-slot dependencies.
+ * @atoms_pulled: Total number of atoms currently pulled from the context.
+ * @atoms_pulled_slot: Per slot count of the number of atoms currently pulled
+ * from the context.
+ * @atoms_pulled_slot_pri: Per slot & priority count of the number of atoms currently
+ * pulled from the context. hwaccess_lock shall be held when
+ * accessing it.
+ * @blocked_js: Indicates if the context is blocked from submitting atoms
+ * on a slot at a given priority. This is set to true when
+ * an atom of the context is soft/hard stopped or
+ * removed from the HEAD_NEXT register in response to
+ * soft/hard stop.
+ * @slots_pullable: Bitmask of slots, indicating the slots for which the
+ * context has pullable atoms in the runnable tree.
+ * @work: Work structure used for deferred ASID assignment.
+ * @vinstr_cli: Pointer to the legacy userspace vinstr client, there can
+ * be only one such client per kbase context.
+ * @vinstr_cli_lock: Lock used for the vinstr ioctl calls made for @vinstr_cli.
+ * @completed_jobs: List containing completed atoms for which base_jd_event is
+ * to be posted.
+ * @work_count: Number of work items, corresponding to atoms, currently
+ * pending on job_done workqueue of @jctx.
+ * @soft_job_timeout: Timer object used for failing/cancelling the waiting
+ * soft-jobs which have been blocked for more than the
+ * timeout value used for the soft-jobs.
+ * @jit_alloc: Array of 256 pointers to GPU memory regions, used
+ * for JIT allocations.
+ * @jit_max_allocations: Maximum number of JIT allocations allowed at once.
+ * @jit_current_allocations: Current number of in-flight JIT allocations.
+ * @jit_current_allocations_per_bin: Current number of in-flight JIT allocations per bin
+ * @jit_version: version number indicating whether userspace is using
+ * old or new version of interface for JIT allocations
+ * 1 -> client used KBASE_IOCTL_MEM_JIT_INIT_OLD
+ * 2 -> client used KBASE_IOCTL_MEM_JIT_INIT
+ * @jit_active_head: List containing the JIT allocations which are in use.
+ * @jit_pool_head: List containing the JIT allocations which have been
+ * freed up by userspace and so not currently in use.
+ * Driver caches them to quickly fulfill requests for new
+ * JIT allocations. They are released in case of memory
+ * pressure as they are put on the @evict_list when they
+ * are freed up by userspace.
+ * @jit_destroy_head: List containing the JIT allocations which were moved to it
+ * from @jit_pool_head, in the shrinker callback, after freeing
+ * their backing physical pages.
+ * @jit_evict_lock: Lock used for operations done on JIT allocations and also
+ * for accessing @evict_list.
+ * @jit_work: Work item queued to defer the freeing of memory region when
+ * JIT allocation is moved to @jit_destroy_head.
+ * @jit_atoms_head: A list of the JIT soft-jobs, both alloc & free, in submission
+ * order, protected by kbase_jd_context.lock.
+ * @jit_pending_alloc: A list of JIT alloc soft-jobs for which allocation will be
+ * reattempted after the impending free of other active JIT
+ * allocations.
+ * @ext_res_meta_head: A list of sticky external resources which were requested to
+ * be mapped on GPU side, through a softjob atom of type
+ * EXT_RES_MAP or STICKY_RESOURCE_MAP ioctl.
+ * @drain_pending: Used to record that a flush/invalidate of the GPU caches was
+ * requested from atomic context, so that the next flush request
+ * can wait for the flush of GPU writes.
+ * @age_count: Counter incremented on every call to jd_submit_atom,
+ * atom is assigned the snapshot of this counter, which
+ * is used to determine the atom's age when it is added to
+ * the runnable RB-tree.
+ * @trim_level: Level of JIT allocation trimming to perform on free (0-100%)
+ * @gwt_enabled: Indicates if tracking of GPU writes is enabled, protected by
+ * kbase_context.reg_lock.
+ * @gwt_was_enabled: Simple sticky bit flag to know if GWT was ever enabled.
+ * @gwt_current_list: A list of addresses for which GPU has generated write faults,
+ * after the last snapshot of it was sent to userspace.
+ * @gwt_snapshot_list: Snapshot of the @gwt_current_list for sending to user space.
+ * @priority: Indicates the context priority. Used along with @atoms_count
+ * for context scheduling, protected by hwaccess_lock.
+ * @atoms_count: Number of gpu atoms currently in use, per priority
+ */
struct kbase_context {
struct file *filp;
struct kbase_device *kbdev;
- u32 id; /* System wide unique id */
+ struct kbase_mmu_table mmu;
+
+ u32 id;
unsigned long api_version;
- phys_addr_t pgd;
struct list_head event_list;
struct list_head event_coalesce_list;
struct mutex event_mutex;
@@ -1394,21 +1998,15 @@ struct kbase_context {
atomic_t setup_complete;
atomic_t setup_in_progress;
- u64 *mmu_teardown_pages;
-
struct tagged_addr aliasing_sink_page;
- struct mutex mem_partials_lock;
+ spinlock_t mem_partials_lock;
struct list_head mem_partials;
- struct mutex mmu_lock;
- struct mutex reg_lock; /* To be converted to a rwlock? */
- struct rb_root reg_rbtree_same; /* RB tree of GPU (live) regions,
- * SAME_VA zone */
- struct rb_root reg_rbtree_exec; /* RB tree of GPU (live) regions,
- * EXEC zone */
- struct rb_root reg_rbtree_custom; /* RB tree of GPU (live) regions,
- * CUSTOM_VA zone */
+ struct mutex reg_lock;
+ struct rb_root reg_rbtree_same;
+ struct rb_root reg_rbtree_custom;
+
unsigned long cookies;
struct kbase_va_region *pending_regions[BITS_PER_LONG];
@@ -1420,6 +2018,7 @@ struct kbase_context {
struct kbase_jd_context jctx;
atomic_t used_pages;
atomic_t nonmapped_pages;
+ unsigned long permanent_mapped_pages;
struct kbase_mem_pool mem_pool;
struct kbase_mem_pool lp_mem_pool;
@@ -1435,26 +2034,12 @@ struct kbase_context {
struct workqueue_struct *wq;
} dma_fence;
#endif /* CONFIG_MALI_DMA_FENCE */
- /** This is effectively part of the Run Pool, because it only has a valid
- * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
- *
- * The hwaccess_lock must be held whilst accessing this.
- *
- * If the context relating to this as_nr is required, you must use
- * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
- * whilst you're using it. Alternatively, just hold the hwaccess_lock
- * to ensure the context doesn't disappear (but this has restrictions on what other locks
- * you can take whilst doing this) */
+
int as_nr;
- /* Keeps track of the number of users of this context. A user can be a
- * job that is available for execution, instrumentation needing to 'pin'
- * a context for counter collection, etc. If the refcount reaches 0 then
- * this context is considered inactive and the previously programmed
- * AS might be cleared at any point.
- */
atomic_t refcount;
+
/* NOTE:
*
* Flags are in jctx.sched_info.ctx.flags
@@ -1462,29 +2047,17 @@ struct kbase_context {
*
* All other flags must be added there */
spinlock_t mm_update_lock;
- struct mm_struct *process_mm;
- /* End of the SAME_VA zone */
+ struct mm_struct __rcu *process_mm;
u64 same_va_end;
-#ifdef CONFIG_MALI_TRACE_TIMELINE
- struct kbase_trace_kctx_timeline timeline;
-#endif
#ifdef CONFIG_DEBUG_FS
- /* Content of mem_profile file */
char *mem_profile_data;
- /* Size of @c mem_profile_data */
size_t mem_profile_size;
- /* Mutex guarding memory profile state */
struct mutex mem_profile_lock;
- /* Memory profile directory under debugfs */
struct dentry *kctx_dentry;
- /* for job fault debug */
unsigned int *reg_dump;
atomic_t job_fault_count;
- /* This list will keep the following atoms during the dump
- * in the same context
- */
struct list_head job_fault_resume_event_list;
#endif /* CONFIG_DEBUG_FS */
@@ -1492,86 +2065,59 @@ struct kbase_context {
struct jsctx_queue jsctx_queue
[KBASE_JS_ATOM_SCHED_PRIO_COUNT][BASE_JM_MAX_NR_SLOTS];
- /* Number of atoms currently pulled from this context */
atomic_t atoms_pulled;
- /* Number of atoms currently pulled from this context, per slot */
atomic_t atoms_pulled_slot[BASE_JM_MAX_NR_SLOTS];
- /* Number of atoms currently pulled from this context, per slot and
- * priority. Hold hwaccess_lock when accessing */
int atoms_pulled_slot_pri[BASE_JM_MAX_NR_SLOTS][
KBASE_JS_ATOM_SCHED_PRIO_COUNT];
- /* true if slot is blocked on the given priority. This will be set on a
- * soft-stop */
bool blocked_js[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
- /* Bitmask of slots that can be pulled from */
u32 slots_pullable;
- /* Backend specific data */
- struct kbase_context_backend backend;
-
- /* Work structure used for deferred ASID assignment */
struct work_struct work;
- /* Only one userspace vinstr client per kbase context */
struct kbase_vinstr_client *vinstr_cli;
struct mutex vinstr_cli_lock;
- /* List of completed jobs waiting for events to be posted */
struct list_head completed_jobs;
- /* Number of work items currently pending on job_done_wq */
atomic_t work_count;
- /* Waiting soft-jobs will fail when this timer expires */
struct timer_list soft_job_timeout;
- /* JIT allocation management */
struct kbase_va_region *jit_alloc[256];
+ u8 jit_max_allocations;
+ u8 jit_current_allocations;
+ u8 jit_current_allocations_per_bin[256];
+ u8 jit_version;
struct list_head jit_active_head;
struct list_head jit_pool_head;
struct list_head jit_destroy_head;
struct mutex jit_evict_lock;
struct work_struct jit_work;
- /* A list of the JIT soft-jobs in submission order
- * (protected by kbase_jd_context.lock)
- */
struct list_head jit_atoms_head;
- /* A list of pending JIT alloc soft-jobs (using the 'queue' list_head)
- * (protected by kbase_jd_context.lock)
- */
struct list_head jit_pending_alloc;
- /* External sticky resource management */
struct list_head ext_res_meta_head;
- /* Used to record that a drain was requested from atomic context */
atomic_t drain_pending;
- /* Current age count, used to determine age for newly submitted atoms */
u32 age_count;
+ u8 trim_level;
+
#ifdef CONFIG_MALI_JOB_DUMP
- /* Used for tracking GPU writes.
- * (protected by kbase_context.reg_lock)
- */
bool gwt_enabled;
- /* Simple sticky bit flag to know if GWT was ever enabled
- * (protected by kbase_context.reg_lock)
- */
bool gwt_was_enabled;
- /* Current list of GPU writes.
- * (protected by kbase_context.reg_lock)
- */
struct list_head gwt_current_list;
- /* Snapshot of list of GPU writes for sending to user space. */
struct list_head gwt_snapshot_list;
-
#endif
+
+ int priority;
+ s16 atoms_count[KBASE_JS_ATOM_SCHED_PRIO_COUNT];
};
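The @cookies / @pending_regions pair documented above defers the actual GPU VA reservation from the allocation ioctl to the mmap() call: the region is parked under a free cookie bit and a fake handle is returned for userspace to pass back to mmap(). A hedged sketch of the reservation side — the handle encoding and helper name are illustrative; the real driver offsets handles into a dedicated cookie range.

/* Sketch only: park 'reg' under a free cookie and hand back a fake GPU VA.
 * A set bit in kctx->cookies is taken to mean "cookie free".
 */
static int example_reserve_cookie(struct kbase_context *kctx,
				  struct kbase_va_region *reg, u64 *handle)
{
	int cookie = find_first_bit(&kctx->cookies, BITS_PER_LONG);

	if (cookie == BITS_PER_LONG)
		return -ENOMEM;			/* no free cookies */

	clear_bit(cookie, &kctx->cookies);	/* mark cookie as in use */
	kctx->pending_regions[cookie] = reg;

	/* Disguised as a GPU VA; the mmap handler reverses the encoding. */
	*handle = (u64)cookie << PAGE_SHIFT;
	return 0;
}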
#ifdef CONFIG_MALI_JOB_DUMP
@@ -1579,17 +2125,17 @@ struct kbase_context {
* struct kbasep_gwt_list_element - Structure used to collect GPU
* write faults.
* @link: List head for adding write faults.
- * @handle: The handle for the modified region.
- * @offset: The offset in pages of the modified
- * part of the region.
+ * @region: Details of the region where we have the
+ * faulting page address.
+ * @page_addr: Page address where GPU write fault occurred.
* @num_pages: The number of pages modified.
*
* Using this structure all GPU write faults are stored in a list.
*/
struct kbasep_gwt_list_element {
struct list_head link;
- u64 handle;
- u64 offset;
+ struct kbase_va_region *region;
+ u64 page_addr;
u64 num_pages;
};
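With the element reworked to carry the faulting region and page address directly, recording a write fault reduces to appending to @gwt_current_list. An illustrative helper follows; the real code additionally coalesces faults that fall into an already-recorded range, and per the struct docs reg_lock is expected to be held.

/* Sketch: record a GPU write fault on the context's current GWT list. */
static int example_gwt_record_fault(struct kbase_context *kctx,
				    struct kbase_va_region *region,
				    u64 page_addr, u64 num_pages)
{
	struct kbasep_gwt_list_element *e = kmalloc(sizeof(*e), GFP_KERNEL);

	if (!e)
		return -ENOMEM;

	e->region = region;
	e->page_addr = page_addr;
	e->num_pages = num_pages;
	list_add_tail(&e->link, &kctx->gwt_current_list);
	return 0;
}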
diff --git a/drivers/gpu/arm/midgard/mali_kbase_device.c b/drivers/gpu/arm/midgard/mali_kbase_device.c
index d2940424fdeb4b..804cf3feab236e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_device.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_device.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -225,14 +225,6 @@ int kbase_device_init(struct kbase_device * const kbdev)
mutex_init(&kbdev->cacheclean_lock);
-#ifdef CONFIG_MALI_TRACE_TIMELINE
- for (i = 0; i < BASE_JM_MAX_NR_SLOTS; ++i)
- kbdev->timeline.slot_atoms_submitted[i] = 0;
-
- for (i = 0; i <= KBASEP_TIMELINE_PM_EVENT_LAST; ++i)
- atomic_set(&kbdev->timeline.pm_event_uid[i], 0);
-#endif /* CONFIG_MALI_TRACE_TIMELINE */
-
/* fbdump profiling controls set to 0 - fbdump not enabled until changed by gator */
for (i = 0; i < FBDUMP_CONTROL_MAX; i++)
kbdev->kbase_profiling_controls[i] = 0;
@@ -254,10 +246,6 @@ int kbase_device_init(struct kbase_device * const kbdev)
else
kbdev->mmu_mode = kbase_mmu_mode_get_lpae();
-#ifdef CONFIG_MALI_DEBUG
- init_waitqueue_head(&kbdev->driver_inactive_wait);
-#endif /* CONFIG_MALI_DEBUG */
-
return 0;
term_trace:
kbasep_trace_term(kbdev);
@@ -289,91 +277,6 @@ void kbase_device_free(struct kbase_device *kbdev)
kfree(kbdev);
}
-int kbase_device_trace_buffer_install(
- struct kbase_context *kctx, u32 *tb, size_t size)
-{
- unsigned long flags;
-
- KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(tb);
-
- /* Interface uses 16-bit value to track last accessed entry. Each entry
- * is composed of two 32-bit words.
- * This limits the size that can be handled without an overflow. */
- if (0xFFFF * (2 * sizeof(u32)) < size)
- return -EINVAL;
-
- /* set up the header */
- /* magic number in the first 4 bytes */
- tb[0] = TRACE_BUFFER_HEADER_SPECIAL;
- /* Store (write offset = 0, wrap counter = 0, transaction active = no)
- * write offset 0 means never written.
- * Offsets 1 to (wrap_offset - 1) used to store values when trace started
- */
- tb[1] = 0;
-
- /* install trace buffer */
- spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
- kctx->jctx.tb_wrap_offset = size / 8;
- kctx->jctx.tb = tb;
- spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
-
- return 0;
-}
-
-void kbase_device_trace_buffer_uninstall(struct kbase_context *kctx)
-{
- unsigned long flags;
-
- KBASE_DEBUG_ASSERT(kctx);
- spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
- kctx->jctx.tb = NULL;
- kctx->jctx.tb_wrap_offset = 0;
- spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
-}
-
-void kbase_device_trace_register_access(struct kbase_context *kctx, enum kbase_reg_access_type type, u16 reg_offset, u32 reg_value)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&kctx->jctx.tb_lock, flags);
- if (kctx->jctx.tb) {
- u16 wrap_count;
- u16 write_offset;
- u32 *tb = kctx->jctx.tb;
- u32 header_word;
-
- header_word = tb[1];
- KBASE_DEBUG_ASSERT(0 == (header_word & 0x1));
-
- wrap_count = (header_word >> 1) & 0x7FFF;
- write_offset = (header_word >> 16) & 0xFFFF;
-
- /* mark as transaction in progress */
- tb[1] |= 0x1;
- mb();
-
- /* calculate new offset */
- write_offset++;
- if (write_offset == kctx->jctx.tb_wrap_offset) {
- /* wrap */
- write_offset = 1;
- wrap_count++;
- wrap_count &= 0x7FFF; /* 15bit wrap counter */
- }
-
- /* store the trace entry at the selected offset */
- tb[write_offset * 2 + 0] = (reg_offset & ~0x3) | ((type == REG_WRITE) ? 0x1 : 0x0);
- tb[write_offset * 2 + 1] = reg_value;
- mb();
-
- /* new header word */
- header_word = (write_offset << 16) | (wrap_count << 1) | 0x0; /* transaction complete */
- tb[1] = header_word;
- }
- spin_unlock_irqrestore(&kctx->jctx.tb_lock, flags);
-}
-
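For reference, the removed interface packed its state into the second word of the buffer: bit 0 flagged a transaction in progress, bits 1-15 held the wrap counter, and bits 16-31 the write offset — which is why the total size was capped at 0xFFFF two-word entries. A decoder for that header word, taken straight from the layout above:

/* Decode the (now removed) trace-buffer header word described above. */
static void example_decode_tb_header(u32 header_word, u16 *wrap_count,
				     u16 *write_offset, bool *in_transaction)
{
	*in_transaction = header_word & 0x1;
	*wrap_count = (header_word >> 1) & 0x7FFF;	/* 15-bit counter */
	*write_offset = (header_word >> 16) & 0xFFFF;
}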
/*
* Device trace functions
*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_event.c b/drivers/gpu/arm/midgard/mali_kbase_event.c
index e290fceea6cd17..3c9cef36413468 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_event.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_event.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2016,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -38,8 +38,6 @@ static struct base_jd_udata kbase_event_process(struct kbase_context *kctx, stru
data = katom->udata;
- KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_sub_return(1, &kctx->timeline.jd_atoms_in_flight));
-
KBASE_TLSTREAM_TL_NRET_ATOM_CTX(katom, kctx);
KBASE_TLSTREAM_TL_DEL_ATOM(katom);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence.h b/drivers/gpu/arm/midgard/mali_kbase_fence.h
index b993bb6c4c9ef3..f68bdd3912423b 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_fence.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
index a9678ce74eeb5e..800a5f9a3da6f4 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_fence_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -44,7 +44,12 @@
#define dma_fence_is_signaled(a) fence_is_signaled(a)
#define dma_fence_add_callback(a, b, c) fence_add_callback(a, b, c)
#define dma_fence_remove_callback(a, b) fence_remove_callback(a, b)
+
+#if (KERNEL_VERSION(4, 9, 68) <= LINUX_VERSION_CODE)
+#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->error ?: 1 : 0)
+#else
#define dma_fence_get_status(a) (fence_is_signaled(a) ? (a)->status ?: 1 : 0)
+#endif
#else
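Both definitions above follow the mainline dma_fence_get_status() convention — 0 while the fence is pending, 1 once signalled successfully, and a negative errno once signalled with an error; the version check exists because 4.9.68+ stable kernels carry the fence status-to-error field rename. A hedged usage sketch on the legacy-fence path this branch covers:

/* Illustrative: map the macro's tri-state result onto an errno-style code. */
static int example_fence_result(struct fence *f)
{
	int status = dma_fence_get_status(f);

	if (status == 0)
		return -EBUSY;			/* not signalled yet */

	return status < 0 ? status : 0;		/* error, or success */
}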
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_api.c b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
index 2fa68067050a98..7077c3a8732287 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_api.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -73,10 +73,26 @@ const char * const *kbase_gator_hwcnt_init_names(uint32_t *total_counters)
hardware_counters = hardware_counters_mali_tSIx;
count = ARRAY_SIZE(hardware_counters_mali_tSIx);
break;
+ case GPU_ID2_PRODUCT_TDVX:
+ hardware_counters = hardware_counters_mali_tSIx;
+ count = ARRAY_SIZE(hardware_counters_mali_tSIx);
+ break;
case GPU_ID2_PRODUCT_TNOX:
hardware_counters = hardware_counters_mali_tNOx;
count = ARRAY_SIZE(hardware_counters_mali_tNOx);
break;
+ case GPU_ID2_PRODUCT_TGOX:
+ hardware_counters = hardware_counters_mali_tGOx;
+ count = ARRAY_SIZE(hardware_counters_mali_tGOx);
+ break;
+ case GPU_ID2_PRODUCT_TKAX:
+ hardware_counters = hardware_counters_mali_tKAx;
+ count = ARRAY_SIZE(hardware_counters_mali_tKAx);
+ break;
+ case GPU_ID2_PRODUCT_TTRX:
+ hardware_counters = hardware_counters_mali_tTRx;
+ count = ARRAY_SIZE(hardware_counters_mali_tTRx);
+ break;
default:
hardware_counters = NULL;
count = 0;
@@ -158,7 +174,7 @@ KBASE_EXPORT_SYMBOL(kbase_gator_hwcnt_term_names);
struct kbase_gator_hwcnt_handles *kbase_gator_hwcnt_init(struct kbase_gator_hwcnt_info *in_out_info)
{
struct kbase_gator_hwcnt_handles *hand;
- struct kbase_uk_hwcnt_reader_setup setup;
+ struct kbase_ioctl_hwcnt_reader_setup setup;
uint32_t dump_size = 0, i = 0;
if (!in_out_info)
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
index b048db8bf834a1..5d38c7b735531b 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names.h
@@ -2169,6 +2169,8 @@ static const char * const hardware_counters_mali_t88x[] = {
#include "mali_kbase_gator_hwcnt_names_tnox.h"
+#include "mali_kbase_gator_hwcnt_names_tgox.h"
+
#include "mali_kbase_gator_hwcnt_names_tkax.h"
#include "mali_kbase_gator_hwcnt_names_ttrx.h"
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h
new file mode 100644
index 00000000000000..72b5266622a9d2
--- /dev/null
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tgox.h
@@ -0,0 +1,296 @@
+/*
+ *
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+/*
+ * This header was autogenerated, it should not be edited.
+ */
+
+#ifndef _KBASE_GATOR_HWCNT_NAMES_TGOX_H_
+#define _KBASE_GATOR_HWCNT_NAMES_TGOX_H_
+
+static const char * const hardware_counters_mali_tGOx[] = {
+ /* Performance counters for the Job Manager */
+ "",
+ "",
+ "",
+ "",
+ "TGOx_MESSAGES_SENT",
+ "TGOx_MESSAGES_RECEIVED",
+ "TGOx_GPU_ACTIVE",
+ "TGOx_IRQ_ACTIVE",
+ "TGOx_JS0_JOBS",
+ "TGOx_JS0_TASKS",
+ "TGOx_JS0_ACTIVE",
+ "",
+ "TGOx_JS0_WAIT_READ",
+ "TGOx_JS0_WAIT_ISSUE",
+ "TGOx_JS0_WAIT_DEPEND",
+ "TGOx_JS0_WAIT_FINISH",
+ "TGOx_JS1_JOBS",
+ "TGOx_JS1_TASKS",
+ "TGOx_JS1_ACTIVE",
+ "",
+ "TGOx_JS1_WAIT_READ",
+ "TGOx_JS1_WAIT_ISSUE",
+ "TGOx_JS1_WAIT_DEPEND",
+ "TGOx_JS1_WAIT_FINISH",
+ "TGOx_JS2_JOBS",
+ "TGOx_JS2_TASKS",
+ "TGOx_JS2_ACTIVE",
+ "",
+ "TGOx_JS2_WAIT_READ",
+ "TGOx_JS2_WAIT_ISSUE",
+ "TGOx_JS2_WAIT_DEPEND",
+ "TGOx_JS2_WAIT_FINISH",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+
+ /* Performance counters for the Tiler */
+ "",
+ "",
+ "",
+ "",
+ "TGOx_TILER_ACTIVE",
+ "TGOx_JOBS_PROCESSED",
+ "TGOx_TRIANGLES",
+ "TGOx_LINES",
+ "TGOx_POINTS",
+ "TGOx_FRONT_FACING",
+ "TGOx_BACK_FACING",
+ "TGOx_PRIM_VISIBLE",
+ "TGOx_PRIM_CULLED",
+ "TGOx_PRIM_CLIPPED",
+ "TGOx_PRIM_SAT_CULLED",
+ "TGOx_BIN_ALLOC_INIT",
+ "TGOx_BIN_ALLOC_OVERFLOW",
+ "TGOx_BUS_READ",
+ "",
+ "TGOx_BUS_WRITE",
+ "TGOx_LOADING_DESC",
+ "TGOx_IDVS_POS_SHAD_REQ",
+ "TGOx_IDVS_POS_SHAD_WAIT",
+ "TGOx_IDVS_POS_SHAD_STALL",
+ "TGOx_IDVS_POS_FIFO_FULL",
+ "TGOx_PREFETCH_STALL",
+ "TGOx_VCACHE_HIT",
+ "TGOx_VCACHE_MISS",
+ "TGOx_VCACHE_LINE_WAIT",
+ "TGOx_VFETCH_POS_READ_WAIT",
+ "TGOx_VFETCH_VERTEX_WAIT",
+ "TGOx_VFETCH_STALL",
+ "TGOx_PRIMASSY_STALL",
+ "TGOx_BBOX_GEN_STALL",
+ "TGOx_IDVS_VBU_HIT",
+ "TGOx_IDVS_VBU_MISS",
+ "TGOx_IDVS_VBU_LINE_DEALLOCATE",
+ "TGOx_IDVS_VAR_SHAD_REQ",
+ "TGOx_IDVS_VAR_SHAD_STALL",
+ "TGOx_BINNER_STALL",
+ "TGOx_ITER_STALL",
+ "TGOx_COMPRESS_MISS",
+ "TGOx_COMPRESS_STALL",
+ "TGOx_PCACHE_HIT",
+ "TGOx_PCACHE_MISS",
+ "TGOx_PCACHE_MISS_STALL",
+ "TGOx_PCACHE_EVICT_STALL",
+ "TGOx_PMGR_PTR_WR_STALL",
+ "TGOx_PMGR_PTR_RD_STALL",
+ "TGOx_PMGR_CMD_WR_STALL",
+ "TGOx_WRBUF_ACTIVE",
+ "TGOx_WRBUF_HIT",
+ "TGOx_WRBUF_MISS",
+ "TGOx_WRBUF_NO_FREE_LINE_STALL",
+ "TGOx_WRBUF_NO_AXI_ID_STALL",
+ "TGOx_WRBUF_AXI_STALL",
+ "",
+ "",
+ "",
+ "TGOx_UTLB_TRANS",
+ "TGOx_UTLB_TRANS_HIT",
+ "TGOx_UTLB_TRANS_STALL",
+ "TGOx_UTLB_TRANS_MISS_DELAY",
+ "TGOx_UTLB_MMU_REQ",
+
+ /* Performance counters for the Shader Core */
+ "",
+ "",
+ "",
+ "",
+ "TGOx_FRAG_ACTIVE",
+ "TGOx_FRAG_PRIMITIVES",
+ "TGOx_FRAG_PRIM_RAST",
+ "TGOx_FRAG_FPK_ACTIVE",
+ "TGOx_FRAG_STARVING",
+ "TGOx_FRAG_WARPS",
+ "TGOx_FRAG_PARTIAL_WARPS",
+ "TGOx_FRAG_QUADS_RAST",
+ "TGOx_FRAG_QUADS_EZS_TEST",
+ "TGOx_FRAG_QUADS_EZS_UPDATE",
+ "TGOx_FRAG_QUADS_EZS_KILL",
+ "TGOx_FRAG_LZS_TEST",
+ "TGOx_FRAG_LZS_KILL",
+ "TGOx_WARP_REG_SIZE_64",
+ "TGOx_FRAG_PTILES",
+ "TGOx_FRAG_TRANS_ELIM",
+ "TGOx_QUAD_FPK_KILLER",
+ "TGOx_FULL_QUAD_WARPS",
+ "TGOx_COMPUTE_ACTIVE",
+ "TGOx_COMPUTE_TASKS",
+ "TGOx_COMPUTE_WARPS",
+ "TGOx_COMPUTE_STARVING",
+ "TGOx_EXEC_CORE_ACTIVE",
+ "TGOx_EXEC_ACTIVE",
+ "TGOx_EXEC_INSTR_COUNT",
+ "TGOx_EXEC_INSTR_DIVERGED",
+ "TGOx_EXEC_INSTR_STARVING",
+ "TGOx_ARITH_INSTR_SINGLE_FMA",
+ "TGOx_ARITH_INSTR_DOUBLE",
+ "TGOx_ARITH_INSTR_MSG",
+ "TGOx_ARITH_INSTR_MSG_ONLY",
+ "TGOx_TEX_MSGI_NUM_QUADS",
+ "TGOx_TEX_DFCH_NUM_PASSES",
+ "TGOx_TEX_DFCH_NUM_PASSES_MISS",
+ "TGOx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+ "TGOx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+ "TGOx_TEX_TFCH_NUM_LINES_FETCHED",
+ "TGOx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+ "TGOx_TEX_TFCH_NUM_OPERATIONS",
+ "TGOx_TEX_FILT_NUM_OPERATIONS",
+ "TGOx_LS_MEM_READ_FULL",
+ "TGOx_LS_MEM_READ_SHORT",
+ "TGOx_LS_MEM_WRITE_FULL",
+ "TGOx_LS_MEM_WRITE_SHORT",
+ "TGOx_LS_MEM_ATOMIC",
+ "TGOx_VARY_INSTR",
+ "TGOx_VARY_SLOT_32",
+ "TGOx_VARY_SLOT_16",
+ "TGOx_ATTR_INSTR",
+ "TGOx_ARITH_INSTR_FP_MUL",
+ "TGOx_BEATS_RD_FTC",
+ "TGOx_BEATS_RD_FTC_EXT",
+ "TGOx_BEATS_RD_LSC",
+ "TGOx_BEATS_RD_LSC_EXT",
+ "TGOx_BEATS_RD_TEX",
+ "TGOx_BEATS_RD_TEX_EXT",
+ "TGOx_BEATS_RD_OTHER",
+ "TGOx_BEATS_WR_LSC_WB",
+ "TGOx_BEATS_WR_TIB",
+ "TGOx_BEATS_WR_LSC_OTHER",
+
+ /* Performance counters for the Memory System */
+ "",
+ "",
+ "",
+ "",
+ "TGOx_MMU_REQUESTS",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "TGOx_L2_RD_MSG_IN",
+ "TGOx_L2_RD_MSG_IN_STALL",
+ "TGOx_L2_WR_MSG_IN",
+ "TGOx_L2_WR_MSG_IN_STALL",
+ "TGOx_L2_SNP_MSG_IN",
+ "TGOx_L2_SNP_MSG_IN_STALL",
+ "TGOx_L2_RD_MSG_OUT",
+ "TGOx_L2_RD_MSG_OUT_STALL",
+ "TGOx_L2_WR_MSG_OUT",
+ "TGOx_L2_ANY_LOOKUP",
+ "TGOx_L2_READ_LOOKUP",
+ "TGOx_L2_WRITE_LOOKUP",
+ "TGOx_L2_EXT_SNOOP_LOOKUP",
+ "TGOx_L2_EXT_READ",
+ "TGOx_L2_EXT_READ_NOSNP",
+ "TGOx_L2_EXT_READ_UNIQUE",
+ "TGOx_L2_EXT_READ_BEATS",
+ "TGOx_L2_EXT_AR_STALL",
+ "TGOx_L2_EXT_AR_CNT_Q1",
+ "TGOx_L2_EXT_AR_CNT_Q2",
+ "TGOx_L2_EXT_AR_CNT_Q3",
+ "TGOx_L2_EXT_RRESP_0_127",
+ "TGOx_L2_EXT_RRESP_128_191",
+ "TGOx_L2_EXT_RRESP_192_255",
+ "TGOx_L2_EXT_RRESP_256_319",
+ "TGOx_L2_EXT_RRESP_320_383",
+ "TGOx_L2_EXT_WRITE",
+ "TGOx_L2_EXT_WRITE_NOSNP_FULL",
+ "TGOx_L2_EXT_WRITE_NOSNP_PTL",
+ "TGOx_L2_EXT_WRITE_SNP_FULL",
+ "TGOx_L2_EXT_WRITE_SNP_PTL",
+ "TGOx_L2_EXT_WRITE_BEATS",
+ "TGOx_L2_EXT_W_STALL",
+ "TGOx_L2_EXT_AW_CNT_Q1",
+ "TGOx_L2_EXT_AW_CNT_Q2",
+ "TGOx_L2_EXT_AW_CNT_Q3",
+ "TGOx_L2_EXT_SNOOP",
+ "TGOx_L2_EXT_SNOOP_STALL",
+ "TGOx_L2_EXT_SNOOP_RESP_CLEAN",
+ "TGOx_L2_EXT_SNOOP_RESP_DATA",
+ "TGOx_L2_EXT_SNOOP_INTERNAL",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+};
+
+#endif /* _KBASE_GATOR_HWCNT_NAMES_TGOX_H_ */
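As with the other per-GPU tables, the names are laid out as four 64-entry blocks (Job Manager, Tiler, Shader Core, Memory System), and empty strings mark counter slots that do not exist on this GPU. A lookup sketch under that layout assumption:

/* Illustrative: resolve a counter name by block (0-3) and index (0-63).
 * Returns NULL for out-of-range or unused slots. Assumes the 4 x 64
 * layout described above.
 */
static const char *example_tgox_counter_name(unsigned int block,
					     unsigned int counter)
{
	unsigned int idx = block * 64 + counter;

	if (counter >= 64 || idx >= ARRAY_SIZE(hardware_counters_mali_tGOx))
		return NULL;

	return hardware_counters_mali_tGOx[idx][0] ?
			hardware_counters_mali_tGOx[idx] : NULL;
}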
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
index af00a6acb09b4f..e24e91ab1ca4cb 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_thex.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
index 1c1f6693bfb5d7..73db45c232f17e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tkax.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -222,9 +222,9 @@ static const char * const hardware_counters_mali_tKAx[] = {
"TKAx_BEATS_RD_TEX",
"TKAx_BEATS_RD_TEX_EXT",
"TKAx_BEATS_RD_OTHER",
- "TKAx_BEATS_WR_LSC_WB",
- "TKAx_BEATS_WR_TIB",
"TKAx_BEATS_WR_LSC_OTHER",
+ "TKAx_BEATS_WR_TIB",
+ "TKAx_BEATS_WR_LSC_WB",
/* Performance counters for the Memory System */
"",
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
index 233ffbec416eb9..63eac50e0cc723 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tmix.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
index fbb5080f6779a7..932663cfb6a958 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tnox.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -222,9 +222,9 @@ static const char * const hardware_counters_mali_tNOx[] = {
"TNOx_BEATS_RD_TEX",
"TNOx_BEATS_RD_TEX_EXT",
"TNOx_BEATS_RD_OTHER",
- "TNOx_BEATS_WR_LSC_WB",
- "TNOx_BEATS_WR_TIB",
"TNOx_BEATS_WR_LSC_OTHER",
+ "TNOx_BEATS_WR_TIB",
+ "TNOx_BEATS_WR_LSC_WB",
/* Performance counters for the Memory System */
"",
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
index 552db5732239e0..b8dde32bc529eb 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_tsix.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -222,9 +222,9 @@ static const char * const hardware_counters_mali_tSIx[] = {
"TSIx_BEATS_RD_TEX",
"TSIx_BEATS_RD_TEX_EXT",
"TSIx_BEATS_RD_OTHER",
- "TSIx_BEATS_WR_LSC",
+ "TSIx_BEATS_WR_LSC_OTHER",
"TSIx_BEATS_WR_TIB",
- "",
+ "TSIx_BEATS_WR_LSC_WB",
/* Performance counters for the Memory System */
"",
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
index d1bb02a72fc3ca..c1e315b0f534c6 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gator_hwcnt_names_ttrx.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -116,10 +116,10 @@ static const char * const hardware_counters_mali_tTRx[] = {
"",
"TTRx_BUS_WRITE",
"TTRx_LOADING_DESC",
- "",
- "",
- "",
- "",
+ "TTRx_IDVS_POS_SHAD_REQ",
+ "TTRx_IDVS_POS_SHAD_WAIT",
+ "TTRx_IDVS_POS_SHAD_STALL",
+ "TTRx_IDVS_POS_FIFO_FULL",
"TTRx_PREFETCH_STALL",
"TTRx_VCACHE_HIT",
"TTRx_VCACHE_MISS",
@@ -129,11 +129,11 @@ static const char * const hardware_counters_mali_tTRx[] = {
"TTRx_VFETCH_STALL",
"TTRx_PRIMASSY_STALL",
"TTRx_BBOX_GEN_STALL",
- "",
- "",
- "",
- "",
- "",
+ "TTRx_IDVS_VBU_HIT",
+ "TTRx_IDVS_VBU_MISS",
+ "TTRx_IDVS_VBU_LINE_DEALLOCATE",
+ "TTRx_IDVS_VAR_SHAD_REQ",
+ "TTRx_IDVS_VAR_SHAD_STALL",
"TTRx_BINNER_STALL",
"TTRx_ITER_STALL",
"TTRx_COMPRESS_MISS",
@@ -178,33 +178,33 @@ static const char * const hardware_counters_mali_tTRx[] = {
"TTRx_FRAG_QUADS_EZS_KILL",
"TTRx_FRAG_LZS_TEST",
"TTRx_FRAG_LZS_KILL",
- "",
+ "TTRx_WARP_REG_SIZE_64",
"TTRx_FRAG_PTILES",
"TTRx_FRAG_TRANS_ELIM",
"TTRx_QUAD_FPK_KILLER",
- "",
+ "TTRx_FULL_QUAD_WARPS",
"TTRx_COMPUTE_ACTIVE",
"TTRx_COMPUTE_TASKS",
"TTRx_COMPUTE_WARPS",
"TTRx_COMPUTE_STARVING",
"TTRx_EXEC_CORE_ACTIVE",
- "TTRx_EXEC_ACTIVE",
- "TTRx_EXEC_INSTR_COUNT",
+ "TTRx_EXEC_INSTR_FMA",
+ "TTRx_EXEC_INSTR_CVT",
+ "TTRx_EXEC_INSTR_SFU",
+ "TTRx_EXEC_INSTR_MSG",
"TTRx_EXEC_INSTR_DIVERGED",
- "TTRx_EXEC_INSTR_STARVING",
- "TTRx_ARITH_INSTR_SINGLE_FMA",
- "TTRx_ARITH_INSTR_DOUBLE",
- "TTRx_ARITH_INSTR_MSG",
- "TTRx_ARITH_INSTR_MSG_ONLY",
- "TTRx_TEX_INSTR",
- "TTRx_TEX_INSTR_MIPMAP",
- "TTRx_TEX_INSTR_COMPRESSED",
- "TTRx_TEX_INSTR_3D",
- "TTRx_TEX_INSTR_TRILINEAR",
- "TTRx_TEX_COORD_ISSUE",
- "TTRx_TEX_COORD_STALL",
- "TTRx_TEX_STARVE_CACHE",
- "TTRx_TEX_STARVE_FILTER",
+ "TTRx_EXEC_ICACHE_MISS",
+ "TTRx_EXEC_STARVE_ARITH",
+ "TTRx_CALL_BLEND_SHADER",
+ "TTRx_TEX_MSGI_NUM_QUADS",
+ "TTRx_TEX_DFCH_NUM_PASSES",
+ "TTRx_TEX_DFCH_NUM_PASSES_MISS",
+ "TTRx_TEX_DFCH_NUM_PASSES_MIP_MAP",
+ "TTRx_TEX_TIDX_NUM_SPLIT_MIP_MAP",
+ "TTRx_TEX_TFCH_NUM_LINES_FETCHED",
+ "TTRx_TEX_TFCH_NUM_LINES_FETCHED_BLOCK",
+ "TTRx_TEX_TFCH_NUM_OPERATIONS",
+ "TTRx_TEX_FILT_NUM_OPERATIONS",
"TTRx_LS_MEM_READ_FULL",
"TTRx_LS_MEM_READ_SHORT",
"TTRx_LS_MEM_WRITE_FULL",
@@ -222,9 +222,9 @@ static const char * const hardware_counters_mali_tTRx[] = {
"TTRx_BEATS_RD_TEX",
"TTRx_BEATS_RD_TEX_EXT",
"TTRx_BEATS_RD_OTHER",
- "TTRx_BEATS_WR_LSC",
+ "TTRx_BEATS_WR_LSC_OTHER",
"TTRx_BEATS_WR_TIB",
- "",
+ "TTRx_BEATS_WR_LSC_WB",
/* Performance counters for the Memory System */
"",
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
index 4052e2fd0768b0..218e63a61c6caf 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_id.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,20 +27,20 @@
#define GPU_ID_VERSION_MINOR_SHIFT 4
#define GPU_ID_VERSION_MAJOR_SHIFT 12
#define GPU_ID_VERSION_PRODUCT_ID_SHIFT 16
-#define GPU_ID_VERSION_STATUS (0xF << GPU_ID_VERSION_STATUS_SHIFT)
-#define GPU_ID_VERSION_MINOR (0xFF << GPU_ID_VERSION_MINOR_SHIFT)
-#define GPU_ID_VERSION_MAJOR (0xF << GPU_ID_VERSION_MAJOR_SHIFT)
-#define GPU_ID_VERSION_PRODUCT_ID (0xFFFF << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
+#define GPU_ID_VERSION_STATUS (0xFu << GPU_ID_VERSION_STATUS_SHIFT)
+#define GPU_ID_VERSION_MINOR (0xFFu << GPU_ID_VERSION_MINOR_SHIFT)
+#define GPU_ID_VERSION_MAJOR (0xFu << GPU_ID_VERSION_MAJOR_SHIFT)
+#define GPU_ID_VERSION_PRODUCT_ID (0xFFFFu << GPU_ID_VERSION_PRODUCT_ID_SHIFT)
/* Values for GPU_ID_VERSION_PRODUCT_ID bitfield */
-#define GPU_ID_PI_T60X 0x6956
-#define GPU_ID_PI_T62X 0x0620
-#define GPU_ID_PI_T76X 0x0750
-#define GPU_ID_PI_T72X 0x0720
-#define GPU_ID_PI_TFRX 0x0880
-#define GPU_ID_PI_T86X 0x0860
-#define GPU_ID_PI_T82X 0x0820
-#define GPU_ID_PI_T83X 0x0830
+#define GPU_ID_PI_T60X 0x6956u
+#define GPU_ID_PI_T62X 0x0620u
+#define GPU_ID_PI_T76X 0x0750u
+#define GPU_ID_PI_T72X 0x0720u
+#define GPU_ID_PI_TFRX 0x0880u
+#define GPU_ID_PI_T86X 0x0860u
+#define GPU_ID_PI_T82X 0x0820u
+#define GPU_ID_PI_T83X 0x0830u
/* New GPU ID format when PRODUCT_ID is >= 0x1000 (and not 0x6956) */
#define GPU_ID_PI_NEW_FORMAT_START 0x1000
@@ -55,13 +55,13 @@
#define GPU_ID2_ARCH_REV_SHIFT 20
#define GPU_ID2_ARCH_MINOR_SHIFT 24
#define GPU_ID2_ARCH_MAJOR_SHIFT 28
-#define GPU_ID2_VERSION_STATUS (0xF << GPU_ID2_VERSION_STATUS_SHIFT)
-#define GPU_ID2_VERSION_MINOR (0xFF << GPU_ID2_VERSION_MINOR_SHIFT)
-#define GPU_ID2_VERSION_MAJOR (0xF << GPU_ID2_VERSION_MAJOR_SHIFT)
-#define GPU_ID2_PRODUCT_MAJOR (0xF << GPU_ID2_PRODUCT_MAJOR_SHIFT)
-#define GPU_ID2_ARCH_REV (0xF << GPU_ID2_ARCH_REV_SHIFT)
-#define GPU_ID2_ARCH_MINOR (0xF << GPU_ID2_ARCH_MINOR_SHIFT)
-#define GPU_ID2_ARCH_MAJOR (0xF << GPU_ID2_ARCH_MAJOR_SHIFT)
+#define GPU_ID2_VERSION_STATUS (0xFu << GPU_ID2_VERSION_STATUS_SHIFT)
+#define GPU_ID2_VERSION_MINOR (0xFFu << GPU_ID2_VERSION_MINOR_SHIFT)
+#define GPU_ID2_VERSION_MAJOR (0xFu << GPU_ID2_VERSION_MAJOR_SHIFT)
+#define GPU_ID2_PRODUCT_MAJOR (0xFu << GPU_ID2_PRODUCT_MAJOR_SHIFT)
+#define GPU_ID2_ARCH_REV (0xFu << GPU_ID2_ARCH_REV_SHIFT)
+#define GPU_ID2_ARCH_MINOR (0xFu << GPU_ID2_ARCH_MINOR_SHIFT)
+#define GPU_ID2_ARCH_MAJOR (0xFu << GPU_ID2_ARCH_MAJOR_SHIFT)
#define GPU_ID2_PRODUCT_MODEL (GPU_ID2_ARCH_MAJOR | GPU_ID2_PRODUCT_MAJOR)
#define GPU_ID2_VERSION (GPU_ID2_VERSION_MAJOR | \
GPU_ID2_VERSION_MINOR | \
@@ -70,17 +70,17 @@
/* Helper macro to create a partial GPU_ID (new format) that defines
a product ignoring its version. */
#define GPU_ID2_PRODUCT_MAKE(arch_major, arch_minor, arch_rev, product_major) \
- (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
- ((arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \
- ((arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \
- ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+ ((((u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ (((u32)arch_minor) << GPU_ID2_ARCH_MINOR_SHIFT) | \
+ (((u32)arch_rev) << GPU_ID2_ARCH_REV_SHIFT) | \
+ (((u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
/* Helper macro to create a partial GPU_ID (new format) that specifies the
revision (major, minor, status) of a product */
#define GPU_ID2_VERSION_MAKE(version_major, version_minor, version_status) \
- (((version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \
- ((version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \
- ((version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
+ ((((u32)version_major) << GPU_ID2_VERSION_MAJOR_SHIFT) | \
+ (((u32)version_minor) << GPU_ID2_VERSION_MINOR_SHIFT) | \
+ (((u32)version_status) << GPU_ID2_VERSION_STATUS_SHIFT))
/* Helper macro to create a complete GPU_ID (new format) */
#define GPU_ID2_MAKE(arch_major, arch_minor, arch_rev, product_major, \
@@ -93,25 +93,25 @@
/* Helper macro to create a partial GPU_ID (new format) that identifies
a particular GPU model by its arch_major and product_major. */
#define GPU_ID2_MODEL_MAKE(arch_major, product_major) \
- (((arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
- ((product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
+ ((((u32)arch_major) << GPU_ID2_ARCH_MAJOR_SHIFT) | \
+ (((u32)product_major) << GPU_ID2_PRODUCT_MAJOR_SHIFT))
/* Strip off the non-relevant bits from a product_id value and make it suitable
for comparison against the GPU_ID2_PRODUCT_xxx values which identify a GPU
model. */
#define GPU_ID2_MODEL_MATCH_VALUE(product_id) \
- (((product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
+ ((((u32)product_id) << GPU_ID2_PRODUCT_MAJOR_SHIFT) & \
GPU_ID2_PRODUCT_MODEL)
-#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6u, 0)
-#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6u, 1)
-#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7u, 0)
-#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7u, 3)
-#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7u, 1)
-#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7u, 2)
-#define GPU_ID2_PRODUCT_TKAX GPU_ID2_MODEL_MAKE(8u, 0)
-#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(8u, 1)
-#define GPU_ID2_PRODUCT_TBOX GPU_ID2_MODEL_MAKE(8u, 2)
+#define GPU_ID2_PRODUCT_TMIX GPU_ID2_MODEL_MAKE(6, 0)
+#define GPU_ID2_PRODUCT_THEX GPU_ID2_MODEL_MAKE(6, 1)
+#define GPU_ID2_PRODUCT_TSIX GPU_ID2_MODEL_MAKE(7, 0)
+#define GPU_ID2_PRODUCT_TDVX GPU_ID2_MODEL_MAKE(7, 3)
+#define GPU_ID2_PRODUCT_TNOX GPU_ID2_MODEL_MAKE(7, 1)
+#define GPU_ID2_PRODUCT_TGOX GPU_ID2_MODEL_MAKE(7, 2)
+#define GPU_ID2_PRODUCT_TKAX GPU_ID2_MODEL_MAKE(8, 0)
+#define GPU_ID2_PRODUCT_TTRX GPU_ID2_MODEL_MAKE(9, 0)
+#define GPU_ID2_PRODUCT_TBOX GPU_ID2_MODEL_MAKE(8, 2)
/* Values for GPU_ID_VERSION_STATUS field for PRODUCT_ID GPU_ID_PI_T60X */
#define GPU_ID_S_15DEV0 0x1
@@ -120,9 +120,9 @@
/* Helper macro to create a GPU_ID assuming valid values for id, major,
minor, status */
#define GPU_ID_MAKE(id, major, minor, status) \
- (((id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
- ((major) << GPU_ID_VERSION_MAJOR_SHIFT) | \
- ((minor) << GPU_ID_VERSION_MINOR_SHIFT) | \
- ((status) << GPU_ID_VERSION_STATUS_SHIFT))
+ ((((u32)id) << GPU_ID_VERSION_PRODUCT_ID_SHIFT) | \
+ (((u32)major) << GPU_ID_VERSION_MAJOR_SHIFT) | \
+ (((u32)minor) << GPU_ID_VERSION_MINOR_SHIFT) | \
+ (((u32)status) << GPU_ID_VERSION_STATUS_SHIFT))
#endif /* _KBASE_GPU_ID_H_ */
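The u32 casts and u suffixes added throughout this header matter because several shifts (e.g. GPU_ID2_ARCH_MAJOR_SHIFT, 28) would otherwise be performed on plain int and could shift into the sign bit, which is undefined behaviour. A sketch of the intended matching pattern, now safe in unsigned arithmetic:

/* Illustrative: extract the product id from a raw GPU_ID register value
 * and test it against a product model, all in unsigned 32-bit arithmetic.
 */
static bool example_is_tnox(u32 gpu_id)
{
	u32 product_id = (gpu_id & GPU_ID_VERSION_PRODUCT_ID) >>
			GPU_ID_VERSION_PRODUCT_ID_SHIFT;

	return GPU_ID2_MODEL_MATCH_VALUE(product_id) ==
			GPU_ID2_PRODUCT_TNOX;
}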
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
index 2fd033280359c2..514b065d48674e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpu_memory_debugfs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -70,7 +70,7 @@ static int kbasep_gpu_memory_seq_show(struct seq_file *sfile, void *data)
*/
static int kbasep_gpu_memory_debugfs_open(struct inode *in, struct file *file)
{
- return single_open(file, kbasep_gpu_memory_seq_show , NULL);
+ return single_open(file, kbasep_gpu_memory_seq_show, NULL);
}
static const struct file_operations kbasep_gpu_memory_debugfs_fops = {
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
index 9a9ce2d9e661b5..62ba105ca4173e 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -138,7 +138,7 @@ static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kb
gpu_props->raw_props.mem_features = regdump.mem_features;
gpu_props->raw_props.mmu_features = regdump.mmu_features;
gpu_props->raw_props.l2_features = regdump.l2_features;
- gpu_props->raw_props.suspend_size = regdump.suspend_size;
+ gpu_props->raw_props.core_features = regdump.core_features;
gpu_props->raw_props.as_present = regdump.as_present;
gpu_props->raw_props.js_present = regdump.js_present;
@@ -165,6 +165,7 @@ static void kbase_gpuprops_get_props(base_gpu_props * const gpu_props, struct kb
gpu_props->raw_props.thread_max_threads = regdump.thread_max_threads;
gpu_props->raw_props.thread_max_workgroup_size = regdump.thread_max_workgroup_size;
gpu_props->raw_props.thread_features = regdump.thread_features;
+ gpu_props->raw_props.thread_tls_alloc = regdump.thread_tls_alloc;
}
void kbase_gpuprops_update_core_props_gpu_id(base_gpu_props * const gpu_props)
@@ -195,6 +196,8 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, str
kbase_gpuprops_update_core_props_gpu_id(gpu_props);
gpu_props->core_props.log2_program_counter_size = KBASE_GPU_PC_SIZE_LOG2;
gpu_props->core_props.gpu_available_memory_size = totalram_pages << PAGE_SHIFT;
+ gpu_props->core_props.num_exec_engines =
+ KBASE_UBFX32(gpu_props->raw_props.core_features, 0, 4);
for (i = 0; i < BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS; i++)
gpu_props->core_props.texture_features[i] = gpu_props->raw_props.texture_features[i];
@@ -226,6 +229,13 @@ static void kbase_gpuprops_calculate_props(base_gpu_props * const gpu_props, str
else
gpu_props->thread_props.max_barrier_size = gpu_props->raw_props.thread_max_barrier_size;
+ if (gpu_props->raw_props.thread_tls_alloc == 0)
+ gpu_props->thread_props.tls_alloc =
+ gpu_props->thread_props.max_threads;
+ else
+ gpu_props->thread_props.tls_alloc =
+ gpu_props->raw_props.thread_tls_alloc;
+
gpu_props->thread_props.max_registers = KBASE_UBFX32(gpu_props->raw_props.thread_features, 0U, 16);
gpu_props->thread_props.max_task_queue = KBASE_UBFX32(gpu_props->raw_props.thread_features, 16U, 8);
gpu_props->thread_props.max_thread_group_split = KBASE_UBFX32(gpu_props->raw_props.thread_features, 24U, 6);
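The tls_alloc fallback above and the lines around it all rely on KBASE_UBFX32 to pull unsigned bitfields out of the raw register values — for instance, max_registers is bits [0,16) of thread_features. A minimal equivalent of that macro, for illustration:

/* Sketch of what KBASE_UBFX32(value, offset, size) computes: the unsigned
 * bitfield of 'size' bits starting at 'offset'. 1ULL keeps the mask valid
 * even for size == 32.
 */
static inline u32 example_ubfx32(u32 value, u32 offset, u32 size)
{
	return (value >> offset) & (u32)((1ULL << size) - 1);
}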
@@ -312,6 +322,7 @@ static struct {
PROP(TEXTURE_FEATURES_2, core_props.texture_features[2]),
PROP(TEXTURE_FEATURES_3, core_props.texture_features[3]),
PROP(GPU_AVAILABLE_MEMORY_SIZE, core_props.gpu_available_memory_size),
+ PROP(NUM_EXEC_ENGINES, core_props.num_exec_engines),
PROP(L2_LOG2_LINE_SIZE, l2_props.log2_line_size),
PROP(L2_LOG2_CACHE_SIZE, l2_props.log2_cache_size),
@@ -327,13 +338,14 @@ static struct {
PROP(MAX_TASK_QUEUE, thread_props.max_task_queue),
PROP(MAX_THREAD_GROUP_SPLIT, thread_props.max_thread_group_split),
PROP(IMPL_TECH, thread_props.impl_tech),
+ PROP(TLS_ALLOC, thread_props.tls_alloc),
PROP(RAW_SHADER_PRESENT, raw_props.shader_present),
PROP(RAW_TILER_PRESENT, raw_props.tiler_present),
PROP(RAW_L2_PRESENT, raw_props.l2_present),
PROP(RAW_STACK_PRESENT, raw_props.stack_present),
PROP(RAW_L2_FEATURES, raw_props.l2_features),
- PROP(RAW_SUSPEND_SIZE, raw_props.suspend_size),
+ PROP(RAW_CORE_FEATURES, raw_props.core_features),
PROP(RAW_MEM_FEATURES, raw_props.mem_features),
PROP(RAW_MMU_FEATURES, raw_props.mmu_features),
PROP(RAW_AS_PRESENT, raw_props.as_present),
@@ -365,6 +377,7 @@ static struct {
raw_props.thread_max_workgroup_size),
PROP(RAW_THREAD_MAX_BARRIER_SIZE, raw_props.thread_max_barrier_size),
PROP(RAW_THREAD_FEATURES, raw_props.thread_features),
+ PROP(RAW_THREAD_TLS_ALLOC, raw_props.thread_tls_alloc),
PROP(RAW_COHERENCY_MODE, raw_props.coherency_mode),
PROP(COHERENCY_NUM_GROUPS, coherency_info.num_groups),
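
The two decodes added above are small bit-field operations: NUM_EXEC_ENGINES is taken from the low four bits of the CORE_FEATURES register, and tls_alloc falls back to max_threads when the new THREAD_TLS_ALLOC register reads zero. A minimal sketch, assuming KBASE_UBFX32(value, offset, size) extracts `size` bits of `value` starting at bit `offset`; the helper below is a stand-in, not the driver's macro:

#include <stdint.h>

/* Stand-in for KBASE_UBFX32(): unsigned bit-field extract returning
 * 'size' bits of 'value' starting at bit 'offset' (size < 32 assumed).
 */
static inline uint32_t ubfx32(uint32_t value, unsigned int offset,
			      unsigned int size)
{
	return (value >> offset) & ((1u << size) - 1u);
}

/* num_exec_engines comes from bits [3:0] of CORE_FEATURES. */
static uint32_t num_exec_engines(uint32_t core_features)
{
	return ubfx32(core_features, 0, 4);
}

/* tls_alloc falls back to max_threads when THREAD_TLS_ALLOC reads zero,
 * i.e. on GPUs that predate the register.
 */
static uint32_t effective_tls_alloc(uint32_t thread_tls_alloc,
				    uint32_t max_threads)
{
	return thread_tls_alloc ? thread_tls_alloc : max_threads;
}
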
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
index a3ddec79bee7b8..d7877d1d4a5763 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_gpuprops_types.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -38,7 +38,7 @@
struct kbase_gpuprops_regdump {
u32 gpu_id;
u32 l2_features;
- u32 suspend_size; /* API 8.2+ */
+ u32 core_features;
u32 tiler_features;
u32 mem_features;
u32 mmu_features;
@@ -48,6 +48,7 @@ struct kbase_gpuprops_regdump {
u32 thread_max_workgroup_size;
u32 thread_max_barrier_size;
u32 thread_features;
+ u32 thread_tls_alloc;
u32 texture_features[BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS];
u32 js_features[GPU_MAX_JOB_SLOTS];
u32 shader_present_lo;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_gwt.c b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
index 2caab877a447e0..2d1263ddaf901c 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_gwt.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_gwt.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,7 +35,7 @@ static inline void kbase_gpu_gwt_setup_page_permission(
int err = 0;
reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- if (reg->nr_pages && !(reg->flags & KBASE_REG_FREE) &&
+ if (reg->nr_pages && !kbase_is_region_invalid_or_free(reg) &&
(reg->flags & KBASE_REG_GPU_WR)) {
err = kbase_mmu_update_pages(kctx, reg->start_pfn,
kbase_get_gpu_phy_pages(reg),
@@ -55,8 +55,6 @@ static void kbase_gpu_gwt_setup_pages(struct kbase_context *kctx,
kbase_gpu_gwt_setup_page_permission(kctx, flag,
rb_first(&(kctx->reg_rbtree_same)));
kbase_gpu_gwt_setup_page_permission(kctx, flag,
- rb_first(&(kctx->reg_rbtree_exec)));
- kbase_gpu_gwt_setup_page_permission(kctx, flag,
rb_first(&(kctx->reg_rbtree_custom)));
}
@@ -124,7 +122,8 @@ int kbase_gpu_gwt_stop(struct kbase_context *kctx)
}
-int list_cmp_function(void *priv, struct list_head *a, struct list_head *b)
+static int list_cmp_function(void *priv, struct list_head *a,
+ struct list_head *b)
{
struct kbasep_gwt_list_element *elementA = container_of(a,
struct kbasep_gwt_list_element, link);
@@ -133,30 +132,27 @@ int list_cmp_function(void *priv, struct list_head *a, struct list_head *b)
CSTD_UNUSED(priv);
- if (elementA->handle > elementB->handle)
- return 1;
- else if ((elementA->handle == elementB->handle) &&
- (elementA->offset > elementB->offset))
+ if (elementA->page_addr > elementB->page_addr)
return 1;
- else
- return -1;
+ return -1;
}
-void kbase_gpu_gwt_collate(struct kbase_context *kctx,
+static void kbase_gpu_gwt_collate(struct kbase_context *kctx,
struct list_head *snapshot_list)
{
struct kbasep_gwt_list_element *pos, *n;
struct kbasep_gwt_list_element *collated = NULL;
- /* sort the list */
+ /* Sort the list */
list_sort(NULL, snapshot_list, list_cmp_function);
- /* Combine contiguous areas from same region */
+ /* Combine contiguous areas. */
list_for_each_entry_safe(pos, n, snapshot_list, link) {
- if (NULL == collated ||
- collated->handle != pos->handle ||
- collated->offset + collated->num_pages !=
- pos->offset) {
+ if (collated == NULL || collated->region !=
+ pos->region ||
+ (collated->page_addr +
+ (collated->num_pages * PAGE_SIZE)) !=
+ pos->page_addr) {
/* First time through, a new region, or not
* contiguous - start collating to this element
*/
@@ -176,10 +172,8 @@ int kbase_gpu_gwt_dump(struct kbase_context *kctx,
{
const u32 ubuf_size = gwt_dump->in.len;
u32 ubuf_count = 0;
- __user void *user_handles = (__user void *)
- (uintptr_t)gwt_dump->in.handle_buffer;
- __user void *user_offsets = (__user void *)
- (uintptr_t)gwt_dump->in.offset_buffer;
+ __user void *user_addr = (__user void *)
+ (uintptr_t)gwt_dump->in.addr_buffer;
__user void *user_sizes = (__user void *)
(uintptr_t)gwt_dump->in.size_buffer;
@@ -191,8 +185,7 @@ int kbase_gpu_gwt_dump(struct kbase_context *kctx,
return -EPERM;
}
- if (!gwt_dump->in.len || !gwt_dump->in.handle_buffer
- || !gwt_dump->in.offset_buffer
+ if (!gwt_dump->in.len || !gwt_dump->in.addr_buffer
|| !gwt_dump->in.size_buffer) {
kbase_gpu_vm_unlock(kctx);
/* We don't have any valid user space buffer to copy the
@@ -219,8 +212,7 @@ int kbase_gpu_gwt_dump(struct kbase_context *kctx,
}
while ((!list_empty(&kctx->gwt_snapshot_list))) {
- u64 handle_buffer[32];
- u64 offset_buffer[32];
+ u64 addr_buffer[32];
u64 num_page_buffer[32];
u32 count = 0;
int err;
@@ -228,30 +220,20 @@ int kbase_gpu_gwt_dump(struct kbase_context *kctx,
list_for_each_entry_safe(dump_info, n,
&kctx->gwt_snapshot_list, link) {
- handle_buffer[count] = dump_info->handle;
- offset_buffer[count] = dump_info->offset;
+ addr_buffer[count] = dump_info->page_addr;
num_page_buffer[count] = dump_info->num_pages;
count++;
list_del(&dump_info->link);
kfree(dump_info);
- if (ARRAY_SIZE(handle_buffer) == count ||
+ if (ARRAY_SIZE(addr_buffer) == count ||
ubuf_size == (ubuf_count + count))
break;
}
if (count) {
- err = copy_to_user((user_handles +
- (ubuf_count * sizeof(u64))),
- (void *)handle_buffer,
- count * sizeof(u64));
- if (err) {
- dev_err(kctx->kbdev->dev, "Copy to user failure\n");
- kbase_gpu_vm_unlock(kctx);
- return err;
- }
- err = copy_to_user((user_offsets +
- (ubuf_count * sizeof(u64))),
- (void *)offset_buffer,
+ err = copy_to_user((user_addr +
+ (ubuf_count * sizeof(u64))),
+ (void *)addr_buffer,
count * sizeof(u64));
if (err) {
dev_err(kctx->kbdev->dev, "Copy to user failure\n");
@@ -259,7 +241,7 @@ int kbase_gpu_gwt_dump(struct kbase_context *kctx,
return err;
}
err = copy_to_user((user_sizes +
- (ubuf_count * sizeof(u64))),
+ (ubuf_count * sizeof(u64))),
(void *)num_page_buffer,
count * sizeof(u64));
if (err) {
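
The reworked GWT code above replaces (handle, offset) pairs with plain page addresses: snapshot entries are sorted by page_addr, then neighbours are merged whenever one range's end (page_addr + num_pages * PAGE_SIZE) meets the next range's start. A self-contained sketch of that collation, using a plain array and qsort() rather than the kernel's list_sort(); struct range and collate() are hypothetical names:

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

#define PAGE_SIZE 4096u  /* stand-in for the kernel constant */

/* Hypothetical stand-in for struct kbasep_gwt_list_element. */
struct range {
	uint64_t page_addr;
	uint64_t num_pages;
};

static int cmp_range(const void *a, const void *b)
{
	const struct range *ra = a, *rb = b;

	return (ra->page_addr > rb->page_addr) -
	       (ra->page_addr < rb->page_addr);
}

/* Sort by start address, then merge ranges that touch end-to-start,
 * as the driver does for contiguous GPU write-fault areas. Returns
 * the number of collated ranges left in r[].
 */
static size_t collate(struct range *r, size_t n)
{
	size_t out = 0, i;

	qsort(r, n, sizeof(*r), cmp_range);
	for (i = 0; i < n; i++) {
		if (out && r[out - 1].page_addr +
		    r[out - 1].num_pages * PAGE_SIZE == r[i].page_addr)
			r[out - 1].num_pages += r[i].num_pages;
		else
			r[out++] = r[i];
	}
	return out;
}
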
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hw.c b/drivers/gpu/arm/midgard/mali_kbase_hw.c
index 286cc954ccbc7e..f34f53a919b8d6 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_hw.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_hw.c
@@ -176,6 +176,7 @@ static const enum base_hw_issue *kbase_hw_get_issues_for_new_id(
{GPU_ID2_PRODUCT_TGOX,
{{GPU_ID2_VERSION_MAKE(0, 0, 0), base_hw_issues_tGOx_r0p0},
+ {GPU_ID2_VERSION_MAKE(1, 0, 0), base_hw_issues_tGOx_r1p0},
{U32_MAX, NULL} } },
{GPU_ID2_PRODUCT_TKAX,
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
index dd25746d8434ad..124a2d9cf0c314 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014, 2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014, 2016, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -31,9 +31,19 @@
#include <mali_kbase_jm_defs.h>
-/* The hwaccess_lock (a spinlock) must be held when accessing this structure */
+/**
+ * struct kbase_hwaccess_data - object encapsulating the GPU backend specific
+ * data for the HW access layer.
+ * hwaccess_lock (a spinlock) must be held when
+ * accessing this structure.
+ * @active_kctx: pointer to the active kbase context, per job slot, which
+ * last submitted an atom to the GPU; while a context is
+ * active it can also submit new atoms to the GPU from IRQ
+ * context, without going through the bottom half of the job
+ * completion path.
+ * @backend: GPU backend specific data for HW access layer
+ */
struct kbase_hwaccess_data {
- struct kbase_context *active_kctx;
+ struct kbase_context *active_kctx[BASE_JM_MAX_NR_SLOTS];
struct kbase_backend_data backend;
};
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
index b8ab0dc268f357..63844d97ce0218 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_gpuprops.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -33,6 +33,8 @@
* GPU
* @kbdev: Device pointer
* @regdump: Pointer to struct kbase_gpuprops_regdump structure
+ *
+ * The caller should ensure that the GPU remains powered on during this call.
*/
void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
struct kbase_gpuprops_regdump *regdump);
@@ -43,7 +45,7 @@ void kbase_backend_gpuprops_get(struct kbase_device *kbdev,
* @regdump: Pointer to struct kbase_gpuprops_regdump structure
*
* This function reads GPU properties that are dependent on the hardware
- * features bitmask
+ * features bitmask. It will power-on the GPU if required.
*/
void kbase_backend_gpuprops_get_features(struct kbase_device *kbdev,
struct kbase_gpuprops_regdump *regdump);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
index d180e39253bdc8..0c5ceffb0e47ae 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_instr.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2017 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,15 +35,15 @@
* kbase_instr_hwcnt_enable_internal - Enable HW counters collection
* @kbdev: Kbase device
* @kctx: Kbase context
- * @setup: HW counter setup parameters
+ * @enable: HW counter setup parameters
*
* Context: might sleep, waiting for reset to complete
*
* Return: 0 on success
*/
int kbase_instr_hwcnt_enable_internal(struct kbase_device *kbdev,
- struct kbase_context *kctx,
- struct kbase_uk_hwcnt_setup *setup);
+ struct kbase_context *kctx,
+ struct kbase_ioctl_hwcnt_enable *enable);
/**
* kbase_instr_hwcnt_disable_internal - Disable HW counters collection
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
index 8b3d7e20f609d7..580ac98789592a 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_jm.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -86,6 +86,7 @@ bool kbase_backend_use_ctx(struct kbase_device *kbdev,
* kbase_backend_use_ctx_sched() - Activate a context.
* @kbdev: Device pointer
* @kctx: Context pointer
+ * @js: Job slot to activate context on
*
* kbase_gpu_next_job() will pull atoms from the active context.
*
@@ -99,7 +100,7 @@ bool kbase_backend_use_ctx(struct kbase_device *kbdev,
* not have an address space assigned)
*/
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
- struct kbase_context *kctx);
+ struct kbase_context *kctx, int js);
/**
* kbase_backend_release_ctx_irq - Release a context from the GPU. This will
@@ -159,14 +160,13 @@ void kbase_backend_complete_wq(struct kbase_device *kbdev,
* any scheduling has taken place.
* @kbdev: Device pointer
* @core_req: Core requirements of atom
- * @affinity: Affinity of atom
* @coreref_state: Coreref state of atom
*
* This function should only be called from kbase_jd_done_worker() or
* js_return_worker().
*/
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
- base_jd_core_req core_req, u64 affinity,
+ base_jd_core_req core_req,
enum kbase_atom_coreref_state coreref_state);
/**
@@ -178,17 +178,6 @@ void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);
/**
- * kbase_backend_inspect_head() - Return the atom currently at the head of slot
- * @js
- * @kbdev: Device pointer
- * @js: Job slot to inspect
- *
- * Return : Atom currently at the head of slot @js, or NULL
- */
-struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
- int js);
-
-/**
* kbase_backend_inspect_tail - Return the atom currently at the tail of slot
* @js
* @kbdev: Device pointer
@@ -381,6 +370,9 @@ bool kbase_reset_gpu_active(struct kbase_device *kbdev);
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
struct kbase_jd_atom *target_katom);
+/* Object containing callbacks for enabling/disabling protected mode, used
+ * on GPUs which support protected mode switching natively.
+ */
extern struct protected_mode_ops kbase_native_protected_ops;
#endif /* _KBASE_HWACCESS_JM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
index 7f64936f011ef3..9b86b510702156 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_hwaccess_time.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -41,7 +41,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
/**
* kbase_wait_write_flush() - Wait for GPU write flush
- * @kctx: Context pointer
+ * @kbdev: Kbase device
*
* Wait 1000 GPU clock cycles. This delay is known to give the GPU time to flush
* its write buffer.
@@ -52,7 +52,7 @@ void kbase_backend_get_gpu_time(struct kbase_device *kbdev, u64 *cycle_counter,
* This function is only in use for BASE_HW_ISSUE_6367
*/
#ifndef CONFIG_MALI_NO_MALI
-void kbase_wait_write_flush(struct kbase_context *kctx);
+void kbase_wait_write_flush(struct kbase_device *kbdev);
#endif
#endif /* _KBASE_BACKEND_TIME_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_ioctl.h b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
index a8fe9cd9eddeb3..bee2f3a172eaec 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_ioctl.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2017-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -27,6 +27,7 @@
extern "C" {
#endif
+#include <asm-generic/ioctl.h>
#include <linux/types.h>
#define KBASE_IOCTL_TYPE 0x80
@@ -43,20 +44,27 @@ extern "C" {
* KBASE_IOCTL_STICKY_RESOURCE_UNMAP
* 11.4:
* - New ioctl KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET
+ * 11.5:
+ * - New ioctl: KBASE_IOCTL_MEM_JIT_INIT (old ioctl renamed to _OLD)
+ * 11.6:
+ * - Added flags field to base_jit_alloc_info structure, which can be used to
+ * specify pseudo chunked tiler alignment for JIT allocations.
+ * 11.7:
+ * - Removed UMP support
+ * 11.8:
+ * - Added BASE_MEM_UNCACHED_GPU under base_mem_alloc_flags
+ * 11.9:
+ * - Added BASE_MEM_PERMANENT_KERNEL_MAPPING and BASE_MEM_FLAGS_KERNEL_ONLY
+ * under base_mem_alloc_flags
+ * 11.10:
+ * - Enabled the use of nr_extres field of base_jd_atom_v2 structure for
+ * JIT_ALLOC and JIT_FREE type softjobs to enable multiple JIT allocations
+ * with one softjob.
+ * 11.11:
+ * - Added BASE_MEM_GPU_VA_SAME_4GB_PAGE under base_mem_alloc_flags
*/
#define BASE_UK_VERSION_MAJOR 11
-#define BASE_UK_VERSION_MINOR 4
-
-#ifdef ANDROID
-/* Android's definition of ioctl is incorrect, specifying the type argument as
- * 'int'. This creates a warning when using _IOWR (as the top bit is set). Work
- * round this by redefining _IOC to include a case to 'int'.
- */
-#undef _IOC
-#define _IOC(dir, type, nr, size) \
- ((int)(((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \
- ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT)))
-#endif
+#define BASE_UK_VERSION_MINOR 11
/**
* struct kbase_ioctl_version_check - Check version compatibility with kernel
@@ -191,9 +199,9 @@ union kbase_ioctl_mem_query {
#define KBASE_IOCTL_MEM_QUERY \
_IOWR(KBASE_IOCTL_TYPE, 6, union kbase_ioctl_mem_query)
-#define KBASE_MEM_QUERY_COMMIT_SIZE 1
-#define KBASE_MEM_QUERY_VA_SIZE 2
-#define KBASE_MEM_QUERY_FLAGS 3
+#define KBASE_MEM_QUERY_COMMIT_SIZE ((u64)1)
+#define KBASE_MEM_QUERY_VA_SIZE ((u64)2)
+#define KBASE_MEM_QUERY_FLAGS ((u64)3)
/**
* struct kbase_ioctl_mem_free - Free a memory region
@@ -253,6 +261,21 @@ struct kbase_ioctl_hwcnt_enable {
_IO(KBASE_IOCTL_TYPE, 11)
/**
+ * struct kbase_ioctl_hwcnt_values - Values to set the dummy counters to.
+ * @data: Counter samples for the dummy model.
+ * @size: Size of the counter sample data.
+ * @padding: Padding.
+ */
+struct kbase_ioctl_hwcnt_values {
+ __u64 data;
+ __u32 size;
+ __u32 padding;
+};
+
+#define KBASE_IOCTL_HWCNT_SET \
+ _IOW(KBASE_IOCTL_TYPE, 32, struct kbase_ioctl_hwcnt_values)
+
+/**
* struct kbase_ioctl_disjoint_query - Query the disjoint counter
* @counter: A counter of disjoint events in the kernel
*/
@@ -271,6 +294,10 @@ struct kbase_ioctl_disjoint_query {
*
* The ioctl will return the number of bytes written into version_buffer
* (which includes a NULL byte) or a negative error code
+ *
+ * The ioctl request code has to be _IOW because the data in the ioctl
+ * struct is copied to the kernel, even though the kernel then writes out the
+ * version info to the buffer specified in the ioctl.
*/
struct kbase_ioctl_get_ddk_version {
__u64 version_buffer;
@@ -282,15 +309,39 @@ struct kbase_ioctl_get_ddk_version {
_IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)
/**
+ * struct kbase_ioctl_mem_jit_init_old - Initialise the JIT memory allocator
+ *
+ * @va_pages: Number of VA pages to reserve for JIT
+ *
+ * Note that depending on the VA size of the application and GPU, the value
+ * specified in @va_pages may be ignored.
+ *
+ * New code should use KBASE_IOCTL_MEM_JIT_INIT instead, this is kept for
+ * backwards compatibility.
+ */
+struct kbase_ioctl_mem_jit_init_old {
+ __u64 va_pages;
+};
+
+#define KBASE_IOCTL_MEM_JIT_INIT_OLD \
+ _IOW(KBASE_IOCTL_TYPE, 14, struct kbase_ioctl_mem_jit_init_old)
+
+/**
* struct kbase_ioctl_mem_jit_init - Initialise the JIT memory allocator
*
* @va_pages: Number of VA pages to reserve for JIT
+ * @max_allocations: Maximum number of concurrent allocations
+ * @trim_level: Level of JIT allocation trimming to perform on free (0 - 100%)
+ * @padding: Currently unused, must be zero
*
* Note that depending on the VA size of the application and GPU, the value
* specified in @va_pages may be ignored.
*/
struct kbase_ioctl_mem_jit_init {
__u64 va_pages;
+ __u8 max_allocations;
+ __u8 trim_level;
+ __u8 padding[6];
};
#define KBASE_IOCTL_MEM_JIT_INIT \
@@ -595,7 +646,6 @@ union kbase_ioctl_mem_find_gpu_start_and_offset {
#define KBASE_IOCTL_MEM_FIND_GPU_START_AND_OFFSET \
_IOWR(KBASE_IOCTL_TYPE, 31, union kbase_ioctl_mem_find_gpu_start_and_offset)
-/* IOCTL 32 is free for use */
#define KBASE_IOCTL_CINSTR_GWT_START \
_IO(KBASE_IOCTL_TYPE, 33)
@@ -605,9 +655,7 @@ union kbase_ioctl_mem_find_gpu_start_and_offset {
/**
* union kbase_ioctl_gwt_dump - Used to collect all GPU write fault addresses.
- * @handle_buffer: Address of buffer to hold handles of modified areas.
- * @offset_buffer: Address of buffer to hold offset size of modified areas
- * (in pages)
+ * @addr_buffer: Address of buffer to hold addresses of GPU-modified areas.
* @size_buffer: Address of buffer to hold size of modified areas (in pages)
* @len: Number of addresses the buffers can hold.
* @more_data_available: Status indicating if more addresses are available.
@@ -615,13 +663,13 @@ union kbase_ioctl_mem_find_gpu_start_and_offset {
*
* @in: Input parameters
* @out: Output parameters
+ *
* This structure is used when performing a call to dump GPU write fault
* addresses.
*/
union kbase_ioctl_cinstr_gwt_dump {
struct {
- __u64 handle_buffer;
- __u64 offset_buffer;
+ __u64 addr_buffer;
__u64 size_buffer;
__u32 len;
__u32 padding;
@@ -637,7 +685,6 @@ union kbase_ioctl_cinstr_gwt_dump {
#define KBASE_IOCTL_CINSTR_GWT_DUMP \
_IOWR(KBASE_IOCTL_TYPE, 35, union kbase_ioctl_cinstr_gwt_dump)
-/* IOCTLs 36-41 are reserved */
/***************
* test ioctls *
@@ -680,6 +727,37 @@ struct kbase_ioctl_tlstream_stats {
#define KBASE_IOCTL_TLSTREAM_STATS \
_IOR(KBASE_IOCTL_TEST_TYPE, 2, struct kbase_ioctl_tlstream_stats)
+/**
+ * struct kbase_ioctl_cs_event_memory_write - Write an event memory address
+ * @cpu_addr: Memory address to write
+ * @value: Value to write
+ * @padding: Currently unused, must be zero
+ */
+struct kbase_ioctl_cs_event_memory_write {
+ __u64 cpu_addr;
+ __u8 value;
+ __u8 padding[7];
+};
+
+/**
+ * union kbase_ioctl_cs_event_memory_read - Read an event memory address
+ * @cpu_addr: Memory address to read
+ * @value: Value read
+ * @padding: Currently unused, must be zero
+ *
+ * @in: Input parameters
+ * @out: Output parameters
+ */
+union kbase_ioctl_cs_event_memory_read {
+ struct {
+ __u64 cpu_addr;
+ } in;
+ struct {
+ __u8 value;
+ __u8 padding[7];
+ } out;
+};
+
#endif
/**********************************
@@ -723,7 +801,7 @@ struct kbase_ioctl_tlstream_stats {
#define KBASE_GPUPROP_RAW_L2_PRESENT 27
#define KBASE_GPUPROP_RAW_STACK_PRESENT 28
#define KBASE_GPUPROP_RAW_L2_FEATURES 29
-#define KBASE_GPUPROP_RAW_SUSPEND_SIZE 30
+#define KBASE_GPUPROP_RAW_CORE_FEATURES 30
#define KBASE_GPUPROP_RAW_MEM_FEATURES 31
#define KBASE_GPUPROP_RAW_MMU_FEATURES 32
#define KBASE_GPUPROP_RAW_AS_PRESENT 33
@@ -778,6 +856,11 @@ struct kbase_ioctl_tlstream_stats {
#define KBASE_GPUPROP_TEXTURE_FEATURES_3 80
#define KBASE_GPUPROP_RAW_TEXTURE_FEATURES_3 81
+#define KBASE_GPUPROP_NUM_EXEC_ENGINES 82
+
+#define KBASE_GPUPROP_RAW_THREAD_TLS_ALLOC 83
+#define KBASE_GPUPROP_TLS_ALLOC 84
+
#ifdef __cplusplus
}
#endif
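
As the comment on KBASE_IOCTL_GET_DDK_VERSION notes, the _IOW/_IOR/_IOWR direction describes only the struct passed as the ioctl argument, not any user buffers it points at; GET_DDK_VERSION is _IOW because the struct itself is only copied into the kernel. A userspace sketch of issuing it, assuming the struct's remaining fields are size and padding (only version_buffer appears above) and that fd is an open Mali device node such as /dev/mali0:

#include <linux/types.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Field layout assumed: version_buffer is shown in the header above;
 * size and padding are assumed to complete the 16-byte struct.
 */
struct kbase_ioctl_get_ddk_version {
	__u64 version_buffer;
	__u32 size;
	__u32 padding;
};

#define KBASE_IOCTL_TYPE 0x80
#define KBASE_IOCTL_GET_DDK_VERSION \
	_IOW(KBASE_IOCTL_TYPE, 13, struct kbase_ioctl_get_ddk_version)

/* Per the kernel-doc above, returns the number of bytes written
 * (including the terminating byte) or a negative error code.
 */
static int get_ddk_version(int fd, char *buf, __u32 len)
{
	struct kbase_ioctl_get_ddk_version args;

	memset(&args, 0, sizeof(args));
	args.version_buffer = (__u64)(uintptr_t)buf;
	args.size = len;
	return ioctl(fd, KBASE_IOCTL_GET_DDK_VERSION, &args);
}
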
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jd.c b/drivers/gpu/arm/midgard/mali_kbase_jd.c
index 70277ad5ddcd0b..4f6a346896ebe8 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_jd.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_jd.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -91,6 +91,7 @@ static int jd_run_atom(struct kbase_jd_atom *katom)
} else if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
/* Soft-job */
if (katom->will_fail_event_code) {
+ kbase_finish_soft_job(katom);
katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
return 0;
}
@@ -208,7 +209,7 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
*/
const bool implicit_sync = !kbase_ctx_flag(katom->kctx,
KCTX_NO_IMPLICIT_SYNC);
-#else /* CONFIG_SYNC || CONFIG_SYNC_FILE */
+#else /* CONFIG_SYNC || CONFIG_SYNC_FILE*/
const bool implicit_sync = true;
#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */
#endif /* CONFIG_MALI_DMA_FENCE */
@@ -281,7 +282,7 @@ static int kbase_jd_pre_external_resources(struct kbase_jd_atom *katom, const st
katom->kctx,
res->ext_resource & ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
/* did we find a matching region object? */
- if (NULL == reg || (reg->flags & KBASE_REG_FREE)) {
+ if (kbase_is_region_invalid_or_free(reg)) {
/* roll back */
goto failed_loop;
}
@@ -808,7 +809,6 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
katom->nr_extres = user_atom->nr_extres;
katom->extres = NULL;
katom->device_nr = user_atom->device_nr;
- katom->affinity = 0;
katom->jc = user_atom->jc;
katom->coreref_state = KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED;
katom->core_req = user_atom->core_req;
@@ -923,10 +923,35 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
if (will_fail) {
if (!queued) {
+ if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ /* This soft-job has failed due to a previous
+ * dependency; however, we should still run the
+ * prepare & finish functions.
+ */
+ int err = kbase_prepare_soft_job(katom);
+
+ if (err >= 0)
+ kbase_finish_soft_job(katom);
+ }
+
ret = jd_done_nolock(katom, NULL);
goto out;
} else {
+
+ if (katom->core_req & BASE_JD_REQ_SOFT_JOB) {
+ /* This soft-job has failed due to a previous
+ * dependency; however, we should still run the
+ * prepare & finish functions.
+ */
+ if (kbase_prepare_soft_job(katom) != 0) {
+ katom->event_code =
+ BASE_JD_EVENT_JOB_INVALID;
+ ret = jd_done_nolock(katom, NULL);
+ goto out;
+ }
+ }
+
katom->will_fail_event_code = katom->event_code;
ret = false;
@@ -1003,11 +1028,13 @@ bool jd_submit_atom(struct kbase_context *kctx, const struct base_jd_atom_v2 *us
goto out;
}
- /* Reject fence wait soft-job atoms accessing external resources */
+ /* Reject soft-job atoms of certain types from accessing external resources */
if ((katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) &&
- ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_FENCE_WAIT)) {
+ (((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_FENCE_WAIT) ||
+ ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_JIT_ALLOC) ||
+ ((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) == BASE_JD_REQ_SOFT_JIT_FREE))) {
dev_warn(kctx->kbdev->dev,
- "Rejecting fence wait soft-job atom accessing external resources");
+ "Rejecting soft-job atom accessing external resources");
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
ret = jd_done_nolock(katom, NULL);
goto out;
@@ -1123,9 +1150,6 @@ int kbase_jd_submit(struct kbase_context *kctx,
return -EINVAL;
}
- KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, atomic_add_return(nr_atoms,
- &kctx->timeline.jd_atoms_in_flight));
-
/* All atoms submitted in this call have the same flush ID */
latest_flush = kbase_backend_get_current_flush_id(kbdev);
@@ -1136,9 +1160,6 @@ int kbase_jd_submit(struct kbase_context *kctx,
if (copy_from_user(&user_atom, user_addr,
sizeof(user_atom)) != 0) {
err = -EINVAL;
- KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx,
- atomic_sub_return(nr_atoms - i,
- &kctx->timeline.jd_atoms_in_flight));
break;
}
@@ -1222,7 +1243,6 @@ void kbase_jd_done_worker(struct work_struct *data)
struct kbasep_js_atom_retained_state katom_retained_state;
bool context_idle;
base_jd_core_req core_req = katom->core_req;
- u64 affinity = katom->affinity;
enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
/* Soft jobs should never reach this function */
@@ -1270,7 +1290,8 @@ void kbase_jd_done_worker(struct work_struct *data)
return;
}
- if (katom->event_code != BASE_JD_EVENT_DONE)
+ if ((katom->event_code != BASE_JD_EVENT_DONE) &&
+ (!kbase_ctx_flag(katom->kctx, KCTX_DYING)))
dev_err(kbdev->dev,
"t6xx: GPU fault 0x%02lx from job slot %d\n",
(unsigned long)katom->event_code,
@@ -1368,8 +1389,7 @@ void kbase_jd_done_worker(struct work_struct *data)
mutex_unlock(&jctx->lock);
}
- kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
- coreref_state);
+ kbase_backend_complete_wq_post_sched(kbdev, core_req, coreref_state);
if (context_idle)
kbase_pm_context_idle(kbdev);
diff --git a/drivers/gpu/arm/midgard/mali_kbase_jm.c b/drivers/gpu/arm/midgard/mali_kbase_jm.c
index 3b16e6f73fe7f2..da78a1670d9b8c 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_jm.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_jm.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -44,7 +44,7 @@ static bool kbase_jm_next_job(struct kbase_device *kbdev, int js,
struct kbase_context *kctx;
int i;
- kctx = kbdev->hwaccess.active_kctx;
+ kctx = kbdev->hwaccess.active_kctx[js];
if (!kctx)
return true;
@@ -106,10 +106,14 @@ void kbase_jm_try_kick_all(struct kbase_device *kbdev)
void kbase_jm_idle_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
{
+ int js;
+
lockdep_assert_held(&kbdev->hwaccess_lock);
- if (kbdev->hwaccess.active_kctx == kctx)
- kbdev->hwaccess.active_kctx = NULL;
+ for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+ if (kbdev->hwaccess.active_kctx[js] == kctx)
+ kbdev->hwaccess.active_kctx[js] = NULL;
+ }
}
struct kbase_jd_atom *kbase_jm_return_atom_to_js(struct kbase_device *kbdev,
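
With hwaccess.active_kctx now an array indexed by job slot, idling a context means scanning every slot, since a context can be active on more than one slot at once. The pattern in miniature; types and NR_SLOTS below are stand-ins:

#include <stddef.h>

#define NR_SLOTS 3  /* stand-in for BASE_JM_MAX_NR_SLOTS */

struct ctx;

struct hwaccess {
	struct ctx *active_kctx[NR_SLOTS]; /* one active context per slot */
};

/* Clear 'kctx' from every job slot it is active on, mirroring the
 * per-slot loop kbase_jm_idle_ctx() gained above.
 */
static void idle_ctx(struct hwaccess *hw, struct ctx *kctx)
{
	int js;

	for (js = 0; js < NR_SLOTS; js++) {
		if (hw->active_kctx[js] == kctx)
			hw->active_kctx[js] = NULL;
	}
}
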
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.c b/drivers/gpu/arm/midgard/mali_kbase_js.c
index 8f50b3c08ad45f..66a84447c60ec8 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_js.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_js.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -422,7 +422,7 @@ static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
int kbasep_js_devdata_init(struct kbase_device * const kbdev)
{
struct kbasep_js_device_data *jsdd;
- int i;
+ int i, j;
KBASE_DEBUG_ASSERT(kbdev != NULL);
@@ -527,8 +527,10 @@ int kbasep_js_devdata_init(struct kbase_device * const kbdev)
sema_init(&jsdd->schedule_sem, 1);
for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
- INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i]);
- INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i]);
+ for (j = 0; j < KBASE_JS_ATOM_SCHED_PRIO_COUNT; ++j) {
+ INIT_LIST_HEAD(&jsdd->ctx_list_pullable[i][j]);
+ INIT_LIST_HEAD(&jsdd->ctx_list_unpullable[i][j]);
+ }
}
return 0;
@@ -552,13 +554,13 @@ void kbasep_js_devdata_term(struct kbase_device *kbdev)
*/
KBASE_DEBUG_ASSERT(js_devdata->nr_all_contexts_running == 0);
KBASE_DEBUG_ASSERT(memcmp(
- js_devdata->runpool_irq.ctx_attr_ref_count,
- zero_ctx_attr_ref_count,
- sizeof(zero_ctx_attr_ref_count)) == 0);
+ js_devdata->runpool_irq.ctx_attr_ref_count,
+ zero_ctx_attr_ref_count,
+ sizeof(zero_ctx_attr_ref_count)) == 0);
CSTD_UNUSED(zero_ctx_attr_ref_count);
}
-int kbasep_js_kctx_init(struct kbase_context * const kctx)
+int kbasep_js_kctx_init(struct kbase_context *const kctx)
{
struct kbase_device *kbdev;
struct kbasep_js_kctx_info *js_kctx_info;
@@ -606,6 +608,7 @@ void kbasep_js_kctx_term(struct kbase_context *kctx)
struct kbasep_js_kctx_info *js_kctx_info;
int js;
bool update_ctx_count = false;
+ unsigned long flags;
KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -621,8 +624,10 @@ void kbasep_js_kctx_term(struct kbase_context *kctx)
mutex_lock(&kbdev->js_data.queue_mutex);
mutex_lock(&kctx->jctx.sched_info.ctx.jsctx_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++)
list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
if (kbase_ctx_flag(kctx, KCTX_RUNNABLE_REF)) {
WARN_ON(atomic_read(&kbdev->js_data.nr_contexts_runnable) <= 0);
@@ -666,7 +671,7 @@ static bool kbase_js_ctx_list_add_pullable_nolock(struct kbase_device *kbdev,
list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
list_add_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
- &kbdev->js_data.ctx_list_pullable[js]);
+ &kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
if (!kctx->slots_pullable) {
kbdev->js_data.nr_contexts_pullable++;
@@ -706,7 +711,7 @@ static bool kbase_js_ctx_list_add_pullable_head_nolock(
list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
list_add(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
- &kbdev->js_data.ctx_list_pullable[js]);
+ &kbdev->js_data.ctx_list_pullable[js][kctx->priority]);
if (!kctx->slots_pullable) {
kbdev->js_data.nr_contexts_pullable++;
@@ -777,7 +782,7 @@ static bool kbase_js_ctx_list_add_unpullable_nolock(struct kbase_device *kbdev,
lockdep_assert_held(&kbdev->hwaccess_lock);
list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
- &kbdev->js_data.ctx_list_unpullable[js]);
+ &kbdev->js_data.ctx_list_unpullable[js][kctx->priority]);
if (kctx->slots_pullable == (1 << js)) {
kbdev->js_data.nr_contexts_pullable--;
@@ -852,19 +857,23 @@ static struct kbase_context *kbase_js_ctx_list_pop_head_nolock(
int js)
{
struct kbase_context *kctx;
+ int i;
lockdep_assert_held(&kbdev->hwaccess_lock);
- if (list_empty(&kbdev->js_data.ctx_list_pullable[js]))
- return NULL;
+ for (i = 0; i < KBASE_JS_ATOM_SCHED_PRIO_COUNT; i++) {
+ if (list_empty(&kbdev->js_data.ctx_list_pullable[js][i]))
+ continue;
- kctx = list_entry(kbdev->js_data.ctx_list_pullable[js].next,
- struct kbase_context,
- jctx.sched_info.ctx.ctx_list_entry[js]);
+ kctx = list_entry(kbdev->js_data.ctx_list_pullable[js][i].next,
+ struct kbase_context,
+ jctx.sched_info.ctx.ctx_list_entry[js]);
- list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
+ list_del_init(&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
- return kctx;
+ return kctx;
+ }
+ return NULL;
}
/**
@@ -1065,6 +1074,51 @@ static bool kbase_js_dep_validate(struct kbase_context *kctx,
return ret;
}
+void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ int js;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Move kctx to the pullable/unpullable list as per the new priority */
+ if (new_priority != kctx->priority) {
+ for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
+ if (kctx->slots_pullable & (1 << js))
+ list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_pullable[js][new_priority]);
+ else
+ list_move_tail(&kctx->jctx.sched_info.ctx.ctx_list_entry[js],
+ &kbdev->js_data.ctx_list_unpullable[js][new_priority]);
+ }
+
+ kctx->priority = new_priority;
+ }
+}
+
+void kbase_js_update_ctx_priority(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ int new_priority = KBASE_JS_ATOM_SCHED_PRIO_LOW;
+ int prio;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (kbdev->js_ctx_scheduling_mode == KBASE_JS_SYSTEM_PRIORITY_MODE) {
+ /* Determine the new priority for the context, as per the
+ * priority of its currently in-use atoms.
+ */
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ if (kctx->atoms_count[prio]) {
+ new_priority = prio;
+ break;
+ }
+ }
+ }
+
+ kbase_js_set_ctx_priority(kctx, new_priority);
+}
+
bool kbasep_js_add_job(struct kbase_context *kctx,
struct kbase_jd_atom *atom)
{
@@ -1099,6 +1153,9 @@ bool kbasep_js_add_job(struct kbase_context *kctx,
/* Lock for state available during IRQ */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (++kctx->atoms_count[atom->sched_priority] == 1)
+ kbase_js_update_ctx_priority(kctx);
+
if (!kbase_js_dep_validate(kctx, atom)) {
/* Dependencies could not be represented */
--(js_kctx_info->ctx.nr_jobs);
@@ -1107,6 +1164,19 @@ bool kbasep_js_add_job(struct kbase_context *kctx,
* dependencies */
atom->status = KBASE_JD_ATOM_STATE_QUEUED;
+ /* Undo the count, as the atom will get added again later, but
+ * leave the context priority adjusted or boosted, in case this
+ * was the first higher priority atom received for this context.
+ * This prevents priority inversion, where another context with
+ * medium priority atoms would keep getting scheduled over this
+ * context, which has both lower and higher priority atoms but
+ * whose higher priority atoms are blocked by a dependency on
+ * lower priority atoms. With the priority boost, the high
+ * priority atom will get to run at the earliest opportunity.
+ */
+ kctx->atoms_count[atom->sched_priority]--;
+
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&js_devdata->runpool_mutex);
@@ -1114,7 +1184,6 @@ bool kbasep_js_add_job(struct kbase_context *kctx,
}
KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE(atom, TL_ATOM_STATE_READY);
- KBASE_TIMELINE_ATOM_READY(kctx, kbase_jd_atom_id(kctx, atom));
enqueue_required = kbase_js_dep_resolved_submit(kctx, atom);
@@ -1134,7 +1203,8 @@ bool kbasep_js_add_job(struct kbase_context *kctx,
}
/* If this context is active and the atom is the first on its slot,
* kick the job manager to attempt to fast-start the atom */
- if (enqueue_required && kctx == kbdev->hwaccess.active_kctx)
+ if (enqueue_required && kctx ==
+ kbdev->hwaccess.active_kctx[atom->slot_nr])
kbase_jm_try_kick(kbdev, 1 << atom->slot_nr);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -1173,6 +1243,7 @@ void kbasep_js_remove_job(struct kbase_device *kbdev,
struct kbase_context *kctx, struct kbase_jd_atom *atom)
{
struct kbasep_js_kctx_info *js_kctx_info;
+ unsigned long flags;
KBASE_DEBUG_ASSERT(kbdev != NULL);
KBASE_DEBUG_ASSERT(kctx != NULL);
@@ -1186,6 +1257,11 @@ void kbasep_js_remove_job(struct kbase_device *kbdev,
/* De-refcount ctx.nr_jobs */
KBASE_DEBUG_ASSERT(js_kctx_info->ctx.nr_jobs > 0);
--(js_kctx_info->ctx.nr_jobs);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ if (--kctx->atoms_count[atom->sched_priority] == 0)
+ kbase_js_update_ctx_priority(kctx);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
bool kbasep_js_remove_cancelled_job(struct kbase_device *kbdev,
@@ -1256,9 +1332,8 @@ struct kbase_context *kbasep_js_runpool_lookup_ctx(struct kbase_device *kbdev,
}
/**
- * kbasep_js_release_result - Try running more jobs after releasing a context
- * and/or atom
- *
+ * kbasep_js_run_jobs_after_ctx_and_atom_release - Try running more jobs after
+ * releasing a context and/or atom
* @kbdev: The kbase_device to operate on
* @kctx: The kbase_context to operate on
* @katom_retained_state: Retained state from the atom
@@ -1304,12 +1379,15 @@ static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
return result;
}
-/*
- * Internal function to release the reference on a ctx and an atom's "retained
- * state", only taking the runpool and as transaction mutexes
+/**
+ * kbasep_js_runpool_release_ctx_internal - Internal function to release the reference
+ * on a ctx and an atom's "retained state", only
+ * taking the runpool and AS transaction mutexes
+ * @kbdev: The kbase_device to operate on
+ * @kctx: The kbase_context to operate on
+ * @katom_retained_state: Retained state from the atom
*
- * This also starts more jobs running in the case of an ctx-attribute state
- * change
+ * This also starts more jobs running in the case of a ctx-attribute state change
*
* This does none of the followup actions for scheduling:
* - It does not schedule in a new context
@@ -1317,11 +1395,15 @@ static kbasep_js_release_result kbasep_js_run_jobs_after_ctx_and_atom_release(
*
* For those tasks, just call kbasep_js_runpool_release_ctx() instead
*
- * Requires:
+ * Has the following requirements:
* - Context is scheduled in, and kctx->as_nr matches kctx_as_nr
* - Context has a non-zero refcount
* - Caller holds js_kctx_info->ctx.jsctx_mutex
* - Caller holds js_devdata->runpool_mutex
+ *
+ * Return: A bitpattern of KBASEP_JS_RELEASE_RESULT_* flags, indicating the
+ *         result of releasing the context: whether the caller should try
+ *         scheduling a new context or should try scheduling all contexts.
*/
static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
struct kbase_device *kbdev,
@@ -1407,8 +1489,10 @@ static kbasep_js_release_result kbasep_js_runpool_release_ctx_internal(
kbase_backend_release_ctx_irq(kbdev, kctx);
- if (kbdev->hwaccess.active_kctx == kctx)
- kbdev->hwaccess.active_kctx = NULL;
+ for (slot = 0; slot < num_slots; slot++) {
+ if (kbdev->hwaccess.active_kctx[slot] == kctx)
+ kbdev->hwaccess.active_kctx[slot] = NULL;
+ }
/* Ctx Attribute handling
*
@@ -1600,7 +1684,8 @@ void kbase_js_set_timeouts(struct kbase_device *kbdev)
}
static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
- struct kbase_context *kctx)
+ struct kbase_context *kctx,
+ int js)
{
struct kbasep_js_device_data *js_devdata;
struct kbasep_js_kctx_info *js_kctx_info;
@@ -1676,7 +1761,7 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
return false;
}
- kbdev->hwaccess.active_kctx = kctx;
+ kbdev->hwaccess.active_kctx[js] = kctx;
#if defined(CONFIG_MALI_GATOR_SUPPORT)
kbase_trace_mali_mmu_as_in_use(kctx->as_nr);
@@ -1709,6 +1794,8 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
kctx_suspended = true;
}
+ kbase_ctx_flag_clear(kctx, KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
+
/* Transaction complete */
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
mutex_unlock(&kbdev->mmu_hw_mutex);
@@ -1733,23 +1820,27 @@ static bool kbasep_js_schedule_ctx(struct kbase_device *kbdev,
}
static bool kbase_js_use_ctx(struct kbase_device *kbdev,
- struct kbase_context *kctx)
+ struct kbase_context *kctx,
+ int js)
{
unsigned long flags;
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
if (kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
- kbase_backend_use_ctx_sched(kbdev, kctx)) {
+ kbase_backend_use_ctx_sched(kbdev, kctx, js)) {
/* Context already has ASID - mark as active */
- kbdev->hwaccess.active_kctx = kctx;
+ if (kbdev->hwaccess.active_kctx[js] != kctx) {
+ kbdev->hwaccess.active_kctx[js] = kctx;
+ kbase_ctx_flag_clear(kctx,
+ KCTX_PULLED_SINCE_ACTIVE_JS0 << js);
+ }
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return true; /* Context already scheduled */
}
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
-
- return kbasep_js_schedule_ctx(kbdev, kctx);
+ return kbasep_js_schedule_ctx(kbdev, kctx, js);
}
void kbasep_js_schedule_privileged_ctx(struct kbase_device *kbdev,
@@ -1846,7 +1937,7 @@ void kbasep_js_suspend(struct kbase_device *kbdev)
retained = retained << 1;
- if (kctx) {
+ if (kctx && !(kbdev->as_free & (1u << i))) {
kbase_ctx_sched_retain_ctx_refcount(kctx);
retained |= 1u;
/* We can only cope with up to 1 privileged context -
@@ -1880,7 +1971,7 @@ void kbasep_js_suspend(struct kbase_device *kbdev)
void kbasep_js_resume(struct kbase_device *kbdev)
{
struct kbasep_js_device_data *js_devdata;
- int js;
+ int js, prio;
KBASE_DEBUG_ASSERT(kbdev);
js_devdata = &kbdev->js_data;
@@ -1888,31 +1979,43 @@ void kbasep_js_resume(struct kbase_device *kbdev)
mutex_lock(&js_devdata->queue_mutex);
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
- struct kbase_context *kctx, *n;
-
- list_for_each_entry_safe(kctx, n,
- &kbdev->js_data.ctx_list_unpullable[js],
- jctx.sched_info.ctx.ctx_list_entry[js]) {
- struct kbasep_js_kctx_info *js_kctx_info;
+ for (prio = 0; prio < KBASE_JS_ATOM_SCHED_PRIO_COUNT; prio++) {
+ struct kbase_context *kctx, *n;
unsigned long flags;
- bool timer_sync = false;
- js_kctx_info = &kctx->jctx.sched_info;
-
- mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
- mutex_lock(&js_devdata->runpool_mutex);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
- kbase_js_ctx_pullable(kctx, js, false))
- timer_sync =
- kbase_js_ctx_list_add_pullable_nolock(
- kbdev, kctx, js);
+ list_for_each_entry_safe(kctx, n,
+ &kbdev->js_data.ctx_list_unpullable[js][prio],
+ jctx.sched_info.ctx.ctx_list_entry[js]) {
+ struct kbasep_js_kctx_info *js_kctx_info;
+ bool timer_sync = false;
+
+ /* Drop lock so we can take kctx mutexes */
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
+ flags);
+
+ js_kctx_info = &kctx->jctx.sched_info;
+
+ mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
+ mutex_lock(&js_devdata->runpool_mutex);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED) &&
+ kbase_js_ctx_pullable(kctx, js, false))
+ timer_sync =
+ kbase_js_ctx_list_add_pullable_nolock(
+ kbdev, kctx, js);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ if (timer_sync)
+ kbase_backend_ctx_count_changed(kbdev);
+ mutex_unlock(&js_devdata->runpool_mutex);
+ mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
+
+ /* Take lock before accessing list again */
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ }
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- if (timer_sync)
- kbase_backend_ctx_count_changed(kbdev);
- mutex_unlock(&js_devdata->runpool_mutex);
- mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
}
}
mutex_unlock(&js_devdata->queue_mutex);
@@ -2119,6 +2222,7 @@ struct kbase_jd_atom *kbase_js_pull(struct kbase_context *kctx, int js)
}
kbase_ctx_flag_set(kctx, KCTX_PULLED);
+ kbase_ctx_flag_set(kctx, (KCTX_PULLED_SINCE_ACTIVE_JS0 << js));
pulled = atomic_inc_return(&kctx->atoms_pulled);
if (pulled == 1 && !kctx->slots_pullable) {
@@ -2155,7 +2259,6 @@ static void js_return_worker(struct work_struct *data)
bool context_idle = false;
unsigned long flags;
base_jd_core_req core_req = katom->core_req;
- u64 affinity = katom->affinity;
enum kbase_atom_coreref_state coreref_state = katom->coreref_state;
KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(katom);
@@ -2246,8 +2349,7 @@ static void js_return_worker(struct work_struct *data)
kbase_js_sched_all(kbdev);
- kbase_backend_complete_wq_post_sched(kbdev, core_req, affinity,
- coreref_state);
+ kbase_backend_complete_wq_post_sched(kbdev, core_req, coreref_state);
}
void kbase_js_unpull(struct kbase_context *kctx, struct kbase_jd_atom *katom)
@@ -2420,20 +2522,22 @@ struct kbase_jd_atom *kbase_js_complete_atom(struct kbase_jd_atom *katom,
void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
{
struct kbasep_js_device_data *js_devdata;
- struct kbase_context *last_active;
+ struct kbase_context *last_active[BASE_JM_MAX_NR_SLOTS];
bool timer_sync = false;
- bool ctx_waiting = false;
+ bool ctx_waiting[BASE_JM_MAX_NR_SLOTS];
+ int js;
js_devdata = &kbdev->js_data;
down(&js_devdata->schedule_sem);
mutex_lock(&js_devdata->queue_mutex);
- last_active = kbdev->hwaccess.active_kctx;
+ for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+ last_active[js] = kbdev->hwaccess.active_kctx[js];
+ ctx_waiting[js] = false;
+ }
while (js_mask) {
- int js;
-
js = ffs(js_mask) - 1;
while (1) {
@@ -2470,7 +2574,7 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
kbase_ctx_flag_set(kctx, KCTX_ACTIVE);
}
- if (!kbase_js_use_ctx(kbdev, kctx)) {
+ if (!kbase_js_use_ctx(kbdev, kctx, js)) {
mutex_lock(
&kctx->jctx.sched_info.ctx.jsctx_mutex);
/* Context can not be used at this time */
@@ -2515,7 +2619,10 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
* Unless this context is already 'active', in
* which case it's effectively already scheduled
* so push it to the back of the list. */
- if (pullable && kctx == last_active)
+ if (pullable && kctx == last_active[js] &&
+ kbase_ctx_flag(kctx,
+ (KCTX_PULLED_SINCE_ACTIVE_JS0 <<
+ js)))
timer_sync |=
kbase_js_ctx_list_add_pullable_nolock(
kctx->kbdev,
@@ -2537,10 +2644,10 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
* marker to prevent it from submitting atoms in
* the IRQ handler, which would prevent this
* context from making progress. */
- if (last_active && kctx != last_active &&
- kbase_js_ctx_pullable(
- last_active, js, true))
- ctx_waiting = true;
+ if (last_active[js] && kctx != last_active[js]
+ && kbase_js_ctx_pullable(
+ last_active[js], js, true))
+ ctx_waiting[js] = true;
if (context_idle) {
kbase_jm_idle_ctx(kbdev, kctx);
@@ -2580,8 +2687,11 @@ void kbase_js_sched(struct kbase_device *kbdev, int js_mask)
if (timer_sync)
kbase_js_sync_timers(kbdev);
- if (kbdev->hwaccess.active_kctx == last_active && ctx_waiting)
- kbdev->hwaccess.active_kctx = NULL;
+ for (js = 0; js < BASE_JM_MAX_NR_SLOTS; js++) {
+ if (kbdev->hwaccess.active_kctx[js] == last_active[js] &&
+ ctx_waiting[js])
+ kbdev->hwaccess.active_kctx[js] = NULL;
+ }
mutex_unlock(&js_devdata->queue_mutex);
up(&js_devdata->schedule_sem);
@@ -2647,12 +2757,16 @@ void kbase_js_zap_context(struct kbase_context *kctx)
* handled when it leaves the runpool.
*/
if (!kbase_ctx_flag(kctx, KCTX_SCHEDULED)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
if (!list_empty(
&kctx->jctx.sched_info.ctx.ctx_list_entry[js]))
list_del_init(
&kctx->jctx.sched_info.ctx.ctx_list_entry[js]);
}
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* The following events require us to kill off remaining jobs
* and update PM book-keeping:
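
The scheduler above now keeps one pullable and one unpullable list per (job slot, priority) pair, and the pop-head path scans priorities from index 0 (highest) downwards, so higher priority contexts are always pulled first. A reduced sketch, with a plain singly linked list standing in for the kernel's list_head:

#include <stddef.h>

#define NR_SLOTS 3  /* stand-in for BASE_JM_MAX_NR_SLOTS */
#define NR_PRIOS 3  /* HIGH = 0, MED = 1, LOW = 2 */

struct ctx {
	struct ctx *next; /* simplified, non-intrusive list link */
};

struct js_data {
	/* One queue per (job slot, priority); index 0 is highest prio. */
	struct ctx *pullable[NR_SLOTS][NR_PRIOS];
};

/* Pop the first pullable context for slot 'js', preferring higher
 * priorities - the same scan order kbase_js_ctx_list_pop_head_nolock()
 * uses over its per-priority list_heads.
 */
static struct ctx *pop_head(struct js_data *js_data, int js)
{
	int prio;

	for (prio = 0; prio < NR_PRIOS; prio++) {
		struct ctx *kctx = js_data->pullable[js][prio];

		if (kctx) {
			js_data->pullable[js][prio] = kctx->next;
			kctx->next = NULL;
			return kctx;
		}
	}
	return NULL;
}
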
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js.h b/drivers/gpu/arm/midgard/mali_kbase_js.h
index aa930b9d83f67c..355da27edc1b90 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_js.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_js.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -596,6 +596,27 @@ bool kbase_js_is_atom_valid(struct kbase_device *kbdev,
*/
void kbase_js_set_timeouts(struct kbase_device *kbdev);
+/**
+ * kbase_js_set_ctx_priority - set the context priority
+ * @kctx: Context pointer
+ * @new_priority: New priority value for the Context
+ *
+ * The context priority is set to a new value and the context is moved to
+ * the pullable/unpullable list matching the new priority.
+ */
+void kbase_js_set_ctx_priority(struct kbase_context *kctx, int new_priority);
+
+
+/**
+ * kbase_js_update_ctx_priority - update the context priority
+ * @kctx: Context pointer
+ *
+ * The context priority is updated to match the priority of the atoms
+ * currently in use for that context, but only if the system priority mode
+ * of context scheduling is in use.
+ */
+void kbase_js_update_ctx_priority(struct kbase_context *kctx);
+
/*
* Helpers follow
*/
@@ -641,7 +662,8 @@ static inline void kbasep_js_set_submit_allowed(struct kbasep_js_device_data *js
set_bit = (u16) (1u << kctx->as_nr);
- dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+ dev_dbg(kctx->kbdev->dev, "JS: Setting Submit Allowed on %p (as=%d)",
+ kctx, kctx->as_nr);
js_devdata->runpool_irq.submit_allowed |= set_bit;
}
@@ -666,7 +688,8 @@ static inline void kbasep_js_clear_submit_allowed(struct kbasep_js_device_data *
clear_bit = (u16) (1u << kctx->as_nr);
clear_mask = ~clear_bit;
- dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)", kctx, kctx->as_nr);
+ dev_dbg(kctx->kbdev->dev, "JS: Clearing Submit Allowed on %p (as=%d)",
+ kctx, kctx->as_nr);
js_devdata->runpool_irq.submit_allowed &= clear_mask;
}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_js_defs.h b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
index a54b6f3d47357f..7385daa42e94ff 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_js_defs.h
@@ -146,6 +146,48 @@ enum {
/** Combination of KBASE_JS_ATOM_DONE_<...> bits */
typedef u32 kbasep_js_atom_done_code;
+/*
+ * Context scheduling mode defines for kbase_device::js_ctx_scheduling_mode
+ */
+enum {
+ /*
+ * In this mode, the context containing higher priority atoms will be
+ * scheduled first, and newly runnable higher priority atoms can
+ * preempt lower priority atoms currently running on the GPU, even if
+ * they belong to a different context.
+ */
+ KBASE_JS_SYSTEM_PRIORITY_MODE = 0,
+
+ /*
+ * In this mode, the contexts are scheduled in a round-robin fashion, and
+ * newly runnable higher priority atoms can preempt the lower priority
+ * atoms currently running on the GPU, but only if they belong to the
+ * same context.
+ */
+ KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE,
+
+ /* Must be the last in the enum */
+ KBASE_JS_PRIORITY_MODE_COUNT,
+};
+
+/*
+ * Internal atom priority defines for kbase_jd_atom::sched_prio
+ */
+enum {
+ KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0,
+ KBASE_JS_ATOM_SCHED_PRIO_MED,
+ KBASE_JS_ATOM_SCHED_PRIO_LOW,
+ KBASE_JS_ATOM_SCHED_PRIO_COUNT,
+};
+
+/* Invalid priority for kbase_jd_atom::sched_prio */
+#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1
+
+/* Default priority in the case of contexts with no atoms, or being lenient
+ * about invalid priorities from userspace.
+ */
+#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED
+
/**
* @brief KBase Device Data Job Scheduler sub-structure
*
@@ -229,12 +271,12 @@ struct kbasep_js_device_data {
/**
* List of contexts that can currently be pulled from
*/
- struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS];
+ struct list_head ctx_list_pullable[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
/**
* List of contexts that can not currently be pulled from, but have
* jobs currently running.
*/
- struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS];
+ struct list_head ctx_list_unpullable[BASE_JM_MAX_NR_SLOTS][KBASE_JS_ATOM_SCHED_PRIO_COUNT];
/** Number of currently scheduled user contexts (excluding ones that are not submitting jobs) */
s8 nr_user_contexts_running;
@@ -365,22 +407,6 @@ struct kbasep_js_atom_retained_state {
*/
#define KBASEP_JS_TICK_RESOLUTION_US 1
-/*
- * Internal atom priority defines for kbase_jd_atom::sched_prio
- */
-enum {
- KBASE_JS_ATOM_SCHED_PRIO_HIGH = 0,
- KBASE_JS_ATOM_SCHED_PRIO_MED,
- KBASE_JS_ATOM_SCHED_PRIO_LOW,
- KBASE_JS_ATOM_SCHED_PRIO_COUNT,
-};
-
-/* Invalid priority for kbase_jd_atom::sched_prio */
-#define KBASE_JS_ATOM_SCHED_PRIO_INVALID -1
-
-/* Default priority in the case of contexts with no atoms, or being lenient
- * about invalid priorities from userspace */
-#define KBASE_JS_ATOM_SCHED_PRIO_DEFAULT KBASE_JS_ATOM_SCHED_PRIO_MED
/** @} *//* end group kbase_js */
/** @} *//* end group base_kbase_api */
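
Tying the two enums together: under KBASE_JS_SYSTEM_PRIORITY_MODE a context inherits the priority of its most important in-flight atom (as kbase_js_update_ctx_priority() computes above), while under KBASE_JS_PROCESS_LOCAL_PRIORITY_MODE every context keeps the low default, so priority only reorders atoms within a context. A compressed restatement of that decision, with stand-in names:

enum mode { SYSTEM_PRIORITY, PROCESS_LOCAL_PRIORITY };

#define PRIO_HIGH  0
#define PRIO_MED   1
#define PRIO_LOW   2
#define PRIO_COUNT 3

/* Mirrors kbase_js_update_ctx_priority(): in system mode the context
 * takes the priority of its most important in-use atom (lowest enum
 * value wins); in process-local mode every context keeps the same low
 * priority, so cross-context preemption by priority never happens.
 */
static int effective_ctx_priority(enum mode m,
				  const int atoms_count[PRIO_COUNT])
{
	int prio;

	if (m == SYSTEM_PRIORITY) {
		for (prio = 0; prio < PRIO_COUNT; prio++) {
			if (atoms_count[prio])
				return prio;
		}
	}
	return PRIO_LOW;
}
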
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.c b/drivers/gpu/arm/midgard/mali_kbase_mem.c
index a0897fb1862ac7..ea7cba2112b43c 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -29,9 +29,6 @@
#ifdef CONFIG_DMA_SHARED_BUFFER
#include <linux/dma-buf.h>
#endif /* CONFIG_DMA_SHARED_BUFFER */
-#ifdef CONFIG_UMP
-#include <linux/ump.h>
-#endif /* CONFIG_UMP */
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compat.h>
@@ -45,29 +42,34 @@
#include <mali_kbase_hw.h>
#include <mali_kbase_tlstream.h>
-/* This function finds out which RB tree the given GPU VA region belongs to
- * based on the region zone */
-static struct rb_root *kbase_reg_flags_to_rbtree(struct kbase_context *kctx,
- struct kbase_va_region *reg)
+/* Forward declarations */
+static void free_partial_locked(struct kbase_context *kctx,
+ struct kbase_mem_pool *pool, struct tagged_addr tp);
+
+static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx)
{
- struct rb_root *rbtree = NULL;
+#if defined(CONFIG_ARM64)
+ /* VA_BITS can be as high as 48 bits, but all bits are available for
+ * both user and kernel.
+ */
+ size_t cpu_va_bits = VA_BITS;
+#elif defined(CONFIG_X86_64)
+ /* x86_64 can access 48 bits of VA, but the 48th is used to denote
+ * kernel (1) vs userspace (0), so the max here is 47.
+ */
+ size_t cpu_va_bits = 47;
+#elif defined(CONFIG_ARM) || defined(CONFIG_X86_32)
+ size_t cpu_va_bits = sizeof(void *) * BITS_PER_BYTE;
+#else
+#error "Unknown CPU VA width for this architecture"
+#endif
- switch (reg->flags & KBASE_REG_ZONE_MASK) {
- case KBASE_REG_ZONE_CUSTOM_VA:
- rbtree = &kctx->reg_rbtree_custom;
- break;
- case KBASE_REG_ZONE_EXEC:
- rbtree = &kctx->reg_rbtree_exec;
- break;
- case KBASE_REG_ZONE_SAME_VA:
- rbtree = &kctx->reg_rbtree_same;
- /* fall through */
- default:
- rbtree = &kctx->reg_rbtree_same;
- break;
- }
+#ifdef CONFIG_64BIT
+ if (kbase_ctx_flag(kctx, KCTX_COMPAT))
+ cpu_va_bits = 32;
+#endif
- return rbtree;
+ return cpu_va_bits;
}
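
For reference, a minimal standalone sketch of the VA-width selection above; the 48-bit figure is a stand-in for the kernel's VA_BITS config constant, and is_compat_client is a made-up flag standing in for the KCTX_COMPAT check:

    #include <stdio.h>

    #define ASSUMED_VA_BITS 48  /* hypothetical arm64 VA_BITS; configs also use 39/42 */

    int main(void)
    {
        size_t cpu_va_bits = ASSUMED_VA_BITS;   /* arm64 branch above */
        int is_compat_client = 1;               /* stand-in for KCTX_COMPAT */

        /* x86_64 would instead pick 47, since bit 47 selects the kernel half */
        if (is_compat_client)
            cpu_va_bits = 32;   /* 32-bit clients are clamped to a 32-bit VA */

        printf("usable CPU VA bits: %zu\n", cpu_va_bits);
        return 0;
    }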
/* This function finds out which RB tree the given pfn from the GPU VA belongs
@@ -82,8 +84,6 @@ static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
#endif /* CONFIG_64BIT */
if (gpu_pfn >= KBASE_REG_ZONE_CUSTOM_VA_BASE)
rbtree = &kctx->reg_rbtree_custom;
- else if (gpu_pfn >= KBASE_REG_ZONE_EXEC_BASE)
- rbtree = &kctx->reg_rbtree_exec;
else
rbtree = &kctx->reg_rbtree_same;
#ifdef CONFIG_64BIT
@@ -99,15 +99,14 @@ static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
}
/* This function inserts a region into the tree. */
-static void kbase_region_tracker_insert(struct kbase_context *kctx,
- struct kbase_va_region *new_reg)
+static void kbase_region_tracker_insert(struct kbase_va_region *new_reg)
{
u64 start_pfn = new_reg->start_pfn;
struct rb_node **link = NULL;
struct rb_node *parent = NULL;
struct rb_root *rbtree = NULL;
- rbtree = kbase_reg_flags_to_rbtree(kctx, new_reg);
+ rbtree = new_reg->rbtree;
link = &(rbtree->rb_node);
/* Find the right place in the tree using tree search */
@@ -132,18 +131,13 @@ static void kbase_region_tracker_insert(struct kbase_context *kctx,
rb_insert_color(&(new_reg->rblink), rbtree);
}
-/* Find allocated region enclosing free range. */
-static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(
- struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)
+static struct kbase_va_region *find_region_enclosing_range_rbtree(
+ struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)
{
- struct rb_node *rbnode = NULL;
- struct kbase_va_region *reg = NULL;
- struct rb_root *rbtree = NULL;
-
+ struct rb_node *rbnode;
+ struct kbase_va_region *reg;
u64 end_pfn = start_pfn + nr_pages;
- rbtree = kbase_gpu_va_to_rbtree(kctx, start_pfn);
-
rbnode = rbtree->rb_node;
while (rbnode) {
@@ -166,19 +160,12 @@ static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_
return NULL;
}
-/* Find region enclosing given address. */
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr)
+struct kbase_va_region *kbase_find_region_enclosing_address(
+ struct rb_root *rbtree, u64 gpu_addr)
{
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
struct rb_node *rbnode;
struct kbase_va_region *reg;
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_root *rbtree = NULL;
-
- KBASE_DEBUG_ASSERT(NULL != kctx);
-
- lockdep_assert_held(&kctx->reg_lock);
-
- rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
rbnode = rbtree->rb_node;
@@ -202,14 +189,11 @@ struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struc
return NULL;
}
-KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
-
-/* Find region with given base address */
-struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr)
+/* Find region enclosing given address. */
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
+ struct kbase_context *kctx, u64 gpu_addr)
{
u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_node *rbnode = NULL;
- struct kbase_va_region *reg = NULL;
struct rb_root *rbtree = NULL;
KBASE_DEBUG_ASSERT(NULL != kctx);
@@ -218,6 +202,18 @@ struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kba
rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+ return kbase_find_region_enclosing_address(rbtree, gpu_addr);
+}
+
+KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
+
+struct kbase_va_region *kbase_find_region_base_address(
+ struct rb_root *rbtree, u64 gpu_addr)
+{
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+ struct rb_node *rbnode = NULL;
+ struct kbase_va_region *reg = NULL;
+
rbnode = rbtree->rb_node;
while (rbnode) {
@@ -234,11 +230,25 @@ struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kba
return NULL;
}
+/* Find region with given base address */
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(
+ struct kbase_context *kctx, u64 gpu_addr)
+{
+ u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
+ struct rb_root *rbtree = NULL;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ rbtree = kbase_gpu_va_to_rbtree(kctx, gpu_pfn);
+
+ return kbase_find_region_base_address(rbtree, gpu_addr);
+}
+
KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address);
/* Find region meeting given requirements */
static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
- struct kbase_context *kctx, struct kbase_va_region *reg_reqs,
+ struct kbase_va_region *reg_reqs,
size_t nr_pages, size_t align_offset, size_t align_mask,
u64 *out_start_pfn)
{
@@ -248,11 +258,9 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
/* Note that this search is a linear search, as we do not have a target
address in mind, so does not benefit from the rbtree search */
- rbtree = kbase_reg_flags_to_rbtree(kctx, reg_reqs);
-
- rbnode = rb_first(rbtree);
+ rbtree = reg_reqs->rbtree;
- while (rbnode) {
+ for (rbnode = rb_first(rbtree); rbnode; rbnode = rb_next(rbnode)) {
reg = rb_entry(rbnode, struct kbase_va_region, rblink);
if ((reg->nr_pages >= nr_pages) &&
(reg->flags & KBASE_REG_FREE)) {
@@ -268,6 +276,27 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
start_pfn += align_mask;
start_pfn -= (start_pfn - align_offset) & (align_mask);
+ if (!(reg_reqs->flags & KBASE_REG_GPU_NX)) {
+ /* Can't end at 4GB boundary */
+ if (0 == ((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB))
+ start_pfn += align_offset;
+
+ /* Can't start at 4GB boundary */
+ if (0 == (start_pfn & BASE_MEM_PFN_MASK_4GB))
+ start_pfn += align_offset;
+
+ if (!((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) ||
+ !(start_pfn & BASE_MEM_PFN_MASK_4GB))
+ continue;
+ } else if (reg_reqs->flags &
+ KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+ u64 end_pfn = start_pfn + nr_pages - 1;
+
+ if ((start_pfn & ~BASE_MEM_PFN_MASK_4GB) !=
+ (end_pfn & ~BASE_MEM_PFN_MASK_4GB))
+ start_pfn = end_pfn & ~BASE_MEM_PFN_MASK_4GB;
+ }
+
if ((start_pfn >= reg->start_pfn) &&
(start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))) {
@@ -275,7 +304,6 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
return reg;
}
}
- rbnode = rb_next(rbnode);
}
return NULL;
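
The new checks above keep executable (non-NX) regions from starting or ending on a 4GB boundary, and keep KBASE_REG_GPU_VA_SAME_4GB_PAGE regions inside a single 4GB chunk. A standalone sketch of the chunk arithmetic, assuming BASE_MEM_PFN_MASK_4GB covers the low 20 bits of a PFN (4GB expressed in 4KB pages):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed definition: 4GB in 4KB page frames is 2^20 pages */
    #define PFN_MASK_4GB ((1ULL << 20) - 1)

    int main(void)
    {
        uint64_t start_pfn = (1ULL << 20) - 8;  /* would straddle the 4GB mark */
        uint64_t nr_pages  = 16;
        uint64_t end_pfn   = start_pfn + nr_pages - 1;

        /* Same test as the SAME_4GB_PAGE branch above: if start and end fall
         * in different 4GB chunks, move the start up to the next chunk.
         */
        if ((start_pfn & ~PFN_MASK_4GB) != (end_pfn & ~PFN_MASK_4GB))
            start_pfn = end_pfn & ~PFN_MASK_4GB;

        printf("adjusted start_pfn: 0x%llx\n", (unsigned long long)start_pfn);
        return 0;
    }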
@@ -289,7 +317,7 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
* region lock held. The associated memory is not released (see
* kbase_free_alloced_region). Internal use only.
*/
-static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg)
+int kbase_remove_va_region(struct kbase_va_region *reg)
{
struct rb_node *rbprev;
struct kbase_va_region *prev = NULL;
@@ -301,7 +329,7 @@ static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_re
int merged_back = 0;
int err = 0;
- reg_rbtree = kbase_reg_flags_to_rbtree(kctx, reg);
+ reg_rbtree = reg->rbtree;
/* Try to merge with the previous block first */
rbprev = rb_prev(&(reg->rblink));
@@ -334,7 +362,7 @@ static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_re
merged_back = 1;
if (merged_front) {
/* We already merged with prev, free it */
- kbase_free_alloced_region(reg);
+ kfree(reg);
}
}
}
@@ -347,7 +375,9 @@ static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_re
*/
struct kbase_va_region *free_reg;
- free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK);
+ free_reg = kbase_alloc_free_region(reg_rbtree,
+ reg->start_pfn, reg->nr_pages,
+ reg->flags & KBASE_REG_ZONE_MASK);
if (!free_reg) {
err = -ENOMEM;
goto out;
@@ -362,14 +392,21 @@ static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_re
KBASE_EXPORT_TEST_API(kbase_remove_va_region);
/**
- * @brief Insert a VA region to the list, replacing the current at_reg.
+ * kbase_insert_va_region_nolock - Insert a VA region to the list,
+ * replacing the existing one.
+ *
+ * @new_reg: The new region to insert
+ * @at_reg: The region to replace
+ * @start_pfn: The Page Frame Number to insert at
+ * @nr_pages: The number of pages of the region
*/
-static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
+static int kbase_insert_va_region_nolock(struct kbase_va_region *new_reg,
+ struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
{
struct rb_root *reg_rbtree = NULL;
int err = 0;
- reg_rbtree = kbase_reg_flags_to_rbtree(kctx, at_reg);
+ reg_rbtree = at_reg->rbtree;
/* Must be a free region */
KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
@@ -385,7 +422,7 @@ static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbas
if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
rb_replace_node(&(at_reg->rblink), &(new_reg->rblink),
reg_rbtree);
- kbase_free_alloced_region(at_reg);
+ kfree(at_reg);
}
/* New region replaces the start of the old one, so insert before. */
else if (at_reg->start_pfn == start_pfn) {
@@ -393,19 +430,19 @@ static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbas
KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
at_reg->nr_pages -= nr_pages;
- kbase_region_tracker_insert(kctx, new_reg);
+ kbase_region_tracker_insert(new_reg);
}
/* New region replaces the end of the old one, so insert after. */
else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
at_reg->nr_pages -= nr_pages;
- kbase_region_tracker_insert(kctx, new_reg);
+ kbase_region_tracker_insert(new_reg);
}
/* New region splits the old one, so insert and create new */
else {
struct kbase_va_region *new_front_reg;
- new_front_reg = kbase_alloc_free_region(kctx,
+ new_front_reg = kbase_alloc_free_region(reg_rbtree,
at_reg->start_pfn,
start_pfn - at_reg->start_pfn,
at_reg->flags & KBASE_REG_ZONE_MASK);
@@ -414,8 +451,8 @@ static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbas
at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
at_reg->start_pfn = start_pfn + nr_pages;
- kbase_region_tracker_insert(kctx, new_front_reg);
- kbase_region_tracker_insert(kctx, new_reg);
+ kbase_region_tracker_insert(new_front_reg);
+ kbase_region_tracker_insert(new_reg);
} else {
err = -ENOMEM;
}
@@ -425,21 +462,84 @@ static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbas
}
/**
- * @brief Add a VA region to the list.
+ * kbase_add_va_region - Add a VA region to the region list for a context.
+ *
+ * @kctx: kbase context containing the region
+ * @reg: the region to add
+ * @addr: the address to insert the region at
+ * @nr_pages: the number of pages in the region
+ * @align: the minimum alignment in pages
*/
int kbase_add_va_region(struct kbase_context *kctx,
struct kbase_va_region *reg, u64 addr,
size_t nr_pages, size_t align)
{
- struct kbase_va_region *tmp;
- u64 gpu_pfn = addr >> PAGE_SHIFT;
int err = 0;
+ struct kbase_device *kbdev = kctx->kbdev;
+ int cpu_va_bits = kbase_get_num_cpu_va_bits(kctx);
+ int gpu_pc_bits =
+ kbdev->gpu_props.props.core_props.log2_program_counter_size;
KBASE_DEBUG_ASSERT(NULL != kctx);
KBASE_DEBUG_ASSERT(NULL != reg);
lockdep_assert_held(&kctx->reg_lock);
+ /* The executable allocation from the SAME_VA zone would already have an
+ * appropriately aligned GPU VA chosen for it.
+ */
+ if (!(reg->flags & KBASE_REG_GPU_NX) && !addr) {
+ if (cpu_va_bits > gpu_pc_bits) {
+ align = max(align, (size_t)((1ULL << gpu_pc_bits)
+ >> PAGE_SHIFT));
+ }
+ }
+
+ do {
+ err = kbase_add_va_region_rbtree(kbdev, reg, addr, nr_pages,
+ align);
+ if (err != -ENOMEM)
+ break;
+
+ /*
+ * If the allocation is not from the same zone as JIT
+		 * then don't retry; we're out of VA and there is
+		 * nothing that can be done about it.
+ */
+ if ((reg->flags & KBASE_REG_ZONE_MASK) !=
+ KBASE_REG_ZONE_CUSTOM_VA)
+ break;
+ } while (kbase_jit_evict(kctx));
+
+ return err;
+}
+
+KBASE_EXPORT_TEST_API(kbase_add_va_region);
+
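
kbase_add_va_region() now wraps the rbtree placement in a retry loop: on -ENOMEM it evicts JIT allocations and tries again, but only for the CUSTOM_VA zone that JIT allocates from. A compile-only sketch of that shape, with hypothetical place/reclaim callbacks:

    #include <errno.h>

    /* Shape of the retry loop above: retry the placement while the reclaim
     * callback frees anything, but only when reclaim can actually help the
     * zone being allocated from.
     */
    static int place_with_reclaim(int (*place)(void), int (*reclaim)(void),
                                  int reclaim_helps)
    {
        int err;

        do {
            err = place();
            if (err != -ENOMEM || !reclaim_helps)
                break;
        } while (reclaim());

        return err;
    }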
+/**
+ * kbase_add_va_region_rbtree - Insert a region into its corresponding rbtree
+ *
+ * Insert a region into the rbtree that was specified when the region was
+ * created. If addr is 0, a free area in the rbtree is used; otherwise the
+ * region is inserted at the specified address.
+ *
+ * @kbdev: The kbase device
+ * @reg: The region to add
+ * @addr: The address to add the region at, or 0 to map at any available address
+ * @nr_pages: The size of the region in pages
+ * @align: The minimum alignment in pages
+ */
+int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
+ struct kbase_va_region *reg,
+ u64 addr, size_t nr_pages, size_t align)
+{
+ struct rb_root *rbtree = NULL;
+ struct kbase_va_region *tmp;
+ u64 gpu_pfn = addr >> PAGE_SHIFT;
+ int err = 0;
+
+ rbtree = reg->rbtree;
+
if (!align)
align = 1;
@@ -447,102 +547,81 @@ int kbase_add_va_region(struct kbase_context *kctx,
KBASE_DEBUG_ASSERT(is_power_of_2(align));
KBASE_DEBUG_ASSERT(nr_pages > 0);
- /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */
+ /* Path 1: Map a specific address. Find the enclosing region,
+ * which *must* be free.
+ */
if (gpu_pfn) {
- struct device *dev = kctx->kbdev->dev;
+ struct device *dev = kbdev->dev;
KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
- tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages);
- if (!tmp) {
- dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
+ tmp = find_region_enclosing_range_rbtree(rbtree, gpu_pfn,
+ nr_pages);
+ if (kbase_is_region_invalid(tmp)) {
+ dev_warn(dev, "Enclosing region not found or invalid: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
err = -ENOMEM;
goto exit;
- }
- if (!(tmp->flags & KBASE_REG_FREE)) {
- dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK);
- dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages);
- dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align);
+ } else if (!kbase_is_region_free(tmp)) {
+ dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n",
+ tmp->start_pfn, tmp->flags,
+ tmp->nr_pages, gpu_pfn, nr_pages);
err = -ENOMEM;
goto exit;
}
- err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages);
+ err = kbase_insert_va_region_nolock(reg, tmp, gpu_pfn,
+ nr_pages);
if (err) {
dev_warn(dev, "Failed to insert va region");
err = -ENOMEM;
- goto exit;
}
-
- goto exit;
- }
-
- /* Path 2: Map any free address which meets the requirements.
- *
- * Depending on the zone the allocation request is for
- * we might need to retry it. */
- do {
+ } else {
+ /* Path 2: Map any free address which meets the requirements. */
u64 start_pfn;
size_t align_offset = align;
size_t align_mask = align - 1;
if ((reg->flags & KBASE_REG_TILER_ALIGN_TOP)) {
- WARN(align > 1,
- "kbase_add_va_region with align %lx might not be honored for KBASE_REG_TILER_ALIGN_TOP memory",
+ WARN(align > 1, "%s with align %lx might not be honored for KBASE_REG_TILER_ALIGN_TOP memory",
+ __func__,
(unsigned long)align);
align_mask = reg->extent - 1;
align_offset = reg->extent - reg->initial_commit;
}
- tmp = kbase_region_tracker_find_region_meeting_reqs(kctx, reg,
+ tmp = kbase_region_tracker_find_region_meeting_reqs(reg,
nr_pages, align_offset, align_mask,
&start_pfn);
if (tmp) {
- err = kbase_insert_va_region_nolock(kctx, reg, tmp,
- start_pfn, nr_pages);
- break;
+ err = kbase_insert_va_region_nolock(reg, tmp,
+ start_pfn, nr_pages);
+ } else {
+ err = -ENOMEM;
}
+ }
- /*
- * If the allocation is not from the same zone as JIT
- * then don't retry, we're out of VA and there is
- * nothing which can be done about it.
- */
- if ((reg->flags & KBASE_REG_ZONE_MASK) !=
- KBASE_REG_ZONE_CUSTOM_VA)
- break;
- } while (kbase_jit_evict(kctx));
-
- if (!tmp)
- err = -ENOMEM;
-
- exit:
+exit:
return err;
}
-KBASE_EXPORT_TEST_API(kbase_add_va_region);
-
/**
* @brief Initialize the internal region tracker data structure.
*/
static void kbase_region_tracker_ds_init(struct kbase_context *kctx,
struct kbase_va_region *same_va_reg,
- struct kbase_va_region *exec_reg,
struct kbase_va_region *custom_va_reg)
{
kctx->reg_rbtree_same = RB_ROOT;
- kbase_region_tracker_insert(kctx, same_va_reg);
+ kbase_region_tracker_insert(same_va_reg);
- /* Although exec and custom_va_reg don't always exist,
+ /* Although custom_va_reg doesn't always exist,
* initialize unconditionally because of the mem_view debugfs
- * implementation which relies on these being empty */
- kctx->reg_rbtree_exec = RB_ROOT;
+ * implementation which relies on this being empty.
+ */
kctx->reg_rbtree_custom = RB_ROOT;
- if (exec_reg)
- kbase_region_tracker_insert(kctx, exec_reg);
if (custom_va_reg)
- kbase_region_tracker_insert(kctx, custom_va_reg);
+ kbase_region_tracker_insert(custom_va_reg);
}
static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
@@ -555,6 +634,16 @@ static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
if (rbnode) {
rb_erase(rbnode, rbtree);
reg = rb_entry(rbnode, struct kbase_va_region, rblink);
+ WARN_ON(reg->va_refcnt != 1);
+			/* Reset the start_pfn - the rbtree is being
+			 * destroyed and we've already erased this region,
+			 * so there is no further need to attempt to remove
+			 * it. This doesn't affect cleanup of regions used
+			 * as sticky resources, as that cleanup anyway has
+			 * to happen before the region tracker is torn down.
+			 */
+ reg->start_pfn = 0;
kbase_free_alloced_region(reg);
}
} while (rbnode);
@@ -563,19 +652,28 @@ static void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
void kbase_region_tracker_term(struct kbase_context *kctx)
{
kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_same);
- kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_exec);
kbase_region_tracker_erase_rbtree(&kctx->reg_rbtree_custom);
}
+void kbase_region_tracker_term_rbtree(struct rb_root *rbtree)
+{
+ kbase_region_tracker_erase_rbtree(rbtree);
+}
+
+static size_t kbase_get_same_va_bits(struct kbase_context *kctx)
+{
+ return min(kbase_get_num_cpu_va_bits(kctx),
+ (size_t) kctx->kbdev->gpu_props.mmu.va_bits);
+}
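
The min() reflects that a SAME_VA address must be representable by both the CPU and the GPU MMU. A quick standalone illustration with made-up widths:

    #include <stdio.h>

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        size_t cpu_va_bits = 48;        /* hypothetical CPU VA width */
        size_t gpu_mmu_va_bits = 40;    /* hypothetical GPU MMU VA width */

        /* A SAME_VA mapping must be addressable on both sides */
        printf("same_va_bits = %zu\n", min_sz(cpu_va_bits, gpu_mmu_va_bits));
        return 0;
    }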
+
/**
* Initialize the region tracker data structure.
*/
int kbase_region_tracker_init(struct kbase_context *kctx)
{
struct kbase_va_region *same_va_reg;
- struct kbase_va_region *exec_reg = NULL;
struct kbase_va_region *custom_va_reg = NULL;
- size_t same_va_bits = sizeof(void *) * BITS_PER_BYTE;
+ size_t same_va_bits = kbase_get_same_va_bits(kctx);
u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
u64 same_va_pages;
@@ -584,29 +682,9 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
/* Take the lock as kbase_free_alloced_region requires it */
kbase_gpu_vm_lock(kctx);
-#if defined(CONFIG_ARM64)
- same_va_bits = VA_BITS;
-#elif defined(CONFIG_X86_64)
- same_va_bits = 47;
-#elif defined(CONFIG_64BIT)
-#error Unsupported 64-bit architecture
-#endif
-
-#ifdef CONFIG_64BIT
- if (kbase_ctx_flag(kctx, KCTX_COMPAT))
- same_va_bits = 32;
- else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
- same_va_bits = 33;
-#endif
-
- if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits) {
- err = -EINVAL;
- goto fail_unlock;
- }
-
same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
/* all have SAME_VA */
- same_va_reg = kbase_alloc_free_region(kctx, 1,
+ same_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 1,
same_va_pages,
KBASE_REG_ZONE_SAME_VA);
@@ -616,7 +694,7 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
}
#ifdef CONFIG_64BIT
- /* 32-bit clients have exec and custom VA zones */
+ /* 32-bit clients have custom VA zones */
if (kbase_ctx_flag(kctx, KCTX_COMPAT)) {
#endif
if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
@@ -630,37 +708,27 @@ int kbase_region_tracker_init(struct kbase_context *kctx)
if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
- exec_reg = kbase_alloc_free_region(kctx,
- KBASE_REG_ZONE_EXEC_BASE,
- KBASE_REG_ZONE_EXEC_SIZE,
- KBASE_REG_ZONE_EXEC);
-
- if (!exec_reg) {
- err = -ENOMEM;
- goto fail_free_same_va;
- }
-
- custom_va_reg = kbase_alloc_free_region(kctx,
+ custom_va_reg = kbase_alloc_free_region(
+ &kctx->reg_rbtree_custom,
KBASE_REG_ZONE_CUSTOM_VA_BASE,
custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
if (!custom_va_reg) {
err = -ENOMEM;
- goto fail_free_exec;
+ goto fail_free_same_va;
}
#ifdef CONFIG_64BIT
}
#endif
- kbase_region_tracker_ds_init(kctx, same_va_reg, exec_reg, custom_va_reg);
+ kbase_region_tracker_ds_init(kctx, same_va_reg, custom_va_reg);
kctx->same_va_end = same_va_pages + 1;
+
kbase_gpu_vm_unlock(kctx);
return 0;
-fail_free_exec:
- kbase_free_alloced_region(exec_reg);
fail_free_same_va:
kbase_free_alloced_region(same_va_reg);
fail_unlock:
@@ -668,33 +736,16 @@ fail_unlock:
return err;
}
-int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
-{
#ifdef CONFIG_64BIT
+static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
+ u64 jit_va_pages)
+{
struct kbase_va_region *same_va;
struct kbase_va_region *custom_va_reg;
- u64 same_va_bits;
+ u64 same_va_bits = kbase_get_same_va_bits(kctx);
u64 total_va_size;
int err;
- /*
- * Nothing to do for 32-bit clients, JIT uses the existing
- * custom VA zone.
- */
- if (kbase_ctx_flag(kctx, KCTX_COMPAT))
- return 0;
-
-#if defined(CONFIG_ARM64)
- same_va_bits = VA_BITS;
-#elif defined(CONFIG_X86_64)
- same_va_bits = 47;
-#elif defined(CONFIG_64BIT)
-#error Unsupported 64-bit architecture
-#endif
-
- if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
- same_va_bits = 33;
-
total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
kbase_gpu_vm_lock(kctx);
@@ -732,7 +783,7 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
* Create a custom VA zone at the end of the VA for allocations which
* JIT can use so it doesn't have to allocate VA from the kernel.
*/
- custom_va_reg = kbase_alloc_free_region(kctx,
+ custom_va_reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
kctx->same_va_end,
jit_va_pages,
KBASE_REG_ZONE_CUSTOM_VA);
@@ -746,7 +797,7 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
goto fail_unlock;
}
- kbase_region_tracker_insert(kctx, custom_va_reg);
+ kbase_region_tracker_insert(custom_va_reg);
kbase_gpu_vm_unlock(kctx);
return 0;
@@ -754,11 +805,30 @@ int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
fail_unlock:
kbase_gpu_vm_unlock(kctx);
return err;
-#else
- return 0;
+}
+#endif
+
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
+ u8 max_allocations, u8 trim_level)
+{
+ if (trim_level > 100)
+ return -EINVAL;
+
+ kctx->jit_max_allocations = max_allocations;
+ kctx->trim_level = trim_level;
+
+#ifdef CONFIG_64BIT
+ if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+ return kbase_region_tracker_init_jit_64(kctx, jit_va_pages);
#endif
+ /*
+ * Nothing to do for 32-bit clients, JIT uses the existing
+ * custom VA zone.
+ */
+ return 0;
}
+
int kbase_mem_init(struct kbase_device *kbdev)
{
struct kbasep_mem_device *memdev;
@@ -824,14 +894,15 @@ KBASE_EXPORT_TEST_API(kbase_mem_term);
* The allocated object is not part of any list yet, and is flagged as
* KBASE_REG_FREE. No mapping is allocated yet.
*
- * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC
+ * zone is KBASE_REG_ZONE_CUSTOM_VA or KBASE_REG_ZONE_SAME_VA.
*
*/
-struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)
+struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
+ u64 start_pfn, size_t nr_pages, int zone)
{
struct kbase_va_region *new_reg;
- KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(rbtree != NULL);
/* zone argument should only contain zone related region flags */
KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
@@ -844,9 +915,10 @@ struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64
if (!new_reg)
return NULL;
+ new_reg->va_refcnt = 1;
new_reg->cpu_alloc = NULL; /* no alloc bound yet */
new_reg->gpu_alloc = NULL; /* no alloc bound yet */
- new_reg->kctx = kctx;
+ new_reg->rbtree = rbtree;
new_reg->flags = zone | KBASE_REG_FREE;
new_reg->flags |= KBASE_REG_GROWABLE;
@@ -861,6 +933,29 @@ struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64
KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
+static struct kbase_context *kbase_reg_flags_to_kctx(
+ struct kbase_va_region *reg)
+{
+ struct kbase_context *kctx = NULL;
+ struct rb_root *rbtree = reg->rbtree;
+
+ switch (reg->flags & KBASE_REG_ZONE_MASK) {
+ case KBASE_REG_ZONE_CUSTOM_VA:
+ kctx = container_of(rbtree, struct kbase_context,
+ reg_rbtree_custom);
+ break;
+ case KBASE_REG_ZONE_SAME_VA:
+ kctx = container_of(rbtree, struct kbase_context,
+ reg_rbtree_same);
+ break;
+ default:
+ WARN(1, "Unknown zone in region: flags=0x%lx\n", reg->flags);
+ break;
+ }
+
+ return kctx;
+}
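
With the kctx back-pointer removed from regions, the owning context is recovered from the embedded rbtree via container_of(). A userspace re-creation of the same pattern, with stand-in types:

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal re-creation of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ctx {
        int id;
        int reg_rbtree_same;    /* stands in for a struct rb_root member */
    };

    int main(void)
    {
        struct ctx c = { .id = 7 };
        int *root = &c.reg_rbtree_same;

        /* Given only a pointer to the embedded member, recover the owner */
        struct ctx *owner = container_of(root, struct ctx, reg_rbtree_same);

        printf("owner id: %d\n", owner->id);
        return 0;
    }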
+
/**
* @brief Free a region object.
*
@@ -874,6 +969,16 @@ KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
void kbase_free_alloced_region(struct kbase_va_region *reg)
{
if (!(reg->flags & KBASE_REG_FREE)) {
+ struct kbase_context *kctx = kbase_reg_flags_to_kctx(reg);
+
+ if (WARN_ON(!kctx))
+ return;
+
+ if (WARN_ON(kbase_is_region_invalid(reg)))
+ return;
+
+ mutex_lock(&kctx->jit_evict_lock);
+
/*
* The physical allocation should have been removed from the
* eviction list before this function is called. However, in the
@@ -882,6 +987,8 @@ void kbase_free_alloced_region(struct kbase_va_region *reg)
* on the list at termination time of the region tracker.
*/
if (!list_empty(&reg->gpu_alloc->evict_node)) {
+ mutex_unlock(&kctx->jit_evict_lock);
+
/*
* Unlink the physical allocation before unmaking it
* evictable so that the allocation isn't grown back to
@@ -904,21 +1011,25 @@ void kbase_free_alloced_region(struct kbase_va_region *reg)
KBASE_MEM_TYPE_NATIVE);
kbase_mem_evictable_unmake(reg->gpu_alloc);
}
+ } else {
+ mutex_unlock(&kctx->jit_evict_lock);
}
/*
* Remove the region from the sticky resource metadata
* list should it be there.
*/
- kbase_sticky_resource_release(reg->kctx, NULL,
+ kbase_sticky_resource_release(kctx, NULL,
reg->start_pfn << PAGE_SHIFT);
kbase_mem_phy_alloc_put(reg->cpu_alloc);
kbase_mem_phy_alloc_put(reg->gpu_alloc);
- /* To detect use-after-free in debug builds */
- KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+
+ reg->flags |= KBASE_REG_VA_FREED;
+ kbase_va_region_alloc_put(kctx, reg);
+ } else {
+ kfree(reg);
}
- kfree(reg);
}
KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
@@ -958,11 +1069,13 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
for (i = 0; i < alloc->imported.alias.nents; i++) {
if (alloc->imported.alias.aliased[i].alloc) {
- err = kbase_mmu_insert_pages(kctx,
+ err = kbase_mmu_insert_pages(kctx->kbdev,
+ &kctx->mmu,
reg->start_pfn + (i * stride),
alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset,
alloc->imported.alias.aliased[i].length,
- reg->flags & gwt_mask);
+ reg->flags & gwt_mask,
+ kctx->as_nr);
if (err)
goto bad_insert;
@@ -979,10 +1092,13 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
}
}
} else {
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
+ err = kbase_mmu_insert_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn,
kbase_get_gpu_phy_pages(reg),
kbase_reg_current_backed_size(reg),
- reg->flags & gwt_mask);
+ reg->flags & gwt_mask,
+ kctx->as_nr);
if (err)
goto bad_insert;
kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc);
@@ -998,12 +1114,16 @@ bad_insert:
KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
while (i--)
if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
- kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length);
+ kbase_mmu_teardown_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn + (i * stride),
+ reg->gpu_alloc->imported.alias.aliased[i].length,
+ kctx->as_nr);
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
}
}
- kbase_remove_va_region(kctx, reg);
+ kbase_remove_va_region(reg);
return err;
}
@@ -1023,13 +1143,16 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
size_t i;
- err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages);
+ err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+ reg->start_pfn, reg->nr_pages, kctx->as_nr);
KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
} else {
- err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
+ err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+ reg->start_pfn, kbase_reg_current_backed_size(reg),
+ kctx->as_nr);
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
}
@@ -1050,7 +1173,6 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
if (err)
return err;
- err = kbase_remove_va_region(kctx, reg);
return err;
}
@@ -1221,8 +1343,8 @@ static int kbase_do_syncset(struct kbase_context *kctx,
/* find the region where the virtual address is contained */
reg = kbase_region_tracker_find_region_enclosing_address(kctx,
sset->mem_handle.basep.handle);
- if (!reg) {
- dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX",
+ if (kbase_is_region_invalid_or_free(reg)) {
+ dev_warn(kctx->kbdev->dev, "Can't find a valid region at VA 0x%016llX",
sset->mem_handle.basep.handle);
err = -EINVAL;
goto out_unlock;
@@ -1332,7 +1454,7 @@ int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *re
lockdep_assert_held(&kctx->reg_lock);
if (reg->flags & KBASE_REG_JIT) {
- dev_warn(reg->kctx->kbdev->dev, "Attempt to free JIT memory!\n");
+ dev_warn(kctx->kbdev->dev, "Attempt to free JIT memory!\n");
return -EINVAL;
}
@@ -1359,7 +1481,7 @@ int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *re
err = kbase_gpu_munmap(kctx, reg);
if (err) {
- dev_warn(reg->kctx->kbdev->dev, "Could not unmap from the GPU...\n");
+ dev_warn(kctx->kbdev->dev, "Could not unmap from the GPU...\n");
goto out;
}
@@ -1416,7 +1538,7 @@ int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
/* A real GPU va */
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE)) {
+ if (kbase_is_region_invalid_or_free(reg)) {
dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
gpu_addr);
err = -EINVAL;
@@ -1469,7 +1591,8 @@ int kbase_update_region_flags(struct kbase_context *kctx,
reg->flags |= KBASE_REG_GPU_NX;
if (!kbase_device_is_cpu_coherent(kctx->kbdev)) {
- if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED)
+ if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED &&
+ !(flags & BASE_MEM_UNCACHED_GPU))
return -EINVAL;
} else if (flags & (BASE_MEM_COHERENT_SYSTEM |
BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
@@ -1484,8 +1607,20 @@ int kbase_update_region_flags(struct kbase_context *kctx,
if (flags & BASE_MEM_TILER_ALIGN_TOP)
reg->flags |= KBASE_REG_TILER_ALIGN_TOP;
+
/* Set up default MEMATTR usage */
- if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
+ if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
+ if (kctx->kbdev->mmu_mode->flags &
+ KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+ /* Override shareability, and MEMATTR for uncached */
+ reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
+ reg->flags |= KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_NON_CACHEABLE);
+ } else {
+ dev_warn(kctx->kbdev->dev,
+ "Can't allocate GPU uncached memory due to MMU in Legacy Mode\n");
+ return -EINVAL;
+ }
+ } else if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
(reg->flags & KBASE_REG_SHARE_BOTH)) {
reg->flags |=
KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
@@ -1494,12 +1629,17 @@ int kbase_update_region_flags(struct kbase_context *kctx,
KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
}
+ if (flags & BASE_MEM_PERMANENT_KERNEL_MAPPING)
+ reg->flags |= KBASE_REG_PERMANENT_KERNEL_MAPPING;
+
+ if (flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE)
+ reg->flags |= KBASE_REG_GPU_VA_SAME_4GB_PAGE;
+
return 0;
}
-int kbase_alloc_phy_pages_helper(
- struct kbase_mem_phy_alloc *alloc,
- size_t nr_pages_requested)
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
+ size_t nr_pages_requested)
{
int new_page_count __maybe_unused;
size_t nr_left = nr_pages_requested;
@@ -1508,14 +1648,14 @@ int kbase_alloc_phy_pages_helper(
struct tagged_addr *tp;
KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
- KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+ KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
if (alloc->reg) {
if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
goto invalid_request;
}
- kctx = alloc->imported.kctx;
+ kctx = alloc->imported.native.kctx;
if (nr_pages_requested == 0)
goto done; /*nothing to do*/
@@ -1551,7 +1691,7 @@ int kbase_alloc_phy_pages_helper(
if (nr_left) {
struct kbase_sub_alloc *sa, *temp_sa;
- mutex_lock(&kctx->mem_partials_lock);
+ spin_lock(&kctx->mem_partials_lock);
list_for_each_entry_safe(sa, temp_sa,
&kctx->mem_partials, link) {
@@ -1574,7 +1714,7 @@ int kbase_alloc_phy_pages_helper(
}
}
}
- mutex_unlock(&kctx->mem_partials_lock);
+ spin_unlock(&kctx->mem_partials_lock);
}
/* only if we actually have a chunk left <512. If more it indicates
@@ -1621,9 +1761,9 @@ int kbase_alloc_phy_pages_helper(
nr_left = 0;
/* expose for later use */
- mutex_lock(&kctx->mem_partials_lock);
+ spin_lock(&kctx->mem_partials_lock);
list_add(&sa->link, &kctx->mem_partials);
- mutex_unlock(&kctx->mem_partials_lock);
+ spin_unlock(&kctx->mem_partials_lock);
}
}
}
@@ -1649,12 +1789,18 @@ done:
alloc_failed:
/* rollback needed if got one or more 2MB but failed later */
- if (nr_left != nr_pages_requested)
- kbase_mem_pool_free_pages(&kctx->lp_mem_pool,
- nr_pages_requested - nr_left,
- alloc->pages + alloc->nents,
- false,
- false);
+ if (nr_left != nr_pages_requested) {
+ size_t nr_pages_to_free = nr_pages_requested - nr_left;
+
+ alloc->nents += nr_pages_to_free;
+
+ kbase_process_page_usage_inc(kctx, nr_pages_to_free);
+ kbase_atomic_add_pages(nr_pages_to_free, &kctx->used_pages);
+ kbase_atomic_add_pages(nr_pages_to_free,
+ &kctx->kbdev->memdev.used_pages);
+
+ kbase_free_phy_pages_helper(alloc, nr_pages_to_free);
+ }
kbase_process_page_usage_dec(kctx, nr_pages_requested);
kbase_atomic_sub_pages(nr_pages_requested, &kctx->used_pages);
@@ -1665,15 +1811,210 @@ invalid_request:
return -ENOMEM;
}
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
+ struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
+ size_t nr_pages_requested,
+ struct kbase_sub_alloc **prealloc_sa)
+{
+ int new_page_count __maybe_unused;
+ size_t nr_left = nr_pages_requested;
+ int res;
+ struct kbase_context *kctx;
+ struct tagged_addr *tp;
+ struct tagged_addr *new_pages = NULL;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+ KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+
+ lockdep_assert_held(&pool->pool_lock);
+
+#if !defined(CONFIG_MALI_2MB_ALLOC)
+ WARN_ON(pool->order);
+#endif
+
+ if (alloc->reg) {
+ if (nr_pages_requested > alloc->reg->nr_pages - alloc->nents)
+ goto invalid_request;
+ }
+
+ kctx = alloc->imported.native.kctx;
+
+ lockdep_assert_held(&kctx->mem_partials_lock);
+
+ if (nr_pages_requested == 0)
+ goto done; /*nothing to do*/
+
+ new_page_count = kbase_atomic_add_pages(
+ nr_pages_requested, &kctx->used_pages);
+ kbase_atomic_add_pages(nr_pages_requested,
+ &kctx->kbdev->memdev.used_pages);
+
+ /* Increase mm counters before we allocate pages so that this
+ * allocation is visible to the OOM killer
+ */
+ kbase_process_page_usage_inc(kctx, nr_pages_requested);
+
+ tp = alloc->pages + alloc->nents;
+ new_pages = tp;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+ if (pool->order) {
+ int nr_lp = nr_left / (SZ_2M / SZ_4K);
+
+ res = kbase_mem_pool_alloc_pages_locked(pool,
+ nr_lp * (SZ_2M / SZ_4K),
+ tp);
+
+ if (res > 0) {
+ nr_left -= res;
+ tp += res;
+ }
+
+ if (nr_left) {
+ struct kbase_sub_alloc *sa, *temp_sa;
+
+ list_for_each_entry_safe(sa, temp_sa,
+ &kctx->mem_partials, link) {
+ int pidx = 0;
+
+ while (nr_left) {
+ pidx = find_next_zero_bit(sa->sub_pages,
+ SZ_2M / SZ_4K,
+ pidx);
+ bitmap_set(sa->sub_pages, pidx, 1);
+ *tp++ = as_tagged_tag(page_to_phys(
+ sa->page + pidx),
+ FROM_PARTIAL);
+ nr_left--;
+
+ if (bitmap_full(sa->sub_pages,
+ SZ_2M / SZ_4K)) {
+ /* unlink from partial list when
+ * full
+ */
+ list_del_init(&sa->link);
+ break;
+ }
+ }
+ }
+ }
+
+	/* Only needed if a chunk of fewer than 512 pages is left; anything
+	 * more means the 2MB allocation above already failed, so there is
+	 * no point retrying here.
+ */
+ if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+ /* create a new partial and suballocate the rest from it
+ */
+ struct page *np = NULL;
+
+ np = kbase_mem_pool_alloc_locked(pool);
+
+ if (np) {
+ int i;
+ struct kbase_sub_alloc *const sa = *prealloc_sa;
+ struct page *p;
+
+ /* store pointers back to the control struct */
+ np->lru.next = (void *)sa;
+ for (p = np; p < np + SZ_2M / SZ_4K; p++)
+ p->lru.prev = (void *)np;
+ INIT_LIST_HEAD(&sa->link);
+ bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+ sa->page = np;
+
+ for (i = 0; i < nr_left; i++)
+ *tp++ = as_tagged_tag(
+ page_to_phys(np + i),
+ FROM_PARTIAL);
+
+ bitmap_set(sa->sub_pages, 0, nr_left);
+ nr_left = 0;
+ /* Indicate to user that we'll free this memory
+ * later.
+ */
+ *prealloc_sa = NULL;
+
+ /* expose for later use */
+ list_add(&sa->link, &kctx->mem_partials);
+ }
+ }
+ if (nr_left)
+ goto alloc_failed;
+ } else {
+#endif
+ res = kbase_mem_pool_alloc_pages_locked(pool,
+ nr_left,
+ tp);
+ if (res <= 0)
+ goto alloc_failed;
+#ifdef CONFIG_MALI_2MB_ALLOC
+ }
+#endif
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ kctx->id,
+ (u64)new_page_count);
+
+ alloc->nents += nr_pages_requested;
+done:
+ return new_pages;
+
+alloc_failed:
+ /* rollback needed if got one or more 2MB but failed later */
+ if (nr_left != nr_pages_requested) {
+ size_t nr_pages_to_free = nr_pages_requested - nr_left;
+
+ struct tagged_addr *start_free = alloc->pages + alloc->nents;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+ if (pool->order) {
+ while (nr_pages_to_free) {
+ if (is_huge_head(*start_free)) {
+ kbase_mem_pool_free_pages_locked(
+ pool, 512,
+ start_free,
+ false, /* not dirty */
+ true); /* return to pool */
+ nr_pages_to_free -= 512;
+ start_free += 512;
+ } else if (is_partial(*start_free)) {
+ free_partial_locked(kctx, pool,
+ *start_free);
+ nr_pages_to_free--;
+ start_free++;
+ }
+ }
+ } else {
+#endif
+ kbase_mem_pool_free_pages_locked(pool,
+ nr_pages_to_free,
+ start_free,
+ false, /* not dirty */
+ true); /* return to pool */
+#ifdef CONFIG_MALI_2MB_ALLOC
+ }
+#endif
+ }
+
+ kbase_process_page_usage_dec(kctx, nr_pages_requested);
+ kbase_atomic_sub_pages(nr_pages_requested, &kctx->used_pages);
+ kbase_atomic_sub_pages(nr_pages_requested,
+ &kctx->kbdev->memdev.used_pages);
+
+invalid_request:
+ return NULL;
+}
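
When CONFIG_MALI_2MB_ALLOC is enabled, the locked helper above first takes whole 2MB pages and then serves any remainder under 512 pages from a "partial" 2MB chunk tracked by a sub-page bitmap. The split arithmetic, with a made-up request size:

    #include <stdio.h>

    #define PAGES_PER_2MB 512   /* SZ_2M / SZ_4K */

    int main(void)
    {
        size_t nr_left = 1300;  /* 4KB pages requested (made-up figure) */
        size_t nr_lp = nr_left / PAGES_PER_2MB;

        /* Whole 2MB pages are taken first... */
        printf("2MB pages: %zu (covers %zu x 4KB)\n",
               nr_lp, nr_lp * PAGES_PER_2MB);

        /* ...then the remainder comes out of a partial 2MB chunk whose
         * sub_pages bitmap marks which 4KB sub-pages are in use.
         */
        printf("remainder from a partial: %zu\n", nr_left % PAGES_PER_2MB);
        return 0;
    }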
+
static void free_partial(struct kbase_context *kctx, struct tagged_addr tp)
{
struct page *p, *head_page;
struct kbase_sub_alloc *sa;
- p = phys_to_page(as_phys_addr_t(tp));
+ p = as_page(tp);
head_page = (struct page *)p->lru.prev;
sa = (struct kbase_sub_alloc *)head_page->lru.next;
- mutex_lock(&kctx->mem_partials_lock);
+ spin_lock(&kctx->mem_partials_lock);
clear_bit(p - head_page, sa->sub_pages);
if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
list_del(&sa->link);
@@ -1684,14 +2025,14 @@ static void free_partial(struct kbase_context *kctx, struct tagged_addr tp)
/* expose the partial again */
list_add(&sa->link, &kctx->mem_partials);
}
- mutex_unlock(&kctx->mem_partials_lock);
+ spin_unlock(&kctx->mem_partials_lock);
}
int kbase_free_phy_pages_helper(
struct kbase_mem_phy_alloc *alloc,
size_t nr_pages_to_free)
{
- struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_context *kctx = alloc->imported.native.kctx;
bool syncback;
bool reclaimed = (alloc->evicted != 0);
struct tagged_addr *start_free;
@@ -1699,7 +2040,7 @@ int kbase_free_phy_pages_helper(
size_t freed = 0;
KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
- KBASE_DEBUG_ASSERT(alloc->imported.kctx);
+ KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
/* early out if nothing to do */
@@ -1776,6 +2117,124 @@ int kbase_free_phy_pages_helper(
return 0;
}
+static void free_partial_locked(struct kbase_context *kctx,
+ struct kbase_mem_pool *pool, struct tagged_addr tp)
+{
+ struct page *p, *head_page;
+ struct kbase_sub_alloc *sa;
+
+ lockdep_assert_held(&pool->pool_lock);
+ lockdep_assert_held(&kctx->mem_partials_lock);
+
+ p = as_page(tp);
+ head_page = (struct page *)p->lru.prev;
+ sa = (struct kbase_sub_alloc *)head_page->lru.next;
+ clear_bit(p - head_page, sa->sub_pages);
+ if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+ list_del(&sa->link);
+ kbase_mem_pool_free_locked(pool, head_page, true);
+ kfree(sa);
+ } else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
+ SZ_2M / SZ_4K - 1) {
+ /* expose the partial again */
+ list_add(&sa->link, &kctx->mem_partials);
+ }
+}
+
+void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+ struct kbase_mem_pool *pool, struct tagged_addr *pages,
+ size_t nr_pages_to_free)
+{
+ struct kbase_context *kctx = alloc->imported.native.kctx;
+ bool syncback;
+ bool reclaimed = (alloc->evicted != 0);
+ struct tagged_addr *start_free;
+ size_t freed = 0;
+
+ KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
+ KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
+ KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
+
+ lockdep_assert_held(&pool->pool_lock);
+ lockdep_assert_held(&kctx->mem_partials_lock);
+
+ /* early out if nothing to do */
+ if (!nr_pages_to_free)
+ return;
+
+ start_free = pages;
+
+ syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
+
+ /* pad start_free to a valid start location */
+ while (nr_pages_to_free && is_huge(*start_free) &&
+ !is_huge_head(*start_free)) {
+ nr_pages_to_free--;
+ start_free++;
+ }
+
+ while (nr_pages_to_free) {
+ if (is_huge_head(*start_free)) {
+ /* This is a 2MB entry, so free all the 512 pages that
+ * it points to
+ */
+ WARN_ON(!pool->order);
+ kbase_mem_pool_free_pages_locked(pool,
+ 512,
+ start_free,
+ syncback,
+ reclaimed);
+ nr_pages_to_free -= 512;
+ start_free += 512;
+ freed += 512;
+ } else if (is_partial(*start_free)) {
+ WARN_ON(!pool->order);
+ free_partial_locked(kctx, pool, *start_free);
+ nr_pages_to_free--;
+ start_free++;
+ freed++;
+ } else {
+ struct tagged_addr *local_end_free;
+
+ WARN_ON(pool->order);
+ local_end_free = start_free;
+ while (nr_pages_to_free &&
+ !is_huge(*local_end_free) &&
+ !is_partial(*local_end_free)) {
+ local_end_free++;
+ nr_pages_to_free--;
+ }
+ kbase_mem_pool_free_pages_locked(pool,
+ local_end_free - start_free,
+ start_free,
+ syncback,
+ reclaimed);
+ freed += local_end_free - start_free;
+ start_free += local_end_free - start_free;
+ }
+ }
+
+ alloc->nents -= freed;
+
+ /*
+ * If the allocation was not evicted (i.e. evicted == 0) then
+ * the page accounting needs to be done.
+ */
+ if (!reclaimed) {
+ int new_page_count;
+
+ kbase_process_page_usage_dec(kctx, freed);
+ new_page_count = kbase_atomic_sub_pages(freed,
+ &kctx->used_pages);
+ kbase_atomic_sub_pages(freed,
+ &kctx->kbdev->memdev.used_pages);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ kctx->id,
+ (u64)new_page_count);
+ }
+}
+
void kbase_mem_kref_free(struct kref *kref)
{
struct kbase_mem_phy_alloc *alloc;
@@ -1784,12 +2243,27 @@ void kbase_mem_kref_free(struct kref *kref)
switch (alloc->type) {
case KBASE_MEM_TYPE_NATIVE: {
- WARN_ON(!alloc->imported.kctx);
- /*
- * The physical allocation must have been removed from the
- * eviction list before trying to free it.
- */
- WARN_ON(!list_empty(&alloc->evict_node));
+
+ if (!WARN_ON(!alloc->imported.native.kctx)) {
+ if (alloc->permanent_map)
+ kbase_phy_alloc_mapping_term(
+ alloc->imported.native.kctx,
+ alloc);
+
+ /*
+ * The physical allocation must have been removed from
+ * the eviction list before trying to free it.
+ */
+ mutex_lock(
+ &alloc->imported.native.kctx->jit_evict_lock);
+ WARN_ON(!list_empty(&alloc->evict_node));
+ mutex_unlock(
+ &alloc->imported.native.kctx->jit_evict_lock);
+
+ kbase_process_page_usage_dec(
+ alloc->imported.native.kctx,
+ alloc->imported.native.nr_struct_pages);
+ }
kbase_free_phy_pages_helper(alloc, alloc->nents);
break;
}
@@ -1810,11 +2284,6 @@ void kbase_mem_kref_free(struct kref *kref)
case KBASE_MEM_TYPE_RAW:
/* raw pages, external cleanup */
break;
- #ifdef CONFIG_UMP
- case KBASE_MEM_TYPE_IMPORTED_UMP:
- ump_dd_release(alloc->imported.ump_handle);
- break;
-#endif
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM:
dma_buf_detach(alloc->imported.umm.dma_buf,
@@ -1827,14 +2296,6 @@ void kbase_mem_kref_free(struct kref *kref)
mmdrop(alloc->imported.user_buf.mm);
kfree(alloc->imported.user_buf.pages);
break;
- case KBASE_MEM_TYPE_TB:{
- void *tb;
-
- tb = alloc->imported.kctx->jctx.tb;
- kbase_device_trace_buffer_uninstall(alloc->imported.kctx);
- vfree(tb);
- break;
- }
default:
		WARN(1, "Unexpected free of type %d\n", alloc->type);
break;
@@ -1913,6 +2374,14 @@ bool kbase_check_alloc_flags(unsigned long flags)
BASE_MEM_TILER_ALIGN_TOP)))
return false;
+	/* Requiring an allocation to lie within a single 4GB chunk is only
+	 * needed for TLS memory and for the Tiler heap, neither of which
+	 * will ever contain executable code.
+ */
+ if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags &
+ (BASE_MEM_PROT_GPU_EX | BASE_MEM_TILER_ALIGN_TOP)))
+ return false;
+
/* GPU should have at least read or write access otherwise there is no
reason for allocating. */
if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
@@ -1983,9 +2452,8 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
return -EINVAL;
}
- if (va_pages > (U64_MAX / PAGE_SIZE)) {
- /* 64-bit address range is the max */
- dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than 64-bit address range!",
+ if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
+ dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",
(unsigned long long)va_pages);
return -ENOMEM;
}
@@ -2044,6 +2512,13 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
#undef KBASE_MSG_PRE_FLAG
}
+ if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) &&
+ (va_pages > (BASE_MEM_PFN_MASK_4GB + 1))) {
+ dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GPU_VA_SAME_4GB_PAGE and va_pages==%lld greater than that needed for 4GB space",
+ (unsigned long long)va_pages);
+ return -EINVAL;
+ }
+
return 0;
#undef KBASE_MSG_PRE
}
@@ -2284,6 +2759,7 @@ static void kbase_jit_destroy_worker(struct work_struct *work)
int kbase_jit_init(struct kbase_context *kctx)
{
+ mutex_lock(&kctx->jit_evict_lock);
INIT_LIST_HEAD(&kctx->jit_active_head);
INIT_LIST_HEAD(&kctx->jit_pool_head);
INIT_LIST_HEAD(&kctx->jit_destroy_head);
@@ -2291,49 +2767,268 @@ int kbase_jit_init(struct kbase_context *kctx)
INIT_LIST_HEAD(&kctx->jit_pending_alloc);
INIT_LIST_HEAD(&kctx->jit_atoms_head);
+ mutex_unlock(&kctx->jit_evict_lock);
+
+ kctx->jit_max_allocations = 0;
+ kctx->jit_current_allocations = 0;
+ kctx->trim_level = 0;
return 0;
}
+/* Check whether an allocation from the JIT pool is the same size as the new
+ * JIT allocation and, if BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP is set, also
+ * meets the alignment requirements.
+ */
+static bool meet_size_and_tiler_align_top_requirements(struct kbase_context *kctx,
+ struct kbase_va_region *walker, struct base_jit_alloc_info *info)
+{
+ bool meet_reqs = true;
+
+ if (walker->nr_pages != info->va_pages)
+ meet_reqs = false;
+ else if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP) {
+ size_t align = info->extent;
+ size_t align_mask = align - 1;
+
+ if ((walker->start_pfn + info->commit_pages) & align_mask)
+ meet_reqs = false;
+ }
+
+ return meet_reqs;
+}
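
A worked example of the BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP test above, with hypothetical pool-candidate values; the check wants the top of the committed range aligned to the extent:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Hypothetical candidate region from the JIT pool */
        uint64_t start_pfn = 0x1000;
        size_t commit_pages = 192;
        size_t extent = 64;             /* requested alignment, in pages */
        size_t align_mask = extent - 1;

        /* TILER_ALIGN_TOP wants the *top* of the committed range aligned
         * to the extent, mirroring the check above.
         */
        if ((start_pfn + commit_pages) & align_mask)
            printf("candidate rejected\n");
        else
            printf("candidate ok\n");   /* 0x1000 + 192 = 4288, a multiple of 64 */
        return 0;
    }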
+
+static int kbase_jit_grow(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info, struct kbase_va_region *reg)
+{
+ size_t delta;
+ size_t pages_required;
+ size_t old_size;
+ struct kbase_mem_pool *pool;
+ int ret = -ENOMEM;
+ struct tagged_addr *gpu_pages;
+ struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+ int i;
+
+ if (info->commit_pages > reg->nr_pages) {
+ /* Attempted to grow larger than maximum size */
+ return -EINVAL;
+ }
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* Make the physical backing no longer reclaimable */
+ if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
+ goto update_failed;
+
+ if (reg->gpu_alloc->nents >= info->commit_pages)
+ goto done;
+
+ /* Grow the backing */
+ old_size = reg->gpu_alloc->nents;
+
+ /* Allocate some more pages */
+ delta = info->commit_pages - reg->gpu_alloc->nents;
+ pages_required = delta;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+ /* Preallocate memory for the sub-allocation structs */
+ for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+ prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]),
+ GFP_KERNEL);
+ if (!prealloc_sas[i])
+ goto update_failed;
+ }
+
+ if (pages_required >= (SZ_2M / SZ_4K)) {
+ pool = &kctx->lp_mem_pool;
+ /* Round up to number of 2 MB pages required */
+ pages_required += ((SZ_2M / SZ_4K) - 1);
+ pages_required /= (SZ_2M / SZ_4K);
+ } else {
+#endif
+ pool = &kctx->mem_pool;
+#ifdef CONFIG_MALI_2MB_ALLOC
+ }
+#endif
+
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ pages_required *= 2;
+
+ spin_lock(&kctx->mem_partials_lock);
+ kbase_mem_pool_lock(pool);
+
+	/* As we cannot allocate memory from the kernel with the vm_lock held,
+ * grow the pool to the required size with the lock dropped. We hold the
+ * pool lock to prevent another thread from allocating from the pool
+ * between the grow and allocation.
+ */
+ while (kbase_mem_pool_size(pool) < pages_required) {
+ int pool_delta = pages_required - kbase_mem_pool_size(pool);
+
+ kbase_mem_pool_unlock(pool);
+ spin_unlock(&kctx->mem_partials_lock);
+ kbase_gpu_vm_unlock(kctx);
+
+ if (kbase_mem_pool_grow(pool, pool_delta))
+ goto update_failed_unlocked;
+
+ kbase_gpu_vm_lock(kctx);
+ spin_lock(&kctx->mem_partials_lock);
+ kbase_mem_pool_lock(pool);
+ }
+
+ gpu_pages = kbase_alloc_phy_pages_helper_locked(reg->gpu_alloc, pool,
+ delta, &prealloc_sas[0]);
+ if (!gpu_pages) {
+ kbase_mem_pool_unlock(pool);
+ spin_unlock(&kctx->mem_partials_lock);
+ goto update_failed;
+ }
+
+ if (reg->cpu_alloc != reg->gpu_alloc) {
+ struct tagged_addr *cpu_pages;
+
+ cpu_pages = kbase_alloc_phy_pages_helper_locked(reg->cpu_alloc,
+ pool, delta, &prealloc_sas[1]);
+ if (!cpu_pages) {
+ kbase_free_phy_pages_helper_locked(reg->gpu_alloc,
+ pool, gpu_pages, delta);
+ kbase_mem_pool_unlock(pool);
+ spin_unlock(&kctx->mem_partials_lock);
+ goto update_failed;
+ }
+ }
+ kbase_mem_pool_unlock(pool);
+ spin_unlock(&kctx->mem_partials_lock);
+
+ ret = kbase_mem_grow_gpu_mapping(kctx, reg, info->commit_pages,
+ old_size);
+ /*
+ * The grow failed so put the allocation back in the
+ * pool and return failure.
+ */
+ if (ret)
+ goto update_failed;
+
+done:
+ ret = 0;
+
+ /* Update attributes of JIT allocation taken from the pool */
+ reg->initial_commit = info->commit_pages;
+ reg->extent = info->extent;
+
+update_failed:
+ kbase_gpu_vm_unlock(kctx);
+update_failed_unlocked:
+ for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+ kfree(prealloc_sas[i]);
+
+ return ret;
+}
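
The grow loop in kbase_jit_grow() follows a classic unlock/grow/relock/recheck shape, since refilling the pool may sleep and so cannot happen under the pool spinlock. A compile-only sketch with hypothetical pool stubs:

    #include <stddef.h>

    /* Hypothetical stubs standing in for the kbase pool API */
    extern size_t pool_size(void *pool);
    extern void pool_lock(void *pool);
    extern void pool_unlock(void *pool);
    extern int pool_grow(void *pool, size_t delta);

    /* Shape of the loop above: growing may sleep, so it cannot run under
     * the pool spinlock. Drop the lock, grow, re-take it, and re-check,
     * since another thread may have drained the pool in between.
     */
    static int ensure_pool_size(void *pool, size_t pages_required)
    {
        while (pool_size(pool) < pages_required) {
            size_t delta = pages_required - pool_size(pool);

            pool_unlock(pool);
            if (pool_grow(pool, delta))
                return -1;      /* out of memory */
            pool_lock(pool);
        }
        return 0;
    }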
+
struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
struct base_jit_alloc_info *info)
{
struct kbase_va_region *reg = NULL;
- struct kbase_va_region *walker;
- struct kbase_va_region *temp;
- size_t current_diff = SIZE_MAX;
- int ret;
+ if (kctx->jit_current_allocations >= kctx->jit_max_allocations) {
+ /* Too many current allocations */
+ return NULL;
+ }
+ if (info->max_allocations > 0 &&
+ kctx->jit_current_allocations_per_bin[info->bin_id] >=
+ info->max_allocations) {
+ /* Too many current allocations in this bin */
+ return NULL;
+ }
mutex_lock(&kctx->jit_evict_lock);
+
/*
* Scan the pool for an existing allocation which meets our
* requirements and remove it.
*/
- list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head, jit_node) {
-
- if (walker->nr_pages >= info->va_pages) {
- size_t min_size, max_size, diff;
+ if (info->usage_id != 0) {
+ /* First scan for an allocation with the same usage ID */
+ struct kbase_va_region *walker;
+ struct kbase_va_region *temp;
+ size_t current_diff = SIZE_MAX;
+
+ list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head,
+ jit_node) {
+
+ if (walker->jit_usage_id == info->usage_id &&
+ walker->jit_bin_id == info->bin_id &&
+ meet_size_and_tiler_align_top_requirements(
+ kctx, walker, info)) {
+ size_t min_size, max_size, diff;
+
+ /*
+				 * The JIT allocation's VA requirements have
+				 * been met; it's suitable, but other
+				 * allocations might be a better fit.
+ */
+ min_size = min_t(size_t,
+ walker->gpu_alloc->nents,
+ info->commit_pages);
+ max_size = max_t(size_t,
+ walker->gpu_alloc->nents,
+ info->commit_pages);
+ diff = max_size - min_size;
+
+ if (current_diff > diff) {
+ current_diff = diff;
+ reg = walker;
+ }
- /*
- * The JIT allocations VA requirements have been
- * meet, it's suitable but other allocations
- * might be a better fit.
- */
- min_size = min_t(size_t, walker->gpu_alloc->nents,
- info->commit_pages);
- max_size = max_t(size_t, walker->gpu_alloc->nents,
- info->commit_pages);
- diff = max_size - min_size;
-
- if (current_diff > diff) {
- current_diff = diff;
- reg = walker;
+ /* The allocation is an exact match */
+ if (current_diff == 0)
+ break;
}
+ }
+ }
- /* The allocation is an exact match, stop looking */
- if (current_diff == 0)
- break;
+ if (!reg) {
+ /* No allocation with the same usage ID, or usage IDs not in
+ * use. Search for an allocation we can reuse.
+ */
+ struct kbase_va_region *walker;
+ struct kbase_va_region *temp;
+ size_t current_diff = SIZE_MAX;
+
+ list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head,
+ jit_node) {
+
+ if (walker->jit_bin_id == info->bin_id &&
+ meet_size_and_tiler_align_top_requirements(
+ kctx, walker, info)) {
+ size_t min_size, max_size, diff;
+
+ /*
+				 * The JIT allocation's VA requirements have
+				 * been met; it's suitable, but other
+				 * allocations might be a better fit.
+ */
+ min_size = min_t(size_t,
+ walker->gpu_alloc->nents,
+ info->commit_pages);
+ max_size = max_t(size_t,
+ walker->gpu_alloc->nents,
+ info->commit_pages);
+ diff = max_size - min_size;
+
+ if (current_diff > diff) {
+ current_diff = diff;
+ reg = walker;
+ }
+
+ /* The allocation is an exact match, so stop
+ * looking.
+ */
+ if (current_diff == 0)
+ break;
+ }
}
}
@@ -2352,42 +3047,15 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
list_del_init(&reg->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
- kbase_gpu_vm_lock(kctx);
-
- /* Make the physical backing no longer reclaimable */
- if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
- goto update_failed;
-
- /* Grow the backing if required */
- if (reg->gpu_alloc->nents < info->commit_pages) {
- size_t delta;
- size_t old_size = reg->gpu_alloc->nents;
-
- /* Allocate some more pages */
- delta = info->commit_pages - reg->gpu_alloc->nents;
- if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, delta)
- != 0)
- goto update_failed;
-
- if (reg->cpu_alloc != reg->gpu_alloc) {
- if (kbase_alloc_phy_pages_helper(
- reg->cpu_alloc, delta) != 0) {
- kbase_free_phy_pages_helper(
- reg->gpu_alloc, delta);
- goto update_failed;
- }
- }
-
- ret = kbase_mem_grow_gpu_mapping(kctx, reg,
- info->commit_pages, old_size);
+ if (kbase_jit_grow(kctx, info, reg) < 0) {
/*
- * The grow failed so put the allocation back in the
- * pool and return failure.
+ * An update to an allocation from the pool failed;
+ * chances are slim that a new allocation would fare
+ * any better, so return the allocation to the pool
+ * and fail.
*/
- if (ret)
- goto update_failed;
+ goto update_failed_unlocked;
}
- kbase_gpu_vm_unlock(kctx);
} else {
/* No suitable JIT allocation was found so create a new one */
u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
@@ -2397,6 +3065,9 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
mutex_unlock(&kctx->jit_evict_lock);
+ if (info->flags & BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP)
+ flags |= BASE_MEM_TILER_ALIGN_TOP;
+
reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
info->extent, &flags, &gpu_addr);
if (!reg)
@@ -2409,15 +3080,15 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
mutex_unlock(&kctx->jit_evict_lock);
}
+ kctx->jit_current_allocations++;
+ kctx->jit_current_allocations_per_bin[info->bin_id]++;
+
+ reg->jit_usage_id = info->usage_id;
+ reg->jit_bin_id = info->bin_id;
+
return reg;
-update_failed:
- /*
- * An update to an allocation from the pool failed, chances
- * are slim a new allocation would fair any better so return
- * the allocation to the pool and return the function with failure.
- */
- kbase_gpu_vm_unlock(kctx);
+update_failed_unlocked:
mutex_lock(&kctx->jit_evict_lock);
list_move(&reg->jit_node, &kctx->jit_pool_head);
mutex_unlock(&kctx->jit_evict_lock);
@@ -2427,19 +3098,62 @@ out_unlocked:
void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
{
- /* The physical backing of memory in the pool is always reclaimable */
+ u64 old_pages;
+
+ /* Get current size of JIT region */
+ old_pages = kbase_reg_current_backed_size(reg);
+ if (reg->initial_commit < old_pages) {
+ /* Free trim_level % of region, but don't go below initial
+ * commit size
+ */
+ u64 new_size = MAX(reg->initial_commit,
+ div_u64(old_pages * (100 - kctx->trim_level), 100));
+ u64 delta = old_pages - new_size;
+
+ if (delta) {
+ kbase_mem_shrink_cpu_mapping(kctx, reg, old_pages-delta,
+ old_pages);
+ kbase_mem_shrink_gpu_mapping(kctx, reg, old_pages-delta,
+ old_pages);
+
+ kbase_free_phy_pages_helper(reg->cpu_alloc, delta);
+ if (reg->cpu_alloc != reg->gpu_alloc)
+ kbase_free_phy_pages_helper(reg->gpu_alloc,
+ delta);
+ }
+ }
+
+ kctx->jit_current_allocations--;
+ kctx->jit_current_allocations_per_bin[reg->jit_bin_id]--;
+
+ kbase_mem_evictable_mark_reclaim(reg->gpu_alloc);
+
kbase_gpu_vm_lock(kctx);
- kbase_mem_evictable_make(reg->gpu_alloc);
+ reg->flags |= KBASE_REG_DONT_NEED;
+ kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
kbase_gpu_vm_unlock(kctx);
+ /*
+ * Add the allocation to the eviction list and the jit pool; after this
+ * point the shrinker can reclaim it, or it may be reused.
+ */
mutex_lock(&kctx->jit_evict_lock);
+
+ /* This allocation can't already be on a list. */
+ WARN_ON(!list_empty(&reg->gpu_alloc->evict_node));
+ list_add(&reg->gpu_alloc->evict_node, &kctx->evict_list);
+
list_move(&reg->jit_node, &kctx->jit_pool_head);
+
mutex_unlock(&kctx->jit_evict_lock);
}
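
For concreteness, the trim at the top of kbase_jit_free() keeps max(initial_commit, old_pages * (100 - trim_level) / 100) pages and frees the rest. A standalone check of that arithmetic with made-up values (16 pages initially committed, 64 currently backed, trim_level of 25%):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t initial_commit = 16, old_pages = 64, trim_level = 25;
        uint64_t new_size = old_pages * (100 - trim_level) / 100; /* 48 */

        if (new_size < initial_commit)
            new_size = initial_commit; /* never trim below the initial commit */

        /* 64 - 48 = 16 pages are freed back */
        printf("free %llu of %llu pages\n",
               (unsigned long long)(old_pages - new_size),
               (unsigned long long)old_pages);
        return 0;
    }
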
void kbase_jit_backing_lost(struct kbase_va_region *reg)
{
- struct kbase_context *kctx = reg->kctx;
+ struct kbase_context *kctx = kbase_reg_flags_to_kctx(reg);
+
+ if (WARN_ON(!kctx))
+ return;
lockdep_assert_held(&kctx->jit_evict_lock);
@@ -2472,6 +3186,7 @@ bool kbase_jit_evict(struct kbase_context *kctx)
reg = list_entry(kctx->jit_pool_head.prev,
struct kbase_va_region, jit_node);
list_del(&reg->jit_node);
+ list_del_init(&reg->gpu_alloc->evict_node);
}
mutex_unlock(&kctx->jit_evict_lock);
@@ -2489,12 +3204,6 @@ void kbase_jit_term(struct kbase_context *kctx)
/* Free all allocations for this context */
- /*
- * Flush the freeing of allocations whose backing has been freed
- * (i.e. everything in jit_destroy_head).
- */
- cancel_work_sync(&kctx->jit_work);
-
kbase_gpu_vm_lock(kctx);
mutex_lock(&kctx->jit_evict_lock);
/* Free all allocations from the pool */
@@ -2502,6 +3211,7 @@ void kbase_jit_term(struct kbase_context *kctx)
walker = list_first_entry(&kctx->jit_pool_head,
struct kbase_va_region, jit_node);
list_del(&walker->jit_node);
+ list_del_init(&walker->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
walker->flags &= ~KBASE_REG_JIT;
kbase_mem_free_region(kctx, walker);
@@ -2513,6 +3223,7 @@ void kbase_jit_term(struct kbase_context *kctx)
walker = list_first_entry(&kctx->jit_active_head,
struct kbase_va_region, jit_node);
list_del(&walker->jit_node);
+ list_del_init(&walker->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
walker->flags &= ~KBASE_REG_JIT;
kbase_mem_free_region(kctx, walker);
@@ -2520,6 +3231,12 @@ void kbase_jit_term(struct kbase_context *kctx)
}
mutex_unlock(&kctx->jit_evict_lock);
kbase_gpu_vm_unlock(kctx);
+
+ /*
+ * Flush the freeing of allocations whose backing has been freed
+ * (i.e. everything in jit_destroy_head).
+ */
+ cancel_work_sync(&kctx->jit_work);
}
static int kbase_jd_user_buf_map(struct kbase_context *kctx,
@@ -2551,14 +3268,14 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
pinned_pages = get_user_pages(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
- reg->flags & KBASE_REG_GPU_WR,
- 0, pages, NULL);
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
pinned_pages = get_user_pages_remote(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
- reg->flags & KBASE_REG_GPU_WR,
- 0, pages, NULL);
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
pinned_pages = get_user_pages_remote(NULL, mm,
address,
@@ -2610,9 +3327,9 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
gwt_mask = ~KBASE_REG_GPU_WR;
#endif
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn, pa,
- kbase_reg_current_backed_size(reg),
- reg->flags & gwt_mask);
+ err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ pa, kbase_reg_current_backed_size(reg),
+ reg->flags & gwt_mask, kctx->as_nr);
if (err == 0)
return 0;
@@ -2670,6 +3387,7 @@ static int kbase_jd_umm_map(struct kbase_context *kctx,
int err;
size_t count = 0;
struct kbase_mem_phy_alloc *alloc;
+ unsigned long gwt_mask = ~0;
alloc = reg->gpu_alloc;
@@ -2718,10 +3436,17 @@ static int kbase_jd_umm_map(struct kbase_context *kctx,
/* Update nents as we now have pages to map */
alloc->nents = reg->nr_pages;
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
+#ifdef CONFIG_MALI_JOB_DUMP
+ if (kctx->gwt_enabled)
+ gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+ err = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
kbase_get_gpu_phy_pages(reg),
count,
- reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD);
+ (reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD) &
+ gwt_mask,
+ kctx->as_nr);
if (err)
goto err_unmap_attachment;
@@ -2739,7 +3464,8 @@ static int kbase_jd_umm_map(struct kbase_context *kctx,
return 0;
err_teardown_orig_pages:
- kbase_mmu_teardown_pages(kctx, reg->start_pfn, count);
+ kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ count, kctx->as_nr);
err_unmap_attachment:
dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
@@ -2784,9 +3510,6 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
}
}
break;
- case KBASE_MEM_TYPE_IMPORTED_UMP: {
- break;
- }
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
@@ -2822,9 +3545,11 @@ void kbase_unmap_external_resource(struct kbase_context *kctx,
int err;
err = kbase_mmu_teardown_pages(
- kctx,
+ kctx->kbdev,
+ &kctx->mmu,
reg->start_pfn,
- alloc->nents);
+ alloc->nents,
+ kctx->as_nr);
WARN_ON(err);
}
@@ -2841,9 +3566,11 @@ void kbase_unmap_external_resource(struct kbase_context *kctx,
if (reg && reg->gpu_alloc == alloc)
kbase_mmu_teardown_pages(
- kctx,
+ kctx->kbdev,
+ &kctx->mmu,
reg->start_pfn,
- kbase_reg_current_backed_size(reg));
+ kbase_reg_current_backed_size(reg),
+ kctx->as_nr);
if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0))
writeable = false;
@@ -2884,7 +3611,7 @@ struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
/* Find the region */
reg = kbase_region_tracker_find_region_enclosing_address(
kctx, gpu_addr);
- if (NULL == reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto failed;
/* Allocate the metadata object */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem.h b/drivers/gpu/arm/midgard/mali_kbase_mem.h
index 36de3819bfa020..6e38aa0e3ee1fa 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -35,9 +35,6 @@
#endif
#include <linux/kref.h>
-#ifdef CONFIG_UMP
-#include <linux/ump.h>
-#endif /* CONFIG_UMP */
#include "mali_base_kernel.h"
#include <mali_kbase_hw.h>
#include "mali_kbase_pm.h"
@@ -48,6 +45,9 @@
/* Required for kbase_mem_evictable_unmake */
#include "mali_kbase_mem_linux.h"
+static inline void kbase_process_page_usage_inc(struct kbase_context *kctx,
+ int pages);
+
/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */
@@ -77,11 +77,9 @@ struct kbase_cpu_mapping {
enum kbase_memory_type {
KBASE_MEM_TYPE_NATIVE,
- KBASE_MEM_TYPE_IMPORTED_UMP,
KBASE_MEM_TYPE_IMPORTED_UMM,
KBASE_MEM_TYPE_IMPORTED_USER_BUF,
KBASE_MEM_TYPE_ALIAS,
- KBASE_MEM_TYPE_TB,
KBASE_MEM_TYPE_RAW
};
@@ -129,13 +127,13 @@ struct kbase_mem_phy_alloc {
/* type of buffer */
enum kbase_memory_type type;
+ /* Kernel side mapping of the alloc */
+ struct kbase_vmap_struct *permanent_map;
+
unsigned long properties;
/* member in union valid based on @a type */
union {
-#ifdef CONFIG_UMP
- ump_dd_handle ump_handle;
-#endif /* CONFIG_UMP */
#if defined(CONFIG_DMA_SHARED_BUFFER)
struct {
struct dma_buf *dma_buf;
@@ -149,8 +147,13 @@ struct kbase_mem_phy_alloc {
size_t nents;
struct kbase_aliased *aliased;
} alias;
- /* Used by type = (KBASE_MEM_TYPE_NATIVE, KBASE_MEM_TYPE_TB) */
- struct kbase_context *kctx;
+ struct {
+ struct kbase_context *kctx;
+ /* Number of pages in this structure, including *pages.
+ * Used for kernel memory tracking.
+ */
+ size_t nr_struct_pages;
+ } native;
struct kbase_alloc_import_user_buf {
unsigned long address;
unsigned long size;
@@ -204,8 +207,7 @@ static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *
*/
static inline bool kbase_mem_is_imported(enum kbase_memory_type type)
{
- return (type == KBASE_MEM_TYPE_IMPORTED_UMP) ||
- (type == KBASE_MEM_TYPE_IMPORTED_UMM) ||
+ return (type == KBASE_MEM_TYPE_IMPORTED_UMM) ||
(type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
}
@@ -234,7 +236,7 @@ struct kbase_va_region {
struct rb_node rblink;
struct list_head link;
- struct kbase_context *kctx; /* Backlink to base context */
+ struct rb_root *rbtree; /* Backlink to rb tree */
u64 start_pfn; /* The PFN in GPU space */
size_t nr_pages;
@@ -252,14 +254,18 @@ struct kbase_va_region {
#define KBASE_REG_GPU_NX (1ul << 3)
/* Is CPU cached? */
#define KBASE_REG_CPU_CACHED (1ul << 4)
-/* Is GPU cached? */
+/* Is GPU cached?
+ * Some components within the GPU might only be able to access memory that is
+ * GPU cacheable. Refer to the specific GPU implementation for more details.
+ */
#define KBASE_REG_GPU_CACHED (1ul << 5)
#define KBASE_REG_GROWABLE (1ul << 6)
/* Can grow on pf? */
#define KBASE_REG_PF_GROW (1ul << 7)
-/* Bit 8 is unused */
+/* Allocation doesn't straddle the 4GB boundary in GPU virtual space */
+#define KBASE_REG_GPU_VA_SAME_4GB_PAGE (1ul << 8)
/* inner shareable coherency */
#define KBASE_REG_SHARE_IN (1ul << 9)
@@ -299,30 +305,43 @@ struct kbase_va_region {
/* Memory is handled by JIT - user space should not be able to free it */
#define KBASE_REG_JIT (1ul << 24)
+/* Memory has permanent kernel side mapping */
+#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)
+
+/* GPU VA region has been freed by the userspace, but still remains allocated
+ * due to the reference held by CPU mappings created on the GPU VA region.
+ *
+ * A region with this flag set has had kbase_gpu_munmap() called on it, but can
+ * still be looked up in the region tracker as a non-free region. Hence the
+ * driver must not create or update any more GPU mappings on such regions,
+ * because they will not be unmapped when the region is finally destroyed.
+ *
+ * Since such regions are still present in the region tracker, new allocations
+ * attempted with BASE_MEM_SAME_VA might fail if their address intersects with
+ * a region with this flag set.
+ *
+ * In addition, this flag indicates the gpu_alloc member might no longer be
+ * valid, e.g. in infinite cache simulation.
+ */
+#define KBASE_REG_VA_FREED (1ul << 26)
+
#define KBASE_REG_ZONE_SAME_VA KBASE_REG_ZONE(0)
/* only used with 32-bit clients */
/*
- * On a 32bit platform, custom VA should be wired from (4GB + shader region)
+ * On a 32bit platform, custom VA should be wired from 4GB
* to the VA limit of the GPU. Unfortunately, the Linux mmap() interface
* limits us to 2^32 pages (2^44 bytes, see mmap64 man page for reference).
* So we put the default limit to the maximum possible on Linux and shrink
* it down, if required by the GPU, during initialization.
*/
-/*
- * Dedicated 16MB region for shader code:
- * VA range 0x101000000-0x102000000
- */
-#define KBASE_REG_ZONE_EXEC KBASE_REG_ZONE(1)
-#define KBASE_REG_ZONE_EXEC_BASE (0x101000000ULL >> PAGE_SHIFT)
-#define KBASE_REG_ZONE_EXEC_SIZE ((16ULL * 1024 * 1024) >> PAGE_SHIFT)
-
-#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(2)
-#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */
+#define KBASE_REG_ZONE_CUSTOM_VA KBASE_REG_ZONE(1)
+#define KBASE_REG_ZONE_CUSTOM_VA_BASE (0x100000000ULL >> PAGE_SHIFT)
#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)
/* end 32-bit clients only */
+
unsigned long flags;
size_t extent; /* nr of pages alloc'd on PF */
@@ -332,8 +351,73 @@ struct kbase_va_region {
/* List head used to store the region in the JIT allocation pool */
struct list_head jit_node;
+ /* The last JIT usage ID for this region */
+ u16 jit_usage_id;
+ /* The JIT bin this allocation came from */
+ u8 jit_bin_id;
+
+ int va_refcnt; /* number of users of this va */
};
+static inline bool kbase_is_region_free(struct kbase_va_region *reg)
+{
+ return (!reg || reg->flags & KBASE_REG_FREE);
+}
+
+static inline bool kbase_is_region_invalid(struct kbase_va_region *reg)
+{
+ return (!reg || reg->flags & KBASE_REG_VA_FREED);
+}
+
+static inline bool kbase_is_region_invalid_or_free(struct kbase_va_region *reg)
+{
+ /* Not all functions that find regions use this helper, so they need
+ * to be checked too when maintaining this function.
+ */
+ return (kbase_is_region_invalid(reg) || kbase_is_region_free(reg));
+}
+
+int kbase_remove_va_region(struct kbase_va_region *reg);
+static inline void kbase_region_refcnt_free(struct kbase_va_region *reg)
+{
+ /* If the region was mapped, remove the VA region */
+ if (reg->start_pfn)
+ kbase_remove_va_region(reg);
+
+ /* To detect use-after-free in debug builds */
+ KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
+ kfree(reg);
+}
+
+static inline struct kbase_va_region *kbase_va_region_alloc_get(
+ struct kbase_context *kctx, struct kbase_va_region *region)
+{
+ lockdep_assert_held(&kctx->reg_lock);
+
+ WARN_ON(!region->va_refcnt);
+
+ /* non-atomic as kctx->reg_lock is held */
+ region->va_refcnt++;
+
+ return region;
+}
+
+static inline struct kbase_va_region *kbase_va_region_alloc_put(
+ struct kbase_context *kctx, struct kbase_va_region *region)
+{
+ lockdep_assert_held(&kctx->reg_lock);
+
+ WARN_ON(region->va_refcnt <= 0);
+ WARN_ON(region->flags & KBASE_REG_FREE);
+
+ /* non-atomic as kctx->reg_lock is held */
+ region->va_refcnt--;
+ if (!region->va_refcnt)
+ kbase_region_refcnt_free(region);
+
+ return NULL;
+}
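
A kernel-side sketch of how these helpers are intended to pair up while kctx->reg_lock is held; kbase_do_something_with() is a hypothetical stand-in for whatever the caller does with the region, and this is an illustration of the contract rather than driver code:

    static void example_region_use(struct kbase_context *kctx,
                                   struct kbase_va_region *reg)
    {
        kbase_gpu_vm_lock(kctx);

        /* Take a VA reference so the region cannot be freed under us */
        reg = kbase_va_region_alloc_get(kctx, reg);

        kbase_do_something_with(kctx, reg);

        /* Drop the reference; the region is freed if this was the last one */
        reg = kbase_va_region_alloc_put(kctx, reg); /* always returns NULL */

        kbase_gpu_vm_unlock(kctx);
    }
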
+
/* Common functions */
static inline struct tagged_addr *kbase_get_cpu_phy_pages(
struct kbase_va_region *reg)
@@ -373,7 +457,9 @@ static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
-static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
+static inline struct kbase_mem_phy_alloc *kbase_alloc_create(
+ struct kbase_context *kctx, size_t nr_pages,
+ enum kbase_memory_type type)
{
struct kbase_mem_phy_alloc *alloc;
size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
@@ -403,6 +489,13 @@ static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, en
if (!alloc)
return ERR_PTR(-ENOMEM);
+ if (type == KBASE_MEM_TYPE_NATIVE) {
+ alloc->imported.native.nr_struct_pages =
+ (alloc_size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ kbase_process_page_usage_inc(kctx,
+ alloc->imported.native.nr_struct_pages);
+ }
+
/* Store allocation method */
if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;
@@ -429,29 +522,38 @@ static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);
- reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
+ reg->cpu_alloc = kbase_alloc_create(kctx, reg->nr_pages,
KBASE_MEM_TYPE_NATIVE);
if (IS_ERR(reg->cpu_alloc))
return PTR_ERR(reg->cpu_alloc);
else if (!reg->cpu_alloc)
return -ENOMEM;
- reg->cpu_alloc->imported.kctx = kctx;
- INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+
+ reg->cpu_alloc->imported.native.kctx = kctx;
if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE)
&& (reg->flags & KBASE_REG_CPU_CACHED)) {
- reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
+ reg->gpu_alloc = kbase_alloc_create(kctx, reg->nr_pages,
KBASE_MEM_TYPE_NATIVE);
- reg->gpu_alloc->imported.kctx = kctx;
- INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+ if (IS_ERR_OR_NULL(reg->gpu_alloc)) {
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
+ return -ENOMEM;
+ }
+ reg->gpu_alloc->imported.native.kctx = kctx;
} else {
reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
}
+ mutex_lock(&kctx->jit_evict_lock);
+ INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
+ INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
+ mutex_unlock(&kctx->jit_evict_lock);
+
reg->flags &= ~KBASE_REG_FREE;
+
return 0;
}
-static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
+static inline u32 kbase_atomic_add_pages(u32 num_pages, atomic_t *used_pages)
{
int new_val = atomic_add_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
@@ -460,7 +562,7 @@ static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
return new_val;
}
-static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
+static inline u32 kbase_atomic_sub_pages(u32 num_pages, atomic_t *used_pages)
{
int new_val = atomic_sub_return(num_pages, used_pages);
#if defined(CONFIG_MALI_GATOR_SUPPORT)
@@ -539,10 +641,26 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool);
* 3. Return NULL if no memory in the pool
*
* Return: Pointer to allocated page, or NULL if allocation failed.
+ *
+ * Note: This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_alloc_locked() instead.
*/
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
/**
+ * kbase_mem_pool_alloc_locked - Allocate a page from memory pool
+ * @pool: Memory pool to allocate from
+ *
+ * If there are free pages in the pool, this function allocates a page from
+ * @pool. This function does not use @next_pool.
+ *
+ * Return: Pointer to allocated page, or NULL if allocation failed.
+ *
+ * Note: Caller must hold the pool lock.
+ */
+struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool);
+
+/**
* kbase_mem_pool_free - Free a page to memory pool
* @pool: Memory pool where page should be freed
* @page: Page to free to the pool
@@ -553,11 +671,28 @@ struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
* 2. Otherwise, if @next_pool is not NULL and not full, add @page to
* @next_pool.
* 3. Finally, free @page to the kernel.
+ *
+ * Note: This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_free_locked() instead.
*/
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
bool dirty);
/**
+ * kbase_mem_pool_free_locked - Free a page to memory pool
+ * @pool: Memory pool where page should be freed
+ * @p: Page to free to the pool
+ * @dirty: Whether some of the page may be dirty in the cache.
+ *
+ * If @pool is not full, this function adds @p to @pool. Otherwise, @p is
+ * freed to the kernel. This function does not use @next_pool.
+ *
+ * Note: Caller must hold the pool lock.
+ */
+void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
+ bool dirty);
+
+/**
* kbase_mem_pool_alloc_pages - Allocate pages from memory pool
* @pool: Memory pool to allocate from
* @nr_pages: Number of pages to allocate
@@ -571,11 +706,58 @@ void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
* On success number of pages allocated (could be less than nr_pages if
* partial_allowed).
* On error an error code.
+ *
+ * Note: This function should not be used if the pool lock is held. Use
+ * kbase_mem_pool_alloc_pages_locked() instead.
+ *
+ * The caller must not hold vm_lock, as this could cause a deadlock if
+ * the kernel OoM killer runs. If the caller must allocate pages while holding
+ * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
*/
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
struct tagged_addr *pages, bool partial_allowed);
/**
+ * kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
+ * @pool: Memory pool to allocate from
+ * @nr_4k_pages: Number of pages to allocate
+ * @pages: Pointer to array where the physical address of the allocated
+ * pages will be stored.
+ *
+ * Like kbase_mem_pool_alloc() but optimized for allocating many pages. This
+ * version does not allocate new pages from the kernel, and therefore will never
+ * trigger the OoM killer. As a result, it can be run while the vm_lock is held.
+ *
+ * As new pages cannot be allocated, the caller must ensure there are
+ * sufficient pages in the pool. Usage of this function should look like:
+ *
+ * kbase_gpu_vm_lock(kctx);
+ * kbase_mem_pool_lock(pool)
+ * while (kbase_mem_pool_size(pool) < pages_required) {
+ * kbase_mem_pool_unlock(pool)
+ * kbase_gpu_vm_unlock(kctx);
+ * kbase_mem_pool_grow(pool)
+ * kbase_gpu_vm_lock(kctx);
+ * kbase_mem_pool_lock(pool)
+ * }
+ * kbase_mem_pool_alloc_pages_locked(pool)
+ * kbase_mem_pool_unlock(pool)
+ * Perform other processing that requires vm_lock...
+ * kbase_gpu_vm_unlock(kctx);
+ *
+ * This ensures that the pool can be grown to the required size and that the
+ * allocation can complete without another thread using the newly grown pages.
+ *
+ * Return:
+ * On success number of pages allocated.
+ * On error an error code.
+ *
+ * Note: Caller must hold the pool lock.
+ */
+int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
+ size_t nr_4k_pages, struct tagged_addr *pages);
+
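
The usage sequence in the comment above, written out as a kernel-side sketch. pages_required and dest are caller-provided placeholders, and the grow amount is simplified to an upper bound; treat this as an illustration of the lock ordering, not driver code:

    static int example_alloc_from_pool(struct kbase_context *kctx,
                                       struct kbase_mem_pool *pool,
                                       size_t pages_required,
                                       struct tagged_addr *dest)
    {
        int ret;

        kbase_gpu_vm_lock(kctx);
        kbase_mem_pool_lock(pool);

        /* Grow with both locks dropped so the OoM killer cannot deadlock us */
        while (kbase_mem_pool_size(pool) < pages_required) {
            kbase_mem_pool_unlock(pool);
            kbase_gpu_vm_unlock(kctx);

            if (kbase_mem_pool_grow(pool, pages_required))
                return -ENOMEM;

            kbase_gpu_vm_lock(kctx);
            kbase_mem_pool_lock(pool);
        }

        /* Enough pages are guaranteed; this never reaches the kernel allocator */
        ret = kbase_mem_pool_alloc_pages_locked(pool, pages_required, dest);

        kbase_mem_pool_unlock(pool);
        /* ... other processing that requires vm_lock ... */
        kbase_gpu_vm_unlock(kctx);

        return ret < 0 ? ret : 0;
    }
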
+/**
* kbase_mem_pool_free_pages - Free pages to memory pool
* @pool: Memory pool where pages should be freed
* @nr_pages: Number of pages to free
@@ -591,6 +773,22 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
struct tagged_addr *pages, bool dirty, bool reclaimed);
/**
+ * kbase_mem_pool_free_pages_locked - Free pages to memory pool
+ * @pool: Memory pool where pages should be freed
+ * @nr_pages: Number of pages to free
+ * @pages: Pointer to array holding the physical addresses of the pages to
+ * free.
+ * @dirty: Whether any pages may be dirty in the cache.
+ * @reclaimed: Whether the pages were reclaimable and thus should bypass
+ * the pool and go straight to the kernel.
+ *
+ * Like kbase_mem_pool_free() but optimized for freeing many pages.
+ */
+void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
+ size_t nr_pages, struct tagged_addr *pages, bool dirty,
+ bool reclaimed);
+
+/**
* kbase_mem_pool_size - Get number of free pages in memory pool
* @pool: Memory pool to inspect
*
@@ -600,7 +798,7 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
*/
static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
{
- return ACCESS_ONCE(pool->cur_size);
+ return READ_ONCE(pool->cur_size);
}
/**
@@ -649,6 +847,15 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow);
void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
/**
+ * kbase_mem_pool_mark_dying - Mark that this pool is dying
+ * @pool: Memory pool
+ *
+ * This will cause any ongoing allocation operations (e.g. growing on page fault)
+ * to be terminated.
+ */
+void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool);
+
+/**
* kbase_mem_alloc_page - Allocate a new page for a device
* @pool: Memory pool to allocate a page from
*
@@ -660,21 +867,42 @@ void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool);
int kbase_region_tracker_init(struct kbase_context *kctx);
-int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
+int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
+ u8 max_allocations, u8 trim_level);
void kbase_region_tracker_term(struct kbase_context *kctx);
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
+/**
+ * kbase_region_tracker_term_rbtree - Free memory for a region tracker
+ *
+ * This will free all the regions within the region tracker
+ *
+ * @rbtree: Region tracker tree root
+ */
+void kbase_region_tracker_term_rbtree(struct rb_root *rbtree);
+
+struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
+ struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_find_region_enclosing_address(
+ struct rb_root *rbtree, u64 gpu_addr);
/**
* @brief Check that a pointer is actually a valid region.
*
* Must be called with context lock held.
*/
-struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_region_tracker_find_region_base_address(
+ struct kbase_context *kctx, u64 gpu_addr);
+struct kbase_va_region *kbase_find_region_base_address(struct rb_root *rbtree,
+ u64 gpu_addr);
-struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
+struct kbase_va_region *kbase_alloc_free_region(struct rb_root *rbtree,
+ u64 start_pfn, size_t nr_pages, int zone);
void kbase_free_alloced_region(struct kbase_va_region *reg);
-int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
+int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg,
+ u64 addr, size_t nr_pages, size_t align);
+int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
+ struct kbase_va_region *reg, u64 addr, size_t nr_pages,
+ size_t align);
bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
@@ -718,25 +946,44 @@ void kbase_gpu_vm_unlock(struct kbase_context *kctx);
int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
-int kbase_mmu_init(struct kbase_context *kctx);
-void kbase_mmu_term(struct kbase_context *kctx);
-
-phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
-void kbase_mmu_free_pgd(struct kbase_context *kctx);
-int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags);
-int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags);
+/**
+ * kbase_mmu_init - Initialise an object representing GPU page tables
+ *
+ * The structure should be terminated using kbase_mmu_term()
+ *
+ * @kbdev: kbase device
+ * @mmut: structure to initialise
+ * @kctx: optional kbase context, may be NULL if this set of MMU tables is not
+ * associated with a context
+ */
+int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+ struct kbase_context *kctx);
+/**
+ * kbase_mmu_term - Terminate an object representing GPU page tables
+ *
+ * This will free any page tables that have been allocated
+ *
+ * @kbdev: kbase device
+ * @mmut: kbase_mmu_table to be destroyed
+ */
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
+
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ const u64 start_vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags);
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags, int as_nr);
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
struct tagged_addr phys, size_t nr,
unsigned long flags);
-int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
-int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags);
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 vpfn,
+ size_t nr, int as_nr);
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
struct tagged_addr *phys, size_t nr,
unsigned long flags);
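
Taken together, the reworked prototypes give the MMU tables a lifecycle that is independent of any one context. A kernel-side sketch of that lifecycle, with vpfn, phys, nr, flags and as_nr standing in for the caller's real parameters:

    static int example_mmu_lifecycle(struct kbase_device *kbdev,
                                     struct kbase_mmu_table *mmut,
                                     u64 vpfn, struct tagged_addr *phys,
                                     size_t nr, unsigned long flags, int as_nr)
    {
        int err;

        /* kctx may be NULL for tables not tied to a context */
        err = kbase_mmu_init(kbdev, mmut, NULL);
        if (err)
            return err;

        err = kbase_mmu_insert_pages(kbdev, mmut, vpfn, phys, nr, flags, as_nr);
        if (err)
            goto term;

        /* ... the mapping is live here ... */

        err = kbase_mmu_teardown_pages(kbdev, mmut, vpfn, nr, as_nr);
    term:
        kbase_mmu_term(kbdev, mmut);
        return err;
    }
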
@@ -756,11 +1003,19 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);
/**
+ * kbase_mmu_update - Configure an address space on the GPU to the specified
+ * MMU tables
+ *
* The caller has the following locking conditions:
* - It must hold kbase_device->mmu_hw_mutex
* - It must hold the hwaccess_lock
+ *
+ * @kbdev: Kbase device structure
+ * @mmut: The set of MMU tables to be configured on the address space
+ * @as_nr: The address space to be configured
*/
-void kbase_mmu_update(struct kbase_context *kctx);
+void kbase_mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+ int as_nr);
/**
* kbase_mmu_disable() - Disable the MMU for a previously active kbase context.
@@ -922,16 +1177,74 @@ void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_
void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
/**
-* @brief Allocates physical pages.
-*
-* Allocates \a nr_pages_requested and updates the alloc object.
-*
-* @param[in] alloc allocation object to add pages to
-* @param[in] nr_pages_requested number of physical pages to allocate
-*
-* @return 0 if all pages have been successfully allocated. Error code otherwise
-*/
-int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);
+ * kbase_alloc_phy_pages_helper - Allocates physical pages.
+ * @alloc: allocation object to add pages to
+ * @nr_pages_requested: number of physical pages to allocate
+ *
+ * Allocates \a nr_pages_requested and updates the alloc object.
+ *
+ * Return: 0 if all pages have been successfully allocated. Error code otherwise
+ *
+ * Note: The caller must not hold vm_lock, as this could cause a deadlock if
+ * the kernel OoM killer runs. If the caller must allocate pages while holding
+ * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
+ *
+ * This function cannot be used from interrupt context
+ */
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
+ size_t nr_pages_requested);
+
+/**
+ * kbase_alloc_phy_pages_helper_locked - Allocates physical pages.
+ * @alloc: allocation object to add pages to
+ * @pool: Memory pool to allocate from
+ * @nr_pages_requested: number of physical pages to allocate
+ * @prealloc_sa: Information about the partial allocation if the amount
+ * of memory requested is not a multiple of 2MB. One
+ * instance of struct kbase_sub_alloc must be allocated by
+ * the caller iff CONFIG_MALI_2MB_ALLOC is enabled.
+ *
+ * Allocates \a nr_pages_requested and updates the alloc object. This function
+ * does not allocate new pages from the kernel, and therefore will never trigger
+ * the OoM killer. As a result, it can be run while the vm_lock is held.
+ *
+ * As new pages cannot be allocated, the caller must ensure there are
+ * sufficient pages in the pool. Usage of this function should look like:
+ *
+ * kbase_gpu_vm_lock(kctx);
+ * kbase_mem_pool_lock(pool)
+ * while (kbase_mem_pool_size(pool) < pages_required) {
+ * kbase_mem_pool_unlock(pool)
+ * kbase_gpu_vm_unlock(kctx);
+ * kbase_mem_pool_grow(pool)
+ * kbase_gpu_vm_lock(kctx);
+ * kbase_mem_pool_lock(pool)
+ * }
+ * kbase_alloc_phy_pages_helper_locked(pool)
+ * kbase_mem_pool_unlock(pool)
+ * Perform other processing that requires vm_lock...
+ * kbase_gpu_vm_unlock(kctx);
+ *
+ * This ensures that the pool can be grown to the required size and that the
+ * allocation can complete without another thread using the newly grown pages.
+ *
+ * If CONFIG_MALI_2MB_ALLOC is defined and the allocation is >= 2MB, then
+ * @pool must be alloc->imported.native.kctx->lp_mem_pool. Otherwise it must be
+ * alloc->imported.native.kctx->mem_pool.
+ * @prealloc_sa is used to manage the non-2MB sub-allocation. It has to be
+ * pre-allocated because we must not sleep (due to the usage of kmalloc())
+ * whilst holding pool->pool_lock.
+ * @prealloc_sa shall be set to NULL if it has been consumed by this function
+ * to indicate that the caller must not free it.
+ *
+ * Return: Pointer to array of allocated pages. NULL on failure.
+ *
+ * Note: Caller must hold pool->pool_lock.
+ */
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
+ struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
+ size_t nr_pages_requested,
+ struct kbase_sub_alloc **prealloc_sa);
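
A kernel-side sketch of the @prealloc_sa contract described above, assuming CONFIG_MALI_2MB_ALLOC is enabled; struct kbase_sub_alloc is driver-internal, so only its allocation and conditional free are shown:

    static struct tagged_addr *example_alloc_locked(
            struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
            size_t nr_pages)
    {
        struct kbase_sub_alloc *prealloc_sa;
        struct tagged_addr *pages;

        /* Must be allocated before taking pool_lock: kmalloc may sleep */
        prealloc_sa = kmalloc(sizeof(*prealloc_sa), GFP_KERNEL);
        if (!prealloc_sa)
            return NULL;

        kbase_mem_pool_lock(pool);
        pages = kbase_alloc_phy_pages_helper_locked(alloc, pool, nr_pages,
                                                    &prealloc_sa);
        kbase_mem_pool_unlock(pool);

        /* The helper set this to NULL iff it consumed the sub-allocation;
         * kfree(NULL) is a no-op, so this is safe either way. */
        kfree(prealloc_sa);

        return pages;
    }
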
/**
* @brief Free physical pages.
@@ -943,6 +1256,26 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pa
*/
int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
+/**
+ * kbase_free_phy_pages_helper_locked - Free pages allocated with
+ * kbase_alloc_phy_pages_helper_locked()
+ * @alloc: Allocation object to free pages from
+ * @pool: Memory pool to return freed pages to
+ * @pages: Pages allocated by kbase_alloc_phy_pages_helper_locked()
+ * @nr_pages_to_free: Number of physical pages to free
+ *
+ * This function atomically frees pages allocated with
+ * kbase_alloc_phy_pages_helper_locked(). @pages is the pointer to the page
+ * array that is returned by that function. @pool must be the pool that the
+ * pages were originally allocated from.
+ *
+ * If the mem_pool has been unlocked since the allocation then
+ * kbase_free_phy_pages_helper() should be used instead.
+ */
+void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+ struct kbase_mem_pool *pool, struct tagged_addr *pages,
+ size_t nr_pages_to_free);
+
static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
{
SetPagePrivate(p);
@@ -1150,4 +1483,29 @@ bool kbase_sticky_resource_release(struct kbase_context *kctx,
*/
void kbase_sticky_resource_term(struct kbase_context *kctx);
+/**
+ * kbase_mem_pool_lock - Lock a memory pool
+ * @pool: Memory pool to lock
+ */
+static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
+{
+ spin_lock(&pool->pool_lock);
+}
+
+/**
+ * kbase_mem_pool_unlock - Release a memory pool
+ * @pool: Memory pool to unlock
+ */
+static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
+{
+ spin_unlock(&pool->pool_lock);
+}
+
+/**
+ * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
+ * @alloc: The physical allocation
+ */
+void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);
+
+
#endif /* _KBASE_MEM_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
index 4e6668e6247741..cdbb1577f7fa0f 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -47,44 +47,185 @@
#include <mali_kbase.h>
#include <mali_kbase_mem_linux.h>
-#include <mali_kbase_config_defaults.h>
#include <mali_kbase_tlstream.h>
#include <mali_kbase_ioctl.h>
+
+static int kbase_vmap_phy_pages(struct kbase_context *kctx,
+ struct kbase_va_region *reg, u64 offset_bytes, size_t size,
+ struct kbase_vmap_struct *map);
+static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
+ struct kbase_vmap_struct *map);
+
static int kbase_tracking_page_setup(struct kbase_context *kctx, struct vm_area_struct *vma);
-/**
- * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation
- * @kctx: Context the region belongs to
- * @reg: The GPU region
- * @new_pages: The number of pages after the shrink
- * @old_pages: The number of pages before the shrink
- *
- * Shrink (or completely remove) all CPU mappings which reference the shrunk
- * part of the allocation.
- *
- * Note: Caller must be holding the processes mmap_sem lock.
+/* Retrieve the associated region pointer if the GPU address corresponds to
+ * one of the event memory pages. The enclosing region, if found, shouldn't
+ * have been marked as free.
*/
-static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
- struct kbase_va_region *reg,
- u64 new_pages, u64 old_pages);
+static struct kbase_va_region *kbase_find_event_mem_region(
+ struct kbase_context *kctx, u64 gpu_addr)
+{
+
+ return NULL;
+}
/**
- * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
- * @kctx: Context the region belongs to
- * @reg: The GPU region or NULL if there isn't one
- * @new_pages: The number of pages after the shrink
- * @old_pages: The number of pages before the shrink
+ * kbase_phy_alloc_mapping_init - Initialize the kernel side permanent mapping
+ * of the physical allocation belonging to a
+ * region
+ * @kctx: The kernel base context @reg belongs to.
+ * @reg: The region whose physical allocation is to be mapped
+ * @vsize: The size of the requested region, in pages
+ * @size: The size in pages initially committed to the region
*
- * Return: 0 on success, negative -errno on error
+ * Return: 0 on success, otherwise an error code indicating failure
*
- * Unmap the shrunk pages from the GPU mapping. Note that the size of the region
- * itself is unmodified as we still need to reserve the VA, only the page tables
- * will be modified by this function.
+ * Maps the physical allocation backing a non-free @reg, so it may be
+ * accessed directly from the kernel. This is only supported for physical
+ * allocations of type KBASE_MEM_TYPE_NATIVE, and will fail for other types of
+ * physical allocation.
+ *
+ * The mapping is stored directly in the allocation that backs @reg. The
+ * refcount is not incremented at this point. Instead, use of the mapping should
+ * be surrounded by kbase_phy_alloc_mapping_get() and
+ * kbase_phy_alloc_mapping_put() to ensure it does not disappear whilst the
+ * client is accessing it.
+ *
+ * Both cached and uncached regions are allowed, but any sync operations are the
+ * responsibility of the client using the permanent mapping.
+ *
+ * A number of checks are made to ensure that a region that needs a permanent
+ * mapping can actually be supported:
+ * - The region must be created as fully backed
+ * - The region must not be growable
+ *
+ * This function will fail if those checks are not satisfied.
+ *
+ * On success, some properties of the region are also forced:
+ * - It will no longer be growable
*/
-static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
- struct kbase_va_region *reg,
- u64 new_pages, u64 old_pages);
+static int kbase_phy_alloc_mapping_init(struct kbase_context *kctx,
+ struct kbase_va_region *reg, size_t vsize, size_t size)
+{
+ size_t size_bytes = (size << PAGE_SHIFT);
+ struct kbase_vmap_struct *kern_mapping;
+ int err = 0;
+
+ /* Can only map regions that are always fully committed.
+ * Don't set up the mapping twice.
+ * Only KBASE_MEM_TYPE_NATIVE allocations are supported.
+ */
+ if (vsize != size || reg->cpu_alloc->permanent_map != NULL ||
+ reg->cpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
+ return -EINVAL;
+
+ if (size > (KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES -
+ kctx->permanent_mapped_pages)) {
+ dev_warn(kctx->kbdev->dev, "Request for %llu more pages of memory needing a permanent mapping would breach limit %lu, currently at %lu pages",
+ (u64)size,
+ KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES,
+ kctx->permanent_mapped_pages);
+ return -ENOMEM;
+ }
+
+ kern_mapping = kzalloc(sizeof(*kern_mapping), GFP_KERNEL);
+ if (!kern_mapping)
+ return -ENOMEM;
+
+ err = kbase_vmap_phy_pages(kctx, reg, 0u, size_bytes, kern_mapping);
+ if (err < 0)
+ goto vmap_fail;
+
+ /* No support for growing or shrinking mapped regions */
+ reg->flags &= ~KBASE_REG_GROWABLE;
+
+ reg->cpu_alloc->permanent_map = kern_mapping;
+ kctx->permanent_mapped_pages += size;
+
+ return 0;
+vmap_fail:
+ kfree(kern_mapping);
+ return err;
+}
+
+void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc)
+{
+ WARN_ON(!alloc->permanent_map);
+ kbase_vunmap_phy_pages(kctx, alloc->permanent_map);
+ kfree(alloc->permanent_map);
+
+ alloc->permanent_map = NULL;
+
+ /* Mappings are only done on cpu_alloc, so we don't need to worry
+ * about this being reduced a second time if a separate gpu_alloc
+ * is freed.
+ */
+ WARN_ON(alloc->nents > kctx->permanent_mapped_pages);
+ kctx->permanent_mapped_pages -= alloc->nents;
+}
+
+void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx,
+ u64 gpu_addr,
+ struct kbase_vmap_struct **out_kern_mapping)
+{
+ struct kbase_va_region *reg;
+ void *kern_mem_ptr = NULL;
+ struct kbase_vmap_struct *kern_mapping;
+ u64 mapping_offset;
+
+ WARN_ON(!kctx);
+ WARN_ON(!out_kern_mapping);
+
+ kbase_gpu_vm_lock(kctx);
+
+ /* First do a quick lookup in the list of event memory regions */
+ reg = kbase_find_event_mem_region(kctx, gpu_addr);
+
+ if (!reg) {
+ reg = kbase_region_tracker_find_region_enclosing_address(
+ kctx, gpu_addr);
+ }
+
+ if (kbase_is_region_invalid_or_free(reg))
+ goto out_unlock;
+
+ kern_mapping = reg->cpu_alloc->permanent_map;
+ if (kern_mapping == NULL)
+ goto out_unlock;
+
+ mapping_offset = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
+
+ /* Refcount the allocations to prevent them from disappearing */
+ WARN_ON(reg->cpu_alloc != kern_mapping->cpu_alloc);
+ WARN_ON(reg->gpu_alloc != kern_mapping->gpu_alloc);
+ (void)kbase_mem_phy_alloc_get(kern_mapping->cpu_alloc);
+ (void)kbase_mem_phy_alloc_get(kern_mapping->gpu_alloc);
+
+ kern_mem_ptr = (void *)(uintptr_t)((uintptr_t)kern_mapping->addr + mapping_offset);
+ *out_kern_mapping = kern_mapping;
+out_unlock:
+ kbase_gpu_vm_unlock(kctx);
+ return kern_mem_ptr;
+}
+
+void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
+ struct kbase_vmap_struct *kern_mapping)
+{
+ WARN_ON(!kctx);
+ WARN_ON(!kern_mapping);
+
+ WARN_ON(kctx != kern_mapping->cpu_alloc->imported.native.kctx);
+ WARN_ON(kern_mapping != kern_mapping->cpu_alloc->permanent_map);
+
+ kbase_mem_phy_alloc_put(kern_mapping->cpu_alloc);
+ kbase_mem_phy_alloc_put(kern_mapping->gpu_alloc);
+
+ /* kern_mapping and the gpu/cpu phy allocs backing it must not be used
+ * from now on
+ */
+}
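
A kernel-side sketch of the get/use/put pattern these two functions establish; gpu_addr is a placeholder and the memcpy() stands in for whatever access the client actually performs on the permanent mapping:

    static int example_read_mapped(struct kbase_context *kctx, u64 gpu_addr,
                                   void *dst, size_t len)
    {
        struct kbase_vmap_struct *mapping;
        void *ptr;

        /* Takes refs on the backing allocs so they cannot disappear */
        ptr = kbase_phy_alloc_mapping_get(kctx, gpu_addr, &mapping);
        if (!ptr)
            return -EINVAL;

        /* Any required cache syncs are the caller's responsibility */
        memcpy(dst, ptr, len);

        kbase_phy_alloc_mapping_put(kctx, mapping);
        return 0;
    }
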
struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
@@ -92,6 +233,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
{
int zone;
struct kbase_va_region *reg;
+ struct rb_root *rbtree;
struct device *dev;
KBASE_DEBUG_ASSERT(kctx);
@@ -108,6 +250,11 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
goto bad_flags;
}
+ if ((*flags & BASE_MEM_UNCACHED_GPU) != 0 &&
+ (*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0) {
+ /* Remove COHERENT_SYSTEM_REQUIRED flag if uncached GPU mapping is requested */
+ *flags &= ~BASE_MEM_COHERENT_SYSTEM_REQUIRED;
+ }
if ((*flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED) != 0 &&
!kbase_device_is_cpu_coherent(kctx->kbdev)) {
dev_warn(dev, "kbase_mem_alloc call required coherent mem when unavailable");
@@ -123,14 +270,15 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
goto bad_sizes;
/* find out which VA zone to use */
- if (*flags & BASE_MEM_SAME_VA)
+ if (*flags & BASE_MEM_SAME_VA) {
+ rbtree = &kctx->reg_rbtree_same;
zone = KBASE_REG_ZONE_SAME_VA;
- else if (*flags & BASE_MEM_PROT_GPU_EX)
- zone = KBASE_REG_ZONE_EXEC;
- else
+ } else {
+ rbtree = &kctx->reg_rbtree_custom;
zone = KBASE_REG_ZONE_CUSTOM_VA;
+ }
- reg = kbase_alloc_free_region(kctx, 0, va_pages, zone);
+ reg = kbase_alloc_free_region(rbtree, 0, va_pages, zone);
if (!reg) {
dev_err(dev, "Failed to allocate free region");
goto no_region;
@@ -162,6 +310,21 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
kbase_gpu_vm_lock(kctx);
+ if (reg->flags & KBASE_REG_PERMANENT_KERNEL_MAPPING) {
+ /* Permanent kernel mappings must happen as soon as
+ * reg->cpu_alloc->pages is ready. Currently this happens after
+ * kbase_alloc_phy_pages(). If we move that to set up pages
+ * earlier, this call must move as well.
+ */
+ int err = kbase_phy_alloc_mapping_init(kctx, reg, va_pages,
+ commit_pages);
+ if (err < 0) {
+ kbase_gpu_vm_unlock(kctx);
+ goto no_kern_mapping;
+ }
+ }
+
+
/* mmap needed to setup VA? */
if (*flags & BASE_MEM_SAME_VA) {
unsigned long prot = PROT_NONE;
@@ -230,6 +393,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
no_mmap:
no_cookie:
+no_kern_mapping:
no_mem:
kbase_mem_phy_alloc_put(reg->cpu_alloc);
kbase_mem_phy_alloc_put(reg->gpu_alloc);
@@ -243,7 +407,8 @@ bad_flags:
}
KBASE_EXPORT_TEST_API(kbase_mem_alloc);
-int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * const out)
+int kbase_mem_query(struct kbase_context *kctx,
+ u64 gpu_addr, u64 query, u64 * const out)
{
struct kbase_va_region *reg;
int ret = -EINVAL;
@@ -260,7 +425,7 @@ int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * c
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
switch (query) {
@@ -311,6 +476,10 @@ int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * c
}
if (KBASE_REG_TILER_ALIGN_TOP & reg->flags)
*out |= BASE_MEM_TILER_ALIGN_TOP;
+ if (!(KBASE_REG_GPU_CACHED & reg->flags))
+ *out |= BASE_MEM_UNCACHED_GPU;
+ if (KBASE_REG_GPU_VA_SAME_4GB_PAGE & reg->flags)
+ *out |= BASE_MEM_GPU_VA_SAME_4GB_PAGE;
WARN(*out & ~BASE_MEM_FLAGS_QUERYABLE,
"BASE_MEM_FLAGS_QUERYABLE needs updating\n");
@@ -471,9 +640,9 @@ void kbase_mem_evictable_deinit(struct kbase_context *kctx)
* kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
* @alloc: The physical allocation
*/
-static void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
+void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
{
- struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_context *kctx = alloc->imported.native.kctx;
int __maybe_unused new_page_count;
kbase_process_page_usage_dec(kctx, alloc->nents);
@@ -493,7 +662,7 @@ static void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
static
void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
{
- struct kbase_context *kctx = alloc->imported.kctx;
+ struct kbase_context *kctx = alloc->imported.native.kctx;
int __maybe_unused new_page_count;
new_page_count = kbase_atomic_add_pages(alloc->nents,
@@ -512,21 +681,21 @@ void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc)
{
- struct kbase_context *kctx = gpu_alloc->imported.kctx;
+ struct kbase_context *kctx = gpu_alloc->imported.native.kctx;
lockdep_assert_held(&kctx->reg_lock);
- /* This alloction can't already be on a list. */
- WARN_ON(!list_empty(&gpu_alloc->evict_node));
-
kbase_mem_shrink_cpu_mapping(kctx, gpu_alloc->reg,
0, gpu_alloc->nents);
+ mutex_lock(&kctx->jit_evict_lock);
+ /* This allocation can't already be on a list. */
+ WARN_ON(!list_empty(&gpu_alloc->evict_node));
+
/*
* Add the allocation to the eviction list, after this point the shrink
* can reclaim it.
*/
- mutex_lock(&kctx->jit_evict_lock);
list_add(&gpu_alloc->evict_node, &kctx->evict_list);
mutex_unlock(&kctx->jit_evict_lock);
kbase_mem_evictable_mark_reclaim(gpu_alloc);
@@ -537,16 +706,18 @@ int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc)
bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *gpu_alloc)
{
- struct kbase_context *kctx = gpu_alloc->imported.kctx;
+ struct kbase_context *kctx = gpu_alloc->imported.native.kctx;
int err = 0;
lockdep_assert_held(&kctx->reg_lock);
+ mutex_lock(&kctx->jit_evict_lock);
/*
* First remove the allocation from the eviction list as it's no
* longer eligible for eviction.
*/
list_del_init(&gpu_alloc->evict_node);
+ mutex_unlock(&kctx->jit_evict_lock);
if (gpu_alloc->evicted == 0) {
/*
@@ -620,7 +791,7 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
/* Is the region being transitioning between not needed and needed? */
@@ -646,10 +817,15 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
}
/* limit to imported memory */
- if ((reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMP) &&
- (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM))
+ if (reg->gpu_alloc->type != KBASE_MEM_TYPE_IMPORTED_UMM)
goto out_unlock;
+ /* shareability flags are ignored for GPU uncached memory */
+ if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
/* no change? */
if (real_flags == (reg->flags & (KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH))) {
ret = 0;
@@ -662,28 +838,17 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
reg->flags |= real_flags;
/* Currently supporting only imported memory */
- switch (reg->gpu_alloc->type) {
-#ifdef CONFIG_UMP
- case KBASE_MEM_TYPE_IMPORTED_UMP:
- ret = kbase_mmu_update_pages(kctx, reg->start_pfn,
- kbase_get_gpu_phy_pages(reg),
- reg->gpu_alloc->nents, reg->flags);
- break;
-#endif
#ifdef CONFIG_DMA_SHARED_BUFFER
- case KBASE_MEM_TYPE_IMPORTED_UMM:
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
/* Future use will use the new flags, existing mapping will NOT be updated
* as memory should not be in use by the GPU when updating the flags.
*/
ret = 0;
WARN_ON(reg->gpu_alloc->imported.umm.current_mapping_usage_count);
- break;
-#endif
- default:
- break;
}
+#endif /* CONFIG_DMA_SHARED_BUFFER */
- /* roll back on error, i.e. not UMP */
+ /* roll back on error */
if (ret)
reg->flags = prev_flags;
@@ -696,128 +861,6 @@ out:
#define KBASE_MEM_IMPORT_HAVE_PAGES (1UL << BASE_MEM_FLAGS_NR_BITS)
-#ifdef CONFIG_UMP
-static struct kbase_va_region *kbase_mem_from_ump(struct kbase_context *kctx, ump_secure_id id, u64 *va_pages, u64 *flags)
-{
- struct kbase_va_region *reg;
- ump_dd_handle umph;
- u64 block_count;
- const ump_dd_physical_block_64 *block_array;
- u64 i, j;
- int page = 0;
- ump_alloc_flags ump_flags;
- ump_alloc_flags cpu_flags;
- ump_alloc_flags gpu_flags;
-
- if (*flags & BASE_MEM_SECURE)
- goto bad_flags;
-
- umph = ump_dd_from_secure_id(id);
- if (UMP_DD_INVALID_MEMORY_HANDLE == umph)
- goto bad_id;
-
- ump_flags = ump_dd_allocation_flags_get(umph);
- cpu_flags = (ump_flags >> UMP_DEVICE_CPU_SHIFT) & UMP_DEVICE_MASK;
- gpu_flags = (ump_flags >> DEFAULT_UMP_GPU_DEVICE_SHIFT) &
- UMP_DEVICE_MASK;
-
- *va_pages = ump_dd_size_get_64(umph);
- *va_pages >>= PAGE_SHIFT;
-
- if (!*va_pages)
- goto bad_size;
-
- if (*va_pages > (U64_MAX / PAGE_SIZE))
- /* 64-bit address range is the max */
- goto bad_size;
-
- if (*flags & BASE_MEM_SAME_VA)
- reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
- else
- reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
-
- if (!reg)
- goto no_region;
-
- /* we've got pages to map now, and support SAME_VA */
- *flags |= KBASE_MEM_IMPORT_HAVE_PAGES;
-
- reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMP);
- if (IS_ERR_OR_NULL(reg->gpu_alloc))
- goto no_alloc_obj;
-
- reg->cpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
-
- reg->gpu_alloc->imported.ump_handle = umph;
-
- reg->flags &= ~KBASE_REG_FREE;
- reg->flags |= KBASE_REG_GPU_NX; /* UMP is always No eXecute */
- reg->flags &= ~KBASE_REG_GROWABLE; /* UMP cannot be grown */
-
- /* Override import flags based on UMP flags */
- *flags &= ~(BASE_MEM_CACHED_CPU);
- *flags &= ~(BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR);
- *flags &= ~(BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR);
-
- if ((cpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
- (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) {
- reg->flags |= KBASE_REG_CPU_CACHED;
- *flags |= BASE_MEM_CACHED_CPU;
- }
-
- if (cpu_flags & UMP_PROT_CPU_WR) {
- reg->flags |= KBASE_REG_CPU_WR;
- *flags |= BASE_MEM_PROT_CPU_WR;
- }
-
- if (cpu_flags & UMP_PROT_CPU_RD) {
- reg->flags |= KBASE_REG_CPU_RD;
- *flags |= BASE_MEM_PROT_CPU_RD;
- }
-
- if ((gpu_flags & (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR)) ==
- (UMP_HINT_DEVICE_RD | UMP_HINT_DEVICE_WR))
- reg->flags |= KBASE_REG_GPU_CACHED;
-
- if (gpu_flags & UMP_PROT_DEVICE_WR) {
- reg->flags |= KBASE_REG_GPU_WR;
- *flags |= BASE_MEM_PROT_GPU_WR;
- }
-
- if (gpu_flags & UMP_PROT_DEVICE_RD) {
- reg->flags |= KBASE_REG_GPU_RD;
- *flags |= BASE_MEM_PROT_GPU_RD;
- }
-
- /* ump phys block query */
- ump_dd_phys_blocks_get_64(umph, &block_count, &block_array);
-
- for (i = 0; i < block_count; i++) {
- for (j = 0; j < (block_array[i].size >> PAGE_SHIFT); j++) {
- struct tagged_addr tagged;
-
- tagged = as_tagged(block_array[i].addr +
- (j << PAGE_SHIFT));
- reg->gpu_alloc->pages[page] = tagged;
- page++;
- }
- }
- reg->gpu_alloc->nents = *va_pages;
- reg->extent = 0;
-
- return reg;
-
-no_alloc_obj:
- kfree(reg);
-no_region:
-bad_size:
- ump_dd_release(umph);
-bad_id:
-bad_flags:
- return NULL;
-}
-#endif /* CONFIG_UMP */
-
#ifdef CONFIG_DMA_SHARED_BUFFER
static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
int fd, u64 *va_pages, u64 *flags, u32 padding)
@@ -861,15 +904,18 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
if (shared_zone) {
*flags |= BASE_MEM_NEED_MMAP;
- reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
+ reg = kbase_alloc_free_region(&kctx->reg_rbtree_same,
+ 0, *va_pages, KBASE_REG_ZONE_SAME_VA);
} else {
- reg = kbase_alloc_free_region(kctx, 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
+ reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+ 0, *va_pages, KBASE_REG_ZONE_CUSTOM_VA);
}
if (!reg)
goto no_region;
- reg->gpu_alloc = kbase_alloc_create(*va_pages, KBASE_MEM_TYPE_IMPORTED_UMM);
+ reg->gpu_alloc = kbase_alloc_create(kctx, *va_pages,
+ KBASE_MEM_TYPE_IMPORTED_UMM);
if (IS_ERR_OR_NULL(reg->gpu_alloc))
goto no_alloc_obj;
@@ -884,7 +930,6 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
reg->flags &= ~KBASE_REG_FREE;
reg->flags |= KBASE_REG_GPU_NX; /* UMM is always No eXecute */
reg->flags &= ~KBASE_REG_GROWABLE; /* UMM cannot be grown */
- reg->flags |= KBASE_REG_GPU_CACHED;
if (*flags & BASE_MEM_SECURE)
reg->flags |= KBASE_REG_SECURE;
@@ -903,6 +948,7 @@ static struct kbase_va_region *kbase_mem_from_umm(struct kbase_context *kctx,
invalid_flags:
kbase_mem_phy_alloc_put(reg->gpu_alloc);
+ kbase_mem_phy_alloc_put(reg->cpu_alloc);
no_alloc_obj:
kfree(reg);
no_region:
@@ -932,6 +978,7 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
{
long i;
struct kbase_va_region *reg;
+ struct rb_root *rbtree;
long faulted_pages;
int zone = KBASE_REG_ZONE_CUSTOM_VA;
bool shared_zone = false;
@@ -941,6 +988,12 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
if ((address & (cache_line_alignment - 1)) != 0 ||
(size & (cache_line_alignment - 1)) != 0) {
+ if (*flags & BASE_MEM_UNCACHED_GPU) {
+ dev_warn(kctx->kbdev->dev,
+ "User buffer is not cache line aligned and marked as GPU uncached\n");
+ goto bad_size;
+ }
+
/* Coherency must be enabled to handle partial cache lines */
if (*flags & (BASE_MEM_COHERENT_SYSTEM |
BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
@@ -983,14 +1036,16 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
if (shared_zone) {
*flags |= BASE_MEM_NEED_MMAP;
zone = KBASE_REG_ZONE_SAME_VA;
- }
+ rbtree = &kctx->reg_rbtree_same;
+ } else
+ rbtree = &kctx->reg_rbtree_custom;
- reg = kbase_alloc_free_region(kctx, 0, *va_pages, zone);
+ reg = kbase_alloc_free_region(rbtree, 0, *va_pages, zone);
if (!reg)
goto no_region;
- reg->gpu_alloc = kbase_alloc_create(*va_pages,
+ reg->gpu_alloc = kbase_alloc_create(kctx, *va_pages,
KBASE_MEM_TYPE_IMPORTED_USER_BUF);
if (IS_ERR_OR_NULL(reg->gpu_alloc))
goto no_alloc_obj;
@@ -1010,6 +1065,11 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
user_buf->address = address;
user_buf->nr_pages = *va_pages;
user_buf->mm = current->mm;
+#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
+ atomic_inc(&current->mm->mm_count);
+#else
+ mmgrab(current->mm);
+#endif
user_buf->pages = kmalloc_array(*va_pages, sizeof(struct page *),
GFP_KERNEL);
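
The version guard above pins the owning mm_struct via its mm_count so the
import bookkeeping can outlive the task's address-space teardown; mmgrab()
is simply the 4.11+ spelling of the same reference. A minimal sketch of the
pattern as a standalone helper (kbase_grab_mm_compat is a hypothetical name,
not part of this patch):

    #include <linux/version.h>
    #if KERNEL_VERSION(4, 11, 0) <= LINUX_VERSION_CODE
    #include <linux/sched/mm.h>	/* mmgrab() lives here from 4.11 on */
    #endif

    static inline void kbase_grab_mm_compat(struct mm_struct *mm)
    {
    #if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
    	/* Pre-4.11: take the mm_count reference directly */
    	atomic_inc(&mm->mm_count);
    #else
    	/* 4.11+: the same reference, via the official helper */
    	mmgrab(mm);
    #endif
    }
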
@@ -1032,10 +1092,12 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
- reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
faulted_pages = get_user_pages(address, *va_pages,
- reg->flags & KBASE_REG_GPU_WR, 0, pages, NULL);
+ reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
+ pages, NULL);
#else
faulted_pages = get_user_pages(address, *va_pages,
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
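
The ladder above tracks the get_user_pages() signature churn at 4.6 (the
explicit task/mm arguments were dropped) and 4.9; this 4.4 tree already
carries the gup_flags backport, which is why every arm can pass FOLL_WRITE
directly. A hedged sketch of the same ladder folded into one wrapper (the
name kbase_gup_compat is illustrative, and the signatures assume this
tree's backports):

    static long kbase_gup_compat(unsigned long address, unsigned long nr_pages,
    			     bool writable, struct page **pages)
    {
    	unsigned int gup_flags = writable ? FOLL_WRITE : 0;

    #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
    	/* Old signature still names the task and mm explicitly */
    	return get_user_pages(current, current->mm, address, nr_pages,
    			      gup_flags, pages, NULL);
    #else
    	/* 4.6+ operates on current->mm implicitly */
    	return get_user_pages(address, nr_pages, gup_flags, pages, NULL);
    #endif
    }
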
@@ -1047,8 +1109,6 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
if (faulted_pages != *va_pages)
goto fault_mismatch;
- atomic_inc(&current->mm->mm_count);
-
reg->gpu_alloc->nents = 0;
reg->extent = 0;
@@ -1095,7 +1155,6 @@ fault_mismatch:
for (i = 0; i < faulted_pages; i++)
put_page(pages[i]);
}
- kfree(user_buf->pages);
no_page_array:
invalid_flags:
kbase_mem_phy_alloc_put(reg->cpu_alloc);
@@ -1155,21 +1214,23 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
/* 64-bit tasks must MMAP anyway, but not expose this address to
* clients */
*flags |= BASE_MEM_NEED_MMAP;
- reg = kbase_alloc_free_region(kctx, 0, *num_pages,
- KBASE_REG_ZONE_SAME_VA);
+ reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0,
+ *num_pages,
+ KBASE_REG_ZONE_SAME_VA);
} else {
#else
if (1) {
#endif
- reg = kbase_alloc_free_region(kctx, 0, *num_pages,
- KBASE_REG_ZONE_CUSTOM_VA);
+ reg = kbase_alloc_free_region(&kctx->reg_rbtree_custom,
+ 0, *num_pages,
+ KBASE_REG_ZONE_CUSTOM_VA);
}
if (!reg)
goto no_reg;
	/* zero-sized page array, as we don't need one / can't support one */
- reg->gpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_ALIAS);
+ reg->gpu_alloc = kbase_alloc_create(kctx, 0, KBASE_MEM_TYPE_ALIAS);
if (IS_ERR_OR_NULL(reg->gpu_alloc))
goto no_alloc_obj;
@@ -1207,12 +1268,12 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
(ai[i].handle.basep.handle >> PAGE_SHIFT) << PAGE_SHIFT);
/* validate found region */
- if (!aliasing_reg)
- goto bad_handle; /* Not found */
- if (aliasing_reg->flags & KBASE_REG_FREE)
- goto bad_handle; /* Free region */
+ if (kbase_is_region_invalid_or_free(aliasing_reg))
+ goto bad_handle; /* Not found/already free */
if (aliasing_reg->flags & KBASE_REG_DONT_NEED)
goto bad_handle; /* Ephemeral region */
+ if (!(aliasing_reg->flags & KBASE_REG_GPU_CACHED))
+ goto bad_handle; /* GPU uncached memory */
if (!aliasing_reg->gpu_alloc)
goto bad_handle; /* No alloc */
if (aliasing_reg->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE)
@@ -1339,17 +1400,6 @@ int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
}
switch (type) {
-#ifdef CONFIG_UMP
- case BASE_MEM_IMPORT_TYPE_UMP: {
- ump_secure_id id;
-
- if (get_user(id, (ump_secure_id __user *)phandle))
- reg = NULL;
- else
- reg = kbase_mem_from_ump(kctx, id, va_pages, flags);
- }
- break;
-#endif /* CONFIG_UMP */
#ifdef CONFIG_DMA_SHARED_BUFFER
case BASE_MEM_IMPORT_TYPE_UMM: {
int fd;
@@ -1456,13 +1506,13 @@ int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
/* Map the new pages into the GPU */
phy_pages = kbase_get_gpu_phy_pages(reg);
- ret = kbase_mmu_insert_pages(kctx, reg->start_pfn + old_pages,
- phy_pages + old_pages, delta, reg->flags);
+ ret = kbase_mmu_insert_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn + old_pages,
+ phy_pages + old_pages, delta, reg->flags, kctx->as_nr);
return ret;
}
-static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
struct kbase_va_region *reg,
u64 new_pages, u64 old_pages)
{
@@ -1477,15 +1527,15 @@ static void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
(old_pages - new_pages)<<PAGE_SHIFT, 1);
}
-static int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
struct kbase_va_region *reg,
u64 new_pages, u64 old_pages)
{
u64 delta = old_pages - new_pages;
int ret = 0;
- ret = kbase_mmu_teardown_pages(kctx,
- reg->start_pfn + new_pages, delta);
+ ret = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+ reg->start_pfn + new_pages, delta, kctx->as_nr);
return ret;
}
@@ -1511,7 +1561,7 @@ int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages)
/* Validate the region */
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
+ if (kbase_is_region_invalid_or_free(reg))
goto out_unlock;
KBASE_DEBUG_ASSERT(reg->cpu_alloc);
@@ -1649,6 +1699,7 @@ static void kbase_cpu_vm_close(struct vm_area_struct *vma)
list_del(&map->mappings_list);
+ kbase_va_region_alloc_put(map->kctx, map->region);
kbase_gpu_vm_unlock(map->kctx);
kbase_mem_phy_alloc_put(map->alloc);
@@ -1717,7 +1768,13 @@ const struct vm_operations_struct kbase_vm_ops = {
.fault = kbase_cpu_vm_fault
};
-static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vma, void *kaddr, size_t nr_pages, unsigned long aligned_offset, int free_on_close)
+static int kbase_cpu_mmap(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ struct vm_area_struct *vma,
+ void *kaddr,
+ size_t nr_pages,
+ unsigned long aligned_offset,
+ int free_on_close)
{
struct kbase_cpu_mapping *map;
struct tagged_addr *page_array;
@@ -1797,9 +1854,9 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
goto out;
}
- map->region = reg;
+ map->region = kbase_va_region_alloc_get(kctx, reg);
map->free_on_close = free_on_close;
- map->kctx = reg->kctx;
+ map->kctx = kctx;
map->alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
map->count = 1; /* start with one ref */
@@ -1812,91 +1869,6 @@ static int kbase_cpu_mmap(struct kbase_va_region *reg, struct vm_area_struct *vm
return err;
}
-static int kbase_trace_buffer_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kaddr)
-{
- struct kbase_va_region *new_reg;
- u32 nr_pages;
- size_t size;
- int err = 0;
- u32 *tb;
- int owns_tb = 1;
-
- dev_dbg(kctx->kbdev->dev, "in %s\n", __func__);
- size = (vma->vm_end - vma->vm_start);
- nr_pages = size >> PAGE_SHIFT;
-
- if (!kctx->jctx.tb) {
- KBASE_DEBUG_ASSERT(0 != size);
- tb = vmalloc_user(size);
-
- if (NULL == tb) {
- err = -ENOMEM;
- goto out;
- }
-
- err = kbase_device_trace_buffer_install(kctx, tb, size);
- if (err) {
- vfree(tb);
- goto out;
- }
- } else {
- err = -EINVAL;
- goto out;
- }
-
- *kaddr = kctx->jctx.tb;
-
- new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
- if (!new_reg) {
- err = -ENOMEM;
- WARN_ON(1);
- goto out_no_region;
- }
-
- new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_TB);
- if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
- err = -ENOMEM;
- new_reg->cpu_alloc = NULL;
- WARN_ON(1);
- goto out_no_alloc;
- }
-
- new_reg->gpu_alloc = kbase_mem_phy_alloc_get(new_reg->cpu_alloc);
-
- new_reg->cpu_alloc->imported.kctx = kctx;
- new_reg->flags &= ~KBASE_REG_FREE;
- new_reg->flags |= KBASE_REG_CPU_CACHED;
-
- /* alloc now owns the tb */
- owns_tb = 0;
-
- if (kbase_add_va_region(kctx, new_reg, vma->vm_start, nr_pages, 1) != 0) {
- err = -ENOMEM;
- WARN_ON(1);
- goto out_no_va_region;
- }
-
- *reg = new_reg;
-
- /* map read only, noexec */
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_EXEC | VM_MAYEXEC);
- /* the rest of the flags is added by the cpu_mmap handler */
-
- dev_dbg(kctx->kbdev->dev, "%s done\n", __func__);
- return 0;
-
-out_no_va_region:
-out_no_alloc:
- kbase_free_alloced_region(new_reg);
-out_no_region:
- if (owns_tb) {
- kbase_device_trace_buffer_uninstall(kctx);
- vfree(tb);
- }
-out:
- return err;
-}
-
static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct *vma, struct kbase_va_region **const reg, void **const kmap_addr)
{
struct kbase_va_region *new_reg;
@@ -1916,14 +1888,15 @@ static int kbase_mmu_dump_mmap(struct kbase_context *kctx, struct vm_area_struct
goto out;
}
- new_reg = kbase_alloc_free_region(kctx, 0, nr_pages, KBASE_REG_ZONE_SAME_VA);
+ new_reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0, nr_pages,
+ KBASE_REG_ZONE_SAME_VA);
if (!new_reg) {
err = -ENOMEM;
WARN_ON(1);
goto out;
}
- new_reg->cpu_alloc = kbase_alloc_create(0, KBASE_MEM_TYPE_RAW);
+ new_reg->cpu_alloc = kbase_alloc_create(kctx, 0, KBASE_MEM_TYPE_RAW);
if (IS_ERR_OR_NULL(new_reg->cpu_alloc)) {
err = -ENOMEM;
new_reg->cpu_alloc = NULL;
@@ -2090,14 +2063,6 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
/* Illegal handle for direct map */
err = -EINVAL;
goto out_unlock;
- case PFN_DOWN(BASE_MEM_TRACE_BUFFER_HANDLE):
- err = kbase_trace_buffer_mmap(kctx, vma, &reg, &kaddr);
- if (0 != err)
- goto out_unlock;
- dev_dbg(dev, "kbase_trace_buffer_mmap ok\n");
- /* free the region on munmap */
- free_on_close = 1;
- break;
case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
/* MMU dump */
err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
@@ -2120,7 +2085,7 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
reg = kbase_region_tracker_find_region_enclosing_address(kctx,
(u64)vma->vm_pgoff << PAGE_SHIFT);
- if (reg && !(reg->flags & KBASE_REG_FREE)) {
+ if (!kbase_is_region_invalid_or_free(reg)) {
/* will this mapping overflow the size of the region? */
if (nr_pages > (reg->nr_pages -
(vma->vm_pgoff - reg->start_pfn))) {
@@ -2169,7 +2134,8 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
} /* default */
} /* switch */
- err = kbase_cpu_mmap(reg, vma, kaddr, nr_pages, aligned_offset, free_on_close);
+ err = kbase_cpu_mmap(kctx, reg, vma, kaddr, nr_pages, aligned_offset,
+ free_on_close);
if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
/* MMU dump - userspace should now have a reference on
@@ -2188,11 +2154,11 @@ out:
KBASE_EXPORT_TEST_API(kbase_mmap);
-static void kbasep_sync_mem_regions(struct kbase_context *kctx,
+void kbase_sync_mem_regions(struct kbase_context *kctx,
struct kbase_vmap_struct *map, enum kbase_sync_type dest)
{
size_t i;
- off_t const offset = (uintptr_t)map->gpu_addr & ~PAGE_MASK;
+ off_t const offset = map->offset_in_page;
size_t const page_count = PFN_UP(offset + map->size);
/* Sync first page */
@@ -2218,66 +2184,55 @@ static void kbasep_sync_mem_regions(struct kbase_context *kctx,
}
}
-void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
- unsigned long prot_request, struct kbase_vmap_struct *map)
+static int kbase_vmap_phy_pages(struct kbase_context *kctx,
+ struct kbase_va_region *reg, u64 offset_bytes, size_t size,
+ struct kbase_vmap_struct *map)
{
- struct kbase_va_region *reg;
unsigned long page_index;
- unsigned int offset = gpu_addr & ~PAGE_MASK;
- size_t page_count = PFN_UP(offset + size);
+ unsigned int offset_in_page = offset_bytes & ~PAGE_MASK;
+ size_t page_count = PFN_UP(offset_in_page + size);
struct tagged_addr *page_array;
struct page **pages;
void *cpu_addr = NULL;
pgprot_t prot;
size_t i;
- if (!size || !map)
- return NULL;
+ if (!size || !map || !reg->cpu_alloc || !reg->gpu_alloc)
+ return -EINVAL;
/* check if page_count calculation will wrap */
if (size > ((size_t)-1 / PAGE_SIZE))
- return NULL;
-
- kbase_gpu_vm_lock(kctx);
-
- reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE))
- goto out_unlock;
+ return -EINVAL;
- page_index = (gpu_addr >> PAGE_SHIFT) - reg->start_pfn;
+ page_index = offset_bytes >> PAGE_SHIFT;
/* check if page_index + page_count will wrap */
if (-1UL - page_count < page_index)
- goto out_unlock;
+ return -EINVAL;
if (page_index + page_count > kbase_reg_current_backed_size(reg))
- goto out_unlock;
+ return -ENOMEM;
if (reg->flags & KBASE_REG_DONT_NEED)
- goto out_unlock;
+ return -EINVAL;
- /* check access permissions can be satisfied
- * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR} */
- if ((reg->flags & prot_request) != prot_request)
- goto out_unlock;
+ prot = PAGE_KERNEL;
+ if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
+ /* Map uncached */
+ prot = pgprot_writecombine(prot);
+ }
page_array = kbase_get_cpu_phy_pages(reg);
if (!page_array)
- goto out_unlock;
+ return -ENOMEM;
pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
if (!pages)
- goto out_unlock;
+ return -ENOMEM;
for (i = 0; i < page_count; i++)
- pages[i] = phys_to_page(as_phys_addr_t(page_array[page_index +
- i]));
+ pages[i] = as_page(page_array[page_index + i]);
- prot = PAGE_KERNEL;
- if (!(reg->flags & KBASE_REG_CPU_CACHED)) {
- /* Map uncached */
- prot = pgprot_writecombine(prot);
- }
/* Note: enforcing a RO prot_request onto prot is not done, since:
* - CPU-arch-specific integration required
* - kbase_vmap() requires no access checks to be made/enforced */
@@ -2287,26 +2242,66 @@ void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
kfree(pages);
if (!cpu_addr)
- goto out_unlock;
+ return -ENOMEM;
- map->gpu_addr = gpu_addr;
- map->cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ map->offset_in_page = offset_in_page;
+ map->cpu_alloc = reg->cpu_alloc;
map->cpu_pages = &kbase_get_cpu_phy_pages(reg)[page_index];
- map->gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+ map->gpu_alloc = reg->gpu_alloc;
map->gpu_pages = &kbase_get_gpu_phy_pages(reg)[page_index];
- map->addr = (void *)((uintptr_t)cpu_addr + offset);
+ map->addr = (void *)((uintptr_t)cpu_addr + offset_in_page);
map->size = size;
map->sync_needed = ((reg->flags & KBASE_REG_CPU_CACHED) != 0) &&
!kbase_mem_is_imported(map->gpu_alloc->type);
if (map->sync_needed)
- kbasep_sync_mem_regions(kctx, map, KBASE_SYNC_TO_CPU);
- kbase_gpu_vm_unlock(kctx);
+ kbase_sync_mem_regions(kctx, map, KBASE_SYNC_TO_CPU);
- return map->addr;
+ return 0;
+}
+
+void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
+ unsigned long prot_request, struct kbase_vmap_struct *map)
+{
+ struct kbase_va_region *reg;
+ void *addr = NULL;
+ u64 offset_bytes;
+ struct kbase_mem_phy_alloc *cpu_alloc;
+ struct kbase_mem_phy_alloc *gpu_alloc;
+ int err;
+
+ kbase_gpu_vm_lock(kctx);
+
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx,
+ gpu_addr);
+ if (kbase_is_region_invalid_or_free(reg))
+ goto out_unlock;
+
+ /* check access permissions can be satisfied
+ * Intended only for checking KBASE_REG_{CPU,GPU}_{RD,WR}
+ */
+ if ((reg->flags & prot_request) != prot_request)
+ goto out_unlock;
+
+ offset_bytes = gpu_addr - (reg->start_pfn << PAGE_SHIFT);
+ cpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
+ gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
+
+ err = kbase_vmap_phy_pages(kctx, reg, offset_bytes, size, map);
+ if (err < 0)
+ goto fail_vmap_phy_pages;
+
+ addr = map->addr;
out_unlock:
kbase_gpu_vm_unlock(kctx);
+ return addr;
+
+fail_vmap_phy_pages:
+ kbase_gpu_vm_unlock(kctx);
+ kbase_mem_phy_alloc_put(cpu_alloc);
+ kbase_mem_phy_alloc_put(gpu_alloc);
+
return NULL;
}
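
For reference, a typical caller of the refactored pair looks like the sketch
below (example_read_u32 is illustrative, not part of the driver):
kbase_vmap_prot() performs the region lookup, the permission check and any
sync-to-CPU before returning, and kbase_vunmap() syncs back to the device
and drops the allocation references taken here.

    static int example_read_u32(struct kbase_context *kctx, u64 gpu_addr,
    			    u32 *out)
    {
    	struct kbase_vmap_struct map;
    	u32 *cpu_ptr;

    	cpu_ptr = kbase_vmap_prot(kctx, gpu_addr, sizeof(*out),
    				  KBASE_REG_CPU_RD, &map);
    	if (!cpu_ptr)
    		return -EINVAL;

    	*out = *cpu_ptr;		/* sync-to-CPU already done if needed */
    	kbase_vunmap(kctx, &map);	/* sync back, drop alloc refs */
    	return 0;
    }
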
@@ -2322,22 +2317,29 @@ void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
}
KBASE_EXPORT_TEST_API(kbase_vmap);
-void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+static void kbase_vunmap_phy_pages(struct kbase_context *kctx,
+ struct kbase_vmap_struct *map)
{
void *addr = (void *)((uintptr_t)map->addr & PAGE_MASK);
vunmap(addr);
if (map->sync_needed)
- kbasep_sync_mem_regions(kctx, map, KBASE_SYNC_TO_DEVICE);
- map->gpu_addr = 0;
- map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
- map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
+ kbase_sync_mem_regions(kctx, map, KBASE_SYNC_TO_DEVICE);
+
+ map->offset_in_page = 0;
map->cpu_pages = NULL;
map->gpu_pages = NULL;
map->addr = NULL;
map->size = 0;
map->sync_needed = false;
}
+
+void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map)
+{
+ kbase_vunmap_phy_pages(kctx, map);
+ map->cpu_alloc = kbase_mem_phy_alloc_put(map->cpu_alloc);
+ map->gpu_alloc = kbase_mem_phy_alloc_put(map->gpu_alloc);
+}
KBASE_EXPORT_TEST_API(kbase_vunmap);
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages)
@@ -2467,7 +2469,8 @@ void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_
handle->size = size;
- reg = kbase_alloc_free_region(kctx, 0, pages, KBASE_REG_ZONE_SAME_VA);
+ reg = kbase_alloc_free_region(&kctx->reg_rbtree_same, 0, pages,
+ KBASE_REG_ZONE_SAME_VA);
if (!reg)
goto no_reg;
@@ -2475,7 +2478,7 @@ void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_
if (kbase_update_region_flags(kctx, reg, flags) != 0)
goto invalid_flags;
- reg->cpu_alloc = kbase_alloc_create(pages, KBASE_MEM_TYPE_RAW);
+ reg->cpu_alloc = kbase_alloc_create(kctx, pages, KBASE_MEM_TYPE_RAW);
if (IS_ERR_OR_NULL(reg->cpu_alloc))
goto no_alloc;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
index 301fdc33ea1edf..a8a52a724c226b 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_linux.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010, 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010, 2012-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -37,14 +37,84 @@ struct kbase_hwc_dma_mapping {
size_t size;
};
+/**
+ * kbase_mem_alloc - Create a new allocation for GPU
+ *
+ * @kctx: The kernel context
+ * @va_pages: The number of pages of virtual address space to reserve
+ * @commit_pages: The number of physical pages to allocate upfront
+ * @extent: The number of extra pages to allocate on each GPU fault which
+ * grows the region.
+ * @flags: bitmask of BASE_MEM_* flags to convey special requirements &
+ * properties for the new allocation.
+ * @gpu_va: Start address of the memory region which was allocated from GPU
+ * virtual address space.
+ *
+ * Return: Pointer to the new VA region on success, NULL on failure
+ */
struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
u64 *gpu_va);
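
A minimal, illustrative call sequence for kbase_mem_alloc() under the
documented contract (the flag choice and the helper name example_alloc are
assumptions, not from this patch):

    static int example_alloc(struct kbase_context *kctx, u64 *gpu_va_out)
    {
    	u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_CPU_WR |
    		    BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;
    	struct kbase_va_region *reg;

    	/* Reserve 16 VA pages, commit all 16 upfront, no growth extent */
    	reg = kbase_mem_alloc(kctx, 16, 16, 0, &flags, gpu_va_out);
    	if (!reg)
    		return -ENOMEM;

    	/* *gpu_va_out holds the GPU VA; flags echo the final properties */
    	return 0;
    }
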
-int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages);
+
+/**
+ * kbase_mem_query - Query properties of a GPU memory region
+ *
+ * @kctx: The kernel context
+ * @gpu_addr: A GPU address contained within the memory region
+ * @query: The type of query, from the KBASE_MEM_QUERY_* flags: the amount
+ * of backing physical memory allocated so far for the region, the
+ * size of the region, or the flags associated with the region.
+ * @out: Pointer to the location to store the result of the query.
+ *
+ * Return: 0 on success or error code
+ */
+int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query,
+ u64 *const out);
+
+/**
+ * kbase_mem_import - Import the external memory for use by the GPU
+ *
+ * @kctx: The kernel context
+ * @type: Type of external memory
+ * @phandle: Handle to the external memory, interpreted according to @type.
+ * @padding: Amount of extra VA pages to append to the imported buffer
+ * @gpu_va: GPU address assigned to the imported external memory
+ * @va_pages: Size of the memory region reserved from the GPU address space
+ * @flags: bitmask of BASE_MEM_* flags to convey special requirements &
+ * properties for the new allocation representing the external
+ * memory.
+ *
+ * Return: 0 on success or error code
+ */
int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
void __user *phandle, u32 padding, u64 *gpu_va, u64 *va_pages,
u64 *flags);
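
An illustrative import of a dma-buf file descriptor through this entry point
(example_import_umm is a hypothetical wrapper; real callers arrive from the
ioctl layer, and uarg is assumed to point at an int fd in the caller's
address space):

    static int example_import_umm(struct kbase_context *kctx,
    			      void __user *uarg, u64 *gpu_va)
    {
    	u64 va_pages;
    	u64 flags = BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR;

    	/* type selects the dma-buf (UMM) path; no extra padding pages */
    	return kbase_mem_import(kctx, BASE_MEM_IMPORT_TYPE_UMM, uarg,
    				0, gpu_va, &va_pages, &flags);
    }
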
+
+/**
+ * kbase_mem_alias - Create a new allocation for GPU, aliasing one or more
+ * memory regions
+ *
+ * @kctx: The kernel context
+ * @flags: bitmask of BASE_MEM_* flags.
+ * @stride: Bytes between start of each memory region
+ * @nents: The number of regions to pack together into the alias
+ * @ai: Pointer to the struct containing the memory aliasing info
+ * @num_pages: Number of pages the alias will cover
+ *
+ * Return: 0 on failure, otherwise the GPU VA of the alias
+ */
u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
+
+/**
+ * kbase_mem_flags_change - Change the flags for a memory region
+ *
+ * @kctx: The kernel context
+ * @gpu_addr: A GPU address contained within the memory region to modify.
+ * @flags: The new flags to set
+ * @mask: Mask of the flags, from BASE_MEM_*, to modify.
+ *
+ * Return: 0 on success or error code
+ */
int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
/**
@@ -58,10 +128,19 @@ int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned in
*/
int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages);
+/**
+ * kbase_mmap - The mmap method, invoked when the mmap system call is issued
+ * on the device file /dev/malixx.
+ * @file: Pointer to the device file /dev/malixx instance.
+ * @vma: Pointer to the struct describing where the GPU allocation will be
+ * mapped in the virtual address space of the CPU.
+ *
+ * Return: 0 on success or error code
+ */
int kbase_mmap(struct file *file, struct vm_area_struct *vma);
/**
- * kbase_mem_evictable_init - Initialize the Ephemeral memory the eviction
+ * kbase_mem_evictable_init - Initialize the Ephemeral memory eviction
* mechanism.
* @kctx: The kbase context to initialize.
*
@@ -127,7 +206,7 @@ int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);
bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
struct kbase_vmap_struct {
- u64 gpu_addr;
+ off_t offset_in_page;
struct kbase_mem_phy_alloc *cpu_alloc;
struct kbase_mem_phy_alloc *gpu_alloc;
struct tagged_addr *cpu_pages;
@@ -242,4 +321,127 @@ void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *han
extern const struct vm_operations_struct kbase_vm_ops;
+/**
+ * kbase_sync_mem_regions - Perform the cache maintenance for the kernel mode
+ * CPU mapping.
+ * @kctx: Context the CPU mapping belongs to.
+ * @map: Structure describing the CPU mapping, setup previously by the
+ * kbase_vmap() call.
+ * @dest: Indicates the type of maintenance required (i.e. flush or invalidate)
+ *
+ * Note: The caller shall ensure that the CPU mapping is not revoked and
+ * remains active whilst the maintenance is in progress.
+ */
+void kbase_sync_mem_regions(struct kbase_context *kctx,
+ struct kbase_vmap_struct *map, enum kbase_sync_type dest);
+
+/**
+ * kbase_mem_shrink_cpu_mapping - Shrink the CPU mapping(s) of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Shrink (or completely remove) all CPU mappings which reference the shrunk
+ * part of the allocation.
+ */
+void kbase_mem_shrink_cpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_mem_shrink_gpu_mapping - Shrink the GPU mapping of an allocation
+ * @kctx: Context the region belongs to
+ * @reg: The GPU region or NULL if there isn't one
+ * @new_pages: The number of pages after the shrink
+ * @old_pages: The number of pages before the shrink
+ *
+ * Return: 0 on success, negative -errno on error
+ *
+ * Unmap the shrunk pages from the GPU mapping. Note that the size of the
+ * region itself is unmodified, as we still need to reserve the VA; only the
+ * page tables will be modified by this function.
+ */
+int kbase_mem_shrink_gpu_mapping(struct kbase_context *kctx,
+ struct kbase_va_region *reg,
+ u64 new_pages, u64 old_pages);
+
+/**
+ * kbase_phy_alloc_mapping_term - Terminate the kernel side mapping of a
+ * physical allocation
+ * @kctx: The kernel base context associated with the mapping
+ * @alloc: Pointer to the allocation to terminate
+ *
+ * This function will unmap the kernel mapping, and free any structures used to
+ * track it.
+ */
+void kbase_phy_alloc_mapping_term(struct kbase_context *kctx,
+ struct kbase_mem_phy_alloc *alloc);
+
+/**
+ * kbase_phy_alloc_mapping_get - Get a kernel-side CPU pointer to the permanent
+ * mapping of a physical allocation
+ * @kctx: The kernel base context @gpu_addr will be looked up in
+ * @gpu_addr: The gpu address to lookup for the kernel-side CPU mapping
+ * @out_kern_mapping: Pointer to storage for a struct kbase_vmap_struct pointer
+ * which will be used for a call to
+ * kbase_phy_alloc_mapping_put()
+ *
+ * Return: Pointer to a kernel-side accessible location that directly
+ * corresponds to @gpu_addr, or NULL on failure
+ *
+ * Looks up @gpu_addr to retrieve the CPU pointer that can be used to access
+ * that location kernel-side. Only certain kinds of memory have a permanent
+ * kernel mapping, refer to the internal functions
+ * kbase_reg_needs_kernel_mapping() and kbase_phy_alloc_mapping_init() for more
+ * information.
+ *
+ * If this function succeeds, a CPU access to the returned pointer will access
+ * the actual location represented by @gpu_addr. That is, the return value does
+ * not require any offset added to it to access the location specified in
+ * @gpu_addr
+ *
+ * The client must take care to either apply any necessary sync operations when
+ * accessing the data, or ensure that the enclosing region was coherent with
+ * the GPU, or uncached in the CPU.
+ *
+ * The refcount on the physical allocations backing the region are taken, so
+ * that they do not disappear whilst the client is accessing it. Once the
+ * client has finished accessing the memory, it must be released with a call to
+ * kbase_phy_alloc_mapping_put()
+ *
+ * Whilst this is expected to execute quickly (the mapping was already set up
+ * when the physical allocation was created), the call is not IRQ-safe due to
+ * the region lookup involved.
+ *
+ * A NULL return may indicate that:
+ * - a userside process has freed the allocation, and so @gpu_addr is no longer
+ * valid
+ * - the region containing @gpu_addr does not support a permanent kernel mapping
+ */
+void *kbase_phy_alloc_mapping_get(struct kbase_context *kctx, u64 gpu_addr,
+ struct kbase_vmap_struct **out_kern_mapping);
+
+/**
+ * kbase_phy_alloc_mapping_put - Put a reference to the kernel-side mapping of a
+ * physical allocation
+ * @kctx: The kernel base context associated with the mapping
+ * @kern_mapping: Pointer to the struct kbase_vmap_struct obtained from a call
+ * to kbase_phy_alloc_mapping_get()
+ *
+ * Releases the reference to the allocations backing @kern_mapping that was
+ * obtained through a call to kbase_phy_alloc_mapping_get(). This must be used
+ * when the client no longer needs to access the kernel-side CPU pointer.
+ *
+ * If this was the last reference on the underlying physical allocations, they
+ * will go through the normal allocation free steps, which also includes an
+ * unmap of the permanent kernel mapping for those allocations.
+ *
+ * Due to these operations, the function is not IRQ-safe. However it is
+ * expected to execute quickly in the normal case, i.e. when the region holding
+ * the physical allocation is still present.
+ */
+void kbase_phy_alloc_mapping_put(struct kbase_context *kctx,
+ struct kbase_vmap_struct *kern_mapping);
+
#endif /* _KBASE_MEM_LINUX_H_ */
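
The get/put pair documented above follows the usual acquire/release
discipline. A hedged usage sketch (everything except the two API calls is
hypothetical):

    static int example_peek(struct kbase_context *kctx, u64 gpu_addr, u32 *out)
    {
    	struct kbase_vmap_struct *mapping;
    	u32 *ptr;

    	ptr = kbase_phy_alloc_mapping_get(kctx, gpu_addr, &mapping);
    	if (!ptr)
    		return -EINVAL;	/* freed, or no permanent kernel mapping */

    	*out = *ptr;	/* caller handles any sync/coherency needs */

    	kbase_phy_alloc_mapping_put(kctx, mapping);
    	return 0;
    }
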
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
index 0c2b70bcfbf5c5..70116030f233ea 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_lowlevel.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2014,2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2014,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -53,11 +53,50 @@ struct tagged_addr { phys_addr_t tagged_addr; };
#define HUGE_HEAD (1u << 1)
#define FROM_PARTIAL (1u << 2)
+/*
+ * Note: if the macro for converting a physical address to a page is not
+ * defined in the kernel itself, define it here. This avoids build errors
+ * reported on some architectures.
+ */
+#ifndef phys_to_page
+#define phys_to_page(phys) (pfn_to_page((phys) >> PAGE_SHIFT))
+#endif
+
+/**
+ * as_phys_addr_t - Retrieve the physical address from a tagged address by
+ * masking off the lower order 12 bits.
+ * @t: tagged address to be translated.
+ *
+ * Return: physical address corresponding to tagged address.
+ */
static inline phys_addr_t as_phys_addr_t(struct tagged_addr t)
{
return t.tagged_addr & PAGE_MASK;
}
+/**
+ * as_page - Retrieve the struct page from a tagged address
+ * @t: tagged address to be translated.
+ *
+ * Return: pointer to struct page corresponding to tagged address.
+ */
+static inline struct page *as_page(struct tagged_addr t)
+{
+ return phys_to_page(as_phys_addr_t(t));
+}
+
+/**
+ * as_tagged - Convert a physical address to the tagged address type; as
+ * there is no tag info present, the lower order 12 bits will be 0.
+ * @phys: physical address to be converted to tagged type
+ *
+ * This is used for 4KB physical pages allocated by the driver or for imported
+ * pages, and is needed because the physical page tracking object stores its
+ * references using the tagged address type rather than the type generally
+ * used for physical addresses.
+ *
+ * Return: address of tagged address type.
+ */
static inline struct tagged_addr as_tagged(phys_addr_t phys)
{
struct tagged_addr t;
@@ -66,6 +105,16 @@ static inline struct tagged_addr as_tagged(phys_addr_t phys)
return t;
}
+/**
+ * as_tagged_tag - Form the tagged address by storing the tag or metadata in
+ * the lower order 12 bits of the physical address
+ * @phys: physical address to be converted to tagged address
+ * @tag: tag to be stored along with the physical address.
+ *
+ * The tag info is used when the pages are freed.
+ *
+ * Return: tagged address storing physical address & tag.
+ */
static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
{
struct tagged_addr t;
@@ -74,11 +123,26 @@ static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
return t;
}
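
As an illustration of how these tag bits compose, the 512 entries covering
one unsplit 2MB page would be built as below (mirroring the loop added to
kbase_mem_pool_alloc_pages_locked() later in this patch; the helper name is
hypothetical):

    static inline void example_tag_huge_page(phys_addr_t base,
    					 struct tagged_addr *pages)
    {
    	int j;

    	/* First 4KB page carries both the head marker and the huge bit */
    	pages[0] = as_tagged_tag(base, HUGE_HEAD | HUGE_PAGE);
    	for (j = 1; j < 512; j++)
    		pages[j] = as_tagged_tag(base + PAGE_SIZE * j, HUGE_PAGE);
    }
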
+/**
+ * is_huge - Check whether the physical page is one of the 512 4KB pages of a
+ * large page that was not split for partial use
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if page belongs to large page, or false
+ */
static inline bool is_huge(struct tagged_addr t)
{
return t.tagged_addr & HUGE_PAGE;
}
+/**
+ * is_huge_head - Check whether the physical page is the first of the 512
+ * 4KB pages within a large page that was not split for
+ * partial use
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if page is the first page of a large page, or false
+ */
static inline bool is_huge_head(struct tagged_addr t)
{
int mask = HUGE_HEAD | HUGE_PAGE;
@@ -86,6 +150,14 @@ static inline bool is_huge_head(struct tagged_addr t)
return mask == (t.tagged_addr & mask);
}
+/**
+ * is_partial - Check whether the physical page is one of the 512 pages of a
+ * large page that was split into 4KB pages to be used
+ * partially for allocations >= 2 MB in size.
+ * @t: tagged address storing the tag in the lower order bits.
+ *
+ * Return: true if page was taken from large page used partially, or false
+ */
static inline bool is_partial(struct tagged_addr t)
{
return t.tagged_addr & FROM_PARTIAL;
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
index 574f1d51cccf7f..0f91be17a81b7b 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_pool.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -39,16 +39,6 @@
#define NOT_DIRTY false
#define NOT_RECLAIMED false
-static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
-{
- spin_lock(&pool->pool_lock);
-}
-
-static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
-{
- spin_unlock(&pool->pool_lock);
-}
-
static size_t kbase_mem_pool_capacity(struct kbase_mem_pool *pool)
{
ssize_t max_size = kbase_mem_pool_max_size(pool);
@@ -177,12 +167,6 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
gfp = GFP_HIGHUSER | __GFP_ZERO;
#endif
- if (current->flags & PF_KTHREAD) {
- /* Don't trigger OOM killer from kernel threads, e.g. when
- * growing memory on GPU page fault */
- gfp |= __GFP_NORETRY;
- }
-
	/* don't warn on higher order failures */
if (pool->order)
gfp |= __GFP_NOWARN;
@@ -255,12 +239,33 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
struct page *p;
size_t i;
+ kbase_mem_pool_lock(pool);
+
+ pool->dont_reclaim = true;
for (i = 0; i < nr_to_grow; i++) {
+ if (pool->dying) {
+ pool->dont_reclaim = false;
+ kbase_mem_pool_shrink_locked(pool, nr_to_grow);
+ kbase_mem_pool_unlock(pool);
+
+ return -ENOMEM;
+ }
+ kbase_mem_pool_unlock(pool);
+
p = kbase_mem_alloc_page(pool);
- if (!p)
+ if (!p) {
+ kbase_mem_pool_lock(pool);
+ pool->dont_reclaim = false;
+ kbase_mem_pool_unlock(pool);
+
return -ENOMEM;
- kbase_mem_pool_add(pool, p);
+ }
+
+ kbase_mem_pool_lock(pool);
+ kbase_mem_pool_add_locked(pool, p);
}
+ pool->dont_reclaim = false;
+ kbase_mem_pool_unlock(pool);
return 0;
}
@@ -312,10 +317,19 @@ static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
struct shrink_control *sc)
{
struct kbase_mem_pool *pool;
+ size_t pool_size;
pool = container_of(s, struct kbase_mem_pool, reclaim);
- pool_dbg(pool, "reclaim count: %zu\n", kbase_mem_pool_size(pool));
- return kbase_mem_pool_size(pool);
+
+ kbase_mem_pool_lock(pool);
+ if (pool->dont_reclaim && !pool->dying) {
+ kbase_mem_pool_unlock(pool);
+ return 0;
+ }
+ pool_size = kbase_mem_pool_size(pool);
+ kbase_mem_pool_unlock(pool);
+
+ return pool_size;
}
static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
@@ -326,9 +340,17 @@ static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
pool = container_of(s, struct kbase_mem_pool, reclaim);
+ kbase_mem_pool_lock(pool);
+ if (pool->dont_reclaim && !pool->dying) {
+ kbase_mem_pool_unlock(pool);
+ return 0;
+ }
+
pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
- freed = kbase_mem_pool_shrink(pool, sc->nr_to_scan);
+ freed = kbase_mem_pool_shrink_locked(pool, sc->nr_to_scan);
+
+ kbase_mem_pool_unlock(pool);
pool_dbg(pool, "reclaim freed %ld pages\n", freed);
@@ -357,6 +379,7 @@ int kbase_mem_pool_init(struct kbase_mem_pool *pool,
pool->order = order;
pool->kbdev = kbdev;
pool->next_pool = next_pool;
+ pool->dying = false;
spin_lock_init(&pool->pool_lock);
INIT_LIST_HEAD(&pool->page_list);
@@ -381,12 +404,20 @@ int kbase_mem_pool_init(struct kbase_mem_pool *pool,
return 0;
}
+void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
+{
+ kbase_mem_pool_lock(pool);
+ pool->dying = true;
+ kbase_mem_pool_unlock(pool);
+}
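
The dying flag exists so that an in-flight kbase_mem_pool_grow() bails out
instead of racing a pool that is being torn down; the expected shutdown
ordering is therefore (sketch only, the surrounding teardown is
hypothetical):

    static void example_pool_teardown(struct kbase_mem_pool *pool)
    {
    	/* 1. Stop growth: grow() sees pool->dying and returns -ENOMEM */
    	kbase_mem_pool_mark_dying(pool);

    	/* 2. ...quiesce remaining users of the pool here... */

    	/* 3. Spill to next_pool, then free leftover pages to the kernel */
    	kbase_mem_pool_term(pool);
    }
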
+
void kbase_mem_pool_term(struct kbase_mem_pool *pool)
{
struct kbase_mem_pool *next_pool = pool->next_pool;
- struct page *p;
+ struct page *p, *tmp;
size_t nr_to_spill = 0;
LIST_HEAD(spill_list);
+ LIST_HEAD(free_list);
int i;
pool_dbg(pool, "terminate()\n");
@@ -404,7 +435,6 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
/* Zero pages first without holding the next_pool lock */
for (i = 0; i < nr_to_spill; i++) {
p = kbase_mem_pool_remove_locked(pool);
- kbase_mem_pool_zero_page(pool, p);
list_add(&p->lru, &spill_list);
}
}
@@ -412,18 +442,26 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
while (!kbase_mem_pool_is_empty(pool)) {
/* Free remaining pages to kernel */
p = kbase_mem_pool_remove_locked(pool);
- kbase_mem_pool_free_page(pool, p);
+ list_add(&p->lru, &free_list);
}
kbase_mem_pool_unlock(pool);
if (next_pool && nr_to_spill) {
+ list_for_each_entry(p, &spill_list, lru)
+ kbase_mem_pool_zero_page(pool, p);
+
/* Add new page list to next_pool */
kbase_mem_pool_add_list(next_pool, &spill_list, nr_to_spill);
pool_dbg(pool, "terminate() spilled %zu pages\n", nr_to_spill);
}
+ list_for_each_entry_safe(p, tmp, &free_list, lru) {
+ list_del_init(&p->lru);
+ kbase_mem_pool_free_page(pool, p);
+ }
+
pool_dbg(pool, "terminated\n");
}
@@ -444,6 +482,21 @@ struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
return NULL;
}
+struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
+{
+ struct page *p;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ pool_dbg(pool, "alloc_locked()\n");
+ p = kbase_mem_pool_remove_locked(pool);
+
+ if (p)
+ return p;
+
+ return NULL;
+}
+
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
bool dirty)
{
@@ -466,6 +519,25 @@ void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
}
}
+void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
+ bool dirty)
+{
+ pool_dbg(pool, "free_locked()\n");
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ if (!kbase_mem_pool_is_full(pool)) {
+ /* Add to our own pool */
+ if (dirty)
+ kbase_mem_pool_sync_page(pool, p);
+
+ kbase_mem_pool_add_locked(pool, p);
+ } else {
+ /* Free page */
+ kbase_mem_pool_free_page(pool, p);
+ }
+}
+
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
struct tagged_addr *pages, bool partial_allowed)
{
@@ -543,7 +615,6 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
done:
pool_dbg(pool, "alloc_pages(%zu) done\n", i);
-
return i;
err_rollback:
@@ -551,6 +622,49 @@ err_rollback:
return err;
}
+int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
+ size_t nr_4k_pages, struct tagged_addr *pages)
+{
+ struct page *p;
+ size_t i;
+ size_t nr_pages_internal;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ nr_pages_internal = nr_4k_pages / (1u << (pool->order));
+
+ if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+ return -EINVAL;
+
+ pool_dbg(pool, "alloc_pages_locked(4k=%zu):\n", nr_4k_pages);
+ pool_dbg(pool, "alloc_pages_locked(internal=%zu):\n",
+ nr_pages_internal);
+
+ if (kbase_mem_pool_size(pool) < nr_pages_internal) {
+ pool_dbg(pool, "Failed alloc\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < nr_pages_internal; i++) {
+ int j;
+
+ p = kbase_mem_pool_remove_locked(pool);
+ if (pool->order) {
+ *pages++ = as_tagged_tag(page_to_phys(p),
+ HUGE_HEAD | HUGE_PAGE);
+ for (j = 1; j < (1u << pool->order); j++) {
+ *pages++ = as_tagged_tag(page_to_phys(p) +
+ PAGE_SIZE * j,
+ HUGE_PAGE);
+ }
+ } else {
+ *pages++ = as_tagged(page_to_phys(p));
+ }
+ }
+
+ return nr_4k_pages;
+}
+
static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
size_t nr_pages, struct tagged_addr *pages,
bool zero, bool sync)
@@ -572,7 +686,7 @@ static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
continue;
if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
- p = phys_to_page(as_phys_addr_t(pages[i]));
+ p = as_page(pages[i]);
if (zero)
kbase_mem_pool_zero_page(pool, p);
else if (sync)
@@ -591,6 +705,48 @@ static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
nr_pages, nr_to_pool);
}
+static void kbase_mem_pool_add_array_locked(struct kbase_mem_pool *pool,
+ size_t nr_pages, struct tagged_addr *pages,
+ bool zero, bool sync)
+{
+ struct page *p;
+ size_t nr_to_pool = 0;
+ LIST_HEAD(new_page_list);
+ size_t i;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ if (!nr_pages)
+ return;
+
+ pool_dbg(pool, "add_array_locked(%zu, zero=%d, sync=%d):\n",
+ nr_pages, zero, sync);
+
+ /* Zero/sync pages first */
+ for (i = 0; i < nr_pages; i++) {
+ if (unlikely(!as_phys_addr_t(pages[i])))
+ continue;
+
+ if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+ p = as_page(pages[i]);
+ if (zero)
+ kbase_mem_pool_zero_page(pool, p);
+ else if (sync)
+ kbase_mem_pool_sync_page(pool, p);
+
+ list_add(&p->lru, &new_page_list);
+ nr_to_pool++;
+ }
+ pages[i] = as_tagged(0);
+ }
+
+ /* Add new page list to pool */
+ kbase_mem_pool_add_list_locked(pool, &new_page_list, nr_to_pool);
+
+ pool_dbg(pool, "add_array_locked(%zu) added %zu pages\n",
+ nr_pages, nr_to_pool);
+}
+
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
struct tagged_addr *pages, bool dirty, bool reclaimed)
{
@@ -632,7 +788,7 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
continue;
}
- p = phys_to_page(as_phys_addr_t(pages[i]));
+ p = as_page(pages[i]);
kbase_mem_pool_free_page(pool, p);
pages[i] = as_tagged(0);
@@ -640,3 +796,47 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}
+
+
+void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
+ size_t nr_pages, struct tagged_addr *pages, bool dirty,
+ bool reclaimed)
+{
+ struct page *p;
+ size_t nr_to_pool;
+ LIST_HEAD(to_pool_list);
+ size_t i = 0;
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ pool_dbg(pool, "free_pages_locked(%zu):\n", nr_pages);
+
+ if (!reclaimed) {
+ /* Add to this pool */
+ nr_to_pool = kbase_mem_pool_capacity(pool);
+ nr_to_pool = min(nr_pages, nr_to_pool);
+
+ kbase_mem_pool_add_array_locked(pool, nr_pages, pages, false,
+ dirty);
+
+ i += nr_to_pool;
+ }
+
+ /* Free any remaining pages to kernel */
+ for (; i < nr_pages; i++) {
+ if (unlikely(!as_phys_addr_t(pages[i])))
+ continue;
+
+ if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+ pages[i] = as_tagged(0);
+ continue;
+ }
+
+ p = as_page(pages[i]);
+
+ kbase_mem_pool_free_page(pool, p);
+ pages[i] = as_tagged(0);
+ }
+
+ pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
+}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
index cb968f65fc5c4b..43b0f6c0321102 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mem_profile_debugfs_buf_size.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -32,7 +32,8 @@
* The size of the buffer to accumulate the histogram report text in
* @see @ref CCTXP_HIST_BUF_SIZE_MAX_LENGTH_REPORT
*/
-#define KBASE_MEM_PROFILE_MAX_BUF_SIZE ((size_t) (64 + ((80 + (56 * 64)) * 15) + 56))
+#define KBASE_MEM_PROFILE_MAX_BUF_SIZE \
+ ((size_t) (64 + ((80 + (56 * 64)) * 34) + 56))
#endif /*_KBASE_MEM_PROFILE_DEBUGFS_BUF_SIZE_H_*/
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu.c b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
index 65b7da03d0d7a9..e2d29148ff1318 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mmu.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -72,6 +72,19 @@ static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
u64 vpfn, size_t nr, bool sync);
/**
+ * kbase_mmu_flush_invalidate_no_ctx() - Flush and invalidate the GPU caches.
+ * @kbdev: Device pointer.
+ * @vpfn: The virtual page frame number to start the flush on.
+ * @nr: The number of pages to flush.
+ * @sync: Set if the operation should be synchronous or not.
+ * @as_nr: GPU address space number for which flush + invalidate is required.
+ *
+ * This is used for MMU tables which do not belong to a user space context.
+ */
+static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev,
+ u64 vpfn, size_t nr, bool sync, int as_nr);
+
+/**
* kbase_mmu_sync_pgd - sync page directory to memory
* @kbdev: Device pointer.
* @handle: Address of DMA region.
@@ -103,6 +116,9 @@ static void kbase_mmu_sync_pgd(struct kbase_device *kbdev,
static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
struct kbase_as *as, const char *reason_str);
+static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags);
/**
* reg_grow_calc_extra_pages() - Calculate the number of backed pages to add to
@@ -120,7 +136,8 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
*
* Return: the number of backed pages to increase by
*/
-static size_t reg_grow_calc_extra_pages(struct kbase_va_region *reg, size_t fault_rel_pfn)
+static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
+ struct kbase_va_region *reg, size_t fault_rel_pfn)
{
size_t multiple = reg->extent;
size_t reg_current_size = kbase_reg_current_backed_size(reg);
@@ -128,7 +145,7 @@ static size_t reg_grow_calc_extra_pages(struct kbase_va_region *reg, size_t faul
size_t remainder;
if (!multiple) {
- dev_warn(reg->kctx->kbdev->dev,
+ dev_warn(kbdev->dev,
"VA Region 0x%llx extent was 0, allocator needs to set this properly for KBASE_REG_PF_GROW\n",
((unsigned long long)reg->start_pfn) << PAGE_SHIFT);
return minimum_extra;
@@ -172,21 +189,21 @@ static size_t reg_grow_calc_extra_pages(struct kbase_va_region *reg, size_t faul
}
#ifdef CONFIG_MALI_JOB_DUMP
-static void kbase_gpu_mmu_handle_write_faulting_as(struct kbase_context *kctx,
+static void kbase_gpu_mmu_handle_write_faulting_as(
struct kbase_device *kbdev,
struct kbase_as *faulting_as,
u64 start_pfn, size_t nr, u32 op)
{
mutex_lock(&kbdev->mmu_hw_mutex);
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
- kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx, start_pfn,
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, start_pfn,
nr, op, 1);
mutex_unlock(&kbdev->mmu_hw_mutex);
- kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
}
@@ -210,7 +227,7 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
/* Find region and check if it should be writable. */
region = kbase_region_tracker_find_region_enclosing_address(kctx,
faulting_as->fault_addr);
- if (!region || region->flags & KBASE_REG_FREE) {
+ if (kbase_is_region_invalid_or_free(region)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Memory is not mapped on the GPU");
@@ -224,23 +241,15 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
return;
}
- /* Capture handle and offset of the faulting write location
+ /* Capture addresses of faulting write location
* for job dumping if write tracking is enabled.
*/
if (kctx->gwt_enabled) {
u64 page_addr = faulting_as->fault_addr & PAGE_MASK;
- u64 offset = (page_addr >> PAGE_SHIFT) - region->start_pfn;
- u64 handle = region->start_pfn << PAGE_SHIFT;
bool found = false;
-
- if (KBASE_MEM_TYPE_IMPORTED_UMM == region->cpu_alloc->type)
- handle |= BIT(0);
-
/* Check if this write was already handled. */
list_for_each_entry(pos, &kctx->gwt_current_list, link) {
- if (handle == pos->handle &&
- offset >= pos->offset &&
- offset < pos->offset + pos->num_pages) {
+ if (page_addr == pos->page_addr) {
found = true;
break;
}
@@ -249,8 +258,8 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
if (!found) {
pos = kmalloc(sizeof(*pos), GFP_KERNEL);
if (pos) {
- pos->handle = handle;
- pos->offset = offset;
+ pos->region = region;
+ pos->page_addr = page_addr;
pos->num_pages = 1;
list_add(&pos->link, &kctx->gwt_current_list);
} else {
@@ -271,7 +280,7 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
else
op = AS_COMMAND_FLUSH_PT;
- kbase_gpu_mmu_handle_write_faulting_as(kctx, kbdev, faulting_as,
+ kbase_gpu_mmu_handle_write_faulting_as(kbdev, faulting_as,
fault_pfn, 1, op);
kbase_gpu_vm_unlock(kctx);
@@ -305,6 +314,203 @@ static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
}
#endif
+#define MAX_POOL_LEVEL 2
+
+/**
+ * page_fault_try_alloc - Try to allocate memory from a context pool
+ * @kctx: Context pointer
+ * @region: Region to grow
+ * @new_pages: Number of 4 kB pages to allocate
+ * @pages_to_grow: Pointer to variable to store number of outstanding pages on
+ * failure. This can be either 4 kB or 2 MB pages, depending on
+ * the number of pages requested.
+ * @grow_2mb_pool: Pointer to variable to store which pool needs to grow - true
+ * for 2 MB, false for 4 kB.
+ * @prealloc_sas: Pointer to kbase_sub_alloc structures
+ *
+ * This function will try to allocate as many pages as possible from the context
+ * pool, then if required will try to allocate the remaining pages from the
+ * device pool.
+ *
+ * This function will not allocate any new memory beyond what is already
+ * present in the context or device pools. This is because it is intended to be
+ * called with the vm_lock held, which could cause recursive locking if the
+ * allocation caused the out-of-memory killer to run.
+ *
+ * If 2 MB pages are enabled and new_pages is >= 2 MB then pages_to_grow will be
+ * a count of 2 MB pages, otherwise it will be a count of 4 kB pages.
+ *
+ * Return: true if successful, false on failure
+ */
+static bool page_fault_try_alloc(struct kbase_context *kctx,
+ struct kbase_va_region *region, size_t new_pages,
+ int *pages_to_grow, bool *grow_2mb_pool,
+ struct kbase_sub_alloc **prealloc_sas)
+{
+ struct tagged_addr *gpu_pages[MAX_POOL_LEVEL] = {NULL};
+ struct tagged_addr *cpu_pages[MAX_POOL_LEVEL] = {NULL};
+ size_t pages_alloced[MAX_POOL_LEVEL] = {0};
+ struct kbase_mem_pool *pool, *root_pool;
+ int pool_level = 0;
+ bool alloc_failed = false;
+ size_t pages_still_required;
+
+#ifdef CONFIG_MALI_2MB_ALLOC
+ if (new_pages >= (SZ_2M / SZ_4K)) {
+ root_pool = &kctx->lp_mem_pool;
+ *grow_2mb_pool = true;
+ } else {
+#endif
+ root_pool = &kctx->mem_pool;
+ *grow_2mb_pool = false;
+#ifdef CONFIG_MALI_2MB_ALLOC
+ }
+#endif
+
+ if (region->gpu_alloc != region->cpu_alloc)
+ new_pages *= 2;
+
+ pages_still_required = new_pages;
+
+ /* Determine how many pages are in the pools before trying to allocate.
+ * Don't attempt to allocate & free if the allocation can't succeed.
+ */
+ for (pool = root_pool; pool != NULL; pool = pool->next_pool) {
+ size_t pool_size_4k;
+
+ kbase_mem_pool_lock(pool);
+
+ pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
+ if (pool_size_4k >= pages_still_required)
+ pages_still_required = 0;
+ else
+ pages_still_required -= pool_size_4k;
+
+ kbase_mem_pool_unlock(pool);
+
+ if (!pages_still_required)
+ break;
+ }
+
+ if (pages_still_required) {
+ /* Insufficient pages in pools. Don't try to allocate - just
+ * request a grow.
+ */
+ *pages_to_grow = pages_still_required;
+
+ return false;
+ }
+
+ /* Since we've dropped the pool locks, the amount of memory in the pools
+ * may change between the above check and the actual allocation.
+ */
+ pool = root_pool;
+ for (pool_level = 0; pool_level < MAX_POOL_LEVEL; pool_level++) {
+ size_t pool_size_4k;
+ size_t pages_to_alloc_4k;
+ size_t pages_to_alloc_4k_per_alloc;
+
+ kbase_mem_pool_lock(pool);
+
+		/* Allocate as much as possible from this pool */
+ pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
+ pages_to_alloc_4k = MIN(new_pages, pool_size_4k);
+ if (region->gpu_alloc == region->cpu_alloc)
+ pages_to_alloc_4k_per_alloc = pages_to_alloc_4k;
+ else
+ pages_to_alloc_4k_per_alloc = pages_to_alloc_4k >> 1;
+
+ pages_alloced[pool_level] = pages_to_alloc_4k;
+ if (pages_to_alloc_4k) {
+ gpu_pages[pool_level] =
+ kbase_alloc_phy_pages_helper_locked(
+ region->gpu_alloc, pool,
+ pages_to_alloc_4k_per_alloc,
+ &prealloc_sas[0]);
+
+ if (!gpu_pages[pool_level]) {
+ alloc_failed = true;
+ } else if (region->gpu_alloc != region->cpu_alloc) {
+ cpu_pages[pool_level] =
+ kbase_alloc_phy_pages_helper_locked(
+ region->cpu_alloc, pool,
+ pages_to_alloc_4k_per_alloc,
+ &prealloc_sas[1]);
+
+ if (!cpu_pages[pool_level])
+ alloc_failed = true;
+ }
+ }
+
+ kbase_mem_pool_unlock(pool);
+
+ if (alloc_failed) {
+ WARN_ON(!new_pages);
+ WARN_ON(pages_to_alloc_4k >= new_pages);
+ WARN_ON(pages_to_alloc_4k_per_alloc >= new_pages);
+ break;
+ }
+
+ new_pages -= pages_to_alloc_4k;
+
+ if (!new_pages)
+ break;
+
+ pool = pool->next_pool;
+ if (!pool)
+ break;
+ }
+
+ if (new_pages) {
+ /* Allocation was unsuccessful */
+ int max_pool_level = pool_level;
+
+ pool = root_pool;
+
+ /* Free memory allocated so far */
+ for (pool_level = 0; pool_level <= max_pool_level;
+ pool_level++) {
+ kbase_mem_pool_lock(pool);
+
+ if (region->gpu_alloc != region->cpu_alloc) {
+ if (pages_alloced[pool_level] &&
+ cpu_pages[pool_level])
+ kbase_free_phy_pages_helper_locked(
+ region->cpu_alloc,
+ pool, cpu_pages[pool_level],
+ pages_alloced[pool_level]);
+ }
+
+ if (pages_alloced[pool_level] && gpu_pages[pool_level])
+ kbase_free_phy_pages_helper_locked(
+ region->gpu_alloc,
+ pool, gpu_pages[pool_level],
+ pages_alloced[pool_level]);
+
+ kbase_mem_pool_unlock(pool);
+
+ pool = pool->next_pool;
+ }
+
+ /*
+ * If the allocation failed despite there being enough memory in
+ * the pool, then just fail. Otherwise, try to grow the memory
+ * pool.
+ */
+ if (alloc_failed)
+ *pages_to_grow = 0;
+ else
+ *pages_to_grow = new_pages;
+
+ return false;
+ }
+
+ /* Allocation was successful. No pages to grow, return success. */
+ *pages_to_grow = 0;
+
+ return true;
+}
+
void page_fault_worker(struct work_struct *data)
{
u64 fault_pfn;
@@ -318,6 +524,10 @@ void page_fault_worker(struct work_struct *data)
struct kbase_va_region *region;
int err;
bool grown = false;
+ int pages_to_grow;
+ bool grow_2mb_pool;
+ struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+ int i;
faulting_as = container_of(data, struct kbase_as, work_pagefault);
fault_pfn = faulting_as->fault_addr >> PAGE_SHIFT;
@@ -336,11 +546,10 @@ void page_fault_worker(struct work_struct *data)
KBASE_DEBUG_ASSERT(kctx->kbdev == kbdev);
- if (unlikely(faulting_as->protected_mode))
- {
+ if (unlikely(faulting_as->protected_mode)) {
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Protected mode fault");
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
goto fault_done;
@@ -403,13 +612,26 @@ void page_fault_worker(struct work_struct *data)
goto fault_done;
}
+#ifdef CONFIG_MALI_2MB_ALLOC
+ /* Preallocate memory for the sub-allocation structs if necessary */
+ for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+ prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]), GFP_KERNEL);
+ if (!prealloc_sas[i]) {
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Failed pre-allocating memory for sub-allocations' metadata");
+ goto fault_done;
+ }
+ }
+#endif /* CONFIG_MALI_2MB_ALLOC */
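
The pre-allocation above exists because the structs are consumed later while kctx->mem_partials_lock, a spinlock, is held; a sleeping GFP_KERNEL kmalloc inside that critical section would be illegal. A reduced sketch of the prepare-then-lock idiom (names hypothetical):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct meta { int payload; };

    static int install_meta(spinlock_t *lock, struct meta **slot)
    {
        struct meta *m = kmalloc(sizeof(*m), GFP_KERNEL); /* may sleep: fine here */

        if (!m)
            return -ENOMEM;

        spin_lock(lock);
        *slot = m; /* the critical section itself never allocates */
        spin_unlock(lock);
        return 0;
    }
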
+
+page_fault_retry:
/* so we have a translation fault, let's see if it is for growable
* memory */
kbase_gpu_vm_lock(kctx);
region = kbase_region_tracker_find_region_enclosing_address(kctx,
faulting_as->fault_addr);
- if (!region || region->flags & KBASE_REG_FREE) {
+ if (kbase_is_region_invalid_or_free(region)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Memory is not mapped on the GPU");
@@ -451,7 +673,7 @@ void page_fault_worker(struct work_struct *data)
mutex_lock(&kbdev->mmu_hw_mutex);
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
/* [1] in case another page fault occurred while we were
* handling the (duplicate) page fault we need to ensure we
@@ -461,19 +683,19 @@ void page_fault_worker(struct work_struct *data)
* transaction (which should cause the other page fault to be
* raised again).
*/
- kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0,
AS_COMMAND_UNLOCK, 1);
mutex_unlock(&kbdev->mmu_hw_mutex);
- kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
goto fault_done;
}
- new_pages = reg_grow_calc_extra_pages(region, fault_rel_pfn);
+ new_pages = reg_grow_calc_extra_pages(kbdev, region, fault_rel_pfn);
/* cap to max vsize */
new_pages = min(new_pages, region->nr_pages - kbase_reg_current_backed_size(region));
@@ -482,34 +704,26 @@ void page_fault_worker(struct work_struct *data)
mutex_lock(&kbdev->mmu_hw_mutex);
/* Duplicate of a fault we've already handled, nothing to do */
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
/* See comment [1] about UNLOCK usage */
- kbase_mmu_hw_do_operation(kbdev, faulting_as, NULL, 0, 0,
+ kbase_mmu_hw_do_operation(kbdev, faulting_as, 0, 0,
AS_COMMAND_UNLOCK, 1);
mutex_unlock(&kbdev->mmu_hw_mutex);
- kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
goto fault_done;
}
- if (kbase_alloc_phy_pages_helper(region->gpu_alloc, new_pages) == 0) {
- if (region->gpu_alloc != region->cpu_alloc) {
- if (kbase_alloc_phy_pages_helper(
- region->cpu_alloc, new_pages) == 0) {
- grown = true;
- } else {
- kbase_free_phy_pages_helper(region->gpu_alloc,
- new_pages);
- }
- } else {
- grown = true;
- }
- }
+ pages_to_grow = 0;
+ spin_lock(&kctx->mem_partials_lock);
+ grown = page_fault_try_alloc(kctx, region, new_pages, &pages_to_grow,
+ &grow_2mb_pool, prealloc_sas);
+ spin_unlock(&kctx->mem_partials_lock);
if (grown) {
u64 pfn_offset;
@@ -528,7 +742,7 @@ void page_fault_worker(struct work_struct *data)
* so the no_flush version of insert_pages is used which allows
* us to unlock the MMU as we see fit.
*/
- err = kbase_mmu_insert_pages_no_flush(kctx,
+ err = kbase_mmu_insert_pages_no_flush(kbdev, &kctx->mmu,
region->start_pfn + pfn_offset,
&kbase_get_gpu_phy_pages(region)[pfn_offset],
new_pages, region->flags);
@@ -565,10 +779,10 @@ void page_fault_worker(struct work_struct *data)
* this stage a new IRQ might not be raised when the GPU finds
* a MMU IRQ is already pending.
*/
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
- kbase_mmu_hw_do_operation(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_do_operation(kbdev, faulting_as,
faulting_as->fault_addr >> PAGE_SHIFT,
new_pages,
op, 1);
@@ -577,7 +791,7 @@ void page_fault_worker(struct work_struct *data)
/* AS transaction end */
/* reenable this in the mask */
- kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_PAGE);
#ifdef CONFIG_MALI_JOB_DUMP
@@ -587,12 +801,13 @@ void page_fault_worker(struct work_struct *data)
pos = kmalloc(sizeof(*pos), GFP_KERNEL);
if (pos) {
- pos->handle = region->start_pfn << PAGE_SHIFT;
- pos->offset = pfn_offset;
+ pos->region = region;
+ pos->page_addr = (region->start_pfn +
+ pfn_offset) <<
+ PAGE_SHIFT;
pos->num_pages = new_pages;
list_add(&pos->link,
&kctx->gwt_current_list);
-
} else {
dev_warn(kbdev->dev, "kmalloc failure");
}
@@ -600,13 +815,43 @@ void page_fault_worker(struct work_struct *data)
#endif
kbase_gpu_vm_unlock(kctx);
} else {
- /* failed to extend, handle as a normal PF */
+ int ret = -ENOMEM;
+
kbase_gpu_vm_unlock(kctx);
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Page allocation failure");
+
+ /* If the memory pool was insufficient then grow it and retry.
+ * Otherwise fail the allocation.
+ */
+ if (pages_to_grow > 0) {
+#ifdef CONFIG_MALI_2MB_ALLOC
+ if (grow_2mb_pool) {
+ /* Round page requirement up to nearest 2 MB */
+ pages_to_grow = (pages_to_grow +
+ ((1 << kctx->lp_mem_pool.order) - 1))
+ >> kctx->lp_mem_pool.order;
+ ret = kbase_mem_pool_grow(&kctx->lp_mem_pool,
+ pages_to_grow);
+ } else {
+#endif
+ ret = kbase_mem_pool_grow(&kctx->mem_pool,
+ pages_to_grow);
+#ifdef CONFIG_MALI_2MB_ALLOC
+ }
+#endif
+ }
+ if (ret < 0) {
+ /* failed to extend, handle as a normal PF */
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as,
+ "Page allocation failure");
+ } else {
+ goto page_fault_retry;
+ }
}
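
The rounding in the 2 MB branch above is ceiling division by a power of two: with an order of 9 (one 2 MB page covers 512 pages of 4 kB), a shortfall of, say, 700 small pages becomes (700 + 511) >> 9 = 2 large pages. As a sketch:

    /* Add-then-shift ceiling division, as used for the 2 MB pool above. */
    static unsigned int round_up_to_order(unsigned int pages, unsigned int order)
    {
        return (pages + ((1u << order) - 1)) >> order;
    }
    /* round_up_to_order(700, 9) == 2 */
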
fault_done:
+ for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+ kfree(prealloc_sas[i]);
+
/*
* By this point, the fault was handled in some way,
* so release the ctx refcount
@@ -616,54 +861,57 @@ fault_done:
atomic_dec(&kbdev->faults_pending);
}
-phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx)
+static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut)
{
u64 *page;
int i;
struct page *p;
- int new_page_count __maybe_unused;
-
- KBASE_DEBUG_ASSERT(NULL != kctx);
- new_page_count = kbase_atomic_add_pages(1, &kctx->used_pages);
- kbase_atomic_add_pages(1, &kctx->kbdev->memdev.used_pages);
- p = kbase_mem_pool_alloc(&kctx->mem_pool);
+ p = kbase_mem_pool_alloc(&kbdev->mem_pool);
if (!p)
- goto sub_pages;
-
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kctx->id,
- (u64)new_page_count);
+ return 0;
page = kmap(p);
if (NULL == page)
goto alloc_free;
- kbase_process_page_usage_inc(kctx, 1);
+ /* If the MMU tables belong to a context then account the memory usage
+ * to that context, otherwise the MMU tables are device wide and are
+ * only accounted to the device.
+ */
+ if (mmut->kctx) {
+ int new_page_count;
+
+ new_page_count = kbase_atomic_add_pages(1,
+ &mmut->kctx->used_pages);
+ KBASE_TLSTREAM_AUX_PAGESALLOC(
+ mmut->kctx->id,
+ (u64)new_page_count);
+ kbase_process_page_usage_inc(mmut->kctx, 1);
+ }
+
+ kbase_atomic_add_pages(1, &kbdev->memdev.used_pages);
for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
- kctx->kbdev->mmu_mode->entry_invalidate(&page[i]);
+ kbdev->mmu_mode->entry_invalidate(&page[i]);
- kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+ kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
kunmap(p);
return page_to_phys(p);
alloc_free:
- kbase_mem_pool_free(&kctx->mem_pool, p, false);
-sub_pages:
- kbase_atomic_sub_pages(1, &kctx->used_pages);
- kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+ kbase_mem_pool_free(&kbdev->mem_pool, p, false);
return 0;
}
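
The reworked allocator charges the page to a context only when the table has one (mmut->kctx), so device-wide tables used by no-context address spaces are accounted to the device alone. The shape of that conditional accounting, sketched with hypothetical counters:

    #include <linux/atomic.h>

    struct dev_stats { atomic_t used_pages; };
    struct ctx_stats { atomic_t used_pages; };

    /* Charge one page to the device, and to a context only if one owns it. */
    static void charge_page(struct dev_stats *dev, struct ctx_stats *ctx)
    {
        if (ctx)
            atomic_inc(&ctx->used_pages); /* context-owned table */
        atomic_inc(&dev->used_pages);     /* always device-wide */
    }
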
-KBASE_EXPORT_TEST_API(kbase_mmu_alloc_pgd);
-
/* Given PGD PFN for level N, return PGD PFN for level N+1, allocating the
* new table from the pool if needed and possible
*/
-static int mmu_get_next_pgd(struct kbase_context *kctx,
+static int mmu_get_next_pgd(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
phys_addr_t *pgd, u64 vpfn, int level)
{
u64 *page;
@@ -671,9 +919,8 @@ static int mmu_get_next_pgd(struct kbase_context *kctx,
struct page *p;
KBASE_DEBUG_ASSERT(*pgd);
- KBASE_DEBUG_ASSERT(NULL != kctx);
- lockdep_assert_held(&kctx->mmu_lock);
+ lockdep_assert_held(&mmut->mmu_lock);
/*
* Architecture spec defines level-0 as being the top-most.
@@ -685,23 +932,24 @@ static int mmu_get_next_pgd(struct kbase_context *kctx,
p = pfn_to_page(PFN_DOWN(*pgd));
page = kmap(p);
if (NULL == page) {
- dev_warn(kctx->kbdev->dev, "mmu_get_next_pgd: kmap failure\n");
+ dev_warn(kbdev->dev, "%s: kmap failure\n", __func__);
return -EINVAL;
}
- target_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+ target_pgd = kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
if (!target_pgd) {
- target_pgd = kbase_mmu_alloc_pgd(kctx);
+ target_pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
if (!target_pgd) {
- dev_dbg(kctx->kbdev->dev, "mmu_get_next_pgd: kbase_mmu_alloc_pgd failure\n");
+ dev_dbg(kbdev->dev, "%s: kbase_mmu_alloc_pgd failure\n",
+ __func__);
kunmap(p);
return -ENOMEM;
}
- kctx->kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
+ kbdev->mmu_mode->entry_set_pte(&page[vpfn], target_pgd);
- kbase_mmu_sync_pgd(kctx->kbdev, kbase_dma_addr(p), PAGE_SIZE);
+ kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
/* Rely on the caller to update the address space flags. */
}
@@ -714,7 +962,8 @@ static int mmu_get_next_pgd(struct kbase_context *kctx,
/*
* Returns the PGD for the specified level of translation
*/
-static int mmu_get_pgd_at_level(struct kbase_context *kctx,
+static int mmu_get_pgd_at_level(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
u64 vpfn,
unsigned int level,
phys_addr_t *out_pgd)
@@ -722,14 +971,14 @@ static int mmu_get_pgd_at_level(struct kbase_context *kctx,
phys_addr_t pgd;
int l;
- lockdep_assert_held(&kctx->mmu_lock);
- pgd = kctx->pgd;
+ lockdep_assert_held(&mmut->mmu_lock);
+ pgd = mmut->pgd;
for (l = MIDGARD_MMU_TOPLEVEL; l < level; l++) {
- int err = mmu_get_next_pgd(kctx, &pgd, vpfn, l);
+ int err = mmu_get_next_pgd(kbdev, mmut, &pgd, vpfn, l);
/* Handle failure condition */
if (err) {
- dev_dbg(kctx->kbdev->dev,
+ dev_dbg(kbdev->dev,
"%s: mmu_get_next_pgd failure at level %d\n",
__func__, l);
return err;
@@ -741,27 +990,30 @@ static int mmu_get_pgd_at_level(struct kbase_context *kctx,
return 0;
}
-#define mmu_get_bottom_pgd(kctx, vpfn, out_pgd) \
- mmu_get_pgd_at_level((kctx), (vpfn), MIDGARD_MMU_BOTTOMLEVEL, (out_pgd))
-
+static int mmu_get_bottom_pgd(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ u64 vpfn,
+ phys_addr_t *out_pgd)
+{
+ return mmu_get_pgd_at_level(kbdev, mmut, vpfn, MIDGARD_MMU_BOTTOMLEVEL,
+ out_pgd);
+}
-static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx,
- u64 from_vpfn, u64 to_vpfn)
+static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ u64 from_vpfn, u64 to_vpfn)
{
phys_addr_t pgd;
u64 vpfn = from_vpfn;
struct kbase_mmu_mode const *mmu_mode;
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(0 != vpfn);
/* 64-bit address range is the max */
KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
KBASE_DEBUG_ASSERT(from_vpfn <= to_vpfn);
- lockdep_assert_held(&kctx->mmu_lock);
- lockdep_assert_held(&kctx->reg_lock);
+ lockdep_assert_held(&mmut->mmu_lock);
- mmu_mode = kctx->kbdev->mmu_mode;
+ mmu_mode = kbdev->mmu_mode;
while (vpfn < to_vpfn) {
unsigned int i;
@@ -776,7 +1028,7 @@ static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx,
count = left;
/* need to check if this is a 2MB page or a 4kB */
- pgd = kctx->pgd;
+ pgd = mmut->pgd;
for (level = MIDGARD_MMU_TOPLEVEL;
level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
@@ -798,7 +1050,7 @@ static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx,
pcount = count;
break;
default:
- dev_warn(kctx->kbdev->dev, "%sNo support for ATEs at level %d\n",
+			dev_warn(kbdev->dev, "%s: No support for ATEs at level %d\n",
__func__, level);
goto next;
}
@@ -807,7 +1059,7 @@ static void mmu_insert_pages_failure_recovery(struct kbase_context *kctx,
for (i = 0; i < pcount; i++)
mmu_mode->entry_invalidate(&page[idx + i]);
- kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_mmu_sync_pgd(kbdev,
kbase_dma_addr(phys_to_page(pgd)) + 8 * idx,
8 * pcount);
kunmap(phys_to_page(pgd));
@@ -836,7 +1088,6 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
struct kbase_mmu_mode const *mmu_mode;
KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(0 != vpfn);
/* 64-bit address range is the max */
KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
@@ -846,7 +1097,7 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
if (nr == 0)
return 0;
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&kctx->mmu.mmu_lock);
while (remain) {
unsigned int i;
@@ -865,27 +1116,27 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
* 256 pages at once (on average). Do we really care?
*/
do {
- err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ err = mmu_get_bottom_pgd(kctx->kbdev, &kctx->mmu,
+ vpfn, &pgd);
if (err != -ENOMEM)
break;
/* Fill the memory pool with enough pages for
* the page walk to succeed
*/
- mutex_unlock(&kctx->mmu_lock);
- err = kbase_mem_pool_grow(&kctx->mem_pool,
+ mutex_unlock(&kctx->mmu.mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
if (err) {
dev_warn(kctx->kbdev->dev, "kbase_mmu_insert_pages: mmu_get_bottom_pgd failure\n");
if (recover_required) {
/* Invalidate the pages we have partially
* completed */
- mmu_insert_pages_failure_recovery(kctx,
- recover_vpfn,
- recover_vpfn +
- recover_count
- );
+ mmu_insert_pages_failure_recovery(kctx->kbdev,
+ &kctx->mmu,
+ recover_vpfn,
+ recover_vpfn + recover_count);
}
goto fail_unlock;
}
@@ -897,11 +1148,10 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
if (recover_required) {
/* Invalidate the pages we have partially
* completed */
- mmu_insert_pages_failure_recovery(kctx,
- recover_vpfn,
- recover_vpfn +
- recover_count
- );
+ mmu_insert_pages_failure_recovery(kctx->kbdev,
+ &kctx->mmu,
+ recover_vpfn,
+ recover_vpfn + recover_count);
}
err = -ENOMEM;
goto fail_unlock;
@@ -932,30 +1182,38 @@ int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
recover_required = true;
recover_count += count;
}
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&kctx->mmu.mmu_lock);
kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
return 0;
fail_unlock:
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&kctx->mmu.mmu_lock);
kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
return err;
}
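
The do/while above drops the MMU mutex before growing the pool, because kbase_mem_pool_grow() can sleep and allocate, then re-takes the lock and retries the page walk until it either succeeds or fails with something other than -ENOMEM. A standalone sketch of the idiom (walk/grow are hypothetical callbacks):

    #include <linux/errno.h>
    #include <linux/mutex.h>

    static int walk_with_grow(struct mutex *lock,
                              int (*walk)(void *), int (*grow)(void *),
                              void *ctx)
    {
        int err;

        do {
            err = walk(ctx); /* caller holds 'lock' here */
            if (err != -ENOMEM)
                break; /* success, or a failure growth cannot fix */

            mutex_unlock(lock); /* grow may sleep and allocate */
            err = grow(ctx);
            mutex_lock(lock);
        } while (!err);

        return err;
    }
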
-static inline void cleanup_empty_pte(struct kbase_context *kctx, u64 *pte)
+static inline void cleanup_empty_pte(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 *pte)
{
phys_addr_t tmp_pgd;
struct page *tmp_p;
- tmp_pgd = kctx->kbdev->mmu_mode->pte_to_phy_addr(*pte);
+ tmp_pgd = kbdev->mmu_mode->pte_to_phy_addr(*pte);
tmp_p = phys_to_page(tmp_pgd);
- kbase_mem_pool_free(&kctx->mem_pool, tmp_p, false);
- kbase_process_page_usage_dec(kctx, 1);
- kbase_atomic_sub_pages(1, &kctx->used_pages);
- kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+ kbase_mem_pool_free(&kbdev->mem_pool, tmp_p, false);
+
+ /* If the MMU tables belong to a context then we accounted the memory
+ * usage to that context, so decrement here.
+ */
+ if (mmut->kctx) {
+ kbase_process_page_usage_dec(mmut->kctx, 1);
+ kbase_atomic_sub_pages(1, &mmut->kctx->used_pages);
+ }
+ kbase_atomic_sub_pages(1, &kbdev->memdev.used_pages);
}
-int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
+int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
const u64 start_vpfn,
struct tagged_addr *phys, size_t nr,
unsigned long flags)
@@ -967,18 +1225,17 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
int err;
struct kbase_mmu_mode const *mmu_mode;
- KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(start_vpfn);
+ /* Note that 0 is a valid start_vpfn */
/* 64-bit address range is the max */
KBASE_DEBUG_ASSERT(start_vpfn <= (U64_MAX / PAGE_SIZE));
- mmu_mode = kctx->kbdev->mmu_mode;
+ mmu_mode = kbdev->mmu_mode;
/* Early out if there is nothing to do */
if (nr == 0)
return 0;
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&mmut->mmu_lock);
while (remain) {
unsigned int i;
@@ -1003,28 +1260,27 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
* 256 pages at once (on average). Do we really care?
*/
do {
- err = mmu_get_pgd_at_level(kctx, insert_vpfn, cur_level,
- &pgd);
+ err = mmu_get_pgd_at_level(kbdev, mmut, insert_vpfn,
+ cur_level, &pgd);
if (err != -ENOMEM)
break;
/* Fill the memory pool with enough pages for
* the page walk to succeed
*/
- mutex_unlock(&kctx->mmu_lock);
- err = kbase_mem_pool_grow(&kctx->mem_pool,
+ mutex_unlock(&mmut->mmu_lock);
+ err = kbase_mem_pool_grow(&kbdev->mem_pool,
cur_level);
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&mmut->mmu_lock);
} while (!err);
if (err) {
- dev_warn(kctx->kbdev->dev,
+ dev_warn(kbdev->dev,
"%s: mmu_get_bottom_pgd failure\n", __func__);
if (insert_vpfn != start_vpfn) {
/* Invalidate the pages we have partially
* completed */
- mmu_insert_pages_failure_recovery(kctx,
- start_vpfn,
- insert_vpfn);
+ mmu_insert_pages_failure_recovery(kbdev,
+ mmut, start_vpfn, insert_vpfn);
}
goto fail_unlock;
}
@@ -1032,14 +1288,13 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
p = pfn_to_page(PFN_DOWN(pgd));
pgd_page = kmap(p);
if (!pgd_page) {
- dev_warn(kctx->kbdev->dev, "%s: kmap failure\n",
+ dev_warn(kbdev->dev, "%s: kmap failure\n",
__func__);
if (insert_vpfn != start_vpfn) {
/* Invalidate the pages we have partially
* completed */
- mmu_insert_pages_failure_recovery(kctx,
- start_vpfn,
- insert_vpfn);
+ mmu_insert_pages_failure_recovery(kbdev,
+ mmut, start_vpfn, insert_vpfn);
}
err = -ENOMEM;
goto fail_unlock;
@@ -1050,7 +1305,7 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
u64 *target = &pgd_page[level_index];
if (mmu_mode->pte_is_valid(*target, cur_level))
- cleanup_empty_pte(kctx, target);
+ cleanup_empty_pte(kbdev, mmut, target);
mmu_mode->entry_set_ate(target, *phys, flags,
cur_level);
} else {
@@ -1058,18 +1313,16 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
unsigned int ofs = vindex + i;
u64 *target = &pgd_page[ofs];
- /* Fail if the current page is a valid ATE entry
- * unless gwt_was_enabled as in that case all
- * pages will be valid from when
- * kbase_gpu_gwt_start() cleared the gpu
- * write flag.
+ /* Warn if the current page is a valid ATE
+ * entry. The page table shouldn't have anything
+ * in the place where we are trying to put a
+ * new entry. Modification to page table entries
+					 * new entry. Modifications to page table entries
+ * kbase_mmu_update_pages()
*/
-#ifdef CONFIG_MALI_JOB_DUMP
- if (!kctx->gwt_was_enabled)
-#endif
- KBASE_DEBUG_ASSERT
- (0 == (*target & 1UL));
- kctx->kbdev->mmu_mode->entry_set_ate(target,
+ WARN_ON((*target & 1UL) != 0);
+
+ kbdev->mmu_mode->entry_set_ate(target,
phys[i], flags, cur_level);
}
}
@@ -1078,32 +1331,39 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_context *kctx,
insert_vpfn += count;
remain -= count;
- kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_mmu_sync_pgd(kbdev,
kbase_dma_addr(p) + (vindex * sizeof(u64)),
count * sizeof(u64));
kunmap(p);
}
- mutex_unlock(&kctx->mmu_lock);
- return 0;
+ err = 0;
fail_unlock:
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&mmut->mmu_lock);
return err;
}
/*
- * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn'
+ * Map 'nr' pages pointed to by 'phys' at GPU PFN 'vpfn' for GPU address space
+ * number 'as_nr'.
*/
-int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags)
+int kbase_mmu_insert_pages(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 vpfn,
+ struct tagged_addr *phys, size_t nr,
+ unsigned long flags, int as_nr)
{
int err;
- err = kbase_mmu_insert_pages_no_flush(kctx, vpfn, phys, nr, flags);
- kbase_mmu_flush_invalidate(kctx, vpfn, nr, false);
+ err = kbase_mmu_insert_pages_no_flush(kbdev, mmut, vpfn,
+ phys, nr, flags);
+
+ if (mmut->kctx)
+ kbase_mmu_flush_invalidate(mmut->kctx, vpfn, nr, false);
+ else
+ kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, nr, false, as_nr);
+
return err;
}
@@ -1138,7 +1398,7 @@ static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
err = kbase_mmu_hw_do_operation(kbdev,
&kbdev->as[kctx->as_nr],
- kctx, vpfn, nr, op, 0);
+ vpfn, nr, op, 0);
#if KBASE_GPU_RESET_EN
if (err) {
/* Flush failed to complete, assume the
@@ -1163,14 +1423,83 @@ static void kbase_mmu_flush_invalidate_noretain(struct kbase_context *kctx,
#endif /* !CONFIG_MALI_NO_MALI */
}
+/* Perform a flush/invalidate on a particular address space
+ */
+static void kbase_mmu_flush_invalidate_as(struct kbase_device *kbdev,
+ struct kbase_as *as,
+ u64 vpfn, size_t nr, bool sync, bool drain_pending)
+{
+ int err;
+ u32 op;
+
+ if (kbase_pm_context_active_handle_suspend(kbdev,
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ /* GPU is off so there's no need to perform flush/invalidate */
+ return;
+ }
+
+ /* AS transaction begin */
+ mutex_lock(&kbdev->mmu_hw_mutex);
+
+ if (sync)
+ op = AS_COMMAND_FLUSH_MEM;
+ else
+ op = AS_COMMAND_FLUSH_PT;
+
+ err = kbase_mmu_hw_do_operation(kbdev,
+ as, vpfn, nr, op, 0);
+
+#if KBASE_GPU_RESET_EN
+ if (err) {
+ /* Flush failed to complete, assume the GPU has hung and
+ * perform a reset to recover
+ */
+		dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issuing GPU soft-reset to recover\n");
+
+ if (kbase_prepare_to_reset_gpu(kbdev))
+ kbase_reset_gpu(kbdev);
+ }
+#endif /* KBASE_GPU_RESET_EN */
+
+ mutex_unlock(&kbdev->mmu_hw_mutex);
+ /* AS transaction end */
+
+#ifndef CONFIG_MALI_NO_MALI
+ /*
+ * The transaction lock must be dropped before here
+ * as kbase_wait_write_flush could take it if
+ * the GPU was powered down (static analysis doesn't
+ * know this can't happen).
+ */
+ drain_pending |= (!err) && sync &&
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_6367);
+ if (drain_pending) {
+ /* Wait for GPU to flush write buffer */
+ kbase_wait_write_flush(kbdev);
+ }
+#endif /* !CONFIG_MALI_NO_MALI */
+
+ kbase_pm_context_idle(kbdev);
+}
+
+static void kbase_mmu_flush_invalidate_no_ctx(struct kbase_device *kbdev,
+ u64 vpfn, size_t nr, bool sync, int as_nr)
+{
+ /* Skip if there is nothing to do */
+ if (nr) {
+ kbase_mmu_flush_invalidate_as(kbdev, &kbdev->as[as_nr], vpfn,
+ nr, sync, false);
+ }
+}
+
static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
u64 vpfn, size_t nr, bool sync)
{
struct kbase_device *kbdev;
bool ctx_is_in_runpool;
-#ifndef CONFIG_MALI_NO_MALI
bool drain_pending = false;
+#ifndef CONFIG_MALI_NO_MALI
if (atomic_xchg(&kctx->drain_pending, 0))
drain_pending = true;
#endif /* !CONFIG_MALI_NO_MALI */
@@ -1187,71 +1516,22 @@ static void kbase_mmu_flush_invalidate(struct kbase_context *kctx,
if (ctx_is_in_runpool) {
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- if (!kbase_pm_context_active_handle_suspend(kbdev,
- KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
- int err;
- u32 op;
-
- /* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
-
- if (sync)
- op = AS_COMMAND_FLUSH_MEM;
- else
- op = AS_COMMAND_FLUSH_PT;
-
- err = kbase_mmu_hw_do_operation(kbdev,
- &kbdev->as[kctx->as_nr],
- kctx, vpfn, nr, op, 0);
-
-#if KBASE_GPU_RESET_EN
- if (err) {
- /* Flush failed to complete, assume the
- * GPU has hung and perform a reset to
- * recover */
- dev_err(kbdev->dev, "Flush for GPU page table update did not complete. Issueing GPU soft-reset to recover\n");
-
- if (kbase_prepare_to_reset_gpu(kbdev))
- kbase_reset_gpu(kbdev);
- }
-#endif /* KBASE_GPU_RESET_EN */
-
- mutex_unlock(&kbdev->mmu_hw_mutex);
- /* AS transaction end */
+ kbase_mmu_flush_invalidate_as(kbdev, &kbdev->as[kctx->as_nr],
+ vpfn, nr, sync, drain_pending);
-#ifndef CONFIG_MALI_NO_MALI
- /*
- * The transaction lock must be dropped before here
- * as kbase_wait_write_flush could take it if
- * the GPU was powered down (static analysis doesn't
- * know this can't happen).
- */
- drain_pending |= (!err) && sync &&
- kbase_hw_has_issue(kctx->kbdev,
- BASE_HW_ISSUE_6367);
- if (drain_pending) {
- /* Wait for GPU to flush write buffer */
- kbase_wait_write_flush(kctx);
- }
-#endif /* !CONFIG_MALI_NO_MALI */
-
- kbase_pm_context_idle(kbdev);
- }
kbasep_js_runpool_release_ctx(kbdev, kctx);
}
}
-void kbase_mmu_update(struct kbase_context *kctx)
+void kbase_mmu_update(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ int as_nr)
{
- lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
- lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex);
- /* ASSERT that the context has a valid as_nr, which is only the case
- * when it's scheduled in.
- *
- * as_nr won't change because the caller has the hwaccess_lock */
- KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+ lockdep_assert_held(&kbdev->mmu_hw_mutex);
+ KBASE_DEBUG_ASSERT(as_nr != KBASEP_AS_NR_INVALID);
- kctx->kbdev->mmu_mode->update(kctx);
+ kbdev->mmu_mode->update(kbdev, mmut, as_nr);
}
KBASE_EXPORT_TEST_API(kbase_mmu_update);
@@ -1298,24 +1578,22 @@ KBASE_EXPORT_TEST_API(kbase_mmu_disable);
* already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
* information.
*/
-int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
+int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, u64 vpfn, size_t nr, int as_nr)
{
phys_addr_t pgd;
size_t requested_nr = nr;
struct kbase_mmu_mode const *mmu_mode;
int err = -EFAULT;
- KBASE_DEBUG_ASSERT(NULL != kctx);
- beenthere(kctx, "kctx %p vpfn %lx nr %zd", (void *)kctx, (unsigned long)vpfn, nr);
-
if (0 == nr) {
/* early out if nothing to do */
return 0;
}
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&mmut->mmu_lock);
- mmu_mode = kctx->kbdev->mmu_mode;
+ mmu_mode = kbdev->mmu_mode;
while (nr) {
unsigned int i;
@@ -1329,7 +1607,7 @@ int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
count = nr;
/* need to check if this is a 2MB or a 4kB page */
- pgd = kctx->pgd;
+ pgd = mmut->pgd;
for (level = MIDGARD_MMU_TOPLEVEL;
level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
@@ -1367,7 +1645,7 @@ int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
switch (level) {
case MIDGARD_MMU_LEVEL(0):
case MIDGARD_MMU_LEVEL(1):
- dev_warn(kctx->kbdev->dev,
+ dev_warn(kbdev->dev,
"%s: No support for ATEs at level %d\n",
__func__, level);
kunmap(phys_to_page(pgd));
@@ -1377,7 +1655,7 @@ int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
if (count >= 512) {
pcount = 1;
} else {
- dev_warn(kctx->kbdev->dev,
+ dev_warn(kbdev->dev,
"%s: limiting teardown as it tries to do a partial 2MB teardown, need 512, but have %d to tear down\n",
__func__, count);
pcount = 0;
@@ -1388,7 +1666,7 @@ int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
pcount = count;
break;
default:
- dev_err(kctx->kbdev->dev,
+ dev_err(kbdev->dev,
"%s: found non-mapped memory, early out\n",
__func__);
vpfn += count;
@@ -1400,7 +1678,7 @@ int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr)
for (i = 0; i < pcount; i++)
mmu_mode->entry_invalidate(&page[index + i]);
- kbase_mmu_sync_pgd(kctx->kbdev,
+ kbase_mmu_sync_pgd(kbdev,
kbase_dma_addr(phys_to_page(pgd)) +
8 * index, 8*pcount);
@@ -1411,26 +1689,35 @@ next:
}
err = 0;
out:
- mutex_unlock(&kctx->mmu_lock);
- kbase_mmu_flush_invalidate(kctx, vpfn, requested_nr, true);
+ mutex_unlock(&mmut->mmu_lock);
+
+ if (mmut->kctx)
+ kbase_mmu_flush_invalidate(mmut->kctx, vpfn, requested_nr, true);
+ else
+ kbase_mmu_flush_invalidate_no_ctx(kbdev, vpfn, requested_nr, true, as_nr);
+
return err;
}
KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
/**
- * Update the entries for specified number of pages pointed to by 'phys' at GPU PFN 'vpfn'.
- * This call is being triggered as a response to the changes of the mem attributes
+ * kbase_mmu_update_pages_no_flush() - Update page table entries on the GPU
*
- * @pre : The caller is responsible for validating the memory attributes
+ * This will update page table entries that already exist on the GPU based on
+ * the new flags that are passed. It is used in response to changes in
+ * the memory attributes.
*
- * IMPORTANT: This uses kbasep_js_runpool_release_ctx() when the context is
- * currently scheduled into the runpool, and so potentially uses a lot of locks.
- * These locks must be taken in the correct order with respect to others
- * already held by the caller. Refer to kbasep_js_runpool_release_ctx() for more
- * information.
+ * The caller is responsible for validating the memory attributes
+ *
+ * @kctx: Kbase context
+ * @vpfn: Virtual PFN (Page Frame Number) of the first page to update
+ * @phys: Tagged physical addresses of the physical pages to replace the
+ * current mappings
+ * @nr: Number of pages to update
+ * @flags: Flags
*/
-int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
+static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
struct tagged_addr *phys, size_t nr,
unsigned long flags)
{
@@ -1440,14 +1727,13 @@ int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
int err;
KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(0 != vpfn);
KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
/* Early out if there is nothing to do */
if (nr == 0)
return 0;
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&kctx->mmu.mmu_lock);
mmu_mode = kctx->kbdev->mmu_mode;
@@ -1461,16 +1747,17 @@ int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
count = nr;
do {
- err = mmu_get_bottom_pgd(kctx, vpfn, &pgd);
+ err = mmu_get_bottom_pgd(kctx->kbdev, &kctx->mmu,
+ vpfn, &pgd);
if (err != -ENOMEM)
break;
/* Fill the memory pool with enough pages for
* the page walk to succeed
*/
- mutex_unlock(&kctx->mmu_lock);
- err = kbase_mem_pool_grow(&kctx->mem_pool,
+ mutex_unlock(&kctx->mmu.mmu_lock);
+ err = kbase_mem_pool_grow(&kctx->kbdev->mem_pool,
MIDGARD_MMU_BOTTOMLEVEL);
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&kctx->mmu.mmu_lock);
} while (!err);
if (err) {
dev_warn(kctx->kbdev->dev,
@@ -1501,11 +1788,11 @@ int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
kunmap(pfn_to_page(PFN_DOWN(pgd)));
}
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&kctx->mmu.mmu_lock);
return 0;
fail_unlock:
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&kctx->mmu.mmu_lock);
return err;
}
@@ -1520,8 +1807,9 @@ int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
return err;
}
-static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd,
- int level, u64 *pgd_page_buffer)
+static void mmu_teardown_level(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut, phys_addr_t pgd,
+ int level, u64 *pgd_page_buffer)
{
phys_addr_t target_pgd;
struct page *p;
@@ -1529,9 +1817,7 @@ static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd,
int i;
struct kbase_mmu_mode const *mmu_mode;
- KBASE_DEBUG_ASSERT(NULL != kctx);
- lockdep_assert_held(&kctx->mmu_lock);
- lockdep_assert_held(&kctx->reg_lock);
+ lockdep_assert_held(&mmut->mmu_lock);
pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
/* kmap_atomic should NEVER fail. */
@@ -1542,14 +1828,14 @@ static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd,
kunmap_atomic(pgd_page);
pgd_page = pgd_page_buffer;
- mmu_mode = kctx->kbdev->mmu_mode;
+ mmu_mode = kbdev->mmu_mode;
for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
if (target_pgd) {
if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
- mmu_teardown_level(kctx,
+ mmu_teardown_level(kbdev, mmut,
target_pgd,
level + 1,
pgd_page_buffer +
@@ -1559,56 +1845,69 @@ static void mmu_teardown_level(struct kbase_context *kctx, phys_addr_t pgd,
}
p = pfn_to_page(PFN_DOWN(pgd));
- kbase_mem_pool_free(&kctx->mem_pool, p, true);
- kbase_process_page_usage_dec(kctx, 1);
- kbase_atomic_sub_pages(1, &kctx->used_pages);
- kbase_atomic_sub_pages(1, &kctx->kbdev->memdev.used_pages);
+ kbase_mem_pool_free(&kbdev->mem_pool, p, true);
+ kbase_atomic_sub_pages(1, &kbdev->memdev.used_pages);
+
+ /* If MMU tables belong to a context then pages will have been accounted
+ * against it, so we must decrement the usage counts here.
+ */
+ if (mmut->kctx) {
+ kbase_process_page_usage_dec(mmut->kctx, 1);
+ kbase_atomic_sub_pages(1, &mmut->kctx->used_pages);
+ }
}
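
mmu_teardown_level() copies each directory page into the scratch buffer before releasing it and only then recurses, and the PAGE_SIZE * 4 buffer allocated in kbase_mmu_init() gives each of the four levels its own slice, so the walk never allocates while mmu_lock is held. A user-space sketch of the fixed-depth scratch scheme, with entry_to_table() as a hypothetical stand-in for the PTE-to-page translation:

    #include <stdint.h>
    #include <string.h>

    #define LEVELS  4
    #define ENTRIES 512 /* 64-bit entries per 4 kB directory page */

    /* Hypothetical: recover the next-level table address from an entry. */
    static uint64_t *entry_to_table(uint64_t entry)
    {
        return (uint64_t *)(uintptr_t)(entry & ~0xFFFULL);
    }

    /* Each recursion level owns a disjoint ENTRIES-sized slice of 'scratch'. */
    static void teardown_level(uint64_t *table, int level, uint64_t *scratch)
    {
        uint64_t *copy = scratch;
        int i;

        memcpy(copy, table, ENTRIES * sizeof(*copy));
        /* 'table' could be freed here; the walk continues on the copy */
        for (i = 0; i < ENTRIES; i++)
            if (copy[i] && level + 1 < LEVELS)
                teardown_level(entry_to_table(copy[i]), level + 1,
                               scratch + ENTRIES);
    }
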
-int kbase_mmu_init(struct kbase_context *kctx)
+int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+ struct kbase_context *kctx)
{
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL == kctx->mmu_teardown_pages);
-
- mutex_init(&kctx->mmu_lock);
+ mutex_init(&mmut->mmu_lock);
+ mmut->kctx = kctx;
/* Preallocate MMU depth of four pages for mmu_teardown_level to use */
- kctx->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+ mmut->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
- if (NULL == kctx->mmu_teardown_pages)
+ if (mmut->mmu_teardown_pages == NULL)
return -ENOMEM;
- return 0;
-}
+ mmut->pgd = 0;
+ /* We allocate pages into the kbdev memory pool, then
+ * kbase_mmu_alloc_pgd will allocate out of that pool. This is done to
+ * avoid allocations from the kernel happening with the lock held.
+ */
+ while (!mmut->pgd) {
+ int err;
-void kbase_mmu_term(struct kbase_context *kctx)
-{
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
+ err = kbase_mem_pool_grow(&kbdev->mem_pool,
+ MIDGARD_MMU_BOTTOMLEVEL);
+ if (err) {
+ kbase_mmu_term(kbdev, mmut);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&mmut->mmu_lock);
+ mmut->pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
+ mutex_unlock(&mmut->mmu_lock);
+ }
- kfree(kctx->mmu_teardown_pages);
- kctx->mmu_teardown_pages = NULL;
+ return 0;
}
-void kbase_mmu_free_pgd(struct kbase_context *kctx)
+void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
{
- int new_page_count = 0;
-
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL != kctx->mmu_teardown_pages);
-
- mutex_lock(&kctx->mmu_lock);
- mmu_teardown_level(kctx, kctx->pgd, MIDGARD_MMU_TOPLEVEL,
- kctx->mmu_teardown_pages);
- mutex_unlock(&kctx->mmu_lock);
+ if (mmut->pgd) {
+ mutex_lock(&mmut->mmu_lock);
+ mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL,
+ mmut->mmu_teardown_pages);
+ mutex_unlock(&mmut->mmu_lock);
+
+ if (mmut->kctx)
+ KBASE_TLSTREAM_AUX_PAGESALLOC(mmut->kctx->id, 0);
+ }
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kctx->id,
- (u64)new_page_count);
+ kfree(mmut->mmu_teardown_pages);
+ mutex_destroy(&mmut->mmu_lock);
}
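
kbase_mmu_init() is free to call kbase_mmu_term() on its own failure path because the teardown tolerates a partially-constructed table: the page-table walk is skipped while mmut->pgd is still zero, and kfree() of the scratch buffer is a harmless no-op on NULL. A sketch of that defensive init/term pairing (hypothetical type):

    #include <linux/slab.h>

    struct simple_table { void *scratch; unsigned long pgd; };

    static void simple_table_term(struct simple_table *t)
    {
        if (t->pgd) {
            /* walk and release the tree only once a root exists */
            t->pgd = 0;
        }
        kfree(t->scratch); /* kfree(NULL) is a safe no-op */
        t->scratch = NULL;
    }

    static int simple_table_init(struct simple_table *t)
    {
        t->scratch = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!t->scratch)
            return -ENOMEM;
        t->pgd = 0;
        /* any later failure can simply call simple_table_term(t) */
        return 0;
    }
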
-KBASE_EXPORT_TEST_API(kbase_mmu_free_pgd);
-
static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level, char ** const buffer, size_t *size_left)
{
phys_addr_t target_pgd;
@@ -1619,7 +1918,7 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
struct kbase_mmu_mode const *mmu_mode;
KBASE_DEBUG_ASSERT(NULL != kctx);
- lockdep_assert_held(&kctx->mmu_lock);
+ lockdep_assert_held(&kctx->mmu.mmu_lock);
mmu_mode = kctx->kbdev->mmu_mode;
@@ -1684,7 +1983,7 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
KBASE_DEBUG_ASSERT(0 != size_left);
kaddr = vmalloc_user(size_left);
- mutex_lock(&kctx->mmu_lock);
+ mutex_lock(&kctx->mmu.mmu_lock);
if (kaddr) {
u64 end_marker = 0xFFULL;
@@ -1699,7 +1998,8 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
if (kctx->api_version >= KBASE_API_VERSION(8, 4)) {
struct kbase_mmu_setup as_setup;
- kctx->kbdev->mmu_mode->get_as_setup(kctx, &as_setup);
+ kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu,
+ &as_setup);
config[0] = as_setup.transtab;
config[1] = as_setup.memattr;
config[2] = as_setup.transcfg;
@@ -1710,7 +2010,7 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
}
dump_size = kbasep_mmu_dump_level(kctx,
- kctx->pgd,
+ kctx->mmu.pgd,
MIDGARD_MMU_TOPLEVEL,
&mmu_dump_buffer,
&size_left);
@@ -1732,12 +2032,12 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
memcpy(mmu_dump_buffer, &end_marker, sizeof(u64));
}
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&kctx->mmu.mmu_lock);
return kaddr;
fail_free:
vfree(kaddr);
- mutex_unlock(&kctx->mmu_lock);
+ mutex_unlock(&kctx->mmu.mmu_lock);
return NULL;
}
KBASE_EXPORT_TEST_API(kbase_mmu_dump);
@@ -1767,11 +2067,10 @@ void bus_fault_worker(struct work_struct *data)
return;
}
- if (unlikely(faulting_as->protected_mode))
- {
+ if (unlikely(faulting_as->protected_mode)) {
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
"Permission failure");
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
kbasep_js_runpool_release_ctx(kbdev, kctx);
atomic_dec(&kbdev->faults_pending);
@@ -1805,9 +2104,9 @@ void bus_fault_worker(struct work_struct *data)
mutex_unlock(&kbdev->mmu_hw_mutex);
/* AS transaction end */
- kbase_mmu_hw_clear_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, faulting_as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
kbase_pm_context_idle(kbdev);
@@ -2102,9 +2401,9 @@ static void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
mutex_unlock(&kbdev->mmu_hw_mutex);
/* AS transaction end */
/* Clear down the fault */
- kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
#if KBASE_GPU_RESET_EN
@@ -2117,7 +2416,6 @@ void kbasep_as_do_poke(struct work_struct *work)
{
struct kbase_as *as;
struct kbase_device *kbdev;
- struct kbase_context *kctx;
unsigned long flags;
KBASE_DEBUG_ASSERT(work);
@@ -2133,12 +2431,11 @@ void kbasep_as_do_poke(struct work_struct *work)
* the AS will not be released as before the atom is released this workqueue
* is flushed (in kbase_as_poking_timer_release_atom)
*/
- kctx = kbasep_js_runpool_lookup_ctx_noretain(kbdev, as->number);
/* AS transaction begin */
mutex_lock(&kbdev->mmu_hw_mutex);
/* Force a uTLB invalidate */
- kbase_mmu_hw_do_operation(kbdev, as, kctx, 0, 0,
+ kbase_mmu_hw_do_operation(kbdev, as, 0, 0,
AS_COMMAND_UNLOCK, 0);
mutex_unlock(&kbdev->mmu_hw_mutex);
/* AS transaction end */
@@ -2286,14 +2583,14 @@ void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_contex
WARN_ON(as->current_setup.transtab);
if (kbase_as_has_bus_fault(as)) {
- kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
} else if (kbase_as_has_page_fault(as)) {
- kbase_mmu_hw_clear_fault(kbdev, as, kctx,
+ kbase_mmu_hw_clear_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as, kctx,
+ kbase_mmu_hw_enable_fault(kbdev, as,
KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
index 92aa55dc2b35e9..70d5f2becc718a 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_hw.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -70,10 +70,9 @@ enum kbase_mmu_fault_type {
*
* @param[in] kbdev kbase device to configure.
* @param[in] as address space to configure.
- * @param[in] kctx kbase context to configure.
*/
void kbase_mmu_hw_configure(struct kbase_device *kbdev,
- struct kbase_as *as, struct kbase_context *kctx);
+ struct kbase_as *as);
/** @brief Issue an operation to the MMU.
*
@@ -82,7 +81,6 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev,
*
* @param[in] kbdev kbase device to issue the MMU operation on.
* @param[in] as address space to issue the MMU operation on.
- * @param[in] kctx kbase context to issue the MMU operation on.
* @param[in] vpfn MMU Virtual Page Frame Number to start the
* operation on.
* @param[in] nr Number of pages to work on.
@@ -93,7 +91,7 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev,
* @return Zero if the operation was successful, non-zero otherwise.
*/
int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx, u64 vpfn, u32 nr, u32 type,
+ u64 vpfn, u32 nr, u32 type,
unsigned int handling_irq);
/** @brief Clear a fault that has been previously reported by the MMU.
@@ -102,11 +100,10 @@ int kbase_mmu_hw_do_operation(struct kbase_device *kbdev, struct kbase_as *as,
*
* @param[in] kbdev kbase device to clear the fault from.
* @param[in] as address space to clear the fault from.
- * @param[in] kctx kbase context to clear the fault from or NULL.
* @param[in] type The type of fault that needs to be cleared.
*/
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+ enum kbase_mmu_fault_type type);
/** @brief Enable fault that has been previously reported by the MMU.
*
@@ -116,11 +113,10 @@ void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
*
* @param[in] kbdev kbase device to again enable the fault from.
* @param[in] as address space to again enable the fault from.
- * @param[in] kctx kbase context to again enable the fault from.
* @param[in] type The type of fault that needs to be enabled again.
*/
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
- struct kbase_context *kctx, enum kbase_mmu_fault_type type);
+ enum kbase_mmu_fault_type type);
/** @} *//* end group mali_kbase_mmu_hw */
/** @} *//* end group base_kbase_api */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
index 4bb2628c92512e..38ca456477cc13 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_aarch64.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2014, 2016-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2014, 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,7 +21,6 @@
*/
-
#include "mali_kbase.h"
#include "mali_midg_regmap.h"
#include "mali_kbase_defs.h"
@@ -48,32 +47,28 @@
*/
static inline void page_table_entry_set(u64 *pte, u64 phy)
{
+#if KERNEL_VERSION(3, 18, 13) <= LINUX_VERSION_CODE
+ WRITE_ONCE(*pte, phy);
+#else
#ifdef CONFIG_64BIT
+ barrier();
*pte = phy;
+ barrier();
#elif defined(CONFIG_ARM)
- /*
- * In order to prevent the compiler keeping cached copies of
- * memory, we have to explicitly say that we have updated memory.
- *
- * Note: We could manually move the data ourselves into R0 and
- * R1 by specifying register variables that are explicitly
- * given registers assignments, the down side of this is that
- * we have to assume cpu endianness. To avoid this we can use
- * the ldrd to read the data from memory into R0 and R1 which
- * will respect the cpu endianness, we then use strd to make
- * the 64 bit assignment to the page table entry.
- */
- asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
- "strd r0, r1, [%[pte]]\n\t"
- : "=m" (*pte)
- : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
- : "r0", "r1");
+ barrier();
+ asm volatile("ldrd r0, [%1]\n\t"
+ "strd r0, %0\n\t"
+ : "=m" (*pte)
+ : "r" (&phy)
+ : "r0", "r1");
+ barrier();
#else
#error "64-bit atomic write must be implemented for your architecture"
#endif
+#endif
}
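
On kernels at or above 3.18.13 the whole routine collapses to WRITE_ONCE(), a volatile store the compiler cannot elide or re-fetch and which is emitted as one untorn write on 64-bit targets; the barrier()/ldrd-strd fallbacks cover older kernels and 32-bit ARM, where strd supplies the 64-bit single-copy write. A user-space analogue of the property being relied on (a sketch, not the kernel macro; a C11 relaxed atomic is, if anything, stronger, since it forbids tearing on all targets):

    #include <stdatomic.h>
    #include <stdint.h>

    /* A relaxed atomic store gives the no-tearing, no-elision guarantee
     * that the page table entry write above depends on. */
    static void pte_set(_Atomic uint64_t *pte, uint64_t phy)
    {
        atomic_store_explicit(pte, phy, memory_order_relaxed);
    }
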
-static void mmu_get_as_setup(struct kbase_context *kctx,
+static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
struct kbase_mmu_setup * const setup)
{
/* Set up the required caching policies at the correct indices
@@ -89,22 +84,30 @@ static void mmu_get_as_setup(struct kbase_context *kctx,
(AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
(AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
(AS_MEMATTR_AARCH64_OUTER_WA <<
- (AS_MEMATTR_INDEX_OUTER_WA * 8));
+ (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
+ (AS_MEMATTR_AARCH64_NON_CACHEABLE <<
+ (AS_MEMATTR_INDEX_NON_CACHEABLE * 8));
- setup->transtab = (u64)kctx->pgd & AS_TRANSTAB_BASE_MASK;
+ setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
}
-static void mmu_update(struct kbase_context *kctx)
+static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
+ int as_nr)
{
- struct kbase_device * const kbdev = kctx->kbdev;
- struct kbase_as * const as = &kbdev->as[kctx->as_nr];
- struct kbase_mmu_setup * const current_setup = &as->current_setup;
+ struct kbase_as *as;
+ struct kbase_mmu_setup *current_setup;
+
+ if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))
+ return;
+
+ as = &kbdev->as[as_nr];
+ current_setup = &as->current_setup;
- mmu_get_as_setup(kctx, current_setup);
+ mmu_get_as_setup(mmut, current_setup);
/* Apply the address space setting */
- kbase_mmu_hw_configure(kbdev, as, kctx);
+ kbase_mmu_hw_configure(kbdev, as);
}
static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
@@ -116,7 +119,7 @@ static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
/* Apply the address space setting */
- kbase_mmu_hw_configure(kbdev, as, NULL);
+ kbase_mmu_hw_configure(kbdev, as);
}
static phys_addr_t pte_to_phy_addr(u64 entry)
@@ -210,7 +213,8 @@ static struct kbase_mmu_mode const aarch64_mode = {
.pte_is_valid = pte_is_valid,
.entry_set_ate = entry_set_ate,
.entry_set_pte = entry_set_pte,
- .entry_invalidate = entry_invalidate
+ .entry_invalidate = entry_invalidate,
+ .flags = KBASE_MMU_MODE_HAS_NON_CACHEABLE
};
struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
diff --git a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
index bc8da6348772ce..f6bdf91dc22585 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_mmu_mode_lpae.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,7 +21,6 @@
*/
-
#include "mali_kbase.h"
#include "mali_midg_regmap.h"
#include "mali_kbase_defs.h"
@@ -46,33 +45,28 @@
*/
static inline void page_table_entry_set(u64 *pte, u64 phy)
{
+#if KERNEL_VERSION(3, 18, 13) <= LINUX_VERSION_CODE
+ WRITE_ONCE(*pte, phy);
+#else
#ifdef CONFIG_64BIT
+ barrier();
*pte = phy;
+ barrier();
#elif defined(CONFIG_ARM)
- /*
- * In order to prevent the compiler keeping cached copies of
- * memory, we have to explicitly say that we have updated
- * memory.
- *
- * Note: We could manually move the data ourselves into R0 and
- * R1 by specifying register variables that are explicitly
- * given registers assignments, the down side of this is that
- * we have to assume cpu endianness. To avoid this we can use
- * the ldrd to read the data from memory into R0 and R1 which
- * will respect the cpu endianness, we then use strd to make
- * the 64 bit assignment to the page table entry.
- */
- asm volatile("ldrd r0, r1, [%[ptemp]]\n\t"
- "strd r0, r1, [%[pte]]\n\t"
- : "=m" (*pte)
- : [ptemp] "r" (&phy), [pte] "r" (pte), "m" (phy)
- : "r0", "r1");
+ barrier();
+ asm volatile("ldrd r0, [%1]\n\t"
+ "strd r0, %0\n\t"
+ : "=m" (*pte)
+ : "r" (&phy)
+ : "r0", "r1");
+ barrier();
#else
#error "64-bit atomic write must be implemented for your architecture"
#endif
+#endif
}
-static void mmu_get_as_setup(struct kbase_context *kctx,
+static void mmu_get_as_setup(struct kbase_mmu_table *mmut,
struct kbase_mmu_setup * const setup)
{
/* Set up the required caching policies at the correct indices
@@ -90,7 +84,7 @@ static void mmu_get_as_setup(struct kbase_context *kctx,
(AS_MEMATTR_INDEX_OUTER_WA * 8)) |
0; /* The other indices are unused for now */
- setup->transtab = ((u64)kctx->pgd &
+ setup->transtab = ((u64)mmut->pgd &
((0xFFFFFFFFULL << 32) | AS_TRANSTAB_LPAE_ADDR_SPACE_MASK)) |
AS_TRANSTAB_LPAE_ADRMODE_TABLE |
AS_TRANSTAB_LPAE_READ_INNER;
@@ -98,16 +92,23 @@ static void mmu_get_as_setup(struct kbase_context *kctx,
setup->transcfg = 0;
}
-static void mmu_update(struct kbase_context *kctx)
+static void mmu_update(struct kbase_device *kbdev,
+ struct kbase_mmu_table *mmut,
+ int as_nr)
{
- struct kbase_device * const kbdev = kctx->kbdev;
- struct kbase_as * const as = &kbdev->as[kctx->as_nr];
- struct kbase_mmu_setup * const current_setup = &as->current_setup;
+ struct kbase_as *as;
+ struct kbase_mmu_setup *current_setup;
- mmu_get_as_setup(kctx, current_setup);
+ if (WARN_ON(as_nr == KBASEP_AS_NR_INVALID))
+ return;
+
+ as = &kbdev->as[as_nr];
+ current_setup = &as->current_setup;
+
+ mmu_get_as_setup(mmut, current_setup);
/* Apply the address space setting */
- kbase_mmu_hw_configure(kbdev, as, kctx);
+ kbase_mmu_hw_configure(kbdev, as);
}
static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
@@ -118,7 +119,7 @@ static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
current_setup->transtab = AS_TRANSTAB_LPAE_ADRMODE_UNMAPPED;
/* Apply the address space setting */
- kbase_mmu_hw_configure(kbdev, as, NULL);
+ kbase_mmu_hw_configure(kbdev, as);
}
static phys_addr_t pte_to_phy_addr(u64 entry)
@@ -145,9 +146,17 @@ static int pte_is_valid(u64 pte, unsigned int level)
static u64 get_mmu_flags(unsigned long flags)
{
u64 mmu_flags;
-
- /* store mem_attr index as 4:2 (macro called ensures 3 bits already) */
- mmu_flags = KBASE_REG_MEMATTR_VALUE(flags) << 2;
+ unsigned long memattr_idx;
+
+ memattr_idx = KBASE_REG_MEMATTR_VALUE(flags);
+ if (WARN(memattr_idx == AS_MEMATTR_INDEX_NON_CACHEABLE,
+ "Legacy Mode MMU cannot honor GPU non-cachable memory, will use default instead\n"))
+ memattr_idx = AS_MEMATTR_INDEX_DEFAULT;
+ /* store mem_attr index as 4:2, noting that:
+ * - macro called above ensures 3 bits already
+ * - all AS_MEMATTR_INDEX_<...> macros only use 3 bits
+ */
+ mmu_flags = memattr_idx << 2;
/* write perm if requested */
mmu_flags |= (flags & KBASE_REG_GPU_WR) ? ENTRY_WR_BIT : 0;
@@ -195,7 +204,8 @@ static struct kbase_mmu_mode const lpae_mode = {
.pte_is_valid = pte_is_valid,
.entry_set_ate = entry_set_ate,
.entry_set_pte = entry_set_pte,
- .entry_invalidate = entry_invalidate
+ .entry_invalidate = entry_invalidate,
+ .flags = 0
};
struct kbase_mmu_mode const *kbase_mmu_mode_get_lpae(void)
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.c b/drivers/gpu/arm/midgard/mali_kbase_pm.c
index da56f0af2f865e..d5b8c776e74e6d 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_pm.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -52,18 +52,9 @@ int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbas
{
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
int c;
- int old_count;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- /* Trace timeline information about how long it took to handle the decision
- * to powerup. Sometimes the event might be missed due to reading the count
- * outside of mutex, but this is necessary to get the trace timing
- * correct. */
- old_count = kbdev->pm.active_count;
- if (old_count == 0)
- kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
-
mutex_lock(&js_devdata->runpool_mutex);
mutex_lock(&kbdev->pm.lock);
if (kbase_pm_is_suspending(kbdev)) {
@@ -75,8 +66,6 @@ int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbas
case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
mutex_unlock(&kbdev->pm.lock);
mutex_unlock(&js_devdata->runpool_mutex);
- if (old_count == 0)
- kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
return 1;
case KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE:
@@ -87,17 +76,17 @@ int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbas
}
}
c = ++kbdev->pm.active_count;
- KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_ACTIVE, NULL, NULL, 0u, c);
- /* Trace the event being handled */
- if (old_count == 0)
- kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE);
-
- if (c == 1)
+ if (c == 1) {
/* First context active: Power on the GPU and any cores requested by
* the policy */
kbase_hwaccess_pm_gpu_active(kbdev);
+ }
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(CONFIG_MALI_DEVFREQ)
+ if (kbdev->ipa.gpu_active_callback)
+ kbdev->ipa.gpu_active_callback(kbdev->ipa.model_data);
+#endif
mutex_unlock(&kbdev->pm.lock);
mutex_unlock(&js_devdata->runpool_mutex);
@@ -111,31 +100,18 @@ void kbase_pm_context_idle(struct kbase_device *kbdev)
{
struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
int c;
- int old_count;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- /* Trace timeline information about how long it took to handle the decision
- * to powerdown. Sometimes the event might be missed due to reading the
- * count outside of mutex, but this is necessary to get the trace timing
- * correct. */
- old_count = kbdev->pm.active_count;
- if (old_count == 0)
- kbase_timeline_pm_send_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
mutex_lock(&js_devdata->runpool_mutex);
mutex_lock(&kbdev->pm.lock);
c = --kbdev->pm.active_count;
- KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, c);
KBASE_TRACE_ADD_REFCOUNT(kbdev, PM_CONTEXT_IDLE, NULL, NULL, 0u, c);
KBASE_DEBUG_ASSERT(c >= 0);
- /* Trace the event being handled */
- if (old_count == 0)
- kbase_timeline_pm_handle_event(kbdev, KBASE_TIMELINE_PM_EVENT_GPU_IDLE);
-
if (c == 0) {
/* Last context has gone idle */
kbase_hwaccess_pm_gpu_idle(kbdev);
@@ -146,6 +122,21 @@ void kbase_pm_context_idle(struct kbase_device *kbdev)
wake_up(&kbdev->pm.zero_active_count_wait);
}
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(CONFIG_MALI_DEVFREQ)
+ /* IPA may be using vinstr, in which case there may be one PM reference
+ * still held when all other contexts have left the GPU. Inform IPA that
+	 * the GPU is now idle so that vinstr can drop its reference.
+ *
+ * If the GPU was only briefly active then it might have gone idle
+ * before vinstr has taken a PM reference, meaning that active_count is
+ * zero. We still need to inform IPA in this case, so that vinstr can
+ * drop the PM reference and avoid keeping the GPU powered
+ * unnecessarily.
+ */
+ if (c <= 1 && kbdev->ipa.gpu_idle_callback)
+ kbdev->ipa.gpu_idle_callback(kbdev->ipa.model_data);
+#endif
+
mutex_unlock(&kbdev->pm.lock);
mutex_unlock(&js_devdata->runpool_mutex);
}
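
With the timeline tracing stripped out, the active/idle pair is plain reference-counted power management: the first reference powers the GPU up, the last one powers it down, and the new IPA hooks fire around those transitions. A reduced sketch of the refcount shape (hypothetical callbacks):

    #include <linux/mutex.h>

    struct pm_ref {
        struct mutex lock;
        int active;
        void (*power_on)(void);
        void (*power_off)(void);
    };

    static void pm_ref_get(struct pm_ref *pm)
    {
        mutex_lock(&pm->lock);
        if (++pm->active == 1)
            pm->power_on(); /* first user: power up */
        mutex_unlock(&pm->lock);
    }

    static void pm_ref_put(struct pm_ref *pm)
    {
        mutex_lock(&pm->lock);
        if (--pm->active == 0)
            pm->power_off(); /* last user: power down */
        mutex_unlock(&pm->lock);
    }
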
diff --git a/drivers/gpu/arm/midgard/mali_kbase_pm.h b/drivers/gpu/arm/midgard/mali_kbase_pm.h
index 8de17e1aca59eb..59a031467c95cc 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_pm.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_pm.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -136,6 +136,10 @@ int kbase_pm_context_active_handle_suspend(struct kbase_device *kbdev, enum kbas
*/
void kbase_pm_context_idle(struct kbase_device *kbdev);
+/* NOTE: kbase_pm_is_active() is in mali_kbase.h, because it is an inline
+ * function
+ */
+
/**
* Suspend the GPU and prevent any further register accesses to it from Kernel
* threads.
diff --git a/drivers/gpu/arm/midgard/mali_kbase_replay.c b/drivers/gpu/arm/midgard/mali_kbase_replay.c
index 3d93922a5fd1b0..92101fec8d5e59 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_replay.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_replay.c
@@ -664,8 +664,8 @@ static void kbasep_replay_create_atom(struct kbase_context *kctx,
atom->prio = prio;
atom->atom_number = atom_nr;
- base_jd_atom_dep_set(&atom->pre_dep[0], 0 , BASE_JD_DEP_TYPE_INVALID);
- base_jd_atom_dep_set(&atom->pre_dep[1], 0 , BASE_JD_DEP_TYPE_INVALID);
+ base_jd_atom_dep_set(&atom->pre_dep[0], 0, BASE_JD_DEP_TYPE_INVALID);
+ base_jd_atom_dep_set(&atom->pre_dep[1], 0, BASE_JD_DEP_TYPE_INVALID);
atom->udata.blob[0] = 0;
atom->udata.blob[1] = 0;
@@ -713,7 +713,8 @@ static int kbasep_replay_create_atoms(struct kbase_context *kctx,
kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
- base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr , BASE_JD_DEP_TYPE_DATA);
+ base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr,
+ BASE_JD_DEP_TYPE_DATA);
return 0;
}
diff --git a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
index 7cce3f84892a5a..0f66ac69d332c1 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_softjobs.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -375,12 +375,12 @@ static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
}
#endif /* CONFIG_MALI_FENCE_DEBUG */
-void kbasep_soft_job_timeout_worker(unsigned long data)
+void kbasep_soft_job_timeout_worker(struct timer_list *timer)
{
- struct kbase_context *kctx = (struct kbase_context *)data;
+ struct kbase_context *kctx = container_of(timer, struct kbase_context,
+ soft_job_timeout);
u32 timeout_ms = (u32)atomic_read(
&kctx->kbdev->js_data.soft_job_timeout_ms);
- struct timer_list *timer = &kctx->soft_job_timeout;
ktime_t cur_time = ktime_get();
bool restarting = false;
unsigned long lflags;
@@ -495,17 +495,6 @@ static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
kbase_js_sched_all(katom->kctx->kbdev);
}
-struct kbase_debug_copy_buffer {
- size_t size;
- struct page **pages;
- int nr_pages;
- size_t offset;
- struct kbase_mem_phy_alloc *gpu_alloc;
-
- struct page **extres_pages;
- int nr_extres_pages;
-};
-
static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
{
struct page **pages = buffer->extres_pages;
@@ -652,8 +641,8 @@ static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
katom->kctx, user_extres.ext_resource &
~BASE_EXT_RES_ACCESS_EXCLUSIVE);
- if (NULL == reg || NULL == reg->gpu_alloc ||
- (reg->flags & KBASE_REG_FREE)) {
+ if (kbase_is_region_invalid_or_free(reg) ||
+ reg->gpu_alloc == NULL) {
ret = -EINVAL;
goto out_unlock;
}
@@ -691,13 +680,6 @@ static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
ret = 0;
break;
}
- case KBASE_MEM_TYPE_IMPORTED_UMP:
- {
- dev_warn(katom->kctx->kbdev->dev,
- "UMP is not supported for debug_copy jobs\n");
- ret = -EINVAL;
- goto out_unlock;
- }
default:
/* Nothing to be done. */
break;
@@ -720,7 +702,7 @@ out_cleanup:
return ret;
}
-static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
void *extres_page, struct page **pages, unsigned int nr_pages,
unsigned int *target_page_nr, size_t offset, size_t *to_copy)
{
@@ -762,7 +744,7 @@ static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
kunmap(pages[*target_page_nr]);
}
-static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
struct kbase_debug_copy_buffer *buf_data)
{
unsigned int i;
@@ -771,9 +753,11 @@ static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
u64 offset = buf_data->offset;
size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
size_t to_copy = min(extres_size, buf_data->size);
- size_t dma_to_copy;
struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
int ret = 0;
+#ifdef CONFIG_DMA_SHARED_BUFFER
+ size_t dma_to_copy;
+#endif
KBASE_DEBUG_ASSERT(pages != NULL);
@@ -872,50 +856,90 @@ static int kbase_debug_copy(struct kbase_jd_atom *katom)
return 0;
}
+#define KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT ((u32)0x7)
+
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info)
+{
+ /* If the ID is zero, then fail the job */
+ if (info->id == 0)
+ return -EINVAL;
+
+ /* Sanity check that the PA fits within the VA */
+ if (info->va_pages < info->commit_pages)
+ return -EINVAL;
+
+ /* Ensure the GPU address is correctly aligned */
+ if ((info->gpu_alloc_addr & KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT) != 0)
+ return -EINVAL;
+
+ if (kctx->jit_version == 1) {
+ /* Old JIT didn't have usage_id, max_allocations, bin_id
+ * or padding, so force them to zero
+ */
+ info->usage_id = 0;
+ info->max_allocations = 0;
+ info->bin_id = 0;
+ info->flags = 0;
+ memset(info->padding, 0, sizeof(info->padding));
+ } else {
+ int j;
+
+ /* Check padding is all zeroed */
+ for (j = 0; j < sizeof(info->padding); j++) {
+ if (info->padding[j] != 0)
+ return -EINVAL;
+ }
+
+ /* No bit other than TILER_ALIGN_TOP shall be set */
+ if (info->flags & ~BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
{
__user void *data = (__user void *)(uintptr_t) katom->jc;
struct base_jit_alloc_info *info;
struct kbase_context *kctx = katom->kctx;
+ u32 count;
int ret;
+ u32 i;
- /* Fail the job if there is no info structure */
- if (!data) {
+ /* For backwards compatibility */
+ if (katom->nr_extres == 0)
+ katom->nr_extres = 1;
+ count = katom->nr_extres;
+
+ /* Sanity checks */
+ if (!data || count > kctx->jit_max_allocations ||
+ count > ARRAY_SIZE(kctx->jit_alloc)) {
ret = -EINVAL;
goto fail;
}
/* Copy the information for safe access and future storage */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = kmalloc_array(count, sizeof(*info), GFP_KERNEL);
if (!info) {
ret = -ENOMEM;
goto fail;
}
-
- if (copy_from_user(info, data, sizeof(*info)) != 0) {
- ret = -EINVAL;
- goto free_info;
- }
-
- /* If the ID is zero then fail the job */
- if (info->id == 0) {
- ret = -EINVAL;
- goto free_info;
- }
-
- /* Sanity check that the PA fits within the VA */
- if (info->va_pages < info->commit_pages) {
+ if (copy_from_user(info, data, sizeof(*info) * count) != 0) {
ret = -EINVAL;
goto free_info;
}
+ katom->softjob_data = info;
- /* Ensure the GPU address is correctly aligned */
- if ((info->gpu_alloc_addr & 0x7) != 0) {
- ret = -EINVAL;
- goto free_info;
+ for (i = 0; i < count; i++, info++) {
+ ret = kbasep_jit_alloc_validate(kctx, info);
+ if (ret)
+ goto free_info;
}
- katom->softjob_data = info;
katom->jit_blocked = false;
lockdep_assert_held(&kctx->jctx.lock);
@@ -935,17 +959,38 @@ static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
return 0;
free_info:
- kfree(info);
+ kfree(katom->softjob_data);
+ katom->softjob_data = NULL;
fail:
return ret;
}
-static u8 kbase_jit_free_get_id(struct kbase_jd_atom *katom)
+static u8 *kbase_jit_free_get_ids(struct kbase_jd_atom *katom)
{
- if (WARN_ON(katom->core_req != BASE_JD_REQ_SOFT_JIT_FREE))
- return 0;
+ if (WARN_ON((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) !=
+ BASE_JD_REQ_SOFT_JIT_FREE))
+ return NULL;
+
+ return (u8 *) katom->softjob_data;
+}
+
+static void kbase_jit_add_to_pending_alloc_list(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct list_head *target_list_head = NULL;
+ struct kbase_jd_atom *entry;
+
+ list_for_each_entry(entry, &kctx->jit_pending_alloc, queue) {
+ if (katom->age < entry->age) {
+ target_list_head = &entry->queue;
+ break;
+ }
+ }
+
+ if (target_list_head == NULL)
+ target_list_head = &kctx->jit_pending_alloc;
- return (u8) katom->jc;
+ list_add_tail(&katom->queue, target_list_head);
}
static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
@@ -955,6 +1000,8 @@ static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
struct kbase_va_region *reg;
struct kbase_vmap_struct mapping;
u64 *ptr, new_addr;
+ u32 count = katom->nr_extres;
+ u32 i;
if (katom->jit_blocked) {
list_del(&katom->queue);
@@ -962,96 +1009,130 @@ static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
}
info = katom->softjob_data;
-
if (WARN_ON(!info)) {
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
return 0;
}
- /* The JIT ID is still in use so fail the allocation */
- if (kctx->jit_alloc[info->id]) {
- katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
- return 0;
+ for (i = 0; i < count; i++, info++) {
+ /* The JIT ID is still in use so fail the allocation */
+ if (kctx->jit_alloc[info->id]) {
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
}
- /* Create a JIT allocation */
- reg = kbase_jit_allocate(kctx, info);
- if (!reg) {
- struct kbase_jd_atom *jit_atom;
- bool can_block = false;
+ for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
+ if (kctx->jit_alloc[info->id]) {
+ /* The JIT ID is duplicated in this atom. Roll back
+ * previous allocations and fail.
+ */
+ u32 j;
+
+ info = katom->softjob_data;
+ for (j = 0; j < i; j++, info++) {
+ kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+ kctx->jit_alloc[info->id] =
+ (struct kbase_va_region *) -1;
+ }
- lockdep_assert_held(&kctx->jctx.lock);
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
- jit_atom = list_first_entry(&kctx->jit_atoms_head,
- struct kbase_jd_atom, jit_node);
+ /* Create a JIT allocation */
+ reg = kbase_jit_allocate(kctx, info);
+ if (!reg) {
+ struct kbase_jd_atom *jit_atom;
+ bool can_block = false;
- list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
- if (jit_atom == katom)
- break;
- if (jit_atom->core_req == BASE_JD_REQ_SOFT_JIT_FREE) {
- u8 free_id = kbase_jit_free_get_id(jit_atom);
-
- if (free_id && kctx->jit_alloc[free_id]) {
- /* A JIT free which is active and
- * submitted before this atom
- */
- can_block = true;
+ lockdep_assert_held(&kctx->jctx.lock);
+
+ jit_atom = list_first_entry(&kctx->jit_atoms_head,
+ struct kbase_jd_atom, jit_node);
+
+ list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
+ if (jit_atom == katom)
break;
+
+ if ((jit_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_JIT_FREE) {
+ u8 *free_ids = kbase_jit_free_get_ids(jit_atom);
+
+ if (free_ids && *free_ids &&
+ kctx->jit_alloc[*free_ids]) {
+ /* A JIT free which is active and
+ * submitted before this atom
+ */
+ can_block = true;
+ break;
+ }
}
}
- }
- if (!can_block) {
- /* Mark the allocation so we know it's in use even if
- * the allocation itself fails.
+ if (!can_block) {
+ /* Mark the failed allocation as well as the
+ * other un-attempted allocations in the set,
+ * so we know they are in use even if the
+ * allocation itself failed.
+ */
+ for (; i < count; i++, info++) {
+ kctx->jit_alloc[info->id] =
+ (struct kbase_va_region *) -1;
+ }
+
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
+
+ /* There are pending frees for an active allocation
+ * so we should wait to see whether they free the
+ * memory. Add to the list of atoms for which JIT
+ * allocation is pending.
*/
- kctx->jit_alloc[info->id] =
- (struct kbase_va_region *) -1;
+ kbase_jit_add_to_pending_alloc_list(katom);
+ katom->jit_blocked = true;
+
+ /* Rollback, the whole set will be re-attempted */
+ while (i-- > 0) {
+ info--;
+ kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+ kctx->jit_alloc[info->id] = NULL;
+ }
- katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
- return 0;
+ return 1;
}
- /* There are pending frees for an active allocation
- * so we should wait to see whether they free the memory.
- * Add to the beginning of the list to ensure that the atom is
- * processed only once in kbase_jit_free_finish
- */
- list_add(&katom->queue, &kctx->jit_pending_alloc);
- katom->jit_blocked = true;
-
- return 1;
+ /* Bind it to the user provided ID. */
+ kctx->jit_alloc[info->id] = reg;
}
- /*
- * Write the address of the JIT allocation to the user provided
- * GPU allocation.
- */
- ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
- &mapping);
- if (!ptr) {
+ for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
/*
- * Leave the allocation "live" as the JIT free jit will be
- * submitted anyway.
+ * Write the address of the JIT allocation to the user provided
+ * GPU allocation.
*/
- katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return 0;
- }
+ ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+ &mapping);
+ if (!ptr) {
+ /*
+ * Leave the allocations "live" as the JIT free atom
+ * will be submitted anyway.
+ */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return 0;
+ }
- new_addr = reg->start_pfn << PAGE_SHIFT;
- *ptr = new_addr;
- KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
- katom, info->gpu_alloc_addr, new_addr);
- kbase_vunmap(kctx, &mapping);
+ reg = kctx->jit_alloc[info->id];
+ new_addr = reg->start_pfn << PAGE_SHIFT;
+ *ptr = new_addr;
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
+ katom, info->gpu_alloc_addr, new_addr);
+ kbase_vunmap(kctx, &mapping);
+ }
katom->event_code = BASE_JD_EVENT_DONE;
- /*
- * Bind it to the user provided ID. Do this last so we can check for
- * the JIT free racing this JIT alloc job.
- */
- kctx->jit_alloc[info->id] = reg;
-
return 0;
}
@@ -1061,6 +1142,9 @@ static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
lockdep_assert_held(&katom->kctx->jctx.lock);
+ if (WARN_ON(!katom->softjob_data))
+ return;
+
/* Remove atom from jit_atoms_head list */
list_del(&katom->jit_node);
@@ -1077,34 +1161,76 @@ static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
{
struct kbase_context *kctx = katom->kctx;
+ __user void *data = (__user void *)(uintptr_t) katom->jc;
+ u8 *ids;
+ u32 count = MAX(katom->nr_extres, 1);
+ int ret;
+
+ /* Sanity checks */
+ if (count > ARRAY_SIZE(kctx->jit_alloc)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
+ if (!ids) {
+ ret = -ENOMEM;
+ goto fail;
+ }
lockdep_assert_held(&kctx->jctx.lock);
+ katom->softjob_data = ids;
+
+ /* For backwards compatibility */
+ if (katom->nr_extres) {
+ /* Fail the job if there is no list of ids */
+ if (!data) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ if (copy_from_user(ids, data, sizeof(*ids) * count) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+ } else {
+ katom->nr_extres = 1;
+ *ids = (u8)katom->jc;
+ }
+
list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
return 0;
+
+free_info:
+ kfree(katom->softjob_data);
+ katom->softjob_data = NULL;
+fail:
+ return ret;
}
static void kbase_jit_free_process(struct kbase_jd_atom *katom)
{
struct kbase_context *kctx = katom->kctx;
- u8 id = kbase_jit_free_get_id(katom);
+ u8 *ids = kbase_jit_free_get_ids(katom);
+ u32 count = katom->nr_extres;
+ u32 i;
- /*
- * If the ID is zero or it is not in use yet then fail the job.
- */
- if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
+ if (ids == NULL) {
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
return;
}
- /*
- * If the ID is valid but the allocation request failed still succeed
- * this soft job but don't try and free the allocation.
- */
- if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
- kbase_jit_free(kctx, kctx->jit_alloc[id]);
-
- kctx->jit_alloc[id] = NULL;
+ for (i = 0; i < count; i++, ids++) {
+ /*
+ * If the ID is zero or it is not in use yet then fail the job.
+ */
+ if ((*ids == 0) || (kctx->jit_alloc[*ids] == NULL)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return;
+ }
+ }
}
static void kbasep_jit_free_finish_worker(struct work_struct *work)
@@ -1127,12 +1253,39 @@ static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
{
struct list_head *i, *tmp;
struct kbase_context *kctx = katom->kctx;
+ LIST_HEAD(jit_pending_alloc_list);
+ u8 *ids;
+ size_t j;
lockdep_assert_held(&kctx->jctx.lock);
+
+ ids = kbase_jit_free_get_ids(katom);
+ if (WARN_ON(ids == NULL))
+ return;
+
/* Remove this atom from the kctx->jit_atoms_head list */
list_del(&katom->jit_node);
- list_for_each_safe(i, tmp, &kctx->jit_pending_alloc) {
+ for (j = 0; j != katom->nr_extres; ++j) {
+ if ((ids[j] != 0) && (kctx->jit_alloc[ids[j]] != NULL)) {
+ /*
+ * If the ID is valid but the allocation request failed
+ * still succeed this soft job but don't try and free
+ * the allocation.
+ */
+ if (kctx->jit_alloc[ids[j]] != (struct kbase_va_region *) -1)
+ kbase_jit_free(kctx, kctx->jit_alloc[ids[j]]);
+
+ kctx->jit_alloc[ids[j]] = NULL;
+ }
+ }
+ /* Free the list of ids */
+ kfree(ids);
+
+ list_splice_tail_init(&kctx->jit_pending_alloc, &jit_pending_alloc_list);
+
+ list_for_each_safe(i, tmp, &jit_pending_alloc_list) {
struct kbase_jd_atom *pending_atom = list_entry(i,
struct kbase_jd_atom, queue);
if (kbase_jit_allocate_process(pending_atom) == 0) {
@@ -1271,9 +1424,14 @@ static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
int kbase_process_soft_job(struct kbase_jd_atom *katom)
{
+ int ret = 0;
+
+ KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(katom);
+
switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
- return kbase_dump_cpu_gpu_time(katom);
+ ret = kbase_dump_cpu_gpu_time(katom);
+ break;
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
@@ -1283,7 +1441,7 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
break;
case BASE_JD_REQ_SOFT_FENCE_WAIT:
{
- int ret = kbase_sync_fence_in_wait(katom);
+ ret = kbase_sync_fence_in_wait(katom);
if (ret == 1) {
#ifdef CONFIG_MALI_FENCE_DEBUG
@@ -1292,14 +1450,16 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
kbasep_add_waiting_soft_job(katom);
#endif
}
- return ret;
+ break;
}
#endif
case BASE_JD_REQ_SOFT_REPLAY:
- return kbase_replay_process(katom);
+ ret = kbase_replay_process(katom);
+ break;
case BASE_JD_REQ_SOFT_EVENT_WAIT:
- return kbasep_soft_event_wait(katom);
+ ret = kbasep_soft_event_wait(katom);
+ break;
case BASE_JD_REQ_SOFT_EVENT_SET:
kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
break;
@@ -1315,7 +1475,8 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
break;
}
case BASE_JD_REQ_SOFT_JIT_ALLOC:
- return kbase_jit_allocate_process(katom);
+ ret = kbase_jit_allocate_process(katom);
+ break;
case BASE_JD_REQ_SOFT_JIT_FREE:
kbase_jit_free_process(katom);
break;
@@ -1328,7 +1489,8 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
}
/* Atom is complete */
- return 0;
+ KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(katom);
+ return ret;
}
void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
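The kbase_process_soft_job() hunk above replaces early returns with ret/break so that the new SOFTJOB_END timeline event fires on every path out of the switch. A minimal, self-contained sketch of the pattern (illustrative names, not the driver's API):

enum item_type { TYPE_A, TYPE_B };

struct item { enum item_type type; };

/* Stubs standing in for the real handlers and tracepoints. */
static int handle_a(struct item *it) { (void)it; return 0; }
static void trace_start(struct item *it) { (void)it; }
static void trace_end(struct item *it) { (void)it; }

static int process_with_trace(struct item *it)
{
	int ret = 0;

	trace_start(it);
	switch (it->type) {
	case TYPE_A:
		ret = handle_a(it);	/* was: return handle_a(it); */
		break;
	default:
		break;
	}
	trace_end(it);			/* now reached on every path */
	return ret;
}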
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync.h b/drivers/gpu/arm/midgard/mali_kbase_sync.h
index a7690b2e446355..70557dd5b33f2d 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_sync.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -161,7 +161,11 @@ void kbase_sync_fence_out_remove(struct kbase_jd_atom *katom);
*/
static inline void kbase_sync_fence_close_fd(int fd)
{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0)
+ ksys_close(fd);
+#else
sys_close(fd);
+#endif
}
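/* Background: in-kernel callers of sys_close() were switched to
 * ksys_close() in v4.17 as part of the syscall wrapper rework,
 * hence the version check above. */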
/**
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_common.c b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
index 9520f5ac3b5ebe..5239daee409e23 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_common.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -28,6 +28,7 @@
#include <linux/workqueue.h>
#include "mali_kbase.h"
+#include "mali_kbase_sync.h"
void kbase_sync_fence_wait_worker(struct work_struct *data)
{
diff --git a/drivers/gpu/arm/midgard/mali_kbase_sync_file.c b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
index 4978d486688f6a..74495903755ad6 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_sync_file.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -73,10 +73,14 @@ int kbase_sync_fence_out_create(struct kbase_jd_atom *katom, int stream_fd)
if (!fence)
return -ENOMEM;
- /* Take an extra reference to the fence on behalf of the katom.
- * This is needed because sync_file_create() will take ownership of
- * one of these refs */
+#if (KERNEL_VERSION(4, 4, 0) > LINUX_VERSION_CODE)
+ /* Take an extra reference to the fence on behalf of the sync_file.
+ * This is only needed on older kernels, where sync_file_create()
+ * does not yet take its own reference; it has done so since
+ * v4.9.68.
+ */
dma_fence_get(fence);
+#endif
/* create a sync_file fd representing the fence */
sync_file = sync_file_create(fence);
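On kernels with the newer behaviour the caller keeps its original fence reference across sync_file_create(), and must still drop it on failure. An illustrative sketch of that contract (not the exact kbase error path):

	struct sync_file *sync_file;

	sync_file = sync_file_create(fence);	/* takes its own reference */
	if (!sync_file) {
		dma_fence_put(fence);	/* we still own our reference */
		return -ENOMEM;
	}
	/* Both the caller and the sync_file now hold a reference;
	 * the caller drops its own when it is done with the fence. */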
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.c b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
index 926d6b631469fb..2ff45f50bf16d6 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -155,6 +155,8 @@ enum tl_msg_id_obj {
KBASE_TL_EVENT_LPU_SOFTSTOP,
KBASE_TL_EVENT_ATOM_SOFTSTOP_EX,
KBASE_TL_EVENT_ATOM_SOFTSTOP_ISSUE,
+ KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+ KBASE_TL_EVENT_ATOM_SOFTJOB_END,
/* Job dump specific events. */
KBASE_JD_GPU_SOFT_RESET
@@ -500,6 +502,20 @@ static const struct tp_desc tp_desc_obj[] = {
"atom"
},
{
+ KBASE_TL_EVENT_ATOM_SOFTJOB_START,
+ __stringify(KBASE_TL_EVENT_ATOM_SOFTJOB_START),
+ "atom soft job has started",
+ "@p",
+ "atom"
+ },
+ {
+ KBASE_TL_EVENT_ATOM_SOFTJOB_END,
+ __stringify(KBASE_TL_EVENT_ATOM_SOFTJOB_END),
+ "atom soft job has completed",
+ "@p",
+ "atom"
+ },
+ {
KBASE_JD_GPU_SOFT_RESET,
__stringify(KBASE_JD_GPU_SOFT_RESET),
"gpu soft reset",
@@ -1042,17 +1058,17 @@ static void kbasep_tlstream_flush_stream(enum tl_stream_type stype)
/**
* kbasep_tlstream_autoflush_timer_callback - autoflush timer callback
- * @data: unused
+ * @timer: unused
*
* Timer is executed periodically to check whether any of the streams
* contains a buffer ready to be submitted to user space.
*/
-static void kbasep_tlstream_autoflush_timer_callback(unsigned long data)
+static void kbasep_tlstream_autoflush_timer_callback(struct timer_list *timer)
{
enum tl_stream_type stype;
int rcode;
- CSTD_UNUSED(data);
+ CSTD_UNUSED(timer);
for (stype = 0; stype < TL_STREAM_TYPE_COUNT; stype++) {
struct tl_stream *stream = tl_stream[stype];
@@ -1376,9 +1392,8 @@ int kbase_tlstream_init(void)
/* Initialize autoflush timer. */
atomic_set(&autoflush_timer_active, 0);
- setup_timer(&autoflush_timer,
- kbasep_tlstream_autoflush_timer_callback,
- 0);
+ kbase_timer_setup(&autoflush_timer,
+ kbasep_tlstream_autoflush_timer_callback);
return 0;
}
@@ -2365,6 +2380,52 @@ void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom)
kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
}
+void __kbase_tlstream_tl_event_atom_softjob_start(void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_START;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
+void __kbase_tlstream_tl_event_atom_softjob_end(void *atom)
+{
+ const u32 msg_id = KBASE_TL_EVENT_ATOM_SOFTJOB_END;
+ const size_t msg_size =
+ sizeof(msg_id) + sizeof(u64) + sizeof(atom);
+ unsigned long flags;
+ char *buffer;
+ size_t pos = 0;
+
+ buffer = kbasep_tlstream_msgbuf_acquire(
+ TL_STREAM_TYPE_OBJ,
+ msg_size, &flags);
+ KBASE_DEBUG_ASSERT(buffer);
+
+ pos = kbasep_tlstream_write_bytes(buffer, pos, &msg_id, sizeof(msg_id));
+ pos = kbasep_tlstream_write_timestamp(buffer, pos);
+ pos = kbasep_tlstream_write_bytes(
+ buffer, pos, &atom, sizeof(atom));
+ KBASE_DEBUG_ASSERT(msg_size == pos);
+
+ kbasep_tlstream_msgbuf_release(TL_STREAM_TYPE_OBJ, flags);
+}
+
void __kbase_tlstream_jd_gpu_soft_reset(void *gpu)
{
const u32 msg_id = KBASE_JD_GPU_SOFT_RESET;
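For reference, the serialized layout produced by the two softjob writers above, inferred from the order of the write calls (a reading of the code, not an official tlstream format spec):

/* OBJ-stream message layout for ATOM_SOFTJOB_START/_END (inferred):
 *   bytes 0..3    u32 msg_id
 *   bytes 4..11   u64 timestamp, via kbasep_tlstream_write_timestamp()
 *   bytes 12..    the atom pointer payload, sizeof(atom) bytes
 */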
diff --git a/drivers/gpu/arm/midgard/mali_kbase_tlstream.h b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
index f4369014d219dc..bfa25d98264add 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_tlstream.h
@@ -147,6 +147,8 @@ void __kbase_tlstream_tl_attrib_as_config(
void __kbase_tlstream_tl_event_atom_softstop_ex(void *atom);
void __kbase_tlstream_tl_event_lpu_softstop(void *lpu);
void __kbase_tlstream_tl_event_atom_softstop_issue(void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_start(void *atom);
+void __kbase_tlstream_tl_event_atom_softjob_end(void *atom);
void __kbase_tlstream_jd_gpu_soft_reset(void *gpu);
void __kbase_tlstream_aux_pm_state(u32 core_type, u64 state);
void __kbase_tlstream_aux_pagefault(u32 ctx_nr, u64 page_count_change);
@@ -515,27 +517,41 @@ extern atomic_t kbase_tlstream_enabled;
__TRACE_IF_ENABLED(tl_attrib_as_config, as, transtab, memattr, transcfg)
/**
- * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ex
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX
* @atom: atom identifier
*/
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX(atom) \
__TRACE_IF_ENABLED(tl_event_atom_softstop_ex, atom)
/**
- * KBASE_TLSTREAM_TL_EVENT_LPU_softstop
+ * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP
* @lpu: name of the LPU object
*/
#define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP(lpu) \
__TRACE_IF_ENABLED(tl_event_lpu_softstop, lpu)
/**
- * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_issue
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE
* @atom: atom identifier
*/
#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE(atom) \
__TRACE_IF_ENABLED(tl_event_atom_softstop_issue, atom)
/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START
+ * @atom: atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(atom) \
+ __TRACE_IF_ENABLED(tl_event_atom_softjob_start, atom)
+
+/**
+ * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END
+ * @atom: atom identifier
+ */
+#define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(atom) \
+ __TRACE_IF_ENABLED(tl_event_atom_softjob_end, atom)
+
+/**
* KBASE_TLSTREAM_JD_GPU_SOFT_RESET - The GPU is being soft reset
* @gpu: name of the GPU object
*
diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
index 32fffe0d80a93c..d7364d5d8c1fee 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_trace_defs.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2015,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -236,14 +236,8 @@ int dummy_array[] = {
/* PM_DESIRED_REACHED: gpu_addr == pm.gpu_in_desired_state */
KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED),
KBASE_TRACE_CODE_MAKE_CODE(PM_DESIRED_REACHED_TILER),
- KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_INUSE),
- KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_INUSE),
- KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_SHADER_NEEDED),
- KBASE_TRACE_CODE_MAKE_CODE(PM_REGISTER_CHANGE_TILER_NEEDED),
- KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_INUSE),
- KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_INUSE),
- KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_SHADER_NEEDED),
- KBASE_TRACE_CODE_MAKE_CODE(PM_UNREQUEST_CHANGE_TILER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_SHADER_NEEDED),
+ KBASE_TRACE_CODE_MAKE_CODE(PM_RELEASE_CHANGE_TILER_NEEDED),
KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_SHADER_NEEDED),
KBASE_TRACE_CODE_MAKE_CODE(PM_REQUEST_CHANGE_TILER_NEEDED),
KBASE_TRACE_CODE_MAKE_CODE(PM_WAKE_WAITERS),
diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c
deleted file mode 100644
index ee6bdf8ae32479..00000000000000
--- a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.c
+++ /dev/null
@@ -1,241 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-#include <mali_kbase.h>
-#include <mali_kbase_jm.h>
-#include <mali_kbase_hwaccess_jm.h>
-
-#define CREATE_TRACE_POINTS
-
-#ifdef CONFIG_MALI_TRACE_TIMELINE
-#include "mali_timeline.h"
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atoms_in_flight);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_atom);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_active);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_slot_action);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_gpu_power_active);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_l2_power_active);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_event);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_slot_atom);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_pm_checktrans);
-EXPORT_TRACEPOINT_SYMBOL_GPL(mali_timeline_context_active);
-
-struct kbase_trace_timeline_desc {
- char *enum_str;
- char *desc;
- char *format;
- char *format_desc;
-};
-
-static struct kbase_trace_timeline_desc kbase_trace_timeline_desc_table[] = {
- #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) { #enum_val, desc, format, format_desc }
- #include "mali_kbase_trace_timeline_defs.h"
- #undef KBASE_TIMELINE_TRACE_CODE
-};
-
-#define KBASE_NR_TRACE_CODES ARRAY_SIZE(kbase_trace_timeline_desc_table)
-
-static void *kbasep_trace_timeline_seq_start(struct seq_file *s, loff_t *pos)
-{
- if (*pos >= KBASE_NR_TRACE_CODES)
- return NULL;
-
- return &kbase_trace_timeline_desc_table[*pos];
-}
-
-static void kbasep_trace_timeline_seq_stop(struct seq_file *s, void *data)
-{
-}
-
-static void *kbasep_trace_timeline_seq_next(struct seq_file *s, void *data, loff_t *pos)
-{
- (*pos)++;
-
- if (*pos == KBASE_NR_TRACE_CODES)
- return NULL;
-
- return &kbase_trace_timeline_desc_table[*pos];
-}
-
-static int kbasep_trace_timeline_seq_show(struct seq_file *s, void *data)
-{
- struct kbase_trace_timeline_desc *trace_desc = data;
-
- seq_printf(s, "%s#%s#%s#%s\n", trace_desc->enum_str, trace_desc->desc, trace_desc->format, trace_desc->format_desc);
- return 0;
-}
-
-
-static const struct seq_operations kbasep_trace_timeline_seq_ops = {
- .start = kbasep_trace_timeline_seq_start,
- .next = kbasep_trace_timeline_seq_next,
- .stop = kbasep_trace_timeline_seq_stop,
- .show = kbasep_trace_timeline_seq_show,
-};
-
-static int kbasep_trace_timeline_debugfs_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &kbasep_trace_timeline_seq_ops);
-}
-
-static const struct file_operations kbasep_trace_timeline_debugfs_fops = {
- .open = kbasep_trace_timeline_debugfs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-#ifdef CONFIG_DEBUG_FS
-
-void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev)
-{
- debugfs_create_file("mali_timeline_defs",
- S_IRUGO, kbdev->mali_debugfs_directory, NULL,
- &kbasep_trace_timeline_debugfs_fops);
-}
-
-#endif /* CONFIG_DEBUG_FS */
-
-void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
- struct kbase_jd_atom *katom, int js)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- if (kbdev->timeline.slot_atoms_submitted[js] > 0) {
- KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 1);
- } else {
- base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
-
- KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 1);
- KBASE_TIMELINE_JOB_START(kctx, js, atom_number);
- }
- ++kbdev->timeline.slot_atoms_submitted[js];
-
- KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
-}
-
-void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
- struct kbase_jd_atom *katom, int js,
- kbasep_js_atom_done_code done_code)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- if (done_code & KBASE_JS_ATOM_DONE_EVICTED_FROM_NEXT) {
- KBASE_TIMELINE_JOB_START_NEXT(kctx, js, 0);
- } else {
- /* Job finished in JS_HEAD */
- base_atom_id atom_number = kbase_jd_atom_id(kctx, katom);
-
- KBASE_TIMELINE_JOB_START_HEAD(kctx, js, 0);
- KBASE_TIMELINE_JOB_STOP(kctx, js, atom_number);
-
- /* see if we need to trace the job in JS_NEXT moving to JS_HEAD */
- if (kbase_backend_nr_atoms_submitted(kbdev, js)) {
- struct kbase_jd_atom *next_katom;
- struct kbase_context *next_kctx;
-
- /* Peek the next atom - note that the atom in JS_HEAD will already
- * have been dequeued */
- next_katom = kbase_backend_inspect_head(kbdev, js);
- WARN_ON(!next_katom);
- next_kctx = next_katom->kctx;
- KBASE_TIMELINE_JOB_START_NEXT(next_kctx, js, 0);
- KBASE_TIMELINE_JOB_START_HEAD(next_kctx, js, 1);
- KBASE_TIMELINE_JOB_START(next_kctx, js, kbase_jd_atom_id(next_kctx, next_katom));
- }
- }
-
- --kbdev->timeline.slot_atoms_submitted[js];
-
- KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, kbdev->timeline.slot_atoms_submitted[js]);
-}
-
-void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
-{
- int uid = 0;
- int old_uid;
-
- /* If a producer already exists for the event, try to use their UID (multiple-producers) */
- uid = atomic_read(&kbdev->timeline.pm_event_uid[event_sent]);
- old_uid = uid;
-
- /* Get a new non-zero UID if we don't have one yet */
- while (!uid)
- uid = atomic_inc_return(&kbdev->timeline.pm_event_uid_counter);
-
- /* Try to use this UID */
- if (old_uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event_sent], old_uid, uid))
- /* If it changed, raced with another producer: we've lost this UID */
- uid = 0;
-
- KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_sent, uid);
-}
-
-void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
-{
- int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
-
- if (uid != 0) {
- if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
- /* If it changed, raced with another consumer: we've lost this UID */
- uid = 0;
-
- KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
- }
-}
-
-void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
-{
- int uid = atomic_read(&kbdev->timeline.pm_event_uid[event]);
-
- if (uid != atomic_cmpxchg(&kbdev->timeline.pm_event_uid[event], uid, 0))
- /* If it changed, raced with another consumer: we've lost this UID */
- uid = 0;
-
- KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event, uid);
-}
-
-void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
- /* Simply log the start of the transition */
- kbdev->timeline.l2_transitioning = true;
- KBASE_TIMELINE_POWERING_L2(kbdev);
-}
-
-void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
- /* Simply log the end of the transition */
- if (kbdev->timeline.l2_transitioning) {
- kbdev->timeline.l2_transitioning = false;
- KBASE_TIMELINE_POWERED_L2(kbdev);
- }
-}
-
-#endif /* CONFIG_MALI_TRACE_TIMELINE */
diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h
deleted file mode 100644
index c1a3dfc5675225..00000000000000
--- a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline.h
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-#if !defined(_KBASE_TRACE_TIMELINE_H)
-#define _KBASE_TRACE_TIMELINE_H
-
-#ifdef CONFIG_MALI_TRACE_TIMELINE
-
-enum kbase_trace_timeline_code {
- #define KBASE_TIMELINE_TRACE_CODE(enum_val, desc, format, format_desc) enum_val
- #include "mali_kbase_trace_timeline_defs.h"
- #undef KBASE_TIMELINE_TRACE_CODE
-};
-
-#ifdef CONFIG_DEBUG_FS
-
-/** Initialize Timeline DebugFS entries */
-void kbasep_trace_timeline_debugfs_init(struct kbase_device *kbdev);
-
-#else /* CONFIG_DEBUG_FS */
-
-#define kbasep_trace_timeline_debugfs_init CSTD_NOP
-
-#endif /* CONFIG_DEBUG_FS */
-
-/* mali_timeline.h defines kernel tracepoints used by the KBASE_TIMELINE
- * functions.
- * Output is timestamped by either sched_clock() (default), local_clock(), or
- * cpu_clock(), depending on /sys/kernel/debug/tracing/trace_clock */
-#include "mali_timeline.h"
-
-/* Trace number of atoms in flight for kctx (atoms either not completed, or in
- process of being returned to user */
-#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_atoms_in_flight(ts.tv_sec, ts.tv_nsec, \
- (int)kctx->timeline.owner_tgid, \
- count); \
- } while (0)
-
-/* Trace atom_id being Ready to Run */
-#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_atom(ts.tv_sec, ts.tv_nsec, \
- CTX_FLOW_ATOM_READY, \
- (int)kctx->timeline.owner_tgid, \
- atom_id); \
- } while (0)
-
-/* Trace number of atoms submitted to job slot js
- *
- * NOTE: This uses a different tracepoint to the head/next/soft-stop actions,
- * so that those actions can be filtered out separately from this
- *
- * This is because this is more useful, as we can use it to calculate general
- * utilization easily and accurately */
-#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_slot_active(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_SLOT_ACTIVE, \
- (int)kctx->timeline.owner_tgid, \
- js, count); \
- } while (0)
-
-
-/* Trace atoms present in JS_NEXT */
-#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_SLOT_NEXT, \
- (int)kctx->timeline.owner_tgid, \
- js, count); \
- } while (0)
-
-/* Trace atoms present in JS_HEAD */
-#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_SLOT_HEAD, \
- (int)kctx->timeline.owner_tgid, \
- js, count); \
- } while (0)
-
-/* Trace that a soft stop/evict from next is being attempted on a slot */
-#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_slot_action(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_SLOT_STOPPING, \
- (kctx) ? (int)kctx->timeline.owner_tgid : 0, \
- js, count); \
- } while (0)
-
-
-
-/* Trace state of overall GPU power */
-#define KBASE_TIMELINE_GPU_POWER(kbdev, active) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_POWER_ACTIVE, active); \
- } while (0)
-
-/* Trace state of tiler power */
-#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_POWER_TILER_ACTIVE, \
- hweight64(bitmap)); \
- } while (0)
-
-/* Trace number of shaders currently powered */
-#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_POWER_SHADER_ACTIVE, \
- hweight64(bitmap)); \
- } while (0)
-
-/* Trace state of L2 power */
-#define KBASE_TIMELINE_POWER_L2(kbdev, bitmap) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_gpu_power_active(ts.tv_sec, ts.tv_nsec, \
- SW_SET_GPU_POWER_L2_ACTIVE, \
- hweight64(bitmap)); \
- } while (0)
-
-/* Trace state of L2 cache*/
-#define KBASE_TIMELINE_POWERING_L2(kbdev) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
- SW_FLOW_GPU_POWER_L2_POWERING, \
- 1); \
- } while (0)
-
-#define KBASE_TIMELINE_POWERED_L2(kbdev) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_l2_power_active(ts.tv_sec, ts.tv_nsec, \
- SW_FLOW_GPU_POWER_L2_ACTIVE, \
- 1); \
- } while (0)
-
-/* Trace kbase_pm_send_event message send */
-#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
- SW_FLOW_PM_SEND_EVENT, \
- event_type, pm_event_id); \
- } while (0)
-
-/* Trace kbase_pm_worker message receive */
-#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_pm_event(ts.tv_sec, ts.tv_nsec, \
- SW_FLOW_PM_HANDLE_EVENT, \
- event_type, pm_event_id); \
- } while (0)
-
-
-/* Trace atom_id starting in JS_HEAD */
-#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
- HW_START_GPU_JOB_CHAIN_SW_APPROX, \
- (int)kctx->timeline.owner_tgid, \
- js, _consumerof_atom_number); \
- } while (0)
-
-/* Trace atom_id stopping on JS_HEAD */
-#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_slot_atom(ts.tv_sec, ts.tv_nsec, \
- HW_STOP_GPU_JOB_CHAIN_SW_APPROX, \
- (int)kctx->timeline.owner_tgid, \
- js, _producerof_atom_number_completed); \
- } while (0)
-
-/** Trace beginning/end of a call to kbase_pm_check_transitions_nolock from a
- * certin caller */
-#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_pm_checktrans(ts.tv_sec, ts.tv_nsec, \
- trace_code, 1); \
- } while (0)
-
-/* Trace number of contexts active */
-#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) \
- do { \
- struct timespec ts; \
- getrawmonotonic(&ts); \
- trace_mali_timeline_context_active(ts.tv_sec, ts.tv_nsec, \
- count); \
- } while (0)
-
-/* NOTE: kbase_timeline_pm_cores_func() is in mali_kbase_pm_policy.c */
-
-/**
- * Trace that an atom is starting on a job slot
- *
- * The caller must be holding hwaccess_lock
- */
-void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
- struct kbase_jd_atom *katom, int js);
-
-/**
- * Trace that an atom has done on a job slot
- *
- * 'Done' in this sense can occur either because:
- * - the atom in JS_HEAD finished
- * - the atom in JS_NEXT was evicted
- *
- * Whether the atom finished or was evicted is passed in @a done_code
- *
- * It is assumed that the atom has already been removed from the submit slot,
- * with either:
- * - kbasep_jm_dequeue_submit_slot()
- * - kbasep_jm_dequeue_tail_submit_slot()
- *
- * The caller must be holding hwaccess_lock
- */
-void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
- struct kbase_jd_atom *katom, int js,
- kbasep_js_atom_done_code done_code);
-
-
-/** Trace a pm event starting */
-void kbase_timeline_pm_send_event(struct kbase_device *kbdev,
- enum kbase_timeline_pm_event event_sent);
-
-/** Trace a pm event finishing */
-void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
-
-/** Check whether a pm event was present, and if so trace finishing it */
-void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event);
-
-/** Trace L2 power-up start */
-void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev);
-
-/** Trace L2 power-up done */
-void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev);
-
-#else
-
-#define KBASE_TIMELINE_ATOMS_IN_FLIGHT(kctx, count) CSTD_NOP()
-
-#define KBASE_TIMELINE_ATOM_READY(kctx, atom_id) CSTD_NOP()
-
-#define KBASE_TIMELINE_ATOMS_SUBMITTED(kctx, js, count) CSTD_NOP()
-
-#define KBASE_TIMELINE_JOB_START_NEXT(kctx, js, count) CSTD_NOP()
-
-#define KBASE_TIMELINE_JOB_START_HEAD(kctx, js, count) CSTD_NOP()
-
-#define KBASE_TIMELINE_TRY_SOFT_STOP(kctx, js, count) CSTD_NOP()
-
-#define KBASE_TIMELINE_GPU_POWER(kbdev, active) CSTD_NOP()
-
-#define KBASE_TIMELINE_POWER_TILER(kbdev, bitmap) CSTD_NOP()
-
-#define KBASE_TIMELINE_POWER_SHADER(kbdev, bitmap) CSTD_NOP()
-
-#define KBASE_TIMELINE_POWER_L2(kbdev, active) CSTD_NOP()
-
-#define KBASE_TIMELINE_POWERING_L2(kbdev) CSTD_NOP()
-
-#define KBASE_TIMELINE_POWERED_L2(kbdev) CSTD_NOP()
-
-#define KBASE_TIMELINE_PM_SEND_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP()
-
-#define KBASE_TIMELINE_PM_HANDLE_EVENT(kbdev, event_type, pm_event_id) CSTD_NOP()
-
-#define KBASE_TIMELINE_JOB_START(kctx, js, _consumerof_atom_number) CSTD_NOP()
-
-#define KBASE_TIMELINE_JOB_STOP(kctx, js, _producerof_atom_number_completed) CSTD_NOP()
-
-#define KBASE_TIMELINE_PM_CHECKTRANS(kbdev, trace_code) CSTD_NOP()
-
-#define KBASE_TIMELINE_CONTEXT_ACTIVE(kbdev, count) CSTD_NOP()
-
-static inline void kbase_timeline_job_slot_submit(struct kbase_device *kbdev, struct kbase_context *kctx,
- struct kbase_jd_atom *katom, int js)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-}
-
-static inline void kbase_timeline_job_slot_done(struct kbase_device *kbdev, struct kbase_context *kctx,
- struct kbase_jd_atom *katom, int js,
- kbasep_js_atom_done_code done_code)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-}
-
-static inline void kbase_timeline_pm_send_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event_sent)
-{
-}
-
-static inline void kbase_timeline_pm_check_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
-{
-}
-
-static inline void kbase_timeline_pm_handle_event(struct kbase_device *kbdev, enum kbase_timeline_pm_event event)
-{
-}
-
-static inline void kbase_timeline_pm_l2_transition_start(struct kbase_device *kbdev)
-{
-}
-
-static inline void kbase_timeline_pm_l2_transition_done(struct kbase_device *kbdev)
-{
-}
-#endif /* CONFIG_MALI_TRACE_TIMELINE */
-
-#endif /* _KBASE_TRACE_TIMELINE_H */
-
diff --git a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h b/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h
deleted file mode 100644
index 114bcac541e923..00000000000000
--- a/drivers/gpu/arm/midgard/mali_kbase_trace_timeline_defs.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2012-2015 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-/* ***** IMPORTANT: THIS IS NOT A NORMAL HEADER FILE *****
- * ***** DO NOT INCLUDE DIRECTLY *****
- * ***** THE LACK OF HEADER GUARDS IS INTENTIONAL ***** */
-
-/*
- * Conventions on Event Names:
- *
- * - The prefix determines something about how the timeline should be
- * displayed, and is split up into various parts, separated by underscores:
- * - 'SW' and 'HW' as the first part will be used to determine whether a
- * timeline is to do with Software or Hardware - effectively, separate
- * 'channels' for Software and Hardware
- * - 'START', 'STOP', 'ENTER', 'LEAVE' can be used in the second part, and
- * signify related pairs of events - these are optional.
- * - 'FLOW' indicates a generic event, which can use dependencies
- * - This gives events such as:
- * - 'SW_ENTER_FOO'
- * - 'SW_LEAVE_FOO'
- * - 'SW_FLOW_BAR_1'
- * - 'SW_FLOW_BAR_2'
- * - 'HW_START_BAZ'
- * - 'HW_STOP_BAZ'
- * - And an unadorned HW event:
- * - 'HW_BAZ_FROZBOZ'
- */
-
-/*
- * Conventions on parameter names:
- * - anything with 'instance' in the name will have a separate timeline based
- * on that instances.
- * - underscored-prefixed parameters will by hidden by default on timelines
- *
- * Hence:
- * - Different job slots have their own 'instance', based on the instance value
- * - Per-context info (e.g. atoms on a context) have their own 'instance'
- * (i.e. each context should be on a different timeline)
- *
- * Note that globally-shared resources can be tagged with a tgid, but we don't
- * want an instance per context:
- * - There's no point having separate Job Slot timelines for each context, that
- * would be confusing - there's only really 3 job slots!
- * - There's no point having separate Shader-powered timelines for each
- * context, that would be confusing - all shader cores (whether it be 4, 8,
- * etc) are shared in the system.
- */
-
- /*
- * CTX events
- */
- /* Separate timelines for each context 'instance'*/
- KBASE_TIMELINE_TRACE_CODE(CTX_SET_NR_ATOMS_IN_FLIGHT, "CTX: Atoms in flight", "%d,%d", "_instance_tgid,_value_number_of_atoms"),
- KBASE_TIMELINE_TRACE_CODE(CTX_FLOW_ATOM_READY, "CTX: Atoms Ready to Run", "%d,%d,%d", "_instance_tgid,_consumerof_atom_number,_producerof_atom_number_ready"),
-
- /*
- * SW Events
- */
- /* Separate timelines for each slot 'instance' */
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_ACTIVE, "SW: GPU slot active", "%d,%d,%d", "_tgid,_instance_slot,_value_number_of_atoms"),
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_NEXT, "SW: GPU atom in NEXT", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_next"),
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_HEAD, "SW: GPU atom in HEAD", "%d,%d,%d", "_tgid,_instance_slot,_value_is_an_atom_in_head"),
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_SLOT_STOPPING, "SW: Try Soft-Stop on GPU slot", "%d,%d,%d", "_tgid,_instance_slot,_value_is_slot_stopping"),
- /* Shader and overall power is shared - can't have separate instances of
- * it, just tagging with the context */
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_ACTIVE, "SW: GPU power active", "%d,%d", "_tgid,_value_is_power_active"),
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_TILER_ACTIVE, "SW: GPU tiler powered", "%d,%d", "_tgid,_value_number_of_tilers"),
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_SHADER_ACTIVE, "SW: GPU shaders powered", "%d,%d", "_tgid,_value_number_of_shaders"),
- KBASE_TIMELINE_TRACE_CODE(SW_SET_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powered", "%d,%d", "_tgid,_value_number_of_l2"),
-
- /* SW Power event messaging. _event_type is one from the kbase_pm_event enum */
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_SEND_EVENT, "SW: PM Send Event", "%d,%d,%d", "_tgid,_event_type,_writerof_pm_event_id"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_HANDLE_EVENT, "SW: PM Handle Event", "%d,%d,%d", "_tgid,_event_type,_finalconsumerof_pm_event_id"),
- /* SW L2 power events */
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_POWERING, "SW: GPU L2 powering", "%d,%d", "_tgid,_writerof_l2_transitioning"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_GPU_POWER_L2_ACTIVE, "SW: GPU L2 powering done", "%d,%d", "_tgid,_finalconsumerof_l2_transitioning"),
-
- KBASE_TIMELINE_TRACE_CODE(SW_SET_CONTEXT_ACTIVE, "SW: Context Active", "%d,%d", "_tgid,_value_active"),
-
- /*
- * BEGIN: Significant SW Functions that call kbase_pm_check_transitions_nolock()
- */
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_START, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweroff"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWEROFF_END, "SW: PM CheckTrans from kbase_pm_do_poweroff", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweroff"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_START, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_poweron"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_DO_POWERON_END, "SW: PM CheckTrans from kbase_pm_do_poweron", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_poweron"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_START, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_writerof_pm_checktrans_gpu_interrupt"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_GPU_INTERRUPT_END, "SW: PM CheckTrans from kbase_gpu_interrupt", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_gpu_interrupt"),
-
- /*
- * Significant Indirect callers of kbase_pm_check_transitions_nolock()
- */
- /* kbase_pm_request_cores */
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_request_cores_shader_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_REQUEST_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_request_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_request_cores_shader_tiler"),
- /* kbase_pm_release_cores */
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_START, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_release_cores_shader_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_SHADER_TILER_END, "SW: PM CheckTrans from kbase_pm_release_cores(shader+tiler)", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_release_cores_shader_tiler"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_START, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_writerof_pm_checktrans_pm_do_shader_poweroff_callback"),
- KBASE_TIMELINE_TRACE_CODE(SW_FLOW_PM_CHECKTRANS_PM_RELEASE_CORES_DEFERRED_END, "SW: PM CheckTrans from kbasep_pm_do_shader_poweroff_callback", "%d,%d", "_tgid,_finalconsumerof_pm_checktrans_pm_do_shader_poweroff_callback"),
- /*
- * END: SW Functions that call kbase_pm_check_transitions_nolock()
- */
-
- /*
- * HW Events
- */
- KBASE_TIMELINE_TRACE_CODE(HW_MMU_FAULT,
-"HW: MMU Fault", "%d,%d,%d", "_tgid,fault_type,fault_stage,asid"),
- KBASE_TIMELINE_TRACE_CODE(HW_START_GPU_JOB_CHAIN_SW_APPROX,
-"HW: Job Chain start (SW approximated)", "%d,%d,%d",
-"_tgid,job_slot,_consumerof_atom_number_ready"),
- KBASE_TIMELINE_TRACE_CODE(HW_STOP_GPU_JOB_CHAIN_SW_APPROX,
-"HW: Job Chain stop (SW approximated)", "%d,%d,%d",
-"_tgid,job_slot,_producerof_atom_number_completed")
diff --git a/drivers/gpu/arm/midgard/mali_kbase_utility.h b/drivers/gpu/arm/midgard/mali_kbase_utility.h
index d36285e26a684d..f2e5a3381e13c3 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_utility.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_utility.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2012-2013, 2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2012-2013, 2015, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -39,4 +39,28 @@
*/
bool kbasep_list_member_of(const struct list_head *base, struct list_head *entry);
+
+static inline void kbase_timer_setup(struct timer_list *timer,
+ void (*callback)(struct timer_list *timer))
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)
+ setup_timer(timer, (void (*)(unsigned long)) callback,
+ (unsigned long) timer);
+#else
+ timer_setup(timer, callback, 0);
+#endif
+}
+
+#ifndef WRITE_ONCE
+ #ifdef ASSIGN_ONCE
+ #define WRITE_ONCE(x, val) ASSIGN_ONCE(val, x)
+ #else
+ #define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
+ #endif
+#endif
+
+#ifndef READ_ONCE
+ #define READ_ONCE(x) ACCESS_ONCE(x)
+#endif
+
#endif /* _KBASE_UTILITY_H */
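[Editor's sketch] The kbase_timer_setup() shim added above lets a single callback, written against the post-4.14 timer_setup() prototype, run on older kernels too: the pre-4.14 branch registers it through setup_timer() with the timer pointer itself as the data argument, so the callback always receives a struct timer_list *. The WRITE_ONCE()/READ_ONCE() fallbacks likewise map onto ACCESS_ONCE() on kernels that predate them. A minimal usage sketch, assuming a hypothetical poll_state wrapper struct:

	struct poll_state {
		struct timer_list timer;	/* embedded kernel timer */
		unsigned int ticks;
	};

	/* Callback uses the 4.14+ prototype; container_of() recovers the
	 * wrapper on either kernel branch, since the shim guarantees the
	 * argument is the timer itself. */
	static void poll_timeout(struct timer_list *t)
	{
		struct poll_state *ps = container_of(t, struct poll_state, timer);

		ps->ticks++;
		mod_timer(&ps->timer, jiffies + HZ);	/* re-arm, one second out */
	}

	static void poll_start(struct poll_state *ps)
	{
		kbase_timer_setup(&ps->timer, poll_timeout);
		mod_timer(&ps->timer, jiffies + HZ);
	}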
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
index 60308be0cc7d35..df936cfdd4d84c 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2011-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2011-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -82,7 +82,9 @@ enum vinstr_state {
/**
* struct kbase_vinstr_context - vinstr context per device
- * @lock: protects the entire vinstr context
 + * @lock:              protects the entire vinstr context; however, the list
 + *                     of vinstr clients may be updated outside this lock
 + *                     while holding @state_lock.
* @kbdev: pointer to kbase device
* @kctx: pointer to kbase context
* @vmap: vinstr vmap for mapping hwcnt dump buffer
@@ -94,12 +96,14 @@ enum vinstr_state {
* @reprogram: when true, reprogram hwcnt block with the new set of
* counters
* @state: vinstr state
- * @state_lock: protects information about vinstr state
+ * @state_lock: protects information about vinstr state and list of
+ * clients.
* @suspend_waitq: notification queue to trigger state re-validation
* @suspend_cnt: reference counter of vinstr's suspend state
* @suspend_work: worker to execute on entering suspended state
* @resume_work: worker to execute on leaving suspended state
- * @nclients: number of attached clients, pending or otherwise
+ * @nclients: number of attached clients, pending or idle
+ * @nclients_suspended: number of attached but suspended clients
* @waiting_clients: head of list of clients being periodically sampled
* @idle_clients: head of list of clients being idle
* @suspended_clients: head of list of clients being suspended
@@ -109,13 +113,22 @@ enum vinstr_state {
* @clients_present: when true, we have at least one client
* Note: this variable is in sync. with nclients and is
* present to preserve simplicity. Protected by state_lock.
+ * @need_suspend: when true, a suspend has been requested while a resume is
+ * in progress. Resume worker should queue a suspend.
+ * @need_resume: when true, a resume has been requested while a suspend is
+ * in progress. Suspend worker should queue a resume.
+ * @forced_suspend: when true, the suspend of vinstr needs to take place
+ * regardless of the kernel/user space clients attached
+ * to it. In particular, this flag is set when the suspend
+ * of vinstr is requested on entering protected mode or at
+ * the time of device suspend.
*/
struct kbase_vinstr_context {
struct mutex lock;
struct kbase_device *kbdev;
struct kbase_context *kctx;
- struct kbase_vmap_struct vmap;
+ struct kbase_vmap_struct *vmap;
u64 gpu_va;
void *cpu_va;
size_t dump_size;
@@ -130,6 +143,7 @@ struct kbase_vinstr_context {
struct work_struct resume_work;
u32 nclients;
+ u32 nclients_suspended;
struct list_head waiting_clients;
struct list_head idle_clients;
struct list_head suspended_clients;
@@ -139,6 +153,10 @@ struct kbase_vinstr_context {
atomic_t request_pending;
bool clients_present;
+
+ bool need_suspend;
+ bool need_resume;
+ bool forced_suspend;
};
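[Editor's sketch] The three new flags implement a deferred handshake between the suspend and resume workers: a request that arrives while the opposite transition is still in flight is only recorded under state_lock, and the worker completing the in-flight transition queues the follow-up itself. A condensed sketch of the requester side, mirroring the kbase_vinstr_try_suspend() logic later in this patch:

	unsigned long flags;

	spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
	switch (vinstr_ctx->state) {
	case VINSTR_RESUMING:
		/* Resume mid-flight: the resume worker will see this flag
		 * and schedule the suspend itself. */
		vinstr_ctx->need_suspend = true;
		break;
	case VINSTR_IDLE:
		vinstr_ctx->state = VINSTR_SUSPENDING;
		schedule_work(&vinstr_ctx->suspend_work);
		break;
	default:
		break;	/* already suspending or suspended */
	}
	spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);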
/**
@@ -161,6 +179,7 @@ struct kbase_vinstr_context {
* @write_idx: index of buffer being written by dumping service
* @waitq: client's notification queue
* @pending: when true, client has attached but hwcnt not yet updated
+ * @suspended: when true, client is suspended
*/
struct kbase_vinstr_client {
struct kbase_vinstr_context *vinstr_ctx;
@@ -181,6 +200,7 @@ struct kbase_vinstr_client {
atomic_t write_idx;
wait_queue_head_t waitq;
bool pending;
+ bool suspended;
};
/**
@@ -195,6 +215,9 @@ struct kbasep_vinstr_wake_up_timer {
/*****************************************************************************/
+static void kbase_vinstr_update_suspend(
+ struct kbase_vinstr_context *vinstr_ctx);
+
static int kbasep_vinstr_service_task(void *data);
static unsigned int kbasep_vinstr_hwcnt_reader_poll(
@@ -226,14 +249,14 @@ static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
{
struct kbase_context *kctx = vinstr_ctx->kctx;
struct kbase_device *kbdev = kctx->kbdev;
- struct kbase_uk_hwcnt_setup setup;
+ struct kbase_ioctl_hwcnt_enable enable;
int err;
- setup.dump_buffer = vinstr_ctx->gpu_va;
- setup.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM];
- setup.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM];
- setup.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM];
- setup.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM];
+ enable.dump_buffer = vinstr_ctx->gpu_va;
+ enable.jm_bm = vinstr_ctx->bitmap[JM_HWCNT_BM];
+ enable.tiler_bm = vinstr_ctx->bitmap[TILER_HWCNT_BM];
+ enable.shader_bm = vinstr_ctx->bitmap[SHADER_HWCNT_BM];
+ enable.mmu_l2_bm = vinstr_ctx->bitmap[MMU_L2_HWCNT_BM];
/* Mark the context as active so the GPU is kept turned on */
/* A suspend won't happen here, because we're in a syscall from a
@@ -242,7 +265,7 @@ static int enable_hwcnt(struct kbase_vinstr_context *vinstr_ctx)
/* Schedule the context in */
kbasep_js_schedule_privileged_ctx(kbdev, kctx);
- err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &setup);
+ err = kbase_instr_hwcnt_enable_internal(kbdev, kctx, &enable);
if (err) {
/* Release the context. This had its own Power Manager Active
* reference */
@@ -315,9 +338,15 @@ size_t kbase_vinstr_dump_size(struct kbase_device *kbdev)
#endif /* CONFIG_MALI_NO_MALI */
{
/* assume v5 for now */
+#ifdef CONFIG_MALI_NO_MALI
+ u32 nr_l2 = KBASE_DUMMY_MODEL_MAX_MEMSYS_BLOCKS;
+ u64 core_mask =
+ (1ULL << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1;
+#else
base_gpu_props *props = &kbdev->gpu_props.props;
u32 nr_l2 = props->l2_props.num_l2_slices;
u64 core_mask = props->coherency_info.group[0].core_mask;
+#endif
u32 nr_blocks = fls64(core_mask);
/* JM and tiler counter blocks are always present */
@@ -342,7 +371,11 @@ static int kbasep_vinstr_map_kernel_dump_buffer(
struct kbase_context *kctx = vinstr_ctx->kctx;
u64 flags, nr_pages;
- flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR;
+ flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_WR |
+ BASE_MEM_PERMANENT_KERNEL_MAPPING | BASE_MEM_CACHED_CPU;
+ if (kctx->kbdev->mmu_mode->flags &
+ KBASE_MMU_MODE_HAS_NON_CACHEABLE)
+ flags |= BASE_MEM_UNCACHED_GPU;
vinstr_ctx->dump_size = kbasep_vinstr_dump_size_ctx(vinstr_ctx);
nr_pages = PFN_UP(vinstr_ctx->dump_size);
@@ -351,11 +384,9 @@ static int kbasep_vinstr_map_kernel_dump_buffer(
if (!reg)
return -ENOMEM;
- vinstr_ctx->cpu_va = kbase_vmap(
- kctx,
- vinstr_ctx->gpu_va,
- vinstr_ctx->dump_size,
- &vinstr_ctx->vmap);
+ vinstr_ctx->cpu_va = kbase_phy_alloc_mapping_get(kctx,
+ vinstr_ctx->gpu_va, &vinstr_ctx->vmap);
+
if (!vinstr_ctx->cpu_va) {
kbase_mem_free(kctx, vinstr_ctx->gpu_va);
return -ENOMEM;
@@ -369,7 +400,7 @@ static void kbasep_vinstr_unmap_kernel_dump_buffer(
{
struct kbase_context *kctx = vinstr_ctx->kctx;
- kbase_vunmap(kctx, &vinstr_ctx->vmap);
+ kbase_phy_alloc_mapping_put(kctx, vinstr_ctx->vmap);
kbase_mem_free(kctx, vinstr_ctx->gpu_va);
}
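[Editor's sketch] With this change the dump buffer is no longer vmapped ad hoc: the region is allocated with BASE_MEM_PERMANENT_KERNEL_MAPPING and the CPU pointer is borrowed from that permanent mapping, valid only while the reference is held. The acquire/release pairing in outline, error handling elided:

	struct kbase_vmap_struct *vmap;
	void *cpu_va;

	cpu_va = kbase_phy_alloc_mapping_get(kctx, gpu_va, &vmap);
	if (cpu_va) {
		/* ... read the hwcnt dump through cpu_va ... */
		kbase_phy_alloc_mapping_put(kctx, vmap);	/* drop the reference */
	}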
@@ -472,18 +503,24 @@ static void kbasep_vinstr_destroy_kctx(struct kbase_vinstr_context *vinstr_ctx)
struct kbasep_kctx_list_element *element;
struct kbasep_kctx_list_element *tmp;
bool found = false;
+ bool hwcnt_disabled = false;
unsigned long flags;
/* Release hw counters dumping resources. */
vinstr_ctx->thread = NULL;
- disable_hwcnt(vinstr_ctx);
- kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
/* Simplify state transitions by specifying that we have no clients. */
spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
vinstr_ctx->clients_present = false;
+ if ((VINSTR_SUSPENDED == vinstr_ctx->state) || (VINSTR_RESUMING == vinstr_ctx->state))
+ hwcnt_disabled = true;
spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ if (!hwcnt_disabled)
+ disable_hwcnt(vinstr_ctx);
+
+ kbasep_vinstr_unmap_kernel_dump_buffer(vinstr_ctx);
+
/* Remove kernel context from the device's contexts list. */
mutex_lock(&kbdev->kctx_list_lock);
list_for_each_entry_safe(element, tmp, &kbdev->kctx_list, link) {
@@ -523,6 +560,8 @@ static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
{
struct task_struct *thread = NULL;
struct kbase_vinstr_client *cli;
+ unsigned long flags;
+ bool clients_present = false;
KBASE_DEBUG_ASSERT(vinstr_ctx);
@@ -548,10 +587,14 @@ static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
hwcnt_bitmap_union(vinstr_ctx->bitmap, cli->bitmap);
vinstr_ctx->reprogram = true;
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ clients_present = (vinstr_ctx->nclients || vinstr_ctx->nclients_suspended);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
/* If this is the first client, create the vinstr kbase
* context. This context is permanently resident until the
* last client exits. */
- if (!vinstr_ctx->nclients) {
+ if (!clients_present) {
hwcnt_bitmap_set(vinstr_ctx->bitmap, cli->bitmap);
if (kbasep_vinstr_create_kctx(vinstr_ctx) < 0)
goto error;
@@ -606,8 +649,11 @@ static struct kbase_vinstr_client *kbasep_vinstr_attach_client(
atomic_set(&cli->write_idx, 0);
init_waitqueue_head(&cli->waitq);
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
vinstr_ctx->nclients++;
list_add(&cli->list, &vinstr_ctx->idle_clients);
+ kbase_vinstr_update_suspend(vinstr_ctx);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
mutex_unlock(&vinstr_ctx->lock);
@@ -620,7 +666,7 @@ error:
(unsigned long)cli->dump_buffers,
get_order(cli->dump_size * cli->buffer_count));
kfree(cli->accum_buffer);
- if (!vinstr_ctx->nclients && vinstr_ctx->kctx) {
+ if (!clients_present && vinstr_ctx->kctx) {
thread = vinstr_ctx->thread;
kbasep_vinstr_destroy_kctx(vinstr_ctx);
}
@@ -642,18 +688,19 @@ void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli)
struct task_struct *thread = NULL;
u32 zerobitmap[4] = { 0 };
int cli_found = 0;
+ unsigned long flags;
+ bool clients_present;
KBASE_DEBUG_ASSERT(cli);
vinstr_ctx = cli->vinstr_ctx;
KBASE_DEBUG_ASSERT(vinstr_ctx);
mutex_lock(&vinstr_ctx->lock);
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
list_for_each_entry_safe(iter, tmp, &vinstr_ctx->idle_clients, list) {
if (iter == cli) {
- vinstr_ctx->reprogram = true;
cli_found = 1;
- list_del(&iter->list);
break;
}
}
@@ -661,15 +708,47 @@ void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli)
list_for_each_entry_safe(
iter, tmp, &vinstr_ctx->waiting_clients, list) {
if (iter == cli) {
- vinstr_ctx->reprogram = true;
cli_found = 1;
- list_del(&iter->list);
+ break;
+ }
+ }
+ }
+ if (!cli_found) {
+ list_for_each_entry_safe(
+ iter, tmp, &vinstr_ctx->suspended_clients, list) {
+ if (iter == cli) {
+ cli_found = 1;
break;
}
}
}
KBASE_DEBUG_ASSERT(cli_found);
+ if (cli_found) {
+ vinstr_ctx->reprogram = true;
+ list_del(&iter->list);
+ }
+
+ if (!cli->suspended)
+ vinstr_ctx->nclients--;
+ else
+ vinstr_ctx->nclients_suspended--;
+
+ kbase_vinstr_update_suspend(vinstr_ctx);
+
+ clients_present = (vinstr_ctx->nclients || vinstr_ctx->nclients_suspended);
+
+ /* Rebuild context bitmap now that the client has detached */
+ hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap);
+ list_for_each_entry(iter, &vinstr_ctx->idle_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+ list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+ list_for_each_entry(iter, &vinstr_ctx->suspended_clients, list)
+ hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
+
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
kfree(cli->dump_buffers_meta);
free_pages(
(unsigned long)cli->dump_buffers,
@@ -677,19 +756,11 @@ void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli)
kfree(cli->accum_buffer);
kfree(cli);
- vinstr_ctx->nclients--;
- if (!vinstr_ctx->nclients) {
+ if (!clients_present) {
thread = vinstr_ctx->thread;
kbasep_vinstr_destroy_kctx(vinstr_ctx);
}
- /* Rebuild context bitmap now that the client has detached */
- hwcnt_bitmap_set(vinstr_ctx->bitmap, zerobitmap);
- list_for_each_entry(iter, &vinstr_ctx->idle_clients, list)
- hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
- list_for_each_entry(iter, &vinstr_ctx->waiting_clients, list)
- hwcnt_bitmap_union(vinstr_ctx->bitmap, iter->bitmap);
-
mutex_unlock(&vinstr_ctx->lock);
/* Thread must be stopped after lock is released. */
@@ -977,9 +1048,18 @@ static int kbasep_vinstr_collect_and_accumulate(
rcode = kbase_instr_hwcnt_wait_for_dump(vinstr_ctx->kctx);
WARN_ON(rcode);
+ if (!rcode) {
+ /* Invalidate the kernel buffer before reading from it.
+ * As the vinstr_ctx->lock is already held by the caller, the
+	 * unmap of the kernel buffer cannot take place simultaneously.
+ */
+ lockdep_assert_held(&vinstr_ctx->lock);
+ kbase_sync_mem_regions(vinstr_ctx->kctx, vinstr_ctx->vmap,
+ KBASE_SYNC_TO_CPU);
+ }
+
spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
- switch (vinstr_ctx->state)
- {
+ switch (vinstr_ctx->state) {
case VINSTR_SUSPENDING:
schedule_work(&vinstr_ctx->suspend_work);
break;
@@ -990,12 +1070,13 @@ static int kbasep_vinstr_collect_and_accumulate(
default:
break;
}
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
/* Accumulate values of collected counters. */
if (!rcode)
accum_clients(vinstr_ctx);
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
return rcode;
}
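[Editor's sketch] Because the kernel mapping is now created with BASE_MEM_CACHED_CPU, the dump region must be invalidated before the CPU parses it, and holding vinstr_ctx->lock is what keeps the mapping alive across the sync. The read side in outline, with parse_counters() as a hypothetical consumer:

	lockdep_assert_held(&vinstr_ctx->lock);		/* unmap cannot race */
	kbase_sync_mem_regions(vinstr_ctx->kctx, vinstr_ctx->vmap,
			KBASE_SYNC_TO_CPU);
	parse_counters(vinstr_ctx->cpu_va);		/* hypothetical */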
@@ -1100,6 +1181,7 @@ static void kbasep_vinstr_reprogram(
if (!reprogram_hwcnt(vinstr_ctx)) {
vinstr_ctx->reprogram = false;
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
list_for_each_entry(
iter,
&vinstr_ctx->idle_clients,
@@ -1110,6 +1192,7 @@ static void kbasep_vinstr_reprogram(
&vinstr_ctx->waiting_clients,
list)
iter->pending = false;
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
}
}
}
@@ -1128,6 +1211,7 @@ static int kbasep_vinstr_update_client(
enum base_hwcnt_reader_event event_id)
{
int rcode = 0;
+ unsigned long flags;
/* Copy collected counters to user readable buffer. */
if (cli->buffer_count)
@@ -1138,18 +1222,23 @@ static int kbasep_vinstr_update_client(
else
rcode = kbasep_vinstr_fill_dump_buffer_legacy(cli);
+ /* Prepare for next request. */
+ memset(cli->accum_buffer, 0, cli->dump_size);
+
+ spin_lock_irqsave(&cli->vinstr_ctx->state_lock, flags);
+	/* Check whether the client was suspended while it was being updated */
+ if (cli->suspended)
+ rcode = -EINVAL;
+ spin_unlock_irqrestore(&cli->vinstr_ctx->state_lock, flags);
+
if (rcode)
goto exit;
-
/* Notify client. Make sure all changes to memory are visible. */
wmb();
atomic_inc(&cli->write_idx);
wake_up_interruptible(&cli->waitq);
- /* Prepare for next request. */
- memset(cli->accum_buffer, 0, cli->dump_size);
-
exit:
return rcode;
}
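[Editor's sketch] The reordering above also spells out the reader-notification protocol: the payload is made globally visible before the write index advances, so a reader that observes the new index is guaranteed complete data. The producer side in outline, with fill_dump_buffer() as a hypothetical step:

	fill_dump_buffer(cli);			/* hypothetical: payload first */
	wmb();					/* order payload before index */
	atomic_inc(&cli->write_idx);		/* publish */
	wake_up_interruptible(&cli->waitq);	/* then wake the reader */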
@@ -1208,6 +1297,7 @@ static int kbasep_vinstr_service_task(void *data)
struct kbase_vinstr_client *cli = NULL;
struct kbase_vinstr_client *tmp;
int rcode;
+ unsigned long flags;
u64 timestamp = kbasep_vinstr_get_timestamp();
u64 dump_time = 0;
@@ -1220,6 +1310,7 @@ static int kbasep_vinstr_service_task(void *data)
if (current == vinstr_ctx->thread) {
atomic_set(&vinstr_ctx->request_pending, 0);
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
if (!list_empty(&vinstr_ctx->waiting_clients)) {
cli = list_first_entry(
&vinstr_ctx->waiting_clients,
@@ -1227,6 +1318,7 @@ static int kbasep_vinstr_service_task(void *data)
list);
dump_time = cli->dump_time;
}
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
}
if (!cli || ((s64)timestamp - (s64)dump_time < 0ll)) {
@@ -1255,6 +1347,7 @@ static int kbasep_vinstr_service_task(void *data)
INIT_LIST_HEAD(&expired_requests);
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
/* Find all expired requests. */
list_for_each_entry_safe(
cli,
@@ -1273,18 +1366,29 @@ static int kbasep_vinstr_service_task(void *data)
}
/* Fill data for each request found. */
- list_for_each_entry_safe(cli, tmp, &expired_requests, list) {
+ while (!list_empty(&expired_requests)) {
+ cli = list_first_entry(&expired_requests,
+ struct kbase_vinstr_client, list);
+
/* Ensure that legacy buffer will not be used from
* this kthread context. */
BUG_ON(0 == cli->buffer_count);
/* Expect only periodically sampled clients. */
BUG_ON(0 == cli->dump_interval);
+		/* Release the spinlock, as filling in the client's
+		 * userspace buffer could result in page faults. */
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
if (!rcode)
kbasep_vinstr_update_client(
cli,
timestamp,
BASE_HWCNT_READER_EVENT_PERIODIC);
+ spin_lock_irqsave(&cli->vinstr_ctx->state_lock, flags);
+
+ /* This client got suspended, move to the next one. */
+ if (cli->suspended)
+ continue;
/* Set new dumping time. Drop missed probing times. */
do {
@@ -1296,6 +1400,7 @@ static int kbasep_vinstr_service_task(void *data)
cli,
&vinstr_ctx->waiting_clients);
}
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
/* Reprogram counters set if required. */
kbasep_vinstr_reprogram(vinstr_ctx);
@@ -1410,10 +1515,18 @@ static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
struct kbase_vinstr_client *cli, u32 interval)
{
struct kbase_vinstr_context *vinstr_ctx = cli->vinstr_ctx;
+ unsigned long flags;
KBASE_DEBUG_ASSERT(vinstr_ctx);
mutex_lock(&vinstr_ctx->lock);
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+
+ if (cli->suspended) {
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+ mutex_unlock(&vinstr_ctx->lock);
+ return -ENOMEM;
+ }
list_del(&cli->list);
@@ -1435,6 +1548,7 @@ static long kbasep_vinstr_hwcnt_reader_ioctl_set_interval(
list_add(&cli->list, &vinstr_ctx->idle_clients);
}
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
mutex_unlock(&vinstr_ctx->lock);
return 0;
@@ -1739,17 +1853,29 @@ static void kbasep_vinstr_suspend_worker(struct work_struct *data)
spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
vinstr_ctx->state = VINSTR_SUSPENDED;
wake_up_all(&vinstr_ctx->suspend_waitq);
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
- mutex_unlock(&vinstr_ctx->lock);
+ if (vinstr_ctx->need_resume) {
+ vinstr_ctx->need_resume = false;
+ vinstr_ctx->state = VINSTR_RESUMING;
+ schedule_work(&vinstr_ctx->resume_work);
+
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
+ } else {
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
- /* Kick GPU scheduler to allow entering protected mode.
- * This must happen after vinstr was suspended. */
- kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+ /* Kick GPU scheduler to allow entering protected mode.
+ * This must happen after vinstr was suspended.
+ */
+ kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+ }
}
/**
- * kbasep_vinstr_suspend_worker - worker resuming vinstr module
+ * kbasep_vinstr_resume_worker - worker resuming vinstr module
* @data: pointer to work structure
*/
static void kbasep_vinstr_resume_worker(struct work_struct *data)
@@ -1768,15 +1894,27 @@ static void kbasep_vinstr_resume_worker(struct work_struct *data)
spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
vinstr_ctx->state = VINSTR_IDLE;
wake_up_all(&vinstr_ctx->suspend_waitq);
- spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
- mutex_unlock(&vinstr_ctx->lock);
+ if (vinstr_ctx->need_suspend) {
+ vinstr_ctx->need_suspend = false;
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ schedule_work(&vinstr_ctx->suspend_work);
+
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ mutex_unlock(&vinstr_ctx->lock);
+ } else {
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
- /* Kick GPU scheduler to allow entering protected mode.
- * Note that scheduler state machine might requested re-entry to
- * protected mode before vinstr was resumed.
- * This must happen after vinstr was release. */
- kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+ mutex_unlock(&vinstr_ctx->lock);
+
+ /* Kick GPU scheduler to allow entering protected mode.
+		 * Note that the scheduler state machine might have requested
+		 * re-entry to protected mode before vinstr was resumed.
+		 * This must happen after vinstr has been released.
+ */
+ kbasep_vinstr_kick_scheduler(vinstr_ctx->kbdev);
+ }
}
/*****************************************************************************/
@@ -1791,6 +1929,7 @@ struct kbase_vinstr_context *kbase_vinstr_init(struct kbase_device *kbdev)
INIT_LIST_HEAD(&vinstr_ctx->idle_clients);
INIT_LIST_HEAD(&vinstr_ctx->waiting_clients);
+ INIT_LIST_HEAD(&vinstr_ctx->suspended_clients);
mutex_init(&vinstr_ctx->lock);
spin_lock_init(&vinstr_ctx->state_lock);
vinstr_ctx->kbdev = kbdev;
@@ -1824,27 +1963,35 @@ void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx)
if (list_empty(list)) {
list = &vinstr_ctx->waiting_clients;
- if (list_empty(list))
- break;
+ if (list_empty(list)) {
+ list = &vinstr_ctx->suspended_clients;
+ if (list_empty(list))
+ break;
+ }
}
cli = list_first_entry(list, struct kbase_vinstr_client, list);
list_del(&cli->list);
+ if (!cli->suspended)
+ vinstr_ctx->nclients--;
+ else
+ vinstr_ctx->nclients_suspended--;
kfree(cli->accum_buffer);
kfree(cli);
- vinstr_ctx->nclients--;
}
KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients);
+ KBASE_DEBUG_ASSERT(!vinstr_ctx->nclients_suspended);
if (vinstr_ctx->kctx)
kbasep_vinstr_destroy_kctx(vinstr_ctx);
kfree(vinstr_ctx);
}
int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx,
- struct kbase_uk_hwcnt_reader_setup *setup)
+ struct kbase_ioctl_hwcnt_reader_setup *setup)
{
struct kbase_vinstr_client *cli;
u32 bitmap[4];
+ int fd;
KBASE_DEBUG_ASSERT(vinstr_ctx);
KBASE_DEBUG_ASSERT(setup);
@@ -1859,31 +2006,32 @@ int kbase_vinstr_hwcnt_reader_setup(struct kbase_vinstr_context *vinstr_ctx,
vinstr_ctx,
setup->buffer_count,
bitmap,
- &setup->fd,
+ &fd,
NULL);
if (!cli)
return -ENOMEM;
- return 0;
+ kbase_vinstr_wait_for_ready(vinstr_ctx);
+ return fd;
}
int kbase_vinstr_legacy_hwc_setup(
struct kbase_vinstr_context *vinstr_ctx,
struct kbase_vinstr_client **cli,
- struct kbase_uk_hwcnt_setup *setup)
+ struct kbase_ioctl_hwcnt_enable *enable)
{
KBASE_DEBUG_ASSERT(vinstr_ctx);
- KBASE_DEBUG_ASSERT(setup);
+ KBASE_DEBUG_ASSERT(enable);
KBASE_DEBUG_ASSERT(cli);
- if (setup->dump_buffer) {
+ if (enable->dump_buffer) {
u32 bitmap[4];
- bitmap[SHADER_HWCNT_BM] = setup->shader_bm;
- bitmap[TILER_HWCNT_BM] = setup->tiler_bm;
- bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
- bitmap[JM_HWCNT_BM] = setup->jm_bm;
+ bitmap[SHADER_HWCNT_BM] = enable->shader_bm;
+ bitmap[TILER_HWCNT_BM] = enable->tiler_bm;
+ bitmap[MMU_L2_HWCNT_BM] = enable->mmu_l2_bm;
+ bitmap[JM_HWCNT_BM] = enable->jm_bm;
if (*cli)
return -EBUSY;
@@ -1892,11 +2040,13 @@ int kbase_vinstr_legacy_hwc_setup(
vinstr_ctx,
0,
bitmap,
- (void *)(long)setup->dump_buffer,
+ (void *)(uintptr_t)enable->dump_buffer,
NULL);
if (!(*cli))
return -ENOMEM;
+
+ kbase_vinstr_wait_for_ready(vinstr_ctx);
} else {
if (!*cli)
return -EINVAL;
@@ -1910,9 +2060,10 @@ int kbase_vinstr_legacy_hwc_setup(
struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
struct kbase_vinstr_context *vinstr_ctx,
- struct kbase_uk_hwcnt_reader_setup *setup,
+ struct kbase_ioctl_hwcnt_reader_setup *setup,
void *kernel_buffer)
{
+ struct kbase_vinstr_client *kernel_client;
u32 bitmap[4];
if (!vinstr_ctx || !setup || !kernel_buffer)
@@ -1923,12 +2074,17 @@ struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
bitmap[MMU_L2_HWCNT_BM] = setup->mmu_l2_bm;
bitmap[JM_HWCNT_BM] = setup->jm_bm;
- return kbasep_vinstr_attach_client(
- vinstr_ctx,
- 0,
- bitmap,
- NULL,
- kernel_buffer);
+ kernel_client = kbasep_vinstr_attach_client(
+ vinstr_ctx,
+ 0,
+ bitmap,
+ NULL,
+ kernel_buffer);
+
+ if (kernel_client)
+ kbase_vinstr_wait_for_ready(vinstr_ctx);
+
+ return kernel_client;
}
KBASE_EXPORT_TEST_API(kbase_vinstr_hwcnt_kernel_setup);
@@ -2010,6 +2166,7 @@ int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx)
KBASE_DEBUG_ASSERT(vinstr_ctx);
spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ vinstr_ctx->forced_suspend = true;
switch (vinstr_ctx->state) {
case VINSTR_SUSPENDED:
vinstr_ctx->suspend_cnt++;
@@ -2036,13 +2193,42 @@ int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx)
vinstr_ctx->state = VINSTR_SUSPENDING;
break;
+ case VINSTR_RESUMING:
+ vinstr_ctx->need_suspend = true;
+ break;
+
case VINSTR_SUSPENDING:
- /* fall through */
+ break;
+
+ default:
+ KBASE_DEBUG_ASSERT(0);
+ break;
+ }
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+
+ return ret;
+}
+
+static int kbase_vinstr_is_ready(struct kbase_vinstr_context *vinstr_ctx)
+{
+ unsigned long flags;
+ int ret = -EAGAIN;
+
+ KBASE_DEBUG_ASSERT(vinstr_ctx);
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+ switch (vinstr_ctx->state) {
+ case VINSTR_SUSPENDED:
case VINSTR_RESUMING:
+ case VINSTR_SUSPENDING:
break;
+ case VINSTR_IDLE:
+ case VINSTR_DUMPING:
+ ret = 0;
+ break;
default:
- BUG();
+ KBASE_DEBUG_ASSERT(0);
break;
}
spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
@@ -2056,6 +2242,58 @@ void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx)
(0 == kbase_vinstr_try_suspend(vinstr_ctx)));
}
+void kbase_vinstr_wait_for_ready(struct kbase_vinstr_context *vinstr_ctx)
+{
+ wait_event(vinstr_ctx->suspend_waitq,
+ (0 == kbase_vinstr_is_ready(vinstr_ctx)));
+}
+KBASE_EXPORT_TEST_API(kbase_vinstr_wait_for_ready);
+
+/**
+ * kbase_vinstr_update_suspend - Update vinstr suspend/resume status depending
+ * on nclients
+ * @vinstr_ctx: vinstr context pointer
+ *
+ * This function should be called whenever vinstr_ctx->nclients changes. This
+ * may cause vinstr to be suspended or resumed, depending on the number of
+ * clients and whether IPA is suspended or not.
+ */
+static void kbase_vinstr_update_suspend(struct kbase_vinstr_context *vinstr_ctx)
+{
+ lockdep_assert_held(&vinstr_ctx->state_lock);
+
+ switch (vinstr_ctx->state) {
+ case VINSTR_SUSPENDED:
+ if ((vinstr_ctx->nclients) && (0 == vinstr_ctx->suspend_cnt)) {
+ vinstr_ctx->state = VINSTR_RESUMING;
+ schedule_work(&vinstr_ctx->resume_work);
+ }
+ break;
+
+ case VINSTR_SUSPENDING:
+ if ((vinstr_ctx->nclients) && (!vinstr_ctx->forced_suspend))
+ vinstr_ctx->need_resume = true;
+ break;
+
+ case VINSTR_IDLE:
+ if (!vinstr_ctx->nclients) {
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ schedule_work(&vinstr_ctx->suspend_work);
+ }
+ break;
+
+ case VINSTR_DUMPING:
+ if (!vinstr_ctx->nclients)
+ vinstr_ctx->state = VINSTR_SUSPENDING;
+ break;
+
+ case VINSTR_RESUMING:
+ if (!vinstr_ctx->nclients)
+ vinstr_ctx->need_suspend = true;
+ break;
+ }
+}
+
void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx)
{
unsigned long flags;
@@ -2068,6 +2306,7 @@ void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx)
BUG_ON(0 == vinstr_ctx->suspend_cnt);
vinstr_ctx->suspend_cnt--;
if (0 == vinstr_ctx->suspend_cnt) {
+ vinstr_ctx->forced_suspend = false;
if (vinstr_ctx->clients_present) {
vinstr_ctx->state = VINSTR_RESUMING;
schedule_work(&vinstr_ctx->resume_work);
@@ -2078,3 +2317,45 @@ void kbase_vinstr_resume(struct kbase_vinstr_context *vinstr_ctx)
}
spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
}
+
+void kbase_vinstr_suspend_client(struct kbase_vinstr_client *client)
+{
+ struct kbase_vinstr_context *vinstr_ctx = client->vinstr_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+
+ if (!client->suspended) {
+ list_del(&client->list);
+ list_add(&client->list, &vinstr_ctx->suspended_clients);
+
+ vinstr_ctx->nclients--;
+ vinstr_ctx->nclients_suspended++;
+ kbase_vinstr_update_suspend(vinstr_ctx);
+
+ client->suspended = true;
+ }
+
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+}
+
+void kbase_vinstr_resume_client(struct kbase_vinstr_client *client)
+{
+ struct kbase_vinstr_context *vinstr_ctx = client->vinstr_ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vinstr_ctx->state_lock, flags);
+
+ if (client->suspended) {
+ list_del(&client->list);
+ list_add(&client->list, &vinstr_ctx->idle_clients);
+
+ vinstr_ctx->nclients++;
+ vinstr_ctx->nclients_suspended--;
+ kbase_vinstr_update_suspend(vinstr_ctx);
+
+ client->suspended = false;
+ }
+
+ spin_unlock_irqrestore(&vinstr_ctx->state_lock, flags);
+}
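[Editor's sketch] kbase_vinstr_suspend_client() and kbase_vinstr_resume_client() let a caller park an individual client without detaching it: the client migrates between the idle and suspended lists, and the context-level state machine is re-evaluated on each move. A hedged usage sketch, with do_exclusive_work() as a hypothetical placeholder:

	kbase_vinstr_suspend_client(cli);	/* off the sampling lists */
	do_exclusive_work();			/* hypothetical critical section */
	kbase_vinstr_resume_client(cli);	/* back onto the idle list */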
diff --git a/drivers/gpu/arm/midgard/mali_kbase_vinstr.h b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
index af7c7b68aa268f..d32799f74084d7 100644
--- a/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
+++ b/drivers/gpu/arm/midgard/mali_kbase_vinstr.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2015-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -24,35 +24,13 @@
#define _KBASE_VINSTR_H_
#include <mali_kbase_hwcnt_reader.h>
+#include <mali_kbase_ioctl.h>
/*****************************************************************************/
struct kbase_vinstr_context;
struct kbase_vinstr_client;
-struct kbase_uk_hwcnt_setup {
- /* IN */
- u64 dump_buffer;
- u32 jm_bm;
- u32 shader_bm;
- u32 tiler_bm;
- u32 unused_1; /* keep for backwards compatibility */
- u32 mmu_l2_bm;
- u32 padding;
- /* OUT */
-};
-
-struct kbase_uk_hwcnt_reader_setup {
- /* IN */
- u32 buffer_count;
- u32 jm_bm;
- u32 shader_bm;
- u32 tiler_bm;
- u32 mmu_l2_bm;
-
- /* OUT */
- s32 fd;
-};
/*****************************************************************************/
/**
@@ -74,24 +52,24 @@ void kbase_vinstr_term(struct kbase_vinstr_context *vinstr_ctx);
* @vinstr_ctx: vinstr context
* @setup: reader's configuration
*
- * Return: zero on success
+ * Return: file descriptor on success, or a negative error code otherwise
*/
int kbase_vinstr_hwcnt_reader_setup(
struct kbase_vinstr_context *vinstr_ctx,
- struct kbase_uk_hwcnt_reader_setup *setup);
+ struct kbase_ioctl_hwcnt_reader_setup *setup);
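[Editor's sketch] Since the reader file descriptor is now the return value rather than being written back through the setup struct, callers distinguish success by sign; a minimal sketch:

	int fd = kbase_vinstr_hwcnt_reader_setup(vinstr_ctx, &setup);

	if (fd < 0)
		return fd;	/* e.g. -ENOMEM if the client could not attach */
	/* fd is ready for the hwcnt reader ioctl/mmap interface */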
/**
* kbase_vinstr_legacy_hwc_setup - configure hw counters for dumping
* @vinstr_ctx: vinstr context
* @cli: pointer where to store pointer to new vinstr client structure
- * @setup: hwc configuration
+ * @enable: hwc configuration
*
* Return: zero on success
*/
int kbase_vinstr_legacy_hwc_setup(
struct kbase_vinstr_context *vinstr_ctx,
struct kbase_vinstr_client **cli,
- struct kbase_uk_hwcnt_setup *setup);
+ struct kbase_ioctl_hwcnt_enable *enable);
/**
* kbase_vinstr_hwcnt_kernel_setup - configure hw counters for kernel side
@@ -100,13 +78,13 @@ int kbase_vinstr_legacy_hwc_setup(
* @setup: reader's configuration
* @kernel_buffer: pointer to dump buffer
*
- * setup->buffer_count and setup->fd are not used for kernel side clients.
+ * setup->buffer_count is not used for kernel side clients.
*
* Return: pointer to client structure, or NULL on failure
*/
struct kbase_vinstr_client *kbase_vinstr_hwcnt_kernel_setup(
struct kbase_vinstr_context *vinstr_ctx,
- struct kbase_uk_hwcnt_reader_setup *setup,
+ struct kbase_ioctl_hwcnt_reader_setup *setup,
void *kernel_buffer);
/**
@@ -156,6 +134,16 @@ int kbase_vinstr_try_suspend(struct kbase_vinstr_context *vinstr_ctx);
void kbase_vinstr_suspend(struct kbase_vinstr_context *vinstr_ctx);
/**
+ * kbase_vinstr_wait_for_ready - waits for the vinstr context to get ready
+ * @vinstr_ctx: vinstr context
+ *
+ * Waits for vinstr to become ready for dumping. Vinstr may still be in the
+ * resuming state after a client is attached, but clients expect vinstr to be
+ * ready for dumping immediately after attach.
+ */
+void kbase_vinstr_wait_for_ready(struct kbase_vinstr_context *vinstr_ctx);
+
+/**
* kbase_vinstr_resume - resumes operation of a given vinstr context
* @vinstr_ctx: vinstr context
*
@@ -178,5 +166,17 @@ size_t kbase_vinstr_dump_size(struct kbase_device *kbdev);
*/
void kbase_vinstr_detach_client(struct kbase_vinstr_client *cli);
+/**
+ * kbase_vinstr_suspend_client - Suspend vinstr client
+ * @client: pointer to vinstr client
+ */
+void kbase_vinstr_suspend_client(struct kbase_vinstr_client *client);
+
+/**
+ * kbase_vinstr_resume_client - Resume vinstr client
+ * @client: pointer to vinstr client
+ */
+void kbase_vinstr_resume_client(struct kbase_vinstr_client *client);
+
#endif /* _KBASE_VINSTR_H_ */
diff --git a/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
index da2ffaffccc733..920562e40603ee 100644
--- a/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
+++ b/drivers/gpu/arm/midgard/mali_linux_kbase_trace.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2014 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014,2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -176,11 +176,10 @@ DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_TILER);
DEFINE_MALI_ADD_EVENT(PM_CORES_POWERED_L2);
DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED);
DEFINE_MALI_ADD_EVENT(PM_DESIRED_REACHED_TILER);
-DEFINE_MALI_ADD_EVENT(PM_UNREQUEST_CHANGE_SHADER_NEEDED);
DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_SHADER_NEEDED);
-DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_NEEDED);
-DEFINE_MALI_ADD_EVENT(PM_REGISTER_CHANGE_SHADER_INUSE);
-DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_INUSE);
+DEFINE_MALI_ADD_EVENT(PM_REQUEST_CHANGE_TILER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_SHADER_NEEDED);
+DEFINE_MALI_ADD_EVENT(PM_RELEASE_CHANGE_TILER_NEEDED);
DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE);
DEFINE_MALI_ADD_EVENT(PM_CORES_AVAILABLE_TILER);
DEFINE_MALI_ADD_EVENT(PM_CORES_CHANGE_AVAILABLE);
diff --git a/drivers/gpu/arm/midgard/mali_midg_regmap.h b/drivers/gpu/arm/midgard/mali_midg_regmap.h
index 5e83ee87242cf6..8d9f7b61b6d30b 100644
--- a/drivers/gpu/arm/midgard/mali_midg_regmap.h
+++ b/drivers/gpu/arm/midgard/mali_midg_regmap.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -34,8 +34,7 @@
#define GPU_CONTROL_REG(r) (GPU_CONTROL_BASE + (r))
#define GPU_ID 0x000 /* (RO) GPU and revision identifier */
#define L2_FEATURES 0x004 /* (RO) Level 2 cache features */
-#define SUSPEND_SIZE 0x008 /* (RO) Fixed-function suspend buffer
- size */
+#define CORE_FEATURES 0x008 /* (RO) Shader Core Features */
#define TILER_FEATURES 0x00C /* (RO) Tiler Features */
#define MEM_FEATURES 0x010 /* (RO) Memory system features */
#define MMU_FEATURES 0x014 /* (RO) MMU features */
@@ -46,6 +45,7 @@
#define GPU_IRQ_MASK 0x028 /* (RW) */
#define GPU_IRQ_STATUS 0x02C /* (RO) */
+
/* IRQ flags */
#define GPU_FAULT (1 << 0) /* A GPU Fault has occurred */
#define MULTIPLE_GPU_FAULTS (1 << 7) /* More than one GPU Fault occurred. */
@@ -93,6 +93,9 @@
#define THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
#define THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */
#define THREAD_FEATURES 0x0AC /* (RO) Thread features */
+#define THREAD_TLS_ALLOC          0x310	/* (RO) Number of threads per core
+					 * for which TLS must be allocated
+					 */
#define TEXTURE_FEATURES_0 0x0B0 /* (RO) Support flags for indexed texture formats 0..31 */
#define TEXTURE_FEATURES_1 0x0B4 /* (RO) Support flags for indexed texture formats 32..63 */
@@ -213,6 +216,9 @@
#define JOB_IRQ_JS_STATE 0x010 /* status==active and _next == busy snapshot from last JOB_IRQ_CLEAR */
#define JOB_IRQ_THROTTLE 0x014 /* cycles to delay delivering an interrupt externally. The JOB_IRQ_STATUS is NOT affected by this, just the delivery of the interrupt. */
+/* JOB IRQ flags */
+#define JOB_IRQ_GLOBAL_IF (1 << 18) /* Global interface interrupt received */
+
#define JOB_SLOT0 0x800 /* Configuration registers for job slot 0 */
#define JOB_SLOT1 0x880 /* Configuration registers for job slot 1 */
#define JOB_SLOT2 0x900 /* Configuration registers for job slot 2 */
@@ -495,7 +501,7 @@
#define PRFCNT_CONFIG_MODE_MANUAL 1 /* The performance counters are enabled, but are only written out when a PRFCNT_SAMPLE command is issued using the GPU_COMMAND register. */
#define PRFCNT_CONFIG_MODE_TILE 2 /* The performance counters are enabled, and are written out each time a tile finishes rendering. */
-/* AS<n>_MEMATTR values: */
+/* AS<n>_MEMATTR values from MMU_MEMATTR_STAGE1: */
/* Use GPU implementation-defined caching policy. */
#define AS_MEMATTR_IMPL_DEF_CACHE_POLICY 0x88ull
/* The attribute set to force all resources to be cached. */
@@ -507,6 +513,12 @@
#define AS_MEMATTR_AARCH64_OUTER_IMPL_DEF 0x88ull
/* Set to write back memory, outer caching */
#define AS_MEMATTR_AARCH64_OUTER_WA 0x8Dull
+/* Set to inner non-cacheable, outer non-cacheable.
+ * The setting defined by the alloc bits is ignored, but set to a valid
+ * encoding:
+ * - no alloc on read
+ * - no alloc on write
+ */
+#define AS_MEMATTR_AARCH64_NON_CACHEABLE 0x4Cull
/* Use GPU implementation-defined caching policy. */
#define AS_MEMATTR_LPAE_IMPL_DEF_CACHE_POLICY 0x48ull
@@ -518,6 +530,11 @@
#define AS_MEMATTR_LPAE_OUTER_IMPL_DEF 0x88ull
/* Set to write back memory, outer caching */
#define AS_MEMATTR_LPAE_OUTER_WA 0x8Dull
+/* There is no LPAE support for non-cacheable, since the memory type is always
+ * write-back.
+ * Mark this setting as reserved for LPAE.
+ */
+#define AS_MEMATTR_LPAE_NON_CACHEABLE_RESERVED
/* Symbols for default MEMATTR to use
* Default is - HW implementation defined caching */
@@ -534,6 +551,8 @@
#define AS_MEMATTR_INDEX_OUTER_IMPL_DEF 3
/* Outer coherent, write alloc inner */
#define AS_MEMATTR_INDEX_OUTER_WA 4
+/* Normal memory, inner non-cacheable, outer non-cacheable (ARMv8 mode only) */
+#define AS_MEMATTR_INDEX_NON_CACHEABLE 5
/* JS<n>_FEATURES register */
diff --git a/drivers/gpu/arm/midgard/mali_timeline.h b/drivers/gpu/arm/midgard/mali_timeline.h
deleted file mode 100644
index d0deeadf479fad..00000000000000
--- a/drivers/gpu/arm/midgard/mali_timeline.h
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- *
- * (C) COPYRIGHT 2012-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, you can access it online at
- * http://www.gnu.org/licenses/gpl-2.0.html.
- *
- * SPDX-License-Identifier: GPL-2.0
- *
- */
-
-
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM mali_timeline
-
-#if !defined(_MALI_TIMELINE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _MALI_TIMELINE_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(mali_timeline_atoms_in_flight,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int tgid,
- int count),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- tgid,
- count),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, tgid)
- __field(int, count)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->tgid = tgid;
- __entry->count = count;
- ),
-
- TP_printk("%i,%i.%.9i,%i,%i", CTX_SET_NR_ATOMS_IN_FLIGHT,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->tgid,
- __entry->count)
-);
-
-
-TRACE_EVENT(mali_timeline_atom,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int tgid,
- int atom_id),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- tgid,
- atom_id),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, tgid)
- __field(int, atom_id)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->tgid = tgid;
- __entry->atom_id = atom_id;
- ),
-
- TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->tgid,
- __entry->atom_id,
- __entry->atom_id)
-);
-
-TRACE_EVENT(mali_timeline_gpu_slot_active,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int tgid,
- int js,
- int count),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- tgid,
- js,
- count),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, tgid)
- __field(int, js)
- __field(int, count)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->tgid = tgid;
- __entry->js = js;
- __entry->count = count;
- ),
-
- TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->tgid,
- __entry->js,
- __entry->count)
-);
-
-TRACE_EVENT(mali_timeline_gpu_slot_action,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int tgid,
- int js,
- int count),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- tgid,
- js,
- count),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, tgid)
- __field(int, js)
- __field(int, count)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->tgid = tgid;
- __entry->js = js;
- __entry->count = count;
- ),
-
- TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->tgid,
- __entry->js,
- __entry->count)
-);
-
-TRACE_EVENT(mali_timeline_gpu_power_active,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int active),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- active),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, active)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->active = active;
- ),
-
- TP_printk("%i,%i.%.9i,0,%i", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->active)
-
-);
-
-TRACE_EVENT(mali_timeline_l2_power_active,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int state),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- state),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, state)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->state = state;
- ),
-
- TP_printk("%i,%i.%.9i,0,%i", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->state)
-
-);
-TRACE_EVENT(mali_timeline_pm_event,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int pm_event_type,
- unsigned int pm_event_id),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- pm_event_type,
- pm_event_id),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, pm_event_type)
- __field(unsigned int, pm_event_id)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->pm_event_type = pm_event_type;
- __entry->pm_event_id = pm_event_id;
- ),
-
- TP_printk("%i,%i.%.9i,0,%i,%u", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->pm_event_type, __entry->pm_event_id)
-
-);
-
-TRACE_EVENT(mali_timeline_slot_atom,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int event_type,
- int tgid,
- int js,
- int atom_id),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- event_type,
- tgid,
- js,
- atom_id),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, event_type)
- __field(int, tgid)
- __field(int, js)
- __field(int, atom_id)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->event_type = event_type;
- __entry->tgid = tgid;
- __entry->js = js;
- __entry->atom_id = atom_id;
- ),
-
- TP_printk("%i,%i.%.9i,%i,%i,%i", __entry->event_type,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->tgid,
- __entry->js,
- __entry->atom_id)
-);
-
-TRACE_EVENT(mali_timeline_pm_checktrans,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int trans_code,
- int trans_id),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- trans_code,
- trans_id),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, trans_code)
- __field(int, trans_id)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->trans_code = trans_code;
- __entry->trans_id = trans_id;
- ),
-
- TP_printk("%i,%i.%.9i,0,%i", __entry->trans_code,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->trans_id)
-
-);
-
-TRACE_EVENT(mali_timeline_context_active,
-
- TP_PROTO(u64 ts_sec,
- u32 ts_nsec,
- int count),
-
- TP_ARGS(ts_sec,
- ts_nsec,
- count),
-
- TP_STRUCT__entry(
- __field(u64, ts_sec)
- __field(u32, ts_nsec)
- __field(int, count)
- ),
-
- TP_fast_assign(
- __entry->ts_sec = ts_sec;
- __entry->ts_nsec = ts_nsec;
- __entry->count = count;
- ),
-
- TP_printk("%i,%i.%.9i,0,%i", SW_SET_CONTEXT_ACTIVE,
- (int)__entry->ts_sec,
- (int)__entry->ts_nsec,
- __entry->count)
-);
-
-#endif /* _MALI_TIMELINE_H */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-
-/* This part must be outside protection */
-#include <trace/define_trace.h>
-
diff --git a/drivers/gpu/arm/midgard/mali_uk.h b/drivers/gpu/arm/midgard/mali_uk.h
index 961a4a5c63ebf7..c81f404de26a1e 100644
--- a/drivers/gpu/arm/midgard/mali_uk.h
+++ b/drivers/gpu/arm/midgard/mali_uk.h
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2010, 2012-2015 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010, 2012-2015, 2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -44,7 +44,7 @@ extern "C" {
* @defgroup uk_api User-Kernel Interface API
*
* The User-Kernel Interface abstracts the communication mechanism between the user and kernel-side code of device
- * drivers developed as part of the Midgard DDK. Currently that includes the Base driver and the UMP driver.
+ * drivers developed as part of the Midgard DDK. Currently that includes the Base driver.
*
 * It exposes an OS independent API to user-side code (UKU) which routes function calls to an OS-independent
* kernel-side API (UKK) via an OS-specific communication mechanism.
diff --git a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
index 85cabf4ff08fcb..4f84c04121c904 100755..100644
--- a/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
+++ b/drivers/gpu/arm/midgard/platform/rk/mali_kbase_config_rk.c
@@ -170,7 +170,6 @@ static int rk_pm_callback_power_on(struct kbase_device *kbdev)
}
platform->is_powered = true;
- KBASE_TIMELINE_GPU_POWER(kbdev, 1);
return ret;
}
@@ -187,7 +186,6 @@ static void rk_pm_callback_power_off(struct kbase_device *kbdev)
dev_dbg(kbdev->dev, "powering off\n");
platform->is_powered = false;
- KBASE_TIMELINE_GPU_POWER(kbdev, 0);
rk_pm_disable_clk(kbdev);
diff --git a/drivers/gpu/arm/midgard/sconscript b/drivers/gpu/arm/midgard/sconscript
index eae28f4011e9f1..01c75895611f19 100644
--- a/drivers/gpu/arm/midgard/sconscript
+++ b/drivers/gpu/arm/midgard/sconscript
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2010-2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -47,22 +47,18 @@ if Glob('#kernel/drivers/gpu/arm/midgard/tests/internal/src/mock') and env['unit
make_args = env.kernel_get_config_defines(ret_list = True) + [
'PLATFORM=%s' % env['platform'],
- 'MALI_ERROR_INJECT_ON=%s' % env['error_inject'],
'MALI_KERNEL_TEST_API=%s' % env['debug'],
'MALI_UNIT_TEST=%s' % env['unit'],
'MALI_RELEASE_NAME=%s' % env['mali_release_name'],
'MALI_MOCK_TEST=%s' % mock_test,
'MALI_CUSTOMER_RELEASE=%s' % env['release'],
+ 'MALI_USE_CSF=%s' % env['csf'],
'MALI_COVERAGE=%s' % env['coverage'],
]
kbase = env.BuildKernelModule('$STATIC_LIB_PATH/mali_kbase.ko', kbase_src,
make_args = make_args)
-# need Module.symvers from ump.ko build
-if int(env['ump']) == 1:
- env.Depends(kbase, '$STATIC_LIB_PATH/ump.ko')
-
if 'smc_protected_mode_switcher' in env:
env.Depends('$STATIC_LIB_PATH/mali_kbase.ko', '$STATIC_LIB_PATH/smc_protected_mode_switcher.ko')
diff --git a/drivers/gpu/arm/midgard/tests/Mconfig b/drivers/gpu/arm/midgard/tests/Mconfig
new file mode 100644
index 00000000000000..ddd76305dbb87e
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/Mconfig
@@ -0,0 +1,27 @@
+#
+# (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the
+# GNU General Public License version 2 as published by the Free Software
+# Foundation, and any use by you of this program is subject to the terms
+# of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained
+# from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+# Boston, MA 02110-1301, USA.
+#
+
+config UNIT_TEST_KERNEL_MODULES
+ bool
+ default y if UNIT_TEST_CODE && BUILD_KERNEL_MODULES
+ default n
+
+config BUILD_IPA_TESTS
+ bool
+ default y if UNIT_TEST_KERNEL_MODULES && MALI_DEVFREQ
+ default n
+
+config BUILD_CSF_TESTS
+ bool
+ default y if UNIT_TEST_KERNEL_MODULES && GPU_HAS_CSF
+ default n
diff --git a/drivers/gpu/arm/midgard/tests/build.bp b/drivers/gpu/arm/midgard/tests/build.bp
new file mode 100644
index 00000000000000..3107062d6610ff
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/build.bp
@@ -0,0 +1,36 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ * (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_defaults {
+ name: "kernel_test_module_defaults",
+ defaults: ["mali_kbase_shared_config_defaults"],
+ include_dirs: [
+ "kernel/drivers/gpu/arm",
+ "kernel/drivers/gpu/arm/midgard",
+ "kernel/drivers/gpu/arm/midgard/backend/gpu",
+ "kernel/drivers/gpu/arm/midgard/tests/include",
+ ],
+}
+
+subdirs = [
+ "kutf",
+ "mali_kutf_irq_test",
+]
+
+optional_subdirs = [
+ "kutf_test",
+ "kutf_test_runner",
+ "mali_kutf_ipa_test",
+ "mali_kutf_ipa_unit_test",
+ "mali_kutf_vinstr_test",
+ "mali_kutf_fw_test",
+]
diff --git a/drivers/gpu/arm/midgard/tests/kutf/build.bp b/drivers/gpu/arm/midgard/tests/kutf/build.bp
new file mode 100644
index 00000000000000..960c8faa8df954
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/kutf/build.bp
@@ -0,0 +1,31 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ * (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_kernel_module {
+ name: "kutf",
+ defaults: ["kernel_defaults"],
+ srcs: [
+ "Kbuild",
+ "kutf_helpers.c",
+ "kutf_helpers_user.c",
+ "kutf_mem.c",
+ "kutf_resultset.c",
+ "kutf_suite.c",
+ "kutf_utils.c",
+ ],
+ kbuild_options: ["CONFIG_MALI_KUTF=m"],
+ include_dirs: ["kernel/drivers/gpu/arm/midgard/tests/include"],
+ enabled: false,
+ base_build_kutf: {
+ enabled: true,
+ },
+}
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
index e3dc5eb940cf0f..9218a40f80695c 100644
--- a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/Makefile
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2015, 2017-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -29,13 +29,9 @@ $(error Must specify KDIR to point to the kernel to target))
endif
TEST_CCFLAGS := \
- -DMALI_DEBUG=$(MALI_DEBUG) \
- -DMALI_BACKEND_KERNEL=$(MALI_BACKEND_KERNEL) \
- -DMALI_NO_MALI=$(MALI_NO_MALI) \
-DMALI_UNIT_TEST=$(MALI_UNIT_TEST) \
- -DMALI_USE_UMP=$(MALI_USE_UMP) \
- -DMALI_ERROR_INJECT_ON=$(MALI_ERROR_INJECT_ON) \
-DMALI_CUSTOMER_RELEASE=$(MALI_CUSTOMER_RELEASE) \
+ -DMALI_USE_CSF=$(MALI_USE_CSF) \
$(SCONS_CFLAGS) \
-I$(CURDIR)/../include \
-I$(CURDIR)/../../../../../../include \
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp
new file mode 100644
index 00000000000000..a6669afd85ca38
--- /dev/null
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/build.bp
@@ -0,0 +1,30 @@
+/*
+ * Copyright:
+ * ----------------------------------------------------------------------------
+ * This confidential and proprietary software may be used only as authorized
+ * by a licensing agreement from ARM Limited.
+ * (C) COPYRIGHT 2018 ARM Limited, ALL RIGHTS RESERVED
+ * The entire notice above must be reproduced on all authorized copies and
+ * copies may only be made to the extent permitted by a licensing agreement
+ * from ARM Limited.
+ * ----------------------------------------------------------------------------
+ */
+
+bob_kernel_module {
+ name: "mali_kutf_irq_test",
+ defaults: ["kernel_test_module_defaults"],
+ srcs: [
+ "Kbuild",
+ "mali_kutf_irq_test_main.c",
+ ],
+ extra_symbols: [
+ "mali_kbase",
+ "kutf",
+ ],
+ install_group: "IG_tests",
+ enabled: false,
+ base_build_kutf: {
+ enabled: true,
+ kbuild_options: ["CONFIG_MALI_IRQ_LATENCY=m"],
+ },
+}
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
index 5013a9d7cf895c..4181b7f92db613 100644
--- a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/mali_kutf_irq_test_main.c
@@ -1,6 +1,6 @@
/*
*
- * (C) COPYRIGHT 2016, 2017 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2016-2018 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -90,15 +90,14 @@ static irqreturn_t kbase_gpu_irq_custom_handler(int irq, void *data)
struct kbase_device *kbdev = kbase_untag(data);
u32 val;
- val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
+ val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS));
if (val & TEST_IRQ) {
struct timespec tval;
getnstimeofday(&tval);
irq_time = SEC_TO_NANO(tval.tv_sec) + (tval.tv_nsec);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val,
- NULL);
+ kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), val);
triggered = true;
wake_up(&wait);
@@ -194,7 +193,7 @@ static void mali_kutf_irq_latency(struct kutf_context *context)
/* Trigger fake IRQ */
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_RAWSTAT),
- TEST_IRQ, NULL);
+ TEST_IRQ);
ret = wait_event_timeout(wait, triggered != false, IRQ_TIMEOUT);
diff --git a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
index be695143b7e118..76e37308b1fc52 100644
--- a/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
+++ b/drivers/gpu/arm/midgard/tests/mali_kutf_irq_test/sconscript
@@ -1,5 +1,5 @@
#
-# (C) COPYRIGHT 2015, 2017 ARM Limited. All rights reserved.
+# (C) COPYRIGHT 2015-2018 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -29,7 +29,7 @@ if env.GetOption('clean') :
cmd = env.Command('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', src, [])
env.KernelObjTarget('mali_kutf_irq_test', cmd)
else:
- makeAction=Action("cd ${SOURCE.dir} && make MALI_DEBUG=${debug} MALI_BACKEND_KERNEL=1 MALI_ERROR_INJECT_ON=${error_inject} MALI_NO_MALI=${no_mali} MALI_UNIT_TEST=${unit} MALI_USE_UMP=${ump} MALI_CUSTOMER_RELEASE=${release} %s && ( ( [ -f mali_kutf_irq_test.ko ] && cp mali_kutf_irq_test.ko $STATIC_LIB_PATH/ ) || touch $STATIC_LIB_PATH/mali_kutf_irq_test.ko)" % env.kernel_get_config_defines(), '$MAKECOMSTR')
+ makeAction=Action("cd ${SOURCE.dir} && make MALI_UNIT_TEST=${unit} MALI_CUSTOMER_RELEASE=${release} MALI_USE_CSF=${csf} %s && ( ( [ -f mali_kutf_irq_test.ko ] && cp mali_kutf_irq_test.ko $STATIC_LIB_PATH/ ) || touch $STATIC_LIB_PATH/mali_kutf_irq_test.ko)" % env.kernel_get_config_defines(), '$MAKECOMSTR')
cmd = env.Command('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', src, [makeAction])
env.Depends('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', '$STATIC_LIB_PATH/kutf.ko')
env.Depends('$STATIC_LIB_PATH/mali_kutf_irq_test.ko', '$STATIC_LIB_PATH/mali_kbase.ko')
diff --git a/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
index 1690da43a0c3b9..9cb0465c715a88 100644
--- a/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
+++ b/drivers/gpu/arm/midgard/thirdparty/mali_kbase_mmap.c
@@ -49,12 +49,15 @@
* alignment, length and limits for the allocation
* @is_shader_code: True if the allocation is for shader code (which has
* additional alignment requirements)
+ * @is_same_4gb_page: True if the allocation needs to reside completely within
+ * a 4GB chunk
*
* Return: true if gap_end is now aligned correctly and is still in range,
* false otherwise
*/
static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
- struct vm_unmapped_area_info *info, bool is_shader_code)
+ struct vm_unmapped_area_info *info, bool is_shader_code,
+ bool is_same_4gb_page)
{
/* Compute highest gap address at the desired alignment */
(*gap_end) -= info->length;
@@ -72,6 +75,35 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end +
info->length) & BASE_MEM_MASK_4GB))
return false;
+ } else if (is_same_4gb_page) {
+ unsigned long start = *gap_end;
+ unsigned long end = *gap_end + info->length;
+ unsigned long mask = ~((unsigned long)U32_MAX);
+
+ /* Check if 4GB boundary is straddled */
+ if ((start & mask) != ((end - 1) & mask)) {
+ unsigned long offset = end - (end & mask);
+ /* This is to ensure that alignment doesn't get
+ * disturbed in an attempt to prevent straddling at
+			 * the 4GB boundary. The GPU VA is aligned to 2MB when the
+ * allocation size is > 2MB and there is enough CPU &
+ * GPU virtual space.
+ */
+ unsigned long rounded_offset =
+ ALIGN(offset, info->align_mask + 1);
+
+ start -= rounded_offset;
+ end -= rounded_offset;
+
+ *gap_end = start;
+
+ /* The preceding 4GB boundary shall not get straddled,
+ * even after accounting for the alignment, as the
+ * size of allocation is limited to 4GB and the initial
+ * start location was already aligned.
+ */
+ WARN_ON((start & mask) != ((end - 1) & mask));
+ }
}
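
To make the new same-4gb-page path concrete, here is a minimal user-space sketch of the straddle arithmetic above, with hypothetical start/length values; ALIGN is redefined locally and UINT32_MAX stands in for the kernel's U32_MAX (an LP64 target is assumed):

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* Hypothetical gap: 2MB-aligned start 4MB below the 4GB line,
         * with an 8MB allocation, so the window straddles the boundary. */
        unsigned long start = 0xffc00000UL;              /* 4GB - 4MB */
        unsigned long end   = start + 0x800000UL;        /* + 8MB */
        unsigned long mask  = ~(unsigned long)UINT32_MAX;

        if ((start & mask) != ((end - 1) & mask)) {
            /* Bytes that spill past the boundary, rounded up so the
             * 2MB alignment of the start address is preserved. */
            unsigned long offset  = end - (end & mask);
            unsigned long rounded = ALIGN(offset, 0x200000UL);

            start -= rounded;
            end   -= rounded;
        }
        /* start=0xff800000 end=0x100000000: confined to one 4GB chunk */
        printf("start=%#lx end=%#lx\n", start, end);
        return 0;
    }
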
@@ -89,6 +121,8 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
* @is_shader_code: Boolean which denotes whether the allocated area is
* intended for the use by shader core in which case a
* special alignment requirements apply.
+ * @is_same_4gb_page: Boolean which indicates whether the allocated area needs
+ * to reside completely within a 4GB chunk.
*
* The unmapped_area_topdown() function in the Linux kernel is not exported
* using EXPORT_SYMBOL_GPL macro. To allow us to call this function from a
@@ -97,25 +131,26 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
* of this function and prefixed it with 'kbase_'.
*
* The difference in the call parameter list comes from the fact that
- * kbase_unmapped_area_topdown() is called with additional parameter which
- * is provided to denote whether the allocation is for a shader core memory
- * or not. This is significant since the executable shader core memory has
- * additional alignment requirements.
+ * kbase_unmapped_area_topdown() is called with additional parameters which
+ * are provided to indicate whether the allocation is for a shader core memory,
+ * which has additional alignment requirements, and whether the allocation can
+ * straddle a 4GB boundary.
*
* The modification of the original Linux function lies in how the computation
* of the highest gap address at the desired alignment is performed once the
* gap with desirable properties is found. For this purpose a special function
* is introduced (@ref align_and_check()) which beside computing the gap end
- * at the desired alignment also performs additional alignment check for the
- * case when the memory is executable shader core memory. For such case, it is
- * ensured that the gap does not end on a 4GB boundary.
+ * at the desired alignment also performs additional alignment checks for the
+ * case when the memory is executable shader core memory, for which it is
+ * ensured that the gap does not end on a 4GB boundary, and for the case when
+ * memory needs to be confined within a 4GB chunk.
*
* Return: address of the found gap end (high limit) if area is found;
* -ENOMEM if search is unsuccessful
*/
static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
- *info, bool is_shader_code)
+ *info, bool is_shader_code, bool is_same_4gb_page)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -142,7 +177,8 @@ static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
if (gap_start <= high_limit) {
- if (align_and_check(&gap_end, gap_start, info, is_shader_code))
+ if (align_and_check(&gap_end, gap_start, info,
+ is_shader_code, is_same_4gb_page))
return gap_end;
}
@@ -178,7 +214,7 @@ check_current:
gap_end = info->high_limit;
if (align_and_check(&gap_end, gap_start, info,
- is_shader_code))
+ is_shader_code, is_same_4gb_page))
return gap_end;
}
@@ -232,6 +268,7 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
int gpu_pc_bits =
kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
bool is_shader_code = false;
+ bool is_same_4gb_page = false;
unsigned long ret;
/* err on fixed address */
@@ -245,12 +282,13 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
- if (kbase_hw_has_feature(kctx->kbdev,
- BASE_HW_FEATURE_33BIT_VA)) {
- high_limit = kctx->same_va_end << PAGE_SHIFT;
- } else {
- high_limit = min_t(unsigned long, mm->mmap_base,
- (kctx->same_va_end << PAGE_SHIFT));
+ high_limit = min_t(unsigned long, mm->mmap_base,
+ (kctx->same_va_end << PAGE_SHIFT));
+
+ /* If there's enough (> 33 bits) of GPU VA space, align
+ * to 2MB boundaries.
+ */
+ if (kctx->kbdev->gpu_props.mmu.va_bits > 33) {
if (len >= SZ_2M) {
align_offset = SZ_2M;
align_mask = SZ_2M - 1;
@@ -265,12 +303,15 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
(PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
- struct kbase_va_region *reg =
- kctx->pending_regions[cookie];
+ struct kbase_va_region *reg;
- if (!reg)
+ /* Need to hold gpu vm lock when using reg */
+ kbase_gpu_vm_lock(kctx);
+ reg = kctx->pending_regions[cookie];
+ if (!reg) {
+ kbase_gpu_vm_unlock(kctx);
return -EINVAL;
-
+ }
if (!(reg->flags & KBASE_REG_GPU_NX)) {
if (cpu_va_bits > gpu_pc_bits) {
align_offset = 1ULL << gpu_pc_bits;
@@ -290,7 +331,10 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
align_mask = extent_bytes - 1;
align_offset =
extent_bytes - (reg->initial_commit << PAGE_SHIFT);
+ } else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
+ is_same_4gb_page = true;
}
+ kbase_gpu_vm_unlock(kctx);
#ifndef CONFIG_64BIT
} else {
return current->mm->get_unmapped_area(filp, addr, len, pgoff,
@@ -305,7 +349,8 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
info.align_offset = align_offset;
info.align_mask = align_mask;
- ret = kbase_unmapped_area_topdown(&info, is_shader_code);
+ ret = kbase_unmapped_area_topdown(&info, is_shader_code,
+ is_same_4gb_page);
if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
high_limit < (kctx->same_va_end << PAGE_SHIFT)) {
@@ -314,7 +359,8 @@ unsigned long kbase_get_unmapped_area(struct file *filp,
info.high_limit = min_t(u64, TASK_SIZE,
(kctx->same_va_end << PAGE_SHIFT));
- ret = kbase_unmapped_area_topdown(&info, is_shader_code);
+ ret = kbase_unmapped_area_topdown(&info, is_shader_code,
+ is_same_4gb_page);
}
return ret;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e558154ff722c5..a50636a3328895 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -77,6 +77,7 @@ config DRM_FBDEV_EMULATION
depends on DRM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
+ select FB_DEFERRED_IO
default y
help
Choose this option if you have a need for the legacy fbdev
@@ -163,12 +164,8 @@ config DRM_R128
config DRM_RADEON
tristate "ATI Radeon"
depends on DRM && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
@@ -187,12 +184,8 @@ source "drivers/gpu/drm/radeon/Kconfig"
config DRM_AMDGPU
tristate "AMD GPU"
depends on DRM && PCI
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
select POWER_SUPPLY
select HWMON
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 49bc7cc08471f7..26c8dec2f381c6 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -88,6 +88,7 @@ obj-$(CONFIG_DRM_STI) += sti/
obj-$(CONFIG_DRM_IMX) += imx/
obj-$(CONFIG_DRM_MEDIATEK) += mediatek/
obj-$(CONFIG_DRM_POWERVR_ROGUE_1_9) += img-rogue/1.9/
+obj-$(CONFIG_DRM_POWERVR_ROGUE_1_10) += img-rogue/1.10/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index b233cf8436b0d9..2e1e84c9803444 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -504,7 +504,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (timeout == 0)
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index d923837fa9cec7..b197dfb8338746 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -492,6 +492,10 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
int amdgpu_bo_init(struct amdgpu_device *adev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->mc.aper_base,
+ adev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
adev->mc.vram_mtrr = arch_phys_wc_add(adev->mc.aper_base,
adev->mc.aper_size);
@@ -507,6 +511,7 @@ void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
arch_phys_wc_del(adev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->mc.aper_base, adev->mc.aper_size);
}
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 824050f2ad84ed..b1f4cecc0f8181 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -496,9 +496,13 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
int r;
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+ unsigned int flags = 0;
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ if (write)
+ flags |= FOLL_WRITE;
+
if (current->mm != gtt->usermm)
return -EPERM;
@@ -519,7 +523,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
struct page **pages = ttm->pages + pinned;
r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ flags, pages, NULL);
if (r < 0)
goto release_pages;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index eb1da83c990295..8cdd505784ed75 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -125,6 +125,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
return ERR_PTR(-EINVAL);
process = find_process(thread);
+ if (!process)
+ return ERR_PTR(-EINVAL);
return process;
}
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e258..345dc4d0851ef4 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x) ((x) << 8)
CFG_PIXCMD_MASK = 0xff,
};
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 8c401c0c054be0..5e42ca6ed452b5 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -27,6 +27,7 @@ struct armada_ovl_plane_properties {
uint16_t contrast;
uint16_t saturation;
uint32_t colorkey_mode;
+ uint32_t colorkey_enable;
};
struct armada_ovl_plane {
@@ -62,11 +63,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
-
- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+ armada_updatel(prop->colorkey_mode,
+ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ dcrtc->base + LCD_SPU_DMA_CTRL1);
+ if (dcrtc->variant->has_spu_adv_reg)
+ armada_updatel(prop->colorkey_enable,
+ ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+ dcrtc->base + LCD_SPU_ADV_REG);
spin_unlock_irq(&dcrtc->irq_lock);
}
@@ -340,8 +343,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
dplane->prop.colorkey_vb |= K2B(val);
update_attr = true;
} else if (property == priv->colorkey_mode_prop) {
- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+ if (val == CKMODE_DISABLE) {
+ dplane->prop.colorkey_mode =
+ CFG_CKMODE(CKMODE_DISABLE) |
+ CFG_ALPHAM_CFG | CFG_ALPHA(255);
+ dplane->prop.colorkey_enable = 0;
+ } else {
+ dplane->prop.colorkey_mode =
+ CFG_CKMODE(val) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+ }
update_attr = true;
} else if (property == priv->brightness_prop) {
dplane->prop.brightness = val - 256;
@@ -471,7 +483,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
dplane->prop.colorkey_yr = 0xfefefe00;
dplane->prop.colorkey_ug = 0x01010100;
dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
dplane->prop.brightness = 0;
dplane->prop.contrast = 0x4000;
dplane->prop.saturation = 0x4000;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 665e32f1ef1a59..552dcf750cdbb1 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
+static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+{
+ struct apertures_struct *ap;
+ bool primary = false;
+
+ ap = alloc_apertures(1);
+ if (!ap)
+ return;
+
+ ap->ranges[0].base = pci_resource_start(pdev, 0);
+ ap->ranges[0].size = pci_resource_len(pdev, 0);
+
+#ifdef CONFIG_X86
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+#endif
+ remove_conflicting_framebuffers(ap, "astdrmfb", primary);
+ kfree(ap);
+}
+
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ ast_kick_out_firmware_fb(pdev);
+
return drm_get_pci_dev(pdev, ent, &driver);
}
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index c67ef9ad7cdd6b..7f0ee43ffe3e1e 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -557,7 +557,8 @@ int ast_driver_unload(struct drm_device *dev)
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
- pci_iounmap(dev->pdev, ast->ioregs);
+ if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
+ pci_iounmap(dev->pdev, ast->ioregs);
pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
return 0;
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 1d64af75919a14..f8c2162ccafd54 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -558,6 +558,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc,
}
ast_bo_unreserve(bo);
+ ast_set_offset_reg(crtc);
ast_set_start_address_crt1(crtc, (u32)gpu_addr);
return 0;
@@ -968,9 +969,21 @@ static int get_clock(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x10) >> 4;
return val & 1 ? 1 : 0;
}
@@ -978,9 +991,21 @@ static int get_data(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
struct ast_private *ast = i2c->dev->dev_private;
- uint32_t val;
+ uint32_t val, val2, count, pass;
+
+ count = 0;
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ do {
+ val2 = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ if (val == val2) {
+ pass++;
+ } else {
+ pass = 0;
+ val = (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5) & 0x01;
+ }
+ } while ((pass < 5) && (count++ < 0x10000));
- val = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x20) >> 5;
return val & 1 ? 1 : 0;
}
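
Both helpers above now apply the same debounce idiom: keep sampling until five consecutive reads agree, bounded by a retry budget so a stuck line cannot hang the i2c transfer. A stripped-down sketch of the pattern, where read_bit() is a hypothetical stand-in for the CRTC register read:

    static int debounced_read(int (*read_bit)(void))
    {
        unsigned int count = 0, pass = 0;
        int val = read_bit();

        do {
            int val2 = read_bit();

            if (val == val2) {
                pass++;           /* another matching sample */
            } else {
                pass = 0;         /* glitch: restart from a fresh read */
                val = read_bit();
            }
        } while (pass < 5 && count++ < 0x10000);

        return val & 1;
    }
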
@@ -993,7 +1018,7 @@ static void set_clock(void *i2c_priv, int clock)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((clock & 0x01) ? 0 : 1);
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfe, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf4, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x01);
if (ujcrb7 == jtemp)
break;
@@ -1009,7 +1034,7 @@ static void set_data(void *i2c_priv, int data)
for (i = 0; i < 0x10000; i++) {
ujcrb7 = ((data & 0x01) ? 0 : 1) << 2;
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xfb, ujcrb7);
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0xf1, ujcrb7);
jtemp = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb7, 0x04);
if (ujcrb7 == jtemp)
break;
@@ -1250,7 +1275,7 @@ static int ast_cursor_move(struct drm_crtc *crtc,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07));
/* dummy write to fire HWC */
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00);
+ ast_show_cursor(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 08f82eae693985..ac12f74e6b32aa 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -275,6 +275,8 @@ int ast_mm_init(struct ast_private *ast)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -283,11 +285,15 @@ int ast_mm_init(struct ast_private *ast)
void ast_mm_fini(struct ast_private *ast)
{
+ struct drm_device *dev = ast->dev;
+
ttm_bo_device_release(&ast->ttm.bdev);
ast_ttm_global_release(ast);
arch_phys_wc_del(ast->fb_mtrr);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void ast_ttm_placement(struct ast_bo *bo, int domain)
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 9864559e5fb994..04b3c161dfae6f 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,11 +1,7 @@
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
help
This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 91db7edb847a47..91acc37c5d0bd7 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -276,6 +276,9 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
cirrus->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -285,6 +288,8 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
void cirrus_mm_fini(struct cirrus_device *cirrus)
{
+ struct drm_device *dev = cirrus->dev;
+
if (!cirrus->mm_inited)
return;
@@ -294,6 +299,8 @@ void cirrus_mm_fini(struct cirrus_device *cirrus)
arch_phys_wc_del(cirrus->fb_mtrr);
cirrus->fb_mtrr = 0;
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
}
void cirrus_ttm_placement(struct cirrus_bo *bo, int domain)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 8ce875d262ce32..035da39f594ed7 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -62,6 +62,7 @@ void drm_atomic_state_default_release(struct drm_atomic_state *state)
kfree(state->connectors);
kfree(state->crtcs);
kfree(state->planes);
+ kfree(state->private_objs);
}
EXPORT_SYMBOL(drm_atomic_state_default_release);
@@ -189,6 +190,22 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
state->planes[i].ptr = NULL;
state->planes[i].state = NULL;
}
+
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
+
+ if (!obj)
+ continue;
+
+ obj->funcs->atomic_destroy_state(obj,
+ state->private_objs[i].state);
+ state->private_objs[i].ptr = NULL;
+ state->private_objs[i].state = NULL;
+ state->private_objs[i].old_state = NULL;
+ state->private_objs[i].new_state = NULL;
+ }
+ state->num_private_objs = 0;
+
}
EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -981,6 +998,95 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_private_obj_init - initialize private object
+ * @obj: private object
+ * @state: initial private object state
+ * @funcs: pointer to the struct of function pointers that identify the object
+ * type
+ *
+ * Initialize the private object, which can be embedded into any
+ * driver private object that needs its own atomic state.
+ */
+void
+drm_atomic_private_obj_init(struct drm_private_obj *obj,
+ struct drm_private_state *state,
+ const struct drm_private_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ obj->state = state;
+ obj->funcs = funcs;
+}
+EXPORT_SYMBOL(drm_atomic_private_obj_init);
+
+/**
+ * drm_atomic_private_obj_fini - finalize private object
+ * @obj: private object
+ *
+ * Finalize the private object.
+ */
+void
+drm_atomic_private_obj_fini(struct drm_private_obj *obj)
+{
+ obj->funcs->atomic_destroy_state(obj, obj->state);
+}
+EXPORT_SYMBOL(drm_atomic_private_obj_fini);
+
+/**
+ * drm_atomic_get_private_obj_state - get private object state
+ * @state: global atomic state
+ * @obj: private object to get the state for
+ *
+ * This function returns the private object state for the given private object,
+ * allocating the state if needed. It does not grab any locks as the caller is
+ * expected to take care of any required locking.
+ *
+ * RETURNS:
+ *
+ * Either the allocated state or the error code encoded into a pointer.
+ */
+struct drm_private_state *
+drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int index, num_objs, i;
+ size_t size;
+ struct __drm_private_objs_state *arr;
+ struct drm_private_state *obj_state;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].state;
+
+ num_objs = state->num_private_objs + 1;
+ size = sizeof(*state->private_objs) * num_objs;
+ arr = krealloc(state->private_objs, size, GFP_KERNEL);
+ if (!arr)
+ return ERR_PTR(-ENOMEM);
+
+ state->private_objs = arr;
+ index = state->num_private_objs;
+ memset(&state->private_objs[index], 0, sizeof(*state->private_objs));
+
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
+ return ERR_PTR(-ENOMEM);
+
+ state->private_objs[index].state = obj_state;
+ state->private_objs[index].old_state = obj->state;
+ state->private_objs[index].new_state = obj_state;
+ state->private_objs[index].ptr = obj;
+
+ state->num_private_objs = num_objs;
+
+ DRM_DEBUG_ATOMIC("Added new private object %p state %p to %p\n",
+ obj, obj_state, state);
+
+ return obj_state;
+}
+EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
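
As a usage sketch (all my_* names are hypothetical, not part of this patch): a driver embeds the private object and state, supplies the two vtable callbacks, and then fetches the duplicated state from its atomic_check with the relevant lock held:

    struct my_state {
        struct drm_private_state base;   /* must stay the first member */
        int value;                       /* driver-specific payload */
    };

    static struct drm_private_state *
    my_duplicate_state(struct drm_private_obj *obj)
    {
        /* valid because base is first and the allocation is a my_state */
        struct my_state *s = kmemdup(obj->state, sizeof(*s), GFP_KERNEL);

        if (!s)
            return NULL;
        __drm_atomic_helper_private_obj_duplicate_state(obj, &s->base);
        return &s->base;
    }

    static void my_destroy_state(struct drm_private_obj *obj,
                                 struct drm_private_state *state)
    {
        kfree(container_of(state, struct my_state, base));
    }

    static const struct drm_private_state_funcs my_funcs = {
        .atomic_duplicate_state = my_duplicate_state,
        .atomic_destroy_state   = my_destroy_state,
    };

    /* driver init: obj lives in the driver, initial state is kzalloc'd */
    drm_atomic_private_obj_init(&my->obj, &my_initial->base, &my_funcs);

    /* atomic_check, caller holds the lock protecting the object */
    struct drm_private_state *ps =
        drm_atomic_get_private_obj_state(state, &my->obj);
    if (IS_ERR(ps))
        return PTR_ERR(ps);
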
+
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -1094,6 +1200,20 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == config->link_status_property) {
+ /* Never downgrade from GOOD to BAD on userspace's request here,
+ * only hw issues can do that.
+ *
+ * For an atomic property the userspace doesn't need to be able
+ * to understand all the properties, but needs to be able to
+ * restore the state it wants on VT switch. So if the userspace
+		 * tries to change the link_status from GOOD to BAD, the driver
+		 * silently rejects it and returns 0. This prevents userspace
+		 * from accidentally breaking the display when it restores the
+ * state.
+ */
+ if (state->link_status != DRM_LINK_STATUS_GOOD)
+ state->link_status = val;
} else if (connector->funcs->atomic_set_property) {
return connector->funcs->atomic_set_property(connector,
state, property, val);
@@ -1142,6 +1262,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = connector->dpms;
} else if (property == connector->content_protection_property) {
*val = state->content_protection;
+ } else if (property == config->link_status_property) {
+ *val = state->link_status;
} else if (connector->funcs->atomic_get_property) {
return connector->funcs->atomic_get_property(connector,
state, property, val);
@@ -1208,7 +1330,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state,
{
struct drm_plane *plane = plane_state->plane;
struct drm_crtc_state *crtc_state;
-
+	/* Nothing to do for the same crtc */
+ if (plane_state->crtc == crtc)
+ return 0;
if (plane_state->crtc) {
crtc_state = drm_atomic_get_crtc_state(plane_state->state,
plane_state->crtc);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 30165f718a0a4b..bb0d2f4580edbf 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -542,6 +542,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i, ret;
+ unsigned connectors_mask = 0;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
@@ -572,6 +573,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return ret;
for_each_connector_in_state(state, connector, connector_state, i) {
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
@@ -581,6 +584,19 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
connector_state);
if (ret)
return ret;
+ if (connector->state->crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ connector->state->crtc);
+ if (connector->state->link_status !=
+ connector_state->link_status)
+ crtc_state->connectors_changed = true;
+ }
+ if (funcs->atomic_check)
+ ret = funcs->atomic_check(connector, connector_state);
+ if (ret)
+ return ret;
+
+ connectors_mask += BIT(i);
}
/*
@@ -630,6 +646,22 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
}
}
+ /*
+ * Iterate over all connectors again, to make sure atomic_check()
+ * has been called on them when a modeset is forced.
+ */
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ const struct drm_connector_helper_funcs *funcs = connector->helper_private;
+
+ if (connectors_mask & BIT(i))
+ continue;
+
+ if (funcs->atomic_check)
+ ret = funcs->atomic_check(connector, connector_state);
+ if (ret)
+ return ret;
+ }
+
ret = mode_valid(state);
if (ret)
return ret;
@@ -1340,7 +1372,10 @@ static int drm_atomic_add_implicit_fences(struct drm_device *dev,
return -ENOMEM;
for_each_plane_in_state(state, plane, plane_state, i) {
- WARN_ON(plane_state->fence);
+ /* No implicit fencing if explicit fence is attached. */
+ if (plane_state->fence)
+ continue;
+
/* If fb is not changing or new fb is NULL. */
if (plane->state->fb == plane_state->fb || !plane_state->fb)
continue;
@@ -1394,6 +1429,10 @@ static int drm_atomic_add_implicit_fences(struct drm_device *dev,
}
for_each_plane_in_state(state, plane, plane_state, i) {
+ /* No implicit fencing if explicit fence is attached. */
+ if (plane_state->fence)
+ continue;
+
/* If fb is not changing or new fb is NULL. */
if (plane->state->fb == plane_state->fb || !plane_state->fb)
continue;
@@ -2075,6 +2114,8 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
struct drm_plane *plane;
struct drm_plane_state *plane_state, *old_plane_state;
struct drm_crtc_commit *commit;
+ struct drm_private_obj *obj;
+ struct drm_private_state *old_obj_state, *new_obj_state;
if (stall) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
@@ -2129,6 +2170,16 @@ void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
swap(state->planes[i].state, plane->state);
plane->state->state = NULL;
}
+
+ for_each_oldnew_private_obj_in_state(state, obj, old_obj_state, new_obj_state, i) {
+ WARN_ON(obj->state != old_obj_state);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->private_objs[i].state = old_obj_state;
+ obj->state = new_obj_state;
+ }
}
EXPORT_SYMBOL(drm_atomic_helper_swap_state);
@@ -2331,6 +2382,8 @@ static int update_output_state(struct drm_atomic_state *state,
NULL);
if (ret)
return ret;
+ /* Make sure legacy setCrtc always re-trains */
+ conn_state->link_status = DRM_LINK_STATUS_GOOD;
}
}
@@ -2374,6 +2427,12 @@ static int update_output_state(struct drm_atomic_state *state,
*
* Provides a default crtc set_config handler using the atomic driver interface.
*
+ * NOTE: For backwards compatibility with old userspace this automatically
+ * resets the "link-status" property to GOOD, to force any link
+ * re-training. The SETCRTC ioctl does not define whether an update needs
+ * a full modeset or just a plane update, hence we're allowed to do
+ * that. See also drm_mode_connector_set_link_status_property().
+ *
* Returns:
* Returns 0 on success, negative errno numbers on failure.
*/
@@ -3621,3 +3680,18 @@ backoff:
goto retry;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * __drm_atomic_helper_private_obj_duplicate_state - copy atomic private state
+ * @obj: private object
+ * @state: new private object state
+ *
+ * Copies atomic state from a private object's current state and resets inferred values.
+ * This is useful for drivers that subclass the private state.
+ */
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ memcpy(state, obj->state, sizeof(*state));
+}
+EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index c3a12cd8bd0de9..308da8a10ee070 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,8 @@
#include <drm/drmP.h>
#include "drm_legacy.h"
+#include <linux/nospec.h>
+
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
@@ -1413,6 +1415,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
idx, dma->buf_count - 1);
return -EINVAL;
}
+ idx = array_index_nospec(idx, dma->buf_count);
buf = dma->buflist[idx];
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 1ccdfc7a30e068..e1e008ea7cb2ee 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -244,6 +244,10 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
config->dpms_property, 0);
+ drm_object_attach_property(&connector->base,
+ config->link_status_property,
+ 0);
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
@@ -537,6 +541,12 @@ static const struct drm_prop_enum_list drm_dpms_enum_list[] = {
};
DRM_ENUM_NAME_FN(drm_get_dpms_name, drm_dpms_enum_list)
+static const struct drm_prop_enum_list drm_link_status_enum_list[] = {
+ { DRM_MODE_LINK_STATUS_GOOD, "Good" },
+ { DRM_MODE_LINK_STATUS_BAD, "Bad" },
+};
+DRM_ENUM_NAME_FN(drm_get_link_status_name, drm_link_status_enum_list)
+
/**
* drm_display_info_set_bus_formats - set the supported bus formats
* @info: display info to store bus formats in
@@ -660,6 +670,13 @@ int drm_connector_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.tile_property = prop;
+ prop = drm_property_create_enum(dev, 0, "link-status",
+ drm_link_status_enum_list,
+ ARRAY_SIZE(drm_link_status_enum_list));
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.link_status_property = prop;
+
return 0;
}
@@ -1025,6 +1042,36 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_connector_update_edid_property);
+/**
+ * drm_mode_connector_set_link_status_property - Set link status property of a connector
+ * @connector: drm connector
+ * @link_status: new value of link status property (0: Good, 1: Bad)
+ *
+ * In the usual working scenario, this link status property will always be set to
+ * "GOOD". If something fails during or after a mode set, the kernel driver
+ * may set this link status property to "BAD". The caller then needs to send a
+ * hotplug uevent for userspace to re-check the valid modes through
+ * GET_CONNECTOR_IOCTL and retry the modeset.
+ *
+ * Note: Drivers cannot rely on userspace to support this property and
+ * issue a modeset. As such, they may choose to handle issues (like
+ * re-training a link) without userspace's intervention.
+ *
+ * The reason for adding this property is to handle link training failures, but
+ * it is not limited to DP or link training. For example, if we implement
+ * asynchronous setcrtc, this property can be used to report those failures too.
+ */
+void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status)
+{
+ struct drm_device *dev = connector->dev;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ connector->state->link_status = link_status;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+EXPORT_SYMBOL(drm_mode_connector_set_link_status_property);
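
For illustration, a driver's link-training error path could pair this with a hotplug event so userspace re-probes; retrain_failed and the surrounding code are hypothetical:

    /* after a failed post-modeset link retrain */
    if (retrain_failed) {
        drm_mode_connector_set_link_status_property(connector,
                                                    DRM_MODE_LINK_STATUS_BAD);
        /* tell userspace to re-read modes and try a fresh modeset */
        drm_kms_helper_hotplug_event(connector->dev);
    }
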
+
int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t value)
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 885aafcfda44e4..b003299073a7db 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -119,18 +119,32 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4)
+ DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ rd_interval);
+
+ if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
udelay(100);
else
- mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+ mdelay(rd_interval * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) {
- if (dpcd[DP_TRAINING_AUX_RD_INTERVAL] == 0)
+ int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ DP_TRAINING_AUX_RD_MASK;
+
+ if (rd_interval > 4)
+ DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n",
+ rd_interval);
+
+ if (rd_interval == 0)
udelay(400);
else
- mdelay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] * 4);
+ mdelay(rd_interval * 4);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
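
A worked example of the masked interval, assuming the DP 1.4 register layout where bit 7 of DP_TRAINING_AUX_RD_INTERVAL is a capability flag rather than part of the delay:

    u8 dpcd[DP_RECEIVER_CAP_SIZE] = {};

    dpcd[DP_DPCD_REV] = 0x14;                  /* DPCD 1.4 sink */
    dpcd[DP_TRAINING_AUX_RD_INTERVAL] = 0x81;  /* flag bit + interval 1 */

    /* rd_interval = 0x81 & DP_TRAINING_AUX_RD_MASK = 1 */
    drm_dp_link_train_clock_recovery_delay(dpcd); /* udelay(100): rev >= 1.4 */
    drm_dp_link_train_channel_eq_delay(dpcd);     /* mdelay(1 * 4) */
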
@@ -144,6 +158,8 @@ u8 drm_dp_link_rate_to_bw_code(int link_rate)
return DP_LINK_BW_2_7;
case 540000:
return DP_LINK_BW_5_4;
+ case 810000:
+ return DP_LINK_BW_8_1;
}
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
@@ -158,6 +174,8 @@ int drm_dp_bw_code_to_link_rate(u8 link_bw)
return 270000;
case DP_LINK_BW_5_4:
return 540000;
+ case DP_LINK_BW_8_1:
+ return 810000;
}
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d5e9b414987f8d..9c166621e920be 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -31,6 +31,8 @@
#include <drm/drmP.h>
#include <drm/drm_fixed.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
/**
* DOC: dp mst helper
@@ -292,6 +294,12 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
idx += req->u.i2c_write.num_bytes;
break;
+
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+ idx++;
+ break;
}
raw->cur_len = idx;
}
@@ -536,6 +544,21 @@ fail_len:
return false;
}
+static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+ int idx = 1;
+
+ repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
+ idx++;
+ if (idx > raw->curlen) {
+ DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
+ idx, raw->curlen);
+ return false;
+ }
+ return true;
+}
+
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
@@ -565,6 +588,9 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
case DP_ALLOCATE_PAYLOAD:
return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+ case DP_POWER_DOWN_PHY:
+ case DP_POWER_UP_PHY:
+ return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
default:
DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
return false;
@@ -691,6 +717,22 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
return 0;
}
+static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
+{
+ struct drm_dp_sideband_msg_req_body req;
+
+ if (power_up)
+ req.req_type = DP_POWER_UP_PHY;
+ else
+ req.req_type = DP_POWER_DOWN_PHY;
+
+ req.u.port_num.port_number = port_num;
+ drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
+ return 0;
+}
+
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_vcpi *vcpi)
{
@@ -744,16 +786,16 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
- bool ret;
+ unsigned int state;
/*
* All updates to txmsg->state are protected by mgr->qlock, and the two
* cases we check here are terminal states. For those the barriers
* provided by the wake_up/wait_event pair are enough.
*/
- ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
- txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
- return ret;
+ state = READ_ONCE(txmsg->state);
+ return (state == DRM_DP_SIDEBAND_TX_RX ||
+ state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
@@ -862,7 +904,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
mutex_unlock(&mstb->mgr->qlock);
if (wake_tx)
- wake_up(&mstb->mgr->tx_waitq);
+ wake_up_all(&mstb->mgr->tx_waitq);
kref_put(kref, drm_dp_free_mst_branch_device);
}
@@ -1040,10 +1082,12 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
lct = drm_dp_calculate_rad(port, rad);
port->mstb = drm_dp_add_mst_branch_device(lct, rad);
- port->mstb->mgr = port->mgr;
- port->mstb->port_parent = port;
+ if (port->mstb) {
+ port->mstb->mgr = port->mgr;
+ port->mstb->port_parent = port;
- send_link = true;
+ send_link = true;
+ }
break;
}
return send_link;
@@ -1230,6 +1274,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
+ if (!mstb)
+ goto out;
+
for (i = 0; i < lct - 1; i++) {
int shift = (i % 2) ? 0 : 4;
int port_num = (rad[i / 2] >> shift) & 0xf;
@@ -1342,15 +1389,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid)
{
- static u8 zero_guid[16];
+ u64 salt;
- if (!memcmp(guid, zero_guid, 16)) {
- u64 salt = get_jiffies_64();
- memcpy(&guid[0], &salt, sizeof(u64));
- memcpy(&guid[8], &salt, sizeof(u64));
- return false;
- }
- return true;
+ if (memchr_inv(guid, 0, 16))
+ return true;
+
+ salt = get_jiffies_64();
+
+ memcpy(&guid[0], &salt, sizeof(u64));
+ memcpy(&guid[8], &salt, sizeof(u64));
+
+ return false;
}
#if 0
@@ -1517,7 +1566,7 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL;
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- wake_up(&mgr->tx_waitq);
+ wake_up_all(&mgr->tx_waitq);
}
}
@@ -1720,6 +1769,40 @@ fail_put:
return ret;
}
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, bool power_up)
+{
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int len, ret;
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port)
+ return -EINVAL;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ drm_dp_put_port(port);
+ return -ENOMEM;
+ }
+
+ txmsg->dst = port->parent;
+ len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ drm_dp_queue_down_tx(mgr, txmsg);
+
+ ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == 1)
+ ret = -EINVAL;
+ else
+ ret = 0;
+ }
+ kfree(txmsg);
+ drm_dp_put_port(port);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
+
static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
@@ -2009,6 +2092,9 @@ static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
case DP_LINK_BW_5_4:
*out = 10 * dp_link_count;
break;
+ case DP_LINK_BW_8_1:
+ *out = 15 * dp_link_count;
+ break;
}
return true;
}
@@ -2049,10 +2135,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
}
- mgr->total_pbn = 2560;
- mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
- mgr->avail_slots = mgr->total_slots;
-
/* add initial branch device at LCT 1 */
mstb = drm_dp_add_mst_branch_device(1, NULL);
if (mstb == NULL) {
@@ -2280,7 +2362,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mstb->tx_slots[slot] = NULL;
mutex_unlock(&mgr->qlock);
- wake_up(&mgr->tx_waitq);
+ wake_up_all(&mgr->tx_waitq);
}
return ret;
}
@@ -2500,26 +2582,25 @@ int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
- if (num_slots > mgr->avail_slots)
+ /* max. time slots - one slot for MTP header */
+ if (num_slots > 63)
return -ENOSPC;
return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_vcpi *vcpi, int pbn)
+ struct drm_dp_vcpi *vcpi, int pbn, int slots)
{
- int num_slots;
int ret;
- num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
-
- if (num_slots > mgr->avail_slots)
+ /* max. time slots - one slot for MTP header */
+ if (slots > 63)
return -ENOSPC;
vcpi->pbn = pbn;
- vcpi->aligned_pbn = num_slots * mgr->pbn_div;
- vcpi->num_slots = num_slots;
+ vcpi->aligned_pbn = slots * mgr->pbn_div;
+ vcpi->num_slots = slots;
ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
if (ret < 0)
@@ -2528,13 +2609,89 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
}
/**
+ * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @port: port to find vcpi slots for
+ * @pbn: bandwidth required for the mode in PBN
+ *
+ * RETURNS:
+ * Total slots in the atomic state assigned for this port, or a negative
+ * error code
+ */
+int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn)
+{
+ struct drm_dp_mst_topology_state *topology_state;
+ int req_slots;
+
+ topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+ if (IS_ERR(topology_state))
+ return PTR_ERR(topology_state);
+
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (port == NULL)
+ return -EINVAL;
+ req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+ DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
+ req_slots, topology_state->avail_slots);
+
+ if (req_slots > topology_state->avail_slots) {
+ drm_dp_put_port(port);
+ return -ENOSPC;
+ }
+
+ topology_state->avail_slots -= req_slots;
+	DRM_DEBUG_KMS("vcpi slots avail=%d\n", topology_state->avail_slots);
+
+ drm_dp_put_port(port);
+ return req_slots;
+}
+EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
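
In an atomic driver this would typically be called from the encoder's ->atomic_check() to reserve slots in the state before commit; a minimal sketch, with pbn already computed and vcpi_slots a hypothetical field the driver keeps in its encoder state:

    /* hypothetical mst_encoder_atomic_check() fragment */
    slots = drm_dp_atomic_find_vcpi_slots(state, &mst_mgr, mst_port, pbn);
    if (slots < 0)
        return slots;               /* e.g. -ENOSPC: mode doesn't fit */
    enc_state->vcpi_slots = slots;  /* remembered for the release path */
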
+
+/**
+ * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
+ * @state: global atomic state
+ * @mgr: MST topology manager for the port
+ * @slots: number of vcpi slots to release
+ *
+ * RETURNS:
+ * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
+ * negative error code
+ */
+int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ int slots)
+{
+ struct drm_dp_mst_topology_state *topology_state;
+
+ topology_state = drm_atomic_get_mst_topology_state(state, mgr);
+ if (IS_ERR(topology_state))
+ return PTR_ERR(topology_state);
+
+ /* We cannot rely on port->vcpi.num_slots to update
+ * topology_state->avail_slots as the port may not exist if the parent
+ * branch device was unplugged. This should be fixed by tracking
+ * per-port slot allocation in drm_dp_mst_topology_state instead of
+ * depending on the caller to tell us how many slots to release.
+ */
+ topology_state->avail_slots += slots;
+ DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
+ slots, topology_state->avail_slots);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
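
The matching release happens when the stream is torn down, handing back the count the driver tracked at check time; a sketch continuing the hypothetical enc_state above, per the comment's note that the caller must supply the slot count:

    /* hypothetical disable path in ->atomic_check() */
    ret = drm_dp_atomic_release_vcpi_slots(state, &mst_mgr,
                                           old_enc_state->vcpi_slots);
    if (ret)
        return ret;
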
+
+/**
* drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
* @mgr: manager for this port
* @port: port to allocate a virtual channel for.
* @pbn: payload bandwidth number to request
* @slots: returned number of slots for this PBN.
*/
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn, int slots)
{
int ret;
@@ -2542,22 +2699,25 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
if (!port)
return false;
+ if (slots < 0)
+ return false;
+
if (port->vcpi.vcpi > 0) {
DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
- *slots = port->vcpi.num_slots;
drm_dp_put_port(port);
return true;
}
}
- ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
+ ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
if (ret) {
- DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
+ DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
goto out;
}
- DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
- *slots = port->vcpi.num_slots;
+ DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
+ pbn, port->vcpi.num_slots);
drm_dp_put_port(port);
return true;
@@ -2784,19 +2944,20 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m,
}
}
+#define DP_PAYLOAD_TABLE_SIZE 64
+
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf)
{
- int ret;
int i;
- for (i = 0; i < 4; i++) {
- ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
- if (ret != 16)
- break;
+
+ for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
+ if (drm_dp_dpcd_read(mgr->aux,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
+ &buf[i], 16) != 16)
+ return false;
}
- if (i == 4)
- return true;
- return false;
+ return true;
}
static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
@@ -2859,44 +3020,25 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
mutex_lock(&mgr->lock);
if (mgr->mst_primary) {
- u8 buf[64];
- bool bret;
+ u8 buf[DP_PAYLOAD_TABLE_SIZE];
int ret;
+
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
- seq_printf(m, "dpcd: ");
- for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
+ seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
- seq_printf(m, "faux/mst: ");
- for (i = 0; i < 2; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
+ seq_printf(m, "faux/mst: %*ph\n", 2, buf);
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
- seq_printf(m, "mst ctrl: ");
- for (i = 0; i < 1; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
+ seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
- seq_printf(m, "branch oui: ");
- for (i = 0; i < 0x3; i++)
- seq_printf(m, "%02x", buf[i]);
- seq_printf(m, " devid: ");
+ seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
-
- seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
- seq_printf(m, "\n");
- bret = dump_dp_payload_table(mgr, buf);
- if (bret == true) {
- seq_printf(m, "payload table: ");
- for (i = 0; i < 63; i++)
- seq_printf(m, "%02x ", buf[i]);
- seq_printf(m, "\n");
- }
-
+ seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
+ buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
+ if (dump_dp_payload_table(mgr, buf))
+ seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
}
mutex_unlock(&mgr->lock);
@@ -2962,6 +3104,59 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
(*mgr->cbs->hotplug)(mgr);
}
+static struct drm_private_state *
+drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
+{
+ struct drm_dp_mst_topology_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct drm_dp_mst_topology_state *mst_state =
+ to_dp_mst_topology_state(state);
+
+ kfree(mst_state);
+}
+
+static const struct drm_private_state_funcs mst_state_funcs = {
+ .atomic_duplicate_state = drm_dp_mst_duplicate_state,
+ .atomic_destroy_state = drm_dp_mst_destroy_state,
+};
+
+/**
+ * drm_atomic_get_mst_topology_state: get MST topology state
+ *
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
+ * atomic state vtable so that the private object state returned is that of an
+ * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
+ * caller to take care of the locking, so warn if the connection_mutex is not
+ * held.
+ *
+ * RETURNS:
+ *
+ * The MST topology state or error pointer.
+ */
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_device *dev = mgr->dev;
+
+ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
+}
+EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
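
A caller that only wants to inspect the shared slot pool would take connection_mutex first, e.g. (a sketch; dev, state and mgr as in the surrounding code):

    ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                           state->acquire_ctx);
    if (ret)
        return ret;

    mst_state = drm_atomic_get_mst_topology_state(state, mgr);
    if (IS_ERR(mst_state))
        return PTR_ERR(mst_state);

    DRM_DEBUG_KMS("%d MST slots still available\n", mst_state->avail_slots);
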
+
/**
* drm_dp_mst_topology_mgr_init - initialise a topology manager
* @mgr: manager struct to initialise
@@ -2978,6 +3173,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
int max_dpcd_transaction_bytes,
int max_payloads, int conn_base_id)
{
+ struct drm_dp_mst_topology_state *mst_state;
+
mutex_init(&mgr->lock);
mutex_init(&mgr->qlock);
mutex_init(&mgr->payload_lock);
@@ -3006,6 +3203,19 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (test_calc_pbn_mode() < 0)
DRM_ERROR("MST PBN self-test failed\n");
+ mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
+ if (mst_state == NULL)
+ return -ENOMEM;
+
+ mst_state->mgr = mgr;
+
+ /* max. time slots - one slot for MTP header */
+ mst_state->avail_slots = 63;
+
+ drm_atomic_private_obj_init(&mgr->base,
+ &mst_state->base,
+ &mst_state_funcs);
+
return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
@@ -3026,6 +3236,8 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->payload_lock);
mgr->dev = NULL;
mgr->aux = NULL;
+ drm_atomic_private_obj_fini(&mgr->base);
+ mgr->funcs = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
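/*
 * A minimal sketch of how atomic driver code might consume the MST
 * topology state plumbed in above. example_mst_check() and req_slots are
 * illustrative names; only drm_atomic_get_mst_topology_state() and the
 * avail_slots field come from this patch.
 */
static int example_mst_check(struct drm_atomic_state *state,
                             struct drm_dp_mst_topology_mgr *mgr,
                             int req_slots)
{
        struct drm_dp_mst_topology_state *mst_state;

        /* connection_mutex must already be held, per the WARN_ON above. */
        mst_state = drm_atomic_get_mst_topology_state(state, mgr);
        if (IS_ERR(mst_state))
                return PTR_ERR(mst_state);

        /* 63 usable slots: one of the 64 MTP slots carries the header. */
        if (req_slots > mst_state->avail_slots)
                return -ENOSPC;

        return 0;
}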
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index b98a59b2de266d..27a274a5900557 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1052,9 +1052,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
- if (var->pixclock != 0 || in_dbg_master())
+ if (in_dbg_master())
return -EINVAL;
+ if (var->pixclock != 0) {
+ DRM_DEBUG("fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n");
+ var->pixclock = 0;
+ }
+
/* Need to resize the fb object !!! */
if (var->bits_per_pixel > fb->format->cpp[0] * 8 ||
var->xres > fb->width || var->yres > fb->height ||
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 17fcf382ddeb44..5ec4169637f39e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -197,6 +197,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return -ENOMEM;
filp->private_data = priv;
+ filp->f_mode |= FMODE_UNSIGNED_OFFSET;
priv->filp = filp;
priv->uid = current_euid();
priv->pid = get_pid(task_pid(current));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c40c257005dcef..8c92aa06ba56cc 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -36,6 +36,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <linux/pagevec.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
@@ -474,6 +475,17 @@ int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+/*
+ * Move pages to the appropriate LRU and release the pagevec, decrementing
+ * the refcount of those pages.
+ */
+static void drm_gem_check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
+
/**
* drm_gem_get_pages - helper to allocate backing pages for a GEM object
* from shmem
@@ -499,6 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
struct address_space *mapping;
struct page *p, **pages;
+ struct pagevec pvec;
int i, npages;
/* This is the shared memory object that backs the GEM resource */
@@ -516,6 +529,8 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
if (pages == NULL)
return ERR_PTR(-ENOMEM);
+ mapping_set_unevictable(mapping);
+
for (i = 0; i < npages; i++) {
p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
@@ -534,8 +549,14 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
return pages;
fail:
- while (i--)
- page_cache_release(pages[i]);
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec, 0);
+ while (i--) {
+ if (!pagevec_add(&pvec, pages[i]))
+ drm_gem_check_release_pagevec(&pvec);
+ }
+ if (pagevec_count(&pvec))
+ drm_gem_check_release_pagevec(&pvec);
drm_free_large(pages);
return ERR_CAST(p);
@@ -553,6 +574,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed)
{
int i, npages;
+ struct address_space *mapping;
+ struct pagevec pvec;
+
+ mapping = file_inode(obj->filp)->i_mapping;
+ mapping_clear_unevictable(mapping);
/* We already BUG_ON() for non-page-aligned sizes in
* drm_gem_object_init(), so we should never hit this unless
@@ -562,6 +588,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
npages = obj->size >> PAGE_SHIFT;
+ pagevec_init(&pvec, 0);
for (i = 0; i < npages; i++) {
if (dirty)
set_page_dirty(pages[i]);
@@ -570,8 +597,11 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
+ if (!pagevec_add(&pvec, pages[i]))
+ drm_gem_check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ drm_gem_check_release_pagevec(&pvec);
drm_free_large(pages);
}
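/*
 * The batching pattern used in both the error and teardown paths above,
 * shown in isolation: pages accumulate in a pagevec, and each full batch
 * is moved off the unevictable LRU and released in one go.
 * release_pages_batched() is an illustrative name; pagevec_init() takes a
 * cold argument in this kernel version.
 */
static void release_pages_batched(struct page **pages, int npages)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);
        for (i = 0; i < npages; i++) {
                if (!pagevec_add(&pvec, pages[i]))
                        drm_gem_check_release_pagevec(&pvec);
        }
        if (pagevec_count(&pvec))
                drm_gem_check_release_pagevec(&pvec);
}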
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index db528308c94b34..a9343e838f5c97 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -36,6 +36,7 @@
#include <linux/pci.h>
#include <linux/export.h>
+#include <linux/nospec.h>
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -242,10 +243,12 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
req->value = dev->mode_config.async_page_flip;
break;
case DRM_CAP_PAGE_FLIP_TARGET:
- req->value = 1;
- drm_for_each_crtc(crtc, dev) {
- if (!crtc->funcs->page_flip_target)
- req->value = 0;
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ req->value = 1;
+ drm_for_each_crtc(crtc, dev) {
+ if (!crtc->funcs->page_flip_target)
+ req->value = 0;
+ }
}
break;
case DRM_CAP_CURSOR_WIDTH:
@@ -653,13 +656,17 @@ long drm_ioctl(struct file *filp,
if (is_driver_ioctl) {
/* driver ioctl */
- if (nr - DRM_COMMAND_BASE >= dev->driver->num_ioctls)
+ unsigned int index = nr - DRM_COMMAND_BASE;
+
+ if (index >= dev->driver->num_ioctls)
goto err_i1;
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ index = array_index_nospec(index, dev->driver->num_ioctls);
+ ioctl = &dev->driver->ioctls[index];
} else {
/* core ioctl */
if (nr >= DRM_CORE_IOCTL_COUNT)
goto err_i1;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
ioctl = &drm_ioctls[nr];
}
@@ -764,6 +771,7 @@ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
if (nr >= DRM_CORE_IOCTL_COUNT)
return false;
+ nr = array_index_nospec(nr, DRM_CORE_IOCTL_COUNT);
*flags = drm_ioctls[nr].flags;
return true;
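/*
 * The same Spectre-v1 hardening idiom as above, in isolation: bounds-check
 * the untrusted index first, then clamp it under speculation with
 * array_index_nospec() before the table access. Names are illustrative.
 */
static int example_table[16];

static int example_lookup(unsigned int nr)
{
        if (nr >= ARRAY_SIZE(example_table))
                return -EINVAL;
        nr = array_index_nospec(nr, ARRAY_SIZE(example_table));
        return example_table[nr];
}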
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 48ac0ebbd6634e..9c370b9a4fd896 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -176,7 +176,8 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
lock->context, task_pid_nr(current),
- master->lock.hw_lock->lock, lock->flags);
+ master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
+ lock->flags);
add_wait_queue(&master->lock.lock_queue, &entry);
spin_lock_bh(&master->lock.spinlock);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 712333433b08a6..bec266590a8c2f 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -718,7 +718,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
if (mode->hsync)
return mode->hsync;
- if (mode->htotal < 0)
+ if (mode->htotal <= 0)
return 0;
calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
diff --git a/drivers/gpu/drm/evdi/Kconfig b/drivers/gpu/drm/evdi/Kconfig
index 26e0d5ff5a4572..4a790ac9af34fc 100644
--- a/drivers/gpu/drm/evdi/Kconfig
+++ b/drivers/gpu/drm/evdi/Kconfig
@@ -9,11 +9,7 @@
config DRM_EVDI
tristate "Extensible Virtual Display Interface"
depends on DRM
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
This is a KMS interface driver allowing user-space programs to
register a virtual display (that imitates a physical monitor) and
diff --git a/drivers/gpu/drm/evdi/evdi_connector.c b/drivers/gpu/drm/evdi/evdi_connector.c
index 2d8099b235ae45..fb2c8aef698070 100644
--- a/drivers/gpu/drm/evdi/evdi_connector.c
+++ b/drivers/gpu/drm/evdi/evdi_connector.c
@@ -73,10 +73,12 @@ evdi_detect(struct drm_connector *connector, __always_unused bool force)
EVDI_CHECKPT();
if (evdi_painter_is_connected(evdi)) {
- EVDI_DEBUG("(dev=%d) Painter is connected\n", evdi->dev_index);
+ EVDI_DEBUG("(dev=%d) poll connector state: connected\n",
+ evdi->dev_index);
return connector_status_connected;
}
- EVDI_DEBUG("(dev=%d) Painter is disconnected\n", evdi->dev_index);
+ EVDI_DEBUG("(dev=%d) poll connector state: disconnected\n",
+ evdi->dev_index);
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/evdi/evdi_debug.c b/drivers/gpu/drm/evdi/evdi_debug.c
index 848f323f3008ae..432439e5f09ab9 100644
--- a/drivers/gpu/drm/evdi/evdi_debug.c
+++ b/drivers/gpu/drm/evdi/evdi_debug.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/sched.h>
#include "evdi_debug.h"
@@ -15,3 +16,26 @@ unsigned int evdi_loglevel = EVDI_LOGLEVEL_DEBUG;
module_param_named(initial_loglevel, evdi_loglevel, int, 0400);
MODULE_PARM_DESC(initial_loglevel, "Initial log level");
+
+void evdi_log_process(void)
+{
+ int task_pid = (int)task_pid_nr(current);
+ char task_comm[TASK_COMM_LEN] = { 0 };
+
+ get_task_comm(task_comm, current);
+
+ if (current->group_leader) {
+ char process_comm[TASK_COMM_LEN] = { 0 };
+
+ get_task_comm(process_comm, current->group_leader);
+ EVDI_INFO("Task %d (%s) of process %d (%s)\n",
+ task_pid,
+ task_comm,
+ (int)task_pid_nr(current->group_leader),
+ process_comm);
+ } else {
+ EVDI_INFO("Task %d (%s)\n",
+ task_pid,
+ task_comm);
+ }
+}
diff --git a/drivers/gpu/drm/evdi/evdi_debug.h b/drivers/gpu/drm/evdi/evdi_debug.h
index 86c384a92f4970..3d748eb53c84dc 100644
--- a/drivers/gpu/drm/evdi/evdi_debug.h
+++ b/drivers/gpu/drm/evdi/evdi_debug.h
@@ -53,5 +53,7 @@ extern unsigned int evdi_loglevel;
#define EVDI_ENTER() EVDI_VERBOSE("enter\n")
#define EVDI_EXIT() EVDI_VERBOSE("exit\n")
+void evdi_log_process(void);
+
#endif /* EVDI_DEBUG_H */
diff --git a/drivers/gpu/drm/evdi/evdi_drv.c b/drivers/gpu/drm/evdi/evdi_drv.c
index 09681e3e7271e5..9529eb4f7e1e7d 100644
--- a/drivers/gpu/drm/evdi/evdi_drv.c
+++ b/drivers/gpu/drm/evdi/evdi_drv.c
@@ -77,6 +77,7 @@ static struct drm_driver driver = {
.load = evdi_driver_load,
.unload = evdi_driver_unload,
.preclose = evdi_driver_preclose,
+ .postclose = evdi_driver_postclose,
/* gem hooks */
.gem_free_object = evdi_gem_free_object,
diff --git a/drivers/gpu/drm/evdi/evdi_drv.h b/drivers/gpu/drm/evdi/evdi_drv.h
index 0e4153ee3ef0d0..91d2cc65fe93b9 100644
--- a/drivers/gpu/drm/evdi/evdi_drv.h
+++ b/drivers/gpu/drm/evdi/evdi_drv.h
@@ -25,11 +25,11 @@
#define DRIVER_NAME "evdi"
#define DRIVER_DESC "Extensible Virtual Display Interface"
-#define DRIVER_DATE "20170419"
+#define DRIVER_DATE "20190103"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 5
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 1
struct evdi_fbdev;
struct evdi_painter;
@@ -79,14 +79,17 @@ struct drm_encoder *evdi_encoder_init(struct drm_device *dev);
int evdi_driver_load(struct drm_device *dev, unsigned long flags);
int evdi_driver_unload(struct drm_device *dev);
void evdi_driver_preclose(struct drm_device *dev, struct drm_file *file_priv);
+void evdi_driver_postclose(struct drm_device *dev, struct drm_file *file_priv);
#ifdef CONFIG_COMPAT
long evdi_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
#endif
+#ifdef CONFIG_FB
int evdi_fbdev_init(struct drm_device *dev);
void evdi_fbdev_cleanup(struct drm_device *dev);
void evdi_fbdev_unplug(struct drm_device *dev);
+#endif /* CONFIG_FB */
struct drm_framebuffer *evdi_fb_user_fb_create(
struct drm_device *dev,
struct drm_file *file,
diff --git a/drivers/gpu/drm/evdi/evdi_fb.c b/drivers/gpu/drm/evdi/evdi_fb.c
index 103bbc78ec7b16..7107dfecaa13d0 100644
--- a/drivers/gpu/drm/evdi/evdi_fb.c
+++ b/drivers/gpu/drm/evdi/evdi_fb.c
@@ -11,7 +11,9 @@
*/
#include <linux/slab.h>
+#ifdef CONFIG_FB
#include <linux/fb.h>
+#endif /* CONFIG_FB */
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
@@ -19,7 +21,6 @@
#include <drm/drm_fb_helper.h>
#include "evdi_drv.h"
-
struct evdi_fbdev {
struct drm_fb_helper helper;
struct evdi_framebuffer ufb;
@@ -93,6 +94,7 @@ static int evdi_handle_damage(struct evdi_framebuffer *fb,
return 0;
}
+#ifdef CONFIG_FB
static int evdi_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
unsigned long start = vma->vm_start;
@@ -204,6 +206,7 @@ static struct fb_ops evdifb_ops = {
.fb_open = evdi_fb_open,
.fb_release = evdi_fb_release,
};
+#endif /* CONFIG_FB */
static int evdi_user_framebuffer_dirty(struct drm_framebuffer *fb,
__always_unused struct drm_file *file,
@@ -317,6 +320,7 @@ evdi_framebuffer_init(struct drm_device *dev,
return drm_framebuffer_init(dev, &ufb->base, &evdifb_funcs);
}
+#ifdef CONFIG_FB
static int evdifb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -414,7 +418,6 @@ static void evdi_fbdev_destroy(__always_unused struct drm_device *dev,
if (ufbdev->helper.fbdev) {
info = ufbdev->helper.fbdev;
unregister_framebuffer(info);
-
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
@@ -484,6 +487,7 @@ void evdi_fbdev_unplug(struct drm_device *dev)
unlink_framebuffer(info);
}
}
+#endif /* CONFIG_FB */
int evdi_fb_get_bpp(uint32_t format)
{
diff --git a/drivers/gpu/drm/evdi/evdi_main.c b/drivers/gpu/drm/evdi/evdi_main.c
index 1d55dc5b70e35c..5d2a97e1b20e50 100644
--- a/drivers/gpu/drm/evdi/evdi_main.c
+++ b/drivers/gpu/drm/evdi/evdi_main.c
@@ -37,9 +37,11 @@ int evdi_driver_load(struct drm_device *dev,
EVDI_CHECKPT();
evdi_modeset_init(dev);
+#ifdef CONFIG_FB
ret = evdi_fbdev_init(dev);
if (ret)
goto err;
+#endif /* CONFIG_FB */
ret = drm_vblank_init(dev, 1);
if (ret)
@@ -59,7 +61,9 @@ int evdi_driver_load(struct drm_device *dev,
return 0;
err_fb:
+#ifdef CONFIG_FB
evdi_fbdev_cleanup(dev);
+#endif /* CONFIG_FB */
err:
kfree(evdi);
EVDI_ERROR("%d\n", ret);
@@ -77,24 +81,43 @@ int evdi_driver_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_connector_unregister_all(dev);
+#ifdef CONFIG_FB
evdi_fbdev_unplug(dev);
+#endif /* CONFIG_FB */
if (evdi->cursor)
evdi_cursor_free(evdi->cursor);
evdi_painter_cleanup(evdi);
evdi_stats_cleanup(evdi);
+#ifdef CONFIG_FB
evdi_fbdev_cleanup(dev);
+#endif /* CONFIG_FB */
evdi_modeset_cleanup(dev);
kfree(evdi);
return 0;
}
-void evdi_driver_preclose(struct drm_device *drm_dev, struct drm_file *file)
+void evdi_driver_close(struct drm_device *drm_dev, struct drm_file *file)
{
struct evdi_device *evdi = drm_dev->dev_private;
- EVDI_CHECKPT();
if (evdi)
evdi_painter_close(evdi, file);
}
+void evdi_driver_preclose(struct drm_device *drm_dev, struct drm_file *file)
+{
+ evdi_driver_close(drm_dev, file);
+}
+
+void evdi_driver_postclose(struct drm_device *drm_dev, struct drm_file *file)
+{
+ struct evdi_device *evdi = drm_dev->dev_private;
+
+ EVDI_DEBUG("(dev=%d) Process tries to close us, postclose\n",
+ evdi ? evdi->dev_index : -1);
+ evdi_log_process();
+
+ evdi_driver_close(drm_dev, file);
+}
+
diff --git a/drivers/gpu/drm/evdi/evdi_painter.c b/drivers/gpu/drm/evdi/evdi_painter.c
index d6d3f4ce78b818..10e1deecc7ad27 100644
--- a/drivers/gpu/drm/evdi/evdi_painter.c
+++ b/drivers/gpu/drm/evdi/evdi_painter.c
@@ -1,11 +1,13 @@
/*
- * Copyright (c) 2013 - 2017 DisplayLink (UK) Ltd.
+ * Copyright (c) 2013 - 2018 DisplayLink (UK) Ltd.
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*/
+#include "linux/thread_info.h"
+#include "linux/mm.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <uapi/drm/evdi_drm.h>
@@ -211,8 +213,7 @@ u8 *evdi_painter_get_edid_copy(struct evdi_device *evdi)
memcpy(block,
evdi->painter->edid,
evdi->painter->edid_length);
- EVDI_DEBUG("(dev=%d) %02x %02x %02x\n", evdi->dev_index,
- block[0], block[1], block[2]);
+ EVDI_DEBUG("(dev=%d) EDID valid\n", evdi->dev_index);
}
}
painter_unlock(evdi->painter);
@@ -505,7 +506,9 @@ evdi_painter_connect(struct evdi_device *evdi,
struct edid *new_edid = NULL;
int expected_edid_size = 0;
- EVDI_CHECKPT();
+ EVDI_DEBUG("(dev=%d) Process is trying to connect\n",
+ evdi->dev_index);
+ evdi_log_process();
if (edid_length < sizeof(struct edid)) {
EVDI_ERROR("Edid length too small\n");
@@ -562,7 +565,7 @@ evdi_painter_connect(struct evdi_device *evdi,
return 0;
}
-static void evdi_painter_disconnect(struct evdi_device *evdi,
+static int evdi_painter_disconnect(struct evdi_device *evdi,
struct drm_file *file)
{
struct evdi_painter *painter = evdi->painter;
@@ -572,14 +575,8 @@ static void evdi_painter_disconnect(struct evdi_device *evdi,
painter_lock(painter);
if (file != painter->drm_filp) {
- EVDI_WARN
- ("(dev=%d) An unknown connection to %p tries to close us",
- evdi->dev_index, file);
- EVDI_WARN(" - ignoring\n");
-
-
painter_unlock(painter);
- return;
+ return -EFAULT;
}
evdi_painter_set_new_scanout_buffer(evdi, NULL);
@@ -603,6 +600,7 @@ static void evdi_painter_disconnect(struct evdi_device *evdi,
painter_unlock(painter);
drm_helper_hpd_irq_event(evdi->ddev);
+ return 0;
}
void evdi_painter_close(struct evdi_device *evdi, struct drm_file *file)
@@ -621,20 +619,25 @@ int evdi_painter_connect_ioctl(struct drm_device *drm_dev, void *data,
struct evdi_device *evdi = drm_dev->dev_private;
struct evdi_painter *painter = evdi->painter;
struct drm_evdi_connect *cmd = data;
+ int ret;
EVDI_CHECKPT();
if (painter) {
if (cmd->connected)
- evdi_painter_connect(evdi,
+ ret = evdi_painter_connect(evdi,
cmd->edid,
cmd->edid_length,
cmd->sku_area_limit,
file,
cmd->dev_index);
else
- evdi_painter_disconnect(evdi, file);
+ ret = evdi_painter_disconnect(evdi, file);
- return 0;
+ if (ret) {
+ EVDI_WARN("(dev=%d) (pid=%d) disconnect failed\n",
+ evdi->dev_index, (int)task_pid_nr(current));
+ }
+ return ret;
}
EVDI_WARN("Painter does not exist!");
return -ENODEV;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index d0959086847e3e..308987de2fe64d 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -190,7 +190,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
- val &= ~WINCONx_BPPMODE_MASK;
+ val &= WINCONx_ENWIN_F;
switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
@@ -278,8 +278,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
COORDINATE_Y(plane->crtc_y + plane->crtc_h - 1);
writel(val, ctx->addr + DECON_VIDOSDxB(win));
- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
- VIDOSD_Wx_ALPHA_B_F(0x0);
+ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+ VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 52d47520f84e6f..0532656666f348 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -471,7 +471,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
goto err_free;
}
- ret = get_vaddr_frames(start, npages, true, true, g2d_userptr->vec);
+ ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
+ g2d_userptr->vec);
if (ret != npages) {
DRM_ERROR("failed to get user pages from userptr.\n");
if (ret < 0)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 11b87d2a7913b6..ba69d1c72221a3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -526,21 +526,25 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV61:
- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_IN_YUV420_3P;
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
default:
dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
@@ -800,18 +804,25 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+ break;
case DRM_FORMAT_YUV422:
+ cfg |= GSC_OUT_YUV422_3P;
+ break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_OUT_YUV420_3P;
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
- GSC_OUT_YUV420_2P);
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
default:
dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 9ad592707aafd1..ade10966d6af7c 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV422_3P (6 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 17f928ec84ea77..8906d67494fc47 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,11 +1,7 @@
config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
depends on DRM && PCI && X86
- select FB_CFB_COPYAREA
- select FB_CFB_FILLRECT
- select FB_CFB_IMAGEBLIT
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 9330ba701fe31f..f793f47bfab991 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -99,7 +99,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
/* Wait for the pipe enable to take effect. */
for (count = 0; count < COUNT_MAX; count++) {
temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_PIPE_STATE) == 1)
+ if (temp & PIPEACONF_PIPE_STATE)
break;
}
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 6a10215fc42d4f..346f6ee5a6c2c2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -256,7 +256,7 @@ extern int intelfb_remove(struct drm_device *dev,
extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 3bb4ba98937f00..676a7f2c1b0286 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
}
}
-int psb_intel_lvds_mode_valid(struct drm_connector *connector,
+enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_psb_private *dev_priv = connector->dev->dev_private;
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index dba5c0ea0827de..c7c243e9b80834 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -450,6 +450,18 @@ static void adv7511_hpd_work(struct work_struct *work)
else
status = connector_status_disconnected;
+ /*
+ * The bridge resets its registers on unplug. So when we get a plug
+ * event and we're already supposed to be powered, cycle the bridge to
+ * restore its state.
+ */
+ if (status == connector_status_connected &&
+ adv7511->connector.status == connector_status_disconnected &&
+ adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ }
+
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
drm_kms_helper_hotplug_event(adv7511->connector.dev);
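/*
 * The replug handling above leans on regmap's register cache: marking the
 * cache dirty means the next regcache_sync() (expected to run from the
 * adv7511_power_on() path) rewrites every cached register into the freshly
 * reset bridge. The underlying regmap idiom, sketched:
 */
regcache_mark_dirty(regmap);    /* cached values no longer match hardware */
regcache_sync(regmap);          /* rewrite dirty registers to the device */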
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 0676534b512bd9..46b605f40333d1 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -19,6 +19,7 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5fbe97dac3ccee..8f1a8a82205d7a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2502,72 +2502,138 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
return 0;
}
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ u8 val;
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+ int ret;
+
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, psr_status;
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ psr_status = I915_READ(EDP_PSR2_STATUS);
+ val = (psr_status & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ psr_status = I915_READ(EDP_PSR_STATUS);
+ val = (psr_status & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (val < ARRAY_SIZE(live_status)) {
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n",
+ psr_status, live_status[val]);
+ return;
+ }
+ }
+
+ seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
+}
+
static int i915_edp_psr_status(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 psrperf = 0;
- u32 stat[3];
- enum pipe pipe;
bool enabled = false;
+ bool sink_support;
if (!HAS_PSR(dev_priv)) {
seq_puts(m, "PSR not supported\n");
return 0;
}
+ sink_support = dev_priv->psr.sink_support;
+ seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
+ if (!sink_support)
+ return 0;
+
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->psr.lock);
- seq_printf(m, "Sink_Support: %s\n", yesno(dev_priv->psr.sink_support));
- seq_printf(m, "Source_OK: %s\n", yesno(dev_priv->psr.source_ok));
+ seq_printf(m, "PSR mode: %s\n",
+ dev_priv->psr.psr2_enabled ? "PSR2" : "PSR1");
seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
- seq_printf(m, "Active: %s\n", yesno(dev_priv->psr.active));
seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
dev_priv->psr.busy_frontbuffer_bits);
- seq_printf(m, "Re-enable work scheduled: %s\n",
- yesno(work_busy(&dev_priv->psr.work.work)));
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support)
- enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
- else
- enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
- } else {
- for_each_pipe(dev_priv, pipe) {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
- enum intel_display_power_domain power_domain;
-
- power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv,
- power_domain))
- continue;
-
- stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- enabled = true;
-
- intel_display_power_put(dev_priv, power_domain);
- }
- }
+ if (dev_priv->psr.psr2_enabled)
+ enabled = I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE;
+ else
+ enabled = I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
seq_printf(m, "Main link in standby mode: %s\n",
yesno(dev_priv->psr.link_standby));
- seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
-
- if (!HAS_DDI(dev_priv))
- for_each_pipe(dev_priv, pipe) {
- if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE))
- seq_printf(m, " pipe %c", pipe_name(pipe));
- }
- seq_puts(m, "\n");
+ seq_printf(m, "HW Enabled & Active bit: %s\n", yesno(enabled));
/*
- * VLV/CHV PSR has no kind of performance counter
* SKL+ Perf counter is reset to 0 every time DC state is entered
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@ -2576,36 +2642,58 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
seq_printf(m, "Performance_Counter: %u\n", psrperf);
}
- if (dev_priv->psr.psr2_support) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON" };
- u8 pos = (I915_READ(EDP_PSR2_STATUS_CTL) &
- EDP_PSR2_STATUS_STATE_MASK) >>
- EDP_PSR2_STATUS_STATE_SHIFT;
- seq_printf(m, "PSR2_STATUS_EDP: %x\n",
- I915_READ(EDP_PSR2_STATUS_CTL));
+ psr_source_status(dev_priv, m);
+ mutex_unlock(&dev_priv->psr.lock);
- if (pos < ARRAY_SIZE(live_status))
- seq_printf(m, "PSR2 live state %s\n",
- live_status[pos]);
+ if (READ_ONCE(dev_priv->psr.debug)) {
+ /*
+ * ktime_to_ns() needed only in chromeos-4.4 because
+ * ktime_t is defined as a union with member tv64
+ */
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ ktime_to_ns(dev_priv->psr.last_entry_attempt));
+ seq_printf(m, "Last exit at: %lld\n",
+ ktime_to_ns(dev_priv->psr.last_exit));
}
- mutex_unlock(&dev_priv->psr.lock);
intel_runtime_pm_put(dev_priv);
return 0;
}
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ DRM_DEBUG_KMS("PSR debug %s\n", enableddisabled(val));
+
+ intel_runtime_pm_get(dev_priv);
+ intel_psr_irq_control(dev_priv, !!val);
+ intel_runtime_pm_put(dev_priv);
+
+ return 0;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ *val = READ_ONCE(dev_priv->psr.debug);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
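/*
 * DEFINE_SIMPLE_ATTRIBUTE() above generates a debugfs file_operations from
 * the get/set pair and a printf format for the u64 value. Registration is
 * then a single call; in this patch it happens via the i915_debugfs_files
 * table, but the generic form would be (root and mode illustrative):
 */
debugfs_create_file("i915_edp_psr_debug", 0644, root,
                    dev_priv, &i915_edp_psr_debug_fops);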
static int i915_sink_crc(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -3679,7 +3767,19 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- seq_printf(m, "%lx", intel_dp->compliance.test_data.edid);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ }
} else
seq_puts(m, "0");
}
@@ -4606,7 +4706,7 @@ static const struct i915_debugfs_files {
{"i915_fbc_false_color", &i915_fbc_fc_fops},
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops}
+ {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -4730,6 +4830,40 @@ static const struct file_operations i915_dpcd_fops = {
.release = single_release,
};
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ enc_to_intel_dp(&intel_attached_encoder(connector)->base);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->backlight_off_delay);
+
+ return 0;
+}
+
+static int i915_panel_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_panel_show, inode->i_private);
+}
+
+static const struct file_operations i915_panel_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_panel_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/**
* i915_debugfs_connector_add - add i915 specific connector debugfs files
* @connector: pointer to a registered drm_connector
@@ -4749,8 +4883,15 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_dpcd", S_IRUGO, root, connector,
- &i915_dpcd_fops);
+ debugfs_create_file("i915_dpcd", S_IRUGO, root,
+ connector, &i915_dpcd_fops);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index fe220ec3b92d69..38f94b62463e23 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1248,7 +1248,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
intel_runtime_pm_enable(dev_priv);
- dev_priv->ipc_enabled = false;
+ intel_init_ipc(dev_priv);
/* Everything is in place, we can now relax! */
DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
@@ -2425,6 +2425,8 @@ static int intel_runtime_resume(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_init(dev_priv);
+ intel_enable_ipc(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 17d09857faa978..cb9223b2abfbde 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,6 +59,7 @@
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"
+#include "intel_display.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -256,151 +257,6 @@ static inline const char *enableddisabled(bool v)
return v ? "enabled" : "disabled";
}
-enum pipe {
- INVALID_PIPE = -1,
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- _PIPE_EDP,
- I915_MAX_PIPES = _PIPE_EDP
-};
-#define pipe_name(p) ((p) + 'A')
-
-enum transcoder {
- TRANSCODER_A = 0,
- TRANSCODER_B,
- TRANSCODER_C,
- TRANSCODER_EDP,
- TRANSCODER_DSI_A,
- TRANSCODER_DSI_C,
- I915_MAX_TRANSCODERS
-};
-
-static inline const char *transcoder_name(enum transcoder transcoder)
-{
- switch (transcoder) {
- case TRANSCODER_A:
- return "A";
- case TRANSCODER_B:
- return "B";
- case TRANSCODER_C:
- return "C";
- case TRANSCODER_EDP:
- return "EDP";
- case TRANSCODER_DSI_A:
- return "DSI A";
- case TRANSCODER_DSI_C:
- return "DSI C";
- default:
- return "<invalid>";
- }
-}
-
-static inline bool transcoder_is_dsi(enum transcoder transcoder)
-{
- return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
-}
-
-/*
- * Global legacy plane identifier. Valid only for primary/sprite
- * planes on pre-g4x, and only for primary planes on g4x+.
- */
-enum plane {
- PLANE_A,
- PLANE_B,
- PLANE_C,
-};
-#define plane_name(p) ((p) + 'A')
-
-#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_CURSOR,
- I915_MAX_PLANES,
-};
-
-#define for_each_plane_id_on_crtc(__crtc, __p) \
- for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
- for_each_if ((__crtc)->plane_ids_mask & BIT(__p))
-
-enum port {
- PORT_NONE = -1,
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- I915_MAX_PORTS
-};
-#define port_name(p) ((p) + 'A')
-
-#define I915_NUM_PHYS_VLV 2
-
-enum dpio_channel {
- DPIO_CH0,
- DPIO_CH1
-};
-
-enum dpio_phy {
- DPIO_PHY0,
- DPIO_PHY1
-};
-
-enum intel_display_power_domain {
- POWER_DOMAIN_PIPE_A,
- POWER_DOMAIN_PIPE_B,
- POWER_DOMAIN_PIPE_C,
- POWER_DOMAIN_PIPE_A_PANEL_FITTER,
- POWER_DOMAIN_PIPE_B_PANEL_FITTER,
- POWER_DOMAIN_PIPE_C_PANEL_FITTER,
- POWER_DOMAIN_TRANSCODER_A,
- POWER_DOMAIN_TRANSCODER_B,
- POWER_DOMAIN_TRANSCODER_C,
- POWER_DOMAIN_TRANSCODER_EDP,
- POWER_DOMAIN_TRANSCODER_DSI_A,
- POWER_DOMAIN_TRANSCODER_DSI_C,
- POWER_DOMAIN_PORT_DDI_A_LANES,
- POWER_DOMAIN_PORT_DDI_B_LANES,
- POWER_DOMAIN_PORT_DDI_C_LANES,
- POWER_DOMAIN_PORT_DDI_D_LANES,
- POWER_DOMAIN_PORT_DDI_E_LANES,
- POWER_DOMAIN_PORT_DSI,
- POWER_DOMAIN_PORT_CRT,
- POWER_DOMAIN_PORT_OTHER,
- POWER_DOMAIN_VGA,
- POWER_DOMAIN_AUDIO,
- POWER_DOMAIN_PLLS,
- POWER_DOMAIN_AUX_A,
- POWER_DOMAIN_AUX_B,
- POWER_DOMAIN_AUX_C,
- POWER_DOMAIN_AUX_D,
- POWER_DOMAIN_GMBUS,
- POWER_DOMAIN_MODESET,
- POWER_DOMAIN_INIT,
-
- POWER_DOMAIN_NUM,
-};
-
-#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
-#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
- ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
-#define POWER_DOMAIN_TRANSCODER(tran) \
- ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
- (tran) + POWER_DOMAIN_TRANSCODER_A)
-
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -458,86 +314,6 @@ struct i915_hotplug {
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
-#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
-#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
- for_each_if ((__mask) & (1 << (__p)))
-#define for_each_universal_plane(__dev_priv, __pipe, __p) \
- for ((__p) = 0; \
- (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
- (__p)++)
-#define for_each_sprite(__dev_priv, __p, __s) \
- for ((__s) = 0; \
- (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
- (__s)++)
-
-#define for_each_port_masked(__port, __ports_mask) \
- for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
- for_each_if ((__ports_mask) & (1 << (__port)))
-
-#define for_each_crtc(dev, crtc) \
- list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
-
-#define for_each_intel_plane(dev, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head)
-
-#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((plane_mask) & \
- (1 << drm_plane_index(&intel_plane->base)))
-
-#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
- list_for_each_entry(intel_plane, \
- &(dev)->mode_config.plane_list, \
- base.head) \
- for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
-
-#define for_each_intel_crtc(dev, intel_crtc) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head)
-
-#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
- list_for_each_entry(intel_crtc, \
- &(dev)->mode_config.crtc_list, \
- base.head) \
- for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
-
-#define for_each_intel_encoder(dev, intel_encoder) \
- list_for_each_entry(intel_encoder, \
- &(dev)->mode_config.encoder_list, \
- base.head)
-
-#define for_each_intel_connector(dev, intel_connector) \
- list_for_each_entry(intel_connector, \
- &(dev)->mode_config.connector_list, \
- base.head)
-
-#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
- list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
- for_each_if ((intel_encoder)->base.crtc == (__crtc))
-
-#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
- list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
- for_each_if ((intel_connector)->base.encoder == (__encoder))
-
-#define for_each_power_domain(domain, mask) \
- for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if ((1 << (domain)) & (mask))
-
-#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
- for ((__i) = 0; \
- (__i) < (__state)->base.dev->mode_config.num_crtc && \
- ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
- (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
- (__i)++) \
- for_each_if (crtc)
-
struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;
@@ -575,20 +351,6 @@ struct drm_i915_file_private {
int context_bans;
};
-/* Used by dp and fdi links */
-struct intel_link_m_n {
- uint32_t tu;
- uint32_t gmch_m;
- uint32_t gmch_n;
- uint32_t link_m;
- uint32_t link_n;
-};
-
-void intel_link_compute_m_n(int bpp, int nlanes,
- int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool reduce_m_n);
-
/* Interface history:
*
* 1.1: Original.
@@ -683,11 +445,6 @@ struct drm_i915_display_funcs {
void (*audio_codec_disable)(struct intel_encoder *encoder);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
- int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags);
void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
@@ -837,7 +594,8 @@ struct intel_csr {
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
- func(supports_tv)
+ func(supports_tv); \
+ func(has_ipc);
/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
@@ -1168,18 +926,19 @@ struct i915_drrs {
struct i915_psr {
struct mutex lock;
bool sink_support;
- bool source_ok;
struct intel_dp *enabled;
bool active;
- struct delayed_work work;
+ struct work_struct work;
unsigned busy_frontbuffer_bits;
- bool psr2_support;
- bool aux_frame_sync;
+ bool sink_psr2_support;
bool link_standby;
- bool y_cord_support;
bool colorimetry_support;
bool alpm;
- bool has_hw_tracking;
+ bool psr2_enabled;
+ u8 sink_sync_latency;
+ bool debug;
+ ktime_t last_entry_attempt;
+ ktime_t last_exit;
};
enum intel_pch {
@@ -1656,6 +1415,7 @@ struct intel_vbt_data {
} edp;
struct {
+ bool enable;
bool full_link;
bool require_aux_wakeup;
int idle_frames;
@@ -1685,7 +1445,7 @@ struct intel_vbt_data {
int crt_ddc_pin;
int child_dev_num;
- union child_device_config *child_dev;
+ struct child_device_config *child_dev;
struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
struct sdvo_device_mapping sdvo_mappings[2];
@@ -2742,6 +2502,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
INTEL_DEVID(dev_priv) == 0x5915 || \
INTEL_DEVID(dev_priv) == 0x591E)
+#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
+ INTEL_DEVID(dev_priv) == 0x87C0)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2871,6 +2633,8 @@ intel_info(const struct drm_i915_private *dev_priv)
#define HAS_RUNTIME_PM(dev_priv) ((dev_priv)->info.has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) ((dev_priv)->info.has_64bit_reloc)
+#define HAS_IPC(dev_priv) ((dev_priv)->info.has_ipc)
+
/*
* For now, anything with a GuC requires uCode loading, and then supports
* command submission once loaded. But these are logically independent
@@ -3047,11 +2811,23 @@ int intel_wait_for_register(struct drm_i915_private *dev_priv,
return __intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms,
NULL);
}
+int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value);
+static inline
int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms);
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms)
+{
+ return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
+ 2, timeout_ms, NULL);
+}
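/*
 * Callers of the wrapper above are unchanged: the old millisecond timeout
 * becomes the slow path, with a 2us atomic fast path in front. A sketch of
 * a typical poll (register, mask and timeout values illustrative):
 */
ret = intel_wait_for_register_fw(dev_priv, EDP_PSR_STATUS,
                                 EDP_PSR_STATUS_STATE_MASK, 0, 50);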
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index af3e004bb71443..10afef4b84c6a4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -42,6 +42,7 @@
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
+#include <linux/pagevec.h>
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
@@ -2205,14 +2206,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}
-extern void mlock_vma_page(struct page *page);
-extern unsigned int munlock_vma_page(struct page *page);
+/*
+ * Move pages to the appropriate LRU and release the pagevec, decrementing
+ * the refcount of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+ check_move_unevictable_pages(pvec);
+ __pagevec_release(pvec);
+ cond_resched();
+}
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
struct sgt_iter sgt_iter;
+ struct pagevec pvec;
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
@@ -2222,6 +2232,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_save_bit_17_swizzle(obj, pages);
+ mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+ pagevec_init(&pvec, 0);
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
set_page_dirty(page);
@@ -2229,12 +2242,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- lock_page(page);
- munlock_vma_page(page);
- unlock_page(page);
-
- page_cache_release(page);
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
obj->mm.dirty = false;
sg_free_table(pages);
@@ -2316,6 +2328,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
unsigned int max_segment;
+ struct pagevec pvec;
int ret;
gfp_t gfp;
@@ -2346,11 +2359,13 @@ rebuild_st:
* Fail silently without starting the shrinker
*/
mapping = file_inode(obj->base.filp)->i_mapping;
+ mapping_set_unevictable(mapping);
gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
gfp |= __GFP_NORETRY | __GFP_NOWARN;
sg = st->sgl;
st->nents = 0;
for (i = 0; i < page_count; i++) {
+ cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
if (IS_ERR(page)) {
i915_gem_shrink(dev_priv,
@@ -2383,10 +2398,6 @@ rebuild_st:
}
last_pfn = page_to_pfn(page);
- lock_page(page);
- mlock_vma_page(page);
- unlock_page(page);
-
/* Check that the i965g/gm workaround works. */
WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
@@ -2422,12 +2433,14 @@ rebuild_st:
err_sg:
sg_mark_end(sg);
err_pages:
+ mapping_clear_unevictable(mapping);
+ pagevec_init(&pvec, 0);
for_each_sgt_page(page, sgt_iter, st) {
- lock_page(page);
- munlock_vma_page(page);
- unlock_page(page);
- page_cache_release(page);
+ if (!pagevec_add(&pvec, page))
+ check_release_pagevec(&pvec);
}
+ if (pagevec_count(&pvec))
+ check_release_pagevec(&pvec);
sg_free_table(st);
kfree(st);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 0a558603d58f0e..050f1c08199426 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -501,6 +501,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY);
if (pvec != NULL) {
struct mm_struct *mm = obj->userptr.mm->mm;
+ unsigned int flags = 0;
+
+ if (!obj->userptr.read_only)
+ flags |= FOLL_WRITE;
ret = -EFAULT;
if (atomic_inc_not_zero(&mm->mm_users)) {
@@ -510,7 +514,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
- !obj->userptr.read_only, 0,
+ flags,
pvec + pinned, NULL);
if (ret < 0)
break;
@@ -766,6 +770,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ if (!args->user_size)
+ return -EINVAL;
+
if (offset_in_page(args->user_ptr | args->user_size))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e8823f3f2db723..4dd4902be23799 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -126,7 +126,7 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)
-#define GEN5_IRQ_RESET(type) do { \
+#define GEN3_IRQ_RESET(type) do { \
I915_WRITE(type##IMR, 0xffffffff); \
POSTING_READ(type##IMR); \
I915_WRITE(type##IER, 0); \
@@ -139,7 +139,7 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
+static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
u32 val = I915_READ(reg);
@@ -156,14 +156,14 @@ static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
}
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
+ gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
-#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
- gen5_assert_iir_is_zero(dev_priv, type##IIR); \
+#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
+ gen3_assert_iir_is_zero(dev_priv, type##IIR); \
I915_WRITE(type##IER, (ier_val)); \
I915_WRITE(type##IMR, (imr_val)); \
POSTING_READ(type##IMR); \
@@ -2199,6 +2199,13 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
+ if (de_iir & DE_EDP_PSR_INT_HSW) {
+ u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ I915_WRITE(EDP_PSR_IIR, psr_iir);
+ }
+
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev_priv);
@@ -2333,11 +2340,25 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
if (master_ctl & GEN8_DE_MISC_IRQ) {
iir = I915_READ(GEN8_DE_MISC_IIR);
if (iir) {
+ bool found = false;
+
I915_WRITE(GEN8_DE_MISC_IIR, iir);
ret = IRQ_HANDLED;
- if (iir & GEN8_DE_MISC_GSE)
+
+ if (iir & GEN8_DE_MISC_GSE) {
intel_opregion_asle_intr(dev_priv);
- else
+ found = true;
+ }
+
+ if (iir & GEN8_DE_EDP_PSR) {
+ u32 psr_iir = I915_READ(EDP_PSR_IIR);
+
+ intel_psr_irq_handler(dev_priv, psr_iir);
+ I915_WRITE(EDP_PSR_IIR, psr_iir);
+ found = true;
+ }
+
+ if (!found)
DRM_ERROR("Unexpected DE Misc interrupt\n");
}
else
@@ -2838,7 +2859,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- GEN5_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -2866,9 +2887,9 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN5_IRQ_RESET(GT);
+ GEN3_IRQ_RESET(GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN5_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
@@ -2890,7 +2911,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
dev_priv->pipestat_irq_mask[pipe] = 0;
}
- GEN5_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(VLV_);
dev_priv->irq_mask = ~0;
}
@@ -2917,7 +2938,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -2928,10 +2949,15 @@ static void ironlake_irq_reset(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xffffffff);
- GEN5_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(DE);
if (IS_GEN7(dev_priv))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
+ if (IS_HASWELL(dev_priv)) {
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+ }
+
gen5_gt_irq_reset(dev_priv);
ibx_irq_reset(dev_priv);
@@ -2970,14 +2996,17 @@ static void gen8_irq_reset(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
+ I915_WRITE(EDP_PSR_IMR, 0xffffffff);
+ I915_WRITE(EDP_PSR_IIR, 0xffffffff);
+
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
- GEN5_IRQ_RESET(GEN8_DE_PORT_);
- GEN5_IRQ_RESET(GEN8_DE_MISC_);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3020,7 +3049,7 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3176,7 +3205,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
else
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
- gen5_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(dev_priv, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
}
@@ -3201,7 +3230,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
@@ -3212,7 +3241,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_irq_mask = 0xffffffff;
- GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
+ GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
}
}
@@ -3240,13 +3269,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
DE_DP_A_HOTPLUG);
}
+ if (IS_HASWELL(dev_priv)) {
+ gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+ display_mask |= DE_EDP_PSR_INT_HSW;
+ }
+
dev_priv->irq_mask = ~display_mask;
I915_WRITE(HWSTAM, 0xeffe);
ibx_irq_pre_postinstall(dev);
- GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3349,6 +3384,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
uint32_t de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
u32 de_port_enables;
+ u32 de_misc_masked = GEN8_DE_MISC_GSE | GEN8_DE_EDP_PSR;
enum pipe pipe;
if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3372,18 +3408,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
- dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
- dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
+ gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
+
+ for_each_pipe(dev_priv, pipe) {
+ dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;
- for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
+ }
- GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3464,7 +3503,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
gen8_gt_irq_reset(dev_priv);
- GEN5_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
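The reworked DE-misc path above follows a fixed pattern: read the top-level IIR, ack it before handling so a new edge can re-raise the interrupt, handle each known bit, and only log an error when no bit matched. A minimal standalone sketch of that pattern, with hypothetical read_iir()/write_iir() stand-ins for I915_READ/I915_WRITE and bit values copied from the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DE_MISC_GSE  (1u << 27)   /* matches GEN8_DE_MISC_GSE in the patch */
#define DE_MISC_PSR  (1u << 19)   /* matches GEN8_DE_EDP_PSR in the patch */

/* Hypothetical MMIO stand-ins for I915_READ/I915_WRITE. */
static uint32_t fake_iir = DE_MISC_PSR;
static uint32_t read_iir(void) { return fake_iir; }
static void write_iir(uint32_t val) { fake_iir &= ~val; }

static void handle_de_misc(void)
{
	uint32_t iir = read_iir();
	bool found = false;

	if (!iir)
		return;

	/* Ack before handling so a new edge re-raises the interrupt. */
	write_iir(iir);

	if (iir & DE_MISC_GSE) {
		printf("opregion ASLE event\n");
		found = true;
	}
	if (iir & DE_MISC_PSR) {
		printf("PSR event\n");
		found = true;
	}
	if (!found)
		printf("Unexpected DE Misc interrupt\n");
}

int main(void) { handle_de_misc(); return 0; }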
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 71b167a6940015..65c3be2df07c5b 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(enable_execlists,
module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR "
- "(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+ "(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index b3997ac982bd53..946f6c41ad4974 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -261,7 +261,6 @@ static const struct intel_device_info intel_ivybridge_q_info = {
#define VLV_FEATURES \
.gen = 7, .num_pipes = 2, \
- .has_psr = 1, \
.has_runtime_pm = 1, \
.has_rc6 = 1, \
.has_gmbus_irq = 1, \
@@ -345,7 +344,6 @@ static const struct intel_device_info intel_cherryview_info = {
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
.platform = INTEL_CHERRYVIEW,
.has_64bit_reloc = 1,
- .has_psr = 1,
.has_runtime_pm = 1,
.has_resource_streamer = 1,
.has_rc6 = 1,
@@ -397,6 +395,7 @@ static const struct intel_device_info intel_skylake_gt3_info = {
.has_hw_contexts = 1, \
.has_logical_ring_contexts = 1, \
.has_guc = 1, \
+ .has_ipc = 1, \
GEN_DEFAULT_PIPEOFFSETS, \
IVB_CURSOR_OFFSETS, \
BDW_COLORS
@@ -421,6 +420,7 @@ static const struct intel_device_info intel_kabylake_info = {
.has_csr = 1,
.has_guc = 1,
.ddb_size = 896,
+ .has_ipc = 1,
};
static const struct intel_device_info intel_kabylake_gt3_info = {
@@ -431,6 +431,7 @@ static const struct intel_device_info intel_kabylake_gt3_info = {
.has_guc = 1,
.ddb_size = 896,
.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .has_ipc = 1,
};
/*
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 60ec56acf32837..72a8ea28b7593f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3539,35 +3539,47 @@ enum {
#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0)
#define EDP_PSR_ENABLE (1<<31)
#define BDW_PSR_SINGLE_FRAME (1<<30)
+#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1<<29) /* SW can't modify */
#define EDP_PSR_LINK_STANDBY (1<<27)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
-#define EDP_PSR_MAX_SLEEP_TIME_MASK (0x1f<<20)
#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
#define EDP_PSR_TP1_TP2_SEL (0<<11)
#define EDP_PSR_TP1_TP3_SEL (1<<11)
-#define EDP_PSR_TP2_TP3_TIME_MASK (3<<8)
+#define EDP_PSR_CRC_ENABLE (1 << 10) /* BDW+ */
#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
-#define EDP_PSR_TP1_TIME_MASK (0x3<<4)
#define EDP_PSR_TP1_TIME_500us (0<<4)
#define EDP_PSR_TP1_TIME_100us (1<<4)
#define EDP_PSR_TP1_TIME_2500us (2<<4)
#define EDP_PSR_TP1_TIME_0us (3<<4)
-#define EDP_PSR_IDLE_FRAME_MASK (0xf<<0)
#define EDP_PSR_IDLE_FRAME_SHIFT 0
+/* Bspec claims those aren't shifted but stay at 0x64800 */
+#define EDP_PSR_IMR _MMIO(0x64834)
+#define EDP_PSR_IIR _MMIO(0x64838)
+#define EDP_PSR_ERROR(trans) (1 << (((trans) * 8 + 10) & 31))
+#define EDP_PSR_POST_EXIT(trans) (1 << (((trans) * 8 + 9) & 31))
+#define EDP_PSR_PRE_ENTRY(trans) (1 << (((trans) * 8 + 8) & 31))
+
#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10)
+#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
+#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
+#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
+#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
+#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
+
#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */
-#define EDP_PSR_STATUS_CTL _MMIO(dev_priv->psr_mmio_base + 0x40)
+#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40)
#define EDP_PSR_STATUS_STATE_MASK (7<<29)
+#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
@@ -3593,17 +3605,19 @@ enum {
#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44)
#define EDP_PSR_PERF_CNT_MASK 0xffffff
-#define EDP_PSR_DEBUG_CTL _MMIO(dev_priv->psr_mmio_base + 0x60)
+#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1<<28)
#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
#define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1<<16)
-#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15)
+#define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1<<15) /* SKL+ */
#define EDP_PSR2_CTL _MMIO(0x6f900)
#define EDP_PSR2_ENABLE (1<<31)
#define EDP_SU_TRACK_ENABLE (1<<30)
+#define EDP_Y_COORDINATE_VALID (1<<26) /* GLK and CNL+ */
+#define EDP_Y_COORDINATE_ENABLE (1<<25) /* GLK and CNL+ */
#define EDP_MAX_SU_DISABLE_TIME(t) ((t)<<20)
#define EDP_MAX_SU_DISABLE_TIME_MASK (0x1f<<20)
#define EDP_PSR2_TP2_TIME_500us (0<<8)
@@ -3613,10 +3627,34 @@ enum {
#define EDP_PSR2_TP2_TIME_MASK (3<<8)
#define EDP_PSR2_FRAME_BEFORE_SU_SHIFT 4
#define EDP_PSR2_FRAME_BEFORE_SU_MASK (0xf<<4)
-#define EDP_PSR2_IDLE_MASK 0xf
#define EDP_PSR2_FRAME_BEFORE_SU(a) ((a)<<4)
-
-#define EDP_PSR2_STATUS_CTL _MMIO(0x6f940)
+#define EDP_PSR2_IDLE_FRAME_MASK 0xf
+#define EDP_PSR2_IDLE_FRAME_SHIFT 0
+
+#define _PSR_EVENT_TRANS_A 0x60848
+#define _PSR_EVENT_TRANS_B 0x61848
+#define _PSR_EVENT_TRANS_C 0x62848
+#define _PSR_EVENT_TRANS_D 0x63848
+#define _PSR_EVENT_TRANS_EDP 0x6F848
+#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A)
+#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17)
+#define PSR_EVENT_PSR2_DISABLED (1 << 16)
+#define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15)
+#define PSR_EVENT_SU_CRC_FIFO_UNDERRUN (1 << 14)
+#define PSR_EVENT_GRAPHICS_RESET (1 << 12)
+#define PSR_EVENT_PCH_INTERRUPT (1 << 11)
+#define PSR_EVENT_MEMORY_UP (1 << 10)
+#define PSR_EVENT_FRONT_BUFFER_MODIFY (1 << 9)
+#define PSR_EVENT_WD_TIMER_EXPIRE (1 << 8)
+#define PSR_EVENT_PIPE_REGISTERS_UPDATE (1 << 6)
+#define PSR_EVENT_REGISTER_UPDATE (1 << 5)
+#define PSR_EVENT_HDCP_ENABLE (1 << 4)
+#define PSR_EVENT_KVMR_SESSION_ENABLE (1 << 3)
+#define PSR_EVENT_VBI_ENABLE (1 << 2)
+#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
+#define PSR_EVENT_PSR_DISABLE (1 << 0)
+
+#define EDP_PSR2_STATUS _MMIO(0x6f940)
#define EDP_PSR2_STATUS_STATE_MASK (0xf<<28)
#define EDP_PSR2_STATUS_STATE_SHIFT 28
@@ -5435,6 +5473,9 @@ enum {
#define IVB_CURSOR_B_OFFSET 0x71080
#define IVB_CURSOR_C_OFFSET 0x72080
+#define _CUR_SURLIVE 0x700AC
+#define CUR_SURLIVE(pipe) _CURSOR2(pipe, _CUR_SURLIVE)
+
/* Display A control */
#define _DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -6306,6 +6347,7 @@ enum {
#define DE_PCH_EVENT_IVB (1<<28)
#define DE_DP_A_HOTPLUG_IVB (1<<27)
#define DE_AUX_CHANNEL_A_IVB (1<<26)
+#define DE_EDP_PSR_INT_HSW (1<<19)
#define DE_SPRITEC_FLIP_DONE_IVB (1<<14)
#define DE_PLANEC_FLIP_DONE_IVB (1<<13)
#define DE_PIPEC_VBLANK_IVB (1<<10)
@@ -6418,6 +6460,7 @@ enum {
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
#define GEN8_DE_MISC_IER _MMIO(0x4446c)
#define GEN8_DE_MISC_GSE (1 << 27)
+#define GEN8_DE_EDP_PSR (1 << 19)
#define GEN8_PCU_ISR _MMIO(0x444e0)
#define GEN8_PCU_IMR _MMIO(0x444e4)
@@ -6468,6 +6511,7 @@ enum {
#define CHICKEN_TRANS_A 0x420c0
#define CHICKEN_TRANS_B 0x420c4
#define CHICKEN_TRANS(trans) _MMIO_TRANS(trans, CHICKEN_TRANS_A, CHICKEN_TRANS_B)
+#define VSC_DATA_SEL_SOFTWARE_CONTROL (1<<25) /* GLK and CNL+ */
#define PSR2_VSC_ENABLE_PROG_HEADER (1<<12)
#define PSR2_ADD_VERTICAL_LINE_COUNT (1<<15)
@@ -6477,6 +6521,7 @@ enum {
#define DISP_FBC_WM_DIS (1<<15)
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1<<6)
+#define DISP_IPC_ENABLE (1<<3)
#define DBUF_CTL _MMIO(0x45008)
#define DBUF_POWER_REQUEST (1<<31)
#define DBUF_POWER_STATE (1<<30)
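The EDP_PSR_ERROR/POST_EXIT/PRE_ENTRY macros added above pack three events per transcoder into one 32-bit IIR, 8 bits apart, with the "& 31" wrapping the eDP transcoder's bits back to the bottom of the register. A small sketch of the resulting layout, assuming the usual i915 transcoder numbering (A=0, B=1, C=2, EDP=3):

#include <stdio.h>

/* Copies of the bit macros added above. */
#define EDP_PSR_ERROR(trans)     (1u << (((trans) * 8 + 10) & 31))
#define EDP_PSR_POST_EXIT(trans) (1u << (((trans) * 8 + 9) & 31))
#define EDP_PSR_PRE_ENTRY(trans) (1u << (((trans) * 8 + 8) & 31))

int main(void)
{
	const char *names[] = { "A", "B", "C", "EDP" };

	for (int t = 0; t < 4; t++)
		printf("trans %-3s  pre-entry bit %2d  post-exit bit %2d  error bit %2d\n",
		       names[t],
		       (t * 8 + 8) & 31,
		       (t * 8 + 9) & 31,
		       (t * 8 + 10) & 31);
	/* trans EDP wraps: bits 0..2 instead of 32..34. */
	return 0;
}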
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 051fa4b92cd165..471ad59be5a2a5 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -76,6 +76,9 @@ static const struct {
/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
static const struct {
int sample_rate;
int clock;
@@ -96,6 +99,20 @@ static const struct {
{ 176400, TMDS_297M, 18816, 247500 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
+ { 48000, TMDS_593M, 5824, 562500 },
+ { 48000, TMDS_594M, 6144, 594000 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
+ { 96000, TMDS_593M, 11648, 562500 },
+ { 96000, TMDS_594M, 12288, 594000 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 192000, TMDS_593M, 23296, 562500 },
+ { 192000, TMDS_594M, 24576, 594000 },
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
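The new 593.407/594 MHz rows can be sanity-checked against the HDMI audio clock regeneration relation, 128 * fs = f_TMDS * N / CTS: the recovered sample rate should land on the nominal rate exactly, or within rounding for the /1.001 clock. A quick check using the table values directly:

#include <stdio.h>

struct nct { int fs; long long tmds_khz; int n; int cts; };

int main(void)
{
	/* Rows taken from the table above. */
	static const struct nct rows[] = {
		{ 44100, 593407, 8918, 937500 },
		{ 44100, 594000, 9408, 990000 },
		{ 48000, 594000, 6144, 594000 },
		{ 192000, 594000, 24576, 594000 },
	};

	for (unsigned i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
		/* 128 * fs = f_TMDS * N / CTS  =>  fs = f_TMDS * N / (128 * CTS) */
		double fs = rows[i].tmds_khz * 1000.0 * rows[i].n /
			    (128.0 * rows[i].cts);
		printf("nominal %6d Hz -> recovered %.1f Hz\n", rows[i].fs, fs);
	}
	return 0;
}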
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 7651ce692011f1..19b8529eddb718 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -434,7 +434,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
}
}
-static const union child_device_config *
+static const struct child_device_config *
child_device_ptr(const struct bdb_general_definitions *p_defs, int i)
{
return (const void *) &p_defs->devices[i * p_defs->child_dev_size];
@@ -446,7 +446,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
{
struct sdvo_device_mapping *p_mapping;
const struct bdb_general_definitions *p_defs;
- const struct old_child_dev_config *child; /* legacy */
+ const struct child_device_config *child;
int i, child_device_num, count;
u16 block_size;
@@ -461,7 +461,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
* device size matches that of the *legacy* child device config
* struct. Thus, SDVO mapping will be skipped for newer VBT.
*/
- if (p_defs->child_dev_size != sizeof(*child)) {
+ if (p_defs->child_dev_size != LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
DRM_DEBUG_KMS("Unsupported child device size for SDVO mapping.\n");
return;
}
@@ -472,7 +472,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
p_defs->child_dev_size;
count = 0;
for (i = 0; i < child_device_num; i++) {
- child = &child_device_ptr(p_defs, i)->old;
+ child = child_device_ptr(p_defs, i);
if (!child->device_type) {
/* skip the device block if device type is invalid */
continue;
@@ -552,6 +552,7 @@ parse_driver_features(struct drm_i915_private *dev_priv,
*/
if (!driver->drrs_enabled)
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
+ dev_priv->vbt.psr.enable = driver->psr_enabled;
}
static void
@@ -559,7 +560,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
{
const struct bdb_edp *edp;
const struct edp_power_seq *edp_pps;
- const struct edp_link_params *edp_link_params;
+ const struct edp_fast_link_params *edp_link_params;
int panel_type = dev_priv->vbt.panel_type;
edp = find_section(bdb, BDB_EDP);
@@ -583,7 +584,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
/* Get the eDP sequencing and link info */
edp_pps = &edp->power_seqs[panel_type];
- edp_link_params = &edp->link_params[panel_type];
+ edp_link_params = &edp->fast_link_params[panel_type];
dev_priv->vbt.edp.pps = *edp_pps;
@@ -740,7 +741,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 100;
break;
case 3:
- dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
+ dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
@@ -1125,7 +1126,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
const struct bdb_header *bdb)
{
- union child_device_config *it, *child = NULL;
+ struct child_device_config *it, *child = NULL;
struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
uint8_t hdmi_level_shift;
int i, j;
@@ -1150,7 +1151,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (dvo_ports[port][j] == -1)
break;
- if (it->common.dvo_port == dvo_ports[port][j]) {
+ if (it->dvo_port == dvo_ports[port][j]) {
if (child) {
DRM_DEBUG_KMS("More than one child device for port %c in VBT.\n",
port_name(port));
@@ -1163,14 +1164,14 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (!child)
return;
- aux_channel = child->common.aux_channel;
- ddc_pin = child->common.ddc_pin;
+ aux_channel = child->aux_channel;
+ ddc_pin = child->ddc_pin;
- is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
- is_dp = child->common.device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- is_crt = child->common.device_type & DEVICE_TYPE_ANALOG_OUTPUT;
- is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
- is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
+ is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
+ is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ is_crt = child->device_type & DEVICE_TYPE_ANALOG_OUTPUT;
+ is_hdmi = is_dvi && (child->device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
+ is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
if (port == PORT_A && is_dvi) {
DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
@@ -1216,7 +1217,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
if (bdb->version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
- hdmi_level_shift = child->raw[7] & 0xF;
+ hdmi_level_shift = child->hdmi_level_shifter_value;
DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
port_name(port),
hdmi_level_shift);
@@ -1224,11 +1225,11 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
}
/* Parse the I_boost config for SKL and above */
- if (bdb->version >= 196 && child->common.iboost) {
- info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF);
+ if (bdb->version >= 196 && child->iboost) {
+ info->dp_boost_level = translate_iboost(child->dp_iboost_level);
DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
port_name(port), info->dp_boost_level);
- info->hdmi_boost_level = translate_iboost(child->common.iboost_level >> 4);
+ info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
port_name(port), info->hdmi_boost_level);
}
@@ -1257,8 +1258,8 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
{
const struct bdb_general_definitions *p_defs;
- const union child_device_config *p_child;
- union child_device_config *child_dev_ptr;
+ const struct child_device_config *p_child;
+ struct child_device_config *child_dev_ptr;
int i, child_device_num, count;
u8 expected_size;
u16 block_size;
@@ -1273,15 +1274,16 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
} else if (bdb->version < 109) {
expected_size = 27;
} else if (bdb->version < 195) {
- BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
- expected_size = sizeof(struct old_child_dev_config);
+ expected_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;
} else if (bdb->version == 195) {
expected_size = 37;
- } else if (bdb->version <= 197) {
+ } else if (bdb->version <= 215) {
expected_size = 38;
+ } else if (bdb->version <= 216) {
+ expected_size = 39;
} else {
- expected_size = 38;
- BUILD_BUG_ON(sizeof(*p_child) < 38);
+ expected_size = sizeof(*p_child);
+ BUILD_BUG_ON(sizeof(*p_child) < 39);
DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
bdb->version, expected_size);
}
@@ -1292,7 +1294,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
p_defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
- if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+ if (p_defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
DRM_DEBUG_KMS("Child device config size %u is too small.\n",
p_defs->child_dev_size);
return;
@@ -1307,7 +1309,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
/* get the number of child device that is present */
for (i = 0; i < child_device_num; i++) {
p_child = child_device_ptr(p_defs, i);
- if (!p_child->common.device_type) {
+ if (!p_child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
@@ -1327,7 +1329,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
count = 0;
for (i = 0; i < child_device_num; i++) {
p_child = child_device_ptr(p_defs, i);
- if (!p_child->common.device_type) {
+ if (!p_child->device_type) {
/* skip the device block if device type is invalid */
continue;
}
@@ -1349,12 +1351,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
*/
if (bdb->version < 196) {
/* Set default values for bits added from v196 */
- child_dev_ptr->common.iboost = 0;
- child_dev_ptr->common.hpd_invert = 0;
+ child_dev_ptr->iboost = 0;
+ child_dev_ptr->hpd_invert = 0;
}
if (bdb->version < 192)
- child_dev_ptr->common.lspcon = 0;
+ child_dev_ptr->lspcon = 0;
}
return;
}
@@ -1546,7 +1548,7 @@ intel_bios_init(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
int i;
if (!dev_priv->vbt.int_tv_support)
@@ -1556,11 +1558,11 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
/*
* If the device type is not TV, continue.
*/
- switch (p_child->old.device_type) {
+ switch (child->device_type) {
case DEVICE_TYPE_INT_TV:
case DEVICE_TYPE_TV:
case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
@@ -1571,7 +1573,7 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
/* Only when the addin_offset is non-zero, it is regarded
* as present.
*/
- if (p_child->old.addin_offset)
+ if (child->addin_offset)
return true;
}
@@ -1588,14 +1590,14 @@ bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
*/
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
{
+ const struct child_device_config *child;
int i;
if (!dev_priv->vbt.child_dev_num)
return true;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- union child_device_config *uchild = dev_priv->vbt.child_dev + i;
- struct old_child_dev_config *child = &uchild->old;
+ child = dev_priv->vbt.child_dev + i;
/* If the device type is not LFP, continue.
* We have to check both the new identifiers as well as the
@@ -1637,6 +1639,7 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
*/
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
{
+ const struct child_device_config *child;
static const struct {
u16 dp, hdmi;
} port_mapping[] = {
@@ -1655,12 +1658,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
- if ((p_child->common.dvo_port == port_mapping[port].dp ||
- p_child->common.dvo_port == port_mapping[port].hdmi) &&
- (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
- DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+ child = dev_priv->vbt.child_dev + i;
+
+ if ((child->dvo_port == port_mapping[port].dp ||
+ child->dvo_port == port_mapping[port].hdmi) &&
+ (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
return true;
}
@@ -1676,7 +1679,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
*/
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
static const short port_mapping[] = {
[PORT_B] = DVO_PORT_DPB,
[PORT_C] = DVO_PORT_DPC,
@@ -1689,10 +1692,10 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
- if (p_child->common.dvo_port == port_mapping[port] &&
- (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+ if (child->dvo_port == port_mapping[port] &&
+ (child->device_type & DEVICE_TYPE_eDP_BITS) ==
(DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
return true;
}
@@ -1700,7 +1703,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
-static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
+static bool child_dev_is_dp_dual_mode(const struct child_device_config *child,
enum port port)
{
static const struct {
@@ -1719,16 +1722,16 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
return false;
- if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
+ if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
(DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
return false;
- if (p_child->common.dvo_port == port_mapping[port].dp)
+ if (child->dvo_port == port_mapping[port].dp)
return true;
/* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */
- if (p_child->common.dvo_port == port_mapping[port].hdmi &&
- p_child->common.aux_channel != 0)
+ if (child->dvo_port == port_mapping[port].hdmi &&
+ child->aux_channel != 0)
return true;
return false;
@@ -1737,13 +1740,13 @@ static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child,
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- const union child_device_config *p_child =
- &dev_priv->vbt.child_dev[i];
+ child = dev_priv->vbt.child_dev + i;
- if (child_dev_is_dp_dual_mode(p_child, port))
+ if (child_dev_is_dp_dual_mode(child, port))
return true;
}
@@ -1760,17 +1763,17 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv,
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
enum port *port)
{
- union child_device_config *p_child;
+ const struct child_device_config *child;
u8 dvo_port;
int i;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- p_child = dev_priv->vbt.child_dev + i;
+ child = dev_priv->vbt.child_dev + i;
- if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT))
+ if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- dvo_port = p_child->common.dvo_port;
+ dvo_port = child->dvo_port;
switch (dvo_port) {
case DVO_PORT_MIPIA:
@@ -1800,16 +1803,19 @@ bool
intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port)
{
+ const struct child_device_config *child;
int i;
if (WARN_ON_ONCE(!IS_BROXTON(dev_priv)))
return false;
for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- if (!dev_priv->vbt.child_dev[i].common.hpd_invert)
+ child = dev_priv->vbt.child_dev + i;
+
+ if (!child->hpd_invert)
continue;
- switch (dev_priv->vbt.child_dev[i].common.dvo_port) {
+ switch (child->dvo_port) {
case DVO_PORT_DPA:
case DVO_PORT_HDMIA:
if (port == PORT_A)
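One subtlety the struct-based rework preserves: VBT child device entries are laid out child_dev_size bytes apart as declared by the BDB, which may be smaller or larger than the driver's struct, so indexing must use the declared stride rather than sizeof(). A reduced model of child_device_ptr(), assuming the 33-byte legacy stride that LEGACY_CHILD_DEVICE_CONFIG_SIZE is checked against:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33  /* assumed legacy entry size */

struct defs {
	uint8_t child_dev_size;   /* stride declared by the VBT */
	uint8_t devices[];        /* child entries, child_dev_size apart */
};

/* Mirrors child_device_ptr(): index by declared stride, not sizeof(). */
static const void *child_device_ptr(const struct defs *d, int i)
{
	return &d->devices[i * d->child_dev_size];
}

int main(void)
{
	uint8_t blob[sizeof(struct defs) + 3 * LEGACY_CHILD_DEVICE_CONFIG_SIZE];
	struct defs *d = (struct defs *)blob;

	memset(blob, 0, sizeof(blob));
	d->child_dev_size = LEGACY_CHILD_DEVICE_CONFIG_SIZE;

	for (int i = 0; i < 3; i++)
		printf("child %d at offset %td\n", i,
		       (const uint8_t *)child_device_ptr(d, i) - d->devices);
	return 0;
}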
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 19f8c55888d10a..9e1a742ffb4a70 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1532,6 +1532,42 @@ static void bdw_modeset_commit_cdclk(struct drm_atomic_state *old_state)
bdw_set_cdclk(dev, req_cdclk);
}
+static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
+ struct intel_crtc *crtc;
+ struct intel_crtc_state *crtc_state;
+ int vco, i;
+
+ vco = intel_state->cdclk.logical.vco;
+ if (!vco)
+ vco = dev_priv->skl_preferred_vco_freq;
+
+ for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
+ if (!crtc_state->base.enable)
+ continue;
+
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+ continue;
+
+ /*
+ * DPLL0 VCO may need to be adjusted to get the correct
+ * clock for eDP. This will affect cdclk as well.
+ */
+ switch (crtc_state->port_clock / 2) {
+ case 108000:
+ case 216000:
+ vco = 8640000;
+ break;
+ default:
+ vco = 8100000;
+ break;
+ }
+ }
+
+ return vco;
+}
+
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
@@ -1539,9 +1575,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
const int max_pixclk = intel_max_pixel_rate(state);
int cdclk, vco;
- vco = intel_state->cdclk.logical.vco;
- if (!vco)
- vco = dev_priv->skl_preferred_vco_freq;
+ vco = skl_dpll0_vco(intel_state);
/*
* FIXME should also account for plane ratio
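The VCO choice in skl_dpll0_vco() follows from divisibility: the 2.16/4.32 GHz eDP link rates (port_clock 216000/432000 kHz, i.e. a 108/216 MHz half-rate in the switch) divide evenly out of an 8640 MHz VCO (8640/216 = 40) but not out of 8100 MHz (8100/216 = 37.5), while the standard 1.62/2.7/5.4 GHz rates divide 8100 MHz. A sketch of the same decision, assuming port_clock is the per-lane link clock in kHz as elsewhere in i915:

#include <stdio.h>

/* Mirrors the switch in skl_dpll0_vco(); port_clock in kHz. */
static int skl_vco_for_port_clock(int port_clock)
{
	switch (port_clock / 2) {
	case 108000:	/* 2.16 GHz eDP link */
	case 216000:	/* 4.32 GHz eDP link */
		return 8640000;
	default:
		return 8100000;
	}
}

int main(void)
{
	const int clocks[] = { 162000, 216000, 270000, 432000, 540000 };

	for (unsigned i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		int vco = skl_vco_for_port_clock(clocks[i]);
		printf("port_clock %6d kHz -> VCO %d kHz (ratio %.1f)\n",
		       clocks[i], vco, (double)vco / clocks[i]);
	}
	return 0;
}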
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index d81232b79f0077..0b7fe332a826ce 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -68,26 +68,25 @@ static bool crtc_state_is_legacy(struct drm_crtc_state *state)
/*
* When using limited range, multiply the matrix given by userspace by
- * the matrix that we would use for the limited range. We do the
- * multiplication in U2.30 format.
+ * the matrix that we would use for the limited range.
*/
-static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
+static void ctm_mult_by_limited(u64 *result, const u64 *input)
{
int i;
- for (i = 0; i < 9; i++)
- result[i] = 0;
+ for (i = 0; i < 9; i++) {
+ u64 user_coeff = input[i];
+ u32 limited_coeff = CTM_COEFF_LIMITED_RANGE;
+ u32 abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), 0,
+ CTM_COEFF_4_0 - 1) >> 2;
- for (i = 0; i < 3; i++) {
- int64_t user_coeff = input[i * 3 + i];
- uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2;
- uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff),
- 0,
- CTM_COEFF_4_0 - 1) >> 2;
-
- result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27;
- if (CTM_COEFF_NEGATIVE(user_coeff))
- result[i * 3 + i] |= CTM_COEFF_SIGN;
+ /*
+ * By scaling every coefficient by the limited-range (16-235)
+ * vs full-range (0-255) ratio, the final output is scaled down
+ * to fit in the limited range supported by the panel.
+ */
+ result[i] = mul_u32_u32(limited_coeff, abs_coeff) >> 30;
+ result[i] |= user_coeff & CTM_COEFF_SIGN;
}
}
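The rewritten loop keeps the arithmetic in fixed point: DRM CTM coefficients are sign-magnitude S31.32 (1.0 = 1ULL << 32), the magnitude is clamped below 4.0 and shifted down to U2.30, and the 219/255 limited-range factor stays in 0.32 format, so the 64-bit product needs a >> 30 to land back on the S31.32 scale. A standalone check that scaling a 1.0 coefficient yields ~0.859, with the CTM constants re-declared locally under that assumed layout:

#include <stdint.h>
#include <stdio.h>

/* Assumed DRM CTM fixed-point layout: sign-magnitude S31.32. */
#define CTM_COEFF_1_0		(1ULL << 32)
#define CTM_COEFF_4_0		(CTM_COEFF_1_0 << 2)
#define CTM_COEFF_LIMITED_RANGE	((235ULL - 16) * CTM_COEFF_1_0 / 255)

int main(void)
{
	uint64_t user_coeff = CTM_COEFF_1_0;	/* identity matrix entry */
	uint32_t limited = (uint32_t)CTM_COEFF_LIMITED_RANGE; /* 219/255, 0.32 */
	uint32_t abs_coeff = (uint32_t)((user_coeff < CTM_COEFF_4_0 ?
					 user_coeff : CTM_COEFF_4_0 - 1) >> 2);

	/* 0.32 * U2.30 gives a .62 product; >> 30 restores S31.32. */
	uint64_t result = ((uint64_t)limited * abs_coeff) >> 30;

	printf("1.0 * 219/255 = %.6f (expect ~0.858824)\n",
	       (double)result / (double)CTM_COEFF_1_0);
	return 0;
}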
@@ -392,6 +391,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
}
/* Program the max register to clamp values > 1.0. */
+ i = lut_size - 1;
I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
drm_color_lut_extract(lut[i].red, 16));
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index dfd33d1f97d118..c6e1163bc49676 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -120,6 +120,8 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
static void intel_crt_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+
pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d4d40ca234ff27..02a9096263136b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -388,7 +388,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
kbl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_KBL_ULX(dev_priv)) {
+ if (IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(kbl_y_ddi_translations_dp);
return kbl_y_ddi_translations_dp;
} else if (IS_KBL_ULT(dev_priv)) {
@@ -404,7 +404,7 @@ static const struct ddi_buf_trans *
skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
if (dev_priv->vbt.edp.low_vswing) {
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
return skl_y_ddi_translations_edp;
} else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
@@ -425,7 +425,7 @@ skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct ddi_buf_trans *
skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
+ if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv) || IS_AML_ULX(dev_priv)) {
*n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
return skl_y_ddi_translations_hdmi;
} else {
@@ -785,21 +785,20 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
}
static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = crtc->base.dev;
struct intel_encoder *intel_encoder, *ret = NULL;
int num_encoders = 0;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
+ for_each_encoder_on_crtc(dev, &crtc->base, intel_encoder) {
ret = intel_encoder;
num_encoders++;
}
if (num_encoders != 1)
WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
@@ -1161,12 +1160,12 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
intel_encoder);
}
-void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
@@ -1174,7 +1173,7 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@@ -1194,12 +1193,12 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
-void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1209,14 +1208,13 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
@@ -1225,7 +1223,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -1242,9 +1240,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1255,8 +1253,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev_priv) &&
- (intel_crtc->config->pch_pfit.enabled ||
- intel_crtc->config->pch_pfit.force_thru))
+ (crtc_state->pch_pfit.enabled ||
+ crtc_state->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1274,20 +1272,20 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
- if (intel_crtc->config->has_hdmi_sink)
+ if (crtc_state->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
- temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
+ temp |= (crtc_state->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
- temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
- temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
intel_encoder->type, pipe_name(pipe));
@@ -1473,24 +1471,23 @@ out:
return ret;
}
-void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_PORT(port));
}
-void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1716,6 +1713,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
+ WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
+
intel_dp_set_link_params(intel_dp, link_rate, lane_count,
link_mst);
if (encoder->type == INTEL_OUTPUT_EDP)
@@ -1724,7 +1723,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_clk_select(encoder, pll);
intel_prepare_dp_ddi_buffers(encoder);
intel_ddi_init_dp_buf_reg(encoder);
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+ if (!link_mst)
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
intel_dp_stop_link_train(intel_dp);
@@ -1759,38 +1759,43 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
int type = intel_encoder->type;
+ /*
+ * When called from DP MST code:
+ * - conn_state will be NULL
+ * - encoder will be the main encoder (ie. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - crtc_state will be the state of the first stream to
+ * be activated on this port, and it may not be the same
+ * stream that will be deactivated last, but each stream
+ * should have a state that is identical when it comes to
+ * the DP link parameters
+ */
+
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
intel_ddi_pre_enable_dp(intel_encoder,
- crtc->config->port_clock,
- crtc->config->lane_count,
- crtc->config->shared_dpll,
- intel_crtc_has_type(crtc->config,
+ pipe_config->port_clock,
+ pipe_config->lane_count,
+ pipe_config->shared_dpll,
+ intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
}
if (type == INTEL_OUTPUT_HDMI) {
intel_ddi_pre_enable_hdmi(intel_encoder,
- crtc->config->has_hdmi_sink,
- &crtc->config->base.adjusted_mode,
- crtc->config->shared_dpll);
+ pipe_config->has_hdmi_sink,
+ &pipe_config->base.adjusted_mode,
+ pipe_config->shared_dpll);
}
}
-static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+static void intel_disable_ddi_buf(struct intel_encoder *encoder)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->dev);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
- uint32_t val;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
bool wait = false;
-
- /* old_crtc_state and old_conn_state are NULL when called from DP_MST */
+ u32 val;
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
@@ -1806,25 +1811,78 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
+}
- if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
+
+ /*
+ * Power down sink before disabling the port, otherwise we end
+ * up getting interrupts from the sink on detecting link loss.
+ */
+ if (!is_mst)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
- intel_edp_panel_vdd_on(intel_dp);
- intel_edp_panel_off(intel_dp);
- }
+
+ intel_disable_ddi_buf(encoder);
+
+ intel_edp_panel_vdd_on(intel_dp);
+ intel_edp_panel_off(intel_dp);
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_GEN(dev_priv) < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+}
- if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
- }
+ intel_disable_ddi_buf(encoder);
+
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+ DPLL_CTRL2_DDI_CLK_OFF(port)));
+ else if (INTEL_GEN(dev_priv) < 9)
+ I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+}
+
+static void intel_ddi_post_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
+{
+ /*
+ * When called from DP MST code:
+ * - old_conn_state will be NULL
+ * - encoder will be the main encoder (ie. mst->primary)
+ * - the main connector associated with this port
+ * won't be active or linked to a crtc
+ * - old_crtc_state will be the state of the last stream to
+ * be deactivated on this port, and it may not be the same
+ * stream that was activated last, but each stream
+ * should have a state that is identical when it comes to
+ * the DP link parameters
+ */
+
+ if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+ intel_ddi_post_disable_hdmi(encoder,
+ old_crtc_state, old_conn_state);
+ else
+ intel_ddi_post_disable_dp(encoder,
+ old_crtc_state, old_conn_state);
}
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
@@ -1889,7 +1947,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
intel_dp_stop_link_train(intel_dp);
intel_edp_backlight_on(intel_dp);
- intel_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp, pipe_config);
intel_edp_drrs_enable(intel_dp, pipe_config);
}
@@ -1919,7 +1977,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_edp_drrs_disable(intel_dp, old_crtc_state);
- intel_psr_disable(intel_dp);
+ intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(intel_dp);
}
}
@@ -1928,8 +1986,7 @@ static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
+ uint8_t mask = pipe_config->lane_lat_optim_mask;
bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
@@ -1980,6 +2037,19 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
udelay(600);
}
+bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc)
+{
+ u32 temp;
+
+ if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+ return true;
+ }
+ return false;
+}
+
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
@@ -2031,12 +2101,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_infoframe = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
break;
case TRANS_DDI_MODE_SELECT_FDI:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+ pipe_config->lane_count =
+ ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
+ intel_dp_get_m_n(intel_crtc, pipe_config);
+ break;
case TRANS_DDI_MODE_SELECT_DP_MST:
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2045,11 +2126,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
- temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
- if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
- pipe_config->has_audio = true;
- }
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
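The split into intel_ddi_post_disable_dp() and intel_ddi_post_disable_hdmi() keys entirely off old_crtc_state's output-type bitmask, because old_conn_state can be NULL on the MST path and cannot be used to pick the teardown. A reduced model of that dispatch, with hypothetical stand-ins for the two teardown paths:

#include <stdio.h>

enum intel_output_type { OUTPUT_DP, OUTPUT_EDP, OUTPUT_HDMI, OUTPUT_DP_MST };

struct crtc_state { unsigned output_types; };   /* bitmask, as in the driver */

#define HAS_TYPE(s, t) ((s)->output_types & (1u << (t)))

static void post_disable_hdmi(void) { printf("HDMI teardown\n"); }
static void post_disable_dp(void)   { printf("DP/eDP/MST teardown\n"); }

/* conn_state may be NULL (MST), so dispatch only on the crtc state. */
static void post_disable(const struct crtc_state *old_crtc_state,
			 const void *old_conn_state)
{
	(void)old_conn_state;
	if (HAS_TYPE(old_crtc_state, OUTPUT_HDMI))
		post_disable_hdmi();
	else
		post_disable_dp();
}

int main(void)
{
	struct crtc_state hdmi = { 1u << OUTPUT_HDMI };
	struct crtc_state mst  = { 1u << OUTPUT_DP_MST };

	post_disable(&hdmi, NULL);
	post_disable(&mst, NULL);
	return 0;
}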
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a8b9372cbd0fa9..0e30a714bb6a4a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2814,20 +2814,6 @@ out_unref_obj:
return false;
}
-/* Update plane->state->fb to match plane->fb after driver-internal updates */
-static void
-update_state_fb(struct drm_plane *plane)
-{
- if (plane->fb == plane->state->fb)
- return;
-
- if (plane->state->fb)
- drm_framebuffer_unreference(plane->state->fb);
- plane->state->fb = plane->fb;
- if (plane->state->fb)
- drm_framebuffer_reference(plane->state->fb);
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -5740,7 +5726,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
dev_priv->display.fdi_link_train(crtc);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_pipe_clock(intel_crtc);
+ intel_ddi_enable_pipe_clock(pipe_config);
/* Display WA #1180: WaDisableScalarClockGating: glk */
psl_clkgate_wa = IS_GEMINILAKE(dev_priv) &&
@@ -5759,9 +5745,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(&pipe_config->base);
- intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(crtc);
+ intel_ddi_enable_transcoder_func(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5774,7 +5760,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
lpt_pch_enable(crtc);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(crtc, true);
+ intel_ddi_set_vc_payload_alloc(pipe_config, true);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5899,7 +5885,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(intel_crtc);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(crtc, false);
+ intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
@@ -5910,7 +5896,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
ironlake_pfit_disable(intel_crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_pipe_clock(intel_crtc);
+ intel_ddi_disable_pipe_clock(intel_crtc->config);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
@@ -6425,6 +6411,18 @@ struct intel_connector *intel_connector_alloc(void)
return connector;
}
+/*
+ * Free the bits allocated by intel_connector_alloc.
+ * This should only be used after intel_connector_alloc has returned
+ * successfully, and before drm_connector_init returns successfully.
+ * Otherwise the destroy callbacks for the connector and the state should
+ * take care of proper cleanup/free
+ */
+void intel_connector_free(struct intel_connector *connector)
+{
+ kfree(connector);
+}
+
/* Simple connector->get_hw_state implementation for encoders that support only
* one connector and no cloning and hence the encoder state determines the state
* of the connector. */
@@ -10352,35 +10350,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(intel_crtc);
}
-static void intel_unpin_work_fn(struct work_struct *__work)
-{
- struct intel_flip_work *work =
- container_of(__work, struct intel_flip_work, unpin_work);
- struct intel_crtc *crtc = to_intel_crtc(work->crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_plane *primary = crtc->base.primary;
-
- if (is_mmio_work(work))
- flush_work(&work->mmio_work);
-
- mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_vma(work->old_vma);
- i915_gem_object_put(work->pending_flip_obj);
- mutex_unlock(&dev->struct_mutex);
-
- i915_gem_request_put(work->flip_queued_req);
-
- intel_frontbuffer_flip_complete(to_i915(dev),
- to_intel_plane(primary)->frontbuffer_bit);
- intel_fbc_post_update(crtc);
- drm_framebuffer_unreference(work->old_fb);
-
- BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
- atomic_dec(&crtc->unpin_work_count);
-
- kfree(work);
-}
-
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
@@ -10525,363 +10494,6 @@ static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
atomic_set(&work->pending, 1);
}
-static int intel_gen2_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_ring *ring = req->ring;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- /* Can't queue multiple flips, so wait for the previous
- * one to finish before executing the next.
- */
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, 0); /* aux display base address, unused */
-
- return 0;
-}
-
-static int intel_gen3_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_ring *ring = req->ring;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- if (intel_crtc->plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, MI_NOOP);
-
- return 0;
-}
-
-static int intel_gen4_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_ring *ring = req->ring;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pf, pipesrc;
- int ret;
-
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- /* i965+ uses the linear or tiled offsets from the
- * Display Registers (which do not change across a page-flip)
- * so we need only reprogram the base address.
- */
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier));
-
- /* XXX Enabling the panel-fitter across page-flip is so far
- * untested on non-native modes, so ignore it for now.
- * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
-
- return 0;
-}
-
-static int intel_gen6_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct intel_ring *ring = req->ring;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pf, pipesrc;
- int ret;
-
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier));
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-
- /* Contrary to the suggestions in the documentation,
- * "Enable Panel Fitter" does not seem to be required when page
- * flipping with a non-native mode, and worse causes a normal
- * modeset to fail.
- * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
- */
- pf = 0;
- pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
-
- return 0;
-}
-
-static int intel_gen7_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ring *ring = req->ring;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t plane_bit = 0;
- int len, ret;
-
- switch (intel_crtc->plane) {
- case PLANE_A:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
- break;
- case PLANE_B:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
- break;
- case PLANE_C:
- plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
- break;
- default:
- WARN_ONCE(1, "unknown plane in flip command\n");
- return -ENODEV;
- }
-
- len = 4;
- if (req->engine->id == RCS) {
- len += 6;
- /*
- * On Gen 8, SRM is now taking an extra dword to accommodate
- * 48-bit addresses, and we need a NOOP for the batch size to
- * stay even.
- */
- if (IS_GEN8(dev_priv))
- len += 2;
- }
-
- /*
- * BSpec MI_DISPLAY_FLIP for IVB:
- * "The full packet must be contained within the same cache line."
- *
- * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
- * cacheline, if we ever start emitting more commands before
- * the MI_DISPLAY_FLIP we may need to first emit everything else,
- * then do the cacheline alignment, and finally emit the
- * MI_DISPLAY_FLIP.
- */
- ret = intel_ring_cacheline_align(req);
- if (ret)
- return ret;
-
- ret = intel_ring_begin(req, len);
- if (ret)
- return ret;
-
- /* Unmask the flip-done completion message. Note that the bspec says that
- * we should do this for both the BCS and RCS, and that we must not unmask
- * more than one flip event at any time (or ensure that one flip message
- * can be sent by waiting for flip-done prior to queueing new flips).
- * Experimentation says that BCS works despite DERRMR masking all
- * flip-done completion events and that unmasking all planes at once
- * for the RCS also doesn't appear to drop events. Setting the DERRMR
- * to zero does lead to lockups within MI_DISPLAY_FLIP.
- */
- if (req->engine->id == RCS) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
- if (IS_GEN8(dev_priv))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT);
- else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring,
- i915_ggtt_offset(req->engine->scratch) + 256);
- if (IS_GEN8(dev_priv)) {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- }
- }
-
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier));
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, (MI_NOOP));
-
- return 0;
-}
-
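The BSpec rule quoted in intel_gen7_queue_flip() above (the full MI_DISPLAY_FLIP packet must sit within one cache line) is what the intel_ring_cacheline_align() call enforces before the packet is emitted. As a rough illustration, assuming a 64-byte cacheline and hypothetical helper names (a sketch, not the driver's actual helper):

/* Sketch: pad the ring with MI_NOOPs until the next emit lands on a
 * 64-byte boundary, so the later LRI+SRM+MI_DISPLAY_FLIP packet
 * cannot straddle two cache lines. */
#define CACHELINE_BYTES 64

static int pad_ring_to_cacheline(struct drm_i915_gem_request *req)
{
	int rem = (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(u32);
	int ret;

	if (rem == 0)
		return 0;

	rem = CACHELINE_BYTES / sizeof(u32) - rem; /* dwords of padding */
	ret = intel_ring_begin(req, rem);
	if (ret)
		return ret;

	while (rem--)
		intel_ring_emit(req->ring, MI_NOOP);

	return 0;
}
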
-static bool use_mmio_flip(struct intel_engine_cs *engine,
- struct drm_i915_gem_object *obj)
-{
- /*
- * This is not being used for older platforms, because
- * non-availability of flip done interrupt forces us to use
- * CS flips. Older platforms derive flip done using some clever
- * tricks involving the flip_pending status bits and vblank irqs.
- * So using MMIO flips there would disrupt this mechanism.
- */
-
- if (engine == NULL)
- return true;
-
- if (INTEL_GEN(engine->i915) < 5)
- return false;
-
- if (i915.use_mmio_flip < 0)
- return false;
- else if (i915.use_mmio_flip > 0)
- return true;
- else if (i915.enable_execlists)
- return true;
-
- return engine != i915_gem_object_last_write_engine(obj);
-}
-
-static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
- unsigned int rotation,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- const enum pipe pipe = intel_crtc->pipe;
- u32 ctl, stride = skl_plane_stride(fb, 0, rotation);
-
- ctl = I915_READ(PLANE_CTL(pipe, 0));
- ctl &= ~PLANE_CTL_TILED_MASK;
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
- break;
- case I915_FORMAT_MOD_X_TILED:
- ctl |= PLANE_CTL_TILED_X;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- ctl |= PLANE_CTL_TILED_Y;
- break;
- case I915_FORMAT_MOD_Yf_TILED:
- ctl |= PLANE_CTL_TILED_YF;
- break;
- default:
- MISSING_CASE(fb->modifier);
- }
-
- /*
- * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
- * PLANE_SURF updates, the update is then guaranteed to be atomic.
- */
- I915_WRITE(PLANE_CTL(pipe, 0), ctl);
- I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
-
- I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
- POSTING_READ(PLANE_SURF(pipe, 0));
-}
-
-static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
- struct intel_flip_work *work)
-{
- struct drm_device *dev = intel_crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
- i915_reg_t reg = DSPCNTR(intel_crtc->plane);
- u32 dspcntr;
-
- dspcntr = I915_READ(reg);
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
-
- I915_WRITE(reg, dspcntr);
-
- I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
- POSTING_READ(DSPSURF(intel_crtc->plane));
-}
-
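Both MMIO flip helpers above end with the same idiom: the surface-address write is what actually latches the pending control/stride changes, and the POSTING_READ of the same register flushes the posted write so the flip is armed before the function returns. Condensed into a standalone sketch (illustration only; I915_WRITE/I915_READ are the driver's MMIO accessors and expect dev_priv in scope):

/* Illustration: write the scanout address last, then read it back.
 * The read drains the posted MMIO write, so the hardware has seen the
 * new surface (and any buffered CTL/STRIDE updates) on return. */
static inline void arm_plane_flip(struct drm_i915_private *dev_priv,
				  i915_reg_t surf_reg, u32 gtt_offset)
{
	I915_WRITE(surf_reg, gtt_offset);
	(void)I915_READ(surf_reg); /* posting read */
}
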
-static void intel_mmio_flip_work_func(struct work_struct *w)
-{
- struct intel_flip_work *work =
- container_of(w, struct intel_flip_work, mmio_work);
- struct intel_crtc *crtc = to_intel_crtc(work->crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_framebuffer *intel_fb =
- to_intel_framebuffer(crtc->base.primary->fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
-
- WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0);
-
- intel_pipe_update_start(crtc);
-
- if (INTEL_GEN(dev_priv) >= 9)
- skl_do_mmio_flip(crtc, work->rotation, work);
- else
- /* use_mmio_flip() restricts MMIO flips to ilk+ */
- ilk_do_mmio_flip(crtc, work);
-
- intel_pipe_update_end(crtc, work);
-}
-
-static int intel_default_queue_flip(struct drm_device *dev,
- struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj,
- struct drm_i915_gem_request *req,
- uint32_t flags)
-{
- return -ENODEV;
-}
-
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_flip_work *work)
@@ -10948,243 +10560,6 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
spin_unlock(&dev->event_lock);
}
-__maybe_unused
-static int intel_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *primary = crtc->primary;
- enum pipe pipe = intel_crtc->pipe;
- struct intel_flip_work *work;
- struct intel_engine_cs *engine;
- bool mmio_flip;
- struct drm_i915_gem_request *request;
- struct i915_vma *vma;
- int ret;
-
- /*
- * drm_mode_page_flip_ioctl() should already catch this, but double
- * check to be safe. In the future we may enable pageflipping from
- * a disabled primary plane.
- */
- if (WARN_ON(intel_fb_obj(old_fb) == NULL))
- return -EBUSY;
-
- /* Can't change pixel format via MI display flips. */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- /*
- * TILEOFF/LINOFF registers can't be changed via MI display flips.
- * Note that pitch changes could also affect these registers.
- */
- if (INTEL_GEN(dev_priv) > 3 &&
- (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
- fb->pitches[0] != crtc->primary->fb->pitches[0]))
- return -EINVAL;
-
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- goto out_hang;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (work == NULL)
- return -ENOMEM;
-
- work->event = event;
- work->crtc = crtc;
- work->old_fb = old_fb;
- INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
-
- ret = drm_crtc_vblank_get(crtc);
- if (ret)
- goto free_work;
-
- /* We borrow the event spin lock for protecting flip_work */
- spin_lock_irq(&dev->event_lock);
- if (intel_crtc->flip_work) {
- /* Before declaring the flip queue wedged, check if
- * the hardware completed the operation behind our backs.
- */
- if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
- DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
- page_flip_completed(intel_crtc);
- } else {
- DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
- spin_unlock_irq(&dev->event_lock);
-
- drm_crtc_vblank_put(crtc);
- kfree(work);
- return -EBUSY;
- }
- }
- intel_crtc->flip_work = work;
- spin_unlock_irq(&dev->event_lock);
-
- if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
- flush_workqueue(dev_priv->wq);
-
- /* Reference the objects for the scheduled work. */
- drm_framebuffer_reference(work->old_fb);
-
- crtc->primary->fb = fb;
- update_state_fb(crtc->primary);
-
- intel_fbc_pre_update(intel_crtc, intel_crtc->config,
- to_intel_plane_state(primary->state));
-
- work->pending_flip_obj = i915_gem_object_get(obj);
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto cleanup;
-
- intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
- if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
- ret = -EIO;
- goto unlock;
- }
-
- atomic_inc(&intel_crtc->unpin_work_count);
-
- if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
- work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- engine = dev_priv->engine[BCS];
- if (fb->modifier != old_fb->modifier)
- /* vlv: DISPLAY_FLIP fails to change tiling */
- engine = NULL;
- } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) {
- engine = dev_priv->engine[BCS];
- } else if (INTEL_GEN(dev_priv) >= 7) {
- engine = i915_gem_object_last_write_engine(obj);
- if (engine == NULL || engine->id != RCS)
- engine = dev_priv->engine[BCS];
- } else {
- engine = dev_priv->engine[RCS];
- }
-
- mmio_flip = use_mmio_flip(engine, obj);
-
- vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto cleanup_pending;
- }
-
- work->old_vma = to_intel_plane_state(primary->state)->vma;
- to_intel_plane_state(primary->state)->vma = vma;
-
- work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
- work->rotation = crtc->primary->state->rotation;
-
- if (mmio_flip) {
- INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
- queue_work(system_unbound_wq, &work->mmio_work);
- } else {
- request = i915_gem_request_alloc(engine,
- dev_priv->kernel_context);
- if (IS_ERR(request)) {
- ret = PTR_ERR(request);
- goto cleanup_unpin;
- }
-
- ret = i915_gem_request_await_object(request, obj, false);
- if (ret)
- goto cleanup_request;
-
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
- page_flip_flags);
- if (ret)
- goto cleanup_request;
-
- intel_mark_page_flip_active(intel_crtc, work);
-
- work->flip_queued_req = i915_gem_request_get(request);
- i915_add_request_no_flush(request);
- }
-
- i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
- i915_gem_track_fb(intel_fb_obj(old_fb), obj,
- to_intel_plane(primary)->frontbuffer_bit);
- mutex_unlock(&dev->struct_mutex);
-
- intel_frontbuffer_flip_prepare(to_i915(dev),
- to_intel_plane(primary)->frontbuffer_bit);
-
- trace_i915_flip_request(intel_crtc->plane, obj);
-
- return 0;
-
-cleanup_request:
- i915_add_request_no_flush(request);
-cleanup_unpin:
- to_intel_plane_state(primary->state)->vma = work->old_vma;
- intel_unpin_fb_vma(vma);
-cleanup_pending:
- atomic_dec(&intel_crtc->unpin_work_count);
-unlock:
- mutex_unlock(&dev->struct_mutex);
-cleanup:
- crtc->primary->fb = old_fb;
- update_state_fb(crtc->primary);
-
- i915_gem_object_put(obj);
- drm_framebuffer_unreference(work->old_fb);
-
- spin_lock_irq(&dev->event_lock);
- intel_crtc->flip_work = NULL;
- spin_unlock_irq(&dev->event_lock);
-
- drm_crtc_vblank_put(crtc);
-free_work:
- kfree(work);
-
- if (ret == -EIO) {
- struct drm_atomic_state *state;
- struct drm_plane_state *plane_state;
-
-out_hang:
- state = drm_atomic_state_alloc(dev);
- if (!state)
- return -ENOMEM;
- state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
-
-retry:
- plane_state = drm_atomic_get_plane_state(state, primary);
- ret = PTR_ERR_OR_ZERO(plane_state);
- if (!ret) {
- drm_atomic_set_fb_for_plane(plane_state, fb);
-
- ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
- if (!ret)
- ret = drm_atomic_commit(state);
- }
-
- if (ret == -EDEADLK) {
- drm_modeset_backoff(state->acquire_ctx);
- drm_atomic_state_clear(state);
- goto retry;
- }
-
- drm_atomic_state_put(state);
-
- if (ret == 0 && event) {
- spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&dev->event_lock);
- }
- }
- return ret;
-}
-
-
/**
* intel_wm_need_update - Check whether watermarks need updating
* @plane: drm plane
@@ -11890,8 +11265,11 @@ encoder_retry:
}
/* Dithering seems to not pass-through bits correctly when it should, so
- * only enable it on 6bpc panels. */
- pipe_config->dither = pipe_config->pipe_bpp == 6*3;
+ * only enable it on 6bpc panels and when it's not a compliance
+ * test requesting 6bpc video pattern.
+ */
+ pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
+ !pipe_config->dither_force_disable;
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -12491,10 +11869,8 @@ verify_crtc_state(struct drm_crtc *crtc,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
- if (active) {
- pipe_config->output_types |= 1 << encoder->type;
+ if (active)
encoder->get_config(encoder, pipe_config);
- }
}
intel_crtc_compute_pixel_rate(pipe_config);
@@ -13878,16 +13254,16 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *intel_cstate =
- to_intel_crtc_state(crtc->state);
struct intel_crtc_state *old_intel_cstate =
to_intel_crtc_state(old_crtc_state);
struct intel_atomic_state *old_intel_state =
to_intel_atomic_state(old_crtc_state->state);
- bool modeset = needs_modeset(crtc->state);
+ struct intel_crtc_state *intel_cstate =
+ intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+ bool modeset = needs_modeset(&intel_cstate->base);
/* Perform vblank evasion around commit operation */
- intel_pipe_update_start(intel_crtc);
+ intel_pipe_update_start(intel_cstate);
if (modeset)
goto out;
@@ -13912,8 +13288,12 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_crtc_state->state);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
- intel_pipe_update_end(intel_crtc, NULL);
+ intel_pipe_update_end(new_crtc_state, NULL);
}
/**
@@ -14775,7 +14155,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
} else if (HAS_PCH_SPLIT(dev_priv)) {
int found;
- dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D);
+ dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
if (has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
@@ -14818,14 +14198,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* trust the port type the VBT declares as we've seen at least
* HDMI ports that the VBT claims are DP or eDP.
*/
- has_edp = intel_dp_is_edp(dev_priv, PORT_B);
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
- has_edp = intel_dp_is_edp(dev_priv, PORT_C);
+ has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
@@ -15343,34 +14723,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
dev_priv->display.update_crtcs = intel_update_crtcs;
-
- switch (INTEL_INFO(dev_priv)->gen) {
- case 2:
- dev_priv->display.queue_flip = intel_gen2_queue_flip;
- break;
-
- case 3:
- dev_priv->display.queue_flip = intel_gen3_queue_flip;
- break;
-
- case 4:
- case 5:
- dev_priv->display.queue_flip = intel_gen4_queue_flip;
- break;
-
- case 6:
- dev_priv->display.queue_flip = intel_gen6_queue_flip;
- break;
- case 7:
- case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
- dev_priv->display.queue_flip = intel_gen7_queue_flip;
- break;
- case 9:
- /* Drop through - unsupported since execlist only. */
- default:
- /* Default just returns -ENODEV to indicate unsupported */
- dev_priv->display.queue_flip = intel_default_queue_flip;
- }
}
/*
@@ -15867,7 +15219,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
/* Clear any frame start delays used for debugging left by the BIOS */
- if (!transcoder_is_dsi(cpu_transcoder)) {
+ if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
i915_reg_t reg = PIPECONF(cpu_transcoder);
I915_WRITE(reg,
@@ -16107,7 +15459,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
- crtc_state->output_types |= 1 << encoder->type;
encoder->get_config(encoder, crtc_state);
} else {
encoder->base.crtc = NULL;
@@ -16293,6 +15644,7 @@ void intel_display_resume(struct drm_device *dev)
if (!ret)
ret = __intel_display_resume(dev, state, &ctx);
+ intel_enable_ipc(dev_priv);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
new file mode 100644
index 00000000000000..8627080b573b64
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -0,0 +1,313 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+ TRANSCODER_A = 0,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_A,
+ TRANSCODER_DSI_C,
+
+ I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+ switch (transcoder) {
+ case TRANSCODER_A:
+ return "A";
+ case TRANSCODER_B:
+ return "B";
+ case TRANSCODER_C:
+ return "C";
+ case TRANSCODER_EDP:
+ return "EDP";
+ case TRANSCODER_DSI_A:
+ return "DSI A";
+ case TRANSCODER_DSI_C:
+ return "DSI C";
+ default:
+ return "<invalid>";
+ }
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+ return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum plane {
+ PLANE_A,
+ PLANE_B,
+ PLANE_C,
+};
+#define plane_name(p) ((p) + 'A')
+
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+ for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+ for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+
+ I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+/*
+ * Port identifiers referenced by other drivers.
+ * Expected to remain stable over time.
+ */
+static inline const char *port_identifier(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return "Port A";
+ case PORT_B:
+ return "Port B";
+ case PORT_C:
+ return "Port C";
+ case PORT_D:
+ return "Port D";
+ case PORT_E:
+ return "Port E";
+ default:
+ return "<invalid>";
+ }
+}
+
+#define I915_NUM_PHYS_VLV 2
+
+enum dpio_channel {
+ DPIO_CH0,
+ DPIO_CH1
+};
+
+enum dpio_phy {
+ DPIO_PHY0,
+ DPIO_PHY1
+};
+
+enum intel_display_power_domain {
+ POWER_DOMAIN_PIPE_A,
+ POWER_DOMAIN_PIPE_B,
+ POWER_DOMAIN_PIPE_C,
+ POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+ POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+ POWER_DOMAIN_TRANSCODER_A,
+ POWER_DOMAIN_TRANSCODER_B,
+ POWER_DOMAIN_TRANSCODER_C,
+ POWER_DOMAIN_TRANSCODER_EDP,
+ POWER_DOMAIN_TRANSCODER_DSI_A,
+ POWER_DOMAIN_TRANSCODER_DSI_C,
+ POWER_DOMAIN_PORT_DDI_A_LANES,
+ POWER_DOMAIN_PORT_DDI_B_LANES,
+ POWER_DOMAIN_PORT_DDI_C_LANES,
+ POWER_DOMAIN_PORT_DDI_D_LANES,
+ POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DSI,
+ POWER_DOMAIN_PORT_CRT,
+ POWER_DOMAIN_PORT_OTHER,
+ POWER_DOMAIN_VGA,
+ POWER_DOMAIN_AUDIO,
+ POWER_DOMAIN_PLLS,
+ POWER_DOMAIN_AUX_A,
+ POWER_DOMAIN_AUX_B,
+ POWER_DOMAIN_AUX_C,
+ POWER_DOMAIN_AUX_D,
+ POWER_DOMAIN_GMBUS,
+ POWER_DOMAIN_MODESET,
+ POWER_DOMAIN_INIT,
+
+ POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+ ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+ ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+ (tran) + POWER_DOMAIN_TRANSCODER_A)
+
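The three macros above lean on the enum ordering declared earlier in this header (pipes, panel fitters, and transcoders each laid out consecutively, with eDP special-cased). A compile-time sanity check makes the assumption explicit; this is a sketch, not part of the header:

/* Sketch: BUILD_BUG_ON fires at compile time if the enum layout the
 * POWER_DOMAIN_* macros assume ever changes. */
static inline void power_domain_layout_check(void)
{
	BUILD_BUG_ON(POWER_DOMAIN_PIPE(PIPE_C) != POWER_DOMAIN_PIPE_C);
	BUILD_BUG_ON(POWER_DOMAIN_TRANSCODER(TRANSCODER_B) !=
		     POWER_DOMAIN_TRANSCODER_B);
	BUILD_BUG_ON(POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP) !=
		     POWER_DOMAIN_TRANSCODER_EDP);
}
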
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+ u32 tu;
+ u32 gmch_m;
+ u32 gmch_n;
+ u32 link_m;
+ u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+ for_each_if ((__mask) & (1 << (__p)))
+
+#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
+ for ((__t) = 0; (__t) < I915_MAX_TRANSCODERS; (__t)++) \
+ for_each_if ((__mask) & (1 << (__t)))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
+ for ((__p) = 0; \
+ (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+ (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+ for ((__s) = 0; \
+ (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+ (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+ for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+ for_each_if ((__ports_mask) & (1 << (__port)))
+
+#define for_each_crtc(dev, crtc) \
+ list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if ((plane_mask) & \
+ (1 << drm_plane_index(&intel_plane->base)))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &(dev)->mode_config.plane_list, \
+ base.head) \
+ for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+ list_for_each_entry(intel_crtc, \
+ &(dev)->mode_config.crtc_list, \
+ base.head) \
+ for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+ list_for_each_entry(intel_encoder, \
+ &(dev)->mode_config.encoder_list, \
+ base.head)
+
+#define for_each_intel_connector(dev, intel_connector) \
+ list_for_each_entry(intel_connector, \
+ &(dev)->mode_config.connector_list, \
+ base.head)
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+ for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+ list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+ for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask) \
+ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+ for_each_if ((1 << (domain)) & (mask))
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if (crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+ (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+ (__i)++) \
+ for_each_if(plane)
+
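The two state iterators above hand back each object together with its old/new state snapshots and its index. A hypothetical usage sketch (function and variable names assumed, not from the driver):

/* Hypothetical: walk every plane in an atomic state and log which
 * ones change framebuffer in this commit. */
static void log_fb_changes(struct intel_atomic_state *state)
{
	struct intel_plane *plane;
	struct intel_plane_state *old_ps, *new_ps;
	int i;

	for_each_oldnew_intel_plane_in_state(state, plane, old_ps,
					     new_ps, i) {
		if (old_ps->base.fb != new_ps->base.fb)
			DRM_DEBUG_KMS("plane %d: fb change queued\n", i);
	}
}
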
+void intel_link_compute_m_n(int bpp, int nlanes,
+ int pixel_clock, int link_clock,
+ struct intel_link_m_n *m_n,
+ bool reduce_m_n);
+
+#endif
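intel_link_compute_m_n() fills the intel_link_m_n struct declared above with the standard DisplayPort ratio pairs. Roughly, glossing over the ratio reduction and the reduce_m_n quirk handling the real helper performs (a hedged reconstruction, not the actual implementation):

/* Sketch of the M/N arithmetic:
 *   data M/N = (bpp * pixel_clock) / (nlanes * 8 * link_clock)
 *   link M/N =  pixel_clock / link_clock
 * The hardware divides M by N to pace pixels against link symbols. */
static void sketch_compute_m_n(int bpp, int nlanes, int pixel_clock,
			       int link_clock, struct intel_link_m_n *m_n)
{
	m_n->tu = 64; /* typical transfer unit size */
	m_n->gmch_m = bpp * pixel_clock;
	m_n->gmch_n = link_clock * nlanes * 8;
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
}
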
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d493c5cee581d2..3bc2a312821483 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,9 +28,10 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
-#include <media/cec.h>
+#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -101,13 +102,13 @@ static const int skl_rates[] = { 162000, 216000, 270000,
static const int default_rates[] = { 162000, 270000, 540000 };
/**
- * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
+ * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*/
-static bool is_edp(struct intel_dp *intel_dp)
+bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -162,22 +163,27 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = num_rates;
}
-static int intel_dp_max_sink_rate(struct intel_dp *intel_dp)
+/* Theoretical max between source and sink */
+static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
- return intel_dp->sink_rates[intel_dp->num_sink_rates - 1];
+ return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
-static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
+/* Theoretical max between source and sink */
+static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- u8 source_max, sink_max;
-
- source_max = intel_dig_port->max_lanes;
- sink_max = intel_dp->max_sink_lane_count;
+ int source_max = intel_dig_port->max_lanes;
+ int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
return min(source_max, sink_max);
}
+int intel_dp_max_lane_count(struct intel_dp *intel_dp)
+{
+ return intel_dp->max_link_lane_count;
+}
+
int
intel_dp_link_required(int pixel_clock, int bpp)
{
@@ -285,17 +291,29 @@ static int intel_dp_rate_index(const int *rates, int len, int rate)
return -1;
}
-static int intel_dp_common_rates(struct intel_dp *intel_dp,
- int *common_rates)
+static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
- int max_rate = intel_dp->max_sink_link_rate;
- int i, common_len;
+ WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);
- common_len = intersect_rates(intel_dp->source_rates,
- intel_dp->num_source_rates,
- intel_dp->sink_rates,
- intel_dp->num_sink_rates,
- common_rates);
+ intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
+ intel_dp->num_source_rates,
+ intel_dp->sink_rates,
+ intel_dp->num_sink_rates,
+ intel_dp->common_rates);
+
+ /* Paranoia, there should always be something in common. */
+ if (WARN_ON(intel_dp->num_common_rates == 0)) {
+ intel_dp->common_rates[0] = default_rates[0];
+ intel_dp->num_common_rates = 1;
+ }
+}
+
+/* get length of common rates potentially limited by max_rate */
+static int intel_dp_common_len_rate_limit(struct intel_dp *intel_dp,
+ int max_rate)
+{
+ const int *common_rates = intel_dp->common_rates;
+ int i, common_len = intel_dp->num_common_rates;
/* Limit results by potentially reduced max rate */
for (i = 0; i < common_len; i++) {
@@ -306,31 +324,20 @@ static int intel_dp_common_rates(struct intel_dp *intel_dp,
return 0;
}
-static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
- int *common_rates, int link_rate)
-{
- int common_len;
-
- common_len = intel_dp_common_rates(intel_dp, common_rates);
-
- return intel_dp_rate_index(common_rates, common_len, link_rate);
-}
-
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int link_rate, uint8_t lane_count)
{
- int common_rates[DP_MAX_SUPPORTED_RATES];
- int link_rate_index;
+ int index;
- link_rate_index = intel_dp_link_rate_index(intel_dp,
- common_rates,
- link_rate);
- if (link_rate_index > 0) {
- intel_dp->max_sink_link_rate = common_rates[link_rate_index - 1];
- intel_dp->max_sink_lane_count = lane_count;
+ index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ link_rate);
+ if (index > 0) {
+ intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
+ intel_dp->max_link_lane_count = lane_count;
} else if (lane_count > 1) {
- intel_dp->max_sink_link_rate = intel_dp_max_sink_rate(intel_dp);
- intel_dp->max_sink_lane_count = lane_count >> 1;
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
+ intel_dp->max_link_lane_count = lane_count >> 1;
} else {
DRM_ERROR("Link Training Unsuccessful\n");
return -1;
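Worked example of the fallback ladder this implements: with common rates {162000, 270000, 540000} kHz and a link that first fails training at 540000 kHz x 4 lanes, repeated calls step down through 270000x4 and 162000x4; once the lowest rate fails, the lane count is halved and the rate reset to the common maximum, giving 540000x2 ... 162000x2, then 540000x1 ... 162000x1, after which the function returns -1 because every rate/lane combination has been exhausted.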
@@ -352,7 +359,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
- if (is_edp(intel_dp) && fixed_mode) {
+ if (intel_dp_is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
@@ -528,7 +535,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!is_edp(intel_dp));
+ WARN_ON(!intel_dp_is_edp(intel_dp));
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -588,7 +595,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!is_edp(intel_dp));
+ WARN_ON(!intel_dp_is_edp(intel_dp));
/*
* TODO: BXT has 2 PPS instances. The correct port->PPS instance
@@ -784,7 +791,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!is_edp(intel_dp) || code != SYS_RESTART)
+ if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
return 0;
pps_lock(intel_dp);
@@ -844,7 +851,7 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
@@ -1502,8 +1509,6 @@ static void snprintf_int_array(char *str, size_t len,
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
- int common_len;
- int common_rates[DP_MAX_SUPPORTED_RATES];
char str[128]; /* FIXME: too big for stack? */
if ((drm_debug & DRM_UT_KMS) == 0)
@@ -1517,22 +1522,21 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp)
intel_dp->sink_rates, intel_dp->num_sink_rates);
DRM_DEBUG_KMS("sink rates: %s\n", str);
- common_len = intel_dp_common_rates(intel_dp, common_rates);
- snprintf_int_array(str, sizeof(str), common_rates, common_len);
+ snprintf_int_array(str, sizeof(str),
+ intel_dp->common_rates, intel_dp->num_common_rates);
DRM_DEBUG_KMS("common rates: %s\n", str);
}
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
- int rates[DP_MAX_SUPPORTED_RATES] = {};
int len;
- len = intel_dp_common_rates(intel_dp, rates);
+ len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
if (WARN_ON(len <= 0))
return 162000;
- return rates[len - 1];
+ return intel_dp->common_rates[len - 1];
}
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
@@ -1571,6 +1575,13 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
if (bpc > 0)
bpp = min(bpp, 3*bpc);
+ /* For DP Compliance we override the computed bpp for the pipe */
+ if (intel_dp->compliance.test_data.bpc != 0) {
+ pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
+ pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
+ DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
+ pipe_config->pipe_bpp);
+ }
return bpp;
}
@@ -1591,15 +1602,16 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Conveniently, the link BW constants become indices with a shift...*/
int min_clock = 0;
int max_clock;
+ int link_rate_index;
int bpp, mode_rate;
int link_avail, link_clock;
- int common_rates[DP_MAX_SUPPORTED_RATES] = {};
int common_len;
uint8_t link_bw, rate_select;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
- common_len = intel_dp_common_rates(intel_dp, common_rates);
+ common_len = intel_dp_common_len_rate_limit(intel_dp,
+ intel_dp->max_link_rate);
/* No common link rates between source and sink */
WARN_ON(common_len <= 0);
@@ -1612,7 +1624,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_drrs = false;
pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
- if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
+ if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
@@ -1634,15 +1646,24 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
+ /* Use values requested by Compliance Test Request */
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ link_rate_index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (link_rate_index >= 0)
+ min_clock = max_clock = link_rate_index;
+ min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
+ }
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %d pixel clock %iKHz\n",
- max_lane_count, common_rates[max_clock],
+ max_lane_count, intel_dp->common_rates[max_clock],
adjusted_mode->crtc_clock);
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
/* Get bpp from vbt only for panels that dont have bpp in edid */
if (intel_connector->base.display_info.bpc == 0 &&
@@ -1672,7 +1693,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
lane_count <= max_lane_count;
lane_count <<= 1) {
- link_clock = common_rates[clock];
+ link_clock = intel_dp->common_rates[clock];
link_avail = intel_dp_max_data_rate(link_clock,
lane_count);
@@ -1702,7 +1723,7 @@ found:
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = common_rates[clock];
+ pipe_config->port_clock = intel_dp->common_rates[clock];
intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
&link_bw, &rate_select);
@@ -1729,30 +1750,11 @@ found:
reduce_m_n);
}
- /*
- * DPLL0 VCO may need to be adjusted to get the correct
- * clock for eDP. This will affect cdclk as well.
- */
- if (is_edp(intel_dp) &&
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
- int vco;
-
- switch (pipe_config->port_clock / 2) {
- case 108000:
- case 216000:
- vco = 8640000;
- break;
- default:
- vco = 8100000;
- break;
- }
-
- to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
- }
-
if (!HAS_DDI(dev_priv))
intel_dp_set_clock(encoder, pipe_config);
+ intel_psr_compute_config(intel_dp, pipe_config);
+
return true;
}
@@ -1976,7 +1978,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return false;
cancel_delayed_work(&intel_dp->panel_vdd_work);
@@ -2027,7 +2029,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
bool vdd;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2114,7 +2116,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
@@ -2137,7 +2139,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
@@ -2178,7 +2180,7 @@ static void edp_panel_on(struct intel_dp *intel_dp)
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2199,7 +2201,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
@@ -2231,7 +2233,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2272,7 +2274,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@@ -2289,7 +2291,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
u32 pp;
i915_reg_t pp_ctrl_reg;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
pps_lock(intel_dp);
@@ -2311,7 +2313,7 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
DRM_DEBUG_KMS("\n");
@@ -2523,6 +2525,11 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
+ else
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
+
tmp = I915_READ(intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
@@ -2572,7 +2579,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dotclock_calculate(pipe_config->port_clock,
&pipe_config->dp_m_n);
- if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
+ if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
/*
* This is a big fat ugly hack.
@@ -2603,8 +2610,8 @@ static void intel_disable_dp(struct intel_encoder *encoder,
if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
- if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
- intel_psr_disable(intel_dp);
+ if (!HAS_DDI(dev_priv))
+ intel_psr_disable(intel_dp, old_crtc_state);
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
@@ -2835,7 +2842,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
intel_edp_backlight_on(intel_dp);
- intel_psr_enable(intel_dp);
+ intel_psr_enable(intel_dp, pipe_config);
}
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
@@ -2924,7 +2931,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
if (intel_dp->pps_pipe == crtc->pipe)
@@ -3012,32 +3019,6 @@ intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_
DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
-static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
-{
- uint8_t psr_caps = 0;
-
- drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
- return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
-}
-
-static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
-{
- uint8_t dprx = 0;
-
- drm_dp_dpcd_readb(&intel_dp->aux,
- DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dprx);
- return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
-}
-
-bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
-{
- uint8_t alpm_caps = 0;
-
- drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
- return alpm_caps & DP_ALPM_CAP;
-}
-
/* These are source-specific values. */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
@@ -3589,53 +3570,29 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
- /* Check if the panel supports PSR */
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
- intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
- if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
- dev_priv->psr.sink_support = true;
- DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
- }
-
- if (INTEL_GEN(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
- uint8_t frame_sync_cap;
-
- dev_priv->psr.sink_support = true;
- drm_dp_dpcd_read(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
- &frame_sync_cap, 1);
- dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
- /* PSR2 needs frame sync as well */
- dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
- DRM_DEBUG_KMS("PSR2 %s on sink",
- dev_priv->psr.psr2_support ? "supported" : "not supported");
-
- /* TODO(b/67599437) PSR2 is broken */
- dev_priv->psr.psr2_support = false;
-
- if (dev_priv->psr.psr2_support) {
- dev_priv->psr.y_cord_support =
- intel_dp_get_y_cord_status(intel_dp);
- dev_priv->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
- dev_priv->psr.alpm =
- intel_dp_get_alpm_status(intel_dp);
- }
-
- }
-
- /* Read the eDP Display control capabilities registers */
- if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
- drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
+ /*
+ * Read the eDP display control registers.
+ *
+ * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
+ * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
+ * set, but require eDP 1.4+ detection (e.g. for supported link rates
+ * method). The display control registers should read zero if they're
+ * not supported anyway.
+ */
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
+ DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
intel_dp->edp_dpcd);
- /* Intermediate frequency support */
- if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDp v1.4 or higher */
+ /*
+ * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
+ * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
+ */
+ intel_psr_init_dpcd(intel_dp);
+
+ /* Read the eDP 1.4+ supported link rates. */
+ if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
int i;
@@ -3659,11 +3616,17 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp->num_sink_rates = i;
}
+ /*
+ * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
+ * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
+ */
if (intel_dp->num_sink_rates)
intel_dp->use_rate_select = true;
else
intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+
return true;
}
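The eDP 1.4+ branch above reads the DP_SUPPORTED_LINK_RATES table as raw little-endian words; the conversion loop elided between the two hunks amounts, in the upstream driver, to roughly the following (shown for illustration only):

/* Each __le16 entry is the per-lane bandwidth in 200 kHz units
 * (RBR = 1.62 Gbps = raw 8100); dividing the 8b/10b bit rate by 10
 * yields the link-clock form the driver stores (8100 -> 162000).
 * A zero entry terminates the table. */
for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
	int val = le16_to_cpu(sink_rates[i]);

	if (val == 0)
		break;
	intel_dp->sink_rates[i] = (val * 200) / 10;
}
intel_dp->num_sink_rates = i;
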
@@ -3675,11 +3638,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
return false;
/* Don't clobber cached eDP rates. */
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp)) {
intel_dp_set_sink_rates(intel_dp);
+ intel_dp_set_common_rates(intel_dp);
+ }
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
- &intel_dp->sink_count, 1) < 0)
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT,
+ &intel_dp->sink_count) <= 0)
return false;
/*
@@ -3696,7 +3661,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* downstream port information. So, an early return here saves
* time from performing other operations which are not required.
*/
- if (!is_edp(intel_dp) && !intel_dp->sink_count)
+ if (!intel_dp_is_edp(intel_dp) && !intel_dp->sink_count)
return false;
if (!drm_dp_is_branch(intel_dp->dpcd))
@@ -3716,7 +3681,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
static bool
intel_dp_can_mst(struct intel_dp *intel_dp)
{
- u8 buf[1];
+ u8 mstm_cap;
if (!i915.enable_dp_mst)
return false;
@@ -3727,10 +3692,10 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
return false;
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
return false;
- return buf[0] & DP_MST_CAP;
+ return mstm_cap & DP_MST_CAP;
}
static void
@@ -3876,9 +3841,8 @@ stop:
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
- return drm_dp_dpcd_read(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- sink_irq_vector, 1) == 1;
+ return drm_dp_dpcd_readb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector) == 1;
}
static bool
@@ -3897,19 +3861,111 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_ACK;
- return test_result;
+ int status = 0;
+ int min_lane_count = 1;
+ int link_rate_index, test_link_rate;
+ uint8_t test_lane_count, test_link_bw;
+ /* (DP CTS 1.2)
+ * 4.3.1.11
+ */
+ /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
+ &test_lane_count);
+
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Lane count read failed\n");
+ return DP_TEST_NAK;
+ }
+ test_lane_count &= DP_MAX_LANE_COUNT_MASK;
+ /* Validate the requested lane count */
+ if (test_lane_count < min_lane_count ||
+ test_lane_count > intel_dp->max_link_lane_count)
+ return DP_TEST_NAK;
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
+ &test_link_bw);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Link Rate read failed\n");
+ return DP_TEST_NAK;
+ }
+ /* Validate the requested link rate */
+ test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+ link_rate_index = intel_dp_rate_index(intel_dp->common_rates,
+ intel_dp->num_common_rates,
+ test_link_rate);
+ if (link_rate_index < 0)
+ return DP_TEST_NAK;
+
+ intel_dp->compliance.test_lane_count = test_lane_count;
+ intel_dp->compliance.test_link_rate = test_link_rate;
+
+ return DP_TEST_ACK;
}
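The TEST_LINK_RATE DPCD field holds a bandwidth code rather than a rate, which is why the helper converts it before the common-rates lookup. For reference, the mapping used by the drm core helper is a fixed multiplier:

/* drm_dp_bw_code_to_link_rate(): code * 27000 kHz, e.g.
 *   DP_LINK_BW_1_62 (0x06) -> 162000
 *   DP_LINK_BW_2_7  (0x0a) -> 270000
 *   DP_LINK_BW_5_4  (0x14) -> 540000
 */
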
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_NAK;
- return test_result;
+ uint8_t test_pattern;
+ uint16_t test_misc;
+ __be16 h_width, v_height;
+ int status = 0;
+
+ /* Read the TEST_PATTERN (DP CTS 3.1.5) */
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
+ &test_pattern, 1);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Test pattern read failed\n");
+ return DP_TEST_NAK;
+ }
+ if (test_pattern != DP_COLOR_RAMP)
+ return DP_TEST_NAK;
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
+ &h_width, 2);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("H Width read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
+ &v_height, 2);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("V Height read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
+ &test_misc, 1);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("TEST MISC read failed\n");
+ return DP_TEST_NAK;
+ }
+ if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
+ return DP_TEST_NAK;
+ if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
+ return DP_TEST_NAK;
+ switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
+ case DP_TEST_BIT_DEPTH_6:
+ intel_dp->compliance.test_data.bpc = 6;
+ break;
+ case DP_TEST_BIT_DEPTH_8:
+ intel_dp->compliance.test_data.bpc = 8;
+ break;
+ default:
+ return DP_TEST_NAK;
+ }
+
+ intel_dp->compliance.test_data.video_pattern = test_pattern;
+ intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
+ intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = 1;
+
+ return DP_TEST_ACK;
}
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_NAK;
+ uint8_t test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -3937,14 +3993,12 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
*/
block += intel_connector->detect_edid->extensions;
- if (!drm_dp_dpcd_write(&intel_dp->aux,
- DP_TEST_EDID_CHECKSUM,
- &block->checksum,
- 1))
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
+ block->checksum) <= 0)
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
- intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
}
/* Set test active flag here so userspace doesn't interrupt things */
@@ -4012,12 +4066,14 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
int ret = 0;
int retry;
bool handled;
+
+ WARN_ON_ONCE(intel_dp->active_mst_links < 0);
bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
if (bret == true) {
/* check link status - esi[10] = 0x200c */
- if (intel_dp->active_mst_links &&
+ if (intel_dp->active_mst_links > 0 &&
!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
intel_dp_start_link_train(intel_dp);
@@ -4109,9 +4165,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!intel_dp->lane_count)
return;
- /* if link training is requested we should perform it always */
- if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) ||
- (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
+ /* Retrain if Channel EQ or CR not ok */
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
@@ -4136,6 +4191,7 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4169,7 +4225,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
sink_irq_vector);
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
+ intel_dp_handle_test_request(intel_dp);
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
@@ -4177,6 +4233,13 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
+ /* Send a Hotplug Uevent to userspace to start modeset */
+ drm_kms_helper_hotplug_event(intel_encoder->base.dev);
+ }
+
+ intel_psr_short_pulse(intel_dp);
return true;
}
@@ -4191,7 +4254,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
- if (is_edp(intel_dp))
+ if (intel_dp_is_edp(intel_dp))
return connector_status_connected;
/* if there's no downstream port, we're done */
@@ -4455,7 +4518,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_display_power_get(to_i915(dev), power_domain);
/* Can't disconnect eDP, but you can close the lid... */
- if (is_edp(intel_dp))
+ if (intel_dp_is_edp(intel_dp))
status = edp_detect(intel_dp);
else if (intel_digital_port_connected(to_i915(dev),
dp_to_dig_port(intel_dp)))
@@ -4486,11 +4549,11 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
if (intel_dp->reset_link_params) {
- /* Set the max lane count for sink */
- intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ /* Initial max link lane count */
+ intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
- /* Set the max link rate for sink */
- intel_dp->max_sink_link_rate = intel_dp_max_sink_rate(intel_dp);
+ /* Initial max link rate */
+ intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
intel_dp->reset_link_params = false;
}
@@ -4539,7 +4602,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
intel_dp->aux.i2c_defer_count = 0;
intel_dp_set_edid(intel_dp);
- if (is_edp(intel_dp) || intel_connector->detect_edid)
+ if (intel_dp_is_edp(intel_dp) || intel_connector->detect_edid)
status = connector_status_connected;
intel_dp->detect_done = true;
@@ -4642,7 +4705,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
}
/* if eDP has no EDID, fall back to fixed mode */
- if (is_edp(intel_attached_dp(connector)) &&
+ if (intel_dp_is_edp(intel_attached_dp(connector)) &&
intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
@@ -4733,7 +4796,7 @@ intel_dp_set_property(struct drm_connector *connector,
goto done;
}
- if (is_edp(intel_dp) &&
+ if (intel_dp_is_edp(intel_dp) &&
property == connector->dev->mode_config.scaling_mode_property) {
if (val == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
@@ -4822,8 +4885,10 @@ intel_dp_connector_destroy(struct drm_connector *connector)
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
- /* Can't call is_edp() since the encoder may have been destroyed
- * already. */
+ /*
+ * Can't call intel_dp_is_edp() since the encoder may have been
+ * destroyed already.
+ */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
intel_panel_fini(&intel_connector->panel);
@@ -4837,7 +4902,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
struct intel_dp *intel_dp = &intel_dig_port->dp;
intel_dp_mst_encoder_cleanup(intel_dig_port);
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
/*
* vdd might still be enabled due to the delayed vdd off.
@@ -4863,7 +4928,7 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return;
/*
@@ -5252,7 +5317,7 @@ put_power:
}
/* check the VBT to see whether the eDP is on another port */
-bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
/*
* eDP not supported on g4x, so bail out early just
@@ -5267,7 +5332,7 @@ bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
return intel_bios_is_port_edp(dev_priv, port);
}
-void
+static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -5276,7 +5341,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_broadcast_rgb_property(connector);
intel_dp->color_range_auto = true;
- if (is_edp(intel_dp)) {
+ if (intel_dp_is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
drm_object_attach_property(
&connector->base,
@@ -5541,11 +5606,6 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- /*
- * FIXME: This needs proper synchronization with psr state for some
- * platforms that cannot have PSR and DRRS enabled at the same time.
- */
-
dig_port = dp_to_dig_port(intel_dp);
encoder = &dig_port->base;
intel_crtc = to_intel_crtc(encoder->base.crtc);
@@ -5628,6 +5688,11 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
return;
}
+ if (dev_priv->psr.enabled) {
+ DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ return;
+ }
+
mutex_lock(&dev_priv->drrs.mutex);
if (WARN_ON(dev_priv->drrs.dp)) {
DRM_ERROR("DRRS already enabled\n");
@@ -5897,7 +5962,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct edid *edid;
enum pipe pipe = INVALID_PIPE;
- if (!is_edp(intel_dp))
+ if (!intel_dp_is_edp(intel_dp))
return true;
pps_lock(intel_dp);
@@ -5995,6 +6060,29 @@ out_vdd_off:
return false;
}
+static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
+{
+ struct intel_connector *intel_connector;
+ struct drm_connector *connector;
+
+ intel_connector = container_of(work, typeof(*intel_connector),
+ modeset_retry_work);
+ connector = &intel_connector->base;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
+
+ /* Grab the locks before changing connector property */
+ mutex_lock(&connector->dev->mode_config.mutex);
+ /*
+ * Set connector link status to BAD and send a Uevent to notify
+ * userspace to do a modeset.
+ */
+ drm_mode_connector_set_link_status_property(connector,
+ DRM_MODE_LINK_STATUS_BAD);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+ /* Send Hotplug uevent so userspace can reprobe */
+ drm_kms_helper_hotplug_event(connector->dev);
+}
+
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
@@ -6007,6 +6095,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
enum port port = intel_dig_port->port;
int type;
+ /* Initialize the work for modeset in case of link train failure */
+ INIT_WORK(&intel_connector->modeset_retry_work,
+ intel_dp_modeset_retry_work_fn);
+
if (WARN(intel_dig_port->max_lanes < 1,
"Not enough lanes (%d) for DP on port %c\n",
intel_dig_port->max_lanes, port_name(port)))
@@ -6039,7 +6131,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
- if (intel_dp_is_edp(dev_priv, port))
+ if (intel_dp_is_port_edp(dev_priv, port))
type = DRM_MODE_CONNECTOR_eDP;
else
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -6054,7 +6146,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
/* eDP only on port B and/or C on vlv/chv */
if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- is_edp(intel_dp) && port != PORT_B && port != PORT_C))
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
DRM_DEBUG_KMS("Adding %s connector on port %c\n",
@@ -6103,7 +6196,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
}
/* init MST on ports that can support it */
- if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
+ if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
intel_dp_mst_encoder_init(intel_dig_port,
intel_connector->base.base.id);
@@ -6116,7 +6209,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp_add_properties(intel_dp, connector);
- if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(dev_priv, port)) {
+ if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_port_edp(dev_priv, port)) {
int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
if (ret)
DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 694ad0ffb5235e..04a6cbd54935d0 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -129,7 +129,8 @@ static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
uint8_t voltage;
- int voltage_tries, max_vswing_tries;
+ int voltage_tries, cr_tries, max_cr_tries;
+ bool max_vswing_reached = false;
uint8_t link_config[2];
uint8_t link_bw, rate_select;
@@ -165,9 +166,21 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
return false;
}
+ /*
+ * The DP 1.4 spec defines the max clock recovery retries value
+ * as 10, but for pre-DP 1.4 devices we set a very tolerant
+ * retry limit of 80 (4 voltage levels x 4 preemphasis levels
+ * x 5 identical voltage retries). Since the previous specs didn't
+ * define a limit and created the possibility of an infinite loop,
+ * we want to prevent any sink from triggering that corner case.
+ */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
+ max_cr_tries = 10;
+ else
+ max_cr_tries = 80;
+
voltage_tries = 1;
- max_vswing_tries = 0;
- for (;;) {
+ for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
uint8_t link_status[DP_LINK_STATUS_SIZE];
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
@@ -187,7 +200,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
return false;
}
- if (max_vswing_tries == 1) {
+ if (max_vswing_reached) {
DRM_DEBUG_KMS("Max Voltage Swing reached\n");
return false;
}
@@ -208,9 +221,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
voltage_tries = 1;
if (intel_dp_link_max_vswing_reached(intel_dp))
- ++max_vswing_tries;
+ max_vswing_reached = true;
}
+ DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ return false;
}
/*
@@ -314,6 +329,31 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp)
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
- intel_dp_link_training_clock_recovery(intel_dp);
- intel_dp_link_training_channel_equalization(intel_dp);
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+
+ /* eDP can be flaky, especially on BYT */
+ if (intel_dp_is_edp(intel_dp)) {
+ intel_dp_link_training_clock_recovery(intel_dp);
+ intel_dp_link_training_channel_equalization(intel_dp);
+ return;
+ }
+
+ if (!intel_dp_link_training_clock_recovery(intel_dp))
+ goto failure_handling;
+ if (!intel_dp_link_training_channel_equalization(intel_dp))
+ goto failure_handling;
+
+ DRM_DEBUG_KMS("Link Training Passed at Link Rate = %d, Lane count = %d",
+ intel_dp->link_rate, intel_dp->lane_count);
+ return;
+
+ failure_handling:
+ DRM_DEBUG_KMS("Link Training failed at link rate = %d, lane count = %d",
+ intel_dp->link_rate, intel_dp->lane_count);
+ if (!intel_dp_get_link_train_fallback_values(intel_dp,
+ intel_dp->link_rate,
+ intel_dp->lane_count))
+ /* Schedule a Hotplug Uevent to userspace to start modeset */
+ schedule_work(&intel_connector->modeset_retry_work);
+ return;
}
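
For context, the fallback values come from intel_dp_get_link_train_fallback_values(),
which is not part of this hunk. A minimal sketch of its ladder, assuming the
common_rates bookkeeping added elsewhere in this series (illustrative, not a
verbatim copy of the driver):

static int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
						   int link_rate,
						   uint8_t lane_count)
{
	/* Index of the failed rate in the source/sink common rates table. */
	int index = intel_dp_rate_index(intel_dp->common_rates,
					intel_dp->num_common_rates, link_rate);

	if (index > 0) {
		/* Step down to the next lower common rate, same lane count. */
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Lowest rate failed: halve the lanes, reset to the max rate. */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}

On success the caller schedules modeset_retry_work, which marks the connector
link-status BAD and fires the hotplug uevent handled earlier in this patch.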
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9d46bf452beeb9..354d36ef7ac86d 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -37,9 +37,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
struct intel_digital_port *intel_dig_port = intel_mst->primary;
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_atomic_state *state;
+ struct drm_connector *connector = conn_state->connector;
+ void *port = to_intel_connector(connector)->port;
+ struct drm_atomic_state *state = pipe_config->base.state;
int bpp;
- int lane_count, slots;
+ int lane_count, slots = 0;
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int mst_pbn;
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
@@ -47,23 +49,40 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = false;
bpp = 24;
+ if (intel_dp->compliance.test_data.bpc) {
+ bpp = intel_dp->compliance.test_data.bpc * 3;
+ DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
+ bpp);
+ }
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
-
+ lane_count = intel_dp_max_lane_count(intel_dp);
pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = 24;
+ pipe_config->pipe_bpp = bpp;
+
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
- state = pipe_config->base.state;
+ if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
+ pipe_config->has_audio = true;
mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
-
pipe_config->pbn = mst_pbn;
- slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
+
+ /* Zombie connectors can't have VCPI slots */
+ if (READ_ONCE(connector->registered)) {
+ slots = drm_dp_atomic_find_vcpi_slots(state,
+ &intel_dp->mst_mgr,
+ port,
+ mst_pbn);
+ if (slots < 0) {
+ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
+ slots);
+ return false;
+ }
+ }
intel_link_compute_m_n(bpp, lane_count,
adjusted_mode->crtc_clock,
@@ -74,7 +93,38 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->dp_m_n.tu = slots;
return true;
+}
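
The PBN value computed above feeds the atomic VCPI allocation. A worked
example of the drm_dp_calc_pbn_mode() arithmetic with assumed numbers
(1080p60; the clock is in kHz and 1006/1000 is the 0.6% DP MST margin):

/*
 * Assumed: crtc_clock = 148500 kHz, bpp = 24.
 *
 *   PBN = ceil(clock * bpp * 64 * 1006 / (8 * 54 * 1000 * 1000))
 *       = ceil(148500 * 24 * 64 * 1006 / 432000000)
 *       = ceil(531.17)
 *       = 532
 *
 * drm_dp_atomic_find_vcpi_slots() then converts those PBN into VCPI
 * time slots against the link's budget, and dp_m_n.tu records the
 * result for the CRTC.
 */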
+
+static int intel_dp_mst_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *new_conn_state)
+{
+ struct drm_atomic_state *state = new_conn_state->state;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *old_crtc;
+ struct drm_crtc_state *crtc_state;
+ int slots, ret = 0;
+
+ old_conn_state = drm_atomic_get_old_connector_state(state, connector);
+ old_crtc = old_conn_state->crtc;
+ if (!old_crtc)
+ return ret;
+ crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
+ slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
+ if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_encoder *old_encoder;
+
+ old_encoder = old_conn_state->best_encoder;
+ mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
+
+ ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
+ if (ret)
+ DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
+ else
+ to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
+ }
+ return ret;
}
static void intel_mst_disable_dp(struct intel_encoder *encoder,
@@ -86,9 +136,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
@@ -96,6 +147,10 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
+ if (old_crtc_state->has_audio) {
+ intel_audio_codec_disable(encoder);
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+ }
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -108,8 +163,6 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-
/* this can fail */
drm_dp_check_act_status(&intel_dp->mst_mgr);
/* and this can also fail */
@@ -117,15 +170,23 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port);
+ /*
+ * Power down mst path before disabling the port, otherwise we end
+ * up getting interrupts from the sink upon detecting link loss.
+ */
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
+ false);
+
intel_dp->active_mst_links--;
intel_mst->connector = NULL;
if (intel_dp->active_mst_links == 0) {
- intel_dig_port->base.post_disable(&intel_dig_port->base,
- NULL, NULL);
-
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ intel_dig_port->base.post_disable(&intel_dig_port->base,
+ old_crtc_state, NULL);
}
+
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
}
static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
@@ -141,7 +202,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
to_intel_connector(conn_state->connector);
int ret;
uint32_t temp;
- int slots;
/* MST encoders are bound to a crtc, not to a connector,
* force the mapping here for get_hw_state.
@@ -149,34 +209,23 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
-
- if (intel_dp->active_mst_links == 0) {
- intel_ddi_clk_select(&intel_dig_port->base,
- pipe_config->shared_dpll);
-
- intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
- intel_dp_set_link_params(intel_dp,
- pipe_config->port_clock,
- pipe_config->lane_count,
- true);
-
- intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
+ if (intel_dp->active_mst_links == 0)
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
- }
+ drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+
+ if (intel_dp->active_mst_links == 0)
+ intel_dig_port->base.pre_enable(&intel_dig_port->base,
+ pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
connector->port,
- pipe_config->pbn, &slots);
- if (ret == false) {
+ pipe_config->pbn,
+ pipe_config->dp_m_n.tu);
+ if (!ret)
DRM_ERROR("failed to allocate vcpi\n");
- return;
- }
-
intel_dp->active_mst_links++;
temp = I915_READ(DP_TP_STATUS(port));
@@ -196,7 +245,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
enum port port = intel_dig_port->port;
int ret;
- DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
+ DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_wait_for_register(dev_priv,
DP_TP_STATUS(port),
@@ -208,6 +257,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
+ if (pipe_config->has_audio) {
+ intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ intel_audio_codec_enable(encoder);
+ }
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -230,6 +283,11 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 temp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
+
+ pipe_config->has_audio =
+ intel_ddi_is_audio_enabled(dev_priv, crtc);
+
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -273,9 +331,8 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!intel_dp) {
+ if (!READ_ONCE(connector->registered))
return intel_connector_update_modes(connector, NULL);
- }
edid = drm_dp_mst_get_edid(connector, &intel_dp->mst_mgr, intel_connector->port);
ret = intel_connector_update_modes(connector, edid);
@@ -290,17 +347,10 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- if (!intel_dp)
+ if (!READ_ONCE(connector->registered))
return connector_status_disconnected;
- return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
-}
-
-static int
-intel_dp_mst_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t val)
-{
- return 0;
+ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr,
+ intel_connector->port);
}
static void
@@ -319,8 +369,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = intel_dp_mst_set_property,
- .atomic_get_property = intel_connector_atomic_get_property,
+ .set_property = drm_atomic_helper_connector_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_dp_mst_connector_destroy,
@@ -343,8 +392,11 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
+ if (!READ_ONCE(connector->registered))
+ return MODE_ERROR;
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
- max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+ max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(mode->clock, bpp);
@@ -369,7 +421,7 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
struct intel_dp *intel_dp = intel_connector->mst_port;
struct intel_crtc *crtc = to_intel_crtc(state->crtc);
- if (!intel_dp)
+ if (!READ_ONCE(connector->registered))
return NULL;
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
@@ -388,6 +440,7 @@ static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_fun
.mode_valid = intel_dp_mst_mode_valid,
.atomic_best_encoder = intel_mst_atomic_best_encoder,
.best_encoder = intel_mst_best_encoder,
+ .atomic_check = intel_dp_mst_atomic_check,
};
static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
@@ -440,33 +493,52 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_connector *intel_connector;
struct drm_connector *connector;
- int i;
+ enum pipe pipe;
+ int ret;
intel_connector = intel_connector_alloc();
if (!intel_connector)
return NULL;
connector = &intel_connector->base;
- drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
+ ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
+ DRM_MODE_CONNECTOR_DisplayPort);
+ if (ret) {
+ intel_connector_free(intel_connector);
+ return NULL;
+ }
+
drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
- for (i = PIPE_A; i <= PIPE_C; i++) {
- drm_mode_connector_attach_encoder(&intel_connector->base,
- &intel_dp->mst_encoders[i]->base.base);
+ for_each_pipe(dev_priv, pipe) {
+ struct drm_encoder *enc =
+ &intel_dp->mst_encoders[pipe]->base.base;
+
+ ret = drm_mode_connector_attach_encoder(&intel_connector->base,
+ enc);
+ if (ret)
+ goto err;
}
- intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
- drm_mode_connector_set_path_property(connector, pathprop);
+ ret = drm_mode_connector_set_path_property(connector, pathprop);
+ if (ret)
+ goto err;
+
return connector;
+
+err:
+ drm_connector_cleanup(connector);
+ return NULL;
}
static void intel_dp_register_mst_connector(struct drm_connector *connector)
@@ -487,16 +559,15 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
drm_connector_unregister(connector);
/* need to nuke the connector */
drm_modeset_lock_all(dev);
intel_connector_remove_from_fbdev(intel_connector);
- intel_connector->mst_port = NULL;
drm_modeset_unlock_all(dev);
drm_connector_unreference(&intel_connector->base);
- DRM_DEBUG_KMS("\n");
}
static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
@@ -554,11 +625,12 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
static bool
intel_dp_create_fake_mst_encoders(struct intel_digital_port *intel_dig_port)
{
- int i;
struct intel_dp *intel_dp = &intel_dig_port->dp;
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
+ enum pipe pipe;
- for (i = PIPE_A; i <= PIPE_C; i++)
- intel_dp->mst_encoders[i] = intel_dp_create_fake_mst_encoder(intel_dig_port, i);
+ for_each_pipe(dev_priv, pipe)
+ intel_dp->mst_encoders[pipe] = intel_dp_create_fake_mst_encoder(intel_dig_port, pipe);
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 41d99ea91706b0..d422db2911f62c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -38,6 +38,7 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
+#include <media/cec-notifier.h>
/**
* __wait_for - magic wait macro
@@ -74,39 +75,63 @@
#define _wait_for(COND, US, W) __wait_for(,(COND), (US), (W))
#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
-#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic())
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
#else
-# define _WAIT_FOR_ATOMIC_CHECK do { } while (0)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
#endif
-#define _wait_for_atomic(COND, US) ({ \
- unsigned long end__; \
- int ret__ = 0; \
- _WAIT_FOR_ATOMIC_CHECK; \
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+ int cpu, ret, timeout = (US) * 1000; \
+ u64 base; \
+ _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
BUILD_BUG_ON((US) > 50000); \
- end__ = (local_clock() >> 10) + (US) + 1; \
- while (!(COND)) { \
- if (time_after((unsigned long)(local_clock() >> 10), end__)) { \
- /* Unlike the regular wait_for(), this atomic variant \
- * cannot be preempted (and we'll just ignore the issue\
- * of irq interruptions) and so we know that no time \
- * has passed since the last check of COND and can \
- * immediately report the timeout. \
- */ \
- ret__ = -ETIMEDOUT; \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ cpu = smp_processor_id(); \
+ } \
+ base = local_clock(); \
+ for (;;) { \
+ u64 now = local_clock(); \
+ if (!(ATOMIC)) \
+ preempt_enable(); \
+ if (COND) { \
+ ret = 0; \
+ break; \
+ } \
+ if (now - base >= timeout) { \
+ ret = -ETIMEDOUT; \
break; \
} \
cpu_relax(); \
+ if (!(ATOMIC)) { \
+ preempt_disable(); \
+ if (unlikely(cpu != smp_processor_id())) { \
+ timeout -= now - base; \
+ cpu = smp_processor_id(); \
+ base = local_clock(); \
+ } \
+ } \
} \
+ ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+ int ret__; \
+ BUILD_BUG_ON(!__builtin_constant_p(US)); \
+ if ((US) > 10) \
+ ret__ = _wait_for((COND), (US), 10); \
+ else \
+ ret__ = _wait_for_atomic((COND), (US), 0); \
ret__; \
})
-#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000)
-#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US))
+#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
+#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)
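
A minimal usage sketch of the reworked macros, with hypothetical register and
bit names. wait_for_atomic() and wait_for_atomic_us() assume the caller is
already in atomic context; wait_for_us() sleeps between polls for waits longer
than 10us, and otherwise busy-waits with preemption disabled, rebasing its
timeout whenever the poll migrates CPUs because local_clock() is not
comparable across CPUs:

	/* FAKE_STATUS_REG and FAKE_READY_BIT are placeholders. */
	if (wait_for_atomic_us(I915_READ(FAKE_STATUS_REG) & FAKE_READY_BIT, 50))
		DRM_ERROR("FAKE_READY_BIT did not assert within 50us\n");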
#define KHz(x) (1000 * (x))
#define MHz(x) KHz(1000 * (x))
@@ -368,6 +393,8 @@ struct intel_connector {
uint64_t hdcp_value; /* protected by hdcp_mutex */
struct delayed_work hdcp_check_work;
struct work_struct hdcp_prop_work;
+ /* Work struct to schedule a uevent on link train failure */
+ struct work_struct modeset_retry_work;
};
typedef struct dpll {
@@ -646,6 +673,14 @@ struct intel_crtc_state {
*/
bool dither;
+ /*
+ * Dither gets enabled for 18bpp, which causes CRC mismatch errors
+ * for compliance video pattern tests. Disable dither only if it is
+ * a compliance test request for 18bpp.
+ */
+ bool dither_force_disable;
+
/* Controls for the clock computation, to override various stages. */
bool clock_set;
@@ -682,6 +717,9 @@ struct intel_crtc_state {
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ bool has_psr;
+ bool has_psr2;
+
/*
* Frequency the dpll for the port should run at. Differs from the
* adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
@@ -880,14 +918,14 @@ struct intel_watermark_params {
};
struct cxsr_latency {
+ bool is_desktop : 1;
+ bool is_ddr3 : 1;
u16 fsb_freq;
u16 mem_freq;
u16 display_sr;
u16 display_hpll_disable;
u16 cursor_sr;
u16 cursor_hpll_disable;
- bool is_desktop : 1;
- bool is_ddr3 : 1;
};
#define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base)
@@ -923,6 +961,7 @@ struct intel_hdmi {
const struct drm_display_mode *adjusted_mode);
bool (*infoframe_enabled)(struct drm_encoder *encoder,
const struct intel_crtc_state *pipe_config);
+ struct cec_notifier *cec_notifier;
};
struct intel_dp_mst_encoder;
@@ -950,12 +989,17 @@ enum link_m_n_set {
struct intel_dp_compliance_data {
unsigned long edid;
+ uint8_t video_pattern;
+ uint16_t hdisplay, vdisplay;
+ uint8_t bpc;
};
struct intel_dp_compliance {
unsigned long test_type;
struct intel_dp_compliance_data test_data;
bool test_active;
+ int test_link_rate;
+ u8 test_lane_count;
};
struct intel_dp {
@@ -985,10 +1029,13 @@ struct intel_dp {
int num_sink_rates;
int sink_rates[DP_MAX_SUPPORTED_RATES];
bool use_rate_select;
- /* Max lane count for the sink as per DPCD registers */
- uint8_t max_sink_lane_count;
- /* Max link BW for the sink as per DPCD registers */
- int max_sink_link_rate;
+ /* intersection of source and sink rates */
+ int num_common_rates;
+ int common_rates[DP_MAX_SUPPORTED_RATES];
+ /* Max lane count for the current link */
+ int max_link_lane_count;
+ /* Max rate for the current link */
+ int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
struct drm_dp_aux aux;
@@ -1174,6 +1221,14 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
+static inline struct intel_crtc_state *
+intel_atomic_get_new_crtc_state(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ return to_intel_crtc_state(drm_atomic_get_new_crtc_state(&state->base,
+ &crtc->base));
+}
+
/* intel_fifo_underrun.c */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
enum pipe pipe, bool enable);
@@ -1227,16 +1282,18 @@ void hsw_fdi_link_train(struct drm_crtc *crtc);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
-void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
struct intel_encoder *
@@ -1245,7 +1302,8 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
int clock);
@@ -1297,6 +1355,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
bool intel_connector_get_hw_state(struct intel_connector *connector);
void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
@@ -1477,7 +1536,8 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
bool intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
bool long_hpd);
void intel_edp_backlight_on(struct intel_dp *intel_dp);
@@ -1485,10 +1545,10 @@ void intel_edp_backlight_off(struct intel_dp *intel_dp);
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
void intel_edp_panel_on(struct intel_dp *intel_dp);
void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
void intel_dp_mst_suspend(struct drm_device *dev);
void intel_dp_mst_resume(struct drm_device *dev);
int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
@@ -1691,8 +1751,12 @@ int intel_hdcp_check_link(struct intel_connector *connector);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
/* intel_psr.c */
-void intel_psr_enable(struct intel_dp *intel_dp);
-void intel_psr_disable(struct intel_dp *intel_dp);
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
@@ -1700,8 +1764,13 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
/* intel_runtime_pm.c */
int intel_power_domains_init(struct drm_i915_private *);
@@ -1839,6 +1908,8 @@ bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
+void intel_init_ipc(struct drm_i915_private *dev_priv);
+void intel_enable_ipc(struct drm_i915_private *dev_priv);
static inline int intel_enable_rc6(void)
{
return i915.enable_rc6;
@@ -1856,8 +1927,8 @@ struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, int plane);
int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void intel_pipe_update_start(struct intel_crtc *crtc);
-void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state, struct intel_flip_work *work);
bool intel_format_is_yuv(uint32_t format);
bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 06dcf9d375acbd..fa87b29aef1d5c 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -843,6 +843,8 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
u32 pclk;
DRM_DEBUG_KMS("\n");
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
+
if (IS_BROXTON(dev_priv))
bxt_dsi_get_pipe_config(encoder, pipe_config);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 12cb50ce5f1471..81da3230379e5c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -159,6 +159,8 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
+
tmp = I915_READ(intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 335c38c8731b07..f592c0391c4631 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -79,6 +79,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
spin_unlock(&dev_priv->fb_tracking.lock);
}
+ might_sleep();
intel_psr_invalidate(dev_priv, frontbuffer_bits, origin);
intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
@@ -108,6 +109,7 @@ static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
if (!frontbuffer_bits)
return;
+ might_sleep();
intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
intel_psr_flush(dev_priv, frontbuffer_bits, origin);
intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
@@ -152,8 +154,6 @@ void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
/* Remove stale busy bits due to the old buffer. */
dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
spin_unlock(&dev_priv->fb_tracking.lock);
-
- intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c77de8f7a2b95c..9f32b64c55a884 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1178,6 +1178,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
u32 tmp, flags = 0;
int dotclock;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
@@ -1734,6 +1736,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
connected = true;
}
+ cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
+
return connected;
}
@@ -1781,6 +1785,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
return status;
}
@@ -2054,6 +2061,9 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
cancel_work_sync(&intel_connector->hdcp_prop_work);
}
+ if (intel_attached_hdmi(connector)->cec_notifier)
+ cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
+
kfree(intel_connector->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
@@ -2236,6 +2246,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+
+ intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
+ port_identifier(port));
+ if (!intel_hdmi->cec_notifier)
+ DRM_DEBUG_KMS("CEC notifier get failed\n");
}
void intel_hdmi_init(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 11b89d2e64cda2..e1eed67924bcc5 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -125,6 +125,8 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp, flags = 0;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
+
tmp = I915_READ(lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
@@ -857,6 +859,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "D525MW"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Radiant P845",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Radiant Systems Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P845"),
+ },
+ },
{ } /* terminating entry */
};
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2cc12b48b74dbd..23dc682291e0b4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3116,8 +3116,12 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *for_crtc = cstate->base.crtc;
- unsigned int pipe_size, ddb_size;
- int nth_active_pipe;
+ const struct drm_crtc_state *crtc_state;
+ const struct drm_crtc *crtc;
+ u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+ enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
+ u16 ddb_size;
+ u32 i;
if (WARN_ON(!state) || !cstate->base.active) {
alloc->start = 0;
@@ -3137,14 +3141,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
ddb_size -= 4; /* 4 blocks for bypass path allocation */
/*
- * If the state doesn't change the active CRTC's, then there's
- * no need to recalculate; the existing pipe allocation limits
- * should remain unchanged. Note that we're safe from racing
- * commits since any racing commit that changes the active CRTC
- * list would need to grab _all_ crtc locks, including the one
- * we currently hold.
+ * If the state doesn't change the active CRTCs and there is no
+ * modeset request, then there's no need to recalculate;
+ * the existing pipe allocation limits should remain unchanged.
+ * Note that we're safe from racing commits since any racing commit
+ * that changes the active CRTC list or does a modeset would need to
+ * grab _all_ crtc locks, including the one we currently hold.
*/
- if (!intel_state->active_pipe_changes) {
+ if (!intel_state->active_pipe_changes && !intel_state->modeset) {
/*
* alloc may be cleared by clear_intel_crtc_state,
* copy from old state to be sure
@@ -3153,11 +3157,32 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
return;
}
- nth_active_pipe = hweight32(intel_state->active_crtcs &
- (drm_crtc_mask(for_crtc) - 1));
- pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
- alloc->start = nth_active_pipe * ddb_size / *num_active;
- alloc->end = alloc->start + pipe_size;
+ /*
+ * The watermark/DDB requirement depends heavily on the width of the
+ * framebuffer, so instead of allocating DDB equally among pipes,
+ * distribute it based on the resolution/width of each display.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ const struct drm_display_mode *adjusted_mode;
+ int hdisplay, vdisplay;
+ enum pipe pipe;
+
+ if (!crtc_state->enable)
+ continue;
+
+ pipe = to_intel_crtc(crtc)->pipe;
+ adjusted_mode = &crtc_state->adjusted_mode;
+ drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
+ total_width += hdisplay;
+
+ if (pipe < for_pipe)
+ width_before_pipe += hdisplay;
+ else if (pipe == for_pipe)
+ pipe_width = hdisplay;
+ }
+
+ alloc->start = ddb_size * width_before_pipe / total_width;
+ alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
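
A worked example of the proportional split, with assumed numbers:

/*
 * Assumed: ddb_size = 896 blocks, pipe A is 1920 pixels wide and
 * pipe B is 1280, so total_width = 3200. For pipe B,
 * width_before_pipe = 1920 and pipe_width = 1280:
 *
 *   alloc->start = 896 * 1920 / 3200 = 537
 *   alloc->end   = 896 * 3200 / 3200 = 896
 *
 * Pipe B gets 359 of the 896 blocks (40% of the DDB for 40% of the
 * total width), where the old code would have handed it a flat half.
 */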
static unsigned int skl_cursor_allocation(int num_active)
@@ -4470,7 +4495,7 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
* any other display updates race with this transaction, so we need
* to grab the lock on *all* CRTC's.
*/
- if (intel_state->active_pipe_changes) {
+ if (intel_state->active_pipe_changes || intel_state->modeset) {
realloc_pipes = ~0;
intel_state->wm_results.dirty_pipes = ~0;
}
@@ -5012,6 +5037,30 @@ void intel_update_watermarks(struct intel_crtc *crtc)
dev_priv->display.update_wm(crtc);
}
+void intel_enable_ipc(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(DISP_ARB_CTL2);
+
+ if (dev_priv->ipc_enabled)
+ val |= DISP_IPC_ENABLE;
+ else
+ val &= ~DISP_IPC_ENABLE;
+
+ I915_WRITE(DISP_ARB_CTL2, val);
+}
+
+void intel_init_ipc(struct drm_i915_private *dev_priv)
+{
+ dev_priv->ipc_enabled = false;
+ if (!HAS_IPC(dev_priv))
+ return;
+
+ dev_priv->ipc_enabled = true;
+ intel_enable_ipc(dev_priv);
+}
+
/*
* Lock protecting IPS related data structures
*/
@@ -8208,9 +8257,9 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (intel_wait_for_register_fw(dev_priv,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500)) {
+ if (__intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
@@ -8253,9 +8302,9 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (intel_wait_for_register_fw(dev_priv,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
- 500)) {
+ if (__intel_wait_for_register_fw(dev_priv,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+ 500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index e6a08dfc9da146..11ac8fcd4416b9 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,20 +56,204 @@
#include "intel_drv.h"
#include "i915_drv.h"
-static bool is_edp_psr(struct intel_dp *intel_dp)
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug)
{
- return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+ u32 debug_mask, mask;
+
+ mask = EDP_PSR_ERROR(TRANSCODER_EDP);
+ debug_mask = EDP_PSR_POST_EXIT(TRANSCODER_EDP) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_EDP);
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ mask |= EDP_PSR_ERROR(TRANSCODER_A) |
+ EDP_PSR_ERROR(TRANSCODER_B) |
+ EDP_PSR_ERROR(TRANSCODER_C);
+
+ debug_mask |= EDP_PSR_POST_EXIT(TRANSCODER_A) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_A) |
+ EDP_PSR_POST_EXIT(TRANSCODER_B) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_B) |
+ EDP_PSR_POST_EXIT(TRANSCODER_C) |
+ EDP_PSR_PRE_ENTRY(TRANSCODER_C);
+ }
+
+ if (debug)
+ mask |= debug_mask;
+
+ WRITE_ONCE(dev_priv->psr.debug, debug);
+ I915_WRITE(EDP_PSR_IMR, ~mask);
}
-static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
+static void psr_event_print(u32 val, bool psr2_enabled)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val;
+ DRM_DEBUG_KMS("PSR exit events: 0x%x\n", val);
+ if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
+ DRM_DEBUG_KMS("\tPSR2 watchdog timer expired\n");
+ if ((val & PSR_EVENT_PSR2_DISABLED) && psr2_enabled)
+ DRM_DEBUG_KMS("\tPSR2 disabled\n");
+ if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
+ DRM_DEBUG_KMS("\tSU dirty FIFO underrun\n");
+ if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
+ DRM_DEBUG_KMS("\tSU CRC FIFO underrun\n");
+ if (val & PSR_EVENT_GRAPHICS_RESET)
+ DRM_DEBUG_KMS("\tGraphics reset\n");
+ if (val & PSR_EVENT_PCH_INTERRUPT)
+ DRM_DEBUG_KMS("\tPCH interrupt\n");
+ if (val & PSR_EVENT_MEMORY_UP)
+ DRM_DEBUG_KMS("\tMemory up\n");
+ if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
+ DRM_DEBUG_KMS("\tFront buffer modification\n");
+ if (val & PSR_EVENT_WD_TIMER_EXPIRE)
+ DRM_DEBUG_KMS("\tPSR watchdog timer expired\n");
+ if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
+ DRM_DEBUG_KMS("\tPIPE registers updated\n");
+ if (val & PSR_EVENT_REGISTER_UPDATE)
+ DRM_DEBUG_KMS("\tRegister updated\n");
+ if (val & PSR_EVENT_HDCP_ENABLE)
+ DRM_DEBUG_KMS("\tHDCP enabled\n");
+ if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
+ DRM_DEBUG_KMS("\tKVMR session enabled\n");
+ if (val & PSR_EVENT_VBI_ENABLE)
+ DRM_DEBUG_KMS("\tVBI enabled\n");
+ if (val & PSR_EVENT_LPSP_MODE_EXIT)
+ DRM_DEBUG_KMS("\tLPSP mode exited\n");
+ if ((val & PSR_EVENT_PSR_DISABLE) && !psr2_enabled)
+ DRM_DEBUG_KMS("\tPSR disabled\n");
+}
+
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+{
+ u32 transcoders = BIT(TRANSCODER_EDP);
+ enum transcoder cpu_transcoder;
+ /* Deviating from upstream, ktime_t is a union in chromeos-4.4 */
+ ktime_t time_ns;
+ time_ns.tv64 = ktime_to_ns(ktime_get());
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ transcoders |= BIT(TRANSCODER_A) |
+ BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C);
+
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) {
+ /* FIXME: Exit PSR and link train manually when this happens. */
+ if (psr_iir & EDP_PSR_ERROR(cpu_transcoder))
+ DRM_DEBUG_KMS("[transcoder %s] PSR aux error\n",
+ transcoder_name(cpu_transcoder));
+
+ if (psr_iir & EDP_PSR_PRE_ENTRY(cpu_transcoder)) {
+ /*
+ * In kernel 4.4 ktime_t is defined as a union with one
+ * member (tv64), but upstream it is now a plain s64.
+ */
+ dev_priv->psr.last_entry_attempt.tv64 = ktime_to_ns(time_ns);
+ DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
+ }
+
+ if (psr_iir & EDP_PSR_POST_EXIT(cpu_transcoder)) {
+ /*
+ * In kernel 4.4 ktime_t is defined as a union with one
+ * member (tv64), but upstream it is now a plain s64.
+ */
+ dev_priv->psr.last_exit.tv64 = ktime_to_ns(time_ns);
+ DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ bool psr2_enabled = dev_priv->psr.psr2_enabled;
+
+ I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ psr_event_print(val, psr2_enabled);
+ }
+ }
+ }
+}
+
+static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
+{
+ uint8_t dprx = 0;
- val = I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_CURR_STATE_MASK;
- return (val == VLV_EDP_PSR_ACTIVE_NORFB_UP) ||
- (val == VLV_EDP_PSR_ACTIVE_SF_UPDATE);
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dprx) != 1)
+ return false;
+ return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
+}
+
+static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
+{
+ uint8_t alpm_caps = 0;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &alpm_caps) != 1)
+ return false;
+ return alpm_caps & DP_ALPM_CAP;
+}
+
+static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
+{
+ u8 val = 8; /* assume the worst if we can't read the value */
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
+ val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
+ else
+ DRM_DEBUG_KMS("Unable to get sink synchronization latency, assuming 8 frames\n");
+ return val;
+}
+
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (!intel_dp->psr_dpcd[0])
+ return;
+ DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
+
+ if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
+ DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ return;
+ }
+ dev_priv->psr.sink_support = true;
+ dev_priv->psr.sink_sync_latency =
+ intel_dp_get_sink_sync_latency(intel_dp);
+
+ if (INTEL_GEN(dev_priv) >= 9 &&
+ (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ bool y_req = intel_dp->psr_dpcd[1] &
+ DP_PSR2_SU_Y_COORDINATE_REQUIRED;
+ bool alpm = intel_dp_get_alpm_status(intel_dp);
+
+ /*
+ * All panels that support PSR version 03h (PSR2 +
+ * Y-coordinate) can handle Y-coordinates in the VSC, but we
+ * are only sure it is going to be used when required by the
+ * panel. This way the panel can do selective updates without
+ * an AUX frame sync.
+ *
+ * To support PSR version 02h, and PSR version 03h panels
+ * without the Y-coordinate requirement, we would need to
+ * enable GTC first.
+ */
+ dev_priv->psr.sink_psr2_support = y_req && alpm;
+ DRM_DEBUG_KMS("PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
+
+ /* TODO(b/67599437) PSR2 is broken */
+ dev_priv->psr.sink_psr2_support = false;
+
+ if (dev_priv->psr.sink_psr2_support) {
+ dev_priv->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ }
+ }
}
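
To make the capability gate concrete, a hypothetical DPCD decode under this
logic:

/*
 * Hypothetical sink: psr_dpcd[0] = 0x03 (PSR2 with Y-coordinate),
 * psr_dpcd[1] has DP_PSR2_SU_Y_COORDINATE_REQUIRED set, and the sink
 * advertises DP_ALPM_CAP. Then y_req && alpm is true and PSR2 would be
 * reported as supported, except that the TODO above forces
 * sink_psr2_support back to false while b/67599437 is open, so this
 * kernel never reaches the colorimetry probe.
 */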
static void intel_psr_write_vsc(struct intel_dp *intel_dp,
@@ -103,92 +287,43 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
POSTING_READ(ctl_reg);
}
-static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
+static void intel_psr_setup_vsc(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- uint32_t val;
-
- /* VLV auto-generate VSC package as per EDP 1.3 spec, Table 3.10 */
- val = I915_READ(VLV_VSCSDP(pipe));
- val &= ~VLV_EDP_PSR_SDP_FREQ_MASK;
- val |= VLV_EDP_PSR_SDP_FREQ_EVFRAME;
- I915_WRITE(VLV_VSCSDP(pipe), val);
-}
-
-static void skl_psr_setup_su_vsc(struct intel_dp *intel_dp)
-{
+ struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
struct edp_vsc_psr psr_vsc;
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- if (dev_priv->psr.colorimetry_support &&
- dev_priv->psr.y_cord_support) {
- psr_vsc.sdp_header.HB2 = 0x5;
- psr_vsc.sdp_header.HB3 = 0x13;
- } else if (dev_priv->psr.y_cord_support) {
- psr_vsc.sdp_header.HB2 = 0x4;
- psr_vsc.sdp_header.HB3 = 0xe;
+ if (dev_priv->psr.psr2_enabled) {
+ /* Prepare VSC Header for SU as per EDP 1.4 spec, Table 6.11 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ if (dev_priv->psr.colorimetry_support) {
+ psr_vsc.sdp_header.HB2 = 0x5;
+ psr_vsc.sdp_header.HB3 = 0x13;
+ } else {
+ psr_vsc.sdp_header.HB2 = 0x4;
+ psr_vsc.sdp_header.HB3 = 0xe;
+ }
} else {
- psr_vsc.sdp_header.HB2 = 0x3;
- psr_vsc.sdp_header.HB3 = 0xc;
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
}
intel_psr_write_vsc(intel_dp, &psr_vsc);
}
-static void hsw_psr_setup_vsc(struct intel_dp *intel_dp)
-{
- struct edp_vsc_psr psr_vsc;
-
- /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
- memset(&psr_vsc, 0, sizeof(psr_vsc));
- psr_vsc.sdp_header.HB0 = 0;
- psr_vsc.sdp_header.HB1 = 0x7;
- psr_vsc.sdp_header.HB2 = 0x2;
- psr_vsc.sdp_header.HB3 = 0x8;
- intel_psr_write_vsc(intel_dp, &psr_vsc);
-}
-
-static void vlv_psr_enable_sink(struct intel_dp *intel_dp)
-{
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
-}
-
-static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
- enum port port)
-{
- if (INTEL_INFO(dev_priv)->gen >= 9)
- return DP_AUX_CH_CTL(port);
- else
- return EDP_PSR_AUX_CTL;
-}
-
-static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
- enum port port, int index)
-{
- if (INTEL_INFO(dev_priv)->gen >= 9)
- return DP_AUX_CH_DATA(port, index);
- else
- return EDP_PSR_AUX_DATA(index);
-}
-
-static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
+static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t aux_clock_divider;
- i915_reg_t aux_ctl_reg;
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ u32 aux_clock_divider, aux_ctl;
+ int i;
static const uint8_t aux_msg[] = {
[0] = DP_AUX_NATIVE_WRITE << 4,
[1] = DP_SET_POWER >> 8,
@@ -196,116 +331,76 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
[3] = 1 - 1,
[4] = DP_SET_POWER_D0,
};
- enum port port = dig_port->port;
- u32 aux_ctl;
- int i;
+ u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
+ EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
+ EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
+ EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
BUILD_BUG_ON(sizeof(aux_msg) > 20);
-
- aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
- /* Enable AUX frame sync at sink */
- if (dev_priv->psr.aux_frame_sync)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
- DP_AUX_FRAME_SYNC_ENABLE);
- /* Enable ALPM at sink for psr2 */
- if (dev_priv->psr.psr2_support && dev_priv->psr.alpm)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_RECEIVER_ALPM_CONFIG,
- DP_ALPM_ENABLE);
- if (dev_priv->psr.link_standby)
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
- else
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
- DP_PSR_ENABLE);
-
- aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
-
- /* Setup AUX registers */
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
+ I915_WRITE(EDP_PSR_AUX_DATA(i >> 2),
intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+ aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
+
+ /* Start with bits set for DDI_AUX_CTL register */
aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
aux_clock_divider);
- I915_WRITE(aux_ctl_reg, aux_ctl);
-}
-static void vlv_psr_enable_source(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- /* Transition from PSR_state 0 to PSR_state 1, i.e. PSR Inactive */
- I915_WRITE(VLV_PSRCTL(pipe),
- VLV_EDP_PSR_MODE_SW_TIMER |
- VLV_EDP_PSR_SRC_TRANSMITTER_STATE |
- VLV_EDP_PSR_ENABLE);
+ /* Select only valid bits for SRD_AUX_CTL */
+ aux_ctl &= psr_aux_mask;
+ I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl);
}
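
The loop above writes the pre-built DP_SET_POWER D0 message four bytes per
EDP_PSR_AUX_DATA register. A minimal sketch of the packing intel_dp_pack_aux()
performs, assuming the usual AUX layout with the first message byte in the
most significant byte of the register:

	/* Sketch: pack up to 4 message bytes into one 32-bit AUX data reg. */
	static u32 pack_aux_sketch(const u8 *src, int src_bytes)
	{
		u32 v = 0;
		int i;

		if (src_bytes > 4)
			src_bytes = 4;
		for (i = 0; i < src_bytes; i++)
			v |= (u32)src[i] << ((3 - i) * 8);
		return v;
	}
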
-static void vlv_psr_activate(struct intel_dp *intel_dp)
+static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u8 dpcd_val = DP_PSR_ENABLE;
- /* Let's do the transition from PSR_state 1 to PSR_state 2
- * that is PSR transition to active - static frame transmission.
- * Then Hardware is responsible for the transition to PSR_state 3
- * that is PSR active - no Remote Frame Buffer (RFB) update.
- */
- I915_WRITE(VLV_PSRCTL(pipe), I915_READ(VLV_PSRCTL(pipe)) |
- VLV_EDP_PSR_ACTIVE_ENTRY);
+ /* Enable ALPM at sink for psr2 */
+ if (dev_priv->psr.psr2_enabled) {
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
+ DP_ALPM_ENABLE);
+ dpcd_val |= DP_PSR_ENABLE_PSR2;
+ }
+
+ if (dev_priv->psr.link_standby)
+ dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
+ if (!dev_priv->psr.psr2_enabled && INTEL_GEN(dev_priv) >= 8)
+ dpcd_val |= DP_PSR_CRC_VERIFICATION;
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
+
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void intel_enable_source_psr1(struct intel_dp *intel_dp)
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
- uint32_t max_sleep_time = 0x1f;
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = I915_READ(EDP_PSR_CTL);
-
- val |= EDP_PSR_ENABLE;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- val &= ~EDP_PSR_MAX_SLEEP_TIME_MASK;
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
-
- val &= ~EDP_PSR_IDLE_FRAME_MASK;
+ /* sink_sync_latency of 8 means source has to wait for more than 8
+ * frames; we'll go with 9 frames for now.
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
- val &= ~EDP_PSR_MIN_LINK_ENTRY_TIME_MASK;
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
- if (dev_priv->psr.link_standby) {
+ if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- /* SFU should only be enabled with link standby, but for
- * now we do not support it. */
- val &= ~BDW_PSR_SINGLE_FRAME;
- } else {
- val &= ~EDP_PSR_LINK_STANDBY;
- val &= ~BDW_PSR_SINGLE_FRAME;
- }
-
- val &= ~EDP_PSR_TP1_TIME_MASK;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
@@ -315,7 +410,6 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TIME_2500us;
- val &= ~EDP_PSR_TP2_TP3_TIME_MASK;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
@@ -325,48 +419,42 @@ static void intel_enable_source_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP2_TP3_TIME_2500us;
- val &= ~EDP_PSR_TP1_TP3_SEL;
if (intel_dp_source_supports_hbr2(intel_dp) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
else
val |= EDP_PSR_TP1_TP2_SEL;
+ if (INTEL_GEN(dev_priv) >= 8)
+ val |= EDP_PSR_CRC_ENABLE;
+
+ val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK;
I915_WRITE(EDP_PSR_CTL, val);
}
-static void intel_enable_source_psr2(struct intel_dp *intel_dp)
+static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- /*
- * Let's respect VBT in case VBT asks a higher idle_frame value.
- * Let's use 6 as the minimum to cover all known cases including
- * the off-by-one issue that HW has in some cases. Also there are
- * cases where sink should be able to train
- * with the 5 or 6 idle patterns.
+ u32 val;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
*/
- uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val;
- uint8_t sink_latency;
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- val = idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
/* FIXME: selective update is probably totally broken because it doesn't
* mesh at all with our frontbuffer tracking. And the hw alone isn't
* good enough. */
- val |= EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE;
+ val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ val |= EDP_Y_COORDINATE_ENABLE;
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_SYNCHRONIZATION_LATENCY_IN_SINK,
- &sink_latency) == 1) {
- sink_latency &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
- } else {
- sink_latency = 0;
- }
- val |= EDP_PSR2_FRAME_BEFORE_SU(sink_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
@@ -381,35 +469,48 @@ static void intel_enable_source_psr2(struct intel_dp *intel_dp)
I915_WRITE(EDP_PSR2_CTL, val);
}
-static void hsw_psr_enable_source(struct intel_dp *intel_dp)
+static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
- /* psr1 and psr2 are mutually exclusive.*/
- if (dev_priv->psr.psr2_support)
- intel_enable_source_psr2(intel_dp);
- else
- intel_enable_source_psr1(intel_dp);
+ /*
+ * FIXME psr2_support is messed up. It's both computed
+ * dynamically during PSR enable, and extracted from sink
+ * caps during eDP detection.
+ */
+ if (!dev_priv->psr.sink_psr2_support)
+ return false;
+
+ /* PSR2 is restricted to work with panel resolutions up to 3640x2304 */
+ if (adjusted_mode->crtc_hdisplay > 3640 ||
+ adjusted_mode->crtc_vdisplay > 2304) {
+ DRM_DEBUG_KMS("PSR2 not enabled, panel resolution too big\n");
+ return false;
+ }
+
+ return true;
}
-static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc = dig_port->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config->base.adjusted_mode;
+ &crtc_state->base.adjusted_mode;
int psr_setup_time;
- lockdep_assert_held(&dev_priv->psr.lock);
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
+ if (!CAN_PSR(dev_priv))
+ return;
- dev_priv->psr.source_ok = false;
+ if (!i915.enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ return;
+ }
/*
* HSW spec explicitly says PSR is tied to port A.
@@ -420,66 +521,39 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
*/
if (HAS_DDI(dev_priv) && dig_port->port != PORT_A) {
DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
- return false;
- }
-
- if (!i915.enable_psr) {
- DRM_DEBUG_KMS("PSR disable by flag\n");
- return false;
- }
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- !dev_priv->psr.link_standby) {
- DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
- return false;
+ return;
}
if (IS_HASWELL(dev_priv) &&
- I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
+ I915_READ(HSW_STEREO_3D_CTL(crtc_state->cpu_transcoder)) &
S3D_ENABLE) {
DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
- return false;
+ return;
}
if (IS_HASWELL(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
- return false;
+ return;
}
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) {
DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
- return false;
+ return;
}
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time);
- return false;
- }
-
- /* PSR2 is restricted to work with panel resolutions upto 3200x2000 */
- if (dev_priv->psr.psr2_support &&
- (intel_crtc->config->pipe_src_w > 3200 ||
- intel_crtc->config->pipe_src_h > 2000)) {
- dev_priv->psr.psr2_support = false;
- return false;
- }
-
- /*
- * FIXME:enable psr2 only for y-cordinate psr2 panels
- * After gtc implementation , remove this restriction.
- */
- if (!dev_priv->psr.y_cord_support && dev_priv->psr.psr2_support) {
- DRM_DEBUG_KMS("PSR2 disabled, panel does not support Y coordinate\n");
- return false;
+ return;
}
- dev_priv->psr.source_ok = true;
- return true;
+ crtc_state->has_psr = true;
+ crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+ DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
}
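
A worked example of the setup-time check above, assuming nominal 1080p60 CEA
timings (crtc_clock = 148500 kHz, crtc_htotal = 2200, crtc_vtotal = 1125,
crtc_vdisplay = 1080): a DPCD setup time of 330 us converts to
DIV_ROUND_UP(330 * 148500, 1000 * 2200) = 23 scanlines against a vblank
budget of 1125 - 1080 - 1 = 44 lines, so PSR stays enabled; a hypothetical
sink needing 700 us would require 48 lines and fail the check.
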
static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -488,277 +562,279 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- if (dev_priv->psr.psr2_support)
+ if (INTEL_GEN(dev_priv) >= 9)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
- else
- WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
+ WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
WARN_ON(dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
- /* Enable/Re-enable PSR on the host */
- if (HAS_DDI(dev_priv))
- /* On HSW+ after we enable PSR on source it will activate it
- * as soon as it match configure idle_frame count. So
- * we just actually enable it here on activation time.
- */
- hsw_psr_enable_source(intel_dp);
+ /* psr1 and psr2 are mutually exclusive.*/
+ if (dev_priv->psr.psr2_enabled)
+ hsw_activate_psr2(intel_dp);
else
- vlv_psr_activate(intel_dp);
+ hsw_activate_psr1(intel_dp);
dev_priv->psr.active = true;
}
+static void intel_psr_enable_source(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ /* Only HSW and BDW have PSR AUX registers that need to be set up. SKL+
+ * use hardcoded values for PSR AUX transactions.
+ */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ hsw_psr_setup_aux(intel_dp);
+
+ if (dev_priv->psr.psr2_enabled) {
+ u32 chicken = I915_READ(CHICKEN_TRANS(cpu_transcoder));
+
+ if (INTEL_GEN(dev_priv) == 9 && !IS_GEMINILAKE(dev_priv))
+ chicken |= (PSR2_VSC_ENABLE_PROG_HEADER
+ | PSR2_ADD_VERTICAL_LINE_COUNT);
+
+ else
+ chicken &= ~VSC_DATA_SEL_SOFTWARE_CONTROL;
+ I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
+
+ I915_WRITE(EDP_PSR_DEBUG,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP |
+ EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
+ } else {
+ /*
+ * Per Spec: Avoid continuous PSR exit by masking MEMUP
+ * and HPD. also mask LPSP to avoid dependency on other
+ * drivers that might block runtime_pm besides
+ * preventing other hw tracking issues now we can rely
+ * on frontbuffer tracking.
+ */
+ I915_WRITE(EDP_PSR_DEBUG,
+ EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD |
+ EDP_PSR_DEBUG_MASK_LPSP |
+ EDP_PSR_DEBUG_MASK_MAX_SLEEP);
+ }
+}
+
/**
* intel_psr_enable - Enable PSR
* @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
*
* This function can only be called after the pipe is fully trained and enabled.
*/
-void intel_psr_enable(struct intel_dp *intel_dp)
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
- enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
- u32 chicken;
- if (!HAS_PSR(dev_priv)) {
- DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ if (!crtc_state->has_psr)
return;
- }
- if (!is_edp_psr(intel_dp)) {
- DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ if (WARN_ON(!CAN_PSR(dev_priv)))
return;
- }
+ WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
if (dev_priv->psr.enabled) {
DRM_DEBUG_KMS("PSR already in use\n");
goto unlock;
}
- if (!intel_psr_match_conditions(intel_dp))
- goto unlock;
-
+ dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
dev_priv->psr.busy_frontbuffer_bits = 0;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support) {
- skl_psr_setup_su_vsc(intel_dp);
- chicken = PSR2_VSC_ENABLE_PROG_HEADER;
- if (dev_priv->psr.y_cord_support)
- chicken |= PSR2_ADD_VERTICAL_LINE_COUNT;
- I915_WRITE(CHICKEN_TRANS(cpu_transcoder), chicken);
- I915_WRITE(EDP_PSR_DEBUG_CTL,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP |
- EDP_PSR_DEBUG_MASK_MAX_SLEEP |
- EDP_PSR_DEBUG_MASK_DISP_REG_WRITE);
- } else {
- /* set up vsc header for psr1 */
- hsw_psr_setup_vsc(intel_dp);
- /*
- * Per Spec: Avoid continuous PSR exit by masking MEMUP
- * and HPD. also mask LPSP to avoid dependency on other
- * drivers that might block runtime_pm besides
- * preventing other hw tracking issues now we can rely
- * on frontbuffer tracking.
- */
- I915_WRITE(EDP_PSR_DEBUG_CTL,
- EDP_PSR_DEBUG_MASK_MEMUP |
- EDP_PSR_DEBUG_MASK_HPD |
- EDP_PSR_DEBUG_MASK_LPSP);
- }
-
- /* Enable PSR on the panel */
- hsw_psr_enable_sink(intel_dp);
-
- if (INTEL_GEN(dev_priv) >= 9)
- intel_psr_activate(intel_dp);
- } else {
- vlv_psr_setup_vsc(intel_dp);
-
- /* Enable PSR on the panel */
- vlv_psr_enable_sink(intel_dp);
-
- /* On HSW+ enable_source also means go to PSR entry/active
- * state as soon as idle_frame achieved and here would be
- * to soon. However on VLV enable_source just enable PSR
- * but let it on inactive state. So we might do this prior
- * to active transition, i.e. here.
- */
- vlv_psr_enable_source(intel_dp);
- }
+ intel_psr_setup_vsc(intel_dp, crtc_state);
+ intel_psr_enable_sink(intel_dp);
+ intel_psr_enable_source(intel_dp, crtc_state);
+ dev_priv->psr.enabled = intel_dp;
- /*
- * FIXME: Activation should happen immediately since this function
- * is just called after pipe is fully trained and enabled.
- * However on every platform we face issues when first activation
- * follows a modeset so quickly.
- * - On VLV/CHV we get bank screen on first activation
- * - On HSW/BDW we get a recoverable frozen screen until next
- * exit-activate sequence.
- */
- if (INTEL_GEN(dev_priv) < 9)
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+ intel_psr_activate(intel_dp);
- dev_priv->psr.enabled = intel_dp;
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
-static void vlv_psr_disable(struct intel_dp *intel_dp)
+static void
+intel_psr_disable_source(struct intel_dp *intel_dp)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc =
- to_intel_crtc(intel_dig_port->base.base.crtc);
- uint32_t val;
if (dev_priv->psr.active) {
- /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
- if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) &
- VLV_EDP_PSR_IN_TRANS) == 0, 1))
- WARN(1, "PSR transition took longer than expected\n");
+ i915_reg_t psr_status;
+ u32 psr_status_mask;
- val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- val &= ~VLV_EDP_PSR_ENABLE;
- val &= ~VLV_EDP_PSR_MODE_MASK;
- I915_WRITE(VLV_PSRCTL(intel_crtc->pipe), val);
+ if (dev_priv->psr.psr2_enabled) {
+ psr_status = EDP_PSR2_STATUS;
+ psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
- dev_priv->psr.active = false;
- } else {
- WARN_ON(vlv_is_psr_active_on_pipe(dev, intel_crtc->pipe));
- }
-}
-
-static void hsw_psr_disable(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct drm_device *dev = intel_dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- if (dev_priv->psr.active) {
- if (dev_priv->psr.aux_frame_sync)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
- 0);
-
- if (dev_priv->psr.psr2_support) {
I915_WRITE(EDP_PSR2_CTL,
- I915_READ(EDP_PSR2_CTL) &
- ~(EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE));
- /* Wait till PSR2 is idle */
- if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS_CTL,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 2000))
- DRM_ERROR("Timed out waiting for PSR2 Idle State\n");
+ I915_READ(EDP_PSR2_CTL) &
+ ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
+
} else {
+ psr_status = EDP_PSR_STATUS;
+ psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
+
I915_WRITE(EDP_PSR_CTL,
I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
- /* Wait till PSR1 is idle */
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 2000))
- DRM_ERROR("Timed out waiting for PSR Idle State\n");
}
+
+ /* Wait till PSR is idle */
+ if (intel_wait_for_register(dev_priv,
+ psr_status, psr_status_mask, 0,
+ 2000))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+
dev_priv->psr.active = false;
} else {
- if (dev_priv->psr.psr2_support)
+ if (dev_priv->psr.psr2_enabled)
WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE);
else
WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
}
}
+static void intel_psr_disable_locked(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.enabled)
+ return;
+
+ intel_psr_disable_source(intel_dp);
+
+ /* Disable PSR on Sink */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+
+ dev_priv->psr.enabled = NULL;
+}
+
/**
* intel_psr_disable - Disable PSR
* @intel_dp: Intel DP
+ * @old_crtc_state: old CRTC state
*
* This function needs to be called before disabling pipe.
*/
-void intel_psr_disable(struct intel_dp *intel_dp)
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
+ if (!old_crtc_state->has_psr)
return;
- }
- /* Disable PSR on Source */
- if (HAS_DDI(dev_priv))
- hsw_psr_disable(intel_dp);
- else
- vlv_psr_disable(intel_dp);
+ if (WARN_ON(!CAN_PSR(dev_priv)))
+ return;
- /* Disable PSR on Sink */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+ mutex_lock(&dev_priv->psr.lock);
+ intel_psr_disable_locked(intel_dp);
+ mutex_unlock(&dev_priv->psr.lock);
+ cancel_work_sync(&dev_priv->psr.work);
+}
+
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ * @out_value: PSR status in case of failure
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ *
+ * Returns: 0 on success or -ETIMEDOUT if PSR status does not idle.
+ */
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!new_crtc_state->has_psr)
+ return 0;
+
+ /* FIXME: Update this for PSR2 if we need to wait for idle */
+ if (READ_ONCE(dev_priv->psr.psr2_enabled))
+ return 0;
+
+ /*
+ * From bspec: Panel Self Refresh (BDW+)
+ * Max. time for PSR to idle = Inverse of the refresh rate + 6 ms of
+ * exit training time + 1.5 ms of aux channel handshake. 50 ms is
+ * defensive enough to cover everything.
+ */
+
+ return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+ EDP_PSR_STATUS_STATE_MASK,
+ EDP_PSR_STATUS_STATE_IDLE, 50,
+ out_value);
+}
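
To sanity-check the 50 ms budget in the comment above: at 60 Hz the worst
case works out to about 16.7 ms (one refresh) + 6 ms (exit training) +
1.5 ms (AUX handshake), roughly 24.2 ms, and even a 30 Hz panel would come
in around 40.8 ms, so 50 ms leaves headroom in both cases.
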
+
+static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_dp *intel_dp;
+ i915_reg_t reg;
+ u32 mask;
+ int err;
+
+ intel_dp = dev_priv->psr.enabled;
+ if (!intel_dp)
+ return false;
+
+ if (dev_priv->psr.psr2_enabled) {
+ reg = EDP_PSR2_STATUS;
+ mask = EDP_PSR2_STATUS_STATE_MASK;
+ } else {
+ reg = EDP_PSR_STATUS;
+ mask = EDP_PSR_STATUS_STATE_MASK;
+ }
- dev_priv->psr.enabled = NULL;
mutex_unlock(&dev_priv->psr.lock);
- cancel_delayed_work_sync(&dev_priv->psr.work);
+ err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ if (err)
+ DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+
+ /* After the unlocked wait, verify that PSR is still wanted! */
+ mutex_lock(&dev_priv->psr.lock);
+ return err == 0 && dev_priv->psr.enabled;
}
static void intel_psr_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.work.work);
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ container_of(work, typeof(*dev_priv), psr.work);
- /* We have to make sure PSR is ready for re-enable
+ mutex_lock(&dev_priv->psr.lock);
+
+ if (!dev_priv->psr.enabled)
+ goto unlock;
+
+ /*
+ * We have to make sure PSR is ready for re-enable
* otherwise it stays disabled until the next full enable/disable cycle.
* PSR might take some time to get fully disabled
* and be ready for re-enable.
*/
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.psr2_support) {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR2_STATUS_CTL,
- EDP_PSR2_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR2 Idle for re-enable\n");
- return;
- }
- } else {
- if (intel_wait_for_register(dev_priv,
- EDP_PSR_STATUS_CTL,
- EDP_PSR_STATUS_STATE_MASK,
- 0,
- 50)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
- }
- } else {
- if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) &
- VLV_EDP_PSR_IN_TRANS) == 0, 1)) {
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
- return;
- }
- }
- mutex_lock(&dev_priv->psr.lock);
- intel_dp = dev_priv->psr.enabled;
-
- if (!intel_dp)
+ if (!__psr_wait_for_idle_locked(dev_priv))
goto unlock;
/*
@@ -766,115 +842,38 @@ static void intel_psr_work(struct work_struct *work)
* recheck. Since psr_flush first clears this and then reschedules we
* won't ever miss a flush when bailing out here.
*/
- if (dev_priv->psr.busy_frontbuffer_bits)
+ if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
goto unlock;
- intel_psr_activate(intel_dp);
+ intel_psr_activate(dev_priv->psr.enabled);
unlock:
mutex_unlock(&dev_priv->psr.lock);
}
static void intel_psr_exit(struct drm_i915_private *dev_priv)
{
- struct intel_dp *intel_dp = dev_priv->psr.enabled;
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
u32 val;
if (!dev_priv->psr.active)
return;
- if (HAS_DDI(dev_priv)) {
- if (dev_priv->psr.aux_frame_sync)
- drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
- 0);
- if (dev_priv->psr.psr2_support) {
- val = I915_READ(EDP_PSR2_CTL);
- WARN_ON(!(val & EDP_PSR2_ENABLE));
- I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
- } else {
- val = I915_READ(EDP_PSR_CTL);
- WARN_ON(!(val & EDP_PSR_ENABLE));
- I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
- }
+ if (dev_priv->psr.psr2_enabled) {
+ val = I915_READ(EDP_PSR2_CTL);
+ WARN_ON(!(val & EDP_PSR2_ENABLE));
+ I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE);
} else {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /* Here we do the transition from PSR_state 3 to PSR_state 5
- * directly once PSR State 4 that is active with single frame
- * update can be skipped. PSR_state 5 that is PSR exit then
- * Hardware is responsible to transition back to PSR_state 1
- * that is PSR inactive. Same state after
- * vlv_edp_psr_enable_source.
- */
- val &= ~VLV_EDP_PSR_ACTIVE_ENTRY;
- I915_WRITE(VLV_PSRCTL(pipe), val);
-
- /* Send AUX wake up - Spec says after transitioning to PSR
- * active we have to send AUX wake up by writing 01h in DPCD
- * 600h of sink device.
- * XXX: This might slow down the transition, but without this
- * HW doesn't complete the transition to PSR_state 1 and we
- * never get the screen updated.
- */
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
- DP_SET_POWER_D0);
+ val = I915_READ(EDP_PSR_CTL);
+ WARN_ON(!(val & EDP_PSR_ENABLE));
+ I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE);
}
-
dev_priv->psr.active = false;
}
/**
- * intel_psr_single_frame_update - Single Frame Update
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * Some platforms support a single frame update feature that is used to
- * send and update only one frame on Remote Frame Buffer.
- * So far it is only implemented for Valleyview and Cherryview because
- * hardware requires this to be done before a page flip.
- */
-void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits)
-{
- struct drm_crtc *crtc;
- enum pipe pipe;
- u32 val;
-
- /*
- * Single frame update is already supported on BDW+ but it requires
- * many W/A and it isn't really needed.
- */
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
- return;
-
- mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.enabled) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
-
- crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
- val = I915_READ(VLV_PSRCTL(pipe));
-
- /*
- * We need to set this bit before writing registers for a flip.
- * This bit will be self-clear when it gets to the PSR active state.
- */
- I915_WRITE(VLV_PSRCTL(pipe), val | VLV_EDP_PSR_SINGLE_FRAME_UPDATE);
- }
- mutex_unlock(&dev_priv->psr.lock);
-}
-
-/**
* intel_psr_invalidate - Invalidate PSR
* @dev_priv: i915 device
* @frontbuffer_bits: frontbuffer plane tracking bits
- * @origin: which operation caused the invalidated
+ * @origin: which operation caused the invalidate
*
* Since the hardware frontbuffer tracking has gaps we need to integrate
* with the software frontbuffer tracking. This function gets called every
@@ -889,8 +888,10 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
- if (dev_priv->psr.has_hw_tracking &&
- (origin == ORIGIN_FLIP || origin == ORIGIN_CS))
+ if (!CAN_PSR(dev_priv))
+ return;
+
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -930,8 +931,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
struct drm_crtc *crtc;
enum pipe pipe;
- if (dev_priv->psr.has_hw_tracking &&
- (origin == ORIGIN_FLIP || origin == ORIGIN_CS))
+ if (!CAN_PSR(dev_priv))
+ return;
+
+ if (origin == ORIGIN_FLIP)
return;
mutex_lock(&dev_priv->psr.lock);
@@ -947,13 +950,25 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
- if (frontbuffer_bits)
- intel_psr_exit(dev_priv);
+ if (frontbuffer_bits) {
+ if (dev_priv->psr.psr2_enabled) {
+ intel_psr_exit(dev_priv);
+ } else {
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CUR_SURLIVE(pipe), 0);
+ }
+ }
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
- if (!work_busy(&dev_priv->psr.work.work))
- schedule_delayed_work(&dev_priv->psr.work,
- msecs_to_jiffies(100));
+ schedule_work(&dev_priv->psr.work);
mutex_unlock(&dev_priv->psr.lock);
}
@@ -966,16 +981,14 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
*/
void intel_psr_init(struct drm_i915_private *dev_priv)
{
+ if (!HAS_PSR(dev_priv))
+ return;
+
dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
- /* Per platform default */
- if (i915.enable_psr == -1) {
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- i915.enable_psr = 1;
- else
- i915.enable_psr = 0;
- }
+ if (!dev_priv->psr.sink_support)
+ return;
/* ChromeOS Baytrail devices were never tested with PSR, so disable it */
if (IS_VALLEYVIEW(dev_priv)) {
@@ -983,29 +996,75 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
i915.enable_psr = 0;
}
+ if (i915.enable_psr == -1) {
+ i915.enable_psr = dev_priv->vbt.psr.enable;
+
+ /* Per platform default: all disabled. */
+ i915.enable_psr = 0;
+ }
+
/* Set link_standby x link_off defaults */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
/* HSW and BDW require workarounds that we don't implement. */
dev_priv->psr.link_standby = false;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- /* On VLV and CHV only standby mode is supported. */
- dev_priv->psr.link_standby = true;
- } else {
+ else
/* For new platforms let's respect VBT back again */
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
- dev_priv->psr.has_hw_tracking = true;
+
+ INIT_WORK(&dev_priv->psr.work, intel_psr_work);
+ mutex_init(&dev_priv->psr.lock);
+}
+
+void intel_psr_short_pulse(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_psr *psr = &dev_priv->psr;
+ u8 val;
+ const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
+ DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
+ DP_PSR_LINK_CRC_ERROR;
+
+ if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+ return;
+
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled != intel_dp)
+ goto exit;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_STATUS dpcd read failed\n");
+ goto exit;
}
- /* Override link_standby x link_off defaults */
- if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing link standby\n");
- dev_priv->psr.link_standby = true;
+ if ((val & DP_PSR_SINK_STATE_MASK) == DP_PSR_SINK_INTERNAL_ERROR) {
+ DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ intel_psr_disable_locked(intel_dp);
}
- if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
- DRM_DEBUG_KMS("PSR: Forcing main link off\n");
- dev_priv->psr.link_standby = false;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ERROR_STATUS, &val) != 1) {
+ DRM_ERROR("PSR_ERROR_STATUS dpcd read failed\n");
+ goto exit;
}
- INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
- mutex_init(&dev_priv->psr.lock);
+ if (val & DP_PSR_RFB_STORAGE_ERROR)
+ DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
+ DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ if (val & DP_PSR_LINK_CRC_ERROR)
+ DRM_ERROR("PSR Link CRC error, disabling PSR\n");
+
+ if (val & ~errors)
+ DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
+ val & ~errors);
+ if (val & errors)
+ intel_psr_disable_locked(intel_dp);
+ /* clear status register */
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, val);
+
+ /* TODO: handle PSR2 errors */
+exit:
+ mutex_unlock(&psr->lock);
}
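
To make the error masking above concrete, assuming the usual drm_dp_helper
bit assignments (LINK_CRC = bit 0, RFB_STORAGE = bit 1, VSC_SDP = bit 2, so
errors = 0x07): a hypothetical status read of val = 0x0a would log bit 3
through the val & ~errors path, disable PSR through val & errors (bit 1,
the RFB storage error), and then write the full value back to clear the
sink's status register.
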
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 94bf334a063ee1..4fbcb0acff13e9 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -645,19 +645,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
DRM_DEBUG_KMS("Enabling DC6\n");
- gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
+ /* Wa Display #1183: skl,kbl,cfl */
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
+ SKL_SELECT_ALTERNATE_DC_EXIT);
+ gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
void skl_disable_dc6(struct drm_i915_private *dev_priv)
{
DRM_DEBUG_KMS("Disabling DC6\n");
- /* Wa Display #1183: skl,kbl,cfl */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
-
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6eee577d042d25..7c0970606c79ee 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1360,6 +1360,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
u8 val;
bool ret;
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
+
sdvox = I915_READ(intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 038ea12546208d..461f209cd5e695 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -71,8 +71,7 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
/**
* intel_pipe_update_start() - start update of a set of display registers
- * @crtc: the crtc of which the registers are going to be updated
- * @start_vbl_count: vblank counter return pointer used for error checking
+ * @new_crtc_state: the new crtc state
*
* Mark the start of an update to pipe registers that should be updated
* atomically regarding vblank. If the next vblank will happen within
@@ -80,16 +79,17 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
*
* After a successful call to this function, interrupts will be disabled
* until a subsequent call to intel_pipe_update_end(). That is done to
- * avoid random delays. The value written to @start_vbl_count should be
- * supplied to intel_pipe_update_end() for error checking.
+ * avoid random delays.
*/
-void intel_pipe_update_start(struct intel_crtc *crtc)
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
{
- const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ const struct drm_display_mode *adjusted_mode = &new_crtc_state->base.adjusted_mode;
long timeout = msecs_to_jiffies_timeout(1);
int scanline, min, max, vblank_start;
wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
DEFINE_WAIT(wait);
+ u32 psr_status;
vblank_start = adjusted_mode->crtc_vblank_start;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -100,13 +100,22 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
- local_irq_disable();
-
if (min <= 0 || max <= 0)
- return;
+ goto irq_disable;
if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
- return;
+ goto irq_disable;
+
+ /*
+ * Wait for PSR to idle out after enabling the VBL interrupts.
+ * VBL interrupts will start the PSR exit and prevent a PSR
+ * re-entry as well.
+ */
+ if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
+ DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
+
+ local_irq_disable();
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
@@ -146,19 +155,23 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
trace_i915_pipe_update_vblank_evaded(crtc);
+ return;
+
+irq_disable:
+ local_irq_disable();
}
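
For scale, with the illustrative 1080p60 timings used earlier (about 14.8 us
per scanline, vblank_start = 1080) and assuming VBLANK_EVASION_TIME_US is
100, the window computed above is min = 1080 - 7 = 1073 to max = 1079; the
update only starts once the scanline counter sits in those last few lines
before vblank, and the PSR idle wait now runs before local_irq_disable() so
a slow PSR exit cannot eat into that window.
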
/**
* intel_pipe_update_end() - end update of a set of display registers
- * @crtc: the crtc of which the registers were updated
- * @start_vbl_count: start vblank counter (used for error checking)
+ * @new_crtc_state: the new crtc state
*
* Mark the end of an update started with intel_pipe_update_start(). This
* re-enables interrupts and verifies the update was actually completed
- * before a vblank using the value of @start_vbl_count.
+ * before a vblank.
*/
-void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state, struct intel_flip_work *work)
{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
@@ -176,14 +189,14 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
* Would be slightly nice to just grab the vblank count and arm the
* event outside of the critical section - the spinlock might spin for a
* while ... */
- if (crtc->base.state->event) {
+ if (new_crtc_state->base.event) {
WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
- drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
+ drm_crtc_arm_vblank_event(&crtc->base, new_crtc_state->base.event);
spin_unlock(&crtc->base.dev->event_lock);
- crtc->base.state->event = NULL;
+ new_crtc_state->base.event = NULL;
}
local_irq_enable();
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d6ce559b1138e8..653b15568eabb1 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -907,6 +907,8 @@ static void
intel_tv_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
+
pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock;
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 28c8370ff2b193..09a1accc87a10a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1551,17 +1551,23 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
}
/**
- * intel_wait_for_register_fw - wait until register matches expected state
+ * __intel_wait_for_register_fw - wait until register matches expected state
* @dev_priv: the i915 device
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
- * @timeout_ms: timeout in millisecond
+ * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
+ * @slow_timeout_ms: slow timeout in milliseconds
+ * @out_value: optional placeholder to hold the register value
*
* This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- * (I915_READ_FW(@reg) & @mask) == @value
- * Otherwise, the wait will timeout after @timeout_ms milliseconds.
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ * (I915_READ_FW(reg) & mask) == value
+ *
+ * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
+ * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
+ * must not be larger than 20,000 microseconds.
*
* Note that this routine assumes the caller holds forcewake asserted, it is
* not suitable for very long waits. See intel_wait_for_register() if you
@@ -1570,16 +1576,31 @@ static int gen6_reset_engines(struct drm_i915_private *dev_priv,
*
* Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- const u32 mask,
- const u32 value,
- const unsigned long timeout_ms)
-{
-#define done ((I915_READ_FW(reg) & mask) == value)
- int ret = wait_for_us(done, 2);
+int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
+{
+ u32 reg_value;
+#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
+ int ret;
+
+ /* Catch any overuse of this function */
+ might_sleep_if(slow_timeout_ms);
+ GEM_BUG_ON(fast_timeout_us > 20000);
+
+ ret = -ETIMEDOUT;
+ if (fast_timeout_us && fast_timeout_us <= 20000)
+ ret = _wait_for_atomic(done, fast_timeout_us, 0);
if (ret)
- ret = wait_for(done, timeout_ms);
+ ret = wait_for(done, slow_timeout_ms);
+
+ if (out_value)
+ *out_value = reg_value;
+
return ret;
#undef done
}
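
A usage sketch for the reworked wait (register, mask and timeouts chosen for
illustration, not taken from this patch): spin atomically for up to 10 us,
then sleep-wait for up to 10 ms, capturing the last observed value on
timeout:

	u32 val;
	int err;

	err = __intel_wait_for_register_fw(dev_priv, GEN6_GDRST,
					   GEN6_GRDOM_FULL, 0,
					   10, 10, &val);
	if (err)
		DRM_DEBUG_DRIVER("wait timed out, GDRST=0x%08x\n", val);
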
@@ -1594,8 +1615,10 @@ int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
* @out_value: optional placeholder to hold the register value
*
* This routine waits until the target register @reg contains the expected
- * @value after applying the @mask, i.e. it waits until
- * (I915_READ(@reg) & @mask) == @value
+ * @value after applying the @mask, i.e. it waits until ::
+ *
+ * (I915_READ(reg) & mask) == value
+ *
* Otherwise, the wait will time out after @timeout_ms milliseconds.
*
* Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
@@ -1607,15 +1630,23 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
const unsigned long timeout_ms,
u32 *out_value)
{
-
unsigned fw =
intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
u32 reg_value;
int ret;
- intel_uncore_forcewake_get(dev_priv, fw);
- ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
- intel_uncore_forcewake_put(dev_priv, fw);
+ might_sleep();
+
+ spin_lock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_get__locked(dev_priv, fw);
+
+ ret = __intel_wait_for_register_fw(dev_priv,
+ reg, mask, value,
+ 2, 0, &reg_value);
+
+ intel_uncore_forcewake_put__locked(dev_priv, fw);
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
if (ret)
ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
(reg_value & mask) == value,
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index ee45e5714d59fb..e3f820362e9fea 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -149,16 +149,19 @@ struct bdb_general_features {
u8 ssc_freq:1;
u8 enable_lfp_on_override:1;
u8 disable_ssc_ddt:1;
- u8 rsvd7:1;
+ u8 underscan_vga_timings:1;
u8 display_clock_mode:1;
- u8 rsvd8:1; /* finish byte */
+ u8 vbios_hotplug_support:1;
/* bits 3 */
u8 disable_smooth_vision:1;
u8 single_dvi:1;
- u8 rsvd9:1;
+ u8 rotate_180:1; /* 181 */
u8 fdi_rx_polarity_inverted:1;
- u8 rsvd10:4; /* finish byte */
+ u8 vbios_extended_mode:1; /* 160 */
+ u8 copy_ilfp_dtd_to_sdvo_lvds_dtd:1; /* 160 */
+ u8 panel_best_fit_timing:1; /* 160 */
+ u8 ignore_strap_state:1; /* 160 */
/* bits 4 */
u8 legacy_monitor_detect;
@@ -167,9 +170,10 @@ struct bdb_general_features {
u8 int_crt_support:1;
u8 int_tv_support:1;
u8 int_efp_support:1;
- u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */
+ u8 dp_ssc_enable:1; /* PCH attached eDP supports SSC */
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */
- u8 rsvd11:3; /* finish byte */
+ u8 dp_ssc_dongle_supported:1;
+ u8 rsvd11:2; /* finish byte */
} __packed;
/* pre-915 */
@@ -206,6 +210,56 @@ struct bdb_general_features {
#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
+/* Add the device class for LFP, TV, HDMI */
+#define DEVICE_TYPE_INT_LFP 0x1022
+#define DEVICE_TYPE_INT_TV 0x1009
+#define DEVICE_TYPE_HDMI 0x60D2
+#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
+#define DEVICE_TYPE_eDP 0x78C6
+
+#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
+#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
+#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
+#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
+#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
+#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
+#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
+#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
+#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
+#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
+#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
+#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
+#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
+#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
+#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
+
+/*
+ * Bits we care about when checking for DEVICE_TYPE_eDP. Depending on the
+ * system, the other bits may or may not be set for eDP outputs.
+ */
+#define DEVICE_TYPE_eDP_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_DUAL_CHANNEL | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
#define DEVICE_CFG_NONE 0x00
#define DEVICE_CFG_12BIT_DVOB 0x01
#define DEVICE_CFG_12BIT_DVOC 0x02
@@ -226,77 +280,128 @@ struct bdb_general_features {
#define DEVICE_WIRE_DVOB_MASTER 0x0d
#define DEVICE_WIRE_DVOC_MASTER 0x0e
+/* dvo_port pre BDB 155 */
#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
#define DEVICE_PORT_DVOB 0x01
#define DEVICE_PORT_DVOC 0x02
+/* dvo_port BDB 155+ */
+#define DVO_PORT_HDMIA 0
+#define DVO_PORT_HDMIB 1
+#define DVO_PORT_HDMIC 2
+#define DVO_PORT_HDMID 3
+#define DVO_PORT_LVDS 4
+#define DVO_PORT_TV 5
+#define DVO_PORT_CRT 6
+#define DVO_PORT_DPB 7
+#define DVO_PORT_DPC 8
+#define DVO_PORT_DPD 9
+#define DVO_PORT_DPA 10
+#define DVO_PORT_DPE 11 /* 193 */
+#define DVO_PORT_HDMIE 12 /* 193 */
+#define DVO_PORT_MIPIA 21 /* 171 */
+#define DVO_PORT_MIPIB 22 /* 171 */
+#define DVO_PORT_MIPIC 23 /* 171 */
+#define DVO_PORT_MIPID 24 /* 171 */
+
+#define LEGACY_CHILD_DEVICE_CONFIG_SIZE 33
+
/*
- * We used to keep this struct but without any version control. We should avoid
- * using it in the future, but it should be safe to keep using it in the old
- * code. Do not change; we rely on its size.
+ * The child device config, aka the display device data structure, provides a
+ * description of a port and its configuration on the platform.
+ *
+ * The child device config size has been increased, and fields have been added
+ * and their meaning has changed over time. Care must be taken when accessing
+ * basically any of the fields to ensure the correct interpretation for the BDB
+ * version in question.
+ *
+ * When we copy the child device configs to dev_priv->vbt.child_dev, we reserve
+ * space for the full structure below, and initialize the tail not actually
+ * present in VBT to zeros. Accessing those fields is fine, as long as the
+ * default zero is taken into account, again according to the BDB version.
+ *
+ * BDB versions 155 and below are considered legacy, and version 155 seems to be
+ * a baseline for some of the VBT documentation. When adding new fields, please
+ * include the BDB version when the field was added, if it's above that.
*/
-struct old_child_dev_config {
+struct child_device_config {
u16 handle;
- u16 device_type;
- u8 device_id[10]; /* ascii string */
- u16 addin_offset;
- u8 dvo_port; /* See Device_PORT_* above */
- u8 i2c_pin;
- u8 slave_addr;
- u8 ddc_pin;
- u16 edid_ptr;
- u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 dvo2_port;
- u8 i2c2_pin;
- u8 slave2_addr;
- u8 ddc2_pin;
- u8 capabilities;
- u8 dvo_wiring;/* See DEVICE_WIRE_* above */
- u8 dvo2_wiring;
- u16 extended_type;
- u8 dvo_function;
-} __packed;
+ u16 device_type; /* See DEVICE_TYPE_* above */
+
+ union {
+ u8 device_id[10]; /* ascii string */
+ struct {
+ u8 i2c_speed;
+ u8 dp_onboard_redriver; /* 158 */
+ u8 dp_ondock_redriver; /* 158 */
+ u8 hdmi_level_shifter_value:5; /* 169 */
+ u8 hdmi_max_data_rate:3; /* 204 */
+ u16 dtd_buf_ptr; /* 161 */
+ u8 edidless_efp:1; /* 161 */
+ u8 compression_enable:1; /* 198 */
+ u8 compression_method:1; /* 198 */
+ u8 ganged_edp:1; /* 202 */
+ u8 reserved0:4;
+ u8 compression_structure_index:4; /* 198 */
+ u8 reserved1:4;
+ u8 slave_port; /* 202 */
+ u8 reserved2;
+ } __packed;
+ } __packed;
-/* This one contains field offsets that are known to be common for all BDB
- * versions. Notice that the meaning of the contents contents may still change,
- * but at least the offsets are consistent. */
-
-struct common_child_dev_config {
- u16 handle;
- u16 device_type;
- u8 not_common1[12];
- u8 dvo_port;
- u8 not_common2[2];
+ u16 addin_offset;
+ u8 dvo_port; /* See DEVICE_PORT_* and DVO_PORT_* above */
+ u8 i2c_pin;
+ u8 slave_addr;
u8 ddc_pin;
u16 edid_ptr;
u8 dvo_cfg; /* See DEVICE_CFG_* above */
- u8 efp_routed:1;
- u8 lane_reversal:1;
- u8 lspcon:1;
- u8 iboost:1;
- u8 hpd_invert:1;
- u8 flag_reserved:3;
- u8 hdmi_support:1;
- u8 dp_support:1;
- u8 tmds_support:1;
- u8 support_reserved:5;
- u8 aux_channel;
- u8 not_common3[11];
- u8 iboost_level;
-} __packed;
+ union {
+ struct {
+ u8 dvo2_port;
+ u8 i2c2_pin;
+ u8 slave2_addr;
+ u8 ddc2_pin;
+ } __packed;
+ struct {
+ u8 efp_routed:1; /* 158 */
+ u8 lane_reversal:1; /* 184 */
+ u8 lspcon:1; /* 192 */
+ u8 iboost:1; /* 196 */
+ u8 hpd_invert:1; /* 196 */
+ u8 flag_reserved:3;
+ u8 hdmi_support:1; /* 158 */
+ u8 dp_support:1; /* 158 */
+ u8 tmds_support:1; /* 158 */
+ u8 support_reserved:5;
+ u8 aux_channel;
+ u8 dongle_detect;
+ } __packed;
+ } __packed;
+
+ u8 pipe_cap:2;
+ u8 sdvo_stall:1; /* 158 */
+ u8 hpd_status:2;
+ u8 integrated_encoder:1;
+ u8 capabilities_reserved:2;
+ u8 dvo_wiring; /* See DEVICE_WIRE_* above */
+
+ union {
+ u8 dvo2_wiring;
+ u8 mipi_bridge_type; /* 171 */
+ } __packed;
-/* This field changes depending on the BDB version, so the most reliable way to
- * read it is by checking the BDB version and reading the raw pointer. */
-union child_device_config {
- /* This one is safe to be used anywhere, but the code should still check
- * the BDB version. */
- u8 raw[33];
- /* This one should only be kept for legacy code. */
- struct old_child_dev_config old;
- /* This one should also be safe to use anywhere, even without version
- * checks. */
- struct common_child_dev_config common;
+ u16 extended_type;
+ u8 dvo_function;
+ u8 dp_usb_type_c:1; /* 195 */
+ u8 flags2_reserved:7; /* 195 */
+ u8 dp_gpio_index; /* 195 */
+ u16 dp_gpio_pin_num; /* 195 */
+ u8 dp_iboost_level:4; /* 196 */
+ u8 hdmi_iboost_level:4; /* 196 */
+ u8 dp_max_link_rate:2; /* 216 CNL+ */
+ u8 dp_max_link_rate_reserved:6; /* 216 */
} __packed;
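
A minimal sketch of the copy scheme the comment above describes, with
illustrative names (the parser's real variables are not shown in this hunk):
allocate the full structure zeroed, then copy only the bytes this BDB
version provides, so fields newer than the VBT read back as 0:

	static struct child_device_config *
	copy_child_config(const void *vbt_child, size_t vbt_child_size)
	{
		struct child_device_config *dev;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return NULL;

		/* The tail beyond vbt_child_size stays zero-initialized. */
		memcpy(dev, vbt_child, min(sizeof(*dev), vbt_child_size));
		return dev;
	}
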
struct bdb_general_definitions {
@@ -576,23 +681,38 @@ struct bdb_driver_features {
#define EDP_VSWING_1_2V 3
-struct edp_link_params {
+struct edp_fast_link_params {
u8 rate:4;
u8 lanes:4;
u8 preemphasis:4;
u8 vswing:4;
} __packed;
+struct edp_pwm_delays {
+ u16 pwm_on_to_backlight_enable;
+ u16 backlight_disable_to_pwm_off;
+} __packed;
+
+struct edp_full_link_params {
+ u8 preemphasis:4;
+ u8 vswing:4;
+} __packed;
+
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
- struct edp_link_params link_params[16];
+ struct edp_fast_link_params fast_link_params[16];
u32 sdrrs_msa_timing_delay;
/* ith bit indicates enabled/disabled for (i+1)th panel */
- u16 edp_s3d_feature;
- u16 edp_t3_optimization;
- u64 edp_vswing_preemph; /* v173 */
+ u16 edp_s3d_feature; /* 162 */
+ u16 edp_t3_optimization; /* 165 */
+ u64 edp_vswing_preemph; /* 173 */
+ u16 fast_link_training; /* 182 */
+ u16 dpcd_600h_write_required; /* 185 */
+ struct edp_pwm_delays pwm_delays[16]; /* 186 */
+ u16 full_link_params_provided; /* 199 */
+ struct edp_full_link_params full_link_params[16]; /* 199 */
} __packed;
struct psr_table {
@@ -736,81 +856,6 @@ struct bdb_psr {
#define SWF14_APM_STANDBY 0x1
#define SWF14_APM_RESTORE 0x0
-/* Add the device class for LFP, TV, HDMI */
-#define DEVICE_TYPE_INT_LFP 0x1022
-#define DEVICE_TYPE_INT_TV 0x1009
-#define DEVICE_TYPE_HDMI 0x60D2
-#define DEVICE_TYPE_DP 0x68C6
-#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
-#define DEVICE_TYPE_eDP 0x78C6
-
-#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
-#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14)
-#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13)
-#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12)
-#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11)
-#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10)
-#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9)
-#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8)
-#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6)
-#define DEVICE_TYPE_LVDS_SINGALING (1 << 5)
-#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4)
-#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3)
-#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2)
-#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1)
-#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0)
-
-/*
- * Bits we care about when checking for DEVICE_TYPE_eDP
- * Depending on the system, the other bits may or may not
- * be set for eDP outputs.
- */
-#define DEVICE_TYPE_eDP_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_DUAL_CHANNEL | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
- (DEVICE_TYPE_INTERNAL_CONNECTOR | \
- DEVICE_TYPE_MIPI_OUTPUT | \
- DEVICE_TYPE_COMPOSITE_OUTPUT | \
- DEVICE_TYPE_LVDS_SINGALING | \
- DEVICE_TYPE_TMDS_DVI_SIGNALING | \
- DEVICE_TYPE_VIDEO_SIGNALING | \
- DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
- DEVICE_TYPE_DIGITAL_OUTPUT | \
- DEVICE_TYPE_ANALOG_OUTPUT)
-
-/* define the DVO port for HDMI output type */
-#define DVO_B 1
-#define DVO_C 2
-#define DVO_D 3
-
-/* Possible values for the "DVO Port" field for versions >= 155: */
-#define DVO_PORT_HDMIA 0
-#define DVO_PORT_HDMIB 1
-#define DVO_PORT_HDMIC 2
-#define DVO_PORT_HDMID 3
-#define DVO_PORT_LVDS 4
-#define DVO_PORT_TV 5
-#define DVO_PORT_CRT 6
-#define DVO_PORT_DPB 7
-#define DVO_PORT_DPC 8
-#define DVO_PORT_DPD 9
-#define DVO_PORT_DPA 10
-#define DVO_PORT_DPE 11
-#define DVO_PORT_HDMIE 12
-#define DVO_PORT_MIPIA 21
-#define DVO_PORT_MIPIB 22
-#define DVO_PORT_MIPIC 23
-#define DVO_PORT_MIPID 24
-
/* Block 52 contains MIPI configuration block
* 6 * bdb_mipi_config, followed by 6 pps data block
* block below
diff --git a/drivers/gpu/drm/img-rogue/1.10/Makefile b/drivers/gpu/drm/img-rogue/1.10/Makefile
new file mode 100644
index 00000000000000..1a4e5c2e506ea4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/Makefile
@@ -0,0 +1,15 @@
+img_basedir := drivers/gpu/drm/img-rogue/1.10
+include $(img_basedir)/config_kernel.mk
+
+obj-$(CONFIG_DRM_POWERVR_ROGUE_1_10) += pvrsrvkm_1_10.o
+
+ccflags-y += \
+ -include config_kernel.h \
+ -Iinclude/drm \
+ -I$(img_basedir) \
+ -I$(img_basedir)/km \
+ -I$(img_basedir)/system \
+ -D__linux__
+
+include $(img_basedir)/pvrsrvkm.mk
+include $(img_basedir)/mt8173/Makefile
diff --git a/drivers/gpu/drm/img-rogue/1.10/allocmem.c b/drivers/gpu/drm/img-rogue/1.10/allocmem.c
new file mode 100644
index 00000000000000..bd0a5aed40d42e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/allocmem.c
@@ -0,0 +1,455 @@
+/*************************************************************************/ /*!
+@File
+@Title Host memory management implementation for Linux
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "osfunc.h"
+
+#if defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#define ALLOCMEM_MEMSTATS_PADDING 0
+#else
+#define ALLOCMEM_MEMSTATS_PADDING sizeof(IMG_UINT32)
+#endif
+
+static inline void _pvr_vfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+	/* The size is harder to come by for vmalloc and, since vmalloc
+	 * allocates a whole number of pages, poison only the minimum size
+	 * known to have been allocated.
+	 */
+ OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+ PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD);
+#endif
+ vfree(pvAddr);
+}
+
+static inline void _pvr_kfree(const void* pvAddr)
+{
+#if defined(DEBUG)
+ /* Poison whole memory block */
+ OSCachedMemSet((void*)pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+ ksize(pvAddr));
+#endif
+ kfree(pvAddr);
+}
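
The two helpers above implement a poison-on-free pattern: overwrite a block with a recognisable byte before releasing it so that stale reads after free stand out in a memory dump. A minimal standalone sketch of the same idea, assuming plain slab calls and an illustrative poison byte (the helper name and EXAMPLE_POISON_BYTE are hypothetical, not driver API):

#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_POISON_BYTE 0x5a	/* illustrative; the driver uses PVRSRV_POISON_ON_ALLOC_VALUE */

static void example_poison_and_free(void *ptr)
{
	if (!ptr)
		return;
	/* ksize() reports the usable slab-object size, which may exceed
	 * the size originally requested, so the whole block is poisoned */
	memset(ptr, EXAMPLE_POISON_BYTE, ksize(ptr));
	kfree(ptr);
}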
+
+#if !defined(PVRSRV_ENABLE_PROCESS_STATS)
+void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kmalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kzalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMem)(void *pvMem)
+{
+ if (pvMem != NULL)
+ {
+ if (!is_vmalloc_addr(pvMem))
+ {
+ _pvr_kfree(pvMem);
+ }
+ else
+ {
+ _pvr_vfree(pvMem);
+ }
+ }
+}
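
The parenthesised function name relies on a standard C preprocessor rule: a function-like macro only expands when its name is immediately followed by '('. A minimal sketch of the trick, with hypothetical names:

#include <stdio.h>

void Release(void *p);	/* plain declaration */

/* Wrapper macro, in the same spirit as the OSFreeMem macro in allocmem.h;
 * the Release(_p) inside the body is not re-expanded (self-reference rule) */
#define Release(_p) do { printf("via macro\n"); Release(_p); } while (0)

/* '(Release)' defeats macro expansion, so this defines the real function */
void (Release)(void *p)
{
	printf("real function: %p\n", p);
}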
+#else
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kmalloc(ui32Size, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+
+ if (!is_vmalloc_addr(pvRet))
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL,
+ OSGetCurrentClientProcessIDKM(),
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ else
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL,
+ OSGetCurrentClientProcessIDKM(),
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ }
+ return pvRet;
+}
+
+void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kzalloc(ui32Size, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+ if (!is_vmalloc_addr(pvRet))
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL,
+ OSGetCurrentClientProcessIDKM(),
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ else
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL,
+ OSGetCurrentClientProcessIDKM(),
+ pvAllocFromFile,
+ ui32AllocFromLine);
+ }
+ }
+ return pvRet;
+}
+#else
+void *OSAllocMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+		/* Allocate additional padding bytes (4 when kmalloc memstats are enabled) to store the PID of the allocating process */
+ pvRet = kmalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+
+ if (!is_vmalloc_addr(pvRet))
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ /* Store the PID in the final additional 4 bytes allocated */
+ IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+ *puiTemp = OSGetCurrentProcessID();
+ }
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet), OSGetCurrentClientProcessIDKM());
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ }
+ else
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ (IMG_UINT64)(uintptr_t) pvRet,
+ OSGetCurrentClientProcessIDKM());
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ }
+ }
+ return pvRet;
+}
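
The padding trick above means the owning PID can later be recovered from the tail of the block, which is what OSFreeMem() below does when decrementing the stats. A hedged sketch of that layout arithmetic (read_owner_pid() is a hypothetical helper, not driver API):

static IMG_UINT32 read_owner_pid(void *pvRet)
{
	/* ksize() gives the usable kmalloc block size; the PID was written
	 * into the final ALLOCMEM_MEMSTATS_PADDING bytes of that block */
	IMG_BYTE *pbEnd = (IMG_BYTE *)pvRet + ksize(pvRet);

	return *(IMG_UINT32 *)(pbEnd - ALLOCMEM_MEMSTATS_PADDING);
}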
+
+void *OSAllocZMem(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if ((ui32Size + ALLOCMEM_MEMSTATS_PADDING) > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+		/* Allocate additional padding bytes (4 when kmalloc memstats are enabled) to store the PID of the allocating process */
+ pvRet = kzalloc(ui32Size + ALLOCMEM_MEMSTATS_PADDING, GFP_KERNEL);
+ }
+
+ if (pvRet != NULL)
+ {
+ if (!is_vmalloc_addr(pvRet))
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ /* Store the PID in the final additional 4 bytes allocated */
+ IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvRet) + (ksize(pvRet) - ALLOCMEM_MEMSTATS_PADDING));
+ *puiTemp = OSGetCurrentProcessID();
+ }
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, ksize(pvRet), OSGetCurrentClientProcessIDKM());
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ksize(pvRet),
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ }
+ else
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ (IMG_UINT64)(uintptr_t) pvRet,
+ OSGetCurrentClientProcessIDKM());
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ pvRet,
+ sCpuPAddr,
+ ((ui32Size + PAGE_SIZE -1) & ~(PAGE_SIZE-1)),
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ }
+ }
+ return pvRet;
+}
+#endif
+
+/*
+ * The parentheses around OSFreeMem prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMem)(void *pvMem)
+{
+ if (pvMem != NULL)
+ {
+ if (!is_vmalloc_addr(pvMem))
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ IMG_UINT32 *puiTemp = (IMG_UINT32*) (((IMG_BYTE*)pvMem) + (ksize(pvMem) - ALLOCMEM_MEMSTATS_PADDING));
+ PVRSRVStatsDecrMemKAllocStat(ksize(pvMem), *puiTemp);
+ }
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ (IMG_UINT64)(uintptr_t) pvMem,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ _pvr_kfree(pvMem);
+ }
+ else
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ (IMG_UINT64)(uintptr_t) pvMem);
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMALLOC,
+ (IMG_UINT64)(uintptr_t) pvMem,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ _pvr_vfree(pvMem);
+ }
+ }
+}
+#endif
+
+
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vmalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kmalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size)
+{
+ void *pvRet = NULL;
+
+ if (ui32Size > PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD)
+ {
+ pvRet = vzalloc(ui32Size);
+ }
+ if (pvRet == NULL)
+ {
+ pvRet = kzalloc(ui32Size, GFP_KERNEL);
+ }
+
+ return pvRet;
+}
+
+/*
+ * The parentheses around OSFreeMemNoStats prevent the macro in allocmem.h from
+ * applying, as it would break the function's definition.
+ */
+void (OSFreeMemNoStats)(void *pvMem)
+{
+ if (pvMem != NULL)
+ {
+ if ( !is_vmalloc_addr(pvMem) )
+ {
+ _pvr_kfree(pvMem);
+ }
+ else
+ {
+ _pvr_vfree(pvMem);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/allocmem.h b/drivers/gpu/drm/img-rogue/1.10/allocmem.h
new file mode 100644
index 00000000000000..decef9925c983b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/allocmem.h
@@ -0,0 +1,176 @@
+/*************************************************************************/ /*!
+@File allocmem.h
+@Title memory allocation header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory-Allocation API definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __ALLOCMEM_H__
+#define __ALLOCMEM_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) || !defined(DEBUG) || !defined(PVRSRV_ENABLE_PROCESS_STATS) || !defined(PVRSRV_ENABLE_MEMORY_STATS)
+/**************************************************************************/ /*!
+@Function OSAllocMem
+@Description Allocates CPU memory. Contents are uninitialized.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMem(IMG_UINT32 ui32Size);
+/**************************************************************************/ /*!
+@Function OSAllocZMem
+@Description Allocates CPU memory and initializes the contents to zero.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMem(IMG_UINT32 ui32Size);
+#else
+void *_OSAllocMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+void *_OSAllocZMem(IMG_UINT32 ui32Size, void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#define OSAllocMem(_size) \
+	_OSAllocMem ((_size), (__FILE__), (__LINE__))
+#define OSAllocZMem(_size) \
+	_OSAllocZMem ((_size), (__FILE__), (__LINE__))
+#endif
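
Routing OSAllocMem through a macro is what makes __FILE__ and __LINE__ report the caller's location rather than allocmem.c's, since the preprocessor substitutes them at the point of use. A self-contained sketch of the pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

static void *alloc_traced(size_t size, const char *file, int line)
{
	printf("alloc %zu bytes at %s:%d\n", size, file, line);
	return malloc(size);
}

/* Expanded at the call site, so file/line identify the caller */
#define ALLOC_TRACED(_size) alloc_traced((_size), __FILE__, __LINE__)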
+
+/**************************************************************************/ /*!
+@Function OSAllocMemNoStats
+@Description Allocates CPU memory. Contents are uninitialized.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+ The allocated memory is not accounted for by process stats.
+ Process stats are an optional feature (enabled only when
+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+ of memory allocated to help in debugging. Where this is not
+ required, OSAllocMem() and OSAllocMemNoStats() equate to
+ the same operation.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function OSAllocZMemNoStats
+@Description Allocates CPU memory and initializes the contents to zero.
+ If passed a size of zero, function should not assert,
+ but just return a NULL pointer.
+ The allocated memory is not accounted for by process stats.
+ Process stats are an optional feature (enabled only when
+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+ of memory allocated to help in debugging. Where this is not
+ required, OSAllocZMem() and OSAllocZMemNoStats() equate to
+ the same operation.
+@Input ui32Size Size of required allocation (in bytes)
+@Return Pointer to allocated memory on success.
+ Otherwise NULL.
+ */ /**************************************************************************/
+void *OSAllocZMemNoStats(IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+@Function OSFreeMem
+@Description Frees previously allocated CPU memory.
+@Input pvCpuVAddr Pointer to the memory to be freed.
+@Return None.
+ */ /**************************************************************************/
+void OSFreeMem(void *pvCpuVAddr);
+
+/**************************************************************************/ /*!
+@Function OSFreeMemNoStats
+@Description Frees previously allocated CPU memory.
+ The freed memory does not update the figures in process stats.
+ Process stats are an optional feature (enabled only when
+ PVRSRV_ENABLE_PROCESS_STATS is defined) which track the amount
+ of memory allocated to help in debugging. Where this is not
+ required, OSFreeMem() and OSFreeMemNoStats() equate to the
+ same operation.
+@Input pvCpuVAddr Pointer to the memory to be freed.
+@Return None.
+ */ /**************************************************************************/
+void OSFreeMemNoStats(void *pvCpuVAddr);
+
+/*
+ * These macros allow us to catch double-free bugs on DEBUG builds and
+ * prevent crashes on RELEASE builds.
+ */
+
+#if defined(DEBUG)
+#define double_free_sentinel (void*) &OSFreeMem
+#define ALLOCMEM_ASSERT(exp) PVR_ASSERT(exp)
+#else
+#define double_free_sentinel NULL
+#define ALLOCMEM_ASSERT(exp) do {} while(0)
+#endif
+
+#define OSFreeMem(_ptr) do { \
+ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+ (OSFreeMem)(_ptr); \
+ (_ptr) = double_free_sentinel; \
+ MSC_SUPPRESS_4127 \
+ } while (0)
+
+#define OSFreeMemNoStats(_ptr) do { \
+ ALLOCMEM_ASSERT((_ptr) != double_free_sentinel); \
+ (OSFreeMemNoStats)(_ptr); \
+ (_ptr) = double_free_sentinel; \
+ MSC_SUPPRESS_4127 \
+ } while (0)
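
How these pieces fit together at a call site, as a hedged sketch (example() is hypothetical): on DEBUG builds the sentinel is a non-NULL function address, so a repeated free trips the assert; on RELEASE builds the sentinel is NULL, which the underlying (OSFreeMem)() silently ignores:

static void example(void)
{
	void *p = OSAllocMem(64);

	OSFreeMem(p);	/* frees p, then rewrites p = double_free_sentinel */
	OSFreeMem(p);	/* DEBUG: ALLOCMEM_ASSERT fires; RELEASE: p is NULL, no-op */
}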
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __ALLOCMEM_H__ */
+
+/******************************************************************************
+ End of file (allocmem.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/cache_km.c b/drivers/gpu/drm/img-rogue/1.10/cache_km.c
new file mode 100644
index 00000000000000..0efcf7568ac28b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/cache_km.c
@@ -0,0 +1,3523 @@
+/*************************************************************************/ /*!
+@File cache_km.c
+@Title CPU d-cache maintenance operations framework
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements server side code for CPU d-cache maintenance taking
+ into account the idiosyncrasies of the various types of CPU
+ d-cache instruction-set architecture (ISA) maintenance
+ mechanisms.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(LINUX)
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#endif
+
+#include "pmr.h"
+#include "log2.h"
+#include "device.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "pvr_debug.h"
+#include "lock_types.h"
+#include "allocmem.h"
+#include "process_stats.h"
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+#include "ri_server.h"
+#endif
+#include "devicemem.h"
+#include "pvrsrv_apphint.h"
+#include "pvrsrv_sync_server.h"
+#include "km_apphint_defs.h"
+
+/* This header must always be included last */
+#if defined(LINUX)
+#include "kernel_compatibility.h"
+#endif
+
+/* Top-level file-local build definitions */
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS) && defined(LINUX)
+#define CACHEOP_DEBUG
+#define CACHEOP_STATS_ITEMS_MAX 32
+#define INCR_WRAP(x) ((x+1) >= CACHEOP_STATS_ITEMS_MAX ? 0 : (x+1))
+#define DECR_WRAP(x) ((x-1) < 0 ? (CACHEOP_STATS_ITEMS_MAX-1) : (x-1))
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+/* Refer to CacheOpStatsExecLogHeader() for header item names */
+#define CACHEOP_RI_PRINTF_HEADER "%-8s %-10s %-10s %-5s %-16s %-16s %-10s %-10s %-18s %-18s %-12s"
+#define CACHEOP_RI_PRINTF "%-8d %-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n"
+#else
+#define CACHEOP_PRINTF_HEADER "%-8s %-10s %-10s %-5s %-10s %-10s %-18s %-18s %-12s"
+#define CACHEOP_PRINTF "%-8d %-10s %-10s %-5s 0x%-8llx 0x%-8llx %-18llu %-18llu 0x%-10x\n"
+#endif
+#endif
+
+//#define CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING /* Force OS page (not cache line) flush granularity */
+#define CACHEOP_PVR_ASSERT(x) /* Define as PVR_ASSERT(x), enable for swdev & testing */
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define CACHEOP_THREAD_WAIT_TIMEOUT 0ULL /* Wait indefinitely */
+#else
+#define CACHEOP_THREAD_WAIT_TIMEOUT		500000ULL	/* Wait 500ms between wait events unless woken up on demand */
+#endif
+#define CACHEOP_FENCE_WAIT_TIMEOUT 1000ULL /* Wait 1ms between wait events unless woken-up */
+#define CACHEOP_FENCE_RETRY_ABORT 1000ULL /* Fence retries that aborts fence operation */
+#define CACHEOP_SEQ_MIDPOINT			(IMG_UINT32) 0x7FFFFFFF	/* Midpoint around which seqNum(s) are rebased and compared */
+#define CACHEOP_ABORT_FENCE_ERROR_STRING "detected stalled client, retrying cacheop fence"
+#define CACHEOP_NO_GFLUSH_ERROR_STRING "global flush requested on CPU without support"
+#define CACHEOP_DEVMEM_OOR_ERROR_STRING "cacheop device memory request is out of range"
+#define CACHEOP_MAX_DEBUG_MESSAGE_LEN 160
+
+typedef struct _CACHEOP_WORK_ITEM_
+{
+ PMR *psPMR;
+ IMG_UINT32 ui32GFSeqNum;
+ IMG_UINT32 ui32OpSeqNum;
+ IMG_DEVMEM_SIZE_T uiSize;
+ PVRSRV_CACHE_OP uiCacheOp;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ PVRSRV_TIMELINE iTimeline;
+ SYNC_TIMELINE_OBJ sSWTimelineObj;
+#if defined(CACHEOP_DEBUG)
+ IMG_UINT64 ui64EnqueuedTime;
+ IMG_UINT64 ui64DequeuedTime;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_BOOL bDeferred;
+ IMG_BOOL bKMReq;
+ IMG_BOOL bRBF;
+ IMG_BOOL bUMF;
+ IMG_PID pid;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ RGXFWIF_DM eFenceOpType;
+#endif
+#endif
+} CACHEOP_WORK_ITEM;
+
+typedef struct _CACHEOP_STATS_EXEC_ITEM_
+{
+ IMG_PID pid;
+ IMG_UINT32 ui32OpSeqNum;
+ PVRSRV_CACHE_OP uiCacheOp;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT64 ui64EnqueuedTime;
+ IMG_UINT64 ui64DequeuedTime;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_BOOL bIsFence;
+ IMG_BOOL bKMReq;
+ IMG_BOOL bRBF;
+ IMG_BOOL bUMF;
+ IMG_BOOL bDeferred;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ RGXFWIF_DM eFenceOpType;
+#endif
+} CACHEOP_STATS_EXEC_ITEM;
+
+typedef enum _CACHEOP_CONFIG_
+{
+ CACHEOP_CONFIG_DEFAULT = 0,
+ /* cache flush mechanism types */
+ CACHEOP_CONFIG_KRBF = 1,
+ CACHEOP_CONFIG_KGF = 2,
+ CACHEOP_CONFIG_URBF = 4,
+ /* sw-emulated deferred flush mechanism */
+ CACHEOP_CONFIG_KDF = 8,
+ /* pseudo configuration items */
+ CACHEOP_CONFIG_LAST = 16,
+ CACHEOP_CONFIG_KLOG = 16,
+ CACHEOP_CONFIG_ALL = 31
+} CACHEOP_CONFIG;
+
+typedef struct _CACHEOP_WORK_QUEUE_
+{
+/*
+ * Init. state & primary device node framework
+ * is anchored on.
+ */
+ IMG_BOOL bInit;
+/*
+ MMU page size/shift & d-cache line size
+ */
+ size_t uiPageSize;
+ IMG_UINT32 uiLineSize;
+ IMG_UINT32 uiLineShift;
+ IMG_UINT32 uiPageShift;
+ PVRSRV_CACHE_OP_ADDR_TYPE uiCacheOpAddrType;
+/*
+ CacheOp deferred queueing protocol
+ + Implementation geared for performance, atomic counter based
+ - Value Space is 0 -> 1 -> 2 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> n.
+ - Index Space is 0 -> 1 -> 2 -> 3 -> 0 -> 1 -> 2 -> 3 -> 0 -> m.
+ - Index = Value modulo CACHEOP_INDICES_LOG2_SIZE.
+ + Write counter never collides with read counter in index space
+ - Unless at start of day when both are initialised to zero.
+ - This means we sacrifice one entry when the queue is full.
+ - Incremented by producer
+ - Value space tracks total number of CacheOps queued.
+ - Index space identifies CacheOp CCB queue index.
+ + Read counter increments towards write counter in value space
+ - Empty queue occurs when read equals write counter.
+ - Wrap-round logic handled by consumer as/when needed.
+ - Incremented by consumer
+ - Value space tracks total # of CacheOps executed.
+ - Index space identifies CacheOp CCB queue index.
+ + Total queued size adjusted up/down during write/read activity
+ - Counter might overflow but does not compromise framework.
+ */
+ ATOMIC_T hReadCounter;
+ ATOMIC_T hWriteCounter;
+/*
+ CacheOp sequence numbers
+ + hCommonSeqNum:
+ - Common sequence, numbers every CacheOp operation in both UM/KM.
+ - In KM
+ - Every deferred CacheOp (on behalf of UM) gets a unique seqNum.
+ - Last executed deferred CacheOp updates gsCwq.hCompletedSeqNum.
+ - Every GF operation (if supported) also gets a unique seqNum.
+ - Last executed GF operation updates CACHEOP_INFO_GFSEQNUM0.
+ - Under debug, all CacheOp gets a unique seqNum for tracking.
+ - This includes all UM/KM synchronous non-deferred CacheOp(s)
+ - In UM
+ - If the processor architecture supports GF maintenance (in KM)
+ - All UM CacheOp samples CACHEOP_INFO_GFSEQNUM0 via info. page.
+ - CacheOp(s) discarded if another GF occurs before execution.
+ - CacheOp(s) discarding happens in both UM and KM space.
+ + hCompletedSeqNum:
+ - Tracks last executed KM/deferred RBF/Global<timeline> CacheOp(s)
+ + hDeferredSize:
+ - Running total of size of currently deferred CacheOp in queue.
+ */
+ ATOMIC_T hDeferredSize;
+ ATOMIC_T hCommonSeqNum;
+ ATOMIC_T hCompletedSeqNum;
+/*
+ CacheOp information page
+ + psInfoPageMemDesc:
+ - Single system-wide OS page that is multi-mapped in UM/KM.
+ - Mapped into clients using read-only memory protection.
+ - Mapped into server using read/write memory protection.
+ - Contains information pertaining to cache framework.
+ + pui32InfoPage:
+ - Server linear address pointer to said information page.
+ - Each info-page entry currently of sizeof(IMG_UINT32).
+ */
+ PMR *psInfoPagePMR;
+ IMG_UINT32 *pui32InfoPage;
+ DEVMEM_MEMDESC *psInfoPageMemDesc;
+/*
+ CacheOp deferred work-item queue
+ + CACHEOP_INDICES_LOG2_SIZE
+ - Sized using GF/RBF ratio
+ */
+#define CACHEOP_INDICES_LOG2_SIZE (4)
+#define CACHEOP_INDICES_MAX (1 << CACHEOP_INDICES_LOG2_SIZE)
+#define CACHEOP_INDICES_MASK (CACHEOP_INDICES_MAX-1)
+ CACHEOP_WORK_ITEM asWorkItems[CACHEOP_INDICES_MAX];
+#if defined(CACHEOP_DEBUG)
+/*
+ CacheOp statistics
+ */
+ void *pvStatsEntry;
+ IMG_HANDLE hStatsExecLock;
+ IMG_UINT32 ui32ServerASync;
+ IMG_UINT32 ui32ServerSyncVA;
+ IMG_UINT32 ui32ServerSync;
+ IMG_UINT32 ui32ServerRBF;
+ IMG_UINT32 ui32ServerGF;
+ IMG_UINT32 ui32ServerDGF;
+ IMG_UINT32 ui32ServerDTL;
+ IMG_UINT32 ui32ClientSync;
+ IMG_UINT32 ui32ClientRBF;
+ IMG_UINT32 ui32KMDiscards;
+ IMG_UINT32 ui32UMDiscards;
+ IMG_UINT32 ui32TotalFenceOps;
+ IMG_UINT32 ui32TotalExecOps;
+ IMG_UINT32 ui32AvgExecTime;
+ IMG_UINT32 ui32AvgFenceTime;
+ IMG_INT32 i32StatsExecWriteIdx;
+ CACHEOP_STATS_EXEC_ITEM asStatsExecuted[CACHEOP_STATS_ITEMS_MAX];
+#endif
+/*
+ CacheOp (re)configuration
+ */
+ void *pvConfigTune;
+ IMG_HANDLE hConfigLock;
+/*
+ CacheOp deferred worker thread
+ + eConfig
+ - Runtime configuration
+ + hWorkerThread
+ - CacheOp thread handler
+ + hThreadWakeUpEvtObj
+ - Event object to drive CacheOp worker thread sleep/wake-ups.
+ + hClientWakeUpEvtObj
+ - Event object to unblock stalled clients waiting on queue.
+ + uiWorkerThreadPid
+ - CacheOp thread process id
+ */
+ CACHEOP_CONFIG eConfig;
+ IMG_UINT32 ui32Config;
+ IMG_BOOL bConfigTuning;
+ IMG_HANDLE hWorkerThread;
+ IMG_HANDLE hDeferredLock;
+ IMG_HANDLE hGlobalFlushLock;
+ IMG_PID uiWorkerThreadPid;
+ IMG_HANDLE hThreadWakeUpEvtObj;
+ IMG_HANDLE hClientWakeUpEvtObj;
+ IMG_UINT32 ui32FenceWaitTimeUs;
+ IMG_UINT32 ui32FenceRetryAbort;
+ IMG_BOOL bNoGlobalFlushImpl;
+ IMG_BOOL bSupportsUMFlush;
+} CACHEOP_WORK_QUEUE;
+
+/* Top-level CacheOp framework object */
+static CACHEOP_WORK_QUEUE gsCwq;
+
+#define CacheOpConfigSupports(e) ((gsCwq.eConfig & (e)) ? IMG_TRUE : IMG_FALSE)
+
+static INLINE IMG_UINT32 CacheOpIdxRead(ATOMIC_T *phCounter)
+{
+ IMG_UINT32 ui32Idx = OSAtomicRead(phCounter);
+ return ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxIncrement(ATOMIC_T *phCounter)
+{
+ IMG_UINT32 ui32Idx = OSAtomicIncrement(phCounter);
+ return ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxNext(ATOMIC_T *phCounter)
+{
+ IMG_UINT32 ui32Idx = OSAtomicRead(phCounter);
+ return ++ui32Idx & CACHEOP_INDICES_MASK;
+}
+
+static INLINE IMG_UINT32 CacheOpIdxSpan(ATOMIC_T *phLhs, ATOMIC_T *phRhs)
+{
+ return OSAtomicRead(phLhs) - OSAtomicRead(phRhs);
+}
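
To make the value-space/index-space protocol from the CACHEOP_WORK_QUEUE comment concrete, here is a hedged sketch of the occupancy tests those helpers enable (both functions are hypothetical, shown only to illustrate why one queue entry is sacrificed):

static IMG_BOOL CacheOpQueueIsEmpty(void)
{
	/* Empty: the read counter has caught up with the write counter */
	return CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter) == 0;
}

static IMG_BOOL CacheOpQueueIsFull(void)
{
	/* Full: advancing the write index would land on the read index.
	 * One slot stays unused so "full" is distinguishable from "empty" */
	return CacheOpIdxNext(&gsCwq.hWriteCounter) == CacheOpIdxRead(&gsCwq.hReadCounter);
}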
+
+static INLINE IMG_UINT64 DivBy10(IMG_UINT64 uiNum)
+{
+ IMG_UINT64 uiQuot;
+ IMG_UINT64 uiRem;
+
+ uiQuot = (uiNum >> 1) + (uiNum >> 2);
+ uiQuot = uiQuot + (uiQuot >> 4);
+ uiQuot = uiQuot + (uiQuot >> 8);
+ uiQuot = uiQuot + (uiQuot >> 16);
+ uiQuot = uiQuot >> 3;
+ uiRem = uiNum - (((uiQuot << 2) + uiQuot) << 1);
+
+ return uiQuot + (uiRem > 9);
+}
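
DivBy10() is the classic branchless unsigned divide-by-10: (n>>1)+(n>>2) is 0.75n, the shift-add refinements push the factor towards 0.8n, the final >>3 yields roughly n/10, and the (remainder > 9) test corrects the at-most-one underestimate. The construction is exact over the 32-bit range; inputs near full 64-bit magnitude would need a further q += q >> 32 refinement step in the well-known Hacker's Delight form. A hedged self-test sketch (DivBy10SelfTest() is hypothetical):

static void DivBy10SelfTest(void)
{
	static const IMG_UINT64 aui64Samples[] = { 0, 9, 10, 99, 100, 1000000007ULL };
	size_t i;

	for (i = 0; i < ARRAY_SIZE(aui64Samples); i++)
	{
		/* Compare against the compiler's own division */
		PVR_ASSERT(DivBy10(aui64Samples[i]) == aui64Samples[i] / 10);
	}
}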
+
+/* Callback to dump info of cacheop thread in debug_dump */
+static void CacheOpThreadDumpInfo(IMG_HANDLE hDbgReqestHandle,
+ DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_DUMPDEBUG_LOG(" Configuration: QSZ: %d, UKT: %d, KDFT: %d, "
+ "KGFT: %d, LINESIZE: %d, PGSIZE: %d, KDF: %s, "
+ "URBF: %s, KGF: %s, KRBF: %s ",
+ CACHEOP_INDICES_MAX,
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD],
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD],
+ gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE],
+ gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE],
+ gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_KGF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+ );
+ PVR_DUMPDEBUG_LOG(" Pending deferred CacheOp entries : %u",
+ CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter));
+}
+
+#if defined(CACHEOP_DEBUG)
+static INLINE void CacheOpStatsExecLogHeader(IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(szBuffer, CACHEOP_MAX_DEBUG_MESSAGE_LEN,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ CACHEOP_RI_PRINTF_HEADER,
+#else
+ CACHEOP_PRINTF_HEADER,
+#endif
+ "Pid",
+ "CacheOp",
+ " Type",
+ "Mode",
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ "DevVAddr",
+ "DevPAddr",
+#endif
+ "Offset",
+ "Size",
+ "xTime (us)",
+ "qTime (us)",
+ "SeqNum");
+}
+
+static void CacheOpStatsExecLogWrite(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_UINT64 ui64EnqueuedTime;
+ IMG_INT32 i32WriteOffset;
+
+ if (!psCacheOpWorkItem->ui32OpSeqNum && !psCacheOpWorkItem->uiCacheOp)
+ {
+ /* This breaks the logic of read-out, so we do not queue items
+		/* Items with a zero sequence number and no CacheOp would break
+		   the read-out logic, so they are not queued */
+ }
+ else if (psCacheOpWorkItem->bKMReq && !CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+ {
+		/* KM requests would spam the history due to their frequency, so drop them completely */
+ return;
+ }
+
+ OSLockAcquire(gsCwq.hStatsExecLock);
+
+ i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+ gsCwq.asStatsExecuted[i32WriteOffset].pid = psCacheOpWorkItem->pid;
+ gsCwq.i32StatsExecWriteIdx = INCR_WRAP(gsCwq.i32StatsExecWriteIdx);
+ gsCwq.asStatsExecuted[i32WriteOffset].bRBF = psCacheOpWorkItem->bRBF;
+ gsCwq.asStatsExecuted[i32WriteOffset].bUMF = psCacheOpWorkItem->bUMF;
+ gsCwq.asStatsExecuted[i32WriteOffset].uiSize = psCacheOpWorkItem->uiSize;
+ gsCwq.asStatsExecuted[i32WriteOffset].bKMReq = psCacheOpWorkItem->bKMReq;
+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset = psCacheOpWorkItem->uiOffset;
+ gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp = psCacheOpWorkItem->uiCacheOp;
+ gsCwq.asStatsExecuted[i32WriteOffset].bDeferred = psCacheOpWorkItem->bDeferred;
+ gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
+ gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime = psCacheOpWorkItem->ui64ExecuteTime;
+ gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+ gsCwq.asStatsExecuted[i32WriteOffset].ui64DequeuedTime = psCacheOpWorkItem->ui64DequeuedTime;
+ /* During early system initialisation, only non-fence & non-PMR CacheOps are processed */
+ gsCwq.asStatsExecuted[i32WriteOffset].bIsFence = gsCwq.bInit && !psCacheOpWorkItem->psPMR;
+ CACHEOP_PVR_ASSERT(gsCwq.asStatsExecuted[i32WriteOffset].pid);
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ if (gsCwq.bInit && psCacheOpWorkItem->psPMR)
+ {
+ IMG_CPU_PHYADDR sDevPAddr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bValid;
+
+		/* Get more detailed information from the RI manager regarding the
+		   sub-allocations this PMR has, for the process that requested the CacheOp */
+ eError = RIDumpProcessListKM(psCacheOpWorkItem->psPMR,
+ gsCwq.asStatsExecuted[i32WriteOffset].pid,
+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+ &gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+		/* (Re)lock here as the PMR might not have been locked */
+ eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = PMR_CpuPhysAddr(psCacheOpWorkItem->psPMR,
+ gsCwq.uiPageShift,
+ 1,
+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+ &sDevPAddr,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+ goto e0;
+ }
+
+ eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+
+ gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr.uiAddr = sDevPAddr.uiAddr;
+ }
+
+ if (gsCwq.asStatsExecuted[i32WriteOffset].bIsFence)
+ {
+ gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType = psCacheOpWorkItem->eFenceOpType;
+ }
+#endif
+
+ /* Convert timing from nano-seconds to micro-seconds */
+ ui64ExecuteTime = gsCwq.asStatsExecuted[i32WriteOffset].ui64ExecuteTime;
+ ui64EnqueuedTime = gsCwq.asStatsExecuted[i32WriteOffset].ui64EnqueuedTime;
+ ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+ ui64EnqueuedTime = DivBy10(DivBy10(DivBy10(ui64EnqueuedTime)));
+
+	/* Coalesced (to global) deferred CacheOps do not contribute to statistics,
+	   as the enqueue/execute times are identical for these CacheOps */
+ if (!gsCwq.asStatsExecuted[i32WriteOffset].bIsFence)
+ {
+ /* Calculate the rolling approximate average execution time */
+ IMG_UINT32 ui32Time = ui64EnqueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64EnqueuedTime :
+ ui64EnqueuedTime - ui64ExecuteTime;
+ if (gsCwq.ui32TotalExecOps > 2 && ui32Time)
+ {
+ gsCwq.ui32AvgExecTime -= (gsCwq.ui32AvgExecTime / gsCwq.ui32TotalExecOps);
+ gsCwq.ui32AvgExecTime += (ui32Time / gsCwq.ui32TotalExecOps);
+ }
+ else if (ui32Time)
+ {
+ gsCwq.ui32AvgExecTime = (IMG_UINT32)ui32Time;
+ }
+ }
+
+ if (! gsCwq.asStatsExecuted[i32WriteOffset].bKMReq)
+ {
+ /* This operation queues only UM CacheOp in per-PID process statistics database */
+ PVRSRVStatsUpdateCacheOpStats(gsCwq.asStatsExecuted[i32WriteOffset].uiCacheOp,
+ gsCwq.asStatsExecuted[i32WriteOffset].ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ gsCwq.asStatsExecuted[i32WriteOffset].sDevVAddr,
+ gsCwq.asStatsExecuted[i32WriteOffset].sDevPAddr,
+ gsCwq.asStatsExecuted[i32WriteOffset].eFenceOpType,
+#endif
+ gsCwq.asStatsExecuted[i32WriteOffset].uiOffset,
+ gsCwq.asStatsExecuted[i32WriteOffset].uiSize,
+ ui64EnqueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64EnqueuedTime:
+ ui64EnqueuedTime - ui64ExecuteTime,
+ gsCwq.asStatsExecuted[i32WriteOffset].bRBF,
+ gsCwq.asStatsExecuted[i32WriteOffset].bUMF,
+ gsCwq.asStatsExecuted[i32WriteOffset].bIsFence,
+ psCacheOpWorkItem->pid);
+ }
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+e0:
+#endif
+ OSLockRelease(gsCwq.hStatsExecLock);
+}
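
The "rolling approximate average" computed above is an incremental mean: each new sample displaces a 1/N share of the running value, so no sample history needs to be kept. A hedged standalone sketch of the same update rule (RollingAvg() is hypothetical):

static IMG_UINT32 RollingAvg(IMG_UINT32 ui32Avg, IMG_UINT32 ui32Sample,
                             IMG_UINT32 ui32NumSamples)
{
	if (ui32NumSamples < 2 || ui32Avg == 0)
	{
		return ui32Sample;	/* seed the average with the first sample */
	}
	/* avg' = avg - avg/N + sample/N */
	return ui32Avg - (ui32Avg / ui32NumSamples) + (ui32Sample / ui32NumSamples);
}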
+
+static void CacheOpStatsExecLogRead(void *pvFilePtr, void *pvData,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_CHAR *pszFlushype;
+ IMG_CHAR *pszCacheOpType;
+ IMG_CHAR *pszFlushSource;
+ IMG_INT32 i32ReadOffset;
+ IMG_INT32 i32WriteOffset;
+ IMG_UINT64 ui64EnqueuedTime;
+ IMG_UINT64 ui64DequeuedTime;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_CHAR szBuffer[CACHEOP_MAX_DEBUG_MESSAGE_LEN] = {0};
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ OSLockAcquire(gsCwq.hStatsExecLock);
+
+ pfnOSStatsPrintf(pvFilePtr,
+					 "Primary CPU d-cache architecture: LSZ: %d, URBF: %s, KGF: %s, KRBF: %s\n",
+ gsCwq.uiLineSize,
+ gsCwq.bSupportsUMFlush ? "Yes" : "No",
+ !gsCwq.bNoGlobalFlushImpl ? "Yes" : "No",
+ "Yes" /* KRBF mechanism always available */
+ );
+
+ pfnOSStatsPrintf(pvFilePtr,
+ "Configuration: QSZ: %d, UKT: %d, KDFT: %d, KGFT: %d, KDF: %s, URBF: %s, KGF: %s, KRBF: %s\n",
+ CACHEOP_INDICES_MAX,
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD],
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD],
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD],
+ gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_KGF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+ );
+
+ pfnOSStatsPrintf(pvFilePtr,
+ "Summary: OP[F][TL] (tot.avg): %d.%d/%d.%d/%d, [KM][UM][A]SYNC: %d.%d/%d/%d, RBF (um/km): %d/%d, [D]GF (km): %d/%d, DSC (um/km): %d/%d\n",
+ gsCwq.ui32TotalExecOps, gsCwq.ui32AvgExecTime, gsCwq.ui32TotalFenceOps, gsCwq.ui32AvgFenceTime, gsCwq.ui32ServerDTL,
+ gsCwq.ui32ServerSync, gsCwq.ui32ServerSyncVA, gsCwq.ui32ClientSync, gsCwq.ui32ServerASync,
+ gsCwq.ui32ClientRBF, gsCwq.ui32ServerRBF,
+ gsCwq.ui32ServerDGF, gsCwq.ui32ServerGF,
+ gsCwq.ui32UMDiscards, gsCwq.ui32KMDiscards
+ );
+
+ CacheOpStatsExecLogHeader(szBuffer);
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+ i32WriteOffset = gsCwq.i32StatsExecWriteIdx;
+ for (i32ReadOffset = DECR_WRAP(i32WriteOffset);
+ i32ReadOffset != i32WriteOffset;
+ i32ReadOffset = DECR_WRAP(i32ReadOffset))
+ {
+ if (!gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum &&
+ !gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+ {
+ break;
+ }
+
+ /* Convert from nano-seconds to micro-seconds */
+ ui64ExecuteTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime;
+ ui64EnqueuedTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64EnqueuedTime;
+ ui64DequeuedTime = gsCwq.asStatsExecuted[i32ReadOffset].ui64DequeuedTime;
+ ui64ExecuteTime = DivBy10(DivBy10(DivBy10(ui64ExecuteTime)));
+ ui64EnqueuedTime = DivBy10(DivBy10(DivBy10(ui64EnqueuedTime)));
+ ui64DequeuedTime = ui64DequeuedTime ? DivBy10(DivBy10(DivBy10(ui64DequeuedTime))) : 0;
+
+ if (gsCwq.asStatsExecuted[i32ReadOffset].bIsFence)
+ {
+ IMG_CHAR *pszMode = "";
+ IMG_CHAR *pszFenceType = "";
+ pszCacheOpType = "Fence";
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ pszMode = gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp != PVRSRV_CACHE_OP_GLOBAL ? "" : " GF ";
+ switch (gsCwq.asStatsExecuted[i32ReadOffset].eFenceOpType)
+ {
+ case RGXFWIF_DM_GP:
+ pszFenceType = " GP/GF";
+ break;
+
+ case RGXFWIF_DM_TDM:
+ pszFenceType = " TDM ";
+ break;
+
+ case RGXFWIF_DM_TA:
+ pszFenceType = " TA ";
+ break;
+
+ case RGXFWIF_DM_3D:
+ pszFenceType = " PDM ";
+ break;
+
+ case RGXFWIF_DM_CDM:
+ pszFenceType = " CDM ";
+ break;
+
+ case RGXFWIF_DM_RTU:
+ pszFenceType = " RTU ";
+ break;
+
+ case RGXFWIF_DM_SHG:
+ pszFenceType = " SHG ";
+ break;
+
+ default:
+ CACHEOP_PVR_ASSERT(0);
+ break;
+ }
+#else
+ /* The CacheOp fence operation also triggered a global cache flush operation */
+ pszFenceType =
+ gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp != PVRSRV_CACHE_OP_GLOBAL ? "" : " GF ";
+#endif
+ pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ CACHEOP_RI_PRINTF,
+#else
+ CACHEOP_PRINTF,
+#endif
+ gsCwq.asStatsExecuted[i32ReadOffset].pid,
+ pszCacheOpType,
+ pszFenceType,
+ pszMode,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ "",
+ "",
+#endif
+ gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+ gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+ ui64EnqueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64EnqueuedTime
+ :
+ ui64EnqueuedTime - ui64ExecuteTime,
+ ui64EnqueuedTime < ui64DequeuedTime ?
+ ui64DequeuedTime - ui64EnqueuedTime
+ :
+ !ui64DequeuedTime ? 0 : ui64EnqueuedTime - ui64DequeuedTime,
+ gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum);
+ }
+ else
+ {
+ if (gsCwq.asStatsExecuted[i32ReadOffset].bRBF)
+ {
+ IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+ ui64NumOfPages = gsCwq.asStatsExecuted[i32ReadOffset].uiSize >> gsCwq.uiPageShift;
+ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ pszFlushype = "RBF.Fast";
+ }
+ else
+ {
+ pszFlushype = "RBF.Slow";
+ }
+ }
+ else
+ {
+ pszFlushype = " GF ";
+ }
+
+ if (gsCwq.asStatsExecuted[i32ReadOffset].bUMF)
+ {
+ pszFlushSource = " UM";
+ }
+ else
+ {
+ /*
+ - Request originates directly from a KM thread or in KM (KM<), or
+ - Request originates from a UM thread and is KM deferred (KM+), or
+ - Request is/was discarded due to an 'else-[when,where]' GFlush
+ - i.e. GF occurs either (a)sync to current UM/KM thread
+ */
+ pszFlushSource =
+ gsCwq.asStatsExecuted[i32ReadOffset].bKMReq ? " KM<" :
+ gsCwq.asStatsExecuted[i32ReadOffset].bDeferred && gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM+" :
+ !gsCwq.asStatsExecuted[i32ReadOffset].ui64ExecuteTime ? " KM-" : " KM";
+ }
+
+ switch (gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_NONE:
+ pszCacheOpType = "None";
+ break;
+ case PVRSRV_CACHE_OP_CLEAN:
+ pszCacheOpType = "Clean";
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ pszCacheOpType = "Invalidate";
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ pszCacheOpType = "Flush";
+ break;
+ case PVRSRV_CACHE_OP_GLOBAL:
+ pszCacheOpType = "GFlush";
+ break;
+ case PVRSRV_CACHE_OP_TIMELINE:
+ pszCacheOpType = "Timeline";
+ pszFlushype = " ";
+ break;
+ default:
+ if ((IMG_UINT32)gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp == (IMG_UINT32)(PVRSRV_CACHE_OP_GLOBAL|PVRSRV_CACHE_OP_TIMELINE))
+ {
+ pszCacheOpType = "Timeline";
+ }
+ else
+ {
+ pszCacheOpType = "Unknown";
+ gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum =
+ (IMG_UINT32) gsCwq.asStatsExecuted[i32ReadOffset].uiCacheOp;
+ }
+ break;
+ }
+
+ pfnOSStatsPrintf(pvFilePtr,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ CACHEOP_RI_PRINTF,
+#else
+ CACHEOP_PRINTF,
+#endif
+ gsCwq.asStatsExecuted[i32ReadOffset].pid,
+ pszCacheOpType,
+ pszFlushype,
+ pszFlushSource,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ gsCwq.asStatsExecuted[i32ReadOffset].sDevVAddr.uiAddr,
+ gsCwq.asStatsExecuted[i32ReadOffset].sDevPAddr.uiAddr,
+#endif
+ gsCwq.asStatsExecuted[i32ReadOffset].uiOffset,
+ gsCwq.asStatsExecuted[i32ReadOffset].uiSize,
+ ui64EnqueuedTime < ui64ExecuteTime ?
+ ui64ExecuteTime - ui64EnqueuedTime
+ :
+ ui64EnqueuedTime - ui64ExecuteTime,
+ ui64EnqueuedTime < ui64DequeuedTime ?
+ ui64DequeuedTime - ui64EnqueuedTime
+ :
+ !ui64DequeuedTime ? 0 : ui64EnqueuedTime - ui64DequeuedTime,
+ gsCwq.asStatsExecuted[i32ReadOffset].ui32OpSeqNum);
+ }
+ }
+
+ OSLockRelease(gsCwq.hStatsExecLock);
+}
+#endif /* defined(CACHEOP_DEBUG) */
+
+static INLINE void CacheOpStatsReset(void)
+{
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32KMDiscards = 0;
+ gsCwq.ui32UMDiscards = 0;
+ gsCwq.ui32TotalExecOps = 0;
+ gsCwq.ui32TotalFenceOps = 0;
+ gsCwq.ui32AvgExecTime = 0;
+ gsCwq.ui32AvgFenceTime = 0;
+ gsCwq.ui32ClientRBF = 0;
+ gsCwq.ui32ClientSync = 0;
+ gsCwq.ui32ServerRBF = 0;
+ gsCwq.ui32ServerASync = 0;
+ gsCwq.ui32ServerSyncVA = 0;
+ gsCwq.ui32ServerSync = 0;
+ gsCwq.ui32ServerGF = 0;
+ gsCwq.ui32ServerDGF = 0;
+ gsCwq.ui32ServerDTL = 0;
+ gsCwq.i32StatsExecWriteIdx = 0;
+ OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+#endif
+}
+
+static void CacheOpConfigUpdate(IMG_UINT32 ui32Config)
+{
+ OSLockAcquire(gsCwq.hConfigLock);
+
+ /* Step 0, set the gsCwq.eConfig bits */
+ if (!(ui32Config & (CACHEOP_CONFIG_LAST - 1)))
+ {
+ gsCwq.bConfigTuning = IMG_FALSE;
+ gsCwq.eConfig = CACHEOP_CONFIG_KRBF | CACHEOP_CONFIG_KDF;
+ if (! gsCwq.bNoGlobalFlushImpl)
+ {
+ gsCwq.eConfig |= CACHEOP_CONFIG_KGF;
+ }
+ if (gsCwq.bSupportsUMFlush)
+ {
+ gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+ }
+ }
+ else
+ {
+ gsCwq.bConfigTuning = IMG_TRUE;
+
+ if (ui32Config & CACHEOP_CONFIG_KRBF)
+ {
+ gsCwq.eConfig |= CACHEOP_CONFIG_KRBF;
+ }
+ else
+ {
+ gsCwq.eConfig &= ~CACHEOP_CONFIG_KRBF;
+ }
+
+ if (ui32Config & CACHEOP_CONFIG_KDF)
+ {
+ gsCwq.eConfig |= CACHEOP_CONFIG_KDF;
+ }
+ else
+ {
+ gsCwq.eConfig &= ~CACHEOP_CONFIG_KDF;
+ }
+
+ if (!gsCwq.bNoGlobalFlushImpl && (ui32Config & CACHEOP_CONFIG_KGF))
+ {
+ gsCwq.eConfig |= CACHEOP_CONFIG_KGF;
+ }
+ else
+ {
+ gsCwq.eConfig &= ~CACHEOP_CONFIG_KGF;
+ }
+
+ if (gsCwq.bSupportsUMFlush && (ui32Config & CACHEOP_CONFIG_URBF))
+ {
+ gsCwq.eConfig |= CACHEOP_CONFIG_URBF;
+ }
+ else
+ {
+ gsCwq.eConfig &= ~CACHEOP_CONFIG_URBF;
+ }
+ }
+
+ if (ui32Config & CACHEOP_CONFIG_KLOG)
+ {
+		/* Log CacheOps from KM callers as well (otherwise KM requests are dropped from the history) */
+ gsCwq.eConfig |= CACHEOP_CONFIG_KLOG;
+ }
+ else
+ {
+ gsCwq.eConfig &= ~CACHEOP_CONFIG_KLOG;
+ }
+
+ /* Step 1, set gsCwq.ui32Config based on gsCwq.eConfig */
+ ui32Config = 0;
+ if (gsCwq.eConfig & CACHEOP_CONFIG_KRBF)
+ {
+ ui32Config |= CACHEOP_CONFIG_KRBF;
+ }
+ if (gsCwq.eConfig & CACHEOP_CONFIG_KDF)
+ {
+ ui32Config |= CACHEOP_CONFIG_KDF;
+ }
+ if (gsCwq.eConfig & CACHEOP_CONFIG_KGF)
+ {
+ ui32Config |= CACHEOP_CONFIG_KGF;
+ }
+ if (gsCwq.eConfig & CACHEOP_CONFIG_URBF)
+ {
+ ui32Config |= CACHEOP_CONFIG_URBF;
+ }
+ if (gsCwq.eConfig & CACHEOP_CONFIG_KLOG)
+ {
+ ui32Config |= CACHEOP_CONFIG_KLOG;
+ }
+ gsCwq.ui32Config = ui32Config;
+
+ /* Step 2, Bar RBF promotion to GF, unless a GF is implemented */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = (IMG_UINT32)~0;
+ if (! gsCwq.bNoGlobalFlushImpl)
+ {
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = (IMG_UINT32)PVR_DIRTY_BYTES_FLUSH_THRESHOLD;
+ }
+
+	/* Step 3, in certain cases where a CacheOp/VA is provided, this threshold determines the point at
+	   which the optimisation from having said VA (i.e. not having to remap the PMR pages in KM) is
+	   clawed back by the overhead of maintaining such a large request, which might stall the user
+	   thread; to hide this latency, such CacheOps are executed on the deferred CacheOp thread */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = (IMG_UINT32)(PVR_DIRTY_BYTES_FLUSH_THRESHOLD >> 2);
+
+ /* Step 4, if no UM support, all requests are done in KM so zero these forcing all client requests
+ to come down into the KM for maintenance */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = 0;
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 0;
+ if (gsCwq.bSupportsUMFlush)
+ {
+ /* If URBF has been selected exclusively OR selected but there is no GF implementation */
+ if ((gsCwq.eConfig & CACHEOP_CONFIG_URBF) &&
+ (gsCwq.bNoGlobalFlushImpl || !((gsCwq.ui32Config & (CACHEOP_CONFIG_LAST-1)) & ~CACHEOP_CONFIG_URBF)))
+ {
+			/* If only URBF has been selected, simulate the absence of GF support; likewise, having
+			   no GF implementation means all client requests should be done in UM. In both cases,
+			   set this threshold to the highest value to prevent any client requests coming down
+			   to the server for maintenance. */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = (IMG_UINT32)~0;
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMRBFONLY] = 1;
+ }
+ /* This is the default entry for setting the UM info. page entries */
+ else if ((gsCwq.eConfig & CACHEOP_CONFIG_URBF) && !gsCwq.bNoGlobalFlushImpl)
+ {
+			/* Set the UM/KM threshold; all request sizes above it go to the server for GF maintenance
+			   _only_. UM flushes already have the VA acquired, so no cost is incurred in per-page
+			   (re)mapping of the to-be-maintained PMR/page(s), as is the case with KM flushing, so
+			   disallow KDF */
+#if defined(ARM64) || defined(__aarch64__) || defined(__arm64__)
+ /* This value is set to be higher for ARM64 due to a very optimised UM flush implementation */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] << 4;
+#else
+ /* For others, assume an average UM flush performance, anything above should be promoted to GF.
+ For x86 UMA/LMA, we avoid KDF because remapping PMR/pages in KM might fail due to exhausted
+ or fragmented VMALLOC kernel VA space */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+#endif
+ }
+ }
+
+ /* Step 5, reset stats. */
+ CacheOpStatsReset();
+
+ OSLockRelease(gsCwq.hConfigLock);
+}
+
+static INLINE void CacheOpConfigRead(void *pvFilePtr,
+ void *pvData,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ pfnOSStatsPrintf(pvFilePtr,
+ "KDF: %s, URBF: %s, KGF: %s, KRBF: %s\n",
+ gsCwq.eConfig & CACHEOP_CONFIG_KDF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_URBF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_KGF ? "Yes" : "No",
+ gsCwq.eConfig & CACHEOP_CONFIG_KRBF ? "Yes" : "No"
+ );
+}
+
+static INLINE PVRSRV_ERROR CacheOpConfigQuery(const PVRSRV_DEVICE_NODE *psDevNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+ switch (ui32ID)
+ {
+ case APPHINT_ID_CacheOpConfig:
+ *pui32Value = gsCwq.ui32Config;
+ break;
+
+ case APPHINT_ID_CacheOpGFThresholdSize:
+ *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+ break;
+
+ case APPHINT_ID_CacheOpUMKMThresholdSize:
+ *pui32Value = gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD];
+ break;
+
+ default:
+ break;
+ }
+
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR CacheOpConfigSet(const PVRSRV_DEVICE_NODE *psDevNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ IMG_UINT32 ui32ID = (IMG_UINT32)(uintptr_t) psPrivate;
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+ switch (ui32ID)
+ {
+ case APPHINT_ID_CacheOpConfig:
+ CacheOpConfigUpdate(ui32Value & CACHEOP_CONFIG_ALL);
+ break;
+
+ case APPHINT_ID_CacheOpGFThresholdSize:
+ {
+ if (!ui32Value || gsCwq.bNoGlobalFlushImpl)
+ {
+ /* CPU ISA does not support GF, silently ignore request to adjust threshold */
+ PVR_ASSERT(gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] == (IMG_UINT32)~0);
+ break;
+ }
+ else if (ui32Value < gsCwq.uiPageSize)
+ {
+ /* Silently round-up to OS page size */
+ ui32Value = gsCwq.uiPageSize;
+ }
+
+ /* Align to OS page size */
+ ui32Value &= ~(gsCwq.uiPageSize - 1);
+
+ /* Adjust KM deferred threshold given this updated KM global threshold */
+ if (ui32Value == gsCwq.uiPageSize || ui32Value < gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD])
+ {
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = ui32Value >> 2;
+ }
+
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] = ui32Value;
+
+ break;
+ }
+
+ case APPHINT_ID_CacheOpUMKMThresholdSize:
+ {
+ if (!ui32Value || !gsCwq.bSupportsUMFlush)
+ {
+ /* CPU ISA does not support UM flush, therefore every request goes down into
+ the KM, silently ignore request to adjust threshold */
+ PVR_ASSERT(! gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD]);
+ break;
+ }
+ else if (ui32Value < gsCwq.uiPageSize)
+ {
+ /* Silently round-up to OS page size */
+ ui32Value = gsCwq.uiPageSize;
+ }
+
+ /* Align to OS page size */
+ ui32Value &= ~(gsCwq.uiPageSize - 1);
+
+ if (gsCwq.bNoGlobalFlushImpl || ui32Value < gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD])
+ {
+			/* The CPU ISA does not support GF either, so there is no benefit in re-routing
+			   this to KM as the request won't be promoted to GF; the request can still
+			   benefit from KM async execution, so ensure the KM deferred threshold applies */
+ PVR_ASSERT(gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD] == (IMG_UINT32)~0);
+ gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] = ui32Value >> 1;
+ break;
+ }
+
+ gsCwq.pui32InfoPage[CACHEOP_INFO_UMKMTHRESHLD] = ui32Value;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return PVRSRV_OK;
+}
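
Both threshold setters above use the same power-of-two alignment idiom: round small values up to one page, then clear the low bits. A hedged sketch of the idiom in isolation (AlignThreshold() is hypothetical):

static IMG_UINT32 AlignThreshold(IMG_UINT32 ui32Value, IMG_UINT32 ui32PageSize)
{
	/* Assumes ui32PageSize is a power of two, so (size - 1) is a
	 * low-bit mask and ~(size - 1) truncates to a page boundary */
	if (ui32Value < ui32PageSize)
	{
		ui32Value = ui32PageSize;
	}
	return ui32Value & ~(ui32PageSize - 1);
}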
+
+static INLINE void CacheOpQItemRecycle(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+ PVRSRV_ERROR eError;
+ eError = PMRUnlockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+	/* Set to max as a precaution in case recycling this CacheOp index fails
+	   to reset it; this purely safeguards against having to discard
+	   subsequent deferred CacheOps or signal the SW sync timeline */
+ psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE;
+ psCacheOpWorkItem->ui32GFSeqNum = (IMG_UINT32)~0;
+ psCacheOpWorkItem->ui32OpSeqNum = (IMG_UINT32)~0;
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->psPMR = (void *)(uintptr_t)~0;
+#endif
+}
+
+static INLINE void CacheOpQItemReadCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR);
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR != (void *)(uintptr_t)~0);
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum != (IMG_UINT32)~0);
+ if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+ {
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32GFSeqNum != (IMG_UINT32)~0);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem);
+#endif
+}
+
+static INLINE void CacheOpQItemWriteCheck(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->psPMR == (void *)(uintptr_t)~0);
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32OpSeqNum == (IMG_UINT32)~0);
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->ui32GFSeqNum == (IMG_UINT32)~0);
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE);
+#else
+ PVR_UNREFERENCED_PARAMETER(psCacheOpWorkItem);
+#endif
+}
+
+static INLINE IMG_UINT32 CacheOpGetNextCommonSeqNum(void)
+{
+ IMG_UINT32 ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+ if (! ui32SeqNum)
+ {
+		/* Zero is _not_ a valid sequence value; skipping it simplifies _all_
+		   subsequent fence checking, since a fence value of zero is supplied
+		   when no cache maintenance operation is outstanding. */
+ if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+ {
+			/* Also, when the seqNum wraps around/crosses zero, we must ensure that
+			   GFSEQNUM is not erroneously higher than any/all client seqNum(s) in
+			   the system during this wrap-around transition, so we disable both
+			   momentarily until the next GF comes along. The effect is that all
+			   subsequent in-flight discards using ">" are never true, seeing zero
+			   is _not_ greater than anything, and all "<=" comparisons are always
+			   true, seeing zero is less than all non-zero integers. The additional
+			   GF here mostly accounts for race condition(s) during this transition
+			   for any pending seqNum(s) that are still behind zero. */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = 0;
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = 0;
+ ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+ (void) OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ }
+ else
+ {
+ ui32SeqNum = OSAtomicIncrement(&gsCwq.hCommonSeqNum);
+ }
+ }
+ return ui32SeqNum;
+}
+
+static INLINE IMG_BOOL CacheOpFenceCheck(IMG_UINT32 ui32CompletedSeqNum,
+ IMG_UINT32 ui32FenceSeqNum)
+{
+ IMG_UINT32 ui32RebasedCompletedNum;
+ IMG_UINT32 ui32RebasedFenceNum;
+ IMG_UINT32 ui32Rebase;
+
+ if (ui32FenceSeqNum == 0)
+ {
+ return IMG_TRUE;
+ }
+
+ /*
+ The problem statement is how to compare two values on
+ a numerical sequentially incrementing timeline in the
+ presence of wrap around arithmetic semantics using a
+ single ui32 counter & atomic (increment) operations.
+
+ The rationale for the solution here is to rebase the
+ incoming values to the sequence midpoint and perform
+ comparisons there; this allows us to handle overflow
+ or underflow wrap-round using only a single integer.
+
+	   NOTE: Here we assume that the absolute value of the
+	   difference between the two incoming values is _not_
+	   greater than CACHEOP_SEQ_MIDPOINT. This assumption
+	   holds as it implies that it is very _unlikely_ that 2
+	   billion CacheOp requests could have been made between
+	   a single client's CacheOp request & the corresponding
+	   fence check. This code sequence is a hand-optimised
+	   (branchless) version of:
+
+ x = ui32CompletedOpSeqNum
+ y = ui32FenceOpSeqNum
+
+ if (|x - y| < CACHEOP_SEQ_MIDPOINT)
+ return (x - y) >= 0 ? true : false
+ else
+ return (y - x) >= 0 ? true : false
+ */
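+	/* Illustrative worked example (CACHEOP_SEQ_MIDPOINT assumed to be
+	   0x80000000): completed=0xFFFFFFF0 and fence=0x00000010 (counter
+	   wrapped after the fence was issued) give ui32Rebase=0x80000010;
+	   the rebased completed/fence values are 0x80000000/0x80000020, so
+	   the fence correctly reads as not yet met. Swapping the two inputs
+	   gives 0x80000000/0x7FFFFFE0, which correctly reads as met. */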
+ ui32Rebase = CACHEOP_SEQ_MIDPOINT - ui32CompletedSeqNum;
+
+ /* ui32Rebase could be either positive/negative, in
+ any case we still perform operation using unsigned
+ semantics as 2's complement notation always means
+ we end up with the correct result */
+ ui32RebasedCompletedNum = ui32Rebase + ui32CompletedSeqNum;
+ ui32RebasedFenceNum = ui32Rebase + ui32FenceSeqNum;
+
+ return (ui32RebasedCompletedNum >= ui32RebasedFenceNum);
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineBind(CACHEOP_WORK_ITEM *psCacheOpWorkItem,
+ PVRSRV_TIMELINE iTimeline)
+{
+ PVRSRV_ERROR eError;
+
+ /* Always default the incoming CacheOp work-item to safe values */
+ psCacheOpWorkItem->sSWTimelineObj = (SYNC_TIMELINE_OBJ)(uintptr_t)NULL;
+ psCacheOpWorkItem->iTimeline = PVRSRV_NO_TIMELINE;
+ if (iTimeline == PVRSRV_NO_TIMELINE)
+ {
+ return PVRSRV_OK;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ psCacheOpWorkItem->iTimeline = iTimeline;
+ eError = SyncSWGetTimelineObj(iTimeline, &psCacheOpWorkItem->sSWTimelineObj);
+ PVR_LOG_IF_ERROR(eError, "SyncSWGetTimelineObj");
+#else
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+
+ return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpTimelineExec(CACHEOP_WORK_ITEM *psCacheOpWorkItem)
+{
+ PVRSRV_ERROR eError;
+
+ if (psCacheOpWorkItem->iTimeline == PVRSRV_NO_TIMELINE)
+ {
+ return PVRSRV_OK;
+ }
+ CACHEOP_PVR_ASSERT(psCacheOpWorkItem->sSWTimelineObj);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ eError = SyncSWTimelineAdvanceKM(psCacheOpWorkItem->sSWTimelineObj);
+ (void) SyncSWTimelineReleaseKM(psCacheOpWorkItem->sSWTimelineObj);
+#else
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+
+ return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpGlobalFlush(void)
+{
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+
+ if (! CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ OSLockAcquire(gsCwq.hGlobalFlushLock);
+ if (ui32OpSeqNum < gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0])
+ {
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32KMDiscards += 1;
+#endif
+ eError = PVRSRV_OK;
+ goto exit;
+ }
+
+ /* User space sampling the information-page seqNumbers after this point
+ and before the corresponding GFSEQNUM0 update leads to an invalid
+ sampling which must be discarded by UM. This implements a lockless
+ critical region for a single KM(writer) & multiple UM/KM(readers) */
+ ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = ui32OpSeqNum;
+
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ PVR_LOGG_IF_ERROR(eError, "OSCPUOperation(PVRSRV_CACHE_OP_FLUSH)", exit);
+
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = ui32OpSeqNum;
+ OSAtomicWrite(&gsCwq.hDeferredSize, 0);
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32ServerGF += 1;
+#endif
+
+exit:
+ OSLockRelease(gsCwq.hGlobalFlushLock);
+ return eError;
+#endif
+}
+
+static INLINE void CacheOpExecRangeBased(PVRSRV_DEVICE_NODE *psDevNode,
+ PVRSRV_CACHE_OP uiCacheOp,
+ IMG_BYTE *pbCpuVirtAddr,
+ IMG_CPU_PHYADDR sCpuPhyAddr,
+ IMG_DEVMEM_OFFSET_T uiPgAlignedOffset,
+ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset,
+ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset)
+{
+ IMG_BYTE *pbCpuVirtAddrEnd;
+ IMG_BYTE *pbCpuVirtAddrStart;
+ IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+ IMG_CPU_PHYADDR sCpuPhyAddrStart;
+ IMG_DEVMEM_SIZE_T uiRelFlushSize;
+ IMG_DEVMEM_OFFSET_T uiRelFlushOffset;
+ IMG_DEVMEM_SIZE_T uiNextPgAlignedOffset;
+
+	/* These quantities allow us to perform cache operations
+	   at cache-line granularity, thereby ensuring we do not
+	   perform more maintenance than is necessary */
+ CACHEOP_PVR_ASSERT(uiPgAlignedOffset < uiCLAlignedEndOffset);
+ uiRelFlushSize = (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+ uiRelFlushOffset = 0;
+
+ if (uiCLAlignedStartOffset > uiPgAlignedOffset)
+ {
+ /* Zero unless initially starting at an in-page offset */
+ uiRelFlushOffset = uiCLAlignedStartOffset - uiPgAlignedOffset;
+ uiRelFlushSize -= uiRelFlushOffset;
+ }
+
+	/* uiRelFlushSize is gsCwq.uiPageSize unless the current outstanding CacheOp
+	   size is smaller. The 1st case handles an in-page CacheOp range and the
+	   2nd case handles a multiple-page CacheOp range with a last CacheOp size
+	   that is less than gsCwq.uiPageSize */
+ uiNextPgAlignedOffset = uiPgAlignedOffset + (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize;
+ if (uiNextPgAlignedOffset < uiPgAlignedOffset)
+ {
+ /* uiNextPgAlignedOffset is greater than uiCLAlignedEndOffset
+ by implication of this wrap-round; this only happens when
+ uiPgAlignedOffset is the last page aligned offset */
+ uiRelFlushSize = uiRelFlushOffset ?
+ uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+ uiCLAlignedEndOffset - uiPgAlignedOffset;
+ }
+ else
+ {
+ if (uiNextPgAlignedOffset > uiCLAlignedEndOffset)
+ {
+ uiRelFlushSize = uiRelFlushOffset ?
+ uiCLAlignedEndOffset - uiCLAlignedStartOffset :
+ uiCLAlignedEndOffset - uiPgAlignedOffset;
+ }
+ }
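+	/* Illustrative example, assuming 4KiB pages and 64B cache lines:
+	   uiCLAlignedStartOffset=0x1040 and uiCLAlignedEndOffset=0x1080
+	   visiting uiPgAlignedOffset=0x1000 yield uiRelFlushOffset=0x40
+	   and uiRelFlushSize=0x40, i.e. a single cache line is maintained
+	   rather than the whole page. */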
+
+ /* More efficient to request cache maintenance operation for full
+ relative range as opposed to multiple cache-aligned ranges */
+ sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + uiRelFlushOffset;
+ sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr + uiRelFlushSize;
+ if (pbCpuVirtAddr)
+ {
+ pbCpuVirtAddrStart = pbCpuVirtAddr + uiRelFlushOffset;
+ pbCpuVirtAddrEnd = pbCpuVirtAddrStart + uiRelFlushSize;
+ }
+ else
+ {
+ /* Some OS/Env layer support functions expect NULL(s) */
+ pbCpuVirtAddrStart = NULL;
+ pbCpuVirtAddrEnd = NULL;
+ }
+
+ /* Perform requested CacheOp on the CPU data cache for successive cache
+ line worth of bytes up to page or in-page cache-line boundary */
+ switch (uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ OSCPUCacheCleanRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+ sCpuPhyAddrStart, sCpuPhyAddrEnd);
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ OSCPUCacheInvalidateRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+ sCpuPhyAddrStart, sCpuPhyAddrEnd);
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ OSCPUCacheFlushRangeKM(psDevNode, pbCpuVirtAddrStart, pbCpuVirtAddrEnd,
+ sCpuPhyAddrStart, sCpuPhyAddrEnd);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d",
+ __FUNCTION__, uiCacheOp));
+ break;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ /* Tracks the number of kernel-mode cacheline maintenance instructions */
+ gsCwq.ui32ServerRBF += (uiRelFlushSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+#endif
+}
+
+static INLINE void CacheOpExecRangeBasedVA(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_CPU_VIRTADDR pvAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp)
+{
+ IMG_CPU_PHYADDR sCpuPhyAddrUnused =
+ { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
+ IMG_BYTE *pbEnd = (IMG_BYTE*)((uintptr_t)pvAddress + (uintptr_t)uiSize);
+ IMG_BYTE *pbStart = (IMG_BYTE*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiLineSize-1));
+
+	/*
+	   If the start address isn't aligned to the cache line size, it is rounded
+	   down (above) to the nearest multiple; likewise the end address is rounded
+	   up here. This ensures that we flush all the cache lines affected by
+	   unaligned start/end addresses.
+	*/
+ pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)gsCwq.uiLineSize);
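+	/* Illustrative example, assuming 64B cache lines: pvAddress=0x2030
+	   and uiSize=0x20 give pbStart=0x2000 (rounded down) and
+	   pbEnd=0x2080 (0x2050 rounded up), so both cache lines touched by
+	   the unaligned range are maintained. */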
+ switch (uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ OSCPUCacheCleanRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ OSCPUCacheInvalidateRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ OSCPUCacheFlushRangeKM(psDevNode, pbStart, pbEnd, sCpuPhyAddrUnused, sCpuPhyAddrUnused);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", __FUNCTION__, uiCacheOp));
+ break;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ /* Tracks the number of kernel-mode cacheline maintenance instructions */
+ gsCwq.ui32ServerRBF += (uiSize & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+#endif
+}
+
+static INLINE PVRSRV_ERROR CacheOpValidateVAOffset(PMR *psPMR,
+ IMG_CPU_VIRTADDR pvAddress,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ void **ppvOutAddress)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(LINUX) && !defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+#endif
+
+ if (! pvAddress)
+ {
+ /* As pvAddress is optional, NULL is expected from UM/KM requests */
+ goto e0;
+ }
+
+#if !defined(LINUX) || defined(CACHEFLUSH_NO_KMRBF_USING_UMVA)
+ pvAddress = NULL;
+#else
+ /* Validate VA, assume most basic address limit access_ok() check */
+ pvAddress = (void*)(uintptr_t)((uintptr_t)pvAddress + uiOffset);
+ if (! access_ok(VERIFY_READ, pvAddress, uiSize))
+ {
+ pvAddress = NULL;
+ if (! mm)
+ {
+ /* Bad KM request, don't silently ignore */
+ eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+ goto e0;
+ }
+ }
+ else if (mm)
+ {
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, (unsigned long)(uintptr_t)pvAddress);
+ if (!vma ||
+ vma->vm_start > (unsigned long)(uintptr_t)pvAddress ||
+			vma->vm_end < (unsigned long)(uintptr_t)pvAddress + (unsigned long)(uintptr_t)uiSize)
+ {
+ /* Out of range mm_struct->vm_area VA */
+ pvAddress = NULL;
+ }
+ else if (vma->vm_private_data != psPMR)
+ {
+			/*
+			   Unknown mm_struct->vm_area VA; we can't risk dcache maintenance using
+			   this VA because the client's user space mapping could be removed without
+			   our knowledge, which might induce a CPU fault during cache maintenance.
+			*/
+ pvAddress = NULL;
+ }
+ else if ((uintptr_t)pvAddress < (uintptr_t)gsCwq.uiPageSize)
+ {
+ /* Silently suppress UM NULL page pointers */
+ pvAddress = NULL;
+ }
+
+ up_read(&mm->mmap_sem);
+ }
+ else
+ {
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
+ mm = current->active_mm;
+
+		/*
+		   For KM requests perform additional VA validation: walk the kernel
+		   page-table structures to be sure the VA is safe to use.
+		*/
+ pgd = pgd_offset(mm, (uintptr_t)pvAddress);
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ {
+ eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+ pvAddress = NULL;
+ goto e0;
+ }
+
+ p4d = p4d_offset(pgd, (uintptr_t)pvAddress);
+ if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
+ {
+ eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+ pvAddress = NULL;
+ goto e0;
+ }
+
+ pud = pud_offset(p4d, (uintptr_t)pvAddress);
+ if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+ {
+ eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+ pvAddress = NULL;
+ goto e0;
+ }
+
+ pmd = pmd_offset(pud, (uintptr_t)pvAddress);
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+ {
+ eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+ pvAddress = NULL;
+ goto e0;
+ }
+
+		ptep = pte_offset_map(pmd, (uintptr_t)pvAddress);
+		if (!ptep || !pte_present(*ptep))
+		{
+			if (ptep)
+			{
+				pte_unmap(ptep);
+			}
+			eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+			pvAddress = NULL;
+			goto e0;
+		}
+
+		/* Release the temporary kernel mapping taken by pte_offset_map() */
+		pte_unmap(ptep);
+ }
+#endif
+
+e0:
+ *ppvOutAddress = pvAddress;
+ return eError;
+}
+
+static PVRSRV_ERROR CacheOpPMRExec (PMR *psPMR,
+ IMG_CPU_VIRTADDR pvAddress,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp,
+ IMG_UINT32 ui32GFlushSeqNum,
+ IMG_BOOL bIsRequestValidated,
+ IMG_BOOL *pbUsedGlobalFlush)
+{
+ IMG_HANDLE hPrivOut;
+ IMG_BOOL bPMRIsSparse;
+ IMG_UINT32 ui32PageIndex;
+ IMG_UINT32 ui32NumOfPages;
+ IMG_DEVMEM_SIZE_T uiOutSize;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_DEVMEM_SIZE_T uiPgAlignedSize;
+ IMG_DEVMEM_OFFSET_T uiPgAlignedOffset;
+ IMG_DEVMEM_OFFSET_T uiCLAlignedEndOffset;
+ IMG_DEVMEM_OFFSET_T uiPgAlignedEndOffset;
+ IMG_DEVMEM_OFFSET_T uiCLAlignedStartOffset;
+ IMG_DEVMEM_OFFSET_T uiPgAlignedStartOffset;
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_CPU_PHYADDR asCpuPhyAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_CPU_PHYADDR *psCpuPhyAddr = asCpuPhyAddr;
+ IMG_BOOL bIsPMRInfoValid = IMG_FALSE;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BYTE *pbCpuVirtAddr = NULL;
+ IMG_BOOL *pbValid = abValid;
+ *pbUsedGlobalFlush = IMG_FALSE;
+
+ if (uiCacheOp == PVRSRV_CACHE_OP_NONE || uiCacheOp == PVRSRV_CACHE_OP_TIMELINE)
+ {
+ return PVRSRV_OK;
+ }
+
+	/* Check for explicitly requested KGF, or KRBF requests promoted to KGF */
+ if (uiCacheOp == PVRSRV_CACHE_OP_GLOBAL || uiSize == 0 ||
+ uiSize >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD])
+ {
+		/* Discard if another KGF has occurred in the interim */
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GFlushSeqNum)
+ {
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32KMDiscards += 1;
+#endif
+ return PVRSRV_OK;
+ }
+		/* Some CPU ISAs support KGF; if it fails, fall back to KRBF */
+ else if ((eError = CacheOpGlobalFlush()) == PVRSRV_OK)
+ {
+ *pbUsedGlobalFlush = IMG_TRUE;
+ return PVRSRV_OK;
+ }
+ /* Request with uiSize=0 is treated as a KGF request as well */
+ else if (uiCacheOp == PVRSRV_CACHE_OP_GLOBAL || uiSize == 0)
+ {
+			/* Cannot fall back to KRBF as an explicit KGF was erroneously requested */
+ PVR_LOGR_IF_ERROR(eError, CACHEOP_NO_GFLUSH_ERROR_STRING);
+ CACHEOP_PVR_ASSERT(0);
+ }
+ }
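+	/* Illustrative example, assuming KMGFTHRESHLD is configured as 8MiB:
+	   a 32MiB flush request is promoted to KGF above, whereas a request
+	   whose ui32GFlushSeqNum is already behind the info-page GFSEQNUM0
+	   is discarded outright because an interim global flush has already
+	   done the work. */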
+
+ if (! bIsRequestValidated)
+ {
+ IMG_DEVMEM_SIZE_T uiLogicalSize;
+
+ /* Need to validate parameters before proceeding */
+ eError = PMR_LogicalSize(psPMR, &uiLogicalSize);
+ PVR_LOGR_IF_ERROR(eError, "PMR_LogicalSize");
+
+ PVR_LOGR_IF_FALSE(((uiOffset+uiSize) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE);
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_LOGR_IF_ERROR(eError, "PMRLockSysPhysAddresses");
+ }
+
+ /* Fast track the request if a CPU VA is provided and CPU ISA supports VA only maintenance */
+ eError = CacheOpValidateVAOffset(psPMR, pvAddress, uiOffset, uiSize, (void**)&pbCpuVirtAddr);
+ if (eError == PVRSRV_OK)
+ {
+ pvAddress = pbCpuVirtAddr;
+
+ if (pvAddress && gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pvAddress, uiSize, uiCacheOp);
+ if (! bIsRequestValidated)
+ {
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+ }
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32ServerSyncVA += 1;
+#endif
+ return PVRSRV_OK;
+ }
+ else if (pvAddress)
+ {
+			/* Round the incoming VA (if any) down to the nearest page-aligned VA */
+ pvAddress = (void*)((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1));
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32ServerSyncVA += 1;
+#endif
+ }
+ }
+ else
+ {
+		/*
+		 * This validation pathway has been added to accommodate any/all requests that
+		 * might cause the kernel to Oops; essentially, KM requests should prevalidate
+		 * cache maint. parameters, but if this fails we would rather fail gracefully
+		 * than cause the kernel to Oops, so we log the fact that an invalid KM virtual
+		 * address was supplied along with whatever action was taken to mitigate
+		 * against the kernel Oops(ing), if any.
+		 */
+ CACHEOP_PVR_ASSERT(pbCpuVirtAddr == NULL);
+
+ if (gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, using paddress",
+ __FUNCTION__,
+ pvAddress));
+
+ /* We can still proceed as kernel/cpu uses CPU PA for d-cache maintenance */
+ pvAddress = NULL;
+ }
+ else if (CacheOpGlobalFlush() == PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, used global flush",
+ __FUNCTION__,
+ pvAddress));
+
+ /* Saved by global flush impl. */
+ *pbUsedGlobalFlush = IMG_TRUE;
+ eError = PVRSRV_OK;
+ goto e0;
+ }
+ else
+ {
+			/*
+			 * The approach here is to attempt a reacquisition of the PMR kernel VA and see if
+			 * said VA corresponds to the parameter VA; if so, fail the requested cache maint.
+			 * op. because this indicates some kind of internal memory and/or meta-data
+			 * corruption, else reissue the request using this (re)acquired alias PMR kernel VA.
+			 */
+ if (PMR_IsSparse(psPMR))
+ {
+ eError = PMRAcquireSparseKernelMappingData(psPMR,
+ 0,
+ gsCwq.uiPageSize,
+ (void **)&pbCpuVirtAddr,
+ (size_t*)&uiOutSize,
+ &hPrivOut);
+ PVR_LOGG_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+ }
+ else
+ {
+ eError = PMRAcquireKernelMappingData(psPMR,
+ 0,
+ gsCwq.uiPageSize,
+ (void **)&pbCpuVirtAddr,
+ (size_t*)&uiOutSize,
+ &hPrivOut);
+ PVR_LOGG_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+ }
+
+ /* Here, we only compare these CPU virtual addresses at granularity of the OS page size */
+ if ((uintptr_t)pbCpuVirtAddr == ((uintptr_t)pvAddress & ~((uintptr_t)gsCwq.uiPageSize-1)))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid vaddress 0x%p in CPU d-cache maint. op, no alt. so failing request",
+ __FUNCTION__,
+ pvAddress));
+
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+ eError = PVRSRV_ERROR_INVALID_CPU_ADDR;
+ goto e0;
+ }
+ else if (gsCwq.uiCacheOpAddrType == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Bad vaddress 0x%p in CPU d-cache maint. op, using reacquired vaddress 0x%p",
+ __FUNCTION__,
+ pvAddress,
+ pbCpuVirtAddr));
+
+				/* Note that this might still fail if there is kernel memory/meta-data
+				   corruption; there is not much we can do here, but at least we will
+				   be informed of this before the kernel Oopses */
+ CacheOpExecRangeBasedVA(PMR_DeviceNode(psPMR), pbCpuVirtAddr, uiSize, uiCacheOp);
+
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+ eError = PVRSRV_OK;
+ goto e0;
+ }
+ else
+ {
+				/* At this junction we have exhausted every possible work-around, but we
+				   do know that VA reacquisition returned another (alias) page-aligned VA;
+				   so, expecting the same of future PMRAcquireKernelMappingData() calls,
+				   we proceed */
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Bad vaddress %p in CPU d-cache maint. op, will use reacquired vaddress",
+ __FUNCTION__,
+ pvAddress));
+
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+
+ /* NULL this to force per-page reacquisition down-stream */
+ pvAddress = NULL;
+ }
+ }
+ }
+
+	/* NULL the clobbered variable, OK to proceed */
+ pbCpuVirtAddr = NULL;
+ eError = PVRSRV_OK;
+
+ /* Need this for kernel mapping */
+ bPMRIsSparse = PMR_IsSparse(psPMR);
+ psDevNode = PMR_DeviceNode(psPMR);
+
+	/* Round the incoming start offset down, and the end offset up, to the nearest cache-line / page-aligned addresses */
+ uiCLAlignedEndOffset = uiOffset + uiSize;
+ uiCLAlignedEndOffset = PVR_ALIGN(uiCLAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiLineSize);
+ uiCLAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiLineSize-1));
+
+ uiPgAlignedEndOffset = uiCLAlignedEndOffset;
+ uiPgAlignedEndOffset = PVR_ALIGN(uiPgAlignedEndOffset, (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize);
+ uiPgAlignedStartOffset = (uiOffset & ~((IMG_DEVMEM_OFFSET_T)gsCwq.uiPageSize-1));
+ uiPgAlignedSize = uiPgAlignedEndOffset - uiPgAlignedStartOffset;
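+	/* Illustrative example, assuming 64B lines and 4KiB pages: for
+	   uiOffset=0x1234 and uiSize=0x100 the CL-aligned range becomes
+	   [0x1200, 0x1340) and the page-aligned range [0x1000, 0x2000),
+	   i.e. uiPgAlignedSize=0x1000 covering a single PMR page. */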
+
+#if defined(CACHEOP_NO_CACHE_LINE_ALIGNED_ROUNDING)
+ /* For internal debug if cache-line optimised
+ flushing is suspected of causing data corruption */
+ uiCLAlignedStartOffset = uiPgAlignedStartOffset;
+ uiCLAlignedEndOffset = uiPgAlignedEndOffset;
+#endif
+
+	/* Number of PMR pages to process; determines whether the translation arrays below need heap allocation */
+ ui32NumOfPages = uiPgAlignedSize >> gsCwq.uiPageShift;
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ /* The pbValid array is allocated first as it is needed in
+ both physical/virtual cache maintenance methods */
+ pbValid = OSAllocZMem(ui32NumOfPages * sizeof(IMG_BOOL));
+ if (! pbValid)
+ {
+ pbValid = abValid;
+ }
+ else if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ psCpuPhyAddr = OSAllocZMem(ui32NumOfPages * sizeof(IMG_CPU_PHYADDR));
+ if (! psCpuPhyAddr)
+ {
+ psCpuPhyAddr = asCpuPhyAddr;
+ OSFreeMem(pbValid);
+ pbValid = abValid;
+ }
+ }
+ }
+
+	/* We always retrieve PMR data in bulk up-front if the number of pages is
+	   within PMR_MAX_TRANSLATION_STACK_ALLOC limits; otherwise we check that a
+	   dynamic buffer has been allocated to satisfy requests outside those limits */
+ if (ui32NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC || pbValid != abValid)
+ {
+ if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ /* Look-up PMR CpuPhyAddr once, if possible */
+ eError = PMR_CpuPhysAddr(psPMR,
+ gsCwq.uiPageShift,
+ ui32NumOfPages,
+ uiPgAlignedStartOffset,
+ psCpuPhyAddr,
+ pbValid);
+ if (eError == PVRSRV_OK)
+ {
+ bIsPMRInfoValid = IMG_TRUE;
+ }
+ }
+ else
+ {
+ /* Look-up PMR per-page validity once, if possible */
+ eError = PMR_IsOffsetValid(psPMR,
+ gsCwq.uiPageShift,
+ ui32NumOfPages,
+ uiPgAlignedStartOffset,
+ pbValid);
+ bIsPMRInfoValid = (eError == PVRSRV_OK) ? IMG_TRUE : IMG_FALSE;
+ }
+ }
+
+ /* For each (possibly non-contiguous) PMR page(s), carry out the requested cache maint. op. */
+ for (uiPgAlignedOffset = uiPgAlignedStartOffset, ui32PageIndex = 0;
+ uiPgAlignedOffset < uiPgAlignedEndOffset;
+ uiPgAlignedOffset += (IMG_DEVMEM_OFFSET_T) gsCwq.uiPageSize, ui32PageIndex += 1)
+ {
+ /* Just before issuing the CacheOp RBF, check if it can be discarded */
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GFlushSeqNum)
+ {
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32KMDiscards += 1;
+#endif
+ break;
+ }
+
+ if (! bIsPMRInfoValid)
+ {
+			/* Never cross a page boundary without looking up the corresponding PMR page
+			   physical address and/or page validity, if these were not looked up in bulk up-front */
+ ui32PageIndex = 0;
+ if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ eError = PMR_CpuPhysAddr(psPMR,
+ gsCwq.uiPageShift,
+ 1,
+ uiPgAlignedOffset,
+ psCpuPhyAddr,
+ pbValid);
+ PVR_LOGG_IF_ERROR(eError, "PMR_CpuPhysAddr", e0);
+ }
+ else
+ {
+ eError = PMR_IsOffsetValid(psPMR,
+ gsCwq.uiPageShift,
+ 1,
+ uiPgAlignedOffset,
+ pbValid);
+ PVR_LOGG_IF_ERROR(eError, "PMR_IsOffsetValid", e0);
+ }
+ }
+
+ /* Skip invalid PMR pages (i.e. sparse) */
+ if (pbValid[ui32PageIndex] == IMG_FALSE)
+ {
+ CACHEOP_PVR_ASSERT(bPMRIsSparse);
+ continue;
+ }
+
+ if (pvAddress)
+ {
+			/* The caller supplied a KM/UM CpuVA, so use it unconditionally */
+ pbCpuVirtAddr =
+ (void *)(uintptr_t)((uintptr_t)pvAddress + (uintptr_t)(uiPgAlignedOffset-uiPgAlignedStartOffset));
+ }
+ /* Skip CpuVA acquire if CacheOp can be maintained entirely using CpuPA */
+ else if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+ {
+ if (bPMRIsSparse)
+ {
+ eError =
+ PMRAcquireSparseKernelMappingData(psPMR,
+ uiPgAlignedOffset,
+ gsCwq.uiPageSize,
+ (void **)&pbCpuVirtAddr,
+ (size_t*)&uiOutSize,
+ &hPrivOut);
+ PVR_LOGG_IF_ERROR(eError, "PMRAcquireSparseKernelMappingData", e0);
+ }
+ else
+ {
+ eError =
+ PMRAcquireKernelMappingData(psPMR,
+ uiPgAlignedOffset,
+ gsCwq.uiPageSize,
+ (void **)&pbCpuVirtAddr,
+ (size_t*)&uiOutSize,
+ &hPrivOut);
+ PVR_LOGG_IF_ERROR(eError, "PMRAcquireKernelMappingData", e0);
+ }
+ }
+
+ /* Issue actual cache maintenance for PMR */
+ CacheOpExecRangeBased(psDevNode,
+ uiCacheOp,
+ pbCpuVirtAddr,
+ (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL) ?
+ psCpuPhyAddr[ui32PageIndex] : psCpuPhyAddr[0],
+ uiPgAlignedOffset,
+ uiCLAlignedStartOffset,
+ uiCLAlignedEndOffset);
+
+ if (! pvAddress)
+ {
+			/* The caller did not supply a KM/UM CpuVA, so release the acquired mapping */
+ if (gsCwq.uiCacheOpAddrType != PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL)
+ {
+ eError = PMRReleaseKernelMappingData(psPMR, hPrivOut);
+ PVR_LOG_IF_ERROR(eError, "PMRReleaseKernelMappingData");
+ }
+ }
+ }
+
+e0:
+ if (psCpuPhyAddr != asCpuPhyAddr)
+ {
+ OSFreeMem(psCpuPhyAddr);
+ }
+
+ if (pbValid != abValid)
+ {
+ OSFreeMem(pbValid);
+ }
+
+ if (! bIsRequestValidated)
+ {
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_LOG_IF_ERROR(eError, "PMRUnlockSysPhysAddresses");
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR CacheOpQListExecGlobal(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32NumOfEntries;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem;
+#if defined(CACHEOP_DEBUG)
+ IMG_UINT64 uiTimeNow = 0;
+ IMG_UINT64 ui64DequeuedTime;
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+#endif
+ CACHEOP_PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+
+	/* Take the current snapshot of queued CacheOps before we issue a global cache
+	   flush operation, so that we retire exactly the number of CacheOps that have
+	   been affected by the to-be-executed global CacheOp */
+ ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+ if (OSAtomicRead(&gsCwq.hWriteCounter) < OSAtomicRead(&gsCwq.hReadCounter))
+ {
+		/* Branch handles the case where the write-counter has wrapped around in value
+		   space. The logic works because the read-counter does not change value for
+		   the duration of this function, so we don't run the risk of it too wrapping
+		   round whilst the number of entries is being determined here; that is to
+		   say, the consumer in this framework is single threaded and this function
+		   is that consumer thread */
+ ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hReadCounter, &gsCwq.hWriteCounter);
+
+ /* Two's complement arithmetic gives the number of entries */
+ ui32NumOfEntries = CACHEOP_INDICES_MAX - ui32NumOfEntries;
+ }
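+	/* Illustrative example, assuming CacheOpIdxSpan() returns the plain
+	   difference of its two counters and CACHEOP_INDICES_MAX is 16: a
+	   wrapped write-counter of 2 with a read-counter of 14 gives
+	   span(read,write)=12 and hence 16-12=4 queued entries, namely the
+	   slots 14, 15, 0 and 1. */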
+ if (! ui32NumOfEntries)
+ {
+ return PVRSRV_OK;
+ }
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
+#endif
+
+ /* Use the current/latest queue-tail work-item's GF/SeqNum to predicate GF */
+ psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxRead(&gsCwq.hWriteCounter)];
+ CacheOpQItemReadCheck(psCacheOpWorkItem);
+#if defined(CACHEOP_DEBUG)
+ /* The time waiting in the queue to be serviced */
+ ui64DequeuedTime = OSClockns64();
+#endif
+
+ /* Check if items between [hRead/hWrite]Counter can be discarded before issuing GF */
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > psCacheOpWorkItem->ui32GFSeqNum)
+ {
+ /* The currently discarded CacheOp item updates gsCwq.hCompletedSeqNum */
+ OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32KMDiscards += ui32NumOfEntries;
+#endif
+ }
+ else
+ {
+ eError = CacheOpGlobalFlush();
+ PVR_LOGR_IF_ERROR(eError, "CacheOpGlobalFlush");
+#if defined(CACHEOP_DEBUG)
+ uiTimeNow = OSClockns64();
+ sCacheOpWorkItem.bDeferred = IMG_TRUE;
+ sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+ sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+ sCacheOpWorkItem.pid = OSGetCurrentProcessID();
+ sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+ sCacheOpWorkItem.ui64DequeuedTime = ui64DequeuedTime;
+ sCacheOpWorkItem.ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+ sCacheOpWorkItem.ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+ }
+
+ while (ui32NumOfEntries)
+ {
+ psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+ CacheOpQItemReadCheck(psCacheOpWorkItem);
+
+#if defined(CACHEOP_DEBUG)
+ if (psCacheOpWorkItem->uiCacheOp != PVRSRV_CACHE_OP_GLOBAL)
+ {
+ psCacheOpWorkItem->bRBF = IMG_FALSE;
+			if (! uiTimeNow)
+			{
+				/* Measure deferred queueing overhead only once, against the first item */
+				uiTimeNow = OSClockns64();
+			}
+			psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+ psCacheOpWorkItem->ui64DequeuedTime = ui64DequeuedTime;
+ CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+ }
+ /* Something's gone horribly wrong if these 2 counters are identical at this point */
+ CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+ /* If CacheOp is timeline(d), notify timeline waiters */
+ eError = CacheOpTimelineExec(psCacheOpWorkItem);
+ PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+ /* Mark index as ready for recycling for next CacheOp */
+ CacheOpQItemRecycle(psCacheOpWorkItem);
+ (void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+ ui32NumOfEntries = ui32NumOfEntries - 1;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ if (uiTimeNow)
+ {
+ /* Only log GF that was actually executed */
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+ }
+#endif
+
+ return eError;
+}
+
+static PVRSRV_ERROR CacheOpQListExecRangeBased(void)
+{
+ IMG_UINT32 ui32NumOfEntries;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32WriteCounter = ~0;
+ IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+ IMG_UINT64 uiTimeNow = 0;
+#endif
+
+ /* Take a snapshot of the current count of deferred entries at this junction */
+ ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+ if (! ui32NumOfEntries)
+ {
+ return PVRSRV_OK;
+ }
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_PVR_ASSERT(ui32NumOfEntries < CACHEOP_INDICES_MAX);
+#endif
+
+ while (ui32NumOfEntries)
+ {
+ if (! OSAtomicRead(&gsCwq.hReadCounter))
+ {
+			/* Normally the read-counter trails the write-counter until the write
+			   counter wraps round to zero. Under this condition we (re)calculate,
+			   as the read-counter too is wrapping around at this point */
+ ui32NumOfEntries = CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter);
+ }
+#if defined(CACHEOP_DEBUG)
+ /* Something's gone horribly wrong if these 2 counters are identical at this point */
+ CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+ /* Select the next pending deferred work-item for RBF cache maintenance */
+ psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+ CacheOpQItemReadCheck(psCacheOpWorkItem);
+#if defined(CACHEOP_DEBUG)
+ /* The time waiting in the queue to be serviced */
+ psCacheOpWorkItem->ui64DequeuedTime = OSClockns64();
+#endif
+
+		/* The following CacheOpPMRExec() could trigger a GF, so we (re)read this
+		   counter here so that we know exactly which pending CacheOp(s) will
+		   benefit from that soon-to-be-executed GF */
+ ui32WriteCounter = CacheOpConfigSupports(CACHEOP_CONFIG_KGF) ?
+ OSAtomicRead(&gsCwq.hWriteCounter) : ui32WriteCounter;
+
+ eError = CacheOpPMRExec(psCacheOpWorkItem->psPMR,
+ NULL, /* No UM virtual address */
+ psCacheOpWorkItem->uiOffset,
+ psCacheOpWorkItem->uiSize,
+ psCacheOpWorkItem->uiCacheOp,
+ psCacheOpWorkItem->ui32GFSeqNum,
+ IMG_TRUE, /* PMR is pre-validated */
+ &bUsedGlobalFlush);
+ if (eError != PVRSRV_OK)
+ {
+#if defined(CACHEOP_DEBUG)
+ PVR_LOG(("Deferred CacheOpPMRExec failed: PID:%d PMR:%p Offset:%" IMG_UINT64_FMTSPECX " Size:%" IMG_UINT64_FMTSPECX " CacheOp:%d, error: %d",
+ (IMG_UINT32)psCacheOpWorkItem->pid,
+ psCacheOpWorkItem->psPMR,
+ psCacheOpWorkItem->uiOffset,
+ psCacheOpWorkItem->uiSize,
+ psCacheOpWorkItem->uiCacheOp,
+ eError));
+#else
+		PVR_LOG(("Deferred CacheOpPMRExec failed: PMR:%p Offset:%" IMG_UINT64_FMTSPECX " Size:%" IMG_UINT64_FMTSPECX " CacheOp:%d, error: %d",
+ psCacheOpWorkItem->psPMR,
+ psCacheOpWorkItem->uiOffset,
+ psCacheOpWorkItem->uiSize,
+ psCacheOpWorkItem->uiCacheOp,
+ eError));
+#endif
+ }
+ else if (bUsedGlobalFlush)
+ {
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui32OpSeqNum = gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+ break;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ if (psCacheOpWorkItem->uiCacheOp != PVRSRV_CACHE_OP_GLOBAL)
+ {
+ psCacheOpWorkItem->bRBF = IMG_TRUE;
+ psCacheOpWorkItem->ui64ExecuteTime = OSClockns64();
+ CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+ }
+ else
+ {
+ CACHEOP_PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+ }
+#endif
+
+ /* The currently executed CacheOp item updates gsCwq.hCompletedSeqNum.
+ NOTE: This CacheOp item might be a discard item, if so its seqNum
+ still updates the gsCwq.hCompletedSeqNum */
+ OSAtomicWrite(&gsCwq.hCompletedSeqNum, psCacheOpWorkItem->ui32OpSeqNum);
+ OSAtomicSubtract(&gsCwq.hDeferredSize, psCacheOpWorkItem->uiSize);
+
+ /* If CacheOp is timeline(d), notify timeline waiters */
+ eError = CacheOpTimelineExec(psCacheOpWorkItem);
+ PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+ /* Indicate that this CCB work-item slot is now free for (re)use */
+ CacheOpQItemRecycle(psCacheOpWorkItem);
+ (void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+ ui32NumOfEntries = ui32NumOfEntries - 1;
+ }
+
+ if (bUsedGlobalFlush)
+ {
+#if defined(CACHEOP_DEBUG)
+ uiTimeNow = OSClockns64();
+ CACHEOP_PVR_ASSERT(OSAtomicRead(&gsCwq.hWriteCounter) != OSAtomicRead(&gsCwq.hReadCounter));
+#endif
+
+ /* Snapshot of queued CacheOps before the global cache flush was issued */
+ ui32NumOfEntries = ui32WriteCounter - OSAtomicRead(&gsCwq.hReadCounter);
+ if (ui32WriteCounter < OSAtomicRead(&gsCwq.hReadCounter))
+ {
+ /* Branch handles when the write-counter has wrapped-around in value space */
+ ui32NumOfEntries = OSAtomicRead(&gsCwq.hReadCounter) - ui32WriteCounter;
+ ui32NumOfEntries = CACHEOP_INDICES_MAX - ui32NumOfEntries;
+ }
+
+ while (ui32NumOfEntries)
+ {
+ CacheOpQItemReadCheck(psCacheOpWorkItem);
+
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->bRBF = IMG_FALSE;
+ psCacheOpWorkItem->ui64ExecuteTime = uiTimeNow;
+ if (psCacheOpWorkItem->uiCacheOp == PVRSRV_CACHE_OP_GLOBAL)
+ {
+ CACHEOP_PVR_ASSERT(!gsCwq.bNoGlobalFlushImpl);
+ psCacheOpWorkItem->pid = OSGetCurrentProcessID();
+ }
+ CacheOpStatsExecLogWrite(psCacheOpWorkItem);
+#endif
+
+ eError = CacheOpTimelineExec(psCacheOpWorkItem);
+ PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+
+ /* Mark index as ready for recycling for next CacheOp */
+ CacheOpQItemRecycle(psCacheOpWorkItem);
+ (void) CacheOpIdxIncrement(&gsCwq.hReadCounter);
+ ui32NumOfEntries = ui32NumOfEntries - 1;
+ psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hReadCounter)];
+ }
+ }
+
+ return eError;
+}
+
+static INLINE PVRSRV_ERROR CacheOpQListExec(void)
+{
+ PVRSRV_ERROR eError;
+
+ if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF) &&
+ (!CacheOpConfigSupports(CACHEOP_CONFIG_KRBF)
+ || OSAtomicRead(&gsCwq.hDeferredSize) >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD]))
+ {
+ eError = CacheOpQListExecGlobal();
+ PVR_LOG_IF_ERROR(eError, "CacheOpQListExecGlobal");
+ }
+ else
+ {
+ eError = CacheOpQListExecRangeBased();
+ PVR_LOG_IF_ERROR(eError, "CacheOpQListExecRangeBased");
+ }
+
+	/* Signal any waiting threads blocked on CacheOp fence checks; the completed
+	   sequence number has been updated to the last queued work item */
+ eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+ return eError;
+}
+
+static void CacheOpThread(void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = pvData;
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+
+	/* Store the process id (pid) of the CacheOp worker thread */
+ gsCwq.uiWorkerThreadPid = OSGetCurrentProcessID();
+
+	/* Open the CacheOp thread event object; log an error if the open fails */
+ eError = OSEventObjectOpen(gsCwq.hThreadWakeUpEvtObj, &hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectOpen");
+
+ /* While driver is in good state & loaded, perform pending cache maintenance */
+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) && gsCwq.bInit)
+ {
+		/* Sleep-wait here until signalled for new queued CacheOp work items;
+		   when woken up, drain the deferred queue completely before the next event-wait */
+ eError = OSEventObjectWaitKernel(hOSEvent, CACHEOP_THREAD_WAIT_TIMEOUT);
+ while (CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter))
+ {
+ eError = CacheOpQListExec();
+ PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
+ }
+ }
+
+ eError = CacheOpQListExec();
+ PVR_LOG_IF_ERROR(eError, "CacheOpQListExec");
+
+ eError = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+static PVRSRV_ERROR CacheOpBatchExecTimeline(PVRSRV_TIMELINE iTimeline,
+ IMG_BOOL bUsedGlobalFlush,
+ IMG_UINT32 ui32CurrentFenceSeqNum,
+ IMG_UINT32 *pui32NextFenceSeqNum)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32NextIdx;
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = { };
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+
+ eError = CacheOpTimelineBind(&sCacheOpWorkItem, iTimeline);
+ PVR_LOGR_IF_ERROR(eError, "CacheOpTimelineBind");
+
+ OSLockAcquire(gsCwq.hDeferredLock);
+
+	/*
+	   Check that there is deferred queueing space available and that nothing is
+	   currently queued. The second check is required because Android, where
+	   timelines are used, imposes a signalling deadline (1000ms) on timelines
+	   and complains otherwise. Since we cannot be sure how long the CacheOps
+	   presently in the queue will take, we should not send this timeline down
+	   the queue as well.
+	*/
+ ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+ if (!CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter) &&
+ CacheOpIdxRead(&gsCwq.hReadCounter) != ui32NextIdx)
+ {
+ psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+ CacheOpQItemWriteCheck(psCacheOpWorkItem);
+
+ psCacheOpWorkItem->sSWTimelineObj = sCacheOpWorkItem.sSWTimelineObj;
+ psCacheOpWorkItem->iTimeline = sCacheOpWorkItem.iTimeline;
+ psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_TIMELINE;
+ psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
+ psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
+ psCacheOpWorkItem->ui32GFSeqNum = 0;
+ /* Defer timeline using information page PMR */
+ psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
+ eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->pid = OSGetCurrentClientProcessIDKM();
+ psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+ gsCwq.ui32ServerASync += 1;
+ gsCwq.ui32ServerDTL += 1;
+#endif
+
+ /* Mark index ready for cache maintenance */
+ (void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+ OSLockRelease(gsCwq.hDeferredLock);
+
+		/* Signal the CacheOp thread to ensure this timeline gets processed */
+ eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ else
+ {
+ IMG_BOOL bExecTimeline = IMG_TRUE;
+ IMG_UINT32 ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+
+ OSLockRelease(gsCwq.hDeferredLock);
+
+		/*
+		   This pathway requires careful handling, as the client CacheOp(s) predicated on
+		   this timeline might have been broken up (i.e. batched) into several server
+		   requests by the client:
+		   1 - In the first case, a CacheOp from an earlier batch is still in-flight, so we
+		   check for this; even though we might have executed all the CacheOps in this batch
+		   synchronously, we cannot be sure that an in-flight CacheOp pending for this client
+		   is not predicated on this timeline, so for safety we synchronise here by fencing
+		   until all in-flight CacheOps have completed. NOTE: On Android this might breach
+		   timeline notification deadlines, so there we do not fence (i.e. cannot sleep or
+		   wait) to synchronise; instead we nudge the services client to retry the request
+		   if there is no GF support.
+		   2 - In the second case, there is no in-flight CacheOp for this client, so just
+		   continue processing as normal.
+		*/
+ if (!bUsedGlobalFlush && !CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32CurrentFenceSeqNum))
+ {
+#if defined(ANDROID)
+ bExecTimeline = IMG_TRUE;
+ if (CacheOpGlobalFlush() != PVRSRV_OK)
+ {
+ bExecTimeline = IMG_FALSE;
+ eError = PVRSRV_ERROR_RETRY;
+ }
+#else
+ eError = CacheOpFence ((RGXFWIF_DM)0, ui32CurrentFenceSeqNum);
+ PVR_LOG_IF_ERROR(eError, "CacheOpFence");
+
+ /* CacheOpFence() might have triggered a GF so we take advantage of it */
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32CurrentFenceSeqNum)
+ {
+ *pui32NextFenceSeqNum = 0;
+ }
+#endif
+ }
+
+ if (bExecTimeline)
+ {
+ /* CacheOp fence requirement met, signal timeline */
+ eError = CacheOpTimelineExec(&sCacheOpWorkItem);
+ PVR_LOG_IF_ERROR(eError, "CacheOpTimelineExec");
+ }
+ }
+
+ return eError;
+e0:
+ if (psCacheOpWorkItem)
+ {
+ /* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+ CacheOpQItemRecycle(psCacheOpWorkItem);
+ OSLockRelease(gsCwq.hDeferredLock);
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecRangeBased(PMR **ppsPMR,
+ IMG_CPU_VIRTADDR *pvAddress,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 ui32NumCacheOps,
+ PVRSRV_TIMELINE uiTimeline,
+ IMG_UINT32 ui32GlobalFlushSeqNum,
+ IMG_UINT32 uiCurrentFenceSeqNum,
+ IMG_UINT32 *pui32NextFenceSeqNum)
+{
+ IMG_UINT32 ui32Idx;
+ IMG_UINT32 ui32NextIdx;
+ IMG_BOOL bBatchHasTimeline;
+ IMG_BOOL bCacheOpConfigKDF;
+ IMG_BOOL bCacheOpConfigKRBF;
+ IMG_DEVMEM_SIZE_T uiLogicalSize;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bUseGlobalFlush = IMG_FALSE;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+ IMG_UINT32 ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+
+ /* Check if batch has an associated timeline update */
+ bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+ puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_GLOBAL | PVRSRV_CACHE_OP_TIMELINE);
+
+ /* Check if config. supports kernel deferring of cacheops */
+ bCacheOpConfigKDF = CacheOpConfigSupports(CACHEOP_CONFIG_KDF);
+ bCacheOpConfigKRBF = CacheOpConfigSupports(CACHEOP_CONFIG_KRBF);
+
+ /*
+	   Client expects the next fence seqNum to be zero unless the server has deferred
+	   at least one CacheOp in the submitted queue, in which case the server informs
+	   the client of the last CacheOp seqNum deferred in this batch.
+ */
+ for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+ if (! puiSize[ui32Idx])
+ {
+ /* Fail UM request, don't silently ignore */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ else if (bCacheOpConfigKDF)
+ {
+ /* Check if there is deferred queueing space available */
+ ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+ if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
+ {
+ psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+ }
+ }
+
+ /*
+	   Normally we would like to defer client CacheOp(s), but we may not always be in a
+	   position to do so, or it may not be necessary, for the following reasons:
+	   0 - There is currently no queueing space left to enqueue this CacheOp; this might
+	       imply the system is queueing more requests than can be consumed by the CacheOp
+	       thread in time.
+	   1 - Batch has a timeline: action this now due to Android timeline signalling deadlines.
+	   2 - Configuration does not support deferring of cache maintenance operations, so we
+	       execute the batch synchronously/immediately.
+	   3 - CacheOp has an INVALIDATE; as this is used to transfer device memory buffer
+	       ownership back to the processor, we cannot defer it, so action it immediately.
+	   4 - CacheOp size too small (single OS page size) to warrant the overhead of deferment;
+	       this is not considered if KRBF is not present, as that implies defer-all.
+	   5 - CacheOp size OK for deferment, but a client virtual address is supplied, so we
+	       might as well take advantage of said VA & flush immediately in UM context.
+	   6 - Prevent DoS attack if a malicious client queues something very large, say 1GiB,
+	       and the processor cache ISA does not have a global flush implementation. Here
+	       we upper-bound this threshold to PVR_DIRTY_BYTES_FLUSH_THRESHOLD.
+	   7 - Ensure QoS (load balancing) by not over-loading the queue with too many requests;
+	       here the (pseudo) alternate queue is the user context, so we execute directly
+	       on it if the processor cache ISA does not have a global flush implementation.
+ */
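+	/* The disjuncts in the test below correspond, in order, to reasons 0 through 7 above */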
+ if (!psCacheOpWorkItem ||
+ bBatchHasTimeline ||
+ !bCacheOpConfigKDF ||
+ puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE ||
+ (bCacheOpConfigKRBF && puiSize[ui32Idx] <= (IMG_DEVMEM_SIZE_T)gsCwq.uiPageSize) ||
+ (pvAddress[ui32Idx] && puiSize[ui32Idx] < (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD]) ||
+ (gsCwq.bNoGlobalFlushImpl && puiSize[ui32Idx] >= (IMG_DEVMEM_SIZE_T)(gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << 2)) ||
+ (gsCwq.bNoGlobalFlushImpl && OSAtomicRead(&gsCwq.hDeferredSize) >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMDFTHRESHLD] << CACHEOP_INDICES_LOG2_SIZE))
+ {
+			/* When the CacheOp thread is not keeping up, trash the d-cache */
+ bUseGlobalFlush = !psCacheOpWorkItem && bCacheOpConfigKDF ? IMG_TRUE : IMG_FALSE;
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+ gsCwq.ui32ServerSync += 1;
+#endif
+ psCacheOpWorkItem = NULL;
+
+ eError = CacheOpPMRExec(ppsPMR[ui32Idx],
+ pvAddress[ui32Idx],
+ puiOffset[ui32Idx],
+ puiSize[ui32Idx],
+ puiCacheOp[ui32Idx],
+ ui32GlobalFlushSeqNum,
+ IMG_FALSE,
+ &bUseGlobalFlush);
+			PVR_LOGG_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+ sCacheOpWorkItem.bRBF = !bUseGlobalFlush;
+ sCacheOpWorkItem.ui32OpSeqNum = bUseGlobalFlush ?
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] : ui32OpSeqNum;
+ sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+ sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+ sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+ sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+ if (bUseGlobalFlush) break;
+ continue;
+ }
+
+		/* Need to validate request parameters here before enqueueing */
+ eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+ PVR_LOGG_IF_ERROR(eError, "PMR_LogicalSize", e0);
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ PVR_LOGG_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
+ eError = PVRSRV_OK;
+
+ /* For safety, take reference here in user context */
+ eError = PMRLockSysPhysAddresses(ppsPMR[ui32Idx]);
+ PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+
+ OSLockAcquire(gsCwq.hDeferredLock);
+
+ /* Select next item off the queue to defer with */
+ ui32NextIdx = CacheOpIdxNext(&gsCwq.hWriteCounter);
+ if (ui32NextIdx != CacheOpIdxRead(&gsCwq.hReadCounter))
+ {
+ psCacheOpWorkItem = &gsCwq.asWorkItems[ui32NextIdx];
+ CacheOpQItemWriteCheck(psCacheOpWorkItem);
+ }
+ else
+ {
+ /* Retry, disable KDF for this batch */
+ OSLockRelease(gsCwq.hDeferredLock);
+ bCacheOpConfigKDF = IMG_FALSE;
+ psCacheOpWorkItem = NULL;
+ ui32Idx = ui32Idx - 1;
+ continue;
+ }
+
+		/* The timeline needs to be looked up (i.e. bound) in the user context
+		   before deferring into the CacheOp thread's kernel context */
+ eError = CacheOpTimelineBind(psCacheOpWorkItem, PVRSRV_NO_TIMELINE);
+ PVR_LOGG_IF_ERROR(eError, "CacheOpTimelineBind", e1);
+
+ /* Prepare & enqueue next deferred work item for CacheOp thread */
+ psCacheOpWorkItem->ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ *pui32NextFenceSeqNum = psCacheOpWorkItem->ui32OpSeqNum;
+ psCacheOpWorkItem->ui32GFSeqNum = ui32GlobalFlushSeqNum;
+ psCacheOpWorkItem->uiCacheOp = puiCacheOp[ui32Idx];
+ psCacheOpWorkItem->uiOffset = puiOffset[ui32Idx];
+ psCacheOpWorkItem->uiSize = puiSize[ui32Idx];
+ psCacheOpWorkItem->psPMR = ppsPMR[ui32Idx];
+#if defined(CACHEOP_DEBUG)
+ psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+ psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
+ psCacheOpWorkItem->bDeferred = IMG_TRUE;
+ psCacheOpWorkItem->bKMReq = IMG_FALSE;
+ psCacheOpWorkItem->bUMF = IMG_FALSE;
+ gsCwq.ui32ServerASync += 1;
+#endif
+
+ /* Increment deferred size & mark index ready for cache maintenance */
+ OSAtomicAdd(&gsCwq.hDeferredSize, (IMG_UINT32)puiSize[ui32Idx]);
+ (void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+ OSLockRelease(gsCwq.hDeferredLock);
+ psCacheOpWorkItem = NULL;
+ }
+
+ /* Signal the CacheOp thread to ensure these items get processed */
+ eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+ if (bUseGlobalFlush)
+ {
+#if defined(CACHEOP_DEBUG)
+		/* GF was logged already in the loop above, so the rest, if any, are discards */
+ sCacheOpWorkItem.ui64ExecuteTime = sCacheOpWorkItem.ui64EnqueuedTime;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ while (++ui32Idx < ui32NumCacheOps)
+ {
+ sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+ sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+ sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+ sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+ gsCwq.ui32KMDiscards += 1;
+ }
+#endif
+
+ /* No next UM fence seqNum */
+ *pui32NextFenceSeqNum = 0;
+ }
+
+e1:
+ if (psCacheOpWorkItem)
+ {
+ /* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+ CacheOpQItemRecycle(psCacheOpWorkItem);
+ OSLockRelease(gsCwq.hDeferredLock);
+ }
+e0:
+ if (bBatchHasTimeline)
+ {
+ PVRSRV_ERROR eError2;
+ eError2 = CacheOpBatchExecTimeline(uiTimeline, bUseGlobalFlush, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+ eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR CacheOpBatchExecGlobal(PMR **ppsPMR,
+ IMG_CPU_VIRTADDR *pvAddress,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 ui32NumCacheOps,
+ PVRSRV_TIMELINE uiTimeline,
+ IMG_UINT32 ui32GlobalFlushSeqNum,
+ IMG_UINT32 uiCurrentFenceSeqNum,
+ IMG_UINT32 *pui32NextFenceSeqNum)
+{
+ IMG_UINT32 ui32Idx;
+ IMG_BOOL bBatchHasTimeline;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bUseGlobalFlush = IMG_FALSE;
+ CACHEOP_WORK_ITEM *psCacheOpWorkItem = NULL;
+#if defined(CACHEOP_DEBUG)
+ IMG_DEVMEM_SIZE_T uiTotalSize = 0;
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+#endif
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+ PVR_LOGR_IF_ERROR(PVRSRV_ERROR_NOT_SUPPORTED, CACHEOP_NO_GFLUSH_ERROR_STRING);
+#endif
+ PVR_UNREFERENCED_PARAMETER(pvAddress);
+
+ /* Check if batch has an associated timeline update request */
+ bBatchHasTimeline = puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_TIMELINE;
+ puiCacheOp[ui32NumCacheOps-1] &= ~(PVRSRV_CACHE_OP_GLOBAL | PVRSRV_CACHE_OP_TIMELINE);
+
+	/* Skip the operation if another GF has occurred in the interim */
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32GlobalFlushSeqNum)
+ {
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui32OpSeqNum = ui32GlobalFlushSeqNum;
+#endif
+ bUseGlobalFlush = IMG_TRUE;
+ *pui32NextFenceSeqNum = 0;
+ goto exec_timeline;
+ }
+
+	/* Here we need to check whether the client batch contains an INVALIDATE CacheOp */
+ for (*pui32NextFenceSeqNum = 0, ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+#if defined(CACHEOP_DEBUG)
+ IMG_DEVMEM_SIZE_T uiLogicalSize;
+ uiTotalSize += puiSize[ui32Idx];
+		/* There is no need to validate request parameters as we are about
+		   to issue a GF, but skipping validation might make issues reproducible
+		   in one config but not the other, so validate under debug */
+ eError = PMR_LogicalSize(ppsPMR[ui32Idx], &uiLogicalSize);
+ PVR_LOGG_IF_ERROR(eError, "PMR_LogicalSize", e0);
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ PVR_LOGG_IF_FALSE(((puiOffset[ui32Idx]+puiSize[ui32Idx]) <= uiLogicalSize), CACHEOP_DEVMEM_OOR_ERROR_STRING, e0);
+ eError = PVRSRV_OK;
+#endif
+ if (! puiSize[ui32Idx])
+ {
+ /* Fail UM request, don't silently ignore */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ else if (puiCacheOp[ui32Idx] & PVRSRV_CACHE_OP_INVALIDATE)
+ {
+ /* Invalidates cannot be deferred */
+ bUseGlobalFlush = IMG_TRUE;
+ }
+ }
+
+ OSLockAcquire(gsCwq.hDeferredLock);
+
+ /*
+	  Normally we would like to defer client CacheOp(s), but we may not always be in a
+	  position to do so for the following reasons:
+	  0 - Batch has an INVALIDATE; as this is used to transfer device memory buffer
+	      ownership back to the processor, we cannot defer it, so action it immediately.
+	  1 - Configuration does not support deferring of cache maintenance operations, so
+	      we execute synchronously/immediately.
+	  2 - There is currently no queueing space left to enqueue this CacheOp; this might
+	      imply the system is queueing more requests than can be consumed by the CacheOp
+	      thread in time.
+	  3 - Batch has a timeline and there is currently something queued; we cannot defer,
+	      because the currently queued operation(s) might take quite a while to action,
+	      which might cause a timeline deadline timeout.
+ */
+ if (bUseGlobalFlush ||
+ !CacheOpConfigSupports(CACHEOP_CONFIG_KDF) ||
+ CacheOpIdxNext(&gsCwq.hWriteCounter) == CacheOpIdxRead(&gsCwq.hReadCounter) ||
+ (bBatchHasTimeline && CacheOpIdxSpan(&gsCwq.hWriteCounter, &gsCwq.hReadCounter)))
+
+ {
+ OSLockRelease(gsCwq.hDeferredLock);
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+ eError = CacheOpGlobalFlush();
+ PVR_LOGG_IF_ERROR(eError, "CacheOpGlobalFlush", e0);
+ bUseGlobalFlush = IMG_TRUE;
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+ gsCwq.ui32ServerSync += 1;
+#endif
+ goto exec_timeline;
+ }
+
+	/* Select the next item off the queue with which to defer this GF, and possibly the timeline */
+ psCacheOpWorkItem = &gsCwq.asWorkItems[CacheOpIdxNext(&gsCwq.hWriteCounter)];
+ CacheOpQItemWriteCheck(psCacheOpWorkItem);
+
+ /* Defer the GF using information page PMR */
+ psCacheOpWorkItem->psPMR = gsCwq.psInfoPagePMR;
+ eError = PMRLockSysPhysAddresses(psCacheOpWorkItem->psPMR);
+ PVR_LOGG_IF_ERROR(eError, "PMRLockSysPhysAddresses", e0);
+
+ /* Timeline object has to be looked-up here in user context */
+ eError = CacheOpTimelineBind(psCacheOpWorkItem, uiTimeline);
+ PVR_LOGG_IF_ERROR(eError, "CacheOpTimelineBind", e0);
+
+ /* Prepare & enqueue next deferred work item for CacheOp thread */
+ *pui32NextFenceSeqNum = CacheOpGetNextCommonSeqNum();
+ psCacheOpWorkItem->ui32OpSeqNum = *pui32NextFenceSeqNum;
+ psCacheOpWorkItem->ui32GFSeqNum = ui32GlobalFlushSeqNum;
+ psCacheOpWorkItem->uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+ psCacheOpWorkItem->uiOffset = (IMG_DEVMEM_OFFSET_T)0;
+ psCacheOpWorkItem->uiSize = (IMG_DEVMEM_SIZE_T)0;
+#if defined(CACHEOP_DEBUG)
+ /* Note client pid & queueing time of deferred GF CacheOp */
+ psCacheOpWorkItem->ui64EnqueuedTime = OSClockns64();
+ psCacheOpWorkItem->pid = sCacheOpWorkItem.pid;
+ OSAtomicAdd(&gsCwq.hDeferredSize, uiTotalSize);
+ psCacheOpWorkItem->uiSize = uiTotalSize;
+ psCacheOpWorkItem->bDeferred = IMG_TRUE;
+ psCacheOpWorkItem->bKMReq = IMG_FALSE;
+ psCacheOpWorkItem->bUMF = IMG_FALSE;
+ /* Client CacheOp is logged using the deferred seqNum */
+ sCacheOpWorkItem.ui32OpSeqNum = *pui32NextFenceSeqNum;
+ sCacheOpWorkItem.ui64EnqueuedTime = psCacheOpWorkItem->ui64EnqueuedTime;
+ sCacheOpWorkItem.ui64ExecuteTime = psCacheOpWorkItem->ui64EnqueuedTime;
+ /* Update the CacheOp statistics */
+ gsCwq.ui32ServerASync += 1;
+ gsCwq.ui32ServerDGF += 1;
+#endif
+
+ /* Mark index ready for cache maintenance */
+ (void) CacheOpIdxIncrement(&gsCwq.hWriteCounter);
+
+ OSLockRelease(gsCwq.hDeferredLock);
+
+	/* Signal the CacheOp thread to ensure this GF gets processed */
+ eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+
+exec_timeline:
+ if (bUseGlobalFlush && bBatchHasTimeline)
+ {
+ eError = CacheOpBatchExecTimeline(uiTimeline, bUseGlobalFlush, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+ }
+
+#if defined(CACHEOP_DEBUG)
+ for (ui32Idx = 0; ui32Idx < ui32NumCacheOps; ui32Idx++)
+ {
+ sCacheOpWorkItem.psPMR = ppsPMR[ui32Idx];
+ sCacheOpWorkItem.uiSize = puiSize[ui32Idx];
+ sCacheOpWorkItem.uiOffset = puiOffset[ui32Idx];
+ sCacheOpWorkItem.uiCacheOp = puiCacheOp[ui32Idx];
+ if (bUseGlobalFlush)
+ {
+ if (sCacheOpWorkItem.ui64ExecuteTime && ui32Idx)
+ {
+				/* Only the first item carries the real execution time; the rest are discards */
+ sCacheOpWorkItem.ui64EnqueuedTime = sCacheOpWorkItem.ui64ExecuteTime;
+ }
+ gsCwq.ui32KMDiscards += !sCacheOpWorkItem.ui64ExecuteTime ? 1 : ui32Idx ? 1 : 0;
+ }
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+ }
+#endif
+
+ return eError;
+e0:
+ if (psCacheOpWorkItem)
+ {
+ /* Need to ensure we leave this CacheOp QItem in the proper recycled state */
+ CacheOpQItemRecycle(psCacheOpWorkItem);
+ OSLockRelease(gsCwq.hDeferredLock);
+ }
+
+ if (bBatchHasTimeline)
+ {
+ PVRSRV_ERROR eError2;
+ eError2 = CacheOpBatchExecTimeline(uiTimeline, IMG_FALSE, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+ eError = (eError2 == PVRSRV_ERROR_RETRY) ? eError2 : eError;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd,
+ PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+#if defined(CACHEOP_DEBUG)
+ IMG_BOOL bUsedGlobalFlush = IMG_FALSE;
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+ sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+
+ if (gsCwq.bInit)
+ {
+ IMG_DEVMEM_SIZE_T uiSize = sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr;
+ if (uiSize >= (IMG_DEVMEM_SIZE_T)gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD])
+ {
+ eError = CacheOpGlobalFlush();
+ }
+ }
+
+ if (eError == PVRSRV_OK)
+ {
+#if defined(CACHEOP_DEBUG)
+ bUsedGlobalFlush = IMG_TRUE;
+#endif
+ }
+ else
+ {
+ switch (uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ OSCPUCacheCleanRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ OSCPUCacheInvalidateRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ OSCPUCacheFlushRangeKM(psDevNode, pvVirtStart, pvVirtEnd, sCPUPhysStart, sCPUPhysEnd);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache operation type %d", __FUNCTION__, uiCacheOp));
+ break;
+ }
+ eError = PVRSRV_OK;
+ }
+
+#if defined(CACHEOP_DEBUG)
+ if (! CacheOpConfigSupports(CACHEOP_CONFIG_KLOG))
+ {
+ if (bUsedGlobalFlush)
+ {
+ /* Undo the accounting for server GF done in CacheOpGlobalFlush() */
+ gsCwq.ui32ServerGF -= 1;
+ }
+ }
+ else
+ {
+ gsCwq.ui32TotalExecOps += 1;
+ if (! bUsedGlobalFlush)
+ {
+ gsCwq.ui32ServerSync += 1;
+ gsCwq.ui32ServerRBF +=
+ ((sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr) & ((IMG_DEVMEM_SIZE_T)~(gsCwq.uiLineSize - 1))) >> gsCwq.uiLineShift;
+ }
+ sCacheOpWorkItem.uiOffset = 0;
+ sCacheOpWorkItem.bKMReq = IMG_TRUE;
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+ sCacheOpWorkItem.bRBF = !bUsedGlobalFlush;
+ /* Use information page PMR for logging KM request */
+ sCacheOpWorkItem.psPMR = gsCwq.psInfoPagePMR;
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ sCacheOpWorkItem.uiSize = (sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr);
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+ }
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
+ IMG_UINT64 uiAddress,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError;
+ IMG_CPU_VIRTADDR pvAddress = (IMG_CPU_VIRTADDR)(uintptr_t)uiAddress;
+ IMG_BOOL bUseGlobalFlush = uiSize >= gsCwq.pui32InfoPage[CACHEOP_INFO_KMGFTHRESHLD];
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+ gsCwq.ui32TotalExecOps += 1;
+ gsCwq.ui32ServerSync += 1;
+ sCacheOpWorkItem.psPMR = psPMR;
+ sCacheOpWorkItem.uiSize = uiSize;
+ sCacheOpWorkItem.uiOffset = uiOffset;
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+ sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+#endif
+
+ eError = CacheOpPMRExec(psPMR,
+ pvAddress,
+ uiOffset,
+ uiSize,
+ uiCacheOp,
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0],
+ IMG_FALSE,
+ &bUseGlobalFlush);
+ PVR_LOGG_IF_ERROR(eError, "CacheOpPMRExec", e0);
+
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.bRBF = !bUseGlobalFlush;
+ sCacheOpWorkItem.ui64ExecuteTime = OSClockns64();
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32NumCacheOps,
+ PMR **ppsPMR,
+ IMG_UINT64 *puiAddress,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 ui32OpTimeline,
+ IMG_UINT32 ui32ClientGFSeqNum,
+ IMG_UINT32 uiCurrentFenceSeqNum,
+ IMG_UINT32 *pui32NextFenceSeqNum)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_TIMELINE uiTimeline = (PVRSRV_TIMELINE)ui32OpTimeline;
+ IMG_CPU_VIRTADDR *pvAddress = (IMG_CPU_VIRTADDR*)(uintptr_t)puiAddress;
+#if !defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+ PVR_LOGR_IF_FALSE((ui32ClientGFSeqNum == 0),
+ "CacheOpQueue(ui32ClientGFSeqNum > 0)",
+ PVRSRV_ERROR_INVALID_PARAMS);
+#endif
+#if defined(CACHEOP_DEBUG)
+ gsCwq.ui32TotalExecOps += ui32NumCacheOps;
+#endif
+
+ if (! gsCwq.bInit)
+ {
+ PVR_LOG(("CacheOp framework not initialised, failing request"));
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+ else if (! ui32NumCacheOps)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ /* Ensure any single timeline CacheOp request is processed immediately */
+ else if (ui32NumCacheOps == 1 && puiCacheOp[0] == PVRSRV_CACHE_OP_TIMELINE)
+ {
+ eError = CacheOpBatchExecTimeline(uiTimeline, IMG_TRUE, uiCurrentFenceSeqNum, pui32NextFenceSeqNum);
+ }
+	/* The services client explicitly requested a GF, or the config is GF-only (i.e. no KRBF support); this takes priority */
+ else if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF) &&
+ ((puiCacheOp[ui32NumCacheOps-1] & PVRSRV_CACHE_OP_GLOBAL) || !CacheOpConfigSupports(CACHEOP_CONFIG_KRBF)))
+ {
+ eError =
+ CacheOpBatchExecGlobal(ppsPMR,
+ pvAddress,
+ puiOffset,
+ puiSize,
+ puiCacheOp,
+ ui32NumCacheOps,
+ uiTimeline,
+ ui32ClientGFSeqNum,
+ uiCurrentFenceSeqNum,
+ pui32NextFenceSeqNum);
+ }
+ /* This is the default entry for all client requests */
+ else
+ {
+ if (!(gsCwq.eConfig & (CACHEOP_CONFIG_LAST-1)))
+ {
+ /* default the configuration before execution */
+ CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+ }
+
+ eError =
+ CacheOpBatchExecRangeBased(ppsPMR,
+ pvAddress,
+ puiOffset,
+ puiSize,
+ puiCacheOp,
+ ui32NumCacheOps,
+ uiTimeline,
+ ui32ClientGFSeqNum,
+ uiCurrentFenceSeqNum,
+ pui32NextFenceSeqNum);
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eFenceOpType, IMG_UINT32 ui32FenceOpSeqNum)
+{
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 ui32RetryAbort;
+ IMG_UINT32 ui32CompletedOpSeqNum;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(CACHEOP_DEBUG)
+ IMG_UINT64 uiTimeNow;
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ sCacheOpWorkItem.ui32OpSeqNum = ui32FenceOpSeqNum;
+ sCacheOpWorkItem.ui64EnqueuedTime = OSClockns64();
+ uiTimeNow = sCacheOpWorkItem.ui64EnqueuedTime;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ sCacheOpWorkItem.eFenceOpType = eFenceOpType;
+#endif
+ sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum);
+ sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+ PVR_UNREFERENCED_PARAMETER(eFenceOpType);
+
+	/* The CacheOp(s) this thread is fencing on have already been satisfied
+	   by an earlier GF. Put another way: if the last GF is logically behind,
+	   or momentarily disabled (zero), then we have to flush the cache */
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32FenceOpSeqNum)
+ {
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+#endif
+ goto e0;
+ }
+
+ /* If initial fence check fails, then wait-and-retry in loop */
+ ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+ if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum))
+ {
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum;
+#endif
+ goto e0;
+ }
+
+	/* Open the CacheOp update event object; if the open fails, return error */
+ eError2 = OSEventObjectOpen(gsCwq.hClientWakeUpEvtObj, &hOSEvent);
+ PVR_LOGG_IF_ERROR(eError2, "OSEventObjectOpen", e0);
+
+	/* Linear back-off (should this be exponential?); upper-bounds the user wait */
+ for (ui32RetryAbort = gsCwq.ui32FenceRetryAbort; ;--ui32RetryAbort)
+ {
+ /* (Re)read completed CacheOp sequence number before waiting */
+ ui32CompletedOpSeqNum = OSAtomicRead(&gsCwq.hCompletedSeqNum);
+ if (CacheOpFenceCheck(ui32CompletedOpSeqNum, ui32FenceOpSeqNum))
+ {
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiSize = (uintptr_t) ui32CompletedOpSeqNum;
+#endif
+ break;
+ }
+
+		/*
+		   For cache ISAs with GF support, the wait (in us) must be set to
+		   roughly 25% of the GF overhead; there is no point waiting longer
+		   than that, we just perform a GF as it means the CacheOp thread is
+		   really lagging behind. Lastly, we cannot (or should not) hang the
+		   client thread indefinitely, so after a certain duration we give up.
+		   What this duration should be is hard to state, but for now we set
+		   it to 1 second, which is the product of
+		   CACHEOP_FENCE_[WAIT_TIMEOUT * RETRY_ABORT]. We ask the client to
+		   retry the operation by exiting with PVRSRV_ERROR_RETRY.
+		*/
+ (void) OSEventObjectWaitTimeout(hOSEvent, gsCwq.ui32FenceWaitTimeUs);
+ if (gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] > ui32FenceOpSeqNum)
+ {
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+ uiTimeNow = OSClockns64();
+#endif
+ break;
+ }
+ else if (CacheOpConfigSupports(CACHEOP_CONFIG_KGF))
+ {
+ eError2 = CacheOpGlobalFlush();
+ PVR_LOG_IF_ERROR(eError2, "CacheOpGlobalFlush");
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiCacheOp = PVRSRV_CACHE_OP_GLOBAL;
+ sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+ uiTimeNow = OSClockns64();
+#endif
+ break;
+ }
+ else if (! ui32RetryAbort)
+ {
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.uiSize = (uintptr_t) OSAtomicRead(&gsCwq.hCompletedSeqNum);
+ sCacheOpWorkItem.uiOffset = (uintptr_t) gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0];
+ uiTimeNow = OSClockns64();
+#endif
+ PVR_LOG(("CacheOpFence() event: "CACHEOP_ABORT_FENCE_ERROR_STRING));
+ eError = PVRSRV_ERROR_RETRY;
+ break;
+ }
+ else
+ {
+#if defined(CACHEOP_DEBUG)
+ uiTimeNow = OSClockns64();
+#endif
+ }
+ }
+
+ eError2 = OSEventObjectClose(hOSEvent);
+	PVR_LOG_IF_ERROR(eError2, "OSEventObjectClose");
+
+e0:
+#if defined(CACHEOP_DEBUG)
+ sCacheOpWorkItem.ui64ExecuteTime = uiTimeNow;
+ if (ui32FenceOpSeqNum)
+ {
+ /* Only fence(s) pending on CacheOp(s) contribute towards statistics,
+ here we calculate the rolling approximate average waiting time
+ for these fence(s) */
+		/* Times are 64-bit nanosecond stamps, so use 64-bit locals here */
+		IMG_UINT64 ui64EnqueuedTime = sCacheOpWorkItem.ui64EnqueuedTime;
+		IMG_UINT64 ui64ExecuteTime = sCacheOpWorkItem.ui64ExecuteTime;
+		IMG_UINT32 ui32Time = (IMG_UINT32)(ui64EnqueuedTime < ui64ExecuteTime ?
+							ui64ExecuteTime - ui64EnqueuedTime :
+							ui64EnqueuedTime - ui64ExecuteTime);
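+		/* Three DivBy10 passes scale the nanosecond delta to microseconds */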
+ ui32Time = DivBy10(DivBy10(DivBy10(ui32Time)));
+ gsCwq.ui32TotalFenceOps += 1;
+ if (gsCwq.ui32TotalFenceOps > 2)
+ {
+ gsCwq.ui32AvgFenceTime -= (gsCwq.ui32AvgFenceTime / gsCwq.ui32TotalFenceOps);
+ gsCwq.ui32AvgFenceTime += (ui32Time / gsCwq.ui32TotalFenceOps);
+ }
+ else if (ui32Time)
+ {
+ gsCwq.ui32AvgFenceTime = (IMG_UINT32)ui32Time;
+ }
+ }
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+ IMG_UINT64 puiAddress,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT64 ui64EnqueuedTimeUs,
+ IMG_UINT64 ui64ExecuteTimeUs,
+ IMG_UINT32 ui32NumRBF,
+ IMG_BOOL bIsDiscard,
+ PVRSRV_CACHE_OP uiCacheOp)
+{
+#if defined(CACHEOP_DEBUG)
+ CACHEOP_WORK_ITEM sCacheOpWorkItem = {0};
+ PVR_UNREFERENCED_PARAMETER(puiAddress);
+
+ sCacheOpWorkItem.psPMR = psPMR;
+ sCacheOpWorkItem.uiSize = uiSize;
+ sCacheOpWorkItem.uiOffset = uiOffset;
+ sCacheOpWorkItem.uiCacheOp = uiCacheOp;
+ sCacheOpWorkItem.pid = OSGetCurrentClientProcessIDKM();
+ sCacheOpWorkItem.ui32OpSeqNum = CacheOpGetNextCommonSeqNum();
+
+ sCacheOpWorkItem.ui64EnqueuedTime = ui64EnqueuedTimeUs;
+ sCacheOpWorkItem.ui64ExecuteTime = ui64ExecuteTimeUs;
+ sCacheOpWorkItem.bUMF = IMG_TRUE;
+ sCacheOpWorkItem.bRBF = bIsDiscard ? IMG_FALSE : IMG_TRUE;
+ gsCwq.ui32UMDiscards += bIsDiscard ? 1 : 0;
+ gsCwq.ui32ClientRBF += bIsDiscard ? 0 : ui32NumRBF;
+ gsCwq.ui32ClientSync += 1;
+ gsCwq.ui32TotalExecOps += 1;
+
+ CacheOpStatsExecLogWrite(&sCacheOpWorkItem);
+#else
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiCacheOp);
+ PVR_UNREFERENCED_PARAMETER(ui32NumRBF);
+ PVR_UNREFERENCED_PARAMETER(puiAddress);
+ PVR_UNREFERENCED_PARAMETER(ui64ExecuteTimeUs);
+ PVR_UNREFERENCED_PARAMETER(ui64EnqueuedTimeUs);
+#endif
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR CacheOpInit2 (void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Create an event object for pending CacheOp work items */
+ eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hThreadWakeUpEvtObj);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", e0);
+
+ /* Create an event object for updating pending fence checks on CacheOp */
+ eError = OSEventObjectCreate("PVRSRV_CACHEOP_EVENTOBJECT", &gsCwq.hClientWakeUpEvtObj);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", e0);
+
+	/* Appending work-items must not be done concurrently; this lock protects against that */
+ eError = OSLockCreate((POS_LOCK*)&gsCwq.hDeferredLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	/* Apphint read/write is not inherently serialised, so this lock protects against concurrent access */
+ eError = OSLockCreate((POS_LOCK*)&gsCwq.hConfigLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+	/* Determine which CPU cache ISA maintenance mechanisms are available (GF and UMF) */
+ gsCwq.bNoGlobalFlushImpl = (IMG_BOOL)OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ if (! gsCwq.bNoGlobalFlushImpl)
+ {
+ IMG_UINT64 uiIdx;
+ IMG_UINT64 uiTime = 0;
+ IMG_UINT64 uiTimeAfter;
+ IMG_UINT64 uiTimeBefore;
+
+ for (uiIdx = 0; uiIdx < 4; uiIdx++)
+ {
+ /* Take average of four GF */
+ uiTimeBefore = OSClockns64();
+ (void) OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ uiTimeAfter = OSClockns64();
+
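+			/* Three DivBy10 passes scale each nanosecond timestamp to microseconds */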
+ uiTimeBefore = DivBy10(DivBy10(DivBy10(uiTimeBefore)));
+ uiTimeAfter = DivBy10(DivBy10(DivBy10(uiTimeAfter)));
+ uiTime += uiTimeBefore < uiTimeAfter ?
+ uiTimeAfter - uiTimeBefore :
+ uiTimeBefore - uiTimeAfter;
+ }
+
+ gsCwq.ui32FenceWaitTimeUs = (IMG_UINT32)(uiTime >> 2);
+ gsCwq.ui32FenceRetryAbort = ~0;
+ }
+ else
+ {
+ gsCwq.ui32FenceWaitTimeUs = CACHEOP_FENCE_WAIT_TIMEOUT;
+ gsCwq.ui32FenceRetryAbort = CACHEOP_FENCE_RETRY_ABORT;
+ }
+#if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
+ gsCwq.bSupportsUMFlush = IMG_TRUE;
+#else
+ gsCwq.bSupportsUMFlush = IMG_FALSE;
+#endif
+
+ gsCwq.psInfoPageMemDesc = psPVRSRVData->psInfoPageMemDesc;
+ gsCwq.pui32InfoPage = psPVRSRVData->pui32InfoPage;
+ gsCwq.psInfoPagePMR = psPVRSRVData->psInfoPagePMR;
+
+	/* Normally, platforms should use their default configurations; put exceptions here */
+#if defined(__i386__) || defined(__x86_64__)
+#if !defined(TC_MEMORY_CONFIG)
+ CacheOpConfigUpdate(CACHEOP_CONFIG_URBF | CACHEOP_CONFIG_KGF | CACHEOP_CONFIG_KDF);
+#else
+ CacheOpConfigUpdate(CACHEOP_CONFIG_KGF | CACHEOP_CONFIG_KDF);
+#endif
+#else /* !(defined(__i386__) || defined(__x86_64__)) */
+ CacheOpConfigUpdate(CACHEOP_CONFIG_DEFAULT);
+#endif
+
+ /* Initialise the remaining occupants of the CacheOp information page */
+ gsCwq.pui32InfoPage[CACHEOP_INFO_PGSIZE] = (IMG_UINT32)gsCwq.uiPageSize;
+ gsCwq.pui32InfoPage[CACHEOP_INFO_LINESIZE] = (IMG_UINT32)gsCwq.uiLineSize;
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM0] = (IMG_UINT32)0;
+ gsCwq.pui32InfoPage[CACHEOP_INFO_GFSEQNUM1] = (IMG_UINT32)0;
+
+ /* Set before spawning thread */
+ gsCwq.bInit = IMG_TRUE;
+
+ /* Create a thread which is used to execute the deferred CacheOp(s),
+ these are CacheOp(s) executed by the server on behalf of clients
+ asynchronously. All clients synchronise with the server before
+ submitting any HW operation (i.e. device kicks) to ensure that
+ client device work-load memory is coherent */
+ eError = OSThreadCreatePriority(&gsCwq.hWorkerThread,
+ "pvr_cacheop",
+ CacheOpThread,
+ CacheOpThreadDumpInfo,
+ IMG_TRUE,
+ psPVRSRVData,
+ OS_THREAD_HIGHEST_PRIORITY);
+ PVR_LOGG_IF_ERROR(eError, "OSThreadCreatePriority", e0);
+
+	/* Writing the unsigned integer binary encoding of CACHEOP_CONFIG
+	   into this file cycles through the available configuration(s) */
+ gsCwq.pvConfigTune = OSCreateStatisticEntry("cacheop_config",
+ NULL,
+ CacheOpConfigRead,
+ NULL,
+ NULL,
+ NULL);
+ PVR_LOGG_IF_FALSE(gsCwq.pvConfigTune, "OSCreateStatisticEntry", e0);
+
+ /* Register the CacheOp framework (re)configuration handlers */
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpConfig,
+ CacheOpConfigQuery,
+ CacheOpConfigSet,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) APPHINT_ID_CacheOpConfig);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpGFThresholdSize,
+ CacheOpConfigQuery,
+ CacheOpConfigSet,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) APPHINT_ID_CacheOpGFThresholdSize);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_CacheOpUMKMThresholdSize,
+ CacheOpConfigQuery,
+ CacheOpConfigSet,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) APPHINT_ID_CacheOpUMKMThresholdSize);
+
+ return PVRSRV_OK;
+e0:
+ CacheOpDeInit2();
+ return eError;
+}
+
+void CacheOpDeInit2 (void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ gsCwq.bInit = IMG_FALSE;
+
+ if (gsCwq.hThreadWakeUpEvtObj)
+ {
+ eError = OSEventObjectSignal(gsCwq.hThreadWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+
+ if (gsCwq.hClientWakeUpEvtObj)
+ {
+ eError = OSEventObjectSignal(gsCwq.hClientWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+
+ if (gsCwq.hWorkerThread)
+ {
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ eError = OSThreadDestroy(gsCwq.hWorkerThread);
+ if (PVRSRV_OK == eError)
+ {
+ gsCwq.hWorkerThread = NULL;
+ break;
+ }
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+ gsCwq.hWorkerThread = NULL;
+ }
+
+ if (gsCwq.hClientWakeUpEvtObj)
+ {
+ eError = OSEventObjectDestroy(gsCwq.hClientWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ gsCwq.hClientWakeUpEvtObj = NULL;
+ }
+
+ if (gsCwq.hThreadWakeUpEvtObj)
+ {
+ eError = OSEventObjectDestroy(gsCwq.hThreadWakeUpEvtObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ gsCwq.hThreadWakeUpEvtObj = NULL;
+ }
+
+ if (gsCwq.hConfigLock)
+ {
+ eError = OSLockDestroy(gsCwq.hConfigLock);
+ PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
+ gsCwq.hConfigLock = NULL;
+ }
+
+ if (gsCwq.hDeferredLock)
+ {
+ eError = OSLockDestroy(gsCwq.hDeferredLock);
+ PVR_LOG_IF_ERROR(eError, "OSLockDestroy");
+ gsCwq.hDeferredLock = NULL;
+ }
+
+ if (gsCwq.pvConfigTune)
+ {
+ OSRemoveStatisticEntry(gsCwq.pvConfigTune);
+ gsCwq.pvConfigTune = NULL;
+ }
+
+ gsCwq.psInfoPageMemDesc = NULL;
+ gsCwq.pui32InfoPage = NULL;
+ gsCwq.psInfoPagePMR = NULL;
+}
+
+PVRSRV_ERROR CacheOpInit (void)
+{
+ IMG_UINT32 idx;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+	/* DDK initialisation is anticipated to be performed on the boot
+	   processor (little core in big/little systems), though this may
+	   not always be the case. On platforms with mismatched d-cache
+	   line sizes, the value cached here is the system-wide safe
+	   (i.e. smallest) L1 d-cache line size value */
+ gsCwq.uiPageSize = OSGetPageSize();
+ gsCwq.uiPageShift = OSGetPageShift();
+ gsCwq.uiLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+ gsCwq.uiLineShift = ExactLog2(gsCwq.uiLineSize);
+ PVR_LOGR_IF_FALSE((gsCwq.uiLineSize && gsCwq.uiPageSize && gsCwq.uiPageShift), "", PVRSRV_ERROR_INIT_FAILURE);
+ gsCwq.uiCacheOpAddrType = OSCPUCacheOpAddressType();
+
+ /* More information regarding these atomic counters can be found
+ in the CACHEOP_WORK_QUEUE type definition at top of file */
+ OSAtomicWrite(&gsCwq.hCompletedSeqNum, 0);
+ OSAtomicWrite(&gsCwq.hCommonSeqNum, 0);
+ OSAtomicWrite(&gsCwq.hDeferredSize, 0);
+ OSAtomicWrite(&gsCwq.hWriteCounter, 0);
+ OSAtomicWrite(&gsCwq.hReadCounter, 0);
+
+ for (idx = 0; idx < CACHEOP_INDICES_MAX; idx++)
+ {
+ gsCwq.asWorkItems[idx].iTimeline = PVRSRV_NO_TIMELINE;
+ gsCwq.asWorkItems[idx].psPMR = (void *)(uintptr_t)~0;
+ gsCwq.asWorkItems[idx].ui32OpSeqNum = (IMG_UINT32)~0;
+ gsCwq.asWorkItems[idx].ui32GFSeqNum = (IMG_UINT32)~0;
+ }
+
+	/* Lock prevents multiple threads from issuing more GFs than are required */
+ eError = OSLockCreate((POS_LOCK*)&gsCwq.hGlobalFlushLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+#if defined(CACHEOP_DEBUG)
+	/* debugfs file read-out is not inherently serialised, so this lock protects against concurrent access */
+ eError = OSLockCreate((POS_LOCK*)&gsCwq.hStatsExecLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+ gsCwq.i32StatsExecWriteIdx = 0;
+ OSCachedMemSet(gsCwq.asStatsExecuted, 0, sizeof(gsCwq.asStatsExecuted));
+
+ /* File captures the most recent subset of CacheOp(s) executed */
+ gsCwq.pvStatsEntry = OSCreateStatisticEntry("cacheop_history",
+ NULL,
+ CacheOpStatsExecLogRead,
+ NULL,
+ NULL,
+ NULL);
+	PVR_LOGG_IF_FALSE(gsCwq.pvStatsEntry, "OSCreateStatisticEntry", e0);
+#endif
+
+e0:
+ return eError;
+}
+
+void CacheOpDeInit (void)
+{
+#if defined(CACHEOP_DEBUG)
+ if (gsCwq.hStatsExecLock)
+ {
+ (void) OSLockDestroy(gsCwq.hStatsExecLock);
+ gsCwq.hStatsExecLock = NULL;
+ }
+
+ if (gsCwq.pvStatsEntry)
+ {
+ OSRemoveStatisticEntry(gsCwq.pvStatsEntry);
+ gsCwq.pvStatsEntry = NULL;
+ }
+#endif
+ if (gsCwq.hGlobalFlushLock)
+ {
+ (void) OSLockDestroy(gsCwq.hGlobalFlushLock);
+ gsCwq.hGlobalFlushLock = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/cache_km.h b/drivers/gpu/drm/img-rogue/1.10/cache_km.h
new file mode 100644
index 00000000000000..a3b338036f1aa2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/cache_km.h
@@ -0,0 +1,174 @@
+/*************************************************************************/ /*!
+@File           cache_km.h
+@Title CPU cache management header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_KM_H_
+#define _CACHE_KM_H_
+
+#if defined(LINUX)
+#include <linux/version.h>
+#else
+#define KERNEL_VERSION
+#endif
+
+#include "pvrsrv_error.h"
+#include "os_cpu_cache.h"
+#include "img_types.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "pmr.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP_ADDR_TYPE; /*!< Represents CPU address type required for CPU d-cache maintenance */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL 0x1 /*!< Operation requires CPU virtual address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL 0x2 /*!< Operation requires CPU physical address only */
+#define PVRSRV_CACHE_OP_ADDR_TYPE_BOTH 0x3 /*!< Operation requires both CPU virtual & physical addresses */
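+
+/* Note: PVRSRV_CACHE_OP_ADDR_TYPE_BOTH (0x3) is the bitwise OR of the
+   VIRTUAL (0x1) and PHYSICAL (0x2) types, so it can be tested as a mask. */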
+
+/*
+ * CacheOpInit() & CacheOpDeInit()
+ *
+ * This must be called to initialise the KM cache maintenance framework.
+ * This is called early during the driver/module (un)loading phase.
+ */
+PVRSRV_ERROR CacheOpInit(void);
+void CacheOpDeInit(void);
+
+/*
+ * CacheOpInit2() & CacheOpDeInit2()
+ *
+ * This must be called to initialise the UM cache maintenance framework.
+ * This is called when the driver is loaded/unloaded from the kernel.
+ */
+PVRSRV_ERROR CacheOpInit2(void);
+void CacheOpDeInit2(void);
+
+/*
+ * CacheOpAcquireInfoPage() & CacheOpReleaseInfoPage()
+ *
+ * This interface is used for obtaining the global CacheOp info. page
+ * which acts as a repository of meta-data for the cache maintenance
+ * framework. The use of this information page outside of services
+ * is _not_ recommended.
+ */
+PVRSRV_ERROR CacheOpAcquireInfoPage (PMR **ppsPMR);
+PVRSRV_ERROR CacheOpReleaseInfoPage (PMR *psPMR);
+
+/*
+ * CacheOpExec()
+ *
+ * This is the primary CPU data-cache maintenance interface and it is
+ * always guaranteed to be synchronous; the supplied arguments must be
+ * pre-validated for performance reasons, else the d-cache maintenance
+ * operation might cause the underlying OS kernel to fault.
+ */
+PVRSRV_ERROR CacheOpExec (PPVRSRV_DEVICE_NODE psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd,
+ PVRSRV_CACHE_OP uiCacheOp);
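+
+/*
+ * A minimal usage sketch (illustrative only, not taken from this driver;
+ * psDevNode, pvBase, uiPhysBase and uiLength are assumed caller context
+ * describing one pre-validated, contiguous range):
+ *
+ *   IMG_CPU_PHYADDR sStart = { .uiAddr = uiPhysBase };
+ *   IMG_CPU_PHYADDR sEnd   = { .uiAddr = uiPhysBase + uiLength };
+ *   eError = CacheOpExec(psDevNode,
+ *                        pvBase, (IMG_UINT8 *)pvBase + uiLength,
+ *                        sStart, sEnd, PVRSRV_CACHE_OP_FLUSH);
+ */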
+
+/*
+ * CacheOpValExec()
+ *
+ * Same as CacheOpExec(), except the arguments are _validated_ before being
+ * presented to the underlying OS kernel for CPU data-cache maintenance.
+ * uiAddress is the start CPU virtual address of the PMR whose d-cache is
+ * to be maintained; it can be NULL (zero), in which case a remap is
+ * performed internally if required for cache maintenance. This is primarily
+ * used as the services client bridge call handler for synchronous user-mode
+ * cache maintenance requests.
+ */
+PVRSRV_ERROR CacheOpValExec(PMR *psPMR,
+ IMG_UINT64 uiAddress,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP uiCacheOp);
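+
+/*
+ * A minimal usage sketch (illustrative only; psPMR, uiOffset and uiSize are
+ * assumed to come from a prior PMR lookup). Passing 0 for uiAddress lets the
+ * implementation remap internally if the maintenance requires it:
+ *
+ *   eError = CacheOpValExec(psPMR, 0, uiOffset, uiSize,
+ *                           PVRSRV_CACHE_OP_CLEAN);
+ */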
+
+/*
+ * CacheOpQueue()
+ *
+ * This is the secondary cache maintenance interface and it is not
+ * guaranteed to be synchronous, in that requests could be deferred
+ * and executed asynchronously. This interface is primarily meant as
+ * the services client bridge call handler. Both uiInfoPgGFSeqNum and
+ * ui32[Current,Next]FenceSeqNum implement an internal client/server
+ * queueing protocol, so making use of this interface outside of the
+ * services client is not recommended and should not be done.
+ */
+PVRSRV_ERROR CacheOpQueue (IMG_UINT32 ui32OpCount,
+ PMR **ppsPMR,
+ IMG_UINT64 *puiAddress,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *puiCacheOp,
+ IMG_UINT32 ui32OpTimeline,
+ IMG_UINT32 uiOpInfoPgGFSeqNum,
+ IMG_UINT32 uiCurrentFenceSeqNum,
+ IMG_UINT32 *puiNextFenceSeqNum);
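+
+/*
+ * A minimal usage sketch (illustrative only; all variable names here are
+ * assumed client-bridge context). A batch of one deferrable CLEAN is queued
+ * and the returned fence sequence number is kept for a later CacheOpFence()
+ * call:
+ *
+ *   PVRSRV_CACHE_OP uiOp = PVRSRV_CACHE_OP_CLEAN;
+ *   IMG_UINT32 ui32NextFenceSeqNum;
+ *   eError = CacheOpQueue(1, &psPMR, &ui64Address, &uiOffset, &uiSize,
+ *                         &uiOp, ui32OpTimeline, uiOpInfoPgGFSeqNum,
+ *                         uiCurrentFenceSeqNum, &ui32NextFenceSeqNum);
+ */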
+
+/*
+ * CacheOpFence()
+ *
+ * This is used to fence on any in-flight client cache maintenance
+ * operations that might have been deferred by the use of CacheOpQueue().
+ * This should be called before any subsequent HW device kick to ensure
+ * device memory is coherent with the HW before the kick.
+ */
+PVRSRV_ERROR CacheOpFence (RGXFWIF_DM eOpType, IMG_UINT32 ui32OpSeqNum);
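+
+/*
+ * A minimal usage sketch (illustrative only): before kicking the device,
+ * fence on the sequence number returned by an earlier CacheOpQueue() call.
+ * RGXFWIF_DM_3D is used here purely as an example data master.
+ *
+ *   eError = CacheOpFence(RGXFWIF_DM_3D, ui32NextFenceSeqNum);
+ *   if (eError == PVRSRV_ERROR_RETRY)
+ *   {
+ *       // fence timed out; the caller is expected to retry the operation
+ *   }
+ */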
+
+/*
+ * CacheOpLog()
+ *
+ * This is used for logging client cache maintenance operations that
+ * were executed in user-space.
+ */
+PVRSRV_ERROR CacheOpLog (PMR *psPMR,
+ IMG_UINT64 uiAddress,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+                          IMG_UINT64 ui64QueuedTimeUs,
+                          IMG_UINT64 ui64ExecuteTimeUs,
+ IMG_UINT32 ui32NumRBF,
+ IMG_BOOL bIsDiscard,
+ PVRSRV_CACHE_OP uiCacheOp);
+
+#endif /* _CACHE_KM_H_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/cache_ops.h b/drivers/gpu/drm/img-rogue/1.10/cache_ops.h
new file mode 100644
index 00000000000000..ea71bbfbe5a360
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/cache_ops.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title Services cache management header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines for cache management which are visible internally
+ and externally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _CACHE_OPS_H_
+#define _CACHE_OPS_H_
+#include "img_types.h"
+
+typedef IMG_UINT32 PVRSRV_CACHE_OP; /*!< Type represents cache maintenance operation */
+#define PVRSRV_CACHE_OP_NONE 0x0 /*!< No operation */
+#define PVRSRV_CACHE_OP_CLEAN 0x1 /*!< Flush w/o invalidate */
+#define PVRSRV_CACHE_OP_INVALIDATE 0x2 /*!< Invalidate w/o flush */
+#define PVRSRV_CACHE_OP_FLUSH 0x3 /*!< Flush w/ invalidate */
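+
+/* Note: PVRSRV_CACHE_OP_FLUSH (0x3) == CLEAN (0x1) | INVALIDATE (0x2), so a
+   flush also satisfies bitwise tests for either constituent operation. */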
+
+#endif /* _CACHE_OPS_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_cache_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_cache_bridge.h
new file mode 100644
index 00000000000000..2dbebdcdb516ac
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_cache_bridge.h
@@ -0,0 +1,88 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_CACHE_BRIDGE_H
+#define CLIENT_CACHE_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_cache_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_HANDLE *phPMR,
+ IMG_UINT64 *pui64Address,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *piuCacheOp,
+ IMG_UINT32 ui32OpTimeline,
+ IMG_UINT32 ui32OpInfoPgGFSeqNum,
+ IMG_UINT32 ui32CurrentFenceSeqNum,
+ IMG_UINT32 *pui32NextFenceSeqNum);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 ui64Address,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP iuCacheOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 ui64Address,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_INT64 i64QueuedTimeUs,
+ IMG_INT64 i64ExecuteTimeUs,
+ IMG_INT32 i32NumRBF,
+ IMG_BOOL bIsDiscard,
+ PVRSRV_CACHE_OP iuCacheOp);
+
+
+#endif /* CLIENT_CACHE_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_cache_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_cache_direct_bridge.c
new file mode 100644
index 00000000000000..a7a7397cd83797
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_cache_direct_bridge.c
@@ -0,0 +1,141 @@
+/*******************************************************************************
+@Title Direct client bridge for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_cache_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "cache_ops.h"
+
+#include "cache_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpQueue(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumCacheOps,
+ IMG_HANDLE *phPMR,
+ IMG_UINT64 *pui64Address,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PVRSRV_CACHE_OP *piuCacheOp,
+ IMG_UINT32 ui32OpTimeline,
+ IMG_UINT32 ui32OpInfoPgGFSeqNum,
+ IMG_UINT32 ui32CurrentFenceSeqNum,
+ IMG_UINT32 *pui32NextFenceSeqNum)
+{
+ PVRSRV_ERROR eError;
+ PMR * *psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR **) phPMR;
+
+ eError =
+ CacheOpQueue(
+ ui32NumCacheOps,
+ psPMRInt,
+ pui64Address,
+ puiOffset,
+ puiSize,
+ piuCacheOp,
+ ui32OpTimeline,
+ ui32OpInfoPgGFSeqNum,
+ ui32CurrentFenceSeqNum,
+ pui32NextFenceSeqNum);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpExec(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 ui64Address,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PVRSRV_CACHE_OP iuCacheOp)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ CacheOpValExec(
+ psPMRInt,
+ ui64Address,
+ uiOffset,
+ uiSize,
+ iuCacheOp);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeCacheOpLog(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 ui64Address,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_INT64 i64QueuedTimeUs,
+ IMG_INT64 i64ExecuteTimeUs,
+ IMG_INT32 i32NumRBF,
+ IMG_BOOL bIsDiscard,
+ PVRSRV_CACHE_OP iuCacheOp)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ CacheOpLog(
+ psPMRInt,
+ ui64Address,
+ uiOffset,
+ uiSize,
+ i64QueuedTimeUs,
+ i64ExecuteTimeUs,
+ i32NumRBF,
+ bIsDiscard,
+ iuCacheOp);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_bridge.h
new file mode 100644
index 00000000000000..4fa4379489dde6
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_bridge.h
@@ -0,0 +1,112 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+#define CLIENT_DEVICEMEMHISTORY_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_devicememhistory_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+
+#endif /* CLIENT_DEVICEMEMHISTORY_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_direct_bridge.c
new file mode 100644
index 00000000000000..9df669ded3498a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_devicememhistory_direct_bridge.c
@@ -0,0 +1,206 @@
+/*******************************************************************************
+@Title Direct client bridge for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_devicememhistory_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_history_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMap(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevicememHistoryMapKM(
+ psPMRInt,
+ uiOffset,
+ sDevVAddr,
+ uiSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmap(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevicememHistoryUnmapKM(
+ psPMRInt,
+ uiOffset,
+ sDevVAddr,
+ uiSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryMapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ DevicememHistoryMapVRangeKM(
+ sBaseDevVAddr,
+ ui32ui32StartPage,
+ ui32NumPages,
+ uiAllocSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistoryUnmapVRange(IMG_HANDLE hBridge,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ DevicememHistoryUnmapVRangeKM(
+ sBaseDevVAddr,
+ ui32ui32StartPage,
+ ui32NumPages,
+ uiAllocSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevicememHistorySparseChange(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *puiText,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevicememHistorySparseChangeKM(
+ psPMRInt,
+ uiOffset,
+ sDevVAddr,
+ uiSize,
+ puiText,
+ ui32Log2PageSize,
+ ui32AllocPageCount,
+ pui32AllocPageIndices,
+ ui32FreePageCount,
+ pui32FreePageIndices,
+ ui32AllocationIndex,
+ pui32AllocationIndexOut);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_htbuffer_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_htbuffer_bridge.h
new file mode 100644
index 00000000000000..d2dcb39174e683
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_htbuffer_bridge.h
@@ -0,0 +1,78 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_HTBUFFER_BRIDGE_H
+#define CLIENT_HTBUFFER_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_htbuffer_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBConfigure(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32BufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumGroups,
+ IMG_UINT32 *pui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ IMG_UINT32 ui32LogMode,
+ IMG_UINT32 ui32OpMode);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32TimeStamp,
+ IMG_UINT32 ui32SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 *pui32Args);
+
+
+#endif /* CLIENT_HTBUFFER_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_htbuffer_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_htbuffer_direct_bridge.c
new file mode 100644
index 00000000000000..1a2290423dddc4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_htbuffer_direct_bridge.c
@@ -0,0 +1,124 @@
+/*******************************************************************************
+@Title Direct client bridge for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_htbuffer_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+
+#include "htbserver.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBConfigure(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32BufferSize)
+{
+#if defined(PVR_NEVER_USED)	/* never defined here: the direct call is compiled out and the stub below reports PVRSRV_ERROR_NOT_IMPLEMENTED */
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PVRSRVHTBConfigureKM(
+ ui32NameSize,
+ puiName,
+ ui32BufferSize);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+ PVR_UNREFERENCED_PARAMETER(puiName);
+ PVR_UNREFERENCED_PARAMETER(ui32BufferSize);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBControl(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32NumGroups,
+ IMG_UINT32 *pui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ IMG_UINT32 ui32LogMode,
+ IMG_UINT32 ui32OpMode)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ HTBControlKM(
+ ui32NumGroups,
+ pui32GroupEnable,
+ ui32LogLevel,
+ ui32EnablePID,
+ ui32LogMode,
+ ui32OpMode);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHTBLog(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32TimeStamp,
+ IMG_UINT32 ui32SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 *pui32Args)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ HTBLogKM(
+ ui32PID,
+ ui32TimeStamp,
+ ui32SF,
+ ui32NumArgs,
+ pui32Args);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_mm_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_mm_bridge.h
new file mode 100644
index 00000000000000..dcef0f0fe261bf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_mm_bridge.h
@@ -0,0 +1,248 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_MM_BRIDGE_H
+#define CLIENT_MM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_mm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE *phPMRExport,
+ IMG_UINT64 *pui64Size,
+ IMG_UINT32 *pui32Log2Contig,
+ IMG_UINT64 *pui64Password);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 *pui64UID);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hBuffer,
+ IMG_HANDLE *phExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtMem);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport,
+ IMG_UINT64 ui64uiPassword,
+ IMG_UINT64 ui64uiSize,
+ IMG_UINT32 ui32uiLog2Contig,
+ IMG_HANDLE *phPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *psAlign);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_PID ui32PID,
+ IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_PID ui32PID,
+ IMG_HANDLE *phPMRPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemImportSecBuf(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_HANDLE *phPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+ IMG_BOOL bbKernelMemoryCtx,
+ IMG_HANDLE *phDevMemServerContext,
+ IMG_HANDLE *phPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 ui32Log2DataPageSize,
+ IMG_HANDLE *phDevmemHeapPtr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE *phMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE *phReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hSrvDevMemHeap,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32SparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT64 ui64CPUVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sAddress);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32NumHeapConfigs);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 *pui32NumHeaps);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapConfigNameBufSz,
+ IMG_CHAR *puiHeapConfigName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_UINT32 ui32HeapNameBufSz,
+ IMG_CHAR *puiHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBase,
+ IMG_DEVMEM_SIZE_T *puiHeapLength,
+ IMG_UINT32 *pui32Log2DataPageSizeOut,
+ IMG_UINT32 *pui32Log2ImportAlignmentOut,
+ IMG_UINT32 *pui32Log2TilingStrideFactorOut);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR *psFaultAddress);
+
+
+#endif /* CLIENT_MM_BRIDGE_H */
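
(Illustrative aside, not part of the patch: the export/import trio above implements cross-process PMR sharing as a handle-plus-password handshake. A hedged sketch of the round trip follows, using only the declarations above; the helper name and both connection handles are hypothetical.)

/* Sketch only: shares a PMR between two hypothetical connections using
 * the export/import handshake declared above. */
static PVRSRV_ERROR SharePMR(IMG_HANDLE hExporterConn, IMG_HANDLE hImporterConn,
			     IMG_HANDLE hPMR, IMG_HANDLE *phImportedPMR)
{
	IMG_HANDLE hExport;
	IMG_UINT64 ui64Size, ui64Password;
	IMG_UINT32 ui32Log2Contig;
	PVRSRV_ERROR eError;

	/* Producer: wrap the PMR in an export handle plus a password. */
	eError = BridgePMRExportPMR(hExporterConn, hPMR, &hExport,
				    &ui64Size, &ui32Log2Contig, &ui64Password);
	if (eError != PVRSRV_OK)
		return eError;

	/* Consumer: redeem the export with the matching size/password. */
	eError = BridgePMRImportPMR(hImporterConn, hExport, ui64Password,
				    ui64Size, ui32Log2Contig, phImportedPMR);

	/* The export wrapper is not needed once the import has been tried. */
	(void) BridgePMRUnexportPMR(hExporterConn, hExport);
	return eError;
}
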
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_mm_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_mm_direct_bridge.c
new file mode 100644
index 00000000000000..6cb04b0bb45b9e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_mm_direct_bridge.c
@@ -0,0 +1,805 @@
+/*******************************************************************************
+@Title Direct client bridge for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_mm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "physmem_tdsecbuf.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRExportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE *phPMRExport,
+ IMG_UINT64 *pui64Size,
+ IMG_UINT32 *pui32Log2Contig,
+ IMG_UINT64 *pui64Password)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PMR_EXPORT * psPMRExportInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRExportPMR(
+ psPMRInt,
+ &psPMRExportInt,
+ pui64Size,
+ pui32Log2Contig,
+ pui64Password);
+
+ *phPMRExport = psPMRExportInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnexportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport)
+{
+ PVRSRV_ERROR eError;
+ PMR_EXPORT * psPMRExportInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+ eError =
+ PMRUnexportPMR(
+ psPMRExportInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRGetUID(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_UINT64 *pui64UID)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRGetUID(
+ psPMRInt,
+ pui64UID);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRMakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hBuffer,
+ IMG_HANDLE *phExtMem)
+{
+ PVRSRV_ERROR eError;
+ PMR * psBufferInt;
+ PMR * psExtMemInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psBufferInt = (PMR *) hBuffer;
+
+ eError =
+ PMRMakeLocalImportHandle(
+ psBufferInt,
+ &psExtMemInt);
+
+ *phExtMem = psExtMemInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnmakeLocalImportHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtMem)
+{
+ PVRSRV_ERROR eError;
+ PMR * psExtMemInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psExtMemInt = (PMR *) hExtMem;
+
+ eError =
+ PMRUnmakeLocalImportHandle(
+ psExtMemInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRExport,
+ IMG_UINT64 ui64uiPassword,
+ IMG_UINT64 ui64uiSize,
+ IMG_UINT32 ui32uiLog2Contig,
+ IMG_HANDLE *phPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR_EXPORT * psPMRExportInt;
+ PMR * psPMRInt;
+
+ psPMRExportInt = (PMR_EXPORT *) hPMRExport;
+
+ eError =
+		PhysmemImportPMR(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ psPMRExportInt,
+ ui64uiPassword,
+ ui64uiSize,
+ ui32uiLog2Contig,
+ &psPMRInt);
+
+ *phPMR = psPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRLocalImportPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *psAlign)
+{
+ PVRSRV_ERROR eError;
+ PMR * psExtHandleInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psExtHandleInt = (PMR *) hExtHandle;
+
+ eError =
+ PMRLocalImportPMR(
+ psExtHandleInt,
+ &psPMRInt,
+ puiSize,
+ psAlign);
+
+ *phPMR = psPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRUnrefPMR(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRUnrefUnlockPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRUnrefUnlockPMR(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_PID ui32PID,
+ IMG_HANDLE *phPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRPtrInt;
+
+
+ eError =
+		PhysmemNewRamBackedPMR(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ ui32Log2PageSize,
+ uiFlags,
+ ui32AnnotationLength,
+ puiAnnotation,
+ ui32PID,
+ &psPMRPtrInt);
+
+ *phPMRPtr = psPMRPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemNewRamBackedLockedPMR(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32Log2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32AnnotationLength,
+ const IMG_CHAR *puiAnnotation,
+ IMG_PID ui32PID,
+ IMG_HANDLE *phPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRPtrInt;
+
+
+ eError =
+		PhysmemNewRamBackedLockedPMR(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ ui32Log2PageSize,
+ uiFlags,
+ ui32AnnotationLength,
+ puiAnnotation,
+ ui32PID,
+ &psPMRPtrInt);
+
+ *phPMRPtr = psPMRPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePhysmemImportSecBuf(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_HANDLE *phPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRPtrInt;
+
+
+ eError =
+		PhysmemImportSecBuf(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ uiSize,
+ ui32Log2Align,
+ uiFlags,
+ &psPMRPtrInt,
+ pui64SecBufHandle);
+
+ *phPMRPtr = psPMRPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntPin(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpin(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntUnpin(
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPinValidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntPinValidate(
+ psMappingInt,
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnpinInvalidate(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping,
+ IMG_HANDLE hPMR)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntUnpinInvalidate(
+ psMappingInt,
+ psPMRInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxCreate(IMG_HANDLE hBridge,
+ IMG_BOOL bbKernelMemoryCtx,
+ IMG_HANDLE *phDevMemServerContext,
+ IMG_HANDLE *phPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevMemServerContextInt;
+ IMG_HANDLE hPrivDataInt;
+
+
+ eError =
+		DevmemIntCtxCreate(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ bbKernelMemoryCtx,
+ &psDevMemServerContextInt,
+ &hPrivDataInt,
+ pui32CPUCacheLineSize);
+
+ *phDevMemServerContext = psDevMemServerContextInt;
+ *phPrivData = hPrivDataInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntCtxDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemServerContextInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+ eError =
+ DevmemIntCtxDestroy(
+ psDevmemServerContextInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapCreate(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 ui32Log2DataPageSize,
+ IMG_HANDLE *phDevmemHeapPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+ DEVMEMINT_HEAP * psDevmemHeapPtrInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntHeapCreate(
+ psDevmemCtxInt,
+ sHeapBaseAddr,
+ uiHeapLength,
+ ui32Log2DataPageSize,
+ &psDevmemHeapPtrInt);
+
+ *phDevmemHeapPtr = psDevmemHeapPtrInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntHeapDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemHeap)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psDevmemHeapInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemHeapInt = (DEVMEMINT_HEAP *) hDevmemHeap;
+
+ eError =
+ DevmemIntHeapDestroy(
+ psDevmemHeapInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE *phMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PMR * psPMRInt;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntMapPMR(
+ psDevmemServerHeapInt,
+ psReservationInt,
+ psPMRInt,
+ uiMapFlags,
+ &psMappingInt);
+
+ *phMapping = psMappingInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING * psMappingInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psMappingInt = (DEVMEMINT_MAPPING *) hMapping;
+
+ eError =
+ DevmemIntUnmapPMR(
+ psMappingInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntReserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerHeap,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_HANDLE *phReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerHeapInt = (DEVMEMINT_HEAP *) hDevmemServerHeap;
+
+ eError =
+ DevmemIntReserveRange(
+ psDevmemServerHeapInt,
+ sAddress,
+ uiLength,
+ &psReservationInt);
+
+ *phReservation = psReservationInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnreserveRange(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+ eError =
+ DevmemIntUnreserveRange(
+ psReservationInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeChangeSparseMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hSrvDevMemHeap,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pui32FreePageIndices,
+ IMG_UINT32 ui32SparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT64 ui64CPUVAddr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP * psSrvDevMemHeapInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSrvDevMemHeapInt = (DEVMEMINT_HEAP *) hSrvDevMemHeap;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntChangeSparse(
+ psSrvDevMemHeapInt,
+ psPMRInt,
+ ui32AllocPageCount,
+ pui32AllocPageIndices,
+ ui32FreePageCount,
+ pui32FreePageIndices,
+ ui32SparseFlags,
+ uiFlags,
+ sDevVAddr,
+ ui64CPUVAddr);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntMapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ DevmemIntMapPages(
+ psReservationInt,
+ psPMRInt,
+ ui32PageCount,
+ ui32PhysicalPgOffset,
+ uiFlags,
+ sDevVAddr);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntUnmapPages(IMG_HANDLE hBridge,
+ IMG_HANDLE hReservation,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION * psReservationInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psReservationInt = (DEVMEMINT_RESERVATION *) hReservation;
+
+ eError =
+ DevmemIntUnmapPages(
+ psReservationInt,
+ sDevVAddr,
+ ui32PageCount);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIsVDevAddrValid(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR sAddress)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+		DevmemIntIsVDevAddrValid(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ psDevmemCtxInt,
+ sAddress);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigCount(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32NumHeapConfigs)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		HeapCfgHeapConfigCount(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ pui32NumHeapConfigs);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapCount(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 *pui32NumHeaps)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		HeapCfgHeapCount(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32HeapConfigIndex,
+ pui32NumHeaps);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapConfigName(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapConfigNameBufSz,
+ IMG_CHAR *puiHeapConfigName)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		HeapCfgHeapConfigName(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32HeapConfigIndex,
+ ui32HeapConfigNameBufSz,
+ puiHeapConfigName);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeHeapCfgHeapDetails(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32HeapConfigIndex,
+ IMG_UINT32 ui32HeapIndex,
+ IMG_UINT32 ui32HeapNameBufSz,
+ IMG_CHAR *puiHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBase,
+ IMG_DEVMEM_SIZE_T *puiHeapLength,
+ IMG_UINT32 *pui32Log2DataPageSizeOut,
+ IMG_UINT32 *pui32Log2ImportAlignmentOut,
+ IMG_UINT32 *pui32Log2TilingStrideFactorOut)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		HeapCfgHeapDetails(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32HeapConfigIndex,
+ ui32HeapIndex,
+ ui32HeapNameBufSz,
+ puiHeapNameOut,
+ psDevVAddrBase,
+ puiHeapLength,
+ pui32Log2DataPageSizeOut,
+ pui32Log2ImportAlignmentOut,
+ pui32Log2TilingStrideFactorOut);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntRegisterPFNotifyKM(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+ DevmemIntRegisterPFNotifyKM(
+ psDevmemCtxInt,
+ ui32PID,
+ bRegister);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeGetMaxDevMemSize(IMG_HANDLE hBridge,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVGetMaxDevMemSizeKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ puiLMASize,
+ puiUMASize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemGetFaultAddress(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_DEV_VIRTADDR *psFaultAddress)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+		DevmemIntGetFaultAddress(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ psDevmemCtxInt,
+ psFaultAddress);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pdump_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_pdump_bridge.h
new file mode 100644
index 00000000000000..e10c80c2ce68e5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pdump_bridge.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMP_BRIDGE_H
+#define CLIENT_PDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE hBridge,
+ IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpImageDescriptor(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32StringSize,
+ const IMG_CHAR *puiFileName,
+ IMG_DEV_VIRTADDR sDataDevAddr,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *pui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeaderDevAddr,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+ IMG_CHAR *puiComment,
+ IMG_UINT32 ui32Flags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Frame);
+
+
+#endif /* CLIENT_PDUMP_BRIDGE_H */
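
(Illustrative aside, not part of the patch: a hedged sketch of tagging a PDump capture stream with the two simpler calls above; hSrvConn and the comment text are hypothetical, and a flags value of 0 is assumed to mean default behaviour.)

/* Sketch only: marks a frame boundary in the PDump script; the comment
 * buffer is mutable because the prototype above takes IMG_CHAR *. */
static PVRSRV_ERROR PDumpMarkFrame(IMG_HANDLE hSrvConn, IMG_UINT32 ui32Frame)
{
	IMG_CHAR acComment[] = "start of frame";
	PVRSRV_ERROR eError;

	eError = BridgePVRSRVPDumpSetFrame(hSrvConn, ui32Frame);
	if (eError != PVRSRV_OK)
		return eError;

	return BridgePVRSRVPDumpComment(hSrvConn, acComment, 0 /* flags */);
}
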
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pdump_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_pdump_direct_bridge.c
new file mode 100644
index 00000000000000..b51e320c2ada7d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pdump_direct_bridge.c
@@ -0,0 +1,166 @@
+/*******************************************************************************
+@Title Direct client bridge for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include <powervr/buffer_attribs.h>
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemPDumpBitmap(IMG_HANDLE hBridge,
+ IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+		DevmemIntPDumpBitmap(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ puiFileName,
+ ui32FileOffset,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ sDevBaseAddr,
+ psDevmemCtxInt,
+ ui32Size,
+ ePixelFormat,
+ ui32AddrMode,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpImageDescriptor(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemCtx,
+ IMG_UINT32 ui32StringSize,
+ const IMG_CHAR *puiFileName,
+ IMG_DEV_VIRTADDR sDataDevAddr,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *pui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeaderDevAddr,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemCtxInt;
+
+ psDevmemCtxInt = (DEVMEMINT_CTX *) hDevmemCtx;
+
+ eError =
+		DevmemIntPdumpImageDescriptor(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ psDevmemCtxInt,
+ ui32StringSize,
+ puiFileName,
+ sDataDevAddr,
+ ui32DataSize,
+ ui32LogicalWidth,
+ ui32LogicalHeight,
+ ui32PhysicalWidth,
+ ui32PhysicalHeight,
+ ePixelFormat,
+ eMemLayout,
+ eFBCompression,
+ pui32FBCClearColour,
+ sHeaderDevAddr,
+ ui32HeaderSize,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpComment(IMG_HANDLE hBridge,
+ IMG_CHAR *puiComment,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpCommentKM(
+ puiComment,
+ ui32Flags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Frame)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PDumpSetFrameKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ ui32Frame);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_bridge.h
new file mode 100644
index 00000000000000..68d1e557fdba25
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_bridge.h
@@ -0,0 +1,74 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMPCTRL_BRIDGE_H
+#define CLIENT_PDUMPCTRL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpctrl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetState(IMG_HANDLE hBridge,
+ IMG_UINT64 *pui64State);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32Frame);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+ IMG_BOOL *pbpbIsLastCaptureFrame);
+
+
+#endif /* CLIENT_PDUMPCTRL_BRIDGE_H */
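
(Illustrative aside, not part of the patch: a hedged sketch of programming a capture window with the pdumpctrl calls above; every numeric argument is an illustrative value, not a documented mode constant.)

/* Sketch only: programs a capture window and queries it back; the mode,
 * frame range and interval are illustrative numbers only. */
static PVRSRV_ERROR PDumpCaptureWindow(IMG_HANDLE hSrvConn)
{
	IMG_UINT32 ui32Frame = 0;
	IMG_BOOL bLast = IMG_FALSE;
	PVRSRV_ERROR eError;

	eError = BridgePVRSRVPDumpSetDefaultCaptureParams(hSrvConn,
							  1,  /* ui32Mode */
							  10, /* ui32Start */
							  20, /* ui32End */
							  1,  /* ui32Interval */
							  0); /* no file size cap */
	if (eError != PVRSRV_OK)
		return eError;

	(void) BridgePVRSRVPDumpGetFrame(hSrvConn, &ui32Frame);
	return BridgePVRSRVPDumpIsLastCaptureFrame(hSrvConn, &bLast);
}
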
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_direct_bridge.c
new file mode 100644
index 00000000000000..7cbda72db73c6b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pdumpctrl_direct_bridge.c
@@ -0,0 +1,114 @@
+/*******************************************************************************
+@Title Direct client bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdumpctrl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "pdump_km.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetState(IMG_HANDLE hBridge,
+ IMG_UINT64 *pui64State)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpGetStateKM(
+ pui64State);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpGetFrame(IMG_HANDLE hBridge,
+ IMG_UINT32 *pui32Frame)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PDumpGetFrameKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void *) hBridge),
+ pui32Frame);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpSetDefaultCaptureParams(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpSetDefaultCaptureParamsKM(
+ ui32Mode,
+ ui32Start,
+ ui32End,
+ ui32Interval,
+ ui32MaxParamFileSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePVRSRVPDumpIsLastCaptureFrame(IMG_HANDLE hBridge,
+ IMG_BOOL *pbpbIsLastCaptureFrame)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ PDumpIsLastCaptureFrameKM(
+ pbpbIsLastCaptureFrame);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_bridge.h
new file mode 100644
index 00000000000000..73bb73e519f27d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_bridge.h
@@ -0,0 +1,119 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PDUMPMM_BRIDGE_H
+#define CLIENT_PDUMPMM_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pdumpmm_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_BOOL bbZero);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32uiFileOffset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *puiMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *puiSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
+
+#endif /* CLIENT_PDUMPMM_BRIDGE_H */
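
(Illustrative aside, not part of the patch: a hedged sketch pairing a scripted 32-bit write with a matching poll via the declarations above; offsets, values and flags are illustrative, and the poll operator is passed in so that no PDUMP_POLL_OPERATOR enumerator name is assumed.)

/* Sketch only: scripts a 32-bit write into a PMR and a matching masked
 * poll; the operator is a parameter so no enumerator name is assumed. */
static PVRSRV_ERROR PDumpSeedAndPoll(IMG_HANDLE hSrvConn, IMG_HANDLE hPMR,
				     PDUMP_POLL_OPERATOR eOperator)
{
	PVRSRV_ERROR eError;

	/* Write 0xDEADBEEF at offset 0 of the PMR in the PDump script. */
	eError = BridgePMRPDumpLoadMemValue32(hSrvConn, hPMR, 0, 0xDEADBEEF, 0);
	if (eError != PVRSRV_OK)
		return eError;

	/* Poll the same location for the same value, full 32-bit mask. */
	return BridgePMRPDumpPol32(hSrvConn, hPMR, 0, 0xDEADBEEF,
				   0xFFFFFFFF, eOperator, 0);
}
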
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_direct_bridge.c
new file mode 100644
index 00000000000000..f44decc33faa50
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pdumpmm_direct_bridge.c
@@ -0,0 +1,258 @@
+/*******************************************************************************
+@Title Direct client bridge for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pdumpmm_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMem(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_BOOL bbZero)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpLoadMem(
+ psPMRInt,
+ uiOffset,
+ uiSize,
+ ui32PDumpFlags,
+ bbZero);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpLoadMemValue32(
+ psPMRInt,
+ uiOffset,
+ ui32Value,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpLoadMemValue64(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpLoadMemValue64(
+ psPMRInt,
+ uiOffset,
+ ui64Value,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSaveToFile(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpSaveToFile(
+ psPMRInt,
+ uiOffset,
+ uiSize,
+ ui32ArraySize,
+ puiFileName,
+ ui32uiFileOffset);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpSymbolicAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *puiMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *puiSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMR_PDumpSymbolicAddr(
+ psPMRInt,
+ uiOffset,
+ ui32MemspaceNameLen,
+ puiMemspaceName,
+ ui32SymbolicAddrLen,
+ puiSymbolicAddr,
+ puiNewOffset,
+ puiNextSymName);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpPol32(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpPol32(
+ psPMRInt,
+ uiOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePMRPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRInt = (PMR *) hPMR;
+
+ eError =
+ PMRPDumpCBP(
+ psPMRInt,
+ uiReadOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeDevmemIntPDumpSaveToFileVirtual(IMG_HANDLE hBridge,
+ IMG_HANDLE hDevmemServerContext,
+ IMG_DEV_VIRTADDR sAddress,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *puiFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX * psDevmemServerContextInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psDevmemServerContextInt = (DEVMEMINT_CTX *) hDevmemServerContext;
+
+ eError =
+ DevmemIntPDumpSaveToFileVirtual(
+ psDevmemServerContextInt,
+ sAddress,
+ uiSize,
+ ui32ArraySize,
+ puiFileName,
+ ui32FileOffset,
+ ui32PDumpFlags);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pvrtl_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_pvrtl_bridge.h
new file mode 100644
index 00000000000000..0713b5f0b40506
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pvrtl_bridge.h
@@ -0,0 +1,99 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_PVRTL_BRIDGE_H
+#define CLIENT_PVRTL_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_pvrtl_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE *phSD,
+ IMG_HANDLE *phTLPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32ReadOffset,
+ IMG_UINT32 *pui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReadOffset,
+ IMG_UINT32 ui32ReadLen);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiNamePattern,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR *puiStreams,
+ IMG_UINT32 *pui32NumFound);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReqSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *psData);
+
+
+#endif /* CLIENT_PVRTL_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_pvrtl_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_pvrtl_direct_bridge.c
new file mode 100644
index 00000000000000..d5865a069a1aaf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_pvrtl_direct_bridge.c
@@ -0,0 +1,214 @@
+/*******************************************************************************
+@Title Direct client bridge for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_pvrtl_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+#include "tlserver.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLOpenStream(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE *phSD,
+ IMG_HANDLE *phTLPMR)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PMR * psTLPMRInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ TLServerOpenStreamKM(
+ puiName,
+ ui32Mode,
+ &psSDInt,
+ &psTLPMRInt);
+
+ *phSD = psSDInt;
+ *phTLPMR = psTLPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCloseStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerCloseStreamKM(
+ psSDInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLAcquireData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32ReadOffset,
+ IMG_UINT32 *pui32ReadLen)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerAcquireDataKM(
+ psSDInt,
+ pui32ReadOffset,
+ pui32ReadLen);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReleaseData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReadOffset,
+ IMG_UINT32 ui32ReadLen)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerReleaseDataKM(
+ psSDInt,
+ ui32ReadOffset,
+ ui32ReadLen);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLDiscoverStreams(IMG_HANDLE hBridge,
+ const IMG_CHAR *puiNamePattern,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR *puiStreams,
+ IMG_UINT32 *pui32NumFound)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ TLServerDiscoverStreamsKM(
+ puiNamePattern,
+ ui32Size,
+ puiStreams,
+ pui32NumFound);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLReserveStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 *pui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerReserveStreamKM(
+ psSDInt,
+ pui32BufferOffset,
+ ui32Size,
+ ui32SizeMin,
+ pui32Available);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLCommitStream(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32ReqSize)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerCommitStreamKM(
+ psSDInt,
+ ui32ReqSize);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeTLWriteData(IMG_HANDLE hBridge,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *psData)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC * psSDInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSDInt = (TL_STREAM_DESC *) hSD;
+
+ eError =
+ TLServerWriteDataKM(
+ psSDInt,
+ ui32Size,
+ psData);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_bridge.h
new file mode 100644
index 00000000000000..c923daeabafecf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_bridge.h
@@ -0,0 +1,64 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RGXPDUMP_BRIDGE_H
+#define CLIENT_RGXPDUMP_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_rgxpdump_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags);
+
+
+#endif /* CLIENT_RGXPDUMP_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_direct_bridge.c
new file mode 100644
index 00000000000000..6fe32ca9779ba5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_rgxpdump_direct_bridge.c
@@ -0,0 +1,79 @@
+/*******************************************************************************
+@Title Direct client bridge for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_rgxpdump_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "rgx_bridge.h"
+
+#include "rgxpdump.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpTraceBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVPDumpTraceBufferKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+			ui32PDumpFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgePDumpSignatureBuffer(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVPDumpSignatureBufferKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+			ui32PDumpFlags);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_ri_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_ri_bridge.h
new file mode 100644
index 00000000000000..fc069b2024e89b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_ri_bridge.h
@@ -0,0 +1,97 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_RI_BRIDGE_H
+#define CLIENT_RI_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_ri_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsSuballoc,
+ IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64DevVAddr,
+ IMG_HANDLE *phRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sAddr);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+ IMG_PID ui32Pid);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_PID ui32Owner);
+
+
+#endif /* CLIENT_RI_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_ri_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_ri_direct_bridge.c
new file mode 100644
index 00000000000000..d6529e044a7887
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_ri_direct_bridge.c
@@ -0,0 +1,217 @@
+/*******************************************************************************
+@Title Direct client bridge for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_ri_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "ri_typedefs.h"
+
+#include "ri_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIWritePMREntryKM(
+ psPMRHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsSuballoc,
+ IMG_HANDLE *phRIHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIWriteMEMDESCEntryKM(
+ psPMRHandleInt,
+ ui32TextBSize,
+ puiTextB,
+ ui64Offset,
+ ui64Size,
+ bIsImport,
+ bIsSuballoc,
+ &psRIHandleInt);
+
+ *phRIHandle = psRIHandleInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWriteProcListEntry(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *puiTextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64DevVAddr,
+ IMG_HANDLE *phRIHandle)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ RIWriteProcListEntryKM(
+ ui32TextBSize,
+ puiTextB,
+ ui64Size,
+ ui64DevVAddr,
+ &psRIHandleInt);
+
+ *phRIHandle = psRIHandleInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIUpdateMEMDESCAddr(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sAddr)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError =
+ RIUpdateMEMDESCAddrKM(
+ psRIHandleInt,
+ sAddr);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDeleteMEMDESCEntry(IMG_HANDLE hBridge,
+ IMG_HANDLE hRIHandle)
+{
+ PVRSRV_ERROR eError;
+ RI_HANDLE psRIHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psRIHandleInt = (RI_HANDLE) hRIHandle;
+
+ eError =
+ RIDeleteMEMDESCEntryKM(
+ psRIHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpList(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIDumpListKM(
+ psPMRHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpAll(IMG_HANDLE hBridge)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+		RIDumpAllKM();
+
+	return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIDumpProcess(IMG_HANDLE hBridge,
+ IMG_PID ui32Pid)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+
+ eError =
+ RIDumpProcessKM(
+ ui32Pid);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeRIWritePMREntryWithOwner(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMRHandle,
+ IMG_PID ui32Owner)
+{
+ PVRSRV_ERROR eError;
+ PMR * psPMRHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psPMRHandleInt = (PMR *) hPMRHandle;
+
+ eError =
+ RIWritePMREntryWithOwnerKM(
+ psPMRHandleInt,
+ ui32Owner);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_sync_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_sync_bridge.h
new file mode 100644
index 00000000000000..391f54c6f97e9e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_sync_bridge.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNC_BRIDGE_H
+#define CLIENT_SYNC_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_sync_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 *pui32SyncPrimBlockSize,
+ IMG_HANDLE *phhSyncPMR);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_BOOL bbUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncCount,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncBlockCount,
+ IMG_HANDLE *phBlockList,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32SyncBlockIndex,
+ IMG_UINT32 *pui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_HANDLE *phServerSync,
+ IMG_HANDLE *phServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32Flags,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *pui32ServerFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_BOOL *pbReady);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32FWAddr);
+
+
+#endif /* CLIENT_SYNC_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_sync_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_sync_direct_bridge.c
new file mode 100644
index 00000000000000..50e678a30fe185
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_sync_direct_bridge.c
@@ -0,0 +1,525 @@
+/*******************************************************************************
+@Title Direct client bridge for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_sync_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeAllocSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 *pui32SyncPrimBlockSize,
+ IMG_HANDLE *phhSyncPMR)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PMR * pshSyncPMRInt;
+
+
+ eError =
+		PVRSRVAllocSyncPrimitiveBlockKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ &psSyncHandleInt,
+ pui32SyncPrimVAddr,
+ pui32SyncPrimBlockSize,
+ &pshSyncPMRInt);
+
+ *phSyncHandle = psSyncHandleInt;
+ *phhSyncPMR = pshSyncPMRInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeFreeSyncPrimitiveBlock(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVFreeSyncPrimitiveBlockKM(
+ psSyncHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimSetKM(
+ psSyncHandleInt,
+ ui32Index,
+ ui32Value);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncPrimSet(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+ eError =
+ PVRSRVServerSyncPrimSetKM(
+ psSyncHandleInt,
+ ui32Value);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncAlloc(IMG_HANDLE hBridge,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+
+
+ eError =
+		PVRSRVServerSyncAllocKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ &psSyncHandleInt,
+ pui32SyncPrimVAddr,
+ ui32ClassNameSize,
+ puiClassName);
+
+ *phSyncHandle = psSyncHandleInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncFree(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+ eError =
+ PVRSRVServerSyncFreeKM(
+ psSyncHandleInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncQueueHWOp(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_BOOL bbUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE *) hSyncHandle;
+
+ eError =
+ PVRSRVServerSyncQueueHWOpKM(
+ psSyncHandleInt,
+ bbUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeServerSyncGetStatus(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncCount,
+ IMG_HANDLE *phSyncHandle,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp)
+{
+ PVRSRV_ERROR eError;
+	SERVER_SYNC_PRIMITIVE **psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **) phSyncHandle;
+
+ eError =
+ PVRSRVServerSyncGetStatusKM(
+ ui32SyncCount,
+ psSyncHandleInt,
+ pui32UID,
+ pui32FWAddr,
+ pui32CurrentOp,
+ pui32NextOp);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpCreate(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32SyncBlockCount,
+ IMG_HANDLE *phBlockList,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32SyncBlockIndex,
+ IMG_UINT32 *pui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_HANDLE *phServerSync,
+ IMG_HANDLE *phServerCookie)
+{
+ PVRSRV_ERROR eError;
+	SYNC_PRIMITIVE_BLOCK **psBlockListInt;
+	SERVER_SYNC_PRIMITIVE **psServerSyncInt;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psBlockListInt = (SYNC_PRIMITIVE_BLOCK **) phBlockList;
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **) phServerSync;
+
+ eError =
+ PVRSRVSyncPrimOpCreateKM(
+ ui32SyncBlockCount,
+ psBlockListInt,
+ ui32ClientSyncCount,
+ pui32SyncBlockIndex,
+ pui32Index,
+ ui32ServerSyncCount,
+ psServerSyncInt,
+ &psServerCookieInt);
+
+ *phServerCookie = psServerCookieInt;
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpTake(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *pui32Flags,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *pui32ServerFlags)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpTakeKM(
+ psServerCookieInt,
+ ui32ClientSyncCount,
+ pui32Flags,
+ pui32FenceValue,
+ pui32UpdateValue,
+ ui32ServerSyncCount,
+ pui32ServerFlags);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpReady(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ IMG_BOOL *pbReady)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpReadyKM(
+ psServerCookieInt,
+ pbReady);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpComplete(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpCompleteKM(
+ psServerCookieInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpDestroy(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie)
+{
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpDestroyKM(
+ psServerCookieInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDump(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpKM(
+ psSyncHandleInt,
+ ui32Offset);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpValue(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpValueKM(
+ psSyncHandleInt,
+ ui32Offset,
+ ui32Value);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpPolKM(
+ psSyncHandleInt,
+ ui32Offset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ uiPDumpFlags);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimOpPDumpPol(IMG_HANDLE hBridge,
+ IMG_HANDLE hServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SERVER_OP_COOKIE * psServerCookieInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psServerCookieInt = (SERVER_OP_COOKIE *) hServerCookie;
+
+ eError =
+ PVRSRVSyncPrimOpPDumpPolKM(
+ psServerCookieInt,
+ eOperator,
+ uiPDumpFlags);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hServerCookie);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncPrimPDumpCBP(IMG_HANDLE hBridge,
+ IMG_HANDLE hSyncHandle,
+ IMG_UINT32 ui32Offset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+#if defined(PDUMP)
+ PVRSRV_ERROR eError;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ psSyncHandleInt = (SYNC_PRIMITIVE_BLOCK *) hSyncHandle;
+
+ eError =
+ PVRSRVSyncPrimPDumpCBPKM(
+ psSyncHandleInt,
+ ui32Offset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(hSyncHandle);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#endif
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncAllocEvent(IMG_HANDLE hBridge,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVSyncAllocEventKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ bServerSync,
+ ui32FWAddr,
+ ui32ClassNameSize,
+ puiClassName);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncFreeEvent(IMG_HANDLE hBridge,
+ IMG_UINT32 ui32FWAddr)
+{
+ PVRSRV_ERROR eError;
+
+
+ eError =
+		PVRSRVSyncFreeEventKM(NULL,
+			(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ ui32FWAddr);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_synctracking_bridge.h b/drivers/gpu/drm/img-rogue/1.10/client_synctracking_bridge.h
new file mode 100644
index 00000000000000..4c7a57f3876bc0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_synctracking_bridge.h
@@ -0,0 +1,70 @@
+/*******************************************************************************
+@File
+@Title Client bridge header for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exports the client bridge functions for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef CLIENT_SYNCTRACKING_BRIDGE_H
+#define CLIENT_SYNCTRACKING_BRIDGE_H
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#if defined(PVR_INDIRECT_BRIDGE_CLIENTS)
+#include "pvr_bridge_client.h"
+#include "pvr_bridge.h"
+#endif
+
+#include "common_synctracking_bridge.h"
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hhRecord);
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+ IMG_HANDLE *phhRecord,
+ IMG_HANDLE hhServerSyncPrimBlock,
+ IMG_UINT32 ui32ui32FwBlockAddr,
+ IMG_UINT32 ui32ui32SyncOffset,
+ IMG_BOOL bbServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName);
+
+
+#endif /* CLIENT_SYNCTRACKING_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/client_synctracking_direct_bridge.c b/drivers/gpu/drm/img-rogue/1.10/client_synctracking_direct_bridge.c
new file mode 100644
index 00000000000000..b0c8108fd0bc44
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/client_synctracking_direct_bridge.c
@@ -0,0 +1,97 @@
+/*******************************************************************************
+@Title Direct client bridge for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#include "client_synctracking_bridge.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+
+/* Module specific includes */
+
+#include "sync.h"
+#include "sync_server.h"
+
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordRemoveByHandle(IMG_HANDLE hBridge,
+ IMG_HANDLE hhRecord)
+{
+ PVRSRV_ERROR eError;
+ SYNC_RECORD_HANDLE pshRecordInt;
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+
+ pshRecordInt = (SYNC_RECORD_HANDLE) hhRecord;
+
+ eError =
+ PVRSRVSyncRecordRemoveByHandleKM(
+ pshRecordInt);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR IMG_CALLCONV BridgeSyncRecordAdd(IMG_HANDLE hBridge,
+ IMG_HANDLE *phhRecord,
+ IMG_HANDLE hhServerSyncPrimBlock,
+ IMG_UINT32 ui32ui32FwBlockAddr,
+ IMG_UINT32 ui32ui32SyncOffset,
+ IMG_BOOL bbServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *puiClassName)
+{
+ PVRSRV_ERROR eError;
+ SYNC_RECORD_HANDLE pshRecordInt;
+ SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt;
+
+ pshServerSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK *) hhServerSyncPrimBlock;
+
+ eError =
+		PVRSRVSyncRecordAddKM(NULL,
+					(PVRSRV_DEVICE_NODE *)((void*) hBridge),
+ &pshRecordInt,
+ pshServerSyncPrimBlockInt,
+ ui32ui32FwBlockAddr,
+ ui32ui32SyncOffset,
+ bbServerSync,
+ ui32ClassNameSize,
+ puiClassName);
+
+ *phhRecord = pshRecordInt;
+ return eError;
+}
+
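An editorial note on the file above: this is the direct-bridge variant, where each client entry point collapses to a plain function call and the opaque hBridge handle carries the PVRSRV_DEVICE_NODE pointer itself (see the cast inside BridgeSyncRecordAdd). A hypothetical caller, assuming pvDeviceNode comes from device enumeration elsewhere:

#include "client_synctracking_bridge.h"

/* Hypothetical caller for the direct-bridge build: the bridge handle is
 * the device node pointer in disguise. */
static PVRSRV_ERROR AddRecordOnDevice(void *pvDeviceNode,
                                      IMG_HANDLE hSyncBlock,
                                      IMG_HANDLE *phRecord)
{
	return BridgeSyncRecordAdd((IMG_HANDLE) pvDeviceNode, phRecord,
	                           hSyncBlock,
	                           0, 0,        /* FW block address and offset */
	                           IMG_FALSE,
	                           sizeof("direct"), "direct");
}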
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_breakpoint_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_breakpoint_bridge.h
new file mode 100644
index 00000000000000..5cfa27d9b17545
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_breakpoint_bridge.h
@@ -0,0 +1,155 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for breakpoint
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for breakpoint
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_BREAKPOINT_BRIDGE_H
+#define COMMON_BREAKPOINT_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST 0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+0
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+1
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+2
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+3
+#define PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4
+#define PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST (PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXSetBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+ IMG_UINT32 eFWDataMaster;
+ IMG_UINT32 ui32BreakpointAddr;
+ IMG_UINT32 ui32HandlerAddr;
+ IMG_UINT32 ui32DM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT;
+
+/* Bridge out structure for RGXSetBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT;
+
+
+/*******************************************
+ RGXClearBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT;
+
+/* Bridge out structure for RGXClearBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT;
+
+
+/*******************************************
+ RGXEnableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT;
+
+/* Bridge out structure for RGXEnableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT;
+
+
+/*******************************************
+ RGXDisableBreakpoint
+ *******************************************/
+
+/* Bridge in structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT;
+
+/* Bridge out structure for RGXDisableBreakpoint */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT;
+
+
+/*******************************************
+ RGXOverallocateBPRegisters
+ *******************************************/
+
+/* Bridge in structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+ IMG_UINT32 ui32TempRegs;
+ IMG_UINT32 ui32SharedRegs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS;
+
+/* Bridge out structure for RGXOverallocateBPRegisters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS;
+
+
+#endif /* COMMON_BREAKPOINT_BRIDGE_H */
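
The defines above follow a fixed convention: each module numbers its commands upward from _CMD_FIRST and records the top in _CMD_LAST, and every command pairs one packed IN struct with one packed OUT struct. A small illustrative sketch (names beyond the header's are hypothetical):

#include "common_breakpoint_bridge.h"

/* Hypothetical range check using the module's command bounds. */
static IMG_BOOL IsBreakpointCmd(IMG_UINT32 ui32Cmd)
{
	return (ui32Cmd >= PVRSRV_BRIDGE_BREAKPOINT_CMD_FIRST &&
	        ui32Cmd <= PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST)
	           ? IMG_TRUE : IMG_FALSE;
}

/* Hypothetical fill of the RGXSetBreakpoint IN struct. */
static void PrepareSetBreakpoint(PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psIn,
                                 IMG_HANDLE hPrivData,
                                 IMG_UINT32 ui32BreakpointAddr,
                                 IMG_UINT32 ui32HandlerAddr,
                                 IMG_UINT32 ui32DM)
{
	psIn->hPrivData          = hPrivData;
	psIn->eFWDataMaster      = ui32DM;    /* FW data master; encoding assumed */
	psIn->ui32BreakpointAddr = ui32BreakpointAddr;
	psIn->ui32HandlerAddr    = ui32HandlerAddr;
	psIn->ui32DM             = ui32DM;
}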
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_cache_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_cache_bridge.h
new file mode 100644
index 00000000000000..0cdb414f724e5e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_cache_bridge.h
@@ -0,0 +1,135 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CACHE_BRIDGE_H
+#define COMMON_CACHE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "cache_ops.h"
+
+
+#define PVRSRV_BRIDGE_CACHE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE PVRSRV_BRIDGE_CACHE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CACHE_CACHEOPEXEC PVRSRV_BRIDGE_CACHE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CACHE_CACHEOPLOG PVRSRV_BRIDGE_CACHE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CACHE_CMD_LAST (PVRSRV_BRIDGE_CACHE_CMD_FIRST+2)
+
+
+/*******************************************
+ CacheOpQueue
+ *******************************************/
+
+/* Bridge in structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPQUEUE_TAG
+{
+ IMG_UINT32 ui32NumCacheOps;
+ IMG_HANDLE * phPMR;
+ IMG_UINT64 * pui64Address;
+ IMG_DEVMEM_OFFSET_T * puiOffset;
+ IMG_DEVMEM_SIZE_T * puiSize;
+ PVRSRV_CACHE_OP * piuCacheOp;
+ IMG_UINT32 ui32OpTimeline;
+ IMG_UINT32 ui32OpInfoPgGFSeqNum;
+ IMG_UINT32 ui32CurrentFenceSeqNum;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPQUEUE;
+
+/* Bridge out structure for CacheOpQueue */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPQUEUE_TAG
+{
+ IMG_UINT32 ui32NextFenceSeqNum;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPQUEUE;
+
+
+/*******************************************
+ CacheOpExec
+ *******************************************/
+
+/* Bridge in structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPEXEC_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_UINT64 ui64Address;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPEXEC;
+
+/* Bridge out structure for CacheOpExec */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPEXEC_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPEXEC;
+
+
+/*******************************************
+ CacheOpLog
+ *******************************************/
+
+/* Bridge in structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_IN_CACHEOPLOG_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_UINT64 ui64Address;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_INT64 i64QueuedTimeUs;
+ IMG_INT64 i64ExecuteTimeUs;
+ IMG_INT32 i32NumRBF;
+ IMG_BOOL bIsDiscard;
+ PVRSRV_CACHE_OP iuCacheOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CACHEOPLOG;
+
+/* Bridge out structure for CacheOpLog */
+typedef struct PVRSRV_BRIDGE_OUT_CACHEOPLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CACHEOPLOG;
+
+
+#endif /* COMMON_CACHE_BRIDGE_H */
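
CacheOpQueue is the one batched command here: ui32NumCacheOps parallel arrays carry one PMR handle, CPU address, offset, size, and cache op per entry. A sketch of filling a one-entry batch (illustrative only; PVRSRV_CACHE_OP_FLUSH is assumed to be one of the enum values from cache_ops.h):

#include "common_cache_bridge.h"

/* Hypothetical one-entry batch for CacheOpQueue; storage for the parallel
 * arrays is supplied by the caller. */
static void PrepareOneFlush(PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psIn,
                            IMG_HANDLE *phPMR,
                            IMG_UINT64 *pui64Addr,
                            IMG_DEVMEM_OFFSET_T *puiOffset,
                            IMG_DEVMEM_SIZE_T *puiSize,
                            PVRSRV_CACHE_OP *piuOp,
                            IMG_HANDLE hPMR,
                            IMG_DEVMEM_SIZE_T uiSize)
{
	phPMR[0]     = hPMR;
	pui64Addr[0] = 0;                      /* CPU virtual address, if known */
	puiOffset[0] = 0;
	puiSize[0]   = uiSize;
	piuOp[0]     = PVRSRV_CACHE_OP_FLUSH;  /* enum value assumed */

	psIn->ui32NumCacheOps        = 1;
	psIn->phPMR                  = phPMR;
	psIn->pui64Address           = pui64Addr;
	psIn->puiOffset              = puiOffset;
	psIn->puiSize                = puiSize;
	psIn->piuCacheOp             = piuOp;
	psIn->ui32OpTimeline         = 0;      /* sequencing fields; semantics */
	psIn->ui32OpInfoPgGFSeqNum   = 0;      /* not defined by this header   */
	psIn->ui32CurrentFenceSeqNum = 0;
}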
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_cmm_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_cmm_bridge.h
new file mode 100644
index 00000000000000..0dec25e3b3915c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_cmm_bridge.h
@@ -0,0 +1,118 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for cmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for cmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_CMM_BRIDGE_H
+#define COMMON_CMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_CMM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX PVRSRV_BRIDGE_CMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX PVRSRV_BRIDGE_CMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_CMM_CMD_LAST (PVRSRV_BRIDGE_CMM_CMD_FIRST+2)
+
+
+/*******************************************
+ DevmemIntExportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX_TAG
+{
+ IMG_HANDLE hContext;
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX;
+
+/* Bridge out structure for DevmemIntExportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX_TAG
+{
+ IMG_HANDLE hContextExport;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX;
+
+
+/*******************************************
+ DevmemIntUnexportCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX_TAG
+{
+ IMG_HANDLE hContextExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX;
+
+/* Bridge out structure for DevmemIntUnexportCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX;
+
+
+/*******************************************
+ DevmemIntAcquireRemoteCtx
+ *******************************************/
+
+/* Bridge in structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX;
+
+/* Bridge out structure for DevmemIntAcquireRemoteCtx */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX_TAG
+{
+ IMG_HANDLE hContext;
+ IMG_HANDLE hPrivData;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX;
+
+
+#endif /* COMMON_CMM_BRIDGE_H */
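
The three cmm commands work as a set: DevmemIntExportCtx turns a context plus PMR into an hContextExport token, DevmemIntUnexportCtx retires that token, and DevmemIntAcquireRemoteCtx lets another client exchange the PMR for the context. A sketch of the two IN structs involved (the pairing semantics are inferred from the field names, not stated by this header):

#include "common_cmm_bridge.h"

/* Hypothetical fill on the exporting side. */
static void PrepareCtxExport(PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psExportIn,
                             IMG_HANDLE hContext, IMG_HANDLE hPMR)
{
	psExportIn->hContext = hContext;  /* device-mem context being exported */
	psExportIn->hPMR     = hPMR;      /* PMR associated with the export    */
}

/* Hypothetical fill on the importing side: the same PMR is assumed to
 * identify the exported context. */
static void PrepareRemoteAcquire(PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psIn,
                                 IMG_HANDLE hPMR)
{
	psIn->hPMR = hPMR;
}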
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_debugmisc_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_debugmisc_bridge.h
new file mode 100644
index 00000000000000..fc324d539d51e4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_debugmisc_bridge.h
@@ -0,0 +1,173 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for debugmisc
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for debugmisc
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DEBUGMISC_BRIDGE_H
+#define COMMON_DEBUGMISC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "rgx_bridge.h"
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST (PVRSRV_BRIDGE_DEBUGMISC_CMD_FIRST+5)
+
+
+/*******************************************
+ DebugMiscSLCSetBypassState
+ *******************************************/
+
+/* Bridge in structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+ IMG_UINT32 ui32Flags;
+ IMG_BOOL bIsBypassed;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE;
+
+/* Bridge out structure for DebugMiscSLCSetBypassState */
+typedef struct PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE;
+
+
+/*******************************************
+ RGXDebugMiscSetFWLog
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG_TAG
+{
+ IMG_UINT32 ui32RGXFWLogType;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG;
+
+/* Bridge out structure for RGXDebugMiscSetFWLog */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG;
+
+
+/*******************************************
+ RGXDebugMiscDumpFreelistPageList
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+/* Bridge out structure for RGXDebugMiscDumpFreelistPageList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST;
+
+
+/*******************************************
+ RGXDebugMiscSetHCSDeadline
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE_TAG
+{
+ IMG_UINT32 ui32RGXHCSDeadline;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE;
+
+/* Bridge out structure for RGXDebugMiscSetHCSDeadline */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE;
+
+
+/*******************************************
+ RGXDebugMiscSetOSidPriority
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY_TAG
+{
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY;
+
+/* Bridge out structure for RGXDebugMiscSetOSidPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY;
+
+
+/*******************************************
+ RGXDebugMiscSetOSNewOnlineState
+ *******************************************/
+
+/* Bridge in structure for RGXDebugMiscSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE_TAG
+{
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32OSNewState;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE;
+
+/* Bridge out structure for RGXDebugMiscSetOSNewOnlineState */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE;
+
+
+#endif /* COMMON_DEBUGMISC_BRIDGE_H */
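
One detail worth noting above: RGXDebugMiscDumpFreelistPageList takes no real parameters, so its IN struct declares ui32EmptyStructPlaceholder; keeping packed bridge structs non-empty gives them a well-defined size in C. A trivial illustrative fill:

#include "common_debugmisc_bridge.h"

/* Hypothetical caller: commands with no inputs just zero the placeholder. */
static void PrepareFreelistDump(
	PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST *psIn)
{
	psIn->ui32EmptyStructPlaceholder = 0;
}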
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_devicememhistory_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_devicememhistory_bridge.h
new file mode 100644
index 00000000000000..1eba1375d68f00
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_devicememhistory_bridge.h
@@ -0,0 +1,190 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DEVICEMEMHISTORY_BRIDGE_H
+#define COMMON_DEVICEMEMHISTORY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+

+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_FIRST+4)
+
+
+/*******************************************
+ DevicememHistoryMap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP;
+
+/* Bridge out structure for DevicememHistoryMap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP;
+
+
+/*******************************************
+ DevicememHistoryUnmap
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP;
+
+/* Bridge out structure for DevicememHistoryUnmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP;
+
+
+/*******************************************
+ DevicememHistoryMapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ IMG_UINT32 ui32ui32StartPage;
+ IMG_UINT32 ui32NumPages;
+ IMG_DEVMEM_SIZE_T uiAllocSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryMapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE;
+
+
+/*******************************************
+ DevicememHistoryUnmapVRange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ IMG_UINT32 ui32ui32StartPage;
+ IMG_UINT32 ui32NumPages;
+ IMG_DEVMEM_SIZE_T uiAllocSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE;
+
+/* Bridge out structure for DevicememHistoryUnmapVRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE;
+
+
+/*******************************************
+ DevicememHistorySparseChange
+ *******************************************/
+
+/* Bridge in structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ const IMG_CHAR * puiText;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT32 ui32AllocPageCount;
+ IMG_UINT32 * pui32AllocPageIndices;
+ IMG_UINT32 ui32FreePageCount;
+ IMG_UINT32 * pui32FreePageIndices;
+ IMG_UINT32 ui32AllocationIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE;
+
+/* Bridge out structure for DevicememHistorySparseChange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE_TAG
+{
+ IMG_UINT32 ui32AllocationIndexOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE;
+
+
+#endif /* COMMON_DEVICEMEMHISTORY_BRIDGE_H */
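
Every devicememhistory command accepts ui32AllocationIndex and returns ui32AllocationIndexOut, which suggests the client threads the returned index into its next call so the server can chain records for the same allocation. A hypothetical sketch of that round trip (the initial sentinel value is an assumption, not defined by this header):

#include "common_devicememhistory_bridge.h"

#define EXAMPLE_ALLOC_INDEX_NONE 0xFFFFFFFFU  /* sentinel value assumed */

/* Hypothetical round trip for DevicememHistoryMap. */
static IMG_UINT32 ThreadAllocationIndex(
	PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psIn,
	const PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psOut,
	IMG_UINT32 ui32Prev)
{
	psIn->ui32AllocationIndex = ui32Prev;  /* EXAMPLE_ALLOC_INDEX_NONE first */
	/* ... bridge call dispatched here ... */
	return psOut->ui32AllocationIndexOut;  /* thread into the next call */
}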
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_dmabuf_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_dmabuf_bridge.h
new file mode 100644
index 00000000000000..fff4db2653a9f2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_dmabuf_bridge.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for dmabuf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for dmabuf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_DMABUF_BRIDGE_H
+#define COMMON_DMABUF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+
+
+#define PVRSRV_BRIDGE_DMABUF_CMD_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_DMABUF_CMD_LAST (PVRSRV_BRIDGE_DMABUF_CMD_FIRST+2)
+
+
+/*******************************************
+ PhysmemImportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF_TAG
+{
+ IMG_INT ifd;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_UINT32 ui32NameSize;
+ const IMG_CHAR * puiName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF;
+
+/* Bridge out structure for PhysmemImportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T sAlign;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF;
+
+
+/*******************************************
+ PhysmemExportDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF;
+
+/* Bridge out structure for PhysmemExportDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF_TAG
+{
+ IMG_INT iFd;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF;
+
+
+/*******************************************
+ PhysmemImportSparseDmaBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+ IMG_INT ifd;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_DEVMEM_SIZE_T uiChunkSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 * pui32MappingTable;
+ IMG_UINT32 ui32NameSize;
+ const IMG_CHAR * puiName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF;
+
+/* Bridge out structure for PhysmemImportSparseDmaBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T sAlign;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF;
+
+
+#endif /* COMMON_DMABUF_BRIDGE_H */
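
The import path takes a dma-buf file descriptor plus allocation flags and a debug name, and hands back a PMR handle with its size and alignment. A sketch of filling the IN struct (illustrative only; the flag bits come from pvrsrv_memallocflags.h):

#include "common_dmabuf_bridge.h"

/* Hypothetical fill of the PhysmemImportDmaBuf IN struct. */
static void PrepareDmaBufImport(PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psIn,
                                IMG_INT iDmaBufFd,
                                PVRSRV_MEMALLOCFLAGS_T uiFlags)
{
	static const IMG_CHAR acName[] = "imported-dmabuf";

	psIn->ifd          = iDmaBufFd;      /* fd produced by the exporter */
	psIn->uiFlags      = uiFlags;        /* PVRSRV_MEMALLOCFLAG_* bits  */
	psIn->puiName      = acName;         /* debug annotation            */
	psIn->ui32NameSize = sizeof(acName); /* size includes the NUL       */
}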
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_htbuffer_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_htbuffer_bridge.h
new file mode 100644
index 00000000000000..c3bbde6d9c8433
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_htbuffer_bridge.h
@@ -0,0 +1,125 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_HTBUFFER_BRIDGE_H
+#define COMMON_HTBUFFER_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+0
+#define PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+1
+#define PVRSRV_BRIDGE_HTBUFFER_HTBLOG PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+2
+#define PVRSRV_BRIDGE_HTBUFFER_CMD_LAST (PVRSRV_BRIDGE_HTBUFFER_CMD_FIRST+2)
+
+
+/*******************************************
+ HTBConfigure
+ *******************************************/
+
+/* Bridge in structure for HTBConfigure */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONFIGURE_TAG
+{
+ IMG_UINT32 ui32NameSize;
+ const IMG_CHAR * puiName;
+ IMG_UINT32 ui32BufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBCONFIGURE;
+
+/* Bridge out structure for HTBConfigure */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONFIGURE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBCONFIGURE;
+
+
+/*******************************************
+ HTBControl
+ *******************************************/
+
+/* Bridge in structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_IN_HTBCONTROL_TAG
+{
+ IMG_UINT32 ui32NumGroups;
+ IMG_UINT32 * pui32GroupEnable;
+ IMG_UINT32 ui32LogLevel;
+ IMG_UINT32 ui32EnablePID;
+ IMG_UINT32 ui32LogMode;
+ IMG_UINT32 ui32OpMode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBCONTROL;
+
+/* Bridge out structure for HTBControl */
+typedef struct PVRSRV_BRIDGE_OUT_HTBCONTROL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBCONTROL;
+
+
+/*******************************************
+ HTBLog
+ *******************************************/
+
+/* Bridge in structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_IN_HTBLOG_TAG
+{
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32TimeStamp;
+ IMG_UINT32 ui32SF;
+ IMG_UINT32 ui32NumArgs;
+ IMG_UINT32 * pui32Args;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HTBLOG;
+
+/* Bridge out structure for HTBLog */
+typedef struct PVRSRV_BRIDGE_OUT_HTBLOG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HTBLOG;
+
+
+#endif /* COMMON_HTBUFFER_BRIDGE_H */
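
HTBControl drives the host trace buffer at runtime: an array of ui32NumGroups enable words plus log level, PID filter, and mode fields. A sketch of a minimal fill (illustrative; the meaning of the individual mode values is not defined by this header):

#include "common_htbuffer_bridge.h"

/* Hypothetical fill of the HTBControl IN struct; group-enable storage is
 * supplied by the caller. */
static void PrepareHTBControl(PVRSRV_BRIDGE_IN_HTBCONTROL *psIn,
                              IMG_UINT32 *pui32GroupEnable,
                              IMG_UINT32 ui32NumGroups)
{
	psIn->ui32NumGroups    = ui32NumGroups;
	psIn->pui32GroupEnable = pui32GroupEnable; /* one enable word per group */
	psIn->ui32LogLevel     = 0;                /* values assumed            */
	psIn->ui32EnablePID    = 0;
	psIn->ui32LogMode      = 0;
	psIn->ui32OpMode       = 0;
}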
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_mm_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_mm_bridge.h
new file mode 100644
index 00000000000000..5783c98dbef482
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_mm_bridge.h
@@ -0,0 +1,782 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_MM_BRIDGE_H
+#define COMMON_MM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_MM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_MM_PMREXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_MM_PMRGETUID PVRSRV_BRIDGE_MM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE PVRSRV_BRIDGE_MM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_MM_PMRIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR PVRSRV_BRIDGE_MM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_MM_PMRUNREFPMR PVRSRV_BRIDGE_MM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR PVRSRV_BRIDGE_MM_CMD_FIRST+8
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+9
+#define PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR PVRSRV_BRIDGE_MM_CMD_FIRST+10
+#define PVRSRV_BRIDGE_MM_PHYSMEMIMPORTSECBUF PVRSRV_BRIDGE_MM_CMD_FIRST+11
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPIN PVRSRV_BRIDGE_MM_CMD_FIRST+12
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN PVRSRV_BRIDGE_MM_CMD_FIRST+13
+#define PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+14
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE PVRSRV_BRIDGE_MM_CMD_FIRST+15
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+16
+#define PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+17
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE PVRSRV_BRIDGE_MM_CMD_FIRST+18
+#define PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY PVRSRV_BRIDGE_MM_CMD_FIRST+19
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+20
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR PVRSRV_BRIDGE_MM_CMD_FIRST+21
+#define PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+22
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE PVRSRV_BRIDGE_MM_CMD_FIRST+23
+#define PVRSRV_BRIDGE_MM_CHANGESPARSEMEM PVRSRV_BRIDGE_MM_CMD_FIRST+24
+#define PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+25
+#define PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES PVRSRV_BRIDGE_MM_CMD_FIRST+26
+#define PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID PVRSRV_BRIDGE_MM_CMD_FIRST+27
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+28
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT PVRSRV_BRIDGE_MM_CMD_FIRST+29
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME PVRSRV_BRIDGE_MM_CMD_FIRST+30
+#define PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS PVRSRV_BRIDGE_MM_CMD_FIRST+31
+#define PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM PVRSRV_BRIDGE_MM_CMD_FIRST+32
+#define PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE PVRSRV_BRIDGE_MM_CMD_FIRST+33
+#define PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS PVRSRV_BRIDGE_MM_CMD_FIRST+34
+#define PVRSRV_BRIDGE_MM_CMD_LAST (PVRSRV_BRIDGE_MM_CMD_FIRST+34)
+
+
+/*******************************************
+ PMRExportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMREXPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMREXPORTPMR;
+
+/* Bridge out structure for PMRExportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMREXPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+ IMG_UINT64 ui64Size;
+ IMG_UINT32 ui32Log2Contig;
+ IMG_UINT64 ui64Password;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMREXPORTPMR;
+
+
+/*******************************************
+ PMRUnexportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR;
+
+/* Bridge out structure for PMRUnexportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR;
+
+
+/*******************************************
+ PMRGetUID
+ *******************************************/
+
+/* Bridge in structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_IN_PMRGETUID_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRGETUID;
+
+/* Bridge out structure for PMRGetUID */
+typedef struct PVRSRV_BRIDGE_OUT_PMRGETUID_TAG
+{
+ IMG_UINT64 ui64UID;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRGETUID;
+
+
+/*******************************************
+ PMRMakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hBuffer;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRMakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hExtMem;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE;
+
+
+/*******************************************
+ PMRUnmakeLocalImportHandle
+ *******************************************/
+
+/* Bridge in structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+ IMG_HANDLE hExtMem;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE;
+
+/* Bridge out structure for PMRUnmakeLocalImportHandle */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE;
+
+
+/*******************************************
+ PMRImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMRExport;
+ IMG_UINT64 ui64uiPassword;
+ IMG_UINT64 ui64uiSize;
+ IMG_UINT32 ui32uiLog2Contig;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRIMPORTPMR;
+
+/* Bridge out structure for PMRImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRIMPORTPMR;
+
+
+/*******************************************
+ PMRLocalImportPMR
+ *******************************************/
+
+/* Bridge in structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR_TAG
+{
+ IMG_HANDLE hExtHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR;
+
+/* Bridge out structure for PMRLocalImportPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T sAlign;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR;
+
+
+/*******************************************
+ PMRUnrefPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFPMR;
+
+/* Bridge out structure for PMRUnrefPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFPMR;
+
+
+/*******************************************
+ PMRUnrefUnlockPMR
+ *******************************************/
+
+/* Bridge in structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR;
+
+/* Bridge out structure for PMRUnrefUnlockPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR;
+
+
+/*******************************************
+ PhysmemNewRamBackedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_SIZE_T uiChunkSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 * pui32MappingTable;
+ IMG_UINT32 ui32Log2PageSize;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_UINT32 ui32AnnotationLength;
+ const IMG_CHAR * puiAnnotation;
+ IMG_PID ui32PID;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR;
+
+
+/*******************************************
+ PhysmemNewRamBackedLockedPMR
+ *******************************************/
+
+/* Bridge in structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_SIZE_T uiChunkSize;
+ IMG_UINT32 ui32NumPhysChunks;
+ IMG_UINT32 ui32NumVirtChunks;
+ IMG_UINT32 * pui32MappingTable;
+ IMG_UINT32 ui32Log2PageSize;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_UINT32 ui32AnnotationLength;
+ const IMG_CHAR * puiAnnotation;
+ IMG_PID ui32PID;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+/* Bridge out structure for PhysmemNewRamBackedLockedPMR */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR;
+
+
+/*******************************************
+ PhysmemImportSecBuf
+ *******************************************/
+
+/* Bridge in structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF_TAG
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32Log2Align;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF;
+
+/* Bridge out structure for PhysmemImportSecBuf */
+typedef struct PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF_TAG
+{
+ IMG_HANDLE hPMRPtr;
+ IMG_UINT64 ui64SecBufHandle;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF;
+
+
+/*******************************************
+ DevmemIntPin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPIN_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPIN;
+
+/* Bridge out structure for DevmemIntPin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPIN_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPIN;
+
+
+/*******************************************
+ DevmemIntUnpin
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN;
+
+/* Bridge out structure for DevmemIntUnpin */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN;
+
+
+/*******************************************
+ DevmemIntPinValidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE_TAG
+{
+ IMG_HANDLE hMapping;
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE;
+
+/* Bridge out structure for DevmemIntPinValidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE;
+
+
+/*******************************************
+ DevmemIntUnpinInvalidate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE_TAG
+{
+ IMG_HANDLE hMapping;
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE;
+
+/* Bridge out structure for DevmemIntUnpinInvalidate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE;
+
+
+/*******************************************
+ DevmemIntCtxCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE_TAG
+{
+ IMG_BOOL bbKernelMemoryCtx;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE;
+
+/* Bridge out structure for DevmemIntCtxCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE_TAG
+{
+ IMG_HANDLE hDevMemServerContext;
+ IMG_HANDLE hPrivData;
+ IMG_UINT32 ui32CPUCacheLineSize;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE;
+
+
+/*******************************************
+ DevmemIntCtxDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY_TAG
+{
+ IMG_HANDLE hDevmemServerContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY;
+
+/* Bridge out structure for DevmemIntCtxDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY;
+
+
+/*******************************************
+ DevmemIntHeapCreate
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_DEV_VIRTADDR sHeapBaseAddr;
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_UINT32 ui32Log2DataPageSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE;
+
+/* Bridge out structure for DevmemIntHeapCreate */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE_TAG
+{
+ IMG_HANDLE hDevmemHeapPtr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE;
+
+
+/*******************************************
+ DevmemIntHeapDestroy
+ *******************************************/
+
+/* Bridge in structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY_TAG
+{
+ IMG_HANDLE hDevmemHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY;
+
+/* Bridge out structure for DevmemIntHeapDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY;
+
+
+/*******************************************
+ DevmemIntMapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR_TAG
+{
+ IMG_HANDLE hDevmemServerHeap;
+ IMG_HANDLE hReservation;
+ IMG_HANDLE hPMR;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR;
+
+/* Bridge out structure for DevmemIntMapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR_TAG
+{
+ IMG_HANDLE hMapping;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR;
+
+
+/*******************************************
+ DevmemIntUnmapPMR
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR_TAG
+{
+ IMG_HANDLE hMapping;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR;
+
+/* Bridge out structure for DevmemIntUnmapPMR */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR;
+
+
+/*******************************************
+ DevmemIntReserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE_TAG
+{
+ IMG_HANDLE hDevmemServerHeap;
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiLength;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE;
+
+/* Bridge out structure for DevmemIntReserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE;
+
+
+/*******************************************
+ DevmemIntUnreserveRange
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE_TAG
+{
+ IMG_HANDLE hReservation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE;
+
+/* Bridge out structure for DevmemIntUnreserveRange */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE;
+
+
+/*******************************************
+ ChangeSparseMem
+ *******************************************/
+
+/* Bridge in structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_IN_CHANGESPARSEMEM_TAG
+{
+ IMG_HANDLE hSrvDevMemHeap;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32AllocPageCount;
+ IMG_UINT32 * pui32AllocPageIndices;
+ IMG_UINT32 ui32FreePageCount;
+ IMG_UINT32 * pui32FreePageIndices;
+ IMG_UINT32 ui32SparseFlags;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT64 ui64CPUVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CHANGESPARSEMEM;
+
+/* Bridge out structure for ChangeSparseMem */
+typedef struct PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM;
+
+
+/*******************************************
+ DevmemIntMapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_HANDLE hPMR;
+ IMG_UINT32 ui32PageCount;
+ IMG_UINT32 ui32PhysicalPgOffset;
+ PVRSRV_MEMALLOCFLAGS_T uiFlags;
+ IMG_DEV_VIRTADDR sDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES;
+
+/* Bridge out structure for DevmemIntMapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES;
+
+
+/*******************************************
+ DevmemIntUnmapPages
+ *******************************************/
+
+/* Bridge in structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_UINT32 ui32PageCount;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES;
+
+/* Bridge out structure for DevmemIntUnmapPages */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES;
+
+
+/*******************************************
+ DevmemIsVDevAddrValid
+ *******************************************/
+
+/* Bridge in structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_DEV_VIRTADDR sAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID;
+
+/* Bridge out structure for DevmemIsVDevAddrValid */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID;
+
+
+/*******************************************
+ HeapCfgHeapConfigCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT;
+
+/* Bridge out structure for HeapCfgHeapConfigCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT_TAG
+{
+ IMG_UINT32 ui32NumHeapConfigs;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT;
+
+
+/*******************************************
+ HeapCfgHeapCount
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT;
+
+/* Bridge out structure for HeapCfgHeapCount */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT_TAG
+{
+ IMG_UINT32 ui32NumHeaps;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT;
+
+
+/*******************************************
+ HeapCfgHeapConfigName
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapConfigNameBufSz;
+ /* Output pointer puiHeapConfigName is also an implied input */
+ IMG_CHAR * puiHeapConfigName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME;
+
+/* Bridge out structure for HeapCfgHeapConfigName */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME_TAG
+{
+ IMG_CHAR * puiHeapConfigName;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME;
+
+
+/*******************************************
+ HeapCfgHeapDetails
+ *******************************************/
+
+/* Bridge in structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS_TAG
+{
+ IMG_UINT32 ui32HeapConfigIndex;
+ IMG_UINT32 ui32HeapIndex;
+ IMG_UINT32 ui32HeapNameBufSz;
+ /* Output pointer puiHeapNameOut is also an implied input */
+ IMG_CHAR * puiHeapNameOut;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS;
+
+/* Bridge out structure for HeapCfgHeapDetails */
+typedef struct PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS_TAG
+{
+ IMG_CHAR * puiHeapNameOut;
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_UINT32 ui32Log2DataPageSizeOut;
+ IMG_UINT32 ui32Log2ImportAlignmentOut;
+ IMG_UINT32 ui32Log2TilingStrideFactorOut;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS;
+
+
+/*******************************************
+ DevmemIntRegisterPFNotifyKM
+ *******************************************/
+
+/* Bridge in structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32PID;
+ IMG_BOOL bRegister;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM;
+
+/* Bridge out structure for DevmemIntRegisterPFNotifyKM */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM;
+
+
+/*******************************************
+ GetMaxDevMemSize
+ *******************************************/
+
+/* Bridge in structure for GetMaxDevMemSize */
+typedef struct PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE;
+
+/* Bridge out structure for GetMaxDevMemSize */
+typedef struct PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE_TAG
+{
+ IMG_DEVMEM_SIZE_T uiLMASize;
+ IMG_DEVMEM_SIZE_T uiUMASize;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE;
+
+
+/*******************************************
+ DevmemGetFaultAddress
+ *******************************************/
+
+/* Bridge in structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS;
+
+/* Bridge out structure for DevmemGetFaultAddress */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS_TAG
+{
+ IMG_DEV_VIRTADDR sFaultAddress;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS;
+
+
+#endif /* COMMON_MM_BRIDGE_H */
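
Every call in this header follows one convention: the client fills a packed IN structure, the server replies with a packed OUT structure whose final member is eError, and any pointer flagged as "also an implied input" is a caller-allocated buffer whose size travels in the IN structure. A minimal client-side sketch of HeapCfgHeapConfigName under that convention, assuming a hypothetical BridgeCall() dispatcher and leaving the command ID as a placeholder (neither is declared in this header):

#include <string.h>
#include "common_mm_bridge.h"

/* Hypothetical dispatcher: marshals psIn to the server side of the
 * bridge and fills psOut; the real driver routes this through ioctl. */
extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *psIn, void *psOut);

static PVRSRV_ERROR GetHeapConfigName(IMG_UINT32 ui32ConfigIndex,
                                      IMG_CHAR *pszBuf,
                                      IMG_UINT32 ui32BufSz)
{
	PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME sIn;
	PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME sOut;

	memset(&sIn, 0, sizeof(sIn));
	memset(&sOut, 0, sizeof(sOut));

	sIn.ui32HeapConfigIndex = ui32ConfigIndex;
	sIn.ui32HeapConfigNameBufSz = ui32BufSz;
	/* Implied input: the caller owns this buffer and the server
	 * writes at most ui32HeapConfigNameBufSz bytes back through it. */
	sIn.puiHeapConfigName = pszBuf;

	BridgeCall(0 /* assumed HeapCfgHeapConfigName command ID */,
	           &sIn, &sOut);

	return sOut.eError;   /* PVRSRV_OK on success */
}

The same buffer convention recurs below wherever a comment marks an output pointer as an implied input, for example puiMemspaceName in the pdumpmm header and puiStreams in the pvrtl header.
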
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_pdump_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_pdump_bridge.h
new file mode 100644
index 00000000000000..fa8996837282c6
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_pdump_bridge.h
@@ -0,0 +1,160 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMP_BRIDGE_H
+#define COMMON_PDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include <powervr/buffer_attribs.h>
+
+
+#define PVRSRV_BRIDGE_PDUMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP PVRSRV_BRIDGE_PDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR PVRSRV_BRIDGE_PDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT PVRSRV_BRIDGE_PDUMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMP_CMD_LAST (PVRSRV_BRIDGE_PDUMP_CMD_FIRST+3)
+
+
+/*******************************************
+ DevmemPDumpBitmap
+ *******************************************/
+
+/* Bridge in structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP_TAG
+{
+ IMG_CHAR * puiFileName;
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32Width;
+ IMG_UINT32 ui32Height;
+ IMG_UINT32 ui32StrideInBytes;
+ IMG_DEV_VIRTADDR sDevBaseAddr;
+ IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32Size;
+ PDUMP_PIXEL_FORMAT ePixelFormat;
+ IMG_UINT32 ui32AddrMode;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP;
+
+/* Bridge out structure for DevmemPDumpBitmap */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP;
+
+
+/*******************************************
+ PDumpImageDescriptor
+ *******************************************/
+
+/* Bridge in structure for PDumpImageDescriptor */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR_TAG
+{
+ IMG_HANDLE hDevmemCtx;
+ IMG_UINT32 ui32StringSize;
+ const IMG_CHAR * puiFileName;
+ IMG_DEV_VIRTADDR sDataDevAddr;
+ IMG_UINT32 ui32DataSize;
+ IMG_UINT32 ui32LogicalWidth;
+ IMG_UINT32 ui32LogicalHeight;
+ IMG_UINT32 ui32PhysicalWidth;
+ IMG_UINT32 ui32PhysicalHeight;
+ PDUMP_PIXEL_FORMAT ePixelFormat;
+ IMG_MEMLAYOUT eMemLayout;
+ IMG_FB_COMPRESSION eFBCompression;
+ const IMG_UINT32 * pui32FBCClearColour;
+ IMG_DEV_VIRTADDR sHeaderDevAddr;
+ IMG_UINT32 ui32HeaderSize;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR;
+
+/* Bridge out structure for PDumpImageDescriptor */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR;
+
+
+/*******************************************
+ PVRSRVPDumpComment
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT_TAG
+{
+ IMG_CHAR * puiComment;
+ IMG_UINT32 ui32Flags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT;
+
+/* Bridge out structure for PVRSRVPDumpComment */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT;
+
+
+/*******************************************
+ PVRSRVPDumpSetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME_TAG
+{
+ IMG_UINT32 ui32Frame;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpSetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME;
+
+
+#endif /* COMMON_PDUMP_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_pdumpctrl_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_pdumpctrl_bridge.h
new file mode 100644
index 00000000000000..70ebcdaa665731
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_pdumpctrl_bridge.h
@@ -0,0 +1,138 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMPCTRL_BRIDGE_H
+#define COMMON_PDUMPCTRL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST (PVRSRV_BRIDGE_PDUMPCTRL_CMD_FIRST+3)
+
+
+/*******************************************
+ PVRSRVPDumpGetState
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetState */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE;
+
+/* Bridge out structure for PVRSRVPDumpGetState */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE_TAG
+{
+ IMG_UINT64 ui64State;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE;
+
+
+/*******************************************
+ PVRSRVPDumpGetFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME;
+
+/* Bridge out structure for PVRSRVPDumpGetFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME_TAG
+{
+ IMG_UINT32 ui32Frame;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME;
+
+
+/*******************************************
+ PVRSRVPDumpSetDefaultCaptureParams
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+ IMG_UINT32 ui32Mode;
+ IMG_UINT32 ui32Start;
+ IMG_UINT32 ui32End;
+ IMG_UINT32 ui32Interval;
+ IMG_UINT32 ui32MaxParamFileSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+/* Bridge out structure for PVRSRVPDumpSetDefaultCaptureParams */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS;
+
+
+/*******************************************
+ PVRSRVPDumpIsLastCaptureFrame
+ *******************************************/
+
+/* Bridge in structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+/* Bridge out structure for PVRSRVPDumpIsLastCaptureFrame */
+typedef struct PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME_TAG
+{
+ IMG_BOOL bpbIsLastCaptureFrame;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME;
+
+
+#endif /* COMMON_PDUMPCTRL_BRIDGE_H */
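
Two patterns above are worth noting. Calls with no real arguments, such as PVRSRVPDumpGetFrame, still carry a one-member IN structure so the bridge always exchanges a non-empty packed payload, and the CMD_FIRST/CMD_LAST pair bounds the dispatch table (here CMD_LAST - CMD_FIRST + 1 = 4 entries). A sketch of the parameterless case, reusing the hypothetical BridgeCall() from the earlier sketch:

#include <string.h>
#include "common_pdumpctrl_bridge.h"

/* Hypothetical dispatcher, as in the mm bridge sketch above. */
extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *psIn, void *psOut);

static PVRSRV_ERROR GetPDumpFrame(IMG_UINT32 *pui32Frame)
{
	PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME sIn;
	PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME sOut;

	memset(&sIn, 0, sizeof(sIn));   /* placeholder member stays zero */
	memset(&sOut, 0, sizeof(sOut));

	BridgeCall(PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME,
	           &sIn, &sOut);

	if (sOut.eError == PVRSRV_OK)
		*pui32Frame = sOut.ui32Frame;

	return sOut.eError;
}
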
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_pdumpmm_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_pdumpmm_bridge.h
new file mode 100644
index 00000000000000..e9960fe2f1595c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_pdumpmm_bridge.h
@@ -0,0 +1,248 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PDUMPMM_BRIDGE_H
+#define COMMON_PDUMPMM_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32 PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PDUMPMM_CMD_LAST (PVRSRV_BRIDGE_PDUMPMM_CMD_FIRST+7)
+
+
+/*******************************************
+ PMRPDumpLoadMem
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_BOOL bbZero;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM;
+
+/* Bridge out structure for PMRPDumpLoadMem */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM;
+
+
+/*******************************************
+ PMRPDumpLoadMemValue32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32;
+
+/* Bridge out structure for PMRPDumpLoadMemValue32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32;
+
+
+/*******************************************
+ PMRPDumpLoadMemValue64
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT64 ui64Value;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64;
+
+/* Bridge out structure for PMRPDumpLoadMemValue64 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64;
+
+
+/*******************************************
+ PMRPDumpSaveToFile
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32ArraySize;
+ const IMG_CHAR * puiFileName;
+ IMG_UINT32 ui32uiFileOffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE;
+
+/* Bridge out structure for PMRPDumpSaveToFile */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE;
+
+
+/*******************************************
+ PMRPDumpSymbolicAddr
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32MemspaceNameLen;
+ IMG_UINT32 ui32SymbolicAddrLen;
+ /* Output pointer puiMemspaceName is also an implied input */
+ IMG_CHAR * puiMemspaceName;
+ /* Output pointer puiSymbolicAddr is also an implied input */
+ IMG_CHAR * puiSymbolicAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR;
+
+/* Bridge out structure for PMRPDumpSymbolicAddr */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR_TAG
+{
+ IMG_CHAR * puiMemspaceName;
+ IMG_CHAR * puiSymbolicAddr;
+ IMG_DEVMEM_OFFSET_T uiNewOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR;
+
+
+/*******************************************
+ PMRPDumpPol32
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPPOL32_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ PDUMP_POLL_OPERATOR eOperator;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPPOL32;
+
+/* Bridge out structure for PMRPDumpPol32 */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32;
+
+
+/*******************************************
+ PMRPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_PMRPDUMPCBP_TAG
+{
+ IMG_HANDLE hPMR;
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+ IMG_DEVMEM_OFFSET_T uiWriteOffset;
+ IMG_DEVMEM_SIZE_T uiPacketSize;
+ IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PMRPDUMPCBP;
+
+/* Bridge out structure for PMRPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_PMRPDUMPCBP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PMRPDUMPCBP;
+
+
+/*******************************************
+ DevmemIntPDumpSaveToFileVirtual
+ *******************************************/
+
+/* Bridge in structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+ IMG_HANDLE hDevmemServerContext;
+ IMG_DEV_VIRTADDR sAddress;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32ArraySize;
+ const IMG_CHAR * puiFileName;
+ IMG_UINT32 ui32FileOffset;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+/* Bridge out structure for DevmemIntPDumpSaveToFileVirtual */
+typedef struct PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL;
+
+
+#endif /* COMMON_PDUMPMM_BRIDGE_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_pvrtl_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_pvrtl_bridge.h
new file mode 100644
index 00000000000000..3895c9cfa3c17f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_pvrtl_bridge.h
@@ -0,0 +1,224 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_PVRTL_BRIDGE_H
+#define COMMON_PVRTL_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+
+
+#define PVRSRV_BRIDGE_PVRTL_CMD_FIRST 0
+#define PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+0
+#define PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+1
+#define PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+2
+#define PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+3
+#define PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS PVRSRV_BRIDGE_PVRTL_CMD_FIRST+4
+#define PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+5
+#define PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM PVRSRV_BRIDGE_PVRTL_CMD_FIRST+6
+#define PVRSRV_BRIDGE_PVRTL_TLWRITEDATA PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7
+#define PVRSRV_BRIDGE_PVRTL_CMD_LAST (PVRSRV_BRIDGE_PVRTL_CMD_FIRST+7)
+
+
+/*******************************************
+ TLOpenStream
+ *******************************************/
+
+/* Bridge in structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_IN_TLOPENSTREAM_TAG
+{
+ const IMG_CHAR * puiName;
+ IMG_UINT32 ui32Mode;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLOPENSTREAM;
+
+/* Bridge out structure for TLOpenStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLOPENSTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_HANDLE hTLPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLOPENSTREAM;
+
+
+/*******************************************
+ TLCloseStream
+ *******************************************/
+
+/* Bridge in structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCLOSESTREAM_TAG
+{
+ IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCLOSESTREAM;
+
+/* Bridge out structure for TLCloseStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCLOSESTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCLOSESTREAM;
+
+
+/*******************************************
+ TLAcquireData
+ *******************************************/
+
+/* Bridge in structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_IN_TLACQUIREDATA_TAG
+{
+ IMG_HANDLE hSD;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLACQUIREDATA;
+
+/* Bridge out structure for TLAcquireData */
+typedef struct PVRSRV_BRIDGE_OUT_TLACQUIREDATA_TAG
+{
+ IMG_UINT32 ui32ReadOffset;
+ IMG_UINT32 ui32ReadLen;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLACQUIREDATA;
+
+
+/*******************************************
+ TLReleaseData
+ *******************************************/
+
+/* Bridge in structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_IN_TLRELEASEDATA_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32ReadOffset;
+ IMG_UINT32 ui32ReadLen;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRELEASEDATA;
+
+/* Bridge out structure for TLReleaseData */
+typedef struct PVRSRV_BRIDGE_OUT_TLRELEASEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRELEASEDATA;
+
+
+/*******************************************
+ TLDiscoverStreams
+ *******************************************/
+
+/* Bridge in structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS_TAG
+{
+ const IMG_CHAR * puiNamePattern;
+ IMG_UINT32 ui32Size;
+ /* Output pointer puiStreams is also an implied input */
+ IMG_CHAR * puiStreams;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS;
+
+/* Bridge out structure for TLDiscoverStreams */
+typedef struct PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS_TAG
+{
+ IMG_CHAR * puiStreams;
+ IMG_UINT32 ui32NumFound;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS;
+
+
+/*******************************************
+ TLReserveStream
+ *******************************************/
+
+/* Bridge in structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_IN_TLRESERVESTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32SizeMin;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLRESERVESTREAM;
+
+/* Bridge out structure for TLReserveStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLRESERVESTREAM_TAG
+{
+ IMG_UINT32 ui32BufferOffset;
+ IMG_UINT32 ui32Available;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLRESERVESTREAM;
+
+
+/*******************************************
+ TLCommitStream
+ *******************************************/
+
+/* Bridge in structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_IN_TLCOMMITSTREAM_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32ReqSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLCOMMITSTREAM;
+
+/* Bridge out structure for TLCommitStream */
+typedef struct PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM;
+
+
+/*******************************************
+ TLWriteData
+ *******************************************/
+
+/* Bridge in structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_IN_TLWRITEDATA_TAG
+{
+ IMG_HANDLE hSD;
+ IMG_UINT32 ui32Size;
+ IMG_BYTE * psData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_TLWRITEDATA;
+
+/* Bridge out structure for TLWriteData */
+typedef struct PVRSRV_BRIDGE_OUT_TLWRITEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_TLWRITEDATA;
+
+
+#endif /* COMMON_PVRTL_BRIDGE_H */
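
TLAcquireData and TLReleaseData are designed as a pair: acquire returns a window (offset and length) into the stream buffer exposed through hTLPMR, and release hands that exact window back so the server can recycle it. A read loop under that contract, with BridgeCall() again standing in for the real dispatch, the buffer mapping elided, and the zero-length exit condition taken as an assumption:

#include <string.h>
#include "common_pvrtl_bridge.h"

/* Hypothetical dispatcher, as in the earlier sketches. */
extern PVRSRV_ERROR BridgeCall(IMG_UINT32 ui32Cmd, void *psIn, void *psOut);

static PVRSRV_ERROR DrainStream(IMG_HANDLE hSD)
{
	for (;;)
	{
		PVRSRV_BRIDGE_IN_TLACQUIREDATA sAcqIn;
		PVRSRV_BRIDGE_OUT_TLACQUIREDATA sAcqOut;
		PVRSRV_BRIDGE_IN_TLRELEASEDATA sRelIn;
		PVRSRV_BRIDGE_OUT_TLRELEASEDATA sRelOut;

		memset(&sAcqIn, 0, sizeof(sAcqIn));
		memset(&sAcqOut, 0, sizeof(sAcqOut));
		sAcqIn.hSD = hSD;

		BridgeCall(PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, &sAcqIn, &sAcqOut);
		if (sAcqOut.eError != PVRSRV_OK)
			return sAcqOut.eError;
		if (sAcqOut.ui32ReadLen == 0)
			return PVRSRV_OK;   /* assumed: nothing left to read */

		/* ...consume ui32ReadLen bytes at ui32ReadOffset in the
		 * buffer mapped from hTLPMR (mapping not shown)... */

		/* Hand the same window back so the server can reuse it. */
		memset(&sRelIn, 0, sizeof(sRelIn));
		memset(&sRelOut, 0, sizeof(sRelOut));
		sRelIn.hSD = hSD;
		sRelIn.ui32ReadOffset = sAcqOut.ui32ReadOffset;
		sRelIn.ui32ReadLen = sAcqOut.ui32ReadLen;

		BridgeCall(PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, &sRelIn, &sRelOut);
		if (sRelOut.eError != PVRSRV_OK)
			return sRelOut.eError;
	}
}
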
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_regconfig_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_regconfig_bridge.h
new file mode 100644
index 00000000000000..397a600b0e4260
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_regconfig_bridge.h
@@ -0,0 +1,152 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for regconfig
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for regconfig
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_REGCONFIG_BRIDGE_H
+#define COMMON_REGCONFIG_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST 0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+0
+#define PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+1
+#define PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+2
+#define PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+3
+#define PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4
+#define PVRSRV_BRIDGE_REGCONFIG_CMD_LAST (PVRSRV_BRIDGE_REGCONFIG_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXSetRegConfigType
+ *******************************************/
+
+/* Bridge in structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE_TAG
+{
+ IMG_UINT8 ui8RegPowerIsland;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE;
+
+/* Bridge out structure for RGXSetRegConfigType */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE;
+
+
+/*******************************************
+ RGXAddRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXAddRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXADDREGCONFIG_TAG
+{
+ IMG_UINT32 ui32RegAddr;
+ IMG_UINT64 ui64RegValue;
+ IMG_UINT64 ui64RegMask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXADDREGCONFIG;
+
+/* Bridge out structure for RGXAddRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG;
+
+
+/*******************************************
+ RGXClearRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG;
+
+/* Bridge out structure for RGXClearRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG;
+
+
+/*******************************************
+ RGXEnableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG;
+
+/* Bridge out structure for RGXEnableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG;
+
+
+/*******************************************
+ RGXDisableRegConfig
+ *******************************************/
+
+/* Bridge in structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG;
+
+/* Bridge out structure for RGXDisableRegConfig */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG;
+
+
+#endif /* COMMON_REGCONFIG_BRIDGE_H */
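
All of these structures are declared __attribute__((packed)), which removes compiler-inserted padding so that both sides of the bridge, built for the same ABI, compute identical member offsets. A compile-time check over one structure from this header makes the resulting layout explicit (C11 _Static_assert, shown purely as an illustration):

#include <stddef.h>
#include "common_regconfig_bridge.h"

/* Packed: a 4-byte register address immediately followed by the 8-byte
 * value and 8-byte mask, with no padding holes between members. */
_Static_assert(offsetof(PVRSRV_BRIDGE_IN_RGXADDREGCONFIG, ui64RegValue) == 4,
               "ui64RegValue must directly follow ui32RegAddr");
_Static_assert(sizeof(PVRSRV_BRIDGE_IN_RGXADDREGCONFIG) == 4 + 8 + 8,
               "packed bridge structure must carry no padding");

Structures that carry pointers still differ in size between 32-bit and 64-bit builds, so packing alone does not make the bridge ABI-independent; it only makes the layout deterministic for a given build.
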
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxcmp_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxcmp_bridge.h
new file mode 100644
index 00000000000000..47a85761c6e2a8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxcmp_bridge.h
@@ -0,0 +1,217 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxcmp
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxcmp
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXCMP_BRIDGE_H
+#define COMMON_RGXCMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXCMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXCMP_CMD_LAST (PVRSRV_BRIDGE_RGXCMP_CMD_FIRST+6)
+
+
+/*******************************************
+ RGXCreateComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+ IMG_DEV_VIRTADDR sResumeSignalAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT;
+
+/* Bridge out structure for RGXCreateComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT_TAG
+{
+ IMG_HANDLE hComputeContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT;
+
+
+/*******************************************
+ RGXDestroyComputeContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT;
+
+/* Bridge out structure for RGXDestroyComputeContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT;
+
+
+/*******************************************
+ RGXKickCDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKCDM_TAG
+{
+ IMG_HANDLE hComputeContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientFenceOffset;
+ IMG_UINT32 * pui32ClientFenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientUpdateOffset;
+ IMG_UINT32 * pui32ClientUpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSyncs;
+ PVRSRV_FENCE hCheckFenceFd;
+ PVRSRV_TIMELINE hUpdateTimeline;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32CmdSize;
+ IMG_BYTE * psDMCmd;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKCDM;
+
+/* Bridge out structure for RGXKickCDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKCDM_TAG
+{
+ PVRSRV_FENCE hUpdateFence;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKCDM;
+
+
+/*******************************************
+ RGXFlushComputeData
+ *******************************************/
+
+/* Bridge in structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA;
+
+/* Bridge out structure for RGXFlushComputeData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA;
+
+
+/*******************************************
+ RGXSetComputeContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hComputeContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetComputeContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY;
+
+
+/*******************************************
+ RGXGetLastComputeContextResetReason
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastComputeContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON_TAG
+{
+ IMG_UINT32 ui32LastResetReason;
+ IMG_UINT32 ui32LastResetJobRef;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON;
+
+
+/*******************************************
+ RGXNotifyComputeWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+ IMG_HANDLE hComputeContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXNotifyComputeWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE;
+
+
+#endif /* COMMON_RGXCMP_BRIDGE_H */
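
Every bridge entry point declared above follows a single calling convention: the caller fills a PVRSRV_BRIDGE_IN_* structure, the server performs the operation, and the result comes back in the matching PVRSRV_BRIDGE_OUT_* structure, whose final member is always the PVRSRV_ERROR status. A minimal sketch of that round trip, using simplified stand-in types and a hypothetical dispatcher in place of the driver's real ioctl path:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the IMG types; real clients include
 * common_rgxcmp_bridge.h instead of redefining anything. */
typedef uint32_t IMG_UINT32;
typedef void    *IMG_HANDLE;
typedef int32_t  PVRSRV_ERROR;             /* 0 plays PVRSRV_OK here */

typedef struct { IMG_HANDLE hComputeContext; } IN_FLUSH_SKETCH;
typedef struct { PVRSRV_ERROR eError; }        OUT_FLUSH_SKETCH;

/* Hypothetical dispatcher standing in for the services ioctl path: the
 * real bridge copies the in buffer to the kernel, runs the handler, and
 * copies the out buffer (ending in eError) back to the caller. */
static void BridgeDispatch_Sketch(const IN_FLUSH_SKETCH *psIn,
                                  OUT_FLUSH_SKETCH *psOut)
{
    (void)psIn;
    psOut->eError = 0;
}

int main(void)
{
    IN_FLUSH_SKETCH  sIn  = { .hComputeContext = (IMG_HANDLE)0x1 };
    OUT_FLUSH_SKETCH sOut = { .eError = -1 };

    BridgeDispatch_Sketch(&sIn, &sOut);
    printf("RGXFlushComputeData -> eError=%d\n", (int)sOut.eError);
    return 0;
}
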
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxhwperf_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxhwperf_bridge.h
new file mode 100644
index 00000000000000..277d931ae349ce
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxhwperf_bridge.h
@@ -0,0 +1,159 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxhwperf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxhwperf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXHWPERF_BRIDGE_H
+#define COMMON_RGXHWPERF_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "rgx_hwperf.h"
+
+
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXCtrlHWPerf
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERF_TAG
+{
+ IMG_UINT32 ui32StreamId;
+ IMG_BOOL bToggle;
+ IMG_UINT64 ui64Mask;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERF;
+
+/* Bridge out structure for RGXCtrlHWPerf */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF;
+
+
+/*******************************************
+ RGXConfigEnableHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+ IMG_UINT32 ui32ArrayLen;
+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXConfigEnableHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS;
+
+
+/*******************************************
+ RGXCtrlHWPerfCounters
+ *******************************************/
+
+/* Bridge in structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS_TAG
+{
+ IMG_BOOL bEnable;
+ IMG_UINT32 ui32ArrayLen;
+ IMG_UINT16 * pui16BlockIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS;
+
+/* Bridge out structure for RGXCtrlHWPerfCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS;
+
+
+/*******************************************
+ RGXConfigCustomCounters
+ *******************************************/
+
+/* Bridge in structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+ IMG_UINT16 ui16CustomBlockID;
+ IMG_UINT16 ui16NumCustomCounters;
+ IMG_UINT32 * pui32CustomCounterIDs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS;
+
+/* Bridge out structure for RGXConfigCustomCounters */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS;
+
+
+/*******************************************
+ RGXGetHWPerfBvncFeatureFlags
+ *******************************************/
+
+/* Bridge in structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+/* Bridge out structure for RGXGetHWPerfBvncFeatureFlags */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS_TAG
+{
+ IMG_UINT32 ui32FeatureFlags;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS;
+
+
+#endif /* COMMON_RGXHWPERF_BRIDGE_H */
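
The command numbering above is dense: each entry is CMD_FIRST plus a consecutive offset, and CMD_LAST names the final offset, so anything sized from the pair tracks the command list automatically. A small self-contained illustration (only the two bracketing defines are copied from the header; the name table is illustrative):

#include <stdio.h>

/* The two bracketing defines, copied from the header above. */
#define PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST 0
#define PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST  (PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST+4)

#define RGXHWPERF_CMD_COUNT \
    (PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST - PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST + 1)

/* One name per command, in header order; sizing the table with the
 * derived count keeps it in step with the defines. */
static const char *apszCmds[RGXHWPERF_CMD_COUNT] = {
    "RGXCtrlHWPerf",
    "RGXConfigEnableHWPerfCounters",
    "RGXCtrlHWPerfCounters",
    "RGXConfigCustomCounters",
    "RGXGetHWPerfBvncFeatureFlags",
};

int main(void)
{
    for (int i = 0; i < RGXHWPERF_CMD_COUNT; i++)
        printf("%2d  %s\n", PVRSRV_BRIDGE_RGXHWPERF_CMD_FIRST + i, apszCmds[i]);
    return 0;
}
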
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxkicksync_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxkicksync_bridge.h
new file mode 100644
index 00000000000000..b9970d70ad2266
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxkicksync_bridge.h
@@ -0,0 +1,134 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXKICKSYNC_BRIDGE_H
+#define COMMON_RGXKICKSYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_CMD_FIRST+2)
+
+
+/*******************************************
+ RGXCreateKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXCreateKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hKickSyncContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT;
+
+
+/*******************************************
+ RGXDestroyKickSyncContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+ IMG_HANDLE hKickSyncContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT;
+
+/* Bridge out structure for RGXDestroyKickSyncContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT;
+
+
+/*******************************************
+ RGXKickSync
+ *******************************************/
+
+/* Bridge in structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKSYNC_TAG
+{
+ IMG_HANDLE hKickSyncContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32FenceSyncOffset;
+ IMG_UINT32 * pui32FenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32UpdateSyncOffset;
+ IMG_UINT32 * pui32UpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSync;
+ PVRSRV_FENCE hCheckFenceFD;
+ PVRSRV_TIMELINE hTimelineFenceFD;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32ExtJobRef;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKSYNC;
+
+/* Bridge out structure for RGXKickSync */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKSYNC_TAG
+{
+ PVRSRV_FENCE hUpdateFenceFD;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKSYNC;
+
+
+#endif /* COMMON_RGXKICKSYNC_BRIDGE_H */
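
The fence and update fields of PVRSRV_BRIDGE_IN_RGXKICKSYNC use the bridge's parallel-array convention: a single count (ui32ClientFenceCount, ui32ClientUpdateCount) governs several pointer arrays of that length, one sync-prim block handle, offset, and value per entry. A sketch of keeping those arrays in step, with stand-in types rather than the real IMG_* definitions:

#include <stdint.h>
#include <stdlib.h>

typedef uint32_t IMG_UINT32;
typedef void    *IMG_HANDLE;

/* Stand-in for the fence-related slice of PVRSRV_BRIDGE_IN_RGXKICKSYNC. */
typedef struct {
    IMG_UINT32  ui32ClientFenceCount;
    IMG_HANDLE *phFenceUFOSyncPrimBlock;
    IMG_UINT32 *pui32FenceSyncOffset;
    IMG_UINT32 *pui32FenceValue;
} KICKSYNC_FENCES_SKETCH;

/* Allocate the three parallel arrays from one count so they cannot
 * disagree about length. */
static int AllocFences(KICKSYNC_FENCES_SKETCH *psF, IMG_UINT32 ui32Count)
{
    psF->ui32ClientFenceCount    = ui32Count;
    psF->phFenceUFOSyncPrimBlock = calloc(ui32Count, sizeof(IMG_HANDLE));
    psF->pui32FenceSyncOffset    = calloc(ui32Count, sizeof(IMG_UINT32));
    psF->pui32FenceValue         = calloc(ui32Count, sizeof(IMG_UINT32));
    return (psF->phFenceUFOSyncPrimBlock &&
            psF->pui32FenceSyncOffset &&
            psF->pui32FenceValue) ? 0 : -1;
}

int main(void)
{
    KICKSYNC_FENCES_SKETCH sFences;
    if (AllocFences(&sFences, 4) != 0)
        return 1;
    /* Entry i of each array describes fence i: which sync-prim block it
     * lives in, the offset within that block, and the value to wait for. */
    sFences.pui32FenceSyncOffset[0] = 0;
    sFences.pui32FenceValue[0]      = 1;
    free(sFences.phFenceUFOSyncPrimBlock);
    free(sFences.pui32FenceSyncOffset);
    free(sFences.pui32FenceValue);
    return 0;
}
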
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxpdump_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxpdump_bridge.h
new file mode 100644
index 00000000000000..ee4bb44a8a4561
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxpdump_bridge.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXPDUMP_BRIDGE_H
+#define COMMON_RGXPDUMP_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST (PVRSRV_BRIDGE_RGXPDUMP_CMD_FIRST+1)
+
+
+/*******************************************
+ PDumpTraceBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER_TAG
+{
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER;
+
+/* Bridge out structure for PDumpTraceBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER;
+
+
+/*******************************************
+ PDumpSignatureBuffer
+ *******************************************/
+
+/* Bridge in structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER_TAG
+{
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER;
+
+/* Bridge out structure for PDumpSignatureBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER;
+
+
+#endif /* COMMON_RGXPDUMP_BRIDGE_H */
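
All of these bridge structures are declared __attribute__((packed)), so both sides of the user/kernel boundary agree on layout regardless of each side's natural alignment. The effect is visible with any struct mixing a 32-bit and a 64-bit member (stand-in names below, mirroring the headers' typedef style):

#include <stdint.h>
#include <stdio.h>

/* Same members, with and without packing (illustrative, not the
 * driver's own types). */
typedef uint32_t IMG_UINT32;
typedef uint64_t IMG_UINT64;

typedef struct { IMG_UINT32 ui32Flags; IMG_UINT64 ui64Mask; } NATURAL_LAYOUT;
typedef struct { IMG_UINT32 ui32Flags; IMG_UINT64 ui64Mask; } __attribute__((packed)) PACKED_LAYOUT;

int main(void)
{
    /* On a typical LP64 ABI this prints 16 then 12: the natural layout
     * pads ui64Mask to an 8-byte boundary, the packed one does not. */
    printf("natural: %zu bytes\n", sizeof(NATURAL_LAYOUT));
    printf("packed:  %zu bytes\n", sizeof(PACKED_LAYOUT));
    return 0;
}
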
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxray_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxray_bridge.h
new file mode 100644
index 00000000000000..12d7ef30ebf336
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxray_bridge.h
@@ -0,0 +1,291 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxray
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxray
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXRAY_BRIDGE_H
+#define COMMON_RGXRAY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include "pvrsrv_devmem.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXRAY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKRS PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXRAY_CMD_LAST (PVRSRV_BRIDGE_RGXRAY_CMD_FIRST+8)
+
+
+/*******************************************
+ RGXCreateRPMFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRPMFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST_TAG
+{
+ IMG_HANDLE hRPMContext;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_DEV_VIRTADDR sFreeListDevVAddr;
+ IMG_BOOL bIsExternal;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST;
+
+/* Bridge out structure for RGXCreateRPMFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ IMG_UINT32 ui32HWFreeList;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST;
+
+
+/*******************************************
+ RGXDestroyRPMFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRPMFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST;
+
+/* Bridge out structure for RGXDestroyRPMFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST;
+
+
+/*******************************************
+ RGXCreateRPMContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRPMContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT_TAG
+{
+ IMG_UINT32 ui32TotalRPMPages;
+ IMG_UINT32 ui32Log2DopplerPageSize;
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr;
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr;
+ IMG_HANDLE hSceneHeap;
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr;
+ IMG_HANDLE hRPMPageTableHeap;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT;
+
+/* Bridge out structure for RGXCreateRPMContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ IMG_HANDLE hHWMemDesc;
+ IMG_UINT32 ui32HWFrameData;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT;
+
+
+/*******************************************
+ RGXDestroyRPMContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRPMContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT;
+
+/* Bridge out structure for RGXDestroyRPMContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT;
+
+
+/*******************************************
+ RGXKickRS
+ *******************************************/
+
+/* Bridge in structure for RGXKickRS */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKRS_TAG
+{
+ IMG_HANDLE hRayContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientFenceSyncOffset;
+ IMG_UINT32 * pui32ClientFenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientUpdateSyncOffset;
+ IMG_UINT32 * pui32ClientUpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSyncs;
+ PVRSRV_FENCE hCheckFenceFD;
+ PVRSRV_TIMELINE hUpdateTimeline;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32CmdSize;
+ IMG_BYTE * psDMCmd;
+ IMG_UINT32 ui32FCCmdSize;
+ IMG_BYTE * psFCDMCmd;
+ IMG_UINT32 ui32FrameContext;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKRS;
+
+/* Bridge out structure for RGXKickRS */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKRS_TAG
+{
+ PVRSRV_FENCE hUpdateFence;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKRS;
+
+
+/*******************************************
+ RGXKickVRDM
+ *******************************************/
+
+/* Bridge in structure for RGXKickVRDM */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKVRDM_TAG
+{
+ IMG_HANDLE hRayContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phClientFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientFenceSyncOffset;
+ IMG_UINT32 * pui32ClientFenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phClientUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32ClientUpdateSyncOffset;
+ IMG_UINT32 * pui32ClientUpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSyncs;
+ PVRSRV_FENCE hCheckFenceFD;
+ PVRSRV_TIMELINE hUpdateTimeline;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32CmdSize;
+ IMG_BYTE * psDMCmd;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKVRDM;
+
+/* Bridge out structure for RGXKickVRDM */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKVRDM_TAG
+{
+ PVRSRV_FENCE hUpdateFence;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKVRDM;
+
+
+/*******************************************
+ RGXCreateRayContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sVRMCallStackAddr;
+ IMG_UINT32 ui32FrameworkCmdSize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT;
+
+/* Bridge out structure for RGXCreateRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT_TAG
+{
+ IMG_HANDLE hRayContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT;
+
+
+/*******************************************
+ RGXDestroyRayContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT_TAG
+{
+ IMG_HANDLE hRayContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT;
+
+/* Bridge out structure for RGXDestroyRayContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT;
+
+
+/*******************************************
+ RGXSetRayContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRayContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hRayContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRayContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY;
+
+
+#endif /* COMMON_RGXRAY_BRIDGE_H */
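
Both kick calls above accept a check fence (hCheckFenceFD) for the hardware to wait on and a timeline (hUpdateTimeline) on which the server creates the returned hUpdateFence. Assuming these behave as ordinary sync-file descriptors, completion can be awaited with poll(); the sketch below substitutes a pipe for the fence fd so it runs standalone:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

/* Wait for a fence-style fd to become readable, then drop our reference.
 * The fd returned in hUpdateFence would be polled the same way once the
 * queued GPU work completes; a pipe stands in here so the sketch runs. */
static int WaitAndRelease(int iFenceFd, int iTimeoutMs)
{
    struct pollfd sPoll = { .fd = iFenceFd, .events = POLLIN };
    int iRet = poll(&sPoll, 1, iTimeoutMs);
    close(iFenceFd);
    return (iRet > 0) ? 0 : -1;
}

int main(void)
{
    int aiPipe[2];
    if (pipe(aiPipe) != 0)
        return 1;
    if (write(aiPipe[1], "x", 1) != 1)      /* "signal" the stand-in fence */
        return 1;
    printf("wait: %d\n", WaitAndRelease(aiPipe[0], 1000));
    close(aiPipe[1]);
    return 0;
}
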
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxsignals_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxsignals_bridge.h
new file mode 100644
index 00000000000000..342e3419325e16
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxsignals_bridge.h
@@ -0,0 +1,79 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxsignals
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxsignals
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXSIGNALS_BRIDGE_H
+#define COMMON_RGXSIGNALS_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST (PVRSRV_BRIDGE_RGXSIGNALS_CMD_FIRST+0)
+
+
+/*******************************************
+ RGXNotifySignalUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE_TAG
+{
+ IMG_HANDLE hPrivData;
+ IMG_DEV_VIRTADDR sDevSignalAddress;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE;
+
+/* Bridge out structure for RGXNotifySignalUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE;
+
+
+#endif /* COMMON_RGXSIGNALS_BRIDGE_H */
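
The notify call identifies the updated signal by its GPU virtual address (sDevSignalAddress). A sketch of populating the input structure, with a plain 64-bit integer standing in for IMG_DEV_VIRTADDR:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins: the real IMG_DEV_VIRTADDR carries a 64-bit GPU address; a
 * bare uint64_t is used here for illustration only. */
typedef uint64_t DEV_VIRTADDR_SKETCH;
typedef void    *IMG_HANDLE;

typedef struct {
    IMG_HANDLE          hPrivData;
    DEV_VIRTADDR_SKETCH sDevSignalAddress;
} IN_NOTIFYSIGNALUPDATE_SKETCH;

int main(void)
{
    IN_NOTIFYSIGNALUPDATE_SKETCH sIn = {
        .hPrivData         = (IMG_HANDLE)0,
        .sDevSignalAddress = 0x10000ull,    /* GPU VA of the updated signal */
    };
    printf("signal at dev VA 0x%llx\n",
           (unsigned long long)sIn.sDevSignalAddress);
    return 0;
}
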
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxta3d_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxta3d_bridge.h
new file mode 100644
index 00000000000000..4977a26155063c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxta3d_bridge.h
@@ -0,0 +1,482 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxta3d
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxta3d
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTA3D_BRIDGE_H
+#define COMMON_RGXTA3D_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "rgx_fwif_shared.h"
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+9
+#define PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+10
+#define PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+11
+#define PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+12
+#define PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+13
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+14
+#define PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+15
+#define PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+16
+#define PVRSRV_BRIDGE_RGXTA3D_CMD_LAST (PVRSRV_BRIDGE_RGXTA3D_CMD_FIRST+16)
+
+
+/*******************************************
+ RGXCreateHWRTData
+ *******************************************/
+
+/* Bridge in structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA_TAG
+{
+ IMG_UINT32 ui32RenderTarget;
+ IMG_DEV_VIRTADDR sPMMlistDevVAddr;
+ IMG_HANDLE * phapsFreeLists;
+ IMG_UINT32 ui32PPPScreen;
+ IMG_UINT32 ui32PPPGridOffset;
+ IMG_UINT64 ui64PPPMultiSampleCtl;
+ IMG_UINT32 ui32TPCStride;
+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr;
+ IMG_UINT32 ui32TPCSize;
+ IMG_UINT32 ui32TEScreen;
+ IMG_UINT32 ui32TEAA;
+ IMG_UINT32 ui32TEMTILE1;
+ IMG_UINT32 ui32TEMTILE2;
+ IMG_UINT32 ui32MTileStride;
+ IMG_UINT32 ui32ui32ISPMergeLowerX;
+ IMG_UINT32 ui32ui32ISPMergeLowerY;
+ IMG_UINT32 ui32ui32ISPMergeUpperX;
+ IMG_UINT32 ui32ui32ISPMergeUpperY;
+ IMG_UINT32 ui32ui32ISPMergeScaleX;
+ IMG_UINT32 ui32ui32ISPMergeScaleY;
+ IMG_UINT16 ui16MaxRTs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA;
+
+/* Bridge out structure for RGXCreateHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ IMG_HANDLE hRTACtlMemDesc;
+ IMG_HANDLE hsHWRTDataMemDesc;
+ IMG_UINT32 ui32FWHWRTData;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA;
+
+
+/*******************************************
+ RGXDestroyHWRTData
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA;
+
+/* Bridge out structure for RGXDestroyHWRTData */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA;
+
+
+/*******************************************
+ RGXCreateRenderTarget
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET_TAG
+{
+ IMG_DEV_VIRTADDR spsVHeapTableDevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET;
+
+/* Bridge out structure for RGXCreateRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET_TAG
+{
+ IMG_HANDLE hsRenderTargetMemDesc;
+ IMG_UINT32 ui32sRenderTargetFWDevVAddr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET;
+
+
+/*******************************************
+ RGXDestroyRenderTarget
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET_TAG
+{
+ IMG_HANDLE hsRenderTargetMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET;
+
+/* Bridge out structure for RGXDestroyRenderTarget */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET;
+
+
+/*******************************************
+ RGXCreateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER_TAG
+{
+ IMG_HANDLE hReservation;
+ IMG_HANDLE hPMR;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER;
+
+/* Bridge out structure for RGXCreateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferKM;
+ IMG_UINT32 ui32sZSBufferFWDevVAddr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER;
+
+
+/*******************************************
+ RGXDestroyZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER;
+
+/* Bridge out structure for RGXDestroyZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER;
+
+
+/*******************************************
+ RGXPopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsZSBufferKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXPopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsPopulation;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER;
+
+
+/*******************************************
+ RGXUnpopulateZSBuffer
+ *******************************************/
+
+/* Bridge in structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER_TAG
+{
+ IMG_HANDLE hsPopulation;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER;
+
+/* Bridge out structure for RGXUnpopulateZSBuffer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER;
+
+
+/*******************************************
+ RGXCreateFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATEFREELIST_TAG
+{
+ IMG_UINT32 ui32ui32MaxFLPages;
+ IMG_UINT32 ui32ui32InitFLPages;
+ IMG_UINT32 ui32ui32GrowFLPages;
+ IMG_UINT32 ui32ui32GrowParamThreshold;
+ IMG_HANDLE hsGlobalFreeList;
+ IMG_BOOL bbFreeListCheck;
+ IMG_DEV_VIRTADDR spsFreeListDevVAddr;
+ IMG_HANDLE hsFreeListPMR;
+ IMG_DEVMEM_OFFSET_T uiPMROffset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATEFREELIST;
+
+/* Bridge out structure for RGXCreateFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST;
+
+
+/*******************************************
+ RGXDestroyFreeList
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST;
+
+/* Bridge out structure for RGXDestroyFreeList */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST;
+
+
+/*******************************************
+ RGXCreateRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_DEV_VIRTADDR sVDMCallStackAddr;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT;
+
+/* Bridge out structure for RGXCreateRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT_TAG
+{
+ IMG_HANDLE hRenderContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT;
+
+
+/*******************************************
+ RGXDestroyRenderContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT_TAG
+{
+ IMG_HANDLE hCleanupCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT;
+
+/* Bridge out structure for RGXDestroyRenderContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT;
+
+
+/*******************************************
+ RGXKickTA3D
+ *******************************************/
+
+/* Bridge in structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_IN_RGXKICKTA3D_TAG
+{
+ IMG_HANDLE hRenderContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientTAFenceCount;
+ IMG_HANDLE * phClientTAFenceSyncPrimBlock;
+ IMG_UINT32 * pui32ClientTAFenceSyncOffset;
+ IMG_UINT32 * pui32ClientTAFenceValue;
+ IMG_UINT32 ui32ClientTAUpdateCount;
+ IMG_HANDLE * phClientTAUpdateSyncPrimBlock;
+ IMG_UINT32 * pui32ClientTAUpdateSyncOffset;
+ IMG_UINT32 * pui32ClientTAUpdateValue;
+ IMG_UINT32 ui32ServerTASyncPrims;
+ IMG_UINT32 * pui32ServerTASyncFlags;
+ IMG_HANDLE * phServerTASyncs;
+ IMG_UINT32 ui32Client3DFenceCount;
+ IMG_HANDLE * phClient3DFenceSyncPrimBlock;
+ IMG_UINT32 * pui32Client3DFenceSyncOffset;
+ IMG_UINT32 * pui32Client3DFenceValue;
+ IMG_UINT32 ui32Client3DUpdateCount;
+ IMG_HANDLE * phClient3DUpdateSyncPrimBlock;
+ IMG_UINT32 * pui32Client3DUpdateSyncOffset;
+ IMG_UINT32 * pui32Client3DUpdateValue;
+ IMG_UINT32 ui32Server3DSyncPrims;
+ IMG_UINT32 * pui32Server3DSyncFlags;
+ IMG_HANDLE * phServer3DSyncs;
+ IMG_HANDLE hPRFenceUFOSyncPrimBlock;
+ IMG_UINT32 ui32FRFenceUFOSyncOffset;
+ IMG_UINT32 ui32FRFenceValue;
+ PVRSRV_FENCE hCheckFence;
+ PVRSRV_TIMELINE hUpdateTimeline;
+ IMG_CHAR * puiUpdateFenceName;
+ PVRSRV_FENCE hCheckFence3D;
+ PVRSRV_TIMELINE hUpdateTimeline3D;
+ IMG_CHAR * puiUpdateFenceName3D;
+ IMG_UINT32 ui32TACmdSize;
+ IMG_BYTE * psTACmd;
+ IMG_UINT32 ui323DPRCmdSize;
+ IMG_BYTE * ps3DPRCmd;
+ IMG_UINT32 ui323DCmdSize;
+ IMG_BYTE * ps3DCmd;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_BOOL bbLastTAInScene;
+ IMG_BOOL bbKickTA;
+ IMG_BOOL bbKickPR;
+ IMG_BOOL bbKick3D;
+ IMG_BOOL bbAbort;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_HANDLE hRTDataCleanup;
+ IMG_HANDLE hZBuffer;
+ IMG_HANDLE hSBuffer;
+ IMG_HANDLE hMSAAScratchBuffer;
+ IMG_BOOL bbCommitRefCountsTA;
+ IMG_BOOL bbCommitRefCounts3D;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 * pui32SyncPMRFlags;
+ IMG_HANDLE * phSyncPMRs;
+ IMG_UINT32 ui32RenderTargetSize;
+ IMG_UINT32 ui32NumberOfDrawCalls;
+ IMG_UINT32 ui32NumberOfIndices;
+ IMG_UINT32 ui32NumberOfMRTs;
+ IMG_UINT64 ui64Deadline;
+ IMG_DEV_VIRTADDR ssRobustnessResetReason;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXKICKTA3D;
+
+/* Bridge out structure for RGXKickTA3D */
+typedef struct PVRSRV_BRIDGE_OUT_RGXKICKTA3D_TAG
+{
+ PVRSRV_FENCE hUpdateFence;
+ PVRSRV_FENCE hUpdateFence3D;
+ IMG_BOOL bbCommittedRefCountsTA;
+ IMG_BOOL bbCommittedRefCounts3D;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXKICKTA3D;
+
+
+/*******************************************
+ RGXSetRenderContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hRenderContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetRenderContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY;
+
+
+/*******************************************
+ RGXGetLastRenderContextResetReason
+ *******************************************/
+
+/* Bridge in structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+ IMG_HANDLE hRenderContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+/* Bridge out structure for RGXGetLastRenderContextResetReason */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON_TAG
+{
+ IMG_UINT32 ui32LastResetReason;
+ IMG_UINT32 ui32LastResetJobRef;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON;
+
+
+/*******************************************
+ RGXGetPartialRenderCount
+ *******************************************/
+
+/* Bridge in structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT_TAG
+{
+ IMG_HANDLE hHWRTDataMemDesc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT;
+
+/* Bridge out structure for RGXGetPartialRenderCount */
+typedef struct PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT_TAG
+{
+ IMG_UINT32 ui32NumPartialRenders;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT;
+
+
+/*******************************************
+ RGXRenderContextStalled
+ *******************************************/
+
+/* Bridge in structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED_TAG
+{
+ IMG_HANDLE hRenderContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED;
+
+/* Bridge out structure for RGXRenderContextStalled */
+typedef struct PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED;
+
+
+#endif /* COMMON_RGXTA3D_BRIDGE_H */
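
A single RGXKickTA3D call can enqueue the geometry (TA), partial-render (PR), and fragment (3D) phases together, selected by the bbKickTA/bbKickPR/bbKick3D flags, each phase with its own command buffer and size field. A caller-side consistency check is sketched below; pairing each flag with a non-empty buffer is an assumption for illustration, not a rule the header states:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative slice of the kick parameters: which phases to run and how
 * many command bytes each phase carries. */
typedef struct {
    bool     bKickTA, bKickPR, bKick3D;
    unsigned uiTACmdSize, ui3DPRCmdSize, ui3DCmdSize;
} KICK_SKETCH;

static bool KickLooksConsistent(const KICK_SKETCH *psKick)
{
    /* Every requested phase needs a command buffer... */
    if (psKick->bKickTA && psKick->uiTACmdSize == 0)   return false;
    if (psKick->bKickPR && psKick->ui3DPRCmdSize == 0) return false;
    if (psKick->bKick3D && psKick->ui3DCmdSize == 0)   return false;
    /* ...and at least one phase must be requested. */
    return psKick->bKickTA || psKick->bKickPR || psKick->bKick3D;
}

int main(void)
{
    KICK_SKETCH sKick = { .bKickTA = true, .uiTACmdSize = 128 };
    printf("consistent: %d\n", KickLooksConsistent(&sKick));
    return 0;
}
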
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxtq2_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxtq2_bridge.h
new file mode 100644
index 00000000000000..6d10907058f3c7
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxtq2_bridge.h
@@ -0,0 +1,181 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxtq2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxtq2
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ2_BRIDGE_H
+#define COMMON_RGXTQ2_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RGXTQ2_CMD_LAST (PVRSRV_BRIDGE_RGXTQ2_CMD_FIRST+4)
+
+
+/*******************************************
+ RGXTDMCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXTDMDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXTDMDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXTDMSubmitTransfer
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32PDumpFlags;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32ClientFenceCount;
+ IMG_HANDLE * phFenceUFOSyncPrimBlock;
+ IMG_UINT32 * pui32FenceSyncOffset;
+ IMG_UINT32 * pui32FenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ IMG_HANDLE * phUpdateUFOSyncPrimBlock;
+ IMG_UINT32 * pui32UpdateSyncOffset;
+ IMG_UINT32 * pui32UpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerSyncFlags;
+ IMG_HANDLE * phServerSync;
+ PVRSRV_FENCE hCheckFenceFD;
+ PVRSRV_TIMELINE hUpdateTimeline;
+ IMG_CHAR * puiUpdateFenceName;
+ IMG_UINT32 ui32CommandSize;
+ IMG_UINT8 * pui8FWCommand;
+ IMG_UINT32 ui32ExternalJobReference;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 * pui32SyncPMRFlags;
+ IMG_HANDLE * phSyncPMRs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER;
+
+/* Bridge out structure for RGXTDMSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER_TAG
+{
+ PVRSRV_FENCE hUpdateFence;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER;
+
+
+/*******************************************
+ RGXTDMSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXTDMSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY;
+
+
+/*******************************************
+ RGXTDMNotifyWriteOffsetUpdate
+ *******************************************/
+
+/* Bridge in structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32PDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+/* Bridge out structure for RGXTDMNotifyWriteOffsetUpdate */
+typedef struct PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE;
+
+
+#endif /* COMMON_RGXTQ2_BRIDGE_H */
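
The CMD_FIRST/CMD_LAST macros above give each bridge group a dense, zero-based command range, so a dispatcher can bounds-check an incoming ID with a single comparison. Below is a minimal, self-contained C sketch of that check; the macros and types are local stand-ins, not the driver's real dispatch tables.

/* Illustrative only: mirrors the CMD_FIRST + n / CMD_LAST numbering
 * scheme used by the bridge headers, with a bounds check a dispatcher
 * might perform. */
#include <stdint.h>
#include <stdio.h>

#define CMD_FIRST 0
#define CMD_CREATE  (CMD_FIRST + 0)
#define CMD_DESTROY (CMD_FIRST + 1)
#define CMD_SUBMIT  (CMD_FIRST + 2)
#define CMD_LAST    (CMD_FIRST + 2)

static int dispatch(uint32_t cmd)
{
    if (cmd > CMD_LAST) {   /* reject IDs outside this group's range */
        fprintf(stderr, "bad bridge cmd %u\n", cmd);
        return -1;
    }
    printf("dispatching cmd %u\n", cmd);
    return 0;
}

int main(void)
{
    dispatch(CMD_SUBMIT);    /* in range: accepted */
    dispatch(CMD_LAST + 1);  /* out of range: rejected */
    return 0;
}
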
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_rgxtq_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_rgxtq_bridge.h
new file mode 100644
index 00000000000000..9f429507820cd6
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_rgxtq_bridge.h
@@ -0,0 +1,165 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for rgxtq
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for rgxtq
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RGXTQ_BRIDGE_H
+#define COMMON_RGXTQ_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+#include <powervr/sync_external.h>
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_RGXTQ_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RGXTQ_CMD_LAST (PVRSRV_BRIDGE_RGXTQ_CMD_FIRST+3)
+
+
+/*******************************************
+ RGXCreateTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT_TAG
+{
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32FrameworkCmdize;
+ IMG_BYTE * psFrameworkCmd;
+ IMG_HANDLE hPrivData;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT;
+
+/* Bridge out structure for RGXCreateTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXDestroyTransferContext
+ *******************************************/
+
+/* Bridge in structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+ IMG_HANDLE hTransferContext;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT;
+
+/* Bridge out structure for RGXDestroyTransferContext */
+typedef struct PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT;
+
+
+/*******************************************
+ RGXSubmitTransfer
+ *******************************************/
+
+/* Bridge in structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32ClientCacheOpSeqNum;
+ IMG_UINT32 ui32PrepareCount;
+	IMG_UINT32 * pui32ClientFenceCount;
+	IMG_HANDLE ** phFenceUFOSyncPrimBlock;
+	IMG_UINT32 ** pui32FenceSyncOffset;
+	IMG_UINT32 ** pui32FenceValue;
+	IMG_UINT32 * pui32ClientUpdateCount;
+	IMG_HANDLE ** phUpdateUFOSyncPrimBlock;
+	IMG_UINT32 ** pui32UpdateSyncOffset;
+	IMG_UINT32 ** pui32UpdateValue;
+	IMG_UINT32 * pui32ServerSyncCount;
+	IMG_UINT32 ** pui32ServerSyncFlags;
+	IMG_HANDLE ** phServerSync;
+	PVRSRV_FENCE hCheckFenceFD;
+	PVRSRV_TIMELINE h2DUpdateTimeline;
+	PVRSRV_TIMELINE h3DUpdateTimeline;
+	IMG_CHAR * puiUpdateFenceName;
+	IMG_UINT32 * pui32CommandSize;
+	IMG_UINT8 ** pui8FWCommand;
+	IMG_UINT32 * pui32TQPrepareFlags;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32SyncPMRCount;
+ IMG_UINT32 * pui32SyncPMRFlags;
+ IMG_HANDLE * phSyncPMRs;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER;
+
+/* Bridge out structure for RGXSubmitTransfer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER_TAG
+{
+ PVRSRV_FENCE h2DUpdateFence;
+ PVRSRV_FENCE h3DUpdateFence;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER;
+
+
+/*******************************************
+ RGXSetTransferContextPriority
+ *******************************************/
+
+/* Bridge in structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ IMG_HANDLE hTransferContext;
+ IMG_UINT32 ui32Priority;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY;
+
+/* Bridge out structure for RGXSetTransferContextPriority */
+typedef struct PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY;
+
+
+#endif /* COMMON_RGXTQ_BRIDGE_H */
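
RGXSubmitTransfer batches several "prepares" in a single call, which is why its fence and update fields are pointers to pointers: the outer array is indexed by prepare, the inner arrays by sync within that prepare. A small illustrative sketch of that layout, with local stand-in types and counts (not the driver's definitions):

/* Array-of-arrays layout implied by the double pointers in
 * PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER: one inner array per prepare. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t prep0_fences[] = { 1, 2 };   /* fence values, prepare 0 */
    uint32_t prep1_fences[] = { 7 };      /* fence values, prepare 1 */
    uint32_t *fence_values[] = { prep0_fences, prep1_fences };
    uint32_t fence_counts[]  = { 2, 1 };  /* cf. pui32ClientFenceCount */
    uint32_t prepare_count   = 2;         /* cf. ui32PrepareCount */

    for (uint32_t p = 0; p < prepare_count; p++)
        for (uint32_t f = 0; f < fence_counts[p]; f++)
            printf("prepare %u fence[%u] = %u\n",
                   p, f, (unsigned)fence_values[p][f]);
    return 0;
}
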
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_ri_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_ri_bridge.h
new file mode 100644
index 00000000000000..3b602270086d84
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_ri_bridge.h
@@ -0,0 +1,235 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_RI_BRIDGE_H
+#define COMMON_RI_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "ri_typedefs.h"
+
+
+#define PVRSRV_BRIDGE_RI_CMD_FIRST 0
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+0
+#define PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+1
+#define PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+2
+#define PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR PVRSRV_BRIDGE_RI_CMD_FIRST+3
+#define PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY PVRSRV_BRIDGE_RI_CMD_FIRST+4
+#define PVRSRV_BRIDGE_RI_RIDUMPLIST PVRSRV_BRIDGE_RI_CMD_FIRST+5
+#define PVRSRV_BRIDGE_RI_RIDUMPALL PVRSRV_BRIDGE_RI_CMD_FIRST+6
+#define PVRSRV_BRIDGE_RI_RIDUMPPROCESS PVRSRV_BRIDGE_RI_CMD_FIRST+7
+#define PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER PVRSRV_BRIDGE_RI_CMD_FIRST+8
+#define PVRSRV_BRIDGE_RI_CMD_LAST (PVRSRV_BRIDGE_RI_CMD_FIRST+8)
+
+
+/*******************************************
+ RIWritePMREntry
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY_TAG
+{
+ IMG_HANDLE hPMRHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY;
+
+/* Bridge out structure for RIWritePMREntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY;
+
+
+/*******************************************
+ RIWriteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hPMRHandle;
+ IMG_UINT32 ui32TextBSize;
+ const IMG_CHAR * puiTextB;
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_BOOL bIsImport;
+ IMG_BOOL bIsSuballoc;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY;
+
+/* Bridge out structure for RIWriteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY;
+
+
+/*******************************************
+ RIWriteProcListEntry
+ *******************************************/
+
+/* Bridge in structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY_TAG
+{
+ IMG_UINT32 ui32TextBSize;
+ const IMG_CHAR * puiTextB;
+ IMG_UINT64 ui64Size;
+ IMG_UINT64 ui64DevVAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY;
+
+/* Bridge out structure for RIWriteProcListEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY;
+
+
+/*******************************************
+ RIUpdateMEMDESCAddr
+ *******************************************/
+
+/* Bridge in structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR_TAG
+{
+ IMG_HANDLE hRIHandle;
+ IMG_DEV_VIRTADDR sAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR;
+
+/* Bridge out structure for RIUpdateMEMDESCAddr */
+typedef struct PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR;
+
+
+/*******************************************
+ RIDeleteMEMDESCEntry
+ *******************************************/
+
+/* Bridge in structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY_TAG
+{
+ IMG_HANDLE hRIHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY;
+
+/* Bridge out structure for RIDeleteMEMDESCEntry */
+typedef struct PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY;
+
+
+/*******************************************
+ RIDumpList
+ *******************************************/
+
+/* Bridge in structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPLIST_TAG
+{
+ IMG_HANDLE hPMRHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPLIST;
+
+/* Bridge out structure for RIDumpList */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPLIST_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPLIST;
+
+
+/*******************************************
+ RIDumpAll
+ *******************************************/
+
+/* Bridge in structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPALL_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPALL;
+
+/* Bridge out structure for RIDumpAll */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPALL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPALL;
+
+
+/*******************************************
+ RIDumpProcess
+ *******************************************/
+
+/* Bridge in structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_IN_RIDUMPPROCESS_TAG
+{
+ IMG_PID ui32Pid;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIDUMPPROCESS;
+
+/* Bridge out structure for RIDumpProcess */
+typedef struct PVRSRV_BRIDGE_OUT_RIDUMPPROCESS_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIDUMPPROCESS;
+
+
+/*******************************************
+ RIWritePMREntryWithOwner
+ *******************************************/
+
+/* Bridge in structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+ IMG_HANDLE hPMRHandle;
+ IMG_PID ui32Owner;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER;
+
+/* Bridge out structure for RIWritePMREntryWithOwner */
+typedef struct PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER;
+
+
+#endif /* COMMON_RI_BRIDGE_H */
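
The RI (resource information) calls above form a write/update/delete lifecycle keyed by the hRIHandle returned from RIWriteMEMDESCEntry. A hedged sketch of that pairing follows; the ri_* helpers are hypothetical stand-ins for the ioctl round-trips these structures describe, not real driver functions.

/* Sketch of the RI handle lifecycle: write an entry, optionally update
 * its device virtual address, delete it with the handle from the write. */
#include <stdint.h>

typedef void *ri_handle;    /* stand-in for IMG_HANDLE */

static ri_handle ri_write_memdesc(const char *text, uint64_t size)
{ (void)text; (void)size; return (ri_handle)1; /* pretend handle */ }
static void ri_update_addr(ri_handle h, uint64_t dev_vaddr)
{ (void)h; (void)dev_vaddr; }
static void ri_delete(ri_handle h) { (void)h; }

int main(void)
{
    ri_handle h = ri_write_memdesc("TextureHeap", 4096); /* hRIHandle out */
    ri_update_addr(h, 0x10000000);  /* cf. RIUpdateMEMDESCAddr */
    ri_delete(h);                   /* cf. RIDeleteMEMDESCEntry */
    return 0;
}
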
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_srvcore_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_srvcore_bridge.h
new file mode 100644
index 00000000000000..a066764abd080c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_srvcore_bridge.h
@@ -0,0 +1,368 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for srvcore
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for srvcore
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SRVCORE_BRIDGE_H
+#define COMMON_SRVCORE_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pvrsrv_device_types.h"
+#include "cache_ops.h"
+
+
+#define PVRSRV_BRIDGE_SRVCORE_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SRVCORE_CONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SRVCORE_DISCONNECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SRVCORE_CMD_LAST (PVRSRV_BRIDGE_SRVCORE_CMD_FIRST+15)
+
+
+/*******************************************
+ Connect
+ *******************************************/
+
+/* Bridge in structure for Connect */
+typedef struct PVRSRV_BRIDGE_IN_CONNECT_TAG
+{
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32ClientBuildOptions;
+ IMG_UINT32 ui32ClientDDKVersion;
+ IMG_UINT32 ui32ClientDDKBuild;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_CONNECT;
+
+/* Bridge out structure for Connect */
+typedef struct PVRSRV_BRIDGE_OUT_CONNECT_TAG
+{
+ IMG_UINT8 ui8KernelArch;
+ IMG_UINT32 ui32CapabilityFlags;
+ IMG_UINT32 ui32PVRBridges;
+ IMG_UINT32 ui32RGXBridges;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_CONNECT;
+
+
+/*******************************************
+ Disconnect
+ *******************************************/
+
+/* Bridge in structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_IN_DISCONNECT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DISCONNECT;
+
+/* Bridge out structure for Disconnect */
+typedef struct PVRSRV_BRIDGE_OUT_DISCONNECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DISCONNECT;
+
+
+/*******************************************
+ AcquireGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT;
+
+/* Bridge out structure for AcquireGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT_TAG
+{
+ IMG_HANDLE hGlobalEventObject;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT;
+
+
+/*******************************************
+ ReleaseGlobalEventObject
+ *******************************************/
+
+/* Bridge in structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT_TAG
+{
+ IMG_HANDLE hGlobalEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT;
+
+/* Bridge out structure for ReleaseGlobalEventObject */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT;
+
+
+/*******************************************
+ EventObjectOpen
+ *******************************************/
+
+/* Bridge in structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN_TAG
+{
+ IMG_HANDLE hEventObject;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN;
+
+/* Bridge out structure for EventObjectOpen */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN_TAG
+{
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN;
+
+
+/*******************************************
+ EventObjectWait
+ *******************************************/
+
+/* Bridge in structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT_TAG
+{
+ IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT;
+
+/* Bridge out structure for EventObjectWait */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT;
+
+
+/*******************************************
+ EventObjectClose
+ *******************************************/
+
+/* Bridge in structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE_TAG
+{
+ IMG_HANDLE hOSEventKM;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE;
+
+/* Bridge out structure for EventObjectClose */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE;
+
+
+/*******************************************
+ DumpDebugInfo
+ *******************************************/
+
+/* Bridge in structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_IN_DUMPDEBUGINFO_TAG
+{
+ IMG_UINT32 ui32ui32VerbLevel;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_DUMPDEBUGINFO;
+
+/* Bridge out structure for DumpDebugInfo */
+typedef struct PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO;
+
+
+/*******************************************
+ GetDevClockSpeed
+ *******************************************/
+
+/* Bridge in structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED;
+
+/* Bridge out structure for GetDevClockSpeed */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED_TAG
+{
+ IMG_UINT32 ui32ui32ClockSpeed;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED;
+
+
+/*******************************************
+ HWOpTimeout
+ *******************************************/
+
+/* Bridge in structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_IN_HWOPTIMEOUT_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_HWOPTIMEOUT;
+
+/* Bridge out structure for HWOpTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_HWOPTIMEOUT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_HWOPTIMEOUT;
+
+
+/*******************************************
+ AlignmentCheck
+ *******************************************/
+
+/* Bridge in structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_IN_ALIGNMENTCHECK_TAG
+{
+ IMG_UINT32 ui32AlignChecksSize;
+ IMG_UINT32 * pui32AlignChecks;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALIGNMENTCHECK;
+
+/* Bridge out structure for AlignmentCheck */
+typedef struct PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK;
+
+
+/*******************************************
+ GetDeviceStatus
+ *******************************************/
+
+/* Bridge in structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_IN_GETDEVICESTATUS_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_GETDEVICESTATUS;
+
+/* Bridge out structure for GetDeviceStatus */
+typedef struct PVRSRV_BRIDGE_OUT_GETDEVICESTATUS_TAG
+{
+ IMG_UINT32 ui32DeviceSatus;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_GETDEVICESTATUS;
+
+
+/*******************************************
+ EventObjectWaitTimeout
+ *******************************************/
+
+/* Bridge in structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT_TAG
+{
+ IMG_HANDLE hOSEventKM;
+ IMG_UINT64 ui64uiTimeoutus;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT;
+
+/* Bridge out structure for EventObjectWaitTimeout */
+typedef struct PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT;
+
+
+/*******************************************
+ FindProcessMemStats
+ *******************************************/
+
+/* Bridge in structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS_TAG
+{
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ArrSize;
+ IMG_BOOL bbAllProcessStats;
+ /* Output pointer pui32MemStatsArray is also an implied input */
+ IMG_UINT32 * pui32MemStatsArray;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS;
+
+/* Bridge out structure for FindProcessMemStats */
+typedef struct PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS_TAG
+{
+ IMG_UINT32 * pui32MemStatsArray;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS;
+
+
+/*******************************************
+ AcquireInfoPage
+ *******************************************/
+
+/* Bridge in structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE;
+
+/* Bridge out structure for AcquireInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE_TAG
+{
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE;
+
+
+/*******************************************
+ ReleaseInfoPage
+ *******************************************/
+
+/* Bridge in structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_IN_RELEASEINFOPAGE_TAG
+{
+ IMG_HANDLE hPMR;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RELEASEINFOPAGE;
+
+/* Bridge out structure for ReleaseInfoPage */
+typedef struct PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE;
+
+
+#endif /* COMMON_SRVCORE_BRIDGE_H */
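
All of these bridge structures are declared __attribute__((packed)) because the same header is compiled into both the user-mode client and the kernel server, and removing padding keeps the two layouts byte-identical regardless of each side's natural alignment. A self-contained demonstration of the size difference packing makes, using stand-in types:

/* Packing removes the padding the compiler would otherwise insert
 * between a 1-byte and a 4-byte member. */
#include <stdint.h>
#include <stdio.h>

struct natural { uint8_t a; uint32_t b; };                        /* padded */
struct packed_ { uint8_t a; uint32_t b; } __attribute__((packed));

int main(void)
{
    printf("natural: %zu bytes\n", sizeof(struct natural)); /* typically 8 */
    printf("packed : %zu bytes\n", sizeof(struct packed_)); /* 5 */
    return 0;
}
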
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_sync_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_sync_bridge.h
new file mode 100644
index 00000000000000..0fb3019f929711
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_sync_bridge.h
@@ -0,0 +1,480 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNC_BRIDGE_H
+#define COMMON_SYNC_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "pdumpdefs.h"
+#include "devicemem_typedefs.h"
+#include "pvrsrv_sync_km.h"
+
+
+#define PVRSRV_BRIDGE_SYNC_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK PVRSRV_BRIDGE_SYNC_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+2
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET PVRSRV_BRIDGE_SYNC_CMD_FIRST+3
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC PVRSRV_BRIDGE_SYNC_CMD_FIRST+4
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE PVRSRV_BRIDGE_SYNC_CMD_FIRST+5
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP PVRSRV_BRIDGE_SYNC_CMD_FIRST+6
+#define PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS PVRSRV_BRIDGE_SYNC_CMD_FIRST+7
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE PVRSRV_BRIDGE_SYNC_CMD_FIRST+8
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE PVRSRV_BRIDGE_SYNC_CMD_FIRST+9
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY PVRSRV_BRIDGE_SYNC_CMD_FIRST+10
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE PVRSRV_BRIDGE_SYNC_CMD_FIRST+11
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY PVRSRV_BRIDGE_SYNC_CMD_FIRST+12
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP PVRSRV_BRIDGE_SYNC_CMD_FIRST+13
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE PVRSRV_BRIDGE_SYNC_CMD_FIRST+14
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+15
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL PVRSRV_BRIDGE_SYNC_CMD_FIRST+16
+#define PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP PVRSRV_BRIDGE_SYNC_CMD_FIRST+17
+#define PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+18
+#define PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT PVRSRV_BRIDGE_SYNC_CMD_FIRST+19
+#define PVRSRV_BRIDGE_SYNC_CMD_LAST (PVRSRV_BRIDGE_SYNC_CMD_FIRST+19)
+
+
+/*******************************************
+ AllocSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for AllocSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32SyncPrimVAddr;
+ IMG_UINT32 ui32SyncPrimBlockSize;
+ IMG_HANDLE hhSyncPMR;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK;
+
+
+/*******************************************
+ FreeSyncPrimitiveBlock
+ *******************************************/
+
+/* Bridge in structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK_TAG
+{
+ IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK;
+
+/* Bridge out structure for FreeSyncPrimitiveBlock */
+typedef struct PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK;
+
+
+/*******************************************
+ SyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMSET_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMSET;
+
+/* Bridge out structure for SyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMSET_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMSET;
+
+
+/*******************************************
+ ServerSyncPrimSet
+ *******************************************/
+
+/* Bridge in structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET;
+
+/* Bridge out structure for ServerSyncPrimSet */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET;
+
+
+/*******************************************
+ ServerSyncAlloc
+ *******************************************/
+
+/* Bridge in structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCALLOC_TAG
+{
+ IMG_UINT32 ui32ClassNameSize;
+ const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCALLOC;
+
+/* Bridge out structure for ServerSyncAlloc */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32SyncPrimVAddr;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC;
+
+
+/*******************************************
+ ServerSyncFree
+ *******************************************/
+
+/* Bridge in structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCFREE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCFREE;
+
+/* Bridge out structure for ServerSyncFree */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCFREE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCFREE;
+
+
+/*******************************************
+ ServerSyncQueueHWOp
+ *******************************************/
+
+/* Bridge in structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_BOOL bbUpdate;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP;
+
+/* Bridge out structure for ServerSyncQueueHWOp */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP_TAG
+{
+ IMG_UINT32 ui32FenceValue;
+ IMG_UINT32 ui32UpdateValue;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP;
+
+
+/*******************************************
+ ServerSyncGetStatus
+ *******************************************/
+
+/* Bridge in structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS_TAG
+{
+ IMG_UINT32 ui32SyncCount;
+ IMG_HANDLE * phSyncHandle;
+ /* Output pointer pui32UID is also an implied input */
+ IMG_UINT32 * pui32UID;
+ /* Output pointer pui32FWAddr is also an implied input */
+ IMG_UINT32 * pui32FWAddr;
+ /* Output pointer pui32CurrentOp is also an implied input */
+ IMG_UINT32 * pui32CurrentOp;
+ /* Output pointer pui32NextOp is also an implied input */
+ IMG_UINT32 * pui32NextOp;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS;
+
+/* Bridge out structure for ServerSyncGetStatus */
+typedef struct PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS_TAG
+{
+ IMG_UINT32 * pui32UID;
+ IMG_UINT32 * pui32FWAddr;
+ IMG_UINT32 * pui32CurrentOp;
+ IMG_UINT32 * pui32NextOp;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS;
+
+
+/*******************************************
+ SyncPrimOpCreate
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE_TAG
+{
+ IMG_UINT32 ui32SyncBlockCount;
+ IMG_HANDLE * phBlockList;
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 * pui32SyncBlockIndex;
+ IMG_UINT32 * pui32Index;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_HANDLE * phServerSync;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE;
+
+/* Bridge out structure for SyncPrimOpCreate */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE_TAG
+{
+ IMG_HANDLE hServerCookie;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE;
+
+
+/*******************************************
+ SyncPrimOpTake
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE_TAG
+{
+ IMG_HANDLE hServerCookie;
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 * pui32Flags;
+ IMG_UINT32 * pui32FenceValue;
+ IMG_UINT32 * pui32UpdateValue;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 * pui32ServerFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE;
+
+/* Bridge out structure for SyncPrimOpTake */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE;
+
+
+/*******************************************
+ SyncPrimOpReady
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY_TAG
+{
+ IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY;
+
+/* Bridge out structure for SyncPrimOpReady */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY_TAG
+{
+ IMG_BOOL bReady;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY;
+
+
+/*******************************************
+ SyncPrimOpComplete
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE_TAG
+{
+ IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE;
+
+/* Bridge out structure for SyncPrimOpComplete */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE;
+
+
+/*******************************************
+ SyncPrimOpDestroy
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY_TAG
+{
+ IMG_HANDLE hServerCookie;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY;
+
+/* Bridge out structure for SyncPrimOpDestroy */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY;
+
+
+/*******************************************
+ SyncPrimPDump
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP;
+
+/* Bridge out structure for SyncPrimPDump */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP;
+
+
+/*******************************************
+ SyncPrimPDumpValue
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE;
+
+/* Bridge out structure for SyncPrimPDumpValue */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE;
+
+
+/*******************************************
+ SyncPrimPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Mask;
+ PDUMP_POLL_OPERATOR eOperator;
+ PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL;
+
+/* Bridge out structure for SyncPrimPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL;
+
+
+/*******************************************
+ SyncPrimOpPDumpPol
+ *******************************************/
+
+/* Bridge in structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL_TAG
+{
+ IMG_HANDLE hServerCookie;
+ PDUMP_POLL_OPERATOR eOperator;
+ PDUMP_FLAGS_T uiPDumpFlags;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL;
+
+/* Bridge out structure for SyncPrimOpPDumpPol */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL;
+
+
+/*******************************************
+ SyncPrimPDumpCBP
+ *******************************************/
+
+/* Bridge in structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP_TAG
+{
+ IMG_HANDLE hSyncHandle;
+ IMG_UINT32 ui32Offset;
+ IMG_DEVMEM_OFFSET_T uiWriteOffset;
+ IMG_DEVMEM_SIZE_T uiPacketSize;
+ IMG_DEVMEM_SIZE_T uiBufferSize;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP;
+
+/* Bridge out structure for SyncPrimPDumpCBP */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP;
+
+
+/*******************************************
+ SyncAllocEvent
+ *******************************************/
+
+/* Bridge in structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCALLOCEVENT_TAG
+{
+ IMG_BOOL bServerSync;
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32ClassNameSize;
+ const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCALLOCEVENT;
+
+/* Bridge out structure for SyncAllocEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT;
+
+
+/*******************************************
+ SyncFreeEvent
+ *******************************************/
+
+/* Bridge in structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_IN_SYNCFREEEVENT_TAG
+{
+ IMG_UINT32 ui32FWAddr;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCFREEEVENT;
+
+/* Bridge out structure for SyncFreeEvent */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCFREEEVENT_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCFREEEVENT;
+
+
+#endif /* COMMON_SYNC_BRIDGE_H */
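
The sync calls above imply a simple ordering: allocate a primitive block once, address primitives inside it by index (SyncPrimSet carries a block handle, an index, and a value), and free the block by handle. A hedged sketch of that flow; the bridge_* wrappers are hypothetical, not the real client API.

/* Minimal model of a sync primitive block and the set-by-index call. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t prims[64]; } sync_block;  /* stand-in block */

static sync_block *bridge_alloc_block(void) { static sync_block b; return &b; }
static void bridge_prim_set(sync_block *b, uint32_t idx, uint32_t val)
{ b->prims[idx] = val; }    /* mirrors PVRSRV_BRIDGE_IN_SYNCPRIMSET */
static void bridge_free_block(sync_block *b) { (void)b; }

int main(void)
{
    sync_block *blk = bridge_alloc_block(); /* cf. AllocSyncPrimitiveBlock */
    bridge_prim_set(blk, 3, 0xCAFE);        /* cf. SyncPrimSet */
    printf("prim[3] = 0x%X\n", (unsigned)blk->prims[3]);
    bridge_free_block(blk);                 /* cf. FreeSyncPrimitiveBlock */
    return 0;
}
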
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_synctracking_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_synctracking_bridge.h
new file mode 100644
index 00000000000000..17a2acf676de45
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_synctracking_bridge.h
@@ -0,0 +1,101 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_SYNCTRACKING_BRIDGE_H
+#define COMMON_SYNCTRACKING_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+0
+#define PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1
+#define PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST (PVRSRV_BRIDGE_SYNCTRACKING_CMD_FIRST+1)
+
+
+/*******************************************
+ SyncRecordRemoveByHandle
+ *******************************************/
+
+/* Bridge in structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+ IMG_HANDLE hhRecord;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE;
+
+/* Bridge out structure for SyncRecordRemoveByHandle */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE;
+
+
+/*******************************************
+ SyncRecordAdd
+ *******************************************/
+
+/* Bridge in structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_IN_SYNCRECORDADD_TAG
+{
+ IMG_HANDLE hhServerSyncPrimBlock;
+ IMG_UINT32 ui32ui32FwBlockAddr;
+ IMG_UINT32 ui32ui32SyncOffset;
+ IMG_BOOL bbServerSync;
+ IMG_UINT32 ui32ClassNameSize;
+ const IMG_CHAR * puiClassName;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_SYNCRECORDADD;
+
+/* Bridge out structure for SyncRecordAdd */
+typedef struct PVRSRV_BRIDGE_OUT_SYNCRECORDADD_TAG
+{
+ IMG_HANDLE hhRecord;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_SYNCRECORDADD;
+
+
+#endif /* COMMON_SYNCTRACKING_BRIDGE_H */
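
SyncRecordAdd and SyncRecordRemoveByHandle pair through the hhRecord handle: the handle returned by the add call is the only token the remove call accepts. A minimal stand-in sketch of that contract, with locally invented helpers:

/* Add returns an opaque record handle; remove consumes it. */
#include <stdint.h>

typedef uintptr_t record_handle;    /* stand-in for IMG_HANDLE */

static record_handle record_add(uint32_t fw_addr, uint32_t offset)
{ return (record_handle)(fw_addr + offset); /* pretend handle */ }
static void record_remove(record_handle h) { (void)h; }

int main(void)
{
    record_handle h = record_add(0x1000, 4); /* SyncRecordAdd out: hhRecord */
    record_remove(h);                        /* SyncRecordRemoveByHandle in */
    return 0;
}
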
diff --git a/drivers/gpu/drm/img-rogue/1.10/common_timerquery_bridge.h b/drivers/gpu/drm/img-rogue/1.10/common_timerquery_bridge.h
new file mode 100644
index 00000000000000..1ab379dee6771e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/common_timerquery_bridge.h
@@ -0,0 +1,135 @@
+/*******************************************************************************
+@File
+@Title Common bridge header for timerquery
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declares common defines and structures used by both the client
+ and server side of the bridge for timerquery
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*******************************************************************************/
+
+#ifndef COMMON_TIMERQUERY_BRIDGE_H
+#define COMMON_TIMERQUERY_BRIDGE_H
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "rgx_bridge.h"
+
+
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST 0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+0
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+1
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+2
+#define PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3
+#define PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST (PVRSRV_BRIDGE_TIMERQUERY_CMD_FIRST+3)
+
+
+/*******************************************
+ RGXBeginTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY_TAG
+{
+ IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY;
+
+/* Bridge out structure for RGXBeginTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY;
+
+
+/*******************************************
+ RGXEndTimerQuery
+ *******************************************/
+
+/* Bridge in structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY;
+
+/* Bridge out structure for RGXEndTimerQuery */
+typedef struct PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY_TAG
+{
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY;
+
+
+/*******************************************
+ RGXQueryTimer
+ *******************************************/
+
+/* Bridge in structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_IN_RGXQUERYTIMER_TAG
+{
+ IMG_UINT32 ui32QueryId;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXQUERYTIMER;
+
+/* Bridge out structure for RGXQueryTimer */
+typedef struct PVRSRV_BRIDGE_OUT_RGXQUERYTIMER_TAG
+{
+ IMG_UINT64 ui64StartTime;
+ IMG_UINT64 ui64EndTime;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXQUERYTIMER;
+
+
+/*******************************************
+ RGXCurrentTime
+ *******************************************/
+
+/* Bridge in structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_IN_RGXCURRENTTIME_TAG
+{
+ IMG_UINT32 ui32EmptyStructPlaceholder;
+} __attribute__((packed)) PVRSRV_BRIDGE_IN_RGXCURRENTTIME;
+
+/* Bridge out structure for RGXCurrentTime */
+typedef struct PVRSRV_BRIDGE_OUT_RGXCURRENTTIME_TAG
+{
+ IMG_UINT64 ui64Time;
+ PVRSRV_ERROR eError;
+} __attribute__((packed)) PVRSRV_BRIDGE_OUT_RGXCURRENTTIME;
+
+
+#endif /* COMMON_TIMERQUERY_BRIDGE_H */
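
The timer-query group brackets a workload between RGXBeginTimerQuery and RGXEndTimerQuery, then reads back the recorded start and end timestamps by query id via RGXQueryTimer. A sketch of the calling pattern with locally faked timestamps, since only the ordering is the point here:

/* Begin/end bracket a workload; query returns the recorded interval. */
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_start, fake_end;   /* stand-ins for FW timestamps */

static void begin_timer_query(uint32_t id) { (void)id; fake_start = 100; }
static void end_timer_query(void)          { fake_end = 350; }
static void query_timer(uint32_t id, uint64_t *s, uint64_t *e)
{ (void)id; *s = fake_start; *e = fake_end; }

int main(void)
{
    uint64_t s, e;
    begin_timer_query(1);       /* cf. RGXBeginTimerQuery: ui32QueryId */
    /* ... submit GPU work here ... */
    end_timer_query();          /* cf. RGXEndTimerQuery */
    query_timer(1, &s, &e);     /* cf. RGXQueryTimer out: start/end */
    printf("elapsed ticks: %llu\n", (unsigned long long)(e - s));
    return 0;
}
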
diff --git a/drivers/gpu/drm/img-rogue/1.10/config_kernel.h b/drivers/gpu/drm/img-rogue/1.10/config_kernel.h
new file mode 100644
index 00000000000000..1354cba9144143
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/config_kernel.h
@@ -0,0 +1,158 @@
+#define CHROMIUMOS_KERNEL
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES 8
+#define FIX_DUSTS_POW_ON_INIT
+#define GPUVIRT_VALIDATION_NUM_OS 8
+#define GPUVIRT_VALIDATION_NUM_REGIONS 2
+#define HWR_DEFAULT_ENABLED
+#define LINUX
+#define PDUMP_STREAMBUF_MAX_SIZE_MB 16
+#define PDVFS_COM PDVFS_COM_HOST
+#define PDVFS_COM_AP 2
+#define PDVFS_COM_HOST 1
+#define PDVFS_COM_PMC 3
+#define PVRSRV_APPHINT_ASSERTONHWRTRIGGER IMG_FALSE
+#define PVRSRV_APPHINT_ASSERTOUTOFMEMORY IMG_FALSE
+#define PVRSRV_APPHINT_BIFTILINGMODE 4
+#define PVRSRV_APPHINT_CACHEOPCONFIG 0
+#define PVRSRV_APPHINT_CACHEOPGFTHRESHOLDSIZE 0
+#define PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE 0
+#define PVRSRV_APPHINT_CHECKMLIST APPHNT_BLDVAR_DEBUG
+#define PVRSRV_APPHINT_CLEANUPTHREADPRIORITY 0
+#define PVRSRV_APPHINT_CLEANUPTHREADWEIGHT 0
+#define PVRSRV_APPHINT_DISABLECLOCKGATING 0
+#define PVRSRV_APPHINT_DISABLEDMOVERLAP 0
+#define PVRSRV_APPHINT_DISABLEFEDLOGGING IMG_FALSE
+#define PVRSRV_APPHINT_DISABLEPDUMPPANIC IMG_FALSE
+#define PVRSRV_APPHINT_DRIVERMODE 0x7FFFFFFF
+#define PVRSRV_APPHINT_DUSTREQUESTINJECT IMG_FALSE
+#define PVRSRV_APPHINT_EMUMAXFREQ 0
+#define PVRSRV_APPHINT_ENABLEAPM RGX_ACTIVEPM_DEFAULT
+#define PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE 0
+#define PVRSRV_APPHINT_ENABLEFTRACEGPU IMG_FALSE
+#define PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH RGXFWIF_INICFG_CTXSWITCH_DM_ALL
+#define PVRSRV_APPHINT_ENABLEHTBLOGGROUP 0
+#define PVRSRV_APPHINT_ENABLELOGGROUP RGXFWIF_LOG_TYPE_NONE
+#define PVRSRV_APPHINT_ENABLERDPOWERISLAND RGX_RD_POWER_ISLAND_DEFAULT
+#define PVRSRV_APPHINT_ENABLESIGNATURECHECKS APPHNT_BLDVAR_ENABLESIGNATURECHECKS
+#define PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG IMG_FALSE
+#define PVRSRV_APPHINT_FIRMWARELOGTYPE 0
+#define PVRSRV_APPHINT_FIRMWAREPERF FW_PERF_CONF_NONE
+#define PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN
+#define PVRSRV_APPHINT_FWPOISONONFREEVALUE 0xBD
+#define PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE 0x4000
+#define PVRSRV_APPHINT_GPIOVALIDATIONMODE 0
+#define PVRSRV_APPHINT_HTBOPERATIONMODE HTB_OPMODE_DROPOLDEST
+#define PVRSRV_APPHINT_HTBUFFERSIZE 64
+#define PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE 786432
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENRL 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES 0
+#define PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN 0
+#define PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER 0
+#define PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFFWFILTER 0
+#define PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB 2048
+#define PVRSRV_APPHINT_HWPERFHOSTFILTER 0
+#define PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT APPHNT_BLDVAR_DBGDUMPLIMIT
+#define PVRSRV_APPHINT_JONESDISABLEMASK 0
+#define PVRSRV_APPHINT_NEWFILTERINGMODE 1
+#define PVRSRV_APPHINT_OSIDREGION0MAX "0x3FFFFFFF 0x0FFFFFFF 0x17FFFFFF 0x1FFFFFFF 0x27FFFFFF 0x2FFFFFFF 0x37FFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION0MIN "0x00000000 0x04000000 0x10000000 0x18000000 0x20000000 0x28000000 0x30000000 0x38000000"
+#define PVRSRV_APPHINT_OSIDREGION1MAX "0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF 0x3FFFFFFF"
+#define PVRSRV_APPHINT_OSIDREGION1MIN "0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000 0x3F000000"
+#define PVRSRV_APPHINT_RGXBVNC ""
+#define PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE RGXFW_SIG_BUFFER_SIZE_MIN
+#define PVRSRV_APPHINT_TIMECORRCLOCK 0
+#define PVRSRV_APPHINT_TRUNCATEMODE 0
+#define PVRSRV_APPHINT_USEMETAT1 RGX_META_T1_OFF
+#define PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX
+#define PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY 0
+#define PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT 0
+#define PVRSRV_APPHINT_ZEROFREELIST IMG_FALSE
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD 90
+#define PVRSRV_ENABLE_PROCESS_STATS
+#define PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN 256
+#define PVRSRV_MODNAME "pvrsrvkm_1_10"
+#define PVRSRV_NEED_PVR_DPF
+#define PVRSRV_NEED_PVR_STACKTRACE_NATIVE
+#define PVRSRV_POISON_ON_ALLOC_VALUE 0xd9
+#define PVRSRV_POISON_ON_FREE_VALUE 0x63
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D 16
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC 13
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RTU 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA 15
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D 14
+#define PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D 14
+#define PVRSRV_VZ_NUM_OSID
+#define PVRSYNC_MODNAME "pvr_sync"
+#define PVR_BUILD_DIR "mt8173_linux"
+#define PVR_DIRTY_BYTES_FLUSH_THRESHOLD 524288
+#define PVR_DRM_NAME "pvr"
+#define PVR_DVFS
+#define PVR_GPIO_MODE PVR_GPIO_MODE_GENERAL
+#define PVR_GPIO_MODE_GENERAL 1
+#define PVR_GPIO_MODE_POWMON_PIN 2
+#define PVR_GPIO_MODE_POWMON_WO_PIN 3
+#define PVR_LDM_DRIVER_REGISTRATION_NAME "pvrsrvkm_1_10"
+#define PVR_LDM_PLATFORM_PRE_REGISTERED
+#define PVR_LINUX_BLOB_CACHE_SIZE_MEGABYTES 20
+#define PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD 256
+#define PVR_LINUX_KMALLOC_ALLOCATION_THRESHOLD 16384
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 2
+#define PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES 20480
+#define PVR_LINUX_PHYSMEM_MAX_POOL_PAGES 10240
+#define RGX_BNC_CONFIG_KM_HEADER "configs/rgxconfig_km_4.V.2.51.h"
+#define RGX_BVNC_CORE_KM_HEADER "cores/rgxcore_km_4.40.2.51.h"
+#define RGX_FW_FILENAME "rgx.fw"
+#define RGX_FW_HEAP_SHIFT 25
+#define SOC_TIMER_FREQ 20
+#define SUPPORT_BUFFER_SYNC 1
+#define SUPPORT_DBGDRV_EVENT_OBJECTS
+#define SUPPORT_GPUTRACE_EVENTS
+#define SUPPORT_LINUX_X86_PAT
+#define SUPPORT_LINUX_X86_WRITECOMBINE
+#define SUPPORT_PERCONTEXT_FREELIST
+#define SUPPORT_RGX 1
+#define SUPPORT_VDM_CONTEXT_STORE_BUFFER_AB
+#define CACHEFLUSH_NO_KMRBF_USING_UMVA 1
+#ifdef CONFIG_DRM_POWERVR_ROGUE_DEBUG
+#define DEBUG
+#define DEBUG_BRIDGE_KM
+#define DEBUG_HANDLEALLOC_KM
+#define DEBUG_LINUX_MEMORY_ALLOCATIONS
+#define DEBUG_LINUX_MEM_AREAS
+#define DEBUG_LINUX_MMAP_AREAS
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_TRUE
+#define PVRSRV_DEBUG_HANDLE_LOCK
+#define PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE
+#define PVRSRV_ENABLE_FULL_SYNC_TRACKING
+#define PVRSRV_ENABLE_SYNC_POISONING
+#define PVR_BUILD_TYPE "debug"
+#define PVR_RI_DEBUG
+#define RGXFW_ALIGNCHECKS
+#define SUPPORT_DEVICEMEMHISTORY_BRIDGE
+#define SUPPORT_PAGE_FAULT_DEBUG
+#define SUPPORT_SYNCTRACKING_BRIDGE
+#define PVR_ANNOTATION_MAX_LEN 96
+#else
+#define PVR_ANNOTATION_MAX_LEN 40
+#define PVRSRV_APPHINT_ENABLEFWPOISONONFREE IMG_FALSE
+#define PVR_BUILD_TYPE "release"
+#define RELEASE
+#endif
+#ifdef CONFIG_DRM_POWERVR_ROGUE_PDUMP
+#define PDUMP
+#define PVRSRV_USE_BRIDGE_LOCK
+#undef PVR_ANNOTATION_MAX_LEN
+#define PVR_ANNOTATION_MAX_LEN 96
+#define SUPPORT_SERVER_SYNC
+#else
+#define PVRSRV_SYNC_SEPARATE_TIMELINES
+#define PVR_USE_FENCE_SYNC_MODEL 1
+#define SUPPORT_MMU_PENDING_FAULT_PROTECTION
+#define SUPPORT_NATIVE_FENCE_SYNC
+#endif
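+
+/* Illustrative sketch only (helper names are hypothetical): poison
+ * constants such as PVRSRV_POISON_ON_ALLOC_VALUE (0xd9) and
+ * PVRSRV_POISON_ON_FREE_VALUE (0x63) above are conventionally consumed
+ * by filling memory on alloc/free, so that use-before-init and
+ * use-after-free reads show distinct byte patterns:
+ *
+ *   #include <string.h>
+ *
+ *   static void PoisonOnAlloc(void *pvMem, size_t uiSize)
+ *   {
+ *       memset(pvMem, PVRSRV_POISON_ON_ALLOC_VALUE, uiSize);
+ *   }
+ *
+ *   static void PoisonOnFree(void *pvMem, size_t uiSize)
+ *   {
+ *       memset(pvMem, PVRSRV_POISON_ON_FREE_VALUE, uiSize);
+ *   }
+ */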
diff --git a/drivers/gpu/drm/img-rogue/1.10/config_kernel.mk b/drivers/gpu/drm/img-rogue/1.10/config_kernel.mk
new file mode 100644
index 00000000000000..30a4f7232cc014
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/config_kernel.mk
@@ -0,0 +1,47 @@
+override CHROMIUMOS_KERNEL := 1
+override KERNEL_DRIVER_DIR := drivers/gpu/drm/img-rogue/1.10
+override METAG_VERSION_NEEDED := 2.8.1.0.3
+override MIPS_VERSION_NEEDED := 2014.07-1
+override PDVFS_COM := PDVFS_COM_HOST
+override PDVFS_COM_AP := 2
+override PDVFS_COM_HOST := 1
+override PDVFS_COM_PMC := 3
+override PVRSRV_MODNAME := pvrsrvkm_1_10
+override PVRSYNC_MODNAME := pvr_sync
+override PVR_BUILD_DIR := mt8173_linux
+override PVR_DVFS := 1
+override PVR_GPIO_MODE := PVR_GPIO_MODE_GENERAL
+override PVR_GPIO_MODE_GENERAL := 1
+override PVR_GPIO_MODE_POWMON_PIN := 2
+override PVR_GPIO_MODE_POWMON_WO_PIN := 3
+override PVR_HANDLE_BACKEND := idr
+override PVR_SYSTEM := mt8173
+override RGX_TIMECORR_CLOCK := mono
+override SUPPORT_BUFFER_SYNC := 1
+override SUPPORT_GPUTRACE_EVENTS := 1
+override SUPPORT_RGX := 1
+override TARGET_OS :=
+override VMM_TYPE := stub
+override undefine SUPPORT_DISPLAY_CLASS
+ifeq ($(CONFIG_DRM_POWERVR_ROGUE_DEBUG),y)
+override BUILD := debug
+override PVR_BUILD_TYPE := debug
+override PVR_RI_DEBUG := 1
+override PVR_SERVICES_DEBUG := 1
+override SUPPORT_DEVICEMEMHISTORY_BRIDGE := 1
+override SUPPORT_PAGE_FAULT_DEBUG := 1
+override SUPPORT_SYNCTRACKING_BRIDGE := 1
+else
+override BUILD := release
+override PVR_BUILD_TYPE := release
+endif
+ifeq ($(CONFIG_DRM_POWERVR_ROGUE_PDUMP),y)
+override EXTRA_PVRSRVKM_COMPONENTS := dbgdrv
+override PDUMP := 1
+override SUPPORT_SERVER_SYNC := 1
+override undefine SUPPORT_FALLBACK_FENCE_SYNC
+override undefine SUPPORT_NATIVE_FENCE_SYNC
+else
+override PVR_USE_FENCE_SYNC_MODEL := 1
+override SUPPORT_NATIVE_FENCE_SYNC := 1
+endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/configs/rgxconfig_km_4.V.2.51.h b/drivers/gpu/drm/img-rogue/1.10/configs/rgxconfig_km_4.V.2.51.h
new file mode 100644
index 00000000000000..cd49551f6e66ec
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/configs/rgxconfig_km_4.V.2.51.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@Title RGX Config BVNC 4.V.2.51
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXCONFIG_KM_4_V_2_51_H
+#define RGXCONFIG_KM_4_V_2_51_H
+
+/***** Automatically generated file (10/10/2018 09:01:01): Do not edit manually ********************/
+/***** Timestamp: (10/10/2018 09:01:01)************************************************************/
+
+#define RGX_BNC_KM_B 4
+#define RGX_BNC_KM_N 2
+#define RGX_BNC_KM_C 51
+
+/******************************************************************************
+ * DDK Defines
+ *****************************************************************************/
+#define RGX_FEATURE_NUM_CLUSTERS (2)
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES (8)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (128*1024)
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS (512)
+#define RGX_FEATURE_META (LTP218)
+#define RGX_FEATURE_META_COREMEM_SIZE (32)
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS (40)
+#define RGX_FEATURE_PERFBUS
+#define RGX_FEATURE_GS_RTA_SUPPORT
+#define RGX_FEATURE_COMPUTE
+#define RGX_FEATURE_PERF_COUNTER_BATCH
+#define RGX_FEATURE_DYNAMIC_DUST_POWER
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE
+#define RGX_FEATURE_COMPUTE_OVERLAP
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS
+#define RGX_FEATURE_FBCDC_ARCHITECTURE (2)
+#define RGX_FEATURE_FBCDC_ALGORITHM (2)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE
+#define RGX_FEATURE_TLA
+#define RGX_FEATURE_CLUSTER_GROUPING
+#define RGX_FEATURE_AXI_ACELITE
+#define RGX_FEATURE_PHYS_BUS_WIDTH (40)
+
+
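+/* Derived values (illustrative additions, not produced by the generator):
+ * with the figures above the SLC holds 128 KiB in 512-bit (64-byte)
+ * lines, i.e. 2048 cache lines in total. */
+#define RGX_SLC_LINE_SIZE_IN_BYTES (RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS / 8)
+#define RGX_SLC_NUM_CACHE_LINES (RGX_FEATURE_SLC_SIZE_IN_BYTES / RGX_SLC_LINE_SIZE_IN_BYTES)
+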
+#endif /* RGXCONFIG_KM_4_V_2_51_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/connection_server.c b/drivers/gpu/drm/img-rogue/1.10/connection_server.c
new file mode 100644
index 00000000000000..d7522742c7d7ff
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/connection_server.c
@@ -0,0 +1,509 @@
+/*************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Handles connections coming from the client and the management
+ of connection-based information
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "handle.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "sync_server.h"
+#include "process_stats.h"
+#include "pdump_km.h"
+#include "lists.h"
+#include "osfunc.h"
+#include "tlstream.h"
+
+/* PID associated with Connection currently being purged by Cleanup thread */
+static IMG_PID gCurrentPurgeConnectionPid;
+
+static PVRSRV_ERROR ConnectionDataDestroy(CONNECTION_DATA *psConnection)
+{
+ PVRSRV_ERROR eError;
+ PROCESS_HANDLE_BASE *psProcessHandleBase;
+ IMG_UINT64 ui64MaxBridgeTime;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+	if (psPVRSRVData->bUnload)
+ {
+ /* driver is unloading so do not allow the bridge lock to be released */
+ ui64MaxBridgeTime = 0;
+ }
+ else
+ {
+ ui64MaxBridgeTime = CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS;
+ }
+
+ if (psConnection == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "ConnectionDataDestroy: Missing connection!"));
+ PVR_ASSERT(0);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Close HWPerfClient stream here even though we created it in
+ * PVRSRVConnectKM(). */
+ if (psConnection->hClientTLStream)
+ {
+ TLStreamClose(psConnection->hClientTLStream);
+ psConnection->hClientTLStream = NULL;
+ PVR_DPF((PVR_DBG_MESSAGE, "Destroyed private stream."));
+ }
+
+ /* Get process handle base to decrement the refcount */
+ psProcessHandleBase = psConnection->psProcessHandleBase;
+
+ if (psProcessHandleBase != NULL)
+ {
+		/* Acquire the lock now to ensure that the unref and the removal
+		 * from the hash table are atomic.
+		 * If the refcount becomes zero then the lock needs to be held
+		 * until the entry is removed from the hash table.
+ */
+ OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+
+ /* In case the refcount becomes 0 we can remove the process handle base */
+ if (OSAtomicDecrement(&psProcessHandleBase->iRefCount) == 0)
+ {
+ uintptr_t uiHashValue;
+
+ uiHashValue = HASH_Remove(psPVRSRVData->psProcessHandleBase_Table, psConnection->pid);
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+ if (!uiHashValue)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to remove handle base from hash table.",
+ __func__));
+ return PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE;
+ }
+
+ eError = PVRSRVFreeKernelHandles(psProcessHandleBase->psHandleBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ConnectionDataDestroy: Couldn't free kernel handles for process (%d)",
+ eError));
+
+ return eError;
+ }
+
+ eError = PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, ui64MaxBridgeTime);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ConnectionDataDestroy: Couldn't free handle base for process (%d)",
+ eError));
+ }
+
+ return eError;
+ }
+
+ OSFreeMem(psProcessHandleBase);
+ }
+ else
+ {
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+ }
+
+ psConnection->psProcessHandleBase = NULL;
+ }
+
+ /* Free handle base for this connection */
+ if (psConnection->psHandleBase != NULL)
+ {
+ eError = PVRSRVFreeHandleBase(psConnection->psHandleBase, ui64MaxBridgeTime);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ConnectionDataDestroy: Couldn't free handle base for connection (%d)",
+ eError));
+ }
+
+ return eError;
+ }
+
+ psConnection->psHandleBase = NULL;
+ }
+
+ if (psConnection->psSyncConnectionData != NULL)
+ {
+ SyncUnregisterConnection(psConnection->psSyncConnectionData);
+ psConnection->psSyncConnectionData = NULL;
+ }
+
+ if (psConnection->psPDumpConnectionData != NULL)
+ {
+ PDumpUnregisterConnection(psConnection->psPDumpConnectionData);
+ psConnection->psPDumpConnectionData = NULL;
+ }
+
+ /* Call environment specific connection data deinit function */
+ if (psConnection->hOsPrivateData != NULL)
+ {
+ eError = OSConnectionPrivateDataDeInit(psConnection->hOsPrivateData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+			         "ConnectionDataDestroy: OSConnectionPrivateDataDeInit failed (%d)",
+ eError));
+
+ return eError;
+ }
+
+ psConnection->hOsPrivateData = NULL;
+ }
+
+ /* Close the PID stats entry as late as possible to catch all frees */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ if (psConnection->hProcessStats != NULL)
+ {
+ PVRSRVStatsDeregisterProcess(psConnection->hProcessStats);
+ psConnection->hProcessStats = NULL;
+ }
+#endif
+
+ OSFreeMemNoStats(psConnection);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData)
+{
+ CONNECTION_DATA *psConnection;
+ PVRSRV_ERROR eError;
+ PROCESS_HANDLE_BASE *psProcessHandleBase;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Allocate connection data area, no stats since process not registered yet */
+ psConnection = OSAllocZMemNoStats(sizeof(*psConnection));
+ if (psConnection == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't allocate connection data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Allocate process statistics as early as possible to catch all allocs */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ eError = PVRSRVStatsRegisterProcess(&psConnection->hProcessStats);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't register process statistics (%d)",
+ eError));
+ goto failure;
+ }
+#endif
+
+ /* Call environment specific connection data init function */
+ eError = OSConnectionPrivateDataInit(&psConnection->hOsPrivateData, pvOSData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: OSConnectionPrivateDataInit failed (%d)",
+ eError));
+ goto failure;
+ }
+
+ psConnection->pid = OSGetCurrentClientProcessIDKM();
+ OSStringLCopy(psConnection->pszProcName, OSGetCurrentClientProcessNameKM(), PVRSRV_CONNECTION_PROCESS_NAME_LEN);
+
+ /* Register this connection with the sync core */
+ eError = SyncRegisterConnection(&psConnection->psSyncConnectionData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't register the sync data"));
+ goto failure;
+ }
+
+ /*
+ * Register this connection with the pdump core. Pass in the sync connection data
+ * as it will be needed later when we only get passed in the PDump connection data.
+ */
+ eError = PDumpRegisterConnection(psConnection->psSyncConnectionData,
+ &psConnection->psPDumpConnectionData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't register the PDump data"));
+ goto failure;
+ }
+
+ /* Allocate handle base for this connection */
+ eError = PVRSRVAllocHandleBase(&psConnection->psHandleBase,
+ PVRSRV_HANDLE_BASE_TYPE_CONNECTION);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVConnectionConnect: Couldn't allocate handle base for connection (%d)",
+ eError));
+ goto failure;
+ }
+
+ /* Try to get process handle base if it already exists */
+ OSLockAcquire(psPVRSRVData->hProcessHandleBase_Lock);
+ psProcessHandleBase = (PROCESS_HANDLE_BASE*) HASH_Retrieve(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+ psConnection->pid);
+
+ /* In case there is none we are going to allocate one */
+ if (psProcessHandleBase == NULL)
+ {
+ psProcessHandleBase = OSAllocZMem(sizeof(PROCESS_HANDLE_BASE));
+ if (psProcessHandleBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate handle base, oom.",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failureLock;
+ }
+
+ /* Allocate handle base for this process */
+ eError = PVRSRVAllocHandleBase(&psProcessHandleBase->psHandleBase,
+ PVRSRV_HANDLE_BASE_TYPE_PROCESS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Couldn't allocate handle base for process (%d)",
+ __func__,
+ eError));
+ OSFreeMem(psProcessHandleBase);
+ goto failureLock;
+ }
+
+ /* Insert the handle base into the global hash table */
+ if (!HASH_Insert(PVRSRVGetPVRSRVData()->psProcessHandleBase_Table,
+ psConnection->pid,
+ (uintptr_t) psProcessHandleBase))
+ {
+
+ eError = PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE;
+
+ PVRSRVFreeHandleBase(psProcessHandleBase->psHandleBase, 0);
+
+ OSFreeMem(psProcessHandleBase);
+ goto failureLock;
+ }
+ }
+ OSAtomicIncrement(&psProcessHandleBase->iRefCount);
+
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+
+ OSLockAcquire(psPVRSRVData->hConnectionsLock);
+ dllist_add_to_tail(&psPVRSRVData->sConnections, &psConnection->sConnectionListNode);
+ OSLockRelease(psPVRSRVData->hConnectionsLock);
+
+ psConnection->psProcessHandleBase = psProcessHandleBase;
+
+ *ppvPrivData = psConnection;
+
+ return eError;
+
+failureLock:
+ OSLockRelease(psPVRSRVData->hProcessHandleBase_Lock);
+failure:
+ ConnectionDataDestroy(psConnection);
+
+ return eError;
+}
+
+static PVRSRV_ERROR _CleanupThreadPurgeConnectionData(void *pvConnectionData)
+{
+ PVRSRV_ERROR eErrorConnection, eErrorKernel;
+ CONNECTION_DATA *psConnectionData = pvConnectionData;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+
+ gCurrentPurgeConnectionPid = psConnectionData->pid;
+
+ eErrorConnection = ConnectionDataDestroy(psConnectionData);
+ if (eErrorConnection != PVRSRV_OK)
+ {
+ if (eErrorConnection == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "_CleanupThreadPurgeConnectionData: Failed to purge connection data %p "
+ "(deferring destruction)",
+ psConnectionData));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "_CleanupThreadPurgeConnectionData: Connection data %p deferred destruction finished",
+ psConnectionData));
+ }
+
+	/* Check if it is possible to resize the global handle base */
+ eErrorKernel = PVRSRVPurgeHandles(KERNEL_HANDLE_BASE);
+ if (eErrorKernel != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_CleanupThreadPurgeConnectionData: Purge of global handle pool failed (%d)",
+ eErrorKernel));
+ }
+
+ gCurrentPurgeConnectionPid = 0;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+
+ return eErrorConnection;
+}
+
+void PVRSRVConnectionDisconnect(void *pvDataPtr)
+{
+ CONNECTION_DATA *psConnectionData = pvDataPtr;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ OSLockAcquire(psPVRSRVData->hConnectionsLock);
+ dllist_remove_node(&psConnectionData->sConnectionListNode);
+ OSLockRelease(psPVRSRVData->hConnectionsLock);
+
+ /* Notify the PDump core if the pdump control client is disconnecting */
+ if (psConnectionData->ui32ClientFlags & SRV_FLAGS_PDUMPCTRL)
+ {
+ PDumpDisconnectionNotify();
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+ {
+ /* Defer the release of the connection data */
+ psConnectionData->sCleanupThreadFn.pfnFree = _CleanupThreadPurgeConnectionData;
+ psConnectionData->sCleanupThreadFn.pvData = psConnectionData;
+ psConnectionData->sCleanupThreadFn.bDependsOnHW = IMG_FALSE;
+ CLEANUP_THREAD_SET_RETRY_COUNT(&psConnectionData->sCleanupThreadFn,
+ CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+ PVRSRVCleanupThreadAddWork(&psConnectionData->sCleanupThreadFn);
+ }
+}
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void)
+{
+ return gCurrentPurgeConnectionPid;
+}
+
+/* Prefix for debug messages about Active Connections */
+#define ACTIVE_PREFIX "Active connects:"
+
+void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PDLLIST_NODE pNext, pNode;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ OSLockAcquire(psPVRSRVData->hConnectionsLock);
+ if (dllist_is_empty(&psPVRSRVData->sConnections))
+ {
+ PVR_DUMPDEBUG_LOG(ACTIVE_PREFIX " No active connections");
+ }
+ else
+ {
+#define MAX_DEBUG_DUMP_STRING_LEN 150
+#define MAX_DEBUG_DUMP_CONNECTION_STR_LEN 26
+ IMG_CHAR sActiveConnections[MAX_DEBUG_DUMP_STRING_LEN];
+ IMG_UINT16 i, uiPos = 0;
+ IMG_BOOL bPrinted = IMG_FALSE;
+ size_t uiSize = sizeof (sActiveConnections);
+
+ OSStringLCopy(sActiveConnections, ACTIVE_PREFIX, uiSize);
+ uiPos = sizeof (ACTIVE_PREFIX) - 1; /* Next buffer location to fill */
+ uiSize -= uiPos; /* Remaining space to use in sActiveConnections[] */
+
+ dllist_foreach_node(&psPVRSRVData->sConnections, pNode, pNext)
+ {
+ CONNECTION_DATA *sData = IMG_CONTAINER_OF(pNode, CONNECTION_DATA, sConnectionListNode);
+
+ IMG_CHAR sTmpBuff[MAX_DEBUG_DUMP_CONNECTION_STR_LEN];
+ i = OSSNPrintf(sTmpBuff, MAX_DEBUG_DUMP_CONNECTION_STR_LEN, " %d (%s),", sData->pid, sData->pszProcName);
+			i = MIN(MAX_DEBUG_DUMP_CONNECTION_STR_LEN - 1, i); /* OSSNPrintf returns the untruncated length; at most STR_LEN-1 chars are stored */
+ bPrinted = IMG_FALSE;
+
+ OSStringLCopy(sActiveConnections+uiPos, sTmpBuff, uiSize);
+
+ // Move the write offset to the end of the current string
+ uiPos += i;
+ // Update the amount of remaining space available to copy into
+ uiSize -= i;
+
+ // If there is not enough space to add another connection to this line, output the line
+ if (uiSize <= MAX_DEBUG_DUMP_CONNECTION_STR_LEN)
+ {
+ PVR_DUMPDEBUG_LOG("%s", sActiveConnections);
+
+ /*
+ * Remove the "Active connects:" prefix from the buffer.
+ * Leave the subsequent buffer contents indented by the same
+ * amount to aid in interpreting the debug output.
+ */
+ OSCachedMemSet(sActiveConnections, ' ', sizeof (ACTIVE_PREFIX));
+ uiPos = sizeof (ACTIVE_PREFIX) - 1;
+ // Reset the amount of space available to copy into
+ uiSize = MAX_DEBUG_DUMP_STRING_LEN - uiPos;
+ bPrinted = IMG_TRUE;
+ }
+ }
+
+ // Only print the current line if it hasn't already been printed
+ if (!bPrinted)
+ {
+			// Strip off the final comma
+ sActiveConnections[OSStringNLength(sActiveConnections, MAX_DEBUG_DUMP_STRING_LEN) - 1] = '\0';
+ PVR_DUMPDEBUG_LOG("%s", sActiveConnections);
+ }
+#undef MAX_DEBUG_DUMP_STRING_LEN
+#undef MAX_DEBUG_DUMP_CONNECTION_STR_LEN
+ }
+ OSLockRelease(psPVRSRVData->hConnectionsLock);
+}
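+
+/* Reduced sketch of the lookup-or-create-then-reference pattern that
+ * PVRSRVConnectionConnect() above uses for the per-process handle base.
+ * The types and table API here are hypothetical stand-ins for HASH_*
+ * and OSLock*, shown only to make the locking/refcount order explicit:
+ *
+ *   static PROCESS_ENTRY *LookupOrCreate(TABLE *psTable, IMG_PID uiPid)
+ *   {
+ *       PROCESS_ENTRY *psEntry;
+ *
+ *       Lock(psTable);
+ *       psEntry = Find(psTable, uiPid);
+ *       if (psEntry == NULL)
+ *       {
+ *           psEntry = AllocZeroed(sizeof(*psEntry));
+ *           if (psEntry == NULL || !Insert(psTable, uiPid, psEntry))
+ *           {
+ *               Free(psEntry);
+ *               Unlock(psTable);
+ *               return NULL;
+ *           }
+ *       }
+ *       psEntry->iRefCount++;   // reference taken under the lock, as above
+ *       Unlock(psTable);
+ *       return psEntry;
+ *   }
+ */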
diff --git a/drivers/gpu/drm/img-rogue/1.10/connection_server.h b/drivers/gpu/drm/img-rogue/1.10/connection_server.h
new file mode 100644
index 00000000000000..c42552eedbde3f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/connection_server.h
@@ -0,0 +1,123 @@
+/**************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description API for server side connection management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_CONNECTION_SERVER_H_)
+#define _CONNECTION_SERVER_H_
+
+
+#include "img_types.h"
+#include "handle.h"
+#include "pvrsrv_cleanup.h"
+
+/* Timeout for the current time slice */
+extern IMG_UINT64 gui64TimesliceLimit;
+/* Number of handle data entries freed during the current time slice */
+extern IMG_UINT32 gui32HandleDataFreeCounter;
+/* Maximum time the freeing of resources may hold the lock */
+#define CONNECTION_DEFERRED_CLEANUP_TIMESLICE_NS (3000 * 1000) /* 3ms */
+
+typedef struct _CONNECTION_DATA_
+{
+ PVRSRV_HANDLE_BASE *psHandleBase;
+ PROCESS_HANDLE_BASE *psProcessHandleBase;
+ struct _SYNC_CONNECTION_DATA_ *psSyncConnectionData;
+ struct _PDUMP_CONNECTION_DATA_ *psPDumpConnectionData;
+
+ /* Holds the client flags supplied at connection time */
+ IMG_UINT32 ui32ClientFlags;
+
+ /*
+ * OS specific data can be stored via this handle.
+ * See osconnection_server.h for a generic mechanism
+ * for initialising this field.
+ */
+ IMG_HANDLE hOsPrivateData;
+
+#define PVRSRV_CONNECTION_PROCESS_NAME_LEN (16)
+ IMG_PID pid;
+ IMG_CHAR pszProcName[PVRSRV_CONNECTION_PROCESS_NAME_LEN];
+
+ IMG_HANDLE hProcessStats;
+
+ IMG_HANDLE hClientTLStream;
+
+ /* Structure which is hooked into the cleanup thread work list */
+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+
+ DLLIST_NODE sConnectionListNode;
+
+ /* List navigation for deferred freeing of connection data */
+ struct _CONNECTION_DATA_ **ppsThis;
+ struct _CONNECTION_DATA_ *psNext;
+} CONNECTION_DATA;
+
+#include "osconnection_server.h"
+
+PVRSRV_ERROR PVRSRVConnectionConnect(void **ppvPrivData, void *pvOSData);
+void PVRSRVConnectionDisconnect(void *pvPrivData);
+
+IMG_PID PVRSRVGetPurgeConnectionPid(void);
+
+void PVRSRVConnectionDebugNotify(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVConnectionPrivateData)
+#endif
+static INLINE
+IMG_HANDLE PVRSRVConnectionPrivateData(CONNECTION_DATA *psConnection)
+{
+ return (psConnection != NULL) ? psConnection->hOsPrivateData : NULL;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVGetDevData)
+#endif
+static INLINE
+PVRSRV_DEVICE_NODE * PVRSRVGetDevData(CONNECTION_DATA *psConnection)
+{
+ return OSGetDevData(psConnection);
+}
+
+#endif /* !defined(_CONNECTION_SERVER_H_) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/cores/rgxcore_km_4.40.2.51.h b/drivers/gpu/drm/img-rogue/1.10/cores/rgxcore_km_4.40.2.51.h
new file mode 100644
index 00000000000000..a85ac3500e6199
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/cores/rgxcore_km_4.40.2.51.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@Title RGX Core BVNC 4.40.2.51
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXCORE_KM_4_40_2_51_H_
+#define _RGXCORE_KM_4_40_2_51_H_
+
+/***** Automatically generated file (06/02/2017 07:01:06): Do not edit manually ********************/
+/***** Timestamp: (06/02/2017 07:01:06)************************************************************/
+/***** CS: @3254374 ******************************************************************/
+
+
+/******************************************************************************
+ * BVNC = 4.40.2.51
+ *****************************************************************************/
+#define RGX_BVNC_KM_B 4
+#define RGX_BVNC_KM_V 40
+#define RGX_BVNC_KM_N 2
+#define RGX_BVNC_KM_C 51
+
+/******************************************************************************
+ * Errata
+ *****************************************************************************/
+
+#define FIX_HW_BRN_50767
+#define FIX_HW_BRN_54441
+#define FIX_HW_BRN_57193
+#define FIX_HW_BRN_63142
+
+
+
+/******************************************************************************
+ * Enhancements
+ *****************************************************************************/
+#define HW_ERN_36400
+#define HW_ERN_42290
+#define HW_ERN_42606
+
+
+
+#endif /* _RGXCORE_KM_4_40_2_51_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/dbgdriv.c b/drivers/gpu/drm/img-rogue/1.10/dbgdriv.c
new file mode 100644
index 00000000000000..b2f8a6509ceba0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dbgdriv.c
@@ -0,0 +1,1562 @@
+/*************************************************************************/ /*!
+@File
+@Title Debug Driver
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description 32 Bit kernel mode debug driver
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma warning(disable:4201)
+#pragma warning(disable:4214)
+#pragma warning(disable:4115)
+#pragma warning(disable:4514)
+
+
+#include <ntddk.h>
+#include <windef.h>
+#include <winerror.h>
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <linux/string.h>
+#endif
+
+#if defined (__QNXNTO__) || defined (INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma warning(default:4214)
+#pragma warning(default:4115)
+#endif /* _WIN32 */
+
+
+/******************************************************************************
+ Types
+******************************************************************************/
+
+/*
+ Per-buffer control structure.
+*/
+typedef struct _DBG_STREAM_
+{
+ struct _DBG_STREAM_* psNext;
+ struct _DBG_STREAM_* psInitStream;
+ struct _DBG_STREAM_* psDeinitStream;
+ IMG_UINT32 ui32Flags; /*!< flags (see DEBUG_FLAGS) */
+ void *pvBase;
+ IMG_UINT32 ui32Size;
+ IMG_UINT32 ui32RPtr;
+ IMG_UINT32 ui32WPtr;
+
+ IMG_UINT32 ui32Marker; /*!< Size marker for file splitting */
+
+ IMG_UINT32 ui32InitPhaseWOff; /*!< snapshot offset for init phase end for follow-on pdump */
+
+ IMG_CHAR szName[DEBUG_STREAM_NAME_MAX]; /* Give this a size, some compilers don't like [] */
+} DBG_STREAM;
+
+/* Check 4xDBG_STREAM will fit in one page */
+static_assert((sizeof(DBG_STREAM) * 4) < HOST_PAGESIZE, "DBG_STREAM is too large");
+
+/******************************************************************************
+ Global variables
+******************************************************************************/
+
+static PDBG_STREAM g_psStreamList;
+
+/* Mutex used to prevent UM threads (via the dbgdrv ioctl interface) and KM
+ * threads (from pvrsrvkm via the ExtDBG API) entering the debug driver core
+ * and changing the state of share data at the same time.
+ */
+void * g_pvAPIMutex=NULL;
+
+static IMG_UINT32 g_PDumpCurrentFrameNo = 0;
+
+DBGKM_SERVICE_TABLE g_sDBGKMServices =
+{
+ sizeof (DBGKM_SERVICE_TABLE),
+ ExtDBGDrivCreateStream,
+ ExtDBGDrivDestroyStream,
+ ExtDBGDrivWrite2,
+ ExtDBGDrivSetMarker,
+ ExtDBGDrivGetMarker,
+ ExtDBGDrivWaitForEvent,
+ ExtDBGDrivGetCtrlState,
+ ExtDBGDrivSetFrame
+};
+
+
+/***************************************************************************
+ Forward declarations
+***************************************************************************/
+
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+void IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+void * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+void IMG_CALLCONV DBGDrivSetCaptureMode(PDBG_STREAM psStream,IMG_UINT32 ui32Mode,IMG_UINT32 ui32Start,IMG_UINT32 ui32Stop,IMG_UINT32 ui32SampleRate);
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+void IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void);
+void IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame);
+void DestroyAllStreams(void);
+
+/* Static function declarations */
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream);
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize);
+static void InvalidateAllStreams(void);
+
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*!
+ @name ExtDBGDrivCreateStream
+ */
+IMG_BOOL IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit)
+{
+	IMG_BOOL bRet;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+	bRet=DBGDrivCreateStream(pszName, ui32Flags, ui32Size, phInit, phMain, phDeinit);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+	return bRet;
+}
+
+/*!
+ @name ExtDBGDrivDestroyStream
+ */
+void IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivDestroyStream(hInit, hMain, hDeinit);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+/*!
+ @name ExtDBGDrivFindStream
+ */
+void * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ void * pvRet;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ pvRet=DBGDrivFindStream(pszName, bResetStream);
+ if (pvRet == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ExtDBGDrivFindStream: Stream not found"));
+ }
+
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return pvRet;
+}
+
+/*!
+ @name ExtDBGDrivRead
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Ret;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivRead(psStream, ui32BufID, ui32OutBuffSize, pui8OutBuf);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+/*!
+ @name ExtDBGDrivWrite2
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+ IMG_UINT32 ui32Ret;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Ret=DBGDrivWrite2(psStream, pui8InBuf, ui32InBuffSize);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Ret;
+}
+
+/*!
+ @name ExtDBGDrivSetMarker
+ */
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetMarker(psStream, ui32Marker);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+/*!
+ @name ExtDBGDrivGetMarker
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Marker;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Marker = DBGDrivGetMarker(psStream);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Marker;
+}
+
+/*!
+ @name ExtDBGDrivWaitForEvent
+ */
+void IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ DBGDrivWaitForEvent(eEvent);
+#else /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+ PVR_UNREFERENCED_PARAMETER(eEvent); /* PRQA S 3358 */
+#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+}
+
+
+/*!
+ @name ExtDBGDrivGetCtrlState
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+ IMG_UINT32 ui32State = 0;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32State = DBGDrivGetCtrlState(psStream, ui32StateID);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32State;
+}
+
+/*!
+ @name ExtDBGDrivGetFrame
+ */
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void)
+{
+ IMG_UINT32 ui32Frame = 0;
+
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ ui32Frame = DBGDrivGetFrame();
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return ui32Frame;
+}
+
+/*!
+ @name ExtDBGDrivGetCtrlState
+ */
+void IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+ /* Acquire API Mutex */
+ HostAquireMutex(g_pvAPIMutex);
+
+ DBGDrivSetFrame(ui32Frame);
+
+ /* Release API Mutex */
+ HostReleaseMutex(g_pvAPIMutex);
+
+ return;
+}
+
+
+
+/*!****************************************************************************
+ @name AtoI
+ @brief Returns the integer value of a decimal or hexadecimal string
+ @param szIn - String with a decimal value, or a hex value prefixed with 0x/0X
+ @return IMG_UINT32 integer value, 0 if string is null or not valid
+ Based on Max's one; now copes with (only) hex dwords, upper or lower case a-f.
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn)
+{
+ IMG_INT iLen = 0;
+ IMG_UINT32 ui32Value = 0;
+ IMG_UINT32 ui32Digit=1;
+ IMG_UINT32 ui32Base=10;
+ IMG_INT iPos;
+ IMG_CHAR bc;
+
+ /* get len of string */
+ while (szIn[iLen] > 0)
+ {
+ iLen ++;
+ }
+
+ /* nothing to do */
+ if (iLen == 0)
+ {
+ return (0);
+ }
+
+ /* See if we have an 'x' or 'X' before the number to make it a hex number */
+ iPos=0;
+ while (szIn[iPos] == '0')
+ {
+ iPos++;
+ }
+ if (szIn[iPos] == '\0')
+ {
+ return 0;
+ }
+ if (szIn[iPos] == 'x' || szIn[iPos] == 'X')
+ {
+ ui32Base=16;
+ szIn[iPos]='0';
+ }
+
+ /* go through string from right (least significant) to left */
+ for (iPos = iLen - 1; iPos >= 0; iPos --)
+ {
+ bc = szIn[iPos];
+
+ if ( (bc >= 'a') && (bc <= 'f') && ui32Base == 16) /* handle lower case a-f */
+ {
+ bc -= 'a' - 0xa;
+ }
+ else
+ if ( (bc >= 'A') && (bc <= 'F') && ui32Base == 16) /* handle upper case A-F */
+ {
+ bc -= 'A' - 0xa;
+ }
+ else
+ if ((bc >= '0') && (bc <= '9')) /* if char out of range, return 0 */
+ {
+ bc -= '0';
+ }
+ else
+ return (0);
+
+ ui32Value += (IMG_UINT32)bc * ui32Digit;
+
+ ui32Digit = ui32Digit * ui32Base;
+ }
+ return (ui32Value);
+}
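+
+/* Usage sketch for AtoI() above. It accepts decimal ("4096") and
+ * 0x/0X-prefixed hex ("0x1000"); note that it rewrites the 'x' in place,
+ * so the argument must be a writable buffer:
+ *
+ *   IMG_CHAR szDec[] = "4096";
+ *   IMG_CHAR szHex[] = "0x1000";
+ *   IMG_UINT32 ui32A = AtoI(szDec);   // 4096
+ *   IMG_UINT32 ui32B = AtoI(szHex);   // also 4096
+ */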
+
+
+/*!****************************************************************************
+ @name StreamValid
+ @brief Validates supplied debug buffer.
+ @param psStream - debug stream
+ @return true if valid
+*****************************************************************************/
+static IMG_BOOL StreamValid(PDBG_STREAM psStream)
+{
+ PDBG_STREAM psThis;
+
+ psThis = g_psStreamList;
+
+ while (psThis)
+ {
+ if (psStream && ((psThis == psStream) ||
+ (psThis->psInitStream == psStream) ||
+ (psThis->psDeinitStream == psStream)) )
+ {
+ return(IMG_TRUE);
+ }
+ else
+ {
+ psThis = psThis->psNext;
+ }
+ }
+
+ return(IMG_FALSE);
+}
+
+
+/*!****************************************************************************
+ @name StreamValidForRead
+ @brief Validates supplied debug buffer for read op.
+ @param psStream - debug stream
+ @return true if readable
+*****************************************************************************/
+static IMG_BOOL StreamValidForRead(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->ui32Flags & DEBUG_FLAGS_WRITEONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name StreamValidForWrite
+ @brief Validates supplied debug buffer for write op.
+ @param psStream - debug stream
+ @return true if writable
+*****************************************************************************/
+static IMG_BOOL StreamValidForWrite(PDBG_STREAM psStream)
+{
+ if( StreamValid(psStream) &&
+ ((psStream->ui32Flags & DEBUG_FLAGS_READONLY) == 0) )
+ {
+ return(IMG_TRUE);
+ }
+
+ return(IMG_FALSE);
+}
+
+/*!****************************************************************************
+ @name Write
+ @brief Copies data from a buffer into selected stream. Stream size is fixed.
+ @param psStream - stream for output
+ @param pui8Data - input buffer
+ @param ui32InBuffSize - size of input
+ @return none
+*****************************************************************************/
+static void Write(PDBG_STREAM psStream,IMG_PUINT8 pui8Data,IMG_UINT32 ui32InBuffSize)
+{
+ /*
+		Split the copy into two parts as necessary (if we're allowed to wrap).
+ */
+ if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) == 0)
+ {
+ PVR_ASSERT( (psStream->ui32WPtr + ui32InBuffSize) < psStream->ui32Size );
+ }
+
+ if ((psStream->ui32WPtr + ui32InBuffSize) > psStream->ui32Size)
+ {
+		/* Yes, we need two parts; calculate their sizes */
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32WPtr;
+ IMG_UINT32 ui32B2 = ui32InBuffSize - ui32B1;
+
+ /* Copy first block to current location */
+ HostMemCopy((void *)((uintptr_t)psStream->pvBase + psStream->ui32WPtr),
+ (void *) pui8Data,
+ ui32B1);
+
+ /* Copy second block to start of buffer */
+ HostMemCopy(psStream->pvBase,
+ (void *)(pui8Data + ui32B1),
+ ui32B2);
+
+ /* Set pointer to be the new end point */
+ psStream->ui32WPtr = ui32B2;
+ }
+ else
+ { /* Can fit block in single chunk */
+ HostMemCopy((void *)((uintptr_t)psStream->pvBase + psStream->ui32WPtr),
+ (void *) pui8Data,
+ ui32InBuffSize);
+
+ psStream->ui32WPtr += ui32InBuffSize;
+
+ if (psStream->ui32WPtr == psStream->ui32Size)
+ {
+ psStream->ui32WPtr = 0;
+ }
+ }
+}
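+
+/* Worked example of the wrap-around path in Write() above, assuming
+ * ui32Size = 16, ui32WPtr = 12 and ui32InBuffSize = 7:
+ *   ui32B1 = 16 - 12 = 4 bytes copied to offsets 12..15,
+ *   ui32B2 = 7 - 4   = 3 bytes copied to offsets 0..2,
+ * leaving ui32WPtr = 3. */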
+
+
+/*!****************************************************************************
+ @name WriteExpandingBuffer
+ @brief Copies data from a buffer into selected stream. Stream size may be expandable.
+ @param psStream - stream for output
+ @param pui8InBuf - input buffer
+ @param ui32InBuffSize - size of input
+ @return bytes copied
+*****************************************************************************/
+static IMG_UINT32 WriteExpandingBuffer(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+ IMG_UINT ui32Space;
+
+ /*
+		How much space have we got in the buffer?
+ */
+ ui32Space = SpaceInStream(psStream);
+
+ /*
+ Check if we can expand the buffer
+ */
+ if (psStream->ui32Flags & DEBUG_FLAGS_NO_BUF_EXPANDSION)
+ {
+ /*
+			Don't do anything if we've got less than 32 bytes of space and
+			we're not allowing expansion of buffer space...
+ */
+ if (ui32Space < 32)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is full and isn't expandable", psStream));
+ return(0);
+ }
+ }
+ else
+ {
+ if ((ui32Space < 32) || (ui32Space <= (ui32InBuffSize + 4)))
+ {
+ IMG_UINT32 ui32NewBufSize;
+
+ /*
+ Find new buffer size, double the current size or increase by 1MB
+ */
+ ui32NewBufSize = MIN(psStream->ui32Size<<1,psStream->ui32Size+(1<<20));
+ ui32NewBufSize = MIN(ui32NewBufSize, PDUMP_STREAMBUF_MAX_SIZE_MB<<20);
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanding buffer size = %x, new size = %x",
+ psStream->ui32Size, ui32NewBufSize));
+
+ if (ui32InBuffSize > psStream->ui32Size)
+ {
+ ui32NewBufSize += ui32InBuffSize;
+				PVR_DPF((PVR_DBG_ERROR, "WriteExpandingBuffer: buffer %p is expanding to %u bytes to fit the input buffer", psStream, ui32NewBufSize));
+ }
+
+ /*
+ Attempt to expand the buffer
+ */
+ if ((ui32NewBufSize < psStream->ui32Size) ||
+ !ExpandStreamBuffer(psStream,ui32NewBufSize))
+ {
+ if (ui32Space < 32)
+ {
+ if((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+ {
+ return(0);
+ }
+ else
+ {
+ /* out of memory */
+ PVR_LOG(("DBGDRV: Error: unable to expand %p stream. Out of PDump memory, InvalidateAllStreams() called", psStream));
+ InvalidateAllStreams();
+ return (0xFFFFFFFFUL);
+ }
+ }
+ }
+
+ /*
+ Recalc the space in the buffer
+ */
+ ui32Space = SpaceInStream(psStream);
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Expanded buffer, free space = %x",
+ ui32Space));
+ }
+ }
+
+ /*
+ Only copy what we can..
+ */
+ if (ui32Space <= (ui32InBuffSize + 4))
+ {
+ ui32InBuffSize = ui32Space - 4;
+ }
+
+ /*
+ Write the stuff...
+ */
+ Write(psStream,pui8InBuf,ui32InBuffSize);
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ if (ui32InBuffSize)
+ {
+ HostSignalEvent(DBG_EVENT_STREAM_DATA);
+ }
+#endif
+ return(ui32InBuffSize);
+}
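+
+/* The growth policy in WriteExpandingBuffer() above, worked through:
+ *   64 KiB buffer: MIN(128 KiB, 64 KiB + 1 MiB) = 128 KiB (doubling wins)
+ *   4 MiB buffer:  MIN(8 MiB, 4 MiB + 1 MiB)    = 5 MiB   (+1 MiB wins)
+ * Either result is then clamped to PDUMP_STREAMBUF_MAX_SIZE_MB << 20,
+ * i.e. 16 MiB with the config_kernel.h earlier in this patch. */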
+
+/*****************************************************************************
+******************************************************************************
+******************************************************************************
+ THE ACTUAL FUNCTIONS
+******************************************************************************
+******************************************************************************
+*****************************************************************************/
+
+static void DBGDrivSetStreamName(PDBG_STREAM psStream,
+ IMG_CHAR* pszBase,
+ IMG_CHAR* pszExt)
+{
+ IMG_CHAR* pCh = psStream->szName;
+ IMG_CHAR* pChEnd = psStream->szName+DEBUG_STREAM_NAME_MAX-8;
+ IMG_CHAR* pSrcCh;
+ IMG_CHAR* pSrcChEnd;
+
+ for (pSrcCh = pszBase, pSrcChEnd = pszBase+strlen(pszBase);
+ (pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+ pSrcCh++, pCh++)
+ {
+ *pCh = *pSrcCh;
+ }
+
+ for (pSrcCh = pszExt, pSrcChEnd = pszExt+strlen(pszExt);
+ (pSrcCh < pSrcChEnd) && (pCh < pChEnd) ;
+ pSrcCh++, pCh++)
+ {
+ *pCh = *pSrcCh;
+ }
+
+ *pCh = '\0';
+}
+
+/*!****************************************************************************
+ @name DBGDrivCreateStream
+ @brief Creates a pdump/debug stream
+ @param pszName - stream name
+ @param ui32Flags - output flags, text stream bit is set for pdumping
+ @param ui32Size - size of stream buffer in pages
+ @param phInit, phMain, phDeinit - returned handles for the init/main/deinit streams
+ @return IMG_TRUE on success, IMG_FALSE on failure
+*****************************************************************************/
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32Size,
+ IMG_HANDLE* phInit,
+ IMG_HANDLE* phMain,
+ IMG_HANDLE* phDeinit)
+{
+ IMG_BOOL bUseNonPagedMem4Buffers = ((ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0);
+ PDBG_STREAM psStream = NULL;
+ PDBG_STREAM psInitStream = NULL;
+ PDBG_STREAM psStreamDeinit = NULL;
+ void* pvBase = NULL;
+
+ /*
+ If we already have a buffer using this name just return
+ its handle.
+ */
+ psStream = (PDBG_STREAM) DBGDrivFindStream(pszName, IMG_FALSE);
+ if (psStream)
+ {
+ *phInit = psStream->psInitStream;
+ *phMain = psStream;
+ *phDeinit = psStream->psDeinitStream;
+ return IMG_TRUE;
+ }
+
+ /*
+ Allocate memory for control structures
+ */
+ psStream = HostNonPageablePageAlloc(1);
+ if (!psStream)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc control structs\n\r"));
+ goto errCleanup;
+ }
+ psInitStream = psStream+1;
+ psStreamDeinit = psStream+2;
+
+
+ /* Allocate memory for Main buffer */
+ psStream->pvBase = NULL;
+ if (bUseNonPagedMem4Buffers)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc Stream buffer\n\r"));
+ goto errCleanup;
+ }
+
+ /*
+ Setup debug buffer state.
+ */
+ psStream->psNext = NULL;
+ psStream->pvBase = pvBase;
+ psStream->ui32Flags = ui32Flags | DEBUG_FLAGS_CIRCULAR;
+ psStream->ui32Size = ui32Size * HOST_PAGESIZE;
+ psStream->ui32RPtr = 0;
+ psStream->ui32WPtr = 0;
+ psStream->ui32Marker = 0;
+ psStream->ui32InitPhaseWOff = 0;
+ DBGDrivSetStreamName(psStream, pszName, "");
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStream->szName));
+
+ /* Allocate memory for Init buffer */
+ psInitStream->pvBase = NULL;
+ if (bUseNonPagedMem4Buffers)
+ {
+ pvBase = HostNonPageablePageAlloc(ui32Size);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(ui32Size);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc InitStream buffer\n\r"));
+ goto errCleanup;
+ }
+
+ /* Initialise the stream for the Init phase */
+ psInitStream->psNext = psInitStream->psInitStream = psInitStream->psDeinitStream = NULL;
+ psInitStream->ui32Flags = ui32Flags;
+ psInitStream->pvBase = pvBase;
+ psInitStream->ui32Size = ui32Size * HOST_PAGESIZE;
+ psInitStream->ui32RPtr = 0;
+ psInitStream->ui32WPtr = 0;
+ psInitStream->ui32Marker = 0;
+ psInitStream->ui32InitPhaseWOff = 0;
+ DBGDrivSetStreamName(psInitStream, pszName, "_Init");
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with init name (%s)\n\r", psInitStream->szName));
+
+ psStream->psInitStream = psInitStream;
+
+ /* Allocate memory for Deinit buffer */
+ psStreamDeinit->pvBase = NULL;
+ if (bUseNonPagedMem4Buffers)
+ {
+ pvBase = HostNonPageablePageAlloc(1);
+ }
+ else
+ {
+ pvBase = HostPageablePageAlloc(1);
+ }
+
+ if (!pvBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DBGDriv: Couldn't alloc DeinitStream buffer\n\r"));
+ goto errCleanup;
+ }
+
+ /* Initialise the stream for the Deinit phase */
+ psStreamDeinit->psNext = psStreamDeinit->psInitStream = psStreamDeinit->psDeinitStream = NULL;
+ psStreamDeinit->pvBase = pvBase;
+ psStreamDeinit->ui32Flags = ui32Flags;
+ psStreamDeinit->ui32Size = HOST_PAGESIZE;
+ psStreamDeinit->ui32RPtr = 0;
+ psStreamDeinit->ui32WPtr = 0;
+ psStreamDeinit->ui32Marker = 0;
+ psStreamDeinit->ui32InitPhaseWOff = 0;
+ DBGDrivSetStreamName(psStreamDeinit, pszName, "_Deinit");
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Created stream with deinit name (%s)\n\r", psStreamDeinit->szName));
+
+ psStream->psDeinitStream = psStreamDeinit;
+
+ /*
+ Insert into list.
+ */
+ psStream->psNext = g_psStreamList;
+ g_psStreamList = psStream;
+
+ AddSIDEntry(psStream);
+
+ *phInit = psStream->psInitStream;
+ *phMain = psStream;
+ *phDeinit = psStream->psDeinitStream;
+
+ return IMG_TRUE;
+
+errCleanup:
+ if (bUseNonPagedMem4Buffers)
+ {
+ if (psStream) HostNonPageablePageFree(psStream->pvBase);
+ if (psInitStream) HostNonPageablePageFree(psInitStream->pvBase);
+ if (psStreamDeinit) HostNonPageablePageFree(psStreamDeinit->pvBase);
+ }
+ else
+ {
+ if (psStream) HostPageablePageFree(psStream->pvBase);
+ if (psInitStream) HostPageablePageFree(psInitStream->pvBase);
+ if (psStreamDeinit) HostPageablePageFree(psStreamDeinit->pvBase);
+ }
+ HostNonPageablePageFree(psStream);
+ psStream = psInitStream = psStreamDeinit = NULL;
+ return IMG_FALSE;
+}
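+
+/* Usage sketch (illustrative only; the stream name, flag value and page
+ * count are assumptions for the example):
+ *
+ *   IMG_HANDLE hInit, hMain, hDeinit;
+ *   if (DBGDrivCreateStream("PDumpStream", 0, 16, &hInit, &hMain, &hDeinit))
+ *   {
+ *       DBGDrivWrite2((PDBG_STREAM)hMain, pui8Data, ui32DataLen);
+ *       DBGDrivDestroyStream(hInit, hMain, hDeinit);
+ *   }
+ *
+ * A second call with the same name returns the existing three handles rather
+ * than creating a duplicate stream.
+ */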
+
+/*!****************************************************************************
+ @name DBGDrivDestroyStream
+ @brief Delete a stream and free its memory
+ @param psStream - stream to be removed
+ @return none
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit)
+{
+ PDBG_STREAM psStreamInit = (PDBG_STREAM) hInit;
+ PDBG_STREAM psStream = (PDBG_STREAM) hMain;
+ PDBG_STREAM psStreamDeinit = (PDBG_STREAM) hDeinit;
+ PDBG_STREAM psStreamThis;
+ PDBG_STREAM psStreamPrev;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDriv: Destroying stream %s\r\n", psStream->szName ));
+
+ /*
+ Validate buffer.
+ */
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ RemoveSIDEntry(psStream);
+
+ /*
+ Remove from linked list.
+ */
+ psStreamThis = g_psStreamList;
+ psStreamPrev = NULL;
+
+ while (psStreamThis)
+ {
+ if (psStreamThis == psStream)
+ {
+ if (psStreamPrev)
+ {
+ psStreamPrev->psNext = psStreamThis->psNext;
+ }
+ else
+ {
+ g_psStreamList = psStreamThis->psNext;
+ }
+
+ psStreamThis = NULL;
+ }
+ else
+ {
+ psStreamPrev = psStreamThis;
+ psStreamThis = psStreamThis->psNext;
+ }
+ }
+
+ /*
+ And free its memory.
+ */
+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ HostNonPageablePageFree(psStreamInit->pvBase);
+ HostNonPageablePageFree(psStreamDeinit->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ HostPageablePageFree(psStreamInit->pvBase);
+ HostPageablePageFree(psStreamDeinit->pvBase);
+ }
+
+ /* Free the shared page used for the three stream tuple */
+ HostNonPageablePageFree(psStream);
+ psStream = psStreamInit = psStreamDeinit = NULL;
+
+ if (g_psStreamList == NULL)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"DBGDriv: Stream list now empty" ));
+ }
+
+ return;
+}
+
+/*!****************************************************************************
+ @name DBGDrivFindStream
+ @brief Finds/resets a named stream
+ @param pszName - stream name
+ @param bResetStream - whether to reset the stream, e.g. to end pdump init phase
+ @return stream handle, or NULL if no stream with that name exists
+*****************************************************************************/
+void * IMG_CALLCONV DBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream)
+{
+ PDBG_STREAM psStream = NULL;
+ PDBG_STREAM psThis;
+ IMG_UINT32 ui32Off;
+ IMG_BOOL bAreSame;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "PDump client connecting to %s %s",
+ pszName,
+ (bResetStream == IMG_TRUE) ? "with reset" : "no reset"));
+
+ /*
+ Scan buffer names for supplied one.
+ */
+ for (psThis = g_psStreamList; psThis != NULL; psThis = psThis->psNext)
+ {
+ bAreSame = IMG_TRUE;
+ ui32Off = 0;
+
+ if (strlen(psThis->szName) == strlen(pszName))
+ {
+ while ((ui32Off < DEBUG_STREAM_NAME_MAX) && (psThis->szName[ui32Off] != 0) && (pszName[ui32Off] != 0) && bAreSame)
+ {
+ if (psThis->szName[ui32Off] != pszName[ui32Off])
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ ui32Off++;
+ }
+ }
+ else
+ {
+ bAreSame = IMG_FALSE;
+ }
+
+ if (bAreSame)
+ {
+ psStream = psThis;
+ break;
+ }
+ }
+
+ if(psStream)
+ {
+ psStream->psInitStream->ui32RPtr = 0;
+ psStream->psDeinitStream->ui32RPtr = 0;
+ psStream->ui32RPtr = 0;
+ if (bResetStream)
+ {
+ /* This will erase any data written to the main stream
+ * before the client starts. */
+ psStream->ui32WPtr = 0;
+ }
+ psStream->ui32Marker = psStream->psInitStream->ui32Marker = 0;
+
+
+ /* mark init stream to prevent further reading by pdump client */
+ /* Check for possible race condition */
+ psStream->psInitStream->ui32InitPhaseWOff = psStream->psInitStream->ui32WPtr;
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Set %s client marker bo %x",
+ psStream->szName,
+ psStream->psInitStream->ui32InitPhaseWOff));
+ }
+
+ return((void *) psStream);
+}
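+
+/* Usage sketch (illustrative; the stream name is an assumption): a PDump
+ * client connecting to an existing stream and rewinding it:
+ *
+ *   PDBG_STREAM psMain = (PDBG_STREAM)DBGDrivFindStream("PDumpStream", IMG_TRUE);
+ *
+ * On success all three read pointers are rewound, the main stream's write
+ * pointer is cleared (because bResetStream is IMG_TRUE), and the init
+ * stream's readable region is capped at its write offset at connect time.
+ */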
+
+static void IMG_CALLCONV DBGDrivInvalidateStream(PDBG_STREAM psStream)
+{
+ IMG_CHAR pszErrorMsg[] = "**OUTOFMEM\n";
+ IMG_UINT32 ui32Space;
+ IMG_UINT32 ui32Off = 0;
+ IMG_UINT32 ui32WPtr = psStream->ui32WPtr;
+ IMG_PUINT8 pui8Buffer = (IMG_UINT8 *) psStream->pvBase;
+
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: An error occurred for stream %s", psStream->szName ));
+
+ /*
+ Validate buffer.
+ */
+ /*
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+*/
+ /* Write what we can of the error message */
+ ui32Space = SpaceInStream(psStream);
+
+ /* Make sure there's space for termination character */
+ if(ui32Space > 0)
+ {
+ ui32Space--;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivInvalidateStream: Buffer full."));
+ }
+
+ while((pszErrorMsg[ui32Off] != 0) && (ui32Off < ui32Space))
+ {
+ pui8Buffer[ui32WPtr] = (IMG_UINT8)pszErrorMsg[ui32Off];
+ ui32Off++;
+ ui32WPtr++;
+ }
+ pui8Buffer[ui32WPtr++] = '\0';
+ psStream->ui32WPtr = ui32WPtr;
+
+ /* Buffer will accept no more params from Services/client driver */
+ psStream->ui32Flags |= DEBUG_FLAGS_READONLY;
+}
+
+/*!****************************************************************************
+ @name InvalidateAllStreams
+ @brief invalidate all streams in list
+ @return none
+*****************************************************************************/
+static void InvalidateAllStreams(void)
+{
+ PDBG_STREAM psStream = g_psStreamList;
+ while (psStream != NULL)
+ {
+ DBGDrivInvalidateStream(psStream);
+ DBGDrivInvalidateStream(psStream->psInitStream);
+ DBGDrivInvalidateStream(psStream->psDeinitStream);
+ psStream = psStream->psNext;
+ }
+ return;
+}
+
+/*!****************************************************************************
+ @name DBGDrivWrite2
+ @brief Copies data from a buffer into selected (expandable) stream.
+ @param psStream - stream for output
+ @param pui8InBuf - input buffer
+ @param ui32InBuffSize - size of input
+ @return bytes copied, 0 if recoverable error, -1 if unrecoverable error
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 * pui8InBuf,IMG_UINT32 ui32InBuffSize)
+{
+
+ /*
+ Validate buffer.
+ */
+ if (!StreamValidForWrite(psStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivWrite2: stream not valid"));
+ return(0xFFFFFFFFUL);
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Recv(exp) %d b for %s: Roff = %x, WOff = %x",
+ ui32InBuffSize,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+ return( WriteExpandingBuffer(psStream, pui8InBuf, ui32InBuffSize) );
+}
+
+/*!****************************************************************************
+ @name DBGDrivRead
+ @brief Read from debug driver buffers
+ @param psMainStream - stream
+ @param ui32BufID - one of the DEBUG_READ_BUFID values indicating which buffer to read
+ @param ui32OutBuffSize - available space in client buffer
+ @param pui8OutBuf - output buffer
+ @return bytes read, 0 if failure occurred
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psMainStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 * pui8OutBuf)
+{
+ IMG_UINT32 ui32Data;
+ DBG_STREAM *psStream;
+
+ /*
+ Validate buffer.
+ */
+ if (!StreamValidForRead(psMainStream))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivRead: buffer %p is invalid", psMainStream));
+ return(0);
+ }
+
+ if(ui32BufID == DEBUG_READ_BUFID_INIT)
+ {
+ psStream = psMainStream->psInitStream;
+ }
+ else if (ui32BufID == DEBUG_READ_BUFID_DEINIT)
+ {
+ psStream = psMainStream->psDeinitStream;
+ }
+ else
+ {
+ psStream = psMainStream;
+ }
+
+ /* Don't read beyond the init phase marker point */
+ if (psStream->ui32RPtr == psStream->ui32WPtr ||
+ ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32RPtr >= psStream->ui32InitPhaseWOff)) )
+ {
+ return(0);
+ }
+
+ /*
+ Get amount of data in buffer.
+ */
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+ ui32Data = psStream->ui32WPtr - psStream->ui32RPtr;
+ }
+ else
+ {
+ ui32Data = psStream->ui32WPtr + (psStream->ui32Size - psStream->ui32RPtr);
+ }
+
+ /*
+ Don't read beyond the init phase marker point
+ */
+ if ((psStream->ui32InitPhaseWOff > 0) &&
+ (psStream->ui32InitPhaseWOff < psStream->ui32WPtr))
+ {
+ ui32Data = psStream->ui32InitPhaseWOff - psStream->ui32RPtr;
+ }
+
+ /*
+ Only transfer what target buffer can handle.
+ */
+ if (ui32Data > ui32OutBuffSize)
+ {
+ ui32Data = ui32OutBuffSize;
+ }
+
+ PVR_DPF((PVR_DBGDRIV_MESSAGE, "Send %x b from %s: Roff = %x, WOff = %x",
+ ui32Data,
+ psStream->szName,
+ psStream->ui32RPtr,
+ psStream->ui32WPtr));
+
+ /*
+ Split copy into two bits or one depending on W/R position.
+ */
+ if ((psStream->ui32RPtr + ui32Data) > psStream->ui32Size)
+ { /* Calc block 1 and block 2 sizes */
+ IMG_UINT32 ui32B1 = psStream->ui32Size - psStream->ui32RPtr;
+ IMG_UINT32 ui32B2 = ui32Data - ui32B1;
+
+ /* Copy up to end of circular buffer */
+ HostMemCopy((void *) pui8OutBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ ui32B1);
+
+ /* Copy from start of circular buffer */
+ HostMemCopy((void *)(pui8OutBuf + ui32B1),
+ psStream->pvBase,
+ ui32B2);
+
+ /* Update read pointer now that we've copied the data out */
+ psStream->ui32RPtr = ui32B2;
+ }
+ else
+ { /* Copy data from wherever */
+ HostMemCopy((void *) pui8OutBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ ui32Data);
+
+ /* Update read pointer now that we've copied the data out */
+ psStream->ui32RPtr += ui32Data;
+
+ /* Check for wrapping */
+ if ((psStream->ui32RPtr != psStream->ui32WPtr) &&
+ (psStream->ui32RPtr >= psStream->ui32Size))
+ {
+ psStream->ui32RPtr = 0;
+ }
+ }
+
+ return(ui32Data);
+}
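+
+/* Worked example of the wrapped copy above (values assumed): with
+ * ui32Size == 0x1000, ui32RPtr == 0xF00 and ui32Data == 0x300:
+ *
+ *   ui32B1 = 0x1000 - 0xF00 = 0x100;   first copy: tail of the buffer
+ *   ui32B2 = 0x300  - 0x100 = 0x200;   second copy: from the buffer start
+ *   ui32RPtr = 0x200;                  read pointer after the wrap
+ */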
+
+/*!****************************************************************************
+ @name DBGDrivSetMarker
+ @brief Sets the marker in the stream to split output files
+ @param psStream - stream
+ @param ui32Marker - offset at which the output file should be split
+ @return nothing
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker)
+{
+ /*
+ Validate buffer
+ */
+ if (!StreamValid(psStream))
+ {
+ return;
+ }
+
+ /* Called by PDump client to reset the marker to zero after a file split */
+ if ((ui32Marker == 0) && (psStream->ui32Marker == 0))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: [%s] Client resetting marker that is already zero!", psStream->szName));
+ }
+ /* Called by pvrsrvkm to set the marker to signal a file split is required */
+ if ((ui32Marker != 0) && (psStream->ui32Marker != 0))
+ {
+ /* In this case a previous split request is still outstanding. The
+ * client has not yet actioned and acknowledged the previous
+ * marker. This may be an error if the client does not catch-up and
+ * the stream's written data is allowed to pass the max file
+ * size again. If this happens the PDump is invalid as the offsets
+ * from the script file will be incorrect.
+ */
+ PVR_DPF((PVR_DBG_ERROR, "DBGDrivSetMarker: [%s] Server setting marker that is already set (%d)!", psStream->szName, psStream->ui32Marker));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DBGDrivSetMarker: [%s] Setting stream split marker to %d (was %d)", psStream->szName, ui32Marker, psStream->ui32Marker));
+ }
+
+ psStream->ui32Marker = ui32Marker;
+}
+
+/*!****************************************************************************
+ @name DBGDrivGetMarker
+ @brief Gets the marker in the stream to split output files
+ @param psStream - stream
+ @return marker offset
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream)
+{
+ /*
+ Validate buffer
+ */
+ if (!StreamValid(psStream))
+ {
+ return 0;
+ }
+
+ return psStream->ui32Marker;
+}
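+
+/* Sketch of the split-marker handshake described above (illustrative):
+ *
+ *   server (pvrsrvkm):  DBGDrivSetMarker(psStream, ui32Offset);  request split
+ *   client (PDump):     if (DBGDrivGetMarker(psStream) != 0)
+ *                       {   split output file;
+ *                           DBGDrivSetMarker(psStream, 0);        acknowledge
+ *                       }
+ */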
+
+/*!****************************************************************************
+ @name DBGDrivGetServiceTable
+ @brief get jump table for Services driver
+ @return pointer to jump table
+*****************************************************************************/
+void * IMG_CALLCONV DBGDrivGetServiceTable(void)
+{
+ return &g_sDBGKMServices;
+}
+
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+/*!****************************************************************************
+ @name DBGDrivWaitForEvent
+ @brief waits for an event
+ @param eEvent - debug driver event
+ @return void
+*****************************************************************************/
+void IMG_CALLCONV DBGDrivWaitForEvent(DBG_EVENT eEvent)
+{
+ HostWaitForEvent(eEvent);
+}
+#endif
+
+/*!****************************************************************************
+ @name DBGDrivGetCtrlState
+ @brief Gets a state value from the debug driver or stream
+ @param psStream - stream
+ @param ui32StateID - state ID
+ @return requested state value, or 0xFFFFFFFF if the stream or state ID is invalid
+*****************************************************************************/
+IMG_UINT32 IMG_CALLCONV DBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID)
+{
+ /* Validate buffer */
+ if (!StreamValid(psStream))
+ {
+ return (0xFFFFFFFF);
+ }
+
+ /* Retrieve the state asked for */
+ switch (ui32StateID)
+ {
+ case DBG_GET_STATE_FLAG_IS_READONLY:
+ return ((psStream->ui32Flags & DEBUG_FLAGS_READONLY) != 0);
+
+ case 0xFE: /* Dump the current stream state */
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+ psStream, psStream->szName, psStream->ui32Flags));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream->pvBase( %p ) psStream->ui32Size( %u )",
+ psStream->pvBase, psStream->ui32Size));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream->ui32RPtr( %u ) psStream->ui32WPtr( %u )",
+ psStream->ui32RPtr, psStream->ui32WPtr));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: psStream->ui32Marker( %u ) psStream->ui32InitPhaseWOff( %u )",
+ psStream->ui32Marker, psStream->ui32InitPhaseWOff));
+ if (psStream->psInitStream)
+ {
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream( %p ) ( -- %s -- ) ui32Flags( %x )",
+ psStream->psInitStream, psStream->psInitStream->szName, psStream->ui32Flags));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream->pvBase( %p ) psInitStream->ui32Size( %u )",
+ psStream->psInitStream->pvBase, psStream->psInitStream->ui32Size));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream->ui32RPtr( %u ) psInitStream->ui32WPtr( %u )",
+ psStream->psInitStream->ui32RPtr, psStream->psInitStream->ui32WPtr));
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "-------- PDUMP DBGDriv: psInitStream->ui32Marker( %u ) psInitStream->ui32InitPhaseWOff( %u ) ",
+ psStream->psInitStream->ui32Marker, psStream->psInitStream->ui32InitPhaseWOff));
+ }
+
+ break;
+
+ case 0xFF: /* Dump driver state not in a stream */
+ {
+ PVR_DPF((PVR_DBG_CALLTRACE,
+ "------ PDUMP DBGDriv: g_psStreamList( head %p ) g_pvAPIMutex( %p ) g_PDumpCurrentFrameNo( %u )",
+ g_psStreamList, g_pvAPIMutex, g_PDumpCurrentFrameNo));
+ }
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ }
+
+ return (0xFFFFFFFF);
+}
+
+IMG_UINT32 IMG_CALLCONV DBGDrivGetFrame(void)
+{
+ return g_PDumpCurrentFrameNo;
+}
+
+void IMG_CALLCONV DBGDrivSetFrame(IMG_UINT32 ui32Frame)
+{
+ g_PDumpCurrentFrameNo = ui32Frame;
+}
+
+
+/*!****************************************************************************
+ @name ExpandStreamBuffer
+ @brief allocates a new buffer when the current one is full
+ @param psStream - stream
+ @param ui32NewSize - new size
+ @return IMG_TRUE - if allocation succeeded, IMG_FALSE - if not
+*****************************************************************************/
+static IMG_BOOL ExpandStreamBuffer(PDBG_STREAM psStream, IMG_UINT32 ui32NewSize)
+{
+ void * pvNewBuf;
+ IMG_UINT32 ui32NewSizeInPages;
+ IMG_UINT32 ui32NewWOffset;
+ IMG_UINT32 ui32NewROffset;
+ IMG_UINT32 ui32SpaceInOldBuf;
+
+ /*
+ First check new size is bigger than existing size
+ */
+ if (psStream->ui32Size >= ui32NewSize)
+ {
+ return IMG_FALSE;
+ }
+
+ /*
+ Calc space in old buffer
+ */
+ ui32SpaceInOldBuf = SpaceInStream(psStream);
+
+ /*
+ Allocate new buffer
+ */
+ ui32NewSizeInPages = ((ui32NewSize + 0xfffUL) & ~0xfffUL) / HOST_PAGESIZE;
+
+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ pvNewBuf = HostNonPageablePageAlloc(ui32NewSizeInPages);
+ }
+ else
+ {
+ pvNewBuf = HostPageablePageAlloc(ui32NewSizeInPages);
+ }
+
+ if (pvNewBuf == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+ {
+ /*
+ Copy over old buffer to new one, we place data at start of buffer
+ even if Read offset is not at start of buffer
+ */
+ if (psStream->ui32RPtr <= psStream->ui32WPtr)
+ {
+ /*
+ No wrapping of data so copy data to start of new buffer
+ */
+ HostMemCopy(pvNewBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ psStream->ui32WPtr - psStream->ui32RPtr);
+ }
+ else
+ {
+ IMG_UINT32 ui32FirstCopySize;
+
+ /*
+ The data has wrapped around the buffer, copy beginning of buffer first
+ */
+ ui32FirstCopySize = psStream->ui32Size - psStream->ui32RPtr;
+
+ HostMemCopy(pvNewBuf,
+ (void *)((uintptr_t)psStream->pvBase + psStream->ui32RPtr),
+ ui32FirstCopySize);
+
+ /*
+ Now second half
+ */
+ HostMemCopy((void *)((uintptr_t)pvNewBuf + ui32FirstCopySize),
+ (void *)(IMG_PBYTE)psStream->pvBase,
+ psStream->ui32WPtr);
+ }
+ ui32NewROffset = 0;
+ }
+ else
+ {
+ /* Copy everything in the old buffer to the new one */
+ HostMemCopy(pvNewBuf, psStream->pvBase, psStream->ui32WPtr);
+ ui32NewROffset = psStream->ui32RPtr;
+ }
+
+ /*
+ New Write offset is at end of data
+ */
+ ui32NewWOffset = psStream->ui32Size - ui32SpaceInOldBuf;
+
+ /*
+ Free old buffer
+ */
+ if ((psStream->ui32Flags & DEBUG_FLAGS_USE_NONPAGED_MEM) != 0)
+ {
+ HostNonPageablePageFree(psStream->pvBase);
+ }
+ else
+ {
+ HostPageablePageFree(psStream->pvBase);
+ }
+
+ /*
+ Now set new params up
+ */
+ psStream->pvBase = pvNewBuf;
+ psStream->ui32RPtr = ui32NewROffset;
+ psStream->ui32WPtr = ui32NewWOffset;
+ psStream->ui32Size = ui32NewSizeInPages * HOST_PAGESIZE;
+
+ return IMG_TRUE;
+}
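+
+/* Worked example of the page rounding above (assuming HOST_PAGESIZE == 0x1000):
+ * a request for ui32NewSize == 0x2801 bytes gives
+ *
+ *   ui32NewSizeInPages = ((0x2801 + 0xfff) & ~0xfff) / 0x1000 = 3;
+ *
+ * so the stream ends up with ui32Size == 3 * HOST_PAGESIZE == 0x3000 bytes.
+ */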
+
+/*!****************************************************************************
+ @name SpaceInStream
+ @brief remaining space in stream
+ @param psStream - stream
+ @return bytes remaining
+*****************************************************************************/
+static IMG_UINT32 SpaceInStream(PDBG_STREAM psStream)
+{
+ IMG_UINT32 ui32Space;
+
+ if ((psStream->ui32Flags & DEBUG_FLAGS_CIRCULAR) != 0)
+ {
+ /* Allow overwriting the buffer which was already read */
+ if (psStream->ui32RPtr > psStream->ui32WPtr)
+ {
+ ui32Space = psStream->ui32RPtr - psStream->ui32WPtr;
+ }
+ else
+ {
+ ui32Space = psStream->ui32RPtr + (psStream->ui32Size - psStream->ui32WPtr);
+ }
+ }
+ else
+ {
+ /* Don't overwrite anything */
+ ui32Space = psStream->ui32Size - psStream->ui32WPtr;
+ }
+
+ return ui32Space;
+}
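+
+/* Worked example (values assumed): a circular stream with ui32Size == 0x1000,
+ * ui32RPtr == 0x100 and ui32WPtr == 0x300 reports
+ *
+ *   ui32Space = 0x100 + (0x1000 - 0x300) = 0xE00;
+ *
+ * since already-read data may be overwritten. A non-circular stream with the
+ * same pointers would report only 0x1000 - 0x300 = 0xD00.
+ */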
+
+
+/*!****************************************************************************
+ @name DestroyAllStreams
+ @brief delete all streams in list
+ @return none
+*****************************************************************************/
+void DestroyAllStreams(void)
+{
+ PDBG_STREAM psStream = g_psStreamList;
+ PDBG_STREAM psStreamToFree;
+
+ while (psStream != NULL)
+ {
+ psStreamToFree = psStream;
+ psStream = psStream->psNext;
+ DBGDrivDestroyStream(psStreamToFree->psInitStream, psStreamToFree, psStreamToFree->psDeinitStream);
+ }
+ g_psStreamList = NULL;
+ return;
+}
+
+/******************************************************************************
+ End of file (DBGDRIV.C)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/dbgdriv.h b/drivers/gpu/drm/img-rogue/1.10/dbgdriv.h
new file mode 100644
index 00000000000000..b3de02a2ec6ad3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dbgdriv.h
@@ -0,0 +1,122 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_
+#define _DBGDRIV_
+
+/*****************************************************************************
+ The odd constant or two
+*****************************************************************************/
+
+#define DBGDRIV_VERSION 0x100
+#define MAX_PROCESSES 2
+#define BLOCK_USED 0x01
+#define BLOCK_LOCKED 0x02
+#define DBGDRIV_MONOBASE 0x000B0000
+
+
+/*****************************************************************************
+ * OS-specific declarations and init/cleanup functions
+*****************************************************************************/
+extern void * g_pvAPIMutex;
+
+extern DBGKM_SERVICE_TABLE g_sDBGKMServices;
+
+extern IMG_INT dbgdrv_init(void);
+extern void dbgdrv_cleanup(void);
+
+/*****************************************************************************
+ Internal debug driver core functions
+*****************************************************************************/
+/* Called by WDDM debug driver win7/hostfunc.c */
+IMG_BOOL IMG_CALLCONV DBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Pages,
+ IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+
+/* Called by Linux debug driver main.c to allow the API mutex lock to be used
+ * to protect the common IOCTL read buffer while avoiding deadlock in the Ext
+ * layer
+ */
+IMG_UINT32 IMG_CALLCONV DBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID,
+ IMG_UINT32 ui32OutBufferSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV DBGDrivGetMarker(PDBG_STREAM psStream);
+
+/* Used in ioctl.c in DBGDIOCDrivGetServiceTable() which is called in WDDM PDump files */
+void * IMG_CALLCONV DBGDrivGetServiceTable(void);
+
+/* Used in WDDM version of debug driver win7/main.c */
+void DestroyAllStreams(void);
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 AtoI(IMG_CHAR *szIn);
+
+void HostMemSet(void *pvDest,IMG_UINT8 ui8Value,IMG_UINT32 ui32Size);
+void HostMemCopy(void *pvDest,void *pvSrc,IMG_UINT32 ui32Size);
+
+/*****************************************************************************
+ Secure handle Function prototypes
+*****************************************************************************/
+IMG_SID PStream2SID(PDBG_STREAM psStream);
+PDBG_STREAM SID2PStream(IMG_SID hStream);
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream);
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream);
+
+/*****************************************************************************
+ Declarations for IOCTL Service table and KM table entry points
+*****************************************************************************/
+IMG_BOOL IMG_CALLCONV ExtDBGDrivCreateStream(IMG_CHAR *pszName, IMG_UINT32 ui32Flags, IMG_UINT32 ui32Size, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+void IMG_CALLCONV ExtDBGDrivDestroyStream(IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+void * IMG_CALLCONV ExtDBGDrivFindStream(IMG_CHAR * pszName, IMG_BOOL bResetStream);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivRead(PDBG_STREAM psStream, IMG_UINT32 ui32BufID, IMG_UINT32 ui32OutBuffSize,IMG_UINT8 *pui8OutBuf);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivWrite2(PDBG_STREAM psStream,IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+void IMG_CALLCONV ExtDBGDrivSetMarker(PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetMarker(PDBG_STREAM psStream);
+void IMG_CALLCONV ExtDBGDrivWaitForEvent(DBG_EVENT eEvent);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetCtrlState(PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+IMG_UINT32 IMG_CALLCONV ExtDBGDrivGetFrame(void);
+void IMG_CALLCONV ExtDBGDrivSetFrame(IMG_UINT32 ui32Frame);
+
+#endif
+
+/*****************************************************************************
+ End of file (DBGDRIV.H)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/dbgdriv_handle.c b/drivers/gpu/drm/img-rogue/1.10/dbgdriv_handle.c
new file mode 100644
index 00000000000000..3388c40d46762d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dbgdriv_handle.c
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title Resource Handle Manager
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide resource handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+
+/* max number of streams held in SID info table */
+#define MAX_SID_ENTRIES 8
+
+typedef struct _SID_INFO
+{
+ PDBG_STREAM psStream;
+} SID_INFO, *PSID_INFO;
+
+static SID_INFO gaSID_Xlat_Table[MAX_SID_ENTRIES];
+
+IMG_SID PStream2SID(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ /* idx is one based */
+ return (IMG_SID)iIdx+1;
+ }
+ }
+ }
+
+ return (IMG_SID)0;
+}
+
+
+PDBG_STREAM SID2PStream(IMG_SID hStream)
+{
+ /* changed to zero based */
+ IMG_INT32 iIdx = (IMG_INT32)hStream-1;
+
+ if (iIdx >= 0 && iIdx < MAX_SID_ENTRIES)
+ {
+ return gaSID_Xlat_Table[iIdx].psStream;
+ }
+ else
+ {
+ return (PDBG_STREAM)NULL;
+ }
+}
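+
+/* Example (illustrative): SIDs are one-based indices into gaSID_Xlat_Table, so
+ *
+ *   PStream2SID(ps) == 3   implies   gaSID_Xlat_Table[2].psStream == ps;
+ *   SID2PStream(3)         returns   gaSID_Xlat_Table[2].psStream;
+ *   SID2PStream(0)         returns   NULL (0 is the invalid handle).
+ */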
+
+
+IMG_BOOL AddSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ /* already created */
+ return IMG_TRUE;
+ }
+
+ if (gaSID_Xlat_Table[iIdx].psStream == (PDBG_STREAM)NULL)
+ {
+ /* free entry */
+ gaSID_Xlat_Table[iIdx].psStream = psStream;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+IMG_BOOL RemoveSIDEntry(PDBG_STREAM psStream)
+{
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ IMG_INT32 iIdx;
+
+ for (iIdx = 0; iIdx < MAX_SID_ENTRIES; iIdx++)
+ {
+ if (psStream == gaSID_Xlat_Table[iIdx].psStream)
+ {
+ gaSID_Xlat_Table[iIdx].psStream = (PDBG_STREAM)NULL;
+ return IMG_TRUE;
+ }
+ }
+ }
+
+ return IMG_FALSE;
+}
+
+
+/******************************************************************************
+ End of file (handle.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/dbgdriv_ioctl.h b/drivers/gpu/drm/img-rogue/1.10/dbgdriv_ioctl.h
new file mode 100644
index 00000000000000..0cc46eba1fc149
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dbgdriv_ioctl.h
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRIV_IOCTL_H_
+#define _DBGDRIV_IOCTL_H_
+
+#include "dbgdrvif_srv5.h"
+
+
+/* Share this debug driver global with the OS layer so that IOCTL calls
+ * coming from the OS enter the common table of entry points.
+ */
+extern IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(void *, void *, IMG_BOOL);
+
+
+#endif /* _DBGDRIV_IOCTL_H_ */
+
+/*****************************************************************************
+ End of file
+ *****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/dbgdrvif_srv5.h b/drivers/gpu/drm/img-rogue/1.10/dbgdrvif_srv5.h
new file mode 100644
index 00000000000000..3b6fbc7753a1f1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dbgdrvif_srv5.h
@@ -0,0 +1,265 @@
+/*************************************************************************/ /*!
+@File
+@Title Debug driver for Services 5
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Debug Driver Interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DBGDRVIF_SRV5_
+#define _DBGDRVIF_SRV5_
+
+#if defined(_MSC_VER)
+#pragma warning(disable:4200)
+#endif
+
+#if defined(_WIN32)
+
+#include "ioctldef.h"
+
+#else
+
+#define FILE_DEVICE_UNKNOWN 0
+#define METHOD_BUFFERED 0
+#define FILE_ANY_ACCESS 0
+
+#define CTL_CODE( DeviceType, Function, Method, Access ) (Function)
+#define MAKEIOCTLINDEX(i) ((i) & 0xFFF)
+
+#endif
+
+#include "img_defs.h"
+
+
+/*****************************************************************************
+ Stream mode stuff.
+*****************************************************************************/
+#define DEBUG_CAPMODE_FRAMED 0x00000001UL /* Default capture mode, set when streams created */
+#define DEBUG_CAPMODE_CONTINUOUS 0x00000002UL /* Only set in WDDM, streams created with it set to this mode */
+#define DEBUG_CAPMODE_BLKMODE 0x00000004UL /* Block-mode of pdump */
+
+#define DEBUG_FLAGS_USE_NONPAGED_MEM 0x00000001UL /* Only set in WDDM */
+#define DEBUG_FLAGS_NO_BUF_EXPANDSION 0x00000002UL
+#define DEBUG_FLAGS_READONLY 0x00000008UL
+#define DEBUG_FLAGS_WRITEONLY 0x00000010UL
+#define DEBUG_FLAGS_CIRCULAR 0x00000020UL
+
+/* Stream name maximum length */
+#define DEBUG_STREAM_NAME_MAX 32
+
+/*****************************************************************************
+ IOCTL values.
+*****************************************************************************/
+/* IOCTL values defined here so that the Windows-based OS layer of PDump
+ in the server can access the GetServiceTable method.
+ */
+#define DEBUG_SERVICE_IOCTL_BASE 0x800UL
+#define DEBUG_SERVICE_GETSERVICETABLE CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x01, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETSTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x02, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_READ CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x03, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_SETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x04, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETMARKER CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x05, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_WAITFOREVENT CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x06, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_GETFRAME CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x07, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#if defined(__QNXNTO__)
+#define DEBUG_SERVICE_CREATESTREAM CTL_CODE(FILE_DEVICE_UNKNOWN, DEBUG_SERVICE_IOCTL_BASE + 0x08, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define DEBUG_SERVICE_MAX_API 8
+#else
+#define DEBUG_SERVICE_MAX_API 9
+#endif
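+
+/* Example (illustrative): on non-Windows builds CTL_CODE reduces to its
+ * Function argument, so e.g.
+ *
+ *   DEBUG_SERVICE_READ == DEBUG_SERVICE_IOCTL_BASE + 0x03 == 0x803
+ *
+ * while Windows builds use the full NT CTL_CODE encoding from ioctldef.h.
+ */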
+
+
+#if defined(_WIN32)
+/*****************************************************************************
+ Debug driver device name
+*****************************************************************************/
+#if defined (DBGDRV_MODULE_NAME)
+#define REGISTRY_PATH_TO_DEBUG_DRIVER \
+ L"\\Registry\\Machine\\System\\CurrentControlSet\\Services\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_DEVICE_NAME L"\\Device\\" DBGDRV_MODULE_NAME
+#define DBGDRV_NT_SYMLINK L"\\DosDevices\\" DBGDRV_MODULE_NAME
+#else
+#error Debug driver name must be specified
+/*
+#define DBGDRV_NT_DEVICE_NAME L"\\Device\\VLDbgDrv"
+#define DBGDRV_NT_SYMLINK L"\\DosDevices\\VLDBGDRV"
+*/
+#endif
+
+/* symbolic link name */
+#define DBGDRV_WIN32_DEVICE_NAME "\\\\.\\VLDBGDRV"
+
+#define DBGDRV_WINCE_DEVICE_NAME L"DBD1:"
+#endif
+
+/* A pointer type which is at least 64 bits wide. The fixed width ensures
+ * consistency in structures between 32 and 64-bit code.
+ * The UM code (be it 32 or 64 bit) can simply write to the native pointer type (pvPtr).
+ * 64-bit KM code must read ui32Ptr in the case of a 32-bit client, otherwise it can
+ * just read pvPtr if the client is also 64-bit
+ *
+ * ui64Ptr ensures the union is 64-bits wide in a 32-bit client.
+ *
+ * The union is explicitly 64-bit aligned as it was found gcc on x32 only
+ * aligns it to 32-bit, as the ABI permits aligning 64-bit types to a 32-bit
+ * boundary.
+ */
+typedef union
+{
+ /* native pointer type for UM to write to */
+ void __user *pvPtr;
+ /* the pointer written by a 32-bit client */
+ IMG_UINT32 ui32Ptr;
+ /* force the union width */
+ IMG_UINT64 ui64Ptr;
+} DBG_WIDEPTR __aligned(8);
+
+/* Helper macro for dbgdriv (KM) to get the pointer value from the WIDEPTR type,
+ * depending on whether the client is 32 or 64-bit.
+ *
+ * note: double cast is required to avoid
+ * 'cast to pointer from integer of different size' warning.
+ * this is solved by first casting to an integer type.
+ */
+
+#if defined(CONFIG_COMPAT)
+#define WIDEPTR_GET_PTR(p, bCompat) (bCompat ? \
+ (void __user *) (uintptr_t) (p).ui32Ptr : \
+ (p).pvPtr)
+#else
+#define WIDEPTR_GET_PTR(p, bCompat) (p).pvPtr
+#endif
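+
+/* Usage sketch (illustrative): kernel code resolving the user pointer from a
+ * DBG_IN_READ request (declared below), where bCompat flags a 32-bit client:
+ *
+ *   void __user *pvUser = WIDEPTR_GET_PTR(psIn->pui8OutBuffer, bCompat);
+ *
+ * pvUser can then be handed to the OS copy-to-user primitive; psIn here is an
+ * assumed PDBG_IN_READ argument.
+ */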
+
+typedef enum _DBG_EVENT_
+{
+ DBG_EVENT_STREAM_DATA = 1
+} DBG_EVENT;
+
+
+/*****************************************************************************
+ In/Out Structures
+*****************************************************************************/
+#if defined(__QNXNTO__)
+typedef struct _DBG_IN_CREATESTREAM_
+{
+ union
+ {
+ IMG_CHAR *pszName;
+ IMG_UINT64 ui64Name;
+ } u;
+ IMG_UINT32 ui32Pages;
+ IMG_UINT32 ui32CapMode;
+ IMG_UINT32 ui32OutMode;
+}DBG_IN_CREATESTREAM, *PDBG_IN_CREATESTREAM;
+
+typedef struct _DBG_OUT_CREATESTREAM_
+{
+ IMG_HANDLE phInit;
+ IMG_HANDLE phMain;
+ IMG_HANDLE phDeinit;
+} DBG_OUT_CREATESTREAM, *PDBG_OUT_CREATESTREAM;
+#endif
+
+typedef struct _DBG_IN_FINDSTREAM_
+{
+ IMG_CHAR pszName[DEBUG_STREAM_NAME_MAX];
+ IMG_BOOL bResetStream;
+}DBG_IN_FINDSTREAM, *PDBG_IN_FINDSTREAM;
+
+#define DEBUG_READ_BUFID_MAIN 0
+#define DEBUG_READ_BUFID_INIT 1
+#define DEBUG_READ_BUFID_DEINIT 2
+
+typedef struct _DBG_IN_READ_
+{
+ DBG_WIDEPTR pui8OutBuffer;
+ IMG_SID hStream;
+ IMG_UINT32 ui32BufID;
+ IMG_UINT32 ui32OutBufferSize;
+} DBG_IN_READ, *PDBG_IN_READ;
+
+typedef struct _DBG_OUT_READ_
+{
+ IMG_UINT32 ui32DataRead;
+ IMG_UINT32 ui32SplitMarker;
+} DBG_OUT_READ, *PDBG_OUT_READ;
+
+typedef struct _DBG_IN_SETMARKER_
+{
+ IMG_SID hStream;
+ IMG_UINT32 ui32Marker;
+} DBG_IN_SETMARKER, *PDBG_IN_SETMARKER;
+
+/*
+ DBG STREAM abstract types
+*/
+
+typedef struct _DBG_STREAM_CONTROL_* PDBG_STREAM_CONTROL;
+typedef struct _DBG_STREAM_* PDBG_STREAM;
+
+/*
+ Lookup identifiers for the GetState method in the KM service table.
+ */
+#define DBG_GET_STATE_FLAG_IS_READONLY 0x03
+
+
+/*****************************************************************************
+ Kernel mode service table
+*****************************************************************************/
+typedef struct _DBGKM_SERVICE_TABLE_
+{
+ IMG_UINT32 ui32Size;
+ IMG_BOOL (IMG_CALLCONV *pfnCreateStream) (IMG_CHAR * pszName,IMG_UINT32 ui32Flags,IMG_UINT32 ui32Pages, IMG_HANDLE* phInit, IMG_HANDLE* phMain, IMG_HANDLE* phDeinit);
+ void (IMG_CALLCONV *pfnDestroyStream) (IMG_HANDLE hInit, IMG_HANDLE hMain, IMG_HANDLE hDeinit);
+ IMG_UINT32 (IMG_CALLCONV *pfnDBGDrivWrite2) (PDBG_STREAM psStream, IMG_UINT8 *pui8InBuf,IMG_UINT32 ui32InBuffSize);
+ void (IMG_CALLCONV *pfnSetMarker) (PDBG_STREAM psStream, IMG_UINT32 ui32Marker);
+ IMG_UINT32 (IMG_CALLCONV *pfnGetMarker) (PDBG_STREAM psStream);
+ void (IMG_CALLCONV *pfnWaitForEvent) (DBG_EVENT eEvent);
+ IMG_UINT32 (IMG_CALLCONV *pfnGetCtrlState) (PDBG_STREAM psStream, IMG_UINT32 ui32StateID);
+ void (IMG_CALLCONV *pfnSetFrame) (IMG_UINT32 ui32Frame);
+} DBGKM_SERVICE_TABLE, *PDBGKM_SERVICE_TABLE;
+
+#if defined(_MSC_VER)
+#pragma warning(default:4200)
+#endif
+
+#endif
+
+/*****************************************************************************
+ End of file
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/debugmisc_server.c b/drivers/gpu/drm/img-rogue/1.10/debugmisc_server.c
new file mode 100644
index 00000000000000..fcdc70f1090c56
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/debugmisc_server.c
@@ -0,0 +1,301 @@
+/*************************************************************************/ /*!
+@File
+@Title Debugging and miscellaneous functions server implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel services functions for debugging and other
+ miscellaneous functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "debugmisc_server.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "pdump_km.h"
+#include "mmu_common.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+
+PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiFlags,
+ IMG_BOOL bSetBypassed)
+{
+ RGXFWIF_KCCB_CMD sSLCBPCtlCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sSLCBPCtlCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCBPCTL;
+ sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.bSetBypassed = bSetBypassed;
+ sSLCBPCtlCmd.uCmdData.sSLCBPCtlData.uiFlags = uiFlags;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sSLCBPCtlCmd,
+ sizeof(sSLCBPCtlCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVDebugMiscSLCSetEnableStateKM: RGXScheduleCommandfailed. Error:%u", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVDebugMiscSLCSetEnableStateKM: Waiting for value aborted with error (%u)", eError));
+ }
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscQueryFWLogKM(
+ const CONNECTION_DATA *psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32RGXFWLogType)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ if (!psDeviceNode || !pui32RGXFWLogType)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ if (!psDevInfo || !psDevInfo->psRGXFWIfTraceBuf)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pui32RGXFWLogType = psDevInfo->psRGXFWIfTraceBuf->ui32LogType;
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+ const CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RGXFWLogType)
+{
+ RGXFWIF_KCCB_CMD sLogTypeUpdateCmd;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32OldRGXFWLogType = psDevInfo->psRGXFWIfTraceBuf->ui32LogType;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* check log type is valid */
+ if (ui32RGXFWLogType & ~RGXFWIF_LOG_TYPE_MASK)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hRGXFWIfBufInitLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ /* set the new log type and ensure the new log type is written to memory
+ * before requesting the FW to read it
+ */
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32RGXFWLogType;
+ OSMemoryBarrier();
+
+ /* Allocate firmware trace buffer resource(s) if not already done */
+ if (RGXTraceBufferIsInitRequired(psDevInfo))
+ {
+ eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+ }
+ /* Check if LogType is TBI then allocate resource on demand and copy
+ * SFs to it
+ */
+ else if(RGXTBIBufferIsInitRequired(psDevInfo))
+ {
+ eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+ if (eError == PVRSRV_OK)
+ {
+ sLogTypeUpdateCmd.uCmdData.sTBIBuffer = psDevInfo->sRGXFWIfTBIBuffer;
+ }
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate resource on-demand. Reverting to old value", __func__));
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32OldRGXFWLogType;
+ OSMemoryBarrier();
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ return eError;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hRGXFWIfBufInitLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ eError = PVRSRVPowerLock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire power lock (%u)", __func__, eError));
+ return eError;
+ }
+
+ eError = PVRSRVGetDevicePowerState((const PPVRSRV_DEVICE_NODE) psDeviceNode, &ePowerState);
+
+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+ {
+ /* Ask the FW to update its cached version of logType value */
+ sLogTypeUpdateCmd.eCmdType = RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE;
+
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sLogTypeUpdateCmd,
+ sizeof(sLogTypeUpdateCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: RGXSendCommand failed. Error:%u", __func__, eError));
+ }
+ else
+ {
+ /* Give up the power lock as its acquired in RGXWaitForFWOp */
+ PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+
+ /* Wait for the LogType value to be updated */
+ eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __func__, eError));
+ }
+ return eError;
+ }
+ }
+
+ PVRSRVPowerUnlock((const PPVRSRV_DEVICE_NODE) psDeviceNode);
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetHCSDeadlineKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32HCSDeadlineMS)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadlineMS);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSidPriorityKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSidPriority)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32OSidPriority);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSNewState)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32OSNewState)
+ {
+ return RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_ONLINE);
+ }
+
+ return RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+}
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO* psDevInfo = psDeviceNode->pvDevice;
+ DLLIST_NODE *psNode, *psNext;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (dllist_is_empty(&psDevInfo->sFreeListHead))
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_LOG(("---------------[ Begin Freelist Page List Dump ]------------------"));
+
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+ {
+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+ RGXDumpFreeListPageList(psFreeList);
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ PVR_LOG(("----------------[ End Freelist Page List Dump ]-------------------"));
+
+ return PVRSRV_OK;
+
+}
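For reference, a server-side caller would drive the bridge entry point above roughly as follows. This is a minimal sketch: the RGXFWIF_LOG_TYPE_TRACE and RGXFWIF_LOG_TYPE_GROUP_MAIN flags follow the usual rgx_fwif naming convention and are assumed here rather than taken from this patch.

    /* Sketch: enable MAIN-group trace output through the debugmisc entry
     * point. Both flags are expected to sit inside RGXFWIF_LOG_TYPE_MASK,
     * so the validity check in PVRSRVRGXDebugMiscSetFWLogKM() accepts them. */
    static PVRSRV_ERROR EnableMainFWTrace(PVRSRV_DEVICE_NODE *psDeviceNode)
    {
    	/* psConnection is unreferenced by the implementation above,
    	 * so NULL is tolerated here. */
    	return PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode,
    	                                    RGXFWIF_LOG_TYPE_TRACE |
    	                                    RGXFWIF_LOG_TYPE_GROUP_MAIN);
    }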
diff --git a/drivers/gpu/drm/img-rogue/1.10/debugmisc_server.h b/drivers/gpu/drm/img-rogue/1.10/debugmisc_server.h
new file mode 100644
index 00000000000000..11a60407fb0622
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/debugmisc_server.h
@@ -0,0 +1,108 @@
+/*************************************************************************/ /*!
+@File
+@Title Debugging and miscellaneous functions server interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Kernel services functions for debugging and other
+ miscellaneous functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(DEBUGMISC_SERVER_H)
+#define DEBUGMISC_SERVER_H
+
+#include <img_defs.h>
+#include <pvrsrv_error.h>
+#include <device.h>
+#include <pmr.h>
+
+#include "connection_server.h"
+
+
+PVRSRV_ERROR
+PVRSRVDebugMiscSLCSetBypassStateKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiFlags,
+ IMG_BOOL bSetBypassed);
+
+PVRSRV_ERROR
+PVRSRVDebugMiscInitFWImageKM(
+ PMR *psFWImgDestPMR,
+ PMR *psFWImgSrcPMR,
+ IMG_UINT64 ui64FWImgLen,
+ PMR *psFWImgSigPMR,
+ IMG_UINT64 ui64FWSigLen);
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscQueryFWLogKM(
+ const CONNECTION_DATA *psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32RGXFWLogType);
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetFWLogKM(
+ const CONNECTION_DATA *psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RGXFWLogType);
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetHCSDeadlineKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32HCSDeadlineMS);
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSidPriorityKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSidPriority);
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32OSNewState);
+
+PVRSRV_ERROR
+PVRSRVRGXDebugMiscDumpFreelistPageListKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/device.h b/drivers/gpu/drm/img-rogue/1.10/device.h
new file mode 100644
index 00000000000000..a57e1932216825
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/device.h
@@ -0,0 +1,435 @@
+/**************************************************************************/ /*!
+@File
+@Title Common Device header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device related function templates and defines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICE_H__
+#define __DEVICE_H__
+
+
+#include "devicemem_heapcfg.h"
+#include "mmu_common.h"
+#include "ra.h" /* RA_ARENA */
+#include "pvrsrv_device.h"
+#include "sync_checkpoint.h"
+#include "srvkm.h"
+#include "physheap.h"
+#include <powervr/sync_external.h>
+#include "sysinfo.h"
+#include "dllist.h"
+#include "cache_km.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "lock.h"
+
+#include "power.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ *PPVRSRV_POWER_DEV;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+struct SYNC_RECORD;
+#endif
+
+/*********************************************************************/ /*!
+ @Function AllocUFOCallback
+ @Description    Device specific callback for allocation of a UFO block
+
+ @Input psDeviceNode Pointer to device node to allocate
+ the UFO for.
+ @Output ppsMemDesc Pointer to pointer for the memdesc of
+ the allocation
+ @Output pui32SyncAddr FW Base address of the UFO block
+ @Output puiSyncPrimBlockSize Size of the UFO block
+
+ @Return PVRSRV_OK if allocation was successful
+ */
+/*********************************************************************/
+typedef PVRSRV_ERROR (*AllocUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *pui32SyncAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize);
+
+/*********************************************************************/ /*!
+ @Function FreeUFOCallback
+ @Description    Device specific callback for freeing of a UFO block
+
+ @Input psDeviceNode Pointer to device node that the UFO block was
+ allocated from.
+ @Input          psMemDesc       Pointer to the memdesc of
+                                  the UFO block to free.
+ */
+/*********************************************************************/
+typedef void (*FreeUFOBlockCallback)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ DEVMEM_MEMDESC *psMemDesc);
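The two callback types above are wired into the device node at init time; a minimal sketch, where RGXAllocUFOBlock/RGXFreeUFOBlock stand for device-specific implementations (representative names, not taken from this patch):

    /* In the device init path: register the UFO block alloc/free pair so
     * common sync code can service UFO requests for this device. */
    psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
    psDeviceNode->pfnFreeUFOBlock  = RGXFreeUFOBlock;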
+
+typedef struct _PVRSRV_DEVICE_IDENTIFIER_
+{
+ /* Pdump memory and register bank names */
+ IMG_CHAR *pszPDumpDevName;
+ IMG_CHAR *pszPDumpRegName;
+
+	/* Under Linux, this is the minor number of the RenderNode corresponding to this Device */
+ IMG_INT32 i32UMIdentifier;
+} PVRSRV_DEVICE_IDENTIFIER;
+
+typedef struct _DEVICE_MEMORY_INFO_
+{
+ /* heap count. Doesn't include additional heaps from PVRSRVCreateDeviceMemHeap */
+ IMG_UINT32 ui32HeapCount;
+
+ /* Blueprints for creating new device memory contexts */
+ IMG_UINT32 uiNumHeapConfigs;
+ DEVMEM_HEAP_CONFIG *psDeviceMemoryHeapConfigArray;
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeap;
+} DEVICE_MEMORY_INFO;
+
+
+typedef struct _PG_HANDLE_
+{
+ union
+ {
+ void *pvHandle;
+ IMG_UINT64 ui64Handle;
+	} u;
+	/* Order of the corresponding allocation */
+ IMG_UINT32 ui32Order;
+} PG_HANDLE;
+
+#define MMU_BAD_PHYS_ADDR (0xbadbad00badULL)
+typedef struct __DUMMY_PAGE__
+{
+	/* Page handle for the dummy page allocated (UMA/LMA) */
+ PG_HANDLE sDummyPageHandle;
+ POS_LOCK psDummyPgLock;
+ ATOMIC_T atRefCounter;
+	/* Dummy page size in terms of log2 */
+ IMG_UINT32 ui32Log2DummyPgSize;
+ IMG_UINT64 ui64DummyPgPhysAddr;
+#if defined(PDUMP)
+#define DUMMY_PAGE ("DUMMY_PAGE")
+ IMG_HANDLE hPdumpDummyPg;
+#endif
+} PVRSRV_DUMMY_PAGE;
+
+typedef enum _PVRSRV_DEVICE_STATE_
+{
+ PVRSRV_DEVICE_STATE_UNDEFINED = 0,
+ PVRSRV_DEVICE_STATE_INIT,
+ PVRSRV_DEVICE_STATE_ACTIVE,
+ PVRSRV_DEVICE_STATE_DEINIT,
+ PVRSRV_DEVICE_STATE_BAD,
+} PVRSRV_DEVICE_STATE;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_STATUS_
+{
+ PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED = 0,
+ PVRSRV_DEVICE_HEALTH_STATUS_OK,
+ PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING,
+ PVRSRV_DEVICE_HEALTH_STATUS_DEAD,
+ PVRSRV_DEVICE_HEALTH_STATUS_FAULT
+} PVRSRV_DEVICE_HEALTH_STATUS;
+
+typedef enum _PVRSRV_DEVICE_HEALTH_REASON_
+{
+ PVRSRV_DEVICE_HEALTH_REASON_NONE = 0,
+ PVRSRV_DEVICE_HEALTH_REASON_ASSERTED,
+ PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING,
+ PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS,
+ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT,
+ PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED,
+ PVRSRV_DEVICE_HEALTH_REASON_IDLING,
+ PVRSRV_DEVICE_HEALTH_REASON_RESTARTING
+} PVRSRV_DEVICE_HEALTH_REASON;
+
+typedef PVRSRV_ERROR (*FN_CREATERAMBACKEDPMR)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr);
+
+typedef struct _PVRSRV_DEVICE_NODE_
+{
+ PVRSRV_DEVICE_IDENTIFIER sDevId;
+
+ PVRSRV_DEVICE_STATE eDevState;
+ ATOMIC_T eHealthStatus; /* Holds values from PVRSRV_DEVICE_HEALTH_STATUS */
+ ATOMIC_T eHealthReason; /* Holds values from PVRSRV_DEVICE_HEALTH_REASON */
+
+ IMG_HANDLE *hDebugTable;
+
+ /* device specific MMU attributes */
+ MMU_DEVICEATTRIBS *psMMUDevAttrs;
+	/* device specific MMU firmware attributes, used only in some devices */
+ MMU_DEVICEATTRIBS *psFirmwareMMUDevAttrs;
+
+ /* lock for power state transitions */
+ POS_LOCK hPowerLock;
+ /* current system device power state */
+ PVRSRV_SYS_POWER_STATE eCurrentSysPowerState;
+ PPVRSRV_POWER_DEV psPowerDev;
+
+ /*
+ callbacks the device must support:
+ */
+
+ FN_CREATERAMBACKEDPMR pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+ PVRSRV_ERROR (*pfnDevPxAlloc)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+ void (*pfnDevPxFree)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *psMemHandle);
+
+ PVRSRV_ERROR (*pfnDevPxMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, PG_HANDLE *pshMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr);
+
+ void (*pfnDevPxUnMap)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PG_HANDLE *psMemHandle, void *pvPtr);
+
+ PVRSRV_ERROR (*pfnDevPxClean)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PG_HANDLE *pshMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength);
+
+ IMG_UINT32 uiMMUPxLog2AllocGran;
+
+ void (*pfnMMUCacheInvalidate)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_HANDLE hDeviceData,
+ MMU_LEVEL eLevel,
+ IMG_BOOL bUnmap);
+
+ PVRSRV_ERROR (*pfnMMUCacheInvalidateKick)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_UINT16 *pui16NextMMUInvalidateUpdate,
+ IMG_BOOL bInterrupt);
+
+ IMG_UINT32 (*pfnMMUCacheGetInvalidateCounter)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+
+ void (*pfnDumpDebugInfo)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+ PVRSRV_ERROR (*pfnUpdateHealthStatus)(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_BOOL bIsTimerPoll);
+
+ PVRSRV_ERROR (*pfnResetHWRLogs)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+ /* Method to drain device HWPerf packets from firmware buffer to host buffer */
+ PVRSRV_ERROR (*pfnServiceHWPerf)(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+ PVRSRV_ERROR (*pfnDeviceVersionString)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_CHAR **ppszVersionString);
+
+ PVRSRV_ERROR (*pfnDeviceClockSpeed)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_PUINT32 pui32RGXClockSpeed);
+
+ PVRSRV_ERROR (*pfnSoftReset)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+
+ PVRSRV_ERROR (*pfnAlignmentCheck)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT32 ui32FWAlignChecksSize, IMG_UINT32 aui32FWAlignChecks[]);
+ IMG_BOOL (*pfnCheckDeviceFeature)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, IMG_UINT64 ui64FeatureMask);
+
+ IMG_INT32 (*pfnGetDeviceFeatureValue)(struct _PVRSRV_DEVICE_NODE_ *psDevNode, enum _RGX_FEATURE_WITH_VALUE_INDEX_ eFeatureIndex);
+
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+ /* device post-finalise compatibility check */
+ PVRSRV_ERROR (*pfnInitDeviceCompatCheck) (struct _PVRSRV_DEVICE_NODE_*);
+
+ /* information about the device's address space and heaps */
+ DEVICE_MEMORY_INFO sDevMemoryInfo;
+
+ /* device's shared-virtual-memory heap max virtual address */
+ IMG_UINT64 ui64GeneralSVMHeapTopVA;
+
+ ATOMIC_T iNumClockSpeedChanges;
+
+ /* private device information */
+ void *pvDevice;
+
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ RA_ARENA *psOSidSubArena[GPUVIRT_VALIDATION_NUM_OS];
+#endif
+
+
+#define PVRSRV_MAX_RA_NAME_LENGTH (50)
+ RA_ARENA **apsLocalDevMemArenas;
+ IMG_CHAR **apszRANames;
+ IMG_UINT32 ui32NumOfLocalMemArenas;
+
+ IMG_CHAR szKernelFwRawRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+ IMG_CHAR szKernelFwMainRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+ IMG_CHAR szKernelFwConfigRAName[RGXFW_NUM_OS][PVRSRV_MAX_RA_NAME_LENGTH];
+ RA_ARENA *psKernelFwRawMemArena[RGXFW_NUM_OS];
+ RA_ARENA *psKernelFwMainMemArena[RGXFW_NUM_OS];
+ RA_ARENA *psKernelFwConfigMemArena[RGXFW_NUM_OS];
+ RA_BASE_T ui64RABase[RGXFW_NUM_OS];
+ IMG_UINT32 uiKernelFwRAIdx;
+
+ IMG_UINT32 ui32RegisteredPhysHeaps;
+ PHYS_HEAP **papsRegisteredPhysHeaps;
+
+ /*
+ * Pointers to the device's physical memory heap(s)
+ * The first entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) will be used for allocations
+ * where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is not set. Normally this will be an LMA heap
+ * (but the device configuration could specify a UMA heap here, if desired)
+ * The second entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) will be used for allocations
+ * where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL flag is set. Normally this will be a UMA heap
+ * (but the configuration could specify an LMA heap here, if desired)
+ * The third entry (apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) will be used for allocations
+ * where the PVRSRV_MEMALLOCFLAG_FW_LOCAL flag is set; this is used when virtualization is enabled
+ * The device configuration will always specify two physical heap IDs - in the event of the device
+ * only using one physical heap, both of these IDs will be the same, and hence both pointers below
+ * will also be the same; when virtualization is enabled the device configuration specifies
+ * three physical heap IDs, the last being for PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL allocations
+ */
+ PHYS_HEAP *apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+ struct _PVRSRV_DEVICE_NODE_ *psNext;
+ struct _PVRSRV_DEVICE_NODE_ **ppsThis;
+
+ /* Functions for notification about memory contexts */
+ PVRSRV_ERROR (*pfnRegisterMemoryContext)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE *hPrivData);
+ void (*pfnUnregisterMemoryContext)(IMG_HANDLE hPrivData);
+
+ /* Functions for allocation/freeing of UFOs */
+ AllocUFOBlockCallback pfnAllocUFOBlock; /*!< Callback for allocation of a block of UFO memory */
+ FreeUFOBlockCallback pfnFreeUFOBlock; /*!< Callback for freeing of a block of UFO memory */
+
+ IMG_HANDLE hSyncServerNotify;
+ POS_LOCK hSyncServerListLock;
+ DLLIST_NODE sSyncServerSyncsList;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ IMG_HANDLE hSyncServerRecordNotify;
+ POS_LOCK hSyncServerRecordLock;
+ IMG_UINT32 ui32SyncServerRecordCount;
+ IMG_UINT32 ui32SyncServerRecordCountHighWatermark;
+ DLLIST_NODE sSyncServerRecordList;
+ struct SYNC_RECORD *apsSyncServerRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+ IMG_UINT32 uiSyncServerRecordFreeIdx;
+
+ IMG_HANDLE hSyncCheckpointRecordNotify;
+ POS_LOCK hSyncCheckpointRecordLock;
+ IMG_UINT32 ui32SyncCheckpointRecordCount;
+ IMG_UINT32 ui32SyncCheckpointRecordCountHighWatermark;
+ DLLIST_NODE sSyncCheckpointRecordList;
+ struct SYNC_CHECKPOINT_RECORD *apsSyncCheckpointRecordsFreed[PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN];
+ IMG_UINT32 uiSyncCheckpointRecordFreeIdx;
+#endif
+
+ IMG_HANDLE hSyncCheckpointNotify;
+ POS_LOCK hSyncCheckpointListLock;
+ DLLIST_NODE sSyncCheckpointSyncsList;
+
+ PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext;
+ PSYNC_PRIM_CONTEXT hSyncPrimContext;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim;
+ /* With this sync-prim we make sure the MMU cache is flushed
+ * before we free the page table memory */
+ PVRSRV_CLIENT_SYNC_PRIM *psMMUCacheSyncPrim;
+ IMG_UINT16 ui16NextMMUInvalidateUpdate;
+
+ IMG_HANDLE hCmdCompNotify;
+ IMG_HANDLE hDbgReqNotify;
+ IMG_HANDLE hHtbDbgReqNotify;
+ IMG_HANDLE hAppHintDbgReqNotify;
+ IMG_HANDLE hThreadsDbgReqNotify;
+
+ PVRSRV_DUMMY_PAGE sDummyPage;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POSWR_LOCK hMemoryContextPageFaultNotifyListLock;
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ DLLIST_NODE sMemoryContextPageFaultNotifyListHead;
+
+ /* PC address used to find contexts to be notified of a page fault */
+ IMG_UINT64 ui64ContextResetPCAddress;
+
+#if defined(PDUMP)
+ /* device-level callback which is called when pdump.exe starts.
+ * Should be implemented in device-specific init code, e.g. rgxinit.c
+ */
+ PVRSRV_ERROR (*pfnPDumpInitDevice)(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+ /* device-level callback to return pdump ID associated to a memory context */
+ IMG_UINT32 (*pfnMMUGetContextID)(IMG_HANDLE hDevMemContext);
+#endif
+
+#if defined(SUPPORT_VALIDATION) && !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hValidationLock;
+#endif
+} PVRSRV_DEVICE_NODE;
+
+/*
+ * Macros to be used instead of calling directly the pfns since these macros
+ * will expand the feature passed as argument into the bitmask/index to work
+ * with the macros defined in rgx_bvnc_defs_km.h
+ */
+#define PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, Feature) \
+ psDevNode->pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_##Feature##_BIT_MASK)
+#define PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, Feature) \
+ psDevNode->pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_##Feature##_IDX)
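A short usage sketch of the two macros (META is a representative valued feature from rgx_bvnc_defs_km.h; actual availability depends on the BVNC tables):

    /* PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, META) expands to
     * pfnCheckDeviceFeature(psDevNode, RGX_FEATURE_META_BIT_MASK) */
    if (PVRSRV_IS_FEATURE_SUPPORTED(psDevNode, META))
    {
    	/* Expands to pfnGetDeviceFeatureValue(psDevNode, RGX_FEATURE_META_IDX) */
    	IMG_INT32 i32MetaValue = PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, META);
    	PVR_LOG(("META feature value: %d", i32MetaValue));
    }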
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bInitSuccessful);
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions);
+
+
+#endif /* __DEVICE_H__ */
+
+/******************************************************************************
+ End of file (device.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/device_connection.h b/drivers/gpu/drm/img-rogue/1.10/device_connection.h
new file mode 100644
index 00000000000000..e5d122020f1ce1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/device_connection.h
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File device_connection.h
+@Title          Device connection
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Shared device connection handle type and device capability
+                flags passed over the bridge at connection time.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__DEVICE_CONNECTION_H__)
+#define __DEVICE_CONNECTION_H__
+
+#include "img_types.h"
+
+#if defined(__KERNEL__)
+typedef struct _PVRSRV_DEVICE_NODE_ *SHARED_DEV_CONNECTION;
+#else
+typedef IMG_HANDLE SHARED_DEV_CONNECTION;
+#endif
+
+/******************************************************************************
+ * Device capability flags and masks
+ *****************************************************************************/
+
+/* Flag to be passed over the bridge during connection stating whether CPU cache coherency is available */
+#define PVRSRV_CACHE_COHERENT_SHIFT (0)
+#define PVRSRV_CACHE_COHERENT_DEVICE_FLAG (1U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_CPU_FLAG (2U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_EMULATE_FLAG (4U << PVRSRV_CACHE_COHERENT_SHIFT)
+#define PVRSRV_CACHE_COHERENT_MASK (7U << PVRSRV_CACHE_COHERENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating whether CPU non-mappable memory is present */
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT (7)
+#define PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG (1U << PVRSRV_NONMAPPABLE_MEMORY_PRESENT_SHIFT)
+
+/* Flag to be passed over the bridge during connection stating SVM allocation availability */
+#define PVRSRV_DEVMEM_SVM_ALLOC_SHIFT (8)
+#define PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED (1U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED (2U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+#define PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL (4U << PVRSRV_DEVMEM_SVM_ALLOC_SHIFT)
+
+#define PVRSRV_PDUMP_IS_RECORDING_SHIFT (4)
+#define PVRSRV_PDUMP_IS_RECORDING (1U << PVRSRV_PDUMP_IS_RECORDING_SHIFT)
+
+#endif /* !defined(__DEVICE_CONNECTION_H__) */
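A minimal sketch of decoding the capability word on the client side (ui32Caps stands for the flags word received at connection time; how it is fetched is outside this header):

    /* Full coherency requires both the device- and CPU-side flags within
     * the coherency field; test against the whole mask so the EMULATE
     * flag is not mistaken for hardware coherency. */
    static IMG_BOOL DeviceIsFullyCoherent(IMG_UINT32 ui32Caps)
    {
    	return ((ui32Caps & PVRSRV_CACHE_COHERENT_MASK) ==
    	        (PVRSRV_CACHE_COHERENT_DEVICE_FLAG |
    	         PVRSRV_CACHE_COHERENT_CPU_FLAG)) ? IMG_TRUE : IMG_FALSE;
    }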
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem.c b/drivers/gpu/drm/img-rogue/1.10/devicemem.c
new file mode 100644
index 00000000000000..bbde8fad72fcd8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem.c
@@ -0,0 +1,2960 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Front End (nominally Client side part, but now invokable
+ from server too) of device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+#include "client_cache_bridge.h"
+#include "services_km.h"
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+#include "pdump_km.h"
+#else
+#include "client_pdump_bridge.h"
+#endif
+#include "devicemem_pdump.h"
+#endif
+#if defined(PVR_RI_DEBUG)
+#include "client_ri_bridge.h"
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "client_devicememhistory_bridge.h"
+#endif
+
+#include "rgx_heaps.h"
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#include "rgxdefs_km.h"
+#include "rgx_bvnc_defs_km.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "pvr_ricommon.h"
+#if defined(LINUX)
+#include "linux/kernel.h"
+#endif
+#else
+#include "rgxdefs.h"
+#endif
+
+#if defined(__KERNEL__) && defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR RIDumpAllKM(void);
+#endif
+
+#if defined(__KERNEL__)
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorStringKM(eError)
+#else
+#define GET_ERROR_STRING(eError) PVRSRVGetErrorString(eError)
+#endif
+/*****************************************************************************
+ * Sub allocation internals *
+ *****************************************************************************/
+
+static INLINE void
+_CheckAnnotationLength(const IMG_CHAR *pszAnnotation)
+{
+#if defined(SUPPORT_PAGE_FAULT_DEBUG) || defined(PDUMP) || defined(PVR_RI_DEBUG)
+ IMG_UINT32 length = OSStringLength(pszAnnotation);
+
+ if (length >= DEVMEM_ANNOTATION_MAX_LEN)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Annotation \"%s\" has been truncated to %d characters from %d characters",
+ __FUNCTION__, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1, length));
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+}
+
+static PVRSRV_ERROR
+_AllocateDeviceMemory(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiLog2Quantum,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ IMG_BOOL bExportable,
+ const IMG_CHAR *pszAnnotation,
+ DEVMEM_IMPORT **ppsImport)
+{
+ DEVMEM_IMPORT *psImport;
+ DEVMEM_FLAGS_T uiPMRFlags;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+
+ eError = _DevmemImportStructAlloc(hDevConnection,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failAlloc;
+ }
+
+ /* Check the size is a multiple of the quantum */
+ PVR_ASSERT((uiSize & ((1ULL<<uiLog2Quantum)-1)) == 0);
+
+ _CheckAnnotationLength(pszAnnotation);
+
+ /* Pass only the PMR flags down */
+ uiPMRFlags = uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK;
+ eError = BridgePhysmemNewRamBackedPMR(hDevConnection,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2Quantum,
+ uiPMRFlags,
+ OSStringNLength(pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN - 1) + 1,
+ pszAnnotation,
+ OSGetCurrentProcessID(),
+ &hPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+		         "%s: Failed to allocate memory for %s (%s)",
+ __func__,
+ pszAnnotation,
+ PVRSRVGETERRORSTRING(eError) ));
+ goto failPMR;
+ }
+
+ _DevmemImportStructInit(psImport,
+ uiSize,
+ uiAlign,
+ uiFlags,
+ hPMR,
+ bExportable ? DEVMEM_PROPERTIES_EXPORTABLE : 0);
+
+ *ppsImport = psImport;
+ return PVRSRV_OK;
+
+ failPMR:
+ _DevmemImportDiscard(psImport);
+ failAlloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*****************************************************************************
+ * Sub allocation internals *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pauiFreePageIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+ SHARED_DEV_CONNECTION hDevConnection;
+ IMG_HANDLE hPMR;
+ IMG_HANDLE hSrvDevMemHeap;
+ POS_LOCK hLock;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_CPU_VIRTADDR pvCpuVAddr;
+
+ if (NULL == psImport)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Sparse memory import", __func__));
+ goto e0;
+ }
+
+ hDevConnection = psImport->hDevConnection;
+ hPMR = psImport->hPMR;
+ hLock = psImport->hLock;
+ sDevVAddr = psImport->sDeviceImport.sDevVAddr;
+ pvCpuVAddr = psImport->sCPUImport.pvCPUVAddr;
+
+ if (NULL == hDevConnection)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Bridge handle", __func__));
+ goto e0;
+ }
+
+ if (NULL == hPMR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid PMR handle", __func__));
+ goto e0;
+ }
+
+ if ((uiSparseFlags & SPARSE_RESIZE_BOTH) && (0 == sDevVAddr.uiAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid Device Virtual Map", __func__));
+ goto e0;
+ }
+
+ if ((uiSparseFlags & SPARSE_MAP_CPU_ADDR) && (NULL == pvCpuVAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid CPU Virtual Map", __func__));
+ goto e0;
+ }
+
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Secure buffers currently do not support sparse changes",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ hSrvDevMemHeap = psImport->sDeviceImport.psHeap->hDevMemServerHeap;
+
+ OSLockAcquire(hLock);
+
+ eError = BridgeChangeSparseMem(hDevConnection,
+ hSrvDevMemHeap,
+ hPMR,
+ ui32AllocPageCount,
+ paui32AllocPageIndices,
+ ui32FreePageCount,
+ pauiFreePageIndices,
+ uiSparseFlags,
+ psImport->uiFlags,
+ sDevVAddr,
+ (IMG_UINT64)((uintptr_t)pvCpuVAddr));
+
+ OSLockRelease(hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+	if (PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ BridgeDevicememHistorySparseChange(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->szText,
+ DevmemGetHeapLog2PageSize(psImport->sDeviceImport.psHeap),
+ ui32AllocPageCount,
+ paui32AllocPageIndices,
+ ui32FreePageCount,
+ pauiFreePageIndices,
+ psMemDesc->ui32AllocationIndex,
+ &psMemDesc->ui32AllocationIndex);
+ }
+#endif
+
+#ifdef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+ if ((PVRSRV_OK == eError) && (psMemDesc->sCPUMemDesc.ui32RefCount))
+ {
+ /*
+ * Release the CPU Virtual mapping here
+ * the caller is supposed to map entire range again
+ */
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+ }
+#endif
+
+ e0:
+ return eError;
+}
+
+static void
+_FreeDeviceMemory(DEVMEM_IMPORT *psImport)
+{
+ _DevmemImportStructRelease(psImport);
+}
+
+static PVRSRV_ERROR
+_SubAllocImportAlloc(RA_PERARENA_HANDLE hArena,
+ RA_LENGTH_T uiSize,
+ RA_FLAGS_T _flags,
+ const IMG_CHAR *pszAnnotation,
+ /* returned data */
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phImport)
+{
+ /* When suballocations need a new lump of memory, the RA calls
+ back here. Later, in the kernel, we must construct a new PMR
+ and a pairing between the new lump of virtual memory and the
+ PMR (whether or not such PMR is backed by physical memory) */
+ DEVMEM_HEAP *psHeap;
+ DEVMEM_IMPORT *psImport;
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32MappingTable = 0;
+ DEVMEM_FLAGS_T uiFlags = (DEVMEM_FLAGS_T) _flags;
+ IMG_UINT64 ui64OptionalMapAddress = DEVICEMEM_UTILS_NO_ADDRESS;
+
+ /* Per-arena private handle is, for us, the heap */
+ psHeap = hArena;
+
+ /* align to the l.s.b. of the size... e.g. 96kiB aligned to
+ 32kiB. NB: There is an argument to say that the RA should never
+ ask us for Non-power-of-2 size anyway, but I don't want to make
+ that restriction arbitrarily now */
+ uiAlign = uiSize & ~(uiSize-1);
+
+ /* Technically this is only required for guest drivers due to
+ fw heaps being pre-allocated and pre-mapped resulting in
+ a 1:1 (i.e. virtual : physical) offset correlation but we
+ force this behaviour for all drivers to maintain consistency
+ (i.e. heap->VA uiAlign <= heap->PA uiLog2Quantum) */
+ if (uiAlign > (IMG_DEVMEM_ALIGN_T)(1 << psHeap->uiLog2Quantum))
+ {
+ uiAlign = (IMG_DEVMEM_ALIGN_T)(1 << psHeap->uiLog2Quantum);
+ }
+
+ /* The RA should not have invoked us with a size that is not a
+ multiple of the quantum anyway */
+ PVR_ASSERT((uiSize & ((1ULL<<psHeap->uiLog2Quantum)-1)) == 0);
+
+ eError = _AllocateDeviceMemory(psHeap->psCtx->hDevConnection,
+ psHeap->uiLog2Quantum,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ uiAlign,
+ uiFlags,
+ IMG_FALSE,
+ "PMR sub-allocated",
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failAlloc;
+ }
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+ PDumpCommentWithFlags(PDUMP_NONE,
+ "Created PMR for sub-allocations with handle ID: 0x%p Annotation: \"%s\" (PID %u)",
+ psImport->hPMR, pszAnnotation, OSGetCurrentProcessID());
+#else
+ {
+ IMG_CHAR pszComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ OSSNPrintf(pszComment,
+ PVRSRV_PDUMP_MAX_COMMENT_SIZE,
+ "Created PMR for sub-allocations with handle ID: %p Annotation: \"%s\" (PID %u)",
+ psImport->hPMR, pszAnnotation, OSGetCurrentProcessID());
+
+ BridgePVRSRVPDumpComment(psHeap->psCtx->hDevConnection, pszComment, IMG_FALSE);
+ }
+#endif
+#else
+ PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+	if (PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+#if defined(__KERNEL__)
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDevNode->pvDevice;
+
+ PVR_ASSERT(PVRSRV_CHECK_FW_LOCAL(uiFlags));
+
+ /* If allocation is made by the Kernel from the firmware heap, account for it
+ * under the PVR_SYS_ALLOC_PID.
+ */
+ if ((psHeap == psDevInfo->psFirmwareMainHeap) || (psHeap == psDevInfo->psFirmwareConfigHeap))
+ {
+ eError = BridgeRIWritePMREntryWithOwner (psImport->hDevConnection,
+ psImport->hPMR,
+ PVR_SYS_ALLOC_PID);
+			if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntryWithOwner failed (eError=%d)", __func__, eError));
+ }
+ }
+ else
+#endif
+ {
+ eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+ psImport->hPMR);
+			if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif
+
+	/*
+	Suballocations always get mapped into the device as we need to
+	key the RA off something and as we can't export suballocations
+	there is no valid reason to request an allocation and not map it
+	*/
+ eError = _DevmemImportStructDevMap(psHeap,
+ IMG_TRUE,
+ psImport,
+ ui64OptionalMapAddress);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+	/* Mark this import struct as zeroed so we can save some PDump LDBs
+	 * and do not have to CPU map + memset() */
+ if (uiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_ZEROED;
+ }
+ else if (uiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC)
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_POISONED;
+ }
+ psImport->uiProperties |= DEVMEM_PROPERTIES_IMPORT_IS_CLEAN;
+
+ *puiBase = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ *puiActualSize = uiSize;
+ *phImport = psImport;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+ failMap:
+ _FreeDeviceMemory(psImport);
+ failAlloc:
+
+ return eError;
+}
+
+static void
+_SubAllocImportFree(RA_PERARENA_HANDLE hArena,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hImport)
+{
+ DEVMEM_IMPORT *psImport = hImport;
+
+ PVR_ASSERT(psImport != NULL);
+ PVR_ASSERT(hArena == psImport->sDeviceImport.psHeap);
+ PVR_ASSERT(uiBase == psImport->sDeviceImport.sDevVAddr.uiAddr);
+
+ _DevmemImportStructDevUnmap(psImport);
+ _DevmemImportStructRelease(psImport);
+}
+
+/*****************************************************************************
+ * Devmem context internals *
+ *****************************************************************************/
+
+static PVRSRV_ERROR
+_PopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ struct _DEVMEM_HEAP_ **ppsHeapArray;
+ IMG_UINT32 uiNumHeaps;
+ IMG_UINT32 uiHeapsToUnwindOnError;
+ IMG_UINT32 uiHeapIndex;
+ IMG_DEV_VIRTADDR sDevVAddrBase;
+ IMG_CHAR aszHeapName[DEVMEM_HEAPNAME_MAXLENGTH];
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2DataPageSize;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2ImportAlignment;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2TilingStrideFactor;
+
+ eError = DevmemHeapCount(psCtx->hDevConnection,
+ uiHeapBlueprintID,
+ &uiNumHeaps);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ if (uiNumHeaps == 0)
+ {
+ ppsHeapArray = NULL;
+ }
+ else
+ {
+ ppsHeapArray = OSAllocMem(sizeof(*ppsHeapArray) * uiNumHeaps);
+ if (ppsHeapArray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ uiHeapsToUnwindOnError = 0;
+
+ for (uiHeapIndex = 0; uiHeapIndex < uiNumHeaps; uiHeapIndex++)
+ {
+ eError = DevmemHeapDetails(psCtx->hDevConnection,
+ uiHeapBlueprintID,
+ uiHeapIndex,
+ &aszHeapName[0],
+ sizeof(aszHeapName),
+ &sDevVAddrBase,
+ &uiHeapLength,
+ &uiLog2DataPageSize,
+ &uiLog2ImportAlignment,
+ &uiLog2TilingStrideFactor);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ eError = DevmemCreateHeap(psCtx,
+ sDevVAddrBase,
+ uiHeapLength,
+ uiLog2DataPageSize,
+ uiLog2ImportAlignment,
+ uiLog2TilingStrideFactor,
+ aszHeapName,
+ uiHeapBlueprintID,
+ &ppsHeapArray[uiHeapIndex]);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ uiHeapsToUnwindOnError = uiHeapIndex + 1;
+ }
+
+ psCtx->uiAutoHeapCount = uiNumHeaps;
+ psCtx->ppsAutoHeapArray = ppsHeapArray;
+
+ PVR_ASSERT(psCtx->uiNumHeaps >= psCtx->uiAutoHeapCount);
+ PVR_ASSERT(psCtx->uiAutoHeapCount == uiNumHeaps);
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths
+ */
+ e1:
+ for (uiHeapIndex = 0; uiHeapIndex < uiHeapsToUnwindOnError; uiHeapIndex++)
+ {
+ eError2 = DevmemDestroyHeap(ppsHeapArray[uiHeapIndex]);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ }
+
+ if (uiNumHeaps != 0)
+ {
+ OSFreeMem(ppsHeapArray);
+ }
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR
+_UnpopulateContextFromBlueprint(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+ PVRSRV_ERROR eReturn = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 uiHeapIndex;
+ IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+
+ for (uiHeapIndex = 0; uiHeapIndex < psCtx->uiAutoHeapCount; uiHeapIndex++)
+ {
+ if (!psCtx->ppsAutoHeapArray[uiHeapIndex])
+ {
+ continue;
+ }
+
+ eError2 = DevmemDestroyHeap(psCtx->ppsAutoHeapArray[uiHeapIndex]);
+ if (eError2 != PVRSRV_OK)
+ {
+ eReturn = eError2;
+ }
+ else
+ {
+ psCtx->ppsAutoHeapArray[uiHeapIndex] = NULL;
+ }
+ }
+
+ if ((!bDoCheck || (eReturn == PVRSRV_OK)) && psCtx->ppsAutoHeapArray)
+ {
+ OSFreeMem(psCtx->ppsAutoHeapArray);
+ psCtx->ppsAutoHeapArray = NULL;
+ psCtx->uiAutoHeapCount = 0;
+ }
+
+ return eReturn;
+}
+
+static PVRSRV_ERROR
+_AllocateMCUFenceAddress(struct _DEVMEM_CONTEXT_ *psCtx)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_HEAP *psGeneralHeap;
+ IMG_DEV_VIRTADDR sTempMCUFenceAddr;
+
+ eError = DevmemFindHeapByName(psCtx, RGX_GENERAL_HEAP_IDENT, &psGeneralHeap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: General Heap not found (%s)", __func__, GET_ERROR_STRING(eError)));
+ goto e0;
+ }
+
+ /* MCUFence: Fixed address reserved per Memory Context */
+ eError = DevmemAllocate(psGeneralHeap,
+ sizeof(IMG_UINT32),
+ RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ "MCUFence",
+ &psCtx->psMCUFenceMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate MCU fence word (%s)", __func__, GET_ERROR_STRING(eError)));
+ goto e0;
+ }
+
+	/* This is the first memory allocation on the General Heap so its virtual
+	 * address is always equal to the heap base address. Storing this address
+	 * separately is not required. */
+ eError = DevmemMapToDevice(psCtx->psMCUFenceMemDesc, psGeneralHeap, &sTempMCUFenceAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to map MCU fence word (%s)", __func__, GET_ERROR_STRING(eError)));
+ goto e1;
+ }
+ else if (sTempMCUFenceAddr.uiAddr != psGeneralHeap->sBaseAddress.uiAddr)
+ {
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: MCU_FENCE address (%llx) not at the start of General Heap (%llx)", \
+ __FUNCTION__, (long long unsigned) sTempMCUFenceAddr.uiAddr, \
+ (long long unsigned) psGeneralHeap->sBaseAddress.uiAddr));
+ eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+ goto e1;
+ }
+
+ e0:
+ return eError;
+
+ e1:
+ DevmemFree(psCtx->psMCUFenceMemDesc);
+ psCtx->psMCUFenceMemDesc = NULL;
+ return eError;
+}
+
+/*****************************************************************************
+ * Devmem context functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_CONTEXT **ppsCtxPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_CONTEXT *psCtx;
+ /* handle to the server-side counterpart of the device memory
+ context (specifically, for handling mapping to device MMU) */
+ IMG_HANDLE hDevMemServerContext;
+ IMG_HANDLE hPrivData;
+ IMG_BOOL bHeapCfgMetaId = (uiHeapBlueprintID == DEVMEM_HEAPCFG_META);
+
+ if (ppsCtxPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psCtx = OSAllocMem(sizeof *psCtx);
+ if (psCtx == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ psCtx->uiNumHeaps = 0;
+
+ psCtx->hDevConnection = hDevConnection;
+
+ /* Create (server-side) Device Memory context */
+ eError = BridgeDevmemIntCtxCreate(psCtx->hDevConnection,
+ bHeapCfgMetaId,
+ &hDevMemServerContext,
+ &hPrivData,
+ &psCtx->ui32CPUCacheLineSize);
+ if (eError != PVRSRV_OK) goto e1;
+
+ psCtx->hDevMemServerContext = hDevMemServerContext;
+ psCtx->hPrivData = hPrivData;
+
+ /* automagic heap creation */
+ psCtx->uiAutoHeapCount = 0;
+
+ eError = _PopulateContextFromBlueprint(psCtx, uiHeapBlueprintID);
+ if (eError != PVRSRV_OK) goto e2;
+
+	/* Allocate a word at the start of the General heap to be used as the MCU_FENCE address */
+ if (uiHeapBlueprintID == DEVMEM_HEAPCFG_FORCLIENTS)
+ {
+ eError = _AllocateMCUFenceAddress(psCtx);
+ if (eError != PVRSRV_OK) goto e2;
+ }
+ else
+ {
+ psCtx->psMCUFenceMemDesc = NULL;
+ }
+
+ *ppsCtxPtr = psCtx;
+
+ PVR_ASSERT(psCtx->uiNumHeaps == psCtx->uiAutoHeapCount);
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e2:
+ PVR_ASSERT(psCtx->uiAutoHeapCount == 0);
+ PVR_ASSERT(psCtx->uiNumHeaps == 0);
+ BridgeDevmemIntCtxDestroy(psCtx->hDevConnection, hDevMemServerContext);
+
+ e1:
+ OSFreeMem(psCtx);
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+ IMG_HANDLE *hPrivData)
+{
+ PVRSRV_ERROR eError;
+
+ if ((psCtx == NULL) || (hPrivData == NULL))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ *hPrivData = psCtx->hPrivData;
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx)
+{
+ PVRSRV_ERROR eError;
+
+ if (psCtx == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemFindHeapByName(const struct _DEVMEM_CONTEXT_ *psCtx,
+ const IMG_CHAR *pszHeapName,
+ struct _DEVMEM_HEAP_ **ppsHeapRet)
+{
+ IMG_UINT32 uiHeapIndex;
+
+ /* N.B. This func is only useful for finding "automagic" heaps by name */
+ for (uiHeapIndex = 0;
+ uiHeapIndex < psCtx->uiAutoHeapCount;
+ uiHeapIndex++)
+ {
+ if (!OSStringCompare(psCtx->ppsAutoHeapArray[uiHeapIndex]->pszName, pszHeapName))
+ {
+ *ppsHeapRet = psCtx->ppsAutoHeapArray[uiHeapIndex];
+ return PVRSRV_OK;
+ }
+ }
+
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bDoCheck = IMG_TRUE;
+
+#if defined(__KERNEL__)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+
+ if (psCtx == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (psCtx->psMCUFenceMemDesc != NULL)
+ {
+ DevmemReleaseDevVirtAddr(psCtx->psMCUFenceMemDesc);
+ DevmemFree(psCtx->psMCUFenceMemDesc);
+ }
+
+ eError = _UnpopulateContextFromBlueprint(psCtx);
+ if (bDoCheck && eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: _UnpopulateContextFromBlueprint failed (%d) leaving %d heaps",
+ __func__, eError, psCtx->uiNumHeaps));
+ goto e1;
+ }
+
+ eError = BridgeDevmemIntCtxDestroy(psCtx->hDevConnection,
+ psCtx->hDevMemServerContext);
+ if (bDoCheck && eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BridgeDevmemIntCtxDestroy failed (%d)",
+ __func__, eError));
+ goto e1;
+ }
+
+ /* should be no more heaps left */
+ if (bDoCheck && psCtx->uiNumHeaps)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Additional heaps remain in DEVMEM_CONTEXT",
+ __func__));
+ eError = PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT;
+ goto e1;
+ }
+
+ OSDeviceMemSet(psCtx, 0, sizeof(*psCtx));
+ OSFreeMem(psCtx);
+
+ e1:
+ return eError;
+}
+
+/*****************************************************************************
+ * Devmem heap query functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 *puiNumHeapConfigsOut)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapConfigCount(hDevConnection,
+ puiNumHeapConfigsOut);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapCount(hDevConnection,
+ uiHeapConfigIndex,
+ puiNumHeapsOut);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_CHAR *pszConfigNameOut,
+ IMG_UINT32 uiConfigNameBufSz)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapConfigName(hDevConnection,
+ uiHeapConfigIndex,
+ uiConfigNameBufSz,
+ pszConfigNameOut);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSizeOut,
+ IMG_UINT32 *puiLog2ImportAlignmentOut,
+ IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeHeapCfgHeapDetails(hDevConnection,
+ uiHeapConfigIndex,
+ uiHeapIndex,
+ uiHeapNameBufSz,
+ pszHeapNameOut,
+ psDevVAddrBaseOut,
+ puiHeapLengthOut,
+ puiLog2DataPageSizeOut,
+ puiLog2ImportAlignmentOut,
+ puiLog2TilingStrideFactor);
+
+ VG_MARK_INITIALIZED(pszHeapNameOut,uiHeapNameBufSz);
+
+ return eError;
+}
+
+/*****************************************************************************
+ * Devmem heap functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+ IMG_HANDLE *phDevmemHeap)
+{
+ if (psHeap == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *phDevmemHeap = psHeap->hDevMemServerHeap;
+ return PVRSRV_OK;
+}
+
+/* See devicemem.h for important notes regarding the arguments
+ to this function */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtx,
+ IMG_DEV_VIRTADDR sBaseAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ IMG_UINT32 ui32Log2Quantum,
+ IMG_UINT32 ui32Log2ImportAlignment,
+ IMG_UINT32 ui32Log2TilingStrideFactor,
+ const IMG_CHAR *pszName,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_HEAP **ppsHeapPtr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+ DEVMEM_HEAP *psHeap;
+	/* handle to the server-side counterpart of the device memory
+	   heap (specifically, for handling mapping to device MMU) */
+ IMG_HANDLE hDevMemServerHeap;
+ IMG_BOOL bRANoSplit = IMG_FALSE;
+
+ IMG_CHAR aszBuf[100];
+ IMG_CHAR *pszStr;
+
+ if (ppsHeapPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psHeap = OSAllocMem(sizeof *psHeap);
+ if (psHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+	/* Need to keep a local copy of the heap name, so the caller may free
+	   theirs */
+ pszStr = OSAllocMem(OSStringLength(pszName)+1);
+ if (pszStr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+ OSStringCopy(pszStr, pszName);
+ psHeap->pszName = pszStr;
+
+ psHeap->uiSize = uiLength;
+ psHeap->sBaseAddress = sBaseAddress;
+ OSAtomicWrite(&psHeap->hImportCount,0);
+
+ OSSNPrintf(aszBuf, sizeof(aszBuf),
+ "NDM heap '%s' (suballocs) ctx:%p",
+ pszName, psCtx);
+ pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+ if (pszStr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+ OSStringCopy(pszStr, aszBuf);
+ psHeap->pszSubAllocRAName = pszStr;
+
+#if defined(PDUMP)
+ /* the META heap is shared globally so a single
+ * physical memory import may be used to satisfy
+ * allocations of different processes.
+ * This is problematic when PDumping because the
+ * physical memory import used to satisfy a new allocation
+ * may actually have been imported (and thus the PDump MALLOC
+ * generated) before the PDump client was started, leading to the
+ * MALLOC being missing.
+ * This is solved by disabling splitting of imports for the META physmem
+ * RA, meaning that every firmware allocation gets its own import, thus
+ * ensuring the MALLOC is present for every allocation made within the
+ * pdump capture range
+ */
+	if (uiHeapBlueprintID == DEVMEM_HEAPCFG_META)
+ {
+ bRANoSplit = IMG_TRUE;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(uiHeapBlueprintID);
+#endif
+
+
+ psHeap->psSubAllocRA = RA_Create(psHeap->pszSubAllocRAName,
+ /* Subsequent imports: */
+ ui32Log2Quantum,
+ RA_LOCKCLASS_2,
+ _SubAllocImportAlloc,
+ _SubAllocImportFree,
+ (RA_PERARENA_HANDLE) psHeap,
+ bRANoSplit);
+ if (psHeap->psSubAllocRA == NULL)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+ goto e3;
+ }
+
+ psHeap->uiLog2ImportAlignment = ui32Log2ImportAlignment;
+ psHeap->uiLog2TilingStrideFactor = ui32Log2TilingStrideFactor;
+ psHeap->uiLog2Quantum = ui32Log2Quantum;
+
+ if (! OSStringCompare(pszName, RGX_GENERAL_SVM_HEAP_IDENT))
+ {
+ /* The SVM heap normally starts out as this type though
+ it may transition to DEVMEM_HEAP_TYPE_USER_MANAGED
+ on platforms with more processor virtual address
+ bits than device virtual address bits */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+ }
+ else
+ {
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_UNKNOWN;
+ }
+
+ OSSNPrintf(aszBuf, sizeof(aszBuf),
+ "NDM heap '%s' (QVM) ctx:%p",
+ pszName, psCtx);
+ pszStr = OSAllocMem(OSStringLength(aszBuf)+1);
+ if (pszStr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e4;
+ }
+ OSStringCopy(pszStr, aszBuf);
+ psHeap->pszQuantizedVMRAName = pszStr;
+
+ psHeap->psQuantizedVMRA = RA_Create(psHeap->pszQuantizedVMRAName,
+ /* Subsequent import: */
+ 0, RA_LOCKCLASS_1, NULL, NULL,
+ (RA_PERARENA_HANDLE) psHeap,
+ IMG_FALSE);
+ if (psHeap->psQuantizedVMRA == NULL)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+ goto e5;
+ }
+
+ if (!RA_Add(psHeap->psQuantizedVMRA,
+ (RA_BASE_T)sBaseAddress.uiAddr,
+ (RA_LENGTH_T)uiLength,
+ (RA_FLAGS_T)0, /* This RA doesn't use or need flags */
+ NULL /* per ispan handle */))
+ {
+ RA_Delete(psHeap->psQuantizedVMRA);
+ eError = PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA;
+ goto e5;
+ }
+
+ psHeap->psCtx = psCtx;
+
+
+ /* Create server-side counterpart of Device Memory heap */
+ eError = BridgeDevmemIntHeapCreate(psCtx->hDevConnection,
+ psCtx->hDevMemServerContext,
+ sBaseAddress,
+ uiLength,
+ ui32Log2Quantum,
+ &hDevMemServerHeap);
+ if (eError != PVRSRV_OK)
+ {
+ goto e6;
+ }
+ psHeap->hDevMemServerHeap = hDevMemServerHeap;
+
+ eError = OSLockCreate(&psHeap->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e7;
+ }
+
+ psHeap->psCtx->uiNumHeaps++;
+ *ppsHeapPtr = psHeap;
+
+#if defined(PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING)
+ psHeap->psMemDescList = NULL;
+#endif /* PVRSRV_NEWDEVMEM_SUPPORT_MEM_TRACKING */
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths
+ */
+ e7:
+ eError2 = BridgeDevmemIntHeapDestroy(psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap);
+ PVR_ASSERT (eError2 == PVRSRV_OK);
+ e6:
+ if (psHeap->psQuantizedVMRA)
+ RA_Delete(psHeap->psQuantizedVMRA);
+ e5:
+ if (psHeap->pszQuantizedVMRAName)
+ OSFreeMem(psHeap->pszQuantizedVMRAName);
+ e4:
+ RA_Delete(psHeap->psSubAllocRA);
+ e3:
+ OSFreeMem(psHeap->pszSubAllocRAName);
+ e2:
+ OSFreeMem(psHeap->pszName);
+ e1:
+ OSFreeMem(psHeap);
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(struct _DEVMEM_HEAP_ *psHeap,
+ IMG_DEV_VIRTADDR *pDevVAddr)
+{
+ if (psHeap == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pDevVAddr = psHeap->sBaseAddress;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL void
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+ IMG_DEVMEM_ALIGN_T uiAlign = *puiAlign;
+
+ if ((1ULL << uiLog2Quantum) > uiAlign)
+ {
+ uiAlign = 1ULL << uiLog2Quantum;
+ }
+ uiSize = (uiSize + uiAlign - 1) & ~(uiAlign - 1);
+
+ *puiSize = uiSize;
+ *puiAlign = uiAlign;
+}
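+
+/* Worked example of the adjustment above (illustrative values only):
+ * with uiLog2Quantum = 12 (a 4096-byte quantum) and a requested
+ * alignment of 1024, uiAlign is first raised to 4096; a requested
+ * size of 6000 is then rounded up to the next multiple of that
+ * alignment: (6000 + 4095) & ~4095 = 8192. The allocation therefore
+ * covers whole quanta and needs no suballocation.
+ */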
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap)
+{
+ PVRSRV_ERROR eError;
+ IMG_INT uiImportCount;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__)
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+#endif
+
+ if (psHeap == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ uiImportCount = OSAtomicRead(&psHeap->hImportCount);
+ if (uiImportCount > 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%d(%s) leaks remain", uiImportCount, psHeap->pszName));
+#if defined(__KERNEL__)
+#if defined(PVR_RI_DEBUG)
+ PVR_DPF((PVR_DBG_ERROR, "Details of remaining allocated device memory (for all processes):"));
+ RIDumpAllKM();
+#else
+ PVR_DPF((PVR_DBG_ERROR, "Compile with PVR_RI_DEBUG=1 to get a full "
+ "list of all driver allocations."));
+#endif
+#endif
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (bDoCheck)
+#endif
+ {
+ return PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+ }
+ }
+
+ eError = BridgeDevmemIntHeapDestroy(psHeap->psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (bDoCheck)
+#endif
+ {
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: BridgeDevmemIntHeapDestroy failed (%d)",
+ __func__, eError));
+ return eError;
+ }
+ }
+
+ PVR_ASSERT(psHeap->psCtx->uiNumHeaps > 0);
+ psHeap->psCtx->uiNumHeaps--;
+
+ OSLockDestroy(psHeap->hLock);
+
+ if (psHeap->psQuantizedVMRA)
+ {
+ RA_Delete(psHeap->psQuantizedVMRA);
+ }
+ if (psHeap->pszQuantizedVMRAName)
+ {
+ OSFreeMem(psHeap->pszQuantizedVMRAName);
+ }
+
+ RA_Delete(psHeap->psSubAllocRA);
+ OSFreeMem(psHeap->pszSubAllocRAName);
+
+ OSFreeMem(psHeap->pszName);
+
+ OSDeviceMemSet(psHeap, 0, sizeof(*psHeap));
+ OSFreeMem(psHeap);
+
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ * Devmem allocation/free functions *
+ *****************************************************************************/
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ RA_BASE_T uiAllocatedAddr;
+ RA_LENGTH_T uiAllocatedSize;
+ RA_PERISPAN_HANDLE hImport; /* the "import" from which this sub-allocation came */
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ IMG_DEVMEM_OFFSET_T uiOffset = 0;
+ DEVMEM_IMPORT *psImport;
+ IMG_UINT32 ui32CPUCacheLineSize;
+ void *pvAddr = NULL;
+
+ IMG_BOOL bImportClean;
+ IMG_BOOL bCPUCleanFlag = PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags);
+ IMG_BOOL bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+ IMG_BOOL bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+ IMG_BOOL bCPUCached = (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) ||
+ PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags));
+ IMG_BOOL bGPUCached = (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) ||
+ PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags));
+ PVRSRV_CACHE_OP eOp = PVRSRV_CACHE_OP_INVALIDATE;
+ IMG_UINT32 ui32CacheLineSize = 0;
+
+ if (uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ {
+ /* Deferred allocation is not supported on suballocations */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if (psHeap == NULL || psHeap->psCtx == NULL || ppsMemDescPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+#if defined(__KERNEL__)
+ {
+ /* The hDevConnection holds two different types of pointers depending on the
+ * address space in which it is used.
+ * In this instance the variable points to the device node in server */
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)psHeap->psCtx->hDevConnection;
+ ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(PVRSRV_GET_DEVICE_FEATURE_VALUE(psDevNode, SLC_CACHE_LINE_SIZE_BITS));
+ }
+#else
+ ui32CacheLineSize = ROGUE_CACHE_LINE_SIZE;
+#endif
+
+ /* The following logic makes sure that any cached memory is aligned to both the CPU and GPU.
+ * To be aligned on both you have to take the Lowest Common Multiple (LCM) of the cache line sizes of each.
+ * As the possibilities are all powers of 2 then simply the largest number can be picked as the LCM.
+ * Therefore this algorithm just picks the highest from the CPU, GPU and given alignments.
+ */
+ ui32CPUCacheLineSize = psHeap->psCtx->ui32CPUCacheLineSize;
+ /* If the CPU cache line size is larger than the alignment given then it is the lowest common multiple
+ * Also checking if the allocation is going to be cached on the CPU
+ * Currently there is no check for the validity of the cache coherent option.
+ * In this case, the alignment could be applied but the mode could still fall back to uncached.
+ */
+ if (ui32CPUCacheLineSize > uiAlign && bCPUCached)
+ {
+ uiAlign = ui32CPUCacheLineSize;
+ }
+
+ /* If the GPU cache line size is larger than the alignment given then it is the lowest common multiple
+ * Also checking if the allocation is going to be cached on the GPU via checking for any of the cached options.
+ * Currently there is no check for the validity of the cache coherent option.
+ * In this case, the alignment could be applied but the mode could still fall back to uncached.
+ */
+ if (ui32CacheLineSize > uiAlign && bGPUCached)
+ {
+ uiAlign = ui32CacheLineSize;
+ }
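+
+ /* Illustration of the two checks above (numbers assumed, not read from
+  * any real device): with a 64-byte CPU cache line, a 128-byte GPU cache
+  * line and a requested alignment of 16, a fully cached allocation ends
+  * up with uiAlign = max(16, 64, 128) = 128, which equals
+  * lcm(16, 64, 128) because all three values are powers of two. */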
+
+ eError = _DevmemValidateParams(uiSize,
+ uiAlign,
+ &uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto failParams;
+ }
+
+ eError = _DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ /* No request for exportable memory so use the RA */
+ eError = RA_Alloc(psHeap->psSubAllocRA,
+ uiSize,
+ uiPreAllocMultiplier,
+ uiFlags,
+ uiAlign,
+ pszText,
+ &uiAllocatedAddr,
+ &uiAllocatedSize,
+ &hImport);
+ if (PVRSRV_OK != eError)
+ {
+ goto failDeviceMemAlloc;
+ }
+
+ psImport = hImport;
+
+ /* This assignment assumes the RA returns an hImport that suballocations
+  * can be made from if uiSize is NOT a multiple of the passed heap's
+  * page size.
+  *
+  * So we check whether uiSize is a page multiple and mark the import
+  * as exportable only if it is (and no pre-allocation multiplier was
+  * requested).
+  */
+ if (!(uiSize & ((1 << psHeap->uiLog2Quantum) - 1)) &&
+ (uiPreAllocMultiplier == RA_NO_IMPORT_MULTIPLIER) )
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_EXPORTABLE;
+ }
+ psImport->uiProperties |= DEVMEM_PROPERTIES_SUBALLOCATABLE;
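+
+ /* Example of the distinction (assuming a 4 KiB heap quantum for
+  * illustration): a 16 KiB request made with RA_NO_IMPORT_MULTIPLIER
+  * fills whole quanta of its import and so may be marked EXPORTABLE,
+  * whereas a 1 KiB request may share its import with other
+  * suballocations and therefore must not be. */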
+
+ uiOffset = uiAllocatedAddr - psImport->sDeviceImport.sDevVAddr.uiAddr;
+
+#if defined(PDUMP)
+#if defined(__KERNEL__)
+ PDumpCommentWithFlags(PDUMP_NONE,
+ "Suballocated %u Byte for \"%s\" from PMR with handle ID: 0x%p (PID %u)",
+ (IMG_UINT32) uiSize, pszText, psImport->hPMR, OSGetCurrentProcessID());
+#else
+ {
+ IMG_CHAR pszComment[PVRSRV_PDUMP_MAX_COMMENT_SIZE];
+ OSSNPrintf(pszComment,
+ PVRSRV_PDUMP_MAX_COMMENT_SIZE,
+ "Suballocated %u Byte for \"%s\" from PMR with handle ID: %p (PID %u)",
+ (IMG_UINT32) uiSize,
+ pszText,
+ psImport->hPMR,
+ OSGetCurrentProcessID());
+
+ BridgePVRSRVPDumpComment(psHeap->psCtx->hDevConnection, pszComment, IMG_FALSE);
+ }
+#endif
+#endif
+
+ _DevmemMemDescInit(psMemDesc,
+ uiOffset,
+ psImport,
+ uiSize);
+
+ bImportClean = ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_CLEAN) != 0);
+
+ /* Zero the memory */
+ if (bZero)
+ {
+ /* Has the import been zeroed on allocation and were no suballocations returned to it so far? */
+ bImportClean = bImportClean && ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_ZEROED) != 0);
+
+ if(!bImportClean)
+ {
+ eOp = PVRSRV_CACHE_OP_FLUSH;
+
+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMaintenance;
+ }
+
+ /* uiSize is a 64-bit quantity whereas the 3rd argument
+ * to OSDeviceMemSet is a 32-bit quantity on 32-bit systems
+ * hence a compiler warning of implicit cast and loss of data.
+ * Added explicit cast and assert to remove warning.
+ */
+ PVR_ASSERT(uiSize < IMG_UINT32_MAX);
+
+ OSDeviceMemSet(pvAddr, 0x0, (size_t) uiSize);
+#if defined(PDUMP)
+ DevmemPDumpLoadZeroMem(psMemDesc, 0, uiSize, PDUMP_FLAGS_CONTINUOUS);
+#endif
+ }
+ }
+ else if (bPoisonOnAlloc)
+ {
+ /* Has the import been poisoned on allocation and were no suballocations returned to it so far? */
+ bPoisonOnAlloc = (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_IMPORT_IS_POISONED) != 0;
+
+ if (!bPoisonOnAlloc)
+ {
+ eOp = PVRSRV_CACHE_OP_FLUSH;
+
+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMaintenance;
+ }
+
+ if (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) ||
+ PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))
+ {
+ OSDeviceMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+ uiSize);
+ }
+ else
+ {
+ OSCachedMemSet(pvAddr, PVRSRV_POISON_ON_ALLOC_VALUE,
+ uiSize);
+ }
+
+ bPoisonOnAlloc = IMG_TRUE;
+ }
+ }
+
+ /* Flush or invalidate */
+ if (bCPUCached && !bImportClean && (bZero || bCPUCleanFlag || bPoisonOnAlloc))
+ {
+ /* BridgeCacheOpQueue _may_ be deferred so use BridgeCacheOpExec
+ to ensure this cache maintenance is actioned immediately */
+ eError = BridgeCacheOpExec (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ (IMG_UINT64)(uintptr_t)
+ pvAddr - psMemDesc->uiOffset,
+ psMemDesc->uiOffset,
+ psMemDesc->uiAllocSize,
+ eOp);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMaintenance;
+ }
+ }
+
+ if (pvAddr)
+ {
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+ pvAddr = NULL;
+ }
+
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ _CheckAnnotationLength(pszText);
+#if defined(__KERNEL__)
+ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+ OSStringNCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+ psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif /* if defined(__KERNEL__) */
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ OSStringNLength(psMemDesc->szText, DEVMEM_ANNOTATION_MAX_LEN),
+ psMemDesc->szText,
+ psMemDesc->uiOffset,
+ uiAllocatedSize,
+ IMG_FALSE,
+ IMG_TRUE,
+ &(psMemDesc->hRIHandle));
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#else /* if defined(PVR_RI_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ *ppsMemDescPtr = psMemDesc;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ failMaintenance:
+ if (pvAddr)
+ {
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+ pvAddr = NULL;
+ }
+ _DevmemMemDescRelease(psMemDesc);
+ psMemDesc = NULL; /* Make sure we don't do a discard after the release */
+ failDeviceMemAlloc:
+ if (psMemDesc)
+ {
+ _DevmemMemDescDiscard(psMemDesc);
+ }
+ failMemDescAlloc:
+ failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed! Error is %s. Allocation size: %#llX",
+ __func__,
+ PVRSRVGETERRORSTRING(eError),
+ (unsigned long long) uiSize));
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_UINT32 ui32MappingTable = 0;
+
+ DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+ &uiSize,
+ &uiAlign);
+
+ eError = _DevmemValidateParams(uiSize,
+ uiAlign,
+ &uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto failParams;
+ }
+
+ eError = _DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _AllocateDeviceMemory(hDevConnection,
+ uiLog2HeapPageSize,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ uiAlign,
+ uiFlags,
+ IMG_TRUE,
+ pszText,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDeviceMemAlloc;
+ }
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ uiSize);
+
+ *ppsMemDescPtr = psMemDesc;
+
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ _CheckAnnotationLength(pszText);
+#if defined(__KERNEL__)
+ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+ OSStringNCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+ psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif /* if defined(__KERNEL__) */
+
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+ psImport->hPMR);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+ }
+
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psImport->hDevConnection,
+ psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ uiSize,
+ IMG_FALSE,
+ IMG_FALSE,
+ &psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#else /* if defined(PVR_RI_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ failDeviceMemAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+
+ failMemDescAlloc:
+ failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed! Error is %s. Allocation size: %#llX",
+ __func__,
+ PVRSRVGETERRORSTRING(eError),
+ (unsigned long long) uiSize));
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+
+ DevmemExportalignAdjustSizeAndAlign(uiLog2HeapPageSize,
+ &uiSize,
+ &uiAlign);
+
+ eError = _DevmemValidateParams(uiSize,
+ uiAlign,
+ &uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto failParams;
+ }
+
+ eError = _DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _AllocateDeviceMemory(hDevConnection,
+ uiLog2HeapPageSize,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiAlign,
+ uiFlags,
+ IMG_TRUE,
+ pszText,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDeviceMemAlloc;
+ }
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ uiSize);
+
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ _CheckAnnotationLength(pszText);
+#if defined(__KERNEL__)
+ OSStringLCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+ OSStringNCopy(psMemDesc->szText, pszText, DEVMEM_ANNOTATION_MAX_LEN);
+ psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif /* if defined(__KERNEL__) */
+
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ eError = BridgeRIWritePMREntry (psImport->hDevConnection,
+ psImport->hPMR);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWritePMREntry failed (eError=%d)", __func__, eError));
+ }
+
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ uiSize,
+ IMG_FALSE,
+ IMG_FALSE,
+ &psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#else /* if defined(PVR_RI_DEBUG) */
+ PVR_UNREFERENCED_PARAMETER (pszText);
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ *ppsMemDescPtr = psMemDesc;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ failDeviceMemAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+
+ failMemDescAlloc:
+ failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed! Error is %s. Allocation size: %#llX",
+ __func__,
+ PVRSRVGETERRORSTRING(eError),
+ (unsigned long long) uiSize));
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hBridge,
+ IMG_HANDLE hServerHandle,
+ IMG_HANDLE *hLocalImportHandle)
+{
+ return BridgePMRMakeLocalImportHandle(hBridge,
+ hServerHandle,
+ hLocalImportHandle);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hBridge,
+ IMG_HANDLE hLocalImportHandle)
+{
+ return BridgePMRUnmakeLocalImportHandle(hBridge, hLocalImportHandle);
+}
+
+/*****************************************************************************
+ * Devmem insecure export functions *
+ *****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+
+static PVRSRV_ERROR
+_Mapping_Export(DEVMEM_IMPORT *psImport,
+ DEVMEM_EXPORTHANDLE *phPMRExportHandlePtr,
+ DEVMEM_EXPORTKEY *puiExportKeyPtr,
+ DEVMEM_SIZE_T *puiSize,
+ DEVMEM_LOG2ALIGN_T *puiLog2Contig)
+{
+ /* Gets an export handle and key for the PMR used for this mapping */
+ /* Can only be done if there are no suballocations for this mapping */
+
+ PVRSRV_ERROR eError;
+ DEVMEM_EXPORTHANDLE hPMRExportHandle;
+ DEVMEM_EXPORTKEY uiExportKey;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig;
+
+ if (psImport == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if ((psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+ goto failParams;
+ }
+
+ eError = BridgePMRExportPMR(psImport->hDevConnection,
+ psImport->hPMR,
+ &hPMRExportHandle,
+ &uiSize,
+ &uiLog2Contig,
+ &uiExportKey);
+ if (eError != PVRSRV_OK)
+ {
+ goto failExport;
+ }
+
+ PVR_ASSERT(uiSize == psImport->uiSize);
+
+ *phPMRExportHandlePtr = hPMRExportHandle;
+ *puiExportKeyPtr = uiExportKey;
+ *puiSize = uiSize;
+ *puiLog2Contig = uiLog2Contig;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ failExport:
+ failParams:
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+
+}
+
+static void
+_Mapping_Unexport(DEVMEM_IMPORT *psImport,
+ DEVMEM_EXPORTHANDLE hPMRExportHandle)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT (psImport != NULL);
+
+ eError = BridgePMRUnexportPMR(psImport->hDevConnection,
+ hPMRExportHandle);
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+ /* Caller to provide storage for export cookie struct */
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hPMRExportHandle = 0;
+ IMG_UINT64 uiPMRExportPassword = 0;
+ IMG_DEVMEM_SIZE_T uiSize = 0;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Contig = 0;
+
+ if (psMemDesc == NULL || psExportCookie == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ eError = _Mapping_Export(psMemDesc->psImport,
+ &hPMRExportHandle,
+ &uiPMRExportPassword,
+ &uiSize,
+ &uiLog2Contig);
+ if (eError != PVRSRV_OK)
+ {
+ psExportCookie->uiSize = 0;
+ goto e0;
+ }
+
+ psExportCookie->hPMRExportHandle = hPMRExportHandle;
+ psExportCookie->uiPMRExportPassword = uiPMRExportPassword;
+ psExportCookie->uiSize = uiSize;
+ psExportCookie->uiLog2ContiguityGuarantee = uiLog2Contig;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie)
+{
+ _Mapping_Unexport(psMemDesc->psImport,
+ psExportCookie->hPMRExportHandle);
+
+ psExportCookie->uiSize = 0;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_EXPORTCOOKIE *psCookie,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+
+ if (ppsMemDescPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ eError = _DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _DevmemImportStructAlloc(hDevConnection,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failImportAlloc;
+ }
+
+ /* Get a handle to the PMR (inc refcount) */
+ eError = BridgePMRImportPMR(hDevConnection,
+ psCookie->hPMRExportHandle,
+ psCookie->uiPMRExportPassword,
+ psCookie->uiSize, /* not trusted - just for sanity checks */
+ psCookie->uiLog2ContiguityGuarantee, /* not trusted - just for sanity checks */
+ &hPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto failImport;
+ }
+
+ _DevmemImportStructInit(psImport,
+ psCookie->uiSize,
+ 1ULL << psCookie->uiLog2ContiguityGuarantee,
+ uiFlags,
+ hPMR,
+ DEVMEM_PROPERTIES_IMPORTED |
+ DEVMEM_PROPERTIES_EXPORTABLE);
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ psImport->uiSize);
+
+ *ppsMemDescPtr = psMemDesc;
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ /* Attach RI information */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ psMemDesc->psImport->uiSize,
+ IMG_TRUE,
+ IMG_TRUE,
+ &psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ failImport:
+ _DevmemImportDiscard(psImport);
+ failImportAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+ failMemDescAlloc:
+ failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*****************************************************************************
+ * Common MemDesc functions *
+ *****************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+ /* Stop if the allocation might have suballocations. */
+ if (!(psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: The passed allocation is not valid to unpin because "
+ "there might be suballocations on it. Make sure you allocate a page multiple "
+ "of the heap when using PVRSRVAllocDeviceMem()",
+ __FUNCTION__));
+
+ goto e_exit;
+ }
+
+ /* Stop if the Import is still mapped to CPU */
+ if (psImport->sCPUImport.ui32RefCount)
+ {
+ eError = PVRSRV_ERROR_STILL_MAPPED;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: There are still %u references on the CPU mapping. "
+ "Please remove all CPU mappings before unpinning.",
+ __FUNCTION__,
+ psImport->sCPUImport.ui32RefCount));
+
+ goto e_exit;
+ }
+
+ /* Only unpin if it is not already unpinned
+ * Return PVRSRV_OK */
+ if (psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ goto e_exit;
+ }
+
+ /* Unpin it and invalidate mapping */
+ if (psImport->sDeviceImport.bMapped == IMG_TRUE)
+ {
+ eError = BridgeDevmemIntUnpinInvalidate(psImport->hDevConnection,
+ psImport->sDeviceImport.hMapping,
+ psImport->hPMR);
+ }
+ else
+ {
+ /* Or just unpin it */
+ eError = BridgeDevmemIntUnpin(psImport->hDevConnection,
+ psImport->hPMR);
+ }
+
+ /* Update flags and RI when call was successful */
+ if (eError == PVRSRV_OK)
+ {
+ psImport->uiProperties |= DEVMEM_PROPERTIES_UNPINNED;
+ }
+ else
+ {
+ /* Or just show what went wrong */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Unpin aborted because of error %d",
+ __func__,
+ eError));
+ }
+
+ e_exit:
+ return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+
+ /* Only pin if it is unpinned */
+ if ((psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED) == 0)
+ {
+ goto e_exit;
+ }
+
+ /* Pin it and make mapping valid */
+ if (psImport->sDeviceImport.bMapped)
+ {
+ eError = BridgeDevmemIntPinValidate(psImport->hDevConnection,
+ psImport->sDeviceImport.hMapping,
+ psImport->hPMR);
+ }
+ else
+ {
+ /* Or just pin it */
+ eError = BridgeDevmemIntPin(psImport->hDevConnection,
+ psImport->hPMR);
+ }
+
+ if ( (eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_PMR_NEW_MEMORY) )
+ {
+ psImport->uiProperties &= ~DEVMEM_PROPERTIES_UNPINNED;
+ }
+ else
+ {
+ /* Or just show what went wrong */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Pin aborted because of error %d",
+ __func__,
+ eError));
+ }
+
+ e_exit:
+ return eError;
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc, IMG_DEVMEM_SIZE_T* puiSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ *puiSize = psMemDesc->uiAllocSize;
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc, IMG_CHAR **pszAnnotation)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ *pszAnnotation = psMemDesc->szText;
+
+ return eError;
+}
+
+/*
+ This function is called for freeing any class of memory
+ */
+IMG_INTERNAL void
+DevmemFree(DEVMEM_MEMDESC *psMemDesc)
+{
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SECURE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Please use methods dedicated to secure buffers.",
+ __func__));
+ return;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ PVRSRV_ERROR eError;
+
+ eError = BridgeRIDeleteMEMDESCEntry(psMemDesc->psImport->hDevConnection,
+ psMemDesc->hRIHandle);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIDeleteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+ _DevmemMemDescRelease(psMemDesc);
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+ DEVMEM_IMPORT *psImport;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bMap = IMG_TRUE;
+ IMG_BOOL bDestroyed = IMG_FALSE;
+
+ /* Do not try to map unpinned memory */
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failFlags;
+ }
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ if (psHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+ goto failCheck;
+ }
+
+ /* Don't map memory for deferred allocations */
+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ {
+ PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+ bMap = IMG_FALSE;
+ }
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+ psImport = psMemDesc->psImport;
+ _DevmemMemDescAcquire(psMemDesc);
+
+ eError = _DevmemImportStructDevMap(psHeap,
+ bMap,
+ psImport,
+ DEVICEMEM_UTILS_NO_ADDRESS);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ sDevVAddr.uiAddr += psMemDesc->uiOffset;
+ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+ psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ BridgeDevicememHistoryMap(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->szText,
+ DevmemGetHeapLog2PageSize(psHeap),
+ psMemDesc->ui32AllocationIndex,
+ &psMemDesc->ui32AllocationIndex);
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ eError = BridgeRIUpdateMEMDESCAddr(psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ psImport->sDeviceImport.sDevVAddr);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+
+ failMap:
+ bDestroyed = _DevmemMemDescRelease(psMemDesc);
+ failCheck:
+ failParams:
+ if (!bDestroyed)
+ {
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ }
+ PVR_ASSERT(eError != PVRSRV_OK);
+ failFlags:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR sDevVirtAddr)
+{
+ DEVMEM_IMPORT *psImport;
+ IMG_DEV_VIRTADDR sDevVAddr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bMap = IMG_TRUE;
+ IMG_BOOL bDestroyed = IMG_FALSE;
+
+ /* Do not try to map unpinned memory */
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failFlags;
+ }
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ if (psHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ if (psMemDesc->sDeviceMemDesc.ui32RefCount != 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+ goto failCheck;
+ }
+
+ /* Don't map memory for deferred allocations */
+ if (psMemDesc->psImport->uiFlags & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC)
+ {
+ PVR_ASSERT(psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE);
+ bMap = IMG_FALSE;
+ }
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+ psImport = psMemDesc->psImport;
+ _DevmemMemDescAcquire(psMemDesc);
+
+ eError = _DevmemImportStructDevMap(psHeap,
+ bMap,
+ psImport,
+ sDevVirtAddr.uiAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ sDevVAddr.uiAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ sDevVAddr.uiAddr += psMemDesc->uiOffset;
+ psMemDesc->sDeviceMemDesc.sDevVAddr = sDevVAddr;
+ psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ BridgeDevicememHistoryMap(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->szText,
+ DevmemGetHeapLog2PageSize(psHeap),
+ psMemDesc->ui32AllocationIndex,
+ &psMemDesc->ui32AllocationIndex);
+ }
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ if (psMemDesc->hRIHandle)
+ {
+ eError = BridgeRIUpdateMEMDESCAddr(psImport->hDevConnection,
+ psMemDesc->hRIHandle,
+ psImport->sDeviceImport.sDevVAddr);
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIUpdateMEMDESCAddr failed (eError=%d)", __func__, eError));
+ }
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+
+ failMap:
+ bDestroyed = _DevmemMemDescRelease(psMemDesc);
+ failCheck:
+ failParams:
+ if (!bDestroyed)
+ {
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ }
+ PVR_ASSERT(eError != PVRSRV_OK);
+ failFlags:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEV_VIRTADDR *psDevVirtAddr)
+{
+ PVRSRV_ERROR eError;
+
+ /* Do not try to map unpinned memory */
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_UNPINNED)
+ {
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failCheck;
+ }
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount+1);
+
+ if (psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_NO_MAPPING;
+ goto failRelease;
+ }
+ psMemDesc->sDeviceMemDesc.ui32RefCount++;
+
+ *psDevVirtAddr = psMemDesc->sDeviceMemDesc.sDevVAddr;
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+ return PVRSRV_OK;
+
+ failRelease:
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ failCheck:
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_ASSERT(psMemDesc != NULL);
+
+ OSLockAcquire(psMemDesc->sDeviceMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sDeviceMemDesc.ui32RefCount,
+ psMemDesc->sDeviceMemDesc.ui32RefCount-1);
+
+ PVR_ASSERT(psMemDesc->sDeviceMemDesc.ui32RefCount != 0);
+
+ if (--psMemDesc->sDeviceMemDesc.ui32RefCount == 0)
+ {
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_DEVICEMEMHISTORY))
+ {
+ BridgeDevicememHistoryUnmap(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset,
+ psMemDesc->sDeviceMemDesc.sDevVAddr,
+ psMemDesc->uiAllocSize,
+ psMemDesc->szText,
+ DevmemGetHeapLog2PageSize(psMemDesc->psImport->sDeviceImport.psHeap),
+ psMemDesc->ui32AllocationIndex,
+ &psMemDesc->ui32AllocationIndex);
+ }
+#endif
+ _DevmemImportStructDevUnmap(psMemDesc->psImport);
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+
+ _DevmemMemDescRelease(psMemDesc);
+ }
+ else
+ {
+ OSLockRelease(psMemDesc->sDeviceMemDesc.hLock);
+ }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ void **ppvCpuVirtAddr)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psMemDesc != NULL);
+ PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+ if ( psMemDesc->psImport->uiProperties &
+ (DEVMEM_PROPERTIES_UNPINNED | DEVMEM_PROPERTIES_SECURE) )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Allocation is currently unpinned or a secure buffer. "
+ "Not possible to map to CPU!",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_MAP_REQUEST;
+ goto failFlags;
+ }
+
+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sCPUMemDesc.ui32RefCount,
+ psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+ if (psMemDesc->sCPUMemDesc.ui32RefCount++ == 0)
+ {
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+ IMG_UINT8 *pui8CPUVAddr;
+
+ _DevmemMemDescAcquire(psMemDesc);
+ eError = _DevmemImportStructCPUMap(psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ pui8CPUVAddr = psImport->sCPUImport.pvCPUVAddr;
+ pui8CPUVAddr += psMemDesc->uiOffset;
+ psMemDesc->sCPUMemDesc.pvCPUVAddr = pui8CPUVAddr;
+ }
+ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+
+ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+
+ return PVRSRV_OK;
+
+ failMap:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ psMemDesc->sCPUMemDesc.ui32RefCount--;
+
+ if (!_DevmemMemDescRelease(psMemDesc))
+ {
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+ }
+ failFlags:
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ void **ppvCpuVirtAddr)
+{
+ PVR_ASSERT(psMemDesc != NULL);
+ PVR_ASSERT(ppvCpuVirtAddr != NULL);
+
+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sCPUMemDesc.ui32RefCount,
+ psMemDesc->sCPUMemDesc.ui32RefCount+1);
+
+ *ppvCpuVirtAddr = NULL;
+ if (psMemDesc->sCPUMemDesc.ui32RefCount)
+ {
+ *ppvCpuVirtAddr = psMemDesc->sCPUMemDesc.pvCPUVAddr;
+ psMemDesc->sCPUMemDesc.ui32RefCount += 1;
+ }
+
+ VG_MARK_INITIALIZED(*ppvCpuVirtAddr, psMemDesc->psImport->uiSize);
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+}
+
+IMG_INTERNAL void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_ASSERT(psMemDesc != NULL);
+
+ OSLockAcquire(psMemDesc->sCPUMemDesc.hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ psMemDesc->sCPUMemDesc.ui32RefCount,
+ psMemDesc->sCPUMemDesc.ui32RefCount-1);
+
+ PVR_ASSERT(psMemDesc->sCPUMemDesc.ui32RefCount != 0);
+
+ if (--psMemDesc->sCPUMemDesc.ui32RefCount == 0)
+ {
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+ _DevmemImportStructCPUUnmap(psMemDesc->psImport);
+ _DevmemMemDescRelease(psMemDesc);
+ }
+ else
+ {
+ OSLockRelease(psMemDesc->sCPUMemDesc.hLock);
+ }
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport)
+{
+ if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+ }
+
+ *phImport = psMemDesc->psImport->hPMR;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT64 *pui64UID)
+{
+ DEVMEM_IMPORT *psImport = psMemDesc->psImport;
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRGetUID(psImport->hDevConnection,
+ psImport->hPMR,
+ pui64UID);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *hReservation)
+{
+ DEVMEM_IMPORT *psImport;
+
+ PVR_ASSERT(psMemDesc);
+ psImport = psMemDesc->psImport;
+
+ PVR_ASSERT(psImport);
+ *hReservation = psImport->sDeviceImport.hReservation;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phPMR,
+ IMG_DEVMEM_OFFSET_T *puiPMROffset)
+{
+ DEVMEM_IMPORT *psImport;
+
+ PVR_ASSERT(psMemDesc);
+ *puiPMROffset = psMemDesc->uiOffset;
+ psImport = psMemDesc->psImport;
+
+ PVR_ASSERT(psImport);
+ *phPMR = psImport->hPMR;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_FLAGS_T *puiFlags)
+{
+ DEVMEM_IMPORT *psImport;
+
+ PVR_ASSERT(psMemDesc);
+ psImport = psMemDesc->psImport;
+
+ PVR_ASSERT(psImport);
+ *puiFlags = psImport->uiFlags;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL IMG_HANDLE
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc)
+{
+ return psMemDesc->psImport->hDevConnection;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr,
+ IMG_DEVMEM_SIZE_T *puiSizePtr,
+ const IMG_CHAR *pszAnnotation)
+{
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ DEVMEM_IMPORT *psImport;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ IMG_HANDLE hPMR;
+ PVRSRV_ERROR eError;
+
+ if (ppsMemDescPtr == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failParams;
+ }
+
+ eError = _DevmemMemDescAlloc(&psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMemDescAlloc;
+ }
+
+ eError = _DevmemImportStructAlloc(hBridge,
+ &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failImportAlloc;
+ }
+
+ /* Get the PMR handle and its size from the server */
+ eError = BridgePMRLocalImportPMR(hBridge,
+ hExtHandle,
+ &hPMR,
+ &uiSize,
+ &uiAlign);
+ if (eError != PVRSRV_OK)
+ {
+ goto failImport;
+ }
+
+ _DevmemImportStructInit(psImport,
+ uiSize,
+ uiAlign,
+ uiFlags,
+ hPMR,
+ DEVMEM_PROPERTIES_IMPORTED |
+ DEVMEM_PROPERTIES_EXPORTABLE);
+
+ _DevmemMemDescInit(psMemDesc,
+ 0,
+ psImport,
+ uiSize);
+
+ *ppsMemDescPtr = psMemDesc;
+ if (puiSizePtr)
+ *puiSizePtr = uiSize;
+
+#if defined(PVR_RI_DEBUG)
+ if(PVRSRVIsBridgeEnabled(psMemDesc->psImport->hDevConnection, PVRSRV_BRIDGE_RI))
+ {
+ /* Attach RI information.
+ * Set backed size to 0 since this allocation has been allocated
+ * by the same process and has been accounted for. */
+ eError = BridgeRIWriteMEMDESCEntry (psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ sizeof("^"),
+ "^",
+ psMemDesc->uiOffset,
+ psMemDesc->psImport->uiSize,
+ IMG_TRUE,
+ IMG_FALSE,
+ &(psMemDesc->hRIHandle));
+ if( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to BridgeRIWriteMEMDESCEntry failed (eError=%d)", __func__, eError));
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+
+
+ /* copy the allocation descriptive name and size so it can be passed to DevicememHistory when
+ * the allocation gets mapped/unmapped
+ */
+ _CheckAnnotationLength(pszAnnotation);
+#if defined(__KERNEL__)
+ OSStringLCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+#else
+ OSStringNCopy(psMemDesc->szText, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+ psMemDesc->szText[DEVMEM_ANNOTATION_MAX_LEN - 1] = '\0';
+#endif /* if defined(__KERNEL__) */
+
+
+ return PVRSRV_OK;
+
+ failImport:
+ _DevmemImportDiscard(psImport);
+ failImportAlloc:
+ _DevmemMemDescDiscard(psMemDesc);
+ failMemDescAlloc:
+ failParams:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ return BridgeDevmemIsVDevAddrValid(psContext->hDevConnection,
+ psContext->hDevMemServerContext,
+ sDevVAddr);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
+ IMG_DEV_VIRTADDR *psFaultAddress)
+{
+ return BridgeDevmemGetFaultAddress(psContext->hDevConnection,
+ psContext->hDevMemServerContext,
+ psFaultAddress);
+}
+
+IMG_INTERNAL IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap)
+{
+ return psHeap->uiLog2Quantum;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+ IMG_UINT32 *puiLog2ImportAlignment,
+ IMG_UINT32 *puiLog2TilingStrideFactor)
+{
+ *puiLog2ImportAlignment = psHeap->uiLog2ImportAlignment;
+ *puiLog2TilingStrideFactor = psHeap->uiLog2TilingStrideFactor;
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function RegisterDevmemPFNotify
+@Description Registers that the application wants to be signaled when a page
+ fault occurs.
+
+@Input psContext Memory context of the process that would like
+ to be notified.
+@Input ui32PID The PID of the calling process.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code.
+ */ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeDevmemIntRegisterPFNotifyKM(psContext->hDevConnection,
+ psContext->hDevMemServerContext,
+ ui32PID,
+ bRegister);
+ if (eError == PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Bridge Call Failed: This could suggest a UM/KM miss-match (%d)",
+ __func__,
+ (IMG_INT)(eError)));
+ }
+
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+GetMaxDevMemSize(SHARED_DEV_CONNECTION psConnection,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize)
+{
+ return BridgeGetMaxDevMemSize(psConnection,
+ puiLMASize,
+ puiUMASize);
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem.h b/drivers/gpu/drm/img-rogue/1.10/devicemem.h
new file mode 100644
index 00000000000000..aeaa65855fe726
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem.h
@@ -0,0 +1,683 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management core internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface to core device memory management
+ functions that are shared between client and server code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef SRVCLIENT_DEVICEMEM_H
+#define SRVCLIENT_DEVICEMEM_H
+
+/********************************************************************************
+ * *
+ * +------------+ +------------+ +--------------+ +--------------+ *
+ * | a sub- | | a sub- | | an | | allocation | *
+ * | allocation | | allocation | | allocation | | also mapped | *
+ * | | | | | in proc 1 | | into proc 2 | *
+ * +------------+ +------------+ +--------------+ +--------------+ *
+ * | | | | *
+ * +--------------+ +--------------+ +--------------+ *
+ * | page gran- | | page gran- | | page gran- | *
+ * | ular mapping | | ular mapping | | ular mapping | *
+ * +--------------+ +--------------+ +--------------+ *
+ * | | | *
+ * | | | *
+ * | | | *
+ * +--------------+ +--------------+ *
+ * | | | | *
+ * | A "P.M.R." | | A "P.M.R." | *
+ * | | | | *
+ * +--------------+ +--------------+ *
+ * *
+ ********************************************************************************/
+
+/*
+ All device memory allocations are ultimately a view upon (not
+ necessarily the whole of) a "PMR".
+
+ A PMR is a "Physical Memory Resource", which may be a
+ "pre-faulted" lump of physical memory, or it may be a
+ representation of some physical memory that will be instantiated
+ at some future time.
+
+ PMRs always represent a multiple of some power-of-2 "contiguity"
+ promised by the PMR, which will allow them to be mapped in whole
+ pages into the device MMU. As memory allocations may be smaller
+ than a page, these mappings may be suballocated and thus shared
+ between multiple allocations in one process. A PMR may also be
+ mapped simultaneously into multiple device memory contexts
+ (cross-process scenario), however, for security reasons, it is not
+ legal to share a PMR "both ways" at once, that is, mapped into
+ multiple processes and divided up amongst several suballocations.
+
+ This PMR terminology is introduced here for background
+ information, but is generally of little concern to the caller of
+ this API. This API handles suballocations and mappings, and the
+ caller thus deals primarily with MEMORY DESCRIPTORS representing
+ an allocation or suballocation, and HEAPS representing ranges of
+ virtual addresses in a CONTEXT.
+*/
+
+/*
+ |<---------------------------context------------------------------>|
+ |<-------heap------->| |<-------heap------->|<-------heap------->|
+ |<-alloc->| | |<-alloc->|<-alloc->|| |<-alloc->| |
+*/
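+
+/*
+  Illustrative call sequence (a sketch only: error handling is elided,
+  heaps are normally instantiated for you by DevmemCreateContext() from
+  the heap blueprint, and all names below are placeholders):
+
+    DEVMEM_CONTEXT *psCtx;
+    DEVMEM_HEAP *psHeap;
+    DEVMEM_MEMDESC *psMemDesc;
+    IMG_DEV_VIRTADDR sDevVAddr;
+
+    DevmemCreateContext(hDevConnection, DEVMEM_HEAPCFG_FORCLIENTS, &psCtx);
+    DevmemCreateHeap(psCtx, sBaseAddress, uiLength, ui32Log2Quantum,
+                     ui32Log2ImportAlignment, ui32Log2TilingStrideFactor,
+                     "Example heap", DEVMEM_HEAPCFG_FORCLIENTS, &psHeap);
+    DevmemAllocate(psHeap, uiSize, uiAlign, uiFlags, "example alloc",
+                   &psMemDesc);
+    DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
+
+    ... use the allocation ...
+
+    DevmemReleaseDevVirtAddr(psMemDesc);
+    DevmemFree(psMemDesc);
+    DevmemDestroyHeap(psHeap);
+    DevmemDestroyContext(psCtx);
+*/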
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "pdump.h"
+
+#include "device_connection.h"
+
+
+typedef IMG_UINT32 DEVMEM_HEAPCFGID;
+#define DEVMEM_HEAPCFG_FORCLIENTS 0
+#define DEVMEM_HEAPCFG_META 1
+
+
+
+
+
+/*
+ In order to call the server side functions, we need a bridge handle.
+ We abstract that here, as we may wish to change its form.
+ */
+
+typedef IMG_HANDLE DEVMEM_BRIDGE_HANDLE;
+
+/**************************************************************************/ /*!
+@Function DevmemUnpin
+@Description This is the counterpart to DevmemPin(). It is meant to be
+ called before repinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psMemDesc The MemDesc that is going to be unpinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemUnpin(DEVMEM_MEMDESC *psMemDesc);
+
+/**************************************************************************/ /*!
+@Function DevmemPin
+@Description This is the counterpart to DevmemUnpin(). It is meant to be
+ called after unpinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psMemDesc The MemDesc that is going to be pinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPin(DEVMEM_MEMDESC *psMemDesc);
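+
+/*
+  Typical unpin/pin pairing (a sketch; assumes the allocation is idle,
+  is not suballocated and has no outstanding CPU mappings, as
+  DevmemUnpin() requires):
+
+    eError = DevmemUnpin(psMemDesc);   the OS may now reclaim the pages
+    ...
+    eError = DevmemPin(psMemDesc);
+    if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+    {
+        the old backing was reclaimed; caller must reinitialise contents
+    }
+*/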
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetHeapInt(DEVMEM_HEAP *psHeap,
+ IMG_HANDLE *phDevmemHeap);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetSize(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_SIZE_T* puiSize);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetAnnotation(DEVMEM_MEMDESC *psMemDesc,
+ IMG_CHAR **pszAnnotation);
+
+/*
+ * DevmemCreateContext()
+ *
+ * Create a device memory context
+ *
+ * This must be called before any heap is created in this context
+ *
+ * Caller to provide bridge handle which will be squirreled away
+ * internally and used for all future operations on items from this
+ * memory context. Caller also to provide devicenode handle, as this
+ * is used for MMU configuration and also to determine the heap
+ * configuration for the auto-instantiated heaps.
+ *
+ * Note that when compiled in services/server, the hBridge is not used
+ * and is thrown away by the "fake" direct bridge. (This may change.
+ * It is recommended that NULL be passed for the handle for now)
+ *
+ * hDeviceNode and uiHeapBlueprintID shall together dictate which
+ * heap-config to use.
+ *
+ * This will cause the server side counterpart to be created also.
+ *
+ * If you call DevmemCreateContext() (and the call succeeds) you
+ * are promising that you will later call Devmem_ContextDestroy(),
+ * except for abnormal process termination in which case it is
+ * expected it will be destroyed as part of handle clean up.
+ *
+ * Caller to provide storage for the pointer to the NEWDEVMEM_CONTEXT
+ * object thusly created.
+ */
+extern PVRSRV_ERROR
+DevmemCreateContext(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_CONTEXT **ppsCtxPtr);
+
+/*
+ * DevmemAcquireDevPrivData()
+ *
+ * Acquire the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemAcquireDevPrivData(DEVMEM_CONTEXT *psCtx,
+ IMG_HANDLE *hPrivData);
+
+/*
+ * DevmemReleaseDevPrivData()
+ *
+ * Release the device private data for this memory context
+ */
+PVRSRV_ERROR
+DevmemReleaseDevPrivData(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemDestroyContext()
+ *
+ * Undoes that done by DevmemCreateContext()
+ */
+extern PVRSRV_ERROR
+DevmemDestroyContext(DEVMEM_CONTEXT *psCtx);
+
+/*
+ * DevmemCreateHeap()
+ *
+ * Create a heap in the given context.
+ *
+ * N.B. Not intended to be called directly, though it can be.
+ * Normally, heaps are instantiated at context creation time according
+ * to the specified blueprint. See DevmemCreateContext() for details.
+ *
+ * This will cause MMU code to set up data structures for the heap,
+ * but may not cause page tables to be modified until allocations are
+ * made from the heap.
+ *
+ * The "Quantum" is both the device MMU page size to be configured for
+ * this heap, and the unit multiples of which "quantized" allocations
+ * are made (allocations smaller than this, known as "suballocations",
+ * will be made from a "sub alloc RA" and will "import" chunks
+ * according to this quantum)
+ *
+ * Where imported PMRs (or, for example, PMRs created by device class
+ * buffers) are mapped into this heap, it is important that the
+ * physical contiguity guarantee offered by the PMR is greater than or
+ * equal to the quantum size specified here, otherwise the attempt to
+ * map it will fail. "Normal" allocations via DevmemAllocate()
+ * shall automatically meet this requirement, as each "import" will
+ * trigger the creation of a PMR with the desired contiguity. The
+ * supported quantum sizes in that case shall be dictated by the OS
+ * specific implementation of PhysmemNewOSRamBackedPMR().
+ */
+extern PVRSRV_ERROR
+DevmemCreateHeap(DEVMEM_CONTEXT *psCtxPtr,
+ /* base and length of heap */
+ IMG_DEV_VIRTADDR sBaseAddress,
+ IMG_DEVMEM_SIZE_T uiLength,
+ /* log2 of allocation quantum, i.e. "page" size.
+ All allocations (that go to server side) are
+ multiples of this. We use a client-side RA to
+ make sub-allocations from this */
+ IMG_UINT32 ui32Log2Quantum,
+ /* The minimum import alignment for this heap */
+ IMG_UINT32 ui32Log2ImportAlignment,
+ /* (For tiling heaps) the factor to use to convert
+ alignment to optimum buffer stride */
+ IMG_UINT32 ui32Log2TilingStrideFactor,
+ /* Name of heap for debug */
+ /* N.B. Okay to exist on caller's stack - this
+ func takes a copy if it needs it. */
+ const IMG_CHAR *pszName,
+ DEVMEM_HEAPCFGID uiHeapBlueprintID,
+ DEVMEM_HEAP **ppsHeapPtr);
+/*
+ * DevmemDestroyHeap()
+ *
+ * Reverses DevmemCreateHeap()
+ *
+ * N.B. All allocations must have been freed and all mappings must
+ * have been unmapped before invoking this call
+ */
+extern PVRSRV_ERROR
+DevmemDestroyHeap(DEVMEM_HEAP *psHeap);
+
+/*
+ * DevmemExportalignAdjustSizeAndAlign()
+ * Adjust the given size and alignment so that the allocation avoids
+ * suballocation (used when allocating with PVRSRV_MEMALLOCFLAG_EXPORTALIGN)
+ */
+IMG_INTERNAL void
+DevmemExportalignAdjustSizeAndAlign(IMG_UINT32 uiLog2Quantum,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * DevmemSubAllocate()
+ *
+ * Makes an allocation (possibly a "suballocation", as described
+ * below) of device virtual memory from this heap.
+ *
+ * The size and alignment of the allocation will be honoured by the RA
+ * that allocates the "suballocation". The resulting allocation will
+ * be mapped into GPU virtual memory and the physical memory to back
+ * it will exist, by the time this call successfully completes.
+ *
+ * The size must be a positive integer multiple of the alignment.
+ * (I.e. the alignment specifies the alignment of both the start and
+ * the end of the resulting allocation.)
+ *
+ * Allocations made via this API are routed through a "suballocation
+ * RA" which is responsible for ensuring that small allocations can be
+ * made without wasting physical memory in the server. Furthermore,
+ * such suballocations can be made entirely client side without
+ * needing to go to the server unless the allocation spills into a new
+ * page.
+ *
+ * Such suballocations cause many allocations to share the same "PMR".
+ * This happens only when the flags match exactly.
+ *
+ */
+
+PVRSRV_ERROR
+DevmemSubAllocate(IMG_UINT8 uiPreAllocMultiplier,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+
+#define DevmemAllocate(...) \
+ DevmemSubAllocate(DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER, __VA_ARGS__)
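+
+/* Illustrative sketch of a (sub)allocation round trip (editor's example;
+ * uiFlags stands in for the caller's allocation flags and error handling
+ * is elided). DevmemAllocate() expands to DevmemSubAllocate() with no
+ * pre-allocation multiplier:
+ *
+ *   DEVMEM_MEMDESC *psMemDesc;
+ *
+ *   if (DevmemAllocate(psHeap,
+ *                      4096,           // uiSize: multiple of uiAlign
+ *                      4096,           // uiAlign
+ *                      uiFlags,
+ *                      "example-buf",  // debug annotation
+ *                      &psMemDesc) == PVRSRV_OK)
+ *   {
+ *       ...
+ *       DevmemFree(psMemDesc);
+ *   }
+ */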
+
+PVRSRV_ERROR
+DevmemAllocateExportable(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+
+PVRSRV_ERROR
+DeviceMemChangeSparse(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pauiFreePageIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiFlags);
+
+PVRSRV_ERROR
+DevmemAllocateSparse(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_UINT32 uiLog2HeapPageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+
+/*
+ * DevmemFree()
+ *
+ * Reverses that done by DevmemSubAllocate(). N.B. the underlying
+ * mapping and server-side allocation _may_ not be torn down, for
+ * example if the allocation has been exported, or if multiple
+ * allocations were suballocated from the same mapping, but this is
+ * properly refcounted, so the caller does not have to care.
+ */
+
+extern void
+DevmemFree(DEVMEM_MEMDESC *psMemDesc);
+
+/*
+ DevmemMapToDevice:
+
+ Map an allocation to the device it was allocated from.
+ This function _must_ be called before any call to
+ DevmemAcquireDevVirtAddr is made as it binds the allocation
+ to the heap.
+ DevmemReleaseDevVirtAddr is used to release the reference
+ to the device mapping this function created, but it doesn't
+ mean that the memory will actually be unmapped from the
+ device as other references to the mapping obtained via
+ DevmemAcquireDevVirtAddr could still be active.
+*/
+PVRSRV_ERROR DevmemMapToDevice(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR *psDevVirtAddr);
+
+/*
+ DevmemMapToDeviceAddress:
+
+ Same as DevmemMapToDevice but the caller chooses the address
+ to map to.
+*/
+IMG_INTERNAL PVRSRV_ERROR
+DevmemMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR sDevVirtAddr);
+
+/*
+ DevmemAcquireDevVirtAddr
+
+ Acquire the MemDesc's device virtual address.
+ This function _must_ be called after DevmemMapToDevice
+ and is expected to be used by functions which didn't allocate
+ the MemDesc but need to know its address.
+ */
+PVRSRV_ERROR DevmemAcquireDevVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEV_VIRTADDR *psDevVirtAddrRet);
+/*
+ * DevmemReleaseDevVirtAddr()
+ *
+ * give up the licence to use the device virtual address that was
+ * acquired by "Acquire" or "MapToDevice"
+ */
+extern void
+DevmemReleaseDevVirtAddr(DEVMEM_MEMDESC *psMemDesc);
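+
+/* Illustrative sketch of the device-mapping lifecycle (editor's example;
+ * error handling elided). MapToDevice creates the mapping and takes the
+ * first reference; each Acquire takes another; each Release drops one:
+ *
+ *   IMG_DEV_VIRTADDR sDevVAddr;
+ *
+ *   DevmemMapToDevice(psMemDesc, psHeap, &sDevVAddr);
+ *   DevmemAcquireDevVirtAddr(psMemDesc, &sDevVAddr);
+ *   ...
+ *   DevmemReleaseDevVirtAddr(psMemDesc);   // drop the Acquire reference
+ *   DevmemReleaseDevVirtAddr(psMemDesc);   // drop the MapToDevice reference
+ */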
+
+/*
+ * DevmemAcquireCpuVirtAddr()
+ *
+ * Acquires a licence to use the CPU virtual address of this mapping.
+ * Note that the memory may not have been mapped into CPU virtual
+ * memory prior to this call. On the first "acquire" the memory will be
+ * mapped in (if it wasn't statically mapped in) and on the last
+ * release it _may_ become unmapped. A later "Acquire" _may_ map
+ * the memory at a different address.
+ */
+PVRSRV_ERROR DevmemAcquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReacquireCpuVirtAddr()
+ *
+ * (Re)acquires the licence to use the CPU virtual address of this
+ * mapping if (and only if) there is already a pre-existing licence to
+ * use the CPU virtual address for the mapping; otherwise returns NULL
+ * in *ppvCpuVirtAddr.
+ */
+void DevmemReacquireCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc,
+ void **ppvCpuVirtAddr);
+
+/*
+ * DevmemReleaseCpuVirtAddr()
+ *
+ * Give up the licence to use the CPU virtual address that was granted
+ * by the "Acquire" call.
+ */
+extern void
+DevmemReleaseCpuVirtAddr(DEVMEM_MEMDESC *psMemDesc);
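+
+/* Illustrative CPU-mapping sketch (editor's example; error handling
+ * elided). The pointer is only valid between Acquire and Release, and a
+ * later Acquire may return a different address:
+ *
+ *   void *pvCpuVAddr;
+ *
+ *   if (DevmemAcquireCpuVirtAddr(psMemDesc, &pvCpuVAddr) == PVRSRV_OK)
+ *   {
+ *       // ... CPU reads/writes through pvCpuVAddr are valid here ...
+ *       DevmemReleaseCpuVirtAddr(psMemDesc);
+ *   }
+ */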
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/*
+ * DevmemExport()
+ *
+ * Given a memory allocation allocated with DevmemAllocateExportable(),
+ * create a "cookie" that can be passed intact by the caller's own choice
+ * of secure IPC to another process and used as the argument to "map"
+ * to map this memory into a heap in the target process. N.B. This can
+ * also be used to map into multiple heaps in one process, though that's not
+ * the intention.
+ *
+ * Note: the caller must later call DevmemUnexport() before freeing the
+ * memory.
+ */
+PVRSRV_ERROR DevmemExport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+
+void DevmemUnexport(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_EXPORTCOOKIE *psExportCookie);
+
+PVRSRV_ERROR
+DevmemImport(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_EXPORTCOOKIE *psCookie,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr);
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/*
+ * DevmemMakeLocalImportHandle()
+ *
+ * This is a "special case" function for making a server export cookie
+ * which went through the direct bridge into an export cookie that can
+ * be passed through the client bridge.
+ */
+PVRSRV_ERROR
+DevmemMakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_HANDLE hServerExport,
+ IMG_HANDLE *hClientExport);
+
+/*
+ * DevmemUnmakeLocalImportHandle()
+ *
+ * Free any resources associated with the Make operation
+ */
+PVRSRV_ERROR
+DevmemUnmakeLocalImportHandle(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_HANDLE hClientExport);
+
+/*
+ *
+ * The following set of functions relates to the heap "blueprint"
+ * mechanism, for automatic creation of heaps when a context is created
+ *
+ */
+
+
+/* DevmemHeapConfigCount: returns the number of heap configs that
+ this device has. Note that no acquire/release semantics are
+ required, as this data is guaranteed to be constant for the
+ lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapConfigCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 *puiNumHeapConfigsOut);
+
+/* DevmemHeapCount: returns the number of heaps that a given heap
+ config on this device has. Note that no acquire/release semantics
+ are required, as this data is guaranteed to be constant for
+ the lifetime of the device node */
+extern PVRSRV_ERROR
+DevmemHeapCount(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut);
+/* DevmemHeapConfigName: return the name of the given heap config.
+ The caller is to provide the storage for the returned string and
+ indicate the number of bytes (including the null terminator) of
+ that storage in the BufSz arg. Note that no acquire/release
+ semantics are required, as this data is guaranteed to be constant
+ for the lifetime of the device node.
+ */
+extern PVRSRV_ERROR
+DevmemHeapConfigName(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_CHAR *pszConfigNameOut,
+ IMG_UINT32 uiConfigNameBufSz);
+
+/* DevmemHeapDetails: fetches all the metadata that is recorded in
+ this heap "blueprint". Namely: heap name (caller to provide
+ storage, and to indicate buffer size (including null terminator) in
+ the BufSz arg), device virtual address and length, and log2 of data
+ page size (will be one of 12, 14, 16, 18, 20, 21, at the time of
+ writing). Note that no acquire/release semantics are required, as
+ this data is guaranteed to be constant for the lifetime of the
+ device node. */
+extern PVRSRV_ERROR
+DevmemHeapDetails(SHARED_DEV_CONNECTION hDevConnection,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSize,
+ IMG_UINT32 *puiLog2ImportAlignmentOut,
+ IMG_UINT32 *puiLog2TilingStrideFactor);
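+
+/* Illustrative blueprint enumeration sketch (editor's example; local
+ * variable declarations and error handling elided):
+ *
+ *   DevmemHeapConfigCount(hDevConnection, &uiNumConfigs);
+ *   for (i = 0; i < uiNumConfigs; i++)
+ *   {
+ *       DevmemHeapCount(hDevConnection, i, &uiNumHeaps);
+ *       for (j = 0; j < uiNumHeaps; j++)
+ *       {
+ *           DevmemHeapDetails(hDevConnection, i, j,
+ *                             szHeapName, sizeof(szHeapName),
+ *                             &sBase, &uiLength, &uiLog2PageSize,
+ *                             &uiLog2ImportAlign, &uiLog2StrideFactor);
+ *       }
+ *   }
+ */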
+
+/*
+ * DevmemFindHeapByName()
+ *
+ * Returns the heap handle for the named _automagic_ heap in this
+ * context. "Automagic" heaps are those that are born with the
+ * context from a blueprint
+ */
+extern PVRSRV_ERROR
+DevmemFindHeapByName(const DEVMEM_CONTEXT *psCtx,
+ const IMG_CHAR *pszHeapName,
+ DEVMEM_HEAP **ppsHeapRet);
+
+/*
+ * DevmemGetHeapBaseDevVAddr()
+ *
+ * Returns the device virtual address of the base of the heap.
+ */
+
+PVRSRV_ERROR
+DevmemGetHeapBaseDevVAddr(DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+
+extern PVRSRV_ERROR
+DevmemLocalGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport);
+
+extern PVRSRV_ERROR
+DevmemGetImportUID(DEVMEM_MEMDESC *psMemDesc,
+ IMG_UINT64 *pui64UID);
+
+PVRSRV_ERROR
+DevmemGetReservation(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *hReservation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetPMRData(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *hPMR,
+ IMG_DEVMEM_OFFSET_T *puiPMROffset);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFlags(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_FLAGS_T *puiFlags);
+
+IMG_INTERNAL IMG_HANDLE
+DevmemGetConnection(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+DevmemLocalImport(IMG_HANDLE hBridge,
+ IMG_HANDLE hExtHandle,
+ DEVMEM_FLAGS_T uiFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr,
+ IMG_DEVMEM_SIZE_T *puiSizePtr,
+ const IMG_CHAR *pszAnnotation);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemIsDevVirtAddrValid(DEVMEM_CONTEXT *psContext,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemGetFaultAddress(DEVMEM_CONTEXT *psContext,
+ IMG_DEV_VIRTADDR *psFaultAddress);
+
+/* DevmemGetHeapLog2PageSize()
+ *
+ * Get the log2 of the page size used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapLog2PageSize(DEVMEM_HEAP *psHeap);
+
+/* DevmemGetHeapTilingProperties()
+ *
+ * Get the import alignment and tiling stride factor used for a certain heap.
+ */
+IMG_UINT32
+DevmemGetHeapTilingProperties(DEVMEM_HEAP *psHeap,
+ IMG_UINT32 *puiLog2ImportAlignment,
+ IMG_UINT32 *puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function RegisterDevmemPFNotify
+@Description Registers that the application wants to be signaled when a
+ page fault occurs.
+
+@Input psContext Memory context of the process that would like to
+ be notified.
+@Input ui32PID The PID of the calling process.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR: PVRSRV_OK on success, otherwise an error code
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RegisterDevmemPFNotify(DEVMEM_CONTEXT *psContext,
+ IMG_UINT32 ui32PID,
+ IMG_BOOL bRegister);
+
+/**************************************************************************/ /*!
+@Function GetMaxDevMemSize
+@Description Get the amount of device memory on the current platform
+ (memory size in bytes)
+@Output puiLMASize LMA memory size
+@Output puiUMASize UMA memory size
+@Return Error code
+*/ /***************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+GetMaxDevMemSize(SHARED_DEV_CONNECTION psConnection,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize);
+
+#endif /* #ifndef SRVCLIENT_DEVICEMEM_CLIENT_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.c b/drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.c
new file mode 100644
index 00000000000000..79a31a11fe5867
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.c
@@ -0,0 +1,136 @@
+/*************************************************************************/ /*!
+@File devicemem_heapcfg.c
+@Title Device memory heap configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* our exported API */
+#include "devicemem_heapcfg.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "connection_server.h"
+
+PVRSRV_ERROR
+HeapCfgHeapConfigCount(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *puiNumHeapConfigsOut)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ *puiNumHeapConfigsOut = psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapCount(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut)
+{
+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+ }
+
+ *puiNumHeapsOut = psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapConfigName(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapConfigNameBufSz,
+ IMG_CHAR *pszHeapConfigNameOut)
+{
+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+ }
+
+ OSSNPrintf(pszHeapConfigNameOut, uiHeapConfigNameBufSz, "%s", psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].pszName);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+HeapCfgHeapDetails(CONNECTION_DATA * psConnection,
+ const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSizeOut,
+ IMG_UINT32 *puiLog2ImportAlignmentOut,
+ IMG_UINT32 *puiLog2TilingStrideFactorOut)
+{
+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprint;
+
+ if (uiHeapConfigIndex >= psDeviceNode->sDevMemoryInfo.uiNumHeapConfigs)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX;
+ }
+
+ if (uiHeapIndex >= psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].uiNumHeaps)
+ {
+ return PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX;
+ }
+
+ psHeapBlueprint = &psDeviceNode->sDevMemoryInfo.psDeviceMemoryHeapConfigArray[uiHeapConfigIndex].psHeapBlueprintArray[uiHeapIndex];
+
+ OSSNPrintf(pszHeapNameOut, uiHeapNameBufSz, "%s", psHeapBlueprint->pszName);
+ *psDevVAddrBaseOut = psHeapBlueprint->sHeapBaseAddr;
+ *puiHeapLengthOut = psHeapBlueprint->uiHeapLength;
+ *puiLog2DataPageSizeOut = psHeapBlueprint->uiLog2DataPageSize;
+ *puiLog2ImportAlignmentOut = psHeapBlueprint->uiLog2ImportAlignment;
+ *puiLog2TilingStrideFactorOut = psHeapBlueprint->uiLog2TilingStrideFactor;
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.h
new file mode 100644
index 00000000000000..8933831882fdc4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_heapcfg.h
@@ -0,0 +1,163 @@
+/**************************************************************************/ /*!
+@File
+@Title Device memory heap configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEMHEAPCFG_H__
+#define __DEVICEMEMHEAPCFG_H__
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+
+/* FIXME: Find a better way of defining _PVRSRV_DEVICE_NODE_ */
+struct _PVRSRV_DEVICE_NODE_;
+/* FIXME: Find a better way of defining _CONNECTION_DATA_ */
+struct _CONNECTION_DATA_;
+
+
+/*
+ A "heap config" is a blueprint to be used for initial setting up of
+ heaps when a device memory context is created.
+
+ We define a data structure to define this, but it's really down to
+ the caller to populate it. This is all expected to be in-kernel.
+ We provide an API that client code can use to enquire about the
+ blueprint, such that it may do the heap setup during the context
+ creation call on behalf of the user */
+
+/* blueprint for a single heap */
+typedef struct _DEVMEM_HEAP_BLUEPRINT_
+{
+ /* Name of this heap - for debug purposes, and perhaps for lookup
+ by name? */
+ const IMG_CHAR *pszName;
+
+ /* Virtual address of the beginning of the heap. This _must_ be a
+ multiple of the data page size for the heap. It is
+ _recommended_ that it be coarser than that - especially, it
+ should begin on a boundary appropriate to the MMU for the
+ device. For Rogue, this is a Page Directory boundary, or 1GB
+ (virtual address a multiple of 0x0040000000). */
+ IMG_DEV_VIRTADDR sHeapBaseAddr;
+
+ /* Length of the heap. The END address of the heap has a similar
+ restriction to that of the _beginning_ of the heap: that is, the
+ heap length _must_ be a whole number of data pages. Again, the
+ recommendation is that it ends on a 1GB boundary. This is not
+ essential, but we do know that (at the time of writing) the
+ current implementation of mmu_common.c is such that no two heaps
+ may share a page directory, thus the remaining virtual space
+ would be wasted if the length were not a multiple of 1GB */
+ IMG_DEVMEM_SIZE_T uiHeapLength;
+
+ /* Data page size. This is the page size that is going to get
+ programmed into the MMU, so it needs to be a valid one for the
+ device. Importantly, the start address and length _must_ be
+ multiples of this page size. Note that the page size is
+ specified as the log 2 relative to 1 byte (e.g. 12 indicates
+ 4kB) */
+ IMG_UINT32 uiLog2DataPageSize;
+
+ /* Import alignment. Force imports to this heap to be
+ aligned to at least this value */
+ IMG_UINT32 uiLog2ImportAlignment;
+
+ /* Tiled heaps have an optimum byte-stride; this can be derived from
+ the heap alignment and tiling mode. It is abstracted here such that
+ Log2ByteStride = Log2Alignment - Log2TilingStrideFactor */
+ IMG_UINT32 uiLog2TilingStrideFactor;
+} DEVMEM_HEAP_BLUEPRINT;
+
+/* entire named heap config */
+typedef struct _DEVMEM_HEAP_CONFIG_
+{
+ /* Name of this heap config - for debug and maybe lookup */
+ const IMG_CHAR *pszName;
+
+ /* Number of heaps in this config */
+ IMG_UINT32 uiNumHeaps;
+
+ /* Array of individual heap blueprints as defined above */
+ DEVMEM_HEAP_BLUEPRINT *psHeapBlueprintArray;
+} DEVMEM_HEAP_CONFIG;
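+
+/* Illustrative example of a populated blueprint (editor's sketch only;
+ * the names, addresses and sizes are made up, not a real platform
+ * configuration). A single 4GB general heap with 4kB data pages,
+ * beginning on a 1GB boundary as recommended above:
+ *
+ *   static DEVMEM_HEAP_BLUEPRINT gasExampleHeaps[] =
+ *   {
+ *       // name, base, length, log2 page, log2 import align, log2 stride
+ *       { "General", { 0x0000000040000000ULL }, 0x0000000100000000ULL,
+ *         12, 12, 0 },
+ *   };
+ *
+ *   static DEVMEM_HEAP_CONFIG gsExampleHeapConfig =
+ *   {
+ *       "Example", 1, gasExampleHeaps
+ *   };
+ */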
+
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigCount(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 *puiNumHeapConfigsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapCount(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 *puiNumHeapsOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapConfigName(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapConfigNameBufSz,
+ IMG_CHAR *pszHeapConfigNameOut
+);
+
+extern PVRSRV_ERROR
+HeapCfgHeapDetails(struct _CONNECTION_DATA_ * psConnection,
+ const struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ IMG_UINT32 uiHeapConfigIndex,
+ IMG_UINT32 uiHeapIndex,
+ IMG_UINT32 uiHeapNameBufSz,
+ IMG_CHAR *pszHeapNameOut,
+ IMG_DEV_VIRTADDR *psDevVAddrBaseOut,
+ IMG_DEVMEM_SIZE_T *puiHeapLengthOut,
+ IMG_UINT32 *puiLog2DataPageSizeOut,
+ IMG_UINT32 *puiLog2ImportAlignmentOut,
+ IMG_UINT32 *puiLog2TilingStrideFactorOut
+);
+
+#endif /* __DEVICEMEMHEAPCFG_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.c b/drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.c
new file mode 100644
index 00000000000000..f30083cd0ddc0d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.c
@@ -0,0 +1,1910 @@
+/*************************************************************************/ /*!
+@File
+@Title Devicemem history functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Devicemem history functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_defs.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "devicemem_server.h"
+#include "lock.h"
+#include "devicemem_history_server.h"
+#include "pdump_km.h"
+
+#define ALLOCATION_LIST_NUM_ENTRIES 10000
+
+/* data type to hold an allocation index.
+ * we make it 16 bits wide if possible
+ */
+#if ALLOCATION_LIST_NUM_ENTRIES <= 0xFFFF
+typedef uint16_t ALLOC_INDEX_T;
+#else
+typedef uint32_t ALLOC_INDEX_T;
+#endif
+
+/* a record describing a single allocation known to DeviceMemHistory.
+ * this is an element in a doubly linked list of allocations
+ */
+typedef struct _RECORD_ALLOCATION_
+{
+ /* time when this RECORD_ALLOCATION was created/initialised */
+ IMG_UINT64 ui64CreationTime;
+ /* serial number of the PMR relating to this allocation */
+ IMG_UINT64 ui64Serial;
+ /* base DevVAddr of this allocation */
+ IMG_DEV_VIRTADDR sDevVAddr;
+ /* size in bytes of this allocation */
+ IMG_DEVMEM_SIZE_T uiSize;
+ /* Log2 page size of this allocation's GPU pages */
+ IMG_UINT32 ui32Log2PageSize;
+ /* Process ID (PID) this allocation belongs to */
+ IMG_PID uiPID;
+ /* index of previous allocation in the list */
+ ALLOC_INDEX_T ui32Prev;
+ /* index of next allocation in the list */
+ ALLOC_INDEX_T ui32Next;
+ /* annotation/name of this allocation */
+ IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN];
+} RECORD_ALLOCATION;
+
+/* each command in the circular buffer is prefixed with an 8-bit value
+ * denoting the command type
+ */
+typedef enum _COMMAND_TYPE_
+{
+ COMMAND_TYPE_NONE,
+ COMMAND_TYPE_TIMESTAMP,
+ COMMAND_TYPE_MAP_ALL,
+ COMMAND_TYPE_UNMAP_ALL,
+ COMMAND_TYPE_MAP_RANGE,
+ COMMAND_TYPE_UNMAP_RANGE,
+ /* sentinel value */
+ COMMAND_TYPE_COUNT,
+} COMMAND_TYPE;
+
+/* Timestamp command:
+ * This command is inserted into the circular buffer to provide an updated
+ * timestamp.
+ * The nanosecond-accuracy timestamp is packed into a 56-bit integer, in order
+ * for the whole command to fit into 8 bytes.
+ */
+typedef struct _COMMAND_TIMESTAMP_
+{
+ IMG_UINT8 aui8TimeNs[7];
+} COMMAND_TIMESTAMP;
+
+/* MAP_ALL command:
+ * This command denotes that the allocation at the given index was wholly
+ * mapped into the GPU MMU
+ */
+typedef struct _COMMAND_MAP_ALL_
+{
+ ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_ALL;
+
+/* UNMAP_ALL command:
+ * This command denotes that the allocation at the given index was wholly
+ * unmapped from the GPU MMU
+ * Note: COMMAND_MAP_ALL and COMMAND_UNMAP_ALL commands have the same layout.
+ */
+typedef COMMAND_MAP_ALL COMMAND_UNMAP_ALL;
+
+/* packing attributes for the MAP_RANGE command */
+#define MAP_RANGE_MAX_START ((1 << 18) - 1)
+#define MAP_RANGE_MAX_RANGE ((1 << 12) - 1)
+
+/* MAP_RANGE command:
+ * Denotes a range of pages within the given allocation being mapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ */
+
+typedef struct _COMMAND_MAP_RANGE_
+{
+ IMG_UINT8 aui8Data[5];
+ ALLOC_INDEX_T uiAllocIndex;
+} COMMAND_MAP_RANGE;
+
+/* UNMAP_RANGE command:
+ * Denotes a range of pages within the given allocation being unmapped.
+ * The range is expressed as [Page Index] + [Page Count]
+ * This information is packed into a 40-bit integer, in order to make
+ * the command size 8 bytes.
+ * Note: COMMAND_MAP_RANGE and COMMAND_UNMAP_RANGE commands have the same layout.
+ */
+typedef COMMAND_MAP_RANGE COMMAND_UNMAP_RANGE;
+
+/* wrapper structure for a command */
+typedef struct _COMMAND_WRAPPER_
+{
+ IMG_UINT8 ui8Type;
+ union {
+ COMMAND_TIMESTAMP sTimeStamp;
+ COMMAND_MAP_ALL sMapAll;
+ COMMAND_UNMAP_ALL sUnmapAll;
+ COMMAND_MAP_RANGE sMapRange;
+ COMMAND_UNMAP_RANGE sUnmapRange;
+ } u;
+} COMMAND_WRAPPER;
+
+/* target size for the circular buffer of commands */
+#define CIRCULAR_BUFFER_SIZE_KB 2048
+/* turn the circular buffer target size into a number of commands */
+#define CIRCULAR_BUFFER_NUM_COMMANDS ((CIRCULAR_BUFFER_SIZE_KB * 1024) / sizeof(COMMAND_WRAPPER))
+
+/* index value denoting the end of a list */
+#define END_OF_LIST 0xFFFFFFFF
+#define ALLOC_INDEX_TO_PTR(idx) (&(gsDevicememHistoryData.sRecords.pasAllocations[idx]))
+#define CHECK_ALLOC_INDEX(idx) (idx < ALLOCATION_LIST_NUM_ENTRIES)
+
+/* wrapper structure for the allocation records and the commands circular buffer */
+typedef struct _RECORDS_
+{
+ RECORD_ALLOCATION *pasAllocations;
+ IMG_UINT32 ui32AllocationsListHead;
+
+ IMG_UINT32 ui32Head;
+ IMG_UINT32 ui32Tail;
+ COMMAND_WRAPPER *pasCircularBuffer;
+} RECORDS;
+
+typedef struct _DEVICEMEM_HISTORY_DATA_
+{
+ /* debugfs entry */
+ void *pvStatsEntry;
+
+ RECORDS sRecords;
+ POS_LOCK hLock;
+} DEVICEMEM_HISTORY_DATA;
+
+static DEVICEMEM_HISTORY_DATA gsDevicememHistoryData;
+
+static void DevicememHistoryLock(void)
+{
+ OSLockAcquire(gsDevicememHistoryData.hLock);
+}
+
+static void DevicememHistoryUnlock(void)
+{
+ OSLockRelease(gsDevicememHistoryData.hLock);
+}
+
+/* Given the current time and an earlier timestamp, calculate the age in
+ * nanoseconds, allowing for at most one wrap of the clock */
+static IMG_UINT64 _CalculateAge(IMG_UINT64 ui64Now,
+ IMG_UINT64 ui64Then,
+ IMG_UINT64 ui64Max)
+{
+ if(ui64Now >= ui64Then)
+ {
+ /* no clock wrap */
+ return ui64Now - ui64Then;
+ }
+ else
+ {
+ /* clock has wrapped */
+ return (ui64Max - ui64Then) + ui64Now + 1;
+ }
+}
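+
+/* Worked example (sketch): with ui64Max = 0xFF, ui64Then = 0xFE and
+ * ui64Now = 0x01, the clock has wrapped and the age is
+ * (0xFF - 0xFE) + 0x01 + 1 = 3 ticks (0xFE -> 0xFF -> 0x00 -> 0x01).
+ */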
+
+/* AcquireCBSlot:
+ * Acquire the next slot in the circular buffer and
+ * move the circular buffer head along by one.
+ * Returns a pointer to the acquired slot.
+ */
+static COMMAND_WRAPPER *AcquireCBSlot(void)
+{
+ COMMAND_WRAPPER *psSlot;
+
+ psSlot = &gsDevicememHistoryData.sRecords.pasCircularBuffer[gsDevicememHistoryData.sRecords.ui32Head];
+
+ gsDevicememHistoryData.sRecords.ui32Head =
+ (gsDevicememHistoryData.sRecords.ui32Head + 1)
+ % CIRCULAR_BUFFER_NUM_COMMANDS;
+
+ return psSlot;
+}
+
+/* TimeStampPack:
+ * Packs the given timestamp value into the COMMAND_TIMESTAMP structure.
+ * This takes a 64-bit nanosecond timestamp and packs it in to a 56-bit
+ * integer in the COMMAND_TIMESTAMP command.
+ */
+static void TimeStampPack(COMMAND_TIMESTAMP *psTimeStamp, IMG_UINT64 ui64Now)
+{
+ IMG_UINT32 i;
+
+ for (i = 0; i < ARRAY_SIZE(psTimeStamp->aui8TimeNs); i++)
+ {
+ psTimeStamp->aui8TimeNs[i] = ui64Now & 0xFF;
+ ui64Now >>= 8;
+ }
+}
+
+/* Packing a 64-bit nanosecond timestamp into a 7-byte integer loses the
+ * top 8 bits of data. This must be taken into account when
+ * comparing a full timestamp against an unpacked timestamp
+ */
+#define TIME_STAMP_MASK ((1LLU << 56) - 1)
+#define DO_TIME_STAMP_MASK(ns64) (ns64 & TIME_STAMP_MASK)
+
+/* TimeStampUnpack:
+ * Unpack the timestamp value from the given COMMAND_TIMESTAMP command
+ */
+static IMG_UINT64 TimeStampUnpack(COMMAND_TIMESTAMP *psTimeStamp)
+{
+ IMG_UINT64 ui64TimeNs = 0;
+ IMG_UINT32 i;
+
+ for (i = ARRAY_SIZE(psTimeStamp->aui8TimeNs); i > 0; i--)
+ {
+ ui64TimeNs <<= 8;
+ ui64TimeNs |= (IMG_UINT64) psTimeStamp->aui8TimeNs[i - 1];
+ }
+
+ return ui64TimeNs;
+}
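+
+/* Round-trip property (sketch): for any ui64Now,
+ *
+ *   TimeStampPack(&sTS, ui64Now);
+ *   TimeStampUnpack(&sTS) == DO_TIME_STAMP_MASK(ui64Now)
+ *
+ * i.e. only the top 8 bits are lost (see TIME_STAMP_MASK above).
+ */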
+
+#if defined(PDUMP)
+
+static void EmitPDumpAllocation(IMG_UINT32 ui32AllocationIndex,
+ RECORD_ALLOCATION *psAlloc)
+{
+ PDUMPCOMMENT("[SrvPFD] Allocation: %u"
+ " Addr: " IMG_DEV_VIRTADDR_FMTSPEC
+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+ " Page size: %u"
+ " PID: %u"
+ " Process: %s"
+ " Name: %s",
+ ui32AllocationIndex,
+ psAlloc->sDevVAddr.uiAddr,
+ psAlloc->uiSize,
+ 1U << psAlloc->ui32Log2PageSize,
+ psAlloc->uiPID,
+ OSGetCurrentClientProcessNameKM(),
+ psAlloc->szName);
+}
+
+static void EmitPDumpMapUnmapAll(COMMAND_TYPE eType,
+ IMG_UINT32 ui32AllocationIndex)
+{
+ const IMG_CHAR *pszOpName;
+
+ switch(eType)
+ {
+ case COMMAND_TYPE_MAP_ALL:
+ pszOpName = "MAP_ALL";
+ break;
+ case COMMAND_TYPE_UNMAP_ALL:
+ pszOpName = "UNMAP_ALL";
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapAll: Invalid type: %u",
+ eType));
+ return;
+
+ }
+
+ PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u",
+ pszOpName,
+ ui32AllocationIndex);
+}
+
+static void EmitPDumpMapUnmapRange(COMMAND_TYPE eType,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ const IMG_CHAR *pszOpName;
+
+ switch(eType)
+ {
+ case COMMAND_TYPE_MAP_RANGE:
+ pszOpName = "MAP_RANGE";
+ break;
+ case COMMAND_TYPE_UNMAP_RANGE:
+ pszOpName = "UNMAP_RANGE";
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "EmitPDumpMapUnmapRange: Invalid type: %u",
+ eType));
+ return;
+ }
+
+ PDUMPCOMMENT("[SrvPFD] Op: %s Allocation: %u Start Page: %u Count: %u",
+ pszOpName,
+ ui32AllocationIndex,
+ ui32StartPage,
+ ui32Count);
+}
+
+#endif
+
+/* InsertTimeStampCommand:
+ * Insert a timestamp command into the circular buffer.
+ */
+static void InsertTimeStampCommand(IMG_UINT64 ui64Now)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_TIMESTAMP;
+
+ TimeStampPack(&psCommand->u.sTimeStamp, ui64Now);
+}
+
+/* InsertMapAllCommand:
+ * Insert a "MAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertMapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_MAP_ALL;
+ psCommand->u.sMapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapAll(COMMAND_TYPE_MAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* InsertUnmapAllCommand:
+ * Insert a "UNMAP_ALL" command for the given allocation into the circular buffer
+ */
+static void InsertUnmapAllCommand(IMG_UINT32 ui32AllocIndex)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_UNMAP_ALL;
+ psCommand->u.sUnmapAll.uiAllocIndex = ui32AllocIndex;
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapAll(COMMAND_TYPE_UNMAP_ALL, ui32AllocIndex);
+#endif
+}
+
+/* MapRangePack:
+ * Pack the given StartPage and Count values into the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangePack(COMMAND_MAP_RANGE *psMapRange,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ IMG_UINT64 ui64Data;
+ IMG_UINT32 i;
+
+ /* we must encode the data into 40 bits:
+ * 18 bits for the start page index
+ * 12 bits for the range
+ */
+
+ PVR_ASSERT(ui32StartPage <= MAP_RANGE_MAX_START);
+ PVR_ASSERT(ui32Count <= MAP_RANGE_MAX_RANGE);
+
+ ui64Data = (((IMG_UINT64) ui32StartPage) << 12) | ui32Count;
+
+ for (i = 0; i < ARRAY_SIZE(psMapRange->aui8Data); i++)
+ {
+ psMapRange->aui8Data[i] = ui64Data & 0xFF;
+ ui64Data >>= 8;
+ }
+}
+
+/* MapRangeUnpack:
+ * Unpack the StartPage and Count values from the 40-bit representation
+ * in the MAP_RANGE command.
+ */
+static void MapRangeUnpack(COMMAND_MAP_RANGE *psMapRange,
+ IMG_UINT32 *pui32StartPage,
+ IMG_UINT32 *pui32Count)
+{
+ IMG_UINT64 ui64Data = 0;
+ IMG_UINT32 i;
+
+ for (i = ARRAY_SIZE(psMapRange->aui8Data); i > 0; i--)
+ {
+ ui64Data <<= 8;
+ ui64Data |= (IMG_UINT64) psMapRange->aui8Data[i - 1];
+ }
+
+ *pui32StartPage = (ui64Data >> 12);
+ *pui32Count = ui64Data & ((1 << 12) - 1);
+}
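+
+/* Worked example (sketch): ui32StartPage = 3, ui32Count = 5 packs to
+ * ui64Data = (3 << 12) | 5 = 0x3005, stored byte-wise (LSB first) as
+ * aui8Data[] = { 0x05, 0x30, 0x00, 0x00, 0x00 }; unpacking reverses this.
+ */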
+
+/* InsertMapRangeCommand:
+ * Insert a MAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertMapRangeCommand(IMG_UINT32 ui32AllocIndex,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_MAP_RANGE;
+ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapRange(COMMAND_TYPE_MAP_RANGE,
+ ui32AllocIndex,
+ ui32StartPage,
+ ui32Count);
+#endif
+}
+
+/* InsertUnmapRangeCommand:
+ * Insert a UNMAP_RANGE command into the circular buffer with the given
+ * StartPage and Count values.
+ */
+static void InsertUnmapRangeCommand(IMG_UINT32 ui32AllocIndex,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32Count)
+{
+ COMMAND_WRAPPER *psCommand;
+
+ psCommand = AcquireCBSlot();
+
+ psCommand->ui8Type = COMMAND_TYPE_UNMAP_RANGE;
+ psCommand->u.sMapRange.uiAllocIndex = ui32AllocIndex;
+
+ MapRangePack(&psCommand->u.sMapRange, ui32StartPage, ui32Count);
+
+#if defined(PDUMP)
+ EmitPDumpMapUnmapRange(COMMAND_TYPE_UNMAP_RANGE,
+ ui32AllocIndex,
+ ui32StartPage,
+ ui32Count);
+#endif
+}
+
+/* InsertAllocationToList:
+ * Helper function for the allocation list.
+ * Inserts the given allocation at the head of the list, whose current head is
+ * pointed to by pui32ListHead
+ */
+static void InsertAllocationToList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ if(*pui32ListHead == END_OF_LIST)
+ {
+ /* list is currently empty, so just replace it */
+ *pui32ListHead = ui32Alloc;
+ psAlloc->ui32Next = psAlloc->ui32Prev = *pui32ListHead;
+ }
+ else
+ {
+ RECORD_ALLOCATION *psHeadAlloc;
+ RECORD_ALLOCATION *psTailAlloc;
+
+ psHeadAlloc = ALLOC_INDEX_TO_PTR(*pui32ListHead);
+ psTailAlloc = ALLOC_INDEX_TO_PTR(psHeadAlloc->ui32Prev);
+
+ /* make the new alloc point forwards to the previous head */
+ psAlloc->ui32Next = *pui32ListHead;
+ /* make the new alloc point backwards to the previous tail */
+ psAlloc->ui32Prev = psHeadAlloc->ui32Prev;
+
+ /* the head is now our new alloc */
+ *pui32ListHead = ui32Alloc;
+
+ /* the old head now points back to the new head */
+ psHeadAlloc->ui32Prev = *pui32ListHead;
+
+ /* the tail now points forward to the new head */
+ psTailAlloc->ui32Next = ui32Alloc;
+ }
+}
+
+static void InsertAllocationToBusyList(IMG_UINT32 ui32Alloc)
+{
+ InsertAllocationToList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* RemoveAllocationFromList:
+ * Helper function for the allocation list.
+ * Removes the given allocation from the list, whose head is
+ * pointed to by pui32ListHead
+ */
+static void RemoveAllocationFromList(IMG_UINT32 *pui32ListHead, IMG_UINT32 ui32Alloc)
+{
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ /* if this is the only element in the list then just make the list empty */
+ if((*pui32ListHead == ui32Alloc) && (psAlloc->ui32Next == ui32Alloc))
+ {
+ *pui32ListHead = END_OF_LIST;
+ }
+ else
+ {
+ RECORD_ALLOCATION *psPrev, *psNext;
+
+ psPrev = ALLOC_INDEX_TO_PTR(psAlloc->ui32Prev);
+ psNext = ALLOC_INDEX_TO_PTR(psAlloc->ui32Next);
+
+ /* remove the allocation from the list */
+ psPrev->ui32Next = psAlloc->ui32Next;
+ psNext->ui32Prev = psAlloc->ui32Prev;
+
+ /* if this allocation is the head then update the head */
+ if(*pui32ListHead == ui32Alloc)
+ {
+ *pui32ListHead = psAlloc->ui32Prev;
+ }
+ }
+}
+
+static void RemoveAllocationFromBusyList(IMG_UINT32 ui32Alloc)
+{
+ RemoveAllocationFromList(&gsDevicememHistoryData.sRecords.ui32AllocationsListHead, ui32Alloc);
+}
+
+/* TouchBusyAllocation:
+ * Move the given allocation to the head of the list
+ */
+static void TouchBusyAllocation(IMG_UINT32 ui32Alloc)
+{
+ RemoveAllocationFromBusyList(ui32Alloc);
+ InsertAllocationToBusyList(ui32Alloc);
+}
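+
+/* Example (sketch): with an MRU list A (head) -> B -> C, calling
+ * TouchBusyAllocation(B) removes B and re-inserts it at the head,
+ * giving B -> A -> C.
+ */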
+
+static INLINE IMG_BOOL IsAllocationListEmpty(IMG_UINT32 ui32ListHead)
+{
+ return ui32ListHead == END_OF_LIST;
+}
+
+/* GetOldestBusyAllocation:
+ * Returns the index of the oldest allocation in the MRU list
+ */
+static IMG_UINT32 GetOldestBusyAllocation(void)
+{
+ IMG_UINT32 ui32Alloc;
+ RECORD_ALLOCATION *psAlloc;
+
+ ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+ if(ui32Alloc == END_OF_LIST)
+ {
+ return END_OF_LIST;
+ }
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ return psAlloc->ui32Prev;
+}
+
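+/* GetFreeAllocation:
+ * Returns the index of an allocation record that may be (re)used. The
+ * record pool is fixed-size, so the oldest busy allocation is recycled.
+ */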
+static IMG_UINT32 GetFreeAllocation(void)
+{
+ IMG_UINT32 ui32Alloc;
+
+ ui32Alloc = GetOldestBusyAllocation();
+
+ return ui32Alloc;
+}
+
+
+/* InitialiseAllocation:
+ * Initialise the given allocation structure with the given properties
+ */
+static void InitialiseAllocation(RECORD_ALLOCATION *psAlloc,
+ const IMG_CHAR *pszName,
+ IMG_UINT64 ui64Serial,
+ IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2PageSize)
+{
+ OSStringLCopy(psAlloc->szName, pszName, sizeof(psAlloc->szName));
+ psAlloc->ui64Serial = ui64Serial;
+ psAlloc->uiPID = uiPID;
+ psAlloc->sDevVAddr = sDevVAddr;
+ psAlloc->uiSize = uiSize;
+ psAlloc->ui32Log2PageSize = ui32Log2PageSize;
+ psAlloc->ui64CreationTime = OSClockns64();
+}
+
+/* CreateAllocation:
+ * Creates a new allocation with the given properties then outputs the
+ * index of the allocation
+ */
+static PVRSRV_ERROR CreateAllocation(const IMG_CHAR *pszName,
+ IMG_UINT64 ui64Serial,
+ IMG_PID uiPID,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_BOOL bAutoPurge,
+ IMG_UINT32 *puiAllocationIndex)
+{
+ IMG_UINT32 ui32Alloc;
+ RECORD_ALLOCATION *psAlloc;
+
+ ui32Alloc = GetFreeAllocation();
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ InitialiseAllocation(ALLOC_INDEX_TO_PTR(ui32Alloc),
+ pszName,
+ ui64Serial,
+ uiPID,
+ sDevVAddr,
+ uiSize,
+ ui32Log2PageSize);
+
+ /* put the newly initialised allocation at the front of the MRU list */
+ TouchBusyAllocation(ui32Alloc);
+
+ *puiAllocationIndex = ui32Alloc;
+
+#if defined(PDUMP)
+ EmitPDumpAllocation(ui32Alloc, psAlloc);
+#endif
+
+ return PVRSRV_OK;
+}
+
+/* MatchAllocation:
+ * Tests if the allocation at the given index matches the supplied properties.
+ * Returns IMG_TRUE if it is a match, otherwise IMG_FALSE.
+ */
+static IMG_BOOL MatchAllocation(IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT64 ui64Serial,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszName,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_PID uiPID)
+{
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocationIndex);
+
+ return (psAlloc->ui64Serial == ui64Serial) &&
+ (psAlloc->sDevVAddr.uiAddr == sDevVAddr.uiAddr) &&
+ (psAlloc->uiSize == uiSize) &&
+ (psAlloc->ui32Log2PageSize == ui32Log2PageSize) &&
+ (OSStringCompare(psAlloc->szName, pszName) == 0);
+}
+
+/* FindOrCreateAllocation:
+ * Convenience function.
+ * Given a set of allocation properties (serial, DevVAddr, size, name, etc),
+ * this function will look for an existing record of this allocation and
+ * create the allocation if there is no existing record
+ */
+static PVRSRV_ERROR FindOrCreateAllocation(IMG_UINT32 ui32AllocationIndexHint,
+ IMG_UINT64 ui64Serial,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char *pszName,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_PID uiPID,
+ IMG_BOOL bSparse,
+ IMG_UINT32 *pui32AllocationIndexOut,
+ IMG_BOOL *pbCreated)
+{
+ IMG_UINT32 ui32AllocationIndex;
+ PVRSRV_ERROR eError;
+
+ if(ui32AllocationIndexHint != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE)
+ {
+ IMG_BOOL bHaveAllocation;
+
+ /* first, try to match against the index given by the client.
+ * if the caller provided a hint but the allocation record is no longer
+ * there, it must have been purged, so go ahead and create a new allocation
+ */
+ bHaveAllocation = MatchAllocation(ui32AllocationIndexHint,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ pszName,
+ ui32Log2PageSize,
+ uiPID);
+ if(bHaveAllocation)
+ {
+ *pbCreated = IMG_FALSE;
+ *pui32AllocationIndexOut = ui32AllocationIndexHint;
+ return PVRSRV_OK;
+ }
+ }
+
+ /* if there is no record of the allocation then we
+ * create it now
+ */
+ eError = CreateAllocation(pszName,
+ ui64Serial,
+ uiPID,
+ sDevVAddr,
+ uiSize,
+ ui32Log2PageSize,
+ IMG_TRUE,
+ &ui32AllocationIndex);
+
+ if(eError == PVRSRV_OK)
+ {
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+ *pbCreated = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create record for allocation %s",
+ __func__,
+ pszName));
+ }
+
+ return eError;
+}
+
+/* GenerateMapUnmapCommandsForSparsePMR:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the PMR's
+ * current mapping table
+ *
+ * psPMR: The PMR whose mapping table to read.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP_RANGE commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the PMR's mapping table and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForSparsePMR(PMR *psPMR,
+ IMG_UINT32 ui32AllocIndex,
+ IMG_BOOL bMap)
+{
+ PMR_MAPPING_TABLE *psMappingTable;
+ IMG_UINT32 ui32DonePages = 0;
+ IMG_UINT32 ui32NumPages;
+ IMG_UINT32 i;
+ IMG_BOOL bInARun = IMG_FALSE;
+ IMG_UINT32 ui32CurrentStart = 0;
+ IMG_UINT32 ui32RunCount = 0;
+
+ psMappingTable = PMR_GetMappigTable(psPMR);
+ ui32NumPages = psMappingTable->ui32NumPhysChunks;
+
+ if(ui32NumPages == 0)
+ {
+ /* nothing to do */
+ return;
+ }
+
+ for(i = 0; i < psMappingTable->ui32NumVirtChunks; i++)
+ {
+ if(psMappingTable->aui32Translation[i] != TRANSLATION_INVALID)
+ {
+ if(!bInARun)
+ {
+ bInARun = IMG_TRUE;
+ ui32CurrentStart = i;
+ ui32RunCount = 1;
+ }
+ else
+ {
+ ui32RunCount++;
+ }
+ }
+
+ if(bInARun)
+ {
+ /* test if we need to end the current run and generate the command,
+ * either because the next page is not virtually contiguous
+ * to the current page, because we have reached the maximum range,
+ * or because this is the last page in the mapping table
+ */
+ if((psMappingTable->aui32Translation[i] == TRANSLATION_INVALID) ||
+ (ui32RunCount == MAP_RANGE_MAX_RANGE) ||
+ (i == (psMappingTable->ui32NumVirtChunks - 1)))
+ {
+ if(bMap)
+ {
+ InsertMapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+ else
+ {
+ InsertUnmapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+
+ ui32DonePages += ui32RunCount;
+
+ if(ui32DonePages == ui32NumPages)
+ {
+ break;
+ }
+
+ bInARun = IMG_FALSE;
+ }
+ }
+ }
+
+}
+
+/* GenerateMapUnmapCommandsForChangeList:
+ * Generate the MAP_RANGE or UNMAP_RANGE commands for the sparse PMR, using the
+ * list of page change (page map or page unmap) indices given.
+ *
+ * ui32NumPages: Number of pages which have changed.
+ * pui32PageList: List of indices of the pages which have changed.
+ * ui32AllocIndex: The allocation to attribute the MAP_RANGE/UNMAP_RANGE commands to.
+ * bMap: Set to IMG_TRUE for mapping or IMG_FALSE for unmapping
+ *
+ * This function goes through every page in the list and looks for
+ * virtually contiguous ranges to record as being mapped or unmapped.
+ */
+static void GenerateMapUnmapCommandsForChangeList(IMG_UINT32 ui32NumPages,
+ IMG_UINT32 *pui32PageList,
+ IMG_UINT32 ui32AllocIndex,
+ IMG_BOOL bMap)
+{
+ IMG_UINT32 i;
+ IMG_BOOL bInARun = IMG_FALSE;
+ IMG_UINT32 ui32CurrentStart = 0;
+ IMG_UINT32 ui32RunCount = 0;
+
+ for(i = 0; i < ui32NumPages; i++)
+ {
+ if(!bInARun)
+ {
+ bInARun = IMG_TRUE;
+ ui32CurrentStart = pui32PageList[i];
+ }
+
+ ui32RunCount++;
+
+ /* we flush if:
+ * - the next page in the list is not one greater than the current page
+ * - this is the last page in the list
+ * - we have reached the maximum range size
+ */
+ if((i == (ui32NumPages - 1)) ||
+ ((pui32PageList[i] + 1) != pui32PageList[i + 1]) ||
+ (ui32RunCount == MAP_RANGE_MAX_RANGE))
+ {
+ if(bMap)
+ {
+ InsertMapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+ else
+ {
+ InsertUnmapRangeCommand(ui32AllocIndex,
+ ui32CurrentStart,
+ ui32RunCount);
+ }
+
+ bInARun = IMG_FALSE;
+ ui32RunCount = 0;
+ }
+ }
+}
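+
+/* Example (sketch): a page list of { 4, 5, 6, 10, 11 } with bMap set
+ * produces two commands: MAP_RANGE(start 4, count 3) and
+ * MAP_RANGE(start 10, count 2), since the run breaks between 6 and 10.
+ */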
+
+/* DevicememHistoryMapKM:
+ * Entry point for when an allocation is mapped into the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ * We will use this as a short-cut to find the allocation
+ * in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ * This may be a new value if we just created the
+ * allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+ IMG_UINT64 ui64Serial;
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PMRGetUID(psPMR, &ui64Serial);
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ bSparse,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ if(!bSparse)
+ {
+ InsertMapAllCommand(ui32AllocationIndex);
+ }
+ else
+ {
+ GenerateMapUnmapCommandsForSparsePMR(psPMR,
+ ui32AllocationIndex,
+ IMG_TRUE);
+ }
+
+ InsertTimeStampCommand(OSClockns64());
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+static void VRangeInsertMapUnmapCommands(IMG_BOOL bMap,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ const IMG_CHAR *pszName)
+{
+ while(ui32NumPages > 0)
+ {
+ IMG_UINT32 ui32PagesToAdd;
+
+ ui32PagesToAdd = MIN(ui32NumPages, MAP_RANGE_MAX_RANGE);
+
+ if(ui32StartPage > MAP_RANGE_MAX_START)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Cannot record %s range beginning at page "
+ "%u on allocation %s",
+ bMap ? "map" : "unmap",
+ ui32StartPage,
+ pszName));
+ return;
+ }
+
+ if(bMap)
+ {
+ InsertMapRangeCommand(ui32AllocationIndex,
+ ui32StartPage,
+ ui32PagesToAdd);
+ }
+ else
+ {
+ InsertUnmapRangeCommand(ui32AllocationIndex,
+ ui32StartPage,
+ ui32PagesToAdd);
+ }
+
+ ui32StartPage += ui32PagesToAdd;
+ ui32NumPages -= ui32PagesToAdd;
+ }
+}
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ 0,
+ sBaseDevVAddr,
+ uiAllocSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ IMG_FALSE,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ VRangeInsertMapUnmapCommands(IMG_TRUE,
+ ui32AllocationIndex,
+ sBaseDevVAddr,
+ ui32StartPage,
+ ui32NumPages,
+ szName);
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+
+}
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ 0,
+ sBaseDevVAddr,
+ uiAllocSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ IMG_FALSE,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ VRangeInsertMapUnmapCommands(IMG_FALSE,
+ ui32AllocationIndex,
+ sBaseDevVAddr,
+ ui32StartPage,
+ ui32NumPages,
+ szName);
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+/* DevicememHistoryUnmapKM:
+ * Entry point for when an allocation is unmapped from the GPU MMU
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ * We will use this as a short-cut to find the allocation
+ * in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ * This may be a new value if we just created the
+ * allocation record.
+ */
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_BOOL bSparse = PMR_IsSparse(psPMR);
+ IMG_UINT64 ui64Serial;
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PMRGetUID(psPMR, &ui64Serial);
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ bSparse,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
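+	/* A non-sparse allocation is recorded with a single UNMAP_ALL command;
+	 * a sparse allocation is decomposed into UNMAP_RANGE commands covering
+	 * the currently-backed ranges of the PMR.
+	 */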
+ if(!bSparse)
+ {
+ InsertUnmapAllCommand(ui32AllocationIndex);
+ }
+ else
+ {
+ GenerateMapUnmapCommandsForSparsePMR(psPMR,
+ ui32AllocationIndex,
+ IMG_FALSE);
+ }
+
+ InsertTimeStampCommand(OSClockns64());
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+/* DevicememHistorySparseChangeKM:
+ * Entry point for when a sparse allocation is changed, such that some of the
+ * pages within the sparse allocation are mapped or unmapped.
+ *
+ * psPMR: The PMR to which the allocation belongs.
+ * ui32Offset: The offset within the PMR at which the allocation begins.
+ * sDevVAddr: The DevVAddr at which the allocation begins.
+ * szName: Annotation/name for the allocation.
+ * ui32Log2PageSize: Page size of the allocation, expressed in log2 form.
+ * ui32AllocPageCount: Number of pages which have been mapped.
+ * paui32AllocPageIndices: Indices of pages which have been mapped.
+ * ui32FreePageCount: Number of pages which have been unmapped.
+ * paui32FreePageIndices: Indices of pages which have been unmapped.
+ * ui32AllocationIndex: Allocation index as provided by the client.
+ * We will use this as a short-cut to find the allocation
+ * in our records.
+ * pui32AllocationIndexOut: An updated allocation index for the client.
+ * This may be a new value if we just created the
+ * allocation record.
+ */
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *paui32FreePageIndices,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut)
+{
+ IMG_UINT64 ui64Serial;
+ IMG_PID uiPID = OSGetCurrentProcessID();
+ PVRSRV_ERROR eError;
+ IMG_BOOL bCreated;
+
+ if((ui32AllocationIndex != DEVICEMEM_HISTORY_ALLOC_INDEX_NONE) &&
+ !CHECK_ALLOC_INDEX(ui32AllocationIndex))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid allocation index: %u",
+ __func__,
+ ui32AllocationIndex));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PMRGetUID(psPMR, &ui64Serial);
+
+ DevicememHistoryLock();
+
+ eError = FindOrCreateAllocation(ui32AllocationIndex,
+ ui64Serial,
+ sDevVAddr,
+ uiSize,
+ szName,
+ ui32Log2PageSize,
+ uiPID,
+ IMG_TRUE /* bSparse */,
+ &ui32AllocationIndex,
+ &bCreated);
+
+ if((eError == PVRSRV_OK) && !bCreated)
+ {
+ /* touch the allocation so it goes to the head of our MRU list */
+ TouchBusyAllocation(ui32AllocationIndex);
+ }
+ else if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to Find or Create allocation %s (%s)",
+ __func__,
+ szName,
+ PVRSRVGETERRORSTRING(eError)));
+ goto out_unlock;
+ }
+
+ GenerateMapUnmapCommandsForChangeList(ui32AllocPageCount,
+ paui32AllocPageIndices,
+ ui32AllocationIndex,
+ IMG_TRUE);
+
+ GenerateMapUnmapCommandsForChangeList(ui32FreePageCount,
+ paui32FreePageIndices,
+ ui32AllocationIndex,
+ IMG_FALSE);
+
+ InsertTimeStampCommand(OSClockns64());
+
+ *pui32AllocationIndexOut = ui32AllocationIndex;
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return eError;
+}
+
+/* CircularBufferIterateStart:
+ * Initialise local state for iterating over the circular buffer
+ */
+static void CircularBufferIterateStart(IMG_UINT32 *pui32Head, IMG_UINT32 *pui32Iter)
+{
+ *pui32Head = gsDevicememHistoryData.sRecords.ui32Head;
+
+ if(*pui32Head != 0)
+ {
+ *pui32Iter = *pui32Head - 1;
+ }
+ else
+ {
+ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+ }
+}
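+
+/* Example: ui32Head indexes the slot that will be written to next, so
+ * iteration begins one slot before it, i.e. at the most recently written
+ * command; when ui32Head == 0 it begins at the last slot,
+ * CIRCULAR_BUFFER_NUM_COMMANDS - 1.
+ */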
+
+/* CircularBufferIteratePrevious:
+ * Iterate to the previous item in the circular buffer.
+ * This is called repeatedly to iterate over the whole circular buffer.
+ */
+static COMMAND_WRAPPER *CircularBufferIteratePrevious(IMG_UINT32 ui32Head,
+ IMG_UINT32 *pui32Iter,
+ COMMAND_TYPE *peType,
+ IMG_BOOL *pbLast)
+{
+ IMG_UINT8 *pui8Header;
+ COMMAND_WRAPPER *psOut = NULL;
+
+ psOut = gsDevicememHistoryData.sRecords.pasCircularBuffer + *pui32Iter;
+
+ pui8Header = (IMG_UINT8 *) psOut;
+
+	/* Sanity-check that the command looks valid. This condition should
+	 * never happen, but check for it anyway and try to handle it
+	 * gracefully.
+	 */
+ if(*pui8Header >= COMMAND_TYPE_COUNT)
+ {
+ /* invalid header detected. Circular buffer corrupted? */
+ PVR_DPF((PVR_DBG_ERROR, "CircularBufferIteratePrevious: "
+ "Invalid header: %u",
+ *pui8Header));
+ *pbLast = IMG_TRUE;
+ return NULL;
+ }
+
+ *peType = *pui8Header;
+
+ if(*pui32Iter != 0)
+ {
+ (*pui32Iter)--;
+ }
+ else
+ {
+ *pui32Iter = CIRCULAR_BUFFER_NUM_COMMANDS - 1;
+ }
+
+ /* inform the caller this is the last command if either we have reached
+ * the head (where we started) or if we have reached an empty command,
+ * which means we have covered all populated entries
+ */
+ if((*pui32Iter == ui32Head) || (*peType == COMMAND_TYPE_NONE))
+ {
+ /* this is the final iteration */
+ *pbLast = IMG_TRUE;
+ }
+
+ return psOut;
+}
+
+/* MapUnmapCommandGetInfo:
+ * Helper function to get the address and mapping information from a MAP_ALL, UNMAP_ALL,
+ * MAP_RANGE or UNMAP_RANGE command
+ */
+static void MapUnmapCommandGetInfo(COMMAND_WRAPPER *psCommand,
+ COMMAND_TYPE eType,
+ IMG_DEV_VIRTADDR *psDevVAddrStart,
+ IMG_DEV_VIRTADDR *psDevVAddrEnd,
+ IMG_BOOL *pbMap,
+ IMG_UINT32 *pui32AllocIndex)
+{
+	if((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+ {
+ COMMAND_MAP_ALL *psMapAll = &psCommand->u.sMapAll;
+ RECORD_ALLOCATION *psAlloc;
+
+ *pbMap = (eType == COMMAND_TYPE_MAP_ALL);
+ *pui32AllocIndex = psMapAll->uiAllocIndex;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psMapAll->uiAllocIndex);
+
+ *psDevVAddrStart = psAlloc->sDevVAddr;
+ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr + psAlloc->uiSize - 1;
+ }
+	else if((eType == COMMAND_TYPE_MAP_RANGE) || (eType == COMMAND_TYPE_UNMAP_RANGE))
+ {
+ COMMAND_MAP_RANGE *psMapRange = &psCommand->u.sMapRange;
+ RECORD_ALLOCATION *psAlloc;
+ IMG_UINT32 ui32StartPage, ui32Count;
+
+ *pbMap = (eType == COMMAND_TYPE_MAP_RANGE);
+ *pui32AllocIndex = psMapRange->uiAllocIndex;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(psMapRange->uiAllocIndex);
+
+ MapRangeUnpack(psMapRange, &ui32StartPage, &ui32Count);
+
+ psDevVAddrStart->uiAddr = psAlloc->sDevVAddr.uiAddr +
+ ((1U << psAlloc->ui32Log2PageSize) * ui32StartPage);
+
+ psDevVAddrEnd->uiAddr = psDevVAddrStart->uiAddr +
+ ((1U << psAlloc->ui32Log2PageSize) * ui32Count) - 1;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid command type: %u",
+ __func__,
+ eType));
+ }
+}
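+
+/* Worked example for the range case: with a 4KB page size
+ * (ui32Log2PageSize == 12), start page 2 and a count of 3 pages, the
+ * range returned is [base + 0x2000, base + 0x4FFF], i.e. the end address
+ * is inclusive.
+ */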
+
+/* DevicememHistoryQuery:
+ * Entry point for rgxdebug to look up addresses relating to a page fault
+ */
+IMG_BOOL DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+ IMG_UINT32 ui32PageSizeBytes,
+ IMG_BOOL bMatchAnyAllocInPage)
+{
+ IMG_UINT32 ui32Head, ui32Iter;
+ COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+ COMMAND_WRAPPER *psCommand = NULL;
+ IMG_BOOL bLast = IMG_FALSE;
+ IMG_UINT64 ui64StartTime = OSClockns64();
+ IMG_UINT64 ui64TimeNs = 0;
+
+ /* initialise the results count for the caller */
+ psQueryOut->ui32NumResults = 0;
+
+ DevicememHistoryLock();
+
+ /* if the search is constrained to a particular PID then we
+ * first search the list of allocations to see if this
+ * PID is known to us
+ */
+ if(psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY)
+ {
+ IMG_UINT32 ui32Alloc;
+ ui32Alloc = gsDevicememHistoryData.sRecords.ui32AllocationsListHead;
+
+ while(ui32Alloc != END_OF_LIST)
+ {
+ RECORD_ALLOCATION *psAlloc;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32Alloc);
+
+ if(psAlloc->uiPID == psQueryIn->uiPID)
+ {
+ goto found_pid;
+ }
+
+			/* advance to the next allocation in the list */
+			ui32Alloc = psAlloc->ui32Next;
+
+			if(ui32Alloc == gsDevicememHistoryData.sRecords.ui32AllocationsListHead)
+			{
+				/* gone through whole list */
+ break;
+ }
+ }
+
+ /* PID not found, so we do not have any suitable data for this
+ * page fault
+ */
+ goto out_unlock;
+ }
+
+found_pid:
+
+ CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+ while(!bLast)
+ {
+ psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+ if(eType == COMMAND_TYPE_TIMESTAMP)
+ {
+ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+ continue;
+ }
+
+ if((eType == COMMAND_TYPE_MAP_ALL) ||
+ (eType == COMMAND_TYPE_UNMAP_ALL) ||
+ (eType == COMMAND_TYPE_MAP_RANGE) ||
+ (eType == COMMAND_TYPE_UNMAP_RANGE))
+ {
+ RECORD_ALLOCATION *psAlloc;
+ IMG_DEV_VIRTADDR sAllocStartAddrOrig, sAllocEndAddrOrig;
+ IMG_DEV_VIRTADDR sAllocStartAddr, sAllocEndAddr;
+ IMG_BOOL bMap;
+ IMG_UINT32 ui32AllocIndex;
+
+ MapUnmapCommandGetInfo(psCommand,
+ eType,
+ &sAllocStartAddrOrig,
+ &sAllocEndAddrOrig,
+ &bMap,
+ &ui32AllocIndex);
+
+ sAllocStartAddr = sAllocStartAddrOrig;
+ sAllocEndAddr = sAllocEndAddrOrig;
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+ /* skip this command if we need to search within
+ * a particular PID, and this allocation is not from
+ * that PID
+ */
+ if((psQueryIn->uiPID != DEVICEMEM_HISTORY_PID_ANY) &&
+ (psAlloc->uiPID != psQueryIn->uiPID))
+ {
+ continue;
+ }
+
+ /* if the allocation was created after this event, then this
+ * event must be for an old/removed allocation, so skip it
+ */
+ if(DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+ {
+ continue;
+ }
+
+ /* if the caller wants us to match any allocation in the
+ * same page as the allocation then tweak the real start/end
+ * addresses of the allocation here
+ */
+ if(bMatchAnyAllocInPage)
+ {
+ sAllocStartAddr.uiAddr = sAllocStartAddr.uiAddr & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+ sAllocEndAddr.uiAddr = (sAllocEndAddr.uiAddr + ui32PageSizeBytes - 1) & ~(IMG_UINT64) (ui32PageSizeBytes - 1);
+ }
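+			/* e.g. with ui32PageSizeBytes == 0x1000, an allocation
+			 * spanning 0x1040..0x1F3F is widened to 0x1000..0x2000,
+			 * so any allocation sharing the faulting page is matched
+			 */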
+
+ if((psQueryIn->sDevVAddr.uiAddr >= sAllocStartAddr.uiAddr) &&
+ (psQueryIn->sDevVAddr.uiAddr < sAllocEndAddr.uiAddr))
+ {
+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult = &psQueryOut->sResults[psQueryOut->ui32NumResults];
+
+ OSStringLCopy(psResult->szString, psAlloc->szName, sizeof(psResult->szString));
+ psResult->sBaseDevVAddr = psAlloc->sDevVAddr;
+ psResult->uiSize = psAlloc->uiSize;
+ psResult->bMap = bMap;
+ psResult->ui64Age = _CalculateAge(ui64StartTime, ui64TimeNs, TIME_STAMP_MASK);
+ psResult->ui64When = ui64TimeNs;
+ /* write the responsible PID in the placeholder */
+ psResult->sProcessInfo.uiPID = psAlloc->uiPID;
+
+ if((eType == COMMAND_TYPE_MAP_ALL) || (eType == COMMAND_TYPE_UNMAP_ALL))
+ {
+ psResult->bRange = IMG_FALSE;
+ psResult->bAll = IMG_TRUE;
+ }
+ else
+ {
+ psResult->bRange = IMG_TRUE;
+ MapRangeUnpack(&psCommand->u.sMapRange,
+ &psResult->ui32StartPage,
+ &psResult->ui32PageCount);
+ psResult->bAll = (psResult->ui32PageCount * (1U << psAlloc->ui32Log2PageSize))
+ == psAlloc->uiSize;
+ psResult->sMapStartAddr = sAllocStartAddrOrig;
+ psResult->sMapEndAddr = sAllocEndAddrOrig;
+ }
+
+ psQueryOut->ui32NumResults++;
+
+ if(psQueryOut->ui32NumResults == DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS)
+ {
+ break;
+ }
+ }
+ }
+ }
+
+out_unlock:
+ DevicememHistoryUnlock();
+
+ return psQueryOut->ui32NumResults > 0;
+}
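+
+/* Minimal usage sketch (hypothetical caller; in this driver rgxdebug
+ * performs the real lookup during page-fault analysis; sFaultAddress and
+ * ui32PageSizeBytes stand for the faulting address and device page size):
+ *
+ *     DEVICEMEM_HISTORY_QUERY_IN sIn;
+ *     DEVICEMEM_HISTORY_QUERY_OUT sOut;
+ *
+ *     sIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+ *     sIn.sDevVAddr = sFaultAddress;
+ *     if(DevicememHistoryQuery(&sIn, &sOut, ui32PageSizeBytes, IMG_TRUE))
+ *     {
+ *         // sOut.sResults[0] is the most recent matching map/unmap event
+ *     }
+ */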
+
+static void DeviceMemHistoryFmt(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN],
+ IMG_PID uiPID,
+ const IMG_CHAR *pszName,
+ const IMG_CHAR *pszAction,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT64 ui64TimeNs)
+{
+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+	           /* PID NAME ACTION MIN-MAX SIZE ABS-NS */
+ "%04u %-40s %-10s "
+ IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC " "
+ "0x%08llX "
+ "%013llu", /* 13 digits is over 2 hours of ns */
+ uiPID,
+ pszName,
+ pszAction,
+ sDevVAddrStart.uiAddr,
+ sDevVAddrEnd.uiAddr,
+ sDevVAddrEnd.uiAddr - sDevVAddrStart.uiAddr,
+ ui64TimeNs);
+}
+
+static void DeviceMemHistoryFmtHeader(IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN])
+{
+ OSSNPrintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN,
+	           "%-4s %-40s %-10s %10s %10s %8s %13s",
+ "PID",
+ "NAME",
+ "ACTION",
+ "ADDR MIN",
+ "ADDR MAX",
+ "SIZE",
+ "ABS NS");
+}
+
+static const char *CommandTypeToString(COMMAND_TYPE eType)
+{
+ switch(eType)
+ {
+ case COMMAND_TYPE_MAP_ALL:
+ return "MapAll";
+ case COMMAND_TYPE_UNMAP_ALL:
+ return "UnmapAll";
+ case COMMAND_TYPE_MAP_RANGE:
+ return "MapRange";
+ case COMMAND_TYPE_UNMAP_RANGE:
+ return "UnmapRange";
+ case COMMAND_TYPE_TIMESTAMP:
+ return "TimeStamp";
+ default:
+ return "???";
+ }
+}
+
+static void DevicememHistoryPrintAll(void *pvFilePtr, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ IMG_UINT32 ui32Iter;
+ IMG_UINT32 ui32Head;
+ IMG_BOOL bLast = IMG_FALSE;
+ IMG_UINT64 ui64TimeNs = 0;
+ IMG_UINT64 ui64StartTime = OSClockns64();
+
+ DeviceMemHistoryFmtHeader(szBuffer);
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+
+ CircularBufferIterateStart(&ui32Head, &ui32Iter);
+
+ while(!bLast)
+ {
+ COMMAND_WRAPPER *psCommand;
+ COMMAND_TYPE eType = COMMAND_TYPE_NONE;
+
+ psCommand = CircularBufferIteratePrevious(ui32Head, &ui32Iter, &eType, &bLast);
+
+ if(eType == COMMAND_TYPE_TIMESTAMP)
+ {
+ ui64TimeNs = TimeStampUnpack(&psCommand->u.sTimeStamp);
+ continue;
+ }
+
+ if((eType == COMMAND_TYPE_MAP_ALL) ||
+ (eType == COMMAND_TYPE_UNMAP_ALL) ||
+ (eType == COMMAND_TYPE_MAP_RANGE) ||
+ (eType == COMMAND_TYPE_UNMAP_RANGE))
+ {
+ RECORD_ALLOCATION *psAlloc;
+ IMG_DEV_VIRTADDR sDevVAddrStart, sDevVAddrEnd;
+ IMG_BOOL bMap;
+ IMG_UINT32 ui32AllocIndex;
+
+ MapUnmapCommandGetInfo(psCommand,
+ eType,
+ &sDevVAddrStart,
+ &sDevVAddrEnd,
+ &bMap,
+ &ui32AllocIndex);
+
+ psAlloc = ALLOC_INDEX_TO_PTR(ui32AllocIndex);
+
+ if(DO_TIME_STAMP_MASK(psAlloc->ui64CreationTime) > ui64TimeNs)
+ {
+ /* if this event relates to an allocation we
+ * are no longer tracking then do not print it
+ */
+ continue;
+ }
+
+ DeviceMemHistoryFmt(szBuffer,
+ psAlloc->uiPID,
+ psAlloc->szName,
+ CommandTypeToString(eType),
+ sDevVAddrStart,
+ sDevVAddrEnd,
+ ui64TimeNs);
+
+ pfnOSStatsPrintf(pvFilePtr, "%s\n", szBuffer);
+ }
+ }
+
+ pfnOSStatsPrintf(pvFilePtr, "\nTimestamp reference: %013llu\n", ui64StartTime);
+}
+
+static void DevicememHistoryPrintAllWrapper(void *pvFilePtr, void *pvData, OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ DevicememHistoryLock();
+ DevicememHistoryPrintAll(pvFilePtr, pfnOSStatsPrintf);
+ DevicememHistoryUnlock();
+}
+
+static PVRSRV_ERROR CreateRecords(void)
+{
+ gsDevicememHistoryData.sRecords.pasAllocations =
+ OSAllocMem(sizeof(RECORD_ALLOCATION) * ALLOCATION_LIST_NUM_ENTRIES);
+
+ if(gsDevicememHistoryData.sRecords.pasAllocations == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+	/* Allocate and initialise the circular buffer with zeros so every
+ * command is initialised as a command of type COMMAND_TYPE_NONE. */
+ gsDevicememHistoryData.sRecords.pasCircularBuffer =
+ OSAllocZMem(sizeof(COMMAND_WRAPPER) * CIRCULAR_BUFFER_NUM_COMMANDS);
+
+ if(gsDevicememHistoryData.sRecords.pasCircularBuffer == NULL)
+ {
+ OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PVRSRV_OK;
+}
+
+static void DestroyRecords(void)
+{
+ OSFreeMem(gsDevicememHistoryData.sRecords.pasCircularBuffer);
+ OSFreeMem(gsDevicememHistoryData.sRecords.pasAllocations);
+}
+
+static void InitialiseRecords(void)
+{
+ IMG_UINT32 i;
+
+ /* initialise the allocations list */
+
+ gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Prev = ALLOCATION_LIST_NUM_ENTRIES - 1;
+ gsDevicememHistoryData.sRecords.pasAllocations[0].ui32Next = 1;
+
+ for(i = 1; i < ALLOCATION_LIST_NUM_ENTRIES; i++)
+ {
+ gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Prev = i - 1;
+ gsDevicememHistoryData.sRecords.pasAllocations[i].ui32Next = i + 1;
+ }
+
+ gsDevicememHistoryData.sRecords.pasAllocations[ALLOCATION_LIST_NUM_ENTRIES - 1].ui32Next = 0;
+
+ gsDevicememHistoryData.sRecords.ui32AllocationsListHead = 0;
+}
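+
+/* After initialisation the allocation records form a circular
+ * doubly-linked list 0 <-> 1 <-> ... <-> (ALLOCATION_LIST_NUM_ENTRIES - 1)
+ * <-> 0, with the MRU head at index 0.
+ */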
+
+PVRSRV_ERROR DevicememHistoryInitKM(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&gsDevicememHistoryData.hLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create lock"));
+ goto err_lock;
+ }
+
+ eError = CreateRecords();
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevicememHistoryInitKM: Failed to create records"));
+ goto err_allocations;
+ }
+
+ InitialiseRecords();
+
+ gsDevicememHistoryData.pvStatsEntry = OSCreateStatisticEntry("devicemem_history",
+ NULL,
+ DevicememHistoryPrintAllWrapper,
+ NULL,
+ NULL,
+ NULL);
+
+ return PVRSRV_OK;
+
+err_allocations:
+ OSLockDestroy(gsDevicememHistoryData.hLock);
+err_lock:
+ return eError;
+}
+
+void DevicememHistoryDeInitKM(void)
+{
+ if(gsDevicememHistoryData.pvStatsEntry != NULL)
+ {
+ OSRemoveStatisticEntry(gsDevicememHistoryData.pvStatsEntry);
+ }
+
+ DestroyRecords();
+
+ OSLockDestroy(gsDevicememHistoryData.hLock);
+}
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.h
new file mode 100644
index 00000000000000..1c6766ac33b041
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_history_server.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@File devicemem_history_server.h
+@Title Resource Information abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Devicemem History functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_HISTORY_SERVER_H_
+#define _DEVICEMEM_HISTORY_SERVER_H_
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxmem.h"
+#include "devicemem_utils.h"
+
+extern PVRSRV_ERROR
+DevicememHistoryInitKM(void);
+
+extern void
+DevicememHistoryDeInitKM(void);
+
+PVRSRV_ERROR DevicememHistoryMapKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                              IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                              IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryMapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+                              IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistoryUnmapVRangeKM(IMG_DEV_VIRTADDR sBaseDevVAddr,
+ IMG_UINT32 ui32StartPage,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEVMEM_SIZE_T uiAllocSize,
+ const IMG_CHAR szName[DEVMEM_ANNOTATION_MAX_LEN],
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocationIndex,
+                              IMG_UINT32 *pui32AllocationIndexOut);
+
+PVRSRV_ERROR DevicememHistorySparseChangeKM(PMR *psPMR,
+ IMG_UINT32 ui32Offset,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const char szName[DEVMEM_ANNOTATION_MAX_LEN],
+                              IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *paui32AllocPageIndices,
+ IMG_UINT32 ui32FreePageCount,
+                              IMG_UINT32 *paui32FreePageIndices,
+                              IMG_UINT32 ui32AllocationIndex,
+ IMG_UINT32 *pui32AllocationIndexOut);
+
+/* used when the PID does not matter */
+#define DEVICEMEM_HISTORY_PID_ANY 0xFFFFFFFE
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_IN_
+{
+ IMG_PID uiPID;
+ IMG_DEV_VIRTADDR sDevVAddr;
+} DEVICEMEM_HISTORY_QUERY_IN;
+
+/* Store up to 4 results for a lookup. In the case of the faulting page being
+ * re-mapped between the page fault occurring on HW and the page fault analysis
+ * being done, the second result entry will show the allocation being unmapped.
+ * A further 2 entries are added to cater for multiple buffers in the same page.
+ */
+#define DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS 4
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_RESULT_
+{
+ IMG_CHAR szString[DEVMEM_ANNOTATION_MAX_LEN];
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+ size_t uiSize;
+ IMG_BOOL bMap;
+ IMG_BOOL bRange;
+ IMG_BOOL bAll;
+ IMG_UINT64 ui64When;
+ IMG_UINT64 ui64Age;
+ /* info for sparse map/unmap operations (i.e. bRange=IMG_TRUE) */
+ IMG_UINT32 ui32StartPage;
+ IMG_UINT32 ui32PageCount;
+ IMG_DEV_VIRTADDR sMapStartAddr;
+ IMG_DEV_VIRTADDR sMapEndAddr;
+ RGXMEM_PROCESS_INFO sProcessInfo;
+} DEVICEMEM_HISTORY_QUERY_OUT_RESULT;
+
+typedef struct _DEVICEMEM_HISTORY_QUERY_OUT_
+{
+ IMG_UINT32 ui32NumResults;
+ /* result 0 is the newest */
+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT sResults[DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS];
+} DEVICEMEM_HISTORY_QUERY_OUT;
+
+extern IMG_BOOL
+DevicememHistoryQuery(DEVICEMEM_HISTORY_QUERY_IN *psQueryIn,
+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut,
+ IMG_UINT32 ui32PageSizeBytes,
+ IMG_BOOL bMatchAnyAllocInPage);
+
+#endif /* _DEVICEMEM_HISTORY_SERVER_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.c b/drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.c
new file mode 100644
index 00000000000000..8625cea02ec6be
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.c
@@ -0,0 +1,334 @@
+/*************************************************************************/ /*!
+@File
+@Title Shared device memory management PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements common (client & server) PDump functions for the
+ memory management code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(PDUMP)
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pdump.h"
+#include "devicemem.h"
+#include "devicemem_utils.h"
+#include "devicemem_pdump.h"
+#include "client_pdumpmm_bridge.h"
+#if defined(LINUX) && !defined(__KERNEL__)
+#include <stdio.h>
+#if defined(SUPPORT_ANDROID_PLATFORM)
+#include "android_utils.h"
+#endif
+#endif
+
+IMG_INTERNAL void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+ eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ uiSize,
+ uiPDumpFlags,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(uiOffset + uiSize <= psMemDesc->psImport->uiSize);
+
+ eError = BridgePMRPDumpLoadMem(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ uiSize,
+ uiPDumpFlags,
+ IMG_TRUE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRPDumpLoadMemValue32(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ ui32Value,
+ uiPDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRPDumpLoadMemValue64(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ ui64Value,
+ uiPDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* FIXME: This should be server side only */
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T *puiMemOffset,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[100];
+ IMG_CHAR aszSymbolicName[100];
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ *puiMemOffset += psMemDesc->uiOffset;
+
+ eError = BridgePMRPDumpSymbolicAddr(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ *puiMemOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ puiMemOffset,
+ &uiNextSymName);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ OSSNPrintf(pszName, ui32Size, "%s:%s", &aszMemspaceName[0], &aszSymbolicName[0]);
+ return eError;
+}
+
+IMG_INTERNAL void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgePMRPDumpSaveToFile(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ uiSize,
+ OSStringLength(pszFilename) + 1,
+ pszFilename,
+ uiFileOffset);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+/* FIXME: Remove? */
+IMG_INTERNAL void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PdumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sDevAddrStart;
+
+ sDevAddrStart = psMemDesc->psImport->sDeviceImport.sDevVAddr;
+ sDevAddrStart.uiAddr += psMemDesc->uiOffset;
+ sDevAddrStart.uiAddr += uiOffset;
+
+ eError = BridgeDevmemIntPDumpSaveToFileVirtual(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->sDeviceImport.psHeap->psCtx->hDevMemServerContext,
+ sDevAddrStart,
+ uiSize,
+ OSStringLength(pszFilename) + 1,
+ pszFilename,
+ ui32FileOffset,
+ ui32PdumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiNumBytes;
+
+ uiNumBytes = 4;
+
+ if (psMemDesc->uiOffset + uiOffset + uiNumBytes > psMemDesc->psImport->uiSize)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ eError = BridgePMRPDumpPol32(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+
+ if ((psMemDesc->uiOffset + uiReadOffset) > psMemDesc->psImport->uiSize)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ eError = BridgePMRPDumpCBP(psMemDesc->psImport->hDevConnection,
+ psMemDesc->psImport->hPMR,
+ psMemDesc->uiOffset + uiReadOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+#endif /* PDUMP */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.h
new file mode 100644
index 00000000000000..cbf948d5ff53b2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_pdump.h
@@ -0,0 +1,346 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management PDump internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface to PDump device memory management
+ functions that are shared between client and server code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_PDUMP_H_
+#define _DEVICEMEM_PDUMP_H_
+
+#include "devicemem.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemPDumpLoadMem()
+ *
+ * Takes a memory descriptor, offset and size, writes the current contents
+ * of the memory at that location to the PRM PDump file, and emits a PDump
+ * LDB to load the data back from that file. The intention is that the
+ * contents of the simulated buffer upon PDump playback will be the same
+ * as they were when this command was run, enabling PDump of cases where
+ * the memory has been modified externally, i.e. by the host CPU or by a
+ * third party.
+ */
+extern void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadZeroMem()
+ *
+ * As DevmemPDumpLoadMem(), but the PDump allocation will be populated
+ * with zeros from the zero page in the parameter stream.
+ */
+extern void
+DevmemPDumpLoadZeroMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue32()
+ *
+ * As above, but dumps the 32-bit value at a dword-aligned address in
+ * plain text to the PDump script2 file. Useful for patching a buffer at
+ * PDump playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved with DevmemPDumpLoadMem(), but
+ * the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpLoadMemValue64()
+ *
+ * As above, but dumps the 64-bit value at a dword-aligned address in
+ * plain text to the PDump script2 file. Useful for patching a buffer at
+ * PDump playback by simply editing the script output file.
+ *
+ * (The same functionality can be achieved with DevmemPDumpLoadMem(), but
+ * the binary PARAM file must be patched in that case.)
+ */
+IMG_INTERNAL void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * DevmemPDumpPageCatBaseToSAddr()
+ *
+ * Returns the symbolic address of a piece of memory represented
+ * by an offset into the mem descriptor.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T *puiMemOffset,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32Size);
+
+/*
+ * DevmemPDumpSaveToFile()
+ *
+ * emits a pdump SAB to cause the current contents of the memory to be
+ * written to the given file during playback
+ */
+extern void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset);
+
+/*
+ * DevmemPDumpSaveToFileVirtual()
+ *
+ * emits a pdump SAB, just like DevmemPDumpSaveToFile(), but uses the
+ * virtual address and device MMU context to cause the pdump player to
+ * traverse the MMU page tables itself.
+ */
+extern void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PdumpFlags);
+
+
+/*
+ * DevmemPDumpDevmemPol32()
+ *
+ * Writes a PDump 'POL' command to wait for a masked 32-bit memory
+ * location to become the specified value.
+ */
+extern PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags);
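+
+/* Illustrative use (psSyncMemDesc is a placeholder MemDesc; the operator
+ * and flag names are those defined in pdumpdefs.h and pdump.h):
+ *
+ *     eError = DevmemPDumpDevmemPol32(psSyncMemDesc, 0,
+ *                                     1, 0xFFFFFFFF,
+ *                                     PDUMP_POLL_OPERATOR_EQUAL,
+ *                                     PDUMP_FLAGS_CONTINUOUS);
+ *
+ * emits a POL that waits for the first dword of the MemDesc to read back
+ * as 1.
+ */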
+
+/*
+ * DevmemPDumpCBP()
+ *
+ * Polls for space in circular buffer. Reads the read offset
+ * from memory and waits until there is enough space to write
+ * the packet.
+ *
+ * psMemDesc - MemDesc which contains the read offset
+ * uiReadOffset - Offset into MemDesc to the read offset
+ * uiWriteOffset - Current write offset
+ * uiPacketSize - Size of packet to write
+ * uiBufferSize - Size of circular buffer
+ */
+extern PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMem)
+#endif
+static INLINE void
+DevmemPDumpLoadMem(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue32)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue32(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue64)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue64(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui64Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpLoadMemValue)
+#endif
+static INLINE void
+DevmemPDumpLoadMemValue(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpPageCatBaseToSAddr)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpPageCatBaseToSAddr(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T *puiMemOffset,
+ IMG_CHAR *pszName,
+ IMG_UINT32 ui32Size)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(puiMemOffset);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFile)
+#endif
+static INLINE void
+DevmemPDumpSaveToFile(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpSaveToFileVirtual)
+#endif
+static INLINE void
+DevmemPDumpSaveToFileVirtual(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PdumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32PdumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpDevmemPol32)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpDevmemPol32(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemPDumpCBP(const DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDesc);
+ PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+#endif /* _DEVICEMEM_PDUMP_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_server.c b/drivers/gpu/drm/img-rogue/1.10/devicemem_server.c
new file mode 100644
index 00000000000000..4ef2bc3301d63a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_server.c
@@ -0,0 +1,1760 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Server-side component of the Device Memory Management.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* our exported API */
+#include "devicemem_server.h"
+#include "devicemem_utils.h"
+#include "devicemem.h"
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "physmem.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lock.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#define DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE (1 << 0)
+
+struct _DEVMEMINT_CTX_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /* MMU common code needs to have a context. There's a one-to-one
+ correspondence between device memory context and MMU context,
+ but we have the abstraction here so that we don't need to care
+ what the MMU does with its context, and the MMU code need not
+ know about us at all. */
+ MMU_CONTEXT *psMMUContext;
+
+ ATOMIC_T hRefCount;
+
+ /* This handle is for devices that require notification when a new
+ memory context is created and they need to store private data that
+ is associated with the context. */
+ IMG_HANDLE hPrivData;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ /* Protects access to sProcessNotifyListHead */
+ POSWR_LOCK hListLock;
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ /* The following tracks UM applications that need to be notified of a
+ * page fault */
+ DLLIST_NODE sProcessNotifyListHead;
+ /* The following is a node for the list of registered devmem contexts */
+ DLLIST_NODE sPageFaultNotifyListElem;
+
+ /* Device virtual address of a page fault on this context */
+ IMG_DEV_VIRTADDR sFaultAddress;
+
+ /* General purpose flags */
+ IMG_UINT32 ui32Flags;
+};
+
+struct _DEVMEMINT_CTX_EXPORT_
+{
+ DEVMEMINT_CTX *psDevmemCtx;
+ PMR *psPMR;
+ ATOMIC_T hRefCount;
+ DLLIST_NODE sNode;
+};
+
+struct _DEVMEMINT_HEAP_
+{
+ struct _DEVMEMINT_CTX_ *psDevmemCtx;
+ IMG_UINT32 uiLog2PageSize;
+ ATOMIC_T hRefCount;
+};
+
+struct _DEVMEMINT_RESERVATION_
+{
+ struct _DEVMEMINT_HEAP_ *psDevmemHeap;
+ IMG_DEV_VIRTADDR sBase;
+ IMG_DEVMEM_SIZE_T uiLength;
+};
+
+struct _DEVMEMINT_MAPPING_
+{
+ struct _DEVMEMINT_RESERVATION_ *psReservation;
+ PMR *psPMR;
+ IMG_UINT32 uiNumPages;
+};
+
+struct _DEVMEMINT_PF_NOTIFY_
+{
+ IMG_UINT32 ui32PID;
+ DLLIST_NODE sProcessNotifyListElem;
+};
+
+/*************************************************************************/ /*!
+@Function _DevmemIntCtxAcquire
+@Description Acquire a reference to the provided device memory context.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxAcquire(DEVMEMINT_CTX *psDevmemCtx)
+{
+ OSAtomicIncrement(&psDevmemCtx->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function _DevmemIntCtxRelease
+@Description Release the reference to the provided device memory context.
+ If this is the last reference which was taken then the
+ memory context will be freed.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntCtxRelease(DEVMEMINT_CTX *psDevmemCtx)
+{
+ if (OSAtomicDecrement(&psDevmemCtx->hRefCount) == 0)
+ {
+ /* The last reference has gone, destroy the context */
+ PVRSRV_DEVICE_NODE *psDevNode = psDevmemCtx->psDevNode;
+ DLLIST_NODE *psNode, *psNodeNext;
+
+ /* If there are any PIDs registered for page fault notification.
+ * Loop through the registered PIDs and free each one */
+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+ {
+ DEVMEMINT_PF_NOTIFY *psNotifyNode =
+ IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+ dllist_remove_node(psNode);
+ OSFreeMem(psNotifyNode);
+ }
+
+ /* If this context is in the list registered for a debugger, remove
+ * from that list */
+ if (dllist_node_is_in_list(&psDevmemCtx->sPageFaultNotifyListElem))
+ {
+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+ }
+
+ if (psDevNode->pfnUnregisterMemoryContext)
+ {
+ psDevNode->pfnUnregisterMemoryContext(psDevmemCtx->hPrivData);
+ }
+ MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockDestroy(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed memory context %p", __FUNCTION__, psDevmemCtx));
+ OSFreeMem(psDevmemCtx);
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _DevmemIntHeapAcquire
+@Description Acquire a reference to the provided device memory heap.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapAcquire(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ OSAtomicIncrement(&psDevmemHeap->hRefCount);
+}
+
+/*************************************************************************/ /*!
+@Function _DevmemIntHeapRelease
+@Description    Release a reference to the provided device memory heap.
+                Unlike the memory context, the heap structure itself is
+                not freed here when the count reaches zero.
+@Return None
+*/ /**************************************************************************/
+static INLINE void _DevmemIntHeapRelease(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ OSAtomicDecrement(&psDevmemHeap->hRefCount);
+}
+
+PVRSRV_ERROR
+DevmemIntUnpin(PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Unpin */
+ eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = PMRUnpinPMR(psPMR, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e_exit;
+ }
+
+ /* Invalidate mapping */
+ eError = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ psDevmemMapping->psReservation->sBase,
+ psDevmemMapping->uiNumPages,
+ psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+ IMG_FALSE, /* !< Choose to invalidate PT entries */
+ psPMR);
+
+e_exit:
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPin(PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Start the pinning */
+ eError = PMRPinPMR(psPMR);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eErrorMMU = PVRSRV_OK;
+ IMG_UINT32 uiLog2PageSize = psDevmemMapping->psReservation->psDevmemHeap->uiLog2PageSize;
+
+ /* Start the pinning */
+ eError = PMRPinPMR(psPMR);
+
+ if (eError == PVRSRV_OK)
+ {
+ /* Make mapping valid again */
+ eErrorMMU = MMU_ChangeValidity(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ psDevmemMapping->psReservation->sBase,
+ psDevmemMapping->uiNumPages,
+ uiLog2PageSize,
+ IMG_TRUE, /* !< Choose to make PT entries valid again */
+ psPMR);
+ }
+ else if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ {
+ /* If we lost the physical backing we have to map it again because
+ * the old physical addresses are not valid anymore. */
+ IMG_UINT32 uiFlags;
+ uiFlags = PMR_Flags(psPMR);
+
+ eErrorMMU = MMU_MapPages(psDevmemMapping->psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ psDevmemMapping->psReservation->sBase,
+ psPMR,
+ 0,
+ psDevmemMapping->uiNumPages,
+ NULL,
+ uiLog2PageSize);
+ }
+
+ /* Just overwrite eError if the mappings failed.
+ * PMR_NEW_MEMORY has to be propagated to the user. */
+ if (eErrorMMU != PVRSRV_OK)
+ {
+ eError = eErrorMMU;
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemServerGetImportHandle
+@Description For given exportable memory descriptor returns PMR handle.
+@Return         PVRSRV_OK if the memory is exportable
+                PVRSRV_ERROR failure code otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport)
+{
+ PVRSRV_ERROR eError;
+
+ if ((psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_EXPORTABLE) == 0)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION;
+ goto e0;
+ }
+
+ *phImport = psMemDesc->psImport->hPMR;
+ return PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemServerGetHeapHandle
+@Description    For a given reservation, returns the heap handle.
+@Return         PVRSRV_OK - Success
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+ IMG_HANDLE *phHeap)
+{
+ *phHeap = psReservation->psDevmemHeap;
+ return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+@Function DevmemIntCtxCreate
+@Description Creates and initialises a device memory context.
+@Return valid Device Memory context handle - Success
+ PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bKernelMemoryCtx,
+ DEVMEMINT_CTX **ppsDevmemCtxPtr,
+ IMG_HANDLE *hPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtx;
+ IMG_HANDLE hPrivDataInt = NULL;
+ MMU_DEVICEATTRIBS *psMMUDevAttrs;
+
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+ {
+ psMMUDevAttrs = bKernelMemoryCtx ? psDeviceNode->psFirmwareMMUDevAttrs :
+ psDeviceNode->psMMUDevAttrs;
+ }
+ else
+ {
+ psMMUDevAttrs = psDeviceNode->psMMUDevAttrs;
+ PVR_UNREFERENCED_PARAMETER(bKernelMemoryCtx);
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s", __FUNCTION__));
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* allocate a Devmem context */
+ psDevmemCtx = OSAllocMem(sizeof *psDevmemCtx);
+ if (psDevmemCtx == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+ goto fail_alloc;
+ }
+
+ OSAtomicWrite(&psDevmemCtx->hRefCount, 1);
+ psDevmemCtx->psDevNode = psDeviceNode;
+
+ /* Call down to MMU context creation */
+
+ eError = MMU_ContextCreate(psDeviceNode,
+ &psDevmemCtx->psMMUContext,
+ psMMUDevAttrs);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: MMU_ContextCreate failed", __FUNCTION__));
+ goto fail_mmucontext;
+ }
+
+ if (psDeviceNode->pfnRegisterMemoryContext)
+ {
+ eError = psDeviceNode->pfnRegisterMemoryContext(psDeviceNode, psDevmemCtx->psMMUContext, &hPrivDataInt);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register MMU context", __FUNCTION__));
+ goto fail_register;
+ }
+ }
+
+ /* Store the private data as it is required to unregister the memory context */
+ psDevmemCtx->hPrivData = hPrivDataInt;
+ *hPrivData = hPrivDataInt;
+ *ppsDevmemCtxPtr = psDevmemCtx;
+
+ /* Pass the CPU cache line size through the bridge to the user mode as it can't be queried in user mode.*/
+ *pui32CPUCacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+
+ /* Initialise the PID notify list */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockCreate(&psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ dllist_init(&(psDevmemCtx->sProcessNotifyListHead));
+ psDevmemCtx->sPageFaultNotifyListElem.psNextNode = NULL;
+ psDevmemCtx->sPageFaultNotifyListElem.psPrevNode = NULL;
+
+ /* Initialise page fault address */
+ psDevmemCtx->sFaultAddress.uiAddr = 0ULL;
+
+ /* Initialise flags */
+ psDevmemCtx->ui32Flags = 0;
+
+ return PVRSRV_OK;
+
+fail_register:
+ MMU_ContextDestroy(psDevmemCtx->psMMUContext);
+fail_mmucontext:
+ OSFreeMem(psDevmemCtx);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntHeapCreate
+@Description Creates and initialises a device memory heap.
+@Return valid Device Memory heap handle - Success
+ PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 uiLog2DataPageSize,
+ DEVMEMINT_HEAP **ppsDevmemHeapPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemHeap;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s", __FUNCTION__));
+
+ /* allocate a Devmem heap */
+ psDevmemHeap = OSAllocMem(sizeof *psDevmemHeap);
+ if (psDevmemHeap == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Alloc failed", __FUNCTION__));
+ goto fail_alloc;
+ }
+
+ psDevmemHeap->psDevmemCtx = psDevmemCtx;
+
+ _DevmemIntCtxAcquire(psDevmemHeap->psDevmemCtx);
+
+ OSAtomicWrite(&psDevmemHeap->hRefCount, 1);
+
+ psDevmemHeap->uiLog2PageSize = uiLog2DataPageSize;
+
+ *ppsDevmemHeapPtr = psDevmemHeap;
+
+ return PVRSRV_OK;
+
+fail_alloc:
+ return eError;
+}
+
+#define PVR_DUMMY_PAGE_INIT_VALUE (0x0)
+
+static PVRSRV_ERROR DevmemIntAllocDummyPage(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_BOOL bInitPage)
+{
+ IMG_UINT32 ui32Dummyref;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ OSLockAcquire(psDevNode->sDummyPage.psDummyPgLock);
+
+ /* We know there will not be 4G sparse PMRs, so a 32-bit refcount suffices */
+ ui32Dummyref = OSAtomicIncrement(&psDevNode->sDummyPage.atRefCounter);
+
+ if (1 == ui32Dummyref)
+ {
+ IMG_DEV_PHYADDR sDevPhysAddr;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Alloc Dummy page object");
+#endif
+ /* Allocate the dummy page required for sparse backing */
+ eError = DevPhysMemAlloc(psDevNode,
+ (1 << psDevNode->sDummyPage.ui32Log2DummyPgSize),
+ 0,
+ PVR_DUMMY_PAGE_INIT_VALUE,
+ bInitPage,
+#if defined(PDUMP)
+ psDevNode->psMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ DUMMY_PAGE,
+ &psDevNode->sDummyPage.hPdumpDummyPg,
+#endif
+ &psDevNode->sDummyPage.sDummyPageHandle,
+ &sDevPhysAddr);
+ if (PVRSRV_OK != eError)
+ {
+ OSAtomicDecrement(&psDevNode->sDummyPage.atRefCounter);
+ }
+ else
+ {
+ psDevNode->sDummyPage.ui64DummyPgPhysAddr = sDevPhysAddr.uiAddr;
+ }
+
+ }
+
+ OSLockRelease(psDevNode->sDummyPage.psDummyPgLock);
+
+ return eError;
+}
+
+static void DevmemIntFreeDummyPage(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ IMG_UINT32 ui32Dummyref = 0;
+
+ ui32Dummyref = OSAtomicRead(&psDevNode->sDummyPage.atRefCounter);
+
+ /* The refcount can still be 0, even for a sparse allocation, in the
+ * case where the dummy page allocation failed due to lack of memory */
+ if (0 != ui32Dummyref)
+ {
+ OSLockAcquire(psDevNode->sDummyPage.psDummyPgLock);
+
+ /* We know there will not be 4G sparse PMRs, so a 32-bit refcount suffices */
+ ui32Dummyref = OSAtomicDecrement(&psDevNode->sDummyPage.atRefCounter);
+
+ if (0 == ui32Dummyref)
+ {
+ PDUMPCOMMENT("Free Dummy page object");
+
+ /* Free the dummy page when refcount reaches zero */
+ DevPhysMemFree(psDevNode,
+#if defined(PDUMP)
+ psDevNode->sDummyPage.hPdumpDummyPg,
+#endif
+ &psDevNode->sDummyPage.sDummyPageHandle);
+
+#if defined(PDUMP)
+ psDevNode->sDummyPage.hPdumpDummyPg = NULL;
+#endif
+ psDevNode->sDummyPage.ui64DummyPgPhysAddr = MMU_BAD_PHYS_ADDR;
+ }
+
+ OSLockRelease(psDevNode->sDummyPage.psDummyPgLock);
+ }
+
+}
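+
+/* Illustrative note, inferred from the refcounting above: every successful
+ * DevmemIntAllocDummyPage() call is expected to be balanced by exactly one
+ * DevmemIntFreeDummyPage() call on the same device node. The page itself is
+ * only allocated on the 0 -> 1 refcount transition and only freed again on
+ * the 1 -> 0 transition, e.g.:
+ *
+ *     eError = DevmemIntAllocDummyPage(psDevNode, IMG_TRUE);
+ *     ... map a sparse PMR that uses the dummy backing ...
+ *     DevmemIntFreeDummyPage(psDevNode);
+ */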
+
+PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVRSRV_ERROR eError;
+
+ if (psReservation->psDevmemHeap->uiLog2PageSize > PMR_GetLog2Contiguity(psPMR))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "%s: Device heap and PMR have incompatible Log2Contiguity (%u - %u). "
+ "PMR contiguity must be a multiple of the heap contiguity!",
+ __func__,
+ psReservation->psDevmemHeap->uiLog2PageSize,
+ PMR_GetLog2Contiguity(psPMR) ));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ eError = MMU_MapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ psPMR,
+ ui32PhysicalPgOffset,
+ ui32PageCount,
+ NULL,
+ psReservation->psDevmemHeap->uiLog2PageSize);
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount)
+{
+ /* Unmap the pages and mark them invalid in the MMU PTE */
+ MMU_UnmapPages(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ 0,
+ sDevVAddrBase,
+ ui32PageCount,
+ NULL,
+ psReservation->psDevmemHeap->uiLog2PageSize,
+ IMG_FALSE);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ DEVMEMINT_MAPPING **ppsMappingPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_MAPPING *psMapping;
+ /* number of pages (device pages) that allocation spans */
+ IMG_UINT32 ui32NumDevPages;
+ /* device virtual address of start of allocation */
+ IMG_DEV_VIRTADDR sAllocationDevVAddr;
+ /* and its length */
+ IMG_DEVMEM_SIZE_T uiAllocationSize;
+ IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+ IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PMR_FLAGS_T uiPMRFlags;
+
+ if (uiLog2HeapContiguity > PMR_GetLog2Contiguity(psPMR))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "%s: Device heap and PMR have incompatible contiguity (%u - %u). "
+ "PMR contiguity must be a multiple of the heap contiguity!",
+ __func__,
+ uiLog2HeapContiguity,
+ PMR_GetLog2Contiguity(psPMR) ));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ psDevNode = psDevmemHeap->psDevmemCtx->psDevNode;
+
+ /* allocate memory to record the mapping info */
+ psMapping = OSAllocMem(sizeof *psMapping);
+ if (psMapping == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "DevmemIntMapPMR: Alloc failed"));
+ goto e0;
+ }
+
+ uiAllocationSize = psReservation->uiLength;
+
+ ui32NumDevPages = 0xffffffffU & (((uiAllocationSize - 1) >> uiLog2HeapContiguity) + 1);
+ PVR_ASSERT(ui32NumDevPages << uiLog2HeapContiguity == uiAllocationSize);
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ sAllocationDevVAddr = psReservation->sBase;
+
+ /* Check if the PMR that needs to be mapped is sparse */
+ bIsSparse = PMR_IsSparse(psPMR);
+ if (bIsSparse)
+ {
+ /* Get the flags */
+ uiPMRFlags = PMR_Flags(psPMR);
+ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+ if (bNeedBacking)
+ {
+ /* Errors are logged within the function if anything fails.
+ * If the allocation fails we must fail the map request and
+ * return an appropriate error.
+ *
+ * The dummy page is allocated after the PMR pages have been locked
+ * down physically; this ordering favours the common case, in which
+ * the dummy page allocation succeeds once the pages are locked.
+ * If the dummy page allocation fails we unlock the physical addresses
+ * again; the impact is somewhat higher in on-demand mode of operation. */
+ eError = DevmemIntAllocDummyPage(psDevmemHeap->psDevmemCtx->psDevNode, IMG_TRUE);
+ if (PVRSRV_OK != eError)
+ {
+ goto e3;
+ }
+ }
+
+ /* N.B. We pass mapping permission flags to MMU_MapPages and let
+ * it reject the mapping if the permissions on the PMR are not compatible. */
+ eError = MMU_MapPages(psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiMapFlags,
+ sAllocationDevVAddr,
+ psPMR,
+ 0,
+ ui32NumDevPages,
+ NULL,
+ uiLog2HeapContiguity);
+ if (PVRSRV_OK != eError)
+ {
+ goto e4;
+ }
+ }
+ else
+ {
+ eError = MMU_MapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+ sAllocationDevVAddr,
+ psPMR,
+ ui32NumDevPages << uiLog2HeapContiguity,
+ uiMapFlags,
+ uiLog2HeapContiguity);
+ if (PVRSRV_OK != eError)
+ {
+ goto e3;
+ }
+ }
+
+ psMapping->psReservation = psReservation;
+ psMapping->uiNumPages = ui32NumDevPages;
+ psMapping->psPMR = psPMR;
+
+ /* Don't bother with refcount on reservation, as a reservation
+ only ever holds one mapping, so we directly increment the
+ refcount on the heap instead */
+ _DevmemIntHeapAcquire(psMapping->psReservation->psDevmemHeap);
+
+ *ppsMappingPtr = psMapping;
+
+ return PVRSRV_OK;
+e4:
+ if (bNeedBacking)
+ {
+ /* If the mapping failed, the dummy page refcount needs
+ * to be adjusted accordingly */
+ DevmemIntFreeDummyPage(psDevmemHeap->psDevmemCtx->psDevNode);
+ }
+e3:
+ {
+ PVRSRV_ERROR eError1 = PVRSRV_OK;
+ eError1 = PMRUnlockSysPhysAddresses(psPMR);
+ if (PVRSRV_OK != eError1)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Failed to unlock the physical addresses",__func__));
+ }
+ *ppsMappingPtr = NULL;
+ }
+e2:
+ OSFreeMem(psMapping);
+
+e0:
+ PVR_ASSERT (eError != PVRSRV_OK);
+ return eError;
+}
+
+
+PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_HEAP *psDevmemHeap = psMapping->psReservation->psDevmemHeap;
+ /* device virtual address of start of allocation */
+ IMG_DEV_VIRTADDR sAllocationDevVAddr;
+ /* number of pages (device pages) that allocation spans */
+ IMG_UINT32 ui32NumDevPages;
+ IMG_BOOL bIsSparse = IMG_FALSE, bNeedBacking = IMG_FALSE;
+ PMR_FLAGS_T uiPMRFlags;
+
+ ui32NumDevPages = psMapping->uiNumPages;
+ sAllocationDevVAddr = psMapping->psReservation->sBase;
+
+ /* Check if the PMR that is being unmapped is sparse */
+ bIsSparse = PMR_IsSparse(psMapping->psPMR);
+
+ if (bIsSparse)
+ {
+ /* Get the flags */
+ uiPMRFlags = PMR_Flags(psMapping->psPMR);
+ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+ if (bNeedBacking)
+ {
+ DevmemIntFreeDummyPage(psDevmemHeap->psDevmemCtx->psDevNode);
+ }
+
+ MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ 0,
+ sAllocationDevVAddr,
+ ui32NumDevPages,
+ NULL,
+ psMapping->psReservation->psDevmemHeap->uiLog2PageSize,
+ IMG_FALSE);
+ }
+ else
+ {
+ MMU_UnmapPMRFast(psDevmemHeap->psDevmemCtx->psMMUContext,
+ sAllocationDevVAddr,
+ ui32NumDevPages,
+ psMapping->psReservation->psDevmemHeap->uiLog2PageSize);
+ }
+
+ eError = PMRUnlockSysPhysAddresses(psMapping->psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Don't bother with refcount on reservation, as a reservation
+ only ever holds one mapping, so we directly decrement the
+ refcount on the heap instead */
+ _DevmemIntHeapRelease(psDevmemHeap);
+
+ OSFreeMem(psMapping);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
+ IMG_DEVMEM_SIZE_T uiAllocationSize,
+ DEVMEMINT_RESERVATION **ppsReservationPtr)
+{
+ PVRSRV_ERROR eError;
+ DEVMEMINT_RESERVATION *psReservation;
+
+ /* allocate memory to record the reservation info */
+ psReservation = OSAllocMem(sizeof *psReservation);
+ if (psReservation == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF ((PVR_DBG_ERROR, "DevmemIntReserveRange: Alloc failed"));
+ goto e0;
+ }
+
+ psReservation->sBase = sAllocationDevVAddr;
+ psReservation->uiLength = uiAllocationSize;
+
+ eError = MMU_Alloc(psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiAllocationSize,
+ &uiAllocationSize,
+ 0, /* IMG_UINT32 uiProtFlags */
+ 0, /* alignment is n/a since we supply devvaddr */
+ &sAllocationDevVAddr,
+ psDevmemHeap->uiLog2PageSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ /* since we supplied the virt addr, MMU_Alloc shouldn't have
+ chosen a new one for us */
+ PVR_ASSERT(sAllocationDevVAddr.uiAddr == psReservation->sBase.uiAddr);
+
+ _DevmemIntHeapAcquire(psDevmemHeap);
+
+ psReservation->psDevmemHeap = psDevmemHeap;
+ *ppsReservationPtr = psReservation;
+
+ return PVRSRV_OK;
+
+ /*
+ * error exit paths follow
+ */
+
+e1:
+ OSFreeMem(psReservation);
+
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psReservation)
+{
+ IMG_DEV_VIRTADDR sBase = psReservation->sBase;
+ IMG_DEVMEM_SIZE_T uiLength = psReservation->uiLength;
+ IMG_UINT32 uiLog2DataPageSize = psReservation->psDevmemHeap->uiLog2PageSize;
+
+ MMU_Free(psReservation->psDevmemHeap->psDevmemCtx->psMMUContext,
+ sBase,
+ uiLength,
+ uiLog2DataPageSize);
+
+ _DevmemIntHeapRelease(psReservation->psDevmemHeap);
+ OSFreeMem(psReservation);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap)
+{
+ if (OSAtomicRead(&psDevmemHeap->hRefCount) != 1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BUG! %s called but has too many references (%d) "
+ "which probably means allocations have been made from the heap and not freed",
+ __FUNCTION__,
+ OSAtomicRead(&psDevmemHeap->hRefCount)));
+
+ /*
+ * Try again later when you've freed all the memory
+ *
+ * Note:
+ * We don't expect the application to retry (after all this call would
+ * succeed if the client had freed all the memory which it should have
+ * done before calling this function). However, given there should be
+ * an associated handle, when the handle base is destroyed it will free
+ * any allocations leaked by the client and then it will retry this call,
+ * which should then succeed.
+ */
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ PVR_ASSERT(OSAtomicRead(&psDevmemHeap->hRefCount) == 1);
+
+ _DevmemIntCtxRelease(psDevmemHeap->psDevmemCtx);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Freed heap %p", __FUNCTION__, psDevmemHeap));
+ OSFreeMem(psDevmemHeap);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+ PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT64 sCpuVAddrBase)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ IMG_UINT32 uiLog2PMRContiguity = PMR_GetLog2Contiguity(psPMR);
+ IMG_UINT32 uiLog2HeapContiguity = psDevmemHeap->uiLog2PageSize;
+ IMG_UINT32 uiOrderDiff = uiLog2PMRContiguity - uiLog2HeapContiguity;
+ IMG_UINT32 uiPagesPerOrder = 1 << uiOrderDiff;
+
+ IMG_UINT32 *pai32MapIndices = pai32AllocIndices;
+ IMG_UINT32 *pai32UnmapIndices = pai32FreeIndices;
+ IMG_UINT32 uiMapPageCount = ui32AllocPageCount;
+ IMG_UINT32 uiUnmapPageCount = ui32FreePageCount;
+
+ /* Special case:
+ * Adjust indices if we map into a heap that uses smaller page sizes
+ * than the physical allocation itself.
+ * The incoming parameters are all based on the page size of the PMR,
+ * but the mapping functions expect parameters in terms of heap page sizes. */
+ if (uiOrderDiff != 0)
+ {
+ IMG_UINT32 uiPgIdx, uiPgOffset;
+
+ uiMapPageCount = (uiMapPageCount << uiOrderDiff);
+ uiUnmapPageCount = (uiUnmapPageCount << uiOrderDiff);
+
+ pai32MapIndices = OSAllocMem(uiMapPageCount * sizeof(*pai32MapIndices));
+ if (!pai32MapIndices)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pai32UnmapIndices = OSAllocMem(uiUnmapPageCount * sizeof(*pai32UnmapIndices));
+ if (!pai32UnmapIndices)
+ {
+ OSFreeMem(pai32MapIndices);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /* Every chunk index needs to be translated from physical indices
+ * into heap based indices. */
+ for (uiPgIdx = 0; uiPgIdx < ui32AllocPageCount; uiPgIdx++)
+ {
+ for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+ {
+ pai32MapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+ pai32AllocIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+ }
+ }
+
+ for (uiPgIdx = 0; uiPgIdx < ui32FreePageCount; uiPgIdx++)
+ {
+ for (uiPgOffset = 0; uiPgOffset < uiPagesPerOrder; uiPgOffset++)
+ {
+ pai32UnmapIndices[uiPgIdx*uiPagesPerOrder + uiPgOffset] =
+ pai32FreeIndices[uiPgIdx]*uiPagesPerOrder + uiPgOffset;
+ }
+ }
+ }
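+
+ /* Worked example (illustrative): if a PMR with 16KB contiguity is mapped
+ * into a heap with 4KB pages, then uiOrderDiff = 2 and uiPagesPerOrder = 4,
+ * so a single PMR index 3 expands to the heap indices 12, 13, 14 and 15
+ * in pai32MapIndices/pai32UnmapIndices. */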
+
+ /*
+ * The steps below are carried out in the following order, and the
+ * ordering is very important in this case:
+ *
+ * 1. The parameters are validated in PMR_ChangeSparseMem below.
+ * A successful response indicates all the parameters are correct.
+ * On failure we bail out from here without processing further.
+ * 2. On success, carry out the PMR-specific operations: page alloc,
+ * page free and the corresponding PMR status changes.
+ * If this call fails, the prior state of the PMR is guaranteed to
+ * be undisturbed. If it succeeds, we can go ahead with the
+ * subsequent steps.
+ * 3. Invalidate the GPU page table entries for the pages to be freed.
+ * 4. Write the GPU page table entries for the pages that got allocated.
+ * 5. Change the corresponding CPU address space map.
+ *
+ * The above steps can be selectively controlled using flags.
+ */
+ if (uiSparseFlags & (SPARSE_REMAP_MEM | SPARSE_RESIZE_BOTH))
+ {
+ /* Do the PMR specific changes first */
+ eError = PMR_ChangeSparseMem(psPMR,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiSparseFlags);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to do PMR specific changes.",
+ __func__));
+ goto e1;
+ }
+
+ /* Invalidate the page table entries of the freed pages.
+ * A later optimisation would be to not touch the ones that get re-mapped */
+ if ((0 != ui32FreePageCount) && (uiSparseFlags & SPARSE_RESIZE_FREE))
+ {
+ PMR_FLAGS_T uiPMRFlags;
+ IMG_BOOL bNeedBacking = IMG_FALSE;
+
+ /*Get the flags*/
+ uiPMRFlags = PMR_Flags(psPMR);
+ bNeedBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiPMRFlags);
+
+ if (SPARSE_REMAP_MEM != (uiSparseFlags & SPARSE_REMAP_MEM))
+ {
+ /* Unmap the pages and mark them invalid in the MMU PTE */
+ MMU_UnmapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ uiUnmapPageCount,
+ pai32UnmapIndices,
+ uiLog2HeapContiguity,
+ bNeedBacking);
+ }
+ }
+
+ /* Wire up the page tables for the pages that got allocated */
+ if ((0 != ui32AllocPageCount) && (uiSparseFlags & SPARSE_RESIZE_ALLOC))
+ {
+ /* Map the pages and mark them Valid in the MMU PTE */
+ eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ psPMR,
+ 0,
+ uiMapPageCount,
+ pai32MapIndices,
+ uiLog2HeapContiguity);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to map alloc indices.",
+ __func__));
+ goto e1;
+ }
+ }
+
+ /* Currently only used for debug */
+ if (SPARSE_REMAP_MEM == (uiSparseFlags & SPARSE_REMAP_MEM))
+ {
+ eError = MMU_MapPages (psDevmemHeap->psDevmemCtx->psMMUContext,
+ uiFlags,
+ sDevVAddrBase,
+ psPMR,
+ 0,
+ uiMapPageCount,
+ pai32UnmapIndices,
+ uiLog2HeapContiguity);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to map Free indices.",
+ __func__));
+ goto e1;
+ }
+ }
+ }
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+ /* Do the changes in sparse on to the CPU virtual map accordingly */
+ if (uiSparseFlags & SPARSE_MAP_CPU_ADDR)
+ {
+ if (sCpuVAddrBase != 0)
+ {
+ eError = PMR_ChangeSparseMemCPUMap(psPMR,
+ sCpuVAddrBase,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Failed to map to CPU addr space.",
+ __func__));
+ goto e1;
+ }
+ }
+ }
+#endif
+
+e1:
+ if (pai32MapIndices != pai32AllocIndices)
+ {
+ OSFreeMem(pai32MapIndices);
+ }
+ if (pai32UnmapIndices != pai32FreeIndices)
+ {
+ OSFreeMem(pai32UnmapIndices);
+ }
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntCtxDestroy
+@Description Destroys a context created by DevmemIntCtxCreate
+@Input psDevmemCtx Device Memory context
+@Return cannot fail.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx)
+{
+ /*
+ We can't determine here whether we should be freeing the context,
+ as a refcount != 1 could be due either to heap(s) remaining with
+ allocations on them, or to this memory context having been exported.
+ As the client couldn't do anything useful with this information
+ anyway, and since the refcount ensures we only free the context
+ when _all_ references have been released, don't bother checking
+ and just return OK regardless.
+ */
+ _DevmemIntCtxRelease(psDevmemCtx);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR sDevAddr)
+{
+ IMG_UINT32 i, j, uiLog2HeapPageSize = 0;
+ DEVICE_MEMORY_INFO *psDinfo = &psDevNode->sDevMemoryInfo;
+ DEVMEM_HEAP_CONFIG *psConfig = psDinfo->psDeviceMemoryHeapConfigArray;
+
+ IMG_BOOL bFound = IMG_FALSE;
+
+ for (i = 0;
+ i < psDinfo->uiNumHeapConfigs && !bFound;
+ i++)
+ {
+ for (j = 0;
+ j < psConfig[i].uiNumHeaps && !bFound;
+ j++)
+ {
+ IMG_DEV_VIRTADDR uiBase =
+ psConfig[i].psHeapBlueprintArray[j].sHeapBaseAddr;
+ IMG_DEVMEM_SIZE_T uiSize =
+ psConfig[i].psHeapBlueprintArray[j].uiHeapLength;
+
+ if ((sDevAddr.uiAddr >= uiBase.uiAddr) &&
+ (sDevAddr.uiAddr < (uiBase.uiAddr + uiSize)))
+ {
+ uiLog2HeapPageSize =
+ psConfig[i].psHeapBlueprintArray[j].uiLog2DataPageSize;
+ bFound = IMG_TRUE;
+ }
+ }
+ }
+
+ if (uiLog2HeapPageSize == 0)
+ {
+ return PVRSRV_ERROR_INVALID_GPU_ADDR;
+ }
+
+ return MMU_IsVDevAddrValid(psDevMemContext->psMMUContext,
+ uiLog2HeapPageSize,
+ sDevAddr) ? PVRSRV_OK : PVRSRV_ERROR_INVALID_GPU_ADDR;
+}
+
+PVRSRV_ERROR DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR *psFaultAddress)
+{
+ if ((psDevMemContext->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0)
+ {
+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ *psFaultAddress = psDevMemContext->sFaultAddress;
+ psDevMemContext->ui32Flags &= ~DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE;
+
+ return PVRSRV_OK;
+}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+static POSWR_LOCK g_hExportCtxListLock;
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+static DLLIST_NODE g_sExportCtxList;
+
+PVRSRV_ERROR
+DevmemIntInit(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ dllist_init(&g_sExportCtxList);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSWRLockCreate(&g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntDeInit(void)
+{
+ PVR_ASSERT(dllist_is_empty(&g_sExportCtxList));
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockDestroy(g_hExportCtxListLock);
+#endif
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+ PMR *psPMR,
+ DEVMEMINT_CTX_EXPORT **ppsContextExport)
+{
+ DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+ psCtxExport = OSAllocMem(sizeof(DEVMEMINT_CTX_EXPORT));
+ if (psCtxExport == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to export context. System currently out of memory",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Take the references only once the allocation can no longer fail,
+ * so that the out-of-memory path does not leak a context or PMR reference */
+ _DevmemIntCtxAcquire(psContext);
+ PMRRefPMR(psPMR);
+
+ psCtxExport->psDevmemCtx = psContext;
+ psCtxExport->psPMR = psPMR;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ dllist_add_to_tail(&g_sExportCtxList, &psCtxExport->sNode);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ *ppsContextExport = psCtxExport;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport)
+{
+ PMRUnrefPMR(psContextExport->psPMR);
+ _DevmemIntCtxRelease(psContextExport->psDevmemCtx);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ dllist_remove_node(&psContextExport->sNode);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseWrite(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ OSFreeMem(psContextExport);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+ DEVMEMINT_CTX **ppsContext,
+ IMG_HANDLE *phPrivData)
+{
+ PDLLIST_NODE psListNode, psListNodeNext;
+ DEVMEMINT_CTX_EXPORT *psCtxExport;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireRead(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ /* Find context from list using PMR as key */
+ dllist_foreach_node(&g_sExportCtxList, psListNode, psListNodeNext)
+ {
+ psCtxExport = IMG_CONTAINER_OF(psListNode, DEVMEMINT_CTX_EXPORT, sNode);
+ if (psCtxExport->psPMR == psPMR)
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ _DevmemIntCtxAcquire(psCtxExport->psDevmemCtx);
+ *ppsContext = psCtxExport->psDevmemCtx;
+ *phPrivData = psCtxExport->psDevmemCtx->hPrivData;
+ return PVRSRV_OK;
+ }
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(g_hExportCtxListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ /* Unable to find exported context, return error */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire remote context. Could not retrieve context with given PMR",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntRegisterPFNotify
+@Description Registers a PID to be notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be notified about.
+@Input ui32PID The PID of the process that would like to be
+ notified.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_INT32 ui32PID,
+ IMG_BOOL bRegister)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ DLLIST_NODE *psNode, *psNodeNext;
+ DEVMEMINT_PF_NOTIFY *psNotifyNode;
+ IMG_BOOL bPresent = IMG_FALSE;
+
+ if (psDevmemCtx == NULL)
+ {
+ PVR_ASSERT(!"Devmem Context Missing");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevNode = psDevmemCtx->psDevNode;
+
+ if (bRegister)
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ /* If this is the first PID in the list, the device memory context
+ * needs to be registered for notification */
+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ dllist_add_to_tail(&psDevNode->sMemoryContextPageFaultNotifyListHead,
+ &psDevmemCtx->sPageFaultNotifyListElem);
+ }
+ else
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ }
+ }
+
+ /* Loop through the registered PIDs and check whether this one is
+ * present */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+ {
+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+ if (psNotifyNode->ui32PID == ui32PID)
+ {
+ bPresent = IMG_TRUE;
+ break;
+ }
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ if (bRegister == IMG_TRUE)
+ {
+ if (bPresent)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to register a PID that is already registered",
+ __func__));
+ return PVRSRV_ERROR_PID_ALREADY_REGISTERED;
+ }
+
+ psNotifyNode = OSAllocMem(sizeof(*psNotifyNode));
+ if (psNotifyNode == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to allocate memory for the notify list",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psNotifyNode->ui32PID = ui32PID;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ dllist_add_to_tail(&(psDevmemCtx->sProcessNotifyListHead), &(psNotifyNode->sProcessNotifyListElem));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ }
+ else
+ {
+ if (!bPresent)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to unregister a PID that is not registered",
+ __func__));
+ return PVRSRV_ERROR_PID_NOT_REGISTERED;
+ }
+ dllist_remove_node(psNode);
+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+ OSFreeMem(psNotifyNode);
+ }
+
+ if (!bRegister)
+ {
+ /* If the last process in the list is being unregistered, then also
+ * unregister the device memory context from the notify list. */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ if (dllist_is_empty(&psDevmemCtx->sProcessNotifyListHead))
+ {
+ dllist_remove_node(&psDevmemCtx->sPageFaultNotifyListElem);
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseWrite(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function DevmemIntPFNotify
+@Description Notifies any processes that have registered themselves to be
+ notified when a page fault happens on a specific device memory
+ context.
+@Input *psDevNode The device node.
+@Input ui64FaultedPCAddress The page catalogue address that faulted.
+@Input sFaultAddress The address that triggered the fault.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT64 ui64FaultedPCAddress,
+ IMG_DEV_VIRTADDR sFaultAddress)
+{
+ DLLIST_NODE *psNode, *psNodeNext;
+ DEVMEMINT_PF_NOTIFY *psNotifyNode;
+ PVRSRV_ERROR eError;
+ DEVMEMINT_CTX *psDevmemCtx = NULL;
+ IMG_BOOL bFailed = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ if (dllist_is_empty(&(psDevNode->sMemoryContextPageFaultNotifyListHead)))
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ return PVRSRV_OK;
+ }
+
+ dllist_foreach_node(&(psDevNode->sMemoryContextPageFaultNotifyListHead), psNode, psNodeNext)
+ {
+ DEVMEMINT_CTX *psThisContext =
+ IMG_CONTAINER_OF(psNode, DEVMEMINT_CTX, sPageFaultNotifyListElem);
+ IMG_DEV_PHYADDR sPCDevPAddr;
+
+ eError = MMU_AcquireBaseAddr(psThisContext->psMMUContext, &sPCDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to Acquire Base Address (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+ return eError;
+ }
+
+ if (sPCDevPAddr.uiAddr == ui64FaultedPCAddress)
+ {
+ psDevmemCtx = psThisContext;
+ break;
+ }
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevNode->hMemoryContextPageFaultNotifyListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ if (psDevmemCtx == NULL)
+ {
+ /* Not found, just return */
+ return PVRSRV_OK;
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockAcquireRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ /*
+ * Store the first occurrence of a page fault address,
+ * until that address is consumed by a client.
+ */
+ if ((psDevmemCtx->ui32Flags & DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE) == 0)
+ {
+ psDevmemCtx->sFaultAddress = sFaultAddress;
+ psDevmemCtx->ui32Flags |= DEVMEMCTX_FLAGS_FAULT_ADDRESS_AVAILABLE;
+ }
+
+ /* Loop through each registered PID and send a signal to the process */
+ dllist_foreach_node(&(psDevmemCtx->sProcessNotifyListHead), psNode, psNodeNext)
+ {
+ psNotifyNode = IMG_CONTAINER_OF(psNode, DEVMEMINT_PF_NOTIFY, sProcessNotifyListElem);
+
+ eError = OSDebugSignalPID(psNotifyNode->ui32PID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to signal process for PID: %u",
+ __func__,
+ psNotifyNode->ui32PID));
+
+ PVR_ASSERT(!"Unable to signal process");
+
+ bFailed = IMG_TRUE;
+ }
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockReleaseRead(psDevmemCtx->hListLock);
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ if (bFailed)
+ {
+ return PVRSRV_ERROR_SIGNAL_FAILED;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if defined (PDUMP)
+IMG_UINT32 DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext)
+{
+ IMG_UINT32 ui32MMUContextID;
+ MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32MMUContextID);
+ return ui32MMUContextID;
+}
+
+PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32ArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiPDumpMMUCtx;
+
+
+
+ eError = MMU_AcquirePDumpMMUContext(psDevmemCtx->psMMUContext,
+ &uiPDumpMMUCtx);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /*
+ The following SYSMEM refers to the 'MMU Context'; hence it should be
+ the MMU context, not the PMR, that says what the PDump MemSpace tag is.
+ From a PDump point of view it doesn't matter which namespace we use,
+ as long as that MemSpace is used on the 'MMU Context' we're dumping from.
+ */
+ eError = PDumpMMUSAB(psDevmemCtx->psDevNode->sDevId.pszPDumpDevName,
+ uiPDumpMMUCtx,
+ sDevAddrStart,
+ uiSize,
+ pszFilename,
+ ui32FileOffset,
+ ui32PDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ MMU_ReleasePDumpMMUContext(psDevmemCtx->psMMUContext);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ IMG_UINT32 ui32ContextID;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemIntPDumpBitmap: Failed to acquire MMU context"));
+ return PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID;
+ }
+
+ eError = PDumpBitmapKM(psDeviceNode,
+ pszFileName,
+ ui32FileOffset,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ sDevBaseAddr,
+ ui32ContextID,
+ ui32Size,
+ ePixelFormat,
+ ui32AddrMode,
+ ui32PDumpFlags);
+
+ /* Don't care about return value */
+ MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+DevmemIntPdumpImageDescriptor(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ const IMG_CHAR *pszFileName,
+ IMG_DEV_VIRTADDR sData,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeader,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ IMG_UINT32 ui32ContextID;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = MMU_AcquirePDumpMMUContext(psDevMemContext->psMMUContext, &ui32ContextID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemIntPdumpImageDescriptor: Failed to acquire MMU context"));
+ return eError;
+ }
+
+ eError = PDumpImageDescriptorKM(psDeviceNode,
+ ui32ContextID,
+ ui32Size,
+ (IMG_CHAR *)pszFileName,
+ sData,
+ ui32DataSize,
+ ui32LogicalWidth,
+ ui32LogicalHeight,
+ ui32PhysicalWidth,
+ ui32PhysicalHeight,
+ ePixFmt,
+ eMemLayout,
+ eFBCompression,
+ paui32FBCClearColour,
+ sHeader,
+ ui32HeaderSize,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemIntPdumpImageDescriptor: Failed to Pdump Image descriptor"));
+ return eError;
+ }
+
+ eError = MMU_ReleasePDumpMMUContext(psDevMemContext->psMMUContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemIntPdumpImageDescriptor: Failed to release MMU context"));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_server.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_server.h
new file mode 100644
index 00000000000000..837da55ab53a30
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_server.h
@@ -0,0 +1,573 @@
+/**************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header file for server side component of device memory management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __DEVICEMEM_SERVER_H__
+#define __DEVICEMEM_SERVER_H__
+
+#include "device.h" /* For device node */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "connection_server.h"
+
+#include "pmr.h"
+
+
+typedef struct _DEVMEMINT_CTX_ DEVMEMINT_CTX;
+typedef struct _DEVMEMINT_CTX_EXPORT_ DEVMEMINT_CTX_EXPORT;
+typedef struct _DEVMEMINT_HEAP_ DEVMEMINT_HEAP;
+
+typedef struct _DEVMEMINT_RESERVATION_ DEVMEMINT_RESERVATION;
+typedef struct _DEVMEMINT_MAPPING_ DEVMEMINT_MAPPING;
+typedef struct _DEVMEMINT_PF_NOTIFY_ DEVMEMINT_PF_NOTIFY;
+
+
+/**************************************************************************/ /*!
+@Function DevmemIntUnpin
+@Description This is the counterpart to DevmemIntPin(). It is meant to be
+ called when the allocation is NOT mapped in the device virtual
+ space.
+
+@Input psPMR The physical memory to unpin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function DevmemIntUnpinInvalidate
+@Description This is the counterpart to DevmemIntPinValidate(). It is meant to be
+ called for allocations that ARE mapped in the device virtual space
+ and we have to invalidate the mapping.
+
+@Input psPMR The physical memory to unpin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntUnpinInvalidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function DevmemIntPin
+@Description This is the counterpart to DevmemIntUnpin().
+ It is meant to be called when there is NO device mapping present.
+
+@Input psPMR The physical memory to pin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPin(PMR *psPMR);
+
+/**************************************************************************/ /*!
+@Function DevmemIntPinValidate
+@Description This is the counterpart to DevmemIntUnpinInvalidate().
+ It is meant to be called when there IS a device mapping present
+ that needs to be taken care of.
+
+@Input psDevmemMapping The mapping structure used for the passed PMR.
+
+@Input psPMR The physical memory to pin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR DevmemIntPinValidate(DEVMEMINT_MAPPING *psDevmemMapping, PMR *psPMR);
+/*
+ * DevmemServerGetImportHandle()
+ *
+ * For a given exportable memory descriptor, returns the PMR handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetImportHandle(DEVMEM_MEMDESC *psMemDesc,
+ IMG_HANDLE *phImport);
+
+/*
+ * DevmemServerGetHeapHandle()
+ *
+ * For a given reservation, returns the Heap handle
+ *
+ */
+PVRSRV_ERROR
+DevmemServerGetHeapHandle(DEVMEMINT_RESERVATION *psReservation,
+ IMG_HANDLE *phHeap);
+
+/*
+ * DevmemIntCtxCreate()
+ *
+ * Create a Server-side Device Memory Context. This is usually the
+ * counterpart of the client side memory context, and indeed is
+ * usually created at the same time.
+ *
+ * You must have one of these before creating any heaps.
+ *
+ * All heaps must have been destroyed before calling
+ * DevmemIntCtxDestroy()
+ *
+ * If you call DevmemIntCtxCreate() (and it succeeds) you are promising
+ * to later call DevmemIntCtxDestroy()
+ *
+ * Note that this call will cause the device MMU code to do some work
+ * for creating the device memory context, but it does not guarantee
+ * that a page catalogue will have been created, as this may be
+ * deferred until first allocation.
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_CTX object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxCreate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ /* devnode / perproc etc */
+ IMG_BOOL bKernelMemoryCtx,
+ DEVMEMINT_CTX **ppsDevmemCtxPtr,
+ IMG_HANDLE *hPrivData,
+ IMG_UINT32 *pui32CPUCacheLineSize);
+/*
+ * DevmemIntCtxDestroy()
+ *
+ * Undoes a prior DevmemIntCtxCreate or DevmemIntCtxImport.
+ */
+extern PVRSRV_ERROR
+DevmemIntCtxDestroy(DEVMEMINT_CTX *psDevmemCtx);
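+
+/* Lifecycle sketch (illustrative only; error handling elided and the local
+ * variable names are hypothetical):
+ *
+ *     DEVMEMINT_CTX *psCtx;
+ *     IMG_HANDLE hPriv;
+ *     IMG_UINT32 ui32CacheLineSize;
+ *
+ *     eError = DevmemIntCtxCreate(psConnection, psDevNode, IMG_FALSE,
+ *                                 &psCtx, &hPriv, &ui32CacheLineSize);
+ *     ... create heaps, reserve ranges, map PMRs ...
+ *     ... unmap, unreserve and destroy all heaps ...
+ *     eError = DevmemIntCtxDestroy(psCtx);
+ */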
+
+/*
+ * DevmemIntHeapCreate()
+ *
+ * Creates a new heap in this device memory context. This will cause
+ * a call into the MMU code to allocate various data structures for
+ * managing this heap. It will not necessarily cause any page tables
+ * to be set up, as this can be deferred until first allocation.
+ * (i.e. we shouldn't care - it's up to the MMU code)
+ *
+ * Note that the data page size must be specified (as log 2). The
+ * data page size as specified here will be communicated to the mmu
+ * module, and thus may determine the page size configured in page
+ * directory entries for subsequent allocations from this heap. It is
+ * essential that the page size here is less than or equal to the
+ * "minimum contiguity guarantee" of any PMR that you subsequently
+ * attempt to map to this heap.
+ *
+ * If you call DevmemIntHeapCreate() (and the call succeeds) you are
+ * promising that you shall subsequently call DevmemIntHeapDestroy()
+ *
+ * Caller to provide storage for a pointer to the DEVMEM_HEAP object
+ * that will be created by this call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapCreate(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sHeapBaseAddr,
+ IMG_DEVMEM_SIZE_T uiHeapLength,
+ IMG_UINT32 uiLog2DataPageSize,
+ DEVMEMINT_HEAP **ppsDevmemHeapPtr);
+/*
+ * DevmemIntHeapDestroy()
+ *
+ * Destroys a heap previously created with DevmemIntHeapCreate()
+ *
+ * All allocations from this heap must have been freed before this
+ * call.
+ */
+extern PVRSRV_ERROR
+DevmemIntHeapDestroy(DEVMEMINT_HEAP *psDevmemHeap);
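+
+/* Example of the contiguity rule above (illustrative): a heap created with
+ * uiLog2DataPageSize = 12 (4KB pages) can map any PMR whose
+ * PMR_GetLog2Contiguity() is 12 or greater, e.g. 14 for 16KB chunks, but
+ * DevmemIntMapPMR() will reject a PMR whose contiguity is only 11 (2KB). */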
+
+/*
+ * DevmemIntMapPMR()
+ *
+ * Maps the given PMR to the virtual range previously allocated with
+ * DevmemIntReserveRange()
+ *
+ * If appropriate, the PMR must have had its physical backing
+ * committed, as this call will call into the MMU code to set up the
+ * page tables for this allocation, which shall in turn request the
+ * physical addresses from the PMR. Alternatively, the PMR
+ * implementation can choose to do so off the back of the "lock"
+ * callback, which it will receive as a result (indirectly) of this
+ * call.
+ *
+ * This function makes no promise w.r.t. the circumstances that it can
+ * be called, and these would be "inherited" from the implementation
+ * of the PMR. For example if the PMR "lock" callback causes pages to
+ * be pinned at that time (which may cause scheduling or disk I/O
+ * etc.) then it would not be legal to "Map" the PMR in a context
+ * where scheduling events are disallowed.
+ *
+ * If you call DevmemIntMapPMR() (and the call succeeds) then you are
+ * promising that you shall later call DevmemIntUnmapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPMR(DEVMEMINT_HEAP *psDevmemHeap,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ DEVMEMINT_MAPPING **ppsMappingPtr);
+/*
+ * DevmemIntUnmapPMR()
+ *
+ * Reverses the mapping caused by DevmemIntMapPMR()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPMR(DEVMEMINT_MAPPING *psMapping);
+
+/* DevmemIntMapPages()
+ *
+ * Maps an arbitrary number of pages from a PMR to a reserved range
+ *
+ * @input psReservation Reservation handle for the range
+ * @input psPMR PMR that is mapped
+ * @input ui32PageCount Number of consecutive pages that are mapped
+ * @input ui32PhysicalPgOffset Physical page offset into the PMR
+ * @input uiFlags Mapping flags
+ * @input sDevVAddrBase Virtual address base to start the mapping from
+ */
+extern PVRSRV_ERROR
+DevmemIntMapPages(DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 ui32PhysicalPgOffset,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase);
+
+/* DevmemIntUnmapPages()
+ *
+ * Unmaps an arbitrary number of pages from a reserved range
+ *
+ * @input psReservation Reservation handle for the range
+ * @input sDevVAddrBase Virtual address base to start from
+ * @input ui32PageCount Number of consecutive pages that are unmapped
+ */
+extern PVRSRV_ERROR
+DevmemIntUnmapPages(DEVMEMINT_RESERVATION *psReservation,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount);
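+
+/* Sketch (illustrative; the values and variable names are hypothetical):
+ * map 8 consecutive pages, starting at physical page 2 of the PMR, into a
+ * reserved range and later unmap them again:
+ *
+ *     DevmemIntMapPages(psResv, psPMR, 8, 2, uiFlags, sDevVAddr);
+ *     ...
+ *     DevmemIntUnmapPages(psResv, sDevVAddr, 8);
+ */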
+
+/*
+ * DevmemIntReserveRange()
+ *
+ * Indicates that the specified range should be reserved from the
+ * given heap.
+ *
+ * In turn causes the page tables to be allocated to cover the
+ * specified range.
+ *
+ * If you call DevmemIntReserveRange() (and the call succeeds) then you
+ * are promising that you shall later call DevmemIntUnreserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntReserveRange(DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_DEV_VIRTADDR sAllocationDevVAddr,
+ IMG_DEVMEM_SIZE_T uiAllocationSize,
+ DEVMEMINT_RESERVATION **ppsReservationPtr);
+/*
+ * DevmemIntUnreserveRange()
+ *
+ * Undoes the state change caused by DevmemIntReserveRange()
+ */
+extern PVRSRV_ERROR
+DevmemIntUnreserveRange(DEVMEMINT_RESERVATION *psDevmemReservation);
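+
+/* Typical reserve/map sequence (illustrative only; error handling elided
+ * and the local variable names are hypothetical):
+ *
+ *     DEVMEMINT_RESERVATION *psResv;
+ *     DEVMEMINT_MAPPING *psMap;
+ *
+ *     DevmemIntReserveRange(psHeap, sDevVAddr, uiSize, &psResv);
+ *     DevmemIntMapPMR(psHeap, psResv, psPMR, uiMapFlags, &psMap);
+ *     ... use the mapping ...
+ *     DevmemIntUnmapPMR(psMap);
+ *     DevmemIntUnreserveRange(psResv);
+ */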
+
+/*************************************************************************/ /*!
+@Function DevmemIntChangeSparse
+@Description Changes the sparse allocations of a PMR by allocating and freeing
+ pages and changing their corresponding CPU and GPU mappings.
+
+@input psDevmemHeap Pointer to the heap we map on
+@input psPMR The PMR we want to map
+@input ui32AllocPageCount Number of pages to allocate
+@input pai32AllocIndices The logical PMR indices where pages will
+ be allocated. May be NULL.
+@input ui32FreePageCount Number of pages to free
+@input pai32FreeIndices The logical PMR indices where pages will
+ be freed. May be NULL.
+@input uiSparseFlags Flags passed in to determine which kind
+ of sparse change the user wanted.
+ See devicemem_typedefs.h for details.
+@input uiFlags The memalloc flags for this virtual range.
+@input sDevVAddrBase The base address of the virtual range of
+ this sparse allocation.
+@input sCpuVAddrBase The CPU base address of this allocation.
+ May be 0 if not existing.
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+DevmemIntChangeSparse(DEVMEMINT_HEAP *psDevmemHeap,
+ PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiSparseFlags,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT64 sCpuVAddrBase);
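+
+/* Sketch (illustrative; the index arrays are hypothetical): grow a sparse
+ * allocation by two pages and free one other page, updating both the GPU
+ * and the CPU mappings in a single call:
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 4, 5 };
+ *     IMG_UINT32 aui32Free[]  = { 0 };
+ *
+ *     eError = DevmemIntChangeSparse(psHeap, psPMR,
+ *                                    2, aui32Alloc,
+ *                                    1, aui32Free,
+ *                                    SPARSE_RESIZE_BOTH | SPARSE_MAP_CPU_ADDR,
+ *                                    uiFlags, sDevVAddrBase, sCpuVAddrBase);
+ */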
+
+extern PVRSRV_ERROR
+DevmemIntIsVDevAddrValid(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR sDevAddr);
+
+extern PVRSRV_ERROR
+DevmemIntGetFaultAddress(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_DEV_VIRTADDR *psFaultAddress);
+
+/*************************************************************************/ /*!
+@Function DevmemIntRegisterPFNotify
+@Description Registers a PID to be notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be notified about.
+@Input ui32PID The PID of the process that would like to be
+ notified.
+@Input bRegister If true, register. If false, de-register.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+DevmemIntRegisterPFNotifyKM(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_INT32 ui32PID,
+ IMG_BOOL bRegister);
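+
+/* Sketch (illustrative): a process registers its own PID for page-fault
+ * notification on a context, and is expected to unregister the same PID
+ * again before the context is destroyed:
+ *
+ *     DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32MyPID, IMG_TRUE);
+ *     ...
+ *     DevmemIntRegisterPFNotifyKM(psDevmemCtx, ui32MyPID, IMG_FALSE);
+ */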
+
+/*************************************************************************/ /*!
+@Function DevmemIntPFNotify
+@Description Notifies any processes that have registered themselves to be
+ notified when a page fault happens on a specific device memory
+ context.
+@Input *psDevNode The device node.
+@Input ui64FaultedPCAddress The page catalogue address that faulted.
+@Input sFaultAddress The address that triggered the fault.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR DevmemIntPFNotify(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT64 ui64FaultedPCAddress,
+ IMG_DEV_VIRTADDR sFaultAddress);
+
+#if defined(PDUMP)
+/*
+ * DevmemIntPDumpSaveToFileVirtual()
+ *
+ * Writes out PDump "SAB" commands with the data found in memory at
+ * the given virtual address.
+ */
+extern PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
+extern IMG_UINT32
+DevmemIntMMUContextID(DEVMEMINT_CTX *psDevMemContext);
+
+extern PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags);
+
+extern PVRSRV_ERROR
+DevmemIntPdumpImageDescriptor(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ const IMG_CHAR *pszFileName,
+ IMG_DEV_VIRTADDR sData,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeader,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpSaveToFileVirtual)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpSaveToFileVirtual(DEVMEMINT_CTX *psDevmemCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevmemCtx);
+ PVR_UNREFERENCED_PARAMETER(sDevAddrStart);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiArraySize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPDumpBitmap)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPDumpBitmap(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Width);
+ PVR_UNREFERENCED_PARAMETER(ui32Height);
+ PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+ PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+ PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+ PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemIntPdumpImageDescriptor)
+#endif
+static INLINE PVRSRV_ERROR
+DevmemIntPdumpImageDescriptor(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_CTX *psDevMemContext,
+ IMG_UINT32 ui32Size,
+ const IMG_CHAR *pszFileName,
+ IMG_DEV_VIRTADDR sData,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeader,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psDevMemContext);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(sData);
+ PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+ PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+ PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+ PVR_UNREFERENCED_PARAMETER(ePixFmt);
+ PVR_UNREFERENCED_PARAMETER(eMemLayout);
+ PVR_UNREFERENCED_PARAMETER(eFBCompression);
+ PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+ PVR_UNREFERENCED_PARAMETER(sHeader);
+ PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#endif /* PDUMP */
+
+PVRSRV_ERROR
+DevmemIntInit(void);
+
+PVRSRV_ERROR
+DevmemIntDeInit(void);
+
+PVRSRV_ERROR
+DevmemIntExportCtx(DEVMEMINT_CTX *psContext,
+ PMR *psPMR,
+ DEVMEMINT_CTX_EXPORT **ppsContextExport);
+
+PVRSRV_ERROR
+DevmemIntUnexportCtx(DEVMEMINT_CTX_EXPORT *psContextExport);
+
+PVRSRV_ERROR
+DevmemIntAcquireRemoteCtx(PMR *psPMR,
+ DEVMEMINT_CTX **ppsContext,
+ IMG_HANDLE *phPrivData);
+
+#endif /* ifndef __DEVICEMEM_SERVER_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_server_utils.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_server_utils.h
new file mode 100644
index 00000000000000..469080d8e0fe43
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_server_utils.h
@@ -0,0 +1,204 @@
+/**************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header file for utilities that are specific to device memory functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_memallocflags.h"
+#include "pvrsrv.h"
+
+static INLINE PVRSRV_ERROR DevmemCPUCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags,
+ IMG_UINT32 *pui32Ret)
+{
+ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+ IMG_UINT32 ui32Ret;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+ switch (ui32CPUCacheMode)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+
+ /*
+ * If the system has no coherency support but coherency has been
+ * requested for both CPU and GPU, we currently have to fall back
+ * to uncached.
+ *
+ * Ideally this case would return an error, but as long as many
+ * services allocations use both CPU/GPU coherency flags and rely
+ * on the UNCACHED fallback, we have to leave it here.
+ */
+ if ( (PVRSRV_GPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT) &&
+ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ }
+ else
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_CACHED;
+ }
+
+ break;
+
+ default:
+ PVR_LOG(("DevmemCPUCacheMode: Unknown CPU cache mode 0x%08x", ui32CPUCacheMode));
+ PVR_ASSERT(0);
+ /*
+ We should never get here, but if we do then setting the mode
+ to uncached is the safest thing to do.
+ */
+ ui32Ret = PVRSRV_MEMALLOCFLAG_CPU_UNCACHED;
+ eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+ break;
+ }
+
+ *pui32Ret = ui32Ret;
+
+ return eError;
+}
+
+static INLINE PVRSRV_ERROR DevmemDeviceCacheMode(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags,
+ IMG_UINT32 *pui32Ret)
+{
+ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+ IMG_UINT32 ui32Ret;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+ switch (ui32DeviceCacheMode)
+ {
+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT:
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT:
+
+ /*
+ * If the system has no coherency support but coherency has been
+ * requested for both CPU and GPU, we currently have to fall back
+ * to uncached.
+ *
+ * Ideally this case would return an error, but as long as many
+ * services allocations use both CPU/GPU coherency flags and rely
+ * on the UNCACHED fallback, we have to leave it here.
+ */
+ if ( (PVRSRV_CPU_CACHE_MODE(ulFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT) &&
+ !(PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig) && PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig)) )
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+ }
+ else
+ {
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_CACHED;
+ }
+
+ break;
+
+ default:
+ PVR_LOG(("DevmemDeviceCacheMode: Unknown device cache mode 0x%08x", ui32DeviceCacheMode));
+ PVR_ASSERT(0);
+ /*
+ We should never get here, but if we do then setting the mode
+ to uncached is the safest thing to do.
+ */
+ ui32Ret = PVRSRV_MEMALLOCFLAG_GPU_UNCACHED;
+ eError = PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+ break;
+ }
+
+ *pui32Ret = ui32Ret;
+
+ return eError;
+}
+
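+/* Illustrative usage sketch (not part of the driver; psDevNode and
+ * uiAllocFlags are hypothetical locals): resolving the effective CPU and
+ * GPU cache modes for an allocation's flags before mapping. The helpers
+ * may downgrade a coherent request to uncached on systems without
+ * snooping support.
+ */
+#if 0
+{
+ IMG_UINT32 ui32CPUMode, ui32GPUMode;
+
+ if (DevmemCPUCacheMode(psDevNode, uiAllocFlags, &ui32CPUMode) == PVRSRV_OK &&
+ DevmemDeviceCacheMode(psDevNode, uiAllocFlags, &ui32GPUMode) == PVRSRV_OK)
+ {
+ /* ui32CPUMode/ui32GPUMode now hold the effective cache mode flags */
+ }
+}
+#endif
+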
+static INLINE IMG_BOOL DevmemCPUCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+ IMG_UINT32 ui32CPUCacheMode = PVRSRV_CPU_CACHE_MODE(ulFlags);
+ IMG_BOOL bRet = IMG_FALSE;
+
+ PVR_ASSERT(ui32CPUCacheMode == PVRSRV_CPU_CACHE_MODE(ulFlags));
+
+ if (ui32CPUCacheMode == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+ {
+ bRet = PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig);
+ }
+ return bRet;
+}
+
+static INLINE IMG_BOOL DevmemDeviceCacheCoherency(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_MEMALLOCFLAGS_T ulFlags)
+{
+ IMG_UINT32 ui32DeviceCacheMode = PVRSRV_GPU_CACHE_MODE(ulFlags);
+ IMG_BOOL bRet = IMG_FALSE;
+
+ PVR_ASSERT(ui32DeviceCacheMode == PVRSRV_GPU_CACHE_MODE(ulFlags));
+
+ if (ui32DeviceCacheMode == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+ {
+ bRet = PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig);
+ }
+ return bRet;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_typedefs.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_typedefs.h
new file mode 100644
index 00000000000000..94966763034f55
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_typedefs.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Client side part of device memory management -- this file
+ is forked from new_devmem_allocation.h as this one has to
+ reside in the top level include so that client code is able
+ to make use of the typedefs.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEM_TYPEDEFS_H
+#define DEVICEMEM_TYPEDEFS_H
+
+#include <powervr/mem_types.h>
+#include "img_types.h"
+#include "pvrsrv_memallocflags.h"
+
+typedef struct _DEVMEM_CONTEXT_ DEVMEM_CONTEXT; /*!< Convenience typedef for struct _DEVMEM_CONTEXT_ */
+typedef struct _DEVMEM_HEAP_ DEVMEM_HEAP; /*!< Convenience typedef for struct _DEVMEM_HEAP_ */
+typedef struct _DEVMEM_MEMDESC_ DEVMEM_MEMDESC; /*!< Convenience typedef for struct _DEVMEM_MEMDESC_ */
+typedef struct _DEVMEM_PAGELIST_ DEVMEM_PAGELIST; /*!< Convenience typedef for struct _DEVMEM_PAGELIST_ */
+typedef PVRSRV_MEMALLOCFLAGS_T DEVMEM_FLAGS_T; /*!< Convenience typedef for PVRSRV_MEMALLOCFLAGS_T */
+
+typedef IMG_HANDLE DEVMEM_EXPORTHANDLE; /*!< Typedef for DeviceMem Export Handle */
+typedef IMG_UINT64 DEVMEM_EXPORTKEY; /*!< Typedef for DeviceMem Export Key */
+typedef IMG_DEVMEM_SIZE_T DEVMEM_SIZE_T; /*!< Typedef for DeviceMem SIZE_T */
+typedef IMG_DEVMEM_LOG2ALIGN_T DEVMEM_LOG2ALIGN_T; /*!< Typedef for DeviceMem LOG2 Alignment */
+
+typedef struct _DEVMEMX_PHYS_MEMDESC_ DEVMEMX_PHYSDESC; /*!< Convenience typedef for DevmemX physical */
+typedef struct _DEVMEMX_VIRT_MEMDESC_ DEVMEMX_VIRTDESC; /*!< Convenience typedef for DevmemX virtual */
+
+/*! calling code needs all the info in this struct, to be able to pass it around */
+typedef struct
+{
+ /*! A handle to the PMR. */
+ IMG_HANDLE hPMRExportHandle;
+ /*! The "key" to prove we have authorization to use this PMR */
+ IMG_UINT64 uiPMRExportPassword;
+ /*! Size and alignment properties for this PMR. Note, these
+ numbers are not trusted in kernel, but we need to cache them
+ client-side in order to allocate from the VM arena. The kernel
+ will know the actual alignment and size of the PMR and thus
+ would prevent client code from breaching security here. Ditto
+ for physmem granularity (aka page size) if this is different
+ from alignment */
+ IMG_DEVMEM_SIZE_T uiSize;
+ /*! We call this "contiguity guarantee" to be more precise than
+ calling it "alignment" or "page size", terms which may seem
+ similar but have different emphasis. The number reported here
+ is the minimum contiguity guarantee from the creator of the
+ PMR. Now, there is no requirement to allocate that coarsely
+ from the RA. The alignment given to the RA simply needs to be
+ at least as coarse as the device page size for the heap we
+ ultimately intend to map into. What is important is that the
+ device MMU data page size is not greater than the minimum
+ contiguity guarantee from the PMR. This value is reported to
+ the client in order that it can choose to make early checks and
+ perhaps decide which heap (in a variable page size scenario) it
+ would be safe to map this PMR into. For convenience, the
+ client may choose to use this value as the alignment of the
+ virtual range it allocates, but this is _not_ necessary; in
+ many cases it could get away with a finer alignment, should
+ the heap into which this PMR will be mapped support it. */
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+} DEVMEM_EXPORTCOOKIE;
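+
+/* Illustrative check (not part of the driver): a client deciding whether a
+ * PMR described by an export cookie may safely be mapped into a heap whose
+ * device MMU page size is uiLog2HeapPageSize (hypothetical variable), per
+ * the contiguity rule described above. */
+#if 0
+ if (uiLog2HeapPageSize > sCookie.uiLog2ContiguityGuarantee)
+ {
+ /* Device page size exceeds the PMR's minimum contiguity
+ * guarantee; mapping into this heap would not be safe */
+ }
+#endif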
+
+/* Enum that describes the operation associated with changing sparse memory */
+typedef enum Resize {
+ SPARSE_RESIZE_NONE = 0,
+
+ /* This should be set to indicate the change needs allocation */
+ SPARSE_RESIZE_ALLOC = 1,
+
+ /* This should be set to indicate the change needs free */
+ SPARSE_RESIZE_FREE = 2,
+
+ SPARSE_RESIZE_BOTH = (SPARSE_RESIZE_ALLOC | SPARSE_RESIZE_FREE),
+
+ /* This should be set to silently swap underlying physical memory
+ * without disturbing its device or cpu virtual maps
+ * This flag is not supported in the case of PDUMP and could lead to
+ * PDUMP panic when used */
+ SPARSE_REMAP_MEM = 4,
+
+ /* Should be set to get the sparse changes appear in cpu virtual map */
+ SPARSE_MAP_CPU_ADDR = 8
+}SPARSE_MEM_RESIZE_FLAGS;
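+
+/* Illustrative example (not part of the driver; uiSparseFlags is a
+ * hypothetical local): a sparse change that both allocates and frees pages
+ * while keeping the CPU view in sync would combine the flags below, e.g.
+ * for the uiSparseFlags argument of DevmemIntChangeSparse() declared in
+ * devicemem_server.h. */
+#if 0
+ uiSparseFlags = SPARSE_RESIZE_BOTH | SPARSE_MAP_CPU_ADDR;
+#endif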
+
+/* To be used with all sparse allocations that get mapped to CPU virtual space.
+ * The sparse allocation CPU mapping is torn down and re-mapped every time the
+ * sparse allocation layout changes */
+#define PVRSRV_UNMAP_ON_SPARSE_CHANGE 1
+
+/* To use with DevmemSubAllocate() as the default factor if no
+ * over-allocation is desired. */
+#define DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER 1
+
+/* Defines the max length for PMR, MemDesc, Device memory
+ * History and RI debug annotations stored in memory, including
+ * the null terminator. */
+#define DEVMEM_ANNOTATION_MAX_LEN (PVR_ANNOTATION_MAX_LEN + 1)
+
+#endif /* #ifndef DEVICEMEM_TYPEDEFS_H */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_utils.c b/drivers/gpu/drm/img-rogue/1.10/devicemem_utils.c
new file mode 100644
index 00000000000000..a94ce60b8a6dd3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_utils.c
@@ -0,0 +1,1065 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management internal utility functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally by device memory management
+ code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "allocmem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "ra.h"
+#include "devicemem_utils.h"
+#include "client_mm_bridge.h"
+
+/*
+ SVM heap management support functions for CPU (un)mapping
+ */
+#define DEVMEM_MAP_SVM_USER_MANAGED_RETRY 2
+
+static inline PVRSRV_ERROR
+_DevmemCPUMapSVMKernelManaged(DEVMEM_HEAP *psHeap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT64 *ui64MapAddress)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 ui64SvmMapAddr;
+ IMG_UINT64 ui64SvmMapAddrEnd;
+ IMG_UINT64 ui64SvmHeapAddrEnd;
+
+ /* SVM heap management is always XXX_KERNEL_MANAGED unless we
+ have triggered the fall-back code-path, in which case we
+ should not be calling into this function */
+ PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED);
+
+ /* By acquiring the CPU virtual address here, it essentially
+ means we lock-down the virtual address for the duration
+ of the life-cycle of the allocation until a de-allocation
+ request comes in. Thus the allocation is guaranteed not to
+ change its virtual address on the CPU during its life-time.
+ NOTE: Import might have already been CPU Mapped before now,
+ normally this is not a problem, see fall back */
+ eError = _DevmemImportStructCPUMap(psImport);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to CPU map (lock-down) device memory for SVM use",
+ __func__));
+ eError = PVRSRV_ERROR_DEVICEMEM_MAP_FAILED;
+ goto failSVM;
+ }
+
+ /* The kernel-supplied mmap virtual address is also the device virtual
+ address; calculate the heap & kernel-supplied mmap address limits */
+ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+ ui64SvmHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+ ui64SvmMapAddrEnd = ui64SvmMapAddr + psImport->uiSize;
+ PVR_ASSERT(ui64SvmMapAddr != (IMG_UINT64)0);
+
+ /* SVM limit test may fail if processor has more virtual address bits than device */
+ if (ui64SvmMapAddr >= ui64SvmHeapAddrEnd || ui64SvmMapAddrEnd > ui64SvmHeapAddrEnd)
+ {
+ /* Unmap incompatible SVM virtual address, this
+ may not release address if it was elsewhere
+ CPU Mapped before call into this function */
+ _DevmemImportStructCPUUnmap(psImport);
+
+ /* Flag incompatible SVM mapping */
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto failSVM;
+ }
+
+ *ui64MapAddress = ui64SvmMapAddr;
+ failSVM:
+ /* either OK, MAP_FAILED or BAD_MAPPING */
+ return eError;
+}
+
+static inline void
+_DevmemCPUUnmapSVMKernelManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+ PVR_UNREFERENCED_PARAMETER(psHeap);
+ _DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+_DevmemCPUMapSVMUserManaged(DEVMEM_HEAP *psHeap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT uiAlign,
+ IMG_UINT64 *ui64MapAddress)
+{
+ RA_LENGTH_T uiAllocatedSize;
+ RA_BASE_T uiAllocatedAddr;
+ IMG_UINT64 ui64SvmMapAddr;
+ IMG_UINT uiRetry = 0;
+ PVRSRV_ERROR eError;
+
+ /* If SVM heap management has transitioned to XXX_USER_MANAGED,
+ this is essentially a fall back approach that ensures we
+ continue to satisfy SVM alloc. This approach is not without
+ hazards in that we may specify a virtual address that is
+ already in use by the user process */
+ PVR_ASSERT(psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED);
+
+ /* Normally, for SVM heap allocations, CPUMap _must_ be done
+ before DevMap; ideally the initial CPUMap should be done by
+ SVM functions though this is not a hard requirement as long
+ as the prior elsewhere obtained CPUMap virtual address meets
+ SVM address requirements. This is a fall-back code-pathway
+ so we have to test that this assumption holds before we
+ progress any further */
+ OSLockAcquire(psImport->sCPUImport.hLock);
+
+ if (psImport->sCPUImport.ui32RefCount)
+ {
+ /* Already CPU Mapped SVM heap allocation, this prior elsewhere
+ obtained virtual address is responsible for the above
+ XXX_KERNEL_MANAGED failure. As we are not responsible for
+ this, we cannot progress any further so need to fail */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Previously obtained CPU map address not SVM compatible"
+ , __func__));
+
+ /* Revert SVM heap to DEVMEM_HEAP_TYPE_KERNEL_MANAGED */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_KERNEL_MANAGED;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Reverting SVM heap back to kernel managed",
+ __func__));
+
+ OSLockRelease(psImport->sCPUImport.hLock);
+
+ /* Do we need a more specific error code here? */
+ eError = PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED;
+ goto failSVM;
+ }
+
+ OSLockRelease(psImport->sCPUImport.hLock);
+
+ do
+ {
+ /* Next we proceed to instruct the kernel to use the RA_Alloc supplied
+ virtual address to map-in this SVM import suballocation; there is no
+ guarantee that this RA_Alloc virtual address will not collide with an
+ already in-use VMA range in the process */
+ eError = RA_Alloc(psHeap->psQuantizedVMRA,
+ psImport->uiSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* flags: this RA doesn't use flags*/
+ uiAlign,
+ "SVM_Virtual_Alloc",
+ &uiAllocatedAddr,
+ &uiAllocatedSize,
+ NULL /* don't care about per-import priv data */);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot RA allocate SVM compatible address",
+ __func__));
+ goto failSVM;
+ }
+
+ /* No reason for allocated virtual size to be different from
+ the PMR's size */
+ psImport->sCPUImport.pvCPUVAddr = (void*)(uintptr_t)uiAllocatedAddr;
+ PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+ /* Map the import or allocation using the RA_Alloc virtual address;
+ the kernel may fail the request if the supplied virtual address
+ is already in-use in which case we re-try using another virtual
+ address obtained from the RA_Alloc */
+ eError = _DevmemImportStructCPUMap(psImport);
+ if (eError != PVRSRV_OK)
+ {
+ /* For now we simply discard an RA_Alloc()-obtained virtual
+ address whose mapping failed (virtual space is plentiful);
+ this prevents us from re-using it and essentially blacklists
+ the address from future SVM consideration. We exit once the
+ retry count exceeds the fall-back retry limit */
+ if (uiRetry++ > DEVMEM_MAP_SVM_USER_MANAGED_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot find SVM compatible address, bad mapping",
+ __func__));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto failSVM;
+ }
+ }
+ else
+ {
+ /* Found compatible SVM virtual address, set as device virtual address */
+ ui64SvmMapAddr = (IMG_UINT64)(uintptr_t)psImport->sCPUImport.pvCPUVAddr;
+ }
+ } while (eError != PVRSRV_OK);
+
+ *ui64MapAddress = ui64SvmMapAddr;
+ failSVM:
+ return eError;
+}
+
+static inline void
+_DevmemCPUUnmapSVMUserManaged(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+ RA_BASE_T uiAllocatedAddr;
+
+ /* We only free SVM compatible addresses, all addresses in
+ the blacklist are essentially excluded from future RA_Alloc */
+ uiAllocatedAddr = psImport->sDeviceImport.sDevVAddr.uiAddr;
+ RA_Free(psHeap->psQuantizedVMRA, uiAllocatedAddr);
+
+ _DevmemImportStructCPUUnmap(psImport);
+}
+
+static inline PVRSRV_ERROR
+_DevmemImportStructDevMapSVM(DEVMEM_HEAP *psHeap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT uiAlign,
+ IMG_UINT64 *ui64MapAddress)
+{
+ PVRSRV_ERROR eError;
+
+ switch(psHeap->eHeapType)
+ {
+ case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+ eError = _DevmemCPUMapSVMKernelManaged(psHeap,
+ psImport,
+ ui64MapAddress);
+ if (eError == PVRSRV_ERROR_BAD_MAPPING)
+ {
+ /* If the SVM map address is outside of SVM heap limits,
+ change heap type to DEVMEM_HEAP_TYPE_USER_MANAGED */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Kernel managed SVM heap is now user managed",
+ __func__));
+
+ /* Retry using user managed fall-back approach */
+ eError = _DevmemCPUMapSVMUserManaged(psHeap,
+ psImport,
+ uiAlign,
+ ui64MapAddress);
+ }
+ break;
+
+ case DEVMEM_HEAP_TYPE_USER_MANAGED:
+ eError = _DevmemCPUMapSVMUserManaged(psHeap,
+ psImport,
+ uiAlign,
+ ui64MapAddress);
+ break;
+
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ break;
+ }
+
+ return eError;
+}
+
+static inline void
+_DevmemImportStructDevUnmapSVM(DEVMEM_HEAP *psHeap, DEVMEM_IMPORT *psImport)
+{
+ switch(psHeap->eHeapType)
+ {
+ case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+ _DevmemCPUUnmapSVMKernelManaged(psHeap, psImport);
+ break;
+
+ case DEVMEM_HEAP_TYPE_USER_MANAGED:
+ _DevmemCPUUnmapSVMUserManaged(psHeap, psImport);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ The Devmem import structure is the structure we use
+ to manage memory that is "imported" (which is page
+ granular) from the server into our process, this
+ includes allocations.
+
+ This allows memory to be imported without requiring
+ any CPU or device mapping. Memory can then be mapped
+ into the device or CPU on demand, but neither is
+ required.
+ */
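+
+/* Illustrative life-cycle sketch (not part of the driver); error handling
+ * is omitted and hDevConnection, uiSize, uiAlign, uiFlags, hPMR and psHeap
+ * are hypothetical locals:
+ */
+#if 0
+{
+ DEVMEM_IMPORT *psImport;
+
+ _DevmemImportStructAlloc(hDevConnection, &psImport);
+ _DevmemImportStructInit(psImport, uiSize, uiAlign, uiFlags, hPMR, 0);
+
+ /* Mappings are created on demand and refcounted independently */
+ _DevmemImportStructDevMap(psHeap, IMG_TRUE, psImport, 0);
+ _DevmemImportStructCPUMap(psImport);
+
+ _DevmemImportStructCPUUnmap(psImport);
+ _DevmemImportStructDevUnmap(psImport);
+
+ /* Drop the initial reference; the import is freed when it hits zero */
+ _DevmemImportStructRelease(psImport);
+}
+#endif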
+
+IMG_INTERNAL
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport)
+{
+ IMG_INT iRefCount = OSAtomicIncrement(&psImport->hRefCount);
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
+ PVR_ASSERT(iRefCount != 1);
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ iRefCount-1,
+ iRefCount);
+}
+
+IMG_INTERNAL
+IMG_BOOL _DevmemImportStructRelease(DEVMEM_IMPORT *psImport)
+{
+ IMG_INT iRefCount = OSAtomicDecrement(&psImport->hRefCount);
+ PVR_ASSERT(iRefCount >= 0);
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ iRefCount+1,
+ iRefCount);
+
+ if (iRefCount == 0)
+ {
+ BridgePMRUnrefPMR(psImport->hDevConnection,
+ psImport->hPMR);
+ OSLockDestroy(psImport->sCPUImport.hLock);
+ OSLockDestroy(psImport->sDeviceImport.hLock);
+ OSLockDestroy(psImport->hLock);
+ OSFreeMem(psImport);
+
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+IMG_INTERNAL
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport)
+{
+ PVR_ASSERT(OSAtomicRead(&psImport->hRefCount) == 0);
+ OSLockDestroy(psImport->sCPUImport.hLock);
+ OSLockDestroy(psImport->sDeviceImport.hLock);
+ OSLockDestroy(psImport->hLock);
+ OSFreeMem(psImport);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc)
+{
+ DEVMEM_MEMDESC *psMemDesc;
+ PVRSRV_ERROR eError;
+
+ /* Must be zeroed in case it needs to be freed before it is initialised */
+ psMemDesc = OSAllocZMem(sizeof(DEVMEM_MEMDESC));
+ if (psMemDesc == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto failAlloc;
+ }
+
+ eError = OSLockCreate(&psMemDesc->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMDLock;
+ }
+
+ eError = OSLockCreate(&psMemDesc->sDeviceMemDesc.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDMDLock;
+ }
+
+ eError = OSLockCreate(&psMemDesc->sCPUMemDesc.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failCMDLock;
+ }
+
+ *ppsMemDesc = psMemDesc;
+
+ return PVRSRV_OK;
+
+ failCMDLock:
+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+ failDMDLock:
+ OSLockDestroy(psMemDesc->hLock);
+ failMDLock:
+ OSFreeMem(psMemDesc);
+ failAlloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ Init the MemDesc structure
+ */
+IMG_INTERNAL
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiSize)
+{
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ 0,
+ 1);
+
+ psMemDesc->psImport = psImport;
+ psMemDesc->uiOffset = uiOffset;
+
+ psMemDesc->sDeviceMemDesc.ui32RefCount = 0;
+ psMemDesc->sCPUMemDesc.ui32RefCount = 0;
+ psMemDesc->uiAllocSize = uiSize;
+ psMemDesc->hPrivData = NULL;
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ psMemDesc->ui32AllocationIndex = DEVICEMEM_HISTORY_ALLOC_INDEX_NONE;
+#endif
+
+ OSAtomicWrite(&psMemDesc->hRefCount, 1);
+}
+
+IMG_INTERNAL
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc)
+{
+ IMG_INT iRefCount = 0;
+
+ iRefCount = OSAtomicIncrement(&psMemDesc->hRefCount);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ iRefCount-1,
+ iRefCount);
+}
+
+IMG_INTERNAL
+IMG_BOOL _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc)
+{
+ IMG_INT iRefCount;
+ PVR_ASSERT(psMemDesc != NULL);
+
+ iRefCount = OSAtomicDecrement(&psMemDesc->hRefCount);
+ PVR_ASSERT(iRefCount >= 0);
+
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psMemDesc,
+ iRefCount+1,
+ iRefCount);
+
+ if (iRefCount == 0)
+ {
+ if (psMemDesc->psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE)
+ {
+ /* As soon as the first sub-allocation on the psImport is freed
+ * we might get dirty memory when reusing it.
+ * We have to clear the ZEROED, CLEAN & POISONED flags */
+
+ psMemDesc->psImport->uiProperties &=
+ ~(DEVMEM_PROPERTIES_IMPORT_IS_ZEROED |
+ DEVMEM_PROPERTIES_IMPORT_IS_CLEAN |
+ DEVMEM_PROPERTIES_IMPORT_IS_POISONED);
+
+ RA_Free(psMemDesc->psImport->sDeviceImport.psHeap->psSubAllocRA,
+ psMemDesc->psImport->sDeviceImport.sDevVAddr.uiAddr +
+ psMemDesc->uiOffset);
+ }
+ else
+ {
+ _DevmemImportStructRelease(psMemDesc->psImport);
+ }
+
+ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+ OSLockDestroy(psMemDesc->hLock);
+ OSFreeMem(psMemDesc);
+
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+IMG_INTERNAL
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_ASSERT(OSAtomicRead(&psMemDesc->hRefCount) == 0);
+
+ OSLockDestroy(psMemDesc->sCPUMemDesc.hLock);
+ OSLockDestroy(psMemDesc->sDeviceMemDesc.hLock);
+ OSLockDestroy(psMemDesc->hLock);
+ OSFreeMem(psMemDesc);
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T *puiFlags)
+{
+ if ((*puiFlags & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) &&
+ (*puiFlags & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (uiAlign & (uiAlign-1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: The requested alignment is not a power of two.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (uiSize == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Please request a non-zero size value.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* If the zero flag is set we need CPU write access to the page. */
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(*puiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(*puiFlags))
+ {
+ (*puiFlags) |= PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ Allocate and init an import structure
+ */
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_IMPORT **ppsImport)
+{
+ DEVMEM_IMPORT *psImport;
+ PVRSRV_ERROR eError;
+
+ psImport = OSAllocMem(sizeof *psImport);
+ if (psImport == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Setup some known bad values for things we don't have yet */
+ psImport->sDeviceImport.hReservation = LACK_OF_RESERVATION_POISON;
+ psImport->sDeviceImport.hMapping = LACK_OF_MAPPING_POISON;
+ psImport->sDeviceImport.psHeap = NULL;
+ psImport->sDeviceImport.bMapped = IMG_FALSE;
+
+ eError = OSLockCreate(&psImport->sDeviceImport.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failDIOSLockCreate;
+ }
+
+ psImport->sCPUImport.hOSMMapData = NULL;
+ psImport->sCPUImport.pvCPUVAddr = NULL;
+
+ eError = OSLockCreate(&psImport->sCPUImport.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failCIOSLockCreate;
+ }
+
+ /* Set up common elements */
+ psImport->hDevConnection = hDevConnection;
+
+ /* Setup properties */
+ psImport->uiProperties = 0;
+
+ /* Setup refcounts */
+ psImport->sDeviceImport.ui32RefCount = 0;
+ psImport->sCPUImport.ui32RefCount = 0;
+ OSAtomicWrite(&psImport->hRefCount, 0);
+
+ /* Create the lock */
+ eError = OSLockCreate(&psImport->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto failILockAlloc;
+ }
+
+ *ppsImport = psImport;
+
+ return PVRSRV_OK;
+
+ failILockAlloc:
+ OSLockDestroy(psImport->sCPUImport.hLock);
+ failCIOSLockCreate:
+ OSLockDestroy(psImport->sDeviceImport.hLock);
+ failDIOSLockCreate:
+ OSFreeMem(psImport);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ Initialise the import structure
+ */
+IMG_INTERNAL
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ IMG_HANDLE hPMR,
+ DEVMEM_PROPERTIES_T uiProperties)
+{
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ 0,
+ 1);
+
+ psImport->uiSize = uiSize;
+ psImport->uiAlign = uiAlign;
+ psImport->uiFlags = uiFlags;
+ psImport->hPMR = hPMR;
+ psImport->uiProperties = uiProperties;
+ OSAtomicWrite(&psImport->hRefCount, 1);
+}
+
+/*
+ Map an import to the device
+ */
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+ IMG_BOOL bMap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT64 ui64OptionalMapAddress)
+{
+ DEVMEM_DEVICE_IMPORT *psDeviceImport;
+ RA_BASE_T uiAllocatedAddr;
+ RA_LENGTH_T uiAllocatedSize;
+ IMG_DEV_VIRTADDR sBase;
+ IMG_HANDLE hReservation;
+ PVRSRV_ERROR eError;
+ IMG_UINT uiAlign;
+ IMG_BOOL bDestroyed = IMG_FALSE;
+
+ /* Round the provided import alignment to the configured heap alignment */
+ uiAlign = 1ULL << psHeap->uiLog2ImportAlignment;
+ uiAlign = (psImport->uiAlign + uiAlign - 1) & ~(uiAlign-1);
+
+ psDeviceImport = &psImport->sDeviceImport;
+
+ OSLockAcquire(psDeviceImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psDeviceImport->ui32RefCount,
+ psDeviceImport->ui32RefCount+1);
+
+ if (psDeviceImport->ui32RefCount++ == 0)
+ {
+ _DevmemImportStructAcquire(psImport);
+
+ OSAtomicIncrement(&psHeap->hImportCount);
+
+ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+ {
+ /* SVM (shared virtual memory) imports or allocations always
+ need to acquire CPU virtual address first as address is
+ used to map the allocation into the device virtual address
+ space; i.e. the virtual address of the allocation for both
+ the CPU/GPU must be identical. */
+ eError = _DevmemImportStructDevMapSVM(psHeap,
+ psImport,
+ uiAlign,
+ &ui64OptionalMapAddress);
+ if (eError != PVRSRV_OK)
+ {
+ goto failVMRAAlloc;
+ }
+ }
+
+ if (ui64OptionalMapAddress == 0)
+ {
+ if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ||
+ psHeap->eHeapType == DEVMEM_HEAP_TYPE_KERNEL_MANAGED)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+ "%s: Heap is user managed, please use PVRSRVMapToDeviceAddress().":
+ "%s: Heap is kernel managed, use right allocation flags (e.g. SVM).",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_RA_MANAGED;
+
+ /* Allocate space in the VM */
+ eError = RA_Alloc(psHeap->psQuantizedVMRA,
+ psImport->uiSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* flags: this RA doesn't use flags*/
+ uiAlign,
+ "Virtual_Alloc",
+ &uiAllocatedAddr,
+ &uiAllocatedSize,
+ NULL /* don't care about per-import priv data */
+ );
+ if (PVRSRV_OK != eError)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM;
+ goto failVMRAAlloc;
+ }
+
+ /* No reason for the allocated virtual size to be different from
+ the PMR's size */
+ PVR_ASSERT(uiAllocatedSize == psImport->uiSize);
+
+ sBase.uiAddr = uiAllocatedAddr;
+
+ }
+ else
+ {
+ IMG_UINT64 uiHeapAddrEnd;
+
+ switch (psHeap->eHeapType)
+ {
+ case DEVMEM_HEAP_TYPE_UNKNOWN:
+ /* DEVMEM_HEAP_TYPE_USER_MANAGED can apply to _any_
+ heap and can only be determined here. This heap
+ type transitions from DEVMEM_HEAP_TYPE_UNKNOWN
+ to DEVMEM_HEAP_TYPE_USER_MANAGED on 1st alloc */
+ psHeap->eHeapType = DEVMEM_HEAP_TYPE_USER_MANAGED;
+ break;
+
+ case DEVMEM_HEAP_TYPE_USER_MANAGED:
+ case DEVMEM_HEAP_TYPE_KERNEL_MANAGED:
+ if (! psHeap->uiSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ psHeap->eHeapType == DEVMEM_HEAP_TYPE_USER_MANAGED ?
+ "%s: Heap DEVMEM_HEAP_TYPE_USER_MANAGED is disabled.":
+ "%s: Heap DEVMEM_HEAP_TYPE_KERNEL_MANAGED is disabled."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_HEAP;
+ goto failVMRAAlloc;
+ }
+ break;
+
+ case DEVMEM_HEAP_TYPE_RA_MANAGED:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: This heap is managed by an RA, please use PVRSRVMapToDevice()"
+ " and don't use allocation flags that assume differently (e.g. SVM)."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+
+ default:
+ break;
+ }
+
+ /* Ensure supplied ui64OptionalMapAddress is within heap range */
+ uiHeapAddrEnd = psHeap->sBaseAddress.uiAddr + psHeap->uiSize;
+ if (ui64OptionalMapAddress >= uiHeapAddrEnd ||
+ ui64OptionalMapAddress + psImport->uiSize > uiHeapAddrEnd)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ui64OptionalMapAddress %p is outside of heap limits <%p:%p>."
+ , __func__
+ , (void*)(uintptr_t)ui64OptionalMapAddress
+ , (void*)(uintptr_t)psHeap->sBaseAddress.uiAddr
+ , (void*)(uintptr_t)uiHeapAddrEnd));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+
+ if (ui64OptionalMapAddress & ((1 << psHeap->uiLog2Quantum) - 1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid address to map to. Please prove an address aligned to"
+ "a page multiple of the heap."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+
+ uiAllocatedAddr = ui64OptionalMapAddress;
+
+ if (psImport->uiSize & ((1 << psHeap->uiLog2Quantum) - 1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid heap to map to. "
+ "Please choose a heap that can handle smaller page sizes."
+ , __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto failVMRAAlloc;
+ }
+ uiAllocatedSize = psImport->uiSize;
+ sBase.uiAddr = uiAllocatedAddr;
+ }
+
+ /* Setup page tables for the allocated VM space */
+ eError = BridgeDevmemIntReserveRange(psHeap->psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap,
+ sBase,
+ uiAllocatedSize,
+ &hReservation);
+ if (eError != PVRSRV_OK)
+ {
+ goto failReserve;
+ }
+
+ if (bMap)
+ {
+ DEVMEM_FLAGS_T uiMapFlags;
+
+ uiMapFlags = psImport->uiFlags & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK;
+
+ /* Actually map the PMR to allocated VM space */
+ eError = BridgeDevmemIntMapPMR(psHeap->psCtx->hDevConnection,
+ psHeap->hDevMemServerHeap,
+ hReservation,
+ psImport->hPMR,
+ uiMapFlags,
+ &psDeviceImport->hMapping);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+ psDeviceImport->bMapped = IMG_TRUE;
+ }
+
+ /* Setup device mapping specific parts of the mapping info */
+ psDeviceImport->hReservation = hReservation;
+ psDeviceImport->sDevVAddr.uiAddr = uiAllocatedAddr;
+ psDeviceImport->psHeap = psHeap;
+ }
+ else
+ {
+ /*
+ Check that we've been asked to map it into the
+ same heap 2nd time around
+ */
+ if (psHeap != psDeviceImport->psHeap)
+ {
+ eError = PVRSRV_ERROR_INVALID_HEAP;
+ goto failParams;
+ }
+ }
+ OSLockRelease(psDeviceImport->hLock);
+
+ return PVRSRV_OK;
+
+ failMap:
+ BridgeDevmemIntUnreserveRange(psHeap->psCtx->hDevConnection,
+ hReservation);
+ failReserve:
+ if (ui64OptionalMapAddress == 0)
+ {
+ RA_Free(psHeap->psQuantizedVMRA,
+ uiAllocatedAddr);
+ }
+ failVMRAAlloc:
+ bDestroyed = _DevmemImportStructRelease(psImport);
+ OSAtomicDecrement(&psHeap->hImportCount);
+ failParams:
+ if (!bDestroyed)
+ {
+ psDeviceImport->ui32RefCount--;
+ OSLockRelease(psDeviceImport->hLock);
+ }
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ Unmap an import from the Device
+ */
+IMG_INTERNAL
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_DEVICE_IMPORT *psDeviceImport;
+
+ psDeviceImport = &psImport->sDeviceImport;
+
+ OSLockAcquire(psDeviceImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psDeviceImport->ui32RefCount,
+ psDeviceImport->ui32RefCount-1);
+
+ if (--psDeviceImport->ui32RefCount == 0)
+ {
+ DEVMEM_HEAP *psHeap = psDeviceImport->psHeap;
+
+ if (psDeviceImport->bMapped)
+ {
+ eError = BridgeDevmemIntUnmapPMR(psImport->hDevConnection,
+ psDeviceImport->hMapping);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ eError = BridgeDevmemIntUnreserveRange(psImport->hDevConnection,
+ psDeviceImport->hReservation);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psDeviceImport->bMapped = IMG_FALSE;
+ psDeviceImport->hMapping = LACK_OF_MAPPING_POISON;
+ psDeviceImport->hReservation = LACK_OF_RESERVATION_POISON;
+
+ if (psHeap->eHeapType == DEVMEM_HEAP_TYPE_RA_MANAGED)
+ {
+ RA_Free(psHeap->psQuantizedVMRA,
+ psDeviceImport->sDevVAddr.uiAddr);
+ }
+
+ if (PVRSRV_CHECK_SVM_ALLOC(psImport->uiFlags))
+ {
+ _DevmemImportStructDevUnmapSVM(psHeap, psImport);
+ }
+
+ OSLockRelease(psDeviceImport->hLock);
+
+ _DevmemImportStructRelease(psImport);
+
+ OSAtomicDecrement(&psHeap->hImportCount);
+ }
+ else
+ {
+ OSLockRelease(psDeviceImport->hLock);
+ }
+}
+
+/*
+ Map an import into the CPU
+ */
+IMG_INTERNAL
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_CPU_IMPORT *psCPUImport;
+ size_t uiMappingLength;
+
+ psCPUImport = &psImport->sCPUImport;
+
+ OSLockAcquire(psCPUImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psCPUImport->ui32RefCount,
+ psCPUImport->ui32RefCount+1);
+
+ if (psCPUImport->ui32RefCount++ == 0)
+ {
+ _DevmemImportStructAcquire(psImport);
+
+ eError = OSMMapPMR(psImport->hDevConnection,
+ psImport->hPMR,
+ psImport->uiSize,
+ psImport->uiFlags,
+ &psCPUImport->hOSMMapData,
+ &psCPUImport->pvCPUVAddr,
+ &uiMappingLength);
+ if (eError != PVRSRV_OK)
+ {
+ goto failMap;
+ }
+
+ /* MappingLength might be rounded up to page size */
+ PVR_ASSERT(uiMappingLength >= psImport->uiSize);
+ }
+ OSLockRelease(psCPUImport->hLock);
+
+ return PVRSRV_OK;
+
+ failMap:
+ psCPUImport->ui32RefCount--;
+ if (!_DevmemImportStructRelease(psImport))
+ {
+ OSLockRelease(psCPUImport->hLock);
+ }
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ Unmap an import from the CPU
+ */
+IMG_INTERNAL
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport)
+{
+ DEVMEM_CPU_IMPORT *psCPUImport;
+
+ psCPUImport = &psImport->sCPUImport;
+
+ OSLockAcquire(psCPUImport->hLock);
+ DEVMEM_REFCOUNT_PRINT("%s (%p) %d->%d",
+ __FUNCTION__,
+ psImport,
+ psCPUImport->ui32RefCount,
+ psCPUImport->ui32RefCount-1);
+
+ if (--psCPUImport->ui32RefCount == 0)
+ {
+ /* psImport->uiSize is a 64-bit quantity whereas the 5th
+ * argument to OSMUnmapPMR is a 32-bit quantity on 32-bit systems
+ * hence a compiler warning of implicit cast and loss of data.
+ * Added explicit cast and assert to remove warning.
+ */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+ PVR_ASSERT(psImport->uiSize<IMG_UINT32_MAX);
+#endif
+ OSMUnmapPMR(psImport->hDevConnection,
+ psImport->hPMR,
+ psCPUImport->hOSMMapData,
+ psCPUImport->pvCPUVAddr,
+ psImport->uiSize);
+
+ OSLockRelease(psCPUImport->hLock);
+
+ _DevmemImportStructRelease(psImport);
+ }
+ else
+ {
+ OSLockRelease(psCPUImport->hLock);
+ }
+}
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicemem_utils.h b/drivers/gpu/drm/img-rogue/1.10/devicemem_utils.h
new file mode 100644
index 00000000000000..444430cfffe891
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicemem_utils.h
@@ -0,0 +1,457 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management internal utility functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally by device memory management
+ code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEM_UTILS_H_
+#define _DEVICEMEM_UTILS_H_
+
+#include "devicemem.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "ra.h"
+#include "osfunc.h"
+#include "lock.h"
+#include "osmmap.h"
+#include "devicemem_utils.h"
+
+#define DEVMEM_HEAPNAME_MAXLENGTH 160
+
+#if defined(DEVMEM_DEBUG) && defined(REFCOUNT_DEBUG)
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_ERROR, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define DEVMEM_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* If we need a "hMapping" but we don't have a server-side mapping, we
+ poison the entry with this value so that it's easily recognised in
+ the debugger. Note that this is potentially a valid handle, but
+ then so is NULL, which is no better, indeed worse, as it's not
+ obvious in the debugger. The value doesn't matter. We _never_ use
+ it (and because it's valid, we never assert it isn't this) but it's
+ nice to have a value in the source code that we can grep for when
+ things go wrong. */
+#define LACK_OF_MAPPING_POISON ((IMG_HANDLE)0x6116dead)
+#define LACK_OF_RESERVATION_POISON ((IMG_HANDLE)0x7117dead)
+
+#define DEVICEMEM_HISTORY_ALLOC_INDEX_NONE 0xFFFFFFFF
+
+struct _DEVMEM_CONTEXT_ {
+
+ SHARED_DEV_CONNECTION hDevConnection;
+
+ /* Number of heaps that have been created in this context
+ (regardless of whether they have allocations) */
+ IMG_UINT32 uiNumHeaps;
+
+ /*
+ Each "DEVMEM_CONTEXT" has a counterpart in the server,
+ which is responsible for handling the mapping into device MMU.
+ We have a handle to that here.
+ */
+ IMG_HANDLE hDevMemServerContext;
+
+ /* Number of automagically created heaps in this context,
+ i.e. those that are born at context creation time from the
+ chosen "heap config" or "blueprint" */
+ IMG_UINT32 uiAutoHeapCount;
+
+ /* pointer to array of such heaps */
+ struct _DEVMEM_HEAP_ **ppsAutoHeapArray;
+
+ /* The cache line size for use when allocating memory, as it is not queryable on the client side */
+ IMG_UINT32 ui32CPUCacheLineSize;
+
+ /* Private data handle for device specific data */
+ IMG_HANDLE hPrivData;
+
+ /* Memory allocated to be used for MCU fences */
+ DEVMEM_MEMDESC *psMCUFenceMemDesc;
+};
+
+
+typedef enum
+{
+ DEVMEM_HEAP_TYPE_UNKNOWN = 0,
+ DEVMEM_HEAP_TYPE_USER_MANAGED,
+ DEVMEM_HEAP_TYPE_KERNEL_MANAGED,
+ DEVMEM_HEAP_TYPE_RA_MANAGED,
+}DEVMEM_HEAP_TYPE;
+
+struct _DEVMEM_HEAP_ {
+ /* Name of heap - for debug and lookup purposes. */
+ IMG_CHAR *pszName;
+
+ /* Number of live imports in the heap */
+ ATOMIC_T hImportCount;
+
+ /*
+ * Base address and size of heap, required by clients because some
+ * requesters cannot address the full range
+ */
+ IMG_DEV_VIRTADDR sBaseAddress;
+ DEVMEM_SIZE_T uiSize;
+
+ /* The heap type, describing if the space is managed by the user or an RA*/
+ DEVMEM_HEAP_TYPE eHeapType;
+
+ /* This RA is for managing sub-allocations in virtual space. Two
+ more RAs will be used under the hood for managing the coarser
+ allocation of virtual space from the heap, and also for
+ managing the physical backing storage. */
+ RA_ARENA *psSubAllocRA;
+ IMG_CHAR *pszSubAllocRAName;
+ /*
+ This RA is for the coarse allocation of virtual space from the heap
+ */
+ RA_ARENA *psQuantizedVMRA;
+ IMG_CHAR *pszQuantizedVMRAName;
+
+ /* We also need to store a copy of the quantum size in order to
+ feed this down to the server */
+ IMG_UINT32 uiLog2Quantum;
+
+ /* Store a copy of the minimum import alignment */
+ IMG_UINT32 uiLog2ImportAlignment;
+
+ /* The relationship between tiled heap alignment and heap byte-stride
+ * (dependent on tiling mode, abstracted here) */
+ IMG_UINT32 uiLog2TilingStrideFactor;
+
+ /* The parent memory context for this heap */
+ struct _DEVMEM_CONTEXT_ *psCtx;
+
+ /* Lock to protect this structure */
+ POS_LOCK hLock;
+
+ /*
+ Each "DEVMEM_HEAP" has a counterpart in the server,
+ which is responsible for handling the mapping into device MMU.
+ We have a handle to that here.
+ */
+ IMG_HANDLE hDevMemServerHeap;
+};
+
+typedef IMG_UINT32 DEVMEM_PROPERTIES_T; /*!< Typedef for Devicemem properties */
+#define DEVMEM_PROPERTIES_EXPORTABLE (1UL<<0) /*!< Is it exportable? */
+#define DEVMEM_PROPERTIES_IMPORTED (1UL<<1) /*!< Is it imported from another process? */
+#define DEVMEM_PROPERTIES_SUBALLOCATABLE (1UL<<2) /*!< Is it suballocatable? */
+#define DEVMEM_PROPERTIES_UNPINNED (1UL<<3) /*!< Is it currently unpinned? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_ZEROED (1UL<<4) /*!< Is the memory fully zeroed? */
+#define DEVMEM_PROPERTIES_IMPORT_IS_CLEAN (1UL<<5) /*!< Is the memory clean, i.e. not been used before? */
+#define DEVMEM_PROPERTIES_SECURE (1UL<<6) /*!< Is it a special secure buffer? No CPU maps allowed! */
+#define DEVMEM_PROPERTIES_IMPORT_IS_POISONED (1UL<<7) /*!< Is the memory fully poisoned? */
+
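+/* Usage note: these property bits gate behaviour elsewhere in this module;
+ * for example, _DevmemMemDescRelease() only returns a suballocation to the
+ * sub-alloc RA when
+ * (psImport->uiProperties & DEVMEM_PROPERTIES_SUBALLOCATABLE) is set. */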
+
+typedef struct _DEVMEM_DEVICE_IMPORT_ {
+ DEVMEM_HEAP *psHeap; /*!< Heap this import is bound to */
+ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the import */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */
+ IMG_HANDLE hReservation; /*!< Device memory reservation handle */
+ IMG_HANDLE hMapping; /*!< Device mapping handle */
+ IMG_BOOL bMapped; /*!< Is this import mapped? */
+ POS_LOCK hLock; /*!< Lock to protect the device import */
+} DEVMEM_DEVICE_IMPORT;
+
+typedef struct _DEVMEM_CPU_IMPORT_ {
+ void *pvCPUVAddr; /*!< CPU virtual address of the import */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */
+ IMG_HANDLE hOSMMapData; /*!< CPU mapping handle */
+ POS_LOCK hLock; /*!< Lock to protect the CPU import */
+} DEVMEM_CPU_IMPORT;
+
+typedef struct _DEVMEM_IMPORT_ {
+ SHARED_DEV_CONNECTION hDevConnection;
+ IMG_DEVMEM_ALIGN_T uiAlign; /*!< Alignment of the PMR */
+ DEVMEM_SIZE_T uiSize; /*!< Size of import */
+ ATOMIC_T hRefCount; /*!< Refcount for this import */
+ DEVMEM_PROPERTIES_T uiProperties; /*!< Stores properties of an import like if
+ it is exportable, pinned or suballocatable */
+ IMG_HANDLE hPMR; /*!< Handle to the PMR */
+ DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */
+ POS_LOCK hLock; /*!< Lock to protect the import */
+
+ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the import */
+ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the import */
+} DEVMEM_IMPORT;
+
+typedef struct _DEVMEM_DEVICE_MEMDESC_ {
+ IMG_DEV_VIRTADDR sDevVAddr; /*!< Device virtual address of the allocation */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the device virtual address */
+ POS_LOCK hLock; /*!< Lock to protect device memdesc */
+} DEVMEM_DEVICE_MEMDESC;
+
+typedef struct _DEVMEM_CPU_MEMDESC_ {
+ void *pvCPUVAddr; /*!< CPU virtual address of the allocation */
+ IMG_UINT32 ui32RefCount; /*!< Refcount of the CPU virtual address */
+ POS_LOCK hLock; /*!< Lock to protect CPU memdesc */
+} DEVMEM_CPU_MEMDESC;
+
+struct _DEVMEM_MEMDESC_ {
+ DEVMEM_IMPORT *psImport; /*!< Import this memdesc is on */
+ IMG_DEVMEM_OFFSET_T uiOffset; /*!< Offset into import where our allocation starts */
+ IMG_DEVMEM_SIZE_T uiAllocSize; /*!< Size of the allocation */
+ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */
+ POS_LOCK hLock; /*!< Lock to protect memdesc */
+ IMG_HANDLE hPrivData;
+
+ DEVMEM_DEVICE_MEMDESC sDeviceMemDesc; /*!< Device specifics of the memdesc */
+ DEVMEM_CPU_MEMDESC sCPUMemDesc; /*!< CPU specifics of the memdesc */
+
+ IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this memdesc */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ IMG_UINT32 ui32AllocationIndex;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ IMG_HANDLE hRIHandle; /*!< Handle to RI information */
+#endif
+};
+
+/* The physical descriptor used to store handles and information of
+ * device physical allocations. */
+struct _DEVMEMX_PHYS_MEMDESC_ {
+ IMG_UINT32 uiNumPages; /*!< Number of pages that the import has */
+ IMG_UINT32 uiLog2PageSize; /*!< Log2 of the page size */
+ ATOMIC_T hRefCount; /*!< Refcount of the memdesc */
+ DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */
+ IMG_HANDLE hPMR; /*!< Handle to the PMR */
+ DEVMEM_CPU_IMPORT sCPUImport; /*!< CPU specifics of the memdesc */
+ DEVMEM_BRIDGE_HANDLE hBridge; /*!< Bridge connection for the server */
+};
+
+/* The virtual descriptor used to store handles and information of a
+ * device virtual range and the mappings to it. */
+struct _DEVMEMX_VIRT_MEMDESC_ {
+ IMG_UINT32 uiNumPages; /*!< Number of pages that the import has */
+ DEVMEM_FLAGS_T uiFlags; /*!< Flags for this import */
+ DEVMEMX_PHYSDESC **apsPhysDescTable; /*!< Table to store links to physical descs */
+ DEVMEM_DEVICE_IMPORT sDeviceImport; /*!< Device specifics of the memdesc */
+
+ IMG_CHAR szText[DEVMEM_ANNOTATION_MAX_LEN]; /*!< Annotation for this virt memdesc */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ IMG_UINT32 ui32AllocationIndex; /*!< To track mappings in this range */
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ IMG_HANDLE hRIHandle; /*!< Handle to RI information */
+#endif
+};
+
+#define DEVICEMEM_UTILS_NO_ADDRESS 0
+
+/******************************************************************************
+@Function _DevmemValidateParams
+@Description Check whether the flags conflict and whether the alignment is a size multiple.
+
+@Input uiSize Size of the import.
+@Input uiAlign Alignment of the import.
+@Input puiFlags Pointer to the flags for the import.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemValidateParams(IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T *puiFlags);
+
+/******************************************************************************
+@Function _DevmemImportStructAlloc
+@Description Allocates memory for an import struct. Does not allocate a PMR!
+ Creates locks for the CPU and Devmem mappings.
+
+@Input hDevConnection Connection to use for calls from the import.
+@Output ppsImport The allocated import.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructAlloc(SHARED_DEV_CONNECTION hDevConnection,
+ DEVMEM_IMPORT **ppsImport);
+
+/******************************************************************************
+@Function _DevmemImportStructInit
+@Description Initialises the import struct with the given parameters.
+ Sets its refcount to 1.
+
+@Input psImport The import to initialise.
+@Input uiSize Size of the import.
+@Input uiAlign Alignment of allocations in the import.
+@Input uiMapFlags Mapping flags for the import.
+@Input hPMR Reference to the PMR of this import struct.
+@Input uiProperties Properties of the import. Is it exportable,
+ imported, suballocatable, unpinned?
+******************************************************************************/
+void _DevmemImportStructInit(DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ IMG_HANDLE hPMR,
+ DEVMEM_PROPERTIES_T uiProperties);
+
+/******************************************************************************
+@Function _DevmemImportStructDevMap
+@Description NEVER call after the last _DevmemMemDescRelease()
+ Maps the PMR referenced by the import struct to the device's
+ virtual address space.
+ Does nothing but increase the device mapping refcount if the
+ import struct was already mapped.
+
+@Input psHeap The heap to map to.
+@Input bMap Caller can choose whether the import should actually
+ be mapped in the page tables or whether just a virtual
+ range should be reserved and the refcounts increased.
+@Input psImport The import we want to map.
+@Input uiOptionalMapAddress An optional address to map to.
+ Pass DEVICEMEM_UTILS_NO_ADDRESS if not used.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructDevMap(DEVMEM_HEAP *psHeap,
+ IMG_BOOL bMap,
+ DEVMEM_IMPORT *psImport,
+ IMG_UINT64 uiOptionalMapAddress);
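+
+/* A minimal usage sketch (illustrative only; error handling elided):
+ * reserve a virtual range without backing it in the page tables by
+ * passing bMap = IMG_FALSE and letting the allocator pick the address.
+ *
+ *     eError = _DevmemImportStructDevMap(psHeap, IMG_FALSE, psImport,
+ *                                        DEVICEMEM_UTILS_NO_ADDRESS);
+ */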
+
+/******************************************************************************
+@Function _DevmemImportStructDevUnmap
+@Description Unmaps the PMR referenced by the import struct from the
+ device's virtual address space.
+ If this was not the last remaining device mapping on the
+ import struct, only the device mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructDevUnmap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportStructCPUMap
+@Description NEVER call after the last _DevmemMemDescRelease()
+ Maps the PMR referenced by the import struct to the CPU's
+ virtual address space.
+ Does nothing but increase the CPU mapping refcount if the
+ import struct was already mapped.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemImportStructCPUMap(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportStructCPUUnmap
+@Description Unmaps the PMR referenced by the import struct from the CPU's
+ virtual address space.
+ If this was not the last remaining CPU mapping on the import
+ struct, only the CPU mapping refcount is decreased.
+******************************************************************************/
+void _DevmemImportStructCPUUnmap(DEVMEM_IMPORT *psImport);
+
+
+/******************************************************************************
+@Function _DevmemImportStructAcquire
+@Description Acquire an import struct by increasing its refcount.
+******************************************************************************/
+void _DevmemImportStructAcquire(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportStructRelease
+@Description Reduces the refcount of the import struct.
+ Destroys the import in the case it was the last reference.
+ Destroys underlying PMR if this import was the last reference
+ to it.
+@return A boolean to signal if the import was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL _DevmemImportStructRelease(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemImportDiscard
+@Description Discard a created but uninitialised import structure.
+ This must only be called before _DevmemImportStructInit,
+ after which _DevmemImportStructRelease must be used to
+ "free" the import structure.
+******************************************************************************/
+void _DevmemImportDiscard(DEVMEM_IMPORT *psImport);
+
+/******************************************************************************
+@Function _DevmemMemDescAlloc
+@Description Allocates a MemDesc and creates its various locks.
+ Zeroes the allocated memory.
+@return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR _DevmemMemDescAlloc(DEVMEM_MEMDESC **ppsMemDesc);
+
+/******************************************************************************
+@Function _DevmemMemDescInit
+@Description Sets the given offset and import struct fields in the MemDesc.
+ Initialises refcount to 1 and other values to 0.
+
+@Input psMemDesc MemDesc to initialise.
+@Input uiOffset Offset in the import structure.
+@Input psImport Import the MemDesc is on.
+@Input uiAllocSize Size of the allocation
+******************************************************************************/
+void _DevmemMemDescInit(DEVMEM_MEMDESC *psMemDesc,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ DEVMEM_IMPORT *psImport,
+ IMG_DEVMEM_SIZE_T uiAllocSize);
+
+/******************************************************************************
+@Function _DevmemMemDescAcquire
+@Description Acquires the MemDesc by increasing its refcount.
+******************************************************************************/
+void _DevmemMemDescAcquire(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function _DevmemMemDescRelease
+@Description Releases the MemDesc by reducing its refcount.
+ Destroys the MemDesc if its refcount is 0.
+ Destroys the import struct the MemDesc is on if this was the
+ last MemDesc on the import, which may in turn destroy the
+ underlying PMR.
+@return A boolean to signal if the MemDesc was destroyed. True = yes.
+******************************************************************************/
+IMG_BOOL _DevmemMemDescRelease(DEVMEM_MEMDESC *psMemDesc);
+
+/******************************************************************************
+@Function _DevmemMemDescDiscard
+@Description Discard a created but uninitialised MemDesc structure.
+ This must only be called before _DevmemMemDescInit,
+ after which _DevmemMemDescRelease must be used to
+ "free" the MemDesc structure.
+******************************************************************************/
+void _DevmemMemDescDiscard(DEVMEM_MEMDESC *psMemDesc);
+
+#endif /* _DEVICEMEM_UTILS_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicememx.h b/drivers/gpu/drm/img-rogue/1.10/devicememx.h
new file mode 100644
index 00000000000000..b0646ccc45e8e5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicememx.h
@@ -0,0 +1,176 @@
+/*************************************************************************/ /*!
+@File
+@Title X Device Memory Management core internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface for extended device memory management.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef DEVICEMEMX_H
+#define DEVICEMEMX_H
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "devicemem_utils.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "osfunc.h"
+
+/* DevmemXAllocPhysical()
+ *
+ * Allocate physical device memory and return a physical
+ * descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocPhysical(DEVMEM_CONTEXT *psCtx,
+ IMG_UINT32 uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+/* DevmemXReleasePhysical()
+ *
+ * Removes a physical device allocation if all references
+ * to it are dropped, otherwise just decreases the refcount.
+ */
+void
+DevmemXReleasePhysical(DEVMEMX_PHYSDESC *psPhysDesc);
+
+/* DevmemXAllocVirtual()
+ *
+ * Allocate and reserve a device virtual range and return
+ * a virtual descriptor for it.
+ */
+PVRSRV_ERROR
+DevmemXAllocVirtual(DEVMEM_HEAP* hHeap,
+ IMG_UINT32 uiNumPages,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEMX_VIRTDESC **ppsVirtDesc,
+ IMG_DEV_VIRTADDR *psVirtAddr);
+
+/* DevmemXFreeVirtual()
+ *
+ * Removes a device virtual range if all mappings on it
+ * have been removed.
+ */
+PVRSRV_ERROR
+DevmemXFreeVirtual(DEVMEMX_VIRTDESC *psVirtDesc);
+
+/* DevmemXMapVirtualRange()
+ *
+ * Map memory from a physical descriptor into a
+ * virtual range.
+ */
+PVRSRV_ERROR
+DevmemXMapVirtualRange(IMG_UINT32 ui32PageCount,
+ DEVMEMX_PHYSDESC *psPhysDesc,
+ IMG_UINT32 ui32PhysOffset,
+ DEVMEMX_VIRTDESC *psVirtDesc,
+ IMG_UINT32 ui32VirtOffset);
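+
+/* Typical call sequence, as a sketch only (the page counts, flags and
+ * annotation strings below are placeholder values): allocate physical
+ * pages, reserve a virtual range, then map the pages into that range.
+ *
+ *     DEVMEMX_PHYSDESC *psPhysDesc;
+ *     DEVMEMX_VIRTDESC *psVirtDesc;
+ *     IMG_DEV_VIRTADDR sVirtAddr;
+ *
+ *     eError = DevmemXAllocPhysical(psCtx, 4, 12, uiFlags,
+ *                                   "phys-alloc", &psPhysDesc);
+ *     eError = DevmemXAllocVirtual(psHeap, 4, uiFlags,
+ *                                  "virt-range", &psVirtDesc, &sVirtAddr);
+ *     eError = DevmemXMapVirtualRange(4, psPhysDesc, 0, psVirtDesc, 0);
+ */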
+
+/* DevmemXUnmapVirtualRange()
+ *
+ * Unmap pages from a device virtual range.
+ */
+PVRSRV_ERROR
+DevmemXUnmapVirtualRange(IMG_UINT32 ui32PageCount,
+ DEVMEMX_VIRTDESC *psVirtDesc,
+ IMG_UINT32 ui32VirtPgOffset);
+
+/* DevmemXMapPhysicalToCPU()
+ *
+ * Map a full physical descriptor to CPU space.
+ */
+PVRSRV_ERROR
+DevmemXMapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys,
+ IMG_CPU_VIRTADDR *psVirtAddr);
+
+/* DevmemXUnmapPhysicalToCPU()
+ *
+ * Remove the CPU mapping from the descriptor.
+ */
+PVRSRV_ERROR
+DevmemXUnmapPhysicalToCPU(DEVMEMX_PHYSDESC *psMemAllocPhys);
+
+/* DevmemXCreateDevmemMemDesc()
+ *
+ * DEPRECATED!
+ * DO NOT USE IN PRODUCTION DRIVER!
+ *
+ * Create a devmem memdesc from a physical and
+ * virtual descriptor.
+ * Always destroy with DevmemXFreeDevmemMemDesc().
+ */
+
+PVRSRV_ERROR
+DevmemXCreateDevmemMemDesc(const IMG_DEV_VIRTADDR sVirtualAddress,
+ DEVMEM_MEMDESC **ppsMemDesc);
+
+/* DevmemXFreeDevmemMemDesc()
+ *
+ * DEPRECATED!
+ * DO NOT USE IN PRODUCTION DRIVER!
+ *
+ * Frees the memdesc. This has no impact on the underlying
+ * physical and virtual descriptors.
+ */
+PVRSRV_ERROR
+DevmemXFreeDevmemMemDesc(DEVMEM_MEMDESC *psMemDesc);
+
+PVRSRV_ERROR
+_DevmemXFlagCompatibilityCheck(IMG_UINT32 uiPhysFlags,
+ IMG_UINT32 uiVirtFlags);
+
+PVRSRV_ERROR
+_DevmemXPhysDescAlloc(DEVMEMX_PHYSDESC **ppsPhysDesc);
+
+void
+_DevmemXPhysDescInit(DEVMEMX_PHYSDESC *psPhysDesc,
+ IMG_HANDLE hPMR,
+ IMG_UINT32 uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_HANDLE hBridge);
+
+void
+_DevmemXPhysDescFree(DEVMEMX_PHYSDESC *psPhysDesc);
+
+#endif /* DEVICEMEMX_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.c b/drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.c
new file mode 100644
index 00000000000000..42db38036f1db0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.c
@@ -0,0 +1,79 @@
+/*************************************************************************/ /*!
+@File
+@Title Shared X device memory management PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements common (client & server) PDump functions for the
+ memory management code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if defined(PDUMP)
+
+#include "devicememx_pdump.h"
+#include "pdump.h"
+#include "client_pdumpmm_bridge.h"
+#include "devicemem_utils.h"
+
+IMG_INTERNAL void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(uiSize != 0);
+ PVR_ASSERT(uiOffset + uiSize <= (psMemDescPhys->uiNumPages << psMemDescPhys->uiLog2PageSize));
+
+ eError = BridgePMRPDumpLoadMem(psMemDescPhys->hBridge,
+ psMemDescPhys->hPMR,
+ uiOffset,
+ uiSize,
+ uiPDumpFlags,
+ IMG_FALSE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+}
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.h b/drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.h
new file mode 100644
index 00000000000000..81743edf08f59c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/devicememx_pdump.h
@@ -0,0 +1,81 @@
+/*************************************************************************/ /*!
+@File
+@Title X Device Memory Management PDump internal
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal interface to PDump device memory management
+ functions that are shared between client and server code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DEVICEMEMX_PDUMP_H_
+#define _DEVICEMEMX_PDUMP_H_
+
+#include "devicememx.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+
+#if defined(PDUMP)
+/*
+ * DevmemXPDumpLoadMem()
+ *
+ * Same as DevmemPDumpLoadMem().
+ */
+extern void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(DevmemXPDumpLoadMem)
+#endif
+
+static INLINE void
+DevmemXPDumpLoadMem(DEVMEMX_PHYSDESC *psMemDescPhys,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMemDescPhys);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+}
+#endif /* PDUMP */
+#endif /* _DEVICEMEMX_PDUMP_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/dllist.h b/drivers/gpu/drm/img-rogue/1.10/dllist.h
new file mode 100644
index 00000000000000..38f9b6f0c7e12a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dllist.h
@@ -0,0 +1,278 @@
+/*************************************************************************/ /*!
+@File
+@Title Double linked list header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Double linked list interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DLLIST_
+#define _DLLIST_
+
+#include "img_types.h"
+
+/*!
+ Pointer to a linked list node
+*/
+typedef struct _DLLIST_NODE_ *PDLLIST_NODE;
+
+
+/*!
+ Node in a linked list
+*/
+/*
+ * Note: the following structure's size is architecture-dependent and
+ * clients may need to create a mirror of the structure definition if it
+ * needs to be used in a structure shared between host and device. Bear
+ * such clients in mind if any changes are made to this structure.
+ */
+typedef struct _DLLIST_NODE_
+{
+ struct _DLLIST_NODE_ *psPrevNode;
+ struct _DLLIST_NODE_ *psNextNode;
+} DLLIST_NODE;
+
+
+/*!
+ Static initialiser
+*/
+#define DECLARE_DLLIST(n) \
+DLLIST_NODE n = {&n, &n}
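+
+/* Usage sketch: a statically declared head is already initialised to point
+ * at itself (an empty list), so dllist_init() is only needed for heads
+ * created at runtime. The names below are placeholders.
+ *
+ *     DECLARE_DLLIST(gsMyList);    // static head, ready to use
+ *
+ *     DLLIST_NODE sHead;           // runtime head ...
+ *     dllist_init(&sHead);         // ... must be initialised explicitly
+ */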
+
+
+/*************************************************************************/ /*!
+@Function dllist_init
+
+@Description Initialise a new double linked list
+
+@Input psListHead List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_init(PDLLIST_NODE psListHead)
+{
+ psListHead->psPrevNode = psListHead;
+ psListHead->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function dllist_is_empty
+
+@Description Returns whether the list is empty
+
+@Input psListHead List head Node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_is_empty(PDLLIST_NODE psListHead)
+{
+ return (IMG_BOOL) ((psListHead->psPrevNode == psListHead)
+ && (psListHead->psNextNode == psListHead));
+}
+
+/*************************************************************************/ /*!
+@Function dllist_add_to_head
+
+@Description Add psNewNode to head of list psListHead
+
+@Input psListHead Head Node
+@Input psNewNode New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_head(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+ PDLLIST_NODE psTmp;
+
+ psTmp = psListHead->psNextNode;
+
+ psListHead->psNextNode = psNewNode;
+ psNewNode->psNextNode = psTmp;
+
+ psTmp->psPrevNode = psNewNode;
+ psNewNode->psPrevNode = psListHead;
+}
+
+
+/*************************************************************************/ /*!
+@Function dllist_add_to_tail
+
+@Description Add psNewNode to tail of list psListHead
+
+@Input psListHead Head Node
+@Input psNewNode New Node
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_add_to_tail(PDLLIST_NODE psListHead, PDLLIST_NODE psNewNode)
+{
+ PDLLIST_NODE psTmp;
+
+ psTmp = psListHead->psPrevNode;
+
+ psListHead->psPrevNode = psNewNode;
+ psNewNode->psPrevNode = psTmp;
+
+ psTmp->psNextNode = psNewNode;
+ psNewNode->psNextNode = psListHead;
+}
+
+/*************************************************************************/ /*!
+@Function dllist_node_is_in_list
+
+@Description Returns IMG_TRUE if psNode is in a list
+
+@Input psNode List node
+
+*/
+/*****************************************************************************/
+static INLINE
+IMG_BOOL dllist_node_is_in_list(PDLLIST_NODE psNode)
+{
+ return (IMG_BOOL) (psNode->psNextNode != NULL);
+}
+
+/*************************************************************************/ /*!
+@Function dllist_get_next_node
+
+@Description Returns the list node after psListHead, or NULL if
+ psListHead is the only element in the list.
+
+@Input psListHead List node to start the operation
+
+*/
+/*****************************************************************************/
+static INLINE
+PDLLIST_NODE dllist_get_next_node(PDLLIST_NODE psListHead)
+{
+ if (psListHead->psNextNode == psListHead)
+ {
+ return NULL;
+ }
+ else
+ {
+ return psListHead->psNextNode;
+ }
+}
+
+
+/*************************************************************************/ /*!
+@Function dllist_remove_node
+
+@Description Removes psListNode from the list where it currently belongs
+
+@Input psListNode List node to be removed
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_remove_node(PDLLIST_NODE psListNode)
+{
+ psListNode->psNextNode->psPrevNode = psListNode->psPrevNode;
+ psListNode->psPrevNode->psNextNode = psListNode->psNextNode;
+
+ /* Clear the node to show it's not on a list */
+ psListNode->psPrevNode = NULL;
+ psListNode->psNextNode = NULL;
+}
+
+/*************************************************************************/ /*!
+@Function dllist_replace_head
+
+@Description Moves the list from psOldHead to psNewHead
+
+@Input psOldHead List node to be replaced. Will become a head
+ node of an empty list.
+@Input psNewHead List node to be inserted. Must be an empty list
+ head.
+
+*/
+/*****************************************************************************/
+static INLINE
+void dllist_replace_head(PDLLIST_NODE psOldHead, PDLLIST_NODE psNewHead)
+{
+ if (dllist_is_empty(psOldHead))
+ {
+ psNewHead->psNextNode = psNewHead;
+ psNewHead->psPrevNode = psNewHead;
+ }
+ else
+ {
+ /* Change the neighbouring nodes */
+ psOldHead->psNextNode->psPrevNode = psNewHead;
+ psOldHead->psPrevNode->psNextNode = psNewHead;
+
+ /* Copy the old data to the new node */
+ psNewHead->psNextNode = psOldHead->psNextNode;
+ psNewHead->psPrevNode = psOldHead->psPrevNode;
+
+ /* Remove links to the previous list */
+ psOldHead->psNextNode = psOldHead;
+ psOldHead->psPrevNode = psOldHead;
+ }
+}
+
+/*************************************************************************/ /*!
+@Function dllist_foreach_node
+
+@Description Walk through all the nodes on the list.
+ Safe against removal of (node).
+
+@Input list_head List node to start the operation
+@Input node Current list node
+@Input next Node after the current one
+
+*/
+/*****************************************************************************/
+#define dllist_foreach_node(list_head, node, next) \
+ for (node = (list_head)->psNextNode, next = (node)->psNextNode; \
+ node != (list_head); \
+ node = next, next = (node)->psNextNode)
+
+#define dllist_foreach_node_backwards(list_head, node, prev) \
+ for (node = (list_head)->psPrevNode, prev = (node)->psPrevNode; \
+ node != (list_head); \
+ node = prev, prev = (node)->psPrevNode)
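+
+/* Illustrative walk (psNode/psNext and the predicate are placeholders):
+ * the macro caches the next pointer up front, so the current node can be
+ * removed safely while iterating.
+ *
+ *     PDLLIST_NODE psNode, psNext;
+ *     dllist_foreach_node(&gsMyList, psNode, psNext)
+ *     {
+ *         if (bShouldRemove)  // hypothetical condition
+ *         {
+ *             dllist_remove_node(psNode);
+ *         }
+ *     }
+ */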
+
+#endif /* _DLLIST_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/dma_fence_sync_native_server.c b/drivers/gpu/drm/img-rogue/1.10/dma_fence_sync_native_server.c
new file mode 100644
index 00000000000000..b6209b7a08123f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/dma_fence_sync_native_server.c
@@ -0,0 +1,188 @@
+/*************************************************************************/ /*!
+@File dma_fence_sync_native_server.c
+@Title Native implementation of server fence sync interface.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description The server implementation of software native synchronisation.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "osfunc.h"
+#include "rgxhwperf.h"
+#include "pvrsrv_sync_server.h"
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/sync_file.h>
+#include <linux/version.h>
+
+#include "kernel_compatibility.h"
+
+#include "pvr_sync.h"
+#include "pvr_counting_timeline.h"
+
+PVRSRV_ERROR SyncSWTimelineFenceCreateKM(PVRSRV_TIMELINE iSWTimeline,
+ IMG_UINT32 ui32NextSyncPtValue,
+ const IMG_CHAR *pszFenceName,
+ PVRSRV_FENCE *piOutputFence)
+{
+ PVRSRV_ERROR eError;
+ struct pvr_counting_fence_timeline *psSWTimeline;
+ struct dma_fence *psFence = NULL;
+ struct sync_file *psSyncFile = NULL;
+ int iFd = get_unused_fd_flags(O_CLOEXEC);
+
+ if (iFd < 0)
+ {
+ eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ goto ErrorOut;
+ }
+
+ psSWTimeline = pvr_sync_get_sw_timeline(iSWTimeline);
+ if (!psSWTimeline)
+ {
+ /* unrecognised timeline */
+ eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ goto ErrorPutFd;
+ }
+
+ psFence = pvr_counting_fence_create(psSWTimeline, ui32NextSyncPtValue);
+ pvr_counting_fence_timeline_put(psSWTimeline);
+ if(!psFence)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorPutFd;
+ }
+
+ psSyncFile = sync_file_create(psFence);
+ if (!psSyncFile)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorPutFence;
+ }
+
+ fd_install(iFd, psSyncFile->file);
+
+ *piOutputFence = (PVRSRV_FENCE)iFd;
+ return PVRSRV_OK;
+
+ErrorPutFence:
+ dma_fence_put(psFence);
+ErrorPutFd:
+ put_unused_fd(iFd);
+ErrorOut:
+ return eError;
+}
+
+PVRSRV_ERROR SyncSWTimelineAdvanceKM(SYNC_TIMELINE_OBJ pvSWTimelineObj)
+{
+ if (pvSWTimelineObj == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ pvr_counting_fence_timeline_inc(pvSWTimelineObj, 1);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SyncSWTimelineReleaseKM(SYNC_TIMELINE_OBJ pvSWTimelineObj)
+{
+ if (pvSWTimelineObj == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ pvr_counting_fence_timeline_put(pvSWTimelineObj);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SyncSWTimelineFenceReleaseKM(SYNC_FENCE_OBJ pvSWFenceObj)
+{
+ dma_fence_put(pvSWFenceObj);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SyncSWTimelineFenceWaitKM(SYNC_FENCE_OBJ pvSWFenceObj,
+ IMG_UINT32 uiTimeout)
+{
+ long lJiffies = msecs_to_jiffies(uiTimeout);
+ int err;
+
+ err = dma_fence_wait_timeout(pvSWFenceObj, true, lJiffies);
+ /* dma_fence_wait_timeout returns:
+ * 0 on timeout,
+ * -ERESTARTSYS if interrupted
+ * or the 'remaining timeout' on success. */
+ if (err == 0)
+ {
+#if 0
+ _DumpFence("sync_fence_wait", pvSWFenceObj, NULL, NULL);
+#endif
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ else if (err < 0)
+ {
+ return PVRSRV_ERROR_FAILED_DEPENDENCIES;
+ }
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SyncSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline,
+ SYNC_TIMELINE_OBJ *ppvSWTimelineObj)
+{
+ struct pvr_counting_fence_timeline *timeline =
+ pvr_sync_get_sw_timeline(iSWTimeline);
+
+ if (timeline == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *ppvSWTimelineObj = timeline;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SyncSWGetFenceObj(PVRSRV_FENCE iSWFence,
+ SYNC_FENCE_OBJ *ppvSWFenceObj)
+{
+ struct dma_fence *psFence = sync_file_get_fence(iSWFence);
+
+ if(psFence == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ *ppvSWFenceObj = (SYNC_FENCE_OBJ*)psFence;
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/env_connection.h b/drivers/gpu/drm/img-rogue/1.10/env_connection.h
new file mode 100644
index 00000000000000..abaa9d96d9c128
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/env_connection.h
@@ -0,0 +1,93 @@
+/*************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Linux specific server side connection management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_ENV_CONNECTION_H_)
+#define _ENV_CONNECTION_H_
+
+#include <linux/version.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "handle.h"
+#include "pvr_debug.h"
+#include "device.h"
+
+#if defined(SUPPORT_ION)
+#include PVR_ANDROID_ION_HEADER
+#include "ion_sys.h"
+#include "allocmem.h"
+#endif
+
+typedef struct _ENV_CONNECTION_PRIVATE_DATA_
+{
+ struct file *psFile;
+ PVRSRV_DEVICE_NODE *psDevNode;
+} ENV_CONNECTION_PRIVATE_DATA;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+#define ION_CLIENT_NAME_SIZE 50
+
+typedef struct _ENV_ION_CONNECTION_DATA_
+{
+ IMG_CHAR azIonClientName[ION_CLIENT_NAME_SIZE];
+ struct ion_device *psIonDev;
+ struct ion_client *psIonClient;
+} ENV_ION_CONNECTION_DATA;
+#endif
+
+typedef struct _ENV_CONNECTION_DATA_
+{
+ pid_t owner;
+
+ struct file *psFile;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ ENV_ION_CONNECTION_DATA *psIonData;
+#endif
+#if defined(SUPPORT_DRM_EXT)
+ void *pPriv;
+#endif
+} ENV_CONNECTION_DATA;
+
+#endif /* !defined(_ENV_CONNECTION_H_) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/event.c b/drivers/gpu/drm/img-rogue/1.10/event.c
new file mode 100644
index 00000000000000..4c4b1137e29f3d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/event.c
@@ -0,0 +1,436 @@
+/*************************************************************************/ /*!
+@File
+@Title Event Object
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <asm/hardirq.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/freezer.h>
+#include <linux/uaccess.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "event.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "pvr_bridge_k.h"
+
+#include "osfunc.h"
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+/* Returns pointer to task_struct that belongs to thread which acquired
+ * bridge lock. */
+extern struct task_struct *BridgeLockGetOwner(void);
+extern IMG_BOOL BridgeLockIsLocked(void);
+#endif
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_LIST_TAG
+{
+ rwlock_t sLock;
+ struct list_head sList;
+
+} PVRSRV_LINUX_EVENT_OBJECT_LIST;
+
+
+typedef struct PVRSRV_LINUX_EVENT_OBJECT_TAG
+{
+ atomic_t sTimeStamp;
+ IMG_UINT32 ui32TimeStampPrevious;
+#if defined(DEBUG)
+ IMG_UINT ui32Stats;
+#endif
+ wait_queue_head_t sWait;
+ struct list_head sList;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList;
+} PVRSRV_LINUX_EVENT_OBJECT;
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectListCreate
+
+ @Description
+
+ Linux wait object list creation
+
+ @Output phEventObjectList : Pointer to the event object list handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList)
+{
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList;
+
+ psEvenObjectList = OSAllocMem(sizeof(*psEvenObjectList));
+ if (psEvenObjectList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectCreate: failed to allocate memory for event list"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ INIT_LIST_HEAD(&psEvenObjectList->sList);
+
+ rwlock_init(&psEvenObjectList->sLock);
+
+ *phEventObjectList = (IMG_HANDLE) psEvenObjectList;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectListDestroy
+
+ @Description
+
+ Linux wait object list destruction
+
+ @Input hEventObjectList : Event object list handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList)
+{
+
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psEvenObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST *) hEventObjectList;
+
+ if(psEvenObjectList)
+ {
+ if (!list_empty(&psEvenObjectList->sList))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectListDestroy: Event List is not empty"));
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+ }
+ OSFreeMem(psEvenObjectList);
+ /*not nulling pointer, copy on stack*/
+ }
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectDelete
+
+ @Description
+
+ Linux wait object removal
+
+ @Input hOSEventObject : Event object handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject)
+{
+ if(hOSEventObject)
+ {
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)hOSEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = psLinuxEventObject->psLinuxEventObjectList;
+
+ write_lock_bh(&psLinuxEventObjectList->sLock);
+ list_del(&psLinuxEventObject->sList);
+ write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+#if defined(DEBUG)
+// PVR_DPF((PVR_DBG_MESSAGE, "LinuxEventObjectDelete: Event object waits: %u", psLinuxEventObject->ui32Stats));
+#endif
+
+ OSFreeMem(psLinuxEventObject);
+ /*not nulling pointer, copy on stack*/
+
+ return PVRSRV_OK;
+ }
+ return PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectAdd
+
+ @Description
+
+ Linux wait object addition
+
+ @Input hOSEventObjectList : Event object list handle
+ @Output phOSEventObject : Pointer to the event object handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject)
+{
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+
+ /* allocate completion variable */
+ psLinuxEventObject = OSAllocMem(sizeof(*psLinuxEventObject));
+ if (psLinuxEventObject == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed to allocate memory "));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ INIT_LIST_HEAD(&psLinuxEventObject->sList);
+
+ atomic_set(&psLinuxEventObject->sTimeStamp, 0);
+ psLinuxEventObject->ui32TimeStampPrevious = 0;
+
+#if defined(DEBUG)
+ psLinuxEventObject->ui32Stats = 0;
+#endif
+ init_waitqueue_head(&psLinuxEventObject->sWait);
+
+ psLinuxEventObject->psLinuxEventObjectList = psLinuxEventObjectList;
+
+ write_lock_bh(&psLinuxEventObjectList->sLock);
+ list_add(&psLinuxEventObject->sList, &psLinuxEventObjectList->sList);
+ write_unlock_bh(&psLinuxEventObjectList->sLock);
+
+ *phOSEventObject = psLinuxEventObject;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectSignal
+
+ @Description
+
+ Linux wait object signalling function
+
+ @Input hOSEventObjectList : Event object list handle
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList)
+{
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject;
+ PVRSRV_LINUX_EVENT_OBJECT_LIST *psLinuxEventObjectList = (PVRSRV_LINUX_EVENT_OBJECT_LIST*)hOSEventObjectList;
+ struct list_head *psListEntry, *psListEntryTemp, *psList;
+ psList = &psLinuxEventObjectList->sList;
+
+ read_lock_bh(&psLinuxEventObjectList->sLock);
+ list_for_each_safe(psListEntry, psListEntryTemp, psList)
+ {
+
+ psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *)list_entry(psListEntry, PVRSRV_LINUX_EVENT_OBJECT, sList);
+
+ atomic_inc(&psLinuxEventObject->sTimeStamp);
+ wake_up_interruptible(&psLinuxEventObject->sWait);
+ }
+ read_unlock_bh(&psLinuxEventObjectList->sLock);
+
+ return PVRSRV_OK;
+
+}
+
+static void _TryToFreeze(void)
+{
+ /* if we reach zero it means that all of the threads called try_to_freeze */
+ LinuxBridgeNumActiveKernelThreadsDecrement();
+
+ /* Returns true if the thread was frozen, should we do anything with this
+ * information? What do we return? Which one is the error case? */
+ try_to_freeze();
+
+ LinuxBridgeNumActiveKernelThreadsIncrement();
+}
+
+/*!
+******************************************************************************
+
+ @Function LinuxEventObjectWait
+
+ @Description
+
+ Linux wait object routine
+
+ @Input hOSEventObject : Event object handle
+
+ @Input ui64Timeoutus : Timeout value in usec
+
+ @Return PVRSRV_ERROR : Error code
+
+******************************************************************************/
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
+ IMG_UINT64 ui64Timeoutus,
+ IMG_BOOL bHoldBridgeLock,
+ IMG_BOOL bFreezable)
+{
+ IMG_UINT32 ui32TimeStamp;
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ IMG_BOOL bReleasePVRLock;
+#endif
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_UINT32 ui32Remainder;
+ long timeOutJiffies;
+ DEFINE_WAIT(sWait);
+
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject = (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+
+ /* Check if the driver is in good shape */
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* usecs_to_jiffies only takes a uint, so if our timeout is bigger than
+ * a uint, use the msec version. With such a long timeout we really don't
+ * need the high resolution of usecs. */
+ if (ui64Timeoutus > 0xffffffffULL)
+ timeOutJiffies = msecs_to_jiffies(OSDivide64(ui64Timeoutus, 1000, &ui32Remainder));
+ else
+ timeOutJiffies = usecs_to_jiffies(ui64Timeoutus);
+
+ do
+ {
+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+ ui32TimeStamp = (IMG_UINT32)atomic_read(&psLinuxEventObject->sTimeStamp);
+
+ if(psLinuxEventObject->ui32TimeStampPrevious != ui32TimeStamp)
+ {
+ /* there is a pending signal so return without waiting */
+ break;
+ }
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ /* Check thread holds the current PVR/bridge lock before obeying the
+ * 'release before deschedule' behaviour. Some threads choose not to
+ * hold the bridge lock in their implementation.
+ */
+ bReleasePVRLock = (!bHoldBridgeLock && BridgeLockIsLocked() && current == BridgeLockGetOwner());
+ if (bReleasePVRLock == IMG_TRUE)
+ {
+ OSReleaseBridgeLock();
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(bHoldBridgeLock);
+#endif
+
+ timeOutJiffies = schedule_timeout(timeOutJiffies);
+
+ if (bFreezable)
+ {
+ _TryToFreeze();
+ }
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (bReleasePVRLock == IMG_TRUE)
+ {
+ OSAcquireBridgeLock();
+ }
+#endif
+#if defined(DEBUG)
+ psLinuxEventObject->ui32Stats++;
+#endif
+
+ } while (timeOutJiffies);
+
+ finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+ psLinuxEventObject->ui32TimeStampPrevious = ui32TimeStamp;
+
+ return timeOutJiffies ? PVRSRV_OK : PVRSRV_ERROR_TIMEOUT;
+
+}
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ DEFINE_WAIT(sWait);
+
+ PVRSRV_LINUX_EVENT_OBJECT *psLinuxEventObject =
+ (PVRSRV_LINUX_EVENT_OBJECT *) hOSEventObject;
+
+ /* Check if the driver is in good shape */
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ prepare_to_wait(&psLinuxEventObject->sWait, &sWait, TASK_INTERRUPTIBLE);
+
+ if (psLinuxEventObject->ui32TimeStampPrevious !=
+ (IMG_UINT32) atomic_read(&psLinuxEventObject->sTimeStamp))
+ {
+ /* There is a pending signal, so return without waiting */
+ goto finish;
+ }
+
+ schedule();
+
+ _TryToFreeze();
+
+finish:
+ finish_wait(&psLinuxEventObject->sWait, &sWait);
+
+ psLinuxEventObject->ui32TimeStampPrevious =
+ (IMG_UINT32) atomic_read(&psLinuxEventObject->sTimeStamp);
+
+ return PVRSRV_OK;
+}
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/event.h b/drivers/gpu/drm/img-rogue/1.10/event.h
new file mode 100644
index 00000000000000..a9a4af96ce3557
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/event.h
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@File
+@Title Event Object
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+PVRSRV_ERROR LinuxEventObjectListCreate(IMG_HANDLE *phEventObjectList);
+PVRSRV_ERROR LinuxEventObjectListDestroy(IMG_HANDLE hEventObjectList);
+PVRSRV_ERROR LinuxEventObjectAdd(IMG_HANDLE hOSEventObjectList, IMG_HANDLE *phOSEventObject);
+PVRSRV_ERROR LinuxEventObjectDelete(IMG_HANDLE hOSEventObject);
+PVRSRV_ERROR LinuxEventObjectSignal(IMG_HANDLE hOSEventObjectList);
+PVRSRV_ERROR LinuxEventObjectWait(IMG_HANDLE hOSEventObject,
+ IMG_UINT64 ui64Timeoutus,
+ IMG_BOOL bHoldBridgeLock,
+ IMG_BOOL bFreezable);
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+PVRSRV_ERROR LinuxEventObjectWaitUntilSignalled(IMG_HANDLE hOSEventObject);
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/fwtrace_string.h b/drivers/gpu/drm/img-rogue/1.10/fwtrace_string.h
new file mode 100644
index 00000000000000..1cded113290ac7
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/fwtrace_string.h
@@ -0,0 +1,57 @@
+/*************************************************************************/ /*!
+@File fwtrace_string.h
+@Title RGX Firmware trace strings
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform Generic
+@Description    This file defines the SF (string format) tuples
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _FW_TRACE_STRING_H_
+#define _FW_TRACE_STRING_H_
+
+#include "rgx_fwif_sf.h"
+
+/* The tuple pairs generated via the X-macro are stored here. This macro
+ * definition must match the definition of SFids in rgx_fwif_sf.h */
+static const RGXFW_STID_FMT SFs[] = {
+#define X(a, b, c, d, e) { RGXFW_LOG_CREATESFID(a, b, e), d },
+ RGXFW_LOG_SFIDLIST
+#undef X
+};
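+
+/* Illustrative note (not part of this driver): each X(a, b, c, d, e) entry of
+ * RGXFW_LOG_SFIDLIST is assumed to carry an ID (a), a group (b), a format
+ * string (d) and an argument count (e); the X-macro above then turns every
+ * entry into a { packed SFID, format string } pair in the SFs[] table.
+ */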
+
+#endif /* _FW_TRACE_STRING_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/handle.c b/drivers/gpu/drm/img-rogue/1.10/handle.c
new file mode 100644
index 00000000000000..b8e995c966012d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/handle.c
@@ -0,0 +1,2480 @@
+/*************************************************************************/ /*!
+@File
+@Title Resource Handle Manager
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide resource handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+/* See handle.h for a description of the handle API. */
+
+/*
+ * The implementation supports movable handle structures, allowing the address
+ * of a handle structure to change without having to fix up pointers in
+ * any of the handle structures. For example, the linked list mechanism
+ * used to link subhandles together uses handle array indices rather than
+ * pointers to the structures themselves.
+ */
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+#include "handle.h"
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#define HANDLE_HASH_TAB_INIT_SIZE 32
+
+#define SET_FLAG(v, f) ((void)((v) |= (f)))
+#define CLEAR_FLAG(v, f) ((void)((v) &= (IMG_UINT)~(f)))
+#define TEST_FLAG(v, f) ((IMG_BOOL)(((v) & (f)) != 0))
+
+#define TEST_ALLOC_FLAG(psHandleData, f) TEST_FLAG((psHandleData)->eFlag, f)
+
+
+/* Linked list structure. Used for both the list head and list items */
+typedef struct _HANDLE_LIST_
+{
+ IMG_HANDLE hPrev;
+ IMG_HANDLE hNext;
+ IMG_HANDLE hParent;
+} HANDLE_LIST;
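+
+/* Illustrative note (not part of this driver): the list is circular and
+ * stores handles rather than pointers, so handle structures are free to move
+ * in memory. An empty list head therefore points back at its own handle:
+ *
+ *     psList->hPrev == psList->hNext == hOwnHandle
+ *
+ * and every traversal re-resolves a handle to its current address, e.g. via
+ * GetHandleData() below.
+ */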
+
+typedef struct _HANDLE_DATA_
+{
+ /* The handle that represents this structure */
+ IMG_HANDLE hHandle;
+
+ /* Handle type */
+ PVRSRV_HANDLE_TYPE eType;
+
+ /* Flags specified when the handle was allocated */
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag;
+
+ /* Pointer to the data that the handle represents */
+ void *pvData;
+
+ /*
+ * Callback specified at handle allocation time to
+ * release/destroy/free the data represented by the
+	 * handle when its reference count reaches 0. This
+ * should always be NULL for subhandles.
+ */
+ PFN_HANDLE_RELEASE pfnReleaseData;
+
+ /* List head for subhandles of this handle */
+ HANDLE_LIST sChildren;
+
+ /* List entry for sibling subhandles */
+ HANDLE_LIST sSiblings;
+
+ /* Reference count. The pfnReleaseData callback gets called when the
+ * reference count hits zero
+ */
+ IMG_UINT32 ui32RefCount;
+} HANDLE_DATA;
+
+struct _HANDLE_BASE_
+{
+	/* Pointer to a handle implementation's base structure */
+ HANDLE_IMPL_BASE *psImplBase;
+
+ /*
+ * Pointer to handle hash table.
+ * The hash table is used to do reverse lookups, converting data
+ * pointers to handles.
+ */
+ HASH_TABLE *psHashTab;
+
+ /* Can be connection, process, global */
+ PVRSRV_HANDLE_BASE_TYPE eType;
+};
+
+/*
+ * The key for the handle hash table is an array of three elements, the
+ * pointer to the resource, the resource type and the parent handle (or
+ * NULL if there is no parent). The eHandKey enumeration gives the
+ * array indices of the elements making up the key.
+ */
+enum eHandKey
+{
+ HAND_KEY_DATA = 0,
+ HAND_KEY_TYPE,
+ HAND_KEY_PARENT,
+ HAND_KEY_LEN /* Must be last item in list */
+};
+
+/* HAND_KEY is the type of the hash table key */
+typedef uintptr_t HAND_KEY[HAND_KEY_LEN];
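+
+/* Illustrative sketch (not part of this driver): a reverse lookup builds a
+ * key from the resource pointer, type and parent, then queries the hash
+ * table, mirroring what FindHandle() does further down:
+ *
+ *     HAND_KEY aKey;
+ *     InitKey(aKey, psBase, pvData, eType, hParent);
+ *     hHandle = (IMG_HANDLE)HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+ */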
+
+/* Stores a pointer to the function table of the handle back-end in use */
+static HANDLE_IMPL_FUNCTAB const *gpsHandleFuncs;
+
+/*
+ * Global lock used to serialise access to the handle functions, which would
+ * otherwise be safe only in a single-threaded context.
+ */
+static POS_LOCK gHandleLock;
+static IMG_BOOL gbLockInitialised = IMG_FALSE;
+
+void LockHandle(void)
+{
+ OSLockAcquire(gHandleLock);
+}
+
+void UnlockHandle(void)
+{
+ OSLockRelease(gHandleLock);
+}
+
+/*
+ * Kernel handle base structure. This is used for handles that are not
+ * allocated on behalf of a particular process.
+ */
+PVRSRV_HANDLE_BASE *gpsKernelHandleBase = NULL;
+
+/* Increase the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the increment
+ */
+static inline IMG_UINT32 _HandleRef(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+ if (!OSLockIsLocked(gHandleLock))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+ OSDumpStack();
+ }
+#endif
+ psHandleData->ui32RefCount++;
+ return psHandleData->ui32RefCount;
+}
+
+/* Decrease the reference count on the given handle.
+ * The handle lock must already be acquired.
+ * Returns: the reference count after the decrement
+ */
+static inline IMG_UINT32 _HandleUnref(HANDLE_DATA *psHandleData)
+{
+#if defined(PVRSRV_DEBUG_HANDLE_LOCK)
+ if (!OSLockIsLocked(gHandleLock))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle lock is not locked", __func__));
+ OSDumpStack();
+ }
+#endif
+ PVR_ASSERT(psHandleData->ui32RefCount > 0);
+ psHandleData->ui32RefCount--;
+
+ return psHandleData->ui32RefCount;
+}
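+
+/* Illustrative sketch (not part of this driver): both helpers assume the
+ * global handle lock is already held, so a caller brackets them like:
+ *
+ *     LockHandle();
+ *     _HandleRef(psHandleData);    // take a reference
+ *     ...                          // use the underlying object
+ *     _HandleUnref(psHandleData);  // drop the reference again
+ *     UnlockHandle();
+ */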
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+static const IMG_CHAR *HandleTypeToString(PVRSRV_HANDLE_TYPE eType)
+{
+ #define HANDLETYPE(x) \
+ case PVRSRV_HANDLE_TYPE_##x: \
+ return #x;
+ switch(eType)
+ {
+ #include "handle_types.h"
+ #undef HANDLETYPE
+
+ default:
+ return "INVALID";
+ }
+}
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+/*!
+******************************************************************************
+
+ @Function GetHandleData
+
+ @Description Get the handle data structure for a given handle
+
+ @Input psBase - pointer to handle base structure
+ ppsHandleData - location to return pointer to handle data structure
+ hHandle - handle from client
+ eType - handle type or PVRSRV_HANDLE_TYPE_NONE if the
+ handle type is not to be checked.
+
+ @Output ppsHandleData - points to a pointer to the handle data structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleData)
+#endif
+static INLINE
+PVRSRV_ERROR GetHandleData(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA **ppsHandleData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ HANDLE_DATA *psHandleData;
+ PVRSRV_ERROR eError;
+
+ eError = gpsHandleFuncs->pfnGetHandleData(psBase->psImplBase,
+ hHandle,
+ (void **)&psHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /*
+ * Unless PVRSRV_HANDLE_TYPE_NONE was passed in to this function,
+ * check handle is of the correct type.
+ */
+ if (eType != PVRSRV_HANDLE_TYPE_NONE && eType != psHandleData->eType)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "GetHandleData: Type mismatch. Lookup request: Handle %p, type: %s (%u) but stored handle is type %s (%u)",
+ hHandle,
+ HandleTypeToString(eType),
+ eType,
+ HandleTypeToString(psHandleData->eType),
+ psHandleData->eType));
+ return PVRSRV_ERROR_HANDLE_TYPE_MISMATCH;
+ }
+
+ /* Return the handle structure */
+ *ppsHandleData = psHandleData;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListInit
+
+ @Description Initialise a linked list structure embedded in a handle
+ structure.
+
+ @Input hHandle - handle containing the linked list structure
+ psList - pointer to linked list structure
+ hParent - parent handle or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInit)
+#endif
+static INLINE
+void HandleListInit(IMG_HANDLE hHandle, HANDLE_LIST *psList, IMG_HANDLE hParent)
+{
+ psList->hPrev = hHandle;
+ psList->hNext = hHandle;
+ psList->hParent = hParent;
+}
+
+/*!
+******************************************************************************
+
+ @Function InitParentList
+
+ @Description Initialise the children list head in a handle structure.
+ The children are the subhandles of this handle.
+
+ @Input psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitParentList)
+#endif
+static INLINE
+void InitParentList(HANDLE_DATA *psHandleData)
+{
+ IMG_HANDLE hParent = psHandleData->hHandle;
+
+ HandleListInit(hParent, &psHandleData->sChildren, hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function InitChildEntry
+
+ @Description Initialise the child list entry in a handle structure.
+ The list entry is used to link together subhandles of
+ a given handle.
+
+ @Input psHandleData - pointer to handle data structure
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitChildEntry)
+#endif
+static INLINE
+void InitChildEntry(HANDLE_DATA *psHandleData)
+{
+ HandleListInit(psHandleData->hHandle, &psHandleData->sSiblings, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListIsEmpty
+
+ @Description Determine whether a given linked list is empty.
+
+ @Input hHandle - handle containing the list head
+ psList - pointer to the list head
+
+ @Return IMG_TRUE if the list is empty, IMG_FALSE if it isn't.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIsEmpty)
+#endif
+/* Note: the hHandle parameter cannot simply be replaced by testing
+ * (psList->hPrev == psList->hNext): when the list holds a single entry, both
+ * links of the head point at that entry, which would be misreported as empty.
+ */
+static INLINE
+IMG_BOOL HandleListIsEmpty(IMG_HANDLE hHandle, HANDLE_LIST *psList)
+{
+ IMG_BOOL bIsEmpty;
+
+ bIsEmpty = (IMG_BOOL)(psList->hNext == hHandle);
+
+#ifdef DEBUG
+ {
+ IMG_BOOL bIsEmpty2;
+
+ bIsEmpty2 = (IMG_BOOL)(psList->hPrev == hHandle);
+ PVR_ASSERT(bIsEmpty == bIsEmpty2);
+ }
+#endif
+
+ return bIsEmpty;
+}
+
+#ifdef DEBUG
+/*!
+******************************************************************************
+
+ @Function NoChildren
+
+ @Description Determine whether a handle has any subhandles
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return IMG_TRUE if the handle has no subhandles, IMG_FALSE if it does.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoChildren)
+#endif
+static INLINE
+IMG_BOOL NoChildren(HANDLE_DATA *psHandleData)
+{
+ PVR_ASSERT(psHandleData->sChildren.hParent == psHandleData->hHandle);
+
+ return HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sChildren);
+}
+
+/*!
+******************************************************************************
+
+ @Function NoParent
+
+ @Description Determine whether a handle is a subhandle
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return IMG_TRUE if the handle is not a subhandle, IMG_FALSE if it is.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(NoParent)
+#endif
+static INLINE
+IMG_BOOL NoParent(HANDLE_DATA *psHandleData)
+{
+ if (HandleListIsEmpty(psHandleData->hHandle, &psHandleData->sSiblings))
+ {
+ PVR_ASSERT(psHandleData->sSiblings.hParent == NULL);
+
+ return IMG_TRUE;
+ }
+ else
+ {
+ PVR_ASSERT(psHandleData->sSiblings.hParent != NULL);
+ }
+ return IMG_FALSE;
+}
+#endif /*DEBUG*/
+
+/*!
+******************************************************************************
+
+ @Function ParentHandle
+
+ @Description Determine the parent of a handle
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return Parent handle, or NULL if the handle is not a subhandle.
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentHandle)
+#endif
+static INLINE
+IMG_HANDLE ParentHandle(HANDLE_DATA *psHandleData)
+{
+ return psHandleData->sSiblings.hParent;
+}
+
+/*
+ * GetHandleListFromHandleAndOffset is used to generate either a
+ * pointer to the subhandle list head, or a pointer to the linked list
+ * structure of an item on a subhandle list.
+ * The list head is itself on the list, but is at a different offset
+ * in the handle structure to the linked list structure for items on
+ * the list. The two linked list structures are differentiated by
+ * the third parameter, containing the parent handle. The parent field
+ * in the list head structure references the handle structure that contains
+ * it. For items on the list, the parent field in the linked list structure
+ * references the parent handle, which will be different from the handle
+ * containing the linked list structure.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(GetHandleListFromHandleAndOffset)
+#endif
+static INLINE
+HANDLE_LIST *GetHandleListFromHandleAndOffset(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hEntry,
+ IMG_HANDLE hParent,
+ size_t uiParentOffset,
+ size_t uiEntryOffset)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psBase != NULL);
+
+ eError = GetHandleData(psBase,
+ &psHandleData,
+ hEntry,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ return NULL;
+ }
+
+ if (hEntry == hParent)
+ {
+ return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiParentOffset);
+ }
+ else
+ {
+ return (HANDLE_LIST *)((IMG_CHAR *)psHandleData + uiEntryOffset);
+ }
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListInsertBefore
+
+ @Description Insert a handle before a handle currently on the list.
+
+ @Input hEntry - handle of the list entry to insert the new entry before
+ psEntry - pointer to the list structure of that entry
+ uiParentOffset - offset to list head struct in handle structure
+ hNewEntry - handle to be inserted
+ psNewEntry - pointer to handle structure of item to be inserted
+ uiEntryOffset - offset of list item struct in handle structure
+ hParent - parent handle of hNewEntry
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListInsertBefore)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListInsertBefore(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hEntry,
+ HANDLE_LIST *psEntry,
+ size_t uiParentOffset,
+ IMG_HANDLE hNewEntry,
+ HANDLE_LIST *psNewEntry,
+ size_t uiEntryOffset,
+ IMG_HANDLE hParent)
+{
+ HANDLE_LIST *psPrevEntry;
+
+ if (psBase == NULL || psEntry == NULL || psNewEntry == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psPrevEntry = GetHandleListFromHandleAndOffset(psBase,
+ psEntry->hPrev,
+ hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psPrevEntry == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ PVR_ASSERT(psNewEntry->hParent == NULL);
+ PVR_ASSERT(hEntry == psPrevEntry->hNext);
+
+#if defined(DEBUG)
+ {
+ HANDLE_LIST *psParentList;
+
+ psParentList = GetHandleListFromHandleAndOffset(psBase,
+ hParent,
+ hParent,
+ uiParentOffset,
+ uiParentOffset);
+ PVR_ASSERT(psParentList && psParentList->hParent == hParent);
+ }
+#endif /* defined(DEBUG) */
+
+ psNewEntry->hPrev = psEntry->hPrev;
+ psEntry->hPrev = hNewEntry;
+
+ psNewEntry->hNext = hEntry;
+ psPrevEntry->hNext = hNewEntry;
+
+ psNewEntry->hParent = hParent;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function AdoptChild
+
+ @Description Assign a subhandle to a handle
+
+ @Input psParentData - pointer to handle structure of parent handle
+ psChildData - pointer to handle structure of child subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(AdoptChild)
+#endif
+static INLINE
+PVRSRV_ERROR AdoptChild(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA *psParentData,
+ HANDLE_DATA *psChildData)
+{
+ IMG_HANDLE hParent = psParentData->sChildren.hParent;
+
+ PVR_ASSERT(hParent == psParentData->hHandle);
+
+ return HandleListInsertBefore(psBase,
+ hParent,
+ &psParentData->sChildren,
+ offsetof(HANDLE_DATA, sChildren),
+ psChildData->hHandle,
+ &psChildData->sSiblings,
+ offsetof(HANDLE_DATA, sSiblings),
+ hParent);
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListRemove
+
+ @Description Remove a handle from a list
+
+ @Input hEntry - handle to be removed
+ psEntry - pointer to handle structure of item to be removed
+ uiEntryOffset - offset of list item struct in handle structure
+ uiParentOffset - offset to list head struct in handle structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListRemove)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListRemove(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hEntry,
+ HANDLE_LIST *psEntry,
+ size_t uiEntryOffset,
+ size_t uiParentOffset)
+{
+ if (psBase == NULL || psEntry == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!HandleListIsEmpty(hEntry, psEntry))
+ {
+ HANDLE_LIST *psPrev;
+ HANDLE_LIST *psNext;
+
+ psPrev = GetHandleListFromHandleAndOffset(psBase,
+ psEntry->hPrev,
+ psEntry->hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psPrev == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ psNext = GetHandleListFromHandleAndOffset(psBase,
+ psEntry->hNext,
+ psEntry->hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psNext == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ /*
+ * The list head is on the list, and we don't want to
+ * remove it.
+ */
+ PVR_ASSERT(psEntry->hParent != NULL);
+
+ psPrev->hNext = psEntry->hNext;
+ psNext->hPrev = psEntry->hPrev;
+
+ HandleListInit(hEntry, psEntry, NULL);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function UnlinkFromParent
+
+ @Description Remove a subhandle from its parent's list
+
+ @Input psHandleData - pointer to handle data structure of child subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(UnlinkFromParent)
+#endif
+static INLINE
+PVRSRV_ERROR UnlinkFromParent(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA *psHandleData)
+{
+ return HandleListRemove(psBase,
+ psHandleData->hHandle,
+ &psHandleData->sSiblings,
+ offsetof(HANDLE_DATA, sSiblings),
+ offsetof(HANDLE_DATA, sChildren));
+}
+
+/*!
+******************************************************************************
+
+ @Function HandleListIterate
+
+ @Description Iterate over the items in a list
+
+ @Input psHead - pointer to list head
+ uiParentOffset - offset to list head struct in handle structure
+ uiEntryOffset - offset of list item struct in handle structure
+ pfnIterFunc - function to be called for each handle in the list
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(HandleListIterate)
+#endif
+static INLINE
+PVRSRV_ERROR HandleListIterate(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_LIST *psHead,
+ size_t uiParentOffset,
+ size_t uiEntryOffset,
+ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+ IMG_HANDLE hHandle = psHead->hNext;
+ IMG_HANDLE hParent = psHead->hParent;
+ IMG_HANDLE hNext;
+
+ PVR_ASSERT(psHead->hParent != NULL);
+
+ /*
+ * Follow the next chain from the list head until we reach
+ * the list head again, which signifies the end of the list.
+ */
+ while (hHandle != hParent)
+ {
+ HANDLE_LIST *psEntry;
+ PVRSRV_ERROR eError;
+
+ psEntry = GetHandleListFromHandleAndOffset(psBase,
+ hHandle,
+ hParent,
+ uiParentOffset,
+ uiEntryOffset);
+ if (psEntry == NULL)
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ PVR_ASSERT(psEntry->hParent == psHead->hParent);
+
+ /*
+ * Get the next index now, in case the list item is
+ * modified by the iteration function.
+ */
+ hNext = psEntry->hNext;
+
+ eError = (*pfnIterFunc)(psBase, hHandle);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ hHandle = hNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function IterateOverChildren
+
+ @Description Iterate over the subhandles of a parent handle
+
+ @Input psParentData - pointer to parent handle structure
+ pfnIterFunc - function to be called for each subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(IterateOverChildren)
+#endif
+static INLINE
+PVRSRV_ERROR IterateOverChildren(PVRSRV_HANDLE_BASE *psBase,
+ HANDLE_DATA *psParentData,
+ PVRSRV_ERROR (*pfnIterFunc)(PVRSRV_HANDLE_BASE *, IMG_HANDLE))
+{
+ return HandleListIterate(psBase,
+ &psParentData->sChildren,
+ offsetof(HANDLE_DATA, sChildren),
+ offsetof(HANDLE_DATA, sSiblings),
+ pfnIterFunc);
+}
+
+/*!
+******************************************************************************
+
+ @Function ParentIfPrivate
+
+ @Description Return the parent handle if the handle was allocated
+ with PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, else return
+ NULL
+
+ @Input psHandleData - pointer to handle data structure
+
+ @Return Parent handle, or NULL
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(ParentIfPrivate)
+#endif
+static INLINE
+IMG_HANDLE ParentIfPrivate(HANDLE_DATA *psHandleData)
+{
+ return TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ?
+ ParentHandle(psHandleData) : NULL;
+}
+
+/*!
+******************************************************************************
+
+ @Function InitKey
+
+ @Description Initialise a hash table key for a resource / type / parent triple
+
+ @Input psBase - pointer to handle base structure
+ aKey - pointer to key
+ pvData - pointer to the resource the handle represents
+ eType - type of resource
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(InitKey)
+#endif
+static INLINE
+void InitKey(HAND_KEY aKey,
+ PVRSRV_HANDLE_BASE *psBase,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_HANDLE hParent)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+
+ aKey[HAND_KEY_DATA] = (uintptr_t)pvData;
+ aKey[HAND_KEY_TYPE] = (uintptr_t)eType;
+ aKey[HAND_KEY_PARENT] = (uintptr_t)hParent;
+}
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle);
+
+/*!
+******************************************************************************
+
+ @Function FreeHandle
+
+ @Description Free a handle data structure.
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle to be freed
+ eType - Type of the handle to be freed
+ ppvData - Location for data associated with the freed handle
+
+ @Output ppvData - Points to data that was associated with the freed handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR FreeHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ void **ppvData)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ HANDLE_DATA *psReleasedHandleData;
+ PVRSRV_ERROR eError;
+
+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (_HandleUnref(psHandleData) > 0)
+ {
+ /* this handle still has references so do not destroy it
+ * or the underlying object yet
+ */
+ return PVRSRV_OK;
+ }
+
+ /* Call the release data callback for each reference on the handle */
+ if (psHandleData->pfnReleaseData != NULL)
+ {
+ eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FreeHandle: "
+ "Got retry while calling release data callback for %p (type = %d)",
+ hHandle,
+ (IMG_UINT32)psHandleData->eType));
+
+ /* the caller should retry, so retain a reference on the handle */
+ _HandleRef(psHandleData);
+
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ HAND_KEY aKey;
+ IMG_HANDLE hRemovedHandle;
+
+ InitKey(aKey, psBase, psHandleData->pvData, psHandleData->eType, ParentIfPrivate(psHandleData));
+
+ hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psBase->psHashTab, aKey);
+
+ PVR_ASSERT(hRemovedHandle != NULL);
+ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+ PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+ }
+
+ eError = UnlinkFromParent(psBase, psHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "FreeHandle: Error whilst unlinking from parent handle (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* Free children */
+ eError = IterateOverChildren(psBase, psHandleData, FreeHandleWrapper);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "FreeHandle: Error whilst freeing subhandles (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ eError = gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase,
+ psHandleData->hHandle,
+ (void **)&psReleasedHandleData);
+ if (eError == PVRSRV_OK)
+ {
+ PVR_ASSERT(psReleasedHandleData == psHandleData);
+ }
+
+ if (ppvData)
+ {
+ *ppvData = psHandleData->pvData;
+ }
+
+ OSFreeMem(psHandleData);
+
+ return eError;
+}
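+
+/* Illustrative sketch (not part of this driver): when the release callback
+ * returns PVRSRV_ERROR_RETRY, FreeHandle() re-takes its reference and passes
+ * the error up, so the caller still owns the handle and may retry later:
+ *
+ *     PVRSRV_ERROR eErr = FreeHandle(psBase, hHandle, eType, NULL);
+ *     if (eErr == PVRSRV_ERROR_RETRY)
+ *     {
+ *         // resource still busy; attempt the free again later
+ *     }
+ */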
+
+static PVRSRV_ERROR FreeHandleWrapper(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle)
+{
+ return FreeHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_NONE, NULL);
+}
+
+/*!
+******************************************************************************
+
+ @Function FindHandle
+
+ @Description Find handle corresponding to a resource pointer
+
+ @Input psBase - pointer to handle base structure
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Return the handle, or NULL if not found
+
+******************************************************************************/
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(FindHandle)
+#endif
+static INLINE
+IMG_HANDLE FindHandle(PVRSRV_HANDLE_BASE *psBase,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_HANDLE hParent)
+{
+ HAND_KEY aKey;
+
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+
+ InitKey(aKey, psBase, pvData, eType, hParent);
+
+ return (IMG_HANDLE) HASH_Retrieve_Extended(psBase->psHashTab, aKey);
+}
+
+/*!
+******************************************************************************
+
+ @Function AllocHandle
+
+ @Description Allocate a new handle
+
+ @Input phHandle - location for new handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+ hParent - parent handle or NULL
+ pfnReleaseData - Function to release resource at handle release
+ time
+
+ @Output phHandle - points to new handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ IMG_HANDLE hParent,
+ PFN_HANDLE_RELEASE pfnReleaseData)
+{
+ HANDLE_DATA *psNewHandleData;
+ IMG_HANDLE hHandle;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(psBase != NULL && psBase->psHashTab != NULL);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ /* Handle must not already exist */
+ PVR_ASSERT(FindHandle(psBase, pvData, eType, hParent) == NULL);
+ }
+
+ psNewHandleData = OSAllocZMem(sizeof(*psNewHandleData));
+ if (psNewHandleData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't allocate handle data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = gpsHandleFuncs->pfnAcquireHandle(psBase->psImplBase, &hHandle, psNewHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Failed to acquire a handle"));
+ goto ErrorFreeHandleData;
+ }
+
+ /*
+ * If a data pointer can be associated with multiple handles, we
+ * don't put the handle in the hash table, as the data pointer
+ * may not map to a unique handle
+ */
+ if (!TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ HAND_KEY aKey;
+
+ /* Initialise hash key */
+ InitKey(aKey, psBase, pvData, eType, hParent);
+
+ /* Put the new handle in the hash table */
+ if (!HASH_Insert_Extended(psBase->psHashTab, aKey, (uintptr_t)hHandle))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AllocHandle: Couldn't add handle to hash table"));
+ eError = PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+ goto ErrorReleaseHandle;
+ }
+ }
+
+ psNewHandleData->hHandle = hHandle;
+ psNewHandleData->eType = eType;
+ psNewHandleData->eFlag = eFlag;
+ psNewHandleData->pvData = pvData;
+ psNewHandleData->pfnReleaseData = pfnReleaseData;
+ psNewHandleData->ui32RefCount = 1;
+
+ InitParentList(psNewHandleData);
+#if defined(DEBUG)
+ PVR_ASSERT(NoChildren(psNewHandleData));
+#endif
+
+ InitChildEntry(psNewHandleData);
+#if defined(DEBUG)
+ PVR_ASSERT(NoParent(psNewHandleData));
+#endif
+
+ /* Return the new handle to the client */
+ *phHandle = psNewHandleData->hHandle;
+
+ return PVRSRV_OK;
+
+ErrorReleaseHandle:
+ (void)gpsHandleFuncs->pfnReleaseHandle(psBase->psImplBase, hHandle, NULL);
+
+ErrorFreeHandleData:
+ OSFreeMem(psNewHandleData);
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocHandle
+
+ @Description Allocate a handle
+
+ @Input phHandle - location for new handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+ pfnReleaseData - Function to release resource at handle release
+ time
+
+ @Output phHandle - points to new handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ PFN_HANDLE_RELEASE pfnReleaseData)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVAllocHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, pfnReleaseData);
+ UnlockHandle();
+
+ return eError;
+}
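+
+/* Illustrative sketch (not part of this driver): a typical allocation pairs a
+ * resource with a release callback. psMyRes, _MyResRelease, eMyHandleType and
+ * the NONE flag are assumptions made for the example:
+ *
+ *     IMG_HANDLE hRes;
+ *     PVRSRV_ERROR eErr = PVRSRVAllocHandle(psBase, &hRes, psMyRes,
+ *                                           eMyHandleType,
+ *                                           PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+ *                                           _MyResRelease);
+ *
+ * _MyResRelease(psMyRes) then runs automatically when the last reference on
+ * hRes is dropped through PVRSRVReleaseHandle().
+ */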
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocHandleUnlocked
+
+ @Description Allocate a handle without acquiring/releasing the handle
+ lock. The function assumes you hold the lock when called.
+
+ @Input phHandle - location for new handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+ pfnReleaseData - Function to release resource at handle release
+ time
+
+ @Output phHandle - points to new handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ PFN_HANDLE_RELEASE pfnReleaseData)
+{
+ PVRSRV_ERROR eError;
+
+ *phHandle = NULL;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ if (pfnReleaseData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandle: Missing release function"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ eError = AllocHandle(psBase, phHandle, pvData, eType, eFlag, NULL, pfnReleaseData);
+
+Exit:
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocSubHandle
+
+ @Description Allocate a subhandle
+
+ @Input phHandle - location for new subhandle
+ pvData - pointer to resource to be associated with the subhandle
+ eType - the type of resource
+ hParent - parent handle
+
+ @Output phHandle - points to new subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ IMG_HANDLE hParent)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVAllocSubHandleUnlocked(psBase, phHandle, pvData, eType, eFlag, hParent);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocSubHandleUnlocked
+
+ @Description Allocate a subhandle without acquiring/releasing the
+ handle lock. The function assumes you hold the lock when called.
+
+ @Input phHandle - location for new subhandle
+ pvData - pointer to resource to be associated with the subhandle
+ eType - the type of resource
+ hParent - parent handle
+
+ @Output phHandle - points to new subhandle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType,
+ PVRSRV_HANDLE_ALLOC_FLAG eFlag,
+ IMG_HANDLE hParent)
+{
+ HANDLE_DATA *psPHandleData = NULL;
+ HANDLE_DATA *psCHandleData = NULL;
+ IMG_HANDLE hParentKey;
+ IMG_HANDLE hHandle;
+ PVRSRV_ERROR eError;
+
+ *phHandle = NULL;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ hParentKey = TEST_FLAG(eFlag, PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE) ? hParent : NULL;
+
+ /* Lookup the parent handle */
+ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+ goto Exit;
+ }
+
+ eError = AllocHandle(psBase, &hHandle, pvData, eType, eFlag, hParentKey, NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto Exit;
+ }
+
+ eError = GetHandleData(psBase, &psCHandleData, hHandle, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get subhandle's handle structure"));
+
+		/* If we were able to allocate the handle then there should be no reason why we
+		   can't also get its handle structure. Otherwise something has gone badly wrong. */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ goto Exit;
+ }
+
+ /*
+ * Get the parent handle structure again, in case the handle
+ * structure has moved (depending on the implementation
+ * of AllocHandle).
+ */
+ eError = GetHandleData(psBase, &psPHandleData, hParent, PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Failed to get parent handle structure"));
+
+ (void)FreeHandle(psBase, hHandle, eType, NULL);
+ goto Exit;
+ }
+
+ eError = AdoptChild(psBase, psPHandleData, psCHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocSubHandle: Parent handle failed to adopt subhandle"));
+
+ (void)FreeHandle(psBase, hHandle, eType, NULL);
+ goto Exit;
+ }
+
+ *phHandle = hHandle;
+
+ eError = PVRSRV_OK;
+
+Exit:
+ return eError;
+}
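+
+/* Illustrative sketch (not part of this driver): subhandles carry no release
+ * callback of their own; FreeHandle() frees them while iterating over the
+ * parent's children, so releasing the parent is enough. hSub, psSubRes,
+ * eSubType and eParentType are assumptions made for the example:
+ *
+ *     PVRSRVAllocSubHandle(psBase, &hSub, psSubRes, eSubType,
+ *                          PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE, hParent);
+ *     ...
+ *     PVRSRVReleaseHandle(psBase, hParent, eParentType); // also frees hSub
+ */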
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFindHandle
+
+ @Description Find handle corresponding to a resource pointer
+
+ @Input phHandle - location for returned handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Output phHandle - points to handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVFindHandleUnlocked(psBase, phHandle, pvData, eType);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFindHandleUnlocked
+
+ @Description Find handle corresponding to a resource pointer without
+ acquiring/releasing the handle lock. The function assumes you hold
+ the lock when called.
+
+ @Input phHandle - location for returned handle
+ pvData - pointer to resource to be associated with the handle
+ eType - the type of resource
+
+ @Output phHandle - points to handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ IMG_HANDLE hHandle;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVFindHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ /* See if there is a handle for this data pointer */
+ hHandle = FindHandle(psBase, pvData, eType, NULL);
+ if (hHandle == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVFindHandle: Error finding handle. Type %u",
+ eType));
+
+ eError = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto Exit;
+ }
+
+ *phHandle = hHandle;
+
+ eError = PVRSRV_OK;
+
+Exit:
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupHandle
+
+ @Description Lookup the data pointer corresponding to a handle
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+ bRef - If TRUE, a reference will be added on the handle if the
+ lookup is successful.
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ void **ppvData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_BOOL bRef)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVLookupHandleUnlocked(psBase, ppvData, hHandle, eType, bRef);
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupHandleUnlocked
+
+ @Description Lookup the data pointer corresponding to a handle without
+ acquiring/releasing the handle lock. The function assumes you
+ hold the lock when called.
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+ bRef - If TRUE, a reference will be added on the handle if the
+ lookup is successful.
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ void **ppvData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_BOOL bRef)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVLookupHandle: Error looking up handle (%s). Handle %p, type %u",
+ PVRSRVGetErrorStringKM(eError),
+ (void*) hHandle,
+ eType));
+#if defined(DEBUG) || defined(PVRSRV_NEED_PVR_DPF)
+ OSDumpStack();
+#endif
+ goto Exit;
+ }
+
+ if (bRef)
+ {
+ _HandleRef(psHandleData);
+ }
+
+ *ppvData = psHandleData->pvData;
+
+ eError = PVRSRV_OK;
+
+Exit:
+	return eError;
+}
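+
+/* Illustrative sketch (not part of this driver): a lookup with bRef=IMG_TRUE
+ * pins the object against release by another thread and must be balanced by
+ * a PVRSRVReleaseHandle() once the caller is done:
+ *
+ *     void *pvObj;
+ *     if (PVRSRVLookupHandle(psBase, &pvObj, hHandle, eType, IMG_TRUE)
+ *             == PVRSRV_OK)
+ *     {
+ *         ...                                          // pvObj cannot vanish
+ *         PVRSRVReleaseHandle(psBase, hHandle, eType); // drop the reference
+ *     }
+ */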
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVLookupSubHandle
+
+ @Description Lookup the data pointer corresponding to a subhandle
+
+ @Input ppvData - location to return data pointer
+ hHandle - handle from client
+ eType - handle type
+ hAncestor - ancestor handle
+
+ @Output ppvData - points to the data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ void **ppvData,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType,
+ IMG_HANDLE hAncestor)
+{
+ HANDLE_DATA *psPHandleData = NULL;
+ HANDLE_DATA *psCHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnlock;
+ }
+
+ eError = GetHandleData(psBase, &psCHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVLookupSubHandle: Error looking up subhandle (%s). Handle %p, type %u",
+ PVRSRVGetErrorStringKM(eError),
+ (void*) hHandle,
+ eType));
+ OSDumpStack();
+ goto ExitUnlock;
+ }
+
+ /* Look for hAncestor among the handle's ancestors */
+ for (psPHandleData = psCHandleData; ParentHandle(psPHandleData) != hAncestor; )
+ {
+ eError = GetHandleData(psBase, &psPHandleData, ParentHandle(psPHandleData), PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "PVRSRVLookupSubHandle: Subhandle doesn't belong to the given ancestor"));
+ eError = PVRSRV_ERROR_INVALID_SUBHANDLE;
+ goto ExitUnlock;
+ }
+ }
+
+ *ppvData = psCHandleData->pvData;
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetParentHandle
+
+ @Description Lookup the parent of a handle
+
+ @Input phParent - location for returning parent handle
+ hHandle - handle for which the parent handle is required
+ eType - handle type
+ hParent - parent handle
+
+ @Output *phParent - parent handle, or NULL if there is no parent
+
+ @Return Error code or PVRSRV_OK. Note that not having a parent is
+ not regarded as an error.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE *phParent,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVGetParentHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnlock;
+ }
+
+ eError = GetHandleData(psBase, &psHandleData, hHandle, eType);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVGetParentHandle: Error looking up subhandle (%s). Type %u",
+ PVRSRVGetErrorStringKM(eError),
+ eType));
+ OSDumpStack();
+ goto ExitUnlock;
+ }
+
+ *phParent = ParentHandle(psHandleData);
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVReleaseHandle
+
+ @Description Release a handle that is no longer needed
+
+ @Input hHandle - handle from client
+ eType - handle type
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ PVRSRV_ERROR eError;
+
+ LockHandle();
+ eError = PVRSRVReleaseHandleUnlocked(psBase, hHandle, eType);
+ UnlockHandle();
+
+ return eError;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVReleaseHandleUnlocked
+
+ @Description Release a handle that is no longer needed without
+ acquiring/releasing the handle lock. The function assumes you
+ hold the lock when called.
+
+ @Input hHandle - handle from client
+ eType - handle type
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase,
+ IMG_HANDLE hHandle,
+ PVRSRV_HANDLE_TYPE eType)
+{
+ PVRSRV_ERROR eError;
+
+ /* PVRSRV_HANDLE_TYPE_NONE is reserved for internal use */
+ PVR_ASSERT(eType != PVRSRV_HANDLE_TYPE_NONE);
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVReleaseHandle: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Exit;
+ }
+
+ eError = FreeHandle(psBase, hHandle, eType, NULL);
+
+Exit:
+	return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPurgeHandles
+
+ @Description Purge handles for a given handle base
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVPurgeHandles: Missing handle base"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ExitUnlock;
+ }
+
+ eError = gpsHandleFuncs->pfnPurgeHandles(psBase->psImplBase);
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVAllocHandleBase
+
+ @Description Allocate a handle base structure for a process
+
+ @Input ppsBase - pointer to handle base structure pointer
+
+ @Output ppsBase - points to handle base structure pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+ PVRSRV_HANDLE_BASE_TYPE eType)
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ PVRSRV_ERROR eError;
+
+ if (gpsHandleFuncs == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Handle management not initialised"));
+ return PVRSRV_ERROR_NOT_READY;
+ }
+
+ LockHandle();
+
+ if (ppsBase == NULL)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrorUnlock;
+ }
+
+ psBase = OSAllocZMem(sizeof(*psBase));
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't allocate handle base"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorUnlock;
+ }
+
+ psBase->eType = eType;
+
+ eError = gpsHandleFuncs->pfnCreateHandleBase(&psBase->psImplBase);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeHandleBase;
+ }
+
+ psBase->psHashTab = HASH_Create_Extended(HANDLE_HASH_TAB_INIT_SIZE,
+ sizeof(HAND_KEY),
+ HASH_Func_Default,
+ HASH_Key_Comp_Default);
+ if (psBase->psHashTab == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAllocHandleBase: Couldn't create data pointer hash table"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+ goto ErrorDestroyHandleBase;
+ }
+
+ *ppsBase = psBase;
+
+ UnlockHandle();
+
+ return PVRSRV_OK;
+
+ErrorDestroyHandleBase:
+ (void)gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+
+ErrorFreeHandleBase:
+ OSFreeMem(psBase);
+
+ErrorUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+#if defined(DEBUG)
+typedef struct _COUNT_HANDLE_DATA_
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ IMG_UINT32 uiHandleDataCount;
+} COUNT_HANDLE_DATA;
+
+/* Used to count the number of handles that have data associated with them */
+static PVRSRV_ERROR CountHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+ COUNT_HANDLE_DATA *psData = (COUNT_HANDLE_DATA *)pvData;
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psData == NULL ||
+ psData->psBase == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Missing count data"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = GetHandleData(psData->psBase,
+ &psHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CountHandleDataWrapper: Couldn't get handle data for handle"));
+ return eError;
+ }
+
+ if (psHandleData != NULL)
+ {
+ psData->uiHandleDataCount++;
+ }
+
+ return PVRSRV_OK;
+}
+
+/* Print a handle in the handle base. Used with the iterator callback. */
+static PVRSRV_ERROR ListHandlesInBase(IMG_HANDLE hHandle, void *pvData)
+{
+ PVRSRV_HANDLE_BASE *psBase = (PVRSRV_HANDLE_BASE*) pvData;
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Missing base", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = GetHandleData(psBase,
+ &psHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't get handle data for handle", __func__));
+ return eError;
+ }
+
+ if (psHandleData != NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, " Handle: %6u, Refs: %3u, Type: %s (%u)",
+ (IMG_UINT32) (uintptr_t) psHandleData->hHandle,
+ psHandleData->ui32RefCount,
+ HandleTypeToString(psHandleData->eType),
+ psHandleData->eType));
+ }
+
+ return PVRSRV_OK;
+}
+
+#endif /* defined(DEBUG) */
+
+typedef struct FREE_HANDLE_DATA_TAG
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ PVRSRV_HANDLE_TYPE eHandleFreeType;
+	/* Timing data (ns) used to release the bridge lock once the deadline has passed */
+ IMG_UINT64 ui64TimeStart;
+ IMG_UINT64 ui64MaxBridgeTime;
+} FREE_HANDLE_DATA;
+
+typedef struct FREE_KERNEL_HANDLE_DATA_TAG
+{
+ PVRSRV_HANDLE_BASE *psBase;
+ HANDLE_DATA *psProcessHandleData;
+ IMG_HANDLE hKernelHandle;
+} FREE_KERNEL_HANDLE_DATA;
+
+static INLINE IMG_BOOL _CheckIfMaxTimeExpired(IMG_UINT64 ui64TimeStart, IMG_UINT64 ui64MaxBridgeTime)
+{
+ IMG_UINT64 ui64Diff;
+ IMG_UINT64 ui64Now = OSClockns64();
+
+ if (ui64Now >= ui64TimeStart)
+ {
+ ui64Diff = ui64Now - ui64TimeStart;
+ }
+ else
+ {
+ /* time has wrapped around */
+ ui64Diff = (UINT64_MAX - ui64TimeStart) + ui64Now;
+ }
+
+ return ui64Diff >= ui64MaxBridgeTime;
+}
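+
+/* Worked example (editorial note): if the clock has wrapped, say
+ * ui64TimeStart == UINT64_MAX - 100 and ui64Now == 50, the else branch
+ * above computes ui64Diff = (UINT64_MAX - (UINT64_MAX - 100)) + 50 = 150,
+ * so elapsed time is still measured sensibly across the wrap-around
+ * rather than underflowing. */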
+
+static PVRSRV_ERROR FreeKernelHandlesWrapperIterKernel(IMG_HANDLE hHandle, void *pvData)
+{
+ FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
+ HANDLE_DATA *psKernelHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ /* Get kernel handle data. */
+ eError = GetHandleData(KERNEL_HANDLE_BASE,
+ &psKernelHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterKernel: Couldn't get handle data for kernel handle"));
+ return eError;
+ }
+
+ if (psKernelHandleData->pvData == psData->psProcessHandleData->pvData)
+ {
+ /* This kernel handle belongs to our process handle. */
+ psData->hKernelHandle = hHandle;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeKernelHandlesWrapperIterProcess(IMG_HANDLE hHandle, void *pvData)
+{
+ FREE_KERNEL_HANDLE_DATA *psData = (FREE_KERNEL_HANDLE_DATA *)pvData;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ /* Get process handle data. */
+ eError = GetHandleData(psData->psBase,
+ &psData->psProcessHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterProcess: Couldn't get handle data for process handle"));
+ return eError;
+ }
+
+ if (psData->psProcessHandleData->eFlag == PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+#if defined(SUPPORT_INSECURE_EXPORT)
+ || psData->psProcessHandleData->eType == PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT
+#endif
+ )
+ {
+ /* Only multi-alloc process handles (or, with SUPPORT_INSECURE_EXPORT, fence export handles) may be present in the kernel handle base. */
+ psData->hKernelHandle = NULL;
+ /* Iterate over kernel handles. */
+ eError = gpsHandleFuncs->pfnIterateOverHandles(KERNEL_HANDLE_BASE->psImplBase,
+ &FreeKernelHandlesWrapperIterKernel,
+ (void *)psData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterProcess: Failed to iterate over kernel handles"));
+ return eError;
+ }
+
+ if (psData->hKernelHandle)
+ {
+ /* Release kernel handle which belongs to our process handle. */
+ eError = gpsHandleFuncs->pfnReleaseHandle(KERNEL_HANDLE_BASE->psImplBase,
+ psData->hKernelHandle,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeKernelHandlesWrapperIterProcess: Couldn't release kernel handle"));
+ return eError;
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR FreeHandleDataWrapper(IMG_HANDLE hHandle, void *pvData)
+{
+ FREE_HANDLE_DATA *psData = (FREE_HANDLE_DATA *)pvData;
+ HANDLE_DATA *psHandleData = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ if (psData == NULL ||
+ psData->psBase == NULL ||
+ psData->eHandleFreeType == PVRSRV_HANDLE_TYPE_NONE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Missing free data"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = GetHandleData(psData->psBase,
+ &psHandleData,
+ hHandle,
+ PVRSRV_HANDLE_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FreeHandleDataWrapper: Couldn't get handle data for handle"));
+ return eError;
+ }
+
+ if (psHandleData == NULL || psHandleData->eType != psData->eHandleFreeType)
+ {
+ return PVRSRV_OK;
+ }
+
+ PVR_ASSERT(psHandleData->ui32RefCount > 0);
+
+ while (psHandleData->ui32RefCount != 0)
+ {
+ if (psHandleData->pfnReleaseData != NULL)
+ {
+ eError = psHandleData->pfnReleaseData(psHandleData->pvData);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "FreeHandleDataWrapper: "
+ "Got retry while calling release data callback for %p (type = %d)",
+ hHandle,
+ (IMG_UINT32)psHandleData->eType));
+
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ _HandleUnref(psHandleData);
+ }
+
+ if (!TEST_ALLOC_FLAG(psHandleData, PVRSRV_HANDLE_ALLOC_FLAG_MULTI))
+ {
+ HAND_KEY aKey;
+ IMG_HANDLE hRemovedHandle;
+
+ InitKey(aKey,
+ psData->psBase,
+ psHandleData->pvData,
+ psHandleData->eType,
+ ParentIfPrivate(psHandleData));
+
+ hRemovedHandle = (IMG_HANDLE)HASH_Remove_Extended(psData->psBase->psHashTab, aKey);
+
+ PVR_ASSERT(hRemovedHandle != NULL);
+ PVR_ASSERT(hRemovedHandle == psHandleData->hHandle);
+ PVR_UNREFERENCED_PARAMETER(hRemovedHandle);
+ }
+
+ eError = gpsHandleFuncs->pfnSetHandleData(psData->psBase->psImplBase, hHandle, NULL);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSFreeMem(psHandleData);
+
+ /* If we have reached the end of the time slice, release the global
+ * lock, invoke the scheduler and then reacquire the lock */
+ if ((psData->ui64MaxBridgeTime != 0) && _CheckIfMaxTimeExpired(psData->ui64TimeStart, psData->ui64MaxBridgeTime))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "FreeResourceByCriteria: Lock timeout (timeout: %" IMG_UINT64_FMTSPEC")",
+ psData->ui64MaxBridgeTime));
+ UnlockHandle();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+ /* Invoke the scheduler to check if other processes are waiting for the lock */
+ OSReleaseThreadQuanta();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+ LockHandle();
+ /* Restart the lock timeout counter */
+ psData->ui64TimeStart = OSClockns64();
+ PVR_DPF((PVR_DBG_MESSAGE, "FreeResourceByCriteria: Lock acquired again"));
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_HANDLE_TYPE g_aeOrderedFreeList[] =
+{
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_MEMORY_BLOCK,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_EXPORT,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ PVRSRV_HANDLE_TYPE_PVRSRV_TIMELINE_SERVER,
+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_EXPORT,
+ PVRSRV_HANDLE_TYPE_PVRSRV_FENCE_SERVER,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_PAGELIST,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_SECURE_EXPORT,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_TYPE_DC_PIN_HANDLE,
+ PVRSRV_HANDLE_TYPE_DC_BUFFER,
+ PVRSRV_HANDLE_TYPE_DC_DISPLAY_CONTEXT,
+ PVRSRV_HANDLE_TYPE_DC_DEVICE,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ PVRSRV_HANDLE_TYPE_MM_PLAT_CLEANUP
+};
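+
+/* Editorial note: the order of this array matters. FreeHandleDataWrapper
+ * is invoked once per type, in array order, so handle types that refer
+ * to other resources (for example the RGX context and sync handles) are
+ * listed before the types they depend on (such as DEVMEMINT_CTX and
+ * PHYSMEM_PMR), avoiding the release of a resource while dependent
+ * handles still reference it. */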
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFreeKernelHandles
+
+ @Description Free kernel handles which belong to process handles
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase)
+{
+ FREE_KERNEL_HANDLE_DATA sHandleData = { };
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ sHandleData.psBase = psBase;
+ /* Iterate over process handles. */
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &FreeKernelHandlesWrapperIterProcess,
+ (void *)&sHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVFreeKernelHandles: Failed to iterate over handles (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ExitUnlock;
+ }
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVFreeHandleBase
+
+ @Description Free a handle base structure
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime)
+{
+#if defined(DEBUG)
+ COUNT_HANDLE_DATA sCountData = { };
+#endif
+ FREE_HANDLE_DATA sHandleData = { };
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsHandleFuncs);
+
+ LockHandle();
+
+ sHandleData.psBase = psBase;
+ sHandleData.ui64TimeStart = OSClockns64();
+ sHandleData.ui64MaxBridgeTime = ui64MaxBridgeTime;
+
+
+#if defined(DEBUG)
+
+ sCountData.psBase = psBase;
+
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &CountHandleDataWrapper,
+ (void *)&sCountData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVFreeHandleBase: Failed to perform handle count (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ExitUnlock;
+ }
+
+ if (sCountData.uiHandleDataCount != 0)
+ {
+ IMG_BOOL bList = sCountData.uiHandleDataCount < HANDLE_DEBUG_LISTING_MAX_NUM;
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: %u remaining handles in handle base 0x%p "
+ "(PVRSRV_HANDLE_BASE_TYPE %u).%s",
+ __func__,
+ sCountData.uiHandleDataCount,
+ psBase,
+ psBase->eType,
+ bList ? "": " Skipping details, too many items..."));
+
+ if (bList)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "-------- Listing Handles --------"));
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &ListHandlesInBase,
+ psBase);
+ PVR_DPF((PVR_DBG_WARNING, "-------- Done Listing --------"));
+ }
+ }
+
+#endif /* defined(DEBUG) */
+
+ /*
+ * As we're freeing handles based on type, make sure all
+ * handles have actually had their data freed to avoid
+ * resources being leaked
+ */
+ for (i = 0; i < ARRAY_SIZE(g_aeOrderedFreeList); i++)
+ {
+ sHandleData.eHandleFreeType = g_aeOrderedFreeList[i];
+
+ /* Make sure all handles have been freed before destroying the handle base */
+ eError = gpsHandleFuncs->pfnIterateOverHandles(psBase->psImplBase,
+ &FreeHandleDataWrapper,
+ (void *)&sHandleData);
+ if (eError != PVRSRV_OK)
+ {
+ goto ExitUnlock;
+ }
+ }
+
+
+ if (psBase->psHashTab != NULL)
+ {
+ HASH_Delete(psBase->psHashTab);
+ }
+
+ eError = gpsHandleFuncs->pfnDestroyHandleBase(psBase->psImplBase);
+ if (eError != PVRSRV_OK)
+ {
+ goto ExitUnlock;
+ }
+
+ OSFreeMem(psBase);
+
+ eError = PVRSRV_OK;
+
+ExitUnlock:
+ UnlockHandle();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVHandleInit
+
+ @Description Initialise handle management
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(gpsKernelHandleBase == NULL);
+ PVR_ASSERT(gpsHandleFuncs == NULL);
+ PVR_ASSERT(!gbLockInitialised);
+
+ eError = OSLockCreate(&gHandleLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: Creation of handle global lock failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+ gbLockInitialised = IMG_TRUE;
+
+ eError = PVRSRVHandleGetFuncTable(&gpsHandleFuncs);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: PVRSRVHandleGetFuncTable failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorHandleDeinit;
+ }
+
+ eError = PVRSRVAllocHandleBase(&gpsKernelHandleBase,
+ PVRSRV_HANDLE_BASE_TYPE_GLOBAL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: PVRSRVAllocHandleBase failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorHandleDeinit;
+ }
+
+ eError = gpsHandleFuncs->pfnEnableHandlePurging(gpsKernelHandleBase->psImplBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleInit: PVRSRVEnableHandlePurging failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorHandleDeinit;
+ }
+
+ return PVRSRV_OK;
+
+ErrorHandleDeinit:
+ (void) PVRSRVHandleDeInit();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVHandleDeInit
+
+ @Description De-initialise handle management
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVHandleDeInit(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (gpsHandleFuncs != NULL)
+ {
+ if (gpsKernelHandleBase != NULL)
+ {
+ eError = PVRSRVFreeHandleBase(gpsKernelHandleBase, 0 /* do not release bridge lock */);
+ if (eError == PVRSRV_OK)
+ {
+ gpsKernelHandleBase = NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVHandleDeInit: FreeHandleBase failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ }
+
+ if (eError == PVRSRV_OK)
+ {
+ gpsHandleFuncs = NULL;
+ }
+ }
+ else
+ {
+ /* If we don't have a handle function table we shouldn't have a handle base either */
+ PVR_ASSERT(gpsKernelHandleBase == NULL);
+ }
+
+ if (gbLockInitialised)
+ {
+ OSLockDestroy(gHandleLock);
+ gbLockInitialised = IMG_FALSE;
+ }
+
+ return eError;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/handle.h b/drivers/gpu/drm/img-rogue/1.10/handle.h
new file mode 100644
index 00000000000000..ec049cb0a32f2b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/handle.h
@@ -0,0 +1,202 @@
+/**************************************************************************/ /*!
+@File
+@Title Handle Manager API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_H__)
+#define __HANDLE_H__
+
+/*
+ * Handle API
+ * ----------
+ * The handle API is intended to provide handles for kernel resources,
+ * which can then be passed back to user space processes.
+ *
+ * The following functions comprise the API. Each function takes a
+ * pointer to a PVRSRV_HANDLE_BASE structure, one of which is allocated
+ * for each process, and stored in the per-process data area. Use
+ * KERNEL_HANDLE_BASE for handles not allocated for a particular process,
+ * or for handles that need to be allocated before the PVRSRV_HANDLE_BASE
+ * structure for the process is available.
+ *
+ * PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+ *
+ * Allocate a handle phHandle, for the resource of type eType pointed to by
+ * pvData.
+ *
+ * For handles that have a definite lifetime, where the corresponding
+ * resource is explicitly created and destroyed, eFlag should be zero.
+ *
+ * If a particular resource may be referenced multiple times by a
+ * given process, setting eFlag to PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ * will allow multiple handles to be allocated for the resource.
+ * Such handles cannot be found with PVRSRVFindHandle.
+ *
+ * PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType,
+ * PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+ *
+ * This function is similar to PVRSRVAllocHandle, except that the allocated
+ * handles are associated with a parent handle, hParent, that has been
+ * allocated previously. Subhandles are automatically deallocated when their
+ * parent handle is deallocated.
+ * Subhandles can be treated as ordinary handles. For example, they may
+ * have subhandles of their own, and may be explicitly deallocated using
+ * PVRSRVReleaseHandle (see below).
+ *
+ * PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Find the handle previously allocated for the resource pointed to by
+ * pvData, of type eType. Handles allocated with the flag
+ * PVRSRV_HANDLE_ALLOC_FLAG_MULTI cannot be found using this
+ * function.
+ *
+ * PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase,
+ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * IMG_BOOL bRef);
+ *
+ * Given a handle for a resource of type eType, return the pointer to the
+ * resource.
+ *
+ * PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase,
+ * void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType,
+ * IMG_HANDLE hAncestor);
+ *
+ * Similar to PVRSRVLookupHandle, but checks the handle is a descendant
+ * of hAncestor.
+ *
+ * PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Deallocate a handle of given type.
+ *
+ * PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase,
+ * IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+ *
+ * Return the parent of a handle in *phParent, or NULL if the handle has
+ * no parent.
+ */
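+
+/*
+ * Usage sketch (editorial illustration, not part of the driver sources):
+ * allocating a handle for a resource, looking it up and releasing it.
+ * psBase and psResource are assumed to exist; error handling is elided.
+ *
+ *    IMG_HANDLE hHandle;
+ *    void *pvLookup;
+ *
+ *    PVRSRVAllocHandle(psBase, &hHandle, psResource,
+ *                      PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ *                      PVRSRV_HANDLE_ALLOC_FLAG_NONE, NULL);
+ *
+ *    PVRSRVLookupHandle(psBase, &pvLookup, hHandle,
+ *                       PVRSRV_HANDLE_TYPE_PHYSMEM_PMR, IMG_FALSE);
+ *
+ *    PVRSRVReleaseHandle(psBase, hHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ */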
+
+#include "img_types.h"
+#include "hash.h"
+
+typedef enum
+{
+ #define HANDLETYPE(x) PVRSRV_HANDLE_TYPE_##x,
+ #include "handle_types.h"
+ #undef HANDLETYPE
+} PVRSRV_HANDLE_TYPE;
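+
+/* Editorial note: the enum above is built with an "X-macro". Each
+ * HANDLETYPE(x) entry in handle_types.h expands to an enumerator here,
+ * e.g. HANDLETYPE(PHYSMEM_PMR) becomes PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ * so the list of handle types is maintained in a single place. */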
+
+static_assert(PVRSRV_HANDLE_TYPE_NONE == 0, "PVRSRV_HANDLE_TYPE_NONE must be zero");
+
+typedef enum
+{
+ PVRSRV_HANDLE_BASE_TYPE_CONNECTION,
+ PVRSRV_HANDLE_BASE_TYPE_PROCESS,
+ PVRSRV_HANDLE_BASE_TYPE_GLOBAL
+} PVRSRV_HANDLE_BASE_TYPE;
+
+
+typedef enum
+{
+ /* No flags */
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE = 0,
+ /* Multiple handles can point at the given data pointer */
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI = 0x01,
+ /* Subhandles are allocated in a private handle space */
+ PVRSRV_HANDLE_ALLOC_FLAG_PRIVATE = 0x02
+} PVRSRV_HANDLE_ALLOC_FLAG;
+
+typedef struct _HANDLE_BASE_ PVRSRV_HANDLE_BASE;
+
+typedef struct _PROCESS_HANDLE_BASE_
+{
+ PVRSRV_HANDLE_BASE *psHandleBase;
+ ATOMIC_T iRefCount;
+
+} PROCESS_HANDLE_BASE;
+
+extern PVRSRV_HANDLE_BASE *gpsKernelHandleBase;
+#define KERNEL_HANDLE_BASE (gpsKernelHandleBase)
+
+#define HANDLE_DEBUG_LISTING_MAX_NUM 20
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_RELEASE)(void *pvData);
+
+PVRSRV_ERROR PVRSRVAllocHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+PVRSRV_ERROR PVRSRVAllocHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, PFN_HANDLE_RELEASE pfnReleaseData);
+
+PVRSRV_ERROR PVRSRVAllocSubHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+PVRSRV_ERROR PVRSRVAllocSubHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType, PVRSRV_HANDLE_ALLOC_FLAG eFlag, IMG_HANDLE hParent);
+
+PVRSRV_ERROR PVRSRVFindHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVFindHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phHandle, void *pvData, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVLookupHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+PVRSRV_ERROR PVRSRVLookupHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_BOOL bRef);
+
+PVRSRV_ERROR PVRSRVLookupSubHandle(PVRSRV_HANDLE_BASE *psBase, void **ppvData, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType, IMG_HANDLE hAncestor);
+
+PVRSRV_ERROR PVRSRVGetParentHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE *phParent, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVReleaseHandle(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+PVRSRV_ERROR PVRSRVReleaseHandleUnlocked(PVRSRV_HANDLE_BASE *psBase, IMG_HANDLE hHandle, PVRSRV_HANDLE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVPurgeHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVAllocHandleBase(PVRSRV_HANDLE_BASE **ppsBase,
+ PVRSRV_HANDLE_BASE_TYPE eType);
+
+PVRSRV_ERROR PVRSRVFreeHandleBase(PVRSRV_HANDLE_BASE *psBase, IMG_UINT64 ui64MaxBridgeTime);
+
+PVRSRV_ERROR PVRSRVFreeKernelHandles(PVRSRV_HANDLE_BASE *psBase);
+
+PVRSRV_ERROR PVRSRVHandleInit(void);
+
+PVRSRV_ERROR PVRSRVHandleDeInit(void);
+
+void LockHandle(void);
+void UnlockHandle(void);
+
+
+#endif /* !defined(__HANDLE_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/handle_idr.c b/drivers/gpu/drm/img-rogue/1.10/handle_idr.c
new file mode 100644
index 00000000000000..a203ebdfb8e3c3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/handle_idr.c
@@ -0,0 +1,439 @@
+/*************************************************************************/ /*!
+@File
+@Title Resource Handle Manager - IDR Back-end
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide IDR based resource handle management back-end
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+
+#include "handle_impl.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#define ID_VALUE_MIN 1
+#define ID_VALUE_MAX INT_MAX
+
+#define ID_TO_HANDLE(i) ((IMG_HANDLE)(uintptr_t)(i))
+#define HANDLE_TO_ID(h) ((IMG_INT)(uintptr_t)(h))
+
+struct _HANDLE_IMPL_BASE_
+{
+ struct idr sIdr;
+
+ IMG_UINT32 ui32MaxHandleValue;
+
+ IMG_UINT32 ui32TotalHandCount;
+};
+
+typedef struct _HANDLE_ITER_DATA_WRAPPER_
+{
+ PFN_HANDLE_ITER pfnHandleIter;
+ void *pvHandleIterData;
+} HANDLE_ITER_DATA_WRAPPER;
+
+
+static int HandleIterFuncWrapper(int id, void *data, void *iter_data)
+{
+ HANDLE_ITER_DATA_WRAPPER *psIterData = (HANDLE_ITER_DATA_WRAPPER *)iter_data;
+
+ PVR_UNREFERENCED_PARAMETER(data);
+
+ return (int)psIterData->pfnHandleIter(ID_TO_HANDLE(id), psIterData->pvHandleIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function AcquireHandle
+
+ @Description Acquire a new handle
+
+ @Input psBase - Pointer to handle base structure
+ phHandle - Points to a handle pointer
+ pvData - Pointer to resource to be associated with the handle
+
+ @Output phHandle - Points to a handle pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR AcquireHandle(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE *phHandle,
+ void *pvData)
+{
+ int id;
+ int result;
+
+ PVR_ASSERT(psBase != NULL);
+ PVR_ASSERT(phHandle != NULL);
+ PVR_ASSERT(pvData != NULL);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+ idr_preload(GFP_KERNEL);
+ id = idr_alloc(&psBase->sIdr, pvData, ID_VALUE_MIN, psBase->ui32MaxHandleValue + 1, 0);
+ idr_preload_end();
+
+ result = id;
+#else
+ do
+ {
+ if (idr_pre_get(&psBase->sIdr, GFP_KERNEL) == 0)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ result = idr_get_new_above(&psBase->sIdr, pvData, ID_VALUE_MIN, &id);
+ } while (result == -EAGAIN);
+
+ if ((IMG_UINT32)id > psBase->ui32MaxHandleValue)
+ {
+ idr_remove(&psBase->sIdr, id);
+ result = -ENOSPC;
+ }
+#endif
+
+ if (result < 0)
+ {
+ if (result == -ENOSPC)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Limit of %u handles reached",
+ __FUNCTION__, psBase->ui32MaxHandleValue));
+
+ return PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE;
+ }
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psBase->ui32TotalHandCount++;
+
+ *phHandle = ID_TO_HANDLE(id);
+
+ return PVRSRV_OK;
+}
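+
+/* Editorial note: the two paths above reflect the kernel IDR API change
+ * in Linux 3.9. On newer kernels a single idr_alloc() call, bracketed by
+ * idr_preload()/idr_preload_end(), both preallocates and claims an ID
+ * with an explicit upper bound. On older kernels idr_pre_get() must be
+ * retried in a loop with idr_get_new_above(), and the upper bound has to
+ * be enforced by hand afterwards because idr_get_new_above() takes no
+ * maximum. */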
+
+/*!
+******************************************************************************
+
+ @Function ReleaseHandle
+
+ @Description Release a handle that is no longer needed.
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle to release
+ ppvData - Points to a void data pointer
+
+ @Output ppvData - Points to a void data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR ReleaseHandle(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE hHandle,
+ void **ppvData)
+{
+ int id = HANDLE_TO_ID(hHandle);
+ void *pvData;
+
+ PVR_ASSERT(psBase);
+
+ /* Get the data associated with the handle. If we get back NULL then
+ it's an invalid handle */
+
+ pvData = idr_find(&psBase->sIdr, id);
+ if (pvData)
+ {
+ idr_remove(&psBase->sIdr, id);
+ psBase->ui32TotalHandCount--;
+ }
+
+ if (pvData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Handle out of range (%u > %u)",
+ __FUNCTION__, id, psBase->ui32TotalHandCount));
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+
+ if (ppvData)
+ {
+ *ppvData = pvData;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function GetHandleData
+
+ @Description Get the data associated with the given handle
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle from which data should be retrieved
+ ppvData - Points to a void data pointer
+
+ @Output ppvData - Points to a void data pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR GetHandleData(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE hHandle,
+ void **ppvData)
+{
+ int id = HANDLE_TO_ID(hHandle);
+ void *pvData;
+
+ PVR_ASSERT(psBase);
+ PVR_ASSERT(ppvData);
+
+ pvData = idr_find(&psBase->sIdr, id);
+ if (pvData)
+ {
+ *ppvData = pvData;
+
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+}
+
+/*!
+******************************************************************************
+
+ @Function SetHandleData
+
+ @Description Set the data associated with the given handle
+
+ @Input psBase - Pointer to handle base structure
+ hHandle - Handle for which data should be changed
+ pvData - Pointer to new data to be associated with the handle
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR SetHandleData(HANDLE_IMPL_BASE *psBase,
+ IMG_HANDLE hHandle,
+ void *pvData)
+{
+ int id = HANDLE_TO_ID(hHandle);
+ void *pvOldData;
+
+ PVR_ASSERT(psBase);
+
+ pvOldData = idr_replace(&psBase->sIdr, pvData, id);
+ if (IS_ERR(pvOldData))
+ {
+ if (PTR_ERR(pvOldData) == -ENOENT)
+ {
+ return PVRSRV_ERROR_HANDLE_NOT_ALLOCATED;
+ }
+ else
+ {
+ return PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR IterateOverHandles(HANDLE_IMPL_BASE *psBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData)
+{
+ HANDLE_ITER_DATA_WRAPPER sIterData;
+
+ PVR_ASSERT(psBase);
+ PVR_ASSERT(pfnHandleIter);
+
+ sIterData.pfnHandleIter = pfnHandleIter;
+ sIterData.pvHandleIterData = pvHandleIterData;
+
+ return (PVRSRV_ERROR)idr_for_each(&psBase->sIdr, HandleIterFuncWrapper, &sIterData);
+}
+
+/*!
+******************************************************************************
+
+ @Function EnableHandlePurging
+
+ @Description Enable purging for a given handle base
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR EnableHandlePurging(HANDLE_IMPL_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_ASSERT(psBase);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PurgeHandles
+
+ @Description Purge handles for a given handle base
+
+ @Input psBase - Pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR PurgeHandles(HANDLE_IMPL_BASE *psBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psBase);
+ PVR_ASSERT(psBase);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function CreateHandleBase
+
+ @Description Create a handle base structure
+
+ @Input ppsBase - pointer to handle base structure pointer
+
+ @Output ppsBase - points to handle base structure pointer
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR CreateHandleBase(HANDLE_IMPL_BASE **ppsBase)
+{
+ HANDLE_IMPL_BASE *psBase;
+
+ PVR_ASSERT(ppsBase);
+
+ psBase = OSAllocZMem(sizeof(*psBase));
+ if (psBase == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Couldn't allocate generic handle base", __FUNCTION__));
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ idr_init(&psBase->sIdr);
+
+ psBase->ui32MaxHandleValue = ID_VALUE_MAX;
+ psBase->ui32TotalHandCount = 0;
+
+ *ppsBase = psBase;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function DestroyHandleBase
+
+ @Description Destroy a handle base structure
+
+ @Input psBase - pointer to handle base structure
+
+ @Return Error code or PVRSRV_OK
+
+******************************************************************************/
+static PVRSRV_ERROR DestroyHandleBase(HANDLE_IMPL_BASE *psBase)
+{
+ PVR_ASSERT(psBase);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0))
+ idr_remove_all(&psBase->sIdr);
+#endif
+
+ /* Finally destroy the idr */
+ idr_destroy(&psBase->sIdr);
+
+ OSFreeMem(psBase);
+
+ return PVRSRV_OK;
+}
+
+
+static const HANDLE_IMPL_FUNCTAB g_sHandleFuncTab =
+{
+ .pfnAcquireHandle = AcquireHandle,
+ .pfnReleaseHandle = ReleaseHandle,
+ .pfnGetHandleData = GetHandleData,
+ .pfnSetHandleData = SetHandleData,
+ .pfnIterateOverHandles = IterateOverHandles,
+ .pfnEnableHandlePurging = EnableHandlePurging,
+ .pfnPurgeHandles = PurgeHandles,
+ .pfnCreateHandleBase = CreateHandleBase,
+ .pfnDestroyHandleBase = DestroyHandleBase
+};
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs)
+{
+ static IMG_BOOL bAcquired = IMG_FALSE;
+
+ if (bAcquired)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Function table already acquired",
+ __FUNCTION__));
+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ if (ppsFuncs == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *ppsFuncs = &g_sHandleFuncTab;
+
+ bAcquired = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/handle_impl.h b/drivers/gpu/drm/img-rogue/1.10/handle_impl.h
new file mode 100644
index 00000000000000..95043d781f918b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/handle_impl.h
@@ -0,0 +1,89 @@
+/**************************************************************************/ /*!
+@File
+@Title Implementation Callbacks for Handle Manager API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the handle manager API. This file is for declarations
+ and definitions that are private/internal to the handle manager
+ API but need to be shared between the generic handle manager
+ code and the various handle manager backends, i.e. the code that
+ implements the various callbacks.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__HANDLE_IMPL_H__)
+#define __HANDLE_IMPL_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _HANDLE_IMPL_BASE_ HANDLE_IMPL_BASE;
+
+typedef PVRSRV_ERROR (*PFN_HANDLE_ITER)(IMG_HANDLE hHandle, void *pvData);
+
+typedef struct _HANDLE_IMPL_FUNCTAB_
+{
+ /* Acquire a new handle which is associated with the given data */
+ PVRSRV_ERROR (*pfnAcquireHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE *phHandle, void *pvData);
+
+ /* Release the given handle (optionally returning the data associated with it) */
+ PVRSRV_ERROR (*pfnReleaseHandle)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+ /* Get the data associated with the given handle */
+ PVRSRV_ERROR (*pfnGetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void **ppvData);
+
+ /* Set the data associated with the given handle */
+ PVRSRV_ERROR (*pfnSetHandleData)(HANDLE_IMPL_BASE *psHandleBase, IMG_HANDLE hHandle, void *pvData);
+
+ PVRSRV_ERROR (*pfnIterateOverHandles)(HANDLE_IMPL_BASE *psHandleBase, PFN_HANDLE_ITER pfnHandleIter, void *pvHandleIterData);
+
+ /* Enable handle purging on the given handle base */
+ PVRSRV_ERROR (*pfnEnableHandlePurging)(HANDLE_IMPL_BASE *psHandleBase);
+
+ /* Purge handles on the given handle base */
+ PVRSRV_ERROR (*pfnPurgeHandles)(HANDLE_IMPL_BASE *psHandleBase);
+
+ /* Create handle base */
+ PVRSRV_ERROR (*pfnCreateHandleBase)(HANDLE_IMPL_BASE **psHandleBase);
+
+ /* Destroy handle base */
+ PVRSRV_ERROR (*pfnDestroyHandleBase)(HANDLE_IMPL_BASE *psHandleBase);
+} HANDLE_IMPL_FUNCTAB;
+
+PVRSRV_ERROR PVRSRVHandleGetFuncTable(HANDLE_IMPL_FUNCTAB const **ppsFuncs);
+
+#endif /* !defined(__HANDLE_IMPL_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/handle_types.h b/drivers/gpu/drm/img-rogue/1.10/handle_types.h
new file mode 100644
index 00000000000000..5a6a74ad11cf21
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/handle_types.h
@@ -0,0 +1,90 @@
+/**************************************************************************/ /*!
+@File
+@Title Handle Manager handle types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provide handle management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+/* NOTE: Do not add include guards to this file */
+
+HANDLETYPE(NONE)
+HANDLETYPE(SHARED_EVENT_OBJECT)
+HANDLETYPE(EVENT_OBJECT_CONNECT)
+HANDLETYPE(PMR_LOCAL_EXPORT_HANDLE)
+HANDLETYPE(PHYSMEM_PMR)
+HANDLETYPE(PHYSMEM_PMR_EXPORT)
+HANDLETYPE(PHYSMEM_PMR_SECURE_EXPORT)
+HANDLETYPE(DEVMEMINT_CTX)
+HANDLETYPE(DEVMEMINT_CTX_EXPORT)
+HANDLETYPE(DEVMEMINT_HEAP)
+HANDLETYPE(DEVMEMINT_RESERVATION)
+HANDLETYPE(DEVMEMINT_MAPPING)
+HANDLETYPE(RGX_FW_MEMDESC)
+HANDLETYPE(RGX_RTDATA_CLEANUP)
+HANDLETYPE(RGX_FREELIST)
+HANDLETYPE(RGX_SERVER_RPM_CONTEXT)
+HANDLETYPE(RGX_RPM_FREELIST)
+HANDLETYPE(RGX_MEMORY_BLOCK)
+HANDLETYPE(RGX_SERVER_RENDER_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_CONTEXT)
+HANDLETYPE(RGX_SERVER_TQ_TDM_CONTEXT)
+HANDLETYPE(RGX_SERVER_COMPUTE_CONTEXT)
+HANDLETYPE(RGX_SERVER_RAY_CONTEXT)
+HANDLETYPE(RGX_SERVER_KICKSYNC_CONTEXT)
+HANDLETYPE(SYNC_PRIMITIVE_BLOCK)
+HANDLETYPE(SERVER_SYNC_PRIMITIVE)
+HANDLETYPE(SERVER_SYNC_EXPORT)
+HANDLETYPE(SERVER_OP_COOKIE)
+HANDLETYPE(SYNC_RECORD_HANDLE)
+HANDLETYPE(PVRSRV_TIMELINE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_SERVER)
+HANDLETYPE(PVRSRV_FENCE_EXPORT)
+HANDLETYPE(RGX_FWIF_RENDERTARGET)
+HANDLETYPE(RGX_FWIF_ZSBUFFER)
+HANDLETYPE(RGX_POPULATION)
+HANDLETYPE(DC_DEVICE)
+HANDLETYPE(DC_DISPLAY_CONTEXT)
+HANDLETYPE(DC_BUFFER)
+HANDLETYPE(DC_PIN_HANDLE)
+HANDLETYPE(DEVMEM_MEM_IMPORT)
+HANDLETYPE(PHYSMEM_PMR_PAGELIST)
+HANDLETYPE(PVR_TL_SD)
+HANDLETYPE(RI_HANDLE)
+HANDLETYPE(DEV_PRIV_DATA)
+HANDLETYPE(MM_PLAT_CLEANUP)
+HANDLETYPE(WORKEST_RETURN_DATA)
diff --git a/drivers/gpu/drm/img-rogue/1.10/hash.c b/drivers/gpu/drm/img-rogue/1.10/hash.c
new file mode 100644
index 00000000000000..913e266d86a501
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/hash.c
@@ -0,0 +1,681 @@
+/*************************************************************************/ /*!
+@File
+@Title Self scaling hash tables.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+ Implements simple self scaling hash tables. Hash collisions are
+ handled by chaining entries together. Hash tables are increased in
+ size when they become more than three-quarters full and decreased in size
+ when less than one-quarter full. Hash tables are never decreased below
+ their initial size.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* include/ */
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/shared/include/ */
+#include "hash.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+#include "allocmem.h"
+
+//#define PERF_DBG_RESIZE
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+#include <sys/time.h>
+#endif
+
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+#define KEY_TO_INDEX(pHash, key, uSize) \
+ ((pHash)->pfnHashFunc((pHash)->uKeySize, (key), (uSize)) % (uSize))
+
+#define KEY_COMPARE(pHash, pKey1, pKey2) \
+ ((pHash)->pfnKeyComp((pHash)->uKeySize, (pKey1), (pKey2)))
+
+#if defined(__linux__) && defined(__KERNEL__)
+#define _AllocMem OSAllocMemNoStats
+#define _AllocZMem OSAllocZMemNoStats
+#define _FreeMem OSFreeMemNoStats
+#else
+#define _AllocMem OSAllocMem
+#define _AllocZMem OSAllocZMem
+#define _FreeMem OSFreeMem
+#endif
+
+#define NO_SHRINK 0
+
+/* Each entry in a hash table is placed into a bucket */
+typedef struct _BUCKET_
+{
+ struct _BUCKET_ *pNext; /*!< the next bucket on the same chain */
+ uintptr_t v; /*!< entry value */
+#if defined (WIN32)
+ uintptr_t k[1]; /*<! entry key */
+#else
+ uintptr_t k[]; /* PRQA S 0642 */
+ /* override dynamic array declaration warning */
+#endif
+} BUCKET;
+
+struct _HASH_TABLE_
+{
+ IMG_UINT32 uSize; /*!< current size of the hash table */
+ IMG_UINT32 uCount; /*!< number of entries currently in the hash table */
+ IMG_UINT32 uMinimumSize; /*!< the minimum size that the hash table should be re-sized to */
+ IMG_UINT32 uKeySize; /*!< size of key in bytes */
+ IMG_UINT32 uShrinkThreshold; /*!< The threshold at which to trigger a shrink */
+ IMG_UINT32 uGrowThreshold; /*!< The threshold at which to trigger a grow */
+ HASH_FUNC* pfnHashFunc; /*!< hash function */
+ HASH_KEY_COMP* pfnKeyComp; /*!< key comparison function */
+ BUCKET** ppBucketTable; /*!< the hash table array */
+};
+
+/*************************************************************************/ /*!
+@Function HASH_Func_Default
+@Description Hash function intended for hashing keys composed of
+ uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey A pointer to the key to hash.
+@Input uHashTabLen The length of the hash table.
+@Return The hash value.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_UINT32
+HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+ uintptr_t *p = (uintptr_t *)pKey;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+ IMG_UINT32 ui;
+ IMG_UINT32 uHashKey = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ IMG_UINT32 uHashPart = (IMG_UINT32)*p++;
+
+ uHashPart += (uHashPart << 12);
+ uHashPart ^= (uHashPart >> 22);
+ uHashPart += (uHashPart << 4);
+ uHashPart ^= (uHashPart >> 9);
+ uHashPart += (uHashPart << 10);
+ uHashPart ^= (uHashPart >> 2);
+ uHashPart += (uHashPart << 7);
+ uHashPart ^= (uHashPart >> 12);
+
+ uHashKey += uHashPart;
+ }
+
+ return uHashKey;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Key_Comp_Default
+@Description Compares keys composed of uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey1 Pointer to first hash key to compare.
+@Input pKey2 Pointer to second hash key to compare.
+@Return IMG_TRUE The keys match.
+ IMG_FALSE The keys don't match.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2)
+{
+ uintptr_t *p1 = (uintptr_t *)pKey1;
+ uintptr_t *p2 = (uintptr_t *)pKey2;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(uintptr_t);
+ IMG_UINT32 ui;
+
+ PVR_ASSERT((uKeySize % sizeof(uintptr_t)) == 0);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ if (*p1++ != *p2++)
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+/*************************************************************************/ /*!
+@Function _ChainInsert
+@Description Insert a bucket into the appropriate hash table chain.
+@Input pHash The hash table
+@Input pBucket The bucket
+@Input ppBucketTable The hash table bucket array
+@Input uSize The size of the bucket array
+@Return None
+*/ /**************************************************************************/
+static void
+_ChainInsert (HASH_TABLE *pHash, BUCKET *pBucket, BUCKET **ppBucketTable, IMG_UINT32 uSize)
+{
+ IMG_UINT32 uIndex;
+
+ /* We assume that all parameters passed by the caller are valid. */
+ PVR_ASSERT (pBucket != NULL);
+ PVR_ASSERT (ppBucketTable != NULL);
+ PVR_ASSERT (uSize != 0);
+
+ uIndex = KEY_TO_INDEX(pHash, pBucket->k, uSize); /* PRQA S 0432,0541 */ /* ignore dynamic array warning */
+ pBucket->pNext = ppBucketTable[uIndex];
+ ppBucketTable[uIndex] = pBucket;
+}
+
+/*************************************************************************/ /*!
+@Function _Rehash
+@Description Iterate over every entry in an old hash table and
+ rehash into the new table.
+@Input ppOldTable The old hash table
+@Input uOldSize The size of the old hash table
+@Input ppNewTable The new hash table
+@Input uNewSize The size of the new hash table
+@Return None
+*/ /**************************************************************************/
+static void
+_Rehash (HASH_TABLE *pHash,
+ BUCKET **ppOldTable, IMG_UINT32 uOldSize,
+ BUCKET **ppNewTable, IMG_UINT32 uNewSize)
+{
+ IMG_UINT32 uIndex;
+ for (uIndex = 0; uIndex < uOldSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ pBucket = ppOldTable[uIndex];
+ while (pBucket != NULL)
+ {
+ BUCKET *pNextBucket = pBucket->pNext;
+ _ChainInsert (pHash, pBucket, ppNewTable, uNewSize);
+ pBucket = pNextBucket;
+ }
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _Resize
+@Description Attempt to resize a hash table, failure to allocate a
+ new larger hash table is not considered a hard failure.
+ We simply continue and allow the table to fill up, the
+ effect is to allow hash chains to become longer.
+@Input pHash Hash table to resize.
+@Input uNewSize Required table size.
+@Return IMG_TRUE Success
+ IMG_FALSE Failed
+*/ /**************************************************************************/
+static IMG_BOOL
+_Resize (HASH_TABLE *pHash, IMG_UINT32 uNewSize)
+{
+ BUCKET **ppNewTable;
+ IMG_UINT32 uiThreshold = uNewSize >> 2;
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+ struct timeval start, end;
+#endif
+
+ if (uNewSize == pHash->uSize)
+ {
+ return IMG_TRUE;
+ }
+
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+ gettimeofday(&start, NULL);
+#endif
+
+ ppNewTable = _AllocZMem(sizeof(BUCKET *) * uNewSize);
+ if (ppNewTable == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ _Rehash(pHash, pHash->ppBucketTable, pHash->uSize, ppNewTable, uNewSize);
+
+ _FreeMem(pHash->ppBucketTable);
+
+#if !defined(__KERNEL__) && defined(PERF_DBG_RESIZE)
+ gettimeofday(&end, NULL);
+ if( start.tv_usec > end.tv_usec )
+ {
+ end.tv_usec = 1000000 - start.tv_usec + end.tv_usec;
+ }
+ else
+ {
+ end.tv_usec -= start.tv_usec;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: H:%p O:%d N:%d C:%d G:%d S:%d T:%06luus", __func__, pHash, pHash->uSize, uNewSize, pHash->uCount, pHash->uGrowThreshold, pHash->uShrinkThreshold, end.tv_usec));
+#endif
+
+ /*not nulling pointer, being reassigned just below*/
+ pHash->ppBucketTable = ppNewTable;
+ pHash->uSize = uNewSize;
+
+ pHash->uGrowThreshold = uiThreshold * 3;
+ pHash->uShrinkThreshold = (uNewSize <= pHash->uMinimumSize) ? NO_SHRINK : uiThreshold;
+
+ return IMG_TRUE;
+}
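+
+/* Worked example (editorial note): for uNewSize = 32, uiThreshold = 8,
+ * so the table grows again once it holds more than 24 entries
+ * (uGrowThreshold = 3 * 8) and, in the remove path, shrinks once the
+ * count falls below 8 (uShrinkThreshold); if 32 is already the minimum
+ * size, shrinking is disabled via NO_SHRINK. */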
+
+
+/*************************************************************************/ /*!
+@Function HASH_Create_Extended
+@Description Create a self scaling hash table, using the supplied
+ key size, and the supplied hash and key comparison
+ functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the number
+ of entries in the hash table, not its size in
+ bytes.
+@Input uKeySize The size of the key, in bytes.
+@Input pfnHashFunc Pointer to hash function.
+@Input pfnKeyComp Pointer to key comparison function.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp)
+{
+ HASH_TABLE *pHash;
+
+ if (uInitialLen == 0 || uKeySize == 0 || pfnHashFunc == NULL || pfnKeyComp == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Create_Extended: invalid input parameters"));
+ return NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Create_Extended: InitialSize=0x%x", uInitialLen));
+
+ pHash = _AllocMem(sizeof(HASH_TABLE));
+ if (pHash == NULL)
+ {
+ return NULL;
+ }
+
+ pHash->uCount = 0;
+ pHash->uSize = uInitialLen;
+ pHash->uMinimumSize = uInitialLen;
+ pHash->uKeySize = uKeySize;
+ pHash->uGrowThreshold = (uInitialLen >> 2) * 3;
+ pHash->uShrinkThreshold = NO_SHRINK;
+ pHash->pfnHashFunc = pfnHashFunc;
+ pHash->pfnKeyComp = pfnKeyComp;
+
+ pHash->ppBucketTable = _AllocZMem(sizeof (BUCKET *) * pHash->uSize);
+ if (pHash->ppBucketTable == NULL)
+ {
+ _FreeMem(pHash);
+ /*not nulling pointer, out of scope*/
+ return NULL;
+ }
+
+ return pHash;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Create
+@Description Create a self scaling hash table with a key
+ consisting of a single uintptr_t, and using
+ the default hash and key comparison functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the
+ number of entries in the hash table, not its size
+ in bytes.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+IMG_INTERNAL
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen)
+{
+ return HASH_Create_Extended(uInitialLen, sizeof(uintptr_t),
+ &HASH_Func_Default, &HASH_Key_Comp_Default);
+}
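+
+/* Usage sketch (editorial illustration, not part of the driver sources):
+ * a table keyed by a single uintptr_t. HASH_Remove is assumed here to be
+ * the single-word wrapper around HASH_Remove_Extended, mirroring
+ * HASH_Insert above. All entries must be removed before HASH_Delete.
+ *
+ *    HASH_TABLE *pTable = HASH_Create(64);
+ *
+ *    if (pTable != NULL)
+ *    {
+ *        HASH_Insert(pTable, (uintptr_t)1234, (uintptr_t)pvValue);
+ *        ...
+ *        HASH_Remove(pTable, (uintptr_t)1234);
+ *        HASH_Delete(pTable);
+ *    }
+ */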
+
+/*************************************************************************/ /*!
+@Function HASH_Delete
+@Description Delete a hash table created by HASH_Create_Extended or
+ HASH_Create. All entries in the table must have been
+ removed before calling this function.
+@Input pHash Hash table
+@Return None
+*/ /**************************************************************************/
+IMG_INTERNAL void
+HASH_Delete (HASH_TABLE *pHash)
+{
+ IMG_BOOL bDoCheck = IMG_TRUE;
+#if defined(__KERNEL__) && !defined(__QNXNTO__)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (psPVRSRVData != NULL)
+ {
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bDoCheck = IMG_FALSE;
+ }
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ else
+ {
+ bDoCheck = IMG_FALSE;
+ }
+#endif
+#endif
+ if (pHash != NULL)
+ {
+ PVR_DPF ((PVR_DBG_MESSAGE, "HASH_Delete"));
+
+ if (bDoCheck)
+ {
+ PVR_ASSERT (pHash->uCount==0);
+ }
+	if (pHash->uCount != 0)
+	{
+		IMG_UINT32 i;
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Leak detected in hash table!", __func__));
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmemcontext", __func__));
+		PVR_DPF ((PVR_DBG_ERROR, "%s: Removing remaining %u hash entries.", __func__, pHash->uCount));
+
+		/* Walk every bucket chain, not just the first uCount table slots,
+		 * so that chained entries are freed as well */
+		for (i = 0; i < pHash->uSize; i++)
+		{
+			BUCKET *pBucket = pHash->ppBucketTable[i];
+			while (pBucket != NULL)
+			{
+				BUCKET *pNext = pBucket->pNext;
+				_FreeMem(pBucket);
+				pBucket = pNext;
+			}
+		}
+	}
+ _FreeMem(pHash->ppBucketTable);
+ pHash->ppBucketTable = NULL;
+ _FreeMem(pHash);
+ /*not nulling pointer, copy on stack*/
+ }
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Insert_Extended
+@Description Insert a key value pair into a hash table created
+ with HASH_Create_Extended.
+@Input pHash Hash table
+@Input pKey Pointer to the key.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success
+ IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v)
+{
+ BUCKET *pBucket;
+
+ PVR_ASSERT (pHash != NULL);
+
+ if (pHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Insert_Extended: invalid parameter"));
+ return IMG_FALSE;
+ }
+
+ pBucket = _AllocMem(sizeof(BUCKET) + pHash->uKeySize);
+ if (pBucket == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ pBucket->v = v;
+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k (linux)*/
+ OSCachedMemCopy(pBucket->k, pKey, pHash->uKeySize);
+
+ _ChainInsert (pHash, pBucket, pHash->ppBucketTable, pHash->uSize);
+
+ pHash->uCount++;
+
+ /* check if we need to think about re-balancing */
+ if (pHash->uCount > pHash->uGrowThreshold)
+ {
+ /* Ignore the return code from _Resize because the hash table is
+ still in a valid state and although not ideally sized, it is still
+ functional */
+ _Resize (pHash, pHash->uSize << 1);
+ }
+
+ return IMG_TRUE;
+}
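+
+/*
+ * Illustrative sketch only (not driver code; EXAMPLE_KEY and
+ * _ExampleInsert are invented names): inserting with a composite key
+ * through the _Extended interface. The key bytes are copied into the
+ * bucket, so a stack-allocated key is safe to pass.
+ */
+#if 0
+typedef struct _EXAMPLE_KEY_
+{
+	uintptr_t uiPID;
+	uintptr_t uiHandle;
+} EXAMPLE_KEY;
+
+static IMG_BOOL _ExampleInsert(HASH_TABLE *pTable,
+                               uintptr_t uiPID,
+                               uintptr_t uiHandle,
+                               uintptr_t uiValue)
+{
+	/* pTable is assumed to come from HASH_Create_Extended(16,
+	 * sizeof(EXAMPLE_KEY), &HASH_Func_Default, &HASH_Key_Comp_Default) */
+	EXAMPLE_KEY sKey = { uiPID, uiHandle };
+
+	return HASH_Insert_Extended(pTable, &sKey, uiValue);
+}
+#endif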
+
+/*************************************************************************/ /*!
+@Function HASH_Insert
+@Description Insert a key value pair into a hash table created with
+ HASH_Create.
+@Input pHash Hash table
+@Input k The key value.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success.
+ IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v)
+{
+ return HASH_Insert_Extended(pHash, &k, v);
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Remove_Extended
+@Description Remove a key from a hash table created with
+ HASH_Create_Extended.
+@Input pHash Hash table
+@Input pKey Pointer to key.
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey)
+{
+ BUCKET **ppBucket;
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pHash != NULL);
+
+ if (pHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Remove_Extended: Null hash table"));
+ return 0;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+ {
+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+ {
+ BUCKET *pBucket = *ppBucket;
+ uintptr_t v = pBucket->v;
+ (*ppBucket) = pBucket->pNext;
+
+ _FreeMem(pBucket);
+ /*not nulling original pointer, already overwritten*/
+
+ pHash->uCount--;
+
+ /* check if we need to think about re-balancing, when the shrink
+ * threshold is 0 we are at the minimum size, no further shrink */
+ if (pHash->uCount < pHash->uShrinkThreshold)
+ {
+ /* Ignore the return code from _Resize because the
+ hash table is still in a valid state and although
+ not ideally sized, it is still functional */
+ _Resize(pHash, MAX(pHash->uSize >> 1, pHash->uMinimumSize));
+ }
+
+ return v;
+ }
+ }
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Remove
+@Description Remove a key value pair from a hash table created
+ with HASH_Create.
+@Input pHash Hash table
+@Input k The key
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Remove (HASH_TABLE *pHash, uintptr_t k)
+{
+ return HASH_Remove_Extended(pHash, &k);
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve_Extended
+@Description Retrieve a value from a hash table created with
+ HASH_Create_Extended.
+@Input pHash Hash table
+@Input pKey Pointer to the key.
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey)
+{
+ BUCKET **ppBucket;
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pHash != NULL);
+
+ if (pHash == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HASH_Retrieve_Extended: Null hash table"));
+ return 0;
+ }
+
+ uIndex = KEY_TO_INDEX(pHash, pKey, pHash->uSize);
+
+ for (ppBucket = &(pHash->ppBucketTable[uIndex]); *ppBucket != NULL; ppBucket = &((*ppBucket)->pNext))
+ {
+ /* PRQA S 0432,0541 1 */ /* ignore warning about dynamic array k */
+ if (KEY_COMPARE(pHash, (*ppBucket)->k, pKey))
+ {
+ BUCKET *pBucket = *ppBucket;
+ uintptr_t v = pBucket->v;
+
+ return v;
+ }
+ }
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve
+@Description Retrieve a value from a hash table created with
+ HASH_Create.
+@Input pHash Hash table
+@Input k The key
+@Return 0 if the key is missing, or the value associated with the key.
+*/ /**************************************************************************/
+IMG_INTERNAL uintptr_t
+HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k)
+{
+ return HASH_Retrieve_Extended(pHash, &k);
+}
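+
+/*
+ * Illustrative lifecycle sketch only (not driver code; _ExampleLifecycle
+ * is an invented name): the simple uintptr_t-keyed interface. Note that a
+ * stored value of 0 cannot be distinguished from "key not present" on
+ * retrieval or removal.
+ */
+#if 0
+static void _ExampleLifecycle(void)
+{
+	HASH_TABLE *pTable = HASH_Create(16);
+
+	if (pTable == NULL)
+	{
+		return;
+	}
+
+	if (HASH_Insert(pTable, (uintptr_t)0x1234, (uintptr_t)0xCAFE))
+	{
+		PVR_ASSERT(HASH_Retrieve(pTable, (uintptr_t)0x1234) == 0xCAFE);
+		PVR_ASSERT(HASH_Remove(pTable, (uintptr_t)0x1234) == 0xCAFE);
+	}
+
+	/* The table must be empty again before it is deleted */
+	HASH_Delete(pTable);
+}
+#endif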
+
+/*************************************************************************/ /*!
+@Function HASH_Iterate
+@Description Iterate over every entry in the hash table
+@Input pHash - Hash table to iterate
+@Input pfnCallback - Callback to call with the key and data for each
+ entry in the hash table
+@Return Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback)
+{
+ IMG_UINT32 uIndex;
+ for (uIndex=0; uIndex < pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ pBucket = pHash->ppBucketTable[uIndex];
+ while (pBucket != NULL)
+ {
+ PVRSRV_ERROR eError;
+ BUCKET *pNextBucket = pBucket->pNext;
+
+ eError = pfnCallback((uintptr_t) ((void *) *(pBucket->k)), pBucket->v);
+
+ /* The callback might want us to break out early */
+ if (eError != PVRSRV_OK)
+ return eError;
+
+ pBucket = pNextBucket;
+ }
+ }
+ return PVRSRV_OK;
+}
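+
+/*
+ * Illustrative callback sketch only (not driver code; _ExampleCount is an
+ * invented name): a HASH_pfnCallback that counts entries and stops the
+ * iteration early by returning a non-PVRSRV_OK code.
+ */
+#if 0
+static IMG_UINT32 g_uiSeen;
+
+static PVRSRV_ERROR _ExampleCount(uintptr_t k, uintptr_t v)
+{
+	PVR_UNREFERENCED_PARAMETER(k);
+	PVR_UNREFERENCED_PARAMETER(v);
+
+	if (++g_uiSeen > 1000)
+	{
+		/* Any non-OK code makes HASH_Iterate return immediately */
+		return PVRSRV_ERROR_OUT_OF_MEMORY;
+	}
+
+	return PVRSRV_OK;
+}
+#endif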
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function HASH_Dump
+@Description To dump the contents of a hash table in human readable
+ form.
+@Input pHash Hash table
+*/ /**************************************************************************/
+void
+HASH_Dump (HASH_TABLE *pHash)
+{
+ IMG_UINT32 uIndex;
+ IMG_UINT32 uMaxLength=0;
+ IMG_UINT32 uEmptyCount=0;
+
+ PVR_ASSERT (pHash != NULL);
+ for (uIndex=0; uIndex<pHash->uSize; uIndex++)
+ {
+ BUCKET *pBucket;
+ IMG_UINT32 uLength = 0;
+ if (pHash->ppBucketTable[uIndex] == NULL)
+ {
+ uEmptyCount++;
+ }
+ for (pBucket=pHash->ppBucketTable[uIndex];
+ pBucket != NULL;
+ pBucket = pBucket->pNext)
+ {
+ uLength++;
+ }
+ uMaxLength = MAX(uMaxLength, uLength);
+ }
+
+ PVR_TRACE(("hash table: uMinimumSize=%d size=%d count=%d",
+ pHash->uMinimumSize, pHash->uSize, pHash->uCount));
+ PVR_TRACE((" empty=%d max=%d", uEmptyCount, uMaxLength));
+}
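+
+/*
+ * Example output (illustrative values only):
+ *   hash table: uMinimumSize=16 size=64 count=40
+ *    empty=12 max=4
+ */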
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/hash.h b/drivers/gpu/drm/img-rogue/1.10/hash.h
new file mode 100644
index 00000000000000..6c8171b1235e53
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/hash.h
@@ -0,0 +1,229 @@
+/*************************************************************************/ /*!
+@File
+@Title Self scaling hash tables
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements simple self scaling hash tables.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HASH_H_
+#define _HASH_H_
+
+/* include5/ */
+#include "img_types.h"
+
+/* services/client/include/ or services/server/include/ */
+#include "osfunc.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Keys passed to the comparison function are only guaranteed to
+ * be aligned on an uintptr_t boundary.
+ */
+typedef IMG_UINT32 HASH_FUNC(size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+typedef IMG_BOOL HASH_KEY_COMP(size_t uKeySize, void *pKey1, void *pKey2);
+
+typedef struct _HASH_TABLE_ HASH_TABLE;
+
+typedef PVRSRV_ERROR (*HASH_pfnCallback) (
+ uintptr_t k,
+ uintptr_t v
+);
+
+/*************************************************************************/ /*!
+@Function HASH_Func_Default
+@Description Hash function intended for hashing keys composed of
+ uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey A pointer to the key to hash.
+@Input uHashTabLen The length of the hash table.
+@Return The hash value.
+*/ /**************************************************************************/
+IMG_UINT32 HASH_Func_Default (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
+/*************************************************************************/ /*!
+@Function HASH_Key_Comp_Default
+@Description Compares keys composed of uintptr_t arrays.
+@Input uKeySize The size of the hash key, in bytes.
+@Input pKey1 Pointer to first hash key to compare.
+@Input pKey2 Pointer to second hash key to compare.
+@Return IMG_TRUE - the keys match.
+ IMG_FALSE - the keys don't match.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Key_Comp_Default (size_t uKeySize, void *pKey1, void *pKey2);
+
+/*************************************************************************/ /*!
+@Function HASH_Create_Extended
+@Description Create a self scaling hash table, using the supplied
+                key size, and the supplied hash and key comparison
+ functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the number
+ of entries in the hash table, not its size in
+ bytes.
+@Input uKeySize The size of the key, in bytes.
+@Input pfnHashFunc Pointer to hash function.
+@Input pfnKeyComp Pointer to key comparsion function.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create_Extended (IMG_UINT32 uInitialLen, size_t uKeySize, HASH_FUNC *pfnHashFunc, HASH_KEY_COMP *pfnKeyComp);
+
+/*************************************************************************/ /*!
+@Function HASH_Create
+@Description Create a self scaling hash table with a key
+ consisting of a single uintptr_t, and using
+ the default hash and key comparison functions.
+@Input uInitialLen Initial and minimum length of the
+ hash table, where the length refers to the
+ number of entries in the hash table, not its size
+ in bytes.
+@Return NULL or hash table handle.
+*/ /**************************************************************************/
+HASH_TABLE * HASH_Create (IMG_UINT32 uInitialLen);
+
+/*************************************************************************/ /*!
+@Function HASH_Delete
+@Description Delete a hash table created by HASH_Create_Extended or
+ HASH_Create. All entries in the table must have been
+ removed before calling this function.
+@Input pHash Hash table
+*/ /**************************************************************************/
+void HASH_Delete (HASH_TABLE *pHash);
+
+/*************************************************************************/ /*!
+@Function HASH_Insert_Extended
+@Description Insert a key value pair into a hash table created
+ with HASH_Create_Extended.
+@Input pHash The hash table.
+@Input pKey Pointer to the key.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success
+ IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert_Extended (HASH_TABLE *pHash, void *pKey, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function       HASH_Insert
+@Description Insert a key value pair into a hash table created with
+ HASH_Create.
+@Input pHash The hash table.
+@Input k The key value.
+@Input v The value associated with the key.
+@Return IMG_TRUE - success.
+ IMG_FALSE - failure.
+*/ /**************************************************************************/
+IMG_BOOL HASH_Insert (HASH_TABLE *pHash, uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function HASH_Remove_Extended
+@Description Remove a key from a hash table created with
+ HASH_Create_Extended.
+@Input pHash The hash table.
+@Input pKey Pointer to key.
+@Return 0 if the key is missing, or the value associated
+ with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove_Extended(HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function HASH_Remove
+@Description Remove a key value pair from a hash table created
+ with HASH_Create.
+@Input pHash The hash table.
+@Input          k               The key value.
+@Return 0 if the key is missing, or the value associated
+ with the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Remove (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve_Extended
+@Description Retrieve a value from a hash table created with
+ HASH_Create_Extended.
+@Input pHash The hash table.
+@Input pKey Pointer to key.
+@Return 0 if the key is missing, or the value associated with
+ the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve_Extended (HASH_TABLE *pHash, void *pKey);
+
+/*************************************************************************/ /*!
+@Function HASH_Retrieve
+@Description Retrieve a value from a hash table created with
+ HASH_Create.
+@Input pHash The hash table.
+@Input          k               The key.
+@Return 0 if the key is missing, or the value associated with
+ the key.
+*/ /**************************************************************************/
+uintptr_t HASH_Retrieve (HASH_TABLE *pHash, uintptr_t k);
+
+/*************************************************************************/ /*!
+@Function HASH_Iterate
+@Description Iterate over every entry in the hash table
+@Input pHash Hash table to iterate
+@Input pfnCallback Callback to call with the key and data for
+ each entry in the hash table
+@Return Callback error if any, otherwise PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR HASH_Iterate(HASH_TABLE *pHash, HASH_pfnCallback pfnCallback);
+
+#ifdef HASH_TRACE
+/*************************************************************************/ /*!
+@Function HASH_Dump
+@Description Dump out some information about a hash table.
+@Input pHash The hash table.
+*/ /**************************************************************************/
+void HASH_Dump (HASH_TABLE *pHash);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _HASH_H_ */
+
+/******************************************************************************
+ End of file (hash.h)
+******************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/hostfunc.c b/drivers/gpu/drm/img-rogue/1.10/hostfunc.c
new file mode 100644
index 00000000000000..b0bf414fdc917e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/hostfunc.c
@@ -0,0 +1,216 @@
+/*************************************************************************/ /*!
+@File
+@Title Debug driver file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#include <linux/mutex.h>
+#include <linux/hardirq.h>
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+#include "dbgdrvif_srv5.h"
+#include "hostfunc.h"
+#include "dbgdriv.h"
+
+
+/*!
+******************************************************************************
+
+ @Function HostMemSet
+
+ @Description	Function that does the same as the C memset() function
+
+ @Modified *pvDest : pointer to start of buffer to be set
+
+ @Input ui8Value: value to set each byte to
+
+ @Input ui32Size : number of bytes to set
+
+ @Return void
+
+******************************************************************************/
+void HostMemSet(void *pvDest, IMG_UINT8 ui8Value, IMG_UINT32 ui32Size)
+{
+ memset(pvDest, (int) ui8Value, (size_t) ui32Size);
+}
+
+/*!
+******************************************************************************
+
+ @Function HostMemCopy
+
+ @Description	Copies ui32Size bytes from pvSrc to pvDst
+
+ @Output   pvDst - pointer to destination
+ @Input    pvSrc - pointer to source
+ @Input    ui32Size - bytes to copy
+
+ @Return none
+
+******************************************************************************/
+void HostMemCopy(void *pvDst, void *pvSrc, IMG_UINT32 ui32Size)
+{
+#if defined(USE_UNOPTIMISED_MEMCPY)
+	unsigned char *src, *dst;
+	IMG_UINT32 i;
+
+	src = (unsigned char *)pvSrc;
+	dst = (unsigned char *)pvDst;
+	for (i = 0; i < ui32Size; i++)
+	{
+		dst[i] = src[i];
+	}
+#else
+ memcpy(pvDst, pvSrc, ui32Size);
+#endif
+}
+
+IMG_UINT32 HostReadRegistryDWORDFromString(char *pcKey, char *pcValueName, IMG_UINT32 *pui32Data)
+{
+ /* XXX Not yet implemented */
+ return 0;
+}
+
+void * HostPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+	return vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+void HostPageablePageFree(void * pvBase)
+{
+ vfree(pvBase);
+}
+
+void * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages)
+{
+	return vmalloc(ui32Pages * PAGE_SIZE);
+}
+
+void HostNonPageablePageFree(void * pvBase)
+{
+ vfree(pvBase);
+}
+
+void * HostMapKrnBufIntoUser(void * pvKrnAddr, IMG_UINT32 ui32Size, void **ppvMdl)
+{
+ /* XXX Not yet implemented */
+ return NULL;
+}
+
+void HostUnMapKrnBufFromUser(void * pvUserAddr, void * pvMdl, void * pvProcess)
+{
+ /* XXX Not yet implemented */
+}
+
+void HostCreateRegDeclStreams(void)
+{
+ /* XXX Not yet implemented */
+}
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+
+#define EVENT_WAIT_TIMEOUT_MS 500
+#define EVENT_WAIT_TIMEOUT_JIFFIES (EVENT_WAIT_TIMEOUT_MS * HZ / 1000)
+
+static int iStreamData;
+static wait_queue_head_t sStreamDataEvent;
+
+IMG_INT32 HostCreateEventObjects(void)
+{
+ init_waitqueue_head(&sStreamDataEvent);
+
+ return 0;
+}
+
+void HostWaitForEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+ /*
+ * More than one process may be woken up.
+ * Any process that wakes up should consume
+ * all the data from the streams.
+ */
+ wait_event_interruptible_timeout(sStreamDataEvent, iStreamData != 0, EVENT_WAIT_TIMEOUT_JIFFIES);
+ iStreamData = 0;
+ break;
+ default:
+ /*
+ * For unknown events, enter an interruptible sleep.
+ */
+ msleep_interruptible(EVENT_WAIT_TIMEOUT_MS);
+ break;
+ }
+}
+
+void HostSignalEvent(DBG_EVENT eEvent)
+{
+ switch(eEvent)
+ {
+ case DBG_EVENT_STREAM_DATA:
+ iStreamData = 1;
+ wake_up_interruptible(&sStreamDataEvent);
+ break;
+ default:
+ break;
+ }
+}
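+
+/*
+ * Usage note (illustrative): a producer that has appended data to a debug
+ * stream calls HostSignalEvent(DBG_EVENT_STREAM_DATA), while readers block
+ * in HostWaitForEvent(DBG_EVENT_STREAM_DATA) above. Because iStreamData is
+ * tested before sleeping, a signal raised while no reader was waiting is
+ * not lost: the next waiter sees the flag and returns immediately.
+ */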
+
+void HostDestroyEventObjects(void)
+{
+}
+#endif /* defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/hostfunc.h b/drivers/gpu/drm/img-rogue/1.10/hostfunc.h
new file mode 100644
index 00000000000000..b677cfd98aef0d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/hostfunc.h
@@ -0,0 +1,105 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HOSTFUNC_
+#define _HOSTFUNC_
+
+/*****************************************************************************
+ Defines
+*****************************************************************************/
+#define HOST_PAGESIZE (4096UL)
+#define DBG_MEMORY_INITIALIZER (0xe2)
+
+/*****************************************************************************
+ Function prototypes
+*****************************************************************************/
+IMG_UINT32 HostReadRegistryDWORDFromString(IMG_CHAR *pcKey, IMG_CHAR *pcValueName, IMG_UINT32 *pui32Data);
+
+void * HostPageablePageAlloc(IMG_UINT32 ui32Pages);
+void HostPageablePageFree(void * pvBase);
+void * HostNonPageablePageAlloc(IMG_UINT32 ui32Pages);
+void HostNonPageablePageFree(void * pvBase);
+
+void * HostMapKrnBufIntoUser(void * pvKrnAddr, IMG_UINT32 ui32Size, void * *ppvMdl);
+void HostUnMapKrnBufFromUser(void * pvUserAddr, void * pvMdl, void * pvProcess);
+
+void HostCreateRegDeclStreams(void);
+
+/* Direct macros for Linux to avoid LockDep false-positives from occurring */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#undef HOST_PAGESIZE
+#define HOST_PAGESIZE (PAGE_SIZE)
+
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#define HostCreateMutex(void) ({ \
+ struct mutex* pMutex = NULL; \
+ pMutex = kmalloc(sizeof(struct mutex), GFP_KERNEL); \
+	if (pMutex) { mutex_init(pMutex); } \
+ pMutex;})
+#define HostDestroyMutex(hLock) ({mutex_destroy((hLock)); kfree((hLock)); PVRSRV_OK;})
+
+#define HostAquireMutex(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define HostReleaseMutex(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+void * HostCreateMutex(void);
+void HostAquireMutex(void * pvMutex);
+void HostReleaseMutex(void * pvMutex);
+void HostDestroyMutex(void * pvMutex);
+
+#endif
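+
+/*
+ * Illustrative sketch only (the Linux kernel case is shown; _ExampleLocking
+ * is an invented name): typical use of the mutex helpers above.
+ */
+#if 0
+static void _ExampleLocking(void)
+{
+	struct mutex *psLock = HostCreateMutex();
+
+	if (psLock != NULL)
+	{
+		HostAquireMutex(psLock);
+		/* ... critical section ... */
+		HostReleaseMutex(psLock);
+		HostDestroyMutex(psLock);
+	}
+}
+#endif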
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+IMG_INT32 HostCreateEventObjects(void);
+void HostWaitForEvent(DBG_EVENT eEvent);
+void HostSignalEvent(DBG_EVENT eEvent);
+void HostDestroyEventObjects(void);
+#endif /*defined(SUPPORT_DBGDRV_EVENT_OBJECTS) */
+
+#endif
+
+/*****************************************************************************
+ End of file (HOSTFUNC.H)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/htb_debug.c b/drivers/gpu/drm/img-rogue/1.10/htb_debug.c
new file mode 100644
index 00000000000000..c01c21288480a3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htb_debug.c
@@ -0,0 +1,1180 @@
+/*************************************************************************/ /*!
+@File htb_debug.c
+@Title Debug Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides kernel side debugFS Functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxdevice.h"
+#include "debugmisc_server.h"
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvr_debugfs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "htb_debug.h"
+#include "kernel_compatibility.h"
+
+// Global data handles for buffer manipulation and processing
+typedef struct
+{
+ PPVR_DEBUGFS_ENTRY_DATA psDumpHostDebugFSEntry; /* debugFS entry hook */
+ IMG_HANDLE hStream; /* Stream handle for debugFS use */
+} HTB_DBG_INFO;
+
+static HTB_DBG_INFO g_sHTBData;
+
+// Enable for extra debug level
+//#define HTB_CHATTY 1
+
+/*****************************************************************************
+ * debugFS display routines
+ ******************************************************************************/
+static int HTBDumpBuffer(DUMPDEBUG_PRINTF_FUNC *, void *, void *);
+static void _HBTraceSeqPrintf(void *, const IMG_CHAR *, ...);
+static int _DebugHBTraceSeqShow(struct seq_file *, void *);
+static void *_DebugHBTraceSeqStart(struct seq_file *, loff_t *);
+static void _DebugHBTraceSeqStop(struct seq_file *, void *);
+static void *_DebugHBTraceSeqNext(struct seq_file *, void *, loff_t *);
+
+static void _HBTraceSeqPrintf(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...)
+{
+ struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+	seq_vprintf(psSeqFile, pszFormat, ArgList);
+ va_end(ArgList);
+}
+
+static int _DebugHBTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ int retVal;
+
+ PVR_ASSERT(NULL != psSeqFile);
+
+ /* psSeqFile should never be NULL */
+ if (psSeqFile == NULL)
+ {
+ return -1;
+ }
+
+ /*
+ * Ensure that we have a valid address to use to dump info from. If NULL we
+ * return a failure code to terminate the seq_read() call. pvData is either
+ * SEQ_START_TOKEN (for the initial call) or an HTB buffer address for
+ * subsequent calls [returned from the NEXT function].
+ */
+ if (pvData == NULL)
+ {
+ return -1;
+ }
+
+
+ retVal = HTBDumpBuffer(_HBTraceSeqPrintf, psSeqFile, pvData);
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Returning %d", __func__, retVal));
+#endif /* HTB_CHATTY */
+
+ return retVal;
+}
+
+typedef struct {
+ IMG_PBYTE pBuf; /* Raw data buffer from TL stream */
+ IMG_UINT32 uiBufLen; /* Amount of data to process from 'pBuf' */
+ IMG_UINT32 uiTotal; /* Total bytes processed */
+ IMG_UINT32 uiMsgLen; /* Length of HTB message to be processed */
+ IMG_PBYTE pCurr; /* pointer to current message to be decoded */
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN]; /* Output string */
+} HTB_Sentinel_t;
+
+static IMG_UINT32 idToLogIdx(IMG_UINT32); /* Forward declaration */
+
+/*
+ * HTB_GetNextMessage
+ *
+ * Get next non-empty message block from the buffer held in pSentinel->pBuf
+ * If we exhaust the data buffer we refill it, after releasing the previous
+ * message(s) [only one non-NULL message, but PAD messages will get released
+ * as we traverse them].
+ *
+ * Input:
+ * pSentinel references the already acquired data buffer
+ *
+ * Output:
+ * pSentinel
+ * -> uiMsglen updated to the size of the non-NULL message
+ *
+ * Returns:
+ * Address of first non-NULL message in the buffer (if any)
+ * NULL if there is no further data available from the stream and the buffer
+ * contents have been drained.
+ */
+static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *);
+static IMG_PBYTE HTB_GetNextMessage(HTB_Sentinel_t *pSentinel)
+{
+ IMG_PBYTE pNext, pLast, pStart, pData = NULL;
+ IMG_PBYTE pCurrent; /* Current processing point within buffer */
+ PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */
+ IMG_UINT32 uiHdrType; /* Packet header type */
+ IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */
+ IMG_UINT32 ui32DataSize;
+ IMG_UINT32 uiBufLen;
+ IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE;
+ IMG_UINT32 ui32Data;
+ IMG_UINT32 ui32LogIdx;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(NULL != pSentinel);
+
+ uiBufLen = pSentinel->uiBufLen;
+ /* Convert from byte to uint32 size */
+ ui32DataSize = pSentinel->uiBufLen / sizeof (IMG_UINT32);
+
+ pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+
+ pStart = pSentinel->pBuf;
+
+ pNext = pStart;
+ pSentinel->uiMsgLen = 0; // Reset count for this message
+ uiMsgSize = 0; // nothing processed so far
+ ui32LogIdx = HTB_SF_LAST; // Loop terminator condition
+
+ do
+ {
+ /*
+ * If we've drained the buffer we must RELEASE and ACQUIRE some more.
+ */
+ if (pNext >= pLast)
+ {
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'", __func__,
+ "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+ return NULL;
+ }
+
+ // Reset our limits - if we've returned an empty buffer we're done.
+ pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+ pStart = pSentinel->pBuf;
+ pNext = pStart;
+
+ if (pStart == NULL || pLast == NULL)
+ {
+ return NULL;
+ }
+ }
+
+ /*
+ * We should have a header followed by data block(s) in the stream.
+ */
+
+ pCurrent = pNext;
+ ppHdr = GET_PACKET_HDR(pCurrent);
+
+ if (ppHdr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unexpected NULL packet in Host Trace buffer", __func__));
+ pSentinel->uiMsgLen += uiMsgSize;
+ return NULL; // This should never happen
+ }
+
+ /*
+ * This should *NEVER* fire. If it does it means we have got some
+ * dubious packet header back from the HTB stream. In this case
+ * the sensible thing is to abort processing and return to
+ * the caller
+ */
+ uiHdrType = GET_PACKET_TYPE(ppHdr);
+
+ PVR_ASSERT(uiHdrType < PVRSRVTL_PACKETTYPE_LAST &&
+ uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF);
+
+ if (uiHdrType < PVRSRVTL_PACKETTYPE_LAST &&
+ uiHdrType > PVRSRVTL_PACKETTYPE_UNDEF)
+ {
+ /*
+ * We have a (potentially) valid data header. We should see if
+ * the associated packet header matches one of our expected
+ * types.
+ */
+ pNext = (IMG_PBYTE)GET_NEXT_PACKET_ADDR(ppHdr);
+
+ PVR_ASSERT(pNext != NULL);
+
+ uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
+
+ pSentinel->uiMsgLen += uiMsgSize;
+
+ pData = GET_PACKET_DATA_PTR(ppHdr);
+
+ /*
+ * Handle non-DATA packet types. These include PAD fields which
+ * may have data associated and other types. We simply discard
+ * these as they have no decodable information within them.
+ */
+ if (uiHdrType != PVRSRVTL_PACKETTYPE_DATA)
+ {
+ /*
+ * Now release the current non-data packet and proceed to the
+ * next entry (if any).
+ */
+ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, uiMsgSize);
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Packet Type %x Length %u",
+ __func__, uiHdrType, uiMsgSize));
+#endif
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - '%s' message"
+ " size %u", __func__, "TLClientReleaseDataLess",
+ PVRSRVGETERRORSTRING(eError), uiMsgSize));
+ }
+
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s Giving up",
+ __func__, "TLClientAcquireData",
+ PVRSRVGETERRORSTRING(eError)));
+
+ return NULL;
+ }
+ pSentinel->uiMsgLen = 0;
+ // Reset our limits - if we've returned an empty buffer we're done.
+ pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+ pStart = pSentinel->pBuf;
+ pNext = pStart;
+
+ if (pStart == NULL || pLast == NULL)
+ {
+ return NULL;
+ }
+ continue;
+ }
+ if (pData == NULL || pData >= pLast)
+ {
+ continue;
+ }
+ ui32Data = *(IMG_UINT32 *)pData;
+ ui32LogIdx = idToLogIdx(ui32Data);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Unexpected Header @%p value %x",
+ ppHdr, uiHdrType));
+
+ return NULL;
+ }
+
+ /*
+		 * Check whether the unrecognised ID is nevertheless valid; if so,
+		 * tracebuf needs updating.
+ */
+ if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data)
+ && IMG_FALSE == bUnrecognizedErrorPrinted)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+ __func__, ui32Data, HTB_SF_GID(ui32Data),
+ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+			bUnrecognizedErrorPrinted = IMG_TRUE;
+ }
+
+ } while (HTB_SF_LAST == ui32LogIdx);
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Returning data @ %p Log value '%x'",
+ __func__, pCurrent, ui32Data));
+#endif /* HTB_CHATTY */
+
+ return pCurrent;
+}
+
+/*
+ * HTB_GetFirstMessage
+ *
+ * Called from START to obtain the buffer address of the first message within
+ * pSentinel->pBuf. Will ACQUIRE data if the buffer is empty.
+ *
+ * Input:
+ * pSentinel
+ * puiPosition Offset within the debugFS file
+ *
+ * Output:
+ * pSentinel->pCurr Set to reference the first valid non-NULL message within
+ * the buffer. If no valid message is found set to NULL.
+ * pSentinel
+ * ->pBuf if unset on entry
+ * ->uiBufLen if pBuf unset on entry
+ *
+ * Side-effects:
+ * HTB TL stream will be updated to bypass any zero-length PAD messages before
+ * the first non-NULL message (if any).
+ */
+static void HTB_GetFirstMessage(HTB_Sentinel_t *, loff_t *);
+static void HTB_GetFirstMessage(HTB_Sentinel_t *pSentinel, loff_t *puiPosition)
+{
+ PVRSRV_ERROR eError;
+
+ if (pSentinel == NULL)
+ return;
+
+ if (pSentinel->pBuf == NULL)
+ {
+ /* Acquire data */
+ pSentinel->uiMsgLen = 0;
+
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'",
+ __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError)));
+
+ pSentinel->pBuf = NULL;
+ pSentinel->pCurr = NULL;
+ }
+ else
+ {
+ /*
+ * If there is no data available we set pSentinel->pCurr to NULL
+ * and return. This is expected behaviour if we've drained the
+ * data and nothing else has yet been produced.
+ */
+ if (pSentinel->uiBufLen == 0 || pSentinel->pBuf == NULL)
+ {
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Empty Buffer @ %p", __func__,
+ pSentinel->pBuf));
+#endif /* HTB_CHATTY */
+ pSentinel->pCurr = NULL;
+ return;
+ }
+ }
+ }
+
+ /* Locate next message within buffer. NULL => no more data to process */
+ pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
+}
+
+/*
+ * _DebugHBTraceSeqStart:
+ *
+ * Returns the address to use for subsequent 'Show', 'Next', 'Stop' file ops.
+ * Return SEQ_START_TOKEN for the very first call and allocate a sentinel for
+ * use by the 'Show' routine and its helpers.
+ * This is stored in the psSeqFile->private hook field.
+ *
+ * We obtain access to the TLstream associated with the HTB. If this doesn't
+ * exist (because no pvrdebug capture trace has been set) we simply return with
+ * a NULL value which will stop the seq_file traversal.
+ */
+static void *_DebugHBTraceSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ HTB_Sentinel_t *pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiTLMode;
+ void *retVal;
+ IMG_HANDLE hStream;
+
+ /* Open the stream in non-blocking mode so that we can determine if there
+ * is no data to consume. Also disable the producer callback (if any) and
+ * the open callback so that we do not generate spurious trace data when
+ * accessing the stream.
+ */
+ uiTLMode = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING|
+ PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK|
+ PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK;
+
+ /* If two or more processes try to read from this file at the same time
+ * the TLClientOpenStream() function will handle this by allowing only
+ * one of them to actually open the stream. The other process will get
+ * an error stating that the stream is already open. The open function
+	 * is thread safe. */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE, HTB_STREAM_NAME, uiTLMode,
+ &hStream);
+
+ if (PVRSRV_ERROR_ALREADY_OPEN == eError)
+ {
+ /* Stream allows only one reader so return error if it's already
+ * opened. */
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Stream handle %p already exists for %s",
+ __func__, g_sHTBData.hStream, HTB_STREAM_NAME));
+#endif
+ return ERR_PTR(-EBUSY);
+ }
+ else if (PVRSRV_OK != eError)
+ {
+ /*
+ * No stream available so nothing to report
+ */
+ return NULL;
+ }
+
+ /* There is a window where hStream can be NULL but the stream is already
+ * opened. This shouldn't matter since the TLClientOpenStream() will make
+ * sure that only one stream can be opened and only one process can reach
+	 * this place at a time. Also the .stop function will always be called
+ * after this function returns so there should be no risk of stream
+ * not being closed. */
+ PVR_ASSERT(g_sHTBData.hStream == NULL);
+ g_sHTBData.hStream = hStream;
+
+ /*
+ * Ensure we have our debug-specific data store allocated and hooked from
+ * our seq_file private data.
+ * If the allocation fails we can safely return NULL which will stop
+ * further calls from the seq_file routines (NULL return from START or NEXT
+ * means we have no (more) data to process)
+ */
+ if (pSentinel == NULL)
+ {
+ pSentinel = (HTB_Sentinel_t *)OSAllocZMem(sizeof (HTB_Sentinel_t));
+ psSeqFile->private = pSentinel;
+ }
+
+ /*
+ * Find the first message location within pSentinel->pBuf
+ * => for SEQ_START_TOKEN we must issue our first ACQUIRE, also for the
+ * subsequent re-START calls (if any).
+ */
+
+ HTB_GetFirstMessage(pSentinel, puiPosition);
+
+ if (*puiPosition == 0)
+ {
+ retVal = SEQ_START_TOKEN;
+ }
+ else
+ {
+ if (pSentinel == NULL)
+ {
+ retVal = NULL;
+ }
+ else
+ {
+ retVal = (void *)pSentinel->pCurr;
+ }
+ }
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Returning %p, Stream %s @ %p", __func__,
+ retVal, HTB_STREAM_NAME, g_sHTBData.hStream));
+#endif /* HTB_CHATTY */
+
+ return retVal;
+
+}
+
+/*
+ * _DebugTBTraceSeqStop:
+ *
+ * Stop processing data collection and release any previously allocated private
+ * data structure if we have exhausted the previously filled data buffers.
+ */
+static void _DebugHBTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ HTB_Sentinel_t *pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+ IMG_UINT32 uiMsgLen;
+
+ if (NULL == pSentinel)
+ return;
+
+ uiMsgLen = pSentinel->uiMsgLen;
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: MsgLen = %d", __func__, uiMsgLen));
+#endif /* HTB_CHATTY */
+
+ /* If we get here the handle should never be NULL because
+ * _DebugHBTraceSeqStart() shouldn't allow that. */
+ if (g_sHTBData.hStream != NULL)
+ {
+ PVRSRV_ERROR eError;
+
+ if (uiMsgLen != 0)
+ {
+ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, uiMsgLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED - %s, nBytes %u",
+ __func__, "TLClientReleaseDataLess",
+ PVRSRVGETERRORSTRING(eError), uiMsgLen));
+ }
+ }
+
+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()",
+ "TLClientCloseStream", PVRSRVGETERRORSTRING(eError),
+ __func__));
+ }
+ g_sHTBData.hStream = NULL;
+ }
+
+ if (pSentinel != NULL)
+ {
+ psSeqFile->private = NULL;
+ OSFreeMem(pSentinel);
+ }
+}
+
+
+/*
+ * _DebugHBTraceSeqNext:
+ *
+ * This is where we release any acquired data which has been processed by the
+ * SeqShow routine. If we have encountered a seq_file overflow we stop
+ * processing and return NULL. Otherwise we release the message that we
+ * previously processed and simply update our position pointer to the next
+ * valid HTB message (if any)
+ */
+static void *_DebugHBTraceSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ loff_t curPos;
+ HTB_Sentinel_t *pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (puiPosition)
+ {
+ curPos = *puiPosition;
+ *puiPosition = curPos+1;
+ }
+
+ /*
+ * Determine if we've had an overflow on the previous 'Show' call. If so
+ * we leave the previously acquired data in the queue (by releasing 0 bytes)
+ * and return NULL to end this seq_read() iteration.
+ * If we have not overflowed we simply get the next HTB message and use that
+ * for our display purposes
+ */
+
+ if (seq_has_overflowed(psSeqFile))
+ {
+ (void)TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream, 0);
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: OVERFLOW - returning NULL", __func__));
+#endif /* HTB_CHATTY */
+
+ return (void *)NULL;
+ }
+ else
+ {
+ eError = TLClientReleaseDataLess(DIRECT_BRIDGE_HANDLE, g_sHTBData.hStream,
+ pSentinel->uiMsgLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s' @ %p Length %d",
+ __func__, "TLClientReleaseDataLess",
+ PVRSRVGETERRORSTRING(eError), pSentinel->pCurr,
+ pSentinel->uiMsgLen));
+ PVR_DPF((PVR_DBG_WARNING, "%s: Buffer @ %p..%p", __func__,
+ pSentinel->pBuf,
+ (IMG_PBYTE)(pSentinel->pBuf+pSentinel->uiBufLen)));
+
+ }
+
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ g_sHTBData.hStream, &pSentinel->pBuf, &pSentinel->uiBufLen);
+
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: %s FAILED '%s'\nPrev message len %d",
+ __func__, "TLClientAcquireData", PVRSRVGETERRORSTRING(eError),
+ pSentinel->uiMsgLen));
+ pSentinel->pBuf = NULL;
+ }
+
+ pSentinel->uiMsgLen = 0; // We don't (yet) know the message size
+ }
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Returning %p Msglen %d",
+ __func__, pSentinel->pBuf, pSentinel->uiMsgLen));
+#endif /* HTB_CHATTY */
+
+ if (pSentinel->pBuf == NULL || pSentinel->uiBufLen == 0)
+ {
+ return NULL;
+ }
+
+ pSentinel->pCurr = HTB_GetNextMessage(pSentinel);
+
+ return pSentinel->pCurr;
+}
+
+static const struct seq_operations gsHTBReadOps = {
+ .start = _DebugHBTraceSeqStart,
+ .stop = _DebugHBTraceSeqStop,
+ .next = _DebugHBTraceSeqNext,
+ .show = _DebugHBTraceSeqShow,
+};
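+
+/*
+ * Illustrative wiring sketch only (_HTBDebugFSOpen and gsHTBDebugFSFops are
+ * invented names): how a seq_operations table such as gsHTBReadOps is
+ * conventionally bound to a debugFS file. The real hook-up for this driver
+ * is performed through pvr_debugfs.c rather than by code like this.
+ */
+#if 0
+static int _HTBDebugFSOpen(struct inode *psINode, struct file *psFile)
+{
+	return seq_open(psFile, &gsHTBReadOps);
+}
+
+static const struct file_operations gsHTBDebugFSFops = {
+	.owner   = THIS_MODULE,
+	.open    = _HTBDebugFSOpen,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+#endif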
+
+
+/******************************************************************************
+ * HTB Dumping routines and definitions
+ ******************************************************************************/
+#define IS_VALID_FMT_STRING(FMT) (strchr(FMT, '%') != NULL)
+#define MAX_STRING_SIZE (128)
+
+typedef enum
+{
+ TRACEBUF_ARG_TYPE_INT,
+ TRACEBUF_ARG_TYPE_ERR,
+ TRACEBUF_ARG_TYPE_NONE
+} TRACEBUF_ARG_TYPE;
+
+/*
+ * Array of all Host Trace log IDs used to convert the tracebuf data
+ */
+typedef struct _HTB_TRACEBUF_LOG_ {
+ HTB_LOG_SFids eSFId;
+ IMG_CHAR *pszName;
+ IMG_CHAR *pszFmt;
+ IMG_UINT32 ui32ArgNum;
+} HTB_TRACEBUF_LOG;
+
+static const HTB_TRACEBUF_LOG aLogs[] = {
+#define X(a, b, c, d, e) {HTB_LOG_CREATESFID(a,b,e), #c, d, e},
+ HTB_LOG_SFIDLIST
+#undef X
+};
+
+static const IMG_CHAR *aGroups[] = {
+#define X(A,B) #B,
+ HTB_LOG_SFGROUPLIST
+#undef X
+};
+static const IMG_UINT32 uiMax_aGroups = ARRAY_SIZE(aGroups) - 1;
+
+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(IMG_CHAR **, IMG_CHAR *);
+/*
+ * ExtractOneArgFmt
+ *
+ * Scan the input 'printf-like' string *ppszFmt and return the next
+ * value string to be displayed. If there is no '%' format field in the
+ * string we return 'TRACEBUF_ARG_TYPE_NONE' and leave the input string
+ * untouched.
+ *
+ * Input
+ * ppszFmt reference to format string to be decoded
+ * pszOneArgFmt single field format from *ppszFmt
+ *
+ * Returns
+ * TRACEBUF_ARG_TYPE_ERR unrecognised argument
+ * TRACEBUF_ARG_TYPE_INT variable is of numeric type
+ * TRACEBUF_ARG_TYPE_NONE no variable reference in *ppszFmt
+ *
+ * Side-effect
+ * *ppszFmt is updated to reference the next part of the format string
+ * to be scanned
+ */
+static TRACEBUF_ARG_TYPE ExtractOneArgFmt(
+ IMG_CHAR **ppszFmt,
+ IMG_CHAR *pszOneArgFmt)
+{
+ IMG_CHAR *pszFmt;
+ IMG_CHAR *psT;
+ IMG_UINT32 ui32Count = MAX_STRING_SIZE;
+ IMG_UINT32 ui32OneArgSize;
+ TRACEBUF_ARG_TYPE eRet = TRACEBUF_ARG_TYPE_ERR;
+
+ if (NULL == ppszFmt)
+ return TRACEBUF_ARG_TYPE_ERR;
+
+ pszFmt = *ppszFmt;
+ if (NULL == pszFmt)
+ return TRACEBUF_ARG_TYPE_ERR;
+
+ /*
+ * Find the first '%'
+ * NOTE: we can be passed a simple string to display which will have no
+ * parameters embedded within it. In this case we simply return
+ * TRACEBUF_ARG_TYPE_NONE and the string contents will be the full pszFmt
+ */
+ psT = strchr(pszFmt, '%');
+ if (psT == NULL)
+ {
+ return TRACEBUF_ARG_TYPE_NONE;
+ }
+
+ /* Find next conversion identifier after the initial '%' */
+ while ((*psT++) && (ui32Count-- > 0))
+ {
+ switch (*psT)
+ {
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ {
+ eRet = TRACEBUF_ARG_TYPE_INT;
+ goto _found_arg;
+ }
+ case 's':
+ {
+ eRet = TRACEBUF_ARG_TYPE_ERR;
+ goto _found_arg;
+ }
+ }
+ }
+
+ if ((psT == NULL) || (ui32Count == 0)) return TRACEBUF_ARG_TYPE_ERR;
+
+_found_arg:
+ ui32OneArgSize = psT - pszFmt + 1;
+ OSCachedMemCopy(pszOneArgFmt, pszFmt, ui32OneArgSize);
+ pszOneArgFmt[ui32OneArgSize] = '\0';
+
+ *ppszFmt = psT + 1;
+
+ return eRet;
+}
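+
+/*
+ * Worked example (illustrative): for *ppszFmt = "FWCommands: %u queued",
+ * the first call copies "FWCommands: %u" into pszOneArgFmt, advances
+ * *ppszFmt to " queued" and returns TRACEBUF_ARG_TYPE_INT. A '%s'
+ * conversion yields TRACEBUF_ARG_TYPE_ERR, and a string containing no '%'
+ * at all yields TRACEBUF_ARG_TYPE_NONE with *ppszFmt left untouched.
+ */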
+
+static IMG_UINT32 idToLogIdx(IMG_UINT32);
+static IMG_UINT32 idToLogIdx(IMG_UINT32 ui32CheckData)
+{
+ IMG_UINT32 i = 0;
+ for (i = 0; aLogs[i].eSFId != HTB_SF_LAST; i++)
+ {
+ if ( ui32CheckData == aLogs[i].eSFId )
+ return i;
+ }
+ /* Nothing found, return max value */
+ return HTB_SF_LAST;
+}
+
+/*
+ * DecodeHTB
+ *
+ * Decode the data buffer message located at pBuf. This should be a valid
+ * HTB message as we are provided with the start of the buffer. If empty there
+ * is no message to process. We update the uiMsgLen field with the size of the
+ * HTB message that we have processed so that it can be returned to the system
+ * on successful logging of the message to the output file.
+ *
+ * Input
+ * pSentinel reference to newly read data and pending completion data
+ * from a previous invocation [handle seq_file buffer overflow]
+ * -> pBuf reference to raw data that we are to parse
+ * -> uiBufLen total number of bytes of data available
+ * -> pCurr start of message to decode
+ *
+ * pvDumpDebugFile output file
+ * pfnDumpDebugPrintf output generating routine
+ *
+ * Output
+ * pSentinel
+ * -> uiMsgLen length of the decoded message which will be freed to
+ * the system on successful completion of the seq_file
+ * update via _DebugHBTraceSeqNext(),
+ * Return Value
+ * 0 successful decode
+ * -1 unsuccessful decode
+ */
+static int
+DecodeHTB(HTB_Sentinel_t *pSentinel,
+ void *pvDumpDebugFile, DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf)
+{
+ IMG_UINT32 ui32Data, ui32LogIdx, ui32ArgsCur;
+ IMG_CHAR *pszFmt = NULL;
+ IMG_CHAR aszOneArgFmt[MAX_STRING_SIZE];
+ IMG_BOOL bUnrecognizedErrorPrinted = IMG_FALSE;
+
+ IMG_UINT32 ui32DataSize;
+ IMG_UINT32 uiBufLen = pSentinel->uiBufLen;
+ size_t nPrinted;
+
+ IMG_PBYTE pNext, pLast, pStart, pData = NULL;
+ PVRSRVTL_PPACKETHDR ppHdr; /* Current packet header */
+ IMG_UINT32 uiHdrType; /* Packet header type */
+ IMG_UINT32 uiMsgSize; /* Message size of current packet (bytes) */
+ IMG_BOOL bPacketsDropped;
+
+ /* Convert from byte to uint32 size */
+ ui32DataSize = uiBufLen / sizeof (IMG_UINT32);
+
+ pLast = pSentinel->pBuf + pSentinel->uiBufLen;
+ pStart = pSentinel->pCurr;
+
+ pNext = pStart;
+ pSentinel->uiMsgLen = 0; // Reset count for this message
+ uiMsgSize = 0; // nothing processed so far
+ ui32LogIdx = HTB_SF_LAST; // Loop terminator condition
+
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: Buf @ %p..%p, Length = %d", __func__,
+ pStart, pLast, uiBufLen));
+#endif /* HTB_CHATTY */
+
+ /*
+ * We should have a DATA header with the necessary information following
+ */
+ ppHdr = GET_PACKET_HDR(pStart);
+
+ if (ppHdr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unexpected NULL packet in Host Trace buffer", __func__));
+ return -1;
+ }
+
+ uiHdrType = GET_PACKET_TYPE(ppHdr);
+ PVR_ASSERT(uiHdrType == PVRSRVTL_PACKETTYPE_DATA);
+
+ pNext = (IMG_PBYTE)GET_NEXT_PACKET_ADDR(ppHdr);
+
+ PVR_ASSERT(pNext != NULL);
+
+ uiMsgSize = (IMG_UINT32)((size_t)pNext - (size_t)ppHdr);
+
+ pSentinel->uiMsgLen += uiMsgSize;
+
+ pData = GET_PACKET_DATA_PTR(ppHdr);
+
+ if (pData == NULL || pData >= pLast)
+ {
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: pData = %p, pLast = %p Returning 0",
+ __func__, pData, pLast));
+#endif /* HTB_CHATTY */
+ return 0;
+ }
+
+ ui32Data = *(IMG_UINT32 *)pData;
+ ui32LogIdx = idToLogIdx(ui32Data);
+
+ /*
+ * Check if the unrecognized ID is valid and therefore, tracebuf
+ * needs updating.
+ */
+ if (HTB_SF_LAST == ui32LogIdx && HTB_LOG_VALIDID(ui32Data)
+ && IMG_FALSE == bUnrecognizedErrorPrinted)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Unrecognised LOG value '%x' GID %x Params %d ID %x @ '%p'",
+ __func__, ui32Data, HTB_SF_GID(ui32Data),
+ HTB_SF_PARAMNUM(ui32Data), ui32Data & 0xfff, pData));
+		bUnrecognizedErrorPrinted = IMG_TRUE;
+
+ return 0;
+ }
+
+ /* The string format we are going to display */
+ /*
+ * The display will show the header (log-ID, group-ID, number of params)
+	 * The maximum parameter list length = 15 (only 4 bits used to encode)
+ * so we need HEADER + 15 * sizeof (UINT32) and the displayed string
+ * describing the event. We use a buffer in the per-process pSentinel
+ * structure to hold the data.
+ */
+ pszFmt = aLogs[ui32LogIdx].pszFmt;
+
+ /* add the message payload size to the running count */
+ ui32ArgsCur = HTB_SF_PARAMNUM(ui32Data);
+
+ /* Determine if we've over-filled the buffer and had to drop packets */
+ bPacketsDropped = CHECK_PACKETS_DROPPED(ppHdr);
+ if (bPacketsDropped ||
+ (uiHdrType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED))
+ {
+ /* Flag this as it is useful to know ... */
+ nPrinted = OSSNPrintf(aszOneArgFmt, sizeof (aszOneArgFmt),
+"\n<========================== *** PACKETS DROPPED *** ======================>\n");
+
+ PVR_DUMPDEBUG_LOG(aszOneArgFmt);
+ }
+
+ {
+ IMG_UINT32 ui32Timestamp, ui32PID;
+ IMG_CHAR *szBuffer = pSentinel->szBuffer; // Buffer start
+ IMG_CHAR *pszBuffer = pSentinel->szBuffer; // Current place in buf
+ size_t uBufBytesAvailable = sizeof(pSentinel->szBuffer);
+ IMG_UINT32 *pui32Data = (IMG_UINT32 *)pData;
+ IMG_UINT32 ui_aGroupIdx;
+
+ // Get PID field from data stream
+ pui32Data++;
+ ui32PID = *pui32Data;
+ // Get Timestamp from data stream
+ pui32Data++;
+ ui32Timestamp = *pui32Data;
+ // Move to start of message contents data
+ pui32Data++;
+
+ /*
+ * We need to snprintf the data to a local in-kernel buffer
+ * and then PVR_DUMPDEBUG_LOG() that in one shot
+ */
+ ui_aGroupIdx = MIN(HTB_SF_GID(ui32Data), uiMax_aGroups);
+ nPrinted = OSSNPrintf(szBuffer, uBufBytesAvailable, "%10u:%5u-%s> ",
+ ui32Timestamp, ui32PID, aGroups[ui_aGroupIdx]);
+ if (nPrinted >= uBufBytesAvailable)
+ {
+ PVR_DUMPDEBUG_LOG("Buffer overrun - %ld printed, max space %ld\n",
+ (long) nPrinted, (long) uBufBytesAvailable);
+ }
+
+ /* Update where our next 'output' point in the buffer is */
+ pszBuffer += nPrinted;
+ uBufBytesAvailable -= nPrinted;
+
+ /*
+ * Print one argument at a time as this simplifies handling variable
+ * number of arguments. Special case handling for no arguments.
+ * This is the case for simple format strings such as
+ * HTB_SF_MAIN_KICK_UNCOUNTED.
+ */
+ if (ui32ArgsCur == 0)
+ {
+ if (pszFmt)
+ {
+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, pszFmt);
+ if (nPrinted >= uBufBytesAvailable)
+ {
+ PVR_DUMPDEBUG_LOG("Buffer overrun - %ld printed,"
+ " max space %ld\n", (long) nPrinted, (long) uBufBytesAvailable);
+ }
+ pszBuffer += nPrinted;
+ /* Don't update the uBufBytesAvailable as we have finished this
+ * message decode. pszBuffer - szBuffer is the total amount of
+ * data we have decoded.
+ */
+ }
+ }
+ else
+ {
+ while ( IS_VALID_FMT_STRING(pszFmt) && (uBufBytesAvailable > 0) )
+ {
+ IMG_UINT32 ui32TmpArg = *pui32Data;
+ TRACEBUF_ARG_TYPE eArgType;
+
+ eArgType = ExtractOneArgFmt(&pszFmt, aszOneArgFmt);
+
+ pui32Data++;
+ ui32ArgsCur--;
+
+ switch (eArgType)
+ {
+ case TRACEBUF_ARG_TYPE_INT:
+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+ aszOneArgFmt, ui32TmpArg);
+ break;
+
+ case TRACEBUF_ARG_TYPE_NONE:
+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+ pszFmt);
+ break;
+
+ default:
+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable,
+ "Error processing arguments, type not "
+ "recognized (fmt: %s)", aszOneArgFmt);
+ break;
+ }
+ if (nPrinted >= uBufBytesAvailable)
+ {
+ PVR_DUMPDEBUG_LOG("Buffer overrun - %ld printed,"
+ " max space %ld\n", (long) nPrinted,
+ (long) uBufBytesAvailable);
+ }
+ pszBuffer += nPrinted;
+ uBufBytesAvailable -= nPrinted;
+ }
+ /* Display any remaining text in pszFmt string */
+ if (pszFmt)
+ {
+ nPrinted = OSSNPrintf(pszBuffer, uBufBytesAvailable, pszFmt);
+ if (nPrinted >= uBufBytesAvailable)
+ {
+ PVR_DUMPDEBUG_LOG("Buffer overrun - %ld printed,"
+ " max space %ld\n", (long) nPrinted, (long) uBufBytesAvailable);
+ }
+ pszBuffer += nPrinted;
+ /* Don't update the uBufBytesAvailable as we have finished this
+ * message decode. pszBuffer - szBuffer is the total amount of
+ * data we have decoded.
+ */
+ }
+ }
+
+ PVR_DUMPDEBUG_LOG(szBuffer);
+
+ /* Update total bytes processed */
+ pSentinel->uiTotal += (pszBuffer - szBuffer);
+ }
+ return 0;
+}
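+
+/*
+ * Editorial note: the message layout DecodeHTB() consumes, reconstructed
+ * from the field reads above (one IMG_UINT32 per word):
+ *
+ *   word 0   SF    log event ID (group and param count in the high bits)
+ *   word 1   PID   process ID recorded at log time
+ *   word 2   TIME  32-bit microsecond timestamp
+ *   word 3+  PARn  parameters, count given by HTB_SF_PARAMNUM(SF)
+ *
+ * This mirrors the "SF:PID:TIME:[PARn]*" packing performed by HTBLogKM()
+ * in htbserver.c.
+ */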
+
+/*
+ * HTBDumpBuffer: Dump the Host Trace Buffer using the TLClient API
+ *
+ * This routine just parses *one* message from the buffer.
+ * The stream will be opened by the Start() routine, closed by the Stop() and
+ * updated for data consumed by this routine once we have DebugPrintf'd it.
+ * We use the new TLReleaseDataLess() routine which enables us to update the
+ * HTB contents with just the amount of data we have successfully processed.
+ * If we need to leave the data available we can call this with a 0 count.
+ * This will happen in the case of a buffer overflow so that we can reprocess
+ * any data which wasn't handled before.
+ *
+ * In case of overflow or an error we return -1 otherwise 0
+ *
+ * Input:
+ * pfnDumpDebugPrintf output routine to display data
+ * pvDumpDebugFile seq_file handle (from kernel seq_read() call)
+ * pvData data address to start dumping from
+ * (set by Start() / Next())
+ */
+static int HTBDumpBuffer(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ void *pvData)
+{
+ struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+ HTB_Sentinel_t *pSentinel = (HTB_Sentinel_t *)psSeqFile->private;
+
+ PVR_ASSERT(NULL != pvData);
+
+ if (pvData == SEQ_START_TOKEN)
+ {
+ if (pSentinel->pCurr == NULL)
+ {
+#ifdef HTB_CHATTY
+ PVR_DPF((PVR_DBG_WARNING, "%s: SEQ_START_TOKEN, Empty buffer",
+ __func__));
+#endif /* HTB_CHATTY */
+ return 0;
+ }
+ PVR_ASSERT(pSentinel->pCurr != NULL);
+
+ /* Display a Header as we have data to process */
+ seq_printf(psSeqFile, "%-10s:%-5s-%s %s\n",
+ "Timestamp", "Proc ID", "Group", "Log Entry");
+ }
+ else
+ {
+ if (pvData != NULL)
+ {
+ PVR_ASSERT(pSentinel->pCurr == pvData);
+ }
+ }
+
+ return DecodeHTB(pSentinel, pvDumpDebugFile, pfnDumpDebugPrintf);
+}
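+
+/*
+ * Editorial sketch of the seq_file flow assumed by HTBDumpBuffer(); routine
+ * names other than those shown in this file are taken from the comments
+ * above and may differ in the actual seq_operations table:
+ *
+ *   seq_read()
+ *     -> Start()   open the TL stream, locate pending data (pSentinel->pCurr)
+ *     -> Show()    HTBDumpBuffer() -> DecodeHTB(), one message per call
+ *     -> Next()    release pSentinel->uiMsgLen bytes via TLReleaseDataLess()
+ *     -> Stop()    close the stream
+ *
+ * Releasing zero bytes keeps the data available for reprocessing after a
+ * seq_file buffer overflow, as described in the comment above.
+ */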
+
+
+/******************************************************************************
+ * External Entry Point routines ...
+ ******************************************************************************/
+/**************************************************************************/ /*!
+ @Function HTB_CreateFSEntry
+
+ @Description Create the debugFS entry-point for the host-trace-buffer
+
+ @Returns eError internal error code, PVRSRV_OK on success
+
+ */ /**************************************************************************/
+PVRSRV_ERROR HTB_CreateFSEntry(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRDebugFSCreateEntry("host_trace", NULL,
+ &gsHTBReadOps,
+ NULL,
+ NULL, NULL, NULL,
+ &g_sHTBData.psDumpHostDebugFSEntry);
+
+ PVR_LOGR_IF_ERROR(eError, "PVRDebugFSCreateEntry");
+
+ return eError;
+}
+
+
+/**************************************************************************/ /*!
+ @Function HTB_DestroyFSEntry
+
+ @Description Destroy the debugFS entry-point created by earlier
+ HTB_CreateFSEntry() call.
+*/ /**************************************************************************/
+void HTB_DestroyFSEntry(void)
+{
+ if (g_sHTBData.psDumpHostDebugFSEntry)
+ {
+ PVRDebugFSRemoveEntry(&g_sHTBData.psDumpHostDebugFSEntry);
+ g_sHTBData.psDumpHostDebugFSEntry = NULL;
+ }
+}
+
+/* EOF */
diff --git a/drivers/gpu/drm/img-rogue/1.10/htb_debug.h b/drivers/gpu/drm/img-rogue/1.10/htb_debug.h
new file mode 100644
index 00000000000000..ce6835bd20cf27
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htb_debug.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File htb_debug.h
+@Title Linux debugFS routine setup header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _HTB_DEBUG_H_
+#define _HTB_DEBUG_H_
+
+/**************************************************************************/ /*!
+ @Function HTB_CreateFSEntry
+
+ @Description Create the debugFS entry-point for the host-trace-buffer
+
+ @Returns eError internal error code, PVRSRV_OK on success
+
+ */ /**************************************************************************/
+PVRSRV_ERROR HTB_CreateFSEntry(void);
+
+/**************************************************************************/ /*!
+ @Function HTB_DestroyFSEntry
+
+ @Description Destroy the debugFS entry-point created by earlier
+ HTB_CreateFSEntry() call.
+*/ /**************************************************************************/
+void HTB_DestroyFSEntry(void);
+
+#endif /* _HTB_DEBUG_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbserver.c b/drivers/gpu/drm/img-rogue/1.10/htbserver.c
new file mode 100644
index 00000000000000..6fce3012c7bde0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbserver.c
@@ -0,0 +1,753 @@
+/*************************************************************************/ /*!
+@File htbserver.c
+@Title Host Trace Buffer server implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "htbserver.h"
+#include "htbuffer.h"
+#include "htbuffer_types.h"
+#include "tlstream.h"
+#include "pvrsrv_tlcommon.h"
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+
+/* size of circular buffer controlling the maximum number of concurrent PIDs logged */
+#define HTB_MAX_NUM_PID 8
+
+/* number of times to try rewriting a log entry */
+#define HTB_LOG_RETRY_COUNT 5
+
+/*************************************************************************/ /*!
+ Host Trace Buffer control information structure
+*/ /**************************************************************************/
+typedef struct
+{
+ IMG_UINT32 ui32BufferSize; /*!< Requested buffer size in bytes
+ Once set this may not be changed */
+
+ HTB_OPMODE_CTRL eOpMode; /*!< Control what trace data is dropped if
+ the buffer is full.
+ Once set this may not be changed */
+
+/* IMG_UINT32 ui32GroupEnable; */ /*!< Flags word controlling groups to be
+ logged */
+
+ IMG_UINT32 ui32LogLevel; /*!< Log level to control messages logged */
+
+ IMG_UINT32 aui32EnablePID[HTB_MAX_NUM_PID]; /*!< PIDs to enable logging for
+ a specific set of processes */
+
+ IMG_UINT32 ui32PIDCount; /*!< Current number of PIDs being logged */
+
+ IMG_UINT32 ui32PIDHead; /*!< Head of the PID circular buffer */
+
+ HTB_LOGMODE_CTRL eLogMode; /*!< Logging mode control */
+
+ IMG_BOOL bLogDropSignalled; /*!< Flag indicating if a log message has
+ been signalled as dropped */
+
+ /* synchronisation parameters */
+ IMG_UINT64 ui64SyncOSTS;
+ IMG_UINT64 ui64SyncCRTS;
+ IMG_UINT32 ui32SyncCalcClkSpd;
+ IMG_UINT32 ui32SyncMarker;
+
+ IMG_BOOL bInitDone; /* Set by HTBInit, reset by HTBDeInit */
+} HTB_CTRL_INFO;
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static const IMG_UINT32 MapFlags[] =
+{
+ 0, /* HTB_OPMODE_UNDEF = 0 */
+ TL_OPMODE_DROP_NEWER, /* HTB_OPMODE_DROPLATEST */
+ TL_OPMODE_DROP_OLDEST,/* HTB_OPMODE_DROPOLDEST */
+ TL_OPMODE_BLOCK /* HTB_OPMODE_BLOCK */
+};
+
+static_assert(0 == HTB_OPMODE_UNDEF, "Unexpected value for HTB_OPMODE_UNDEF");
+static_assert(1 == HTB_OPMODE_DROPLATEST, "Unexpected value for HTB_OPMODE_DROPLATEST");
+static_assert(2 == HTB_OPMODE_DROPOLDEST, "Unexpected value for HTB_OPMODE_DROPOLDEST");
+static_assert(3 == HTB_OPMODE_BLOCK, "Unexpected value for HTB_OPMODE_BLOCK");
+
+static_assert(1 == TL_OPMODE_DROP_NEWER, "Unexpected value for TL_OPMODE_DROP_NEWER");
+static_assert(2 == TL_OPMODE_DROP_OLDEST, "Unexpected value for TL_OPMODE_DROP_OLDEST");
+static_assert(3 == TL_OPMODE_BLOCK, "Unexpected value for TL_OPMODE_BLOCK");
+
+static const IMG_UINT32 g_ui32TLBaseFlags; /* 0: TL_FLAG_NO_SIGNAL_ON_COMMIT may be OR'd in here */
+
+/* Minimum TL buffer size,
+ * large enough for around 60 worst case messages or 200 average messages
+ */
+#define HTB_TL_BUFFER_SIZE_MIN (0x10000)
+
+
+static HTB_CTRL_INFO g_sCtrl;
+static IMG_BOOL g_bConfigured = IMG_FALSE;
+static IMG_HANDLE g_hTLStream;
+
+
+/************************************************************************/ /*!
+ @Function _LookupFlags
+ @Description Convert HTBuffer Operation mode to TLStream flags
+
+ @Input  eMode       Operation Mode
+
+ @Return IMG_UINT32  TLStream Flags
+*/ /**************************************************************************/
+static IMG_UINT32
+_LookupFlags( HTB_OPMODE_CTRL eMode )
+{
+ return (eMode < ARRAY_SIZE(MapFlags)) ? MapFlags[eMode] : 0;
+}
+
+
+/************************************************************************/ /*!
+ @Function _HTBLogDebugInfo
+ @Description Debug dump handler used to dump the state of the HTB module.
+ Called for each verbosity level during a debug dump. Function
+ only prints state when called for High verbosity.
+
+ @Input hDebugRequestHandle See PFN_DBGREQ_NOTIFY
+
+ @Input ui32VerbLevel See PFN_DBGREQ_NOTIFY
+
+ @Input pfnDumpDebugPrintf See PFN_DBGREQ_NOTIFY
+
+ @Input pvDumpDebugFile See PFN_DBGREQ_NOTIFY
+
+*/ /**************************************************************************/
+static void _HTBLogDebugInfo(
+ PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile
+)
+{
+ PVR_UNREFERENCED_PARAMETER(hDebugRequestHandle);
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+
+ if (g_bConfigured)
+ {
+ IMG_INT i;
+
+ PVR_DUMPDEBUG_LOG("------[ HTB Log state: On ]------");
+
+ PVR_DUMPDEBUG_LOG("HTB Log mode: %d", g_sCtrl.eLogMode);
+ PVR_DUMPDEBUG_LOG("HTB Log level: %d", g_sCtrl.ui32LogLevel);
+ PVR_DUMPDEBUG_LOG("HTB Buffer Opmode: %d", g_sCtrl.eOpMode);
+
+ for (i=0; i < HTB_FLAG_NUM_EL; i++)
+ {
+ PVR_DUMPDEBUG_LOG("HTB Log group %d: %x", i, g_auiHTBGroupEnable[i]);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("------[ HTB Log state: Off ]------");
+ }
+ }
+}
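+
+/*
+ * Editorial example of the dump produced above at high verbosity when the
+ * buffer is configured (values are illustrative only):
+ *
+ *   ------[ HTB Log state: On ]------
+ *   HTB Log mode: 1
+ *   HTB Log level: 0
+ *   HTB Buffer Opmode: 2
+ *   HTB Log group 0: ffffffff
+ */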
+
+/************************************************************************/ /*!
+ @Function HTBDeviceCreate
+ @Description Initialisation actions for HTB at device creation.
+
+ @Input psDeviceNode Reference to the device node in context
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hHtbDbgReqNotify,
+ psDeviceNode, &_HTBLogDebugInfo, DEBUG_REQUEST_HTB, NULL);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ return eError;
+}
+
+/************************************************************************/ /*!
+ @Function HTBDeviceDestroy
+ @Description De-initialisation actions for HTB at device destruction.
+
+ @Input psDeviceNode Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+)
+{
+ if (psDeviceNode->hHtbDbgReqNotify)
+ {
+		/* Not much we can do if it fails; the driver is unloading */
+ (void)PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hHtbDbgReqNotify);
+ psDeviceNode->hHtbDbgReqNotify = NULL;
+ }
+}
+
+static IMG_UINT32 g_ui32HTBufferSize = HTB_TL_BUFFER_SIZE_MIN / 1024; /* size in KB */
+
+/*
+ * AppHint access routine forward definitions
+ */
+static PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
+ IMG_UINT32);
+static PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *, const void *,
+ IMG_UINT32 *);
+
+static PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *, const void *,
+ IMG_UINT32);
+static PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *, const void *,
+ IMG_UINT32 *);
+
+static void _OnTLReaderOpenCallback(void *);
+
+/************************************************************************/ /*!
+ @Function HTBInit
+ @Description Allocate and initialise the Host Trace Buffer
+ The buffer size may be changed by specifying
+ HTBufferSizeInKB=xxxx
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBInit(void)
+{
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault;
+ IMG_UINT32 ui32BufBytes;
+
+ if (g_sCtrl.bInitDone)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "HTBInit: Driver already initialised"));
+ return PVRSRV_ERROR_ALREADY_EXISTS;
+ }
+
+ /*
+ * Buffer Size can be configured by specifying a value in the AppHint
+ * This will only take effect at module load time so there is no query
+ * or setting mechanism available.
+ */
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBufferSizeInKB,
+ NULL,
+ NULL,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ NULL);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableHTBLogGroup,
+ _HTBReadLogGroup,
+ _HTBSetLogGroup,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ NULL);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HTBOperationMode,
+ _HTBReadOpMode,
+ _HTBSetOpMode,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ NULL);
+
+ /*
+ * Now get whatever values have been configured for our AppHints
+ */
+ OSCreateKMAppHintState(&pvAppHintState);
+ ui32AppHintDefault = HTB_TL_BUFFER_SIZE_MIN / 1024;
+ OSGetKMAppHintUINT32(pvAppHintState, HTBufferSizeInKB,
+ &ui32AppHintDefault, &g_ui32HTBufferSize);
+ OSFreeKMAppHintState(pvAppHintState);
+
+ ui32BufBytes = g_ui32HTBufferSize * 1024;
+
+ /* initialise rest of state */
+ g_sCtrl.ui32BufferSize =
+ (ui32BufBytes < HTB_TL_BUFFER_SIZE_MIN)
+ ? HTB_TL_BUFFER_SIZE_MIN
+ : ui32BufBytes;
+ g_sCtrl.eOpMode = HTB_OPMODE_DROPOLDEST;
+ g_sCtrl.ui32LogLevel = 0;
+ g_sCtrl.ui32PIDCount = 0;
+ g_sCtrl.ui32PIDHead = 0;
+ g_sCtrl.eLogMode = HTB_LOGMODE_ALLPID;
+ g_sCtrl.bLogDropSignalled = IMG_FALSE;
+
+ g_sCtrl.bInitDone = IMG_TRUE;
+
+ /* Log the current driver parameter setting for the HTBufferSizeInKB.
+ * We do this here as there is no other infrastructure for obtaining
+ * the value.
+ */
+ if (g_ui32HTBufferSize != ui32AppHintDefault)
+ {
+		PVR_LOG(("HTBufferSize set to %uKB", g_ui32HTBufferSize));
+ }
+
+ return PVRSRV_OK;
+}
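+
+/*
+ * Illustrative usage (editorial): the buffer size is only configurable at
+ * module load time through the HTBufferSizeInKB AppHint, e.g. as a module
+ * parameter (the module name is platform dependent and assumed here):
+ *
+ *   insmod pvrsrvkm.ko HTBufferSizeInKB=256
+ *
+ * Requests below HTB_TL_BUFFER_SIZE_MIN (64KB) are clamped up by the code
+ * above.
+ */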
+
+/************************************************************************/ /*!
+ @Function HTBDeInit
+ @Description Close the Host Trace Buffer and free all resources. Must
+ perform a no-op if already de-initialised.
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void )
+{
+ if (!g_sCtrl.bInitDone)
+ return PVRSRV_OK;
+
+ if (g_hTLStream)
+ {
+ TLStreamClose( g_hTLStream );
+ g_hTLStream = NULL;
+ }
+
+ g_sCtrl.bInitDone = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+
+/*************************************************************************/ /*!
+ AppHint interface functions
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR _HTBSetLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ return HTBControlKM(1, &ui32Value, 0, 0,
+ HTB_LOGMODE_UNDEF, HTB_OPMODE_UNDEF);
+}
+
+static
+PVRSRV_ERROR _HTBReadLogGroup(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ *pui32Value = g_auiHTBGroupEnable[0];
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _HTBSetOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ return HTBControlKM(0, NULL, 0, 0, HTB_LOGMODE_UNDEF, ui32Value);
+}
+
+static
+PVRSRV_ERROR _HTBReadOpMode(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ *pui32Value = (IMG_UINT32)g_sCtrl.eOpMode;
+ return PVRSRV_OK;
+}
+
+
+static void
+_OnTLReaderOpenCallback( void *pvArg )
+{
+ if ( g_hTLStream )
+ {
+ IMG_UINT32 ui32Time = OSClockus();
+ (void) HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)),
+ ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)),
+ ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+ g_sCtrl.ui32SyncCalcClkSpd);
+ }
+
+ PVR_UNREFERENCED_PARAMETER(pvArg);
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBControlKM
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogMode Enable logging for all or specific processes
+
+ @Input eOpMode Control the behaviour of the data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+ const IMG_UINT32 ui32NumFlagGroups,
+ const IMG_UINT32 * aui32GroupEnable,
+ const IMG_UINT32 ui32LogLevel,
+ const IMG_UINT32 ui32EnablePID,
+ const HTB_LOGMODE_CTRL eLogMode,
+ const HTB_OPMODE_CTRL eOpMode
+)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32Time = OSClockus();
+
+ if ( !g_bConfigured && ui32NumFlagGroups )
+ {
+ eError = TLStreamCreate(
+ &g_hTLStream,
+ PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+ HTB_STREAM_NAME,
+ g_sCtrl.ui32BufferSize,
+ _LookupFlags(HTB_OPMODE_DROPOLDEST) | g_ui32TLBaseFlags,
+ _OnTLReaderOpenCallback, NULL, NULL, NULL);
+ PVR_LOGR_IF_ERROR(eError, "TLStreamCreate");
+ g_bConfigured = IMG_TRUE;
+ }
+
+ if (HTB_OPMODE_UNDEF != eOpMode && g_sCtrl.eOpMode != eOpMode)
+ {
+ g_sCtrl.eOpMode = eOpMode;
+		eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+ {
+ OSReleaseThreadQuanta();
+			eError = TLStreamReconfigure(g_hTLStream, _LookupFlags(g_sCtrl.eOpMode) | g_ui32TLBaseFlags);
+ }
+ PVR_LOGR_IF_ERROR(eError, "TLStreamReconfigure");
+ }
+
+ if ( ui32EnablePID )
+ {
+ g_sCtrl.aui32EnablePID[g_sCtrl.ui32PIDHead] = ui32EnablePID;
+ g_sCtrl.ui32PIDHead++;
+ g_sCtrl.ui32PIDHead %= HTB_MAX_NUM_PID;
+ g_sCtrl.ui32PIDCount++;
+ if ( g_sCtrl.ui32PIDCount > HTB_MAX_NUM_PID )
+ {
+ g_sCtrl.ui32PIDCount = HTB_MAX_NUM_PID;
+ }
+ }
+
+ /* HTB_LOGMODE_ALLPID overrides ui32EnablePID */
+ if ( HTB_LOGMODE_ALLPID == eLogMode )
+ {
+ OSCachedMemSet(g_sCtrl.aui32EnablePID, 0, sizeof(g_sCtrl.aui32EnablePID));
+ g_sCtrl.ui32PIDCount = 0;
+ g_sCtrl.ui32PIDHead = 0;
+ }
+ if ( HTB_LOGMODE_UNDEF != eLogMode )
+ {
+ g_sCtrl.eLogMode = eLogMode;
+ }
+
+ if ( ui32NumFlagGroups )
+ {
+ for (i = 0; i < HTB_FLAG_NUM_EL && i < ui32NumFlagGroups; i++)
+ {
+ g_auiHTBGroupEnable[i] = aui32GroupEnable[i];
+ }
+ for (; i < HTB_FLAG_NUM_EL; i++)
+ {
+ g_auiHTBGroupEnable[i] = 0;
+ }
+ }
+
+ if ( ui32LogLevel )
+ {
+ g_sCtrl.ui32LogLevel = ui32LogLevel;
+ }
+
+ /* Dump the current configuration state */
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_OPMODE, g_sCtrl.eOpMode);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_ENABLE_GROUP, g_auiHTBGroupEnable[0]);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_LOG_LEVEL, g_sCtrl.ui32LogLevel);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_LOGMODE, g_sCtrl.eLogMode);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+ {
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_ENABLE_PID, g_sCtrl.aui32EnablePID[i]);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ }
+
+ if (0 != g_sCtrl.ui32SyncMarker && 0 != g_sCtrl.ui32SyncCalcClkSpd)
+ {
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_MARK_RPT,
+ g_sCtrl.ui32SyncMarker);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE_RPT,
+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+ g_sCtrl.ui32SyncCalcClkSpd);
+ PVR_LOG_IF_ERROR(eError, "HTBLog");
+ }
+
+ return eError;
+}
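+
+/*
+ * Illustrative call (editorial sketch): enable one group flags word and
+ * switch to drop-oldest mode, leaving the log level, PID filter and log
+ * mode unchanged. Zero/UNDEF arguments are treated as "no change" above.
+ *
+ *   IMG_UINT32 ui32Enable = 0xffffffff;  // hypothetical group mask
+ *   eError = HTBControlKM(1, &ui32Enable, 0, 0,
+ *                         HTB_LOGMODE_UNDEF, HTB_OPMODE_DROPOLDEST);
+ */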
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static IMG_BOOL
+_ValidPID( IMG_UINT32 PID )
+{
+ IMG_UINT32 i;
+
+ for (i = 0; i < g_sCtrl.ui32PIDCount; i++)
+ {
+ if ( g_sCtrl.aui32EnablePID[i] == PID )
+ {
+ return IMG_TRUE;
+ }
+ }
+ return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncPartitionMarker
+ @Description Write an HTB sync partition marker to the HTB log
+
+ @Input ui32Marker Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+ const IMG_UINT32 ui32Marker
+)
+{
+ g_sCtrl.ui32SyncMarker = ui32Marker;
+ if ( g_hTLStream )
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Time = OSClockus();
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_MARK, ui32Marker);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ if (0 != g_sCtrl.ui32SyncCalcClkSpd)
+ {
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ ((IMG_UINT32)((g_sCtrl.ui64SyncOSTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncOSTS&0xffffffff)),
+ ((IMG_UINT32)((g_sCtrl.ui64SyncCRTS>>32)&0xffffffff)), ((IMG_UINT32)(g_sCtrl.ui64SyncCRTS&0xffffffff)),
+ g_sCtrl.ui32SyncCalcClkSpd);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ }
+ }
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncScale
+ @Description Write FW-Host synchronisation data to the HTB log when clocks
+ change or are re-calibrated
+
+ @Input bLogValues IMG_TRUE if value should be immediately written
+ out to the log
+
+ @Input ui64OSTS OS Timestamp
+
+ @Input ui64CRTS Rogue timestamp
+
+ @Input ui32CalcClkSpd Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+ const IMG_BOOL bLogValues,
+ const IMG_UINT64 ui64OSTS,
+ const IMG_UINT64 ui64CRTS,
+ const IMG_UINT32 ui32CalcClkSpd
+)
+{
+ g_sCtrl.ui64SyncOSTS = ui64OSTS;
+ g_sCtrl.ui64SyncCRTS = ui64CRTS;
+ g_sCtrl.ui32SyncCalcClkSpd = ui32CalcClkSpd;
+ if (g_hTLStream && bLogValues)
+ {
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Time = OSClockus();
+ eError = HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ ((IMG_UINT32)((ui64OSTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64OSTS&0xffffffff)),
+ ((IMG_UINT32)((ui64CRTS>>32)&0xffffffff)), ((IMG_UINT32)(ui64CRTS&0xffffffff)),
+ ui32CalcClkSpd);
+ /*
+ * Don't spam the log with non-failure cases
+ */
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "HTBLog",
+ PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ }
+}
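+
+/*
+ * Editorial note: the shift/mask pairs above, which split a 64-bit value
+ * into two 32-bit log parameters, are equivalent to the
+ * HTBLOG_U64_BITS_HIGH()/HTBLOG_U64_BITS_LOW() helpers from htbuffer.h:
+ *
+ *   HTBLog((IMG_HANDLE) NULL, 0, ui32Time, HTB_SF_CTRL_FWSYNC_SCALE,
+ *          HTBLOG_U64_BITS_HIGH(ui64OSTS), HTBLOG_U64_BITS_LOW(ui64OSTS),
+ *          HTBLOG_U64_BITS_HIGH(ui64CRTS), HTBLOG_U64_BITS_LOW(ui64CRTS),
+ *          ui32CalcClkSpd);
+ */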
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogKM
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStamp The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ui32NumArgs Number of elements in aui32Args
+
+ @Input aui32Args Array of log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+ IMG_UINT32 PID,
+ IMG_UINT32 ui32TimeStamp,
+ HTB_LOG_SFids SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 * aui32Args
+)
+{
+ /* format of messages is: SF:PID:TIME:[PARn]*
+ * 32-bit timestamp (us) gives about 1h before looping
+ * Buffer allocated on the stack so don't need a semaphore to guard it
+ */
+ IMG_UINT32 aui32MessageBuffer[HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS];
+
+	/* Min HTB size is HTB_TL_BUFFER_SIZE_MIN (0x10000 = 64KB) and max message/
+	 * packet size is 4*(HTB_LOG_HEADER_SIZE+HTB_LOG_MAX_PARAMS) = 72 bytes,
+	 * hence with these constraints this design is unlikely to hit the
+	 * PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED error */
+
+ PVRSRV_ERROR eError = PVRSRV_ERROR_NOT_ENABLED;
+ IMG_UINT32 ui32RetryCount = HTB_LOG_RETRY_COUNT;
+ IMG_UINT32 * pui32Message = aui32MessageBuffer;
+ IMG_UINT32 ui32MessageSize = 4 * (HTB_LOG_HEADER_SIZE+ui32NumArgs);
+
+ if ( g_hTLStream
+ && ( 0 == PID || ~0 == PID || HTB_LOGMODE_ALLPID == g_sCtrl.eLogMode || _ValidPID(PID) )
+/* && ( g_sCtrl.ui32GroupEnable & (0x1 << HTB_SF_GID(SF)) ) */
+/* && ( g_sCtrl.ui32LogLevel >= HTB_SF_LVL(SF) ) */
+ )
+ {
+ *pui32Message++ = SF;
+ *pui32Message++ = PID;
+ *pui32Message++ = ui32TimeStamp;
+ while ( ui32NumArgs )
+ {
+ ui32NumArgs--;
+ pui32Message[ui32NumArgs] = aui32Args[ui32NumArgs];
+ }
+
+ eError = TLStreamWrite( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize );
+ while ( PVRSRV_ERROR_NOT_READY == eError && ui32RetryCount-- )
+ {
+ OSReleaseThreadQuanta();
+ eError = TLStreamWrite( g_hTLStream, (IMG_UINT8*)aui32MessageBuffer, ui32MessageSize );
+ }
+
+ if ( PVRSRV_OK == eError )
+ {
+ g_sCtrl.bLogDropSignalled = IMG_FALSE;
+ }
+ else if ( PVRSRV_ERROR_STREAM_FULL != eError || !g_sCtrl.bLogDropSignalled )
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", "TLStreamWrite", PVRSRVGETERRORSTRING(eError), __func__));
+ }
+ if ( PVRSRV_ERROR_STREAM_FULL == eError )
+ {
+ g_sCtrl.bLogDropSignalled = IMG_TRUE;
+ }
+ }
+
+ return eError;
+}
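+
+/*
+ * Editorial example of the packing performed above. A two-parameter event
+ * occupies 4*(HTB_LOG_HEADER_SIZE+2) = 20 bytes, with HTB_LOG_HEADER_SIZE
+ * being the three header words (SF, PID, timestamp) implied by the 72-byte
+ * worst case quoted above (HTB_LOG_MAX_PARAMS = 15):
+ *
+ *   aui32MessageBuffer[0] = SF;
+ *   aui32MessageBuffer[1] = PID;
+ *   aui32MessageBuffer[2] = ui32TimeStamp;
+ *   aui32MessageBuffer[3] = aui32Args[0];
+ *   aui32MessageBuffer[4] = aui32Args[1];
+ */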
+
+/* EOF */
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbserver.h b/drivers/gpu/drm/img-rogue/1.10/htbserver.h
new file mode 100644
index 00000000000000..3bf1feaea643c6
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbserver.h
@@ -0,0 +1,247 @@
+/*************************************************************************/ /*!
+@File htbserver.h
+@Title Host Trace Buffer server implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+
+ A Host Trace can be merged with a corresponding Firmware Trace.
+ This is achieved by inserting synchronisation data into both
+ traces and post processing to merge them.
+
+ The FW Trace will contain a "Sync Partition Marker". This is
+ updated every time the RGX is brought out of reset (RGX clock
+ timestamps reset at this point) and is repeated when the FW
+ Trace buffer wraps to ensure there is always at least 1
+ partition marker in the Firmware Trace buffer whenever it is
+ read.
+
+ The Host Trace will contain corresponding "Sync Partition
+ Markers" - #HTBSyncPartitionMarker(). Each partition is then
+ subdivided into "Sync Scale" sections - #HTBSyncScale(). The
+ "Sync Scale" data allows the timestamps from the two traces to
+ be correlated. The "Sync Scale" data is updated as part of the
+ standard RGX time correlation code (rgxtimecorr.c) and is
+ updated periodically including on power and clock changes.
+
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __HTBSERVER_H__
+#define __HTBSERVER_H__
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+
+/************************************************************************/ /*!
+ @Function HTBDeviceCreate
+ @Description Initialisation actions for HTB at device creation.
+
+ @Input psDeviceNode Reference to the device node in context
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeviceCreate(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+);
+
+
+/************************************************************************/ /*!
+ @Function HTBDeviceDestroy
+ @Description De-initialisation actions for HTB at device destruction.
+
+ @Input psDeviceNode Reference to the device node in context
+
+*/ /**************************************************************************/
+void
+HTBDeviceDestroy(
+ PVRSRV_DEVICE_NODE *psDeviceNode
+);
+
+
+/************************************************************************/ /*!
+ @Function HTBInit
+ @Description Initialise the Host Trace Buffer and allocate all resources
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBInit( void );
+
+/************************************************************************/ /*!
+ @Function HTBDeInit
+ @Description Close the Host Trace Buffer and free all resources
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBDeInit( void );
+
+
+/*************************************************************************/ /*!
+ @Function HTBConfigureKM
+ @Description Configure or update the configuration of the Host Trace Buffer
+
+ @Input ui32NameSize Size of the pszName string
+
+ @Input pszName Name to use for the underlying data buffer
+
+ @Input ui32BufferSize Size of the underlying data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBConfigureKM(
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR * pszName,
+ const IMG_UINT32 ui32BufferSize
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBControlKM
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogMode Enable logging for all or specific processes
+
+ @Input eOpMode Control the behaviour of the data buffer
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBControlKM(
+ const IMG_UINT32 ui32NumFlagGroups,
+ const IMG_UINT32 * aui32GroupEnable,
+ const IMG_UINT32 ui32LogLevel,
+ const IMG_UINT32 ui32EnablePID,
+ const HTB_LOGMODE_CTRL eLogMode,
+ const HTB_OPMODE_CTRL eOpMode
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncPartitionMarker
+ @Description Write an HTB sync partition marker to the HTB log
+
+ @Input ui32Marker Marker value
+
+*/ /**************************************************************************/
+void
+HTBSyncPartitionMarker(
+ const IMG_UINT32 ui32Marker
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBSyncScale
+ @Description Write FW-Host synchronisation data to the HTB log when clocks
+ change or are re-calibrated
+
+ @Input bLogValues IMG_TRUE if value should be immediately written
+ out to the log
+
+ @Input ui64OSTS OS Timestamp
+
+ @Input ui64CRTS Rogue timestamp
+
+ @Input ui32CalcClkSpd Calculated clock speed
+
+*/ /**************************************************************************/
+void
+HTBSyncScale(
+ const IMG_BOOL bLogValues,
+ const IMG_UINT64 ui64OSTS,
+ const IMG_UINT64 ui64CRTS,
+ const IMG_UINT32 ui32CalcClkSpd
+);
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogKM
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStamp The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ui32NumArgs Number of elements in aui32Args
+
+ @Input aui32Args Array of log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+PVRSRV_ERROR
+HTBLogKM(
+ IMG_UINT32 PID,
+ IMG_UINT32 ui32TimeStamp,
+ HTB_LOG_SFids SF,
+ IMG_UINT32 ui32NumArgs,
+ IMG_UINT32 * aui32Args
+);
+
+
+#endif /* __HTBSERVER_H__ */
+
+/* EOF */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbuffer.c b/drivers/gpu/drm/img-rogue/1.10/htbuffer.c
new file mode 100644
index 00000000000000..1d3d14fccc7794
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbuffer.c
@@ -0,0 +1,195 @@
+/*************************************************************************/ /*!
+@File htbuffer.c
+@Title Host Trace Buffer shared API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+#include "htbuffer.h"
+//#include "allocmem.h"
+#include "osfunc.h"
+#include "client_htbuffer_bridge.h"
+#if defined(__KERNEL__)
+//#include "osfunc.h"
+#endif
+
+/* the group flags array of ints large enough to store all the group flags
+ * NB: This will only work while all logging is in the kernel
+ */
+IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL] = {0};
+
+/*************************************************************************/ /*!
+ @Function HTBControl
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input hSrvHandle Server Handle
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogPidMode Enable logging for all or specific processes,
+
+ @Input eOpMode Control what trace data is dropped if the TL
+ buffer is full
+
+ @Return eError Internal services call returned eError error
+ number
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+ IMG_HANDLE hSrvHandle,
+ IMG_UINT32 ui32NumFlagGroups,
+ IMG_UINT32 * aui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ HTB_LOGMODE_CTRL eLogPidMode,
+ HTB_OPMODE_CTRL eOpMode
+)
+{
+ return BridgeHTBControl(
+ hSrvHandle,
+ ui32NumFlagGroups,
+ aui32GroupEnable,
+ ui32LogLevel,
+ ui32EnablePID,
+ eLogPidMode,
+ eOpMode
+ );
+}
+
+
+/*************************************************************************/ /*!
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, HTB_LOG_SFids SF, va_list args)
+{
+#if defined(__KERNEL__)
+ IMG_UINT32 i;
+ IMG_UINT32 ui32NumArgs = HTB_SF_PARAMNUM(SF);
+ IMG_UINT32 aui32Args[HTB_LOG_MAX_PARAMS];
+
+ PVR_ASSERT(ui32NumArgs <= HTB_LOG_MAX_PARAMS);
+ ui32NumArgs = (ui32NumArgs>HTB_LOG_MAX_PARAMS)? HTB_LOG_MAX_PARAMS: ui32NumArgs;
+
+ /* unpack var args before sending over bridge */
+ for (i=0; i<ui32NumArgs; i++)
+ {
+ aui32Args[i] = va_arg(args, IMG_UINT32);
+ }
+
+ return BridgeHTBLog(hSrvHandle, PID, ui32TimeStampus, SF, ui32NumArgs, aui32Args);
+#else
+ PVR_UNREFERENCED_PARAMETER(hSrvHandle);
+ PVR_UNREFERENCED_PARAMETER(PID);
+ PVR_UNREFERENCED_PARAMETER(ui32TimeStampus);
+ PVR_UNREFERENCED_PARAMETER(SF);
+ PVR_UNREFERENCED_PARAMETER(args);
+
+ PVR_ASSERT(0=="HTB Logging in UM is not yet supported");
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBLog
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input ui32TimeStampus The timestamp to be associated with this log event
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, IMG_UINT32 SF, ...)
+{
+ PVRSRV_ERROR eError;
+ va_list args;
+ va_start(args, SF);
+	eError = _HTBLog(hSrvHandle, PID, ui32TimeStampus, SF, args);
+ va_end(args);
+ return eError;
+}
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogSimple
+ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...)
+{
+ PVRSRV_ERROR eError;
+ va_list args;
+ va_start(args, SF);
+ eError = _HTBLog(hSrvHandle, OSGetCurrentProcessID(), OSClockus(), SF, args);
+ va_end(args);
+ return eError;
+}
+
+
+/* EOF */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbuffer.h b/drivers/gpu/drm/img-rogue/1.10/htbuffer.h
new file mode 100644
index 00000000000000..0c6ca6caa943ec
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbuffer.h
@@ -0,0 +1,133 @@
+/*************************************************************************/ /*!
+@File htbuffer.h
+@Title Host Trace Buffer shared API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+                The Transport Layer and pvrtld tool provide the mechanism to
+ retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_H__
+#define __HTBUFFER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "htbuffer_sf.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#if defined(__KERNEL__)
+#define HTBLOGK(SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple((IMG_HANDLE) NULL, SF, ## args); } while (0)
+
+/* Host Trace Buffer name */
+#define HTB_STREAM_NAME "PVRHTBuffer"
+
+#else
+#define HTBLOG(handle, SF, args...) do { if (HTB_GROUP_ENABLED(SF)) HTBLogSimple(handle, SF, ## args); } while (0)
+#endif
+
+/* macros to cast 64 or 32-bit pointers into 32-bit integer components for Host Trace */
+#define HTBLOG_PTR_BITS_HIGH(p) ((IMG_UINT32)((((IMG_UINT64)((uintptr_t)p))>>32)&0xffffffff))
+#define HTBLOG_PTR_BITS_LOW(p) ((IMG_UINT32)(((IMG_UINT64)((uintptr_t)p))&0xffffffff))
+
+/* macros to cast 64-bit integers into 32-bit integer components for Host Trace */
+#define HTBLOG_U64_BITS_HIGH(u) ((IMG_UINT32)((u>>32)&0xffffffff))
+#define HTBLOG_U64_BITS_LOW(u) ((IMG_UINT32)(u&0xffffffff))
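+
+/*
+ * Illustrative use of the helpers above (editorial sketch; the event ID is
+ * one actually logged by htbserver.c, the timestamp variables are
+ * hypothetical):
+ *
+ *   HTBLOGK(HTB_SF_CTRL_FWSYNC_SCALE,
+ *           HTBLOG_U64_BITS_HIGH(ui64OSTS), HTBLOG_U64_BITS_LOW(ui64OSTS),
+ *           HTBLOG_U64_BITS_HIGH(ui64CRTS), HTBLOG_U64_BITS_LOW(ui64CRTS),
+ *           ui32CalcClkSpd);
+ */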
+
+/*************************************************************************/ /*!
+ @Function HTBLog
+ @Description Record a Host Trace Buffer log event
+
+ @Input PID The PID of the process the event is associated
+ with. This is provided as an argument rather
+ than querying internally so that events associated
+ with a particular process, but performed by
+ another can be logged correctly.
+
+ @Input TimeStampus The timestamp in us for this event
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLog(IMG_HANDLE hSrvHandle, IMG_UINT32 PID, IMG_UINT32 ui32TimeStampus, IMG_UINT32 SF, ...);
+
+
+/*************************************************************************/ /*!
+ @Function HTBLogSimple
+ @Description Record a Host Trace Buffer log event with implicit PID and Timestamp
+
+ @Input SF The log event ID
+
+ @Input ... Log parameters
+
+ @Return PVRSRV_OK Success.
+
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBLogSimple(IMG_HANDLE hSrvHandle, IMG_UINT32 SF, ...);
+
+
+
+/* DEBUG log group enable: unless HTB_DEBUG_LOG_GROUP is defined, any use of
+ * HTB_LOG_TYPE_DBG expands to the undefined symbol __BUILDERROR__ and fails
+ * the build */
+#if !defined(HTB_DEBUG_LOG_GROUP)
+#undef HTB_LOG_TYPE_DBG /* No trace statements in this log group should be checked in */
+#define HTB_LOG_TYPE_DBG __BUILDERROR__
+#endif
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_H__ */
+/*****************************************************************************
+ End of file (htbuffer.h)
+*****************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbuffer_init.h b/drivers/gpu/drm/img-rogue/1.10/htbuffer_init.h
new file mode 100644
index 00000000000000..8e782ff24d22d0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbuffer_init.h
@@ -0,0 +1,115 @@
+/*************************************************************************/ /*!
+@File htbuffer_init.h
+@Title Host Trace Buffer functions needed for Services initialisation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_INIT_H__
+#define __HTBUFFER_INIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_types.h"
+
+/*************************************************************************/ /*!
+ @Function HTBConfigure
+ @Description Configure the Host Trace Buffer.
+ Once these parameters are set they may not be changed
+
+ @Input hSrvHandle Server Handle
+
+ @Input pszBufferName    Name to use for the TL buffer; this will be
+                         required to request trace data from the TL
+
+ @Input ui32BufferSize Requested TL buffer size in bytes
+
+ @Return eError          Error code returned by the internal
+                         Services call
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBConfigure(
+ IMG_HANDLE hSrvHandle,
+ IMG_CHAR * pszBufferName,
+ IMG_UINT32 ui32BufferSize
+);
+
+/*************************************************************************/ /*!
+ @Function HTBControl
+ @Description Update the configuration of the Host Trace Buffer
+
+ @Input hSrvHandle Server Handle
+
+ @Input ui32NumFlagGroups Number of group enable flags words
+
+ @Input aui32GroupEnable Flags words controlling groups to be logged
+
+ @Input ui32LogLevel Log level to record
+
+ @Input ui32EnablePID PID to enable logging for a specific process
+
+ @Input eLogMode          Enable logging for all or specific processes
+
+ @Input eOpMode Control what trace data is dropped if the TL
+ buffer is full
+
+ @Return eError           Error code returned by the internal
+                          Services call
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+HTBControl(
+ IMG_HANDLE hSrvHandle,
+ IMG_UINT32 ui32NumFlagGroups,
+ IMG_UINT32 * aui32GroupEnable,
+ IMG_UINT32 ui32LogLevel,
+ IMG_UINT32 ui32EnablePID,
+ HTB_LOGMODE_CTRL eLogMode,
+ HTB_OPMODE_CTRL eOpMode
+);
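+
+/* Illustrative usage sketch only: HTB_USAGE_EXAMPLE is a hypothetical guard,
+ * never defined in real builds. It pairs HTBConfigure with HTBControl; the
+ * buffer size, log level and group selection are example values, and the
+ * HTB_LOG_TYPE_*, HTB_LOGMODE_* and HTB_OPMODE_* names come from
+ * htbuffer_sf.h and htbuffer_types.h, which are assumed to be visible.
+ * "PVRHTBuffer" matches HTB_STREAM_NAME in htbuffer.h.
+ */
+#if defined(HTB_USAGE_EXAMPLE)
+static PVRSRV_ERROR ExampleEnableHostTrace(IMG_HANDLE hSrvHandle)
+{
+	IMG_CHAR szName[] = "PVRHTBuffer";
+	/* One enable-flags word covers all currently defined groups */
+	IMG_UINT32 aui32Enable[] = { HTB_LOG_TYPE_CTRL | HTB_LOG_TYPE_MAIN };
+	PVRSRV_ERROR eError;
+
+	eError = HTBConfigure(hSrvHandle, szName, 0x10000);
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	return HTBControl(hSrvHandle, 1, aui32Enable, 0 /* log level */,
+	                  0 /* no PID filter */, HTB_LOGMODE_ALLPID,
+	                  HTB_OPMODE_DROPLATEST);
+}
+#endif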
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_INIT_H__ */
+/*****************************************************************************
+ End of file (htbuffer_init.h)
+*****************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbuffer_sf.h b/drivers/gpu/drm/img-rogue/1.10/htbuffer_sf.h
new file mode 100644
index 00000000000000..40f9eb720d938b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbuffer_sf.h
@@ -0,0 +1,220 @@
+/*************************************************************************/ /*!
+@File htbuffer_sf.h
+@Title Host Trace Buffer interface string format specifiers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the Host Trace Buffer logging messages. The following
+             list defines the messages the host driver prints. Changing
+             anything other than the first column, or correcting spelling
+             mistakes in the strings, will break compatibility with log
+             files created with older/newer driver versions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_SF_H__
+#define __HTBUFFER_SF_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ * WILL BREAK host tracing message compatibility with previous
+ * driver versions. Only add new ones, if so required.
+ ****************************************************************************/
+
+
+/* String used in pvrdebug -h output */
+#define HTB_LOG_GROUPS_STRING_LIST "ctrl,mmu,sync,main,brg"
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define HTB_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s"
+
+/* Available log groups - Master template
+ *
+ * Group usage is as follows:
+ * CTRL - Internal Host Trace information and synchronisation data
+ * MMU  - MMU page mapping information
+ * SYNC - Synchronisation debug
+ * MAIN - Data master kicks, etc., tying in with the MAIN group in FWTrace
+ * BRG  - Bridge call logging
+ * DBG  - Temporary debugging group, logs not to be left in the driver
+ *
+ */
+#define HTB_LOG_SFGROUPLIST \
+ X( HTB_GROUP_NONE, NONE ) \
+/* gid, group flag / apphint name */ \
+ X( HTB_GROUP_CTRL, CTRL ) \
+ X( HTB_GROUP_MMU, MMU ) \
+ X( HTB_GROUP_SYNC, SYNC ) \
+ X( HTB_GROUP_MAIN, MAIN ) \
+ X( HTB_GROUP_BRG, BRG ) \
+/* Debug group HTB_GROUP_DBG must always be last */ \
+ X( HTB_GROUP_DBG, DBG )
+
+
+/* Table of String Format specifiers, the group they belong to and the number
+ * of arguments each expects. X-macro style macros are used to generate what
+ * is needed without requiring hand editing.
+ *
+ * id : unique id within a group
+ * gid : group id as defined above
+ * sym name : symbolic name of enumerations used to identify message strings
+ * string : Actual string
+ * #args : number of arguments the string format requires
+ */
+#define HTB_LOG_SFIDLIST \
+/*id, gid, sym name, string, # arguments */ \
+X( 0, HTB_GROUP_NONE, HTB_SF_FIRST, "You should not use this string", 0) \
+\
+X( 1, HTB_GROUP_CTRL, HTB_SF_CTRL_LOGMODE, "HTB log mode set to %d (1- all PID, 2 - restricted PID)\n", 1) \
+X( 2, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_PID, "HTB enable logging for PID %d\n", 1) \
+X( 3, HTB_GROUP_CTRL, HTB_SF_CTRL_ENABLE_GROUP, "HTB enable logging groups 0x%08x\n", 1) \
+X( 4, HTB_GROUP_CTRL, HTB_SF_CTRL_LOG_LEVEL, "HTB log level set to %d\n", 1) \
+X( 5, HTB_GROUP_CTRL, HTB_SF_CTRL_OPMODE, "HTB operating mode set to %d (1 - droplatest, 2 - drop oldest, 3 - block)\n", 1) \
+X( 6, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE, "HTBFWSync OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 7, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_SCALE_RPT, "FW Sync scale info OSTS=%08x%08x CRTS=%08x%08x CalcClkSpd=%d\n", 5) \
+X( 8, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK, "FW Sync Partition marker: %d\n", 1) \
+X( 9, HTB_GROUP_CTRL, HTB_SF_CTRL_FWSYNC_MARK_RPT, "FW Sync Partition repeat: %d\n", 1) \
+\
+X( 1, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_TABLE, "MMU page op table entry page_id=%08x%08x index=%d level=%d val=%08x%08x map=%d\n", 7) \
+X( 2, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_ALLOC, "MMU allocating DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 3, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_FREE, "MMU freeing DevVAddr from %08x%08x to %08x%08x\n", 4) \
+X( 4, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_MAP, "MMU mapping DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 5, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_PMRMAP, "MMU mapping PMR DevVAddr %08x%08x to DevPAddr %08x%08x\n", 4) \
+X( 6, HTB_GROUP_MMU, HTB_SF_MMU_PAGE_OP_UNMAP, "MMU unmapping DevVAddr %08x%08x\n", 2) \
+\
+X( 1, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_ALLOC, "Server sync allocation [%08X]\n", 1) \
+X( 2, HTB_GROUP_SYNC, HTB_SF_SYNC_SERVER_UNREF, "Server sync unreferenced [%08X]\n", 1) \
+X( 3, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_CREATE, "Sync OP create 0x%08x, block count=%d, server syncs=%d, client syncs=%d\n", 4) \
+X( 4, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_TAKE, "Sync OP take 0x%08x server syncs=%d, client syncs=%d\n", 3) \
+X( 5, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_COMPLETE, "Sync OP complete 0x%08x\n", 1) \
+X( 6, HTB_GROUP_SYNC, HTB_SF_SYNC_PRIM_OP_DESTROY, "Sync OP destroy 0x%08x\n", 1) \
+\
+X( 1, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_TA, "Kick TA: FWCtx %08X @ %d\n", 2) \
+X( 2, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_3D, "Kick 3D: FWCtx %08X @ %d\n", 2) \
+X( 3, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_CDM, "Kick CDM: FWCtx %08X @ %d\n", 2) \
+X( 4, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx %08X @ %d\n", 2) \
+X( 5, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx %08X @ %d\n", 2) \
+X( 6, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_2D, "Kick 2D: FWCtx %08X @ %d\n", 2) \
+X( 7, HTB_GROUP_MAIN, HTB_SF_MAIN_KICK_UNCOUNTED, "Kick (uncounted) for all DMs\n", 0) \
+X( 8, HTB_GROUP_MAIN, HTB_SF_MAIN_FWCCB_CMD, "FW CCB Cmd: %d\n", 1) \
+X( 9, HTB_GROUP_MAIN, HTB_SF_MAIN_PRE_POWER, "Pre-power duration @ phase [%d] (0-shutdown,1-startup) RGX: %llu ns SYS: %llu ns\n", 3) \
+X(10, HTB_GROUP_MAIN, HTB_SF_MAIN_POST_POWER, "Post-power duration @ phase [%d] (0-shutdown,1-startup) SYS: %llu ns RGX: %llu ns\n", 3) \
+\
+X( 1, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL, "Bridge call: start: %010u: bid %03d fid %d\n", 3) \
+X( 2, HTB_GROUP_BRG, HTB_SF_BRG_BRIDGE_CALL_ERR, "Bridge call: start: %010u: bid %03d fid %d error %d\n", 4) \
+\
+X( 1, HTB_GROUP_DBG, HTB_SF_DBG_INTPAIR, "0x%8.8x 0x%8.8x\n", 2) \
+\
+X( 65535, HTB_GROUP_NONE, HTB_SF_LAST, "You should not use this string\n", 15)
+
+
+
+/* gid - Group numbers */
+typedef enum _HTB_LOG_SFGROUPS {
+#define X(A,B) A,
+ HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_SFGROUPS;
+
+
+/* group flags are stored in an array of elements, each of which holds a
+ * certain number of bits */
+#define HTB_FLAG_EL_T IMG_UINT32
+#define HTB_FLAG_NUM_BITS_IN_EL ( sizeof(HTB_FLAG_EL_T) * 8 )
+
+#define HTB_LOG_GROUP_FLAG_GROUP(gid) ( (gid-1) / HTB_FLAG_NUM_BITS_IN_EL )
+#define HTB_LOG_GROUP_FLAG(gid) (gid? (0x1 << ((gid-1)%HTB_FLAG_NUM_BITS_IN_EL)): 0)
+#define HTB_LOG_GROUP_FLAG_NAME(gid) HTB_LOG_TYPE_ ## gid
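+
+/* Worked example, derived from the macros above: HTB_GROUP_CTRL has gid 1 and
+ * so maps to flag 0x1 in element 0 of the flags array; HTB_GROUP_DBG (gid 6)
+ * maps to flag 0x20. HTB_GROUP_NONE (gid 0) deliberately maps to no flag.
+ */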
+
+/* group enable flags */
+typedef enum _HTB_LOG_TYPE {
+#define X(a, b) HTB_LOG_GROUP_FLAG_NAME(b) = HTB_LOG_GROUP_FLAG(a),
+ HTB_LOG_SFGROUPLIST
+#undef X
+} HTB_LOG_TYPE;
+
+
+
+/* The symbolic names found in the table above are assigned an ui32 value of
+ * the following format:
+ * 31 30 28 27 20 19 16 15 12 11 0 bits
+ * - --- ---- ---- ---- ---- ---- ---- ----
+ * 0-11: id number
+ * 12-15: group id number
+ * 16-19: number of parameters
+ * 20-27: unused
+ * 28-30: active: identify SF packet, otherwise regular int32
+ * 31: reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum generated SF ids list.
+ */
+#define HTB_LOG_IDMARKER (0x70000000)
+#define HTB_LOG_CREATESFID(a,b,e) ((a) | (b<<12) | (e<<16)) | HTB_LOG_IDMARKER
+
+#define HTB_LOG_IDMASK (0xFFF00000)
+#define HTB_LOG_VALIDID(I) ( ((I) & HTB_LOG_IDMASK) == HTB_LOG_IDMARKER )
+
+typedef enum HTB_LOG_SFids {
+#define X(a, b, c, d, e) c = HTB_LOG_CREATESFID(a,b,e),
+ HTB_LOG_SFIDLIST
+#undef X
+} HTB_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define HTB_SF_GID(x) (((x)>>12) & 0xf)
+/* future improvement to support log levels */
+#define HTB_SF_LVL(x) (0)
+/* Returns how many arguments the SF (string format) for the given (enum generated) id requires */
+#define HTB_SF_PARAMNUM(x) (((x)>>16) & 0xf)
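+
+/* Worked example, derived from the macros above: HTB_SF_CTRL_ENABLE_PID is
+ * declared as X(2, HTB_GROUP_CTRL, ...) with one parameter, so it expands to
+ *     (2) | (HTB_GROUP_CTRL << 12) | (1 << 16) | HTB_LOG_IDMARKER == 0x70011002
+ * which decodes back as HTB_SF_GID() == 1 (CTRL), HTB_SF_PARAMNUM() == 1 and
+ * HTB_LOG_VALIDID() true.
+ */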
+
+/* format of messages is: SF:PID:TIME:[PARn]*
+ */
+#define HTB_LOG_HEADER_SIZE 3
+#define HTB_LOG_MAX_PARAMS 15
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_SF_H__ */
+/*****************************************************************************
+ End of file (htbuffer_sf.h)
+*****************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/htbuffer_types.h b/drivers/gpu/drm/img-rogue/1.10/htbuffer_types.h
new file mode 100644
index 00000000000000..c4f19b3e040b44
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/htbuffer_types.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File htbuffer_types.h
+@Title Host Trace Buffer types.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Host Trace Buffer provides a mechanism to log Host events to a
+ buffer in a similar way to the Firmware Trace mechanism.
+ Host Trace Buffer logs data using a Transport Layer buffer.
+              The Transport Layer and the pvrtld tool provide the
+              mechanism to retrieve the trace data.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __HTBUFFER_TYPES_H__
+#define __HTBUFFER_TYPES_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "htbuffer_sf.h"
+
+/* the array of group-flag words, large enough to store flags for all groups */
+#define HTB_FLAG_NUM_EL ( ((HTB_GROUP_DBG-1) / HTB_FLAG_NUM_BITS_IN_EL) + 1 )
+extern IMG_INTERNAL HTB_FLAG_EL_T g_auiHTBGroupEnable[HTB_FLAG_NUM_EL];
+
+#define HTB_GROUP_ENABLED(SF) (g_auiHTBGroupEnable[HTB_LOG_GROUP_FLAG_GROUP(HTB_SF_GID(SF))] & HTB_LOG_GROUP_FLAG(HTB_SF_GID(SF)))
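+
+/* Worked example, derived from the macros above:
+ * HTB_GROUP_ENABLED(HTB_SF_MMU_PAGE_OP_MAP) extracts gid 2 (MMU) from the SF
+ * value, then tests flag 0x2 in element 0 of g_auiHTBGroupEnable.
+ */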
+
+/*************************************************************************/ /*!
+ Host Trace Buffer operation mode
+ Care must be taken if changing this enum to ensure the MapFlags[] array
+ in htbserver.c is kept in-step.
+*/ /**************************************************************************/
+typedef enum
+{
+ /*! Undefined operation mode */
+ HTB_OPMODE_UNDEF = 0,
+
+ /*! Drop latest, intended for continuous logging to a UM daemon.
+ * If the daemon does not keep up, the most recent log data
+ * will be dropped
+ */
+ HTB_OPMODE_DROPLATEST,
+
+ /*! Drop oldest, intended for crash logging.
+ * Data will be continuously written to a circular buffer.
+ * After a crash the buffer will contain events leading up to the crash
+ */
+ HTB_OPMODE_DROPOLDEST,
+
+ /*! Block write if buffer is full
+ */
+ HTB_OPMODE_BLOCK,
+
+ HTB_OPMODE_LAST = HTB_OPMODE_BLOCK
+} HTB_OPMODE_CTRL;
+
+
+/*************************************************************************/ /*!
+ Host Trace Buffer log mode control
+*/ /**************************************************************************/
+typedef enum
+{
+ /*! Undefined log mode, used if update is not applied */
+ HTB_LOGMODE_UNDEF = 0,
+
+ /*! Log trace messages for all PIDs.
+ */
+ HTB_LOGMODE_ALLPID,
+
+ /*! Log trace messages for specific PIDs only.
+ */
+ HTB_LOGMODE_RESTRICTEDPID,
+
+ HTB_LOGMODE_LAST = HTB_LOGMODE_RESTRICTEDPID
+} HTB_LOGMODE_CTRL;
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __HTBUFFER_TYPES_H__ */
+
+/*****************************************************************************
 End of file (htbuffer_types.h)
+*****************************************************************************/
+
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/img_3dtypes.h b/drivers/gpu/drm/img-rogue/1.10/img_3dtypes.h
new file mode 100644
index 00000000000000..639aabf78f44b0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/img_3dtypes.h
@@ -0,0 +1,247 @@
+/*************************************************************************/ /*!
+@File
+@Title Global 3D types for use by IMG APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines 3D types for use by IMG APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_3DTYPES_H__
+#define __IMG_3DTYPES_H__
+
+#include <powervr/buffer_attribs.h>
+#include "img_types.h"
+
+/**
+ * Comparison functions
+ * This comparison function is defined as:
+ * A {CmpFunc} B
+ * A is a reference value, e.g., incoming depth etc.
+ * B is the sample value, e.g., value in depth buffer.
+ */
+typedef enum _IMG_COMPFUNC_
+{
+ IMG_COMPFUNC_NEVER, /**< The comparison never succeeds */
+ IMG_COMPFUNC_LESS, /**< The comparison is a less-than operation */
+ IMG_COMPFUNC_EQUAL, /**< The comparison is an equal-to operation */
+ IMG_COMPFUNC_LESS_EQUAL, /**< The comparison is a less-than or equal-to
+ operation */
+ IMG_COMPFUNC_GREATER, /**< The comparison is a greater-than operation
+ */
+	IMG_COMPFUNC_NOT_EQUAL,     /**< The comparison is a not-equal-to operation
+	                             */
+ IMG_COMPFUNC_GREATER_EQUAL, /**< The comparison is a greater-than or
+ equal-to operation */
+ IMG_COMPFUNC_ALWAYS, /**< The comparison always succeeds */
+} IMG_COMPFUNC;
+
+/**
+ * Stencil op functions
+ */
+typedef enum _IMG_STENCILOP_
+{
+ IMG_STENCILOP_KEEP, /**< Keep original value */
+ IMG_STENCILOP_ZERO, /**< Set stencil to 0 */
+ IMG_STENCILOP_REPLACE, /**< Replace stencil entry */
+ IMG_STENCILOP_INCR_SAT, /**< Increment stencil entry, clamping to max */
+ IMG_STENCILOP_DECR_SAT, /**< Decrement stencil entry, clamping to zero */
+ IMG_STENCILOP_INVERT, /**< Invert bits in stencil entry */
+ IMG_STENCILOP_INCR, /**< Increment stencil entry,
+ wrapping if necessary */
+ IMG_STENCILOP_DECR, /**< Decrement stencil entry,
+ wrapping if necessary */
+} IMG_STENCILOP;
+
+/**
+ * Alpha blending allows colours and textures on one surface
+ * to be blended with transparency onto another surface.
+ * These definitions apply to both source and destination blending
+ * states
+ */
+typedef enum _IMG_BLEND_
+{
+ IMG_BLEND_ZERO = 0, /**< Blend factor is (0,0,0,0) */
+ IMG_BLEND_ONE, /**< Blend factor is (1,1,1,1) */
+ IMG_BLEND_SRC_COLOUR, /**< Blend factor is the source colour */
+ IMG_BLEND_INV_SRC_COLOUR, /**< Blend factor is the inverted source colour
+ (i.e. 1-src_col) */
+ IMG_BLEND_SRC_ALPHA, /**< Blend factor is the source alpha */
+ IMG_BLEND_INV_SRC_ALPHA, /**< Blend factor is the inverted source alpha
+ (i.e. 1-src_alpha) */
+ IMG_BLEND_DEST_ALPHA, /**< Blend factor is the destination alpha */
+ IMG_BLEND_INV_DEST_ALPHA, /**< Blend factor is the inverted destination
+ alpha */
+ IMG_BLEND_DEST_COLOUR, /**< Blend factor is the destination colour */
+ IMG_BLEND_INV_DEST_COLOUR, /**< Blend factor is the inverted destination
+ colour */
+ IMG_BLEND_SRC_ALPHASAT, /**< Blend factor is the alpha saturation (the
+ minimum of (Src alpha,
+ 1 - destination alpha)) */
+ IMG_BLEND_BLEND_FACTOR, /**< Blend factor is a constant */
+ IMG_BLEND_INVBLEND_FACTOR, /**< Blend factor is a constant (inverted)*/
+ IMG_BLEND_SRC1_COLOUR, /**< Blend factor is the colour outputted from
+ the pixel shader */
+ IMG_BLEND_INV_SRC1_COLOUR, /**< Blend factor is the inverted colour
+ outputted from the pixel shader */
+ IMG_BLEND_SRC1_ALPHA, /**< Blend factor is the alpha outputted from
+ the pixel shader */
+ IMG_BLEND_INV_SRC1_ALPHA /**< Blend factor is the inverted alpha
+ outputted from the pixel shader */
+} IMG_BLEND;
+
+/**
+ * The arithmetic operation to perform when blending
+ */
+typedef enum _IMG_BLENDOP_
+{
+ IMG_BLENDOP_ADD = 0, /**< Result = (Source + Destination) */
+ IMG_BLENDOP_SUBTRACT, /**< Result = (Source - Destination) */
+ IMG_BLENDOP_REV_SUBTRACT, /**< Result = (Destination - Source) */
+ IMG_BLENDOP_MIN, /**< Result = min (Source, Destination) */
+ IMG_BLENDOP_MAX /**< Result = max (Source, Destination) */
+} IMG_BLENDOP;
+
+/**
+ * Logical operation to perform when logic ops are enabled
+ */
+typedef enum _IMG_LOGICOP_
+{
+ IMG_LOGICOP_CLEAR = 0, /**< Result = 0 */
+ IMG_LOGICOP_SET, /**< Result = -1 */
+ IMG_LOGICOP_COPY, /**< Result = Source */
+ IMG_LOGICOP_COPY_INVERTED, /**< Result = ~Source */
+ IMG_LOGICOP_NOOP, /**< Result = Destination */
+ IMG_LOGICOP_INVERT, /**< Result = ~Destination */
+ IMG_LOGICOP_AND, /**< Result = Source & Destination */
+ IMG_LOGICOP_NAND, /**< Result = ~(Source & Destination) */
+ IMG_LOGICOP_OR, /**< Result = Source | Destination */
+ IMG_LOGICOP_NOR, /**< Result = ~(Source | Destination) */
+ IMG_LOGICOP_XOR, /**< Result = Source ^ Destination */
+ IMG_LOGICOP_EQUIV, /**< Result = ~(Source ^ Destination) */
+ IMG_LOGICOP_AND_REVERSE, /**< Result = Source & ~Destination */
+ IMG_LOGICOP_AND_INVERTED, /**< Result = ~Source & Destination */
+ IMG_LOGICOP_OR_REVERSE, /**< Result = Source | ~Destination */
+ IMG_LOGICOP_OR_INVERTED /**< Result = ~Source | Destination */
+} IMG_LOGICOP;
+
+/**
+ * Type of fog blending supported
+ */
+typedef enum _IMG_FOGMODE_
+{
+ IMG_FOGMODE_NONE, /**< No fog blending - fog calculations are
+ * based on the value output from the vertex phase */
+ IMG_FOGMODE_LINEAR, /**< Linear interpolation */
+ IMG_FOGMODE_EXP, /**< Exponential */
+ IMG_FOGMODE_EXP2, /**< Exponential squaring */
+} IMG_FOGMODE;
+
+/**
+ * Types of filtering
+ */
+typedef enum _IMG_FILTER_
+{
+ IMG_FILTER_DONTCARE, /**< Any filtering mode is acceptable */
+ IMG_FILTER_POINT, /**< Point filtering */
+ IMG_FILTER_LINEAR, /**< Bi-linear filtering */
+ IMG_FILTER_BICUBIC, /**< Bi-cubic filtering */
+} IMG_FILTER;
+
+/**
+ * Addressing modes for textures
+ */
+typedef enum _IMG_ADDRESSMODE_
+{
+ IMG_ADDRESSMODE_REPEAT, /**< Texture repeats continuously */
+ IMG_ADDRESSMODE_FLIP, /**< Texture flips on odd integer part */
+ IMG_ADDRESSMODE_CLAMP, /**< Texture clamped at 0 or 1 */
+ IMG_ADDRESSMODE_FLIPCLAMP, /**< Flipped once, then clamp */
+ IMG_ADDRESSMODE_CLAMPBORDER,
+ IMG_ADDRESSMODE_OGL_CLAMP,
+ IMG_ADDRESSMODE_OVG_TILEFILL,
+ IMG_ADDRESSMODE_DONTCARE,
+} IMG_ADDRESSMODE;
+
+/**
+ * Culling based on winding order of triangle.
+ */
+typedef enum _IMG_CULLMODE_
+{
+ IMG_CULLMODE_NONE, /**< Don't cull */
+ IMG_CULLMODE_FRONTFACING, /**< Front facing triangles */
+ IMG_CULLMODE_BACKFACING, /**< Back facing triangles */
+} IMG_CULLMODE;
+
+/**
+ Colour for clearing surfaces.
+ The four elements of the 4 x 32 bit array will map to colour
+ R,G,B,A components, in order.
+ For YUV colour space the order is Y,U,V.
+ For Depth and Stencil formats D maps to R and S maps to G.
+*/
+typedef union _IMG_CLEAR_COLOUR_ {
+ IMG_UINT32 aui32[4];
+ IMG_INT32 ai32[4];
+ IMG_FLOAT af32[4];
+} IMG_CLEAR_COLOUR;
+
+static_assert(sizeof(IMG_FLOAT) == sizeof(IMG_INT32), "Size of IMG_FLOAT is not 32 bits.");
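+
+/* Illustrative only: an opaque red clear expressed through the float view of
+ * the union, using a C99 designated initialiser:
+ *     IMG_CLEAR_COLOUR sClear = { .af32 = { 1.0f, 0.0f, 0.0f, 1.0f } };
+ */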
+
+/*! ************************************************************************//**
+@brief Specifies the MSAA resolve operation.
+*/ /**************************************************************************/
+typedef enum _IMG_RESOLVE_OP_
+{
+ IMG_RESOLVE_BLEND = 0, /*!< box filter on the samples */
+ IMG_RESOLVE_MIN = 1, /*!< minimum of the samples */
+ IMG_RESOLVE_MAX = 2, /*!< maximum of the samples */
+ IMG_RESOLVE_SAMPLE0 = 3, /*!< choose sample 0 */
+ IMG_RESOLVE_SAMPLE1 = 4, /*!< choose sample 1 */
+ IMG_RESOLVE_SAMPLE2 = 5, /*!< choose sample 2 */
+ IMG_RESOLVE_SAMPLE3 = 6, /*!< choose sample 3 */
+ IMG_RESOLVE_SAMPLE4 = 7, /*!< choose sample 4 */
+ IMG_RESOLVE_SAMPLE5 = 8, /*!< choose sample 5 */
+ IMG_RESOLVE_SAMPLE6 = 9, /*!< choose sample 6 */
+ IMG_RESOLVE_SAMPLE7 = 10, /*!< choose sample 7 */
+} IMG_RESOLVE_OP;
+
+
+#endif /* __IMG_3DTYPES_H__ */
+/******************************************************************************
+ End of file (img_3dtypes.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/img_defs.h b/drivers/gpu/drm/img-rogue/1.10/img_defs.h
new file mode 100644
index 00000000000000..68f6aada0a42e8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/img_defs.h
@@ -0,0 +1,452 @@
+/*************************************************************************/ /*!
+@File
+@Title Common header containing type definitions for portability
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Contains variable and structure definitions. Any platform
+ specific types should be defined in this file.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__IMG_DEFS_H__)
+#define __IMG_DEFS_H__
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/types.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_types.h"
+
+#if defined (NO_INLINE_FUNCS)
+ #define INLINE
+ #define FORCE_INLINE
+#elif defined(INTEGRITY_OS)
+ #ifndef INLINE
+ #define INLINE
+ #endif
+ #define FORCE_INLINE static
+ #define INLINE_IS_PRAGMA
+#else
+#if defined (__cplusplus)
+ #define INLINE inline
+ #define FORCE_INLINE static inline
+#else
+#if !defined(INLINE)
+ #define INLINE __inline
+#endif
+#if (defined(UNDER_WDDM) || defined(WINDOWS_WDF)) && defined(_X86_)
+ #define FORCE_INLINE __forceinline
+#else
+ #define FORCE_INLINE static __inline
+#endif
+#endif
+#endif
+
+/* True if the GCC version is at least the given version. False for older
+ * versions of GCC, or other compilers.
+ */
+#define GCC_VERSION_AT_LEAST(major, minor) \
+ (__GNUC__ > (major) || \
+ (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+
+/* Ensure Clang's __has_extension macro is defined for all compilers so we
+ * can use it safely in preprocessor conditionals.
+ */
+#if !defined(__has_extension)
+#define __has_extension(e) 0
+#endif
+
+/* Use this in any file, or use attributes under GCC - see below */
+#ifndef PVR_UNREFERENCED_PARAMETER
+#define PVR_UNREFERENCED_PARAMETER(param) ((void)(param))
+#endif
+
+/* static_assert(condition, "message to print if it fails");
+ *
+ * Assert something at compile time. If the assertion fails, try to print
+ * the message, otherwise do nothing. static_assert is available if:
+ *
+ * - It's already defined as a macro (e.g. by <assert.h> in C11)
+ * - We're using MSVC which exposes static_assert unconditionally
+ * - We're using a C++ compiler that supports C++11
+ * - We're using GCC 4.6 and up in C mode (in which case it's available as
+ * _Static_assert)
+ *
+ * In all other cases, fall back to an equivalent that makes an invalid
+ * declaration.
+ */
+#if !defined(static_assert) && !defined(_MSC_VER) && \
+ (!defined(__cplusplus) || __cplusplus < 201103L)
+ /* static_assert isn't already available */
+ #if !defined(__cplusplus) && (GCC_VERSION_AT_LEAST(4, 6) || \
+ (defined(__clang__) && __has_extension(c_static_assert)))
+ #define static_assert _Static_assert
+ #else
+ #define static_assert(expr, message) \
+ extern int _static_assert_failed[2*!!(expr) - 1] __attribute__((unused))
+ #endif
+#else
+#if defined(CONFIG_L4)
+ /* Defined but not compatible with DDK usage
+ so undefine & ignore */
+ #undef static_assert
+ #define static_assert(expr, message)
+#endif
+#endif
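+
+/* Worked example of the fallback path above:
+ * static_assert(sizeof(void *) >= 4, "...") declares an array of size
+ * 2*!!(1) - 1 == 1 when the condition holds, and size -1 (a compile error)
+ * when it does not.
+ */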
+
+/*! Macro to round the supplied value up to the next n-byte aligned value.
+ * n must be a power of two.
+ *
+ * Both arguments should be of a type with the same size otherwise the macro may
+ * cut off digits, e.g. imagine a 64 bit address in _x and a 32 bit value in _n.
+ */
+#define PVR_ALIGN(_x, _n) (((_x)+((_n)-1)) & ~((_n)-1))
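+
+/* Worked example: PVR_ALIGN(0x1003, 0x1000) == 0x2000 and
+ * PVR_ALIGN(0x1000, 0x1000) == 0x1000.
+ */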
+
+#if defined(_WIN32)
+
+#if defined(WINDOWS_WDF)
+
+ /*
+	 * For WINDOWS_WDF drivers we don't want these defines to override the
+	 * calling conventions propagated through the build system. This 'empty'
+	 * choice helps to resolve all the calling convention issues.
+ *
+ */
+ #define IMG_CALLCONV
+ #define C_CALLCONV
+
+ #define IMG_INTERNAL
+ #define IMG_RESTRICT __restrict
+
+ /*
+	 * The proper way of dll linking under MS compilers is made of two things:
+	 * - decorate the implementation with __declspec(dllexport);
+	 *   this decoration helps the compiler produce the so-called 'export library'
+	 * - decorate the forward-declaration (in a source dependent on the dll) with
+	 *   __declspec(dllimport); this decoration helps the compiler generate
+	 *   faster and smaller code when calling dll-imported functions
+	 *
+	 * Usually these decorations are performed by a single macro that expands to
+	 * the proper __declspec() depending on the translation unit: dllexport
+	 * inside the dll source and dllimport outside it. Having IMG_EXPORT and
+	 * IMG_IMPORT resolve to the same __declspec() makes no sense, but at
+	 * least works.
+ */
+ #define IMG_IMPORT __declspec(dllexport)
+ #define IMG_EXPORT __declspec(dllexport)
+
+#else
+
+ #define IMG_CALLCONV __stdcall
+ #define IMG_INTERNAL
+ #define IMG_EXPORT __declspec(dllexport)
+ #define IMG_RESTRICT __restrict
+ #define C_CALLCONV __cdecl
+
+ /*
+	 * IMG_IMPORT is defined as IMG_EXPORT so that headers and implementations
+	 * match. Some compilers require the header to be declared IMPORT while the
+	 * implementation is declared EXPORT.
+ */
+ #define IMG_IMPORT IMG_EXPORT
+
+#endif
+
+#if defined(UNDER_WDDM)
+ #ifndef _INC_STDLIB
+ #if defined(__mips)
+ /* do nothing */
+ #elif defined(UNDER_MSBUILD)
+ _CRTIMP __declspec(noreturn) void __cdecl abort(void);
+ #else
+ _CRTIMP void __cdecl abort(void);
+ #endif
+ #endif
+#endif /* UNDER_WDDM */
+#else
+ #if (defined(LINUX) || defined(__QNXNTO__)) && defined(__KERNEL__)
+ #define IMG_INTERNAL
+ #define IMG_EXPORT
+ #define IMG_CALLCONV
+ #elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__)
+ #define IMG_CALLCONV
+ #define C_CALLCONV
+
+ #if defined(__METAG)
+ #define IMG_INTERNAL
+ #else
+ #define IMG_INTERNAL __attribute__((visibility("hidden")))
+ #endif
+
+ #define IMG_EXPORT __attribute__((visibility("default")))
+ #define IMG_RESTRICT __restrict__
+ #elif defined(INTEGRITY_OS)
+ #define IMG_CALLCONV
+ #define IMG_INTERNAL
+ #define IMG_EXPORT
+ #define IMG_RESTRICT
+ #define C_CALLCONV
+ #define __cdecl
+
+ #ifndef USE_CODE
+ #define IMG_ABORT() printf("IMG_ABORT was called.\n")
+ #endif
+ #else
+ #error("define an OS")
+ #endif
+
+#endif
+
+// Use default definition if not overridden
+#ifndef IMG_ABORT
+ #if defined(EXIT_ON_ABORT)
+ #define IMG_ABORT() exit(1)
+ #else
+ #define IMG_ABORT() abort()
+ #endif
+#endif
+
+/* The best way to suppress unused parameter warnings using GCC is to use a
+ * variable attribute. Place __maybe_unused between the type and name of an
+ * unused parameter in a function parameter list, e.g. `int __maybe_unused var`.
+ * This should only be used in GCC build environments, for example, in files
+ * that compile only on Linux. Other files should use PVR_UNREFERENCED_PARAMETER */
+
+/* Kernel macros for compiler attributes */
+/* Note: param positions start at 1 */
+#if defined(LINUX) && defined(__KERNEL__)
+ #include <linux/compiler.h>
+#elif defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+ #define __must_check __attribute__((warn_unused_result))
+ #define __maybe_unused __attribute__((unused))
+ #define __malloc __attribute__((malloc))
+
+ /* Bionic's <sys/cdefs.h> might have defined these already */
+ /* See https://android.googlesource.com/platform/bionic.git/+/master/libc/include/sys/cdefs.h */
+ #if !defined(__packed)
+ #define __packed __attribute__((packed))
+ #endif
+ #if !defined(__aligned)
+ #define __aligned(n) __attribute__((aligned(n)))
+ #endif
+ #if !defined(__noreturn)
+ #define __noreturn __attribute__((noreturn))
+ #endif
+
+ /* That one compiler that supports attributes but doesn't support
+ * the printf attribute... */
+ #if defined(__GNUC__)
+ #define __printf(fmt, va) __attribute__((format(printf, fmt, va)))
+ #else
+ #define __printf(fmt, va)
+ #endif /* defined(__GNUC__) */
+
+ #define __user
+ #define __force
+ #define __iomem
+#else
+ /* Silently ignore those attributes */
+ #define __printf(fmt, va)
+ #define __packed
+ #define __aligned(n)
+ #define __must_check
+ #define __maybe_unused
+ #define __malloc
+
+ #if defined(_MSC_VER) || defined(CC_ARM)
+ #define __noreturn __declspec(noreturn)
+ #else
+ #define __noreturn
+ #endif
+
+ #define __user
+ #define __force
+ #define __iomem
+#endif
+
+
+/* Other attributes, following the same style */
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+ #define __param_nonnull(...) __attribute__((nonnull(__VA_ARGS__)))
+ #define __returns_nonnull __attribute__((returns_nonnull))
+#else
+ #define __param_nonnull(...)
+ #define __returns_nonnull
+#endif
+
+
+/* GCC builtins */
+#if defined(LINUX) && defined(__KERNEL__)
+ #include <linux/compiler.h>
+#elif defined(__GNUC__)
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+
+ /* Compiler memory barrier to prevent reordering */
+ #define barrier() __asm__ __volatile__("": : :"memory")
+#else
+ #define barrier() do { static_assert(0, "barrier() isn't supported by your compiler"); } while(0)
+#endif
+
+/* That one OS that defines one but not the other... */
+#ifndef likely
+ #define likely(x) (x)
+#endif
+#ifndef unlikely
+ #define unlikely(x) (x)
+#endif
+
+/* These two macros are also provided by the kernel */
+#ifndef BIT
+#define BIT(b) (1UL << (b))
+#endif
+
+#ifndef BIT_ULL
+#define BIT_ULL(b) (1ULL << (b))
+#endif
+
+#define BIT_SET(f, b) BITMASK_SET((f), BIT_ULL(b))
+#define BIT_UNSET(f, b) BITMASK_UNSET((f), BIT_ULL(b))
+#define BIT_TOGGLE(f, b) BITMASK_TOGGLE((f), BIT_ULL(b))
+#define BIT_ISSET(f, b) BITMASK_HAS((f), BIT_ULL(b))
+
+#define BITMASK_SET(f, m) (void) ((f) |= (m))
+#define BITMASK_UNSET(f, m) (void) ((f) &= ~(m))
+#define BITMASK_TOGGLE(f, m) (void) ((f) ^= (m))
+#define BITMASK_HAS(f, m) (((f) & (m)) == (m)) /* the bits from the mask are all set */
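+
+/* Worked example: starting from IMG_UINT64 uiFlags = 0, BIT_SET(uiFlags, 3)
+ * leaves uiFlags == 0x8 and BIT_ISSET(uiFlags, 3) is then non-zero, while
+ * BITMASK_HAS(uiFlags, 0x9) is 0 because bit 0 of the mask is clear in
+ * uiFlags.
+ */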
+
+#ifndef MAX
+#define MAX(a,b) (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a) < (b)) ? (a) : (b))
+#endif
+
+#ifndef CLAMP
+#define CLAMP(min, max, n) ((n) < (min) ? (min) : ((n) > (max) ? (max) : (n)))
+#endif
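+
+/* Worked example: CLAMP(0, 255, 300) == 255 and CLAMP(0, 255, -7) == 0.
+ * Note the argument order is (min, max, n).
+ */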
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+ #include <linux/kernel.h>
+ #include <linux/bug.h>
+#endif
+
+/* Get a structure's address from the address of one of its members */
+#define IMG_CONTAINER_OF(ptr, type, member) \
+ (type *) ((uintptr_t) (ptr) - offsetof(type, member))
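+
+/* Illustrative only, with a hypothetical struct: given
+ *     struct Item { int iKey; IMG_UINT32 ui32Data; };
+ * a pointer pui32 to the ui32Data member recovers the enclosing object with
+ *     IMG_CONTAINER_OF(pui32, struct Item, ui32Data)
+ */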
+
+/* The number of elements in a fixed-sized array */
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(ARR) (sizeof(ARR) / sizeof((ARR)[0]))
+#endif
+
+/* To guarantee that __func__ can be used, define it as a macro here if it
+ isn't already provided by the compiler. */
+#if defined(_MSC_VER) || (defined(__cplusplus) && __cplusplus < 201103L)
+#define __func__ __FUNCTION__
+#endif
+
+#if defined(__cplusplus)
+/* C++ Specific:
+ * Disallow use of copy and assignment operator within a class.
+ * Should be placed under private. */
+#define IMG_DISALLOW_COPY_AND_ASSIGN(C) \
+ C(const C&); \
+ void operator=(const C&)
+#endif
+
+#if defined(SUPPORT_PVR_VALGRIND) && !defined(__METAG) && !defined(__mips)
+ #include "/usr/include/valgrind/memcheck.h"
+
+ #define VG_MARK_INITIALIZED(pvData,ui32Size) VALGRIND_MAKE_MEM_DEFINED(pvData,ui32Size)
+ #define VG_MARK_NOACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_NOACCESS(pvData,ui32Size)
+ #define VG_MARK_ACCESS(pvData,ui32Size) VALGRIND_MAKE_MEM_UNDEFINED(pvData,ui32Size)
+#else
+ #if defined(_MSC_VER)
+ # define PVR_MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+ #else
+ # define PVR_MSC_SUPPRESS_4127
+ #endif
+
+ #define VG_MARK_INITIALIZED(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+ #define VG_MARK_NOACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+ #define VG_MARK_ACCESS(pvData,ui32Size) PVR_MSC_SUPPRESS_4127 do { } while(0)
+#endif
+
+#define _STRINGIFY(x) # x
+#define IMG_STRINGIFY(x) _STRINGIFY(x)
+
+#if defined(INTEGRITY_OS)
+ /* Definitions not present in INTEGRITY. */
+ #define PATH_MAX 200
+#endif
+
+#if defined (__clang__) || defined (__GNUC__)
+ /* __SIZEOF_POINTER__ is defined already by these compilers */
+#elif defined (INTEGRITY_OS)
+ #if defined (__Ptr_Is_64)
+ #define __SIZEOF_POINTER__ 8
+ #else
+ #define __SIZEOF_POINTER__ 4
+ #endif
+#elif defined(_WIN32)
+ #define __SIZEOF_POINTER__ sizeof(char *)
+#else
+ #warning Unknown OS - using default method to determine whether CPU arch is 64-bit.
+ #define __SIZEOF_POINTER__ sizeof(char *)
+#endif
+
+/* RDI8567: gcc/clang/llvm load/store optimisations may cause issues with
+ * uncached device memory allocations. Some pointers are made 'volatile' to
+ * prevent those optimisations being applied to writes through those
+ * pointers.
+ */
+#if (GCC_VERSION_AT_LEAST(7, 0) || defined(__clang__)) && (defined(__arm64__) || defined(__aarch64__))
+#define NOLDSTOPT volatile
+/* after applying 'volatile' to a pointer, we may need to cast it to 'void *'
+ * to keep it compatible with its existing uses
+ */
+#define NOLDSTOPT_VOID (void *)
+#else
+#define NOLDSTOPT
+#define NOLDSTOPT_VOID
+#endif
+
+#endif /* #if !defined (__IMG_DEFS_H__) */
+/*****************************************************************************
 End of file (img_defs.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/img_types.h b/drivers/gpu/drm/img-rogue/1.10/img_types.h
new file mode 100644
index 00000000000000..7d7a874f47e2db
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/img_types.h
@@ -0,0 +1,298 @@
+/*************************************************************************/ /*!
+@File
+@Title Global types for use by IMG APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines type aliases for use by IMG APIs.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __IMG_TYPES_H__
+#define __IMG_TYPES_H__
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* To use C99 types and definitions, there are two special cases we need to
+ * cater for:
+ *
+ * - Visual Studio: in VS2010 or later, some standard headers are available,
+ * and MSVC has its own built-in sized types. We can define the C99 types
+ * in terms of these.
+ *
+ * - Linux kernel code: C99 sized types are defined in <linux/types.h>, but
+ * some other features (like macros for constants or printf format
+ * strings) are missing, so we need to fill in the gaps ourselves.
+ *
+ * For other cases (userspace code under Linux, Android or Neutrino, or
+ * firmware code), we can include the standard headers.
+ */
+#if defined(_MSC_VER)
+ #include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+ #include <linux/types.h>
+ #include "kernel_types.h"
+#elif defined(LINUX) || defined(__METAG) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+ #include <stddef.h> /* NULL */
+ #include <stdint.h>
+ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+ #include <limits.h> /* INT_MIN, etc */
+#elif defined(__mips)
+ #include <stddef.h> /* NULL */
+ #include <inttypes.h> /* intX_t/uintX_t, format specifiers */
+#else
+ #error C99 support not set up for this build
+#endif
+
+typedef unsigned int IMG_UINT, *IMG_PUINT;
+typedef int IMG_INT, *IMG_PINT;
+
+typedef uint8_t IMG_UINT8, *IMG_PUINT8;
+typedef uint8_t IMG_BYTE, *IMG_PBYTE;
+typedef int8_t IMG_INT8, *IMG_PINT8;
+typedef char IMG_CHAR, *IMG_PCHAR;
+
+typedef uint16_t IMG_UINT16, *IMG_PUINT16;
+typedef int16_t IMG_INT16, *IMG_PINT16;
+typedef uint32_t IMG_UINT32, *IMG_PUINT32;
+typedef int32_t IMG_INT32, *IMG_PINT32;
+
+typedef uint64_t IMG_UINT64, *IMG_PUINT64;
+typedef int64_t IMG_INT64, *IMG_PINT64;
+#define IMG_INT64_C(c) INT64_C(c)
+#define IMG_UINT64_C(c) UINT64_C(c)
+#define IMG_UINT64_FMTSPEC PRIu64
+#define IMG_UINT64_FMTSPECX PRIX64
+#define IMG_UINT64_FMTSPECx PRIx64
+#define IMG_UINT64_FMTSPECo PRIo64
+#define IMG_INT64_FMTSPECd PRId64
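+
+/* Illustrative only: the format specifiers above concatenate into printf
+ * format strings in the usual PRIx64 style, e.g.
+ *     printf("addr=0x%" IMG_UINT64_FMTSPECX "\n", ui64Addr);
+ */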
+
+#define IMG_UINT16_MAX UINT16_MAX
+#define IMG_UINT32_MAX UINT32_MAX
+#define IMG_UINT64_MAX UINT64_MAX
+
+#define IMG_INT16_MAX INT16_MAX
+#define IMG_INT32_MAX INT32_MAX
+#define IMG_INT64_MAX INT64_MAX
+
+/* Linux kernel mode does not use floating point */
+typedef float IMG_FLOAT, *IMG_PFLOAT;
+typedef double IMG_DOUBLE, *IMG_PDOUBLE;
+
+typedef union _IMG_UINT32_FLOAT_
+{
+ IMG_UINT32 ui32;
+ IMG_FLOAT f;
+} IMG_UINT32_FLOAT;
+
+typedef int IMG_SECURE_TYPE;
+
+typedef enum tag_img_bool
+{
+ IMG_FALSE = 0,
+ IMG_TRUE = 1,
+ IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+typedef IMG_CHAR const* IMG_PCCHAR;
+#endif
+
+#if defined(_MSC_VER)
+#define IMG_SIZE_FMTSPEC "%Iu"
+#define IMG_SIZE_FMTSPECX "%Ix"
+#else
+#define IMG_SIZE_FMTSPEC "%zu"
+#define IMG_SIZE_FMTSPECX "%zx"
+#endif
+
+#if defined(LINUX) && defined(__KERNEL__)
+/* prints the function name when used with printk */
+#define IMG_PFN_FMTSPEC "%pf"
+#else
+#define IMG_PFN_FMTSPEC "%p"
+#endif
+
+typedef void *IMG_HANDLE;
+
+/* services/stream ID */
+typedef IMG_UINT64 IMG_SID;
+
+/* Process IDs */
+typedef IMG_UINT32 IMG_PID;
+
+/* OS connection type */
+typedef int IMG_OS_CONNECTION;
+
+
+/*
+ * Address types.
+ * All types used to refer to a block of memory are wrapped in structures
+ * to enforce some degree of type safety, i.e. an IMG_DEV_VIRTADDR cannot
+ * be assigned to a variable of type IMG_DEV_PHYADDR because they are not the
+ * same thing.
+ *
+ * There is an assumption that the system contains at most one non-CPU MMU,
+ * and that a memory block is only mapped by the MMU once.
+ *
+ * Different devices could have offset views of the physical address space.
+ *
+ */
+
+
+/*
+ *
+ * +------------+ +------------+ +------------+ +------------+
+ * | CPU | | DEV | | DEV | | DEV |
+ * +------------+ +------------+ +------------+ +------------+
+ * | | | |
+ * | void * |IMG_DEV_VIRTADDR |IMG_DEV_VIRTADDR |
+ * | \-------------------/ |
+ * | | |
+ * +------------+ +------------+ |
+ * | MMU | | MMU | |
+ * +------------+ +------------+ |
+ * | | |
+ * | | |
+ * | | |
+ * +--------+ +---------+ +--------+
+ * | Offset | | (Offset)| | Offset |
+ * +--------+ +---------+ +--------+
+ * | | IMG_DEV_PHYADDR |
+ * | | |
+ * | | IMG_DEV_PHYADDR |
+ * +---------------------------------------------------------------------+
+ * | System Address bus |
+ * +---------------------------------------------------------------------+
+ *
+ */
+
+#define IMG_DEV_VIRTADDR_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_SIZE_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_ALIGN_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+#define IMG_DEVMEM_OFFSET_FMTSPEC "0x%010" IMG_UINT64_FMTSPECX
+
+/* cpu physical address */
+typedef struct _IMG_CPU_PHYADDR
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+ uintptr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (uintptr_t)(var)
+#elif defined(LINUX) && defined(__KERNEL__)
+ phys_addr_t uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (phys_addr_t)(var)
+#else
+ IMG_UINT64 uiAddr;
+#define IMG_CAST_TO_CPUPHYADDR_UINT(var) (IMG_UINT64)(var)
+#endif
+} IMG_CPU_PHYADDR;
+
+/* device physical address */
+typedef struct _IMG_DEV_PHYADDR
+{
+ IMG_UINT64 uiAddr;
+} IMG_DEV_PHYADDR;
+
+/* system physical address */
+typedef struct _IMG_SYS_PHYADDR
+{
+#if defined(UNDER_WDDM) || defined(WINDOWS_WDF)
+ uintptr_t uiAddr;
+#else
+ IMG_UINT64 uiAddr;
+#endif
+} IMG_SYS_PHYADDR;
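+
+/* Illustrative only: the wrapping buys compile-time type safety, e.g.
+ *     IMG_DEV_PHYADDR sDevPAddr = { IMG_UINT64_C(0x1000) };
+ *     IMG_SYS_PHYADDR sSysPAddr = sDevPAddr;   (rejected: distinct types)
+ * whereas bare IMG_UINT64 addresses would silently interconvert.
+ */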
+
+/* 32-bit device virtual address (e.g. MSVDX) */
+typedef struct _IMG_DEV_VIRTADDR32
+{
+ IMG_UINT32 uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT32(var) (IMG_UINT32)(var)
+} IMG_DEV_VIRTADDR32;
+
+/*
+ rectangle structure
+*/
+typedef struct _IMG_RECT_
+{
+ IMG_INT32 x0;
+ IMG_INT32 y0;
+ IMG_INT32 x1;
+ IMG_INT32 y1;
+}IMG_RECT, *PIMG_RECT;
+
+typedef struct _IMG_RECT_16_
+{
+ IMG_INT16 x0;
+ IMG_INT16 y0;
+ IMG_INT16 x1;
+ IMG_INT16 y1;
+} IMG_RECT_16, *PIMG_RECT_16;
+
+typedef struct _IMG_RECT_32_
+{
+ IMG_FLOAT x0;
+ IMG_FLOAT y0;
+ IMG_FLOAT x1;
+ IMG_FLOAT y1;
+} IMG_RECT_F32, *PIMG_RECT_F32;
+
+/*
+ * box structure
+ */
+typedef struct _IMG_BOX_
+{
+ IMG_INT32 x0;
+ IMG_INT32 y0;
+ IMG_INT32 z0;
+ IMG_INT32 x1;
+ IMG_INT32 y1;
+ IMG_INT32 z1;
+} IMG_BOX, *PIMG_BOX;
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#include "img_defs.h"
+
+#endif /* __IMG_TYPES_H__ */
+/******************************************************************************
+ End of file (img_types.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/info_page.h b/drivers/gpu/drm/img-rogue/1.10/info_page.h
new file mode 100644
index 00000000000000..090f0f072e1fa2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/info_page.h
@@ -0,0 +1,88 @@
+/*************************************************************************/ /*!
+@File
+@Title Kernel/User mode general purpose shared memory.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description General purpose memory shared between kernel driver and user
+ mode.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _INFO_PAGE_KM_H_
+#define _INFO_PAGE_KM_H_
+
+#include "pvrsrv_error.h"
+
+#include "pmr.h"
+#include "pvrsrv.h"
+
+/**
+ * @Function InfoPageCreate
+ * @Description Allocates resources for global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData);
+
+/**
+ * @Function InfoPageDestroy
+ * @Description Frees all of the resources of the global information page.
+ * @Input psData pointer to PVRSRV data
+ * @Return None
+ */
+void InfoPageDestroy(PVRSRV_DATA *psData);
+
+/**
+ * @Function PVRSRVAcquireInfoPageKM()
+ * @Description Obtains the global information page, which acts as general
+ *              purpose shared memory between KM and UM. The use of this
+ *              information page outside of services is _not_ recommended.
+ * @Output ppsPMR handle to exported PMR
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR);
+
+/**
+ * @Function PVRSRVReleaseInfoPageKM()
+ * @Description Release counterpart to PVRSRVAcquireInfoPageKM().
+ * @Input psPMR handle to exported PMR
+ * @Return PVRSRV_OK on success and other PVRSRV_ERROR code on error.
+ */
+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR);
+
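+/* Typical usage of the pair above (a sketch; error handling elided):
+ *
+ *   PMR *psPMR;
+ *   if (PVRSRVAcquireInfoPageKM(&psPMR) == PVRSRV_OK)
+ *   {
+ *       ... hand the PMR to the requesting client ...
+ *       PVRSRVReleaseInfoPageKM(psPMR);
+ *   }
+ */
+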
+#endif /* _INFO_PAGE_KM_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/info_page_defs.h b/drivers/gpu/drm/img-rogue/1.10/info_page_defs.h
new file mode 100644
index 00000000000000..e333aeb7b4af97
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/info_page_defs.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@File
+@Title Kernel/User mode general purpose shared memory.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description General purpose shared memory (i.e. information page) mapped by
+ kernel space driver and user space clients. All information page
+ entries are sizeof(IMG_UINT32) on both 32/64-bit environments.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _INFO_PAGE_DEFS_H_
+#define _INFO_PAGE_DEFS_H_
+
+
+/* CacheOp information page entries */
+#define CACHEOP_INFO_IDX_START 0x00
+#define CACHEOP_INFO_GFSEQNUM0 (CACHEOP_INFO_IDX_START + 0) /*!< Current global flush sequence number */
+#define CACHEOP_INFO_GFSEQNUM1 (CACHEOP_INFO_IDX_START + 1) /*!< Validity global flush sequence number */
+#define CACHEOP_INFO_UMRBFONLY (CACHEOP_INFO_IDX_START + 2) /*!< Use UM flush only (i.e. no KM GF) */
+#define CACHEOP_INFO_UMKMTHRESHLD (CACHEOP_INFO_IDX_START + 3) /*!< UM=>KM routing threshold in bytes */
+#define CACHEOP_INFO_KMGFTHRESHLD (CACHEOP_INFO_IDX_START + 4) /*!< KM/GF threshold in bytes */
+#define CACHEOP_INFO_KMDFTHRESHLD (CACHEOP_INFO_IDX_START + 5) /*!< KM/DF threshold in bytes */
+#define CACHEOP_INFO_LINESIZE (CACHEOP_INFO_IDX_START + 6) /*!< CPU data cache line size */
+#define CACHEOP_INFO_PGSIZE (CACHEOP_INFO_IDX_START + 7) /*!< CPU MMU page size */
+
+/* HWPerf information page entries */
+#define HWPERF_INFO_IDX_START 0x08
+#define HWPERF_FILTER_SERVICES_IDX (HWPERF_INFO_IDX_START + 0)
+#define HWPERF_FILTER_EGL_IDX (HWPERF_INFO_IDX_START + 1)
+#define HWPERF_FILTER_OPENGLES_IDX (HWPERF_INFO_IDX_START + 2)
+#define HWPERF_FILTER_OPENCL_IDX (HWPERF_INFO_IDX_START + 3)
+#define HWPERF_FILTER_OPENRL_IDX (HWPERF_INFO_IDX_START + 4)
+#define HWPERF_FILTER_VULKAN_IDX (HWPERF_INFO_IDX_START + 5)
+#define HWPERF_INFO_IDX_END (HWPERF_INFO_IDX_START + 6)
+
+/* BVNC of the core */
+#define CORE_ID_IDX_START (HWPERF_INFO_IDX_END)
+#define CORE_ID_BRANCH (CORE_ID_IDX_START + 0)
+#define CORE_ID_VERSION (CORE_ID_IDX_START + 1)
+#define CORE_ID_NUMBER_OF_SCALABLE_UNITS (CORE_ID_IDX_START + 2)
+#define CORE_ID_CONFIG (CORE_ID_IDX_START + 3)
+
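+/* Each entry above names one IMG_UINT32 slot in the information page, so a
+ * holder of the mapped page reads a value by plain indexing. A sketch, using
+ * the KM mapping kept in PVRSRV_DATA:
+ *
+ *   IMG_UINT32 ui32LineSize = psData->pui32InfoPage[CACHEOP_INFO_LINESIZE];
+ */
+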
+#endif /* _INFO_PAGE_DEFS_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/info_page_km.c b/drivers/gpu/drm/img-rogue/1.10/info_page_km.c
new file mode 100644
index 00000000000000..06bb7a7252b225
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/info_page_km.c
@@ -0,0 +1,134 @@
+/*************************************************************************/ /*!
+@File info_page_km.c
+@Title Kernel/User space shared memory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements general purpose shared memory between kernel driver
+ and user mode.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "info_page_defs.h"
+#include "info_page.h"
+#include "pvrsrv.h"
+#include "devicemem.h"
+#include "pmr.h"
+
+PVRSRV_ERROR InfoPageCreate(PVRSRV_DATA *psData)
+{
+ const DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psData != NULL);
+
+ /* Create the CacheOp information page */
+ eError = DevmemAllocateExportable(psData->psHostMemDeviceNode,
+ OSGetPageSize(),
+ OSGetPageSize(),
+ OSGetPageShift(),
+ uiMemFlags,
+ "PVRSRVInfoPage",
+ &psData->psInfoPageMemDesc);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+ eError = DevmemAcquireCpuVirtAddr(psData->psInfoPageMemDesc,
+ (void **) &psData->pui32InfoPage);
+	PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e0);
+
+ /* This PMR is also used for deferring timelines, global flush & logging KM
+ * requests in debug */
+ eError = DevmemLocalGetImportHandle(psData->psInfoPageMemDesc,
+ (void **) &psData->psInfoPagePMR);
+ PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e0);
+
+ eError = OSLockCreate(&psData->hInfoPageLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", e0);
+
+ return PVRSRV_OK;
+
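+	/* InfoPageDestroy() NULL-checks each member, so it is safe to call here
+	 * to unwind a partially-initialised information page. */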
+e0:
+ InfoPageDestroy(psData);
+ return eError;
+}
+
+void InfoPageDestroy(PVRSRV_DATA *psData)
+{
+ if (psData->psInfoPageMemDesc)
+ {
+ if (psData->pui32InfoPage != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psData->psInfoPageMemDesc);
+ psData->pui32InfoPage = NULL;
+ }
+
+ DevmemFree(psData->psInfoPageMemDesc);
+ psData->psInfoPageMemDesc = NULL;
+ }
+
+ if (psData->hInfoPageLock)
+ {
+ OSLockDestroy(psData->hInfoPageLock);
+ psData->hInfoPageLock = NULL;
+ }
+}
+
+PVRSRV_ERROR PVRSRVAcquireInfoPageKM(PMR **ppsPMR)
+{
+ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+ PVR_LOGR_IF_FALSE(psData->psInfoPageMemDesc != NULL, "invalid MEMDESC"
+ " handle", PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_LOGR_IF_FALSE(psData->psInfoPagePMR != NULL, "invalid PMR handle",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Copy the PMR import handle back */
+ *ppsPMR = psData->psInfoPagePMR;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVReleaseInfoPageKM(PMR *psPMR)
+{
+	/* Nothing to do here as the PMR is a singleton */
+	PVR_UNREFERENCED_PARAMETER(psPMR);
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/ioctl.c b/drivers/gpu/drm/img-rogue/1.10/ioctl.c
new file mode 100644
index 00000000000000..734c7ebe25232a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/ioctl.c
@@ -0,0 +1,315 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(_WIN32)
+#pragma warning(disable:4201)
+#pragma warning(disable:4214)
+#pragma warning(disable:4115)
+#pragma warning(disable:4514)
+
+#include <ntddk.h>
+#include <windef.h>
+
+#endif /* _WIN32 */
+
+#ifdef LINUX
+#include <linux/uaccess.h>
+#include "pvr_uaccess.h"
+#endif /* LINUX */
+
+#include "img_types.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "dbgdriv_ioctl.h"
+#include "hostfunc.h"
+
+#ifdef _WIN32
+#pragma warning(default:4214)
+#pragma warning(default:4115)
+#endif /* _WIN32 */
+
+/*****************************************************************************
+ Code
+*****************************************************************************/
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetServiceTable
+
+ PURPOSE : Returns a pointer to the kernel debug driver service table
+
+ PARAMETERS : pvInBuffer (unused), pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetServiceTable(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ void **ppvOut;
+
+ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+ ppvOut = (void **) pvOutBuffer;
+
+ *ppvOut = DBGDrivGetServiceTable();
+
+ return IMG_TRUE;
+}
+
+#if defined(__QNXNTO__)
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivCreateStream
+
+ PURPOSE : Creates a named debug stream and returns its handles
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivCreateStream(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_IN_CREATESTREAM psIn;
+ PDBG_OUT_CREATESTREAM psOut;
+
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ psIn = (PDBG_IN_CREATESTREAM) pvInBuffer;
+ psOut = (PDBG_OUT_CREATESTREAM) pvOutBuffer;
+
+ return ExtDBGDrivCreateStream(psIn->u.pszName, DEBUG_FLAGS_NO_BUF_EXPANDSION, psIn->ui32Pages, &psOut->phInit, &psOut->phMain, &psOut->phDeinit);
+}
+#endif
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetStream
+
+ PURPOSE : Looks up a debug stream by name and returns its SID
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetStream(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_IN_FINDSTREAM psParams;
+ IMG_SID *phStream;
+
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ psParams = (PDBG_IN_FINDSTREAM)pvInBuffer;
+ phStream = (IMG_SID *)pvOutBuffer;
+
+ /* Ensure that the name will be NULL terminated */
+ psParams->pszName[DEBUG_STREAM_NAME_MAX-1] = '\0';
+
+ *phStream = PStream2SID(ExtDBGDrivFindStream(psParams->pszName, psParams->bResetStream));
+
+ return IMG_TRUE;
+}
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivRead
+
+ PURPOSE : Reads data from a debug stream into a user buffer
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivRead(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ IMG_UINT32 *pui32BytesCopied;
+ PDBG_IN_READ psInParams;
+ PDBG_STREAM psStream;
+ IMG_UINT8 __user *pui8ReadBuffer;
+
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ psInParams = (PDBG_IN_READ) pvInBuffer;
+ pui32BytesCopied = (IMG_UINT32 *) pvOutBuffer;
+ pui8ReadBuffer = WIDEPTR_GET_PTR(psInParams->pui8OutBuffer, bCompat);
+
+ psStream = SID2PStream(psInParams->hStream);
+
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ *pui32BytesCopied = ExtDBGDrivRead(psStream,
+ psInParams->ui32BufID,
+ psInParams->ui32OutBufferSize,
+ (IMG_UINT8 __force *) pui8ReadBuffer);
+ return IMG_TRUE;
+ }
+ else
+ {
+ /* invalid SID */
+ *pui32BytesCopied = 0;
+ return IMG_FALSE;
+ }
+}
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivSetMarker
+
+ PURPOSE : Sets the marker in the stream to split output files
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivSetMarker(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_IN_SETMARKER psParams;
+ PDBG_STREAM psStream;
+
+ psParams = (PDBG_IN_SETMARKER) pvInBuffer;
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ psStream = SID2PStream(psParams->hStream);
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ ExtDBGDrivSetMarker(psStream, psParams->ui32Marker);
+ return IMG_TRUE;
+ }
+ else
+ {
+ /* invalid SID */
+ return IMG_FALSE;
+ }
+}
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetMarker
+
+ PURPOSE : Gets the marker in the stream to split output files
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetMarker(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ PDBG_STREAM psStream;
+ IMG_UINT32 *pui32Current;
+
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ psStream = SID2PStream(*(IMG_SID *)pvInBuffer);
+ if (psStream != (PDBG_STREAM)NULL)
+ {
+ *pui32Current = ExtDBGDrivGetMarker(psStream);
+ return IMG_TRUE;
+ }
+ else
+ {
+ /* invalid SID */
+ *pui32Current = 0;
+ return IMG_FALSE;
+ }
+}
+
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivWaitForEvent
+
+ PURPOSE : Blocks until the given debug driver event is signalled
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivWaitForEvent(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ DBG_EVENT eEvent = (DBG_EVENT)(*(IMG_UINT32 *)pvInBuffer);
+
+ PVR_UNREFERENCED_PARAMETER(pvOutBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ ExtDBGDrivWaitForEvent(eEvent);
+
+ return IMG_TRUE;
+}
+
+
+/*****************************************************************************
+ FUNCTION : DBGDIOCDrivGetFrame
+
+ PURPOSE : Gets the current frame number
+
+ PARAMETERS : pvInBuffer, pvOutBuffer
+
+ RETURNS : success
+*****************************************************************************/
+static IMG_UINT32 DBGDIOCDrivGetFrame(void *pvInBuffer, void *pvOutBuffer, IMG_BOOL bCompat)
+{
+ IMG_UINT32 *pui32Current;
+
+ PVR_UNREFERENCED_PARAMETER(pvInBuffer);
+ PVR_UNREFERENCED_PARAMETER(bCompat);
+
+ pui32Current = (IMG_UINT32 *) pvOutBuffer;
+
+ *pui32Current = ExtDBGDrivGetFrame();
+
+ return IMG_TRUE;
+}
+
+/*
+ ioctl interface jump table.
+ Accessed from the UM debug driver client
+*/
+IMG_UINT32 (*g_DBGDrivProc[DEBUG_SERVICE_MAX_API])(void *, void *, IMG_BOOL) =
+{
+	DBGDIOCDrivGetServiceTable, /* WDDM only: used by the KMD to retrieve the service table address from DBGDRV; not used by umdbgdrvlnx */
+ DBGDIOCDrivGetStream,
+ DBGDIOCDrivRead,
+ DBGDIOCDrivSetMarker,
+ DBGDIOCDrivGetMarker,
+ DBGDIOCDrivWaitForEvent,
+ DBGDIOCDrivGetFrame,
+#if defined(__QNXNTO__)
+ DBGDIOCDrivCreateStream
+#endif
+};
+
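+/*
+ * Dispatch sketch (illustrative; the names outside this file are
+ * assumptions): the OS-specific ioctl entry point bounds-checks the command
+ * index before jumping through the table:
+ *
+ *   if (ui32Cmd < DEBUG_SERVICE_MAX_API)
+ *   {
+ *       bStatus = g_DBGDrivProc[ui32Cmd](pvIn, pvOut, bCompat);
+ *   }
+ */
+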
+/*****************************************************************************
+ End of file (ioctl.c)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/kernel_compatibility.h b/drivers/gpu/drm/img-rogue/1.10/kernel_compatibility.h
new file mode 100644
index 00000000000000..62d147ae98b958
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/kernel_compatibility.h
@@ -0,0 +1,365 @@
+/*************************************************************************/ /*!
+@Title Kernel versions compatibility macros
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Per-version macros to allow code to seamlessly use older kernel
+             versions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_COMPATIBILITY_H__
+#define __KERNEL_COMPATIBILITY_H__
+
+#include <linux/version.h>
+
+/*
+ * Stop supporting an old kernel? Remove the top block.
+ * New incompatible kernel? Append a new block at the bottom.
+ *
+ * Please write your version test as `VERSION < X.Y`, and use the earliest
+ * possible version :)
+ */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
+
+/* Linux 3.7 split VM_RESERVED into VM_DONTDUMP and VM_DONTEXPAND */
+#define VM_DONTDUMP VM_RESERVED
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0)) */
+
+/*
+ * Note: this fix had to be written backwards because get_unused_fd_flags
+ * was already defined but not exported on kernels < 3.7
+ *
+ * When removing support for kernels < 3.7, this block should be removed
+ * and all `get_unused_fd()` should be manually replaced with
+ * `get_unused_fd_flags(0)`
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0))
+
+/* Linux 3.19 removed get_unused_fd() */
+/* get_unused_fd_flags was introduced in 3.7 */
+#define get_unused_fd() get_unused_fd_flags(0)
+
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
+
+/*
+ * Headers shouldn't normally be included by this file but this is a special
+ * case as it's not obvious from the name that devfreq_add_device needs this
+ * include.
+ */
+#include <linux/string.h>
+
+#define devfreq_add_device(dev, profile, name, data) \
+ ({ \
+ struct devfreq *__devfreq; \
+ if (name && !strcmp(name, "simple_ondemand")) \
+ __devfreq = devfreq_add_device(dev, profile, \
+ &devfreq_simple_ondemand, data); \
+ else \
+ __devfreq = ERR_PTR(-EINVAL); \
+ __devfreq; \
+ })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+
+#define DRIVER_RENDER 0
+#define DRM_RENDER_ALLOW 0
+
+/* Linux 3.12 introduced a new shrinker API */
+#define SHRINK_STOP (~0UL)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+
+#define dev_pm_opp_get_opp_count(dev) opp_get_opp_count(dev)
+#define dev_pm_opp_get_freq(opp) opp_get_freq(opp)
+#define dev_pm_opp_get_voltage(opp) opp_get_voltage(opp)
+#define dev_pm_opp_add(dev, freq, u_volt) opp_add(dev, freq, u_volt)
+#define dev_pm_opp_find_freq_ceil(dev, freq) opp_find_freq_ceil(dev, freq)
+
+#if defined(CONFIG_ARM)
+/* Linux 3.13 renamed ioremap_cached to ioremap_cache */
+#define ioremap_cache(cookie,size) ioremap_cached(cookie,size)
+#endif /* defined(CONFIG_ARM) */
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0))
+
+/* Linux 3.14 introduced a new set of sized min and max defines */
+#ifndef U32_MAX
+#define U32_MAX ((u32)UINT_MAX)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+
+/* Linux 3.17 changed the 3rd argument from a `struct page ***pages` to
+ * `struct page **pages` */
+#define map_vm_area(area, prot, pages) map_vm_area(area, prot, &pages)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
+
+/*
+ * Linux 4.7 removed this function but its replacement was available since 3.19.
+ */
+#define drm_crtc_send_vblank_event(crtc, e) drm_send_vblank_event((crtc)->dev, drm_crtc_index(crtc), e)
+
+/* seq_has_overflowed() was introduced in 3.19 but the structure elements
+ * have been available since 2.x
+ */
+#include <linux/seq_file.h>
+static inline bool seq_has_overflowed(struct seq_file *m)
+{
+ return m->count == m->size;
+}
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0))
+
+#define debugfs_create_file_size(name, mode, parent, data, fops, file_size) \
+ ({ \
+ struct dentry *de; \
+ de = debugfs_create_file(name, mode, parent, data, fops); \
+ if (de) \
+ de->d_inode->i_size = file_size; \
+ de; \
+ })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0))
+
+/* Linux 4.4 renamed __GFP_WAIT to __GFP_RECLAIM */
+#define __GFP_RECLAIM __GFP_WAIT
+
+#if !defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+#define dev_pm_opp_of_add_table(dev) of_init_opp_table(dev)
+#define dev_pm_opp_of_remove_table(dev) of_free_opp_table(dev)
+#else
+#define sync_fence_create(data_name, sync_pt) sync_fence_create(data_name, &(sync_pt)->base)
+#endif
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/* Linux 4.5 added a new printf-style parameter for debug messages */
+
+#define drm_encoder_init(dev, encoder, funcs, encoder_type, name, ...) \
+ drm_encoder_init(dev, encoder, funcs, encoder_type)
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
+ ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type); })
+
+#define drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, name, ...) \
+ drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs)
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0))
+
+#define drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, format_modifiers, type, name, ...) \
+ ({ (void) format_modifiers; drm_universal_plane_init(dev, plane, possible_crtcs, funcs, formats, format_count, type, name, ##__VA_ARGS__); })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+
+/*
+ * Linux 4.6 removed the first two parameters: the "struct task_struct"
+ * pointer "current" is defined in asm/current.h, which makes it pointless
+ * to pass it on every function call.
+ */
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+ get_user_pages(current, current->mm, start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas)
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 replaced the write/force parameters with "gup_flags" */
+#define get_user_pages(start, nr_pages, gup_flags, pages, vmas) \
+ get_user_pages(start, nr_pages, gup_flags & FOLL_WRITE, gup_flags & FOLL_FORCE, pages, vmas)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) */
+
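+/*
+ * With the wrappers above, a single call site (sketch; variable names are
+ * assumptions) compiles against every supported kernel using the modern
+ * 4.9+ five-argument form:
+ *
+ *   iRet = get_user_pages(uiStart, uiNumPages, FOLL_WRITE, ppsPages, NULL);
+ */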
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+
+/*
+ * Linux 4.6 removed the start and end arguments as it now always maps
+ * the entire DMA-BUF.
+ * Additionally, dma_buf_end_cpu_access() now returns an int error.
+ */
+#define dma_buf_begin_cpu_access(DMABUF, DIRECTION) dma_buf_begin_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION)
+#define dma_buf_end_cpu_access(DMABUF, DIRECTION) ({ dma_buf_end_cpu_access(DMABUF, 0, DMABUF->size, DIRECTION); 0; })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
+
+/* Linux 4.7 removed the first argument as it was never used */
+#define drm_gem_object_lookup(filp, handle) drm_gem_object_lookup((filp)->minor->dev, filp, handle)
+
+/* Linux 4.7 replaced nla_put_u64 with nla_put_u64_64bit */
+#define nla_put_u64_64bit(skb, attrtype, value, padattr) nla_put_u64(skb, attrtype, value)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0))
+
+/* Linux 4.9 changed the second argument to a drm_file pointer */
+#define drm_vma_node_is_allowed(node, file_priv) drm_vma_node_is_allowed(node, (file_priv)->filp)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+#define refcount_read(r) atomic_read(r)
+#define drm_mm_insert_node(mm, node, size) drm_mm_insert_node(mm, node, size, 0, DRM_MM_SEARCH_DEFAULT)
+
+#define drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd) drm_helper_mode_fill_fb_struct(fb, mode_cmd)
+
+/*
+ * In Linux kernels >= 4.12 for x86, another level of page tables has been
+ * added. The added level (p4d) sits between pgd and pud, so when it
+ * doesn't exist, the pud_offset() function takes a pgd as a parameter
+ * instead of a p4d.
+ */
+#define p4d_t pgd_t
+#define p4d_offset(pgd, address) (pgd)
+#define p4d_none(p4d) (0)
+#define p4d_bad(p4d) (0)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)) */
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+
+#define drm_mode_object_get(obj) drm_mode_object_reference(obj)
+#define drm_mode_object_put(obj) drm_mode_object_unreference(obj)
+#define drm_connector_get(obj) drm_connector_reference(obj)
+#define drm_connector_put(obj) drm_connector_unreference(obj)
+#define drm_framebuffer_get(obj) drm_framebuffer_reference(obj)
+#define drm_framebuffer_put(obj) drm_framebuffer_unreference(obj)
+#define drm_gem_object_get(obj) drm_gem_object_reference(obj)
+#define drm_gem_object_put(obj) drm_gem_object_unreference(obj)
+#define __drm_gem_object_put(obj) __drm_gem_object_unreference(obj)
+#define drm_gem_object_put_unlocked(obj) drm_gem_object_unreference_unlocked(obj)
+#define drm_property_blob_get(obj) drm_property_reference_blob(obj)
+#define drm_property_blob_put(obj) drm_property_unreference_blob(obj)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0))
+
+#define drm_mode_object_find(dev, file_priv, id, type) drm_mode_object_find(dev, id, type)
+#define drm_encoder_find(dev, file_priv, id) drm_encoder_find(dev, id)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)) */
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0))
+
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
+ min_scale, max_scale, \
+ can_position, can_update_disabled) \
+ ({ \
+ const struct drm_rect __clip = { \
+ .x2 = crtc_state->crtc->mode.hdisplay, \
+ .y2 = crtc_state->crtc->mode.vdisplay, \
+ }; \
+ int __ret = drm_plane_helper_check_state(plane_state, \
+ &__clip, \
+ min_scale, max_scale, \
+ can_position, \
+ can_update_disabled); \
+ __ret; \
+ })
+
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
+
+#define drm_atomic_helper_check_plane_state(plane_state, crtc_state, \
+ min_scale, max_scale, \
+ can_position, can_update_disabled) \
+ ({ \
+ const struct drm_rect __clip = { \
+ .x2 = crtc_state->crtc->mode.hdisplay, \
+ .y2 = crtc_state->crtc->mode.vdisplay, \
+ }; \
+ int __ret = drm_atomic_helper_check_plane_state(plane_state, \
+ crtc_state, \
+ &__clip, \
+ min_scale, max_scale, \
+ can_position, \
+ can_update_disabled); \
+ __ret; \
+ })
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0)) */
+
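+/*
+ * Illustrative call site (a sketch; the plane/CRTC state variable names are
+ * assumptions) using the modern 4.16+ signature that the blocks above
+ * emulate on older kernels:
+ *
+ *   iRet = drm_atomic_helper_check_plane_state(psPlaneState, psCrtcState,
+ *                                              DRM_PLANE_HELPER_NO_SCALING,
+ *                                              DRM_PLANE_HELPER_NO_SCALING,
+ *                                              true, true);
+ */
+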
+#if defined(CONFIG_L4)
+
+/*
+ * Headers shouldn't normally be included by this file but this is a special
+ * case to access the memory translation API when running on the L4 ukernel
+ */
+#include <asm/api-l4env/api.h>
+
+#undef page_to_phys
+#define page_to_phys(x) l4x_virt_to_phys((void *)((phys_addr_t)page_to_pfn(x) << PAGE_SHIFT))
+
+#endif /* defined(CONFIG_L4) */
+
+#endif /* __KERNEL_COMPATIBILITY_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/kernel_nospec.h b/drivers/gpu/drm/img-rogue/1.10/kernel_nospec.h
new file mode 100644
index 00000000000000..e27a3ebc2ac6aa
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/kernel_nospec.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@Title Macro to limit CPU speculative execution in kernel code
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Per-version macros to allow code to seamlessly use older kernel
+             versions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KERNEL_NOSPEC_H__
+#define __KERNEL_NOSPEC_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 2) || \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 18)) || \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 81)) || \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && \
+ LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 118)))
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/nospec.h>
+#else
+#define array_index_nospec(index, size) (index)
+#endif
+
+/*
+ * For Ubuntu kernels, the features available for a given Linux version code
+ * may not match those in upstream kernels. This is the case for the
+ * availability of the array_index_nospec macro.
+ */
+#if !defined(array_index_nospec)
+#define array_index_nospec(index, size) (index)
+#endif
+
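+/*
+ * Typical (illustrative) use: after a bounds check, clamp the user-supplied
+ * index so it cannot be used speculatively out of range (names assumed):
+ *
+ *   if (ui32Idx < ui32Count)
+ *   {
+ *       ui32Idx = array_index_nospec(ui32Idx, ui32Count);
+ *       uiValue = paui32Table[ui32Idx];
+ *   }
+ */
+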
+#endif /* __KERNEL_NOSPEC_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/kernel_types.h b/drivers/gpu/drm/img-rogue/1.10/kernel_types.h
new file mode 100644
index 00000000000000..c93b59ebe2effe
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/kernel_types.h
@@ -0,0 +1,138 @@
+/*************************************************************************/ /*!
+@Title C99-compatible types and definitions for Linux kernel code
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+
+/* Limits of specified-width integer types */
+
+/* S8_MIN, etc were added in kernel version 3.14. The other versions are for
+ * earlier kernels. They can be removed once older kernels don't need to be
+ * supported.
+ */
+#ifdef S8_MIN
+ #define INT8_MIN S8_MIN
+#else
+ #define INT8_MIN (-128)
+#endif
+
+#ifdef S8_MAX
+ #define INT8_MAX S8_MAX
+#else
+ #define INT8_MAX 127
+#endif
+
+#ifdef U8_MAX
+ #define UINT8_MAX U8_MAX
+#else
+ #define UINT8_MAX 0xFF
+#endif
+
+#ifdef S16_MIN
+ #define INT16_MIN S16_MIN
+#else
+ #define INT16_MIN (-32768)
+#endif
+
+#ifdef S16_MAX
+ #define INT16_MAX S16_MAX
+#else
+ #define INT16_MAX 32767
+#endif
+
+#ifdef U16_MAX
+ #define UINT16_MAX U16_MAX
+#else
+ #define UINT16_MAX 0xFFFF
+#endif
+
+#ifdef S32_MIN
+ #define INT32_MIN S32_MIN
+#else
+ #define INT32_MIN (-2147483647 - 1)
+#endif
+
+#ifdef S32_MAX
+ #define INT32_MAX S32_MAX
+#else
+ #define INT32_MAX 2147483647
+#endif
+
+#ifdef U32_MAX
+ #define UINT32_MAX U32_MAX
+#else
+ #define UINT32_MAX 0xFFFFFFFF
+#endif
+
+#ifdef S64_MIN
+ #define INT64_MIN S64_MIN
+#else
+ #define INT64_MIN (-9223372036854775807LL)
+#endif
+
+#ifdef S64_MAX
+ #define INT64_MAX S64_MAX
+#else
+ #define INT64_MAX 9223372036854775807LL
+#endif
+
+#ifdef U64_MAX
+ #define UINT64_MAX U64_MAX
+#else
+ #define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL
+#endif
+
+/* Macros for integer constants */
+#define INT8_C S8_C
+#define UINT8_C U8_C
+#define INT16_C S16_C
+#define UINT16_C U16_C
+#define INT32_C S32_C
+#define UINT32_C U32_C
+#define INT64_C S64_C
+#define UINT64_C U64_C
+
+/* Format conversion of integer types <inttypes.h> */
+
+#define PRIX64 "llX"
+#define PRIx64 "llx"
+#define PRIu64 "llu"
+#define PRId64 "lld"
+
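+/*
+ * Illustrative use with printk (the variable name is an assumption):
+ *
+ *   printk(KERN_DEBUG "addr=0x%" PRIx64 "\n", ui64Addr);
+ */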
diff --git a/drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_defs_km.h b/drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_defs_km.h
new file mode 100644
index 00000000000000..f9a1814baec48c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_defs_km.h
@@ -0,0 +1,323 @@
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_bvnc_defs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/***************************************************
+* Auto generated file by rgxbvnc_tablegen.py *
+* This file should not be edited manually *
+****************************************************/
+
+#ifndef _RGX_BVNC_DEFS_KM_H_
+#define _RGX_BVNC_DEFS_KM_H_
+
+#include "img_types.h"
+
+#define BVNC_FIELD_WIDTH (16U)
+
+
+/******************************************************************************
+ * Mask and bit-position macros for features without values
+ *****************************************************************************/
+
+#define RGX_FEATURE_AXI_ACELITE_POS (0U)
+#define RGX_FEATURE_AXI_ACELITE_BIT_MASK (IMG_UINT64_C(0x0000000000000001))
+
+#define RGX_FEATURE_CLUSTER_GROUPING_POS (1U)
+#define RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK (IMG_UINT64_C(0x0000000000000002))
+
+#define RGX_FEATURE_COMPUTE_POS (2U)
+#define RGX_FEATURE_COMPUTE_BIT_MASK (IMG_UINT64_C(0x0000000000000004))
+
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_POS (3U)
+#define RGX_FEATURE_COMPUTE_MORTON_CAPABLE_BIT_MASK (IMG_UINT64_C(0x0000000000000008))
+
+#define RGX_FEATURE_COMPUTE_OVERLAP_POS (4U)
+#define RGX_FEATURE_COMPUTE_OVERLAP_BIT_MASK (IMG_UINT64_C(0x0000000000000010))
+
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_POS (5U)
+#define RGX_FEATURE_COMPUTE_OVERLAP_WITH_BARRIERS_BIT_MASK (IMG_UINT64_C(0x0000000000000020))
+
+#define RGX_FEATURE_DYNAMIC_DUST_POWER_POS (6U)
+#define RGX_FEATURE_DYNAMIC_DUST_POWER_BIT_MASK (IMG_UINT64_C(0x0000000000000040))
+
+#define RGX_FEATURE_FASTRENDER_DM_POS (7U)
+#define RGX_FEATURE_FASTRENDER_DM_BIT_MASK (IMG_UINT64_C(0x0000000000000080))
+
+#define RGX_FEATURE_GPU_VIRTUALISATION_POS (8U)
+#define RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK (IMG_UINT64_C(0x0000000000000100))
+
+#define RGX_FEATURE_GS_RTA_SUPPORT_POS (9U)
+#define RGX_FEATURE_GS_RTA_SUPPORT_BIT_MASK (IMG_UINT64_C(0x0000000000000200))
+
+#define RGX_FEATURE_META_DMA_POS (10U)
+#define RGX_FEATURE_META_DMA_BIT_MASK (IMG_UINT64_C(0x0000000000000400))
+
+#define RGX_FEATURE_MIPS_POS (11U)
+#define RGX_FEATURE_MIPS_BIT_MASK (IMG_UINT64_C(0x0000000000000800))
+
+#define RGX_FEATURE_PBE2_IN_XE_POS (12U)
+#define RGX_FEATURE_PBE2_IN_XE_BIT_MASK (IMG_UINT64_C(0x0000000000001000))
+
+#define RGX_FEATURE_PBVNC_COREID_REG_POS (13U)
+#define RGX_FEATURE_PBVNC_COREID_REG_BIT_MASK (IMG_UINT64_C(0x0000000000002000))
+
+#define RGX_FEATURE_PDS_PER_DUST_POS (14U)
+#define RGX_FEATURE_PDS_PER_DUST_BIT_MASK (IMG_UINT64_C(0x0000000000004000))
+
+#define RGX_FEATURE_PDS_TEMPSIZE8_POS (15U)
+#define RGX_FEATURE_PDS_TEMPSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000000008000))
+
+#define RGX_FEATURE_PERFBUS_POS (16U)
+#define RGX_FEATURE_PERFBUS_BIT_MASK (IMG_UINT64_C(0x0000000000010000))
+
+#define RGX_FEATURE_RAY_TRACING_DEPRECATED_POS (17U)
+#define RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK (IMG_UINT64_C(0x0000000000020000))
+
+#define RGX_FEATURE_ROGUEXE_POS (18U)
+#define RGX_FEATURE_ROGUEXE_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+
+#define RGX_FEATURE_S7_CACHE_HIERARCHY_POS (19U)
+#define RGX_FEATURE_S7_CACHE_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000000000080000))
+
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_POS (20U)
+#define RGX_FEATURE_S7_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000000000100000))
+
+#define RGX_FEATURE_SCALABLE_VDM_GPP_POS (21U)
+#define RGX_FEATURE_SCALABLE_VDM_GPP_BIT_MASK (IMG_UINT64_C(0x0000000000200000))
+
+#define RGX_FEATURE_SIGNAL_SNOOPING_POS (22U)
+#define RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK (IMG_UINT64_C(0x0000000000400000))
+
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_POS (23U)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_BIT_MASK (IMG_UINT64_C(0x0000000000800000))
+
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_POS (24U)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V1_BIT_MASK (IMG_UINT64_C(0x0000000001000000))
+
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_POS (25U)
+#define RGX_FEATURE_SIMPLE_INTERNAL_PARAMETER_FORMAT_V2_BIT_MASK (IMG_UINT64_C(0x0000000002000000))
+
+#define RGX_FEATURE_SINGLE_BIF_POS (26U)
+#define RGX_FEATURE_SINGLE_BIF_BIT_MASK (IMG_UINT64_C(0x0000000004000000))
+
+#define RGX_FEATURE_SLCSIZE8_POS (27U)
+#define RGX_FEATURE_SLCSIZE8_BIT_MASK (IMG_UINT64_C(0x0000000008000000))
+
+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_POS (28U)
+#define RGX_FEATURE_SLC_HYBRID_CACHELINE_64_128_BIT_MASK (IMG_UINT64_C(0x0000000010000000))
+
+#define RGX_FEATURE_SLC_VIVT_POS (29U)
+#define RGX_FEATURE_SLC_VIVT_BIT_MASK (IMG_UINT64_C(0x0000000020000000))
+
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_POS (30U)
+#define RGX_FEATURE_SYS_BUS_SECURE_RESET_BIT_MASK (IMG_UINT64_C(0x0000000040000000))
+
+#define RGX_FEATURE_TESSELLATION_POS (31U)
+#define RGX_FEATURE_TESSELLATION_BIT_MASK (IMG_UINT64_C(0x0000000080000000))
+
+#define RGX_FEATURE_TLA_POS (32U)
+#define RGX_FEATURE_TLA_BIT_MASK (IMG_UINT64_C(0x0000000100000000))
+
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_POS (33U)
+#define RGX_FEATURE_TPU_CEM_DATAMASTER_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000000200000000))
+
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_POS (34U)
+#define RGX_FEATURE_TPU_DM_GLOBAL_REGISTERS_BIT_MASK (IMG_UINT64_C(0x0000000400000000))
+
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_POS (35U)
+#define RGX_FEATURE_TPU_FILTERING_MODE_CONTROL_BIT_MASK (IMG_UINT64_C(0x0000000800000000))
+
+#define RGX_FEATURE_VDM_DRAWINDIRECT_POS (36U)
+#define RGX_FEATURE_VDM_DRAWINDIRECT_BIT_MASK (IMG_UINT64_C(0x0000001000000000))
+
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_POS (37U)
+#define RGX_FEATURE_VDM_OBJECT_LEVEL_LLS_BIT_MASK (IMG_UINT64_C(0x0000002000000000))
+
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY_POS (38U)
+#define RGX_FEATURE_XE_MEMORY_HIERARCHY_BIT_MASK (IMG_UINT64_C(0x0000004000000000))
+
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_POS (39U)
+#define RGX_FEATURE_XT_TOP_INFRASTRUCTURE_BIT_MASK (IMG_UINT64_C(0x0000008000000000))
+
+#define RGX_FEATURE_TDM_PDS_CHECKSUM_POS (40U)
+#define RGX_FEATURE_TDM_PDS_CHECKSUM_BIT_MASK (IMG_UINT64_C(0x0000010000000000))
+
+#define RGX_FEATURE_PERF_COUNTER_BATCH_POS (41U)
+#define RGX_FEATURE_PERF_COUNTER_BATCH_BIT_MASK (IMG_UINT64_C(0x0000020000000000))
+
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7_POS (42U)
+#define RGX_FEATURE_DUST_POWER_ISLAND_S7_BIT_MASK (IMG_UINT64_C(0x0000040000000000))
+
+
+/******************************************************************************
+ * Features with values indexes
+ *****************************************************************************/
+
+typedef enum _RGX_FEATURE_WITH_VALUE_INDEX_ {
+ RGX_FEATURE_PHYS_BUS_WIDTH_IDX,
+ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_IDX,
+ RGX_FEATURE_META_IDX,
+ RGX_FEATURE_META_DMA_CHANNEL_COUNT_IDX,
+ RGX_FEATURE_META_COREMEM_BANKS_IDX,
+ RGX_FEATURE_META_COREMEM_SIZE_IDX,
+ RGX_FEATURE_FBCDC_ARCHITECTURE_IDX,
+ RGX_FEATURE_FBCDC_ALGORITHM_IDX,
+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_IDX,
+ RGX_FEATURE_NUM_CLUSTERS_IDX,
+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX,
+ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_IDX,
+ RGX_FEATURE_SLC_BANKS_IDX,
+ RGX_FEATURE_SCALABLE_TE_ARCH_IDX,
+ RGX_FEATURE_SCALABLE_VCE_IDX,
+ RGX_FEATURE_NUM_RASTER_PIPES_IDX,
+ RGX_FEATURE_SLC_SIZE_IN_BYTES_IDX,
+ RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_IDX,
+ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_IDX,
+ RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_IDX,
+ RGX_FEATURE_WITH_VALUES_MAX_IDX,
+} RGX_FEATURE_WITH_VALUE_INDEX;
+
+
+/******************************************************************************
+ * Mask and bit-position macros for ERNs and BRNs
+ *****************************************************************************/
+
+#define HW_ERN_36400_POS (0U)
+#define HW_ERN_36400_BIT_MASK (IMG_UINT64_C(0x0000000000000001))
+
+#define FIX_HW_BRN_38344_POS (1U)
+#define FIX_HW_BRN_38344_BIT_MASK (IMG_UINT64_C(0x0000000000000002))
+
+#define HW_ERN_42290_POS (2U)
+#define HW_ERN_42290_BIT_MASK (IMG_UINT64_C(0x0000000000000004))
+
+#define FIX_HW_BRN_42321_POS (3U)
+#define FIX_HW_BRN_42321_BIT_MASK (IMG_UINT64_C(0x0000000000000008))
+
+#define FIX_HW_BRN_42480_POS (4U)
+#define FIX_HW_BRN_42480_BIT_MASK (IMG_UINT64_C(0x0000000000000010))
+
+#define HW_ERN_42606_POS (5U)
+#define HW_ERN_42606_BIT_MASK (IMG_UINT64_C(0x0000000000000020))
+
+#define FIX_HW_BRN_43276_POS (6U)
+#define FIX_HW_BRN_43276_BIT_MASK (IMG_UINT64_C(0x0000000000000040))
+
+#define FIX_HW_BRN_44455_POS (7U)
+#define FIX_HW_BRN_44455_BIT_MASK (IMG_UINT64_C(0x0000000000000080))
+
+#define FIX_HW_BRN_44871_POS (8U)
+#define FIX_HW_BRN_44871_BIT_MASK (IMG_UINT64_C(0x0000000000000100))
+
+#define HW_ERN_45914_POS (9U)
+#define HW_ERN_45914_BIT_MASK (IMG_UINT64_C(0x0000000000000200))
+
+#define HW_ERN_46066_POS (10U)
+#define HW_ERN_46066_BIT_MASK (IMG_UINT64_C(0x0000000000000400))
+
+#define HW_ERN_47025_POS (11U)
+#define HW_ERN_47025_BIT_MASK (IMG_UINT64_C(0x0000000000000800))
+
+#define HW_ERN_50539_POS (12U)
+#define HW_ERN_50539_BIT_MASK (IMG_UINT64_C(0x0000000000001000))
+
+#define FIX_HW_BRN_50767_POS (13U)
+#define FIX_HW_BRN_50767_BIT_MASK (IMG_UINT64_C(0x0000000000002000))
+
+#define FIX_HW_BRN_52563_POS (14U)
+#define FIX_HW_BRN_52563_BIT_MASK (IMG_UINT64_C(0x0000000000004000))
+
+#define FIX_HW_BRN_54441_POS (15U)
+#define FIX_HW_BRN_54441_BIT_MASK (IMG_UINT64_C(0x0000000000008000))
+
+#define FIX_HW_BRN_57193_POS (16U)
+#define FIX_HW_BRN_57193_BIT_MASK (IMG_UINT64_C(0x0000000000010000))
+
+#define FIX_HW_BRN_57289_POS (17U)
+#define FIX_HW_BRN_57289_BIT_MASK (IMG_UINT64_C(0x0000000000020000))
+
+#define HW_ERN_57596_POS (18U)
+#define HW_ERN_57596_BIT_MASK (IMG_UINT64_C(0x0000000000040000))
+
+#define FIX_HW_BRN_60084_POS (19U)
+#define FIX_HW_BRN_60084_BIT_MASK (IMG_UINT64_C(0x0000000000080000))
+
+#define HW_ERN_61389_POS (20U)
+#define HW_ERN_61389_BIT_MASK (IMG_UINT64_C(0x0000000000100000))
+
+#define FIX_HW_BRN_61450_POS (21U)
+#define FIX_HW_BRN_61450_BIT_MASK (IMG_UINT64_C(0x0000000000200000))
+
+#define FIX_HW_BRN_62204_POS (22U)
+#define FIX_HW_BRN_62204_BIT_MASK (IMG_UINT64_C(0x0000000000400000))
+
+#define FIX_HW_BRN_63027_POS (23U)
+#define FIX_HW_BRN_63027_BIT_MASK (IMG_UINT64_C(0x0000000000800000))
+
+#define FIX_HW_BRN_63142_POS (24U)
+#define FIX_HW_BRN_63142_BIT_MASK (IMG_UINT64_C(0x0000000001000000))
+
+#define FIX_HW_BRN_65273_POS (25U)
+#define FIX_HW_BRN_65273_BIT_MASK (IMG_UINT64_C(0x0000000002000000))
+
+#define FIX_HW_BRN_63553_POS (26U)
+#define FIX_HW_BRN_63553_BIT_MASK (IMG_UINT64_C(0x0000000004000000))
+
+#define HW_ERN_66622_POS (27U)
+#define HW_ERN_66622_BIT_MASK (IMG_UINT64_C(0x0000000008000000))
+
+#define FIX_HW_BRN_64839_POS (28U)
+#define FIX_HW_BRN_64839_BIT_MASK (IMG_UINT64_C(0x0000000010000000))
+
+#define FIX_HW_BRN_67349_POS (29U)
+#define FIX_HW_BRN_67349_BIT_MASK (IMG_UINT64_C(0x0000000020000000))
+
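+/* Usage sketch (illustrative, not part of the generated interface): the
+ * *_POS / *_BIT_MASK pairs above are intended for testing a 64-bit ERN/BRN
+ * word, such as the per-BVNC word built from gaErnsBrns in
+ * rgx_bvnc_table_km.h:
+ *
+ *   static IMG_BOOL BVNCHasBRN38344(IMG_UINT64 ui64ErnsBrns)
+ *   {
+ *       return (ui64ErnsBrns & FIX_HW_BRN_38344_BIT_MASK) != 0;
+ *   }
+ */
+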
+/* Value used to pad unavailable entries in the per-feature value arrays */
+#define RGX_FEATURE_VALUE_INVALID (0xFFFFFFFE)
+
+/* Value used to mark a feature with a value as disabled for a specific BVNC */
+#define RGX_FEATURE_VALUE_DISABLED (0xFFFFFFFF)
+
+
+
+#endif /*_RGX_BVNC_DEFS_KM_H_ */
+
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_table_km.h b/drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_table_km.h
new file mode 100644
index 00000000000000..eec72d301c4000
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km/rgx_bvnc_table_km.h
@@ -0,0 +1,376 @@
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_bvnc_table_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/***************************************************
+* Auto generated file by rgxbvnc_tablegen.py *
+* This file should not be edited manually *
+****************************************************/
+
+#ifndef _RGX_BVNC_TABLE_KM_H_
+#define _RGX_BVNC_TABLE_KM_H_
+
+#include "img_types.h"
+#include "rgxdefs_km.h"
+
+#ifndef _RGXBVNC_C_
+#error "This file should only be included from rgxbvnc.c"
+#endif
+
+
+/******************************************************************************
+ * Defines and arrays of the possible values for each feature that has
+ * values, used for decoding the corresponding per-BVNC values
+ *****************************************************************************/
+
+#define RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values[RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 32, 36, 40, };
+
+#define RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX (2)
+static const IMG_UINT16 aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values[RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 40, };
+
+#define RGX_FEATURE_META_MAX_VALUE_IDX (5)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_values[RGX_FEATURE_META_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, LTP217, LTP218, MTP218, MTP219, };
+
+#define RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX (2)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values[RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 4, };
+
+#define RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX (2)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_BANKS_values[RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
+
+#define RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX (5)
+static const IMG_UINT16 aui16_RGX_FEATURE_META_COREMEM_SIZE_values[RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 32, 64, 256, };
+
+#define RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values[RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+#define RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_FBCDC_ALGORITHM_values[RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, };
+
+#define RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX (2)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values[RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 512, };
+
+#define RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX (7)
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_CLUSTERS_values[RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, 6, 8, 16, };
+
+#define RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX (11)
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values[RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 3, 4, 6, 7, 8, 12, 16, 32, };
+
+#define RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values[RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 128, 256, 512, };
+
+#define RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_BANKS_values[RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+#define RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values[RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+#define RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX (4)
+static const IMG_UINT16 aui16_RGX_FEATURE_SCALABLE_VCE_values[RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, 4, };
+
+#define RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX (3)
+static const IMG_UINT16 aui16_RGX_FEATURE_NUM_RASTER_PIPES_values[RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX (6)
+static const IMG_UINT16 aui16_RGX_FEATURE_SLC_SIZE_IN_BYTES_values[RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 0, 16, 64, 128, 256, };
+
+#define RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX (3)
+static const IMG_UINT16 aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values[RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 1, 2, };
+
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX (2)
+static const IMG_UINT16 aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values[RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 2, };
+
+#define RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_MAX_VALUE_IDX (2)
+static const IMG_UINT16 aui16_RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_values[RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_MAX_VALUE_IDX] = {(IMG_UINT16)RGX_FEATURE_VALUE_DISABLED, 8, };
+
+
+/******************************************************************************
+ * Table of pointers to the value array of each feature that has values.
+ * Indexed using enum RGX_FEATURE_WITH_VALUE_INDEX from rgx_bvnc_defs_km.h
+ *****************************************************************************/
+
+static const IMG_UINT16 *gaFeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX] = {
+ aui16_RGX_FEATURE_PHYS_BUS_WIDTH_values,
+ aui16_RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_values,
+ aui16_RGX_FEATURE_META_values,
+ aui16_RGX_FEATURE_META_DMA_CHANNEL_COUNT_values,
+ aui16_RGX_FEATURE_META_COREMEM_BANKS_values,
+ aui16_RGX_FEATURE_META_COREMEM_SIZE_values,
+ aui16_RGX_FEATURE_FBCDC_ARCHITECTURE_values,
+ aui16_RGX_FEATURE_FBCDC_ALGORITHM_values,
+ aui16_RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_values,
+ aui16_RGX_FEATURE_NUM_CLUSTERS_values,
+ aui16_RGX_FEATURE_NUM_ISP_IPP_PIPES_values,
+ aui16_RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_values,
+ aui16_RGX_FEATURE_SLC_BANKS_values,
+ aui16_RGX_FEATURE_SCALABLE_TE_ARCH_values,
+ aui16_RGX_FEATURE_SCALABLE_VCE_values,
+ aui16_RGX_FEATURE_NUM_RASTER_PIPES_values,
+ aui16_RGX_FEATURE_SLC_SIZE_IN_BYTES_values,
+ aui16_RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_values,
+ aui16_RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_values,
+ aui16_RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_values,
+};
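+
+/* Lookup sketch (illustrative): given a feature index from
+ * RGX_FEATURE_WITH_VALUE_INDEX and a per-BVNC value index decoded with the
+ * bit-position/mask tables below, the actual value is read as:
+ *
+ *   IMG_UINT16 ui16NumClusters =
+ *       gaFeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX][ui32ValueIdx];
+ */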
+
+
+/******************************************************************************
+ * Array containing the length of each value array above.
+ * Used for validating indices into the aui16_<FEATURE>_values arrays
+ * defined above
+ *****************************************************************************/
+
+
+static const IMG_UINT16 gaFeaturesValuesMaxIndexes[] = {
+ RGX_FEATURE_PHYS_BUS_WIDTH_MAX_VALUE_IDX,
+ RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_MAX_VALUE_IDX,
+ RGX_FEATURE_META_MAX_VALUE_IDX,
+ RGX_FEATURE_META_DMA_CHANNEL_COUNT_MAX_VALUE_IDX,
+ RGX_FEATURE_META_COREMEM_BANKS_MAX_VALUE_IDX,
+ RGX_FEATURE_META_COREMEM_SIZE_MAX_VALUE_IDX,
+ RGX_FEATURE_FBCDC_ARCHITECTURE_MAX_VALUE_IDX,
+ RGX_FEATURE_FBCDC_ALGORITHM_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_CLUSTERS_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_ISP_IPP_PIPES_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_BANKS_MAX_VALUE_IDX,
+ RGX_FEATURE_SCALABLE_TE_ARCH_MAX_VALUE_IDX,
+ RGX_FEATURE_SCALABLE_VCE_MAX_VALUE_IDX,
+ RGX_FEATURE_NUM_RASTER_PIPES_MAX_VALUE_IDX,
+ RGX_FEATURE_SLC_SIZE_IN_BYTES_MAX_VALUE_IDX,
+ RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_MAX_VALUE_IDX,
+ RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_MAX_VALUE_IDX,
+ RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_MAX_VALUE_IDX,
+};
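+
+/* Bounds-check sketch (illustrative): a value index decoded from the packed
+ * per-BVNC feature word should be validated against this table before it is
+ * used to index the value arrays:
+ *
+ *   if (ui32ValueIdx < gaFeaturesValuesMaxIndexes[RGX_FEATURE_NUM_CLUSTERS_IDX])
+ *       ui16NumClusters = gaFeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX][ui32ValueIdx];
+ */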
+
+
+/******************************************************************************
+ * Bit-positions for features with values
+ *****************************************************************************/
+
+static const IMG_UINT16 aui16FeaturesWithValuesBitPositions[] = {
+ (0U), /* RGX_FEATURE_PHYS_BUS_WIDTH_POS */
+ (3U), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_POS */
+ (5U), /* RGX_FEATURE_META_POS */
+ (8U), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_POS */
+ (10U), /* RGX_FEATURE_META_COREMEM_BANKS_POS */
+ (12U), /* RGX_FEATURE_META_COREMEM_SIZE_POS */
+ (15U), /* RGX_FEATURE_FBCDC_ARCHITECTURE_POS */
+ (18U), /* RGX_FEATURE_FBCDC_ALGORITHM_POS */
+ (21U), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_POS */
+ (23U), /* RGX_FEATURE_NUM_CLUSTERS_POS */
+ (26U), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_POS */
+ (30U), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_POS */
+ (33U), /* RGX_FEATURE_SLC_BANKS_POS */
+ (36U), /* RGX_FEATURE_SCALABLE_TE_ARCH_POS */
+ (39U), /* RGX_FEATURE_SCALABLE_VCE_POS */
+ (42U), /* RGX_FEATURE_NUM_RASTER_PIPES_POS */
+ (44U), /* RGX_FEATURE_SLC_SIZE_IN_BYTES_POS */
+ (47U), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_POS */
+ (49U), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_POS */
+ (51U), /* RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_POS */
+};
+
+
+/******************************************************************************
+ * Bit-masks for features with values
+ *****************************************************************************/
+
+static const IMG_UINT64 aui64FeaturesWithValuesBitMasks[] = {
+ (IMG_UINT64_C(0x0000000000000007)), /* RGX_FEATURE_PHYS_BUS_WIDTH_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000018)), /* RGX_FEATURE_VIRTUAL_ADDRESS_SPACE_BITS_BIT_MASK */
+ (IMG_UINT64_C(0x00000000000000E0)), /* RGX_FEATURE_META_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000300)), /* RGX_FEATURE_META_DMA_CHANNEL_COUNT_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000000C00)), /* RGX_FEATURE_META_COREMEM_BANKS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000007000)), /* RGX_FEATURE_META_COREMEM_SIZE_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000038000)), /* RGX_FEATURE_FBCDC_ARCHITECTURE_BIT_MASK */
+ (IMG_UINT64_C(0x00000000001C0000)), /* RGX_FEATURE_FBCDC_ALGORITHM_BIT_MASK */
+ (IMG_UINT64_C(0x0000000000600000)), /* RGX_FEATURE_SLC_CACHE_LINE_SIZE_BITS_BIT_MASK */
+ (IMG_UINT64_C(0x0000000003800000)), /* RGX_FEATURE_NUM_CLUSTERS_BIT_MASK */
+ (IMG_UINT64_C(0x000000003C000000)), /* RGX_FEATURE_NUM_ISP_IPP_PIPES_BIT_MASK */
+ (IMG_UINT64_C(0x00000001C0000000)), /* RGX_FEATURE_SLC_SIZE_IN_KILOBYTES_BIT_MASK */
+ (IMG_UINT64_C(0x0000000E00000000)), /* RGX_FEATURE_SLC_BANKS_BIT_MASK */
+ (IMG_UINT64_C(0x0000007000000000)), /* RGX_FEATURE_SCALABLE_TE_ARCH_BIT_MASK */
+ (IMG_UINT64_C(0x0000038000000000)), /* RGX_FEATURE_SCALABLE_VCE_BIT_MASK */
+ (IMG_UINT64_C(0x00000C0000000000)), /* RGX_FEATURE_NUM_RASTER_PIPES_BIT_MASK */
+ (IMG_UINT64_C(0x0000700000000000)), /* RGX_FEATURE_SLC_SIZE_IN_BYTES_BIT_MASK */
+ (IMG_UINT64_C(0x0001800000000000)), /* RGX_FEATURE_SIMPLE_PARAMETER_FORMAT_VERSION_BIT_MASK */
+ (IMG_UINT64_C(0x0006000000000000)), /* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT_BIT_MASK */
+ (IMG_UINT64_C(0x0018000000000000)), /* RGX_FEATURE_GPU_VIRTUALISATION_NUM_OS_BIT_MASK */
+};
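+
+/* Extraction sketch (illustrative): combining the two tables above, the
+ * value index for feature i is recovered from a packed 64-bit feature word
+ * as:
+ *
+ *   IMG_UINT32 ui32ValueIdx =
+ *       (IMG_UINT32)((ui64PackedFeatures & aui64FeaturesWithValuesBitMasks[i])
+ *                    >> aui16FeaturesWithValuesBitPositions[i]);
+ */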
+
+
+/******************************************************************************
+ * Table mapping each BVNC configuration to its bitmasks for features and
+ * features with values
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaFeatures[][3]=
+{
+ { IMG_UINT64_C(0x0001000000020000), IMG_UINT64_C(0x0000000100010215), IMG_UINT64_C(0x000040000d20106b) }, /* 1.0.2.0 */
+ { IMG_UINT64_C(0x0001000000020014), IMG_UINT64_C(0x0000000100010215), IMG_UINT64_C(0x000040000d20106b) }, /* 1.0.2.20 */
+ { IMG_UINT64_C(0x0004000000020033), IMG_UINT64_C(0x0000028b0001025f), IMG_UINT64_C(0x000040001d29204b) }, /* 4.0.2.51 */
+ { IMG_UINT64_C(0x000a000000040019), IMG_UINT64_C(0x0000063ef079c7bf), IMG_UINT64_C(0x000a0096dda9c58b) }, /* 10.0.4.25 */
+ { IMG_UINT64_C(0x0016000000150010), IMG_UINT64_C(0x000000004d853b05), IMG_UINT64_C(0x0008840208a00009) }, /* 22.0.21.16 */
+ { IMG_UINT64_C(0x0016000000360019), IMG_UINT64_C(0x0000000045853b05), IMG_UINT64_C(0x0008b4020ca00009) }, /* 22.0.54.25 */
+ { IMG_UINT64_C(0x001600000036001e), IMG_UINT64_C(0x0000000045853b05), IMG_UINT64_C(0x0008b40210a00009) }, /* 22.0.54.30 */
+ { IMG_UINT64_C(0x0016000000680012), IMG_UINT64_C(0x0000000045853b05), IMG_UINT64_C(0x0008b40218a0000a) }, /* 22.0.104.18 */
+ { IMG_UINT64_C(0x00180000006800cb), IMG_UINT64_C(0x0000014042853b85), IMG_UINT64_C(0x0009340214ac800a) }, /* 24.0.104.203 */
+ { IMG_UINT64_C(0x0018000000d000cb), IMG_UINT64_C(0x0000014042853b85), IMG_UINT64_C(0x00094400212c800a) }, /* 24.0.208.203 */
+ { IMG_UINT64_C(0x0018000000d001f7), IMG_UINT64_C(0x0000014042853b85), IMG_UINT64_C(0x00094400212c800a) }, /* 24.0.208.503 */
+ { IMG_UINT64_C(0x0018000000d001f8), IMG_UINT64_C(0x0000014042853b85), IMG_UINT64_C(0x00094800212c800a) }, /* 24.0.208.504 */
+};
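+
+/* Layout note (by inspection of the rows above): column 0 appears to pack
+ * the BVNC as four 16-bit fields, B.V.N.C from most to least significant --
+ * e.g. IMG_UINT64_C(0x0016000000360019) decodes to 22.0.54.25 -- while
+ * columns 1 and 2 hold the feature bitmask and the packed feature-value word
+ * used with the tables above. */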
+
+/******************************************************************************
+ * Table mapping each BVNC configuration to its bitmask of ERNs/BRNs
+ *****************************************************************************/
+
+
+static const IMG_UINT64 gaErnsBrns[][2]=
+{
+ { IMG_UINT64_C(0x0001003000020000), IMG_UINT64_C(0x000000000000808b) }, /* 1.48.2.0 */
+ { IMG_UINT64_C(0x0001004b00020014), IMG_UINT64_C(0x0000000000008089) }, /* 1.75.2.20 */
+ { IMG_UINT64_C(0x0004001d00020033), IMG_UINT64_C(0x000000000101a025) }, /* 4.29.2.51 */
+ { IMG_UINT64_C(0x0004002800020033), IMG_UINT64_C(0x000000000101a025) }, /* 4.40.2.51 */
+ { IMG_UINT64_C(0x000a002100040019), IMG_UINT64_C(0x0000000010401e25) }, /* 10.33.4.25 */
+ { IMG_UINT64_C(0x0016001e00360019), IMG_UINT64_C(0x0000000026ac0161) }, /* 22.30.54.25 */
+ { IMG_UINT64_C(0x001600280036001e), IMG_UINT64_C(0x0000000026ac0161) }, /* 22.40.54.30 */
+ { IMG_UINT64_C(0x0016003100150010), IMG_UINT64_C(0x0000000026ac0021) }, /* 22.49.21.16 */
+ { IMG_UINT64_C(0x001600440036001e), IMG_UINT64_C(0x0000000026940161) }, /* 22.68.54.30 */
+ { IMG_UINT64_C(0x0016005700680012), IMG_UINT64_C(0x0000000024140161) }, /* 22.87.104.18 */
+ { IMG_UINT64_C(0x0018001a006800cb), IMG_UINT64_C(0x0000000024040165) }, /* 24.26.104.203 */
+ { IMG_UINT64_C(0x0018001b00d000cb), IMG_UINT64_C(0x0000000024040165) }, /* 24.27.208.203 */
+ { IMG_UINT64_C(0x0018001e00d001f7), IMG_UINT64_C(0x0000000024040165) }, /* 24.30.208.503 */
+ { IMG_UINT64_C(0x0018002800d001f8), IMG_UINT64_C(0x0000000024040165) }, /* 24.40.208.504 */
+ { IMG_UINT64_C(0x0018003200d001f8), IMG_UINT64_C(0x000000002c040165) }, /* 24.50.208.504 */
+};
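+
+/* Layout note (by inspection): column 0 packs B.V.N.C exactly as in
+ * gaFeatures, but with the V field populated -- e.g.
+ * IMG_UINT64_C(0x0001003000020000) decodes to 1.48.2.0 -- and column 1 is
+ * the ERN/BRN word tested with the *_POS / *_BIT_MASK macros from
+ * rgx_bvnc_defs_km.h. */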
+
+#if defined(DEBUG)
+
+#define FEATURE_NO_VALUES_NAMES_MAX_IDX (43)
+
+static const IMG_PCHAR gaszFeaturesNoValuesNames[FEATURE_NO_VALUES_NAMES_MAX_IDX] =
+{
+ "AXI_ACELITE",
+ "CLUSTER_GROUPING",
+ "COMPUTE",
+ "COMPUTE_MORTON_CAPABLE",
+ "COMPUTE_OVERLAP",
+ "COMPUTE_OVERLAP_WITH_BARRIERS",
+ "DYNAMIC_DUST_POWER",
+ "FASTRENDER_DM",
+ "GPU_VIRTUALISATION",
+ "GS_RTA_SUPPORT",
+ "META_DMA",
+ "MIPS",
+ "PBE2_IN_XE",
+ "PBVNC_COREID_REG",
+ "PDS_PER_DUST",
+ "PDS_TEMPSIZE8",
+ "PERFBUS",
+ "RAY_TRACING_DEPRECATED",
+ "ROGUEXE",
+ "S7_CACHE_HIERARCHY",
+ "S7_TOP_INFRASTRUCTURE",
+ "SCALABLE_VDM_GPP",
+ "SIGNAL_SNOOPING",
+ "SIMPLE_INTERNAL_PARAMETER_FORMAT",
+ "SIMPLE_INTERNAL_PARAMETER_FORMAT_V1",
+ "SIMPLE_INTERNAL_PARAMETER_FORMAT_V2",
+ "SINGLE_BIF",
+ "SLCSIZE8",
+ "SLC_HYBRID_CACHELINE_64_128",
+ "SLC_VIVT",
+ "SYS_BUS_SECURE_RESET",
+ "TESSELLATION",
+ "TLA",
+ "TPU_CEM_DATAMASTER_GLOBAL_REGISTERS",
+ "TPU_DM_GLOBAL_REGISTERS",
+ "TPU_FILTERING_MODE_CONTROL",
+ "VDM_DRAWINDIRECT",
+ "VDM_OBJECT_LEVEL_LLS",
+ "XE_MEMORY_HIERARCHY",
+ "XT_TOP_INFRASTRUCTURE",
+ "TDM_PDS_CHECKSUM",
+ "PERF_COUNTER_BATCH",
+ "DUST_POWER_ISLAND_S7",
+};
+
+#define ERNSBRNS_IDS_MAX_IDX (30)
+
+static const IMG_UINT32 gaui64ErnsBrnsIDs[ERNSBRNS_IDS_MAX_IDX] =
+{
+ 36400,
+ 38344,
+ 42290,
+ 42321,
+ 42480,
+ 42606,
+ 43276,
+ 44455,
+ 44871,
+ 45914,
+ 46066,
+ 47025,
+ 50539,
+ 50767,
+ 52563,
+ 54441,
+ 57193,
+ 57289,
+ 57596,
+ 60084,
+ 61389,
+ 61450,
+ 62204,
+ 63027,
+ 63142,
+ 65273,
+ 63553,
+ 66622,
+ 64839,
+ 67349,
+};
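+
+/* Note (illustrative): despite the "ui64" prefix, the IDs above are 32-bit,
+ * and their order appears to follow the *_POS bit positions in
+ * rgx_bvnc_defs_km.h (bit 0 -> 36400, bit 1 -> 38344, ...), which allows a
+ * debug loop such as:
+ *
+ *   for (i = 0; i < ERNSBRNS_IDS_MAX_IDX; i++)
+ *       if (ui64ErnsBrns & (IMG_UINT64_C(1) << i))
+ *           PVR_LOG(("ERN/BRN %u present", gaui64ErnsBrnsIDs[i]));
+ */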
+
+#endif /* defined(DEBUG) */
+
+#endif /*_RGX_BVNC_TABLE_KM_H_ */
+
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km/rgx_cr_defs_km.h b/drivers/gpu/drm/img-rogue/1.10/km/rgx_cr_defs_km.h
new file mode 100644
index 00000000000000..64e8c6b9824840
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km/rgx_cr_defs_km.h
@@ -0,0 +1,5608 @@
+/*************************************************************************/ /*!
+@Title Hardware definition file rgx_cr_defs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+
+#if !defined(__IMG_EXPLICIT_INCLUDE_HWDEFS)
+#error This file may only be included if explicitly defined
+#endif
+
+#ifndef _RGX_CR_DEFS_KM_H_
+#define _RGX_CR_DEFS_KM_H_
+
+#include "img_types.h"
+
+
+#define RGX_CR_DEFS_KM_REVISION 1
+
+/*
+ Register RGX_CR_RASTERISATION_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_INDIRECT (0x8238U)
+#define RGX_CR_RASTERISATION_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
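+/* Field-access sketch (illustrative): throughout this file each register
+ * field is described by a _SHIFT and a _CLRMSK (all bits except the field),
+ * so a field is typically written as:
+ *
+ *   ui32Reg = (ui32Reg & RGX_CR_RASTERISATION_INDIRECT_ADDRESS_CLRMSK) |
+ *             (ui32Unit << RGX_CR_RASTERISATION_INDIRECT_ADDRESS_SHIFT);
+ */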
+
+/*
+ Register RGX_CR_PBE_INDIRECT
+*/
+#define RGX_CR_PBE_INDIRECT (0x83E0U)
+#define RGX_CR_PBE_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_PBE_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_PBE_PERF_INDIRECT
+*/
+#define RGX_CR_PBE_PERF_INDIRECT (0x83D8U)
+#define RGX_CR_PBE_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_PBE_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_TPU_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_PERF_INDIRECT (0x83F0U)
+#define RGX_CR_TPU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TPU_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF_INDIRECT
+*/
+#define RGX_CR_RASTERISATION_PERF_INDIRECT (0x8318U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF_INDIRECT
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT (0x8028U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_USC_PERF_INDIRECT
+*/
+#define RGX_CR_USC_PERF_INDIRECT (0x8030U)
+#define RGX_CR_USC_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_USC_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_INDIRECT (0x8388U)
+#define RGX_CR_BLACKPEARL_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF_INDIRECT
+*/
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT (0x83F8U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_TEXAS3_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS3_PERF_INDIRECT (0x83D0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TEXAS3_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFF8U)
+
+
+/*
+ Register RGX_CR_TEXAS_PERF_INDIRECT
+*/
+#define RGX_CR_TEXAS_PERF_INDIRECT (0x8288U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_BX_TU_PERF_INDIRECT
+*/
+#define RGX_CR_BX_TU_PERF_INDIRECT (0xC900U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_SHIFT (0U)
+#define RGX_CR_BX_TU_PERF_INDIRECT_ADDRESS_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_CLK_CTRL
+*/
+#define RGX_CR_CLK_CTRL (0x0000U)
+#define RGX_CR_CLK_CTRL__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL__S7_TOP__MASKFULL (IMG_UINT64_C(0xCFCF03000F3F3F0F))
+#define RGX_CR_CLK_CTRL_MASKFULL (IMG_UINT64_C(0xFFFFFF003F3FFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_SHIFT (62U)
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0X3FFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_ON (IMG_UINT64_C(0x4000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_TEXAS_AUTO (IMG_UINT64_C(0x8000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_SHIFT (60U)
+#define RGX_CR_CLK_CTRL_IPP_CLRMSK (IMG_UINT64_C(0XCFFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_IPP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_ON (IMG_UINT64_C(0x1000000000000000))
+#define RGX_CR_CLK_CTRL_IPP_AUTO (IMG_UINT64_C(0x2000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_SHIFT (58U)
+#define RGX_CR_CLK_CTRL_FBC_CLRMSK (IMG_UINT64_C(0XF3FFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBC_ON (IMG_UINT64_C(0x0400000000000000))
+#define RGX_CR_CLK_CTRL_FBC_AUTO (IMG_UINT64_C(0x0800000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_SHIFT (56U)
+#define RGX_CR_CLK_CTRL_FBDC_CLRMSK (IMG_UINT64_C(0XFCFFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FBDC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_ON (IMG_UINT64_C(0x0100000000000000))
+#define RGX_CR_CLK_CTRL_FBDC_AUTO (IMG_UINT64_C(0x0200000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_SHIFT (54U)
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0XFF3FFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_ON (IMG_UINT64_C(0x0040000000000000))
+#define RGX_CR_CLK_CTRL_FB_TLCACHE_AUTO (IMG_UINT64_C(0x0080000000000000))
+#define RGX_CR_CLK_CTRL_USCS_SHIFT (52U)
+#define RGX_CR_CLK_CTRL_USCS_CLRMSK (IMG_UINT64_C(0XFFCFFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_USCS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USCS_ON (IMG_UINT64_C(0x0010000000000000))
+#define RGX_CR_CLK_CTRL_USCS_AUTO (IMG_UINT64_C(0x0020000000000000))
+#define RGX_CR_CLK_CTRL_PBE_SHIFT (50U)
+#define RGX_CR_CLK_CTRL_PBE_CLRMSK (IMG_UINT64_C(0XFFF3FFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_PBE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PBE_ON (IMG_UINT64_C(0x0004000000000000))
+#define RGX_CR_CLK_CTRL_PBE_AUTO (IMG_UINT64_C(0x0008000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_SHIFT (48U)
+#define RGX_CR_CLK_CTRL_MCU_L1_CLRMSK (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L1_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_ON (IMG_UINT64_C(0x0001000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L1_AUTO (IMG_UINT64_C(0x0002000000000000))
+#define RGX_CR_CLK_CTRL_CDM_SHIFT (46U)
+#define RGX_CR_CLK_CTRL_CDM_CLRMSK (IMG_UINT64_C(0XFFFF3FFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_CDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_CDM_ON (IMG_UINT64_C(0x0000400000000000))
+#define RGX_CR_CLK_CTRL_CDM_AUTO (IMG_UINT64_C(0x0000800000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_SHIFT (44U)
+#define RGX_CR_CLK_CTRL_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFCFFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_SIDEKICK_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_ON (IMG_UINT64_C(0x0000100000000000))
+#define RGX_CR_CLK_CTRL_SIDEKICK_AUTO (IMG_UINT64_C(0x0000200000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_SHIFT (42U)
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFF3FFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_ON (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_CLK_CTRL_BIF_SIDEKICK_AUTO (IMG_UINT64_C(0x0000080000000000))
+#define RGX_CR_CLK_CTRL_BIF_SHIFT (40U)
+#define RGX_CR_CLK_CTRL_BIF_CLRMSK (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_CLK_CTRL_BIF_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_BIF_ON (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_CLK_CTRL_BIF_AUTO (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_SHIFT (28U)
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_ON (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_CTRL_TPU_MCU_DEMUX_AUTO (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_SHIFT (26U)
+#define RGX_CR_CLK_CTRL_MCU_L0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_CTRL_MCU_L0_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_ON (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_CTRL_MCU_L0_AUTO (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_CTRL_TPU_SHIFT (24U)
+#define RGX_CR_CLK_CTRL_TPU_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_CTRL_TPU_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TPU_ON (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_CTRL_TPU_AUTO (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_CTRL_USC_SHIFT (20U)
+#define RGX_CR_CLK_CTRL_USC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_CTRL_USC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_USC_ON (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_CTRL_USC_AUTO (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_CTRL_TLA_SHIFT (18U)
+#define RGX_CR_CLK_CTRL_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_CTRL_TLA_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TLA_ON (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_CTRL_TLA_AUTO (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_CTRL_SLC_SHIFT (16U)
+#define RGX_CR_CLK_CTRL_SLC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_CTRL_SLC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_SLC_ON (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_CTRL_SLC_AUTO (IMG_UINT64_C(0x0000000000020000))
+#define RGX_CR_CLK_CTRL_UVS_SHIFT (14U)
+#define RGX_CR_CLK_CTRL_UVS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_CLK_CTRL_UVS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_UVS_ON (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_CTRL_UVS_AUTO (IMG_UINT64_C(0x0000000000008000))
+#define RGX_CR_CLK_CTRL_PDS_SHIFT (12U)
+#define RGX_CR_CLK_CTRL_PDS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_CLK_CTRL_PDS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PDS_ON (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_CTRL_PDS_AUTO (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_CTRL_VDM_SHIFT (10U)
+#define RGX_CR_CLK_CTRL_VDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL_VDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_VDM_ON (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL_VDM_AUTO (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL_PM_SHIFT (8U)
+#define RGX_CR_CLK_CTRL_PM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL_PM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_PM_ON (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL_PM_AUTO (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL_GPP_SHIFT (6U)
+#define RGX_CR_CLK_CTRL_GPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF3F))
+#define RGX_CR_CLK_CTRL_GPP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_GPP_ON (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_CTRL_GPP_AUTO (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_CTRL_TE_SHIFT (4U)
+#define RGX_CR_CLK_CTRL_TE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL_TE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TE_ON (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL_TE_AUTO (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL_TSP_SHIFT (2U)
+#define RGX_CR_CLK_CTRL_TSP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF3))
+#define RGX_CR_CLK_CTRL_TSP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_TSP_ON (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_CTRL_TSP_AUTO (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_CTRL_ISP_SHIFT (0U)
+#define RGX_CR_CLK_CTRL_ISP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL_ISP_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_CTRL_ISP_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL_ISP_AUTO (IMG_UINT64_C(0x0000000000000002))
+
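+/* Usage sketch (illustrative): each clock domain in RGX_CR_CLK_CTRL is a
+ * 2-bit field with OFF/ON/AUTO encodings, so switching e.g. the ISP clock
+ * to automatic gating is:
+ *
+ *   ui64ClkCtrl = (ui64ClkCtrl & RGX_CR_CLK_CTRL_ISP_CLRMSK) |
+ *                 RGX_CR_CLK_CTRL_ISP_AUTO;
+ */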
+
+/*
+ Register RGX_CR_CLK_STATUS
+*/
+#define RGX_CR_CLK_STATUS (0x0008U)
+#define RGX_CR_CLK_STATUS__PBE2_XE__MASKFULL (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS__S7_TOP__MASKFULL (IMG_UINT64_C(0x00000001B3101773))
+#define RGX_CR_CLK_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFF077FF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_SHIFT (32U)
+#define RGX_CR_CLK_STATUS_MCU_FBTC_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_FBTC_RUNNING (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_SHIFT (31U)
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_TEXAS_RUNNING (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_STATUS_IPP_SHIFT (30U)
+#define RGX_CR_CLK_STATUS_IPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_CLK_STATUS_IPP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_IPP_RUNNING (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_STATUS_FBC_SHIFT (29U)
+#define RGX_CR_CLK_STATUS_FBC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBC_RUNNING (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_STATUS_FBDC_SHIFT (28U)
+#define RGX_CR_CLK_STATUS_FBDC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_CLK_STATUS_FBDC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FBDC_RUNNING (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_SHIFT (27U)
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_FB_TLCACHE_RUNNING (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_STATUS_USCS_SHIFT (26U)
+#define RGX_CR_CLK_STATUS_USCS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_CLK_STATUS_USCS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USCS_RUNNING (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_STATUS_PBE_SHIFT (25U)
+#define RGX_CR_CLK_STATUS_PBE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_CLK_STATUS_PBE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PBE_RUNNING (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_SHIFT (24U)
+#define RGX_CR_CLK_STATUS_MCU_L1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_CLK_STATUS_MCU_L1_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L1_RUNNING (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_STATUS_CDM_SHIFT (23U)
+#define RGX_CR_CLK_STATUS_CDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_CLK_STATUS_CDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_CDM_RUNNING (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_SHIFT (22U)
+#define RGX_CR_CLK_STATUS_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_CLK_STATUS_SIDEKICK_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_SHIFT (21U)
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_SIDEKICK_RUNNING (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_STATUS_BIF_SHIFT (20U)
+#define RGX_CR_CLK_STATUS_BIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_CLK_STATUS_BIF_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_BIF_RUNNING (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_SHIFT (14U)
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_MCU_DEMUX_RUNNING (IMG_UINT64_C(0x0000000000004000))
+#define RGX_CR_CLK_STATUS_MCU_L0_SHIFT (13U)
+#define RGX_CR_CLK_STATUS_MCU_L0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_CLK_STATUS_MCU_L0_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_MCU_L0_RUNNING (IMG_UINT64_C(0x0000000000002000))
+#define RGX_CR_CLK_STATUS_TPU_SHIFT (12U)
+#define RGX_CR_CLK_STATUS_TPU_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_CLK_STATUS_TPU_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TPU_RUNNING (IMG_UINT64_C(0x0000000000001000))
+#define RGX_CR_CLK_STATUS_USC_SHIFT (10U)
+#define RGX_CR_CLK_STATUS_USC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_STATUS_USC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_USC_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_STATUS_TLA_SHIFT (9U)
+#define RGX_CR_CLK_STATUS_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_STATUS_TLA_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TLA_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_STATUS_SLC_SHIFT (8U)
+#define RGX_CR_CLK_STATUS_SLC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_STATUS_SLC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_SLC_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_STATUS_UVS_SHIFT (7U)
+#define RGX_CR_CLK_STATUS_UVS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_STATUS_UVS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_UVS_RUNNING (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_STATUS_PDS_SHIFT (6U)
+#define RGX_CR_CLK_STATUS_PDS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_STATUS_PDS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PDS_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_STATUS_VDM_SHIFT (5U)
+#define RGX_CR_CLK_STATUS_VDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_STATUS_VDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_VDM_RUNNING (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_STATUS_PM_SHIFT (4U)
+#define RGX_CR_CLK_STATUS_PM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS_PM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_PM_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS_GPP_SHIFT (3U)
+#define RGX_CR_CLK_STATUS_GPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_STATUS_GPP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_GPP_RUNNING (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_STATUS_TE_SHIFT (2U)
+#define RGX_CR_CLK_STATUS_TE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS_TE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TE_RUNNING (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS_TSP_SHIFT (1U)
+#define RGX_CR_CLK_STATUS_TSP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_STATUS_TSP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_TSP_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_STATUS_ISP_SHIFT (0U)
+#define RGX_CR_CLK_STATUS_ISP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS_ISP_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_STATUS_ISP_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
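+/* Usage sketch (illustrative): RGX_CR_CLK_STATUS fields are single-bit
+ * GATED/RUNNING flags, so a domain can be polled as:
+ *
+ *   IMG_BOOL bIspRunning =
+ *       (ui64ClkStatus & RGX_CR_CLK_STATUS_ISP_RUNNING) != 0;
+ */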
+
+/*
+ Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID__PBVNC (0x0020U)
+#define RGX_CR_CORE_ID__PBVNC__MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT (48U)
+#define RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT (32U)
+#define RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT (16U)
+#define RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT (0U)
+#define RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
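+/* Decode sketch (illustrative): on cores with the PBVNC-style CORE_ID
+ * register, the full B.V.N.C is read from the four 16-bit fields above,
+ * e.g. the branch (B) field:
+ *
+ *   IMG_UINT32 ui32B = (IMG_UINT32)
+ *       ((ui64CoreID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
+ *        RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT);
+ */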
+
+/*
+ Register RGX_CR_CORE_ID
+*/
+#define RGX_CR_CORE_ID (0x0018U)
+#define RGX_CR_CORE_ID_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_ID_ID_SHIFT (16U)
+#define RGX_CR_CORE_ID_ID_CLRMSK (0X0000FFFFU)
+#define RGX_CR_CORE_ID_CONFIG_SHIFT (0U)
+#define RGX_CR_CORE_ID_CONFIG_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_CORE_REVISION
+*/
+#define RGX_CR_CORE_REVISION (0x0020U)
+#define RGX_CR_CORE_REVISION_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CORE_REVISION_DESIGNER_SHIFT (24U)
+#define RGX_CR_CORE_REVISION_DESIGNER_CLRMSK (0X00FFFFFFU)
+#define RGX_CR_CORE_REVISION_MAJOR_SHIFT (16U)
+#define RGX_CR_CORE_REVISION_MAJOR_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CORE_REVISION_MINOR_SHIFT (8U)
+#define RGX_CR_CORE_REVISION_MINOR_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_SHIFT (0U)
+#define RGX_CR_CORE_REVISION_MAINTENANCE_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_DESIGNER_REV_FIELD1
+*/
+#define RGX_CR_DESIGNER_REV_FIELD1 (0x0028U)
+#define RGX_CR_DESIGNER_REV_FIELD1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD1_DESIGNER_REV_FIELD1_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_DESIGNER_REV_FIELD2
+*/
+#define RGX_CR_DESIGNER_REV_FIELD2 (0x0030U)
+#define RGX_CR_DESIGNER_REV_FIELD2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_SHIFT (0U)
+#define RGX_CR_DESIGNER_REV_FIELD2_DESIGNER_REV_FIELD2_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_CHANGESET_NUMBER
+*/
+#define RGX_CR_CHANGESET_NUMBER (0x0040U)
+#define RGX_CR_CHANGESET_NUMBER_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_SHIFT (0U)
+#define RGX_CR_CHANGESET_NUMBER_CHANGESET_NUMBER_CLRMSK (IMG_UINT64_C(0000000000000000))
+
+
+/*
+ Register RGX_CR_CLK_XTPLUS_CTRL
+*/
+#define RGX_CR_CLK_XTPLUS_CTRL (0x0080U)
+#define RGX_CR_CLK_XTPLUS_CTRL_MASKFULL (IMG_UINT64_C(0x0000003FFFFF0000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_SHIFT (36U)
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_CLRMSK (IMG_UINT64_C(0XFFFFFFCFFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_ON (IMG_UINT64_C(0x0000001000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_TDM_AUTO (IMG_UINT64_C(0x0000002000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_SHIFT (34U)
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_CLRMSK (IMG_UINT64_C(0XFFFFFFF3FFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_ON (IMG_UINT64_C(0x0000000400000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_ASTC_AUTO (IMG_UINT64_C(0x0000000800000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_SHIFT (32U)
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_CLRMSK (IMG_UINT64_C(0XFFFFFFFCFFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_ON (IMG_UINT64_C(0x0000000100000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_IPF_AUTO (IMG_UINT64_C(0x0000000200000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_SHIFT (30U)
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF3FFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_ON (IMG_UINT64_C(0x0000000040000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_COMPUTE_AUTO (IMG_UINT64_C(0x0000000080000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_SHIFT (28U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFCFFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_ON (IMG_UINT64_C(0x0000000010000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PIXEL_AUTO (IMG_UINT64_C(0x0000000020000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_SHIFT (26U)
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF3FFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_ON (IMG_UINT64_C(0x0000000004000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_VERTEX_AUTO (IMG_UINT64_C(0x0000000008000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_SHIFT (24U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_ON (IMG_UINT64_C(0x0000000001000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USCPS_AUTO (IMG_UINT64_C(0x0000000002000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_SHIFT (22U)
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF3FFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_ON (IMG_UINT64_C(0x0000000000400000))
+#define RGX_CR_CLK_XTPLUS_CTRL_PDS_SHARED_AUTO (IMG_UINT64_C(0x0000000000800000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_SHIFT (20U)
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_ON (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_CLK_XTPLUS_CTRL_BIF_BLACKPEARL_AUTO (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_SHIFT (18U)
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_ON (IMG_UINT64_C(0x0000000000040000))
+#define RGX_CR_CLK_XTPLUS_CTRL_USC_SHARED_AUTO (IMG_UINT64_C(0x0000000000080000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_SHIFT (16U)
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_OFF (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_ON (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_CLK_XTPLUS_CTRL_GEOMETRY_AUTO (IMG_UINT64_C(0x0000000000020000))
+
+
+/*
+ Register RGX_CR_CLK_XTPLUS_STATUS
+*/
+#define RGX_CR_CLK_XTPLUS_STATUS (0x0088U)
+#define RGX_CR_CLK_XTPLUS_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000007FF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_SHIFT (10U)
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_TDM_RUNNING (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_SHIFT (9U)
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_IPF_RUNNING (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_SHIFT (8U)
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_COMPUTE_RUNNING (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_SHIFT (7U)
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_ASTC_RUNNING (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_SHIFT (6U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PIXEL_RUNNING (IMG_UINT64_C(0x0000000000000040))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_SHIFT (5U)
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_VERTEX_RUNNING (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_SHIFT (4U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USCPS_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_SHIFT (3U)
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_PDS_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000008))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_SHIFT (2U)
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_BIF_BLACKPEARL_RUNNING (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_SHIFT (1U)
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_USC_SHARED_RUNNING (IMG_UINT64_C(0x0000000000000002))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_SHIFT (0U)
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_GATED (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_CLK_XTPLUS_STATUS_GEOMETRY_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
+/*
+ Register RGX_CR_SOFT_RESET
+*/
+#define RGX_CR_SOFT_RESET (0x0100U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL (IMG_UINT64_C(0xFFEFFFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET_MASKFULL (IMG_UINT64_C(0x00E7FFFFFFFFFC1D))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_SHIFT (63U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_CLRMSK (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM3_CORE_EN (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_SHIFT (62U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_CLRMSK (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__PHANTOM2_CORE_EN (IMG_UINT64_C(0X4000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_SHIFT (61U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_CLRMSK (IMG_UINT64_C(0XDFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__BERNADO2_CORE_EN (IMG_UINT64_C(0X2000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_SHIFT (60U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_CLRMSK (IMG_UINT64_C(0XEFFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__JONES_CORE_EN (IMG_UINT64_C(0X1000000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_SHIFT (59U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_CLRMSK (IMG_UINT64_C(0XF7FFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TILING_CORE_EN (IMG_UINT64_C(0X0800000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_SHIFT (58U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_CLRMSK (IMG_UINT64_C(0XFBFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__TE3_EN (IMG_UINT64_C(0X0400000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_SHIFT (57U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_CLRMSK (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VCE_EN (IMG_UINT64_C(0X0200000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_SHIFT (56U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_CLRMSK (IMG_UINT64_C(0XFEFFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__VBS_EN (IMG_UINT64_C(0X0100000000000000))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_SHIFT (55U)
+#define RGX_CR_SOFT_RESET_DPX1_CORE_CLRMSK (IMG_UINT64_C(0XFF7FFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX1_CORE_EN (IMG_UINT64_C(0X0080000000000000))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_SHIFT (54U)
+#define RGX_CR_SOFT_RESET_DPX0_CORE_CLRMSK (IMG_UINT64_C(0XFFBFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DPX0_CORE_EN (IMG_UINT64_C(0X0040000000000000))
+#define RGX_CR_SOFT_RESET_FBA_SHIFT (53U)
+#define RGX_CR_SOFT_RESET_FBA_CLRMSK (IMG_UINT64_C(0XFFDFFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_FBA_EN (IMG_UINT64_C(0X0020000000000000))
+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_SHIFT (51U)
+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_CLRMSK (IMG_UINT64_C(0XFFF7FFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET__PBE2_XE__FB_CDC_EN (IMG_UINT64_C(0X0008000000000000))
+#define RGX_CR_SOFT_RESET_SH_SHIFT (50U)
+#define RGX_CR_SOFT_RESET_SH_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_SH_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_SOFT_RESET_VRDM_SHIFT (49U)
+#define RGX_CR_SOFT_RESET_VRDM_CLRMSK (IMG_UINT64_C(0XFFFDFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_VRDM_EN (IMG_UINT64_C(0X0002000000000000))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_SHIFT (48U)
+#define RGX_CR_SOFT_RESET_MCU_FBTC_CLRMSK (IMG_UINT64_C(0XFFFEFFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MCU_FBTC_EN (IMG_UINT64_C(0X0001000000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_SHIFT (47U)
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_CLRMSK (IMG_UINT64_C(0XFFFF7FFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM1_CORE_EN (IMG_UINT64_C(0X0000800000000000))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_SHIFT (46U)
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_CLRMSK (IMG_UINT64_C(0XFFFFBFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_PHANTOM0_CORE_EN (IMG_UINT64_C(0X0000400000000000))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_SHIFT (45U)
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_CLRMSK (IMG_UINT64_C(0XFFFFDFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO1_CORE_EN (IMG_UINT64_C(0X0000200000000000))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_SHIFT (44U)
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_CLRMSK (IMG_UINT64_C(0XFFFFEFFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BERNADO0_CORE_EN (IMG_UINT64_C(0X0000100000000000))
+#define RGX_CR_SOFT_RESET_IPP_SHIFT (43U)
+#define RGX_CR_SOFT_RESET_IPP_CLRMSK (IMG_UINT64_C(0XFFFFF7FFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_IPP_EN (IMG_UINT64_C(0X0000080000000000))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_SHIFT (42U)
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_CLRMSK (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF_TEXAS_EN (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_SHIFT (41U)
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFDFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_TORNADO_CORE_EN (IMG_UINT64_C(0X0000020000000000))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_SHIFT (40U)
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_H_CORE_EN (IMG_UINT64_C(0X0000010000000000))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_SHIFT (39U)
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFF7FFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_G_CORE_EN (IMG_UINT64_C(0X0000008000000000))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_SHIFT (38U)
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_F_CORE_EN (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_SHIFT (37U)
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_E_CORE_EN (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_SHIFT (36U)
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_D_CORE_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_SHIFT (35U)
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_C_CORE_EN (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_SOFT_RESET_MMU_SHIFT (34U)
+#define RGX_CR_SOFT_RESET_MMU_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_SOFT_RESET_MMU_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_SOFT_RESET_BIF1_SHIFT (33U)
+#define RGX_CR_SOFT_RESET_BIF1_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_SOFT_RESET_BIF1_EN (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_SOFT_RESET_GARTEN_SHIFT (32U)
+#define RGX_CR_SOFT_RESET_GARTEN_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_SOFT_RESET_GARTEN_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_SHIFT (31U)
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_SOFT_RESET_RASCAL_CORE_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_SHIFT (30U)
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_B_CORE_EN (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_SHIFT (29U)
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_SOFT_RESET_DUST_A_CORE_EN (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_SHIFT (28U)
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFEFFFFFFF))
+#define RGX_CR_SOFT_RESET_FB_TLCACHE_EN (IMG_UINT64_C(0X0000000010000000))
+#define RGX_CR_SOFT_RESET_SLC_SHIFT (27U)
+#define RGX_CR_SOFT_RESET_SLC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF7FFFFFF))
+#define RGX_CR_SOFT_RESET_SLC_EN (IMG_UINT64_C(0X0000000008000000))
+#define RGX_CR_SOFT_RESET_TLA_SHIFT (26U)
+#define RGX_CR_SOFT_RESET_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_SOFT_RESET_TLA_EN (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_SOFT_RESET_UVS_SHIFT (25U)
+#define RGX_CR_SOFT_RESET_UVS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFDFFFFFF))
+#define RGX_CR_SOFT_RESET_UVS_EN (IMG_UINT64_C(0X0000000002000000))
+#define RGX_CR_SOFT_RESET_TE_SHIFT (24U)
+#define RGX_CR_SOFT_RESET_TE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_SOFT_RESET_TE_EN (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_SOFT_RESET_GPP_SHIFT (23U)
+#define RGX_CR_SOFT_RESET_GPP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_SOFT_RESET_GPP_EN (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_SOFT_RESET_FBDC_SHIFT (22U)
+#define RGX_CR_SOFT_RESET_FBDC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_SOFT_RESET_FBDC_EN (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_SOFT_RESET_FBC_SHIFT (21U)
+#define RGX_CR_SOFT_RESET_FBC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SOFT_RESET_FBC_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SOFT_RESET_PM_SHIFT (20U)
+#define RGX_CR_SOFT_RESET_PM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFEFFFFF))
+#define RGX_CR_SOFT_RESET_PM_EN (IMG_UINT64_C(0X0000000000100000))
+#define RGX_CR_SOFT_RESET_PBE_SHIFT (19U)
+#define RGX_CR_SOFT_RESET_PBE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_SOFT_RESET_PBE_EN (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_SOFT_RESET_USC_SHARED_SHIFT (18U)
+#define RGX_CR_SOFT_RESET_USC_SHARED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_SOFT_RESET_USC_SHARED_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_SOFT_RESET_MCU_L1_SHIFT (17U)
+#define RGX_CR_SOFT_RESET_MCU_L1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_SOFT_RESET_MCU_L1_EN (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_SOFT_RESET_BIF_SHIFT (16U)
+#define RGX_CR_SOFT_RESET_BIF_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_SOFT_RESET_BIF_EN (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_SOFT_RESET_CDM_SHIFT (15U)
+#define RGX_CR_SOFT_RESET_CDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_SOFT_RESET_CDM_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_SOFT_RESET_VDM_SHIFT (14U)
+#define RGX_CR_SOFT_RESET_VDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_SOFT_RESET_VDM_EN (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_SOFT_RESET_TESS_SHIFT (13U)
+#define RGX_CR_SOFT_RESET_TESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_SOFT_RESET_TESS_EN (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_SOFT_RESET_PDS_SHIFT (12U)
+#define RGX_CR_SOFT_RESET_PDS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_SOFT_RESET_PDS_EN (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_SOFT_RESET_ISP_SHIFT (11U)
+#define RGX_CR_SOFT_RESET_ISP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_SOFT_RESET_ISP_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_SOFT_RESET_TSP_SHIFT (10U)
+#define RGX_CR_SOFT_RESET_TSP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_SOFT_RESET_TSP_EN (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_SHIFT (4U)
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_SOFT_RESET_TPU_MCU_DEMUX_EN (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_SOFT_RESET_MCU_L0_SHIFT (3U)
+#define RGX_CR_SOFT_RESET_MCU_L0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_SOFT_RESET_MCU_L0_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_SOFT_RESET_TPU_SHIFT (2U)
+#define RGX_CR_SOFT_RESET_TPU_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SOFT_RESET_TPU_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SOFT_RESET_USC_SHIFT (0U)
+#define RGX_CR_SOFT_RESET_USC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SOFT_RESET_USC_EN (IMG_UINT64_C(0X0000000000000001))
+
+
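+/*
+ Usage sketch (editor's note, not part of the generated list): every field
+ carries a _SHIFT, a _CLRMSK (all bits set except the field) and, for
+ single-bit fields, an _EN constant, so field updates are read-modify-write:
+ AND with the clear-mask, then OR in the new value. Assuming the driver's
+ usual OSReadHWReg64()/OSWriteHWReg64() accessors and a mapped register bank
+ pvRegBase, pulsing the GARTEN soft-reset bit could look like:
+
+   IMG_UINT64 ui64Val = OSReadHWReg64(pvRegBase, RGX_CR_SOFT_RESET);
+   OSWriteHWReg64(pvRegBase, RGX_CR_SOFT_RESET,
+                  ui64Val | RGX_CR_SOFT_RESET_GARTEN_EN);
+   OSWriteHWReg64(pvRegBase, RGX_CR_SOFT_RESET,
+                  ui64Val & RGX_CR_SOFT_RESET_GARTEN_CLRMSK);
+*/
+
+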
+/*
+ Register RGX_CR_SOFT_RESET2
+*/
+#define RGX_CR_SOFT_RESET2 (0x0108U)
+#define RGX_CR_SOFT_RESET2_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_SOFT_RESET2_SPFILTER_SHIFT (12U)
+#define RGX_CR_SOFT_RESET2_SPFILTER_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_SOFT_RESET2_TDM_SHIFT (11U)
+#define RGX_CR_SOFT_RESET2_TDM_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_SOFT_RESET2_TDM_EN (0X00000800U)
+#define RGX_CR_SOFT_RESET2_ASTC_SHIFT (10U)
+#define RGX_CR_SOFT_RESET2_ASTC_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SOFT_RESET2_ASTC_EN (0X00000400U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_SHIFT (9U)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SOFT_RESET2_BLACKPEARL_EN (0X00000200U)
+#define RGX_CR_SOFT_RESET2_USCPS_SHIFT (8U)
+#define RGX_CR_SOFT_RESET2_USCPS_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SOFT_RESET2_USCPS_EN (0X00000100U)
+#define RGX_CR_SOFT_RESET2_IPF_SHIFT (7U)
+#define RGX_CR_SOFT_RESET2_IPF_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SOFT_RESET2_IPF_EN (0X00000080U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_SHIFT (6U)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SOFT_RESET2_GEOMETRY_EN (0X00000040U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_SHIFT (5U)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SOFT_RESET2_USC_SHARED_EN (0X00000020U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_SHIFT (4U)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SOFT_RESET2_PDS_SHARED_EN (0X00000010U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_SHIFT (3U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SOFT_RESET2_BIF_BLACKPEARL_EN (0X00000008U)
+#define RGX_CR_SOFT_RESET2_PIXEL_SHIFT (2U)
+#define RGX_CR_SOFT_RESET2_PIXEL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SOFT_RESET2_PIXEL_EN (0X00000004U)
+#define RGX_CR_SOFT_RESET2_CDM_SHIFT (1U)
+#define RGX_CR_SOFT_RESET2_CDM_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SOFT_RESET2_CDM_EN (0X00000002U)
+#define RGX_CR_SOFT_RESET2_VERTEX_SHIFT (0U)
+#define RGX_CR_SOFT_RESET2_VERTEX_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SOFT_RESET2_VERTEX_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_EVENT_STATUS
+*/
+#define RGX_CR_EVENT_STATUS (0x0130U)
+#define RGX_CR_EVENT_STATUS__ROGUEXE__MASKFULL (IMG_UINT64_C(0x00000000E005FFFF))
+#define RGX_CR_EVENT_STATUS__SIGNALS__MASKFULL (IMG_UINT64_C(0x00000000E007FFFF))
+#define RGX_CR_EVENT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_SHIFT (31U)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_CLRMSK (0X7FFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_FENCE_FINISHED_EN (0X80000000U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_SHIFT (30U)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_TDM_BUFFER_STALL_EN (0X40000000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_SHIFT (29U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_SIGNAL_FAILURE_EN (0X20000000U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_SHIFT (28U)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_OUT_OF_MEMORY_EN (0X10000000U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_SHIFT (27U)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_EVENT_STATUS_DPX_MMU_PAGE_FAULT_EN (0X08000000U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_SHIFT (26U)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_EVENT_STATUS_RPM_OUT_OF_MEMORY_EN (0X04000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_SHIFT (25U)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC3_FINISHED_EN (0X02000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_SHIFT (24U)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_CLRMSK (0XFEFFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC2_FINISHED_EN (0X01000000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_SHIFT (23U)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_CLRMSK (0XFF7FFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC1_FINISHED_EN (0X00800000U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_SHIFT (22U)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_EVENT_STATUS_FBA_FC0_FINISHED_EN (0X00400000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_SHIFT (21U)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC3_FINISHED_EN (0X00200000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_SHIFT (20U)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC2_FINISHED_EN (0X00100000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_SHIFT (19U)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC1_FINISHED_EN (0X00080000U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS_RDM_FC0_FINISHED_EN (0X00040000U)
+#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS__ROGUEXE__TDM_CONTEXT_STORE_FINISHED_EN (0X00040000U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_SHIFT (18U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_EVENT_STATUS__SIGNALS__TDM_CONTEXT_STORE_FINISHED_EN (0X00040000U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS_SHG_FINISHED_EN (0X00020000U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_SHIFT (17U)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_EVENT_STATUS__SIGNALS__SPFILTER_SIGNAL_UPDATE_EN (0X00020000U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_SHIFT (16U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_BUFFER_STALL_EN (0X00010000U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_SHIFT (15U)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_CLRMSK (0XFFFF7FFFU)
+#define RGX_CR_EVENT_STATUS_USC_TRIGGER_EN (0X00008000U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_SHIFT (14U)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_EVENT_STATUS_ZLS_FINISHED_EN (0X00004000U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_SHIFT (13U)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_ACK_EN (0X00002000U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_SHIFT (12U)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_EVENT_STATUS_GPIO_REQ_EN (0X00001000U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_SHIFT (11U)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_EVENT_STATUS_POWER_ABORT_EN (0X00000800U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_SHIFT (10U)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_EVENT_STATUS_POWER_COMPLETE_EN (0X00000400U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_SHIFT (9U)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN (0X00000200U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_SHIFT (8U)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_EVENT_STATUS_PM_3D_MEM_FREE_EN (0X00000100U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_SHIFT (7U)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_EVENT_STATUS_PM_OUT_OF_MEMORY_EN (0X00000080U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_SHIFT (6U)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_EVENT_STATUS_TA_TERMINATE_EN (0X00000040U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_SHIFT (5U)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_EVENT_STATUS_TA_FINISHED_EN (0X00000020U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_SHIFT (4U)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_EVENT_STATUS_ISP_END_MACROTILE_EN (0X00000010U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_SHIFT (3U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_EVENT_STATUS_PIXELBE_END_RENDER_EN (0X00000008U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_SHIFT (2U)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_EVENT_STATUS_COMPUTE_FINISHED_EN (0X00000004U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_SHIFT (1U)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_EVENT_STATUS_KERNEL_FINISHED_EN (0X00000002U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_SHIFT (0U)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_EVENT_STATUS_TLA_COMPLETE_EN (0X00000001U)
+
+
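+/*
+ Usage sketch (editor's note): the single-bit _EN constants double as test
+ masks when polling this register. Assuming an OSReadHWReg32() accessor, an
+ MMU page-fault check could look like:
+
+   IMG_UINT32 ui32Events = OSReadHWReg32(pvRegBase, RGX_CR_EVENT_STATUS);
+   if (ui32Events & RGX_CR_EVENT_STATUS_MMU_PAGE_FAULT_EN)
+       HandleMMUPageFault();   (hypothetical handler; the event is then
+                                acknowledged by writing the same mask to the
+                                companion event-clear register)
+*/
+
+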
+/*
+ Register RGX_CR_TIMER
+*/
+#define RGX_CR_TIMER (0x0160U)
+#define RGX_CR_TIMER_MASKFULL (IMG_UINT64_C(0x8000FFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_SHIFT (63U)
+#define RGX_CR_TIMER_BIT31_CLRMSK (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_TIMER_BIT31_EN (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_TIMER_VALUE_SHIFT (0U)
+#define RGX_CR_TIMER_VALUE_CLRMSK (IMG_UINT64_C(0XFFFF000000000000))
+
+
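+/*
+ Usage sketch (editor's note): VALUE occupies bits 47:0 and its shift is
+ zero, so the free-running count is recovered by masking with the inverse
+ of the clear-mask:
+
+   IMG_UINT64 ui64Ticks = OSReadHWReg64(pvRegBase, RGX_CR_TIMER)
+                          & ~RGX_CR_TIMER_VALUE_CLRMSK;
+*/
+
+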
+/*
+ Register RGX_CR_TLA_STATUS
+*/
+#define RGX_CR_TLA_STATUS (0x0178U)
+#define RGX_CR_TLA_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_SHIFT (39U)
+#define RGX_CR_TLA_STATUS_BLIT_COUNT_CLRMSK (IMG_UINT64_C(0X0000007FFFFFFFFF))
+#define RGX_CR_TLA_STATUS_REQUEST_SHIFT (7U)
+#define RGX_CR_TLA_STATUS_REQUEST_CLRMSK (IMG_UINT64_C(0XFFFFFF800000007F))
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_SHIFT (1U)
+#define RGX_CR_TLA_STATUS_FIFO_FULLNESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF81))
+#define RGX_CR_TLA_STATUS_BUSY_SHIFT (0U)
+#define RGX_CR_TLA_STATUS_BUSY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_TLA_STATUS_BUSY_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_PM_PARTIAL_RENDER_ENABLE
+*/
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE (0x0338U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_SHIFT (0U)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_PM_PARTIAL_RENDER_ENABLE_OP_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SIDEKICK_IDLE
+*/
+#define RGX_CR_SIDEKICK_IDLE (0x03C8U)
+#define RGX_CR_SIDEKICK_IDLE_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_SHIFT (6U)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SIDEKICK_IDLE_FB_CDC_EN (0X00000040U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_SHIFT (5U)
+#define RGX_CR_SIDEKICK_IDLE_MMU_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SIDEKICK_IDLE_MMU_EN (0X00000020U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_SHIFT (4U)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SIDEKICK_IDLE_BIF128_EN (0X00000010U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_SHIFT (3U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SIDEKICK_IDLE_TLA_EN (0X00000008U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_SHIFT (2U)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SIDEKICK_IDLE_GARTEN_EN (0X00000004U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_SHIFT (1U)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SIDEKICK_IDLE_HOSTIF_EN (0X00000002U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_SHIFT (0U)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SIDEKICK_IDLE_SOCIF_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS (0x0430U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x00000000000000F3))
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_SHIFT (4U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_LAST_PIPE_CLRMSK (0XFFFFFF0FU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0X00000002U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0X00000001U)
+
+
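+/*
+ Usage sketch (editor's note): after kicking a VDM context store the driver
+ can poll COMPLETE, then check whether the stored context will need a
+ resume:
+
+   IMG_UINT32 ui32St  = OSReadHWReg32(pvRegBase, RGX_CR_VDM_CONTEXT_STORE_STATUS);
+   IMG_BOOL   bDone   = (ui32St & RGX_CR_VDM_CONTEXT_STORE_STATUS_COMPLETE_EN) != 0;
+   IMG_BOOL   bResume = (ui32St & RGX_CR_VDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN) != 0;
+*/
+
+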
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0 (0x0438U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1 (0x0440U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK1_PDS_STATE2_CLRMSK (0X00000000U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2 (0x0448U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK0
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0 (0x0450U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE1_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK0_PDS_STATE0_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK1
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1 (0x0458U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK1_PDS_STATE2_CLRMSK (0X00000000U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_RESUME_TASK2
+*/
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2 (0x0460U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_SHIFT (32U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT2_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_RESUME_TASK2_STREAM_OUT1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_STORE_STATUS
+*/
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS (0x04A0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_NEED_RESUME_EN (0X00000002U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_STORE_STATUS_COMPLETE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_PDS0 (0x04A8U)
+#define RGX_CR_CDM_CONTEXT_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSIZE (16U)
+
+
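+/*
+ Usage sketch (editor's note): the _ALIGNSHIFT/_ALIGNSIZE pairs record that
+ these fields hold 16-byte-aligned addresses with the low four bits dropped.
+ Packing a (hypothetical) aligned data/code address pair could look like:
+
+   IMG_UINT64 ui64Pds0 =
+       ((ui64DataAddr >> RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_ALIGNSHIFT)
+                       << RGX_CR_CDM_CONTEXT_PDS0_DATA_ADDR_SHIFT) |
+       ((ui64CodeAddr >> RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_ALIGNSHIFT)
+                       << RGX_CR_CDM_CONTEXT_PDS0_CODE_ADDR_SHIFT);
+*/
+
+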
+/*
+ Register RGX_CR_CDM_CONTEXT_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_PDS1 (0x04B0U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_PDS_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_USC_SEQ_DEP_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_SHIFT (27U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TARGET_EN (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1_UNIFIED_SIZE_CLRMSK (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_SHIFT (20U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SHARED_EN (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_CONTEXT_PDS1_COMMON_SIZE_CLRMSK (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1_TEMP_SIZE_CLRMSK (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_PDS1_DATA_SIZE_CLRMSK (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_PDS1_FENCE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_CDM_TERMINATE_PDS
+*/
+#define RGX_CR_CDM_TERMINATE_PDS (0x04B8U)
+#define RGX_CR_CDM_TERMINATE_PDS_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_TERMINATE_PDS_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_TERMINATE_PDS1
+*/
+#define RGX_CR_CDM_TERMINATE_PDS1 (0x04C0U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_PDS_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_USC_SEQ_DEP_EN (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_SHIFT (27U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TARGET_EN (0X08000000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1_UNIFIED_SIZE_CLRMSK (0XF81FFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_SHIFT (20U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SHARED_EN (0X00100000U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_TERMINATE_PDS1_COMMON_SIZE_CLRMSK (0XFFF007FFU)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1_TEMP_SIZE_CLRMSK (0XFFFFF87FU)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_TERMINATE_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_SHIFT (1U)
+#define RGX_CR_CDM_TERMINATE_PDS1_DATA_SIZE_CLRMSK (0XFFFFFF81U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_SHIFT (0U)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_TERMINATE_PDS1_FENCE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS0
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0 (0x04D8U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_MASKFULL (IMG_UINT64_C(0xFFFFFFF0FFFFFFF0))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_SHIFT (36U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_DATA_ADDR_ALIGNSIZE (16U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_SHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000000F))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS0_CODE_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_CDM_CONTEXT_LOAD_PDS1
+*/
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1 (0x04E0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__MASKFULL (IMG_UINT64_C(0x000000007FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_SHIFT (30U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__PDS_SEQ_DEP_EN (0X40000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_PDS_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_SHIFT (29U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__USC_SEQ_DEP_EN (0X20000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_USC_SEQ_DEP_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_SHIFT (28U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TARGET_EN (0X10000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_SHIFT (27U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TARGET_EN (0X08000000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_SHIFT (22U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__UNIFIED_SIZE_CLRMSK (0XF03FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_UNIFIED_SIZE_CLRMSK (0XF81FFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_SHIFT (21U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SHARED_EN (0X00200000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_SHIFT (20U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SHARED_EN (0X00100000U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_SHIFT (12U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__COMMON_SIZE_CLRMSK (0XFFE00FFFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_SHIFT (11U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_COMMON_SIZE_CLRMSK (0XFFF007FFU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_TEMP_SIZE_CLRMSK (0XFFFFF87FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_SHIFT (7U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1__TEMPSIZE8__TEMP_SIZE_CLRMSK (0XFFFFF07FU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_SHIFT (1U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_DATA_SIZE_CLRMSK (0XFFFFFF81U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_SHIFT (0U)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_CDM_CONTEXT_LOAD_PDS1_FENCE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_CONFIG
+*/
+#define RGX_CR_MIPS_WRAPPER_CONFIG (0x0810U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x000000030F01FFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_SHIFT (33U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_DISABLE_BOOT_EN (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_SHIFT (32U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_L2_CACHE_OFF_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_SHIFT (25U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF1FFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_SHIFT (24U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFEFFFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_TRUSTED_EN (IMG_UINT64_C(0X0000000001000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_SHIFT (16U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MIPS32 (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_CONFIG_REGBANK_BASE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
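+/*
+ Usage sketch (editor's note): multi-valued fields enumerate their encodings
+ directly (here MIPS32 vs MICROMIPS), so selecting the boot ISA is the same
+ mask-and-or idiom:
+
+   IMG_UINT64 ui64Cfg = OSReadHWReg64(pvRegBase, RGX_CR_MIPS_WRAPPER_CONFIG);
+   ui64Cfg &= RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_CLRMSK;
+   ui64Cfg |= RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS;
+   OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_WRAPPER_CONFIG, ui64Cfg);
+*/
+
+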
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1 (0x0818U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP1_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2 (0x0820U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
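+/*
+ Usage sketch (editor's note): each remap region is programmed as a
+ CONFIG1/CONFIG2 pair: CONFIG1 takes the 4 KB-aligned input address and the
+ enable bit, CONFIG2 the output address, attributes and log2 region size.
+ Mapping a hypothetical 64 KB window (ui64AddrIn/ui64AddrOut assumed 4 KB
+ aligned) could look like:
+
+   OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+       (ui64AddrIn & ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_BASE_ADDR_IN_CLRMSK) |
+       RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN);
+   OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+       (ui64AddrOut & ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK) |
+       (16ULL << RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_REGION_SIZE_POW2_SHIFT));
+*/
+
+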
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1 (0x0828U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP2_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2 (0x0830U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1 (0x0838U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP3_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2 (0x0840U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1 (0x0848U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP4_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2 (0x0850U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG1
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1 (0x0858U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MASKFULL (IMG_UINT64_C(0x00000000FFFFF001))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP5_CONFIG2
+*/
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2 (0x0860U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF1FF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_SHIFT (6U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFE3F))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_SHIFT (5U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_TRUSTED_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_REGION_SIZE_POW2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE0))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS (0x0868U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_MASKFULL (IMG_UINT64_C(0x00000001FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_SHIFT (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_EVENT_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR (0x0870U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_UNMAPPED_CLEAR_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG (0x0878U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFFBF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_SHIFT (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_SHIFT (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_TRUSTED_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16KB (IMG_UINT64_C(0x0000000000000080))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64KB (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB (IMG_UINT64_C(0x0000000000000180))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_1MB (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_4MB (IMG_UINT64_C(0x0000000000000280))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_16MB (IMG_UINT64_C(0x0000000000000300))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_64MB (IMG_UINT64_C(0x0000000000000380))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256MB (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFC1))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
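+/*
+ Usage sketch (editor's note): the range interface packs both addresses, an
+ entry index and an enumerated region size into one 64-bit write. Loading
+ entry 3 with a 256 KB window could look like (ui64AddrIn assumed 4 KB
+ aligned; ADDR_OUT is assumed to take the output address shifted down by
+ 12 bits, matching its 28-bit field):
+
+   OSWriteHWReg64(pvRegBase, RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG,
+       ((ui64AddrOut >> 12) << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ADDR_OUT_SHIFT) |
+       (ui64AddrIn & ~RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_BASE_ADDR_IN_CLRMSK) |
+       RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_REGION_SIZE_256KB |
+       (3ULL << RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_ENTRY_SHIFT) |
+       RGX_CR_MIPS_ADDR_REMAP_RANGE_CONFIG_MODE_ENABLE_EN);
+*/
+
+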
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_READ
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ (0x0880U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_SHIFT (1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_ENTRY_CLRMSK (0XFFFFFFC1U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_READ_REQUEST_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA
+*/
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA (0x0888U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MASKFULL (IMG_UINT64_C(0xFFFFFFF7FFFFFF81))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_SHIFT (36U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_ADDR_OUT_CLRMSK (IMG_UINT64_C(0X0000000FFFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_SHIFT (32U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_OS_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_SHIFT (12U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_BASE_ADDR_IN_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000FFF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_SHIFT (11U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_TRUSTED_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_SHIFT (7U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_REGION_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF87F))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_SHIFT (0U)
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MIPS_ADDR_REMAP_RANGE_DATA_MODE_ENABLE_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_IRQ_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE (0x08A0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_ENABLE_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS (0x08A8U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+*/
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR (0x08B0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_NMI_ENABLE
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE (0x08B8U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_WRAPPER_NMI_EVENT
+*/
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT (0x08C0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_SHIFT (0U)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_DEBUG_CONFIG
+*/
+#define RGX_CR_MIPS_DEBUG_CONFIG (0x08C8U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_SHIFT (0U)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_DEBUG_CONFIG_DISABLE_PROBE_DEBUG_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MIPS_EXCEPTION_STATUS
+*/
+#define RGX_CR_MIPS_EXCEPTION_STATUS (0x08D0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000003F))
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_SHIFT (5U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_SLEEP_EN (0X00000020U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_SHIFT (4U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN (0X00000010U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_SHIFT (3U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_EXL_EN (0X00000008U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_SHIFT (2U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_NEST_ERL_EN (0X00000004U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_SHIFT (1U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_EXL_EN (0X00000002U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_SHIFT (0U)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAX
+*/
+#define RGX_CR_META_SP_MSLVDATAX (0x0A00U)
+#define RGX_CR_META_SP_MSLVDATAX_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAX_MSLVDATAX_CLRMSK (0X00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVDATAT
+*/
+#define RGX_CR_META_SP_MSLVDATAT (0x0A08U)
+#define RGX_CR_META_SP_MSLVDATAT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVDATAT_MSLVDATAT_CLRMSK (0X00000000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVCTRL0
+*/
+#define RGX_CR_META_SP_MSLVCTRL0 (0x0A10U)
+#define RGX_CR_META_SP_MSLVCTRL0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK (0X00000003U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_SHIFT (1U)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN (0X00000002U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVCTRL0_RD_EN (0X00000001U)
+
+
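+/*
+ Usage sketch (editor's note): a META slave-port read is started by writing
+ the word-aligned target address into MSLVCTRL0 together with the read and
+ auto-increment flags, then polling MSLVCTRL1 (defined below) for READY
+ before fetching the data word from MSLVDATAX:
+
+   OSWriteHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL0,
+       (ui32MetaAddr & ~RGX_CR_META_SP_MSLVCTRL0_ADDR_CLRMSK) |
+       RGX_CR_META_SP_MSLVCTRL0_AUTOINCR_EN |
+       RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+   while (!(OSReadHWReg32(pvRegBase, RGX_CR_META_SP_MSLVCTRL1) &
+            RGX_CR_META_SP_MSLVCTRL1_READY_EN))
+       ;
+   ui32Data = OSReadHWReg32(pvRegBase, RGX_CR_META_SP_MSLVDATAX);
+*/
+
+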
+/*
+ Register RGX_CR_META_SP_MSLVCTRL1
+*/
+#define RGX_CR_META_SP_MSLVCTRL1 (0x0A18U)
+#define RGX_CR_META_SP_MSLVCTRL1_MASKFULL (IMG_UINT64_C(0x00000000F7F4003F))
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_SHIFT (30U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRTHREAD_CLRMSK (0X3FFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_SHIFT (29U)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_LOCK2_INTERLOCK_EN (0X20000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_SHIFT (28U)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_ATOMIC_INTERLOCK_EN (0X10000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_SHIFT (26U)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN (0X04000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_SHIFT (25U)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_COREMEM_IDLE_EN (0X02000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_SHIFT (24U)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_CLRMSK (0XFEFFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_READY_EN (0X01000000U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_SHIFT (21U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERRID_CLRMSK (0XFF1FFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_SHIFT (20U)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_DEFERR_EN (0X00100000U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_SHIFT (18U)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_META_SP_MSLVCTRL1_WR_ACTIVE_EN (0X00040000U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_SHIFT (4U)
+#define RGX_CR_META_SP_MSLVCTRL1_THREAD_CLRMSK (0XFFFFFFCFU)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVCTRL1_TRANS_SIZE_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVCTRL1_BYTE_ROUND_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVHANDSHKE
+*/
+#define RGX_CR_META_SP_MSLVHANDSHKE (0x0A50U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_INPUT_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVHANDSHKE_OUTPUT_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICK
+*/
+#define RGX_CR_META_SP_MSLVT0KICK (0x0A80U)
+#define RGX_CR_META_SP_MSLVT0KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICK_MSLVT0KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT0KICKI
+*/
+#define RGX_CR_META_SP_MSLVT0KICKI (0x0A88U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT0KICKI_MSLVT0KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICK
+*/
+#define RGX_CR_META_SP_MSLVT1KICK (0x0A90U)
+#define RGX_CR_META_SP_MSLVT1KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICK_MSLVT1KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT1KICKI
+*/
+#define RGX_CR_META_SP_MSLVT1KICKI (0x0A98U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT1KICKI_MSLVT1KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICK
+*/
+#define RGX_CR_META_SP_MSLVT2KICK (0x0AA0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICK_MSLVT2KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT2KICKI
+*/
+#define RGX_CR_META_SP_MSLVT2KICKI (0x0AA8U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT2KICKI_MSLVT2KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICK
+*/
+#define RGX_CR_META_SP_MSLVT3KICK (0x0AB0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICK_MSLVT3KICK_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVT3KICKI
+*/
+#define RGX_CR_META_SP_MSLVT3KICKI (0x0AB8U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVT3KICKI_MSLVT3KICKI_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVRST
+*/
+#define RGX_CR_META_SP_MSLVRST (0x0AC0U)
+#define RGX_CR_META_SP_MSLVRST_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVRST_SOFTRESET_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQSTATUS
+*/
+#define RGX_CR_META_SP_MSLVIRQSTATUS (0x0AC8U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT3_EN (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN (0X00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQENABLE
+*/
+#define RGX_CR_META_SP_MSLVIRQENABLE (0x0AD0U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_MASKFULL (IMG_UINT64_C(0x000000000000000C))
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_SHIFT (3U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT1_EN (0X00000008U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_SHIFT (2U)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_META_SP_MSLVIRQENABLE_EVENT0_EN (0X00000004U)
+
+
+/*
+ Register RGX_CR_META_SP_MSLVIRQLEVEL
+*/
+#define RGX_CR_META_SP_MSLVIRQLEVEL (0x0AD8U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_SHIFT (0U)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_SP_MSLVIRQLEVEL_MODE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE
+*/
+#define RGX_CR_MTS_SCHEDULE (0x0B00U)
+#define RGX_CR_MTS_SCHEDULE_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE_DM_DM_ALL (0X0000000FU)
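+
+/*
+ * The value-style defines above (HOST_*, PRIORITY_*, CONTEXT_*, TASK_*,
+ * DM_*) are pre-shifted, so a schedule command is composed by OR-ing one
+ * choice per field. An illustrative sketch; the helper name and the
+ * particular field choices are examples, not mandated by this header:
+ */
+static inline IMG_UINT32 rgx_mts_schedule_cmd_dm0(void)
+{
+	/* Host-issued, counted task on DM0 in the interrupt context. */
+	return RGX_CR_MTS_SCHEDULE_HOST_HOST |
+	       RGX_CR_MTS_SCHEDULE_PRIORITY_PRT0 |
+	       RGX_CR_MTS_SCHEDULE_CONTEXT_INTCTX |
+	       RGX_CR_MTS_SCHEDULE_TASK_COUNTED |
+	       RGX_CR_MTS_SCHEDULE_DM_DM0;
+}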
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE1
+*/
+#define RGX_CR_MTS_SCHEDULE1 (0x10B00U)
+#define RGX_CR_MTS_SCHEDULE1_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE1_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE1_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE1_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE1_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE1_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE1_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE2
+*/
+#define RGX_CR_MTS_SCHEDULE2 (0x20B00U)
+#define RGX_CR_MTS_SCHEDULE2_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE2_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE2_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE2_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE2_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE2_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE2_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE3
+*/
+#define RGX_CR_MTS_SCHEDULE3 (0x30B00U)
+#define RGX_CR_MTS_SCHEDULE3_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE3_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE3_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE3_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE3_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE3_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE3_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE4
+*/
+#define RGX_CR_MTS_SCHEDULE4 (0x40B00U)
+#define RGX_CR_MTS_SCHEDULE4_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE4_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE4_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE4_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE4_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE4_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE4_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE5
+*/
+#define RGX_CR_MTS_SCHEDULE5 (0x50B00U)
+#define RGX_CR_MTS_SCHEDULE5_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE5_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE5_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE5_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE5_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE5_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE5_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE6
+*/
+#define RGX_CR_MTS_SCHEDULE6 (0x60B00U)
+#define RGX_CR_MTS_SCHEDULE6_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE6_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE6_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE6_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE6_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE6_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE6_DM_DM_ALL (0X0000000FU)
+
+
+/*
+ Register RGX_CR_MTS_SCHEDULE7
+*/
+#define RGX_CR_MTS_SCHEDULE7 (0x70B00U)
+#define RGX_CR_MTS_SCHEDULE7_MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_MTS_SCHEDULE7_HOST_SHIFT (8U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_MTS_SCHEDULE7_HOST_BG_TIMER (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_HOST_HOST (0X00000100U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_SHIFT (6U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT1 (0X00000040U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT2 (0X00000080U)
+#define RGX_CR_MTS_SCHEDULE7_PRIORITY_PRT3 (0X000000C0U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_SHIFT (5U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_BGCTX (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_CONTEXT_INTCTX (0X00000020U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_SHIFT (4U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_MTS_SCHEDULE7_TASK_NON_COUNTED (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_TASK_COUNTED (0X00000010U)
+#define RGX_CR_MTS_SCHEDULE7_DM_SHIFT (0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_CLRMSK (0XFFFFFFF0U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM0 (00000000U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM1 (0X00000001U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM2 (0X00000002U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM3 (0X00000003U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM4 (0X00000004U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM5 (0X00000005U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM6 (0X00000006U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM7 (0X00000007U)
+#define RGX_CR_MTS_SCHEDULE7_DM_DM_ALL (0X0000000FU)
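+
+/*
+ * RGX_CR_MTS_SCHEDULE1..7 above carry field layouts identical to
+ * RGX_CR_MTS_SCHEDULE and sit at 0x10B00, 0x20B00, ... 0x70B00, i.e. the
+ * base offset plus n * 0x10000 for instance n. A sketch of the implied
+ * addressing; the helper is an inference from the listed offsets, not
+ * something this header defines:
+ */
+static inline IMG_UINT32 rgx_mts_schedule_offset(IMG_UINT32 ui32Instance)
+{
+	/* Instance 0 is RGX_CR_MTS_SCHEDULE itself; each further instance
+	 * lies 0x10000 bytes higher, matching SCHEDULE1..SCHEDULE7 above. */
+	return RGX_CR_MTS_SCHEDULE + (ui32Instance * 0x10000U);
+}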
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC (0x0B30U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC (0x0B38U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC (0x0B40U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC
+*/
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC (0x0B48U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_MTS_GARTEN_WRAPPER_CONFIG
+*/
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG (0x0B50U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__MASKFULL (IMG_UINT64_C(0x000FF0FFFFFFF701))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFF001))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFFF0FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT (44U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK (IMG_UINT64_C(0XFFF00FFFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT (40U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_SHIFT (12U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_SHIFT (9U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF9FF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_SHIFT (8U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_SHIFT (0U)
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_MTS (IMG_UINT64_C(0x0000000000000001))
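+
+/*
+ * The wider fields in this register use 64-bit CLRMSK values, so updates
+ * follow the same clear-then-OR pattern at 64 bits. A minimal sketch,
+ * assuming the IMG_UINT64 typedef; names are illustrative:
+ */
+static inline IMG_UINT64 rgx_garten_set_fence_dm(IMG_UINT64 ui64Cfg,
+						 IMG_UINT64 ui64Dm)
+{
+	/* Replace the FENCE_DM field (bits 43:40) with ui64Dm. */
+	return (ui64Cfg & RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_CLRMSK) |
+	       (ui64Dm << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT);
+}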
+
+
+/*
+ Register RGX_CR_MTS_INTCTX
+*/
+#define RGX_CR_MTS_INTCTX (0x0B98U)
+#define RGX_CR_MTS_INTCTX_MASKFULL (IMG_UINT64_C(0x000000003FFFFFFF))
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_SHIFT (22U)
+#define RGX_CR_MTS_INTCTX_DM_HOST_SCHEDULE_CLRMSK (0XC03FFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_PTR_SHIFT (18U)
+#define RGX_CR_MTS_INTCTX_DM_PTR_CLRMSK (0XFFC3FFFFU)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_SHIFT (16U)
+#define RGX_CR_MTS_INTCTX_THREAD_ACTIVE_CLRMSK (0XFFFCFFFFU)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_SHIFT (8U)
+#define RGX_CR_MTS_INTCTX_DM_TIMER_SCHEDULE_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_SHIFT (0U)
+#define RGX_CR_MTS_INTCTX_DM_INTERRUPT_SCHEDULE_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX
+*/
+#define RGX_CR_MTS_BGCTX (0x0BA0U)
+#define RGX_CR_MTS_BGCTX_MASKFULL (IMG_UINT64_C(0x0000000000003FFF))
+#define RGX_CR_MTS_BGCTX_DM_PTR_SHIFT (10U)
+#define RGX_CR_MTS_BGCTX_DM_PTR_CLRMSK (0XFFFFC3FFU)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_SHIFT (8U)
+#define RGX_CR_MTS_BGCTX_THREAD_ACTIVE_CLRMSK (0XFFFFFCFFU)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_DM_NONCOUNTED_SCHEDULE_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE
+*/
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE (0x0BA8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_SHIFT (56U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM7_CLRMSK (IMG_UINT64_C(0X00FFFFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_SHIFT (48U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM6_CLRMSK (IMG_UINT64_C(0XFF00FFFFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_SHIFT (40U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM5_CLRMSK (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_SHIFT (32U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM4_CLRMSK (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT (24U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_SHIFT (16U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_SHIFT (8U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM1_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_SHIFT (0U)
+#define RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
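+
+/*
+ * Extraction is the inverse of the insert pattern: AND with the field mask
+ * (the complement of CLRMSK), then shift down. A hedged sketch pulling one
+ * per-DM schedule byte out of the 64-bit value above:
+ */
+static inline IMG_UINT32 rgx_bgctx_counted_dm3(IMG_UINT64 ui64Val)
+{
+	/* DM3 occupies bits 31:24 of the counted-schedule register. */
+	return (IMG_UINT32)((ui64Val &
+		~RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_CLRMSK) >>
+		RGX_CR_MTS_BGCTX_COUNTED_SCHEDULE_DM3_SHIFT);
+}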
+
+
+/*
+ Register RGX_CR_MTS_GPU_INT_STATUS
+*/
+#define RGX_CR_MTS_GPU_INT_STATUS (0x0BB0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_SHIFT (0U)
+#define RGX_CR_MTS_GPU_INT_STATUS_STATUS_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_META_BOOT
+*/
+#define RGX_CR_META_BOOT (0x0BF8U)
+#define RGX_CR_META_BOOT_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_META_BOOT_MODE_SHIFT (0U)
+#define RGX_CR_META_BOOT_MODE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_META_BOOT_MODE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_GARTEN_SLC
+*/
+#define RGX_CR_GARTEN_SLC (0x0BB8U)
+#define RGX_CR_GARTEN_SLC_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_SHIFT (0U)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_GARTEN_SLC_FORCE_COHERENCY_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_PPP
+*/
+#define RGX_CR_PPP (0x0CD0U)
+#define RGX_CR_PPP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CHECKSUM_SHIFT (0U)
+#define RGX_CR_PPP_CHECKSUM_CLRMSK (00000000U)
+
+
+#define RGX_CR_ISP_RENDER_DIR_TYPE_MASK (0x00000003U)
+/*
+ Top-left to bottom-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TL2BR (0x00000000U)
+/*
+ Top-right to bottom-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_TR2BL (0x00000001U)
+/*
+ Bottom-left to top-right */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BL2TR (0x00000002U)
+/*
+ Bottom-right to top-left */
+#define RGX_CR_ISP_RENDER_DIR_TYPE_BR2TL (0x00000003U)
+
+
+#define RGX_CR_ISP_RENDER_MODE_TYPE_MASK (0x00000003U)
+/*
+ Normal render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_NORM (0x00000000U)
+/*
+ Fast 2D render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_2D (0x00000002U)
+/*
+ Fast scale render */
+#define RGX_CR_ISP_RENDER_MODE_TYPE_FAST_SCALE (0x00000003U)
+
+
+/*
+ Register RGX_CR_ISP_RENDER
+*/
+#define RGX_CR_ISP_RENDER (0x0F08U)
+#define RGX_CR_ISP_RENDER_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_ISP_RENDER_RESUME_SHIFT (4U)
+#define RGX_CR_ISP_RENDER_RESUME_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_ISP_RENDER_RESUME_EN (0X00000010U)
+#define RGX_CR_ISP_RENDER_DIR_SHIFT (2U)
+#define RGX_CR_ISP_RENDER_DIR_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_ISP_RENDER_DIR_TL2BR (00000000U)
+#define RGX_CR_ISP_RENDER_DIR_TR2BL (0X00000004U)
+#define RGX_CR_ISP_RENDER_DIR_BL2TR (0X00000008U)
+#define RGX_CR_ISP_RENDER_DIR_BR2TL (0X0000000CU)
+#define RGX_CR_ISP_RENDER_MODE_SHIFT (0U)
+#define RGX_CR_ISP_RENDER_MODE_CLRMSK (0XFFFFFFFCU)
+#define RGX_CR_ISP_RENDER_MODE_NORM (00000000U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_2D (0X00000002U)
+#define RGX_CR_ISP_RENDER_MODE_FAST_SCALE (0X00000003U)
+
+
+/*
+ Register RGX_CR_ISP_CTL
+*/
+#define RGX_CR_ISP_CTL (0x0F38U)
+#define RGX_CR_ISP_CTL_MASKFULL (IMG_UINT64_C(0x000000007FFFF3FF))
+#define RGX_CR_ISP_CTL_LINE_STYLE_SHIFT (30U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_EN (0X40000000U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_SHIFT (29U)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_ISP_CTL_LINE_STYLE_PIX_EN (0X20000000U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_SHIFT (28U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_ISP_CTL_PAIR_TILES_VERT_EN (0X10000000U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_SHIFT (27U)
+#define RGX_CR_ISP_CTL_PAIR_TILES_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_ISP_CTL_PAIR_TILES_EN (0X08000000U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_SHIFT (26U)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_ISP_CTL_CREQ_BUF_EN_EN (0X04000000U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_SHIFT (25U)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_ISP_CTL_TILE_AGE_EN_EN (0X02000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_SHIFT (23U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_CLRMSK (0XFE7FFFFFU)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX9 (00000000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_DX10 (0X00800000U)
+#define RGX_CR_ISP_CTL_ISP_SAMPLE_POS_MODE_OGL (0X01000000U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_SHIFT (21U)
+#define RGX_CR_ISP_CTL_NUM_TILES_PER_USC_CLRMSK (0XFF9FFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_SHIFT (20U)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_ISP_CTL_DBIAS_IS_INT_EN (0X00100000U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_SHIFT (19U)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_ISP_CTL_OVERLAP_CHECK_MODE_EN (0X00080000U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_SHIFT (18U)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_ISP_CTL_PT_UPFRONT_DEPTH_DISABLE_EN (0X00040000U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_SHIFT (17U)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_ISP_CTL_PROCESS_EMPTY_TILES_EN (0X00020000U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_SHIFT (16U)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_ISP_CTL_SAMPLE_POS_EN (0X00010000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_SHIFT (12U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ONE (00000000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWO (0X00001000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THREE (0X00002000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOUR (0X00003000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIVE (0X00004000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIX (0X00005000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SEVEN (0X00006000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_EIGHT (0X00007000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_NINE (0X00008000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TEN (0X00009000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_ELEVEN (0X0000A000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_TWELVE (0X0000B000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_THIRTEEN (0X0000C000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FOURTEEN (0X0000D000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_FIFTEEN (0X0000E000U)
+#define RGX_CR_ISP_CTL_PIPE_ENABLE_PIPE_SIXTEEN (0X0000F000U)
+#define RGX_CR_ISP_CTL_VALID_ID_SHIFT (4U)
+#define RGX_CR_ISP_CTL_VALID_ID_CLRMSK (0XFFFFFC0FU)
+#define RGX_CR_ISP_CTL_UPASS_START_SHIFT (0U)
+#define RGX_CR_ISP_CTL_UPASS_START_CLRMSK (0XFFFFFFF0U)
+
+
+/*
+ Register RGX_CR_ISP_STATUS
+*/
+#define RGX_CR_ISP_STATUS (0x1038U)
+#define RGX_CR_ISP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_SHIFT (2U)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_ISP_STATUS_SPLIT_MAX_EN (0X00000004U)
+#define RGX_CR_ISP_STATUS_ACTIVE_SHIFT (1U)
+#define RGX_CR_ISP_STATUS_ACTIVE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_ISP_STATUS_ACTIVE_EN (0X00000002U)
+#define RGX_CR_ISP_STATUS_EOR_SHIFT (0U)
+#define RGX_CR_ISP_STATUS_EOR_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_ISP_STATUS_EOR_EN (0X00000001U)
+
+
+/*
+ Register group: RGX_CR_ISP_XTP_RESUME, with 64 repeats
+*/
+#define RGX_CR_ISP_XTP_RESUME_REPEATCOUNT (64)
+/*
+ Register RGX_CR_ISP_XTP_RESUME0
+*/
+#define RGX_CR_ISP_XTP_RESUME0 (0x3A00U)
+#define RGX_CR_ISP_XTP_RESUME0_MASKFULL (IMG_UINT64_C(0x00000000003FF3FF))
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_SHIFT (12U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_X_CLRMSK (0XFFC00FFFU)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_SHIFT (0U)
+#define RGX_CR_ISP_XTP_RESUME0_TILE_Y_CLRMSK (0XFFFFFC00U)
+
+
+/*
+ Register group: RGX_CR_ISP_XTP_STORE, with 32 repeats
+*/
+#define RGX_CR_ISP_XTP_STORE_REPEATCOUNT (32)
+/*
+ Register RGX_CR_ISP_XTP_STORE0
+*/
+#define RGX_CR_ISP_XTP_STORE0 (0x3C00U)
+#define RGX_CR_ISP_XTP_STORE0_MASKFULL (IMG_UINT64_C(0x000000007F3FF3FF))
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_SHIFT (30U)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_ACTIVE_EN (0X40000000U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_SHIFT (29U)
+#define RGX_CR_ISP_XTP_STORE0_EOR_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_EOR_EN (0X20000000U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_SHIFT (28U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_LAST_EN (0X10000000U)
+#define RGX_CR_ISP_XTP_STORE0_MT_SHIFT (24U)
+#define RGX_CR_ISP_XTP_STORE0_MT_CLRMSK (0XF0FFFFFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_SHIFT (12U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_X_CLRMSK (0XFFC00FFFU)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_SHIFT (0U)
+#define RGX_CR_ISP_XTP_STORE0_TILE_Y_CLRMSK (0XFFFFFC00U)
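+
+/*
+ * Both repeat groups above expose only their first instance. The spacing
+ * between the two bases (0x3C00 - 0x3A00 = 0x200 = 64 * 8) is consistent
+ * with an 8-byte stride per repeat, which the sketch below assumes; the
+ * header itself does not state the stride:
+ */
+static inline IMG_UINT32 rgx_isp_xtp_store_offset(IMG_UINT32 ui32Repeat)
+{
+	/* Assumed 8-byte stride between consecutive STORE registers. */
+	return RGX_CR_ISP_XTP_STORE0 + (ui32Repeat * 8U);
+}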
+
+
+/*
+ Register group: RGX_CR_BIF_CAT_BASE, with 8 repeats
+*/
+#define RGX_CR_BIF_CAT_BASE_REPEATCOUNT (8)
+/*
+ Register RGX_CR_BIF_CAT_BASE0
+*/
+#define RGX_CR_BIF_CAT_BASE0 (0x1200U)
+#define RGX_CR_BIF_CAT_BASE0_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSIZE (4096U)
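+
+/*
+ * ADDR_ALIGNSHIFT equals ADDR_SHIFT here, so a page-catalogue base that is
+ * already 4096-byte aligned can be masked straight into the register with
+ * no shifting. A minimal sketch under that reading of the defines:
+ */
+static inline IMG_UINT64 rgx_bif_cat_base_value(IMG_UINT64 ui64PCAddr)
+{
+	/* Keep bits 39:12 (the ADDR field); the low 12 bits must already
+	 * be zero for a correctly aligned 4 KB page-catalogue address. */
+	return ui64PCAddr & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK;
+}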
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE1
+*/
+#define RGX_CR_BIF_CAT_BASE1 (0x1208U)
+#define RGX_CR_BIF_CAT_BASE1_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE1_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE2
+*/
+#define RGX_CR_BIF_CAT_BASE2 (0x1210U)
+#define RGX_CR_BIF_CAT_BASE2_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE2_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE3
+*/
+#define RGX_CR_BIF_CAT_BASE3 (0x1218U)
+#define RGX_CR_BIF_CAT_BASE3_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE3_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE4
+*/
+#define RGX_CR_BIF_CAT_BASE4 (0x1220U)
+#define RGX_CR_BIF_CAT_BASE4_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE4_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE5
+*/
+#define RGX_CR_BIF_CAT_BASE5 (0x1228U)
+#define RGX_CR_BIF_CAT_BASE5_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE5_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE6
+*/
+#define RGX_CR_BIF_CAT_BASE6 (0x1230U)
+#define RGX_CR_BIF_CAT_BASE6_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE6_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE7
+*/
+#define RGX_CR_BIF_CAT_BASE7 (0x1238U)
+#define RGX_CR_BIF_CAT_BASE7_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF000))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_BIF_CAT_BASE7_ADDR_ALIGNSIZE (4096U)
+
+
+/*
+ Register RGX_CR_BIF_CAT_BASE_INDEX
+*/
+#define RGX_CR_BIF_CAT_BASE_INDEX (0x1240U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_MASKFULL (IMG_UINT64_C(0x00070707073F0707))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_SHIFT (48U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RVTX_CLRMSK (IMG_UINT64_C(0XFFF8FFFFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_SHIFT (40U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_RAY_CLRMSK (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_SHIFT (32U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_HOST_CLRMSK (IMG_UINT64_C(0XFFFFFFF8FFFFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_SHIFT (24U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TLA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF8FFFFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_SHIFT (19U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC7FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_SHIFT (16U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_CDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF8FFFF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_SHIFT (8U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_PIXEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF8FF))
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_SHIFT (0U)
+#define RGX_CR_BIF_CAT_BASE_INDEX_TA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF8))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_VCE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0 (0x1248U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE0_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_TE0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE0 (0x1250U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE0_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_ALIST0
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0 (0x1260U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST0_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_VCE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1 (0x1268U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_VCE1_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_TE1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_TE1 (0x1270U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_TE1_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_PM_CAT_BASE_ALIST1
+*/
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1 (0x1280U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_MASKFULL (IMG_UINT64_C(0x0FFFFFFFFFFFF003))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_SHIFT (40U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_INIT_PAGE_CLRMSK (IMG_UINT64_C(0XF00000FFFFFFFFFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_SHIFT (12U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_SHIFT (1U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_WRAP_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_SHIFT (0U)
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_BIF_PM_CAT_BASE_ALIST1_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_BIF_MMU_ENTRY_STATUS
+*/
+#define RGX_CR_BIF_MMU_ENTRY_STATUS (0x1288U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_MASKFULL (IMG_UINT64_C(0x000000FFFFFFF0F3))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_SHIFT (12U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT (4U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT (0U)
+#define RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
+/*
+ Register RGX_CR_BIF_MMU_ENTRY
+*/
+#define RGX_CR_BIF_MMU_ENTRY (0x1290U)
+#define RGX_CR_BIF_MMU_ENTRY_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_SHIFT (1U)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_ENTRY_ENABLE_EN (0X00000002U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_SHIFT (0U)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_ENTRY_PENDING_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_CTRL_INVAL
+*/
+#define RGX_CR_BIF_CTRL_INVAL (0x12A0U)
+#define RGX_CR_BIF_CTRL_INVAL_MASKFULL (IMG_UINT64_C(0x000000000000000F))
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_SHIFT (3U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_INVAL_TLB1_EN (0X00000008U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_SHIFT (2U)
+#define RGX_CR_BIF_CTRL_INVAL_PC_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_INVAL_PC_EN (0X00000004U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_SHIFT (1U)
+#define RGX_CR_BIF_CTRL_INVAL_PD_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_INVAL_PD_EN (0X00000002U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_SHIFT (0U)
+#define RGX_CR_BIF_CTRL_INVAL_PT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_INVAL_PT_EN (0X00000001U)
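+
+/*
+ * The four bits above appear to be independent invalidation request bits
+ * for the TLB and the page-catalogue/directory/table caches, so a full
+ * invalidation would OR all four _EN values. Illustrative only (note the
+ * _SKETCH suffix); how the value is written back is left to the driver:
+ */
+#define RGX_CR_BIF_CTRL_INVAL_ALL_SKETCH \
+	(RGX_CR_BIF_CTRL_INVAL_TLB1_EN | \
+	 RGX_CR_BIF_CTRL_INVAL_PC_EN   | \
+	 RGX_CR_BIF_CTRL_INVAL_PD_EN   | \
+	 RGX_CR_BIF_CTRL_INVAL_PT_EN)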
+
+
+/*
+ Register RGX_CR_BIF_CTRL
+*/
+#define RGX_CR_BIF_CTRL (0x12A8U)
+#define RGX_CR_BIF_CTRL__XE_MEMORY_HIERARCHY__MASKFULL (IMG_UINT64_C(0x00000000000001FF))
+#define RGX_CR_BIF_CTRL_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_CTRL__XE_MEMORY_HIERARCHY__PAUSE_MMU_BIF4_SHIFT (8U)
+#define RGX_CR_BIF_CTRL__XE_MEMORY_HIERARCHY__PAUSE_MMU_BIF4_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_BIF_CTRL__XE_MEMORY_HIERARCHY__PAUSE_MMU_BIF4_EN (0X00000100U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_SHIFT (7U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_QUEUE_BYPASS_EN (0X00000080U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_SHIFT (6U)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BIF_CTRL_ENABLE_MMU_AUTO_PREFETCH_EN (0X00000040U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_SHIFT (5U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF3_EN (0X00000020U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_SHIFT (4U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF2_EN (0X00000010U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_SHIFT (3U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_CTRL_PAUSE_BIF1_EN (0X00000008U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_SHIFT (2U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_PM_EN (0X00000004U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_SHIFT (1U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF1_EN (0X00000002U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_SHIFT (0U)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_CTRL_PAUSE_MMU_BIF0_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS (0x12B0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS (0x12B8U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__MASKFULL (IMG_UINT64_C(0x001FFFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__RNW_SHIFT (52U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__RNW_CLRMSK (IMG_UINT64_C(0XFFEFFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__RNW_EN (IMG_UINT64_C(0X0010000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_SB_SHIFT (46U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_SB_CLRMSK (IMG_UINT64_C(0XFFF03FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFC0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
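+
+/*
+ * A hedged sketch of decoding a bank-0 fault from the two registers above;
+ * ui32Mmu and ui64Req stand for values read from the MMU_STATUS and
+ * REQ_STATUS registers by the driver's own read helpers (not shown):
+ */
+static inline IMG_UINT64 rgx_bif_fault_address(IMG_UINT32 ui32Mmu,
+					       IMG_UINT64 ui64Req)
+{
+	/* No fault latched: report a zero address in this sketch. */
+	if ((ui32Mmu & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN) == 0U)
+		return 0;
+	/* ADDRESS_ALIGNSHIFT == ADDRESS_SHIFT, so the masked value is the
+	 * 16-byte aligned faulting address with no further shifting. */
+	return ui64Req & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK;
+}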
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK1_MMU_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS (0x12C0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_FAULT_BANK1_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_BANK1_REQ_STATUS
+*/
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS (0x12C8U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_BANK1_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_BIF_MMU_STATUS
+*/
+#define RGX_CR_BIF_MMU_STATUS (0x12D0U)
+#define RGX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000001FFFFFF7))
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_SHIFT (28U)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PM_FAULT_EN (0X10000000U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U)
+#define RGX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0XF00FFFFFU)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U)
+#define RGX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0XFFF00FFFU)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U)
+#define RGX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0XFFFFF00FU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_MMU_STATUS_STALLED_EN (0X00000004U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_MMU_STATUS_PAUSED_EN (0X00000002U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_MMU_STATUS_BUSY_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BIF_READS_EXT_STATUS
+*/
+#define RGX_CR_BIF_READS_EXT_STATUS (0x1320U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_SHIFT (16U)
+#define RGX_CR_BIF_READS_EXT_STATUS_MMU_CLRMSK (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_SHIFT (0U)
+#define RGX_CR_BIF_READS_EXT_STATUS_BANK1_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_READS_INT_STATUS
+*/
+#define RGX_CR_BIF_READS_INT_STATUS (0x1328U)
+#define RGX_CR_BIF_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_SHIFT (16U)
+#define RGX_CR_BIF_READS_INT_STATUS_MMU_CLRMSK (0XFF80FFFFU)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_SHIFT (0U)
+#define RGX_CR_BIF_READS_INT_STATUS_BANK1_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIFPM_READS_INT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_INT_STATUS (0x1330U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_SHIFT (0U)
+#define RGX_CR_BIFPM_READS_INT_STATUS_BANK0_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIFPM_READS_EXT_STATUS
+*/
+#define RGX_CR_BIFPM_READS_EXT_STATUS (0x1338U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_SHIFT (0U)
+#define RGX_CR_BIFPM_READS_EXT_STATUS_BANK0_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIFPM_STATUS_MMU
+*/
+#define RGX_CR_BIFPM_STATUS_MMU (0x1350U)
+#define RGX_CR_BIFPM_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_SHIFT (0U)
+#define RGX_CR_BIFPM_STATUS_MMU_REQUESTS_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_STATUS_MMU
+*/
+#define RGX_CR_BIF_STATUS_MMU (0x1358U)
+#define RGX_CR_BIF_STATUS_MMU_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_SHIFT (0U)
+#define RGX_CR_BIF_STATUS_MMU_REQUESTS_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_FAULT_READ
+*/
+#define RGX_CR_BIF_FAULT_READ (0x13E0U)
+#define RGX_CR_BIF_FAULT_READ_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_SHIFT (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_BIF_FAULT_READ_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS (0x1430U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS
+*/
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS (0x1438U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x0007FFFFFFFFFFF0))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_SHIFT (50U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFC0FFFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_MCU_FENCE
+*/
+#define RGX_CR_MCU_FENCE (0x1740U)
+#define RGX_CR_MCU_FENCE_MASKFULL (IMG_UINT64_C(0x000007FFFFFFFFE0))
+#define RGX_CR_MCU_FENCE_DM_SHIFT (40U)
+#define RGX_CR_MCU_FENCE_DM_CLRMSK (IMG_UINT64_C(0XFFFFF8FFFFFFFFFF))
+#define RGX_CR_MCU_FENCE_DM_VERTEX (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_MCU_FENCE_DM_PIXEL (IMG_UINT64_C(0x0000010000000000))
+#define RGX_CR_MCU_FENCE_DM_COMPUTE (IMG_UINT64_C(0x0000020000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY_VERTEX (IMG_UINT64_C(0x0000030000000000))
+#define RGX_CR_MCU_FENCE_DM_RAY (IMG_UINT64_C(0x0000040000000000))
+#define RGX_CR_MCU_FENCE_ADDR_SHIFT (5U)
+#define RGX_CR_MCU_FENCE_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT (5U)
+#define RGX_CR_MCU_FENCE_ADDR_ALIGNSIZE (32U)
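
The DM_* values above are pre-shifted encodings of the 3-bit DM field, and the ADDR field carries an ALIGNSHIFT/ALIGNSIZE pair: the fence address must be ALIGNSIZE-byte (32-byte) aligned, and because ADDR_SHIFT equals ADDR_ALIGNSHIFT the aligned byte address lands directly in the field. A hedged sketch of composing the fence word; byte_addr, regs and rgx_write64() are assumptions for illustration, not part of this header:

    /* Sketch only: fence the pixel data master at a 32-byte-aligned address. */
    uint64_t ui64Fence = RGX_CR_MCU_FENCE_DM_PIXEL |
                         (((uint64_t)byte_addr >> RGX_CR_MCU_FENCE_ADDR_ALIGNSHIFT)
                          << RGX_CR_MCU_FENCE_ADDR_SHIFT);
    rgx_write64(regs, RGX_CR_MCU_FENCE, ui64Fence);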
+
+
+/*
+ Register RGX_CR_SPFILTER_SIGNAL_DESCR
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR (0x2700U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_SHIFT (0U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_CLRMSK (0XFFFF0000U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSHIFT (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_SIZE_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_SPFILTER_SIGNAL_DESCR_MIN
+*/
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN (0x2708U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_MASKFULL (IMG_UINT64_C(0x000000FFFFFFFFF0))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_SHIFT (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSHIFT (4U)
+#define RGX_CR_SPFILTER_SIGNAL_DESCR_MIN_ADDR_ALIGNSIZE (16U)
+
+
+/*
+ Register RGX_CR_SLC_CTRL_MISC
+*/
+#define RGX_CR_SLC_CTRL_MISC (0x3800U)
+#define RGX_CR_SLC_CTRL_MISC_MASKFULL (IMG_UINT64_C(0xFFFFFFFF00FF010F))
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_SHIFT (32U)
+#define RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0X00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (16U)
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_64_BYTE (IMG_UINT64_C(0000000000000000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_INTERLEAVED_128_BYTE (IMG_UINT64_C(0x0000000000010000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH1 (IMG_UINT64_C(0x0000000000100000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_SIMPLE_HASH2 (IMG_UINT64_C(0x0000000000110000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1 (IMG_UINT64_C(0x0000000000200000))
+#define RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH2_SCRAMBLE (IMG_UINT64_C(0x0000000000210000))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_SHIFT (8U)
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_SLC_CTRL_MISC_PAUSE_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_SHIFT (3U)
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_SLC_CTRL_MISC_RESP_PRIORITY_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_SHIFT (2U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_LINE_USE_LIMIT_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_SLC_CTRL_FLUSH_INVAL
+*/
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL (0x3818U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_MASKFULL (IMG_UINT64_C(0x00000000800007FF))
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_SHIFT (31U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_CLRMSK (0X7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_LAZY_EN (0X80000000U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_SHIFT (10U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_VERTEX_EN (0X00000400U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_SHIFT (9U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_RAY_EN (0X00000200U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_SHIFT (8U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_FRC_EN (0X00000100U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_SHIFT (7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXE_EN (0X00000080U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_SHIFT (6U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_VXD_EN (0X00000040U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_SHIFT (5U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_HOST_META_EN (0X00000020U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_SHIFT (4U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_MMU_EN (0X00000010U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_SHIFT (3U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_COMPUTE_EN (0X00000008U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_SHIFT (2U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_PIXEL_EN (0X00000004U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_DM_TA_EN (0X00000002U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS0
+*/
+#define RGX_CR_SLC_STATUS0 (0x3820U)
+#define RGX_CR_SLC_STATUS0_MASKFULL (IMG_UINT64_C(0x0000000000000007))
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_SHIFT (2U)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN (0X00000004U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_SHIFT (1U)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_STATUS0_INVAL_PENDING_EN (0X00000002U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_SHIFT (0U)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_STATUS0_FLUSH_PENDING_EN (0X00000001U)
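
RGX_CR_SLC_CTRL_FLUSH_INVAL kicks a cache-maintenance operation and RGX_CR_SLC_STATUS0 reports whether one is still in flight, which suggests the usual kick-then-poll pattern. A minimal sketch, again assuming hypothetical rgx_read32()/rgx_write32() helpers; udelay() and -ETIMEDOUT come from <linux/delay.h> and <linux/errno.h>, and the poll budget is arbitrary:

    static int rgx_slc_flush_inval_all(void __iomem *regs)
    {
        unsigned int uiTries = 1000;

        rgx_write32(regs, RGX_CR_SLC_CTRL_FLUSH_INVAL,
                    RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);

        /* Wait for the combined flush+invalidate to retire. */
        while (rgx_read32(regs, RGX_CR_SLC_STATUS0) &
               RGX_CR_SLC_STATUS0_FLUSH_INVAL_PENDING_EN) {
            if (!--uiTries)
                return -ETIMEDOUT;
            udelay(10);
        }
        return 0;
    }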
+
+
+/*
+ Register RGX_CR_SLC_CTRL_BYPASS
+*/
+#define RGX_CR_SLC_CTRL_BYPASS (0x3828U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__MASKFULL (IMG_UINT64_C(0x0003FFFFFFFF7FFF))
+#define RGX_CR_SLC_CTRL_BYPASS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_RREQ_SHIFT (49U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_RREQ_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_RREQ_EN (0X00020000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_CREQ_SHIFT (48U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_CREQ_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_CREQ_EN (0X00010000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_PREQ_SHIFT (47U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_PREQ_CLRMSK (0XFFFF7FFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_PREQ_EN (0X00008000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_DBSC_SHIFT (46U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_DBSC_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_IPF_DBSC_EN (0X00004000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TCU_SHIFT (45U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TCU_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TCU_EN (0X00002000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_PBE_SHIFT (44U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_PBE_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_PBE_EN (0X00001000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_ISP_SHIFT (43U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_ISP_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_ISP_EN (0X00000800U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_PM_SHIFT (42U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_PM_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_PM_EN (0X00000400U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TDM_SHIFT (41U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TDM_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TDM_EN (0X00000200U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_CDM_SHIFT (40U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_CDM_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_CDM_EN (0X00000100U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_PDS_STATE_SHIFT (39U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_PDS_STATE_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_PDS_STATE_EN (0X00000080U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_DB_SHIFT (38U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_DB_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_DB_EN (0X00000040U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_VTX_VAR_SHIFT (37U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_VTX_VAR_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TSPF_VTX_VAR_EN (0X00000020U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_VDM_SHIFT (36U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_VDM_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_VDM_EN (0X00000010U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PSG_STREAM_SHIFT (35U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PSG_STREAM_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PSG_STREAM_EN (0X00000008U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PSG_REGION_SHIFT (34U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PSG_REGION_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PSG_REGION_EN (0X00000004U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_VCE_SHIFT (33U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_VCE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_VCE_EN (0X00000002U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PPP_SHIFT (32U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PPP_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__REQ_TA_PPP_EN (0X00000001U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_FASTRENDER_SHIFT (31U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_FASTRENDER_CLRMSK (0X7FFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_FASTRENDER_EN (0X80000000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PM_ALIST_SHIFT (30U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PM_ALIST_CLRMSK (0XBFFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PM_ALIST_EN (0X40000000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PB_TE_SHIFT (29U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PB_TE_CLRMSK (0XDFFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PB_TE_EN (0X20000000U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PB_VCE_SHIFT (28U)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PB_VCE_CLRMSK (0XEFFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS__XE_MEMORY_HIERARCHY__DM_PB_VCE_EN (0X10000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_SHIFT (27U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_CLRMSK (0XF7FFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_VERTEX_EN (0X08000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_SHIFT (26U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_CLRMSK (0XFBFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_RAY_EN (0X04000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_SHIFT (25U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_CLRMSK (0XFDFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN (0X02000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_SHIFT (24U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_CLRMSK (0XFEFFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN (0X01000000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_SHIFT (23U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_CLRMSK (0XFF7FFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_FBDC_EN (0X00800000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_SHIFT (22U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TLA_EN (0X00400000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_SHIFT (21U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_CLRMSK (0XFFDFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_N_EN (0X00200000U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_SHIFT (20U)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN (0X00100000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_SHIFT (19U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MCU_EN (0X00080000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_SHIFT (18U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_PDS_EN (0X00040000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_SHIFT (17U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TPF_EN (0X00020000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_SHIFT (16U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_TA_TPC_EN (0X00010000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_SHIFT (15U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_CLRMSK (0XFFFF7FFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN (0X00008000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_SHIFT (14U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_USC_EN (0X00004000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_SHIFT (13U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_META_EN (0X00002000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_SHIFT (12U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_HOST_EN (0X00001000U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_SHIFT (11U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PT_EN (0X00000800U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_SHIFT (10U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PD_EN (0X00000400U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_SHIFT (9U)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_REQ_MMU_PC_EN (0X00000200U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_SHIFT (8U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_FRC_EN (0X00000100U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_SHIFT (7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXE_EN (0X00000080U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_SHIFT (6U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_VXD_EN (0X00000040U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_SHIFT (5U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_HOST_META_EN (0X00000020U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_SHIFT (4U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_MMU_EN (0X00000010U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_SHIFT (3U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_COMPUTE_EN (0X00000008U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_SHIFT (2U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_PIXEL_EN (0X00000004U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_SHIFT (1U)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_CTRL_BYPASS_DM_TA_EN (0X00000002U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CTRL_BYPASS_ALL_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS1
+*/
+#define RGX_CR_SLC_STATUS1 (0x3870U)
+#define RGX_CR_SLC_STATUS1_MASKFULL (IMG_UINT64_C(0x800003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_SHIFT (63U)
+#define RGX_CR_SLC_STATUS1_PAUSED_CLRMSK (IMG_UINT64_C(0X7FFFFFFFFFFFFFFF))
+#define RGX_CR_SLC_STATUS1_PAUSED_EN (IMG_UINT64_C(0X8000000000000000))
+#define RGX_CR_SLC_STATUS1_READS1_SHIFT (32U)
+#define RGX_CR_SLC_STATUS1_READS1_CLRMSK (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS1_READS0_SHIFT (16U)
+#define RGX_CR_SLC_STATUS1_READS0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS1_READS1_EXT_SHIFT (8U)
+#define RGX_CR_SLC_STATUS1_READS1_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS1_READS0_EXT_SHIFT (0U)
+#define RGX_CR_SLC_STATUS1_READS0_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_SLC_IDLE
+*/
+#define RGX_CR_SLC_IDLE (0x3898U)
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MASKFULL (IMG_UINT64_C(0x00000000000003FF))
+#define RGX_CR_SLC_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MH_SYSARB1_SHIFT (9U)
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MH_SYSARB1_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MH_SYSARB1_EN (0X00000200U)
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MH_SYSARB0_SHIFT (8U)
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MH_SYSARB0_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC_IDLE__XE_MEMORY_HIERARCHY__MH_SYSARB0_EN (0X00000100U)
+#define RGX_CR_SLC_IDLE_IMGBV4_SHIFT (7U)
+#define RGX_CR_SLC_IDLE_IMGBV4_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_SLC_IDLE_IMGBV4_EN (0X00000080U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_SHIFT (6U)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_SLC_IDLE_CACHE_BANKS_EN (0X00000040U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_SHIFT (5U)
+#define RGX_CR_SLC_IDLE_RBOFIFO_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_SLC_IDLE_RBOFIFO_EN (0X00000020U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_SHIFT (4U)
+#define RGX_CR_SLC_IDLE_FRC_CONV_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SLC_IDLE_FRC_CONV_EN (0X00000010U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_SHIFT (3U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SLC_IDLE_VXE_CONV_EN (0X00000008U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_SHIFT (2U)
+#define RGX_CR_SLC_IDLE_VXD_CONV_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SLC_IDLE_VXD_CONV_EN (0X00000004U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_SHIFT (1U)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC_IDLE_BIF1_CONV_EN (0X00000002U)
+#define RGX_CR_SLC_IDLE_CBAR_SHIFT (0U)
+#define RGX_CR_SLC_IDLE_CBAR_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_IDLE_CBAR_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC_STATUS2
+*/
+#define RGX_CR_SLC_STATUS2 (0x3908U)
+#define RGX_CR_SLC_STATUS2_MASKFULL (IMG_UINT64_C(0x000003FF03FFFFFF))
+#define RGX_CR_SLC_STATUS2_READS3_SHIFT (32U)
+#define RGX_CR_SLC_STATUS2_READS3_CLRMSK (IMG_UINT64_C(0XFFFFFC00FFFFFFFF))
+#define RGX_CR_SLC_STATUS2_READS2_SHIFT (16U)
+#define RGX_CR_SLC_STATUS2_READS2_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC00FFFF))
+#define RGX_CR_SLC_STATUS2_READS3_EXT_SHIFT (8U)
+#define RGX_CR_SLC_STATUS2_READS3_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_SLC_STATUS2_READS2_EXT_SHIFT (0U)
+#define RGX_CR_SLC_STATUS2_READS2_EXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+/*
+ Register RGX_CR_SLC_CTRL_MISC2
+*/
+#define RGX_CR_SLC_CTRL_MISC2 (0x3930U)
+#define RGX_CR_SLC_CTRL_MISC2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_SHIFT (0U)
+#define RGX_CR_SLC_CTRL_MISC2_SCRAMBLE_BITS_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_SLC_CROSSBAR_LOAD_BALANCE
+*/
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE (0x3938U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_SHIFT (0U)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC_CROSSBAR_LOAD_BALANCE_BYPASS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_USC_UVS0_CHECKSUM
+*/
+#define RGX_CR_USC_UVS0_CHECKSUM (0x5000U)
+#define RGX_CR_USC_UVS0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS0_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS1_CHECKSUM
+*/
+#define RGX_CR_USC_UVS1_CHECKSUM (0x5008U)
+#define RGX_CR_USC_UVS1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS1_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS2_CHECKSUM
+*/
+#define RGX_CR_USC_UVS2_CHECKSUM (0x5010U)
+#define RGX_CR_USC_UVS2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS2_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS3_CHECKSUM
+*/
+#define RGX_CR_USC_UVS3_CHECKSUM (0x5018U)
+#define RGX_CR_USC_UVS3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS3_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PPP_SIGNATURE
+*/
+#define RGX_CR_PPP_SIGNATURE (0x5020U)
+#define RGX_CR_PPP_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_SIGNATURE_VALUE_SHIFT (0U)
+#define RGX_CR_PPP_SIGNATURE_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TE_SIGNATURE
+*/
+#define RGX_CR_TE_SIGNATURE (0x5028U)
+#define RGX_CR_TE_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_SIGNATURE_VALUE_SHIFT (0U)
+#define RGX_CR_TE_SIGNATURE_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TE_CHECKSUM
+*/
+#define RGX_CR_TE_CHECKSUM (0x5110U)
+#define RGX_CR_TE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVB_CHECKSUM
+*/
+#define RGX_CR_USC_UVB_CHECKSUM (0x5118U)
+#define RGX_CR_USC_UVB_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVB_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_VCE_CHECKSUM
+*/
+#define RGX_CR_VCE_CHECKSUM (0x5030U)
+#define RGX_CR_VCE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_VCE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_VCE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_ISP_PDS_CHECKSUM
+*/
+#define RGX_CR_ISP_PDS_CHECKSUM (0x5038U)
+#define RGX_CR_ISP_PDS_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ISP_PDS_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_ISP_TPF_CHECKSUM
+*/
+#define RGX_CR_ISP_TPF_CHECKSUM (0x5040U)
+#define RGX_CR_ISP_TPF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_ISP_TPF_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TFPU_PLANE0_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE0_CHECKSUM (0x5048U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TFPU_PLANE0_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TFPU_PLANE1_CHECKSUM
+*/
+#define RGX_CR_TFPU_PLANE1_CHECKSUM (0x5050U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_TFPU_PLANE1_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PBE_CHECKSUM
+*/
+#define RGX_CR_PBE_CHECKSUM (0x5058U)
+#define RGX_CR_PBE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PBE_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PDS_DOUTM_STM_SIGNATURE
+*/
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE (0x5060U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_SHIFT (0U)
+#define RGX_CR_PDS_DOUTM_STM_SIGNATURE_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_IFPU_ISP_CHECKSUM
+*/
+#define RGX_CR_IFPU_ISP_CHECKSUM (0x5068U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_IFPU_ISP_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS4_CHECKSUM
+*/
+#define RGX_CR_USC_UVS4_CHECKSUM (0x5100U)
+#define RGX_CR_USC_UVS4_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS4_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_UVS5_CHECKSUM
+*/
+#define RGX_CR_USC_UVS5_CHECKSUM (0x5108U)
+#define RGX_CR_USC_UVS5_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_USC_UVS5_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PPP_CLIP_CHECKSUM
+*/
+#define RGX_CR_PPP_CLIP_CHECKSUM (0x5120U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_PPP_CLIP_CHECKSUM_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_TA_PHASE
+*/
+#define RGX_CR_PERF_TA_PHASE (0x6008U)
+#define RGX_CR_PERF_TA_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_PHASE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_TA_PHASE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_3D_PHASE
+*/
+#define RGX_CR_PERF_3D_PHASE (0x6010U)
+#define RGX_CR_PERF_3D_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_PHASE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_3D_PHASE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_COMPUTE_PHASE
+*/
+#define RGX_CR_PERF_COMPUTE_PHASE (0x6018U)
+#define RGX_CR_PERF_COMPUTE_PHASE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_COMPUTE_PHASE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_TA_CYCLE
+*/
+#define RGX_CR_PERF_TA_CYCLE (0x6020U)
+#define RGX_CR_PERF_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_TA_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_3D_CYCLE
+*/
+#define RGX_CR_PERF_3D_CYCLE (0x6028U)
+#define RGX_CR_PERF_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_3D_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_COMPUTE_CYCLE
+*/
+#define RGX_CR_PERF_COMPUTE_CYCLE (0x6030U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_COMPUTE_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_TA_OR_3D_CYCLE
+*/
+#define RGX_CR_PERF_TA_OR_3D_CYCLE (0x6038U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_TA_OR_3D_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_INITIAL_TA_CYCLE
+*/
+#define RGX_CR_PERF_INITIAL_TA_CYCLE (0x6040U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_INITIAL_TA_CYCLE_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC0_READ_STALL
+*/
+#define RGX_CR_PERF_SLC0_READ_STALL (0x60B8U)
+#define RGX_CR_PERF_SLC0_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC0_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC0_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC0_WRITE_STALL (0x60C0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC0_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC1_READ_STALL
+*/
+#define RGX_CR_PERF_SLC1_READ_STALL (0x60E0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC1_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC1_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC1_WRITE_STALL (0x60E8U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC1_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC2_READ_STALL
+*/
+#define RGX_CR_PERF_SLC2_READ_STALL (0x6158U)
+#define RGX_CR_PERF_SLC2_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC2_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC2_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC2_WRITE_STALL (0x6160U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC2_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC3_READ_STALL
+*/
+#define RGX_CR_PERF_SLC3_READ_STALL (0x6180U)
+#define RGX_CR_PERF_SLC3_READ_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC3_READ_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_SLC3_WRITE_STALL
+*/
+#define RGX_CR_PERF_SLC3_WRITE_STALL (0x6188U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_SHIFT (0U)
+#define RGX_CR_PERF_SLC3_WRITE_STALL_COUNT_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_PERF_3D_SPINUP
+*/
+#define RGX_CR_PERF_3D_SPINUP (0x6220U)
+#define RGX_CR_PERF_3D_SPINUP_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_SHIFT (0U)
+#define RGX_CR_PERF_3D_SPINUP_CYCLES_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_AXI_ACE_LITE_CONFIGURATION
+*/
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION (0x38C0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_MASKFULL (IMG_UINT64_C(0x00001FFFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT (37U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_CLRMSK (IMG_UINT64_C(0XFFFFE01FFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT (36U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_SHIFT (35U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_CLRMSK (IMG_UINT64_C(0XFFFFFFF7FFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITE_EN (IMG_UINT64_C(0X0000000800000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_SHIFT (34U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_READ_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT (30U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFC3FFFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT (26U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC3FFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT (22U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFC3FFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_SHIFT (20U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFCFFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_SHIFT (18U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_BARRIER_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF3FFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT (16U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT (14U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF3FFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT (12U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFCFFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT (10U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT (8U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_SHIFT (4U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF0F))
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_SHIFT (0U)
+#define RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_NON_SNOOPING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF0))
+
+
+/*
+ Register RGX_CR_POWER_ESTIMATE_RESULT
+*/
+#define RGX_CR_POWER_ESTIMATE_RESULT (0x6328U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_SHIFT (0U)
+#define RGX_CR_POWER_ESTIMATE_RESULT_VALUE_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF
+*/
+#define RGX_CR_TA_PERF (0x7600U)
+#define RGX_CR_TA_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TA_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TA_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TA_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TA_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TA_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TA_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TA_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TA_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TA_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TA_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TA_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TA_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TA_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT0
+*/
+#define RGX_CR_TA_PERF_SELECT0 (0x7608U)
+#define RGX_CR_TA_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
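
Programming a counter therefore means packing GROUP_SELECT/BIT_SELECT (and optionally the BATCH_MIN/BATCH_MAX window) into the 64-bit SELECT word, then clearing and enabling through RGX_CR_TA_PERF. A sketch under the same assumed MMIO helpers; group and bits are caller-chosen values, and issuing the clear and the enable in a single write is an assumption of this sketch, not documented behaviour:

    static void rgx_ta_perf_setup(void __iomem *regs, uint64_t group, uint64_t bits)
    {
        uint64_t ui64Sel = 0;

        /* Insert each field: shift into place, trim with the field mask. */
        ui64Sel |= (group << RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_SHIFT) &
                   ~RGX_CR_TA_PERF_SELECT0_GROUP_SELECT_CLRMSK;
        ui64Sel |= (bits << RGX_CR_TA_PERF_SELECT0_BIT_SELECT_SHIFT) &
                   ~RGX_CR_TA_PERF_SELECT0_BIT_SELECT_CLRMSK;
        rgx_write64(regs, RGX_CR_TA_PERF_SELECT0, ui64Sel);

        /* Clear counter 0, then leave counting enabled. */
        rgx_write32(regs, RGX_CR_TA_PERF,
                    RGX_CR_TA_PERF_CLR_0_EN | RGX_CR_TA_PERF_CTRL_ENABLE_EN);
    }

Reading RGX_CR_TA_PERF_COUNTER_0 back then yields the 32-bit count; its REG_CLRMSK of 00000000U is the full-width-field convention noted earlier.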
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT1
+*/
+#define RGX_CR_TA_PERF_SELECT1 (0x7610U)
+#define RGX_CR_TA_PERF_SELECT1_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT1_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT1_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT1_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT1_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT1_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT2
+*/
+#define RGX_CR_TA_PERF_SELECT2 (0x7618U)
+#define RGX_CR_TA_PERF_SELECT2_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT2_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT2_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT2_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT2_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT2_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECT3
+*/
+#define RGX_CR_TA_PERF_SELECT3 (0x7620U)
+#define RGX_CR_TA_PERF_SELECT3_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECT3_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_SHIFT (21U)
+#define RGX_CR_TA_PERF_SELECT3_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TA_PERF_SELECT3_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECT3_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECT3_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_SELECTED_BITS
+*/
+#define RGX_CR_TA_PERF_SELECTED_BITS (0x7648U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_SHIFT (48U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG3_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_SHIFT (32U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG2_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_SHIFT (16U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_SHIFT (0U)
+#define RGX_CR_TA_PERF_SELECTED_BITS_REG0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_0
+*/
+#define RGX_CR_TA_PERF_COUNTER_0 (0x7650U)
+#define RGX_CR_TA_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_1
+*/
+#define RGX_CR_TA_PERF_COUNTER_1 (0x7658U)
+#define RGX_CR_TA_PERF_COUNTER_1_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_1_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_1_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_2
+*/
+#define RGX_CR_TA_PERF_COUNTER_2 (0x7660U)
+#define RGX_CR_TA_PERF_COUNTER_2_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_2_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_2_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TA_PERF_COUNTER_3
+*/
+#define RGX_CR_TA_PERF_COUNTER_3 (0x7668U)
+#define RGX_CR_TA_PERF_COUNTER_3_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TA_PERF_COUNTER_3_REG_SHIFT (0U)
+#define RGX_CR_TA_PERF_COUNTER_3_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF
+*/
+#define RGX_CR_RASTERISATION_PERF (0x7700U)
+#define RGX_CR_RASTERISATION_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_RASTERISATION_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_RASTERISATION_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_RASTERISATION_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_RASTERISATION_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_RASTERISATION_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_RASTERISATION_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF_SELECT0
+*/
+#define RGX_CR_RASTERISATION_PERF_SELECT0 (0x7708U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_RASTERISATION_PERF_COUNTER_0
+*/
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0 (0x7750U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_RASTERISATION_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_HUB_BIFPMCACHE_PERF
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF (0x7800U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0 (0x7808U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0
+*/
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0 (0x7850U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF
+*/
+#define RGX_CR_TPU_MCU_L0_PERF (0x7900U)
+#define RGX_CR_TPU_MCU_L0_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TPU_MCU_L0_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TPU_MCU_L0_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF_SELECT0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0 (0x7908U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TPU_MCU_L0_PERF_COUNTER_0
+*/
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0 (0x7950U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TPU_MCU_L0_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_USC_PERF
+*/
+#define RGX_CR_USC_PERF (0x8100U)
+#define RGX_CR_USC_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_USC_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_USC_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_USC_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_USC_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_USC_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_USC_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_USC_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_USC_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_USC_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_USC_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_USC_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_USC_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_USC_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_USC_PERF_SELECT0
+*/
+#define RGX_CR_USC_PERF_SELECT0 (0x8108U)
+#define RGX_CR_USC_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_USC_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_USC_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_USC_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_USC_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_USC_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_USC_PERF_COUNTER_0
+*/
+#define RGX_CR_USC_PERF_COUNTER_0 (0x8150U)
+#define RGX_CR_USC_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_USC_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_USC_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_JONES_IDLE
+*/
+#define RGX_CR_JONES_IDLE (0x8328U)
+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000007FFF))
+#define RGX_CR_JONES_IDLE_TDM_SHIFT (14U)
+#define RGX_CR_JONES_IDLE_TDM_CLRMSK (0XFFFFBFFFU)
+#define RGX_CR_JONES_IDLE_TDM_EN (0X00004000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_SHIFT (13U)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_CLRMSK (0XFFFFDFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_TLA_EN (0X00002000U)
+#define RGX_CR_JONES_IDLE_FB_CDC_SHIFT (12U)
+#define RGX_CR_JONES_IDLE_FB_CDC_CLRMSK (0XFFFFEFFFU)
+#define RGX_CR_JONES_IDLE_FB_CDC_EN (0X00001000U)
+#define RGX_CR_JONES_IDLE_MMU_SHIFT (11U)
+#define RGX_CR_JONES_IDLE_MMU_CLRMSK (0XFFFFF7FFU)
+#define RGX_CR_JONES_IDLE_MMU_EN (0X00000800U)
+#define RGX_CR_JONES_IDLE_TLA_SHIFT (10U)
+#define RGX_CR_JONES_IDLE_TLA_CLRMSK (0XFFFFFBFFU)
+#define RGX_CR_JONES_IDLE_TLA_EN (0X00000400U)
+#define RGX_CR_JONES_IDLE_GARTEN_SHIFT (9U)
+#define RGX_CR_JONES_IDLE_GARTEN_CLRMSK (0XFFFFFDFFU)
+#define RGX_CR_JONES_IDLE_GARTEN_EN (0X00000200U)
+#define RGX_CR_JONES_IDLE_HOSTIF_SHIFT (8U)
+#define RGX_CR_JONES_IDLE_HOSTIF_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_JONES_IDLE_HOSTIF_EN (0X00000100U)
+#define RGX_CR_JONES_IDLE_SOCIF_SHIFT (7U)
+#define RGX_CR_JONES_IDLE_SOCIF_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_JONES_IDLE_SOCIF_EN (0X00000080U)
+#define RGX_CR_JONES_IDLE_TILING_SHIFT (6U)
+#define RGX_CR_JONES_IDLE_TILING_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_JONES_IDLE_TILING_EN (0X00000040U)
+#define RGX_CR_JONES_IDLE_IPP_SHIFT (5U)
+#define RGX_CR_JONES_IDLE_IPP_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_JONES_IDLE_IPP_EN (0X00000020U)
+#define RGX_CR_JONES_IDLE_USCS_SHIFT (4U)
+#define RGX_CR_JONES_IDLE_USCS_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_JONES_IDLE_USCS_EN (0X00000010U)
+#define RGX_CR_JONES_IDLE_PM_SHIFT (3U)
+#define RGX_CR_JONES_IDLE_PM_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_JONES_IDLE_PM_EN (0X00000008U)
+#define RGX_CR_JONES_IDLE_CDM_SHIFT (2U)
+#define RGX_CR_JONES_IDLE_CDM_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_JONES_IDLE_CDM_EN (0X00000004U)
+#define RGX_CR_JONES_IDLE_VDM_SHIFT (1U)
+#define RGX_CR_JONES_IDLE_VDM_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_JONES_IDLE_VDM_EN (0X00000002U)
+#define RGX_CR_JONES_IDLE_BIF_SHIFT (0U)
+#define RGX_CR_JONES_IDLE_BIF_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_JONES_IDLE_BIF_EN (0X00000001U)
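
MASKFULL gives the union of every bit the register defines, so an "everything idle" check can compare a masked read-back against MASKFULL itself rather than testing each *_EN bit in turn. A sketch under the same assumed read helper:

    /* Sketch: true only when every Jones unit reports idle. */
    static bool rgx_jones_is_idle(void __iomem *regs)
    {
        uint32_t ui32Idle = rgx_read32(regs, RGX_CR_JONES_IDLE);

        return (ui32Idle & RGX_CR_JONES_IDLE_MASKFULL) ==
               RGX_CR_JONES_IDLE_MASKFULL;
    }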
+
+
+/*
+ Register RGX_CR_TORNADO_PERF
+*/
+#define RGX_CR_TORNADO_PERF (0x8228U)
+#define RGX_CR_TORNADO_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_TORNADO_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TORNADO_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TORNADO_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TORNADO_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TORNADO_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TORNADO_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TORNADO_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TORNADO_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TORNADO_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TORNADO_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TORNADO_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TORNADO_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TORNADO_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TORNADO_PERF_SELECT0
+*/
+#define RGX_CR_TORNADO_PERF_SELECT0 (0x8230U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TORNADO_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TORNADO_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TORNADO_PERF_COUNTER_0
+*/
+#define RGX_CR_TORNADO_PERF_COUNTER_0 (0x8268U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TORNADO_PERF_COUNTER_0_REG_CLRMSK (00000000U)
+
+
+/*
+ Register RGX_CR_TEXAS_PERF
+*/
+#define RGX_CR_TEXAS_PERF (0x8290U)
+#define RGX_CR_TEXAS_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_TEXAS_PERF_CLR_5_SHIFT (6U)
+#define RGX_CR_TEXAS_PERF_CLR_5_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_TEXAS_PERF_CLR_5_EN (0X00000040U)
+#define RGX_CR_TEXAS_PERF_CLR_4_SHIFT (5U)
+#define RGX_CR_TEXAS_PERF_CLR_4_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_TEXAS_PERF_CLR_4_EN (0X00000020U)
+#define RGX_CR_TEXAS_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_TEXAS_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_TEXAS_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_TEXAS_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_TEXAS_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_TEXAS_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_TEXAS_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_TEXAS_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_TEXAS_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_TEXAS_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_TEXAS_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_TEXAS_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_TEXAS_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_TEXAS_PERF_SELECT0
+*/
+#define RGX_CR_TEXAS_PERF_SELECT0 (0x8298U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_SHIFT (31U)
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_TEXAS_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC0FFFF))
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_TEXAS_PERF_COUNTER_0
+*/
+#define RGX_CR_TEXAS_PERF_COUNTER_0 (0x82D8U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_TEXAS_PERF_COUNTER_0_REG_CLRMSK            (0X00000000U)
+
+
+/*
+ Register RGX_CR_JONES_PERF
+*/
+#define RGX_CR_JONES_PERF (0x8330U)
+#define RGX_CR_JONES_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_JONES_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_JONES_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_JONES_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_JONES_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_JONES_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_JONES_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_JONES_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_JONES_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_JONES_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_JONES_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_JONES_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_JONES_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_JONES_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_JONES_PERF_SELECT0
+*/
+#define RGX_CR_JONES_PERF_SELECT0 (0x8338U)
+#define RGX_CR_JONES_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_JONES_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_JONES_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_JONES_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_JONES_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_JONES_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_JONES_PERF_COUNTER_0
+*/
+#define RGX_CR_JONES_PERF_COUNTER_0 (0x8368U)
+#define RGX_CR_JONES_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_JONES_PERF_COUNTER_0_REG_CLRMSK            (0X00000000U)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF
+*/
+#define RGX_CR_BLACKPEARL_PERF (0x8400U)
+#define RGX_CR_BLACKPEARL_PERF_MASKFULL (IMG_UINT64_C(0x000000000000007F))
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_SHIFT (6U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_5_EN (0X00000040U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_SHIFT (5U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_4_EN (0X00000020U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BLACKPEARL_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BLACKPEARL_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF_SELECT0
+*/
+#define RGX_CR_BLACKPEARL_PERF_SELECT0 (0x8408U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF803FFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_SHIFT (31U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFC0FFFF))
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_BLACKPEARL_PERF_COUNTER_0
+*/
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0 (0x8448U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_BLACKPEARL_PERF_COUNTER_0_REG_CLRMSK       (0X00000000U)
+
+
+/*
+ Register RGX_CR_PBE_PERF
+*/
+#define RGX_CR_PBE_PERF (0x8478U)
+#define RGX_CR_PBE_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_PBE_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_PBE_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_PBE_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_PBE_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_PBE_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_PBE_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_PBE_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_PBE_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_PBE_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_PBE_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_PBE_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_PBE_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_PBE_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_PBE_PERF_SELECT0
+*/
+#define RGX_CR_PBE_PERF_SELECT0 (0x8480U)
+#define RGX_CR_PBE_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_PBE_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_PBE_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_PBE_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_PBE_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_PBE_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_PBE_PERF_COUNTER_0
+*/
+#define RGX_CR_PBE_PERF_COUNTER_0 (0x84B0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_PBE_PERF_COUNTER_0_REG_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_OCP_REVINFO
+*/
+#define RGX_CR_OCP_REVINFO (0x9000U)
+#define RGX_CR_OCP_REVINFO_MASKFULL (IMG_UINT64_C(0x00000007FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_SHIFT (33U)
+#define RGX_CR_OCP_REVINFO_HWINFO_SYSBUS_CLRMSK (IMG_UINT64_C(0XFFFFFFF9FFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_SHIFT (32U)
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_REVINFO_HWINFO_MEMBUS_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_REVINFO_REVISION_SHIFT (0U)
+#define RGX_CR_OCP_REVINFO_REVISION_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_OCP_SYSCONFIG
+*/
+#define RGX_CR_OCP_SYSCONFIG (0x9010U)
+#define RGX_CR_OCP_SYSCONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000FFF))
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_SHIFT (10U)
+#define RGX_CR_OCP_SYSCONFIG_DUST2_STANDBY_MODE_CLRMSK (0XFFFFF3FFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_SHIFT (8U)
+#define RGX_CR_OCP_SYSCONFIG_DUST1_STANDBY_MODE_CLRMSK (0XFFFFFCFFU)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_SHIFT (6U)
+#define RGX_CR_OCP_SYSCONFIG_DUST0_STANDBY_MODE_CLRMSK (0XFFFFFF3FU)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_SHIFT (4U)
+#define RGX_CR_OCP_SYSCONFIG_RASCAL_STANDBYMODE_CLRMSK (0XFFFFFFCFU)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_SHIFT (2U)
+#define RGX_CR_OCP_SYSCONFIG_STANDBY_MODE_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_SHIFT (0U)
+#define RGX_CR_OCP_SYSCONFIG_IDLE_MODE_CLRMSK (0XFFFFFFFCU)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_RAW_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_0 (0x9020U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_0_INIT_MINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_RAW_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_1 (0x9028U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_1_TARGET_SINTERRUPT_RAW_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_RAW_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_RAW_2 (0x9030U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_RAW_2_RGX_IRQ_RAW_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_0
+*/
+#define RGX_CR_OCP_IRQSTATUS_0 (0x9038U)
+#define RGX_CR_OCP_IRQSTATUS_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_0_INIT_MINTERRUPT_STATUS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_1
+*/
+#define RGX_CR_OCP_IRQSTATUS_1 (0x9040U)
+#define RGX_CR_OCP_IRQSTATUS_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_1_TARGET_SINTERRUPT_STATUS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQSTATUS_2
+*/
+#define RGX_CR_OCP_IRQSTATUS_2 (0x9048U)
+#define RGX_CR_OCP_IRQSTATUS_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_SHIFT (0U)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_SET_0
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_0 (0x9050U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_0_INIT_MINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_SET_1
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_1 (0x9058U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_1_TARGET_SINTERRUPT_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_SET_2
+*/
+#define RGX_CR_OCP_IRQENABLE_SET_2 (0x9060U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_CLR_0
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_0 (0x9068U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_0_INIT_MINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_CLR_1
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_1 (0x9070U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_1_TARGET_SINTERRUPT_DISABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_IRQENABLE_CLR_2
+*/
+#define RGX_CR_OCP_IRQENABLE_CLR_2 (0x9078U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_SHIFT (0U)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN (0X00000001U)
+
+
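+/*
+ Usage sketch (editor's illustration, not part of the generated register
+ list): the OCP interrupt enables are split into SET/CLR register pairs,
+ so a line can be enabled or disabled with a single write and no
+ read-modify-write. Accessors and the mapped base `regs` are assumptions.
+*/
+#if 0 /* illustrative only */
+static inline void rgx_ocp_rgx_irq_enable(void __iomem *regs, bool enable)
+{
+	if (enable)
+		writel(RGX_CR_OCP_IRQENABLE_SET_2_RGX_IRQ_ENABLE_EN,
+		       regs + RGX_CR_OCP_IRQENABLE_SET_2);
+	else
+		writel(RGX_CR_OCP_IRQENABLE_CLR_2_RGX_IRQ_DISABLE_EN,
+		       regs + RGX_CR_OCP_IRQENABLE_CLR_2);
+}
+#endif
+
+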
+/*
+ Register RGX_CR_OCP_IRQ_EVENT
+*/
+#define RGX_CR_OCP_IRQ_EVENT (0x9080U)
+#define RGX_CR_OCP_IRQ_EVENT_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_SHIFT (19U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF7FFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000080000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_SHIFT (18U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETH_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_SHIFT (17U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFDFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNEXPECTED_RDATA_EN (IMG_UINT64_C(0X0000000000020000))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_SHIFT (16U)
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFEFFFF))
+#define RGX_CR_OCP_IRQ_EVENT_TARGETS_RCVD_UNSUPPORTED_MCMD_EN (IMG_UINT64_C(0X0000000000010000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_SHIFT (15U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_SHIFT (14U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_SHIFT (13U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_SHIFT (12U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFEFFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT3_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000001000))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_SHIFT (11U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF7FF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000800))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_SHIFT (10U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_SHIFT (9U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFDFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000000200))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_SHIFT (8U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFEFF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT2_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000100))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_SHIFT (7U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_SHIFT (6U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_SHIFT (5U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_SHIFT (4U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_OCP_IRQ_EVENT_INIT1_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000010))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_SHIFT (3U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_IMG_PAGE_BOUNDARY_CROSS_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_SHIFT (2U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_RESP_ERR_FAIL_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_SHIFT (1U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RCVD_UNUSED_TAGID_EN (IMG_UINT64_C(0X0000000000000002))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_SHIFT (0U)
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_OCP_IRQ_EVENT_INIT0_RDATA_FIFO_OVERFILL_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_OCP_DEBUG_CONFIG
+*/
+#define RGX_CR_OCP_DEBUG_CONFIG (0x9088U)
+#define RGX_CR_OCP_DEBUG_CONFIG_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_SHIFT (0U)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_OCP_DEBUG_CONFIG_REG_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_OCP_DEBUG_STATUS
+*/
+#define RGX_CR_OCP_DEBUG_STATUS (0x9090U)
+#define RGX_CR_OCP_DEBUG_STATUS_MASKFULL (IMG_UINT64_C(0x001F1F77FFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_SHIFT (51U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SDISCACK_CLRMSK (IMG_UINT64_C(0XFFE7FFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_SHIFT (50U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFBFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SCONNECT_EN (IMG_UINT64_C(0X0004000000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_SHIFT (48U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFCFFFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_SHIFT (43U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFE7FFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_SHIFT (42U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFBFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SCONNECT_EN (IMG_UINT64_C(0X0000040000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_SHIFT (40U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFCFFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_SHIFT (38U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_CLRMSK (IMG_UINT64_C(0XFFFFFFBFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_BUSY_EN (IMG_UINT64_C(0X0000004000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_SHIFT (37U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFDFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_CMD_FIFO_FULL_EN (IMG_UINT64_C(0X0000002000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_SHIFT (36U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFEFFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETH_SRESP_ERROR_EN (IMG_UINT64_C(0X0000001000000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_SHIFT (34U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_CLRMSK (IMG_UINT64_C(0XFFFFFFFBFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_BUSY_EN (IMG_UINT64_C(0X0000000400000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_SHIFT (33U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_CLRMSK (IMG_UINT64_C(0XFFFFFFFDFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_CMD_FIFO_FULL_EN (IMG_UINT64_C(0X0000000200000000))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_SHIFT (32U)
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_CLRMSK (IMG_UINT64_C(0XFFFFFFFEFFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_TARGETS_SRESP_ERROR_EN (IMG_UINT64_C(0X0000000100000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_SHIFT (31U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFF7FFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_RESERVED_EN (IMG_UINT64_C(0X0000000080000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_SHIFT (30U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFBFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SWAIT_EN (IMG_UINT64_C(0X0000000040000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_SHIFT (29U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFDFFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCREQ_EN (IMG_UINT64_C(0X0000000020000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_SHIFT (27U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFE7FFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_SHIFT (26U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFBFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_SCONNECT_EN (IMG_UINT64_C(0X0000000004000000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_SHIFT (24U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT3_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFCFFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_SHIFT (23U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF7FFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_RESERVED_EN (IMG_UINT64_C(0X0000000000800000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_SHIFT (22U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFBFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SWAIT_EN (IMG_UINT64_C(0X0000000000400000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_SHIFT (21U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCREQ_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_SHIFT (19U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE7FFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_SHIFT (18U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFBFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_SCONNECT_EN (IMG_UINT64_C(0X0000000000040000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_SHIFT (16U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT2_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFCFFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_SHIFT (15U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF7FFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_RESERVED_EN (IMG_UINT64_C(0X0000000000008000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_SHIFT (14U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFBFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SWAIT_EN (IMG_UINT64_C(0X0000000000004000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_SHIFT (13U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFDFFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCREQ_EN (IMG_UINT64_C(0X0000000000002000))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_SHIFT (11U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFE7FF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_SHIFT (10U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFBFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_SCONNECT_EN (IMG_UINT64_C(0X0000000000000400))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_SHIFT (8U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT1_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_SHIFT (7U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF7F))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_RESERVED_EN (IMG_UINT64_C(0X0000000000000080))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_SHIFT (6U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFBF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SWAIT_EN (IMG_UINT64_C(0X0000000000000040))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_SHIFT (5U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCREQ_EN (IMG_UINT64_C(0X0000000000000020))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_SHIFT (3U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MDISCACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFE7))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_SHIFT (2U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_SCONNECT_EN (IMG_UINT64_C(0X0000000000000004))
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_SHIFT (0U)
+#define RGX_CR_OCP_DEBUG_STATUS_INIT0_MCONNECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+
+
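+/*
+ Field values shared by RGX_CR_BIF_TRUST below: per data master (DM)
+ type bits, covered by RGX_CR_BIF_TRUST_DM_MASK.
+*/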
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_SHIFT (6U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PM_ALIST_EN (0X00000040U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_SHIFT (5U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_HOST_EN (0X00000020U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_SHIFT (4U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_META_EN (0X00000010U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_SHIFT (3U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_ZLS_EN (0X00000008U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_SHIFT (2U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_TE_EN (0X00000004U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_SHIFT (1U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_PB_VCE_EN (0X00000002U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_SHIFT (0U)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_DM_TYPE_TLA_EN (0X00000001U)
+
+
+#define RGX_CR_BIF_TRUST_DM_MASK (0x0000007FU)
+
+
+/*
+ Register RGX_CR_BIF_TRUST
+*/
+#define RGX_CR_BIF_TRUST (0xA000U)
+#define RGX_CR_BIF_TRUST_MASKFULL (IMG_UINT64_C(0x00000000001FFFFF))
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_SHIFT (20U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFEFFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_VERTEX_DM_TRUSTED_EN (0X00100000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_SHIFT (19U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_CLRMSK (0XFFF7FFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_VERTEX_DM_TRUSTED_EN (0X00080000U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_SHIFT (18U)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_CLRMSK (0XFFFBFFFFU)
+#define RGX_CR_BIF_TRUST_OTHER_RAY_DM_TRUSTED_EN (0X00040000U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_SHIFT (17U)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_BIF_TRUST_MCU_RAY_DM_TRUSTED_EN (0X00020000U)
+#define RGX_CR_BIF_TRUST_ENABLE_SHIFT (16U)
+#define RGX_CR_BIF_TRUST_ENABLE_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_BIF_TRUST_ENABLE_EN (0X00010000U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT (9U)
+#define RGX_CR_BIF_TRUST_DM_TRUSTED_CLRMSK (0XFFFF01FFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_SHIFT (8U)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_BIF_TRUST_OTHER_COMPUTE_DM_TRUSTED_EN (0X00000100U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_SHIFT (7U)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_CLRMSK (0XFFFFFF7FU)
+#define RGX_CR_BIF_TRUST_MCU_COMPUTE_DM_TRUSTED_EN (0X00000080U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_SHIFT (6U)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_CLRMSK (0XFFFFFFBFU)
+#define RGX_CR_BIF_TRUST_PBE_COMPUTE_DM_TRUSTED_EN (0X00000040U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_SHIFT (5U)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_CLRMSK (0XFFFFFFDFU)
+#define RGX_CR_BIF_TRUST_OTHER_PIXEL_DM_TRUSTED_EN (0X00000020U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_SHIFT (4U)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_BIF_TRUST_MCU_PIXEL_DM_TRUSTED_EN (0X00000010U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_SHIFT (3U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_BIF_TRUST_PBE_PIXEL_DM_TRUSTED_EN (0X00000008U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_SHIFT (2U)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_BIF_TRUST_OTHER_VERTEX_DM_TRUSTED_EN (0X00000004U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_SHIFT (1U)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_BIF_TRUST_MCU_VERTEX_DM_TRUSTED_EN (0X00000002U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_SHIFT (0U)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_BIF_TRUST_PBE_VERTEX_DM_TRUSTED_EN (0X00000001U)
+
+
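+/*
+ Usage sketch (editor's illustration; the field relationship is inferred
+ from the definitions, not confirmed): the DM_TYPE_* bits above span the
+ same seven bits as RGX_CR_BIF_TRUST_DM_MASK and appear to populate the
+ 7-bit DM_TRUSTED field of RGX_CR_BIF_TRUST. Accessors and `regs` are
+ assumptions.
+*/
+#if 0 /* illustrative only */
+static inline void rgx_bif_trust_meta(void __iomem *regs)
+{
+	u32 v = readl(regs + RGX_CR_BIF_TRUST);
+
+	v |= (RGX_CR_BIF_TRUST_DM_TYPE_META_EN & RGX_CR_BIF_TRUST_DM_MASK)
+	     << RGX_CR_BIF_TRUST_DM_TRUSTED_SHIFT;
+	v |= RGX_CR_BIF_TRUST_ENABLE_EN;
+	writel(v, regs + RGX_CR_BIF_TRUST);
+}
+#endif
+
+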
+/*
+ Register RGX_CR_SYS_BUS_SECURE
+*/
+#define RGX_CR_SYS_BUS_SECURE (0xA100U)
+#define RGX_CR_SYS_BUS_SECURE__SECR__MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_MASKFULL (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_SHIFT (0U)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SYS_BUS_SECURE_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_FBA_FC0_CHECKSUM
+*/
+#define RGX_CR_FBA_FC0_CHECKSUM (0xD170U)
+#define RGX_CR_FBA_FC0_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC0_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_FBA_FC1_CHECKSUM
+*/
+#define RGX_CR_FBA_FC1_CHECKSUM (0xD178U)
+#define RGX_CR_FBA_FC1_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC1_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_FBA_FC2_CHECKSUM
+*/
+#define RGX_CR_FBA_FC2_CHECKSUM (0xD180U)
+#define RGX_CR_FBA_FC2_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC2_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_FBA_FC3_CHECKSUM
+*/
+#define RGX_CR_FBA_FC3_CHECKSUM (0xD188U)
+#define RGX_CR_FBA_FC3_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_FBA_FC3_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_CLK_CTRL2
+*/
+#define RGX_CR_CLK_CTRL2 (0xD200U)
+#define RGX_CR_CLK_CTRL2_MASKFULL (IMG_UINT64_C(0x0000000000000F33))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_SHIFT (10U)
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF3FF))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_OFF                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_ON (IMG_UINT64_C(0x0000000000000400))
+#define RGX_CR_CLK_CTRL2_MCU_FBTC_AUTO (IMG_UINT64_C(0x0000000000000800))
+#define RGX_CR_CLK_CTRL2_VRDM_SHIFT (8U)
+#define RGX_CR_CLK_CTRL2_VRDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFCFF))
+#define RGX_CR_CLK_CTRL2_VRDM_OFF                         (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_VRDM_ON (IMG_UINT64_C(0x0000000000000100))
+#define RGX_CR_CLK_CTRL2_VRDM_AUTO (IMG_UINT64_C(0x0000000000000200))
+#define RGX_CR_CLK_CTRL2_SH_SHIFT (4U)
+#define RGX_CR_CLK_CTRL2_SH_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_CLK_CTRL2_SH_OFF                           (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_SH_ON (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_CTRL2_SH_AUTO (IMG_UINT64_C(0x0000000000000020))
+#define RGX_CR_CLK_CTRL2_FBA_SHIFT (0U)
+#define RGX_CR_CLK_CTRL2_FBA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFC))
+#define RGX_CR_CLK_CTRL2_FBA_OFF                          (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_CTRL2_FBA_ON (IMG_UINT64_C(0x0000000000000001))
+#define RGX_CR_CLK_CTRL2_FBA_AUTO (IMG_UINT64_C(0x0000000000000002))
+
+
+/*
+ Register RGX_CR_CLK_STATUS2
+*/
+#define RGX_CR_CLK_STATUS2 (0xD208U)
+#define RGX_CR_CLK_STATUS2_MASKFULL (IMG_UINT64_C(0x0000000000000015))
+#define RGX_CR_CLK_STATUS2_VRDM_SHIFT (4U)
+#define RGX_CR_CLK_STATUS2_VRDM_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_CR_CLK_STATUS2_VRDM_GATED                     (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_VRDM_RUNNING (IMG_UINT64_C(0x0000000000000010))
+#define RGX_CR_CLK_STATUS2_SH_SHIFT (2U)
+#define RGX_CR_CLK_STATUS2_SH_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_CR_CLK_STATUS2_SH_GATED                       (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_SH_RUNNING (IMG_UINT64_C(0x0000000000000004))
+#define RGX_CR_CLK_STATUS2_FBA_SHIFT (0U)
+#define RGX_CR_CLK_STATUS2_FBA_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_CLK_STATUS2_FBA_GATED                      (IMG_UINT64_C(0x0000000000000000))
+#define RGX_CR_CLK_STATUS2_FBA_RUNNING (IMG_UINT64_C(0x0000000000000001))
+
+
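+/*
+ Usage sketch (editor's illustration, not part of the generated register
+ list): each RGX_CR_CLK_CTRL2 domain is a two-bit field with OFF/ON/AUTO
+ encodings; after a switch, the matching RGX_CR_CLK_STATUS2 field reports
+ GATED or RUNNING. Accessors and `regs` are assumptions.
+*/
+#if 0 /* illustrative only */
+static inline void rgx_clk_ctrl2_sh_auto(void __iomem *regs)
+{
+	u64 v = readq(regs + RGX_CR_CLK_CTRL2);
+
+	v &= RGX_CR_CLK_CTRL2_SH_CLRMSK;
+	v |= RGX_CR_CLK_CTRL2_SH_AUTO;
+	writeq(v, regs + RGX_CR_CLK_CTRL2);
+}
+#endif
+
+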
+/*
+ Register RGX_CR_RPM_SHF_FPL
+*/
+#define RGX_CR_RPM_SHF_FPL (0xD520U)
+#define RGX_CR_RPM_SHF_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHF_FPL_SIZE_SHIFT (40U)
+#define RGX_CR_RPM_SHF_FPL_SIZE_CLRMSK (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHF_FPL_BASE_SHIFT (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000003))
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSIZE (4U)
+
+
+/*
+ Register RGX_CR_RPM_SHF_FPL_READ
+*/
+#define RGX_CR_RPM_SHF_FPL_READ (0xD528U)
+#define RGX_CR_RPM_SHF_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_READ_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHF_FPL_READ_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_RPM_SHF_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHF_FPL_WRITE (0xD530U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHF_FPL_WRITE_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHF_FPL_WRITE_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_RPM_SHG_FPL
+*/
+#define RGX_CR_RPM_SHG_FPL (0xD538U)
+#define RGX_CR_RPM_SHG_FPL_MASKFULL (IMG_UINT64_C(0x3FFFFFFFFFFFFFFC))
+#define RGX_CR_RPM_SHG_FPL_SIZE_SHIFT (40U)
+#define RGX_CR_RPM_SHG_FPL_SIZE_CLRMSK (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+#define RGX_CR_RPM_SHG_FPL_BASE_SHIFT (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000003))
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSHIFT (2U)
+#define RGX_CR_RPM_SHG_FPL_BASE_ALIGNSIZE (4U)
+
+
+/*
+ Register RGX_CR_RPM_SHG_FPL_READ
+*/
+#define RGX_CR_RPM_SHG_FPL_READ (0xD540U)
+#define RGX_CR_RPM_SHG_FPL_READ_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_READ_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHG_FPL_READ_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_RPM_SHG_FPL_WRITE
+*/
+#define RGX_CR_RPM_SHG_FPL_WRITE (0xD548U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_MASKFULL (IMG_UINT64_C(0x00000000007FFFFF))
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_SHIFT (22U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_CLRMSK (0XFFBFFFFFU)
+#define RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN (0X00400000U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_SHIFT (0U)
+#define RGX_CR_RPM_SHG_FPL_WRITE_OFFSET_CLRMSK (0XFFC00000U)
+
+
+/*
+ Register RGX_CR_SH_PERF
+*/
+#define RGX_CR_SH_PERF (0xD5F8U)
+#define RGX_CR_SH_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define RGX_CR_SH_PERF_CLR_3_SHIFT (4U)
+#define RGX_CR_SH_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define RGX_CR_SH_PERF_CLR_3_EN (0X00000010U)
+#define RGX_CR_SH_PERF_CLR_2_SHIFT (3U)
+#define RGX_CR_SH_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define RGX_CR_SH_PERF_CLR_2_EN (0X00000008U)
+#define RGX_CR_SH_PERF_CLR_1_SHIFT (2U)
+#define RGX_CR_SH_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define RGX_CR_SH_PERF_CLR_1_EN (0X00000004U)
+#define RGX_CR_SH_PERF_CLR_0_SHIFT (1U)
+#define RGX_CR_SH_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SH_PERF_CLR_0_EN (0X00000002U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_SHIFT (0U)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SH_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SH_PERF_SELECT0
+*/
+#define RGX_CR_SH_PERF_SELECT0 (0xD600U)
+#define RGX_CR_SH_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define RGX_CR_SH_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_SHIFT (21U)
+#define RGX_CR_SH_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define RGX_CR_SH_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define RGX_CR_SH_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define RGX_CR_SH_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_SH_PERF_COUNTER_0
+*/
+#define RGX_CR_SH_PERF_COUNTER_0 (0xD628U)
+#define RGX_CR_SH_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SH_PERF_COUNTER_0_REG_SHIFT (0U)
+#define RGX_CR_SH_PERF_COUNTER_0_REG_CLRMSK               (0X00000000U)
+
+
+/*
+ Register RGX_CR_SHF_SHG_CHECKSUM
+*/
+#define RGX_CR_SHF_SHG_CHECKSUM (0xD1C0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHF_SHG_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_SHF_VERTEX_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM (0xD1C8U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHF_VERTEX_BIF_CHECKSUM_VALUE_CLRMSK       (0X00000000U)
+
+
+/*
+ Register RGX_CR_SHF_VARY_BIF_CHECKSUM
+*/
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM (0xD1D0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHF_VARY_BIF_CHECKSUM_VALUE_CLRMSK         (0X00000000U)
+
+
+/*
+ Register RGX_CR_RPM_BIF_CHECKSUM
+*/
+#define RGX_CR_RPM_BIF_CHECKSUM (0xD1D8U)
+#define RGX_CR_RPM_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_RPM_BIF_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_SHG_BIF_CHECKSUM
+*/
+#define RGX_CR_SHG_BIF_CHECKSUM (0xD1E0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHG_BIF_CHECKSUM_VALUE_CLRMSK              (0X00000000U)
+
+
+/*
+ Register RGX_CR_SHG_FE_BE_CHECKSUM
+*/
+#define RGX_CR_SHG_FE_BE_CHECKSUM (0xD1E8U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_SHIFT (0U)
+#define RGX_CR_SHG_FE_BE_CHECKSUM_VALUE_CLRMSK            (0X00000000U)
+
+
+/*
+ Register DPX_CR_BF_PERF
+*/
+#define DPX_CR_BF_PERF (0xC458U)
+#define DPX_CR_BF_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BF_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_BF_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BF_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_BF_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_BF_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_BF_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_BF_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_BF_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BF_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_BF_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_BF_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BF_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BF_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BF_PERF_SELECT0
+*/
+#define DPX_CR_BF_PERF_SELECT0 (0xC460U)
+#define DPX_CR_BF_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_BF_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_BF_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BF_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_BF_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_BF_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_BF_PERF_COUNTER_0
+*/
+#define DPX_CR_BF_PERF_COUNTER_0 (0xC488U)
+#define DPX_CR_BF_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BF_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_BF_PERF_COUNTER_0_REG_CLRMSK               (0X00000000U)
+
+
+/*
+ Register DPX_CR_BT_PERF
+*/
+#define DPX_CR_BT_PERF (0xC3D0U)
+#define DPX_CR_BT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BT_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_BT_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BT_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_BT_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_BT_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_BT_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_BT_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_BT_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BT_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_BT_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_BT_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BT_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BT_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BT_PERF_SELECT0
+*/
+#define DPX_CR_BT_PERF_SELECT0 (0xC3D8U)
+#define DPX_CR_BT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_BT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_BT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_BT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_BT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_BT_PERF_COUNTER_0
+*/
+#define DPX_CR_BT_PERF_COUNTER_0 (0xC420U)
+#define DPX_CR_BT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BT_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_BT_PERF_COUNTER_0_REG_CLRMSK               (0X00000000U)
+
+
+/*
+ Register DPX_CR_RQ_USC_DEBUG
+*/
+#define DPX_CR_RQ_USC_DEBUG (0xC110U)
+#define DPX_CR_RQ_USC_DEBUG_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_SHIFT (0U)
+#define DPX_CR_RQ_USC_DEBUG_CHECKSUM_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register DPX_CR_BIF_FAULT_BANK_MMU_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS (0xC5C8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000000F775))
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_SHIFT (12U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_CAT_BASE_CLRMSK (0XFFFF0FFFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_SHIFT (8U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_PAGE_SIZE_CLRMSK (0XFFFFF8FFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_SHIFT (5U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_DATA_TYPE_CLRMSK (0XFFFFFF9FU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_SHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_RO_EN (0X00000010U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_SHIFT (2U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_PM_META_RO_EN (0X00000004U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_SHIFT (0U)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BIF_FAULT_BANK_MMU_STATUS_FAULT_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BIF_FAULT_BANK_REQ_STATUS
+*/
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS (0xC5D0U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_MASKFULL (IMG_UINT64_C(0x03FFFFFFFFFFFFF0))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_SHIFT (57U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFDFFFFFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN (IMG_UINT64_C(0X0200000000000000))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_SHIFT (44U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFE000FFFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_SHIFT (40U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_TAG_ID_CLRMSK (IMG_UINT64_C(0XFFFFF0FFFFFFFFFF))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_SHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0XFFFFFF000000000F))
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSHIFT (4U)
+#define DPX_CR_BIF_FAULT_BANK_REQ_STATUS_ADDRESS_ALIGNSIZE (16U)
+
+
+/*
+ Register DPX_CR_BIF_MMU_STATUS
+*/
+#define DPX_CR_BIF_MMU_STATUS (0xC5D8U)
+#define DPX_CR_BIF_MMU_STATUS_MASKFULL (IMG_UINT64_C(0x000000000FFFFFF7))
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_SHIFT (20U)
+#define DPX_CR_BIF_MMU_STATUS_PC_DATA_CLRMSK (0XF00FFFFFU)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_SHIFT (12U)
+#define DPX_CR_BIF_MMU_STATUS_PD_DATA_CLRMSK (0XFFF00FFFU)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_SHIFT (4U)
+#define DPX_CR_BIF_MMU_STATUS_PT_DATA_CLRMSK (0XFFFFF00FU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_SHIFT (2U)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BIF_MMU_STATUS_STALLED_EN (0X00000004U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_SHIFT (1U)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BIF_MMU_STATUS_PAUSED_EN (0X00000002U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_SHIFT (0U)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BIF_MMU_STATUS_BUSY_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_RT_PERF
+*/
+#define DPX_CR_RT_PERF (0xC700U)
+#define DPX_CR_RT_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_RT_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_RT_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_RT_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_RT_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_RT_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_RT_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_RT_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_RT_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_RT_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_RT_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_RT_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_RT_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_RT_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_RT_PERF_SELECT0
+*/
+#define DPX_CR_RT_PERF_SELECT0 (0xC708U)
+#define DPX_CR_RT_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_RT_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_RT_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_RT_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_RT_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_RT_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_RT_PERF_COUNTER_0
+*/
+#define DPX_CR_RT_PERF_COUNTER_0 (0xC730U)
+#define DPX_CR_RT_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RT_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_RT_PERF_COUNTER_0_REG_CLRMSK               (0X00000000U)
+
+
+/*
+ Register DPX_CR_BX_TU_PERF
+*/
+#define DPX_CR_BX_TU_PERF (0xC908U)
+#define DPX_CR_BX_TU_PERF_MASKFULL (IMG_UINT64_C(0x000000000000001F))
+#define DPX_CR_BX_TU_PERF_CLR_3_SHIFT (4U)
+#define DPX_CR_BX_TU_PERF_CLR_3_CLRMSK (0XFFFFFFEFU)
+#define DPX_CR_BX_TU_PERF_CLR_3_EN (0X00000010U)
+#define DPX_CR_BX_TU_PERF_CLR_2_SHIFT (3U)
+#define DPX_CR_BX_TU_PERF_CLR_2_CLRMSK (0XFFFFFFF7U)
+#define DPX_CR_BX_TU_PERF_CLR_2_EN (0X00000008U)
+#define DPX_CR_BX_TU_PERF_CLR_1_SHIFT (2U)
+#define DPX_CR_BX_TU_PERF_CLR_1_CLRMSK (0XFFFFFFFBU)
+#define DPX_CR_BX_TU_PERF_CLR_1_EN (0X00000004U)
+#define DPX_CR_BX_TU_PERF_CLR_0_SHIFT (1U)
+#define DPX_CR_BX_TU_PERF_CLR_0_CLRMSK (0XFFFFFFFDU)
+#define DPX_CR_BX_TU_PERF_CLR_0_EN (0X00000002U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_SHIFT (0U)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_CLRMSK (0XFFFFFFFEU)
+#define DPX_CR_BX_TU_PERF_CTRL_ENABLE_EN (0X00000001U)
+
+
+/*
+ Register DPX_CR_BX_TU_PERF_SELECT0
+*/
+#define DPX_CR_BX_TU_PERF_SELECT0 (0xC910U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MASKFULL (IMG_UINT64_C(0x3FFF3FFF003FFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_SHIFT (48U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MAX_CLRMSK (IMG_UINT64_C(0XC000FFFFFFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_SHIFT (32U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BATCH_MIN_CLRMSK (IMG_UINT64_C(0XFFFFC000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_SHIFT (21U)
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFDFFFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_MODE_EN (IMG_UINT64_C(0X0000000000200000))
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_SHIFT (16U)
+#define DPX_CR_BX_TU_PERF_SELECT0_GROUP_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE0FFFF))
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_SHIFT (0U)
+#define DPX_CR_BX_TU_PERF_SELECT0_BIT_SELECT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register DPX_CR_BX_TU_PERF_COUNTER_0
+*/
+#define DPX_CR_BX_TU_PERF_COUNTER_0 (0xC938U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_SHIFT (0U)
+#define DPX_CR_BX_TU_PERF_COUNTER_0_REG_CLRMSK            (0X00000000U)
+
+
+/*
+ Register DPX_CR_RS_PDS_RR_CHECKSUM
+*/
+#define DPX_CR_RS_PDS_RR_CHECKSUM (0xC0F0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_SHIFT (0U)
+#define DPX_CR_RS_PDS_RR_CHECKSUM_VALUE_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00000000))
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING_CONTEXT
+*/
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT (0xE140U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_MASKFULL (IMG_UINT64_C(0x00000000000000FF))
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_MMU_CBASE_MAPPING
+*/
+#define RGX_CR_MMU_CBASE_MAPPING (0xE148U)
+#define RGX_CR_MMU_CBASE_MAPPING_MASKFULL (IMG_UINT64_C(0x000000000FFFFFFF))
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT (0U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK (0XF0000000U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT (12U)
+#define RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSIZE (4096U)
+
+
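+/*
+ Usage sketch (editor's illustration; the programming sequence is an
+ assumption): _ALIGNSHIFT/_ALIGNSIZE describe field granularity, so the
+ value written is presumably the 4 KiB-aligned base address shifted down
+ by 12, after selecting a context via the CONTEXT register above.
+ Accessors and `regs` are assumptions.
+*/
+#if 0 /* illustrative only */
+static inline void rgx_mmu_set_cbase(void __iomem *regs, u32 ctx, u64 base)
+{
+	writel(ctx & ~RGX_CR_MMU_CBASE_MAPPING_CONTEXT_ID_CLRMSK,
+	       regs + RGX_CR_MMU_CBASE_MAPPING_CONTEXT);
+	writel((u32)(base >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT) &
+	       ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK,
+	       regs + RGX_CR_MMU_CBASE_MAPPING);
+}
+#endif
+
+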
+/*
+ Register RGX_CR_MMU_FAULT_STATUS
+*/
+#define RGX_CR_MMU_FAULT_STATUS (0xE150U)
+#define RGX_CR_MMU_FAULT_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT (28U)
+#define RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT (20U)
+#define RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT (6U)
+#define RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT (4U)
+#define RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS_RNW_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_RNW_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_FAULT_EN (IMG_UINT64_C(0X0000000000000001))
+
+
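+/*
+ Decoding sketch (editor's illustration, not part of the generated
+ register list): a field is extracted by masking with the inverse of its
+ _CLRMSK and shifting down by its _SHIFT; single-bit flags are tested
+ against their _EN value. Accessors and `regs` are assumptions.
+*/
+#if 0 /* illustrative only */
+static inline void rgx_mmu_fault_decode(void __iomem *regs)
+{
+	u64 st = readq(regs + RGX_CR_MMU_FAULT_STATUS);
+
+	if (st & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) {
+		u64 addr = (st & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+			   RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT;
+		bool is_read = st & RGX_CR_MMU_FAULT_STATUS_RNW_EN;
+
+		(void)addr;
+		(void)is_read;
+	}
+}
+#endif
+
+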
+/*
+ Register RGX_CR_MMU_FAULT_STATUS_META
+*/
+#define RGX_CR_MMU_FAULT_STATUS_META (0xE158U)
+#define RGX_CR_MMU_FAULT_STATUS_META_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT (28U)
+#define RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK (IMG_UINT64_C(0X000000000FFFFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT (20U)
+#define RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFF00FFFFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT (12U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFF00FFF))
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT (6U)
+#define RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT (4U)
+#define RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFCF))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_SHIFT (3U)
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_CR_MMU_FAULT_STATUS_META_RNW_EN (IMG_UINT64_C(0X0000000000000008))
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT (1U)
+#define RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF9))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_SHIFT (0U)
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN (IMG_UINT64_C(0X0000000000000001))
+
+
+/*
+ Register RGX_CR_SLC3_CTRL_MISC
+*/
+#define RGX_CR_SLC3_CTRL_MISC (0xE200U)
+#define RGX_CR_SLC3_CTRL_MISC_MASKFULL (IMG_UINT64_C(0x0000000000000107))
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_SHIFT (8U)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_CLRMSK (0XFFFFFEFFU)
+#define RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN (0X00000100U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SHIFT (0U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK (0XFFFFFFF8U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_LINEAR (0x00000000U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_IN_PAGE_HASH (0X00000001U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_FIXED_PVR_HASH (0X00000002U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH (0X00000003U)
+#define RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH (0X00000004U)
+
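+/*
+ Usage sketch (illustrative): selecting an address decode mode is a
+ read-modify-write, clearing the field with its CLRMSK before OR-ing in
+ one of the mode values above. OSReadHWReg32/OSWriteHWReg32 stand in for
+ whatever register accessors the caller has; the code is shown as a
+ comment since they are not declared in this header.
+
+  IMG_UINT32 ui32Misc = OSReadHWReg32(pvRegBase, RGX_CR_SLC3_CTRL_MISC);
+  ui32Misc = (ui32Misc & RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_CLRMSK)
+           | RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_WEAVED_HASH;
+  OSWriteHWReg32(pvRegBase, RGX_CR_SLC3_CTRL_MISC, ui32Misc);
+*/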
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE
+*/
+#define RGX_CR_SLC3_SCRAMBLE (0xE208U)
+#define RGX_CR_SLC3_SCRAMBLE_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE2
+*/
+#define RGX_CR_SLC3_SCRAMBLE2 (0xE210U)
+#define RGX_CR_SLC3_SCRAMBLE2_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE2_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE3
+*/
+#define RGX_CR_SLC3_SCRAMBLE3 (0xE218U)
+#define RGX_CR_SLC3_SCRAMBLE3_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE3_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_SCRAMBLE4
+*/
+#define RGX_CR_SLC3_SCRAMBLE4 (0xE260U)
+#define RGX_CR_SLC3_SCRAMBLE4_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_SHIFT (0U)
+#define RGX_CR_SLC3_SCRAMBLE4_BITS_CLRMSK (IMG_UINT64_C(0x0000000000000000))
+
+
+/*
+ Register RGX_CR_SLC3_STATUS
+*/
+#define RGX_CR_SLC3_STATUS (0xE220U)
+#define RGX_CR_SLC3_STATUS_MASKFULL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES1_SHIFT (48U)
+#define RGX_CR_SLC3_STATUS_WRITES1_CLRMSK (IMG_UINT64_C(0X0000FFFFFFFFFFFF))
+#define RGX_CR_SLC3_STATUS_WRITES0_SHIFT (32U)
+#define RGX_CR_SLC3_STATUS_WRITES0_CLRMSK (IMG_UINT64_C(0XFFFF0000FFFFFFFF))
+#define RGX_CR_SLC3_STATUS_READS1_SHIFT (16U)
+#define RGX_CR_SLC3_STATUS_READS1_CLRMSK (IMG_UINT64_C(0XFFFFFFFF0000FFFF))
+#define RGX_CR_SLC3_STATUS_READS0_SHIFT (0U)
+#define RGX_CR_SLC3_STATUS_READS0_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF0000))
+
+
+/*
+ Register RGX_CR_SLC3_IDLE
+*/
+#define RGX_CR_SLC3_IDLE (0xE228U)
+#define RGX_CR_SLC3_IDLE_MASKFULL (IMG_UINT64_C(0x00000000000FFFFF))
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_SHIFT (18U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST2_CLRMSK (0XFFF3FFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_SHIFT (17U)
+#define RGX_CR_SLC3_IDLE_MMU_CLRMSK (0XFFFDFFFFU)
+#define RGX_CR_SLC3_IDLE_MMU_EN (0X00020000U)
+#define RGX_CR_SLC3_IDLE_RDI_SHIFT (16U)
+#define RGX_CR_SLC3_IDLE_RDI_CLRMSK (0XFFFEFFFFU)
+#define RGX_CR_SLC3_IDLE_RDI_EN (0X00010000U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_SHIFT (12U)
+#define RGX_CR_SLC3_IDLE_IMGBV4_CLRMSK (0XFFFF0FFFU)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_SHIFT (4U)
+#define RGX_CR_SLC3_IDLE_CACHE_BANKS_CLRMSK (0XFFFFF00FU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_SHIFT (2U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_DUST_CLRMSK (0XFFFFFFF3U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_SHIFT (1U)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_CLRMSK (0XFFFFFFFDU)
+#define RGX_CR_SLC3_IDLE_ORDERQ_JONES_EN (0X00000002U)
+#define RGX_CR_SLC3_IDLE_XBAR_SHIFT (0U)
+#define RGX_CR_SLC3_IDLE_XBAR_CLRMSK (0XFFFFFFFEU)
+#define RGX_CR_SLC3_IDLE_XBAR_EN (0X00000001U)
+
+
+/*
+ Register RGX_CR_SLC3_FAULT_STOP_STATUS
+*/
+#define RGX_CR_SLC3_FAULT_STOP_STATUS (0xE248U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_MASKFULL (IMG_UINT64_C(0x0000000000001FFF))
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_SHIFT (0U)
+#define RGX_CR_SLC3_FAULT_STOP_STATUS_BIF_CLRMSK (0XFFFFE000U)
+
+
+/*
+ Register RGX_CR_VDM_CONTEXT_STORE_MODE
+*/
+#define RGX_CR_VDM_CONTEXT_STORE_MODE (0xF048U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MASKFULL (IMG_UINT64_C(0x0000000000000003))
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_SHIFT (0U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_CLRMSK (0XFFFFFFFCU)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX (0x00000000U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE (0X00000001U)
+#define RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST (0X00000002U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING0
+*/
+#define RGX_CR_CONTEXT_MAPPING0 (0xF078U)
+#define RGX_CR_CONTEXT_MAPPING0_MASKFULL (IMG_UINT64_C(0x00000000FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING0_2D_SHIFT (24U)
+#define RGX_CR_CONTEXT_MAPPING0_2D_CLRMSK (0X00FFFFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING0_CDM_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING0_3D_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING0_3D_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING0_TA_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING0_TA_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING1
+*/
+#define RGX_CR_CONTEXT_MAPPING1 (0xF080U)
+#define RGX_CR_CONTEXT_MAPPING1_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_CONTEXT_MAPPING1_HOST_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING1_HOST_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING1_TLA_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING2
+*/
+#define RGX_CR_CONTEXT_MAPPING2 (0xF088U)
+#define RGX_CR_CONTEXT_MAPPING2_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING2_ALIST0_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING2_TE0_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING2_VCE0_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING3
+*/
+#define RGX_CR_CONTEXT_MAPPING3 (0xF090U)
+#define RGX_CR_CONTEXT_MAPPING3_MASKFULL (IMG_UINT64_C(0x0000000000FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING3_ALIST1_CLRMSK (0XFF00FFFFU)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING3_TE1_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING3_VCE1_CLRMSK (0XFFFFFF00U)
+
+
+/*
+ Register RGX_CR_BIF_JONES_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ (0xF098U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_JONES_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ (0xF0A0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_BLACKPEARL_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_BIF_DUST_OUTSTANDING_READ
+*/
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ (0xF0A8U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_MASKFULL (IMG_UINT64_C(0x000000000000FFFF))
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_SHIFT (0U)
+#define RGX_CR_BIF_DUST_OUTSTANDING_READ_COUNTER_CLRMSK (0XFFFF0000U)
+
+
+/*
+ Register RGX_CR_CONTEXT_MAPPING4
+*/
+#define RGX_CR_CONTEXT_MAPPING4 (0xF210U)
+#define RGX_CR_CONTEXT_MAPPING4_MASKFULL (IMG_UINT64_C(0x0000FFFFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_SHIFT (40U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_MMU_STACK_CLRMSK (IMG_UINT64_C(0XFFFF00FFFFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_SHIFT (32U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_UFSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFF00FFFFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_SHIFT (24U)
+#define RGX_CR_CONTEXT_MAPPING4_3D_FSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFF00FFFFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_SHIFT (16U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_MMU_STACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFF00FFFF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_SHIFT (8U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_UFSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFF00FF))
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_SHIFT (0U)
+#define RGX_CR_CONTEXT_MAPPING4_TA_FSTACK_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFF00))
+
+
+#endif /* _RGX_CR_DEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgx_cr_defs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km/rgxdefs_km.h b/drivers/gpu/drm/img-rogue/1.10/km/rgxdefs_km.h
new file mode 100644
index 00000000000000..81f465e7c58e6a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km/rgxdefs_km.h
@@ -0,0 +1,335 @@
+/*************************************************************************/ /*!
+@Title Rogue hw definitions (kernel mode)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGXDEFS_KM_H_
+#define _RGXDEFS_KM_H_
+
+#include RGX_BVNC_CORE_KM_HEADER
+#include RGX_BNC_CONFIG_KM_HEADER
+
+#define __IMG_EXPLICIT_INCLUDE_HWDEFS
+#if defined(__KERNEL__)
+#include "rgx_cr_defs_km.h"
+#else
+#include RGX_BVNC_CORE_HEADER
+#include RGX_BNC_CONFIG_HEADER
+#include "rgx_cr_defs.h"
+#endif
+#undef __IMG_EXPLICIT_INCLUDE_HWDEFS
+
+/* The following macros are picked up through the BVNC headers for PDUMP and
+ * NO_HARDWARE builds, to remain compatible with the old build infrastructure.
+ */
+#if defined(PDUMP) || defined(NO_HARDWARE) || !defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+/******************************************************************************
+ * Check for valid B.V.N.C
+ *****************************************************************************/
+#if !defined(RGX_BVNC_KM_B) || !defined(RGX_BVNC_KM_V) || !defined(RGX_BVNC_KM_N) || !defined(RGX_BVNC_KM_C)
+#error "Need to specify BVNC (RGX_BVNC_KM_B, RGX_BVNC_KM_V, RGX_BVNC_KM_N and RGX_BVNC_C)"
+#endif
+#endif
+
+#if defined(PDUMP) || defined(NO_HARDWARE)
+/* Check core/config compatibility */
+#if (RGX_BVNC_KM_B != RGX_BNC_KM_B) || (RGX_BVNC_KM_N != RGX_BNC_KM_N) || (RGX_BVNC_KM_C != RGX_BNC_KM_C)
+#error "BVNC headers are mismatching (KM core/config)"
+#endif
+
+#endif
+
+/******************************************************************************
+ * RGX Version name
+ *****************************************************************************/
+#define _RGX_BVNC_ST2(S) #S
+#define _RGX_BVNC_ST(S) _RGX_BVNC_ST2(S)
+#define RGX_BVNC_KM _RGX_BVNC_ST(RGX_BVNC_KM_B) "." _RGX_BVNC_ST(RGX_BVNC_KM_V) "." _RGX_BVNC_ST(RGX_BVNC_KM_N) "." _RGX_BVNC_ST(RGX_BVNC_KM_C)
+#define RGX_BVNC_KM_V_ST _RGX_BVNC_ST(RGX_BVNC_KM_V)
+
+/******************************************************************************
+ * RGX Defines
+ *****************************************************************************/
+
+#define BVNC_FIELD_MASK ((1 << BVNC_FIELD_WIDTH) - 1)
+#define C_POSITION (0)
+#define N_POSITION ((C_POSITION) + (BVNC_FIELD_WIDTH))
+#define V_POSITION ((N_POSITION) + (BVNC_FIELD_WIDTH))
+#define B_POSITION ((V_POSITION) + (BVNC_FIELD_WIDTH))
+
+#define B_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (B_POSITION)))
+#define V_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (V_POSITION)))
+#define N_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (N_POSITION)))
+#define C_POSTION_MASK (((IMG_UINT64)(BVNC_FIELD_MASK) << (C_POSITION)))
+
+#define GET_B(x) (((x) & (B_POSTION_MASK)) >> (B_POSITION))
+#define GET_V(x) (((x) & (V_POSTION_MASK)) >> (V_POSITION))
+#define GET_N(x) (((x) & (N_POSTION_MASK)) >> (N_POSITION))
+#define GET_C(x) (((x) & (C_POSTION_MASK)) >> (C_POSITION))
+
+#define BVNC_PACK(B,V,N,C) ((((IMG_UINT64)B)) << (B_POSITION) | \
+ (((IMG_UINT64)V)) << (V_POSITION) | \
+ (((IMG_UINT64)N)) << (N_POSITION) | \
+ (((IMG_UINT64)C)) << (C_POSITION) \
+ )
+
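+/*
+ Worked example (assuming BVNC_FIELD_WIDTH == 16, which comes from the
+ BVNC headers, not this file): BVNC_PACK(4, 31, 4, 55) yields
+ 0x0004001F00040037, and GET_V() of that value recovers 31. For the same
+ core, RGX_BVNC_KM expands to the dotted string "4.31.4.55".
+*/
+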
+#define RGX_CR_CORE_ID_CONFIG_N_SHIFT (8U)
+#define RGX_CR_CORE_ID_CONFIG_C_SHIFT (0U)
+
+#define RGX_CR_CORE_ID_CONFIG_N_CLRMSK (0XFFFF00FFU)
+#define RGX_CR_CORE_ID_CONFIG_C_CLRMSK (0XFFFFFF00U)
+
+/* The default number of OSIDs is 1; a higher number implies VZ-enabled firmware */
+#if !defined(RGXFW_NATIVE) && defined(PVRSRV_VZ_NUM_OSID) && (PVRSRV_VZ_NUM_OSID + 1 > 1)
+#define RGXFW_NUM_OS PVRSRV_VZ_NUM_OSID
+#else
+#define RGXFW_NUM_OS 1
+#endif
+
+/* META cores (required for the RGX_FEATURE_META) */
+#define MTP218 (1)
+#define MTP219 (2)
+#define LTP218 (3)
+#define LTP217 (4)
+
+/* META Core memory feature depending on META variants */
+#define RGX_META_COREMEM_32K (32*1024)
+#define RGX_META_COREMEM_48K (48*1024)
+#define RGX_META_COREMEM_64K (64*1024)
+#define RGX_META_COREMEM_96K (96*1024)
+#define RGX_META_COREMEM_128K (128*1024)
+#define RGX_META_COREMEM_256K (256*1024)
+
+#if !defined(__KERNEL__)
+#if (!defined(SUPPORT_TRUSTED_DEVICE) || defined(RGX_FEATURE_META_DMA)) && \
+ (defined(RGX_FEATURE_META_COREMEM_SIZE) && RGX_FEATURE_META_COREMEM_SIZE != 0)
+#define RGX_META_COREMEM_SIZE (RGX_FEATURE_META_COREMEM_SIZE*1024)
+#define RGX_META_COREMEM (1)
+#define RGX_META_COREMEM_CODE (1)
+#if !defined(FIX_HW_BRN_50767) && (RGXFW_NUM_OS == 1)
+#define RGX_META_COREMEM_DATA (1)
+#endif
+#else
+#undef SUPPORT_META_COREMEM
+#undef RGX_FEATURE_META_COREMEM_SIZE
+#undef RGX_FEATURE_META_DMA
+#define RGX_FEATURE_META_COREMEM_SIZE (0)
+#define RGX_META_COREMEM_SIZE (0)
+#endif
+#endif
+
+/* ISP requires valid state on all three pipes regardless of the number of
+ * active pipes/tiles in flight.
+ */
+#define RGX_MAX_NUM_PIPES 3
+
+#define GET_ROGUE_CACHE_LINE_SIZE(x) ((((IMG_INT32)(x)) > 0) ? ((x)/8) : (0))
+
+
+#define MAX_HW_TA3DCONTEXTS 2
+
+/* Useful extra defines for clock ctrl */
+#define RGX_CR_CLK_CTRL_ALL_ON (IMG_UINT64_C(0x5555555555555555)&RGX_CR_CLK_CTRL_MASKFULL)
+#define RGX_CR_CLK_CTRL_ALL_AUTO (IMG_UINT64_C(0xaaaaaaaaaaaaaaaa)&RGX_CR_CLK_CTRL_MASKFULL)
+
+#define RGX_CR_SOFT_RESET_DUST_n_CORE_EN (RGX_CR_SOFT_RESET_DUST_A_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_B_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_C_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_D_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_E_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_F_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_G_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_H_CORE_EN)
+
+/* SOFT_RESET Rascal and DUSTs bits */
+#define RGX_CR_SOFT_RESET_RASCALDUSTS_EN (RGX_CR_SOFT_RESET_RASCAL_CORE_EN | \
+ RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+
+
+
+/* SOFT_RESET steps as defined in the TRM */
+#define RGX_S7_SOFT_RESET_DUSTS (RGX_CR_SOFT_RESET_DUST_n_CORE_EN)
+
+#define RGX_S7_SOFT_RESET_JONES (RGX_CR_SOFT_RESET_PM_EN | \
+ RGX_CR_SOFT_RESET_VDM_EN | \
+ RGX_CR_SOFT_RESET_ISP_EN)
+
+#define RGX_S7_SOFT_RESET_JONES_ALL (RGX_S7_SOFT_RESET_JONES | \
+ RGX_CR_SOFT_RESET_BIF_EN | \
+ RGX_CR_SOFT_RESET_SLC_EN | \
+ RGX_CR_SOFT_RESET_GARTEN_EN)
+
+#define RGX_S7_SOFT_RESET2 (RGX_CR_SOFT_RESET2_BLACKPEARL_EN | \
+ RGX_CR_SOFT_RESET2_PIXEL_EN | \
+ RGX_CR_SOFT_RESET2_CDM_EN | \
+ RGX_CR_SOFT_RESET2_VERTEX_EN)
+
+
+
+#define RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT (12)
+#define RGX_BIF_PM_PHYSICAL_PAGE_SIZE (1 << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT (14)
+#define RGX_BIF_PM_VIRTUAL_PAGE_SIZE (1 << RGX_BIF_PM_VIRTUAL_PAGE_ALIGNSHIFT)
+
+#define RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE (16)
+
+/* To get the number of required Dusts, divide the number of clusters by 2 and round up */
+#define RGX_REQ_NUM_DUSTS(CLUSTERS) ((CLUSTERS + 1) / 2)
+
+/* To get the number of required Bernado/Phantom, divide the number of clusters by 4 and round up */
+#define RGX_REQ_NUM_PHANTOMS(CLUSTERS) ((CLUSTERS + 3) / 4)
+#define RGX_REQ_NUM_BERNADOS(CLUSTERS) ((CLUSTERS + 3) / 4)
+#define RGX_REQ_NUM_BLACKPEARLS(CLUSTERS) ((CLUSTERS + 3) / 4)
+
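+/*
+ Worked example: a 6-cluster configuration needs
+ RGX_REQ_NUM_DUSTS(6) = (6 + 1) / 2 = 3 Dusts and
+ RGX_REQ_NUM_PHANTOMS(6) = (6 + 3) / 4 = 2 Phantoms.
+*/
+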
+#if !defined(__KERNEL__)
+# define RGX_NUM_PHANTOMS (RGX_REQ_NUM_PHANTOMS(RGX_FEATURE_NUM_CLUSTERS))
+#endif
+
+
+/* RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT is not defined for format 1 cores (so define it now). */
+#if !defined(RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT)
+#define RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT (1)
+#endif
+
+/* META second thread feature, depending on META variant and available CoreMem */
+#if defined(RGX_FEATURE_META) && (RGX_FEATURE_META == MTP218 || RGX_FEATURE_META == MTP219) && defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && (RGX_FEATURE_META_COREMEM_SIZE == 256)
+#define RGXFW_META_SUPPORT_2ND_THREAD
+#endif
+
+/*
+ Start at 903 GiB, with 32 MB per OSID (see rgxheapconfig.h).
+ NOTE:
+ The firmware heap bases and sizes are defined here to simplify
+ #include dependencies; see rgxheapconfig.h for the full RGX
+ virtual address space layout.
+
+ The config heap takes up the last 64 KB of the total firmware heap
+ space and acts as storage for the kernel and firmware CCB offsets.
+ The main firmware heap size is reduced accordingly, but most of the
+ map / unmap functions must still consider the entire range (i.e. the
+ main and config heaps) */
+
+#define RGX_FIRMWARE_HEAP_SHIFT RGX_FW_HEAP_SHIFT
+#define RGX_FIRMWARE_RAW_HEAP_BASE (0xE1C0000000ULL)
+#define RGX_FIRMWARE_RAW_HEAP_SIZE (1U << RGX_FIRMWARE_HEAP_SHIFT)
+#define RGX_FIRMWARE_CONFIG_HEAP_SIZE (0x10000U) /* 64K */
+#define RGX_FIRMWARE_META_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+/*
+ * The MIPS firmware needs space in the main heap to map GPU memory;
+ * this space is carved out of the MAIN heap to avoid creating a new heap.
+ */
+#define RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE (0x100000U) /* 1M */
+#define RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE (RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE - RGX_FIRMWARE_MIPS_GPU_MAP_RESERVED_SIZE)
+
+/* Hypervisor sub-heap order: MAIN + CONFIG */
+#define RGX_FIRMWARE_HYPERV_MAIN_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE
+#define RGX_FIRMWARE_HYPERV_CONFIG_HEAP_BASE (RGX_FIRMWARE_HYPERV_MAIN_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE - RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+
+/* Guest sub-heap order: CONFIG + MAIN */
+#define RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE RGX_FIRMWARE_RAW_HEAP_BASE
+#define RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE (RGX_FIRMWARE_GUEST_CONFIG_HEAP_BASE + RGX_FIRMWARE_CONFIG_HEAP_SIZE)
+
+#define RGXFW_GUEST_OSID_START 1
+
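+/*
+ Worked example (assuming RGX_FW_HEAP_SHIFT == 25, i.e. the 32 MB per
+ OSID noted above; the real value comes from the build configuration):
+ RAW = 0x2000000, META MAIN = 0x2000000 - 0x10000 = 0x1FF0000 and
+ MIPS MAIN = 0x1FF0000 - 0x100000 = 0x1EF0000. The hypervisor then has
+ MAIN at 0xE1C0000000 and CONFIG at 0xE1C1FF0000, while a guest sees
+ CONFIG at 0xE1C0000000 and MAIN at 0xE1C0010000.
+*/
+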
+/******************************************************************************
+ * WA HWBRNs
+ *****************************************************************************/
+#if defined(FIX_HW_BRN_36492)
+
+#undef RGX_CR_SOFT_RESET_SLC_EN
+#undef RGX_CR_SOFT_RESET_SLC_CLRMSK
+#undef RGX_CR_SOFT_RESET_SLC_SHIFT
+
+/* Remove the SOFT_RESET_SLC_EN bit from SOFT_RESET_MASKFULL */
+#undef RGX_CR_SOFT_RESET_MASKFULL
+#define RGX_CR_SOFT_RESET_MASKFULL IMG_UINT64_C(0x000001FFF7FFFC1D)
+
+#endif /* FIX_HW_BRN_36492 */
+
+
+#if defined(RGX_CR_JONES_IDLE_MASKFULL)
+/* Workaround for HW BRN 57289 */
+#if (RGX_CR_JONES_IDLE_MASKFULL != 0x0000000000007FFF)
+#error This WA must be updated if RGX_CR_JONES_IDLE is expanded!!!
+#endif
+#undef RGX_CR_JONES_IDLE_MASKFULL
+#undef RGX_CR_JONES_IDLE_TDM_SHIFT
+#undef RGX_CR_JONES_IDLE_TDM_CLRMSK
+#undef RGX_CR_JONES_IDLE_TDM_EN
+#define RGX_CR_JONES_IDLE_MASKFULL (IMG_UINT64_C(0x0000000000003FFF))
+#endif
+
+
+#define DPX_MAX_RAY_CONTEXTS 4 /* FIXME should this be in dpx file? */
+#define DPX_MAX_FBA_AP 16
+#define DPX_MAX_FBA_FILTER_WIDTH 24
+
+#if !defined(__KERNEL__)
+#if !defined(RGX_FEATURE_SLC_SIZE_IN_BYTES)
+#if defined(RGX_FEATURE_SLC_SIZE_IN_KILOBYTES)
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (RGX_FEATURE_SLC_SIZE_IN_KILOBYTES * 1024)
+#else
+#define RGX_FEATURE_SLC_SIZE_IN_BYTES (0)
+#endif
+#endif
+#endif
+
+#if defined(__KERNEL__)
+
+#define RGX_GET_NUM_RASTERISATION_MODULES(DEV_FEATURE_CFG) \
+ ( \
+ ((DEV_FEATURE_CFG).ui64Features & RGX_FEATURE_ROGUEXE_BIT_MASK) != 0 ? \
+ (DEV_FEATURE_CFG).ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX] : \
+ RGX_REQ_NUM_PHANTOMS((DEV_FEATURE_CFG).ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]) \
+ )
+
+#else
+
+#if defined(RGX_FEATURE_ROGUEXE)
+#define RGX_NUM_RASTERISATION_MODULES RGX_FEATURE_NUM_CLUSTERS
+#else
+#define RGX_NUM_RASTERISATION_MODULES RGX_NUM_PHANTOMS
+#endif
+
+#define RGX_GET_NUM_RASTERISATION_MODULES(DEV_FEATURE_CFG) RGX_NUM_RASTERISATION_MODULES
+
+#endif /* !__KERNEL__ */
+
+#endif /* _RGXDEFS_KM_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/km/rgxmhdefs_km.h b/drivers/gpu/drm/img-rogue/1.10/km/rgxmhdefs_km.h
new file mode 100644
index 00000000000000..485e23ec197821
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km/rgxmhdefs_km.h
@@ -0,0 +1,380 @@
+/*************************************************************************/ /*!
+@Title Hardware definition file rgxmhdefs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ * rogue_mh.def
+ */
+
+
+#ifndef _RGXMHDEFS_KM_H_
+#define _RGXMHDEFS_KM_H_
+
+#include "img_types.h"
+
+
+#define RGXMHDEFS_KM_REVISION 0
+
+/*
+
+ Encoding of MH_TAG_SB for TDM CTL
+
+*/
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE (0x00000000U)
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT (0x00000001U)
+#define RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE (0x00000002U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for TDM DMA
+
+*/
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM (0x00000000U)
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER (0x00000001U)
+#define RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL (0x00000002U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for PMD
+
+*/
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK (0x00000008U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST (0x00000009U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK (0x0000000aU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST (0x0000000bU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0 (0x0000000cU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1 (0x0000002dU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK (0x0000000fU)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK (0x00000012U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK (0x00000013U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK (0x00000016U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK (0x00000017U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP (0x00000019U)
+#define RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP (0x0000001aU)
+
+
+/*
+
+ Encoding of MH_TAG_SB for PMA
+
+*/
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK (0x00000000U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST (0x00000001U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK (0x00000002U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST (0x00000003U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0 (0x00000004U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1 (0x00000025U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP (0x00000006U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK (0x00000007U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK (0x00000008U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK (0x00000009U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK (0x00000014U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK (0x00000015U)
+#define RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP (0x00000018U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for TA
+
+*/
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PPP (0x00000008U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCERTC (0x00000007U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TEACRTC (0x00000006U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGRTC (0x00000005U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGR (0x00000004U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_PSGS (0x00000003U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_TPC (0x00000002U)
+#define RGX_MH_TAG_SB_TA_ENCODING_TA_TAG_VCE (0x00000001U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for IPF when there are 2 IPF pipes
+
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ00 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_PREQ01 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_RREQ (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DBSC (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_CPF (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_2_ENCODING_IPF_TAG_DELTA (0x00000007U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for IPF when there are 4 IPF pipes
+
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ00 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ01 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ02 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_PREQ03 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_RREQ (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DBSC (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_CPF (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_4_ENCODING_IPF_TAG_DELTA (0x0000000bU)
+
+
+/*
+
+ Encoding of MH_TAG_SB for IPF when there are 7 IPF pipes
+
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ04 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ05 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CREQ06 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ00 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ01 (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ02 (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ03 (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ04 (0x0000000bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ05 (0x0000000cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_PREQ06 (0x0000000dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_RREQ (0x0000000eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DBSC (0x0000000fU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_CPF (0x00000010U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_7_ENCODING_IPF_TAG_DELTA (0x00000011U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for IPF when there are 14 IPF pipes
+
+*/
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ00 (0x00000000U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ01 (0x00000001U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ02 (0x00000002U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ03 (0x00000003U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ04 (0x00000004U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ05 (0x00000005U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ06 (0x00000006U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ07 (0x00000007U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ08 (0x00000008U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ09 (0x00000009U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ10 (0x0000000aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ11 (0x0000000bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ12 (0x0000000cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CREQ13 (0x0000000dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ00 (0x0000000eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ01 (0x0000000fU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ02 (0x00000010U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ03 (0x00000011U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ04 (0x00000012U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ05 (0x00000013U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ06 (0x00000014U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ07 (0x00000015U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ08 (0x00000016U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ09 (0x00000017U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ10 (0x00000018U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ11 (0x00000019U)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ12 (0x0000001aU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_PREQ13 (0x0000001bU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_RREQ (0x0000001cU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DBSC (0x0000001dU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_CPF (0x0000001eU)
+#define RGX_MH_TAG_SB_IPF_IPF_NUM_PIPES_14_ENCODING_IPF_TAG_DELTA (0x0000001fU)
+
+
+/*
+
+ Encoding of MH_TAG_SB for TPF
+
+*/
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE (0x00000000U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS (0x00000001U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA (0x00000002U)
+#define RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA (0x00000003U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for ISP
+
+*/
+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_ZLS (0x00000000U)
+#define RGX_MH_TAG_SB_ISP_ENCODING_ISP_TAG_DS (0x00000001U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for VDM
+
+*/
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL (0x00000000U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE (0x00000001U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX (0x00000002U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK (0x00000004U)
+#define RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT (0x00000008U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for CDM
+
+*/
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM (0x00000000U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA (0x00000001U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA (0x00000002U)
+#define RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE (0x00000003U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for MIPS
+
+*/
+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH (0x00000002U)
+#define RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS (0x00000003U)
+
+
+/*
+
+ Encoding of MH_TAG_SB for MMU
+
+*/
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST (0x00000000U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST (0x00000001U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST (0x00000002U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST (0x00000003U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST (0x00000004U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST (0x00000005U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST (0x00000006U)
+#define RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST (0x00000007U)
+
+
+/*
+
+ Encoding of MH TAG
+
+*/
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT (0x00000000U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD (0x00000001U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC (0x00000002U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM (0x00000003U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MIPS (0x00000004U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0 (0x00000005U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1 (0x00000006U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2 (0x00000007U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3 (0x00000008U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0 (0x00000009U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1 (0x0000000aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2 (0x0000000bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3 (0x0000000cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4 (0x0000000dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_0 (0x0000000eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PDS_1 (0x0000000fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA (0x00000010U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB (0x00000011U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC (0x00000012U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD (0x00000013U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA (0x00000014U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB (0x00000015U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC (0x00000016U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD (0x00000017U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW (0x00000018U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_0 (0x00000019U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TCU_1 (0x0000001aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0 (0x0000001bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1 (0x0000001cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_USC (0x0000001fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS (0x00000020U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS (0x00000021U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF (0x00000022U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS (0x00000023U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF (0x00000024U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ (0x00000025U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS (0x00000026U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5 (0x00000027U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP (0x00000028U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC (0x00000029U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC (0x0000002aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC (0x0000002bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION (0x0000002cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM (0x0000002dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW (0x0000002eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC (0x0000002fU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC (0x00000030U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC (0x00000031U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA (0x00000032U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL (0x00000033U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE0 (0x00000034U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_PBE1 (0x00000035U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPP (0x00000038U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP1_ZLS (0x00000039U)
+#define RGX_MH_TAG_ENCODING_MH_TAG_ISP1_DS (0x0000003aU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF1 (0x0000003bU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF1_PBCDBIAS (0x0000003cU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_TPF1_SPF (0x0000003dU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF1_CREQ (0x0000003eU)
+#define RGX_MH_TAG_ENCODING_MH_TAG_IPF1_OTHERS (0x0000003fU)
+
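+/*
+ Illustrative helper (hypothetical, not part of the generated tables):
+ mapping a decoded MH TAG value, e.g. taken from a fault status register,
+ to a printable requester name. Only a few encodings are shown.
+*/
+static inline const IMG_CHAR *ExampleMHTagName(IMG_UINT32 ui32Tag)
+{
+ switch (ui32Tag)
+ {
+  case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT: return "MMU PT";
+  case RGX_MH_TAG_ENCODING_MH_TAG_MIPS: return "MIPS";
+  case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA: return "TDM DMA";
+  default: return "unknown";
+ }
+}
+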
+#endif /* _RGXMHDEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgxmhdefs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km/rgxmmudefs_km.h b/drivers/gpu/drm/img-rogue/1.10/km/rgxmmudefs_km.h
new file mode 100644
index 00000000000000..43bedd2da3cd3d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km/rgxmmudefs_km.h
@@ -0,0 +1,396 @@
+/*************************************************************************/ /*!
+@Title Hardware definition file rgxmmudefs_km.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* **** Autogenerated C -- do not edit **** */
+
+/*
+ * rogue_bif.def
+ */
+
+
+#ifndef _RGXMMUDEFS_KM_H_
+#define _RGXMMUDEFS_KM_H_
+
+#include "img_types.h"
+
+
+#define RGXMMUDEFS_KM_REVISION 0
+
+/*
+
+ Encoding of DM (note: value 0x6 is not used)
+
+*/
+#define RGX_BIF_DM_ENCODING_VERTEX (0x00000000U)
+#define RGX_BIF_DM_ENCODING_PIXEL (0x00000001U)
+#define RGX_BIF_DM_ENCODING_COMPUTE (0x00000002U)
+#define RGX_BIF_DM_ENCODING_TLA (0x00000003U)
+#define RGX_BIF_DM_ENCODING_PB_VCE (0x00000004U)
+#define RGX_BIF_DM_ENCODING_PB_TE (0x00000005U)
+#define RGX_BIF_DM_ENCODING_META (0x00000007U)
+#define RGX_BIF_DM_ENCODING_HOST (0x00000008U)
+#define RGX_BIF_DM_ENCODING_PM_ALIST (0x00000009U)
+
+
+/*
+
+ Labelling of fields within virtual address
+
+*/
+/*
+Page Catalogue entry #
+*/
+#define RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT (30U)
+#define RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFF003FFFFFFF))
+/*
+Page Directory entry #
+*/
+#define RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT (21U)
+#define RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFC01FFFFF))
+/*
+Page Table entry #
+*/
+#define RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFE00FFF))
+
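+/*
+ Usage sketch (illustrative, hypothetical helper): a device virtual
+ address is decomposed with the ~CLRMSK-and-shift pattern; the PD and PT
+ indices follow the same idiom with their own macros.
+*/
+static inline IMG_UINT32 ExampleVAddrPCIndex(IMG_UINT64 ui64VAddr)
+{
+ return (IMG_UINT32)((ui64VAddr & ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK)
+                     >> RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT);
+}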
+
+/*
+
+ Number of entries in a PC
+
+*/
+#define RGX_MMUCTRL_ENTRIES_PC_VALUE (0x00000400U)
+
+
+/*
+
+ Number of entries in a PD
+
+*/
+#define RGX_MMUCTRL_ENTRIES_PD_VALUE (0x00000200U)
+
+
+/*
+
+ Number of entries in a PT
+
+*/
+#define RGX_MMUCTRL_ENTRIES_PT_VALUE (0x00000200U)
+
+
+/*
+
+ Size in bits of the PC entries in memory
+
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PC_VALUE (0x00000020U)
+
+
+/*
+
+ Size in bits of the PD entries in memory
+
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PD_VALUE (0x00000040U)
+
+
+/*
+
+ Size in bits of the PT entries in memory
+
+*/
+#define RGX_MMUCTRL_ENTRY_SIZE_PT_VALUE (0x00000040U)
+
+
+/*
+
+ Encoding of page size field
+
+*/
+#define RGX_MMUCTRL_PAGE_SIZE_MASK (0x00000007U)
+#define RGX_MMUCTRL_PAGE_SIZE_4KB (0x00000000U)
+#define RGX_MMUCTRL_PAGE_SIZE_16KB (0x00000001U)
+#define RGX_MMUCTRL_PAGE_SIZE_64KB (0x00000002U)
+#define RGX_MMUCTRL_PAGE_SIZE_256KB (0x00000003U)
+#define RGX_MMUCTRL_PAGE_SIZE_1MB (0x00000004U)
+#define RGX_MMUCTRL_PAGE_SIZE_2MB (0x00000005U)
+
+
+/*
+
+ Range of bits used for 4KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_SHIFT (12U)
+#define RGX_MMUCTRL_PAGE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+ Range of bits used for 16KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_SHIFT (14U)
+#define RGX_MMUCTRL_PAGE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000003FFF))
+
+
+/*
+
+ Range of bits used for 64KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_SHIFT (16U)
+#define RGX_MMUCTRL_PAGE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000FFFF))
+
+
+/*
+
+ Range of bits used for 256KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_SHIFT (18U)
+#define RGX_MMUCTRL_PAGE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000003FFFF))
+
+
+/*
+
+ Range of bits used for 1MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_SHIFT (20U)
+#define RGX_MMUCTRL_PAGE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00000FFFFF))
+
+
+/*
+
+ Range of bits used for 2MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_SHIFT (21U)
+#define RGX_MMUCTRL_PAGE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00001FFFFF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 4KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_SHIFT (12U)
+#define RGX_MMUCTRL_PT_BASE_4KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 16KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_SHIFT (10U)
+#define RGX_MMUCTRL_PT_BASE_16KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00000003FF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 64KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_SHIFT (8U)
+#define RGX_MMUCTRL_PT_BASE_64KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF00000000FF))
+
+
+/*
+
+ Range of bits used for PT Base Address for 256KB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_SHIFT (6U)
+#define RGX_MMUCTRL_PT_BASE_256KB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000003F))
+
+
+/*
+
+ Range of bits used for PT Base Address for 1MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_SHIFT (5U)
+#define RGX_MMUCTRL_PT_BASE_1MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+ Range of bits used for PT Base Address for 2MB Physical Page
+
+*/
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_SHIFT (5U)
+#define RGX_MMUCTRL_PT_BASE_2MB_RANGE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+
+
+/*
+
+ Format of Page Table data
+
+*/
+/*
+PM/Meta protect bit
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_SHIFT (62U)
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_CLRMSK (IMG_UINT64_C(0XBFFFFFFFFFFFFFFF))
+#define RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN (IMG_UINT64_C(0X4000000000000000))
+/*
+Upper part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_SHIFT (40U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_HI_CLRMSK (IMG_UINT64_C(0XC00000FFFFFFFFFF))
+/*
+Physical page address
+*/
+#define RGX_MMUCTRL_PT_DATA_PAGE_SHIFT (12U)
+#define RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK (IMG_UINT64_C(0XFFFFFF0000000FFF))
+/*
+Lower part of vp page field
+*/
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_SHIFT (6U)
+#define RGX_MMUCTRL_PT_DATA_VP_PAGE_LO_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFF03F))
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_SHIFT (5U)
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFDF))
+#define RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0X0000000000000020))
+/*
+PM Src
+*/
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_SHIFT (4U)
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFEF))
+#define RGX_MMUCTRL_PT_DATA_PM_SRC_EN (IMG_UINT64_C(0X0000000000000010))
+/*
+SLC Bypass Ctrl
+*/
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_SHIFT (3U)
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF7))
+#define RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN (IMG_UINT64_C(0X0000000000000008))
+/*
+Cache Coherency bit
+*/
+#define RGX_MMUCTRL_PT_DATA_CC_SHIFT (2U)
+#define RGX_MMUCTRL_PT_DATA_CC_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFB))
+#define RGX_MMUCTRL_PT_DATA_CC_EN (IMG_UINT64_C(0X0000000000000004))
+/*
+Read only
+*/
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_SHIFT (1U)
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFD))
+#define RGX_MMUCTRL_PT_DATA_READ_ONLY_EN (IMG_UINT64_C(0X0000000000000002))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PT_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PT_DATA_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PT_DATA_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
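+/*
+ Usage sketch (illustrative, hypothetical helper): building a valid page
+ table entry for a 4 KB page from a 4 KB-aligned physical address; the
+ single-bit _EN values are OR-ed in directly.
+*/
+static inline IMG_UINT64 ExampleMakePTE4K(IMG_UINT64 ui64PhysAddr, IMG_BOOL bReadOnly)
+{
+ IMG_UINT64 ui64Entry = ui64PhysAddr & ~RGX_MMUCTRL_PT_DATA_PAGE_CLRMSK;
+
+ if (bReadOnly)
+ {
+  ui64Entry |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+ }
+ return ui64Entry | RGX_MMUCTRL_PT_DATA_VALID_EN;
+}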
+
+/*
+
+ Format of Page Directory data
+
+*/
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_SHIFT (40U)
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_CLRMSK (IMG_UINT64_C(0XFFFFFEFFFFFFFFFF))
+#define RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN (IMG_UINT64_C(0X0000010000000000))
+/*
+Page Table base address
+*/
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_SHIFT (5U)
+#define RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK (IMG_UINT64_C(0XFFFFFF000000001F))
+/*
+Page Size
+*/
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_SHIFT (1U)
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFF1))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB (IMG_UINT64_C(0x0000000000000000))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB (IMG_UINT64_C(0x0000000000000002))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB (IMG_UINT64_C(0x0000000000000004))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB (IMG_UINT64_C(0x0000000000000006))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB (IMG_UINT64_C(0x0000000000000008))
+#define RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB (IMG_UINT64_C(0x000000000000000a))
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PD_DATA_VALID_CLRMSK (IMG_UINT64_C(0XFFFFFFFFFFFFFFFE))
+#define RGX_MMUCTRL_PD_DATA_VALID_EN (IMG_UINT64_C(0X0000000000000001))
+
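+/*
+ Usage sketch (illustrative, hypothetical helper; assumes the PT base is
+ 4 KB aligned): a page directory entry for 4 KB granularity carries the
+ page table base address in the PT_BASE field plus the page size encoding
+ and the valid bit.
+*/
+static inline IMG_UINT64 ExampleMakePDE4K(IMG_UINT64 ui64PTBaseAddr)
+{
+ return (ui64PTBaseAddr & ~RGX_MMUCTRL_PD_DATA_PT_BASE_CLRMSK)
+        | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB
+        | RGX_MMUCTRL_PD_DATA_VALID_EN;
+}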
+
+/*
+
+ Format of Page Catalogue data
+
+*/
+/*
+Page Directory base address
+*/
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT (4U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_CLRMSK (0X0000000FU)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT (12U)
+#define RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSIZE (4096U)
+/*
+Entry pending
+*/
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_SHIFT (1U)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_CLRMSK (0XFFFFFFFDU)
+#define RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN (0X00000002U)
+/*
+Entry valid
+*/
+#define RGX_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define RGX_MMUCTRL_PC_DATA_VALID_CLRMSK (0XFFFFFFFEU)
+#define RGX_MMUCTRL_PC_DATA_VALID_EN (0X00000001U)
+
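+/*
+ Usage sketch (illustrative, hypothetical helper): a page catalogue entry
+ is 32 bits wide and stores the page directory base address shifted down
+ by its 4 KB alignment and placed at PD_BASE_SHIFT, plus the valid bit.
+*/
+static inline IMG_UINT32 ExampleMakePCE(IMG_UINT64 ui64PDBaseAddr)
+{
+ return (IMG_UINT32)((ui64PDBaseAddr >> RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT)
+                     << RGX_MMUCTRL_PC_DATA_PD_BASE_SHIFT)
+        | RGX_MMUCTRL_PC_DATA_VALID_EN;
+}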
+
+#endif /* _RGXMMUDEFS_KM_H_ */
+
+/*****************************************************************************
+ End of file (rgxmmudefs_km.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km_apphint.c b/drivers/gpu/drm/img-rogue/1.10/km_apphint.c
new file mode 100644
index 00000000000000..c3eff508f45ff5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km_apphint.c
@@ -0,0 +1,1430 @@
+/*************************************************************************/ /*!
+@File km_apphint.c
+@Title Apphint routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debugfs.h"
+#include "pvr_uaccess.h"
+#include <linux/moduleparam.h>
+#include <linux/workqueue.h>
+#include <linux/string.h>
+#include <stdbool.h>
+
+/* Common and SO layer */
+#include "img_defs.h"
+#include "sofunc_pvr.h"
+
+/* for action device access */
+#include "pvrsrv.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgxhwperf.h"
+#include "debugmisc_server.h"
+#include "htbserver.h"
+#include "rgxutils.h"
+#include "rgxapi_km.h"
+
+
+/* defines for default values */
+#include "rgx_fwif.h"
+#include "htbuffer_types.h"
+
+#include "pvr_notifier.h"
+
+#include "km_apphint_defs.h"
+#include "km_apphint.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#include "pdump_km.h"
+#endif
+
+/* Size of the temporary buffers used to read and write AppHint data.
+ * Must be large enough to contain any strings read/written,
+ * but no larger than 4096, which is the buffer size for the
+ * kernel_param_ops .get function.
+ * It must also be less than 1024 to keep the stack frame size
+ * within bounds.
+ */
+#define APPHINT_BUFFER_SIZE 512
+
+#define APPHINT_DEVICES_MAX 16
+
+/*
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+struct apphint_lookup {
+ char *name;
+ int value;
+};
+
+static const struct apphint_lookup fwt_logtype_tbl[] = {
+ { "trace", 2},
+ { "tbi", 1},
+ { "none", 0}
+};
+
+static const struct apphint_lookup fwt_loggroup_tbl[] = {
+ RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+};
+
+static const struct apphint_lookup htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+ HTB_LOG_SFGROUPLIST
+#undef X
+};
+
+static const struct apphint_lookup htb_opmode_tbl[] = {
+ { "droplatest", HTB_OPMODE_DROPLATEST},
+ { "dropoldest", HTB_OPMODE_DROPOLDEST},
+ { "block", HTB_OPMODE_BLOCK}
+};
+
+__maybe_unused
+static const struct apphint_lookup htb_logmode_tbl[] = {
+ { "all", HTB_LOGMODE_ALLPID},
+ { "restricted", HTB_LOGMODE_RESTRICTEDPID}
+};
+
+static const struct apphint_lookup timecorr_clk_tbl[] = {
+ { "mono", 0 },
+ { "mono_raw", 1 },
+ { "sched", 2 }
+};
+
+/*
+*******************************************************************************
+ Data types
+******************************************************************************/
+union apphint_value {
+ IMG_UINT64 UINT64;
+ IMG_UINT32 UINT32;
+ IMG_BOOL BOOL;
+ IMG_CHAR *STRING;
+};
+
+struct apphint_action {
+ union {
+ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value);
+ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value);
+ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value);
+ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value);
+ } query;
+ union {
+ PVRSRV_ERROR (*UINT64)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value);
+ PVRSRV_ERROR (*UINT32)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value);
+ PVRSRV_ERROR (*BOOL)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value);
+ PVRSRV_ERROR (*STRING)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value);
+ } set;
+ const PVRSRV_DEVICE_NODE *device;
+ const void *private_data;
+ union apphint_value stored;
+ bool free;
+};
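+/* Note on ownership: 'free' is set when 'stored.STRING' points at a
+ * kmalloc'd buffer owned by this module; apphint_action_worker() and
+ * pvr_apphint_deinit() kfree it before overwriting it or tearing down.
+ */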
+
+struct apphint_param {
+ IMG_UINT32 id;
+ APPHINT_DATA_TYPE data_type;
+ const void *data_type_helper;
+ IMG_UINT32 helper_size;
+};
+
+struct apphint_init_data {
+ IMG_UINT32 id; /* index into AppHint Table */
+ APPHINT_CLASS class;
+ IMG_CHAR *name;
+ union apphint_value default_value;
+};
+
+struct apphint_class_state {
+ APPHINT_CLASS class;
+ IMG_BOOL enabled;
+};
+
+struct apphint_work {
+ struct work_struct work;
+ union apphint_value new_value;
+ struct apphint_action *action;
+};
+
+/*
+*******************************************************************************
+ Initialization / configuration table data
+******************************************************************************/
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+
+static const struct apphint_init_data init_data_buildvar[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_BUILDVAR
+#undef X
+};
+
+static const struct apphint_init_data init_data_modparam[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_MODPARAM
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_DEBUGFS
+#undef X
+};
+
+static const struct apphint_init_data init_data_debugfs_device[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_CLASS_ ## c, #a, {.b=d} },
+ APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+};
+
+#undef UINT32Bitfield
+#undef UINT32List
+
+__maybe_unused static const char NO_PARAM_TABLE[] = {};
+
+static const struct apphint_param param_lookup[] = {
+#define X(a, b, c, d, e) \
+ {APPHINT_ID_ ## a, APPHINT_DATA_TYPE_ ## b, e, ARRAY_SIZE(e) },
+ APPHINT_LIST_ALL
+#undef X
+};
+
+static const struct apphint_class_state class_state[] = {
+#define X(a) {APPHINT_CLASS_ ## a, APPHINT_ENABLED_CLASS_ ## a},
+ APPHINT_CLASS_LIST
+#undef X
+};
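+/* For reference, a sketch of how the X-macro tables above expand: a
+ * hypothetical list entry
+ *
+ *   X(EnableFoo, BOOL, ALWAYS, IMG_FALSE, NO_PARAM_TABLE)
+ *
+ * becomes, in each init_data_* table,
+ *
+ *   {APPHINT_ID_EnableFoo, APPHINT_CLASS_ALWAYS, "EnableFoo", {.BOOL=IMG_FALSE} },
+ *
+ * so every table stays in lock-step with the APPHINT_LIST_* master lists in
+ * km_apphint_defs.h without hand-maintained duplication.
+ */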
+
+/*
+*******************************************************************************
+ Global state
+******************************************************************************/
+/* If union apphint_value ever grows such that it can no longer be read and
+ * written atomically, a mutex may be needed to prevent a read returning a
+ * partially written value.
+ * That would require a statically initialized mutex outside of
+ * struct apphint_state, so it cannot be used uninitialized when
+ * module_params are provided on the command line, e.g.:
+ * static DEFINE_MUTEX(apphint_mutex);
+ */
+static struct apphint_state
+{
+ struct workqueue_struct *workqueue;
+ PPVR_DEBUGFS_DIR_DATA debugfs_device_rootdir[APPHINT_DEVICES_MAX];
+ PPVR_DEBUGFS_ENTRY_DATA debugfs_device_entry[APPHINT_DEVICES_MAX][APPHINT_DEBUGFS_DEVICE_ID_MAX];
+ PPVR_DEBUGFS_DIR_DATA debugfs_rootdir;
+ PPVR_DEBUGFS_ENTRY_DATA debugfs_entry[APPHINT_DEBUGFS_ID_MAX];
+ PPVR_DEBUGFS_DIR_DATA buildvar_rootdir;
+ PPVR_DEBUGFS_ENTRY_DATA buildvar_entry[APPHINT_BUILDVAR_ID_MAX];
+
+ int num_devices;
+ PVRSRV_DEVICE_NODE *devices[APPHINT_DEVICES_MAX];
+ int initialized;
+
+	/* Array contains value space for one copy of every AppHint defined
+	 * (covering the first device) plus APPHINT_DEVICES_MAX-1 further
+	 * copies of the device-specific AppHints for multi-device platforms.
+	 */
+ struct apphint_action val[APPHINT_ID_MAX + ((APPHINT_DEVICES_MAX-1)*APPHINT_DEBUGFS_DEVICE_ID_MAX)];
+
+} apphint = {
+/* statically initialise default values to ensure that any module_params
+ * provided on the command line are not overwritten by defaults.
+ */
+ .val = {
+#define UINT32Bitfield UINT32
+#define UINT32List UINT32
+#define X(a, b, c, d, e) \
+ { {NULL}, {NULL}, NULL, NULL, {.b=d}, false },
+ APPHINT_LIST_ALL
+#undef X
+#undef UINT32Bitfield
+#undef UINT32List
+ },
+ .initialized = 0,
+ .num_devices = 0
+};
+
+#define APPHINT_DEBUGFS_DEVICE_ID_OFFSET (APPHINT_ID_MAX-APPHINT_DEBUGFS_DEVICE_ID_MAX)
+
+static inline void
+get_apphint_id_from_action_addr(const struct apphint_action * const addr,
+ APPHINT_ID * const id)
+{
+ *id = (APPHINT_ID)(addr - apphint.val);
+ if (*id >= APPHINT_ID_MAX) {
+ *id -= APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+ *id %= APPHINT_DEBUGFS_DEVICE_ID_MAX;
+ *id += APPHINT_DEBUGFS_DEVICE_ID_OFFSET;
+ }
+}
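+/* A worked example of the mapping above, with hypothetical sizes
+ * APPHINT_ID_MAX == 40 and APPHINT_DEBUGFS_DEVICE_ID_MAX == 13 (so
+ * APPHINT_DEBUGFS_DEVICE_ID_OFFSET == 27): the action at apphint.val[45]
+ * is device 1's copy of a per-device hint, and (45 - 27) % 13 + 27 == 32
+ * recovers the same APPHINT_ID as device 0's copy at apphint.val[32].
+ */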
+
+static inline void
+get_value_offset_from_device(const PVRSRV_DEVICE_NODE * const device,
+ int * const offset)
+{
+ int i;
+
+ /* No device offset if not a device specific apphint */
+ if (APPHINT_OF_DRIVER_NO_DEVICE == device) {
+ *offset = 0;
+ return;
+ }
+
+ for (i = 0; device && i < APPHINT_DEVICES_MAX; i++) {
+ if (apphint.devices[i] == device)
+ break;
+ }
+ if (APPHINT_DEVICES_MAX == i) {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Unregistered device", __func__));
+ i = 0;
+ }
+ *offset = i * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+}
+
+/**
+ * apphint_action_worker - perform an action after an AppHint update has been
+ * requested by a UM process, and update the record of the currently active
+ * value
+ */
+static void apphint_action_worker(struct work_struct *work)
+{
+ struct apphint_work *work_pkt = container_of(work,
+ struct apphint_work,
+ work);
+ struct apphint_action *a = work_pkt->action;
+ union apphint_value value = work_pkt->new_value;
+ APPHINT_ID id;
+ PVRSRV_ERROR result = PVRSRV_OK;
+
+ get_apphint_id_from_action_addr(a, &id);
+
+ if (a->set.UINT64) {
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ result = a->set.UINT64(a->device,
+ a->private_data,
+ value.UINT64);
+ break;
+
+ case APPHINT_DATA_TYPE_UINT32:
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ case APPHINT_DATA_TYPE_UINT32List:
+ result = a->set.UINT32(a->device,
+ a->private_data,
+ value.UINT32);
+ break;
+
+ case APPHINT_DATA_TYPE_BOOL:
+ result = a->set.BOOL(a->device,
+ a->private_data,
+ value.BOOL);
+ break;
+
+ case APPHINT_DATA_TYPE_STRING:
+ result = a->set.STRING(a->device,
+ a->private_data,
+ value.STRING);
+ kfree(value.STRING);
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unrecognised data type (%d), index (%d)",
+ __func__, param_lookup[id].data_type, id));
+ }
+
+ if (PVRSRV_OK != result) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed (%s)",
+ __func__, PVRSRVGetErrorStringKM(result)));
+ }
+ } else {
+ if (a->free) {
+ kfree(a->stored.STRING);
+ }
+ a->stored = value;
+ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+ a->free = true;
+ }
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: AppHint value updated before handler is registered, ID(%d)",
+ __func__, id));
+ }
+ kfree((void *)work_pkt);
+}
+
+static void apphint_action(union apphint_value new_value,
+ struct apphint_action *action)
+{
+ struct apphint_work *work_pkt = kmalloc(sizeof(*work_pkt), GFP_KERNEL);
+
+ /* queue apphint update on a serialized workqueue to avoid races */
+ if (work_pkt) {
+ work_pkt->new_value = new_value;
+ work_pkt->action = action;
+ INIT_WORK(&work_pkt->work, apphint_action_worker);
+ if (0 == queue_work(apphint.workqueue, &work_pkt->work)) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to queue apphint change request",
+ __func__));
+ goto err_exit;
+ }
+ } else {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to alloc memory for apphint change request",
+ __func__));
+ goto err_exit;
+ }
+ return;
+err_exit:
+ kfree(new_value.STRING);
+}
+
+/**
+ * apphint_read - parse a value of any AppHint data type from a text buffer
+ * Returns -errno or the buffer size (count)
+ */
+static int apphint_read(char *buffer, size_t count, APPHINT_ID ue,
+ union apphint_value *value)
+{
+ APPHINT_DATA_TYPE data_type = param_lookup[ue].data_type;
+ int result = 0;
+
+ switch (data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ if (kstrtou64(buffer, 0, &value->UINT64) < 0) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid UINT64 input data for id %d: %s",
+ __func__, ue, buffer));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ break;
+ case APPHINT_DATA_TYPE_UINT32:
+ if (kstrtou32(buffer, 0, &value->UINT32) < 0) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid UINT32 input data for id %d: %s",
+ __func__, ue, buffer));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ break;
+ case APPHINT_DATA_TYPE_BOOL:
+ switch (buffer[0]) {
+ case '0':
+ case 'n':
+ case 'N':
+ case 'f':
+ case 'F':
+ value->BOOL = IMG_FALSE;
+ break;
+ case '1':
+ case 'y':
+ case 'Y':
+ case 't':
+ case 'T':
+ value->BOOL = IMG_TRUE;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid BOOL input data for id %d: %s",
+ __func__, ue, buffer));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ break;
+ case APPHINT_DATA_TYPE_UINT32List:
+ {
+ int i;
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *)
+ param_lookup[ue].data_type_helper;
+ int size = param_lookup[ue].helper_size;
+ /* buffer may include '\n', remove it */
+ char *arg = strsep(&buffer, "\n");
+
+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ for (i = 0; i < size; i++) {
+ if (strcasecmp(lookup[i].name, arg) == 0) {
+ value->UINT32 = lookup[i].value;
+ break;
+ }
+ }
+ if (i == size) {
+ if (strlen(arg) == 0) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: No value set for AppHint",
+ __func__));
+ } else {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unrecognised AppHint value (%s)",
+ __func__, arg));
+ }
+ result = -EINVAL;
+ }
+ break;
+ }
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ {
+ int i;
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *)
+ param_lookup[ue].data_type_helper;
+ int size = param_lookup[ue].helper_size;
+ /* buffer may include '\n', remove it */
+ char *string = strsep(&buffer, "\n");
+ char *token = strsep(&string, ",");
+
+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ value->UINT32 = 0;
+ /* empty string is valid to clear the bitfield */
+ while (token && *token) {
+ for (i = 0; i < size; i++) {
+ if (strcasecmp(lookup[i].name, token) == 0) {
+ value->UINT32 |= lookup[i].value;
+ break;
+ }
+ }
+ if (i == size) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unrecognised AppHint value (%s)",
+ __func__, token));
+ result = -EINVAL;
+ goto err_exit;
+ }
+ token = strsep(&string, ",");
+ }
+ break;
+ }
+ case APPHINT_DATA_TYPE_STRING:
+ {
+ /* buffer may include '\n', remove it */
+ char *string = strsep(&buffer, "\n");
+ size_t len = strlen(string);
+
+ if (!len) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ ++len;
+
+ value->STRING = kmalloc(len, GFP_KERNEL);
+ if (!value->STRING) {
+ result = -ENOMEM;
+ goto err_exit;
+ }
+
+ strlcpy(value->STRING, string, len);
+ break;
+ }
+ default:
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+err_exit:
+ return (result < 0) ? result : count;
+}
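+/* Illustrative inputs accepted by apphint_read() for each data type:
+ *   UINT64/UINT32:  "42" or "0x2a"              (kstrtou64/u32, base 0)
+ *   BOOL:           "1"/"y"/"t" or "0"/"n"/"f"  (first character only)
+ *   UINT32List:     "dropoldest"                (one name from the helper table)
+ *   UINT32Bitfield: "groupA,groupB"             (helper-table names OR-ed;
+ *                                                empty clears the field)
+ *   STRING:         any non-empty line          (kmalloc'd copy, '\n' stripped)
+ * The group names here are illustrative; real names come from each hint's
+ * helper table.
+ */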
+
+/**
+ * apphint_write - write the current AppHint data to a buffer
+ *
+ * Returns length written or -errno
+ */
+static int apphint_write(char *buffer, const size_t size,
+ const struct apphint_action *a)
+{
+ const struct apphint_param *hint;
+ int result = 0;
+ APPHINT_ID id;
+ union apphint_value value;
+
+ get_apphint_id_from_action_addr(a, &id);
+ hint = &param_lookup[id];
+
+ if (a->query.UINT64) {
+ switch (hint->data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ result = a->query.UINT64(a->device,
+ a->private_data,
+ &value.UINT64);
+ break;
+
+ case APPHINT_DATA_TYPE_UINT32:
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ case APPHINT_DATA_TYPE_UINT32List:
+ result = a->query.UINT32(a->device,
+ a->private_data,
+ &value.UINT32);
+ break;
+
+ case APPHINT_DATA_TYPE_BOOL:
+ result = a->query.BOOL(a->device,
+ a->private_data,
+ &value.BOOL);
+ break;
+
+ case APPHINT_DATA_TYPE_STRING:
+ result = a->query.STRING(a->device,
+ a->private_data,
+ &value.STRING);
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unrecognised data type (%d), index (%d)",
+ __func__, hint->data_type, id));
+ }
+
+ if (PVRSRV_OK != result) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed (%d), index (%d)",
+ __func__, result, id));
+ }
+ } else {
+ value = a->stored;
+ }
+
+ switch (hint->data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ result += snprintf(buffer + result, size - result,
+ "0x%016llx",
+ value.UINT64);
+ break;
+ case APPHINT_DATA_TYPE_UINT32:
+ result += snprintf(buffer + result, size - result,
+ "0x%08x",
+ value.UINT32);
+ break;
+ case APPHINT_DATA_TYPE_BOOL:
+ result += snprintf(buffer + result, size - result,
+ "%s",
+ value.BOOL ? "Y" : "N");
+ break;
+ case APPHINT_DATA_TYPE_STRING:
+ if (value.STRING) {
+ result += snprintf(buffer + result, size - result,
+ "%s",
+ *value.STRING ? value.STRING : "(none)");
+ } else {
+ result += snprintf(buffer + result, size - result,
+ "(none)");
+ }
+ break;
+ case APPHINT_DATA_TYPE_UINT32List:
+ {
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *) hint->data_type_helper;
+ IMG_UINT32 i;
+
+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ for (i = 0; i < hint->helper_size; i++) {
+ if (lookup[i].value == value.UINT32) {
+ result += snprintf(buffer + result,
+ size - result,
+ "%s",
+ lookup[i].name);
+ break;
+ }
+ }
+ break;
+ }
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ {
+ struct apphint_lookup *lookup =
+ (struct apphint_lookup *) hint->data_type_helper;
+ IMG_UINT32 i;
+
+ if (lookup == (struct apphint_lookup *)NO_PARAM_TABLE) {
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ for (i = 0; i < hint->helper_size; i++) {
+ if (lookup[i].value & value.UINT32) {
+ result += snprintf(buffer + result,
+ size - result,
+ "%s,",
+ lookup[i].name);
+ }
+ }
+ if (result) {
+ /* remove any trailing ',' */
+ --result;
+ *(buffer + result) = '\0';
+ } else {
+ result += snprintf(buffer + result,
+ size - result, "none");
+ }
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: unrecognised data type (%d), index (%d)",
+ __func__, hint->data_type, id));
+ result = -EINVAL;
+ }
+
+err_exit:
+ return result;
+}
+
+/*
+*******************************************************************************
+ Module parameters initialization - different from debugfs
+******************************************************************************/
+/**
+ * apphint_kparam_set - Handle an update of a module parameter
+ *
+ * Returns 0, or -errno. arg is in kp->arg.
+ */
+static int apphint_kparam_set(const char *val, const struct kernel_param *kp)
+{
+ char val_copy[APPHINT_BUFFER_SIZE];
+ APPHINT_ID id;
+ union apphint_value value;
+ int result;
+
+	/* make a mutable copy: apphint_read() modifies the buffer via strsep() */
+ result = strlcpy(val_copy, val, APPHINT_BUFFER_SIZE);
+
+ get_apphint_id_from_action_addr(kp->arg, &id);
+ if (result < APPHINT_BUFFER_SIZE) {
+ result = apphint_read(val_copy, result, id, &value);
+ if (result >= 0) {
+ ((struct apphint_action *)kp->arg)->stored = value;
+ if (param_lookup[id].data_type == APPHINT_DATA_TYPE_STRING) {
+ ((struct apphint_action *)kp->arg)->free = true;
+ }
+ }
+ } else {
+ PVR_DPF((PVR_DBG_ERROR, "%s: String too long", __func__));
+ }
+ return (result > 0) ? 0 : result;
+}
+
+/**
+ * apphint_kparam_get - handle a read of a module parameter
+ *
+ * Returns length written or -errno. Buffer is 4k (i.e. keep output short!)
+ */
+static int apphint_kparam_get(char *buffer, const struct kernel_param *kp)
+{
+ return apphint_write(buffer, PAGE_SIZE, kp->arg);
+}
+
+__maybe_unused
+static const struct kernel_param_ops apphint_kparam_fops = {
+ .set = apphint_kparam_set,
+ .get = apphint_kparam_get,
+};
+
+/*
+ * Call module_param_cb() for all AppHints listed in APPHINT_LIST_MODPARAM.
+ * apphint_modparam_class_<class> resolves to apphint_modparam_enable() except
+ * for AppHint classes that have been disabled at build time.
+ */
+
+#define apphint_modparam_enable(name, number, perm) \
+ module_param_cb(name, &apphint_kparam_fops, &apphint.val[number], perm);
+
+#define X(a, b, c, d, e) \
+ apphint_modparam_class_ ##c(a, APPHINT_ID_ ## a, 0444)
+ APPHINT_LIST_MODPARAM
+#undef X
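+/* The expansion above turns every enabled MODPARAM AppHint into a standard
+ * module parameter. Assuming the module is named pvrsrvkm (the usual name
+ * for this driver), a value can be supplied at load time, e.g.:
+ *
+ *   modprobe pvrsrvkm HTBufferSizeInKB=128
+ *
+ * and read back from /sys/module/pvrsrvkm/parameters/HTBufferSizeInKB
+ * (the 0444 permission above makes the sysfs node read-only).
+ */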
+
+/*
+*******************************************************************************
+ Debugfs get (seq file) operations - supporting functions
+******************************************************************************/
+static void *apphint_seq_start(struct seq_file *s, loff_t *pos)
+{
+ if (*pos == 0) {
+ /* We want only one entry in the sequence, one call to show() */
+ return (void *) 1;
+ }
+
+ PVR_UNREFERENCED_PARAMETER(s);
+
+ return NULL;
+}
+
+static void apphint_seq_stop(struct seq_file *s, void *v)
+{
+ PVR_UNREFERENCED_PARAMETER(s);
+ PVR_UNREFERENCED_PARAMETER(v);
+}
+
+static void *apphint_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ PVR_UNREFERENCED_PARAMETER(s);
+ PVR_UNREFERENCED_PARAMETER(v);
+ PVR_UNREFERENCED_PARAMETER(pos);
+ return NULL;
+}
+
+static int apphint_seq_show(struct seq_file *s, void *v)
+{
+ IMG_CHAR km_buffer[APPHINT_BUFFER_SIZE];
+ int result;
+
+ PVR_UNREFERENCED_PARAMETER(v);
+
+ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE, s->private);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failure", __func__));
+ } else {
+ /* debugfs requires a trailing \n, module_params don't */
+ result += snprintf(km_buffer + result,
+ APPHINT_BUFFER_SIZE - result,
+ "\n");
+ seq_puts(s, km_buffer);
+ }
+
+	/* seq_file show() must return 0 for its output to be emitted */
+ return (result < 0) ? result : 0;
+}
+
+static const struct seq_operations apphint_seq_fops = {
+ .start = apphint_seq_start,
+ .stop = apphint_seq_stop,
+ .next = apphint_seq_next,
+ .show = apphint_seq_show,
+};
+
+/*
+*******************************************************************************
+ Debugfs supporting functions
+******************************************************************************/
+/**
+ * apphint_set - Handle a debugfs value update
+ */
+static ssize_t apphint_set(const char __user *buffer,
+ size_t count,
+ loff_t *ppos,
+ void *data)
+{
+ APPHINT_ID id;
+ union apphint_value value;
+ struct apphint_action *action = data;
+ char km_buffer[APPHINT_BUFFER_SIZE];
+ int result = 0;
+
+ if (ppos == NULL)
+ return -EIO;
+
+ if (count >= APPHINT_BUFFER_SIZE) {
+		PVR_DPF((PVR_DBG_ERROR, "%s: String too long (%zu)",
+ __func__, count));
+ result = -EINVAL;
+ goto err_exit;
+ }
+
+ if (pvr_copy_from_user(km_buffer, buffer, count)) {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Copy of user data failed",
+ __func__));
+ result = -EFAULT;
+ goto err_exit;
+ }
+ km_buffer[count] = '\0';
+
+ get_apphint_id_from_action_addr(action, &id);
+ result = apphint_read(km_buffer, count, id, &value);
+ if (result >= 0)
+ apphint_action(value, action);
+
+ *ppos += count;
+err_exit:
+ return result;
+}
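+/* Illustrative debugfs round-trip (the debugfs mount point and the driver's
+ * root directory name vary by system; /sys/kernel/debug/pvr is assumed):
+ *
+ *   echo dropoldest > /sys/kernel/debug/pvr/apphint/HTBOperationMode
+ *   cat /sys/kernel/debug/pvr/apphint/HTBOperationMode
+ *
+ * The write is parsed by apphint_set() above; the read is served by the
+ * seq_file operations, which format the value with apphint_write().
+ */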
+
+/**
+ * apphint_debugfs_init - Create the specified debugfs entries
+ */
+static int apphint_debugfs_init(char *sub_dir,
+ int device_num,
+ unsigned init_data_size,
+ const struct apphint_init_data *init_data,
+ PPVR_DEBUGFS_DIR_DATA parentdir,
+ PPVR_DEBUGFS_DIR_DATA *rootdir, PPVR_DEBUGFS_ENTRY_DATA *entry)
+{
+ int result = 0;
+ unsigned i;
+ int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+
+ if (*rootdir) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "AppHint DebugFS already created, skipping"));
+ result = -EEXIST;
+ goto err_exit;
+ }
+
+ result = PVRDebugFSCreateEntryDir(sub_dir, parentdir,
+ rootdir);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Failed to create \"%s\" DebugFS directory.", sub_dir));
+ goto err_exit;
+ }
+
+ for (i = 0; i < init_data_size; i++) {
+ if (!class_state[init_data[i].class].enabled)
+ continue;
+
+ result = PVRDebugFSCreateEntry(init_data[i].name,
+ *rootdir,
+ &apphint_seq_fops,
+ apphint_set,
+ NULL,
+ NULL,
+ (void *) &apphint.val[init_data[i].id + device_value_offset],
+ &entry[i]);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Failed to create \"%s/%s\" DebugFS entry.",
+ sub_dir, init_data[i].name));
+ }
+ }
+
+err_exit:
+ return result;
+}
+
+/**
+ * apphint_debugfs_deinit - destroy the debugfs entries
+ */
+static void apphint_debugfs_deinit(unsigned num_entries,
+ PPVR_DEBUGFS_DIR_DATA *rootdir, PPVR_DEBUGFS_ENTRY_DATA *entry)
+{
+ unsigned i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (entry[i]) {
+ PVRDebugFSRemoveEntry(&entry[i]);
+ entry[i] = NULL;
+ }
+ }
+
+ if (*rootdir) {
+ PVRDebugFSRemoveEntryDir(rootdir);
+ *rootdir = NULL;
+ }
+}
+
+/*
+*******************************************************************************
+ AppHint status dump implementation
+******************************************************************************/
+#if defined(PDUMP)
+static void apphint_pdump_values(void *flags, const IMG_CHAR *format, ...)
+{
+ char km_buffer[APPHINT_BUFFER_SIZE];
+ IMG_UINT32 ui32Flags = *(IMG_UINT32 *)flags;
+ va_list ap;
+
+ va_start(ap, format);
+ (void)vsnprintf(km_buffer, APPHINT_BUFFER_SIZE, format, ap);
+ va_end(ap);
+
+ PDumpCommentKM(km_buffer, ui32Flags);
+}
+#endif
+
+static void apphint_dump_values(char *group_name,
+ int device_num,
+ const struct apphint_init_data *group_data,
+ int group_size,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ int i, result;
+ int device_value_offset = device_num * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+ char km_buffer[APPHINT_BUFFER_SIZE];
+
+ PVR_DUMPDEBUG_LOG(" %s", group_name);
+ for (i = 0; i < group_size; i++) {
+ result = apphint_write(km_buffer, APPHINT_BUFFER_SIZE,
+ &apphint.val[group_data[i].id + device_value_offset]);
+
+ if (result <= 0) {
+ PVR_DUMPDEBUG_LOG(" %s: <Error>",
+ group_data[i].name);
+ } else {
+ PVR_DUMPDEBUG_LOG(" %s: %s",
+ group_data[i].name, km_buffer);
+ }
+ }
+}
+
+/**
+ * Callback for debug dump
+ */
+static void apphint_dump_state(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ int i, result;
+ char km_buffer[APPHINT_BUFFER_SIZE];
+ PVRSRV_DEVICE_NODE *device = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+
+ if (DEBUG_REQUEST_VERBOSITY_HIGH == ui32VerbLevel) {
+ PVR_DUMPDEBUG_LOG("------[ AppHint Settings ]------");
+
+ apphint_dump_values("Build Vars", 0,
+ init_data_buildvar, ARRAY_SIZE(init_data_buildvar),
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ apphint_dump_values("Module Params", 0,
+ init_data_modparam, ARRAY_SIZE(init_data_modparam),
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ apphint_dump_values("Debugfs Params", 0,
+ init_data_debugfs, ARRAY_SIZE(init_data_debugfs),
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+ if (!apphint.devices[i]
+ || (device && device != apphint.devices[i]))
+ continue;
+
+ result = snprintf(km_buffer,
+ APPHINT_BUFFER_SIZE,
+ "Debugfs Params Device ID: %d",
+ i);
+ if (0 > result)
+ continue;
+
+ apphint_dump_values(km_buffer, i,
+ init_data_debugfs_device,
+ ARRAY_SIZE(init_data_debugfs_device),
+ pfnDumpDebugPrintf,
+ pvDumpDebugFile);
+ }
+ }
+}
+
+/*
+*******************************************************************************
+ Public interface
+******************************************************************************/
+int pvr_apphint_init(void)
+{
+ int result, i;
+
+ if (apphint.initialized) {
+ result = -EEXIST;
+ goto err_out;
+ }
+
+ for (i = 0; i < APPHINT_DEVICES_MAX; i++)
+ apphint.devices[i] = NULL;
+
+ /* create workqueue with strict execution ordering to ensure no
+ * race conditions when setting/updating apphints from different
+ * contexts
+ */
+ apphint.workqueue = alloc_workqueue("apphint_workqueue",
+ WQ_UNBOUND | WQ_FREEZABLE, 1);
+ if (!apphint.workqueue) {
+ result = -ENOMEM;
+ goto err_out;
+ }
+
+ result = apphint_debugfs_init("apphint", 0,
+ ARRAY_SIZE(init_data_debugfs), init_data_debugfs,
+ NULL,
+ &apphint.debugfs_rootdir, apphint.debugfs_entry);
+ if (0 != result)
+ goto err_out;
+
+ result = apphint_debugfs_init("buildvar", 0,
+ ARRAY_SIZE(init_data_buildvar), init_data_buildvar,
+ NULL,
+ &apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+ apphint.initialized = 1;
+
+err_out:
+ return result;
+}
+
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device)
+{
+ int result, i;
+ char device_num[APPHINT_BUFFER_SIZE];
+ int device_value_offset;
+
+ if (!apphint.initialized) {
+ result = -EAGAIN;
+ goto err_out;
+ }
+
+	if (apphint.num_devices >= APPHINT_DEVICES_MAX) {
+ result = -EMFILE;
+ goto err_out;
+ }
+
+ result = snprintf(device_num, APPHINT_BUFFER_SIZE, "%d", apphint.num_devices);
+ if (result < 0) {
+ PVR_DPF((PVR_DBG_WARNING,
+ "snprintf failed (%d)", result));
+ result = -EINVAL;
+ goto err_out;
+ }
+
+ /* Set the default values for the new device */
+ device_value_offset = apphint.num_devices * APPHINT_DEBUGFS_DEVICE_ID_MAX;
+ for (i = 0; i < APPHINT_DEBUGFS_DEVICE_ID_MAX; i++) {
+ apphint.val[init_data_debugfs_device[i].id + device_value_offset].stored
+ = init_data_debugfs_device[i].default_value;
+ }
+
+ result = apphint_debugfs_init(device_num, apphint.num_devices,
+ ARRAY_SIZE(init_data_debugfs_device),
+ init_data_debugfs_device,
+ apphint.debugfs_rootdir,
+ &apphint.debugfs_device_rootdir[apphint.num_devices],
+ apphint.debugfs_device_entry[apphint.num_devices]);
+ if (0 != result)
+ goto err_out;
+
+ apphint.devices[apphint.num_devices] = device;
+ apphint.num_devices++;
+
+ (void)SOPvrDbgRequestNotifyRegister(
+ &device->hAppHintDbgReqNotify,
+ device,
+ apphint_dump_state,
+ DEBUG_REQUEST_APPHINT,
+ device);
+
+err_out:
+ return result;
+}
+
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device)
+{
+ int i;
+
+ if (!apphint.initialized)
+ return;
+
+ /* find the device */
+ for (i = 0; i < APPHINT_DEVICES_MAX; i++) {
+ if (apphint.devices[i] == device)
+ break;
+ }
+
+ if (APPHINT_DEVICES_MAX == i)
+ return;
+
+ if (device->hAppHintDbgReqNotify) {
+ (void)SOPvrDbgRequestNotifyUnregister(
+ device->hAppHintDbgReqNotify);
+ device->hAppHintDbgReqNotify = NULL;
+ }
+
+ apphint_debugfs_deinit(APPHINT_DEBUGFS_DEVICE_ID_MAX,
+ &apphint.debugfs_device_rootdir[i],
+ apphint.debugfs_device_entry[i]);
+
+ apphint.devices[i] = NULL;
+ apphint.num_devices--;
+}
+
+void pvr_apphint_deinit(void)
+{
+ int i;
+
+ if (!apphint.initialized)
+ return;
+
+ /* remove any remaining device data */
+ for (i = 0; apphint.num_devices && i < APPHINT_DEVICES_MAX; i++) {
+ if (apphint.devices[i])
+ pvr_apphint_device_unregister(apphint.devices[i]);
+ }
+
+ /* free all alloc'd string apphints and set to NULL */
+ for (i = 0; i < ARRAY_SIZE(apphint.val); i++) {
+ if (apphint.val[i].free && apphint.val[i].stored.STRING) {
+ kfree(apphint.val[i].stored.STRING);
+ apphint.val[i].stored.STRING = NULL;
+ apphint.val[i].free = false;
+ }
+ }
+
+ apphint_debugfs_deinit(APPHINT_DEBUGFS_ID_MAX,
+ &apphint.debugfs_rootdir, apphint.debugfs_entry);
+ apphint_debugfs_deinit(APPHINT_BUILDVAR_ID_MAX,
+ &apphint.buildvar_rootdir, apphint.buildvar_entry);
+
+ destroy_workqueue(apphint.workqueue);
+
+ apphint.initialized = 0;
+}
+
+void pvr_apphint_dump_state(void)
+{
+#if defined(PDUMP)
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+ apphint_pdump_values, (void *)&ui32Flags);
+#endif
+ apphint_dump_state(NULL, DEBUG_REQUEST_VERBOSITY_HIGH,
+ NULL, NULL);
+}
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX) {
+ *pVal = apphint.val[ue].stored.UINT64;
+ error = 0;
+ }
+ return error;
+}
+
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX) {
+ *pVal = apphint.val[ue].stored.UINT32;
+ error = 0;
+ }
+ return error;
+}
+
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal)
+{
+ int error = -ERANGE;
+
+ if (ue < APPHINT_ID_MAX) {
+ error = 0;
+ *pVal = apphint.val[ue].stored.BOOL;
+ }
+ return error;
+}
+
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size)
+{
+	int error = -ERANGE;
+
+	if (ue < APPHINT_ID_MAX && apphint.val[ue].stored.STRING) {
+ if (strlcpy(pBuffer, apphint.val[ue].stored.STRING, size) < size) {
+ error = 0;
+ }
+ }
+ return error;
+}
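+/* A minimal sketch of getter usage elsewhere in the driver ('enabled' and
+ * the calling context are illustrative):
+ *
+ *   IMG_BOOL enabled;
+ *
+ *   if (pvr_apphint_get_bool(APPHINT_ID_ZeroFreelist, &enabled) == 0 &&
+ *       enabled) {
+ *           ... zero freelist pages on free ...
+ *   }
+ *
+ * Note these getters return the 'stored' value only; registered query
+ * handlers are not consulted.
+ */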
+
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_UINT64:
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.UINT64 = query,
+ .set.UINT64 = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
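+/* A minimal registration sketch (MyQueryFWFilter/MySetFWFilter are
+ * hypothetical callbacks, not part of this driver). HWPerfFWFilter is a
+ * driver-global UINT64 hint, so the device argument is the
+ * APPHINT_OF_DRIVER_NO_DEVICE sentinel:
+ *
+ *   pvr_apphint_register_handlers_uint64(APPHINT_ID_HWPerfFWFilter,
+ *                                        MyQueryFWFilter, MySetFWFilter,
+ *                                        APPHINT_OF_DRIVER_NO_DEVICE, NULL);
+ *
+ * Once registered, debugfs reads and writes are routed through the
+ * callbacks instead of the 'stored' value.
+ */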
+
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_UINT32:
+ case APPHINT_DATA_TYPE_UINT32Bitfield:
+ case APPHINT_DATA_TYPE_UINT32List:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.UINT32 = query,
+ .set.UINT32 = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_BOOL:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.BOOL = query,
+ .set.BOOL = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data)
+{
+ int device_value_offset;
+
+ if (id >= APPHINT_ID_MAX) {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: AppHint ID (%d) is out of range, max (%d)",
+ __func__, id, APPHINT_ID_MAX-1));
+ return;
+ }
+
+ get_value_offset_from_device(device, &device_value_offset);
+
+ switch (param_lookup[id].data_type) {
+ case APPHINT_DATA_TYPE_STRING:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Does not match AppHint data type for ID (%d)",
+ __func__, id));
+ return;
+ }
+
+ apphint.val[id + device_value_offset] = (struct apphint_action){
+ .query.STRING = query,
+ .set.STRING = set,
+ .device = device,
+ .private_data = private_data,
+ .stored = apphint.val[id + device_value_offset].stored
+ };
+}
+
+/* EOF */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km_apphint.h b/drivers/gpu/drm/img-rogue/1.10/km_apphint.h
new file mode 100644
index 00000000000000..4746c23492e343
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km_apphint.h
@@ -0,0 +1,100 @@
+/*************************************************************************/ /*!
+@File km_apphint.h
+@Title Apphint internal header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Linux kernel AppHint control
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __KM_APPHINT_H__
+#define __KM_APPHINT_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include "km_apphint_defs.h"
+#include "device.h"
+
+int pvr_apphint_init(void);
+void pvr_apphint_deinit(void);
+int pvr_apphint_device_register(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_device_unregister(PVRSRV_DEVICE_NODE *device);
+void pvr_apphint_dump_state(void);
+
+int pvr_apphint_get_uint64(APPHINT_ID ue, IMG_UINT64 *pVal);
+int pvr_apphint_get_uint32(APPHINT_ID ue, IMG_UINT32 *pVal);
+int pvr_apphint_get_bool(APPHINT_ID ue, IMG_BOOL *pVal);
+int pvr_apphint_get_string(APPHINT_ID ue, IMG_CHAR *pBuffer, size_t size);
+
+/* Supplied to the pvr_apphint_register_handlers_*() functions when the
+ * AppHint is a driver-global AppHint, i.e. one not present in the
+ * APPHINT_DEBUGFS_DEVICE_ID enum.
+ */
+#define APPHINT_OF_DRIVER_NO_DEVICE ((void*)-1U)
+
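+/* Device-specific AppHints (the APPHINT_LIST_DEBUGFS_DEVICE set) instead
+ * pass the PVRSRV_DEVICE_NODE they belong to, e.g. (sketch; query_cb,
+ * set_cb and psDeviceNode are illustrative names):
+ *
+ *   pvr_apphint_register_handlers_bool(APPHINT_ID_AssertOnHWRTrigger,
+ *                                      query_cb, set_cb, psDeviceNode, NULL);
+ */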
+void pvr_apphint_register_handlers_uint64(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT64 value),
+ const PVRSRV_DEVICE_NODE *device,
+	const void *private_data);
+void pvr_apphint_register_handlers_uint32(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_UINT32 value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+void pvr_apphint_register_handlers_bool(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL *value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_BOOL value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+void pvr_apphint_register_handlers_string(APPHINT_ID id,
+ PVRSRV_ERROR (*query)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR **value),
+ PVRSRV_ERROR (*set)(const PVRSRV_DEVICE_NODE *device, const void *private_data, IMG_CHAR *value),
+ const PVRSRV_DEVICE_NODE *device,
+ const void *private_data);
+
+#if defined(__cplusplus)
+}
+#endif
+#endif /* __KM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (km_apphint.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/km_apphint_defs.h b/drivers/gpu/drm/img-rogue/1.10/km_apphint_defs.h
new file mode 100644
index 00000000000000..aea9140d851feb
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/km_apphint_defs.h
@@ -0,0 +1,320 @@
+/*************************************************************************/ /*!
+@File km_apphint_defs.h
+@Title Services AppHint definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description AppHint definitions used by the Linux kernel driver
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#ifndef __KM_APPHINT_DEFS_H__
+#define __KM_APPHINT_DEFS_H__
+
+/* NB: The 'DEVICE' AppHints must be last in this list as they will be
+ * duplicated in the case of a driver supporting multiple devices
+ */
+#define APPHINT_LIST_ALL \
+ APPHINT_LIST_BUILDVAR \
+ APPHINT_LIST_MODPARAM \
+ APPHINT_LIST_DEBUGFS \
+ APPHINT_LIST_DEBUGFS_DEVICE
+
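+/* A sketch of how these lists are consumed: each client defines X(...) and
+ * expands a list. For example, the APPHINT_ID enum near the bottom of this
+ * file expands APPHINT_LIST_ALL with
+ *
+ *   #define X(a, b, c, d, e) APPHINT_ID_ ## a,
+ *
+ * yielding APPHINT_ID_HWRDebugDumpLimit, ..., APPHINT_ID_FWPoisonOnFreeValue
+ * and finally APPHINT_ID_MAX, in exactly the order the entries appear below.
+ */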
+
+/*
+*******************************************************************************
+ Build variables
+ All of these should be configurable only through the 'default' value
+******************************************************************************/
+#define APPHINT_LIST_BUILDVAR \
+/* name, type, class, default, helper, */ \
+X(HWRDebugDumpLimit, UINT32, ALWAYS, PVRSRV_APPHINT_HWRDEBUGDUMPLIMIT, NO_PARAM_TABLE ) \
+X(EnableTrustedDeviceAceConfig, BOOL, GPUVIRT_VAL, PVRSRV_APPHINT_ENABLETRUSTEDDEVICEACECONFIG, NO_PARAM_TABLE ) \
+X(CleanupThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADPRIORITY, NO_PARAM_TABLE ) \
+X(CleanupThreadWeight, UINT32, NEVER, PVRSRV_APPHINT_CLEANUPTHREADWEIGHT, NO_PARAM_TABLE ) \
+X(WatchdogThreadPriority, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY, NO_PARAM_TABLE ) \
+X(WatchdogThreadWeight, UINT32, NEVER, PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT, NO_PARAM_TABLE ) \
+X(HWPerfClientBufferSize, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE, NO_PARAM_TABLE ) \
+
+/*
+*******************************************************************************
+ Module parameters
+******************************************************************************/
+#define APPHINT_LIST_MODPARAM \
+/* name, type, class, default, helper, */ \
+X(EnableSignatureChecks, BOOL, PDUMP, PVRSRV_APPHINT_ENABLESIGNATURECHECKS, NO_PARAM_TABLE ) \
+X(SignatureChecksBufSize, UINT32, PDUMP, PVRSRV_APPHINT_SIGNATURECHECKSBUFSIZE, NO_PARAM_TABLE ) \
+\
+X(DisableClockGating, BOOL, FWDBGCTRL, PVRSRV_APPHINT_DISABLECLOCKGATING, NO_PARAM_TABLE ) \
+X(BIFTilingMode, UINT32, VALIDATION, PVRSRV_APPHINT_BIFTILINGMODE, NO_PARAM_TABLE ) \
+X(DisableDMOverlap, BOOL, FWDBGCTRL, PVRSRV_APPHINT_DISABLEDMOVERLAP, NO_PARAM_TABLE ) \
+\
+X(EnableCDMKillingRandMode, BOOL, VALIDATION, PVRSRV_APPHINT_ENABLECDMKILLINGRANDMODE, NO_PARAM_TABLE ) \
+X(EnableFWContextSwitch, UINT32, FWDBGCTRL, PVRSRV_APPHINT_ENABLEFWCONTEXTSWITCH, NO_PARAM_TABLE ) \
+X(VDMContextSwitchMode, UINT32, VALIDATION, PVRSRV_APPHINT_VDMCONTEXTSWITCHMODE, NO_PARAM_TABLE ) \
+X(EnableRDPowerIsland, UINT32, FWDBGCTRL, PVRSRV_APPHINT_ENABLERDPOWERISLAND, NO_PARAM_TABLE ) \
+\
+X(GeneralNon4KHeapPageSize, UINT32, ALWAYS, PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE, NO_PARAM_TABLE ) \
+\
+X(DriverMode, UINT32, ALWAYS, PVRSRV_APPHINT_DRIVERMODE, NO_PARAM_TABLE ) \
+\
+X(FirmwarePerf, UINT32, VALIDATION, PVRSRV_APPHINT_FIRMWAREPERF, NO_PARAM_TABLE ) \
+X(FWContextSwitchProfile, UINT32, VALIDATION, PVRSRV_APPHINT_FWCONTEXTSWITCHPROFILE, NO_PARAM_TABLE ) \
+X(HWPerfDisableCustomCounterFilter, BOOL, VALIDATION, PVRSRV_APPHINT_HWPERFDISABLECUSTOMCOUNTERFILTER, NO_PARAM_TABLE ) \
+X(HWPerfFWBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB, NO_PARAM_TABLE ) \
+X(HWPerfHostBufSizeInKB, UINT32, VALIDATION, PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB, NO_PARAM_TABLE ) \
+\
+X(JonesDisableMask, UINT32, VALIDATION, PVRSRV_APPHINT_JONESDISABLEMASK, NO_PARAM_TABLE ) \
+X(NewFilteringMode, BOOL, VALIDATION, PVRSRV_APPHINT_NEWFILTERINGMODE, NO_PARAM_TABLE ) \
+X(TruncateMode, UINT32, VALIDATION, PVRSRV_APPHINT_TRUNCATEMODE, NO_PARAM_TABLE ) \
+X(UseMETAT1, UINT32, VALIDATION, PVRSRV_APPHINT_USEMETAT1, NO_PARAM_TABLE ) \
+X(EmuMaxFreq, UINT32, ALWAYS, PVRSRV_APPHINT_EMUMAXFREQ, NO_PARAM_TABLE ) \
+X(GPIOValidationMode, UINT32, VALIDATION, PVRSRV_APPHINT_GPIOVALIDATIONMODE, NO_PARAM_TABLE ) \
+X(RGXBVNC, STRING, ALWAYS, PVRSRV_APPHINT_RGXBVNC, NO_PARAM_TABLE ) \
+\
+X(FWContextSwitchCrossDM, UINT32, ALWAYS, 0, NO_PARAM_TABLE ) \
+\
+X(OSidRegion0Min, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MIN, NO_PARAM_TABLE ) \
+X(OSidRegion0Max, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION0MAX, NO_PARAM_TABLE ) \
+X(OSidRegion1Min, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MIN, NO_PARAM_TABLE ) \
+X(OSidRegion1Max, STRING, GPUVIRT_VAL, PVRSRV_APPHINT_OSIDREGION1MAX, NO_PARAM_TABLE ) \
+X(HTBufferSizeInKB, UINT32, ALWAYS, PVRSRV_APPHINT_HTBUFFERSIZE, NO_PARAM_TABLE ) \
+
+/*
+*******************************************************************************
+ Debugfs parameters - driver configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS \
+/* name, type, class, default, helper, */ \
+X(EnableHTBLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLEHTBLOGGROUP, htb_loggroup_tbl ) \
+X(HTBOperationMode, UINT32List, ALWAYS, PVRSRV_APPHINT_HTBOPERATIONMODE, htb_opmode_tbl ) \
+X(EnableFTraceGPU, BOOL, GPUTRACE_EV, PVRSRV_APPHINT_ENABLEFTRACEGPU, NO_PARAM_TABLE ) \
+X(HWPerfFWFilter, UINT64, ALWAYS, PVRSRV_APPHINT_HWPERFFWFILTER, NO_PARAM_TABLE ) \
+X(HWPerfHostFilter, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFHOSTFILTER, NO_PARAM_TABLE ) \
+X(HWPerfClientFilter_Services, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_SERVICES, NO_PARAM_TABLE ) \
+X(HWPerfClientFilter_EGL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_EGL, NO_PARAM_TABLE ) \
+X(HWPerfClientFilter_OpenGLES, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENGLES, NO_PARAM_TABLE ) \
+X(HWPerfClientFilter_OpenCL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENCL, NO_PARAM_TABLE ) \
+X(HWPerfClientFilter_OpenRL, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_OPENRL, NO_PARAM_TABLE ) \
+X(HWPerfClientFilter_Vulkan, UINT32, ALWAYS, PVRSRV_APPHINT_HWPERFCLIENTFILTER_VULKAN, NO_PARAM_TABLE ) \
+X(CacheOpConfig, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPCONFIG, NO_PARAM_TABLE ) \
+X(CacheOpGFThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPGFTHRESHOLDSIZE, NO_PARAM_TABLE ) \
+X(CacheOpUMKMThresholdSize, UINT32, ALWAYS, PVRSRV_APPHINT_CACHEOPUMKMHRESHOLDSIZE, NO_PARAM_TABLE ) \
+X(TimeCorrClock, UINT32List, ALWAYS, PVRSRV_APPHINT_TIMECORRCLOCK, timecorr_clk_tbl )
+
+/*
+*******************************************************************************
+ Debugfs parameters - device configuration
+******************************************************************************/
+#define APPHINT_LIST_DEBUGFS_DEVICE \
+/* name, type, class, default, helper, */ \
+/* Device Firmware config */\
+X(AssertOnHWRTrigger, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTONHWRTRIGGER, NO_PARAM_TABLE ) \
+X(AssertOutOfMemory, BOOL, ALWAYS, PVRSRV_APPHINT_ASSERTOUTOFMEMORY, NO_PARAM_TABLE ) \
+X(CheckMList, BOOL, ALWAYS, PVRSRV_APPHINT_CHECKMLIST, NO_PARAM_TABLE ) \
+X(EnableHWR, BOOL, ALWAYS, APPHNT_BLDVAR_ENABLEHWR, NO_PARAM_TABLE ) \
+X(EnableLogGroup, UINT32Bitfield, ALWAYS, PVRSRV_APPHINT_ENABLELOGGROUP, fwt_loggroup_tbl ) \
+X(FirmwareLogType, UINT32List, ALWAYS, PVRSRV_APPHINT_FIRMWARELOGTYPE, fwt_logtype_tbl ) \
+/* Device host config */ \
+X(EnableAPM, UINT32, ALWAYS, PVRSRV_APPHINT_ENABLEAPM, NO_PARAM_TABLE ) \
+X(DisableFEDLogging, BOOL, ALWAYS, PVRSRV_APPHINT_DISABLEFEDLOGGING, NO_PARAM_TABLE ) \
+X(ZeroFreelist, BOOL, ALWAYS, PVRSRV_APPHINT_ZEROFREELIST, NO_PARAM_TABLE ) \
+X(DustRequestInject, BOOL, VALIDATION, PVRSRV_APPHINT_DUSTREQUESTINJECT, NO_PARAM_TABLE ) \
+X(DisablePDumpPanic, BOOL, PDUMP, PVRSRV_APPHINT_DISABLEPDUMPPANIC, NO_PARAM_TABLE ) \
+X(EnableFWPoisonOnFree, BOOL, ALWAYS, PVRSRV_APPHINT_ENABLEFWPOISONONFREE, NO_PARAM_TABLE ) \
+X(FWPoisonOnFreeValue, UINT32, ALWAYS, PVRSRV_APPHINT_FWPOISONONFREEVALUE, NO_PARAM_TABLE ) \
+
+/*
+*******************************************************************************
+ * Types used in the APPHINT_LIST_<GROUP> lists must be defined here.
+ * New types require specific handling code to be added.
+******************************************************************************/
+#define APPHINT_DATA_TYPE_LIST \
+X(BOOL) \
+X(UINT64) \
+X(UINT32) \
+X(UINT32Bitfield) \
+X(UINT32List) \
+X(STRING)
+
+#define APPHINT_CLASS_LIST \
+X(ALWAYS) \
+X(NEVER) \
+X(DEBUG) \
+X(FWDBGCTRL) \
+X(PDUMP) \
+X(VALIDATION) \
+X(GPUVIRT_VAL) \
+X(GPUTRACE_EV)
+
+/*
+*******************************************************************************
+ Visibility control for module parameters
+ These bind build variables to AppHint Visibility Groups.
+******************************************************************************/
+#define APPHINT_ENABLED_CLASS_ALWAYS IMG_TRUE
+#define APPHINT_ENABLED_CLASS_NEVER IMG_FALSE
+#define apphint_modparam_class_ALWAYS(a, b, c) apphint_modparam_enable(a, b, c)
+#if defined(DEBUG)
+ #define APPHINT_ENABLED_CLASS_DEBUG IMG_TRUE
+ #define apphint_modparam_class_DEBUG(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_DEBUG IMG_FALSE
+ #define apphint_modparam_class_DEBUG(a, b, c)
+#endif
+#if defined(SUPPORT_FWDBGCTRL)
+ #define APPHINT_ENABLED_CLASS_FWDBGCTRL IMG_TRUE
+ #define apphint_modparam_class_FWDBGCTRL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_FWDBGCTRL IMG_FALSE
+ #define apphint_modparam_class_FWDBGCTRL(a, b, c)
+#endif
+#if defined(PDUMP)
+ #define APPHINT_ENABLED_CLASS_PDUMP IMG_TRUE
+ #define apphint_modparam_class_PDUMP(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_PDUMP IMG_FALSE
+ #define apphint_modparam_class_PDUMP(a, b, c)
+#endif
+#if defined(SUPPORT_VALIDATION)
+ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_TRUE
+ #define apphint_modparam_class_VALIDATION(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_VALIDATION IMG_FALSE
+ #define apphint_modparam_class_VALIDATION(a, b, c)
+#endif
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_TRUE
+ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_GPUVIRT_VAL IMG_FALSE
+ #define apphint_modparam_class_GPUVIRT_VAL(a, b, c)
+#endif
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ #define APPHINT_ENABLED_CLASS_GPUTRACE_EV IMG_TRUE
+ #define apphint_modparam_class_GPUTRACE_EV(a, b, c) apphint_modparam_enable(a, b, c)
+#else
+ #define APPHINT_ENABLED_CLASS_GPUTRACE_EV IMG_FALSE
+ #define apphint_modparam_class_GPUTRACE_EV(a, b, c)
+#endif
+
+/*
+*******************************************************************************
+ AppHint defaults based on other build parameters
+******************************************************************************/
+#if defined(HWR_DEFAULT_ENABLED)
+ #define APPHNT_BLDVAR_ENABLEHWR 1
+#else
+ #define APPHNT_BLDVAR_ENABLEHWR 0
+#endif
+#if defined(DEBUG)
+ #define APPHNT_BLDVAR_DEBUG 1
+ #define APPHNT_BLDVAR_DBGDUMPLIMIT RGXFWIF_HWR_DEBUG_DUMP_ALL
+#else
+ #define APPHNT_BLDVAR_DEBUG 0
+ #define APPHNT_BLDVAR_DBGDUMPLIMIT 1
+#endif
+#if defined(DEBUG) || defined(PDUMP)
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_TRUE
+#else
+#define APPHNT_BLDVAR_ENABLESIGNATURECHECKS IMG_FALSE
+#endif
+
+/*
+*******************************************************************************
+ Table generated enums
+******************************************************************************/
+/* Unique ID for all AppHints */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_ID_ ## a,
+ APPHINT_LIST_ALL
+#undef X
+ APPHINT_ID_MAX
+} APPHINT_ID;
+
+/* ID for build variable Apphints - used for build variable only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_BUILDVAR_ID_ ## a,
+ APPHINT_LIST_BUILDVAR
+#undef X
+ APPHINT_BUILDVAR_ID_MAX
+} APPHINT_BUILDVAR_ID;
+
+/* ID for Modparam Apphints - used for modparam only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_MODPARAM_ID_ ## a,
+ APPHINT_LIST_MODPARAM
+#undef X
+ APPHINT_MODPARAM_ID_MAX
+} APPHINT_MODPARAM_ID;
+
+/* ID for Debugfs Apphints - used for debugfs only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_ID_ ## a,
+ APPHINT_LIST_DEBUGFS
+#undef X
+ APPHINT_DEBUGFS_ID_MAX
+} APPHINT_DEBUGFS_ID;
+
+/* ID for Debugfs Device Apphints - used for debugfs device only structures */
+typedef enum {
+#define X(a, b, c, d, e) APPHINT_DEBUGFS_DEVICE_ID_ ## a,
+ APPHINT_LIST_DEBUGFS_DEVICE
+#undef X
+ APPHINT_DEBUGFS_DEVICE_ID_MAX
+} APPHINT_DEBUGFS_DEVICE_ID;
+
+/* data types and actions */
+typedef enum {
+ APPHINT_DATA_TYPE_INVALID = 0,
+#define X(a) APPHINT_DATA_TYPE_ ## a,
+ APPHINT_DATA_TYPE_LIST
+#undef X
+ APPHINT_DATA_TYPE_MAX
+} APPHINT_DATA_TYPE;
+
+typedef enum {
+#define X(a) APPHINT_CLASS_ ## a,
+ APPHINT_CLASS_LIST
+#undef X
+ APPHINT_CLASS_MAX
+} APPHINT_CLASS;
+
+#endif /* __KM_APPHINT_DEFS_H__ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/linkage.h b/drivers/gpu/drm/img-rogue/1.10/linkage.h
new file mode 100644
index 00000000000000..27c1092cfe89b2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/linkage.h
@@ -0,0 +1,55 @@
+/*************************************************************************/ /*!
+@File
+@Title Linux specific Services code internal interfaces
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Interfaces between various parts of the Linux specific
+ Services code, that don't have any other obvious
+ header file to go into.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__LINKAGE_H__)
+#define __LINKAGE_H__
+
+PVRSRV_ERROR PVROSFuncInit(void);
+void PVROSFuncDeInit(void);
+
+int PVRDebugCreateDebugFSEntries(void);
+void PVRDebugRemoveDebugFSEntries(void);
+
+#endif /* !defined(__LINKAGE_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/linux_sw_sync.h b/drivers/gpu/drm/img-rogue/1.10/linux_sw_sync.h
new file mode 100644
index 00000000000000..002b98aba8b624
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/linux_sw_sync.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _UAPI_LINUX_PVR_SW_SYNC_H
+#define _UAPI_LINUX_PVR_SW_SYNC_H
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+
+struct pvr_sw_sync_create_fence_data {
+ __u32 value;
+ char name[PVRSRV_SYNC_NAME_LENGTH];
+ __s32 fence;
+};
+
+#define PVR_SW_SYNC_IOC_MAGIC 'W'
+#define PVR_SW_SYNC_IOC_CREATE_FENCE _IOWR(PVR_SW_SYNC_IOC_MAGIC, 0, struct pvr_sw_sync_create_fence_data)
+#define PVR_SW_SYNC_IOC_INC _IOW(PVR_SW_SYNC_IOC_MAGIC, 1, __u32)
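+
+/* Illustrative userspace sketch (editorial, not part of this header). It
+ * assumes a device node such as "/dev/pvr_sync" exposing these ioctls, and
+ * that PVR_SW_SYNC_IOC_INC advances the software sync timeline:
+ *
+ *   struct pvr_sw_sync_create_fence_data sData = { .value = 1 };
+ *   strncpy(sData.name, "my-fence", sizeof(sData.name) - 1);
+ *   if (ioctl(iFd, PVR_SW_SYNC_IOC_CREATE_FENCE, &sData) == 0)
+ *   {
+ *       // sData.fence now holds the new fence fd
+ *   }
+ *   __u32 ui32Inc = 1;
+ *   ioctl(iFd, PVR_SW_SYNC_IOC_INC, &ui32Inc);
+ */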
+
+#endif /* defined(SUPPORT_NATIVE_FENCE_SYNC) */
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/linuxsrv.h b/drivers/gpu/drm/img-rogue/1.10/linuxsrv.h
new file mode 100644
index 00000000000000..702387073d655a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/linuxsrv.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title Module defs for pvr core drivers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LINUXSRV_H__
+#define _LINUXSRV_H__
+
+#include "dbgdrvif_srv5.h"
+
+IMG_UINT32 DeviceIoControl(IMG_UINT32 hDevice,
+ IMG_UINT32 ui32ControlCode,
+ void *pInBuffer,
+ IMG_UINT32 ui32InBufferSize,
+ void *pOutBuffer,
+ IMG_UINT32 ui32OutBufferSize,
+ IMG_UINT32 *pui32BytesReturned);
+
+#endif /* _LINUXSRV_H__*/
diff --git a/drivers/gpu/drm/img-rogue/1.10/lists.c b/drivers/gpu/drm/img-rogue/1.10/lists.c
new file mode 100644
index 00000000000000..e8e7088a3296e1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/lists.c
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title Linked list shared functions implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implementation of the list iterators for types shared among
+ more than one file in the services code.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "lists.h"
+
+/*===================================================================
+ LIST ITERATOR FUNCTIONS USED IN MORE THAN ONE FILE (those used just
+ once are implemented locally).
+ ===================================================================*/
+
+IMPLEMENT_LIST_ANY(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE)
+IMPLEMENT_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_ANY_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK)
+IMPLEMENT_LIST_FOR_EACH(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE)
+IMPLEMENT_LIST_REMOVE(PVRSRV_DEVICE_NODE)
diff --git a/drivers/gpu/drm/img-rogue/1.10/lists.h b/drivers/gpu/drm/img-rogue/1.10/lists.h
new file mode 100644
index 00000000000000..e7a900fcf9abb9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/lists.h
@@ -0,0 +1,355 @@
+/*************************************************************************/ /*!
+@File
+@Title Linked list shared functions templates.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Definition of the linked list function templates.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __LISTS_UTILS__
+#define __LISTS_UTILS__
+
+/* instruct QAC to ignore warnings about the following custom formatted macros */
+/* PRQA S 0881,3410 ++ */
+#include <stdarg.h>
+#include "img_types.h"
+#include "device.h"
+#include "power.h"
+
+/*
+ - USAGE -
+
+ The list functions work with any structure that provides the fields psNext and
+ ppsThis. In order to make a function available for a given type, it is required
+ to use the function template macro that creates the actual code.
+
+ There are 5 main types of functions:
+ - INSERT : given a pointer to the head pointer of the list and a pointer
+ to the node, inserts it as the new head.
+ - INSERT TAIL : given a pointer to the head pointer of the list and a pointer
+ to the node, inserts the node at the tail of the list.
+ - REMOVE : given a pointer to a node, removes it from its list.
+ - FOR EACH : apply a function over all the elements of a list.
+ - ANY : apply a function over the elements of a list until one of them
+ returns a non-null value, which is then returned.
+
+ The last two function types also have a variable-argument form, which allows
+ additional parameters to be passed to the callback function. In order to do
+ this, the callback function must take two arguments: the first is the current
+ node and the second is a list of variable arguments (va_list).
+
+ The ANY functions also have another form which specifies the return type of
+ the callback function and the default value returned by the callback function
+ (an illustrative usage sketch follows this comment).
+
+*/
+
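+/* Illustrative sketch (editorial, not part of the driver): a hypothetical
+ * node type and how the templates would be instantiated for it. Any structure
+ * providing psNext and ppsThis fields qualifies:
+ *
+ *   typedef struct _MY_NODE_
+ *   {
+ *       struct _MY_NODE_  *psNext;   // next node in the list
+ *       struct _MY_NODE_ **ppsThis;  // address of the pointer to this node
+ *       int                iData;
+ *   } MY_NODE;
+ *
+ *   DECLARE_LIST_INSERT(MY_NODE);    // in a header
+ *   IMPLEMENT_LIST_INSERT(MY_NODE)   // in exactly one source file
+ *
+ *   MY_NODE *psHead = NULL;
+ *   MY_NODE sNode = { NULL, NULL, 42 };
+ *   List_MY_NODE_Insert(&psHead, &sNode);  // sNode becomes the new head
+ */
+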
+/*************************************************************************/ /*!
+@Function List_##TYPE##_ForEach
+@Description Apply a callback function to all the elements of a list.
+@Input psHead The head of the list to be processed.
+@Input pfnCallBack The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH(TYPE) \
+void List_##TYPE##_ForEach(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+ while(psHead)\
+ {\
+ pfnCallBack(psHead);\
+ psHead = psHead->psNext;\
+ }\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_ForEachSafe
+@Description Apply a callback function to all the elements of a list. Do it
+ in a safe way that handles the fact that a node might remove itself
+ from the list during the iteration.
+@Input psHead The head of the list to be processed.
+@Input pfnCallBack The function to be applied to each element of the list.
+*/ /**************************************************************************/
+#define DECLARE_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_FOR_EACH_SAFE(TYPE) \
+void List_##TYPE##_ForEachSafe(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode))\
+{\
+ TYPE *psNext;\
+\
+ while(psHead)\
+ {\
+ psNext = psHead->psNext; \
+ pfnCallBack(psHead);\
+ psHead = psNext;\
+ }\
+}
+
+
+#define DECLARE_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_FOR_EACH_VA(TYPE) \
+void List_##TYPE##_ForEach_va(TYPE *psHead, void(*pfnCallBack)(TYPE* psNode, va_list va), ...) \
+{\
+ va_list ap;\
+ while(psHead)\
+ {\
+ va_start(ap, pfnCallBack);\
+ pfnCallBack(psHead, ap);\
+ psHead = psHead->psNext;\
+ va_end(ap);\
+ }\
+}
+
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Any
+@Description Applies a callback function to the elements of a list until the function
+ returns a non null value, then returns it.
+@Input psHead The head of the list to be processed.
+@Input pfnCallBack The function to be applied to each element of the list.
+@Return The first non null value returned by the callback function.
+*/ /**************************************************************************/
+#define DECLARE_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY(TYPE) \
+void* List_##TYPE##_Any(TYPE *psHead, void* (*pfnCallBack)(TYPE* psNode))\
+{ \
+ void *pResult;\
+ TYPE *psNextNode;\
+ pResult = NULL;\
+ psNextNode = psHead;\
+ while(psHead && !pResult)\
+ {\
+ psNextNode = psNextNode->psNext;\
+ pResult = pfnCallBack(psHead);\
+ psHead = psNextNode;\
+ }\
+ return pResult;\
+}
+
+
+/*with variable arguments, that will be passed as a va_list to the callback function*/
+
+#define DECLARE_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA(TYPE) \
+void* List_##TYPE##_Any_va(TYPE *psHead, void*(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+ va_list ap;\
+ TYPE *psNextNode;\
+ void* pResult = NULL;\
+ while(psHead && !pResult)\
+ {\
+ psNextNode = psHead->psNext;\
+ va_start(ap, pfnCallBack);\
+ pResult = pfnCallBack(psHead, ap);\
+ va_end(ap);\
+ psHead = psNextNode;\
+ }\
+ return pResult;\
+}
+
+/*those ones are for extra type safety, so there's no need to use castings for the results*/
+
+#define DECLARE_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))
+
+#define IMPLEMENT_LIST_ANY_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any(TYPE *psHead, RTYPE (*pfnCallBack)(TYPE* psNode))\
+{ \
+ RTYPE result;\
+ TYPE *psNextNode;\
+ result = CONTINUE;\
+ psNextNode = psHead;\
+ while(psHead && result == CONTINUE)\
+ {\
+ psNextNode = psNextNode->psNext;\
+ result = pfnCallBack(psHead);\
+ psHead = psNextNode;\
+ }\
+ return result;\
+}
+
+
+#define DECLARE_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)
+
+#define IMPLEMENT_LIST_ANY_VA_2(TYPE, RTYPE, CONTINUE) \
+RTYPE List_##TYPE##_##RTYPE##_Any_va(TYPE *psHead, RTYPE(*pfnCallBack)(TYPE* psNode, va_list va), ...)\
+{\
+ va_list ap;\
+ TYPE *psNextNode;\
+ RTYPE result = CONTINUE;\
+ while(psHead && result == CONTINUE)\
+ {\
+ psNextNode = psHead->psNext;\
+ va_start(ap, pfnCallBack);\
+ result = pfnCallBack(psHead, ap);\
+ va_end(ap);\
+ psHead = psNextNode;\
+ }\
+ return result;\
+}
+
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Remove
+@Description Removes a given node from the list.
+@Input psNode The pointer to the node to be removed.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)
+
+#define IMPLEMENT_LIST_REMOVE(TYPE) \
+void List_##TYPE##_Remove(TYPE *psNode)\
+{\
+ (*psNode->ppsThis)=psNode->psNext;\
+ if(psNode->psNext)\
+ {\
+ psNode->psNext->ppsThis = psNode->ppsThis;\
+ }\
+}
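+
+/* Editorial note: removal needs neither the list head nor a traversal.
+ * ppsThis always holds the address of the pointer that points at this node
+ * (the head pointer, or the previous node's psNext), so rewriting *ppsThis
+ * unlinks the node in O(1). */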
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Insert
+@Description Inserts a given node at the beginning of the list.
+@Input psHead The pointer to the pointer to the head node.
+@Input psNode The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT(TYPE) \
+void List_##TYPE##_Insert(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+ psNewNode->ppsThis = ppsHead;\
+ psNewNode->psNext = *ppsHead;\
+ *ppsHead = psNewNode;\
+ if(psNewNode->psNext)\
+ {\
+ psNewNode->psNext->ppsThis = &(psNewNode->psNext);\
+ }\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_InsertTail
+@Description Inserts a given node at the end of the list.
+@Input psHead The pointer to the pointer to the head node.
+@Input psNode The pointer to the node to be inserted.
+*/ /**************************************************************************/
+#define DECLARE_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)
+
+#define IMPLEMENT_LIST_INSERT_TAIL(TYPE) \
+void List_##TYPE##_InsertTail(TYPE **ppsHead, TYPE *psNewNode)\
+{\
+ TYPE *psTempNode = *ppsHead;\
+ if (psTempNode != NULL)\
+ {\
+ while (psTempNode->psNext)\
+ psTempNode = psTempNode->psNext;\
+ ppsHead = &psTempNode->psNext;\
+ }\
+ psNewNode->ppsThis = ppsHead;\
+ psNewNode->psNext = NULL;\
+ *ppsHead = psNewNode;\
+}
+
+/*************************************************************************/ /*!
+@Function List_##TYPE##_Reverse
+@Description Reverse a list in place
+@Input ppsHead The pointer to the pointer to the head node.
+*/ /**************************************************************************/
+#define DECLARE_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)
+
+#define IMPLEMENT_LIST_REVERSE(TYPE) \
+void List_##TYPE##_Reverse(TYPE **ppsHead)\
+{\
+ TYPE *psTmpNode1; \
+ TYPE *psTmpNode2; \
+ TYPE *psCurNode; \
+ psTmpNode1 = NULL; \
+ psCurNode = *ppsHead; \
+ while(psCurNode) { \
+ psTmpNode2 = psCurNode->psNext; \
+ psCurNode->psNext = psTmpNode1; \
+ psTmpNode1 = psCurNode; \
+ psCurNode = psTmpNode2; \
+ if(psCurNode) \
+ { \
+ psTmpNode1->ppsThis = &(psCurNode->psNext); \
+ } \
+ else \
+ { \
+ psTmpNode1->ppsThis = ppsHead; \
+ } \
+ } \
+ *ppsHead = psTmpNode1; \
+}
+
+#define IS_LAST_ELEMENT(x) ((x)->psNext == NULL)
+
+
+DECLARE_LIST_ANY(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, IMG_BOOL, IMG_FALSE);
+DECLARE_LIST_ANY_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_ANY_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_ANY_VA_2(PVRSRV_DEVICE_NODE, PVRSRV_ERROR, PVRSRV_OK);
+DECLARE_LIST_FOR_EACH(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_FOR_EACH_VA(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_INSERT_TAIL(PVRSRV_DEVICE_NODE);
+DECLARE_LIST_REMOVE(PVRSRV_DEVICE_NODE);
+
+#undef DECLARE_LIST_ANY_2
+#undef DECLARE_LIST_ANY_VA
+#undef DECLARE_LIST_ANY_VA_2
+#undef DECLARE_LIST_FOR_EACH
+#undef DECLARE_LIST_FOR_EACH_VA
+#undef DECLARE_LIST_INSERT
+#undef DECLARE_LIST_REMOVE
+
+#endif
+
+/* re-enable warnings */
+/* PRQA S 0881,3410 -- */
diff --git a/drivers/gpu/drm/img-rogue/1.10/lock.h b/drivers/gpu/drm/img-rogue/1.10/lock.h
new file mode 100644
index 00000000000000..f1f28a86dcced9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/lock.h
@@ -0,0 +1,352 @@
+/*************************************************************************/ /*!
+@File lock.h
+@Title Locking interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services internal locking interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_H_
+#define _LOCK_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#include "lock_types.h"
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include "allocmem.h"
+#include <asm/atomic.h>
+
+#define OSLockCreateNoStats(phLock, eLockType) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(phLock) = OSAllocMemNoStats(sizeof(struct mutex)); \
+ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSLockCreate(phLock, eLockType) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(phLock) = OSAllocMem(sizeof(struct mutex)); \
+ if (*(phLock)) { mutex_init(*(phLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSLockDestroy(hLock) ({mutex_destroy((hLock)); OSFreeMem((hLock)); PVRSRV_OK;})
+#define OSLockDestroyNoStats(hLock) ({mutex_destroy((hLock)); OSFreeMemNoStats((hLock)); PVRSRV_OK;})
+
+#define OSLockAcquire(hLock) ({mutex_lock((hLock)); PVRSRV_OK;})
+#define OSLockAcquireNested(hLock, subclass) ({mutex_lock_nested((hLock), (subclass)); PVRSRV_OK;})
+#define OSLockRelease(hLock) ({mutex_unlock((hLock)); PVRSRV_OK;})
+
+#define OSLockIsLocked(hLock) ((mutex_is_locked((hLock)) == 1) ? IMG_TRUE : IMG_FALSE)
+#define OSTryLockAcquire(hLock) ((mutex_trylock(hLock) == 1) ? IMG_TRUE : IMG_FALSE)
+
+/* These _may_ be reordered or optimized away entirely by the compiler/hw */
+#define OSAtomicRead(pCounter) atomic_read(pCounter)
+#define OSAtomicWrite(pCounter, i) atomic_set(pCounter, i)
+
+/* The following atomic operations, in addition to being SMP-safe, also
+ imply a memory barrier around the operation */
+#define OSAtomicIncrement(pCounter) atomic_inc_return(pCounter)
+#define OSAtomicDecrement(pCounter) atomic_dec_return(pCounter)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) atomic_cmpxchg(pCounter,oldv,newv)
+
+#define OSAtomicAdd(pCounter, incr) atomic_add_return(incr,pCounter)
+#define OSAtomicAddUnless(pCounter, incr, test) __atomic_add_unless(pCounter,incr,test)
+
+#define OSAtomicSubtract(pCounter, incr) atomic_add_return(-(incr),pCounter)
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
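+
+/* Illustrative usage sketch (editorial, not part of the driver): a typical
+ * lock lifetime with the macros above:
+ *
+ *   POS_LOCK hLock;
+ *   if (OSLockCreate(&hLock, LOCK_TYPE_PASSIVE) == PVRSRV_OK)
+ *   {
+ *       OSLockAcquire(hLock);
+ *       // ... critical section ...
+ *       OSLockRelease(hLock);
+ *       OSLockDestroy(hLock);
+ *   }
+ */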
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/* Defines for mapping INT32 to UINT32 for implementations that
+ * use unsigned integers
+ * -2147483648 to -1 ---> 0 to +2147483647
+ * 0 to +2147483647 ---> +2147483648 to +4294967295 */
+#define MAP_UNSIGNED32_TO_SIGNED32(x) ((x) - 0x80000000)
+#define MAP_SIGNED32_TO_UNSIGNED32(x) ((x) + 0x80000000)
+
+/**************************************************************************/ /*!
+@Function OSLockCreate
+@Description Creates an operating system lock object.
+@Output phLock The created lock.
+@Input eLockType The type of lock required. This may be:
+ LOCK_TYPE_PASSIVE - the lock will not be used
+ in interrupt context or
+ LOCK_TYPE_DISPATCH - the lock may be used
+ in interrupt context.
+@Return PVRSRV_OK on success. PVRSRV_ERROR_OUT_OF_MEMORY if the driver
+ cannot allocate CPU memory needed for the lock.
+ PVRSRV_ERROR_INIT_FAILURE if the Operating System fails to
+ allocate the lock.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockCreate(POS_LOCK *phLock, LOCK_TYPE eLockType);
+#if defined(INTEGRITY_OS)
+#define OSLockCreateNoStats OSLockCreate
+#endif
+
+/**************************************************************************/ /*!
+@Function OSLockDestroy
+@Description Destroys an operating system lock object.
+@Input hLock The lock to be destroyed.
+@Return None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR OSLockDestroy(POS_LOCK hLock);
+
+#if defined(INTEGRITY_OS)
+#define OSLockDestroyNoStats OSLockDestroy
+#endif
+/**************************************************************************/ /*!
+@Function OSLockAcquire
+@Description Acquires an operating system lock.
+ NB. This function must not return until the lock is acquired
+ (meaning the implementation should not time out or return with
+ an error, as the caller will assume they have the lock).
+@Input hLock The lock to be acquired.
+@Return None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockAcquire(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function OSTryLockAcquire
+@Description Try to acquire an operating system lock.
+ NB. If the lock is acquired successfully on the first attempt,
+ the function returns IMG_TRUE; otherwise it returns IMG_FALSE.
+@Input hLock The lock to be acquired.
+@Return IMG_TRUE if lock acquired successfully,
+ IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSTryLockAcquire(POS_LOCK hLock);
+
+/* Nested notation isn't used in UM or other OS's */
+/**************************************************************************/ /*!
+@Function OSLockAcquireNested
+@Description For operating systems other than Linux, this equates to an
+ OSLockAcquire() call. On Linux, this function wraps a call
+ to mutex_lock_nested(). This recognises the scenario where
+ there may be multiple subclasses within a particular class
+ of lock. In such cases, the order in which the locks belonging
+ these various subclasses are acquired is important and must be
+ validated.
+@Input hLock The lock to be acquired.
+@Input subclass The subclass of the lock.
+@Return None.
+ */ /**************************************************************************/
+#define OSLockAcquireNested(hLock, subclass) OSLockAcquire((hLock))
+
+/**************************************************************************/ /*!
+@Function OSLockRelease
+@Description Releases an operating system lock.
+@Input hLock The lock to be released.
+@Return None.
+ */ /**************************************************************************/
+IMG_INTERNAL
+void OSLockRelease(POS_LOCK hLock);
+
+/**************************************************************************/ /*!
+@Function OSLockIsLocked
+@Description Tests whether or not an operating system lock is currently
+ locked.
+@Input hLock The lock to be tested.
+@Return IMG_TRUE if locked, IMG_FALSE if not locked.
+ */ /**************************************************************************/
+IMG_INTERNAL
+IMG_BOOL OSLockIsLocked(POS_LOCK hLock);
+
+#if defined(LINUX)
+
+/* Use GCC intrinsics (read/write semantics consistent with kernel-side implementation) */
+#define OSAtomicRead(pCounter) (*(volatile IMG_INT32 *)&(pCounter)->counter)
+#define OSAtomicWrite(pCounter, i) ((pCounter)->counter = (IMG_INT32) i)
+#define OSAtomicIncrement(pCounter) __sync_add_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicDecrement(pCounter) __sync_sub_and_fetch((&(pCounter)->counter), 1)
+#define OSAtomicCompareExchange(pCounter, oldv, newv) \
+ __sync_val_compare_and_swap((&(pCounter)->counter), oldv, newv)
+
+#define OSAtomicAdd(pCounter, incr) __sync_add_and_fetch((&(pCounter)->counter), incr)
+#define OSAtomicAddUnless(pCounter, incr, test) ({ \
+ IMG_INT32 c; IMG_INT32 old; \
+ c = OSAtomicRead(pCounter); \
+ while (1) { \
+ if (c == (test)) break; \
+ old = OSAtomicCompareExchange(pCounter, c, c+(incr)); \
+ if (old == c) break; \
+ c = old; \
+ } c; })
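+
+/* Editorial note: OSAtomicAddUnless above is a compare-and-swap retry loop:
+ * it bails out if the counter equals 'test', otherwise it tries to publish
+ * c + incr; if another CPU raced in, the CAS returns the fresh value and the
+ * loop retries with it. The macro evaluates to the value observed before the
+ * successful add (or to 'test' if no add was performed). */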
+
+#define OSAtomicSubtract(pCounter, incr) OSAtomicAdd(pCounter, -(incr))
+#define OSAtomicSubtractUnless(pCounter, incr, test) OSAtomicAddUnless(pCounter, -(incr), test)
+
+#else
+
+/*************************************************************************/ /*!
+@Function OSAtomicRead
+@Description Read the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to read
+@Return The value of the atomic variable
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicRead(const ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function OSAtomicWrite
+@Description Write the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be written to
+@Input v The value to write
+@Return None
+*/ /**************************************************************************/
+IMG_INTERNAL
+void OSAtomicWrite(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/* The following atomic operations, in addition to being SMP-safe,
+ * should also have a memory barrier around each operation */
+/*************************************************************************/ /*!
+@Function OSAtomicIncrement
+@Description Increment the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be incremented
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicIncrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function OSAtomicDecrement
+@Description Decrement the value of a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be decremented
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicDecrement(ATOMIC_T *pCounter);
+
+/*************************************************************************/ /*!
+@Function OSAtomicAdd
+@Description Add a specified value to a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to add the value to
+@Input v The value to be added
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicAdd(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/*************************************************************************/ /*!
+@Function OSAtomicAddUnless
+@Description Add a specified value to a variable atomically unless it
+ already equals a particular value.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to add the value to
+@Input v The value to be added to 'pCounter'
+@Input t The test value. If 'pCounter' equals this,
+ its value will not be adjusted
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicAddUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
+
+/*************************************************************************/ /*!
+@Function OSAtomicSubtract
+@Description Subtract a specified value to a variable atomically.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to subtract the value from
+@Input v The value to be subtracted
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicSubtract(ATOMIC_T *pCounter, IMG_INT32 v);
+
+/*************************************************************************/ /*!
+@Function OSAtomicSubtractUnless
+@Description Subtract a specified value from a variable atomically unless
+ it already equals a particular value.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to subtract the value from
+@Input v The value to be subtracted from 'pCounter'
+@Input t The test value. If 'pCounter' equals this,
+ its value will not be adjusted
+@Return The new value of *pCounter.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicSubtractUnless(ATOMIC_T *pCounter, IMG_INT32 v, IMG_INT32 t);
+
+/*************************************************************************/ /*!
+@Function OSAtomicCompareExchange
+@Description Set a variable to a given value only if it is currently
+ equal to a specified value. The whole operation must be atomic.
+ Atomic functions must be implemented in a manner that
+ is both symmetric multiprocessor (SMP) safe and has a memory
+ barrier around each operation.
+@Input pCounter The atomic variable to be checked and
+ possibly updated
+@Input oldv The value the atomic variable must have in
+ order to be modified
+@Input newv The value to write to the atomic variable if
+ it equals 'oldv'
+@Return The value of *pCounter after the function.
+*/ /**************************************************************************/
+IMG_INTERNAL
+IMG_INT32 OSAtomicCompareExchange(ATOMIC_T *pCounter, IMG_INT32 oldv, IMG_INT32 newv);
+
+#endif /* defined(LINUX) */
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+#endif /* _LOCK_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/lock_types.h b/drivers/gpu/drm/img-rogue/1.10/lock_types.h
new file mode 100644
index 00000000000000..c377afd60a1885
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/lock_types.h
@@ -0,0 +1,103 @@
+/*************************************************************************/ /*!
+@File lock_types.h
+@Title Locking types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Locking specific enums, defines and structures
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _LOCK_TYPES_H_
+#define _LOCK_TYPES_H_
+
+/* In Linux kernel mode we are using the kernel mutex implementation directly
+ * with macros. This allows us to use the kernel lockdep feature for lock
+ * debugging. */
+#if defined(LINUX) && defined(__KERNEL__)
+
+#include <linux/types.h>
+#include <linux/mutex.h>
+/* The mutex is defined as a pointer to be compatible with the other code. This
+ * isn't ideal and usually you wouldn't do that in kernel code. */
+typedef struct mutex *POS_LOCK;
+typedef struct rw_semaphore *POSWR_LOCK;
+typedef atomic_t ATOMIC_T;
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+#include "img_types.h" /* needed for IMG_INT */
+typedef struct _OS_LOCK_ *POS_LOCK;
+
+#if defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS)
+typedef struct _OSWR_LOCK_ *POSWR_LOCK;
+#else /* defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS) */
+typedef struct _OSWR_LOCK_ {
+ IMG_UINT32 ui32Dummy;
+} *POSWR_LOCK;
+#endif /* defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS) */
+
+#if defined(LINUX)
+ typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(__QNXNTO__)
+ typedef struct _OS_ATOMIC {IMG_UINT32 counter;} ATOMIC_T;
+#elif defined(_WIN32)
+ /*
+ * Dummy definition. WDDM doesn't use Services, but some headers
+ * still have to be shared. This is one such case.
+ */
+ typedef struct _OS_ATOMIC {IMG_INT32 counter;} ATOMIC_T;
+#elif defined(INTEGRITY_OS)
+ /* Only the lower 32 bits are used in the OS atomic APIs, for consistent behaviour across all OSs */
+ typedef struct _OS_ATOMIC {IMG_UINT64 counter;} ATOMIC_T;
+#else
+ #error "Please type-define an atomic lock for this environment"
+#endif
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+
+typedef enum
+{
+ LOCK_TYPE_NONE = 0x00,
+
+ LOCK_TYPE_MASK = 0x0F,
+ LOCK_TYPE_PASSIVE = 0x01, /* Passive level lock e.g. mutex, system may promote to dispatch */
+ LOCK_TYPE_DISPATCH = 0x02, /* Dispatch level lock e.g. spin lock, may be used in ISR/MISR */
+
+ LOCK_TYPE_INSIST_FLAG = 0x80, /* When set caller can guarantee lock not used in ISR/MISR */
+ LOCK_TYPE_PASSIVE_ONLY = LOCK_TYPE_INSIST_FLAG | LOCK_TYPE_PASSIVE
+
+} LOCK_TYPE;
+#endif /* _LOCK_TYPES_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/log2.h b/drivers/gpu/drm/img-rogue/1.10/log2.h
new file mode 100644
index 00000000000000..98d33390378e45
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/log2.h
@@ -0,0 +1,414 @@
+/*************************************************************************/ /*!
+@Title Integer log2 and related functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef LOG2_H
+#define LOG2_H
+
+#include "img_defs.h"
+
+/**************************************************************************/ /*!
+@Description Determine if a number is a power of two.
+@Input n
+@Return True if n is a power of 2, false otherwise. Note: returns true for n == 0.
+*/ /***************************************************************************/
+static INLINE IMG_BOOL IsPower2(uint32_t n)
+{
+ /* C++ needs this cast. */
+ return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/**************************************************************************/ /*!
+@Description Determine if a number is a power of two.
+@Input n
+@Return True if n is a power of 2, false otherwise. Note: returns true for n == 0.
+*/ /***************************************************************************/
+static INLINE IMG_BOOL IsPower2_64(uint64_t n)
+{
+ /* C++ needs this cast. */
+ return (IMG_BOOL)((n & (n - 1)) == 0);
+}
+
+/* Code using GNU GCC intrinsics */
+#if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER))
+
+/* CHAR_BIT is typically found in <limits.h>. For all the platforms where
+ * CHAR_BIT is not available, define it here with the assumption that there
+ * are 8 bits in a byte */
+#ifndef CHAR_BIT
+#define CHAR_BIT 8U
+#endif
+
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(n))
+@Input n
+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2(uint32_t n)
+{
+ if(unlikely(n == 0))
+ {
+ return 0;
+ }
+ else
+ {
+ uint32_t uNumBits = CHAR_BIT * sizeof(n);
+ return uNumBits - (uint32_t)__builtin_clz(n) - 1U;
+ }
+}
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(n))
+@Input n
+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2_64(uint64_t n)
+{
+ if(unlikely(n == 0))
+ {
+ return 0;
+ }
+ else
+ {
+ uint32_t uNumBits = CHAR_BIT * sizeof(n);
+ return uNumBits - (uint32_t)__builtin_clzll(n) - 1U;
+ }
+}
+
+/**************************************************************************/ /*!
+@Description Compute ceil(log2(n))
+@Input n
+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2(uint32_t n)
+{
+ if(unlikely(n == 0 || n == 1))
+ {
+ return 0;
+ }
+ else
+ {
+ uint32_t uNumBits = CHAR_BIT * sizeof(n);
+
+ n--; /* Handle powers of 2 */
+ return uNumBits - (uint32_t)__builtin_clz(n);
+ }
+}
+
+/**************************************************************************/ /*!
+@Description Compute ceil(log2(n))
+@Input n
+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2_64(uint64_t n)
+{
+ if(unlikely(n == 0 || n == 1))
+ {
+ return 0;
+ }
+ else
+ {
+ uint32_t uNumBits = CHAR_BIT * sizeof(n);
+
+ n--; /* Handle powers of 2 */
+ return uNumBits - (uint32_t)__builtin_clzll(n);
+ }
+}
+
+/**************************************************************************/ /*!
+@Description Compute log2(n) for exact powers of two only
+@Input n Must be a power of two
+@Return log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2(uint32_t n)
+{
+ return (uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clz(n) - 1U;
+}
+
+/**************************************************************************/ /*!
+@Description Compute log2(n) for exact powers of two only
+@Input n Must be a power of two
+@Return log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2_64(uint64_t n)
+{
+ return (uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clzll(n) - 1U;
+}
+
+/**************************************************************************/ /*!
+@Description Round a non-power-of-two number up to the next power of two.
+@Input n
+@Return n rounded up to the next power of two. If n is zero or
+ already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint32_t RoundUpToNextPowerOfTwo(uint32_t n)
+{
+ /* Cases with n greater than 2^31 need separate handling,
+ * as the result of (1<<32) is undefined. */
+ if( unlikely( n == 0 || n > (uint32_t)1 << (CHAR_BIT * sizeof(n) - 1) ))
+ {
+ return 0;
+ }
+
+ /* Return n if it is already a power of 2 */
+ if((IMG_BOOL)((n & (n - 1)) == 0))
+ {
+ return n;
+ }
+
+ return (uint32_t)1 << ((uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clz(n));
+}
+
+/**************************************************************************/ /*!
+@Description Round a non-power-of-two number up to the next power of two.
+@Input n
+@Return n rounded up to the next power of two. If n is zero or
+ already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint64_t RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+ /* Cases with n greater than 2^63 need separate handling,
+ * as the result of (1<<64) is undefined. */
+ if( unlikely( n == 0 || n > (uint64_t)1 << (CHAR_BIT * sizeof(n) - 1) ))
+ {
+ return 0;
+ }
+
+ /* Return n if it is already a power of 2 */
+ if((IMG_BOOL)((n & (n - 1)) == 0))
+ {
+ return n;
+ }
+
+ return (uint64_t)1 << ((uint32_t)(CHAR_BIT * sizeof(n)) - (uint32_t)__builtin_clzll(n));
+}
+
+#else /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
+
+/**************************************************************************/ /*!
+@Description Round a non-power-of-two number up to the next power of two.
+@Input n
+@Return n rounded up to the next power of two. If n is zero or
+ already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint32_t RoundUpToNextPowerOfTwo(uint32_t n)
+{
+ n--;
+ n |= n >> 1; /* handle 2 bit numbers */
+ n |= n >> 2; /* handle 4 bit numbers */
+ n |= n >> 4; /* handle 8 bit numbers */
+ n |= n >> 8; /* handle 16 bit numbers */
+ n |= n >> 16; /* handle 32 bit numbers */
+ n++;
+
+ return n;
+}
+
+/**************************************************************************/ /*!
+@Description Round a non-power-of-two number up to the next power of two.
+@Input n
+@Return n rounded up to the next power of two. If n is zero or
+ already a power of two, return n unmodified.
+*/ /***************************************************************************/
+static INLINE uint64_t RoundUpToNextPowerOfTwo_64(uint64_t n)
+{
+ n--;
+ n |= n >> 1; /* handle 2 bit numbers */
+ n |= n >> 2; /* handle 4 bit numbers */
+ n |= n >> 4; /* handle 8 bit numbers */
+ n |= n >> 8; /* handle 16 bit numbers */
+ n |= n >> 16; /* handle 32 bit numbers */
+ n |= n >> 32; /* handle 64 bit numbers */
+ n++;
+
+ return n;
+}
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(n))
+@Input n
+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2(uint32_t n)
+{
+ uint32_t log2 = 0;
+
+ while (n >>= 1)
+ log2++;
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(n))
+@Input n
+@Return log2(n) rounded down to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t FloorLog2_64(uint64_t n)
+{
+ uint32_t log2 = 0;
+
+ while (n >>= 1)
+ log2++;
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute ceil(log2(n))
+@Input n
+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2(uint32_t n)
+{
+ uint32_t log2 = 0;
+
+ if(n == 0)
+ return 0;
+
+ n--; /* Handle powers of 2 */
+
+ while(n)
+ {
+ log2++;
+ n >>= 1;
+ }
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute ceil(log2(n))
+@Input n
+@Return log2(n) rounded up to the nearest integer. Returns 0 if n == 0
+*/ /***************************************************************************/
+static INLINE uint32_t CeilLog2_64(uint64_t n)
+{
+ uint32_t log2 = 0;
+
+ if(n == 0)
+ return 0;
+
+ n--; /* Handle powers of 2 */
+
+ while(n)
+ {
+ log2++;
+ n >>= 1;
+ }
+
+ return log2;
+}
+
+/**************************************************************************/ /*!
+@Description Compute log2(n) for exact powers of two only
+@Input n Must be a power of two
+@Return log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2(uint32_t n)
+{
+ static const uint32_t b[] =
+ { 0xAAAAAAAA, 0xCCCCCCCC, 0xF0F0F0F0, 0xFF00FF00, 0xFFFF0000};
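+ /* Editorial note: for n == 1 << k, mask b[i] has bits set exactly at the
+ * bit positions whose index has bit i set, so (n & b[i]) != 0 recovers
+ * bit i of k; the ORs below assemble k one bit at a time. */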
+ uint32_t r = (n & b[0]) != 0;
+
+ r |= (uint32_t) ((n & b[4]) != 0) << 4;
+ r |= (uint32_t) ((n & b[3]) != 0) << 3;
+ r |= (uint32_t) ((n & b[2]) != 0) << 2;
+ r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+ return r;
+}
+
+/**************************************************************************/ /*!
+@Description Compute log2(n) for exact powers of two only
+@Input n Must be a power of two
+@Return log2(n)
+*/ /***************************************************************************/
+static INLINE uint32_t ExactLog2_64(uint64_t n)
+{
+ static const uint64_t b[] =
+ { 0xAAAAAAAAAAAAAAAAULL, 0xCCCCCCCCCCCCCCCCULL,
+ 0xF0F0F0F0F0F0F0F0ULL, 0xFF00FF00FF00FF00ULL,
+ 0xFFFF0000FFFF0000ULL, 0xFFFFFFFF00000000ULL };
+ uint32_t r = (n & b[0]) != 0;
+
+ r |= (uint32_t) ((n & b[5]) != 0) << 5;
+ r |= (uint32_t) ((n & b[4]) != 0) << 4;
+ r |= (uint32_t) ((n & b[3]) != 0) << 3;
+ r |= (uint32_t) ((n & b[2]) != 0) << 2;
+ r |= (uint32_t) ((n & b[1]) != 0) << 1;
+
+ return r;
+}
+
+#endif /* #if (defined(__GNUC__) || defined(__GNUG__)) && !(defined(__clang__) || defined(__INTEL_COMPILER)) */
+
+/**************************************************************************/ /*!
+@Description Compute floor(log2(size)) + 1, where size is the max of the
+             3 given sizes. This is almost always the only valid use of
+             FloorLog2(); usually CeilLog2() should be used instead.
+ For a 5x5x1 texture, the 3 miplevels are:
+ 0: 5x5x1
+ 1: 2x2x1
+ 2: 1x1x1
+
+ For an 8x8x1 texture, the 4 miplevels are:
+ 0: 8x8x1
+ 1: 4x4x1
+ 2: 2x2x1
+ 3: 1x1x1
+
+@Input sizeX, sizeY, sizeZ
+@Return Count of mipmap levels for given dimensions
+*/ /***************************************************************************/
+static INLINE uint32_t NumMipLevels(uint32_t sizeX, uint32_t sizeY, uint32_t sizeZ)
+{
+	uint32_t maxSize = MAX(MAX(sizeX, sizeY), sizeZ);
+ return FloorLog2(maxSize) + 1;
+}
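+
+/* Usage sketch: a (hypothetical) 1024x512x1 texture gives
+ * NumMipLevels(1024, 512, 1) == FloorLog2(1024) + 1 == 11 levels.
+ */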
+
+
+#endif /* LOG2_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/main.c b/drivers/gpu/drm/img-rogue/1.10/main.c
new file mode 100644
index 00000000000000..64d4ba78d24ec6
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/main.c
@@ -0,0 +1,248 @@
+/*************************************************************************/ /*!
+@File
+@Title Debug driver main file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/uaccess.h>
+#include <drm/drmP.h>
+
+#include "img_types.h"
+#include "linuxsrv.h"
+#include "dbgdriv_ioctl.h"
+#include "dbgdrvif_srv5.h"
+#include "dbgdriv.h"
+#include "hostfunc.h"
+#include "pvr_debug.h"
+#include "pvrmodule.h"
+#include "pvr_uaccess.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+/* Output temp buffer used by the IOCTL handler; allocated once and grown as
+ * needed. This optimisation means the debug driver performs fewer
+ * vmallocs/vfrees, reducing the chance of kernel vmalloc space exhaustion.
+ * The single output buffer used for PDump UM reads is not multi-thread safe,
+ * so a mutex protects it from simultaneous reads.
+ */
+static IMG_CHAR* g_outTmpBuf = NULL;
+static IMG_UINT32 g_outTmpBufSize = 64*PAGE_SIZE;
+static void* g_pvOutTmpBufMutex = NULL;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+void DBGDrvGetServiceTable(void **fn_table)
+{
+ *fn_table = &g_sDBGKMServices;
+}
+
+void dbgdrv_cleanup(void)
+{
+ if (g_outTmpBuf)
+ {
+ vfree(g_outTmpBuf);
+ g_outTmpBuf = NULL;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ HostDestroyEventObjects();
+#endif
+ HostDestroyMutex(g_pvOutTmpBufMutex);
+ HostDestroyMutex(g_pvAPIMutex);
+ return;
+}
+
+IMG_INT dbgdrv_init(void)
+{
+ /* Init API mutex */
+ if ((g_pvAPIMutex=HostCreateMutex()) == NULL)
+ {
+ return -ENOMEM;
+ }
+
+ /* Init TmpBuf mutex */
+ if ((g_pvOutTmpBufMutex=HostCreateMutex()) == NULL)
+ {
+ return -ENOMEM;
+ }
+
+#if defined(SUPPORT_DBGDRV_EVENT_OBJECTS)
+ /*
+ * The current implementation of HostCreateEventObjects on Linux
+ * can never fail, so there is no need to check for error.
+ */
+ (void) HostCreateEventObjects();
+#endif
+
+ return 0;
+}
+
+static IMG_INT dbgdrv_ioctl_work(void *arg, IMG_BOOL bCompat)
+{
+ struct drm_pvr_dbgdrv_cmd *psDbgdrvCmd = (struct drm_pvr_dbgdrv_cmd *) arg;
+ char *buffer, *in, *out;
+ unsigned int cmd;
+ void __user *pBufferIn;
+ void __user *pBufferOut;
+
+ if (psDbgdrvCmd->pad)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid pad value\n"));
+ return -EINVAL;
+ }
+
+ if ((psDbgdrvCmd->in_data_size > (PAGE_SIZE >> 1)) ||
+ (psDbgdrvCmd->out_data_size > (PAGE_SIZE >> 1)))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Sizes of the buffers are too large, cannot do ioctl\n"));
+		return -EINVAL;
+ }
+
+ buffer = (char *) HostPageablePageAlloc(1);
+ if (!buffer)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Failed to allocate buffer, cannot do ioctl\n"));
+		return -ENOMEM;
+ }
+
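+	/* Split the single page allocated above: the first half holds the
+	 * copied-in parameters, the second half receives the results (both
+	 * sizes were bounded to PAGE_SIZE/2 above).
+	 */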
+ in = buffer;
+	out = buffer + (PAGE_SIZE >> 1);
+
+ pBufferIn = (void __user *)(uintptr_t) psDbgdrvCmd->in_data_ptr;
+ pBufferOut = (void __user *)(uintptr_t) psDbgdrvCmd->out_data_ptr;
+
+ if (pvr_copy_from_user(in, pBufferIn, psDbgdrvCmd->in_data_size) != 0)
+ {
+ goto init_failed;
+ }
+
+ /* Extra -1 because ioctls start at DEBUG_SERVICE_IOCTL_BASE + 1 */
+ cmd = MAKEIOCTLINDEX(psDbgdrvCmd->cmd) - DEBUG_SERVICE_IOCTL_BASE - 1;
+
+ if (psDbgdrvCmd->cmd == DEBUG_SERVICE_READ)
+ {
+ IMG_UINT32 *pui32BytesCopied = (IMG_UINT32 *)out;
+ DBG_OUT_READ *psReadOutParams = (DBG_OUT_READ *)out;
+ DBG_IN_READ *psReadInParams = (DBG_IN_READ *)in;
+ void __user *pvOutBuffer;
+ PDBG_STREAM psStream;
+
+ psStream = SID2PStream(psReadInParams->hStream);
+ if (!psStream)
+ {
+ goto init_failed;
+ }
+
+ /* Serialise IOCTL Read op access to the singular output buffer */
+ HostAquireMutex(g_pvOutTmpBufMutex);
+
+		if ((g_outTmpBuf == NULL) || (psReadInParams->ui32OutBufferSize > g_outTmpBufSize))
+		{
+			if (psReadInParams->ui32OutBufferSize > g_outTmpBufSize)
+			{
+				g_outTmpBufSize = psReadInParams->ui32OutBufferSize;
+			}
+			/* When growing, release the previous, smaller buffer first so
+			 * it is not leaked by the vmalloc below */
+			if (g_outTmpBuf)
+			{
+				vfree(g_outTmpBuf);
+			}
+			g_outTmpBuf = vmalloc(g_outTmpBufSize);
+			if (!g_outTmpBuf)
+			{
+				HostReleaseMutex(g_pvOutTmpBufMutex);
+				goto init_failed;
+			}
+		}
+
+ /* Ensure only one thread is allowed into the DBGDriv core at a time */
+ HostAquireMutex(g_pvAPIMutex);
+
+ psReadOutParams->ui32DataRead = DBGDrivRead(psStream,
+ psReadInParams->ui32BufID,
+ psReadInParams->ui32OutBufferSize,
+ g_outTmpBuf);
+ psReadOutParams->ui32SplitMarker = DBGDrivGetMarker(psStream);
+
+ HostReleaseMutex(g_pvAPIMutex);
+
+ pvOutBuffer = WIDEPTR_GET_PTR(psReadInParams->pui8OutBuffer, bCompat);
+
+ if (pvr_copy_to_user(pvOutBuffer,
+ g_outTmpBuf,
+ *pui32BytesCopied) != 0)
+ {
+ HostReleaseMutex(g_pvOutTmpBufMutex);
+ goto init_failed;
+ }
+
+ HostReleaseMutex(g_pvOutTmpBufMutex);
+
+ }
+ else
+ {
+ (g_DBGDrivProc[cmd])(in, out, bCompat);
+ }
+
+ if (copy_to_user(pBufferOut, out, psDbgdrvCmd->out_data_size) != 0)
+ {
+ goto init_failed;
+ }
+
+ HostPageablePageFree((void *)buffer);
+ return 0;
+
+init_failed:
+ HostPageablePageFree((void *)buffer);
+ return -EFAULT;
+}
+
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *pFile)
+{
+ return dbgdrv_ioctl_work((void *) arg, IMG_FALSE);
+}
+
+int dbgdrv_ioctl_compat(struct file *file, unsigned int ioctlCmd, unsigned long arg)
+{
+ return dbgdrv_ioctl_work((void *) arg, IMG_TRUE);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/mem_utils.c b/drivers/gpu/drm/img-rogue/1.10/mem_utils.c
new file mode 100644
index 00000000000000..6eb99dc1d0fe59
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mem_utils.c
@@ -0,0 +1,313 @@
+/*************************************************************************/ /*!
+@File
+@Title Memory manipulation functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory related functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This workaround is only *required* on ARM64. Avoid building or including
+ * it by default on other architectures, unless the 'safe memcpy' test flag
+ * is enabled. (The code should work on other architectures.)
+ */
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* NOTE: This C file is compiled with -ffreestanding to avoid pattern matching
+ * by the compiler to stdlib functions, and it must only use the below
+ * headers. Do not include any IMG or services headers in this file.
+ */
+#include <stddef.h>
+
+/* Prototypes to suppress warnings in -ffreestanding mode */
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize);
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize);
+
+/* This file is only intended to be used on platforms which use GCC or Clang,
+ * due to its reliance on __attribute__((vector_size(n))), typeof() and the
+ * __SIZEOF_*__ macros.
+ */
+#if defined(__GNUC__)
+
+#define MIN(a, b) \
+ ({__typeof(a) _a = (a); __typeof(b) _b = (b); _a > _b ? _b : _a;})
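+
+/* The statement-expression form of MIN (a GCC/Clang extension) evaluates
+ * each argument exactly once, so e.g. MIN(i++, limit) increments i exactly
+ * once, which a naive ternary macro would not guarantee.
+ */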
+
+#if !defined(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)
+#define DEVICE_MEMSETCPY_ALIGN_IN_BYTES __SIZEOF_LONG__
+#endif
+#if (DEVICE_MEMSETCPY_ALIGN_IN_BYTES & (DEVICE_MEMSETCPY_ALIGN_IN_BYTES - 1)) != 0
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be a power of 2"
+#endif
+#if DEVICE_MEMSETCPY_ALIGN_IN_BYTES < 4
+#error "DEVICE_MEMSETCPY_ALIGN_IN_BYTES must be equal or greater than 4"
+#endif
+
+#if __SIZEOF_POINTER__ != __SIZEOF_LONG__
+#error No support for architectures where void* and long are sized differently
+#endif
+
+#if __SIZEOF_LONG__ > DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+/* Meaningless, and harder to do correctly */
+# error Cannot handle DEVICE_MEMSETCPY_ALIGN_IN_BYTES < sizeof(long)
+typedef unsigned long block_t;
+#elif __SIZEOF_LONG__ <= DEVICE_MEMSETCPY_ALIGN_IN_BYTES
+typedef unsigned int block_t
+ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES)));
+# if defined(__arm64__) || defined(__aarch64__)
+# if DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 8
+# define DEVICE_MEMSETCPY_ARM64
+# define REGSZ "w"
+# define REGCL "w"
+# define BVCLB "r"
+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 16
+# define DEVICE_MEMSETCPY_ARM64
+# define REGSZ "x"
+# define REGCL "x"
+# define BVCLB "r"
+# elif DEVICE_MEMSETCPY_ALIGN_IN_BYTES == 32
+# if defined(__ARM_NEON_FP)
+# define DEVICE_MEMSETCPY_ARM64
+# define REGSZ "q"
+# define REGCL "v"
+# define BVCLB "w"
+# endif
+# endif
+# if defined(DEVICE_MEMSETCPY_ARM64)
+# if defined(DEVICE_MEMSETCPY_ARM64_NON_TEMPORAL)
+# define NSHLD() __asm__ ("dmb nshld")
+# define NSHST() __asm__ ("dmb nshst")
+# define LDP "ldnp"
+# define STP "stnp"
+# else
+# define NSHLD()
+# define NSHST()
+# define LDP "ldp"
+# define STP "stp"
+# endif
+ typedef unsigned int block_half_t
+ __attribute__((vector_size(DEVICE_MEMSETCPY_ALIGN_IN_BYTES / 2)));
+# endif
+# endif
+#endif
+
+__attribute__((visibility("hidden")))
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+ volatile const char *pcSrc = pvSrc;
+ volatile char *pcDst = pvDst;
+ size_t uPreambleBytes;
+ int bBlockCopy = 0;
+
+ size_t uSrcUnaligned = (size_t)pcSrc % sizeof(block_t);
+ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+ if (!uSrcUnaligned && !uDstUnaligned)
+ {
+ /* Neither pointer is unaligned. Optimal case. */
+ bBlockCopy = 1;
+ }
+ else
+ {
+ if (uSrcUnaligned == uDstUnaligned)
+ {
+ /* Neither pointer is usefully aligned, but they are misaligned in
+ * the same way, so we can copy a preamble in a slow way, then
+ * optimize the rest.
+ */
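+			/* e.g. with a 16-byte block_t and both pointers at offset 3
+			 * within a block, 13 bytes are copied bytewise here before the
+			 * block-aligned copy below takes over.
+			 */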
+ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+ uSize -= uPreambleBytes;
+ while (uPreambleBytes)
+ {
+ *pcDst++ = *pcSrc++;
+ uPreambleBytes--;
+ }
+
+ bBlockCopy = 1;
+ }
+ else if ((uSrcUnaligned | uDstUnaligned) % sizeof(int) == 0)
+ {
+ /* Both pointers are at least 32-bit aligned, and we assume that
+ * the processor must handle all kinds of 32-bit load-stores.
+ * NOTE: Could we optimize this with a non-temporal version?
+ */
+ if (uSize >= sizeof(int))
+ {
+ volatile int *piSrc = (int *)pcSrc;
+ volatile int *piDst = (int *)pcDst;
+
+ while (uSize >= sizeof(int))
+ {
+ *piDst++ = *piSrc++;
+ uSize -= sizeof(int);
+ }
+
+ pcSrc = (char *)piSrc;
+ pcDst = (char *)piDst;
+ }
+ }
+ }
+
+ if (bBlockCopy && uSize >= sizeof(block_t))
+ {
+ volatile block_t *pSrc = (block_t *)pcSrc;
+ volatile block_t *pDst = (block_t *)pcDst;
+
+ NSHLD();
+
+ while (uSize >= sizeof(block_t))
+ {
+#if defined(DEVICE_MEMSETCPY_ARM64)
+ __asm__ (LDP " " REGSZ "0, " REGSZ "1, [%[pSrc]]\n\t"
+ STP " " REGSZ "0, " REGSZ "1, [%[pDst]]"
+ :
+ : [pSrc] "r" (pSrc), [pDst] "r" (pDst)
+ : "memory", REGCL "0", REGCL "1");
+#else
+ *pDst = *pSrc;
+#endif
+ pDst++;
+ pSrc++;
+ uSize -= sizeof(block_t);
+ }
+
+ NSHST();
+
+ pcSrc = (char *)pSrc;
+ pcDst = (char *)pDst;
+ }
+
+ while (uSize)
+ {
+ *pcDst++ = *pcSrc++;
+ uSize--;
+ }
+}
+
+__attribute__((visibility("hidden")))
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+ volatile char *pcDst = pvDst;
+ size_t uPreambleBytes;
+
+ size_t uDstUnaligned = (size_t)pcDst % sizeof(block_t);
+
+ if (uDstUnaligned)
+ {
+ uPreambleBytes = MIN(sizeof(block_t) - uDstUnaligned, uSize);
+ uSize -= uPreambleBytes;
+ while (uPreambleBytes)
+ {
+ *pcDst++ = ui8Value;
+ uPreambleBytes--;
+ }
+ }
+
+ if (uSize >= sizeof(block_t))
+ {
+ volatile block_t *pDst = (block_t *)pcDst;
+#if defined(DEVICE_MEMSETCPY_ARM64)
+ block_half_t bValue = {0};
+#else
+	block_t bValue = {0};
+#endif
+ size_t i;
+
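+	/* Replicate the byte into every 32-bit lane of the block: e.g.
+	 * ui8Value == 0xAB fills each lane with 0xABABABAB. */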
+ for (i = 0; i < sizeof(bValue) / sizeof(unsigned int); i++)
+ bValue[i] = ui8Value << 24U |
+ ui8Value << 16U |
+ ui8Value << 8U |
+ ui8Value;
+
+ NSHLD();
+
+ while (uSize >= sizeof(block_t))
+ {
+#if defined(DEVICE_MEMSETCPY_ARM64)
+ __asm__ (STP " %" REGSZ "[bValue], %" REGSZ "[bValue], [%[pDst]]"
+ :
+ : [bValue] BVCLB (bValue), [pDst] "r" (pDst)
+ : "memory");
+#else
+ *pDst = bValue;
+#endif
+ pDst++;
+ uSize -= sizeof(block_t);
+ }
+
+ NSHST();
+
+ pcDst = (char *)pDst;
+ }
+
+ while (uSize)
+ {
+ *pcDst++ = ui8Value;
+ uSize--;
+ }
+}
+
+#else /* !defined(__GNUC__) */
+
+/* Potentially very slow (but safe) fallbacks for non-GNU C compilers */
+
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t uSize)
+{
+ volatile const char *pcSrc = pvSrc;
+ volatile char *pcDst = pvDst;
+
+ while (uSize)
+ {
+ *pcDst++ = *pcSrc++;
+ uSize--;
+ }
+}
+
+void DeviceMemSet(void *pvDst, unsigned char ui8Value, size_t uSize)
+{
+ volatile char *pcDst = pvDst;
+
+ while (uSize)
+ {
+ *pcDst++ = ui8Value;
+ uSize--;
+ }
+}
+
+#endif /* !defined(__GNUC__) */
+
+#endif /* defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/mmu_common.c b/drivers/gpu/drm/img-rogue/1.10/mmu_common.c
new file mode 100644
index 00000000000000..6504c858208550
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mmu_common.c
@@ -0,0 +1,4263 @@
+/*************************************************************************/ /*!
+@File
+@Title Common MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /***************************************************************************/
+
+#include "devicemem_server_utils.h"
+
+/* Our own interface */
+#include "mmu_common.h"
+
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmmudefs_km.h"
+/*
+Interfaces to other modules:
+
+Let's keep this graph up-to-date:
+
+ +-----------+
+ | devicemem |
+ +-----------+
+ |
+ +============+
+ | mmu_common |
+ +============+
+ |
+ +-----------------+
+ | |
+ +---------+ +----------+
+ | pmr | | device |
+ +---------+ +----------+
+ */
+
+#include "img_types.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#if defined(PDUMP)
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#endif
+#include "pmr.h"
+/* include/ */
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv.h"
+#include "htbuffer.h"
+
+#include "rgxdevice.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#endif
+
+#include "dllist.h"
+
+// #define MMU_OBJECT_REFCOUNT_DEBUGING 1
+#if defined (MMU_OBJECT_REFCOUNT_DEBUGING)
+#define MMU_OBJ_DBG(x) PVR_DPF(x);
+#else
+#define MMU_OBJ_DBG(x)
+#endif
+
+typedef IMG_UINT32 MMU_FLAGS_T;
+
+typedef enum _MMU_MOD_
+{
+ MMU_MOD_UNKNOWN = 0,
+ MMU_MOD_MAP,
+ MMU_MOD_UNMAP,
+} MMU_MOD;
+
+
+/*!
+ * Refcounted structure that is shared between the context and
+ * the cleanup thread items.
+ * It is used to keep track of all cleanup items and whether the creating
+ * MMU context has been destroyed and therefore is not allowed to be
+ * accessed anymore.
+ *
+ * The cleanup thread is used to defer the freeing of the page tables
+ * because we have to make sure that the MMU cache has been invalidated.
+ * If we don't take care of this the MMU might partially access cached
+ * and uncached tables which might lead to inconsistencies and in the
+ * worst case to MMU pending faults on random memory.
+ */
+typedef struct _MMU_CTX_CLEANUP_DATA_
+{
+ /*! Refcount to know when this structure can be destroyed */
+ ATOMIC_T iRef;
+ /*! Protect items in this structure, especially the refcount */
+ POS_LOCK hCleanupLock;
+ /*! List of all cleanup items currently in flight */
+ DLLIST_NODE sMMUCtxCleanupItemsHead;
+	/*! Whether the creating MMU context still exists; if not, it must not be accessed anymore */
+ IMG_BOOL bMMUContextExists;
+} MMU_CTX_CLEANUP_DATA;
+
+
+/*!
+ * Structure holding one or more page tables that need to be
+ * freed after the MMU cache has been flushed which is signalled when
+ * the stored sync has a value that is <= the required value.
+ */
+typedef struct _MMU_CLEANUP_ITEM_
+{
+ /*! Cleanup thread data */
+ PVRSRV_CLEANUP_THREAD_WORK sCleanupThreadFn;
+ /*! List to hold all the MMU_MEMORY_MAPPINGs, i.e. page tables */
+ DLLIST_NODE sMMUMappingHead;
+ /*! Node of the cleanup item list for the context */
+ DLLIST_NODE sMMUCtxCleanupItem;
+ /* Pointer to the cleanup meta data */
+ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData;
+ /* Sync to query if the MMU cache was flushed */
+ PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ /*! The update value of the sync to signal that the cache was flushed */
+ IMG_UINT16 uiRequiredSyncVal;
+ /*! The device node needed to free the page tables */
+ PVRSRV_DEVICE_NODE *psDevNode;
+} MMU_CLEANUP_ITEM;
+
+/*!
+ All physical allocations and frees are relative to this context, so
+ we would get all the allocations of PCs, PDs, and PTs from the same
+ RA.
+
+ We have one per MMU context in case we have mixed UMA/LMA devices
+ within the same system.
+ */
+typedef struct _MMU_PHYSMEM_CONTEXT_
+{
+ /*! Parent device node */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /*! Refcount so we know when to free up the arena */
+ IMG_UINT32 uiNumAllocations;
+
+ /*! Arena from which physical memory is derived */
+ RA_ARENA *psPhysMemRA;
+ /*! Arena name */
+ IMG_CHAR *pszPhysMemRAName;
+ /*! Size of arena name string */
+ size_t uiPhysMemRANameAllocSize;
+
+ /*! Meta data for deferred cleanup */
+ MMU_CTX_CLEANUP_DATA *psCleanupData;
+ /*! Temporary list of all deferred MMU_MEMORY_MAPPINGs. */
+ DLLIST_NODE sTmpMMUMappingHead;
+
+} MMU_PHYSMEM_CONTEXT;
+
+/*!
+ Mapping structure for MMU memory allocation
+ */
+typedef struct _MMU_MEMORY_MAPPING_
+{
+ /*! Physmem context to allocate from */
+ MMU_PHYSMEM_CONTEXT *psContext;
+ /*! OS/system Handle for this allocation */
+ PG_HANDLE sMemHandle;
+ /*! CPU virtual address of this allocation */
+ void *pvCpuVAddr;
+ /*! Device physical address of this allocation */
+ IMG_DEV_PHYADDR sDevPAddr;
+ /*! Size of this allocation */
+ size_t uiSize;
+ /*! Number of current mappings of this allocation */
+ IMG_UINT32 uiCpuVAddrRefCount;
+ /*! Node for the defer free list */
+ DLLIST_NODE sMMUMappingItem;
+} MMU_MEMORY_MAPPING;
+
+/*!
+ Memory descriptor for MMU objects. There can be more than one memory
+ descriptor per MMU memory allocation.
+ */
+typedef struct _MMU_MEMORY_DESC_
+{
+ /* NB: bValid is set if this descriptor describes physical
+ memory. This allows "empty" descriptors to exist, such that we
+ can allocate them in batches. */
+ /*! Does this MMU object have physical backing */
+ IMG_BOOL bValid;
+ /*! Device Physical address of physical backing */
+ IMG_DEV_PHYADDR sDevPAddr;
+ /*! CPU virtual address of physical backing */
+ void *pvCpuVAddr;
+ /*! Mapping data for this MMU object */
+ MMU_MEMORY_MAPPING *psMapping;
+ /*! Memdesc offset into the psMapping */
+ IMG_UINT32 uiOffset;
+ /*! Size of the Memdesc */
+ IMG_UINT32 uiSize;
+} MMU_MEMORY_DESC;
+
+/*!
+ MMU levelx structure. This is generic and is used
+ for all levels (PC, PD, PT).
+ */
+typedef struct _MMU_Levelx_INFO_
+{
+ /*! The Number of entries in this level */
+ IMG_UINT32 ui32NumOfEntries;
+
+	/*! Number of times this level has been referenced. Note: For Level1 (PTE)
+	    we still take/drop the reference when setting up the page tables rather
+	    than at map/unmap time as this simplifies things */
+ IMG_UINT32 ui32RefCount;
+
+ /*! MemDesc for this level */
+ MMU_MEMORY_DESC sMemDesc;
+
+ /*! Array of infos for the next level. Must be last member in structure */
+ struct _MMU_Levelx_INFO_ *apsNextLevel[1];
+} MMU_Levelx_INFO;
+
+/*!
+ MMU context structure
+ */
+struct _MMU_CONTEXT_
+{
+ /*! Parent device node */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ MMU_DEVICEATTRIBS *psDevAttrs;
+
+ /*! For allocation and deallocation of the physical memory where
+ the pagetables live */
+ struct _MMU_PHYSMEM_CONTEXT_ *psPhysMemCtx;
+
+#if defined(PDUMP)
+ /*! PDump context ID (required for PDump commands with virtual addresses) */
+ IMG_UINT32 uiPDumpContextID;
+
+ /*! The refcount of the PDump context ID */
+ IMG_UINT32 ui32PDumpContextIDRefCount;
+#endif
+
+ /*! Data that is passed back during device specific callbacks */
+ IMG_HANDLE hDevData;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32OSidReg;
+ IMG_BOOL bOSidAxiProt;
+#endif
+
+ /*! Lock to ensure exclusive access when manipulating the MMU context or
+ * reading and using its content
+ */
+ POS_LOCK hLock;
+
+ /*! Base level info structure. Must be last member in structure */
+ MMU_Levelx_INFO sBaseLevelInfo;
+ /* NO OTHER MEMBERS AFTER THIS STRUCTURE ! */
+};
+
+static const IMG_DEV_PHYADDR gsBadDevPhyAddr = {MMU_BAD_PHYS_ADDR};
+
+#if defined(DEBUG)
+#include "log2.h"
+#endif
+
+
+/*****************************************************************************
+ * Utility functions *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function _FreeMMUMapping
+
+@Description Free a given dllist of MMU_MEMORY_MAPPINGs and the page tables
+ they represent.
+
+@Input psDevNode Device node
+
+@Input psTmpMMUMappingHead List of MMU_MEMORY_MAPPINGs to free
+ */
+/*****************************************************************************/
+static void
+_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+ PDLLIST_NODE psTmpMMUMappingHead)
+{
+ PDLLIST_NODE psNode, psNextNode;
+
+ /* Free the current list unconditionally */
+ dllist_foreach_node(psTmpMMUMappingHead,
+ psNode,
+ psNextNode)
+ {
+ MMU_MEMORY_MAPPING *psMapping = IMG_CONTAINER_OF(psNode,
+ MMU_MEMORY_MAPPING,
+ sMMUMappingItem);
+
+ psDevNode->pfnDevPxFree(psDevNode, &psMapping->sMemHandle);
+ dllist_remove_node(psNode);
+ OSFreeMem(psMapping);
+ }
+}
+
+/*************************************************************************/ /*!
+@Function _CleanupThread_FreeMMUMapping
+
+@Description Function to be executed by the cleanup thread to free
+ MMU_MEMORY_MAPPINGs after the MMU cache has been invalidated.
+
+ This function will request a MMU cache invalidate once and
+ retry to free the MMU_MEMORY_MAPPINGs until the invalidate
+ has been executed.
+
+ If the memory context that created this cleanup item has been
+ destroyed in the meantime this function will directly free the
+ MMU_MEMORY_MAPPINGs without waiting for any MMU cache
+ invalidation.
+
+@Input pvData Cleanup data in form of a MMU_CLEANUP_ITEM
+
+@Return PVRSRV_OK if successful otherwise PVRSRV_ERROR_RETRY
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR
+_CleanupThread_FreeMMUMapping(void* pvData)
+{
+ PVRSRV_ERROR eError;
+ MMU_CLEANUP_ITEM *psCleanup = (MMU_CLEANUP_ITEM *) pvData;
+ MMU_CTX_CLEANUP_DATA *psMMUCtxCleanupData = psCleanup->psMMUCtxCleanupData;
+ PVRSRV_DEVICE_NODE *psDevNode = psCleanup->psDevNode;
+ IMG_BOOL bFreeNow;
+ IMG_UINT32 uiSyncCurrent;
+ IMG_UINT32 uiSyncReq;
+
+ OSLockAcquire(psMMUCtxCleanupData->hCleanupLock);
+
+ /* Don't attempt to free anything when the context has been destroyed.
+	 * Especially don't access any device specific structures anymore! */
+ if (!psMMUCtxCleanupData->bMMUContextExists)
+ {
+ OSFreeMem(psCleanup);
+ eError = PVRSRV_OK;
+ goto e0;
+ }
+
+ if (psCleanup->psSync == NULL)
+ {
+ /* Kick to invalidate the MMU caches and get sync info */
+ psDevNode->pfnMMUCacheInvalidateKick(psDevNode,
+ &psCleanup->uiRequiredSyncVal,
+ IMG_TRUE);
+ psCleanup->psSync = psDevNode->psMMUCacheSyncPrim;
+ }
+
+ uiSyncCurrent = OSReadDeviceMem32(psCleanup->psSync->pui32LinAddr);
+ uiSyncReq = psCleanup->uiRequiredSyncVal;
+
+ /* Either the invalidate has been executed ... */
+ bFreeNow = (uiSyncCurrent >= uiSyncReq) ? IMG_TRUE :
+ /* ... with the counter wrapped around ... */
+ (uiSyncReq - uiSyncCurrent) > 0xEFFFFFFFUL ? IMG_TRUE :
+ /* ... or are we still waiting for the invalidate? */
+ IMG_FALSE;
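+
+	/* e.g. uiSyncReq == 0xFFFFFFFE with uiSyncCurrent == 0x00000001: the
+	 * first test fails, but uiSyncReq - uiSyncCurrent == 0xFFFFFFFD exceeds
+	 * 0xEFFFFFFF, so the counter is treated as wrapped and the tables are
+	 * freed.
+	 */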
+
+#if defined(NO_HARDWARE)
+ /* In NOHW the syncs will never be updated so just free the tables */
+ bFreeNow = IMG_TRUE;
+#endif
+
+ if (bFreeNow)
+ {
+ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+ dllist_remove_node(&psCleanup->sMMUCtxCleanupItem);
+ OSFreeMem(psCleanup);
+
+ eError = PVRSRV_OK;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ }
+
+ e0:
+
+	/* If this cleanup task has been successfully executed we can
+	 * decrease the context cleanup data refcount. Success here means
+	 * that the MMU_MEMORY_MAPPINGs have been freed, either by this
+	 * cleanup task or when the MMU context was destroyed. */
+ if (eError == PVRSRV_OK)
+ {
+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+
+ if (OSAtomicDecrement(&psMMUCtxCleanupData->iRef) == 0)
+ {
+ OSLockDestroy(psMMUCtxCleanupData->hCleanupLock);
+ OSFreeMem(psMMUCtxCleanupData);
+ }
+ }
+ else
+ {
+ OSLockRelease(psMMUCtxCleanupData->hCleanupLock);
+ }
+
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _SetupCleanup_FreeMMUMapping
+
+@Description Setup a cleanup item for the cleanup thread that will
+ kick off a MMU invalidate request and free the associated
+ MMU_MEMORY_MAPPINGs when the invalidate was successful.
+
+@Input psDevNode Device node
+
+@Input psPhysMemCtx The current MMU physmem context
+ */
+/*****************************************************************************/
+static void
+_SetupCleanup_FreeMMUMapping(PVRSRV_DEVICE_NODE *psDevNode,
+ MMU_PHYSMEM_CONTEXT *psPhysMemCtx)
+{
+
+ MMU_CLEANUP_ITEM *psCleanupItem;
+ MMU_CTX_CLEANUP_DATA *psCleanupData = psPhysMemCtx->psCleanupData;
+
+ if (dllist_is_empty(&psPhysMemCtx->sTmpMMUMappingHead))
+ {
+ goto e0;
+ }
+
+#if !defined(SUPPORT_MMU_PENDING_FAULT_PROTECTION)
+	/* If this protection has been disabled we free the page tables immediately */
+ goto e1;
+#endif
+
+ /* Don't defer the freeing if we are currently unloading the driver
+ * or if the sync has been destroyed */
+ if (PVRSRVGetPVRSRVData()->bUnload ||
+ psDevNode->psMMUCacheSyncPrim == NULL)
+ {
+ goto e1;
+ }
+
+ /* Allocate a cleanup item */
+ psCleanupItem = OSAllocMem(sizeof(*psCleanupItem));
+ if (!psCleanupItem)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get memory for deferred page table cleanup. "
+ "Freeing tables immediately",
+ __FUNCTION__));
+ goto e1;
+ }
+
+ /* Set sync to NULL to indicate we did not interact with
+ * the FW yet. Kicking off an MMU cache invalidate should
+ * be done in the cleanup thread to not waste time here. */
+ psCleanupItem->psSync = NULL;
+ psCleanupItem->uiRequiredSyncVal = 0;
+ psCleanupItem->psDevNode = psDevNode;
+ psCleanupItem->psMMUCtxCleanupData = psCleanupData;
+
+ OSAtomicIncrement(&psCleanupData->iRef);
+
+ /* Move the page tables to free to the cleanup item */
+ dllist_replace_head(&psPhysMemCtx->sTmpMMUMappingHead,
+ &psCleanupItem->sMMUMappingHead);
+
+ /* Add the cleanup item itself to the context list */
+ dllist_add_to_tail(&psCleanupData->sMMUCtxCleanupItemsHead,
+ &psCleanupItem->sMMUCtxCleanupItem);
+
+ /* Setup the cleanup thread data and add the work item */
+ psCleanupItem->sCleanupThreadFn.pfnFree = _CleanupThread_FreeMMUMapping;
+ psCleanupItem->sCleanupThreadFn.pvData = psCleanupItem;
+ psCleanupItem->sCleanupThreadFn.bDependsOnHW = IMG_TRUE;
+ CLEANUP_THREAD_SET_RETRY_TIMEOUT(&psCleanupItem->sCleanupThreadFn,
+ CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT);
+
+ PVRSRVCleanupThreadAddWork(&psCleanupItem->sCleanupThreadFn);
+
+ return;
+
+ e1:
+ /* Free the page tables now */
+ _FreeMMUMapping(psDevNode, &psPhysMemCtx->sTmpMMUMappingHead);
+ e0:
+ return;
+}
+
+/*************************************************************************/ /*!
+@Function _CalcPCEIdx
+
+@Description Calculate the page catalogue index
+
+@Input sDevVAddr Device virtual address
+
+@Input psDevVAddrConfig Configuration of the virtual address
+
+@Input bRoundUp Round up the index
+
+@Return The page catalogue index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPCEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ IMG_BOOL bRoundUp)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 ui32RetVal;
+
+ sTmpDevVAddr = sDevVAddr;
+
+ if (bRoundUp)
+ {
+ sTmpDevVAddr.uiAddr --;
+ }
+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPCIndexMask)
+ >> psDevVAddrConfig->uiPCIndexShift);
+
+ if (bRoundUp)
+ {
+ ui32RetVal ++;
+ }
+
+ return ui32RetVal;
+}
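+
+/* The bRoundUp path computes a ceiling: decrement the (exclusive) end
+ * address, then increment the resulting index. With a hypothetical
+ * uiPCIndexShift of 30 (1GB per PC entry), an end address of exactly 2GB
+ * yields index 2, while 2GB + 1 byte yields 3.
+ */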
+
+
+/*************************************************************************/ /*!
+@Function _CalcPDEIdx
+
+@Description Calculate the page directory index
+
+@Input sDevVAddr Device virtual address
+
+@Input psDevVAddrConfig Configuration of the virtual address
+
+@Input bRoundUp Round up the index
+
+@Return The page directory index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPDEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ IMG_BOOL bRoundUp)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 ui32RetVal;
+
+ sTmpDevVAddr = sDevVAddr;
+
+ if (bRoundUp)
+ {
+ sTmpDevVAddr.uiAddr --;
+ }
+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPDIndexMask)
+ >> psDevVAddrConfig->uiPDIndexShift);
+
+ if (bRoundUp)
+ {
+ ui32RetVal ++;
+ }
+
+ return ui32RetVal;
+}
+
+
+/*************************************************************************/ /*!
+@Function _CalcPTEIdx
+
+@Description Calculate the page entry index
+
+@Input sDevVAddr Device virtual address
+
+@Input psDevVAddrConfig Configuration of the virtual address
+
+@Input bRoundUp Round up the index
+
+@Return The page entry index
+ */
+/*****************************************************************************/
+static IMG_UINT32 _CalcPTEIdx(IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ IMG_BOOL bRoundUp)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_UINT32 ui32RetVal;
+
+ sTmpDevVAddr = sDevVAddr;
+ sTmpDevVAddr.uiAddr -= psDevVAddrConfig->uiOffsetInBytes;
+ if (bRoundUp)
+ {
+ sTmpDevVAddr.uiAddr --;
+ }
+ ui32RetVal = (IMG_UINT32) ((sTmpDevVAddr.uiAddr & psDevVAddrConfig->uiPTIndexMask)
+ >> psDevVAddrConfig->uiPTIndexShift);
+
+ if (bRoundUp)
+ {
+ ui32RetVal ++;
+ }
+
+ return ui32RetVal;
+}
+
+/*****************************************************************************
+ * MMU memory allocation/management functions (mem desc) *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMem_RAImportAlloc
+
+@Description Imports MMU Px memory into the RA. This is where the
+ actual allocation of physical memory happens.
+
+@Input hArenaHandle Handle that was passed in during the
+ creation of the RA
+
+@Input uiSize Size of the memory to import
+
+@Input uiFlags Flags that were passed in the allocation.
+
+@Output puiBase The address of where to insert this import
+
+@Output puiActualSize The actual size of the import
+
+@Output phPriv Handle which will be passed back when
+ this import is freed
+
+@Return PVRSRV_OK if import alloc was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_PhysMem_RAImportAlloc(RA_PERARENA_HANDLE hArenaHandle,
+ RA_LENGTH_T uiSize,
+ RA_FLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phPriv)
+{
+ MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psCtx->psDevNode;
+ MMU_MEMORY_MAPPING *psMapping;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(pszAnnotation);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+ psMapping = OSAllocMem(sizeof(MMU_MEMORY_MAPPING));
+ if (psMapping == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ eError = psDevNode->pfnDevPxAlloc(psDevNode, TRUNCATE_64BITS_TO_SIZE_T(uiSize), &psMapping->sMemHandle,
+ &psMapping->sDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ psMapping->psContext = psCtx;
+ psMapping->uiSize = TRUNCATE_64BITS_TO_SIZE_T(uiSize);
+
+ psMapping->uiCpuVAddrRefCount = 0;
+
+ *phPriv = (RA_PERISPAN_HANDLE) psMapping;
+
+ /* Note: This assumes this memory never gets paged out */
+ *puiBase = (RA_BASE_T)psMapping->sDevPAddr.uiAddr;
+ *puiActualSize = uiSize;
+
+ return PVRSRV_OK;
+
+ e1:
+ OSFreeMem(psMapping);
+ e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMem_RAImportFree
+
+@Description Frees MMU Px memory that was imported into the RA. The
+ physical pages are not freed immediately; the mapping is
+ queued on the defer-free list.
+
+@Input hArenaHandle Handle that was passed in during the
+ creation of the RA
+
+@Input uiBase Base address of the import being freed
+
+@Input hPriv Private data that the import alloc provided
+
+@Return None
+ */
+/*****************************************************************************/
+static void _MMU_PhysMem_RAImportFree(RA_PERARENA_HANDLE hArenaHandle,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hPriv)
+{
+ MMU_MEMORY_MAPPING *psMapping = (MMU_MEMORY_MAPPING *) hPriv;
+ MMU_PHYSMEM_CONTEXT *psCtx = (MMU_PHYSMEM_CONTEXT *) hArenaHandle;
+
+ PVR_UNREFERENCED_PARAMETER(uiBase);
+
+ /* Check we have dropped all CPU mappings */
+ PVR_ASSERT(psMapping->uiCpuVAddrRefCount == 0);
+
+ /* Add mapping to defer free list */
+ psMapping->psContext = NULL;
+ dllist_add_to_tail(&psCtx->sTmpMMUMappingHead, &psMapping->sMMUMappingItem);
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMemAlloc
+
+@Description Allocates physical memory for MMU objects
+
+@Input psCtx Physmem context to do the allocation from
+
+@Output psMemDesc Allocation description
+
+@Input uiBytes Size of the allocation in bytes
+
+@Input uiAlignment Alignment requirement of this allocation
+
+@Return PVRSRV_OK if allocation was successful
+ */
+/*****************************************************************************/
+
+static PVRSRV_ERROR _MMU_PhysMemAlloc(MMU_PHYSMEM_CONTEXT *psCtx,
+ MMU_MEMORY_DESC *psMemDesc,
+ size_t uiBytes,
+ size_t uiAlignment)
+{
+ PVRSRV_ERROR eError;
+ RA_BASE_T uiPhysAddr;
+
+ if (!psMemDesc || psMemDesc->bValid)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = RA_Alloc(psCtx->psPhysMemRA,
+ uiBytes,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* flags */
+ uiAlignment,
+ "",
+ &uiPhysAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psMemDesc->psMapping);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_PhysMemAlloc: ERROR call to RA_Alloc() failed"));
+ return eError;
+ }
+
+ psMemDesc->bValid = IMG_TRUE;
+ psMemDesc->pvCpuVAddr = NULL;
+ psMemDesc->sDevPAddr.uiAddr = (IMG_UINT64) uiPhysAddr;
+
+ if (psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+ {
+ eError = psCtx->psDevNode->pfnDevPxMap(psCtx->psDevNode,
+ &psMemDesc->psMapping->sMemHandle,
+ psMemDesc->psMapping->uiSize,
+ &psMemDesc->psMapping->sDevPAddr,
+ &psMemDesc->psMapping->pvCpuVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ RA_Free(psCtx->psPhysMemRA, psMemDesc->sDevPAddr.uiAddr);
+ return eError;
+ }
+ }
+
+ psMemDesc->psMapping->uiCpuVAddrRefCount++;
+ psMemDesc->uiOffset = (psMemDesc->sDevPAddr.uiAddr - psMemDesc->psMapping->sDevPAddr.uiAddr);
+ psMemDesc->pvCpuVAddr = (IMG_UINT8 *) psMemDesc->psMapping->pvCpuVAddr + psMemDesc->uiOffset;
+ psMemDesc->uiSize = uiBytes;
+ PVR_ASSERT(psMemDesc->pvCpuVAddr != NULL);
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PhysMemFree
+
+@Description Frees physical memory for MMU objects
+
+@Input psCtx Physmem context to do the free on
+
+@Input psMemDesc Allocation description
+
+@Return None
+ */
+/*****************************************************************************/
+static void _MMU_PhysMemFree(MMU_PHYSMEM_CONTEXT *psCtx,
+ MMU_MEMORY_DESC *psMemDesc)
+{
+ RA_BASE_T uiPhysAddr;
+
+ PVR_ASSERT(psMemDesc->bValid);
+
+ if (--psMemDesc->psMapping->uiCpuVAddrRefCount == 0)
+ {
+ psCtx->psDevNode->pfnDevPxUnMap(psCtx->psDevNode, &psMemDesc->psMapping->sMemHandle,
+ psMemDesc->psMapping->pvCpuVAddr);
+ }
+
+ psMemDesc->pvCpuVAddr = NULL;
+
+ uiPhysAddr = psMemDesc->sDevPAddr.uiAddr;
+ RA_Free(psCtx->psPhysMemRA, uiPhysAddr);
+
+ psMemDesc->bValid = IMG_FALSE;
+}
+
+
+/*****************************************************************************
+ * MMU object allocation/management functions *
+ *****************************************************************************/
+
+static INLINE PVRSRV_ERROR _MMU_ConvertDevMemFlags(IMG_BOOL bInvalidate,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ MMU_PROTFLAGS_T *uiMMUProtFlags,
+ MMU_CONTEXT *psMMUContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 uiGPUCacheMode;
+
+ /* Do flag conversion between devmem flags and MMU generic flags */
+ if (bInvalidate == IMG_FALSE)
+ {
+ *uiMMUProtFlags |= ( (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
+ >> PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET)
+ << MMU_PROTFLAGS_DEVICE_OFFSET;
+
+ if (PVRSRV_CHECK_GPU_READABLE(uiMappingFlags))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_READABLE;
+ }
+ if (PVRSRV_CHECK_GPU_WRITEABLE(uiMappingFlags))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_WRITEABLE;
+ }
+
+ eError = DevmemDeviceCacheMode(psMMUContext->psDevNode,
+ uiMappingFlags,
+ &uiGPUCacheMode);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ switch (uiGPUCacheMode)
+ {
+ case PVRSRV_MEMALLOCFLAG_GPU_UNCACHED:
+ case PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE:
+ break;
+ case PVRSRV_MEMALLOCFLAG_GPU_CACHED:
+ *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+ break;
+ default:
+				PVR_DPF((PVR_DBG_ERROR, "%s: Wrong parameters", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (DevmemDeviceCacheCoherency(psMMUContext->psDevNode, uiMappingFlags))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_CACHE_COHERENT;
+ }
+
+#if defined(SUPPORT_RGX)
+	if ((psMMUContext->psDevNode->pfnCheckDeviceFeature) &&
+		PVRSRV_IS_FEATURE_SUPPORTED(psMMUContext->psDevNode, MIPS))
+ {
+ /*
+ If we are allocating on the MMU of the firmware processor, the cached/uncached attributes
+ must depend on the FIRMWARE_CACHED allocation flag.
+ */
+ if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs)
+ {
+ if (uiMappingFlags & PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED))
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_CACHED;
+ }
+ else
+ {
+ *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHED;
+
+ }
+ *uiMMUProtFlags &= ~MMU_PROTFLAGS_CACHE_COHERENT;
+ }
+ }
+#endif
+ }
+ else
+ {
+ *uiMMUProtFlags |= MMU_PROTFLAGS_INVALID;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _PxMemAlloc
+
+@Description Allocates physical memory for MMU objects, initialises
+ and PDumps it.
+
+@Input psMMUContext MMU context
+
+@Input uiNumEntries Number of entries to allocate
+
+@Input psConfig MMU Px config
+
+@Input eMMULevel MMU level that the allocation is for
+
+@Output psMemDesc Description of allocation
+
+@Return PVRSRV_OK if allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _PxMemAlloc(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiNumEntries,
+ const MMU_PxE_CONFIG *psConfig,
+ MMU_LEVEL eMMULevel,
+ MMU_MEMORY_DESC *psMemDesc,
+ IMG_UINT32 uiLog2Align)
+{
+ PVRSRV_ERROR eError;
+ size_t uiBytes;
+ size_t uiAlign;
+
+ PVR_ASSERT(psConfig->uiBytesPerEntry != 0);
+
+ uiBytes = uiNumEntries * psConfig->uiBytesPerEntry;
+	/* We need the alignment of the previous level here because that level's entries point to the object we allocate */
+ uiAlign = 1 << uiLog2Align;
+
+ /*
+ * If the hardware specifies an alignment requirement for a page table then
+ * it also requires that all memory up to the next aligned address is
+ * zeroed.
+ *
+ * Failing to do this can result in uninitialised data outside of the actual
+ * page table range being read by the MMU and treated as valid, e.g. the
+ * pending flag.
+ *
+ * Typically this will affect 1MiB, 2MiB PT pages which have a size of 16
+ * and 8 bytes respectively but an alignment requirement of 64 bytes each.
+ */
+ uiBytes = PVR_ALIGN(uiBytes, uiAlign);
+
+ /* allocate the object */
+ eError = _MMU_PhysMemAlloc(psMMUContext->psPhysMemCtx,
+ psMemDesc, uiBytes, uiAlign);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_PxMemAlloc: failed to allocate memory for the MMU object"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /*
+ Clear the object
+		Note: if any MMUs are cleared with non-zero values then we will need
+		a custom clear function
+ Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+ unlikely
+ */
+ OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, uiBytes);
+
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psMemDesc->psMapping->sMemHandle,
+ psMemDesc->uiOffset,
+ psMemDesc->uiSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Alloc MMU object");
+
+ PDumpMMUMalloc(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ eMMULevel,
+ &psMemDesc->sDevPAddr,
+ uiBytes,
+ uiAlign,
+ psMMUContext->psDevAttrs->eMMUType);
+
+ PDumpMMUDumpPxEntries(eMMULevel,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psMemDesc->pvCpuVAddr,
+ psMemDesc->sDevPAddr,
+ 0,
+ uiNumEntries,
+ NULL, NULL, 0, /* pdump symbolic info is irrelevant here */
+ psConfig->uiBytesPerEntry,
+ uiLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+ return PVRSRV_OK;
+ e1:
+ _MMU_PhysMemFree(psMMUContext->psPhysMemCtx,
+ psMemDesc);
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _PxMemFree
+
+@Description Frees physical memory for MMU objects, de-initialises
+ and PDumps it.
+
+@Input psMemDesc Description of allocation
+
+@Return None
+ */
+/*****************************************************************************/
+
+static void _PxMemFree(MMU_CONTEXT *psMMUContext,
+ MMU_MEMORY_DESC *psMemDesc, MMU_LEVEL eMMULevel)
+{
+#if defined(MMU_CLEARMEM_ON_FREE)
+ PVRSRV_ERROR eError;
+
+ /*
+ Clear the MMU object
+		Note: if any MMUs are cleared with non-zero values then we will need
+		a custom clear function
+ Note: 'Cached' is wrong for the LMA + ARM64 combination, but this is
+ unlikely
+ */
+	OSCachedMemSet(psMemDesc->pvCpuVAddr, 0, psMemDesc->uiSize);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Clear MMU object before freeing it");
+#endif
+#endif/* MMU_CLEARMEM_ON_FREE */
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Free MMU object");
+ {
+ PDumpMMUFree(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ eMMULevel,
+ &psMemDesc->sDevPAddr,
+ psMMUContext->psDevAttrs->eMMUType);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(eMMULevel);
+#endif
+ /* free the PC */
+ _MMU_PhysMemFree(psMMUContext->psPhysMemCtx, psMemDesc);
+}
+
+static INLINE PVRSRV_ERROR _SetupPTE(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 uiIndex,
+ const MMU_PxE_CONFIG *psConfig,
+ const IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL bUnmap,
+#if defined(PDUMP)
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+ IMG_UINT64 uiProtFlags)
+{
+ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+ IMG_UINT64 ui64PxE64;
+ IMG_UINT64 uiAddr = psDevPAddr->uiAddr;
+
+ if(PVRSRV_IS_FEATURE_SUPPORTED(psMMUContext->psDevNode, MIPS))
+ {
+ /*
+ * If mapping for the MIPS FW context, check for sensitive PAs
+ */
+ if (psMMUContext->psDevAttrs == psMMUContext->psDevNode->psFirmwareMMUDevAttrs)
+ {
+ PVRSRV_RGXDEV_INFO *psDevice = (PVRSRV_RGXDEV_INFO *)psMMUContext->psDevNode->pvDevice;
+
+ if (RGXMIPSFW_SENSITIVE_ADDR(uiAddr))
+ {
+ uiAddr = psDevice->psTrampoline->sPhysAddr.uiAddr + RGXMIPSFW_TRAMPOLINE_OFFSET(uiAddr);
+ }
+ /* FIX_HW_BRN_63553 is mainlined for all MIPS cores */
+ else if (uiAddr == 0x0 && !psDevice->sLayerParams.bDevicePA0IsValid)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s attempt to map addr 0x0 in the FW but 0x0 is not considered valid.", __func__));
+ return PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE;
+ }
+ }
+ }
+
+ /* Calculate Entry */
+ ui64PxE64 = uiAddr /* Calculate the offset to that base */
+ >> psConfig->uiAddrLog2Align /* Shift away the useless bits, because the alignment is very coarse and we address by alignment */
+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+ & psConfig->uiAddrMask; /* Delete unused bits */
+ ui64PxE64 |= uiProtFlags;
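+	/* e.g. with hypothetical config values uiAddrLog2Align == 12 and
+	 * uiAddrShift == 4, a 4KB-aligned PA of 0x00123000 packs to
+	 * (0x00123000 >> 12) << 4 == 0x1230 before the prot flags are OR'd in.
+	 */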
+
+ /* Set the entry */
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ pui64Px[uiIndex] = ui64PxE64;
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ /* assert that the result fits into 32 bits before writing
+ it into the 32-bit array with a cast */
+ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+ }
+ else
+ {
+ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ }
+
+
+ /* Log modification */
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiIndex, MMU_LEVEL_1,
+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+ !bUnmap);
+
+#if defined (PDUMP)
+ PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psMemDesc->pvCpuVAddr,
+ psMemDesc->sDevPAddr,
+ uiIndex,
+ 1,
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _SetupPxE
+
+@Description Sets up an entry of an MMU object to point to the
+ provided address
+
+@Input psMMUContext MMU context to operate on
+
+@Input psLevel Level info for MMU object
+
+@Input uiIndex Index into the MMU object to setup
+
+@Input psConfig MMU Px config
+
+@Input eMMULevel Level of MMU object
+
+@Input psDevPAddr Address to setup the MMU object to point to
+
+@Input pszMemspaceName Name of the PDump memory space that the entry
+ will point to
+
+@Input pszSymbolicAddr PDump symbolic address that the entry will
+ point to
+
+@Input uiProtFlags MMU protection flags
+
+@Return PVRSRV_OK if the setup was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _SetupPxE(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 uiIndex,
+ const MMU_PxE_CONFIG *psConfig,
+ MMU_LEVEL eMMULevel,
+ const IMG_DEV_PHYADDR *psDevPAddr,
+#if defined(PDUMP)
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset,
+#endif
+ MMU_FLAGS_T uiProtFlags,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = psMMUContext->psDevNode;
+ MMU_MEMORY_DESC *psMemDesc = &psLevel->sMemDesc;
+
+ IMG_UINT32 (*pfnDerivePxEProt4)(IMG_UINT32);
+ IMG_UINT64 (*pfnDerivePxEProt8)(IMG_UINT32, IMG_UINT32);
+
+ if (!psDevPAddr)
+ {
+ /* Invalidate entry */
+ if (~uiProtFlags & MMU_PROTFLAGS_INVALID)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Error, no physical address specified, but not invalidating entry"));
+ uiProtFlags |= MMU_PROTFLAGS_INVALID;
+ }
+ psDevPAddr = &gsBadDevPhyAddr;
+ }
+ else
+ {
+ if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "A physical address was specified when requesting invalidation of entry"));
+ uiProtFlags |= MMU_PROTFLAGS_INVALID;
+ }
+ }
+
+ switch(eMMULevel)
+ {
+ case MMU_LEVEL_3:
+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePCEProt4;
+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePCEProt8;
+ break;
+
+ case MMU_LEVEL_2:
+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePDEProt4;
+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePDEProt8;
+ break;
+
+ case MMU_LEVEL_1:
+ pfnDerivePxEProt4 = psMMUContext->psDevAttrs->pfnDerivePTEProt4;
+ pfnDerivePxEProt8 = psMMUContext->psDevAttrs->pfnDerivePTEProt8;
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid MMU level", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* How big is a PxE in bytes? */
+ /* Filling the actual Px entry with an address */
+ switch(psConfig->uiBytesPerEntry)
+ {
+ case 4:
+ {
+ IMG_UINT32 *pui32Px;
+ IMG_UINT64 ui64PxE64;
+
+ pui32Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ ui64PxE64 = psDevPAddr->uiAddr /* Calculate the offset to that base */
+ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+ & psConfig->uiAddrMask; /* Delete unused higher bits */
+
+ ui64PxE64 |= (IMG_UINT64)pfnDerivePxEProt4(uiProtFlags);
+ /* assert that the result fits into 32 bits before writing
+ it into the 32-bit array with a cast */
+ PVR_ASSERT(ui64PxE64 == (ui64PxE64 & 0xffffffffU));
+
+ /* We should never invalidate an invalid page */
+ if (uiProtFlags & MMU_PROTFLAGS_INVALID)
+ {
+ PVR_ASSERT(pui32Px[uiIndex] != ui64PxE64);
+ }
+ pui32Px[uiIndex] = (IMG_UINT32) ui64PxE64;
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiIndex, eMMULevel,
+ HTBLOG_U64_BITS_HIGH(ui64PxE64), HTBLOG_U64_BITS_LOW(ui64PxE64),
+ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+ break;
+ }
+ case 8:
+ {
+ IMG_UINT64 *pui64Px = psMemDesc->pvCpuVAddr; /* Give the virtual base address of Px */
+
+ pui64Px[uiIndex] = psDevPAddr->uiAddr /* Calculate the offset to that base */
+ >> psConfig->uiAddrLog2Align /* Shift away the unnecessary bits of the address */
+ << psConfig->uiAddrShift /* Shift back to fit address in the Px entry */
+ & psConfig->uiAddrMask; /* Delete unused higher bits */
+ pui64Px[uiIndex] |= pfnDerivePxEProt8(uiProtFlags, uiLog2DataPageSize);
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiIndex, eMMULevel,
+ HTBLOG_U64_BITS_HIGH(pui64Px[uiIndex]), HTBLOG_U64_BITS_LOW(pui64Px[uiIndex]),
+ (uiProtFlags & MMU_PROTFLAGS_INVALID)? 0: 1);
+ break;
+ }
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: PxE size not supported (%d) for level %d",
+ __func__, psConfig->uiBytesPerEntry, eMMULevel));
+
+ return PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ }
+
+#if defined(PDUMP)
+ PDumpMMUDumpPxEntries(eMMULevel,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psMemDesc->pvCpuVAddr,
+ psMemDesc->sDevPAddr,
+ uiIndex,
+ 1,
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif
+
+ psDevNode->pfnMMUCacheInvalidate(psDevNode, psMMUContext->hDevData,
+ eMMULevel,
+ (uiProtFlags & MMU_PROTFLAGS_INVALID)?IMG_TRUE:IMG_FALSE);
+
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ * MMU host control functions (Level Info) *
+ *****************************************************************************/
+
+
+/*************************************************************************/ /*!
+@Function _MMU_FreeLevel
+
+@Description Recursively frees the specified range of Px entries. If any
+ level has its last reference dropped then the MMU object
+ memory and the MMU_Levelx_Info will be freed.
+
+                At each level we might be crossing a boundary from one Px to
+                another. The values in auiStartArray should only be used for
+                the first call into each level and the values in auiEndArray
+                should only be used in the last call for each level.
+                In order to determine if this is the first/last call we pass
+                in bFirst and bLast.
+                When one level recurses into the next, bFirst/bLast are only
+                set for the next level if they are set at this level and this
+                is the first/last iteration of this level's loop.
+                This gives each recursion the knowledge of the enclosing
+                level that it requires.
+
+@Input psMMUContext MMU context to operate on
+
+@Input psLevel Level info on which to free the
+ specified range
+
+@Input auiStartArray Array of start indexes (one for each level)
+
+@Input auiEndArray Array of end indexes (one for each level)
+
+@Input auiEntriesPerPxArray Array of number of entries for the Px
+ (one for each level)
+
+@Input apsConfig Array of PxE configs (one for each level)
+
+@Input aeMMULevel Array of MMU levels (one for each level)
+
+@Input pui32CurrentLevel Pointer to a variable which is set to our
+ current level
+
+@Input uiStartIndex Start index of the range to free
+
+@Input uiEndIndex End index of the range to free
+
+@Input bFirst This is the first call for this level
+
+@Input bLast This is the last call for this level
+
+@Return IMG_TRUE if the last reference to psLevel was dropped
+ */
+/*****************************************************************************/
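+/* Illustrative example (hypothetical two-level walk, values invented for
+ * this comment): freeing PT indexes 5..12 where the range starts in PD
+ * entry 3 and ends in PD entry 4 gives auiStartArray = {3, 5} and
+ * auiEndArray = {5, 13} (end indexes are exclusive). The recursion for
+ * PD entry 3 starts at PT index 5 (bFirst set) and runs to the end of
+ * that PT; the recursion for PD entry 4 starts at PT index 0 and stops
+ * at 13 (bLast set). */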
+static IMG_BOOL _MMU_FreeLevel(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 auiStartArray[],
+ IMG_UINT32 auiEndArray[],
+ IMG_UINT32 auiEntriesPerPxArray[],
+ const MMU_PxE_CONFIG *apsConfig[],
+ MMU_LEVEL aeMMULevel[],
+ IMG_UINT32 *pui32CurrentLevel,
+ IMG_UINT32 uiStartIndex,
+ IMG_UINT32 uiEndIndex,
+ IMG_BOOL bFirst,
+ IMG_BOOL bLast,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT32 uiThisLevel = *pui32CurrentLevel;
+ const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel];
+ IMG_UINT32 i;
+ IMG_BOOL bFreed = IMG_FALSE;
+
+ /* Sanity check */
+ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+ PVR_ASSERT(psLevel != NULL);
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel: level = %d, range %d - %d, refcount = %d",
+ aeMMULevel[uiThisLevel], uiStartIndex,
+ uiEndIndex, psLevel->ui32RefCount));
+
+ for (i = uiStartIndex;(i < uiEndIndex) && (psLevel != NULL);i++)
+ {
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ MMU_Levelx_INFO *psNextLevel = psLevel->apsNextLevel[i];
+ IMG_UINT32 uiNextStartIndex;
+ IMG_UINT32 uiNextEndIndex;
+ IMG_BOOL bNextFirst;
+ IMG_BOOL bNextLast;
+
+ /* If we're crossing a Px then the start index changes */
+ if (bFirst && (i == uiStartIndex))
+ {
+ uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+ bNextFirst = IMG_TRUE;
+ }
+ else
+ {
+ uiNextStartIndex = 0;
+ bNextFirst = IMG_FALSE;
+ }
+
+ /* If we're crossing a Px then the end index changes */
+ if (bLast && (i == (uiEndIndex - 1)))
+ {
+ uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+ bNextLast = IMG_TRUE;
+ }
+ else
+ {
+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+ bNextLast = IMG_FALSE;
+ }
+
+ /* Recurse into the next level */
+ (*pui32CurrentLevel)++;
+ if (_MMU_FreeLevel(psMMUContext, psNextLevel, auiStartArray,
+ auiEndArray, auiEntriesPerPxArray,
+ apsConfig, aeMMULevel, pui32CurrentLevel,
+ uiNextStartIndex, uiNextEndIndex,
+ bNextFirst, bNextLast, uiLog2DataPageSize))
+ {
+ PVRSRV_ERROR eError;
+
+ /* Un-wire the entry */
+ eError = _SetupPxE(psMMUContext,
+ psLevel,
+ i,
+ psConfig,
+ aeMMULevel[uiThisLevel],
+ NULL,
+#if defined(PDUMP)
+ NULL, /* Only required for data page */
+ NULL, /* Only required for data page */
+ 0, /* Only required for data page */
+#endif
+ MMU_PROTFLAGS_INVALID,
+ uiLog2DataPageSize);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Free table of the level below, pointed to by this table entry.
+ * We don't destroy the table inside the above _MMU_FreeLevel call because we
+ * first have to set the table entry of the level above to invalid. */
+ _PxMemFree(psMMUContext, &psNextLevel->sMemDesc, aeMMULevel[*pui32CurrentLevel]);
+ OSFreeMem(psNextLevel);
+
+ /* The level below us is empty, drop the refcount and clear the pointer */
+ psLevel->ui32RefCount--;
+ psLevel->apsNextLevel[i] = NULL;
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+ (*pui32CurrentLevel)--;
+ }
+ else
+ {
+ psLevel->ui32RefCount--;
+ }
+
+ /*
+ Free this level if it is no longer referenced, unless it's the base
+ level in which case it's part of the MMU context and should be freed
+ when the MMU context is freed
+ */
+ if ((psLevel->ui32RefCount == 0) && (psLevel != &psMMUContext->sBaseLevelInfo))
+ {
+ bFreed = IMG_TRUE;
+ }
+ }
+
+ /* Level one flushing is done when we actually write the table entries */
+ if ((aeMMULevel[uiThisLevel] != MMU_LEVEL_1) && (psLevel != NULL))
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+ }
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_FreeLevel end: level = %d, refcount = %d",
+ aeMMULevel[uiThisLevel], bFreed?0: (psLevel)?psLevel->ui32RefCount:-1));
+
+ return bFreed;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_AllocLevel
+
+@Description    Recursively allocates the specified range of Px entries,
+                allocating MMU object memory and MMU_Levelx_INFO structures
+                for any level that does not yet exist.
+
+                At each level we might be crossing a boundary from one Px to
+                another. The values in auiStartArray should only be used for
+                the first call into each level and the values in auiEndArray
+                should only be used in the last call for each level.
+                In order to determine if this is the first/last call we pass
+                in bFirst and bLast.
+                When one level recurses into the next, bFirst/bLast are only
+                set for the next level if they are set at this level and this
+                is the first/last iteration of this level's loop.
+                This gives each recursion the knowledge of the enclosing
+                level that it requires.
+
+@Input psMMUContext MMU context to operate on
+
+@Input          psLevel                 Level info on which to allocate the
+                                        specified range
+
+@Input auiStartArray Array of start indexes (one for each level)
+
+@Input auiEndArray Array of end indexes (one for each level)
+
+@Input auiEntriesPerPxArray Array of number of entries for the Px
+ (one for each level)
+
+@Input apsConfig Array of PxE configs (one for each level)
+
+@Input aeMMULevel Array of MMU levels (one for each level)
+
+@Input pui32CurrentLevel Pointer to a variable which is set to our
+ current level
+
+@Input          uiStartIndex            Start index of the range to allocate
+
+@Input          uiEndIndex              End index of the range to allocate
+
+@Input bFirst This is the first call for this level
+
+@Input bLast This is the last call for this level
+
+@Return         PVRSRV_OK if the allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR _MMU_AllocLevel(MMU_CONTEXT *psMMUContext,
+ MMU_Levelx_INFO *psLevel,
+ IMG_UINT32 auiStartArray[],
+ IMG_UINT32 auiEndArray[],
+ IMG_UINT32 auiEntriesPerPxArray[],
+ const MMU_PxE_CONFIG *apsConfig[],
+ MMU_LEVEL aeMMULevel[],
+ IMG_UINT32 *pui32CurrentLevel,
+ IMG_UINT32 uiStartIndex,
+ IMG_UINT32 uiEndIndex,
+ IMG_BOOL bFirst,
+ IMG_BOOL bLast,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT32 uiThisLevel = *pui32CurrentLevel; /* Starting with 0 */
+ const MMU_PxE_CONFIG *psConfig = apsConfig[uiThisLevel]; /* The table config for the current level */
+ PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ IMG_UINT32 uiAllocState = 99; /* Debug info to check what progress was made in the function. Updated during this function. */
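+	/* uiAllocState legend (derived from the unwind path below):
+	 *   99 = nothing done yet for this index
+	 *    0 = MMU_Levelx_INFO allocation failed
+	 *    1 = Px memory allocation failed (free the info struct)
+	 *    2 = entry setup or recursion failed (also free the Px memory)
+	 *    3 = index fully constructed (full recursive free on unwind) */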
+ IMG_UINT32 i;
+
+ /* Sanity check */
+ PVR_ASSERT(*pui32CurrentLevel < MMU_MAX_LEVEL);
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel: level = %d, range %d - %d, refcount = %d",
+ aeMMULevel[uiThisLevel], uiStartIndex,
+ uiEndIndex, psLevel->ui32RefCount));
+
+ /* Go from uiStartIndex to uiEndIndex through the Px */
+ for (i = uiStartIndex;i < uiEndIndex;i++)
+ {
+		/* Only try an allocation if this is not the last level; the PT
+		   itself was already allocated when its parent set up the
+		   corresponding PD entry */
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ IMG_UINT32 uiNextStartIndex;
+ IMG_UINT32 uiNextEndIndex;
+ IMG_BOOL bNextFirst;
+ IMG_BOOL bNextLast;
+
+ /* If there is already a next Px level existing, do not allocate it */
+ if (!psLevel->apsNextLevel[i])
+ {
+ MMU_Levelx_INFO *psNextLevel;
+ IMG_UINT32 ui32AllocSize;
+ IMG_UINT32 uiNextEntries;
+
+ /* Allocate and setup the next level */
+ uiNextEntries = auiEntriesPerPxArray[uiThisLevel + 1];
+ ui32AllocSize = sizeof(MMU_Levelx_INFO);
+ if (aeMMULevel[uiThisLevel + 1] != MMU_LEVEL_1)
+ {
+ ui32AllocSize += sizeof(MMU_Levelx_INFO *) * (uiNextEntries - 1);
+ }
+ psNextLevel = OSAllocZMem(ui32AllocSize);
+ if (psNextLevel == NULL)
+ {
+ uiAllocState = 0;
+ goto e0;
+ }
+
+ /* Hook in this level for next time */
+ psLevel->apsNextLevel[i] = psNextLevel;
+
+ psNextLevel->ui32NumOfEntries = uiNextEntries;
+ psNextLevel->ui32RefCount = 0;
+ /* Allocate Px memory for a sub level*/
+ eError = _PxMemAlloc(psMMUContext, uiNextEntries, apsConfig[uiThisLevel + 1],
+ aeMMULevel[uiThisLevel + 1],
+ &psNextLevel->sMemDesc,
+ psConfig->uiAddrLog2Align);
+ if (eError != PVRSRV_OK)
+ {
+ uiAllocState = 1;
+ goto e0;
+ }
+
+ /* Wire up the entry */
+ eError = _SetupPxE(psMMUContext,
+ psLevel,
+ i,
+ psConfig,
+ aeMMULevel[uiThisLevel],
+ &psNextLevel->sMemDesc.sDevPAddr,
+#if defined(PDUMP)
+ NULL, /* Only required for data page */
+ NULL, /* Only required for data page */
+ 0, /* Only required for data page */
+#endif
+ 0,
+ uiLog2DataPageSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ uiAllocState = 2;
+ goto e0;
+ }
+
+ psLevel->ui32RefCount++;
+ }
+
+ /* If we're crossing a Px then the start index changes */
+ if (bFirst && (i == uiStartIndex))
+ {
+ uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+ bNextFirst = IMG_TRUE;
+ }
+ else
+ {
+ uiNextStartIndex = 0;
+ bNextFirst = IMG_FALSE;
+ }
+
+ /* If we're crossing a Px then the end index changes */
+ if (bLast && (i == (uiEndIndex - 1)))
+ {
+ uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+ bNextLast = IMG_TRUE;
+ }
+ else
+ {
+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+ bNextLast = IMG_FALSE;
+ }
+
+ /* Recurse into the next level */
+ (*pui32CurrentLevel)++;
+ eError = _MMU_AllocLevel(psMMUContext, psLevel->apsNextLevel[i],
+ auiStartArray,
+ auiEndArray,
+ auiEntriesPerPxArray,
+ apsConfig,
+ aeMMULevel,
+ pui32CurrentLevel,
+ uiNextStartIndex,
+ uiNextEndIndex,
+ bNextFirst,
+ bNextLast,
+ uiLog2DataPageSize);
+ (*pui32CurrentLevel)--;
+ if (eError != PVRSRV_OK)
+ {
+ uiAllocState = 2;
+ goto e0;
+ }
+ }
+ else
+ {
+ /* All we need to do for level 1 is bump the refcount */
+ psLevel->ui32RefCount++;
+ }
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+
+ /* Level one flushing is done when we actually write the table entries */
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiStartIndex * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiEndIndex - uiStartIndex) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e0;
+ }
+
+ MMU_OBJ_DBG((PVR_DBG_ERROR, "_MMU_AllocLevel end: level = %d, refcount = %d",
+ aeMMULevel[uiThisLevel], psLevel->ui32RefCount));
+ return PVRSRV_OK;
+
+ e0:
+ /* Sanity check that we've not come down this route unexpectedly */
+ PVR_ASSERT(uiAllocState!=99);
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_AllocLevel: Error %d allocating Px for level %d in stage %d"
+ ,eError, aeMMULevel[uiThisLevel], uiAllocState));
+
+	/* The start value of the index variable i is deliberately not
+	   reinitialised: this loop unwinds, in reverse order, whatever was
+	   initialised before the failure, so i already holds the right
+	   value. */
+ for (/* i already set */ ; i>= uiStartIndex && i< uiEndIndex; i--)
+ {
+ switch(uiAllocState)
+ {
+ IMG_UINT32 uiNextStartIndex;
+ IMG_UINT32 uiNextEndIndex;
+ IMG_BOOL bNextFirst;
+ IMG_BOOL bNextLast;
+
+ case 3:
+ /* If we're crossing a Px then the start index changes */
+ if (bFirst && (i == uiStartIndex))
+ {
+ uiNextStartIndex = auiStartArray[uiThisLevel + 1];
+ bNextFirst = IMG_TRUE;
+ }
+ else
+ {
+ uiNextStartIndex = 0;
+ bNextFirst = IMG_FALSE;
+ }
+
+ /* If we're crossing a Px then the end index changes */
+ if (bLast && (i == (uiEndIndex - 1)))
+ {
+ uiNextEndIndex = auiEndArray[uiThisLevel + 1];
+ bNextLast = IMG_TRUE;
+ }
+ else
+ {
+ uiNextEndIndex = auiEntriesPerPxArray[uiThisLevel + 1];
+ bNextLast = IMG_FALSE;
+ }
+
+ if (aeMMULevel[uiThisLevel] != MMU_LEVEL_1)
+ {
+ (*pui32CurrentLevel)++;
+ if (_MMU_FreeLevel(psMMUContext, psLevel->apsNextLevel[i],
+ auiStartArray, auiEndArray,
+ auiEntriesPerPxArray, apsConfig,
+ aeMMULevel, pui32CurrentLevel,
+ uiNextStartIndex, uiNextEndIndex,
+ bNextFirst, bNextLast, uiLog2DataPageSize))
+ {
+ psLevel->ui32RefCount--;
+ psLevel->apsNextLevel[i] = NULL;
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+ (*pui32CurrentLevel)--;
+ }
+ else
+ {
+ /* We should never come down this path, but it's here
+ for completeness */
+ psLevel->ui32RefCount--;
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ }
+ /* fallthrough */
+			case 2:
+				if (psLevel->apsNextLevel[i] != NULL &&
+				    psLevel->apsNextLevel[i]->ui32RefCount == 0)
+				{
+					/* Free the sub-level's Px memory allocated for this
+					 * entry, not this level's own memory descriptor */
+					_PxMemFree(psMMUContext, &psLevel->apsNextLevel[i]->sMemDesc,
+					           aeMMULevel[uiThisLevel + 1]);
+				}
+ /* fallthrough */
+ case 1:
+ if (psLevel->apsNextLevel[i] != NULL &&
+ psLevel->apsNextLevel[i]->ui32RefCount == 0)
+ {
+ OSFreeMem(psLevel->apsNextLevel[i]);
+ psLevel->apsNextLevel[i] = NULL;
+ }
+ /* fallthrough */
+ case 0:
+ uiAllocState = 3;
+ break;
+ }
+ }
+ return eError;
+}
+
+/*****************************************************************************
+ * MMU page table functions *
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function _MMU_GetLevelData
+
+@Description    Gets all the level data and calculates the indexes for the
+                specified address range
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrStart Start device virtual address
+
+@Input sDevVAddrEnd End device virtual address
+
+@Input uiLog2DataPageSize Log2 of the page size to use
+
+@Output         auiStartArray           Array of start indexes (one for each level)
+
+@Output         auiEndArray             Array of end indexes (one for each level)
+
+@Output         auiEntriesPerPx         Array of number of entries for the Px
+                                        (one for each level)
+
+@Output         apsConfig               Array of PxE configs (one for each level)
+
+@Output         aeMMULevel              Array of MMU levels (one for each level)
+
+@Output         ppsMMUDevVAddrConfig    Device virtual address config
+
+@Output         phPriv                  Private data of page size config
+
+@Return         None
+ */
+/*****************************************************************************/
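+/* Illustrative shape of the output (hypothetical 3-level, 4 KB page
+ * config): the arrays are filled from the top level down, e.g.
+ *   i = 0: PC indexes, aeMMULevel[0] = MMU_LEVEL_3
+ *   i = 1: PD indexes, aeMMULevel[1] = MMU_LEVEL_2
+ *   i = 2: PT indexes, aeMMULevel[2] = MMU_LEVEL_1
+ * On a device without a PC (or PD) level the corresponding block is
+ * skipped and the remaining levels shift up to start at i = 0. */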
+static void _MMU_GetLevelData(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT32 uiLog2DataPageSize,
+ IMG_UINT32 auiStartArray[],
+ IMG_UINT32 auiEndArray[],
+ IMG_UINT32 auiEntriesPerPx[],
+ const MMU_PxE_CONFIG *apsConfig[],
+ MMU_LEVEL aeMMULevel[],
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv)
+{
+ const MMU_PxE_CONFIG *psMMUPDEConfig;
+ const MMU_PxE_CONFIG *psMMUPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i = 0;
+
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+ &psMMUPDEConfig,
+ &psMMUPTEConfig,
+ ppsMMUDevVAddrConfig,
+ phPriv);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psDevVAddrConfig = *ppsMMUDevVAddrConfig;
+
+ if (psDevVAddrConfig->uiPCIndexMask != 0)
+ {
+ auiStartArray[i] = _CalcPCEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+ auiEndArray[i] = _CalcPCEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPC;
+ apsConfig[i] = psDevAttrs->psBaseConfig;
+ aeMMULevel[i] = MMU_LEVEL_3;
+ i++;
+ }
+
+ if (psDevVAddrConfig->uiPDIndexMask != 0)
+ {
+ auiStartArray[i] = _CalcPDEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+ auiEndArray[i] = _CalcPDEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPD;
+ if (i == 0)
+ {
+ apsConfig[i] = psDevAttrs->psBaseConfig;
+ }
+ else
+ {
+ apsConfig[i] = psMMUPDEConfig;
+ }
+ aeMMULevel[i] = MMU_LEVEL_2;
+ i++;
+ }
+
+	/*
+	   There is always a PTE entry so we have a slightly different behaviour than above.
+	   E.g. for 2 MB RGX pages the uiPTIndexMask is 0x0000000000 but there is
+	   still a PT with one entry.
+	*/
+ auiStartArray[i] = _CalcPTEIdx(sDevVAddrStart, psDevVAddrConfig, IMG_FALSE);
+ if (psDevVAddrConfig->uiPTIndexMask !=0)
+ {
+ auiEndArray[i] = _CalcPTEIdx(sDevVAddrEnd, psDevVAddrConfig, IMG_TRUE);
+ }
+ else
+ {
+		/*
+		   If the PTE mask is zero it means there is only 1 PTE and thus,
+		   as an exclusive bound, the end array index is equal to the start
+		   index + 1.
+		*/
+
+ auiEndArray[i] = auiStartArray[i] + 1;
+ }
+
+ auiEntriesPerPx[i] = psDevVAddrConfig->uiNumEntriesPT;
+
+ if (i == 0)
+ {
+ apsConfig[i] = psDevAttrs->psBaseConfig;
+ }
+ else
+ {
+ apsConfig[i] = psMMUPTEConfig;
+ }
+ aeMMULevel[i] = MMU_LEVEL_1;
+}
+
+static void _MMU_PutLevelData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hPriv)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+}
+
+/*************************************************************************/ /*!
+@Function _AllocPageTables
+
+@Description Allocate page tables and any higher level MMU objects required
+ for the specified virtual range
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrStart Start device virtual address
+
+@Input sDevVAddrEnd End device virtual address
+
+@Input uiLog2DataPageSize Page size of the data pages
+
+@Return PVRSRV_OK if the allocation was successful
+ */
+/*****************************************************************************/
+static PVRSRV_ERROR
+_AllocPageTables(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+ MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+ const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_HANDLE hPriv;
+ IMG_UINT32 ui32CurrentLevel = 0;
+
+
+ PVR_DPF((PVR_DBG_ALLOC,
+ "_AllocPageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+ sDevVAddrStart.uiAddr,
+ sDevVAddrEnd.uiAddr
+ ));
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Allocating page tables for %"IMG_UINT64_FMTSPEC" bytes virtual range: "
+ IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+ (IMG_UINT64)sDevVAddrEnd.uiAddr - (IMG_UINT64)sDevVAddrStart.uiAddr,
+ (IMG_UINT64)sDevVAddrStart.uiAddr,
+ (IMG_UINT64)sDevVAddrEnd.uiAddr);
+#endif
+
+ _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+ (IMG_UINT32) uiLog2DataPageSize, auiStartArray, auiEndArray,
+ auiEntriesPerPx, apsConfig, aeMMULevel,
+ &psDevVAddrConfig, &hPriv);
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_ALLOC,
+ HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+ HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+ eError = _MMU_AllocLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+ auiStartArray, auiEndArray, auiEntriesPerPx,
+ apsConfig, aeMMULevel, &ui32CurrentLevel,
+ auiStartArray[0], auiEndArray[0],
+ IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+ _MMU_PutLevelData(psMMUContext, hPriv);
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _FreePageTables
+
+@Description    Free page tables and any higher level MMU objects that are
+                no longer referenced for the specified virtual range.
+ This will fill the temporary free list of the MMU context which
+ needs cleanup after the call.
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrStart Start device virtual address
+
+@Input sDevVAddrEnd End device virtual address
+
+@Input uiLog2DataPageSize Page size of the data pages
+
+@Return None
+ */
+/*****************************************************************************/
+static void _FreePageTables(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrStart,
+ IMG_DEV_VIRTADDR sDevVAddrEnd,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_UINT32 auiStartArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEndArray[MMU_MAX_LEVEL];
+ IMG_UINT32 auiEntriesPerPx[MMU_MAX_LEVEL];
+ MMU_LEVEL aeMMULevel[MMU_MAX_LEVEL];
+ const MMU_PxE_CONFIG *apsConfig[MMU_MAX_LEVEL];
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_UINT32 ui32CurrentLevel = 0;
+ IMG_HANDLE hPriv;
+
+
+ PVR_DPF((PVR_DBG_ALLOC,
+ "_FreePageTables: vaddr range: "IMG_DEV_VIRTADDR_FMTSPEC":"IMG_DEV_VIRTADDR_FMTSPEC,
+ sDevVAddrStart.uiAddr,
+ sDevVAddrEnd.uiAddr
+ ));
+
+ _MMU_GetLevelData(psMMUContext, sDevVAddrStart, sDevVAddrEnd,
+ uiLog2DataPageSize, auiStartArray, auiEndArray,
+ auiEntriesPerPx, apsConfig, aeMMULevel,
+ &psDevVAddrConfig, &hPriv);
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_FREE,
+ HTBLOG_U64_BITS_HIGH(sDevVAddrStart.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrStart.uiAddr),
+ HTBLOG_U64_BITS_HIGH(sDevVAddrEnd.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddrEnd.uiAddr));
+
+ _MMU_FreeLevel(psMMUContext, &psMMUContext->sBaseLevelInfo,
+ auiStartArray, auiEndArray, auiEntriesPerPx,
+ apsConfig, aeMMULevel, &ui32CurrentLevel,
+ auiStartArray[0], auiEndArray[0],
+ IMG_TRUE, IMG_TRUE, uiLog2DataPageSize);
+
+ _MMU_PutLevelData(psMMUContext, hPriv);
+}
+
+
+/*************************************************************************/ /*!
+@Function _MMU_GetPTInfo
+
+@Description Get the PT level information and PT entry index for the specified
+ virtual address
+
+@Input psMMUContext MMU context to operate on
+
+@Input psDevVAddr Device virtual address to get the PTE info
+ from.
+
+@Input psDevVAddrConfig The current virtual address config obtained
+ by another function call before.
+
+@Output psLevel Level info of the PT
+
+@Output pui32PTEIndex Index into the PT the address corresponds to
+
+@Return None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_GetPTInfo(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig,
+ MMU_Levelx_INFO **psLevel,
+ IMG_UINT32 *pui32PTEIndex)
+{
+ MMU_Levelx_INFO *psLocalLevel = NULL;
+ MMU_LEVEL eMMULevel = psMMUContext->psDevAttrs->eTopLevel;
+ IMG_UINT32 uiPCEIndex;
+ IMG_UINT32 uiPDEIndex;
+
+ if ((eMMULevel <= MMU_LEVEL_0) || (eMMULevel >= MMU_LEVEL_LAST))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTInfo: Invalid MMU level"));
+		*psLevel = NULL;
+ return;
+ }
+
+ for (; eMMULevel > MMU_LEVEL_0; eMMULevel--)
+ {
+ if (eMMULevel == MMU_LEVEL_3)
+ {
+ /* find the page directory containing the PCE */
+ uiPCEIndex = _CalcPCEIdx (sDevVAddr, psDevVAddrConfig,
+ IMG_FALSE);
+ psLocalLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiPCEIndex];
+ }
+
+ if (eMMULevel == MMU_LEVEL_2)
+ {
+ /* find the page table containing the PDE */
+ uiPDEIndex = _CalcPDEIdx (sDevVAddr, psDevVAddrConfig,
+ IMG_FALSE);
+ if (psLocalLevel != NULL)
+ {
+ psLocalLevel = psLocalLevel->apsNextLevel[uiPDEIndex];
+ }
+ else
+ {
+ psLocalLevel =
+ psMMUContext->sBaseLevelInfo.apsNextLevel[uiPDEIndex];
+ }
+ }
+
+ if (eMMULevel == MMU_LEVEL_1)
+ {
+ /* find PTE index into page table */
+ *pui32PTEIndex = _CalcPTEIdx (sDevVAddr, psDevVAddrConfig,
+ IMG_FALSE);
+ if (psLocalLevel == NULL)
+ {
+ psLocalLevel = &psMMUContext->sBaseLevelInfo;
+ }
+ }
+ }
+ *psLevel = psLocalLevel;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_GetPTConfig
+
+@Description Get the level config. Call _MMU_PutPTConfig after use!
+
+@Input psMMUContext MMU context to operate on
+
+@Input uiLog2DataPageSize Log 2 of the page size
+
+@Output ppsConfig Config of the PTE
+
+@Output phPriv Private data handle to be passed back
+ when the info is put
+
+@Output ppsDevVAddrConfig Config of the device virtual addresses
+
+@Return None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_GetPTConfig(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsConfig,
+ IMG_HANDLE *phPriv,
+ const MMU_DEVVADDR_CONFIG **ppsDevVAddrConfig)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ const MMU_PxE_CONFIG *psPDEConfig;
+ const MMU_PxE_CONFIG *psPTEConfig;
+
+ if (psDevAttrs->pfnGetPageSizeConfiguration(uiLog2DataPageSize,
+ &psPDEConfig,
+ &psPTEConfig,
+ &psDevVAddrConfig,
+ phPriv) != PVRSRV_OK)
+ {
+ /*
+ There should be no way we got here unless uiLog2DataPageSize
+ has changed after the MMU_Alloc call (in which case it's a bug in
+ the MM code)
+ */
+ PVR_DPF((PVR_DBG_ERROR, "_MMU_GetPTConfig: Could not get valid page size config"));
+ PVR_ASSERT(0);
+ }
+
+ *ppsConfig = psPTEConfig;
+ *ppsDevVAddrConfig = psDevVAddrConfig;
+}
+
+/*************************************************************************/ /*!
+@Function _MMU_PutPTConfig
+
+@Description Put the level info. Has to be called after _MMU_GetPTConfig to
+ ensure correct refcounting.
+
+@Input psMMUContext MMU context to operate on
+
+@Input phPriv Private data handle created by
+ _MMU_GetPTConfig.
+
+@Return None
+ */
+/*****************************************************************************/
+static INLINE void _MMU_PutPTConfig(MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE hPriv)
+{
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+
+ if (psDevAttrs->pfnPutPageSizeConfiguration(hPriv) != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "_MMU_PutPTConfig: Could not put page size config"));
+ PVR_ASSERT(0);
+	}
+}
+
+
+/*****************************************************************************
+ * Public interface functions *
+ *****************************************************************************/
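+/* Illustrative call sequence for this interface (a sketch only: error
+ * handling, PMR creation/locking and the device setup are omitted, and
+ * the local variable names are hypothetical):
+ *
+ *     MMU_CONTEXT *psCtx;
+ *     IMG_DEV_VIRTADDR sVAddr;  // chosen by the caller's VA manager
+ *
+ *     eError = MMU_ContextCreate(psDevNode, &psCtx, psDevAttrs);
+ *     eError = MMU_Alloc(psCtx, uiSize, &uiActualSize, uiProtFlags,
+ *                        uiAlign, &sVAddr, uiLog2PageSize);
+ *     eError = MMU_MapPages(psCtx, uiMapFlags, sVAddr, psPMR, 0,
+ *                           ui32NumPages, NULL, uiLog2PageSize);
+ *     ...
+ *     MMU_UnmapPages(psCtx, 0, sVAddr, ui32NumPages, NULL,
+ *                    uiLog2PageSize, IMG_FALSE);
+ *     MMU_Free(psCtx, sVAddr, uiSize, uiLog2PageSize);
+ *     MMU_ContextDestroy(psCtx);
+ */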
+
+/*
+ MMU_ContextCreate
+ */
+PVRSRV_ERROR
+MMU_ContextCreate(PVRSRV_DEVICE_NODE *psDevNode,
+ MMU_CONTEXT **ppsMMUContext,
+ MMU_DEVICEATTRIBS *psDevAttrs)
+{
+ MMU_CONTEXT *psMMUContext;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ const MMU_PxE_CONFIG *psConfig;
+ MMU_PHYSMEM_CONTEXT *psCtx;
+ IMG_UINT32 ui32BaseObjects;
+ IMG_UINT32 ui32Size;
+ IMG_CHAR sBuf[40];
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ psConfig = psDevAttrs->psBaseConfig;
+ psDevVAddrConfig = psDevAttrs->psTopLevelDevVAddrConfig;
+
+ switch(psDevAttrs->eTopLevel)
+ {
+ case MMU_LEVEL_3:
+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPC;
+ break;
+
+ case MMU_LEVEL_2:
+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPD;
+ break;
+
+ case MMU_LEVEL_1:
+ ui32BaseObjects = psDevVAddrConfig->uiNumEntriesPT;
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Invalid MMU config"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+	/* Allocate the MMU context together with the base level Px info structures */
+ ui32Size = sizeof(MMU_CONTEXT) +
+ ((ui32BaseObjects - 1) * sizeof(MMU_Levelx_INFO *));
+
+ psMMUContext = OSAllocZMem(ui32Size);
+ if (psMMUContext == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocZMem failed"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+#if defined(PDUMP)
+ /* Clear the refcount */
+ psMMUContext->ui32PDumpContextIDRefCount = 0;
+#endif
+ /* Record Device specific attributes in the context for subsequent use */
+ psMMUContext->psDevAttrs = psDevAttrs;
+ psMMUContext->psDevNode = psDevNode;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ {
+ IMG_UINT32 ui32OSid, ui32OSidReg;
+ IMG_BOOL bOSidAxiProt;
+
+ RetrieveOSidsfromPidList(OSGetCurrentClientProcessIDKM(), &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ MMU_SetOSids(psMMUContext, ui32OSid, ui32OSidReg, bOSidAxiProt);
+ }
+#endif
+
+ /*
+ Allocate physmem context and set it up
+ */
+ psCtx = OSAllocZMem(sizeof(MMU_PHYSMEM_CONTEXT));
+ if (psCtx == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocZMem failed"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+ psMMUContext->psPhysMemCtx = psCtx;
+
+ psCtx->psDevNode = psDevNode;
+
+ OSSNPrintf(sBuf, sizeof(sBuf)-1, "pgtables %p", psCtx);
+ psCtx->uiPhysMemRANameAllocSize = OSStringLength(sBuf)+1;
+ psCtx->pszPhysMemRAName = OSAllocMem(psCtx->uiPhysMemRANameAllocSize);
+ if (psCtx->pszPhysMemRAName == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Out of memory"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+
+ OSStringCopy(psCtx->pszPhysMemRAName, sBuf);
+
+ psCtx->psPhysMemRA = RA_Create(psCtx->pszPhysMemRAName,
+ /* subsequent import */
+ psDevNode->uiMMUPxLog2AllocGran,
+ RA_LOCKCLASS_1,
+ _MMU_PhysMem_RAImportAlloc,
+ _MMU_PhysMem_RAImportFree,
+ psCtx, /* priv */
+ IMG_FALSE);
+ if (psCtx->psPhysMemRA == NULL)
+ {
+ OSFreeMem(psCtx->pszPhysMemRAName);
+ psCtx->pszPhysMemRAName = NULL;
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e3;
+ }
+
+ /* Setup cleanup meta data to check if a MMU context
+ * has been destroyed and should not be accessed anymore */
+ psCtx->psCleanupData = OSAllocMem(sizeof(*(psCtx->psCleanupData)));
+ if (psCtx->psCleanupData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: ERROR call to OSAllocMem failed"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e4;
+ }
+
+ OSLockCreate(&psCtx->psCleanupData->hCleanupLock, LOCK_TYPE_PASSIVE);
+ psCtx->psCleanupData->bMMUContextExists = IMG_TRUE;
+ dllist_init(&psCtx->psCleanupData->sMMUCtxCleanupItemsHead);
+ OSAtomicWrite(&psCtx->psCleanupData->iRef, 1);
+
+ /* allocate the base level object */
+ /*
+	   Note: Although this is not required by this file until
+ the 1st allocation is made, a device specific callback
+ might request the base object address so we allocate
+ it up front.
+ */
+ if (_PxMemAlloc(psMMUContext,
+ ui32BaseObjects,
+ psConfig,
+ psDevAttrs->eTopLevel,
+ &psMMUContext->sBaseLevelInfo.sMemDesc,
+ psDevAttrs->ui32BaseAlign))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to alloc base level object"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e5;
+ }
+
+ dllist_init(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+
+ psMMUContext->sBaseLevelInfo.ui32NumOfEntries = ui32BaseObjects;
+ psMMUContext->sBaseLevelInfo.ui32RefCount = 0;
+
+ eError = OSLockCreate(&psMMUContext->hLock, LOCK_TYPE_PASSIVE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_ContextCreate: Failed to create lock for MMU_CONTEXT"));
+ goto e6;
+ }
+
+ /* return context */
+ *ppsMMUContext = psMMUContext;
+
+ return PVRSRV_OK;
+
+ e6:
+ _PxMemFree(psMMUContext, &psMMUContext->sBaseLevelInfo.sMemDesc, psDevAttrs->eTopLevel);
+ e5:
+	OSLockDestroy(psCtx->psCleanupData->hCleanupLock);
+	OSFreeMem(psCtx->psCleanupData);
+ e4:
+ RA_Delete(psCtx->psPhysMemRA);
+ e3:
+ OSFreeMem(psCtx->pszPhysMemRAName);
+ e2:
+ OSFreeMem(psCtx);
+ e1:
+ OSFreeMem(psMMUContext);
+ e0:
+ return eError;
+}
+
+/*
+ MMU_ContextDestroy
+ */
+void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PDLLIST_NODE psNode, psNextNode;
+
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *) psMMUContext->psDevNode;
+ MMU_CTX_CLEANUP_DATA *psCleanupData = psMMUContext->psPhysMemCtx->psCleanupData;
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Enter"));
+
+ if (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+ {
+ /* There should be no way to get here with live pages unless
+ there is a bug in this module or the MM code */
+ PVR_ASSERT(psMMUContext->sBaseLevelInfo.ui32RefCount == 0);
+ }
+
+ /* Cleanup lock must be acquired before MMUContext lock. Reverse order
+ * may lead to a deadlock and is reported by lockdep. */
+ OSLockAcquire(psCleanupData->hCleanupLock);
+ OSLockAcquire(psMMUContext->hLock);
+
+ /* Free the top level MMU object - will be put on defer free list.
+ * This has to be done before the step below that will empty the
+ * defer-free list. */
+ _PxMemFree(psMMUContext,
+ &psMMUContext->sBaseLevelInfo.sMemDesc,
+ psMMUContext->psDevAttrs->eTopLevel);
+
+ /* Empty the temporary defer-free list of Px */
+ _FreeMMUMapping(psDevNode, &psMMUContext->psPhysMemCtx->sTmpMMUMappingHead);
+ PVR_ASSERT(dllist_is_empty(&psMMUContext->psPhysMemCtx->sTmpMMUMappingHead));
+
+ /* Empty the defer free list so the cleanup thread will
+ * not have to access any MMU context related structures anymore */
+ dllist_foreach_node(&psCleanupData->sMMUCtxCleanupItemsHead,
+ psNode,
+ psNextNode)
+ {
+ MMU_CLEANUP_ITEM *psCleanup = IMG_CONTAINER_OF(psNode,
+ MMU_CLEANUP_ITEM,
+ sMMUCtxCleanupItem);
+
+ _FreeMMUMapping(psDevNode, &psCleanup->sMMUMappingHead);
+
+ dllist_remove_node(psNode);
+ }
+ PVR_ASSERT(dllist_is_empty(&psCleanupData->sMMUCtxCleanupItemsHead));
+
+ psCleanupData->bMMUContextExists = IMG_FALSE;
+
+ OSLockRelease(psCleanupData->hCleanupLock);
+
+ if (OSAtomicDecrement(&psCleanupData->iRef) == 0)
+ {
+ OSLockDestroy(psCleanupData->hCleanupLock);
+ OSFreeMem(psCleanupData);
+ }
+
+ /* Free physmem context */
+ RA_Delete(psMMUContext->psPhysMemCtx->psPhysMemRA);
+ psMMUContext->psPhysMemCtx->psPhysMemRA = NULL;
+ OSFreeMem(psMMUContext->psPhysMemCtx->pszPhysMemRAName);
+ psMMUContext->psPhysMemCtx->pszPhysMemRAName = NULL;
+
+ OSFreeMem(psMMUContext->psPhysMemCtx);
+
+ OSLockRelease(psMMUContext->hLock);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ RemovePidOSidCoupling(OSGetCurrentClientProcessIDKM());
+#endif
+
+ OSLockDestroy(psMMUContext->hLock);
+
+ /* free the context itself. */
+ OSFreeMem(psMMUContext);
+	/* not NULLing the pointer: it is a copy on the caller's stack */
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_ContextDestroy: Exit"));
+}
+
+/*
+ MMU_Alloc
+ */
+PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+ IMG_DEVMEM_SIZE_T uSize,
+ IMG_DEVMEM_SIZE_T *puActualSize,
+ IMG_UINT32 uiProtFlags,
+ IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_UINT32 uiLog2PageSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+
+ const MMU_PxE_CONFIG *psPDEConfig;
+ const MMU_PxE_CONFIG *psPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+ MMU_DEVICEATTRIBS *psDevAttrs;
+ IMG_HANDLE hPriv;
+
+#if !defined (DEBUG)
+ PVR_UNREFERENCED_PARAMETER(uDevVAddrAlignment);
+#endif
+ PVR_DPF ((PVR_DBG_MESSAGE, "MMU_Alloc: uSize=" IMG_DEVMEM_SIZE_FMTSPEC
+ ", uiProtFlags=0x%x, align="IMG_DEVMEM_ALIGN_FMTSPEC, uSize, uiProtFlags, uDevVAddrAlignment));
+
+ /* check params */
+ if (!psMMUContext || !psDevVAddr || !puActualSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid params"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevAttrs = psMMUContext->psDevAttrs;
+
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(uiLog2PageSize,
+ &psPDEConfig,
+ &psPTEConfig,
+ &psDevVAddrConfig,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: Failed to get config info (%d)", eError));
+ return eError;
+ }
+
+ /* size and alignment must be datapage granular */
+ if (((psDevVAddr->uiAddr & psDevVAddrConfig->uiPageOffsetMask) != 0)
+ || ((uSize & psDevVAddrConfig->uiPageOffsetMask) != 0))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: invalid address or size granularity"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ sDevVAddrEnd = *psDevVAddr;
+ sDevVAddrEnd.uiAddr += uSize;
+
+ OSLockAcquire(psMMUContext->hLock);
+ eError = _AllocPageTables(psMMUContext, *psDevVAddr, sDevVAddrEnd, uiLog2PageSize);
+ OSLockRelease(psMMUContext->hLock);
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"MMU_Alloc: _AllocPageTables failed"));
+		psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+		return PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES;
+	}
+
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_Free
+ */
+void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiLog2DataPageSize)
+{
+ IMG_DEV_VIRTADDR sDevVAddrEnd;
+
+ if (psMMUContext == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "MMU_Free: invalid parameter"));
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "MMU_Free: Freeing DevVAddr " IMG_DEV_VIRTADDR_FMTSPEC,
+ sDevVAddr.uiAddr));
+
+ /* ensure the address range to free is inside the heap */
+ sDevVAddrEnd = sDevVAddr;
+ sDevVAddrEnd.uiAddr += uiSize;
+
+ /* The Cleanup lock has to be taken before the MMUContext hLock to
+ * prevent deadlock scenarios. It is necessary only for parts of
+ * _SetupCleanup_FreeMMUMapping though.*/
+ OSLockAcquire(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ _FreePageTables(psMMUContext,
+ sDevVAddr,
+ sDevVAddrEnd,
+ uiLog2DataPageSize);
+
+ _SetupCleanup_FreeMMUMapping(psMMUContext->psDevNode,
+ psMMUContext->psPhysMemCtx);
+
+ OSLockRelease(psMMUContext->hLock);
+
+ OSLockRelease(psMMUContext->psPhysMemCtx->psCleanupData->hCleanupLock);
+}
+
+PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ PMR *psPMR,
+ IMG_UINT32 ui32PhysPgOffset,
+ IMG_UINT32 ui32MapPageCount,
+ IMG_UINT32 *paui32MapIndices,
+ IMG_UINT32 uiLog2HeapPageSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hPriv;
+
+ MMU_Levelx_INFO *psLevel = NULL;
+
+ MMU_Levelx_INFO *psPrevLevel = NULL;
+
+ IMG_UINT32 uiPTEIndex = 0;
+ IMG_UINT32 uiPageSize = (1 << uiLog2HeapPageSize);
+ IMG_UINT32 uiLoop = 0;
+ IMG_UINT32 ui32MappedCount = 0;
+ IMG_UINT32 uiPgOffset = 0;
+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
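+	/* [uiFlushStart, uiFlushEnd] tracks the window of PTEs written in the
+	 * current page table; the window is cache-cleaned whenever the walk
+	 * moves to a different page table and once more after the loop. */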
+
+ IMG_UINT64 uiProtFlags = 0;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *psDevPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_BOOL *pbValid;
+ IMG_BOOL bValid;
+ IMG_BOOL bDummyBacking = IMG_FALSE;
+ IMG_BOOL bNeedBacking = IMG_FALSE;
+
+#if defined(PDUMP)
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+
+ PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)",
+ (IMG_UINT64)(ui32MapPageCount * uiPageSize));
+#endif /*PDUMP*/
+
+#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG)
+	/* We're aware that on TC based platforms, accesses from the GPU to
+	 * CPU_LOCAL allocated DevMem fail, so we forbid mapping such a PMR
+	 * into the device MMU */
+ if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+#endif
+
+ /* Validate the most essential parameters */
+ if ((NULL == psMMUContext) || (0 == sDevVAddrBase.uiAddr) || (NULL == psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Invalid mapping parameter issued", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+	/* Allocate memory for device physical addresses and validity states;
+	   use the on-stack arrays when the page count is small enough */
+ if (ui32MapPageCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psDevPAddr = OSAllocMem(ui32MapPageCount * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pbValid = OSAllocMem(ui32MapPageCount * sizeof(IMG_BOOL));
+ if (pbValid == NULL)
+ {
+ /* Should allocation fail, clean-up here before exit */
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ OSFreeMem(psDevPAddr);
+ goto e0;
+ }
+ }
+ else
+ {
+ psDevPAddr = asDevPAddr;
+ pbValid = abValid;
+ }
+
+	/* Get the device physical addresses of the pages we are trying to map;
+	 * in the non-indexed case we can get all addresses at once */
+ if (NULL == paui32MapIndices)
+ {
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2HeapPageSize,
+ ui32MapPageCount,
+ (ui32PhysPgOffset << uiLog2HeapPageSize),
+ psDevPAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ }
+
+	/* Get the page table level configuration */
+ _MMU_GetPTConfig(psMMUContext,
+ (IMG_UINT32) uiLog2HeapPageSize,
+ &psConfig,
+ &hPriv,
+ &psDevVAddrConfig);
+
+ eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+ uiMappingFlags,
+ &uiMMUProtFlags,
+ psMMUContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /* Callback to get device specific protection flags */
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e2;
+ }
+
+ if (PMR_IsSparse(psPMR))
+ {
+		/* Decide whether unmapped parts of this sparse PMR need to be
+		 * backed by the dummy page */
+		bDummyBacking = PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags);
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ for (uiLoop = 0; uiLoop < ui32MapPageCount; uiLoop++)
+ {
+
+#if defined(PDUMP)
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif /*PDUMP*/
+
+ if (NULL != paui32MapIndices)
+ {
+ uiPgOffset = paui32MapIndices[uiLoop];
+
+			/* Calculate the device virtual address of the page */
+ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr + (uiPgOffset * uiPageSize);
+ /* Get the physical address to map */
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2HeapPageSize,
+ 1,
+ uiPgOffset * uiPageSize,
+ &sDevPAddr,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+ }
+ else
+ {
+ uiPgOffset = uiLoop + ui32PhysPgOffset;
+ sDevPAddr = psDevPAddr[uiLoop];
+ bValid = pbValid[uiLoop];
+ }
+
+		/*
+			The default value of the entry is invalid, so we don't need to mark
+			it as such if the page wasn't valid; we just advance past that address
+		*/
+ if (bValid || bDummyBacking)
+ {
+
+ if (!bValid)
+ {
+ sDevPAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64DummyPgPhysAddr;
+ }
+ else
+ {
+ /* check the physical alignment of the memory to map */
+ PVR_ASSERT((sDevPAddr.uiAddr & (uiPageSize-1)) == 0);
+ }
+
+#if defined(DEBUG)
+ {
+ IMG_INT32 i32FeatureVal = 0;
+ IMG_UINT32 ui32BitLength = FloorLog2(sDevPAddr.uiAddr);
+
+ i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psMMUContext->psDevNode, PHYS_BUS_WIDTH);
+ do {
+				/* i32FeatureVal can be negative where this feature is
+				 * undefined; in that case bail out rather than go ahead
+				 * with the debug comparison */
+ if(0 > i32FeatureVal)
+ break;
+
+ if (ui32BitLength > i32FeatureVal )
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_MMU_MapPage Failed. The physical address bitlength (%d) "
+ "is greater than what the chip can handle (%d).",
+ ui32BitLength, i32FeatureVal));
+
+ PVR_ASSERT(ui32BitLength <= i32FeatureVal );
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e3;
+ }
+ }while(0);
+ }
+#endif /*DEBUG*/
+
+#if defined(PDUMP)
+ if (bValid)
+ {
+ eError = PMR_PDumpSymbolicAddr(psPMR, uiPgOffset * uiPageSize,
+ sizeof(aszMemspaceName), &aszMemspaceName[0],
+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+ &uiSymbolicAddrOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+#endif /*PDUMP*/
+
+ psPrevLevel = psLevel;
+ /* Calculate PT index and get new table descriptor */
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+
+ if (psPrevLevel == psLevel)
+ {
+ uiFlushEnd = uiPTEIndex;
+ }
+ else
+ {
+ /* Flush if we moved to another psLevel, i.e. page table */
+ if (psPrevLevel != NULL)
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e3;
+ }
+
+ uiFlushStart = uiPTEIndex;
+ uiFlushEnd = uiFlushStart;
+ }
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_MAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+ HTBLOG_U64_BITS_HIGH(sDevPAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevPAddr.uiAddr));
+
+ eError = _SetupPTE(psMMUContext,
+ psLevel,
+ uiPTEIndex,
+ psConfig,
+ &sDevPAddr,
+ IMG_FALSE,
+#if defined(PDUMP)
+ (bValid)?aszMemspaceName:(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName),
+ (bValid)?aszSymbolicAddress:DUMMY_PAGE,
+ (bValid)?uiSymbolicAddrOffset:0,
+#endif /*PDUMP*/
+ uiProtFlags);
+
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Mapping failed", __func__));
+ goto e3;
+ }
+
+ if (bValid)
+ {
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "%s: devVAddr=" IMG_DEV_VIRTADDR_FMTSPEC ", size=0x%x",
+ __func__,
+ sDevVAddr.uiAddr,
+ uiPgOffset * uiPageSize));
+
+ ui32MappedCount++;
+ }
+ }
+
+ sDevVAddr.uiAddr += uiPageSize;
+ }
+
+ /* Flush the last level we touched */
+ if (psLevel != NULL)
+ {
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e3;
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+
+	/* Flush TLB for PTs */
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_FALSE);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, ui32MapPageCount);
+#endif /*PDUMP*/
+
+ return PVRSRV_OK;
+
+ e3:
+ OSLockRelease(psMMUContext->hLock);
+
+ if (PMR_IsSparse(psPMR) && PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiMappingFlags))
+ {
+ bNeedBacking = IMG_TRUE;
+ }
+
+ MMU_UnmapPages(psMMUContext,(bNeedBacking)?uiMappingFlags:0, sDevVAddrBase, uiLoop, paui32MapIndices, uiLog2HeapPageSize, bNeedBacking);
+ e2:
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+ e1:
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+ e0:
+ return eError;
+}
+
+/*
+ MMU_UnmapPages
+ */
+void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bDummyBacking)
+{
+ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ IMG_UINT32 uiFlushEnd = 0, uiFlushStart = 0;
+ MMU_Levelx_INFO *psLevel = NULL;
+ MMU_Levelx_INFO *psPrevLevel = NULL;
+ IMG_HANDLE hPriv;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_UINT64 uiProtFlags = 0;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+ IMG_DEV_PHYADDR sDummyPgDevPhysAddr;
+ IMG_BOOL bUnmap = IMG_TRUE;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX,
+ ui32PageCount,
+ (IMG_UINT64)sDevVAddr.uiAddr,
+ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+ sDummyPgDevPhysAddr.uiAddr = psMMUContext->psDevNode->sDummyPage.ui64DummyPgPhysAddr;
+ bUnmap = (bDummyBacking)?IMG_FALSE:IMG_TRUE;
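+	/* With dummy backing the entries are re-pointed at the dummy page
+	 * rather than invalidated, so this path is a remap rather than a
+	 * true unmap. */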
+ /* Get PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ if (_MMU_ConvertDevMemFlags(bUnmap,
+ uiMappingFlags,
+ &uiMMUProtFlags,
+ psMMUContext) != PVRSRV_OK)
+ {
+ return;
+ }
+
+ /* Callback to get device specific protection flags */
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+ }
+ else if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2PageSize);
+ }
+
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ /* Unmap page by page */
+ while (ui32Loop < ui32PageCount)
+ {
+ if (NULL != pai32FreeIndices)
+ {
+			/* Calculate the device virtual address of the page */
+ sDevVAddr.uiAddr = sDevVAddrBase.uiAddr +
+ pai32FreeIndices[ui32Loop] * uiPageSize;
+ }
+
+ psPrevLevel = psLevel;
+ /* Calculate PT index and get new table descriptor */
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+
+ if (psPrevLevel == psLevel)
+ {
+ uiFlushEnd = uiPTEIndex;
+ }
+ else
+ {
+ /* Flush if we moved to another psLevel, i.e. page table */
+ if (psPrevLevel != NULL)
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psPrevLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psPrevLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ }
+
+ uiFlushStart = uiPTEIndex;
+ uiFlushEnd = uiFlushStart;
+ }
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+ if (_SetupPTE(psMMUContext,
+ psLevel,
+ uiPTEIndex,
+ psConfig,
+ (bDummyBacking)?&sDummyPgDevPhysAddr:&gsBadDevPhyAddr,
+ bUnmap,
+#if defined(PDUMP)
+ (bDummyBacking)?(psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName):NULL,
+ (bDummyBacking)?DUMMY_PAGE:NULL,
+ 0U,
+#endif
+ uiProtFlags) != PVRSRV_OK )
+ {
+ goto e0;
+ }
+
+ /* Check we haven't wrapped around */
+ PVR_ASSERT(psLevel->ui32RefCount <= psLevel->ui32NumOfEntries);
+ ui32Loop++;
+ sDevVAddr.uiAddr += uiPageSize;
+ }
+
+ /* Flush the last level we touched */
+ if (psLevel != NULL)
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiFlushEnd+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+	/* Flush TLB for PTs */
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_TRUE);
+
+ return;
+
+	e0:
+	OSLockRelease(psMMUContext->hLock);
+	_MMU_PutPTConfig(psMMUContext, hPriv);
+	PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to write the page table entry"));
+	PVR_ASSERT(0);
+	return;
+}
+
+PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSizeBytes,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_UINT32 uiLog2HeapPageSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 uiCount, i;
+ IMG_UINT32 uiPageSize = 1 << uiLog2HeapPageSize;
+ IMG_UINT32 uiPTEIndex = 0;
+ IMG_UINT64 uiProtFlags;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+ MMU_Levelx_INFO *psLevel = NULL;
+ IMG_HANDLE hPriv;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *psDevPAddr;
+ IMG_BOOL *pbValid;
+ IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+ IMG_UINT32 ui32MappedCount = 0;
+ PDUMPCOMMENT("Wire up Page Table entries to point to the Data Pages (%"IMG_INT64_FMTSPECd" bytes)", uiSizeBytes);
+#endif /*PDUMP*/
+
+ /* We should verify the size and contiguity when supporting variable page size */
+
+ PVR_ASSERT (psMMUContext != NULL);
+ PVR_ASSERT (psPMR != NULL);
+
+#if defined(TC_MEMORY_CONFIG) || defined(PLATO_MEMORY_CONFIG)
+	/* We're aware that on TC based platforms, accesses from the GPU to
+	 * CPU_LOCAL allocated DevMem fail, so we forbid mapping such a PMR
+	 * into the device MMU */
+ if (PMR_Flags(psPMR) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Mapping a CPU_LOCAL PMR to device is forbidden on this platform", __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+#endif
+
+ /* Allocate memory for page-frame-numbers and validity states,
+ N.B. assert could be triggered by an illegal uiSizeBytes */
+ uiCount = uiSizeBytes >> uiLog2HeapPageSize;
+ PVR_ASSERT((IMG_DEVMEM_OFFSET_T)uiCount << uiLog2HeapPageSize == uiSizeBytes);
+ if (uiCount > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psDevPAddr = OSAllocMem(uiCount * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pbValid = OSAllocMem(uiCount * sizeof(IMG_BOOL));
+ if (pbValid == NULL)
+ {
+ /* Should allocation fail, clean-up here before exit */
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR device PFN state"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ OSFreeMem(psDevPAddr);
+ goto e0;
+ }
+ }
+ else
+ {
+ psDevPAddr = asDevPAddr;
+ pbValid = abValid;
+ }
+
+ /* Get general PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2HeapPageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ eError = _MMU_ConvertDevMemFlags(IMG_FALSE,
+ uiMappingFlags,
+ &uiMMUProtFlags,
+ psMMUContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ /* Callback to get device specific protection flags */
+
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags , uiLog2HeapPageSize);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ goto e1;
+ }
+
+
+	/* The heap page size is the amount of contiguity in the underlying
+	   pages. Normally this would be constant for the system, but that
+	   constant needs to be communicated in case it's ever different;
+	   the caller guarantees that PMRLockSysPhysAddr() has already
+	   been called */
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2HeapPageSize,
+ uiCount,
+ 0,
+ psDevPAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+
+ /* Map in all pages of that PMR, page by page */
+ for (i=0, uiCount=0; uiCount < uiSizeBytes; i++)
+ {
+#if defined(DEBUG)
+ {
+ IMG_INT32 i32FeatureVal = 0;
+ IMG_UINT32 ui32BitLength = FloorLog2(psDevPAddr[i].uiAddr);
+ i32FeatureVal = PVRSRV_GET_DEVICE_FEATURE_VALUE(psMMUContext->psDevNode, PHYS_BUS_WIDTH);
+ do {
+ if (i32FeatureVal < 0)
+ break;
+
+ if (ui32BitLength > (IMG_UINT32) i32FeatureVal)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: The physical address bitlength (%d) "
+ "is greater than what the chip can handle (%d).",
+ __func__, ui32BitLength, i32FeatureVal));
+
+ PVR_ASSERT(ui32BitLength <= (IMG_UINT32) i32FeatureVal);
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ OSLockRelease(psMMUContext->hLock);
+ goto e1;
+ }
+ } while (0);
+ }
+#endif /*DEBUG*/
+#if defined(PDUMP)
+ {
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR, uiCount,
+ sizeof(aszMemspaceName), &aszMemspaceName[0],
+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+ &uiSymbolicAddrOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ ui32MappedCount++;
+ }
+#endif /*PDUMP*/
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_PMRMAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr),
+ HTBLOG_U64_BITS_HIGH(psDevPAddr[i].uiAddr), HTBLOG_U64_BITS_LOW(psDevPAddr[i].uiAddr));
+
+ /* Set the PT entry with the specified address and protection flags */
+ eError = _SetupPTE(psMMUContext, psLevel, uiPTEIndex,
+ psConfig, &psDevPAddr[i], IMG_FALSE,
+#if defined(PDUMP)
+ aszMemspaceName,
+ aszSymbolicAddress,
+ uiSymbolicAddrOffset,
+#endif /*PDUMP*/
+ uiProtFlags);
+ if (eError != PVRSRV_OK)
+ goto e2;
+
+ sDevVAddr.uiAddr += uiPageSize;
+ uiCount += uiPageSize;
+
+ /* Calculate PT index and get new table descriptor */
+ if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (uiCount != uiSizeBytes))
+ {
+ uiPTEIndex++;
+ }
+ else
+ {
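+ /* Reached the end of this page table (or of the range): clean the
+ * cache for the entries written since uiFlushStart, then fetch the
+ * descriptor for the next table. */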
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e2;
+
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+ }
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+
+ /* Flush TLB for PTs */
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_FALSE);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Wired up %d Page Table entries (out of %d)", ui32MappedCount, i);
+#endif /*PDUMP*/
+
+ return PVRSRV_OK;
+
+ e2:
+ OSLockRelease(psMMUContext->hLock);
+ MMU_UnmapPMRFast(psMMUContext,
+ sDevVAddrBase,
+ uiSizeBytes >> uiLog2HeapPageSize,
+ uiLog2HeapPageSize);
+ e1:
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(pbValid);
+ OSFreeMem(psDevPAddr);
+ }
+ e0:
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+/*
+ MMU_UnmapPMRFast
+ */
+void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 uiLog2PageSize)
+{
+ IMG_UINT32 uiPTEIndex = 0, ui32Loop=0;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ MMU_Levelx_INFO *psLevel = NULL;
+ IMG_HANDLE hPriv;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_DEV_VIRTADDR sDevVAddr = sDevVAddrBase;
+ IMG_UINT64 uiProtFlags = 0;
+ MMU_PROTFLAGS_T uiMMUProtFlags = 0;
+ IMG_UINT64 uiEntry = 0;
+ IMG_UINT32 uiFlushStart = 0;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Invalidate %d entries in page tables for virtual range: 0x%010"IMG_UINT64_FMTSPECX" to 0x%010"IMG_UINT64_FMTSPECX,
+ ui32PageCount,
+ (IMG_UINT64)sDevVAddr.uiAddr,
+ ((IMG_UINT64)sDevVAddr.uiAddr) + (uiPageSize*ui32PageCount)-1);
+#endif
+
+ /* Get PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ if (_MMU_ConvertDevMemFlags(IMG_TRUE,
+ 0,
+ &uiMMUProtFlags,
+ psMMUContext) != PVRSRV_OK)
+ {
+ return;
+ }
+
+ /* Callback to get device specific protection flags */
+
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt8(uiMMUProtFlags, uiLog2PageSize);
+
+ /* Fill the entry with a bad address but leave space for protection flags */
+ uiEntry = (gsBadDevPhyAddr.uiAddr & ~psConfig->uiProtMask) | uiProtFlags;
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ uiProtFlags = psMMUContext->psDevAttrs->pfnDerivePTEProt4(uiMMUProtFlags);
+
+ /* Fill the entry with a bad address but leave space for protection flags */
+ uiEntry = (((IMG_UINT32) gsBadDevPhyAddr.uiAddr) & ~psConfig->uiProtMask) | (IMG_UINT32) uiProtFlags;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ goto e0;
+ }
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+
+ /* Unmap page by page and keep the loop as quick as possible.
+ * Only use parts of _SetupPTE that need to be executed. */
+ while (ui32Loop < ui32PageCount)
+ {
+
+ /* Set the PT entry to invalid and poison it with a bad address */
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ ((IMG_UINT64*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = uiEntry;
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ ((IMG_UINT32*) psLevel->sMemDesc.pvCpuVAddr)[uiPTEIndex] = (IMG_UINT32) uiEntry;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: The page table entry byte length is not supported", __func__));
+ goto e1;
+ }
+
+ /* Log modifications */
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_UNMAP,
+ HTBLOG_U64_BITS_HIGH(sDevVAddr.uiAddr), HTBLOG_U64_BITS_LOW(sDevVAddr.uiAddr));
+
+ HTBLOGK(HTB_SF_MMU_PAGE_OP_TABLE,
+ HTBLOG_PTR_BITS_HIGH(psLevel), HTBLOG_PTR_BITS_LOW(psLevel),
+ uiPTEIndex, MMU_LEVEL_1,
+ HTBLOG_U64_BITS_HIGH(uiEntry), HTBLOG_U64_BITS_LOW(uiEntry),
+ IMG_FALSE);
+
+#if defined (PDUMP)
+ PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psLevel->sMemDesc.pvCpuVAddr,
+ psLevel->sMemDesc.sDevPAddr,
+ uiPTEIndex,
+ 1,
+ NULL,
+ NULL,
+ 0,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+ sDevVAddr.uiAddr += uiPageSize;
+ ui32Loop++;
+
+ /* Calculate PT index and get new table descriptor */
+ if (uiPTEIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (ui32Loop != ui32PageCount))
+ {
+ uiPTEIndex++;
+ }
+ else
+ {
+ psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiPTEIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTEIndex);
+ uiFlushStart = uiPTEIndex;
+ }
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ /* Flush TLB for PTs*/
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ IMG_TRUE);
+
+ return;
+
+ e1:
+ OSLockRelease(psMMUContext->hLock);
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+ e0:
+ PVR_DPF((PVR_DBG_ERROR, "MMU_UnmapPages: Failed to map/unmap page table"));
+ PVR_ASSERT(0);
+ return;
+}
+
+/*
+ MMU_ChangeValidity
+ */
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bMakeValid,
+ PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ IMG_HANDLE hPriv;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ const MMU_PxE_CONFIG *psConfig;
+ MMU_Levelx_INFO *psLevel = NULL;
+ IMG_UINT32 uiFlushStart = 0;
+ IMG_UINT32 uiPTIndex = 0;
+ IMG_UINT32 i;
+ IMG_UINT32 uiPageSize = 1 << uiLog2PageSize;
+ IMG_BOOL bValid;
+
+#if defined(PDUMP)
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiSymbolicAddrOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMPCOMMENT("Change valid bit of the data pages to %d (0x%"IMG_UINT64_FMTSPECX" - 0x%"IMG_UINT64_FMTSPECX")",
+ bMakeValid,
+ sDevVAddr.uiAddr,
+ sDevVAddr.uiAddr + (uiNumPages<<uiLog2PageSize) - 1 );
+#endif /*PDUMP*/
+
+ /* We should verify the size and contiguity when supporting variable page size */
+ PVR_ASSERT (psMMUContext != NULL);
+ PVR_ASSERT (psPMR != NULL);
+
+ /* Get general PT and address configs */
+ _MMU_GetPTConfig(psMMUContext, (IMG_UINT32) uiLog2PageSize,
+ &psConfig, &hPriv, &psDevVAddrConfig);
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTIndex);
+ uiFlushStart = uiPTIndex;
+
+ /* Do a page table walk and change attribute for every page in range. */
+ for (i=0; i < uiNumPages; )
+ {
+
+ /* Set the entry */
+ if (bMakeValid == IMG_TRUE)
+ {
+ /* Only set valid if a physical address exists (sparse allocs might have none) */
+ eError = PMR_IsOffsetValid(psPMR, uiLog2PageSize, 1, i<<uiLog2PageSize, &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot determine validity of page table entries page"));
+ goto e_exit;
+ }
+
+ if (bValid)
+ {
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ ((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] |= (psConfig->uiValidEnMask);
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+ goto e_exit;
+ }
+ }
+ }
+ else
+ {
+ if (psConfig->uiBytesPerEntry == 8)
+ {
+ ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+ }
+ else if (psConfig->uiBytesPerEntry == 4)
+ {
+ ((IMG_UINT32 *) psLevel->sMemDesc.pvCpuVAddr)[uiPTIndex] &= ~(psConfig->uiValidEnMask);
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_MMU_CONFIG_IS_WRONG;
+ PVR_DPF((PVR_DBG_ERROR, "Cannot change page table entries due to wrong configuration"));
+ goto e_exit;
+ }
+ }
+
+#if defined(PDUMP)
+ PMR_PDumpSymbolicAddr(psPMR, i<<uiLog2PageSize,
+ sizeof(aszMemspaceName), &aszMemspaceName[0],
+ sizeof(aszSymbolicAddress), &aszSymbolicAddress[0],
+ &uiSymbolicAddrOffset,
+ &uiNextSymName);
+
+ PDumpMMUDumpPxEntries(MMU_LEVEL_1,
+ psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psLevel->sMemDesc.pvCpuVAddr,
+ psLevel->sMemDesc.sDevPAddr,
+ uiPTIndex,
+ 1,
+ aszMemspaceName,
+ aszSymbolicAddress,
+ uiSymbolicAddrOffset,
+ psConfig->uiBytesPerEntry,
+ psConfig->uiAddrLog2Align,
+ psConfig->uiAddrShift,
+ psConfig->uiAddrMask,
+ psConfig->uiProtMask,
+ psConfig->uiValidEnMask,
+ 0,
+ psMMUContext->psDevAttrs->eMMUType);
+#endif /*PDUMP*/
+
+ sDevVAddr.uiAddr += uiPageSize;
+ i++;
+
+ /* Calculate PT index and get new table descriptor */
+ if (uiPTIndex < (psDevVAddrConfig->uiNumEntriesPT - 1) && (i != uiNumPages))
+ {
+ uiPTIndex++;
+ }
+ else
+ {
+
+ eError = psMMUContext->psDevNode->pfnDevPxClean(psMMUContext->psDevNode,
+ &psLevel->sMemDesc.psMapping->sMemHandle,
+ uiFlushStart * psConfig->uiBytesPerEntry + psLevel->sMemDesc.uiOffset,
+ (uiPTIndex+1 - uiFlushStart) * psConfig->uiBytesPerEntry);
+ if (eError != PVRSRV_OK)
+ goto e_exit;
+
+ _MMU_GetPTInfo(psMMUContext, sDevVAddr, psDevVAddrConfig,
+ &psLevel, &uiPTIndex);
+ uiFlushStart = uiPTIndex;
+ }
+ }
+
+ e_exit:
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ /* Flush TLB for PTs */
+ psMMUContext->psDevNode->pfnMMUCacheInvalidate(psMMUContext->psDevNode,
+ psMMUContext->hDevData,
+ MMU_LEVEL_1,
+ !bMakeValid);
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ MMU_AcquireBaseAddr
+ */
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr)
+{
+ if (!psMMUContext)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ *psPhysAddr = psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr;
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_ReleaseBaseAddr
+ */
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext)
+{
+ PVR_UNREFERENCED_PARAMETER(psMMUContext);
+}
+
+/*
+ MMU_SetDeviceData
+ */
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData)
+{
+ psMMUContext->hDevData = hDevData;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ MMU_SetOSids, MMU_GetOSids
+ */
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+ psMMUContext->ui32OSid = ui32OSid;
+ psMMUContext->ui32OSidReg = ui32OSidReg;
+ psMMUContext->bOSidAxiProt = bOSidAxiProt;
+
+ return;
+}
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+ *pui32OSid = psMMUContext->ui32OSid;
+ *pui32OSidReg = psMMUContext->ui32OSidReg;
+ *pbOSidAxiProt = psMMUContext->bOSidAxiProt;
+
+ return;
+}
+
+#endif
+
+/*
+ MMU_CheckFaultAddress
+ */
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ MMU_FAULT_DATA *psOutFaultData)
+{
+ /* Ideally the RGX defs should be via callbacks, but the function is only called from RGX. */
+#define MMU_VALID_STR(entry,level) \
+ (apszMMUValidStr[((((entry)&(RGX_MMUCTRL_##level##_DATA_ENTRY_PENDING_EN))!=0) << 1)| \
+ ((((entry)&(RGX_MMUCTRL_##level##_DATA_VALID_EN))!=0) << 0)])
+ static const IMG_PCHAR apszMMUValidStr[1<<2] = {/*--*/ "not valid",
+ /*-V*/ "valid",
+ /*P-*/ "pending",
+ /*PV*/ "inconsistent (pending and valid)"};
+ MMU_DEVICEATTRIBS *psDevAttrs = psMMUContext->psDevAttrs;
+ MMU_LEVEL eMMULevel = psDevAttrs->eTopLevel;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_PxE_CONFIG *psMMUPDEConfig;
+ const MMU_PxE_CONFIG *psMMUPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psMMUDevVAddrConfig;
+ IMG_HANDLE hPriv;
+ MMU_Levelx_INFO *psLevel = NULL;
+ PVRSRV_ERROR eError;
+ IMG_UINT64 uiIndex;
+ IMG_UINT32 ui32PCIndex;
+ IMG_UINT32 ui32PDIndex;
+ IMG_UINT32 ui32PTIndex;
+ IMG_UINT32 ui32Log2PageSize;
+ MMU_LEVEL_DATA *psMMULevelData;
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ /*
+ At this point we don't know the page size so assume it's 4K.
+ When we get the PD level (MMU_LEVEL_2) we can check to see
+ if this assumption is correct.
+ */
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(12,
+ &psMMUPDEConfig,
+ &psMMUPTEConfig,
+ &psMMUDevVAddrConfig,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size info for log2 page sizeof 12"));
+ }
+
+ psLevel = &psMMUContext->sBaseLevelInfo;
+ psConfig = psDevAttrs->psBaseConfig;
+
+ PVR_ASSERT(psOutFaultData);
+ psOutFaultData->eTopLevel = psDevAttrs->eTopLevel;
+ psOutFaultData->eType = MMU_FAULT_TYPE_NON_PM;
+
+
+ for( ; eMMULevel > MMU_LEVEL_0 ; eMMULevel--)
+ {
+ if( eMMULevel == MMU_LEVEL_3)
+ {
+ /* Determine the PC index */
+ uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexMask;
+ uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPCIndexShift;
+ ui32PCIndex = (IMG_UINT32) uiIndex;
+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PCIndex));
+
+ psMMULevelData = &psOutFaultData->sLevelData[MMU_LEVEL_3];
+ psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+ psMMULevelData->ui32Index = ui32PCIndex;
+
+ if (ui32PCIndex >= psLevel->ui32NumOfEntries)
+ {
+ PVR_DUMPDEBUG_LOG("PC index (%d) out of bounds (%d)", ui32PCIndex, psLevel->ui32NumOfEntries);
+ psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+ break;
+ }
+
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%08x and is %s",
+ ui32PCIndex,
+ pui32Ptr[ui32PCIndex],
+ MMU_VALID_STR(pui32Ptr[ui32PCIndex], PC));
+
+ psMMULevelData->ui64Address = pui32Ptr[ui32PCIndex];
+ psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PCIndex], PC);
+ }
+ else
+ {
+ IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PCE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+ ui32PCIndex,
+ pui64Ptr[ui32PCIndex],
+ MMU_VALID_STR(pui64Ptr[ui32PCIndex], PC));
+
+ psMMULevelData->ui64Address = pui64Ptr[ui32PCIndex];
+ psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PCIndex], PC);
+ }
+
+ psLevel = psLevel->apsNextLevel[ui32PCIndex];
+ if (!psLevel)
+ {
+ break;
+ }
+ psConfig = psMMUPDEConfig;
+ }
+
+
+ else if( eMMULevel == MMU_LEVEL_2)
+ {
+ /* Determine the PD index */
+ uiIndex = psDevVAddr->uiAddr & psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexMask;
+ uiIndex = uiIndex >> psDevAttrs->psTopLevelDevVAddrConfig->uiPDIndexShift;
+ ui32PDIndex = (IMG_UINT32) uiIndex;
+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PDIndex));
+
+ psMMULevelData = &psOutFaultData->sLevelData[MMU_LEVEL_2];
+ psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+ psMMULevelData->ui32Index = ui32PDIndex;
+
+ if (ui32PDIndex >= psLevel->ui32NumOfEntries)
+ {
+ PVR_DUMPDEBUG_LOG("PD index (%d) out of bounds (%d)", ui32PDIndex, psLevel->ui32NumOfEntries);
+ psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+ break;
+ }
+
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%08x and is %s",
+ ui32PDIndex,
+ pui32Ptr[ui32PDIndex],
+ MMU_VALID_STR(pui32Ptr[ui32PDIndex], PD));
+
+ psMMULevelData->ui64Address = pui32Ptr[ui32PDIndex];
+ psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PDIndex], PD);
+
+ if (psDevAttrs->pfnGetPageSizeFromPDE4(pui32Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size from the PDE"));
+ }
+ }
+ else
+ {
+ IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PDE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+ ui32PDIndex,
+ pui64Ptr[ui32PDIndex],
+ MMU_VALID_STR(pui64Ptr[ui32PDIndex], PD));
+
+ psMMULevelData->ui64Address = pui64Ptr[ui32PDIndex];
+ psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PDIndex], PD);
+
+ if (psDevAttrs->pfnGetPageSizeFromPDE8(pui64Ptr[ui32PDIndex], &ui32Log2PageSize) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size from the PDE"));
+ }
+ }
+
+ /*
+ We assumed the page size was 4K, now we have the actual size
+ from the PDE we can confirm if our assumption was correct.
+ Until now it hasn't mattered as the PC and PD are the same
+ regardless of the page size
+ */
+ if (ui32Log2PageSize != 12)
+ {
+ /* Put the 4K page size data */
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+
+ /* Get the correct size data */
+ eError = psDevAttrs->pfnGetPageSizeConfiguration(ui32Log2PageSize,
+ &psMMUPDEConfig,
+ &psMMUPTEConfig,
+ &psMMUDevVAddrConfig,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get the page size info for log2 page sizeof %d", ui32Log2PageSize));
+ break;
+ }
+ }
+ psLevel = psLevel->apsNextLevel[ui32PDIndex];
+ if (!psLevel)
+ {
+ break;
+ }
+ psConfig = psMMUPTEConfig;
+ }
+
+
+ else if( eMMULevel == MMU_LEVEL_1)
+ {
+ /* Determine the PT index */
+ uiIndex = psDevVAddr->uiAddr & psMMUDevVAddrConfig->uiPTIndexMask;
+ uiIndex = uiIndex >> psMMUDevVAddrConfig->uiPTIndexShift;
+ ui32PTIndex = (IMG_UINT32) uiIndex;
+ PVR_ASSERT(uiIndex == ((IMG_UINT64) ui32PTIndex));
+
+ psMMULevelData = &psOutFaultData->sLevelData[MMU_LEVEL_1];
+ psMMULevelData->uiBytesPerEntry = psConfig->uiBytesPerEntry;
+ psMMULevelData->ui32Index = ui32PTIndex;
+
+ if (ui32PTIndex >= psLevel->ui32NumOfEntries)
+ {
+ PVR_DUMPDEBUG_LOG("PT index (%d) out of bounds (%d)", ui32PTIndex, psLevel->ui32NumOfEntries);
+ psMMULevelData->ui32NumOfEntries = psLevel->ui32NumOfEntries;
+ break;
+ }
+
+ if (psConfig->uiBytesPerEntry == 4)
+ {
+ IMG_UINT32 *pui32Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%08x and is %s",
+ ui32PTIndex,
+ pui32Ptr[ui32PTIndex],
+ MMU_VALID_STR(pui32Ptr[ui32PTIndex], PT));
+
+ psMMULevelData->ui64Address = pui32Ptr[ui32PTIndex];
+ psMMULevelData->psDebugStr = MMU_VALID_STR(pui32Ptr[ui32PTIndex], PT);
+ }
+ else
+ {
+ IMG_UINT64 *pui64Ptr = psLevel->sMemDesc.pvCpuVAddr;
+
+ PVR_DUMPDEBUG_LOG("PTE for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+ ui32PTIndex,
+ pui64Ptr[ui32PTIndex],
+ MMU_VALID_STR(pui64Ptr[ui32PTIndex], PT));
+
+ psMMULevelData->ui64Address = pui64Ptr[ui32PTIndex];
+ psMMULevelData->psDebugStr = MMU_VALID_STR(pui64Ptr[ui32PTIndex], PT);
+ }
+ goto e1;
+ }
+ else
+ {
+ PVR_LOG(("Unsupported MMU setup"));
+ break;
+ }
+ }
+
+ e1:
+ /* Put the page size data back */
+ psDevAttrs->pfnPutPageSizeConfiguration(hPriv);
+ OSLockRelease(psMMUContext->hLock);
+}
+
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_DEV_VIRTADDR sDevVAddr)
+{
+ MMU_Levelx_INFO *psLevel = NULL;
+ const MMU_PxE_CONFIG *psConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_HANDLE hPriv;
+ IMG_UINT32 uiIndex = 0;
+ IMG_BOOL bStatus = IMG_FALSE;
+
+ _MMU_GetPTConfig(psMMUContext, uiLog2PageSize, &psConfig, &hPriv, &psDevVAddrConfig);
+
+ OSLockAcquire(psMMUContext->hLock);
+
+ switch(psMMUContext->psDevAttrs->eTopLevel)
+ {
+ case MMU_LEVEL_3:
+ uiIndex = _CalcPCEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+ psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+ if (psLevel == NULL)
+ break;
+ /* fall through */
+ case MMU_LEVEL_2:
+ uiIndex = _CalcPDEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+ if (psLevel != NULL)
+ psLevel = psLevel->apsNextLevel[uiIndex];
+ else
+ psLevel = psMMUContext->sBaseLevelInfo.apsNextLevel[uiIndex];
+
+ if (psLevel == NULL)
+ break;
+ /* fall through */
+ case MMU_LEVEL_1:
+ uiIndex = _CalcPTEIdx(sDevVAddr, psDevVAddrConfig, IMG_FALSE);
+
+ if (psLevel == NULL)
+ psLevel = &psMMUContext->sBaseLevelInfo;
+
+ bStatus = ((IMG_UINT64 *) psLevel->sMemDesc.pvCpuVAddr)[uiIndex]
+ & psConfig->uiValidEnMask;
+ break;
+ default:
+ PVR_LOG(("MMU_IsVDevAddrValid: Unsupported MMU setup"));
+ break;
+ }
+
+ OSLockRelease(psMMUContext->hLock);
+
+ _MMU_PutPTConfig(psMMUContext, hPriv);
+
+ return bStatus;
+}
+
+#if defined(PDUMP)
+/*
+ MMU_ContextDerivePCPDumpSymAddr
+ */
+PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+ IMG_CHAR *pszPDumpSymbolicNameBuffer,
+ size_t uiPDumpSymbolicNameBufferSize)
+{
+ size_t uiCount;
+ IMG_UINT64 ui64PhysAddr;
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+ if (!psMMUContext->sBaseLevelInfo.sMemDesc.bValid)
+ {
+ /* We don't have any allocations. You're not allowed to ask
+ for the page catalogue base address until you've made at
+ least one allocation */
+ return PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR;
+ }
+
+ ui64PhysAddr = (IMG_UINT64)psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr.uiAddr;
+
+ PVR_ASSERT(uiPDumpSymbolicNameBufferSize >= (IMG_UINT32)(21 + OSStringLength(psDevId->pszPDumpDevName)));
+
+ /* The page catalogue symbolic name is formed from the page catalogue
+ phys addr prefixed with MMUPC_. */
+
+ uiCount = OSSNPrintf(pszPDumpSymbolicNameBuffer,
+ uiPDumpSymbolicNameBufferSize,
+ ":%s:%s%016"IMG_UINT64_FMTSPECX,
+ psDevId->pszPDumpDevName,
+ psMMUContext->sBaseLevelInfo.sMemDesc.bValid?"MMUPC_":"XXX",
+ ui64PhysAddr);
+
+ if (uiCount + 1 > uiPDumpSymbolicNameBufferSize)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_PDumpWritePageCatBase
+ */
+PVRSRV_ERROR
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+ const IMG_CHAR *pszSpaceName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ PDUMP_FLAGS_T uiPdumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszPageCatBaseSymbolicAddr[100];
+ const IMG_CHAR *pszPDumpDevName = psMMUContext->psDevAttrs->pszMMUPxPDumpMemSpaceName;
+
+ eError = MMU_ContextDerivePCPDumpSymAddr(psMMUContext,
+ &aszPageCatBaseSymbolicAddr[0],
+ sizeof(aszPageCatBaseSymbolicAddr));
+ if (eError == PVRSRV_OK)
+ {
+ eError = PDumpWriteSymbAddress(pszSpaceName,
+ uiOffset,
+ aszPageCatBaseSymbolicAddr,
+ 0, /* offset -- Could be non-zero for var. pgsz */
+ pszPDumpDevName,
+ ui32WordSize,
+ ui32AlignShift,
+ ui32Shift,
+ uiPdumpFlags | PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ return eError;
+}
+
+/*
+ MMU_AcquirePDumpMMUContext
+ */
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 *pui32PDumpMMUContextID)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+ if (!psMMUContext->ui32PDumpContextIDRefCount)
+ {
+ PDUMP_MMU_ALLOC_MMUCONTEXT(psDevId->pszPDumpDevName,
+ psMMUContext->sBaseLevelInfo.sMemDesc.sDevPAddr,
+ psMMUContext->psDevAttrs->eMMUType,
+ &psMMUContext->uiPDumpContextID);
+ }
+
+ psMMUContext->ui32PDumpContextIDRefCount++;
+ *pui32PDumpMMUContextID = psMMUContext->uiPDumpContextID;
+
+ return PVRSRV_OK;
+}
+
+/*
+ MMU_ReleasePDumpMMUContext
+ */
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psMMUContext->psDevNode->sDevId;
+
+ PVR_ASSERT(psMMUContext->ui32PDumpContextIDRefCount != 0);
+ psMMUContext->ui32PDumpContextIDRefCount--;
+
+ if (psMMUContext->ui32PDumpContextIDRefCount == 0)
+ {
+ PDUMP_MMU_FREE_MMUCONTEXT(psDevId->pszPDumpDevName,
+ psMMUContext->uiPDumpContextID);
+ }
+
+ return PVRSRV_OK;
+}
+#endif
+
+/******************************************************************************
+ End of file (mmu_common.c)
+ ******************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/mmu_common.h b/drivers/gpu/drm/img-rogue/1.10/mmu_common.h
new file mode 100644
index 00000000000000..360c7ae55021c5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mmu_common.h
@@ -0,0 +1,755 @@
+/**************************************************************************/ /*!
+@File
+@Title Common MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef MMU_COMMON_H
+#define MMU_COMMON_H
+
+/*
+ The Memory Management Unit (MMU) performs device virtual to physical translation.
+
+ Terminology:
+ - page catalogue, PC (optional, 3 tier MMU)
+ - page directory, PD
+ - page table, PT (can be variable sized)
+ - data page, DP (can be variable sized)
+ Note: PD and PC are fixed size and can't be larger than
+ the native physical (CPU) page size
+ Shifts and AlignShift variables:
+ - 'xxxShift' represents the number of bits a bitfield is shifted left from bit0
+ - 'xxxAlignShift' is used to convert a bitfield (based at bit0) into byte units
+ by applying a bit shift left by 'xxxAlignShift' bits
+*/
+
+/*
+ Device Virtual Address Config:
+
+ Incoming Device Virtual Address is deconstructed into up to 4
+ fields, where the virtual address is up to 64bits:
+ MSB-----------------------------------------------LSB
+ | PC Index: | PD Index: | PT Index: | DP offset: |
+ | d bits | c bits | b-v bits | a+v bits |
+ -----------------------------------------------------
+ where v is the variable page table modifier, e.g.
+ v == 0 -> 4KB DP
+ v == 2 -> 16KB DP
+ v == 4 -> 64KB DP
+ v == 6 -> 256KB DP
+ v == 8 -> 1MB DP
+ v == 10 -> 4MB DP
+*/
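+
+/*
+   Put another way, for a data page of 2^n bytes the modifier is
+   v = n - 12, e.g. a 16KB data page (n == 14) gives v == 2 and a
+   4MB data page (n == 22) gives v == 10, matching the table above.
+*/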
+
+/* services/server/include/ */
+#include "pmr.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+
+
+/*!
+ The level of the MMU
+*/
+typedef enum
+{
+ MMU_LEVEL_0 = 0, /* Level 0 = Page */
+
+ MMU_LEVEL_1,
+ MMU_LEVEL_2,
+ MMU_LEVEL_3,
+ MMU_LEVEL_LAST
+} MMU_LEVEL;
+
+/* moved after declaration of MMU_LEVEL, as pdump_mmu.h references it */
+#include "pdump_mmu.h"
+
+#define MMU_MAX_LEVEL 3
+
+typedef struct _MMU_LEVEL_DATA_
+{
+ IMG_UINT32 ui32Index;
+ IMG_UINT32 ui32NumOfEntries;
+ IMG_CHAR const *psDebugStr;
+ IMG_UINT8 uiBytesPerEntry;
+ IMG_UINT64 ui64Address;
+} MMU_LEVEL_DATA;
+
+typedef enum _MMU_FAULT_TYPE_
+{
+ MMU_FAULT_TYPE_UNKNOWN = 0, /* If fault is not analysed by Host */
+ MMU_FAULT_TYPE_PM,
+ MMU_FAULT_TYPE_NON_PM,
+} MMU_FAULT_TYPE;
+
+typedef struct _MMU_FAULT_DATA_
+{
+ MMU_LEVEL eTopLevel;
+ MMU_FAULT_TYPE eType;
+ MMU_LEVEL_DATA sLevelData[MMU_LEVEL_LAST]; /* indexed by MMU_LEVEL, so MMU_LEVEL_3 must be a valid index */
+} MMU_FAULT_DATA;
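+
+/*
+   Sketch of consuming the fault data filled in by MMU_CheckFaultAddress()
+   (illustrative only; variable names are made up, and levels below the
+   point where the walk stopped may not have been filled in):
+
+       MMU_FAULT_DATA sFaultData;
+       MMU_LEVEL eLevel;
+
+       MMU_CheckFaultAddress(psMMUContext, &sFaultVAddr,
+                             NULL, NULL, &sFaultData);
+       for (eLevel = sFaultData.eTopLevel; eLevel > MMU_LEVEL_0; eLevel--)
+       {
+           MMU_LEVEL_DATA *psData = &sFaultData.sLevelData[eLevel];
+           PVR_LOG(("Level %d: index %u, entry 0x%016llx (%s)",
+                    eLevel, psData->ui32Index,
+                    (unsigned long long) psData->ui64Address,
+                    psData->psDebugStr));
+       }
+*/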
+
+struct _MMU_DEVVADDR_CONFIG_;
+
+/*!
+ MMU device attributes. This structure is the interface between the generic
+ MMU code and the device specific MMU code.
+*/
+typedef struct _MMU_DEVICEATTRIBS_
+{
+ PDUMP_MMU_TYPE eMMUType;
+
+ IMG_CHAR *pszMMUPxPDumpMemSpaceName;
+
+ /*! The type of the top level object */
+ MMU_LEVEL eTopLevel;
+
+ /*! Alignment requirement of the base object */
+ IMG_UINT32 ui32BaseAlign;
+
+ /*! HW config of the base object */
+ struct _MMU_PxE_CONFIG_ *psBaseConfig;
+
+ /*! Address split for the base object */
+ const struct _MMU_DEVVADDR_CONFIG_ *psTopLevelDevVAddrConfig;
+
+ /*! Callback for creating protection bits for the page catalogue entry with 8 byte entry */
+ IMG_UINT64 (*pfnDerivePCEProt8)(IMG_UINT32, IMG_UINT32);
+ /*! Callback for creating protection bits for the page catalogue entry with 4 byte entry */
+ IMG_UINT32 (*pfnDerivePCEProt4)(IMG_UINT32);
+ /*! Callback for creating protection bits for the page directory entry with 8 byte entry */
+ IMG_UINT64 (*pfnDerivePDEProt8)(IMG_UINT32, IMG_UINT32);
+ /*! Callback for creating protection bits for the page directory entry with 4 byte entry */
+ IMG_UINT32 (*pfnDerivePDEProt4)(IMG_UINT32);
+ /*! Callback for creating protection bits for the page table entry with 8 byte entry */
+ IMG_UINT64 (*pfnDerivePTEProt8)(IMG_UINT32, IMG_UINT32);
+ /*! Callback for creating protection bits for the page table entry with 4 byte entry */
+ IMG_UINT32 (*pfnDerivePTEProt4)(IMG_UINT32);
+
+ /*! Callback for getting the MMU configuration based on the specified page size */
+ PVRSRV_ERROR (*pfnGetPageSizeConfiguration)(IMG_UINT32 ui32DataPageSize,
+ const struct _MMU_PxE_CONFIG_ **ppsMMUPDEConfig,
+ const struct _MMU_PxE_CONFIG_ **ppsMMUPTEConfig,
+ const struct _MMU_DEVVADDR_CONFIG_ **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv2);
+ /*! Callback for putting the MMU configuration obtained from pfnGetPageSizeConfiguration */
+ PVRSRV_ERROR (*pfnPutPageSizeConfiguration)(IMG_HANDLE hPriv);
+
+ /*! Callback for getting the page size from the PDE for the page table entry with 4 byte entry */
+ PVRSRV_ERROR (*pfnGetPageSizeFromPDE4)(IMG_UINT32, IMG_UINT32 *);
+ /*! Callback for getting the page size from the PDE for the page table entry with 8 byte entry */
+ PVRSRV_ERROR (*pfnGetPageSizeFromPDE8)(IMG_UINT64, IMG_UINT32 *);
+
+ /*! Private data handle */
+ IMG_HANDLE hGetPageSizeFnPriv;
+} MMU_DEVICEATTRIBS;
+
+/*!
+ MMU virtual address split
+*/
+typedef struct _MMU_DEVVADDR_CONFIG_
+{
+ /*! Page catalogue index mask */
+ IMG_UINT64 uiPCIndexMask;
+ /*! Page catalogue index shift */
+ IMG_UINT8 uiPCIndexShift;
+ /*! Total number of PC entries */
+ IMG_UINT32 uiNumEntriesPC;
+ /*! Page directory mask */
+ IMG_UINT64 uiPDIndexMask;
+ /*! Page directory shift */
+ IMG_UINT8 uiPDIndexShift;
+ /*! Total number of PD entries */
+ IMG_UINT32 uiNumEntriesPD;
+ /*! Page table mask */
+ IMG_UINT64 uiPTIndexMask;
+ /*! Page index shift */
+ IMG_UINT8 uiPTIndexShift;
+ /*! Total number of PT entries */
+ IMG_UINT32 uiNumEntriesPT;
+ /*! Page offset mask */
+ IMG_UINT64 uiPageOffsetMask;
+ /*! Page offset shift */
+ IMG_UINT8 uiPageOffsetShift;
+ /*! First virtual address mappable for this config */
+ IMG_UINT64 uiOffsetInBytes;
+
+} MMU_DEVVADDR_CONFIG;
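+
+/*
+   Sketch of how these masks and shifts split a device virtual address,
+   where psCfg points at the MMU_DEVVADDR_CONFIG in use (illustrative
+   only and ignoring uiOffsetInBytes; the internal _CalcPCEIdx/
+   _CalcPDEIdx/_CalcPTEIdx helpers in mmu_common.c are authoritative):
+
+       IMG_UINT32 uiPCIdx = (IMG_UINT32)
+           ((sDevVAddr.uiAddr & psCfg->uiPCIndexMask) >> psCfg->uiPCIndexShift);
+       IMG_UINT32 uiPDIdx = (IMG_UINT32)
+           ((sDevVAddr.uiAddr & psCfg->uiPDIndexMask) >> psCfg->uiPDIndexShift);
+       IMG_UINT32 uiPTIdx = (IMG_UINT32)
+           ((sDevVAddr.uiAddr & psCfg->uiPTIndexMask) >> psCfg->uiPTIndexShift);
+*/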
+
+/*
+ P(C/D/T) Entry Config:
+
+ MSB-----------------------------------------------LSB
+ | PT Addr: | variable PT ctrl | protection flags: |
+ | bits c+v | b bits | a bits |
+ -----------------------------------------------------
+ where v is the variable page table modifier and is optional
+*/
+/*!
+ Generic MMU entry description. This is used to describe PC, PD and PT entries.
+*/
+typedef struct _MMU_PxE_CONFIG_
+{
+ IMG_UINT8 uiBytesPerEntry; /*! Size of an entry in bytes */
+
+ IMG_UINT64 uiAddrMask; /*! Physical address mask */
+ IMG_UINT8 uiAddrShift; /*! Physical address shift */
+ IMG_UINT8 uiAddrLog2Align; /*! Physical address Log 2 alignment */
+
+ IMG_UINT64 uiVarCtrlMask; /*! Variable control mask */
+ IMG_UINT8 uiVarCtrlShift; /*! Variable control shift */
+
+ IMG_UINT64 uiProtMask; /*! Protection flags mask */
+ IMG_UINT8 uiProtShift; /*! Protection flags shift */
+
+ IMG_UINT64 uiValidEnMask; /*! Entry valid bit mask */
+ IMG_UINT8 uiValidEnShift; /*! Entry valid bit shift */
+} MMU_PxE_CONFIG;
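+
+/*
+   One plausible composition of an 8-byte entry from these fields
+   (a sketch only; _SetupPTE in mmu_common.c together with the
+   device-specific pfnDerivePTEProt8/4 callbacks is authoritative):
+
+       IMG_UINT64 uiEntry =
+           ((((sDevPAddr.uiAddr >> psConfig->uiAddrLog2Align)
+                                << psConfig->uiAddrShift)
+             & psConfig->uiAddrMask)
+           | (uiProtFlags & psConfig->uiProtMask));
+*/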
+
+/* MMU Protection flags */
+
+
+/* These are specified generically and in a h/w independent way, and
+ are interpreted at each level (PC/PD/PT) separately. */
+
+/* The following flags are for internal use only, and should not
+ traverse the API */
+#define MMU_PROTFLAGS_INVALID 0x80000000U
+
+typedef IMG_UINT32 MMU_PROTFLAGS_T;
+
+/* The following flags should be supplied by the caller: */
+#define MMU_PROTFLAGS_READABLE (1U<<0)
+#define MMU_PROTFLAGS_WRITEABLE (1U<<1)
+#define MMU_PROTFLAGS_CACHE_COHERENT (1U<<2)
+#define MMU_PROTFLAGS_CACHED (1U<<3)
+
+/* Device specific flags*/
+#define MMU_PROTFLAGS_DEVICE_OFFSET 16
+#define MMU_PROTFLAGS_DEVICE_MASK 0x000f0000UL
+#define MMU_PROTFLAGS_DEVICE(n) \
+ (((n) << MMU_PROTFLAGS_DEVICE_OFFSET) & \
+ MMU_PROTFLAGS_DEVICE_MASK)
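+
+/*
+   Example (sketch): a cached read/write mapping carrying a
+   device-specific flag value of 1 would be requested as
+
+       MMU_PROTFLAGS_T uiProt = MMU_PROTFLAGS_READABLE
+                              | MMU_PROTFLAGS_WRITEABLE
+                              | MMU_PROTFLAGS_CACHED
+                              | MMU_PROTFLAGS_DEVICE(1);
+*/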
+
+
+typedef struct _MMU_CONTEXT_ MMU_CONTEXT;
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef struct _MMU_PAGESIZECONFIG_
+{
+ const MMU_PxE_CONFIG *psPDEConfig;
+ const MMU_PxE_CONFIG *psPTEConfig;
+ const MMU_DEVVADDR_CONFIG *psDevVAddrConfig;
+ IMG_UINT32 uiRefCount;
+ IMG_UINT32 uiMaxRefCount;
+} MMU_PAGESIZECONFIG;
+
+/*************************************************************************/ /*!
+@Function MMU_ContextCreate
+
+@Description Create a new MMU context
+
+@Input psDevNode Device node of the device to create the
+ MMU context for
+
+@Output ppsMMUContext The created MMU context
+
+@Return PVRSRV_OK if the MMU context was successfully created
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_ContextCreate (struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ MMU_CONTEXT **ppsMMUContext,
+ MMU_DEVICEATTRIBS *psDevAttrs);
+
+
+/*************************************************************************/ /*!
+@Function MMU_ContextDestroy
+
+@Description Destroy a MMU context
+
+@Input psMMUContext MMU context to destroy
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_ContextDestroy (MMU_CONTEXT *psMMUContext);
+
+/*************************************************************************/ /*!
+@Function MMU_Alloc
+
+@Description Allocate the page tables required for the specified virtual range
+
+@Input psMMUContext MMU context to operate on
+
+@Input uSize The size of the allocation
+
+@Output puActualSize Actual size of allocation
+
+@Input uiProtFlags Generic MMU protection flags
+
+@Input uDevVAddrAlignment Alignment requirement of the virtual
+ allocation
+
+@Input psDevVAddr Virtual address to start the allocation
+ from
+
+@Return PVRSRV_OK if the allocation of the page tables was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_Alloc (MMU_CONTEXT *psMMUContext,
+ IMG_DEVMEM_SIZE_T uSize,
+ IMG_DEVMEM_SIZE_T *puActualSize,
+ IMG_UINT32 uiProtFlags,
+ IMG_DEVMEM_SIZE_T uDevVAddrAlignment,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_UINT32 uiLog2PageSize);
+
+
+/*************************************************************************/ /*!
+@Function MMU_Free
+
+@Description Free the page tables of the specified virtual range
+
+@Input psMMUContext MMU context to operate on
+
+@Input psDevVAddr Virtual address to start the free
+ from
+
+@Input uSize The size of the allocation
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_Free (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiLog2DataPageSize);
+
+
+/*************************************************************************/ /*!
+@Function MMU_MapPages
+
+@Description Map pages to the MMU.
+ Two modes of operation: One requires a list of physical page
+ indices that are going to be mapped, the other just takes
+ the PMR and a possible offset to map parts of it.
+
+@Input psMMUContext MMU context to operate on
+
+@Input uiMappingFlags Memalloc flags for the mapping
+
+@Input sDevVAddrBase Device virtual address of the 1st page
+
+@Input psPMR PMR to map
+
+@Input ui32PhysPgOffset Physical offset into the PMR
+
+@Input ui32MapPageCount Number of pages to map
+
+@Input paui32MapIndices List of page indices to map,
+ can be NULL
+
+@Input uiLog2PageSize Log2 page size of the pages to map
+
+@Return PVRSRV_OK if the mapping was successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPages(MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ PMR *psPMR,
+ IMG_UINT32 ui32PhysPgOffset,
+ IMG_UINT32 ui32MapPageCount,
+ IMG_UINT32 *paui32MapIndices,
+ IMG_UINT32 uiLog2PageSize);
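+
+/*
+   Call shapes for the two modes described above (sketch only; all
+   variable names are illustrative):
+
+       (a) map a contiguous run of the PMR, starting at physical page 0:
+           MMU_MapPages(psMMUContext, uiFlags, sDevVAddrBase, psPMR,
+                        0, ui32NumPages, NULL, uiLog2PageSize);
+
+       (b) map scattered pages selected by an index list:
+           MMU_MapPages(psMMUContext, uiFlags, sDevVAddrBase, psPMR,
+                        0, ui32NumIndices, paui32Indices, uiLog2PageSize);
+*/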
+
+/*************************************************************************/ /*!
+@Function MMU_UnmapPages
+
+@Description Unmap pages from the MMU.
+
+@Input psMMUContext MMU context to operate on
+
+@Input uiMappingFlags Memalloc flags for the mapping
+
+@Input psDevVAddr Device virtual address of the 1st page
+
+@Input ui32PageCount Number of pages to unmap
+
+@Input pai32UnmapIndicies Array of page indices to be unmapped
+
+@Input uiLog2PageSize log2 size of the page
+
+
+@Input bDummyBacking Boolean that indicates whether the
+ unmapped regions need to be backed
+ by the dummy page
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPages (MMU_CONTEXT *psMMUContext,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 *pai32UnmapIndicies,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bDummyBacking);
+
+/*************************************************************************/ /*!
+@Function MMU_MapPMRFast
+
+@Description Map a PMR into the MMU. The PMR must not be sparse.
+ This is supposed to cover most mappings and, as the name suggests,
+ should be as fast as possible.
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddr Device virtual address to map the PMR
+ into
+
+@Input psPMR PMR to map
+
+@Input uiSizeBytes Size in bytes to map
+
+@Input uiMappingFlags Memalloc flags for the mapping
+
+@Return PVRSRV_OK if the PMR was successfully mapped
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR
+MMU_MapPMRFast (MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSizeBytes,
+ PVRSRV_MEMALLOCFLAGS_T uiMappingFlags,
+ IMG_UINT32 uiLog2PageSize);
+
+/*************************************************************************/ /*!
+@Function MMU_UnmapPMRFast
+
+@Description Unmap pages from the MMU as fast as possible.
+ PMR must be non-sparse!
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddrBase Device virtual address of the 1st page
+
+@Input ui32PageCount Number of pages to unmap
+
+@Input uiLog2PageSize log2 size of the page
+
+@Return None
+*/
+/*****************************************************************************/
+extern void
+MMU_UnmapPMRFast(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddrBase,
+ IMG_UINT32 ui32PageCount,
+ IMG_UINT32 uiLog2PageSize);
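+
+/*
+   Typical pairing with MMU_MapPMRFast above (sketch only; the caller
+   is expected to have called PMRLockSysPhysAddr() on the PMR first,
+   and variable names are illustrative):
+
+       eError = MMU_MapPMRFast(psMMUContext, sDevVAddr, psPMR,
+                               (IMG_DEVMEM_SIZE_T)ui32NumPages << uiLog2PageSize,
+                               uiFlags, uiLog2PageSize);
+       ...
+       MMU_UnmapPMRFast(psMMUContext, sDevVAddr, ui32NumPages,
+                        uiLog2PageSize);
+*/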
+
+/*************************************************************************/ /*!
+@Function MMU_ChangeValidity
+
+@Description Sets or unsets the valid bit of page table entries for a given
+ address range.
+
+@Input psMMUContext MMU context to operate on
+
+@Input sDevVAddr The device virtual base address of
+ the range we want to modify
+
+@Input uiNumPages Number of pages in the range
+
+@Input uiLog2PageSize Log2 of the used page size
+
+@Input bMakeValid Choose to set or unset the valid bit.
+ (bMakeValid == IMG_TRUE ) -> SET
+ (bMakeValid == IMG_FALSE) -> UNSET
+
+@Input psPMR The PMR backing the allocation.
+ Needed in case we have sparse memory
+ where we have to check whether a physical
+ address actually backs the virtual.
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_ChangeValidity(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEVMEM_SIZE_T uiNumPages,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bMakeValid,
+ PMR *psPMR);
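+
+/*
+   Example (sketch): transiently hide a range and re-expose it, e.g.
+   while the physical backing of a sparse allocation is changed:
+
+       MMU_ChangeValidity(psMMUContext, sDevVAddr, uiNumPages,
+                          uiLog2PageSize, IMG_FALSE, psPMR);
+       ... update the physical backing ...
+       MMU_ChangeValidity(psMMUContext, sDevVAddr, uiNumPages,
+                          uiLog2PageSize, IMG_TRUE, psPMR);
+*/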
+
+/*************************************************************************/ /*!
+@Function MMU_AcquireBaseAddr
+
+@Description Acquire the device physical address of the base level MMU object
+
+@Input psMMUContext MMU context to operate on
+
+@Output psPhysAddr Device physical address of the base level
+ MMU object
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+MMU_AcquireBaseAddr(MMU_CONTEXT *psMMUContext, IMG_DEV_PHYADDR *psPhysAddr);
+
+/*************************************************************************/ /*!
+@Function MMU_ReleaseBaseAddr
+
+@Description Release the device physical address of the base level MMU object
+
+@Input psMMUContext MMU context to operate on
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+void
+MMU_ReleaseBaseAddr(MMU_CONTEXT *psMMUContext);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/***********************************************************************************/ /*!
+@Function MMU_SetOSids
+
+@Description Set the OSid associated with the application (and the MMU Context)
+
+@Input psMMUContext MMU context to store the OSid on
+
+@Input ui32OSid the OSid in question
+
+@Input ui32OSidReg The value that the firmware will assign to the
+ registers.
+
+@Input bOSidAxiProt Toggles whether the AXI prot bit will be set or
+ not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_SetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+
+/***********************************************************************************/ /*!
+@Function MMU_GetOSids
+
+@Description Retrieve the OSid associated with the MMU context.
+
+@Input psMMUContext MMU context in which the OSid is stored
+
+@Output pui32OSid The OSid in question
+
+@Output pui32OSidReg The OSid that the firmware will assign to the
+ registers.
+
+@Output pbOSidAxiProt Toggles whether the AXI prot bit will be set or
+ not.
+@Return None
+*/
+/***********************************************************************************/
+
+void MMU_GetOSids(MMU_CONTEXT *psMMUContext, IMG_UINT32 * pui32OSid, IMG_UINT32 * pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+#endif
+
+/*************************************************************************/ /*!
+@Function MMU_SetDeviceData
+
+@Description Set the device specific callback data
+
+@Input psMMUContext MMU context to store the data on
+
+@Input hDevData Device data
+
+@Return None
+*/
+/*****************************************************************************/
+void MMU_SetDeviceData(MMU_CONTEXT *psMMUContext, IMG_HANDLE hDevData);
+
+/*************************************************************************/ /*!
+@Function MMU_CheckFaultAddress
+
+@Description Check the specified MMU context to see if the provided address
+ should be valid
+
+@Input psMMUContext MMU context to store the data on
+
+@Input psDevVAddr Address to check
+
+@Input pfnDumpDebugPrintf Debug print function
+
+@Input pvDumpDebugFile Optional file identifier to be passed
+ to the debug print function if required
+
+@Output psOutFaultData To store fault details after checking
+
+@Return None
+*/
+/*****************************************************************************/
+void MMU_CheckFaultAddress(MMU_CONTEXT *psMMUContext,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ MMU_FAULT_DATA *psOutFaultData);
+
+/*************************************************************************/ /*!
+@Function MMU_IsVDevAddrValid
+@Description Checks if the given address is valid.
+@Input psMMUContext MMU context to operate on
+@Input uiLog2PageSize Log2 of the page size
+@Input sDevVAddr Address to check
+@Return IMG_TRUE if the address is valid
+*/ /**************************************************************************/
+IMG_BOOL MMU_IsVDevAddrValid(MMU_CONTEXT *psMMUContext,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function MMU_ContextDerivePCPDumpSymAddr
+
+@Description Derives a PDump Symbolic address for the top level MMU object
+
+@Input psMMUContext MMU context to operate on
+
+@Input pszPDumpSymbolicNameBuffer Buffer to write the PDump symbolic
+ address to
+
+@Input uiPDumpSymbolicNameBufferSize Size of the buffer
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR MMU_ContextDerivePCPDumpSymAddr(MMU_CONTEXT *psMMUContext,
+ IMG_CHAR *pszPDumpSymbolicNameBuffer,
+ size_t uiPDumpSymbolicNameBufferSize);
+
+/*************************************************************************/ /*!
+@Function MMU_PDumpWritePageCatBase
+
+@Description PDump write of the top level MMU object to a device register
+
+@Input psMMUContext MMU context to operate on
+
+@Input pszSpaceName PDump name of the mem/reg space
+
+@Input uiOffset Offset to write the address to
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+ const IMG_CHAR *pszSpaceName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ PDUMP_FLAGS_T uiPdumpFlags);
+
+/*************************************************************************/ /*!
+@Function MMU_AcquirePDumpMMUContext
+
+@Description Acquire a reference to the PDump MMU context for this MMU
+ context
+
+@Input psMMUContext MMU context to operate on
+
+@Output pui32PDumpMMUContextID PDump MMU context ID
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_AcquirePDumpMMUContext(MMU_CONTEXT *psMMUContext, IMG_UINT32 *pui32PDumpMMUContextID);
+
+/*************************************************************************/ /*!
+@Function MMU_ReleasePDumpMMUContext
+
+@Description Release a reference to the PDump MMU context for this MMU context
+
+@Input psMMUContext MMU context to operate on
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+PVRSRV_ERROR MMU_ReleasePDumpMMUContext(MMU_CONTEXT *psMMUContext);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(MMU_PDumpWritePageCatBase)
+#endif
+static INLINE void
+MMU_PDumpWritePageCatBase(MMU_CONTEXT *psMMUContext,
+ const IMG_CHAR *pszSpaceName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ PDUMP_FLAGS_T uiPdumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psMMUContext);
+ PVR_UNREFERENCED_PARAMETER(pszSpaceName);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32WordSize);
+ PVR_UNREFERENCED_PARAMETER(ui32AlignShift);
+ PVR_UNREFERENCED_PARAMETER(ui32Shift);
+ PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+}
+#endif /* PDUMP */
+
+#endif /* MMU_COMMON_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/module_common.c b/drivers/gpu/drm/img-rogue/1.10/module_common.c
new file mode 100644
index 00000000000000..d0646a892166aa
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/module_common.c
@@ -0,0 +1,569 @@
+/*************************************************************************/ /*!
+@File
+@Title Common linux module setup
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "lists.h"
+#include "power.h"
+#include "env_connection.h"
+#include "process_stats.h"
+#include "module_common.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#endif
+#include "pvrsrv_error.h"
+#include "pvr_drv.h"
+#include "pvr_bridge_k.h"
+
+#include <pvr_fence.h>
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+#include "pvr_sync.h"
+#endif
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
+#include <linux/trace_events.h>
+#else
+#include <linux/ftrace_event.h>
+#endif
+#endif
+#include "pvr_gputrace.h"
+
+#include "km_apphint.h"
+#include "srvinit.h"
+
+#include "htb_debug.h"
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+/* Display class interface */
+#include "kerneldisplay.h"
+EXPORT_SYMBOL(DCRegisterDevice);
+EXPORT_SYMBOL(DCUnregisterDevice);
+EXPORT_SYMBOL(DCDisplayConfigurationRetired);
+EXPORT_SYMBOL(DCDisplayHasPendingCommand);
+EXPORT_SYMBOL(DCImportBufferAcquire);
+EXPORT_SYMBOL(DCImportBufferRelease);
+
+/* Physmem interface (required by LMA DC drivers) */
+#include "physheap.h"
+EXPORT_SYMBOL(PhysHeapAcquire);
+EXPORT_SYMBOL(PhysHeapRelease);
+EXPORT_SYMBOL(PhysHeapGetType);
+EXPORT_SYMBOL(PhysHeapRegionGetCpuPAddr);
+EXPORT_SYMBOL(PhysHeapRegionGetSize);
+EXPORT_SYMBOL(PhysHeapCpuPAddrToDevPAddr);
+
+EXPORT_SYMBOL(PVRSRVGetDriverStatus);
+EXPORT_SYMBOL(PVRSRVSystemInstallDeviceLISR);
+EXPORT_SYMBOL(PVRSRVSystemUninstallDeviceLISR);
+
+#include "pvr_notifier.h"
+EXPORT_SYMBOL(PVRSRVCheckStatus);
+
+#include "pvr_debug.h"
+EXPORT_SYMBOL(PVRSRVGetErrorStringKM);
+#endif /* defined(SUPPORT_DISPLAY_CLASS) */
+
+#include "rgxapi_km.h"
+#if defined(SUPPORT_SHARED_SLC)
+EXPORT_SYMBOL(RGXInitSLC);
+#endif
+
+#if defined(SUPPORT_RGX)
+EXPORT_SYMBOL(RGXHWPerfConnect);
+EXPORT_SYMBOL(RGXHWPerfDisconnect);
+EXPORT_SYMBOL(RGXHWPerfControl);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
+EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCustomCounters);
+EXPORT_SYMBOL(RGXHWPerfDisableCounters);
+EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
+EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
+EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
+#if defined(SUPPORT_KERNEL_HWPERF_TEST)
+EXPORT_SYMBOL(OSAddTimer);
+EXPORT_SYMBOL(OSEnableTimer);
+EXPORT_SYMBOL(OSDisableTimer);
+EXPORT_SYMBOL(OSRemoveTimer);
+#endif
+#endif
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile)
+{
+ if (pFile)
+ {
+ struct drm_file *psDRMFile = pFile->private_data;
+
+ return psDRMFile->driver_priv;
+ }
+
+ return NULL;
+}
+
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection)
+{
+ ENV_CONNECTION_DATA *psEnvConnection;
+
+ psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+ PVR_ASSERT(psEnvConnection != NULL);
+
+ return psEnvConnection->psFile;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDriverInit
+@Description Common one time driver initialisation
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDriverInit(void)
+{
+ PVRSRV_ERROR pvrerr;
+ int error = 0;
+
+#if defined(PDUMP)
+ error = dbgdrv_init();
+ if (error != 0)
+ {
+ return error;
+ }
+#endif
+
+ error = PVRDebugFSInit();
+ if (error != 0)
+ {
+ return error;
+ }
+
+ if (HTB_CreateFSEntry() != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ if (PVRSRVStatsInitialise() != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+#endif
+
+ if (PVROSFuncInit() != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+
+ error = pvr_apphint_init();
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed AppHint setup(%d)",
+ __func__, error));
+ }
+
+ pvrerr = PVRSRVDriverInit();
+ if (pvrerr != PVRSRV_OK)
+ {
+ return -ENODEV;
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+	/* Called here because we need to handle input from the file even
+	 * before the devices are initialised.
+	 * Note: we don't pass a device node because the AppHint callbacks
+	 * don't need it. */
+ PVRGpuTraceInitAppHintCallbacks(NULL);
+#endif
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDriverDeinit
+@Description  Common one-time driver de-initialisation
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDriverDeinit(void)
+{
+ PVRSRVDriverDeInit();
+
+ pvr_apphint_deinit();
+
+ PVROSFuncDeInit();
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsDestroy();
+#endif
+
+ HTB_DestroyFSEntry();
+
+ PVRDebugFSDeInit();
+
+#if defined(PDUMP)
+ dbgdrv_cleanup();
+#endif
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceInit
+@Description Common device related initialisation.
+@Input psDeviceNode The device node for which initialisation should be
+ performed
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ int error = 0;
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ {
+ PVRSRV_ERROR eError = pvr_sync_init(psDeviceNode->psDevConfig->pvOSDevice);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: unable to create sync (%d)",
+ __func__, eError));
+ return -EBUSY;
+ }
+ }
+#endif
+
+ error = PVRDebugCreateDebugFSEntries();
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed to create default debugfs entries (%d)",
+ __func__, error));
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ error = PVRGpuTraceInitDevice(psDeviceNode);
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+				"%s: failed to initialise PVR GPU Tracing on device %d (%d)",
+ __func__, psDeviceNode->sDevId.i32UMIdentifier, error));
+ }
+#endif
+
+ /* register the AppHint device control before device initialisation
+ * so individual AppHints can be configured during the init phase
+ */
+ error = pvr_apphint_device_register(psDeviceNode);
+ if (error != 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: failed to initialise device AppHints (%d)",
+ __func__, error));
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceDeinit
+@Description Common device related de-initialisation.
+@Input psDeviceNode The device node for which de-initialisation should
+ be performed
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceDeinit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ pvr_apphint_device_unregister(psDeviceNode);
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ PVRGpuTraceDeInitDevice(psDeviceNode);
+#endif
+
+ PVRDebugRemoveDebugFSEntries();
+
+#if defined(SUPPORT_NATIVE_FENCE_SYNC)
+ pvr_sync_deinit();
+#endif
+
+ pvr_fence_cleanup();
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceShutdown
+@Description Common device shutdown.
+@Input psDeviceNode The device node representing the device that should
+ be shutdown
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceShutdown(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ /*
+ * Disable the bridge to stop processes trying to use the driver
+ * after it has been shut down.
+ */
+ eError = LinuxBridgeBlockClientsAccess(IMG_TRUE);
+
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to suspend driver (%d)",
+ __func__, eError));
+ return;
+ }
+
+ (void) PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_OFF);
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceSuspend
+@Description Common device suspend.
+@Input psDeviceNode The device node representing the device that should
+ be suspended
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceSuspend(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /*
+ * LinuxBridgeBlockClientsAccess prevents processes from using the driver
+ * while it's suspended (this is needed for Android). Acquire the bridge
+ * lock first to ensure the driver isn't currently in use.
+ */
+
+ LinuxBridgeBlockClientsAccess(IMG_FALSE);
+
+ if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_OFF) != PVRSRV_OK)
+ {
+ LinuxBridgeUnblockClientsAccess();
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceResume
+@Description Common device resume.
+@Input psDeviceNode The device node representing the device that should
+ be resumed
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceResume(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ if (PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_ON) != PVRSRV_OK)
+ {
+ return -EINVAL;
+ }
+
+ LinuxBridgeUnblockClientsAccess();
+
+ /*
+ * Reprocess the device queues in case commands were blocked during
+ * suspend.
+ */
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE)
+ {
+ PVRSRVCheckStatus(NULL);
+ }
+
+ return 0;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceOpen
+@Description Common device open.
+@Input psDeviceNode The device node representing the device being
+ opened by a user mode process
+@Input psDRMFile The DRM file data that backs the file handle
+ returned to the user mode process
+@Return int 0 on success and a Linux error code otherwise
+*/ /***************************************************************************/
+int PVRSRVCommonDeviceOpen(PVRSRV_DEVICE_NODE *psDeviceNode,
+ struct drm_file *psDRMFile)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ ENV_CONNECTION_PRIVATE_DATA sPrivData;
+ void *pvConnectionData;
+ PVRSRV_ERROR eError;
+ int iErr = 0;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+
+ if (!psPVRSRVData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: No device data", __func__));
+ iErr = -ENODEV;
+ goto e1;
+ }
+
+	/*
+	 * If a previous attempt already set the state to bad,
+	 * there is no point in trying a second time, so get out
+	 */
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_BAD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver already in bad state. Device open failed.",
+ __func__));
+ iErr = -ENODEV;
+ goto e1;
+ }
+
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+ {
+ eError = PVRSRVDeviceInitialise(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise device (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ iErr = -ENODEV;
+ goto e1;
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ if (PVRGpuTraceEnabled())
+ {
+ PVRSRV_ERROR eError = PVRGpuTraceEnabledSetNoBridgeLock(psDeviceNode,
+ IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to initialise GPU event tracing"
+ " (%s)", PVRSRVGetErrorStringKM(eError)));
+ }
+
+			/* The functions below enable FTrace events, which in turn
+			 * execute HWPerf callbacks that set the appropriate filter
+			 * values.
+			 * Note: unfortunately the functions don't allow private data
+			 * to be passed, so they enable events for all of the devices
+			 * at once, which means this can happen more than once if
+			 * there is more than one device. */
+
+ /* single events can be enabled by calling trace_set_clr_event()
+ * with the event name, e.g.:
+ * trace_set_clr_event("rogue", "rogue_ufo_update", 1) */
+ if (trace_set_clr_event("gpu", NULL, 1))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"gpu\" event"
+ " group"));
+ }
+ else
+ {
+ PVR_LOG(("FTrace events from \"gpu\" group enabled"));
+ }
+ if (trace_set_clr_event("rogue", NULL, 1))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to enable \"rogue\" event"
+ " group"));
+ }
+ else
+ {
+ PVR_LOG(("FTrace events from \"rogue\" group enabled"));
+ }
+ }
+
+#endif
+ }
+
+ sPrivData.psDevNode = psDeviceNode;
+ sPrivData.psFile = psDRMFile->filp;
+
+	/*
+	 * Here we pass the file pointer, which will be passed through to our
+	 * OSConnectionPrivateDataInit function, where we save it so that we
+	 * can back-reference the file structure from its connection.
+	 */
+ eError = PVRSRVConnectionConnect(&pvConnectionData, (void *) &sPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ iErr = -ENOMEM;
+ goto e1;
+ }
+
+ psDRMFile->driver_priv = pvConnectionData;
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+
+out:
+ return iErr;
+e1:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+ goto out;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVCommonDeviceRelease
+@Description Common device release.
+@Input psDeviceNode The device node for the device that the given file
+ represents
+@Input psDRMFile The DRM file data that's being released
+@Return void
+*/ /***************************************************************************/
+void PVRSRVCommonDeviceRelease(PVRSRV_DEVICE_NODE *psDeviceNode,
+ struct drm_file *psDRMFile)
+{
+ void *pvConnectionData;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+
+ pvConnectionData = psDRMFile->driver_priv;
+ if (pvConnectionData)
+ {
+ PVRSRVConnectionDisconnect(pvConnectionData);
+ psDRMFile->driver_priv = NULL;
+ }
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/module_common.h b/drivers/gpu/drm/img-rogue/1.10/module_common.h
new file mode 100644
index 00000000000000..69d55e426d12ef
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/module_common.h
@@ -0,0 +1,67 @@
+/*************************************************************************/ /*!
+@File module_common.h
+@Title Common linux module setup header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _MODULE_COMMON_H_
+#define _MODULE_COMMON_H_
+
+/* DRVNAME is the name we use to register our driver. */
+#define DRVNAME PVR_LDM_DRIVER_REGISTRATION_NAME
+
+struct _PVRSRV_DEVICE_NODE_;
+struct drm_file;
+
+int PVRSRVCommonDriverInit(void);
+void PVRSRVCommonDriverDeinit(void);
+
+int PVRSRVCommonDeviceInit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+void PVRSRVCommonDeviceDeinit(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+void PVRSRVCommonDeviceShutdown(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceSuspend(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+int PVRSRVCommonDeviceResume(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+int PVRSRVCommonDeviceOpen(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ struct drm_file *psDRMFile);
+void PVRSRVCommonDeviceRelease(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode,
+ struct drm_file *psDRMFile);
+
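+/*
+ * Typical call flow from the OS glue (an illustrative sketch, not a
+ * mandated sequence; in this driver the DRM glue in pvr_drv.c is the
+ * expected caller):
+ *
+ *     PVRSRVCommonDriverInit();                   once, at module load
+ *     PVRSRVCommonDeviceInit(psDeviceNode);       per probed device
+ *         PVRSRVCommonDeviceOpen(node, file);     per client handle
+ *         PVRSRVCommonDeviceRelease(node, file);
+ *     PVRSRVCommonDeviceDeinit(psDeviceNode);     on device removal
+ *     PVRSRVCommonDriverDeinit();                 once, at module unload
+ */
+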
+#endif /* _MODULE_COMMON_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/mt8173/Makefile b/drivers/gpu/drm/img-rogue/1.10/mt8173/Makefile
new file mode 100644
index 00000000000000..6832ac1da4752a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mt8173/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(img_basedir)/mt8173
+
+pvrsrvkm_1_10-y += \
+ mt8173/mt8173_mfgsys.o \
+ mt8173/mt8173_sysconfig.o
diff --git a/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.c b/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.c
new file mode 100644
index 00000000000000..f32f525fa1ec48
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.c
@@ -0,0 +1,350 @@
+/*
+* Copyright (c) 2014 MediaTek Inc.
+* Author: Chiawen Lee <chiawen.lee@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/thermal.h>
+
+#include "mt8173_mfgsys.h"
+
+static const char * const top_mfg_clk_name[] = {
+ "mfg_mem_in_sel",
+ "mfg_axi_in_sel",
+ "top_axi",
+ "top_mem",
+};
+
+#define MAX_TOP_MFG_CLK ARRAY_SIZE(top_mfg_clk_name)
+
+#define REG_MFG_AXI BIT(0)
+#define REG_MFG_MEM BIT(1)
+#define REG_MFG_G3D BIT(2)
+#define REG_MFG_26M BIT(3)
+#define REG_MFG_ALL (REG_MFG_AXI | REG_MFG_MEM | REG_MFG_G3D | REG_MFG_26M)
+
+#define REG_MFG_CG_STA 0x00
+#define REG_MFG_CG_SET 0x04
+#define REG_MFG_CG_CLR 0x08
+
+static void mtk_mfg_clr_clock_gating(void __iomem *reg)
+{
+ writel(REG_MFG_ALL, reg + REG_MFG_CG_CLR);
+}
+
+static int mtk_mfg_prepare_clock(struct mtk_mfg *mfg)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+ ret = clk_prepare(mfg->top_clk[i]);
+ if (ret)
+ goto unwind;
+ }
+ ret = clk_prepare(mfg->top_mfg);
+ if (ret)
+ goto unwind;
+
+ return 0;
+unwind:
+ while (i--)
+ clk_unprepare(mfg->top_clk[i]);
+
+ return ret;
+}
+
+static void mtk_mfg_unprepare_clock(struct mtk_mfg *mfg)
+{
+ int i;
+
+ clk_unprepare(mfg->top_mfg);
+ for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--)
+ clk_unprepare(mfg->top_clk[i]);
+}
+
+static int mtk_mfg_enable_clock(struct mtk_mfg *mfg)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+ ret = clk_enable(mfg->top_clk[i]);
+		if (ret) {
+ dev_warn(mfg->dev, "Enabling %s failed with error %d\n",
+ __clk_get_name(mfg->top_clk[i]), ret);
+ goto unwind;
+ }
+ }
+ ret = clk_enable(mfg->top_mfg);
+	if (ret) {
+ dev_warn(mfg->dev, "Enabling %s failed with error %d\n",
+ __clk_get_name(mfg->top_mfg), ret);
+ goto unwind;
+ }
+ mtk_mfg_clr_clock_gating(mfg->reg_base);
+
+ return 0;
+unwind:
+ while (i--)
+ clk_disable(mfg->top_clk[i]);
+
+ return ret;
+}
+
+static void mtk_mfg_disable_clock(struct mtk_mfg *mfg)
+{
+ int i;
+
+ clk_disable(mfg->top_mfg);
+ for (i = MAX_TOP_MFG_CLK - 1; i >= 0; i--)
+ clk_disable(mfg->top_clk[i]);
+}
+
+static void mtk_mfg_enable_hw_apm(struct mtk_mfg *mfg)
+{
+ writel(0x003c3d4d, mfg->reg_base + 0x24);
+ writel(0x4d45440b, mfg->reg_base + 0x28);
+ writel(0x7a710184, mfg->reg_base + 0xe0);
+ writel(0x835f6856, mfg->reg_base + 0xe4);
+ writel(0x002b0234, mfg->reg_base + 0xe8);
+ writel(0x80000000, mfg->reg_base + 0xec);
+ writel(0x08000000, mfg->reg_base + 0xa0);
+}
+
+int mtk_mfg_enable(struct mtk_mfg *mfg)
+{
+ int ret;
+
+ ret = regulator_enable(mfg->vgpu);
+	if (ret) {
+ dev_err(mfg->dev, "Enable vgpu regulator failed with error %d\n", ret);
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(mfg->dev);
+	if (ret) {
+ dev_err(mfg->dev, "pm_runtime_get_sync failed with error %d\n", ret);
+ goto err_regulator_disable;
+ }
+
+ ret = mtk_mfg_enable_clock(mfg);
+	if (ret) {
+ dev_err(mfg->dev, "mtk_mfg_enable_clock failed with error %d\n", ret);
+ goto err_pm_runtime_put;
+ }
+
+ mtk_mfg_enable_hw_apm(mfg);
+
+ dev_dbg(mfg->dev, "Enabled\n");
+
+ return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put_sync(mfg->dev);
+err_regulator_disable:
+ regulator_disable(mfg->vgpu);
+ return ret;
+}
+
+void mtk_mfg_disable(struct mtk_mfg *mfg)
+{
+ mtk_mfg_disable_clock(mfg);
+ pm_runtime_put_sync(mfg->dev);
+ regulator_disable(mfg->vgpu);
+
+ dev_dbg(mfg->dev, "Disabled\n");
+}
+
+int mtk_mfg_freq_set(struct mtk_mfg *mfg, unsigned long freq)
+{
+ int ret;
+
+ ret = clk_prepare_enable(mfg->top_mfg);
+ if (ret) {
+		dev_err(mfg->dev, "prepare and enable top_mfg failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_set_parent(mfg->top_mfg, mfg->clk26m);
+ if (ret) {
+ dev_err(mfg->dev, "Set clk parent to clk26m failed, %d\n", ret);
+ goto unprepare_top_mfg;
+ }
+
+ ret = clk_set_rate(mfg->mmpll, freq);
+ if (ret)
+ dev_err(mfg->dev, "Set freq to %lu Hz failed, %d\n", freq, ret);
+
+ ret = clk_set_parent(mfg->top_mfg, mfg->top_mmpll);
+ if (ret)
+ dev_err(mfg->dev, "Set clk parent to top_mmpll failed, %d\n", ret);
+
+unprepare_top_mfg:
+ clk_disable_unprepare(mfg->top_mfg);
+
+ if (!ret)
+ dev_dbg(mfg->dev, "Freq set to %lu Hz\n", freq);
+
+ return ret;
+}
+
+int mtk_mfg_volt_set(struct mtk_mfg *mfg, int volt)
+{
+ int ret;
+
+ ret = regulator_set_voltage(mfg->vgpu, volt, volt);
+ if (ret != 0) {
+		dev_err(mfg->dev, "Set voltage to %d uV failed, %d\n",
+			volt, ret);
+ return ret;
+ }
+
+ dev_dbg(mfg->dev, "Voltage set to %d uV\n", volt);
+
+ return 0;
+}
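+
+/*
+ * Illustrative DVFS step sketch (values are assumptions for the example:
+ * 455 MHz matches the 455000 kHz entry in the power tables in
+ * mt8173_sysconfig.c, and mtk_mfg_volt_set() takes microvolts):
+ *
+ *     mtk_mfg_freq_set(mfg, 455000000);   parks top_mfg on clk26m while
+ *                                         mmpll is retuned, then switches
+ *                                         back to top_mmpll
+ *     mtk_mfg_volt_set(mfg, 1000000);     1.0 V
+ */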
+
+static int mtk_mfg_bind_device_resource(struct mtk_mfg *mfg)
+{
+ struct device *dev = mfg->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int i;
+ struct resource *res;
+
+ mfg->top_clk = devm_kcalloc(dev, MAX_TOP_MFG_CLK,
+ sizeof(*mfg->top_clk), GFP_KERNEL);
+ if (!mfg->top_clk)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ mfg->rgx_start = res->start;
+ mfg->rgx_size = resource_size(res);
+
+ mfg->rgx_irq = platform_get_irq_byname(pdev, "RGX");
+ if (mfg->rgx_irq < 0)
+ return mfg->rgx_irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ mfg->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mfg->reg_base))
+ return PTR_ERR(mfg->reg_base);
+
+ mfg->mmpll = devm_clk_get(dev, "mmpll_clk");
+ if (IS_ERR(mfg->mmpll)) {
+		dev_err(dev, "devm_clk_get mmpll_clk failed\n");
+ return PTR_ERR(mfg->mmpll);
+ }
+
+ for (i = 0; i < MAX_TOP_MFG_CLK; i++) {
+ mfg->top_clk[i] = devm_clk_get(dev, top_mfg_clk_name[i]);
+ if (IS_ERR(mfg->top_clk[i])) {
+			dev_err(dev, "devm_clk_get %s failed\n",
+ top_mfg_clk_name[i]);
+ return PTR_ERR(mfg->top_clk[i]);
+ }
+ }
+
+ mfg->top_mfg = devm_clk_get(dev, "top_mfg");
+ if (IS_ERR(mfg->top_mfg)) {
+		dev_err(dev, "devm_clk_get top_mfg failed\n");
+ return PTR_ERR(mfg->top_mfg);
+ }
+
+ mfg->top_mmpll = devm_clk_get(dev, "top_mmpll");
+ if (IS_ERR(mfg->top_mmpll)) {
+		dev_err(dev, "devm_clk_get top_mmpll failed\n");
+ return PTR_ERR(mfg->top_mmpll);
+ }
+
+ mfg->clk26m = devm_clk_get(dev, "clk26m");
+ if (IS_ERR(mfg->clk26m)) {
+		dev_err(dev, "devm_clk_get clk26m failed\n");
+ return PTR_ERR(mfg->clk26m);
+ }
+
+ mfg->tz = thermal_zone_get_zone_by_name("cpu_thermal");
+ if (IS_ERR(mfg->tz)) {
+ dev_err(dev, "Failed to get cpu_thermal zone\n");
+ return PTR_ERR(mfg->tz);
+ }
+
+ mfg->vgpu = devm_regulator_get(dev, "mfgsys-power");
+ if (IS_ERR(mfg->vgpu))
+ return PTR_ERR(mfg->vgpu);
+
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static void mtk_mfg_unbind_device_resource(struct mtk_mfg *mfg)
+{
+ struct device *dev = mfg->dev;
+
+ pm_runtime_disable(dev);
+}
+
+struct mtk_mfg *mtk_mfg_create(struct device *dev)
+{
+ int err;
+ struct mtk_mfg *mfg;
+
+ mtk_mfg_debug("mtk_mfg_create Begin\n");
+
+ mfg = devm_kzalloc(dev, sizeof(*mfg), GFP_KERNEL);
+ if (!mfg)
+ return ERR_PTR(-ENOMEM);
+ mfg->dev = dev;
+
+ err = mtk_mfg_bind_device_resource(mfg);
+ if (err != 0)
+ return ERR_PTR(err);
+
+ mutex_init(&mfg->set_power_state);
+
+ err = mtk_mfg_prepare_clock(mfg);
+ if (err)
+ goto err_unbind_resource;
+
+ mtk_mfg_debug("mtk_mfg_create End\n");
+
+ return mfg;
+err_unbind_resource:
+ mtk_mfg_unbind_device_resource(mfg);
+
+ return ERR_PTR(err);
+}
+
+void mtk_mfg_destroy(struct mtk_mfg *mfg)
+{
+ mtk_mfg_unprepare_clock(mfg);
+
+ mtk_mfg_unbind_device_resource(mfg);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.h b/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.h
new file mode 100644
index 00000000000000..c8f07e6f7a266c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_mfgsys.h
@@ -0,0 +1,63 @@
+/*
+* Copyright (c) 2014 MediaTek Inc.
+* Author: Chiawen Lee <chiawen.lee@mediatek.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License version 2 as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*/
+
+#ifndef MT8173_MFGSYS_H
+#define MT8173_MFGSYS_H
+
+#include <linux/device.h>
+
+/* timeout interval for DVFS detection, in ms */
+#define MTK_DVFS_SWITCH_INTERVAL 300
+
+#define ENABLE_MTK_MFG_DEBUG 0
+
+#if ENABLE_MTK_MFG_DEBUG
+#define mtk_mfg_debug(fmt, args...) pr_info("[MFG]" fmt, ##args)
+#else
+#define mtk_mfg_debug(fmt, args...) do { } while (0)
+#endif
+
+struct mtk_mfg {
+ struct device *dev;
+
+ struct clk **top_clk;
+ void __iomem *reg_base;
+
+ resource_size_t rgx_start;
+ resource_size_t rgx_size;
+ int rgx_irq;
+
+	/* mutex protecting power state transitions */
+ struct mutex set_power_state;
+
+	/* for GPU device frequency/voltage updates */
+ struct regulator *vgpu;
+ struct clk *mmpll;
+ struct clk *top_mfg;
+ struct clk *top_mmpll;
+ struct clk *clk26m;
+
+ struct thermal_zone_device *tz;
+};
+
+struct mtk_mfg *mtk_mfg_create(struct device *dev);
+void mtk_mfg_destroy(struct mtk_mfg *mfg);
+
+int mtk_mfg_enable(struct mtk_mfg *mfg);
+void mtk_mfg_disable(struct mtk_mfg *mfg);
+
+int mtk_mfg_freq_set(struct mtk_mfg *mfg, unsigned long freq);
+int mtk_mfg_volt_set(struct mtk_mfg *mfg, int volt);
+
+#endif /* MT8173_MFGSYS_H*/
diff --git a/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_sysconfig.c b/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_sysconfig.c
new file mode 100644
index 00000000000000..aab7efb548b360
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mt8173/mt8173_sysconfig.c
@@ -0,0 +1,521 @@
+/*************************************************************************/ /*!
+@File
+@Title System Configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System Configuration functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/thermal.h>
+#include <linux/devfreq_cooling.h>
+
+#include "physheap.h"
+#include "pvrsrv_device.h"
+#include "rgxdevice.h"
+#include "syscommon.h"
+
+#include "mt8173_mfgsys.h"
+
+#define SYS_RGX_ACTIVE_POWER_LATENCY_MS 10
+#define RGX_HW_CORE_CLOCK_SPEED 395000000
+
+/* Setup RGX specific timing data */
+static RGX_TIMING_INFORMATION gsRGXTimingInfo = {
+ .ui32CoreClockSpeed = RGX_HW_CORE_CLOCK_SPEED,
+ .bEnableActivePM = IMG_TRUE,
+ .ui32ActivePMLatencyms = SYS_RGX_ACTIVE_POWER_LATENCY_MS,
+ .bEnableRDPowIsland = IMG_TRUE,
+};
+
+static RGX_DATA gsRGXData = {
+ .psRGXTimingInfo = &gsRGXTimingInfo,
+};
+
+static PVRSRV_DEVICE_CONFIG gsDevice;
+
+static RGXFWIF_BIFTILINGMODE geBIFTilingMode = RGXFWIF_BIFTILINGMODE_256x16;
+
+static IMG_UINT32 gauiBIFTilingHeapXStrides[RGXFWIF_NUM_BIF_TILING_CONFIGS] = {
+ 0, /* BIF tiling heap 1 x-stride */
+ 1, /* BIF tiling heap 2 x-stride */
+ 2, /* BIF tiling heap 3 x-stride */
+ 3 /* BIF tiling heap 4 x-stride */
+};
+
+typedef struct
+{
+ IMG_UINT32 ui32IRQ;
+ PFN_LISR pfnLISR;
+ void *pvLISRData;
+} LISR_WRAPPER_DATA;
+
+static irqreturn_t MTKLISRWrapper(int iIrq, void *pvData)
+{
+ LISR_WRAPPER_DATA *psWrapperData = pvData;
+
+ if (psWrapperData->pfnLISR(psWrapperData->pvLISRData))
+ {
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+/*
+ * CPU to Device physical address translation
+ */
+static
+void UMAPhysHeapCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1) {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+}
+
+/*
+ * Device to CPU physical address translation
+ */
+static
+void UMAPhysHeapDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = psDevPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1) {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ psCpuPAddr[ui32Idx].uiAddr = psDevPAddr[ui32Idx].uiAddr;
+ }
+}
+
+static PHYS_HEAP_FUNCTIONS gsPhysHeapFuncs = {
+ .pfnCpuPAddrToDevPAddr = UMAPhysHeapCpuPAddrToDevPAddr,
+ .pfnDevPAddrToCpuPAddr = UMAPhysHeapDevPAddrToCpuPAddr,
+};
+
+static PHYS_HEAP_REGION gsPhysHeapRegion = {
+ .sStartAddr.uiAddr = 0,
+ .uiSize = 0,
+};
+
+static PHYS_HEAP_CONFIG gsPhysHeapConfig = {
+ .ui32PhysHeapID = 0,
+ .pszPDumpMemspaceName = "SYSMEM",
+ .eType = PHYS_HEAP_TYPE_UMA,
+ .psMemFuncs = &gsPhysHeapFuncs,
+ .pasRegions = &gsPhysHeapRegion,
+ .ui32NumOfRegions = 1,
+};
+
+static PVRSRV_ERROR MTKSysDevPrePowerState(
+ IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ struct mtk_mfg *mfg = hSysData;
+
+ mtk_mfg_debug("MTKSysDevPrePowerState (%d->%d), bForced = %d\n",
+ eCurrentPowerState, eNewPowerState, bForced);
+
+ mutex_lock(&mfg->set_power_state);
+
+ if ((PVRSRV_DEV_POWER_STATE_OFF == eNewPowerState) &&
+ (PVRSRV_DEV_POWER_STATE_ON == eCurrentPowerState))
+ mtk_mfg_disable(mfg);
+
+ mutex_unlock(&mfg->set_power_state);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR MTKSysDevPostPowerState(
+ IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ struct mtk_mfg *mfg = hSysData;
+ PVRSRV_ERROR ret;
+ int retmfg = 0;
+
+ mtk_mfg_debug("MTKSysDevPostPowerState (%d->%d)\n",
+ eCurrentPowerState, eNewPowerState);
+
+ mutex_lock(&mfg->set_power_state);
+
+ if ((PVRSRV_DEV_POWER_STATE_ON == eNewPowerState) &&
+ (PVRSRV_DEV_POWER_STATE_OFF == eCurrentPowerState)) {
+ retmfg = mtk_mfg_enable(mfg);
+
+ if (retmfg) {
+ PVR_DPF((PVR_DBG_ERROR, "mtk_mfg_enable failed with error (%d)", retmfg));
+ ret = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+ goto done;
+ }
+ }
+
+ ret = PVRSRV_OK;
+done:
+ mutex_unlock(&mfg->set_power_state);
+
+ return ret;
+}
+
+#ifdef PVR_DVFS
+
+#define FALLBACK_STATIC_TEMPERATURE 65000
+
+/* Temperatures on power over-temp-and-voltage curve (C) */
+static const int vt_temperatures[] = { 25, 45, 65, 85, 105 };
+
+/* Voltages on power over-temp-and-voltage curve (mV) */
+static const int vt_voltages[] = { 900, 1000, 1130 };
+
+#define POWER_TABLE_NUM_TEMP ARRAY_SIZE(vt_temperatures)
+#define POWER_TABLE_NUM_VOLT ARRAY_SIZE(vt_voltages)
+
+static const unsigned int
+power_table[POWER_TABLE_NUM_VOLT][POWER_TABLE_NUM_TEMP] = {
+ /* 25 45 65 85 105 */
+ { 14540, 35490, 60420, 120690, 230000 }, /* 900 mV */
+ { 21570, 41910, 82380, 159140, 298620 }, /* 1000 mV */
+ { 32320, 72950, 111320, 209290, 382700 }, /* 1130 mV */
+};
+
+/* Frequency and power in kHz and mW respectively */
+static const int f_range[] = {253500, 299000, 396500, 455000, 494000, 598000};
+static const IMG_UINT32 max_dynamic_power[] = {612, 722, 957, 1100, 1194, 1445};
+
+static u32 interpolate(int value, const int *x, const unsigned int *y, int len)
+{
+ u64 tmp64;
+ u32 dx;
+ u32 dy;
+ int i, ret;
+
+ if (value <= x[0])
+ return y[0];
+ if (value >= x[len - 1])
+ return y[len - 1];
+
+ for (i = 1; i < len - 1; i++) {
+ /* If value is identical, no need to interpolate */
+ if (value == x[i])
+ return y[i];
+ if (value < x[i])
+ break;
+ }
+
+ /* Linear interpolation between the two (x,y) points */
+ dy = y[i] - y[i - 1];
+ dx = x[i] - x[i - 1];
+
+ tmp64 = value - x[i - 1];
+ tmp64 *= dy;
+ do_div(tmp64, dx);
+ ret = y[i - 1] + tmp64;
+
+ return ret;
+}
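+
+/*
+ * Worked example for interpolate() (using the 900 mV row of power_table
+ * above): a reading of 55 C falls between the 45 C and 65 C columns, so
+ *
+ *     power = 35490 + (60420 - 35490) * (55 - 45) / (65 - 45)
+ *           = 35490 + 24930 * 10 / 20
+ *           = 47955
+ *
+ * (in uW here; mtk_mfg_get_static_power() divides by 1000 to get mW).
+ */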
+
+static unsigned long mtk_mfg_get_static_power(unsigned long voltage)
+{
+ struct mtk_mfg *mfg = gsDevice.hSysData;
+ struct thermal_zone_device *tz = mfg->tz;
+ unsigned long power;
+ int temperature = FALLBACK_STATIC_TEMPERATURE;
+ int low_idx = 0, high_idx = POWER_TABLE_NUM_VOLT - 1;
+ int i;
+
+ if (tz->ops->get_temp(tz, &temperature))
+ dev_warn(mfg->dev, "Failed to read temperature\n");
+	temperature /= 1000; /* millidegrees C to degrees C */
+
+ for (i = 0; i < POWER_TABLE_NUM_VOLT; i++) {
+ if (voltage <= vt_voltages[POWER_TABLE_NUM_VOLT - 1 - i])
+ high_idx = POWER_TABLE_NUM_VOLT - 1 - i;
+
+ if (voltage >= vt_voltages[i])
+ low_idx = i;
+ }
+
+ if (low_idx == high_idx) {
+ power = interpolate(temperature,
+ vt_temperatures,
+ &power_table[low_idx][0],
+ POWER_TABLE_NUM_TEMP);
+ } else {
+ unsigned long dvt =
+ vt_voltages[high_idx] - vt_voltages[low_idx];
+ unsigned long power1, power2;
+
+ power1 = interpolate(temperature,
+ vt_temperatures,
+ &power_table[high_idx][0],
+ POWER_TABLE_NUM_TEMP);
+
+ power2 = interpolate(temperature,
+ vt_temperatures,
+ &power_table[low_idx][0],
+ POWER_TABLE_NUM_TEMP);
+
+ power = (power1 - power2) * (voltage - vt_voltages[low_idx]);
+ do_div(power, dvt);
+ power += power2;
+ }
+
+	/* convert from uW to mW */
+ do_div(power, 1000);
+
+ mtk_mfg_debug("mtk_mfg_get_static_power: %lu at Temperature %d\n",
+ power, temperature);
+ return power;
+}
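+
+/*
+ * Worked example for mtk_mfg_get_static_power() at 950 mV and 55 C
+ * (assumed inputs, chosen to exercise the bilinear case): the voltage
+ * straddles the 900 mV and 1000 mV rows, so
+ *
+ *     power1 (1000 mV row) = 41910 + (82380 - 41910) * 10 / 20 = 62145
+ *     power2 ( 900 mV row) = 35490 + (60420 - 35490) * 10 / 20 = 47955
+ *     power = 47955 + (62145 - 47955) * (950 - 900) / 100 = 55050 uW
+ *
+ * which the final division by 1000 turns into 55 mW.
+ */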
+
+static unsigned long mtk_mfg_get_dynamic_power(unsigned long freq,
+ unsigned long voltage)
+{
+ #define NUM_RANGE ARRAY_SIZE(f_range)
+	/* Frequency and power in kHz and mW respectively */
+ IMG_INT32 i, low_idx = 0, high_idx = NUM_RANGE - 1;
+ IMG_UINT32 power;
+
+ for (i = 0; i < NUM_RANGE; i++) {
+ if (freq <= f_range[NUM_RANGE - 1 - i])
+ high_idx = NUM_RANGE - 1 - i;
+
+ if (freq >= f_range[i])
+ low_idx = i;
+ }
+
+ if (low_idx == high_idx) {
+ power = max_dynamic_power[low_idx];
+ } else {
+ IMG_UINT32 f_interval = f_range[high_idx] - f_range[low_idx];
+ IMG_UINT32 p_interval = max_dynamic_power[high_idx] -
+ max_dynamic_power[low_idx];
+
+ power = p_interval * (freq - f_range[low_idx]);
+ do_div(power, f_interval);
+ power += max_dynamic_power[low_idx];
+ }
+
+ power = (IMG_UINT32)(((IMG_UINT64)power * voltage * voltage)/1000000);
+
+ return power;
+ #undef NUM_RANGE
+}
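+
+/*
+ * Worked example for mtk_mfg_get_dynamic_power(): at freq = 396500 kHz
+ * the frequency hits a table entry exactly, giving 957 mW at the nominal
+ * 1000 mV; quadratic voltage scaling at 900 mV then yields
+ *
+ *     957 * 900 * 900 / 1000000 = 775 mW
+ */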
+
+static struct devfreq_cooling_power sPowerOps = {
+ .get_static_power = mtk_mfg_get_static_power,
+ .get_dynamic_power = mtk_mfg_get_dynamic_power,
+};
+
+static void SetFrequency(IMG_UINT32 freq)
+{
+ struct mtk_mfg *mfg = gsDevice.hSysData;
+
+ /* freq is in Hz */
+ mtk_mfg_freq_set(mfg, freq);
+}
+
+static void SetVoltage(IMG_UINT32 volt)
+{
+ struct mtk_mfg *mfg = gsDevice.hSysData;
+
+ mtk_mfg_volt_set(mfg, volt);
+}
+#endif
+
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig)
+{
+ struct device *dev = pvOSDevice;
+ struct mtk_mfg *mfg;
+
+ if (gsDevice.pvOSDevice)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ mfg = mtk_mfg_create(dev);
+ if (IS_ERR(mfg)) {
+ if (PTR_ERR(mfg) == -EPROBE_DEFER)
+ return PVRSRV_ERROR_PROBE_DEFER;
+ else
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ dma_set_mask(dev, DMA_BIT_MASK(33));
+
+ /* Make sure everything we don't care about is set to 0 */
+ memset(&gsDevice, 0, sizeof(gsDevice));
+
+ /* Setup RGX device */
+ gsDevice.pvOSDevice = pvOSDevice;
+ gsDevice.pszName = "mt8173";
+ gsDevice.pszVersion = NULL;
+
+ /* Device's physical heaps */
+ gsDevice.pasPhysHeaps = &gsPhysHeapConfig;
+ gsDevice.ui32PhysHeapCount = 1;
+
+ gsDevice.ui32IRQ = mfg->rgx_irq;
+
+ gsDevice.sRegsCpuPBase.uiAddr = mfg->rgx_start;
+ gsDevice.ui32RegsSize = mfg->rgx_size;
+
+#ifdef PVR_DVFS
+ gsDevice.sDVFS.sDVFSDeviceCfg.bIdleReq = IMG_TRUE;
+ gsDevice.sDVFS.sDVFSDeviceCfg.pfnSetFrequency = SetFrequency;
+ gsDevice.sDVFS.sDVFSDeviceCfg.pfnSetVoltage = SetVoltage;
+ gsDevice.sDVFS.sDVFSDeviceCfg.ui32PollMs = MTK_DVFS_SWITCH_INTERVAL;
+ gsDevice.sDVFS.sDVFSDeviceCfg.psPowerOps = &sPowerOps;
+
+ gsDevice.sDVFS.sDVFSGovernorCfg.ui32UpThreshold = 90;
+ gsDevice.sDVFS.sDVFSGovernorCfg.ui32DownDifferential = 10;
+#endif
+
+ /* Device's physical heap IDs */
+ gsDevice.aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = 0;
+ gsDevice.aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = 0;
+ gsDevice.aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = 0;
+
+ gsDevice.eBIFTilingMode = geBIFTilingMode;
+ gsDevice.pui32BIFTilingHeapConfigs = gauiBIFTilingHeapXStrides;
+ gsDevice.ui32BIFTilingHeapCount = ARRAY_SIZE(gauiBIFTilingHeapXStrides);
+
+ /* power management on HW system */
+ gsDevice.pfnPrePowerState = MTKSysDevPrePowerState;
+ gsDevice.pfnPostPowerState = MTKSysDevPostPowerState;
+
+ /* clock frequency */
+ gsDevice.pfnClockFreqGet = NULL;
+
+ gsDevice.hDevData = &gsRGXData;
+ gsDevice.hSysData = mfg;
+
+ *ppsDevConfig = &gsDevice;
+
+ return PVRSRV_OK;
+}
+
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ struct mtk_mfg *mfg = psDevConfig->hSysData;
+
+ mtk_mfg_destroy(mfg);
+
+ psDevConfig->pvOSDevice = NULL;
+}
+
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ LISR_WRAPPER_DATA *psWrapperData;
+
+ PVR_UNREFERENCED_PARAMETER(hSysData);
+
+ psWrapperData = kmalloc(sizeof(*psWrapperData), GFP_KERNEL);
+ if (!psWrapperData)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psWrapperData->ui32IRQ = ui32IRQ;
+ psWrapperData->pfnLISR = pfnLISR;
+ psWrapperData->pvLISRData = pvData;
+
+ if (request_irq(ui32IRQ, MTKLISRWrapper, IRQF_TRIGGER_LOW, pszName,
+ psWrapperData))
+ {
+ kfree(psWrapperData);
+
+ return PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER;
+ }
+
+ *phLISRData = (IMG_HANDLE) psWrapperData;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ LISR_WRAPPER_DATA *psWrapperData = hLISRData;
+
+ free_irq(psWrapperData->ui32IRQ, psWrapperData);
+
+	kfree(psWrapperData); /* allocated with kmalloc() in SysInstallDeviceLISR() */
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(pfnDumpDebugPrintf);
+ PVR_UNREFERENCED_PARAMETER(pvDumpDebugFile);
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/mt8173/sysinfo.h b/drivers/gpu/drm/img-rogue/1.10/mt8173/sysinfo.h
new file mode 100644
index 00000000000000..84c7b4a2f7032d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/mt8173/sysinfo.h
@@ -0,0 +1,47 @@
+/*************************************************************************/ /*!
+@File
+@Title System Description Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSINFO_H__)
+#define __SYSINFO_H__
+
+/*!< System-specific poll/timeout details */
+#if defined(PVR_LINUX_USING_WORKQUEUES)
+#define MAX_HW_TIME_US (1000000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (20000)
+#else
+#define MAX_HW_TIME_US (500000)
+#define DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT (10000)
+#define DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT (3600000)
+#define WAIT_TRY_COUNT (10000)
+#endif
+
+#define SYS_RGX_OF_COMPATIBLE "mediatek,mt8173-gpu"
+
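+/*
+ * Illustrative sketch of the device-tree node this compatible string is
+ * matched against (the interrupt, clock and supply names follow what
+ * mt8173_mfgsys.c requests; addresses and phandles are placeholders, the
+ * real values live in the platform .dtsi):
+ *
+ *     gpu@... {
+ *         compatible = "mediatek,mt8173-gpu";
+ *         reg = <... RGX registers ...>, <... MFG top registers ...>;
+ *         interrupt-names = "RGX";
+ *         clock-names = "mmpll_clk", "mfg_mem_in_sel", "mfg_axi_in_sel",
+ *                       "top_axi", "top_mem", "top_mfg", "top_mmpll",
+ *                       "clk26m";
+ *         mfgsys-power-supply = <&...>;
+ *     };
+ */
+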
+#endif /* !defined(__SYSINFO_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/opaque_types.h b/drivers/gpu/drm/img-rogue/1.10/opaque_types.h
new file mode 100644
index 00000000000000..766bc22ea418c4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/opaque_types.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title Opaque Types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines opaque types for various services types
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef SERVICES_OPAQUE_TYPES_H
+#define SERVICES_OPAQUE_TYPES_H
+
+#include "img_defs.h"
+#include "img_types.h"
+
+typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE;
+typedef const struct _PVRSRV_DEVICE_NODE_ *PCPVRSRV_DEVICE_NODE;
+
+#endif /* SERVICES_OPAQUE_TYPES_H */
+
+/******************************************************************************
+ End of file (opaque_types.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/os_cpu_cache.h b/drivers/gpu/drm/img-rogue/1.10/os_cpu_cache.h
new file mode 100644
index 00000000000000..1c476322646bc3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/os_cpu_cache.h
@@ -0,0 +1,73 @@
+/*************************************************************************/ /*!
+@File
+@Title OS and CPU d-cache maintenance mechanisms
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines for cache management which are visible internally only
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OS_CPU_CACHE_H_
+#define _OS_CPU_CACHE_H_
+
+#include "info_page_defs.h"
+
+#define PVRSRV_CACHE_OP_GLOBAL 0x4 /*!< Extends cache_ops.h with explicit global flush w/ invalidate */
+#define PVRSRV_CACHE_OP_TIMELINE 0x8 /*!< Request SW_SYNC timeline notification when executed */
+
+#define CACHEFLUSH_ISA_X86 0x1 /*!< x86/x64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_ARM64 0x2 /*!< Aarch64 specific UM range-based cache flush */
+#define CACHEFLUSH_ISA_GENERIC 0x3 /*!< Other ISA's without UM range-based cache flush */
+#ifndef CACHEFLUSH_ISA_TYPE
+ #if defined(__i386__) || defined(__x86_64__)
+ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_X86
+ #elif defined(__arm64__) || defined(__aarch64__)
+ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_ARM64
+ #else
+ #define CACHEFLUSH_ISA_TYPE CACHEFLUSH_ISA_GENERIC
+ #endif
+#endif
+
+#if (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_X86) || (CACHEFLUSH_ISA_TYPE == CACHEFLUSH_ISA_ARM64)
+#define CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH /*!< x86/x86_64/ARM64 supports user-mode d-cache flush */
+#endif
+
+#if !defined(__mips__)
+#define CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH /*!< MIPS32/64 has no concept of a global d-cache flush */
+#endif
+
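+/*
+ * Sketch of how these guards are typically consumed by cache maintenance
+ * code (illustrative only; the real routines live elsewhere in the
+ * services sources):
+ *
+ * #if defined(CACHEFLUSH_ISA_SUPPORTS_UM_FLUSH)
+ *     issue the range-based flush directly from the user-mode mapping
+ *     (e.g. clflush on x86, "dc civac" on ARM64);
+ * #elif defined(CACHEFLUSH_ISA_SUPPORTS_GLOBAL_FLUSH)
+ *     fall back to a kernel range-based or global d-cache flush;
+ * #endif
+ */
+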
+#endif /* _OS_CPU_CACHE_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/os_srvinit_param.h b/drivers/gpu/drm/img-rogue/1.10/os_srvinit_param.h
new file mode 100644
index 00000000000000..a5dfe7c0cf60ab
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/os_srvinit_param.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File
+@Title Services initialisation parameters header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services initialisation parameter support for the Linux kernel.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __OS_SRVINIT_PARAM_H__
+#define __OS_SRVINIT_PARAM_H__
+
+#include "km_apphint_defs.h"
+
+#define SrvInitParamOpen() NULL
+#define SrvInitParamClose(pvState) ((void)(pvState))
+
+#define SrvInitParamGetBOOL(state, name, value) \
+ (void) pvr_apphint_get_bool(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetUINT32(state, name, value) \
+ (void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetUINT64(state, name, value) \
+ (void) pvr_apphint_get_uint64(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetSTRING(state, name, buffer, size) \
+ (void) pvr_apphint_get_string(APPHINT_ID_ ## name, buffer, size)
+
+#define SrvInitParamGetUINT32BitField(state, name, value) \
+ (void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+#define SrvInitParamGetUINT32List(state, name, value) \
+ (void) pvr_apphint_get_uint32(APPHINT_ID_ ## name, &value)
+
+
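+/*
+ * Illustrative usage sketch ("EnableHWR" stands in for any AppHint name
+ * declared in km_apphint_defs.h; each macro resolves the name to an
+ * APPHINT_ID_<name> enum value and queries the AppHint backend):
+ *
+ *     void *pvState = SrvInitParamOpen();
+ *     IMG_UINT32 ui32Value = 0;
+ *
+ *     SrvInitParamGetUINT32(pvState, EnableHWR, ui32Value);
+ *     SrvInitParamClose(pvState);
+ */
+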
+#endif /* __OS_SRVINIT_PARAM_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/osconnection_server.c b/drivers/gpu/drm/img-rogue/1.10/osconnection_server.c
new file mode 100644
index 00000000000000..42a882027ad0fb
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osconnection_server.c
@@ -0,0 +1,155 @@
+/*************************************************************************/ /*!
+@File
+@Title Linux specific per process data functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "connection_server.h"
+#include "osconnection_server.h"
+
+#include "env_connection.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+
+#include <linux/sched.h>
+
+#if defined(SUPPORT_ION)
+#include <linux/err.h>
+#include PVR_ANDROID_ION_HEADER
+
+/*
+	The ion device (the base object for all requests)
+	is created by the system and we acquire it via
+	Linux-specific functions provided by the system layer.
+*/
+#include "ion_sys.h"
+#endif
+
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+ ENV_CONNECTION_PRIVATE_DATA *psPrivData = pvOSData;
+ ENV_CONNECTION_DATA *psEnvConnection;
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ ENV_ION_CONNECTION_DATA *psIonConnection;
+#endif
+
+ *phOsPrivateData = OSAllocZMem(sizeof(ENV_CONNECTION_DATA));
+
+ if (*phOsPrivateData == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocZMem failed", __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psEnvConnection = (ENV_CONNECTION_DATA *)*phOsPrivateData;
+
+ psEnvConnection->owner = current->tgid;
+
+ /* Save the pointer to our struct file */
+ psEnvConnection->psFile = psPrivData->psFile;
+ psEnvConnection->psDevNode = psPrivData->psDevNode;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ psIonConnection = (ENV_ION_CONNECTION_DATA *)OSAllocZMem(sizeof(ENV_ION_CONNECTION_DATA));
+ if (psIonConnection == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: OSAllocZMem failed", __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psEnvConnection->psIonData = psIonConnection;
+	/*
+	 We can have more than one connection per process, so we need more
+	 than the PID to form a unique name
+	 */
+ psEnvConnection->psIonData->psIonDev = IonDevAcquire();
+ OSSNPrintf(psEnvConnection->psIonData->azIonClientName, ION_CLIENT_NAME_SIZE, "pvr_ion_client-%p-%d", *phOsPrivateData, OSGetCurrentClientProcessIDKM());
+ psEnvConnection->psIonData->psIonClient =
+ ion_client_create(psEnvConnection->psIonData->psIonDev,
+ psEnvConnection->psIonData->azIonClientName);
+
+ if (IS_ERR_OR_NULL(psEnvConnection->psIonData->psIonClient))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSConnectionPrivateDataInit: Couldn't create "
+ "ion client for per connection data"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+#endif /* SUPPORT_ION && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0)) */
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+ ENV_CONNECTION_DATA *psEnvConnection;
+
+ if (hOsPrivateData == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ psEnvConnection = hOsPrivateData;
+
+#if defined(SUPPORT_ION) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 12, 0))
+ PVR_ASSERT(psEnvConnection->psIonData != NULL);
+
+ PVR_ASSERT(psEnvConnection->psIonData->psIonClient != NULL);
+ ion_client_destroy(psEnvConnection->psIonData->psIonClient);
+
+ IonDevRelease(psEnvConnection->psIonData->psIonDev);
+ OSFreeMem(psEnvConnection->psIonData);
+#endif
+
+ OSFreeMem(hOsPrivateData);
+	/* Not NULLing the pointer; it is a copy on the stack */
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_DEVICE_NODE *OSGetDevData(CONNECTION_DATA *psConnection)
+{
+ ENV_CONNECTION_DATA *psEnvConnection;
+
+ psEnvConnection = PVRSRVConnectionPrivateData(psConnection);
+ PVR_ASSERT(psEnvConnection);
+
+ return psEnvConnection->psDevNode;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/osconnection_server.h b/drivers/gpu/drm/img-rogue/1.10/osconnection_server.h
new file mode 100644
index 00000000000000..192ef584821d2e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osconnection_server.h
@@ -0,0 +1,120 @@
+/**************************************************************************/ /*!
+@File
+@Title Server side connection management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description API for OS specific callbacks from server side connection
+ management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#ifndef _OSCONNECTION_SERVER_H_
+#define _OSCONNECTION_SERVER_H_
+
+#include "handle.h"
+
+
+#if defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS)
+PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData);
+PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData);
+
+PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase);
+
+PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection);
+
+#else /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataInit)
+#endif
+/*************************************************************************/ /*!
+@Function OSConnectionPrivateDataInit
+@Description Allocates and initialises any OS-specific private data
+ relating to a connection.
+ Called from PVRSRVConnectionConnect().
+@Input pvOSData pointer to any OS private data
+@Output phOsPrivateData handle to the created connection
+ private data
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataInit(IMG_HANDLE *phOsPrivateData, void *pvOSData)
+{
+ PVR_UNREFERENCED_PARAMETER(phOsPrivateData);
+ PVR_UNREFERENCED_PARAMETER(pvOSData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionPrivateDataDeInit)
+#endif
+/*************************************************************************/ /*!
+@Function OSConnectionPrivateDataDeInit
+@Description Frees previously allocated OS-specific private data
+ relating to a connection.
+@Input hOsPrivateData handle to the connection private data
+ to be freed
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSConnectionPrivateDataDeInit(IMG_HANDLE hOsPrivateData)
+{
+ PVR_UNREFERENCED_PARAMETER(hOsPrivateData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSConnectionSetHandleOptions)
+#endif
+static INLINE PVRSRV_ERROR OSConnectionSetHandleOptions(PVRSRV_HANDLE_BASE *psHandleBase)
+{
+ PVR_UNREFERENCED_PARAMETER(psHandleBase);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(OSGetDevData)
+#endif
+static INLINE PVRSRV_DEVICE_NODE* OSGetDevData(CONNECTION_DATA *psConnection)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return NULL;
+}
+#endif /* defined(__linux__) || defined(__QNXNTO__) || defined(INTEGRITY_OS) */
+
+
+#endif /* _OSCONNECTION_SERVER_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/osfunc.c b/drivers/gpu/drm/img-rogue/1.10/osfunc.c
new file mode 100644
index 00000000000000..750a1f8f0cc07f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osfunc.c
@@ -0,0 +1,1885 @@
+/*************************************************************************/ /*!
+@File
+@Title Environment related functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/div64.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/hugetlb.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/string.h>
+#include <linux/freezer.h>
+#include <asm/hardirq.h>
+#include <asm/tlbflush.h>
+#include <linux/timer.h>
+#include <linux/capability.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/utsname.h>
+#include <asm/atomic.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+#include <linux/sched/clock.h>
+#include <linux/sched/signal.h>
+#else
+#include <linux/sched.h>
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0)) */
+
+#include "log2.h"
+#include "osfunc.h"
+#include "cache_km.h"
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pvr_debugfs.h"
+#include "event.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvr_debug.h"
+#include "pvr_bridge_k.h"
+#include "pvrsrv_memallocflags.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#include "physmem_osmem_linux.h"
+#include "dma_support.h"
+#include "kernel_compatibility.h"
+
+#if defined(VIRTUAL_PLATFORM)
+#define EVENT_OBJECT_TIMEOUT_US (120000000ULL)
+#else
+#if defined(EMULATOR) || defined(TC_APOLLO_TCF5)
+#define EVENT_OBJECT_TIMEOUT_US (2000000ULL)
+#else
+#define EVENT_OBJECT_TIMEOUT_US (100000ULL)
+#endif /* EMULATOR */
+#endif
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+/*
+ * Main driver lock, used to ensure driver code is single threaded. There are
+ * some places where this lock must not be taken, such as in the mmap related
+ * driver entry points.
+ */
+static DEFINE_MUTEX(gPVRSRVLock);
+
+static void *g_pvBridgeBuffers;
+
+struct task_struct *BridgeLockGetOwner(void);
+IMG_BOOL BridgeLockIsLocked(void);
+#endif
+
+typedef void (*PFN_DEBUG_DUMP)(IMG_HANDLE hDbgReqestHandle,
+ DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+typedef struct {
+ struct task_struct *kthread;
+ PFN_THREAD pfnThread;
+ void *hData;
+ OS_THREAD_LEVEL eThreadPriority;
+ IMG_CHAR *pszThreadName;
+ IMG_BOOL bIsThreadRunning;
+ IMG_BOOL bIsSupportingThread;
+ PFN_DEBUG_DUMP pfnDebugDumpCB;
+ DLLIST_NODE sNode;
+} OSThreadData;
+
+static DLLIST_NODE gsThreadListHead;
+
+static void _ThreadListAddEntry(OSThreadData *psThreadListNode)
+{
+ dllist_add_to_tail(&gsThreadListHead, &(psThreadListNode->sNode));
+}
+
+static void _ThreadListRemoveEntry(OSThreadData *psThreadListNode)
+{
+ dllist_remove_node(&(psThreadListNode->sNode));
+}
+
+static void _ThreadSetStopped(OSThreadData *psOSThreadData)
+{
+ psOSThreadData->bIsThreadRunning = IMG_FALSE;
+}
+
+static void _OSInitThreadList(void)
+{
+ dllist_init(&gsThreadListHead);
+}
+
+void OSThreadDumpInfo(IMG_HANDLE hDbgReqestHandle,
+ DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PDLLIST_NODE psNodeCurr, psNodeNext;
+
+ dllist_foreach_node(&gsThreadListHead, psNodeCurr, psNodeNext)
+ {
+ OSThreadData *psThreadListNode;
+ psThreadListNode = IMG_CONTAINER_OF(psNodeCurr, OSThreadData, sNode);
+
+ PVR_DUMPDEBUG_LOG(" %s : %s",
+ psThreadListNode->pszThreadName,
+ (psThreadListNode->bIsThreadRunning) ? "Running" : "Stopped");
+
+ if(psThreadListNode->pfnDebugDumpCB)
+ {
+ psThreadListNode->pfnDebugDumpCB(hDbgReqestHandle,
+ pfnDumpDebugPrintf,
+ pvDumpDebugFile);
+ }
+ }
+}
+
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+ struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ struct page *psPage;
+ IMG_UINT32 ui32Order=0;
+ gfp_t gfp_flags;
+
+ PVR_ASSERT(uiSize != 0);
+	/* Align the size to page granularity */
+	uiSize = PAGE_ALIGN(uiSize);
+
+	/* Get the order to be used with the allocation */
+	ui32Order = get_order(uiSize);
+
+ gfp_flags = GFP_KERNEL;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+ if (psDev)
+ {
+ if (*psDev->dma_mask == DMA_BIT_MASK(32))
+ {
+ /* Limit to 32 bit.
+ * Achieved by setting __GFP_DMA32 for 64 bit systems */
+ gfp_flags |= __GFP_DMA32;
+ }
+ else if (*psDev->dma_mask < DMA_BIT_MASK(32))
+ {
+ /* Limit to whatever the size of DMA zone is. */
+ gfp_flags |= __GFP_DMA;
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psDev);
+#endif
+
+	/* Allocate the pages */
+ psPage = alloc_pages(gfp_flags, ui32Order);
+ if (psPage == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ uiSize = (1 << ui32Order) * PAGE_SIZE;
+
+ psMemHandle->u.pvHandle = psPage;
+ psMemHandle->ui32Order = ui32Order;
+ sCpuPAddr.uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(page_to_phys(psPage));
+
+ /*
+	 * Even when multiple pages are allocated for the base MMU object, a single
+	 * physical address is sufficient because the pages are physically contiguous.
+ */
+ PhysHeapCpuPAddrToDevPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL], 1, psDevPAddr, &sCpuPAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ uiSize,
+ (IMG_UINT64)(uintptr_t) psPage,
+ OSGetCurrentClientProcessIDKM());
+#else
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ psPage,
+ sCpuPAddr,
+ uiSize,
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+ return PVRSRV_OK;
+}
+
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+ struct page *psPage = (struct page*) psMemHandle->u.pvHandle;
+ IMG_UINT32 uiSize, uiPageCount=0;
+
+ uiPageCount = (1 << psMemHandle->ui32Order);
+ uiSize = (uiPageCount * PAGE_SIZE);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ (IMG_UINT64)(uintptr_t) psPage);
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA,
+ (IMG_UINT64)(uintptr_t) psPage,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+ __free_pages(psPage, psMemHandle->ui32Order);
+ psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr)
+{
+ size_t actualSize = 1 << (PAGE_SHIFT + psMemHandle->ui32Order);
+ *pvPtr = kmap((struct page*)psMemHandle->u.pvHandle);
+
+ PVR_UNREFERENCED_PARAMETER(psDevPAddr);
+
+	PVR_UNREFERENCED_PARAMETER(actualSize); /* Unused if neither #ifdef path below is taken */
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, actualSize, OSGetCurrentClientProcessIDKM());
+#else
+ {
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = 0;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+ *pvPtr,
+ sCpuPAddr,
+ actualSize,
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+ }
+#endif
+#endif
+
+ return PVRSRV_OK;
+}
+
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Mapping is done a page at a time */
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+ (1 << (PAGE_SHIFT + psMemHandle->ui32Order)),
+ OSGetCurrentClientProcessIDKM());
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA,
+ (IMG_UINT64)(uintptr_t)pvPtr,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(pvPtr);
+
+ kunmap((struct page*) psMemHandle->u.pvHandle);
+}
+
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ struct page* psPage = (struct page*) psMemHandle->u.pvHandle;
+
+ void* pvVirtAddrStart = kmap(psPage) + uiOffset;
+ IMG_CPU_PHYADDR sPhysStart, sPhysEnd;
+
+ if (uiLength == 0)
+ {
+ goto e0;
+ }
+
+ if ((uiOffset + uiLength) > ((1 << psMemHandle->ui32Order) * PAGE_SIZE))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid size params, uiOffset %u, uiLength %u",
+ __FUNCTION__,
+ uiOffset,
+ uiLength));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ sPhysStart.uiAddr = page_to_phys(psPage) + uiOffset;
+ sPhysEnd.uiAddr = sPhysStart.uiAddr + uiLength;
+
+ CacheOpExec(psDevNode,
+ pvVirtAddrStart,
+ pvVirtAddrStart + uiLength,
+ sPhysStart,
+ sPhysEnd,
+ PVRSRV_CACHE_OP_CLEAN);
+
+e0:
+ kunmap(psPage);
+
+ return eError;
+}
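
Taken together, the five OSPhyContigPages* entry points above form an alloc/map/clean/unmap/free lifecycle for physically contiguous MMU pages. A minimal sketch of a round trip through that lifecycle; the caller and its error handling are hypothetical, and a valid psDevNode is assumed:

/* Hypothetical caller; assumes a valid psDevNode */
static PVRSRV_ERROR ContigPageRoundTrip(PVRSRV_DEVICE_NODE *psDevNode)
{
	PG_HANDLE sMemHandle;
	IMG_DEV_PHYADDR sDevPAddr;
	void *pvKernAddr;
	PVRSRV_ERROR eError;

	eError = OSPhyContigPagesAlloc(psDevNode, PAGE_SIZE,
	                               &sMemHandle, &sDevPAddr);
	if (eError != PVRSRV_OK)
		return eError;

	eError = OSPhyContigPagesMap(psDevNode, &sMemHandle, PAGE_SIZE,
	                             &sDevPAddr, &pvKernAddr);
	if (eError == PVRSRV_OK)
	{
		memset(pvKernAddr, 0, PAGE_SIZE);
		/* Flush the CPU writes before handing the page to the device */
		OSPhyContigPagesClean(psDevNode, &sMemHandle, 0, PAGE_SIZE);
		OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvKernAddr);
	}

	OSPhyContigPagesFree(psDevNode, &sMemHandle);
	return eError;
}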
+
+#if defined(__GNUC__)
+#define PVRSRV_MEM_ALIGN __attribute__ ((aligned (0x8)))
+#define PVRSRV_MEM_ALIGN_MASK (0x7)
+#else
+#error "PVRSRV Alignment macros need to be defined for this compiler"
+#endif
+
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute)
+{
+ IMG_UINT32 uiSize = 0;
+
+ switch(eCacheAttribute)
+ {
+ case PVR_DCACHE_LINE_SIZE:
+ uiSize = cache_line_size();
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid cache attribute type %d",
+ __FUNCTION__, (IMG_UINT32)eCacheAttribute));
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return uiSize;
+}
+
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...)
+{
+ va_list argList;
+ IMG_INT32 iCount = 0;
+
+ va_start(argList, pszFormat);
+ iCount = vsscanf(pStr, pszFormat, argList);
+ va_end(argList);
+
+ return iCount;
+}
+
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen)
+{
+ return (IMG_INT) memcmp(pvBufA, pvBufB, uiLen);
+}
+
+size_t OSStringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize)
+{
+ size_t uSrcSize = strlcpy(pszDest, pszSrc, uSize);
+
+#if defined(PVR_DEBUG_STRLCPY) && defined(DEBUG)
+ /* Handle truncation by dumping calling stack if debug allows */
+ if (uSrcSize >= uSize)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: String truncated Src = '<%s>' %ld bytes, Dest = '%s'",
+ __FUNCTION__, pszSrc, (long)uSize, pszDest));
+ OSDumpStack();
+ }
+#endif /* defined (PVR_DEBUG_STRLCPY) && defined(DEBUG) */
+
+ return uSrcSize;
+}
+
+IMG_CHAR *OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize)
+{
+ /*
+ * Let strlcpy handle any truncation cases correctly.
+ * We will definitely get a NUL-terminated string set in pszDest
+ */
+ (void) OSStringLCopy(pszDest, pszSrc, uSize);
+
+ return pszDest;
+}
+
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...)
+{
+ va_list argList;
+ IMG_INT32 iCount;
+
+ va_start(argList, pszFormat);
+ iCount = vsnprintf(pStr, (size_t)ui32Size, pszFormat, argList);
+ va_end(argList);
+
+ return iCount;
+}
+
+size_t OSStringLength(const IMG_CHAR *pStr)
+{
+ return strlen(pStr);
+}
+
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount)
+{
+ return strnlen(pStr, uiCount);
+}
+
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2)
+{
+ return strcmp(pStr1, pStr2);
+}
+
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+ size_t uiSize)
+{
+ return strncmp(pStr1, pStr2, uiSize);
+}
+
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+ IMG_UINT32 *ui32Result)
+{
+ if (kstrtou32(pStr, ui32Base, ui32Result) != 0)
+ return PVRSRV_ERROR_CONVERSION_FAILED;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSInitEnvData(void)
+{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ /* allocate memory for the bridge buffers to be used during an ioctl */
+ g_pvBridgeBuffers = OSAllocMem(PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE);
+ if (g_pvBridgeBuffers == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+#endif
+
+ LinuxInitPhysmem();
+
+ _OSInitThreadList();
+
+ return PVRSRV_OK;
+}
+
+
+void OSDeInitEnvData(void)
+{
+
+ LinuxDeinitPhysmem();
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (g_pvBridgeBuffers)
+ {
+ /* free-up the memory allocated for bridge buffers */
+ OSFreeMem(g_pvBridgeBuffers);
+ g_pvBridgeBuffers = NULL;
+ }
+#endif
+}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+PVRSRV_ERROR OSGetGlobalBridgeBuffers(void **ppvBridgeInBuffer,
+ void **ppvBridgeOutBuffer)
+{
+ PVR_ASSERT (ppvBridgeInBuffer && ppvBridgeOutBuffer);
+
+ *ppvBridgeInBuffer = g_pvBridgeBuffers;
+ *ppvBridgeOutBuffer = *ppvBridgeInBuffer + PVRSRV_MAX_BRIDGE_IN_SIZE;
+
+ return PVRSRV_OK;
+}
+#endif
+
+void OSReleaseThreadQuanta(void)
+{
+ schedule();
+}
+
+/* Not matching/aligning this API to the Clockus() API below, to avoid
+ * unnecessary multiplication/division operations in calling code.
+ */
+static inline IMG_UINT64 Clockns64(void)
+{
+ IMG_UINT64 timenow;
+
+	/* Kernel thread preemption protection. Some architecture implementations
+	 * of sched_clock (e.g. ARM) are not preempt-safe when the kernel is
+	 * configured as preemptible, e.g. with CONFIG_PREEMPT.
+ */
+ preempt_disable();
+
+	/* Using sched_clock instead of ktime_get since we need a timestamp that
+	 * correlates with that shown in kernel logs and trace data, not one that
+	 * is slightly behind. */
+ timenow = sched_clock();
+
+ preempt_enable();
+
+ return timenow;
+}
+
+IMG_UINT64 OSClockns64(void)
+{
+ return Clockns64();
+}
+
+IMG_UINT64 OSClockus64(void)
+{
+ IMG_UINT64 timenow = Clockns64();
+ IMG_UINT32 remainder;
+
+ return OSDivide64r64(timenow, 1000, &remainder);
+}
+
+IMG_UINT32 OSClockus(void)
+{
+ return (IMG_UINT32) OSClockus64();
+}
+
+IMG_UINT32 OSClockms(void)
+{
+ IMG_UINT64 timenow = Clockns64();
+ IMG_UINT32 remainder;
+
+ return OSDivide64(timenow, 1000000, &remainder);
+}
+
+static inline IMG_UINT64 KClockns64(void)
+{
+ ktime_t sTime = ktime_get();
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ return sTime;
+#else
+ return sTime.tv64;
+#endif
+}
+
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time)
+{
+ *pui64Time = KClockns64();
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time)
+{
+ IMG_UINT64 timenow = KClockns64();
+ IMG_UINT32 remainder;
+
+ *pui64Time = OSDivide64r64(timenow, 1000, &remainder);
+ return PVRSRV_OK;
+}
+
+IMG_UINT64 OSClockMonotonicRawns64(void)
+{
+ struct timespec ts;
+
+ getrawmonotonic(&ts);
+ return (IMG_UINT64) ts.tv_sec * 1000000000 + ts.tv_nsec;
+}
+
+IMG_UINT64 OSClockMonotonicRawus64(void)
+{
+ IMG_UINT32 rem;
+ return OSDivide64r64(OSClockMonotonicRawns64(), 1000, &rem);
+}
+
+/*
+ OSWaitus
+*/
+void OSWaitus(IMG_UINT32 ui32Timeus)
+{
+ udelay(ui32Timeus);
+}
+
+
+/*
+ OSSleepms
+*/
+void OSSleepms(IMG_UINT32 ui32Timems)
+{
+ msleep(ui32Timems);
+}
+
+
+INLINE IMG_UINT64 OSGetCurrentProcessVASpaceSize(void)
+{
+ return (IMG_UINT64)TASK_SIZE;
+}
+
+INLINE IMG_PID OSGetCurrentProcessID(void)
+{
+ if (in_interrupt())
+ {
+ return KERNEL_ID;
+ }
+
+ return (IMG_PID)task_tgid_nr(current);
+}
+
+INLINE IMG_CHAR *OSGetCurrentProcessName(void)
+{
+ return current->comm;
+}
+
+INLINE uintptr_t OSGetCurrentThreadID(void)
+{
+ if (in_interrupt())
+ {
+ return KERNEL_ID;
+ }
+
+ return current->pid;
+}
+
+IMG_PID OSGetCurrentClientProcessIDKM(void)
+{
+ return OSGetCurrentProcessID();
+}
+
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void)
+{
+ return OSGetCurrentProcessName();
+}
+
+uintptr_t OSGetCurrentClientThreadIDKM(void)
+{
+ return OSGetCurrentThreadID();
+}
+
+size_t OSGetPageSize(void)
+{
+ return PAGE_SIZE;
+}
+
+size_t OSGetPageShift(void)
+{
+ return PAGE_SHIFT;
+}
+
+size_t OSGetPageMask(void)
+{
+ return (OSGetPageSize()-1);
+}
+
+size_t OSGetOrder(size_t uSize)
+{
+ return get_order(PAGE_ALIGN(uSize));
+}
+
+IMG_UINT64 OSGetRAMSize(void)
+{
+ struct sysinfo SI;
+ si_meminfo(&SI);
+
+ return (PAGE_SIZE * SI.totalram);
+}
+
+typedef struct
+{
+ int os_error;
+ PVRSRV_ERROR pvr_error;
+} error_map_t;
+
+/* Return negative versions of POSIX errors, as they are used in this form */
+static const error_map_t asErrorMap[] =
+{
+ {-EFAULT, PVRSRV_ERROR_BRIDGE_EFAULT},
+ {-EINVAL, PVRSRV_ERROR_BRIDGE_EINVAL},
+ {-ENOMEM, PVRSRV_ERROR_BRIDGE_ENOMEM},
+ {-ERANGE, PVRSRV_ERROR_BRIDGE_ERANGE},
+ {-EPERM, PVRSRV_ERROR_BRIDGE_EPERM},
+ {-ENOTTY, PVRSRV_ERROR_BRIDGE_ENOTTY},
+ {-ENOTTY, PVRSRV_ERROR_BRIDGE_CALL_FAILED},
+ {-ERANGE, PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL},
+ {-ENOMEM, PVRSRV_ERROR_OUT_OF_MEMORY},
+ {-EINVAL, PVRSRV_ERROR_INVALID_PARAMS},
+
+ {0, PVRSRV_OK}
+};
+
+#define num_rows(a) (sizeof(a)/sizeof(a[0]))
+
+int PVRSRVToNativeError(PVRSRV_ERROR e)
+{
+ int os_error = -EFAULT;
+ int i;
+ for (i = 0; i < num_rows(asErrorMap); i++)
+ {
+ if (e == asErrorMap[i].pvr_error)
+ {
+ os_error = asErrorMap[i].os_error;
+ break;
+ }
+ }
+ return os_error;
+}
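
At the bridge/ioctl boundary the table above is used to fold a driver-internal PVRSRV_ERROR into the negative errno user space expects. A minimal sketch of that pattern; the wrapper name is hypothetical:

/* Hypothetical ioctl tail: map a services error onto a kernel errno */
static long MyBridgeReturn(PVRSRV_ERROR eError)
{
	/* PVRSRV_OK maps to 0; anything unrecognised falls back to -EFAULT */
	return (long) PVRSRVToNativeError(eError);
}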
+
+typedef struct _MISR_DATA_ {
+ struct workqueue_struct *psWorkQueue;
+ struct work_struct sMISRWork;
+ PFN_MISR pfnMISR;
+ void *hData;
+} MISR_DATA;
+
+/*
+ MISRWrapper
+*/
+static void MISRWrapper(struct work_struct *data)
+{
+ MISR_DATA *psMISRData = container_of(data, MISR_DATA, sMISRWork);
+
+ psMISRData->pfnMISR(psMISRData->hData);
+}
+
+/*
+ OSInstallMISR
+*/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData, PFN_MISR pfnMISR,
+ void *hData)
+{
+ MISR_DATA *psMISRData;
+
+ psMISRData = OSAllocMem(sizeof(*psMISRData));
+ if (psMISRData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psMISRData->hData = hData;
+ psMISRData->pfnMISR = pfnMISR;
+
+ PVR_TRACE(("Installing MISR with cookie %p", psMISRData));
+
+ psMISRData->psWorkQueue = create_singlethread_workqueue("pvr_misr");
+
+ if (psMISRData->psWorkQueue == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSInstallMISR: create_singlethread_workqueue failed"));
+ OSFreeMem(psMISRData);
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+ }
+
+ INIT_WORK(&psMISRData->sMISRWork, MISRWrapper);
+
+ *hMISRData = (IMG_HANDLE) psMISRData;
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSUninstallMISR
+*/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+ PVR_TRACE(("Uninstalling MISR with cookie %p", psMISRData));
+
+ destroy_workqueue(psMISRData->psWorkQueue);
+ OSFreeMem(psMISRData);
+
+ return PVRSRV_OK;
+}
+
+/*
+ OSScheduleMISR
+*/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData)
+{
+ MISR_DATA *psMISRData = (MISR_DATA *) hMISRData;
+
+ /*
+ Note:
+
+ In the case of NO_HARDWARE we want the driver to be synchronous so
+ that we don't have to worry about waiting for previous operations
+ to complete
+ */
+#if defined(NO_HARDWARE)
+ psMISRData->pfnMISR(psMISRData->hData);
+ return PVRSRV_OK;
+#else
+ {
+ bool rc = queue_work(psMISRData->psWorkQueue, &psMISRData->sMISRWork);
+ return (rc ? PVRSRV_OK : PVRSRV_ERROR_ALREADY_EXISTS);
+ }
+#endif
+}
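
The three functions above implement the usual install/schedule/uninstall lifecycle for a mid-level ISR deferred to a workqueue. A minimal sketch of a client, assuming a hypothetical handler and device context:

/* Hypothetical deferred handler: runs in process context on the workqueue */
static void MyMISRHandler(void *pvData)
{
	PVR_UNREFERENCED_PARAMETER(pvData);
}

static PVRSRV_ERROR MyDeviceStart(void *pvDeviceContext, IMG_HANDLE *phMISR)
{
	PVRSRV_ERROR eError = OSInstallMISR(phMISR, MyMISRHandler, pvDeviceContext);

	if (eError != PVRSRV_OK)
		return eError;

	/* Typically called from the real ISR to defer the heavy lifting */
	return OSScheduleMISR(*phMISR);
}

static void MyDeviceStop(IMG_HANDLE hMISR)
{
	OSUninstallMISR(hMISR);
}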
+
+/* OS specific values for thread priority */
+static const IMG_INT32 ai32OSPriorityValues[OS_THREAD_LAST_PRIORITY] =
+{
+ -20, /* OS_THREAD_HIGHEST_PRIORITY */
+ -10, /* OS_THREAD_HIGH_PRIORITY */
+ 0, /* OS_THREAD_NORMAL_PRIORITY */
+ 9, /* OS_THREAD_LOW_PRIORITY */
+ 19, /* OS_THREAD_LOWEST_PRIORITY */
+ -22, /* OS_THREAD_NOSET_PRIORITY */
+};
+
+static int OSThreadRun(void *data)
+{
+ OSThreadData *psOSThreadData = data;
+
+ /* count freezable threads */
+ LinuxBridgeNumActiveKernelThreadsIncrement();
+
+	/* If the requested priority is valid, set the nice value for the new thread */
+ if (psOSThreadData->eThreadPriority != OS_THREAD_NOSET_PRIORITY &&
+ psOSThreadData->eThreadPriority < OS_THREAD_LAST_PRIORITY)
+ set_user_nice(current, ai32OSPriorityValues[psOSThreadData->eThreadPriority]);
+
+	/* Returns true if the thread was frozen. Should we do anything with this
+	 * information? What do we return? Which one is the error case? */
+ set_freezable();
+
+ /* Call the client's kernel thread with the client's data pointer */
+ psOSThreadData->pfnThread(psOSThreadData->hData);
+
+ if(psOSThreadData->bIsSupportingThread)
+ {
+ _ThreadSetStopped(psOSThreadData);
+ }
+
+ /* Wait for OSThreadDestroy() to call kthread_stop() */
+ while (!kthread_freezable_should_stop(NULL))
+ {
+ schedule();
+ }
+
+ LinuxBridgeNumActiveKernelThreadsDecrement();
+
+ return 0;
+}
+
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ IMG_HANDLE pfnDebugDumpCB,
+ IMG_BOOL bIsSupportingThread,
+ void *hData)
+{
+ return OSThreadCreatePriority(phThread, pszThreadName, pfnThread, pfnDebugDumpCB, bIsSupportingThread, hData, OS_THREAD_NOSET_PRIORITY);
+}
+
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ IMG_HANDLE pfnDebugDumpCB,
+ IMG_BOOL bIsSupportingThread,
+ void *hData,
+ OS_THREAD_LEVEL eThreadPriority)
+{
+ OSThreadData *psOSThreadData;
+ PVRSRV_ERROR eError;
+
+ psOSThreadData = OSAllocZMem(sizeof(*psOSThreadData));
+ if (psOSThreadData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psOSThreadData->pfnThread = pfnThread;
+ psOSThreadData->hData = hData;
+	psOSThreadData->eThreadPriority = eThreadPriority;
+ psOSThreadData->kthread = kthread_run(OSThreadRun, psOSThreadData, "%s", pszThreadName);
+
+ if (IS_ERR(psOSThreadData->kthread))
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_kthread;
+ }
+
+ if(bIsSupportingThread)
+ {
+ psOSThreadData->pszThreadName = pszThreadName;
+ psOSThreadData->pfnDebugDumpCB = pfnDebugDumpCB;
+ psOSThreadData->bIsThreadRunning = IMG_TRUE;
+ psOSThreadData->bIsSupportingThread = IMG_TRUE;
+
+ _ThreadListAddEntry(psOSThreadData);
+ }
+
+ *phThread = psOSThreadData;
+
+ return PVRSRV_OK;
+
+fail_kthread:
+ OSFreeMem(psOSThreadData);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread)
+{
+ OSThreadData *psOSThreadData = hThread;
+ int ret;
+
+ /* Let the thread know we are ready for it to end and wait for it. */
+ ret = kthread_stop(psOSThreadData->kthread);
+ if (0 != ret)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "kthread_stop failed(%d)", ret));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ if(psOSThreadData->bIsSupportingThread)
+ {
+ _ThreadListRemoveEntry(psOSThreadData);
+ }
+
+ OSFreeMem(psOSThreadData);
+
+ return PVRSRV_OK;
+}
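
A minimal sketch of the thread lifecycle built from the helpers above, assuming a hypothetical worker function:

/* Hypothetical worker body; OSThreadRun keeps the kthread alive until
 * OSThreadDestroy() requests it to stop */
static void MyWorker(void *pvData)
{
	PVR_UNREFERENCED_PARAMETER(pvData);
}

static PVRSRV_ERROR RunWorkerOnce(void)
{
	IMG_HANDLE hThread;
	PVRSRV_ERROR eError;

	eError = OSThreadCreate(&hThread, "my_worker", MyWorker,
	                        NULL, IMG_FALSE, NULL);
	if (eError != PVRSRV_OK)
		return eError;

	/* Blocks until the thread has exited */
	return OSThreadDestroy(hThread);
}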
+
+void OSPanic(void)
+{
+ BUG();
+
+#if defined(__KLOCWORK__)
+	/* Klocwork does not understand that BUG is terminal... */
+ abort();
+#endif
+}
+
+PVRSRV_ERROR OSSetThreadPriority(IMG_HANDLE hThread,
+ IMG_UINT32 nThreadPriority,
+ IMG_UINT32 nThreadWeight)
+{
+ PVR_UNREFERENCED_PARAMETER(hThread);
+ PVR_UNREFERENCED_PARAMETER(nThreadPriority);
+ PVR_UNREFERENCED_PARAMETER(nThreadWeight);
+ /* Default priorities used on this platform */
+
+ return PVRSRV_OK;
+}
+
+void *
+OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr,
+ size_t ui32Bytes,
+ IMG_UINT32 ui32MappingFlags)
+{
+ void __iomem *pvLinAddr;
+
+ if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+ {
+ PVR_ASSERT(!"Found non-cpu cache mode flag when mapping to the cpu");
+ return NULL;
+ }
+
+ if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+ {
+ /*
+ This is required to support DMA physheaps for GPU virtualization.
+ Unfortunately, if a region of kernel managed memory is turned into
+ a DMA buffer, conflicting mappings can come about easily on Linux
+ as the original memory is mapped by the kernel as normal cached
+ memory whilst DMA buffers are mapped mostly as uncached device or
+ cache-coherent device memory. In both cases the system will have
+ two conflicting mappings for the same memory region and will have
+ "undefined behaviour" for most processors notably ARMv6 onwards
+ and some x86 micro-architectures. As a result, perform ioremapping
+ manually for DMA physheap allocations by translating from CPU/VA
+ to BUS/PA thereby preventing the creation of conflicting mappings.
+ */
+ pvLinAddr = (void __iomem *) SysDmaDevPAddrToCpuVAddr(BasePAddr.uiAddr, ui32Bytes);
+ if (pvLinAddr != NULL)
+ {
+ return (void __force *) pvLinAddr;
+ }
+ }
+
+ switch (ui32MappingFlags)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ pvLinAddr = (void __iomem *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+ break;
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ pvLinAddr = (void __iomem *)ioremap_wc(BasePAddr.uiAddr, ui32Bytes);
+#else
+ pvLinAddr = (void __iomem *)ioremap_nocache(BasePAddr.uiAddr, ui32Bytes);
+#endif
+ break;
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+#if defined(CONFIG_X86) || defined(CONFIG_ARM)
+ pvLinAddr = (void __iomem *)ioremap_cache(BasePAddr.uiAddr, ui32Bytes);
+#else
+ pvLinAddr = (void __iomem *)ioremap(BasePAddr.uiAddr, ui32Bytes);
+#endif
+ break;
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT:
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT:
+ PVR_ASSERT(!"Unexpected cpu cache mode");
+ pvLinAddr = NULL;
+ break;
+ default:
+ PVR_ASSERT(!"Unsupported cpu cache mode");
+ pvLinAddr = NULL;
+ break;
+ }
+
+ return (void __force *) pvLinAddr;
+}
+
+
+IMG_BOOL
+OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32MappingFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Bytes);
+
+ if (ui32MappingFlags & ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK))
+ {
+ PVR_ASSERT(!"Found non-cpu cache mode flag when unmapping from the cpu");
+ return IMG_FALSE;
+ }
+
+ if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+ {
+ if (SysDmaCpuVAddrToDevPAddr(pvLinAddr))
+ {
+ return IMG_TRUE;
+ }
+ }
+
+ iounmap((void __iomem *) pvLinAddr);
+
+ return IMG_TRUE;
+}
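
A minimal usage sketch pairing the two helpers above; the register-bank address and size are hypothetical:

static void MapRegistersExample(void)
{
	IMG_CPU_PHYADDR sRegsPAddr;
	void *pvRegs;

	sRegsPAddr.uiAddr = 0xf0000000;	/* hypothetical register bank */

	pvRegs = OSMapPhysToLin(sRegsPAddr, 0x1000,
	                        PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
	if (pvRegs != NULL)
	{
		/* ... access the registers ... */
		OSUnMapPhysToLin(pvRegs, 0x1000, PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
	}
}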
+
+#define OS_MAX_TIMERS 8
+
+/* Timer callback structure used by OSAddTimer */
+typedef struct TIMER_CALLBACK_DATA_TAG
+{
+ IMG_BOOL bInUse;
+ PFN_TIMER_FUNC pfnTimerFunc;
+ void *pvData;
+ struct timer_list sTimer;
+ IMG_UINT32 ui32Delay;
+ IMG_BOOL bActive;
+ struct work_struct sWork;
+} TIMER_CALLBACK_DATA;
+
+static struct workqueue_struct *psTimerWorkQueue;
+
+static TIMER_CALLBACK_DATA sTimers[OS_MAX_TIMERS];
+
+static DEFINE_MUTEX(sTimerStructLock);
+
+static void OSTimerCallbackBody(TIMER_CALLBACK_DATA *psTimerCBData)
+{
+ if (!psTimerCBData->bActive)
+ return;
+
+ /* call timer callback */
+ psTimerCBData->pfnTimerFunc(psTimerCBData->pvData);
+
+ /* reset timer */
+ mod_timer(&psTimerCBData->sTimer, psTimerCBData->ui32Delay + jiffies);
+}
+
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+/*************************************************************************/ /*!
+@Function OSTimerCallbackWrapper
+@Description OS specific timer callback wrapper function
+@Input psTimer Timer list structure
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(struct timer_list *psTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = from_timer(psTimerCBData, psTimer, sTimer);
+#else
+/*************************************************************************/ /*!
+@Function OSTimerCallbackWrapper
+@Description OS specific timer callback wrapper function
+@Input uData Timer callback data
+*/ /**************************************************************************/
+static void OSTimerCallbackWrapper(uintptr_t uData)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = (TIMER_CALLBACK_DATA*)uData;
+#endif
+ int res;
+
+ res = queue_work(psTimerWorkQueue, &psTimerCBData->sWork);
+ if (res == 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "OSTimerCallbackWrapper: work already queued"));
+ }
+}
+
+
+static void OSTimerWorkQueueCallBack(struct work_struct *psWork)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = container_of(psWork, TIMER_CALLBACK_DATA, sWork);
+
+ OSTimerCallbackBody(psTimerCBData);
+}
+
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData;
+ IMG_UINT32 ui32i;
+
+ /* check callback */
+ if(!pfnTimerFunc)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: passed invalid callback"));
+ return NULL;
+ }
+
+ /* Allocate timer callback data structure */
+ mutex_lock(&sTimerStructLock);
+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+ {
+ psTimerCBData = &sTimers[ui32i];
+ if (!psTimerCBData->bInUse)
+ {
+ psTimerCBData->bInUse = IMG_TRUE;
+ break;
+ }
+ }
+ mutex_unlock(&sTimerStructLock);
+ if (ui32i >= OS_MAX_TIMERS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSAddTimer: all timers are in use"));
+ return NULL;
+ }
+
+ psTimerCBData->pfnTimerFunc = pfnTimerFunc;
+ psTimerCBData->pvData = pvData;
+ psTimerCBData->bActive = IMG_FALSE;
+
+ /*
+ HZ = ticks per second
+ ui32MsTimeout = required ms delay
+		ticks = (HZ * ui32MsTimeout) / 1000
+ */
+ psTimerCBData->ui32Delay = ((HZ * ui32MsTimeout) < 1000)
+ ? 1
+ : ((HZ * ui32MsTimeout) / 1000);
+
+ /* initialise object */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0))
+ timer_setup(&psTimerCBData->sTimer, OSTimerCallbackWrapper, 0);
+#else
+ init_timer(&psTimerCBData->sTimer);
+
+ /* setup timer object */
+ psTimerCBData->sTimer.function = (void *)OSTimerCallbackWrapper;
+ psTimerCBData->sTimer.data = (uintptr_t)psTimerCBData;
+#endif
+
+ return (IMG_HANDLE)(uintptr_t)(ui32i + 1);
+}
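
To make the jiffies conversion in OSAddTimer above concrete: with HZ = 250 (a common config), a 100 ms timeout becomes (250 * 100) / 1000 = 25 ticks, while a 1 ms timeout computes to 250 < 1000 and is clamped to 1 tick by the guard. A standalone restatement of the same arithmetic; note that in-tree kernel code would normally use msecs_to_jiffies() instead:

#include <linux/jiffies.h>

static unsigned long MsToTicks(unsigned int uiMs)
{
	/* Round sub-tick delays up to a single tick, as OSAddTimer does */
	return ((HZ * uiMs) < 1000) ? 1UL : (unsigned long)((HZ * uiMs) / 1000);
}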
+
+
+static inline TIMER_CALLBACK_DATA *GetTimerStructure(IMG_HANDLE hTimer)
+{
+ IMG_UINT32 ui32i = (IMG_UINT32)((uintptr_t)hTimer) - 1;
+
+ PVR_ASSERT(ui32i < OS_MAX_TIMERS);
+
+ return &sTimers[ui32i];
+}
+
+PVRSRV_ERROR OSRemoveTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(!psTimerCBData->bActive);
+
+ /* free timer callback data struct */
+ psTimerCBData->bInUse = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR OSEnableTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(!psTimerCBData->bActive);
+
+ /* Start timer arming */
+ psTimerCBData->bActive = IMG_TRUE;
+
+ /* set the expire time */
+ psTimerCBData->sTimer.expires = psTimerCBData->ui32Delay + jiffies;
+
+ /* Add the timer to the list */
+ add_timer(&psTimerCBData->sTimer);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSDisableTimer (IMG_HANDLE hTimer)
+{
+ TIMER_CALLBACK_DATA *psTimerCBData = GetTimerStructure(hTimer);
+
+ PVR_ASSERT(psTimerCBData->bInUse);
+ PVR_ASSERT(psTimerCBData->bActive);
+
+ /* Stop timer from arming */
+ psTimerCBData->bActive = IMG_FALSE;
+ smp_mb();
+
+ flush_workqueue(psTimerWorkQueue);
+
+ /* remove timer */
+ del_timer_sync(&psTimerCBData->sTimer);
+
+ /*
+ * This second flush is to catch the case where the timer ran
+ * before we managed to delete it, in which case, it will have
+ * queued more work for the workqueue. Since the bActive flag
+ * has been cleared, this second flush won't result in the
+ * timer being rearmed.
+ */
+ flush_workqueue(psTimerWorkQueue);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName, IMG_HANDLE *hEventObject)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_UNREFERENCED_PARAMETER(pszName);
+
+ if(hEventObject)
+ {
+ if(LinuxEventObjectListCreate(hEventObject) != PVRSRV_OK)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectCreate: hEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(hEventObject)
+ {
+ LinuxEventObjectListDestroy(hEventObject);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectDestroy: hEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+#define _FREEZABLE IMG_TRUE
+#define _NON_FREEZABLE IMG_FALSE
+
+/*
+ * EventObjectWaitTimeout()
+ */
+static PVRSRV_ERROR EventObjectWaitTimeout(IMG_HANDLE hOSEventKM,
+ IMG_UINT64 uiTimeoutus,
+ IMG_BOOL bHoldBridgeLock)
+{
+ PVRSRV_ERROR eError;
+
+ if(hOSEventKM && uiTimeoutus > 0)
+ {
+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, bHoldBridgeLock, _NON_FREEZABLE);
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR, "EventObjectWaitTimeout: invalid arguments %p, %llu", hOSEventKM, uiTimeoutus));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+ return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_FALSE);
+}
+
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM)
+{
+ return OSEventObjectWaitTimeout(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus)
+{
+ return EventObjectWaitTimeout(hOSEventKM, uiTimeoutus, IMG_TRUE);
+}
+
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM)
+{
+ return OSEventObjectWaitTimeoutAndHoldBridgeLock(hOSEventKM, EVENT_OBJECT_TIMEOUT_US);
+}
+
+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM,
+ IMG_UINT64 uiTimeoutus)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+ if (hOSEventKM)
+ {
+ if (uiTimeoutus > 0)
+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, IMG_FALSE,
+ _FREEZABLE);
+ else
+ eError = LinuxEventObjectWaitUntilSignalled(hOSEventKM);
+ }
+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+ if (hOSEventKM && uiTimeoutus > 0)
+ {
+ eError = LinuxEventObjectWait(hOSEventKM, uiTimeoutus, IMG_FALSE,
+ _FREEZABLE);
+ }
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectWaitKernel: invalid arguments %p",
+ hOSEventKM));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+ IMG_HANDLE *phOSEvent)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(hEventObject)
+ {
+ if(LinuxEventObjectAdd(hEventObject, phOSEvent) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectAdd: failed"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectOpen: hEventObject is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if(hOSEventKM)
+ {
+ if(LinuxEventObjectDelete(hOSEventKM) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "LinuxEventObjectDelete: failed"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose: hOSEventKM is not a valid pointer"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject)
+{
+ PVRSRV_ERROR eError;
+
+ if(hEventObject)
+ {
+ eError = LinuxEventObjectSignal(hEventObject);
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR, "OSEventObjectSignal: hEventObject is not a valid handle"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return eError;
+}
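
The event-object calls above compose into the usual create/open/wait/signal/teardown sequence. A minimal sketch of a single waiter; error handling is elided and the names are hypothetical:

static void EventObjectExample(void)
{
	IMG_HANDLE hEventObject = NULL;
	IMG_HANDLE hOSEvent = NULL;

	OSEventObjectCreate("my_event", &hEventObject);
	OSEventObjectOpen(hEventObject, &hOSEvent);

	/* Another thread unblocks this wait via
	 * OSEventObjectSignal(hEventObject) */
	OSEventObjectWaitTimeout(hOSEvent, 100000ULL);

	OSEventObjectClose(hOSEvent);
	OSEventObjectDestroy(hEventObject);
}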
+
+PVRSRV_ERROR OSCopyToUser(void *pvProcess,
+ void __user *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes)
+{
+ PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+ if(pvr_copy_to_user(pvDest, pvSrc, ui32Bytes)==0)
+ return PVRSRV_OK;
+ else
+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess,
+ void *pvDest,
+ const void __user *pvSrc,
+ size_t ui32Bytes)
+{
+ PVR_UNREFERENCED_PARAMETER(pvProcess);
+
+ if(pvr_copy_from_user(pvDest, pvSrc, ui32Bytes)==0)
+ return PVRSRV_OK;
+ else
+ return PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY;
+}
+
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+ *pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+ return ui64Divident;
+}
+
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder)
+{
+ *pui32Remainder = do_div(ui64Divident, ui32Divisor);
+
+ return (IMG_UINT32) ui64Divident;
+}
+
+/* One time osfunc initialisation */
+PVRSRV_ERROR PVROSFuncInit(void)
+{
+ {
+ PVR_ASSERT(!psTimerWorkQueue);
+
+ psTimerWorkQueue = create_freezable_workqueue("pvr_timer");
+ if (psTimerWorkQueue == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: couldn't create timer workqueue", __FUNCTION__));
+ return PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD;
+ }
+ }
+
+ {
+ IMG_UINT32 ui32i;
+
+ for (ui32i = 0; ui32i < OS_MAX_TIMERS; ui32i++)
+ {
+ TIMER_CALLBACK_DATA *psTimerCBData = &sTimers[ui32i];
+
+ INIT_WORK(&psTimerCBData->sWork, OSTimerWorkQueueCallBack);
+ }
+ }
+ return PVRSRV_OK;
+}
+
+/*
+ * Osfunc deinitialisation.
+ * Note that PVROSFuncInit may not have been called
+ */
+void PVROSFuncDeInit(void)
+{
+ if (psTimerWorkQueue != NULL)
+ {
+ destroy_workqueue(psTimerWorkQueue);
+ psTimerWorkQueue = NULL;
+ }
+}
+
+void OSDumpStack(void)
+{
+ dump_stack();
+}
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+
+static struct task_struct *gsOwner;
+
+void OSAcquireBridgeLock(void)
+{
+ mutex_lock(&gPVRSRVLock);
+ gsOwner = current;
+}
+
+void OSReleaseBridgeLock(void)
+{
+ gsOwner = NULL;
+ mutex_unlock(&gPVRSRVLock);
+}
+
+struct task_struct *BridgeLockGetOwner(void)
+{
+ return gsOwner;
+}
+
+IMG_BOOL BridgeLockIsLocked(void)
+{
+ return OSLockIsLocked(&gPVRSRVLock);
+}
+
+#endif
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticEntry
+@Description Create a statistic entry in the specified folder.
+@Input pszName String containing the name for the entry.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the
+ folder to create the entry in, or NULL for the
+ root.
+@Input pfnStatsPrint Pointer to function that can be used to print the
+ values of all the statistics.
+@Input pfnIncMemRefCt Pointer to function that can be used to take a
+ reference on the memory backing the statistic
+ entry.
+@Input pfnDecMemRefCt Pointer to function that can be used to drop a
+ reference on the memory backing the statistic
+ entry.
+@Input pvData OS specific reference that can be used by
+ pfnGetElement.
+@Return Pointer void reference to the entry created, which can be
+ passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+ OS_STATS_PRINT_FUNC* pfnStatsPrint,
+ OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+ OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+ void *pvData)
+{
+ return (void *)PVRDebugFSCreateStatisticEntry(pszName, (PPVR_DEBUGFS_DIR_DATA)pvFolder, pfnStatsPrint, pfnIncMemRefCt, pfnDecMemRefCt, pvData);
+} /* OSCreateStatisticEntry */
+
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticEntry
+@Description Removes a statistic entry.
+@Input pvEntry Pointer void reference to the entry created by
+ OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void *pvEntry)
+{
+ PVRDebugFSRemoveStatisticEntry((PPVR_DEBUGFS_DRIVER_STAT)pvEntry);
+} /* OSRemoveStatisticEntry */
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+ OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+ return (void *) PVRDebugFSCreateRawStatisticEntry(pszFileName, pvParentDir,
+ pfStatsPrint);
+}
+
+void OSRemoveRawStatisticEntry(void *pvEntry)
+{
+ PVRDebugFSRemoveRawStatisticEntry(pvEntry);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticFolder
+@Description Create a statistic folder to hold statistic entries.
+@Input pszName String containing the name for the folder.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the folder
+ to create the folder in, or NULL for the root.
+@Return Pointer void reference to the folder created, which can be
+ passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder)
+{
+ PPVR_DEBUGFS_DIR_DATA psNewStatFolder = NULL;
+ int iResult;
+
+ iResult = PVRDebugFSCreateEntryDir(pszName, (PPVR_DEBUGFS_DIR_DATA)pvFolder, &psNewStatFolder);
+ return (iResult == 0) ? (void *)psNewStatFolder : NULL;
+} /* OSCreateStatisticFolder */
+
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticFolder
+@Description Removes a statistic folder.
+@Input ppvFolder Reference from OSCreateStatisticFolder() of the
+ folder that should be removed.
+ This needs to be double pointer because it has to
+ be NULLed right after memory is freed to avoid
+ possible races and use-after-free situations.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder)
+{
+ PVRDebugFSRemoveEntryDir((PPVR_DEBUGFS_DIR_DATA *)ppvFolder);
+} /* OSRemoveStatisticFolder */
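
A minimal sketch of why the remove call takes a double pointer: the caller's reference is cleared in the same step, so a racing reader sees NULL rather than a dangling pointer to a freed folder. Names are hypothetical:

static void *gpvMyStatsFolder;

static void MyStatsInit(void)
{
	gpvMyStatsFolder = OSCreateStatisticFolder("my_driver_stats", NULL);
}

static void MyStatsDeInit(void)
{
	/* Frees the folder and NULLs gpvMyStatsFolder in one call */
	OSRemoveStatisticFolder(&gpvMyStatsFolder);
}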
+
+
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_CPU_PHYADDR sCpuPAHeapBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bIsLMA)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t sPFN;
+#else
+ IMG_UINT64 uiPFN;
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+
+ PVRSRV_ERROR eError;
+
+ struct mm_struct *psMM = current->mm;
+ struct vm_area_struct *psVMA = NULL;
+ struct address_space *psMapping = NULL;
+ struct page *psPage = NULL;
+
+ IMG_UINT64 uiCPUVirtAddr = 0;
+ IMG_UINT32 ui32Loop = 0;
+ IMG_UINT32 ui32PageSize = OSGetPageSize();
+ IMG_BOOL bMixedMap = IMG_FALSE;
+
+ /*
+	 * Acquire the lock before manipulating the VMA. In this case the
+	 * mmap_sem lock alone suffices, as the pages associated with this VMA
+	 * are never meant to be swapped out.
+	 *
+	 * In the future, should the pages be marked as swappable, the
+	 * page_table_lock would need to be acquired in conjunction with it to
+	 * disable page swapping.
+ */
+
+ /* Find the Virtual Memory Area associated with the user base address */
+ psVMA = find_vma(psMM, (uintptr_t)sCpuVAddrBase);
+ if (NULL == psVMA)
+ {
+ eError = PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND;
+ return eError;
+ }
+
+ /* Acquire the memory sem */
+ down_write(&psMM->mmap_sem);
+
+ psMapping = psVMA->vm_file->f_mapping;
+
+	/* Reset the page offset to the correct value, as it is modified in the MMAP_PMR function */
+ psVMA->vm_pgoff = (psVMA->vm_start >> PAGE_SHIFT);
+
+ /* Delete the entries for the pages that got freed */
+ if (ui32FreePageCount && (pai32FreeIndices != NULL))
+ {
+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+ {
+ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32FreeIndices[ui32Loop] * ui32PageSize));
+
+ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+#ifndef PVRSRV_UNMAP_ON_SPARSE_CHANGE
+ /*
+ * Still need to map pages in case remap flag is set.
+ * That is not done until the remap case succeeds
+ */
+#endif
+ }
+ eError = PVRSRV_OK;
+ }
+
+ if ((psVMA->vm_flags & VM_MIXEDMAP) || bIsLMA)
+ {
+ psVMA->vm_flags |= VM_MIXEDMAP;
+ bMixedMap = IMG_TRUE;
+ }
+ else
+ {
+ if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+ {
+ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+
+ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = page_to_pfn_t(psPage);
+
+ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+ uiPFN = page_to_pfn(psPage);
+
+ if (!pfn_valid(uiPFN) || (page_count(pfn_to_page(uiPFN)) == 0))
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ {
+ bMixedMap = IMG_TRUE;
+ psVMA->vm_flags |= VM_MIXEDMAP;
+ break;
+ }
+ }
+ }
+ }
+
+ /* Map the pages that got allocated */
+ if (ui32AllocPageCount && (NULL != pai32AllocIndices))
+ {
+ for (ui32Loop = 0; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+ int err;
+
+ uiCPUVirtAddr = (uintptr_t)(sCpuVAddrBase + (pai32AllocIndices[ui32Loop] * ui32PageSize));
+ unmap_mapping_range(psMapping, uiCPUVirtAddr, ui32PageSize, 1);
+
+ if (bIsLMA)
+ {
+ phys_addr_t uiAddr = sCpuPAHeapBase.uiAddr +
+ ((IMG_DEV_PHYADDR *)psPageArray)[pai32AllocIndices[ui32Loop]].uiAddr;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t(uiAddr, 0);
+ psPage = pfn_t_to_page(sPFN);
+#else
+ uiPFN = uiAddr >> PAGE_SHIFT;
+ psPage = pfn_to_page(uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ }
+ else
+ {
+ psPage = (struct page *)psPageArray[pai32AllocIndices[ui32Loop]];
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = page_to_pfn_t(psPage);
+#else
+ uiPFN = page_to_pfn(psPage);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ }
+
+ if (bMixedMap)
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, sPFN);
+#else
+ err = vm_insert_mixed(psVMA, uiCPUVirtAddr, uiPFN);
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ }
+ else
+ {
+ err = vm_insert_page(psVMA, uiCPUVirtAddr, psPage);
+ }
+
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Remap failure error code: %d", err));
+ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+ goto eFailed;
+ }
+ }
+ }
+
+ eError = PVRSRV_OK;
+ eFailed:
+ up_write(&psMM->mmap_sem);
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function OSDebugSignalPID
+@Description Sends a SIGTRAP signal to a specific PID in user mode for
+ debugging purposes. The user mode process can register a handler
+ against this signal.
+ This is necessary to support the Rogue debugger. If the Rogue
+ debugger is not used then this function may be implemented as
+ a stub.
+@Input ui32PID The PID for the signal.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID)
+{
+ int err;
+ struct pid *psPID;
+
+ psPID = find_vpid(ui32PID);
+ if (psPID == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get PID struct.", __func__));
+ return PVRSRV_ERROR_NOT_FOUND;
+ }
+
+ err = kill_pid(psPID, SIGTRAP, 0);
+ if (err != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Signal Failure %d", __func__, err));
+ return PVRSRV_ERROR_SIGNAL_FAILED;
+ }
+
+ return PVRSRV_OK;
+}
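
On the user-mode side, a process that wants to cooperate with OSDebugSignalPID registers a SIGTRAP handler. A minimal sketch in plain POSIX, independent of the driver; the handler name is hypothetical:

#include <signal.h>
#include <string.h>

/* Hypothetical user-space handler; keep the body async-signal-safe */
static void DebugTrapHandler(int iSig)
{
	(void) iSig;
}

static void InstallDebugTrapHandler(void)
{
	struct sigaction sSigAction;

	memset(&sSigAction, 0, sizeof(sSigAction));
	sSigAction.sa_handler = DebugTrapHandler;
	sigaction(SIGTRAP, &sSigAction, NULL);
}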
+
+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVR_DUMPDEBUG_LOG("OS kernel info: %s %s %s %s",
+ utsname()->sysname,
+ utsname()->release,
+ utsname()->version,
+ utsname()->machine);
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/osfunc.h b/drivers/gpu/drm/img-rogue/1.10/osfunc.h
new file mode 100644
index 00000000000000..402fe1f1d0a463
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osfunc.h
@@ -0,0 +1,1829 @@
+/**************************************************************************/ /*!
+@File
+@Title OS functions header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS specific API definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifdef DEBUG_RELEASE_BUILD
+#pragma optimize( "", off )
+#define DEBUG 1
+#endif
+
+#ifndef __OSFUNC_H__
+#define __OSFUNC_H__
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include "kernel_nospec.h"
+#if !defined(NO_HARDWARE)
+#include <asm/io.h>
+#endif
+#endif
+
+#if defined(__QNXNTO__)
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#endif
+
+#if defined(INTEGRITY_OS)
+#include <stdio.h>
+#include <string.h>
+#endif
+
+#include "img_types.h"
+#include "device.h"
+#include "pvrsrv_device.h"
+
+/******************************************************************************
+ * Static defines
+ *****************************************************************************/
+#define KERNEL_ID 0xffffffffL
+#define ISR_ID 0xfffffffdL
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSConfineArrayIndexNoSpeculation(index, size) array_index_nospec((index), (size))
+#elif defined(__QNXNTO__)
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#elif defined(INTEGRITY_OS)
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#else
+/*************************************************************************/ /*!
+@Function OSConfineArrayIndexNoSpeculation
+@Description This macro aims to avoid code exposure to Cache Timing
+ Side-Channel Mechanisms which rely on speculative code
+ execution (Variant 1). It does so by ensuring a value to be
+ used as an array index will be set to zero if outside of the
+ bounds of the array, meaning any speculative execution of code
+ which uses this suitably adjusted index value will not then
+ attempt to load data from memory outside of the array bounds.
+ Code calling this macro must still first verify that the
+ original unmodified index value is within the bounds of the
+ array, and should then only use the modified value returned
+ by this function when accessing the array itself.
+ NB. If no OS-specific implementation of this macro is
+ defined, the original index is returned unmodified and no
+ protection against the potential exploit is provided.
+@Input index The original array index value that would be used to
+ access the array.
+@Input size The number of elements in the array being accessed.
+@Return The value to use for the array index, modified so that it
+ remains within array bounds.
+*/ /**************************************************************************/
+#define OSConfineArrayIndexNoSpeculation(index, size) (index)
+#if !defined(DOXYGEN)
+#define PVRSRV_MISSING_NO_SPEC_IMPL
+#endif
+#endif
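+
+/* Illustrative sketch only (not part of the API): the usual
+ * bounds-check-then-confine pattern for OSConfineArrayIndexNoSpeculation().
+ * The table and its size here are hypothetical.
+ */
+#if 0 /* example only */
+static IMG_UINT32 ExampleConfinedLookup(IMG_UINT32 ui32Index)
+{
+    static IMG_UINT32 aui32Table[16];
+
+    if (ui32Index >= 16)
+    {
+        return 0; /* the real bounds check must still happen first */
+    }
+    /* speculative execution of the access below cannot leave the array */
+    ui32Index = OSConfineArrayIndexNoSpeculation(ui32Index, 16);
+    return aui32Table[ui32Index];
+}
+#endif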
+
+/*************************************************************************/ /*!
+@Function OSClockns64
+@Description This function returns the number of ticks since system boot
+ expressed in nanoseconds. Unlike OSClockns, OSClockns64 has
+ a near 64-bit range.
+@Return The 64-bit clock value, in nanoseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockns64(void);
+
+/*************************************************************************/ /*!
+@Function OSClockus64
+@Description This function returns the number of ticks since system boot
+ expressed in microseconds. Unlike OSClockus, OSClockus64 has
+ a near 64-bit range.
+@Return The 64-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT64 OSClockus64(void);
+
+/*************************************************************************/ /*!
+@Function OSClockus
+@Description This function returns the number of ticks since system boot
+ in microseconds.
+@Return The 32-bit clock value, in microseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockus(void);
+
+/*************************************************************************/ /*!
+@Function OSClockms
+@Description This function returns the number of ticks since system boot
+ in milliseconds.
+@Return The 32-bit clock value, in milliseconds.
+*/ /**************************************************************************/
+IMG_UINT32 OSClockms(void);
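+
+/* Illustrative sketch only: measuring an elapsed interval with the 64-bit
+ * microsecond clock declared above.
+ */
+#if 0 /* example only */
+static IMG_UINT64 ExampleElapsedus(void)
+{
+    IMG_UINT64 ui64Start = OSClockus64();
+    /* ... work to be timed ... */
+    return OSClockus64() - ui64Start;
+}
+#endif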
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicns64
+@Description This function returns a clock value based on the system
+ monotonic clock.
+@Output pui64Time The 64-bit clock value, in nanoseconds.
+@Return Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicns64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicus64
+@Description This function returns a clock value based on the system
+ monotonic clock.
+@Output pui64Time The 64-bit clock value, in microseconds.
+@Return Error Code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSClockMonotonicus64(IMG_UINT64 *pui64Time);
+
+/*************************************************************************/ /*!
+@Function OSClockMonotonicRawns64
+@Description This function returns a clock value based on the system
+ monotonic raw clock.
+@Return         64-bit ns timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawns64(void);
+
+/*************************************************************************/ /*!
+@Function       OSClockMonotonicRawus64
+@Description    This function returns a clock value based on the system
+                monotonic raw clock.
+@Return         64-bit us timestamp
+*/ /**************************************************************************/
+IMG_UINT64 OSClockMonotonicRawus64(void);
+
+/*************************************************************************/ /*!
+@Function OSGetPageSize
+@Description This function returns the page size.
+ If the OS is not using memory mappings it should return a
+ default value of 4096.
+@Return The size of a page, in bytes.
+*/ /**************************************************************************/
+size_t OSGetPageSize(void);
+
+/*************************************************************************/ /*!
+@Function OSGetPageShift
+@Description This function returns the page size expressed as a power
+ of two. A number of pages, left-shifted by this value, gives
+ the equivalent size in bytes.
+ If the OS is not using memory mappings it should return a
+ default value of 12.
+@Return The page size expressed as a power of two.
+*/ /**************************************************************************/
+size_t OSGetPageShift(void);
+
+/*************************************************************************/ /*!
+@Function OSGetPageMask
+@Description This function returns a bitmask that may be applied to an
+ address to mask off the least-significant bits so as to
+ leave the start address of the page containing that address.
+@Return The page mask.
+*/ /**************************************************************************/
+size_t OSGetPageMask(void);
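+
+/* Illustrative sketch only: how the page queries above relate. Per the
+ * descriptions, OSGetPageMask() can be applied for the same rounding; the
+ * shift-based form below only relies on OSGetPageShift().
+ */
+#if 0 /* example only */
+static uintptr_t ExamplePageStart(uintptr_t uiAddr)
+{
+    /* 4096-byte pages give OSGetPageShift() == 12, since 4096 == 1 << 12 */
+    size_t uiShift = OSGetPageShift();
+    /* round down to the start of the page containing uiAddr */
+    return (uiAddr >> uiShift) << uiShift;
+}
+#endif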
+
+/*************************************************************************/ /*!
+@Function OSGetOrder
+@Description This function returns the order of power of two for a given
+ size. Eg. for a uSize of 4096 bytes the function would
+ return 12 (4096 = 2^12).
+@Input uSize The size in bytes.
+@Return The order of power of two.
+*/ /**************************************************************************/
+size_t OSGetOrder(size_t uSize);
+
+/*************************************************************************/ /*!
+@Function OSGetRAMSize
+@Description This function returns the total amount of GPU-addressable
+ memory provided by the system. In other words, after loading
+ the driver this would be the largest allocation an
+ application would reasonably expect to be able to make.
+                Note that this function is not expected to return the
+                currently available memory, but rather the amount that
+                would be available on startup.
+@Return Total GPU-addressable memory size, in bytes.
+*/ /**************************************************************************/
+IMG_UINT64 OSGetRAMSize(void);
+
+typedef void (*PFN_MISR)(void *pvData);
+typedef void (*PFN_THREAD)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function OSChangeSparseMemCPUAddrMap
+@Description This function changes the CPU mapping of the underlying
+ sparse allocation. It is used by a PMR 'factory'
+ implementation if that factory supports sparse
+ allocations.
+@Input psPageArray array representing the pages in the
+ sparse allocation
+@Input sCpuVAddrBase the virtual base address of the sparse
+ allocation ('first' page)
+@Input sCpuPAHeapBase the physical address of the virtual
+ base address 'sCpuVAddrBase'
+@Input ui32AllocPageCount the number of pages referenced in
+ 'pai32AllocIndices'
+@Input pai32AllocIndices list of indices of pages within
+ 'psPageArray' that we now want to
+ allocate and map
+@Input ui32FreePageCount the number of pages referenced in
+ 'pai32FreeIndices'
+@Input pai32FreeIndices list of indices of pages within
+ 'psPageArray' we now want to
+ unmap and free
+@Input bIsLMA flag indicating if the sparse allocation
+ is from LMA or UMA memory
+@Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSChangeSparseMemCPUAddrMap(void **psPageArray,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_CPU_PHYADDR sCpuPAHeapBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bIsLMA);
+
+/*************************************************************************/ /*!
+@Function OSInstallMISR
+@Description Installs a Mid-level Interrupt Service Routine (MISR)
+ which handles higher-level processing of interrupts from
+ the device (GPU).
+ An MISR runs outside of interrupt context, and so may be
+ descheduled. This means it can contain code that would
+ not be permitted in the LISR.
+ An MISR is invoked when OSScheduleMISR() is called. This
+                call should be made by the installed LISR once it has completed
+ its interrupt processing.
+ Multiple MISRs may be installed by the driver to handle
+ different causes of interrupt.
+@Input pfnMISR pointer to the function to be installed
+ as the MISR
+@Input hData private data provided to the MISR
+@Output hMISRData handle to the installed MISR (to be used
+ for a subsequent uninstall)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSInstallMISR(IMG_HANDLE *hMISRData,
+ PFN_MISR pfnMISR,
+ void *hData);
+
+/*************************************************************************/ /*!
+@Function OSUninstallMISR
+@Description Uninstalls a Mid-level Interrupt Service Routine (MISR).
+@Input hMISRData handle to the installed MISR
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSUninstallMISR(IMG_HANDLE hMISRData);
+
+/*************************************************************************/ /*!
+@Function OSScheduleMISR
+@Description Schedules a Mid-level Interrupt Service Routine (MISR) to be
+ executed. An MISR should be executed outside of interrupt
+ context, for example in a work queue.
+@Input hMISRData handle to the installed MISR
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSScheduleMISR(IMG_HANDLE hMISRData);
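+
+/* Illustrative sketch only: the LISR/MISR split described above. The MISR
+ * body and the install/schedule points are hypothetical.
+ */
+#if 0 /* example only */
+static void ExampleMISR(void *pvData)
+{
+    /* runs outside interrupt context, so it may be descheduled */
+}
+
+static PVRSRV_ERROR ExampleMISRSetup(IMG_HANDLE *phMISR)
+{
+    return OSInstallMISR(phMISR, ExampleMISR, NULL);
+}
+
+/* From the LISR, once it has completed its interrupt processing:
+ *     OSScheduleMISR(hMISR);
+ * and at teardown:
+ *     OSUninstallMISR(hMISR);
+ */
+#endif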
+
+
+/*************************************************************************/ /*!
+@Function OSThreadCreate
+@Description Creates a kernel thread and starts it running. The caller
+ is responsible for informing the thread that it must finish
+ and return from the pfnThread function. It is not possible
+ to kill or terminate it. The new thread runs with the default
+ priority provided by the Operating System.
+                Note: Kernel threads are freezable, which means that they
+                can be frozen by the kernel on, for example, driver suspend.
+                Because of this, only the OSEventObjectWaitKernel() function
+                should be used to put kernel threads into a waiting state.
+@Output phThread Returned handle to the thread.
+@Input pszThreadName Name to assign to the thread.
+@Input pfnThread Thread entry point function.
+@Input          pfnDebugDumpCB      Callback used to dump info about the
+                                    created thread
+@Input          bIsSupportingThread Set if a summary of this thread needs
+                                    to be dumped in debug_dump
+@Input hData Thread specific data pointer for pfnThread().
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreate(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ IMG_HANDLE pfnDebugDumpCB,
+ IMG_BOOL bIsSupportingThread,
+ void *hData);
+
+/*! Available priority levels for the creation of a new Kernel Thread. */
+typedef enum priority_levels
+{
+ OS_THREAD_HIGHEST_PRIORITY = 0,
+ OS_THREAD_HIGH_PRIORITY,
+ OS_THREAD_NORMAL_PRIORITY,
+ OS_THREAD_LOW_PRIORITY,
+ OS_THREAD_LOWEST_PRIORITY,
+ OS_THREAD_NOSET_PRIORITY, /* With this option the priority level is the default for the given OS */
+ OS_THREAD_LAST_PRIORITY /* This must be always the last entry */
+} OS_THREAD_LEVEL;
+
+/*************************************************************************/ /*!
+@Function OSThreadCreatePriority
+@Description    Like OSThreadCreate(), this function creates a kernel
+                thread and starts it running. The difference is that this
+                function makes it possible to specify the priority used to
+                schedule the new thread.
+
+@Output phThread Returned handle to the thread.
+@Input pszThreadName Name to assign to the thread.
+@Input pfnThread Thread entry point function.
+@Input          pfnDebugDumpCB      Callback used to dump info about the
+                                    created thread
+@Input          bIsSupportingThread Set if a summary of this thread needs
+                                    to be dumped in debug_dump
+@Input hData Thread specific data pointer for pfnThread().
+@Input eThreadPriority Priority level to assign to the new thread.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadCreatePriority(IMG_HANDLE *phThread,
+ IMG_CHAR *pszThreadName,
+ PFN_THREAD pfnThread,
+ IMG_HANDLE pfnDebugDumpCB,
+ IMG_BOOL bIsSupportingThread,
+ void *hData,
+ OS_THREAD_LEVEL eThreadPriority);
+
+/*************************************************************************/ /*!
+@Function OSThreadDestroy
+@Description Waits for the thread to end and then destroys the thread
+ handle memory. This function will block and wait for the
+ thread to finish successfully, thereby providing a sync point
+ for the thread completing its work. No attempt is made to kill
+ or otherwise terminate the thread.
+@Input hThread The thread handle returned by OSThreadCreate().
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSThreadDestroy(IMG_HANDLE hThread);
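+
+/* Illustrative sketch only: the shutdown handshake implied above. The
+ * thread cannot be killed, so the caller raises a (hypothetical) stop flag
+ * and then uses OSThreadDestroy() as the sync point.
+ */
+#if 0 /* example only */
+static volatile IMG_BOOL gbExampleStop;
+
+static void ExampleThread(void *pvData)
+{
+    while (!gbExampleStop)
+    {
+        /* ... do work; wait via OSEventObjectWaitKernel() ... */
+    }
+    /* returning from the entry point ends the thread */
+}
+
+static PVRSRV_ERROR ExampleThreadRun(void)
+{
+    IMG_HANDLE hThread;
+    PVRSRV_ERROR eError;
+
+    eError = OSThreadCreate(&hThread, "example", ExampleThread,
+                            NULL, IMG_FALSE, NULL);
+    if (eError != PVRSRV_OK)
+    {
+        return eError;
+    }
+    /* ... let the thread run ... */
+    gbExampleStop = IMG_TRUE;
+    return OSThreadDestroy(hThread); /* blocks until ExampleThread returns */
+}
+#endif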
+
+/*************************************************************************/ /*!
+@Function OSSetThreadPriority
+@Description Set the priority and weight of a thread
+@Input hThread The thread handle.
+@Input nThreadPriority The integer value of the thread priority
+@Input nThreadWeight The integer value of the thread weight
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSSetThreadPriority( IMG_HANDLE hThread,
+ IMG_UINT32 nThreadPriority,
+ IMG_UINT32 nThreadWeight);
+
+#if defined(__arm64__) || defined(__aarch64__) || defined (PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)
+
+/* Workarounds for assumptions made that memory will not be mapped uncached
+ * in kernel or user address spaces on arm64 platforms (or other testing).
+ */
+
+/**************************************************************************/ /*!
+@Function DeviceMemSet
+@Description Set memory, whose mapping may be uncached, to a given value.
+ On some architectures, additional processing may be needed
+ if the mapping is uncached. In such cases, OSDeviceMemSet()
+ is defined as a call to this function.
+@Input pvDest void pointer to the memory to be set
+@Input ui8Value byte containing the value to be set
+@Input ui32Size the number of bytes to be set to the given value
+@Return None
+ */ /**************************************************************************/
+void DeviceMemSet(void *pvDest, IMG_UINT8 ui8Value, size_t ui32Size);
+
+/**************************************************************************/ /*!
+@Function DeviceMemCopy
+@Description Copy values from one area of memory, to another, when one
+ or both mappings may be uncached.
+ On some architectures, additional processing may be needed
+ if mappings are uncached. In such cases, OSDeviceMemCopy()
+ is defined as a call to this function.
+@Input pvDst void pointer to the destination memory
+@Input pvSrc void pointer to the source memory
+@Input ui32Size the number of bytes to be copied
+@Return None
+ */ /**************************************************************************/
+void DeviceMemCopy(void *pvDst, const void *pvSrc, size_t ui32Size);
+
+#define OSDeviceMemSet(a,b,c) DeviceMemSet((a), (b), (c))
+#define OSDeviceMemCopy(a,b,c) DeviceMemCopy((a), (b), (c))
+#define OSCachedMemSet(a,b,c) memset((a), (b), (c))
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#else /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/* Everything else */
+
+/**************************************************************************/ /*!
+@Function OSDeviceMemSet
+@Description Set memory, whose mapping may be uncached, to a given value.
+ On some architectures, additional processing may be needed
+ if the mapping is uncached.
+@Input a void pointer to the memory to be set
+@Input b byte containing the value to be set
+@Input c the number of bytes to be set to the given value
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemSet(a,b,c) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function OSDeviceMemCopy
+@Description Copy values from one area of memory, to another, when one
+ or both mappings may be uncached.
+ On some architectures, additional processing may be needed
+ if mappings are uncached.
+@Input a void pointer to the destination memory
+@Input b void pointer to the source memory
+@Input c the number of bytes to be copied
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSDeviceMemCopy(a,b,c) memcpy((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function OSCachedMemSet
+@Description Set memory, where the mapping is known to be cached, to a
+ given value. This function exists to allow an optimal memset
+ to be performed when memory is known to be cached.
+@Input a void pointer to the memory to be set
+@Input b byte containing the value to be set
+@Input c the number of bytes to be set to the given value
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemSet(a,b,c) memset((a), (b), (c))
+
+/**************************************************************************/ /*!
+@Function OSCachedMemCopy
+@Description Copy values from one area of memory, to another, when both
+ mappings are known to be cached.
+ This function exists to allow an optimal memcpy to be
+ performed when memory is known to be cached.
+@Input a void pointer to the destination memory
+@Input b void pointer to the source memory
+@Input c the number of bytes to be copied
+@Return Pointer to the destination memory.
+ */ /**************************************************************************/
+#define OSCachedMemCopy(a,b,c) memcpy((a), (b), (c))
+
+#endif /* !(defined(__arm64__) || defined(__aarch64__) || defined(PVRSRV_DEVMEM_TEST_SAFE_MEMSETCPY)) */
+
+/**************************************************************************/ /*!
+@Function OSMapPhysToLin
+@Description Maps physical memory into a linear address range.
+@Input BasePAddr physical CPU address
+@Input ui32Bytes number of bytes to be mapped
+@Input ui32Flags flags denoting the caching mode to be employed
+ for the mapping (uncached/write-combined,
+ cached coherent or cached incoherent).
+ See pvrsrv_memallocflags.h for full flag bit
+ definitions.
+@Return Pointer to the new mapping if successful, NULL otherwise.
+ */ /**************************************************************************/
+void *OSMapPhysToLin(IMG_CPU_PHYADDR BasePAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
+
+/**************************************************************************/ /*!
+@Function OSUnMapPhysToLin
+@Description Unmaps physical memory previously mapped by OSMapPhysToLin().
+@Input pvLinAddr the linear mapping to be unmapped
+@Input ui32Bytes number of bytes to be unmapped
+@Input ui32Flags flags denoting the caching mode that was employed
+ for the original mapping.
+@Return IMG_TRUE if unmapping was successful, IMG_FALSE otherwise.
+ */ /**************************************************************************/
+IMG_BOOL OSUnMapPhysToLin(void *pvLinAddr, size_t ui32Bytes, IMG_UINT32 ui32Flags);
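+
+/* Illustrative sketch only: pairing OSMapPhysToLin()/OSUnMapPhysToLin()
+ * with the copy helpers above. The physical address and the flag value
+ * passed in are hypothetical; see pvrsrv_memallocflags.h for the real
+ * flag definitions.
+ */
+#if 0 /* example only */
+static void ExampleDeviceCopy(IMG_CPU_PHYADDR sPhysAddr, IMG_UINT32 ui32Flags)
+{
+    IMG_UINT8 aui8Src[64]; /* cached kernel buffer */
+    void *pvMapping;
+
+    OSCachedMemSet(aui8Src, 0xff, sizeof(aui8Src)); /* cached: fast path */
+
+    pvMapping = OSMapPhysToLin(sPhysAddr, sizeof(aui8Src), ui32Flags);
+    if (pvMapping != NULL)
+    {
+        /* the mapping may be uncached, so use the device-safe copy */
+        OSDeviceMemCopy(pvMapping, aui8Src, sizeof(aui8Src));
+        OSUnMapPhysToLin(pvMapping, sizeof(aui8Src), ui32Flags);
+    }
+}
+#endif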
+
+/**************************************************************************/ /*!
+@Function OSCPUOperation
+@Description Perform the specified cache operation on the CPU.
+@Input eCacheOp the type of cache operation to be performed
+@Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP eCacheOp);
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheFlushRangeKM
+@Description Clean and invalidate the CPU cache for the specified
+ address range.
+@Input psDevNode device on which the allocation was made
+@Input pvVirtStart virtual start address of the range to be
+ flushed
+@Input pvVirtEnd virtual end address of the range to be
+ flushed
+@Input sCPUPhysStart physical start address of the range to be
+ flushed
+@Input sCPUPhysEnd physical end address of the range to be
+ flushed
+@Return None
+ */ /**************************************************************************/
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheCleanRangeKM
+@Description Clean the CPU cache for the specified address range.
+ This writes out the contents of the cache and clears the
+ 'dirty' bit (which indicates the physical memory is
+ consistent with the cache contents).
+@Input psDevNode device on which the allocation was made
+@Input pvVirtStart virtual start address of the range to be
+ cleaned
+@Input pvVirtEnd virtual end address of the range to be
+ cleaned
+@Input sCPUPhysStart physical start address of the range to be
+ cleaned
+@Input sCPUPhysEnd physical end address of the range to be
+ cleaned
+@Return None
+ */ /**************************************************************************/
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheInvalidateRangeKM
+@Description Invalidate the CPU cache for the specified address range.
+ The cache must reload data from those addresses if they
+ are accessed.
+@Input psDevNode device on which the allocation was made
+@Input pvVirtStart virtual start address of the range to be
+ invalidated
+@Input pvVirtEnd virtual end address of the range to be
+ invalidated
+@Input sCPUPhysStart physical start address of the range to be
+ invalidated
+@Input sCPUPhysEnd physical end address of the range to be
+ invalidated
+@Return None
+ */ /**************************************************************************/
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd);
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheOpAddressType
+@Description    Returns the address type (i.e. virtual/physical/both) that the
+                OS uses to perform cache maintenance on the CPU. This is used
+ to infer whether the virtual or physical address supplied to
+ the OSCPUCacheXXXRangeKM functions can be omitted when called.
+@Return PVRSRV_CACHE_OP_ADDR_TYPE
+ */ /**************************************************************************/
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void);
+
+/*!
+ ******************************************************************************
+ * Cache attribute size type
+ *****************************************************************************/
+typedef enum _IMG_DCACHE_ATTRIBUTE_
+{
+ PVR_DCACHE_LINE_SIZE = 0, /*!< The cache line size */
+ PVR_DCACHE_ATTRIBUTE_COUNT /*!< The number of attributes (must be last) */
+} IMG_DCACHE_ATTRIBUTE;
+
+/**************************************************************************/ /*!
+@Function OSCPUCacheAttributeSize
+@Description Returns the size of a given cache attribute.
+ Typically this function is used to return the cache line
+ size, but may be extended to return the size of other
+ cache attributes.
+@Input eCacheAttribute the cache attribute whose size should
+ be returned.
+@Return The size of the specified cache attribute, in bytes.
+ */ /**************************************************************************/
+IMG_UINT32 OSCPUCacheAttributeSize(IMG_DCACHE_ATTRIBUTE eCacheAttribute);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentProcessID
+@Description Returns ID of current process (thread group)
+@Return ID of current process
+*****************************************************************************/
+IMG_PID OSGetCurrentProcessID(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentProcessName
+@Description    Gets the name of the current process
+@Return Process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentProcessName(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentProcessVASpaceSize
+@Description Returns the CPU virtual address space size of current process
+@Return Process VA space size
+*/ /**************************************************************************/
+IMG_UINT64 OSGetCurrentProcessVASpaceSize(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentThreadID
+@Description Returns ID for current thread
+@Return ID of current thread
+*****************************************************************************/
+uintptr_t OSGetCurrentThreadID(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentClientProcessIDKM
+@Description Returns ID of current client process (thread group) which
+ has made a bridge call into the server.
+ For some operating systems, this may simply be the current
+ process id. For others, it may be that a dedicated thread
+ is used to handle the processing of bridge calls and that
+ some additional processing is required to obtain the ID of
+ the client process making the bridge call.
+@Return ID of current client process
+*****************************************************************************/
+IMG_PID OSGetCurrentClientProcessIDKM(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentClientProcessNameKM
+@Description    Gets the name of the current client process
+@Return Client process name
+*****************************************************************************/
+IMG_CHAR *OSGetCurrentClientProcessNameKM(void);
+
+/*************************************************************************/ /*!
+@Function OSGetCurrentClientThreadIDKM
+@Description Returns ID for current client thread
+ For some operating systems, this may simply be the current
+ thread id. For others, it may be that a dedicated thread
+ is used to handle the processing of bridge calls and that
+                some additional processing is required to obtain the ID of
+ the client thread making the bridge call.
+@Return ID of current client thread
+*****************************************************************************/
+uintptr_t OSGetCurrentClientThreadIDKM(void);
+
+/**************************************************************************/ /*!
+@Function OSMemCmp
+@Description Compares two blocks of memory for equality.
+@Input pvBufA Pointer to the first block of memory
+@Input pvBufB Pointer to the second block of memory
+@Input uiLen The number of bytes to be compared
+@Return Value < 0 if pvBufA is less than pvBufB.
+ Value > 0 if pvBufB is less than pvBufA.
+ Value = 0 if pvBufA is equal to pvBufB.
+*****************************************************************************/
+IMG_INT OSMemCmp(void *pvBufA, void *pvBufB, size_t uiLen);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesAlloc
+@Description Allocates a number of contiguous physical pages.
+ If allocations made by this function are CPU cached then
+ OSPhyContigPagesClean has to be implemented to write the
+ cached data to memory.
+@Input psDevNode the device for which the allocation is
+ required
+@Input uiSize the size of the required allocation (in bytes)
+@Output psMemHandle a returned handle to be used to refer to this
+ allocation
+@Output psDevPAddr the physical address of the allocation
+@Return PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesFree
+@Description Frees a previous allocation of contiguous physical pages
+@Input psDevNode the device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be freed
+@Return None.
+*****************************************************************************/
+void OSPhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesMap
+@Description Maps the specified allocation of contiguous physical pages
+ to a kernel virtual address
+@Input psDevNode the device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be mapped
+@Input uiSize the size of the allocation (in bytes)
+@Input psDevPAddr the physical address of the allocation
+@Output pvPtr the virtual kernel address to which the
+ allocation is now mapped
+@Return PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesUnmap
+@Description Unmaps the kernel mapping for the specified allocation of
+ contiguous physical pages
+@Input psDevNode the device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be unmapped
+@Input pvPtr the virtual kernel address to which the
+ allocation is currently mapped
+@Return None.
+*****************************************************************************/
+void OSPhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle, void *pvPtr);
+
+/*************************************************************************/ /*!
+@Function OSPhyContigPagesClean
+@Description Write the content of the specified allocation from CPU cache to
+ memory from (start + uiOffset) to (start + uiOffset + uiLength)
+ It is expected to be implemented as a cache clean operation but
+ it is allowed to fall back to a cache clean + invalidate
+ (i.e. flush).
+                If allocations returned by OSPhyContigPagesAlloc are always
+                uncached, this can be implemented as a no-op.
+@Input psDevNode device on which the allocation was made
+@Input psMemHandle the handle of the allocation to be flushed
+@Input uiOffset the offset in bytes from the start of the
+ allocation from where to start flushing
+@Input uiLength the amount to flush from the offset in bytes
+@Return PVRSRV_OK on success, a failure code otherwise.
+*****************************************************************************/
+PVRSRV_ERROR OSPhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength);
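+
+/* Illustrative sketch only: the expected lifecycle of the contiguous-page
+ * helpers above, assuming psDevNode is a valid device node.
+ */
+#if 0 /* example only */
+static PVRSRV_ERROR ExamplePageLifecycle(PVRSRV_DEVICE_NODE *psDevNode)
+{
+    PG_HANDLE sMemHandle;
+    IMG_DEV_PHYADDR sDevPAddr;
+    void *pvPtr;
+    PVRSRV_ERROR eError;
+
+    eError = OSPhyContigPagesAlloc(psDevNode, OSGetPageSize(),
+                                   &sMemHandle, &sDevPAddr);
+    if (eError != PVRSRV_OK)
+    {
+        return eError;
+    }
+
+    eError = OSPhyContigPagesMap(psDevNode, &sMemHandle, OSGetPageSize(),
+                                 &sDevPAddr, &pvPtr);
+    if (eError == PVRSRV_OK)
+    {
+        OSCachedMemSet(pvPtr, 0, OSGetPageSize());
+        /* write the zeroed contents out if the mapping is CPU-cached */
+        OSPhyContigPagesClean(psDevNode, &sMemHandle, 0,
+                              (IMG_UINT32)OSGetPageSize());
+        OSPhyContigPagesUnmap(psDevNode, &sMemHandle, pvPtr);
+    }
+
+    OSPhyContigPagesFree(psDevNode, &sMemHandle);
+    return eError;
+}
+#endif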
+
+
+/**************************************************************************/ /*!
+@Function OSInitEnvData
+@Description Called to initialise any environment-specific data. This
+ could include initialising the bridge calling infrastructure
+ or device memory management infrastructure.
+@Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function OSDeInitEnvData
+@Description The counterpart to OSInitEnvData(). Called to free any
+ resources which may have been allocated by OSInitEnvData().
+@Return None.
+ */ /**************************************************************************/
+void OSDeInitEnvData(void);
+
+/**************************************************************************/ /*!
+@Function       OSVSScanf
+@Description OS function to support the standard C sscanf() function.
+ */ /**************************************************************************/
+IMG_UINT32 OSVSScanf(IMG_CHAR *pStr, const IMG_CHAR *pszFormat, ...);
+
+/**************************************************************************/ /*!
+@Function OSStringNCopy
+@Description OS function to support the standard C strncpy() function.
+ */ /**************************************************************************/
+IMG_CHAR* OSStringNCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize);
+
+/**************************************************************************/ /*!
+@Function OSStringLCopy
+@Description OS function to support the BSD C strlcpy() function.
+ */ /**************************************************************************/
+size_t OSStringLCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc, size_t uSize);
+
+/**************************************************************************/ /*!
+@Function OSSNPrintf
+@Description OS function to support the standard C snprintf() function.
+ */ /**************************************************************************/
+IMG_INT32 OSSNPrintf(IMG_CHAR *pStr, size_t ui32Size, const IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function OSStringLength
+@Description OS function to support the standard C strlen() function.
+ */ /**************************************************************************/
+size_t OSStringLength(const IMG_CHAR *pStr);
+
+/**************************************************************************/ /*!
+@Function OSStringNLength
+@Description Return the length of a string, excluding the terminating null
+ byte ('\0'), but return at most 'uiCount' bytes. Only the first
+ 'uiCount' bytes of 'pStr' are interrogated.
+@Input pStr pointer to the string
+@Input uiCount the maximum length to return
+@Return Length of the string if less than 'uiCount' bytes, otherwise
+ 'uiCount'.
+ */ /**************************************************************************/
+size_t OSStringNLength(const IMG_CHAR *pStr, size_t uiCount);
+
+/**************************************************************************/ /*!
+@Function OSStringCompare
+@Description OS function to support the standard C strcmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2);
+
+/**************************************************************************/ /*!
+@Function OSStringNCompare
+@Description OS function to support the standard C strncmp() function.
+ */ /**************************************************************************/
+IMG_INT32 OSStringNCompare(const IMG_CHAR *pStr1, const IMG_CHAR *pStr2,
+ size_t uiSize);
+
+/**************************************************************************/ /*!
+@Function OSStringToUINT32
+@Description    Converts a string to an IMG_UINT32.
+ */ /**************************************************************************/
+PVRSRV_ERROR OSStringToUINT32(const IMG_CHAR *pStr, IMG_UINT32 ui32Base,
+ IMG_UINT32 *ui32Result);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectCreate
+@Description Create an event object.
+@Input pszName name to assign to the new event object.
+@Output EventObject the created event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectCreate(const IMG_CHAR *pszName,
+ IMG_HANDLE *EventObject);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectDestroy
+@Description Destroy an event object.
+@Input hEventObject the event object to destroy.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectDestroy(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectSignal
+@Description Signal an event object. Any thread waiting on that event
+ object will be woken.
+@Input hEventObject the event object to signal.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectSignal(IMG_HANDLE hEventObject);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWait
+@Description Wait for an event object to signal. The function is passed
+ an OS event object handle (which allows the OS to have the
+ calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after a default timeout
+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+ Note: If use of the global bridge lock is supported (if the
+ DDK has been built with PVRSRV_USE_BRIDGE_LOCK defined), the
+ global bridge lock should be released while waiting for the
+ event object to signal (if held by the current thread).
+ The following logic should be implemented in the OS
+ implementation:
+ ...
+ bReleasePVRLock = (!bHoldBridgeLock &&
+ BridgeLockIsLocked() &&
+ current == BridgeLockGetOwner());
+ if (bReleasePVRLock == IMG_TRUE) OSReleaseBridgeLock();
+ ...
+ / * sleep & reschedule - wait for signal * /
+ ...
+ / * if lock was previously held, re-acquire it * /
+ if (bReleasePVRLock == IMG_TRUE) OSAcquireBridgeLock();
+ ...
+
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWait(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitKernel
+@Description Wait for an event object to signal. The function is passed
+ an OS event object handle (which allows the OS to have the
+ calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after a default timeout
+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+
+                Note: This function should be used only by kernel threads.
+                This is because all kernel threads are freezable and
+                this function allows the kernel to freeze them while
+                they wait.
+
+ See OSEventObjectWait() for more details.
+
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+#if defined(LINUX) && defined(__KERNEL__)
+PVRSRV_ERROR OSEventObjectWaitKernel(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+#else
+#define OSEventObjectWaitKernel OSEventObjectWaitTimeout
+#endif
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitTimeout
+@Description Wait for an event object to signal or timeout. The function
+ is passed an OS event object handle (which allows the OS to
+ have the calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after the specified
+ timeout period (passed in 'uiTimeoutus'), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+ NB. If use of the global bridge lock is supported (if
+ PVRSRV_USE_BRIDGE_LOCK is defined) it should be released while
+ waiting for the event object to signal (if held by the current
+ thread).
+ See OSEventObjectWait() for details.
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Input uiTimeoutus the timeout period (in usecs)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeout(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitAndHoldBridgeLock
+@Description Wait for an event object to signal. The function is passed
+ an OS event object handle (which allows the OS to have the
+ calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after a default timeout
+ period (defined in EVENT_OBJECT_TIMEOUT_MS), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+ If use of the global bridge lock is supported (if
+ PVRSRV_USE_BRIDGE_LOCK is defined), it will be held while
+ waiting for the event object to signal (this will prevent
+ other bridge calls from being serviced during this time).
+ See OSEventObjectWait() for details.
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitAndHoldBridgeLock(IMG_HANDLE hOSEventKM);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectWaitTimeoutAndHoldBridgeLock
+@Description Wait for an event object to signal or timeout. The function
+ is passed an OS event object handle (which allows the OS to
+ have the calling thread wait on the associated event object).
+ The calling thread will be rescheduled when the associated
+ event object signals.
+ If the event object has not signalled after the specified
+ timeout period (passed in 'uiTimeoutus'), the function
+ will return with the result code PVRSRV_ERROR_TIMEOUT.
+ If use of the global bridge lock is supported (if
+ PVRSRV_USE_BRIDGE_LOCK is defined) it will be held while
+ waiting for the event object to signal (this will prevent
+ other bridge calls from being serviced during this time).
+ See OSEventObjectWait() for details.
+@Input hOSEventKM the OS event object handle associated with
+ the event object.
+@Input uiTimeoutus the timeout period (in usecs)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectWaitTimeoutAndHoldBridgeLock(IMG_HANDLE hOSEventKM, IMG_UINT64 uiTimeoutus);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectOpen
+@Description Open an OS handle on the specified event object.
+ This OS handle may then be used to make a thread wait for
+ that event object to signal.
+@Input hEventObject Event object handle.
+@Output phOSEvent OS handle to the returned event object.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectOpen(IMG_HANDLE hEventObject,
+ IMG_HANDLE *phOSEvent);
+
+/*************************************************************************/ /*!
+@Function OSEventObjectClose
+@Description Close an OS handle previously opened for an event object.
+@Input hOSEventKM OS event object handle to close.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEventObjectClose(IMG_HANDLE hOSEventKM);
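+
+/* Illustrative sketch only: a typical event object lifecycle. The creator
+ * makes and later destroys the object; each waiting thread opens its own
+ * OS handle on it. The name and 100ms timeout value are hypothetical.
+ */
+#if 0 /* example only */
+static PVRSRV_ERROR ExampleEventWait(void)
+{
+    IMG_HANDLE hEventObject, hOSEvent;
+    PVRSRV_ERROR eError;
+
+    eError = OSEventObjectCreate("example-event", &hEventObject);
+    if (eError != PVRSRV_OK)
+    {
+        return eError;
+    }
+    eError = OSEventObjectOpen(hEventObject, &hOSEvent);
+    if (eError == PVRSRV_OK)
+    {
+        /* another thread unblocks this with OSEventObjectSignal() */
+        eError = OSEventObjectWaitTimeout(hOSEvent, 100000ull);
+        OSEventObjectClose(hOSEvent);
+    }
+    OSEventObjectDestroy(hEventObject);
+    return eError;
+}
+#endif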
+
+/**************************************************************************/ /*!
+@Function OSStringCopy
+@Description OS function to support the standard C strcpy() function.
+ */ /**************************************************************************/
+/* Avoid macros so we don't evaluate pszSrc twice */
+static INLINE IMG_CHAR *OSStringCopy(IMG_CHAR *pszDest, const IMG_CHAR *pszSrc)
+{
+ return OSStringNCopy(pszDest, pszSrc, OSStringLength(pszSrc) + 1);
+}
+
+/*************************************************************************/ /*!
+@Function OSWaitus
+@Description Implements a busy wait of the specified number of microseconds.
+ This function does NOT release thread quanta.
+@Input ui32Timeus The duration of the wait period (in us)
+@Return None.
+*/ /**************************************************************************/
+void OSWaitus(IMG_UINT32 ui32Timeus);
+
+/*************************************************************************/ /*!
+@Function OSSleepms
+@Description Implements a sleep of the specified number of milliseconds.
+ This function may allow pre-emption, meaning the thread
+ may potentially not be rescheduled for a longer period.
+@Input ui32Timems The duration of the sleep (in ms)
+@Return None.
+*/ /**************************************************************************/
+void OSSleepms(IMG_UINT32 ui32Timems);
+
+/*************************************************************************/ /*!
+@Function OSReleaseThreadQuanta
+@Description Relinquishes the current thread's execution time-slice,
+ permitting the OS scheduler to schedule another thread.
+@Return None.
+*/ /**************************************************************************/
+void OSReleaseThreadQuanta(void);
+
+
+
+/* The access method is dependent on the location of the physical memory that
+ * makes up the PhyHeaps defined for the system and the CPU architecture. These
+ * macros may change in future to accommodate different access requirements.
+ */
+#define OSReadDeviceMem32(addr) (*((volatile IMG_UINT32 __force *)(addr)))
+#define OSWriteDeviceMem32(addr, val) (*((volatile IMG_UINT32 __force *)(addr)) = (IMG_UINT32)(val))
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(NO_HARDWARE)
+ #define OSReadHWReg8(addr, off) (IMG_UINT8)readb((IMG_BYTE __iomem *)(addr) + (off))
+ #define OSReadHWReg16(addr, off) (IMG_UINT16)readw((IMG_BYTE __iomem *)(addr) + (off))
+ #define OSReadHWReg32(addr, off) (IMG_UINT32)readl((IMG_BYTE __iomem *)(addr) + (off))
+
+ /* Little endian support only */
+ #define OSReadHWReg64(addr, off) \
+ ({ \
+ __typeof__(addr) _addr = addr; \
+ __typeof__(off) _off = off; \
+ (IMG_UINT64) \
+ ( \
+ ( (IMG_UINT64)(readl((IMG_BYTE __iomem *)(_addr) + (_off) + 4)) << 32) \
+ | readl((IMG_BYTE __iomem *)(_addr) + (_off)) \
+ ); \
+ })
+
+ #define OSWriteHWReg8(addr, off, val) writeb((IMG_UINT8)(val), (IMG_BYTE __iomem *)(addr) + (off))
+ #define OSWriteHWReg16(addr, off, val) writew((IMG_UINT16)(val), (IMG_BYTE __iomem *)(addr) + (off))
+ #define OSWriteHWReg32(addr, off, val) writel((IMG_UINT32)(val), (IMG_BYTE __iomem *)(addr) + (off))
+ /* Little endian support only */
+ #define OSWriteHWReg64(addr, off, val) do \
+ { \
+ __typeof__(addr) _addr = addr; \
+ __typeof__(off) _off = off; \
+ __typeof__(val) _val = val; \
+ writel((IMG_UINT32)((_val) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off)); \
+ writel((IMG_UINT32)(((IMG_UINT64)(_val) >> 32) & 0xffffffff), (IMG_BYTE __iomem *)(_addr) + (_off) + 4); \
+ } while (0)
+
+
+#elif defined(NO_HARDWARE)
+ /* FIXME: OSReadHWReg should not exist in no hardware builds */
+ #define OSReadHWReg8(addr, off) (0x4eU)
+ #define OSReadHWReg16(addr, off) (0x3a4eU)
+ #define OSReadHWReg32(addr, off) (0x30f73a4eU)
+ #define OSReadHWReg64(addr, off) ((IMG_UINT64)0x5b376c9d30f73a4eU)
+
+ #define OSWriteHWReg8(addr, off, val)
+ #define OSWriteHWReg16(addr, off, val)
+ #define OSWriteHWReg32(addr, off, val)
+ #define OSWriteHWReg64(addr, off, val)
+
+#else
+/*************************************************************************/ /*!
+@Function OSReadHWReg8
+@Description Read from an 8-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The byte read.
+*/ /**************************************************************************/
+ IMG_UINT8 OSReadHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSReadHWReg16
+@Description Read from a 16-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The word read.
+*/ /**************************************************************************/
+ IMG_UINT16 OSReadHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSReadHWReg32
+@Description Read from a 32-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The long word read.
+*/ /**************************************************************************/
+ IMG_UINT32 OSReadHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSReadHWReg64
+@Description Read from a 64-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to read from a location
+ but instead returns a constant value.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be read.
+@Return The long long word read.
+*/ /**************************************************************************/
+ IMG_UINT64 OSReadHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg8
+@Description Write to an 8-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui8Value The byte to be written to the register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg8(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT8 ui8Value);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg16
+@Description Write to a 16-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui16Value The word to be written to the register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg16(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT16 ui16Value);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg32
+@Description Write to a 32-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui32Value The long word to be written to the register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg32(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function OSWriteHWReg64
+@Description Write to a 64-bit memory-mapped device register.
+ The implementation should not permit the compiler to
+ reorder the I/O sequence.
+ The implementation should ensure that for a NO_HARDWARE
+ build the code does not attempt to write to a location.
+@Input pvLinRegBaseAddr The virtual base address of the register
+ block.
+@Input ui32Offset The byte offset from the base address of
+ the register to be written to.
+@Input ui64Value The long long word to be written to the
+ register.
+@Return None.
+*/ /**************************************************************************/
+ void OSWriteHWReg64(volatile void *pvLinRegBaseAddr, IMG_UINT32 ui32Offset, IMG_UINT64 ui64Value);
+#endif
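+
+/* Illustrative sketch (not part of this interface): a read-modify-write of
+ * a 32-bit register using the accessors above. MY_REG_OFFSET and the enable
+ * bit are hypothetical names made up for this example.
+ */
+#if 0
+#define MY_REG_OFFSET 0x0040U
+
+static void EnableMyFeature(volatile void *pvRegBase)
+{
+	IMG_UINT32 ui32Val = OSReadHWReg32(pvRegBase, MY_REG_OFFSET);
+
+	ui32Val |= (1U << 0); /* set the hypothetical enable bit */
+	OSWriteHWReg32(pvRegBase, MY_REG_OFFSET, ui32Val);
+}
+#endif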
+
+typedef void (*PFN_TIMER_FUNC)(void*);
+/*************************************************************************/ /*!
+@Function OSAddTimer
+@Description OS specific function to install a timer callback. The
+ timer will then need to be enabled, as it is disabled by
+ default.
+ When enabled, the callback will be invoked once the specified
+ timeout has elapsed.
+@Input pfnTimerFunc Timer callback
+@Input *pvData Callback data
+@Input ui32MsTimeout Callback period, in milliseconds
+@Return Valid handle on success, NULL on failure
+*/ /**************************************************************************/
+IMG_HANDLE OSAddTimer(PFN_TIMER_FUNC pfnTimerFunc, void *pvData, IMG_UINT32 ui32MsTimeout);
+
+/*************************************************************************/ /*!
+@Function OSRemoveTimer
+@Description Removes the specified timer. The handle becomes invalid and
+ should no longer be used.
+@Input hTimer handle of the timer to be removed
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSRemoveTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function OSEnableTimer
+@Description Enable the specified timer. After enabling, the timer will
+ invoke the associated callback at an interval determined by
+ the configured timeout period until disabled.
+@Input hTimer handle of the timer to be enabled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSEnableTimer(IMG_HANDLE hTimer);
+
+/*************************************************************************/ /*!
+@Function OSDisableTimer
+@Description Disable the specified timer.
+@Input hTimer handle of the timer to be disabled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDisableTimer(IMG_HANDLE hTimer);
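+
+/* Illustrative sketch (not part of this interface): driving the timer API
+ * from a hypothetical module. MyTimerCb and MyModuleStart are names made up
+ * for this example.
+ */
+#if 0
+static void MyTimerCb(void *pvData)
+{
+	/* Invoked every 100ms while the timer is enabled */
+	IMG_UINT32 *pui32Ticks = pvData;
+
+	(*pui32Ticks)++;
+}
+
+static IMG_HANDLE MyModuleStart(IMG_UINT32 *pui32Ticks)
+{
+	/* Timers are created disabled, so enable explicitly */
+	IMG_HANDLE hTimer = OSAddTimer(MyTimerCb, pui32Ticks, 100);
+
+	if (hTimer != NULL)
+	{
+		(void) OSEnableTimer(hTimer);
+	}
+	/* The shutdown path would call OSDisableTimer() then OSRemoveTimer() */
+	return hTimer;
+}
+#endif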
+
+
+/*************************************************************************/ /*!
+ @Function OSPanic
+ @Description Take action in response to an unrecoverable driver error
+ @Return None
+*/ /**************************************************************************/
+void OSPanic(void);
+
+/*************************************************************************/ /*!
+@Function OSCopyToUser
+@Description Copy data to user-addressable memory from kernel-addressable
+ memory.
+ Note that pvDest may be an invalid address or NULL and the
+ function should return an error in this case.
+ For operating systems that do not have a user/kernel space
+ distinction, this function should be implemented as a stub
+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination User memory
+@Input pvSrc pointer to the source Kernel memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyToUser(void *pvProcess, void __user *pvDest, const void *pvSrc, size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function OSCopyFromUser
+@Description Copy data from user-addressable memory to kernel-addressable
+ memory.
+ Note that pvSrc may be an invalid address or NULL and the
+ function should return an error in this case.
+ For operating systems that do not have a user/kernel space
+ distinction, this function should be implemented as a stub
+ which simply returns PVRSRV_ERROR_NOT_SUPPORTED.
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination Kernel memory
+@Input pvSrc pointer to the source User memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSCopyFromUser(void *pvProcess, void *pvDest, const void __user *pvSrc, size_t ui32Bytes);
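+
+/* Illustrative sketch (not part of this interface): marshalling a value
+ * across the user/kernel boundary. MyEchoHandler and its parameters are
+ * names made up for this example.
+ */
+#if 0
+static PVRSRV_ERROR MyEchoHandler(void *pvProcess,
+                                  void __user *pvUserBuf,
+                                  IMG_UINT32 ui32Delta)
+{
+	IMG_UINT32 ui32Tmp;
+	PVRSRV_ERROR eError;
+
+	/* A bad user pointer yields an error, never a crash */
+	eError = OSCopyFromUser(pvProcess, &ui32Tmp, pvUserBuf, sizeof(ui32Tmp));
+	if (eError != PVRSRV_OK)
+	{
+		return eError;
+	}
+
+	ui32Tmp += ui32Delta;
+
+	/* Copy the result back out to the caller's buffer */
+	return OSCopyToUser(pvProcess, pvUserBuf, &ui32Tmp, sizeof(ui32Tmp));
+}
+#endif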
+
+#if defined (__linux__) || defined (WINDOWS_WDF) || defined(INTEGRITY_OS)
+#define OSBridgeCopyFromUser OSCopyFromUser
+#define OSBridgeCopyToUser OSCopyToUser
+#else
+/*************************************************************************/ /*!
+@Function OSBridgeCopyFromUser
+@Description Copy data from user-addressable memory into kernel-addressable
+ memory as part of a bridge call operation.
+ For operating systems that do not have a user/kernel space
+ distinction, this function must provide whatever mechanism
+ is needed to pass data into the bridge function call.
+ For operating systems which do have a user/kernel space
+ distinction (such as Linux) this function may be defined so
+ as to equate to a call to OSCopyFromUser().
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination Kernel memory
+@Input pvSrc pointer to the source User memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyFromUser (void *pvProcess,
+ void *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes);
+
+/*************************************************************************/ /*!
+@Function OSBridgeCopyToUser
+@Description Copy data to user-addressable memory from kernel-addressable
+ memory as part of a bridge call operation.
+ For operating systems that do not have a user/kernel space
+ distinction, this function must provide whatever mechanism
+ is needed to pass data into the bridge function call.
+ For operating systems which do have a user/kernel space
+ distinction (such as Linux) this function may be defined so
+ as to equate to a call to OSCopyToUser().
+@Input pvProcess handle of the connection
+@Input pvDest pointer to the destination User memory
+@Input pvSrc pointer to the source Kernel memory
+@Input ui32Bytes size of the data to be copied
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSBridgeCopyToUser (void *pvProcess,
+ void *pvDest,
+ const void *pvSrc,
+ size_t ui32Bytes);
+#endif
+
+/* To be increased if required in future */
+#define PVRSRV_MAX_BRIDGE_IN_SIZE 0x2000 /*!< Size of the memory block used to hold data passed in to a bridge call */
+#define PVRSRV_MAX_BRIDGE_OUT_SIZE 0x1000 /*!< Size of the memory block used to hold data returned from a bridge call */
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK) || defined(DOXYGEN)
+/*************************************************************************/ /*!
+@Function OSGetGlobalBridgeBuffers
+@Description Returns the addresses and sizes of the buffers used to pass
+ data into and out of bridge function calls.
+@Output ppvBridgeInBuffer pointer to the input bridge data buffer
+ of size PVRSRV_MAX_BRIDGE_IN_SIZE.
+@Output ppvBridgeOutBuffer pointer to the output bridge data buffer
+ of size PVRSRV_MAX_BRIDGE_OUT_SIZE.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSGetGlobalBridgeBuffers (void **ppvBridgeInBuffer,
+ void **ppvBridgeOutBuffer);
+#endif
+
+/*************************************************************************/ /*!
+@Function OSPlatformBridgeInit
+@Description Called during device creation to allow the OS port to register
+ other bridge modules and related resources that it requires.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeInit(void);
+
+/*************************************************************************/ /*!
+@Function OSPlatformBridgeDeInit
+@Description Called during device destruction to allow the OS port to
+ deregister its OS specific bridges and clean up other
+ related resources.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSPlatformBridgeDeInit(void);
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+#define OSWriteMemoryBarrier() wmb()
+#define OSReadMemoryBarrier() rmb()
+#define OSMemoryBarrier() mb()
+#else
+/*************************************************************************/ /*!
+@Function OSWriteMemoryBarrier
+@Description Insert a write memory barrier.
+ The write memory barrier guarantees that all store operations
+ (writes) specified before the barrier will appear to happen
+ before all of the store operations specified after the barrier.
+@Return None.
+*/ /**************************************************************************/
+void OSWriteMemoryBarrier(void);
+#define OSReadMemoryBarrier() OSMemoryBarrier()
+/*************************************************************************/ /*!
+@Function OSMemoryBarrier
+@Description Insert a read/write memory barrier.
+ The read and write memory barrier guarantees that all load
+ (read) and all store (write) operations specified before the
+ barrier will appear to happen before all of the load/store
+ operations specified after the barrier.
+@Return None.
+*/ /**************************************************************************/
+void OSMemoryBarrier(void);
+#endif
+
+/*************************************************************************/ /*!
+@Function PVRSRVToNativeError
+@Description Returns the OS-specific equivalent error number/code for
+ the specified PVRSRV_ERROR value.
+ If there is no equivalent, or the PVRSRV_ERROR value is
+ PVRSRV_OK (no error), 0 is returned.
+@Return The OS equivalent error code.
+*/ /**************************************************************************/
+int PVRSRVToNativeError(PVRSRV_ERROR e);
+#define OSPVRSRVToNativeError(e) ((PVRSRV_OK == (e)) ? 0 : PVRSRVToNativeError(e))
+
+
+#if defined(LINUX) && defined(__KERNEL__)
+
+/* Provide LockDep friendly definitions for Services RW locks */
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include "allocmem.h"
+
+#define OSWRLockCreate(ppsLock) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(ppsLock) = OSAllocMem(sizeof(struct rw_semaphore)); \
+ if (*(ppsLock)) { init_rwsem(*(ppsLock)); e = PVRSRV_OK; }; \
+ e;})
+#define OSWRLockDestroy(psLock) ({OSFreeMem(psLock); PVRSRV_OK;})
+
+#define OSWRLockAcquireRead(psLock) ({down_read(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseRead(psLock) ({up_read(psLock); PVRSRV_OK;})
+#define OSWRLockAcquireWrite(psLock) ({down_write(psLock); PVRSRV_OK;})
+#define OSWRLockReleaseWrite(psLock) ({up_write(psLock); PVRSRV_OK;})
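+
+/* Illustrative sketch (not part of this interface): guarding a read-mostly
+ * structure with a writer/reader lock. gpsListLock, ListReadAll and
+ * ListInsert are names made up for this example.
+ */
+#if 0
+static POSWR_LOCK gpsListLock; /* created once via OSWRLockCreate() */
+
+static void ListReadAll(void)
+{
+	OSWRLockAcquireRead(gpsListLock);   /* many readers may hold this */
+	/* walk the structure */
+	OSWRLockReleaseRead(gpsListLock);
+}
+
+static void ListInsert(void)
+{
+	OSWRLockAcquireWrite(gpsListLock);  /* exclusive; waits for readers */
+	/* modify the structure */
+	OSWRLockReleaseWrite(gpsListLock);
+}
+#endif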
+
+typedef spinlock_t *POS_SPINLOCK;
+
+#define OSSpinLockCreate(_ppsLock) ({ \
+ PVRSRV_ERROR e = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ *(_ppsLock) = OSAllocMem(sizeof(spinlock_t)); \
+ if (*(_ppsLock)) {spin_lock_init(*(_ppsLock)); e = PVRSRV_OK;} \
+ e;})
+#define OSSpinLockDestroy(_psLock) ({OSFreeMem(_psLock);})
+
+#define OSSpinLockAcquire(_pLock, _pFlags) {unsigned long *p = (unsigned long *)_pFlags; spin_lock_irqsave(_pLock, *p);}
+#define OSSpinLockRelease(_pLock, _flags) {spin_unlock_irqrestore(_pLock, _flags);}
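+
+/* Illustrative sketch (not part of this interface): the flags out-parameter
+ * pattern expected by OSSpinLockAcquire(). gpsStatLock, uiFlags and
+ * gui32Count are names made up for this example.
+ */
+#if 0
+static POS_SPINLOCK gpsStatLock; /* created once via OSSpinLockCreate() */
+static IMG_UINT32 gui32Count;
+
+static void StatBump(void)
+{
+	unsigned long uiFlags;
+
+	/* On Linux this saves and disables interrupts into uiFlags */
+	OSSpinLockAcquire(gpsStatLock, &uiFlags);
+	gui32Count++;
+	OSSpinLockRelease(gpsStatLock, uiFlags);
+}
+#endif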
+
+#elif defined(LINUX) || defined(__QNXNTO__) || defined (INTEGRITY_OS)
+/* User-mode unit tests use these definitions on Linux */
+
+PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock);
+void OSWRLockDestroy(POSWR_LOCK psLock);
+void OSWRLockAcquireRead(POSWR_LOCK psLock);
+void OSWRLockReleaseRead(POSWR_LOCK psLock);
+void OSWRLockAcquireWrite(POSWR_LOCK psLock);
+void OSWRLockReleaseWrite(POSWR_LOCK psLock);
+
+/* For now, spin-locks are required on Linux only, so other platforms fake
+ * spinlocks with normal mutex locks */
+#define POS_SPINLOCK POS_LOCK
+#define OSSpinLockCreate(ppLock) OSLockCreate(ppLock, LOCK_TYPE_PASSIVE)
+#define OSSpinLockDestroy(pLock) OSLockDestroy(pLock)
+#define OSSpinLockAcquire(pLock, pFlags) {PVR_UNREFERENCED_PARAMETER(pFlags); OSLockAcquire(pLock);}
+#define OSSpinLockRelease(pLock, flags) {PVR_UNREFERENCED_PARAMETER(flags); OSLockRelease(pLock);}
+
+#else
+
+/*************************************************************************/ /*!
+@Function OSWRLockCreate
+@Description Create a writer/reader lock.
+ This type of lock allows multiple concurrent readers but
+ only a single writer, which can improve performance for
+ read-mostly workloads.
+@Output ppsLock A handle to the created WR lock.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR OSWRLockCreate(POSWR_LOCK *ppsLock)
+{
+ PVR_UNREFERENCED_PARAMETER(ppsLock);
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockDestroy
+@Description Destroys a writer/reader lock.
+@Input psLock The handle of the WR lock to be destroyed.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockDestroy(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockAcquireRead
+@Description Acquire a writer/reader read lock.
+ If the write lock is already acquired, the caller will
+ block until it is released.
+@Input psLock The handle of the WR lock to be acquired for
+ reading.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireRead(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockReleaseRead
+@Description Release a writer/reader read lock.
+@Input psLock The handle of the WR lock whose read lock is to
+ be released.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseRead(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockAcquireWrite
+@Description Acquire a writer/reader write lock.
+ If the write lock or any read lock is already held,
+ the caller will block until all are released.
+@Input psLock The handle of the WR lock to be acquired for
+ writing.
+@Return None.
+*/ /**************************************************************************/
+static INLINE void OSWRLockAcquireWrite(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+
+/*************************************************************************/ /*!
+@Function OSWRLockReleaseWrite
+@Description Release a writer/reader write lock.
+@Input psLock The handle of the WR lock whose write lock is to
+ be released.
+@Return None
+*/ /**************************************************************************/
+static INLINE void OSWRLockReleaseWrite(POSWR_LOCK psLock)
+{
+ PVR_UNREFERENCED_PARAMETER(psLock);
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function OSDivide64r64
+@Description Divide a 64-bit value by a 32-bit value. Return the 64-bit
+ quotient.
+ The remainder is also returned in 'pui32Remainder'.
+@Input ui64Divident The number to be divided.
+@Input ui32Divisor The 32-bit value 'ui64Divident' is to
+ be divided by.
+@Output pui32Remainder The remainder of the division.
+@Return The 64-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT64 OSDivide64r64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
+
+/*************************************************************************/ /*!
+@Function OSDivide64
+@Description Divide a 64-bit value by a 32-bit value. Return a 32-bit
+ quotient.
+ The remainder is also returned in 'pui32Remainder'.
+ This function allows for a more optimal implementation
+ of a 64-bit division when the result is known to be
+ representable in 32 bits.
+@Input ui64Divident The number to be divided.
+@Input ui32Divisor The 32-bit value 'ui64Divident' is to
+ be divided by.
+@Output pui32Remainder The remainder of the division.
+@Return The 32-bit quotient (result of the division).
+*/ /**************************************************************************/
+IMG_UINT32 OSDivide64(IMG_UINT64 ui64Divident, IMG_UINT32 ui32Divisor, IMG_UINT32 *pui32Remainder);
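+
+/* Illustrative sketch (not part of this interface): computing an average
+ * duration in microseconds. The names below are made up for this example.
+ */
+#if 0
+static IMG_UINT32 AverageUs(IMG_UINT64 ui64TotalUs, IMG_UINT32 ui32Samples)
+{
+	IMG_UINT32 ui32Remainder;
+
+	/* The quotient is known to fit in 32 bits here, so the cheaper
+	 * OSDivide64 variant is preferred over OSDivide64r64 */
+	return OSDivide64(ui64TotalUs, ui32Samples, &ui32Remainder);
+}
+#endif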
+
+/*************************************************************************/ /*!
+@Function OSDumpStack
+@Description Dump the current task information and its stack trace.
+@Return None
+*/ /**************************************************************************/
+void OSDumpStack(void);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK) || defined(DOXYGEN)
+/*************************************************************************/ /*!
+@Function OSAcquireBridgeLock
+@Description Acquire the global bridge lock.
+ This prevents another bridge call from being actioned while
+ we are still servicing the current bridge call.
+ NB. This function must not return until the lock is acquired
+ (meaning the implementation should not time out or return with
+ an error, as the caller will assume they have the lock).
+ This function has an OS-specific implementation rather than
+ an abstracted implementation for efficiency reasons, as it
+ is called frequently.
+@Return None
+*/ /**************************************************************************/
+void OSAcquireBridgeLock(void);
+/*************************************************************************/ /*!
+@Function OSReleaseBridgeLock
+@Description Release the global bridge lock.
+ This function has an OS-specific implementation rather than
+ an abstracted implementation for efficiency reasons, as it
+ is called frequently.
+@Return None
+*/ /**************************************************************************/
+void OSReleaseBridgeLock(void);
+#endif
+
+/*
+ * Functions for providing support for PID statistics.
+ */
+typedef void (OS_STATS_PRINTF_FUNC)(void *pvFilePtr, const IMG_CHAR *pszFormat, ...);
+
+typedef void (OS_STATS_PRINT_FUNC)(void *pvFilePtr,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+typedef IMG_UINT32 (OS_INC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (OS_DEC_STATS_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticEntry
+@Description Create a statistic entry in the specified folder.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pszName String containing the name for the entry.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the
+ folder to create the entry in, or NULL for the
+ root.
+@Input pfnStatsPrint Pointer to function that can be used to print the
+ values of all the statistics.
+@Input pfnIncMemRefCt Pointer to function that can be used to take a
+ reference on the memory backing the statistic
+ entry.
+@Input pfnDecMemRefCt Pointer to function that can be used to drop a
+ reference on the memory backing the statistic
+ entry.
+@Input pvData OS specific reference that is passed back to
+ the print and refcount callbacks above.
+@Return Pointer void reference to the entry created, which can be
+ passed to OSRemoveStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateStatisticEntry(IMG_CHAR* pszName, void *pvFolder,
+ OS_STATS_PRINT_FUNC* pfnStatsPrint,
+ OS_INC_STATS_MEM_REFCOUNT_FUNC* pfnIncMemRefCt,
+ OS_DEC_STATS_MEM_REFCOUNT_FUNC* pfnDecMemRefCt,
+ void *pvData);
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticEntry
+@Description Removes a statistic entry.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pvEntry Pointer void reference to the entry created by
+ OSCreateStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveStatisticEntry(void *pvEntry);
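+
+/* Illustrative sketch (not part of this interface): registering a statistic
+ * entry backed by static storage. All names below are made up for this
+ * example.
+ */
+#if 0
+static IMG_UINT32 gui32MyStat;
+
+static void MyStatsPrint(void *pvFilePtr, void *pvStatPtr,
+                         OS_STATS_PRINTF_FUNC *pfnPrintf)
+{
+	IMG_UINT32 *pui32Val = pvStatPtr; /* the pvData given at creation */
+
+	pfnPrintf(pvFilePtr, "MyStat: %u\n", *pui32Val);
+}
+
+/* Statically backed, so the refcount callbacks are trivial */
+static IMG_UINT32 MyStatInc(void *pvStatPtr) { return 1; }
+static IMG_UINT32 MyStatDec(void *pvStatPtr) { return 0; }
+
+static void RegisterMyStat(void *pvFolder)
+{
+	(void) OSCreateStatisticEntry("my_stat", pvFolder, MyStatsPrint,
+	                              MyStatInc, MyStatDec, &gui32MyStat);
+}
+#endif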
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+/*************************************************************************/ /*!
+@Function OSCreateRawStatisticEntry
+@Description Create a raw statistic entry in the specified folder.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pszFileName String containing the name for the entry.
+@Input pvParentDir Reference from OSCreateStatisticFolder() of the
+ folder to create the entry in, or NULL for the
+ root.
+@Input pfnStatsPrint Pointer to function that can be used to print the
+ values of all the statistics.
+@Return Pointer void reference to the entry created, which can be
+ passed to OSRemoveRawStatisticEntry() to remove the entry.
+*/ /**************************************************************************/
+void *OSCreateRawStatisticEntry(const IMG_CHAR *pszFileName, void *pvParentDir,
+ OS_STATS_PRINT_FUNC *pfnStatsPrint);
+
+/*************************************************************************/ /*!
+@Function OSRemoveRawStatisticEntry
+@Description Removes a raw statistic entry.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pvEntry Pointer void reference to the entry created by
+ OSCreateRawStatisticEntry().
+*/ /**************************************************************************/
+void OSRemoveRawStatisticEntry(void *pvEntry);
+#endif
+
+/*************************************************************************/ /*!
+@Function OSCreateStatisticFolder
+@Description Create a statistic folder to hold statistic entries.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input pszName String containing the name for the folder.
+@Input pvFolder Reference from OSCreateStatisticFolder() of the folder
+ to create the folder in, or NULL for the root.
+@Return Pointer void reference to the folder created, which can be
+ passed to OSRemoveStatisticFolder() to remove the folder.
+*/ /**************************************************************************/
+void *OSCreateStatisticFolder(IMG_CHAR *pszName, void *pvFolder);
+
+/*************************************************************************/ /*!
+@Function OSRemoveStatisticFolder
+@Description Removes a statistic folder.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input ppvFolder Reference from OSCreateStatisticFolder() of the
+ folder that should be removed.
+ This needs to be a double pointer because it is set
+ to NULL immediately after the memory is freed, to
+ avoid races and use-after-free.
+*/ /**************************************************************************/
+void OSRemoveStatisticFolder(void **ppvFolder);
+
+/*************************************************************************/ /*!
+@Function OSUserModeAccessToPerfCountersEn
+@Description Permit User-mode access to CPU performance counter
+ registers.
+ This function is called during device initialisation.
+ Certain CPU architectures may need to explicitly permit
+ User mode access to performance counters - if this is
+ required, the necessary code should be implemented inside
+ this function.
+@Return None.
+*/ /**************************************************************************/
+void OSUserModeAccessToPerfCountersEn(void);
+
+/*************************************************************************/ /*!
+@Function OSDebugSignalPID
+@Description Sends a SIGTRAP signal to a specific PID in user mode for
+ debugging purposes. The user mode process can register a handler
+ against this signal.
+ This is necessary to support the Rogue debugger. If the Rogue
+ debugger is not used then this function may be implemented as
+ a stub.
+@Input ui32PID The PID for the signal.
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR OSDebugSignalPID(IMG_UINT32 ui32PID);
+
+#if defined(LINUX) && defined(__KERNEL__) && !defined(DOXYGEN)
+#define OSWarnOn(a) WARN_ON(a)
+#else
+/*************************************************************************/ /*!
+@Function OSWarnOn
+@Description This API allows the driver to emit a special token and stack
+ dump to the server log when an issue is detected that needs the
+ OS to be notified. The token or call may be used to trigger
+ log collection by the OS environment.
+ PVR_DPF log messages will have been emitted prior to this call.
+@Input a Expression to evaluate; if true, the warning is triggered
+@Return None
+*/ /**************************************************************************/
+#define OSWarnOn(a) do { if ((a)) { OSDumpStack(); } } while(0)
+#endif
+
+/*************************************************************************/ /*!
+@Function OSThreadDumpInfo
+@Description Traverse the thread list and call each of the stored
+ callbacks to dump their info into the debug dump.
+ Where the operating system does not support a debugfs
+ file system, this function may be implemented as a stub.
+@Input hDbgReqestHandle Handle of the debug request
+@Input pfnDumpDebugPrintf The 'printf' function to be called to
+ display the debug info
+@Input pvDumpDebugFile Optional file identifier to be passed to
+ the 'printf' function if required
+*/ /**************************************************************************/
+void OSThreadDumpInfo(IMG_HANDLE hDbgReqestHandle,
+ DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/*************************************************************************/ /*!
+@Function OSDumpVersionInfo
+@Description Store OS version information in debug dump.
+@Input pfnDumpDebugPrintf The 'printf' function to be called to
+ display the debug info
+@Input pvDumpDebugFile Optional file identifier to be passed to
+ the 'printf' function if required
+*/ /**************************************************************************/
+void OSDumpVersionInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+#endif /* __OSFUNC_H__ */
+
+/******************************************************************************
+ End of file (osfunc.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/osfunc_arm64.c b/drivers/gpu/drm/img-rogue/1.10/osfunc_arm64.c
new file mode 100644
index 00000000000000..da0f4f473c9f0d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osfunc_arm64.c
@@ -0,0 +1,293 @@
+/*************************************************************************/ /*!
+@File
+@Title arm64 specific OS functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS functions whose implementation is processor specific
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/cpumask.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+#if defined(CONFIG_OUTER_CACHE)
+ /* If you encounter a 64-bit ARM system with an outer cache, you'll need
+ * to add the necessary code to manage that cache. See osfunc_arm.c
+ * for an example of how to do so.
+ */
+ #error "CONFIG_OUTER_CACHE not supported on arm64."
+#endif
+
+static void per_cpu_cache_flush(void *arg)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
+ unsigned long irqflags;
+ signed long Clidr, Csselr, LoC, Assoc, Nway, Nsets, Level, Lsize, Var;
+ static DEFINE_SPINLOCK(spinlock);
+
+ spin_lock_irqsave(&spinlock, irqflags);
+
+ /* Read cache level ID register */
+ asm volatile (
+ "dmb sy\n\t"
+ "mrs %[rc], clidr_el1\n\t"
+ : [rc] "=r" (Clidr));
+
+ /* Exit if there is no cache level of coherency */
+ LoC = (Clidr & (((1UL << 3)-1) << 24)) >> 23;
+ if (! LoC)
+ {
+ goto e0;
+ }
+
+ /*
+ Walk the cache hierarchy up to the level of coherency (LoC); at each
+ level, skip instruction-only caches and determine the attributes of
+ the data/unified cache at that level.
+ */
+ for (Level = 0; LoC > Level; Level += 2)
+ {
+ /* Extract this level's CtypeN field; skip the level if it has
+ no cache at all or an instruction cache only */
+ Var = (Clidr >> (Level + (Level >> 1))) & ((1UL << 3) - 1);
+ if (Var < 2)
+ {
+ continue;
+ }
+
+ /* Select this dcache level for query */
+ asm volatile (
+ "msr csselr_el1, %[val]\n\t"
+ "isb\n\t"
+ "mrs %[rc], ccsidr_el1\n\t"
+ : [rc] "=r" (Csselr) : [val] "r" (Level));
+
+ /* Look-up this dcache organisation attributes */
+ Nsets = (Csselr >> 13) & ((1UL << 15) - 1);
+ Assoc = (Csselr >> 3) & ((1UL << 10) - 1);
+ Lsize = (Csselr & ((1UL << 3) - 1)) + 4;
+ Nway = 0;
+
+ /* For performance, do this in assembly: for each set at this dcache
+ level, and for each way within the set, construct the "DC CISW"
+ operand and issue the instruction */
+ asm volatile (
+ "mov x6, %[val0]\n\t"
+ "mov x9, %[rc1]\n\t"
+ "clz w9, w6\n\t"
+ "mov %[rc1], x9\n\t"
+ "lsetloop:\n\t"
+ "mov %[rc5], %[val0]\n\t"
+ "swayloop:\n\t"
+ "lsl x6, %[rc5], %[rc1]\n\t"
+ "orr x9, %[val2], x6\n\t"
+ "lsl x6, %[rc3], %[val4]\n\t"
+ "orr x9, x9, x6\n\t"
+ "dc cisw, x9\n\t"
+ "subs %[rc5], %[rc5], #1\n\t"
+ "b.ge swayloop\n\t"
+ "subs %[rc3], %[rc3], #1\n\t"
+ "b.ge lsetloop\n\t"
+ : [rc1] "+r" (Nway), [rc3] "+r" (Nsets), [rc5] "+r" (Var)
+ : [val0] "r" (Assoc), [val2] "r" (Level), [val4] "r" (Lsize)
+ : "x6", "x9", "cc");
+ }
+
+e0:
+ /* Re-select L0 d-cache as active level, issue barrier before exit */
+ Var = 0;
+ asm volatile (
+ "msr csselr_el1, %[val]\n\t"
+ "dsb sy\n\t"
+ "isb\n\t"
+ : : [val] "r" (Var));
+
+ spin_unlock_irqrestore(&spinlock, irqflags);
+#else
+ flush_cache_all();
+#endif
+ PVR_UNREFERENCED_PARAMETER(arg);
+}
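+
+/*
+ * For reference, a sketch of the set/way operand encoding the loop above
+ * constructs (per the ARMv8-A Architecture Reference Manual):
+ *
+ *     operand = (Way << clz32(Assoc)) | (Set << Lsize) | Level
+ *
+ * where Assoc is the (ways - 1) value read from CCSIDR_EL1, so
+ * clz32(Assoc) == 32 - ceil(log2(ways)); Lsize is log2 of the line length
+ * in bytes; and the Level variable is kept pre-doubled (stepping by 2),
+ * which matches both the CSSELR_EL1 selector and the Level<<1 field of
+ * the DC CISW operand.
+ */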
+
+static inline void FlushRange(void *pvRangeAddrStart,
+ void *pvRangeAddrEnd,
+ PVRSRV_CACHE_OP eCacheOp)
+{
+ IMG_UINT32 ui32CacheLineSize = OSCPUCacheAttributeSize(PVR_DCACHE_LINE_SIZE);
+ IMG_BYTE *pbStart = pvRangeAddrStart;
+ IMG_BYTE *pbEnd = pvRangeAddrEnd;
+ IMG_BYTE *pbBase;
+
+ /*
+ On arm64, the TRM states in D5.8.1 (data and unified caches) that if cache
+ maintenance is performed on a memory location using a VA, the effect of
+ that cache maintenance is visible to all VA aliases of the physical memory
+ location. So here it's quicker to issue the machine cache maintenance
+ instruction directly without going via the Linux kernel DMA framework as
+ this is sufficient to maintain the CPU d-caches on arm64.
+ */
+ pbEnd = (IMG_BYTE *) PVR_ALIGN((uintptr_t)pbEnd, (uintptr_t)ui32CacheLineSize);
+ for (pbBase = pbStart; pbBase < pbEnd; pbBase += ui32CacheLineSize)
+ {
+ switch (eCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ asm volatile ("dc cvac, %0" :: "r" (pbBase));
+ break;
+
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ asm volatile ("dc ivac, %0" :: "r" (pbBase));
+ break;
+
+ case PVRSRV_CACHE_OP_FLUSH:
+ asm volatile ("dc civac, %0" :: "r" (pbBase));
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cache maintenance operation type %d is invalid",
+ __FUNCTION__, eCacheOp));
+ break;
+ }
+ }
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ switch(uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ case PVRSRV_CACHE_OP_FLUSH:
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ on_each_cpu(per_cpu_cache_flush, NULL, 1);
+ break;
+
+ case PVRSRV_CACHE_OP_NONE:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Global cache operation type %d is invalid",
+ __FUNCTION__, uiCacheOp));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return eError;
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ struct device *dev;
+ const struct dma_map_ops *dma_ops;
+
+ if (pvVirtStart)
+ {
+ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_FLUSH);
+ return;
+ }
+
+ dev = psDevNode->psDevConfig->pvOSDevice;
+
+ dma_ops = get_dma_ops(dev);
+ dma_ops->sync_single_for_device(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+ dma_ops->sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ struct device *dev;
+ const struct dma_map_ops *dma_ops;
+
+ if (pvVirtStart)
+ {
+ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_CLEAN);
+ return;
+ }
+
+ dev = psDevNode->psDevConfig->pvOSDevice;
+
+ dma_ops = get_dma_ops(dev);
+ dma_ops->sync_single_for_device(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_TO_DEVICE);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ struct device *dev;
+ const struct dma_map_ops *dma_ops;
+
+ if (pvVirtStart)
+ {
+ FlushRange(pvVirtStart, pvVirtEnd, PVRSRV_CACHE_OP_INVALIDATE);
+ return;
+ }
+
+ dev = psDevNode->psDevConfig->pvOSDevice;
+
+ dma_ops = get_dma_ops(dev);
+ dma_ops->sync_single_for_cpu(dev, sCPUPhysStart.uiAddr, sCPUPhysEnd.uiAddr - sCPUPhysStart.uiAddr, DMA_FROM_DEVICE);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+ return PVRSRV_CACHE_OP_ADDR_TYPE_PHYSICAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+ /* FIXME: implement similarly to __arm__ */
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/osfunc_x86.c b/drivers/gpu/drm/img-rogue/1.10/osfunc_x86.c
new file mode 100644
index 00000000000000..4a599a1bd858be
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osfunc_x86.c
@@ -0,0 +1,158 @@
+/*************************************************************************/ /*!
+@File
+@Title x86 specific OS functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS functions whose implementation is processor specific
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/smp.h>
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_defs.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+
+
+static void per_cpu_cache_flush(void *arg)
+{
+ PVR_UNREFERENCED_PARAMETER(arg);
+#if !defined(CONFIG_L4)
+ wbinvd();
+#endif
+}
+
+PVRSRV_ERROR OSCPUOperation(PVRSRV_CACHE_OP uiCacheOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ switch(uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_CLEAN:
+ case PVRSRV_CACHE_OP_FLUSH:
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ on_each_cpu(per_cpu_cache_flush, NULL, 1);
+ break;
+
+ case PVRSRV_CACHE_OP_NONE:
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Global cache operation type %d is invalid",
+ __FUNCTION__, uiCacheOp));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_ASSERT(0);
+ break;
+ }
+
+ return eError;
+}
+
+static void x86_flush_cache_range(const void *pvStart, const void *pvEnd)
+{
+ IMG_BYTE *pbStart = (IMG_BYTE *)pvStart;
+ IMG_BYTE *pbEnd = (IMG_BYTE *)pvEnd;
+ IMG_BYTE *pbBase;
+
+ pbEnd = (IMG_BYTE *)PVR_ALIGN((uintptr_t)pbEnd,
+ (uintptr_t)boot_cpu_data.x86_clflush_size);
+
+ mb();
+ for(pbBase = pbStart; pbBase < pbEnd; pbBase += boot_cpu_data.x86_clflush_size)
+ {
+#if !defined(CONFIG_L4)
+ clflush(pbBase);
+#endif
+ }
+ mb();
+}
+
+void OSCPUCacheFlushRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+ x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+
+void OSCPUCacheCleanRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+ /* x86 has no clean-only operation; clflush writes back and invalidates */
+ x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+void OSCPUCacheInvalidateRangeKM(PVRSRV_DEVICE_NODE *psDevNode,
+ void *pvVirtStart,
+ void *pvVirtEnd,
+ IMG_CPU_PHYADDR sCPUPhysStart,
+ IMG_CPU_PHYADDR sCPUPhysEnd)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysStart);
+ PVR_UNREFERENCED_PARAMETER(sCPUPhysEnd);
+
+ /* No invalidate-only operation on x86; fall back to a full flush */
+ x86_flush_cache_range(pvVirtStart, pvVirtEnd);
+}
+
+PVRSRV_CACHE_OP_ADDR_TYPE OSCPUCacheOpAddressType(void)
+{
+ return PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL;
+}
+
+void OSUserModeAccessToPerfCountersEn(void)
+{
+ /* Not applicable to x86 architecture. */
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/oskm_apphint.h b/drivers/gpu/drm/img-rogue/1.10/oskm_apphint.h
new file mode 100644
index 00000000000000..af4ccb11e96609
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/oskm_apphint.h
@@ -0,0 +1,176 @@
+/*************************************************************************/ /*!
+@File oskm_apphint.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS-independent interface for retrieving KM apphints
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_defs.h"
+#if defined(LINUX)
+#include "km_apphint.h"
+#else
+#include "services_client_porting.h"
+#endif
+#if !defined(__OSKM_APPHINT_H__)
+#define __OSKM_APPHINT_H__
+
+
+#if defined(LINUX) && !defined(DOXYGEN)
+static INLINE IMG_UINT os_get_km_apphint_UINT32(void *state, APPHINT_ID id, IMG_UINT32 *pAppHintDefault, IMG_UINT32 *pVal) {
+ return !pvr_apphint_get_uint32(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_UINT64(void *state, APPHINT_ID id, IMG_UINT64 *pAppHintDefault, IMG_UINT64 *pVal) {
+ return !pvr_apphint_get_uint64(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_BOOL(void *state, APPHINT_ID id, IMG_BOOL *pAppHintDefault, IMG_BOOL *pVal) {
+ return !pvr_apphint_get_bool(id, pVal);
+}
+static INLINE IMG_UINT os_get_km_apphint_STRING(void *state, APPHINT_ID id, IMG_CHAR **pAppHintDefault, IMG_CHAR *buffer, size_t size) {
+ return !pvr_apphint_get_string(id, buffer, size);
+}
+
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+ os_get_km_apphint_UINT32(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+ os_get_km_apphint_UINT64(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+ os_get_km_apphint_BOOL(state, APPHINT_ID_ ## name, appHintDefault, value)
+
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+ os_get_km_apphint_STRING(state, APPHINT_ID_ ## name, appHintDefault, buffer, size)
+
+
+#define OSCreateKMAppHintState(state) \
+ PVR_UNREFERENCED_PARAMETER(state)
+
+#define OSFreeKMAppHintState(state) \
+ PVR_UNREFERENCED_PARAMETER(state)
+
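+/* Illustrative sketch (not part of this interface): querying a UINT32
+ * apphint with a fallback default. EnableSignatureChecks is used here only
+ * as an assumed example hint name.
+ */
+#if 0
+static IMG_UINT32 GetMyHint(void)
+{
+	void *pvState = NULL;
+	IMG_UINT32 ui32Default = 1;
+	IMG_UINT32 ui32Value = ui32Default;
+
+	OSCreateKMAppHintState(&pvState);
+	OSGetKMAppHintUINT32(pvState, EnableSignatureChecks,
+	                     &ui32Default, &ui32Value);
+	OSFreeKMAppHintState(pvState);
+
+	return ui32Value;
+}
+#endif
+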
+#else /* #if defined(LINUX) && !defined(DOXYGEN) */
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT32(state, name, appHintDefault, value)
+@Description Interface for retrieval of uint32 km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output value Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT32(state, name, appHintDefault, value) \
+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintUINT64(state, name, appHintDefault, value)
+@Description Interface for retrieval of uint64 km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output value Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintUINT64(state, name, appHintDefault, value) \
+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintBOOL(state, name, appHintDefault, value)
+@Description Interface for retrieval of IMG_BOOL km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output value Pointer to returned app hint value.
+ */ /**************************************************************************/
+#define OSGetKMAppHintBOOL(state, name, appHintDefault, value) \
+ PVRSRVGetAppHint(state, # name, IMG_UINT_TYPE, appHintDefault, value)
+
+/**************************************************************************/ /*!
+@def OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size)
+@Description Interface for retrieval of string km app hint.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVGetAppHint() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+@Input name Name used to identify app hint
+@Input appHintDefault Default value to be returned if no
+ app hint is found.
+@Output buffer Buffer used to return app hint string.
+@Input size Size of the buffer.
+ */ /**************************************************************************/
+#define OSGetKMAppHintSTRING(state, name, appHintDefault, buffer, size) \
+ (PVR_UNREFERENCED_PARAMETER(size), PVRSRVGetAppHint(state, # name, IMG_STRING_TYPE, appHintDefault, buffer))
+
+/**************************************************************************/ /*!
+@def OSCreateKMAppHintState(state)
+@Description Creates the app hint state.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVCreateAppHintState() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Output state App hint state
+ */ /**************************************************************************/
+#define OSCreateKMAppHintState(state) \
+ PVRSRVCreateAppHintState(IMG_SRV_UM, 0, state)
+
+/**************************************************************************/ /*!
+@def OSFreeKMAppHintState
+@Description Free the app hint state.
+ For non-linux operating systems, this macro implements a call
+ from server code to PVRSRVFreeAppHintState() declared in
+ services_client_porting.h, effectively making it 'shared' code.
+@Input state App hint state
+ */ /**************************************************************************/
+#define OSFreeKMAppHintState(state) \
+ PVRSRVFreeAppHintState(IMG_SRV_UM, state)
+
+#endif /* #if defined(LINUX) */
+
+#endif /* __OSKM_APPHINT_H__ */
+
+/******************************************************************************
+ End of file (oskm_apphint.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/osmmap.h b/drivers/gpu/drm/img-rogue/1.10/osmmap.h
new file mode 100644
index 00000000000000..bc831517175499
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osmmap.h
@@ -0,0 +1,123 @@
+/*************************************************************************/ /*!
+@File
+@Title OS Interface for mapping PMRs into CPU space.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS abstraction for the mmap2 interface for mapping PMRs into
+ User Mode memory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _OSMMAP_H_
+#define _OSMMAP_H_
+
+#include <powervr/mem_types.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/**************************************************************************/ /*!
+@Function OSMMapPMR
+@Description Maps the specified PMR into CPU memory so that it may be
+ accessed by the user process.
+ Whether the memory is mapped read only, read/write, or not at
+ all, is dependent on the PMR itself.
+ The PMR handle is opaque to the user, and lower levels of this
+ stack ensure that the handle is private to this process, such that
+ this API cannot be abused to gain access to other processes' PMRs.
+ The OS implementation of this function should return the virtual
+ address and length for the User to use. The "PrivData" is to be
+ stored opaquely by the caller (N.B. the caller should make no
+ assumptions about its value; in particular, NULL is a valid
+ handle) and passed back to the matching call to OSMUnmapPMR.
+ The OS implementation is free to use the PrivData handle for any
+ purpose it sees fit.
+@Input hBridge The bridge handle.
+@Input hPMR The handle of the PMR to be mapped.
+@Input uiPMRLength The size of the PMR.
+@Input uiFlags Flags indicating how the mapping should
+ be done (read-only, etc). These may not
+ be honoured if the PMR does not permit
+ them.
+@Output phOSMMapPrivDataOut Returned private data.
+@Output ppvMappingAddressOut The returned mapping.
+@Output puiMappingLengthOut The size of the returned mapping.
+@Return PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+extern PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiPMRLength,
+ IMG_UINT32 uiFlags,
+ IMG_HANDLE *phOSMMapPrivDataOut,
+ void **ppvMappingAddressOut,
+ size_t *puiMappingLengthOut);
+
+/**************************************************************************/ /*!
+@Function OSMUnmapPMR
+@Description Unmaps the specified PMR from CPU memory.
+ This function is the counterpart to OSMMapPMR.
+ The caller is required to pass the PMR handle back in, along
+ with the same 3-tuple of information (private data, mapping
+ address and mapping length) that was returned by OSMMapPMR.
+ It is possible to unmap only part of the original mapping
+ with this call, by specifying just the sub-range to be
+ unmapped in pvMappingAddress and uiMappingLength.
+@Input hBridge The bridge handle.
+@Input hPMR The handle of the PMR to be unmapped.
+@Input hOSMMapPrivData The OS private data of the mapping.
+@Input pvMappingAddress The address to be unmapped.
+@Input uiMappingLength The size to be unmapped.
+@Return PVRSRV_OK on success, failure code otherwise.
+ */ /**************************************************************************/
+/*
+  FIXME:
+  perhaps this function should take _only_ the hOSMMapPrivData argument,
+  with the implementation required to store any of the other data items
+  it needs to do the unmap?
+*/
+extern void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE hOSMMapPrivData,
+ void *pvMappingAddress,
+ size_t uiMappingLength);
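+
+/* Illustrative usage sketch (not part of the driver source): a typical
+ * map/use/unmap pairing with the two functions declared above. The bridge
+ * and PMR handles are assumed to have been obtained elsewhere.
+ *
+ *     IMG_HANDLE hPrivData;
+ *     void *pvAddr;
+ *     size_t uiLen;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = OSMMapPMR(hBridge, hPMR, uiPMRLength, uiFlags,
+ *                        &hPrivData, &pvAddr, &uiLen);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         ... access pvAddr[0 .. uiLen) ...
+ *         OSMUnmapPMR(hBridge, hPMR, hPrivData, pvAddr, uiLen);
+ *     }
+ */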
+
+
+#endif /* _OSMMAP_H_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/osmmap_stub.c b/drivers/gpu/drm/img-rogue/1.10/osmmap_stub.c
new file mode 100644
index 00000000000000..f5b6e6eeb8273d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/osmmap_stub.c
@@ -0,0 +1,133 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS abstraction for the mmap2 interface for mapping PMRs into
+ User Mode memory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* our exported API */
+#include "osmmap.h"
+
+/* include/ */
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+/* services/include/ */
+
+/* services/include/srvhelper/ */
+#include "ra.h"
+
+#include "pmr.h"
+
+PVRSRV_ERROR
+OSMMapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_DEVMEM_SIZE_T uiPMRSize,
+ IMG_UINT32 uiFlags,
+ IMG_HANDLE *phOSMMapPrivDataOut,
+ void **ppvMappingAddressOut,
+ size_t *puiMappingLengthOut)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMR;
+ void *pvKernelAddress;
+ size_t uiLength;
+ IMG_HANDLE hPriv;
+
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+
+ /*
+	   Normally this function would mmap a PMR into the memory space of
+	   the user process, but in this case we're taking a PMR and mapping
+	   it into kernel virtual space. We keep the same function name for
+	   symmetry, as this allows the higher layers of the software stack
+	   not to care whether they are running in user mode or kernel mode.
+ */
+
+ psPMR = hPMR;
+
+ eError = PMRAcquireKernelMappingData(psPMR,
+ 0,
+ 0,
+ &pvKernelAddress,
+ &uiLength,
+ &hPriv);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ *phOSMMapPrivDataOut = hPriv;
+ *ppvMappingAddressOut = pvKernelAddress;
+ *puiMappingLengthOut = uiLength;
+
+ /* MappingLength might be rounded up to page size */
+ PVR_ASSERT(*puiMappingLengthOut >= uiPMRSize);
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void
+OSMUnmapPMR(IMG_HANDLE hBridge,
+ IMG_HANDLE hPMR,
+ IMG_HANDLE hOSMMapPrivData,
+ void *pvMappingAddress,
+ size_t uiMappingLength)
+{
+ PMR *psPMR;
+
+ PVR_UNREFERENCED_PARAMETER(hBridge);
+ PVR_UNREFERENCED_PARAMETER(pvMappingAddress);
+ PVR_UNREFERENCED_PARAMETER(uiMappingLength);
+
+ psPMR = hPMR;
+ PMRReleaseKernelMappingData(psPMR,
+ hOSMMapPrivData);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump.c b/drivers/gpu/drm/img-rogue/1.10/pdump.c
new file mode 100644
index 00000000000000..068f2098a55199
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump.c
@@ -0,0 +1,471 @@
+/*************************************************************************/ /*!
+@File
+@Title Parameter dump macro target routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined (PDUMP)
+
+#include <asm/atomic.h>
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+
+#include "dbgdrvif_srv5.h"
+#include "allocmem.h"
+#include "pdump_km.h"
+#include "pdump_osfunc.h"
+#include "services_km.h"
+
+#include <linux/kernel.h> // sprintf
+#include <linux/string.h> // strncpy, strlen
+#include <linux/mutex.h>
+
+#define PDUMP_DATAMASTER_PIXEL (1)
+#define PDUMP_DATAMASTER_EDM (3)
+
+static PDBGKM_SERVICE_TABLE gpfnDbgDrv;
+
+
+typedef struct PDBG_PDUMP_STATE_TAG
+{
+ PDBG_STREAM psStream[PDUMP_NUM_CHANNELS];
+
+ IMG_CHAR *pszMsg;
+ IMG_CHAR *pszScript;
+ IMG_CHAR *pszFile;
+
+} PDBG_PDUMP_STATE;
+
+static PDBG_PDUMP_STATE gsDBGPdumpState;
+
+#define SZ_MSG_SIZE_MAX			(PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_SCRIPT_SIZE_MAX		(PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+#define SZ_FILENAME_SIZE_MAX	(PVRSRV_PDUMP_MAX_COMMENT_SIZE-1)
+
+static struct mutex gsPDumpMutex;
+
+void DBGDrvGetServiceTable(void **fn_table);
+
+
+/*!
+ * \name PDumpOSGetScriptString
+ */
+PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *phScript = (IMG_HANDLE)gsDBGPdumpState.pszScript;
+ *pui32MaxLen = SZ_SCRIPT_SIZE_MAX;
+ if (!*phScript)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSGetMessageString
+ */
+PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *ppszMsg = gsDBGPdumpState.pszMsg;
+ *pui32MaxLen = SZ_MSG_SIZE_MAX;
+ if (!*ppszMsg)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSGetFilenameString
+ */
+PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile,
+ IMG_UINT32 *pui32MaxLen)
+{
+ *ppszFile = gsDBGPdumpState.pszFile;
+ *pui32MaxLen = SZ_FILENAME_SIZE_MAX;
+ if (!*ppszFile)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ACTIVE;
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSBufprintf
+ */
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...)
+{
+ IMG_CHAR* pszBuf = hBuf;
+ IMG_INT32 n;
+ va_list vaArgs;
+
+ va_start(vaArgs, pszFormat);
+
+ n = vsnprintf(pszBuf, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ va_end(vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* n >= size: truncated (C99); -1: error (legacy snprintf) */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ g_ui32EveryLineCounter++;
+#endif
+
+ /* Put line ending sequence at the end if it isn't already there */
+ PDumpOSVerifyLineEnding(pszBuf, ui32ScriptSizeMax);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSVSprintf
+ */
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, const IMG_CHAR* pszFormat, PDUMP_va_list vaArgs)
+{
+ IMG_INT32 n;
+
+ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* n >= size: truncated (C99); -1: error (legacy snprintf) */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+ * \name PDumpOSSprintf
+ */
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...)
+{
+ IMG_INT32 n;
+ va_list vaArgs;
+
+ va_start(vaArgs, pszFormat);
+
+ n = vsnprintf(pszComment, ui32ScriptSizeMax, pszFormat, vaArgs);
+
+ va_end(vaArgs);
+
+	if (n>=(IMG_INT32)ui32ScriptSizeMax || n==-1)	/* n >= size: truncated (C99); -1: error (legacy snprintf) */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Buffer overflow detected, pdump output may be incomplete."));
+
+ return PVRSRV_ERROR_PDUMP_BUF_OVERFLOW;
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * \name PDumpOSBuflen
+ */
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+ IMG_CHAR* pszBuf = hBuffer;
+ IMG_UINT32 ui32Count = 0;
+
+ while ((ui32Count<ui32BufferSizeMax) && (pszBuf[ui32Count]!=0))
+ {
+ ui32Count++;
+ }
+ return(ui32Count);
+}
+
+/*!
+ * \name PDumpOSVerifyLineEnding
+ */
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax)
+{
+ IMG_UINT32 ui32Count;
+ IMG_CHAR* pszBuf = hBuffer;
+
+ /* strlen */
+ ui32Count = PDumpOSBuflen(hBuffer, ui32BufferSizeMax);
+
+ /* Put \n sequence at the end if it isn't already there */
+ if ((ui32Count >= 1) && (pszBuf[ui32Count-1] != '\n') && (ui32Count<ui32BufferSizeMax))
+ {
+ pszBuf[ui32Count] = '\n';
+ ui32Count++;
+ pszBuf[ui32Count] = '\0';
+ }
+}
+
+
+/*!
+ * \name PDumpOSSetSplitMarker
+ */
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker)
+{
+ PDBG_STREAM psStream = (PDBG_STREAM) hStream;
+
+ PVR_ASSERT(gpfnDbgDrv);
+ gpfnDbgDrv->pfnSetMarker(psStream, ui32Marker);
+ return IMG_TRUE;
+}
+
+
+/*!
+ * \name PDumpOSGetSplitMarker
+ */
+IMG_UINT32 PDumpOSGetSplitMarker(IMG_HANDLE hStream)
+{
+ PDBG_STREAM psStream = (PDBG_STREAM) hStream;
+
+ PVR_ASSERT(gpfnDbgDrv);
+ return gpfnDbgDrv->pfnGetMarker(psStream);
+}
+
+/*!
+ * \name PDumpOSDebugDriverWrite
+ */
+IMG_UINT32 PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount)
+{
+ PVR_ASSERT(gpfnDbgDrv != NULL);
+
+ return gpfnDbgDrv->pfnDBGDrivWrite2(psStream, pui8Data, ui32BCount);
+}
+
+/*!
+ * \name PDumpOSReleaseExecution
+ */
+void PDumpOSReleaseExecution(void)
+{
+ OSReleaseThreadQuanta();
+}
+
+/**************************************************************************
+ * Function Name : PDumpOSInit
+ * Outputs : None
+ * Returns :
+ * Description : Reset connection to vldbgdrv
+ * Then try to connect to PDUMP streams
+**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript, PDUMP_CHANNEL* psBlkScript,
+ IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment)
+{
+ PVRSRV_ERROR eError;
+
+ *pui32InitCapMode = DEBUG_CAPMODE_FRAMED;
+ *ppszEnvComment = NULL;
+
+	/* If we tried this earlier we might already have connected to the
+	 * debug driver, but if pdump.exe was running then the stream connect
+	 * would have failed.
+	 */
+ if (!gpfnDbgDrv)
+ {
+ DBGDrvGetServiceTable((void **)&gpfnDbgDrv);
+
+ // If something failed then no point in trying to connect streams
+ if (gpfnDbgDrv == NULL)
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ if(!gsDBGPdumpState.pszFile)
+ {
+ gsDBGPdumpState.pszFile = OSAllocMem(SZ_FILENAME_SIZE_MAX);
+ if (gsDBGPdumpState.pszFile == NULL)
+ {
+ goto init_failed;
+ }
+ }
+
+ if(!gsDBGPdumpState.pszMsg)
+ {
+ gsDBGPdumpState.pszMsg = OSAllocMem(SZ_MSG_SIZE_MAX);
+ if (gsDBGPdumpState.pszMsg == NULL)
+ {
+ goto init_failed;
+ }
+ }
+
+ if(!gsDBGPdumpState.pszScript)
+ {
+ gsDBGPdumpState.pszScript = OSAllocMem(SZ_SCRIPT_SIZE_MAX);
+ if (gsDBGPdumpState.pszScript == NULL)
+ {
+ goto init_failed;
+ }
+ }
+
+ eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ if (!gpfnDbgDrv->pfnCreateStream(PDUMP_PARAM_CHANNEL_NAME, 0, 10, &psParam->hInit, &psParam->hMain, &psParam->hDeinit))
+ {
+ goto init_failed;
+ }
+ gsDBGPdumpState.psStream[PDUMP_CHANNEL_PARAM] = psParam->hMain;
+
+
+ if (!gpfnDbgDrv->pfnCreateStream(PDUMP_SCRIPT_CHANNEL_NAME, 0, 10, &psScript->hInit, &psScript->hMain, &psScript->hDeinit))
+ {
+ goto init_failed;
+ }
+ gsDBGPdumpState.psStream[PDUMP_CHANNEL_SCRIPT] = psScript->hMain;
+
+ if (!gpfnDbgDrv->pfnCreateStream(PDUMP_BLKSCRIPT_CHANNEL_NAME, 0, 10, &psBlkScript->hInit, &psBlkScript->hMain, &psBlkScript->hDeinit))
+ {
+ goto init_failed;
+ }
+ gsDBGPdumpState.psStream[PDUMP_CHANNEL_BLKSCRIPT] = psBlkScript->hMain;
+ }
+
+ return PVRSRV_OK;
+
+init_failed:
+ PDumpOSDeInit(psParam, psScript, psBlkScript);
+ return eError;
+}
+
+
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript, PDUMP_CHANNEL* psBlkScript)
+{
+ gpfnDbgDrv->pfnDestroyStream(psScript->hInit, psScript->hMain, psScript->hDeinit);
+ gpfnDbgDrv->pfnDestroyStream(psBlkScript->hInit, psBlkScript->hMain, psBlkScript->hDeinit);
+ gpfnDbgDrv->pfnDestroyStream(psParam->hInit, psParam->hMain, psParam->hDeinit);
+
+ if(gsDBGPdumpState.pszFile)
+ {
+ OSFreeMem(gsDBGPdumpState.pszFile);
+ gsDBGPdumpState.pszFile = NULL;
+ }
+
+ if(gsDBGPdumpState.pszScript)
+ {
+ OSFreeMem(gsDBGPdumpState.pszScript);
+ gsDBGPdumpState.pszScript = NULL;
+ }
+
+ if(gsDBGPdumpState.pszMsg)
+ {
+ OSFreeMem(gsDBGPdumpState.pszMsg);
+ gsDBGPdumpState.pszMsg = NULL;
+ }
+
+ gpfnDbgDrv = NULL;
+}
+
+PVRSRV_ERROR PDumpOSCreateLock(void)
+{
+ mutex_init(&gsPDumpMutex);
+ return PVRSRV_OK;
+}
+
+void PDumpOSDestroyLock(void)
+{
+ /* no destruction work to be done, just assert
+ * the lock is not held */
+ PVR_ASSERT(mutex_is_locked(&gsPDumpMutex) == 0);
+}
+
+void PDumpOSLock(void)
+{
+ mutex_lock(&gsPDumpMutex);
+}
+
+void PDumpOSUnlock(void)
+{
+ mutex_unlock(&gsPDumpMutex);
+}
+
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream,
+ IMG_UINT32 ui32StateID)
+{
+ return (gpfnDbgDrv->pfnGetCtrlState((PDBG_STREAM)hDbgStream, ui32StateID));
+}
+
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame)
+{
+ gpfnDbgDrv->pfnSetFrame(ui32Frame);
+ return;
+}
+
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+ return (bInitClient);
+}
+
+#if defined(PVR_TESTING_UTILS)
+void PDumpOSDumpState(void);
+
+void PDumpOSDumpState(void)
+{
+ PVR_LOG(("---- PDUMP LINUX: gpfnDbgDrv( %p ) gpfnDbgDrv.ui32Size( %d )",
+ gpfnDbgDrv, gpfnDbgDrv->ui32Size));
+
+ PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState( %p )",
+ &gsDBGPdumpState));
+
+ PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[0]( %p )",
+ gsDBGPdumpState.psStream[0]));
+
+ (void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[0], 0xFE);
+
+ PVR_LOG(("---- PDUMP LINUX: gsDBGPdumpState.psStream[1]( %p )",
+ gsDBGPdumpState.psStream[1]));
+
+ (void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFE);
+
+ /* Now dump non-stream specific info */
+ (void) gpfnDbgDrv->pfnGetCtrlState(gsDBGPdumpState.psStream[1], 0xFF);
+}
+#endif
+
+#endif /* #if defined (PDUMP) */
+/*****************************************************************************
+ End of file (PDUMP.C)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump.h b/drivers/gpu/drm/img-rogue/1.10/pdump.h
new file mode 100644
index 00000000000000..616f71940ac34f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump.h
@@ -0,0 +1,152 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _SERVICES_PDUMP_H_
+#define _SERVICES_PDUMP_H_
+
+#include "img_types.h"
+#include "services_km.h"
+
+
+/* A PDump out2.txt script is made up of three sections from three buffers:
+ *
+ * - Init phase buffer - holds PDump data written during driver
+ * initialisation, non-volatile.
+ * - Main phase buffer - holds PDump data written after driver init,
+ * volatile.
+ * - Deinit phase buffer - holds PDump data needed to shutdown HW/play back,
+ * written only during driver initialisation using
+ * the DEINIT flag.
+ *
+ * Volatile in this sense means that the buffer is drained and cleared when
+ * the pdump capture application connects and transfers the data to file.
+ *
+ * The PDump sub-system uses the driver state (init/post-init), whether
+ * the pdump capture application is connected or not (capture range set/unset)
+ * and, if connected, whether the current frame is in the capture range, to
+ * decide which of the three buffers to write the PDump data to. Hence there
+ * are several key time periods in the lifetime of a kernel driver built
+ * with PDUMP=1 (the flags are labelled against the time line below):
+ *
+ * Events:load init pdump enter exit pdump
+ * driver done connects range range disconnects
+ * |__________________|____________|__________|______________|____________|______ . . .
+ * State: | init phase | no capture | <- capture client connected -> | no capture
+ * | | | |
+ * |__________________|____________|______________________________________|_____ . . .
+ * Flag: | CT,DI | NONE,CT,PR | NONE,CT,PR | See no
+ * | Never NONE or PR | Never DI | Never DI | capture
+ * |__________________|____________|______________________________________|_____ . . .
+ * Write | NONE -undef | -No write | -No write | -Main buf | -No write | See no
+ * buffer | CT -Init buf | -Main buf | -Main buf | -Main buf | -Main buf | capture
+ * | PR -undef | -Init buf | -undef | -Init & Main | -undef |
+ * | DI -Deinit buf | -undef | -undef | -undef | -undef |
+ * |__________________|____________|___________|______________|___________|_____ . . .
+ *
+ * Note: The time line could repeat if the pdump capture application is
+ * disconnected and reconnected without unloading the driver module.
+ *
+ * The DEINIT (DI) | CONTINUOUS (CT) | PERSISTENT (PR) flags must never
+ * be OR'd together and given to a PDump call since undefined behaviour may
+ * result and produce an invalid PDump which does not play back cleanly.
+ *
+ * The decision on which flag to use comes down to the time period in which
+ * the client or server driver makes the PDump write call AND the
+ * nature/purpose of the data.
+ *
+ * Note: This is a simplified time line, not all conditions represented.
+ *
+ */
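+
+/* Illustrative examples (not part of the original header): per the table
+ * above, the same statement routes differently depending on driver phase
+ * and flags. PDumpCommentWithFlags() is the comment-emitting helper used
+ * elsewhere in this driver.
+ *
+ *     (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "always dumped");
+ *     (void) PDumpCommentWithFlags(PDUMP_FLAGS_NONE, "framed range only");
+ *
+ * During the init phase a CONTINUOUS write lands in the init buffer and is
+ * replayed for every client connection; post-init it goes to the volatile
+ * main buffer and is only seen by the currently connected capture client.
+ */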
+
+typedef IMG_UINT32 PDUMP_FLAGS_T;
+
+#define PDUMP_FLAGS_NONE            PDUMP_NONE     /*!< Output this entry with no special treatment i.e. output
+ only if in frame range. */
+#define PDUMP_FLAGS_BLKDATA         PDUMP_BLKDATA  /*!< This flag indicates block-mode PDump data to be recorded
+ in Block script stream in addition to Main script stream */
+
+#define PDUMP_FLAGS_DEINIT          0x20000000UL   /*!< Output this entry to the de-initialisation section, must
+ only be used by the initialisation code in the Server. */
+
+#define PDUMP_FLAGS_POWER           0x08000000UL   /*!< Output this entry even when a power transition is ongoing,
+ as directed by other PDUMP flags. */
+
+#define PDUMP_FLAGS_CONTINUOUS      PDUMP_CONT     /*!< Output this entry always regardless of framed capture range,
+ used by client applications being dumped.
+ During init phase of driver such data carrying this flag
+ will be recorded and present for all PDump client
+ connections.
+ Never combine with the PERSIST flag. */
+
+#define PDUMP_FLAGS_PERSISTENT      PDUMP_PERSIST  /*!< Output this entry always regardless of app and range,
+ used by persistent resources created *after* driver
+ initialisation that must appear in all PDump captures
+ for that driver instantiation/session.
+ Effectively this is data that is not forgotten
+ for the second and subsequent PDump client connections.
+ Never combine with the CONTINUOUS flag. */
+
+#define PDUMP_FLAGS_DEBUG           0x00010000U    /*!< For internal debugging use */
+
+#define PDUMP_FLAGS_NOHW 0x00000001U /* For internal use: Skip sending instructions to the hardware
+ when NO_HARDWARE=0 AND PDUMP=1 */
+
+#define PDUMP_FLAGS_FORCESPLIT 0x00000002U /* Forces Main and Block script out files to split - Internal
+ flag used in BLKMODE of PDump */
+
+#define PDUMP_FILEOFFSET_FMTSPEC "0x%08X"
+typedef IMG_UINT32 PDUMP_FILEOFFSET_T;
+
+#define PDUMP_SCRIPT_CHANNEL_NAME "ScriptChannel2"
+#define PDUMP_BLKSCRIPT_CHANNEL_NAME "BlkScriptChannel2"
+#define PDUMP_PARAM_CHANNEL_NAME "ParamChannel2"
+
+#define PDUMP_CHANNEL_SCRIPT 0
+#define PDUMP_CHANNEL_BLKSCRIPT 1
+#define PDUMP_CHANNEL_PARAM 2
+#define PDUMP_NUM_CHANNELS 3
+
+#define PDUMP_PARAM_0_FILE_NAME "%%0%%.prm" /*!< Initial Param filename used in PDump capture */
+#define PDUMP_PARAM_N_FILE_NAME "%%0%%_%02u.prm" /*!< Param filename used when PRM file split */
+#define PDUMP_PARAM_MAX_FILE_NAME 32 /*!< Max Size of parameter name used in out2.txt */
+
+#define PDUMP_IS_CONTINUOUS(flags) (((flags) & PDUMP_FLAGS_CONTINUOUS) != 0)
+
+#endif /* _SERVICES_PDUMP_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_common.c b/drivers/gpu/drm/img-rogue/1.10/pdump_common.c
new file mode 100644
index 00000000000000..52b284143c8742
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_common.c
@@ -0,0 +1,4083 @@
+/*************************************************************************/ /*!
+@File
+@Title Common Server PDump functions layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+#include <stdarg.h>
+
+#include "pvrversion.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pdump_physmem.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "sync_server.h"
+#include "services_km.h"
+#include <powervr/buffer_attribs.h>
+#include "oskm_apphint.h"
+
+/* pdump headers */
+#include "dbgdrvif_srv5.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+#include <pdumpdesc.h>
+#include "rgxpdump.h"
+
+/* Allow temporary buffer size override */
+#if !defined(PDUMP_TEMP_BUFFER_SIZE)
+#define PDUMP_TEMP_BUFFER_SIZE (64 * 1024U)
+#endif
+
+#define PTR_PLUS(t, p, x) ((t)(((IMG_CHAR *)(p)) + (x)))
+#define VPTR_PLUS(p, x) PTR_PLUS(void *, p, x)
+#define VPTR_INC(p, x) ((p) = VPTR_PLUS(p, x))
+#define MAX_PDUMP_MMU_CONTEXTS (32)
+static void *gpvTempBuffer;
+
+#define PRM_FILE_SIZE_MAX 0x7FDFFFFFU /*!< Default maximum file size to split output files, 2GB-2MB as fwrite limits it to 2GB-1 on 32bit systems */
+
+
+static IMG_BOOL g_PDumpInitialised = IMG_FALSE;
+static IMG_UINT32 g_ConnectionCount;
+
+typedef struct
+{
+ IMG_UINT32 ui32Init; /*!< Count of bytes written to the init phase stream */
+ IMG_UINT32 ui32Main; /*!< Count of bytes written to the main stream */
+ IMG_UINT32 ui32Deinit; /*!< Count of bytes written to the deinit stream */
+} PDUMP_CHANNEL_WOFFSETS;
+
+typedef struct
+{
+ PDUMP_CHANNEL sCh; /*!< Channel handles */
+ PDUMP_CHANNEL_WOFFSETS sWOff; /*!< Channel file write offsets */
+ IMG_UINT32 ui32FileIdx; /*!< File index increased when each pdump-block finishes in block-mode */
+} PDUMP_SCRIPT;
+
+typedef struct
+{
+ PDUMP_CHANNEL sCh; /*!< Channel handles */
+ PDUMP_CHANNEL_WOFFSETS sWOff; /*!< Channel file write offsets */
+ IMG_UINT32 ui32FileIdx; /*!< File index used when file size limit reached and a new file is started, parameter channel only */
+ IMG_UINT32 ui32MaxFileSize; /*!< Maximum file size for parameter files */
+
+ PDUMP_FILEOFFSET_T uiZeroPageOffset; /*!< Offset of the zero page in the parameter file */
+ size_t uiZeroPageSize; /*!< Size of the zero page in the parameter file */
+	IMG_CHAR szZeroPageFilename[PDUMP_PARAM_MAX_FILE_NAME]; /*!< PRM file name where the zero page was pdumped */
+} PDUMP_PARAMETERS;
+
+static PDUMP_SCRIPT g_PDumpScript = { { NULL, NULL, NULL}, {0, 0, 0}, 0};
+static PDUMP_SCRIPT g_PDumpBlkScript = { { NULL, NULL, NULL}, {0, 0, 0}, 0};
+static PDUMP_PARAMETERS g_PDumpParameters = { { NULL, NULL, NULL}, {0, 0, 0}, 0, PRM_FILE_SIZE_MAX};
+
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+IMG_UINT32 g_ui32EveryLineCounter = 1U;
+#endif
+
+// #define PDUMP_DEBUG_TRANSITION
+#if defined(PDUMP_DEBUG_TRANSITION)
+# define DEBUG_OUTFILES_COMMENT(fmt, ...) (void)PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, fmt, __VA_ARGS__)
+#else
+# define DEBUG_OUTFILES_COMMENT(fmt, ...)
+#endif
+
+#if defined(PDUMP_DEBUG) || defined(REFCOUNT_DEBUG)
+# define PDUMP_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+# define PDUMP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+/* Prototype for the test/debug state dump routine used in debugging */
+void PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState);
+#undef PDUMP_TRACE_STATE
+
+
+/*****************************************************************************/
+/* PDump Control Module Definitions */
+/*****************************************************************************/
+
+typedef struct _PDUMP_CAPTURE_RANGE_
+{
+	IMG_UINT32 ui32Start;   /*!< Start frame number of range. In block-mode of pdump this variable is interpreted differently to keep a full-length first block */
+	IMG_UINT32 ui32End;     /*!< End frame number of range. In block-mode of pdump this variable can be changed dynamically by the server on forced capture stop */
+ IMG_UINT32 ui32Interval; /*!< Frame sample rate interval */
+} PDUMP_CAPTURE_RANGE;
+
+/* Do not access members directly from outside the control module. */
+typedef struct _PDUMP_CTRL_STATE_
+{
+ IMG_BOOL bInitPhaseActive; /*!< State of driver initialisation phase */
+ IMG_UINT32 ui32Flags; /*!< Unused */
+
+ IMG_UINT32 ui32DefaultCapMode; /*!< Capture mode of the dump */
+ PDUMP_CAPTURE_RANGE sCaptureRange; /*!< The capture range for capture mode 'framed' */
+ IMG_UINT32 ui32CurrentFrame; /*!< Current frame number */
+ IMG_UINT32 ui32BlockLength; /*!< PDump block length in terms of number of frames in each pdump-block for capture mode 'block' */
+ IMG_UINT32 ui32CurrentBlock; /*!< Current pdump-block number */
+
+ IMG_BOOL bCaptureOn; /*!< Current capture status, is current frame in range */
+ IMG_BOOL bFirstFrameInBlock; /*!< Is current frame first in current pdump-block */
+ IMG_BOOL bSuspended; /*!< Suspend flag set on unrecoverable error */
+ IMG_BOOL bInPowerTransition; /*!< Device power transition state */
+ POS_LOCK hLock; /*!< Exclusive lock to this structure */
+} PDUMP_CTRL_STATE;
+
+static PDUMP_CTRL_STATE g_PDumpCtrl =
+{
+ IMG_TRUE,
+ 0,
+
+ 0, /*!< Value obtained from OS PDump layer during initialisation */
+ {
+ PDUMP_FRAME_UNSET,
+ PDUMP_FRAME_UNSET,
+ 1
+ },
+ 0,
+ 0,
+ PDUMP_BLOCKNUM_INVALID, /* Reset value is invalid */
+
+ IMG_FALSE,
+ IMG_FALSE,
+ IMG_FALSE,
+ IMG_FALSE,
+ NULL
+};
+
+static PVRSRV_ERROR PDumpCtrlInit(IMG_UINT32 ui32InitCapMode)
+{
+ g_PDumpCtrl.ui32DefaultCapMode = ui32InitCapMode;
+ PVR_ASSERT(g_PDumpCtrl.ui32DefaultCapMode != 0);
+
+ /* Create lock for PDUMP_CTRL_STATE struct, which is shared between pdump client
+ and PDumping app. This lock will help us serialize calls from pdump client
+ and PDumping app */
+ PVR_LOGR_IF_ERROR(OSLockCreate(&g_PDumpCtrl.hLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ return PVRSRV_OK;
+}
+
+static void PDumpCtrlDeInit(void)
+{
+ if (g_PDumpCtrl.hLock)
+ {
+ OSLockDestroy(g_PDumpCtrl.hLock);
+ g_PDumpCtrl.hLock = NULL;
+ }
+}
+
+static INLINE void PDumpCtrlLockAcquire(void)
+{
+ OSLockAcquire(g_PDumpCtrl.hLock);
+}
+
+static INLINE void PDumpCtrlLockRelease(void)
+{
+ OSLockRelease(g_PDumpCtrl.hLock);
+}
+
+/**********************************************************************************************************
+ NOTE:
+ The following PDumpCtrl*** functions require the PDUMP_CTRL_STATE lock be acquired BEFORE they are
+ called. This is because the PDUMP_CTRL_STATE data is shared between the PDumping App and the PDump
+ client, hence an exclusive access is required. The lock can be acquired and released by using the
+ PDumpCtrlLockAcquire & PDumpCtrlLockRelease functions respectively.
+**********************************************************************************************************/
+
+static void PDumpCtrlUpdateCaptureStatus(void)
+{
+ if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+ {
+ if ((g_PDumpCtrl.ui32CurrentFrame >= g_PDumpCtrl.sCaptureRange.ui32Start) &&
+ (g_PDumpCtrl.ui32CurrentFrame <= g_PDumpCtrl.sCaptureRange.ui32End) &&
+ (((g_PDumpCtrl.ui32CurrentFrame - g_PDumpCtrl.sCaptureRange.ui32Start) % g_PDumpCtrl.sCaptureRange.ui32Interval) == 0))
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+ }
+ else
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+ }
+ }
+ else if ((g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS) || (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_BLKMODE))
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_TRUE;
+ }
+ else
+ {
+ g_PDumpCtrl.bCaptureOn = IMG_FALSE;
+		PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlUpdateCaptureStatus: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+ }
+
+}
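+
+/* Worked example (added for clarity): in framed mode with ui32Start=2,
+ * ui32End=10 and ui32Interval=4, the range and modulo tests above turn
+ * capture on for frames 2, 6 and 10 only; every other frame leaves
+ * bCaptureOn false.
+ */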
+
+static INLINE void PDumpCtrlSuspend(void)
+{
+ g_PDumpCtrl.bSuspended = IMG_TRUE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlIsDumpSuspended(void)
+{
+ return g_PDumpCtrl.bSuspended;
+}
+
+static INLINE IMG_UINT32 PDumpCtrlCapModIsBlkMode(void)
+{
+ return (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_BLKMODE);
+}
+
+static INLINE IMG_BOOL PDumpCtrlIsCaptureForceStopped(void)
+{
+ return (PDumpCtrlCapModIsBlkMode() && (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End));
+}
+
+static INLINE IMG_UINT32 PDumpCtrlIsFullLenFirstBlockSet(void)
+{
+ /* In block-mode of pdump if sCaptureRange.ui32Start is non-zero, set first pdump-block length to full (i.e. ui32BlockLength) */
+ return (PDumpCtrlCapModIsBlkMode() && (g_PDumpCtrl.sCaptureRange.ui32Start > 0));
+}
+
+/* Set bFirstFrameInBlock in block-mode */
+static void PDumpCtrlSetFirstFrameInBlock(IMG_BOOL bFirstFrameInBlock)
+{
+ g_PDumpCtrl.bFirstFrameInBlock = PDumpCtrlCapModIsBlkMode() && bFirstFrameInBlock;
+}
+
+static IMG_BOOL PDumpCtrlIsFirstFrameInBlock(void)
+{
+ return (PDumpCtrlCapModIsBlkMode() && g_PDumpCtrl.bFirstFrameInBlock);
+}
+
+/* This is used to set current pdump-block number, PDUMP_BLOCKNUM_INVALID indicates invalid value */
+static void PDumpCtrlSetBlock(IMG_UINT32 ui32BlockNum)
+{
+ g_PDumpCtrl.ui32CurrentBlock = PDumpCtrlCapModIsBlkMode()? ui32BlockNum : PDUMP_BLOCKNUM_INVALID;
+}
+
+static INLINE IMG_UINT32 PDumpCtrlGetBlock(void)
+{
+ return (PDumpCtrlCapModIsBlkMode()? g_PDumpCtrl.ui32CurrentBlock : PDUMP_BLOCKNUM_INVALID);
+}
+
+static PVRSRV_ERROR PDumpCtrlForcedStop(void)
+{
+	/* In block-mode, on a forced stop request, capture will be stopped after the (current_frame + 1)th frame.
+	 * This ensures that DumpAfterRender is always called on the last frame before PDump capturing exits.
+	 */
+ g_PDumpCtrl.sCaptureRange.ui32End = g_PDumpCtrl.ui32CurrentFrame + 1;
+
+ PDumpCtrlUpdateCaptureStatus();
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _PDumpForceCaptureStopKM(void)
+{
+ PVRSRV_ERROR eError;
+
+ if(!PDumpCtrlCapModIsBlkMode())
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: This call is valid only in block-mode of PDump i.e. pdump -b<block_frame_len>", __func__));
+ return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+ }
+
+	(void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA, "PDump STOP capture received at frame %u", g_PDumpCtrl.ui32CurrentFrame);
+ PDumpCtrlLockAcquire();
+ eError = PDumpCtrlForcedStop();
+ PDumpCtrlLockRelease();
+ return eError;
+}
+
+static void PDumpCtrlUpdateSuspendStatus(void)
+{
+ if(!PDumpCtrlIsDumpSuspended() && PDumpCtrlIsCaptureForceStopped())
+ {
+ /* Suspend PDump once received forced capture stop, driver needs restarting to recapture the PDump after this */
+ PVR_LOG(("PDump suspended, forced stop capture received."));
+ PDumpCtrlSuspend();
+ }
+}
+
+static void PDumpCtrlSetCurrentFrame(IMG_UINT32 ui32Frame)
+{
+ g_PDumpCtrl.ui32CurrentFrame = ui32Frame;
+ /* Mirror the value into the debug driver */
+ PDumpOSSetFrame(ui32Frame);
+
+ PDumpCtrlUpdateCaptureStatus();
+ PDumpCtrlUpdateSuspendStatus();
+
+#if defined(PDUMP_TRACE_STATE)
+ PDumpCommonDumpState(IMG_FALSE);
+#endif
+}
+
+static void PDumpCtrlSetDefaultCaptureParams(IMG_UINT32 ui32Mode, IMG_UINT32 ui32Start, IMG_UINT32 ui32End, IMG_UINT32 ui32Interval)
+{
+ /* Set the capture range to that supplied by the PDump client tool
+ */
+ g_PDumpCtrl.ui32DefaultCapMode = ui32Mode;
+ g_PDumpCtrl.sCaptureRange.ui32Start = ui32Start;
+ g_PDumpCtrl.sCaptureRange.ui32End = ui32End;
+ g_PDumpCtrl.sCaptureRange.ui32Interval = ui32Interval;
+
+ g_PDumpCtrl.ui32BlockLength = (ui32Mode == DEBUG_CAPMODE_BLKMODE)?ui32Interval:0;
+
+ /* Reset variables */
+ g_PDumpCtrl.ui32CurrentBlock = PDUMP_BLOCKNUM_INVALID;
+ g_PDumpCtrl.bFirstFrameInBlock = IMG_FALSE;
+
+	/* Reset the current frame on reset of the capture range; this helps to
+	 * avoid inter-pdump start frame issues when the driver is not reloaded.
+	 * No need to call PDumpCtrlUpdateCaptureStatus() directly, as the set
+	 * current frame call below will do it.
+	 */
+ PDumpCtrlSetCurrentFrame(0);
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsFramed(void)
+{
+ return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCapModIsContinuous(void)
+{
+ return g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_CONTINUOUS;
+}
+
+static IMG_UINT32 PDumpCtrlGetCurrentFrame(void)
+{
+ return g_PDumpCtrl.ui32CurrentFrame;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureOn(void)
+{
+ return !g_PDumpCtrl.bSuspended && g_PDumpCtrl.bCaptureOn;
+}
+
+static INLINE IMG_BOOL PDumpCtrlCaptureRangePast(void)
+{
+ return (g_PDumpCtrl.ui32CurrentFrame > g_PDumpCtrl.sCaptureRange.ui32End);
+}
+
+/* Used to imply if the PDump client is connected or not. */
+static INLINE IMG_BOOL PDumpCtrlCaptureRangeUnset(void)
+{
+ return ((g_PDumpCtrl.sCaptureRange.ui32Start == PDUMP_FRAME_UNSET) &&
+ (g_PDumpCtrl.sCaptureRange.ui32End == PDUMP_FRAME_UNSET));
+}
+
+static IMG_BOOL PDumpCtrlIsLastCaptureFrame(void)
+{
+ if (g_PDumpCtrl.ui32DefaultCapMode == DEBUG_CAPMODE_FRAMED)
+ {
+ /* Is the next capture frame within the range end limit? */
+ if ((g_PDumpCtrl.ui32CurrentFrame + g_PDumpCtrl.sCaptureRange.ui32Interval) > g_PDumpCtrl.sCaptureRange.ui32End)
+ {
+ return IMG_TRUE;
+ }
+ }
+ else if(PDumpCtrlCapModIsBlkMode())
+ {
+ /* Is the next capture frame within the range end limit? (end limit will be modified on forced capture stop in block-mode) */
+ if((g_PDumpCtrl.ui32CurrentFrame + 1) > g_PDumpCtrl.sCaptureRange.ui32End)
+ {
+ return IMG_TRUE;
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PDumpCtrlIsLastCaptureFrame: Unexpected capture mode (%x)", g_PDumpCtrl.ui32DefaultCapMode));
+ }
+
+	/* Return false for continuous capture mode, or when more capture frames remain in the range */
+ return IMG_FALSE;
+}
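+
+/* Worked example (added for clarity): continuing the framed example of
+ * Start=2/End=10/Interval=4 above, frame 6 is not the last capture frame
+ * (6 + 4 <= 10) whereas frame 10 is (10 + 4 > 10).
+ */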
+
+static INLINE IMG_BOOL PDumpCtrlInitPhaseComplete(void)
+{
+ return !g_PDumpCtrl.bInitPhaseActive;
+}
+
+static INLINE void PDumpCtrlSetInitPhaseComplete(IMG_BOOL bIsComplete)
+{
+ if (bIsComplete)
+ {
+ g_PDumpCtrl.bInitPhaseActive = IMG_FALSE;
+ PDUMP_HEREA(102);
+ }
+ else
+ {
+ g_PDumpCtrl.bInitPhaseActive = IMG_TRUE;
+ PDUMP_HEREA(103);
+ }
+}
+
+static INLINE void PDumpCtrlPowerTransitionStart(void)
+{
+ g_PDumpCtrl.bInPowerTransition = IMG_TRUE;
+}
+
+static INLINE void PDumpCtrlPowerTransitionEnd(void)
+{
+ g_PDumpCtrl.bInPowerTransition = IMG_FALSE;
+}
+
+static INLINE IMG_BOOL PDumpCtrlInPowerTransition(void)
+{
+ return g_PDumpCtrl.bInPowerTransition;
+}
+
+static PVRSRV_ERROR PDumpCtrlGetState(IMG_UINT64 *ui64State)
+{
+ *ui64State = 0;
+ if(PDumpCtrlCaptureOn())
+ {
+ *ui64State |= PDUMP_STATE_CAPTURE_FRAME;
+ }
+
+ if(!PDumpCtrlCaptureRangeUnset() && PDumpCtrlInitPhaseComplete())
+ {
+ *ui64State |= PDUMP_STATE_CONNECTED;
+ }
+
+ return PVRSRV_OK;
+}
+
+/********************************************************************************
+ End of PDumpCtrl*** functions
+*********************************************************************************/
+
+/*
+ Wrapper functions which need to be exposed in pdump_km.h for use in other
+ pdump_*** modules safely. These functions call the specific PDumpCtrl layer
+ function after acquiring the PDUMP_CTRL_STATE lock, hence making the calls
+ from other modules hassle free by avoiding the acquire/release CtrlLock
+ calls.
+*/
+
+void PDumpPowerTransitionStart(void)
+{
+ PDumpCtrlLockAcquire();
+ PDumpCtrlPowerTransitionStart();
+ PDumpCtrlLockRelease();
+}
+
+void PDumpPowerTransitionEnd(void)
+{
+ PDumpCtrlLockAcquire();
+ PDumpCtrlPowerTransitionEnd();
+ PDumpCtrlLockRelease();
+}
+
+IMG_BOOL PDumpInPowerTransition(void)
+{
+ IMG_BOOL bPDumpInPowerTransition = IMG_FALSE;
+
+ PDumpCtrlLockAcquire();
+ bPDumpInPowerTransition = PDumpCtrlInPowerTransition();
+ PDumpCtrlLockRelease();
+
+ return bPDumpInPowerTransition;
+}
+
+IMG_BOOL PDumpIsDumpSuspended(void)
+{
+ IMG_BOOL bPDumpIsDumpSuspended;
+
+ PDumpCtrlLockAcquire();
+ bPDumpIsDumpSuspended = PDumpCtrlIsDumpSuspended();
+ PDumpCtrlLockRelease();
+
+ return bPDumpIsDumpSuspended;
+}
+
+/*****************************************************************************/
+/* PDump Common Write Layer just above PDump OS Layer */
+/*****************************************************************************/
+
+
+/*
+ Checks in this method were seeded from the original PDumpWriteILock()
+ and DBGDrivWriteCM() and have grown since to ensure PDump output
+ matches legacy output.
+ Note: the order of the checks in this method is important as some
+ writes have multiple pdump flags set!
+ */
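+/* Summary of the decision order below (added for clarity):
+ *   1. framed capture range already past        -> refuse
+ *   2. dump suspended                           -> refuse
+ *   3. power transition (unless POWER flagged)  -> refuse
+ *   4. PERSISTENT flagged or still in init      -> allow
+ *   5. CONTINUOUS flagged                       -> allow only if a client
+ *                                                  is connected (range set)
+ *   6. framed mode with capture off             -> refuse, else allow
+ */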
+static IMG_BOOL PDumpWriteAllowed(IMG_UINT32 ui32Flags)
+{
+ /* Lock down the PDUMP_CTRL_STATE struct before calling the following
+ PDumpCtrl*** functions. This is to avoid updates to the Control data
+ while we are reading from it */
+ PDumpCtrlLockAcquire();
+
+	/* No writes if in framed mode and the capture range is already past */
+ if (PDumpCtrlCaptureRangePast())
+ {
+ PDUMP_HERE(10);
+ goto unlockAndReturnFalse;
+ }
+
+ /* No writes while writing is suspended */
+ if (PDumpCtrlIsDumpSuspended())
+ {
+ PDUMP_HERE(11);
+ goto unlockAndReturnFalse;
+ }
+
+ /* Prevent PDumping during a power transition */
+ if (PDumpCtrlInPowerTransition())
+ { /* except when it's flagged */
+ if (ui32Flags & PDUMP_FLAGS_POWER)
+ {
+ PDUMP_HERE(20);
+ goto unlockAndReturnTrue;
+ }
+ PDUMP_HERE(16);
+ goto unlockAndReturnFalse;
+ }
+
+ /* Always allow dumping in init phase and when persistent flagged */
+ if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+ {
+ PDUMP_HERE(12);
+ goto unlockAndReturnTrue;
+ }
+ if (!PDumpCtrlInitPhaseComplete())
+ {
+ PDUMP_HERE(15);
+ goto unlockAndReturnTrue;
+ }
+
+ /* The following checks are made when the driver has completed initialisation */
+
+ /* If PDump client connected allow continuous flagged writes */
+ if (PDUMP_IS_CONTINUOUS(ui32Flags))
+ {
+ if (PDumpCtrlCaptureRangeUnset()) /* Is client connected? */
+ {
+ PDUMP_HERE(13);
+ goto unlockAndReturnFalse;
+ }
+ PDUMP_HERE(14);
+ goto unlockAndReturnTrue;
+ }
+
+	/* No last/deinit statements allowed when not in initialisation phase */
+	if (ui32Flags & PDUMP_FLAGS_DEINIT)
+ {
+ if (PDumpCtrlInitPhaseComplete())
+ {
+ PDUMP_HERE(17);
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteAllowed: DEINIT flag used at the wrong time outside of initialisation!"));
+ goto unlockAndReturnFalse;
+ }
+ }
+
+	/*
+		If no flags are provided then it is FRAMED output and the frame
+		range must be checked to match the expected behaviour.
+	*/
+ if (PDumpCtrlCapModIsFramed() && !PDumpCtrlCaptureOn())
+ {
+ PDUMP_HERE(18);
+ goto unlockAndReturnFalse;
+ }
+
+ PDUMP_HERE(19);
+
+unlockAndReturnTrue:
+ /* Allow the write to take place */
+ PDumpCtrlLockRelease();
+ return IMG_TRUE;
+
+unlockAndReturnFalse:
+ PDumpCtrlLockRelease();
+ return IMG_FALSE;
+}
+
+#undef PDUMP_DEBUG_SCRIPT_LINES
+
+#if defined(PDUMP_DEBUG_SCRIPT_LINES)
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) _PDumpOSDebugDriverWrite(a,b,c,d)
+static IMG_UINT32 _PDumpOSDebugDriverWrite( IMG_HANDLE psStream,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount,
+ IMG_UINT32 ui32Flags)
+{
+ IMG_CHAR tmp1[80];
+ IMG_CHAR* streamName = "unkn";
+
+ if (g_PDumpScript.sCh.hDeinit == psStream)
+ streamName = "dein";
+ else if (g_PDumpScript.sCh.hInit == psStream)
+ streamName = "init";
+ else if (g_PDumpScript.sCh.hMain == psStream)
+ streamName = "main";
+
+ (void) PDumpOSSprintf(tmp1, 80, "-- %s, %x\n", streamName, ui32Flags);
+	(void) PDumpOSDebugDriverWrite(psStream, (IMG_UINT8*)tmp1, OSStringLength(tmp1));
+
+ return PDumpOSDebugDriverWrite(psStream, pui8Data, ui32BCount);
+}
+#else
+#define PDUMPOSDEBUGDRIVERWRITE(a,b,c,d) PDumpOSDebugDriverWrite(a,b,c)
+#endif
+
+
+/**************************************************************************/ /*!
+ @Function PDumpWriteToBuffer
+ @Description   Write the supplied data to the PDump stream buffer and attempt
+                to handle any buffer-full conditions to ensure that all the
+                data requested is written.
+
+ @Input psStream The address of the PDump stream buffer to write to
+ @Input pui8Data Pointer to the data to be written
+ @Input ui32BCount Number of bytes to write
+ @Input ui32Flags PDump statement flags.
+
+ @Return IMG_UINT32 Actual number of bytes written, may be less than
+ ui32BCount when buffer full condition could not
+ be avoided.
+*/ /***************************************************************************/
+static IMG_UINT32 PDumpWriteToBuffer(IMG_HANDLE psStream, IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32BytesWritten = 0;
+ IMG_UINT32 ui32Off = 0;
+
+ while (ui32BCount > 0)
+ {
+ ui32BytesWritten = PDUMPOSDEBUGDRIVERWRITE(psStream, &pui8Data[ui32Off], ui32BCount, ui32Flags);
+
+ if (ui32BytesWritten == 0)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: Zero bytes written - release execution"));
+ PDumpOSReleaseExecution();
+ }
+
+ if (ui32BytesWritten != 0xFFFFFFFFU)
+ {
+ if (ui32BCount != ui32BytesWritten)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "PDumpWriteToBuffer: partial write of %d bytes of %d bytes", ui32BytesWritten, ui32BCount));
+ }
+ ui32Off += ui32BytesWritten;
+ ui32BCount -= ui32BytesWritten;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToBuffer: Unrecoverable error received from the debug driver"));
+ if( PDumpOSGetCtrlState(psStream, DBG_GET_STATE_FLAG_IS_READONLY) )
+ {
+				/* Fatal - suspend PDump to prevent flooding the kernel log buffer */
+ PVR_LOG(("PDump suspended, debug driver out of memory"));
+ /*
+ Acquire the control lock before updating "suspended" state. This may not be required
+ because "this" is the context which checks the "suspended" state in PDumpWriteAllowed
+ before calling this function. So, this update is mainly for other contexts.
+ Also, all the other contexts which will/wish-to read the "suspended" state ought to be
+ waiting on the bridge lock first and then the PDUMP_OSLOCK (to pdump into script or
+ parameter buffer). However, this acquire may be useful in case the PDump call is being
+ made from a direct bridge
+ */
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSuspend();
+ PDumpCtrlLockRelease();
+ }
+ return 0;
+ }
+ }
+
+	/* All data was consumed: return the total number of bytes written */
+	return ui32Off;
+}
+
+
+/**************************************************************************/ /*!
+ @Function PDumpWriteToChannel
+ @Description Write the supplied data to the PDump channel specified obeying
+ flags to write to the necessary channel buffers.
+
+ @Input psChannel The address of the script or parameter channel object
+ @Input/Output psWOff The address of the channel write offsets object to
+ update on successful writing
+ @Input pui8Data Pointer to the data to be written
+ @Input ui32Size Number of bytes to write
+ @Input ui32Flags PDump statement flags, they may be clear (no flags)
+ which implies framed data, continuous flagged,
+ persistent flagged, or continuous AND persistent
+ flagged and they determine how the data is output.
+ On the first test app run after driver load, the
+ Display Controller dumps a resource that is both
+ continuous and persistent and this needs writing to
+ both the init (persistent) and main (continuous)
+ channel buffers to ensure the data is dumped in
+ subsequent test runs without reloading the driver.
+ In subsequent runs the PDump client 'freezes' the
+ init buffer so that only one dump of persistent data
+ for the "extended init phase" is captured to the
+ init buffer.
+
+ @Return IMG_BOOL True when the data has been consumed, false otherwise
+*/ /***************************************************************************/
+static IMG_BOOL PDumpWriteToChannel(PDUMP_CHANNEL* psChannel, PDUMP_CHANNEL_WOFFSETS* psWOff,
+ IMG_UINT8* pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32BytesWritten = 0;
+
+ PDUMP_HERE(210);
+
+ /* Dump data to deinit buffer when flagged as deinit */
+ if (ui32Flags & PDUMP_FLAGS_DEINIT)
+ {
+ PDUMP_HERE(211);
+ ui32BytesWritten = PDumpWriteToBuffer(psChannel->hDeinit, pui8Data, ui32Size, ui32Flags);
+ if (ui32BytesWritten != ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: DEINIT Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+ PDUMP_HERE(212);
+ return IMG_FALSE;
+ }
+
+ if (psWOff)
+ {
+ psWOff->ui32Deinit += ui32Size;
+ }
+
+ }
+ else
+ {
+ IMG_BOOL bDumpedToInitAlready = IMG_FALSE;
+ IMG_HANDLE* phStream = NULL;
+ IMG_UINT32* pui32Offset = NULL;
+
+ /* Always append persistent data to init phase so it's available on
+ * subsequent app runs, but also to the main stream if client connected */
+ if (ui32Flags & PDUMP_FLAGS_PERSISTENT)
+ {
+ PDUMP_HERE(213);
+ ui32BytesWritten = PDumpWriteToBuffer( psChannel->hInit, pui8Data, ui32Size, ui32Flags);
+ if (ui32BytesWritten != ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: PERSIST Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+ PDUMP_HERE(214);
+ return IMG_FALSE;
+ }
+
+ bDumpedToInitAlready = IMG_TRUE;
+ if (psWOff)
+ {
+ psWOff->ui32Init += ui32Size;
+ }
+
+ /* Don't write continuous data if client not connected */
+ PDumpCtrlLockAcquire();
+ if (PDumpCtrlCaptureRangeUnset())
+ {
+ PDumpCtrlLockRelease();
+ return IMG_TRUE;
+ }
+ PDumpCtrlLockRelease();
+ }
+
+ /* Prepare to write the data to the main stream for
+ * persistent, continuous or framed data. Override and use init
+ * stream if driver still in init phase and we have not written
+ * to it yet.*/
+ PDumpCtrlLockAcquire();
+ if (!PDumpCtrlInitPhaseComplete() && !bDumpedToInitAlready)
+ {
+ PDUMP_HERE(215);
+ phStream = &psChannel->hInit;
+ if (psWOff)
+ {
+ pui32Offset = &psWOff->ui32Init;
+ }
+ }
+ else
+ {
+ PDUMP_HERE(216);
+ phStream = &psChannel->hMain;
+ if (psWOff)
+ {
+ pui32Offset = &psWOff->ui32Main;
+ }
+ }
+ PDumpCtrlLockRelease();
+
+ /* Write the data to the stream */
+ ui32BytesWritten = PDumpWriteToBuffer(*phStream, pui8Data, ui32Size, ui32Flags);
+ if (ui32BytesWritten != ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpWriteToChannel: MAIN Written length (%d) does not match data length (%d), PDump incomplete!", ui32BytesWritten, ui32Size));
+ PDUMP_HERE(217);
+ return IMG_FALSE;
+ }
+
+ if (pui32Offset)
+ {
+ *pui32Offset += ui32BytesWritten;
+ }
+ }
+
+ return IMG_TRUE;
+}
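+
+/* Illustrative routing (added for clarity): a post-init write flagged
+ * PERSISTENT | CONTINUOUS is first appended to the init stream and then,
+ * if a capture client is connected, also to the main stream - matching the
+ * Display Controller example described in the comment above.
+ */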
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+
+static IMG_UINT32 _GenerateChecksum(void *pvData, size_t uiSize)
+{
+ IMG_UINT32 ui32Sum = 0;
+ IMG_UINT32 *pui32Data = pvData;
+ IMG_UINT8 *pui8Data = pvData;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32LeftOver;
+
+ for(i = 0; i < uiSize / sizeof(IMG_UINT32); i++)
+ {
+ ui32Sum += pui32Data[i];
+ }
+
+ ui32LeftOver = uiSize % sizeof(IMG_UINT32);
+
+ while(ui32LeftOver)
+ {
+ ui32Sum += pui8Data[uiSize - ui32LeftOver];
+ ui32LeftOver--;
+ }
+
+ return ui32Sum;
+}
+
+#endif
+
+PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *pui8Data, IMG_UINT32 ui32Size, IMG_UINT32 ui32Flags,
+ IMG_UINT32* pui32FileOffset, IMG_CHAR* aszFilenameStr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bPDumpCtrlInitPhaseComplete = IMG_FALSE;
+
+ PVR_ASSERT(pui8Data && (ui32Size!=0));
+ PVR_ASSERT(pui32FileOffset && aszFilenameStr);
+
+ PDUMP_HERE(1);
+
+ if (!PDumpWriteAllowed(ui32Flags))
+ {
+		/* Abort the write for the above reason but indicate what happened
+		 * to the caller to avoid disrupting the driver; the caller should
+		 * treat it as OK but skip any related PDump writes to the script
+		 * file. */
+ return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+ }
+
+ PDUMP_HERE(2);
+
+ PDumpCtrlLockAcquire();
+ bPDumpCtrlInitPhaseComplete = PDumpCtrlInitPhaseComplete();
+ PDumpCtrlLockRelease();
+
+ if (!bPDumpCtrlInitPhaseComplete || (ui32Flags & PDUMP_FLAGS_PERSISTENT))
+ {
+ PDUMP_HERE(3);
+
+ /* Init phase stream not expected to get above the file size max */
+ PVR_ASSERT(g_PDumpParameters.sWOff.ui32Init < g_PDumpParameters.ui32MaxFileSize);
+
+ /* Return the file write offset at which the parameter data was dumped */
+ *pui32FileOffset = g_PDumpParameters.sWOff.ui32Init;
+ }
+ else
+ {
+ PDUMP_HERE(4);
+
+ /* Do we need to signal the PDump client that a split is required? */
+ if (g_PDumpParameters.sWOff.ui32Main + ui32Size > g_PDumpParameters.ui32MaxFileSize)
+ {
+ PDUMP_HERE(5);
+ PDumpOSSetSplitMarker(g_PDumpParameters.sCh.hMain, g_PDumpParameters.sWOff.ui32Main);
+ g_PDumpParameters.ui32FileIdx++;
+ g_PDumpParameters.sWOff.ui32Main = 0;
+ }
+
+ /* Return the file write offset at which the parameter data was dumped */
+ *pui32FileOffset = g_PDumpParameters.sWOff.ui32Main;
+ }
+
+ /* Create the parameter file name, based on index, to be used in the script */
+ if (g_PDumpParameters.ui32FileIdx == 0)
+ {
+ eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_0_FILE_NAME);
+ }
+ else
+ {
+ PDUMP_HERE(6);
+ eError = PDumpOSSprintf(aszFilenameStr, PDUMP_PARAM_MAX_FILE_NAME, PDUMP_PARAM_N_FILE_NAME, g_PDumpParameters.ui32FileIdx);
+ }
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSSprintf", errExit);
+
+ /* Write the parameter data to the parameter channel */
+ eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ if (!PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff, pui8Data, ui32Size, ui32Flags))
+ {
+ PDUMP_HERE(7);
+ PVR_LOGG_IF_ERROR(eError, "PDumpWrite", errExit);
+ }
+#if defined(PDUMP_DEBUG_OUTFILES)
+ else
+ {
+ IMG_UINT32 ui32Checksum;
+ PDUMP_GET_SCRIPT_STRING();
+
+ ui32Checksum = _GenerateChecksum(pui8Data, ui32Size);
+
+ /* CHK CHKSUM SIZE PRMOFFSET PRMFILE */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- CHK 0x%08X 0x%08X 0x%08X %s",
+ ui32Checksum,
+ ui32Size,
+ *pui32FileOffset,
+ aszFilenameStr);
+ if(eError != PVRSRV_OK)
+ {
+ goto errExit;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ }
+#endif
+
+ return PVRSRV_OK;
+
+errExit:
+ return eError;
+}
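+/* Illustrative note (field values hypothetical): with
+ * PDUMP_DEBUG_OUTFILES defined, a successful parameter write above is
+ * mirrored in the script stream as a comment of the form
+ *   -- CHK 0x<checksum> 0x<size> 0x<prm-offset> <prm-file>
+ * which lets playback tooling cross-check the parameter file contents
+ * against what the driver wrote.
+ */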
+
+
+IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags)
+{
+ PVR_ASSERT(hString);
+
+ PDUMP_HERE(201);
+
+ if (!PDumpWriteAllowed(ui32Flags))
+ {
+	/* Abort the write for the above reasons, but indicate to the
+	 * caller that it was OK, to avoid disrupting the driver */
+ return IMG_TRUE;
+ }
+
+ if (PDumpCtrlCapModIsBlkMode())
+ {
+ if(ui32Flags & PDUMP_FLAGS_FORCESPLIT)
+ {
+ /* Split Block script stream */
+ PVR_DPF((PVR_DBG_MESSAGE,"[g_PDumpBlkScript] Splitting stream with Marker set to %d, ui32FileIdx %d", g_PDumpBlkScript.sWOff.ui32Main, g_PDumpBlkScript.ui32FileIdx));
+
+ while(PDumpOSGetSplitMarker(g_PDumpBlkScript.sCh.hMain)){}; /* Let the pdump client process last split requested before splitting again */
+ PDumpOSSetSplitMarker(g_PDumpBlkScript.sCh.hMain, g_PDumpBlkScript.sWOff.ui32Main);
+ g_PDumpBlkScript.ui32FileIdx++;
+ g_PDumpBlkScript.sWOff.ui32Main = 0;
+
+
+ /* Split main script stream */
+ PVR_DPF((PVR_DBG_MESSAGE,"[g_PDumpScript] Splitting stream with Marker set to %d, ui32FileIdx %d", g_PDumpScript.sWOff.ui32Main, g_PDumpScript.ui32FileIdx));
+
+ while(PDumpOSGetSplitMarker(g_PDumpScript.sCh.hMain)){}; /* Let the pdump client process last split requested before splitting again */
+ PDumpOSSetSplitMarker(g_PDumpScript.sCh.hMain, g_PDumpScript.sWOff.ui32Main);
+ g_PDumpScript.ui32FileIdx++;
+ g_PDumpScript.sWOff.ui32Main = 0;
+ }
+
+ if(ui32Flags & PDUMP_FLAGS_BLKDATA)
+ {
+ /* Write data to block stream if PDUMP_FLAGS_BLKDATA flag is set */
+ if(!PDumpWriteToChannel(&g_PDumpBlkScript.sCh, &g_PDumpBlkScript.sWOff, (IMG_UINT8*) hString, (IMG_UINT32) OSStringLength((IMG_CHAR*) hString), ui32Flags))
+ {
+ return IMG_FALSE;
+ }
+ }
+ }
+
+ return PDumpWriteToChannel(&g_PDumpScript.sCh, &g_PDumpScript.sWOff, (IMG_UINT8*) hString, (IMG_UINT32) OSStringLength((IMG_CHAR*) hString), ui32Flags);
+}
+
+
+/*****************************************************************************/
+
+struct _PDUMP_CONNECTION_DATA_ {
+ ATOMIC_T sRefCount;
+ POS_LOCK hLock; /*!< Protects access to sListHead. */
+ DLLIST_NODE sListHead;
+ IMG_BOOL bLastInto;
+ IMG_UINT32 ui32LastSetFrameNumber;
+ IMG_BOOL bWasInCaptureRange;
+ IMG_BOOL bIsInCaptureRange;
+ IMG_BOOL bLastTransitionFailed;
+ SYNC_CONNECTION_DATA *psSyncConnectionData;
+};
+
+static PDUMP_CONNECTION_DATA * _PDumpConnectionAcquire(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ IMG_INT iRefCount = OSAtomicIncrement(&psPDumpConnectionData->sRefCount);
+
+ PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d", __func__,
+ psPDumpConnectionData, iRefCount);
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
+
+ return psPDumpConnectionData;
+}
+
+static void _PDumpConnectionRelease(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ IMG_INT iRefCount = OSAtomicDecrement(&psPDumpConnectionData->sRefCount);
+ if (iRefCount == 0)
+ {
+ OSLockDestroy(psPDumpConnectionData->hLock);
+ PVR_ASSERT(dllist_is_empty(&psPDumpConnectionData->sListHead));
+ OSFreeMem(psPDumpConnectionData);
+ }
+
+ PDUMP_REFCOUNT_PRINT("%s: PDump connection %p, refcount = %d", __func__,
+ psPDumpConnectionData, iRefCount);
+}
+
+/**************************************************************************
+ * Function Name : GetTempBuffer
+ * Inputs : None
+ * Outputs : None
+ * Returns : Temporary buffer address, or NULL
+ * Description : Get temporary buffer address.
+**************************************************************************/
+static void *GetTempBuffer(void)
+{
+ /*
+ * Allocate the temporary buffer, if it hasn't been allocated already.
+ * Return the address of the temporary buffer, or NULL if it
+ * couldn't be allocated.
+ * It is expected that the buffer will be allocated once, at driver
+ * load time, and left in place until the driver unloads.
+ */
+
+ if (gpvTempBuffer == NULL)
+ {
+ gpvTempBuffer = OSAllocMem(PDUMP_TEMP_BUFFER_SIZE);
+ if (gpvTempBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "GetTempBuffer: OSAllocMem failed"));
+ }
+ }
+
+ return gpvTempBuffer;
+}
+
+static void FreeTempBuffer(void)
+{
+
+ if (gpvTempBuffer != NULL)
+ {
+ OSFreeMem(gpvTempBuffer);
+ gpvTempBuffer = NULL;
+ }
+}
+
+/**************************************************************************
+ * Function Name : PDumpParameterChannelZeroedPageBlock
+ * Inputs : None
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Set up the zero page block in the parameter stream
+**************************************************************************/
+static PVRSRV_ERROR PDumpParameterChannelZeroedPageBlock(void)
+{
+ IMG_UINT8 aui8Zero[32] = { 0 };
+ size_t uiBytesToWrite;
+ PVRSRV_ERROR eError;
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+ IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+ &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+ OSFreeKMAppHintState(pvAppHintState);
+
+ /* ZeroPageSize can't be smaller than page size */
+ g_PDumpParameters.uiZeroPageSize = MAX(ui32GeneralNon4KHeapPageSize, OSGetPageSize());
+
+	/* Ensure the zero page size is a multiple of the zero source buffer on the stack */
+ PVR_ASSERT(g_PDumpParameters.uiZeroPageSize % sizeof(aui8Zero) == 0);
+
+ /* the first write gets the parameter file name and stream offset,
+ * then subsequent writes do not need to know this as the data is
+ * contiguous in the stream
+ */
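+	/* Example (assuming uiZeroPageSize resolves to 4096 with the 32-byte
+	 * zero buffer above): the PDumpWriteParameter() call writes the first
+	 * 32 bytes and records the file name/offset; the loop below then
+	 * writes the remaining 4096 - 32 = 4064 bytes in 127 further
+	 * 32-byte chunks. */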
+ PDUMP_LOCK();
+ eError = PDumpWriteParameter(aui8Zero,
+ sizeof(aui8Zero),
+ 0,
+ &g_PDumpParameters.uiZeroPageOffset,
+ g_PDumpParameters.szZeroPageFilename);
+
+ if(eError != PVRSRV_OK)
+ {
+ /* Also treat PVRSRV_ERROR_PDUMP_NOT_ALLOWED as an error in this case
+ * as it should never happen since all writes during driver Init are allowed.
+ */
+ goto err_write;
+ }
+
+ uiBytesToWrite = g_PDumpParameters.uiZeroPageSize - sizeof(aui8Zero);
+
+ while(uiBytesToWrite)
+ {
+ IMG_BOOL bOK;
+
+ bOK = PDumpWriteToChannel(&g_PDumpParameters.sCh, &g_PDumpParameters.sWOff,
+ aui8Zero,
+ sizeof(aui8Zero), 0);
+
+ if(!bOK)
+ {
+ eError = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ goto err_write;
+ }
+
+ uiBytesToWrite -= sizeof(aui8Zero);
+ }
+
+err_write:
+ PDUMP_UNLOCK();
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to initialise parameter stream zero block"));
+ }
+
+ return eError;
+}
+
+/**************************************************************************
+ * Function Name : PDumpGetParameterZeroPageInfo
+ * Inputs : None
+ * Outputs : puiZeroPageOffset: will be set to the offset of the zero page
+ * : puiZeroPageSize: will be set to the size of the zero page
+ * : ppszZeroPageFilename: will be set to a pointer to the PRM file name
+ * : containing the zero page
+ * Returns : None
+ * Description : Get information about the zero page
+**************************************************************************/
+void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+ size_t *puiZeroPageSize,
+ const IMG_CHAR **ppszZeroPageFilename)
+{
+ *puiZeroPageOffset = g_PDumpParameters.uiZeroPageOffset;
+ *puiZeroPageSize = g_PDumpParameters.uiZeroPageSize;
+ *ppszZeroPageFilename = g_PDumpParameters.szZeroPageFilename;
+}
+
+PVRSRV_ERROR PDumpInitCommon(void)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32InitCapMode = 0;
+ IMG_CHAR* pszEnvComment = NULL;
+
+ PDUMP_HEREA(2010);
+
+ /* Allocate temporary buffer for copying from user space */
+ (void) GetTempBuffer();
+
+ /* create the global PDump lock */
+ eError = PDumpCreateLockKM();
+ PVR_LOGG_IF_ERROR(eError, "PDumpCreateLockKM", errExit);
+
+ /* Call environment specific PDump initialisation */
+ eError = PDumpOSInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh, &g_PDumpBlkScript.sCh, &ui32InitCapMode, &pszEnvComment);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSInit", errExitLock);
+
+ /* Initialise PDump control module in common layer */
+ eError = PDumpCtrlInit(ui32InitCapMode);
+ PVR_LOGG_IF_ERROR(eError, "PDumpCtrlInit", errExitOSDeInit);
+
+	/* Test that PDump is initialised and ready by logging the driver details */
+ eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Driver Product Version: %s - %s (%s)", PVRVERSION_STRING, PVR_BUILD_DIR, PVR_BUILD_TYPE);
+ PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+ if (pszEnvComment != NULL)
+ {
+ eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "%s", pszEnvComment);
+ PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+ }
+ eError = PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Start of Init Phase");
+ PVR_LOGG_IF_ERROR(eError, "PDumpCommentWithFlags", errExitCtrl);
+
+ eError = PDumpParameterChannelZeroedPageBlock();
+ PVR_LOGG_IF_ERROR(eError, "PDumpParameterChannelZeroedPageBlock", errExitCtrl);
+
+ g_PDumpInitialised = IMG_TRUE;
+
+ PDUMP_HEREA(2011);
+
+ return PVRSRV_OK;
+
+errExitCtrl:
+ PDumpCtrlDeInit();
+errExitOSDeInit:
+ PDUMP_HEREA(2018);
+ PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh, &g_PDumpBlkScript.sCh);
+errExitLock:
+ PDUMP_HEREA(2019);
+ PDumpDestroyLockKM();
+errExit:
+ return eError;
+}
+
+void PDumpDeInitCommon(void)
+{
+ PDUMP_HEREA(2020);
+
+ g_PDumpInitialised = IMG_FALSE;
+
+ /* Free temporary buffer */
+ FreeTempBuffer();
+
+ /* DeInit the PDUMP_CTRL_STATE data */
+ PDumpCtrlDeInit();
+
+ /* Call environment specific PDump Deinitialisation */
+ PDumpOSDeInit(&g_PDumpParameters.sCh, &g_PDumpScript.sCh, &g_PDumpBlkScript.sCh);
+
+ /* take down the global PDump lock */
+ PDumpDestroyLockKM();
+}
+
+IMG_BOOL PDumpReady(void)
+{
+ return g_PDumpInitialised;
+}
+
+void PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+	/* Check with the OS layer we are running on */
+ if (PDumpOSAllowInitPhaseToComplete(bPDumpClient, bInitClient))
+ {
+ if (bInitClient)
+ {
+			/* We only output this once, on OSs where the init client ends the init phase */
+ PDUMPCOMMENT("Stop Init Phase");
+ }
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSetInitPhaseComplete(IMG_TRUE);
+ PDumpCtrlLockRelease();
+ }
+}
+
+PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+ PDumpCtrlLockAcquire();
+ *pbIsLastCaptureFrame = PDumpCtrlIsLastCaptureFrame();
+ PDumpCtrlLockRelease();
+
+ return PVRSRV_OK;
+}
+
+
+
+typedef struct _PDUMP_Transition_DATA_ {
+ PFN_PDUMP_TRANSITION pfnCallback;
+ void *hPrivData;
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+ DLLIST_NODE sNode;
+} PDUMP_Transition_DATA;
+
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+ PFN_PDUMP_TRANSITION pfnCallback,
+ void *hPrivData,
+ void **ppvHandle)
+{
+ PDUMP_Transition_DATA *psData;
+ PVRSRV_ERROR eError;
+
+ psData = OSAllocMem(sizeof(*psData));
+ if (psData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ /* Setup the callback and add it to the list for this process */
+ psData->pfnCallback = pfnCallback;
+ psData->hPrivData = hPrivData;
+
+ OSLockAcquire(psPDumpConnectionData->hLock);
+ dllist_add_to_head(&psPDumpConnectionData->sListHead, &psData->sNode);
+ OSLockRelease(psPDumpConnectionData->hLock);
+
+ /* Take a reference on the connection so it doesn't get freed too early */
+	psData->psPDumpConnectionData = _PDumpConnectionAcquire(psPDumpConnectionData);
+ *ppvHandle = psData;
+
+ return PVRSRV_OK;
+
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+ PDUMP_Transition_DATA *psData = pvHandle;
+
+ OSLockAcquire(psData->psPDumpConnectionData->hLock);
+ dllist_remove_node(&psData->sNode);
+ OSLockRelease(psData->psPDumpConnectionData->hLock);
+ _PDumpConnectionRelease(psData->psPDumpConnectionData);
+ OSFreeMem(psData);
+}
+
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+ DLLIST_NODE *psNode, *psNext;
+ PVRSRV_ERROR eError;
+
+ /* Only call the callbacks if we've really done a Transition */
+ if (bInto != psPDumpConnectionData->bLastInto)
+ {
+ OSLockAcquire(psPDumpConnectionData->hLock);
+ /* We're Transitioning either into or out of capture range */
+ dllist_foreach_node(&psPDumpConnectionData->sListHead, psNode, psNext)
+ {
+ PDUMP_Transition_DATA *psData =
+ IMG_CONTAINER_OF(psNode, PDUMP_Transition_DATA, sNode);
+
+ eError = psData->pfnCallback(psData->hPrivData, bInto, ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ OSLockRelease(psPDumpConnectionData->hLock);
+ return eError;
+ }
+ }
+ OSLockRelease(psPDumpConnectionData->hLock);
+
+ if (bInto)
+ {
+			/* Client sync prims are managed in blocks.
+			 * In PDump block-mode, sync-blocks are re-dumped at the start of
+			 * each pdump-block, after the live-FW thread and the app-thread
+			 * have synchronised.
+			 *
+			 * At playback time, we first synchronise the script-thread and
+			 * sim-FW threads, then re-load the sync-blocks before processing
+			 * the next pdump-block.
+			 */
+ SyncConnectionPDumpSyncBlocks(psPDumpConnectionData->psSyncConnectionData);
+ }
+ psPDumpConnectionData->bLastInto = bInto;
+ }
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bInCaptureRange)
+{
+ IMG_UINT64 ui64State = 0;
+ PVRSRV_ERROR eError;
+
+ eError = PDumpCtrlGetState(&ui64State);
+
+ *bInCaptureRange = (ui64State & PDUMP_STATE_CAPTURE_FRAME) ? IMG_TRUE : IMG_FALSE;
+
+ return eError;
+}
+
+PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State)
+{
+ PVRSRV_ERROR eError;
+
+ PDumpCtrlLockAcquire();
+ eError = PDumpCtrlGetState(ui64State);
+ PDumpCtrlLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum)
+{
+ PDumpCtrlLockAcquire();
+ *pui32BlockNum = PDumpCtrlGetBlock();
+ PDumpCtrlLockRelease();
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpIsFirstFrameInBlockKM(IMG_BOOL *bIsFirstFrameInBlock)
+{
+ PDumpCtrlLockAcquire();
+ *bIsFirstFrameInBlock = PDumpCtrlIsFirstFrameInBlock();
+ PDumpCtrlLockRelease();
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32Frame)
+{
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData = psConnection->psPDumpConnectionData;
+ IMG_BOOL bWasInCaptureRange = IMG_FALSE;
+ IMG_BOOL bIsFirstFrameInBlock = IMG_FALSE;
+ IMG_BOOL bIsInCaptureRange = IMG_FALSE;
+ IMG_BOOL bForceBlockTransition = IMG_FALSE; /* In block-mode this flag forces pdump-block transition if set */
+ IMG_UINT32 ui32CurrentBlock = PDUMP_BLOCKNUM_INVALID;
+ PVRSRV_ERROR eError;
+
+	/*
+		Note:
+		As we can't test whether the new frame will be in capture range
+		before we set the frame number, and we don't want to roll back
+		the frame number if we fail, we have to save the "transient" data
+		which decides whether we're entering or exiting capture range,
+		along with a failure boolean, so we know what's required on a retry.
+	*/
+ if (psPDumpConnectionData->ui32LastSetFrameNumber != ui32Frame)
+ {
+ if(PDumpCtrlCapModIsBlkMode())
+ {
+ /* In block-mode of pdump, default length of first block will be PDUMP_BLOCKLEN_MIN.
+ * If asked to force first block length to full-length it will be ui32BlockLength
+ *
+ * E.g.
+ * Assume,
+ *
+ * ui32BlockLength = 20
+ * PDUMP_BLOCKLEN_MIN = 5
+ *
+ * Then different pdump blocks will have following number of frames in it:
+ *
+ * if(!PDumpCtrlIsFullLenFirstBlockSet())
+ * {
+ * //pdump -b<block len>
+ * block 0 -> 0...4
+ * block 1 -> 5...19
+ * block 2 -> 20...39
+ * block 3 -> 40...59
+ * ...
+ * }
+ * else
+ * {
+ * //pdump -bf<block len>
+ * block 0 -> 0...19
+ * block 1 -> 20...39
+ * block 2 -> 40...59
+ * block 3 -> 60...79
+ * ...
+ * }
+ *
+ * */
+
+ if(!PDumpCtrlIsFullLenFirstBlockSet())
+ {
+ /* First pdump-block will be of PDUMP_BLOCKLEN_MIN length only if ui32BlockLength provided is greater than PDUMP_BLOCKLEN_MIN */
+ bForceBlockTransition = (ui32Frame == PDUMP_BLOCKLEN_MIN) && (g_PDumpCtrl.ui32BlockLength > PDUMP_BLOCKLEN_MIN);
+ }
+ /* Check if we are entering in new pdump-block based on block frame length */
+ bForceBlockTransition |= !(ui32Frame % g_PDumpCtrl.ui32BlockLength);
+
+ if(bForceBlockTransition == IMG_TRUE) /* entering in new pdump-block */
+ {
+ IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+
+ /* Increment and set current block number */
+ PDumpCtrlLockAcquire();
+ /* Logic below is to handle the case where SetFrame(0) gets called twice */
+ ui32CurrentBlock = (ui32Frame == 0)? 0 : (PDumpCtrlGetBlock() + 1);
+ PDumpCtrlLockRelease();
+
+ /* Add markers in script file to differentiate pdump-blocks of size ui32BlockLength */
+ if(ui32CurrentBlock > 0)
+ {
+ /* Add pdump-block end marker */
+ (void) PDumpCommentWithFlags(ui32PDumpFlags, "}PDUMP_BLOCK_END_0x%08X", ui32CurrentBlock - 1);
+ }
+
+ if(PDumpCtrlCaptureOn() && (ui32CurrentBlock > 0))
+ {
+ /* Split MAIN and BLK script out files on current pdump-block end */
+ ui32PDumpFlags |= PDUMP_FLAGS_FORCESPLIT;
+ }
+
+ /* Add pdump-block start marker */
+ (void) PDumpCommentWithFlags(ui32PDumpFlags, "PDUMP_BLOCK_START_0x%08X{", ui32CurrentBlock);
+
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSetBlock(ui32CurrentBlock);
+ PDumpCtrlSetFirstFrameInBlock(IMG_TRUE);
+ PDumpCtrlLockRelease();
+
+ bIsFirstFrameInBlock = IMG_TRUE;
+ }
+ else
+ {
+ /* This is NOT the first frame in current pdump-block */
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSetFirstFrameInBlock(IMG_FALSE);
+ PDumpCtrlLockRelease();
+ }
+ }
+
+ (void) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, "Set pdump frame %u", ui32Frame);
+
+ /*
+ The boolean values below decide if the PDump transition
+ should trigger because of the current context setting the
+ frame number, hence the functions below should execute
+ atomically and do not give a chance to some other context
+ to transition
+ */
+ PDumpCtrlLockAcquire();
+
+ PDumpIsCaptureFrameKM(&bWasInCaptureRange);
+ PDumpCtrlSetCurrentFrame(ui32Frame);
+ PDumpIsCaptureFrameKM(&bIsInCaptureRange);
+
+ PDumpCtrlLockRelease();
+
+ psPDumpConnectionData->ui32LastSetFrameNumber = ui32Frame;
+
+ /* Save the Transition data in case we fail the Transition */
+ psPDumpConnectionData->bWasInCaptureRange = bWasInCaptureRange;
+ psPDumpConnectionData->bIsInCaptureRange = bIsInCaptureRange;
+ }
+ else if (psPDumpConnectionData->bLastTransitionFailed)
+ {
+ /* Load the Transition data so we can try again */
+ bWasInCaptureRange = psPDumpConnectionData->bWasInCaptureRange;
+ bIsInCaptureRange = psPDumpConnectionData->bIsInCaptureRange;
+ }
+ else
+ {
+ /* New frame is the same as the last frame set and the last
+ * transition succeeded, no need to perform another transition.
+ */
+ return PVRSRV_OK;
+ }
+
+	/* If this is the first frame in a new pdump-block, do a dummy transition-out and then a transition-in */
+ if(PDumpCtrlCapModIsBlkMode() && (bIsFirstFrameInBlock && (ui32CurrentBlock > 0)))
+ {
+ if((!psPDumpConnectionData->bLastTransitionFailed) || (bWasInCaptureRange && !bIsInCaptureRange))
+ {
+ /* Forced dummy transition-OUT of capture range */
+ eError = PDumpTransition(psPDumpConnectionData, IMG_FALSE, PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ /* Save transition data, so that we'll know we had failed transition-out while retrying */
+ psPDumpConnectionData->bWasInCaptureRange = IMG_TRUE;
+ psPDumpConnectionData->bIsInCaptureRange = IMG_FALSE;
+ goto fail_Transition;
+ }
+ }
+
+ /* Force dummy transition-IN, Save dummy transition data in case we fail the transition-IN*/
+ psPDumpConnectionData->bWasInCaptureRange = bWasInCaptureRange = IMG_FALSE;
+ psPDumpConnectionData->bIsInCaptureRange = bIsInCaptureRange = IMG_TRUE;
+ }
+
+ if (!bWasInCaptureRange && bIsInCaptureRange)
+ {
+ DEBUG_OUTFILES_COMMENT("PDump transition ENTER-begin frame %u (post)", ui32Frame);
+ eError = PDumpTransition(psPDumpConnectionData, IMG_TRUE, PDUMP_FLAGS_NONE);
+ DEBUG_OUTFILES_COMMENT("PDump transition ENTER-complete frame %u (post)", ui32Frame);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_Transition;
+ }
+ }
+ else if (bWasInCaptureRange && !bIsInCaptureRange)
+ {
+ DEBUG_OUTFILES_COMMENT("PDump transition EXIT-begin frame %u (post)", ui32Frame);
+ eError = PDumpTransition(psPDumpConnectionData, IMG_FALSE, PDUMP_FLAGS_NONE);
+ DEBUG_OUTFILES_COMMENT("PDump transition EXIT-complete frame %u (post)", ui32Frame);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_Transition;
+ }
+ }
+ else
+ {
+ /* Here both previous and current frames are in or out of range.
+ * There is no transition in this case.
+ */
+ }
+
+ psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+ return PVRSRV_OK;
+
+fail_Transition:
+ psPDumpConnectionData->bLastTransitionFailed = IMG_TRUE;
+ return eError;
+}
+
+PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Frame)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if defined(PDUMP_TRACE_STATE)
+ PVR_DPF((PVR_DBG_WARNING, "PDumpSetFrameKM: ui32Frame( %d )", ui32Frame));
+#endif
+
+ DEBUG_OUTFILES_COMMENT("(pre) Set pdump frame %u", ui32Frame);
+
+	/* Setting the frame number to PDUMP_FRAME_UNSET is treated by the Server as a forced PDump capture STOP request */
+ if(ui32Frame == PDUMP_FRAME_UNSET)
+ {
+ eError = _PDumpForceCaptureStopKM();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "_PDumpForceCaptureStopKM");
+ }
+ }
+ else
+ {
+ eError = _PDumpSetFrameKM(psConnection, ui32Frame);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_LOG_ERROR(eError, "_PDumpSetFrameKM");
+ }
+ }
+ DEBUG_OUTFILES_COMMENT("(post) Set pdump frame %u", ui32Frame);
+
+ return eError;
+}
+
+PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32* pui32Frame)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	/*
+		It may be safe to avoid acquiring this lock here, as all the other
+		calls which read or modify the current frame wait on the PDump
+		Control bridge lock first. Also, as of now, the PDumping app never
+		modifies the current frame through a call which acquires the global
+		bridge lock. Still, as a legacy, we acquire the lock and then read.
+	*/
+ PDumpCtrlLockAcquire();
+
+ *pui32Frame = PDumpCtrlGetCurrentFrame();
+
+ PDumpCtrlLockRelease();
+ return eError;
+}
+
+PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize)
+{
+ /*
+ Acquire PDUMP_CTRL_STATE struct lock before modifications as a
+ PDumping app may be reading the state data for some checks
+ */
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Validate parameters */
+ if((ui32End < ui32Start) ||
+ ((ui32Mode != DEBUG_CAPMODE_FRAMED) && (ui32Mode != DEBUG_CAPMODE_CONTINUOUS) && (ui32Mode != DEBUG_CAPMODE_BLKMODE)))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else if((ui32Mode == DEBUG_CAPMODE_BLKMODE) && ((ui32Interval < PDUMP_BLOCKLEN_MIN) || (ui32Interval > PDUMP_BLOCKLEN_MAX)))
+ {
+		/* Force the client to set ui32Interval (i.e. the block length in block-mode) to a valid range */
+ eError = PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN;
+ }
+ else if(ui32Interval < 1)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else if((ui32Mode == DEBUG_CAPMODE_BLKMODE) && (ui32End != PDUMP_FRAME_MAX))
+ {
+		/* Force the client to set ui32End to the MAX value in block-mode */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "PDumpSetDefaultCaptureParamsKM");
+ return eError;
+ }
+
+ if(PDumpIsDumpSuspended())
+ {
+ PVR_LOG(("PDump is in suspended state, need to reload the driver."));
+ }
+
+ PDumpCtrlLockAcquire();
+ PDumpCtrlSetDefaultCaptureParams(ui32Mode, ui32Start, ui32End, ui32Interval);
+ PDumpCtrlLockRelease();
+
+ if (ui32MaxParamFileSize == 0)
+ {
+ g_PDumpParameters.ui32MaxFileSize = PRM_FILE_SIZE_MAX;
+ }
+ else
+ {
+ g_PDumpParameters.ui32MaxFileSize = ui32MaxParamFileSize;
+ }
+ return eError;
+}
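+/* Illustrative examples of the validation above (values hypothetical):
+ * FRAMED with Start=0, End=10, Interval=1 is accepted; BLKMODE requires
+ * PDUMP_BLOCKLEN_MIN <= Interval <= PDUMP_BLOCKLEN_MAX and
+ * End == PDUMP_FRAME_MAX, so BLKMODE with End=100 is rejected with
+ * PVRSRV_ERROR_INVALID_PARAMS.
+ */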
+
+
+/**************************************************************************
+ * Function Name : PDumpReg32
+ * Inputs         : pszPDumpRegName, Register offset, and value to write
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X 0x%08X", pszPDumpRegName, ui32Reg, ui32Data);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
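+/* Illustrative example (values hypothetical): a call such as
+ *   PDumpReg32("SYSMEM", 0x0100, 0xDEADBEEF, PDUMP_FLAGS_CONTINUOUS);
+ * emits the script line
+ *   WRW :SYSMEM:0x00000100 0xDEADBEEF
+ * per the format string above; PDumpReg64 below is identical except
+ * that it emits WRW64 with a 64-bit value.
+ */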
+
+
+/**************************************************************************
+ * Function Name : PDumpReg64
+ * Inputs         : pszPDumpRegName, Register offset, and value to write
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write
+**************************************************************************/
+PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT64 ui64Data,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X 0x%010" IMG_UINT64_FMTSPECX, pszPDumpRegName, ui32Reg, ui64Data);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpRegLabelToReg64
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegDst,
+ IMG_UINT32 ui32RegSrc,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X :%s:0x%08X", pszPDumpRegName, ui32RegDst, pszPDumpRegName, ui32RegSrc);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+
+}
+
+/**************************************************************************
+ * Function Name : PDumpRegLabelToMem32
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceName,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicName,
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:0x%08X",aszMemspaceName, aszSymbolicName,
+ uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpRegLabelToMem64
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a register label
+**************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceName,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicName,
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:0x%08X",aszMemspaceName, aszSymbolicName,
+ uiPDumpSymbolicOffset, pszPDumpRegName, ui32Reg);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpPhysHandleToInternalVar64
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents an internal var
+ write using a PDump pages handle
+**************************************************************************/
+PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar,
+ IMG_HANDLE hPdumpPages,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR *pszSymbolicName;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PDumpGetSymbolicAddr(hPdumpPages,
+ &pszSymbolicName);
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "WRW %s %s:0x%llX",
+ pszInternalVar, pszSymbolicName, 0llu);
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpMemLabelToInternalVar64
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents an internal var write using a memory label
+**************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceName,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicName,
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW %s :%s:%s:0x%"IMG_UINT64_FMTSPECX, pszInternalVar,
+ aszMemspaceName, aszSymbolicName, uiPDumpSymbolicOffset);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function	PDumpWriteVarORValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical OR operation
+ Var <- Var OR Value
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarORValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "OR %s %s 0x%"IMG_UINT64_FMTSPECX,
+ pszInternalVariable,
+ pszInternalVariable,
+ ui64Value);
+
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
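+/* Illustrative example (variable name hypothetical): calling the above
+ * with pszInternalVariable = ":SYSMEM:$1" and ui64Value = 0x10 emits
+ *   OR :SYSMEM:$1 :SYSMEM:$1 0x10
+ * i.e. the variable is both the destination and the first operand; the
+ * SHR and AND helpers further down follow the same pattern.
+ */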
+
+
+/*******************************************************************************************************
+ * Function Name : PDumpRegLabelToInternalVar
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which writes a register label into an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags)
+
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW %s :%s:0x%08X", pszInternalVar, pszPDumpRegName, ui32Reg);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpInternalVarToReg32
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write from an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpInternalVarToReg64
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a register write from an internal variable
+********************************************************************************************************/
+PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:0x%08X %s", pszPDumpRegName, ui32Reg, pszInternalVar);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+
+/*******************************************************************************************************
+ * Function Name : PDumpMemLabelToMem32
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a memory label
+********************************************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+ uiLogicalOffsetSource,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameSource,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameSource,
+ &uiPDumpSymbolicOffsetSource,
+ &uiNextSymNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s:0x%"IMG_UINT64_FMTSPECX,aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, aszSymbolicNameSource,
+ uiPDumpSymbolicOffsetSource);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
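+/* Illustrative example (symbolic names hypothetical): if the source PMR
+ * resolves to :SYSMEM:SYM_0001 at offset 0x40 and the destination PMR
+ * to :SYSMEM:SYM_0002 at offset 0x0, the line emitted above is
+ *   WRW :SYSMEM:SYM_0002:0x0 :SYSMEM:SYM_0001:0x40
+ * i.e. destination operand first, source second, matching the format
+ * string's argument order.
+ */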
+
+/*******************************************************************************************************
+ * Function Name : PDumpMemLabelToMem64
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Create a PDUMP string, which represents a memory write from a memory label
+********************************************************************************************************/
+PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+ uiLogicalOffsetSource,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameSource,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameSource,
+ &uiPDumpSymbolicOffsetSource,
+ &uiNextSymNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s:0x%"IMG_UINT64_FMTSPECX,aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameSource, aszSymbolicNameSource,
+ uiPDumpSymbolicOffsetSource);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+
+/*!
+******************************************************************************
+
+ @Function PDumpWriteVarSHRValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical SHR operation
+ Var <- Var SHR Value
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarSHRValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR %s %s 0x%"IMG_UINT64_FMTSPECX,
+ pszInternalVariable,
+ pszInternalVariable,
+ ui64Value);
+
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+/*!
+******************************************************************************
+
+ @Function	PDumpWriteVarANDValueOp
+
+ @Description
+
+ Emits the PDump commands for the logical AND operation
+ Var <- Var AND Value
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PDumpWriteVarANDValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "AND %s %s 0x%"IMG_UINT64_FMTSPECX,
+ pszInternalVariable,
+ pszInternalVariable,
+ ui64Value);
+
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpSAW
+ * Inputs : pszDevSpaceName -- device space from which to output
+ * ui32Offset -- offset value from register base
+ * ui32NumSaveBytes -- number of bytes to output
+ * pszOutfileName -- name of file to output to
+ * ui32OutfileOffsetByte -- offset into output file to write
+ * uiPDumpFlags -- flags to pass to PDumpOSWriteScript
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Dumps the contents of a register bank into a file
+ * NB: ui32NumSaveBytes must be divisible by 4
+**************************************************************************/
+PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName,
+ IMG_UINT32 ui32HPOffsetBytes,
+ IMG_UINT32 ui32NumSaveBytes,
+ IMG_CHAR *pszOutfileName,
+ IMG_UINT32 ui32OutfileOffsetByte,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSAW\n"));
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAW :%s:0x%x 0x%x 0x%x %s\n",
+ pszDevSpaceName,
+ ui32HPOffsetBytes,
+ ui32NumSaveBytes / (IMG_UINT32)sizeof(IMG_UINT32),
+ ui32OutfileOffsetByte,
+ pszOutfileName);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpOSBufprintf failed: eError=%u\n", eError));
+ PDUMP_UNLOCK();
+ return eError;
+ }
+
+ if(! PDumpWriteScript(hScript, uiPDumpFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PDumpSAW PDumpWriteScript failed!\n"));
+ }
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+
+}
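+/* Note: the SAW size operand is emitted in 32-bit words, hence the
+ * ui32NumSaveBytes / sizeof(IMG_UINT32) division above and the
+ * divisible-by-4 requirement in the header; e.g. a 256-byte save is
+ * emitted as 0x40 words (hypothetical value).
+ */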
+
+
+/**************************************************************************
+ * Function Name : PDumpRegPolKM
+ * Inputs : Description of what this register read is trying to do
+ * pszPDumpDevName
+ * Register offset
+ * expected value
+ * mask for that value
+ * Outputs : None
+ * Returns : None
+ * Description : Create a PDUMP string which represents a register read
+ * with the expected value
+**************************************************************************/
+PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags,
+ PDUMP_POLL_OPERATOR eOperator)
+{
+ /* Timings correct for linux and XP */
+ /* Timings should be passed in */
+ #define POLL_DELAY 1000U
+ #define POLL_COUNT_LONG (2000000000U / POLL_DELAY)
+ #define POLL_COUNT_SHORT (1000000U / POLL_DELAY)
+
+ PVRSRV_ERROR eErr;
+ IMG_UINT32 ui32PollCount;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ ui32PollCount = POLL_COUNT_LONG;
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "POL :%s:0x%08X 0x%08X 0x%08X %d %u %d",
+ pszPDumpRegName, ui32RegAddr, ui32RegValue,
+ ui32Mask, eOperator, ui32PollCount, POLL_DELAY);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
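+/* Note on the poll constants above: with POLL_DELAY = 1000,
+ * POLL_COUNT_LONG = 2000000000 / 1000 = 2000000 retries, and
+ * POLL_COUNT_SHORT = 1000000 / 1000 = 1000. A hypothetical emitted line
+ *   POL :SYSMEM:0x00000100 0x00000001 0xFFFFFFFF 0 2000000 1000
+ * asks the playback tool to poll the register up to 2000000 times,
+ * POLL_DELAY units apart, until (value & mask) satisfies the operator.
+ */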
+
+/* Never call direct, needs caller to hold OS Lock.
+ * Use PDumpCommentWithFlags() from within the server.
+ * Clients call this via the bridge and PDumpCommentKM().
+ */
+static PVRSRV_ERROR _PDumpWriteComment(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+#if defined(PDUMP_DEBUG_OUTFILES)
+ IMG_CHAR pszTemp[256];
+#endif
+ PDUMP_GET_SCRIPT_STRING();
+
+ if((pszComment == NULL) || (PDumpOSBuflen(pszComment, ui32MaxLen) == 0))
+ {
+ /* PDumpOSVerifyLineEnding silently fails if pszComment is too short to
+ actually hold the line endings that it's trying to enforce, so
+ short circuit it and force safety */
+ pszComment = "\n";
+ }
+ else
+ {
+ /* Put line ending sequence at the end if it isn't already there */
+ PDumpOSVerifyLineEnding(pszComment, ui32MaxLen);
+ }
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+ /* Prefix comment with PID and line number */
+ eErr = PDumpOSSprintf(pszTemp, 256, "%u %u:%lu %s: %s",
+ g_ui32EveryLineCounter,
+ OSGetCurrentClientProcessIDKM(),
+ (unsigned long)OSGetCurrentClientThreadIDKM(),
+ OSGetCurrentClientProcessNameKM(),
+ pszComment);
+
+ /* Append the comment to the script stream */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+ pszTemp);
+#else
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- %s",
+ pszComment);
+#endif
+ if( (eErr != PVRSRV_OK) &&
+ (eErr != PVRSRV_ERROR_PDUMP_BUF_OVERFLOW))
+ {
+ PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrUnlock);
+ }
+
+ if (!PDumpWriteScript(hScript, ui32Flags))
+ {
+ if(PDUMP_IS_CONTINUOUS(ui32Flags))
+ {
+ eErr = PVRSRV_ERROR_PDUMP_BUFFER_FULL;
+ PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+ }
+ else
+ {
+ eErr = PVRSRV_ERROR_CMD_NOT_PROCESSED;
+ PVR_LOGG_IF_ERROR(eErr, "PDumpWriteScript", ErrUnlock);
+ }
+ }
+
+ErrUnlock:
+ return eErr;
+}
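+/* Illustrative example (values hypothetical): with PDUMP_DEBUG_OUTFILES
+ * defined, a comment "Hello" from PID 1234, thread 5678, process "app"
+ * at line counter 42 reaches the script as
+ *   -- 42 1234:5678 app: Hello
+ */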
+
+/**************************************************************************
+ * Function Name : PDumpCommentKM
+ * Inputs : pszComment, ui32Flags
+ * Outputs : None
+ * Returns : None
+ * Description : Dumps a pre-formatted comment, primarily called from the
+ * : bridge.
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ PDUMP_LOCK();
+
+ eErr = _PDumpWriteComment(pszComment, ui32Flags);
+
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PDumpCommentWithFlags
+ * Inputs         : ui32Flags - PDump flags for the comment
+ *                : pszFormat - format string for comment
+ *                : ... - args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags, IMG_CHAR * pszFormat, ...)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ va_list args;
+
+ va_start(args, pszFormat);
+	eErr = PDumpCommentWithFlagsVA(ui32Flags, pszFormat, args);
+ va_end(args);
+
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PDumpCommentWithFlagsVA
+ * Inputs         : ui32Flags - PDump flags for the comment
+ *                : pszFormat - format string for comment
+ *                : args - pre-started va_list args for format string
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : PDumps a comment
+**************************************************************************/
+PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags, const IMG_CHAR * pszFormat, va_list args)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ PDUMP_GET_MSG_STRING();
+
+ PDUMP_LOCK();
+
+ /* Construct the string */
+ eErr = PDumpOSVSprintf(pszMsg, ui32MaxLen, pszFormat, args);
+
+ if(eErr != PVRSRV_OK)
+ {
+ goto Unlock;
+ }
+
+ eErr = _PDumpWriteComment(pszMsg, ui32Flags);
+
+Unlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+/*************************************************************************/ /*!
+ * Function Name : PDumpPanic
+ * Inputs : ui32PanicNo - Unique number for panic condition
+ * : pszPanicMsg - Panic reason message limited to ~90 chars
+ * : pszPPFunc - Function name string where panic occurred
+ * : ui32PPline - Source line number where panic occurred
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : PDumps a panic assertion. Used when the host driver
+ * : detects a condition that will lead to an invalid PDump
+ * : script that cannot be played back off-line.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo,
+ IMG_CHAR* pszPanicMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PDUMP_FLAGS_T uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+ IMG_CHAR pszConsoleMsg[] =
+"COM ***************************************************************************\n"
+"COM Script invalid and not compatible with off-line playback. Check test \n"
+"COM parameters and driver configuration, stop imminent.\n"
+"COM ***************************************************************************\n";
+ PDUMP_GET_SCRIPT_STRING();
+
+ /* Log the panic condition to the live kern.log in both REL and DEB mode
+ * to aid user PDump trouble shooting. */
+ PVR_LOG(("PDUMP PANIC %08x: %s", ui32PanicNo, pszPanicMsg));
+ PVR_DPF((PVR_DBG_MESSAGE, "PDUMP PANIC start %s:%d", pszPPFunc, ui32PPline));
+
+ /* Check the supplied panic reason string is within length limits */
+ PVR_ASSERT(OSStringLength(pszPanicMsg)+sizeof("PANIC ") < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+ /* Obtain lock to keep the multi-line
+ * panic statement together in a single atomic write */
+ PDUMP_LOCK();
+
+
+ /* Write -- Panic start (Function:line) */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic start (%s:%d)", pszPPFunc, ui32PPline);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* Write COM <message> x4 */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "%s", pszConsoleMsg);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* Write PANIC no msg command */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "PANIC %08x %s", ui32PanicNo, pszPanicMsg);
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* Write -- Panic end */
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "-- Panic end");
+ PVR_LOGG_IF_ERROR(eError, "PDumpOSBufprintf", e1);
+ (void)PDumpWriteScript(hScript, uiPDumpFlags);
+
+e1:
+ PDUMP_UNLOCK();
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+ * Function Name : PDumpCaptureError
+ * Inputs         : ui32ErrorNo - Unique number for the error condition
+ *                : pszErrorMsg - Error reason message limited to ~90 chars
+ *                : pszPPFunc - Function name string where the error occurred
+ *                : ui32PPline - Source line number where the error occurred
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : PDumps an error string to the script file to interrupt
+ * : play back to inform user of a fatal issue that occurred
+ * : during PDump capture.
+ */ /*************************************************************************/
+PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+ IMG_CHAR* pszErrorMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+	IMG_CHAR pszFormatStr[] = "DRIVER_ERROR: %3d: %s"; /* array, so sizeof() below gives the string size rather than the pointer size */
+ PDUMP_FLAGS_T uiPDumpFlags = PDUMP_FLAGS_CONTINUOUS;
+
+ /* Need to return an error using this macro */
+ PDUMP_GET_SCRIPT_STRING();
+
+	/* Check the supplied error reason string is within length limits */
+ PVR_ASSERT(OSStringLength(pszErrorMsg)+sizeof(pszFormatStr) < PVRSRV_PDUMP_MAX_COMMENT_SIZE-1);
+
+ /* Obtain lock to keep the multi-line
+ * panic statement together in a single atomic write */
+ PDUMP_LOCK();
+
+ /* Write driver error message to the script file */
+ (void) PDumpOSBufprintf(hScript, ui32MaxLen, pszFormatStr, ui32ErrorNo, pszErrorMsg);
+ (void) PDumpWriteScript(hScript, uiPDumpFlags);
+
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PDumpBitmapKM
+
+ @Description
+
+ Dumps a bitmap from device memory to a file
+
+ @Input psDevId
+ @Input pszFileName
+ @Input ui32FileOffset
+ @Input ui32Width
+ @Input ui32Height
+ @Input ui32StrideInBytes
+ @Input sDevBaseAddr
+ @Input ui32Size
+ @Input ePixelFormat
+ @Input eMemFormat
+ @Input ui32PDumpFlags
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR PDumpBitmapKM( PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_DEVICE_IDENTIFIER *psDevId = &psDeviceNode->sDevId;
+ PVRSRV_ERROR eErr=0;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "Dump bitmap of render.");
+
+ switch (ePixelFormat)
+ {
+ case PVRSRV_PDUMP_PIXEL_FORMAT_YUV8:
+ {
+ PDumpCommentWithFlags(ui32PDumpFlags, "YUV data. Switching from SII to SAB. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
+ psDevId->pszPDumpDevName,
+ ui32MMUContextID,
+ sDevBaseAddr.uiAddr,
+ ui32Size,
+ ui32FileOffset,
+ pszFileName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+ case PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8: // YUV420 2 planes
+ {
+ const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+ const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>1; // YUV420
+ const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+ const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 2-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+
+ // Plane 0 (Y)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // Context id
+ sDevBaseAddr.uiAddr, // virtaddr
+ ui32Plane0Size, // size
+ ui32FileOffset, // fileoffset
+
+ // Plane 1 (UV)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // Context id
+ sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
+ ui32Plane1Size, // size
+ ui32Plane1FileOffset, // fileoffset
+
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+
+ case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12: // YUV420 3 planes
+ {
+ const IMG_UINT32 ui32Plane0Size = ui32StrideInBytes*ui32Height;
+ const IMG_UINT32 ui32Plane1Size = ui32Plane0Size>>2; // YUV420
+ const IMG_UINT32 ui32Plane2Size = ui32Plane1Size;
+ const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32Plane0Size;
+ const IMG_UINT32 ui32Plane2FileOffset = ui32Plane1FileOffset + ui32Plane1Size;
+ const IMG_UINT32 ui32Plane1MemOffset = ui32Plane0Size;
+ const IMG_UINT32 ui32Plane2MemOffset = ui32Plane0Size+ui32Plane1Size;
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YUV420 3-plane. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+
+ // Plane 0 (Y)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr, // virtaddr
+ ui32Plane0Size, // size
+ ui32FileOffset, // fileoffset
+
+ // Plane 1 (U)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
+ ui32Plane1Size, // size
+ ui32Plane1FileOffset, // fileoffset
+
+ // Plane 2 (V)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane2MemOffset, // virtaddr
+ ui32Plane2Size, // size
+ ui32Plane2FileOffset, // fileoffset
+
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript( hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+
+ case PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32: // YV32 - 4 contiguous planes in the order VUYA, stride can be > width.
+ {
+ const IMG_UINT32 ui32PlaneSize = ui32StrideInBytes*ui32Height; // All 4 planes are the same size
+ const IMG_UINT32 ui32Plane0FileOffset = ui32FileOffset + (ui32PlaneSize<<1); // SII plane 0 is Y, which is YV32 plane 2
+ const IMG_UINT32 ui32Plane1FileOffset = ui32FileOffset + ui32PlaneSize; // SII plane 1 is U, which is YV32 plane 1
+ const IMG_UINT32 ui32Plane2FileOffset = ui32FileOffset; // SII plane 2 is V, which is YV32 plane 0
+ const IMG_UINT32 ui32Plane3FileOffset = ui32Plane0FileOffset + ui32PlaneSize; // SII plane 3 is A, which is YV32 plane 3
+ const IMG_UINT32 ui32Plane0MemOffset = ui32PlaneSize<<1;
+ const IMG_UINT32 ui32Plane1MemOffset = ui32PlaneSize;
+ const IMG_UINT32 ui32Plane2MemOffset = 0;
+ const IMG_UINT32 ui32Plane3MemOffset = ui32Plane0MemOffset + ui32PlaneSize;
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 4 planes. Width=0x%08X Height=0x%08X Stride=0x%08X",
+ ui32Width, ui32Height, ui32StrideInBytes);
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 plane size is 0x%08X", ui32PlaneSize);
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 0 Mem Offset=0x%08X", ui32Plane0MemOffset);
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 1 Mem Offset=0x%08X", ui32Plane1MemOffset);
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 2 Mem Offset=0x%08X", ui32Plane2MemOffset);
+ PDumpCommentWithFlags(ui32PDumpFlags, "YV32 Plane 3 Mem Offset=0x%08X", ui32Plane3MemOffset);
+
+ /*
+ SII <imageset> <filename> :<memsp1>:v<id1>:<virtaddr1> <size1> <fileoffset1> Y
+ :<memsp2>:v<id2>:<virtaddr2> <size2> <fileoffset2> U
+ :<memsp3>:v<id3>:<virtaddr3> <size3> <fileoffset3> V
+ :<memsp4>:v<id4>:<virtaddr4> <size4> <fileoffset4> A
+ <pixfmt> <width> <height> <stride> <addrmode>
+ */
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+
+ // Plane 0 (V)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane0MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane0FileOffset, // fileoffset
+
+ // Plane 1 (U)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane1MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane1FileOffset, // fileoffset
+
+ // Plane 2 (Y)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane2MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane2FileOffset, // fileoffset
+
+ // Plane 3 (A)
+ psDevId->pszPDumpDevName, // memsp
+ ui32MMUContextID, // MMU context id
+ sDevBaseAddr.uiAddr+ui32Plane3MemOffset, // virtaddr
+ ui32PlaneSize, // size
+ ui32Plane3FileOffset, // fileoffset
+
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+			PDumpWriteScript(hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+
+ default: // Single plane formats
+ {
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SII %s %s.bin :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X",
+ pszFileName,
+ pszFileName,
+ psDevId->pszPDumpDevName,
+ ui32MMUContextID,
+ sDevBaseAddr.uiAddr,
+ ui32Size,
+ ui32FileOffset,
+ ePixelFormat,
+ ui32Width,
+ ui32Height,
+ ui32StrideInBytes,
+ ui32AddrMode);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+			PDumpWriteScript(hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+ break;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PDumpImageDescriptorKM
+
+ @Description
+
+ Dumps an OutputImage command and its associated header info.
+
+ @Input    psDeviceNode     : device node
+ @Input    ui32MMUContextID : MMU context ID
+ @Input    ui32Size         : size of the filename string, including the
+                              NUL terminator
+ @Input    pszSABFileName   : filename string (the remaining parameters are
+                              documented in pdump_km.h)
+
+ @Return   PVRSRV_ERROR : PVRSRV_OK on success
+
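+ The emitted script takes roughly the following shape (a sketch; the
+ memory space name, context id and offsets depend on the device and the
+ inputs, and the FBC header SAB line is only present for compressed
+ surfaces):
+
+     MALLOC :<dev>:BINHEADER <hdrsize> <hdrsize>
+     LDB :<dev>:BINHEADER:0x00 <hdrsize> <paramoffset> <paramfile>
+     SAB :<dev>:BINHEADER:0x00 <hdrsize> 0x00000000 <file>.bin
+     SAB :<dev>:v<ctx>:<headeraddr> <hdrsize> <offset> <file>.bin
+     SAB :<dev>:v<ctx>:<dataaddr> <datasize> <offset> <file>.bin
+     CMD:OutputImage <file>.bin
+     FREE :<dev>:BINHEADER
+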
+******************************************************************************/
+PVRSRV_ERROR PDumpImageDescriptorKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR *pszSABFileName,
+ IMG_DEV_VIRTADDR sData,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeader,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+#if !defined(SUPPORT_RGX)
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+ PVR_UNREFERENCED_PARAMETER(sData);
+ PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+ PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+ PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+ PVR_UNREFERENCED_PARAMETER(ePixFmt);
+ PVR_UNREFERENCED_PARAMETER(eMemLayout);
+ PVR_UNREFERENCED_PARAMETER(eFBCompression);
+ PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+ PVR_UNREFERENCED_PARAMETER(sHeader);
+ PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+#else
+ PVRSRV_ERROR eErr=0;
+ IMG_CHAR *pszPDumpDevName = psDeviceNode->sDevId.pszPDumpDevName;
+ IMG_BYTE abyPDumpDesc[IMAGE_HEADER_SIZE];
+ IMG_UINT32 ui32ParamOutPos, ui32SABOffset = 0;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+ PVR_UNREFERENCED_PARAMETER(ui32MaxLenFileName);
+
+ if (ui32PDumpFlags & PDUMP_FLAGS_PERSISTENT)
+ {
+ return PVRSRV_OK;
+ }
+
+ PDumpCommentWithFlags(ui32PDumpFlags, "Dump Image descriptor");
+
+	/* Ensure the string is NUL-terminated */
+ pszSABFileName[ui32Size-1] = '\0';
+
+ /*
+ * Generate OutputImage command header
+ */
+ eErr = RGXPDumpOutputImageHdr( psDeviceNode,
+ ui32HeaderSize,
+ ui32DataSize,
+ ui32LogicalWidth,
+ ui32LogicalHeight,
+ ui32PhysicalWidth,
+ ui32PhysicalHeight,
+ ePixFmt,
+ eMemLayout,
+ eFBCompression,
+ paui32FBCClearColour,
+ &(abyPDumpDesc[0]));
+ if (eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write image header data, error %d", eErr));
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpWriteParameter(abyPDumpDesc,
+ IMAGE_HEADER_SIZE,
+ ui32PDumpFlags,
+ &ui32ParamOutPos,
+ pszFileName);
+ if (eErr != PVRSRV_OK)
+ {
+ if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+ PDUMP_ERROR(eErr, "Failed to write device allocation to parameter file");
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eErr));
+ }
+ else
+ {
+			/*
+			 * Writing to the parameter file was prevented by the flags and
+			 * the current state of the driver, so skip the script write and
+			 * return success.
+			 */
+ eErr = PVRSRV_OK;
+ }
+ goto error;
+ }
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "MALLOC :%s:BINHEADER 0x%08X 0x%08X\n",
+ pszPDumpDevName,
+ IMAGE_HEADER_SIZE,
+ IMAGE_HEADER_SIZE);
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "LDB :%s:BINHEADER:0x00 0x%08x 0x%08x %s\n",
+ pszPDumpDevName,
+ IMAGE_HEADER_SIZE,
+ ui32ParamOutPos,
+ pszFileName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "SAB :%s:BINHEADER:0x00 0x%08X 0x00000000 %s.bin\n",
+ pszPDumpDevName,
+ IMAGE_HEADER_SIZE,
+ pszSABFileName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ ui32SABOffset += IMAGE_HEADER_SIZE;
+
+ /*
+ * Write out the header section if image is FB compressed
+ */
+ if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+ {
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
+ pszPDumpDevName,
+ ui32MMUContextID,
+ (IMG_UINT64)sHeader.uiAddr,
+ ui32HeaderSize,
+ ui32SABOffset,
+ pszSABFileName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ ui32SABOffset += ui32HeaderSize;
+ }
+
+ /*
+ * Now dump out the actual data associated with the surface
+ */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "SAB :%s:v%x:0x%010"IMG_UINT64_FMTSPECX" 0x%08X 0x%08X %s.bin\n",
+ pszPDumpDevName,
+ ui32MMUContextID,
+ (IMG_UINT64)sData.uiAddr,
+ ui32DataSize,
+ ui32SABOffset,
+ pszSABFileName);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ /*
+ * The OutputImage command is required to trigger processing of the output
+ * data
+ */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "CMD:OutputImage %s.bin\n",
+ pszSABFileName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLenScript,
+ "FREE :%s:BINHEADER\n",
+ pszPDumpDevName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto error;
+ }
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+error:
+ PDUMP_UNLOCK();
+ return eErr;
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function PDumpReadRegKM
+
+ @Description
+
+ Dumps a read from a device register to a file
+
+ @Input    pszPDumpRegName : PDump register bank name
+ @Input    pszFileName     : output file name
+ @Input    ui32FileOffset  : offset into the output file
+ @Input    ui32Address     : register address to read from
+ @Input    ui32Size        : size of the read (currently unused)
+ @Input    ui32PDumpFlags  : PDump flags
+
+ @Return   PVRSRV_ERROR : PVRSRV_OK on success
+
+******************************************************************************/
+PVRSRV_ERROR PDumpReadRegKM ( IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:0x%08X 0x%08X %s",
+ pszPDumpRegName,
+ ui32Address,
+ ui32FileOffset,
+ pszFileName);
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+	PDumpWriteScript(hScript, ui32PDumpFlags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name PDumpRegRead32
+ @brief Dump 32-bit register read to script
+ @param pszPDumpRegName - PDump register bank name
+ @param ui32RegOffset - register offset
+ @param ui32Flags - pdump flags
+ @return Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW :%s:0x%X",
+ pszPDumpRegName,
+ ui32RegOffset);
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name PDumpRegRead64ToInternalVar
+ @brief Read 64-bit register into an internal variable
+ @param pszPDumpRegName - PDump register bank name
+ @param pszInternalVar - internal variable to receive the value read
+ @param ui32RegOffset - register offset
+ @param ui32Flags - pdump flags
+ @return Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead64ToInternalVar(IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszInternalVar,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW64 %s :%s:0x%X",
+ pszInternalVar,
+ pszPDumpRegName,
+ ui32RegOffset);
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
+
+/*****************************************************************************
+ @name PDumpRegRead64
+ @brief Dump 64-bit register read to script
+ @param pszPDumpRegName - PDump register bank name
+ @param ui32RegOffset - register offset
+ @param ui32Flags - pdump flags
+ @return Error
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "RDW64 :%s:0x%X",
+ pszPDumpRegName,
+ ui32RegOffset);
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
+
+
+/*****************************************************************************
+ FUNCTION : PDumpWriteShiftedMaskedValue
+
+ PURPOSE : Emits the PDump commands for writing a masked shifted address
+ into another location
+
+ PARAMETERS : PDump symbolic name and offset of target word
+ PDump symbolic name and offset of source address
+ right shift amount
+ left shift amount
+ mask
+
+ RETURNS	: PVRSRV_ERROR
+*****************************************************************************/
+PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+ const IMG_CHAR *pszDestSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefRegspaceName,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ IMG_UINT32 uiSHRAmount,
+ IMG_UINT32 uiSHLAmount,
+ IMG_UINT32 uiMask,
+ IMG_DEVMEM_SIZE_T uiWordSize,
+ IMG_UINT32 uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ /* Suffix of WRW command in PDump (i.e. WRW or WRW64) */
+ const IMG_CHAR *pszWrwSuffix;
+
+ /* Internal PDump register used for interim calculation */
+ const IMG_CHAR *pszPDumpIntRegSpace;
+ IMG_UINT32 uiPDumpIntRegNum;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if ((uiWordSize != 4) && (uiWordSize != 8))
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ pszWrwSuffix = (uiWordSize == 8) ? "64" : "";
+
+ /* Should really "Acquire" a pdump register here */
+ pszPDumpIntRegSpace = pszDestRegspaceName;
+ uiPDumpIntRegNum = 1;
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ /* Should this be "MOV" instead? */
+ "WRW :%s:$%d :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src */
+ pszRefRegspaceName,
+ pszRefSymbolicName,
+ uiRefOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ if (uiSHRAmount > 0)
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR :%s:$%d :%s:$%d 0x%X\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src A */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src B */
+ uiSHRAmount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+ if (uiSHLAmount > 0)
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHL :%s:$%d :%s:$%d 0x%X\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src A */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src B */
+ uiSHLAmount);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+	/* Skip the AND when the mask covers the whole word. The all-ones value
+	 * is computed with two shifts to avoid shifting by the full word width,
+	 * which is undefined behaviour in C when uiWordSize is 8.
+	 */
+	if (uiMask != (((1ULL << ((8*uiWordSize)-1)) << 1) - 1))
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "AND :%s:$%d :%s:$%d 0x%X\n",
+ /* dest */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src A */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum,
+ /* src B */
+ uiMask);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " :%s:$%d\n",
+ pszWrwSuffix,
+ /* dest */
+ pszDestRegspaceName,
+ pszDestSymbolicName,
+ uiDestOffset,
+ /* src */
+ pszPDumpIntRegSpace,
+ uiPDumpIntRegNum);
+	if (eError != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+
+PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ const IMG_CHAR *pszPDumpDevName,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ IMG_UINT32 uiPDumpFlags)
+{
+ const IMG_CHAR *pszWrwSuffix = "";
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (ui32WordSize == 8)
+ {
+ pszWrwSuffix = "64";
+ }
+
+ PDUMP_LOCK();
+
+ if (ui32AlignShift != ui32Shift)
+ {
+ /* Write physical address into a variable */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:$1 %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ /* src */
+ pszRefSymbolicName,
+ uiRefOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* apply address alignment */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui32AlignShift);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ /* apply address shift */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHL :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui32Shift);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+
+ /* write result to register */
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:0x%08X :%s:$1",
+ pszWrwSuffix,
+ pszDestSpaceName,
+ (IMG_UINT32)uiDestOffset,
+ pszPDumpDevName);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+ else
+ {
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s:" IMG_DEVMEM_OFFSET_FMTSPEC "\n",
+ pszWrwSuffix,
+ /* dest */
+ pszDestSpaceName,
+ uiDestOffset,
+ /* src */
+ pszRefSymbolicName,
+ uiRefOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto symbAddress_error;
+ }
+ PDumpWriteScript(hScript, uiPDumpFlags);
+ }
+
+symbAddress_error:
+
+ PDUMP_UNLOCK();
+
+ return eError;
+}
+
+/**************************************************************************
+ * Function Name : PDumpIDLWithFlags
+ * Inputs : Idle time in clocks
+ * Outputs : None
+ * Returns : Error
+ * Description : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IDL %u", ui32Clocks);
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+ return PVRSRV_OK;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpIDL
+ * Inputs : Idle time in clocks
+ * Outputs : None
+ * Returns : Error
+ * Description : Dump IDL command to script
+**************************************************************************/
+PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks)
+{
+ return PDumpIDLWithFlags(ui32Clocks, PDUMP_FLAGS_CONTINUOUS);
+}
+
+/*****************************************************************************
+ FUNCTION : PDumpRegBasedCBP
+
+ PURPOSE : Dump CBP command to script
+
+ PARAMETERS	: PDump register bank name, register offset, write position,
+		  packet size, buffer size and PDump flags
+
+ RETURNS	: PVRSRV_ERROR
+*****************************************************************************/
+PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "CBP :%s:0x%08X 0x%08X 0x%08X 0x%08X",
+ pszPDumpRegName,
+ ui32RegOffset,
+ ui32WPosVal,
+ ui32PacketSize,
+ ui32BufferSize);
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+ IMG_UINT32 ui32MMUCtxID,
+ IMG_UINT32 ui32RegionID,
+ IMG_BOOL bEnable,
+ IMG_UINT64 ui64VAddr,
+ IMG_UINT64 ui64LenBytes,
+ IMG_UINT32 ui32XStride,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+	if (bEnable)
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "TRG :%s:v%u %u 0x%08"IMG_UINT64_FMTSPECX" 0x%08"IMG_UINT64_FMTSPECX" %u",
+ pszMemSpace, ui32MMUCtxID, ui32RegionID,
+ ui64VAddr, ui64LenBytes, ui32XStride);
+ }
+ else
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "TRG :%s:v%u %u",
+ pszMemSpace, ui32MMUCtxID, ui32RegionID);
+	}
+	if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpConnectionNotify
+ * Description : Called by the srvcore to tell PDump core that the
+ * PDump capture and control client has connected
+ **************************************************************************/
+void PDumpConnectionNotify(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psThis;
+
+ /* Give PDump control a chance to end the init phase, depends on OS */
+ if (!PDumpCtrlInitPhaseComplete())
+ {
+ PDumpStopInitPhase(IMG_TRUE, IMG_FALSE);
+ }
+
+ g_ConnectionCount++;
+ PVR_LOG(("PDump has connected (%u)", g_ConnectionCount));
+
+ /* Reset the parameter file attributes */
+ g_PDumpParameters.sWOff.ui32Main = g_PDumpParameters.sWOff.ui32Init;
+ g_PDumpParameters.ui32FileIdx = 0;
+
+ g_PDumpScript.sWOff.ui32Main = g_PDumpScript.sWOff.ui32Init;
+ g_PDumpScript.ui32FileIdx = 0;
+
+ g_PDumpBlkScript.sWOff.ui32Main = g_PDumpBlkScript.sWOff.ui32Init;
+ g_PDumpBlkScript.ui32FileIdx = 0;
+
+ /* Loop over all known devices */
+ psThis = psPVRSRVData->psDeviceNodeList;
+ while (psThis)
+ {
+ if (psThis->pfnPDumpInitDevice)
+ {
+ /* Reset pdump according to connected device */
+ psThis->pfnPDumpInitDevice(psThis);
+ }
+ psThis = psThis->psNext;
+ }
+}
+
+/**************************************************************************
+ * Function Name : PDumpDisconnectionNotify
+ * Description : Called by the connection_server to tell PDump core that
+ * the PDump capture and control client has disconnected
+ **************************************************************************/
+void PDumpDisconnectionNotify(void)
+{
+ PVRSRV_ERROR eErr;
+
+ if (PDumpCtrlCaptureOn())
+ {
+ PVR_LOG(("PDump killed, output files may be invalid or incomplete!"));
+
+ /* Disable capture in server, in case PDump client was killed and did
+ * not get a chance to reset the capture parameters.
+ */
+ eErr = PDumpSetDefaultCaptureParamsKM( DEBUG_CAPMODE_FRAMED,
+ PDUMP_FRAME_UNSET, PDUMP_FRAME_UNSET, 1, 0);
+ PVR_LOG_IF_ERROR(eErr, "PVRSRVPDumpSetDefaultCaptureParams");
+ }
+ else
+ {
+ PVR_LOG(("PDump disconnected"));
+ }
+}
+
+/**************************************************************************
+ * Function Name : PDumpIfKM
+ * Inputs : pszPDumpCond - string for condition
+ * Outputs : None
+ * Returns         : PVRSRV_ERROR
+ * Description     : Create a PDUMP string which represents an IF command
+ *                   with the given condition.
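+ *
+ * The emitted commands form a conditional block across PDumpIfKM,
+ * PDumpElseKM and PDumpFiKM, e.g. (condition name illustrative):
+ *     IF MYCOND
+ *     ...
+ *     ELSE MYCOND
+ *     ...
+ *     FI MYCOND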
+**************************************************************************/
+PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond)
+{
+ PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "IF %s\n", pszPDumpCond);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpElseKM
+ * Inputs : pszPDumpCond - string for condition
+ * Outputs : None
+ * Returns         : PVRSRV_ERROR
+ * Description     : Create a PDUMP string which represents an ELSE command
+ *                   with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond)
+{
+ PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "ELSE %s\n", pszPDumpCond);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name : PDumpFiKM
+ * Inputs : pszPDumpCond - string for condition
+ * Outputs : None
+ * Returns         : PVRSRV_ERROR
+ * Description     : Create a PDUMP string which represents a FI command
+ *                   with the given condition.
+**************************************************************************/
+PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond)
+{
+ PVRSRV_ERROR eErr;
+	PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FI %s\n", pszPDumpCond);
+
+ if (eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ return eErr;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PDumpCreateLockKM(void)
+{
+ return PDumpOSCreateLock();
+}
+
+void PDumpDestroyLockKM(void)
+{
+ PDumpOSDestroyLock();
+}
+
+void PDumpLock(void)
+{
+ PDumpOSLock();
+}
+
+void PDumpUnlock(void)
+{
+ PDumpOSUnlock();
+}
+
+#if defined(PVR_TESTING_UTILS)
+extern void PDumpOSDumpState(void);
+
+#if !defined(LINUX)
+/* Stub for non-Linux builds; must match the (void) declaration above */
+void PDumpOSDumpState(void)
+{
+}
+#endif
+
+void PDumpCommonDumpState(IMG_BOOL bDumpOSLayerState)
+{
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpInitialised( %d )",
+ g_PDumpInitialised) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpScript.sCh.hInit( %p ) g_PDumpScript.sCh.hMain( %p ) g_PDumpScript.sCh.hDeinit( %p )",
+ g_PDumpScript.sCh.hInit, g_PDumpScript.sCh.hMain, g_PDumpScript.sCh.hDeinit) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sCh.hInit( %p ) g_PDumpParameters.sCh.hMain( %p ) g_PDumpParameters.sCh.hDeinit( %p )",
+ g_PDumpParameters.sCh.hInit, g_PDumpParameters.sCh.hMain, g_PDumpParameters.sCh.hDeinit) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.sWOff.ui32Init( %d ) g_PDumpParameters.sWOff.ui32Main( %d ) g_PDumpParameters.sWOff.ui32Deinit( %d )",
+ g_PDumpParameters.sWOff.ui32Init, g_PDumpParameters.sWOff.ui32Main, g_PDumpParameters.sWOff.ui32Deinit) );
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpParameters.ui32FileIdx( %d )",
+ g_PDumpParameters.ui32FileIdx) );
+
+ PVR_LOG(("--- PDUMP COMMON: g_PDumpCtrl( %p ) bInitPhaseActive( %d ) ui32Flags( %x )",
+ &g_PDumpCtrl, g_PDumpCtrl.bInitPhaseActive, g_PDumpCtrl.ui32Flags) );
+ PVR_LOG(("--- PDUMP COMMON: ui32DefaultCapMode( %d ) ui32CurrentFrame( %d )",
+ g_PDumpCtrl.ui32DefaultCapMode, g_PDumpCtrl.ui32CurrentFrame) );
+ PVR_LOG(("--- PDUMP COMMON: sCaptureRange.ui32Start( %x ) sCaptureRange.ui32End( %x ) sCaptureRange.ui32Interval( %u )",
+ g_PDumpCtrl.sCaptureRange.ui32Start, g_PDumpCtrl.sCaptureRange.ui32End, g_PDumpCtrl.sCaptureRange.ui32Interval) );
+ PVR_LOG(("--- PDUMP COMMON: bCaptureOn( %d ) bSuspended( %d ) bInPowerTransition( %d )",
+ g_PDumpCtrl.bCaptureOn, g_PDumpCtrl.bSuspended, g_PDumpCtrl.bInPowerTransition) );
+
+ if (bDumpOSLayerState)
+ {
+ PDumpOSDumpState();
+ }
+}
+#endif
+
+
+PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ppsPDumpConnectionData != NULL);
+
+ psPDumpConnectionData = OSAllocMem(sizeof(*psPDumpConnectionData));
+ if (psPDumpConnectionData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ eError = OSLockCreate(&psPDumpConnectionData->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lockcreate;
+ }
+
+ dllist_init(&psPDumpConnectionData->sListHead);
+ OSAtomicWrite(&psPDumpConnectionData->sRefCount, 1);
+ psPDumpConnectionData->bLastInto = IMG_FALSE;
+ psPDumpConnectionData->ui32LastSetFrameNumber = PDUMP_FRAME_UNSET;
+ psPDumpConnectionData->bLastTransitionFailed = IMG_FALSE;
+
+ /*
+ * Although we don't take a ref count here, handle base destruction
+ * will ensure that any resource that might trigger us to do a
+ * Transition will have been freed before the sync blocks which
+ * are keeping the sync connection data alive.
+ */
+ psPDumpConnectionData->psSyncConnectionData = psSyncConnectionData;
+ *ppsPDumpConnectionData = psPDumpConnectionData;
+
+ return PVRSRV_OK;
+
+fail_lockcreate:
+ OSFreeMem(psPDumpConnectionData);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ _PDumpConnectionRelease(psPDumpConnectionData);
+}
+
+#else /* defined(PDUMP) */
+/* disable warning about empty module */
+#ifdef _WIN32
+#pragma warning (disable:4206)
+#endif
+#endif /* defined(PDUMP) */
+/*****************************************************************************
+ End of file (pdump_common.c)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_km.h b/drivers/gpu/drm/img-rogue/1.10/pdump_km.h
new file mode 100644
index 00000000000000..c7e9dd01e03668
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_km.h
@@ -0,0 +1,926 @@
+/*************************************************************************/ /*!
+@File
+@Title pdump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for pdump functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PDUMP_KM_H_
+#define _PDUMP_KM_H_
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+/* services/srvkm/include/ */
+#include "device.h"
+
+/* include/ */
+#include "pvrsrv_error.h"
+
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "connection_server.h"
+#include "sync_server.h"
+/*
+ * Pull in pdump flags from services include
+ */
+#include "pdump.h"
+#include "pdumpdefs.h"
+
+/* Define this to enable the PDUMP_HERE trace in the server */
+#undef PDUMP_TRACE
+
+#if defined(PDUMP_TRACE)
+#define PDUMP_HERE(a) if (ui32Flags & PDUMP_FLAGS_DEBUG) PVR_DPF((PVR_DBG_WARNING, "HERE %d", (a)))
+#define PDUMP_HEREA(a) PVR_DPF((PVR_DBG_WARNING, "HERE ALWAYS %d", (a)))
+#else
+#define PDUMP_HERE(a) (void)(a);
+#define PDUMP_HEREA(a) (void)(a);
+#endif
+
+#define PDUMP_PD_UNIQUETAG (IMG_HANDLE)0
+#define PDUMP_PT_UNIQUETAG (IMG_HANDLE)0
+
+/* Invalid value for block number - to be used in BLKMODE of PDump */
+#define PDUMP_BLOCKNUM_INVALID IMG_UINT32_MAX
+
+#if defined(PDUMP_DEBUG_OUTFILES)
+/* counter increments each time debug write is called */
+extern IMG_UINT32 g_ui32EveryLineCounter;
+#endif
+
+typedef struct _PDUMP_CONNECTION_DATA_ PDUMP_CONNECTION_DATA;
+typedef PVRSRV_ERROR (*PFN_PDUMP_TRANSITION)(void **pvData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags);
+
+#ifdef PDUMP
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg) do \
+ { PVRSRV_ERROR _eE;\
+ _eE = PDumpPanic(((RGX_PDUMP_PANIC_ ## _id) & 0xFFFF), _msg, __FUNCTION__, __LINE__); \
+ PVR_LOG_IF_ERROR(_eE, "PDumpPanic");\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) do \
+ { (void) PDumpCaptureError((_err), (_msg), __FUNCTION__, __LINE__);\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ /* Shared across pdump_x files */
+ PVRSRV_ERROR PDumpInitCommon(void);
+ void PDumpDeInitCommon(void);
+ IMG_BOOL PDumpReady(void);
+ void PDumpGetParameterZeroPageInfo(PDUMP_FILEOFFSET_T *puiZeroPageOffset,
+ size_t *puiZeroPageSize,
+ const IMG_CHAR **ppszZeroPageFilename);
+
+ void PDumpConnectionNotify(void);
+ void PDumpDisconnectionNotify(void);
+
+ void PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient);
+ PVRSRV_ERROR PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Frame);
+ PVRSRV_ERROR PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32* pui32Frame);
+ PVRSRV_ERROR PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize);
+
+
+ PVRSRV_ERROR PDumpReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegDst,
+ IMG_UINT32 ui32RegSrc,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpPhysHandleToInternalVar64(IMG_CHAR *pszInternalVar,
+ IMG_HANDLE hPdumpPages,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpMemLabelToInternalVar64(IMG_CHAR *pszInternalVar,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpWriteVarORValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpWriteVarANDValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpWriteVarSHRValueOp (const IMG_CHAR *pszInternalVariable,
+ const IMG_UINT64 ui64Value,
+ const IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpInternalVarToReg32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpInternalVarToReg64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpMemLabelToMem32(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpMemLabelToMem64(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToMem32(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToMem64(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegLabelToInternalVar(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32Reg,
+ IMG_CHAR *pszInternalVar,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpSAW(IMG_CHAR *pszDevSpaceName,
+ IMG_UINT32 ui32HPOffsetBytes,
+ IMG_UINT32 ui32NumSaveBytes,
+ IMG_CHAR *pszOutfileName,
+ IMG_UINT32 ui32OutfileOffsetByte,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+ PVRSRV_ERROR PDumpRegPolKM(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Flags,
+ PDUMP_POLL_OPERATOR eOperator);
+
+PVRSRV_ERROR PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags);
+
+
+/**************************************************************************/ /*!
+@Function PDumpImageDescriptorKM
+@Description PDumps image data out as an IMGBv2 data section
+@Input psDeviceNode Pointer to device node.
+@Input ui32MMUContextID PDUMP MMU context ID.
+@Input          ui32Size            File name string size.
+@Input          pszSABFileName      Pointer to string containing file name of
+                                    the image being SABed
+@Input          sData               GPU virtual address of this surface.
+@Input          ui32DataSize        Image data size
+@Input          ui32LogicalWidth    Image logical width
+@Input          ui32LogicalHeight   Image logical height
+@Input          ui32PhysicalWidth   Image physical width
+@Input          ui32PhysicalHeight  Image physical height
+@Input          ePixFmt             Image pixel format
+@Input          eMemLayout          Image memory layout
+@Input          eFBCompression      FB compression mode
+@Input          paui32FBCClearColour FB clear colour (Only applicable to FBC
+                                    surfaces)
+@Input          sHeader             GPU virtual address of the headers of this
+                                    surface (Only applicable to FBC surfaces)
+@Input          ui32HeaderSize      Header size (Only applicable to FBC
+                                    surfaces)
+@Input          ui32PDumpFlags      PDUMP flags
+@Return         PVRSRV_ERROR:       PVRSRV_OK on success. Otherwise, an
+                                    error code describing the failure.
+*/ /***************************************************************************/
+PVRSRV_ERROR PDumpImageDescriptorKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR *pszSABFileName,
+ IMG_DEV_VIRTADDR sData,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeader,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+
+PVRSRV_ERROR PDumpReadRegKM(IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Address,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32PDumpFlags);
+
+ PVRSRV_ERROR PDumpCommentWithFlags(IMG_UINT32 ui32Flags,
+ IMG_CHAR* pszFormat,
+ ...) __printf(2, 3);
+
+ PVRSRV_ERROR PDumpCommentWithFlagsVA(IMG_UINT32 ui32Flags,
+ const IMG_CHAR * pszFormat,
+ va_list args);
+
+ PVRSRV_ERROR PDumpPanic(IMG_UINT32 ui32PanicNo,
+ IMG_CHAR* pszPanicMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline);
+
+ PVRSRV_ERROR PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+ IMG_CHAR* pszErrorMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline);
+
+ PVRSRV_ERROR PDumpPDReg(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32dwData,
+ IMG_HANDLE hUniqueTag);
+ PVRSRV_ERROR PDumpPDRegWithFlags(PDUMP_MMU_ATTRIB *psMMUAttrib,
+ IMG_UINT32 ui32Reg,
+ IMG_UINT32 ui32Data,
+ IMG_UINT32 ui32Flags,
+ IMG_HANDLE hUniqueTag);
+
+ PVRSRV_ERROR PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame);
+
+ PVRSRV_ERROR PDumpGetStateKM(IMG_UINT64 *ui64State);
+
+ PVRSRV_ERROR PDumpGetCurrentBlockKM(IMG_UINT32 *pui32CurrentBlock);
+ PVRSRV_ERROR PDumpIsFirstFrameInBlockKM(IMG_BOOL *bIsFirstInBlock);
+
+ PVRSRV_ERROR PDumpIsCaptureFrameKM(IMG_BOOL *bIsCaptureRange);
+
+ PVRSRV_ERROR PDumpRegRead32(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 dwRegOffset,
+ IMG_UINT32 ui32Flags);
+ PVRSRV_ERROR PDumpRegRead64(IMG_CHAR *pszPDumpRegName,
+ const IMG_UINT32 dwRegOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpRegRead64ToInternalVar(IMG_CHAR *pszPDumpRegName,
+ IMG_CHAR *pszInternalVar,
+ const IMG_UINT32 dwRegOffset,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpIDLWithFlags(IMG_UINT32 ui32Clocks, IMG_UINT32 ui32Flags);
+ PVRSRV_ERROR PDumpIDL(IMG_UINT32 ui32Clocks);
+
+ PVRSRV_ERROR PDumpRegBasedCBP(IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32WPosVal,
+ IMG_UINT32 ui32PacketSize,
+ IMG_UINT32 ui32BufferSize,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpTRG(IMG_CHAR *pszMemSpace,
+ IMG_UINT32 ui32MMUCtxID,
+ IMG_UINT32 ui32RegionID,
+ IMG_BOOL bEnable,
+ IMG_UINT64 ui64VAddr,
+ IMG_UINT64 ui64LenBytes,
+ IMG_UINT32 ui32XStride,
+ IMG_UINT32 ui32Flags);
+
+ PVRSRV_ERROR PDumpCreateLockKM(void);
+ void PDumpDestroyLockKM(void);
+ void PDumpLock(void);
+ void PDumpUnlock(void);
+
+ PVRSRV_ERROR PDumpIfKM(IMG_CHAR *pszPDumpCond);
+ PVRSRV_ERROR PDumpElseKM(IMG_CHAR *pszPDumpCond);
+ PVRSRV_ERROR PDumpFiKM(IMG_CHAR *pszPDumpCond);
+
+ void PDumpPowerTransitionStart(void);
+ void PDumpPowerTransitionEnd(void);
+ IMG_BOOL PDumpInPowerTransition(void);
+ IMG_BOOL PDumpIsDumpSuspended(void);
+
+ /*!
+ * @name PDumpWriteParameter
+ * @brief General function for writing to the PDump parameter stream,
+ * used mainly for memory dumps. For the script stream it is
+ * usually more convenient to use PDumpWriteScript below.
+ * @param psui8Data - data to write
+ * @param ui32Size - size of write
+ * @param ui32Flags - PDump flags
+ * @param pui32FileOffset - on return contains the file offset to
+ * the start of the parameter data
+ * @param aszFilenameStr - pointer to at least a 20 char buffer to
+ * return the parameter filename
+ * @return error
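+ *
+ * A typical call (taken from PDumpImageDescriptorKM in pdump_common.c):
+ *
+ *     eErr = PDumpWriteParameter(abyPDumpDesc, IMAGE_HEADER_SIZE,
+ *                                ui32PDumpFlags, &ui32ParamOutPos,
+ *                                pszFileName);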
+ */
+ PVRSRV_ERROR PDumpWriteParameter(IMG_UINT8 *psui8Data, IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Flags, IMG_UINT32* pui32FileOffset,
+ IMG_CHAR* aszFilenameStr);
+
+ /*!
+ * @name PDumpWriteScript
+ * @brief Write a PDumpOS-created string to the "script" output stream
+ * @param hString - PDump OS layer handle of string buffer to write
+ * @param ui32Flags - PDump flags
+ * @return IMG_TRUE on success.
+ */
+ IMG_BOOL PDumpWriteScript(IMG_HANDLE hString, IMG_UINT32 ui32Flags);
+
+ /*
+ PDumpWriteShiftedMaskedValue():
+
+ loads the "reference" address into an internal PDump register,
+ optionally shifts it right,
+ optionally shifts it left,
+ optionally masks it
+ then finally writes the computed value to the given destination address
+
+ i.e. it emits pdump language equivalent to this expression:
+
+ dest = ((&ref) >> SHRamount << SHLamount) & MASK
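+
+ For example (symbolic names illustrative), with SHRamount=4, SHLamount=0
+ and a 16-bit mask, the emitted script resembles:
+
+     WRW :DESTSPC:$1 :REFSPC:REFSYM:0x0
+     SHR :DESTSPC:$1 :DESTSPC:$1 0x4
+     AND :DESTSPC:$1 :DESTSPC:$1 0xFFFF
+     WRW :DESTSPC:DESTSYM:0x0 :DESTSPC:$1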
+ */
+extern PVRSRV_ERROR
+PDumpWriteShiftedMaskedValue(const IMG_CHAR *pszDestRegspaceName,
+ const IMG_CHAR *pszDestSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefRegspaceName,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ IMG_UINT32 uiSHRAmount,
+ IMG_UINT32 uiSHLAmount,
+ IMG_UINT32 uiMask,
+ IMG_DEVMEM_SIZE_T uiWordSize,
+ IMG_UINT32 uiPDumpFlags);
+
+ /*
+ PDumpWriteSymbAddress():
+
+ writes the address of the "reference" to the offset given
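+
+ when ui32AlignShift != ui32Shift, the address is first rebased through an
+ internal register, i.e. it emits the equivalent of
+ dest = (&ref >> AlignShift) << Shift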
+ */
+extern PVRSRV_ERROR
+PDumpWriteSymbAddress(const IMG_CHAR *pszDestSpaceName,
+ IMG_DEVMEM_OFFSET_T uiDestOffset,
+ const IMG_CHAR *pszRefSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiRefOffset,
+ const IMG_CHAR *pszPDumpDevName,
+ IMG_UINT32 ui32WordSize,
+ IMG_UINT32 ui32AlignShift,
+ IMG_UINT32 ui32Shift,
+ IMG_UINT32 uiPDumpFlags);
+
+/* Register the connection with the PDump subsystem */
+extern PVRSRV_ERROR PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData);
+
+/* Unregister the connection with the PDump subsystem */
+extern void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData);
+
+/* Register for notification of PDump Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+ PFN_PDUMP_TRANSITION pfnCallback,
+ void *hPrivData,
+ void **ppvHandle);
+
+/* Unregister notification of PDump Transition */
+extern void PDumpUnregisterTransitionCallback(void *pvHandle);
+
+/* Notify PDump of a Transition into/out of capture range */
+extern PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags);
+
+/* Wires-up a MIPS TLB in the page table*/
+extern PVRSRV_ERROR PdumpWireUpMipsTLB(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32AllocationFlags,
+ IMG_UINT32 ui32Flags);
+
+/*Invalidate a MIPS TLB in the page table */
+PVRSRV_ERROR PdumpInvalidateMipsTLB(PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32MipsTLBValidClearMask,
+ IMG_UINT32 ui32Flags);
+
+
+
+ #define PDUMP_LOCK PDumpLock
+ #define PDUMP_UNLOCK PDumpUnlock
+
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT PDumpDeInitCommon
+ #define PDUMPREG32 PDumpReg32
+ #define PDUMPREG64 PDumpReg64
+ #define PDUMPREGREAD32 PDumpRegRead32
+ #define PDUMPREGREAD64 PDumpRegRead64
+ #define PDUMPCOMMENT(...) PDumpCommentWithFlags(PDUMP_FLAGS_CONTINUOUS, __VA_ARGS__)
+ #define PDUMPCOMMENTWITHFLAGS PDumpCommentWithFlags
+ #define PDUMPREGPOL PDumpRegPolKM
+ #define PDUMPPDREG PDumpPDReg
+ #define PDUMPPDREGWITHFLAGS PDumpPDRegWithFlags
+ #define PDUMPREGBASEDCBP PDumpRegBasedCBP
+ #define PDUMPENDINITPHASE PDumpStopInitPhase
+ #define PDUMPIDLWITHFLAGS PDumpIDLWithFlags
+ #define PDUMPIDL PDumpIDL
+ #define PDUMPPOWCMDSTART PDumpPowerTransitionStart
+ #define PDUMPPOWCMDEND PDumpPowerTransitionEnd
+ #define PDUMPPOWCMDINTRANS PDumpInPowerTransition
+ #define PDUMPIF PDumpIfKM
+ #define PDUMPELSE PDumpElseKM
+ #define PDUMPFI PDumpFiKM
+#else
+ /*
+ We should be clearer about which functions can be called
+ across the bridge as this looks rather unbalanced
+ */
+
+/*! Macro used to record a panic in the PDump script stream */
+#define PDUMP_PANIC(_id, _msg) ((void)0);
+
+/*! Macro used to record a driver error in the PDump script stream to invalidate the capture */
+#define PDUMP_ERROR(_err, _msg) ((void)0);
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpInitCommon)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpInitCommon(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpConnectionNotify)
+#endif
+static INLINE void
+PDumpConnectionNotify(void)
+{
+ return;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDisconnectionNotify)
+#endif
+static INLINE void
+PDumpDisconnectionNotify(void)
+{
+ return;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCreateLockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCreateLockKM(void)
+{
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpDestroyLockKM)
+#endif
+static INLINE void
+PDumpDestroyLockKM(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpLock)
+#endif
+static INLINE void
+PDumpLock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnlock)
+#endif
+static INLINE void
+PDumpUnlock(void)
+{
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpStopInitPhase)
+#endif
+static INLINE void
+PDumpStopInitPhase(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient)
+{
+ PVR_UNREFERENCED_PARAMETER(bPDumpClient);
+ PVR_UNREFERENCED_PARAMETER(bInitClient);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDevNode);
+	PVR_UNREFERENCED_PARAMETER(ui32Frame);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetFrameKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32* pui32Frame)
+{
+	PVR_UNREFERENCED_PARAMETER(psConnection);
+	PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+	PVR_UNREFERENCED_PARAMETER(pui32Frame);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCommentKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCommentKM(IMG_CHAR *pszComment, IMG_UINT32 ui32Flags)
+{
+ PVR_UNREFERENCED_PARAMETER(pszComment);
+ PVR_UNREFERENCED_PARAMETER(ui32Flags);
+ return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpSetDefaultCaptureParamsKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpSetDefaultCaptureParamsKM(IMG_UINT32 ui32Mode,
+ IMG_UINT32 ui32Start,
+ IMG_UINT32 ui32End,
+ IMG_UINT32 ui32Interval,
+ IMG_UINT32 ui32MaxParamFileSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32Mode);
+ PVR_UNREFERENCED_PARAMETER(ui32Start);
+ PVR_UNREFERENCED_PARAMETER(ui32End);
+ PVR_UNREFERENCED_PARAMETER(ui32Interval);
+ PVR_UNREFERENCED_PARAMETER(ui32MaxParamFileSize);
+
+ return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPanic)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpPanic(IMG_UINT32 ui32PanicNo,
+ IMG_CHAR* pszPanicMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32PanicNo);
+ PVR_UNREFERENCED_PARAMETER(pszPanicMsg);
+ PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+ PVR_UNREFERENCED_PARAMETER(ui32PPline);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpCaptureError)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpCaptureError(PVRSRV_ERROR ui32ErrorNo,
+ IMG_CHAR* pszErrorMsg,
+ const IMG_CHAR* pszPPFunc,
+ IMG_UINT32 ui32PPline)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32ErrorNo);
+ PVR_UNREFERENCED_PARAMETER(pszErrorMsg);
+ PVR_UNREFERENCED_PARAMETER(pszPPFunc);
+ PVR_UNREFERENCED_PARAMETER(ui32PPline);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsLastCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsLastCaptureFrameKM(IMG_BOOL *pbIsLastCaptureFrame)
+{
+ *pbIsLastCaptureFrame = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetStateKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetStateKM(IMG_UINT64 *ui64State)
+{
+ *ui64State = 0;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsCaptureFrameKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsCaptureFrameKM(IMG_BOOL *bIsCapturing)
+{
+ *bIsCapturing = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetCurrentBlockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetCurrentBlockKM(IMG_UINT32 *pui32BlockNum)
+{
+ *pui32BlockNum = PDUMP_BLOCKNUM_INVALID;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpIsFirstFrameInBlockKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpIsFirstFrameInBlockKM(IMG_BOOL *bIsFirstInBlock)
+{
+ *bIsFirstInBlock = IMG_FALSE;
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpBitmapKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpBitmapKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32FileOffset,
+ IMG_UINT32 ui32Width,
+ IMG_UINT32 ui32Height,
+ IMG_UINT32 ui32StrideInBytes,
+ IMG_DEV_VIRTADDR sDevBaseAddr,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ PDUMP_PIXEL_FORMAT ePixelFormat,
+ IMG_UINT32 ui32AddrMode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(pszFileName);
+ PVR_UNREFERENCED_PARAMETER(ui32FileOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Width);
+ PVR_UNREFERENCED_PARAMETER(ui32Height);
+ PVR_UNREFERENCED_PARAMETER(ui32StrideInBytes);
+ PVR_UNREFERENCED_PARAMETER(sDevBaseAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(ePixelFormat);
+ PVR_UNREFERENCED_PARAMETER(ui32AddrMode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpImageDescriptorKM)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpImageDescriptorKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MMUContextID,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR *pszSABFileName,
+ IMG_DEV_VIRTADDR sData,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_DEV_VIRTADDR sHeader,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32MMUContextID);
+ PVR_UNREFERENCED_PARAMETER(ui32Size);
+ PVR_UNREFERENCED_PARAMETER(pszSABFileName);
+ PVR_UNREFERENCED_PARAMETER(sData);
+ PVR_UNREFERENCED_PARAMETER(ui32DataSize);
+ PVR_UNREFERENCED_PARAMETER(ui32LogicalWidth);
+ PVR_UNREFERENCED_PARAMETER(ui32LogicalHeight);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalWidth);
+ PVR_UNREFERENCED_PARAMETER(ui32PhysicalHeight);
+ PVR_UNREFERENCED_PARAMETER(ePixFmt);
+ PVR_UNREFERENCED_PARAMETER(eMemLayout);
+ PVR_UNREFERENCED_PARAMETER(eFBCompression);
+ PVR_UNREFERENCED_PARAMETER(paui32FBCClearColour);
+ PVR_UNREFERENCED_PARAMETER(sHeader);
+ PVR_UNREFERENCED_PARAMETER(ui32HeaderSize);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterConnection)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpRegisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData,
+ PDUMP_CONNECTION_DATA **ppsPDumpConnectionData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncConnectionData);
+ PVR_UNREFERENCED_PARAMETER(ppsPDumpConnectionData);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterConnection)
+#endif
+static INLINE
+void PDumpUnregisterConnection(PDUMP_CONNECTION_DATA *psPDumpConnectionData)
+{
+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpRegisterTransitionCallback)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpRegisterTransitionCallback(PDUMP_CONNECTION_DATA *psPDumpConnectionData,
+ PFN_PDUMP_TRANSITION pfnCallback,
+ void *hPrivData,
+ void **ppvHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+ PVR_UNREFERENCED_PARAMETER(pfnCallback);
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+ PVR_UNREFERENCED_PARAMETER(ppvHandle);
+
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpUnregisterTransitionCallback)
+#endif
+static INLINE
+void PDumpUnregisterTransitionCallback(void *pvHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(pvHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpTransition)
+#endif
+static INLINE
+PVRSRV_ERROR PDumpTransition(PDUMP_CONNECTION_DATA *psPDumpConnectionData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPDumpConnectionData);
+ PVR_UNREFERENCED_PARAMETER(bInto);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+ #if defined WIN32
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT(...) / ## * PDUMPDEINIT(__VA_ARGS__) * ## /
+ #define PDUMPREG32(...) / ## * PDUMPREG32(__VA_ARGS__) * ## /
+ #define PDUMPREG64(...) / ## * PDUMPREG64(__VA_ARGS__) * ## /
+ #define PDUMPREGREAD32(...) / ## * PDUMPREGREAD32(__VA_ARGS__) * ## /
+ #define PDUMPREGREAD64(...) / ## * PDUMPREGREAD64(__VA_ARGS__) * ## /
+ #define PDUMPCOMMENT(...) / ## * PDUMPCOMMENT(__VA_ARGS__) * ## /
+ #define PDUMPREGPOL(...) / ## * PDUMPREGPOL(__VA_ARGS__) * ## /
+ #define PDUMPPDREG(...) / ## * PDUMPPDREG(__VA_ARGS__) * ## /
+ #define PDUMPPDREGWITHFLAGS(...) / ## * PDUMPPDREGWITHFLAGS(__VA_ARGS__) * ## /
+ #define PDUMPSYNC(...) / ## * PDUMPSYNC(__VA_ARGS__) * ## /
+ #define PDUMPCOPYTOMEM(...) / ## * PDUMPCOPYTOMEM(__VA_ARGS__) * ## /
+ #define PDUMPWRITE(...) / ## * PDUMPWRITE(__VA_ARGS__) * ## /
+ #define PDUMPCBP(...) / ## * PDUMPCBP(__VA_ARGS__) * ## /
+ #define PDUMPREGBASEDCBP(...) / ## * PDUMPREGBASEDCBP(__VA_ARGS__) * ## /
+ #define PDUMPCOMMENTWITHFLAGS(...) / ## * PDUMPCOMMENTWITHFLAGS(__VA_ARGS__) * ## /
+ #define PDUMPMALLOCPAGESPHYS(...) / ## * PDUMPMALLOCPAGESPHYS(__VA_ARGS__) * ## /
+ #define PDUMPENDINITPHASE(...) / ## * PDUMPENDINITPHASE(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXREG(...) / ## * PDUMPMSVDXREG(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXREGWRITE(...) / ## * PDUMPMSVDXREGWRITE(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXREGREAD(...) / ## * PDUMPMSVDXREGREAD(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXPOLEQ(...) / ## * PDUMPMSVDXPOLEQ(__VA_ARGS__) * ## /
+ #define PDUMPMSVDXPOL(...) / ## * PDUMPMSVDXPOL(__VA_ARGS__) * ## /
+ #define PDUMPIDLWITHFLAGS(...) / ## * PDUMPIDLWITHFLAGS(__VA_ARGS__) * ## /
+ #define PDUMPIDL(...) / ## * PDUMPIDL(__VA_ARGS__) * ## /
+ #define PDUMPPOWCMDSTART(...) / ## * PDUMPPOWCMDSTART(__VA_ARGS__) * ## /
+ #define PDUMPPOWCMDEND(...) / ## * PDUMPPOWCMDEND(__VA_ARGS__) * ## /
+	#define PDUMP_LOCK(...)			/ ## * PDUMP_LOCK(__VA_ARGS__) * ## /
+	#define PDUMP_UNLOCK(...)			/ ## * PDUMP_UNLOCK(__VA_ARGS__) * ## /
+ #else
+ #if defined LINUX || defined GCC_IA32 || defined GCC_ARM || defined __QNXNTO__ || defined(INTEGRITY_OS)
+ #define PDUMPINIT PDumpInitCommon
+ #define PDUMPDEINIT(args...)
+ #define PDUMPREG32(args...)
+ #define PDUMPREG64(args...)
+ #define PDUMPREGREAD32(args...)
+ #define PDUMPREGREAD64(args...)
+ #define PDUMPCOMMENT(args...)
+ #define PDUMPREGPOL(args...)
+ #define PDUMPPDREG(args...)
+ #define PDUMPPDREGWITHFLAGS(args...)
+ #define PDUMPSYNC(args...)
+ #define PDUMPCOPYTOMEM(args...)
+ #define PDUMPWRITE(args...)
+ #define PDUMPREGBASEDCBP(args...)
+ #define PDUMPCOMMENTWITHFLAGS(args...)
+ #define PDUMPENDINITPHASE(args...)
+ #define PDUMPIDLWITHFLAGS(args...)
+ #define PDUMPIDL(args...)
+ #define PDUMPPOWCMDSTART(args...)
+ #define PDUMPPOWCMDEND(args...)
+ #define PDUMP_LOCK(args...)
+ #define PDUMP_UNLOCK(args...)
+
+ #else
+ #error Compiler not specified
+ #endif
+ #endif
+#endif
+
+
+#endif /* _PDUMP_KM_H_ */
+
+/******************************************************************************
+ End of file (pdump_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_mmu.c b/drivers/gpu/drm/img-rogue/1.10/pdump_mmu.c
new file mode 100644
index 00000000000000..d59a1d64d22923
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_mmu.c
@@ -0,0 +1,1326 @@
+/*************************************************************************/ /*!
+@File
+@Title MMU PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common PDump (MMU specific) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if defined (PDUMP)
+
+#include "img_types.h"
+#include "pdump_mmu.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#define MAX_PDUMP_MMU_CONTEXTS (10)
+static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1;
+
+
+#define MMUPX_FMT(X) ((X<3) ? ((X<2) ? "MMUPT_\0" : "MMUPD_\0") : "MMUPC_\0")
+#define MIPSMMUPX_FMT(X) ((X<3) ? ((X<2) ? "MIPSMMUPT_\0" : "MIPSMMUPD_\0") : "MIPSMMUPC_\0")
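+/* Pick the symbolic-name prefix for an MMU level: PT for level 1, PD for
+   level 2, PC for anything higher; the MIPS variants carry a MIPSMMU
+   prefix. */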
+
+
+/* Array used to look-up debug strings from MMU_LEVEL */
+static const IMG_CHAR * const apszMMULevelStringLookup[MMU_LEVEL_LAST] =
+{
+ "MMU_LEVEL_0",
+ "PAGE_TABLE",
+ "PAGE_DIRECTORY",
+ "PAGE_CATALOGUE",
+};
+
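+/* Accumulates contiguous byte runs across successive calls (in the static
+   state below) and emits them as a single LDB command when flushed, either
+   explicitly via bFlush or implicitly when a non-contiguous run arrives.
+   Keeping the state static is safe only because callers hold the PDump
+   lock. */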
+static PVRSRV_ERROR
+_ContiguousPDumpBytes(const IMG_CHAR *pszSymbolicName,
+ IMG_UINT32 ui32SymAddrOffset,
+ IMG_BOOL bFlush,
+ IMG_UINT32 uiNumBytes,
+ void *pvBytes,
+ IMG_UINT32 ui32Flags)
+{
+ static const IMG_CHAR *pvBeyondLastPointer;
+ static const IMG_CHAR *pvBasePointer;
+ static IMG_UINT32 ui32BeyondLastOffset;
+ static IMG_UINT32 ui32BaseOffset;
+ static IMG_UINT32 uiAccumulatedBytes;
+ IMG_UINT32 ui32ParamOutPos;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_AND_FILE_STRING();
+ PVR_UNREFERENCED_PARAMETER(ui32MaxLenFileName);
+
+ /* Caller has PDUMP_LOCK */
+
+ if (!bFlush && uiAccumulatedBytes > 0)
+ {
+		/* Check contiguity: if the new bytes do not follow straight on from the accumulated run, flush first */
+
+ if (pvBeyondLastPointer != pvBytes ||
+ ui32SymAddrOffset != ui32BeyondLastOffset
+ /* NB: ought to check that symbolic name agrees too, but
+ we know this always to be the case in the current use-case */
+ )
+ {
+ bFlush = IMG_TRUE;
+ }
+ }
+
+ /* Flush if necessary */
+ if (bFlush && uiAccumulatedBytes > 0)
+ {
+ eErr = PDumpWriteParameter((IMG_UINT8 *)(uintptr_t)pvBasePointer,
+ uiAccumulatedBytes, ui32Flags,
+ &ui32ParamOutPos, pszFileName);
+ if (eErr == PVRSRV_OK)
+ {
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLenScript,
+ "LDB %s:0x%X 0x%X 0x%X %s",
+ /* dest */
+ pszSymbolicName,
+ ui32BaseOffset,
+ /* size */
+ uiAccumulatedBytes,
+ /* file offset */
+ ui32ParamOutPos,
+ /* filename */
+ pszFileName);
+ PVR_LOGG_IF_ERROR(eErr, "PDumpOSBufprintf", ErrOut);
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ }
+ else if (eErr != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+ PVR_LOGG_IF_ERROR(eErr, "PDumpWriteParameter", ErrOut);
+ }
+ else
+ {
+			/* Writing to the parameter file is prevented by the flags and
+			 * the current driver state, so skip the script write and do
+			 * not treat this as an error.
+			 */
+ eErr = PVRSRV_OK;
+ }
+
+ uiAccumulatedBytes = 0;
+ }
+
+
+ /* Initialise offsets and pointers if necessary */
+ if (uiAccumulatedBytes == 0)
+ {
+ ui32BaseOffset = ui32BeyondLastOffset = ui32SymAddrOffset;
+ pvBeyondLastPointer = pvBasePointer = (const IMG_CHAR *)pvBytes;
+ }
+
+ /* Accumulate some bytes */
+ ui32BeyondLastOffset += uiNumBytes;
+ pvBeyondLastPointer += uiNumBytes;
+ uiAccumulatedBytes += uiNumBytes;
+
+ErrOut:
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUMalloc
+ * Inputs : pszPDumpDevName, eMMULevel, psDevPAddr, ui32Size,
+ *          ui32Align, eMMUType
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Writes a MALLOC command for a page-level object
+ *               (PT/PD/PC) to the PDump script stream
+**************************************************************************/
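+/* Illustrative output shape (device name and values are examples only):
+     -- MALLOC :DEV:PAGE_CATALOGUE Size=0x00001000 Alignment=0x00001000 DevPAddr=0x0000000012345000
+     MALLOC :DEV:MMUPC_0000000012345000 0x1000 0x1000
+*/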
+PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align,
+ PDUMP_MMU_TYPE eMMUType)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+ IMG_UINT64 ui64SymbolicAddr;
+ IMG_CHAR *pszMMUPX;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (eMMULevel >= MMU_LEVEL_LAST)
+ {
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ /*
+ Write a comment to the PDump2 script streams indicating the memory allocation
+ */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X DevPAddr=0x%08"IMG_UINT64_FMTSPECX,
+ pszPDumpDevName,
+ apszMMULevelStringLookup[eMMULevel],
+ ui32Size,
+ ui32Align,
+ psDevPAddr->uiAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ construct the symbolic address
+ */
+ ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+ /*
+ Write to the MMU script stream indicating the memory allocation
+ */
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s%016"IMG_UINT64_FMTSPECX" 0x%X 0x%X",
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64SymbolicAddr,
+ ui32Size,
+ ui32Align
+ /* don't need this sDevPAddr.uiAddr*/);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUFree
+ * Inputs : pszPDumpDevName, eMMULevel, psDevPAddr, eMMUType
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Writes a FREE command for a page-level object
+ *               (PT/PD/PC) to the PDump script stream
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ PDUMP_MMU_TYPE eMMUType)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT64 ui64SymbolicAddr;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+ IMG_CHAR *pszMMUPX;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (eMMULevel >= MMU_LEVEL_LAST)
+ {
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+ /*
+ Write a comment to the PDUMP2 script streams indicating the memory free
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s",
+ pszPDumpDevName, apszMMULevelStringLookup[eMMULevel]);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ construct the symbolic address
+ */
+ ui64SymbolicAddr = (IMG_UINT64)psDevPAddr->uiAddr;
+
+ /*
+ Write to the MMU script stream indicating the memory free
+ */
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s%016"IMG_UINT64_FMTSPECX,
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64SymbolicAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUMalloc2
+ * Inputs : pszPDumpDevName, pszTableType, pszSymbolicAddr,
+ *          ui32Size, ui32Align
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Writes a MALLOC command for a named table object,
+ *               using a caller-supplied symbolic address
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ /*
+ Write a comment to the PDump2 script streams indicating the memory allocation
+ */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- MALLOC :%s:%s Size=0x%08X Alignment=0x%08X\n",
+ pszPDumpDevName,
+ pszTableType,
+ ui32Size,
+ ui32Align);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ Write to the MMU script stream indicating the memory allocation
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC :%s:%s 0x%X 0x%X\n",
+ pszPDumpDevName,
+ pszSymbolicAddr,
+ ui32Size,
+ ui32Align
+ /* don't need this sDevPAddr.uiAddr*/);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUFree2
+ * Inputs : pszPDumpDevName, pszTableType, pszSymbolicAddr
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Writes a FREE command for a named table object,
+ *               using a caller-supplied symbolic address
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ /*
+ Write a comment to the PDUMP2 script streams indicating the memory free
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- FREE :%s:%s\n",
+ pszPDumpDevName, pszTableType);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ /*
+ Write to the MMU script stream indicating the memory free
+ */
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE :%s:%s\n",
+ pszPDumpDevName,
+ pszSymbolicAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+/*******************************************************************************************************
+ * Function Name : PDumpPTBaseObjectToMem64
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Emits a PDump WRW64 command that writes the page table
+ *               base object's symbolic address into the destination PMR,
+ *               for the MIPS MMU device type
+********************************************************************************************************/
+PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags,
+ MMU_LEVEL eMMULevel,
+ IMG_UINT64 ui64PxSymAddr)
+{
+
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+ ui32Flags |= PDUMP_FLAGS_BLKDATA;
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ return eErr;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW64 :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:%s%016"IMG_UINT64_FMTSPECX":0x%"IMG_UINT64_FMTSPECX,aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, pszPDumpDevName, MIPSMMUPX_FMT(eMMULevel), ui64PxSymAddr,
+ (IMG_UINT64)0);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUDumpPxEntries
+ * Inputs : eMMULevel, entry range, symbolic names and field
+ *          encoding parameters
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Dumps a range of page-level entries (PxEs): valid
+ *               entries are rebuilt symbolically in the script stream,
+ *               invalid entries are written out as raw LDB data
+**************************************************************************/
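+/* Outline of the approach: a valid entry is either written directly
+   (level 1 with no shift needed) or rebuilt via PDump internal register
+   $1: WRW loads the referenced object's symbolic address, SHR/SHL scale
+   it into field units, OR merges the protection flags and a final WRW
+   stores $1 into the entry. Invalid entries are batched as raw bytes
+   through _ContiguousPDumpBytes and emitted as LDB data. */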
+PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+ const IMG_CHAR *pszPDumpDevName,
+ void *pvPxMem,
+ IMG_DEV_PHYADDR sPxDevPAddr,
+ IMG_UINT32 uiFirstEntry,
+ IMG_UINT32 uiNumEntries,
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT64 uiSymbolicAddrOffset,
+ IMG_UINT32 uiBytesPerEntry,
+ IMG_UINT32 uiLog2Align,
+ IMG_UINT32 uiAddrShift,
+ IMG_UINT64 uiAddrMask,
+ IMG_UINT64 uiPxEProtMask,
+ IMG_UINT64 uiDataValidEnable,
+ IMG_UINT32 ui32Flags,
+ PDUMP_MMU_TYPE eMMUType)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_UINT64 ui64PxSymAddr;
+ IMG_UINT64 ui64PxEValueSymAddr;
+ IMG_UINT32 ui32SymAddrOffset = 0;
+ IMG_UINT32 *pui32PxMem;
+ IMG_UINT64 *pui64PxMem;
+ IMG_BOOL bPxEValid;
+ IMG_UINT32 uiPxEIdx;
+ IMG_INT32 iShiftAmount;
+ IMG_CHAR *pszWrwSuffix = NULL;
+ void *pvRawBytes = NULL;
+ IMG_CHAR aszPxSymbolicAddr[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_UINT64 ui64PxE64;
+ IMG_UINT64 ui64Protflags64;
+ IMG_CHAR *pszMMUPX;
+
+ PDUMP_GET_SCRIPT_STRING();
+ ui32Flags |= PDUMP_FLAGS_BLKDATA;
+
+ if (!PDumpReady())
+ {
+ eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ goto ErrOut;
+ }
+
+
+ if (PDumpIsDumpSuspended())
+ {
+ eErr = PVRSRV_OK;
+ goto ErrOut;
+ }
+
+ if (pvPxMem == NULL)
+ {
+ eErr = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrOut;
+ }
+
+
+ /*
+ create the symbolic address of the Px
+ */
+ ui64PxSymAddr = sPxDevPAddr.uiAddr;
+
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ OSSNPrintf(aszPxSymbolicAddr,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ ":%s:%s%016"IMG_UINT64_FMTSPECX,
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr);
+
+ PDUMP_LOCK();
+
+ /*
+ traverse PxEs, dumping entries
+ */
+ for(uiPxEIdx = uiFirstEntry;
+ uiPxEIdx < uiFirstEntry + uiNumEntries;
+ uiPxEIdx++)
+ {
+		/* Calculate the symbolic address offset of the PxE location,
+		   i.e. what must be added to the table address to reach this entry */
+ ui32SymAddrOffset = (uiPxEIdx*uiBytesPerEntry);
+
+		/* Read the PxE value here; its symbolic address and HW prot
+		   flags are derived from it below */
+ switch(uiBytesPerEntry)
+ {
+ case 4:
+ {
+ pui32PxMem = pvPxMem;
+ ui64PxE64 = pui32PxMem[uiPxEIdx];
+ pszWrwSuffix = "";
+ pvRawBytes = &pui32PxMem[uiPxEIdx];
+ break;
+ }
+ case 8:
+ {
+ pui64PxMem = pvPxMem;
+ ui64PxE64 = pui64PxMem[uiPxEIdx];
+ pszWrwSuffix = "64";
+ pvRawBytes = &pui64PxMem[uiPxEIdx];
+ break;
+ }
+ default:
+ {
+				PVR_DPF((PVR_DBG_ERROR,
+				         "PDumpMMUDumpPxEntries: unsupported entry size (%u bytes)",
+				         uiBytesPerEntry));
+				/* Unsupported entry size: dump the entry as zero and carry on */
+				ui64PxE64 = 0;
+ break;
+ }
+ }
+
+ ui64PxEValueSymAddr = (ui64PxE64 & uiAddrMask) >> uiAddrShift << uiLog2Align;
+ ui64Protflags64 = ui64PxE64 & uiPxEProtMask;
+ bPxEValid = (ui64Protflags64 & uiDataValidEnable) ? IMG_TRUE : IMG_FALSE;
+ if(bPxEValid)
+ {
+ _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+ 0, NULL,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ iShiftAmount = (IMG_INT32)(uiLog2Align - uiAddrShift);
+
+			/* First put the symbolic representation of the entry's actual
+			   address into a PDump internal register. MOV might seem
+			   cleaner here, since (a) it is 64-bit and (b) the target is
+			   not memory; however, MOV cannot take a "reference" to a
+			   symbolic address, so WRW is the correct command. */
+
+ if (pszSymbolicAddr == NULL)
+ {
+ pszSymbolicAddr = "none";
+ }
+
+ if (eMMULevel == MMU_LEVEL_1)
+ {
+ if (iShiftAmount == 0)
+ {
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:%s:0x%"IMG_UINT64_FMTSPECX" | 0x%"IMG_UINT64_FMTSPECX"\n",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr,
+ ui32SymAddrOffset,
+ /* src */
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset,
+ /* ORing prot flags */
+ ui64Protflags64);
+ }
+ else
+ {
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:$1 :%s:%s:0x%"IMG_UINT64_FMTSPECX"\n",
+ /* dest */
+ pszPDumpDevName,
+ /* src */
+ pszMemspaceName,
+ pszSymbolicAddr,
+ uiSymbolicAddrOffset);
+ }
+ }
+ else
+ {
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel - 1);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel - 1);
+ }
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:$1 :%s:%s%016"IMG_UINT64_FMTSPECX":0x0",
+ /* dest */
+ pszPDumpDevName,
+ /* src */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxEValueSymAddr);
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(eMMULevel);
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(eMMULevel);
+ }
+ }
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+			/* Now shift the value into place if necessary: shift it down
+			   by the "align shift" to get it into units (we ought to
+			   assert that no bits fall off the bottom, but PDump has no
+			   assertion mechanism yet), then back up into the position of
+			   the field. This is optimised into a single shift by the
+			   difference between the two amounts. */
+ if (iShiftAmount > 0)
+ {
+ /* Page X Address is specified in units larger
+ than the position in the PxE would suggest. */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHR :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ iShiftAmount);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+ else if (iShiftAmount < 0)
+ {
+ /* Page X Address is specified in units smaller
+ than the position in the PxE would suggest. */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SHL :%s:$1 :%s:$1 0x%X",
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ -iShiftAmount);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ if (eMMULevel == MMU_LEVEL_1)
+ {
+ if( iShiftAmount != 0)
+ {
+ /* Now we can "or" in the protection flags */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "OR :%s:$1 :%s:$1 0x%"IMG_UINT64_FMTSPECX,
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui64Protflags64);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:$1 ",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr,
+ ui32SymAddrOffset,
+ /* src */
+ pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ }
+ }
+ else
+ {
+ /* Now we can "or" in the protection flags */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "OR :%s:$1 :%s:$1 0x%"IMG_UINT64_FMTSPECX,
+ /* dest */
+ pszPDumpDevName,
+ /* src A */
+ pszPDumpDevName,
+ /* src B */
+ ui64Protflags64);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ /* Finally, we write the register into the actual PxE */
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW%s :%s:%s%016"IMG_UINT64_FMTSPECX":0x%08X :%s:$1",
+ pszWrwSuffix,
+ /* dest */
+ pszPDumpDevName,
+ pszMMUPX,
+ ui64PxSymAddr,
+ ui32SymAddrOffset,
+ /* src */
+ pszPDumpDevName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+ else
+ {
+ /* If the entry was "invalid", simply write the actual
+ value found to the memory location */
+ eErr = _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_FALSE,
+ uiBytesPerEntry, pvRawBytes,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ }
+ }
+
+ /* flush out any partly accumulated stuff for LDB */
+ _ContiguousPDumpBytes(aszPxSymbolicAddr, ui32SymAddrOffset, IMG_TRUE,
+ 0, NULL,
+ ui32Flags | PDUMP_FLAGS_CONTINUOUS);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : _PdumpAllocMMUContext
+ * Inputs : pui32MMUContextID
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : pdump util to allocate MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpAllocMMUContext(IMG_UINT32 *pui32MMUContextID)
+{
+ IMG_UINT32 i;
+
+ /* there are MAX_PDUMP_MMU_CONTEXTS contexts available, find one */
+ for(i=0; i<MAX_PDUMP_MMU_CONTEXTS; i++)
+ {
+ if((guiPDumpMMUContextAvailabilityMask & (1U << i)))
+ {
+ /* mark in use */
+ guiPDumpMMUContextAvailabilityMask &= ~(1U << i);
+ *pui32MMUContextID = i;
+ return PVRSRV_OK;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "_PdumpAllocMMUContext: no free MMU context ids"));
+
+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name : _PdumpFreeMMUContext
+ * Inputs : ui32MMUContextID
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : pdump util to free MMU contexts
+**************************************************************************/
+static PVRSRV_ERROR _PdumpFreeMMUContext(IMG_UINT32 ui32MMUContextID)
+{
+ if(ui32MMUContextID < MAX_PDUMP_MMU_CONTEXTS)
+ {
+ /* free the id */
+ PVR_ASSERT (!(guiPDumpMMUContextAvailabilityMask & (1U << ui32MMUContextID)));
+ guiPDumpMMUContextAvailabilityMask |= (1U << ui32MMUContextID);
+ return PVRSRV_OK;
+ }
+
+	PVR_DPF((PVR_DBG_ERROR, "_PdumpFreeMMUContext: invalid MMU context ID"));
+
+ return PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUAllocMMUContext
+ * Inputs :
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Alloc MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_DEV_PHYADDR sPCDevPAddr,
+ PDUMP_MMU_TYPE eMMUType,
+ IMG_UINT32 *pui32MMUContextID)
+{
+ IMG_UINT64 ui64PCSymAddr;
+ IMG_CHAR *pszMMUPX;
+
+ IMG_UINT32 ui32MMUContextID;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ PDUMP_GET_SCRIPT_STRING();
+
+ eErr = _PdumpAllocMMUContext(&ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: _PdumpAllocMMUContext failed: %d",
+ __func__, eErr));
+ PVR_DBG_BREAK;
+ goto ErrOut;
+ }
+
+ /*
+ create the symbolic address of the PC
+ */
+ ui64PCSymAddr = sPCDevPAddr.uiAddr;
+
+ if (eMMUType == PDUMP_MMU_TYPE_MIPS_MICROAPTIV)
+ {
+ pszMMUPX = MIPSMMUPX_FMT(1);
+		/* Use a placeholder type until the PDump player implements
+		   support for the MIPS microAptiv MMU */
+ eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+ }
+ else
+ {
+ pszMMUPX = MMUPX_FMT(3);
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "MMU :%s:v%d %d :%s:%s%016"IMG_UINT64_FMTSPECX,
+ /* mmu context */
+ pszPDumpMemSpaceName,
+ ui32MMUContextID,
+ /* mmu type */
+ eMMUType,
+ /* PC base address */
+ pszPDumpMemSpaceName,
+ pszMMUPX,
+ ui64PCSymAddr);
+ if(eErr != PVRSRV_OK)
+ {
+ PDUMP_UNLOCK();
+ PVR_DBG_BREAK;
+ goto ErrOut;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+ PDUMP_UNLOCK();
+
+ /* return the MMU Context ID */
+ *pui32MMUContextID = ui32MMUContextID;
+
+ErrOut:
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUFreeMMUContext
+ * Inputs :
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Free MMU Context
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_UINT32 ui32MMUContextID)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ PDUMP_GET_SCRIPT_STRING();
+
+ PDUMP_LOCK();
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "-- Clear MMU Context for memory space %s", pszPDumpMemSpaceName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "MMU :%s:v%d",
+ pszPDumpMemSpaceName,
+ ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, PDUMP_FLAGS_CONTINUOUS);
+
+ eErr = _PdumpFreeMMUContext(ui32MMUContextID);
+ if(eErr != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: _PdumpFreeMMUContext failed: %d",
+ __func__, eErr));
+ goto ErrUnlock;
+ }
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ return eErr;
+}
+
+
+/**************************************************************************
+ * Function Name : PDumpMMUActivateCatalog
+ * Inputs : pszPDumpRegSpaceName, pszPDumpRegName, uiRegAddr,
+ *          pszPDumpPCSymbolicName
+ * Outputs : None
+ * Returns : PVRSRV_ERROR
+ * Description : Writes the page catalogue base address into the given
+ *               device register via the PDump script stream
+**************************************************************************/
+PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+ const IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 uiRegAddr,
+ const IMG_CHAR *pszPDumpPCSymbolicName)
+{
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (!PDumpReady())
+ {
+ eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ goto ErrOut;
+ }
+
+
+ if (PDumpIsDumpSuspended())
+ {
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen,
+ "-- Write Page Catalogue Address to %s",
+ pszPDumpRegName);
+ if(eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:0x%04X %s:0",
+ /* dest */
+ pszPDumpRegSpaceName,
+ uiRegAddr,
+ /* src */
+ pszPDumpPCSymbolicName);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+ IMG_UINT32 uiPDumpMMUCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+
+	/* Emitted command shape: SAB :<memspace>:v<ctx>:<vaddr> <size> <file offset> <filename>.bin */
+
+ PDUMP_GET_SCRIPT_STRING();
+
+ if (!PDumpReady())
+ {
+ eErr = PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ goto ErrOut;
+ }
+
+
+ if (PDumpIsDumpSuspended())
+ {
+ eErr = PVRSRV_OK;
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:v%x:" IMG_DEV_VIRTADDR_FMTSPEC " "
+ IMG_DEVMEM_SIZE_FMTSPEC " "
+ "0x%x %s.bin\n",
+ pszPDumpMemNamespace,
+ uiPDumpMMUCtx,
+ sDevAddrStart.uiAddr,
+ uiSize,
+ uiFileOffset,
+ pszFilename);
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PdumpWireUpMipsTLB
+ * Returns : PVRSRV_ERROR
+ * Description : Emits PDump commands that build a MIPS TLB entry from
+ *               the source PMR's symbolic address and write it into
+ *               the destination PMR
+**************************************************************************/
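+/* The emitted sequence, in outline: WRW loads the source symbolic address
+   into internal register $1; SHR by 6 and AND with 0x03FFFFC0 shift and
+   mask it into the page-frame field of the entry; OR merges the
+   allocation flags; a final WRW stores $1 into the destination entry. */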
+PVRSRV_ERROR PdumpWireUpMipsTLB(PMR *psPMRSource,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32AllocationFlags,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_CHAR aszMemspaceNameSource[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameSource[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetSource;
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameSource;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+ ui32Flags |= PDUMP_FLAGS_BLKDATA;
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRSource,
+ uiLogicalOffsetSource,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameSource,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameSource,
+ &uiPDumpSymbolicOffsetSource,
+ &uiNextSymNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrOut;
+ }
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:$1 :%s:%s:0x%"IMG_UINT64_FMTSPECX, aszMemspaceNameSource,
+ aszMemspaceNameSource, aszSymbolicNameSource,
+ uiPDumpSymbolicOffsetSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "SHR :%s:$1 :%s:$1 0x6", aszMemspaceNameSource,
+ aszMemspaceNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :%s:$1 :%s:$1 0x03FFFFC0", aszMemspaceNameSource,
+ aszMemspaceNameSource);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "OR :%s:$1 :%s:$1 0x%X", aszMemspaceNameSource,
+ aszMemspaceNameSource, ui32AllocationFlags);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:$1",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameSource);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+/**************************************************************************
+ * Function Name : PdumpInvalidateMipsTLB
+ * Returns : PVRSRV_ERROR
+ * Description : Emits PDump commands that clear the valid bits of a
+ *               MIPS TLB entry held in the destination PMR
+**************************************************************************/
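+/* The emitted sequence reads the entry into internal register $1, ANDs it
+   with ui32MipsTLBValidClearMask to clear the valid bit(s), and writes the
+   result back to the same location. */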
+PVRSRV_ERROR PdumpInvalidateMipsTLB(PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32MipsTLBValidClearMask,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eErr = PVRSRV_OK;
+ IMG_CHAR aszMemspaceNameDest[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicNameDest[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffsetDest;
+ IMG_DEVMEM_OFFSET_T uiNextSymNameDest;
+
+
+ PDUMP_GET_SCRIPT_STRING()
+ ui32Flags |= PDUMP_FLAGS_BLKDATA;
+
+ eErr = PMR_PDumpSymbolicAddr(psPMRDest,
+ uiLogicalOffsetDest,
+ PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH,
+ aszMemspaceNameDest,
+ PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH,
+ aszSymbolicNameDest,
+ &uiPDumpSymbolicOffsetDest,
+ &uiNextSymNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrOut;
+ }
+
+ PDUMP_LOCK();
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:$1 :%s:%s:0x%"IMG_UINT64_FMTSPECX, aszMemspaceNameDest,
+ aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "AND :%s:$1 :%s:$1 0x%X", aszMemspaceNameDest,
+ aszMemspaceNameDest, ui32MipsTLBValidClearMask);
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+
+
+ eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "WRW :%s:%s:0x%"IMG_UINT64_FMTSPECX" :%s:$1",aszMemspaceNameDest, aszSymbolicNameDest,
+ uiPDumpSymbolicOffsetDest, aszMemspaceNameDest);
+
+
+ if (eErr != PVRSRV_OK)
+ {
+ goto ErrUnlock;
+ }
+ PDumpWriteScript(hScript, ui32Flags);
+
+
+ErrUnlock:
+ PDUMP_UNLOCK();
+ErrOut:
+ return eErr;
+}
+
+
+#endif /* #if defined (PDUMP) */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_mmu.h b/drivers/gpu/drm/img-rogue/1.10/pdump_mmu.h
new file mode 100644
index 00000000000000..24c26632f17346
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_mmu.h
@@ -0,0 +1,189 @@
+/**************************************************************************/ /*!
+@File
+@Title Common MMU Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_MMU_H
+#define SRVKM_PDUMP_MMU_H
+
+/* services/server/include/ */
+#include "pdump_symbolicaddr.h"
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "mmu_common.h"
+
+/*
+ PDUMP MMU attributes
+*/
+typedef struct _PDUMP_MMU_ATTRIB_DEVICE_
+{
+ /* Per-Device Pdump attribs */
+
+ /*!< Pdump memory bank name */
+ IMG_CHAR *pszPDumpMemDevName;
+
+ /*!< Pdump register bank name */
+ IMG_CHAR *pszPDumpRegDevName;
+
+} PDUMP_MMU_ATTRIB_DEVICE;
+
+typedef struct _PDUMP_MMU_ATTRIB_CONTEXT_
+{
+ IMG_UINT32 ui32Dummy;
+} PDUMP_MMU_ATTRIB_CONTEXT;
+
+typedef struct _PDUMP_MMU_ATTRIB_HEAP_
+{
+ /* data page info */
+ IMG_UINT32 ui32DataPageMask;
+} PDUMP_MMU_ATTRIB_HEAP;
+
+typedef struct _PDUMP_MMU_ATTRIB_
+{
+ /* FIXME: would these be better as pointers rather than copies? */
+ struct _PDUMP_MMU_ATTRIB_DEVICE_ sDevice;
+ struct _PDUMP_MMU_ATTRIB_CONTEXT_ sContext;
+ struct _PDUMP_MMU_ATTRIB_HEAP_ sHeap;
+} PDUMP_MMU_ATTRIB;
+
+#if defined(PDUMP)
+ extern PVRSRV_ERROR PDumpMMUMalloc(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align,
+ PDUMP_MMU_TYPE eMMUType);
+
+ extern PVRSRV_ERROR PDumpMMUFree(const IMG_CHAR *pszPDumpDevName,
+ MMU_LEVEL eMMULevel,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ PDUMP_MMU_TYPE eMMUType);
+
+ extern PVRSRV_ERROR PDumpMMUMalloc2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Align);
+
+ extern PVRSRV_ERROR PDumpMMUFree2(const IMG_CHAR *pszPDumpDevName,
+ const IMG_CHAR *pszTableType,/* PAGE_CATALOGUE, PAGE_DIRECTORY, PAGE_TABLE */
+ const IMG_CHAR *pszSymbolicAddr);
+
+
+ extern PVRSRV_ERROR PDumpPTBaseObjectToMem64(const IMG_CHAR *pszPDumpDevName,
+ PMR *psPMRDest,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetSource,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffsetDest,
+ IMG_UINT32 ui32Flags,
+ MMU_LEVEL eMMULevel,
+ IMG_UINT64 ui64PxSymAddr);
+
+ extern PVRSRV_ERROR PDumpMMUDumpPxEntries(MMU_LEVEL eMMULevel,
+ const IMG_CHAR *pszPDumpDevName,
+ void *pvPxMem,
+ IMG_DEV_PHYADDR sPxDevPAddr,
+ IMG_UINT32 uiFirstEntry,
+ IMG_UINT32 uiNumEntries,
+ const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicAddr,
+ IMG_UINT64 uiSymbolicAddrOffset,
+ IMG_UINT32 uiBytesPerEntry,
+ IMG_UINT32 uiLog2Align,
+ IMG_UINT32 uiAddrShift,
+ IMG_UINT64 uiAddrMask,
+ IMG_UINT64 uiPxEProtMask,
+ IMG_UINT64 uiDataValidEnable,
+ IMG_UINT32 ui32Flags,
+ PDUMP_MMU_TYPE eMMUType);
+
+
+ extern PVRSRV_ERROR PDumpMMUAllocMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_DEV_PHYADDR sPCDevPAddr,
+ PDUMP_MMU_TYPE eMMUType,
+ IMG_UINT32 *pui32MMUContextID);
+
+ extern PVRSRV_ERROR PDumpMMUFreeMMUContext(const IMG_CHAR *pszPDumpMemSpaceName,
+ IMG_UINT32 ui32MMUContextID);
+
+ extern PVRSRV_ERROR PDumpMMUActivateCatalog(const IMG_CHAR *pszPDumpRegSpaceName,
+ const IMG_CHAR *pszPDumpRegName,
+ IMG_UINT32 uiRegAddr,
+ const IMG_CHAR *pszPDumpPCSymbolicName);
+
+ /* FIXME: split to separate file... (debatable whether this is anything to do with MMU) */
+extern PVRSRV_ERROR
+PDumpMMUSAB(const IMG_CHAR *pszPDumpMemNamespace,
+ IMG_UINT32 uiPDumpMMUCtx,
+ IMG_DEV_VIRTADDR sDevAddrStart,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ IMG_UINT32 ui32PDumpFlags);
+
+ #define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, aszSymbolicAddr, ui32Size, ui32Align) \
+ PDumpMMUMalloc2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr, ui32Size, ui32Align)
+ #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, aszSymbolicAddr) \
+ PDumpMMUFree2(pszPDumpMemDevName, "DATA_PAGE", aszSymbolicAddr)
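+	/* Convenience wrappers: a data page is dumped as a "DATA_PAGE" table
+	   object, so callers need not spell out the table-type string. */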
+
+ #define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+ PDumpMMUAllocMMUContext(pszPDumpMemDevName, \
+ sPCDevPAddr, \
+ eMMUType, \
+ puiPDumpCtxID)
+
+ #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+ PDumpMMUFreeMMUContext(pszPDumpMemDevName, uiPDumpCtxID)
+#else
+
+ #define PDUMP_MMU_MALLOC_DP(pszPDumpMemDevName, pszDevPAddr, ui32Size, ui32Align) \
+ ((void)0)
+ #define PDUMP_MMU_FREE_DP(pszPDumpMemDevName, psDevPAddr) \
+ ((void)0)
+ #define PDUMP_MMU_ALLOC_MMUCONTEXT(pszPDumpMemDevName, sPCDevPAddr, eMMUType, puiPDumpCtxID) \
+ ((void)0)
+ #define PDUMP_MMU_FREE_MMUCONTEXT(pszPDumpMemDevName, uiPDumpCtxID) \
+ ((void)0)
+
+#endif // defined(PDUMP)
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_osfunc.h b/drivers/gpu/drm/img-rogue/1.10/pdump_osfunc.h
new file mode 100644
index 00000000000000..c1189fd1ed7f05
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_osfunc.h
@@ -0,0 +1,369 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS-independent interface to helper functions for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <stdarg.h>
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_device_types.h"
+
+
+/* FIXME
+ * Some OSes (WinXP, CE) allocate the string on the stack, but some
+ * (Linux) use a global variable/lock instead.
+ * Would be good to use the same across all OSes.
+ *
+ * A handle is returned which represents IMG_CHAR* type on all OSes.
+ *
+ * The allocated buffer length is also returned on OSes where it's
+ * supported (e.g. Linux).
+ */
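+/* Typical usage, sketched (PDumpWriteScript comes from pdump_km.h; eErr is
+ * an illustrative local):
+ *
+ *   PDUMP_GET_SCRIPT_STRING();
+ *   eErr = PDumpOSBufprintf(hScript, ui32MaxLen, "-- comment");
+ *   if (eErr == PVRSRV_OK)
+ *       PDumpWriteScript(hScript, ui32Flags);
+ *
+ * The macro declares the local variables (hScript, ui32MaxLen) that the
+ * subsequent calls rely on, so it must appear in the declaration block.
+ */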
+#define MAX_PDUMP_STRING_LENGTH (256)
+#if defined(WIN32)
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_UINT32 ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#else /* WIN32 */
+
+#if defined(__QNXNTO__)
+
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR pszMsg[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLen = MAX_PDUMP_STRING_LENGTH-1;
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_CHAR pszScript[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_CHAR pszFileName[MAX_PDUMP_STRING_LENGTH]; \
+ IMG_UINT32 ui32MaxLenScript = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_UINT32 ui32MaxLenFileName = MAX_PDUMP_STRING_LENGTH-1; \
+ IMG_HANDLE hScript = (IMG_HANDLE)pszScript;
+
+#else /* __QNXNTO__ */
+
+ /*
+ * Linux
+ */
+#define PDUMP_GET_SCRIPT_STRING() \
+ IMG_HANDLE hScript; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eErrorPDump; \
+ eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLen);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");
+
+#define PDUMP_GET_MSG_STRING() \
+ IMG_CHAR *pszMsg; \
+ IMG_UINT32 ui32MaxLen; \
+ PVRSRV_ERROR eErrorPDump; \
+ eErrorPDump = PDumpOSGetMessageString(&pszMsg, &ui32MaxLen);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetMessageString");
+
+#define PDUMP_GET_SCRIPT_AND_FILE_STRING() \
+ IMG_HANDLE hScript; \
+ IMG_CHAR *pszFileName; \
+ IMG_UINT32 ui32MaxLenScript; \
+ IMG_UINT32 ui32MaxLenFileName; \
+ PVRSRV_ERROR eErrorPDump; \
+ eErrorPDump = PDumpOSGetScriptString(&hScript, &ui32MaxLenScript);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetScriptString");\
+ eErrorPDump = PDumpOSGetFilenameString(&pszFileName, &ui32MaxLenFileName);\
+ PVR_LOGR_IF_ERROR(eErrorPDump, "PDumpOSGetFilenameString");
+
+ /**************************************************************************/ /*!
+ @Function PDumpOSGetScriptString
+ @Description Get the handle of the PDump "script" buffer.
+ This function is only called if PDUMP is defined.
+ @Output phScript Handle of the PDump script buffer
+ @Output pui32MaxLen max length the script buffer can be
+ @Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+ PVRSRV_ERROR PDumpOSGetScriptString(IMG_HANDLE *phScript, IMG_UINT32 *pui32MaxLen);
+
+ /**************************************************************************/ /*!
+ @Function PDumpOSGetMessageString
+ @Description Get the PDump "message" buffer.
+ This function is only called if PDUMP is defined.
+ @Output ppszMsg Pointer to the PDump message buffer
+ @Output pui32MaxLen max length the message buffer can be
+ @Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+ PVRSRV_ERROR PDumpOSGetMessageString(IMG_CHAR **ppszMsg, IMG_UINT32 *pui32MaxLen);
+
+ /**************************************************************************/ /*!
+ @Function PDumpOSGetFilenameString
+ @Description Get the PDump "filename" buffer.
+ This function is only called if PDUMP is defined.
+ @Output ppszFile Pointer to the PDump filename buffer
+ @Output pui32MaxLen max length the filename buffer can be
+ @Return PVRSRV_OK on success, a failure code otherwise.
+ */ /**************************************************************************/
+ PVRSRV_ERROR PDumpOSGetFilenameString(IMG_CHAR **ppszFile, IMG_UINT32 *pui32MaxLen);
+
+#endif /* __QNXNTO__ */
+#endif /* WIN32 */
+
+
+/*
+ * PDump streams, channels, init and deinit routines (common to all OSes)
+ */
+
+typedef struct
+{
+ IMG_HANDLE hInit; /*!< Driver initialisation PDump stream */
+ IMG_HANDLE hMain; /*!< App framed PDump stream */
+ IMG_HANDLE hDeinit; /*!< Driver/HW de-initialisation PDump stream */
+} PDUMP_CHANNEL;
+
+/**************************************************************************/ /*!
+@Function PDumpOSInit
+@Description Reset the connection to vldbgdrv, then try to connect to
+ PDump streams. This function is only called if PDUMP is
+ defined.
+@Input psParam PDump channel to be used for logging
+ parameters
+@Input psScript PDump channel to be used for logging
+ commands / events
+@Input psBlkScript PDump channel to be used for logging
+ BLKDATA commands / events in BLKMODE
+@Output pui32InitCapMode The initial PDump capture mode.
+@Output ppszEnvComment Environment-specific comment that is
+ output when writing to the PDump
+ stream (this may be NULL).
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript, PDUMP_CHANNEL* psBlkScript,
+ IMG_UINT32* pui32InitCapMode, IMG_CHAR** ppszEnvComment);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDeInit
+@Description Disconnect the PDump streams and close the connection to
+ vldbgdrv. This function is only called if PDUMP is defined.
+@Input psParam PDump parameter channel to be closed
+@Input psScript PDump command channel to be closed
+@Input psBlkScript PDump BLKDATA command channel to be closed
+@Return None
+*/ /**************************************************************************/
+void PDumpOSDeInit(PDUMP_CHANNEL* psParam, PDUMP_CHANNEL* psScript, PDUMP_CHANNEL* psBlkScript);
+
+/**************************************************************************/ /*!
+@Function PDumpOSSetSplitMarker
+@Description Inform the PDump client to start a new file at the given
+ marker. This function is only called if PDUMP is defined.
+@Input hStream handle of PDump stream
+@Input ui32Marker byte file position
+@Return IMG_TRUE
+*/ /**************************************************************************/
+IMG_BOOL PDumpOSSetSplitMarker(IMG_HANDLE hStream, IMG_UINT32 ui32Marker);
+
+
+/**************************************************************************/ /*!
+@Function PDumpOSGetSplitMarker
+@Description Gets current value of split marker from dbgdrv.
+ This function is only called if PDUMP is defined.
+@Input hStream handle of PDump stream
+@Return         The current split-marker value for the stream
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSGetSplitMarker(IMG_HANDLE hStream);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDebugDriverWrite
+@Description Writes a given number of bytes from the specified buffer
+ to a PDump stream. This function is only called if PDUMP
+ is defined.
+@Input psStream handle of PDump stream to write into
+@Input pui8Data buffer to write data from
+@Input ui32BCount number of bytes to write
+@Return The number of bytes actually written (may be less than
+ ui32BCount if there is insufficient space in the target
+ PDump stream buffer)
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSDebugDriverWrite(IMG_HANDLE psStream,
+ IMG_UINT8 *pui8Data,
+ IMG_UINT32 ui32BCount);
+
+/*
+ * Define macro for processing variable args list in OS-independent
+ * manner. See e.g. PDumpCommentWithFlags().
+ */
+#define PDUMP_va_list va_list
+#define PDUMP_va_start va_start
+#define PDUMP_va_end va_end
+
+
+/**************************************************************************/ /*!
+@Function PDumpOSBufprintf
+@Description Printf to OS-specific PDump state buffer. This function is
+ only called if PDUMP is defined.
+@Input hBuf handle of buffer to write into
+@Input ui32ScriptSizeMax maximum size of data to write (chars)
+@Input pszFormat format string
+@Return         PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSBufprintf(IMG_HANDLE hBuf, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR* pszFormat, ...) __printf(3, 4);
+
+/*
+ * Write into a IMG_CHAR* on all OSes. Can be allocated on the stack or heap.
+ */
+/**************************************************************************/ /*!
+@Function PDumpOSSprintf
+@Description Printf to IMG char array. This function is only called if
+ PDUMP is defined.
+@Input ui32ScriptSizeMax maximum size of data to write (chars)
+@Input pszFormat format string
+@Output pszComment char array to print into
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSSprintf(IMG_CHAR *pszComment, IMG_UINT32 ui32ScriptSizeMax, IMG_CHAR *pszFormat, ...) __printf(3, 4);
+
+/**************************************************************************/ /*!
+@Function PDumpOSVSprintf
+@Description Printf to IMG string using variable args (see stdarg.h).
+ This is necessary because the '...' notation does not
+ support nested function calls.
+ This function is only called if PDUMP is defined.
+@Input ui32ScriptSizeMax maximum size of data to write (chars)
+@Input pszFormat format string
+@Input vaArgs variable args structure (from stdarg.h)
+@Output pszMsg char array to print into
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSVSprintf(IMG_CHAR *pszMsg, IMG_UINT32 ui32ScriptSizeMax, const IMG_CHAR* pszFormat, PDUMP_va_list vaArgs) __printf(3, 0);
+
+/**************************************************************************/ /*!
+@Function PDumpOSBuflen
+@Description Returns the length of the specified buffer (in chars).
+ This function is only called if PDUMP is defined.
+@Input hBuffer handle to buffer
+@Input ui32BufferSizeMax max size of buffer (chars)
+@Return The length of the buffer, will always be <= ui32BufferSizeMax
+*/ /**************************************************************************/
+IMG_UINT32 PDumpOSBuflen(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/**************************************************************************/ /*!
+@Function PDumpOSVerifyLineEnding
+@Description Put line ending sequence at the end if it isn't already
+ there. This function is only called if PDUMP is defined.
+@Input hBuffer handle to buffer
+@Input ui32BufferSizeMax max size of buffer (chars)
+@Return None
+*/ /**************************************************************************/
+void PDumpOSVerifyLineEnding(IMG_HANDLE hBuffer, IMG_UINT32 ui32BufferSizeMax);
+
+/**************************************************************************/ /*!
+@Function PDumpOSReleaseExecution
+@Description OS function to switch to another process, to clear PDump
+ buffers.
+ This function can simply wrap OSReleaseThreadQuanta.
+ This function is only called if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSReleaseExecution(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSCreateLock
+@Description Create the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return        PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR PDumpOSCreateLock(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSDestroyLock
+@Description   Destroy the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSDestroyLock(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSLock
+@Description Acquire the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSLock(void);
+
+/**************************************************************************/ /*!
+@Function PDumpOSUnlock
+@Description Release the global pdump lock. This function is only called
+ if PDUMP is defined.
+@Return None
+*/ /**************************************************************************/
+void PDumpOSUnlock(void);
+
+/*!
+ * @name PDumpOSGetCtrlState
+ * @brief Retrieve some state from the debug driver or debug driver stream
+ */
+IMG_UINT32 PDumpOSGetCtrlState(IMG_HANDLE hDbgStream, IMG_UINT32 ui32StateID);
+
+/*!
+ * @name PDumpOSSetFrame
+ * @brief Set the current frame value mirrored in the debug driver
+ */
+void PDumpOSSetFrame(IMG_UINT32 ui32Frame);
+
+/*!
+ * @name PDumpOSAllowInitPhaseToComplete
+ * @brief Some platforms wish to control when the init phase is marked as
+ *        complete, depending on which client is instructing it.
+ */
+IMG_BOOL PDumpOSAllowInitPhaseToComplete(IMG_BOOL bPDumpClient, IMG_BOOL bInitClient);
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_physmem.c b/drivers/gpu/drm/img-rogue/1.10/pdump_physmem.c
new file mode 100644
index 00000000000000..b6b2b0b33d133d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_physmem.c
@@ -0,0 +1,634 @@
+/*************************************************************************/ /*!
+@File
+@Title Physmem PDump functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common PDump (PMR specific) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /***************************************************************************/
+
+#if defined(PDUMP)
+
+#if defined(LINUX)
+#include <linux/ctype.h>
+#else
+#include <ctype.h>
+#endif
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump_physmem.h"
+#include "pdump_osfunc.h"
+#include "pdump_km.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+
+/* #define MAX_PDUMP_MMU_CONTEXTS (10) */
+/* static IMG_UINT32 guiPDumpMMUContextAvailabilityMask = (1<<MAX_PDUMP_MMU_CONTEXTS)-1; */
+
+
+struct _PDUMP_PHYSMEM_INFO_T_
+{
+ IMG_CHAR aszSymbolicAddress[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+ IMG_UINT64 ui64Size;
+ IMG_UINT32 ui32Align;
+ IMG_UINT32 ui32SerialNum;
+};
+
+static IMG_BOOL _IsAllowedSym(IMG_CHAR sym)
+{
+ /* Numbers, Characters or '_' are allowed */
+ if (isalnum(sym) || sym == '_')
+ return IMG_TRUE;
+ else
+ return IMG_FALSE;
+}
+
+static IMG_BOOL _IsLowerCaseSym(IMG_CHAR sym)
+{
+ if (sym >= 'a' && sym <= 'z')
+ return IMG_TRUE;
+ else
+ return IMG_FALSE;
+}
+
+void PDumpMakeStringValid(IMG_CHAR *pszString,
+ IMG_UINT32 ui32StrLen)
+{
+ IMG_UINT32 i;
+ for (i = 0; i < ui32StrLen; i++)
+ {
+ if (_IsAllowedSym(pszString[i]))
+ {
+			if (_IsLowerCaseSym(pszString[i]))
+				pszString[i] -= ('a' - 'A'); /* convert to upper case */
+ }
+ else
+ {
+ pszString[i] = '_';
+ }
+ }
+}
+
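+/*
+ * For example, PDumpMakeStringValid() maps an input such as "buf-01a" to
+ * "BUF_01A" in place: lower-case letters are upper-cased and any character
+ * that is neither alphanumeric nor '_' becomes '_'.
+ */
+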
+/**************************************************************************
+ * Function Name  : PDumpGetSymbolicAddr
+ * Inputs         : hPhysmemPDumpHandle - handle to the PDump allocation info
+ * Outputs        : ppszSymbolicAddress - receives the symbolic address string
+ * Returns        : PVRSRV_ERROR
+ * Description    : Returns the symbolic address recorded for an allocation.
+ **************************************************************************/
+PVRSRV_ERROR PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+ IMG_CHAR **ppszSymbolicAddress)
+{
+ PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+ if (!hPhysmemPDumpHandle)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPhysmemPDumpHandle;
+ *ppszSymbolicAddress = psPDumpAllocationInfo->aszSymbolicAddress;
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************
+ * Function Name  : PDumpMalloc
+ * Inputs         : pszDevSpace, pszSymbolicAddress, ui64Size, uiAlign,
+ *                  bInitialise, ui32InitValue, ui32PDumpFlags
+ * Outputs        : phHandlePtr - receives a handle to the allocation info
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emits a MALLOC (or CALLOC when initialising) to the
+ *                  PDump script stream and records the allocation details.
+ **************************************************************************/
+PVRSRV_ERROR PDumpMalloc(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phHandlePtr,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+	psPDumpAllocationInfo = OSAllocMem(sizeof(*psPDumpAllocationInfo));
+ PVR_ASSERT(psPDumpAllocationInfo != NULL);
+
+ /*
+ Set continuous flag because there is no way of knowing beforehand which
+ allocation is needed for playback of the captured range.
+ */
+ ui32PDumpFlags |= (PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA);
+
+ /*
+ construct the symbolic address
+ */
+
+	OSSNPrintf(psPDumpAllocationInfo->aszSymbolicAddress,
+	           sizeof(psPDumpAllocationInfo->aszSymbolicAddress),
+ ":%s:%s",
+ pszDevSpace,
+ pszSymbolicAddress);
+
+ /*
+ Write to the MMU script stream indicating the memory allocation
+ */
+ PDUMP_LOCK();
+ if (bInitialise)
+ {
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "CALLOC %s 0x%"IMG_UINT64_FMTSPECX" 0x%"IMG_UINT64_FMTSPECX" 0x%X\n",
+ psPDumpAllocationInfo->aszSymbolicAddress,
+ ui64Size,
+ uiAlign,
+ ui32InitValue);
+ }
+ else
+ {
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "MALLOC %s 0x%"IMG_UINT64_FMTSPECX" 0x%"IMG_UINT64_FMTSPECX"\n",
+ psPDumpAllocationInfo->aszSymbolicAddress,
+ ui64Size,
+ uiAlign);
+ }
+
+	if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(psPDumpAllocationInfo);
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, ui32PDumpFlags);
+
+ psPDumpAllocationInfo->ui64Size = ui64Size;
+ psPDumpAllocationInfo->ui32Align = TRUNCATE_64BITS_TO_32BITS(uiAlign);
+
+ *phHandlePtr = (IMG_HANDLE)psPDumpAllocationInfo;
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
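+/*
+ * Illustrative pairing of PDumpMalloc()/PDumpFree(); the device-space and
+ * symbolic names here are placeholders:
+ *
+ *   IMG_HANDLE hPDumpAlloc;
+ *   PVRSRV_ERROR eErr;
+ *
+ *   eErr = PDumpMalloc("SYSMEM", "EXAMPLEBUF", ui64Size, uiAlign,
+ *                      IMG_FALSE, 0, &hPDumpAlloc, 0);
+ *   if (eErr == PVRSRV_OK)
+ *   {
+ *       ... reference the allocation in later PDump commands ...
+ *       PDumpFree(hPDumpAlloc);
+ *   }
+ */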
+
+/**************************************************************************
+ * Function Name  : PDumpFree
+ * Inputs         : hPDumpAllocationInfoHandle - handle from PDumpMalloc()
+ * Outputs        : None
+ * Returns        : PVRSRV_ERROR
+ * Description    : Emits a FREE to the PDump script stream and releases
+ *                  the allocation info.
+ **************************************************************************/
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32Flags = PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_BLKDATA;
+
+ PDUMP_PHYSMEM_INFO_T *psPDumpAllocationInfo;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ psPDumpAllocationInfo = (PDUMP_PHYSMEM_INFO_T *)hPDumpAllocationInfoHandle;
+
+ /*
+ Write to the MMU script stream indicating the memory free
+ */
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript, ui32MaxLen, "FREE %s\n",
+ psPDumpAllocationInfo->aszSymbolicAddress);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, ui32Flags);
+ OSFreeMem(psPDumpAllocationInfo);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ PMR_VALUE32_FMTSPEC " ",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ ui32Value);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
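+/*
+ * For a device space "SYSMEM" and symbolic name "EXAMPLEBUF" (placeholders),
+ * offset 0x10 and value 0xDEADBEEF, the function above emits a script line
+ * of the form:
+ *
+ *   WRW :SYSMEM:EXAMPLEBUF:0x10 0xDEADBEEF
+ */
+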
+PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ const IMG_CHAR *pszInternalVar,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s ",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ pszInternalVar);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "RDW %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " ",
+ pszInternalVar,
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ PMR_VALUE64_FMTSPEC " ",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ ui64Value);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ const IMG_CHAR *pszInternalVar,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "WRW64 :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " %s ",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ pszInternalVar);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "RDW64 %s :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " ",
+ pszInternalVar,
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "LDB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ IMG_DEVMEM_SIZE_FMTSPEC " "
+ PDUMP_FILEOFFSET_FMTSPEC " %s\n",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ uiSize,
+ uiFileOffset,
+ pszFilename);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 uiFileOffset)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 uiPDumpFlags;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ uiPDumpFlags = 0;
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "SAB :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ IMG_DEVMEM_SIZE_FMTSPEC " "
+ "0x%08X %s.bin\n",
+ pszDevSpace,
+ pszSymbolicName,
+ uiOffset,
+ uiSize,
+ uiFileOffset,
+ pszFileName);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 uiCount,
+ IMG_UINT32 uiDelay,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "POL :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ "0x%08X 0x%08X %d %d %d\n",
+ pszMemspaceName,
+ pszSymbolicName,
+ uiOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ uiCount,
+ uiDelay);
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
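+/*
+ * A POL emitted by the function above has the general form (names and
+ * values are placeholders):
+ *
+ *   POL :SYSMEM:EXAMPLEBUF:0x0 0x00000001 0xFFFFFFFF 0 1000 10
+ *
+ * i.e. poll the location until (value & mask) compares as requested
+ * (operator 0 is PDUMP_POLL_OPERATOR_EQUAL), retrying up to uiCount times
+ * with uiDelay between attempts.
+ */
+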
+PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PDUMP_FLAGS_T uiPDumpFlags = 0;
+
+ PDUMP_GET_SCRIPT_STRING()
+
+ PDUMP_LOCK();
+ eError = PDumpOSBufprintf(hScript,
+ ui32MaxLen,
+ "CBP :%s:%s:" IMG_DEVMEM_OFFSET_FMTSPEC " "
+ IMG_DEVMEM_OFFSET_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC " " IMG_DEVMEM_SIZE_FMTSPEC "\n",
+ pszMemspaceName,
+ pszSymbolicName,
+ uiReadOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+
+	if (eError != PVRSRV_OK)
+ {
+ goto _return;
+ }
+
+ PDumpWriteScript(hScript, uiPDumpFlags);
+
+ _return:
+ PDUMP_UNLOCK();
+ return eError;
+}
+
+PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+ size_t uiNumBytes,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_CHAR *pszFilenameOut,
+ size_t uiFilenameBufSz,
+ PDUMP_FILEOFFSET_T *puiOffsetOut)
+{
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(uiFilenameBufSz);
+
+ if (!PDumpReady())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_AVAILABLE;
+ }
+
+ PVR_ASSERT(uiNumBytes > 0);
+
+ /* PRQA S 3415 1 */ /* side effects desired */
+ if (PDumpIsDumpSuspended())
+ {
+ return PVRSRV_ERROR_PDUMP_NOT_ALLOWED;
+ }
+
+ PVR_ASSERT(uiFilenameBufSz <= PDUMP_PARAM_MAX_FILE_NAME);
+
+ PDUMP_LOCK();
+
+ eError = PDumpWriteParameter(pcBuffer, uiNumBytes, uiPDumpFlags, puiOffsetOut, pszFilenameOut);
+
+ PDUMP_UNLOCK();
+
+ if ((eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED) && (eError != PVRSRV_OK))
+ {
+ PVR_LOGR_IF_ERROR(eError, "PDumpWriteParameter");
+ }
+	/* else the write to the parameter file succeeded, or was prevented under
+	 * the flags and current state of the driver, so skip further writes and
+	 * let the caller know.
+	 */
+ return eError;
+}
+
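+/*
+ * Illustrative use of PDumpWriteBuffer() together with PDumpPMRLDB(): the
+ * blob is written to the parameter stream first, then replayed from the
+ * returned filename and offset (buffer and names are placeholders):
+ *
+ *   IMG_CHAR aszFile[PDUMP_PARAM_MAX_FILE_NAME];
+ *   PDUMP_FILEOFFSET_T uiFileOff;
+ *
+ *   eErr = PDumpWriteBuffer(pui8Data, uiBytes, PDUMP_FLAGS_CONTINUOUS,
+ *                           aszFile, sizeof(aszFile), &uiFileOff);
+ *   if (eErr == PVRSRV_OK)
+ *   {
+ *       eErr = PDumpPMRLDB("SYSMEM", "EXAMPLEBUF", 0, uiBytes,
+ *                          aszFile, uiFileOff, PDUMP_FLAGS_CONTINUOUS);
+ *   }
+ */
+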
+#endif /* PDUMP */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_physmem.h b/drivers/gpu/drm/img-rogue/1.10/pdump_physmem.h
new file mode 100644
index 00000000000000..63f60a8a1fc5e4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_physmem.h
@@ -0,0 +1,242 @@
+/**************************************************************************/ /*!
+@File
+@Title pdump functions to assist with physmem allocations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements basic low level control of MMU.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVSRV_PDUMP_PHYSMEM_H
+#define SRVSRV_PDUMP_PHYSMEM_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pmr.h"
+
+#define PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH 40
+#define PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH 60
+#define PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH (PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH + PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH)
+
+typedef struct _PDUMP_PHYSMEM_INFO_T_ PDUMP_PHYSMEM_INFO_T;
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+ IMG_CHAR **ppszSymbolicAddress);
+
+extern PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ /* alignment is alignment of start of buffer _and_
+ minimum contiguity - i.e. smallest allowable
+ page-size. */
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phHandlePtr,
+ IMG_UINT32 ui32PDumpFlags);
+
+extern
+PVRSRV_ERROR PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle);
+
+void
+PDumpMakeStringValid(IMG_CHAR *pszString,
+ IMG_UINT32 ui32StrLen);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpGetSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PDumpGetSymbolicAddr(const IMG_HANDLE hPhysmemPDumpHandle,
+ IMG_CHAR **ppszSymbolicAddress)
+{
+ PVR_UNREFERENCED_PARAMETER(hPhysmemPDumpHandle);
+ PVR_UNREFERENCED_PARAMETER(ppszSymbolicAddress);
+ return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpMalloc(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_UINT64 ui64Size,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phHandlePtr,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(pszDevSpace);
+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddress);
+ PVR_UNREFERENCED_PARAMETER(ui64Size);
+ PVR_UNREFERENCED_PARAMETER(uiAlign);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+	PVR_UNREFERENCED_PARAMETER(phHandlePtr);
+	PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+	return PVRSRV_OK;
+}
+
+static INLINE PVRSRV_ERROR
+PDumpFree(IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#define PMR_DEFAULT_PREFIX "PMR"
+#define PMR_SYMBOLICADDR_FMTSPEC "%s%"IMG_UINT64_FMTSPEC"_%"IMG_UINT64_FMTSPEC"_%s"
+#define PMR_MEMSPACE_FMTSPEC "%s"
+#define PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC PMR_MEMSPACE_FMTSPEC
+
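+/*
+ * PMR_SYMBOLICADDR_FMTSPEC combines the prefix with two numeric identifiers
+ * and an annotation string, yielding names such as "PMR1_0_EXAMPLE"
+ * (illustrative values).
+ */
+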
+#if defined(PDUMP)
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+ PDumpMalloc(pszPDumpMemDevName, PMR_OSALLOCPAGES_PREFIX, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr, PDUMP_NONE)
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+ PDumpFree(hHandle)
+#else
+#define PDUMP_PHYSMEM_MALLOC_OSPAGES(pszPDumpMemDevName, ui32SerialNum, ui32Size, ui32Align, bInitialise, ui32InitValue, phHandlePtr) \
+ ((void)(*phHandlePtr=NULL))
+#define PDUMP_PHYSMEM_FREE_OSPAGES(hHandle) \
+ ((void)(0))
+#endif // defined(PDUMP)
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW32InternalVarToMem(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ const IMG_CHAR *pszInternalVar,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRRDW32MemToInternalVar(const IMG_CHAR *pszInternalVar,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRWRW64InternalVarToMem(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ const IMG_CHAR *pszInternalVar,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRRDW64MemToInternalVar(const IMG_CHAR *pszInternalVar,
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRLDB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRSAB(const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 uiFileOffset);
+
+/*
+  PDumpPMRPOL()
+
+  Emits a POL command to the PDump script stream.
+*/
+extern PVRSRV_ERROR
+PDumpPMRPOL(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 uiCount,
+ IMG_UINT32 uiDelay,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+extern PVRSRV_ERROR
+PDumpPMRCBP(const IMG_CHAR *pszMemspaceName,
+ const IMG_CHAR *pszSymbolicName,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+
+/*
+ * PDumpWriteBuffer()
+ *
+ * Writes a binary blob containing the current contents of the memory to
+ * the PDump parameter stream, and returns the filename and offset at
+ * which that blob is located (for use in a subsequent LDB, for example).
+ *
+ * The caller must provide a buffer to receive the filename and declare
+ * the size of that buffer.
+ */
+extern PVRSRV_ERROR
+PDumpWriteBuffer(IMG_UINT8 *pcBuffer,
+ size_t uiNumBytes,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_CHAR *pszFilenameOut,
+ size_t uiFilenameBufSz,
+ PDUMP_FILEOFFSET_T *puiOffsetOut);
+
+#endif /* #ifndef SRVSRV_PDUMP_PHYSMEM_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdump_symbolicaddr.h b/drivers/gpu/drm/img-rogue/1.10/pdump_symbolicaddr.h
new file mode 100644
index 00000000000000..ed912a5096c955
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdump_symbolicaddr.h
@@ -0,0 +1,55 @@
+/**************************************************************************/ /*!
+@File
+@Title Abstraction of PDUMP symbolic address derivation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Allows pdump functions to derive symbolic addresses on-the-fly
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_PDUMP_SYMBOLICADDR_H
+#define SRVKM_PDUMP_SYMBOLICADDR_H
+
+#include "img_types.h"
+
+#include "pvrsrv_error.h"
+
+/* pdump symbolic addresses are generated on-the-fly with a callback */
+
+typedef PVRSRV_ERROR (*PVRSRV_SYMADDRFUNCPTR)(IMG_HANDLE hPriv, IMG_UINT32 uiOffset, IMG_CHAR *pszSymbolicAddr, IMG_UINT32 ui32SymbolicAddrLen, IMG_UINT32 *pui32NewOffset);
+
+#endif /* #ifndef SRVKM_PDUMP_SYMBOLICADDR_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdumpdefs.h b/drivers/gpu/drm/img-rogue/1.10/pdumpdefs.h
new file mode 100644
index 00000000000000..19941265553c31
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdumpdefs.h
@@ -0,0 +1,213 @@
+/*************************************************************************/ /*!
+@File
+@Title PDUMP definitions header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description PDUMP definitions header
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PDUMPDEFS_H__)
+#define __PDUMPDEFS_H__
+
+/*! PDump Pixel Format Enumeration */
+typedef enum _PDUMP_PIXEL_FORMAT_
+{
+ PVRSRV_PDUMP_PIXEL_FORMAT_UNSUPPORTED = 0,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB8 = 1,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB332 = 2,
+ PVRSRV_PDUMP_PIXEL_FORMAT_KRGB555 = 3,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB565 = 4,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB4444 = 5,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB1555 = 6,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB888 = 7,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8888 = 8,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV8 = 9,
+// PVRSRV_PDUMP_PIXEL_FORMAT_AYUV4444 = 10,
+ PVRSRV_PDUMP_PIXEL_FORMAT_VY0UY1_8888 = 11,
+ PVRSRV_PDUMP_PIXEL_FORMAT_UY0VY1_8888 = 12,
+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0UY1V_8888 = 13,
+ PVRSRV_PDUMP_PIXEL_FORMAT_Y0VY1U_8888 = 14,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV888 = 15,
+ PVRSRV_PDUMP_PIXEL_FORMAT_UYVY10101010 = 16,
+ PVRSRV_PDUMP_PIXEL_FORMAT_VYAUYA8888 = 17,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV8888 = 18,
+ PVRSRV_PDUMP_PIXEL_FORMAT_AYUV2101010 = 19,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV101010 = 20,
+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y8 = 21,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_IMC2 = 22,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV12 = 23,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL8 = 24,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_PL12 = 25,
+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV8 = 26,
+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV8 = 27,
+ PVRSRV_PDUMP_PIXEL_FORMAT_PL12Y10 = 28,
+ PVRSRV_PDUMP_PIXEL_FORMAT_422PL12YUV10 = 29,
+ PVRSRV_PDUMP_PIXEL_FORMAT_420PL12YUV10 = 30,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR8888 = 31,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA8888 = 32,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB8332 = 33,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGB555 = 34,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16 = 35,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32 = 36,
+ PVRSRV_PDUMP_PIXEL_FORMAT_L16 = 37,
+ PVRSRV_PDUMP_PIXEL_FORMAT_L32 = 38,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA8888 = 39,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR4444 = 40,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA4444 = 41,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA4444 = 42,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ABGR1555 = 43,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RGBA5551 = 44,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGRA5551 = 45,
+ PVRSRV_PDUMP_PIXEL_FORMAT_BGR565 = 46,
+ PVRSRV_PDUMP_PIXEL_FORMAT_A8 = 47,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16F16 = 49,
+ PVRSRV_PDUMP_PIXEL_FORMAT_A4 = 50,
+ PVRSRV_PDUMP_PIXEL_FORMAT_ARGB2101010 = 51,
+ PVRSRV_PDUMP_PIXEL_FORMAT_RSGSBS888 = 52,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32F32 = 53,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16 = 54,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32 = 55,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F16F16F16 = 56,
+ PVRSRV_PDUMP_PIXEL_FORMAT_F32F32F32 = 57,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U8 = 58,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U8U8 = 59,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U16 = 60,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16 = 61,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U16U16U16U16 = 62,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U32 = 63,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32 = 64,
+ PVRSRV_PDUMP_PIXEL_FORMAT_U32U32U32U32 = 65,
+ PVRSRV_PDUMP_PIXEL_FORMAT_YUV_YV32 = 66,
+
+ PVRSRV_PDUMP_PIXEL_FORMAT_FORCE_I32 = 0x7fffffff
+
+} PDUMP_PIXEL_FORMAT;
+
+/*! PDump addrmode */
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT 0
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_MASK 0x000000FF
+
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT 8
+#define PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE (1 << PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_SHIFT 12
+#define PVRSRV_PDUMP_ADDRMODE_BIFTILE_MODE_MASK 0x000FF000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT 20
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_MASK 0x00F00000
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT 24
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT 28
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_MASK 0xF0000000
+
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE (0 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE1 (1 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE2 (2 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE3 (3 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE4 (4 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE5 (5 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE6 (6 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_BIFTILE_STRIDE7 (7 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_TWIDDLED (9 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_PAGETILED (11 << PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_NONE (0 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT (1 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_DIRECT (2 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_32X2_DIRECT (3 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT (4 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT (5 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_INDIRECT_4TILE (6 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCMODE_16X4_INDIRECT_4TILE (7 << PVRSRV_PDUMP_ADDRMODE_FBCMODE_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBC_DECOR (1 << PVRSRV_PDUMP_ADDRMODE_FBCDECOR_SHIFT)
+
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_BASE (1 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_ENHANCED (2 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V2 (3 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_SURFACE (4 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+#define PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_V3_RESOURCE (5 << PVRSRV_PDUMP_ADDRMODE_FBCCOMPAT_SHIFT)
+
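+/*
+ * An addrmode word is built by OR-ing one value from each field; for
+ * example (illustrative), a negatively-strided surface with direct 8x8
+ * frame-buffer compression:
+ *
+ *   ui32AddrMode = PVRSRV_PDUMP_ADDRMODE_MEMFORMAT_STRIDE |
+ *                  PVRSRV_PDUMP_ADDRMODE_STRIDESENSE_NEGATIVE |
+ *                  PVRSRV_PDUMP_ADDRMODE_FBCMODE_8X8_DIRECT;
+ */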
+
+/*! PDump Poll Operator */
+typedef enum _PDUMP_POLL_OPERATOR
+{
+ PDUMP_POLL_OPERATOR_EQUAL = 0,
+ PDUMP_POLL_OPERATOR_LESS = 1,
+ PDUMP_POLL_OPERATOR_LESSEQUAL = 2,
+ PDUMP_POLL_OPERATOR_GREATER = 3,
+ PDUMP_POLL_OPERATOR_GREATEREQUAL = 4,
+ PDUMP_POLL_OPERATOR_NOTEQUAL = 5,
+} PDUMP_POLL_OPERATOR;
+
+
+#define PVRSRV_PDUMP_MAX_FILENAME_SIZE 75 /*!< Max length of a pdump log file name */
+#define PVRSRV_PDUMP_MAX_COMMENT_SIZE 350 /*!< Max length of a pdump comment */
+
+/*!
+ PDump MMU type
+ (Maps to values listed in "PowerVR Tools.Pdump2 Script Functions.doc" Sec 2.13)
+*/
+typedef enum
+{
+ PDUMP_MMU_TYPE_4KPAGE_32BIT_STDTILE = 1,
+ PDUMP_MMU_TYPE_VARPAGE_32BIT_STDTILE = 2,
+ PDUMP_MMU_TYPE_4KPAGE_36BIT_EXTTILE = 3,
+ PDUMP_MMU_TYPE_4KPAGE_32BIT_EXTTILE = 4,
+ PDUMP_MMU_TYPE_4KPAGE_36BIT_STDTILE = 5,
+ PDUMP_MMU_TYPE_VARPAGE_40BIT = 6,
+ PDUMP_MMU_TYPE_VIDEO_40BIT_STDTILE = 7,
+ PDUMP_MMU_TYPE_VIDEO_40BIT_EXTTILE = 8,
+ PDUMP_MMU_TYPE_MIPS_MICROAPTIV = 9,
+ PDUMP_MMU_TYPE_LAST
+} PDUMP_MMU_TYPE;
+
+/*!
+ PDump states
+ These values are used by the bridge call PVRSRVPDumpGetState
+*/
+#define PDUMP_STATE_CAPTURE_FRAME (1) /*!< Flag indicates whether PDump is in the capture range */
+#define PDUMP_STATE_CONNECTED (2) /*!< Flag indicates whether the PDump client app is connected */
+
+#endif /* __PDUMPDEFS_H__ */
+
+/*****************************************************************************
+ End of file (pdumpdefs.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pdumpdesc.h b/drivers/gpu/drm/img-rogue/1.10/pdumpdesc.h
new file mode 100644
index 00000000000000..cc040fc386440c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pdumpdesc.h
@@ -0,0 +1,142 @@
+/*************************************************************************/ /*!
+@File pdumpdesc.h
+@Title PDump Descriptor format
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Describes PDump descriptors that may be passed to the
+ extraction routines (SAB).
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PDUMPDESC_H__)
+#define __PDUMPDESC_H__
+
+#include "pdumpdefs.h"
+
+/*
+ * Common fields
+ */
+#define HEADER_WORD0_TYPE_SHIFT (0)
+#define HEADER_WORD0_TYPE_CLRMSK (0xFFFFFFFFU)
+
+#define HEADER_WORD1_SIZE_SHIFT (0)
+#define HEADER_WORD1_SIZE_CLRMSK (0x0000FFFFU)
+#define HEADER_WORD1_VERSION_SHIFT (16)
+#define HEADER_WORD1_VERSION_CLRMSK (0xFFFF0000U)
+
+#define HEADER_WORD2_DATA_SIZE_SHIFT (0)
+#define HEADER_WORD2_DATA_SIZE_CLRMSK (0xFFFFFFFFU)
+
+
+/*
+ * The image type descriptor
+ */
+
+/*
+ * Header type (IMGBv2) - 'IMGB' in hex + VERSION 1
+ * Header size - 56 bytes
+ */
+#define IMAGE_HEADER_TYPE (0x42474D49)
+#define IMAGE_HEADER_SIZE (56)
+#define IMAGE_HEADER_VERSION (1)
+
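+/*
+ * The common header words are packed from these constants; for instance
+ * (illustrative) word 1 combines the header size and version:
+ *
+ *   ui32Word1 = (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT) |
+ *               (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT);
+ */
+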
+/*
+ * Image type-specific fields
+ */
+#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT (0)
+#define IMAGE_HEADER_WORD3_LOGICAL_WIDTH_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT (0)
+#define IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD5_FORMAT_SHIFT (0)
+#define IMAGE_HEADER_WORD5_FORMAT_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT (0)
+#define IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT (0)
+#define IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD8_TWIDDLING_SHIFT (0)
+#define IMAGE_HEADER_WORD8_TWIDDLING_CLRMSK (0x000000FFU)
+#define IMAGE_HEADER_WORD8_TWIDDLING_STRIDED (0 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT)
+#define IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE (9 << IMAGE_HEADER_WORD8_TWIDDLING_SHIFT)
+
+
+#define IMAGE_HEADER_WORD8_STRIDE_SHIFT (8)
+#define IMAGE_HEADER_WORD8_STRIDE_CLRMSK (0x0000FF00U)
+#define IMAGE_HEADER_WORD8_STRIDE_POSITIVE (0 << IMAGE_HEADER_WORD8_STRIDE_SHIFT)
+#define IMAGE_HEADER_WORD8_STRIDE_NEGATIVE (1 << IMAGE_HEADER_WORD8_STRIDE_SHIFT)
+
+#define IMAGE_HEADER_WORD8_BIFTYPE_SHIFT (16)
+#define IMAGE_HEADER_WORD8_BIFTYPE_CLRMSK (0x00FF0000U)
+#define IMAGE_HEADER_WORD8_BIFTYPE_NONE (0 << IMAGE_HEADER_WORD8_BIFTYPE_SHIFT)
+
+#define IMAGE_HEADER_WORD8_FBCTYPE_SHIFT (24)
+#define IMAGE_HEADER_WORD8_FBCTYPE_CLRMSK (0xFF000000U)
+#define IMAGE_HEADER_WORD8_FBCTYPE_8X8 (1 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT)
+#define IMAGE_HEADER_WORD8_FBCTYPE_16x4 (2 << IMAGE_HEADER_WORD8_FBCTYPE_SHIFT)
+
+#define IMAGE_HEADER_WORD9_FBCDECOR_SHIFT (0)
+#define IMAGE_HEADER_WORD9_FBCDECOR_CLRMSK (0x000000FFU)
+#define IMAGE_HEADER_WORD9_FBCDECOR_ENABLE (1 << IMAGE_HEADER_WORD9_FBCDECOR_SHIFT)
+
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT (8)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_CLRMSK (0x0000FF00U)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_SAME_AS_GPU (0 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_BASE (1 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_TWIDDLED_EN (2 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V2 (3 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3 (4 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+#define IMAGE_HEADER_WORD9_FBCCOMPAT_V3_REMAP (5 << IMAGE_HEADER_WORD9_FBCCOMPAT_SHIFT)
+
+#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_SHIFT (0)
+#define IMAGE_HEADER_WORD10_FBCCLEAR_CH0_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_SHIFT (0)
+#define IMAGE_HEADER_WORD11_FBCCLEAR_CH1_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_SHIFT (0)
+#define IMAGE_HEADER_WORD12_FBCCLEAR_CH2_CLRMSK (0xFFFFFFFFU)
+
+#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_SHIFT (0)
+#define IMAGE_HEADER_WORD13_FBCCLEAR_CH3_CLRMSK (0xFFFFFFFFU)
+
+
+#endif /* __PDUMPDESC_H__ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/physheap.c b/drivers/gpu/drm/img-rogue/1.10/physheap.c
new file mode 100644
index 00000000000000..1f6e4fe7b4536c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physheap.c
@@ -0,0 +1,349 @@
+/*************************************************************************/ /*!
+@File physheap.c
+@Title Physical heap management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Management functions for the physical heap(s). A heap contains
+ all the information required by services when using memory from
+ that heap (such as CPU <> Device physical address translation).
+              A system must register at least one heap but can have more than
+              one, which is why each heap must register with a (system-)unique ID.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "physheap.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+struct _PHYS_HEAP_
+{
+ /*! ID of this physical memory heap */
+ IMG_UINT32 ui32PhysHeapID;
+ /*! The type of this heap */
+ PHYS_HEAP_TYPE eType;
+
+ /*! PDump name of this physical memory heap */
+ IMG_CHAR *pszPDumpMemspaceName;
+ /*! Private data for the translate routines */
+ IMG_HANDLE hPrivData;
+ /*! Function callbacks */
+ PHYS_HEAP_FUNCTIONS *psMemFuncs;
+
+ /*! Array of sub-regions of the heap */
+ PHYS_HEAP_REGION *pasRegions;
+ IMG_UINT32 ui32NumOfRegions;
+
+ /*! Refcount */
+ IMG_UINT32 ui32RefCount;
+ /*! Pointer to next physical heap */
+ struct _PHYS_HEAP_ *psNext;
+};
+
+static PHYS_HEAP *g_psPhysHeapList;
+static POS_LOCK g_hPhysHeapLock;
+
+#if defined(REFCOUNT_DEBUG)
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...) \
+ PVRSRVDebugPrintf(PVR_DBG_WARNING, \
+ __FILE__, \
+ __LINE__, \
+ fmt, \
+ __VA_ARGS__)
+#else
+#define PHYSHEAP_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+ PHYS_HEAP **ppsPhysHeap)
+{
+ PHYS_HEAP *psNew;
+ PHYS_HEAP *psTmp;
+
+ PVR_DPF_ENTERED;
+
+ if (psConfig->eType == PHYS_HEAP_TYPE_UNKNOWN)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Check this heap ID isn't already in use */
+ psTmp = g_psPhysHeapList;
+ while (psTmp)
+ {
+ if (psTmp->ui32PhysHeapID == psConfig->ui32PhysHeapID)
+ {
+ return PVRSRV_ERROR_PHYSHEAP_ID_IN_USE;
+ }
+ psTmp = psTmp->psNext;
+ }
+
+ psNew = OSAllocMem(sizeof(PHYS_HEAP));
+ if (psNew == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psNew->ui32PhysHeapID = psConfig->ui32PhysHeapID;
+ psNew->eType = psConfig->eType;
+ psNew->psMemFuncs = psConfig->psMemFuncs;
+ psNew->hPrivData = psConfig->hPrivData;
+ psNew->ui32RefCount = 0;
+ psNew->pszPDumpMemspaceName = psConfig->pszPDumpMemspaceName;
+
+ psNew->pasRegions = psConfig->pasRegions;
+ psNew->ui32NumOfRegions = psConfig->ui32NumOfRegions;
+
+ psNew->psNext = g_psPhysHeapList;
+ g_psPhysHeapList = psNew;
+
+ *ppsPhysHeap = psNew;
+
+ PVR_DPF_RETURN_RC1(PVRSRV_OK, *ppsPhysHeap);
+}
+
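+/*
+ * Illustrative registration from a system layer; the config values and
+ * gsHeapFuncs are placeholders that a real system fills from platform data:
+ *
+ *   PHYS_HEAP_CONFIG sConfig = { 0 };
+ *   PHYS_HEAP *psHeap;
+ *
+ *   sConfig.ui32PhysHeapID       = 0;
+ *   sConfig.eType                = PHYS_HEAP_TYPE_UMA;
+ *   sConfig.pszPDumpMemspaceName = "SYSMEM";
+ *   sConfig.psMemFuncs           = &gsHeapFuncs;
+ *   eError = PhysHeapRegister(&sConfig, &psHeap);
+ */
+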
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap)
+{
+ PVR_DPF_ENTERED1(psPhysHeap);
+
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+#endif
+ {
+ PVR_ASSERT(psPhysHeap->ui32RefCount == 0);
+ }
+
+ if (g_psPhysHeapList == psPhysHeap)
+ {
+ g_psPhysHeapList = psPhysHeap->psNext;
+ }
+ else
+ {
+ PHYS_HEAP *psTmp = g_psPhysHeapList;
+
+		while (psTmp->psNext != psPhysHeap)
+ {
+ psTmp = psTmp->psNext;
+ }
+ psTmp->psNext = psPhysHeap->psNext;
+ }
+
+ OSFreeMem(psPhysHeap);
+
+ PVR_DPF_RETURN;
+}
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+ PHYS_HEAP **ppsPhysHeap)
+{
+ PHYS_HEAP *psTmp = g_psPhysHeapList;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_DPF_ENTERED1(ui32PhysHeapID);
+
+ OSLockAcquire(g_hPhysHeapLock);
+
+ while (psTmp)
+ {
+ if (psTmp->ui32PhysHeapID == ui32PhysHeapID)
+ {
+ break;
+ }
+ psTmp = psTmp->psNext;
+ }
+
+ if (psTmp == NULL)
+ {
+ eError = PVRSRV_ERROR_PHYSHEAP_ID_INVALID;
+ }
+ else
+ {
+ psTmp->ui32RefCount++;
+ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psTmp, psTmp->ui32RefCount);
+ }
+
+ OSLockRelease(g_hPhysHeapLock);
+
+ *ppsPhysHeap = psTmp;
+ PVR_DPF_RETURN_RC1(eError, *ppsPhysHeap);
+}
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap)
+{
+ PVR_DPF_ENTERED1(psPhysHeap);
+
+ OSLockAcquire(g_hPhysHeapLock);
+ psPhysHeap->ui32RefCount--;
+ PHYSHEAP_REFCOUNT_PRINT("%s: Heap %p, refcount = %d", __FUNCTION__, psPhysHeap, psPhysHeap->ui32RefCount);
+ OSLockRelease(g_hPhysHeapLock);
+
+ PVR_DPF_RETURN;
+}
+
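+/*
+ * PhysHeapAcquire()/PhysHeapRelease() form a refcounted pair: every
+ * successful acquire must eventually be matched by a release, e.g.
+ * (illustrative):
+ *
+ *   eError = PhysHeapAcquire(0, &psHeap);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       ... use psHeap ...
+ *       PhysHeapRelease(psHeap);
+ *   }
+ */
+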
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap)
+{
+ return psPhysHeap->eType;
+}
+
+/*
+ * Returns in psDevPAddr whatever device physical address the system layer
+ * has configured for the referenced region. The address is not validated,
+ * so the call will not fail even if psDevPAddr is invalid.
+ */
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+ {
+ *psDevPAddr = psPhysHeap->pasRegions[ui32RegionId].sCardBase;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+/*
+ * Returns in psCpuPAddr whatever CPU physical address the system layer
+ * has configured for the referenced region. The address is not validated,
+ * so the call will not fail even if psCpuPAddr is invalid.
+ */
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+ {
+ *psCpuPAddr = psPhysHeap->pasRegions[ui32RegionId].sStartAddr;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_UINT64 *puiSize)
+{
+ if (ui32RegionId < psPhysHeap->ui32NumOfRegions)
+ {
+ *puiSize = psPhysHeap->pasRegions[ui32RegionId].uiSize;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ psPhysHeap->psMemFuncs->pfnCpuPAddrToDevPAddr(psPhysHeap->hPrivData,
+ ui32NumOfAddr,
+ psDevPAddr,
+ psCpuPAddr);
+}
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ psPhysHeap->psMemFuncs->pfnDevPAddrToCpuPAddr(psPhysHeap->hPrivData,
+ ui32NumOfAddr,
+ psCpuPAddr,
+ psDevPAddr);
+}
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags)
+{
+ if (psPhysHeap->psMemFuncs->pfnGetRegionId == NULL)
+ {
+ return 0;
+ }
+
+ return psPhysHeap->psMemFuncs->pfnGetRegionId(psPhysHeap->hPrivData,
+ uiAllocFlags);
+}
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap)
+{
+ return psPhysHeap->pszPDumpMemspaceName;
+}
+
+PVRSRV_ERROR PhysHeapInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ g_psPhysHeapList = NULL;
+
+ eError = OSLockCreate(&g_hPhysHeapLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PhysHeapLock: %s",
+ __func__,
+ PVRSRVGETERRORSTRING(eError)));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PhysHeapDeinit(void)
+{
+ PVR_ASSERT(g_psPhysHeapList == NULL);
+
+ OSLockDestroy(g_hPhysHeapLock);
+
+ return PVRSRV_OK;
+}
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap)
+{
+ return psPhysHeap->ui32NumOfRegions;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/physheap.h b/drivers/gpu/drm/img-rogue/1.10/physheap.h
new file mode 100644
index 00000000000000..1e6d637db9d782
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physheap.h
@@ -0,0 +1,160 @@
+/*************************************************************************/ /*!
+@File
+@Title Physical heap management header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the interface for the physical heap management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#ifndef _PHYSHEAP_H_
+#define _PHYSHEAP_H_
+
+typedef struct _PHYS_HEAP_ PHYS_HEAP;
+
+typedef void (*CpuPAddrToDevPAddr)(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+typedef void (*DevPAddrToCpuPAddr)(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+typedef IMG_UINT32 (*GetRegionId)(IMG_HANDLE hPrivData,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocationFlags);
+
+typedef struct _PHYS_HEAP_FUNCTIONS_
+{
+ /*! Translate CPU physical address to device physical address */
+ CpuPAddrToDevPAddr pfnCpuPAddrToDevPAddr;
+ /*! Translate device physical address to CPU physical address */
+ DevPAddrToCpuPAddr pfnDevPAddrToCpuPAddr;
+ /*! Return id of heap region to allocate from */
+ GetRegionId pfnGetRegionId;
+} PHYS_HEAP_FUNCTIONS;
+
+typedef enum _PHYS_HEAP_TYPE_
+{
+ PHYS_HEAP_TYPE_UNKNOWN = 0,
+ PHYS_HEAP_TYPE_UMA,
+ PHYS_HEAP_TYPE_LMA,
+ PHYS_HEAP_TYPE_DMA,
+#if defined(SUPPORT_WRAP_EXTMEMOBJECT)
+ PHYS_HEAP_TYPE_WRAP,
+#endif
+} PHYS_HEAP_TYPE;
+
+typedef struct _PHYS_HEAP_REGION_
+{
+ IMG_CPU_PHYADDR sStartAddr;
+ IMG_DEV_PHYADDR sCardBase;
+ IMG_UINT64 uiSize;
+
+ IMG_HANDLE hPrivData;
+} PHYS_HEAP_REGION;
+
+typedef struct _PHYS_HEAP_CONFIG_
+{
+ IMG_UINT32 ui32PhysHeapID;
+ PHYS_HEAP_TYPE eType;
+ IMG_CHAR *pszPDumpMemspaceName;
+ PHYS_HEAP_FUNCTIONS *psMemFuncs;
+
+ PHYS_HEAP_REGION *pasRegions;
+ IMG_UINT32 ui32NumOfRegions;
+ IMG_BOOL bDynAlloc;
+
+ IMG_HANDLE hPrivData;
+} PHYS_HEAP_CONFIG;
+
+PVRSRV_ERROR PhysHeapRegister(PHYS_HEAP_CONFIG *psConfig,
+ PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapUnregister(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapAcquire(IMG_UINT32 ui32PhysHeapID,
+ PHYS_HEAP **ppsPhysHeap);
+
+void PhysHeapRelease(PHYS_HEAP *psPhysHeap);
+
+PHYS_HEAP_TYPE PhysHeapGetType(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapRegionGetCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+PVRSRV_ERROR PhysHeapRegionGetSize(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_UINT64 *puiSize);
+
+PVRSRV_ERROR PhysHeapRegionGetDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32RegionId,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapNumberOfRegions(PHYS_HEAP *psPhysHeap);
+
+void PhysHeapCpuPAddrToDevPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+void PhysHeapDevPAddrToCpuPAddr(PHYS_HEAP *psPhysHeap,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+IMG_UINT32 PhysHeapGetRegionId(PHYS_HEAP *psPhysHeap,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags);
+
+IMG_CHAR *PhysHeapPDumpMemspaceName(PHYS_HEAP *psPhysHeap);
+
+PVRSRV_ERROR PhysHeapInit(void);
+PVRSRV_ERROR PhysHeapDeinit(void);
+
+#endif /* _PHYSHEAP_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem.c b/drivers/gpu/drm/img-rogue/1.10/physmem.c
new file mode 100644
index 00000000000000..ec6362c93255ea
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem.c
@@ -0,0 +1,639 @@
+/*************************************************************************/ /*!
+@File physmem.c
+@Title Physmem
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common entry point for creation of RAM backed PMR's
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "device.h"
+#include "physmem.h"
+#include "pvrsrv.h"
+#include "osfunc.h"
+#include "pdump_physmem.h"
+#include "pdump_km.h"
+#include "rgx_heaps.h"
+
+#if defined(DEBUG)
+static IMG_UINT32 gPMRAllocFail;
+
+#if defined(LINUX)
+#include <linux/moduleparam.h>
+
+module_param(gPMRAllocFail, uint, 0644);
+MODULE_PARM_DESC(gPMRAllocFail, "When the number of PMR allocations reaches "
+                 "this value the allocation fails (the default value of 0 "
+                 "means allocations behave normally).");
+#endif /* defined(LINUX) */
+#endif /* defined(DEBUG) */
+
+PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32MemSize,
+ IMG_UINT32 ui32Log2Align,
+ const IMG_UINT8 u8Value,
+ IMG_BOOL bInitPage,
+#if defined(PDUMP)
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_HANDLE *phHandlePtr,
+#endif
+ IMG_HANDLE hMemHandle,
+ IMG_DEV_PHYADDR *psDevPhysAddr)
+{
+ void *pvCpuVAddr;
+ PVRSRV_ERROR eError;
+#if defined(PDUMP)
+ IMG_CHAR szFilenameOut[PDUMP_PARAM_MAX_FILE_NAME];
+ PDUMP_FILEOFFSET_T uiOffsetOut;
+ IMG_UINT32 ui32PageSize;
+ IMG_UINT32 ui32PDumpMemSize = ui32MemSize;
+#endif
+ PG_HANDLE *psMemHandle;
+ IMG_UINT64 uiMask;
+ IMG_DEV_PHYADDR sDevPhysAddr_int;
+
+ psMemHandle = hMemHandle;
+
+ /* Allocate the pages */
+ eError = psDevNode->pfnDevPxAlloc(psDevNode,
+ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+ psMemHandle,
+ &sDevPhysAddr_int);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to allocate the pages"));
+ return eError;
+ }
+
+	/* Check whether the page allocator returned pages with our desired
+	 * alignment, which is not guaranteed
+	 */
+	uiMask = (1ULL << ui32Log2Align) - 1;
+ if (ui32Log2Align && (sDevPhysAddr_int.uiAddr & uiMask))
+ {
+ /* use over allocation instead */
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+
+ ui32MemSize += (IMG_UINT32) uiMask;
+ eError = psDevNode->pfnDevPxAlloc(psDevNode,
+ TRUNCATE_64BITS_TO_SIZE_T(ui32MemSize),
+ psMemHandle,
+ &sDevPhysAddr_int);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to over-allocate the pages"));
+ return eError;
+ }
+
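+		/* Worked example (illustrative): with ui32Log2Align == 12 (4 KiB)
+		 * the mask is 0xFFF, so an address of 0x11001 rounds up to 0x12000
+		 * via (addr + mask) & ~mask; the extra uiMask bytes over-allocated
+		 * above guarantee the aligned address still lies inside the
+		 * allocation.
+		 */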
+ sDevPhysAddr_int.uiAddr += uiMask;
+ sDevPhysAddr_int.uiAddr &= ~uiMask;
+ }
+ *psDevPhysAddr = sDevPhysAddr_int;
+
+#if defined(PDUMP)
+	ui32PageSize = ui32Log2Align ? (1 << ui32Log2Align) : OSGetPageSize();
+ eError = PDumpMalloc(pszDevSpace,
+ pszSymbolicAddress,
+ ui32PDumpMemSize,
+ ui32PageSize,
+ IMG_FALSE,
+ 0,
+ phHandlePtr,
+ PDUMP_NONE);
+ if (PVRSRV_OK != eError)
+ {
+ PDUMPCOMMENT("Allocating pages failed");
+ *phHandlePtr = NULL;
+ }
+#endif
+
+ if (bInitPage)
+ {
+		/* Map the page to the CPU VA space */
+ eError = psDevNode->pfnDevPxMap(psDevNode,
+ psMemHandle,
+ ui32MemSize,
+ &sDevPhysAddr_int,
+ &pvCpuVAddr);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to map the allocated page"));
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+ return eError;
+ }
+
+		/* Fill the memory with the given content */
+ OSDeviceMemSet(pvCpuVAddr, u8Value, ui32MemSize);
+
+		/* Clean the CPU cache so the device sees the initialised contents */
+ eError = psDevNode->pfnDevPxClean(psDevNode,
+ psMemHandle,
+ 0,
+ ui32MemSize);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to clean the allocated page"));
+ psDevNode->pfnDevPxUnMap(psDevNode, psMemHandle, pvCpuVAddr);
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+ return eError;
+ }
+
+#if defined(PDUMP)
+		/* P-Dumping of the page contents can be done in two ways:
+		 * 1. Store the single-byte init value in the .prm file and load
+		 *    that value across the entire page buffer.
+		 *    This method requires a lot of LDBs inserted into out2.txt.
+		 *
+		 * 2. Store the entire contents of the buffer in the .prm file and
+		 *    load them back.
+		 *    This needs only a single LDB instruction in the script. The
+		 *    .prm file may grow, but not significantly for an allocation
+		 *    of this size, so this is the method chosen here.
+		 */
+ /*Write the buffer contents to the prm file */
+ eError = PDumpWriteBuffer(pvCpuVAddr,
+ ui32PDumpMemSize,
+ PDUMP_FLAGS_CONTINUOUS,
+ szFilenameOut,
+ sizeof(szFilenameOut),
+ &uiOffsetOut);
+ if (PVRSRV_OK == eError)
+ {
+ /* Load the buffer back to the allocated memory when playing the pdump */
+ eError = PDumpPMRLDB(pszDevSpace,
+ pszSymbolicAddress,
+ 0,
+ ui32PDumpMemSize,
+ szFilenameOut,
+ uiOffsetOut,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (PVRSRV_OK != eError)
+ {
+ PDUMP_ERROR(eError, "Failed to write LDB statement to script file");
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write LDB statement to script file, error %d", eError));
+ }
+
+ }
+ else if (eError != PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+ PDUMP_ERROR(eError, "Failed to write device allocation to parameter file");
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write device allocation to parameter file, error %d", eError));
+ }
+ else
+ {
+			/* The write to the parameter file was prevented by the flags
+			 * and the current state of the driver, so skip the script write
+			 * and do not treat this as an error.
+			 */
+ eError = PVRSRV_OK;
+ }
+#endif
+
+		/* Unmap the page */
+ psDevNode->pfnDevPxUnMap(psDevNode,
+ psMemHandle,
+ pvCpuVAddr);
+ }
+
+ return PVRSRV_OK;
+}
+
+void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+ IMG_HANDLE hPDUMPMemHandle,
+#endif
+ IMG_HANDLE hMemHandle)
+{
+ PG_HANDLE *psMemHandle;
+
+ psMemHandle = hMemHandle;
+ psDevNode->pfnDevPxFree(psDevNode, psMemHandle);
+#if defined(PDUMP)
+ if (NULL != hPDUMPMemHandle)
+ {
+ PDumpFree(hPDUMPMemHandle);
+ }
+#endif
+
+}
+
+
+/* Checks the input parameters and adjusts them if possible and necessary */
+static inline PVRSRV_ERROR _ValidateParams(IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 *puiLog2AllocPageSize,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ PMR_SIZE_T *puiChunkSize)
+{
+ IMG_UINT32 uiLog2AllocPageSize = *puiLog2AllocPageSize;
+ IMG_DEVMEM_SIZE_T uiSize = *puiSize;
+ PMR_SIZE_T uiChunkSize = *puiChunkSize;
+	/* An allocation is sparse if the numbers of virtual and physical chunks
+	 * differ or, more generally, whenever there is more than one virtual chunk */
+ IMG_BOOL bIsSparse = (ui32NumVirtChunks != ui32NumPhysChunks ||
+ ui32NumVirtChunks > 1) ? IMG_TRUE : IMG_FALSE;
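+	/* e.g. 2 physical chunks mapped into 8 virtual chunks is sparse, and
+	 * so is 8 into 8, since any allocation with more than one virtual
+	 * chunk is treated as sparse.
+	 */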
+
+ /* Protect against ridiculous page sizes */
+ if (uiLog2AllocPageSize > RGX_HEAP_2MB_PAGE_SHIFT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Page size is too big: 2^%u.", uiLog2AllocPageSize));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Sanity check of the alloc size */
+ if (uiSize >= 0x1000000000ULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cancelling allocation request of over 64 GB. "
+ "This is likely a bug."
+ , __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Fail if requesting coherency on one side but uncached on the other */
+ if ( (PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) &&
+ (PVRSRV_CHECK_GPU_UNCACHED(uiFlags) || PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags))) )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Request for CPU coherency but specifying GPU uncached "
+ "Please use GPU cached flags for coherency."));
+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+ }
+
+ if ( (PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) &&
+ (PVRSRV_CHECK_CPU_UNCACHED(uiFlags) || PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags))) )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Request for GPU coherency but specifying CPU uncached "
+ "Please use CPU cached flags for coherency."));
+ return PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE;
+ }
+
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) && PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Zero on Alloc and Poison on Alloc are mutually exclusive.",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (bIsSparse)
+ {
+		/* For sparse allocations we need correct parameters, like a suitable page size... */
+ if (OSGetPageShift() > uiLog2AllocPageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid log2-contiguity for sparse allocation. "
+ "Requested %u, required minimum %zd",
+ __func__,
+ uiLog2AllocPageSize,
+ OSGetPageShift() ));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+		/* ... chunk size must be equal to the page size ... */
+ if ( uiChunkSize != (1 << uiLog2AllocPageSize) )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid chunk size for sparse allocation. "
+ "Requested %#llx, must be same as page size %#x.",
+ __func__,
+ (long long unsigned) uiChunkSize,
+ 1 << uiLog2AllocPageSize ));
+
+ return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ }
+
+ if (ui32NumVirtChunks * uiChunkSize != uiSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Total alloc size (%#llx) is not qual "
+ "to virtual chunks * chunk size (%#llx)",
+ __func__,
+ (long long unsigned) uiSize,
+ (long long unsigned) (ui32NumVirtChunks * uiChunkSize) ));
+
+ return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ }
+
+ if (ui32NumPhysChunks > ui32NumVirtChunks)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Number of physical chunks (%u) must not be greater "
+ "than number of virtual chunks (%u)",
+ __func__,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks));
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+ else
+ {
+		/*
+		 * Silently round up the alignment/page size if the request was less
+		 * than PAGE_SHIFT, because it is never harmful for memory to be
+		 * _more_ contiguous than was desired.
+		 */
+ uiLog2AllocPageSize = OSGetPageShift() > uiLog2AllocPageSize ?
+ OSGetPageShift() : uiLog2AllocPageSize;
+
+ /* Same for total size */
+ uiSize = PVR_ALIGN(uiSize, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+ *puiChunkSize = uiSize;
+ }
+
+ if ((uiSize & ((1ULL << uiLog2AllocPageSize) - 1)) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Total size (%#llx) must be a multiple "
+ "of the requested contiguity (%u)",
+ __func__,
+ (long long unsigned) uiSize,
+ (1 << uiLog2AllocPageSize)));
+ return PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ }
+
+ *puiLog2AllocPageSize = uiLog2AllocPageSize;
+ *puiSize = uiSize;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2AllocPageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+ PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize =
+ psDevNode->psDevConfig->pfnCheckMemAllocSize;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(uiAnnotationLength);
+
+ eError = _ValidateParams(ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ uiFlags,
+ &uiLog2AllocPageSize,
+ &uiSize,
+ &uiChunkSize);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* Lookup the requested physheap index to use for this PMR allocation */
+ if (PVRSRV_CHECK_FW_LOCAL(uiFlags))
+ {
+ if (PVRSRV_CHECK_FW_GUEST(uiFlags))
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST;
+ if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+ {
+ /* Shouldn't be reaching this code */
+ return PVRSRV_ERROR_INTERNAL_ERROR;
+ }
+ }
+ else
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ }
+ }
+ else if (PVRSRV_CHECK_CPU_LOCAL(uiFlags))
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL;
+ }
+ else
+ {
+ ePhysHeapIdx = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+ }
+
+ if (NULL == psDevNode->apsPhysHeap[ePhysHeapIdx])
+ {
+ /* In case a heap hasn't been acquired for this type, return invalid heap error */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Requested allocation on device node (%p) from "
+ "an invalid heap (HeapIndex=%d)",
+				__func__, psDevNode, ePhysHeapIdx));
+ return PVRSRV_ERROR_INVALID_HEAP;
+ }
+
+ /* Apply memory budgeting policy */
+ if (pfnCheckMemAllocSize)
+ {
+ IMG_UINT64 uiMemSize = (IMG_UINT64)uiChunkSize * ui32NumPhysChunks;
+
+ eError = pfnCheckMemAllocSize(psDevNode->psDevConfig->hSysData, uiMemSize);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+#if defined(DEBUG)
+ if (gPMRAllocFail > 0)
+ {
+ static IMG_UINT32 ui32AllocCount = 1;
+
+ if (ui32AllocCount < gPMRAllocFail)
+ {
+ ui32AllocCount++;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s failed on %d allocation.",
+ __func__, ui32AllocCount));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+#endif /* defined(DEBUG) */
+
+ return psDevNode->pfnCreateRamBackedPMR[ePhysHeapIdx](psDevNode,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2AllocPageSize,
+ uiFlags,
+ pszAnnotation,
+ uiPid,
+ ppsPMRPtr);
+}
+
+PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr)
+{
+
+ PVRSRV_ERROR eError;
+ eError = PhysmemNewRamBackedPMR(psConnection,
+ psDevNode,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2PageSize,
+ uiFlags,
+ uiAnnotationLength,
+ pszAnnotation,
+ uiPid,
+ ppsPMRPtr);
+
+ if (eError == PVRSRV_OK)
+ {
+ eError = PMRLockSysPhysAddresses(*ppsPMRPtr);
+ }
+
+ return eError;
+}
+
+static void GetLMASize( IMG_DEVMEM_SIZE_T *puiLMASize,
+ PVRSRV_DEVICE_NODE *psDevNode )
+{
+ IMG_UINT uiRegionIndex = 0, uiNumRegions = 0;
+ PVR_ASSERT(psDevNode);
+
+ uiNumRegions = psDevNode->psDevConfig->pasPhysHeaps[0].ui32NumOfRegions;
+
+ for (uiRegionIndex = 0; uiRegionIndex < uiNumRegions; ++uiRegionIndex)
+ {
+ *puiLMASize += psDevNode->psDevConfig->pasPhysHeaps[0].pasRegions[uiRegionIndex].uiSize;
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVGetMaxDevMemSizeKM( CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize )
+{
+ IMG_BOOL bLMA = IMG_FALSE, bUMA = IMG_FALSE;
+
+ *puiLMASize = 0;
+ *puiUMASize = 0;
+
+#if defined(TC_MEMORY_CONFIG) /* For TC2 */
+#if (TC_MEMORY_CONFIG == TC_MEMORY_LOCAL)
+ bLMA = IMG_TRUE;
+#elif (TC_MEMORY_CONFIG == TC_MEMORY_HOST)
+ bUMA = IMG_TRUE;
+#else
+ bUMA = IMG_TRUE;
+ bLMA = IMG_TRUE;
+#endif
+
+#elif defined(PLATO_MEMORY_CONFIG) /* For Plato TC */
+#if (PLATO_MEMORY_CONFIG == PLATO_MEMORY_LOCAL)
+ bLMA = IMG_TRUE;
+#elif (PLATO_MEMORY_CONFIG == PLATO_MEMORY_HOST)
+ bUMA = IMG_TRUE;
+#else
+ bUMA = IMG_TRUE;
+ bLMA = IMG_TRUE;
+#endif
+
+#elif defined(LMA) /* For emu, vp_linux */
+ bLMA = IMG_TRUE;
+
+#else /* For all other platforms */
+ bUMA = IMG_TRUE;
+#endif
+
+ if (bLMA) { GetLMASize(puiLMASize, psDevNode); }
+ if (bUMA) { *puiUMASize = OSGetRAMSize(); }
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ return PVRSRV_OK;
+}
+
+/* 'Wrapper' function to call PMRImportPMR(), which
+ * first checks the PMR is for the current device.
+ * This avoids the need to do this in pmr.c, which
+ * would then need PVRSRV_DEVICE_NODE (defining this
+ * type in pmr.h causes a typedef redefinition issue).
+ */
+PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (PMRGetExportDeviceNode(psPMRExport) != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ return PMRImportPMR(psPMRExport,
+ uiPassword,
+ uiSize,
+ uiLog2Contig,
+ ppsPMR);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem.h b/drivers/gpu/drm/img-rogue/1.10/physmem.h
new file mode 100644
index 00000000000000..fb51e8325591e5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem.h
@@ -0,0 +1,239 @@
+/*************************************************************************/ /*!
+@File
+@Title Physmem header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for common entry point for creation of RAM backed PMR's
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_H_
+#define _SRVSRV_PHYSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/* Valid values for TC_MEMORY_CONFIG configuration option */
+#define TC_MEMORY_LOCAL (1)
+#define TC_MEMORY_HOST (2)
+#define TC_MEMORY_HYBRID (3)
+
+/* Valid values for the PLATO_MEMORY_CONFIG configuration option */
+#define PLATO_MEMORY_LOCAL (1)
+#define PLATO_MEMORY_HOST (2)
+#define PLATO_MEMORY_HYBRID (3)
+
+/*************************************************************************/ /*!
+@Function DevPhysMemAlloc
+
+@Description Allocate memory from device specific heaps directly.
+
+@Input psDevNode device node to operate on
+@Input ui32MemSize Size of the memory to be allocated
+@Input u8Value Value to be initialised to.
+@Input bInitPage Flag to control initialisation
+@Input pszDevSpace PDUMP memory space in which the
+ allocation is to be done
+@Input pszSymbolicAddress Symbolic name of the allocation
+@Input phHandlePtr PDUMP handle to the allocation
+@Output psMemHandle Handle to the allocated memory
+@Output psDevPhysAddr Device Physical address of allocated
+ page
+
+@Return PVRSRV_OK if the allocation is successful
+*/
+/*****************************************************************************/
+extern PVRSRV_ERROR DevPhysMemAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32MemSize,
+ IMG_UINT32 ui32Log2Align,
+ const IMG_UINT8 u8Value,
+ IMG_BOOL bInitPage,
+#if defined(PDUMP)
+ const IMG_CHAR *pszDevSpace,
+ const IMG_CHAR *pszSymbolicAddress,
+ IMG_HANDLE *phHandlePtr,
+#endif
+ IMG_HANDLE hMemHandle,
+ IMG_DEV_PHYADDR *psDevPhysAddr);
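+
+/* Illustrative usage in a non-PDUMP build; the variable names below are
+ * examples, not part of this interface:
+ *
+ *   PG_HANDLE sMemHandle;
+ *   IMG_DEV_PHYADDR sDevPAddr;
+ *   PVRSRV_ERROR eError = DevPhysMemAlloc(psDevNode, OSGetPageSize(), 0,
+ *                                         0x00, IMG_TRUE,
+ *                                         &sMemHandle, &sDevPAddr);
+ */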
+
+/*************************************************************************/ /*!
+@Function DevPhysMemFree
+
+@Description Free memory to device specific heaps directly.
+
+@Input psDevNode device node to operate on
+@Input hPDUMPMemHandle Pdump handle to allocated memory
+@Input hMemHandle Devmem handle to allocated memory
+
+@Return
+*/
+/*****************************************************************************/
+extern void DevPhysMemFree(PVRSRV_DEVICE_NODE *psDevNode,
+#if defined(PDUMP)
+ IMG_HANDLE hPDUMPMemHandle,
+#endif
+ IMG_HANDLE hMemHandle);
+
+/*
+ * PhysmemNewRamBackedPMR
+ *
+ * This function will create a RAM backed PMR using the device specific
+ * callback, this allows control at a per-devicenode level to select the
+ * memory source thus supporting mixed UMA/LMA systems.
+ *
+ * The size must be a multiple of page size. The page size is
+ * specified in log2. It should be regarded as a minimum contiguity
+ * of which the resulting memory must be a multiple. It may
+ * be that this should be a fixed number. It may be that the
+ * allocation size needs to be a multiple of some coarser "page size"
+ * than that specified in the page size argument. For example, take
+ * an OS whose page granularity is a fixed 16kB, but the caller
+ * requests memory in page sizes of 4kB. The request can be satisfied
+ * if and only if the SIZE requested is a multiple of 16kB. If the
+ * arguments supplied are such that this OS cannot grant the request,
+ * PVRSRV_ERROR_INVALID_PARAMS will be returned.
+ *
+ * The caller should supply storage of a pointer. Upon successful
+ * return a PMR object will have been created and a pointer to it
+ * returned in the PMROut argument.
+ *
+ * A PMR created this way should be destroyed with PhysmemUnrefPMR.
+ *
+ * Note that this function may cause memory allocations and on some
+ * OSes this may cause scheduling events, so it is important that this
+ * function be called with interrupts enabled and in a context where
+ * scheduling events and memory allocations are permitted.
+ *
+ * The flags may be used by the implementation to change its behaviour
+ * if required. The flags will also be stored in the PMR as immutable
+ * metadata and returned to mmu_common when it asks for it.
+ *
+ * The PID specified is used to tie this allocation to the process context
+ * that the allocation is made on behalf of.
+ */
+extern PVRSRV_ERROR
+PhysmemNewRamBackedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+					PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMROut);
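+
+/* Worked example of the granularity rule above: on an OS with a fixed 16kB
+ * page size and uiLog2PageSize == 12 (4kB), a request for 0x8000 bytes (two
+ * OS pages) can be granted, whereas a request for 0x7000 bytes fails with
+ * PVRSRV_ERROR_INVALID_PARAMS because it is not a multiple of 16kB.
+ */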
+
+
+/*
+ * PhysmemNewRamBackedLockedPMR
+ *
+ * Same as the function above, but additionally locks down the PMR.
+ *
+ * Get the physical memory and lock down the PMR directly, we do not want to
+ * defer the actual allocation to mapping time.
+ *
+ * In general, on-demand allocation is not useful for allocations where
+ * users are free to map and unmap memory at will: a user does not expect
+ * the memory contents to suddenly vanish just because the buffer was
+ * unmapped. Even if the user knew and accepted that, we do not want to
+ * check, for every page we unmap, whether the underlying PMR has to be
+ * unlocked.
+*/
+extern PVRSRV_ERROR
+PhysmemNewRamBackedLockedPMR(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 uiAnnotationLength,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr);
+
+/**************************************************************************/ /*!
+@Function PhysmemImportPMR
+@Description    Import a previously exported PMR
+@Input psPMRExport The exported PMR token
+@Input uiPassword Authorisation password
+ for the PMR being imported
+@Input uiSize Size of the PMR being imported
+ (for verification)
+@Input uiLog2Contig Log2 continuity of the PMR being
+ imported (for verification)
+@Output ppsPMR The imported PMR
+@Return PVRSRV_ERROR_PMR_NOT_PERMITTED if not for the same device
+ PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR if password incorrect
+ PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES if size or contiguity incorrect
+ PVRSRV_OK if successful
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PhysmemImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR);
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetMaxDevMemSizeKM
+@Description Get the amount of device memory on current platform
+@Output         puiLMASize            LMA memory size
+@Output         puiUMASize            UMA memory size
+@Return         PVRSRV_OK on success
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PVRSRVGetMaxDevMemSizeKM( CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize );
+
+#endif /* _SRVSRV_PHYSMEM_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.c b/drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.c
new file mode 100644
index 00000000000000..8de1674c1dd9ac
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.c
@@ -0,0 +1,1161 @@
+/*************************************************************************/ /*!
+@File physmem_dmabuf.c
+@Title dmabuf memory allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for dmabuf memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+
+#include "physmem_dmabuf.h"
+#include "pvrsrv.h"
+#include "pmr.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP)
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pmr_impl.h"
+#include "hash.h"
+#include "private_data.h"
+#include "module_common.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * dma_buf_ops
+ *
+ * These all return errors if used.
+ * The point is to prevent anyone outside of our driver from importing
+ * and using our dmabuf.
+ */
+
+static int PVRDmaBufOpsAttach(struct dma_buf *psDmaBuf, struct device *psDev,
+ struct dma_buf_attachment *psAttachment)
+{
+ return -ENOSYS;
+}
+
+static struct sg_table *PVRDmaBufOpsMap(struct dma_buf_attachment *psAttachment,
+ enum dma_data_direction eDirection)
+{
+ /* Attach hasn't been called yet */
+ return ERR_PTR(-EINVAL);
+}
+
+static void PVRDmaBufOpsUnmap(struct dma_buf_attachment *psAttachment,
+ struct sg_table *psTable,
+ enum dma_data_direction eDirection)
+{
+}
+
+static void PVRDmaBufOpsRelease(struct dma_buf *psDmaBuf)
+{
+ PMR *psPMR = (PMR *) psDmaBuf->priv;
+
+ PMRUnrefPMR(psPMR);
+}
+
+static void *PVRDmaBufOpsKMap(struct dma_buf *psDmaBuf, unsigned long uiPageNum)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static int PVRDmaBufOpsMMap(struct dma_buf *psDmaBuf, struct vm_area_struct *psVMA)
+{
+ return -ENOSYS;
+}
+
+static const struct dma_buf_ops sPVRDmaBufOps =
+{
+ .attach = PVRDmaBufOpsAttach,
+ .map_dma_buf = PVRDmaBufOpsMap,
+ .unmap_dma_buf = PVRDmaBufOpsUnmap,
+ .release = PVRDmaBufOpsRelease,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
+ .map_atomic = PVRDmaBufOpsKMap,
+ .map = PVRDmaBufOpsKMap,
+#else
+ .kmap_atomic = PVRDmaBufOpsKMap,
+ .kmap = PVRDmaBufOpsKMap,
+#endif
+ .mmap = PVRDmaBufOpsMMap,
+};
+
+/* end of dma_buf_ops */
+
+
+typedef struct _PMR_DMA_BUF_DATA_
+{
+ /* Filled in at PMR create time */
+ PHYS_HEAP *psPhysHeap;
+ struct dma_buf_attachment *psAttachment;
+ PFN_DESTROY_DMABUF_PMR pfnDestroy;
+ IMG_BOOL bPoisonOnFree;
+
+ /* Modified by PMR lock/unlock */
+ struct sg_table *psSgTable;
+ IMG_DEV_PHYADDR *pasDevPhysAddr;
+ IMG_UINT32 ui32PhysPageCount;
+ IMG_UINT32 ui32VirtPageCount;
+} PMR_DMA_BUF_DATA;
+
+/* Start size of the g_psDmaBufHash hash table */
+#define DMA_BUF_HASH_SIZE 20
+
+static DEFINE_MUTEX(g_HashLock);
+
+static HASH_TABLE *g_psDmaBufHash;
+static IMG_UINT32 g_ui32HashRefCount;
+
+#if defined(PVR_ANDROID_ION_USE_SG_LENGTH)
+#define pvr_sg_length(sg) ((sg)->length)
+#else
+#define pvr_sg_length(sg) sg_dma_len(sg)
+#endif
+
+/*****************************************************************************
+ * PMR callback functions *
+ *****************************************************************************/
+
+static PVRSRV_ERROR PMRFinalizeDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf_attachment *psAttachment = psPrivData->psAttachment;
+ struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+ struct sg_table *psSgTable = psPrivData->psSgTable;
+ PMR *psPMR;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ mutex_lock(&g_HashLock);
+
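+	/* Two cases follow: a dma-buf imported from another exporter is
+	 * tracked in g_psDmaBufHash and must be dropped from the hash on its
+	 * final unref, whereas a dma-buf exported by this driver carries its
+	 * PMR directly in psDmaBuf->priv.
+	 */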
+ if (psDmaBuf->ops != &sPVRDmaBufOps)
+ {
+		if (g_psDmaBufHash)
+ {
+ /* We have a hash table so check if we've seen this dmabuf before */
+ psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+
+			if (psPMR)
+ {
+ if (!PMRIsPMRLive(psPMR))
+ {
+ HASH_Remove(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+ g_ui32HashRefCount--;
+
+ if (g_ui32HashRefCount == 0)
+ {
+ HASH_Delete(g_psDmaBufHash);
+ g_psDmaBufHash = NULL;
+ }
+ }
+				else
+				{
+					eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+				}
+			}
+		}
+	}
+	else
+	{
+ psPMR = (PMR *) psDmaBuf->priv;
+ if (PMRIsPMRLive(psPMR))
+ {
+ eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+ }
+
+ }
+
+	if (PVRSRV_OK != eError)
+ {
+ mutex_unlock(&g_HashLock);
+ return eError;
+ }
+
+ psPrivData->ui32PhysPageCount = 0;
+
+ dma_buf_unmap_attachment(psAttachment, psSgTable, DMA_BIDIRECTIONAL);
+
+ if (psPrivData->bPoisonOnFree)
+ {
+ void *pvKernAddr;
+ int i, err;
+
+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to begin cpu access for free poisoning (err=%d)",
+ __func__, err));
+ PVR_ASSERT(IMG_FALSE);
+ goto exit;
+ }
+
+ for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+ {
+ pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+ if (IS_ERR_OR_NULL(pvKernAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to poison allocation before free (err=%ld)",
+ __func__, pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+ PVR_ASSERT(IMG_FALSE);
+ goto exit_end_access;
+ }
+
+ memset(pvKernAddr, PVRSRV_POISON_ON_FREE_VALUE, PAGE_SIZE);
+
+ dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+ }
+
+exit_end_access:
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+ }
+
+exit:
+ if (psPrivData->pfnDestroy)
+ {
+ eError = psPrivData->pfnDestroy(psPrivData->psPhysHeap, psPrivData->psAttachment);
+ if (eError != PVRSRV_OK)
+ {
+ mutex_unlock(&g_HashLock);
+ return eError;
+ }
+ }
+
+ mutex_unlock(&g_HashLock);
+ OSFreeMem(psPrivData->pasDevPhysAddr);
+ OSFreeMem(psPrivData);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRLockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRUnlockPhysAddressesDmaBuf(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRDevPhysAddrDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ IMG_UINT32 ui32PageIndex;
+ IMG_UINT32 idx;
+
+ if (ui32Log2PageSize != PAGE_SHIFT)
+ {
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ if (pbValid[idx])
+ {
+ IMG_UINT32 ui32InPageOffset;
+
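+			/* e.g. with 4kB pages an offset of 0x5010 splits into page
+			 * index 5 and in-page offset 0x10 */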
+ ui32PageIndex = puiOffset[idx] >> PAGE_SHIFT;
+ ui32InPageOffset = puiOffset[idx] - ((IMG_DEVMEM_OFFSET_T)ui32PageIndex << PAGE_SHIFT);
+
+ PVR_ASSERT(ui32PageIndex < psPrivData->ui32VirtPageCount);
+ PVR_ASSERT(ui32InPageOffset < PAGE_SIZE);
+ psDevPAddr[idx].uiAddr = psPrivData->pasDevPhysAddr[ui32PageIndex].uiAddr + ui32InPageOffset;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+ void *pvKernAddr;
+ PVRSRV_ERROR eError;
+ int err;
+
+ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Kernel mappings for sparse DMABufs "
+ "are not allowed!", __func__));
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto fail;
+ }
+
+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+ if (err)
+ {
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto fail;
+ }
+
+ pvKernAddr = dma_buf_vmap(psDmaBuf);
+ if (IS_ERR_OR_NULL(pvKernAddr))
+ {
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto fail_kmap;
+ }
+
+ *ppvKernelAddressOut = pvKernAddr + uiOffset;
+ *phHandleOut = pvKernAddr;
+
+ return PVRSRV_OK;
+
+fail_kmap:
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+ } while (err == -EAGAIN || err == -EINTR);
+
+fail:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void PMRReleaseKernelMappingDataDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+ void *pvKernAddr = hHandle;
+ int err;
+
+ dma_buf_vunmap(psDmaBuf, pvKernAddr);
+
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_BIDIRECTIONAL);
+ } while (err == -EAGAIN || err == -EINTR);
+}
+
+static PVRSRV_ERROR PMRMMapDmaBuf(PMR_IMPL_PRIVDATA pvPriv,
+ PMR *psPMR,
+ PMR_MMAP_DATA pOSMMapData)
+{
+ PMR_DMA_BUF_DATA *psPrivData = pvPriv;
+ struct dma_buf *psDmaBuf = psPrivData->psAttachment->dmabuf;
+ struct vm_area_struct *psVma = pOSMMapData;
+ int err;
+
+ if (psPrivData->ui32PhysPageCount != psPrivData->ui32VirtPageCount)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not possible to MMAP sparse DMABufs",
+ __func__));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ err = dma_buf_mmap(psDmaBuf, psVma, 0);
+ if (err)
+ {
+ return (err == -EINVAL) ? PVRSRV_ERROR_NOT_SUPPORTED : PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+#if defined (PVRSRV_ENABLE_LINUX_MMAP_STATS)
+ MMapStatsAddOrUpdatePMR(psPMR, psVma->vm_end - psVma->vm_start);
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRDmaBufFuncTab =
+{
+ .pfnLockPhysAddresses = PMRLockPhysAddressesDmaBuf,
+ .pfnUnlockPhysAddresses = PMRUnlockPhysAddressesDmaBuf,
+ .pfnDevPhysAddr = PMRDevPhysAddrDmaBuf,
+ .pfnAcquireKernelMappingData = PMRAcquireKernelMappingDataDmaBuf,
+ .pfnReleaseKernelMappingData = PMRReleaseKernelMappingDataDmaBuf,
+ .pfnMMap = PMRMMapDmaBuf,
+ .pfnFinalize = PMRFinalizeDmaBuf,
+};
+
+/*****************************************************************************
+ * Public facing interface *
+ *****************************************************************************/
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment,
+ PFN_DESTROY_DMABUF_PMR pfnDestroy,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr)
+{
+ struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+ PMR_DMA_BUF_DATA *psPrivData;
+ PMR_FLAGS_T uiPMRFlags;
+ IMG_BOOL bZeroOnAlloc;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bPoisonOnFree;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i, j;
+ IMG_UINT32 uiPagesPerChunk = uiChunkSize >> PAGE_SHIFT;
+ IMG_UINT32 ui32PageCount = 0;
+ struct scatterlist *sg;
+ struct sg_table *table;
+ IMG_UINT32 uiSglOffset;
+ IMG_CHAR pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN];
+
+ bZeroOnAlloc = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags);
+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags);
+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags);
+
+	if (bZeroOnAlloc && bPoisonOnAlloc)
+	{
+		/* Zero on Alloc and Poison on Alloc are mutually exclusive */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errReturn;
+ }
+
+ psPrivData = OSAllocZMem(sizeof(*psPrivData));
+ if (psPrivData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errReturn;
+ }
+
+ psPrivData->psPhysHeap = psHeap;
+ psPrivData->psAttachment = psAttachment;
+ psPrivData->pfnDestroy = pfnDestroy;
+ psPrivData->bPoisonOnFree = bPoisonOnFree;
+ psPrivData->ui32VirtPageCount =
+ (ui32NumVirtChunks * uiChunkSize) >> PAGE_SHIFT;
+
+ psPrivData->pasDevPhysAddr =
+ OSAllocZMem(sizeof(*(psPrivData->pasDevPhysAddr)) *
+ psPrivData->ui32VirtPageCount);
+ if (!psPrivData->pasDevPhysAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate buffer for physical addresses (oom)",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errFreePrivData;
+ }
+
+ if (bZeroOnAlloc || bPoisonOnAlloc)
+ {
+ void *pvKernAddr;
+ int i, err;
+
+ err = dma_buf_begin_cpu_access(psDmaBuf, DMA_FROM_DEVICE);
+ if (err)
+ {
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+ goto errFreePhysAddr;
+ }
+
+ for (i = 0; i < psDmaBuf->size / PAGE_SIZE; i++)
+ {
+ pvKernAddr = dma_buf_kmap(psDmaBuf, i);
+ if (IS_ERR_OR_NULL(pvKernAddr))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to map page for %s (err=%ld)",
+ __func__, bZeroOnAlloc ? "zeroing" : "poisoning",
+ pvKernAddr ? PTR_ERR(pvKernAddr) : -ENOMEM));
+ eError = PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING;
+
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+
+ goto errFreePhysAddr;
+ }
+
+ if (bZeroOnAlloc)
+ {
+ memset(pvKernAddr, 0, PAGE_SIZE);
+ }
+ else
+ {
+ memset(pvKernAddr, PVRSRV_POISON_ON_ALLOC_VALUE, PAGE_SIZE);
+ }
+
+ dma_buf_kunmap(psDmaBuf, i, pvKernAddr);
+ }
+
+ do {
+ err = dma_buf_end_cpu_access(psDmaBuf, DMA_TO_DEVICE);
+ } while (err == -EAGAIN || err == -EINTR);
+ }
+
+ table = dma_buf_map_attachment(psAttachment, DMA_BIDIRECTIONAL);
+ if (IS_ERR_OR_NULL(table))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errFreePhysAddr;
+ }
+
+	/*
+	 * We do a two-pass process: first work out how many pages there
+	 * are; second, fill in the data.
+	 */
+ for_each_sg(table->sgl, sg, table->nents, i)
+ {
+ ui32PageCount += PAGE_ALIGN(pvr_sg_length(sg)) / PAGE_SIZE;
+ }
+
+ if (WARN_ON(!ui32PageCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of phys. pages must not be zero",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnmap;
+ }
+
+ if (WARN_ON(ui32PageCount != ui32NumPhysChunks * uiPagesPerChunk))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Requested physical chunks and actual "
+ "number of physical dma buf pages don't match",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnmap;
+ }
+
+ psPrivData->ui32PhysPageCount = ui32PageCount;
+ psPrivData->psSgTable = table;
+ ui32PageCount = 0;
+ sg = table->sgl;
+ uiSglOffset = 0;
+
+ /* Fill physical address array */
+ for (i = 0; i < ui32NumPhysChunks; i++)
+ {
+ for (j = 0; j < uiPagesPerChunk; j++)
+ {
+ IMG_UINT32 uiIdx = pui32MappingTable[i] * uiPagesPerChunk + j;
+
+ psPrivData->pasDevPhysAddr[uiIdx].uiAddr =
+ sg_dma_address(sg) + uiSglOffset;
+
+ /* Get the next offset for the current sgl or the next sgl */
+ uiSglOffset += PAGE_SIZE;
+ if (uiSglOffset >= pvr_sg_length(sg))
+ {
+ sg = sg_next(sg);
+ uiSglOffset = 0;
+
+ /* Check that we haven't looped */
+ if (WARN_ON(sg == table->sgl))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to fill phys. address "
+ "array ",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnmap;
+ }
+ }
+ }
+ }
+
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /*
+ * Check no significant bits were lost in cast due to different
+ * bit widths for flags
+ */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ if (OSSNPrintf((IMG_CHAR *)pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN, "ImpDmaBuf:%s", (IMG_CHAR *)pszName) < 0)
+ {
+ pszAnnotation[0] = '\0';
+ }
+ else
+ {
+ pszAnnotation[DEVMEM_ANNOTATION_MAX_LEN-1] = '\0';
+ }
+
+ eError = PMRCreatePMR(psDevNode,
+ psHeap,
+ ui32NumVirtChunks * uiChunkSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ PAGE_SHIFT,
+ uiPMRFlags,
+ pszAnnotation,
+ &_sPMRDmaBufFuncTab,
+ psPrivData,
+ PMR_TYPE_DMABUF,
+ ppsPMRPtr,
+ PDUMP_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create PMR (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto errFreePhysAddr;
+ }
+
+ return PVRSRV_OK;
+
+errUnmap:
+ dma_buf_unmap_attachment(psAttachment, table, DMA_BIDIRECTIONAL);
+errFreePhysAddr:
+ OSFreeMem(psPrivData->pasDevPhysAddr);
+errFreePrivData:
+ OSFreeMem(psPrivData);
+errReturn:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR PhysmemDestroyDmaBuf(PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment)
+{
+ struct dma_buf *psDmaBuf = psAttachment->dmabuf;
+
+ PVR_UNREFERENCED_PARAMETER(psHeap);
+
+ dma_buf_detach(psDmaBuf, psAttachment);
+ dma_buf_put(psDmaBuf);
+
+ return PVRSRV_OK;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+ PMR_DMA_BUF_DATA *psPrivData;
+
+ psPrivData = PMRGetPrivateData(psPMR, &_sPMRDmaBufFuncTab);
+ if (psPrivData)
+ {
+ return psPrivData->psAttachment->dmabuf;
+ }
+
+ return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR *psPMR,
+ IMG_INT *piFd)
+{
+ struct dma_buf *psDmaBuf;
+ IMG_DEVMEM_SIZE_T uiPMRSize;
+ PVRSRV_ERROR eError;
+ IMG_INT iFd;
+
+ mutex_lock(&g_HashLock);
+
+ PMRRefPMR(psPMR);
+
+ eError = PMR_LogicalSize(psPMR, &uiPMRSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_pmr_ref;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
+ {
+ DEFINE_DMA_BUF_EXPORT_INFO(sDmaBufExportInfo);
+
+ sDmaBufExportInfo.priv = psPMR;
+ sDmaBufExportInfo.ops = &sPVRDmaBufOps;
+ sDmaBufExportInfo.size = uiPMRSize;
+ sDmaBufExportInfo.flags = O_RDWR;
+
+ psDmaBuf = dma_buf_export(&sDmaBufExportInfo);
+ }
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
+ psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+ uiPMRSize, O_RDWR, NULL);
+#else
+ psDmaBuf = dma_buf_export(psPMR, &sPVRDmaBufOps,
+ uiPMRSize, O_RDWR);
+#endif
+
+ if (IS_ERR_OR_NULL(psDmaBuf))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to export buffer (err=%ld)",
+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_pmr_ref;
+ }
+
+ iFd = dma_buf_fd(psDmaBuf, O_RDWR);
+ if (iFd < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf fd (err=%d)",
+ __func__, iFd));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_dma_buf;
+ }
+
+ mutex_unlock(&g_HashLock);
+ *piFd = iFd;
+ return PVRSRV_OK;
+
+fail_dma_buf:
+ dma_buf_put(psDmaBuf);
+
+fail_pmr_ref:
+ PMRUnrefPMR(psPMR);
+ mutex_unlock(&g_HashLock);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32MappingTable = 0;
+ struct dma_buf *psDmaBuf;
+ PVRSRV_ERROR eError;
+
+ /* Get the buffer handle */
+ psDmaBuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(psDmaBuf))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+ return PVRSRV_ERROR_BAD_MAPPING;
+
+ }
+
+ uiSize = psDmaBuf->size;
+
+ eError = PhysmemImportSparseDmaBuf(psConnection,
+ psDevNode,
+ fd,
+ uiFlags,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ ui32NameSize,
+ pszName,
+ ppsPMRPtr,
+ puiSize,
+ puiAlign);
+
+
+ dma_buf_put(psDmaBuf);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PMR *psPMR = NULL;
+ struct dma_buf_attachment *psAttachment;
+ struct dma_buf *psDmaBuf;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bHashTableCreated = IMG_FALSE;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (!psDevNode)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errReturn;
+ }
+
+ /* Terminate string from bridge to prevent corrupt annotations in RI */
+ {
+ IMG_CHAR* pszName0 = (IMG_CHAR*) pszName;
+ pszName0[ui32NameSize-1] = '\0';
+ }
+
+ /* Get the buffer handle */
+ psDmaBuf = dma_buf_get(fd);
+ if (IS_ERR_OR_NULL(psDmaBuf))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get dma-buf from fd (err=%ld)",
+ __func__, psDmaBuf ? PTR_ERR(psDmaBuf) : -ENOMEM));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto errReturn;
+ }
+
+ mutex_lock(&g_HashLock);
+
+ if (psDmaBuf->ops == &sPVRDmaBufOps)
+ {
+ PVRSRV_DEVICE_NODE *psPMRDevNode;
+
+ /* We exported this dma_buf, so we can just get its PMR */
+ psPMR = (PMR *) psDmaBuf->priv;
+
+ /* However, we can't import it if it belongs to a different device */
+ psPMRDevNode = PMR_DeviceNode(psPMR);
+ if (psPMRDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n",
+ __func__));
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ goto err;
+ }
+ }
+ else
+ {
+ if (g_psDmaBufHash)
+ {
+ /* We have a hash table so check if we've seen this dmabuf before */
+ psPMR = (PMR *) HASH_Retrieve(g_psDmaBufHash, (uintptr_t) psDmaBuf);
+ }
+ else
+ {
+ /*
+ * As different processes may import the same dmabuf we need to
+ * create a hash table so we don't generate a duplicate PMR but
+ * rather just take a reference on an existing one.
+ */
+ g_psDmaBufHash = HASH_Create(DMA_BUF_HASH_SIZE);
+ if (!g_psDmaBufHash)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err;
+ }
+ bHashTableCreated = IMG_TRUE;
+ }
+ }
+
+ if (psPMR)
+ {
+ /* Reuse the PMR we already created */
+ PMRRefPMR(psPMR);
+
+ *ppsPMRPtr = psPMR;
+ PMR_LogicalSize(psPMR, puiSize);
+ *puiAlign = PAGE_SIZE;
+ }
+ /* No errors so far */
+ eError = PVRSRV_OK;
+
+err:
+	if (psPMR || (PVRSRV_OK != eError))
+ {
+ mutex_unlock(&g_HashLock);
+ dma_buf_put(psDmaBuf);
+ return eError;
+ }
+
+ /* Do we want this to be a sparse PMR? */
+ if (ui32NumVirtChunks > 1)
+ {
+ IMG_UINT32 i;
+
+ /* Parameter validation */
+ if (psDmaBuf->size != (uiChunkSize * ui32NumPhysChunks) ||
+ uiChunkSize != PAGE_SIZE ||
+ ui32NumPhysChunks > ui32NumVirtChunks)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requesting sparse buffer: "
+ "uiChunkSize ("IMG_DEVMEM_SIZE_FMTSPEC") must be equal to "
+ "OS page size (%lu). uiChunkSize * ui32NumPhysChunks "
+ "("IMG_DEVMEM_SIZE_FMTSPEC") must"
+ " be equal to the buffer size ("IMG_SIZE_FMTSPEC"). "
+ "ui32NumPhysChunks (%u) must be lesser or equal to "
+ "ui32NumVirtChunks (%u)",
+ __func__,
+ uiChunkSize,
+ PAGE_SIZE,
+ uiChunkSize * ui32NumPhysChunks,
+ psDmaBuf->size,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnlockAndDMAPut;
+ }
+
+ /* Parameter validation - Mapping table entries*/
+ for (i = 0; i < ui32NumPhysChunks; i++)
+ {
+			if (pui32MappingTable[i] >= ui32NumVirtChunks)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requesting sparse buffer: "
+ "Entry in mapping table (%u) is out of allocation "
+ "bounds (%u)",
+ __func__,
+ (IMG_UINT32) pui32MappingTable[i],
+ (IMG_UINT32) ui32NumVirtChunks));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errUnlockAndDMAPut;
+ }
+ }
+ }
+ else
+ {
+ /* Make sure parameters are valid for non-sparse allocations as well */
+ uiChunkSize = psDmaBuf->size;
+ ui32NumPhysChunks = 1;
+ ui32NumVirtChunks = 1;
+ pui32MappingTable[0] = 0;
+ }
+
+
+ psAttachment = dma_buf_attach(psDmaBuf, psDevNode->psDevConfig->pvOSDevice);
+ if (IS_ERR_OR_NULL(psAttachment))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to attach to dma-buf (err=%ld)",
+			__func__, psAttachment ? PTR_ERR(psAttachment) : -ENOMEM));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto errUnlockAndDMAPut;
+ }
+
+ /*
+ * Note:
+ * While we have no way to determine the type of the buffer we just
+ * assume that all dmabufs are from the same physical heap.
+ */
+ eError = PhysmemCreateNewDmaBufBackedPMR(psDevNode,
+ psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL],
+ psAttachment,
+ PhysmemDestroyDmaBuf,
+ uiFlags,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ ui32NameSize,
+ pszName,
+ &psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto errDMADetach;
+ }
+
+ /* First time we've seen this dmabuf so store it in the hash table */
+ HASH_Insert(g_psDmaBufHash, (uintptr_t) psDmaBuf, (uintptr_t) psPMR);
+ g_ui32HashRefCount++;
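+	/* g_ui32HashRefCount counts the PMRs held in the hash table, presumably
+	 * so the table can be torn down once the last entry is removed (the
+	 * matching decrement happens on the release path, outside this hunk). */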
+
+ mutex_unlock(&g_HashLock);
+
+ *ppsPMRPtr = psPMR;
+ *puiSize = ui32NumVirtChunks * uiChunkSize;
+ *puiAlign = PAGE_SIZE;
+
+ return PVRSRV_OK;
+
+errDMADetach:
+ dma_buf_detach(psDmaBuf, psAttachment);
+
+errUnlockAndDMAPut:
+	if (IMG_TRUE == bHashTableCreated)
+ {
+ HASH_Delete(g_psDmaBufHash);
+ g_psDmaBufHash = NULL;
+ }
+ mutex_unlock(&g_HashLock);
+ dma_buf_put(psDmaBuf);
+
+errReturn:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment,
+ PFN_DESTROY_DMABUF_PMR pfnDestroy,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr)
+{
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psHeap);
+ PVR_UNREFERENCED_PARAMETER(psAttachment);
+ PVR_UNREFERENCED_PARAMETER(pfnDestroy);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+ PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+ PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+
+ return NULL;
+}
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR *psPMR,
+ IMG_INT *piFd)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(piFd);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(fd);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+ PVR_UNREFERENCED_PARAMETER(puiSize);
+ PVR_UNREFERENCED_PARAMETER(puiAlign);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(fd);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+ PVR_UNREFERENCED_PARAMETER(puiSize);
+ PVR_UNREFERENCED_PARAMETER(puiAlign);
+ PVR_UNREFERENCED_PARAMETER(uiChunkSize);
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+ PVR_UNREFERENCED_PARAMETER(pui32MappingTable);
+ PVR_UNREFERENCED_PARAMETER(ui32NameSize);
+ PVR_UNREFERENCED_PARAMETER(pszName);
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+}
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) || defined(SUPPORT_ION) || defined(KERNEL_HAS_DMABUF_VMAP_MMAP) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.h b/drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.h
new file mode 100644
index 00000000000000..33981789c86b8f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_dmabuf.h
@@ -0,0 +1,114 @@
+/**************************************************************************/ /*!
+@File physmem_dmabuf.h
+@Title Header for dmabuf PMR factory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Part of the memory management. This module is responsible for
+                implementing the function callbacks for importing dma-buf/Ion
+                allocations
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(_PHYSMEM_DMABUF_H_)
+#define _PHYSMEM_DMABUF_H_
+
+#include <linux/dma-buf.h>
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "connection_server.h"
+
+#include "pmr.h"
+
+typedef PVRSRV_ERROR (*PFN_DESTROY_DMABUF_PMR)(PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment);
+
+PVRSRV_ERROR
+PhysmemCreateNewDmaBufBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psHeap,
+ struct dma_buf_attachment *psAttachment,
+ PFN_DESTROY_DMABUF_PMR pfnDestroy,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr);
+
+struct dma_buf *
+PhysmemGetDmaBuf(PMR *psPMR);
+
+PVRSRV_ERROR
+PhysmemExportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PMR *psPMR,
+ IMG_INT *piFd);
+
+PVRSRV_ERROR
+PhysmemImportDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+PVRSRV_ERROR
+PhysmemImportSparseDmaBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_INT fd,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 ui32NameSize,
+ const IMG_CHAR pszName[DEVMEM_ANNOTATION_MAX_LEN],
+ PMR **ppsPMRPtr,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+#endif /* !defined(_PHYSMEM_DMABUF_H_) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.c b/drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.c
new file mode 100644
index 00000000000000..6da1afe705050a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.c
@@ -0,0 +1,145 @@
+/*************************************************************************/ /*!
+@File physmem_hostmem.c
+@Title Host memory device node functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Functions relevant to device memory allocations made from host
+ mem device node.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "physmem_hostmem.h"
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_device.h"
+
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr);
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr);
+
+/* heap callbacks for host driver's device's heap */
+static PHYS_HEAP_FUNCTIONS gsHostMemDevPhysHeapFuncs =
+{
+ /* pfnCpuPAddrToDevPAddr */
+ HostMemCpuPAddrToDevPAddr,
+ /* pfnDevPAddrToCpuPAddr */
+ HostMemDevPAddrToCpuPAddr,
+ /* pfnGetRegionId */
+ NULL,
+};
+
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[];
+
+/* heap configuration for host driver's device */
+static PHYS_HEAP_CONFIG gsPhysHeapConfigHostMemDevice[] =
+{
+ {
+ PHYS_HEAP_ID_HOSTMEM,
+ PHYS_HEAP_TYPE_UMA,
+ "SYSMEM",
+ &gsHostMemDevPhysHeapFuncs,
+ NULL,
+ 0,
+ IMG_FALSE,
+ (IMG_HANDLE)&gsHostMemDevConfig[0],
+ }
+};
+
+/* device configuration for host driver's device */
+static PVRSRV_DEVICE_CONFIG gsHostMemDevConfig[] =
+{
+ {
+ .pszName = "HostMemDevice",
+ .eCacheSnoopingMode = PVRSRV_DEVICE_SNOOP_NONE,
+ .pasPhysHeaps = &gsPhysHeapConfigHostMemDevice[0],
+ .ui32PhysHeapCount = ARRAY_SIZE(gsPhysHeapConfigHostMemDevice),
+ .aui32PhysHeapID = {
+ PHYS_HEAP_ID_HOSTMEM,
+ PHYS_HEAP_ID_HOSTMEM,
+ PHYS_HEAP_ID_HOSTMEM
+ },
+ }
+};
+
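+/*
+ * The host memory device heap is UMA: device physical addresses are identical
+ * to CPU physical addresses, so both translation callbacks below are plain
+ * identity copies.
+ */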
+static void HostMemCpuPAddrToDevPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+ /* Optimise common case */
+ psDevPAddr[0].uiAddr = psCpuPAddr[0].uiAddr;
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psDevPAddr[ui32Idx].uiAddr = psCpuPAddr[ui32Idx].uiAddr;
+ }
+ }
+}
+
+static void HostMemDevPAddrToCpuPAddr(IMG_HANDLE hPrivData,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivData);
+ /* Optimise common case */
+ psCpuPAddr[0].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[0].uiAddr);
+ if (ui32NumOfAddr > 1)
+ {
+ IMG_UINT32 ui32Idx;
+ for (ui32Idx = 1; ui32Idx < ui32NumOfAddr; ++ui32Idx)
+ {
+ psCpuPAddr[ui32Idx].uiAddr = IMG_CAST_TO_CPUPHYADDR_UINT(psDevPAddr[ui32Idx].uiAddr);
+ }
+ }
+}
+
+PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void)
+{
+ return &gsHostMemDevConfig[0];
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.h b/drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.h
new file mode 100644
index 00000000000000..883ca2aede69fc
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_hostmem.h
@@ -0,0 +1,54 @@
+/*************************************************************************/ /*!
+@File physmem_hostmem.h
+@Title Host memory device node header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PHYSMEM_HOSTMEM_H__)
+#define __PHYSMEM_HOSTMEM_H__
+
+#include "pvrsrv_device.h"
+
+/*! Heap ID of the host driver's device heap */
+#define PHYS_HEAP_ID_HOSTMEM (~((IMG_UINT32)0))
+
+PVRSRV_DEVICE_CONFIG* HostMemGetDeviceConfig(void);
+
+#endif /* !defined (__PHYSMEM_HOSTMEM_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_lma.c b/drivers/gpu/drm/img-rogue/1.10/physmem_lma.c
new file mode 100644
index 00000000000000..63d9526ae4caa1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_lma.c
@@ -0,0 +1,1688 @@
+/*************************************************************************/ /*!
+@File physmem_lma.c
+@Title Local card memory allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for local card memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "devicemem_server_utils.h"
+#include "physmem_lma.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "rgxutils.h"
+#endif
+
+/* Since 0x0 is a valid DevPAddr, we rely on the maximum 64-bit value being an
+ * invalid page address */
+#define INVALID_PAGE_ADDR ~((IMG_UINT64)0x0)
+
+typedef struct _PMR_LMALLOCARRAY_DATA_ {
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_PID uiPid;
+ IMG_INT32 iNumPagesAllocated;
+ /*
+ * uiTotalNumPages:
+	 * Total number of pages supported by this PMR. (Fixed for now due to the fixed page table array size)
+ */
+ IMG_UINT32 uiTotalNumPages;
+ IMG_UINT32 uiPagesToAlloc;
+
+ IMG_UINT32 uiLog2AllocSize;
+ IMG_UINT32 uiContigAllocSize;
+ IMG_DEV_PHYADDR *pasDevPAddr;
+
+ IMG_BOOL bZeroOnAlloc;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bFwLocalAlloc;
+ IMG_BOOL bFwConfigAlloc;
+ IMG_BOOL bFwGuestAlloc;
+
+ IMG_BOOL bOnDemand;
+
+ /*
+ record at alloc time whether poisoning will be required when the
+ PMR is freed.
+ */
+ IMG_BOOL bPoisonOnFree;
+
+ /* Physical heap and arena pointers for this allocation */
+ PHYS_HEAP* psPhysHeap;
+ RA_ARENA* psArena;
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags;
+
+} PMR_LMALLOCARRAY_DATA;
+
+static PVRSRV_ERROR _MapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ size_t uiSize,
+ IMG_BOOL bFwLocalAlloc,
+ PMR_FLAGS_T ulFlags,
+ void **pvPtr)
+{
+ IMG_UINT32 ui32CPUCacheFlags;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ PHYS_HEAP *psPhysHeap;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemCPUCacheMode(psDevNode, ulFlags, &ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ if (bFwLocalAlloc)
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+ else
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ }
+
+ PhysHeapDevPAddrToCpuPAddr(psPhysHeap, 1, &sCpuPAddr, psDevPAddr);
+
+ *pvPtr = OSMapPhysToLin(sCpuPAddr, uiSize, ui32CPUCacheFlags);
+ if (*pvPtr == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ return PVRSRV_OK;
+ }
+}
+
+static void _UnMapAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ size_t uiSize,
+ IMG_BOOL bFwLocalAlloc,
+ PMR_FLAGS_T ulFlags,
+ void *pvPtr)
+{
+ OSUnMapPhysToLin(pvPtr, uiSize, PVRSRV_CPU_CACHE_MODE(ulFlags));
+}
+
+static PVRSRV_ERROR
+_PoisonAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL bFwLocalAlloc,
+ IMG_UINT32 uiContigAllocSize,
+ IMG_BYTE ui8PoisonValue)
+{
+ PVRSRV_ERROR eError;
+ void *pvKernLin = NULL;
+
+ eError = _MapAlloc(psDevNode,
+ psDevPAddr,
+ uiContigAllocSize,
+ bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvKernLin);
+ if (eError != PVRSRV_OK)
+ {
+ goto map_failed;
+ }
+
+ OSDeviceMemSet(pvKernLin, ui8PoisonValue, uiContigAllocSize);
+
+	_UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+ return PVRSRV_OK;
+
+map_failed:
+ PVR_DPF((PVR_DBG_ERROR, "Failed to poison allocation"));
+ return eError;
+}
+
+static PVRSRV_ERROR
+_ZeroAlloc(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ IMG_BOOL bFwLocalAlloc,
+ IMG_UINT32 uiContigAllocSize)
+{
+ void *pvKernLin = NULL;
+ PVRSRV_ERROR eError;
+
+ eError = _MapAlloc(psDevNode,
+ psDevPAddr,
+ uiContigAllocSize,
+ bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvKernLin);
+ if (eError != PVRSRV_OK)
+ {
+ goto map_failed;
+ }
+
+ OSDeviceMemSet(pvKernLin, 0, uiContigAllocSize);
+
+ _UnMapAlloc(psDevNode, uiContigAllocSize, bFwLocalAlloc, 0, pvKernLin);
+
+ return PVRSRV_OK;
+
+map_failed:
+ PVR_DPF((PVR_DBG_ERROR, "Failed to zero allocation"));
+ return eError;
+}
+
+static PVRSRV_ERROR
+_AllocLMPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_SIZE_T uiSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pabMappingTable,
+ IMG_UINT32 uiLog2AllocPageSize,
+ IMG_BOOL bZero,
+ IMG_BOOL bPoisonOnAlloc,
+ IMG_BOOL bPoisonOnFree,
+ IMG_BOOL bContig,
+ IMG_BOOL bOnDemand,
+ IMG_BOOL bFwLocalAlloc,
+ IMG_BOOL bFwConfigAlloc,
+ IMG_BOOL bFwGuestAlloc,
+ PHYS_HEAP* psPhysHeap,
+ PVRSRV_MEMALLOCFLAGS_T uiAllocFlags,
+ IMG_PID uiPid,
+ PMR_LMALLOCARRAY_DATA **ppsPageArrayDataPtr
+ )
+{
+ PMR_LMALLOCARRAY_DATA *psPageArrayData = NULL;
+ IMG_UINT32 ui32Index;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(!bZero || !bPoisonOnAlloc);
+ PVR_ASSERT(OSGetPageShift() <= uiLog2AllocPageSize);
+
+ psPageArrayData = OSAllocZMem(sizeof(PMR_LMALLOCARRAY_DATA));
+ if (psPageArrayData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocArray;
+ }
+
+ if (bContig)
+ {
+ /*
+ Some allocations require kernel mappings in which case in order
+ to be virtually contiguous we also have to be physically contiguous.
+ */
+ psPageArrayData->uiTotalNumPages = 1;
+ psPageArrayData->uiPagesToAlloc = psPageArrayData->uiTotalNumPages;
+ psPageArrayData->uiContigAllocSize = TRUNCATE_64BITS_TO_32BITS(uiSize);
+ psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+ }
+ else
+ {
+ IMG_UINT32 uiNumPages;
+
+ /* Use of cast below is justified by the assertion that follows to
+ prove that no significant bits have been truncated */
+ uiNumPages = (IMG_UINT32) ( ((uiSize - 1) >> uiLog2AllocPageSize) + 1);
+ PVR_ASSERT( ((PMR_SIZE_T) uiNumPages << uiLog2AllocPageSize) == uiSize);
+
+ psPageArrayData->uiTotalNumPages = uiNumPages;
+
+ if ((ui32NumVirtChunks != ui32NumPhysChunks) || (1 < ui32NumVirtChunks))
+ {
+ psPageArrayData->uiPagesToAlloc = ui32NumPhysChunks;
+ }
+ else
+ {
+ psPageArrayData->uiPagesToAlloc = uiNumPages;
+ }
+ psPageArrayData->uiContigAllocSize = 1 << uiLog2AllocPageSize;
+ psPageArrayData->uiLog2AllocSize = uiLog2AllocPageSize;
+ }
+ psPageArrayData->psDevNode = psDevNode;
+ psPageArrayData->uiPid = uiPid;
+ psPageArrayData->pasDevPAddr = OSAllocMem(sizeof(IMG_DEV_PHYADDR) *
+ psPageArrayData->uiTotalNumPages);
+ if (psPageArrayData->pasDevPAddr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocAddr;
+ }
+
+ /* Since no pages are allocated yet, initialise page addresses to INVALID_PAGE_ADDR */
+ for (ui32Index = 0; ui32Index < psPageArrayData->uiTotalNumPages; ui32Index++)
+ {
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+ }
+
+ psPageArrayData->iNumPagesAllocated = 0;
+ psPageArrayData->bZeroOnAlloc = bZero;
+ psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+ psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+ psPageArrayData->bOnDemand = bOnDemand;
+ psPageArrayData->bFwLocalAlloc = bFwLocalAlloc;
+ psPageArrayData->bFwConfigAlloc = bFwConfigAlloc;
+ psPageArrayData->psPhysHeap = psPhysHeap;
+ psPageArrayData->uiAllocFlags = uiAllocFlags;
+ psPageArrayData->bFwGuestAlloc = bFwGuestAlloc;
+
+ *ppsPageArrayDataPtr = psPageArrayData;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow:
+ */
+
+errorOnAllocAddr:
+ OSFreeMem(psPageArrayData);
+
+errorOnAllocArray:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR
+_AllocLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData, IMG_UINT32 *pui32MapTable)
+{
+ PVRSRV_ERROR eError;
+ RA_BASE_T uiCardAddr;
+ RA_LENGTH_T uiActualSize;
+	IMG_UINT32 i, ui32Index = 0;
+ IMG_UINT32 uiContigAllocSize;
+ IMG_UINT32 uiLog2AllocSize;
+ IMG_UINT32 uiRegionId;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bZeroOnAlloc;
+ RA_ARENA *pArena;
+
+ PVR_ASSERT(NULL != psPageArrayData);
+ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+ uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+ uiLog2AllocSize = psPageArrayData->uiLog2AllocSize;
+ psDevNode = psPageArrayData->psDevNode;
+ bPoisonOnAlloc = psPageArrayData->bPoisonOnAlloc;
+ bZeroOnAlloc = psPageArrayData->bZeroOnAlloc;
+
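+	/*
+	 * Select the arena to allocate from: firmware allocations on a
+	 * virtualized system use the dedicated FW config/main arenas (or the
+	 * per-OSID raw/config/main arena for guest allocations); all other
+	 * allocations use the local-memory arena selected by region ID below.
+	 */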
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) && psPageArrayData->bFwLocalAlloc)
+ {
+ if (! psPageArrayData->bFwGuestAlloc)
+ {
+ pArena = psPageArrayData->bFwConfigAlloc ?
+ psDevNode->psKernelFwConfigMemArena[0] :
+ psDevNode->psKernelFwMainMemArena[0];
+ }
+ else
+ {
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVR_ASSERT(PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST));
+ PVR_ASSERT(psDevNode->uiKernelFwRAIdx && psDevNode->uiKernelFwRAIdx < RGXFW_NUM_OS);
+
+ SysVzGetPhysHeapOrigin(psDevNode->psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eHeapOrigin);
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+ pArena = psDevNode->psKernelFwRawMemArena[psDevNode->uiKernelFwRAIdx];
+ }
+ else
+ {
+ pArena = psPageArrayData->bFwConfigAlloc ?
+ psDevNode->psKernelFwConfigMemArena[psDevNode->uiKernelFwRAIdx] :
+ psDevNode->psKernelFwMainMemArena[psDevNode->uiKernelFwRAIdx];
+ }
+
+ psDevNode->uiKernelFwRAIdx = 0;
+ PVR_ASSERT(pArena != NULL);
+ }
+ }
+ else
+ {
+ /* Get suitable local memory region for this allocation */
+ uiRegionId = PhysHeapGetRegionId(psPageArrayData->psPhysHeap,
+ psPageArrayData->uiAllocFlags);
+
+ PVR_ASSERT(uiRegionId < psDevNode->ui32NumOfLocalMemArenas);
+ pArena = psDevNode->apsLocalDevMemArenas[uiRegionId];
+ }
+
+ if (psPageArrayData->uiTotalNumPages <
+ (psPageArrayData->iNumPagesAllocated + psPageArrayData->uiPagesToAlloc))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Requested pages do not fit the PMR allocation size. "
+ "Allocated: %u + Requested: %u > Total Allowed: %u",
+ psPageArrayData->iNumPagesAllocated,
+ psPageArrayData->uiPagesToAlloc,
+ psPageArrayData->uiTotalNumPages));
+ eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ return eError;
+ }
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ {
+ IMG_UINT32 ui32OSid=0, ui32OSidReg=0;
+ IMG_BOOL bOSidAxiProt;
+ IMG_PID pId;
+
+ pId=OSGetCurrentClientProcessIDKM();
+ RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ pArena=psDevNode->psOSidSubArena[ui32OSid];
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Giving from OS slot %d",ui32OSid));
+ }
+#endif
+
+ psPageArrayData->psArena = pArena;
+
+ for (i = 0; i < psPageArrayData->uiPagesToAlloc; i++)
+ {
+
+		/* The target index must be determined before allocating the page,
+		 * simply to keep the error paths straightforward */
+ if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+ {
+ ui32Index = i;
+ }
+ else
+ {
+ if (NULL == pui32MapTable)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Mapping table cannot be null"));
+ eError = PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY;
+ goto errorOnRAAlloc;
+ }
+
+ ui32Index = pui32MapTable[i];
+ if (ui32Index >= psPageArrayData->uiTotalNumPages)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Page alloc request Index out of bounds for PMR @0x%p",
+ __func__,
+ psPageArrayData));
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto errorOnRAAlloc;
+ }
+
+ if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Mapping already exists"));
+ eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+ goto errorOnRAAlloc;
+ }
+ }
+
+ eError = RA_Alloc(pArena,
+ uiContigAllocSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* No flags */
+ 1ULL << uiLog2AllocSize,
+ "LMA_Page_Alloc",
+ &uiCardAddr,
+ &uiActualSize,
+ NULL); /* No private handle */
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+				"Failed to allocate the page @index:%d",
+ ui32Index));
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto errorOnRAAlloc;
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "(GPU Virtualization Validation): Address: %llu \n",
+ uiCardAddr));
+}
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Allocation is done a page at a time */
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, uiActualSize, psPageArrayData->uiPid);
+#else
+ {
+ IMG_CPU_PHYADDR sLocalCpuPAddr;
+
+ sLocalCpuPAddr.uiAddr = (IMG_UINT64)uiCardAddr;
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ NULL,
+ sLocalCpuPAddr,
+ uiActualSize,
+ NULL,
+ psPageArrayData->uiPid);
+ }
+#endif
+#endif
+
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = uiCardAddr;
+ if (bPoisonOnAlloc)
+ {
+ eError = _PoisonAlloc(psDevNode,
+ &psPageArrayData->pasDevPAddr[ui32Index],
+ psPageArrayData->bFwLocalAlloc,
+ uiContigAllocSize,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to poison the page"));
+ goto errorOnPoison;
+ }
+ }
+
+ if (bZeroOnAlloc)
+ {
+ eError = _ZeroAlloc(psDevNode,
+ &psPageArrayData->pasDevPAddr[ui32Index],
+ psPageArrayData->bFwLocalAlloc,
+ uiContigAllocSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to zero the page"));
+ goto errorOnZero;
+ }
+ }
+ }
+ psPageArrayData->iNumPagesAllocated += psPageArrayData->uiPagesToAlloc;
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow:
+ */
+errorOnZero:
+errorOnPoison:
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+errorOnRAAlloc:
+ PVR_DPF((PVR_DBG_ERROR,
+			"%s: Failed to allocate page at index %d (request %d of %d pages): (%s)",
+ __func__,
+ ui32Index,
+ i,
+ psPageArrayData->uiPagesToAlloc,
+ PVRSRVGetErrorStringKM(eError)));
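+	/*
+	 * Unwind in reverse. The loop condition relies on unsigned wrap-around:
+	 * once i reaches 0, --i wraps to 0xFFFFFFFF, which fails the bound check
+	 * and terminates the loop.
+	 */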
+ while (--i < psPageArrayData->uiPagesToAlloc)
+ {
+ if (psPageArrayData->uiTotalNumPages == psPageArrayData->uiPagesToAlloc)
+ {
+ ui32Index = i;
+ }
+ else
+ {
+ if (NULL == pui32MapTable)
+ {
+ break;
+ }
+
+ ui32Index = pui32MapTable[i];
+ }
+
+ if (ui32Index < psPageArrayData->uiTotalNumPages)
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Allocation is done a page at a time */
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ uiContigAllocSize,
+ psPageArrayData->uiPid);
+#else
+ {
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
+ psPageArrayData->uiPid);
+ }
+#endif
+#endif
+ RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+ }
+ }
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR
+_FreeLMPageArray(PMR_LMALLOCARRAY_DATA *psPageArrayData)
+{
+ OSFreeMem(psPageArrayData->pasDevPAddr);
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "physmem_lma.c: freed local memory array structure for PMR @0x%p",
+ psPageArrayData));
+
+ OSFreeMem(psPageArrayData);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_FreeLMPages(PMR_LMALLOCARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pui32FreeIndices,
+ IMG_UINT32 ui32FreePageCount)
+{
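+	/*
+	 * A NULL pui32FreeIndices means "free every page in the array" (entries
+	 * still marked INVALID_PAGE_ADDR are skipped); otherwise only the
+	 * ui32FreePageCount pages named in the index list are freed.
+	 */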
+ IMG_UINT32 uiContigAllocSize;
+	IMG_UINT32 i, ui32PagesToFree = 0, ui32PagesFreed = 0, ui32Index = 0;
+ RA_ARENA *pArena = psPageArrayData->psArena;
+
+ PVR_ASSERT(psPageArrayData->iNumPagesAllocated != 0);
+
+ uiContigAllocSize = psPageArrayData->uiContigAllocSize;
+
+ ui32PagesToFree = (NULL == pui32FreeIndices) ?
+ psPageArrayData->uiTotalNumPages : ui32FreePageCount;
+
+ for (i = 0; i < ui32PagesToFree; i++)
+ {
+ if (NULL == pui32FreeIndices)
+ {
+ ui32Index = i;
+ }
+ else
+ {
+ ui32Index = pui32FreeIndices[i];
+ }
+
+ if (INVALID_PAGE_ADDR != psPageArrayData->pasDevPAddr[ui32Index].uiAddr)
+ {
+ ui32PagesFreed++;
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ _PoisonAlloc(psPageArrayData->psDevNode,
+ &psPageArrayData->pasDevPAddr[ui32Index],
+ psPageArrayData->bFwLocalAlloc,
+ uiContigAllocSize,
+ PVRSRV_POISON_ON_FREE_VALUE);
+ }
+
+ RA_Free(pArena, psPageArrayData->pasDevPAddr[ui32Index].uiAddr);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ /* Allocation is done a page at a time */
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ uiContigAllocSize,
+ psPageArrayData->uiPid);
+#else
+ {
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES,
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr,
+ psPageArrayData->uiPid);
+ }
+#endif
+#endif
+ psPageArrayData->pasDevPAddr[ui32Index].uiAddr = INVALID_PAGE_ADDR;
+ }
+ }
+ psPageArrayData->iNumPagesAllocated -= ui32PagesFreed;
+
+ PVR_ASSERT(0 <= psPageArrayData->iNumPagesAllocated);
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+			"%s: freed %d bytes of local memory for PMR @0x%p",
+ __func__,
+ (ui32PagesFreed * uiContigAllocSize),
+ psPageArrayData));
+
+ return PVRSRV_OK;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* destructor func is called after last reference disappears, but
+ before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeLocalMem(PMR_IMPL_PRIVDATA pvPriv
+ )
+{
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+
+ psLMAllocArrayData = pvPriv;
+
+ /* We can't free pages until now. */
+ if (psLMAllocArrayData->iNumPagesAllocated != 0)
+ {
+ eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+		PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
+ }
+
+ eError = _FreeLMPageArray(psLMAllocArrayData);
+	PVR_ASSERT(eError == PVRSRV_OK); /* can we do better? */
+
+ return PVRSRV_OK;
+}
+
+/* callback function for locking the system physical page addresses.
+ As we are LMA there is nothing to do as we control physical memory. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+ psLMAllocArrayData = pvPriv;
+
+ if (psLMAllocArrayData->bOnDemand)
+ {
+ /* Allocate Memory for deferred allocation */
+ eError = _AllocLMPages(psLMAllocArrayData, NULL);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesLocalMem(PMR_IMPL_PRIVDATA pvPriv
+ )
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData;
+
+ psLMAllocArrayData = pvPriv;
+
+ if (psLMAllocArrayData->bOnDemand)
+ {
+ /* Free Memory for deferred allocation */
+ eError = _FreeLMPages(psLMAllocArrayData, NULL, 0);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesLocalMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ IMG_UINT32 idx;
+ IMG_UINT32 uiLog2AllocSize;
+ IMG_UINT32 uiNumAllocs;
+ IMG_UINT64 uiAllocIndex;
+ IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = pvPriv;
+
+ if (psLMAllocArrayData->uiLog2AllocSize < ui32Log2PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requested physical addresses from PMR "
+ "for incompatible contiguity %u!",
+			__func__,
+ ui32Log2PageSize));
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ uiNumAllocs = psLMAllocArrayData->uiTotalNumPages;
+ if (uiNumAllocs > 1)
+ {
+ PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+ uiLog2AllocSize = psLMAllocArrayData->uiLog2AllocSize;
+
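+		/*
+		 * Split each offset into an allocation index (offset >> log2 size)
+		 * and an offset within that allocation; the device address is then
+		 * the base of that allocation plus the in-allocation offset.
+		 */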
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ if (pbValid[idx])
+ {
+ uiAllocIndex = puiOffset[idx] >> uiLog2AllocSize;
+ uiInAllocOffset = puiOffset[idx] - (uiAllocIndex << uiLog2AllocSize);
+
+ PVR_ASSERT(uiAllocIndex < uiNumAllocs);
+ PVR_ASSERT(uiInAllocOffset < (1ULL << uiLog2AllocSize));
+
+ psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[uiAllocIndex].uiAddr + uiInAllocOffset;
+ }
+ }
+ }
+ else
+ {
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ if (pbValid[idx])
+ {
+ psDevPAddr[idx].uiAddr = psLMAllocArrayData->pasDevPAddr[0].uiAddr + puiOffset[idx];
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ void *pvKernLinAddr = NULL;
+ IMG_UINT32 ui32PageIndex = 0;
+ size_t uiOffsetMask = uiOffset;
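+	/*
+	 * NB: for a single-allocation PMR uiOffsetMask stays equal to uiOffset,
+	 * so (uiOffset & uiOffsetMask) == uiOffset below; for multi-page PMRs it
+	 * is replaced by the in-page offset mask.
+	 */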
+
+ psLMAllocArrayData = pvPriv;
+
+ /* Check that we can map this in contiguously */
+ if (psLMAllocArrayData->uiTotalNumPages != 1)
+ {
+ size_t uiStart = uiOffset;
+ size_t uiEnd = uiOffset + uiSize - 1;
+ size_t uiPageMask = ~((1 << psLMAllocArrayData->uiLog2AllocSize) - 1);
+
+ /* We can still map if only one page is required */
+ if ((uiStart & uiPageMask) != (uiEnd & uiPageMask))
+ {
+ eError = PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ goto e0;
+ }
+
+ /* Locate the desired physical page to map in */
+ ui32PageIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+ uiOffsetMask = (1U << psLMAllocArrayData->uiLog2AllocSize) - 1;
+ }
+
+ PVR_ASSERT(ui32PageIndex < psLMAllocArrayData->uiTotalNumPages);
+
+ eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+ &psLMAllocArrayData->pasDevPAddr[ui32PageIndex],
+ psLMAllocArrayData->uiContigAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ ulFlags,
+ &pvKernLinAddr);
+
+ *ppvKernelAddressOut = ((IMG_CHAR *) pvKernLinAddr) + (uiOffset & uiOffsetMask);
+ *phHandleOut = pvKernLinAddr;
+
+ return eError;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void PMRReleaseKernelMappingDataLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ void *pvKernLinAddr = NULL;
+
+ psLMAllocArrayData = (PMR_LMALLOCARRAY_DATA *) pvPriv;
+ pvKernLinAddr = (void *) hHandle;
+
+ _UnMapAlloc(psLMAllocArrayData->psDevNode,
+ psLMAllocArrayData->uiContigAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ 0,
+ pvKernLinAddr);
+}
+
+
+static PVRSRV_ERROR
+CopyBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes,
+ void (*pfnCopyBytes)(IMG_UINT8 *pcBuffer,
+ IMG_UINT8 *pcPMR,
+ size_t uiSize))
+{
+ PMR_LMALLOCARRAY_DATA *psLMAllocArrayData = NULL;
+ size_t uiBytesCopied;
+ size_t uiBytesToCopy;
+ size_t uiBytesCopyableFromAlloc;
+ void *pvMapping = NULL;
+ IMG_UINT8 *pcKernelPointer = NULL;
+ size_t uiBufferOffset;
+ IMG_UINT64 uiAllocIndex;
+ IMG_DEVMEM_OFFSET_T uiInAllocOffset;
+ PVRSRV_ERROR eError;
+
+ psLMAllocArrayData = pvPriv;
+
+ uiBytesCopied = 0;
+ uiBytesToCopy = uiBufSz;
+ uiBufferOffset = 0;
+
+ if (psLMAllocArrayData->uiTotalNumPages > 1)
+ {
+ while (uiBytesToCopy > 0)
+ {
+ /* we have to map one alloc in at a time */
+ PVR_ASSERT(psLMAllocArrayData->uiLog2AllocSize != 0);
+ uiAllocIndex = uiOffset >> psLMAllocArrayData->uiLog2AllocSize;
+ uiInAllocOffset = uiOffset - (uiAllocIndex << psLMAllocArrayData->uiLog2AllocSize);
+ uiBytesCopyableFromAlloc = uiBytesToCopy;
+ if (uiBytesCopyableFromAlloc + uiInAllocOffset > (1ULL << psLMAllocArrayData->uiLog2AllocSize))
+ {
+ uiBytesCopyableFromAlloc = TRUNCATE_64BITS_TO_SIZE_T((1ULL << psLMAllocArrayData->uiLog2AllocSize)-uiInAllocOffset);
+ }
+
+ PVR_ASSERT(uiBytesCopyableFromAlloc != 0);
+ PVR_ASSERT(uiAllocIndex < psLMAllocArrayData->uiTotalNumPages);
+ PVR_ASSERT(uiInAllocOffset < (1ULL << psLMAllocArrayData->uiLog2AllocSize));
+
+ eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+ &psLMAllocArrayData->pasDevPAddr[uiAllocIndex],
+ psLMAllocArrayData->uiContigAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvMapping);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ pcKernelPointer = pvMapping;
+ pfnCopyBytes(&pcBuffer[uiBufferOffset], &pcKernelPointer[uiInAllocOffset], uiBytesCopyableFromAlloc);
+
+ _UnMapAlloc(psLMAllocArrayData->psDevNode,
+ psLMAllocArrayData->uiContigAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ 0,
+ pvMapping);
+
+ uiBufferOffset += uiBytesCopyableFromAlloc;
+ uiBytesToCopy -= uiBytesCopyableFromAlloc;
+ uiOffset += uiBytesCopyableFromAlloc;
+ uiBytesCopied += uiBytesCopyableFromAlloc;
+ }
+ }
+ else
+ {
+ PVR_ASSERT((uiOffset + uiBufSz) <= psLMAllocArrayData->uiContigAllocSize);
+ PVR_ASSERT(psLMAllocArrayData->uiContigAllocSize != 0);
+ eError = _MapAlloc(psLMAllocArrayData->psDevNode,
+ &psLMAllocArrayData->pasDevPAddr[0],
+ psLMAllocArrayData->uiContigAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ &pvMapping);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ pcKernelPointer = pvMapping;
+ pfnCopyBytes(pcBuffer, &pcKernelPointer[uiOffset], uiBufSz);
+
+ _UnMapAlloc(psLMAllocArrayData->psDevNode,
+ psLMAllocArrayData->uiContigAllocSize,
+ psLMAllocArrayData->bFwLocalAlloc,
+ 0,
+ pvMapping);
+
+ uiBytesCopied = uiBufSz;
+ }
+ *puiNumBytes = uiBytesCopied;
+ return PVRSRV_OK;
+e0:
+ *puiNumBytes = uiBytesCopied;
+ return eError;
+}
+
+static void ReadLocalMem(IMG_UINT8 *pcBuffer,
+ IMG_UINT8 *pcPMR,
+ size_t uiSize)
+{
+ /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
+ * we *assume* in the LMA code will be faster, and doesn't need to
+ * worry about ARM64.
+ */
+ OSCachedMemCopy(pcBuffer, pcPMR, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRReadBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ return CopyBytesLocalMem(pvPriv,
+ uiOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes,
+ ReadLocalMem);
+}
+
+static void WriteLocalMem(IMG_UINT8 *pcBuffer,
+ IMG_UINT8 *pcPMR,
+ size_t uiSize)
+{
+ /* NOTE: 'CachedMemCopy' means the operating system default memcpy, which
+ * we *assume* in the LMA code will be faster, and doesn't need to
+ * worry about ARM64.
+ */
+ OSCachedMemCopy(pcPMR, pcBuffer, uiSize);
+}
+
+static PVRSRV_ERROR
+PMRWriteBytesLocalMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ return CopyBytesLocalMem(pvPriv,
+ uiOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes,
+ WriteLocalMem);
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemLocalMem
+@Description    This function changes the sparse mapping by allocating and
+                freeing pages. It also updates the GPU mappings accordingly
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemLocalMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ IMG_UINT32 ui32AdtnlAllocPages = 0;
+ IMG_UINT32 ui32AdtnlFreePages = 0;
+	IMG_UINT32 ui32CommonRequestCount = 0;
+ IMG_UINT32 ui32Loop = 0;
+ IMG_UINT32 ui32Index = 0;
+ IMG_UINT32 uiAllocpgidx;
+ IMG_UINT32 uiFreepgidx;
+
+ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+ IMG_DEV_PHYADDR sPhyAddr;
+
+#if defined(DEBUG)
+ IMG_BOOL bPoisonFail = IMG_FALSE;
+ IMG_BOOL bZeroFail = IMG_FALSE;
+#endif
+
+ /* Fetch the Page table array represented by the PMR */
+ IMG_DEV_PHYADDR *psPageArray = psPMRPageArrayData->pasDevPAddr;
+ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+
+ /* The incoming request is classified into two operations independent of
+ * each other: alloc & free pages.
+ * These operations can be combined with two mapping operations as well
+ * which are GPU & CPU space mappings.
+ *
+ * From the alloc and free page requests, the net amount of pages to be
+ * allocated or freed is computed. Pages that were requested to be freed
+ * will be reused to fulfil alloc requests.
+ *
+ * The order of operations is:
+ * 1. Allocate new pages from the OS
+ * 2. Move the free pages from free request to alloc positions.
+ * 3. Free the rest of the pages not used for alloc
+ *
+ * Alloc parameters are validated at the time of allocation
+ * and any error will be handled then. */
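+
+	/*
+	 * Worked example (hypothetical numbers): with ui32AllocPageCount = 3 and
+	 * ui32FreePageCount = 1 (flags requesting both resize operations), the
+	 * common request count is 1, so two new pages are allocated from the
+	 * arena (ui32AdtnlAllocPages = 2), the single freed page is moved to the
+	 * remaining alloc index, and no pages are returned to the arena
+	 * (ui32AdtnlFreePages = 0).
+	 */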
+
+ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+ {
+		ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+ ui32FreePageCount : ui32AllocPageCount;
+
+ PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+ }
+
+ if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+ {
+		ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+ }
+ else
+ {
+ ui32AllocPageCount = 0;
+ }
+
+ if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+ {
+		ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+ }
+ else
+ {
+ ui32FreePageCount = 0;
+ }
+
+	if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ return eError;
+ }
+
+ {
+ /* Validate the free page indices */
+ if (ui32FreePageCount)
+ {
+ if (NULL != pai32FreeIndices)
+ {
+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+ {
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+					if (uiFreepgidx >= psPMRPageArrayData->uiTotalNumPages)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (INVALID_PAGE_ADDR == psPageArray[uiFreepgidx].uiAddr)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+			}
+			else
+			{
+				eError = PVRSRV_ERROR_INVALID_PARAMS;
+				return eError;
+			}
+ }
+
+		/* Validate the alloc page indices for the common (swap) portion of the request */
+ for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+ uiAllocpgidx = pai32AllocIndices[ui32Loop];
+			if (uiAllocpgidx >= psPMRPageArrayData->uiTotalNumPages)
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ if ((INVALID_PAGE_ADDR != psPageArray[uiAllocpgidx].uiAddr) ||
+ (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ else
+ {
+ if ((INVALID_PAGE_ADDR == psPageArray[uiAllocpgidx].uiAddr) ||
+ (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ }
+ }
+
+
+ ui32Loop = 0;
+
+ /* Allocate new pages */
+ if (0 != ui32AdtnlAllocPages)
+ {
+ /* Say how many pages to allocate */
+ psPMRPageArrayData->uiPagesToAlloc = ui32AdtnlAllocPages;
+
+ eError = _AllocLMPages(psPMRPageArrayData, pai32AllocIndices);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+					"%s: Additional page allocation failed",
+					__func__));
+ goto e0;
+ }
+
+ /* Mark the corresponding pages of translation table as valid */
+ for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+ {
+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+ }
+
+ psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+ }
+
+ ui32Index = ui32Loop;
+
+ /* Move the corresponding free pages to alloc request */
+		for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+ {
+
+ uiAllocpgidx = pai32AllocIndices[ui32Index];
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+ sPhyAddr = psPageArray[uiAllocpgidx];
+ psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+			/* Is remap mem used in any real-world scenario? Should it become a
+			 * debug-only feature? The condition check should be hoisted out of
+			 * the loop; this will be done later, after some analysis */
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ psPageArray[uiFreepgidx].uiAddr = INVALID_PAGE_ADDR;
+ }
+ else
+ {
+ psPageArray[uiFreepgidx] = sPhyAddr;
+ psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ }
+
+ /* Be sure to honour the attributes associated with the allocation
+ * such as zeroing, poisoning etc. */
+ if (psPMRPageArrayData->bPoisonOnAlloc)
+ {
+ eError = _PoisonAlloc(psPMRPageArrayData->psDevNode,
+ &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+ psPMRPageArrayData->bFwLocalAlloc,
+ psPMRPageArrayData->uiContigAllocSize,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
+
+ /* Consider this as a soft failure and go ahead but log error to kernel log */
+ if (eError != PVRSRV_OK)
+ {
+#if defined(DEBUG)
+ bPoisonFail = IMG_TRUE;
+#endif
+ }
+ }
+ else
+ {
+ if (psPMRPageArrayData->bZeroOnAlloc)
+ {
+ eError = _ZeroAlloc(psPMRPageArrayData->psDevNode,
+ &psPMRPageArrayData->pasDevPAddr[uiAllocpgidx],
+ psPMRPageArrayData->bFwLocalAlloc,
+ psPMRPageArrayData->uiContigAllocSize);
+ /* Consider this as a soft failure and go ahead but log error to kernel log */
+ if (eError != PVRSRV_OK)
+ {
+#if defined(DEBUG)
+					/* No need to zero any further pages */
+ bZeroFail = IMG_TRUE;
+#endif
+ }
+ }
+ }
+ }
+
+		/* Free the additional free pages */
+ if (0 != ui32AdtnlFreePages)
+ {
+ ui32Index = ui32Loop;
+ _FreeLMPages(psPMRPageArrayData, &pai32FreeIndices[ui32Loop], ui32AdtnlFreePages);
+ ui32Loop = 0;
+
+			while (ui32Loop++ < ui32AdtnlFreePages)
+ {
+				/* Set the corresponding mapping table entry to an invalid address */
+ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Index++]] = TRANSLATION_INVALID;
+ }
+
+ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+ }
+
+ }
+
+#if defined(DEBUG)
+	if (IMG_TRUE == bPoisonFail)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error in poisoning the page", __func__));
+	}
+
+	if (IMG_TRUE == bZeroFail)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: Error in zeroing the page", __func__));
+	}
+#endif
+
+ /* Update the PMR memory holding information */
+ eError = PVRSRV_OK;
+
+e0:
+ return eError;
+
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemCPUMapLocalMem
+@Description    This function changes the CPU mappings of a sparse allocation accordingly
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapLocalMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices)
+{
+ IMG_DEV_PHYADDR *psPageArray;
+ PMR_LMALLOCARRAY_DATA *psPMRPageArrayData = (PMR_LMALLOCARRAY_DATA *)pPriv;
+ uintptr_t sCpuVABase = sCpuVAddrBase;
+ IMG_CPU_PHYADDR sCpuAddrPtr;
+ IMG_BOOL bValid;
+
+	/* Get the base address of the heap */
+ PMR_CpuPhysAddr(psPMR,
+ psPMRPageArrayData->uiLog2AllocSize,
+ 1,
+ 0, /* offset zero here mean first page in the PMR */
+ &sCpuAddrPtr,
+ &bValid);
+
+	/* The heap's physical base address is computed here by subtracting this
+	 * page's offset: the physical address of any page = heap base address +
+	 * offset of the page */
+ sCpuAddrPtr.uiAddr -= psPMRPageArrayData->pasDevPAddr[0].uiAddr;
+ psPageArray = psPMRPageArrayData->pasDevPAddr;
+
+ return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+ sCpuVABase,
+ sCpuAddrPtr,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ IMG_TRUE);
+}
+
+
+static PMR_IMPL_FUNCTAB _sPMRLMAFuncTab = {
+ /* pfnLockPhysAddresses */
+ &PMRLockSysPhysAddressesLocalMem,
+ /* pfnUnlockPhysAddresses */
+ &PMRUnlockSysPhysAddressesLocalMem,
+ /* pfnDevPhysAddr */
+ &PMRSysPhysAddrLocalMem,
+ /* pfnAcquireKernelMappingData */
+ &PMRAcquireKernelMappingDataLocalMem,
+ /* pfnReleaseKernelMappingData */
+ &PMRReleaseKernelMappingDataLocalMem,
+#if defined(INTEGRITY_OS)
+ /* pfnMapMemoryObject */
+ NULL,
+ /* pfnUnmapMemoryObject */
+ NULL,
+#endif
+ /* pfnReadBytes */
+ &PMRReadBytesLocalMem,
+ /* pfnWriteBytes */
+ &PMRWriteBytesLocalMem,
+ /* .pfnUnpinMem */
+ NULL,
+ /* .pfnPinMem */
+ NULL,
+ /* pfnChangeSparseMem*/
+ &PMRChangeSparseMemLocalMem,
+ /* pfnChangeSparseMemCPUMap */
+ &PMRChangeSparseMemCPUMapLocalMem,
+ /* pfnMMap */
+ NULL,
+ /* pfnFinalize */
+ &PMRFinalizeLocalMem
+};
+
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2AllocPageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ PMR *psPMR = NULL;
+ PMR_LMALLOCARRAY_DATA *psPrivData = NULL;
+ PMR_FLAGS_T uiPMRFlags;
+ PHYS_HEAP *psPhysHeap;
+ IMG_BOOL bZero;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bPoisonOnFree;
+ IMG_BOOL bOnDemand;
+ IMG_BOOL bContig;
+ IMG_BOOL bFwLocalAlloc;
+ IMG_BOOL bFwConfigAlloc;
+ IMG_BOOL bCpuLocalAlloc;
+ IMG_BOOL bFwGuestAlloc;
+
+ /* For sparse requests we have to do the allocation
+ * in chunks rather than requesting one contiguous block */
+ if (ui32NumPhysChunks != ui32NumVirtChunks || ui32NumVirtChunks > 1)
+ {
+ if (PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: LMA kernel mapping functions currently "
+ "don't work with discontiguous memory.",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errorOnParam;
+ }
+ bContig = IMG_FALSE;
+ }
+ else
+ {
+ bContig = IMG_TRUE;
+ }
+
+ bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bFwLocalAlloc = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bFwConfigAlloc = PVRSRV_CHECK_FW_CONFIG(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bCpuLocalAlloc = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bFwGuestAlloc = PVRSRV_CHECK_FW_GUEST(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+ if (bFwLocalAlloc)
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+ else if (bCpuLocalAlloc)
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+ }
+ else
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ }
+
+ /* Create Array structure that holds the physical pages */
+ eError = _AllocLMPageArray(psDevNode,
+ uiChunkSize * ui32NumVirtChunks,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2AllocPageSize,
+ bZero,
+ bPoisonOnAlloc,
+ bPoisonOnFree,
+ bContig,
+ bOnDemand,
+ bFwLocalAlloc,
+ bFwConfigAlloc,
+ bFwGuestAlloc,
+ psPhysHeap,
+ uiFlags,
+ uiPid,
+ &psPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPageArray;
+ }
+
+ if (!bOnDemand)
+ {
+ /* Allocate the physical pages */
+ eError = _AllocLMPages(psPrivData,pui32MappingTable);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPages;
+ }
+ }
+
+ /* In this instance, we simply pass flags straight through.
+
+ Generically, uiFlags can include things that control the PMR
+ factory, but we don't need any such thing (at the time of
+ writing!), and our caller specifies all PMR flags so we don't
+ need to meddle with what was given to us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+ /* check no significant bits were lost in cast due to different
+ bit widths for flags */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ if (bOnDemand)
+ {
+ PDUMPCOMMENT("Deferred Allocation PMR (LMA)");
+ }
+
+ eError = PMRCreatePMR(psDevNode,
+ psPhysHeap,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2AllocPageSize,
+ uiPMRFlags,
+ pszAnnotation,
+ &_sPMRLMAFuncTab,
+ psPrivData,
+ PMR_TYPE_LMA,
+ &psPMR,
+ PDUMP_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PhysmemNewLocalRamBackedPMR: Unable to create PMR (status=%d)",
+ eError));
+ goto errorOnCreate;
+ }
+
+ *ppsPMRPtr = psPMR;
+ return PVRSRV_OK;
+
+errorOnCreate:
+ if (!bOnDemand && psPrivData->iNumPagesAllocated)
+ {
+ eError2 = _FreeLMPages(psPrivData, NULL, 0);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ }
+
+errorOnAllocPages:
+ eError2 = _FreeLMPageArray(psPrivData);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+
+struct PidOSidCouplingList
+{
+ IMG_PID pId;
+ IMG_UINT32 ui32OSid;
+ IMG_UINT32 ui32OSidReg;
+ IMG_BOOL bOSidAxiProt;
+
+ struct PidOSidCouplingList *psNext;
+};
+typedef struct PidOSidCouplingList PidOSidCouplingList;
+
+static PidOSidCouplingList *psPidOSidHead;
+static PidOSidCouplingList *psPidOSidTail;
+
+void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt)
+{
+ PidOSidCouplingList *psTmp;
+
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): Inserting (PID/ OSid/ OSidReg/ IsSecure) (%d/ %d/ %d/ %s) into list",
+ pId,ui32OSid, ui32OSidReg, (bOSidAxiProt)?"Yes":"No"));
+
+ psTmp=OSAllocMem(sizeof(PidOSidCouplingList));
+
+ if (psTmp==NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"(GPU Virtualization Validation): Memory allocation failed. No list insertion => program will execute normally.\n"));
+ return ;
+ }
+
+ psTmp->pId=pId;
+ psTmp->ui32OSid=ui32OSid;
+ psTmp->ui32OSidReg=ui32OSidReg;
+ psTmp->bOSidAxiProt = bOSidAxiProt;
+
+ psTmp->psNext=NULL;
+ if (psPidOSidHead==NULL)
+ {
+ psPidOSidHead=psTmp;
+ psPidOSidTail=psTmp;
+ }
+ else
+ {
+ psPidOSidTail->psNext=psTmp;
+ psPidOSidTail=psTmp;
+ }
+
+ return ;
+}
+
+void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt)
+{
+ PidOSidCouplingList *psTmp;
+
+ for (psTmp = psPidOSidHead; psTmp != NULL; psTmp = psTmp->psNext)
+ {
+ if (psTmp->pId == pId)
+ {
+ (*pui32OSid) = psTmp->ui32OSid;
+ (*pui32OSidReg) = psTmp->ui32OSidReg;
+ (*pbOSidAxiProt) = psTmp->bOSidAxiProt;
+
+ return;
+ }
+ }
+
+ (*pui32OSid) = 0;
+ (*pui32OSidReg) = 0;
+ (*pbOSidAxiProt) = IMG_FALSE;
+
+ return;
+}
+
+void RemovePidOSidCoupling(IMG_PID pId)
+{
+ PidOSidCouplingList *psTmp, *psPrev = NULL;
+
+ for (psTmp = psPidOSidHead; psTmp != NULL; psTmp = psTmp->psNext)
+ {
+ if (psTmp->pId == pId) break;
+ psPrev = psTmp;
+ }
+
+ if (psTmp == NULL)
+ {
+ return;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "(GPU Virtualization Validation): Deleting Pairing %d / (%d - %d) from list", psTmp->pId, psTmp->ui32OSid, psTmp->ui32OSidReg));
+
+ if (psTmp == psPidOSidHead)
+ {
+ if (psPidOSidHead->psNext == NULL)
+ {
+ psPidOSidHead = NULL;
+ psPidOSidTail = NULL;
+ OSFreeMem(psTmp);
+
+ return;
+ }
+
+ psPidOSidHead = psPidOSidHead->psNext;
+ OSFreeMem(psTmp);
+ return;
+ }
+
+ if (psPrev == NULL) return;
+
+ psPrev->psNext = psTmp->psNext;
+ if (psTmp == psPidOSidTail)
+ {
+ psPidOSidTail = psPrev;
+ }
+
+ OSFreeMem(psTmp);
+
+ return;
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_lma.h b/drivers/gpu/drm/img-rogue/1.10/physmem_lma.h
new file mode 100644
index 00000000000000..4b1ff733d1e304
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_lma.h
@@ -0,0 +1,86 @@
+/**************************************************************************/ /*!
+@File
+@Title Header for local card memory allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for local card memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PHYSMEM_LMA_H_
+#define _SRVSRV_PHYSMEM_LMA_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*
+ * PhysmemNewLocalRamBackedPMR
+ *
+ * This function will create a PMR using the local card memory and is OS
+ * agnostic.
+ */
+PVRSRV_ERROR
+PhysmemNewLocalRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr);
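+
+/*
+ * A minimal usage sketch (hypothetical caller; flags, PID and error handling
+ * elided) for a regular, non-sparse 64KB allocation with 4KB pages:
+ *
+ *   IMG_UINT32 ui32MappingTable = 0;   // single identity entry
+ *   PMR *psPMR = NULL;
+ *
+ *   eError = PhysmemNewLocalRamBackedPMR(psDevNode,
+ *                                        0x10000,    // uiSize
+ *                                        0x10000,    // uiChunkSize == uiSize
+ *                                        1, 1,       // one phys/virt chunk
+ *                                        &ui32MappingTable,
+ *                                        12,         // uiLog2PageSize (4KB)
+ *                                        uiFlags, "ExamplePMR", uiPid,
+ *                                        &psPMR);
+ */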
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Define some helper list functions for the virtualization validation code
+ */
+
+void InsertPidOSidsCoupling(IMG_PID pId, IMG_UINT32 ui32OSid, IMG_UINT32 ui32OSidReg, IMG_BOOL bOSidAxiProt);
+void RetrieveOSidsfromPidList(IMG_PID pId, IMG_UINT32 *pui32OSid, IMG_UINT32 *pui32OSidReg, IMG_BOOL *pbOSidAxiProt);
+void RemovePidOSidCoupling(IMG_PID pId);
+#endif
+
+#endif /* #ifndef _SRVSRV_PHYSMEM_LMA_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_osmem.h b/drivers/gpu/drm/img-rogue/1.10/physmem_osmem.h
new file mode 100644
index 00000000000000..87c9b550ad9bdc
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_osmem.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File
+@Title PhysmemNewOSRamBackedPMR function declaration header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of Services memory management. This file defines the
+ OS memory PMR factory API that must be defined so that the
+ common & device layer code in the Services Server can allocate
+ new PMRs backed with pages from the OS page allocator.
+ Applicable to UMA-based platforms; such platforms must
+ implement this API in the OS porting layer, in the "env"
+ directory for that system.
+
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PHYSMEM_OSMEM_H_
+#define _PHYSMEM_OSMEM_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+
+/* services/server/include/ */
+#include "pmr.h"
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function PhysmemNewOSRamBackedPMR
+@Description Rogue Services will call this function to allocate GPU device
+ memory from the PMR factory supported by the OS DDK port. This
+ factory typically obtains physical memory from the kernel/OS
+ API that allocates memory from the default heap of shared system
+ memory available on the platform. The allocated memory must be
+ page-aligned and be a whole number of pages.
+ After allocating the required memory, the implementation must
+ then call PMRCreatePMR() to obtain the PMR structure that
+ describes this allocation to the upper layers of the Services
+ memory management sub-system.
+ NB. Implementation of this function is mandatory. If shared
+ system memory is not to be used in the OS port then the
+ implementation must return PVRSRV_ERROR_NOT_SUPPORTED.
+
+@Input psDevNode the device node
+@Input uiSize the size of the allocation
+ (must be a multiple of page size)
+@Input uiChunkSize when sparse allocations are requested,
+ this is the allocated chunk size.
+ For regular allocations, this will be
+ the same as uiSize.
+ (must be a multiple of page size)
+@Input ui32NumPhysChunks when sparse allocations are requested,
+ this is the number of physical chunks
+ to be allocated.
+ For regular allocations, this will be 1.
+@Input ui32NumVirtChunks when sparse allocations are requested,
+ this is the number of virtual chunks
+ covering the sparse allocation.
+ For regular allocations, this will be 1.
+@Input pui32MappingTable when sparse allocations are requested,
+ this is the list of the indices of
+ each physically-backed virtual chunk
+ For regular allocations, this will
+ be NULL.
+@Input uiLog2PageSize the physical page size in log2(bytes).
+@Input uiFlags the allocation flags.
+@Input pszAnnotation string describing the PMR (for debug).
+ This should be passed into the function
+ PMRCreatePMR().
+@Input uiPid The process ID that this allocation should
+ be associated with.
+@Output ppsPMROut pointer to the PMR created for the
+ new allocation
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+extern PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_UINT32 uiLog2PageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMROut);
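+
+/*
+ * Worked example of the sparse parameters above (hypothetical layout): a
+ * sparse allocation covering 8 virtual chunks of 64KB (uiSize = 512KB,
+ * uiChunkSize = 64KB, ui32NumVirtChunks = 8) with 3 physically-backed
+ * chunks (ui32NumPhysChunks = 3) at virtual chunk indices 0, 3 and 5
+ * would pass:
+ *
+ *   IMG_UINT32 aui32MappingTable[] = { 0, 3, 5 };
+ *
+ * Virtual chunks 1, 2, 4, 6 and 7 remain unbacked until changed via the
+ * sparse-change path.
+ */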
+
+#endif /* #ifndef _PHYSMEM_OSMEM_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.c b/drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.c
new file mode 100644
index 00000000000000..25f9a7550f70e7
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.c
@@ -0,0 +1,3837 @@
+/*************************************************************************/ /*!
+@File
+@Title Implementation of PMR functions for OS managed memory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for physical memory borrowed
+ from the memory normally managed by the operating system.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <linux/version.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/vmalloc.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <asm/io.h>
+#include <asm/dma-mapping.h>
+#if defined(CONFIG_X86)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
+#include <asm/set_memory.h>
+#else
+#include <asm/cacheflush.h>
+#endif
+#endif
+
+/* include/ */
+#include "rgx_heaps.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "rgx_pdump_panics.h"
+/* services/server/include/ */
+#include "allocmem.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pmr.h"
+#include "pmr_impl.h"
+#include "cache_km.h"
+#include "devicemem_server_utils.h"
+
+/* ourselves */
+#include "physmem_osmem.h"
+#include "physmem_osmem_linux.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+static IMG_UINT32 g_uiMaxOrder = PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM;
+#else
+/* split_page not available on older kernels */
+#undef PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM
+#define PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM 0
+static IMG_UINT32 g_uiMaxOrder;
+#endif
+
+/*
+ These correspond to the MMU min/max page sizes and the associated PTE
+ alignment that can be used on the device for an allocation. They are
+ 4KB (min) and 2MB (max) respectively.
+*/
+#define PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_4KB_PAGE_SHIFT
+#define PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ RGX_HEAP_2MB_PAGE_SHIFT
+
+/* Defines how many pages should be mapped at once to the kernel */
+#define PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES 1024 /* 4 MB */
+
+/*
+ These are used to get/set/mask lower-order bits in a dma_addr_t
+ to provide side-band information associated with that address.
+ This includes whether the address was obtained via alloc_page
+ or dma_alloc and whether the address came pre-aligned or an
+ adjustment was made manually to align it.
+*/
+#define DMA_SET_ADJUSTED_ADDR(x) ((x) | ((dma_addr_t)0x02))
+#define DMA_IS_ADDR_ADJUSTED(x) ((x) & ((dma_addr_t)0x02))
+#define DMA_SET_ALLOCPG_ADDR(x) ((x) | ((dma_addr_t)0x01))
+#define DMA_IS_ALLOCPG_ADDR(x) ((x) & ((dma_addr_t)0x01))
+#define DMA_GET_ALIGN_ADJUSTMENT(x) ((x>>2) & ((dma_addr_t)0x3ff))
+#define DMA_SET_ALIGN_ADJUSTMENT(x,y) ((x) | (((dma_addr_t)y)<<0x02))
+#define DMA_GET_ADDR(x) (((dma_addr_t)x) & ((dma_addr_t)~0xfff))
+#define DMA_VADDR_NOT_IN_USE 0xCAFEF00DDEADBEEFULL
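+
+/* Worked example of the side-band encoding above (hypothetical address):
+ * a 4KB-aligned bus address 0x10000000 obtained via alloc_page and manually
+ * adjusted by 4 units for alignment would be tracked as:
+ *
+ *   dma_addr_t a = 0x10000000;
+ *   a = DMA_SET_ALLOCPG_ADDR(a);        // 0x10000001, bit 0: alloc_page
+ *   a = DMA_SET_ADJUSTED_ADDR(a);       // 0x10000003, bit 1: adjusted
+ *   a = DMA_SET_ALIGN_ADJUSTMENT(a, 4); // 0x10000013, bits 2..11: amount
+ *
+ *   DMA_GET_ADDR(a)             == 0x10000000
+ *   DMA_GET_ALIGN_ADJUSTMENT(a) == 4
+ *
+ * This only works because page-aligned addresses have their low 12 bits
+ * clear.
+ */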
+
+typedef struct _PMR_OSPAGEARRAY_DATA_ {
+ /* Device for which this allocation has been made */
+ PVRSRV_DEVICE_NODE *psDevNode;
+ /* The pid that made this allocation */
+ IMG_PID uiPid;
+
+ /*
+ * iNumOSPagesAllocated:
+ * Number of pages allocated in this PMR so far.
+ * This allows for up to (2^31 - 1) pages. With 4KB pages, that's 8TB of memory for each PMR.
+ */
+ IMG_INT32 iNumOSPagesAllocated;
+
+ /*
+ * uiTotalNumOSPages:
+ * Total number of pages supported by this PMR (fixed as of now due to
+ * the fixed page table array size), i.e. the number of "pages"
+ * (a.k.a. macro pages, compound pages, higher-order pages, etc.)
+ */
+ IMG_UINT32 uiTotalNumOSPages;
+
+ /*
+ uiLog2AllocPageSize;
+
+ size of each "page" -- this would normally be the same as
+ PAGE_SHIFT, but we support the idea that we may allocate pages
+ in larger chunks for better contiguity, using order>0 in the
+ call to alloc_pages()
+ */
+ IMG_UINT32 uiLog2AllocPageSize;
+
+ /*
+ ui64DmaMask:
+ The device's DMA mask, as recorded at allocation time.
+ */
+ IMG_UINT64 ui64DmaMask;
+
+ /*
+ For non DMA/CMA allocation, pagearray references the pages
+ thus allocated; one entry per compound page when compound
+ pages are used. In addition, for DMA/CMA allocations, we
+ track the returned cpu virtual and device bus address.
+ */
+ struct page **pagearray;
+ dma_addr_t *dmaphysarray;
+ void **dmavirtarray;
+
+ /*
+ Flags recorded at alloc time, e.g. whether zeroing on alloc or
+ poisoning on alloc/free is required for this PMR.
+ */
+ IMG_BOOL bZero;
+ IMG_BOOL bPoisonOnFree;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bOnDemand;
+ IMG_BOOL bUnpinned; /* Should be protected by page pool lock */
+ IMG_BOOL bIsCMA; /* Is CMA memory allocated via DMA framework */
+
+ /*
+ The cache mode of the PMR. Additionally carrying the CPU-Cache-Clean
+ flag, advising us to do cache maintenance on behalf of the caller.
+ Boolean used to track if we need to revert the cache attributes
+ of the pages used in this allocation. Depends on OS/architecture.
+ */
+ IMG_UINT32 ui32CPUCacheFlags;
+ IMG_BOOL bUnsetMemoryType;
+} PMR_OSPAGEARRAY_DATA;
+
+/***********************************
+ * Page pooling for uncached pages *
+ ***********************************/
+
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+ size_t alloc_size,
+ IMG_UINT32 uiOrder,
+ void *virt_addr,
+ dma_addr_t dev_addr,
+ struct page *psPage);
+
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+ IMG_BOOL bUnsetMemoryType,
+ struct page *psPage);
+
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32FreePageCount);
+
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+ IMG_UINT32 *puiPagesFreed);
+
+/* A struct for our page pool holding an array of zeroed (!) pages.
+ * We always put units of page arrays to the pool but are
+ * able to take individual pages */
+typedef struct
+{
+ /* Linkage for page pool LRU list */
+ struct list_head sPagePoolItem;
+
+ /* How many items are still in the page array */
+ IMG_UINT32 uiItemsRemaining;
+ /* Array of the actual pages */
+ struct page **ppsPageArray;
+
+} LinuxPagePoolEntry;
+
+/* CleanupThread structure to put allocation in page pool */
+typedef struct
+{
+ PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+ IMG_UINT32 ui32CPUCacheMode;
+ LinuxPagePoolEntry *psPoolEntry;
+} LinuxCleanupData;
+
+/* A struct for the unpinned items */
+typedef struct
+{
+ struct list_head sUnpinPoolItem;
+ PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr;
+} LinuxUnpinEntry;
+
+
+/* Caches to hold page pool and page array structures */
+static struct kmem_cache *g_psLinuxPagePoolCache;
+static struct kmem_cache *g_psLinuxPageArray;
+
+/* Track what is live, all protected by pool lock.
+ * x86 needs two page pools because we have to change the memory attributes
+ * of the pages which is expensive due to an implicit flush.
+ * See set_pages_array_uc/wc/wb. */
+static IMG_UINT32 g_ui32UnpinPageCount;
+static IMG_UINT32 g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+static IMG_UINT32 g_ui32PagePoolWCCount;
+#endif
+/* Tracks asynchronous tasks currently accessing the page pool.
+ * It is incremented when a deferred-free task is created and decremented
+ * when the task has finished its work.
+ * The atomic prevents deferred work from piling up in case the deferred
+ * thread cannot keep up with the application. */
+static ATOMIC_T g_iPoolCleanTasks;
+/* We don't want too many asynchronous threads trying to access the page pool
+ * at the same time */
+#define PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS 128
+
+/* Defines how many pages the page cache should hold. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxEntries = PVR_LINUX_PHYSMEM_MAX_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxEntries;
+#endif
+
+/* We double check if we would exceed this limit if we are below MAX_POOL_PAGES
+ and want to add an allocation to the pool.
+ This prevents big allocations being given back to the OS just because they
+ exceed the MAX_POOL_PAGES limit even though the pool is currently empty. */
+#if defined(PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES)
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries = PVR_LINUX_PHYSMEM_MAX_EXCESS_POOL_PAGES;
+#else
+static const IMG_UINT32 g_ui32PagePoolMaxExcessEntries;
+#endif
+
+#if defined(CONFIG_X86)
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 2
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED,
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+};
+#else
+#define PHYSMEM_OSMEM_NUM_OF_POOLS 1
+static const IMG_UINT32 g_aui32CPUCacheFlags[PHYSMEM_OSMEM_NUM_OF_POOLS] = {
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+};
+#endif
+
+/* Global structures we use to manage the page pool */
+static DEFINE_MUTEX(g_sPagePoolMutex);
+
+/* List holding the page array pointers: */
+static LIST_HEAD(g_sPagePoolList_WC);
+static LIST_HEAD(g_sPagePoolList_UC);
+static LIST_HEAD(g_sUnpinList);
+
+static inline IMG_UINT32
+_PagesInPoolUnlocked(void)
+{
+ IMG_UINT32 uiCnt = g_ui32PagePoolUCCount;
+#if defined(CONFIG_X86)
+ uiCnt += g_ui32PagePoolWCCount;
+#endif
+ return uiCnt;
+}
+
+static inline void
+_PagePoolLock(void)
+{
+ mutex_lock(&g_sPagePoolMutex);
+}
+
+static inline int
+_PagePoolTrylock(void)
+{
+ return mutex_trylock(&g_sPagePoolMutex);
+}
+
+static inline void
+_PagePoolUnlock(void)
+{
+ mutex_unlock(&g_sPagePoolMutex);
+}
+
+static PVRSRV_ERROR
+_AddUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+ LinuxUnpinEntry *psUnpinEntry;
+
+ psUnpinEntry = OSAllocMem(sizeof(*psUnpinEntry));
+ if (!psUnpinEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OSAllocMem failed. Cannot add entry to unpin list.",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psUnpinEntry->psPageArrayDataPtr = psOSPageArrayData;
+
+ /* Add into pool that the shrinker can access easily*/
+ list_add_tail(&psUnpinEntry->sUnpinPoolItem, &g_sUnpinList);
+
+ g_ui32UnpinPageCount += psOSPageArrayData->iNumOSPagesAllocated;
+
+ return PVRSRV_OK;
+}
+
+static void
+_RemoveUnpinListEntryUnlocked(PMR_OSPAGEARRAY_DATA *psOSPageArrayData)
+{
+ LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+
+ /* Remove from pool */
+ list_for_each_entry_safe(psUnpinEntry,
+ psTempUnpinEntry,
+ &g_sUnpinList,
+ sUnpinPoolItem)
+ {
+ if (psUnpinEntry->psPageArrayDataPtr == psOSPageArrayData)
+ {
+ list_del(&psUnpinEntry->sUnpinPoolItem);
+ break;
+ }
+ }
+
+ OSFreeMem(psUnpinEntry);
+
+ g_ui32UnpinPageCount -= psOSPageArrayData->iNumOSPagesAllocated;
+}
+
+static inline IMG_BOOL
+_GetPoolListHead(IMG_UINT32 ui32CPUCacheFlags,
+ struct list_head **ppsPoolHead,
+ IMG_UINT32 **ppuiCounter)
+{
+ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+#if defined(CONFIG_X86)
+ /*
+ For x86 we need to keep different lists for uncached
+ and write-combined as we must always honour the PAT
+ setting which cares about this difference.
+ */
+
+ *ppsPoolHead = &g_sPagePoolList_WC;
+ *ppuiCounter = &g_ui32PagePoolWCCount;
+ break;
+#endif
+
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ *ppsPoolHead = &g_sPagePoolList_UC;
+ *ppuiCounter = &g_ui32PagePoolUCCount;
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unknown CPU caching mode. "
+ "Using default UC pool.",
+ __func__));
+ *ppsPoolHead = &g_sPagePoolList_UC;
+ *ppuiCounter = &g_ui32PagePoolUCCount;
+ PVR_ASSERT(0);
+ return IMG_FALSE;
+ }
+ return IMG_TRUE;
+}
+
+static struct shrinker g_sShrinker;
+
+/* Returning the number of pages that still reside in the page pool. */
+static unsigned long
+_GetNumberOfPagesInPoolUnlocked(void)
+{
+ return _PagesInPoolUnlocked() + g_ui32UnpinPageCount;
+}
+
+/* Linux shrinker function that informs the OS about how many pages we are caching and
+ * it is able to reclaim. */
+static unsigned long
+_CountObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ int remain;
+
+ PVR_ASSERT(psShrinker == &g_sShrinker);
+ (void)psShrinker;
+ (void)psShrinkControl;
+
+ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+ if (_PagePoolTrylock() == 0)
+ return 0;
+ remain = _GetNumberOfPagesInPoolUnlocked();
+ _PagePoolUnlock();
+
+ return remain;
+}
+
+/* Linux shrinker function to reclaim the pages from our page pool */
+static unsigned long
+_ScanObjectsInPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ unsigned long uNumToScan = psShrinkControl->nr_to_scan;
+ unsigned long uSurplus = 0;
+ LinuxUnpinEntry *psUnpinEntry, *psTempUnpinEntry;
+ IMG_UINT32 uiPagesFreed;
+
+ PVR_ASSERT(psShrinker == &g_sShrinker);
+ (void)psShrinker;
+
+ /* In order to avoid possible deadlock use mutex_trylock in place of mutex_lock */
+ if (_PagePoolTrylock() == 0)
+ return SHRINK_STOP;
+
+ _FreePagesFromPoolUnlocked(uNumToScan,
+ &uiPagesFreed);
+ uNumToScan -= uiPagesFreed;
+
+ if (uNumToScan == 0)
+ {
+ goto e_exit;
+ }
+
+ /* Free unpinned memory, starting with LRU entries */
+ list_for_each_entry_safe(psUnpinEntry,
+ psTempUnpinEntry,
+ &g_sUnpinList,
+ sUnpinPoolItem)
+ {
+ PMR_OSPAGEARRAY_DATA *psPageArrayDataPtr = psUnpinEntry->psPageArrayDataPtr;
+ IMG_UINT32 uiNumPages = (psPageArrayDataPtr->uiTotalNumOSPages > psPageArrayDataPtr->iNumOSPagesAllocated) ?
+ psPageArrayDataPtr->iNumOSPagesAllocated : psPageArrayDataPtr->uiTotalNumOSPages;
+ PVRSRV_ERROR eError;
+
+ /* Free associated pages */
+ eError = _FreeOSPages(psPageArrayDataPtr,
+ NULL,
+ 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Shrinker is unable to free unpinned pages. Error: %s (%d)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError),
+ eError));
+ goto e_exit;
+ }
+
+ /* Remove item from pool */
+ list_del(&psUnpinEntry->sUnpinPoolItem);
+
+ g_ui32UnpinPageCount -= uiNumPages;
+
+ /* Check if there is more to free or if we already surpassed the limit */
+ if (uiNumPages < uNumToScan)
+ {
+ uNumToScan -= uiNumPages;
+
+ }
+ else if (uiNumPages > uNumToScan)
+ {
+ uSurplus += uiNumPages - uNumToScan;
+ uNumToScan = 0;
+ goto e_exit;
+ }
+ else
+ {
+ uNumToScan -= uiNumPages;
+ goto e_exit;
+ }
+ }
+
+e_exit:
+ if (list_empty(&g_sUnpinList))
+ {
+ PVR_ASSERT(g_ui32UnpinPageCount == 0);
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+ {
+ int remain;
+ remain = _GetNumberOfPagesInPoolUnlocked();
+ _PagePoolUnlock();
+ return remain;
+ }
+#else
+ /* Returning the number of pages freed during the scan */
+ _PagePoolUnlock();
+ return psShrinkControl->nr_to_scan - uNumToScan + uSurplus;
+#endif
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0))
+static int
+_ShrinkPagePool(struct shrinker *psShrinker, struct shrink_control *psShrinkControl)
+{
+ if (psShrinkControl->nr_to_scan != 0)
+ {
+ return _ScanObjectsInPagePool(psShrinker, psShrinkControl);
+ }
+ else
+ {
+ /* No pages are being reclaimed so just return the page count */
+ return _CountObjectsInPagePool(psShrinker, psShrinkControl);
+ }
+}
+
+static struct shrinker g_sShrinker =
+{
+ .shrink = _ShrinkPagePool,
+ .seeks = DEFAULT_SEEKS
+};
+#else
+static struct shrinker g_sShrinker =
+{
+ .count_objects = _CountObjectsInPagePool,
+ .scan_objects = _ScanObjectsInPagePool,
+ .seeks = DEFAULT_SEEKS
+};
+#endif
+
+/* Register the shrinker so Linux can reclaim cached pages */
+void LinuxInitPhysmem(void)
+{
+ g_psLinuxPageArray = kmem_cache_create("pvr-pa", sizeof(PMR_OSPAGEARRAY_DATA), 0, 0, NULL);
+
+ _PagePoolLock();
+ g_psLinuxPagePoolCache = kmem_cache_create("pvr-pp", sizeof(LinuxPagePoolEntry), 0, 0, NULL);
+ if (g_psLinuxPagePoolCache)
+ {
+ /* Only create the shrinker if we created the cache OK */
+ register_shrinker(&g_sShrinker);
+ }
+ _PagePoolUnlock();
+
+ OSAtomicWrite(&g_iPoolCleanTasks, 0);
+}
+
+/* Unregister the shrinker and remove all pages from the pool that are still left */
+void LinuxDeinitPhysmem(void)
+{
+ IMG_UINT32 uiPagesFreed;
+
+ if (OSAtomicRead(&g_iPoolCleanTasks) > 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Still deferred cleanup tasks running "
+ "while deinitialising memory subsystem."));
+ }
+
+ _PagePoolLock();
+ if (_FreePagesFromPoolUnlocked(IMG_UINT32_MAX, &uiPagesFreed) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Unable to free all pages from page pool when "
+ "deinitialising memory subsystem."));
+ PVR_ASSERT(0);
+ }
+
+ PVR_ASSERT(_PagesInPoolUnlocked() == 0);
+
+ /* Free the page cache */
+ kmem_cache_destroy(g_psLinuxPagePoolCache);
+
+ unregister_shrinker(&g_sShrinker);
+ _PagePoolUnlock();
+
+ kmem_cache_destroy(g_psLinuxPageArray);
+}
+
+static void EnableOOMKiller(void)
+{
+ current->flags &= ~PF_DUMPCORE;
+}
+
+static void DisableOOMKiller(void)
+{
+ /* PF_DUMPCORE is treated by the VM as if the OOM killer was disabled.
+ *
+ * As oom_killer_disable() is an inline, non-exported function, we
+ * can't use it from a modular driver. Furthermore, the OOM killer
+ * API doesn't look thread safe, whereas operating on `current' is.
+ */
+ WARN_ON(current->flags & PF_DUMPCORE);
+ current->flags |= PF_DUMPCORE;
+}
+
+/* Prints out the addresses in a page array for debugging purposes
+ * Define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY locally to activate: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY 1 */
+static inline void
+_DumpPageArray(struct page **pagearray, IMG_UINT32 uiPagesToPrint)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_ARRAY)
+ IMG_UINT32 i;
+ if (pagearray)
+ {
+ printk("Array %p:\n", pagearray);
+ for (i = 0; i < uiPagesToPrint; i++)
+ {
+ printk("%p | ", (pagearray)[i]);
+ }
+ printk("\n");
+ }
+ else
+ {
+ printk("Array is NULL:\n");
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pagearray);
+ PVR_UNREFERENCED_PARAMETER(uiPagesToPrint);
+#endif
+}
+
+/* Debugging function that dumps out the number of pages for every
+ * page array that is currently in the page pool.
+ * Not defined by default. Define locally to activate feature: */
+/* #define PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL 1 */
+static void
+_DumpPoolStructure(void)
+{
+#if defined(PHYSMEM_OSMEM_DEBUG_DUMP_PAGE_POOL)
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 j;
+ IMG_UINT32 *puiCounter;
+
+ printk("\n");
+ /* Empty all pools */
+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+ {
+
+ printk("pool = %u \n", j);
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+ {
+ break;
+ }
+
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ printk("%u | ", psPagePoolEntry->uiItemsRemaining);
+ }
+ printk("\n");
+ }
+#endif
+}
+
+/* Free a certain number of pages from the page pool.
+ * Mainly used in error paths or at deinitialisation to
+ * empty the whole pool. */
+static PVRSRV_ERROR
+_FreePagesFromPoolUnlocked(IMG_UINT32 uiMaxPagesToFree,
+ IMG_UINT32 *puiPagesFreed)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 i, j;
+ IMG_UINT32 *puiCounter;
+
+ *puiPagesFreed = uiMaxPagesToFree;
+
+ /* Empty all pools */
+ for (j = 0; j < PHYSMEM_OSMEM_NUM_OF_POOLS; j++)
+ {
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(g_aui32CPUCacheFlags[j], &psPoolHead, &puiCounter))
+ {
+ break;
+ }
+
+ /* Free the pages and remove page arrays from the pool if they are exhausted */
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ IMG_UINT32 uiItemsToFree;
+ struct page **ppsPageArray;
+
+ /* Check if we are going to free the whole page array or just parts */
+ if (psPagePoolEntry->uiItemsRemaining <= uiMaxPagesToFree)
+ {
+ uiItemsToFree = psPagePoolEntry->uiItemsRemaining;
+ ppsPageArray = psPagePoolEntry->ppsPageArray;
+ }
+ else
+ {
+ uiItemsToFree = uiMaxPagesToFree;
+ ppsPageArray = &(psPagePoolEntry->ppsPageArray[psPagePoolEntry->uiItemsRemaining - uiItemsToFree]);
+ }
+
+#if defined(CONFIG_X86)
+ /* Set the correct page caching attributes on x86 */
+ if (!PVRSRV_CHECK_CPU_CACHED(g_aui32CPUCacheFlags[j]))
+ {
+ int ret;
+ ret = set_pages_array_wb(ppsPageArray, uiItemsToFree);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+ eError = PVRSRV_ERROR_FAILED_TO_FREE_PAGES;
+ goto e_exit;
+ }
+ }
+#endif
+
+ /* Free the actual pages */
+ for (i = 0; i < uiItemsToFree; i++)
+ {
+ __free_pages(ppsPageArray[i], 0);
+ ppsPageArray[i] = NULL;
+ }
+
+ /* Reduce counters */
+ uiMaxPagesToFree -= uiItemsToFree;
+ *puiCounter -= uiItemsToFree;
+ psPagePoolEntry->uiItemsRemaining -= uiItemsToFree;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /*
+ * MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * uiItemsToFree);
+#endif
+
+ /* Is this pool entry exhausted, delete it */
+ if (psPagePoolEntry->uiItemsRemaining == 0)
+ {
+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+ }
+
+ /* Return if we have all our pages */
+ if (uiMaxPagesToFree == 0)
+ {
+ goto e_exit;
+ }
+ }
+ }
+
+e_exit:
+ *puiPagesFreed -= uiMaxPagesToFree;
+ _DumpPoolStructure();
+ return eError;
+}
+
+/* Get a certain number of pages from the page pool and
+ * copy them directly into a given page array. */
+static void
+_GetPagesFromPoolUnlocked(IMG_UINT32 ui32CPUCacheFlags,
+ IMG_UINT32 uiMaxNumPages,
+ struct page **ppsPageArray,
+ IMG_UINT32 *puiNumReceivedPages)
+{
+ LinuxPagePoolEntry *psPagePoolEntry, *psTempPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 i;
+ IMG_UINT32 *puiCounter;
+
+ *puiNumReceivedPages = 0;
+
+ /* Get the correct list for this caching mode */
+ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+ {
+ return;
+ }
+
+ /* Check if there are actually items in the list */
+ if (list_empty(psPoolHead))
+ {
+ return;
+ }
+
+ PVR_ASSERT(*puiCounter > 0);
+
+ /* Receive pages from the pool */
+ list_for_each_entry_safe(psPagePoolEntry,
+ psTempPoolEntry,
+ psPoolHead,
+ sPagePoolItem)
+ {
+ /* Get the pages from this pool entry */
+ for (i = psPagePoolEntry->uiItemsRemaining; i != 0 && *puiNumReceivedPages < uiMaxNumPages; i--)
+ {
+ ppsPageArray[*puiNumReceivedPages] = psPagePoolEntry->ppsPageArray[i-1];
+ (*puiNumReceivedPages)++;
+ psPagePoolEntry->uiItemsRemaining--;
+ }
+
+ /* Is this pool entry exhausted, delete it */
+ if (psPagePoolEntry->uiItemsRemaining == 0)
+ {
+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+ list_del(&psPagePoolEntry->sPagePoolItem);
+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+ }
+
+ /* Return if we have all our pages */
+ if (*puiNumReceivedPages == uiMaxNumPages)
+ {
+ goto exit_ok;
+ }
+ }
+
+exit_ok:
+
+ /* Update counters */
+ *puiCounter -= *puiNumReceivedPages;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * (*puiNumReceivedPages));
+#endif
+
+ _DumpPoolStructure();
+ return;
+}
+
+/* Same as _GetPagesFromPoolUnlocked but handles locking and
+ * checks first whether pages from the pool are a valid option. */
+static inline void
+_GetPagesFromPoolLocked(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32CPUCacheFlags,
+ IMG_UINT32 uiPagesToAlloc,
+ IMG_UINT32 uiOrder,
+ IMG_BOOL bZero,
+ struct page **ppsPageArray,
+ IMG_UINT32 *puiPagesFromPool)
+{
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+ PVR_UNREFERENCED_PARAMETER(bZero);
+#else
+ /* Don't get pages from pool if it doesn't provide zeroed pages */
+ if (bZero)
+ {
+ return;
+ }
+#endif
+
+ /* The page pool stores only order 0 pages. If we need zeroed memory we
+ * directly allocate from the OS because it is faster than
+ * doing it within the driver. */
+ if (uiOrder == 0 &&
+ !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+ {
+
+ _PagePoolLock();
+ _GetPagesFromPoolUnlocked(ui32CPUCacheFlags,
+ uiPagesToAlloc,
+ ppsPageArray,
+ puiPagesFromPool);
+ _PagePoolUnlock();
+ }
+
+ return;
+}
+
+/* Takes a page array and maps it into the kernel to write zeros */
+static PVRSRV_ERROR
+_ZeroPageArray(IMG_UINT32 uiNumToClean,
+ struct page **ppsCleanArray,
+ pgprot_t pgprot)
+{
+ IMG_CPU_VIRTADDR pvAddr;
+ IMG_UINT32 uiMaxPagesToMap = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
+ uiNumToClean);
+
+ /* Map and fill the pages with zeros.
+ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+ * pages at a time. */
+ while (uiNumToClean != 0)
+ {
+ IMG_UINT32 uiToClean = (uiNumToClean >= uiMaxPagesToMap) ?
+ uiMaxPagesToMap :
+ uiNumToClean;
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ pvAddr = vmap(ppsCleanArray, uiToClean, VM_WRITE, pgprot);
+#else
+ pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot);
+#endif
+ if (!pvAddr)
+ {
+ if (uiMaxPagesToMap <= 1)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Out of vmalloc memory, "
+ "unable to map pages for zeroing.",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ /* Halve the pages to map at once and try again. */
+ uiMaxPagesToMap = uiMaxPagesToMap >> 1;
+ continue;
+ }
+ }
+
+ OSDeviceMemSet(pvAddr, 0, PAGE_SIZE * uiToClean);
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ vunmap(pvAddr);
+#else
+ vm_unmap_ram(pvAddr, uiToClean);
+#endif
+
+ ppsCleanArray = &(ppsCleanArray[uiToClean]);
+ uiNumToClean -= uiToClean;
+ }
+
+ return PVRSRV_OK;
+}
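+
+/* Example of the fallback above (hypothetical numbers): if vm_map_ram() for
+ * 1024 pages fails due to vmalloc pressure, the loop retries with 512, 256,
+ * ... down to a single page, and only reports PVRSRV_ERROR_OUT_OF_MEMORY
+ * once even one page cannot be mapped. */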
+
+static PVRSRV_ERROR
+_CleanupThread_CleanPages(void *pvData)
+{
+ LinuxCleanupData *psCleanupData = (LinuxCleanupData*) pvData;
+ LinuxPagePoolEntry *psPagePoolEntry = psCleanupData->psPoolEntry;
+ struct list_head *psPoolHead = NULL;
+ IMG_UINT32 *puiCounter = NULL;
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+ PVRSRV_ERROR eError;
+ pgprot_t pgprot;
+ IMG_UINT32 i;
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+
+ /* Get the correct pool for this caching mode. */
+ _GetPoolListHead(psCleanupData->ui32CPUCacheMode, &psPoolHead, &puiCounter);
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+ switch (PVRSRV_CPU_CACHE_MODE(psCleanupData->ui32CPUCacheMode))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+#if defined(CONFIG_X86)
+ /* For x86 we can only map with the same attributes
+ * as in the PAT settings*/
+ pgprot = pgprot_noncached(PAGE_KERNEL);
+ break;
+#endif
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+ break;
+
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unknown caching mode to set page protection flags.",
+ __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto eExit;
+ }
+
+ /* Map and fill the pages with zeros.
+ * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+ * pages at a time. */
+ eError = _ZeroPageArray(psPagePoolEntry->uiItemsRemaining,
+ psPagePoolEntry->ppsPageArray,
+ pgprot);
+ if (eError != PVRSRV_OK)
+ {
+ goto eExit;
+ }
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+
+ /* Lock down pool and add item */
+ _PagePoolLock();
+
+ /* Pool counters were already updated so don't do it here again */
+
+ /* The pages are all zeroed so return them to the pool. */
+ list_add_tail(&psPagePoolEntry->sPagePoolItem, psPoolHead);
+
+ _DumpPoolStructure();
+ _PagePoolUnlock();
+
+ OSFreeMem(pvData);
+ OSAtomicDecrement(&g_iPoolCleanTasks);
+
+ return PVRSRV_OK;
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+eExit:
+ /* we failed to zero the pages so return the error so we can
+ * retry during the next spin */
+ if ((psCleanupData->sCleanupWork.ui32RetryCount - 1) > 0)
+ {
+ return eError;
+ }
+
+ /* this was the last retry, give up and free pages to OS */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Deferred task error, freeing pages to OS.",
+ __func__));
+ _PagePoolLock();
+
+ *puiCounter -= psPagePoolEntry->uiItemsRemaining;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsDecrMemAllocPoolStat(PAGE_SIZE * psCleanupData->psPoolEntry->uiItemsRemaining);
+#endif
+
+ _PagePoolUnlock();
+
+ for (i = 0; i < psCleanupData->psPoolEntry->uiItemsRemaining; i++)
+ {
+ _FreeOSPage(0, IMG_TRUE, psPagePoolEntry->ppsPageArray[i]);
+ }
+ OSFreeMemNoStats(psPagePoolEntry->ppsPageArray);
+ kmem_cache_free(g_psLinuxPagePoolCache, psPagePoolEntry);
+ OSFreeMem(psCleanupData);
+
+ OSAtomicDecrement(&g_iPoolCleanTasks);
+
+ return PVRSRV_OK;
+#endif /* defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES) */
+}
+
+
+/* Put page array to the page pool.
+ * Handles locking and checks whether the pages are
+ * suitable to be stored in the pool. */
+static inline IMG_BOOL
+_PutPagesToPoolLocked(IMG_UINT32 ui32CPUCacheFlags,
+ struct page **ppsPageArray,
+ IMG_BOOL bUnpinned,
+ IMG_UINT32 uiOrder,
+ IMG_UINT32 uiNumPages)
+{
+ LinuxCleanupData *psCleanupData;
+ PVRSRV_CLEANUP_THREAD_WORK *psCleanupThreadFn;
+#if defined(SUPPORT_PHYSMEM_TEST)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#endif
+
+ if (uiOrder == 0 &&
+ !bUnpinned &&
+ !PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags))
+ {
+ IMG_UINT32 uiEntries;
+ IMG_UINT32 *puiCounter;
+ struct list_head *psPoolHead;
+
+ _PagePoolLock();
+
+ uiEntries = _PagesInPoolUnlocked();
+
+ /* Check for number of current page pool entries and whether
+ * we have other asynchronous tasks in-flight */
+ if ( (uiEntries < g_ui32PagePoolMaxEntries) &&
+ ((uiEntries + uiNumPages) <
+ (g_ui32PagePoolMaxEntries + g_ui32PagePoolMaxExcessEntries) ))
+ {
+ if (OSAtomicIncrement(&g_iPoolCleanTasks) <=
+ PVR_LINUX_PHYSMEM_MAX_ASYNC_CLEAN_TASKS)
+ {
+#if defined(SUPPORT_PHYSMEM_TEST)
+ if (!psPVRSRVData->hCleanupThread)
+ {
+ goto eDecrement;
+ }
+#endif
+
+ psCleanupData = OSAllocMem(sizeof(*psCleanupData));
+
+ if (!psCleanupData)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get memory for deferred page pool cleanup. "
+ "Trying to free pages immediately",
+ __FUNCTION__));
+ goto eDecrement;
+ }
+
+ psCleanupThreadFn = &psCleanupData->sCleanupWork;
+ psCleanupData->ui32CPUCacheMode = ui32CPUCacheFlags;
+ psCleanupData->psPoolEntry = kmem_cache_alloc(g_psLinuxPagePoolCache, GFP_KERNEL);
+
+ if (!psCleanupData->psPoolEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get memory for deferred page pool cleanup. "
+ "Trying to free pages immediately",
+ __FUNCTION__));
+ goto eFreeCleanupData;
+ }
+
+ if (!_GetPoolListHead(ui32CPUCacheFlags, &psPoolHead, &puiCounter))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get correct page pool",
+ __FUNCTION__));
+ goto eFreePoolEntry;
+ }
+
+ /* Increase counter here to avoid deferred cleanup tasks piling up */
+ *puiCounter = *puiCounter + uiNumPages;
+
+ psCleanupData->psPoolEntry->ppsPageArray = ppsPageArray;
+ psCleanupData->psPoolEntry->uiItemsRemaining = uiNumPages;
+
+ psCleanupThreadFn->pfnFree = _CleanupThread_CleanPages;
+ psCleanupThreadFn->pvData = psCleanupData;
+ psCleanupThreadFn->bDependsOnHW = IMG_FALSE;
+ CLEANUP_THREAD_SET_RETRY_COUNT(psCleanupThreadFn,
+ CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+
+ #if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* MemStats usually relies on having the bridge lock held, however
+ * the page pool code may call PVRSRVStatsIncrMemAllocPoolStat and
+ * PVRSRVStatsDecrMemAllocPoolStat without the bridge lock held, so
+ * the page pool lock is used to ensure these calls are mutually
+ * exclusive
+ */
+ PVRSRVStatsIncrMemAllocPoolStat(PAGE_SIZE * uiNumPages);
+ #endif
+
+ /* We must not hold the pool lock when calling AddWork because it might call us back to
+ * free pooled pages directly when unloading the driver */
+ _PagePoolUnlock();
+
+ PVRSRVCleanupThreadAddWork(psCleanupThreadFn);
+
+ }
+ else
+ {
+ goto eDecrement;
+ }
+
+ }
+ else
+ {
+ goto eUnlock;
+ }
+ }
+ else
+ {
+ goto eExitFalse;
+ }
+
+ return IMG_TRUE;
+
+eFreePoolEntry:
+ OSFreeMem(psCleanupData->psPoolEntry);
+eFreeCleanupData:
+ OSFreeMem(psCleanupData);
+eDecrement:
+ OSAtomicDecrement(&g_iPoolCleanTasks);
+eUnlock:
+ _PagePoolUnlock();
+eExitFalse:
+ return IMG_FALSE;
+}
+
+/* Get the GFP flags that we pass to the page allocator */
+static inline gfp_t
+_GetGFPFlags(IMG_BOOL bZero,
+ PVRSRV_DEVICE_NODE *psDevNode)
+{
+ struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+ gfp_t gfp_flags = GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC;
+
+#if defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+ /* Force use of HIGHMEM */
+ gfp_flags |= __GFP_HIGHMEM;
+
+ PVR_UNREFERENCED_PARAMETER(psDev);
+#else
+
+ if (psDev)
+ {
+ if (*psDev->dma_mask > DMA_BIT_MASK(32))
+ {
+ /* If our system is able to handle large addresses use highmem */
+ gfp_flags |= __GFP_HIGHMEM;
+ }
+ else if (*psDev->dma_mask == DMA_BIT_MASK(32))
+ {
+ /* Limit to 32 bit.
+ * Achieved by NOT setting __GFP_HIGHMEM for 32 bit systems and
+ * setting __GFP_DMA32 for 64 bit systems */
+ gfp_flags |= __GFP_DMA32;
+ }
+ else
+ {
+ /* Limit to size of DMA zone. */
+ gfp_flags |= __GFP_DMA;
+ }
+ }
+#endif
+
+ if (bZero)
+ {
+ gfp_flags |= __GFP_ZERO;
+ }
+
+ return gfp_flags;
+}
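+
+/* Example of the mask-driven selection above (hypothetical device): with
+ * *psDev->dma_mask == DMA_BIT_MASK(32) on a 64-bit system the function
+ * returns
+ *
+ *   GFP_USER | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_DMA32
+ *
+ * (plus __GFP_ZERO when bZero is set), keeping all backing pages below 4GB.
+ */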
+
+/*
+ * @Function _PoisonDevicePage
+ *
+ * @Description Poisons a device page. In normal case the device page has the
+ * same size as the OS page and so the ui32DevPageOrder will be
+ * equal to 0 and page argument will point to one OS page
+ * structure. In case of Non4K pages the order will be greater
+ * than 0 and page argument will point to an array of OS
+ * allocated pages.
+ *
+ * @Input psDevNode pointer to the device object
+ * @Input page array of the pages allocated from the OS
+ * @Input ui32DevPageOrder order of the page (same as the one used to allocate
+ * the page array by alloc_pages())
+ * @Input ui32CPUCacheFlags CPU cache flags applied to the page
+ * @Input ui8PoisonValue value used to poison the page
+ */
+static void
+_PoisonDevicePage(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page *page,
+ IMG_UINT32 ui32DevPageOrder,
+ IMG_UINT32 ui32CPUCacheFlags,
+ IMG_BYTE ui8PoisonValue)
+{
+ IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+ IMG_UINT32 ui32OsPageIdx;
+
+ for (ui32OsPageIdx = 0;
+ ui32OsPageIdx < (1U << ui32DevPageOrder);
+ ui32OsPageIdx++)
+ {
+ struct page *current_page = page + ui32OsPageIdx;
+ void *kvaddr = kmap(current_page);
+
+ if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
+ PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
+ {
+ OSDeviceMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE);
+ }
+ else
+ {
+ OSCachedMemSet(kvaddr, ui8PoisonValue, PAGE_SIZE);
+ }
+
+ /* Flush the range of the page just poisoned (current_page, not the base page) */
+ sCPUPhysAddrStart.uiAddr = page_to_phys(current_page);
+ sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+ OSCPUCacheFlushRangeKM(psDevNode,
+ kvaddr, kvaddr + PAGE_SIZE,
+ sCPUPhysAddrStart, sCPUPhysAddrEnd);
+
+ kunmap(current_page);
+ }
+}
+
+/* Allocate and initialise the structure to hold the metadata of the allocation */
+static PVRSRV_ERROR
+_AllocOSPageArray(PVRSRV_DEVICE_NODE *psDevNode,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 uiLog2AllocPageSize,
+ IMG_BOOL bZero,
+ IMG_BOOL bIsCMA,
+ IMG_BOOL bPoisonOnAlloc,
+ IMG_BOOL bPoisonOnFree,
+ IMG_BOOL bOnDemand,
+ IMG_UINT32 ui32CPUCacheFlags,
+ IMG_PID uiPid,
+ PMR_OSPAGEARRAY_DATA **ppsPageArrayDataPtr)
+{
+ PVRSRV_ERROR eError;
+ PMR_SIZE_T uiSize = uiChunkSize * ui32NumVirtChunks;
+ IMG_UINT32 uiNumOSPageSizeVirtPages;
+ IMG_UINT32 uiNumDevPageSizeVirtPages;
+ PMR_OSPAGEARRAY_DATA *psPageArrayData;
+ IMG_UINT64 ui64DmaMask = 0;
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+
+ /* Use of cast below is justified by the assertion that follows to
+ * prove that no significant bits have been truncated */
+ uiNumOSPageSizeVirtPages = (IMG_UINT32) (((uiSize - 1) >> PAGE_SHIFT) + 1);
+ PVR_ASSERT(((PMR_SIZE_T) uiNumOSPageSizeVirtPages << PAGE_SHIFT) == uiSize);
+
+ uiNumDevPageSizeVirtPages = uiNumOSPageSizeVirtPages >> (uiLog2AllocPageSize - PAGE_SHIFT);
+
+ /* Allocate the struct to hold the metadata */
+ psPageArrayData = kmem_cache_alloc(g_psLinuxPageArray, GFP_KERNEL);
+ if (psPageArrayData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OS refused the memory allocation for the private data.",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_freed_none;
+ }
+
+ /*
+ * Allocate the page array
+ *
+ * We avoid tracking this memory because this structure might go into the page pool.
+ * The OS can drain the pool asynchronously and when doing that we have to avoid
+ * any potential deadlocks.
+ *
+ * In one scenario the process stats vmalloc hash table lock is held while
+ * the oom-killer softirq is trying to call _ScanObjectsInPagePool(); it must
+ * not try to acquire the vmalloc hash table lock again.
+ */
+ psPageArrayData->pagearray = OSAllocZMemNoStats(sizeof(struct page *) * uiNumDevPageSizeVirtPages);
+ if (psPageArrayData->pagearray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_kmem_cache;
+ }
+ else
+ {
+ if (bIsCMA)
+ {
+ /* Allocate additional DMA/CMA cpu kernel virtual address & device bus address array state */
+ psPageArrayData->dmavirtarray = OSAllocZMemNoStats(sizeof(void*) * uiNumDevPageSizeVirtPages);
+ if (psPageArrayData->dmavirtarray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_pagearray;
+ }
+
+ psPageArrayData->dmaphysarray = OSAllocZMemNoStats(sizeof(dma_addr_t) * uiNumDevPageSizeVirtPages);
+ if (psPageArrayData->dmaphysarray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_cpuvirtaddrarray;
+ }
+ }
+ }
+
+ if (psDevNode->psDevConfig && psDevNode->psDevConfig->pvOSDevice)
+ {
+ struct device *psDev = psDevNode->psDevConfig->pvOSDevice;
+ ui64DmaMask = *psDev->dma_mask;
+ }
+
+ /* Init metadata */
+ psPageArrayData->psDevNode = psDevNode;
+ psPageArrayData->uiPid = uiPid;
+ psPageArrayData->iNumOSPagesAllocated = 0;
+ psPageArrayData->uiTotalNumOSPages = uiNumOSPageSizeVirtPages;
+ psPageArrayData->uiLog2AllocPageSize = uiLog2AllocPageSize;
+ psPageArrayData->ui64DmaMask = ui64DmaMask;
+ psPageArrayData->bZero = bZero;
+ psPageArrayData->bIsCMA = bIsCMA;
+ psPageArrayData->bOnDemand = bOnDemand;
+ psPageArrayData->bUnpinned = IMG_FALSE;
+ psPageArrayData->bPoisonOnFree = bPoisonOnFree;
+ psPageArrayData->bPoisonOnAlloc = bPoisonOnAlloc;
+ psPageArrayData->ui32CPUCacheFlags = ui32CPUCacheFlags;
+
+	/* Indicate whether this is an allocation with the default caching attribute (i.e. cached) or not */
+ if (PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags) ||
+ PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags))
+ {
+ psPageArrayData->bUnsetMemoryType = IMG_TRUE;
+ }
+ else
+ {
+ psPageArrayData->bUnsetMemoryType = IMG_FALSE;
+ }
+
+ *ppsPageArrayDataPtr = psPageArrayData;
+ return PVRSRV_OK;
+
+/* Error path */
+e_free_cpuvirtaddrarray:
+ OSFreeMemNoStats(psPageArrayData->dmavirtarray);
+
+e_free_pagearray:
+ OSFreeMemNoStats(psPageArrayData->pagearray);
+
+e_free_kmem_cache:
+ kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OS refused the memory allocation for the page pointer table. "
+ "Did you ask for too much?",
+ __func__));
+
+e_freed_none:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
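+/* Illustrative arithmetic (not part of the driver): the page counts above
+ * follow directly from the sizes. For example, with PAGE_SHIFT == 12,
+ * uiSize == 64K and uiLog2AllocPageSize == 14 (16K device pages):
+ *
+ *   uiNumOSPageSizeVirtPages  = ((0x10000 - 1) >> 12) + 1 = 16
+ *   uiNumDevPageSizeVirtPages = 16 >> (14 - 12)           = 4
+ */
+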
+static inline void
+_ApplyCacheMaintenance(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page **ppsPage,
+ IMG_UINT32 uiNumPages,
+ IMG_BOOL bFlush)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_RETRY;
+ void * pvAddr;
+
+
+ if ((uiNumPages << PAGE_SHIFT) >= PVR_DIRTY_BYTES_FLUSH_THRESHOLD)
+ {
+		/* May fail, so fall back to the range-based flush below */
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ }
+
+
+ if (eError != PVRSRV_OK)
+ {
+
+ if (OSCPUCacheOpAddressType() == PVRSRV_CACHE_OP_ADDR_TYPE_VIRTUAL)
+ {
+ pgprot_t pgprot = PAGE_KERNEL;
+
+ IMG_UINT32 uiNumToClean = uiNumPages;
+ struct page **ppsCleanArray = ppsPage;
+
+			/* Map and flush pages.
+			 * For large page arrays do it PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES
+			 * pages at a time. */
+ while (uiNumToClean != 0)
+ {
+ IMG_UINT32 uiToClean = MIN(PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES,
+ uiNumToClean);
+ IMG_CPU_PHYADDR sUnused =
+ { IMG_CAST_TO_CPUPHYADDR_UINT(0xCAFEF00DDEADBEEFULL) };
+
+ pvAddr = vm_map_ram(ppsCleanArray, uiToClean, -1, pgprot);
+ if (!pvAddr)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Unable to flush page cache for new allocation, skipping flush."));
+ return;
+ }
+
+ CacheOpExec(psDevNode,
+ pvAddr,
+				            pvAddr + (uiToClean * PAGE_SIZE),
+ sUnused,
+ sUnused,
+ PVRSRV_CACHE_OP_FLUSH);
+
+ vm_unmap_ram(pvAddr, uiToClean);
+
+ ppsCleanArray = &(ppsCleanArray[uiToClean]);
+ uiNumToClean -= uiToClean;
+ }
+ }
+ else
+ {
+ IMG_UINT32 ui32Idx;
+
+ for (ui32Idx = 0; ui32Idx < uiNumPages; ++ui32Idx)
+ {
+ IMG_CPU_PHYADDR sCPUPhysAddrStart, sCPUPhysAddrEnd;
+
+ pvAddr = kmap(ppsPage[ui32Idx]);
+ sCPUPhysAddrStart.uiAddr = page_to_phys(ppsPage[ui32Idx]);
+ sCPUPhysAddrEnd.uiAddr = sCPUPhysAddrStart.uiAddr + PAGE_SIZE;
+
+ /* If we're zeroing, we need to make sure the cleared memory is pushed out
+ * of the cache before the cache lines are invalidated */
+ CacheOpExec(psDevNode,
+ pvAddr,
+ pvAddr + PAGE_SIZE,
+ sCPUPhysAddrStart,
+ sCPUPhysAddrEnd,
+ PVRSRV_CACHE_OP_FLUSH);
+
+ kunmap(ppsPage[ui32Idx]);
+ }
+ }
+
+ }
+}
+
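+/* Illustrative sketch (not part of the driver): the function above prefers one
+ * global flush for large ranges and otherwise batches virtual-address flushes.
+ * Assuming, for illustration only, a 1MB PVR_DIRTY_BYTES_FLUSH_THRESHOLD and
+ * PVR_LINUX_PHYSMEM_MAX_KMAP_PAGES == 1024:
+ *
+ *   //  512 pages (2MB) : global PVRSRV_CACHE_OP_FLUSH attempted first
+ *   //   64 pages (256K): below threshold, flushed via one vm_map_ram() batch
+ *   // 3000 pages       : if the global flush fails, vm_map_ram() batches of
+ *   //                    1024, 1024 and 952 pages are flushed and unmapped
+ */
+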
+/* Changes the caching attributes of pages on x86 systems and takes care of
+ * cache maintenance. This function is supposed to be called once for pages that
+ * came from alloc_pages(). It expects an array of OS-page-sized pages!
+ *
+ * Flushes/invalidates the pages in case the allocation is not cached. This is
+ * necessary to remove pages from the cache that might be flushed later and
+ * corrupt memory. */
+static inline PVRSRV_ERROR
+_ApplyOSPagesAttribute(PVRSRV_DEVICE_NODE *psDevNode,
+ struct page **ppsPage,
+ IMG_UINT32 uiNumPages,
+ IMG_BOOL bFlush,
+ IMG_UINT32 ui32CPUCacheFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bCPUCached = PVRSRV_CHECK_CPU_CACHED(ui32CPUCacheFlags);
+ IMG_BOOL bCPUUncached = PVRSRV_CHECK_CPU_UNCACHED(ui32CPUCacheFlags);
+ IMG_BOOL bCPUWriteCombine = PVRSRV_CHECK_CPU_WRITE_COMBINE(ui32CPUCacheFlags);
+
+ if (ppsPage != NULL && uiNumPages != 0)
+ {
+#if defined (CONFIG_X86)
+ /* On x86 we have to set page cache attributes for non-cached pages.
+ * The call is implicitly taking care of all flushing/invalidating
+ * and therefore we can skip the usual cache maintenance after this. */
+ if (bCPUUncached || bCPUWriteCombine)
+ {
+			/* On x86, if we already have a mapping (e.g. low memory) we need to change
+			   the mode of the current mapping before we map it ourselves */
+			int ret = 0;
+ PVR_UNREFERENCED_PARAMETER(bFlush);
+
+ switch (PVRSRV_CPU_CACHE_MODE(ui32CPUCacheFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ ret = set_pages_array_uc(ppsPage, uiNumPages);
+ if (ret)
+ {
+ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to UC failed, returned %d", ret));
+ }
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ ret = set_pages_array_wc(ppsPage, uiNumPages);
+ if (ret)
+ {
+ eError = PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE;
+ PVR_DPF((PVR_DBG_ERROR, "Setting Linux page caching mode to WC failed, returned %d", ret));
+ }
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ break;
+
+ default:
+ break;
+ }
+ }
+ else
+#endif
+ {
+ if ( bFlush ||
+ bCPUUncached || bCPUWriteCombine ||
+ (bCPUCached && PVRSRV_CHECK_CPU_CACHE_CLEAN(ui32CPUCacheFlags)) )
+ {
+				/* We can be given pages which still remain in the cache.
+				   In order to make sure that the data we write through our mappings
+				   doesn't get overwritten by later cache evictions, we invalidate the
+				   pages that are given to us.
+
+				   Note:
+				   This still seems to be true if we request cold pages; they are just
+				   less likely to be in the cache. */
+ _ApplyCacheMaintenance(psDevNode,
+ ppsPage,
+ uiNumPages,
+ bFlush);
+ }
+ }
+ }
+
+ return eError;
+}
+
+/* Same as _AllocOSPage except it uses the DMA framework to perform the allocation.
+ * uiPageIndex is expected to be the pagearray index at which to store the higher-order page. */
+static PVRSRV_ERROR
+_AllocOSPage_CMA(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ gfp_t gfp_flags,
+ IMG_UINT32 ui32AllocOrder,
+ IMG_UINT32 ui32MinOrder,
+ IMG_UINT32 uiPageIndex)
+{
+ void *virt_addr;
+ struct page *page;
+ dma_addr_t bus_addr;
+ IMG_UINT32 uiAllocIsMisaligned;
+ size_t alloc_size = PAGE_SIZE << ui32AllocOrder;
+ struct device *dev = psPageArrayData->psDevNode->psDevConfig->pvOSDevice;
+ PVR_ASSERT(ui32AllocOrder == ui32MinOrder);
+
+ do
+ {
+ DisableOOMKiller();
+#if defined(CONFIG_L4) || defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC)
+ virt_addr = NULL;
+#else
+ /* virt_addr might be a cookie VA so may not be cpu-view of bus_addr
+ due to DMA_ATTR_NO_KERNEL_MAPPING being used by DMA backend */
+ virt_addr = dma_alloc_coherent(dev, alloc_size, &bus_addr, gfp_flags);
+#endif
+ if (virt_addr == NULL)
+ {
+			/* The idea here is primarily to support some older kernels with
+			   broken or non-functioning DMA/CMA implementations (< Linux-3.4)
+			   and also to handle DMA/CMA allocation failures by attempting a
+			   normal page allocation. We expect dma_alloc_coherent() already
+			   attempts this internally before failing, but nonetheless it does
+			   no harm to retry the allocation ourselves. */
+ page = alloc_pages(gfp_flags, ui32AllocOrder);
+ if (page)
+ {
+ /* Taint bus_addr as alloc_page, needed when freeing;
+ also acquire the low memory page address only, this
+ prevents mapping possible high memory pages into
+ kernel virtual address space which might exhaust
+ the VMALLOC address space */
+ bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+ virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+ }
+ else
+ {
+ EnableOOMKiller();
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+ else
+ {
+#if !defined(CONFIG_ARM)
+#if defined(CONFIG_L4)
+ page = pfn_to_page((unsigned long)l4x_phys_to_virt(bus_addr) >> PAGE_SHIFT);
+#else
+ page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+#endif
+#else /* !defined(CONFIG_ARM) */
+ page = pfn_to_page(dma_to_pfn(dev, bus_addr));
+#endif
+ }
+ EnableOOMKiller();
+
+		/* Physical allocation alignment is handled here transparently: if the
+		   allocated buffer address does not meet its alignment requirement, we
+		   over-allocate using the next power-of-two order and report
+		   alignment-adjusted values back to meet the requested constraint.
+		   Evidently we waste memory by doing this, so we only do so if we do
+		   not initially meet the alignment constraint. */
+ uiAllocIsMisaligned = DMA_GET_ADDR(bus_addr) & ((PAGE_SIZE<<ui32MinOrder)-1);
+ if (uiAllocIsMisaligned || ui32AllocOrder > ui32MinOrder)
+ {
+ IMG_BOOL bUsedAllocPages = DMA_IS_ALLOCPG_ADDR(bus_addr);
+ if (ui32AllocOrder == ui32MinOrder)
+ {
+ if (bUsedAllocPages)
+ {
+ __free_pages(page, ui32AllocOrder);
+ }
+ else
+ {
+ dma_free_coherent(dev, alloc_size, virt_addr, bus_addr);
+ }
+
+ ui32AllocOrder = ui32AllocOrder + 1;
+ alloc_size = PAGE_SIZE << ui32AllocOrder;
+
+ PVR_ASSERT(uiAllocIsMisaligned != 0);
+ }
+ else
+ {
+ size_t align_adjust = PAGE_SIZE << ui32MinOrder;
+
+ /* Adjust virtual/bus addresses to meet alignment */
+ bus_addr = bUsedAllocPages ? page_to_phys(page) : bus_addr;
+ align_adjust = PVR_ALIGN((size_t)bus_addr, align_adjust);
+ align_adjust -= (size_t)bus_addr;
+
+ if (align_adjust)
+ {
+ if (bUsedAllocPages)
+ {
+ page += align_adjust >> PAGE_SHIFT;
+ bus_addr = DMA_SET_ALLOCPG_ADDR(page_to_phys(page));
+ virt_addr = (void*)(uintptr_t) DMA_VADDR_NOT_IN_USE;
+ }
+ else
+ {
+ bus_addr += align_adjust;
+#if !defined(CONFIG_ARM)
+#if defined(CONFIG_L4)
+ page = pfn_to_page((unsigned long)l4x_phys_to_virt(bus_addr) >> PAGE_SHIFT);
+#else
+ page = pfn_to_page(bus_addr >> PAGE_SHIFT);
+#endif
+#else /* !defined(CONFIG_ARM) */
+ page = pfn_to_page(dma_to_pfn(dev, bus_addr));
+#endif
+ }
+
+ /* Store adjustments in PAGE_SIZE counts */
+ align_adjust = align_adjust >> PAGE_SHIFT;
+ bus_addr = DMA_SET_ALIGN_ADJUSTMENT(bus_addr, align_adjust);
+ }
+
+ /* Taint bus_addr due to over-allocation, allows us to free
+ * memory correctly */
+ bus_addr = DMA_SET_ADJUSTED_ADDR(bus_addr);
+ uiAllocIsMisaligned = 0;
+ }
+ }
+ } while (uiAllocIsMisaligned);
+
+	/* Store the allocation; uiPageIndex is a DevicePageSize-based index (the caller converts from the OSPageSize-based index) */
+ psPageArrayData->dmavirtarray[uiPageIndex] = virt_addr;
+ psPageArrayData->dmaphysarray[uiPageIndex] = bus_addr;
+ psPageArrayData->pagearray[uiPageIndex] = page;
+
+ return PVRSRV_OK;
+}
+
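+/* Illustrative note (not part of the driver): the DMA_* helpers used above
+ * (DMA_SET_ALLOCPG_ADDR, DMA_IS_ALLOCPG_ADDR, DMA_VADDR_NOT_IN_USE, ...) are
+ * driver-local macros defined elsewhere in this file; they tag spare bits of
+ * the stored bus address so that the free path can tell the two origins apart:
+ *
+ *   if (DMA_IS_ALLOCPG_ADDR(bus_addr))        // fallback alloc_pages() case
+ *       __free_pages(page, order);
+ *   else                                      // dma_alloc_coherent() case
+ *       dma_free_coherent(dev, size, virt_addr, DMA_GET_ADDR(bus_addr));
+ */
+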
+/* Allocates a page of order uiAllocOrder and stores it in the page array of
+ * psPageArrayData at position uiPageIndex.
+ *
+ * If the order is higher than 0, the page is split into multiple order-0 pages and
+ * these are stored at positions uiPageIndex to uiPageIndex + (1 << uiAllocOrder) - 1.
+ *
+ * This function is supposed to be used for uiMinOrder == 0 only! */
+static PVRSRV_ERROR
+_AllocOSPage(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ gfp_t gfp_flags,
+ IMG_UINT32 uiAllocOrder,
+ IMG_UINT32 uiMinOrder,
+ IMG_UINT32 uiPageIndex)
+{
+ struct page *psPage;
+ IMG_UINT32 ui32Count;
+
+ /* Sanity check. If it fails we write into the wrong places in the array. */
+ PVR_ASSERT(uiMinOrder == 0);
+
+ /* Allocate the page */
+ DisableOOMKiller();
+ psPage = alloc_pages(gfp_flags, uiAllocOrder);
+ EnableOOMKiller();
+
+ if (psPage == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+ /* In case we need to, split the higher order page;
+ this should only be used for order-0 allocations
+ as higher order allocations should use DMA/CMA */
+ if (uiAllocOrder != 0)
+ {
+ split_page(psPage, uiAllocOrder);
+ }
+#endif
+
+ /* Store the page (or multiple split pages) in the page array */
+ for (ui32Count = 0; ui32Count < (1 << uiAllocOrder); ui32Count++)
+ {
+ psPageArrayData->pagearray[uiPageIndex + ui32Count] = &(psPage[ui32Count]);
+ }
+
+ return PVRSRV_OK;
+}
+
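+/* Illustrative sketch (not part of the driver): split_page() turns one order-N
+ * block into 2^N independent order-0 struct pages so that each can later be
+ * freed on its own. E.g. for uiAllocOrder == 2:
+ *
+ *   psPage = alloc_pages(gfp_flags, 2);   // one contiguous order-2 block
+ *   split_page(psPage, 2);                // now 4 separate order-0 pages
+ *   // pagearray[idx + 0..3] = psPage + 0..3, each freeable with __free_pages(p, 0)
+ */
+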
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+
+static inline void _AddMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ struct page *psPage)
+{
+ IMG_CPU_PHYADDR sCPUPhysAddr = { page_to_phys(psPage) };
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+ NULL, sCPUPhysAddr,
+ 1 << psPageArrayData->uiLog2AllocPageSize,
+ NULL, psPageArrayData->uiPid);
+}
+
+static inline void _RemoveMemAllocRecord_UmaPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ struct page *psPage)
+{
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+ (IMG_UINT64) page_to_phys(psPage),
+ psPageArrayData->uiPid);
+}
+
+#else /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+
+static inline void _IncrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
+{
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+ uiSize, uiPid);
+}
+
+static inline void _DecrMemAllocStat_UmaPages(size_t uiSize, IMG_PID uiPid)
+{
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES,
+ uiSize, uiPid);
+}
+
+#endif /* defined(PVRSRV_ENABLE_MEMORY_STATS) */
+#endif /* defined(PVRSRV_ENABLE_PROCESS_STATS) */
+
+/* Allocation of OS pages: We may allocate 2^N order pages at a time for two reasons.
+ *
+ * Firstly, to support device pages which are larger than OS pages. By asking the OS
+ * for 2^N order OS pages at a time we guarantee the device page is contiguous.
+ *
+ * Secondly, for performance, where we may ask for 2^N order pages to reduce the
+ * number of calls to alloc_pages() and thus reduce the time taken for huge allocations.
+ *
+ * Regardless of the page order requested, we need to break the pages down to track
+ * them as OS pages. The maximum order requested is increased if all max-order
+ * allocations were successful. If any request fails we reduce the max order.
+ */
+static PVRSRV_ERROR
+_AllocOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiArrayIndex = 0;
+ IMG_UINT32 ui32Order;
+ IMG_UINT32 ui32MinOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+ IMG_BOOL bIncreaseMaxOrder = IMG_TRUE;
+
+ IMG_UINT32 ui32NumPageReq;
+ IMG_UINT32 uiPagesToAlloc;
+ IMG_UINT32 uiPagesFromPool = 0;
+
+ gfp_t gfp_flags = _GetGFPFlags(ui32MinOrder ? psPageArrayData->bZero : IMG_FALSE, /* Zero all pages later as batch */
+ psPageArrayData->psDevNode);
+ gfp_t ui32GfpFlags;
+ gfp_t ui32HighOrderGfpFlags = ((gfp_flags & ~__GFP_RECLAIM) | __GFP_NORETRY);
+
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+ struct page **ppsPageAttributeArray = NULL;
+
+ uiPagesToAlloc = psPageArrayData->uiTotalNumOSPages;
+
+ /* Try to get pages from the pool since it is faster;
+ the page pool currently only supports zero-order pages
+ thus currently excludes all DMA/CMA allocated memory */
+ _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+ psPageArrayData->ui32CPUCacheFlags,
+ uiPagesToAlloc,
+ ui32MinOrder,
+ psPageArrayData->bZero,
+ ppsPageArray,
+ &uiPagesFromPool);
+
+ uiArrayIndex = uiPagesFromPool;
+
+ if ((uiPagesToAlloc - uiPagesFromPool) < PVR_LINUX_HIGHORDER_ALLOCATION_THRESHOLD)
+ { /* Small allocations: ask for one device page at a time */
+ ui32Order = ui32MinOrder;
+ bIncreaseMaxOrder = IMG_FALSE;
+ }
+ else
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+		/* Large zero-order or non-zero-order allocations: ask for
+		   MAX(max-order, min-order) order pages at a time; alloc
+		   failures throttle this down to min-order allocations */
+ ui32Order = MAX(g_uiMaxOrder, ui32MinOrder);
+#else
+ /* Because split_pages() is not available on older kernels
+ we cannot mix-and-match any-order pages in the PMR;
+ only same-order pages must be present in page array.
+ So we unconditionally force it to use ui32MinOrder on
+ these older kernels */
+ ui32Order = ui32MinOrder;
+#if defined(DEBUG)
+ if (! psPageArrayData->bIsCMA)
+ {
+ /* Sanity check that this is zero */
+ PVR_ASSERT(! ui32Order);
+ }
+#endif
+#endif
+ }
+
+	/* If we are asking for more contiguity than we actually need, allow the allocation to fail */
+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+ ui32NumPageReq = (1 << ui32Order);
+
+ while (uiArrayIndex < uiPagesToAlloc)
+ {
+ IMG_UINT32 ui32PageRemain = uiPagesToAlloc - uiArrayIndex;
+
+ while (ui32NumPageReq > ui32PageRemain)
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0))
+			/* The number of pages to request is larger than the number remaining,
+			   so ask for less to never over-allocate */
+ ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
+#else
+			/* The number of pages to request is larger than the number remaining, so
+			   do nothing and thus over-allocate, as we do not support
+			   mixing and matching any-order pages in the PMR page array on
+			   older kernels (this simplifies the page free logic) */
+ PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+ ui32NumPageReq = (1 << ui32Order);
+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+ }
+
+ if (psPageArrayData->bIsCMA)
+ {
+			/* As the DMA/CMA framework rounds up requests to the
+			   next power of two, we request multiple uiMinOrder
+			   pages at a time in order to minimise wasted memory */
+ eError = _AllocOSPage_CMA(psPageArrayData,
+ ui32GfpFlags,
+ ui32Order,
+ ui32MinOrder,
+ uiArrayIndex >> ui32MinOrder);
+ }
+ else
+ {
+			/* Allocate 2^ui32Order pages at uiArrayIndex */
+ eError = _AllocOSPage(psPageArrayData,
+ ui32GfpFlags,
+ ui32Order,
+ ui32MinOrder,
+ uiArrayIndex);
+ }
+
+ if (eError == PVRSRV_OK)
+ {
+ /* Successful request. Move onto next. */
+ uiArrayIndex += ui32NumPageReq;
+ }
+ else
+ {
+ if (ui32Order > ui32MinOrder)
+ {
+ /* Last request failed. Let's ask for less next time */
+ ui32Order = MAX(ui32Order >> 1,ui32MinOrder);
+ bIncreaseMaxOrder = IMG_FALSE;
+ ui32NumPageReq = (1 << ui32Order);
+ ui32GfpFlags = (ui32Order > ui32MinOrder) ? ui32HighOrderGfpFlags : gfp_flags;
+ g_uiMaxOrder = ui32Order;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0))
+ /* We should not trigger this code path in older kernels,
+ this is enforced by ensuring ui32Order == ui32MinOrder */
+ PVR_ASSERT(ui32Order == ui32MinOrder);
+#endif
+ }
+ else
+ {
+ /* Failed to alloc pages at required contiguity. Failed allocation */
+ PVR_DPF((PVR_DBG_ERROR, "%s: %s failed to honour request at %u of %u, flags = %x, order = %u (%s)",
+ __FUNCTION__,
+ psPageArrayData->bIsCMA ? "dma_alloc_coherent" : "alloc_pages",
+ uiArrayIndex,
+ uiPagesToAlloc,
+ ui32GfpFlags,
+ ui32Order,
+ PVRSRVGetErrorStringKM(eError)));
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto e_free_pages;
+ }
+ }
+ }
+
+ if (bIncreaseMaxOrder && (g_uiMaxOrder < PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM))
+ { /* All successful allocations on max order. Let's ask for more next time */
+ g_uiMaxOrder++;
+ }
+
+ /* Construct table of page pointers to apply attributes */
+ ppsPageAttributeArray = &ppsPageArray[uiPagesFromPool];
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiIdx, uiIdy, uiIdz;
+
+ ppsPageAttributeArray = OSAllocMem(sizeof(struct page *) * uiPagesToAlloc);
+ if (ppsPageAttributeArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed OSAllocMem() for page attributes table"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_free_pages;
+ }
+
+ for (uiIdx = 0; uiIdx < uiPagesToAlloc; uiIdx += ui32NumPageReq)
+ {
+ uiIdy = uiIdx >> ui32Order;
+ for (uiIdz = 0; uiIdz < ui32NumPageReq; uiIdz++)
+ {
+ ppsPageAttributeArray[uiIdx+uiIdz] = psPageArrayData->pagearray[uiIdy];
+ ppsPageAttributeArray[uiIdx+uiIdz] += uiIdz;
+ }
+ }
+ }
+
+ if (psPageArrayData->bZero && ui32MinOrder == 0)
+ {
+ eError = _ZeroPageArray(uiPagesToAlloc - uiPagesFromPool,
+ ppsPageAttributeArray,
+ PAGE_KERNEL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (fast)"));
+ goto e_free_pages;
+ }
+ }
+
+
+ /* Do the cache management as required */
+ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+ ppsPageAttributeArray,
+ uiPagesToAlloc - uiPagesFromPool,
+ psPageArrayData->bZero,
+ psPageArrayData->ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+ goto e_free_pages;
+ }
+ else
+ {
+ if (psPageArrayData->bIsCMA)
+ {
+ OSFreeMem(ppsPageAttributeArray);
+ }
+ }
+
+ /* Update metadata */
+ psPageArrayData->iNumOSPagesAllocated = psPageArrayData->uiTotalNumOSPages;
+
+ {
+ IMG_UINT32 ui32NumPages =
+ psPageArrayData->iNumOSPagesAllocated >> ui32MinOrder;
+ IMG_UINT32 i;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ for (i = 0; i < ui32NumPages; i++)
+ {
+ _AddMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
+ }
+#else
+ _IncrMemAllocStat_UmaPages(uiPagesToAlloc * PAGE_SIZE,
+ psPageArrayData->uiPid);
+#endif
+#endif
+
+ if (psPageArrayData->bPoisonOnAlloc)
+ {
+ for (i = 0; i < ui32NumPages; i++)
+ {
+ _PoisonDevicePage(psPageArrayData->psDevNode,
+ ppsPageArray[i],
+ ui32MinOrder,
+ psPageArrayData->ui32CPUCacheFlags,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
+ }
+ }
+ }
+
+ return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+ {
+ IMG_UINT32 ui32PageToFree;
+
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiDevArrayIndex = uiArrayIndex >> ui32Order;
+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << ui32Order;
+ PVR_ASSERT(ui32Order == ui32MinOrder);
+
+ if (ppsPageAttributeArray)
+ {
+ OSFreeMem(ppsPageAttributeArray);
+ }
+
+ for (ui32PageToFree = 0; ui32PageToFree < uiDevArrayIndex; ui32PageToFree++)
+ {
+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ uiDevPageSize,
+ ui32MinOrder,
+ psPageArrayData->dmavirtarray[ui32PageToFree],
+ psPageArrayData->dmaphysarray[ui32PageToFree],
+ ppsPageArray[ui32PageToFree]);
+ psPageArrayData->dmaphysarray[ui32PageToFree]= (dma_addr_t)0;
+ psPageArrayData->dmavirtarray[ui32PageToFree] = NULL;
+ ppsPageArray[ui32PageToFree] = NULL;
+ }
+ }
+ else
+ {
+ /* Free the pages we got from the pool */
+			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+ {
+ _FreeOSPage(ui32MinOrder,
+ psPageArrayData->bUnsetMemoryType,
+ ppsPageArray[ui32PageToFree]);
+ ppsPageArray[ui32PageToFree] = NULL;
+ }
+
+ for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < uiArrayIndex; ui32PageToFree++)
+ {
+ _FreeOSPage(ui32MinOrder, IMG_FALSE, ppsPageArray[ui32PageToFree]);
+ ppsPageArray[ui32PageToFree] = NULL;
+ }
+ }
+
+ return eError;
+ }
+}
+
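+/* Illustrative trace (not part of the driver): g_uiMaxOrder adapts the batch
+ * size across calls. Assuming min order 0 and
+ * PVR_LINUX_PHYSMEM_MAX_ALLOC_ORDER_NUM == 2, one possible sequence is:
+ *
+ *   call 1: all order-2 requests succeed            -> g_uiMaxOrder stays 2
+ *   call 2: an order-2 request fails, retried at 1  -> g_uiMaxOrder = 1
+ *   call 3: all order-1 requests succeed            -> g_uiMaxOrder back to 2
+ */
+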
+/* Allocation of OS pages: This function is used for sparse allocations.
+ *
+ * Sparse allocations provide physical backing for only a proportion of the total
+ * virtual range. */
+static PVRSRV_ERROR
+_AllocOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiPagesToAlloc)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+ IMG_UINT32 uiPagesFromPool = 0;
+ IMG_UINT32 uiNumOSPagesToAlloc = uiPagesToAlloc * (1 << uiOrder);
+ IMG_UINT32 uiTotalNumAllocPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+ gfp_t ui32GfpFlags = _GetGFPFlags(uiOrder ? psPageArrayData->bZero :
+ IMG_FALSE, /* Zero pages later as batch */
+ psPageArrayData->psDevNode);
+
+	/* We use this page array to receive pages from the pool and then reuse it
+	 * afterwards to store pages that need their cache attribute changed on x86 */
+ struct page **ppsTempPageArray;
+ IMG_UINT32 uiTempPageArrayIndex = 0;
+
+ /* Allocate the temporary page array that we need here to receive pages
+ * from the pool and to store pages that need their caching attributes changed.
+ * Allocate number of OS pages to be able to use the attribute function later. */
+ ppsTempPageArray = OSAllocMem(sizeof(struct page*) * uiNumOSPagesToAlloc);
+ if (ppsTempPageArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed metadata allocation", __FUNCTION__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e_exit;
+ }
+
+	/* Check that the requested number of pages fits in the page array */
+ if (uiTotalNumAllocPages <
+ ((psPageArrayData->iNumOSPagesAllocated >> uiOrder) + uiPagesToAlloc) )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to allocate more pages (Order %u) than this buffer can handle, "
+ "Request + Allocated < Max! Request %u, Allocated %u, Max %u.",
+ __FUNCTION__,
+ uiOrder,
+ uiPagesToAlloc,
+ psPageArrayData->iNumOSPagesAllocated >> uiOrder,
+ uiTotalNumAllocPages));
+ eError = PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ goto e_free_temp_array;
+ }
+
+ /* Try to get pages from the pool since it is faster */
+ _GetPagesFromPoolLocked(psPageArrayData->psDevNode,
+ psPageArrayData->ui32CPUCacheFlags,
+ uiPagesToAlloc,
+ uiOrder,
+ psPageArrayData->bZero,
+ ppsTempPageArray,
+ &uiPagesFromPool);
+
+ /* Allocate pages from the OS or move the pages that we got from the pool
+ * to the page array */
+ for (i = 0; i < uiPagesToAlloc; i++)
+ {
+ /* Check if the indices we are allocating are in range */
+ if (puiAllocIndices[i] >= uiTotalNumAllocPages)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Given alloc index %u at %u is larger than page array %u.",
+ __FUNCTION__,
+ i,
+ puiAllocIndices[i],
+ uiTotalNumAllocPages));
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e_free_pages;
+ }
+
+		/* Check that there is not already a page allocated at this position */
+ if (NULL != ppsPageArray[puiAllocIndices[i]])
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Mapping number %u at page array index %u already exists. "
+ "Page struct %p",
+ __func__,
+ i,
+ puiAllocIndices[i],
+ ppsPageArray[puiAllocIndices[i]]));
+ eError = PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS;
+ goto e_free_pages;
+ }
+
+ /* Finally assign a page to the array.
+ * Either from the pool or allocate a new one. */
+ if (uiPagesFromPool != 0)
+ {
+ uiPagesFromPool--;
+ ppsPageArray[puiAllocIndices[i]] = ppsTempPageArray[uiPagesFromPool];
+ }
+ else
+ {
+ if (psPageArrayData->bIsCMA)
+ {
+				/* As the DMA/CMA framework rounds up requests to the
+				   next power of two, we request multiple uiMinOrder
+				   pages at a time in order to minimise wasted memory */
+ eError = _AllocOSPage_CMA(psPageArrayData,
+ ui32GfpFlags,
+ uiOrder,
+ uiOrder,
+ puiAllocIndices[i]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to alloc CMA pages"));
+ goto e_free_pages;
+ }
+ }
+ else
+ {
+ DisableOOMKiller();
+ ppsPageArray[puiAllocIndices[i]] = alloc_pages(ui32GfpFlags, uiOrder);
+ EnableOOMKiller();
+ }
+
+ if (ppsPageArray[puiAllocIndices[i]] != NULL)
+ {
+				/* Reuse the temp page array once it holds no more pool pages */
+
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 idx;
+ struct page* psPageAddr;
+
+ psPageAddr = ppsPageArray[puiAllocIndices[i]];
+
+ for (idx = 0; idx < (1 << uiOrder); idx++)
+ {
+ ppsTempPageArray[uiTempPageArrayIndex + idx] = psPageAddr;
+ psPageAddr++;
+ }
+ uiTempPageArrayIndex += (1 << uiOrder);
+ }
+ else
+ {
+ ppsTempPageArray[uiTempPageArrayIndex] = ppsPageArray[puiAllocIndices[i]];
+ uiTempPageArrayIndex++;
+ }
+ }
+ else
+ {
+ /* Failed to alloc pages at required contiguity. Failed allocation */
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: alloc_pages failed to honour request at %u of %u, flags = %x, order = %u",
+ __FUNCTION__,
+ i,
+ uiPagesToAlloc,
+ ui32GfpFlags,
+ uiOrder));
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto e_free_pages;
+ }
+ }
+ }
+
+ if (psPageArrayData->bZero && uiOrder == 0)
+ {
+ eError = _ZeroPageArray(uiTempPageArrayIndex,
+ ppsTempPageArray,
+ PAGE_KERNEL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to zero pages (sparse)"));
+ goto e_free_pages;
+ }
+ }
+
+ /* Do the cache management as required */
+ eError = _ApplyOSPagesAttribute(psPageArrayData->psDevNode,
+ ppsTempPageArray,
+ uiTempPageArrayIndex,
+ psPageArrayData->bZero,
+ psPageArrayData->ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set page attributes"));
+ goto e_free_pages;
+ }
+
+ /* Update metadata */
+ psPageArrayData->iNumOSPagesAllocated += uiNumOSPagesToAlloc;
+
+ /* Free temporary page array */
+ OSFreeMem(ppsTempPageArray);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ for (i = 0; i < uiPagesToAlloc; i++)
+ {
+ _AddMemAllocRecord_UmaPages(psPageArrayData,
+ ppsPageArray[puiAllocIndices[i]]);
+ }
+#else
+ _IncrMemAllocStat_UmaPages(uiNumOSPagesToAlloc * PAGE_SIZE,
+ psPageArrayData->uiPid);
+#endif
+#endif
+
+ if (psPageArrayData->bPoisonOnAlloc)
+ {
+ for (i = 0; i < uiPagesToAlloc; i++)
+ {
+ _PoisonDevicePage(psPageArrayData->psDevNode,
+ ppsPageArray[puiAllocIndices[i]],
+ uiOrder,
+ psPageArrayData->ui32CPUCacheFlags,
+ PVRSRV_POISON_ON_ALLOC_VALUE);
+ }
+ }
+
+ return PVRSRV_OK;
+
+/* Error path */
+e_free_pages:
+ {
+ IMG_UINT32 ui32PageToFree;
+
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+
+ for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
+ {
+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ uiDevPageSize,
+ uiOrder,
+ psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]],
+ psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]],
+ ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+ psPageArrayData->dmaphysarray[puiAllocIndices[ui32PageToFree]]= (dma_addr_t)0;
+ psPageArrayData->dmavirtarray[puiAllocIndices[ui32PageToFree]] = NULL;
+ ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL;
+ }
+ }
+ else
+ {
+ /* Free the pages we got from the pool */
+			for (ui32PageToFree = 0; ui32PageToFree < uiPagesFromPool; ui32PageToFree++)
+ {
+ _FreeOSPage(0,
+ psPageArrayData->bUnsetMemoryType,
+ ppsTempPageArray[ui32PageToFree]);
+ }
+
+ /* Free the pages we just allocated from the OS */
+			for (ui32PageToFree = uiPagesFromPool; ui32PageToFree < i; ui32PageToFree++)
+ {
+ _FreeOSPage(0,
+ IMG_FALSE,
+ ppsPageArray[puiAllocIndices[ui32PageToFree]]);
+ }
+
+			/* Reset all page array entries that have been set so far */
+			for (ui32PageToFree = 0; ui32PageToFree < i; ui32PageToFree++)
+ {
+ ppsPageArray[puiAllocIndices[ui32PageToFree]] = NULL;
+ }
+ }
+ }
+
+e_free_temp_array:
+ OSFreeMem(ppsTempPageArray);
+
+e_exit:
+ return eError;
+}
+
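+/* Illustrative sketch (not part of the driver): a sparse allocation only backs
+ * the requested slots. E.g. for a 16-slot page array with
+ * puiAllocIndices = {2, 7, 9} and uiPagesToAlloc == 3, only ppsPageArray[2],
+ * [7] and [9] receive pages, and iNumOSPagesAllocated grows by
+ * 3 << uiOrder OS pages. */
+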
+/* Allocate pages for a given page array.
+ *
+ * The allocation path taken depends on whether an array with allocation
+ * indices has been passed or not. */
+static PVRSRV_ERROR
+_AllocOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiPagesToAlloc)
+{
+ PVRSRV_ERROR eError;
+ struct page **ppsPageArray;
+
+ /* Sanity checks */
+ PVR_ASSERT(NULL != psPageArrayData);
+ if (psPageArrayData->bIsCMA)
+ {
+ PVR_ASSERT(psPageArrayData->dmaphysarray != NULL);
+ PVR_ASSERT(psPageArrayData->dmavirtarray != NULL);
+ }
+ PVR_ASSERT(psPageArrayData->pagearray != NULL);
+ PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+
+ ppsPageArray = psPageArrayData->pagearray;
+
+	/* Take the sparse alloc path if we have an array with alloc indices. */
+ if (puiAllocIndices != NULL)
+ {
+ eError = _AllocOSPages_Sparse(psPageArrayData,
+ puiAllocIndices,
+ uiPagesToAlloc);
+ }
+ else
+ {
+ eError = _AllocOSPages_Fast(psPageArrayData);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e_exit;
+ }
+
+ _DumpPageArray(ppsPageArray,
+ psPageArrayData->uiTotalNumOSPages >>
+ (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: allocated OS memory for PMR @0x%p", psPageArrayData));
+ return PVRSRV_OK;
+
+e_exit:
+ return eError;
+}
+
+/* Same as _FreeOSPage except free memory using DMA framework */
+static INLINE void
+_FreeOSPage_CMA(struct device *dev,
+ size_t alloc_size,
+ IMG_UINT32 uiOrder,
+ void *virt_addr,
+ dma_addr_t dev_addr,
+ struct page *psPage)
+{
+ if (DMA_IS_ALLOCPG_ADDR(dev_addr))
+ {
+#if defined(CONFIG_X86)
+ void *pvPageVAddr = page_address(psPage);
+ if (pvPageVAddr)
+ {
+ int ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to reset page attribute",
+ __FUNCTION__));
+ }
+ }
+#endif
+
+ if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+ {
+ psPage -= DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+ uiOrder += 1;
+ }
+
+ __free_pages(psPage, uiOrder);
+ }
+ else
+ {
+ if (DMA_IS_ADDR_ADJUSTED(dev_addr))
+ {
+ size_t align_adjust;
+
+ align_adjust = DMA_GET_ALIGN_ADJUSTMENT(dev_addr);
+ alloc_size = alloc_size << 1;
+
+ dev_addr = DMA_GET_ADDR(dev_addr);
+ dev_addr -= align_adjust << PAGE_SHIFT;
+ }
+
+ dma_free_coherent(dev, alloc_size, virt_addr, DMA_GET_ADDR(dev_addr));
+ }
+}
+
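+/* Illustrative arithmetic (not part of the driver): the adjustment reversal
+ * above undoes what _AllocOSPage_CMA did. E.g. for a 16K-aligned request
+ * (min order 2) whose order-3 over-allocation landed at phys 0x10005000:
+ *
+ *   align_adjust = PVR_ALIGN(0x10005000, 0x4000) - 0x10005000 = 0x3000 (3 pages)
+ *   alloc: page += 3, adjustment 3 recorded in the tainted bus address
+ *   free:  psPage -= 3; uiOrder = 2 + 1 = 3; __free_pages(psPage, 3)
+ */
+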
+/* Free a single page back to the OS.
+ * Make sure the cache type is set back to the default value.
+ *
+ * Note:
+ * We must _only_ check bUnsetMemoryType in the case where we need to free
+ * the page back to the OS since we may have to revert the cache properties
+ * of the page to the default as given by the OS when it was allocated. */
+static void
+_FreeOSPage(IMG_UINT32 uiOrder,
+ IMG_BOOL bUnsetMemoryType,
+ struct page *psPage)
+{
+
+#if defined(CONFIG_X86)
+ void *pvPageVAddr;
+ pvPageVAddr = page_address(psPage);
+
+ if (pvPageVAddr && bUnsetMemoryType == IMG_TRUE)
+ {
+ int ret;
+
+ ret = set_memory_wb((unsigned long)pvPageVAddr, 1);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attribute", __FUNCTION__));
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(bUnsetMemoryType);
+#endif
+ __free_pages(psPage, uiOrder);
+}
+
+/* Free the struct holding the metadata */
+static PVRSRV_ERROR
+_FreeOSPagesArray(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ PVR_DPF((PVR_DBG_MESSAGE, "physmem_osmem_linux.c: freed OS memory for PMR @0x%p", psPageArrayData));
+
+	/* Check if the page array actually still exists.
+	 * It might be the case that it has been moved to the page pool */
+ if (psPageArrayData->pagearray != NULL)
+ {
+ OSFreeMemNoStats(psPageArrayData->pagearray);
+ }
+
+ kmem_cache_free(g_psLinuxPageArray, psPageArrayData);
+
+ return PVRSRV_OK;
+}
+
+/* Free all or some pages from a sparse page array */
+static PVRSRV_ERROR
+_FreeOSPages_Sparse(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32FreePageCount)
+{
+ IMG_BOOL bSuccess;
+ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+ IMG_UINT32 uiPageIndex, i, j, uiTempIdx = 0;
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+ IMG_UINT32 uiNumPages;
+
+ struct page **ppsTempPageArray;
+ IMG_UINT32 uiTempArraySize;
+
+ /* We really should have something to free before we call this */
+ PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+ if (pai32FreeIndices == NULL)
+ {
+ uiNumPages = psPageArrayData->uiTotalNumOSPages >> uiOrder;
+ uiTempArraySize = psPageArrayData->iNumOSPagesAllocated;
+ }
+ else
+ {
+ uiNumPages = ui32FreePageCount;
+ uiTempArraySize = ui32FreePageCount << uiOrder;
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ for (i = 0; i < uiNumPages; i++)
+ {
+ IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+ _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[idx]);
+ }
+#else
+ _DecrMemAllocStat_UmaPages(uiTempArraySize * PAGE_SIZE,
+ psPageArrayData->uiPid);
+#endif
+#endif
+
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ for (i = 0; i < uiNumPages; i++)
+ {
+ IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+ _PoisonDevicePage(psPageArrayData->psDevNode,
+ ppsPageArray[idx],
+ uiOrder,
+ psPageArrayData->ui32CPUCacheFlags,
+ PVRSRV_POISON_ON_FREE_VALUE);
+ }
+ }
+
+ if (psPageArrayData->bIsCMA)
+ {
+ IMG_UINT32 uiDevNumPages = uiNumPages;
+ IMG_UINT32 uiDevPageSize = 1<<psPageArrayData->uiLog2AllocPageSize;
+
+ for (i = 0; i < uiDevNumPages; i++)
+ {
+ IMG_UINT32 idx = pai32FreeIndices ? pai32FreeIndices[i] : i;
+ if (NULL != ppsPageArray[idx])
+ {
+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ uiDevPageSize,
+ uiOrder,
+ psPageArrayData->dmavirtarray[idx],
+ psPageArrayData->dmaphysarray[idx],
+ ppsPageArray[idx]);
+ psPageArrayData->dmaphysarray[idx] = (dma_addr_t)0;
+ psPageArrayData->dmavirtarray[idx] = NULL;
+ ppsPageArray[idx] = NULL;
+ uiTempIdx++;
+ }
+ }
+ }
+ else
+ {
+
+ /* OSAllocMemNoStats required because this code may be run without the bridge lock held */
+ ppsTempPageArray = OSAllocMemNoStats(sizeof(struct page*) * uiTempArraySize);
+ if (ppsTempPageArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed free_pages metadata allocation", __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Put pages in a contiguous array so further processing is easier */
+ for (i = 0; i < uiNumPages; i++)
+ {
+ uiPageIndex = pai32FreeIndices ? pai32FreeIndices[i] : i;
+ if (NULL != ppsPageArray[uiPageIndex])
+ {
+ struct page *psPage = ppsPageArray[uiPageIndex];
+
+ for (j = 0; j < (1<<uiOrder); j++)
+ {
+ ppsTempPageArray[uiTempIdx] = psPage;
+ uiTempIdx++;
+ psPage++;
+ }
+
+ ppsPageArray[uiPageIndex] = NULL;
+ }
+ }
+
+ /* Try to move the temp page array to the pool */
+ bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+ ppsTempPageArray,
+ psPageArrayData->bUnpinned,
+ uiOrder,
+ uiTempIdx);
+ if (bSuccess)
+ {
+ goto exit_ok;
+ }
+
+ /* Free pages and reset page caching attributes on x86 */
+#if defined(CONFIG_X86)
+ if (uiTempIdx != 0 && psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+ {
+ int iError;
+ iError = set_pages_array_wb(ppsTempPageArray, uiTempIdx);
+
+ if (iError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __func__));
+ }
+ }
+#endif
+
+ /* Free the pages */
+ for (i = 0; i < uiTempIdx; i++)
+ {
+ __free_pages(ppsTempPageArray[i], uiOrder);
+ }
+
+ /* Free the temp page array here if it did not move to the pool */
+ OSFreeMemNoStats(ppsTempPageArray);
+ }
+
+exit_ok:
+ /* Update metadata */
+ psPageArrayData->iNumOSPagesAllocated -= (uiTempIdx << uiOrder);
+ PVR_ASSERT(0 <= psPageArrayData->iNumOSPagesAllocated);
+ return PVRSRV_OK;
+}
+
+/* Free all the pages in a page array */
+static PVRSRV_ERROR
+_FreeOSPages_Fast(PMR_OSPAGEARRAY_DATA *psPageArrayData)
+{
+ IMG_BOOL bSuccess;
+ IMG_UINT32 i;
+ IMG_UINT32 uiNumPages = psPageArrayData->uiTotalNumOSPages;
+ IMG_UINT32 uiOrder = psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+ IMG_UINT32 uiDevNumPages = uiNumPages >> uiOrder;
+ IMG_UINT32 uiDevPageSize = PAGE_SIZE << uiOrder;
+ struct page **ppsPageArray = psPageArrayData->pagearray;
+
+ /* We really should have something to free before we call this */
+ PVR_ASSERT(psPageArrayData->iNumOSPagesAllocated != 0);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ for (i = 0; i < uiDevNumPages; i++)
+ {
+ _RemoveMemAllocRecord_UmaPages(psPageArrayData, ppsPageArray[i]);
+ }
+#else
+ _DecrMemAllocStat_UmaPages(uiNumPages * PAGE_SIZE,
+ psPageArrayData->uiPid);
+#endif
+#endif
+
+ if (psPageArrayData->bPoisonOnFree)
+ {
+ for (i = 0; i < uiDevNumPages; i++)
+ {
+ _PoisonDevicePage(psPageArrayData->psDevNode,
+ ppsPageArray[i],
+ uiOrder,
+ psPageArrayData->ui32CPUCacheFlags,
+ PVRSRV_POISON_ON_FREE_VALUE);
+ }
+ }
+
+ /* Try to move the page array to the pool */
+ bSuccess = _PutPagesToPoolLocked(psPageArrayData->ui32CPUCacheFlags,
+ ppsPageArray,
+ psPageArrayData->bUnpinned,
+ uiOrder,
+ uiNumPages);
+ if (bSuccess)
+ {
+ psPageArrayData->pagearray = NULL;
+ goto exit_ok;
+ }
+
+ if (psPageArrayData->bIsCMA)
+ {
+ for (i = 0; i < uiDevNumPages; i++)
+ {
+ _FreeOSPage_CMA(psPageArrayData->psDevNode->psDevConfig->pvOSDevice,
+ uiDevPageSize,
+ uiOrder,
+ psPageArrayData->dmavirtarray[i],
+ psPageArrayData->dmaphysarray[i],
+ ppsPageArray[i]);
+ psPageArrayData->dmaphysarray[i] = (dma_addr_t)0;
+ psPageArrayData->dmavirtarray[i] = NULL;
+ ppsPageArray[i] = NULL;
+ }
+ }
+ else
+ {
+#if defined(CONFIG_X86)
+ if (psPageArrayData->bUnsetMemoryType == IMG_TRUE)
+ {
+ int ret;
+
+ ret = set_pages_array_wb(ppsPageArray, uiNumPages);
+ if (ret)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to reset page attributes", __FUNCTION__));
+ }
+ }
+#endif
+
+ for (i = 0; i < uiNumPages; i++)
+ {
+ _FreeOSPage(uiOrder, IMG_FALSE, ppsPageArray[i]);
+ ppsPageArray[i] = NULL;
+ }
+ }
+
+exit_ok:
+ /* Update metadata */
+ psPageArrayData->iNumOSPagesAllocated = 0;
+ return PVRSRV_OK;
+}
+
+/* Free pages from a page array.
+ * Takes care of mem stats and chooses correct free path depending on parameters. */
+static PVRSRV_ERROR
+_FreeOSPages(PMR_OSPAGEARRAY_DATA *psPageArrayData,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32FreePageCount)
+{
+ PVRSRV_ERROR eError;
+
+ /* Go the sparse or non-sparse path */
+ if (psPageArrayData->iNumOSPagesAllocated != psPageArrayData->uiTotalNumOSPages
+ || pai32FreeIndices != NULL)
+ {
+ eError = _FreeOSPages_Sparse(psPageArrayData,
+ pai32FreeIndices,
+ ui32FreePageCount);
+ }
+ else
+ {
+ eError = _FreeOSPages_Fast(psPageArrayData);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_FreeOSPages_FreePages failed"));
+ }
+
+ _DumpPageArray(psPageArrayData->pagearray,
+ psPageArrayData->uiTotalNumOSPages >>
+ (psPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT) );
+
+ return eError;
+}
+
+/*
+ *
+ * Implementation of callback functions
+ *
+ */
+
+/* Destructor func is called after last reference disappears, but
+ * before PMR itself is freed. */
+static PVRSRV_ERROR
+PMRFinalizeOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+
+	/* Any remaining pages can only be freed now, once the last reference has gone. */
+ if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+ {
+ _PagePoolLock();
+ if (psOSPageArrayData->bUnpinned == IMG_TRUE)
+ {
+ _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+ }
+ _PagePoolUnlock();
+
+ eError = _FreeOSPages(psOSPageArrayData,
+ NULL,
+ 0);
+ PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+ }
+
+ eError = _FreeOSPagesArray(psOSPageArrayData);
+ PVR_ASSERT (eError == PVRSRV_OK); /* can we do better? */
+ return PVRSRV_OK;
+}
+
+/* Callback function for locking the system physical page addresses.
+ * This function must be called before the lookup address func. */
+static PVRSRV_ERROR
+PMRLockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+ if (psOSPageArrayData->bOnDemand)
+ {
+ /* Allocate Memory for deferred allocation */
+ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ eError = PVRSRV_OK;
+ return eError;
+}
+
+static PVRSRV_ERROR
+PMRUnlockSysPhysAddressesOSMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ /* Just drops the refcount. */
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+
+ if (psOSPageArrayData->bOnDemand)
+ {
+ /* Free Memory for deferred allocation */
+ eError = _FreeOSPages(psOSPageArrayData,
+ NULL,
+ 0);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ PVR_ASSERT (eError == PVRSRV_OK);
+ return eError;
+}
+
+/* N.B. It is assumed that PMRLockSysPhysAddressesOSMem() is called _before_ this function! */
+static PVRSRV_ERROR
+PMRSysPhysAddrOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ const PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+ IMG_UINT32 uiPageSize = 1U << psOSPageArrayData->uiLog2AllocPageSize;
+ IMG_UINT32 uiInPageOffset;
+ IMG_UINT32 uiPageIndex;
+ IMG_UINT32 uiIdx;
+
+ if (psOSPageArrayData->uiLog2AllocPageSize < ui32Log2PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Requested physical addresses from PMR "
+ "for incompatible contiguity %u!",
+ __FUNCTION__,
+ ui32Log2PageSize));
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ for (uiIdx=0; uiIdx < ui32NumOfPages; uiIdx++)
+ {
+ if (pbValid[uiIdx])
+ {
+ uiPageIndex = puiOffset[uiIdx] >> psOSPageArrayData->uiLog2AllocPageSize;
+ uiInPageOffset = puiOffset[uiIdx] - ((IMG_DEVMEM_OFFSET_T)uiPageIndex << psOSPageArrayData->uiLog2AllocPageSize);
+
+ PVR_ASSERT(uiPageIndex < psOSPageArrayData->uiTotalNumOSPages);
+ PVR_ASSERT(uiInPageOffset < uiPageSize);
+
+ psDevPAddr[uiIdx].uiAddr = page_to_phys(psOSPageArrayData->pagearray[uiPageIndex]);
+ psDevPAddr[uiIdx].uiAddr += uiInPageOffset;
+
+#if !defined(PVR_LINUX_PHYSMEM_USE_HIGHMEM_ONLY)
+			/* This is just a precaution; normally this should always
+			 * be available */
+ if (psOSPageArrayData->ui64DmaMask)
+ {
+ if (psDevPAddr[uiIdx].uiAddr > psOSPageArrayData->ui64DmaMask)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: physical address"
+ " (%" IMG_UINT64_FMTSPECX ") out of allowable range"
+ " [0; %" IMG_UINT64_FMTSPECX "]", __func__,
+ psDevPAddr[uiIdx].uiAddr,
+ psOSPageArrayData->ui64DmaMask));
+ BUG();
+ }
+ }
+#endif
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
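+/* Illustrative arithmetic (not part of the driver): the offset decomposition
+ * above is a shift/mask split. E.g. with uiLog2AllocPageSize == 12 and
+ * puiOffset[uiIdx] == 0x5234:
+ *
+ *   uiPageIndex    = 0x5234 >> 12       = 5
+ *   uiInPageOffset = 0x5234 - (5 << 12) = 0x234
+ *   psDevPAddr     = page_to_phys(pagearray[5]) + 0x234
+ */
+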
+typedef struct _PMR_OSPAGEARRAY_KERNMAP_DATA_ {
+ void *pvBase;
+ IMG_UINT32 ui32PageCount;
+} PMR_OSPAGEARRAY_KERNMAP_DATA;
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pvPriv;
+ void *pvAddress;
+ pgprot_t prot = PAGE_KERNEL;
+ IMG_UINT32 ui32PageOffset=0;
+ size_t uiMapOffset=0;
+ IMG_UINT32 ui32PageCount=0;
+ IMG_UINT32 uiLog2AllocPageSize = psOSPageArrayData->uiLog2AllocPageSize;
+ IMG_UINT32 uiOSPageShift = OSGetPageShift();
+ IMG_UINT32 uiMapPageCount = 0;
+ IMG_INT32 uiPageSizeDiff = 0;
+ struct page **pagearray;
+ PMR_OSPAGEARRAY_KERNMAP_DATA *psData;
+
+	/* For cases where the device page size is greater than the OS page size,
+	 * multiple physically contiguous OS pages constitute one device page.
+	 * However, only the first page address of such an ensemble is stored
+	 * as part of the mapping table. Hence when mapping the PMR in part or in full,
+	 * all pages that constitute a device page must also be mapped to the kernel.
+	 *
+	 * For the case where the device page size is less than the OS page size,
+	 * treat it the same way as if the page sizes were equal */
+ if (uiLog2AllocPageSize > uiOSPageShift)
+ {
+ uiPageSizeDiff = uiLog2AllocPageSize - uiOSPageShift;
+ }
+
+	/*
+	   Zero offset and size have a special meaning: map in the whole of the
+	   PMR. This is due to the fact that the places that call this callback
+	   might not be able to determine the physical size.
+	 */
+ if ((uiOffset == 0) && (uiSize == 0))
+ {
+ ui32PageOffset = 0;
+ uiMapOffset = 0;
+ ui32PageCount = psOSPageArrayData->iNumOSPagesAllocated;
+ }
+ else
+ {
+ size_t uiEndoffset;
+
+ ui32PageOffset = uiOffset >> uiLog2AllocPageSize;
+ uiMapOffset = uiOffset - (ui32PageOffset << uiLog2AllocPageSize);
+ uiEndoffset = uiOffset + uiSize - 1;
+ /* Add one as we want the count, not the offset */
+ ui32PageCount = (uiEndoffset >> uiLog2AllocPageSize) + 1;
+ ui32PageCount -= ui32PageOffset;
+ }
+
+	/* The page count to be mapped might be different if the
+	 * OS page size is smaller than the device page size */
+ uiMapPageCount = ui32PageCount << uiPageSizeDiff;
+
+ switch (PVRSRV_CPU_CACHE_MODE(psOSPageArrayData->ui32CPUCacheFlags))
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ prot = pgprot_noncached(prot);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ prot = pgprot_writecombine(prot);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ break;
+
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ if (uiPageSizeDiff)
+ {
+		/* Map all the individual OS pages that are part of each device page. */
+ IMG_UINT32 ui32Temp = 0;
+ struct page **psPage = &psOSPageArrayData->pagearray[ui32PageOffset];
+
+ /* Allocate enough memory for the page pointers for this mapping */
+ pagearray = OSAllocMem(uiMapPageCount * sizeof(pagearray[0]));
+
+ if (pagearray == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+		/* Construct the array that holds the page pointers constituting the
+		 * requested mapping */
+		while (ui32Temp < ui32PageCount)
+		{
+			IMG_UINT32 ui32SubPage = 0, ui32SubPageCount = (1 << uiPageSizeDiff);
+
+			/* Fill in the page pointers for the sub pages that constitute a single
+			 * device page */
+			for (; ui32SubPage < ui32SubPageCount; ui32SubPage++)
+			{
+				pagearray[(ui32Temp << uiPageSizeDiff) + ui32SubPage] = psPage[ui32Temp] + ui32SubPage;
+			}
+			ui32Temp++;
+		}
+ }
+ else
+ {
+ pagearray = &psOSPageArrayData->pagearray[ui32PageOffset];
+ }
+
+ psData = OSAllocMem(sizeof(*psData));
+ if (psData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ pvAddress = vmap(pagearray, uiMapPageCount, VM_READ | VM_WRITE, prot);
+#else
+ pvAddress = vm_map_ram(pagearray, uiMapPageCount, -1, prot);
+#endif
+ if (pvAddress == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+
+ *ppvKernelAddressOut = pvAddress + uiMapOffset;
+ psData->pvBase = pvAddress;
+ psData->ui32PageCount = uiMapPageCount;
+ *phHandleOut = psData;
+
+ if (uiPageSizeDiff)
+ {
+ OSFreeMem(pagearray);
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+ e2:
+ OSFreeMem(psData);
+ e1:
+ if (uiPageSizeDiff)
+ {
+ OSFreeMem(pagearray);
+ }
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
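+/* Illustrative arithmetic (not part of the driver): the partial-map window
+ * above is derived from the byte range. E.g. with uiLog2AllocPageSize == 12,
+ * uiOffset == 0x3100 and uiSize == 0x2000:
+ *
+ *   ui32PageOffset = 0x3100 >> 12           = 3
+ *   uiMapOffset    = 0x3100 - (3 << 12)     = 0x100
+ *   uiEndoffset    = 0x3100 + 0x2000 - 1    = 0x50FF
+ *   ui32PageCount  = (0x50FF >> 12) + 1 - 3 = 3 device pages
+ *   // with 16K device pages on 4K OS pages, uiMapPageCount = 3 << 2 = 12
+ */
+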
+static void PMRReleaseKernelMappingDataOSMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_OSPAGEARRAY_KERNMAP_DATA *psData = hHandle;
+ PVR_UNREFERENCED_PARAMETER(pvPriv);
+
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ vunmap(psData->pvBase);
+#else
+ vm_unmap_ram(psData->pvBase, psData->ui32PageCount);
+#endif
+ OSFreeMem(psData);
+}
+
+static
+PVRSRV_ERROR PMRUnpinOSMem(PMR_IMPL_PRIVDATA pPriv)
+{
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Lock down the pool and add the array to the unpin list */
+ _PagePoolLock();
+
+ /* Sanity check */
+ PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_FALSE);
+ PVR_ASSERT(psOSPageArrayData->bOnDemand == IMG_FALSE);
+
+ eError = _AddUnpinListEntryUnlocked(psOSPageArrayData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not able to add allocation to unpinned list (%d).",
+ __FUNCTION__,
+ eError));
+
+ goto e_exit;
+ }
+
+ psOSPageArrayData->bUnpinned = IMG_TRUE;
+
+e_exit:
+ _PagePoolUnlock();
+ return eError;
+}
+
+static
+PVRSRV_ERROR PMRPinOSMem(PMR_IMPL_PRIVDATA pPriv,
+ PMR_MAPPING_TABLE *psMappingTable)
+{
+ PVRSRV_ERROR eError;
+ PMR_OSPAGEARRAY_DATA *psOSPageArrayData = pPriv;
+ IMG_UINT32 *pui32MapTable = NULL;
+ IMG_UINT32 i,j=0, ui32Temp=0;
+
+ _PagePoolLock();
+
+ /* Sanity check */
+ PVR_ASSERT(psOSPageArrayData->bUnpinned == IMG_TRUE);
+
+ psOSPageArrayData->bUnpinned = IMG_FALSE;
+
+ /* If there are still pages in the array remove entries from the pool */
+ if (psOSPageArrayData->iNumOSPagesAllocated != 0)
+ {
+ _RemoveUnpinListEntryUnlocked(psOSPageArrayData);
+ _PagePoolUnlock();
+
+ eError = PVRSRV_OK;
+ goto e_exit_mapalloc_failure;
+ }
+ _PagePoolUnlock();
+
+ /* If pages were reclaimed we allocate new ones and
+ * return PVRSRV_ERROR_PMR_NEW_MEMORY */
+ if (psMappingTable->ui32NumVirtChunks == 1)
+ {
+ eError = _AllocOSPages(psOSPageArrayData, NULL, psOSPageArrayData->uiTotalNumOSPages);
+ }
+ else
+ {
+ pui32MapTable = (IMG_UINT32 *)OSAllocMem(sizeof(*pui32MapTable) * psMappingTable->ui32NumPhysChunks);
+ if (NULL == pui32MapTable)
+ {
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not able to Alloc Map Table.",
+ __FUNCTION__));
+ goto e_exit_mapalloc_failure;
+ }
+
+ for (i = 0,j=0; i < psMappingTable->ui32NumVirtChunks; i++)
+ {
+ ui32Temp = psMappingTable->aui32Translation[i];
+ if (TRANSLATION_INVALID != ui32Temp)
+ {
+ pui32MapTable[j++] = ui32Temp;
+ }
+ }
+ eError = _AllocOSPages(psOSPageArrayData, pui32MapTable, psMappingTable->ui32NumPhysChunks);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not able to get new pages for unpinned allocation.",
+ __FUNCTION__));
+
+ eError = PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES;
+ goto e_exit;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Allocating new pages for unpinned allocation. "
+ "Old content is lost!",
+ __FUNCTION__));
+
+ eError = PVRSRV_ERROR_PMR_NEW_MEMORY;
+
+e_exit:
+ OSFreeMem(pui32MapTable);
+e_exit_mapalloc_failure:
+ return eError;
+}
+
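+/* Illustrative sketch (not part of the driver): the pin path above compacts
+ * the translation table into a dense index list before reallocating. E.g. for
+ * aui32Translation = { INVALID, 4, INVALID, 9 } and ui32NumPhysChunks == 2,
+ * the loop produces pui32MapTable = { 4, 9 }, which _AllocOSPages() then uses
+ * as sparse allocation indices. */
+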
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemOSMem
+@Description This function changes the sparse mapping by allocating and freeing
+             pages. It also changes the GPU and CPU maps accordingly
+@Return PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+PMRChangeSparseMemOSMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PMR_MAPPING_TABLE *psPMRMapTable = PMR_GetMappigTable(psPMR);
+ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+ struct page **psPageArray = psPMRPageArrayData->pagearray;
+ void **psDMAVirtArray = psPMRPageArrayData->dmavirtarray;
+ dma_addr_t *psDMAPhysArray = psPMRPageArrayData->dmaphysarray;
+
+ struct page *psPage;
+ dma_addr_t psDMAPAddr;
+ void *pvDMAVAddr;
+
+	IMG_UINT32 ui32AdtnlAllocPages = 0; /*!< Number of pages to alloc from the OS */
+	IMG_UINT32 ui32AdtnlFreePages = 0; /*!< Number of pages to free back to the OS */
+	IMG_UINT32 ui32CommonRequestCount = 0; /*!< Number of pages to move position in the page array */
+ IMG_UINT32 ui32Loop = 0;
+ IMG_UINT32 ui32Index = 0;
+ IMG_UINT32 uiAllocpgidx;
+ IMG_UINT32 uiFreepgidx;
+ IMG_UINT32 uiOrder = psPMRPageArrayData->uiLog2AllocPageSize - PAGE_SHIFT;
+ IMG_BOOL bCMA = psPMRPageArrayData->bIsCMA;
+
+
+ /* Check SPARSE flags and calculate pages to allocate and free */
+ if (SPARSE_RESIZE_BOTH == (uiFlags & SPARSE_RESIZE_BOTH))
+ {
+ ui32CommonRequestCount = (ui32AllocPageCount > ui32FreePageCount) ?
+ ui32FreePageCount : ui32AllocPageCount;
+
+ PDUMP_PANIC(SPARSEMEM_SWAP, "Request to swap alloc & free pages not supported");
+ }
+
+ if (SPARSE_RESIZE_ALLOC == (uiFlags & SPARSE_RESIZE_ALLOC))
+ {
+ ui32AdtnlAllocPages = ui32AllocPageCount - ui32CommonRequestCount;
+ }
+ else
+ {
+ ui32AllocPageCount = 0;
+ }
+
+ if (SPARSE_RESIZE_FREE == (uiFlags & SPARSE_RESIZE_FREE))
+ {
+ ui32AdtnlFreePages = ui32FreePageCount - ui32CommonRequestCount;
+ }
+ else
+ {
+ ui32FreePageCount = 0;
+ }
+
+ if (0 == (ui32CommonRequestCount || ui32AdtnlAllocPages || ui32AdtnlFreePages))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Missing parameters for number of pages to alloc/free",
+ __FUNCTION__));
+ return eError;
+ }
+
+ /* The incoming request is classified into two operations independent of
+ * each other: alloc & free pages.
+ * These operations can be combined with two mapping operations as well
+ * which are GPU & CPU space mappings.
+ *
+ * From the alloc and free page requests, the net amount of pages to be
+ * allocated or freed is computed. Pages that were requested to be freed
+ * will be reused to fulfil alloc requests.
+ *
+ * The order of operations is:
+ * 1. Allocate new pages from the OS
+ * 2. Move the free pages from free request to alloc positions.
+ * 3. Free the rest of the pages not used for alloc
+ *
+ * Alloc parameters are validated at the time of allocation
+ * and any error will be handled then. */
+
+ /* Validate the free indices */
+ if (ui32FreePageCount)
+ {
+		if (NULL != pai32FreeIndices)
+		{
+ for (ui32Loop = 0; ui32Loop < ui32FreePageCount; ui32Loop++)
+ {
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+				if (uiFreepgidx >= (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (NULL == psPageArray[uiFreepgidx])
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to free non-allocated page",
+ __FUNCTION__));
+ goto e0;
+ }
+ }
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Given non-zero free count but missing indices array",
+ __FUNCTION__));
+ return eError;
+ }
+ }
+
+ /* Validate the alloc indices */
+ for (ui32Loop = ui32AdtnlAllocPages; ui32Loop < ui32AllocPageCount; ui32Loop++)
+ {
+ uiAllocpgidx = pai32AllocIndices[ui32Loop];
+
+		if (uiAllocpgidx >= (psPMRPageArrayData->uiTotalNumOSPages >> uiOrder))
+ {
+ eError = PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE;
+ goto e0;
+ }
+
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ if ((NULL != psPageArray[uiAllocpgidx]) ||
+ (TRANSLATION_INVALID != psPMRMapTable->aui32Translation[uiAllocpgidx]))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Trying to allocate already allocated page again",
+ __FUNCTION__));
+ goto e0;
+ }
+ }
+ else
+ {
+ if ((NULL == psPageArray[uiAllocpgidx]) ||
+ (TRANSLATION_INVALID == psPMRMapTable->aui32Translation[uiAllocpgidx]) )
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to remap memory due to missing page",
+ __FUNCTION__));
+ goto e0;
+ }
+ }
+ }
+
+ ui32Loop = 0;
+
+ /* Allocate new pages from the OS */
+ if (0 != ui32AdtnlAllocPages)
+ {
+ eError = _AllocOSPages(psPMRPageArrayData, pai32AllocIndices, ui32AdtnlAllocPages);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: New Addtl Allocation of pages failed",
+ __FUNCTION__));
+ goto e0;
+ }
+
+ psPMRMapTable->ui32NumPhysChunks += ui32AdtnlAllocPages;
+ /* Mark the corresponding pages of the translation table as valid */
+ for (ui32Loop = 0; ui32Loop < ui32AdtnlAllocPages; ui32Loop++)
+ {
+ psPMRMapTable->aui32Translation[pai32AllocIndices[ui32Loop]] = pai32AllocIndices[ui32Loop];
+ }
+ }
+
+
+ ui32Index = ui32Loop;
+
+ /* Move the corresponding free pages to alloc request */
+ for (ui32Loop = 0; ui32Loop < ui32CommonRequestCount; ui32Loop++, ui32Index++)
+ {
+ uiAllocpgidx = pai32AllocIndices[ui32Index];
+ uiFreepgidx = pai32FreeIndices[ui32Loop];
+
+ psPage = psPageArray[uiAllocpgidx];
+ psPageArray[uiAllocpgidx] = psPageArray[uiFreepgidx];
+
+ if (bCMA)
+ {
+ pvDMAVAddr = psDMAVirtArray[uiAllocpgidx];
+ psDMAPAddr = psDMAPhysArray[uiAllocpgidx];
+ psDMAVirtArray[uiAllocpgidx] = psDMAVirtArray[uiFreepgidx];
+ psDMAPhysArray[uiAllocpgidx] = psDMAPhysArray[uiFreepgidx];
+ }
+
+ /* Is SPARSE_REMAP_MEM used in any real-world scenario? Should it be
+ * turned into a debug-only feature? The condition check ought to be
+ * hoisted out of the loop; that will be done later, after some
+ * analysis. */
+ if (SPARSE_REMAP_MEM != (uiFlags & SPARSE_REMAP_MEM))
+ {
+ psPMRMapTable->aui32Translation[uiFreepgidx] = TRANSLATION_INVALID;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ psPageArray[uiFreepgidx] = NULL;
+ if (bCMA)
+ {
+ psDMAVirtArray[uiFreepgidx] = NULL;
+ psDMAPhysArray[uiFreepgidx] = (dma_addr_t)0;
+ }
+ }
+ else
+ {
+ psPMRMapTable->aui32Translation[uiFreepgidx] = uiFreepgidx;
+ psPMRMapTable->aui32Translation[uiAllocpgidx] = uiAllocpgidx;
+ psPageArray[uiFreepgidx] = psPage;
+ if (bCMA)
+ {
+ psDMAVirtArray[uiFreepgidx] = pvDMAVAddr;
+ psDMAPhysArray[uiFreepgidx] = psDMAPAddr;
+ }
+ }
+ }
+
+ /* Free the additional free pages */
+ if (0 != ui32AdtnlFreePages)
+ {
+ eError = _FreeOSPages(psPMRPageArrayData,
+ &pai32FreeIndices[ui32Loop],
+ ui32AdtnlFreePages);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ psPMRMapTable->ui32NumPhysChunks -= ui32AdtnlFreePages;
+ while (ui32Loop < ui32FreePageCount)
+ {
+ psPMRMapTable->aui32Translation[pai32FreeIndices[ui32Loop]] = TRANSLATION_INVALID;
+ ui32Loop++;
+ }
+ }
+
+ eError = PVRSRV_OK;
+
+e0:
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PMRChangeSparseMemCPUMapOSMem
+@Description Changes the CPU mappings of a sparse allocation by mapping
+             or unmapping pages at the given allocation/free indices
+@Return PVRSRV_OK on success, otherwise a PVRSRV_ERROR failure code
+*/ /**************************************************************************/
+static
+PVRSRV_ERROR PMRChangeSparseMemCPUMapOSMem(PMR_IMPL_PRIVDATA pPriv,
+ const PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices)
+{
+ struct page **psPageArray;
+ PMR_OSPAGEARRAY_DATA *psPMRPageArrayData = (PMR_OSPAGEARRAY_DATA *)pPriv;
+ IMG_CPU_PHYADDR sCPUPAddr;
+
+ sCPUPAddr.uiAddr = 0;
+ psPageArray = psPMRPageArrayData->pagearray;
+
+ return OSChangeSparseMemCPUAddrMap((void **)psPageArray,
+ sCpuVAddrBase,
+ sCPUPAddr,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ IMG_FALSE);
+}
+
+static PMR_IMPL_FUNCTAB _sPMROSPFuncTab = {
+ .pfnLockPhysAddresses = &PMRLockSysPhysAddressesOSMem,
+ .pfnUnlockPhysAddresses = &PMRUnlockSysPhysAddressesOSMem,
+ .pfnDevPhysAddr = &PMRSysPhysAddrOSMem,
+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataOSMem,
+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataOSMem,
+ .pfnReadBytes = NULL,
+ .pfnWriteBytes = NULL,
+ .pfnUnpinMem = &PMRUnpinOSMem,
+ .pfnPinMem = &PMRPinOSMem,
+ .pfnChangeSparseMem = &PMRChangeSparseMemOSMem,
+ .pfnChangeSparseMemCPUMap = &PMRChangeSparseMemCPUMapOSMem,
+ .pfnFinalize = &PMRFinalizeOSMem,
+};
+
+PVRSRV_ERROR
+PhysmemNewOSRamBackedPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiAllocIndices,
+ IMG_UINT32 uiLog2AllocPageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ IMG_PID uiPid,
+ PMR **ppsPMRPtr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ PMR *psPMR;
+ struct _PMR_OSPAGEARRAY_DATA_ *psPrivData;
+ PMR_FLAGS_T uiPMRFlags;
+ PHYS_HEAP *psPhysHeap;
+ IMG_UINT32 ui32CPUCacheFlags;
+ IMG_BOOL bZero;
+ IMG_BOOL bIsCMA;
+ IMG_BOOL bPoisonOnAlloc;
+ IMG_BOOL bPoisonOnFree;
+ IMG_BOOL bOnDemand;
+ IMG_BOOL bCpuLocal;
+ IMG_BOOL bFwLocal;
+
+ /*
+ * The host driver (but not guest) can still use this factory for firmware
+ * allocations
+ */
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) && PVRSRV_CHECK_FW_LOCAL(uiFlags))
+ {
+ PVR_ASSERT(0);
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto errorOnParam;
+ }
+
+ /* Select correct caching mode */
+ eError = DevmemCPUCacheMode(psDevNode, uiFlags, &ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnParam;
+ }
+
+ if (PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags))
+ {
+ ui32CPUCacheFlags |= PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN;
+ }
+
+ /*
+ * Use the CMA framework if the allocation page size is greater than the
+ * OS page size; note that OSMMapPMRGeneric() makes the same assumption.
+ */
+ bIsCMA = uiLog2AllocPageSize > PAGE_SHIFT ? IMG_TRUE : IMG_FALSE;
+ bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bCpuLocal = PVRSRV_CHECK_CPU_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bFwLocal = PVRSRV_CHECK_FW_LOCAL(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bZero = PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnAlloc = PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) ? IMG_TRUE : IMG_FALSE;
+ bPoisonOnFree = PVRSRV_CHECK_POISON_ON_FREE(uiFlags) ? IMG_TRUE : IMG_FALSE;
+
+#if defined(PVR_LINUX_PHYSMEM_ZERO_ALL_PAGES)
+ /* Overwrite flags and always zero pages that could go back to UM */
+ bZero = IMG_TRUE;
+ bPoisonOnAlloc = IMG_FALSE;
+#endif
+
+ /* Physical allocation alignment is generally not supported except under
+ very restrictive conditions. There is also a maximum alignment value,
+ which must not exceed the largest device page size. If these conditions
+ are not met, fail the alignment-requested allocation. */
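+ /* For instance (hypothetical values): a CMA allocation built from 16kB
+ compound pages (uiLog2AllocPageSize = 14) is only accepted when
+ uiSize >= 16kB and 14 <= PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ,
+ mirroring the checks below. */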
+ if (bIsCMA)
+ {
+ IMG_UINT32 uiAlign = 1 << uiLog2AllocPageSize;
+ if (uiAlign > uiSize || uiAlign > (1 << PVR_MAX_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid PA alignment: size 0x%llx, align 0x%x",
+ __FUNCTION__, uiSize, uiAlign));
+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+ goto errorOnParam;
+ }
+ PVR_ASSERT(uiLog2AllocPageSize > PVR_MIN_PHYSMEM_CONTIG_ALLOC_LOG2PGSZ);
+ }
+
+ /* Create the array structure that holds the physical pages */
+ eError = _AllocOSPageArray(psDevNode,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ uiLog2AllocPageSize,
+ bZero,
+ bIsCMA,
+ bPoisonOnAlloc,
+ bPoisonOnFree,
+ bOnDemand,
+ ui32CPUCacheFlags,
+ uiPid,
+ &psPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPageArray;
+ }
+
+ if (!bOnDemand)
+ {
+ /* Do we fill the whole page array or just parts (sparse)? */
+ if (ui32NumPhysChunks == ui32NumVirtChunks)
+ {
+ /* Allocate the physical pages */
+ eError = _AllocOSPages(psPrivData,
+ NULL,
+ psPrivData->uiTotalNumOSPages >>
+ (uiLog2AllocPageSize - PAGE_SHIFT) );
+ }
+ else
+ {
+ if (ui32NumPhysChunks != 0)
+ {
+ /* Calculate the number of pages we want to allocate */
+ IMG_UINT32 uiPagesToAlloc =
+ (IMG_UINT32) ((((ui32NumPhysChunks * uiChunkSize) - 1) >> uiLog2AllocPageSize) + 1);
+
+ /* Make sure calculation is correct */
+ PVR_ASSERT(((PMR_SIZE_T) uiPagesToAlloc << uiLog2AllocPageSize) ==
+ (ui32NumPhysChunks * uiChunkSize) );
+
+ /* Allocate the physical pages */
+ eError = _AllocOSPages(psPrivData, puiAllocIndices,
+ uiPagesToAlloc);
+ }
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAllocPages;
+ }
+ }
+
+ /*
+ * In this instance, we simply pass flags straight through.
+ *
+ * Generically, uiFlags can include things that control the PMR factory, but
+ * we don't need any such thing (at the time of writing!), and our caller
+ * specifies all PMR flags so we don't need to meddle with what was given to
+ * us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /*
+ * Check no significant bits were lost in cast due to different bit widths
+ * for flags
+ */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ if (bOnDemand)
+ {
+ PDUMPCOMMENT("Deferred Allocation PMR (UMA)");
+ }
+
+ if (bFwLocal)
+ {
+ PDUMPCOMMENT("FW_LOCAL allocation requested");
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+ else if (bCpuLocal)
+ {
+ PDUMPCOMMENT("CPU_LOCAL allocation requested");
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL];
+ }
+ else
+ {
+ psPhysHeap = psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ }
+
+ eError = PMRCreatePMR(psDevNode,
+ psPhysHeap,
+ uiSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ puiAllocIndices,
+ uiLog2AllocPageSize,
+ uiPMRFlags,
+ pszAnnotation,
+ &_sPMROSPFuncTab,
+ psPrivData,
+ PMR_TYPE_OSMEM,
+ &psPMR,
+ PDUMP_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreate;
+ }
+
+ *ppsPMRPtr = psPMR;
+
+ return PVRSRV_OK;
+
+errorOnCreate:
+ if (!bOnDemand)
+ {
+ eError2 = _FreeOSPages(psPrivData, NULL, 0);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ }
+
+errorOnAllocPages:
+ eError2 = _FreeOSPagesArray(psPrivData);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+
+errorOnAllocPageArray:
+errorOnParam:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.h b/drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.h
new file mode 100644
index 00000000000000..3fac82dd8612bf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_osmem_linux.h
@@ -0,0 +1,49 @@
+/*************************************************************************/ /*!
+@File
+@Title Linux OS physmem implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PHYSMEM_OSMEM_LINUX_H__
+#define __PHYSMEM_OSMEM_LINUX_H__
+
+void LinuxInitPhysmem(void);
+void LinuxDeinitPhysmem(void);
+
+#endif /* __PHYSMEM_OSMEM_LINUX_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.c b/drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.c
new file mode 100644
index 00000000000000..2b78703c7958cc
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.c
@@ -0,0 +1,586 @@
+/*************************************************************************/ /*!
+@File
+@Title Implementation of PMR functions for Trusted Device secure memory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for physical memory imported
+ from a trusted environment. The driver cannot acquire CPU
+ mappings for this secure memory.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "physmem_tdsecbuf.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+
+#if defined (SUPPORT_TRUSTED_DEVICE)
+
+#if !defined(NO_HARDWARE)
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PHYS_HEAP *psTDSecBufPhysHeap;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT64 ui64Size;
+ IMG_UINT32 ui32Log2PageSize;
+ IMG_UINT64 ui64SecBufHandle;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+ IMG_UINT32 i;
+
+ if (psPrivData->ui32Log2PageSize != ui32Log2PageSize)
+ {
+ return PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY;
+ }
+
+ for (i = 0; i < ui32NumOfPages; i++)
+ {
+ psDevPAddr[i].uiAddr = psPrivData->sDevPAddr.uiAddr + puiOffset[i];
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psPrivData->psDevNode->psDevConfig;
+ PVRSRV_ERROR eError;
+
+ eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+ psPrivData->ui64SecBufHandle);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree not implemented on the Trusted Device!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRFinalizeTDSecBufMem: TDSecBufFree cannot free the resource!"));
+ }
+ return eError;
+ }
+
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+ OSFreeMem(psPrivData);
+
+ return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+ .pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+ .pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+ RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+ PMR_TDSECBUF_DATA *psPrivData = NULL;
+ PMR *psPMR = NULL;
+ IMG_UINT32 uiMappingTable = 0;
+ PMR_FLAGS_T uiPMRFlags;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+ /* In this instance, we simply pass flags straight through.
+ * Generically, uiFlags can include things that control the PMR
+ * factory, but we don't need any such thing (at the time of
+ * writing!), and our caller specifies all PMR flags so we don't
+ * need to meddle with what was given to us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /* Check no significant bits were lost in cast due to different bit widths for flags */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ /* Many flags can be dropped as the driver cannot access this memory
+ * and it is assumed that the trusted zone is physically contiguous
+ */
+ uiPMRFlags &= ~(PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE |
+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK);
+
+ psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+ if (psPrivData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocData;
+ }
+
+ /* Get required info for the TD Secure Buffer physical heap */
+ if (!psRGXData->bHasTDSecureBufPhysHeap)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+ eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+ goto errorOnAcquireHeap;
+ }
+
+ eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+ &psPrivData->psTDSecBufPhysHeap);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAcquireHeap;
+ }
+
+ psPrivData->ui64Size = uiSize;
+
+ if (psDevConfig->pfnTDSecureBufAlloc && psDevConfig->pfnTDSecureBufFree)
+ {
+ PVRSRV_TD_SECBUF_PARAMS sTDSecBufParams;
+
+ psPrivData->psDevNode = psDevNode;
+
+ /* Ask the Trusted Device to allocate secure memory */
+ sTDSecBufParams.uiSize = uiSize;
+ sTDSecBufParams.uiAlign = 1 << uiLog2Align;
+
+ /* These will be returned by pfnTDSecureBufAlloc on success */
+ sTDSecBufParams.psSecBufAddr = &psPrivData->sCpuPAddr;
+ sTDSecBufParams.pui64SecBufHandle = &psPrivData->ui64SecBufHandle;
+
+ eError = psDevConfig->pfnTDSecureBufAlloc(psDevConfig->hSysData,
+ &sTDSecBufParams);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc not implemented on the Trusted Device!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc cannot allocate the resource!"));
+ }
+ goto errorOnAlloc;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufAlloc/Free not implemented!"));
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+ goto errorOnAlloc;
+ }
+
+ PhysHeapCpuPAddrToDevPAddr(psPrivData->psTDSecBufPhysHeap,
+ 1,
+ &psPrivData->sDevPAddr,
+ &psPrivData->sCpuPAddr);
+
+ /* Check that the secure buffer has the requested alignment */
+ if ((((1ULL << uiLog2Align) - 1) & psPrivData->sCpuPAddr.uiAddr) != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Trusted Device physical heap has the wrong alignment!"
+ "Physical address 0x%llx, alignment mask 0x%llx",
+ (unsigned long long) psPrivData->sCpuPAddr.uiAddr,
+ ((1ULL << uiLog2Align) - 1)));
+ eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+ goto errorOnCheckAlign;
+ }
+
+ psPrivData->ui32Log2PageSize = uiLog2Align;
+
+ eError = PMRCreatePMR(psDevNode,
+ psPrivData->psTDSecBufPhysHeap,
+ psPrivData->ui64Size,
+ psPrivData->ui64Size,
+ 1, /* ui32NumPhysChunks */
+ 1, /* ui32NumVirtChunks */
+ &uiMappingTable, /* pui32MappingTable (not used) */
+ uiLog2Align,
+ uiPMRFlags,
+ "TDSECUREBUF_PMR",
+ &_sPMRTDSecBufFuncTab,
+ psPrivData,
+ PMR_TYPE_TDSECBUF,
+ &psPMR,
+ PDUMP_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreatePMR;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ eError = RIWritePMREntryKM(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Failed to write PMR entry (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+#endif
+
+ *ppsPMRPtr = psPMR;
+ *pui64SecBufHandle = psPrivData->ui64SecBufHandle;
+
+ return PVRSRV_OK;
+
+
+errorOnCreatePMR:
+errorOnCheckAlign:
+ eError = psDevConfig->pfnTDSecureBufFree(psDevConfig->hSysData,
+ psPrivData->ui64SecBufHandle);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree not implemented on the Trusted Device!"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR: TDSecBufFree cannot free the resource!"));
+ }
+ }
+errorOnAlloc:
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+errorOnAcquireHeap:
+ OSFreeMem(psPrivData);
+
+errorOnAllocData:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+#else /* NO_HARDWARE */
+
+#include "physmem_osmem.h"
+
+typedef struct _PMR_TDSECBUF_DATA_ {
+ PHYS_HEAP *psTDSecBufPhysHeap;
+ PMR *psOSMemPMR;
+ IMG_UINT32 ui32Log2PageSize;
+} PMR_TDSECBUF_DATA;
+
+
+/*
+ * Implementation of callback functions
+ */
+
+static PVRSRV_ERROR
+PMRLockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ return PMRLockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRUnlockPhysAddressesTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ return PMRUnlockSysPhysAddresses(psPrivData->psOSMemPMR);
+}
+
+static PVRSRV_ERROR
+PMRSysPhysAddrTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevPAddr)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ /* On the assumption that this PMR was created with
+ * NumPhysChunks == NumVirtChunks, puiOffset[0] == uiLogicalOffset.
+ */
+
+ return PMR_DevPhysAddr(psPrivData->psOSMemPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ puiOffset[0],
+ psDevPAddr,
+ pbValid);
+}
+
+static PVRSRV_ERROR
+PMRAcquireKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+ size_t uiLengthOut;
+
+ PVR_UNREFERENCED_PARAMETER(ulFlags);
+
+ return PMRAcquireKernelMappingData(psPrivData->psOSMemPMR,
+ uiOffset,
+ uiSize,
+ ppvKernelAddressOut,
+ &uiLengthOut,
+ phHandleOut);
+}
+
+static void
+PMRReleaseKernelMappingDataTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ PMRReleaseKernelMappingData(psPrivData->psOSMemPMR, hHandle);
+}
+
+static PVRSRV_ERROR PMRFinalizeTDSecBufMem(PMR_IMPL_PRIVDATA pvPriv)
+{
+ PMR_TDSECBUF_DATA *psPrivData = pvPriv;
+
+ PMRUnrefPMR(psPrivData->psOSMemPMR);
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+ OSFreeMem(psPrivData);
+
+ return PVRSRV_OK;
+}
+
+static PMR_IMPL_FUNCTAB _sPMRTDSecBufFuncTab = {
+ .pfnLockPhysAddresses = &PMRLockPhysAddressesTDSecBufMem,
+ .pfnUnlockPhysAddresses = &PMRUnlockPhysAddressesTDSecBufMem,
+ .pfnDevPhysAddr = &PMRSysPhysAddrTDSecBufMem,
+ .pfnAcquireKernelMappingData = &PMRAcquireKernelMappingDataTDSecBufMem,
+ .pfnReleaseKernelMappingData = &PMRReleaseKernelMappingDataTDSecBufMem,
+ .pfnFinalize = &PMRFinalizeTDSecBufMem,
+};
+
+
+/*
+ * Public functions
+ */
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDevNode->psDevConfig;
+ RGX_DATA *psRGXData = (RGX_DATA *)(psDevConfig->hDevData);
+ PMR_TDSECBUF_DATA *psPrivData = NULL;
+ PMR *psPMR = NULL;
+ PMR *psOSPMR = NULL;
+ IMG_UINT32 uiMappingTable = 0;
+ PMR_FLAGS_T uiPMRFlags;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* In this instance, we simply pass flags straight through.
+ * Generically, uiFlags can include things that control the PMR
+ * factory, but we don't need any such thing (at the time of
+ * writing!), and our caller specifies all PMR flags so we don't
+ * need to meddle with what was given to us.
+ */
+ uiPMRFlags = (PMR_FLAGS_T)(uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK);
+
+ /* Check no significant bits were lost in cast due to different bit widths for flags */
+ PVR_ASSERT(uiPMRFlags == (uiFlags & PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK));
+
+ psPrivData = OSAllocZMem(sizeof(PMR_TDSECBUF_DATA));
+ if (psPrivData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto errorOnAllocData;
+ }
+
+ /* Get required info for the TD Secure Buffer physical heap */
+ if (!psRGXData->bHasTDSecureBufPhysHeap)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Trusted Device physical heap not available!"));
+ eError = PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL;
+ goto errorOnAcquireHeap;
+ }
+
+ eError = PhysHeapAcquire(psRGXData->uiTDSecureBufPhysHeapID,
+ &psPrivData->psTDSecBufPhysHeap);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnAcquireHeap;
+ }
+
+ psPrivData->ui32Log2PageSize = uiLog2Align;
+
+ /* Note that this PMR is only used to copy the FW blob to memory and
+ * to dump this memory to PDump; it doesn't need to have the alignment
+ * requested by the caller.
+ */
+ eError = PhysmemNewOSRamBackedPMR(psDevNode,
+ uiSize,
+ uiSize,
+ 1, /* ui32NumPhysChunks */
+ 1, /* ui32NumVirtChunks */
+ &uiMappingTable,
+ psPrivData->ui32Log2PageSize,
+ uiFlags,
+ "TDSECUREBUF_OSMEM",
+ OSGetCurrentClientProcessIDKM(),
+ &psOSPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreateOSPMR;
+ }
+
+ /* This is the primary PMR dumped with correct memspace and alignment */
+ eError = PMRCreatePMR(psDevNode,
+ psPrivData->psTDSecBufPhysHeap,
+ uiSize,
+ uiSize,
+ 1, /* ui32NumPhysChunks */
+ 1, /* ui32NumVirtChunks */
+ &uiMappingTable, /* pui32MappingTable (not used) */
+ uiLog2Align,
+ uiPMRFlags,
+ "TDSECUREBUF_PMR",
+ &_sPMRTDSecBufFuncTab,
+ psPrivData,
+ PMR_TYPE_TDSECBUF,
+ &psPMR,
+ PDUMP_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto errorOnCreateTDPMR;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ eError = RIWritePMREntryKM(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Failed to write PMR entry (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+#endif
+
+ psPrivData->psOSMemPMR = psOSPMR;
+ *ppsPMRPtr = psPMR;
+ *pui64SecBufHandle = 0x0ULL;
+
+ return PVRSRV_OK;
+
+errorOnCreateTDPMR:
+ PMRUnrefPMR(psOSPMR);
+
+errorOnCreateOSPMR:
+ PhysHeapRelease(psPrivData->psTDSecBufPhysHeap);
+
+errorOnAcquireHeap:
+ OSFreeMem(psPrivData);
+
+errorOnAllocData:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+#endif /* NO_HARDWARE */
+
+#else /* SUPPORT_TRUSTED_DEVICE */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Align);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRPtr);
+ PVR_UNREFERENCED_PARAMETER(pui64SecBufHandle);
+
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+#endif
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ return PhysmemNewTDSecureBufPMR(psConnection,
+ psDevNode,
+ uiSize,
+ (PMR_LOG2ALIGN_T)ui32Log2Align,
+ uiFlags,
+ ppsPMRPtr,
+ pui64SecBufHandle);
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.h b/drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.h
new file mode 100644
index 00000000000000..6d13802d163e8f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/physmem_tdsecbuf.h
@@ -0,0 +1,84 @@
+/**************************************************************************/ /*!
+@File
+@Title Header for secure buffer PMR factory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ implementing the function callbacks for importing secure buffer
+ allocations.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PHYSMEM_TDSECBUF_H_
+#define _PHYSMEM_TDSECBUF_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "pmr.h"
+
+/*
+ * PhysmemNewTDSecureBufPMR
+ *
+ * This function is used as part of the facility to provide secure buffer
+ * memory. A default implementation is provided but it can be replaced by
+ * the SoC implementor if necessary.
+ *
+ * Calling this function will create a PMR for a memory allocation made
+ * in "secure buffer memory". It will only be writable by a trusted
+ * entity, and when the feature is enabled on the SoC the GPU will only
+ * be able to perform operations permitted by the security rules (see
+ * the illustrative call sketch below the declaration).
+ */
+
+PVRSRV_ERROR PhysmemNewTDSecureBufPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle);
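+
+/*
+ * Illustrative call sketch (hypothetical values; psConnection, psDevNode
+ * and uiFlags are assumed to come from the caller's context):
+ *
+ *     PMR *psPMR;
+ *     IMG_UINT64 ui64SecBufHandle;
+ *     PVRSRV_ERROR eError =
+ *         PhysmemNewTDSecureBufPMR(psConnection, psDevNode,
+ *                                  1 << 20,   // 1MB buffer
+ *                                  12,        // 4kB alignment
+ *                                  uiFlags,
+ *                                  &psPMR,
+ *                                  &ui64SecBufHandle);
+ *
+ * On success the returned PMR can be mapped into the GPU, but the driver
+ * cannot acquire CPU mappings for this secure memory.
+ */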
+
+PVRSRV_ERROR PhysmemImportSecBuf(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 ui32Log2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ PMR **ppsPMRPtr,
+ IMG_UINT64 *pui64SecBufHandle);
+
+#endif /* _PHYSMEM_TDSECBUF_H_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pmr.c b/drivers/gpu/drm/img-rogue/1.10/pmr.c
new file mode 100644
index 00000000000000..cf02d368c0d9f3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pmr.c
@@ -0,0 +1,3522 @@
+/*************************************************************************/ /*!
+@File
+@Title Physmem (PMR) abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ the "PMR" abstraction. A PMR (Physical Memory Resource)
+ represents some unit of physical memory which is
+ allocated/freed/mapped/unmapped as an indivisible unit
+ (higher software levels provide an abstraction above that
+ to deal with dividing this down into smaller manageable units).
+ Importantly, this module knows nothing of virtual memory, or
+ of MMUs etc., with one excusable exception. We have the
+ concept of a "page size", which really means nothing in
+ physical memory, but represents a "contiguity quantum" such
+ that the higher level modules which map this memory are able
+ to verify that it matches the needs of the page size for the
+ virtual realm into which it is being mapped.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+
+#include "pdump.h"
+#include "devicemem_server_utils.h"
+
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pmr_impl.h"
+#include "pmr_os.h"
+#include "pvrsrv.h"
+
+#include "allocmem.h"
+#include "lock.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "secure_export.h"
+#include "ossecure_export.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+/* ourselves */
+#include "pmr.h"
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+/* A "context" for the physical memory block resource allocator.
+
+ Context is probably the wrong word.
+
+ There is almost certainly only one of these, ever, in the system.
+ But, let's keep the notion of a context anyway, "just-in-case".
+ */
+static struct _PMR_CTX_
+{
+ /* For debugging, and PDump, etc., let's issue a forever
+ incrementing serial number to each allocation. */
+ IMG_UINT64 uiNextSerialNum;
+
+ /* For security, we only allow a PMR to be mapped if the caller
+ knows its key. We can pseudo-randomly generate keys */
+ IMG_UINT64 uiNextKey;
+
+ /* For debugging only, I guess: Number of live PMRs */
+ IMG_UINT32 uiNumLivePMRs;
+
+ /* Lock for this structure */
+ POS_LOCK hLock;
+
+ /* In order to seed the uiNextKey, we enforce initialisation at
+ driver load time. Also, we can debug check at driver unload
+ that the PMR count is zero. */
+ IMG_BOOL bModuleInitialised;
+} _gsSingletonPMRContext = { 1, 0, 0, NULL, IMG_FALSE };
+
+
+/* A PMR. One per physical allocation. May be "shared".
+
+ "shared" is ambiguous. We need to be careful with terminology.
+ There are two ways in which a PMR may be "shared" and we need to be
+ sure that we are clear which we mean.
+
+ i) multiple small allocations living together inside one PMR;
+
+ ii) one single allocation filling a PMR but mapped into multiple
+ memory contexts.
+
+ This is more important further up the stack - at this level, all we
+ care is that the PMR is being referenced multiple times.
+ */
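+
+/* Concrete (hypothetical) instances of the two meanings: (i) a user-mode
+   suballocator packing many small buffers into one large PMR; (ii) a
+   single allocation filling one PMR that is mapped into the memory
+   contexts of several processes at once. */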
+struct _PMR_
+{
+ /* This object is strictly refcounted. References include:
+ - mapping
+ - live handles (to this object)
+ - live export handles
+ (thus it is normal for allocated and exported memory to have a refcount of 3)
+ The object is destroyed when and only when the refcount reaches 0
+ */
+
+ /* Device node on which this PMR was created and is valid */
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ /*
+ Physical address translation (device <> cpu) is done on a per device
+ basis which means we need the physical heap info
+ */
+ PHYS_HEAP *psPhysHeap;
+
+ ATOMIC_T iRefCount;
+
+ /* lock count - this is the number of times
+ PMRLockSysPhysAddresses() has been called, less the number of
+ PMRUnlockSysPhysAddresses() calls. This is arguably here for
+ debug reasons only, as the refcount is already incremented as a
+ matter of course. Really, this just allows us to trap protocol
+ errors: i.e. calling PMRSysPhysAddr(),
+ without a lock, or calling PMRUnlockSysPhysAddresses() too many
+ or too few times. */
+ ATOMIC_T iLockCount;
+
+ /* Lock for this structure */
+ POS_LOCK hLock;
+
+ /* Incrementing serial number to each allocation. */
+ IMG_UINT64 uiSerialNum;
+
+ /* For security, we only allow a PMR to be mapped if the caller
+ knows its key. We can pseudo-randomly generate keys */
+ PMR_PASSWORD_T uiKey;
+
+ /* Callbacks for per-flavour functions */
+ const PMR_IMPL_FUNCTAB *psFuncTab;
+
+ /* Data associated with the "subtype" */
+ PMR_IMPL_PRIVDATA pvFlavourData;
+
+ /* What kind of PMR do we have? */
+ PMR_IMPL_TYPE eFlavour;
+
+ /* And for pdump */
+ const IMG_CHAR *pszPDumpDefaultMemspaceName;
+
+ /* Allocation annotation */
+ IMG_CHAR szAnnotation[DEVMEM_ANNOTATION_MAX_LEN];
+
+#if defined(PDUMP)
+
+ IMG_HANDLE hPDumpAllocHandle;
+
+ /* Whether PDumping of this PMR must be persistent
+ * (i.e. it must be present in every future PDump stream as well)
+ */
+ IMG_BOOL bForcePersistent;
+
+ IMG_UINT32 uiNumPDumpBlocks;
+#endif
+
+ /* Logical size of allocation. "logical", because a PMR can
+ represent memory that will never physically exist. This is the
+ amount of virtual space that the PMR would consume when it's
+ mapped into a virtual allocation. */
+ PMR_SIZE_T uiLogicalSize;
+
+ /* Mapping table for the allocation.
+ PMR's can be sparse in which case not all the "logic" addresses
+ in it are valid. We need to know which addresses are and aren't
+ valid when mapping or reading the PMR.
+ The mapping table translates "logical" offsets into physical
+ offsets which is what we always pass to the PMR factory
+ (so it doesn't have to be concerned about sparseness issues) */
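+
+ /* Illustrative example (hypothetical values): a sparse PMR created
+    with ui32NumVirtChunks = 4 and ui32NumPhysChunks = 2, backed at
+    logical chunks 1 and 3, has aui32Translation =
+    { TRANSLATION_INVALID, 1, TRANSLATION_INVALID, 3 }; only offsets
+    falling inside chunks 1 and 3 resolve to physical memory. */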
+ PMR_MAPPING_TABLE *psMappingTable;
+
+ /* Indicates whether this PMR has been allocated as sparse.
+ * The condition for this variable to be set at allocation time is:
+ * (numVirtChunks != numPhysChunks) || (numVirtChunks > 1)
+ */
+ IMG_BOOL bSparseAlloc;
+
+ /* Indicates whether this PMR has been unpinned.
+ * By default, all PMRs are pinned at creation.
+ */
+ IMG_BOOL bIsUnpinned;
+
+ /* Minimum Physical Contiguity Guarantee. Might be called "page
+ size", but that would be incorrect, as page size is something
+ meaningful only in virtual realm. This contiguity guarantee
+ provides an inequality that can be verified/asserted/whatever
+ to ensure that this PMR conforms to the page size requirement
+ of the place the PMR gets mapped. (May be used to select an
+ appropriate heap in variable page size systems)
+
+ The absolutely necessary condition is this:
+
+ device MMU page size <= actual physical contiguity.
+
+ We go one step further in order to be able to provide an early warning / early compatibility check and say this:
+
+ device MMU page size <= 2**(uiLog2ContiguityGuarantee) <= actual physical contiguity.
+
+ In this way, it is possible to make the page table reservation
+ in the device MMU without even knowing the granularity of the
+ physical memory (i.e. useful for being able to allocate virtual
+ before physical)
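+
+	   Worked example (illustrative): with a 4kB device MMU page (2**12)
+	   and memory that is physically contiguous in 16kB blocks (2**14),
+	   any uiLog2ContiguityGuarantee from 12 to 14 satisfies
+	   device MMU page size <= 2**(uiLog2ContiguityGuarantee) <= actual
+	   physical contiguity.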
+ */
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee;
+
+ /* Flags. We store a copy of the "PMR flags" (usually a subset of
+ the flags given at allocation time) and return them to any
+ caller of PMR_Flags(). The intention of these flags is that
+ the ones stored here are used to represent permissions, such
+ that no one is able to map a PMR in a mode in which they are not
+ allowed, e.g. writeable for a read-only PMR, etc. */
+ PMR_FLAGS_T uiFlags;
+
+ /* Do we really need this? For now we'll keep it, until we know we don't. */
+ /* NB: this is not the "memory context" in client terms - this is
+ _purely_ the "PMR" context, of which there is almost certainly only
+ ever one per system as a whole, but we'll keep the concept
+ anyway, just-in-case. */
+ struct _PMR_CTX_ *psContext;
+
+#if defined(PVR_RI_DEBUG)
+ /*
+ * Stored handle to PMR RI entry
+ */
+ void *hRIHandle;
+#endif
+};
+
+/* do we need a struct for the export handle? I'll use one for now, but if nothing goes in it, we'll lose it */
+struct _PMR_EXPORT_
+{
+ struct _PMR_ *psPMR;
+};
+
+struct _PMR_PAGELIST_
+{
+ struct _PMR_ *psReferencePMR;
+};
+
+PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR)
+{
+ PPVRSRV_DEVICE_NODE psReturnedDeviceNode = NULL;
+
+ PVR_ASSERT(psExportPMR != NULL);
+ if (psExportPMR)
+ {
+ PVR_ASSERT(psExportPMR->psPMR != NULL);
+ if (psExportPMR->psPMR)
+ {
+ PVR_ASSERT(OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0);
+ if (OSAtomicRead(&psExportPMR->psPMR->iRefCount) > 0)
+ {
+ psReturnedDeviceNode = PMR_DeviceNode(psExportPMR->psPMR);
+ }
+ }
+ }
+
+ return psReturnedDeviceNode;
+}
+
+static PVRSRV_ERROR
+_PMRCreate(PMR_SIZE_T uiLogicalSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+ PMR_FLAGS_T uiFlags,
+ PMR **ppsPMR)
+{
+ void *pvPMRLinAddr;
+ PMR *psPMR;
+ PMR_MAPPING_TABLE *psMappingTable;
+ struct _PMR_CTX_ *psContext;
+ IMG_UINT32 i, ui32Temp = 0;
+ IMG_UINT32 ui32Remainder;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bSparse = IMG_FALSE;
+
+ psContext = &_gsSingletonPMRContext;
+
+ /* Do we have a sparse allocation? */
+ if ( (ui32NumVirtChunks != ui32NumPhysChunks) ||
+ (ui32NumVirtChunks > 1) )
+ {
+ bSparse = IMG_TRUE;
+ }
+
+ /* Extra checks required for sparse PMRs */
+ if (uiLogicalSize != uiChunkSize)
+ {
+ /* Check the logical size and chunk information agree with each other */
+ if (uiLogicalSize != (uiChunkSize * ui32NumVirtChunks))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad mapping size (uiLogicalSize = 0x%llx, uiChunkSize = 0x%llx, ui32NumVirtChunks = %d)",
+ __FUNCTION__, (unsigned long long)uiLogicalSize, (unsigned long long)uiChunkSize, ui32NumVirtChunks));
+ return PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE;
+ }
+
+ /* Check that the chunk size is a multiple of the contiguity */
+ OSDivide64(uiChunkSize, (1 << uiLog2ContiguityGuarantee), &ui32Remainder);
+ if (ui32Remainder)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Bad chunk size, must be a multiple of the contiguity "
+ "(uiChunkSize = 0x%llx, uiLog2ContiguityGuarantee = %u)",
+ __FUNCTION__,
+ (unsigned long long) uiChunkSize,
+ uiLog2ContiguityGuarantee));
+ return PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE;
+ }
+ }
+
+ pvPMRLinAddr = OSAllocMem(sizeof(*psPMR) + sizeof(*psMappingTable) + sizeof(IMG_UINT32) * ui32NumVirtChunks);
+
+ if (pvPMRLinAddr == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psPMR = (PMR *) pvPMRLinAddr;
+ psMappingTable = (PMR_MAPPING_TABLE *) (((IMG_CHAR *) pvPMRLinAddr) + sizeof(*psPMR));
+
+ eError = OSLockCreate(&psPMR->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(psPMR);
+ return eError;
+ }
+
+ /* Setup the mapping table */
+ psMappingTable->uiChunkSize = uiChunkSize;
+ psMappingTable->ui32NumVirtChunks = ui32NumVirtChunks;
+ psMappingTable->ui32NumPhysChunks = ui32NumPhysChunks;
+ OSCachedMemSet(&psMappingTable->aui32Translation[0], 0xFF, sizeof(psMappingTable->aui32Translation[0])*
+ ui32NumVirtChunks);
+ for (i=0; i<ui32NumPhysChunks; i++)
+ {
+ ui32Temp = pui32MappingTable[i];
+ psMappingTable->aui32Translation[ui32Temp] = ui32Temp;
+ }
+
+ /* Setup the PMR */
+ OSAtomicWrite(&psPMR->iRefCount, 0);
+
+ /* If allocation is not made on demand, it will be backed now and
+ * backing will not be removed until the PMR is destroyed, therefore
+ * we can initialise the iLockCount to 1 rather than 0.
+ */
+ OSAtomicWrite(&psPMR->iLockCount, (PVRSRV_CHECK_ON_DEMAND(uiFlags) ? 0 : 1));
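+
+ /* For illustration: a PMR that is not backed on demand therefore has
+  * iLockCount = 1 on creation, 2 after the first Lock (when
+  * pfnLockPhysAddresses fires) and back to 1 after the last Unlock;
+  * an on-demand PMR runs 0 -> 1 -> 0 instead. */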
+
+ psPMR->psContext = psContext;
+ psPMR->uiLogicalSize = uiLogicalSize;
+ psPMR->uiLog2ContiguityGuarantee = uiLog2ContiguityGuarantee;
+ psPMR->uiFlags = uiFlags;
+ psPMR->psMappingTable = psMappingTable;
+ psPMR->bSparseAlloc = bSparse;
+ psPMR->bIsUnpinned = IMG_FALSE;
+ psPMR->szAnnotation[0] = '\0';
+
+#if defined(PVR_RI_DEBUG)
+ psPMR->hRIHandle = NULL;
+#endif
+
+ OSLockAcquire(psContext->hLock);
+ psPMR->uiKey = psContext->uiNextKey;
+ psPMR->uiSerialNum = psContext->uiNextSerialNum;
+ psContext->uiNextKey = (0x80200003 * psContext->uiNextKey)
+ ^ (0xf00f0081 * (uintptr_t)pvPMRLinAddr);
+ psContext->uiNextSerialNum++;
+ *ppsPMR = psPMR;
+ PVR_DPF((PVR_DBG_MESSAGE, "pmr.c: created PMR @0x%p", psPMR));
+ /* Increment live PMR count */
+ psContext->uiNumLivePMRs ++;
+ OSLockRelease(psContext->hLock);
+
+ return PVRSRV_OK;
+}
+
+/* This function returns true if the PMR is in use and false otherwise.
+ * It is not thread-safe, so the caller must ensure thread safety,
+ * e.g. by explicitly taking the PMR lock or through other means. */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR)
+{
+ return (OSAtomicRead(&psPMR->iRefCount) > 0);
+}
+
+static IMG_UINT32
+_Ref(PMR *psPMR)
+{
+ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) >= 0);
+ /* We need to ensure that this function is always executed under
+ * PMRLock. The only exception acceptable is the unloading of the driver.
+ */
+ return OSAtomicIncrement(&psPMR->iRefCount);
+}
+
+static IMG_UINT32
+_Unref(PMR *psPMR)
+{
+ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) > 0);
+ /* We need to ensure that this function is always executed under
+ * PMRLock. The only exception acceptable is the unloading of the driver.
+ */
+ return OSAtomicDecrement(&psPMR->iRefCount);
+}
+
+static void
+_UnrefAndMaybeDestroy(PMR *psPMR)
+{
+ PVRSRV_ERROR eError2;
+ struct _PMR_CTX_ *psCtx;
+ IMG_INT iRefCount;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ iRefCount = _Unref(psPMR);
+
+ if (iRefCount == 0)
+ {
+ if (psPMR->psFuncTab->pfnFinalize != NULL)
+ {
+ eError2 = psPMR->psFuncTab->pfnFinalize(psPMR->pvFlavourData);
+
+ /* PMR unref can be called asynchronously by the kernel or other
+ * third-party modules (e.g. display) which don't go through the
+ * usual services bridge. The same PMR can be referenced simultaneously
+ * in a different path that results in a race condition.
+ * Hence depending on the race condition, a factory may refuse to destroy
+ * the resource associated with this PMR if a reference on it was taken
+ * prior to unref. In that case the PMR factory function returns the error.
+ *
+ * When such an error is encountered, the factory needs to ensure the state
+ * associated with PMR is undisturbed. At this point we just bail out from
+ * freeing the PMR itself. The PMR handle will then be freed at a later point
+ * when the same PMR is unreferenced.
+ */
+ if (PVRSRV_ERROR_PMR_STILL_REFERENCED == eError2)
+ {
+ return;
+ }
+ PVR_ASSERT (eError2 == PVRSRV_OK); /* can we do better? */
+ }
+#if defined(PDUMP)
+ PDumpPMRFreePMR(psPMR,
+ psPMR->uiLogicalSize,
+ (1 << psPMR->uiLog2ContiguityGuarantee),
+ psPMR->uiLog2ContiguityGuarantee,
+ psPMR->hPDumpAllocHandle);
+#endif
+
+#if defined (PVRSRV_ENABLE_LINUX_MMAP_STATS)
+ /* This PMR is about to be destroyed; update its mmap stats record (if
+ * present) to avoid a dangling pointer. This is also required because
+ * mmap stats are identified by PMR and a new PMR might later get the
+ * same address as the one we're about to free; we'd like two distinct
+ * entries in the mmap stats for such cases. */
+ MMapStatsRemovePMR(psPMR);
+#endif
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ /* If not backed on demand, iLockCount should be 1 otherwise it should be 0 */
+ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Delete RI entry */
+ if (psPMR->hRIHandle)
+ {
+ eError = RIDeletePMREntryKM (psPMR->hRIHandle);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: RIDeletePMREntryKM failed: %s",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ /* continue destroying the PMR */
+ }
+ }
+ }
+#endif /* if defined(PVR_RI_DEBUG) */
+ psCtx = psPMR->psContext;
+
+ OSLockDestroy(psPMR->hLock);
+
+ OSFreeMem(psPMR);
+
+ /* Decrement live PMR count. Probably only of interest for debugging */
+ PVR_ASSERT(psCtx->uiNumLivePMRs > 0);
+
+ OSLockAcquire(psCtx->hLock);
+ psCtx->uiNumLivePMRs --;
+ OSLockRelease(psCtx->hLock);
+ }
+}
+
+static IMG_BOOL _PMRIsSparse(const PMR *psPMR)
+{
+ return psPMR->bSparseAlloc;
+}
+
+PVRSRV_ERROR
+PMRCreatePMR(PVRSRV_DEVICE_NODE *psDevNode,
+ PHYS_HEAP *psPhysHeap,
+ PMR_SIZE_T uiLogicalSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+ PMR_FLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ const PMR_IMPL_FUNCTAB *psFuncTab,
+ PMR_IMPL_PRIVDATA pvPrivData,
+ PMR_IMPL_TYPE eType,
+ PMR **ppsPMRPtr,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PMR *psPMR = NULL;
+ PVRSRV_ERROR eError;
+
+ eError = _PMRCreate(uiLogicalSize,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2ContiguityGuarantee,
+ uiFlags,
+ &psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ psPMR->psDevNode = psDevNode;
+ psPMR->psPhysHeap = psPhysHeap;
+ psPMR->psFuncTab = psFuncTab;
+ psPMR->pszPDumpDefaultMemspaceName = PhysHeapPDumpMemspaceName(psPhysHeap);
+ psPMR->pvFlavourData = pvPrivData;
+ psPMR->eFlavour = eType;
+ OSAtomicWrite(&psPMR->iRefCount, 1);
+
+ OSStringLCopy(psPMR->szAnnotation, pszAnnotation, DEVMEM_ANNOTATION_MAX_LEN);
+
+#if defined(PDUMP)
+ {
+ PMR_FLAGS_T uiFlags = psPMR->uiFlags;
+ IMG_BOOL bInitialise = IMG_FALSE;
+ IMG_UINT32 ui32InitValue = 0;
+
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags))
+ {
+ bInitialise = IMG_TRUE;
+ }
+ else if (PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags))
+ {
+ ui32InitValue = 0xDEADBEEF;
+ bInitialise = IMG_TRUE;
+ }
+
+ PDumpPMRMallocPMR(psPMR,
+ (uiChunkSize * ui32NumVirtChunks),
+ 1ULL<<uiLog2ContiguityGuarantee,
+ uiChunkSize,
+ ui32NumPhysChunks,
+ ui32NumVirtChunks,
+ pui32MappingTable,
+ uiLog2ContiguityGuarantee,
+ bInitialise,
+ ui32InitValue,
+ &psPMR->hPDumpAllocHandle,
+ ui32PDumpFlags);
+ }
+#endif
+
+ *ppsPMRPtr = psPMR;
+
+ return PVRSRV_OK;
+
+ /*
+ * error exit paths follow
+ */
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR PMRLockSysPhysAddressesNested(PMR *psPMR,
+ IMG_UINT32 ui32NestingLevel)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ /* Note: taking this lock is not required to protect the PMR reference count,
+ * because the PMR reference count is atomic.
+ * Rather, taking the lock here guarantees that no caller will exit this function
+ * without the underlying physical addresses being locked.
+ */
+ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+ /* We also count the locks as references, so that the PMR is not
+ freed while someone is using a physical address. */
+ /* "lock" here simply means incrementing the refcount. It means
+ the refcount is multipurpose, but that's okay. We only have to
+ promise that physical addresses are valid after this point, and
+ remain valid until the corresponding
+ PMRUnlockSysPhysAddressesOSMem() */
+ _Ref(psPMR);
+
+ /* Also count locks separately from other types of references, to
+ allow for debug assertions */
+
+ /* Only call callback if lockcount transitions from 0 to 1 (or 1 to 2 if not backed on demand) */
+ if (OSAtomicIncrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 1 : 2))
+ {
+ if (psPMR->psFuncTab->pfnLockPhysAddresses != NULL)
+ {
+ /* must always have lock and unlock in pairs! */
+ PVR_ASSERT(psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL);
+
+ eError = psPMR->psFuncTab->pfnLockPhysAddresses(psPMR->pvFlavourData);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ }
+ }
+ OSLockRelease(psPMR->hLock);
+
+ return PVRSRV_OK;
+
+ e1:
+ OSAtomicDecrement(&psPMR->iLockCount);
+ _Unref(psPMR);
+ PVR_ASSERT(OSAtomicRead(&psPMR->iRefCount) != 0);
+ OSLockRelease(psPMR->hLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR)
+{
+ return PMRLockSysPhysAddressesNested(psPMR, 0);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR)
+{
+ return PMRUnlockSysPhysAddressesNested(psPMR, 2);
+}
+
+PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ /* Acquiring the lock here, as in the Lock operation, ensures that the
+ * lock count hitting zero and the unlocking of the physical addresses
+ * happen as one atomic operation.
+ */
+ OSLockAcquireNested(psPMR->hLock, ui32NestingLevel);
+ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+
+ if (OSAtomicDecrement(&psPMR->iLockCount) == (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1))
+ {
+ if (psPMR->psFuncTab->pfnUnlockPhysAddresses != NULL)
+ {
+ PVR_ASSERT(psPMR->psFuncTab->pfnLockPhysAddresses != NULL);
+
+ eError = psPMR->psFuncTab->pfnUnlockPhysAddresses(psPMR->pvFlavourData);
+ /* must never fail */
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ }
+
+ OSLockRelease(psPMR->hLock);
+
+ /* We also count the locks as references, so that the PMR is not
+ freed while someone is using a physical address. */
+ _UnrefAndMaybeDestroy(psPMR);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ OSLockAcquire(psPMR->hLock);
+ /* Stop if we still have references on the PMR */
+ if ( ( bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 2))
+ || (!bDevMapped && (OSAtomicRead(&psPMR->iRefCount) > 1)) )
+ {
+ OSLockRelease(psPMR->hLock);
+ PVR_DPF((PVR_DBG_ERROR,
+		         "%s: PMR is still referenced %u times. "
+		         "This PMR is probably exported or used somewhere else. "
+		         "Two references are allowed if it is mapped to a device, otherwise one.",
+ __func__,
+ OSAtomicRead(&psPMR->iRefCount)));
+
+ eError = PVRSRV_ERROR_PMR_STILL_REFERENCED;
+ goto e_exit;
+ }
+ OSLockRelease(psPMR->hLock);
+
+ if (psPMR->psFuncTab->pfnUnpinMem != NULL)
+ {
+ eError = psPMR->psFuncTab->pfnUnpinMem(psPMR->pvFlavourData);
+ if (eError == PVRSRV_OK)
+ {
+ psPMR->bIsUnpinned = IMG_TRUE;
+ }
+ }
+
+ e_exit:
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRPinPMR(PMR *psPMR)
+{
+	PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ if (psPMR->psFuncTab->pfnPinMem != NULL)
+ {
+ eError = psPMR->psFuncTab->pfnPinMem(psPMR->pvFlavourData,
+ psPMR->psMappingTable);
+ if (eError == PVRSRV_OK)
+ {
+ psPMR->bIsUnpinned = IMG_FALSE;
+ }
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+ PMR **ppsPMR)
+{
+ PMRRefPMR(psPMR);
+ *ppsPMR = psPMR;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR)
+{
+ PMRUnrefPMR(psPMR);
+ return PVRSRV_OK;
+}
+
+/*
+	Note:
+	We pass the PMR back as it was passed in, but under a different handle
+	type (DEVMEM_MEM_IMPORT); this allows us to change the import structure
+	type if we should ever need to embed any metadata in it.
+ */
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ _Ref(psPMR);
+
+ /* Return the PMR */
+ *ppsPMR = psPMR;
+ *puiSize = psPMR->uiLogicalSize;
+ *puiAlign = 1ULL << psPMR->uiLog2ContiguityGuarantee;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+ IMG_UINT64 *pui64UID)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ *pui64UID = psPMR->uiSerialNum;
+
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+ PMR_EXPORT **ppsPMRExportPtr,
+ PMR_SIZE_T *puiSize,
+ PMR_LOG2ALIGN_T *puiLog2Contig,
+ PMR_PASSWORD_T *puiPassword)
+{
+ IMG_UINT64 uiPassword;
+ PMR_EXPORT *psPMRExport;
+
+ uiPassword = psPMR->uiKey;
+
+ psPMRExport = OSAllocMem(sizeof(*psPMRExport));
+ if (psPMRExport == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psPMRExport->psPMR = psPMR;
+ _Ref(psPMR);
+
+ *ppsPMRExportPtr = psPMRExport;
+ *puiSize = psPMR->uiLogicalSize;
+ *puiLog2Contig = psPMR->uiLog2ContiguityGuarantee;
+ *puiPassword = uiPassword;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+ PVR_ASSERT(psPMRExport != NULL);
+ PVR_ASSERT(psPMRExport->psPMR != NULL);
+ PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+ _UnrefAndMaybeDestroy(psPMRExport->psPMR);
+
+ OSFreeMem(psPMRExport);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR)
+{
+ PMR *psPMR;
+
+ PVR_ASSERT(OSAtomicRead(&psPMRExport->psPMR->iRefCount) > 0);
+
+ psPMR = psPMRExport->psPMR;
+
+
+ if (psPMR->uiKey != uiPassword)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PMRImport: Import failed, password specified does not match the export\n"));
+ return PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR;
+ }
+
+ if (psPMR->uiLogicalSize != uiSize || psPMR->uiLog2ContiguityGuarantee != uiLog2Contig)
+ {
+ return PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES;
+ }
+
+ _Ref(psPMR);
+
+ *ppsPMR = psPMR;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+
+ return PVRSRV_OK;
+}
+
+#else /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+ PMR_EXPORT **ppsPMRExportPtr,
+ PMR_SIZE_T *puiSize,
+ PMR_LOG2ALIGN_T *puiLog2Contig,
+ PMR_PASSWORD_T *puiPassword)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(ppsPMRExportPtr);
+ PVR_UNREFERENCED_PARAMETER(puiSize);
+ PVR_UNREFERENCED_PARAMETER(puiLog2Contig);
+ PVR_UNREFERENCED_PARAMETER(puiPassword);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMRExport);
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMRExport);
+ PVR_UNREFERENCED_PARAMETER(uiPassword);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Contig);
+ PVR_UNREFERENCED_PARAMETER(ppsPMR);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ return PVRSRV_OK;
+}
+#endif /* if defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR PMRSecureUnexportPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReleaseSecurePMR(void *psExport)
+{
+ return PMRSecureUnexportPMR(psExport);
+}
+
+PVRSRV_ERROR PMRSecureExportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ PMR *psPMR,
+ IMG_SECURE_TYPE *phSecure,
+ PMR **ppsPMR,
+ CONNECTION_DATA **ppsSecureConnection)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(ppsSecureConnection);
+
+	/* We acquire a reference to the PMR here because OSSecureExport
+	 * releases the bridge lock and the PMR lock for a moment, and we don't
+	 * want the PMR to be removed by another thread in the meantime. */
+ _Ref(psPMR);
+
+ eError = OSSecureExport("secure_pmr",
+ _ReleaseSecurePMR,
+ (void *) psPMR,
+ phSecure);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ *ppsPMR = psPMR;
+
+ return PVRSRV_OK;
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ _UnrefAndMaybeDestroy(psPMR);
+ return eError;
+}
+
+PVRSRV_ERROR PMRSecureImportPMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_SECURE_TYPE hSecure,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign)
+{
+ PVRSRV_ERROR eError;
+ PMR *psPMR;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = OSSecureImport(hSecure, (void **) &psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ if (psPMR->psDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMR invalid for this device\n", __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ _Ref(psPMR);
+
+ /* Return the PMR */
+ *ppsPMR = psPMR;
+ *puiSize = psPMR->uiLogicalSize;
+ *puiAlign = 1 << psPMR->uiLog2ContiguityGuarantee;
+ return PVRSRV_OK;
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR PMRSecureUnimportPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+ return PVRSRV_OK;
+}
+#endif
+
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+ void *hRIHandle)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ psPMR->hRIHandle = hRIHandle;
+ return PVRSRV_OK;
+}
+#endif
+
+static PVRSRV_ERROR
+_PMRAcquireKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut,
+ IMG_BOOL bMapSparse)
+{
+ PVRSRV_ERROR eError;
+ void *pvKernelAddress;
+ IMG_HANDLE hPriv;
+
+ PVR_ASSERT(psPMR != NULL);
+
+ if (_PMRIsSparse(psPMR) && !bMapSparse)
+ {
+		/* Generally we don't support mapping of sparse allocations, but if
+		   there is a justified need it can be done by passing IMG_TRUE in
+		   bMapSparse. Although the callback is supported by the PMR, it will
+		   always map the physical memory 1:1, as sparseness issues are
+		   handled here in the core */
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ /* Acquire/Release functions must be overridden in pairs */
+ if (psPMR->psFuncTab->pfnAcquireKernelMappingData == NULL)
+ {
+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData == NULL);
+
+ /* If PMR implementation does not supply this pair of
+ functions, it means they do not permit the PMR to be mapped
+ into kernel memory at all */
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ goto e0;
+ }
+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+ uiLogicalOffset,
+ uiSize,
+ &pvKernelAddress,
+ &hPriv,
+ psPMR->uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ *ppvKernelAddressOut = pvKernelAddress;
+ if (uiSize == 0)
+ {
+ /* Zero size means map the whole PMR in ...*/
+ *puiLengthOut = (size_t)psPMR->uiLogicalSize;
+ }
+ else if (uiSize > (1 << psPMR->uiLog2ContiguityGuarantee))
+ {
+ /* ... map in the requested pages ...*/
+ *puiLengthOut = uiSize;
+ }
+ else
+ {
+ /* ... otherwise we just map in one page */
+ *puiLengthOut = 1 << psPMR->uiLog2ContiguityGuarantee;
+ }
+ *phPrivOut = hPriv;
+
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut)
+{
+ return _PMRAcquireKernelMappingData(psPMR,
+ uiLogicalOffset,
+ uiSize,
+ ppvKernelAddressOut,
+ puiLengthOut,
+ phPrivOut,
+ IMG_FALSE);
+}
+
+PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut)
+{
+ return _PMRAcquireKernelMappingData(psPMR,
+ uiLogicalOffset,
+ uiSize,
+ ppvKernelAddressOut,
+ puiLengthOut,
+ phPrivOut,
+ IMG_TRUE);
+}
+
+PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+ IMG_HANDLE hPriv)
+{
+ PVR_ASSERT (psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL);
+ PVR_ASSERT (psPMR->psFuncTab->pfnReleaseKernelMappingData != NULL);
+
+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+ hPriv);
+
+ return PVRSRV_OK;
+}
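+
+/* Illustrative acquire/release pairing (hypothetical caller, error handling
+ * elided). Passing uiSize == 0 asks for the whole PMR to be mapped, as
+ * handled in _PMRAcquireKernelMappingData() above:
+ *
+ *   void *pvAddr;
+ *   size_t uiLen;
+ *   IMG_HANDLE hPriv;
+ *
+ *   eError = PMRAcquireKernelMappingData(psPMR, 0, 0,
+ *                                        &pvAddr, &uiLen, &hPriv);
+ *   // ... access the first uiLen bytes at pvAddr ...
+ *   PMRReleaseKernelMappingData(psPMR, hPriv);
+ */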
+
+#if defined(INTEGRITY_OS)
+
+PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+ IMG_HANDLE *phMemObj,
+ void **pvClientAddr,
+ IMG_HANDLE *phPrivOut)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_HANDLE hPriv = *phPrivOut;
+
+ PVR_ASSERT (psPMR->psFuncTab->pfnMapMemoryObject != NULL);
+
+ eError = psPMR->psFuncTab->pfnMapMemoryObject(hPriv, phMemObj, pvClientAddr, phPrivOut);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+ IMG_HANDLE hPriv)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT (psPMR->psFuncTab->pfnUnmapMemoryObject != NULL);
+
+ eError = psPMR->psFuncTab->pfnUnmapMemoryObject(hPriv);
+
+ return eError;
+}
+
+#if defined(USING_HYPERVISOR)
+IMG_HANDLE PMRGetPmr(PMR *psPMR, size_t ulOffset)
+{
+ PVR_ASSERT(psPMR->psFuncTab->pfnGetPmr != NULL);
+ return psPMR->psFuncTab->pfnGetPmr(psPMR->pvFlavourData, ulOffset);
+}
+#endif
+#endif /* INTEGRITY_OS */
+
+/*
+	_PMRLogicalOffsetToPhysicalOffset
+
+	Translate between the "logical" offset which the upper levels
+	provide and the physical offset which is what the PMR
+	factories work on.
+
+	As well as returning the physical offset, we return the number of
+	bytes remaining until the next chunk and whether this chunk is valid.
+
+	For multi-page operations the upper layers pass in their
+	Log2PageSize; otherwise the argument is redundant and set to zero.
+ */
+
+static void
+_PMRLogicalOffsetToPhysicalOffset(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset,
+ IMG_UINT32 *pui32BytesRemain,
+ IMG_BOOL *bValid)
+{
+ PMR_MAPPING_TABLE *psMappingTable = psPMR->psMappingTable;
+ IMG_DEVMEM_OFFSET_T uiPageSize = 1ULL << ui32Log2PageSize;
+ IMG_DEVMEM_OFFSET_T uiOffset = uiLogicalOffset;
+ IMG_UINT64 ui64ChunkIndex;
+ IMG_UINT32 ui32Remain;
+ IMG_UINT32 idx;
+
+ /* Must be translating at least a page */
+ PVR_ASSERT(ui32NumOfPages);
+
+ if (psMappingTable->ui32NumPhysChunks == psMappingTable->ui32NumVirtChunks)
+ {
+ /* Fast path the common case, as logical and physical offsets are
+ equal we assume the ui32NumOfPages span is also valid */
+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiOffset);
+ puiPhysicalOffset[0] = uiOffset;
+ bValid[0] = IMG_TRUE;
+
+ if (ui32NumOfPages > 1)
+ {
+ /* initial offset may not be page aligned, round down */
+ uiOffset &= ~(uiPageSize-1);
+ for (idx=1; idx < ui32NumOfPages; idx++)
+ {
+ uiOffset += uiPageSize;
+ puiPhysicalOffset[idx] = uiOffset;
+ bValid[idx] = IMG_TRUE;
+ }
+ }
+ }
+ else
+ {
+ for (idx=0; idx < ui32NumOfPages; idx++)
+ {
+ ui64ChunkIndex = OSDivide64r64(
+ uiOffset,
+ TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize),
+ &ui32Remain);
+
+ if (psMappingTable->aui32Translation[ui64ChunkIndex] == TRANSLATION_INVALID)
+ {
+ bValid[idx] = IMG_FALSE;
+ }
+ else
+ {
+ bValid[idx] = IMG_TRUE;
+ }
+
+ if (idx == 0)
+ {
+ if (ui32Remain == 0)
+ {
+ /* Start of chunk so return the chunk size */
+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize);
+ }
+ else
+ {
+ *pui32BytesRemain = TRUNCATE_64BITS_TO_32BITS(psMappingTable->uiChunkSize - ui32Remain);
+ }
+
+ puiPhysicalOffset[idx] = (psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize) + ui32Remain;
+
+ /* initial offset may not be page aligned, round down */
+ uiOffset &= ~(uiPageSize-1);
+ }
+ else
+ {
+ puiPhysicalOffset[idx] = psMappingTable->aui32Translation[ui64ChunkIndex] * psMappingTable->uiChunkSize + ui32Remain;
+ }
+ uiOffset += uiPageSize;
+ }
+ }
+}
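+
+/* Worked example (illustrative numbers): with uiChunkSize == 0x1000 and a
+ * translation table mapping virtual chunk 2 to physical chunk 0, a logical
+ * offset of 0x2010 gives chunk index 2 and remainder 0x10, so the physical
+ * offset is 0 * 0x1000 + 0x10 == 0x10 and the bytes remaining until the
+ * next chunk are 0x1000 - 0x10 == 0xFF0. */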
+
+static PVRSRV_ERROR
+_PMR_ReadBytesPhysical(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError;
+
+ if (psPMR->psFuncTab->pfnReadBytes != NULL)
+ {
+ /* defer to callback if present */
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = psPMR->psFuncTab->pfnReadBytes(psPMR->pvFlavourData,
+ uiPhysicalOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes);
+ PMRUnlockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ }
+ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+ {
+ /* "default" handler for reading bytes */
+
+ IMG_HANDLE hKernelMappingHandle;
+ IMG_UINT8 *pcKernelAddress;
+
+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+ (size_t) uiPhysicalOffset,
+ uiBufSz,
+ (void **)&pcKernelAddress,
+ &hKernelMappingHandle,
+ psPMR->uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Use the conservative 'DeviceMemCopy' here because we can't know
+ * if this PMR will be mapped cached.
+ */
+
+ OSDeviceMemCopy(&pcBuffer[0], pcKernelAddress, uiBufSz);
+ *puiNumBytes = uiBufSz;
+
+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+ hKernelMappingHandle);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMR_ReadBytes: can't read from this PMR"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ OSPanic();
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+ size_t uiBytesCopied = 0;
+
+ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+ {
+ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+ }
+ PVR_ASSERT(uiBufSz > 0);
+ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+ /*
+ PMR implementations can override this. If they don't, a
+ "default" handler uses kernel virtual mappings. If the kernel
+ can't provide a kernel virtual mapping, this function fails
+ */
+ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+ psPMR->psFuncTab->pfnReadBytes != NULL);
+
+ while (uiBytesCopied != uiBufSz)
+ {
+ IMG_UINT32 ui32Remain;
+ size_t uiBytesToCopy;
+ size_t uiRead;
+ IMG_BOOL bValid;
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 0,
+ 1,
+ uiLogicalOffset,
+ &uiPhysicalOffset,
+ &ui32Remain,
+ &bValid);
+		/*
+		 Copy until either the end of the
+		 chunk or the end of the buffer
+		*/
+ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+ if (bValid)
+ {
+ /* Read the data from the PMR */
+ eError = _PMR_ReadBytesPhysical(psPMR,
+ uiPhysicalOffset,
+ &pcBuffer[uiBytesCopied],
+ uiBytesToCopy,
+ &uiRead);
+ if ((eError != PVRSRV_OK) || (uiRead != uiBytesToCopy))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to read chunk (eError = %s, uiRead = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError),
+ uiRead,
+ uiBytesToCopy));
+ /* Bail out as soon as we hit an error */
+ break;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Invalid phys offset at logical offset (" IMG_DEVMEM_OFFSET_FMTSPEC ") logical size (" IMG_DEVMEM_OFFSET_FMTSPEC ")",
+ __FUNCTION__,
+ uiLogicalOffset,
+ psPMR->uiLogicalSize));
+ /* Fill invalid chunks with 0 */
+ OSCachedMemSet(&pcBuffer[uiBytesCopied], 0, uiBytesToCopy);
+ uiRead = uiBytesToCopy;
+ eError = PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR;
+ }
+ uiLogicalOffset += uiRead;
+ uiBytesCopied += uiRead;
+ }
+
+ *puiNumBytes = uiBytesCopied;
+ return eError;
+}
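+
+/* Illustrative call (buffer and names hypothetical): read the first 16 bytes
+ * of a PMR; on return uiNumBytes reports how much was actually copied, which
+ * may be less than requested if an error interrupted the chunk loop above:
+ *
+ *   IMG_UINT8 aui8Buf[16];
+ *   size_t uiNumBytes;
+ *   eError = PMR_ReadBytes(psPMR, 0, aui8Buf, sizeof(aui8Buf), &uiNumBytes);
+ */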
+
+static PVRSRV_ERROR
+_PMR_WriteBytesPhysical(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError;
+
+ if (psPMR->psFuncTab->pfnWriteBytes != NULL)
+ {
+ /* defer to callback if present */
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = psPMR->psFuncTab->pfnWriteBytes(psPMR->pvFlavourData,
+ uiPhysicalOffset,
+ pcBuffer,
+ uiBufSz,
+ puiNumBytes);
+ PMRUnlockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ }
+ else if (psPMR->psFuncTab->pfnAcquireKernelMappingData)
+ {
+		/* "default" handler for writing bytes */
+
+ IMG_HANDLE hKernelMappingHandle;
+ IMG_UINT8 *pcKernelAddress;
+
+ eError = psPMR->psFuncTab->pfnAcquireKernelMappingData(psPMR->pvFlavourData,
+ (size_t) uiPhysicalOffset,
+ uiBufSz,
+ (void **)&pcKernelAddress,
+ &hKernelMappingHandle,
+ psPMR->uiFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Use the conservative 'DeviceMemCopy' here because we can't know
+ * if this PMR will be mapped cached.
+ */
+
+ OSDeviceMemCopy(pcKernelAddress, &pcBuffer[0], uiBufSz);
+ *puiNumBytes = uiBufSz;
+
+ psPMR->psFuncTab->pfnReleaseKernelMappingData(psPMR->pvFlavourData,
+ hKernelMappingHandle);
+ }
+ else
+ {
+ /*
+ The write callback is optional as it's only required by the debug
+ tools
+ */
+ PVR_DPF((PVR_DBG_ERROR, "_PMR_WriteBytesPhysical: can't write to this PMR"));
+ eError = PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ OSPanic();
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+ size_t uiBytesCopied = 0;
+
+ if (uiLogicalOffset + uiBufSz > psPMR->uiLogicalSize)
+ {
+ uiBufSz = TRUNCATE_64BITS_TO_32BITS(psPMR->uiLogicalSize - uiLogicalOffset);
+ }
+ PVR_ASSERT(uiBufSz > 0);
+ PVR_ASSERT(uiBufSz <= psPMR->uiLogicalSize);
+
+ /*
+ PMR implementations can override this. If they don't, a
+ "default" handler uses kernel virtual mappings. If the kernel
+ can't provide a kernel virtual mapping, this function fails
+ */
+ PVR_ASSERT(psPMR->psFuncTab->pfnAcquireKernelMappingData != NULL ||
+ psPMR->psFuncTab->pfnWriteBytes != NULL);
+
+ while (uiBytesCopied != uiBufSz)
+ {
+ IMG_UINT32 ui32Remain;
+ size_t uiBytesToCopy;
+ size_t uiWrite;
+ IMG_BOOL bValid;
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 0,
+ 1,
+ uiLogicalOffset,
+ &uiPhysicalOffset,
+ &ui32Remain,
+ &bValid);
+
+		/*
+		 Copy until either the end of the
+		 chunk or the end of the buffer
+		*/
+ uiBytesToCopy = MIN(uiBufSz - uiBytesCopied, ui32Remain);
+
+ if (bValid)
+ {
+ /* Write the data to the PMR */
+ eError = _PMR_WriteBytesPhysical(psPMR,
+ uiPhysicalOffset,
+ &pcBuffer[uiBytesCopied],
+ uiBytesToCopy,
+ &uiWrite);
+ if ((eError != PVRSRV_OK) || (uiWrite != uiBytesToCopy))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+				         "%s: Failed to write chunk (eError = %s, uiWrite = " IMG_SIZE_FMTSPEC " uiBytesToCopy = " IMG_SIZE_FMTSPEC ")",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError),
+ uiWrite,
+ uiBytesToCopy));
+ /* Bail out as soon as we hit an error */
+ break;
+ }
+ }
+ else
+ {
+ /* Ignore writes to invalid pages */
+ uiWrite = uiBytesToCopy;
+ }
+ uiLogicalOffset += uiWrite;
+ uiBytesCopied += uiWrite;
+ }
+
+ *puiNumBytes = uiBytesCopied;
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+ if (psPMR->psFuncTab->pfnMMap)
+ {
+ return psPMR->psFuncTab->pfnMMap(psPMR->pvFlavourData, psPMR, pOSMMapData);
+ }
+
+ return OSMMapPMRGeneric(psPMR, pOSMMapData);
+}
+
+void
+PMRRefPMR(PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ _Ref(psPMR);
+}
+
+PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR)
+{
+ _UnrefAndMaybeDestroy(psPMR);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR)
+{
+ PMRUnlockSysPhysAddresses(psPMR);
+
+ PMRUnrefPMR(psPMR);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_DEVICE_NODE *
+PMR_DeviceNode(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return psPMR->psDevNode;
+}
+
+PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return psPMR->uiFlags;
+}
+
+IMG_BOOL
+PMR_IsSparse(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return _PMRIsSparse(psPMR);
+}
+
+IMG_BOOL
+PMR_IsUnpinned(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ return psPMR->bIsUnpinned;
+}
+
+PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T *puiLogicalSize)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ *puiLogicalSize = psPMR->uiLogicalSize;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMR_PhysicalSize(const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T *puiPhysicalSize)
+{
+ PVR_ASSERT(psPMR != NULL);
+
+ /* iLockCount will be > 0 for any backed PMR (backed on demand or not) */
+ if ((OSAtomicRead(&psPMR->iLockCount) > 0) && !psPMR->bIsUnpinned)
+ {
+ if (psPMR->bSparseAlloc)
+ {
+ *puiPhysicalSize = psPMR->psMappingTable->uiChunkSize * psPMR->psMappingTable->ui32NumPhysChunks;
+ }
+ else
+ {
+ *puiPhysicalSize = psPMR->uiLogicalSize;
+ }
+ }
+ else
+ {
+ *puiPhysicalSize = 0;
+ }
+ return PVRSRV_OK;
+}
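+
+/* Example (illustrative numbers): a sparse PMR with uiChunkSize == 64 KiB and
+ * ui32NumPhysChunks == 3 reports a physical size of 192 KiB here, regardless
+ * of how large its logical (virtual) size is; an unbacked or unpinned PMR
+ * reports 0. */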
+
+PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR)
+{
+ return psPMR->psPhysHeap;
+}
+
+PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_BOOL *pbValid)
+{
+ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_UINT32 aui32BytesRemain[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+ IMG_UINT32 *pui32BytesRemain = aui32BytesRemain;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_ASSERT(psPMR != NULL);
+ PVR_ASSERT(psPMR->uiLogicalSize >= uiLogicalOffset);
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+ if (puiPhysicalOffset == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ pui32BytesRemain = OSAllocMem(ui32NumOfPages * sizeof(IMG_UINT32));
+ if (pui32BytesRemain == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ uiLogicalOffset,
+ puiPhysicalOffset,
+ pui32BytesRemain,
+ pbValid);
+
+ e0:
+ if (puiPhysicalOffset != auiPhysicalOffset && puiPhysicalOffset != NULL)
+ {
+ OSFreeMem(puiPhysicalOffset);
+ }
+
+ if (pui32BytesRemain != aui32BytesRemain && pui32BytesRemain != NULL)
+ {
+ OSFreeMem(pui32BytesRemain);
+ }
+
+ return eError;
+}
+
+PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR)
+{
+	PVR_ASSERT(psPMR != NULL);
+	return psPMR->psMappingTable;
+}
+
+IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return psPMR->uiLog2ContiguityGuarantee;
+}
+
+const IMG_CHAR *
+PMR_GetAnnotation(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return psPMR->szAnnotation;
+}
+
+PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR)
+{
+ PVR_ASSERT(psPMR != NULL);
+ return psPMR->eFlavour;
+}
+
+/* must have called PMRLockSysPhysAddresses() before calling this! */
+PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEV_PHYADDR *psDevAddrPtr,
+ IMG_BOOL *pbValid)
+{
+ IMG_UINT32 ui32Remain;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_DEVMEM_OFFSET_T auiPhysicalOffset[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEVMEM_OFFSET_T *puiPhysicalOffset = auiPhysicalOffset;
+
+ PVR_ASSERT(psPMR != NULL);
+ PVR_ASSERT(ui32NumOfPages > 0);
+ PVR_ASSERT(psPMR->psFuncTab->pfnDevPhysAddr != NULL);
+
+#ifdef PVRSRV_NEED_PVR_ASSERT
+ PVR_ASSERT(OSAtomicRead(&psPMR->iLockCount) > (PVRSRV_CHECK_ON_DEMAND(psPMR->uiFlags) ? 0 : 1));
+#endif
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ puiPhysicalOffset = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEVMEM_OFFSET_T));
+ if (puiPhysicalOffset == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ uiLogicalOffset,
+ puiPhysicalOffset,
+ &ui32Remain,
+ pbValid);
+ if (*pbValid || _PMRIsSparse(psPMR))
+ {
+ /* Sparse PMR may not always have the first page valid */
+ eError = psPMR->psFuncTab->pfnDevPhysAddr(psPMR->pvFlavourData,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ puiPhysicalOffset,
+ pbValid,
+ psDevAddrPtr);
+#if defined(PVR_PMR_TRANSLATE_UMA_ADDRESSES)
+	/* Currently excluded from the default build because of performance
+	 * concerns. Not all systems need this, because on most the GPU has the
+	 * same view of system RAM as the CPU. Alternatively, this could be
+	 * implemented directly in the PMR factories */
+
+ if (PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_UMA ||
+ PhysHeapGetType(psPMR->psPhysHeap) == PHYS_HEAP_TYPE_DMA)
+ {
+ IMG_UINT32 i;
+ IMG_DEV_PHYADDR sDevPAddrCorrected;
+
+ /* Copy the translated addresses to the correct array */
+ for (i = 0; i < ui32NumOfPages; i++)
+ {
+ PhysHeapCpuPAddrToDevPAddr(psPMR->psPhysHeap,
+ 1,
+ &sDevPAddrCorrected,
+ (IMG_CPU_PHYADDR *) &psDevAddrPtr[i]);
+ psDevAddrPtr[i].uiAddr = sDevPAddrCorrected.uiAddr;
+ }
+
+ }
+#endif
+ }
+
+ if (puiPhysicalOffset != auiPhysicalOffset)
+ {
+ OSFreeMem(puiPhysicalOffset);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_CPU_PHYADDR *psCpuAddrPtr,
+ IMG_BOOL *pbValid)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *psDevPAddr = asDevPAddr;
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psDevPAddr = OSAllocMem(ui32NumOfPages * sizeof(IMG_DEV_PHYADDR));
+ if (psDevPAddr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ }
+
+ eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, ui32NumOfPages,
+ uiLogicalOffset, psDevPAddr, pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ PhysHeapDevPAddrToCpuPAddr(psPMR->psPhysHeap, ui32NumOfPages, psCpuAddrPtr, psDevPAddr);
+
+ if (ui32NumOfPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ OSFreeMem(psDevPAddr);
+ }
+
+ return PVRSRV_OK;
+ e1:
+ if (psDevPAddr != asDevPAddr)
+ {
+ OSFreeMem(psDevPAddr);
+ }
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
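+
+/* Note: PMR_CpuPhysAddr() is layered on PMR_DevPhysAddr() plus the heap's
+ * device-to-CPU translation, so the same rule applies: the caller must hold
+ * the PMR's physical addresses locked via PMRLockSysPhysAddresses(). */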
+
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiSparseFlags)
+{
+ PVRSRV_ERROR eError;
+
+ if (NULL == psPMR->psFuncTab->pfnChangeSparseMem)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: This type of sparse PMR cannot be changed.",
+ __func__));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psPMR->psFuncTab->pfnChangeSparseMem(psPMR->pvFlavourData,
+ psPMR,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ uiSparseFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+#if defined(PDUMP)
+ {
+ IMG_BOOL bInitialise = IMG_FALSE;
+ IMG_UINT32 ui32InitValue = 0;
+
+ if (PVRSRV_CHECK_ZERO_ON_ALLOC(PMR_Flags(psPMR)))
+ {
+ bInitialise = IMG_TRUE;
+ }
+ else if (PVRSRV_CHECK_POISON_ON_ALLOC(PMR_Flags(psPMR)))
+ {
+ ui32InitValue = 0xDEADBEEF;
+ bInitialise = IMG_TRUE;
+ }
+
+ PDumpPMRChangeSparsePMR(psPMR,
+ 1 << psPMR->uiLog2ContiguityGuarantee,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices,
+ bInitialise,
+ ui32InitValue,
+ &psPMR->hPDumpAllocHandle);
+ }
+
+#endif
+
+ e0:
+ return eError;
+}
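+
+/* Illustrative use (hypothetical indices): to back pages 0 and 3 of a sparse
+ * PMR while releasing page 1, a caller would pass ui32AllocPageCount == 2,
+ * pai32AllocIndices == {0, 3}, ui32FreePageCount == 1 and
+ * pai32FreeIndices == {1}; the real work is delegated to the PMR factory's
+ * pfnChangeSparseMem callback. */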
+
+
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices)
+{
+ PVRSRV_ERROR eError;
+
+ if ((NULL == psPMR->psFuncTab) ||
+ (NULL == psPMR->psFuncTab->pfnChangeSparseMemCPUMap))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: This type of sparse PMR cannot be changed.",
+ __func__));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psPMR->psFuncTab->pfnChangeSparseMemCPUMap(psPMR->pvFlavourData,
+ psPMR,
+ sCpuVAddrBase,
+ ui32AllocPageCount,
+ pai32AllocIndices,
+ ui32FreePageCount,
+ pai32FreeIndices);
+
+ return eError;
+}
+
+
+
+#if defined(PDUMP)
+
+static PVRSRV_ERROR
+_PMR_PDumpSymbolicAddrPhysical(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *pszMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+ if (DevmemCPUCacheCoherency(psPMR->psDevNode, psPMR->uiFlags) ||
+ DevmemDeviceCacheCoherency(psPMR->psDevNode, psPMR->uiFlags))
+ {
+ OSSNPrintf(pszMemspaceName,
+ ui32MemspaceNameLen,
+ PMR_MEMSPACE_CACHE_COHERENT_FMTSPEC,
+ psPMR->pszPDumpDefaultMemspaceName);
+ }
+ else
+ {
+ OSSNPrintf(pszMemspaceName, ui32MemspaceNameLen, PMR_MEMSPACE_FMTSPEC,
+ psPMR->pszPDumpDefaultMemspaceName);
+ }
+
+ OSSNPrintf(pszSymbolicAddr,
+ ui32SymbolicAddrLen,
+ PMR_SYMBOLICADDR_FMTSPEC,
+ PMR_DEFAULT_PREFIX,
+ psPMR->uiSerialNum,
+ uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR),
+ psPMR->szAnnotation);
+ PDumpMakeStringValid(pszSymbolicAddr, OSStringLength(pszSymbolicAddr));
+
+
+ *puiNewOffset = uiPhysicalOffset & ((1 << PMR_GetLog2Contiguity(psPMR))-1);
+ *puiNextSymName = (IMG_DEVMEM_OFFSET_T) (((uiPhysicalOffset >> PMR_GetLog2Contiguity(psPMR))+1)
+ << PMR_GetLog2Contiguity(psPMR));
+
+ return PVRSRV_OK;
+}
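+
+/* The symbolic name produced above encodes the PMR's serial number, the
+ * contiguity-sized block index (uiPhysicalOffset >> log2 contiguity) and the
+ * annotation; the exact layout is whatever PMR_SYMBOLICADDR_FMTSPEC expands
+ * to, so scripts should treat the string as opaque. */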
+
+
+PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32MemspaceNameLen,
+ IMG_CHAR *pszMemspaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName
+)
+{
+ IMG_DEVMEM_OFFSET_T uiPhysicalOffset;
+ IMG_UINT32 ui32Remain;
+ IMG_BOOL bValid;
+
+ PVR_ASSERT(uiLogicalOffset < psPMR->uiLogicalSize);
+
+ _PMRLogicalOffsetToPhysicalOffset(psPMR,
+ 0,
+ 1,
+ uiLogicalOffset,
+ &uiPhysicalOffset,
+ &ui32Remain,
+ &bValid);
+
+ if (!bValid)
+ {
+		/* For sparse allocations there may be no physical memory backing a
+		 * given logical address, but the virtual range can still be valid.
+		 */
+ uiPhysicalOffset = uiLogicalOffset;
+ }
+
+ return _PMR_PDumpSymbolicAddrPhysical(psPMR,
+ uiPhysicalOffset,
+ ui32MemspaceNameLen,
+ pszMemspaceName,
+ ui32SymbolicAddrLen,
+ pszSymbolicAddr,
+ puiNewOffset,
+ puiNextSymName);
+}
+
+/*!
+ * @brief Writes a WRW command to the script2 buffer, representing a
+ * dword write to a physical allocation. Size is always
+ * sizeof(IMG_UINT32).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui32Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+ PVR_ASSERT(uiLogicalOffset + sizeof(ui32Value) <= psPMR->uiLogicalSize);
+ /* Especially make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+ <= uiPMRPageSize));
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Get the symbolic address of the PMR */
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Write the WRW script command */
+ eError = PDumpPMRWRW32(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ ui32Value,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a RDW followed by a WRW command to the pdump script to perform
+ * an effective copy from memory to memory. Memory copied is of size
+ * sizeof (IMG_UINT32)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+ PMR *psSrcPMR,
+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+ const IMG_CHAR *pszTmpVar,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+ const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+ PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT32) <= psSrcPMR->uiLogicalSize);
+ /* Especially make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT32))
+ <= uiSrcPMRPageSize));
+
+ PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT32) <= psDstPMR->uiLogicalSize);
+ /* Especially make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT32))
+ <= uiDstPMRPageSize));
+
+
+ eError = PMRLockSysPhysAddresses(psSrcPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Get the symbolic address of the source PMR */
+ eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+ uiSrcLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Issue PDump read command */
+ eError = PDumpPMRRDW32MemToInternalVar(pszTmpVar,
+ aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+
+ eError = PMRLockSysPhysAddresses(psDstPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ /* Get the symbolic address of the destination PMR */
+ eError = PMR_PDumpSymbolicAddr(psDstPMR,
+ uiDstLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ /* Write the WRW script command */
+ eError = PDumpPMRWRW32InternalVarToMem(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ pszTmpVar,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ eError = PMRUnlockSysPhysAddresses(psDstPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a WRW64 command to the script2 buffer, representing a
+ * dword write to a physical allocation. Size is always
+ * sizeof(IMG_UINT64).
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - offset
+ * @param ui64Value - value to write
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+
+ PVR_ASSERT(uiLogicalOffset + sizeof(ui64Value) <= psPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui64Value))
+ <= uiPMRPageSize));
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Get the symbolic address of the PMR */
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Write the WRW script command */
+ eError = PDumpPMRWRW64(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ ui64Value,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * @brief Writes a RDW64 followed by a WRW64 command to the pdump script to
+ * perform an effective copy from memory to memory. Memory copied is of
+ *        size sizeof(IMG_UINT64)
+ *
+ * @param psDstPMR - PMR object representing allocation of destination
+ * @param uiDstLogicalOffset - destination offset
+ * @param psSrcPMR - PMR object representing allocation of source
+ * @param uiSrcLogicalOffset - source offset
+ * @param pszTmpVar - pdump temporary variable used during the copy
+ * @param uiPDumpFlags - pdump flags
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+ PMR *psSrcPMR,
+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+ const IMG_CHAR *pszTmpVar,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpSymbolicOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ const IMG_UINT32 uiDstPMRPageSize = 1 << psDstPMR->uiLog2ContiguityGuarantee;
+ const IMG_UINT32 uiSrcPMRPageSize = 1 << psSrcPMR->uiLog2ContiguityGuarantee;
+
+	PVR_ASSERT(uiSrcLogicalOffset + sizeof(IMG_UINT64) <= psSrcPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiSrcLogicalOffset & (uiSrcPMRPageSize-1)) + sizeof(IMG_UINT64))
+	           <= uiSrcPMRPageSize));
+
+	PVR_ASSERT(uiDstLogicalOffset + sizeof(IMG_UINT64) <= psDstPMR->uiLogicalSize);
+	/* Especially make sure to not cross a block boundary */
+	PVR_ASSERT(( ((uiDstLogicalOffset & (uiDstPMRPageSize-1)) + sizeof(IMG_UINT64))
+	           <= uiDstPMRPageSize));
+
+
+ eError = PMRLockSysPhysAddresses(psSrcPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Get the symbolic address of the source PMR */
+ eError = PMR_PDumpSymbolicAddr(psSrcPMR,
+ uiSrcLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Issue PDump read command */
+ eError = PDumpPMRRDW64MemToInternalVar(pszTmpVar,
+ aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMRUnlockSysPhysAddresses(psSrcPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+
+ eError = PMRLockSysPhysAddresses(psDstPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ /* Get the symbolic address of the destination PMR */
+ eError = PMR_PDumpSymbolicAddr(psDstPMR,
+ uiDstLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpSymbolicOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ /* Write the WRW script command */
+ eError = PDumpPMRWRW64InternalVarToMem(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpSymbolicOffset,
+ pszTmpVar,
+ uiPDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+
+ eError = PMRUnlockSysPhysAddresses(psDstPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ return PVRSRV_OK;
+}
+
+/*!
+ * @brief PDumps the contents of the given allocation.
+ * If bZero is IMG_TRUE then the zero page in the parameter stream is used
+ * as the source of data, rather than the allocation's actual backing.
+ * @param psPMR - PMR object representing allocation
+ * @param uiLogicalOffset - Offset to write at
+ * @param uiSize - Number of bytes to write
+ * @param uiPDumpFlags - PDump flags
+ * @param bZero - Use the PDump zero page as the source
+ * @return PVRSRV_ERROR
+ */
+PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_BOOL bZero)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOutOffset;
+ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+ const IMG_CHAR *pszParamStreamFileName;
+ PDUMP_FILEOFFSET_T uiParamStreamFileOffset;
+
+ /* required when !bZero */
+#define PMR_MAX_PDUMP_BUFSZ (1<<21)
+ IMG_CHAR aszParamStreamFilename[PDUMP_PARAM_MAX_FILE_NAME];
+ IMG_UINT8 *pcBuffer = NULL;
+ size_t uiBufSz;
+ IMG_BOOL bValid;
+ IMG_DEVMEM_SIZE_T uiSizeRemain = uiSize;
+
+ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+ /* Get the correct PDump stream file name */
+ if (bZero)
+ {
+ PDumpCommentWithFlags(uiPDumpFlags,
+ "Zeroing allocation (%llu bytes)",
+ (unsigned long long) uiSize);
+
+ /* get the zero page information. it is constant for this function */
+ PDumpGetParameterZeroPageInfo(&uiParamStreamFileOffset,
+ &uiBufSz,
+ &pszParamStreamFileName);
+ }
+ else
+ {
+
+ uiBufSz = 1 << PMR_GetLog2Contiguity(psPMR);
+ PVR_ASSERT((1 << PMR_GetLog2Contiguity(psPMR)) <= PMR_MAX_PDUMP_BUFSZ);
+
+ pcBuffer = OSAllocMem(uiBufSz);
+
+ PVR_LOGR_IF_NOMEM(pcBuffer, "OSAllocMem");
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ pszParamStreamFileName = aszParamStreamFilename;
+ }
+
+ /* Loop over all touched symbolic addresses of the PMR and
+ * emit LDBs to load the contents. */
+ while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+ {
+ /* Get the correct symbolic name for the current offset */
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiCurrentOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOutOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ PVR_ASSERT((uiNextSymName - uiCurrentOffset) <= uiBufSz);
+
+ PMR_IsOffsetValid(psPMR,
+ 0,
+ 1,
+ uiCurrentOffset,
+ &bValid);
+
+ /* Either just LDB the zeros or read from the PMR and store that
+ * in the pdump stream */
+ if (bValid)
+ {
+ size_t uiNumBytes;
+
+ if (bZero)
+ {
+ uiNumBytes = MIN(uiSizeRemain, uiNextSymName - uiCurrentOffset);
+ }
+ else
+ {
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+ uiLogicalOffset + uiSize - uiCurrentOffset :
+ uiNextSymName - uiCurrentOffset);
+
+ eError = PMR_ReadBytes(psPMR,
+ uiCurrentOffset,
+ pcBuffer,
+ uiReadOffset,
+ &uiNumBytes);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpWriteBuffer(pcBuffer,
+ uiNumBytes,
+ uiPDumpFlags,
+ &aszParamStreamFilename[0],
+ sizeof(aszParamStreamFilename),
+ &uiParamStreamFileOffset);
+ if (eError == PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+ {
+					/* Writing to the parameter file is prevented by the flags
+					 * and current state of the driver, so skip further writes.
+					 */
+ eError = PVRSRV_OK;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PDUMP_ERROR(eError, "Failed to write PMR memory to parameter file");
+ }
+ }
+
+ /* Emit the LDB command to the current symbolic address*/
+ eError = PDumpPMRLDB(aszMemspaceName,
+ aszSymbolicName,
+ uiOutOffset,
+ uiNumBytes,
+ pszParamStreamFileName,
+ uiParamStreamFileOffset,
+ uiPDumpFlags);
+
+ uiSizeRemain = uiSizeRemain - uiNumBytes;
+ }
+ uiCurrentOffset = uiNextSymName;
+ }
+
+ if (!bZero)
+ {
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ OSFreeMem(pcBuffer);
+ }
+
+ return PVRSRV_OK;
+}
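+
+/* Design note: when bZero is set, the emitted LDBs reference the shared zero
+ * page in the parameter stream, so the allocation's contents are never read
+ * or copied; otherwise each contiguity-sized block is read out via
+ * PMR_ReadBytes() and written to the parameter file first. */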
+
+
+
+PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOutOffset;
+ IMG_DEVMEM_OFFSET_T uiCurrentOffset = uiLogicalOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName = 0;
+ IMG_UINT32 uiCurrentFileOffset = uiFileOffset;
+
+ PVR_UNREFERENCED_PARAMETER(uiArraySize);
+
+ PVR_ASSERT(uiLogicalOffset + uiSize <= psPMR->uiLogicalSize);
+
+ while (uiCurrentOffset < (uiLogicalOffset + uiSize))
+ {
+ IMG_DEVMEM_OFFSET_T uiReadOffset;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiCurrentOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOutOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ PVR_ASSERT(uiNextSymName <= psPMR->uiLogicalSize);
+
+ uiReadOffset = ((uiNextSymName > (uiLogicalOffset + uiSize)) ?
+ uiLogicalOffset + uiSize - uiCurrentOffset :
+ uiNextSymName - uiCurrentOffset);
+
+ eError = PDumpPMRSAB(aszMemspaceName,
+ aszSymbolicName,
+ uiOutOffset,
+ uiReadOffset,
+ pszFilename,
+ uiCurrentFileOffset);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ uiCurrentFileOffset += uiNextSymName - uiCurrentOffset;
+ uiCurrentOffset = uiNextSymName;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiPMRPageSize = 1 << psPMR->uiLog2ContiguityGuarantee;
+
+ /* Make sure to not cross a block boundary */
+ PVR_ASSERT(( ((uiLogicalOffset & (uiPMRPageSize-1)) + sizeof(ui32Value))
+ < uiPMRPageSize));
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiLogicalOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpOffset,
+ &uiNextSymName);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+#define _MEMPOLL_DELAY (1000)
+#define _MEMPOLL_COUNT (2000000000 / _MEMPOLL_DELAY)
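+
+	/* Poll budget: 2000000000 / 1000 == 2000000 retries at a delay of 1000
+	 * per retry, i.e. roughly 2e9 delay units in total (the unit is
+	 * whatever PDumpPMRPOL interprets it as -- an assumption, it is not
+	 * specified here). */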
+
+ eError = PDumpPMRPOL(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpOffset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ _MEMPOLL_COUNT,
+ _MEMPOLL_DELAY,
+ uiPDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPDumpOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiReadOffset,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiPDumpOffset,
+ &uiNextSymName);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = PDumpPMRCBP(aszMemspaceName,
+ aszSymbolicName,
+ uiPDumpOffset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+ IMG_UINT32 uiBlockSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE *phPDumpAllocInfo = (IMG_HANDLE*) psPMR->hPDumpAllocHandle;
+
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 i, uiIndex;
+
+ /* Remove pages from the PMR */
+ for (i = 0; i < ui32FreePageCount; i++)
+ {
+ uiIndex = pai32FreeIndices[i];
+
+ eError = PDumpFree(phPDumpAllocInfo[uiIndex]);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ phPDumpAllocInfo[uiIndex] = NULL;
+ }
+
+ /* Add new pages to the PMR */
+ for (i = 0; i < ui32AllocPageCount; i++)
+ {
+ uiIndex = pai32AllocIndices[i];
+
+ PVR_ASSERT(phPDumpAllocInfo[uiIndex] == NULL);
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiIndex * uiBlockSize,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpMalloc(aszMemspaceName,
+ aszSymbolicName,
+ uiBlockSize,
+ uiBlockSize,
+ bInitialise,
+ ui32InitValue,
+ &phPDumpAllocInfo[uiIndex],
+ PDUMP_NONE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ /* (IMG_HANDLE) <- (IMG_HANDLE*) */
+ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+}
+
+void
+PDumpPMRFreePMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+
+ /* (IMG_HANDLE*) <- (IMG_HANDLE) */
+ IMG_HANDLE *ahPDumpAllocHandleArray = (IMG_HANDLE*) hPDumpAllocationInfoHandle;
+
+ for (i = 0; i < psPMR->uiNumPDumpBlocks; i++)
+ {
+ if (ahPDumpAllocHandleArray[i] != NULL)
+ {
+ eError = PDumpFree(ahPDumpAllocHandleArray[i]);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ ahPDumpAllocHandleArray[i] = NULL;
+ }
+ }
+
+ OSFreeMem(ahPDumpAllocHandleArray);
+}
+
+
+void
+PDumpPMRMallocPMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiMappingTable,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE *phPDumpAllocInfo;
+
+ IMG_CHAR aszMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+ IMG_UINT32 uiNumPhysBlocks;
+ IMG_UINT32 uiNumVirtBlocks;
+ IMG_UINT32 i, uiIndex;
+
+
+ if (PMR_IsSparse(psPMR))
+ {
+ uiNumPhysBlocks = (ui32ChunkSize * ui32NumPhysChunks) >> uiLog2Contiguity;
+ /* Make sure we did not cut off anything */
+ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == (ui32ChunkSize * ui32NumPhysChunks));
+ }
+ else
+ {
+ uiNumPhysBlocks = uiSize >> uiLog2Contiguity;
+ /* Make sure we did not cut off anything */
+ PVR_ASSERT(uiNumPhysBlocks << uiLog2Contiguity == uiSize);
+ }
+
+ uiNumVirtBlocks = uiSize >> uiLog2Contiguity;
+ PVR_ASSERT(uiNumVirtBlocks << uiLog2Contiguity == uiSize);
+
+ psPMR->uiNumPDumpBlocks = uiNumVirtBlocks;
+
+ phPDumpAllocInfo = (IMG_HANDLE*) OSAllocZMem(uiNumVirtBlocks * sizeof(IMG_HANDLE));
+
+
+ for (i = 0; i < uiNumPhysBlocks; i++)
+ {
+ uiIndex = PMR_IsSparse(psPMR) ? puiMappingTable[i] : i;
+
+ eError = PMR_PDumpSymbolicAddr(psPMR,
+ uiIndex * uiBlockSize,
+ sizeof(aszMemspaceName),
+ &aszMemspaceName[0],
+ sizeof(aszSymbolicName),
+ &aszSymbolicName[0],
+ &uiOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpMalloc(aszMemspaceName,
+ aszSymbolicName,
+ uiBlockSize,
+ uiBlockSize,
+ bInitialise,
+ ui32InitValue,
+ &phPDumpAllocInfo[uiIndex],
+ ui32PDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ /* (IMG_HANDLE) <- (IMG_HANDLE*) */
+ *phPDumpAllocInfoOut = (IMG_HANDLE) phPDumpAllocInfo;
+
+}
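+
+/* Worked example (illustrative numbers): a non-sparse PMR with
+ * uiSize == 1 MiB and uiLog2Contiguity == 12 yields 0x100000 >> 12 == 256
+ * virtual blocks, so 256 PDump allocations of 4 KiB each are created above. */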
+#endif /* PDUMP */
+
+
+void *PMRGetPrivateData(const PMR *psPMR,
+ const PMR_IMPL_FUNCTAB *psFuncTab)
+{
+ return (psFuncTab == psPMR->psFuncTab) ? psPMR->pvFlavourData : NULL;
+}
+
+#define PMR_PM_WORD_SIZE 4
+
+PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+ PMR *psPageListPMR,
+ IMG_DEVMEM_OFFSET_T uiTableOffset,
+ IMG_DEVMEM_SIZE_T uiTableLength,
+ /* Referenced PMR, and "page" granularity */
+ PMR *psReferencePMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+ PMR_PAGELIST **ppsPageList)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiWordSize;
+ IMG_UINT32 uiNumPages;
+ IMG_UINT32 uiPageIndex;
+ PMR_FLAGS_T uiFlags = psPageListPMR->uiFlags;
+ PMR_PAGELIST *psPageList;
+#if defined(PDUMP)
+ IMG_CHAR aszTableEntryMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszTableEntrySymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiTableEntryPDumpOffset;
+ IMG_CHAR aszPageMemspaceName[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ IMG_CHAR aszPageSymbolicName[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiPagePDumpOffset;
+ IMG_DEVMEM_OFFSET_T uiNextSymName;
+#endif
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 uiPageListPageSize = 1 << psPageListPMR->uiLog2ContiguityGuarantee;
+ IMG_UINT64 uiPageListPMRPage = 0;
+ IMG_UINT64 uiPrevPageListPMRPage = 0;
+ IMG_HANDLE hPrivData = NULL;
+ void *pvKernAddr = NULL;
+ IMG_DEV_PHYADDR asDevPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_DEV_PHYADDR *pasDevAddrPtr;
+ IMG_UINT32 *pui32DataPtr = NULL;
+ IMG_BOOL *pbPageIsValid;
+#endif
+
+ uiWordSize = PMR_PM_WORD_SIZE;
+
+	/* Check that the table holds exactly one word-sized entry per page of the reference PMR */
+ uiNumPages = (IMG_UINT32)(psReferencePMR->uiLogicalSize >> uiLog2PageSize);
+
+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psReferencePMR->uiLogicalSize)
+ {
+ /* Strictly speaking, it's possible to provoke this error in two ways:
+ (i) if it's not a whole multiple of the page size; or
+ (ii) if there are more than 4 billion pages.
+	   The latter is unlikely :) but the check is required in order to justify the cast.
+ */
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto e0;
+ }
+ uiWordSize = (IMG_UINT32)uiTableLength / uiNumPages;
+ if (uiNumPages * uiWordSize != uiTableLength)
+ {
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto e0;
+ }
+
+ /* Check we're not being asked to write off the end of the PMR */
+ if (uiTableOffset + uiTableLength > psPageListPMR->uiLogicalSize)
+ {
+		/* table memory insufficient to store all the entries */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /* the PMR into which we are writing must not be user CPU mappable: */
+ if (PVRSRV_CHECK_CPU_READABLE(uiFlags) || PVRSRV_CHECK_CPU_WRITEABLE(uiFlags))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "masked flags = 0x%08x", (uiFlags & (PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE))));
+ PVR_DPF((PVR_DBG_ERROR, "Page list PMR allows CPU mapping (0x%08x)", uiFlags));
+ eError = PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS;
+ goto e0;
+ }
+
+ if (_PMRIsSparse(psPageListPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PageList PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ if (_PMRIsSparse(psReferencePMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Reference PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psPageList = OSAllocMem(sizeof(PMR_PAGELIST));
+ if (psPageList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ psPageList->psReferencePMR = psReferencePMR;
+
+ /* Need to lock down the physical addresses of the reference PMR */
+ /* N.B. This also checks that the requested "contiguity" is achievable */
+ eError = PMRLockSysPhysAddresses(psReferencePMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if !defined(NO_HARDWARE)
+ if (uiNumPages > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ pasDevAddrPtr = OSAllocMem(uiNumPages * sizeof(IMG_DEV_PHYADDR));
+ if (pasDevAddrPtr == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+
+ pbPageIsValid = OSAllocMem(uiNumPages * sizeof(IMG_BOOL));
+ if (pbPageIsValid == NULL)
+ {
+ /* Clean-up before exit */
+ OSFreeMem(pasDevAddrPtr);
+
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate PMR page state"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e2;
+ }
+ }
+ else
+ {
+ pasDevAddrPtr = asDevPAddr;
+ pbPageIsValid = abValid;
+ }
+
+
+ eError = PMR_DevPhysAddr(psReferencePMR, uiLog2PageSize, uiNumPages, 0,
+ pasDevAddrPtr, pbPageIsValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to map PMR pages into device physical addresses"));
+ goto e3;
+ }
+#endif
+
+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+ {
+ IMG_DEVMEM_OFFSET_T uiPMROffset = uiTableOffset + (uiWordSize * uiPageIndex);
+#if defined(PDUMP)
+ eError = PMR_PDumpSymbolicAddr(psPageListPMR,
+ uiPMROffset,
+ sizeof(aszTableEntryMemspaceName),
+ &aszTableEntryMemspaceName[0],
+ sizeof(aszTableEntrySymbolicName),
+ &aszTableEntrySymbolicName[0],
+ &uiTableEntryPDumpOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PMR_PDumpSymbolicAddr(psReferencePMR,
+ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+ sizeof(aszPageMemspaceName),
+ &aszPageMemspaceName[0],
+ sizeof(aszPageSymbolicName),
+ &aszPageSymbolicName[0],
+ &uiPagePDumpOffset,
+ &uiNextSymName);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = PDumpWriteShiftedMaskedValue(/* destination */
+ aszTableEntryMemspaceName,
+ aszTableEntrySymbolicName,
+ uiTableEntryPDumpOffset,
+ /* source */
+ aszPageMemspaceName,
+ aszPageSymbolicName,
+ uiPagePDumpOffset,
+ /* shift right */
+ uiLog2PageSize,
+ /* shift left */
+ 0,
+ /* mask */
+ 0xffffffff,
+ /* word size */
+ uiWordSize,
+ /* flags */
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_ASSERT(eError == PVRSRV_OK);
+#else
+ PVR_UNREFERENCED_PARAMETER(uiPMROffset);
+#endif
+#if !defined(NO_HARDWARE)
+
+ /*
+ We checked for sparse PMRs at function entry, but since we can,
+ also check that every page is valid
+ */
+ PVR_ASSERT(pbPageIsValid[uiPageIndex]);
+ PVR_ASSERT(pasDevAddrPtr[uiPageIndex].uiAddr != 0);
+ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
+ uiPageListPMRPage = uiPMROffset >> psReferencePMR->uiLog2ContiguityGuarantee;
+
+ if ((pui32DataPtr == NULL) || (uiPageListPMRPage != uiPrevPageListPMRPage))
+ {
+ size_t uiMappingOffset = uiPMROffset & (~(uiPageListPageSize - 1));
+ size_t uiMappedSize;
+
+ /* If we already had a page list mapped, we need to unmap it... */
+ if (pui32DataPtr != NULL)
+ {
+ PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+ }
+
+ eError = PMRAcquireKernelMappingData(psPageListPMR,
+ uiMappingOffset,
+ uiPageListPageSize,
+ &pvKernAddr,
+ &uiMappedSize,
+ &hPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Error mapping page list PMR page (%" IMG_UINT64_FMTSPEC ") into kernel (%d)",
+ uiPageListPMRPage, eError));
+ goto e3;
+ }
+
+ uiPrevPageListPMRPage = uiPageListPMRPage;
+ PVR_ASSERT(uiMappedSize >= uiPageListPageSize);
+ PVR_ASSERT(pvKernAddr != NULL);
+
+ pui32DataPtr = (IMG_UINT32 *) (((IMG_CHAR *) pvKernAddr) + (uiPMROffset & (uiPageListPageSize - 1)));
+ }
+
+ PVR_ASSERT(((pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize) & 0xFFFFFFFF00000000ll) == 0);
+
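+ /* The entry written below is a page frame number: the device physical
+ address shifted right by uiLog2PageSize. The assert above guarantees
+ it fits in 32 bits; e.g. with 4kB pages this covers device physical
+ addresses up to 2^44. */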
+ /* Write the physical page index into the page list PMR */
+ *pui32DataPtr++ = TRUNCATE_64BITS_TO_32BITS(pasDevAddrPtr[uiPageIndex].uiAddr >> uiLog2PageSize);
+
+ /* Last page so unmap */
+ if (uiPageIndex == (uiNumPages - 1))
+ {
+ PMRReleaseKernelMappingData(psPageListPMR, hPrivData);
+ }
+#endif
+ }
+
+#if !defined(NO_HARDWARE)
+ if (pasDevAddrPtr != asDevPAddr)
+ {
+ OSFreeMem(pbPageIsValid);
+ OSFreeMem(pasDevAddrPtr);
+ }
+#endif
+ *ppsPageList = psPageList;
+ return PVRSRV_OK;
+
+ /*
+ error exit paths follow
+ */
+#if !defined(NO_HARDWARE)
+ e3:
+ if (pasDevAddrPtr != asDevPAddr)
+ {
+ OSFreeMem(pbPageIsValid);
+ OSFreeMem(pasDevAddrPtr);
+ }
+ e2:
+ PMRUnlockSysPhysAddresses(psReferencePMR);
+#endif
+ e1:
+ OSFreeMem(psPageList);
+ e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+PVRSRV_ERROR /* FIXME: should be void */
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList)
+{
+ PVRSRV_ERROR eError2;
+
+ eError2 = PMRUnlockSysPhysAddresses(psPageList->psReferencePMR);
+ PVR_ASSERT(eError2 == PVRSRV_OK);
+ OSFreeMem(psPageList);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+ IMG_UINT32 uiNumPages;
+ IMG_UINT32 uiPageIndex;
+ IMG_UINT32 ui32PageSize = 1 << uiLog2PageSize;
+ IMG_HANDLE hPrivData = NULL;
+ void *pvKernAddr = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ size_t uiMappedSize;
+
+ PVR_ASSERT(psPMR);
+
+ /* Calculate number of pages in this PMR */
+ uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+ /* Verify the logical size is a multiple of the physical page size */
+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is not a multiple of %u", ui32PageSize));
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto MultiPage_Error;
+ }
+
+ if (_PMRIsSparse(psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Sparse_Error;
+ }
+
+ /* Scan through all pages of the PMR */
+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+ {
+ /* map the physical page (for a given PMR offset) into kernel space */
+ eError = PMRAcquireKernelMappingData(psPMR,
+ (size_t)uiPageIndex << uiLog2PageSize,
+ ui32PageSize,
+ &pvKernAddr,
+ &uiMappedSize,
+ &hPrivData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: AcquireKernelMapping failed with error %u", eError));
+ goto AcquireKernelMapping_Error;
+ }
+
+ /* ensure the mapped page size is the same as the physical page size */
+ if (uiMappedSize != ui32PageSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRZeroingPMR: Physical Page size = 0x%08x, Size of Mapping = 0x%016" IMG_UINT64_FMTSPECx,
+ ui32PageSize,
+ (IMG_UINT64)uiMappedSize));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto MappingSize_Error;
+ }
+
+ /* Use the conservative 'DeviceMemSet' here because we can't know
+ * if this PMR will be mapped cached.
+ */
+
+ OSDeviceMemSet(pvKernAddr, 0, ui32PageSize);
+
+ /* release mapping */
+ PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PMRZeroingPMR: Zeroing PMR %p done (num pages %u, page size %u)",
+ psPMR,
+ uiNumPages,
+ ui32PageSize));
+
+ return PVRSRV_OK;
+
+
+ /* Error handling */
+
+ MappingSize_Error:
+ PMRReleaseKernelMappingData(psPMR, hPrivData);
+
+ AcquireKernelMapping_Error:
+ Sparse_Error:
+ MultiPage_Error:
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psPMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize)
+{
+ IMG_DEV_PHYADDR sDevAddrPtr;
+ IMG_UINT32 uiNumPages;
+ IMG_UINT32 uiPageIndex;
+ IMG_BOOL bPageIsValid;
+ IMG_UINT32 ui32Col = 16;
+ IMG_UINT32 ui32SizePerCol = 11;
+ IMG_UINT32 ui32ByteCount = 0;
+ IMG_CHAR pszBuffer[16 /* ui32Col */ * 11 /* ui32SizePerCol */ + 1];
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Get number of pages */
+ uiNumPages = (IMG_UINT32)(psPMR->uiLogicalSize >> uiLog2PageSize);
+
+ /* Verify the logical size is a multiple of the physical page size */
+ if ((PMR_SIZE_T)uiNumPages << uiLog2PageSize != psPMR->uiLogicalSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRDumpPageList: PMR is not a multiple of %u", 1 << uiLog2PageSize));
+ eError = PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE;
+ goto MultiPage_Error;
+ }
+
+ if (_PMRIsSparse(psPMR))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRDumpPageList: PMR is sparse"));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto Sparse_Error;
+ }
+
+ PVR_LOG((" PMR %p, Number of pages %u, Log2PageSize %d", psPMR, uiNumPages, uiLog2PageSize));
+
+ /* Print the address of the physical pages */
+ for (uiPageIndex = 0; uiPageIndex < uiNumPages; uiPageIndex++)
+ {
+ /* Get Device physical Address */
+ eError = PMR_DevPhysAddr(psPMR,
+ uiLog2PageSize,
+ 1,
+ (IMG_DEVMEM_OFFSET_T)uiPageIndex << uiLog2PageSize,
+ &sDevAddrPtr,
+ &bPageIsValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PMRDumpPageList: PMR %p failed to get DevPhysAddr with error %u",
+ psPMR,
+ eError));
+ goto DevPhysAddr_Error;
+ }
+
+ ui32ByteCount += OSSNPrintf(pszBuffer + ui32ByteCount, ui32SizePerCol + 1, "%08x ", (IMG_UINT32)(sDevAddrPtr.uiAddr >> uiLog2PageSize));
+ PVR_ASSERT(ui32ByteCount < ui32Col * ui32SizePerCol);
+
+ if (uiPageIndex % ui32Col == ui32Col - 1)
+ {
+ PVR_LOG((" Phys Page: %s", pszBuffer));
+ ui32ByteCount = 0;
+ }
+ }
+ if (ui32ByteCount > 0)
+ {
+ PVR_LOG((" Phys Page: %s", pszBuffer));
+ }
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+ DevPhysAddr_Error:
+ Sparse_Error:
+ MultiPage_Error:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ if (_gsSingletonPMRContext.bModuleInitialised)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error: Singleton PMR context already initialized", __func__));
+ eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+ goto out;
+ }
+
+ eError = OSLockCreate(&_gsSingletonPMRContext.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error: failed to create lock", __func__));
+ goto out;
+ }
+
+ _gsSingletonPMRContext.uiNextSerialNum = 1;
+
+ _gsSingletonPMRContext.uiNextKey = 0x8300f001 * (uintptr_t)&_gsSingletonPMRContext;
+
+ _gsSingletonPMRContext.bModuleInitialised = IMG_TRUE;
+
+ _gsSingletonPMRContext.uiNumLivePMRs = 0;
+
+#if defined (PVRSRV_ENABLE_LINUX_MMAP_STATS)
+ eError = MMapStatsInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: MMap stats initialisation failed", __func__));
+ goto out;
+ }
+#endif
+
+ out:
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PMRDeInit(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ goto out;
+ }
+
+ if (!_gsSingletonPMRContext.bModuleInitialised)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error: Singleton PMR context is not initialized", __func__));
+ eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+ goto out;
+ }
+
+#if defined (PVRSRV_ENABLE_LINUX_MMAP_STATS)
+ MMapStatsDeInit();
+#endif
+
+ if (_gsSingletonPMRContext.uiNumLivePMRs != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error: %d live PMRs remain",
+ __func__,
+ _gsSingletonPMRContext.uiNumLivePMRs));
+ PVR_DPF((PVR_DBG_ERROR, "%s: This is an unrecoverable error; a subsequent crash is inevitable",
+ __func__));
+ eError = PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR;
+ goto out;
+ }
+
+ OSLockDestroy(_gsSingletonPMRContext.hLock);
+
+ _gsSingletonPMRContext.bModuleInitialised = IMG_FALSE;
+
+ out:
+ PVR_ASSERT(eError == PVRSRV_OK);
+ return eError;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pmr.h b/drivers/gpu/drm/img-rogue/1.10/pmr.h
new file mode 100644
index 00000000000000..ae5c77696341d9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pmr.h
@@ -0,0 +1,1105 @@
+/**************************************************************************/ /*!
+@File
+@Title Physmem (PMR) abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This module is responsible for
+ the "PMR" abstraction. A PMR (Physical Memory Resource)
+ represents some unit of physical memory which is
+ allocated/freed/mapped/unmapped as an indivisible unit
+ (higher software levels provide an abstraction above that
+ to deal with dividing this down into smaller manageable units).
+ Importantly, this module knows nothing of virtual memory, or
+ of MMUs etc., with one excusable exception. We have the
+ concept of a "page size", which really means nothing in
+ physical memory, but represents a "contiguity quantum" such
+ that the higher level modules which map this memory are able
+ to verify that it matches the needs of the page size for the
+ virtual realm into which it is being mapped.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_H_
+#define _SRVSRV_PMR_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include "devicemem_typedefs.h" /* Required for export DEVMEM_EXPORTCOOKIE */
+
+/* services/include */
+#include "pdump.h"
+
+/* services/server/include/ */
+#include "pmr_impl.h"
+#include "physheap.h"
+#include "opaque_types.h"
+
+#define PMR_MAX_TRANSLATION_STACK_ALLOC (32)
+
+typedef IMG_UINT64 PMR_BASE_T;
+typedef IMG_UINT64 PMR_SIZE_T;
+#define PMR_SIZE_FMTSPEC "0x%010"IMG_UINT64_FMTSPECX
+#define PMR_VALUE32_FMTSPEC "0x%08X"
+#define PMR_VALUE64_FMTSPEC "0x%016"IMG_UINT64_FMTSPECX
+typedef IMG_UINT32 PMR_LOG2ALIGN_T;
+typedef IMG_UINT64 PMR_PASSWORD_T;
+
+struct _PMR_MAPPING_TABLE_
+{
+ PMR_SIZE_T uiChunkSize; /*!< Size of a "chunk" */
+ IMG_UINT32 ui32NumPhysChunks; /*!< Number of physical chunks that are valid */
+ IMG_UINT32 ui32NumVirtChunks; /*!< Number of virtual chunks in the mapping */
+ /* Must be last */
+ IMG_UINT32 aui32Translation[1]; /*!< Translation mapping for "logical" to physical */
+};
+
+#define TRANSLATION_INVALID 0xFFFFFFFFUL
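+
+/*
+ * Illustrative sketch: a sparse PMR with four virtual chunks of which only
+ * the first and last are physically backed would carry
+ * aui32Translation[] = { 0, TRANSLATION_INVALID, TRANSLATION_INVALID, 1 },
+ * i.e. virtual chunk 0 maps to physical chunk 0 and virtual chunk 3 to
+ * physical chunk 1.
+ */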
+
+typedef struct _PMR_EXPORT_ PMR_EXPORT;
+
+typedef struct _PMR_PAGELIST_ PMR_PAGELIST;
+
+//typedef struct _PVRSRV_DEVICE_NODE_ *PPVRSRV_DEVICE_NODE;
+
+/*
+ * PMRCreatePMR
+ *
+ * Not to be called directly, only via implementations of PMR
+ * factories, e.g. in physmem_osmem.c, deviceclass.c, etc.
+ *
+ * Creates a PMR object, with callbacks and private data as per the
+ * FuncTab/PrivData args.
+ *
+ * Note that at creation time the PMR must set in stone the "logical
+ * size" and the "contiguity guarantee".
+ *
+ * Flags are also set at this time. (T.B.D. flags also immutable for
+ * the life of the PMR?)
+ *
+ * Logical size is the amount of Virtual space this allocation would
+ * take up when mapped. Note that this does not have to be the same
+ * as the actual physical size of the memory. For example, consider
+ * the sparsely allocated non-power-of-2 texture case. In this
+ * instance, the "logical size" would be the virtual size of the
+ * rounded-up power-of-2 texture. That some pages of physical memory
+ * may not exist does not affect the logical size calculation.
+ *
+ * The PMR must also supply the "contiguity guarantee" which is the
+ * finest granularity of alignment and size of physical pages that the
+ * PMR will provide after LockSysPhysAddresses is called. Note that
+ * the calling code may choose to call PMRSysPhysAddr with a finer
+ * granularity than this, for example if it were to map into a device
+ * MMU with a smaller page size, and it's also OK for the PMR to
+ * supply physical memory in larger chunks than this. But
+ * importantly, never the other way around.
+ *
+ * More precisely, the following inequality must be maintained
+ * whenever mappings and/or physical addresses exist:
+ *
+ * (device MMU page size) <= 2**(uiLog2ContiguityGuarantee) <= (actual contiguity of physical memory)
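+ *
+ * For example, a PMR created with uiLog2ContiguityGuarantee = 16 (64kB
+ * granules) can safely back mappings in a device MMU using 4kB pages,
+ * whereas a PMR created with a guarantee of 12 (4kB) must not be mapped
+ * with 16kB device MMU pages.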
+ *
+ * The function table will contain the following callbacks which may
+ * be overridden by the PMR implementation:
+ *
+ * pfnLockPhysAddresses
+ *
+ * Called when someone requests that physical pages are to
+ * be locked down via the PMRLockSysPhysAddresses() API. Note
+ * that if physical pages are prefaulted at PMR creation time and
+ * therefore static, it would not be necessary to override this
+ * function, in which case NULL may be supplied.
+ *
+ * pfnUnlockPhysAddresses
+ *
+ * The reverse of pfnLockPhysAddresses. Note that this should be
+ * NULL if and only if pfnLockPhysAddresses is NULL
+ *
+ * pfnSysPhysAddr
+ *
+ * This function is mandatory. This is the one which returns the
+ * system physical address for a given offset into this PMR. The
+ * "lock" function will have been called, if overridden, before
+ * this function, thus the implementation should not increase any
+ * refcount when answering this call. Refcounting, if necessary,
+ * should be done in the lock/unlock calls. Refcounting would
+ * not be necessary in the prefaulted/static scenario, as the
+ * pmr.c abstraction will handle the refcounting for the whole
+ * PMR.
+ *
+ * pfnFinalize
+ *
+ * Called when the PMR's refcount reaches zero and it gets
+ * destroyed. This allows the implementation to free up any
+ * resource acquired during creation time.
+ *
+ */
+extern PVRSRV_ERROR
+PMRCreatePMR(PPVRSRV_DEVICE_NODE psDevNode,
+ PHYS_HEAP *psPhysHeap,
+ PMR_SIZE_T uiLogicalSize,
+ PMR_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ PMR_LOG2ALIGN_T uiLog2ContiguityGuarantee,
+ PMR_FLAGS_T uiFlags,
+ const IMG_CHAR *pszAnnotation,
+ const PMR_IMPL_FUNCTAB *psFuncTab,
+ PMR_IMPL_PRIVDATA pvPrivData,
+ PMR_IMPL_TYPE eType,
+ PMR **ppsPMRPtr,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*
+ * PMRLockSysPhysAddresses()
+ *
+ * Calls the relevant callback to lock down the system physical addresses of the memory that makes up the whole PMR.
+ *
+ * Before this call, it is not valid to use any of the
+ * information-getting APIs: PMR_Flags(), PMR_SysPhysAddr(), etc.
+ * [ see note below about lock/unlock semantics ]
+ *
+ * The caller of this function does not have to care about how the PMR
+ * is implemented. It only has to know that it is allowed access to
+ * the physical addresses _after_ calling this function and _until_
+ * calling PMRUnlockSysPhysAddresses().
+ *
+ *
+ * Notes to callback implementers (authors of PMR Factories):
+ *
+ * Some PMR implementations will be such that the physical memory
+ * exists for the lifetime of the PMR, with a static address, (and
+ * normally flags and symbolic address are static too) and so it is
+ * legal for a PMR implementation to not provide an implementation for
+ * the lock callback.
+ *
+ * Some PMR implementation may wish to page memory in from secondary
+ * storage on demand. The lock/unlock callbacks _may_ be the place to
+ * do this. (more likely, there would be a separate API for doing
+ * this, but this API provides a useful place to assert that it has
+ * been done)
+ */
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRLockSysPhysAddressesNested(PMR *psPMR,
+ IMG_UINT32 ui32NestingLevel);
+
+/*
+ * PMRUnlockSysPhysAddresses()
+ *
+ * the reverse of PMRLockSysPhysAddresses()
+ */
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddresses(PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMRUnlockSysPhysAddressesNested(PMR *psPMR, IMG_UINT32 ui32NestingLevel);
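+
+/*
+ * Illustrative lock/use/unlock sketch (a hypothetical caller; error
+ * handling elided; psPMR is assumed to have come from a PMR factory):
+ *
+ *    IMG_DEV_PHYADDR sDevPAddr;
+ *    IMG_BOOL bValid;
+ *
+ *    eError = PMRLockSysPhysAddresses(psPMR);
+ *    eError = PMR_DevPhysAddr(psPMR, 0, 1, 0, &sDevPAddr, &bValid);
+ *    ... use sDevPAddr only while the lock is held ...
+ *    eError = PMRUnlockSysPhysAddresses(psPMR);
+ */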
+
+
+/**************************************************************************/ /*!
+@Function PMRUnpinPMR
+@Description This is the counterpart to PMRPinPMR(). It is meant to be
+ called before repinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psPMR The physical memory to unpin.
+
+@Input bDevMapped A flag that indicates if this PMR has been
+ mapped to device virtual space.
+ Needed to check if this PMR is allowed to be
+ unpinned or not.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the memory is
+ registered to be reclaimed. Error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRUnpinPMR(PMR *psPMR, IMG_BOOL bDevMapped);
+
+/**************************************************************************/ /*!
+@Function PMRPinPMR
+@Description This is the counterpart to PMRUnpinPMR(). It is meant to be
+ called after unpinning an allocation.
+
+ For a detailed description see client API documentation.
+
+@Input psPMR The physical memory to pin.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the allocation content
+ was successfully restored.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY when the content
+ could not be restored and new physical memory
+ was allocated.
+
+ A different error otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR PMRPinPMR(PMR *psPMR);
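+
+/*
+ * Illustrative unpin/repin sketch (error handling elided): a PMR that is
+ * not device-mapped is unpinned while idle and repinned before reuse;
+ * PVRSRV_ERROR_PMR_NEW_MEMORY signals that the old content was lost:
+ *
+ *    eError = PMRUnpinPMR(psPMR, IMG_FALSE);
+ *    ...
+ *    eError = PMRPinPMR(psPMR);
+ *    if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *    {
+ *        ... reinitialise the allocation's content ...
+ *    }
+ */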
+
+
+/*
+ * PhysmemPMRExport()
+ *
+ * Given a PMR, creates a PMR "Export", which is a handle that
+ * provides sufficient data to be able to "import" this PMR elsewhere.
+ * The PMR Export is an object in its own right, whose existence
+ * implies a reference on the PMR, thus the PMR cannot be destroyed
+ * while the PMR Export exists. The intention is that the PMR Export
+ * will be wrapped in the devicemem layer by a cross process handle,
+ * and some IPC by which to communicate the handle value and password
+ * to other processes. The receiving process is able to unwrap this
+ * to gain access to the same PMR Export in this layer, and, via
+ * PhysmemPMRImport(), obtain a reference to the original PMR.
+ *
+ * The caller receives, along with the PMR Export object, information
+ * about the size and contiguity guarantee for the PMR, and also the
+ * PMRs secret password, in order to authenticate the subsequent
+ * import.
+ *
+ * N.B. If you call PMRExportPMR() (and it succeeds), you are
+ * promising to later call PMRUnexportPMR()
+ */
+extern PVRSRV_ERROR
+PMRExportPMR(PMR *psPMR,
+ PMR_EXPORT **ppsPMRExport,
+ PMR_SIZE_T *puiSize,
+ PMR_LOG2ALIGN_T *puiLog2Contig,
+ PMR_PASSWORD_T *puiPassword);
+
+/*!
+*******************************************************************************
+
+ @Function PMRMakeLocalImportHandle
+
+ @Description
+
+ Transform a general handle type into one that we are able to import.
+ Takes a PMR reference.
+
+ @Input psPMR The input PMR.
+ @Output ppsPMR The output PMR that is going to be transformed to the
+ correct handle type.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRMakeLocalImportHandle(PMR *psPMR,
+ PMR **ppsPMR);
+
+/*!
+*******************************************************************************
+
+ @Function PMRUnmakeLocalImportHandle
+
+ @Description
+
+ Take a PMR, destroy the handle and release a reference.
+ Counterpart to PMRMakeLocalImportHandle().
+
+ @Input psPMR PMR to destroy.
+ Created by PMRMakeLocalImportHandle().
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+extern PVRSRV_ERROR
+PMRUnmakeLocalImportHandle(PMR *psPMR);
+
+/*
+ * PMRUnexportPMR()
+ *
+ * The reverse of PMRExportPMR(). This causes the PMR to no
+ * longer be exported. If the PMR has already been imported, the
+ * imported PMR reference will still be valid, but no further imports
+ * will be possible.
+ */
+extern PVRSRV_ERROR
+PMRUnexportPMR(PMR_EXPORT *psPMRExport);
+
+/*
+ * PMRImportPMR()
+ *
+ * Takes a PMR Export object, as obtained by PMRExportPMR(), and
+ * obtains a reference to the original PMR.
+ *
+ * The password must match, and is assumed to have been (by whatever
+ * means, IPC etc.) preserved intact from the former call to
+ * PMRExportPMR()
+ *
+ * The size and contiguity arguments are entirely irrelevant for the
+ * import, however they are verified in order to trap bugs.
+ *
+ * N.B. If you call PhysmemPMRImport() (and it succeeds), you are
+ * promising to later call PhysmemPMRUnimport()
+ */
+extern PVRSRV_ERROR
+PMRImportPMR(PMR_EXPORT *psPMRExport,
+ PMR_PASSWORD_T uiPassword,
+ PMR_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Contig,
+ PMR **ppsPMR);
+
+/*
+ * PMRUnimportPMR()
+ *
+ * releases the reference on the PMR as obtained by PMRImportPMR()
+ */
+extern PVRSRV_ERROR
+PMRUnimportPMR(PMR *psPMR);
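+
+/*
+ * Illustrative export/import lifecycle sketch (the IPC that carries the
+ * export handle and password between processes is elided, as is error
+ * handling):
+ *
+ *    PMR_EXPORT *psExport;
+ *    PMR_SIZE_T uiSize;
+ *    PMR_LOG2ALIGN_T uiLog2Contig;
+ *    PMR_PASSWORD_T uiPassword;
+ *    PMR *psImportedPMR;
+ *
+ *    eError = PMRExportPMR(psPMR, &psExport, &uiSize, &uiLog2Contig, &uiPassword);
+ *    ... communicate handle and password to the importing process ...
+ *    eError = PMRImportPMR(psExport, uiPassword, uiSize, uiLog2Contig, &psImportedPMR);
+ *    ... use psImportedPMR ...
+ *    eError = PMRUnimportPMR(psImportedPMR);
+ *    eError = PMRUnexportPMR(psExport);
+ */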
+
+PVRSRV_ERROR
+PMRLocalImportPMR(PMR *psPMR,
+ PMR **ppsPMR,
+ IMG_DEVMEM_SIZE_T *puiSize,
+ IMG_DEVMEM_ALIGN_T *puiAlign);
+
+/*
+ * Equivalent mapping functions when in kernel mode - TODO: should
+ * unify this and the PMRAcquireMMapArgs API with a suitable
+ * abstraction
+ */
+extern PVRSRV_ERROR
+PMRAcquireKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRAcquireSparseKernelMappingData(PMR *psPMR,
+ size_t uiLogicalOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ size_t *puiLengthOut,
+ IMG_HANDLE *phPrivOut);
+
+extern PVRSRV_ERROR
+PMRReleaseKernelMappingData(PMR *psPMR,
+ IMG_HANDLE hPriv);
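+
+/*
+ * Illustrative kernel-mapping sketch (error handling elided; uiSize is
+ * assumed not to exceed the PMR's logical size):
+ *
+ *    void *pvKernAddr;
+ *    size_t uiMappedSize;
+ *    IMG_HANDLE hPriv;
+ *
+ *    eError = PMRAcquireKernelMappingData(psPMR, 0, uiSize,
+ *                                         &pvKernAddr, &uiMappedSize, &hPriv);
+ *    OSDeviceMemSet(pvKernAddr, 0, uiSize);
+ *    PMRReleaseKernelMappingData(psPMR, hPriv);
+ */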
+
+#if defined(INTEGRITY_OS)
+extern PVRSRV_ERROR
+PMRMapMemoryObject(PMR *psPMR,
+ IMG_HANDLE *phMemObj,
+ void **pvClientAddr,
+ IMG_HANDLE *phPrivOut);
+extern PVRSRV_ERROR
+PMRUnmapMemoryObject(PMR *psPMR,
+ IMG_HANDLE hPriv);
+#endif
+
+/*
+ * PMR_ReadBytes()
+ *
+ * calls into the PMR implementation to read up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will read up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is read, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which aren't, so we will just write 0 for invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_ReadBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
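+
+/*
+ * Illustrative sketch: read the first 16 bytes of a PMR into a local
+ * buffer (error handling elided):
+ *
+ *    IMG_UINT8 acBuffer[16];
+ *    size_t uiNumBytes;
+ *
+ *    eError = PMR_ReadBytes(psPMR, 0, acBuffer, sizeof(acBuffer), &uiNumBytes);
+ */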
+
+/*
+ * PMR_WriteBytes()
+ *
+ * calls into the PMR implementation to write up to uiBufSz bytes,
+ * returning the actual number read in *puiNumBytes
+ *
+ * this will write up to the end of the PMR, or the next symbolic name
+ * boundary, or until the requested number of bytes is written, whichever
+ * comes first
+ *
+ * In the case of sparse PMRs the caller doesn't know which offsets are
+ * valid and which aren't, so we will just ignore data at invalid offsets
+ */
+extern PVRSRV_ERROR
+PMR_WriteBytes(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
+
+/**************************************************************************/ /*!
+@Function PMRMMapPMR
+@Description Performs the necessary steps to map the PMR into a user process
+ address space. The caller does not need to call
+ PMRLockSysPhysAddresses before calling this function.
+
+@Input psPMR PMR to map.
+
+@Input pOSMMapData OS specific data needed to create a mapping.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success or an error otherwise.
+*/ /***************************************************************************/
+extern PVRSRV_ERROR
+PMRMMapPMR(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+/*
+ * PMRRefPMR()
+ *
+ * Take a reference on the passed in PMR
+ */
+extern void
+PMRRefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefPMR()
+ *
+ * This undoes a call to any of the PhysmemNew* family of APIs
+ * (i.e. any PMR factory "constructor")
+ *
+ * This relinquishes a reference to the PMR, and, where the refcount
+ * reaches 0, causes the PMR to be destroyed (calling the finalizer
+ * callback on the PMR, if there is one)
+ */
+extern PVRSRV_ERROR
+PMRUnrefPMR(PMR *psPMR);
+
+/*
+ * PMRUnrefUnlockPMR()
+ *
+ * Same as above but also unlocks the PMR.
+ */
+extern PVRSRV_ERROR
+PMRUnrefUnlockPMR(PMR *psPMR);
+
+extern PPVRSRV_DEVICE_NODE
+PMR_DeviceNode(const PMR *psPMR);
+
+/*
+ * PMRIsPMRLive()
+ *
+ * This function returns true if the PMR is in use and false otherwise.
+ * This function is not thread safe and hence the caller
+ * needs to ensure the thread safety by explicitly taking
+ * the lock on the PMR or through other means */
+IMG_BOOL PMRIsPMRLive(PMR *psPMR);
+
+/*
+ * PMR_Flags()
+ *
+ * Flags are static and guaranteed for the life of the PMR. Thus this
+ * function is idempotent and acquire/release semantics is not
+ * required.
+ *
+ * Returns the flags as specified on the PMR. The flags are to be
+ * interpreted as mapping permissions
+ */
+extern PMR_FLAGS_T
+PMR_Flags(const PMR *psPMR);
+
+extern IMG_BOOL
+PMR_IsSparse(const PMR *psPMR);
+
+extern IMG_BOOL
+PMR_IsUnpinned(const PMR *psPMR);
+
+extern PVRSRV_ERROR
+PMR_LogicalSize(const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T *puiLogicalSize);
+
+extern PVRSRV_ERROR
+PMR_PhysicalSize(const PMR *psPMR,
+ IMG_DEVMEM_SIZE_T *puiPhysicalSize);
+
+extern PHYS_HEAP *
+PMR_PhysHeap(const PMR *psPMR);
+
+extern PMR_MAPPING_TABLE *
+PMR_GetMappigTable(const PMR *psPMR);
+
+extern IMG_UINT32
+PMR_GetLog2Contiguity(const PMR *psPMR);
+
+extern const IMG_CHAR *
+PMR_GetAnnotation(const PMR *psPMR);
+
+/*
+ * PMR_IsOffsetValid()
+ *
+ * Returns whether an address offset inside a PMR has a valid
+ * physical backing.
+ */
+extern PVRSRV_ERROR
+PMR_IsOffsetValid(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_BOOL *pbValid);
+
+extern PMR_IMPL_TYPE
+PMR_GetType(const PMR *psPMR);
+
+/*
+ * PMR_SysPhysAddr()
+ *
+ * A note regarding Lock/Unlock semantics
+ * ======================================
+ *
+ * PMR_SysPhysAddr may only be called after PMRLockSysPhysAddresses()
+ * has been called. The data returned may be used only until
+ * PMRUnlockSysPhysAddresses() is called after which time the licence
+ * to use the data is revoked and the information may be invalid.
+ *
+ * Given an offset, this function returns the device physical address of the
+ * corresponding page in the PMR. It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ * If the caller only wants one physical address, it is sufficient to pass in:
+ * ui32Log2PageSize==0 and ui32NumOfPages==1
+ */
+extern PVRSRV_ERROR
+PMR_DevPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEV_PHYADDR *psDevAddr,
+ IMG_BOOL *pbValid);
+
+/*
+ * PMR_CpuPhysAddr()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * Given an offset, this function returns the CPU physical address of the
+ * corresponding page in the PMR. It may be called multiple times
+ * until the address of all relevant pages has been determined.
+ *
+ */
+extern PVRSRV_ERROR
+PMR_CpuPhysAddr(const PMR *psPMR,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_CPU_PHYADDR *psCpuAddrPtr,
+ IMG_BOOL *pbValid);
+
+PVRSRV_ERROR
+PMRGetUID(PMR *psPMR,
+ IMG_UINT64 *pui64UID);
+/*
+ * PMR_ChangeSparseMem()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in device space by adding/deleting the pages
+ * as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMem(PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 uiSparseFlags);
+
+/*
+ * PMR_ChangeSparseMemCPUMap()
+ *
+ * See note above about Lock/Unlock semantics.
+ *
+ * This function alters the memory map of the given PMR in CPU space by adding/deleting the pages
+ * as requested.
+ *
+ */
+PVRSRV_ERROR PMR_ChangeSparseMemCPUMap(PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices);
+
+#if defined(PDUMP)
+
+extern void
+PDumpPMRMallocPMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 ui32ChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiMappingTable,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoPtr,
+ IMG_UINT32 ui32PDumpFlags);
+
+extern void
+PDumpPMRFreePMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_HANDLE hPDumpAllocationInfoHandle);
+
+extern void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+ IMG_UINT32 uiBlockSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut);
+/*
+ * PMR_PDumpSymbolicAddr()
+ *
+ * Given an offset, returns the pdump memspace name and symbolic
+ * address of the corresponding page in the PMR.
+ *
+ * Note that PDump memspace names and symbolic addresses are static
+ * and valid for the lifetime of the PMR, therefore we don't require
+ * acquire/release semantics here.
+ *
+ * Note that it is expected that the pdump "mapping" code will call
+ * this function multiple times as each page is mapped in turn
+ *
+ * Note that NextSymName is the offset from the base of the PMR to the
+ * next pdump symbolic address (or the end of the PMR if the PMR only
+ * had one PDUMPMALLOC).
+ */
+extern PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32NamespaceNameLen,
+ IMG_CHAR *pszNamespaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName
+ );
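+
+/*
+ * Illustrative sketch of a single lookup, as a pdump "mapping" loop might
+ * perform per page (error handling elided; the buffer size macros are the
+ * same ones used by the callers in pmr.c):
+ *
+ *    IMG_CHAR aszMemspace[PHYSMEM_PDUMP_MEMSPACE_MAX_LENGTH];
+ *    IMG_CHAR aszSymbolic[PHYSMEM_PDUMP_SYMNAME_MAX_LENGTH];
+ *    IMG_DEVMEM_OFFSET_T uiOffsetOut, uiNextSymName;
+ *
+ *    eError = PMR_PDumpSymbolicAddr(psPMR, uiLogicalOffset,
+ *                                   sizeof(aszMemspace), aszMemspace,
+ *                                   sizeof(aszSymbolic), aszSymbolic,
+ *                                   &uiOffsetOut, &uiNextSymName);
+ */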
+
+/*
+ * PMRPDumpLoadMemValue32()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem32
+ *
+ * Adds in the pdump script stream a copy of a dword in one PMR memory location
+ * to another PMR memory location.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpCopyMem32(PMR *psDstPMR,
+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+ PMR *psSrcPMR,
+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+ const IMG_CHAR *pszTmpVar,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMemValue64()
+ *
+ * writes the current contents of a dword in PMR memory to the pdump
+ * script stream. Useful for patching a buffer by simply editing the
+ * script output file in ASCII plain text.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpCopyMem64
+ *
+ * Adds in the pdump script stream a copy of a quadword in one PMR memory location
+ * to another PMR memory location.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpCopyMem64(PMR *psDstPMR,
+ IMG_DEVMEM_OFFSET_T uiDstLogicalOffset,
+ PMR *psSrcPMR,
+ IMG_DEVMEM_OFFSET_T uiSrcLogicalOffset,
+ const IMG_CHAR *pszTmpVar,
+ PDUMP_FLAGS_T uiPDumpFlags);
+
+/*
+ * PMRPDumpLoadMem()
+ *
+ * writes the current contents of the PMR memory to the pdump PRM
+ * stream, and emits some PDump code to the script stream to LDB said
+ * bytes from said file. If bZero is IMG_TRUE then the PDump zero page
+ * is used as the source for the LDB.
+ *
+ */
+extern PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_BOOL bZero);
+
+/*
+ * PMRPDumpSaveToFile()
+ *
+ * emits some PDump that does an SAB (save bytes) using the PDump
+ * symbolic address of the PMR. Note that this is generally not the
+ * preferred way to dump the buffer contents. There is an equivalent
+ * function in devicemem_server.h which also emits SAB but using the
+ * virtual address, which is the "right" way to dump the buffer
+ * contents to a file. This function exists just to aid testing by
+ * providing a means to dump the PMR directly by symbolic address
+ * also.
+ */
+extern PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRMallocPMR)
+#endif
+static INLINE void
+PDumpPMRMallocPMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *puiMappingTable,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoPtr,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+ PVR_UNREFERENCED_PARAMETER(ui32NumPhysChunks);
+ PVR_UNREFERENCED_PARAMETER(ui32NumVirtChunks);
+ PVR_UNREFERENCED_PARAMETER(puiMappingTable);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+ PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoPtr);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRFreePMR)
+#endif
+static INLINE void
+PDumpPMRFreePMR(PMR *psPMR,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiBlockSize,
+ IMG_UINT32 uiLog2Contiguity,
+ IMG_HANDLE hPDumpAllocationInfoHandle)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+ PVR_UNREFERENCED_PARAMETER(uiLog2Contiguity);
+ PVR_UNREFERENCED_PARAMETER(hPDumpAllocationInfoHandle);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PDumpPMRChangeSparsePMR)
+#endif
+static INLINE void
+PDumpPMRChangeSparsePMR(PMR *psPMR,
+ IMG_UINT32 uiBlockSize,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_BOOL bInitialise,
+ IMG_UINT32 ui32InitValue,
+ IMG_HANDLE *phPDumpAllocInfoOut)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiBlockSize);
+ PVR_UNREFERENCED_PARAMETER(ui32AllocPageCount);
+ PVR_UNREFERENCED_PARAMETER(pai32AllocIndices);
+ PVR_UNREFERENCED_PARAMETER(ui32FreePageCount);
+ PVR_UNREFERENCED_PARAMETER(pai32FreeIndices);
+ PVR_UNREFERENCED_PARAMETER(bInitialise);
+ PVR_UNREFERENCED_PARAMETER(ui32InitValue);
+ PVR_UNREFERENCED_PARAMETER(phPDumpAllocInfoOut);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMR_PDumpSymbolicAddr)
+#endif
+static INLINE PVRSRV_ERROR
+PMR_PDumpSymbolicAddr(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32NamespaceNameLen,
+ IMG_CHAR *pszNamespaceName,
+ IMG_UINT32 ui32SymbolicAddrLen,
+ IMG_CHAR *pszSymbolicAddr,
+ IMG_DEVMEM_OFFSET_T *puiNewOffset,
+ IMG_DEVMEM_OFFSET_T *puiNextSymName)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32NamespaceNameLen);
+ PVR_UNREFERENCED_PARAMETER(pszNamespaceName);
+ PVR_UNREFERENCED_PARAMETER(ui32SymbolicAddrLen);
+ PVR_UNREFERENCED_PARAMETER(pszSymbolicAddr);
+ PVR_UNREFERENCED_PARAMETER(puiNewOffset);
+ PVR_UNREFERENCED_PARAMETER(puiNextSymName);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue32(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMemValue64)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMemValue64(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT64 ui64Value,
+ PDUMP_FLAGS_T uiPDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui64Value);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpLoadMem)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpLoadMem(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PDUMP_FLAGS_T uiPDumpFlags,
+ IMG_BOOL bZero)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiPDumpFlags);
+ PVR_UNREFERENCED_PARAMETER(bZero);
+ return PVRSRV_OK;
+}
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpSaveToFile)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpSaveToFile(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT32 uiArraySize,
+ const IMG_CHAR *pszFilename,
+ IMG_UINT32 uiFileOffset)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+ PVR_UNREFERENCED_PARAMETER(uiArraySize);
+ PVR_UNREFERENCED_PARAMETER(pszFilename);
+ PVR_UNREFERENCED_PARAMETER(uiFileOffset);
+ return PVRSRV_OK;
+}
+
+#endif /* PDUMP */
+
+/* This function returns the private data that a pmr subtype
+ squirrelled in here. We use the function table pointer as
+ "authorization" that this function is being called by the pmr
+ subtype implementation. We can assume (assert) that. It would be
+ a bug in the implementation of the pmr subtype if this assertion
+ ever fails. */
+extern void *
+PMRGetPrivateData(const PMR *psPMR,
+ const PMR_IMPL_FUNCTAB *psFuncTab);
+
+extern PVRSRV_ERROR
+PMRZeroingPMR(PMR *psPMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+PVRSRV_ERROR
+PMRDumpPageList(PMR *psReferencePMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize);
+
+extern PVRSRV_ERROR
+PMRWritePMPageList(/* Target PMR, offset, and length */
+ PMR *psPageListPMR,
+ IMG_DEVMEM_OFFSET_T uiTableOffset,
+ IMG_DEVMEM_SIZE_T uiTableLength,
+ /* Referenced PMR, and "page" granularity */
+ PMR *psReferencePMR,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2PageSize,
+ PMR_PAGELIST **ppsPageList);
+
+/* Doesn't actually erase the page list - just releases the appropriate refcounts */
+extern PVRSRV_ERROR // should be void, surely
+PMRUnwritePMPageList(PMR_PAGELIST *psPageList);
+
+#if defined(PDUMP)
+extern PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiFlags);
+
+extern PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize);
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpPol32)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpPol32(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiLogicalOffset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PMRPDumpCBP)
+#endif
+static INLINE PVRSRV_ERROR
+PMRPDumpCBP(const PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiReadOffset,
+ IMG_DEVMEM_OFFSET_T uiWriteOffset,
+ IMG_DEVMEM_SIZE_T uiPacketSize,
+ IMG_DEVMEM_SIZE_T uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psPMR);
+ PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+ return PVRSRV_OK;
+}
+#endif
+
+extern PPVRSRV_DEVICE_NODE PMRGetExportDeviceNode(PMR_EXPORT *psExportPMR);
+
+/*
+ * PMRInit()
+ *
+ * To be called once and only once to initialise the internal data in
+ * the PMR module (mutexes and such)
+ *
+ * Not for general use. Only PVRSRVInit() should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRInit(void);
+
+/*
+ * PMRDeInit()
+ *
+ * To be called once and only once to deinitialise the internal data in
+ * the PMR module (mutexes and such) and for debug checks
+ *
+ * Not for general use. Only PVRSRVDeInit() should be calling this.
+ */
+extern PVRSRV_ERROR
+PMRDeInit(void);
+
+#if defined(PVR_RI_DEBUG)
+extern PVRSRV_ERROR
+PMRStoreRIHandle(PMR *psPMR,
+ void *hRIHandle);
+#endif
+
+#endif /* _SRVSRV_PMR_H_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pmr_impl.h b/drivers/gpu/drm/img-rogue/1.10/pmr_impl.h
new file mode 100644
index 00000000000000..bac2c8d003a627
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pmr_impl.h
@@ -0,0 +1,522 @@
+/**************************************************************************/ /*!
+@File
+@Title Implementation Callbacks for Physmem (PMR) abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Part of the memory management. This file is for definitions
+ that are private to the world of PMRs, but that need to be
+ shared between pmr.c itself and the modules that implement the
+ callbacks for the PMR.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SRVSRV_PMR_IMPL_H_
+#define _SRVSRV_PMR_IMPL_H_
+
+/* include/ */
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+typedef struct _PMR_ PMR;
+/* stuff that per-flavour callbacks need to share with pmr.c */
+typedef void *PMR_IMPL_PRIVDATA;
+
+typedef PVRSRV_MEMALLOCFLAGS_T PMR_FLAGS_T;
+typedef struct _PMR_MAPPING_TABLE_ PMR_MAPPING_TABLE;
+typedef void *PMR_MMAP_DATA;
+
+/**
+ * Which PMR factory has created this PMR?
+ */
+typedef enum _PMR_IMPL_TYPE_
+{
+ PMR_TYPE_NONE = 0,
+ PMR_TYPE_OSMEM,
+ PMR_TYPE_LMA,
+ PMR_TYPE_DMABUF,
+ PMR_TYPE_EXTMEM,
+ PMR_TYPE_DC,
+ PMR_TYPE_TDFWCODE,
+ PMR_TYPE_TDSECBUF
+} PMR_IMPL_TYPE;
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_LOCK_PHYS_ADDRESSES_FN
+
+@Description Called to lock down the physical addresses for all pages
+ allocated for a PMR.
+ The default implementation is to simply increment a
+ lock-count for debugging purposes.
+ If overridden, the PFN_LOCK_PHYS_ADDRESSES_FN function will
+ be called when someone first requires a physical address,
+ and the PFN_UNLOCK_PHYS_ADDRESSES_FN counterpart will be
+ called when the last such reference is released.
+ The PMR implementation may assume that physical addresses
+ will have been "locked" in this manner before any call is
+ made to the pfnDevPhysAddr() callback.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_LOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_UNLOCK_PHYS_ADDRESSES_FN
+
+@Description Called to release the lock taken on the physical addresses
+ for all pages allocated for a PMR.
+ The default implementation is to simply decrement a
+ lock-count for debugging purposes.
+ If overridden, the PFN_UNLOCK_PHYS_ADDRESSES_FN will be
+ called when the last reference taken on the PMR is
+ released.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNLOCK_PHYS_ADDRESSES_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_DEV_PHYS_ADDR_FN
+
+@Description Called to obtain one or more physical addresses for given
+ offsets within a PMR.
+
+ The PFN_LOCK_PHYS_ADDRESSES_FN callback (if overridden) is
+ guaranteed to have been called prior to calling the
+ PFN_DEV_PHYS_ADDR_FN callback and the caller promises not to
+ rely on the physical address thus obtained after the
+ PFN_UNLOCK_PHYS_ADDRESSES_FN callback is called.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input ui32Log2PageSize The log2 page size.
+@Input ui32NumOfAddr The number of addresses to be
+ returned
+@Input puiOffset The offset from the start of the
+ PMR (in bytes) for which the
+ physical address is required.
+ Where multiple addresses are
+ requested, this will contain a
+ list of offsets.
+@Output pbValid List of boolean flags indicating
+ which addresses in the returned
+ list (psDevAddrPtr) are valid
+ (for sparse allocations, not all
+ pages may have a physical backing)
+@Output psDevAddrPtr Returned list of physical addresses
+
+@Return PVRSRV_OK if the operation was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_DEV_PHYS_ADDR_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfAddr,
+ IMG_DEVMEM_OFFSET_T *puiOffset,
+ IMG_BOOL *pbValid,
+ IMG_DEV_PHYADDR *psDevAddrPtr);
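+
+/*
+ * Minimal implementation sketch for a hypothetical physically contiguous
+ * PMR flavour (the _CONTIG_PRIV type and its sBase field are invented for
+ * this illustration; they are not part of the driver):
+ *
+ *    static PVRSRV_ERROR _ContigDevPhysAddr(PMR_IMPL_PRIVDATA pvPriv,
+ *                                           IMG_UINT32 ui32Log2PageSize,
+ *                                           IMG_UINT32 ui32NumOfAddr,
+ *                                           IMG_DEVMEM_OFFSET_T *puiOffset,
+ *                                           IMG_BOOL *pbValid,
+ *                                           IMG_DEV_PHYADDR *psDevAddrPtr)
+ *    {
+ *        _CONTIG_PRIV *psPriv = pvPriv;
+ *        IMG_UINT32 i;
+ *
+ *        PVR_UNREFERENCED_PARAMETER(ui32Log2PageSize);
+ *        for (i = 0; i < ui32NumOfAddr; i++)
+ *        {
+ *            psDevAddrPtr[i].uiAddr = psPriv->sBase.uiAddr + puiOffset[i];
+ *            pbValid[i] = IMG_TRUE;
+ *        }
+ *        return PVRSRV_OK;
+ *    }
+ */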
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+
+@Description Called to obtain a kernel-accessible address (mapped to a
+ virtual address if required) for the PMR for use internally
+ in Services.
+
+ Implementation of this function for the (default) PMR factory providing
+ OS-allocations is mandatory (the driver will expect to be able to call
+ this function for OS-provided allocations).
+ For other PMR factories, implementation of this function is only necessary
+ where an MMU mapping is required for the Kernel to be able to access the
+ allocated memory.
+ If no mapping is needed, this function can remain unimplemented and the
+ pfn may be set to NULL.
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input uiOffset Offset from the beginning of
+ the PMR at which mapping is to
+ start
+@Input uiSize Size of mapping (in bytes)
+@Output ppvKernelAddressOut Mapped kernel address
+@Output phHandleOut Returned handle of the new mapping
+@Input ulFlags Mapping flags
+
+@Return PVRSRV_OK if the mapping was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ size_t uiOffset,
+ size_t uiSize,
+ void **ppvKernelAddressOut,
+ IMG_HANDLE *phHandleOut,
+ PMR_FLAGS_T ulFlags);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_RELEASE_KERNEL_MAPPING_DATA_FN
+
+@Description Called to release a mapped kernel virtual address
+
+ Implementation of this callback is mandatory if PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+ is provided for the PMR factory, otherwise this function can remain unimplemented
+ and the pfn may be set to NULL.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input hHandle Handle of the mapping to be
+ released
+
+@Return None
+*/
+/*****************************************************************************/
+typedef void (*PFN_RELEASE_KERNEL_MAPPING_DATA_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE hHandle);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_READ_BYTES_FN
+
+@Description Called to read bytes from an unmapped allocation
+
+ Implementation of this callback is optional -
+ where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+ to map the entire PMR (if an MMU mapping is required for the Kernel to be
+ able to access the allocated memory).
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input uiOffset Offset from the beginning of
+ the PMR at which to begin
+ reading
+@Output pcBuffer Buffer in which to return the
+ read data
+@Input uiBufSz Number of bytes to be read
+@Output puiNumBytes Number of bytes actually read
+ (may be less than uiBufSz)
+
+@Return PVRSRV_OK if the read was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_READ_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_WRITE_BYTES_FN
+
+@Description Called to write bytes into an unmapped allocation
+
+ Implementation of this callback is optional -
+ where it is not provided, the driver will use PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN
+ to map the entire PMR (if an MMU mapping is required for the Kernel to be
+ able to access the allocated memory).
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input uiOffset Offset from the beginning of
+ the PMR at which to begin
+ writing
+@Input pcBuffer Buffer containing the data to be
+ written
+@Input uiBufSz Number of bytes to be written
+@Output puiNumBytes Number of bytes actually written
+ (may be less than uiBufSz)
+
+@Return PVRSRV_OK if the write was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_WRITE_BYTES_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_UINT8 *pcBuffer,
+ size_t uiBufSz,
+ size_t *puiNumBytes);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_UNPIN_MEM_FN
+
+@Description Called to unpin an allocation.
+ Once unpinned, the pages backing the allocation may be
+ re-used by the Operating System for another purpose.
+ When the pages are required again, they may be re-pinned
+ (by calling PFN_PIN_MEM_FN). The driver will try to return
+ the same pages as before. The caller will be told if the
+ content of these returned pages has been modified or if
+ the pages returned are not the original pages.
+
+ Implementation of this callback is optional.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the unpin was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_UNPIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_PIN_MEM_FN
+
+@Description Called to pin a previously unpinned allocation.
+ The driver will try to return the same pages as were previously
+ assigned to the allocation. The caller will be told if the
+ content of these returned pages has been modified or if
+ the pages returned are not the original pages.
+
+ Implementation of this callback is optional.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Input psMappingTable Mapping table, which describes how
+ virtual 'chunks' are to be mapped to
+ physical 'chunks' for the allocation.
+
+@Return PVRSRV_OK if the original pages were returned unmodified.
+ PVRSRV_ERROR_PMR_NEW_MEMORY if the memory returned was modified
+ or different pages were returned.
+ Another PVRSRV_ERROR code on failure.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_PIN_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ PMR_MAPPING_TABLE *psMappingTable);
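+
+/* Illustrative caller pattern (a sketch, not part of the driver): a
+ * caller re-pinning an allocation must treat PVRSRV_ERROR_PMR_NEW_MEMORY
+ * as success with stale content:
+ *
+ *     eError = psFuncTab->pfnPinMem(pvPriv, psMappingTable);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         // pages are usable but their content must be re-initialised
+ *     }
+ *     else if (eError != PVRSRV_OK)
+ *     {
+ *         // pinning failed; the allocation has no physical backing
+ *     }
+ */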
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_CHANGE_SPARSE_MEM_FN
+
+@Description Called to modify the physical backing for a given sparse
+ allocation.
+ The caller provides a list of the pages within the sparse
+ allocation which should be backed with a physical allocation
+ and a list of the pages which do not require backing.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input psPMR The PMR of the sparse allocation
+ to be modified
+@Input ui32AllocPageCount The number of pages specified in
+ pai32AllocIndices
+@Input pai32AllocIndices The list of pages in the sparse
+ allocation that should be backed
+ with a physical allocation. Pages
+ are referenced by their index
+ within the sparse allocation
+ (e.g. in a 10 page allocation, pages
+ are denoted by indices 0 to 9)
+@Input ui32FreePageCount The number of pages specified in
+ pai32FreeIndices
+@Input pai32FreeIndices The list of pages in the sparse
+ allocation that do not require
+ a physical allocation.
+@Input ui32Flags Allocation flags
+
+@Return PVRSRV_OK if the sparse allocation physical backing was updated
+ successfully, an error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ const PMR *psPMR,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ IMG_UINT32 ui32Flags);
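+
+/* Illustrative parameters (a sketch, not part of the driver): to add
+ * physical backing for pages 0 and 3 of a 10-page sparse PMR while
+ * dropping the backing for page 7:
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 0, 3 };
+ *     IMG_UINT32 aui32Free[]  = { 7 };
+ *
+ *     eError = psFuncTab->pfnChangeSparseMem(pvPriv, psPMR,
+ *                                            2, aui32Alloc,
+ *                                            1, aui32Free,
+ *                                            ui32Flags);
+ */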
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN
+
+@Description Called to modify which pages are mapped for a given sparse
+ allocation.
+ The caller provides a list of the pages within the sparse
+ allocation which should be given a CPU mapping and a list
+ of the pages which do not require a CPU mapping.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input psPMR The PMR of the sparse allocation
+ to be modified
+@Input sCpuVAddrBase The virtual base address of the
+ sparse allocation
+@Input ui32AllocPageCount The number of pages specified in
+ pai32AllocIndices
+@Input pai32AllocIndices The list of pages in the sparse
+ allocation that should be given
+ a CPU mapping. Pages are referenced
+ by their index within the sparse
+ allocation (e.g. in a 10 page
+ allocation, pages are denoted by
+ indices 0 to 9)
+@Input ui32FreePageCount The number of pages specified in
+ pai32FreeIndices
+@Input pai32FreeIndices The list of pages in the sparse
+ allocation that do not require a CPU
+ mapping.
+
+@Return PVRSRV_OK if the page mappings were updated successfully, an
+ error code otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ const PMR *psPMR,
+ IMG_UINT64 sCpuVAddrBase,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_MMAP_FN
+
+@Description Called to map pages in the specified PMR.
+
+ Implementation of this callback is optional.
+ Where it is provided, it will be used in place of OSMMapPMRGeneric().
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+@Input psPMR The PMR of the allocation to be
+ mapped
+@Input pMMapData OS-specific data to describe how
+ mapping should be performed
+
+@Return PVRSRV_OK if the mapping was successful, an error code
+ otherwise.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_MMAP_FN)(PMR_IMPL_PRIVDATA pvPriv,
+ PMR *psPMR,
+ PMR_MMAP_DATA pMMapData);
+
+/*************************************************************************/ /*!
+@Brief Callback function type PFN_FINALIZE_FN
+
+@Description Called to destroy the PMR.
+ This callback will be called only when all references to
+ the PMR have been dropped.
+ The PMR was created via a call to PhysmemNewRamBackedPMR()
+ and is destroyed via this callback.
+
+ Implementation of this callback is mandatory.
+
+@Input pvPriv Private data (which was generated
+ by the PMR factory when PMR was
+ created)
+
+@Return PVRSRV_OK if the PMR destruction was successful, an error
+ code otherwise.
+ Currently PVRSRV_ERROR_PMR_STILL_REFERENCED is the only
+ error returned from physmem_dmabuf.c layer and on this
+ error, destroying of the PMR is aborted without disturbing
+ the PMR state.
+*/
+/*****************************************************************************/
+typedef PVRSRV_ERROR (*PFN_FINALIZE_FN)(PMR_IMPL_PRIVDATA pvPriv);
+
+struct _PMR_IMPL_FUNCTAB_ {
+ PFN_LOCK_PHYS_ADDRESSES_FN pfnLockPhysAddresses;
+ PFN_UNLOCK_PHYS_ADDRESSES_FN pfnUnlockPhysAddresses;
+
+ PFN_DEV_PHYS_ADDR_FN pfnDevPhysAddr;
+
+ PFN_ACQUIRE_KERNEL_MAPPING_DATA_FN pfnAcquireKernelMappingData;
+ PFN_RELEASE_KERNEL_MAPPING_DATA_FN pfnReleaseKernelMappingData;
+
+#if defined(INTEGRITY_OS)
+ /*
+ * MapMemoryObject()/UnmapMemoryObject()
+ *
+ * called to map/unmap memory objects in Integrity OS
+ */
+
+ PVRSRV_ERROR (*pfnMapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv,
+ IMG_HANDLE *phMemObj,
+ void **pvClientAddr,
+ IMG_HANDLE *phHandleOut);
+ PVRSRV_ERROR (*pfnUnmapMemoryObject)(PMR_IMPL_PRIVDATA pvPriv);
+
+#if defined(USING_HYPERVISOR)
+ IMG_HANDLE (*pfnGetPmr)(PMR_IMPL_PRIVDATA pvPriv, size_t ulOffset);
+#endif
+#endif
+
+ PFN_READ_BYTES_FN pfnReadBytes;
+ PFN_WRITE_BYTES_FN pfnWriteBytes;
+
+ PFN_UNPIN_MEM_FN pfnUnpinMem;
+ PFN_PIN_MEM_FN pfnPinMem;
+
+ PFN_CHANGE_SPARSE_MEM_FN pfnChangeSparseMem;
+ PFN_CHANGE_SPARSE_MEM_CPU_MAP_FN pfnChangeSparseMemCPUMap;
+
+ PFN_MMAP_FN pfnMMap;
+
+ PFN_FINALIZE_FN pfnFinalize;
+};
+typedef struct _PMR_IMPL_FUNCTAB_ PMR_IMPL_FUNCTAB;
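+
+/* Illustrative sketch (not part of the driver): a minimal function table
+ * for a PMR factory that provides only the mandatory callbacks. The
+ * Example* names are hypothetical; optional entries are left NULL so the
+ * driver falls back to its generic paths (e.g. the kernel mapping
+ * callbacks for reads/writes, OSMMapPMRGeneric() for mmap).
+ */
+#if 0
+static const PMR_IMPL_FUNCTAB gsExampleFuncTab = {
+	.pfnLockPhysAddresses     = ExampleLockPhysAddresses,
+	.pfnUnlockPhysAddresses   = ExampleUnlockPhysAddresses,
+	.pfnDevPhysAddr           = ExampleDevPhysAddr,
+	.pfnChangeSparseMem       = ExampleChangeSparseMem,
+	.pfnChangeSparseMemCPUMap = ExampleChangeSparseMemCPUMap,
+	.pfnFinalize              = ExampleFinalize,
+	/* all other callbacks default to NULL (optional) */
+};
+#endif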
+
+
+#endif /* of #ifndef _SRVSRV_PMR_IMPL_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pmr_os.c b/drivers/gpu/drm/img-rogue/1.10/pmr_os.c
new file mode 100644
index 00000000000000..4e0f803b32417c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pmr_os.c
@@ -0,0 +1,617 @@
+/*************************************************************************/ /*!
+@File
+@Title Linux OS PMR functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <asm/io.h>
+#include <asm/page.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#if defined(CONFIG_L4)
+#include <asm/api-l4env/api.h>
+#endif
+#include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+#include <linux/pfn_t.h>
+#include <linux/pfn.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "allocmem.h"
+#include "devicemem_server_utils.h"
+#include "pmr.h"
+#include "pmr_os.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+#include "mmap_stats.h"
+#endif
+
+#include "kernel_compatibility.h"
+
+/*
+ * x86_32:
+ * Use vm_insert_page because remap_pfn_range has issues when mapping HIGHMEM
+ * pages with default memory attributes; these HIGHMEM pages are skipped in
+ * set_pages_array_[uc,wc] during allocation; see reserve_pfn_range().
+ * Also vm_insert_page is faster.
+ *
+ * x86_64:
+ * Use vm_insert_page because it is faster.
+ *
+ * Other platforms:
+ * Use remap_pfn_range by default because it does not issue a cache flush.
+ * It is known that ARM32 benefits from this. When other platforms become
+ * available, it should be investigated whether this assumption holds for
+ * them as well.
+ *
+ * Since vm_insert_page does more precise memory accounting, we have the build
+ * flag PVR_MMAP_USE_VM_INSERT that forces its use. This is useful as a debug
+ * feature.
+ *
+ */
+#if defined(CONFIG_X86) || defined(PVR_MMAP_USE_VM_INSERT)
+#define PMR_OS_USE_VM_INSERT_PAGE 1
+#endif
+
+static void MMapPMROpen(struct vm_area_struct *ps_vma)
+{
+ PMR *psPMR = ps_vma->vm_private_data;
+
+ /* Our VM flags should ensure this function never gets called */
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Unexpected mmap open call, this is probably an application bug.",
+ __func__));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: vma struct: 0x%p, vAddr: %#lX, length: %#lX, PMR pointer: 0x%p",
+ __func__,
+ ps_vma,
+ ps_vma->vm_start,
+ ps_vma->vm_end - ps_vma->vm_start,
+ psPMR));
+
+ /* In case we get called anyway, do things right by increasing the refcount
+  * and locking down the physical addresses. */
+ PMRRefPMR(psPMR);
+
+ if (PMRLockSysPhysAddresses(psPMR) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Could not lock down physical addresses, aborting.", __func__));
+ PMRUnrefPMR(psPMR);
+ }
+}
+
+static void MMapPMRClose(struct vm_area_struct *ps_vma)
+{
+ PMR *psPMR = ps_vma->vm_private_data;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ {
+ uintptr_t vAddr = ps_vma->vm_start;
+
+ while (vAddr < ps_vma->vm_end)
+ {
+ /* USER MAPPING */
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+ (IMG_UINT64)vAddr,
+ OSGetCurrentClientProcessIDKM());
+ vAddr += PAGE_SIZE;
+ }
+ }
+#else
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+ ps_vma->vm_end - ps_vma->vm_start,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+ PMRUnlockSysPhysAddresses(psPMR);
+ PMRUnrefPMR(psPMR);
+}
+
+/*
+ * This vma operation is used to read data from mmap regions. It is called
+ * by access_process_vm, which is called to handle PTRACE_PEEKDATA ptrace
+ * requests and reads from /proc/<pid>/mem.
+ */
+static int MMapVAccess(struct vm_area_struct *ps_vma, unsigned long addr,
+ void *buf, int len, int write)
+{
+ PMR *psPMR = ps_vma->vm_private_data;
+ unsigned long ulOffset = addr - ps_vma->vm_start;
+ size_t uiBytesCopied;
+ PVRSRV_ERROR eError;
+ int iRetVal = -EINVAL;
+
+ if (write)
+ {
+ eError = PMR_WriteBytes(psPMR,
+ (IMG_DEVMEM_OFFSET_T) ulOffset,
+ buf,
+ len,
+ &uiBytesCopied);
+ }
+ else
+ {
+ eError = PMR_ReadBytes(psPMR,
+ (IMG_DEVMEM_OFFSET_T) ulOffset,
+ buf,
+ len,
+ &uiBytesCopied);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Error from %s (%d)",
+ __func__,
+ write ? "PMR_WriteBytes" : "PMR_ReadBytes",
+ eError));
+ }
+ else
+ {
+ iRetVal = uiBytesCopied;
+ }
+
+ return iRetVal;
+}
+
+static const struct vm_operations_struct gsMMapOps =
+{
+ .open = &MMapPMROpen,
+ .close = &MMapPMRClose,
+ .access = MMapVAccess,
+};
+
+static INLINE int _OSMMapPMR(PVRSRV_DEVICE_NODE *psDevNode,
+ struct vm_area_struct *ps_vma,
+ IMG_DEVMEM_OFFSET_T uiOffset,
+ IMG_CPU_PHYADDR *psCpuPAddr,
+ IMG_UINT32 uiLog2PageSize,
+ IMG_BOOL bUseVMInsertPage,
+ IMG_BOOL bUseMixedMap)
+{
+ IMG_INT32 iStatus;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t sPFN;
+#else
+ unsigned long uiPFN;
+#endif
+
+#if defined(CONFIG_L4)
+ size_t size;
+ IMG_CPU_VIRTADDR pvVAddr;
+#if defined(ARM)
+ struct device *dev = psDevNode->psDevConfig->pvOSDevice;
+#endif
+
+ /* In L4, remapping from KM into UM is done via the VA */
+ pvVAddr = l4x_phys_to_virt(psCpuPAddr->uiAddr);
+ if (pvVAddr == NULL)
+ {
+ return -1;
+ }
+
+ for (size = 0; size < 1ULL << uiLog2PageSize; size += PAGE_SIZE)
+ {
+  /* Fault-in pages now; the volatile access stops the compiler optimising
+   * this out. The offset is in bytes, so a byte pointer is used. */
+  *((volatile IMG_UINT8 *)pvVAddr + size) = *((volatile IMG_UINT8 *)pvVAddr + size);
+ }
+
+#if defined(ARM)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = pfn_to_pfn_t(dma_to_pfn(dev, psCpuPAddr->uiAddr));
+#else
+ uiPFN = dma_to_pfn(dev, psCpuPAddr->uiAddr);
+#endif
+#else /* defined(ARM) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = pfn_to_pfn_t(((uintptr_t) pvVAddr) >> PAGE_SHIFT);
+#else
+ uiPFN = ((uintptr_t) pvVAddr) >> PAGE_SHIFT;
+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == (IMG_UINT64)(uintptr_t)pvVAddr);
+#endif
+#endif
+ PVR_ASSERT(bUseVMInsertPage == IMG_FALSE);
+#else /* defined(CONFIG_L4) */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t(psCpuPAddr->uiAddr, 0);
+#else
+ uiPFN = psCpuPAddr->uiAddr >> PAGE_SHIFT;
+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr->uiAddr);
+#endif
+#endif
+
+ /*
+ * vm_insert_page() allows insertion of individual pages into user
+ * VMA space _only_ if page is a order-zero allocated page
+ */
+ if (bUseVMInsertPage)
+ {
+ if (bUseMixedMap)
+ {
+ /*
+ * This path is just for debugging. It should be
+ * equivalent to the remap_pfn_range() path.
+ */
+ iStatus = vm_insert_mixed(ps_vma,
+ ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN);
+#else
+ uiPFN);
+#endif
+ }
+ else
+ {
+ /* Since kernel 3.7 this sets VM_MIXEDMAP internally */
+ iStatus = vm_insert_page(ps_vma,
+ ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t_to_page(sPFN));
+#else
+ pfn_to_page(uiPFN));
+#endif
+ }
+ }
+ else
+ {
+ /*
+   NOTE: Regarding absence of dma_mmap_coherent() in _OSMMapPMR()
+
+   The current services mmap model maps in a PMR's full-length size
+   into the user VMA and applies any user-specified offset to the
+   kernel-returned zero-offset-based VA in the services client; this
+   essentially means the services server ignores ps_vma->vm_pgoff
+   (which houses hPMR) during an mmap call.
+
+   Furthermore, during a DMA/CMA memory allocation, multiple order-n
+   pages are used to satisfy an allocation request because the DMA/CMA
+   framework rounds up the allocation size to the next power of two,
+   which can lead to wasted memory (so we don't allocate using a
+   single call).
+
+   The combination of the above two issues means that we cannot use
+   dma_mmap_coherent(), for the reasons outlined below:
+
+   - Services mmap semantics do not fit dma_mmap_coherent(), which
+     requires a proper ps_vma->vm_pgoff; since this houses an hPMR
+     handle value, calls into dma_mmap_coherent() fail. This could be
+     avoided by forcing ps_vma->vm_pgoff to zero, but ps_vma->vm_pgoff
+     is applied to the DMA bus address PFN and not to the user VMA,
+     which is always mapped at ps_vma->vm_start.
+
+   - As multiple order-n pages are used for DMA/CMA allocations, a
+     single dma_mmap_coherent() call with vma->vm_pgoff set to zero
+     cannot safely be used, because there is no guarantee that all of
+     the multiple order-n pages in the PMR are physically contiguous
+     from the first entry to the last. Whilst this is highly likely to
+     be the case, there is no guarantee, so we cannot depend on it.
+
+   The solution is to manually mmap DMA/CMA pages into the user VMA
+   using remap_pfn_range() directly. Furthermore, accounting is
+   always compromised for DMA/CMA allocations.
+ */
+ size_t uiNumContiguousBytes = 1ULL << uiLog2PageSize;
+
+ iStatus = remap_pfn_range(ps_vma,
+ ps_vma->vm_start + uiOffset,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t_to_pfn(sPFN),
+#else
+ uiPFN,
+#endif
+ uiNumContiguousBytes,
+ ps_vma->vm_page_prot);
+ }
+
+ return iStatus;
+}
+
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData)
+{
+ struct vm_area_struct *ps_vma = pOSMMapData;
+ PVRSRV_DEVICE_NODE *psDevNode = PMR_DeviceNode(psPMR);
+ PVRSRV_ERROR eError;
+ size_t uiLength;
+ IMG_INT32 iStatus;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_UINT32 ui32CPUCacheFlags;
+ pgprot_t sPageProt;
+ IMG_CPU_PHYADDR asCpuPAddr[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_BOOL abValid[PMR_MAX_TRANSLATION_STACK_ALLOC];
+ IMG_UINT32 uiOffsetIdx;
+ IMG_UINT32 uiNumOfPFNs;
+ IMG_UINT32 uiLog2PageSize;
+ IMG_CPU_PHYADDR *psCpuPAddr;
+ IMG_BOOL *pbValid;
+ IMG_BOOL bUseMixedMap = IMG_FALSE;
+ IMG_BOOL bUseVMInsertPage = IMG_FALSE;
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ if (((ps_vma->vm_flags & VM_WRITE) != 0) &&
+ ((ps_vma->vm_flags & VM_SHARED) == 0))
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e1;
+ }
+
+ sPageProt = vm_get_page_prot(ps_vma->vm_flags);
+
+ eError = DevmemCPUCacheMode(psDevNode,
+ PMR_Flags(psPMR),
+ &ui32CPUCacheFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ switch (ui32CPUCacheFlags)
+ {
+ case PVRSRV_MEMALLOCFLAG_CPU_UNCACHED:
+ sPageProt = pgprot_noncached(sPageProt);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE:
+ sPageProt = pgprot_writecombine(sPageProt);
+ break;
+
+ case PVRSRV_MEMALLOCFLAG_CPU_CACHED:
+ {
+/* Do not set to write-combine for plato */
+#if !defined(PLATO_MEMORY_CONFIG)
+ PHYS_HEAP *psPhysHeap = PMR_PhysHeap(psPMR);
+
+ if (PhysHeapGetType(psPhysHeap) == PHYS_HEAP_TYPE_LMA)
+ sPageProt = pgprot_writecombine(sPageProt);
+#endif
+ break;
+ }
+
+ default:
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e1;
+ }
+ ps_vma->vm_page_prot = sPageProt;
+
+ ps_vma->vm_flags |= VM_IO;
+
+ /* Don't include the mapping in core dumps */
+ ps_vma->vm_flags |= VM_DONTDUMP;
+
+ /*
+ * Disable mremap because our nopage handler assumes all
+ * page requests have already been validated.
+ */
+ ps_vma->vm_flags |= VM_DONTEXPAND;
+
+ /* Don't allow mapping to be inherited across a process fork */
+ ps_vma->vm_flags |= VM_DONTCOPY;
+
+ uiLength = ps_vma->vm_end - ps_vma->vm_start;
+
+ /* Is this mmap targeting non-order-zero pages or does it use pfn mappings?
+ * If yes, don't use vm_insert_page */
+ uiLog2PageSize = PMR_GetLog2Contiguity(psPMR);
+#if defined(PMR_OS_USE_VM_INSERT_PAGE)
+ bUseVMInsertPage = (uiLog2PageSize == PAGE_SHIFT) && (PMR_GetType(psPMR) != PMR_TYPE_EXTMEM);
+#if defined(CONFIG_L4)
+ bUseVMInsertPage = IMG_FALSE;
+#endif
+#endif
+
+ /* Can we use stack allocations */
+ uiNumOfPFNs = uiLength >> uiLog2PageSize;
+ if (uiNumOfPFNs > PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ psCpuPAddr = OSAllocMem(uiNumOfPFNs * sizeof(*psCpuPAddr));
+ if (psCpuPAddr == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ /* Should this allocation fail, free the previous allocation before exiting */
+ pbValid = OSAllocMem(uiNumOfPFNs * sizeof(*pbValid));
+ if (pbValid == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ OSFreeMem(psCpuPAddr);
+ goto e1;
+ }
+ }
+ else
+ {
+ psCpuPAddr = asCpuPAddr;
+ pbValid = abValid;
+ }
+
+ /* Obtain map range pfns */
+ eError = PMR_CpuPhysAddr(psPMR,
+ uiLog2PageSize,
+ uiNumOfPFNs,
+ 0,
+ psCpuPAddr,
+ pbValid);
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+
+ /*
+ * Scan the map range for pfns without struct page* handling. If
+ * we find one, this is a mixed map, and we can't use vm_insert_page()
+ * NOTE: vm_insert_page() allows insertion of individual pages into user
+ * VMA space _only_ if said page is an order-zero allocated page.
+ */
+ if (bUseVMInsertPage)
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ pfn_t sPFN;
+#else
+ unsigned long uiPFN;
+#endif
+
+ for (uiOffsetIdx = 0; uiOffsetIdx < uiNumOfPFNs; ++uiOffsetIdx)
+ {
+ if (pbValid[uiOffsetIdx])
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0))
+ sPFN = phys_to_pfn_t(psCpuPAddr[uiOffsetIdx].uiAddr, 0);
+
+ if (!pfn_t_valid(sPFN) || page_count(pfn_t_to_page(sPFN)) == 0)
+#else
+ uiPFN = psCpuPAddr[uiOffsetIdx].uiAddr >> PAGE_SHIFT;
+ PVR_ASSERT(((IMG_UINT64)uiPFN << PAGE_SHIFT) == psCpuPAddr[uiOffsetIdx].uiAddr);
+
+ if (!pfn_valid(uiPFN) || page_count(pfn_to_page(uiPFN)) == 0)
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) */
+ {
+ bUseMixedMap = IMG_TRUE;
+ break;
+ }
+ }
+ }
+
+ if (bUseMixedMap)
+ {
+ ps_vma->vm_flags |= VM_MIXEDMAP;
+ }
+ }
+ else
+ {
+ ps_vma->vm_flags |= VM_PFNMAP;
+ }
+
+ /* For each PMR page-size contiguous bytes, map page(s) into user VMA */
+ for (uiOffset = 0; uiOffset < uiLength; uiOffset += 1ULL<<uiLog2PageSize)
+ {
+ uiOffsetIdx = uiOffset >> uiLog2PageSize;
+ /*
+ * Only map in pages that are valid, any that aren't will be
+ * picked up by the nopage handler which will return a zeroed
+ * page for us.
+ */
+ if (pbValid[uiOffsetIdx])
+ {
+ iStatus = _OSMMapPMR(psDevNode,
+ ps_vma,
+ uiOffset,
+ &psCpuPAddr[uiOffsetIdx],
+ uiLog2PageSize,
+ bUseVMInsertPage,
+ bUseMixedMap);
+ if (iStatus)
+ {
+ /* The remap status code is not propagated; report a generic mapping failure */
+ eError = PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED;
+ PVR_ASSERT(0);
+ goto e3;
+ }
+ }
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && defined(PVRSRV_ENABLE_MEMORY_STATS)
+#define PMR_OS_BAD_CPUADDR 0x0BAD0BAD
+ {
+ IMG_CPU_PHYADDR sPAddr;
+ sPAddr.uiAddr = pbValid[uiOffsetIdx] ?
+ psCpuPAddr[uiOffsetIdx].uiAddr :
+ IMG_CAST_TO_CPUPHYADDR_UINT(PMR_OS_BAD_CPUADDR);
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES,
+ (void*)(uintptr_t)(ps_vma->vm_start + uiOffset),
+ sPAddr,
+ 1<<uiLog2PageSize,
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+ }
+#undef PMR_OS_BAD_CPUADDR
+#endif
+ }
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, uiNumOfPFNs * PAGE_SIZE, OSGetCurrentClientProcessIDKM());
+#endif
+
+ if (psCpuPAddr != asCpuPAddr)
+ {
+ OSFreeMem(psCpuPAddr);
+ OSFreeMem(pbValid);
+ }
+
+ /* Remember the PMR so we can unlock it later */
+ ps_vma->vm_private_data = psPMR;
+
+ /* Install open and close handlers for ref-counting */
+ ps_vma->vm_ops = &gsMMapOps;
+
+ /*
+ * Take a reference on the PMR so that it can't be freed while mapped
+ * into the user process.
+ */
+ PMRRefPMR(psPMR);
+
+#if defined(PVRSRV_ENABLE_LINUX_MMAP_STATS)
+ /* record the stats */
+ MMapStatsAddOrUpdatePMR(psPMR, uiLength);
+#endif
+
+ return PVRSRV_OK;
+
+ /* Error exit paths follow */
+ e3:
+ if (psCpuPAddr != asCpuPAddr)
+ {
+ OSFreeMem(psCpuPAddr);
+ OSFreeMem(pbValid);
+ }
+ e1:
+ PMRUnlockSysPhysAddresses(psPMR);
+ e0:
+ return eError;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pmr_os.h b/drivers/gpu/drm/img-rogue/1.10/pmr_os.h
new file mode 100644
index 00000000000000..0dfbd492e4c538
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pmr_os.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title OS PMR functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description OS specific PMR functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PMR_OS_H__)
+#define __PMR_OS_H__
+
+#include "pmr_impl.h"
+
+/*************************************************************************/ /*!
+@Function OSMMapPMRGeneric
+@Description Implements a generic PMR mapping function, which is used
+ to CPU map a PMR where the PMR does not have a mapping
+ function defined by the creating PMR factory.
+@Input psPMR the PMR to be mapped
+@Output pOSMMapData pointer to any private data
+ needed by the generic mapping function
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+OSMMapPMRGeneric(PMR *psPMR, PMR_MMAP_DATA pOSMMapData);
+
+#endif /* !defined(__PMR_OS_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/power.c b/drivers/gpu/drm/img-rogue/1.10/power.c
new file mode 100644
index 00000000000000..e533b810b4ad31
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/power.c
@@ -0,0 +1,1024 @@
+/*************************************************************************/ /*!
+@File power.c
+@Title Power management functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for power management functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "osfunc.h"
+
+#include "lists.h"
+#include "pvrsrv.h"
+#include "pvr_debug.h"
+#include "process_stats.h"
+
+
+struct _PVRSRV_POWER_DEV_TAG_
+{
+ PFN_PRE_POWER pfnDevicePrePower;
+ PFN_POST_POWER pfnDevicePostPower;
+ PFN_SYS_DEV_PRE_POWER pfnSystemPrePower;
+ PFN_SYS_DEV_POST_POWER pfnSystemPostPower;
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange;
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange;
+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest;
+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest;
+ PFN_DUST_COUNT_REQUEST pfnDustCountRequest;
+ IMG_HANDLE hSysData;
+ IMG_HANDLE hDevCookie;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState;
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState;
+};
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeNs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ return OSClockns64();
+#else
+ return 0;
+#endif
+}
+
+static inline IMG_UINT64 PVRSRVProcessStatsGetTimeUs(void)
+{
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ return OSClockus();
+#else
+ return 0;
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function _IsSystemStatePowered
+
+ @Description Tests whether a given system state represents powered-up.
+
+ @Input eSystemPowerState : a system power state
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+static IMG_BOOL _IsSystemStatePowered(PVRSRV_SYS_POWER_STATE eSystemPowerState)
+{
+ return (eSystemPowerState == PVRSRV_SYS_POWER_STATE_ON);
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPowerLock
+
+ @Description Obtain the mutex for power transitions. Only allowed when
+ system power is on.
+
+ @Return PVRSRV_ERROR_RETRY or PVRSRV_OK
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ OSLockAcquire(psDeviceNode->hPowerLock);
+
+ /* Only allow to take powerlock when the system power is on */
+ if (_IsSystemStatePowered(psDeviceNode->eCurrentSysPowerState))
+ {
+ return PVRSRV_OK;
+ }
+
+ OSLockRelease(psDeviceNode->hPowerLock);
+
+ return PVRSRV_ERROR_RETRY;
+}
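+
+/* Illustrative caller pattern (a sketch, not part of the driver); this is
+ * the idiom used later in this file, e.g. by
+ * PVRSRVDevicePreClockSpeedChange():
+ *
+ *     eError = PVRSRVPowerLock(psDeviceNode);
+ *     if (eError != PVRSRV_OK)
+ *     {
+ *         return eError; // PVRSRV_ERROR_RETRY while system power is off
+ *     }
+ *     // ... perform the power transition ...
+ *     PVRSRVPowerUnlock(psDeviceNode);
+ */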
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVForcedPowerLock
+
+ @Description Obtain the mutex for power transitions regardless of
+ system power state
+
+ @Return None
+
+******************************************************************************/
+void PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ OSLockAcquire(psDeviceNode->hPowerLock);
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPowerUnlock
+
+ @Description Release the mutex for power transitions
+
+ @Return None
+
+******************************************************************************/
+void PVRSRVPowerUnlock(PCPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ OSLockRelease(psDeviceNode->hPowerLock);
+}
+
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice)
+{
+ return (psPowerDevice->eDefaultPowerState == PVRSRV_DEV_POWER_STATE_OFF);
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVSetDeviceDefaultPowerState
+
+ @Description Set the default device power state to eNewPowerState
+
+ @Input psDeviceNode : Device node
+ @Input eNewPowerState : New power state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState)
+{
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ psPowerDevice->eDefaultPowerState = eNewPowerState;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDeviceIdleRequestKM
+
+ @Description
+
+ Perform device-specific processing required to force the device idle.
+
+ @Input psDeviceNode : Device node
+ @Input pfnIsDefaultStateOff : Filter function used to determine whether a forced idle is required for the device
+ @Input bDeviceOffPermitted : IMG_TRUE if the transition should not fail if the device is off
+ IMG_FALSE if the transition should fail if the device is off
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff,
+ IMG_BOOL bDeviceOffPermitted)
+{
+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+ if (psPowerDev && psPowerDev->pfnForcedIdleRequest)
+ {
+ if (!pfnIsDefaultStateOff || pfnIsDefaultStateOff(psPowerDev))
+ {
+ return psPowerDev->pfnForcedIdleRequest(psPowerDev->hDevCookie,
+ bDeviceOffPermitted);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDeviceIdleCancelRequestKM
+
+ @Description
+
+ Perform device-specific processing required to cancel the forced idle state on the device, returning to normal operation.
+
+ @Input psDeviceNode : Device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ PVRSRV_POWER_DEV *psPowerDev = psDeviceNode->psPowerDev;
+
+ if (psPowerDev && psPowerDev->pfnForcedIdleCancelRequest)
+ {
+ return psPowerDev->pfnForcedIdleCancelRequest(psPowerDev->hDevCookie);
+ }
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDevicePrePowerStateKM
+
+ @Description
+
+ Perform device-specific processing required before a power transition
+
+ @Input psPowerDevice : Power device
+ @Input eNewPowerState : New power state
+ @Input bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePrePowerStateKM(PVRSRV_POWER_DEV *psPowerDevice,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced)
+{
+ IMG_UINT64 ui64SysTimer1 = 0;
+ IMG_UINT64 ui64SysTimer2 = 0;
+ IMG_UINT64 ui64DevTimer1 = 0;
+ IMG_UINT64 ui64DevTimer2 = 0;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+ if (psPowerDevice->pfnDevicePrePower != NULL)
+ {
+ ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+ /* Call the device's power callback. */
+ eError = psPowerDevice->pfnDevicePrePower(psPowerDevice->hDevCookie,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+
+ ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ /* Do any required system-layer processing. */
+ if (psPowerDevice->pfnSystemPrePower != NULL)
+ {
+ ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+ eError = psPowerDevice->pfnSystemPrePower(psPowerDevice->hSysData,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+
+ ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+ ui64DevTimer1, ui64DevTimer2,
+ bForced,
+ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+ IMG_TRUE);
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDevicePostPowerStateKM
+
+ @Description
+
+ Perform device-specific processing required after a power transition
+
+ @Input psPowerDevice : Power device
+ @Input eNewPowerState : New power state
+ @Input bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static
+PVRSRV_ERROR PVRSRVDevicePostPowerStateKM(PVRSRV_POWER_DEV *psPowerDevice,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced)
+{
+ IMG_UINT64 ui64SysTimer1 = 0;
+ IMG_UINT64 ui64SysTimer2 = 0;
+ IMG_UINT64 ui64DevTimer1 = 0;
+ IMG_UINT64 ui64DevTimer2 = 0;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(eNewPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+ /* Do any required system-layer processing. */
+ if (psPowerDevice->pfnSystemPostPower != NULL)
+ {
+ ui64SysTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+ eError = psPowerDevice->pfnSystemPostPower(psPowerDevice->hSysData,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+
+ ui64SysTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ if (psPowerDevice->pfnDevicePostPower != NULL)
+ {
+ ui64DevTimer1 = PVRSRVProcessStatsGetTimeNs();
+
+ /* Call the device's power callback. */
+ eError = psPowerDevice->pfnDevicePostPower(psPowerDevice->hDevCookie,
+ eNewPowerState,
+ psPowerDevice->eCurrentPowerState,
+ bForced);
+
+ ui64DevTimer2 = PVRSRVProcessStatsGetTimeNs();
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ InsertPowerTimeStatistic(ui64SysTimer1, ui64SysTimer2,
+ ui64DevTimer1, ui64DevTimer2,
+ bForced,
+ eNewPowerState == PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+
+ psPowerDevice->eCurrentPowerState = eNewPowerState;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVSetDevicePowerStateKM
+
+ @Description Set the Device into a new state
+
+ @Input psDeviceNode : Device node
+ @Input eNewPowerState : New power state
+ @Input bForced : TRUE if the transition should not fail (e.g. OS request)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (!psPowerDevice)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_DEFAULT)
+ {
+ eNewPowerState = psPowerDevice->eDefaultPowerState;
+ }
+
+ if (psPowerDevice->eCurrentPowerState != eNewPowerState)
+ {
+ eError = PVRSRVDevicePrePowerStateKM(psPowerDevice,
+ eNewPowerState,
+ bForced);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ eError = PVRSRVDevicePostPowerStateKM(psPowerDevice,
+ eNewPowerState,
+ bForced);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+
+ /* Signal Device Watchdog Thread about power mode change. */
+ if (eNewPowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ psPVRSRVData->ui32DevicesWatchdogPwrTrans++;
+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+ if (psPVRSRVData->ui32DevicesWatchdogTimeout == DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT)
+#endif
+ {
+ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ }
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+ else if (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ /* signal watchdog thread and give it a chance to switch to
+ * longer / infinite wait time */
+ eError = OSEventObjectSignal(psPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+ }
+
+ return PVRSRV_OK;
+
+ErrorExit:
+
+ if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Transition to %d was denied, Forced=%d",
+ __func__, eNewPowerState, bForced));
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Transition to %d FAILED (%s)",
+ __func__, eNewPowerState, PVRSRVGetErrorStringKM(eError)));
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVSetDeviceSystemPowerState
+@Description Set the device into a new power state based on the systems power
+ state
+@Input psDeviceNode Device node
+@Input eNewSysPowerState New system power state
+@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PVRSRV_SYS_POWER_STATE eNewSysPowerState)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT uiStage = 0;
+
+ PVRSRV_DEV_POWER_STATE eNewDevicePowerState =
+ _IsSystemStatePowered(eNewSysPowerState)? PVRSRV_DEV_POWER_STATE_DEFAULT : PVRSRV_DEV_POWER_STATE_OFF;
+
+ /* If setting devices to default state, force idle all devices whose default state is off */
+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnIsDefaultStateOff =
+ (eNewDevicePowerState == PVRSRV_DEV_POWER_STATE_DEFAULT) ? PVRSRVDeviceIsDefaultStateOFF : NULL;
+
+ /* require a proper power state */
+ if (eNewSysPowerState == PVRSRV_SYS_POWER_STATE_Unspecified)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Prevent simultaneous SetPowerStateKM calls */
+ PVRSRVForcedPowerLock(psDeviceNode);
+
+ /* no power transition requested, so do nothing */
+ if (eNewSysPowerState == psDeviceNode->eCurrentSysPowerState)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return PVRSRV_OK;
+ }
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+ pfnIsDefaultStateOff, IMG_TRUE);
+
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ PVRSRVForcedPowerLock(psDeviceNode);
+ }
+ else
+ {
+ uiStage++;
+ goto ErrorExit;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+ uiStage++;
+ goto ErrorExit;
+ }
+
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode, eNewDevicePowerState,
+ IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ uiStage++;
+ goto ErrorExit;
+ }
+
+ psDeviceNode->eCurrentSysPowerState = eNewSysPowerState;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ return PVRSRV_OK;
+
+ErrorExit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Transition from %d to %d FAILED (%s) at stage %u. Dumping debug info.",
+ __func__, psDeviceNode->eCurrentSysPowerState, eNewSysPowerState,
+ PVRSRVGetErrorStringKM(eError), uiStage));
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+ return eError;
+}
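+
+/* Illustrative usage (a sketch, not part of the driver): the OS
+ * suspend/resume hooks typically map straight onto this function:
+ *
+ *     // suspend
+ *     eError = PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ *                                              PVRSRV_SYS_POWER_STATE_OFF);
+ *     // resume
+ *     eError = PVRSRVSetDeviceSystemPowerState(psDeviceNode,
+ *                                              PVRSRV_SYS_POWER_STATE_ON);
+ */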
+
+
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PFN_PRE_POWER pfnDevicePrePower,
+ PFN_POST_POWER pfnDevicePostPower,
+ PFN_SYS_DEV_PRE_POWER pfnSystemPrePower,
+ PFN_SYS_DEV_POST_POWER pfnSystemPostPower,
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest,
+ PFN_DUST_COUNT_REQUEST pfnDustCountRequest,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState)
+{
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ PVR_ASSERT(!psDeviceNode->psPowerDev);
+
+ PVR_ASSERT(eCurrentPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+ PVR_ASSERT(eDefaultPowerState != PVRSRV_DEV_POWER_STATE_DEFAULT);
+
+ psPowerDevice = OSAllocMem(sizeof(PVRSRV_POWER_DEV));
+ if (psPowerDevice == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to alloc PVRSRV_POWER_DEV", __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* setup device for power manager */
+ psPowerDevice->pfnDevicePrePower = pfnDevicePrePower;
+ psPowerDevice->pfnDevicePostPower = pfnDevicePostPower;
+ psPowerDevice->pfnSystemPrePower = pfnSystemPrePower;
+ psPowerDevice->pfnSystemPostPower = pfnSystemPostPower;
+ psPowerDevice->pfnPreClockSpeedChange = pfnPreClockSpeedChange;
+ psPowerDevice->pfnPostClockSpeedChange = pfnPostClockSpeedChange;
+ psPowerDevice->pfnForcedIdleRequest = pfnForcedIdleRequest;
+ psPowerDevice->pfnForcedIdleCancelRequest = pfnForcedIdleCancelRequest;
+ psPowerDevice->pfnDustCountRequest = pfnDustCountRequest;
+ psPowerDevice->hSysData = psDeviceNode->psDevConfig->hSysData;
+ psPowerDevice->hDevCookie = hDevCookie;
+ psPowerDevice->eCurrentPowerState = eCurrentPowerState;
+ psPowerDevice->eDefaultPowerState = eDefaultPowerState;
+
+ psDeviceNode->psPowerDev = psPowerDevice;
+
+ return (PVRSRV_OK);
+}
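+
+/* Illustrative sketch (not part of the driver): a device-specific layer
+ * registering its power callbacks. The Rgx* names and psDevInfo are
+ * hypothetical; callbacks that are not needed may be passed as NULL.
+ */
+#if 0
+eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+                                   RgxPrePowerState, RgxPostPowerState,
+                                   psDevConfig->pfnPrePowerState,
+                                   psDevConfig->pfnPostPowerState,
+                                   RgxPreClockSpeedChange,
+                                   RgxPostClockSpeedChange,
+                                   RgxForcedIdleRequest,
+                                   RgxCancelForcedIdleRequest,
+                                   RgxDustCountChange,
+                                   (IMG_HANDLE)psDevInfo,
+                                   PVRSRV_DEV_POWER_STATE_OFF, /* current */
+                                   PVRSRV_DEV_POWER_STATE_OFF  /* default */);
+#endif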
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVRemovePowerDevice
+
+ @Description
+
+ Removes the device from the power management register.
+
+ @Input psDeviceNode : Device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ if (psDeviceNode->psPowerDev)
+ {
+ OSFreeMem(psDeviceNode->psPowerDev);
+ psDeviceNode->psPowerDev = NULL;
+ }
+
+ return (PVRSRV_OK);
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetDevicePowerState
+
+ @Description
+
+ Return the device power state
+
+ @Input psDeviceNode : Device node
+ @Output pePowerState : Current power state
+
+ @Return PVRSRV_ERROR_UNKNOWN_POWER_STATE if device could not be found. PVRSRV_OK otherwise.
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+ PPVRSRV_DEV_POWER_STATE pePowerState)
+{
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice == NULL)
+ {
+ return PVRSRV_ERROR_UNKNOWN_POWER_STATE;
+ }
+
+ *pePowerState = psPowerDevice->eCurrentPowerState;
+
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVIsDevicePowered
+
+ @Description
+
+ Whether the device is powered, for the purposes of lockup detection.
+
+ @Input psDeviceNode : Device node
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ if (OSLockIsLocked(psDeviceNode->hPowerLock))
+ {
+ return IMG_FALSE;
+ }
+
+ if (PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState) != PVRSRV_OK)
+ {
+ return IMG_FALSE;
+ }
+
+ return (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevicePreClockSpeedChange
+
+@Description This function is called before a voltage/frequency change is
+ made to the GPU HW. It informs the host driver of the intention
+ to make a DVFS change. It allows the host driver to idle
+ the GPU and begin a hold-off period that blocks new work
+ from starting on the GPU.
+ When this call succeeds, the caller *must* call
+ PVRSRVDevicePostClockSpeedChange() to end the hold-off period
+ and allow new work to be submitted to the GPU.
+
+ Called from the system layer or OS layer implementation that
+ is responsible for triggering a GPU DVFS transition.
+
+@Input psDeviceNode pointer to the device affected by DVFS transition.
+@Input bIdleDevice when True, the driver will wait for the GPU to
+ reach an idle state before the call returns.
+@Input pvInfo unused
+
+@Return PVRSRV_OK on success, power lock acquired and held on exit,
+ GPU idle.
+ PVRSRV_ERROR on failure, power lock not held on exit, do not
+ call PVRSRVDevicePostClockSpeedChange().
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void* pvInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_POWER_DEV *psPowerDevice;
+ IMG_UINT64 ui64StartTimer, ui64StopTimer;
+
+ PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+ ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+ /* This mutex is released in PVRSRVDevicePostClockSpeedChange. */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice)
+ {
+ if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+ {
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ /* We can change the clock speed if the device is either IDLE or OFF */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRV_ERROR eError2;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ eError2 = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError2)));
+ return eError2;
+ }
+ }
+ else
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ }
+
+ eError = psPowerDevice->pfnPreClockSpeedChange(psPowerDevice->hDevCookie,
+ psPowerDevice->eCurrentPowerState);
+ }
+
+ ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+ InsertPowerTimeStatisticExtraPre(ui64StartTimer, ui64StopTimer);
+
+ return eError;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevicePostClockSpeedChange
+
+@Description This function is called after a voltage/frequency change has
+ been made to the GPU HW following a call to
+ PVRSRVDevicePreClockSpeedChange().
+ Before calling this function the caller must ensure the system
+ data RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed has
+ been updated with the new frequency set, measured in Hz.
+ The function informs the host driver that the DVFS change has
+ completed. The driver will end the work hold off period, cancel
+ the device idle period and update its time data records.
+ When this call returns work submissions are unblocked and
+ are submitted to the GPU as normal.
+ This function *must* not be called if the preceding call to
+ PVRSRVDevicePreClockSpeedChange() failed.
+
+ Called from the system layer or OS layer implementation that
+ is responsible for triggering a GPU DVFS transition.
+
+@Input psDeviceNode pointer to the device affected by DVFS transition.
+@Input bIdleDevice when True, the driver will cancel the GPU
+ device idle state before the call returns. Value
+ given must match that used in the call to
+ PVRSRVDevicePreClockSpeedChange() otherwise
+ undefined behaviour will result.
+@Input pvInfo unused
+
+@Return void. Power lock released; no longer held on exit.
+*/ /**************************************************************************/
+void
+PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void* pvInfo)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_POWER_DEV *psPowerDevice;
+ IMG_UINT64 ui64StartTimer, ui64StopTimer;
+
+ PVR_UNREFERENCED_PARAMETER(pvInfo);
+
+ ui64StartTimer = PVRSRVProcessStatsGetTimeUs();
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice)
+ {
+ eError = psPowerDevice->pfnPostClockSpeedChange(psPowerDevice->hDevCookie,
+ psPowerDevice->eCurrentPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ }
+
+ if ((psPowerDevice->eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) && bIdleDevice)
+ {
+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to cancel forced IDLE.", __func__));
+ }
+ }
+ }
+
+ /* This mutex was acquired in PVRSRVDevicePreClockSpeedChange. */
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ OSAtomicIncrement(&psDeviceNode->iNumClockSpeedChanges);
+
+ ui64StopTimer = PVRSRVProcessStatsGetTimeUs();
+
+ InsertPowerTimeStatisticExtraPost(ui64StartTimer, ui64StopTimer);
+}
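+
+/* Illustrative sketch (not part of the driver): the DVFS call sequence
+ * expected from a system/OS layer implementation. The frequency-setting
+ * step and the ui32NewClockSpeed variable are hypothetical placeholders
+ * for platform-specific code.
+ *
+ *   eError = PVRSRVDevicePreClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+ *   if (eError != PVRSRV_OK)
+ *   {
+ *       return eError;  // must NOT call the Post function on failure
+ *   }
+ *
+ *   SysSetCoreClockSpeed(ui32NewClockSpeed);  // hypothetical platform call
+ *   // update RGX_DATA->RGX_TIMING_INFORMATION->ui32CoreClockSpeed (Hz)
+ *
+ *   // bIdleDevice must match the value given to the Pre call
+ *   PVRSRVDevicePostClockSpeedChange(psDeviceNode, IMG_TRUE, NULL);
+ */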
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVDeviceDustCountChange
+
+ @Description
+
+ Request from the system layer to change the device dust count.
+
+ @Input psDeviceNode : Device node
+ @Input ui32DustCount : dust count to be set
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+ IMG_UINT32 ui32DustCount)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_POWER_DEV *psPowerDevice;
+
+ psPowerDevice = psDeviceNode->psPowerDev;
+ if (psPowerDevice)
+ {
+ PVRSRV_DEV_POWER_STATE eDevicePowerState;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ eDevicePowerState = psPowerDevice->eCurrentPowerState;
+ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ /* Device must be idle to change dust count */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_FALSE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRV_ERROR eError2;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ eError2 = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError2)));
+ return eError2;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: error occurred whilst forcing idle (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+ goto ErrorExit;
+ }
+ }
+
+ if (psPowerDevice->pfnDustCountRequest != NULL)
+ {
+ PVRSRV_ERROR eError2 = psPowerDevice->pfnDustCountRequest(psPowerDevice->hDevCookie, ui32DustCount);
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device %p failed (%s)",
+ __func__, psDeviceNode,
+ PVRSRVGetErrorStringKM(eError2)));
+ }
+ }
+
+ if (eDevicePowerState == PVRSRV_DEV_POWER_STATE_ON)
+ {
+ eError = PVRSRVDeviceIdleCancelRequestKM(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to cancel forced IDLE.", __func__));
+ goto ErrorExit;
+ }
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ }
+
+ return eError;
+
+ErrorExit:
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+}
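+
+/* Illustrative usage (hypothetical values): a system layer wishing to change
+ * the number of active dusts simply calls
+ *
+ *   eError = PVRSRVDeviceDustCountChange(psDeviceNode, ui32NewDustCount);
+ *
+ * When the device is powered on, the function forces it idle, applies the
+ * request via pfnDustCountRequest and cancels the forced idle before
+ * releasing the power lock.
+ */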
+
+/******************************************************************************
+ End of file (power.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/power.h b/drivers/gpu/drm/img-rogue/1.10/power.h
new file mode 100644
index 00000000000000..70a2d68e30d316
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/power.h
@@ -0,0 +1,124 @@
+/*************************************************************************/ /*!
+@File
+@Title Power Management Functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for power management functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef POWER_H
+#define POWER_H
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+#include "servicesext.h"
+#include "opaque_types.h"
+
+/*!
+ *****************************************************************************
+ * Power management
+ *****************************************************************************/
+
+typedef struct _PVRSRV_POWER_DEV_TAG_ PVRSRV_POWER_DEV;
+
+typedef IMG_BOOL (*PFN_SYS_DEV_IS_DEFAULT_STATE_OFF)(PVRSRV_POWER_DEV *psPowerDevice);
+
+
+PVRSRV_ERROR PVRSRVPowerLock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+void PVRSRVForcedPowerLock(PPVRSRV_DEVICE_NODE psDeviceNode);
+void PVRSRVPowerUnlock(PCPVRSRV_DEVICE_NODE psDeviceNode);
+
+IMG_BOOL PVRSRVDeviceIsDefaultStateOFF(PVRSRV_POWER_DEV *psPowerDevice);
+
+
+PVRSRV_ERROR PVRSRVSetDevicePowerStateKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ IMG_BOOL bForced);
+
+PVRSRV_ERROR PVRSRVSetDeviceSystemPowerState(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PVRSRV_SYS_POWER_STATE ePVRState);
+
+PVRSRV_ERROR PVRSRVSetDeviceDefaultPowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+ PVRSRV_DEV_POWER_STATE eNewPowerState);
+
+/* Type PFN_DC_REGISTER_POWER */
+PVRSRV_ERROR PVRSRVRegisterPowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PFN_PRE_POWER pfnDevicePrePower,
+ PFN_POST_POWER pfnDevicePostPower,
+ PFN_SYS_DEV_PRE_POWER pfnSystemPrePower,
+ PFN_SYS_DEV_POST_POWER pfnSystemPostPower,
+ PFN_PRE_CLOCKSPEED_CHANGE pfnPreClockSpeedChange,
+ PFN_POST_CLOCKSPEED_CHANGE pfnPostClockSpeedChange,
+ PFN_FORCED_IDLE_REQUEST pfnForcedIdleRequest,
+ PFN_FORCED_IDLE_CANCEL_REQUEST pfnForcedIdleCancelRequest,
+ PFN_DUST_COUNT_REQUEST pfnDustCountRequest,
+ IMG_HANDLE hDevCookie,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState);
+
+PVRSRV_ERROR PVRSRVRemovePowerDevice(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVGetDevicePowerState(PCPVRSRV_DEVICE_NODE psDeviceNode,
+ PPVRSRV_DEV_POWER_STATE pePowerState);
+
+IMG_BOOL PVRSRVIsDevicePowered(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVDevicePreClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void *pvInfo);
+
+void PVRSRVDevicePostClockSpeedChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+ IMG_BOOL bIdleDevice,
+ void *pvInfo);
+
+PVRSRV_ERROR PVRSRVDeviceIdleRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode,
+ PFN_SYS_DEV_IS_DEFAULT_STATE_OFF pfnCheckIdleReq,
+ IMG_BOOL bDeviceOffPermitted);
+
+PVRSRV_ERROR PVRSRVDeviceIdleCancelRequestKM(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR PVRSRVDeviceDustCountChange(PPVRSRV_DEVICE_NODE psDeviceNode,
+ IMG_UINT32 ui32DustCount);
+
+
+#endif /* POWER_H */
+
+/******************************************************************************
+ End of file (power.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/powervr/buffer_attribs.h b/drivers/gpu/drm/img-rogue/1.10/powervr/buffer_attribs.h
new file mode 100644
index 00000000000000..bd13e7f74d1421
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/powervr/buffer_attribs.h
@@ -0,0 +1,90 @@
+/*************************************************************************/ /*!
+@File
+@Title 3D types for use by IMG APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _POWERVR_BUFFER_ATTRIBS_H_
+#define _POWERVR_BUFFER_ATTRIBS_H_
+
+/**
+ * Memory layouts
+ * Defines how pixels are laid out within a surface.
+ */
+typedef enum
+{
+ IMG_MEMLAYOUT_STRIDED, /**< Resource is strided, one row at a time */
+ IMG_MEMLAYOUT_TWIDDLED, /**< Resource is 2D twiddled, classic style */
+ IMG_MEMLAYOUT_3DTWIDDLED, /**< Resource is 3D twiddled, classic style */
+ IMG_MEMLAYOUT_TILED, /**< Resource is tiled, tiling config specified elsewhere. */
+ IMG_MEMLAYOUT_PAGETILED, /**< Resource is pagetiled */
+} IMG_MEMLAYOUT;
+
+/**
+ * Rotation types
+ */
+typedef enum
+{
+ IMG_ROTATION_0DEG = 0,
+ IMG_ROTATION_90DEG = 1,
+ IMG_ROTATION_180DEG = 2,
+ IMG_ROTATION_270DEG = 3,
+ IMG_ROTATION_FLIP_Y = 4,
+
+ IMG_ROTATION_BAD = 255,
+} IMG_ROTATION;
+
+/**
+ * Colour space formats.
+ */
+typedef enum
+{
+ IMG_COLOURSPACE_FORMAT_UNKNOWN = 0x0 << 16,
+ IMG_COLOURSPACE_FORMAT_LINEAR = 0x1 << 16,
+ IMG_COLOURSPACE_FORMAT_SRGB = 0x2 << 16,
+ IMG_COLOURSPACE_FORMAT_SCRGB = 0x3 << 16,
+ IMG_COLOURSPACE_FORMAT_SCRGB_LINEAR = 0x4 << 16,
+ IMG_COLOURSPACE_FORMAT_DISPLAY_P3_LINEAR = 0x5 << 16,
+ IMG_COLOURSPACE_FORMAT_DISPLAY_P3 = 0x6 << 16,
+ IMG_COLOURSPACE_FORMAT_BT2020_PQ = 0x7 << 16,
+ IMG_COLOURSPACE_FORMAT_BT2020_LINEAR = 0x8 << 16,
+ IMG_COLOURSPACE_FORMAT_MASK = 0xF << 16,
+} IMG_COLOURSPACE_FORMAT;
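+
+/* Illustrative note: the format occupies bits [19:16], so a hypothetical
+ * combined flags word can be decoded with:
+ *
+ *   eFmt = (IMG_COLOURSPACE_FORMAT)(ui32Flags & IMG_COLOURSPACE_FORMAT_MASK);
+ */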
+
+/**
+ * Types of framebuffer compression
+ */
+typedef enum
+{
+ IMG_FB_COMPRESSION_NONE,
+ IMG_FB_COMPRESSION_DIRECT_8x8,
+ IMG_FB_COMPRESSION_DIRECT_16x4,
+ IMG_FB_COMPRESSION_DIRECT_32x2,
+ IMG_FB_COMPRESSION_INDIRECT_8x8,
+ IMG_FB_COMPRESSION_INDIRECT_16x4,
+ IMG_FB_COMPRESSION_INDIRECT_4TILE_8x8,
+ IMG_FB_COMPRESSION_INDIRECT_4TILE_16x4
+} IMG_FB_COMPRESSION;
+
+
+#endif /* _POWERVR_BUFFER_ATTRIBS_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/powervr/mem_types.h b/drivers/gpu/drm/img-rogue/1.10/powervr/mem_types.h
new file mode 100644
index 00000000000000..4674ad2fc8fe40
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/powervr/mem_types.h
@@ -0,0 +1,64 @@
+/*************************************************************************/ /*!
+@File
+@Title Public types
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _POWERVR_TYPES_H_
+#define _POWERVR_TYPES_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#if defined(_MSC_VER)
+ #include "msvc_types.h"
+#elif defined(LINUX) && defined(__KERNEL__)
+ #include <linux/types.h>
+ #include <linux/compiler.h>
+#else
+ #include <stdint.h>
+ #define __iomem
+#endif
+
+typedef void *IMG_CPU_VIRTADDR;
+
+/* device virtual address */
+typedef struct _IMG_DEV_VIRTADDR
+{
+ uint64_t uiAddr;
+#define IMG_CAST_TO_DEVVADDR_UINT(var) (uint64_t)(var)
+
+} IMG_DEV_VIRTADDR;
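+
+/* Illustrative sketch (hypothetical names): IMG_CAST_TO_DEVVADDR_UINT widens
+ * a value to the 64-bit device address space, e.g.
+ *
+ *   sDevVAddr.uiAddr = sBase.uiAddr + IMG_CAST_TO_DEVVADDR_UINT(uiByteOffset);
+ */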
+
+typedef uint64_t IMG_DEVMEM_SIZE_T;
+typedef uint64_t IMG_DEVMEM_ALIGN_T;
+typedef uint64_t IMG_DEVMEM_OFFSET_T;
+typedef uint32_t IMG_DEVMEM_LOG2ALIGN_T;
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/powervr/pvrsrv_sync_ext.h b/drivers/gpu/drm/img-rogue/1.10/powervr/pvrsrv_sync_ext.h
new file mode 100644
index 00000000000000..62d0f9e5209ae5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/powervr/pvrsrv_sync_ext.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title Services external synchronisation interface header
+@Description Defines synchronisation structures that are visible internally
+ and externally
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _POWERVR_SYNC_EXT_H_
+#define _POWERVR_SYNC_EXT_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*! Implementation independent types for passing fence/timeline to Services.
+ */
+typedef int32_t PVRSRV_FENCE;
+typedef int32_t PVRSRV_TIMELINE;
+
+/*! Maximum length for an annotation name string for fence sync model objects.
+ */
+#define PVRSRV_SYNC_NAME_LENGTH 32
+
+/* Macros for API callers using the fence sync model
+ */
+#define PVRSRV_NO_TIMELINE ((PVRSRV_TIMELINE) -1)
+#define PVRSRV_NO_FENCE ((PVRSRV_FENCE) -1)
+#define PVRSRV_NO_FENCE_PTR NULL
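+
+/* Illustrative note: callers that have no fence or timeline to pass supply
+ * PVRSRV_NO_FENCE / PVRSRV_NO_TIMELINE in place of a valid handle, and
+ * PVRSRV_NO_FENCE_PTR where an output fence pointer is optional.
+ */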
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/powervr/sync_external.h b/drivers/gpu/drm/img-rogue/1.10/powervr/sync_external.h
new file mode 100644
index 00000000000000..d7b906e3897975
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/powervr/sync_external.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title Services external synchronisation interface header
+@Description Defines synchronisation structures that are visible internally
+ and externally
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_EXTERNAL_
+#define _SYNC_EXTERNAL_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include <powervr/mem_types.h>
+
+/*!
+ * Maximum byte length for a sync prim name
+ */
+#define SYNC_MAX_CLASS_NAME_LEN 32
+
+/*!
+ * Number of sync primitives in operations
+ */
+#define PVRSRV_MAX_SYNC_PRIMS 32
+
+typedef void* PVRSRV_CLIENT_SYNC_PRIM_HANDLE;
+typedef void* SYNC_BRIDGE_HANDLE;
+typedef struct SYNC_PRIM_CONTEXT *PSYNC_PRIM_CONTEXT;
+typedef struct _SYNC_OP_COOKIE_ *PSYNC_OP_COOKIE;
+
+/*!
+ * Client sync prim definition holding a CPU accessible address
+ *
+ * Structure: #PVRSRV_CLIENT_SYNC_PRIM
+ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM
+{
+ volatile uint32_t __iomem *pui32LinAddr; /*!< User pointer to the primitive */
+} PVRSRV_CLIENT_SYNC_PRIM;
+
+/*!
+ * Bundled information for a sync prim operation
+ *
+ * Structure: #PVRSRV_CLIENT_SYNC_PRIM_OP
+ * Typedef: ::PVRSRV_CLIENT_SYNC_PRIM_OP
+ */
+typedef struct PVRSRV_CLIENT_SYNC_PRIM_OP
+{
+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK (1 << 0)
+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (1 << 1)
+ #define PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE (PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE | (1<<2))
+ uint32_t ui32Flags; /*!< Operation flags: PVRSRV_CLIENT_SYNC_PRIM_OP_XXX */
+ PVRSRV_CLIENT_SYNC_PRIM *psSync; /*!< Pointer to the client sync primitive */
+ uint32_t ui32FenceValue; /*!< The Fence value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK is set) */
+ uint32_t ui32UpdateValue; /*!< The Update value (only used if PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE is set) */
+} PVRSRV_CLIENT_SYNC_PRIM_OP;
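+
+/* Illustrative sketch (hypothetical values): a combined check-and-update
+ * operation on one sync prim would be set up as
+ *
+ *   PVRSRV_CLIENT_SYNC_PRIM_OP sOp;
+ *   sOp.ui32Flags       = PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK |
+ *                         PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+ *   sOp.psSync          = psSyncPrim;     // previously allocated prim
+ *   sOp.ui32FenceValue  = ui32WaitValue;  // used because CHECK is set
+ *   sOp.ui32UpdateValue = ui32NewValue;   // used because UPDATE is set
+ */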
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* _SYNC_EXTERNAL_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/private_data.h b/drivers/gpu/drm/img-rogue/1.10/private_data.h
new file mode 100644
index 00000000000000..6d63f151bbd18d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/private_data.h
@@ -0,0 +1,53 @@
+/*************************************************************************/ /*!
+@File
+@Title Linux private data structure
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__INCLUDED_PRIVATE_DATA_H_)
+#define __INCLUDED_PRIVATE_DATA_H_
+
+#include <linux/fs.h>
+
+#include "connection_server.h"
+
+CONNECTION_DATA *LinuxConnectionFromFile(struct file *pFile);
+struct file *LinuxFileFromConnection(CONNECTION_DATA *psConnection);
+
+#endif /* !defined(__INCLUDED_PRIVATE_DATA_H_) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/proc_stats.h b/drivers/gpu/drm/img-rogue/1.10/proc_stats.h
new file mode 100644
index 00000000000000..d1bcc00f56d015
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/proc_stats.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File
+@Title Process and driver statistic definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PROC_STATS_H
+#define PROC_STATS_H
+
+
+/* X-Macro for Process stat keys */
+#define PVRSRV_PROCESS_STAT_KEY \
+ X(PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS, "Connections") \
+ X(PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS, "ConnectionsMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_OOMS, "RenderContextOutOfMemoryEvents") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_PRS, "RenderContextPartialRenders") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_GROWS, "RenderContextGrows") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS, "RenderContextPushGrows") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES, "RenderContextTAStores") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES, "RenderContext3DStores") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES, "RenderContextSHStores") \
+ X(PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES, "RenderContextCDMStores") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP, "ZSBufferRequestsByApp") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW, "ZSBufferRequestsByFirmware") \
+ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP, "FreeListGrowRequestsByApp") \
+ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW, "FreeListGrowRequestsByFirmware") \
+ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT, "FreeListInitialPages") \
+ X(PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES, "FreeListMaxPages") \
+ X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+ X(PVRSRV_PROCESS_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+ X(PVRSRV_PROCESS_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, "MemoryUsageAllocPTMemoryUMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, "MemoryUsageAllocPTMemoryLMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, "MemoryUsageAllocGPUMemLMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, "MemoryUsageAllocGPUMemUMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, "MemoryUsageMappedGPUMemUMA/LMA") \
+ X(PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES_MAX, "MemoryUsageMappedGPUMemUMA/LMAMax") \
+ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL, "MemoryUsageTotal") \
+ X(PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX, "MemoryUsageTotalMax")
+
+
+/* X-Macro for Driver stat keys */
+#define PVRSRV_DRIVER_STAT_KEY \
+ X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC, "MemoryUsageKMalloc") \
+ X(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX, "MemoryUsageKMallocMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC, "MemoryUsageVMalloc") \
+ X(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX, "MemoryUsageVMallocMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, "MemoryUsageAllocPTMemoryUMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX, "MemoryUsageAllocPTMemoryUMAMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, "MemoryUsageVMapPTUMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX, "MemoryUsageVMapPTUMAMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, "MemoryUsageAllocPTMemoryLMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX, "MemoryUsageAllocPTMemoryLMAMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, "MemoryUsageIORemapPTLMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX, "MemoryUsageIORemapPTLMAMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, "MemoryUsageAllocGPUMemLMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX, "MemoryUsageAllocGPUMemLMAMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, "MemoryUsageAllocGPUMemUMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX, "MemoryUsageAllocGPUMemUMAMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, "MemoryUsageAllocGPUMemUMAPool") \
+ X(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX, "MemoryUsageAllocGPUMemUMAPoolMax") \
+ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, "MemoryUsageMappedGPUMemUMA_LMA") \
+ X(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX, "MemoryUsageMappedGPUMemUMA_LMAMax")
+
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+ PVRSRV_PROCESS_STAT_KEY
+#undef X
+ PVRSRV_PROCESS_STAT_TYPE_COUNT
+} PVRSRV_PROCESS_STAT_TYPE;
+
+typedef enum {
+#define X(stat_type, stat_str) stat_type,
+ PVRSRV_DRIVER_STAT_KEY
+#undef X
+ PVRSRV_DRIVER_STAT_TYPE_COUNT
+} PVRSRV_DRIVER_STAT_TYPE;
+
+extern const IMG_CHAR *const pszProcessStatType[];
+
+extern const IMG_CHAR *const pszDriverStatType[];
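+
+/* Because the enums and the string arrays are generated from the same
+ * X-Macro lists, they are index-aligned, e.g.
+ *   pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] == "Connections"
+ *   pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_KMALLOC] == "MemoryUsageKMalloc"
+ */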
+
+#endif // PROC_STATS_H
diff --git a/drivers/gpu/drm/img-rogue/1.10/process_stats.c b/drivers/gpu/drm/img-rogue/1.10/process_stats.c
new file mode 100644
index 00000000000000..094643b7774f73
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/process_stats.c
@@ -0,0 +1,3658 @@
+/*************************************************************************/ /*!
+@File
+@Title Process based statistics
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Manages a collection of statistics based around a process
+ and referenced via OS agnostic methods.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "lock.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "lists.h"
+#include "process_stats.h"
+#include "ri_server.h"
+#include "hash.h"
+#include "connection_server.h"
+#include "pvrsrv.h"
+#include "proc_stats.h"
+#include "htbuffer.h"
+#include "pvr_ricommon.h"
+
+/* Enabled OS Statistics entries: DEBUGFS on Linux, undefined for other OSs */
+#if defined(LINUX) && ( \
+ defined(PVRSRV_ENABLE_PERPID_STATS) || \
+ defined(PVRSRV_ENABLE_CACHEOP_STATS) || \
+ defined(PVRSRV_ENABLE_MEMORY_STATS) || \
+ defined(PVR_RI_DEBUG) )
+#define ENABLE_DEBUGFS_PIDS
+#endif
+
+/*
+ * Maximum history of process statistics that will be kept.
+ */
+#define MAX_DEAD_LIST_PROCESSES (10)
+
+/*
+ * Definition of all the strings used to format process based statistics.
+ */
+
+/* Array of Process stat name strings, generated using the X-Macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszProcessStatType[PVRSRV_PROCESS_STAT_TYPE_COUNT] = { PVRSRV_PROCESS_STAT_KEY };
+#undef X
+
+/* Array of Driver stat name strings, generated using the X-Macro */
+#define X(stat_type, stat_str) stat_str,
+const IMG_CHAR *const pszDriverStatType[PVRSRV_DRIVER_STAT_TYPE_COUNT] = { PVRSRV_DRIVER_STAT_KEY };
+#undef X
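+
+/* With X() temporarily defined to emit stat_str, each *_STAT_KEY list above
+ * expands to a comma-separated list of name strings; redefining X() to emit
+ * stat_type (as proc_stats.h does) yields the matching enum in the same
+ * order. */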
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+static const IMG_CHAR *const pszProcessStatFmt[] = {
+ "Connections %10d\n", /* PVRSRV_STAT_TYPE_CONNECTIONS */
+ "ConnectionsMax %10d\n", /* PVRSRV_STAT_TYPE_MAXCONNECTIONS */
+
+ "RenderContextOutOfMemoryEvents %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_OOMS */
+ "RenderContextPartialRenders %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PRS */
+ "RenderContextGrows %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_GROWS */
+ "RenderContextPushGrows %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_PUSH_GROWS */
+ "RenderContextTAStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES */
+ "RenderContext3DStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES */
+ "RenderContextSHStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES */
+ "RenderContextCDMStores %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES */
+ "ZSBufferRequestsByApp %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP */
+ "ZSBufferRequestsByFirmware %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW */
+ "FreeListGrowRequestsByApp %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP */
+ "FreeListGrowRequestsByFirmware %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW */
+ "FreeListInitialPages %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT */
+ "FreeListMaxPages %10d\n", /* PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES */
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ "MemoryUsageKMalloc %10d %8dK\n", /* PVRSRV_STAT_TYPE_KMALLOC */
+ "MemoryUsageKMallocMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_KMALLOC */
+ "MemoryUsageVMalloc %10d %8dK\n", /* PVRSRV_STAT_TYPE_VMALLOC */
+ "MemoryUsageVMallocMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_VMALLOC */
+#else
+ "","","","", /* Empty strings if these stats are not logged */
+#endif
+ "MemoryUsageAllocPTMemoryUMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_UMA */
+ "MemoryUsageAllocPTMemoryUMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_UMA */
+ "MemoryUsageVMapPTUMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_VMAP_PT_UMA */
+ "MemoryUsageVMapPTUMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_VMAP_PT_UMA */
+ "MemoryUsageAllocPTMemoryLMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_ALLOC_PAGES_PT_LMA */
+ "MemoryUsageAllocPTMemoryLMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_PAGES_PT_LMA */
+ "MemoryUsageIORemapPTLMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_IOREMAP_PT_LMA */
+ "MemoryUsageIORemapPTLMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_IOREMAP_PT_LMA */
+ "MemoryUsageAllocGPUMemLMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_ALLOC_LMA_PAGES */
+ "MemoryUsageAllocGPUMemLMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_LMA_PAGES */
+ "MemoryUsageAllocGPUMemUMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_ALLOC_UMA_PAGES */
+ "MemoryUsageAllocGPUMemUMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_ALLOC_UMA_PAGES */
+ "MemoryUsageMappedGPUMemUMA/LMA %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAP_UMA_LMA_PAGES */
+ "MemoryUsageMappedGPUMemUMA/LMAMax %10d %8dK\n", /* PVRSRV_STAT_TYPE_MAX_MAP_UMA_LMA_PAGES */
+ "MemoryUsageTotal %10d %8dK\n", /* PVRSRV_PROCESS_STAT_TYPE_TOTAL */
+ "MemoryUsageTotalMax %10d %8dK\n", /* PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX */
+};
+
+static_assert(ARRAY_SIZE(pszProcessStatFmt) == PVRSRV_PROCESS_STAT_TYPE_COUNT,
+ "Fix number of entries in pszProcessStatFmt array : %d");
+#endif
+
+/* structure used in hash table to track statistic entries */
+typedef struct {
+ size_t uiSizeInBytes;
+ IMG_PID uiPid;
+} _PVR_STATS_TRACKING_HASH_ENTRY;
+
+/* Function used internally to decrement tracked per-process statistic entries */
+static void _StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+ PVRSRV_MEM_ALLOC_TYPE eAllocType);
+
+/*
+ * Functions for printing the information stored...
+ */
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+void ProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+void RawProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+#endif
+
+void MemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void RIMemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void PowerStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void GlobalStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+void CacheOpStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+static void StripBadChars( IMG_CHAR *psStr);
+#endif
+
+/* Note: all accesses to the global stats must be protected by the
+ * gsGlobalStats.hGlobalStatsLock lock. This covers every invocation of
+ * the *_GLOBAL_STAT_VALUE macros. */
+
+/* Macro for fetching stat values */
+#define GET_GLOBAL_STAT_VALUE(idx) gsGlobalStats.ui32StatValue[idx]
+/*
+ * Macros for updating stat values.
+ */
+#define UPDATE_MAX_VALUE(a,b) do { if ((b) > (a)) {(a) = (b);} } while(0)
+#define INCREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] += (val); if ((ptr)->i32StatValue[(var)] > (ptr)->i32StatValue[(var##_MAX)]) {(ptr)->i32StatValue[(var##_MAX)] = (ptr)->i32StatValue[(var)];} } while(0)
+#define INCREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui32StatValue[(idx)] += (val); if ((var).ui32StatValue[(idx)] > (var).ui32StatValue[(idx##_MAX)]) {(var).ui32StatValue[(idx##_MAX)] = (var).ui32StatValue[(idx)];} } while(0)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* Allow stats to go negative */
+#define DECREASE_STAT_VALUE(ptr,var,val) do { (ptr)->i32StatValue[(var)] -= (val); } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { (var).ui32StatValue[(idx)] -= (val); } while(0)
+#else
+#define DECREASE_STAT_VALUE(ptr,var,val) do { if ((ptr)->i32StatValue[(var)] >= (val)) { (ptr)->i32StatValue[(var)] -= (val); } else { (ptr)->i32StatValue[(var)] = 0; } } while(0)
+#define DECREASE_GLOBAL_STAT_VALUE(var,idx,val) do { if ((var).ui32StatValue[(idx)] >= (val)) { (var).ui32StatValue[(idx)] -= (val); } else { (var).ui32StatValue[(idx)] = 0; } } while(0)
+#endif
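+/* Note: the INCREASE_* macros rely on token pasting; passing, for example,
+ * PVRSRV_DRIVER_STAT_TYPE_KMALLOC as idx also updates the paired
+ * PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX entry via idx##_MAX. */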
+#define MAX_CACHEOP_STAT 16
+#define INCREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x+1) >= MAX_CACHEOP_STAT ? 0 : (x+1))
+#define DECREMENT_CACHEOP_STAT_IDX_WRAP(x) ((x-1) < 0 ? (MAX_CACHEOP_STAT-1) : (x-1))
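+/* e.g. INCREMENT_CACHEOP_STAT_IDX_WRAP(15) == 0 and
+ * DECREMENT_CACHEOP_STAT_IDX_WRAP(0) == 15, for MAX_CACHEOP_STAT of 16 */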
+
+/*
+ * Structures for holding statistics...
+ */
+typedef enum
+{
+ PVRSRV_STAT_STRUCTURE_PROCESS = 1,
+ PVRSRV_STAT_STRUCTURE_RENDER_CONTEXT = 2,
+ PVRSRV_STAT_STRUCTURE_MEMORY = 3,
+ PVRSRV_STAT_STRUCTURE_RIMEMORY = 4,
+ PVRSRV_STAT_STRUCTURE_CACHEOP = 5
+} PVRSRV_STAT_STRUCTURE_TYPE;
+
+#define MAX_PROC_NAME_LENGTH (32)
+
+typedef struct _PVRSRV_PROCESS_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* Linked list pointers */
+ struct _PVRSRV_PROCESS_STATS_* psNext;
+ struct _PVRSRV_PROCESS_STATS_* psPrev;
+
+ /* Per-process lock that must be held
+ * when editing its members */
+ POS_LOCK hLock;
+
+ /* OS level process ID */
+ IMG_PID pid;
+ IMG_UINT32 ui32RefCount;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ ATOMIC_T iMemRefCount;
+#else
+ IMG_UINT32 ui32MemRefCount;
+#endif
+
+ /* Folder name used to store the statistic */
+ IMG_CHAR szFolderName[MAX_PROC_NAME_LENGTH];
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* OS specific data */
+ void *pvOSPidFolderData;
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+ void *pvOSPidEntryData;
+#endif
+#endif
+
+ /* Stats... */
+ IMG_INT32 i32StatValue[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ IMG_UINT32 ui32StatAllocFlags;
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ struct _CACHEOP_STRUCT_ {
+ PVRSRV_CACHE_OP uiCacheOp;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ RGXFWIF_DM eFenceOpType;
+#endif
+ IMG_DEVMEM_SIZE_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT64 ui64ExecuteTime;
+ IMG_BOOL bRangeBasedFlush;
+ IMG_BOOL bUserModeFlush;
+ IMG_UINT32 ui32OpSeqNum;
+ IMG_BOOL bIsFence;
+ IMG_PID ownerPid;
+ } asCacheOp[MAX_CACHEOP_STAT];
+ IMG_INT32 uiCacheOpWriteIndex;
+ struct _PVRSRV_CACHEOP_STATS_* psCacheOpStats;
+#endif
+
+ /* Other statistics structures */
+ struct _PVRSRV_MEMORY_STATS_* psMemoryStats;
+ struct _PVRSRV_RI_MEMORY_STATS_* psRIMemoryStats;
+} PVRSRV_PROCESS_STATS;
+
+typedef struct _PVRSRV_MEM_ALLOC_REC_
+{
+ PVRSRV_MEM_ALLOC_TYPE eAllocType;
+ IMG_UINT64 ui64Key;
+ void *pvCpuVAddr;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ size_t uiBytes;
+ void *pvPrivateData;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+ void *pvAllocdFromFile;
+ IMG_UINT32 ui32AllocdFromLine;
+#endif
+ IMG_PID pid;
+ struct _PVRSRV_MEM_ALLOC_REC_ *psNext;
+ struct _PVRSRV_MEM_ALLOC_REC_ **ppsThis;
+} PVRSRV_MEM_ALLOC_REC;
+
+typedef struct _PVRSRV_MEMORY_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* OS specific data */
+ void *pvOSMemEntryData;
+
+ /* Stats... */
+ PVRSRV_MEM_ALLOC_REC *psMemoryRecords;
+} PVRSRV_MEMORY_STATS;
+
+typedef struct _PVRSRV_RI_MEMORY_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* OS level process ID */
+ IMG_PID pid;
+
+#if defined(PVR_RI_DEBUG) && defined(ENABLE_DEBUGFS_PIDS)
+ /* OS specific data */
+ void *pvOSRIMemEntryData;
+#endif
+} PVRSRV_RI_MEMORY_STATS;
+
+typedef struct _PVRSRV_CACHEOP_STATS_ {
+ /* Structure type (must be first!) */
+ PVRSRV_STAT_STRUCTURE_TYPE eStructureType;
+
+ /* OS specific data */
+ void *pvOSCacheOpEntryData;
+} PVRSRV_CACHEOP_STATS;
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+static IMPLEMENT_LIST_INSERT(PVRSRV_MEM_ALLOC_REC)
+static IMPLEMENT_LIST_REMOVE(PVRSRV_MEM_ALLOC_REC)
+#endif
+
+/*
+ * Global Boolean to flag when the statistics are ready to monitor
+ * memory allocations.
+ */
+static IMG_BOOL bProcessStatsInitialised = IMG_FALSE;
+
+/*
+ * Linked lists for process stats. Live stats are for processes which are still running
+ * and the dead list holds those that have exited.
+ */
+static PVRSRV_PROCESS_STATS *g_psLiveList;
+static PVRSRV_PROCESS_STATS *g_psDeadList;
+
+static POS_LOCK g_psLinkedListLock;
+/* The lockdep feature in the kernel cannot differentiate between different
+ * instances of the same lock type, so it groups all such instances under one
+ * class. The consequence is that, if lock acquisition is nested on different
+ * instances, it generates a false warning about possible deadlock due to
+ * recursive lock acquisition. Hence we create the following subclasses to
+ * explicitly apprise lockdep of such safe lock nesting. */
+#define PROCESS_LOCK_SUBCLASS_CURRENT 1
+#define PROCESS_LOCK_SUBCLASS_PREV 2
+#define PROCESS_LOCK_SUBCLASS_NEXT 3
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*
+ * Pointer to OS folder to hold PID folders.
+ */
+static IMG_CHAR *pszOSLivePidFolderName = "pids";
+static IMG_CHAR *pszOSDeadPidFolderName = "pids_retired";
+static void *pvOSLivePidFolder;
+static void *pvOSDeadPidFolder;
+#endif
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *pvOSProcStats;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+/* global driver PID stats registration handle */
+static IMG_HANDLE g_hDriverProcessStats;
+#endif
+
+/* global driver-data folders */
+typedef struct _GLOBAL_STATS_
+{
+ IMG_UINT32 ui32StatValue[PVRSRV_DRIVER_STAT_TYPE_COUNT];
+ POS_LOCK hGlobalStatsLock;
+} GLOBAL_STATS;
+
+static void *pvOSGlobalMemEntryRef;
+static IMG_CHAR* const pszDriverStatFilename = "driver_stats";
+static GLOBAL_STATS gsGlobalStats;
+
+#define HASH_INITIAL_SIZE 5
+/* A hash table used to store the size of any vmalloc'd allocation
+ * against its address (not needed for kmallocs as we can use ksize()) */
+static HASH_TABLE* gpsSizeTrackingHashTable;
+static POS_LOCK gpsSizeTrackingHashTableLock;
+
+static PVRSRV_ERROR _RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid);
+
+static void _AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats);
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(void *pvStatPtr);
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(void *pvStatPtr);
+#endif
+#if defined(PVRSRV_ENABLE_PERPID_STATS) || !defined(ENABLE_DEBUGFS_PIDS)
+static void _DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats);
+#endif
+static void _RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats);
+#if defined(ENABLE_DEBUGFS_PIDS)
+static void _RemovePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats);
+static void _CreatePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats, void *pvOSPidFolder);
+#endif
+static void _DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ PVRSRV_PROCESS_STATS* psProcessStats,
+ IMG_UINT32 uiBytes);
+/*
+ * Power statistics related definitions
+ */
+
+/* For the time being, use an exponentially weighted moving average with a
+ * 1/4 weighting for the new measurement.
+ */
+#define MEAN_TIME(A, B) ( ((3*(A))/4) + ((1 * (B))/4) )
+
+#define UPDATE_TIME(time, newtime) \
+ ((time) > 0 ? MEAN_TIME((time),(newtime)) : (newtime))
+
+/* Enum to be used as input to GET_POWER_STAT_INDEX */
+typedef enum
+{
+ DEVICE = 0,
+ SYSTEM = 1,
+ POST_POWER = 0,
+ PRE_POWER = 2,
+ POWER_OFF = 0,
+ POWER_ON = 4,
+ NOT_FORCED = 0,
+ FORCED = 8,
+} PVRSRV_POWER_STAT_TYPE;
+
+/* Macro used to access one of the power timing statistics inside an array */
+#define GET_POWER_STAT_INDEX(forced,powon,prepow,system) \
+ ((forced) + (powon) + (prepow) + (system))
+
+/* For the power timing stats we need 16 variables to store all the
+ * combinations of forced/not forced, power-on/power-off, pre-power/post-power
+ * and device/system statistics
+ */
+#define NUM_POWER_STATS (16)
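+/* e.g. GET_POWER_STAT_INDEX(FORCED, POWER_ON, PRE_POWER, SYSTEM)
+ * = 8 + 4 + 2 + 1 = 15, the last of the NUM_POWER_STATS entries */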
+static IMG_UINT32 aui32PowerTimingStats[NUM_POWER_STATS];
+
+static void *pvOSPowerStatsEntryData;
+
+typedef struct _EXTRA_POWER_STATS_
+{
+ IMG_UINT64 ui64PreClockSpeedChangeDuration;
+ IMG_UINT64 ui64BetweenPreEndingAndPostStartingDuration;
+ IMG_UINT64 ui64PostClockSpeedChangeDuration;
+} EXTRA_POWER_STATS;
+
+#define NUM_EXTRA_POWER_STATS 10
+
+static EXTRA_POWER_STATS asClockSpeedChanges[NUM_EXTRA_POWER_STATS];
+static IMG_UINT32 ui32ClockSpeedIndexStart, ui32ClockSpeedIndexEnd;
+
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower)
+{
+ IMG_UINT32 *pui32Stat;
+ IMG_UINT64 ui64DeviceDiff = ui64DevEndTime - ui64DevStartTime;
+ IMG_UINT64 ui64SystemDiff = ui64SysEndTime - ui64SysStartTime;
+ IMG_UINT32 ui32Index;
+
+ if (bPrePower)
+ {
+ HTBLOGK(HTB_SF_MAIN_PRE_POWER, bPowerOn, ui64DeviceDiff, ui64SystemDiff);
+ }
+ else
+ {
+ HTBLOGK(HTB_SF_MAIN_POST_POWER, bPowerOn, ui64SystemDiff, ui64DeviceDiff);
+ }
+
+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+ bPowerOn ? POWER_ON : POWER_OFF,
+ bPrePower ? PRE_POWER : POST_POWER,
+ DEVICE);
+ pui32Stat = &aui32PowerTimingStats[ui32Index];
+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64DeviceDiff);
+
+ ui32Index = GET_POWER_STAT_INDEX(bForced ? FORCED : NOT_FORCED,
+ bPowerOn ? POWER_ON : POWER_OFF,
+ bPrePower ? PRE_POWER : POST_POWER,
+ SYSTEM);
+ pui32Stat = &aui32PowerTimingStats[ui32Index];
+ *pui32Stat = UPDATE_TIME(*pui32Stat, ui64SystemDiff);
+}
+
+static IMG_UINT64 ui64PreClockSpeedChangeMark;
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PreClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+ ui64PreClockSpeedChangeMark = OSClockus();
+}
+
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer)
+{
+ IMG_UINT64 ui64Duration = ui64StartTimer - ui64PreClockSpeedChangeMark;
+
+ PVR_ASSERT(ui64PreClockSpeedChangeMark > 0);
+
+ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64BetweenPreEndingAndPostStartingDuration = ui64Duration;
+ asClockSpeedChanges[ui32ClockSpeedIndexEnd].ui64PostClockSpeedChangeDuration = ui64StopTimer - ui64StartTimer;
+
+ ui32ClockSpeedIndexEnd = (ui32ClockSpeedIndexEnd + 1) % NUM_EXTRA_POWER_STATS;
+
+ if (ui32ClockSpeedIndexEnd == ui32ClockSpeedIndexStart)
+ {
+ ui32ClockSpeedIndexStart = (ui32ClockSpeedIndexStart + 1) % NUM_EXTRA_POWER_STATS;
+ }
+
+ ui64PreClockSpeedChangeMark = 0;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function _FindProcessStatsInLiveList
+@Description Searches the Live Process List for a statistics structure that
+ matches the PID given.
+@Input pid Process to search for.
+@Return Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInLiveList(IMG_PID pid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats->pid == pid)
+ {
+ return psProcessStats;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ return NULL;
+} /* _FindProcessStatsInLiveList */
+
+/*************************************************************************/ /*!
+@Function _FindProcessStatsInDeadList
+@Description Searches the Dead Process List for a statistics structure that
+ matches the PID given.
+@Input pid Process to search for.
+@Return Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStatsInDeadList(IMG_PID pid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats->pid == pid)
+ {
+ return psProcessStats;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ return NULL;
+} /* _FindProcessStatsInDeadList */
+
+/*************************************************************************/ /*!
+@Function _FindProcessStats
+@Description Searches the Live and Dead Process Lists for a statistics
+ structure that matches the PID given.
+@Input pid Process to search for.
+@Return Pointer to stats structure for the process.
+*/ /**************************************************************************/
+static PVRSRV_PROCESS_STATS*
+_FindProcessStats(IMG_PID pid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = _FindProcessStatsInLiveList(pid);
+
+ if (psProcessStats == NULL)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(pid);
+ }
+
+ return psProcessStats;
+} /* _FindProcessStats */
+
+/*************************************************************************/ /*!
+@Function _CompressMemoryUsage
+@Description Reduces memory usage by deleting old statistics data.
+ This function requires that the list lock is not held!
+*/ /**************************************************************************/
+static void
+_CompressMemoryUsage(void)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats;
+ PVRSRV_PROCESS_STATS* psProcessStatsToBeFreed;
+ IMG_UINT32 ui32ItemsRemaining;
+
+ /*
+ * We hold the lock whilst checking the list, but we'll release it
+ * before freeing memory (as that will require the lock too)!
+ */
+ OSLockAcquire(g_psLinkedListLock);
+
+ /* Check that the dead list is not bigger than the max size... */
+ psProcessStats = g_psDeadList;
+ psProcessStatsToBeFreed = NULL;
+ ui32ItemsRemaining = MAX_DEAD_LIST_PROCESSES;
+
+ while (psProcessStats != NULL && ui32ItemsRemaining > 0)
+ {
+ ui32ItemsRemaining--;
+ if (ui32ItemsRemaining == 0)
+ {
+ /* This is the last allowed process, cut the linked list here! */
+ psProcessStatsToBeFreed = psProcessStats->psNext;
+ psProcessStats->psNext = NULL;
+ }
+ else
+ {
+ psProcessStats = psProcessStats->psNext;
+ }
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Any processes stats remaining will need to be destroyed... */
+ while (psProcessStatsToBeFreed != NULL)
+ {
+ PVRSRV_PROCESS_STATS* psNextProcessStats = psProcessStatsToBeFreed->psNext;
+
+ psProcessStatsToBeFreed->psNext = NULL;
+#if defined(ENABLE_DEBUGFS_PIDS)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psProcessStatsToBeFreed->hLock);
+#endif
+ _RemovePIDOSStatisticEntries(psProcessStatsToBeFreed);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psProcessStatsToBeFreed->hLock);
+#endif
+#else
+ _DestroyProcessStat(psProcessStatsToBeFreed);
+#endif
+ psProcessStatsToBeFreed = psNextProcessStats;
+ }
+} /* _CompressMemoryUsage */
+
+/* These functions move the process stats from the live to the dead list.
+ * _MoveProcessToDeadList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToDeadListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Take the element out of the live list and append to the dead list... */
+ _RemoveProcessStatsFromList(psProcessStats);
+ _AddProcessStatsToFrontOfDeadList(psProcessStats);
+} /* _MoveProcessToDeadList */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+static void
+_MoveProcessToDeadListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Transfer the OS entries to the folder for dead processes... */
+ _RemovePIDOSStatisticEntries(psProcessStats);
+ _CreatePIDOSStatisticEntries(psProcessStats, pvOSDeadPidFolder);
+} /* _MoveProcessToDeadListDebugFS */
+#endif
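+
+/* A minimal usage sketch of the two-phase move described above (it mirrors
+ * the real call sites later in this file):
+ *
+ *   OSLockAcquire(g_psLinkedListLock);
+ *   _MoveProcessToDeadList(psProcessStats);          // list surgery only
+ *   OSLockRelease(g_psLinkedListLock);
+ * #if defined(ENABLE_DEBUGFS_PIDS)
+ *   _MoveProcessToDeadListDebugFS(psProcessStats);   // OS calls, no list lock
+ * #endif
+ */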
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+/* These functions move the process stats from the dead to the live list.
+ * _MoveProcessToLiveList moves the entry in the global lists and
+ * it needs to be protected by g_psLinkedListLock.
+ * _MoveProcessToLiveListDebugFS performs the OS calls and it
+ * shouldn't be used under g_psLinkedListLock because this could generate a
+ * lockdep warning. */
+static void
+_MoveProcessToLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Take the element out of the dead list and append to the live list... */
+ _RemoveProcessStatsFromList(psProcessStats);
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+} /* _MoveProcessToLiveList */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+static void
+_MoveProcessToLiveListDebugFS(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* Transfer the OS entries to the folder for live processes... */
+ _RemovePIDOSStatisticEntries(psProcessStats);
+ _CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+} /* _MoveProcessToLiveListDebugFS */
+#endif
+#endif
+
+/*************************************************************************/ /*!
+@Function _AddProcessStatsToFrontOfLiveList
+@Description Add a statistic to the live list head.
+@Input psProcessStats Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfLiveList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ /* This function should always be called under the global list lock g_psLinkedListLock. */
+ PVR_ASSERT(psProcessStats != NULL);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ if (g_psLiveList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psLiveList);
+ OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psLiveList->psPrev = psProcessStats;
+ OSLockRelease(g_psLiveList->hLock);
+ psProcessStats->psNext = g_psLiveList;
+ }
+
+ g_psLiveList = psProcessStats;
+
+ OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfLiveList */
+
+/*************************************************************************/ /*!
+@Function _AddProcessStatsToFrontOfDeadList
+@Description Add a statistic to the dead list head.
+@Input psProcessStats Process stats to add.
+*/ /**************************************************************************/
+static void
+_AddProcessStatsToFrontOfDeadList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ if (g_psDeadList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psDeadList);
+ OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psDeadList->psPrev = psProcessStats;
+ OSLockRelease(g_psDeadList->hLock);
+ psProcessStats->psNext = g_psDeadList;
+ }
+
+ g_psDeadList = psProcessStats;
+
+ OSLockRelease(psProcessStats->hLock);
+} /* _AddProcessStatsToFrontOfDeadList */
+
+/*************************************************************************/ /*!
+@Function _RemoveProcessStatsFromList
+@Description Detaches a process from either the live or dead list.
+@Input psProcessStats Process stats to remove.
+*/ /**************************************************************************/
+static void
+_RemoveProcessStatsFromList(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ /* Remove the item from the linked lists... */
+ if (g_psLiveList == psProcessStats)
+ {
+ g_psLiveList = psProcessStats->psNext;
+
+ if (g_psLiveList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psLiveList);
+ OSLockAcquireNested(g_psLiveList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psLiveList->psPrev = NULL;
+ OSLockRelease(g_psLiveList->hLock);
+ }
+ }
+ else if (g_psDeadList == psProcessStats)
+ {
+ g_psDeadList = psProcessStats->psNext;
+
+ if (g_psDeadList != NULL)
+ {
+ PVR_ASSERT(psProcessStats != g_psDeadList);
+ OSLockAcquireNested(g_psDeadList->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ g_psDeadList->psPrev = NULL;
+ OSLockRelease(g_psDeadList->hLock);
+ }
+ }
+ else
+ {
+ PVRSRV_PROCESS_STATS* psNext = psProcessStats->psNext;
+ PVRSRV_PROCESS_STATS* psPrev = psProcessStats->psPrev;
+
+ if (psProcessStats->psNext != NULL)
+ {
+ PVR_ASSERT(psProcessStats != psNext);
+ OSLockAcquireNested(psNext->hLock, PROCESS_LOCK_SUBCLASS_NEXT);
+ psProcessStats->psNext->psPrev = psPrev;
+ OSLockRelease(psNext->hLock);
+ }
+ if (psProcessStats->psPrev != NULL)
+ {
+ PVR_ASSERT(psProcessStats != psPrev);
+ OSLockAcquireNested(psPrev->hLock, PROCESS_LOCK_SUBCLASS_PREV);
+ psProcessStats->psPrev->psNext = psNext;
+ OSLockRelease(psPrev->hLock);
+ }
+ }
+
+ /* Reset the pointers in this cell, as it is not attached to anything */
+ psProcessStats->psNext = NULL;
+ psProcessStats->psPrev = NULL;
+
+ OSLockRelease(psProcessStats->hLock);
+
+} /* _RemoveProcessStatsFromList */
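+
+/* Note on the lock classes used above: CURRENT, PREV and NEXT are distinct
+ * lockdep subclasses so that up to three neighbouring node locks can be
+ * held at once (the node being detached plus the nodes either side of it)
+ * without triggering a false-positive nested-locking warning. */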
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function _CreatePIDOSStatisticEntries
+@Description Create all OS entries for this statistic.
+@Input psProcessStats Process stats to create the OS entries for.
+@Input pvOSPidFolder Pointer to OS folder to place the entries in.
+*/ /**************************************************************************/
+static void
+_CreatePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats,
+ void *pvOSPidFolder)
+{
+ void *pvOSPidFolderData;
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+ void *pvOSPidEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ void *pvOSMemEntryData;
+#endif
+#if defined(PVR_RI_DEBUG)
+ void *pvOSRIMemEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ void *pvOSCacheOpEntryData;
+#endif
+
+ PVR_ASSERT(psProcessStats != NULL);
+
+ pvOSPidFolderData = OSCreateStatisticFolder(psProcessStats->szFolderName, pvOSPidFolder);
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+ pvOSPidEntryData = OSCreateStatisticEntry("process_stats",
+ pvOSPidFolderData,
+ ProcessStatsPrintElements,
+ _PVRSRVIncrMemStatRefCount,
+ _PVRSRVDecrMemStatRefCount,
+ (void *) psProcessStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ pvOSMemEntryData = OSCreateStatisticEntry("mem_area",
+ pvOSPidFolderData,
+ MemStatsPrintElements,
+ NULL,
+ NULL,
+ (void *) psProcessStats->psMemoryStats);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ pvOSRIMemEntryData = OSCreateStatisticEntry("ri_mem_area",
+ pvOSPidFolderData,
+ RIMemStatsPrintElements,
+ NULL,
+ NULL,
+ (void *) psProcessStats->psRIMemoryStats);
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ pvOSCacheOpEntryData = OSCreateStatisticEntry("cache_ops_exec",
+ pvOSPidFolderData,
+ CacheOpStatsPrintElements,
+ NULL,
+ NULL,
+ (void *) psProcessStats);
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+#endif
+
+ psProcessStats->pvOSPidFolderData = pvOSPidFolderData;
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+ psProcessStats->pvOSPidEntryData = pvOSPidEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats->pvOSMemEntryData = pvOSMemEntryData;
+#endif
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats->pvOSRIMemEntryData = pvOSRIMemEntryData;
+#endif
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ psProcessStats->psCacheOpStats->pvOSCacheOpEntryData = pvOSCacheOpEntryData;
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psProcessStats->hLock);
+#endif
+} /* _CreatePIDOSStatisticEntries */
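+
+/* Resulting layout, sketched (the root and exact backing store depend on
+ * the OS statistics implementation -- debugfs on Linux is an assumption,
+ * not something this code guarantees):
+ *
+ *   <stats root>/<pid folder>/<szFolderName>/process_stats
+ *   <stats root>/<pid folder>/<szFolderName>/mem_area
+ *   <stats root>/<pid folder>/<szFolderName>/ri_mem_area
+ *   <stats root>/<pid folder>/<szFolderName>/cache_ops_exec
+ *
+ * with each file present only when its #ifdef above is enabled. */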
+
+/*************************************************************************/ /*!
+@Function _RemovePIDOSStatisticEntries
+@Description Removes all OS entries used by this statistic.
+@Input psProcessStats Process stats whose OS entries are removed.
+*/ /**************************************************************************/
+static void
+_RemovePIDOSStatisticEntries(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ OSRemoveStatisticEntry(psProcessStats->psCacheOpStats->pvOSCacheOpEntryData);
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ OSRemoveStatisticEntry(psProcessStats->psRIMemoryStats->pvOSRIMemEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ OSRemoveStatisticEntry(psProcessStats->psMemoryStats->pvOSMemEntryData);
+#endif
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+ if (psProcessStats->pvOSPidEntryData != NULL)
+ {
+ OSRemoveStatisticEntry(psProcessStats->pvOSPidEntryData);
+ }
+#endif
+
+ if (psProcessStats->pvOSPidFolderData != NULL)
+ {
+ OSRemoveStatisticFolder(&psProcessStats->pvOSPidFolderData);
+ }
+} /* _RemovePIDOSStatisticEntries */
+#endif /* defined(ENABLE_DEBUGFS_PIDS) */
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS) || !defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function _DestroyProcessStat
+@Description Frees memory and resources held by a process statistic.
+@Input psProcessStats Process stats to destroy.
+*/ /**************************************************************************/
+static void
+_DestroyProcessStat(PVRSRV_PROCESS_STATS* psProcessStats)
+{
+ PVR_ASSERT(psProcessStats != NULL);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ /* Free the memory statistics... */
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ while (psProcessStats->psMemoryStats->psMemoryRecords)
+ {
+ List_PVRSRV_MEM_ALLOC_REC_Remove(psProcessStats->psMemoryStats->psMemoryRecords);
+ }
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+#endif
+#if defined(PVR_RI_DEBUG)
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+#endif
+ OSLockRelease(psProcessStats->hLock);
+
+ /* Destroy the lock */
+ OSLockDestroyNoStats(psProcessStats->hLock);
+
+ /* Free the memory... */
+ OSFreeMemNoStats(psProcessStats);
+} /* _DestroyProcessStat */
+#endif
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+static IMG_UINT32 _PVRSRVIncrMemStatRefCount(void *pvStatPtr)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_UINT32 ui32Res = 0;
+
+ switch (*peStructureType)
+ {
+ case PVRSRV_STAT_STRUCTURE_PROCESS:
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ ui32Res = OSAtomicIncrement(&psProcessStats->iMemRefCount);
+#else
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ ui32Res = ++psProcessStats->ui32MemRefCount;
+ OSLockRelease(psProcessStats->hLock);
+#endif
+ break;
+ }
+ default:
+ {
+ /* _PVRSRVIncrMemStatRefCount was passed a pointer to an unrecognised struct */
+ PVR_ASSERT(0);
+ break;
+ }
+ }
+
+ return ui32Res;
+}
+
+static IMG_UINT32 _PVRSRVDecrMemStatRefCount(void *pvStatPtr)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_UINT32 ui32Res = 0;
+
+ switch (*peStructureType)
+ {
+ case PVRSRV_STAT_STRUCTURE_PROCESS:
+ {
+ /* Decrement stat memory refCount and free if now zero */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ ui32Res = OSAtomicDecrement(&psProcessStats->iMemRefCount);
+#else
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ ui32Res = --psProcessStats->ui32MemRefCount;
+ OSLockRelease(psProcessStats->hLock);
+#endif
+ if (ui32Res == 0)
+ {
+ _DestroyProcessStat(psProcessStats);
+ }
+ break;
+ }
+ default:
+ {
+ /* _PVRSRVDecrMemStatRefCount was passed a pointer to an unrecognised struct */
+ PVR_ASSERT(0);
+ break;
+ }
+ }
+ return ui32Res;
+}
+#endif
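+
+/* Lifecycle sketch for the two callbacks above (this assumes the
+ * OSCreateStatisticEntry() contract implied by the registration in
+ * _CreatePIDOSStatisticEntries(): increment when the OS entry is taken by
+ * a reader, decrement when it is released):
+ *
+ *   _PVRSRVIncrMemStatRefCount(psProcessStats);  // reader holds the stats
+ *   ...stat entry is read...
+ *   _PVRSRVDecrMemStatRefCount(psProcessStats);  // final ref destroys them
+ */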
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsInitialise
+@Description Entry point for initialising the statistics module.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsInitialise(void)
+{
+ PVRSRV_ERROR error;
+
+ PVR_ASSERT(g_psLiveList == NULL);
+ PVR_ASSERT(g_psDeadList == NULL);
+ PVR_ASSERT(g_psLinkedListLock == NULL);
+ PVR_ASSERT(gpsSizeTrackingHashTable == NULL);
+ PVR_ASSERT(bProcessStatsInitialised == IMG_FALSE);
+
+ /* We need a lock to protect the linked lists... */
+ error = OSLockCreate(&g_psLinkedListLock, LOCK_TYPE_NONE);
+ if (error == PVRSRV_OK)
+ {
+ /* We also need a lock to protect the hash table used for size tracking... */
+ error = OSLockCreate(&gpsSizeTrackingHashTableLock, LOCK_TYPE_NONE);
+
+ if (error != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* We also need a lock to protect the GlobalStat counters */
+ error = OSLockCreate(&gsGlobalStats.hGlobalStatsLock, LOCK_TYPE_NONE);
+ if (error != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* Create the PID folders for putting the PID files in... */
+ pvOSLivePidFolder = OSCreateStatisticFolder(pszOSLivePidFolderName, NULL);
+ pvOSDeadPidFolder = OSCreateStatisticFolder(pszOSDeadPidFolderName, NULL);
+#endif
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+ pvOSProcStats = OSCreateRawStatisticEntry("memtrack_stats", NULL,
+ RawProcessStatsPrintElements);
+#endif
+
+ /* Create power stats entry... */
+ pvOSPowerStatsEntryData = OSCreateStatisticEntry("power_timing_stats",
+ NULL,
+ PowerStatsPrintElements,
+ NULL,
+ NULL,
+ NULL);
+
+ pvOSGlobalMemEntryRef = OSCreateStatisticEntry(pszDriverStatFilename,
+ NULL,
+ GlobalStatsPrintElements,
+ NULL,
+ NULL,
+ NULL);
+
+ gpsSizeTrackingHashTable = HASH_Create(HASH_INITIAL_SIZE);
+
+ OSCachedMemSet(asClockSpeedChanges, 0, sizeof(asClockSpeedChanges));
+
+ /* Flag that we are ready to start monitoring memory allocations. */
+ bProcessStatsInitialised = IMG_TRUE;
+#if defined(PVR_RI_DEBUG)
+ /* Register our 'system' PID to hold driver-wide alloc stats */
+ _RegisterProcess(&g_hDriverProcessStats, PVR_SYS_ALLOC_PID);
+#endif
+ }
+ return error;
+e1:
+ OSLockDestroy(gpsSizeTrackingHashTableLock);
+ gpsSizeTrackingHashTableLock = NULL;
+e0:
+ OSLockDestroy(g_psLinkedListLock);
+ g_psLinkedListLock = NULL;
+ return error;
+} /* PVRSRVStatsInitialise */
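+
+/* A minimal pairing sketch (hypothetical caller; the real call sites are
+ * in the driver init/deinit paths outside this file):
+ *
+ *   PVRSRV_ERROR eErr = PVRSRVStatsInitialise();
+ *   if (eErr != PVRSRV_OK)
+ *   {
+ *       return eErr;          // stats unavailable, nothing to tear down
+ *   }
+ *   ...
+ *   PVRSRVStatsDestroy();     // on driver unload
+ */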
+
+static PVRSRV_ERROR _DumpAllVMallocEntries(uintptr_t k, uintptr_t v);
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsDestroy
+@Description Method for destroying the statistics module data.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDestroy(void)
+{
+ PVR_ASSERT(bProcessStatsInitialised == IMG_TRUE);
+
+#if defined(PVR_RI_DEBUG)
+ /* Deregister our 'system' PID which holds driver-wide alloc stats */
+ PVRSRVStatsDeregisterProcess(g_hDriverProcessStats);
+#endif
+
+ /* Stop monitoring memory allocations... */
+ bProcessStatsInitialised = IMG_FALSE;
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+ if (pvOSProcStats)
+ {
+ OSRemoveRawStatisticEntry(pvOSProcStats);
+ pvOSProcStats = NULL;
+ }
+#endif
+
+ /* Destroy the power stats entry... */
+ if (pvOSPowerStatsEntryData != NULL)
+ {
+ OSRemoveStatisticEntry(pvOSPowerStatsEntryData);
+ pvOSPowerStatsEntryData = NULL;
+ }
+
+ /* Destroy the global data entry */
+ if (pvOSGlobalMemEntryRef != NULL)
+ {
+ OSRemoveStatisticEntry(pvOSGlobalMemEntryRef);
+ pvOSGlobalMemEntryRef = NULL;
+ }
+
+ /* Destroy the locks... */
+ if (g_psLinkedListLock != NULL)
+ {
+ OSLockDestroy(g_psLinkedListLock);
+ g_psLinkedListLock = NULL;
+ }
+
+ /* Free the live and dead lists... */
+ while (g_psLiveList != NULL)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psLiveList;
+
+ _RemoveProcessStatsFromList(psProcessStats);
+#if defined(ENABLE_DEBUGFS_PIDS)
+ _RemovePIDOSStatisticEntries(psProcessStats);
+#else
+ _DestroyProcessStat(psProcessStats);
+#endif
+ }
+
+ while (g_psDeadList != NULL)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStats = g_psDeadList;
+
+ _RemoveProcessStatsFromList(psProcessStats);
+#if defined(ENABLE_DEBUGFS_PIDS)
+ _RemovePIDOSStatisticEntries(psProcessStats);
+#else
+ _DestroyProcessStat(psProcessStats);
+#endif
+ }
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* Remove the OS folders used by the PID folders...
+ * OSRemoveStatisticFolder will NULL the pointers */
+ OSRemoveStatisticFolder(&pvOSLivePidFolder);
+ OSRemoveStatisticFolder(&pvOSDeadPidFolder);
+#endif
+
+ if (gpsSizeTrackingHashTable != NULL)
+ {
+ /* Dump all remaining entries in HASH table (list any remaining vmallocs) */
+ HASH_Iterate(gpsSizeTrackingHashTable, (HASH_pfnCallback)_DumpAllVMallocEntries);
+ HASH_Delete(gpsSizeTrackingHashTable);
+ }
+ if (gpsSizeTrackingHashTableLock != NULL)
+ {
+ OSLockDestroy(gpsSizeTrackingHashTableLock);
+ gpsSizeTrackingHashTableLock = NULL;
+ }
+
+ if (gsGlobalStats.hGlobalStatsLock != NULL)
+ {
+ OSLockDestroy(gsGlobalStats.hGlobalStatsLock);
+ gsGlobalStats.hGlobalStatsLock = NULL;
+ }
+
+} /* PVRSRVStatsDestroy */
+
+static void _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes)
+{
+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+ DECREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+static void _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes)
+{
+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_KMALLOC, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMALLOC, uiBytes);
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA, uiBytes);
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES:
+ INCREASE_GLOBAL_STAT_VALUE(gsGlobalStats, PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL, uiBytes);
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+}
+
+static PVRSRV_ERROR
+_RegisterProcess(IMG_HANDLE *phProcessStats, IMG_PID ownerPid)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bMoveProcess = IMG_FALSE;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ IMG_CHAR acFolderName[30];
+ IMG_CHAR *pszProcName = OSGetCurrentProcessName();
+
+ /* strncpy() does not NUL-terminate when the source fills the buffer */
+ strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+ acFolderName[sizeof(acFolderName) - 1] = '\0';
+ StripBadChars(acFolderName);
+#endif
+
+ PVR_ASSERT(phProcessStats != NULL);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Register process PID %d [%s]",
+ __FUNCTION__, ownerPid, (ownerPid == PVR_SYS_ALLOC_PID)
+ ? "system" : OSGetCurrentClientProcessNameKM()));
+
+ /* Check the PID has not already moved to the dead list... */
+ OSLockAcquire(g_psLinkedListLock);
+ psProcessStats = _FindProcessStatsInDeadList(ownerPid);
+ if (psProcessStats != NULL)
+ {
+ /* Move it back onto the live list! */
+ _RemoveProcessStatsFromList(psProcessStats);
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+ /* We can perform the OS operation outside the lock */
+ bMoveProcess = IMG_TRUE;
+ }
+ else
+ {
+ /* Check the PID is not already registered in the live list... */
+ psProcessStats = _FindProcessStatsInLiveList(ownerPid);
+ }
+
+ /* If the PID is on the live list then just increment the ref count and return... */
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ psProcessStats->ui32RefCount++;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+ UPDATE_MAX_VALUE(psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS]);
+ OSLockRelease(psProcessStats->hLock);
+ OSLockRelease(g_psLinkedListLock);
+
+ *phProcessStats = psProcessStats;
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* Check if we need to perform any OS operation */
+ if (bMoveProcess)
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psProcessStats->hLock);
+#endif
+ /* Transfer the OS entries back to the folder for live processes... */
+ _RemovePIDOSStatisticEntries(psProcessStats);
+ _CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psProcessStats->hLock);
+#endif
+ }
+#endif
+
+ return PVRSRV_OK;
+ }
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Allocate a new node structure and initialise it... */
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psProcessStats == NULL)
+ {
+ *phProcessStats = (IMG_HANDLE) NULL;
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psProcessStats->eStructureType = PVRSRV_STAT_STRUCTURE_PROCESS;
+ psProcessStats->pid = ownerPid;
+ psProcessStats->ui32RefCount = 1;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAtomicWrite(&psProcessStats->iMemRefCount, 1);
+#else
+ psProcessStats->ui32MemRefCount = 1;
+#endif
+
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+ eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+ if (psProcessStats->psMemoryStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ goto e0;
+ }
+
+ psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+ if (psProcessStats->psRIMemoryStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ goto e0;
+ }
+ psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+ psProcessStats->psRIMemoryStats->pid = ownerPid;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+ if (psProcessStats->psCacheOpStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+ goto e0;
+ }
+ psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+ /* Add it to the live list... */
+ OSLockAcquire(g_psLinkedListLock);
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* Create the process stat in the OS... */
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d_%s", ownerPid, acFolderName);
+#else
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d", ownerPid);
+#endif
+ _CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#endif
+
+ /* Done */
+ *phProcessStats = (IMG_HANDLE) psProcessStats;
+
+ return PVRSRV_OK;
+
+e0:
+ OSFreeMemNoStats(psProcessStats);
+ *phProcessStats = (IMG_HANDLE) NULL;
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+} /* _RegisterProcess */
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsRegisterProcess
+@Description Registers a process with the statistics module.
+@Output phProcessStats Handle to the process to be used to deregister.
+@Return Standard PVRSRV_ERROR error code.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats)
+{
+ return _RegisterProcess(phProcessStats, OSGetCurrentClientProcessIDKM());
+}
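+
+/* Usage sketch for the register/deregister pair (hypothetical caller;
+ * connection setup/teardown code is the real user):
+ *
+ *   IMG_HANDLE hProcessStats;
+ *   if (PVRSRVStatsRegisterProcess(&hProcessStats) == PVRSRV_OK)
+ *   {
+ *       ...allocations made by this PID are now accounted...
+ *       PVRSRVStatsDeregisterProcess(hProcessStats);
+ *   }
+ */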
+
+/*************************************************************************/ /*!
+@Function PVRSRVStatsDeregisterProcess
+@Input hProcessStats Handle to the process returned when registered.
+@Description Method for deregistering a process from the statistics module.
+*/ /**************************************************************************/
+void
+PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats)
+{
+ IMG_BOOL bMoveProcess = IMG_FALSE;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Deregister process entered PID %d [%s]",
+ __FUNCTION__, OSGetCurrentClientProcessIDKM(),
+ OSGetCurrentProcessName()));
+
+ if (hProcessStats != (IMG_HANDLE) NULL)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) hProcessStats;
+
+ /* Lower the reference count, if zero then move it to the dead list */
+ OSLockAcquire(g_psLinkedListLock);
+ if (psProcessStats->ui32RefCount > 0)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->ui32RefCount--;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = psProcessStats->ui32RefCount;
+
+#if !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ if (psProcessStats->ui32RefCount == 0)
+ {
+ OSLockRelease(psProcessStats->hLock);
+ _MoveProcessToDeadList(psProcessStats);
+ bMoveProcess = IMG_TRUE;
+ }
+ else
+#endif
+ {
+ OSLockRelease(psProcessStats->hLock);
+ }
+ }
+ OSLockRelease(g_psLinkedListLock);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* The OS calls need to be performed without g_psLinkedListLock */
+ if (bMoveProcess == IMG_TRUE)
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psProcessStats->hLock);
+#endif
+ _MoveProcessToDeadListDebugFS(psProcessStats);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psProcessStats->hLock);
+#endif
+ }
+#endif
+
+ /* Check if the dead list needs to be reduced */
+ _CompressMemoryUsage();
+ }
+} /* PVRSRVStatsDeregisterProcess */
+
+void
+PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData,
+ IMG_PID currentPid)
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+{
+ _PVRSRVStatsAddMemAllocRecord(eAllocType, pvCpuVAddr, sCpuPAddr, uiBytes, pvPrivateData, currentPid, NULL, 0);
+}
+void
+_PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData,
+ IMG_PID currentPid,
+ void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine)
+#endif
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_MEM_ALLOC_REC* psRecord = NULL;
+ PVRSRV_PROCESS_STATS* psProcessStats;
+ PVRSRV_MEMORY_STATS* psMemoryStats;
+ enum { PVRSRV_PROC_NOTFOUND,
+ PVRSRV_PROC_FOUND,
+ PVRSRV_PROC_RESURRECTED
+ } eProcSearch = PVRSRV_PROC_FOUND;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVRSRV_ERROR eError;
+ IMG_CHAR acFolderName[30];
+ IMG_CHAR *pszProcName = OSGetCurrentProcessName();
+#endif
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Called when process statistics module "
+ "is not initialised", __FUNCTION__));
+#endif
+ return;
+ }
+
+ /*
+ * To prevent a recursive loop, we make the memory allocations
+ * for our memstat records via OSAllocMemNoStats(), which does not try to
+ * create a memstat record entry.
+ */
+
+ /* Allocate the memory record... */
+ psRecord = OSAllocZMemNoStats(sizeof(PVRSRV_MEM_ALLOC_REC));
+ if (psRecord == NULL)
+ {
+ return;
+ }
+
+ psRecord->eAllocType = eAllocType;
+ psRecord->pvCpuVAddr = pvCpuVAddr;
+ psRecord->sCpuPAddr.uiAddr = sCpuPAddr.uiAddr;
+ psRecord->uiBytes = uiBytes;
+ psRecord->pvPrivateData = pvPrivateData;
+
+ psRecord->pid = currentPid;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+ psRecord->pvAllocdFromFile = pvAllocFromFile;
+ psRecord->ui32AllocdFromLine = ui32AllocFromLine;
+#endif
+
+ _increase_global_stat(eAllocType, uiBytes);
+ /* Lock while we find the correct process... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ eProcSearch = PVRSRV_PROC_RESURRECTED;
+ }
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ eProcSearch = PVRSRV_PROC_RESURRECTED;
+ }
+ }
+
+ if (psProcessStats == NULL)
+ {
+ eProcSearch = PVRSRV_PROC_NOTFOUND;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Process stat increment called for "
+ "'unknown' process PID(%d)",__FUNCTION__, currentPid));
+
+ /* strncpy() does not NUL-terminate when the source fills the buffer */
+ strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+ acFolderName[sizeof(acFolderName) - 1] = '\0';
+ StripBadChars(acFolderName);
+
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psProcessStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+
+ psProcessStats->eStructureType = PVRSRV_STAT_STRUCTURE_PROCESS;
+ psProcessStats->pid = currentPid;
+ psProcessStats->ui32RefCount = 1;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAtomicWrite(&psProcessStats->iMemRefCount, 1);
+#else
+ psProcessStats->ui32MemRefCount = 1;
+#endif
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+ eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ /* Release the list lock before bailing out: the e0 path does not drop it */
+ OSLockRelease(g_psLinkedListLock);
+ goto e0;
+ }
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+ if (psProcessStats->psMemoryStats == NULL)
+ {
+ OSLockRelease(g_psLinkedListLock);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ psProcessStats->hLock = NULL;
+ goto e0;
+ }
+
+ psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+ if (psProcessStats->psRIMemoryStats == NULL)
+ {
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ psProcessStats->hLock = NULL;
+ OSLockRelease(g_psLinkedListLock);
+ goto e0;
+ }
+
+ psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+ psProcessStats->psRIMemoryStats->pid = currentPid;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+ if (psProcessStats->psCacheOpStats == NULL)
+ {
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSLockRelease(g_psLinkedListLock);
+ psProcessStats->hLock = NULL;
+ goto e0;
+ }
+
+ psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+ /* Add it to the live list... */
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+ OSLockRelease(g_psLinkedListLock);
+
+ /* Create the process stat in the OS... */
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d_%s", currentPid, acFolderName);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ _CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#endif
+#else /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+ OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+ }
+ else
+ {
+ OSLockRelease(g_psLinkedListLock);
+ }
+
+ if (psProcessStats == NULL)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_ERROR, "%s UNABLE TO CREATE process_stats entry for pid %d [%s] (" IMG_SIZE_FMTSPEC " bytes)", __FUNCTION__, currentPid, OSGetCurrentProcessName(), uiBytes));
+#endif
+ if (psRecord != NULL)
+ {
+ OSFreeMemNoStats(psRecord);
+ }
+ return;
+ }
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ psMemoryStats = psProcessStats->psMemoryStats;
+
+ /* Insert the memory record... */
+ if (psRecord != NULL)
+ {
+ List_PVRSRV_MEM_ALLOC_REC_Insert(&psMemoryStats->psMemoryRecords, psRecord);
+ }
+
+ /* Update the memory watermarks... */
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ {
+ if (psRecord != NULL)
+ {
+ psRecord->ui64Key = sCpuPAddr.uiAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ {
+ if (psRecord != NULL)
+ {
+ psRecord->ui64Key = sCpuPAddr.uiAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ {
+ if (psRecord != NULL)
+ {
+ psRecord->ui64Key = sCpuPAddr.uiAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ {
+ if (psRecord != NULL)
+ {
+ if (pvCpuVAddr == NULL)
+ {
+ break;
+ }
+ psRecord->ui64Key = (IMG_UINT64)(uintptr_t)pvCpuVAddr;
+ }
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(0);
+ }
+ break;
+ }
+ OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ if (eProcSearch == PVRSRV_PROC_RESURRECTED)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Process stat incremented on 'dead' "
+ "process PID(%d)",__FUNCTION__, currentPid));
+ /* Move process from dead list to live list */
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS) && defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ _MoveProcessToLiveListDebugFS(psProcessStats);
+#endif
+ }
+#endif
+
+ return;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+e0:
+ OSFreeMemNoStats(psRecord);
+ OSFreeMemNoStats(psProcessStats);
+ return;
+#endif
+#endif
+} /* PVRSRVStatsAddMemAllocRecord */
+
+void
+PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 ui64Key,
+ IMG_PID currentPid)
+{
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+ PVRSRV_MEMORY_STATS* psMemoryStats = NULL;
+ PVRSRV_MEM_ALLOC_REC* psRecord = NULL;
+ IMG_BOOL bFound = IMG_FALSE;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Called when process statistics module "
+ "is not initialised", __FUNCTION__));
+#endif
+ return;
+ }
+
+ /* Lock while we find the correct process and remove this record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+ if (psProcessStats != NULL)
+ {
+ psMemoryStats = psProcessStats->psMemoryStats;
+ psRecord = psMemoryStats->psMemoryRecords;
+ while (psRecord != NULL)
+ {
+ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
+ {
+ bFound = IMG_TRUE;
+ break;
+ }
+
+ psRecord = psRecord->psNext;
+ }
+ }
+
+ /* If not found, we need to do a full search in case it was allocated to a different PID... */
+ if (!bFound)
+ {
+ PVRSRV_PROCESS_STATS* psProcessStatsAlreadyChecked = psProcessStats;
+
+ /* Search all live lists first... */
+ psProcessStats = g_psLiveList;
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats != psProcessStatsAlreadyChecked)
+ {
+ psMemoryStats = psProcessStats->psMemoryStats;
+ psRecord = psMemoryStats->psMemoryRecords;
+ while (psRecord != NULL)
+ {
+ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
+ {
+ bFound = IMG_TRUE;
+ break;
+ }
+
+ psRecord = psRecord->psNext;
+ }
+ }
+
+ if (bFound)
+ {
+ break;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ /* If not found, then search all dead lists next... */
+ if (!bFound)
+ {
+ psProcessStats = g_psDeadList;
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats != psProcessStatsAlreadyChecked)
+ {
+ psMemoryStats = psProcessStats->psMemoryStats;
+ psRecord = psMemoryStats->psMemoryRecords;
+ while (psRecord != NULL)
+ {
+ if (psRecord->ui64Key == ui64Key && psRecord->eAllocType == eAllocType)
+ {
+ bFound = IMG_TRUE;
+ break;
+ }
+
+ psRecord = psRecord->psNext;
+ }
+ }
+
+ if (bFound)
+ {
+ break;
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+ }
+ }
+
+ /* Update the watermark and remove this record... */
+ if (bFound)
+ {
+ _decrease_global_stat(eAllocType, psRecord->uiBytes);
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ _DecreaseProcStatValue(eAllocType,
+ psProcessStats,
+ psRecord->uiBytes);
+
+ List_PVRSRV_MEM_ALLOC_REC_Remove(psRecord);
+ OSLockRelease(psProcessStats->hLock);
+ OSLockRelease(g_psLinkedListLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* If all allocation stats are now zero, move this process to the dead list */
+ if (psProcessStats->ui32StatAllocFlags == 0)
+ {
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToDeadList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psProcessStats->hLock);
+#endif
+ _MoveProcessToDeadListDebugFS(psProcessStats);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psProcessStats->hLock);
+#endif
+#endif
+
+ /* Check if the dead list needs to be reduced */
+ _CompressMemoryUsage();
+ }
+#endif
+ /*
+ * Free the record outside the lock so we don't deadlock and so we
+ * reduce the time the lock is held.
+ */
+ OSFreeMemNoStats(psRecord);
+ }
+ else
+ {
+ OSLockRelease(g_psLinkedListLock);
+ }
+
+#else
+ PVR_UNREFERENCED_PARAMETER(eAllocType);
+ PVR_UNREFERENCED_PARAMETER(ui64Key);
+ PVR_UNREFERENCED_PARAMETER(currentPid);
+#endif
+} /* PVRSRVStatsRemoveMemAllocRecord */
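+
+/* Pairing sketch: the key passed here must match the key the add path
+ * stored, which per the switch in PVRSRVStatsAddMemAllocRecord() is the
+ * CPU virtual address for the KMALLOC/VMALLOC/VMAP/IOREMAP-type records
+ * and the CPU physical address for the page-based types. E.g. (illustrative):
+ *
+ *   PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, pvAddr,
+ *                                sCpuPAddr, uiBytes, NULL, uiPid);
+ *   ...
+ *   PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_KMALLOC,
+ *                                   (IMG_UINT64)(uintptr_t)pvAddr, uiPid);
+ */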
+
+static PVRSRV_ERROR _DumpAllVMallocEntries(uintptr_t k, uintptr_t v)
+{
+ _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)(uintptr_t)v;
+ IMG_UINT64 uiCpuVAddr = (IMG_UINT64)k;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: " IMG_SIZE_FMTSPEC " bytes @ 0x%" IMG_UINT64_FMTSPECx " (PID %u)", __func__,
+ psNewTrackingHashEntry->uiSizeInBytes,
+ uiCpuVAddr,
+ psNewTrackingHashEntry->uiPid));
+ return PVRSRV_OK;
+}
+
+void
+PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_UINT64 uiCpuVAddr,
+ IMG_PID uiPid)
+{
+ IMG_BOOL bRes = IMG_FALSE;
+ _PVR_STATS_TRACKING_HASH_ENTRY *psNewTrackingHashEntry = NULL;
+
+ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL))
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Called when process statistics module "
+ "is not initialised", __FUNCTION__));
+#endif
+ return;
+ }
+
+ /* Alloc untracked memory for the new hash table entry */
+ psNewTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)OSAllocMemNoStats(sizeof(*psNewTrackingHashEntry));
+ if (psNewTrackingHashEntry)
+ {
+ /* Fill-in the size of the allocation and PID of the allocating process */
+ psNewTrackingHashEntry->uiSizeInBytes = uiBytes;
+ psNewTrackingHashEntry->uiPid = uiPid;
+ OSLockAcquire(gpsSizeTrackingHashTableLock);
+ /* Insert address of the new struct into the hash table */
+ bRes = HASH_Insert(gpsSizeTrackingHashTable, uiCpuVAddr, (uintptr_t)psNewTrackingHashEntry);
+ OSLockRelease(gpsSizeTrackingHashTableLock);
+
+ if (bRes)
+ {
+ PVRSRVStatsIncrMemAllocStat(eAllocType, uiBytes, uiPid);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d HASH_Insert() failed!!", __FUNCTION__, __LINE__));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "*** %s : @ line %d Failed to alloc memory for psNewTrackingHashEntry!!", __FUNCTION__, __LINE__));
+ }
+}
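+
+/* The matching untrack path (assumed to be the corresponding
+ * PVRSRVStatsDecrMemAllocStatAndUntrack() elsewhere in this file; it is not
+ * shown in this hunk) looks uiCpuVAddr up in gpsSizeTrackingHashTable,
+ * removes the entry, and decrements the stat by the stored uiSizeInBytes --
+ * which is why the size is captured in the hash entry here instead of being
+ * passed again at free time. */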
+
+void
+PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_PID currentPid)
+{
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+ enum { PVRSRV_PROC_NOTFOUND,
+ PVRSRV_PROC_FOUND,
+ PVRSRV_PROC_RESURRECTED
+ } eProcSearch = PVRSRV_PROC_FOUND;
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVRSRV_ERROR eError;
+ IMG_CHAR acFolderName[30];
+ IMG_CHAR *pszProcName = OSGetCurrentProcessName();
+#endif
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Called when process statistics module "
+ "is not initialised", __FUNCTION__));
+#endif
+ return;
+ }
+
+ _increase_global_stat(eAllocType, uiBytes);
+ OSLockAcquire(g_psLinkedListLock);
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ eProcSearch = PVRSRV_PROC_RESURRECTED;
+ }
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStatsInLiveList(currentPid);
+ if (!psProcessStats)
+ {
+ psProcessStats = _FindProcessStatsInDeadList(currentPid);
+ eProcSearch = PVRSRV_PROC_RESURRECTED;
+ }
+ }
+
+ if (psProcessStats == NULL)
+ {
+ eProcSearch = PVRSRV_PROC_NOTFOUND;
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Process stat increment called for 'unknown' "
+ "process PID(%d)",__FUNCTION__, currentPid));
+
+ /* strncpy() does not NUL-terminate when the source fills the buffer */
+ strncpy(acFolderName, pszProcName, sizeof(acFolderName) - 1);
+ acFolderName[sizeof(acFolderName) - 1] = '\0';
+ StripBadChars(acFolderName);
+
+ if (bProcessStatsInitialised)
+ {
+ psProcessStats = OSAllocZMemNoStats(sizeof(PVRSRV_PROCESS_STATS));
+ if (psProcessStats == NULL)
+ {
+ /* Don't leak g_psLinkedListLock on the error path */
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+
+ psProcessStats->eStructureType = PVRSRV_STAT_STRUCTURE_PROCESS;
+ psProcessStats->pid = currentPid;
+ psProcessStats->ui32RefCount = 1;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAtomicWrite(&psProcessStats->iMemRefCount, 1);
+#else
+ psProcessStats->ui32MemRefCount = 1;
+#endif
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_CONNECTIONS] = 1;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAX_CONNECTIONS] = 1;
+
+ eError = OSLockCreateNoStats(&psProcessStats->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMemNoStats(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS)
+ psProcessStats->psMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_MEMORY_STATS));
+ if (psProcessStats->psMemoryStats == NULL)
+ {
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+ psProcessStats->psMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_MEMORY;
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ psProcessStats->psRIMemoryStats = OSAllocZMemNoStats(sizeof(PVRSRV_RI_MEMORY_STATS));
+ if (psProcessStats->psRIMemoryStats == NULL)
+ {
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+ psProcessStats->psRIMemoryStats->eStructureType = PVRSRV_STAT_STRUCTURE_RIMEMORY;
+ psProcessStats->psRIMemoryStats->pid = currentPid;
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+ psProcessStats->psCacheOpStats = OSAllocZMemNoStats(sizeof(PVRSRV_CACHEOP_STATS));
+ if (psProcessStats->psCacheOpStats == NULL)
+ {
+ OSFreeMemNoStats(psProcessStats->psMemoryStats);
+ OSFreeMemNoStats(psProcessStats->psRIMemoryStats);
+ OSLockDestroyNoStats(psProcessStats->hLock);
+ OSFreeMemNoStats(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+ return;
+ }
+ psProcessStats->psCacheOpStats->eStructureType = PVRSRV_STAT_STRUCTURE_CACHEOP;
+#endif
+
+ /* Add it to the live list... */
+ _AddProcessStatsToFrontOfLiveList(psProcessStats);
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+ /* Create the process stat in the OS... */
+ OSSNPrintf(psProcessStats->szFolderName, sizeof(psProcessStats->szFolderName),
+ "%d_%s", currentPid, acFolderName);
+
+ _CreatePIDOSStatisticEntries(psProcessStats, pvOSLivePidFolder);
+#endif
+ }
+#else
+ OSLockRelease(g_psLinkedListLock);
+#endif /* defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) */
+
+ }
+
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ /* Release the list lock as soon as we acquire the process lock;
+ * this ensures that if the process is on the dead list its entry
+ * cannot be deleted or modified. */
+ OSLockRelease(g_psLinkedListLock);
+ /* Update the memory watermarks... */
+ switch (eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ {
+ INCREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ psProcessStats->ui32StatAllocFlags |= (IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(0);
+ }
+ break;
+ }
+ OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ if (eProcSearch == PVRSRV_PROC_RESURRECTED)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Process stat incremented on 'dead' "
+ "process PID(%d)",__FUNCTION__, currentPid));
+
+ /* Move process from dead list to live list */
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToLiveList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+ _MoveProcessToLiveListDebugFS(psProcessStats);
+#endif
+ }
+#endif
+ }
+
+}
+
+static void
+_DecreaseProcStatValue(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ PVRSRV_PROCESS_STATS* psProcessStats,
+ IMG_UINT32 uiBytes)
+{
+ switch (eAllocType)
+ {
+ #if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, (IMG_UINT32)uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_KMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMALLOC, (IMG_UINT32)uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMALLOC] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMALLOC-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+ #else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ break;
+ #endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA, (IMG_UINT32)uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_VMAP_PT_UMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA, (IMG_UINT32)uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_IOREMAP_PT_LMA-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES, (IMG_UINT32)uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES, (IMG_UINT32)uiBytes);
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES:
+ {
+ DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES, (IMG_UINT32)uiBytes);
+ if( psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES] == 0 )
+ {
+ psProcessStats->ui32StatAllocFlags &= ~(IMG_UINT32)(1 << (PVRSRV_PROCESS_STAT_TYPE_MAP_UMA_LMA_PAGES-PVRSRV_PROCESS_STAT_TYPE_KMALLOC));
+ }
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(0);
+ }
+ break;
+ }
+
+}
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
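+/* Writes the raw per-process memory stats as CSV: a header row followed
+ * by one row per live process; the PVR_SYS_ALLOC_PID entry is skipped. */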
+void RawProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC *pfnOSStatsPrintf)
+{
+ PVRSRV_PROCESS_STATS *psProcessStats;
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: pfnOSStatsPrintf not set", __func__));
+ return;
+ }
+
+ pfnOSStatsPrintf(pvFile, "%s,%s,%s,%s,%s,%s\n",
+ "PID",
+ "MemoryUsageKMalloc", // PVRSRV_PROCESS_STAT_TYPE_KMALLOC
+ "MemoryUsageAllocPTMemoryUMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA
+ "MemoryUsageAllocPTMemoryLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA
+ "MemoryUsageAllocGPUMemLMA", // PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES
+ "MemoryUsageAllocGPUMemUMA" // PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES
+ );
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = g_psLiveList;
+
+ while (psProcessStats != NULL)
+ {
+ if (psProcessStats->pid != PVR_SYS_ALLOC_PID)
+ {
+ pfnOSStatsPrintf(pvFile, "%d,%d,%d,%d,%d,%d\n",
+ psProcessStats->pid,
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_KMALLOC],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_UMA],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_PAGES_PT_LMA],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES],
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES]
+ );
+ }
+
+ psProcessStats = psProcessStats->psNext;
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* RawProcessStatsPrintElements */
+#endif
+
+void
+PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+ IMG_PID decrPID)
+{
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_KMALLOC, uiBytes);
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(decrPID);
+
+	if (psProcessStats != NULL)
+	{
+		OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Decrement the kmalloc memory stat... */
+		DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_KMALLOC, uiBytes);
+		DECREASE_STAT_VALUE(psProcessStats, PVRSRV_PROCESS_STAT_TYPE_TOTAL, uiBytes);
+		OSLockRelease(psProcessStats->hLock);
+	}
+
+ OSLockRelease(g_psLinkedListLock);
+#endif
+}
+
+static void
+_StatsDecrMemTrackedStat(_PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry,
+ PVRSRV_MEM_ALLOC_TYPE eAllocType)
+{
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _decrease_global_stat(eAllocType, psTrackingHashEntry->uiSizeInBytes);
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(psTrackingHashEntry->uiPid);
+
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ /* Decrement the memory stat... */
+ _DecreaseProcStatValue(eAllocType,
+ psProcessStats,
+ psTrackingHashEntry->uiSizeInBytes);
+ OSLockRelease(psProcessStats->hLock);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+}
+
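+/*
+ * Removes the tracking entry that PVRSRVStatsIncrMemAllocStatAndTrack()
+ * inserted for uiCpuVAddr, decrements the process and global stats by the
+ * size recorded in that entry, and frees the entry.
+ */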
+void
+PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 uiCpuVAddr)
+{
+ _PVR_STATS_TRACKING_HASH_ENTRY *psTrackingHashEntry = NULL;
+
+ if (!bProcessStatsInitialised || (gpsSizeTrackingHashTable == NULL) )
+ {
+ return;
+ }
+
+ OSLockAcquire(gpsSizeTrackingHashTableLock);
+ psTrackingHashEntry = (_PVR_STATS_TRACKING_HASH_ENTRY *)HASH_Remove(gpsSizeTrackingHashTable, uiCpuVAddr);
+ OSLockRelease(gpsSizeTrackingHashTableLock);
+ if (psTrackingHashEntry)
+ {
+ _StatsDecrMemTrackedStat(psTrackingHashEntry, eAllocType);
+ OSFreeMemNoStats(psTrackingHashEntry);
+ }
+}
+
+void
+PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_PID currentPid)
+{
+ IMG_PID currentCleanupPid = PVRSRVGetPurgeConnectionPid();
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_PROCESS_STATS* psProcessStats = NULL;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ _decrease_global_stat(eAllocType, uiBytes);
+
+ OSLockAcquire(g_psLinkedListLock);
+ if (psPVRSRVData)
+ {
+ if ( (currentPid == psPVRSRVData->cleanupThreadPid) &&
+ (currentCleanupPid != 0))
+ {
+ psProcessStats = _FindProcessStats(currentCleanupPid);
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+ }
+ else
+ {
+ psProcessStats = _FindProcessStats(currentPid);
+ }
+
+
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+		/* Release the list lock as soon as we acquire the process lock;
+		 * this ensures that if the process is on the dead list the entry
+		 * cannot be deleted or modified. */
+ OSLockRelease(g_psLinkedListLock);
+ /* Update the memory watermarks... */
+ _DecreaseProcStatValue(eAllocType,
+ psProcessStats,
+ uiBytes);
+ OSLockRelease(psProcessStats->hLock);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* If all stats are now zero, remove the entry for this thread */
+ if (psProcessStats->ui32StatAllocFlags == 0)
+ {
+ OSLockAcquire(g_psLinkedListLock);
+ _MoveProcessToDeadList(psProcessStats);
+ OSLockRelease(g_psLinkedListLock);
+#if defined(ENABLE_DEBUGFS_PIDS)
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psProcessStats->hLock);
+#endif
+ _MoveProcessToDeadListDebugFS(psProcessStats);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psProcessStats->hLock);
+#endif
+#endif
+
+ /* Check if the dead list needs to be reduced */
+ _CompressMemoryUsage();
+ }
+#endif
+	}
+	else
+	{
+		OSLockRelease(g_psLinkedListLock);
+	}
+}
+
+/* For now we do not want to expose the global stats API,
+ * so we wrap it in these pool-page-specific functions.
+ * As soon as the global stats need to be modified directly elsewhere,
+ * these functions should be replaced with more general ones.
+ */
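+/*
+ * Illustrative pairing (hypothetical call site in the UMA page-pool code;
+ * uiNumPages is assumed):
+ *
+ *     PVRSRVStatsIncrMemAllocPoolStat(uiNumPages << OSGetPageShift());
+ *     ...
+ *     PVRSRVStatsDecrMemAllocPoolStat(uiNumPages << OSGetPageShift());
+ */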
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes)
+{
+ _increase_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes)
+{
+ _decrease_global_stat(PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, uiBytes);
+}
+
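+/* Accumulates render context counters (partial renders, out-of-memory
+ * events and the TA/3D/SH/CDM store counts) into the stats record of the
+ * owning process. */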
+void
+PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+ IMG_UINT32 ui32TotalNumOutOfMemory,
+ IMG_UINT32 ui32NumTAStores,
+ IMG_UINT32 ui32Num3DStores,
+ IMG_UINT32 ui32NumSHStores,
+ IMG_UINT32 ui32NumCDMStores,
+ IMG_PID pidOwner)
+{
+ IMG_PID pidCurrent = pidOwner;
+
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(pidCurrent);
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_PRS] += ui32TotalNumPartialRenders;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_OOMS] += ui32TotalNumOutOfMemory;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_TA_STORES] += ui32NumTAStores;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_3D_STORES] += ui32Num3DStores;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_SH_STORES] += ui32NumSHStores;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_RC_CDM_STORES]+= ui32NumCDMStores;
+ OSLockRelease(psProcessStats->hLock);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVStatsUpdateRenderContextStats: Null process. Pid=%d", pidCurrent));
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateRenderContextStats */
+
+void
+PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+ IMG_UINT32 ui32NumReqByFW,
+ IMG_PID owner)
+{
+	IMG_PID currentPid = (owner == 0) ? OSGetCurrentClientProcessIDKM() : owner;
+	PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(currentPid);
+ if (psProcessStats != NULL)
+ {
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_APP] += ui32NumReqByApp;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_ZSBUFFER_REQS_BY_FW] += ui32NumReqByFW;
+ OSLockRelease(psProcessStats->hLock);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateZSBufferStats */
+
+void
+PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+ IMG_UINT32 ui32NumGrowReqByFW,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32NumHighPages,
+ IMG_PID ownerPid)
+{
+	IMG_PID currentPid = (ownerPid != 0) ? ownerPid : OSGetCurrentClientProcessIDKM();
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(currentPid);
+
+ if (psProcessStats != NULL)
+ {
+ /* Avoid signed / unsigned mismatch which is flagged by some compilers */
+ IMG_INT32 a, b;
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_APP] += ui32NumGrowReqByApp;
+ psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_GROW_REQS_BY_FW] += ui32NumGrowReqByFW;
+
+		a = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT];
+		b = (IMG_INT32)ui32InitFLPages;
+		UPDATE_MAX_VALUE(a, b);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_PAGES_INIT] = a;
+		ui32InitFLPages = (IMG_UINT32)b;
+
+		a = psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES];
+		b = (IMG_INT32)ui32NumHighPages;
+		UPDATE_MAX_VALUE(a, b);
+		psProcessStats->i32StatValue[PVRSRV_PROCESS_STAT_TYPE_FREELIST_MAX_PAGES] = a;
+		ui32NumHighPages = (IMG_UINT32)b;
+ OSLockRelease(psProcessStats->hLock);
+
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateFreelistStats */
+
+#if defined(PVRSRV_ENABLE_PERPID_STATS)
+/*************************************************************************/ /*!
+@Function ProcessStatsPrintElements
+@Description Prints all elements for this process statistic record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+ProcessStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_UINT32 ui32StatNumber = 0;
+
+ if (peStructureType == NULL || *peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS)
+ {
+ PVR_ASSERT(peStructureType != NULL && *peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ pfnOSStatsPrintf(pvFile, "PID %d\n", psProcessStats->pid);
+
+ /* Loop through all the values and print them... */
+ while (ui32StatNumber < PVRSRV_PROCESS_STAT_TYPE_COUNT)
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (OSAtomicRead(&psProcessStats->iMemRefCount) > 0)
+#else
+ if (psProcessStats->ui32MemRefCount > 0)
+#endif
+ {
+#if defined(PVR_RI_DEBUG)
+ if ((ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) || (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_UMA_PAGES))
+ {
+ /* get the stat from RI */
+				IMG_INT32 i32Total = RITotalAllocProcessKM(psProcessStats->psRIMemoryStats->pid, (ui32StatNumber == PVRSRV_PROCESS_STAT_TYPE_ALLOC_LMA_PAGES) ? PHYS_HEAP_TYPE_LMA : PHYS_HEAP_TYPE_UMA);
+				pfnOSStatsPrintf(pvFile, pszProcessStatFmt[ui32StatNumber], i32Total, i32Total >> 10);
+ }
+ else
+#endif
+ {
+ if (ui32StatNumber >= PVRSRV_PROCESS_STAT_TYPE_KMALLOC &&
+ ui32StatNumber <= PVRSRV_PROCESS_STAT_TYPE_TOTAL_MAX)
+ {
+ pfnOSStatsPrintf(pvFile, pszProcessStatFmt[ui32StatNumber], psProcessStats->i32StatValue[ui32StatNumber], psProcessStats->i32StatValue[ui32StatNumber] >> 10);
+
+ }
+ else
+ {
+ pfnOSStatsPrintf(pvFile, pszProcessStatFmt[ui32StatNumber], psProcessStats->i32StatValue[ui32StatNumber]);
+ }
+ }
+ }
+ else
+ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Called with psProcessStats->iMemRefCount=%d", __FUNCTION__, OSAtomicRead(&psProcessStats->iMemRefCount)));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: Called with psProcessStats->ui32MemRefCount=%d", __FUNCTION__, psProcessStats->ui32MemRefCount));
+#endif
+ }
+ ui32StatNumber++;
+ }
+
+ OSLockRelease(psProcessStats->hLock);
+} /* ProcessStatsPrintElements */
+#endif
+
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void
+PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+ IMG_UINT32 ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT32 eFenceOpType,
+#endif
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_UINT64 ui64ExecuteTime,
+ IMG_BOOL bRangeBasedFlush,
+ IMG_BOOL bUserModeFlush,
+ IMG_BOOL bIsFence,
+ IMG_PID ownerPid)
+{
+	IMG_PID currentPid = (ownerPid != 0) ? ownerPid : OSGetCurrentClientProcessIDKM();
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+ /* Don't do anything if we are not initialised or we are shutting down! */
+ if (!bProcessStatsInitialised)
+ {
+ return;
+ }
+
+ /* Lock while we find the correct process and update the record... */
+ OSLockAcquire(g_psLinkedListLock);
+
+ psProcessStats = _FindProcessStats(currentPid);
+
+ if (psProcessStats != NULL)
+ {
+ IMG_INT32 Idx;
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+
+ /* Look-up next buffer write index */
+ Idx = psProcessStats->uiCacheOpWriteIndex;
+ psProcessStats->uiCacheOpWriteIndex = INCREMENT_CACHEOP_STAT_IDX_WRAP(Idx);
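+		/* asCacheOp[] is a fixed-size ring buffer: the write index wraps
+		 * at the end, so the oldest entries are overwritten and only the
+		 * most recent CacheOps are retained. */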
+
+ /* Store all CacheOp meta-data */
+ psProcessStats->asCacheOp[Idx].uiCacheOp = uiCacheOp;
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ psProcessStats->asCacheOp[Idx].sDevVAddr = sDevVAddr;
+ psProcessStats->asCacheOp[Idx].sDevPAddr = sDevPAddr;
+ psProcessStats->asCacheOp[Idx].eFenceOpType = eFenceOpType;
+#endif
+ psProcessStats->asCacheOp[Idx].uiOffset = uiOffset;
+ psProcessStats->asCacheOp[Idx].uiSize = uiSize;
+ psProcessStats->asCacheOp[Idx].bRangeBasedFlush = bRangeBasedFlush;
+ psProcessStats->asCacheOp[Idx].bUserModeFlush = bUserModeFlush;
+ psProcessStats->asCacheOp[Idx].ui64ExecuteTime = ui64ExecuteTime;
+ psProcessStats->asCacheOp[Idx].ui32OpSeqNum = ui32OpSeqNum;
+ psProcessStats->asCacheOp[Idx].bIsFence = bIsFence;
+
+ OSLockRelease(psProcessStats->hLock);
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* PVRSRVStatsUpdateCacheOpStats */
+
+#if defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function CacheOpStatsPrintElements
+@Description Prints all elements for this process statistic CacheOp record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+CacheOpStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_PROCESS_STATS* psProcessStats = (PVRSRV_PROCESS_STATS*) pvStatPtr;
+ IMG_CHAR *pszCacheOpType, *pszFlushType, *pszFlushMode;
+ IMG_INT32 i32WriteIdx, i32ReadIdx;
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ #define CACHEOP_RI_PRINTF_HEADER \
+ "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12s %-12s\n"
+ #define CACHEOP_RI_PRINTF_FENCE \
+ "%-10s %-10s %-5s %-16s %-16s %-10s %-10s %-12llu 0x%-10x\n"
+ #define CACHEOP_RI_PRINTF \
+ "%-10s %-10s %-5s 0x%-14llx 0x%-14llx 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#else
+ #define CACHEOP_PRINTF_HEADER \
+ "%-10s %-10s %-5s %-10s %-10s %-12s %-12s\n"
+ #define CACHEOP_PRINTF_FENCE \
+ "%-10s %-10s %-5s %-10s %-10s %-12llu 0x%-10x\n"
+ #define CACHEOP_PRINTF \
+ "%-10s %-10s %-5s 0x%-8llx 0x%-8llx %-12llu 0x%-10x\n"
+#endif
+
+ if (peStructureType == NULL ||
+ *peStructureType != PVRSRV_STAT_STRUCTURE_PROCESS ||
+ psProcessStats->psCacheOpStats->eStructureType != PVRSRV_STAT_STRUCTURE_CACHEOP)
+ {
+ PVR_ASSERT(peStructureType != NULL);
+ PVR_ASSERT(*peStructureType == PVRSRV_STAT_STRUCTURE_PROCESS);
+ PVR_ASSERT(psProcessStats->psCacheOpStats->eStructureType == PVRSRV_STAT_STRUCTURE_CACHEOP);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /* File header info */
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ CACHEOP_RI_PRINTF_HEADER,
+#else
+ CACHEOP_PRINTF_HEADER,
+#endif
+ "CacheOp",
+ "Type",
+ "Mode",
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ "DevVAddr",
+ "DevPAddr",
+#endif
+ "Offset",
+ "Size",
+ "Time (us)",
+ "SeqNo");
+
+ /* Take a snapshot of write index, read backwards in buffer
+ and wrap round at boundary */
+ i32WriteIdx = psProcessStats->uiCacheOpWriteIndex;
+ for (i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32WriteIdx);
+ i32ReadIdx != i32WriteIdx;
+ i32ReadIdx = DECREMENT_CACHEOP_STAT_IDX_WRAP(i32ReadIdx))
+ {
+ IMG_UINT64 ui64ExecuteTime;
+
+ if (! psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum)
+ {
+ break;
+ }
+
+ ui64ExecuteTime = psProcessStats->asCacheOp[i32ReadIdx].ui64ExecuteTime;
+
+ if (psProcessStats->asCacheOp[i32ReadIdx].bIsFence)
+ {
+ IMG_CHAR *pszFenceType = "";
+ pszCacheOpType = "Fence";
+
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ switch (psProcessStats->asCacheOp[i32ReadIdx].eFenceOpType)
+ {
+ case RGXFWIF_DM_GP:
+ pszFenceType = "GP";
+ break;
+
+ case RGXFWIF_DM_TDM:
+ /* Also case RGXFWIF_DM_2D: */
+ pszFenceType = "TDM/2D";
+ break;
+
+ case RGXFWIF_DM_TA:
+ pszFenceType = "TA";
+ break;
+
+ case RGXFWIF_DM_3D:
+ pszFenceType = "3D";
+ break;
+
+ case RGXFWIF_DM_CDM:
+ pszFenceType = "CDM";
+ break;
+
+ case RGXFWIF_DM_RTU:
+ pszFenceType = "RTU";
+ break;
+
+ case RGXFWIF_DM_SHG:
+ pszFenceType = "SHG";
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+#endif
+
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ CACHEOP_RI_PRINTF_FENCE,
+#else
+ CACHEOP_PRINTF_FENCE,
+#endif
+ pszCacheOpType,
+ pszFenceType,
+ "",
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ "",
+ "",
+#endif
+ "",
+ "",
+ ui64ExecuteTime,
+ psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+ }
+ else
+ {
+ if (psProcessStats->asCacheOp[i32ReadIdx].bRangeBasedFlush)
+ {
+ IMG_DEVMEM_SIZE_T ui64NumOfPages;
+
+ ui64NumOfPages = psProcessStats->asCacheOp[i32ReadIdx].uiSize >> OSGetPageShift();
+ if (ui64NumOfPages <= PMR_MAX_TRANSLATION_STACK_ALLOC)
+ {
+ pszFlushType = "RBF.Fast";
+ }
+ else
+ {
+ pszFlushType = "RBF.Slow";
+ }
+ }
+ else
+ {
+ pszFlushType = "GF";
+ }
+
+ if (psProcessStats->asCacheOp[i32ReadIdx].bUserModeFlush)
+ {
+ pszFlushMode = "UM";
+ }
+ else
+ {
+ pszFlushMode = "KM";
+ }
+
+ switch (psProcessStats->asCacheOp[i32ReadIdx].uiCacheOp)
+ {
+ case PVRSRV_CACHE_OP_NONE:
+ pszCacheOpType = "None";
+ break;
+ case PVRSRV_CACHE_OP_CLEAN:
+ pszCacheOpType = "Clean";
+ break;
+ case PVRSRV_CACHE_OP_INVALIDATE:
+ pszCacheOpType = "Invalidate";
+ break;
+ case PVRSRV_CACHE_OP_FLUSH:
+ pszCacheOpType = "Flush";
+ break;
+ default:
+ pszCacheOpType = "Unknown";
+ break;
+ }
+
+ pfnOSStatsPrintf(pvFile,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ CACHEOP_RI_PRINTF,
+#else
+ CACHEOP_PRINTF,
+#endif
+ pszCacheOpType,
+ pszFlushType,
+ pszFlushMode,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ psProcessStats->asCacheOp[i32ReadIdx].sDevVAddr.uiAddr,
+ psProcessStats->asCacheOp[i32ReadIdx].sDevPAddr.uiAddr,
+#endif
+ psProcessStats->asCacheOp[i32ReadIdx].uiOffset,
+ psProcessStats->asCacheOp[i32ReadIdx].uiSize,
+ ui64ExecuteTime,
+ psProcessStats->asCacheOp[i32ReadIdx].ui32OpSeqNum);
+ }
+ }
+} /* CacheOpStatsPrintElements */
+#endif
+#endif
+
+#if defined(PVRSRV_ENABLE_MEMORY_STATS) && defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function MemStatsPrintElements
+@Description Prints all elements for the memory statistic record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+MemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE* peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_MEMORY_STATS* psMemoryStats = (PVRSRV_MEMORY_STATS*) pvStatPtr;
+ IMG_UINT32 ui32VAddrFields = sizeof(void*)/sizeof(IMG_UINT32);
+ IMG_UINT32 ui32PAddrFields = sizeof(IMG_CPU_PHYADDR)/sizeof(IMG_UINT32);
+ PVRSRV_MEM_ALLOC_REC *psRecord;
+ IMG_UINT32 ui32ItemNumber;
+
+ if (peStructureType == NULL || *peStructureType != PVRSRV_STAT_STRUCTURE_MEMORY)
+ {
+ PVR_ASSERT(peStructureType != NULL && *peStructureType == PVRSRV_STAT_STRUCTURE_MEMORY);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /* Write the header... */
+ pfnOSStatsPrintf(pvFile, "Type VAddress");
+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, " ");
+ }
+
+ pfnOSStatsPrintf(pvFile, " PAddress");
+ for (ui32ItemNumber = 1; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, " ");
+ }
+
+ pfnOSStatsPrintf(pvFile, " Size(bytes)\n");
+
+ /* The lock has to be held whilst moving through the memory list... */
+ OSLockAcquire(g_psLinkedListLock);
+ psRecord = psMemoryStats->psMemoryRecords;
+
+ while (psRecord != NULL)
+ {
+ IMG_BOOL bPrintStat = IMG_TRUE;
+
+ switch (psRecord->eAllocType)
+ {
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC: pfnOSStatsPrintf(pvFile, "KMALLOC "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC: pfnOSStatsPrintf(pvFile, "VMALLOC "); break;
+#else
+ case PVRSRV_MEM_ALLOC_TYPE_KMALLOC:
+ case PVRSRV_MEM_ALLOC_TYPE_VMALLOC:
+ bPrintStat = IMG_FALSE; break;
+#endif
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA: pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_LMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA: pfnOSStatsPrintf(pvFile, "ALLOC_PAGES_PT_UMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA: pfnOSStatsPrintf(pvFile, "IOREMAP_PT_LMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA: pfnOSStatsPrintf(pvFile, "VMAP_PT_UMA "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES: pfnOSStatsPrintf(pvFile, "ALLOC_LMA_PAGES "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES: pfnOSStatsPrintf(pvFile, "ALLOC_UMA_PAGES "); break;
+ case PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES: pfnOSStatsPrintf(pvFile, "MAP_UMA_LMA_PAGES "); break;
+ default: pfnOSStatsPrintf(pvFile, "INVALID "); break;
+ }
+
+ if (bPrintStat)
+ {
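+			/* Print the virtual and physical addresses one 32-bit word at
+			 * a time, most significant word first, so 64-bit values read
+			 * as a single hex number. */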
+ for (ui32ItemNumber = 0; ui32ItemNumber < ui32VAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->pvCpuVAddr) + ui32VAddrFields - ui32ItemNumber - 1));
+ }
+ pfnOSStatsPrintf(pvFile, " ");
+
+ for (ui32ItemNumber = 0; ui32ItemNumber < ui32PAddrFields; ui32ItemNumber++)
+ {
+ pfnOSStatsPrintf(pvFile, "%08x", *(((IMG_UINT32*) &psRecord->sCpuPAddr.uiAddr) + ui32PAddrFields - ui32ItemNumber - 1));
+ }
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+ pfnOSStatsPrintf(pvFile, " %u", psRecord->uiBytes);
+
+ pfnOSStatsPrintf(pvFile, " %s", (IMG_CHAR*)psRecord->pvAllocdFromFile);
+
+ pfnOSStatsPrintf(pvFile, " %d\n", psRecord->ui32AllocdFromLine);
+#else
+ pfnOSStatsPrintf(pvFile, " %u\n", psRecord->uiBytes);
+#endif
+ }
+ /* Move to next record... */
+ psRecord = psRecord->psNext;
+ }
+
+ OSLockRelease(g_psLinkedListLock);
+} /* MemStatsPrintElements */
+#endif
+
+#if defined(PVR_RI_DEBUG) && defined(ENABLE_DEBUGFS_PIDS)
+/*************************************************************************/ /*!
+@Function RIMemStatsPrintElements
+@Description Prints all elements for the RI Memory record.
+@Input pvStatPtr Pointer to statistics structure.
+@Input pfnOSStatsPrintf Printf function to use for output.
+*/ /**************************************************************************/
+void
+RIMemStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ PVRSRV_STAT_STRUCTURE_TYPE *peStructureType = (PVRSRV_STAT_STRUCTURE_TYPE*) pvStatPtr;
+ PVRSRV_RI_MEMORY_STATS *psRIMemoryStats = (PVRSRV_RI_MEMORY_STATS*) pvStatPtr;
+ IMG_CHAR *pszStatFmtText = NULL;
+ IMG_HANDLE *pRIHandle = NULL;
+
+ if (peStructureType == NULL || *peStructureType != PVRSRV_STAT_STRUCTURE_RIMEMORY)
+ {
+ PVR_ASSERT(peStructureType != NULL && *peStructureType == PVRSRV_STAT_STRUCTURE_RIMEMORY);
+ return;
+ }
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ /* Acquire RI lock*/
+ RILockAcquireKM();
+
+ /*
+ * Loop through the RI system to get each line of text.
+ */
+ while (RIGetListEntryKM(psRIMemoryStats->pid,
+ &pRIHandle,
+ &pszStatFmtText))
+ {
+ pfnOSStatsPrintf(pvFile, "%s", pszStatFmtText);
+ }
+
+ /* Release RI lock*/
+ RILockReleaseKM();
+
+} /* RIMemStatsPrintElements */
+#endif
+
+static IMG_UINT32 ui32FirmwareStartTimestamp;
+static IMG_UINT64 ui64FirmwareIdleDuration;
+
+void SetFirmwareStartTime(IMG_UINT32 ui32Time)
+{
+ ui32FirmwareStartTimestamp = UPDATE_TIME(ui32FirmwareStartTimestamp, ui32Time);
+}
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration)
+{
+ ui64FirmwareIdleDuration = UPDATE_TIME(ui64FirmwareIdleDuration, ui64Duration);
+}
+
+static INLINE void PowerStatsPrintGroup(IMG_UINT32 *pui32Stats,
+ void *pvFile,
+ OS_STATS_PRINTF_FUNC *pfnPrintf,
+ PVRSRV_POWER_STAT_TYPE eForced,
+ PVRSRV_POWER_STAT_TYPE ePowerOn)
+{
+ IMG_UINT32 ui32Index;
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, DEVICE);
+ pfnPrintf(pvFile, " Pre-Device: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, PRE_POWER, SYSTEM);
+ pfnPrintf(pvFile, " Pre-System: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, SYSTEM);
+ pfnPrintf(pvFile, " Post-System: %9u\n", pui32Stats[ui32Index]);
+
+ ui32Index = GET_POWER_STAT_INDEX(eForced, ePowerOn, POST_POWER, DEVICE);
+ pfnPrintf(pvFile, " Post-Device: %9u\n", pui32Stats[ui32Index]);
+}
+
+void PowerStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSStatsPrintf)
+{
+ IMG_UINT32 *pui32Stats = &aui32PowerTimingStats[0];
+ IMG_UINT32 ui32Idx;
+
+ PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+ if (pfnOSStatsPrintf == NULL)
+ {
+ return;
+ }
+
+ pfnOSStatsPrintf(pvFile, "Forced Power-on Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_ON);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Forced Power-off Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, FORCED, POWER_OFF);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Not Forced Power-on Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_ON);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Not Forced Power-off Transition (nanoseconds):\n");
+ PowerStatsPrintGroup(pui32Stats, pvFile, pfnOSStatsPrintf, NOT_FORCED, POWER_OFF);
+ pfnOSStatsPrintf(pvFile, "\n");
+
+
+ pfnOSStatsPrintf(pvFile, "FW bootup time (timer ticks): %u\n", ui32FirmwareStartTimestamp);
+ pfnOSStatsPrintf(pvFile, "Host Acknowledge Time for FW Idle Signal (timer ticks): %u\n", (IMG_UINT32)(ui64FirmwareIdleDuration));
+ pfnOSStatsPrintf(pvFile, "\n");
+
+ pfnOSStatsPrintf(pvFile, "Last %d Clock Speed Change Timers (nanoseconds):\n", NUM_EXTRA_POWER_STATS);
+ pfnOSStatsPrintf(pvFile, "Prepare DVFS\tDVFS Change\tPost DVFS\n");
+
+ for (ui32Idx = ui32ClockSpeedIndexStart; ui32Idx !=ui32ClockSpeedIndexEnd; ui32Idx = (ui32Idx + 1) % NUM_EXTRA_POWER_STATS)
+ {
+ pfnOSStatsPrintf(pvFile, "%12llu\t%11llu\t%9llu\n",asClockSpeedChanges[ui32Idx].ui64PreClockSpeedChangeDuration,
+ asClockSpeedChanges[ui32Idx].ui64BetweenPreEndingAndPostStartingDuration,
+ asClockSpeedChanges[ui32Idx].ui64PostClockSpeedChangeDuration);
+ }
+
+
+} /* PowerStatsPrintElements */
+
+void GlobalStatsPrintElements(void *pvFile,
+ void *pvStatPtr,
+ OS_STATS_PRINTF_FUNC* pfnOSGetStatsPrintf)
+{
+ PVR_UNREFERENCED_PARAMETER(pvStatPtr);
+
+ if (pfnOSGetStatsPrintf != NULL)
+ {
+ OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+#if !defined(PVR_DISABLE_KMALLOC_MEMSTATS)
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageKMalloc %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageKMallocMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_KMALLOC_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMalloc %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMallocMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMALLOC_MAX));
+#endif
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryUMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryUMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_UMA_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMapPTUMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageVMapPTUMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_VMAP_PT_UMA_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryLMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocPTMemoryLMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_PT_MEMORY_LMA_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageIORemapPTLMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageIORemapPTLMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_IOREMAP_PT_LMA_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemLMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemLMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_LMA_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAPool %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageAllocGPUMemUMAPoolMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_ALLOC_GPUMEM_UMA_POOL_MAX));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageMappedGPUMemUMA/LMA %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA));
+ pfnOSGetStatsPrintf(pvFile, "MemoryUsageMappedGPUMemUMA/LMAMax %10d\n", GET_GLOBAL_STAT_VALUE(PVRSRV_DRIVER_STAT_TYPE_MAPPED_GPUMEM_UMA_LMA_MAX));
+
+ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+ }
+}
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+static void StripBadChars(IMG_CHAR *psStr)
+{
+	IMG_INT cc;
+
+	/* Remove any '/' chars that may be in the ProcName (kernel thread
+	 * names can contain these); stop early at the string terminator. */
+	for (cc = 0; cc < 30 && *psStr != '\0'; cc++)
+	{
+		if (*psStr == '/')
+		{
+			*psStr = '-';
+		}
+		psStr++;
+	}
+}
+#endif
+
+
+/*************************************************************************/ /*!
+@Function PVRSRVFindProcessMemStats
+@Description    Using the provided PID, find memory stats for that process.
+                Memstats are provided for live/connected processes only.
+                Memstat values returned by this API relate only to the
+                physical memory allocated by the process and do not cover
+                mapped or imported memory.
+@Input          pid                 Process to search for.
+@Input          ui32ArrSize         Size of the array where memstat
+                                    records will be stored.
+@Input          bAllProcessStats    Flag denoting whether stats are
+                                    requested for an individual process
+                                    or for all processes.
+@Output         ui32MemoryStats     Array receiving the memory statistics
+                                    records for the requested pid.
+*/ /**************************************************************************/
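+/*
+ * Illustrative use (hypothetical caller): fetch the records of a single
+ * process into a buffer sized for every stat type.
+ *
+ *     IMG_UINT32 aui32Stats[PVRSRV_PROCESS_STAT_TYPE_COUNT];
+ *     PVRSRV_ERROR eError = PVRSRVFindProcessMemStats(uiPid,
+ *                               PVRSRV_PROCESS_STAT_TYPE_COUNT,
+ *                               IMG_FALSE, aui32Stats);
+ */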
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *ui32MemoryStats)
+{
+ IMG_INT i;
+ PVRSRV_PROCESS_STATS* psProcessStats;
+
+	if (bAllProcessStats)
+	{
+		OSLockAcquire(gsGlobalStats.hGlobalStatsLock);
+
+		/* Honour the caller-supplied buffer size */
+		for (i = 0; i < PVRSRV_DRIVER_STAT_TYPE_COUNT && i < (IMG_INT)ui32ArrSize; i++)
+		{
+			ui32MemoryStats[i] = GET_GLOBAL_STAT_VALUE(i);
+		}
+
+ OSLockRelease(gsGlobalStats.hGlobalStatsLock);
+
+ return PVRSRV_OK;
+ }
+
+ OSLockAcquire(g_psLinkedListLock);
+
+ /* Search for the given PID in the Live List */
+ psProcessStats = _FindProcessStatsInLiveList(pid);
+
+	if (psProcessStats == NULL)
+	{
+		OSLockRelease(g_psLinkedListLock);
+		PVR_DPF((PVR_DBG_ERROR, "Process %d not found. This process may not be live anymore.", (IMG_INT)pid));
+		return PVRSRV_ERROR_PROCESS_NOT_FOUND;
+	}
+
+ OSLockAcquireNested(psProcessStats->hLock, PROCESS_LOCK_SUBCLASS_CURRENT);
+	for (i = 0; i < PVRSRV_PROCESS_STAT_TYPE_COUNT && i < (IMG_INT)ui32ArrSize; i++)
+ {
+ ui32MemoryStats[i] = psProcessStats->i32StatValue[i];
+ }
+ OSLockRelease(psProcessStats->hLock);
+
+ OSLockRelease(g_psLinkedListLock);
+
+ return PVRSRV_OK;
+
+} /* PVRSRVFindProcessMemStats */
diff --git a/drivers/gpu/drm/img-rogue/1.10/process_stats.h b/drivers/gpu/drm/img-rogue/1.10/process_stats.h
new file mode 100644
index 00000000000000..a860d2c692c3c3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/process_stats.h
@@ -0,0 +1,214 @@
+/*************************************************************************/ /*!
+@File
+@Title Functions for creating and reading proc filesystem entries.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PROCESS_STATS_H__
+#define __PROCESS_STATS_H__
+
+#include <powervr/mem_types.h>
+
+#include "pvrsrv_error.h"
+#include "cache_ops.h"
+
+/*
+ * The publishing of Process Stats is controlled by the
+ * PVRSRV_ENABLE_PROCESS_STATS build option. The recording of all Memory
+ * allocations is controlled by the PVRSRV_ENABLE_MEMORY_STATS build option.
+ *
+ * Note: There will be a performance degradation with memory allocation
+ * recording enabled!
+ */
+
+
+/*
+ * Memory types which can be tracked...
+ */
+typedef enum {
+ PVRSRV_MEM_ALLOC_TYPE_KMALLOC, /* memory allocated by kmalloc() */
+ PVRSRV_MEM_ALLOC_TYPE_VMALLOC, /* memory allocated by vmalloc() */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_UMA, /* pages allocated from UMA to hold page table information */
+ PVRSRV_MEM_ALLOC_TYPE_VMAP_PT_UMA, /* ALLOC_PAGES_PT_UMA mapped to kernel address space */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA, /* pages allocated from LMA to hold page table information */
+ PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA, /* ALLOC_PAGES_PT_LMA mapped to kernel address space */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_LMA_PAGES, /* pages allocated from LMA */
+ PVRSRV_MEM_ALLOC_TYPE_ALLOC_UMA_PAGES, /* pages allocated from UMA */
+ PVRSRV_MEM_ALLOC_TYPE_MAP_UMA_LMA_PAGES, /* mapped UMA/LMA pages */
+ PVRSRV_MEM_ALLOC_TYPE_UMA_POOL_PAGES, /* pages in the page pool */
+
+ /* Must be the last enum...*/
+ PVRSRV_MEM_ALLOC_TYPE_COUNT
+} PVRSRV_MEM_ALLOC_TYPE;
+
+
+/*
+ * Functions for managing the processes recorded...
+ */
+PVRSRV_ERROR PVRSRVStatsInitialise(void);
+
+void PVRSRVStatsDestroy(void);
+
+PVRSRV_ERROR PVRSRVStatsRegisterProcess(IMG_HANDLE* phProcessStats);
+
+void PVRSRVStatsDeregisterProcess(IMG_HANDLE hProcessStats);
+
+#define MAX_POWER_STAT_ENTRIES 51
+
+/*
+ * Functions for recording the statistics...
+ */
+void PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData,
+ IMG_PID uiPid);
+
+#if defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS) && defined(DEBUG)
+void _PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ void *pvCpuVAddr,
+ IMG_CPU_PHYADDR sCpuPAddr,
+ size_t uiBytes,
+ void *pvPrivateData,
+ IMG_PID uiPid,
+ void *pvAllocFromFile, IMG_UINT32 ui32AllocFromLine);
+#endif
+void PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 ui64Key,
+ IMG_PID uiPid);
+
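+/*
+ * Increases the memory stat for eAllocType by uiBytes for process uiPid
+ * without recording a per-allocation entry or hash-table tracking.
+ * Pair with PVRSRVStatsDecrMemAllocStat().
+ */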
+void PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_PID uiPid);
+
+/*
+ * Increases the memory stat for eAllocType. Tracks the allocation size value
+ * by inserting a value into a hash table with uiCpuVAddr as key.
+ * Pair with PVRSRVStatsDecrMemAllocStatAndUntrack().
+ */
+void PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_UINT64 uiCpuVAddr,
+ IMG_PID uiPid);
+
+void PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ size_t uiBytes,
+ IMG_PID uiPid);
+
+void PVRSRVStatsDecrMemKAllocStat(size_t uiBytes,
+ IMG_PID decrPID);
+
+/*
+ * Decrease the memory stat for eAllocType. Takes the allocation size value from the
+ * hash table with uiCpuVAddr as key. Pair with PVRSRVStatsIncrMemAllocStatAndTrack().
+ */
+void PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE eAllocType,
+ IMG_UINT64 uiCpuVAddr);
+
+void
+PVRSRVStatsIncrMemAllocPoolStat(size_t uiBytes);
+
+void
+PVRSRVStatsDecrMemAllocPoolStat(size_t uiBytes);
+
+void PVRSRVStatsUpdateRenderContextStats(IMG_UINT32 ui32TotalNumPartialRenders,
+ IMG_UINT32 ui32TotalNumOutOfMemory,
+ IMG_UINT32 ui32TotalTAStores,
+ IMG_UINT32 ui32Total3DStores,
+ IMG_UINT32 ui32TotalSHStores,
+ IMG_UINT32 ui32TotalCDMStores,
+ IMG_PID owner);
+
+void PVRSRVStatsUpdateZSBufferStats(IMG_UINT32 ui32NumReqByApp,
+ IMG_UINT32 ui32NumReqByFW,
+ IMG_PID owner);
+
+void PVRSRVStatsUpdateFreelistStats(IMG_UINT32 ui32NumGrowReqByApp,
+ IMG_UINT32 ui32NumGrowReqByFW,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32NumHighPages,
+ IMG_PID ownerPid);
+#if defined(PVRSRV_ENABLE_CACHEOP_STATS)
+void PVRSRVStatsUpdateCacheOpStats(PVRSRV_CACHE_OP uiCacheOp,
+ IMG_UINT32 ui32OpSeqNum,
+#if defined(PVR_RI_DEBUG) && defined(DEBUG)
+ IMG_DEV_VIRTADDR sDevVAddr,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT32 eFenceOpType,
+#endif
+ IMG_DEVMEM_SIZE_T uiOffset,
+ IMG_DEVMEM_SIZE_T uiSize,
+                                   IMG_UINT64 ui64ExecuteTime,
+ IMG_BOOL bRangeBasedFlush,
+ IMG_BOOL bUserModeFlush,
+ IMG_BOOL bIsFence,
+ IMG_PID ownerPid);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+/* Update pre/post power transition timing statistics */
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower);
+
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer);
+#else
+/* Update pre/post power transition timing statistics */
+static inline
+void InsertPowerTimeStatistic(IMG_UINT64 ui64SysStartTime, IMG_UINT64 ui64SysEndTime,
+ IMG_UINT64 ui64DevStartTime, IMG_UINT64 ui64DevEndTime,
+ IMG_BOOL bForced, IMG_BOOL bPowerOn, IMG_BOOL bPrePower) {}
+static inline
+void InsertPowerTimeStatisticExtraPre(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+
+static inline
+void InsertPowerTimeStatisticExtraPost(IMG_UINT64 ui64StartTimer, IMG_UINT64 ui64StopTimer) {}
+#endif
+
+void SetFirmwareStartTime(IMG_UINT32 ui32TimeStamp);
+
+void SetFirmwareHandshakeIdleTime(IMG_UINT64 ui64Duration);
+
+/* Functions used for calculating the memory usage statistics of a process */
+PVRSRV_ERROR PVRSRVFindProcessMemStats(IMG_PID pid, IMG_UINT32 ui32ArrSize,
+ IMG_BOOL bAllProcessStats, IMG_UINT32 *ui32MemoryStats);
+
+#endif /* __PROCESS_STATS_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_bridge.h b/drivers/gpu/drm/img-rogue/1.10/pvr_bridge.h
new file mode 100644
index 00000000000000..66d4ed0cf9c162
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_bridge.h
@@ -0,0 +1,469 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the PVR Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BRIDGE_H__
+#define __PVR_BRIDGE_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "pvrsrv_error.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "common_dc_bridge.h"
+# if defined(SUPPORT_DCPLAT_BRIDGE)
+# include "common_dcplat_bridge.h"
+# endif
+#endif
+#include "common_mm_bridge.h"
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#include "common_mmplat_bridge.h"
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+#include "common_mmextmem_bridge.h"
+#endif
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#include "common_cmm_bridge.h"
+#endif
+#if defined(LINUX)
+#include "common_dmabuf_bridge.h"
+#endif
+#if defined(PDUMP)
+#include "common_pdump_bridge.h"
+#include "common_pdumpctrl_bridge.h"
+#include "common_pdumpmm_bridge.h"
+#endif
+#include "common_cache_bridge.h"
+#include "common_srvcore_bridge.h"
+#include "common_sync_bridge.h"
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+#include "common_syncexport_bridge.h"
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_syncsexport_bridge.h"
+#endif
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "common_smm_bridge.h"
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#include "common_htbuffer_bridge.h"
+#endif
+#include "common_pvrtl_bridge.h"
+#if defined(PVR_RI_DEBUG)
+#include "common_ri_bridge.h"
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#include "common_validation_bridge.h"
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+#include "common_tutils_bridge.h"
+#endif
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+#include "common_devicememhistory_bridge.h"
+#endif
+
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+#include "common_synctracking_bridge.h"
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#include "common_syncfallback_bridge.h"
+#endif
+
+/*
+ * Bridge Cmd Ids
+ */
+
+
+/* Note: The pattern
+ * #define PVRSRV_BRIDGE_FEATURE (PVRSRV_BRIDGE_PREVFEATURE + 1)
+ * #if defined(SUPPORT_FEATURE)
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST + 1)
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST + PVRSRV_BRIDGE_FEATURE_CMD_LAST)
+ * #else
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_FIRST 0
+ * #define PVRSRV_BRIDGE_FEATURE_DISPATCH_LAST (PVRSRV_BRIDGE_PREVFEATURE_DISPATCH_LAST)
+ * #endif
+ * is used in the macro definitions below to make PVRSRV_BRIDGE_FEATURE_*
+ * take up no space in the dispatch table if SUPPORT_FEATURE is disabled.
+ *
+ * Note however that a bridge always defines PVRSRV_BRIDGE_FEATURE, even where
+ * the feature is not enabled (each bridge group retains its own ioctl number).
+ */
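+/* For a concrete instance of this pattern, see PVRSRV_BRIDGE_SYNCEXPORT
+ * (bridge group 3) below: its dispatch range collapses to zero entries
+ * unless both SUPPORT_INSECURE_EXPORT and SUPPORT_SERVER_SYNC are defined.
+ */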
+
+#define PVRSRV_BRIDGE_FIRST 0UL
+
+/* 0: Default handler */
+#define PVRSRV_BRIDGE_DEFAULT 0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST 0UL
+#define PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST)
+/* 1: CORE functions */
+#define PVRSRV_BRIDGE_SRVCORE 1UL
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST (PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST+1)
+#define PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST + PVRSRV_BRIDGE_SRVCORE_CMD_LAST)
+
+/* 2: SYNC functions */
+#define PVRSRV_BRIDGE_SYNC 2UL
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNC_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNC_CMD_LAST)
+
+/* 3: SYNCEXPORT functions */
+#define PVRSRV_BRIDGE_SYNCEXPORT 3UL
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNC_DISPATCH_LAST)
+#endif
+
+/* 4: SYNCSEXPORT functions */
+#define PVRSRV_BRIDGE_SYNCSEXPORT 4UL
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCSEXPORT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST)
+#endif
+
+/* 5: PDUMP CTRL layer functions*/
+#define PVRSRV_BRIDGE_PDUMPCTRL 5UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPCTRL_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST)
+#endif
+
+/* 6: Memory Management functions */
+#define PVRSRV_BRIDGE_MM 6UL
+#define PVRSRV_BRIDGE_MM_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MM_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_FIRST + PVRSRV_BRIDGE_MM_CMD_LAST)
+
+/* 7: Non-Linux Memory Management functions */
+#define PVRSRV_BRIDGE_MMPLAT 7UL
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_MM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_MMPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_MM_DISPATCH_LAST)
+#endif
+
+/* 8: Context Memory Management functions */
+#define PVRSRV_BRIDGE_CMM 8UL
+#if !defined(EXCLUDE_CMM_BRIDGE)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_FIRST + PVRSRV_BRIDGE_CMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_CMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_CMM_DISPATCH_LAST (PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST)
+#endif
+
+/* 9: PDUMP Memory Management functions */
+#define PVRSRV_BRIDGE_PDUMPMM 9UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMPMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST (PVRSRV_BRIDGE_CMM_DISPATCH_LAST)
+#endif
+
+/* 10: PDUMP functions */
+#define PVRSRV_BRIDGE_PDUMP 10UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_PDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST)
+#endif
+
+/* 11: DMABUF functions */
+#define PVRSRV_BRIDGE_DMABUF 11UL
+#if defined(LINUX)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST + PVRSRV_BRIDGE_DMABUF_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST (PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST)
+#endif
+
+/* 12: Display Class functions */
+#define PVRSRV_BRIDGE_DC 12UL
+#if defined(SUPPORT_DISPLAY_CLASS)
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DC_DISPATCH_FIRST + PVRSRV_BRIDGE_DC_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DC_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DC_DISPATCH_LAST (PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST)
+#endif
+
+/* 13: Cache interface functions */
+#define PVRSRV_BRIDGE_CACHE 13UL
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST (PVRSRV_BRIDGE_DC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_CACHE_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST + PVRSRV_BRIDGE_CACHE_CMD_LAST)
+
+/* 14: Secure Memory Management functions*/
+#define PVRSRV_BRIDGE_SMM 14UL
+#if defined(SUPPORT_SECURE_EXPORT)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_SMM_DISPATCH_FIRST + PVRSRV_BRIDGE_SMM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SMM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SMM_DISPATCH_LAST (PVRSRV_BRIDGE_CACHE_DISPATCH_LAST)
+#endif
+
+/* 15: Transport Layer interface functions */
+#define PVRSRV_BRIDGE_PVRTL 15UL
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST (PVRSRV_BRIDGE_SMM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST + PVRSRV_BRIDGE_PVRTL_CMD_LAST)
+
+/* 16: Resource Information (RI) interface functions */
+#define PVRSRV_BRIDGE_RI 16UL
+#if defined(PVR_RI_DEBUG)
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_FIRST + PVRSRV_BRIDGE_RI_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RI_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RI_DISPATCH_LAST (PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST)
+#endif
+
+/* 17: Validation interface functions */
+#define PVRSRV_BRIDGE_VALIDATION 17UL
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST (PVRSRV_BRIDGE_RI_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST + PVRSRV_BRIDGE_VALIDATION_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST (PVRSRV_BRIDGE_RI_DISPATCH_LAST)
+#endif
+
+/* 18: TUTILS interface functions */
+#define PVRSRV_BRIDGE_TUTILS 18UL
+#if defined(PVR_TESTING_UTILS)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST + PVRSRV_BRIDGE_TUTILS_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST (PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST)
+#endif
+
+/* 19: DevMem history interface functions */
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY 19UL
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST + PVRSRV_BRIDGE_DEVICEMEMHISTORY_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST (PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST)
+#endif
+
+/* 20: Host Trace Buffer interface functions */
+#define PVRSRV_BRIDGE_HTBUFFER 20UL
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST + PVRSRV_BRIDGE_HTBUFFER_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST (PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST)
+#endif
+
+/* 21: Non-Linux Display functions */
+#define PVRSRV_BRIDGE_DCPLAT 21UL
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST + PVRSRV_BRIDGE_DCPLAT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST (PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST)
+#endif
+
+/* 22: Extmem functions */
+#define PVRSRV_BRIDGE_MMEXTMEM 22UL
+#if defined(SUPPORT_WRAP_EXTMEM)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST + PVRSRV_BRIDGE_MMEXTMEM_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST (PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST)
+#endif
+
+/* 23: Sync tracking functions */
+#define PVRSRV_BRIDGE_SYNCTRACKING 23UL
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCTRACKING_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST (PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST)
+#endif
+
+/* 24: Sync fallback fence functions */
+#define PVRSRV_BRIDGE_SYNCFALLBACK 24UL
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST + PVRSRV_BRIDGE_SYNCFALLBACK_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST)
+#endif
+
+/* NB PVRSRV_BRIDGE_LAST below must be the last bridge group defined above (currently PVRSRV_BRIDGE_SYNCFALLBACK) */
+#define PVRSRV_BRIDGE_LAST (PVRSRV_BRIDGE_SYNCFALLBACK)
+/* NB PVRSRV_BRIDGE_DISPATCH_LAST below must be the last dispatch entry defined above (currently PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST) */
+#define PVRSRV_BRIDGE_DISPATCH_LAST (PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST)
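+
+/* Note on the chaining scheme above: each bridge group occupies a
+ * contiguous [DISPATCH_FIRST, DISPATCH_LAST] range in a single dispatch
+ * table. When a group is compiled out, its DISPATCH_LAST is defined as an
+ * alias of the previous group's DISPATCH_LAST, so the next enabled group
+ * (whose FIRST is "previous LAST + 1") still follows on contiguously. For
+ * example, with PDUMP undefined:
+ *   PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST == PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST
+ *                                     == PVRSRV_BRIDGE_CMM_DISPATCH_LAST
+ * so PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST follows the CMM range directly.
+ */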
+
+/* bit mask representing the enabled PVR bridges */
+
+static const IMG_UINT32 gui32PVRBridges =
+ (1U << (PVRSRV_BRIDGE_DEFAULT - PVRSRV_BRIDGE_FIRST))
+ | (1U << (PVRSRV_BRIDGE_SRVCORE - PVRSRV_BRIDGE_FIRST))
+ | (1U << (PVRSRV_BRIDGE_SYNC - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_INSECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+ | (1U << (PVRSRV_BRIDGE_SYNCEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SECURE_EXPORT) && defined(SUPPORT_SERVER_SYNC)
+ | (1U << (PVRSRV_BRIDGE_SYNCSEXPORT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+ | (1U << (PVRSRV_BRIDGE_PDUMPCTRL - PVRSRV_BRIDGE_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_MM - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_MMPLAT_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_MMPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if !defined(EXCLUDE_CMM_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_CMM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PDUMP)
+ | (1U << (PVRSRV_BRIDGE_PDUMPMM - PVRSRV_BRIDGE_FIRST))
+ | (1U << (PVRSRV_BRIDGE_PDUMP - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(LINUX)
+ | (1U << (PVRSRV_BRIDGE_DMABUF - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+ | (1U << (PVRSRV_BRIDGE_DC - PVRSRV_BRIDGE_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_CACHE - PVRSRV_BRIDGE_FIRST))
+#if defined(SUPPORT_SECURE_EXPORT)
+ | (1U << (PVRSRV_BRIDGE_SMM - PVRSRV_BRIDGE_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_PVRTL - PVRSRV_BRIDGE_FIRST))
+#if defined(PVR_RI_DEBUG)
+ | (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_VALIDATION - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(PVR_TESTING_UTILS)
+ | (1U << (PVRSRV_BRIDGE_TUTILS - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST))
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_HTBUFFER - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS) && defined (SUPPORT_DCPLAT_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_DCPLAT - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+ | (1U << (PVRSRV_BRIDGE_MMEXTMEM - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_SYNCTRACKING_BRIDGE)
+ | (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST))
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+ | (1U << (PVRSRV_BRIDGE_SYNCFALLBACK - PVRSRV_BRIDGE_FIRST))
+#endif
+ ;
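+
+/* Bit N of this mask corresponds to bridge group (PVRSRV_BRIDGE_FIRST + N);
+ * a set bit means that group was compiled into this server build. For
+ * example, a PDUMP-enabled build additionally sets the bits for
+ * PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPMM and PVRSRV_BRIDGE_PDUMP. */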
+
+/* bit field representing which PVR bridge groups may optionally not
+ * be present in the server
+ */
+#define PVR_BRIDGES_OPTIONAL \
+ ( \
+ (1U << (PVRSRV_BRIDGE_RI - PVRSRV_BRIDGE_FIRST)) | \
+ (1U << (PVRSRV_BRIDGE_DEVICEMEMHISTORY - PVRSRV_BRIDGE_FIRST)) | \
+ (1U << (PVRSRV_BRIDGE_SYNCTRACKING - PVRSRV_BRIDGE_FIRST)) \
+ )
+
+/******************************************************************************
+ * Generic bridge structures
+ *****************************************************************************/
+
+
+/******************************************************************************
+ * bridge packaging structure
+ *****************************************************************************/
+typedef struct PVRSRV_BRIDGE_PACKAGE_TAG
+{
+ IMG_UINT32 ui32BridgeID; /*!< ioctl bridge group */
+ IMG_UINT32 ui32FunctionID; /*!< ioctl function index */
+ IMG_UINT32 ui32Size; /*!< size of structure */
+ void __user *pvParamIn; /*!< input data buffer */
+ IMG_UINT32 ui32InBufferSize; /*!< size of input data buffer */
+ void __user *pvParamOut; /*!< output data buffer */
+ IMG_UINT32 ui32OutBufferSize; /*!< size of output data buffer */
+} PVRSRV_BRIDGE_PACKAGE;
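+
+/* This package is populated kernel-side from the struct drm_pvr_srvkm_cmd
+ * ioctl argument (see PVRSRV_BridgeDispatchKM in pvr_bridge_k.c) before
+ * being handed to BridgedDispatchKM. */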
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (pvr_bridge.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.c b/drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.c
new file mode 100644
index 00000000000000..ed67b48880e84c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.c
@@ -0,0 +1,637 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR Bridge Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Receives calls from the user portion of services and
+ despatches them to functions in the kernel portion.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/mm_types.h>
+
+#include "img_defs.h"
+#include "pvr_bridge.h"
+#include "pvr_bridge_k.h"
+#include "connection_server.h"
+#include "syscommon.h"
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "private_data.h"
+#include "linkage.h"
+#include "pmr.h"
+#include "rgx_bvnc_defs_km.h"
+#include "pvrsrv_bridge_init.h"
+
+#include <drm/drmP.h>
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+
+#include "env_connection.h"
+#include <linux/sched.h>
+#include <linux/freezer.h>
+
+/* RGX: */
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+#include "srvcore.h"
+#include "common_srvcore_bridge.h"
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+
+#if defined(SUPPORT_DRM_EXT)
+#define CAST_BRIDGE_CMD_PTR_TO_PTR(ptr) (ptr)
+#else
+#define CAST_BRIDGE_CMD_PTR_TO_PTR(ptr) (void __user *)(uintptr_t)(ptr)
+#endif
+
+#if defined(MODULE_TEST)
+/************************************************************************/
+// additional includes for services testing
+/************************************************************************/
+#include "pvr_test_bridge.h"
+#include "kern_test.h"
+/************************************************************************/
+// end of additional includes
+/************************************************************************/
+#endif
+
+/* WARNING!
+ * The mmap code has its own mutex, to prevent a possible deadlock
+ * when using gPVRSRVLock.
+ * The Linux kernel takes mm->mmap_sem before calling the mmap
+ * entry points (PVRMMap, MMapVOpen, MMapVClose), but the ioctl
+ * entry point may take mm->mmap_sem during fault handling, or
+ * before calling get_user_pages. If gPVRSRVLock were used in the
+ * mmap entry points, a deadlock could result, due to the ioctl
+ * and mmap code taking the two locks in different orders.
+ * As a corollary to this, the mmap entry points must not call
+ * any driver code that relies on gPVRSRVLock being held.
+ */
+static DEFINE_MUTEX(g_sMMapMutex);
+
+#if defined(DEBUG_BRIDGE_KM)
+static PPVR_DEBUGFS_ENTRY_DATA gpsPVRDebugFSBridgeStatsEntry;
+static struct seq_operations gsBridgeStatsReadOps;
+static ssize_t BridgeStatsWrite(const char __user *pszBuffer,
+ size_t uiCount,
+ loff_t *puiPosition,
+ void *pvData);
+#endif
+
+#define _DRIVER_SUSPENDED 1
+#define _DRIVER_NOT_SUSPENDED 0
+static ATOMIC_T g_iDriverSuspended;
+static ATOMIC_T g_iNumActiveDriverThreads;
+static ATOMIC_T g_iNumActiveKernelThreads;
+static IMG_HANDLE g_hDriverThreadEventObject;
+
+PVRSRV_ERROR OSPlatformBridgeInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = InitDMABUFBridge();
+ PVR_LOG_IF_ERROR(eError, "InitDMABUFBridge");
+
+ OSAtomicWrite(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED);
+ OSAtomicWrite(&g_iNumActiveDriverThreads, 0);
+ OSAtomicWrite(&g_iNumActiveKernelThreads, 0);
+
+ eError = OSEventObjectCreate("Global driver thread event object",
+ &g_hDriverThreadEventObject);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", error_);
+
+#if defined(DEBUG_BRIDGE_KM)
+ {
+ IMG_INT iResult;
+ iResult = PVRDebugFSCreateEntry("bridge_stats",
+ NULL,
+ &gsBridgeStatsReadOps,
+ BridgeStatsWrite,
+ NULL,
+ NULL,
+ &g_BridgeDispatchTable[0],
+ &gpsPVRDebugFSBridgeStatsEntry);
+ if (iResult != 0)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto error_;
+ }
+ }
+#endif
+
+ return PVRSRV_OK;
+
+error_:
+ if (g_hDriverThreadEventObject) {
+ OSEventObjectDestroy(g_hDriverThreadEventObject);
+ g_hDriverThreadEventObject = NULL;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR OSPlatformBridgeDeInit(void)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(DEBUG_BRIDGE_KM)
+ if (gpsPVRDebugFSBridgeStatsEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsPVRDebugFSBridgeStatsEntry);
+ }
+#endif
+
+ eError = DeinitDMABUFBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitDMABUFBridge");
+
+ if (g_hDriverThreadEventObject != NULL) {
+ OSEventObjectDestroy(g_hDriverThreadEventObject);
+ g_hDriverThreadEventObject = NULL;
+ }
+
+ return eError;
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+static void *BridgeStatsSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#else
+ BridgeGlobalStatsLock();
+#endif
+
+ if (psDispatchTable == NULL || (*puiPosition) > BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ return NULL;
+ }
+
+ if ((*puiPosition) == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return &(psDispatchTable[(*puiPosition) - 1]);
+}
+
+static void BridgeStatsSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#else
+ BridgeGlobalStatsUnlock();
+#endif
+}
+
+static void *BridgeStatsSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psDispatchTable = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)psSeqFile->private;
+ loff_t uiItemAskedFor = *puiPosition; /* puiPosition on entry is the index to return */
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ /* Is the item asked for (starts at 0) a valid table index? */
+ if (uiItemAskedFor < BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ (*puiPosition)++; /* on exit it is the next seq index to ask for */
+ return &(psDispatchTable[uiItemAskedFor]);
+ }
+
+	/* We are now past the end of the table; return NULL to stop */
+ return NULL;
+}
+
+static int BridgeStatsSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData == SEQ_START_TOKEN)
+ {
+ seq_printf(psSeqFile,
+ "Total ioctl call count = %u\n"
+ "Total number of bytes copied via copy_from_user = %u\n"
+ "Total number of bytes copied via copy_to_user = %u\n"
+ "Total number of bytes copied via copy_*_user = %u\n\n"
+ "%3s: %-60s | %-48s | %10s | %20s | %20s | %20s | %20s \n",
+ g_BridgeGlobalStats.ui32IOCTLCount,
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes,
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes + g_BridgeGlobalStats.ui32TotalCopyToUserBytes,
+ "#",
+ "Bridge Name",
+ "Wrapper Function",
+ "Call Count",
+ "copy_from_user (B)",
+ "copy_to_user (B)",
+ "Total Time (us)",
+ "Max Time (us)");
+ }
+ else if (pvData != NULL)
+ {
+ PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *psEntry = (PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY *)pvData;
+ IMG_UINT32 ui32Remainder;
+
+ seq_printf(psSeqFile,
+ "%3d: %-60s %-48s %-10u %-20u %-20u %-20llu %-20llu\n",
+ (IMG_UINT32)(((size_t)psEntry-(size_t)g_BridgeDispatchTable)/sizeof(*g_BridgeDispatchTable)),
+ psEntry->pszIOCName,
+ (psEntry->pfFunction != NULL) ? psEntry->pszFunctionName : "(null)",
+ psEntry->ui32CallCount,
+ psEntry->ui32CopyFromUserTotalBytes,
+ psEntry->ui32CopyToUserTotalBytes,
+ (unsigned long long) OSDivide64r64(psEntry->ui64TotalTimeNS, 1000, &ui32Remainder),
+ (unsigned long long) OSDivide64r64(psEntry->ui64MaxTimeNS, 1000, &ui32Remainder));
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsBridgeStatsReadOps =
+{
+ .start = BridgeStatsSeqStart,
+ .stop = BridgeStatsSeqStop,
+ .next = BridgeStatsSeqNext,
+ .show = BridgeStatsSeqShow,
+};
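+
+/* The seq_file iterator above walks g_BridgeDispatchTable: position 0
+ * yields SEQ_START_TOKEN, which BridgeStatsSeqShow prints as the global
+ * header, and position N (1-based) yields table entry N-1, printed as one
+ * row per dispatch entry. */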
+
+static ssize_t BridgeStatsWrite(const char __user *pszBuffer,
+ size_t uiCount,
+ loff_t *puiPosition,
+ void *pvData)
+{
+ IMG_UINT32 i;
+ /* We only care if a '0' is written to the file, if so we reset results. */
+ char buf[1];
+ ssize_t iResult = simple_write_to_buffer(&buf[0], sizeof(buf), puiPosition, pszBuffer, uiCount);
+
+ if (iResult < 0)
+ {
+ return iResult;
+ }
+
+ if (iResult == 0 || buf[0] != '0')
+ {
+ return -EINVAL;
+ }
+
+ /* Reset stats. */
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#else
+ BridgeGlobalStatsLock();
+#endif
+
+ g_BridgeGlobalStats.ui32IOCTLCount = 0;
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes = 0;
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes = 0;
+
+ for (i = 0; i < ARRAY_SIZE(g_BridgeDispatchTable); i++)
+ {
+ g_BridgeDispatchTable[i].ui32CallCount = 0;
+ g_BridgeDispatchTable[i].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[i].ui32CopyToUserTotalBytes = 0;
+ g_BridgeDispatchTable[i].ui64TotalTimeNS = 0;
+ g_BridgeDispatchTable[i].ui64MaxTimeNS = 0;
+ }
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#else
+ BridgeGlobalStatsUnlock();
+#endif
+
+ return uiCount;
+}
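+
+/* Usage note: writing the single character '0' to the "bridge_stats"
+ * debugfs entry created in OSPlatformBridgeInit() resets all counters,
+ * e.g. (the exact path depends on where PVRDebugFSCreateEntry places it):
+ *   echo 0 > /sys/kernel/debug/pvr/bridge_stats
+ * Any other input is rejected with -EINVAL.
+ */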
+
+#endif /* defined(DEBUG_BRIDGE_KM) */
+
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hEvent;
+
+ eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+ return eError;
+ }
+
+ if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_NOT_SUSPENDED,
+ _DRIVER_SUSPENDED) == _DRIVER_SUSPENDED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver is already suspended", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto out_put;
+ }
+
+ /* now wait for any threads currently in the server to exit */
+	while (OSAtomicRead(&g_iNumActiveDriverThreads) != 0 ||
+ (OSAtomicRead(&g_iNumActiveKernelThreads) != 0 && !bShutdown))
+ {
+ if (OSAtomicRead(&g_iNumActiveDriverThreads) != 0)
+ {
+ PVR_LOG(("%s: waiting for user threads (%d)", __func__,
+ OSAtomicRead(&g_iNumActiveDriverThreads)));
+ }
+ if (OSAtomicRead(&g_iNumActiveKernelThreads) != 0)
+ {
+ PVR_LOG(("%s: waiting for kernel threads (%d)", __func__,
+ OSAtomicRead(&g_iNumActiveKernelThreads)));
+ }
+		/* A regular wait is called here (and not OSEventObjectWaitKernel)
+		 * because this code is executed by the caller of the
+		 * .suspend/.shutdown callbacks, which is most likely the PM core
+		 * (or another actor responsible for the suspend process). Because
+		 * of that, this thread shouldn't, and most likely even cannot,
+		 * be frozen. */
+ OSEventObjectWait(hEvent);
+ }
+
+out_put:
+ OSEventObjectClose(hEvent);
+
+ return eError;
+}
+
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void)
+{
+ PVRSRV_ERROR eError;
+
+ /* resume the driver and then signal so any waiting threads wake up */
+ if (OSAtomicCompareExchange(&g_iDriverSuspended, _DRIVER_SUSPENDED,
+ _DRIVER_NOT_SUSPENDED) == _DRIVER_NOT_SUSPENDED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver is not suspended", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = OSEventObjectSignal(g_hDriverThreadEventObject);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSEventObjectSignal failed: %s",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR LinuxBridgeSignalIfSuspended(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+ {
+		/* Note: do not redeclare eError here; shadowing the outer
+		 * variable would make this function always return PVRSRV_OK,
+		 * hiding signalling failures from the caller. */
+		eError = OSEventObjectSignal(g_hDriverThreadEventObject);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to signal driver thread event"
+ " object: %s", __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+ }
+
+ return eError;
+}
+
+void LinuxBridgeNumActiveKernelThreadsIncrement(void)
+{
+ OSAtomicIncrement(&g_iNumActiveKernelThreads);
+}
+
+void LinuxBridgeNumActiveKernelThreadsDecrement(void)
+{
+ OSAtomicDecrement(&g_iNumActiveKernelThreads);
+ PVR_ASSERT(OSAtomicRead(&g_iNumActiveKernelThreads) >= 0);
+
+ /* Signal on every decrement in case LinuxBridgeBlockClientsAccess() is
+ * waiting for the threads to freeze.
+ * (error is logged in called function so ignore, we can't do much with
+ * it anyway) */
+ (void) LinuxBridgeSignalIfSuspended();
+}
+
+static PVRSRV_ERROR _WaitForDriverUnsuspend(void)
+{
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hEvent;
+
+ eError = OSEventObjectOpen(g_hDriverThreadEventObject, &hEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to open event object", __func__));
+ return eError;
+ }
+
+ while (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+ {
+		/* A normal (non-kernel) wait should be safe here, since this
+		 * thread has just been unfrozen and is unlikely to be frozen
+		 * again before the suspend flag is cleared. */
+ OSEventObjectWait(hEvent);
+ }
+
+ OSEventObjectClose(hEvent);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR PVRSRVDriverThreadEnter(void)
+{
+ PVRSRV_ERROR eError;
+
+ /* increment first so there is no race between this value and
+ * g_iDriverSuspended in LinuxBridgeBlockClientsAccess() */
+ OSAtomicIncrement(&g_iNumActiveDriverThreads);
+
+ if (OSAtomicRead(&g_iDriverSuspended) == _DRIVER_SUSPENDED)
+ {
+ /* decrement here because the driver is going to be suspended and
+ * this thread is going to be frozen so we don't want to wait for
+ * it in LinuxBridgeBlockClientsAccess() */
+ OSAtomicDecrement(&g_iNumActiveDriverThreads);
+
+ /* during suspend procedure this will put the current thread to
+ * the freezer but during shutdown this will just return */
+ try_to_freeze();
+
+		/* If the thread was unfrozen but the flag is not yet set to
+		 * _DRIVER_NOT_SUSPENDED, wait for it.
+		 * If this is a shutdown, the thread was not frozen, so we will
+		 * wait here indefinitely; this is fine (and in fact what we
+		 * want) because no thread should be entering the driver in
+		 * that case. */
+ eError = _WaitForDriverUnsuspend();
+
+ /* increment here because that means that the thread entered the
+ * driver */
+ OSAtomicIncrement(&g_iNumActiveDriverThreads);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for driver"
+ " unsuspend: %s", __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+static INLINE void PVRSRVDriverThreadExit(void)
+{
+ OSAtomicDecrement(&g_iNumActiveDriverThreads);
+ /* if the driver is being suspended then we need to signal the
+ * event object as the thread suspending the driver is waiting
+ * for active threads to exit
+ * error is logged in called function so ignore returned error
+ */
+ (void) LinuxBridgeSignalIfSuspended();
+}
+
+int
+PVRSRV_BridgeDispatchKM(struct drm_device __maybe_unused *dev, void *arg, struct drm_file *pDRMFile)
+{
+ struct drm_pvr_srvkm_cmd *psSrvkmCmd = (struct drm_pvr_srvkm_cmd *) arg;
+ PVRSRV_BRIDGE_PACKAGE sBridgePackageKM = { 0 };
+ CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pDRMFile->filp);
+ PVRSRV_ERROR error;
+
+ if (psConnection == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Connection is closed", __func__));
+ return -EFAULT;
+ }
+
+ PVR_ASSERT(psSrvkmCmd != NULL);
+
+ DRM_DEBUG("tgid=%d, tgid_connection=%d, bridge_id=%d, func_id=%d",
+ task_tgid_nr(current),
+ ((ENV_CONNECTION_DATA *)PVRSRVConnectionPrivateData(psConnection))->owner,
+ psSrvkmCmd->bridge_id,
+ psSrvkmCmd->bridge_func_id);
+
+ if ((error = PVRSRVDriverThreadEnter()) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVDriverThreadEnter failed: %s",
+ __func__,
+ PVRSRVGetErrorStringKM(error)));
+ goto e0;
+ }
+
+ sBridgePackageKM.ui32BridgeID = psSrvkmCmd->bridge_id;
+ sBridgePackageKM.ui32FunctionID = psSrvkmCmd->bridge_func_id;
+ sBridgePackageKM.ui32Size = sizeof(sBridgePackageKM);
+ sBridgePackageKM.pvParamIn = CAST_BRIDGE_CMD_PTR_TO_PTR(psSrvkmCmd->in_data_ptr);
+ sBridgePackageKM.ui32InBufferSize = psSrvkmCmd->in_data_size;
+ sBridgePackageKM.pvParamOut = CAST_BRIDGE_CMD_PTR_TO_PTR(psSrvkmCmd->out_data_ptr);
+ sBridgePackageKM.ui32OutBufferSize = psSrvkmCmd->out_data_size;
+
+ error = BridgedDispatchKM(psConnection, &sBridgePackageKM);
+
+ PVRSRVDriverThreadExit();
+
+e0:
+ return OSPVRSRVToNativeError(error);
+}
+
+int
+PVRSRV_MMap(struct file *pFile, struct vm_area_struct *ps_vma)
+{
+ CONNECTION_DATA *psConnection = LinuxConnectionFromFile(pFile);
+ IMG_HANDLE hSecurePMRHandle = (IMG_HANDLE)((uintptr_t)ps_vma->vm_pgoff);
+ PMR *psPMR;
+ PVRSRV_ERROR eError;
+
+ if (psConnection == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid connection data"));
+ return -ENOENT;
+ }
+
+	/*
+	 * The bridge lock formerly used here to protect PVRSRVLookupHandle
+	 * has been replaced by a dedicated lock, now that the handle
+	 * functions have their own lock. This change was necessary to solve
+	 * the lockdep issues related to PVRSRV_MMap.
+	 */
+ mutex_lock(&g_sMMapMutex);
+
+ eError = PVRSRVLookupHandle(psConnection->psHandleBase,
+ (void **)&psPMR,
+ hSecurePMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Note: PMRMMapPMR will take a reference on the PMR.
+ * Unref the handle immediately, because we have now done
+ * the required operation on the PMR (whether it succeeded or not)
+ */
+ eError = PMRMMapPMR(psPMR, ps_vma);
+ PVRSRVReleaseHandle(psConnection->psHandleBase, hSecurePMRHandle, PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMRMMapPMR failed (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto e0;
+ }
+
+ mutex_unlock(&g_sMMapMutex);
+
+ return 0;
+
+e0:
+ mutex_unlock(&g_sMMapMutex);
+
+ PVR_DPF((PVR_DBG_ERROR, "Unable to translate error %d", eError));
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+	/* There is no exact native error mapping here; report the failed
+	 * lookup/mapping as -ENOENT. */
+	return -ENOENT;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.h b/drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.h
new file mode 100644
index 00000000000000..e1a03c92360f09
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_bridge_k.h
@@ -0,0 +1,78 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR Bridge Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Receives calls from the user portion of services and
+ despatches them to functions in the kernel portion.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_BRIDGE_K_H_
+#define _PVR_BRIDGE_K_H_
+
+#include "pvrsrv_error.h"
+
+/*!
+******************************************************************************
+ @Function LinuxBridgeBlockClientsAccess
+ @Description This function will wait for any existing threads in the Server
+ to exit and then disable access to the driver. New threads will
+ not be allowed to enter the Server until the driver is
+ unsuspended (see LinuxBridgeUnblockClientsAccess).
+ @Input bShutdown this flag indicates that the function was called
+ from a shutdown callback and therefore it will
+ not wait for the kernel threads to get frozen
+                 (because this doesn't happen during the shutdown
+ procedure)
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR LinuxBridgeBlockClientsAccess(IMG_BOOL bShutdown);
+
+/*!
+******************************************************************************
+ @Function LinuxBridgeUnblockClientsAccess
+ @Description This function will re-enable the bridge and allow any threads
+ waiting to enter the Server to continue.
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR LinuxBridgeUnblockClientsAccess(void);
+
+void LinuxBridgeNumActiveKernelThreadsIncrement(void);
+void LinuxBridgeNumActiveKernelThreadsDecrement(void);
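+
+/* The increment/decrement pair above is expected to bracket kernel-thread
+ * activity inside the Server, so that LinuxBridgeBlockClientsAccess() can
+ * wait for in-flight kernel threads to drain (see pvr_bridge_k.c). */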
+
+#endif /* _PVR_BRIDGE_K_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.c b/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.c
new file mode 100644
index 00000000000000..ed4a56e533af0d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.c
@@ -0,0 +1,577 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title Linux buffer sync interface
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
+#include "services_kernel_client.h"
+#include "pvr_buffer_sync.h"
+#include "pvr_buffer_sync_shared.h"
+#include "pvr_drv.h"
+#include "pvr_fence.h"
+
+
+struct pvr_buffer_sync_context {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ struct mutex ctx_lock;
+#endif
+ struct pvr_fence_context *fence_ctx;
+ struct ww_acquire_ctx acquire_ctx;
+};
+
+struct pvr_buffer_sync_check_data {
+ struct dma_fence_cb base;
+
+ u32 nr_fences;
+ struct pvr_fence **fences;
+};
+
+struct pvr_buffer_sync_append_data {
+ struct pvr_buffer_sync_context *ctx;
+
+ u32 nr_pmrs;
+ struct _PMR_ **pmrs;
+ u32 *pmr_flags;
+
+ struct pvr_fence *update_fence;
+ struct pvr_buffer_sync_check_data *check_data;
+};
+
+
+static struct reservation_object *
+pmr_reservation_object_get(struct _PMR_ *pmr)
+{
+ struct dma_buf *dmabuf;
+
+ dmabuf = PhysmemGetDmaBuf(pmr);
+ if (dmabuf)
+ return dmabuf->resv;
+
+ return NULL;
+}
+
+static int
+pvr_buffer_sync_pmrs_lock(struct pvr_buffer_sync_context *ctx,
+ u32 nr_pmrs,
+ struct _PMR_ **pmrs)
+{
+ struct reservation_object *resv, *cresv = NULL, *lresv = NULL;
+ int i, err;
+ struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ mutex_lock(&ctx->ctx_lock);
+#endif
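+
+	/* Lock every reservation object under a single ww_acquire context.
+	 * On -EDEADLK the fail path below releases everything already held,
+	 * slow-locks the contended object (remembered in cresv), records it
+	 * in lresv so the retry pass skips re-locking it, and then retries
+	 * the whole loop: the standard wait/wound back-off pattern. */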
+
+ ww_acquire_init(acquire_ctx, &reservation_ww_class);
+retry:
+ for (i = 0; i < nr_pmrs; i++) {
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (!resv) {
+ pr_err("%s: Failed to get reservation object from pmr %p\n",
+ __func__, pmrs[i]);
+ err = -EINVAL;
+ goto fail;
+ }
+
+ if (resv != lresv) {
+ err = ww_mutex_lock_interruptible(&resv->lock,
+ acquire_ctx);
+ if (err) {
+ cresv = (err == -EDEADLK) ? resv : NULL;
+ goto fail;
+ }
+ } else {
+ lresv = NULL;
+ }
+ }
+
+ ww_acquire_done(acquire_ctx);
+
+ return 0;
+
+fail:
+ while (i--) {
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+ ww_mutex_unlock(&resv->lock);
+ }
+
+ if (lresv)
+ ww_mutex_unlock(&lresv->lock);
+
+ if (cresv) {
+ err = ww_mutex_lock_slow_interruptible(&cresv->lock,
+ acquire_ctx);
+ if (!err) {
+ lresv = cresv;
+ cresv = NULL;
+ goto retry;
+ }
+ }
+
+ ww_acquire_fini(acquire_ctx);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ mutex_unlock(&ctx->ctx_lock);
+#endif
+ return err;
+}
+
+static void
+pvr_buffer_sync_pmrs_unlock(struct pvr_buffer_sync_context *ctx,
+ u32 nr_pmrs,
+ struct _PMR_ **pmrs)
+{
+ struct reservation_object *resv;
+ int i;
+ struct ww_acquire_ctx *acquire_ctx = &ctx->acquire_ctx;
+
+ for (i = 0; i < nr_pmrs; i++) {
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+ ww_mutex_unlock(&resv->lock);
+ }
+
+ ww_acquire_fini(acquire_ctx);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ mutex_unlock(&ctx->ctx_lock);
+#endif
+}
+
+static u32
+pvr_buffer_sync_pmrs_fence_count(u32 nr_pmrs, struct _PMR_ **pmrs,
+ u32 *pmr_flags)
+{
+ struct reservation_object *resv;
+ struct reservation_object_list *resv_list;
+ struct dma_fence *fence;
+ u32 fence_count = 0;
+ bool exclusive;
+ int i;
+
+ for (i = 0; i < nr_pmrs; i++) {
+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+
+ resv_list = reservation_object_get_list(resv);
+ fence = reservation_object_get_excl(resv);
+
+ if (fence &&
+ (!exclusive || !resv_list || !resv_list->shared_count))
+ fence_count++;
+
+ if (exclusive && resv_list)
+ fence_count += resv_list->shared_count;
+ }
+
+ return fence_count;
+}
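+
+/* Counting rule (mirrored in pvr_buffer_sync_check_fences_create() below):
+ * a reader (non-exclusive) only waits on the exclusive fence, while a
+ * writer (exclusive) waits on every shared fence, or on the exclusive
+ * fence when no shared fences are present. */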
+
+static struct pvr_buffer_sync_check_data *
+pvr_buffer_sync_check_fences_create(struct pvr_fence_context *fence_ctx,
+ u32 nr_pmrs,
+ struct _PMR_ **pmrs,
+ u32 *pmr_flags)
+{
+ struct pvr_buffer_sync_check_data *data;
+ struct reservation_object *resv;
+ struct reservation_object_list *resv_list;
+ struct dma_fence *fence;
+ u32 fence_count;
+ bool exclusive;
+ int i, j;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ fence_count = pvr_buffer_sync_pmrs_fence_count(nr_pmrs, pmrs,
+ pmr_flags);
+ if (fence_count) {
+ data->fences = kcalloc(fence_count, sizeof(*data->fences),
+ GFP_KERNEL);
+ if (!data->fences)
+ goto err_check_data_free;
+ }
+
+ for (i = 0; i < nr_pmrs; i++) {
+ resv = pmr_reservation_object_get(pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+
+ exclusive = !!(pmr_flags[i] & PVR_BUFFER_FLAG_WRITE);
+ if (!exclusive) {
+ err = reservation_object_reserve_shared(resv);
+ if (err)
+ goto err_destroy_fences;
+ }
+
+ resv_list = reservation_object_get_list(resv);
+ fence = reservation_object_get_excl(resv);
+
+ if (fence &&
+ (!exclusive || !resv_list || !resv_list->shared_count)) {
+ data->fences[data->nr_fences++] =
+ pvr_fence_create_from_fence(fence_ctx,
+ fence,
+ "exclusive check fence");
+ if (!data->fences[data->nr_fences - 1]) {
+ data->nr_fences--;
+ PVR_FENCE_TRACE(fence,
+ "waiting on exclusive fence\n");
+ WARN_ON(dma_fence_wait(fence, true) <= 0);
+ }
+ }
+
+ if (exclusive && resv_list) {
+ for (j = 0; j < resv_list->shared_count; j++) {
+ fence = rcu_dereference_protected(resv_list->shared[j],
+ reservation_object_held(resv));
+ data->fences[data->nr_fences++] =
+ pvr_fence_create_from_fence(fence_ctx,
+ fence,
+ "check fence");
+ if (!data->fences[data->nr_fences - 1]) {
+ data->nr_fences--;
+ PVR_FENCE_TRACE(fence,
+ "waiting on non-exclusive fence\n");
+ WARN_ON(dma_fence_wait(fence, true) <= 0);
+ }
+ }
+ }
+ }
+
+ WARN_ON((i != nr_pmrs) || (data->nr_fences != fence_count));
+
+ return data;
+
+err_destroy_fences:
+ for (i = 0; i < data->nr_fences; i++)
+ pvr_fence_destroy(data->fences[i]);
+ kfree(data->fences);
+err_check_data_free:
+ kfree(data);
+ return NULL;
+}
+
+static void
+pvr_buffer_sync_check_fences_destroy(struct pvr_buffer_sync_check_data *data)
+{
+ int i;
+
+ for (i = 0; i < data->nr_fences; i++)
+ pvr_fence_destroy(data->fences[i]);
+
+ kfree(data->fences);
+ kfree(data);
+}
+
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(struct device *dev, const char *name)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct pvr_drm_private *priv = ddev->dev_private;
+ struct pvr_buffer_sync_context *ctx;
+ int err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ctx->fence_ctx = pvr_fence_context_create(priv->dev_node,
+ priv->fence_status_wq,
+ name);
+ if (!ctx->fence_ctx) {
+ err = -ENOMEM;
+ goto err_free_ctx;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ mutex_init(&ctx->ctx_lock);
+#endif
+
+ return ctx;
+
+err_free_ctx:
+ kfree(ctx);
+err_exit:
+ return ERR_PTR(err);
+}
+
+void
+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx)
+{
+ pvr_fence_context_destroy(ctx->fence_ctx);
+ kfree(ctx);
+}
+
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+ u32 nr_pmrs,
+ struct _PMR_ **pmrs,
+ u32 *pmr_flags,
+ u32 *nr_fence_checkpoints_out,
+ PSYNC_CHECKPOINT **fence_checkpoints_out,
+ PSYNC_CHECKPOINT *update_checkpoints_out,
+ struct pvr_buffer_sync_append_data **data_out)
+{
+ struct pvr_buffer_sync_append_data *data;
+ PSYNC_CHECKPOINT *fence_checkpoints;
+ const size_t data_size = sizeof(*data);
+ const size_t pmrs_size = sizeof(*pmrs) * nr_pmrs;
+ const size_t pmr_flags_size = sizeof(*pmr_flags) * nr_pmrs;
+ int i;
+ int j;
+ int err;
+
+ if ((nr_pmrs && !(pmrs && pmr_flags)) ||
+ !nr_fence_checkpoints_out || !fence_checkpoints_out ||
+ !update_checkpoints_out)
+ return -EINVAL;
+
+ for (i = 0; i < nr_pmrs; i++) {
+ if (!(pmr_flags[i] & PVR_BUFFER_FLAG_MASK)) {
+ pr_err("%s: Invalid flags %#08x for pmr %p\n",
+ __func__, pmr_flags[i], pmrs[i]);
+ return -EINVAL;
+ }
+ }
+
+#if defined(NO_HARDWARE)
+ /*
+ * For NO_HARDWARE there's no checking or updating of sync checkpoints
+ * which means SW waits on our fences will cause a deadlock (since they
+ * will never be signalled). Avoid this by not creating any fences.
+ */
+ nr_pmrs = 0;
+#endif
+
+ if (!nr_pmrs) {
+ *nr_fence_checkpoints_out = 0;
+ *fence_checkpoints_out = NULL;
+ *update_checkpoints_out = NULL;
+ *data_out = NULL;
+
+ return 0;
+ }
+
+ data = kzalloc(data_size + pmrs_size + pmr_flags_size, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->ctx = ctx;
+ data->pmrs = (struct _PMR_ **)((char *)data + data_size);
+ data->pmr_flags = (u32 *)((char *)data->pmrs + pmrs_size);
+
+ /*
+ * It's expected that user space will provide a set of unique PMRs
+ * but, as a PMR can have multiple handles, it's still possible to
+ * end up here with duplicates. Take this opportunity to filter out
+ * any remaining duplicates (updating flags when necessary) before
+ * trying to process them further.
+ */
+ for (i = 0; i < nr_pmrs; i++) {
+ for (j = 0; j < data->nr_pmrs; j++) {
+ if (data->pmrs[j] == pmrs[i]) {
+ data->pmr_flags[j] |= pmr_flags[i];
+ break;
+ }
+ }
+
+ if (j == data->nr_pmrs) {
+ data->pmrs[j] = pmrs[i];
+ data->pmr_flags[j] = pmr_flags[i];
+ data->nr_pmrs++;
+ }
+ }
+
+ err = pvr_buffer_sync_pmrs_lock(ctx, data->nr_pmrs, data->pmrs);
+ if (err) {
+ pr_err("%s: failed to lock pmrs (errno=%d)\n",
+ __func__, err);
+ goto err_free_data;
+ }
+
+ /* create the check data */
+ data->check_data = pvr_buffer_sync_check_fences_create(ctx->fence_ctx,
+ data->nr_pmrs,
+ data->pmrs,
+ data->pmr_flags);
+ if (!data->check_data) {
+ err = -ENOMEM;
+ goto err_pmrs_unlock;
+ }
+
+ fence_checkpoints = kcalloc(data->check_data->nr_fences,
+ sizeof(*fence_checkpoints),
+ GFP_KERNEL);
+ if (fence_checkpoints) {
+ pvr_fence_get_checkpoints(data->check_data->fences,
+ data->check_data->nr_fences,
+ fence_checkpoints);
+ } else {
+ if (data->check_data->nr_fences) {
+ err = -ENOMEM;
+ goto err_free_check_data;
+ }
+ }
+
+ /* create the update fence */
+ data->update_fence = pvr_fence_create(ctx->fence_ctx,
+ SYNC_CHECKPOINT_FOREIGN_CHECKPOINT, "update fence");
+ if (!data->update_fence) {
+ err = -ENOMEM;
+ goto err_free_fence_checkpoints;
+ }
+
+ /*
+ * We need to clean up the fences once the HW has finished with them.
+ * We can do this using fence callbacks. However, instead of adding a
+ * callback to every fence, which would result in more work, we can
+ * simply add one to the update fence since this will be the last fence
+ * to be signalled. This callback can do all the necessary clean up.
+ *
+ * Note: we take an additional reference on the update fence in case
+ * it signals before we can add it to a reservation object.
+ */
+ PVR_FENCE_TRACE(&data->update_fence->base,
+ "create fence calling dma_fence_get\n");
+ dma_fence_get(&data->update_fence->base);
+
+ *nr_fence_checkpoints_out = data->check_data->nr_fences;
+ *fence_checkpoints_out = fence_checkpoints;
+ *update_checkpoints_out = pvr_fence_get_checkpoint(data->update_fence);
+ *data_out = data;
+
+ return err;
+
+err_free_fence_checkpoints:
+ kfree(fence_checkpoints);
+err_free_check_data:
+ pvr_buffer_sync_check_fences_destroy(data->check_data);
+err_pmrs_unlock:
+ pvr_buffer_sync_pmrs_unlock(ctx, data->nr_pmrs, data->pmrs);
+err_free_data:
+ kfree(data);
+ return err;
+}
+
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data)
+{
+ struct reservation_object *resv;
+ int i;
+
+ dma_fence_enable_sw_signaling(&data->update_fence->base);
+
+ for (i = 0; i < data->nr_pmrs; i++) {
+ resv = pmr_reservation_object_get(data->pmrs[i]);
+ if (WARN_ON_ONCE(!resv))
+ continue;
+
+ if (data->pmr_flags[i] & PVR_BUFFER_FLAG_WRITE) {
+ PVR_FENCE_TRACE(&data->update_fence->base,
+ "added exclusive fence (%s) to resv %p\n",
+ data->update_fence->name, resv);
+ reservation_object_add_excl_fence(resv,
+ &data->update_fence->base);
+ } else if (data->pmr_flags[i] & PVR_BUFFER_FLAG_READ) {
+ PVR_FENCE_TRACE(&data->update_fence->base,
+ "added non-exclusive fence (%s) to resv %p\n",
+ data->update_fence->name, resv);
+ reservation_object_add_shared_fence(resv,
+ &data->update_fence->base);
+ }
+ }
+
+ /*
+ * Now that the fence has been added to the necessary
+ * reservation objects we can safely drop the extra reference
+ * we took in pvr_buffer_sync_resolve_and_create_fences().
+ */
+ dma_fence_put(&data->update_fence->base);
+ pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs,
+ data->pmrs);
+
+ /* destroy the check fences */
+ pvr_buffer_sync_check_fences_destroy(data->check_data);
+ /* destroy the update fence */
+ pvr_fence_destroy(data->update_fence);
+
+ /* free the append data */
+ kfree(data);
+}
+
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data)
+{
+	/*
+	 * Drop the extra reference we took on the update fence in
+	 * pvr_buffer_sync_resolve_and_create_fences().
+	 */
+ dma_fence_put(&data->update_fence->base);
+
+ if (data->nr_pmrs > 0)
+ pvr_buffer_sync_pmrs_unlock(data->ctx, data->nr_pmrs,
+ data->pmrs);
+
+ /* destroy the check fences */
+ pvr_buffer_sync_check_fences_destroy(data->check_data);
+ /* destroy the update fence */
+ pvr_fence_destroy(data->update_fence);
+
+ /* free the append data */
+ kfree(data);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.h b/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.h
new file mode 100644
index 00000000000000..378a191837d848
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync.h
@@ -0,0 +1,123 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File pvr_buffer_sync.h
+@Title PowerVR Linux buffer sync interface
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_BUFFER_SYNC_H__)
+#define __PVR_BUFFER_SYNC_H__
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+struct _PMR_;
+struct pvr_buffer_sync_context;
+struct pvr_buffer_sync_append_data;
+
+/**
+ * pvr_buffer_sync_context_create - creates a buffer sync context
+ * @dev: linux device
+ * @name: context name (used for debugging)
+ *
+ * This function returns a buffer sync context, or NULL if it fails for any
+ * reason.
+ *
+ * @pvr_buffer_sync_context_destroy should be used to clean up the buffer sync
+ * context.
+ */
+struct pvr_buffer_sync_context *
+pvr_buffer_sync_context_create(struct device *dev, const char *name);
+
+/**
+ * pvr_buffer_sync_context_destroy - frees a buffer sync context
+ * @ctx: buffer sync context
+ */
+void
+pvr_buffer_sync_context_destroy(struct pvr_buffer_sync_context *ctx);
+
+/**
+ * pvr_buffer_sync_resolve_and_create_fences - create checkpoints from buffers
+ * @ctx: buffer sync context
+ * @nr_pmrs: number of buffer objects (PMRs)
+ * @pmrs: buffer array
+ * @pmr_flags: internal flags
+ * @nr_fence_checkpoints_out: returned number of fence sync checkpoints
+ * @fence_checkpoints_out: returned array of fence sync checkpoints
+ * @update_checkpoint_out: returned update sync checkpoint
+ * @data_out: returned buffer sync data
+ *
+ * This function returns 0 on success, or an error code otherwise.
+ *
+ * After this call, either @pvr_buffer_sync_kick_succeeded or
+ * @pvr_buffer_sync_kick_failed must be called.
+ */
+int
+pvr_buffer_sync_resolve_and_create_fences(struct pvr_buffer_sync_context *ctx,
+ u32 nr_pmrs,
+ struct _PMR_ **pmrs,
+ u32 *pmr_flags,
+ u32 *nr_fence_checkpoints_out,
+ PSYNC_CHECKPOINT **fence_checkpoints_out,
+ PSYNC_CHECKPOINT *update_checkpoint_out,
+ struct pvr_buffer_sync_append_data **data_out);
+
+/**
+ * pvr_buffer_sync_kick_succeeded - cleans up after a successful kick operation
+ * @data: buffer sync data returned by @pvr_buffer_sync_resolve_and_create_fences
+ *
+ * Should only be called following @pvr_buffer_sync_resolve_and_create_fences.
+ */
+void
+pvr_buffer_sync_kick_succeeded(struct pvr_buffer_sync_append_data *data);
+
+/**
+ * pvr_buffer_sync_kick_failed - cleans up after a failed kick operation
+ * @data: buffer sync data returned by @pvr_buffer_sync_resolve_and_create_fences
+ *
+ * Should only be called following @pvr_buffer_sync_resolve_and_create_fences.
+ */
+void
+pvr_buffer_sync_kick_failed(struct pvr_buffer_sync_append_data *data);
+
+#endif /* !defined(__PVR_BUFFER_SYNC_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync_shared.h b/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync_shared.h
new file mode 100644
index 00000000000000..9258a45f2778dd
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_buffer_sync_shared.h
@@ -0,0 +1,52 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR buffer sync shared
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared definitions between client and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_BUFFER_SYNC_SHARED_H__
+#define __PVR_BUFFER_SYNC_SHARED_H__
+
+#define PVR_BUFFER_FLAG_READ (1 << 0)
+#define PVR_BUFFER_FLAG_WRITE (1 << 1)
+#define PVR_BUFFER_FLAG_MASK (PVR_BUFFER_FLAG_READ | \
+ PVR_BUFFER_FLAG_WRITE)
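+
+/* Callers pass one of these flags (or both) per PMR in the pmr_flags array
+ * given to pvr_buffer_sync_resolve_and_create_fences(); WRITE requests an
+ * exclusive fence on the buffer's reservation object, READ a shared one
+ * (see pvr_buffer_sync_kick_succeeded() in pvr_buffer_sync.c). */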
+
+#endif /* __PVR_BUFFER_SYNC_SHARED_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.c b/drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.c
new file mode 100644
index 00000000000000..feaad79fd31aa8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.c
@@ -0,0 +1,280 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR Linux software "counting" timeline fence implementation
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/kref.h>
+
+#include "services_kernel_client.h"
+#include "pvr_counting_timeline.h"
+#include "pvr_sw_fence.h"
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+ do { \
+ if (pfnDumpDebugPrintf) \
+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \
+ ## __VA_ARGS__); \
+ else \
+ pr_err(fmt "\n", ## __VA_ARGS__); \
+ } while (0)
+
+struct pvr_counting_fence_timeline {
+ struct pvr_sw_fence_context *context;
+
+ void *dbg_request_handle;
+
+ spinlock_t active_fences_lock;
+ u64 current_value; /* guarded by active_fences_lock */
+ u64 next_value; /* guarded by active_fences_lock */
+ struct list_head active_fences;
+
+ struct kref kref;
+};
+
+struct pvr_counting_fence {
+ u64 value;
+ struct dma_fence *fence;
+ struct list_head active_list_entry;
+};
+
+static void
+pvr_counting_fence_timeline_debug_request(void *data, u32 verbosity,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ struct pvr_counting_fence_timeline *timeline =
+ (struct pvr_counting_fence_timeline *)data;
+ struct pvr_counting_fence *obj;
+ unsigned long flags;
+ char value[128];
+
+ if (verbosity == DEBUG_REQUEST_VERBOSITY_MEDIUM) {
+ spin_lock_irqsave(&timeline->active_fences_lock, flags);
+ pvr_sw_fence_context_value_str(timeline->context, value,
+ sizeof(value));
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ "sw: %s @%s cur=%llu",
+ pvr_sw_fence_context_name(timeline->context),
+ value, timeline->current_value);
+ list_for_each_entry(obj, &timeline->active_fences,
+ active_list_entry) {
+ obj->fence->ops->fence_value_str(obj->fence,
+ value, sizeof(value));
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ " @%s: val=%llu", value, obj->value);
+ }
+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+ }
+}
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create(
+ void *dev_cookie,
+ const char *name)
+{
+ PVRSRV_ERROR srv_err;
+ struct pvr_counting_fence_timeline *timeline =
+ kmalloc(sizeof(*timeline), GFP_KERNEL);
+
+ if (!timeline)
+ goto err_out;
+
+ timeline->context = pvr_sw_fence_context_create(name,
+ "pvr_sw_sync");
+ if (!timeline->context)
+ goto err_free_timeline;
+
+ srv_err = PVRSRVRegisterDbgRequestNotify(&timeline->dbg_request_handle,
+ dev_cookie,
+ pvr_counting_fence_timeline_debug_request,
+ DEBUG_REQUEST_LINUXFENCE,
+ timeline);
+ if (srv_err != PVRSRV_OK) {
+ pr_err("%s: failed to register debug request callback (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(srv_err));
+ goto err_free_timeline_ctx;
+ }
+
+ timeline->current_value = 0;
+ timeline->next_value = 1;
+ kref_init(&timeline->kref);
+ spin_lock_init(&timeline->active_fences_lock);
+ INIT_LIST_HEAD(&timeline->active_fences);
+
+err_out:
+ return timeline;
+
+err_free_timeline_ctx:
+ pvr_sw_fence_context_destroy(timeline->context);
+
+err_free_timeline:
+ kfree(timeline);
+ timeline = NULL;
+ goto err_out;
+}
+
+void pvr_counting_fence_timeline_force_complete(
+ struct pvr_counting_fence_timeline *timeline)
+{
+ struct list_head *entry, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+	/* This is just a safety measure. Normally we should never see any
+	 * unsignaled sw fences when we get here. Warn if we still do! */
+ WARN_ON(!list_empty(&timeline->active_fences));
+
+ list_for_each_safe(entry, tmp, &timeline->active_fences) {
+ struct pvr_counting_fence *fence =
+ list_entry(entry, struct pvr_counting_fence,
+ active_list_entry);
+ dma_fence_signal(fence->fence);
+ dma_fence_put(fence->fence);
+ fence->fence = NULL;
+ list_del(&fence->active_list_entry);
+ kfree(fence);
+ }
+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+}
+
+static void pvr_counting_fence_timeline_destroy(
+ struct kref *kref)
+{
+ struct pvr_counting_fence_timeline *timeline =
+ container_of(kref, struct pvr_counting_fence_timeline, kref);
+
+ WARN_ON(!list_empty(&timeline->active_fences));
+
+ PVRSRVUnregisterDbgRequestNotify(timeline->dbg_request_handle);
+
+ pvr_sw_fence_context_destroy(timeline->context);
+ kfree(timeline);
+}
+
+void pvr_counting_fence_timeline_put(
+ struct pvr_counting_fence_timeline *timeline)
+{
+ kref_put(&timeline->kref, pvr_counting_fence_timeline_destroy);
+}
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+ struct pvr_counting_fence_timeline *timeline)
+{
+ if (!timeline)
+ return NULL;
+ kref_get(&timeline->kref);
+ return timeline;
+}
+
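+/* Note: each new fence takes the timeline's next sequential value and is
+ * signalled once pvr_counting_fence_timeline_inc() has advanced
+ * current_value to (or past) that value; the value argument is unused. */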
+struct dma_fence *pvr_counting_fence_create(
+ struct pvr_counting_fence_timeline *timeline, u64 value)
+{
+ unsigned long flags;
+ struct dma_fence *sw_fence;
+ struct pvr_counting_fence *fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (!fence)
+ return NULL;
+
+ sw_fence = pvr_sw_fence_create(timeline->context);
+ if (!sw_fence)
+ goto err_free_fence;
+
+ fence->fence = dma_fence_get(sw_fence);
+
+ spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+ fence->value = timeline->next_value++;
+
+ list_add_tail(&fence->active_list_entry, &timeline->active_fences);
+
+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+
+ /* Counting fences can be signalled any time after creation */
+ dma_fence_enable_sw_signaling(sw_fence);
+
+ return sw_fence;
+
+err_free_fence:
+ kfree(fence);
+ return NULL;
+}
+
+bool pvr_counting_fence_timeline_inc(
+ struct pvr_counting_fence_timeline *timeline, u64 value)
+{
+ struct list_head *entry, *tmp;
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&timeline->active_fences_lock, flags);
+
+	if (timeline->current_value == timeline->next_value - 1) {
+ res = false;
+ goto exit_unlock;
+ }
+
+ timeline->current_value++;
+
+ list_for_each_safe(entry, tmp, &timeline->active_fences) {
+ struct pvr_counting_fence *fence =
+ list_entry(entry, struct pvr_counting_fence,
+ active_list_entry);
+ if (fence->value <= timeline->current_value) {
+ dma_fence_signal(fence->fence);
+ dma_fence_put(fence->fence);
+ fence->fence = NULL;
+ list_del(&fence->active_list_entry);
+ kfree(fence);
+ }
+ }
+
+ res = true;
+
+exit_unlock:
+ spin_unlock_irqrestore(&timeline->active_fences_lock, flags);
+
+ return res;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.h b/drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.h
new file mode 100644
index 00000000000000..63938f99f08134
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_counting_timeline.h
@@ -0,0 +1,66 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_COUNTING_TIMELINE_H__)
+#define __PVR_COUNTING_TIMELINE_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_counting_fence_timeline;
+
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_create(
+ void *dev_cookie,
+ const char *name);
+void pvr_counting_fence_timeline_put(
+ struct pvr_counting_fence_timeline *fence_timeline);
+struct pvr_counting_fence_timeline *pvr_counting_fence_timeline_get(
+ struct pvr_counting_fence_timeline *fence_timeline);
+struct dma_fence *pvr_counting_fence_create(
+ struct pvr_counting_fence_timeline *fence_timeline, u64 value);
+bool pvr_counting_fence_timeline_inc(
+ struct pvr_counting_fence_timeline *fence_timeline, u64 value);
+void pvr_counting_fence_timeline_force_complete(
+ struct pvr_counting_fence_timeline *fence_timeline);
+
+#endif /* !defined(__PVR_COUNTING_TIMELINE_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_debug.c b/drivers/gpu/drm/img-rogue/1.10/pvr_debug.c
new file mode 100644
index 00000000000000..775bae9043b11f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_debug.c
@@ -0,0 +1,1672 @@
+/*************************************************************************/ /*!
+@File
+@Title Debug Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides kernel side Debug Functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <stdarg.h>
+
+#include "allocmem.h"
+#include "pvrversion.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "pvr_debugfs.h"
+#include "linkage.h"
+#include "pvr_uaccess.h"
+#include "pvrsrv.h"
+#include "lists.h"
+#include "osfunc.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "rgxinit.h"
+#include "rgxfwutils.h"
+#include "sofunc_rgx.h"
+/* Handle used by DebugFS to get GPU utilisation stats */
+static IMG_HANDLE ghGpuUtilUserDebugFS;
+#endif
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/******** BUFFERED LOG MESSAGES ********/
+
+/* Because we don't want to have to handle CCB wrapping, each buffered
+ * message is rounded up to PVRSRV_DEBUG_CCB_MESG_MAX bytes. This means
+ * there is the same fixed number of messages that can be stored,
+ * regardless of message length.
+ */
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+
+#define PVRSRV_DEBUG_CCB_MESG_MAX PVR_MAX_DEBUG_MESSAGE_LEN
+
+#include <linux/syscalls.h>
+#include <linux/time.h>
+
+typedef struct
+{
+ const IMG_CHAR *pszFile;
+ IMG_INT iLine;
+ IMG_UINT32 ui32TID;
+ IMG_UINT32 ui32PID;
+ IMG_CHAR pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX];
+ struct timeval sTimeVal;
+}
+PVRSRV_DEBUG_CCB;
+
+static PVRSRV_DEBUG_CCB gsDebugCCB[PVRSRV_DEBUG_CCB_MAX];
+
+static IMG_UINT giOffset;
+
+static DEFINE_MUTEX(gsDebugCCBMutex);
+
+static void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+ const IMG_CHAR *szBuffer)
+{
+ mutex_lock(&gsDebugCCBMutex);
+
+ gsDebugCCB[giOffset].pszFile = pszFileName;
+ gsDebugCCB[giOffset].iLine = ui32Line;
+ gsDebugCCB[giOffset].ui32TID = current->pid;
+ gsDebugCCB[giOffset].ui32PID = current->tgid;
+
+ do_gettimeofday(&gsDebugCCB[giOffset].sTimeVal);
+
+ strncpy(gsDebugCCB[giOffset].pcMesg, szBuffer, PVRSRV_DEBUG_CCB_MESG_MAX - 1);
+ gsDebugCCB[giOffset].pcMesg[PVRSRV_DEBUG_CCB_MESG_MAX - 1] = 0;
+
+ giOffset = (giOffset + 1) % PVRSRV_DEBUG_CCB_MAX;
+
+ mutex_unlock(&gsDebugCCBMutex);
+}
+
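+/* Dump the buffered messages oldest-first; entries are cleared after
+ * printing so that a subsequent dump does not repeat them. */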
+void PVRSRVDebugPrintfDumpCCB(void)
+{
+ int i;
+
+ mutex_lock(&gsDebugCCBMutex);
+
+ for (i = 0; i < PVRSRV_DEBUG_CCB_MAX; i++)
+ {
+ PVRSRV_DEBUG_CCB *psDebugCCBEntry =
+ &gsDebugCCB[(giOffset + i) % PVRSRV_DEBUG_CCB_MAX];
+
+ /* Early on, we won't have PVRSRV_DEBUG_CCB_MAX messages */
+ if (!psDebugCCBEntry->pszFile)
+ {
+ continue;
+ }
+
+		printk(KERN_ERR "%s:%d: (%ld.%06ld, tid=%u, pid=%u) %s\n",
+ psDebugCCBEntry->pszFile,
+ psDebugCCBEntry->iLine,
+ (long)psDebugCCBEntry->sTimeVal.tv_sec,
+ (long)psDebugCCBEntry->sTimeVal.tv_usec,
+ psDebugCCBEntry->ui32TID,
+ psDebugCCBEntry->ui32PID,
+ psDebugCCBEntry->pcMesg);
+
+		/* Clear this entry so it doesn't get printed again next time. */
+ psDebugCCBEntry->pszFile = NULL;
+ }
+
+ mutex_unlock(&gsDebugCCBMutex);
+}
+
+#else /* defined(PVRSRV_DEBUG_CCB_MAX) */
+static INLINE void
+AddToBufferCCB(const IMG_CHAR *pszFileName, IMG_UINT32 ui32Line,
+ const IMG_CHAR *szBuffer)
+{
+ (void)pszFileName;
+ (void)szBuffer;
+ (void)ui32Line;
+}
+
+void PVRSRVDebugPrintfDumpCCB(void)
+{
+ /* Not available */
+}
+
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+ const IMG_CHAR *pszFormat, va_list VArgs)
+ __printf(3, 0);
+
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+#define PVR_MAX_FILEPATH_LEN 256
+
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz,
+ const IMG_CHAR *pszFormat, ...)
+ __printf(3, 4);
+
+#if !defined(PVR_TESTING_UTILS)
+static
+#endif
+IMG_UINT32 gPVRDebugLevel =
+ (
+ DBGPRIV_FATAL | DBGPRIV_ERROR | DBGPRIV_WARNING
+
+#if defined(PVRSRV_DEBUG_CCB_MAX)
+ | DBGPRIV_BUFFERED
+#endif /* defined(PVRSRV_DEBUG_CCB_MAX) */
+
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+ | DBGPRIV_DEBUG
+#endif /* defined(PVR_DPF_ADHOC_DEBUG_ON) */
+ );
+
+module_param(gPVRDebugLevel, uint, 0644);
+MODULE_PARM_DESC(gPVRDebugLevel,
+ "Sets the level of debug output (default 0x7)");
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+#define PVR_MAX_MSG_LEN PVR_MAX_DEBUG_MESSAGE_LEN
+
+/* Message buffer for non-IRQ messages */
+static IMG_CHAR gszBufferNonIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* Message buffer for IRQ messages */
+static IMG_CHAR gszBufferIRQ[PVR_MAX_MSG_LEN + 1];
+
+/* The lock is used to control access to gszBufferNonIRQ */
+static DEFINE_MUTEX(gsDebugMutexNonIRQ);
+
+/* The lock is used to control access to gszBufferIRQ */
+static DEFINE_SPINLOCK(gsDebugLockIRQ);
+
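+/* Output from IRQ/atomic context must not sleep, so it uses the
+ * spinlock-protected IRQ buffer; all other contexts take the mutex and
+ * the non-IRQ buffer. */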
+#define USE_SPIN_LOCK (in_interrupt() || !preemptible())
+
+static inline void GetBufferLock(unsigned long *pulLockFlags)
+{
+ if (USE_SPIN_LOCK)
+ {
+ spin_lock_irqsave(&gsDebugLockIRQ, *pulLockFlags);
+ }
+ else
+ {
+ __acquire(&gsDebugLockIRQ);
+ mutex_lock(&gsDebugMutexNonIRQ);
+ }
+}
+
+static inline void ReleaseBufferLock(unsigned long ulLockFlags)
+{
+ if (USE_SPIN_LOCK)
+ {
+ spin_unlock_irqrestore(&gsDebugLockIRQ, ulLockFlags);
+ }
+ else
+ {
+ __release(&gsDebugLockIRQ);
+ mutex_unlock(&gsDebugMutexNonIRQ);
+ }
+}
+
+static inline void SelectBuffer(IMG_CHAR **ppszBuf, IMG_UINT32 *pui32BufSiz)
+{
+ if (USE_SPIN_LOCK)
+ {
+ *ppszBuf = gszBufferIRQ;
+ *pui32BufSiz = sizeof(gszBufferIRQ);
+ }
+ else
+ {
+ *ppszBuf = gszBufferNonIRQ;
+ *pui32BufSiz = sizeof(gszBufferNonIRQ);
+ }
+}
+
+/*
+ * Append a formatted string to a buffer. The arguments for the format
+ * string are supplied through the given va_list.
+ */
+static IMG_BOOL VBAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, va_list VArgs)
+{
+ IMG_UINT32 ui32Used;
+ IMG_UINT32 ui32Space;
+ IMG_INT32 i32Len;
+
+ ui32Used = strlen(pszBuf);
+ BUG_ON(ui32Used >= ui32BufSiz);
+ ui32Space = ui32BufSiz - ui32Used;
+
+ i32Len = vsnprintf(&pszBuf[ui32Used], ui32Space, pszFormat, VArgs);
+ pszBuf[ui32BufSiz - 1] = 0;
+
+ /* Return true if string was truncated */
+ return i32Len < 0 || i32Len >= (IMG_INT32)ui32Space;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVReleasePrintf
+@Description To output an important message to the user in release builds
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...)
+{
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+ IMG_INT32 result;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(vaArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR_K: %u: ", current->pid);
+	PVR_ASSERT(result > 0);
+ ui32BufSiz -= result;
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_ERR "%s\n", pszBuf);
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+ va_end(vaArgs);
+}
+
+#if defined(PVRSRV_NEED_PVR_TRACE)
+
+/*************************************************************************/ /*!
+@Function       PVRSRVTrace
+@Description To output a debug message to the user
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVTrace(const IMG_CHAR *pszFormat, ...)
+{
+ va_list VArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+ IMG_INT32 result;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(VArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ result = snprintf(pszBuf, (ui32BufSiz - 2), "PVR: %u: ", current->pid);
+	PVR_ASSERT(result > 0);
+ ui32BufSiz -= result;
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs))
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ printk(KERN_ERR "%s\n", pszBuf);
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+
+ va_end(VArgs);
+}
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/*
+ * Append a string to a buffer using formatted conversion.
+ * The function takes a variable number of arguments, calling
+ * VBAppend to do the actual work.
+ */
+static IMG_BOOL BAppend(IMG_CHAR *pszBuf, IMG_UINT32 ui32BufSiz, const IMG_CHAR *pszFormat, ...)
+{
+ va_list VArgs;
+ IMG_BOOL bTrunc;
+
+ va_start (VArgs, pszFormat);
+
+ bTrunc = VBAppend(pszBuf, ui32BufSiz, pszFormat, VArgs);
+
+ va_end (VArgs);
+
+ return bTrunc;
+}
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugPrintf
+@Description To output a debug message to the user
+@Input          ui32DebugLevel  The current debug level
+@Input          pszFullFileName The source file generating the message
+@Input          ui32Line        The line of the source file
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+void PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR *pszFullFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszFormat,
+ ...)
+{
+ IMG_BOOL bNoLoc;
+ const IMG_CHAR *pszFileName = pszFullFileName;
+ IMG_CHAR *pszLeafName;
+
+ bNoLoc = (IMG_BOOL)((ui32DebugLevel & DBGPRIV_CALLTRACE) |
+ (ui32DebugLevel & DBGPRIV_BUFFERED)) ? IMG_TRUE : IMG_FALSE;
+
+ if (gPVRDebugLevel & ui32DebugLevel)
+ {
+ va_list vaArgs;
+ unsigned long ulLockFlags = 0;
+ IMG_CHAR *pszBuf;
+ IMG_UINT32 ui32BufSiz;
+
+ SelectBuffer(&pszBuf, &ui32BufSiz);
+
+ va_start(vaArgs, pszFormat);
+
+ GetBufferLock(&ulLockFlags);
+
+ switch (ui32DebugLevel)
+ {
+ case DBGPRIV_FATAL:
+ {
+ strncpy(pszBuf, "PVR_K:(Fatal): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_ERROR:
+ {
+ strncpy(pszBuf, "PVR_K:(Error): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_WARNING:
+ {
+ strncpy(pszBuf, "PVR_K:(Warn): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_MESSAGE:
+ {
+ strncpy(pszBuf, "PVR_K:(Mesg): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_VERBOSE:
+ {
+ strncpy(pszBuf, "PVR_K:(Verb): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_DEBUG:
+ {
+ strncpy(pszBuf, "PVR_K:(Debug): ", (ui32BufSiz - 2));
+ break;
+ }
+ case DBGPRIV_CALLTRACE:
+ case DBGPRIV_ALLOC:
+ case DBGPRIV_BUFFERED:
+ default:
+ {
+ strncpy(pszBuf, "PVR_K: ", (ui32BufSiz - 2));
+ break;
+ }
+ }
+ pszBuf[ui32BufSiz - 1] = '\0';
+
+ if (current->pid == task_tgid_nr(current))
+ {
+ (void) BAppend(pszBuf, ui32BufSiz, "%5u: ", current->pid);
+ }
+ else
+ {
+			(void) BAppend(pszBuf, ui32BufSiz, "%5u-%5u: ", task_tgid_nr(current) /* thread group id */, current->pid /* task id */);
+ }
+
+ if (VBAppend(pszBuf, ui32BufSiz, pszFormat, vaArgs))
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ IMG_BOOL bTruncated = IMG_FALSE;
+
+#if !defined(__sh__)
+ pszLeafName = (IMG_CHAR *)strrchr (pszFileName, '/');
+
+ if (pszLeafName)
+ {
+ pszFileName = pszLeafName+1;
+ }
+#endif /* __sh__ */
+
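+			/* Append the source location (DEBUG builds only),
+			 * unless bNoLoc suppresses it. To keep repeated
+			 * messages short, only the line number is printed
+			 * when the file matches the previous message. */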
+#if defined(DEBUG)
+			if (!bNoLoc)
+			{
+				static const IMG_CHAR *lastFile;
+
+				if (lastFile == pszFileName)
+				{
+					bTruncated = BAppend(pszBuf, ui32BufSiz, " [%u]", ui32Line);
+				}
+				else
+				{
+					bTruncated = BAppend(pszBuf, ui32BufSiz, " [%s:%u]", pszFileName, ui32Line);
+					lastFile = pszFileName;
+				}
+			}
+#endif
+
+ if (bTruncated)
+ {
+ printk(KERN_ERR "PVR_K:(Message Truncated): %s\n", pszBuf);
+ }
+ else
+ {
+ if (ui32DebugLevel & DBGPRIV_BUFFERED)
+ {
+ AddToBufferCCB(pszFileName, ui32Line, pszBuf);
+ }
+ else
+ {
+ printk(KERN_ERR "%s\n", pszBuf);
+ }
+ }
+ }
+
+ ReleaseBufferLock(ulLockFlags);
+
+ va_end (vaArgs);
+ }
+}
+
+#endif /* PVRSRV_NEED_PVR_DPF */
+
+
+/*************************************************************************/ /*!
+ Version DebugFS entry
+*/ /**************************************************************************/
+
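+/* The seq_file callbacks below map the file position onto the device
+ * node list: position 0 yields SEQ_START_TOKEN (the driver-wide header)
+ * and position N yields the (N-1)th device node. */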
+static void *_DebugVersionCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+ va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugVersionSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugVersionCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugVersionSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugVersionSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugVersionCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static int _DebugVersionSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (pvData == SEQ_START_TOKEN)
+ {
+		if (psPVRSRVData->sDriverInfo.bIsNoMatch)
+ {
+ seq_printf(psSeqFile, "Driver UM Version: %d (%s) %s\n",
+ psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision,
+ (psPVRSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug",
+ PVR_BUILD_DIR);
+ seq_printf(psSeqFile, "Driver KM Version: %d (%s) %s\n",
+ psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision,
+ (BUILD_TYPE_RELEASE == psPVRSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug",
+ PVR_BUILD_DIR);
+		}
+		else
+ {
+ seq_printf(psSeqFile, "Driver Version: %s (%s) %s\n",
+ PVRVERSION_STRING,
+ PVR_BUILD_TYPE, PVR_BUILD_DIR);
+ }
+ }
+ else if (pvData != NULL)
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+ seq_printf(psSeqFile, "\nDevice Name: %s\n", psDevNode->psDevConfig->pszName);
+
+ if (psDevNode->psDevConfig->pszVersion)
+ {
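+/* Buffer usage flags shared between the client and server sides. */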
+ seq_printf(psSeqFile, "Device Version: %s\n", psDevNode->psDevConfig->pszVersion);
+ }
+
+ if (psDevNode->pfnDeviceVersionString)
+ {
+ IMG_CHAR *pszDeviceVersionString;
+
+ if (psDevNode->pfnDeviceVersionString(psDevNode, &pszDeviceVersionString) == PVRSRV_OK)
+ {
+ seq_printf(psSeqFile, "%s\n", pszDeviceVersionString);
+
+ OSFreeMem(pszDeviceVersionString);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsDebugVersionReadOps =
+{
+ .start = _DebugVersionSeqStart,
+ .stop = _DebugVersionSeqStop,
+ .next = _DebugVersionSeqNext,
+ .show = _DebugVersionSeqShow,
+};
+
+#if defined(SUPPORT_RGX) && defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+/*************************************************************************/ /*!
+ Power data DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugPowerDataCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+ va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugPowerDataSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 0;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugPowerDataCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugPowerDataSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugPowerDataSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
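+/* Prefer the caller-supplied debug printf callback when one is given;
+ * fall back to pr_err() so messages are never silently dropped. */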
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 0;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
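+/* A counting timeline pairs a software fence context with a counter:
+ * fences on the active list are signalled once the timeline has been
+ * advanced to (or past) their target value. */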
+ _DebugPowerDataCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static PVRSRV_ERROR SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode,
+ RGXFWIF_COUNTER_DUMP_REQUEST eRequestType)
+{
+ PVRSRV_ERROR eError;
+
+ RGXFWIF_KCCB_CMD sCounterDumpCmd;
+
+ sCounterDumpCmd.eCmdType = RGXFWIF_KCCB_CMD_COUNTER_DUMP;
+ sCounterDumpCmd.uCmdData.sCounterDumpConfigData.eCounterDumpRequest = eRequestType;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sCounterDumpCmd,
+ sizeof(sCounterDumpCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SendPowerCounterCommand: RGXScheduleCommand failed. Error:%u", eError));
+ }
+
+ return eError;
+}
+
+static void *_IsDevNodeNotInitialised(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ return psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_ACTIVE ? NULL : psDeviceNode;
+}
+
+static void _SendPowerCounterCommand(PVRSRV_DEVICE_NODE* psDeviceNode,
+ va_list va)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ OSLockAcquire(psDevInfo->hCounterDumpingLock);
+
+ SendPowerCounterCommand(psDeviceNode, va_arg(va, RGXFWIF_COUNTER_DUMP_REQUEST));
+
+ OSLockRelease(psDevInfo->hCounterDumpingLock);
+}
+
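+/* Reading the power_data entry requests a counter dump sample from the
+ * firmware, waits on a sync prim for the copy to complete and then
+ * formats the returned buffer, one line per register offset. */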
+static int _DebugPowerDataSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (pvData != NULL)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_ACTIVE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when power counter data was requested!"));
+ return -EIO;
+ }
+
+ OSLockAcquire(psDevInfo->hCounterDumpingLock);
+
+ eError = SendPowerCounterCommand(psDeviceNode, RGXFWIF_PWR_COUNTER_DUMP_SAMPLE);
+
+		if (eError != PVRSRV_OK)
+		{
+			OSLockRelease(psDevInfo->hCounterDumpingLock);
+			return -EIO;
+		}
+
+ /* Create update command to notify the host that the copy is finished. */
+ {
+ PVRSRV_CLIENT_SYNC_PRIM* psCopySyncPrim;
+ RGXFWIF_DEV_VIRTADDR sSyncFWAddr;
+ RGXFWIF_KCCB_CMD sSyncCmd;
+			eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+			                       &psCopySyncPrim,
+			                       "power counter dump sync prim");
+			if (eError != PVRSRV_OK)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: SyncPrimAlloc failed. Error:%u", eError));
+				OSLockRelease(psDevInfo->hCounterDumpingLock);
+				return -EIO;
+			}
+
+			SyncPrimSet(psCopySyncPrim, 0);
+
+ SyncPrimGetFirmwareAddr(psCopySyncPrim, &sSyncFWAddr.ui32Addr);
+
+ sSyncCmd.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+ sSyncCmd.uCmdData.sSyncData.sSyncObjDevVAddr = sSyncFWAddr;
+ sSyncCmd.uCmdData.sSyncData.uiUpdateVal = 1;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sSyncCmd,
+ sizeof(sSyncCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: RGXScheduleCommand failed. Error:%u", eError));
+ OSLockRelease(psDevInfo->hCounterDumpingLock);
+ return -EIO;
+ }
+
+ eError = PVRSRVWaitForValueKM(psCopySyncPrim->pui32LinAddr, 1, 0xffffffff);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: PVRSRVWaitForValueKM failed. Error:%u", eError));
+ OSLockRelease(psDevInfo->hCounterDumpingLock);
+ return -EIO;
+ }
+
+ eError = SyncPrimFree(psCopySyncPrim);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_DebugPowerDataSeqShow: SyncPrimFree failed. Error:%u", eError));
+ OSLockRelease(psDevInfo->hCounterDumpingLock);
+ return -EIO;
+ }
+ }
+
+ /* Read back the buffer */
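+		/* Buffer layout: [register count][sample period], then for
+		 * each register: [offset][instance count] followed by one
+		 * low/high 32-bit word pair per instance. */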
+ {
+ IMG_UINT32* pui32PowerBuffer;
+ IMG_UINT32 ui32NumOfRegs, ui32SamplePeriod;
+ IMG_UINT32 i,j;
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCounterBufferMemDesc, (void**)&pui32PowerBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_DebugPowerDataSeqShow: Failed to acquire buffer memory mapping (%u)", eError));
+ OSLockRelease(psDevInfo->hCounterDumpingLock);
+ return -EIO;
+ }
+
+ ui32NumOfRegs = *pui32PowerBuffer++;
+ ui32SamplePeriod = *pui32PowerBuffer++;
+
+ if (ui32NumOfRegs)
+ {
+ seq_printf(psSeqFile, "Power counter data for device id: %d\n", psDeviceNode->sDevId.i32UMIdentifier);
+ seq_printf(psSeqFile, "Sample period: 0x%08x\n", ui32SamplePeriod);
+
+ for (i = 0; i < ui32NumOfRegs; i++)
+ {
+ IMG_UINT32 ui32High, ui32Low;
+ IMG_UINT32 ui32RegOffset = *pui32PowerBuffer++;
+ IMG_UINT32 ui32NumOfInstances = *pui32PowerBuffer++;
+
+ PVR_ASSERT(ui32NumOfInstances);
+
+ seq_printf(psSeqFile, "0x%08x:", ui32RegOffset);
+
+ for (j = 0; j < ui32NumOfInstances; j++)
+ {
+ ui32Low = *pui32PowerBuffer++;
+ ui32High = *pui32PowerBuffer++;
+
+ seq_printf(psSeqFile, " 0x%016llx", (IMG_UINT64)ui32Low | (IMG_UINT64)ui32High << 32);
+ }
+
+ seq_printf(psSeqFile, "\n");
+ }
+ }
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psCounterBufferMemDesc);
+ }
+
+ OSLockRelease(psDevInfo->hCounterDumpingLock);
+ }
+
+ return eError;
+}
+
+static IMG_INT PowerDataSet(const char __user *pcBuffer,
+ size_t uiCount,
+ loff_t *puiPosition,
+ void *pvData)
+{
+ IMG_CHAR acDataBuffer[2];
+ PVRSRV_DATA* psPVRSRVData = (PVRSRV_DATA*) pvData;
+
+ if (puiPosition == NULL || *puiPosition != 0)
+ {
+ return -EIO;
+ }
+
+ if (uiCount == 0 || uiCount > ARRAY_SIZE(acDataBuffer))
+ {
+ return -EINVAL;
+ }
+
+ if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+ {
+ return -EINVAL;
+ }
+
+ if (acDataBuffer[uiCount - 1] != '\n')
+ {
+ return -EINVAL;
+ }
+
+ if (List_PVRSRV_DEVICE_NODE_Any(psPVRSRVData->psDeviceNodeList, _IsDevNodeNotInitialised))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not all device nodes were initialised when power counter data was requested!"));
+ return -EIO;
+ }
+
+	if ((acDataBuffer[0] == '1') && uiCount == 2)
+	{
+		List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+		                                   _SendPowerCounterCommand,
+		                                   RGXFWIF_PWR_COUNTER_DUMP_START);
+	}
+	else if ((acDataBuffer[0] == '0') && uiCount == 2)
+	{
+		List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+		                                   _SendPowerCounterCommand,
+		                                   RGXFWIF_PWR_COUNTER_DUMP_STOP);
+	}
+	else
+	{
+		return -EINVAL;
+	}
+
+ *puiPosition += uiCount;
+ return uiCount;
+}
+
+static struct seq_operations gsDebugPowerDataReadOps =
+{
+ .start = _DebugPowerDataSeqStart,
+ .stop = _DebugPowerDataSeqStop,
+ .next = _DebugPowerDataSeqNext,
+ .show = _DebugPowerDataSeqShow,
+};
+
+#endif /* SUPPORT_RGX && SUPPORT_POWER_SAMPLING_VIA_DEBUGFS */
+
+/*************************************************************************/ /*!
+ Status DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugStatusCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode,
+ va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugStatusSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugStatusCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugStatusSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugStatusSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugStatusCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static int _DebugStatusSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData == SEQ_START_TOKEN)
+ {
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+
+ if (psPVRSRVData != NULL)
+ {
+ switch (psPVRSRVData->eServicesState)
+ {
+ case PVRSRV_SERVICES_STATE_OK:
+ seq_printf(psSeqFile, "Driver Status: OK\n");
+ break;
+ case PVRSRV_SERVICES_STATE_BAD:
+ seq_printf(psSeqFile, "Driver Status: BAD\n");
+ break;
+ case PVRSRV_SERVICES_STATE_UNDEFINED:
+ seq_printf(psSeqFile, "Driver Status: UNDEFINED\n");
+ break;
+ default:
+ seq_printf(psSeqFile, "Driver Status: UNKNOWN (%d)\n", psPVRSRVData->eServicesState);
+ break;
+ }
+ }
+ }
+ else if (pvData != NULL)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+ IMG_CHAR *pszStatus = "";
+ IMG_CHAR *pszReason = "";
+ PVRSRV_DEVICE_HEALTH_STATUS eHealthStatus;
+ PVRSRV_DEVICE_HEALTH_REASON eHealthReason;
+
+ /* Update the health status now if possible... */
+ if (psDeviceNode->pfnUpdateHealthStatus)
+ {
+ psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_FALSE);
+ }
+ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+ eHealthReason = OSAtomicRead(&psDeviceNode->eHealthReason);
+
+ switch (eHealthStatus)
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszStatus = "OK"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszStatus = "NOT RESPONDING"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszStatus = "DEAD"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszStatus = "FAULT"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszStatus = "UNDEFINED"; break;
+ default: pszStatus = "UNKNOWN"; break;
+ }
+
+ switch (eHealthReason)
+ {
+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " (Asserted)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " (Poll failure)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " (Global Event Object timeouts rising)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " (KCCB offset invalid)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " (KCCB stalled)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " (Idling)"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " (Restarting)"; break;
+ default: pszReason = " (Unknown reason)"; break;
+ }
+
+ seq_printf(psSeqFile, "Firmware Status: %s%s\n", pszStatus, pszReason);
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ /*
+ * Guest drivers do not support the following functionality:
+ * - Perform actual on-chip fw tracing.
+ * - Collect actual on-chip GPU utilization stats.
+ * - Perform actual on-chip GPU power/dvfs management.
+ * - As a result no more information can be provided.
+ */
+ return 0;
+ }
+
+ /* Write other useful stats to aid the test cycle... */
+ if (psDeviceNode->pvDevice != NULL)
+ {
+#if defined(SUPPORT_RGX)
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* Calculate the number of HWR events in total across all the DMs... */
+ if (psRGXFWIfTraceBufCtl != NULL)
+ {
+ IMG_UINT32 ui32HWREventCount = 0;
+ IMG_UINT32 ui32CRREventCount = 0;
+ IMG_UINT32 ui32DMIndex;
+
+ for (ui32DMIndex = 0; ui32DMIndex < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; ui32DMIndex++)
+ {
+ ui32HWREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[ui32DMIndex];
+ ui32CRREventCount += psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[ui32DMIndex];
+ }
+
+ seq_printf(psSeqFile, "HWR Event Count: %d\n", ui32HWREventCount);
+ seq_printf(psSeqFile, "CRR Event Count: %d\n", ui32CRREventCount);
+ seq_printf(psSeqFile, "FWF Event Count: %d\n", psRGXFWIfTraceBufCtl->ui32FWFaults);
+ }
+
+ /* Write the number of APM events... */
+ seq_printf(psSeqFile, "APM Event Count: %d\n", psDevInfo->ui32ActivePMReqTotal);
+
+ /* Write the current GPU Utilisation values... */
+ if (psDevInfo->pfnGetGpuUtilStats &&
+ eHealthStatus == PVRSRV_DEVICE_HEALTH_STATUS_OK)
+ {
+ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ eError = psDevInfo->pfnGetGpuUtilStats(psDeviceNode,
+ ghGpuUtilUserDebugFS,
+ &sGpuUtilStats);
+
+ if ((eError == PVRSRV_OK) &&
+ ((IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative))
+ {
+ IMG_UINT64 util;
+ IMG_UINT32 rem;
+
+ util = 100 * (sGpuUtilStats.ui64GpuStatActiveHigh +
+ sGpuUtilStats.ui64GpuStatActiveLow);
+ util = OSDivide64(util, (IMG_UINT32)sGpuUtilStats.ui64GpuStatCumulative, &rem);
+
+ seq_printf(psSeqFile, "GPU Utilisation: %u%%\n", (IMG_UINT32)util);
+ }
+ else
+ {
+ seq_printf(psSeqFile, "GPU Utilisation: -\n");
+ }
+ }
+#endif
+ }
+ }
+
+ return 0;
+}
+
+static IMG_INT DebugStatusSet(const char __user *pcBuffer,
+ size_t uiCount,
+ loff_t *puiPosition,
+ void *pvData)
+{
+ IMG_CHAR acDataBuffer[6];
+
+ if (puiPosition == NULL || *puiPosition != 0)
+ {
+ return -EIO;
+ }
+
+ if (uiCount == 0 || uiCount > ARRAY_SIZE(acDataBuffer))
+ {
+ return -EINVAL;
+ }
+
+ if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+ {
+ return -EINVAL;
+ }
+
+ if (acDataBuffer[uiCount - 1] != '\n')
+ {
+ return -EINVAL;
+ }
+
+	if ((acDataBuffer[0] == 'k' || acDataBuffer[0] == 'K') && uiCount == 2)
+ {
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ psPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_BAD;
+ }
+ else
+ {
+ return -EINVAL;
+ }
+
+ *puiPosition += uiCount;
+ return uiCount;
+}
+
+static struct seq_operations gsDebugStatusReadOps =
+{
+ .start = _DebugStatusSeqStart,
+ .stop = _DebugStatusSeqStop,
+ .next = _DebugStatusSeqNext,
+ .show = _DebugStatusSeqShow,
+};
+
+/*************************************************************************/ /*!
+ Dump Debug DebugFS entry
+*/ /**************************************************************************/
+
+static void *_DebugDumpDebugCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugDumpDebugSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugDumpDebugCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugDumpDebugSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugDumpDebugSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugDumpDebugCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DumpDebugSeqPrintf(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...)
+{
+ struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+ va_end(ArgList);
+ seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugDumpDebugSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData != NULL && pvData != SEQ_START_TOKEN)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+ if (psDeviceNode->pvDevice != NULL)
+ {
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+ _DumpDebugSeqPrintf, psSeqFile);
+ }
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsDumpDebugReadOps =
+{
+ .start = _DebugDumpDebugSeqStart,
+ .stop = _DebugDumpDebugSeqStop,
+ .next = _DebugDumpDebugSeqNext,
+ .show = _DebugDumpDebugSeqShow,
+};
+
+#if defined(SUPPORT_RGX)
+/*************************************************************************/ /*!
+ Firmware Trace DebugFS entry
+*/ /**************************************************************************/
+static void *_DebugFWTraceCompare_AnyVaCb(PVRSRV_DEVICE_NODE *psDevNode, va_list va)
+{
+ loff_t *puiCurrentPosition = va_arg(va, loff_t *);
+ loff_t uiPosition = va_arg(va, loff_t);
+ loff_t uiCurrentPosition = *puiCurrentPosition;
+
+ (*puiCurrentPosition)++;
+
+ return (uiCurrentPosition == uiPosition) ? psDevNode : NULL;
+}
+
+static void *_DebugFWTraceSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ if (*puiPosition == 0)
+ {
+ return SEQ_START_TOKEN;
+ }
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugFWTraceCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _DebugFWTraceSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *_DebugFWTraceSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVRSRV_DATA *psPVRSRVData = (PVRSRV_DATA *)psSeqFile->private;
+ loff_t uiCurrentPosition = 1;
+
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ (*puiPosition)++;
+
+ return List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ _DebugFWTraceCompare_AnyVaCb,
+ &uiCurrentPosition,
+ *puiPosition);
+}
+
+static void _FWTraceSeqPrintf(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...)
+{
+ struct seq_file *psSeqFile = (struct seq_file *)pvDumpDebugFile;
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+ va_end(ArgList);
+ seq_printf(psSeqFile, "%s\n", szBuffer);
+}
+
+static int _DebugFWTraceSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData != NULL && pvData != SEQ_START_TOKEN)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)pvData;
+
+ if (psDeviceNode->pvDevice != NULL)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ RGXDumpFirmwareTrace(_FWTraceSeqPrintf, psSeqFile, psDevInfo);
+ }
+ }
+
+ return 0;
+}
+
+static struct seq_operations gsFWTraceReadOps =
+{
+ .start = _DebugFWTraceSeqStart,
+ .stop = _DebugFWTraceSeqStop,
+ .next = _DebugFWTraceSeqNext,
+ .show = _DebugFWTraceSeqShow,
+};
+#endif
+
+/*************************************************************************/ /*!
+ Debug level DebugFS entry
+*/ /**************************************************************************/
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static void *DebugLevelSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ if (*puiPosition == 0)
+ {
+ return psSeqFile->private;
+ }
+
+ return NULL;
+}
+
+static void DebugLevelSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+}
+
+static void *DebugLevelSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVR_UNREFERENCED_PARAMETER(psSeqFile);
+ PVR_UNREFERENCED_PARAMETER(pvData);
+ PVR_UNREFERENCED_PARAMETER(puiPosition);
+
+ return NULL;
+}
+
+static int DebugLevelSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ if (pvData != NULL)
+ {
+ IMG_UINT32 uiDebugLevel = *((IMG_UINT32 *)pvData);
+
+ seq_printf(psSeqFile, "%u\n", uiDebugLevel);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static struct seq_operations gsDebugLevelReadOps =
+{
+ .start = DebugLevelSeqStart,
+ .stop = DebugLevelSeqStop,
+ .next = DebugLevelSeqNext,
+ .show = DebugLevelSeqShow,
+};
+
+
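+/* Writing a new decimal mask (newline-terminated) to the debug_level
+ * DebugFS entry (typically /sys/kernel/debug/pvr/debug_level) updates
+ * gPVRDebugLevel at runtime. */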
+static IMG_INT DebugLevelSet(const char __user *pcBuffer,
+ size_t uiCount,
+ loff_t *puiPosition,
+ void *pvData)
+{
+ IMG_UINT32 *uiDebugLevel = (IMG_UINT32 *)pvData;
+ IMG_CHAR acDataBuffer[6];
+
+ if (puiPosition == NULL || *puiPosition != 0)
+ {
+ return -EIO;
+ }
+
+ if (uiCount == 0 || uiCount > ARRAY_SIZE(acDataBuffer))
+ {
+ return -EINVAL;
+ }
+
+ if (pvr_copy_from_user(acDataBuffer, pcBuffer, uiCount))
+ {
+ return -EINVAL;
+ }
+
+ if (acDataBuffer[uiCount - 1] != '\n')
+ {
+ return -EINVAL;
+ }
+
+	if (sscanf(acDataBuffer, "%u", uiDebugLevel) == 0)
+ {
+ return -EINVAL;
+ }
+
+	/* Mask off any bits above the valid debug flags: __builtin_ffsl() (a
+	 * GCC builtin) returns the 1-based position of DBGPRIV_LAST's bit, so
+	 * the mask keeps DBGPRIV_LAST and everything below it. */
+ (*uiDebugLevel) &= (1 << __builtin_ffsl(DBGPRIV_LAST)) - 1;
+
+ *puiPosition += uiCount;
+ return uiCount;
+}
+#endif /* defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON) */
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsVersionDebugFSEntry;
+
+static PPVR_DEBUGFS_ENTRY_DATA gpsStatusDebugFSEntry;
+static PPVR_DEBUGFS_ENTRY_DATA gpsDumpDebugDebugFSEntry;
+
+#if defined(SUPPORT_RGX)
+static PPVR_DEBUGFS_ENTRY_DATA gpsFWTraceDebugFSEntry;
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+static PPVR_DEBUGFS_ENTRY_DATA gpsPowerDataDebugFSEntry;
+#endif
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+static PPVR_DEBUGFS_ENTRY_DATA gpsDebugLevelDebugFSEntry;
+#endif
+
+int PVRDebugCreateDebugFSEntries(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ int iResult;
+
+ PVR_ASSERT(psPVRSRVData != NULL);
+
+ /*
+ * The DebugFS entries are designed to work in a single device system but
+ * this function will be called multiple times in a multi-device system.
+ * Return an error in this case.
+ */
+ if (gpsVersionDebugFSEntry)
+ {
+ return -EEXIST;
+ }
+
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+ if (SORgxGpuUtilStatsRegister(&ghGpuUtilUserDebugFS) != PVRSRV_OK)
+ {
+ return -ENOMEM;
+ }
+#endif
+
+ iResult = PVRDebugFSCreateEntry("version",
+ NULL,
+ &gsDebugVersionReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsVersionDebugFSEntry);
+ if (iResult != 0)
+ {
+ return iResult;
+ }
+
+ iResult = PVRDebugFSCreateEntry("status",
+ NULL,
+ &gsDebugStatusReadOps,
+ (PVRSRV_ENTRY_WRITE_FUNC *)DebugStatusSet,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsStatusDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveVersionEntry;
+ }
+
+ iResult = PVRDebugFSCreateEntry("debug_dump",
+ NULL,
+ &gsDumpDebugReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsDumpDebugDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveStatusEntry;
+ }
+
+#if defined(SUPPORT_RGX)
+ if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ iResult = PVRDebugFSCreateEntry("firmware_trace",
+ NULL,
+ &gsFWTraceReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsFWTraceDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveDumpDebugEntry;
+ }
+ }
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ iResult = PVRDebugFSCreateEntry("power_data",
+ NULL,
+ &gsDebugPowerDataReadOps,
+ (PVRSRV_ENTRY_WRITE_FUNC *)PowerDataSet,
+ NULL,
+ NULL,
+ psPVRSRVData,
+ &gpsPowerDataDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemoveFWTraceLogEntry;
+ }
+#endif
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ iResult = PVRDebugFSCreateEntry("debug_level",
+ NULL,
+ &gsDebugLevelReadOps,
+ (PVRSRV_ENTRY_WRITE_FUNC *)DebugLevelSet,
+ NULL,
+ NULL,
+ &gPVRDebugLevel,
+ &gpsDebugLevelDebugFSEntry);
+ if (iResult != 0)
+ {
+ goto ErrorRemovePowerDataEntry;
+ }
+#endif
+
+ return 0;
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ErrorRemovePowerDataEntry:
+#endif
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS) && defined(SUPPORT_RGX)
+ PVRDebugFSRemoveEntry(&gpsPowerDataDebugFSEntry);
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ErrorRemoveFWTraceLogEntry:
+#endif
+#if defined(SUPPORT_RGX)
+	if (gpsFWTraceDebugFSEntry)
+	{
+		PVRDebugFSRemoveEntry(&gpsFWTraceDebugFSEntry);
+	}
+#endif
+
+#if defined(SUPPORT_RGX)
+ErrorRemoveDumpDebugEntry:
+#endif
+ if (gpsDumpDebugDebugFSEntry)
+ {
+ PVRDebugFSRemoveEntry(&gpsDumpDebugDebugFSEntry);
+ }
+ErrorRemoveStatusEntry:
+ PVRDebugFSRemoveEntry(&gpsStatusDebugFSEntry);
+ErrorRemoveVersionEntry:
+ PVRDebugFSRemoveEntry(&gpsVersionDebugFSEntry);
+
+ return iResult;
+}
+
+void PVRDebugRemoveDebugFSEntries(void)
+{
+#if defined(SUPPORT_RGX) && !defined(NO_HARDWARE)
+ if (ghGpuUtilUserDebugFS != NULL)
+ {
+ SORgxGpuUtilStatsUnregister(ghGpuUtilUserDebugFS);
+ ghGpuUtilUserDebugFS = NULL;
+ }
+#endif
+
+#if defined(DEBUG) || defined(PVR_DPF_ADHOC_DEBUG_ON)
+ if (gpsDebugLevelDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsDebugLevelDebugFSEntry);
+ }
+#endif
+
+#if defined(SUPPORT_RGX)
+ if (gpsFWTraceDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsFWTraceDebugFSEntry);
+ }
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+	if (gpsPowerDataDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsPowerDataDebugFSEntry);
+ }
+#endif
+#endif
+
+ if (gpsDumpDebugDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsDumpDebugDebugFSEntry);
+ }
+
+ if (gpsStatusDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsStatusDebugFSEntry);
+ }
+
+ if (gpsVersionDebugFSEntry != NULL)
+ {
+ PVRDebugFSRemoveEntry(&gpsVersionDebugFSEntry);
+ }
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_debug.h b/drivers/gpu/drm/img-rogue/1.10/pvr_debug.h
new file mode 100644
index 00000000000000..0f2a7dd00a3290
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_debug.h
@@ -0,0 +1,580 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR Debug Declarations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides debug functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __PVR_DEBUG_H__
+#define __PVR_DEBUG_H__
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if defined(_MSC_VER)
+# define MSC_SUPPRESS_4127 __pragma(warning(suppress:4127))
+#else
+# define MSC_SUPPRESS_4127
+#endif
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#define PVR_MAX_DEBUG_MESSAGE_LEN (512) /*!< Max length of a Debug Message */
+
+/* These are privately used by pvr_debug, use the PVR_DBG_ defines instead */
+#define DBGPRIV_FATAL 0x001UL /*!< Debug-Fatal. Privately used by pvr_debug. */
+#define DBGPRIV_ERROR 0x002UL /*!< Debug-Error. Privately used by pvr_debug. */
+#define DBGPRIV_WARNING 0x004UL /*!< Debug-Warning. Privately used by pvr_debug. */
+#define DBGPRIV_MESSAGE 0x008UL /*!< Debug-Message. Privately used by pvr_debug. */
+#define DBGPRIV_VERBOSE 0x010UL /*!< Debug-Verbose. Privately used by pvr_debug. */
+#define DBGPRIV_CALLTRACE 0x020UL /*!< Debug-CallTrace. Privately used by pvr_debug. */
+#define DBGPRIV_ALLOC 0x040UL /*!< Debug-Alloc. Privately used by pvr_debug. */
+#define DBGPRIV_BUFFERED 0x080UL /*!< Debug-Buffered. Privately used by pvr_debug. */
+#define DBGPRIV_DEBUG 0x100UL /*!< Debug-AdHoc-Debug. Never submitted. Privately used by pvr_debug. */
+#define DBGPRIV_DBGDRV_MESSAGE 0x200UL /*!< Debug-DbgDrivMessage. Privately used by pvr_debug. */
+#define DBGPRIV_LAST 0x200UL /*!< Always set to highest mask value. Privately used by pvr_debug. */
+
+#if !defined(PVRSRV_NEED_PVR_ASSERT) && defined(DEBUG)
+#define PVRSRV_NEED_PVR_ASSERT
+#endif
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) && !defined(PVRSRV_NEED_PVR_DPF)
+#define PVRSRV_NEED_PVR_DPF
+#endif
+
+#if !defined(PVRSRV_NEED_PVR_TRACE) && (defined(DEBUG) || defined(TIMING))
+#define PVRSRV_NEED_PVR_TRACE
+#endif
+
+#if !defined(DOXYGEN)
+#if defined(__KERNEL__)
+ IMG_EXPORT const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError);
+# define PVRSRVGETERRORSTRING PVRSRVGetErrorStringKM
+#else
+/*************************************************************************/ /*
+PVRSRVGetErrorString
+Returns a string describing the provided PVRSRV_ERROR code
+NB No doxygen comments provided as this function does not require porting
+ for other operating systems
+*/ /**************************************************************************/
+ const IMG_CHAR *PVRSRVGetErrorString(PVRSRV_ERROR eError);
+# define PVRSRVGETERRORSTRING PVRSRVGetErrorString
+#endif
+#endif
+
+/* PVR_ASSERT() and PVR_DBG_BREAK handling */
+
+#if defined(PVRSRV_NEED_PVR_ASSERT) || defined(DOXYGEN)
+
+/* Unfortunately the Klocwork static analysis checker doesn't understand our
+ * ASSERT macros, so it reports lots of false positives. Defining our assert
+ * macros in a special way when the code is analysed by Klocwork avoids
+ * this. */
+#if defined(__KLOCWORK__)
+ #define PVR_ASSERT(x) do { if (!(x)) abort(); } while (0)
+#else /* ! __KLOCWORK__ */
+
+#if defined(_WIN32)
+#define PVR_ASSERT(expr) do \
+ { \
+ MSC_SUPPRESS_4127 \
+ if (unlikely(!(expr))) \
+ { \
+ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__,\
+ "*** Debug assertion failed!"); \
+ __debugbreak(); \
+ } \
+ MSC_SUPPRESS_4127 \
+ } while (0)
+
+#else
+
+#if defined(LINUX) && defined(__KERNEL__)
+#include <linux/kernel.h>
+#include <linux/bug.h>
+
+/* In Linux kernel mode, use WARN_ON() directly. This produces the
+ correct filename and line number in the warning message. */
+#define PVR_ASSERT(EXPR) do \
+ { \
+ if (unlikely(!(EXPR))) \
+ { \
+ PVRSRVDebugPrintf(DBGPRIV_FATAL, __FILE__, __LINE__, \
+ "Debug assertion failed!"); \
+ WARN_ON(1); \
+ } \
+ } while (0)
+
+#else /* defined(LINUX) && defined(__KERNEL__) */
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugAssertFail
+@Description Indicate to the user that a debug assertion has failed and
+ prevent the program from continuing.
+ Invoked from the macro PVR_ASSERT().
+@Input pszFile The name of the source file where the assertion failed
+@Input ui32Line The line number of the failed assertion
+@Input pszAssertion String describing the assertion
+@Return NEVER!
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV __noreturn
+PVRSRVDebugAssertFail(const IMG_CHAR *pszFile,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszAssertion);
+
+#define PVR_ASSERT(EXPR) do \
+ { \
+ if (unlikely(!(EXPR))) \
+ PVRSRVDebugAssertFail(__FILE__, __LINE__, #EXPR); \
+ } while (0)
+
+#endif /* defined(LINUX) && defined(__KERNEL__) */
+#endif /* defined(_WIN32) */
+#endif /* defined(__KLOCWORK__) */
+
+#if defined(__KLOCWORK__)
+ #define PVR_DBG_BREAK do { abort(); } while (0)
+#else
+ #if defined (WIN32)
+ #define PVR_DBG_BREAK __debugbreak(); /*!< Implementation of PVR_DBG_BREAK for (non-WinCE) Win32 */
+ #else
+ #if defined(PVR_DBG_BREAK_ASSERT_FAIL)
+ /*!< Implementation of PVR_DBG_BREAK that maps onto PVRSRVDebugAssertFail */
+ #if defined(_WIN32)
+ #define PVR_DBG_BREAK DBG_BREAK
+ #else
+ #if defined(LINUX) && defined(__KERNEL__)
+ #define PVR_DBG_BREAK BUG()
+ #else
+ #define PVR_DBG_BREAK PVRSRVDebugAssertFail(__FILE__, __LINE__, "PVR_DBG_BREAK")
+ #endif
+ #endif
+ #else
+ /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+ #define PVR_DBG_BREAK
+ #endif
+ #endif
+#endif
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+	/* Unfortunately the Klocwork static analysis checker doesn't understand our
+	 * ASSERT macros, so it reports lots of false positives. Defining our assert
+	 * macros in a special way when the code is analysed by Klocwork avoids
+	 * this. */
+ #if defined(__KLOCWORK__)
+ #define PVR_ASSERT(EXPR) do { if (unlikely(!(EXPR))) abort(); } while (0)
+ #else
+ #define PVR_ASSERT(EXPR) (void)(EXPR) /*!< Null Implementation of PVR_ASSERT (does nothing) */
+ #endif
+
+ #define PVR_DBG_BREAK /*!< Null Implementation of PVR_DBG_BREAK (does nothing) */
+
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+
+/* PVR_DPF() handling */
+
+#if defined(PVRSRV_NEED_PVR_DPF) || defined(DOXYGEN)
+
+ /* New logging mechanism */
+ #define PVR_DBG_FATAL DBGPRIV_FATAL /*!< Debug level passed to PVRSRVDebugPrintf() for fatal errors. */
+ #define PVR_DBG_ERROR DBGPRIV_ERROR /*!< Debug level passed to PVRSRVDebugPrintf() for non-fatal errors. */
+ #define PVR_DBG_WARNING DBGPRIV_WARNING /*!< Debug level passed to PVRSRVDebugPrintf() for warnings. */
+ #define PVR_DBG_MESSAGE DBGPRIV_MESSAGE /*!< Debug level passed to PVRSRVDebugPrintf() for information only. */
+ #define PVR_DBG_VERBOSE DBGPRIV_VERBOSE /*!< Debug level passed to PVRSRVDebugPrintf() for very low-priority debug. */
+ #define PVR_DBG_CALLTRACE DBGPRIV_CALLTRACE
+ #define PVR_DBG_ALLOC DBGPRIV_ALLOC
+ #define PVR_DBG_BUFFERED DBGPRIV_BUFFERED /*!< Debug level passed to PVRSRVDebugPrintf() when debug should be written to the debug circular buffer. */
+ #define PVR_DBG_DEBUG DBGPRIV_DEBUG
+ #define PVR_DBGDRIV_MESSAGE DBGPRIV_DBGDRV_MESSAGE
+
+ /* These levels are always on with PVRSRV_NEED_PVR_DPF */
+ #define __PVR_DPF_0x001UL(...) PVRSRVDebugPrintf(DBGPRIV_FATAL, __VA_ARGS__)
+ #define __PVR_DPF_0x002UL(...) PVRSRVDebugPrintf(DBGPRIV_ERROR, __VA_ARGS__)
+ #define __PVR_DPF_0x080UL(...) PVRSRVDebugPrintf(DBGPRIV_BUFFERED, __VA_ARGS__)
+
+	/*
+	  The AdHoc-Debug level is only supported when enabled in the local
+	  build environment and may need to be used in both debug and release
+	  builds. A compile error is generated in formal builds if ad-hoc
+	  debug output is left checked in.
+	*/
+#if defined(PVR_DPF_ADHOC_DEBUG_ON)
+ #define __PVR_DPF_0x100UL(...) PVRSRVDebugPrintf(DBGPRIV_DEBUG, __VA_ARGS__)
+#else
+ /* Use an undefined token here to stop compilation dead in the offending module */
+ #define __PVR_DPF_0x100UL(...) __ERROR__PVR_DBG_DEBUG_is_in_use_but_has_not_been_enabled__Note_Debug_DPF_must_not_be_checked_in__Define_PVR_DPF_ADHOC_DEBUG_ON_for_testing
+#endif
+
+ /* Some are compiled out completely in release builds */
+#if defined(DEBUG) || defined(DOXYGEN)
+ #define __PVR_DPF_0x004UL(...) PVRSRVDebugPrintf(DBGPRIV_WARNING, __VA_ARGS__)
+ #define __PVR_DPF_0x008UL(...) PVRSRVDebugPrintf(DBGPRIV_MESSAGE, __VA_ARGS__)
+ #define __PVR_DPF_0x010UL(...) PVRSRVDebugPrintf(DBGPRIV_VERBOSE, __VA_ARGS__)
+ #define __PVR_DPF_0x020UL(...) PVRSRVDebugPrintf(DBGPRIV_CALLTRACE, __VA_ARGS__)
+ #define __PVR_DPF_0x040UL(...) PVRSRVDebugPrintf(DBGPRIV_ALLOC, __VA_ARGS__)
+ #define __PVR_DPF_0x200UL(...) PVRSRVDebugPrintf(DBGPRIV_DBGDRV_MESSAGE, __VA_ARGS__)
+#else
+ #define __PVR_DPF_0x004UL(...)
+ #define __PVR_DPF_0x008UL(...)
+ #define __PVR_DPF_0x010UL(...)
+ #define __PVR_DPF_0x020UL(...)
+ #define __PVR_DPF_0x040UL(...)
+ #define __PVR_DPF_0x200UL(...)
+#endif
+
+ /* Translate the different log levels to separate macros
+ * so they can each be compiled out.
+ */
+#if defined(DEBUG)
+ #define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl (__FILE__, __LINE__, __VA_ARGS__)
+#else
+ #define __PVR_DPF(lvl, ...) __PVR_DPF_ ## lvl ("", 0, __VA_ARGS__)
+#endif
+
+ /* Get rid of the double bracketing */
+ #define PVR_DPF(x) __PVR_DPF x
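+	/* Illustrative example (annotation added in review, not in the original
+	 * sources): the double brackets let one macro argument carry a variadic
+	 * call. The argument of PVR_DPF() is fully macro-expanded before
+	 * __PVR_DPF pastes the level token, so PVR_DBG_ERROR becomes 0x002UL
+	 * and the call resolves to the __PVR_DPF_0x002UL() handler:
+	 *
+	 *     PVR_DPF((PVR_DBG_ERROR, "Request failed with error %d", iErr));
+	 */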
+
+ #define PVR_LOG_ERROR(_rc, _call) \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+ #define PVR_LOG_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGR_IF_NOMEM(_expr, _call) do \
+ { if (unlikely(_expr == NULL)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+ return (PVRSRV_ERROR_OUT_OF_MEMORY); }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do \
+ { if (unlikely(_expr == NULL)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (PVRSRV_ERROR_OUT_OF_MEMORY) in %s()", _call, __func__)); \
+ _err = PVRSRV_ERROR_OUT_OF_MEMORY; \
+ goto _go; } \
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGR_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ return (_rc); }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGRN_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ return; }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGG_IF_ERROR(_rc, _call, _go) do \
+ { if (unlikely(_rc != PVRSRV_OK)) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ goto _go; }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOG_IF_FALSE(_expr, _msg) do \
+ { if (unlikely(!(_expr))) \
+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do \
+ { if (unlikely(!(_expr))) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+ return (_rc); }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+ #define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do \
+ { if (unlikely(!(_expr))) { \
+ PVR_DPF((PVR_DBG_ERROR, "%s in %s()", _msg, __func__)); \
+ goto _go; }\
+ MSC_SUPPRESS_4127\
+ } while (0)
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugPrintf
+@Description Output a debug message to the user, using an OS-specific
+ method, to a log or console which can be read by developers
+ Invoked from the macro PVR_DPF().
+@Input ui32DebugLevel The debug level of the message. This can
+ be used to restrict the output of debug
+ messages based on their severity.
+ If this is PVR_DBG_BUFFERED, the message
+ should be written into a debug circular
+ buffer instead of being output immediately
+ (useful when performance would otherwise
+ be adversely affected).
+ The debug circular buffer shall only be
+ output when PVRSRVDebugPrintfDumpCCB() is
+ called.
+@Input pszFileName The source file containing the code that is
+ generating the message
+@Input ui32Line The line number in the source file
+@Input pszFormat The formatted message string
+@Input ... Zero or more arguments for use by the
+ formatted string
+@Return None
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintf(IMG_UINT32 ui32DebugLevel,
+ const IMG_CHAR *pszFileName,
+ IMG_UINT32 ui32Line,
+ const IMG_CHAR *pszFormat,
+ ...) __printf(4, 5);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDebugPrintfDumpCCB
+@Description When PVRSRVDebugPrintf() is called with the ui32DebugLevel
+ specified as DBGPRIV_BUFFERED, the debug shall be written to
+ the debug circular buffer instead of being output immediately.
+ (This could be used to obtain debug without incurring a
+ performance hit by printing it at that moment).
+ This function shall dump the contents of that debug circular
+ buffer to be output in an OS-specific method to a log or
+ console which can be read by developers.
+@Return None
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVDebugPrintfDumpCCB(void);
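+/* Usage sketch (annotation added in review, not in the original sources;
+ * the message and variable are hypothetical): buffered output avoids the
+ * cost of immediate printing on hot paths and is emitted later in one go:
+ *
+ *     PVR_DPF((PVR_DBG_BUFFERED, "sync value now %u", ui32Value));
+ *     ...
+ *     PVRSRVDebugPrintfDumpCCB();
+ */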
+
+#else /* defined(PVRSRV_NEED_PVR_DPF) */
+
+ #define PVR_DPF(X) /*!< Null Implementation of PowerVR Debug Printf (does nothing) */
+
+ #define PVR_LOG_ERROR(_rc, _call) (void)(_rc)
+ #define PVR_LOG_IF_ERROR(_rc, _call) (void)(_rc)
+
+ #define PVR_LOGR_IF_NOMEM(_expr, _call) do { if (unlikely(_expr == NULL)) { return (PVRSRV_ERROR_OUT_OF_MEMORY); } MSC_SUPPRESS_4127 } while (0)
+ #define PVR_LOGG_IF_NOMEM(_expr, _call, _err, _go) do { if (unlikely(_expr == NULL)) { _err = PVRSRV_ERROR_OUT_OF_MEMORY; goto _go; } MSC_SUPPRESS_4127 } while (0)
+ #define PVR_LOGR_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+ #define PVR_LOGRN_IF_ERROR(_rc, _call) do { if (unlikely(_rc != PVRSRV_OK)) { return; } MSC_SUPPRESS_4127 } while(0)
+ #define PVR_LOGG_IF_ERROR(_rc, _call, _go) do { if (unlikely(_rc != PVRSRV_OK)) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+ #define PVR_LOG_IF_FALSE(_expr, _msg) (void)(_expr)
+ #define PVR_LOGR_IF_FALSE(_expr, _msg, _rc) do { if (unlikely(!(_expr))) { return (_rc); } MSC_SUPPRESS_4127 } while(0)
+ #define PVR_LOGG_IF_FALSE(_expr, _msg, _go) do { if (unlikely(!(_expr))) { goto _go; } MSC_SUPPRESS_4127 } while(0)
+
+ #undef PVR_DPF_FUNCTION_TRACE_ON
+
+#endif /* defined(PVRSRV_NEED_PVR_DPF) */
+
+
+#if defined(DEBUG)
+ #define PVR_LOG_WARN(_rc, _call) \
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__));
+
+ #define PVR_LOG_WARN_IF_ERROR(_rc, _call) do \
+ { if (unlikely(_rc != PVRSRV_OK)) \
+ PVR_DPF((PVR_DBG_WARNING, "%s() failed (%s) in %s()", _call, PVRSRVGETERRORSTRING(_rc), __func__)); \
+ MSC_SUPPRESS_4127\
+ } while (0)
+#else
+ #define PVR_LOG_WARN(_rc, _call) (void)(_rc)
+ #define PVR_LOG_WARN_IF_ERROR(_rc, _call) (void)(_rc)
+#endif
+
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+
+ #define PVR_DPF_ENTERED \
+ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered", __func__, __LINE__))
+
+ #define PVR_DPF_ENTERED1(p1) \
+ PVR_DPF((PVR_DBG_CALLTRACE, "|-> %s:%d entered (0x%lx)", __func__, __LINE__, ((unsigned long)p1)))
+
+ #define PVR_DPF_RETURN_RC(a) \
+ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d", __func__, __LINE__, (_r))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN_RC1(a,p1) \
+ do { int _r = (a); PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned %d (0x%lx)", __func__, __LINE__, (_r), ((unsigned long)p1))); return (_r); MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN_VAL(a) \
+ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned with value", __func__, __LINE__ )); return (a); MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN_OK \
+ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned ok", __func__, __LINE__)); return PVRSRV_OK; MSC_SUPPRESS_4127 } while (0)
+
+ #define PVR_DPF_RETURN \
+ do { PVR_DPF((PVR_DBG_CALLTRACE, "<-| %s:%d returned", __func__, __LINE__)); return; MSC_SUPPRESS_4127 } while (0)
+
+ #if !defined(DEBUG)
+ #error PVR DPF Function trace enabled in release build, rectify
+ #endif
+
+#else /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+ #define PVR_DPF_ENTERED
+ #define PVR_DPF_ENTERED1(p1)
+ #define PVR_DPF_RETURN_RC(a) return (a)
+ #define PVR_DPF_RETURN_RC1(a,p1) return (a)
+ #define PVR_DPF_RETURN_VAL(a) return (a)
+ #define PVR_DPF_RETURN_OK return PVRSRV_OK
+ #define PVR_DPF_RETURN return
+
+#endif /* defined(PVR_DPF_FUNCTION_TRACE_ON) */
+
+#if defined(__KERNEL__) || defined(DOXYGEN) || defined(__QNXNTO__)
+/* Use PVR_DPF() unless the message is needed in release builds */
+#ifdef PVR_DISABLE_LOGGING
+#define PVR_LOG(X)
+#else
+#define PVR_LOG(X) PVRSRVReleasePrintf X;
+#endif
+
+/*************************************************************************/ /*!
+@Function PVRSRVReleasePrintf
+@Description Output an important message, using an OS-specific method,
+ to a log or console which can be read by developers in
+ release builds.
+ Invoked from the macro PVR_LOG().
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+@Return None
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVReleasePrintf(const IMG_CHAR *pszFormat, ...) __printf(1, 2);
+#endif
+
+/* PVR_TRACE() handling */
+
+#if defined(PVRSRV_NEED_PVR_TRACE) || defined(DOXYGEN)
+
+ #define PVR_TRACE(X) PVRSRVTrace X /*!< PowerVR Debug Trace Macro */
+ /* Empty string implementation that is -O0 build friendly */
+ #define PVR_TRACE_EMPTY_LINE() PVR_TRACE(("%s", ""))
+
+/*************************************************************************/ /*!
+@Function PVRTrace
+@Description Output a debug message to the user
+ Invoked from the macro PVR_TRACE().
+@Input pszFormat The message format string
+@Input ... Zero or more arguments for use by the format string
+*/ /**************************************************************************/
+IMG_EXPORT void IMG_CALLCONV PVRSRVTrace(const IMG_CHAR* pszFormat, ... )
+ __printf(1, 2);
+
+#else /* defined(PVRSRV_NEED_PVR_TRACE) */
+ /*! Null Implementation of PowerVR Debug Trace Macro (does nothing) */
+ #define PVR_TRACE(X)
+
+#endif /* defined(PVRSRV_NEED_PVR_TRACE) */
+
+
+#if defined(PVRSRV_NEED_PVR_ASSERT)
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_32BITS)
+#endif
+ INLINE static IMG_UINT32 TRUNCATE_64BITS_TO_32BITS(IMG_UINT64 uiInput)
+ {
+ IMG_UINT32 uiTruncated;
+
+ uiTruncated = (IMG_UINT32)uiInput;
+ PVR_ASSERT(uiInput == uiTruncated);
+ return uiTruncated;
+ }
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_64BITS_TO_SIZE_T)
+#endif
+ INLINE static size_t TRUNCATE_64BITS_TO_SIZE_T(IMG_UINT64 uiInput)
+ {
+ size_t uiTruncated;
+
+ uiTruncated = (size_t)uiInput;
+ PVR_ASSERT(uiInput == uiTruncated);
+ return uiTruncated;
+ }
+
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(TRUNCATE_SIZE_T_TO_32BITS)
+#endif
+ INLINE static IMG_UINT32 TRUNCATE_SIZE_T_TO_32BITS(size_t uiInput)
+ {
+ IMG_UINT32 uiTruncated;
+
+ uiTruncated = (IMG_UINT32)uiInput;
+ PVR_ASSERT(uiInput == uiTruncated);
+ return uiTruncated;
+ }
+
+
+#else /* defined(PVRSRV_NEED_PVR_ASSERT) */
+ #define TRUNCATE_64BITS_TO_32BITS(expr) ((IMG_UINT32)(expr))
+ #define TRUNCATE_64BITS_TO_SIZE_T(expr) ((size_t)(expr))
+ #define TRUNCATE_SIZE_T_TO_32BITS(expr) ((IMG_UINT32)(expr))
+#endif /* defined(PVRSRV_NEED_PVR_ASSERT) */
+
+/* Macros used to trace calls */
+#if defined(DEBUG)
+ #define PVR_DBG_FILELINE , (__FILE__), (__LINE__)
+ #define PVR_DBG_FILELINE_PARAM , const IMG_CHAR *pszaFile, IMG_UINT32 ui32Line
+ #define PVR_DBG_FILELINE_ARG , pszaFile, ui32Line
+ #define PVR_DBG_FILELINE_FMT " %s:%u"
+ #define PVR_DBG_FILELINE_UNREF() do { PVR_UNREFERENCED_PARAMETER(pszaFile); \
+ PVR_UNREFERENCED_PARAMETER(ui32Line); } while(0)
+#else
+ #define PVR_DBG_FILELINE
+ #define PVR_DBG_FILELINE_PARAM
+ #define PVR_DBG_FILELINE_ARG
+ #define PVR_DBG_FILELINE_FMT
+ #define PVR_DBG_FILELINE_UNREF()
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_DEBUG_H__ */
+
+/******************************************************************************
+ End of file (pvr_debug.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.c b/drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.c
new file mode 100644
index 00000000000000..425f00151c2e8a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.c
@@ -0,0 +1,1170 @@
+/*************************************************************************/ /*!
+@File
+@Title Functions for creating debugfs directories and entries.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_debug.h"
+#include "pvr_debugfs.h"
+#include "allocmem.h"
+
+#ifdef MODULE
+#define PVR_DEBUGFS_DIR_NAME (PVR_DRM_NAME "_1_10")
+#else
+#define PVR_DEBUGFS_DIR_NAME PVR_DRM_NAME
+#endif
+
+/* Define to set the PVR_DPF debug output level for pvr_debugfs.
+ * Normally, leave this set to PVR_DBGDRIV_MESSAGE, but when debugging
+ * you can temporarily change this to PVR_DBG_ERROR.
+ */
+#if defined(PVRSRV_NEED_PVR_DPF)
+#define PVR_DEBUGFS_PVR_DPF_LEVEL PVR_DBGDRIV_MESSAGE
+#else
+#define PVR_DEBUGFS_PVR_DPF_LEVEL 0
+#endif
+
+static struct dentry *gpsPVRDebugFSEntryDir;
+
+/* Lock used when adjusting refCounts and deleting entries */
+static struct mutex gDebugFSLock;
+
+/*************************************************************************/ /*!
+ Statistic entry read functions
+*/ /**************************************************************************/
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+typedef struct _PVR_DEBUGFS_RAW_DRIVER_STAT_
+{
+ OS_STATS_PRINT_FUNC *pfStatsPrint;
+ PPVR_DEBUGFS_ENTRY_DATA pvDebugFsEntry;
+} PVR_DEBUGFS_RAW_DRIVER_STAT;
+#endif
+
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_
+{
+ void *pvData;
+ OS_STATS_PRINT_FUNC *pfnStatsPrint;
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount;
+ PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount;
+ IMG_UINT32 ui32RefCount;
+ PPVR_DEBUGFS_ENTRY_DATA pvDebugFSEntry;
+} PVR_DEBUGFS_DRIVER_STAT;
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_
+{
+ struct dentry *psDir;
+ PPVR_DEBUGFS_DIR_DATA psParentDir;
+ IMG_UINT32 ui32RefCount;
+} PVR_DEBUGFS_DIR_DATA;
+
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_
+{
+ struct dentry *psEntry;
+ PVR_DEBUGFS_DIR_DATA *psParentDir;
+ IMG_UINT32 ui32RefCount;
+ PVR_DEBUGFS_DRIVER_STAT *psStatData;
+} PVR_DEBUGFS_ENTRY_DATA;
+
+typedef struct _PVR_DEBUGFS_BLOB_ENTRY_DATA_
+{
+ struct dentry *psEntry;
+ PVR_DEBUGFS_DIR_DATA *psParentDir;
+ struct debugfs_blob_wrapper blob;
+} PVR_DEBUGFS_BLOB_ENTRY_DATA;
+
+typedef struct _PVR_DEBUGFS_PRIV_DATA_
+{
+ const struct seq_operations *psReadOps;
+ PVRSRV_ENTRY_WRITE_FUNC *pfnWrite;
+ void *pvData;
+ PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfIncPvDataRefCnt;
+ PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfDecPvDataRefCnt;
+ IMG_BOOL bValid;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+} PVR_DEBUGFS_PRIV_DATA;
+
+static IMG_BOOL _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry);
+static void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA **ppsDirEntry);
+static IMG_BOOL _RefDebugFSEntryNoLock(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry);
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry);
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry);
+
+static void _StatsSeqPrintf(void *pvFile, const IMG_CHAR *pszFormat, ...)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list ArgList;
+
+ va_start(ArgList, pszFormat);
+ vsnprintf(szBuffer, PVR_MAX_DEBUG_MESSAGE_LEN, pszFormat, ArgList);
+ seq_printf((struct seq_file *)pvFile, "%s", szBuffer);
+ va_end(ArgList);
+}
+
+static void *_DebugFSStatisticSeqStart(struct seq_file *psSeqFile, loff_t *puiPosition)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+
+ if (psStatData)
+ {
+ /* take reference on psStatData (for duration of stat iteration) */
+ if (!_RefStatEntry(psStatData))
+ {
+			PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for '%s' but failed"
+				" to take ref on stat entry, returning NULL", __func__,
+				psStatData->pvDebugFSEntry->psEntry->d_iname));
+ return NULL;
+ }
+
+ if (*puiPosition == 0)
+ {
+ return psStatData;
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static void _DebugFSStatisticSeqStop(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (psStatData)
+ {
+ /* drop ref taken on stat memory, and if it is now zero, be sure we don't try to read it again */
+ if (psStatData->ui32RefCount > 0)
+ {
+ /* drop reference on psStatData (held for duration of stat iteration) */
+ _UnrefAndMaybeDestroyStatEntry((void*)psStatData);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVR_DEBUGFS_DRIVER_STAT has zero refcount",
+ __func__));
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __func__));
+ }
+}
+
+static void *_DebugFSStatisticSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)psSeqFile->private;
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (psStatData)
+ {
+ if (psStatData->pvData)
+ {
+ if (puiPosition)
+ {
+ (*puiPosition)++;
+ }
+ else
+ {
+				PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called with puiPosition NULL", __func__));
+ }
+ }
+ else
+ {
+			/* psStatData->pvData is NULL. NB: this is valid if the stat has
+			 * no structure associated with it (e.g. driver_stats, which
+			 * prints totals stored in a number of global variables) */
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static int _DebugFSStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData = (PVR_DEBUGFS_DRIVER_STAT *)pvData;
+
+ if (psStatData != NULL)
+ {
+ psStatData->pfnStatsPrint((void*)psSeqFile, psStatData->pvData, _StatsSeqPrintf);
+ return 0;
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is NULL, returning -ENODATA(%d)", __func__, -ENODATA));
+ }
+
+ return -ENODATA;
+}
+
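+/* These seq_operations implement a single-shot sequence: start() yields the
+ * stat entry only at position 0 and next() always returns NULL, so show()
+ * prints the whole statistic in one pass per read of the file. */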
+static struct seq_operations gsDebugFSStatisticReadOps =
+{
+ .start = _DebugFSStatisticSeqStart,
+ .stop = _DebugFSStatisticSeqStop,
+ .next = _DebugFSStatisticSeqNext,
+ .show = _DebugFSStatisticSeqShow,
+};
+
+
+/*************************************************************************/ /*!
+ Common internal API
+*/ /**************************************************************************/
+
+static int _DebugFSFileOpen(struct inode *psINode, struct file *psFile)
+{
+ PVR_DEBUGFS_PRIV_DATA *psPrivData;
+ int iResult = -EIO;
+ IMG_BOOL bRefRet;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+
+ mutex_lock(&gDebugFSLock);
+
+ PVR_ASSERT(psINode);
+ psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+ if (psPrivData)
+ {
+ /* Check that psPrivData is still valid to use */
+ if (psPrivData->bValid)
+ {
+ psDebugFSEntry = psPrivData->psDebugFSEntry;
+
+ /* Take ref on stat entry before opening seq file - this ref will be dropped if we
+ * fail to open the seq file or when we close it
+ */
+ if (psDebugFSEntry)
+ {
+ bRefRet = _RefDebugFSEntryNoLock(psDebugFSEntry);
+ mutex_unlock(&gDebugFSLock);
+				if (bRefRet)
+				{
+					/* Take the pvData ref only after the entry ref is
+					 * held, so a failed entry ref cannot leak a pvData
+					 * ref that would never be dropped */
+					if (psPrivData->pfIncPvDataRefCnt)
+					{
+						psPrivData->pfIncPvDataRefCnt(psPrivData->pvData);
+					}
+ iResult = seq_open(psFile, psPrivData->psReadOps);
+ if (iResult == 0)
+ {
+ struct seq_file *psSeqFile = psFile->private_data;
+
+ psSeqFile->private = psPrivData->pvData;
+ }
+ else
+ {
+ if (psPrivData->pfDecPvDataRefCnt)
+ {
+ psPrivData->pfDecPvDataRefCnt(psPrivData->pvData);
+ }
+ /* Drop ref if we failed to open seq file */
+ _UnrefAndMaybeDestroyDebugFSEntry(&psPrivData->psDebugFSEntry);
+						PVR_DPF((PVR_DBG_ERROR, "%s: Failed to seq_open psFile, returning %d", __func__, iResult));
+ }
+ }
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+
+ return iResult;
+}
+
+static int _DebugFSFileClose(struct inode *psINode, struct file *psFile)
+{
+ int iResult;
+ PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry = NULL;
+
+ if (psPrivData)
+ {
+ psDebugFSEntry = psPrivData->psDebugFSEntry;
+ }
+ iResult = seq_release(psINode, psFile);
+ if (psDebugFSEntry)
+ {
+ _UnrefAndMaybeDestroyDebugFSEntry(&psPrivData->psDebugFSEntry);
+ }
+ if (psPrivData && psPrivData->pfDecPvDataRefCnt)
+ {
+ psPrivData->pfDecPvDataRefCnt(psPrivData->pvData);
+ }
+ return iResult;
+}
+
+static ssize_t _DebugFSFileWrite(struct file *psFile,
+ const char __user *pszBuffer,
+ size_t uiCount,
+ loff_t *puiPosition)
+{
+ struct inode *psINode = psFile->f_path.dentry->d_inode;
+ PVR_DEBUGFS_PRIV_DATA *psPrivData = (PVR_DEBUGFS_PRIV_DATA *)psINode->i_private;
+
+ if (psPrivData->pfnWrite == NULL)
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called for file '%s', which does not have pfnWrite defined, returning -EIO(%d)", __func__, psFile->f_path.dentry->d_iname, -EIO));
+ return -EIO;
+ }
+
+ return psPrivData->pfnWrite(pszBuffer, uiCount, puiPosition, psPrivData->pvData);
+}
+
+static const struct file_operations gsPVRDebugFSFileOps =
+{
+ .owner = THIS_MODULE,
+ .open = _DebugFSFileOpen,
+ .read = seq_read,
+ .write = _DebugFSFileWrite,
+ .llseek = seq_lseek,
+ .release = _DebugFSFileClose,
+};
+
+
+/*************************************************************************/ /*!
+ Public API
+*/ /**************************************************************************/
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSInit
+@Description Initialise PVR debugfs support. This should be called before
+ using any PVRDebugFS functions.
+@Return int On success, returns 0. Otherwise, returns an
+ error code.
+*/ /**************************************************************************/
+int PVRDebugFSInit(void)
+{
+ PVR_ASSERT(gpsPVRDebugFSEntryDir == NULL);
+
+ mutex_init(&gDebugFSLock);
+
+ gpsPVRDebugFSEntryDir = debugfs_create_dir(PVR_DEBUGFS_DIR_NAME, NULL);
+ if (gpsPVRDebugFSEntryDir == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot create '%s' debugfs root directory",
+			 __func__, PVR_DEBUGFS_DIR_NAME));
+
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSDeInit
+@Description Deinitialise PVR debugfs support. This should be called only
+ if PVRDebugFSInit() has already been called. All debugfs
+                directories and entries should be removed first; otherwise
+                this function will fail.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSDeInit(void)
+{
+ if (gpsPVRDebugFSEntryDir != NULL)
+ {
+ debugfs_remove(gpsPVRDebugFSEntryDir);
+ gpsPVRDebugFSEntryDir = NULL;
+ mutex_destroy(&gDebugFSLock);
+ }
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSCreateEntryDir
+@Description Create a directory for debugfs entries that will be located
+ under the root directory, as created by
+                PVRDebugFSInit().
+@Input pszName String containing the name for the directory.
+@Input psParentDir The parent directory in which to create the new
+ directory. This should either be NULL, meaning it
+ should be created in the root directory, or a
+ pointer to a directory as returned by this
+ function.
+@Output ppsNewDir On success, points to the newly created
+ directory.
+@Return int On success, returns 0. Otherwise, returns an
+ error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ PVR_DEBUGFS_DIR_DATA **ppsNewDir)
+{
+ PVR_DEBUGFS_DIR_DATA *psNewDir;
+
+ PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+ if (pszName == NULL || ppsNewDir == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Invalid param", __func__));
+ return -EINVAL;
+ }
+
+ psNewDir = OSAllocMemNoStats(sizeof(*psNewDir));
+
+ if (psNewDir == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot allocate memory for '%s' pvr_debugfs structure",
+			__func__, pszName));
+ return -ENOMEM;
+ }
+
+ psNewDir->psParentDir = psParentDir;
+ psNewDir->psDir = debugfs_create_dir(pszName, (psNewDir->psParentDir) ? psNewDir->psParentDir->psDir : gpsPVRDebugFSEntryDir);
+
+ if (psNewDir->psDir == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Cannot create '%s' debugfs directory",
+			__func__, pszName));
+
+ OSFreeMemNoStats(psNewDir);
+ return -ENOMEM;
+ }
+
+ *ppsNewDir = psNewDir;
+ psNewDir->ui32RefCount = 1;
+
+ /* if parent directory is not gpsPVRDebugFSEntryDir, increment its refCount */
+ if (psNewDir->psParentDir)
+ {
+ /* if we fail to acquire the reference that probably means that
+ * parent dir was already freed - we have to cleanup in this situation */
+ if (!_RefDirEntry(psNewDir->psParentDir))
+ {
+ _UnrefAndMaybeDestroyDirEntry(ppsNewDir);
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
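+/* Usage sketch (annotation added in review, not in the original sources;
+ * the directory name is hypothetical):
+ *
+ *     PPVR_DEBUGFS_DIR_DATA psGpuDir = NULL;
+ *
+ *     if (PVRDebugFSCreateEntryDir("gpu_stats", NULL, &psGpuDir) == 0)
+ *     {
+ *         ... create entries under psGpuDir ...
+ *         PVRDebugFSRemoveEntryDir(&psGpuDir);
+ *     }
+ */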
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSRemoveEntryDir
+@Description Remove a directory that was created by
+ PVRDebugFSCreateEntryDir(). Any directories or files created
+ under the directory being removed should be removed first.
+@Input ppsDir Pointer representing the directory to be removed.
+ Has to be double pointer to avoid possible races
+ and use-after-free situations.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntryDir(PVR_DEBUGFS_DIR_DATA **ppsDir)
+{
+ _UnrefAndMaybeDestroyDirEntry(ppsDir);
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSCreateEntry
+@Description Create an entry in the specified directory.
+@Input pszName String containing the name for the entry.
+@Input psParentDir Pointer from PVRDebugFSCreateEntryDir()
+ representing the directory in which to create
+ the entry or NULL for the root directory.
+@Input psReadOps Pointer to structure containing the necessary
+ functions to read from the entry.
+@Input pfnWrite Callback function used to write to the entry.
+ This function must update the offset pointer
+ before it returns.
+@Input pvData Private data to be passed to the read
+ functions, in the seq_file private member, and
+ the write function callback.
+@Output ppsNewEntry On success, points to the newly created entry.
+@Return int On success, returns 0. Otherwise, returns an
+ error code.
+*/ /**************************************************************************/
+int PVRDebugFSCreateEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ const struct seq_operations *psReadOps,
+ PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+ PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfnIncPvDataRefCnt,
+ PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfnDecPvDataRefCnt,
+ void *pvData,
+ PVR_DEBUGFS_ENTRY_DATA **ppsNewEntry)
+{
+ PVR_DEBUGFS_PRIV_DATA *psPrivData;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+ struct dentry *psEntry;
+ umode_t uiMode;
+
+ PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+ PVR_ASSERT(!((pfnIncPvDataRefCnt != NULL && pfnDecPvDataRefCnt == NULL) ||
+ (pfnIncPvDataRefCnt == NULL && pfnDecPvDataRefCnt != NULL)));
+
+ psPrivData = OSAllocMemNoStats(sizeof(*psPrivData));
+ if (psPrivData == NULL)
+ {
+ return -ENOMEM;
+ }
+ psDebugFSEntry = OSAllocMemNoStats(sizeof(*psDebugFSEntry));
+ if (psDebugFSEntry == NULL)
+ {
+ OSFreeMemNoStats(psPrivData);
+ return -ENOMEM;
+ }
+
+ psPrivData->psReadOps = psReadOps;
+ psPrivData->pfnWrite = pfnWrite;
+ psPrivData->pvData = (void*)pvData;
+ psPrivData->pfIncPvDataRefCnt = pfnIncPvDataRefCnt;
+ psPrivData->pfDecPvDataRefCnt = pfnDecPvDataRefCnt;
+ psPrivData->bValid = IMG_TRUE;
+ /* Store ptr to debugFSEntry in psPrivData, so a ref can be taken on it
+ * when the client opens a file */
+ psPrivData->psDebugFSEntry = psDebugFSEntry;
+
+ uiMode = S_IFREG;
+
+ if (psReadOps != NULL)
+ {
+ uiMode |= S_IRUGO;
+ }
+
+ if (pfnWrite != NULL)
+ {
+ uiMode |= S_IWUSR;
+ }
+
+ psDebugFSEntry->psParentDir = psParentDir;
+ psDebugFSEntry->ui32RefCount = 1;
+ psDebugFSEntry->psStatData = (PVR_DEBUGFS_DRIVER_STAT*)pvData;
+
+ if (psDebugFSEntry->psParentDir)
+ {
+ /* increment refCount of parent directory */
+ if (!_RefDirEntry(psDebugFSEntry->psParentDir))
+ {
+			OSFreeMemNoStats(psDebugFSEntry);
+			OSFreeMemNoStats(psPrivData);
+ return -EFAULT;
+ }
+ }
+
+ psEntry = debugfs_create_file(pszName,
+ uiMode,
+ (psParentDir != NULL) ? psParentDir->psDir : gpsPVRDebugFSEntryDir,
+ psPrivData,
+ &gsPVRDebugFSFileOps);
+	if (IS_ERR(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' file",
+			 __func__, pszName));
+
+		/* Undo the parent directory ref taken above and free the
+		 * allocations, so a failed create does not leak */
+		_UnrefAndMaybeDestroyDirEntry(&psDebugFSEntry->psParentDir);
+		OSFreeMemNoStats(psDebugFSEntry);
+		OSFreeMemNoStats(psPrivData);
+		return PTR_ERR(psEntry);
+	}
+
+ psDebugFSEntry->psEntry = psEntry;
+ *ppsNewEntry = (void*)psDebugFSEntry;
+
+ return 0;
+}
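+/* Usage sketch (annotation added in review, not in the original sources;
+ * gsMyStateReadOps and psMyData are hypothetical): create a read-only
+ * entry in the debugfs root backed by caller-supplied seq_operations:
+ *
+ *     PPVR_DEBUGFS_ENTRY_DATA psEntry = NULL;
+ *
+ *     if (PVRDebugFSCreateEntry("my_state", NULL, &gsMyStateReadOps, NULL,
+ *                               NULL, NULL, psMyData, &psEntry) == 0)
+ *     {
+ *         ...
+ *         PVRDebugFSRemoveEntry(&psEntry);
+ *     }
+ */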
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSRemoveEntry
+@Description Removes an entry that was created by PVRDebugFSCreateEntry().
+@Input ppsDebugFSEntry Pointer representing the entry to be removed.
+ Has to be double pointer to avoid possible races
+ and use-after-free situations.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry)
+{
+ _UnrefAndMaybeDestroyDebugFSEntry(ppsDebugFSEntry);
+}
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSCreateStatisticEntry
+@Description Create a statistic entry in the specified directory.
+@Input pszName String containing the name for the entry.
+@Input psDir Pointer from PVRDebugFSCreateEntryDir()
+ representing the directory in which to create
+ the entry or NULL for the root directory.
+@Input pfnStatsPrint A callback function used to print all the
+ statistics when reading from the statistic
+ entry.
+@Input          pfnIncStatMemRefCount A callback function used to take a
+                reference on the memory backing the
+                statistic.
+@Input          pfnDecStatMemRefCount A callback function used to drop a
+                reference on the memory backing the
+                statistic.
+@Input pvData Private data to be passed to the provided
+ callback function.
+
+@Return PVR_DEBUGFS_DRIVER_STAT* On success, a pointer representing
+ the newly created statistic entry.
+ Otherwise, NULL.
+*/ /**************************************************************************/
+PVR_DEBUGFS_DRIVER_STAT *PVRDebugFSCreateStatisticEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psDir,
+ OS_STATS_PRINT_FUNC *pfnStatsPrint,
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+						PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+ void *pvData)
+{
+ PVR_DEBUGFS_DRIVER_STAT *psStatData;
+	PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+
+ int iResult;
+
+ if (pszName == NULL || pfnStatsPrint == NULL)
+ {
+ return NULL;
+ }
+ if ((pfnIncStatMemRefCount != NULL || pfnDecStatMemRefCount != NULL) && pvData == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData = OSAllocZMemNoStats(sizeof(*psStatData));
+ if (psStatData == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData->pvData = pvData;
+ psStatData->pfnStatsPrint = pfnStatsPrint;
+ psStatData->pfnIncStatMemRefCount = pfnIncStatMemRefCount;
+ psStatData->pfnDecStatMemRefCount = pfnDecStatMemRefCount;
+ psStatData->ui32RefCount = 1;
+
+ iResult = PVRDebugFSCreateEntry(pszName,
+ psDir,
+ &gsDebugFSStatisticReadOps,
+ NULL,
+ (PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *) _RefStatEntry,
+ (PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *) _UnrefAndMaybeDestroyStatEntry,
+ psStatData,
+ &psDebugFSEntry);
+ if (iResult != 0)
+ {
+ OSFreeMemNoStats(psStatData);
+ return NULL;
+ }
+ psStatData->pvDebugFSEntry = (void*)psDebugFSEntry;
+
+ if (pfnIncStatMemRefCount)
+ {
+ /* call function to take reference on the memory holding the stat */
+ psStatData->pfnIncStatMemRefCount((void*)psStatData->pvData);
+ }
+
+ psDebugFSEntry->ui32RefCount = 1;
+
+ return psStatData;
+}
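+/* Usage sketch (annotation added in review, not in the original sources;
+ * MyStatsPrint and gsMyStats are hypothetical): a statistic entry only
+ * needs a print callback, and the refcount callbacks may be NULL when the
+ * backing memory outlives the entry:
+ *
+ *     PPVR_DEBUGFS_DRIVER_STAT psStat =
+ *         PVRDebugFSCreateStatisticEntry("mem_stats", NULL, MyStatsPrint,
+ *                                        NULL, NULL, &gsMyStats);
+ *     ...
+ *     if (psStat != NULL)
+ *     {
+ *         PVRDebugFSRemoveStatisticEntry(psStat);
+ *     }
+ */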
+
+/*************************************************************************/ /*!
+@Function PVRDebugFSRemoveStatisticEntry
+@Description Removes a statistic entry that was created by
+ PVRDebugFSCreateStatisticEntry().
+@Input psStatEntry Pointer representing the statistic entry to be
+ removed.
+@Return void
+*/ /**************************************************************************/
+void PVRDebugFSRemoveStatisticEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+ PVR_ASSERT(psStatEntry != NULL);
+ /* drop reference on pvStatEntry*/
+ _UnrefAndMaybeDestroyStatEntry(psStatEntry);
+}
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+static void *_DebugFSRawStatisticSeqStart(struct seq_file *psSeqFile,
+ loff_t *puiPosition)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+
+ if (psStatData)
+ {
+ if (*puiPosition == 0)
+ {
+ return psStatData;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+ " NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static void _DebugFSRawStatisticSeqStop(struct seq_file *psSeqFile,
+ void *pvData)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+
+ if (!psStatData)
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+ " NULL", __func__));
+ }
+}
+
+static void *_DebugFSRawStatisticSeqNext(struct seq_file *psSeqFile,
+ void *pvData,
+ loff_t *puiPosition)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) psSeqFile->private;
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ if (!psStatData)
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+ " NULL", __func__));
+ }
+
+ return NULL;
+}
+
+static int _DebugFSRawStatisticSeqShow(struct seq_file *psSeqFile, void *pvData)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData =
+ (PVR_DEBUGFS_RAW_DRIVER_STAT *) pvData;
+
+ if (psStatData != NULL)
+ {
+ psStatData->pfStatsPrint((void *) psSeqFile, NULL,
+ _StatsSeqPrintf);
+ return 0;
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called when psStatData is"
+			" NULL, returning -ENODATA(%d)", __func__, -ENODATA));
+ }
+
+ return -ENODATA;
+}
+
+static struct seq_operations gsDebugFSRawStatisticReadOps =
+{
+ .start = _DebugFSRawStatisticSeqStart,
+ .stop = _DebugFSRawStatisticSeqStop,
+ .next = _DebugFSRawStatisticSeqNext,
+ .show = _DebugFSRawStatisticSeqShow,
+};
+
+PVR_DEBUGFS_RAW_DRIVER_STAT *PVRDebugFSCreateRawStatisticEntry(
+ const IMG_CHAR *pszFileName,
+ void *pvParentDir,
+ OS_STATS_PRINT_FUNC *pfStatsPrint)
+{
+ PVR_DEBUGFS_RAW_DRIVER_STAT *psStatData;
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFsEntry;
+
+ int iResult;
+
+ if (pszFileName == NULL || pfStatsPrint == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData = OSAllocZMemNoStats(sizeof(*psStatData));
+ if (psStatData == NULL)
+ {
+ return NULL;
+ }
+
+ psStatData->pfStatsPrint = pfStatsPrint;
+
+	PVR_ASSERT(pvParentDir == NULL);
+
+ iResult = PVRDebugFSCreateEntry(pszFileName,
+ pvParentDir,
+ &gsDebugFSRawStatisticReadOps,
+ NULL,
+ NULL,
+ NULL,
+ psStatData,
+ &psDebugFsEntry);
+ if (iResult != 0)
+ {
+ OSFreeMemNoStats(psStatData);
+ return NULL;
+ }
+ psStatData->pvDebugFsEntry = (void *) psDebugFsEntry;
+
+ psDebugFsEntry->ui32RefCount = 1;
+
+ return psStatData;
+}
+
+void PVRDebugFSRemoveRawStatisticEntry(PVR_DEBUGFS_RAW_DRIVER_STAT *psStatEntry)
+{
+ PVR_ASSERT(psStatEntry != NULL);
+
+ PVRDebugFSRemoveEntry(&psStatEntry->pvDebugFsEntry);
+ OSFreeMemNoStats(psStatEntry);
+}
+#endif
+
+static IMG_BOOL _RefDirEntry(PVR_DEBUGFS_DIR_DATA *psDirEntry)
+{
+ IMG_BOOL bStatus = IMG_FALSE;
+
+ PVR_ASSERT(psDirEntry != NULL && psDirEntry->psDir != NULL);
+
+ mutex_lock(&gDebugFSLock);
+
+ if (psDirEntry->ui32RefCount > 0)
+ {
+ /* Increment refCount */
+ psDirEntry->ui32RefCount++;
+ bStatus = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psDirEntry '%s'"
+			" when ui32RefCount is zero", __func__,
+ psDirEntry->psDir->d_iname));
+ }
+
+ mutex_unlock(&gDebugFSLock);
+
+ return bStatus;
+}
+
+/* decrements refCount on a directory and removes it if the count reaches
+ * 0, this function also walks recursively over parent directories and
+ * decrements refCount on them too
+ * note: it's safe to call this function with *ppsDirEntry pointing to NULL */
+static void _UnrefAndMaybeDestroyDirEntry(PVR_DEBUGFS_DIR_DATA **ppsDirEntry)
+{
+ PVR_DEBUGFS_DIR_DATA *psDirEntry, *psParentDir = NULL;
+ struct dentry *psDir = NULL;
+
+ PVR_ASSERT(ppsDirEntry != NULL);
+
+ psDirEntry = *ppsDirEntry;
+
+ /* it's ok to call this function with NULL pointer */
+ if (psDirEntry == NULL)
+ {
+ return;
+ }
+
+ mutex_lock(&gDebugFSLock);
+
+ PVR_ASSERT(psDirEntry->psDir != NULL);
+
+ if (psDirEntry->ui32RefCount > 0)
+ {
+ /* Decrement refCount and free if now zero */
+ if (--psDirEntry->ui32RefCount == 0)
+ {
+ psDir = psDirEntry->psDir;
+ psParentDir = psDirEntry->psParentDir;
+
+ psDirEntry->psDir = NULL;
+ psDirEntry->psParentDir = NULL;
+
+ *ppsDirEntry = NULL;
+
+ OSFreeMemNoStats(psDirEntry);
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDirEntry '%s'"
+			" when ui32RefCount is zero", __func__,
+ psDirEntry->psDir->d_iname));
+ }
+
+ /* unlock here so we don't have any relation with the locks that might
+ * be taken in debugfs_remove() */
+ mutex_unlock(&gDebugFSLock);
+
+ debugfs_remove(psDir);
+
+ /* decrement refcount of parent directory */
+ _UnrefAndMaybeDestroyDirEntry(&psParentDir);
+}
+
+static IMG_BOOL _RefDebugFSEntryNoLock(PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry)
+{
+ IMG_BOOL bResult;
+
+ PVR_ASSERT(psDebugFSEntry != NULL);
+
+ bResult = (psDebugFSEntry->ui32RefCount > 0);
+ if (bResult)
+ {
+ /* Increment refCount of psDebugFSEntry */
+ psDebugFSEntry->ui32RefCount++;
+ }
+
+ return bResult;
+}
+
+static void _UnrefAndMaybeDestroyDebugFSEntry(PVR_DEBUGFS_ENTRY_DATA **ppsDebugFSEntry)
+{
+ PVR_DEBUGFS_ENTRY_DATA *psDebugFSEntry;
+ PVR_DEBUGFS_DIR_DATA *psParentDir = NULL;
+ struct dentry *psEntry = NULL;
+
+ mutex_lock(&gDebugFSLock);
+
+ /* Decrement refCount of psDebugFSEntry, and free if now zero */
+ psDebugFSEntry = *ppsDebugFSEntry;
+ PVR_ASSERT(psDebugFSEntry != NULL);
+
+ if (psDebugFSEntry->ui32RefCount > 0)
+ {
+ if (--psDebugFSEntry->ui32RefCount == 0)
+ {
+ psEntry = psDebugFSEntry->psEntry;
+ psParentDir = psDebugFSEntry->psParentDir;
+
+ if (psEntry)
+ {
+ PVR_DEBUGFS_PRIV_DATA *psPrivData =
+ (PVR_DEBUGFS_PRIV_DATA*) psEntry->d_inode->i_private;
+
+				/* set to NULL so nothing can reference this pointer; we
+				 * keep a local copy that is used to free the memory */
+ *ppsDebugFSEntry = NULL;
+
+ /* Free any private data that was provided to debugfs_create_file() */
+ if (psPrivData != NULL)
+ {
+ psPrivData->bValid = IMG_FALSE;
+ psPrivData->psDebugFSEntry = NULL;
+ psEntry->d_inode->i_private = NULL;
+ OSFreeMemNoStats(psPrivData);
+ }
+ }
+
+ /* now free the memory allocated for psDebugFSEntry */
+ OSFreeMemNoStats(psDebugFSEntry);
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psDebugFSEntry"
+ " '%s' when ui32RefCount is zero", __func__,
+ psDebugFSEntry->psEntry->d_iname));
+ }
+
+ /* unlock here so we don't have any relation with the locks that might
+ * be taken in debugfs_remove() */
+ mutex_unlock(&gDebugFSLock);
+
+	/* We can do this outside the lock now: even if something opens the
+	 * file, the private data is already NULL */
+ debugfs_remove(psEntry);
+
+ /* decrement refcount of parent directory */
+ _UnrefAndMaybeDestroyDirEntry(&psParentDir);
+}
+
+static IMG_BOOL _RefStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+ IMG_BOOL bResult;
+
+ PVR_ASSERT(psStatEntry != NULL);
+
+ mutex_lock(&gDebugFSLock);
+
+ bResult = (psStatEntry->ui32RefCount > 0);
+ if (bResult)
+ {
+ /* Increment refCount of psStatEntry */
+ psStatEntry->ui32RefCount++;
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to ref psStatEntry '%s' when ui32RefCount is zero", __func__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+ }
+
+ mutex_unlock(&gDebugFSLock);
+
+ return bResult;
+}
+
+static IMG_BOOL _UnrefAndMaybeDestroyStatEntry(PVR_DEBUGFS_DRIVER_STAT *psStatEntry)
+{
+ IMG_BOOL bResult;
+
+ PVR_ASSERT(psStatEntry != NULL);
+
+ mutex_lock(&gDebugFSLock);
+
+ bResult = (psStatEntry->ui32RefCount > 0);
+
+ if (bResult)
+ {
+ /* Decrement refCount of psStatData, and free if now zero */
+ if (--psStatEntry->ui32RefCount == 0)
+ {
+ mutex_unlock(&gDebugFSLock);
+
+ if (psStatEntry->pvDebugFSEntry)
+ {
+ _UnrefAndMaybeDestroyDebugFSEntry((PVR_DEBUGFS_ENTRY_DATA**)&psStatEntry->pvDebugFSEntry);
+ }
+ if (psStatEntry->pfnDecStatMemRefCount)
+ {
+ /* call function to drop reference on the memory holding the stat */
+ psStatEntry->pfnDecStatMemRefCount((void*)psStatEntry->pvData);
+ }
+ OSFreeMemNoStats(psStatEntry);
+ }
+ else
+ {
+ mutex_unlock(&gDebugFSLock);
+ }
+ }
+ else
+ {
+		PVR_DPF((PVR_DEBUGFS_PVR_DPF_LEVEL, "%s: Called to unref psStatEntry '%s' when ui32RefCount is zero", __func__, psStatEntry->pvDebugFSEntry->psEntry->d_iname));
+ mutex_unlock(&gDebugFSLock);
+ }
+
+ return bResult;
+}
+
+int PVRDebugFSCreateBlobEntry(const char *pszName,
+ PVR_DEBUGFS_DIR_DATA *psParentDir,
+ void *pvData,
+ unsigned long size,
+ PVR_DEBUGFS_BLOB_ENTRY_DATA **ppsNewEntry)
+{
+ PVR_DEBUGFS_BLOB_ENTRY_DATA *psDebugFSEntry;
+ struct dentry *psEntry;
+ umode_t uiMode;
+
+ PVR_ASSERT(gpsPVRDebugFSEntryDir != NULL);
+
+ psDebugFSEntry = OSAllocMemNoStats(sizeof(*psDebugFSEntry));
+ if (psDebugFSEntry == NULL)
+ {
+ return -ENOMEM;
+ }
+
+ uiMode = S_IFREG | S_IRUGO;
+
+ psDebugFSEntry->psParentDir = psParentDir;
+ psDebugFSEntry->blob.data = pvData;
+ psDebugFSEntry->blob.size = size;
+
+ if (psDebugFSEntry->psParentDir)
+ {
+ /* increment refCount of parent directory */
+ if (!_RefDirEntry(psDebugFSEntry->psParentDir))
+ {
+ OSFreeMemNoStats(psDebugFSEntry);
+ return -EFAULT;
+ }
+ }
+
+ psEntry = debugfs_create_blob(pszName,
+ uiMode,
+ (psParentDir != NULL) ? psParentDir->psDir : gpsPVRDebugFSEntryDir,
+ &psDebugFSEntry->blob);
+	if (IS_ERR(psEntry))
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+			 "%s: Cannot create debugfs '%s' blob file",
+			 __func__, pszName));
+
+		/* Undo the parent directory ref taken above before freeing */
+		_UnrefAndMaybeDestroyDirEntry(&psDebugFSEntry->psParentDir);
+		OSFreeMemNoStats(psDebugFSEntry);
+		return PTR_ERR(psEntry);
+	}
+
+ psDebugFSEntry->psEntry = psEntry;
+ *ppsNewEntry = (void*)psDebugFSEntry;
+
+ return 0;
+}
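+/* Usage sketch (annotation added in review, not in the original sources;
+ * gacFWLogBuffer is hypothetical): a blob entry exposes a fixed memory
+ * region read-only:
+ *
+ *     PPVR_DEBUGFS_BLOB_ENTRY_DATA psBlob = NULL;
+ *
+ *     if (PVRDebugFSCreateBlobEntry("fw_log", NULL, gacFWLogBuffer,
+ *                                   sizeof(gacFWLogBuffer), &psBlob) == 0)
+ *     {
+ *         ...
+ *         PVRDebugFSRemoveBlobEntry(&psBlob);
+ *     }
+ */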
+
+void PVRDebugFSRemoveBlobEntry(PVR_DEBUGFS_BLOB_ENTRY_DATA **ppsDebugFSEntry)
+{
+ PVR_DEBUGFS_BLOB_ENTRY_DATA *psDebugFSEntry;
+ PVR_DEBUGFS_DIR_DATA *psParentDir = NULL;
+
+ mutex_lock(&gDebugFSLock);
+
+ PVR_ASSERT(ppsDebugFSEntry != NULL);
+ PVR_ASSERT(*ppsDebugFSEntry != NULL);
+
+ psDebugFSEntry = *ppsDebugFSEntry;
+ psParentDir = psDebugFSEntry->psParentDir;
+
+ *ppsDebugFSEntry = NULL;
+
+ mutex_unlock(&gDebugFSLock);
+
+ debugfs_remove(psDebugFSEntry->psEntry);
+
+ /* now free the memory allocated for psDebugFSEntry */
+ OSFreeMemNoStats(psDebugFSEntry);
+
+ /* decrement refcount of parent directory */
+ _UnrefAndMaybeDestroyDirEntry(&psParentDir);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.h b/drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.h
new file mode 100644
index 00000000000000..c5ad960e2348da
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_debugfs.h
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File
+@Title Functions for creating debugfs directories and entries.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DEBUGFS_H__)
+#define __PVR_DEBUGFS_H__
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "img_types.h"
+#include "osfunc.h"
+
+typedef ssize_t (PVRSRV_ENTRY_WRITE_FUNC)(const char __user *pszBuffer,
+ size_t uiCount,
+ loff_t *puiPosition,
+ void *pvData);
+
+
+typedef IMG_UINT32 (PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+typedef IMG_UINT32 (PVRSRV_DEC_STAT_MEM_REFCOUNT_FUNC)(void *pvStatPtr);
+
+typedef IMG_UINT32 (PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN)(void *pvData);
+typedef IMG_UINT32 (PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN)(void *pvData);
+
+typedef struct _PVR_DEBUGFS_DIR_DATA_ *PPVR_DEBUGFS_DIR_DATA;
+typedef struct _PVR_DEBUGFS_ENTRY_DATA_ *PPVR_DEBUGFS_ENTRY_DATA;
+typedef struct _PVR_DEBUGFS_DRIVER_STAT_ *PPVR_DEBUGFS_DRIVER_STAT;
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+typedef struct _PVR_DEBUGFS_RAW_DRIVER_STAT_ *PPVR_DEBUGFS_RAW_DRIVER_STAT;
+#endif
+typedef struct _PVR_DEBUGFS_BLOB_ENTRY_DATA_ *PPVR_DEBUGFS_BLOB_ENTRY_DATA;
+
+int PVRDebugFSInit(void);
+void PVRDebugFSDeInit(void);
+
+int PVRDebugFSCreateEntryDir(IMG_CHAR *pszName,
+ PPVR_DEBUGFS_DIR_DATA psParentDir,
+ PPVR_DEBUGFS_DIR_DATA *ppsNewDir);
+
+void PVRDebugFSRemoveEntryDir(PPVR_DEBUGFS_DIR_DATA *ppsDir);
+
+int PVRDebugFSCreateEntry(const char *pszName,
+ PPVR_DEBUGFS_DIR_DATA psParentDir,
+ const struct seq_operations *psReadOps,
+ PVRSRV_ENTRY_WRITE_FUNC *pfnWrite,
+ PVRSRV_INC_FSENTRY_PVDATA_REFCNT_FN *pfnIncPvDataRefCnt,
+ PVRSRV_DEC_FSENTRY_PVDATA_REFCNT_FN *pfnDecPvDataRefCnt,
+ void *pvData,
+ PPVR_DEBUGFS_ENTRY_DATA *ppsNewEntry);
+
+void PVRDebugFSRemoveEntry(PPVR_DEBUGFS_ENTRY_DATA *ppsDebugFSEntry);
+
+PPVR_DEBUGFS_DRIVER_STAT PVRDebugFSCreateStatisticEntry(const char *pszName,
+ PPVR_DEBUGFS_DIR_DATA psDir,
+ OS_STATS_PRINT_FUNC *pfnStatsPrint,
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnIncStatMemRefCount,
+ PVRSRV_INC_STAT_MEM_REFCOUNT_FUNC *pfnDecStatMemRefCount,
+ void *pvData);
+
+void PVRDebugFSRemoveStatisticEntry(PPVR_DEBUGFS_DRIVER_STAT psStatEntry);
+
+#if defined(PVRSRV_ENABLE_MEMTRACK_STATS_FILE)
+PPVR_DEBUGFS_RAW_DRIVER_STAT PVRDebugFSCreateRawStatisticEntry(
+ const IMG_CHAR *pszFileName,
+ void *pvParentDir,
+ OS_STATS_PRINT_FUNC* pfnStatsPrint);
+
+void PVRDebugFSRemoveRawStatisticEntry(PPVR_DEBUGFS_RAW_DRIVER_STAT psStatEntry);
+#endif
+
+int PVRDebugFSCreateBlobEntry(const char *pszName,
+ PPVR_DEBUGFS_DIR_DATA psParentDir,
+ void *pvData,
+ unsigned long size,
+ PPVR_DEBUGFS_BLOB_ENTRY_DATA *ppsNewEntry);
+
+void PVRDebugFSRemoveBlobEntry(PPVR_DEBUGFS_BLOB_ENTRY_DATA *ppsDebugFSEntry);
+
+#endif /* !defined(__PVR_DEBUGFS_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_drm.c b/drivers/gpu/drm/img-rogue/1.10/pvr_drm.c
new file mode 100644
index 00000000000000..c3c08ee67fa2da
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_drm.c
@@ -0,0 +1,314 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR DRM driver
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drm.h>
+#include <drm/drmP.h> /* include before drm_crtc.h for kernels older than 3.9 */
+#include <drm/drm_crtc.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drm.h"
+#include "pvr_drv.h"
+#include "pvrversion.h"
+#include "services_kernel_client.h"
+
+#include "kernel_compatibility.h"
+
+#define PVR_DRM_DRIVER_NAME PVR_DRM_NAME
+#define PVR_DRM_DRIVER_DESC "Imagination Technologies PVR DRM"
+#define PVR_DRM_DRIVER_DATE "20170530"
+
+
+static int pvr_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct pvr_drm_private *priv = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("device %p\n", dev);
+
+ return PVRSRVCommonDeviceSuspend(priv->dev_node);
+}
+
+static int pvr_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct pvr_drm_private *priv = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("device %p\n", dev);
+
+ return PVRSRVCommonDeviceResume(priv->dev_node);
+}
+
+const struct dev_pm_ops pvr_pm_ops = {
+ .suspend = pvr_pm_suspend,
+ .resume = pvr_pm_resume,
+};
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static
+#endif
+int pvr_drm_load(struct drm_device *ddev, unsigned long flags)
+{
+ struct pvr_drm_private *priv;
+ enum PVRSRV_ERROR srv_err;
+ int err, deviceId;
+
+ DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+ dev_set_drvdata(ddev->dev, ddev);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+	/* older kernels do not have the render drm_minor member in drm_device,
+	 * so we fall back to the primary node for device identification */
+ deviceId = ddev->primary->index;
+#else
+ if (ddev->render)
+ deviceId = ddev->render->index;
+	else /* when the render node is NULL, fall back to the primary node */
+ deviceId = ddev->primary->index;
+#endif
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ ddev->dev_private = priv;
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+ priv->fence_status_wq = create_freezable_workqueue("pvr_fce_status");
+ if (!priv->fence_status_wq) {
+ DRM_ERROR("failed to create fence status workqueue\n");
+ err = -ENOMEM;
+ goto err_free_priv;
+ }
+#endif
+
+ srv_err = PVRSRVDeviceCreate(ddev->dev, deviceId, &priv->dev_node);
+ if (srv_err != PVRSRV_OK) {
+ DRM_ERROR("failed to create device node for device %p (%s)\n",
+ ddev->dev, PVRSRVGetErrorStringKM(srv_err));
+ if (srv_err == PVRSRV_ERROR_PROBE_DEFER)
+ err = -EPROBE_DEFER;
+ else
+ err = -ENODEV;
+ goto err_workqueue_destroy;
+ }
+
+ err = PVRSRVCommonDeviceInit(priv->dev_node);
+ if (err) {
+ DRM_ERROR("device %p initialisation failed (err=%d)\n",
+ ddev->dev, err);
+ goto err_device_destroy;
+ }
+
+ drm_mode_config_init(ddev);
+
+ return 0;
+
+err_device_destroy:
+ PVRSRVDeviceDestroy(priv->dev_node);
+err_workqueue_destroy:
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+ destroy_workqueue(priv->fence_status_wq);
+err_free_priv:
+#endif
+ kfree(priv);
+err_exit:
+ return err;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0))
+static
+#endif
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+int pvr_drm_unload(struct drm_device *ddev)
+#else
+void pvr_drm_unload(struct drm_device *ddev)
+#endif
+{
+ struct pvr_drm_private *priv = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("device %p\n", ddev->dev);
+
+ drm_mode_config_cleanup(ddev);
+
+ PVRSRVCommonDeviceDeinit(priv->dev_node);
+
+ PVRSRVDeviceDestroy(priv->dev_node);
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+ destroy_workqueue(priv->fence_status_wq);
+#endif
+
+ kfree(priv);
+ ddev->dev_private = NULL;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ return 0;
+#endif
+}
+
+static int pvr_drm_open(struct drm_device *ddev, struct drm_file *dfile)
+{
+ struct pvr_drm_private *priv = ddev->dev_private;
+ int err;
+
+ if (!try_module_get(THIS_MODULE)) {
+ DRM_ERROR("failed to get module reference\n");
+ return -ENOENT;
+ }
+
+ err = PVRSRVCommonDeviceOpen(priv->dev_node, dfile);
+ if (err)
+ module_put(THIS_MODULE);
+
+ return err;
+}
+
+static void pvr_drm_release(struct drm_device *ddev, struct drm_file *dfile)
+{
+ struct pvr_drm_private *priv = ddev->dev_private;
+
+ PVRSRVCommonDeviceRelease(priv->dev_node, dfile);
+
+ module_put(THIS_MODULE);
+}
+
+/*
+ * The DRM global lock is taken for ioctls unless the DRM_UNLOCKED flag is
+ * set. If you revise one of the driver-specific ioctls that has DRM_UNLOCKED
+ * set, or add a new one, consider whether the gPVRSRVLock mutex needs to be
+ * taken; a sketch follows the table below.
+ */
+static struct drm_ioctl_desc pvr_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(PVR_SRVKM_CMD, PVRSRV_BridgeDispatchKM, DRM_RENDER_ALLOW | DRM_UNLOCKED),
+#if defined(PDUMP)
+ DRM_IOCTL_DEF_DRV(PVR_DBGDRV_CMD, dbgdrv_ioctl, DRM_RENDER_ALLOW | DRM_AUTH | DRM_UNLOCKED),
+#endif
+};
+
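+/*
+ * Illustrative sketch only (not part of the driver): an ioctl handler added
+ * with DRM_UNLOCKED that must serialise against other services entry points
+ * could bracket its work with the mutex mentioned above. The helper
+ * pvr_example_do_work is hypothetical:
+ *
+ *	static int pvr_example_ioctl(struct drm_device *dev, void *arg,
+ *				     struct drm_file *file)
+ *	{
+ *		int err;
+ *
+ *		mutex_lock(&gPVRSRVLock);
+ *		err = pvr_example_do_work(dev, arg, file);
+ *		mutex_unlock(&gPVRSRVLock);
+ *		return err;
+ *	}
+ */
+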
+#if defined(CONFIG_COMPAT)
+#if defined(PDUMP)
+static drm_ioctl_compat_t *pvr_drm_compat_ioctls[] = {
+ [DRM_PVR_DBGDRV_CMD] = dbgdrv_ioctl_compat,
+};
+#endif
+
+static long pvr_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(file, cmd, arg);
+
+#if defined(PDUMP)
+ if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(pvr_drm_compat_ioctls)) {
+ drm_ioctl_compat_t *pfnBridge;
+
+ pfnBridge = pvr_drm_compat_ioctls[nr - DRM_COMMAND_BASE];
+ if (pfnBridge)
+ return pfnBridge(file, cmd, arg);
+ }
+#endif
+
+ return drm_ioctl(file, cmd, arg);
+}
+#endif /* defined(CONFIG_COMPAT) */
+
+static const struct file_operations pvr_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ /*
+ * FIXME:
+ * Wrap this in a function that checks enough data has been
+ * supplied with the ioctl (e.g. _IOCDIR(nr) != _IOC_NONE &&
+ * _IOC_SIZE(nr) == size).
+ */
+ .unlocked_ioctl = drm_ioctl,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = pvr_compat_ioctl,
+#endif
+ .mmap = PVRSRV_MMap,
+ .poll = drm_poll,
+ .read = drm_read,
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0))
+ .fasync = drm_fasync,
+#endif
+};
+
+const struct drm_driver pvr_drm_generic_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_RENDER,
+
+ .dev_priv_size = 0,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ .load = NULL,
+ .unload = NULL,
+#else
+ .load = pvr_drm_load,
+ .unload = pvr_drm_unload,
+#endif
+ .open = pvr_drm_open,
+ .postclose = pvr_drm_release,
+
+ .ioctls = pvr_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(pvr_drm_ioctls),
+ .fops = &pvr_drm_fops,
+
+ .name = PVR_DRM_DRIVER_NAME,
+ .desc = PVR_DRM_DRIVER_DESC,
+ .date = PVR_DRM_DRIVER_DATE,
+ .major = PVRVERSION_MAJ,
+ .minor = PVRVERSION_MIN,
+ .patchlevel = PVRVERSION_BUILD,
+};
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_drm.h b/drivers/gpu/drm/img-rogue/1.10/pvr_drm.h
new file mode 100644
index 00000000000000..408be8930492a5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_drm.h
@@ -0,0 +1,93 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PVR DRM definitions shared between kernel and user space.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_H__)
+#define __PVR_DRM_H__
+
+#include "pvr_drm_core.h"
+
+/*
+ * IMPORTANT:
+ * All structures below are designed to be the same size when compiled for 32
+ * and/or 64 bit architectures, i.e. there should be no compiler inserted
+ * padding. This is achieved by sticking to the following rules:
+ * 1) only use fixed width types
+ * 2) always naturally align fields by arranging them appropriately and by using
+ * padding fields when necessary
+ *
+ * These rules should _always_ be followed when modifying or adding new
+ * structures to this file.
+ */
+
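+/*
+ * Illustration of rule 2 (sketch only, not a real interface): a lone __u32
+ * followed by a __u64 needs an explicit pad field so the __u64 stays
+ * naturally aligned on both 32 and 64 bit builds:
+ *
+ *	struct drm_pvr_example {
+ *		__u32 flags;
+ *		__u32 pad;	(keeps data_ptr 8-byte aligned)
+ *		__u64 data_ptr;
+ *	};
+ */
+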
+struct drm_pvr_srvkm_cmd {
+ __u32 bridge_id;
+ __u32 bridge_func_id;
+ __u64 in_data_ptr;
+ __u64 out_data_ptr;
+ __u32 in_data_size;
+ __u32 out_data_size;
+};
+
+struct drm_pvr_dbgdrv_cmd {
+ __u32 cmd;
+ __u32 pad;
+ __u64 in_data_ptr;
+ __u64 out_data_ptr;
+ __u32 in_data_size;
+ __u32 out_data_size;
+};
+
+/*
+ * DRM command numbers, relative to DRM_COMMAND_BASE.
+ * These defines must be prefixed with "DRM_".
+ */
+#define DRM_PVR_SRVKM_CMD 0 /* Used for PVR Services ioctls */
+#define DRM_PVR_DBGDRV_CMD 1 /* Debug driver (PDUMP) ioctls */
+
+
+/* These defines must be prefixed with "DRM_IOCTL_". */
+#define DRM_IOCTL_PVR_SRVKM_CMD DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_SRVKM_CMD, struct drm_pvr_srvkm_cmd)
+#define DRM_IOCTL_PVR_DBGDRV_CMD DRM_IOWR(DRM_COMMAND_BASE + DRM_PVR_DBGDRV_CMD, struct drm_pvr_dbgdrv_cmd)
+
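+/*
+ * Illustrative user-space invocation (sketch only; field values are
+ * placeholders and error handling is omitted):
+ *
+ *	struct drm_pvr_srvkm_cmd cmd = { 0 };
+ *	cmd.bridge_id = bridge_id;		(caller-supplied)
+ *	cmd.bridge_func_id = bridge_func_id;	(caller-supplied)
+ *	ioctl(fd, DRM_IOCTL_PVR_SRVKM_CMD, &cmd);
+ */
+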
+#endif /* defined(__PVR_DRM_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_drm_core.h b/drivers/gpu/drm/img-rogue/1.10/pvr_drm_core.h
new file mode 100644
index 00000000000000..e4088adb94e9e9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_drm_core.h
@@ -0,0 +1,78 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title Linux DRM definitions shared between kernel and user space.
+@Codingstyle LinuxKernel
+@Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ All rights reserved.
+@Description This header contains a subset of the Linux kernel DRM uapi
+ and is designed to be used in kernel and user mode. When
+ included from kernel mode, it pulls in the full version of
+ drm.h. Whereas, when included from user mode, it defines a
+                drm.h, whereas, when included from user mode, it defines a
+ structures and ioctl commands must exactly match those found
+ in the Linux kernel/libdrm.
+@License MIT
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRM_CORE_H__)
+#define __PVR_DRM_CORE_H__
+
+#if defined(__KERNEL__)
+#include <drm/drm.h>
+#else
+#include <asm/ioctl.h>
+#include <linux/types.h>
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_COMMAND_BASE 0x40
+
+#define DRM_IOWR(nr, type) _IOWR(DRM_IOCTL_BASE, nr, type)
+
+struct drm_version {
+ int version_major;
+ int version_minor;
+ int version_patchlevel;
+ __kernel_size_t name_len;
+ char *name;
+ __kernel_size_t date_len;
+ char *date;
+ __kernel_size_t desc_len;
+ char *desc;
+};
+
+struct drm_set_version {
+ int drm_di_major;
+ int drm_di_minor;
+ int drm_dd_major;
+ int drm_dd_minor;
+};
+
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_drv.h b/drivers/gpu/drm/img-rogue/1.10/pvr_drv.h
new file mode 100644
index 00000000000000..4d5ee6549af2df
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_drv.h
@@ -0,0 +1,90 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR DRM driver
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_DRV_H__)
+#define __PVR_DRV_H__
+
+#include <linux/version.h>
+#include <drm/drmP.h>
+#include <linux/pm.h>
+
+struct file;
+struct _PVRSRV_DEVICE_NODE_;
+struct workqueue_struct;
+struct vm_area_struct;
+
+/* This structure is used to store Linux specific per-device information. */
+struct pvr_drm_private {
+ struct _PVRSRV_DEVICE_NODE_ *dev_node;
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined(SUPPORT_NATIVE_FENCE_SYNC)
+ struct workqueue_struct *fence_status_wq;
+#endif
+};
+
+extern const struct dev_pm_ops pvr_pm_ops;
+extern const struct drm_driver pvr_drm_generic_driver;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+int pvr_drm_load(struct drm_device *ddev, unsigned long flags);
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+int pvr_drm_unload(struct drm_device *ddev);
+#else
+void pvr_drm_unload(struct drm_device *ddev);
+#endif
+#endif
+
+#if defined(PDUMP)
+int dbgdrv_init(void);
+void dbgdrv_cleanup(void);
+int dbgdrv_ioctl(struct drm_device *dev, void *arg, struct drm_file *file);
+int dbgdrv_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
+int PVRSRV_BridgeDispatchKM(struct drm_device *dev, void *arg,
+ struct drm_file *file);
+int PVRSRV_MMap(struct file *file, struct vm_area_struct *ps_vma);
+
+#endif /* !defined(__PVR_DRV_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs.h b/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs.h
new file mode 100644
index 00000000000000..4fa6584124f584
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs.h
@@ -0,0 +1,147 @@
+/*************************************************************************/ /*!
+@File pvr_dvfs.h
+@Title System level interface for DVFS
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_DVFS_H_
+#define _PVR_DVFS_H_
+
+#include <linux/version.h>
+
+#if defined(PVR_DVFS)
+ #include <linux/devfreq.h>
+ #include <linux/thermal.h>
+
+ #if defined(CONFIG_DEVFREQ_THERMAL)
+ #include <linux/devfreq_cooling.h>
+ #endif
+
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ #include <linux/opp.h>
+ #else
+ #include <linux/pm_opp.h>
+ #endif
+#endif
+
+#include "img_types.h"
+
+typedef void (*PFN_SYS_DEV_DVFS_SET_FREQUENCY)(IMG_UINT32 ui32Freq);
+typedef void (*PFN_SYS_DEV_DVFS_SET_VOLTAGE)(IMG_UINT32 ui32Volt);
+
+typedef struct _IMG_OPP_
+{
+ IMG_UINT32 ui32Volt;
+	/* Frequency in Hz. */
+ IMG_UINT32 ui32Freq;
+} IMG_OPP;
+
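+/*
+ * Illustration only (made-up values): a minimal OPP table that a system
+ * layer might supply through pasOPPTable/ui32OPPTableSize below:
+ *
+ *	static const IMG_OPP asExampleOPPs[] = {
+ *		{ .ui32Volt =  900000, .ui32Freq = 200000000 },
+ *		{ .ui32Volt = 1000000, .ui32Freq = 400000000 },
+ *	};
+ */
+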
+typedef struct _IMG_DVFS_DEVICE_CFG_
+{
+ const IMG_OPP *pasOPPTable;
+ IMG_UINT32 ui32OPPTableSize;
+#if defined(PVR_DVFS)
+ IMG_UINT32 ui32PollMs;
+#endif
+ IMG_BOOL bIdleReq;
+ PFN_SYS_DEV_DVFS_SET_FREQUENCY pfnSetFrequency;
+ PFN_SYS_DEV_DVFS_SET_VOLTAGE pfnSetVoltage;
+
+#if defined(CONFIG_DEVFREQ_THERMAL) && defined(PVR_DVFS)
+ struct devfreq_cooling_power *psPowerOps;
+#endif
+} IMG_DVFS_DEVICE_CFG;
+
+#if defined(PVR_DVFS)
+typedef struct _IMG_DVFS_GOVERNOR_
+{
+ IMG_BOOL bEnabled;
+} IMG_DVFS_GOVERNOR;
+
+typedef struct _IMG_DVFS_GOVERNOR_CFG_
+{
+ IMG_UINT32 ui32UpThreshold;
+ IMG_UINT32 ui32DownDifferential;
+} IMG_DVFS_GOVERNOR_CFG;
+#endif
+
+#if defined(__linux__)
+#if defined(PVR_DVFS)
+typedef struct _IMG_DVFS_DEVICE_
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ struct opp *psOPP;
+#else
+ struct dev_pm_opp *psOPP;
+#endif
+ struct devfreq *psDevFreq;
+ IMG_BOOL bEnabled;
+ IMG_HANDLE hGpuUtilUserDVFS;
+ struct devfreq_simple_ondemand_data data;
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ struct thermal_cooling_device *psDevfreqCoolingDevice;
+#endif
+} IMG_DVFS_DEVICE;
+#endif
+
+#if defined(SUPPORT_PDVFS)
+typedef struct _PDVFS_DATA_
+{
+ IMG_HANDLE hReactiveTimer;
+ IMG_BOOL bWorkInFrame;
+} PDVFS_DATA;
+#endif
+
+typedef struct _IMG_DVFS_
+{
+#if defined(PVR_DVFS)
+ IMG_DVFS_DEVICE sDVFSDevice;
+ IMG_DVFS_GOVERNOR sDVFSGovernor;
+ IMG_DVFS_GOVERNOR_CFG sDVFSGovernorCfg;
+#endif
+#if defined(SUPPORT_PDVFS)
+ PDVFS_DATA sPDVFSData;
+#endif
+ IMG_DVFS_DEVICE_CFG sDVFSDeviceCfg;
+} PVRSRV_DVFS;
+#endif /* defined(__linux__) */
+
+#endif /* _PVR_DVFS_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.c b/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.c
new file mode 100644
index 00000000000000..1219fe1963f5d0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.c
@@ -0,0 +1,595 @@
+/*************************************************************************/ /*!
+@File
+@Title PowerVR devfreq device implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Linux devfreq device setup
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(NO_HARDWARE)
+
+#include <linux/devfreq.h>
+#if defined(CONFIG_DEVFREQ_THERMAL)
+#include <linux/devfreq_cooling.h>
+#endif
+#include <linux/version.h>
+#include <linux/device.h>
+
+#include "power.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+
+#include "rgxdevice.h"
+#include "rgxinit.h"
+#include "sofunc_rgx.h"
+
+#include "syscommon.h"
+
+#include "pvr_dvfs_device.h"
+
+#include "kernel_compatibility.h"
+
+static PVRSRV_DEVICE_NODE *gpsDeviceNode;
+
+static IMG_INT32 devfreq_target(struct device *dev, unsigned long *requested_freq, IMG_UINT32 flags)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData;
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = psRGXData->psRGXTimingInfo;
+ IMG_UINT32 ui32Freq, ui32CurFreq, ui32Volt;
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ struct opp *opp;
+#else
+ struct dev_pm_opp *opp;
+#endif
+
+ if (!psDVFSDevice->bEnabled)
+ {
+ *requested_freq = psRGXTimingInfo->ui32CoreClockSpeed;
+ return 0;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ rcu_read_lock();
+#endif
+
+ opp = devfreq_recommended_opp(dev, requested_freq, flags);
+ if (IS_ERR(opp)) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ rcu_read_unlock();
+#endif
+ PVR_DPF((PVR_DBG_ERROR, "Invalid OPP"));
+ return PTR_ERR(opp);
+ }
+
+ ui32Freq = dev_pm_opp_get_freq(opp);
+ ui32Volt = dev_pm_opp_get_voltage(opp);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ rcu_read_unlock();
+#else
+ dev_pm_opp_put(opp);
+#endif
+
+ ui32CurFreq = psRGXTimingInfo->ui32CoreClockSpeed;
+
+ if (ui32CurFreq == ui32Freq)
+ {
+ return 0;
+ }
+
+ if (PVRSRV_OK != PVRSRVDevicePreClockSpeedChange(gpsDeviceNode,
+ psDVFSDeviceCfg->bIdleReq,
+ NULL))
+ {
+ dev_err(dev, "PVRSRVDevicePreClockSpeedChange failed\n");
+ return -EPERM;
+ }
+
+	/* When increasing the frequency, raise the voltage first */
+ if (ui32Freq > ui32CurFreq)
+ {
+ psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ }
+
+ psDVFSDeviceCfg->pfnSetFrequency(ui32Freq);
+
+	/* When decreasing the frequency, lower the voltage only afterwards */
+ if (ui32Freq < ui32CurFreq)
+ {
+ psDVFSDeviceCfg->pfnSetVoltage(ui32Volt);
+ }
+
+ psRGXTimingInfo->ui32CoreClockSpeed = ui32Freq;
+
+ PVRSRVDevicePostClockSpeedChange(gpsDeviceNode, psDVFSDeviceCfg->bIdleReq,
+ NULL);
+
+ return 0;
+}
+
+static int devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = gpsDeviceNode->pvDevice;
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = psRGXData->psRGXTimingInfo;
+ RGXFWIF_GPU_UTIL_STATS sGpuUtilStats;
+ PVRSRV_ERROR eError;
+
+ stat->current_frequency = psRGXTimingInfo->ui32CoreClockSpeed;
+
+ if (psDevInfo->pfnGetGpuUtilStats == NULL)
+ {
+ /* Not yet ready. So set times to something sensible. */
+ stat->busy_time = 0;
+ stat->total_time = 0;
+ return 0;
+ }
+
+ eError = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode,
+ psDVFSDevice->hGpuUtilUserDVFS,
+ &sGpuUtilStats);
+
+ if (eError != PVRSRV_OK)
+ {
+ return -EAGAIN;
+ }
+
+ stat->busy_time = sGpuUtilStats.ui64GpuStatActiveHigh + sGpuUtilStats.ui64GpuStatActiveLow;
+ stat->total_time = sGpuUtilStats.ui64GpuStatCumulative;
+
+ return 0;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+static IMG_INT32 devfreq_cur_freq(struct device *dev, unsigned long *freq)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*) gpsDeviceNode->psDevConfig->hDevData;
+
+ *freq = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+ return 0;
+}
+#endif
+
+static struct devfreq_dev_profile img_devfreq_dev_profile =
+{
+ .target = devfreq_target,
+ .get_dev_status = devfreq_get_dev_status,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ .get_cur_freq = devfreq_cur_freq,
+#endif
+};
+
+static int FillOPPTable(struct device *dev)
+{
+ const IMG_OPP *iopp;
+ int i, err = 0;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+
+ for (i = 0, iopp = psDVFSDeviceCfg->pasOPPTable;
+ i < psDVFSDeviceCfg->ui32OPPTableSize;
+ i++, iopp++)
+ {
+ err = dev_pm_opp_add(dev, iopp->ui32Freq, iopp->ui32Volt);
+ if (err) {
+ dev_err(dev, "Could not add OPP entry, %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int GetOPPValues(struct device *dev,
+ unsigned long *min_freq,
+ unsigned long *min_volt,
+ unsigned long *max_freq)
+{
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ struct opp *opp;
+#else
+ struct dev_pm_opp *opp;
+#endif
+ int count, i, err = 0;
+ unsigned long freq;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) && \
+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)))
+ unsigned int *freq_table;
+#else
+ unsigned long *freq_table;
+#endif
+
+ count = dev_pm_opp_get_opp_count(dev);
+ if (count < 0)
+ {
+ dev_err(dev, "Could not fetch OPP count, %d\n", count);
+ return count;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+ freq_table = devm_kcalloc(dev, count, sizeof(*freq_table), GFP_ATOMIC);
+#else
+ freq_table = kcalloc(count, sizeof(*freq_table), GFP_ATOMIC);
+#endif
+ if (! freq_table)
+ {
+ return -ENOMEM;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ /* Start RCU read-side critical section to map frequency to OPP */
+ rcu_read_lock();
+#endif
+
+ /* Iterate over OPP table; Iteration 0 finds "opp w/ freq >= 0 Hz". */
+ freq = 0;
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ {
+ err = PTR_ERR(opp);
+ dev_err(dev, "Couldn't find lowest frequency, %d\n", err);
+ goto exit;
+ }
+
+ *min_volt = dev_pm_opp_get_voltage(opp);
+ *max_freq = *min_freq = freq_table[0] = freq;
+ dev_info(dev, "opp[%d/%d]: (%lu Hz, %lu uV)\n", 1, count, freq, *min_volt);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+ dev_pm_opp_put(opp);
+#endif
+
+ /* Iteration i > 0 finds "opp w/ freq >= (opp[i-1].freq + 1)". */
+ for (i = 1; i < count; i++)
+ {
+ freq++;
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ {
+ err = PTR_ERR(opp);
+ dev_err(dev, "Couldn't find %dth frequency, %d\n", i, err);
+ goto exit;
+ }
+
+ freq_table[i] = freq;
+ *max_freq = freq;
+ dev_info(dev,
+ "opp[%d/%d]: (%lu Hz, %lu uV)\n",
+ i + 1,
+ count,
+ freq,
+ dev_pm_opp_get_voltage(opp));
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0))
+ dev_pm_opp_put(opp);
+#endif
+ }
+
+exit:
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ rcu_read_unlock();
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0))
+ if (!err)
+ {
+ img_devfreq_dev_profile.freq_table = freq_table;
+ img_devfreq_dev_profile.max_state = count;
+ }
+ else
+#endif
+ {
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
+ devm_kfree(dev, freq_table);
+#else
+ kfree(freq_table);
+#endif
+ }
+
+ return err;
+}
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+static int RegisterCoolingDevice(struct device *dev,
+ IMG_DVFS_DEVICE *psDVFSDevice,
+ struct devfreq_cooling_power *powerOps)
+{
+ struct device_node *of_node;
+ int err = 0;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, err);
+
+ if (!powerOps)
+ {
+ dev_info(dev, "Cooling: power ops not registered, not enabling cooling");
+ return 0;
+ }
+
+ of_node = of_node_get(dev->of_node);
+
+ psDVFSDevice->psDevfreqCoolingDevice = of_devfreq_cooling_register_power(
+ of_node, psDVFSDevice->psDevFreq, powerOps);
+
+ if (IS_ERR(psDVFSDevice->psDevfreqCoolingDevice))
+ {
+ err = PTR_ERR(psDVFSDevice->psDevfreqCoolingDevice);
+ dev_err(dev, "Failed to register as devfreq cooling device %d", err);
+ }
+
+ of_node_put(of_node);
+
+ return err;
+}
+#endif
+
+#define TO_IMG_ERR(err) (((err) == -EPROBE_DEFER) ? PVRSRV_ERROR_PROBE_DEFER : PVRSRV_ERROR_INIT_FAILURE)
+
+PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = NULL;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg = NULL;
+ IMG_DVFS_GOVERNOR_CFG *psDVFSGovernorCfg = NULL;
+ RGX_TIMING_INFORMATION *psRGXTimingInfo = NULL;
+ struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice;
+ unsigned long min_freq = 0, max_freq = 0, min_volt = 0;
+ PVRSRV_ERROR eError;
+ int err;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ if (gpsDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "DVFS already initialised for device node %p",
+ gpsDeviceNode));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ gpsDeviceNode = psDeviceNode;
+ psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ psDVFSGovernorCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSGovernorCfg;
+ psRGXTimingInfo = ((RGX_DATA *)psDeviceNode->psDevConfig->hDevData)->psRGXTimingInfo;
+
+#if defined(SUPPORT_SOC_TIMER)
+ if (! psDeviceNode->psDevConfig->pfnSoCTimerRead)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "System layer SoC timer callback not implemented"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+#endif
+
+ eError = SORgxGpuUtilStatsRegister(&psDVFSDevice->hGpuUtilUserDVFS);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Failed to register for GPU utilisation stats, %d", eError));
+ return eError;
+ }
+
+#if defined(CONFIG_OF)
+ err = dev_pm_opp_of_add_table(psDev);
+ if (err)
+ {
+		/*
+		 * If neither the device tree nor the system layer provides
+		 * operating points, return an error.
+		 */
+ if (err != -ENODEV || !psDVFSDeviceCfg->pasOPPTable)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to init opp table from devicetree, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+ }
+#endif
+
+ if (psDVFSDeviceCfg->pasOPPTable)
+ {
+ err = FillOPPTable(psDev);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to fill OPP table with data, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+ }
+
+ err = GetOPPValues(psDev, &min_freq, &min_volt, &max_freq);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to read OPP points, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+
+ img_devfreq_dev_profile.initial_freq = min_freq;
+ img_devfreq_dev_profile.polling_ms = psDVFSDeviceCfg->ui32PollMs;
+
+ psRGXTimingInfo->ui32CoreClockSpeed = min_freq;
+
+ psDVFSDeviceCfg->pfnSetFrequency(min_freq);
+ psDVFSDeviceCfg->pfnSetVoltage(min_volt);
+
+ psDVFSDevice->data.upthreshold = psDVFSGovernorCfg->ui32UpThreshold;
+ psDVFSDevice->data.downdifferential = psDVFSGovernorCfg->ui32DownDifferential;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
+ psDVFSDevice->psDevFreq = devm_devfreq_add_device(psDev,
+ &img_devfreq_dev_profile,
+ "simple_ondemand",
+ &psDVFSDevice->data);
+#else
+ psDVFSDevice->psDevFreq = devfreq_add_device(psDev,
+ &img_devfreq_dev_profile,
+ "simple_ondemand",
+ &psDVFSDevice->data);
+#endif
+
+ if (IS_ERR(psDVFSDevice->psDevFreq))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Failed to add as devfreq device %p, %ld",
+ psDVFSDevice->psDevFreq,
+ PTR_ERR(psDVFSDevice->psDevFreq)));
+ eError = TO_IMG_ERR(PTR_ERR(psDVFSDevice->psDevFreq));
+ goto err_exit;
+ }
+
+ eError = SuspendDVFS();
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "InitDVFS: Failed to suspend DVFS"));
+ goto err_exit;
+ }
+
+#if defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+ psDVFSDevice->psDevFreq->policy.user.min_freq = min_freq;
+ psDVFSDevice->psDevFreq->policy.user.max_freq = max_freq;
+#else
+ psDVFSDevice->psDevFreq->min_freq = min_freq;
+ psDVFSDevice->psDevFreq->max_freq = max_freq;
+#endif
+
+ err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq);
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to register opp notifier, %d", err));
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ err = RegisterCoolingDevice(psDev, psDVFSDevice, psDVFSDeviceCfg->psPowerOps);
+ if (err)
+ {
+ eError = TO_IMG_ERR(err);
+ goto err_exit;
+ }
+#endif
+
+ PVR_TRACE(("PVR DVFS activated: %lu-%lu Hz, Period: %ums",
+ min_freq,
+ max_freq,
+ psDVFSDeviceCfg->ui32PollMs));
+
+ return PVRSRV_OK;
+
+err_exit:
+ DeinitDVFS(psDeviceNode);
+ return eError;
+}
+
+void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = &psDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+ struct device *psDev = psDeviceNode->psDevConfig->pvOSDevice;
+ IMG_INT32 i32Error;
+
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ PVR_ASSERT(psDeviceNode == gpsDeviceNode);
+
+ if (! psDVFSDevice)
+ {
+ return;
+ }
+
+#if defined(CONFIG_DEVFREQ_THERMAL)
+ if (!IS_ERR_OR_NULL(psDVFSDevice->psDevfreqCoolingDevice))
+ {
+ devfreq_cooling_unregister(psDVFSDevice->psDevfreqCoolingDevice);
+ psDVFSDevice->psDevfreqCoolingDevice = NULL;
+ }
+#endif
+
+ if (psDVFSDevice->psDevFreq)
+ {
+ i32Error = devfreq_unregister_opp_notifier(psDev, psDVFSDevice->psDevFreq);
+ if (i32Error < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to unregister OPP notifier"));
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 0))
+ devfreq_remove_device(psDVFSDevice->psDevFreq);
+#else
+ devm_devfreq_remove_device(psDev, psDVFSDevice->psDevFreq);
+#endif
+
+ psDVFSDevice->psDevFreq = NULL;
+ }
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) && \
+ LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
+ kfree(img_devfreq_dev_profile.freq_table);
+#endif
+
+#if defined(CONFIG_OF)
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) || \
+ (defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)))
+ dev_pm_opp_of_remove_table(psDev);
+#endif
+#endif
+
+ SORgxGpuUtilStatsUnregister(psDVFSDevice->hGpuUtilUserDVFS);
+ psDVFSDevice->hGpuUtilUserDVFS = NULL;
+
+ gpsDeviceNode = NULL;
+}
+
+PVRSRV_ERROR SuspendDVFS(void)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+
+ psDVFSDevice->bEnabled = IMG_FALSE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR ResumeDVFS(void)
+{
+ IMG_DVFS_DEVICE *psDVFSDevice = &gpsDeviceNode->psDevConfig->sDVFS.sDVFSDevice;
+
+ /* Not supported in GuestOS drivers */
+ psDVFSDevice->bEnabled = !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST);
+
+ return PVRSRV_OK;
+}
+
+#endif /* !NO_HARDWARE */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.h b/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.h
new file mode 100644
index 00000000000000..808fc3de3f81d2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_dvfs_device.h
@@ -0,0 +1,58 @@
+/*************************************************************************/ /*!
+@File           pvr_dvfs_device.h
+@Title System level interface for DVFS
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_DVFS_DEVICE_H_
+#define _PVR_DVFS_DEVICE_H_
+
+#include "opaque_types.h"
+#include "pvrsrv_error.h"
+
+
+PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+void DeinitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode);
+
+PVRSRV_ERROR SuspendDVFS(void);
+
+PVRSRV_ERROR ResumeDVFS(void);
+
+#endif /* _PVR_DVFS_DEVICE_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_fd_sync_kernel.h b/drivers/gpu/drm/img-rogue/1.10/pvr_fd_sync_kernel.h
new file mode 100644
index 00000000000000..9a85f19c06d2d4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_fd_sync_kernel.h
@@ -0,0 +1,77 @@
+/*************************************************************************/ /*!
+@File pvr_fd_sync_kernel.h
+@Title Kernel/userspace interface definitions to use the kernel sync
+ driver
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+/* vi: set ts=8: */
+
+
+#ifndef _PVR_FD_SYNC_KERNEL_H_
+#define _PVR_FD_SYNC_KERNEL_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define PVR_SYNC_MAX_QUERY_FENCE_POINTS 14
+
+#define PVR_SYNC_IOC_MAGIC 'W'
+
+#define PVR_SYNC_IOC_RENAME \
+ _IOW(PVR_SYNC_IOC_MAGIC, 4, struct pvr_sync_rename_ioctl_data)
+
+#define PVR_SYNC_IOC_FORCE_SW_ONLY \
+ _IO(PVR_SYNC_IOC_MAGIC, 5)
+
+struct pvr_sync_pt_info {
+ /* Output */
+ __u32 id;
+ __u32 ui32FWAddr;
+ __u32 ui32CurrOp;
+ __u32 ui32NextOp;
+ __u32 ui32TlTaken;
+} __attribute__((packed, aligned(8)));
+
+struct pvr_sync_rename_ioctl_data
+{
+ /* Input */
+ char szName[32];
+} __attribute__((packed, aligned(8)));
+
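+/*
+ * Illustrative user-space usage (sketch only; fd is assumed to be an open
+ * pvr_sync timeline file descriptor):
+ *
+ *	struct pvr_sync_rename_ioctl_data data = { .szName = "my-timeline" };
+ *	ioctl(fd, PVR_SYNC_IOC_RENAME, &data);
+ */
+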
+#endif /* _PVR_FD_SYNC_KERNEL_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_fence.c b/drivers/gpu/drm/img-rogue/1.10/pvr_fence.c
new file mode 100644
index 00000000000000..751ddb59d987bf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_fence.c
@@ -0,0 +1,1089 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR Linux fence interface
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "pvr_fence.h"
+#include "services_kernel_client.h"
+#include "sync_checkpoint_external.h"
+
+#define CREATE_TRACE_POINTS
+#include "pvr_fence_trace.h"
+
+/* This header must always be included last */
+#include "kernel_compatibility.h"
+
+#define PVR_FENCE_CONTEXT_DESTROY_INITAL_WAIT_MS 100
+#define PVR_FENCE_CONTEXT_DESTROY_RETRIES 5
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+ do { \
+ if (pfnDumpDebugPrintf) \
+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \
+ ## __VA_ARGS__); \
+ else \
+ pr_err(fmt "\n", ## __VA_ARGS__); \
+ } while (0)
+
+static inline void
+pvr_fence_sync_signal(struct pvr_fence *pvr_fence, u32 fence_sync_flags)
+{
+ SyncCheckpointSignal(pvr_fence->sync_checkpoint, fence_sync_flags);
+}
+
+static inline bool
+pvr_fence_sync_is_signaled(struct pvr_fence *pvr_fence, u32 fence_sync_flags)
+{
+ return SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, fence_sync_flags);
+}
+
+static inline u32
+pvr_fence_sync_value(struct pvr_fence *pvr_fence)
+{
+ if (SyncCheckpointIsErrored(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ return PVRSRV_SYNC_CHECKPOINT_ERRORED;
+ else if (SyncCheckpointIsSignalled(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ return PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ else
+ return PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED;
+}
+
+static void
+pvr_fence_context_check_status(struct work_struct *data)
+{
+ PVRSRVCheckStatus(NULL);
+}
+
+void
+pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size)
+{
+ snprintf(str, size,
+ "%u ctx=%llu refs=%u",
+ atomic_read(&fctx->fence_seqno),
+ fctx->fence_context,
+ refcount_read(&fctx->kref.refcount));
+}
+
+static void
+pvr_fence_context_fences_dump(struct pvr_fence_context *fctx,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ struct pvr_fence *pvr_fence;
+ unsigned long flags;
+ char value[128];
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ pvr_context_value_str(fctx, value, sizeof(value));
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ "%s: @%s", fctx->name, value);
+ list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+ pvr_fence->base.ops->fence_value_str(&pvr_fence->base, value,
+ sizeof(value));
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ " @%s", value);
+
+ if (!is_pvr_fence(pvr_fence->fence)) {
+ struct dma_fence *fence = pvr_fence->fence;
+ const char *timeline_value_str = "unknown timeline value";
+ const char *fence_value_str = "unknown fence value";
+
+ if (fence->ops->timeline_value_str) {
+ fence->ops->timeline_value_str(fence, value,
+ sizeof(value));
+ timeline_value_str = value;
+ }
+
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+ " | %s: %s (driver: %s)",
+ fence->ops->get_timeline_name(fence),
+ timeline_value_str,
+ fence->ops->get_driver_name(fence));
+
+			if (fence->ops->fence_value_str) {
+				fence->ops->fence_value_str(fence, value,
+							    sizeof(value));
+				fence_value_str = value;
+			}
+
+			PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile,
+					  " | @%s (foreign)", fence_value_str);
+ }
+ }
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+}
+
+static inline unsigned int
+pvr_fence_context_seqno_next(struct pvr_fence_context *fctx)
+{
+ return atomic_inc_return(&fctx->fence_seqno) - 1;
+}
+
+static inline void
+pvr_fence_context_free_deferred(struct pvr_fence_context *fctx)
+{
+ struct pvr_fence *pvr_fence, *tmp;
+ LIST_HEAD(deferred_free_list);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_for_each_entry_safe(pvr_fence, tmp,
+ &fctx->deferred_free_list,
+ fence_head)
+ list_move(&pvr_fence->fence_head, &deferred_free_list);
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+ list_for_each_entry_safe(pvr_fence, tmp,
+ &deferred_free_list,
+ fence_head) {
+ list_del(&pvr_fence->fence_head);
+ SyncCheckpointFree(pvr_fence->sync_checkpoint);
+ dma_fence_free(&pvr_fence->base);
+ }
+}
+
+static void
+pvr_fence_context_signal_fences(void *data)
+{
+ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+ struct pvr_fence *pvr_fence, *tmp;
+ unsigned long flags;
+ LIST_HEAD(signal_list);
+
+	/*
+	 * We can't call dma_fence_signal while holding the lock, as we can
+	 * end up in a situation whereby pvr_fence_foreign_signal_sync, which
+	 * also takes the list lock, ends up being called as a result of the
+	 * dma_fence_signal below, i.e. dma_fence_signal(fence) ->
+	 * fence->callback() -> dma_fence_signal(foreign_fence) ->
+	 * foreign_fence->callback(), where the foreign_fence callback is
+	 * pvr_fence_foreign_signal_sync.
+	 *
+	 * So extract the items we intend to signal and add them to their own
+	 * queue.
+	 */
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_for_each_entry_safe(pvr_fence, tmp, &fctx->signal_list,
+ signal_head) {
+ if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ list_move(&pvr_fence->signal_head, &signal_list);
+ }
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+	list_for_each_entry_safe(pvr_fence, tmp, &signal_list, signal_head) {
+ PVR_FENCE_TRACE(&pvr_fence->base, "signalled fence (%s)\n",
+ pvr_fence->name);
+ trace_pvr_fence_signal_fence(pvr_fence);
+ list_del(&pvr_fence->signal_head);
+ dma_fence_signal(pvr_fence->fence);
+ dma_fence_put(pvr_fence->fence);
+ }
+
+ /*
+ * Take this opportunity to free up any fence objects we
+ * have deferred freeing.
+ */
+ pvr_fence_context_free_deferred(fctx);
+}
+
+void
+pvr_fence_context_signal_fences_nohw(void *data)
+{
+ pvr_fence_context_signal_fences(data);
+}
+
+static void
+pvr_fence_context_destroy_work(struct work_struct *data)
+{
+ struct delayed_work *dwork =
+ container_of(data, struct delayed_work, work);
+ struct pvr_fence_context *fctx =
+ container_of(dwork, struct pvr_fence_context, destroy_work);
+ PVRSRV_ERROR srv_err;
+
+ PVR_FENCE_CTX_TRACE(fctx, "destroyed fence context (%s)\n", fctx->name);
+
+ pvr_fence_context_free_deferred(fctx);
+
+ srv_err = SyncCheckpointContextDestroy(fctx->sync_checkpoint_context);
+ if (srv_err != PVRSRV_OK) {
+ if (fctx->destroy_retries_left) {
+ unsigned long destroy_delay_jiffies =
+ msecs_to_jiffies(fctx->destroy_delay_ms);
+
+ pr_debug("%s: SyncCheckpointContextDestroy of %p failed, retrying in %ums\n",
+ __func__, fctx->sync_checkpoint_context,
+ fctx->destroy_delay_ms);
+
+ fctx->destroy_retries_left--;
+ fctx->destroy_delay_ms *= 2;
+
+ schedule_delayed_work(&fctx->destroy_work,
+ destroy_delay_jiffies);
+ return;
+ } else {
+ if (fctx->global_complete)
+ pr_err("%s: SyncCheckpointContextDestroy of %p failed, Sync Checkpoint context leaked\n",
+ __func__, fctx->sync_checkpoint_context);
+ else
+ pr_err("%s: SyncCheckpointContextDestroy of %p failed, module unloadable\n",
+ __func__, fctx->sync_checkpoint_context);
+ }
+ } else {
+ unsigned int retries =
+ PVR_FENCE_CONTEXT_DESTROY_RETRIES -
+ fctx->destroy_retries_left;
+
+ if (retries)
+ pr_debug("%s: SyncCheckpointContextDestroy of %p successful, after %u %s\n",
+ __func__,
+ fctx->sync_checkpoint_context,
+ retries,
+ (retries == 1) ? "retry" : "retries");
+
+ if (!fctx->global_complete)
+ module_put(THIS_MODULE);
+ }
+
+ if (WARN_ON(!list_empty_careful(&fctx->fence_list)))
+ pvr_fence_context_fences_dump(fctx, NULL, NULL);
+
+ PVRSRVUnregisterDbgRequestNotify(fctx->dbg_request_handle);
+ PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+
+ if (fctx->global_complete)
+ complete(fctx->global_complete);
+ else
+ kfree(fctx);
+}
+
+static void
+pvr_fence_context_debug_request(void *data, u32 verbosity,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ struct pvr_fence_context *fctx = (struct pvr_fence_context *)data;
+
+ if (verbosity == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+ pvr_fence_context_fences_dump(fctx, pfnDumpDebugPrintf,
+ pvDumpDebugFile);
+}
+
+static struct pvr_fence_context *
+pvr_fence_context_create_common(void *dev_cookie,
+ struct workqueue_struct *fence_status_wq,
+ const char *name)
+{
+ struct pvr_fence_context *fctx;
+ PVRSRV_ERROR srv_err;
+
+ fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return NULL;
+
+ spin_lock_init(&fctx->lock);
+ atomic_set(&fctx->fence_seqno, 0);
+ INIT_WORK(&fctx->check_status_work, pvr_fence_context_check_status);
+ INIT_DELAYED_WORK(&fctx->destroy_work, pvr_fence_context_destroy_work);
+ spin_lock_init(&fctx->list_lock);
+ INIT_LIST_HEAD(&fctx->signal_list);
+ INIT_LIST_HEAD(&fctx->fence_list);
+ INIT_LIST_HEAD(&fctx->deferred_free_list);
+
+ fctx->destroy_retries_left = PVR_FENCE_CONTEXT_DESTROY_RETRIES;
+	fctx->destroy_delay_ms = PVR_FENCE_CONTEXT_DESTROY_INITIAL_WAIT_MS;
+
+ fctx->fence_wq = fence_status_wq;
+
+ fctx->fence_context = dma_fence_context_alloc(1);
+ strlcpy(fctx->name, name, sizeof(fctx->name));
+
+ srv_err = SyncCheckpointContextCreate(dev_cookie,
+ &fctx->sync_checkpoint_context);
+ if (srv_err != PVRSRV_OK) {
+ pr_err("%s: failed to create sync checkpoint context (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(srv_err));
+ goto err_free_fctx;
+ }
+
+ srv_err = PVRSRVRegisterCmdCompleteNotify(&fctx->cmd_complete_handle,
+ pvr_fence_context_signal_fences,
+ fctx);
+ if (srv_err != PVRSRV_OK) {
+ pr_err("%s: failed to register command complete callback (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(srv_err));
+ goto err_sync_prim_context_destroy;
+ }
+
+ srv_err = PVRSRVRegisterDbgRequestNotify(&fctx->dbg_request_handle,
+ dev_cookie,
+ pvr_fence_context_debug_request,
+ DEBUG_REQUEST_LINUXFENCE,
+ fctx);
+ if (srv_err != PVRSRV_OK) {
+ pr_err("%s: failed to register debug request callback (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(srv_err));
+ goto err_unregister_cmd_complete_notify;
+ }
+
+ kref_init(&fctx->kref);
+
+ PVR_FENCE_CTX_TRACE(fctx, "created fence context (%s)\n", name);
+ trace_pvr_fence_context_create(fctx);
+
+ return fctx;
+
+err_unregister_cmd_complete_notify:
+ PVRSRVUnregisterCmdCompleteNotify(fctx->cmd_complete_handle);
+err_sync_prim_context_destroy:
+ SyncCheckpointContextDestroy(fctx->sync_checkpoint_context);
+err_free_fctx:
+ kfree(fctx);
+ return NULL;
+}
+
+/**
+ * pvr_fence_context_create - creates a PVR fence context
+ * @dev_cookie: services device cookie
+ * @fence_status_wq: work queue on which fence status work is queued
+ * @name: context name (used for debugging)
+ *
+ * Creates a PVR fence context that can be used to create PVR fences or to
+ * create PVR fences from an existing fence.
+ *
+ * pvr_fence_context_destroy should be called to clean up the fence context.
+ *
+ * Returns NULL if a context cannot be created.
+ */
+struct pvr_fence_context *
+pvr_fence_context_create(void *dev_cookie,
+ struct workqueue_struct *fence_status_wq,
+ const char *name)
+{
+ struct pvr_fence_context *fctx;
+
+ if (!try_module_get(THIS_MODULE)) {
+ pr_err("%s: failed to get module reference\n", __func__);
+ return NULL;
+ }
+
+ fctx = pvr_fence_context_create_common(dev_cookie, fence_status_wq,
+ name);
+ if (!fctx)
+ module_put(THIS_MODULE);
+
+ return fctx;
+}
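+
+/*
+ * Example usage (a minimal sketch, not taken from this driver): dev_cookie,
+ * wq and timeline_fd are assumed to be a valid services device cookie, a
+ * workqueue and a timeline file descriptor respectively.
+ *
+ *	struct pvr_fence_context *fctx;
+ *	struct pvr_fence *fence;
+ *
+ *	fctx = pvr_fence_context_create(dev_cookie, wq, "example-timeline");
+ *	if (!fctx)
+ *		return -ENOMEM;
+ *
+ *	fence = pvr_fence_create(fctx, timeline_fd, "example-fence");
+ *	if (!fence) {
+ *		pvr_fence_context_destroy(fctx);
+ *		return -ENOMEM;
+ *	}
+ *
+ *	pvr_fence_destroy(fence);
+ *	pvr_fence_context_destroy(fctx);
+ */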
+
+/**
+ * pvr_global_fence_context_create - creates a global PVR fence context
+ * @dev_cookie: services device cookie
+ * @fence_status_wq: work queue on which fence status work is queued
+ * @name: context name (used for debugging)
+ *
+ * Creates a PVR fence context that can be used to create PVR fences or to
+ * create PVR fences from an existing fence. Unlike pvr_fence_context_create,
+ * this doesn't take a module reference, so can be used to create fence
+ * contexts at module load time, without preventing module unload.
+ *
+ * pvr_fence_context_destroy should be called to clean up the fence context.
+ *
+ * Returns NULL if a context cannot be created.
+ */
+struct pvr_fence_context *
+pvr_global_fence_context_create(void *dev_cookie,
+ struct workqueue_struct *fence_status_wq,
+ const char *name)
+{
+ struct pvr_fence_context *fctx;
+ struct completion *global_complete;
+
+ global_complete = kmalloc(sizeof(*global_complete), GFP_KERNEL);
+ if (!global_complete)
+ return NULL;
+
+ fctx = pvr_fence_context_create_common(dev_cookie, fence_status_wq,
+ name);
+
+ if (fctx) {
+ init_completion(global_complete);
+ fctx->global_complete = global_complete;
+ } else {
+ kfree(global_complete);
+ }
+
+ return fctx;
+}
+
+static void pvr_fence_context_destroy_kref(struct kref *kref)
+{
+ struct pvr_fence_context *fctx =
+ container_of(kref, struct pvr_fence_context, kref);
+ bool is_global_context = (fctx->global_complete != NULL);
+
+ PVR_FENCE_CTX_TRACE(fctx,
+ "scheduling destruction of fence context (%s)\n",
+ fctx->name);
+ trace_pvr_fence_context_destroy_kref(fctx);
+
+ schedule_delayed_work(&fctx->destroy_work, 0);
+
+ if (is_global_context) {
+ wait_for_completion(fctx->global_complete);
+ kfree(fctx->global_complete);
+ kfree(fctx);
+ }
+}
+
+/**
+ * pvr_fence_context_destroy - destroys a context
+ * @fctx: PVR fence context to destroy
+ *
+ * Destroys a PVR fence context with the expectation that all fences have been
+ * destroyed.
+ */
+void
+pvr_fence_context_destroy(struct pvr_fence_context *fctx)
+{
+ trace_pvr_fence_context_destroy(fctx);
+
+ kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+}
+
+static const char *
+pvr_fence_get_driver_name(struct dma_fence *fence)
+{
+ return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+ if (pvr_fence)
+ return pvr_fence->fctx->name;
+ return NULL;
+}
+
+static
+void pvr_fence_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+ if (pvr_fence) {
+ snprintf(str, size,
+ "%u: (%s%s) refs=%u fwaddr=%#08x enqueue=%u status=%-9s %s%s",
+ pvr_fence->fence->seqno,
+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &pvr_fence->fence->flags) ? "+" : "-",
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &pvr_fence->fence->flags) ? "+" : "-",
+ refcount_read(&pvr_fence->fence->refcount.refcount),
+ SyncCheckpointGetFirmwareAddr(
+ pvr_fence->sync_checkpoint),
+ SyncCheckpointGetEnqueuedCount(pvr_fence->sync_checkpoint),
+ SyncCheckpointGetStateString(pvr_fence->sync_checkpoint),
+ pvr_fence->name,
+ (&pvr_fence->base != pvr_fence->fence) ?
+ "(foreign)" : "");
+ }
+}
+
+static
+void pvr_fence_timeline_value_str(struct dma_fence *fence, char *str, int size)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+ if (pvr_fence)
+ pvr_context_value_str(pvr_fence->fctx, str, size);
+}
+
+static bool
+pvr_fence_enable_signaling(struct dma_fence *fence)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+ unsigned long flags;
+
+ if (!pvr_fence)
+ return false;
+
+ WARN_ON_SMP(!spin_is_locked(&pvr_fence->fctx->lock));
+
+ if (pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ return false;
+
+ dma_fence_get(&pvr_fence->base);
+
+ spin_lock_irqsave(&pvr_fence->fctx->list_lock, flags);
+ list_add_tail(&pvr_fence->signal_head, &pvr_fence->fctx->signal_list);
+ spin_unlock_irqrestore(&pvr_fence->fctx->list_lock, flags);
+
+ PVR_FENCE_TRACE(&pvr_fence->base, "signalling enabled (%s)\n",
+ pvr_fence->name);
+ trace_pvr_fence_enable_signaling(pvr_fence);
+
+ return true;
+}
+
+static bool
+pvr_fence_is_signaled(struct dma_fence *fence)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+ if (pvr_fence)
+ return pvr_fence_sync_is_signaled(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC);
+ return false;
+}
+
+static void
+pvr_fence_release(struct dma_fence *fence)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+ unsigned long flags;
+
+ if (pvr_fence) {
+ struct pvr_fence_context *fctx = pvr_fence->fctx;
+ bool is_global_context = (fctx->global_complete != NULL);
+
+ PVR_FENCE_TRACE(&pvr_fence->base, "released fence (%s)\n",
+ pvr_fence->name);
+ trace_pvr_fence_release(pvr_fence);
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_move(&pvr_fence->fence_head,
+ &fctx->deferred_free_list);
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+ kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+
+ if (is_global_context)
+ module_put(THIS_MODULE);
+ }
+}
+
+const struct dma_fence_ops pvr_fence_ops = {
+ .get_driver_name = pvr_fence_get_driver_name,
+ .get_timeline_name = pvr_fence_get_timeline_name,
+ .fence_value_str = pvr_fence_fence_value_str,
+ .timeline_value_str = pvr_fence_timeline_value_str,
+ .enable_signaling = pvr_fence_enable_signaling,
+ .signaled = pvr_fence_is_signaled,
+ .wait = dma_fence_default_wait,
+ .release = pvr_fence_release,
+};
+
+/**
+ * pvr_fence_create - creates a PVR fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @timeline_fd: timeline file descriptor used when allocating the backing
+ *               sync checkpoint
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence.
+ *
+ * Once the fence is finished with, pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create(struct pvr_fence_context *fctx, int timeline_fd,
+ const char *name)
+{
+ struct pvr_fence *pvr_fence;
+ unsigned int seqno;
+ unsigned long flags;
+ PVRSRV_ERROR srv_err;
+
+ /*
+ * If the fence context is global, take a reference on the module
+ * to ensure the driver can't be unloaded while there are outstanding
+ * fences.
+ */
+ if (fctx->global_complete && !try_module_get(THIS_MODULE))
+ return NULL;
+
+ pvr_fence = kzalloc(sizeof(*pvr_fence), GFP_KERNEL);
+ if (!pvr_fence)
+ goto err_put_module;
+
+ srv_err = SyncCheckpointAlloc(fctx->sync_checkpoint_context,
+ (PVRSRV_TIMELINE) timeline_fd, name, &pvr_fence->sync_checkpoint);
+
+ if (srv_err != PVRSRV_OK)
+ goto err_free_fence;
+
+ INIT_LIST_HEAD(&pvr_fence->fence_head);
+ INIT_LIST_HEAD(&pvr_fence->signal_head);
+ pvr_fence->fctx = fctx;
+ seqno = pvr_fence_context_seqno_next(fctx);
+ /* Add the seqno to the fence name for easier debugging */
+ snprintf(pvr_fence->name, sizeof(pvr_fence->name), "%d-%s",
+ seqno, name);
+ pvr_fence->fence = &pvr_fence->base;
+
+ dma_fence_init(&pvr_fence->base, &pvr_fence_ops, &fctx->lock,
+ fctx->fence_context, seqno);
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+ kref_get(&fctx->kref);
+
+ PVR_FENCE_TRACE(&pvr_fence->base, "created fence (%s)\n", name);
+ trace_pvr_fence_create(pvr_fence);
+
+ return pvr_fence;
+
+err_free_fence:
+ kfree(pvr_fence);
+err_put_module:
+ if (fctx->global_complete)
+ module_put(THIS_MODULE);
+ return NULL;
+}
+
+static const char *
+pvr_fence_foreign_get_driver_name(struct dma_fence *fence)
+{
+ return PVR_LDM_DRIVER_REGISTRATION_NAME;
+}
+
+static const char *
+pvr_fence_foreign_get_timeline_name(struct dma_fence *fence)
+{
+ return "foreign";
+}
+
+static
+void pvr_fence_foreign_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+ u32 sync_addr = 0;
+ u32 sync_value_next;
+
+ if (WARN_ON(!pvr_fence))
+ return;
+
+ sync_addr = SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint);
+ sync_value_next = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+ /*
+	 * Include the fence flag bits from the foreign fence instead of our
+	 * shadow copy, because the shadow fence flag bits aren't used.
+ */
+ snprintf(str, size,
+ "%u: (%s%s) refs=%u fwaddr=%#08x cur=%#08x nxt=%#08x %s",
+ fence->seqno,
+ test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &pvr_fence->fence->flags) ? "+" : "-",
+ test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &pvr_fence->fence->flags) ? "+" : "-",
+ refcount_read(&fence->refcount.refcount),
+ sync_addr,
+ pvr_fence_sync_value(pvr_fence),
+ sync_value_next,
+ pvr_fence->name);
+}
+
+static
+void pvr_fence_foreign_timeline_value_str(struct dma_fence *fence, char *str, int size)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+
+ if (pvr_fence)
+ pvr_context_value_str(pvr_fence->fctx, str, size);
+}
+
+static bool
+pvr_fence_foreign_enable_signaling(struct dma_fence *fence)
+{
+	WARN(1, "cannot enable signalling on foreign fence\n");
+ return false;
+}
+
+static signed long
+pvr_fence_foreign_wait(struct dma_fence *fence, bool intr, signed long timeout)
+{
+	WARN(1, "cannot wait on foreign fence\n");
+ return 0;
+}
+
+static void
+pvr_fence_foreign_release(struct dma_fence *fence)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+ unsigned long flags;
+
+ if (pvr_fence) {
+ struct pvr_fence_context *fctx = pvr_fence->fctx;
+ bool is_global_context = (fctx->global_complete != NULL);
+ struct dma_fence *foreign_fence = pvr_fence->fence;
+
+ PVR_FENCE_TRACE(&pvr_fence->base,
+ "released fence for foreign fence %llu#%d (%s)\n",
+ (u64) pvr_fence->fence->context,
+ pvr_fence->fence->seqno, pvr_fence->name);
+ trace_pvr_fence_foreign_release(pvr_fence);
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_move(&pvr_fence->fence_head,
+ &fctx->deferred_free_list);
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+
+ dma_fence_put(foreign_fence);
+
+ kref_put(&fctx->kref,
+ pvr_fence_context_destroy_kref);
+
+ if (is_global_context)
+ module_put(THIS_MODULE);
+ }
+}
+
+const struct dma_fence_ops pvr_fence_foreign_ops = {
+ .get_driver_name = pvr_fence_foreign_get_driver_name,
+ .get_timeline_name = pvr_fence_foreign_get_timeline_name,
+ .fence_value_str = pvr_fence_foreign_fence_value_str,
+ .timeline_value_str = pvr_fence_foreign_timeline_value_str,
+ .enable_signaling = pvr_fence_foreign_enable_signaling,
+ .wait = pvr_fence_foreign_wait,
+ .release = pvr_fence_foreign_release,
+};
+
+static void
+pvr_fence_foreign_signal_sync(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct pvr_fence *pvr_fence = container_of(cb, struct pvr_fence, cb);
+ struct pvr_fence_context *fctx = pvr_fence->fctx;
+
+ WARN_ON_ONCE(is_pvr_fence(fence));
+
+	/*
+	 * The callback registered by dma_fence_add_callback() can be called
+	 * from an atomic context.
+	 */
+ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_CTX_ATOMIC);
+
+ trace_pvr_fence_foreign_signal(pvr_fence);
+
+ queue_work(fctx->fence_wq, &fctx->check_status_work);
+
+ PVR_FENCE_TRACE(&pvr_fence->base,
+ "foreign fence %llu#%d signalled (%s)\n",
+ (u64) pvr_fence->fence->context,
+ pvr_fence->fence->seqno, pvr_fence->name);
+
+ /* Drop the reference on the base fence */
+ dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_create_from_fence - creates a PVR fence from a fence
+ * @fctx: PVR fence context on which the PVR fence should be created
+ * @fence: fence from which the PVR fence should be created
+ * @name: PVR fence name (used for debugging)
+ *
+ * Creates a PVR fence from an existing fence. If the fence is a foreign fence,
+ * i.e. one that doesn't originate from a PVR fence context, then a new PVR
+ * fence will be created. Otherwise, a reference will be taken on the underlying
+ * fence and the PVR fence will be returned.
+ *
+ * Once the fence is finished with, pvr_fence_destroy should be called.
+ *
+ * Returns NULL if a PVR fence cannot be created.
+ */
+struct pvr_fence *
+pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+ struct dma_fence *fence,
+ const char *name)
+{
+ struct pvr_fence *pvr_fence = to_pvr_fence(fence);
+ unsigned int seqno;
+ unsigned long flags;
+ PVRSRV_ERROR srv_err;
+ int err;
+
+ if (pvr_fence) {
+ if (WARN_ON(fence->ops == &pvr_fence_foreign_ops))
+ return NULL;
+ dma_fence_get(fence);
+
+ PVR_FENCE_TRACE(fence, "created fence from PVR fence (%s)\n",
+ name);
+ return pvr_fence;
+ }
+
+ /*
+ * If the fence context is global, take a reference on the module
+ * to ensure the driver can't be unloaded while there are outstanding
+ * fences.
+ */
+ if (fctx->global_complete && !try_module_get(THIS_MODULE))
+ return NULL;
+
+ pvr_fence = kzalloc(sizeof(*pvr_fence), GFP_KERNEL);
+ if (!pvr_fence)
+ goto err_put_module;
+
+ srv_err = SyncCheckpointAlloc(fctx->sync_checkpoint_context,
+ SYNC_CHECKPOINT_FOREIGN_CHECKPOINT,
+ name, &pvr_fence->sync_checkpoint);
+ if (srv_err != PVRSRV_OK)
+ goto err_free_pvr_fence;
+
+ INIT_LIST_HEAD(&pvr_fence->fence_head);
+ INIT_LIST_HEAD(&pvr_fence->signal_head);
+ pvr_fence->fctx = fctx;
+ pvr_fence->fence = dma_fence_get(fence);
+ seqno = pvr_fence_context_seqno_next(fctx);
+ /* Add the seqno to the fence name for easier debugging */
+ snprintf(pvr_fence->name, sizeof(pvr_fence->name), "%d-%s",
+ seqno, name);
+ /*
+ * We use the base fence to refcount the PVR fence and to do the
+ * necessary clean up once the refcount drops to 0.
+ */
+ dma_fence_init(&pvr_fence->base, &pvr_fence_foreign_ops, &fctx->lock,
+ fctx->fence_context, seqno);
+
+ /*
+ * Take an extra reference on the base fence that gets dropped when the
+ * foreign fence is signalled.
+ */
+ dma_fence_get(&pvr_fence->base);
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_add_tail(&pvr_fence->fence_head, &fctx->fence_list);
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+ kref_get(&fctx->kref);
+
+ PVR_FENCE_TRACE(&pvr_fence->base,
+ "created fence from foreign fence %llu#%d (%s)\n",
+ (u64) pvr_fence->fence->context,
+ pvr_fence->fence->seqno, name);
+
+ err = dma_fence_add_callback(fence, &pvr_fence->cb,
+ pvr_fence_foreign_signal_sync);
+ if (err) {
+ if (err != -ENOENT)
+ goto err_put_ref;
+
+		/*
+		 * The fence has already signalled, so set the sync as
+		 * signalled. The "signalled" hwperf packet should be emitted
+		 * because the callback won't be called for an already
+		 * signalled fence; hence the PVRSRV_FENCE_FLAG_NONE flag.
+		 */
+ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
+ PVR_FENCE_TRACE(&pvr_fence->base,
+ "foreign fence %llu#%d already signaled (%s)\n",
+ (u64) pvr_fence->fence->context,
+ pvr_fence->fence->seqno,
+ name);
+ dma_fence_put(&pvr_fence->base);
+ }
+
+ trace_pvr_fence_foreign_create(pvr_fence);
+
+ return pvr_fence;
+
+err_put_ref:
+ kref_put(&fctx->kref, pvr_fence_context_destroy_kref);
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ list_del(&pvr_fence->fence_head);
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+ SyncCheckpointFree(pvr_fence->sync_checkpoint);
+err_free_pvr_fence:
+ kfree(pvr_fence);
+err_put_module:
+ if (fctx->global_complete)
+ module_put(THIS_MODULE);
+ return NULL;
+}
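+
+/*
+ * Example usage (a minimal sketch, assuming "foreign" is a struct dma_fence
+ * obtained from another driver, e.g. out of a sync file): wrapping it yields
+ * a sync checkpoint the GPU can wait on.
+ *
+ *	struct pvr_fence *pf;
+ *
+ *	pf = pvr_fence_create_from_fence(fctx, foreign, "example-foreign");
+ *	if (!pf)
+ *		return -ENOMEM;
+ *	checkpoint = pvr_fence_get_checkpoint(pf);
+ */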
+
+/**
+ * pvr_fence_destroy - destroys a PVR fence
+ * @pvr_fence: PVR fence to destroy
+ *
+ * Destroys a PVR fence. Upon return, the PVR fence may still exist if something
+ * else still references the underlying fence, e.g. a reservation object, or if
+ * software signalling has been enabled and the fence hasn't yet been signalled.
+ */
+void
+pvr_fence_destroy(struct pvr_fence *pvr_fence)
+{
+ PVR_FENCE_TRACE(&pvr_fence->base, "destroyed fence (%s)\n",
+ pvr_fence->name);
+
+ dma_fence_put(&pvr_fence->base);
+}
+
+/**
+ * pvr_fence_sw_signal - signals a PVR fence sync
+ * @pvr_fence: PVR fence to signal
+ *
+ * Sets the PVR fence sync value to signalled.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sw_signal(struct pvr_fence *pvr_fence)
+{
+ if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+ return -EINVAL;
+
+ pvr_fence_sync_signal(pvr_fence, PVRSRV_FENCE_FLAG_NONE);
+
+ queue_work(pvr_fence->fctx->fence_wq,
+ &pvr_fence->fctx->check_status_work);
+
+ PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync signalled (%s)\n",
+ pvr_fence->name);
+
+ return 0;
+}
+
+/**
+ * pvr_fence_sw_error - errors the sync checkpoint backing a PVR fence
+ * @pvr_fence: PVR fence to error
+ *
+ * Sets the PVR fence sync checkpoint value to errored.
+ *
+ * Returns -EINVAL if the PVR fence represents a foreign fence.
+ */
+int
+pvr_fence_sw_error(struct pvr_fence *pvr_fence)
+{
+ if (!is_our_fence(pvr_fence->fctx, &pvr_fence->base))
+ return -EINVAL;
+
+ SyncCheckpointError(pvr_fence->sync_checkpoint, PVRSRV_FENCE_FLAG_NONE);
+ PVR_FENCE_TRACE(&pvr_fence->base, "sw set fence sync errored (%s)\n",
+ pvr_fence->name);
+
+ return 0;
+}
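+
+/*
+ * Example (a minimal sketch): software-signalling or erroring a native
+ * fence, e.g. in a debug path. Both helpers reject foreign fences.
+ *
+ *	if (pvr_fence_sw_signal(pvr_fence) == -EINVAL)
+ *		pr_err("not a native PVR fence\n");
+ */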
+
+int
+pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+ struct _SYNC_CHECKPOINT **fence_checkpoints)
+{
+ struct _SYNC_CHECKPOINT **next_fence_checkpoint = fence_checkpoints;
+ struct pvr_fence **next_pvr_fence = pvr_fences;
+	u32 fence_checkpoint_idx;
+
+	for (fence_checkpoint_idx = 0; fence_checkpoint_idx < nr_fences;
+	     fence_checkpoint_idx++) {
+		struct pvr_fence *next_fence = *next_pvr_fence++;
+
+		*next_fence_checkpoint++ = next_fence->sync_checkpoint;
+		/* Take a reference on the sync checkpoint (it will be dropped
+		 * later by the kick code).
+		 */
+		SyncCheckpointTakeRef(next_fence->sync_checkpoint);
+	}
+
+ return 0;
+}
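+
+/*
+ * Example (a minimal sketch, assuming "fences" is an array of "nr" native
+ * PVR fences; MAX_FENCES is a placeholder bound): the checkpoint references
+ * taken here are expected to be dropped later by the kick code.
+ *
+ *	struct _SYNC_CHECKPOINT *checkpoints[MAX_FENCES];
+ *
+ *	pvr_fence_get_checkpoints(fences, nr, checkpoints);
+ */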
+
+struct _SYNC_CHECKPOINT *
+pvr_fence_get_checkpoint(struct pvr_fence *update_fence)
+{
+ return update_fence->sync_checkpoint;
+}
+
+/**
+ * pvr_fence_dump_info_on_stalled_ufos - dump debug info for stalled UFOs
+ * @fctx: PVR fence context whose active fences should be checked
+ * @nr_ufos: number of UFO FW addresses in @vaddrs
+ * @vaddrs: array of FW addresses of UFOs which the driver is waiting on
+ *
+ * Called from pvr_sync_file.c if the driver determines any GPU work is stuck
+ * waiting for a sync checkpoint representing a foreign sync to be signalled.
+ * Outputs debug information to the kernel log for the Linux fences which
+ * would be responsible for signalling the sync checkpoints indicated by the
+ * UFO addresses.
+ *
+ * Returns the number of UFOs in the array which were found to be associated
+ * with foreign syncs.
+ */
+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
+ u32 nr_ufos,
+ u32 *vaddrs)
+{
+ int our_ufo_ct = 0;
+ struct pvr_fence *pvr_fence;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fctx->list_lock, flags);
+ /* dump info on any ufos in our active list */
+ list_for_each_entry(pvr_fence, &fctx->fence_list, fence_head) {
+ u32 *this_ufo_vaddr = vaddrs;
+		u32 ufo_num;
+		DUMPDEBUG_PRINTF_FUNC *pfnDummy = NULL;
+
+		for (ufo_num = 0; ufo_num < nr_ufos; ufo_num++, this_ufo_vaddr++) {
+			u32 fence_ufo_addr =
+				SyncCheckpointGetFirmwareAddr(pvr_fence->sync_checkpoint);
+
+			if (fence_ufo_addr == *this_ufo_vaddr) {
+				/* Dump sync info */
+				PVR_DUMPDEBUG_LOG(pfnDummy, NULL,
+					"\tSyncID = %d, FWAddr = 0x%08x: TLID = %d (Foreign Fence - [%p] %s)",
+					SyncCheckpointGetId(pvr_fence->sync_checkpoint),
+					fence_ufo_addr,
+					SyncCheckpointGetTimeline(pvr_fence->sync_checkpoint),
+					pvr_fence->fence,
+					pvr_fence->name);
+ our_ufo_ct++;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&fctx->list_lock, flags);
+ return our_ufo_ct;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_fence.h b/drivers/gpu/drm/img-rogue/1.10/pvr_fence.h
new file mode 100644
index 00000000000000..8743d99917387d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_fence.h
@@ -0,0 +1,234 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR Linux fence interface
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_FENCE_H__)
+#define __PVR_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+static inline void pvr_fence_cleanup(void)
+{
+}
+#else
+#include "pvr_linux_fence.h"
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+
+struct _SYNC_CHECKPOINT_CONTEXT;
+struct _SYNC_CHECKPOINT;
+
+/**
+ * pvr_fence_context - PVR fence context used to create and manage PVR fences
+ * @lock: protects the context and fences created on the context
+ * @name: fence context name (used for debugging)
+ * @dbg_request_handle: handle for callback used to dump debug data
+ * @sync_checkpoint_context: context used to create services sync checkpoints
+ * @fence_context: fence context with which to associate fences
+ * @fence_seqno: sequence number to use for the next fence
+ * @fence_wq: work queue for signalled fence work
+ * @check_status_work: work item used to inform services when a foreign fence
+ * has signalled
+ * @cmd_complete_handle: handle for callback used to signal fences when fence
+ * syncs are met
+ * @list_lock: protects the active and active foreign lists
+ * @signal_list: list of fences waiting to be signalled
+ * @fence_list: list of fences (used for debugging)
+ * @deferred_free_list: list of fences to be freed once we are no longer
+ * holding spinlocks. The frees are performed when an update fence is
+ * signalled or when the context is freed.
+ * @kref: reference count on the fence context
+ * @destroy_work: delayed work used to destroy the context once idle
+ * @destroy_retries_left: remaining retries for sync checkpoint context
+ * destruction
+ * @destroy_delay_ms: current delay before the next destruction retry
+ * @global_complete: completion signalled on destruction of a global context,
+ * or NULL for a non-global context
+ */
+struct pvr_fence_context {
+ spinlock_t lock;
+ char name[32];
+ void *dbg_request_handle;
+
+ struct _SYNC_CHECKPOINT_CONTEXT *sync_checkpoint_context;
+ u64 fence_context;
+ atomic_t fence_seqno;
+
+ struct workqueue_struct *fence_wq;
+ struct work_struct check_status_work;
+
+ void *cmd_complete_handle;
+
+ spinlock_t list_lock;
+ struct list_head signal_list;
+ struct list_head fence_list;
+ struct list_head deferred_free_list;
+
+ struct kref kref;
+ struct delayed_work destroy_work;
+ unsigned int destroy_retries_left;
+ unsigned int destroy_delay_ms;
+ struct completion *global_complete;
+};
+
+/**
+ * pvr_fence - PVR fence that represents both native and foreign fences
+ * @base: fence structure
+ * @fctx: fence context on which this fence was created
+ * @name: fence name (used for debugging)
+ * @fence: pointer to base fence structure or foreign fence
+ * @sync_checkpoint: services sync checkpoint used by hardware
+ * @fence_head: entry on the context fence and deferred free list
+ * @signal_head: entry on the context signal list
+ * @cb: foreign fence callback to set the sync to signalled
+ */
+struct pvr_fence {
+ struct dma_fence base;
+ struct pvr_fence_context *fctx;
+ char name[32];
+
+ struct dma_fence *fence;
+ struct _SYNC_CHECKPOINT *sync_checkpoint;
+
+ struct list_head fence_head;
+ struct list_head signal_head;
+ struct dma_fence_cb cb;
+};
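+
+/*
+ * For a native fence, @fence points back at @base; for a PVR fence wrapping
+ * a foreign dma_fence the two differ. An illustrative check (not part of
+ * this interface), mirroring the test in pvr_fence_fence_value_str():
+ *
+ *	static inline bool pvr_fence_is_foreign(struct pvr_fence *f)
+ *	{
+ *		return f->fence != &f->base;
+ *	}
+ */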
+
+extern const struct dma_fence_ops pvr_fence_ops;
+extern const struct dma_fence_ops pvr_fence_foreign_ops;
+
+static inline bool is_our_fence(struct pvr_fence_context *fctx,
+ struct dma_fence *fence)
+{
+ return (fence->context == fctx->fence_context);
+}
+
+static inline bool is_pvr_fence(struct dma_fence *fence)
+{
+ return ((fence->ops == &pvr_fence_ops) ||
+ (fence->ops == &pvr_fence_foreign_ops));
+}
+
+static inline struct pvr_fence *to_pvr_fence(struct dma_fence *fence)
+{
+ if (is_pvr_fence(fence))
+ return container_of(fence, struct pvr_fence, base);
+
+ return NULL;
+}
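+
+/*
+ * Illustrative only: a caller holding an arbitrary dma_fence might use these
+ * helpers to decide whether the fence can be used as-is or needs wrapping,
+ * much as pvr_fence_create_from_fence() does internally.
+ *
+ *	if (is_pvr_fence(fence))
+ *		pvr_fence = to_pvr_fence(fence);
+ *	else
+ *		pvr_fence = pvr_fence_create_from_fence(fctx, fence, name);
+ */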
+
+struct pvr_fence_context *pvr_fence_context_create(void *dev_cookie,
+ struct workqueue_struct *fence_status_wq,
+ const char *name);
+struct pvr_fence_context *pvr_global_fence_context_create(void *dev_cookie,
+ struct workqueue_struct *fence_status_wq,
+ const char *name);
+void pvr_fence_context_destroy(struct pvr_fence_context *fctx);
+void pvr_context_value_str(struct pvr_fence_context *fctx, char *str, int size);
+
+struct pvr_fence *pvr_fence_create(struct pvr_fence_context *fctx,
+ int timeline_fd, const char *name);
+struct pvr_fence *pvr_fence_create_from_fence(struct pvr_fence_context *fctx,
+ struct dma_fence *fence,
+ const char *name);
+void pvr_fence_destroy(struct pvr_fence *pvr_fence);
+int pvr_fence_sw_signal(struct pvr_fence *pvr_fence);
+int pvr_fence_sw_error(struct pvr_fence *pvr_fence);
+
+int pvr_fence_get_checkpoints(struct pvr_fence **pvr_fences, u32 nr_fences,
+ struct _SYNC_CHECKPOINT **fence_checkpoints);
+struct _SYNC_CHECKPOINT *pvr_fence_get_checkpoint(struct pvr_fence *update_fence);
+
+void pvr_fence_context_signal_fences_nohw(void *data);
+
+u32 pvr_fence_dump_info_on_stalled_ufos(struct pvr_fence_context *fctx,
+ u32 nr_ufos,
+ u32 *vaddrs);
+
+static inline void pvr_fence_cleanup(void)
+{
+ /*
+ * Ensure any outstanding work needed to destroy PVR fence
+ * contexts has completed, by flushing the global workqueue.
+ */
+ flush_scheduled_work();
+}
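+
+/*
+ * Example (a sketch of an assumed module exit path, not taken from this
+ * driver): pvr_fence_cleanup() should run after the last fence context has
+ * been destroyed, so the deferred destroy work is flushed before unload.
+ *
+ *	static void __exit example_exit(void)
+ *	{
+ *		pvr_fence_context_destroy(fctx);
+ *		pvr_fence_cleanup();
+ *	}
+ */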
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...) \
+ do { \
+ struct pvr_fence_context *__fctx = (c); \
+ pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define PVR_FENCE_CTX_TRACE(c, fmt, ...)
+#endif
+
+#define PVR_FENCE_CTX_WARN(c, fmt, ...) \
+ do { \
+ struct pvr_fence_context *__fctx = (c); \
+ pr_warn("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \
+ ## __VA_ARGS__); \
+ } while (0)
+
+#define PVR_FENCE_CTX_ERR(c, fmt, ...) \
+ do { \
+ struct pvr_fence_context *__fctx = (c); \
+ pr_err("c %llu: (PVR) " fmt, (u64) __fctx->fence_context, \
+ ## __VA_ARGS__); \
+ } while (0)
+
+#if defined(PVR_FENCE_DEBUG)
+#define PVR_FENCE_TRACE(f, fmt, ...) \
+ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+#else
+#define PVR_FENCE_TRACE(f, fmt, ...)
+#endif
+
+#define PVR_FENCE_WARN(f, fmt, ...) \
+ DMA_FENCE_WARN(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#define PVR_FENCE_ERR(f, fmt, ...) \
+ DMA_FENCE_ERR(f, "(PVR) " fmt, ## __VA_ARGS__)
+
+#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)) */
+#endif /* !defined(__PVR_FENCE_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_fence_trace.h b/drivers/gpu/drm/img-rogue/1.10/pvr_fence_trace.h
new file mode 100644
index 00000000000000..2ded06861e8c5a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_fence_trace.h
@@ -0,0 +1,224 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pvr_fence
+
+#if !defined(_TRACE_PVR_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PVR_FENCE_H
+
+#include <linux/tracepoint.h>
+
+struct pvr_fence;
+struct pvr_fence_context;
+
+DECLARE_EVENT_CLASS(pvr_fence_context,
+
+ TP_PROTO(struct pvr_fence_context *fctx),
+
+ TP_ARGS(fctx),
+
+ TP_STRUCT__entry(
+ __string(name, fctx->name)
+ __array(char, val, 128)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, fctx->name)
+ pvr_context_value_str(fctx, __entry->val, sizeof(__entry->val));
+ ),
+
+ TP_printk("name=%s val=%s",
+ __get_str(name),
+ __entry->val
+ )
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_create,
+
+ TP_PROTO(struct pvr_fence_context *fctx),
+
+ TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy,
+
+ TP_PROTO(struct pvr_fence_context *fctx),
+
+ TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_destroy_kref,
+
+ TP_PROTO(struct pvr_fence_context *fctx),
+
+ TP_ARGS(fctx)
+);
+
+DEFINE_EVENT(pvr_fence_context, pvr_fence_context_signal_fences,
+
+ TP_PROTO(struct pvr_fence_context *fctx),
+
+ TP_ARGS(fctx)
+);
+
+DECLARE_EVENT_CLASS(pvr_fence,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
+ __string(driver, fence->base.ops->get_driver_name(&fence->base))
+ __string(timeline, fence->base.ops->get_timeline_name(&fence->base))
+ __array(char, val, 128)
+ __field(u64, context)
+ ),
+
+ TP_fast_assign(
+ __assign_str(driver, fence->base.ops->get_driver_name(&fence->base))
+ __assign_str(timeline, fence->base.ops->get_timeline_name(&fence->base))
+ fence->base.ops->fence_value_str(&fence->base, __entry->val, sizeof(__entry->val));
+ __entry->context = fence->base.context;
+ ),
+
+ TP_printk("driver=%s timeline=%s ctx=%llu val=%s",
+ __get_str(driver), __get_str(timeline), __entry->context, __entry->val
+ )
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_create,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_release,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_enable_signaling,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence, pvr_fence_signal_fence,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+DECLARE_EVENT_CLASS(pvr_fence_foreign,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
+ __string(driver, fence->base.ops->get_driver_name(&fence->base))
+ __string(timeline, fence->base.ops->get_timeline_name(&fence->base))
+ __array(char, val, 128)
+ __field(u64, context)
+ __string(foreign_driver, fence->fence->ops->get_driver_name ? fence->fence->ops->get_driver_name(fence->fence) : "unknown")
+ __string(foreign_timeline, fence->fence->ops->get_timeline_name ? fence->fence->ops->get_timeline_name(fence->fence) : "unknown")
+ __array(char, foreign_val, 128)
+ __field(u64, foreign_context)
+ ),
+
+ TP_fast_assign(
+ __assign_str(driver, fence->base.ops->get_driver_name(&fence->base))
+ __assign_str(timeline, fence->base.ops->get_timeline_name(&fence->base))
+ fence->base.ops->fence_value_str(&fence->base, __entry->val, sizeof(__entry->val));
+ __entry->context = fence->base.context;
+ __assign_str(foreign_driver, fence->fence->ops->get_driver_name ? fence->fence->ops->get_driver_name(fence->fence) : "unknown")
+ __assign_str(foreign_timeline, fence->fence->ops->get_timeline_name ? fence->fence->ops->get_timeline_name(fence->fence) : "unknown")
+		if (fence->fence->ops->fence_value_str)
+			fence->fence->ops->fence_value_str(fence->fence, __entry->foreign_val, sizeof(__entry->foreign_val));
+		else
+			strlcpy(__entry->foreign_val, "unknown", sizeof(__entry->foreign_val));
+ __entry->foreign_context = fence->fence->context;
+ ),
+
+ TP_printk("driver=%s timeline=%s ctx=%llu val=%s foreign: driver=%s timeline=%s ctx=%llu val=%s",
+ __get_str(driver), __get_str(timeline), __entry->context, __entry->val,
+ __get_str(foreign_driver), __get_str(foreign_timeline), __entry->foreign_context, __entry->foreign_val
+ )
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_create,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_release,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+DEFINE_EVENT(pvr_fence_foreign, pvr_fence_foreign_signal,
+
+ TP_PROTO(struct pvr_fence *fence),
+
+ TP_ARGS(fence)
+);
+
+#endif /* _TRACE_PVR_FENCE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE pvr_fence_trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.c b/drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.c
new file mode 100644
index 00000000000000..aaf58b1e4b027c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.c
@@ -0,0 +1,264 @@
+/*************************************************************************/ /*!
+@File pvr_gputrace.c
+@Title PVR GPU Trace module Linux implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(CONFIG_GPU_TRACEPOINTS)
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+#undef CREATE_TRACE_POINTS
+#else
+#include <trace/events/gpu.h>
+#endif
+
+#include "pvrsrv_error.h"
+#include "pvrsrv_apphint.h"
+#include "pvr_debug.h"
+#include "pvr_gputrace.h"
+#include "rgxhwperf.h"
+#include "device.h"
+#include "trace_events.h"
+#define CREATE_TRACE_POINTS
+#include "rogue_trace_events.h"
+
+/******************************************************************************
+ Module internal implementation
+******************************************************************************/
+
+/* This lock ensures state change of GPU_TRACING on/off is done atomically */
+static POS_LOCK ghGPUTraceStateLock;
+static IMG_BOOL gbFTraceGPUEventsEnabled = PVRSRV_APPHINT_ENABLEFTRACEGPU;
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+void PVRGpuTraceClientWork(
+ PVRSRV_DEVICE_NODE *psDevNode,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32JobId,
+ const IMG_CHAR* pszKickType)
+{
+ PVR_ASSERT(pszKickType);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRGpuTraceClientKick(%s): contextId %u, "
+ "jobId %u", pszKickType, ui32CtxId, ui32JobId));
+
+ if (PVRGpuTraceEnabled())
+ {
+ trace_gpu_job_enqueue(ui32CtxId, ui32JobId, pszKickType);
+ }
+}
+
+void PVRGpuTraceWorkSwitch(
+ IMG_UINT64 ui64HWTimestampInOSTime,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32CtxPriority,
+ const IMG_UINT32 ui32JobId,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+ PVR_ASSERT(pszWorkType);
+
+	/* Invert the priority because this is what systrace expects: lower
+	 * values convey a higher priority to systrace. */
+	trace_gpu_sched_switch(pszWorkType, ui64HWTimestampInOSTime,
+		eSwType == PVR_GPUTRACE_SWITCH_TYPE_END ? 0 : ui32CtxId,
+		2 - ui32CtxPriority, ui32JobId);
+}
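+
+/*
+ * Worked example of the inversion above, assuming driver priorities run from
+ * 0 (lowest) to 2 (highest): a context at driver priority 2 is reported to
+ * systrace as 2 - 2 = 0, and one at driver priority 0 as 2 - 0 = 2, so lower
+ * reported values mean higher priority, as systrace expects.
+ */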
+
+void PVRGpuTraceUfo(
+ IMG_UINT64 ui64OSTimestamp,
+ const RGX_HWPERF_UFO_EV eEvType,
+ const IMG_UINT32 ui32ExtJobRef,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32JobId,
+ const IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ switch (eEvType) {
+ case RGX_HWPERF_UFO_EV_UPDATE:
+ trace_rogue_ufo_updates(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_FALSE, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+ trace_rogue_ufo_checks_success(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_TRUE, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_FALSE, ui32UFOCount, puData);
+ break;
+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+ trace_rogue_ufo_checks_fail(ui64OSTimestamp, ui32CtxId,
+ ui32JobId, IMG_TRUE, ui32UFOCount, puData);
+ break;
+ default:
+ break;
+ }
+}
+
+void PVRGpuTraceFirmware(
+ IMG_UINT64 ui64HWTimestampInOSTime,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+ trace_rogue_firmware_activity(ui64HWTimestampInOSTime, pszWorkType, eSwType);
+}
+
+void PVRGpuTraceEventsLost(
+ const RGX_HWPERF_STREAM_ID eStreamId,
+ const IMG_UINT32 ui32LastOrdinal,
+ const IMG_UINT32 ui32CurrOrdinal)
+{
+ trace_rogue_events_lost(eStreamId, ui32LastOrdinal, ui32CurrOrdinal);
+}
+
+PVRSRV_ERROR PVRGpuTraceSupportInit(void)
+{
+ PVRSRV_ERROR eError;
+
+	eError = RGXHWPerfFTraceGPUInitSupport();
+	PVR_LOGR_IF_ERROR(eError, "RGXHWPerfFTraceGPUInitSupport");
+
+	eError = OSLockCreate(&ghGPUTraceStateLock, LOCK_TYPE_PASSIVE);
+	PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+ return PVRSRV_OK;
+}
+
+void PVRGpuTraceSupportDeInit(void)
+{
+ if (ghGPUTraceStateLock)
+ {
+ OSLockDestroy(ghGPUTraceStateLock);
+ }
+
+ RGXHWPerfFTraceGPUDeInitSupport();
+}
+
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = RGXHWPerfFTraceGPUInitDevice(psDeviceNode);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfFTraceGPUInitDevice", e0);
+
+ return PVRSRV_OK;
+
+e0:
+ RGXHWPerfFTraceGPUDeInitDevice(psDeviceNode);
+ return eError;
+}
+
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ RGXHWPerfFTraceGPUDeInitDevice(psDeviceNode);
+}
+
+IMG_BOOL PVRGpuTraceEnabled(void)
+{
+ return gbFTraceGPUEventsEnabled;
+}
+
+static PVRSRV_ERROR _IsGpuTraceEnabled(const PVRSRV_DEVICE_NODE *device,
+ const void *private_data,
+ IMG_BOOL *value)
+{
+ PVR_UNREFERENCED_PARAMETER(device);
+ PVR_UNREFERENCED_PARAMETER(private_data);
+
+ *value = gbFTraceGPUEventsEnabled;
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SetGpuTraceEnabled(const PVRSRV_DEVICE_NODE *device,
+ const void *private_data,
+ IMG_BOOL value)
+{
+ PVR_UNREFERENCED_PARAMETER(device);
+
+ /* Lock down the state to avoid concurrent writes */
+ OSLockAcquire(ghGPUTraceStateLock);
+
+ if (value != gbFTraceGPUEventsEnabled)
+ {
+ PVRSRV_ERROR eError;
+ if ((eError = PVRGpuTraceEnabledSet(value)) == PVRSRV_OK)
+ {
+ PVR_TRACE(("%s GPU FTrace", value ? "ENABLED" : "DISABLED"));
+ gbFTraceGPUEventsEnabled = value;
+ }
+ else
+ {
+ PVR_TRACE(("FAILED to %s GPU FTrace", value ? "enable" : "disable"));
+			/* On failure, a partial enable/disable may have
+			 * resulted. Try our best to restore the previous
+			 * state; ignore any error. */
+ PVRGpuTraceEnabledSet(gbFTraceGPUEventsEnabled);
+
+ OSLockRelease(ghGPUTraceStateLock);
+ return eError;
+ }
+ }
+ else
+ {
+ PVR_TRACE(("GPU FTrace already %s!", value ? "enabled" : "disabled"));
+ }
+
+ OSLockRelease(ghGPUTraceStateLock);
+
+ return PVRSRV_OK;
+}
+
+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFTraceGPU,
+ _IsGpuTraceEnabled, _SetGpuTraceEnabled,
+ psDeviceNode, NULL);
+}
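+
+/*
+ * Illustrative only: hot paths gate tracing on the cached flag rather than
+ * taking the state lock, as PVRGpuTraceClientWork() does above:
+ *
+ *	if (PVRGpuTraceEnabled())
+ *		trace_gpu_job_enqueue(ui32CtxId, ui32JobId, pszKickType);
+ */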
+
+/******************************************************************************
+ End of file (pvr_gputrace.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.h b/drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.h
new file mode 100644
index 00000000000000..3358a8d0f37d5a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_gputrace.h
@@ -0,0 +1,130 @@
+/*************************************************************************/ /*!
+@File pvr_gputrace.h
+@Title PVR GPU Trace module common environment interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVR_GPUTRACE_H_
+#define PVR_GPUTRACE_H_
+
+#include "img_types.h"
+#include "rgx_hwperf.h"
+#include "device.h"
+
+
+/******************************************************************************
+ Module out-bound API
+******************************************************************************/
+
+/*
+ The device layer of the KM driver defines these two APIs to allow a
+ platform module to set and retrieve the feature's on/off state.
+*/
+extern PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue);
+extern PVRSRV_ERROR PVRGpuTraceEnabledSetNoBridgeLock(
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bNewValue);
+
+/******************************************************************************
+ Module In-bound API
+******************************************************************************/
+
+typedef enum {
+ PVR_GPUTRACE_SWITCH_TYPE_UNDEF = 0,
+
+ PVR_GPUTRACE_SWITCH_TYPE_BEGIN = 1,
+ PVR_GPUTRACE_SWITCH_TYPE_END = 2
+
+} PVR_GPUTRACE_SWITCH_TYPE;
+
+void PVRGpuTraceClientWork(
+ PVRSRV_DEVICE_NODE *psDevNode,
+ const IMG_UINT32 ui32ExtJobRef,
+ const IMG_UINT32 ui32IntJobRef,
+ const IMG_CHAR* pszKickType);
+
+
+void PVRGpuTraceWorkSwitch(
+ IMG_UINT64 ui64OSTimestamp,
+ const IMG_UINT32 ui32ContextId,
+ const IMG_UINT32 ui32CtxPriority,
+ const IMG_UINT32 ui32JobId,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+void PVRGpuTraceUfo(
+ IMG_UINT64 ui64OSTimestamp,
+ const RGX_HWPERF_UFO_EV eEvType,
+ const IMG_UINT32 ui32ExtJobRef,
+ const IMG_UINT32 ui32CtxId,
+ const IMG_UINT32 ui32JobId,
+ const IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void PVRGpuTraceFirmware(
+ IMG_UINT64 ui64HWTimestampInOSTime,
+ const IMG_CHAR* pszWorkType,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType);
+
+void PVRGpuTraceEventsLost(
+ const RGX_HWPERF_STREAM_ID eStreamId,
+ const IMG_UINT32 ui32LastOrdinal,
+ const IMG_UINT32 ui32CurrOrdinal);
+
+/* Early initialisation of GPU Ftrace events logic.
+ * This function initialises some necessary structures. */
+PVRSRV_ERROR PVRGpuTraceSupportInit(void);
+void PVRGpuTraceSupportDeInit(void);
+
+PVRSRV_ERROR PVRGpuTraceInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+void PVRGpuTraceDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_BOOL PVRGpuTraceEnabled(void);
+
+void PVRGpuTraceInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/* FTrace events callbacks interface */
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+
+#endif /* PVR_GPUTRACE_H_ */
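Read together, the declarations above imply a straightforward lifecycle: initialise the support structures once, initialise each device, then switch the feature on before emitting any events. The sketch below shows how a caller might drive that sequence; it is illustrative only, and the origin of psDevNode and the surrounding error-handling policy are assumptions rather than part of this header.

/* Illustrative caller of the GPU trace lifecycle -- not driver code. */
static PVRSRV_ERROR ExampleEnableGpuTrace(PVRSRV_DEVICE_NODE *psDevNode)
{
	PVRSRV_ERROR eError;

	/* One-off initialisation of the Ftrace support structures. */
	eError = PVRGpuTraceSupportInit();
	if (eError != PVRSRV_OK)
		return eError;

	/* Per-device initialisation. */
	eError = PVRGpuTraceInitDevice(psDevNode);
	if (eError != PVRSRV_OK)
	{
		PVRGpuTraceSupportDeInit();
		return eError;
	}

	/* Turn the feature on; PVRGpuTraceEnabled() should now report IMG_TRUE. */
	return PVRGpuTraceEnabledSet(IMG_TRUE);
}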
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_intrinsics.h b/drivers/gpu/drm/img-rogue/1.10/pvr_intrinsics.h
new file mode 100644
index 00000000000000..ee7de678ae85e3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_intrinsics.h
@@ -0,0 +1,70 @@
+/*************************************************************************/ /*!
+@File
+@Title Intrinsics definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_INTRINSICS_H_
+#define _PVR_INTRINSICS_H_
+
+/* PVR_CTZLL:
+ * Count the number of trailing zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__)
+
+ #define PVR_CTZLL __builtin_ctzll
+#endif
+#endif
+
+/* PVR_CLZLL:
+ * Count the number of leading zeroes in a long long integer
+ */
+
+#if defined(__GNUC__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__aarch64__) || \
+ defined(__arm__) || defined(__mips)
+
+#define PVR_CLZLL __builtin_clzll
+
+#endif
+#endif
+
+#endif /* _PVR_INTRINSICS_H_ */
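Since PVR_CTZLL and PVR_CLZLL are only defined for GCC-compatible compilers on the architectures listed above, callers need to test for the macros before use. A minimal sketch of that pattern follows; the portable fallback loop is an illustrative assumption, not part of this header.

/* Count trailing zeroes with a graceful fallback -- sketch only. */
static unsigned int ExampleCountTrailingZeroes(unsigned long long ullValue)
{
	unsigned int uiCount = 0;

	if (ullValue == 0)
		return 64; /* the builtin is undefined for 0; pick a convention */

#if defined(PVR_CTZLL)
	uiCount = PVR_CTZLL(ullValue); /* single instruction where available */
#else
	while ((ullValue & 1ULL) == 0)
	{
		ullValue >>= 1;
		uiCount++;
	}
#endif
	return uiCount;
}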
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_linux_fence.h b/drivers/gpu/drm/img-rogue/1.10/pvr_linux_fence.h
new file mode 100644
index 00000000000000..64ca84dc1e6516
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_linux_fence.h
@@ -0,0 +1,105 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR Linux fence compatibility header
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_LINUX_FENCE_H__)
+#define __PVR_LINUX_FENCE_H__
+
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)))
+#include <linux/fence.h>
+#else
+#include <linux/dma-fence.h>
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)) && \
+ (!defined(CHROMIUMOS_KERNEL) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)))
+/* Structures */
+#define dma_fence fence
+#define dma_fence_array fence_array
+#define dma_fence_cb fence_cb
+#define dma_fence_ops fence_ops
+
+/* Defines and Enums */
+#define DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT FENCE_FLAG_ENABLE_SIGNAL_BIT
+#define DMA_FENCE_FLAG_SIGNALED_BIT FENCE_FLAG_SIGNALED_BIT
+#define DMA_FENCE_FLAG_USER_BITS FENCE_FLAG_USER_BITS
+
+#define DMA_FENCE_ERR FENCE_ERR
+#define DMA_FENCE_TRACE FENCE_TRACE
+#define DMA_FENCE_WARN FENCE_WARN
+
+/* Functions */
+#define dma_fence_add_callback fence_add_callback
+#define dma_fence_context_alloc fence_context_alloc
+#define dma_fence_default_wait fence_default_wait
+#define dma_fence_is_signaled fence_is_signaled
+#define dma_fence_enable_sw_signaling fence_enable_sw_signaling
+#define dma_fence_free fence_free
+#define dma_fence_get fence_get
+#define dma_fence_get_rcu fence_get_rcu
+#define dma_fence_init fence_init
+#define dma_fence_is_array fence_is_array
+#define dma_fence_put fence_put
+#define dma_fence_signal fence_signal
+#define dma_fence_wait fence_wait
+#define to_dma_fence_array to_fence_array
+
+static inline signed long
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
+{
+ signed long lret;
+
+ lret = fence_wait_timeout(fence, intr, timeout);
+ if (!lret && !timeout)
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) ? 1 : 0;
+
+ return lret;
+}
+
+#endif
+
+#endif /* !defined(__PVR_LINUX_FENCE_H__) */
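With the aliases above, driver code can be written once against the dma_fence_* names and still build on pre-4.10 (non-ChromiumOS) kernels. The dma_fence_wait_timeout() wrapper exists because the old fence_wait_timeout() returns 0 for an already-signalled fence when given a zero timeout; as the wrapper's code shows, it re-checks the signalled bit in that case and returns 1, matching the newer API. A hedged polling sketch, assuming a valid fence obtained elsewhere:

/* Non-blocking poll of a fence -- illustrative only. */
static bool example_fence_is_done(struct dma_fence *fence)
{
	/*
	 * timeout == 0: returns > 0 if the fence is already signalled,
	 * 0 otherwise, thanks to the compatibility wrapper above.
	 */
	return dma_fence_wait_timeout(fence, false, 0) > 0;
}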
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_notifier.c b/drivers/gpu/drm/img-rogue/1.10/pvr_notifier.c
new file mode 100644
index 00000000000000..9dffc3071b173a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_notifier.c
@@ -0,0 +1,511 @@
+/*************************************************************************/ /*!
+@File
+@Title PowerVR notifier interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "allocmem.h"
+#include "dllist.h"
+
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrversion.h"
+#include "connection_server.h"
+
+#include "osfunc.h"
+#include "sofunc_pvr.h"
+
+/*************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct PVRSRV_CMDCOMP_NOTIFY_TAG
+{
+ PVRSRV_CMDCOMP_HANDLE hCmdCompHandle;
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify;
+ DLLIST_NODE sListNode;
+} PVRSRV_CMDCOMP_NOTIFY;
+
+/* Head of the list of callbacks called when command complete happens */
+static DLLIST_NODE g_sCmdCompNotifyHead;
+static POSWR_LOCK g_hCmdCompNotifyLock;
+
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSWRLockCreate(&g_hCmdCompNotifyLock);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ dllist_init(&g_sCmdCompNotifyHead);
+
+ return PVRSRV_OK;
+}
+
+void
+PVRSRVCmdCompleteDeinit(void)
+{
+ /* Check that all notify functions have been unregistered */
+ if (!dllist_is_empty(&g_sCmdCompNotifyHead))
+ {
+ PDLLIST_NODE psNode;
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Command complete notify list is not empty!", __func__));
+
+ /* Clean up any stragglers */
+ psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+ while (psNode)
+ {
+ PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+ dllist_remove_node(psNode);
+
+ psNotify = IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+ OSFreeMem(psNotify);
+
+ psNode = dllist_get_next_node(&g_sCmdCompNotifyHead);
+ }
+ }
+
+ if (g_hCmdCompNotifyLock)
+ {
+ OSWRLockDestroy(g_hCmdCompNotifyLock);
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+ PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+ PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+ if (!phNotify || !pfnCmdCompleteNotify || !hCmdCompHandle)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+ __func__, phNotify, pfnCmdCompleteNotify, hCmdCompHandle));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psNotify = OSAllocMem(sizeof(*psNotify));
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not enough memory to allocate CmdCompleteNotify function",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Set-up the notify data */
+ psNotify->hCmdCompHandle = hCmdCompHandle;
+ psNotify->pfnCmdCompleteNotify = pfnCmdCompleteNotify;
+
+ /* Add it to the list of Notify functions */
+ OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+ dllist_add_to_tail(&g_sCmdCompNotifyHead, &psNotify->sListNode);
+ OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+ *phNotify = psNotify;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify)
+{
+ PVRSRV_CMDCOMP_NOTIFY *psNotify;
+
+ psNotify = (PVRSRV_CMDCOMP_NOTIFY *) hNotify;
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR," %s: Bad arguments (%p)", __func__, hNotify));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ OSWRLockAcquireWrite(g_hCmdCompNotifyLock);
+ dllist_remove_node(&psNotify->sListNode);
+ OSWRLockReleaseWrite(g_hCmdCompNotifyLock);
+
+ OSFreeMem(psNotify);
+
+ return PVRSRV_OK;
+}
+
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#if !defined(NO_HARDWARE)
+ DLLIST_NODE *psNode, *psNext;
+#endif
+
+ /* Call notify callbacks to check if blocked work items can now proceed */
+#if !defined(NO_HARDWARE)
+ OSWRLockAcquireRead(g_hCmdCompNotifyLock);
+ dllist_foreach_node(&g_sCmdCompNotifyHead, psNode, psNext)
+ {
+ PVRSRV_CMDCOMP_NOTIFY *psNotify =
+ IMG_CONTAINER_OF(psNode, PVRSRV_CMDCOMP_NOTIFY, sListNode);
+
+ if (hCmdCompCallerHandle != psNotify->hCmdCompHandle)
+ {
+ psNotify->pfnCmdCompleteNotify(psNotify->hCmdCompHandle);
+ }
+ }
+ OSWRLockReleaseRead(g_hCmdCompNotifyLock);
+#endif
+
+ if (psPVRSRVData->hGlobalEventObject)
+ {
+ OSEventObjectSignal(psPVRSRVData->hGlobalEventObject);
+ }
+}
+
+/*************************************************************************/ /*!
+Debug Notifier Interface
+*/ /**************************************************************************/
+
+typedef struct DEBUG_REQUEST_ENTRY_TAG
+{
+ IMG_UINT32 ui32RequesterID;
+ DLLIST_NODE sListHead;
+} DEBUG_REQUEST_ENTRY;
+
+typedef struct DEBUG_REQUEST_TABLE_TAG
+{
+ POSWR_LOCK hLock;
+ IMG_UINT32 ui32RequestCount;
+ DEBUG_REQUEST_ENTRY asEntry[1]; /* variable length, sized when allocated */
+} DEBUG_REQUEST_TABLE;
+
+typedef struct DEBUG_REQUEST_NOTIFY_TAG
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle;
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify;
+ IMG_UINT32 ui32RequesterID;
+ DLLIST_NODE sListNode;
+} DEBUG_REQUEST_NOTIFY;
+
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 *paui32Table,
+ IMG_UINT32 ui32Length)
+{
+ DEBUG_REQUEST_TABLE *psDebugTable;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ if (psDevNode->hDebugTable)
+ {
+ return PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED;
+ }
+
+ psDebugTable = OSAllocMem(sizeof(DEBUG_REQUEST_TABLE) +
+ (sizeof(DEBUG_REQUEST_ENTRY) * (ui32Length-1)));
+ if (!psDebugTable)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ eError = OSWRLockCreate(&psDebugTable->hLock);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorFreeDebugTable;
+ }
+
+ psDebugTable->ui32RequestCount = ui32Length;
+
+ /* Init the list heads */
+ for (i = 0; i < ui32Length; i++)
+ {
+ psDebugTable->asEntry[i].ui32RequesterID = paui32Table[i];
+ dllist_init(&psDebugTable->asEntry[i].sListHead);
+ }
+
+ psDevNode->hDebugTable = (IMG_HANDLE *) psDebugTable;
+
+ return PVRSRV_OK;
+
+ErrorFreeDebugTable:
+ OSFreeMem(psDebugTable);
+ psDebugTable = NULL;
+
+ return eError;
+}
+
+void
+PVRSRVUnregisterDbgTable(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ DEBUG_REQUEST_TABLE *psDebugTable;
+ IMG_UINT32 i;
+
+ PVR_ASSERT(psDevNode->hDebugTable);
+ psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+ psDevNode->hDebugTable = NULL;
+
+ for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+ {
+ if (!dllist_is_empty(&psDebugTable->asEntry[i].sListHead))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Found registered callback(s) on %d",
+ __func__, i));
+ }
+ }
+
+ OSWRLockDestroy(psDebugTable->hLock);
+ psDebugTable->hLock = NULL;
+
+ OSFreeMem(psDebugTable);
+}
+
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+ DEBUG_REQUEST_TABLE *psDebugTable;
+ DEBUG_REQUEST_NOTIFY *psNotify;
+ PDLLIST_NODE psHead = NULL;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+ if (!phNotify || !psDevNode || !pfnDbgRequestNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p, %p, %p)",
+ __func__, phNotify, psDevNode, pfnDbgRequestNotify));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDebugTable = (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+
+ PVR_ASSERT(psDebugTable);
+
+ /* NoStats used since this may be called outside of the register/de-register
+ * process calls which track memory use. */
+ psNotify = OSAllocMemNoStats(sizeof(*psNotify));
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Not enough memory to allocate DbgRequestNotify structure",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Set-up the notify data */
+ psNotify->psDevNode = psDevNode;
+ psNotify->hDbgRequestHandle = hDbgRequestHandle;
+ psNotify->pfnDbgRequestNotify = pfnDbgRequestNotify;
+ psNotify->ui32RequesterID = ui32RequesterID;
+
+ /* Lock down all the lists */
+ OSWRLockAcquireWrite(psDebugTable->hLock);
+
+ /* Find which list to add it to */
+ for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+ {
+ if (psDebugTable->asEntry[i].ui32RequesterID == ui32RequesterID)
+ {
+ psHead = &psDebugTable->asEntry[i].sListHead;
+ }
+ }
+
+ if (!psHead)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to find debug requester", __func__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto ErrorReleaseLock;
+ }
+
+ /* Add it to the list of Notify functions */
+ dllist_add_to_tail(psHead, &psNotify->sListNode);
+
+ /* Unlock the lists */
+ OSWRLockReleaseWrite(psDebugTable->hLock);
+
+ *phNotify = psNotify;
+
+ return PVRSRV_OK;
+
+ErrorReleaseLock:
+ OSWRLockReleaseWrite(psDebugTable->hLock);
+ OSFreeMem(psNotify);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle)
+{
+ return PVRSRVRegisterDbgRequestNotify(phNotify,
+ psDevNode,
+ pfnDbgRequestNotify,
+ ui32RequesterID,
+ hDbgRequestHandle);
+}
+
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify)
+{
+ DEBUG_REQUEST_NOTIFY *psNotify = (DEBUG_REQUEST_NOTIFY *) hNotify;
+ DEBUG_REQUEST_TABLE *psDebugTable;
+
+ if (!psNotify)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bad arguments (%p)", __func__, hNotify));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDebugTable = (DEBUG_REQUEST_TABLE *) psNotify->psDevNode->hDebugTable;
+
+ OSWRLockAcquireWrite(psDebugTable->hLock);
+ dllist_remove_node(&psNotify->sListNode);
+ OSWRLockReleaseWrite(psDebugTable->hLock);
+
+ OSFreeMemNoStats(psNotify);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify)
+{
+ return PVRSRVUnregisterDbgRequestNotify(hNotify);
+}
+
+void
+PVRSRVDebugRequest(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ DEBUG_REQUEST_TABLE *psDebugTable =
+ (DEBUG_REQUEST_TABLE *) psDevNode->hDebugTable;
+ static const IMG_CHAR *apszVerbosityTable[] = { "Low", "Medium", "High" };
+ const IMG_CHAR *szVerbosityLevel;
+ IMG_UINT32 i;
+ IMG_UINT32 j;
+
+ static_assert(ARRAY_SIZE(apszVerbosityTable) == DEBUG_REQUEST_VERBOSITY_MAX+1,
+ "Incorrect number of verbosity levels");
+
+ PVR_ASSERT(psDebugTable);
+
+ OSWRLockAcquireRead(psDebugTable->hLock);
+
+ if (ui32VerbLevel < ARRAY_SIZE(apszVerbosityTable))
+ {
+ szVerbosityLevel = apszVerbosityTable[ui32VerbLevel];
+ }
+ else
+ {
+ szVerbosityLevel = "unknown";
+ PVR_ASSERT(!"Invalid verbosity level received");
+ }
+
+ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: START (%s) ]------------",
+ szVerbosityLevel);
+
+ OSDumpVersionInfo(pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ PVR_DUMPDEBUG_LOG("DDK info: %s (%s) %s",
+ PVRVERSION_STRING, PVR_BUILD_TYPE, PVR_BUILD_DIR);
+ PVR_DUMPDEBUG_LOG("Time now: %015" IMG_UINT64_FMTSPECx, OSClockus64());
+
+ switch (psPVRSRVData->eServicesState)
+ {
+ case PVRSRV_SERVICES_STATE_OK:
+ PVR_DUMPDEBUG_LOG("Services State: OK");
+ break;
+ case PVRSRV_SERVICES_STATE_BAD:
+ PVR_DUMPDEBUG_LOG("Services State: BAD");
+ break;
+ case PVRSRV_SERVICES_STATE_UNDEFINED:
+ PVR_DUMPDEBUG_LOG("Services State: UNDEFINED");
+ break;
+ default:
+ PVR_DUMPDEBUG_LOG("Services State: UNKNOWN (%d)",
+ psPVRSRVData->eServicesState);
+ break;
+ }
+
+ PVRSRVConnectionDebugNotify(pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ /* For each verbosity level */
+ for (j = 0; j <= ui32VerbLevel; j++)
+ {
+ /* For each requester */
+ for (i = 0; i < psDebugTable->ui32RequestCount; i++)
+ {
+ DLLIST_NODE *psNode;
+ DLLIST_NODE *psNext;
+
+ dllist_foreach_node(&psDebugTable->asEntry[i].sListHead, psNode, psNext)
+ {
+ DEBUG_REQUEST_NOTIFY *psNotify =
+ IMG_CONTAINER_OF(psNode, DEBUG_REQUEST_NOTIFY, sListNode);
+ psNotify->pfnDbgRequestNotify(psNotify->hDbgRequestHandle, j,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+ }
+
+ PVR_DUMPDEBUG_LOG("------------[ PVR DBG: END ]------------");
+ OSWRLockReleaseRead(psDebugTable->hLock);
+
+ if (!pfnDumpDebugPrintf)
+ {
+ /* Only notify OS of an issue if the debug dump has gone there */
+ OSWarnOn(IMG_TRUE);
+ }
+}
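The command-complete path above amounts to: allocate a node, link it into g_sCmdCompNotifyHead under the write lock, and return the node as an opaque handle; PVRSRVCheckStatus() then walks the list under the read lock, skipping the caller's own entry. A hedged sketch of a client of this interface; the callback body and hMyHandle are assumptions.

/* Hypothetical command-complete client -- illustrative only. */
static void ExampleCmdCompleteCB(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
{
	/* Re-check whether previously blocked work can now proceed. */
}

static PVRSRV_ERROR ExampleUseNotifier(PVRSRV_CMDCOMP_HANDLE hMyHandle)
{
	IMG_HANDLE hNotify;
	PVRSRV_ERROR eError;

	eError = PVRSRVRegisterCmdCompleteNotify(&hNotify,
	                                         ExampleCmdCompleteCB,
	                                         hMyHandle);
	if (eError != PVRSRV_OK)
		return eError;

	/*
	 * When this caller finishes work itself it notifies everyone else;
	 * passing its own handle means its own callback is skipped.
	 */
	PVRSRVCheckStatus(hMyHandle);

	return PVRSRVUnregisterCmdCompleteNotify(hNotify);
}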
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_notifier.h b/drivers/gpu/drm/img-rogue/1.10/pvr_notifier.h
new file mode 100644
index 00000000000000..93bb9e7e25a6ef
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_notifier.h
@@ -0,0 +1,248 @@
+/**************************************************************************/ /*!
+@File
+@Title PowerVR notifier interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVR_NOTIFIER_H__)
+#define __PVR_NOTIFIER_H__
+
+#include "img_types.h"
+#include "pvr_debug.h"
+
+
+/**************************************************************************/ /*!
+Command Complete Notifier Interface
+*/ /***************************************************************************/
+
+typedef IMG_HANDLE PVRSRV_CMDCOMP_HANDLE;
+#ifndef _CMDCOMPNOTIFY_PFN_
+typedef void (*PFN_CMDCOMP_NOTIFY)(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+#define _CMDCOMPNOTIFY_PFN_
+#endif
+
+/**************************************************************************/ /*!
+@Function PVRSRVCmdCompleteInit
+@Description Performs initialisation of the command complete notifier
+ interface.
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVCmdCompleteInit(void);
+
+/**************************************************************************/ /*!
+@Function PVRSRVCmdCompleteDeinit
+@Description Performs cleanup for the command complete notifier interface.
+@Return void
+*/ /***************************************************************************/
+void
+PVRSRVCmdCompleteDeinit(void);
+
+/**************************************************************************/ /*!
+@Function PVRSRVRegisterCmdCompleteNotify
+@Description Register a callback function that is called when some device
+ finishes some work, which is signalled via a call to
+ PVRSRVCheckStatus.
+@Output phNotify On success, points to command complete
+ notifier handle
+@Input pfnCmdCompleteNotify Function callback
+@Input hPrivData Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterCmdCompleteNotify(IMG_HANDLE *phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify,
+ PVRSRV_CMDCOMP_HANDLE hPrivData);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnregisterCmdCompleteNotify
+@Description Unregister a previously registered callback function.
+@Input hNotify Command complete notifier handle
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterCmdCompleteNotify(IMG_HANDLE hNotify);
+
+/**************************************************************************/ /*!
+@Function PVRSRVCheckStatus
+@Description Notify any registered command complete handlers that some work
+ has been finished (unless hCmdCompCallerHandle matches a
+ handler's hPrivData). Also signal the global event object.
+@Input hCmdCompCallerHandle Used to prevent a handler from being
+ notified. A NULL value results in all
+ handlers being notified.
+*/ /***************************************************************************/
+void
+PVRSRVCheckStatus(PVRSRV_CMDCOMP_HANDLE hCmdCompCallerHandle);
+
+
+/**************************************************************************/ /*!
+Debug Notifier Interface
+*/ /***************************************************************************/
+
+#define DEBUG_REQUEST_DC 0
+#define DEBUG_REQUEST_SERVERSYNC 1
+#define DEBUG_REQUEST_SYS 2
+#define DEBUG_REQUEST_ANDROIDSYNC 3
+#define DEBUG_REQUEST_LINUXFENCE 4
+#define DEBUG_REQUEST_SYNCCHECKPOINT 5
+#define DEBUG_REQUEST_HTB 6
+#define DEBUG_REQUEST_APPHINT 7
+#define DEBUG_REQUEST_FALLBACKSYNC 8
+
+#define DEBUG_REQUEST_VERBOSITY_LOW 0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH 2
+#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH
+
+/*
+ * Macro used within debug dump functions to send output either to PVR_LOG or
+ * a custom function. The custom function should be stored as a function pointer
+ * in a local variable called 'pfnDumpDebugPrintf'. 'pvDumpDebugFile' is also
+ * required as a local variable to serve as a file identifier for the printf
+ * function if required.
+ */
+#define PVR_DUMPDEBUG_LOG(...) \
+ do \
+ { \
+ if (pfnDumpDebugPrintf) \
+ pfnDumpDebugPrintf(pvDumpDebugFile, __VA_ARGS__); \
+ else \
+ PVR_LOG((__VA_ARGS__)); \
+ } while (0)
+
+struct _PVRSRV_DEVICE_NODE_;
+
+typedef IMG_HANDLE PVRSRV_DBGREQ_HANDLE;
+#ifndef _DBGNOTIFY_PFNS_
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+ const IMG_CHAR *pszFormat, ...);
+typedef void (*PFN_DBGREQ_NOTIFY)(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+#define _DBGNOTIFY_PFNS_
+#endif
+
+/**************************************************************************/ /*!
+@Function PVRSRVRegisterDbgTable
+@Description Registers a debug requester table for the given device. The
+ order in which the debug requester IDs appear in the given
+ table determines the order in which a set of notifier callbacks
+ will be called. In other words, the requester ID that appears
+ first will have all of its associated debug notifier callbacks
+ called first. This will then be followed by all the callbacks
+ associated with the next requester ID in the table and so on.
+@Input psDevNode Device node with which to register requester table
+@Input paui32Table Array of requester IDs
+@Input ui32Length Number of elements in paui32Table
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_UINT32 *paui32Table, IMG_UINT32 ui32Length);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnregisterDbgTable
+@Description Unregisters a debug requester table.
+@Input psDevNode Device node for which the requester table should
+ be unregistered
+@Return void
+*/ /***************************************************************************/
+void
+PVRSRVUnregisterDbgTable(struct _PVRSRV_DEVICE_NODE_ *psDevNode);
+
+/**************************************************************************/ /*!
+@Function PVRSRVRegisterDbgRequestNotify
+@Description Register a callback function that is called when a debug request
+ is made via a call to PVRSRVDebugRequest. There are a number of
+ verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
+ DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+ for each level up to the highest level specified to
+ PVRSRVDebugRequest.
+@Output phNotify On success, points to debug notifier handle
+@Input psDevNode Device node for which the debug callback
+ should be registered
+@Input pfnDbgRequestNotify Function callback
+@Input ui32RequesterID Requester ID. This is used to determine
+ the order in which callbacks are called
+@Input hDbgRequestHandle Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRegisterDbgRequestNotify(IMG_HANDLE *phNotify,
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnregisterDbgRequestNotify
+@Description Unregister a previously registered callback function.
+@Input hNotify Debug notifier handle.
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVUnregisterDbgRequestNotify(IMG_HANDLE hNotify);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDebugRequest
+@Description Notify any registered debug request handlers that a debug
+ request has been made and at what level.
+@Input psDevNode Device node for which the debug request has
+ been made
+@Input ui32VerbLevel The maximum verbosity level to dump
+@Input pfnDumpDebugPrintf Used to specify the print function that
+ should be used to dump any debug
+ information. If this argument is NULL then
+ PVR_LOG() will be used as the default print
+ function.
+@Input pvDumpDebugFile Optional file identifier to be passed to
+ the print function if required.
+@Return void
+*/ /***************************************************************************/
+void
+PVRSRVDebugRequest(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+#endif /* !defined(__PVR_NOTIFIER_H__) */
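Putting the debug notifier pieces together: a subsystem registers a requester table once per device, hooks a callback under one of the DEBUG_REQUEST_* IDs, and prints through PVR_DUMPDEBUG_LOG so output is routed either to PVR_LOG or to the caller-supplied printf. A minimal hedged sketch follows; the table contents and callback body are assumptions, and ARRAY_SIZE is assumed available from the kernel headers.

/* Hypothetical debug-request client -- illustrative only. */
static void ExampleDbgRequestCB(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
                                IMG_UINT32 ui32VerbLevel,
                                DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
                                void *pvDumpDebugFile)
{
	/* Called once per verbosity level up to the level requested. */
	if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_LOW)
		PVR_DUMPDEBUG_LOG("Example subsystem: state OK");
}

static PVRSRV_ERROR ExampleRegisterDebug(struct _PVRSRV_DEVICE_NODE_ *psDevNode,
                                         IMG_HANDLE *phNotify)
{
	/* The order of IDs here fixes the order in which callbacks dump. */
	static IMG_UINT32 aui32Requesters[] = { DEBUG_REQUEST_SYS,
	                                        DEBUG_REQUEST_DC };
	PVRSRV_ERROR eError;

	eError = PVRSRVRegisterDbgTable(psDevNode, aui32Requesters,
	                                ARRAY_SIZE(aui32Requesters));
	if (eError != PVRSRV_OK)
		return eError;

	return PVRSRVRegisterDbgRequestNotify(phNotify, psDevNode,
	                                      ExampleDbgRequestCB,
	                                      DEBUG_REQUEST_SYS,
	                                      NULL);
}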
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_platform_drv.c b/drivers/gpu/drm/img-rogue/1.10/pvr_platform_drv.c
new file mode 100644
index 00000000000000..23f5b25215f4d1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_platform_drv.c
@@ -0,0 +1,306 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Title PowerVR DRM platform driver
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <drm/drmP.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/version.h>
+
+#include "module_common.h"
+#include "pvr_drv.h"
+#include "pvrmodule.h"
+#include "sysinfo.h"
+
+static struct drm_driver pvr_drm_platform_driver;
+
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+/*
+ * This is an arbitrary value. If it's changed then the 'num_devices' module
+ * parameter description should also be updated to match.
+ */
+#define MAX_DEVICES 16
+
+static unsigned int pvr_num_devices = 1;
+static struct platform_device **pvr_devices;
+
+#if defined(NO_HARDWARE)
+static int pvr_num_devices_set(const char *val,
+ const struct kernel_param *param)
+{
+ int err;
+
+ err = param_set_uint(val, param);
+ if (err)
+ return err;
+
+ if (pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct kernel_param_ops pvr_num_devices_ops = {
+ .set = pvr_num_devices_set,
+ .get = param_get_uint,
+};
+
+module_param_cb(num_devices, &pvr_num_devices_ops, &pvr_num_devices, 0444);
+MODULE_PARM_DESC(num_devices,
+ "Number of platform devices to register (default: 1 - max: 16)");
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+static int pvr_devices_register(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+ struct platform_device_info pvr_dev_info = {
+ .name = SYS_RGX_DEV_NAME,
+ .id = -2, /* PLATFORM_DEVID_AUTO */
+#if defined(NO_HARDWARE)
+ /* Not all cores have 40 bit physical support, but this
+ * will work unless a > 32 bit address is returned on those cores.
+ * In the future this will be handled more robustly.
+ */
+ .dma_mask = DMA_BIT_MASK(40),
+#else
+ .dma_mask = DMA_BIT_MASK(32),
+#endif
+ };
+ unsigned int i;
+
+ BUG_ON(pvr_num_devices == 0 || pvr_num_devices > MAX_DEVICES);
+
+ pvr_devices = kmalloc_array(pvr_num_devices, sizeof(*pvr_devices),
+ GFP_KERNEL);
+ if (!pvr_devices)
+ return -ENOMEM;
+
+ for (i = 0; i < pvr_num_devices; i++) {
+ pvr_devices[i] = platform_device_register_full(&pvr_dev_info);
+ if (IS_ERR(pvr_devices[i])) {
+ DRM_ERROR("unable to register device %u (err=%ld)\n",
+ i, PTR_ERR(pvr_devices[i]));
+ pvr_devices[i] = NULL;
+ return -ENODEV;
+ }
+ }
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+
+ return 0;
+}
+
+static void pvr_devices_unregister(void)
+{
+#if defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED)
+ unsigned int i;
+
+ BUG_ON(!pvr_devices);
+
+ for (i = 0; i < pvr_num_devices && pvr_devices[i]; i++)
+ platform_device_unregister(pvr_devices[i]);
+
+ kfree(pvr_devices);
+ pvr_devices = NULL;
+#endif /* defined(MODULE) && !defined(PVR_LDM_PLATFORM_PRE_REGISTERED) */
+}
+
+static int pvr_probe(struct platform_device *pdev)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ struct drm_device *ddev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+ ddev = drm_dev_alloc(&pvr_drm_platform_driver, &pdev->dev);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 9, 0))
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+#else
+ if (!ddev)
+ return -ENOMEM;
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+ /* Needed by drm_platform_set_busid */
+ ddev->platformdev = pdev;
+#endif
+
+ /*
+ * The load callback, called from drm_dev_register, is deprecated,
+ * because of potential race conditions. Calling the function here,
+ * before calling drm_dev_register, avoids those potential races.
+ */
+ BUG_ON(pvr_drm_platform_driver.load != NULL);
+ ret = pvr_drm_load(ddev, 0);
+ if (ret)
+ goto err_drm_dev_unref;
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto err_drm_dev_unload;
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ pvr_drm_platform_driver.name,
+ pvr_drm_platform_driver.major,
+ pvr_drm_platform_driver.minor,
+ pvr_drm_platform_driver.patchlevel,
+ pvr_drm_platform_driver.date,
+ ddev->primary->index);
+#endif
+ return 0;
+
+err_drm_dev_unload:
+ pvr_drm_unload(ddev);
+err_drm_dev_unref:
+ drm_dev_unref(ddev);
+ return ret;
+#else
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+ return drm_platform_init(&pvr_drm_platform_driver, pdev);
+#endif
+}
+
+static int pvr_remove(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
+ drm_dev_unregister(ddev);
+
+ /* The unload callback, called from drm_dev_unregister, is
+ * deprecated. Call the unload function directly.
+ */
+ BUG_ON(pvr_drm_platform_driver.unload != NULL);
+ pvr_drm_unload(ddev);
+
+ drm_dev_unref(ddev);
+#else
+ drm_put_dev(ddev);
+#endif
+ return 0;
+}
+
+static void pvr_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct pvr_drm_private *priv = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("device %p\n", &pdev->dev);
+
+ PVRSRVCommonDeviceShutdown(priv->dev_node);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+static const struct of_device_id pvr_of_ids[] = {
+#if defined(SYS_RGX_OF_COMPATIBLE)
+ { .compatible = SYS_RGX_OF_COMPATIBLE, },
+#endif
+ {},
+};
+
+#endif
+
+static const struct platform_device_id pvr_platform_ids[] = {
+#if defined(SYS_RGX_DEV_NAME)
+ { SYS_RGX_DEV_NAME, 0 },
+#endif
+ { }
+};
+
+static struct platform_driver pvr_platform_driver = {
+ .driver = {
+ .name = DRVNAME,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0))
+ .of_match_table = of_match_ptr(pvr_of_ids),
+#endif
+ .pm = &pvr_pm_ops,
+ },
+ .id_table = pvr_platform_ids,
+ .probe = pvr_probe,
+ .remove = pvr_remove,
+ .shutdown = pvr_shutdown,
+};
+
+static int __init pvr_init(void)
+{
+ int err;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ pvr_drm_platform_driver = pvr_drm_generic_driver;
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
+ pvr_drm_platform_driver.set_busid = drm_platform_set_busid;
+#endif
+
+ err = PVRSRVCommonDriverInit();
+ if (err)
+ return err;
+
+ err = platform_driver_register(&pvr_platform_driver);
+ if (err)
+ return err;
+
+ return pvr_devices_register();
+}
+
+static void __exit pvr_exit(void)
+{
+ DRM_DEBUG_DRIVER("\n");
+
+ pvr_devices_unregister();
+ platform_driver_unregister(&pvr_platform_driver);
+ PVRSRVCommonDriverDeinit();
+
+ DRM_DEBUG_DRIVER("done\n");
+}
+
+late_initcall(pvr_init);
+module_exit(pvr_exit);
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_ricommon.h b/drivers/gpu/drm/img-rogue/1.10/pvr_ricommon.h
new file mode 100644
index 00000000000000..73a7ca0c12568f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_ricommon.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File
+@Title Services Resource Information (RI) common types and definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Resource Information (RI) common types and definitions included
+ in both user mode and kernel mode source.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_RICOMMON_H__
+#define __PVR_RICOMMON_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Maximum text string length including the null byte */
+#define PRVSRVRI_MAX_TEXT_LENGTH 20U
+
+/* PID used to hold PMR allocations which are driver-wide (i.e. have a lifetime
+ longer than an application process) */
+#define PVR_SYS_ALLOC_PID 1
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_RICOMMON_H__ */
+/******************************************************************************
+ End of file (pvr_ricommon.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.c b/drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.c
new file mode 100644
index 00000000000000..7ebc8432006bf3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.c
@@ -0,0 +1,201 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/spinlock_types.h>
+#include <linux/atomic.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+#include "pvr_sw_fence.h"
+
+struct pvr_sw_fence_context {
+ struct kref kref;
+ unsigned int context;
+ char context_name[32];
+ char driver_name[32];
+ atomic_t seqno;
+ atomic_t fence_count;
+};
+
+struct pvr_sw_fence {
+ struct dma_fence base;
+ struct pvr_sw_fence_context *fence_context;
+ spinlock_t lock;
+};
+
+#define to_pvr_sw_fence(fence) container_of(fence, struct pvr_sw_fence, base)
+
+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx)
+{
+ return fctx->context_name;
+}
+
+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx,
+ char *str, int size)
+{
+ snprintf(str, size, "%d", atomic_read(&fctx->seqno));
+}
+
+static inline unsigned
+pvr_sw_fence_context_seqno_next(struct pvr_sw_fence_context *fence_context)
+{
+ return atomic_inc_return(&fence_context->seqno) - 1;
+}
+
+static const char *pvr_sw_fence_get_driver_name(struct dma_fence *fence)
+{
+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+ return pvr_sw_fence->fence_context->driver_name;
+}
+
+static const char *pvr_sw_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+ return pvr_sw_fence_context_name(pvr_sw_fence->fence_context);
+}
+
+static void pvr_sw_fence_value_str(struct dma_fence *fence, char *str, int size)
+{
+ snprintf(str, size, "%d", fence->seqno);
+}
+
+static void pvr_sw_fence_timeline_value_str(struct dma_fence *fence,
+ char *str, int size)
+{
+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+ pvr_sw_fence_context_value_str(pvr_sw_fence->fence_context, str, size);
+}
+
+static bool pvr_sw_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static void pvr_sw_fence_context_destroy_kref(struct kref *kref)
+{
+ struct pvr_sw_fence_context *fence_context =
+ container_of(kref, struct pvr_sw_fence_context, kref);
+ unsigned int fence_count;
+
+ fence_count = atomic_read(&fence_context->fence_count);
+ if (WARN_ON(fence_count))
+ pr_debug("%s context has %u fence(s) remaining\n",
+ fence_context->context_name, fence_count);
+
+ kfree(fence_context);
+}
+
+static void pvr_sw_fence_release(struct dma_fence *fence)
+{
+ struct pvr_sw_fence *pvr_sw_fence = to_pvr_sw_fence(fence);
+
+ atomic_dec(&pvr_sw_fence->fence_context->fence_count);
+ kref_put(&pvr_sw_fence->fence_context->kref,
+ pvr_sw_fence_context_destroy_kref);
+ kfree(pvr_sw_fence);
+}
+
+static const struct dma_fence_ops pvr_sw_fence_ops = {
+ .get_driver_name = pvr_sw_fence_get_driver_name,
+ .get_timeline_name = pvr_sw_fence_get_timeline_name,
+ .fence_value_str = pvr_sw_fence_value_str,
+ .timeline_value_str = pvr_sw_fence_timeline_value_str,
+ .enable_signaling = pvr_sw_fence_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = pvr_sw_fence_release,
+};
+
+struct pvr_sw_fence_context *
+pvr_sw_fence_context_create(const char *context_name, const char *driver_name)
+{
+ struct pvr_sw_fence_context *fence_context;
+
+ fence_context = kmalloc(sizeof(*fence_context), GFP_KERNEL);
+ if (!fence_context)
+ return NULL;
+
+ fence_context->context = dma_fence_context_alloc(1);
+ strlcpy(fence_context->context_name, context_name,
+ sizeof(fence_context->context_name));
+ strlcpy(fence_context->driver_name, driver_name,
+ sizeof(fence_context->driver_name));
+ atomic_set(&fence_context->seqno, 0);
+ atomic_set(&fence_context->fence_count, 0);
+ kref_init(&fence_context->kref);
+
+ return fence_context;
+}
+
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context)
+{
+ kref_put(&fence_context->kref, pvr_sw_fence_context_destroy_kref);
+}
+
+struct dma_fence *
+pvr_sw_fence_create(struct pvr_sw_fence_context *fence_context)
+{
+ struct pvr_sw_fence *pvr_sw_fence;
+ unsigned int seqno;
+
+ pvr_sw_fence = kmalloc(sizeof(*pvr_sw_fence), GFP_KERNEL);
+ if (!pvr_sw_fence)
+ return NULL;
+
+ spin_lock_init(&pvr_sw_fence->lock);
+ pvr_sw_fence->fence_context = fence_context;
+
+ seqno = pvr_sw_fence_context_seqno_next(fence_context);
+ dma_fence_init(&pvr_sw_fence->base, &pvr_sw_fence_ops,
+ &pvr_sw_fence->lock, fence_context->context, seqno);
+
+ atomic_inc(&fence_context->fence_count);
+ kref_get(&fence_context->kref);
+
+ return &pvr_sw_fence->base;
+}
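The context holds a kref that every fence pins (kref_get in pvr_sw_fence_create, kref_put in pvr_sw_fence_release), so the context outlives pvr_sw_fence_context_destroy() until the last fence is released. A hedged lifecycle sketch; the function and string names are illustrative, not driver code.

/* Illustrative software-fence round trip -- sketch only. */
static int example_sw_fence_roundtrip(void)
{
	struct pvr_sw_fence_context *fctx;
	struct dma_fence *fence;

	fctx = pvr_sw_fence_context_create("example-timeline", "example-drv");
	if (!fctx)
		return -ENOMEM;

	fence = pvr_sw_fence_create(fctx);
	if (!fence) {
		pvr_sw_fence_context_destroy(fctx);
		return -ENOMEM;
	}

	dma_fence_signal(fence);	/* mark the software fence as done */
	dma_fence_put(fence);		/* drops the fence and its context ref */

	pvr_sw_fence_context_destroy(fctx);	/* drops the creator's ref */
	return 0;
}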
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.h b/drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.h
new file mode 100644
index 00000000000000..5463b745d76387
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_sw_fence.h
@@ -0,0 +1,62 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVR_SW_FENCES_H__)
+#define __PVR_SW_FENCES_H__
+
+#include "pvr_linux_fence.h"
+
+struct pvr_sw_fence_context;
+
+struct pvr_sw_fence_context *pvr_sw_fence_context_create(const char *context_name,
+ const char *driver_name);
+void pvr_sw_fence_context_destroy(struct pvr_sw_fence_context *fence_context);
+struct dma_fence *pvr_sw_fence_create(struct pvr_sw_fence_context *
+ fence_context);
+
+const char *pvr_sw_fence_context_name(struct pvr_sw_fence_context *fctx);
+void pvr_sw_fence_context_value_str(struct pvr_sw_fence_context *fctx,
+ char *str, int size);
+
+#endif /* !defined(__PVR_SW_FENCES_H__) */
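For orientation, a minimal usage sketch of the SW fence API above (the calling
code and error values are illustrative, not part of this patch). Fences are
created unsignalled and are signalled through the standard dma-fence API; each
fence holds a reference on its context, so the context survives
pvr_sw_fence_context_destroy() until the last fence is released:

	struct pvr_sw_fence_context *fctx;
	struct dma_fence *fence;

	fctx = pvr_sw_fence_context_create("my-timeline", "my-driver");
	if (!fctx)
		return -ENOMEM;

	fence = pvr_sw_fence_create(fctx);	/* takes a ref on fctx */
	if (!fence) {
		pvr_sw_fence_context_destroy(fctx);
		return -ENOMEM;
	}

	/* ... hand the fence to a waiter ... */
	dma_fence_signal(fence);	/* standard dma-fence signalling */
	dma_fence_put(fence);		/* release drops the context ref */

	pvr_sw_fence_context_destroy(fctx);	/* drops the initial kref */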
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_sync.h b/drivers/gpu/drm/img-rogue/1.10/pvr_sync.h
new file mode 100644
index 00000000000000..5a5d01efefa6d2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_sync.h
@@ -0,0 +1,168 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File pvr_sync.h
+@Title Kernel driver for Android's sync mechanism
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVR_SYNC_H
+#define _PVR_SYNC_H
+
+#include <linux/device.h>
+#include <linux/kref.h>
+
+#include "pvr_fd_sync_kernel.h"
+
+
+/* Services internal interface */
+
+/**************************************************************************/ /*!
+@Function pvr_sync_init
+@Description Create an internal sync context
+@Input dev: Linux device
+@Return PVRSRV_OK on success
+*/ /***************************************************************************/
+enum PVRSRV_ERROR pvr_sync_init(struct device *dev);
+
+/**************************************************************************/ /*!
+@Function pvr_sync_deinit
+@Description Destroy an internal sync context. Drains any work items with
+ outstanding sync fence updates/dependencies.
+@Input None
+@Return None
+*/ /***************************************************************************/
+void pvr_sync_deinit(void);
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+struct pvr_sync_append_data;
+
+/**************************************************************************/ /*!
+@Function pvr_sync_get_updates
+@Description Internal API to resolve sync update data
+@Input sync_data: PVR sync data
+@Output nr_fences: number of UFO fence updates
+@Output ufo_addrs: UFO fence addresses
+@Output values: UFO fence values
+@Return None
+*/ /***************************************************************************/
+void pvr_sync_get_updates(const struct pvr_sync_append_data *sync_data,
+ u32 *nr_fences,
+ struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs,
+ u32 **values);
+
+/**************************************************************************/ /*!
+@Function pvr_sync_get_checks
+@Description Internal API to resolve sync check data
+@Input sync_data: PVR sync data
+@Output nr_fences: number of UFO fence checks
+@Output ufo_addrs: UFO fence addresses
+@Output values: UFO fence values
+@Return None
+*/ /***************************************************************************/
+void pvr_sync_get_checks(const struct pvr_sync_append_data *sync_data,
+ u32 *nr_fences,
+ struct _RGXFWIF_DEV_VIRTADDR_ **ufo_addrs,
+ u32 **values);
+
+/**************************************************************************/ /*!
+@Function pvr_sync_rollback_append_fences
+@Description Undo the last sync fence and its timeline, if present.
+ Must be called before pvr_sync_free_append_fences_data(), which
+ may free the fence sync object.
+@Input sync_data: PVR sync data
+@Return None
+*/ /***************************************************************************/
+void pvr_sync_rollback_append_fences(struct pvr_sync_append_data *sync_data);
+
+/**************************************************************************/ /*!
+@Function pvr_sync_nohw_complete_fences
+@Description Force updates to progress the sync timeline when hardware is not present.
+@Input sync_data: PVR sync data
+@Return None
+*/ /***************************************************************************/
+void pvr_sync_nohw_complete_fences(struct pvr_sync_append_data *sync_data);
+
+/**************************************************************************/ /*!
+@Function pvr_sync_free_append_fences_data
+@Description Commit the sync fences/updates
+@Input sync_data: PVR sync data
+@Return None
+*/ /***************************************************************************/
+void pvr_sync_free_append_fences_data(struct pvr_sync_append_data *sync_data);
+
+/**************************************************************************/ /*!
+@Function pvr_sync_get_update_fd
+@Description Get the file descriptor for the sync fence updates
+@Input sync_data: PVR sync data
+@Return Valid sync file descriptor on success; -EINVAL on failure
+*/ /***************************************************************************/
+int pvr_sync_get_update_fd(struct pvr_sync_append_data *sync_data);
+
+struct pvr_counting_fence_timeline;
+/**************************************************************************/ /*!
+@Function pvr_sync_get_sw_timeline
+@Description Get the PVR sync timeline from its file descriptor
+@Input fd: Linux file descriptor
+@Return PVR sync timeline
+*/ /***************************************************************************/
+struct pvr_counting_fence_timeline *pvr_sync_get_sw_timeline(int fd);
+
+/* PVR sync 2 SW timeline interface */
+
+struct pvr_sw_sync_timeline {
+ /* sw_sync_timeline must come first to allow casting of a ptr to
+ * the wrapping struct to a ptr to the sw_sync_timeline */
+ struct sw_sync_timeline *sw_sync_timeline;
+ u64 current_value;
+ u64 next_value;
+ /* Reference count for this object */
+ struct kref kref;
+};
+
+/**************************************************************************/ /*!
+@Function pvr_sw_sync_release_timeline
+@Description Release the current reference on a PVR SW sync timeline
+@Input timeline: the PVR SW sync timeline
+@Return None
+*/ /***************************************************************************/
+void pvr_sw_sync_release_timeline(struct pvr_sw_sync_timeline *timeline);
+
+#endif /* _PVR_SYNC_H */
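For orientation, a minimal sketch of the expected init/deinit ordering for the
interface above (the probe/remove wrappers are illustrative and not part of
this patch):

	static int example_probe(struct device *dev)
	{
		/* Registers the pvr_sync misc device and debug callbacks */
		if (pvr_sync_init(dev) != PVRSRV_OK)
			return -ENODEV;
		return 0;
	}

	static void example_remove(struct device *dev)
	{
		/* Drains outstanding fence work before tear-down */
		pvr_sync_deinit();
	}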
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_sync_file.c b/drivers/gpu/drm/img-rogue/1.10/pvr_sync_file.c
new file mode 100644
index 00000000000000..61c7be1170994e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_sync_file.c
@@ -0,0 +1,776 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File pvr_sync_file.c
+@Title Kernel driver for Android's sync mechanism
+@Codingstyle LinuxKernel
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "services_kernel_client.h"
+#include "pvr_drv.h"
+#include "pvr_sync.h"
+#include "pvr_fence.h"
+#include "pvr_counting_timeline.h"
+
+#include "linux_sw_sync.h"
+
+#include <linux/version.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/sync_file.h>
+#include <linux/file.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0)) && !defined(CHROMIUMOS_KERNEL)
+#define sync_file_user_name(s) ((s)->name)
+#else
+#define sync_file_user_name(s) ((s)->user_name)
+#endif
+
+#define PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, fmt, ...) \
+ do { \
+ if (pfnDumpDebugPrintf) \
+ pfnDumpDebugPrintf(pvDumpDebugFile, fmt, \
+ ## __VA_ARGS__); \
+ else \
+ pr_err(fmt "\n", ## __VA_ARGS__); \
+ } while (0)
+
+#define FILE_NAME "pvr_sync_file"
+
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence;
+};
+#define SW_SYNC_IOC_MAGIC 'W'
+#define SW_SYNC_IOC_CREATE_FENCE \
+ (_IOWR(SW_SYNC_IOC_MAGIC, 0, struct sw_sync_create_fence_data))
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
+
+/* Global data for the sync driver */
+static struct {
+ void *dev_cookie;
+ void *dbg_request_handle;
+ struct workqueue_struct *fence_status_wq;
+ struct pvr_fence_context *foreign_fence_context;
+#if defined(NO_HARDWARE)
+ spinlock_t pvr_timeline_active_list_lock;
+ struct list_head pvr_timeline_active_list;
+#endif
+} pvr_sync_data;
+
+static const struct file_operations pvr_sync_fops;
+
+/* This is the actual timeline metadata. We might keep this around after the
+ * base sync driver has destroyed the pvr_sync_timeline_wrapper object.
+ */
+struct pvr_sync_timeline {
+ char name[32];
+ struct file *file;
+ bool is_sw;
+ /* Fence context used for hw fences */
+ struct pvr_fence_context *hw_fence_context;
+ /* Timeline and context for sw fences */
+ struct pvr_counting_fence_timeline *sw_fence_timeline;
+#if defined(NO_HARDWARE)
+ /* List of all timelines (used to advance all timelines in nohw builds) */
+ struct list_head list;
+#endif
+};
+
+static
+void pvr_sync_free_checkpoint_list_mem(void *mem_ptr)
+{
+ kfree(mem_ptr);
+}
+
+#if defined(NO_HARDWARE)
+/* function used to signal pvr fence in nohw builds */
+static
+void pvr_sync_nohw_signal_fence(void *fence_data_to_signal)
+{
+ struct pvr_sync_timeline *this_timeline;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+ list_for_each_entry(this_timeline, &pvr_sync_data.pvr_timeline_active_list, list) {
+ pvr_fence_context_signal_fences_nohw(this_timeline->hw_fence_context);
+ }
+ spin_unlock_irqrestore(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+}
+#endif
+
+static bool is_pvr_timeline(struct file *file)
+{
+ return file->f_op == &pvr_sync_fops;
+}
+
+static struct pvr_sync_timeline *pvr_sync_timeline_fget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+
+ if (!is_pvr_timeline(file)) {
+ fput(file);
+ return NULL;
+ }
+
+ return file->private_data;
+}
+
+static void pvr_sync_timeline_fput(struct pvr_sync_timeline *timeline)
+{
+ fput(timeline->file);
+}
+
+/* ioctl and fops handling */
+
+static int pvr_sync_open(struct inode *inode, struct file *file)
+{
+ struct pvr_sync_timeline *timeline;
+ char task_comm[TASK_COMM_LEN];
+ int err = -ENOMEM;
+
+ get_task_comm(task_comm, current);
+
+ timeline = kzalloc(sizeof(*timeline), GFP_KERNEL);
+ if (!timeline)
+ goto err_out;
+
+ strlcpy(timeline->name, task_comm, sizeof(timeline->name));
+ timeline->file = file;
+ timeline->is_sw = false;
+
+ file->private_data = timeline;
+ err = 0;
+err_out:
+ return err;
+}
+
+static int pvr_sync_close(struct inode *inode, struct file *file)
+{
+ struct pvr_sync_timeline *timeline = file->private_data;
+
+ if (timeline->sw_fence_timeline) {
+ /* This makes sure any outstanding SW syncs are marked as
+ * complete at timeline close time. Otherwise it'll leak the
+ * timeline (as outstanding fences hold a ref) and possibly
+ * wedge the system if something is waiting on one of those
+ * fences.
+ */
+ pvr_counting_fence_timeline_force_complete(
+ timeline->sw_fence_timeline);
+ pvr_counting_fence_timeline_put(timeline->sw_fence_timeline);
+ }
+
+ if (timeline->hw_fence_context) {
+#if defined(NO_HARDWARE)
+ list_del(&timeline->list);
+#endif
+ pvr_fence_context_destroy(timeline->hw_fence_context);
+ }
+
+ kfree(timeline);
+
+ return 0;
+}
+
+enum PVRSRV_ERROR pvr_sync_finalise_fence(PVRSRV_FENCE fence_fd,
+ void *finalise_data)
+{
+ struct sync_file *sync_file = finalise_data;
+ struct pvr_fence *pvr_fence;
+
+ if (!sync_file || (fence_fd < 0)) {
+ pr_err(FILE_NAME ": %s: Invalid input fence\n", __func__);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ pvr_fence = to_pvr_fence(sync_file->fence);
+
+ /* pvr fences can be signalled any time after creation */
+ dma_fence_enable_sw_signaling(&pvr_fence->base);
+
+ fd_install(fence_fd, sync_file->file);
+
+ return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR pvr_sync_create_fence(const char *fence_name,
+ PVRSRV_TIMELINE new_fence_timeline,
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE *new_fence, u64 *fence_uid, void **fence_finalise_data,
+ PSYNC_CHECKPOINT *new_checkpoint_handle, void **timeline_update_sync,
+ __u32 *timeline_update_value)
+{
+ PVRSRV_ERROR err = PVRSRV_OK;
+ PVRSRV_FENCE new_fence_fd = -1;
+ struct pvr_sync_timeline *timeline;
+ struct pvr_fence *pvr_fence;
+ PSYNC_CHECKPOINT checkpoint;
+ struct sync_file *sync_file;
+
+ if (new_fence_timeline < 0 || !new_fence || !new_checkpoint_handle
+ || !fence_finalise_data) {
+ pr_err(FILE_NAME ": %s: Invalid input params\n", __func__);
+ err = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ /* Reserve the new fence FD before performing any other operations,
+ * as we do not want to fail later (e.g. by running out of FDs).
+ */
+ new_fence_fd = get_unused_fd_flags(0);
+ if (new_fence_fd < 0) {
+ pr_err(FILE_NAME ": %s: Failed to get fd\n", __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ timeline = pvr_sync_timeline_fget(new_fence_timeline);
+ if (!timeline) {
+ pr_err(FILE_NAME ": %s: Failed to open supplied timeline fd (%d)\n",
+ __func__, new_fence_timeline);
+ err = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_put_fd;
+ }
+
+ if (timeline->is_sw) {
+ /* This should never happen! */
+ pr_err(FILE_NAME ": %s: Request to create a pvr fence on sw timeline (%d)\n",
+ __func__, new_fence_timeline);
+ err = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_put_timeline;
+ }
+
+ if (!timeline->hw_fence_context) {
+#if defined(NO_HARDWARE)
+ unsigned long flags;
+#endif
+ /* First time we use this timeline, so create a context. */
+ timeline->hw_fence_context =
+ pvr_fence_context_create(pvr_sync_data.dev_cookie,
+ pvr_sync_data.fence_status_wq,
+ timeline->name);
+ if (!timeline->hw_fence_context) {
+ pr_err(FILE_NAME ": %s: Failed to create fence context (%d)\n",
+ __func__, new_fence_timeline);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_put_timeline;
+ }
+#if defined(NO_HARDWARE)
+ /* Add timeline to active list */
+ INIT_LIST_HEAD(&timeline->list);
+ spin_lock_irqsave(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+ list_add_tail(&timeline->list, &pvr_sync_data.pvr_timeline_active_list);
+ spin_unlock_irqrestore(&pvr_sync_data.pvr_timeline_active_list_lock, flags);
+#endif
+ }
+
+ pvr_fence = pvr_fence_create(timeline->hw_fence_context, new_fence_timeline,
+ fence_name);
+ if (!pvr_fence) {
+ pr_err(FILE_NAME ": %s: Failed to create new pvr_fence\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_put_timeline;
+ }
+
+ checkpoint = pvr_fence_get_checkpoint(pvr_fence);
+ if (!checkpoint) {
+ pr_err(FILE_NAME ": %s: Failed to get fence checkpoint\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_destroy_fence;
+ }
+
+ sync_file = sync_file_create(&pvr_fence->base);
+ if (!sync_file) {
+ pr_err(FILE_NAME ": %s: Failed to create sync_file\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_destroy_fence;
+ }
+ strlcpy(sync_file_user_name(sync_file),
+ pvr_fence->name,
+ sizeof(sync_file_user_name(sync_file)));
+ dma_fence_put(&pvr_fence->base);
+
+ *new_fence = new_fence_fd;
+ *fence_finalise_data = sync_file;
+ *new_checkpoint_handle = checkpoint;
+ *fence_uid = OSGetCurrentClientProcessIDKM();
+ *fence_uid = (*fence_uid << 32) | (new_fence_fd & U32_MAX);
+
+ pvr_sync_timeline_fput(timeline);
+err_out:
+ return err;
+
+err_destroy_fence:
+ pvr_fence_destroy(pvr_fence);
+err_put_timeline:
+ pvr_sync_timeline_fput(timeline);
+err_put_fd:
+ put_unused_fd(new_fence_fd);
+ *fence_uid = PVRSRV_NO_FENCE;
+ goto err_out;
+}
+
+enum PVRSRV_ERROR pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback,
+ void *fence_data_to_rollback)
+{
+ struct sync_file *sync_file = fence_data_to_rollback;
+ struct pvr_fence *pvr_fence;
+
+ if (!sync_file || fence_to_rollback < 0) {
+ pr_err(FILE_NAME ": %s: Invalid fence (%d)\n", __func__,
+ fence_to_rollback);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ pvr_fence = to_pvr_fence(sync_file->fence);
+ if (!pvr_fence) {
+ pr_err(FILE_NAME
+ ": %s: Non-PVR fence (%p)\n",
+ __func__, sync_file->fence);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ fput(sync_file->file);
+
+ put_unused_fd(fence_to_rollback);
+
+ return PVRSRV_OK;
+}
+
+enum PVRSRV_ERROR pvr_sync_resolve_fence(
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE fence_to_resolve, u32 *nr_checkpoints,
+ PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid)
+{
+ PSYNC_CHECKPOINT *checkpoints = NULL;
+ unsigned int i, num_fences, num_used_fences = 0;
+ struct dma_fence **fences = NULL;
+ struct dma_fence *fence;
+ PVRSRV_ERROR err = PVRSRV_OK;
+
+ if (!nr_checkpoints || !checkpoint_handles || !fence_uid) {
+ pr_err(FILE_NAME ": %s: Invalid input checkpoint pointer\n",
+ __func__);
+ err = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ *nr_checkpoints = 0;
+ *checkpoint_handles = NULL;
+ *fence_uid = 0;
+
+ if (fence_to_resolve < 0)
+ goto err_out;
+
+ fence = sync_file_get_fence(fence_to_resolve);
+ if (!fence) {
+ pr_err(FILE_NAME ": %s: Failed to read sync private data for fd %d\n",
+ __func__, fence_to_resolve);
+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto err_out;
+ }
+
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ if (!array) {
+ pr_err(FILE_NAME ": %s: Failed to resolve fence array %d\n",
+ __func__, fence_to_resolve);
+ err = PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ goto err_put_fence;
+ }
+ fences = array->fences;
+ num_fences = array->num_fences;
+ } else {
+ fences = &fence;
+ num_fences = 1;
+ }
+
+ checkpoints = kmalloc_array(num_fences, sizeof(PSYNC_CHECKPOINT),
+ GFP_KERNEL);
+ if (!checkpoints) {
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_put_fence;
+ }
+ for (i = 0; i < num_fences; i++) {
+ /* Only return the checkpoint if the fence is still active. */
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fences[i]->flags)) {
+ struct pvr_fence *pvr_fence =
+ pvr_fence_create_from_fence(
+ pvr_sync_data.foreign_fence_context,
+ fences[i],
+ "foreign");
+ if (!pvr_fence) {
+ pr_err(FILE_NAME ": %s: Failed to create fence\n",
+ __func__);
+ err = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free_checkpoints;
+ }
+ checkpoints[num_used_fences] =
+ pvr_fence_get_checkpoint(pvr_fence);
+ SyncCheckpointTakeRef(checkpoints[num_used_fences]);
+ ++num_used_fences;
+ dma_fence_put(&pvr_fence->base);
+ }
+ }
+ /* If we don't return any checkpoints, delete the array because
+ * the caller will not free it.
+ */
+ if (num_used_fences == 0) {
+ kfree(checkpoints);
+ checkpoints = NULL;
+ }
+
+ *checkpoint_handles = checkpoints;
+ *nr_checkpoints = num_used_fences;
+ *fence_uid = OSGetCurrentClientProcessIDKM();
+ *fence_uid = (*fence_uid << 32) | (fence_to_resolve & U32_MAX);
+
+err_put_fence:
+ dma_fence_put(fence);
+err_out:
+ return err;
+
+err_free_checkpoints:
+ for (i = 0; i < num_used_fences; i++) {
+ if (checkpoints[i])
+ SyncCheckpointDropRef(checkpoints[i]);
+ }
+ kfree(checkpoints);
+ goto err_put_fence;
+}
+
+u32 pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs)
+{
+ return pvr_fence_dump_info_on_stalled_ufos(pvr_sync_data.foreign_fence_context,
+ nr_ufos,
+ vaddrs);
+}
+
+static long pvr_sync_ioctl_rename(struct pvr_sync_timeline *timeline,
+ void __user *user_data)
+{
+ int err = 0;
+ struct pvr_sync_rename_ioctl_data data;
+
+ if (!access_ok(VERIFY_READ, user_data, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ if (copy_from_user(&data, user_data, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ data.szName[sizeof(data.szName) - 1] = '\0';
+ strlcpy(timeline->name, data.szName, sizeof(timeline->name));
+ if (timeline->hw_fence_context)
+ strlcpy(timeline->hw_fence_context->name, data.szName,
+ sizeof(timeline->hw_fence_context->name));
+
+err:
+ return err;
+}
+
+static long pvr_sync_ioctl_force_sw_only(struct pvr_sync_timeline *timeline,
+ void **private_data)
+{
+ /* Already in SW mode? */
+ if (timeline->sw_fence_timeline)
+ return 0;
+
+ /* Create a sw_sync timeline with the old GPU timeline's name */
+ timeline->sw_fence_timeline = pvr_counting_fence_timeline_create(
+ pvr_sync_data.dev_cookie,
+ timeline->name);
+ if (!timeline->sw_fence_timeline)
+ return -ENOMEM;
+
+ timeline->is_sw = true;
+
+ return 0;
+}
+
+static long pvr_sync_ioctl_sw_create_fence(struct pvr_sync_timeline *timeline,
+ void __user *user_data)
+{
+ struct pvr_sw_sync_create_fence_data data;
+ struct sync_file *sync_file;
+ int fd = get_unused_fd_flags(0);
+ struct dma_fence *fence;
+ int err = -EFAULT;
+
+ if (fd < 0) {
+ pr_err(FILE_NAME ": %s: Failed to find unused fd (%d)\n",
+ __func__, fd);
+ goto err_out;
+ }
+
+ if (copy_from_user(&data, user_data, sizeof(data))) {
+ pr_err(FILE_NAME ": %s: Failed copy from user\n", __func__);
+ goto err_put_fd;
+ }
+
+ fence = pvr_counting_fence_create(timeline->sw_fence_timeline,
+ data.value);
+ if (!fence) {
+ pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n",
+ __func__, fd);
+ err = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ sync_file = sync_file_create(fence);
+ if (!sync_file) {
+ pr_err(FILE_NAME ": %s: Failed to create a sync point (%d)\n",
+ __func__, fd);
+ err = -ENOMEM;
+ goto err_put_fence;
+ }
+
+ data.fence = fd;
+
+ if (copy_to_user(user_data, &data, sizeof(data))) {
+ pr_err(FILE_NAME ": %s: Failed copy to user\n", __func__);
+ goto err_put_fence;
+ }
+
+ fd_install(fd, sync_file->file);
+ err = 0;
+
+ dma_fence_put(fence);
+err_out:
+ return err;
+
+err_put_fence:
+ dma_fence_put(fence);
+err_put_fd:
+ put_unused_fd(fd);
+ goto err_out;
+}
+
+static long pvr_sync_ioctl_sw_inc(struct pvr_sync_timeline *timeline,
+ void __user *user_data)
+{
+ u32 value;
+ bool res;
+
+ if (copy_from_user(&value, user_data, sizeof(value)))
+ return -EFAULT;
+
+ res = pvr_counting_fence_timeline_inc(timeline->sw_fence_timeline, value);
+
+ /* pvr_counting_fence_timeline_inc won't allow the SW timeline to be
+ * advanced beyond the last defined point.
+ */
+ if (!res) {
+ pr_err("pvr_sync_file: attempt to advance SW timeline beyond last defined point\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static long
+pvr_sync_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *user_data = (void __user *)arg;
+ long err = -ENOTTY;
+ struct pvr_sync_timeline *timeline = file->private_data;
+
+ if (!timeline->is_sw) {
+
+ switch (cmd) {
+ case PVR_SYNC_IOC_RENAME:
+ err = pvr_sync_ioctl_rename(timeline, user_data);
+ break;
+ case PVR_SYNC_IOC_FORCE_SW_ONLY:
+ err = pvr_sync_ioctl_force_sw_only(timeline,
+ &file->private_data);
+ break;
+ default:
+ break;
+ }
+ } else {
+
+ switch (cmd) {
+ case PVR_SW_SYNC_IOC_CREATE_FENCE:
+ err = pvr_sync_ioctl_sw_create_fence(timeline,
+ user_data);
+ break;
+ case PVR_SW_SYNC_IOC_INC:
+ err = pvr_sync_ioctl_sw_inc(timeline, user_data);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return err;
+}
+
+static const struct file_operations pvr_sync_fops = {
+ .owner = THIS_MODULE,
+ .open = pvr_sync_open,
+ .release = pvr_sync_close,
+ .unlocked_ioctl = pvr_sync_ioctl,
+ .compat_ioctl = pvr_sync_ioctl,
+};
+
+static struct miscdevice pvr_sync_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = PVRSYNC_MODNAME,
+ .fops = &pvr_sync_fops,
+};
+
+static void
+pvr_sync_debug_request_heading(void *data, u32 verbosity,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ if (verbosity == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+ PVR_DUMPDEBUG_LOG(pfnDumpDebugPrintf, pvDumpDebugFile, "------[ Native Fence Sync: timelines ]------");
+}
+
+enum PVRSRV_ERROR pvr_sync_init(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct pvr_drm_private *priv = ddev->dev_private;
+ enum PVRSRV_ERROR error;
+ int err;
+
+ error = PVRSRVRegisterDbgRequestNotify(&pvr_sync_data.dbg_request_handle,
+ priv->dev_node,
+ pvr_sync_debug_request_heading,
+ DEBUG_REQUEST_LINUXFENCE,
+ NULL);
+ if (error != PVRSRV_OK) {
+ pr_err("%s: failed to register debug request callback (%s)\n",
+ __func__, PVRSRVGetErrorStringKM(error));
+ goto err_out;
+ }
+
+ pvr_sync_data.dev_cookie = priv->dev_node;
+ pvr_sync_data.fence_status_wq = priv->fence_status_wq;
+
+ pvr_sync_data.foreign_fence_context =
+ pvr_global_fence_context_create(pvr_sync_data.dev_cookie,
+ pvr_sync_data.fence_status_wq,
+ "foreign_sync");
+ if (!pvr_sync_data.foreign_fence_context) {
+ pr_err(FILE_NAME ": %s: Failed to create foreign sync context\n",
+ __func__);
+ error = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+#if defined(NO_HARDWARE)
+ INIT_LIST_HEAD(&pvr_sync_data.pvr_timeline_active_list);
+#endif
+
+ /* Register the resolve-fence and create-fence functions with
+ * sync_checkpoint.c.
+ * The pvr_fence context registers its own EventObject callback
+ * to update sync status.
+ */
+ SyncCheckpointRegisterFunctions(pvr_sync_resolve_fence,
+ pvr_sync_create_fence, pvr_sync_rollback_fence_data,
+ pvr_sync_finalise_fence,
+#if defined(NO_HARDWARE)
+ pvr_sync_nohw_signal_fence,
+#else
+ NULL,
+#endif
+ pvr_sync_free_checkpoint_list_mem,
+ pvr_sync_dump_info_on_stalled_ufos);
+
+ err = misc_register(&pvr_sync_device);
+ if (err) {
+ pr_err(FILE_NAME ": %s: Failed to register pvr_sync device (%d)\n",
+ __func__, err);
+ error = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ goto err_unregister_checkpoint_funcs;
+ }
+ error = PVRSRV_OK;
+
+err_out:
+ return error;
+
+err_unregister_checkpoint_funcs:
+ SyncCheckpointRegisterFunctions(NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+ goto err_out;
+}
+
+void pvr_sync_deinit(void)
+{
+ SyncCheckpointRegisterFunctions(NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ misc_deregister(&pvr_sync_device);
+ pvr_fence_context_destroy(pvr_sync_data.foreign_fence_context);
+ PVRSRVUnregisterDbgRequestNotify(pvr_sync_data.dbg_request_handle);
+}
+
+struct pvr_counting_fence_timeline *pvr_sync_get_sw_timeline(int fd)
+{
+ struct pvr_sync_timeline *timeline;
+ struct pvr_counting_fence_timeline *sw_timeline = NULL;
+
+ timeline = pvr_sync_timeline_fget(fd);
+ if (!timeline)
+ return NULL;
+
+ sw_timeline =
+ pvr_counting_fence_timeline_get(timeline->sw_fence_timeline);
+
+ pvr_sync_timeline_fput(timeline);
+ return sw_timeline;
+}
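For orientation, a hedged user-space sketch of the SW-timeline ioctl flow
implemented above (the device path is an assumption derived from
PVRSYNC_MODNAME; only the value and fence fields of
struct pvr_sw_sync_create_fence_data are exercised by this file):

	int tl = open("/dev/pvr_sync", O_RDWR | O_CLOEXEC); /* assumed node name */
	struct pvr_sw_sync_create_fence_data data = { .value = 1 };
	__u32 inc = 1;

	/* Switch this timeline into SW-only mode (pvr_sync_ioctl_force_sw_only) */
	ioctl(tl, PVR_SYNC_IOC_FORCE_SW_ONLY);

	/* Create a fence that signals when the timeline reaches data.value */
	ioctl(tl, PVR_SW_SYNC_IOC_CREATE_FENCE, &data); /* data.fence = sync file fd */

	/* Advance the timeline by inc; due fences signal */
	ioctl(tl, PVR_SW_SYNC_IOC_INC, &inc);

	close(data.fence);
	close(tl);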
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvr_uaccess.h b/drivers/gpu/drm/img-rogue/1.10/pvr_uaccess.h
new file mode 100644
index 00000000000000..382cca4e78c873
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvr_uaccess.h
@@ -0,0 +1,91 @@
+/*************************************************************************/ /*!
+@File
+@Title Utility functions for user space access
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_UACCESS_H__
+#define __PVR_UACCESS_H__
+
+#include <linux/uaccess.h>
+
+static inline unsigned long pvr_copy_to_user(void __user *pvTo, const void *pvFrom, unsigned long ulBytes)
+{
+ if (access_ok(VERIFY_WRITE, pvTo, ulBytes))
+ {
+ return __copy_to_user(pvTo, pvFrom, ulBytes);
+ }
+
+ return ulBytes;
+}
+
+
+#if defined(__KLOCWORK__)
+ /* This part only tells Klocwork not to report a false positive, because
+ it doesn't understand that pvr_copy_from_user will initialise the memory
+ pointed to by pvTo. */
+#include <linux/string.h> /* get the memset prototype */
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+ if (pvTo != NULL)
+ {
+ memset(pvTo, 0xAA, ulBytes);
+ return 0;
+ }
+ return 1;
+}
+
+#else /* real implementation */
+
+static inline unsigned long pvr_copy_from_user(void *pvTo, const void __user *pvFrom, unsigned long ulBytes)
+{
+ /*
+ * The compile time correctness checking introduced for copy_from_user in
+ * Linux 2.6.33 isn't fully compatible with our usage of the function.
+ */
+ if (access_ok(VERIFY_READ, pvFrom, ulBytes))
+ {
+ return __copy_from_user(pvTo, pvFrom, ulBytes);
+ }
+
+ return ulBytes;
+}
+#endif /* klocworks */
+
+#endif /* __PVR_UACCESS_H__ */
+
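For orientation, a short illustrative use of these helpers in an ioctl
handler (the handler and its payload struct are hypothetical):

	struct example_args { __u32 in; __u32 out; };	/* hypothetical payload */

	static long example_ioctl_handler(void __user *pvArg)
	{
		struct example_args sArgs;

		/* Both helpers return the number of bytes NOT copied */
		if (pvr_copy_from_user(&sArgs, pvArg, sizeof(sArgs)) != 0)
			return -EFAULT;

		sArgs.out = sArgs.in + 1;

		if (pvr_copy_to_user(pvArg, &sArgs, sizeof(sArgs)) != 0)
			return -EFAULT;
		return 0;
	}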
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrmodule.h b/drivers/gpu/drm/img-rogue/1.10/pvrmodule.h
new file mode 100644
index 00000000000000..267c7b6874872e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrmodule.h
@@ -0,0 +1,48 @@
+/*************************************************************************/ /*!
+@Title Module Author and License.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRMODULE_H_
+#define _PVRMODULE_H_
+
+MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
+MODULE_LICENSE("Dual MIT/GPL");
+
+#endif /* _PVRMODULE_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv.c b/drivers/gpu/drm/img-rogue/1.10/pvrsrv.c
new file mode 100644
index 00000000000000..2d2a1521a977ff
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv.c
@@ -0,0 +1,3876 @@
+/*************************************************************************/ /*!
+@File
+@Title core services functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main APIs for core services functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxdebug.h"
+#include "handle.h"
+#include "connection_server.h"
+#include "osconnection_server.h"
+#include "pdump_km.h"
+#include "ra.h"
+#include "allocmem.h"
+#include "pmr.h"
+#include "pvrsrv.h"
+#include "srvcore.h"
+#include "services_km.h"
+#include "pvrsrv_device.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "sync.h"
+#include "sync_server.h"
+#include "sync_checkpoint.h"
+#include "sync_fallback_server.h"
+#include "sync_checkpoint_init.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pvrsrv_pool.h"
+#include "info_page.h"
+#include "pvrsrv_bridge_init.h"
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+#include "devicemem_server.h"
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+#include "log2.h"
+
+#include "lists.h"
+#include "dllist.h"
+#include "syscommon.h"
+#include "sysvalidation.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+#include "physmem_hostmem.h"
+
+#include "tlintern.h"
+#include "htbserver.h"
+
+#if defined (SUPPORT_RGX)
+#include "rgxinit.h"
+#include "rgxhwperf.h"
+#include "rgxfwutils.h"
+#endif
+
+#if defined(PVR_RI_DEBUG)
+#include "ri_server.h"
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ #if !defined(GPUVIRT_SIZEOF_ARENA0)
+ #define GPUVIRT_SIZEOF_ARENA0 (64 * 1024 * 1024) /* 64MB of LMA memory for arena 0 (firmware and other allocations) */
+ #endif
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+
+#include "rgx_options.h"
+#include "srvinit.h"
+#include "rgxutils.h"
+
+#include "oskm_apphint.h"
+#include "pvrsrv_apphint.h"
+
+#include "rgx_bvnc_defs_km.h"
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined (SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+#if defined(SUPPORT_PHYSMEM_TEST)
+#include "physmem_test.h"
+#endif
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define INFINITE_SLEEP_TIMEOUT 0ULL
+#endif
+
+/*! Wait 100ms before retrying deferred clean-up again */
+#define CLEANUP_THREAD_WAIT_RETRY_TIMEOUT 100000ULL
+
+/*! Wait 8hrs when no deferred clean-up is required. Allows polling several
+ * times a day to check for any missed clean-up. */
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT INFINITE_SLEEP_TIMEOUT
+#else
+#define CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT 28800000000ULL
+#endif
+
+/*! When unloading, try a few times to free everything remaining on the list */
+#define CLEANUP_THREAD_UNLOAD_RETRY 4
+
+#define PVRSRV_PROC_HANDLE_BASE_INIT 10
+
+#define PVRSRV_TL_CTLR_STREAM_SIZE 4096
+
+#define PVRSRV_MAX_POOLED_BRIDGE_BUFFERS 8 /*!< Max number of pooled bridge buffers */
+
+static PVRSRV_DATA *gpsPVRSRVData;
+static IMG_UINT32 g_ui32InitFlags;
+
+/* mark which parts of Services were initialised */
+#define INIT_DATA_ENABLE_PDUMPINIT 0x1U
+
+static IMG_UINT32 g_aui32DebugOrderTable[] = {
+ DEBUG_REQUEST_SYS,
+ DEBUG_REQUEST_APPHINT,
+ DEBUG_REQUEST_HTB,
+ DEBUG_REQUEST_DC,
+ DEBUG_REQUEST_SYNCCHECKPOINT,
+ DEBUG_REQUEST_SERVERSYNC,
+ DEBUG_REQUEST_ANDROIDSYNC,
+ DEBUG_REQUEST_FALLBACKSYNC,
+ DEBUG_REQUEST_LINUXFENCE
+};
+
+static PVRSRV_ERROR _VzDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode);
+static void _VzDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR _VzConstructRAforFwHeap(RA_ARENA **ppsArena, IMG_CHAR *szName,
+ IMG_UINT64 uBase, RA_LENGTH_T uSize);
+static void _VzTearDownRAforFwHeap(RA_ARENA **ppsArena, IMG_UINT64 uBase);
+
+/* Callback to dump info of cleanup thread in debug_dump */
+static void CleanupThreadDumpInfo(IMG_HANDLE hDbgReqestHandle,
+ DUMPDEBUG_PRINTF_FUNC* pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DATA *psPVRSRVData;
+ psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ PVR_DUMPDEBUG_LOG(" Number of deferred cleanup items : %u",
+ OSAtomicRead(&psPVRSRVData->i32NumCleanupItems));
+}
+
+/* Add work to the cleanup thread work list.
+ * The work item will be executed by the cleanup thread
+ */
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData)
+{
+ PVRSRV_DATA *psPVRSRVData;
+ PVRSRV_ERROR eError;
+
+ psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ PVR_ASSERT(psData != NULL);
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK || psPVRSRVData->bUnload)
+#else
+ if (psPVRSRVData->bUnload)
+#endif
+ {
+ CLEANUP_THREAD_FN pfnFree = psData->pfnFree;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Cleanup thread has already quit: doing work immediately"));
+
+ eError = pfnFree(psData->pvData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+ "(callback " IMG_PFN_FMTSPEC "). "
+ "Immediate free will not be retried.",
+ pfnFree));
+ }
+ }
+ else
+ {
+ /* add this work item to the list */
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, &psData->sNode);
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ OSAtomicIncrement(&psPVRSRVData->i32NumCleanupItems);
+
+ /* signal the cleanup thread to ensure this item gets processed */
+ eError = OSEventObjectSignal(psPVRSRVData->hCleanupEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+}
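/* For orientation, a hedged sketch (not part of the patch) of queueing a
 * deferred free with the API above. The work-item field names are inferred
 * from their uses in this file; the item must remain valid until pfnFree
 * succeeds, and a non-OK return from pfnFree causes the cleanup thread to
 * retry.
 */
static PVRSRV_ERROR _ExampleDeferredFree(void *pvData)
{
	OSFreeMem(pvData);	/* hypothetical resource release */
	return PVRSRV_OK;
}

static void _ExampleQueueFree(PVRSRV_CLEANUP_THREAD_WORK *psWork, void *pvRes)
{
	psWork->pfnFree = _ExampleDeferredFree;
	psWork->pvData = pvRes;
	psWork->ui32RetryCount = 4;	/* arbitrary retry budget for this sketch */
	psWork->bDependsOnHW = IMG_FALSE;	/* wake on the cleanup EO, not the global EO */
	PVRSRVCleanupThreadAddWork(psWork);
}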
+
+/* Pop an item from the head of the cleanup thread work list */
+static INLINE DLLIST_NODE *_CleanupThreadWorkListPop(PVRSRV_DATA *psPVRSRVData)
+{
+ DLLIST_NODE *psNode;
+
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ psNode = dllist_get_next_node(&psPVRSRVData->sCleanupThreadWorkList);
+ if (psNode != NULL)
+ {
+ dllist_remove_node(psNode);
+ }
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ return psNode;
+}
+
+/* Process the cleanup thread work list */
+static IMG_BOOL _CleanupThreadProcessWorkList(PVRSRV_DATA *psPVRSRVData,
+ IMG_BOOL *pbUseGlobalEO)
+{
+ DLLIST_NODE *psNodeIter, *psNodeLast;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bNeedRetry = IMG_FALSE;
+
+ /* Any callback function which returns an error is moved to the
+ * back of the list. Additional items can be added to the list at
+ * any time, so we make sure we only iterate from the head of the
+ * list to the tail captured below (since the tail may keep
+ * changing).
+ */
+
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ psNodeLast = psPVRSRVData->sCleanupThreadWorkList.psPrevNode;
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ do
+ {
+ PVRSRV_CLEANUP_THREAD_WORK *psData;
+
+ psNodeIter = _CleanupThreadWorkListPop(psPVRSRVData);
+
+ if (psNodeIter != NULL)
+ {
+ CLEANUP_THREAD_FN pfnFree;
+
+ psData = IMG_CONTAINER_OF(psNodeIter, PVRSRV_CLEANUP_THREAD_WORK, sNode);
+
+ /* get the function pointer address here so we have access to it
+ * in order to report the error in case of failure, without having
+ * to depend on psData not having been freed
+ */
+ pfnFree = psData->pfnFree;
+
+ *pbUseGlobalEO = psData->bDependsOnHW;
+ eError = pfnFree(psData->pvData);
+
+ if (eError != PVRSRV_OK)
+ {
+ /* move to back of the list, if this item's
+ * retry count hasn't hit zero.
+ */
+ if (CLEANUP_THREAD_IS_RETRY_TIMEOUT(psData))
+ {
+ if (CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(psData))
+ {
+ bNeedRetry = IMG_TRUE;
+ }
+ }
+ else
+ {
+ if (psData->ui32RetryCount-- > 0)
+ {
+ bNeedRetry = IMG_TRUE;
+ }
+ }
+
+ if (bNeedRetry)
+ {
+ OSLockAcquire(psPVRSRVData->hCleanupThreadWorkListLock);
+ dllist_add_to_tail(&psPVRSRVData->sCleanupThreadWorkList, psNodeIter);
+ OSLockRelease(psPVRSRVData->hCleanupThreadWorkListLock);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to free resource "
+ "(callback " IMG_PFN_FMTSPEC "). "
+ "Retry limit reached",
+ pfnFree));
+ }
+ }
+ else
+ {
+ OSAtomicDecrement(&psPVRSRVData->i32NumCleanupItems);
+ }
+ }
+ } while((psNodeIter != NULL) && (psNodeIter != psNodeLast));
+
+ return bNeedRetry;
+}
+
+// #define CLEANUP_DPFL PVR_DBG_WARNING
+#define CLEANUP_DPFL PVR_DBG_MESSAGE
+
+/* Create/initialise data required by the cleanup thread,
+ * before the cleanup thread is started
+ */
+static PVRSRV_ERROR _CleanupThreadPrepare(PVRSRV_DATA *psPVRSRVData)
+{
+ PVRSRV_ERROR eError;
+
+ /* Create the clean up event object */
+
+ eError = OSEventObjectCreate("PVRSRV_CLEANUP_EVENTOBJECT", &gpsPVRSRVData->hCleanupEventObject);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Exit);
+
+ /* initialise the mutex and linked list required for the cleanup thread work list */
+
+ eError = OSLockCreate(&psPVRSRVData->hCleanupThreadWorkListLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", Exit);
+
+ dllist_init(&psPVRSRVData->sCleanupThreadWorkList);
+
+Exit:
+ return eError;
+}
+
+static void CleanupThread(void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = pvData;
+ IMG_BOOL bRetryWorkList = IMG_FALSE;
+ IMG_HANDLE hGlobalEvent;
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eRc;
+ IMG_BOOL bUseGlobalEO = IMG_FALSE;
+ IMG_UINT32 uiUnloadRetry = 0;
+
+ /* Store the process id (pid) of the clean-up thread */
+ psPVRSRVData->cleanupThreadPid = OSGetCurrentProcessID();
+ OSAtomicWrite(&psPVRSRVData->i32NumCleanupItems, 0);
+
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread starting... "));
+
+ /* Open an event on the clean up event object so we can listen on it,
+ * abort the clean up thread and driver if this fails.
+ */
+ eRc = OSEventObjectOpen(psPVRSRVData->hCleanupEventObject, &hOSEvent);
+ PVR_ASSERT(eRc == PVRSRV_OK);
+
+ eRc = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hGlobalEvent);
+ PVR_ASSERT(eRc == PVRSRV_OK);
+
+ /* While the driver is in a good state and is not being unloaded,
+ * try to free any deferred items when signalled.
+ */
+ while (psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK)
+ {
+ IMG_HANDLE hEvent;
+
+ if (psPVRSRVData->bUnload)
+ {
+ if (dllist_is_empty(&psPVRSRVData->sCleanupThreadWorkList) ||
+ uiUnloadRetry > CLEANUP_THREAD_UNLOAD_RETRY)
+ {
+ break;
+ }
+ uiUnloadRetry++;
+ }
+
+ /* Wait until signalled for deferred clean-up, or wait only for a
+ * short period (before retrying) if the previous deferred clean-up
+ * could not release all the resources. The bridge lock is
+ * re-acquired on our behalf before the wait call returns.
+ */
+
+ if (bRetryWorkList && bUseGlobalEO)
+ {
+ hEvent = hGlobalEvent;
+ }
+ else
+ {
+ hEvent = hOSEvent;
+ }
+
+ eRc = OSEventObjectWaitKernel(hEvent,
+ bRetryWorkList ?
+ CLEANUP_THREAD_WAIT_RETRY_TIMEOUT :
+ CLEANUP_THREAD_WAIT_SLEEP_TIMEOUT);
+ if (eRc == PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait timeout"));
+ }
+ else if (eRc == PVRSRV_OK)
+ {
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: wait OK, signal received"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "CleanupThread: wait error %d", eRc));
+ }
+
+ bRetryWorkList = _CleanupThreadProcessWorkList(psPVRSRVData, &bUseGlobalEO);
+ }
+
+ OSLockDestroy(psPVRSRVData->hCleanupThreadWorkListLock);
+
+ eRc = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+ eRc = OSEventObjectClose(hGlobalEvent);
+ PVR_LOG_IF_ERROR(eRc, "OSEventObjectClose");
+
+ PVR_DPF((CLEANUP_DPFL, "CleanupThread: thread ending... "));
+}
+
+static IMG_BOOL DevicesWatchdogThread_Powered_Any(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_DEV_POWER_STATE ePowerState = PVRSRV_DEV_POWER_STATE_ON;
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ /* Power lock cannot be acquired at this time (sys power is off) */
+ return IMG_FALSE;
+ }
+
+ /* Any other error is unexpected, so we assume the device is on */
+ PVR_DPF((PVR_DBG_ERROR,
+ "DevicesWatchdogThread: Failed to acquire power lock for device %p (%s)",
+ psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ return IMG_TRUE;
+ }
+
+ (void) PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ return (ePowerState == PVRSRV_DEV_POWER_STATE_ON) ? IMG_TRUE : IMG_FALSE;
+}
+
+static void DevicesWatchdogThread_ForEachVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+ va_list va)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ PVRSRV_DEVICE_HEALTH_STATUS *pePreviousHealthStatus, eHealthStatus;
+ PVRSRV_ERROR eError;
+
+ pePreviousHealthStatus = va_arg(va, PVRSRV_DEVICE_HEALTH_STATUS *);
+
+ if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+ {
+ eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "DevicesWatchdogThread: "
+ "Could not check for fatal error (%d)!",
+ eError));
+ }
+ }
+ eHealthStatus = OSAtomicRead(&psDeviceNode->eHealthStatus);
+
+ if (eHealthStatus != PVRSRV_DEVICE_HEALTH_STATUS_OK)
+ {
+ if (eHealthStatus != *pePreviousHealthStatus)
+ {
+ if (!(psDevInfo->ui32DeviceFlags &
+ RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+ "Device status not OK!!!"));
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX,
+ NULL, NULL);
+ }
+ }
+ }
+
+ *pePreviousHealthStatus = eHealthStatus;
+}
+
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+typedef enum
+{
+ DWT_ST_INIT,
+ DWT_ST_SLEEP_POWERON,
+ DWT_ST_SLEEP_POWEROFF,
+ DWT_ST_SLEEP_DEFERRED,
+ DWT_ST_FINAL
+} DWT_STATE;
+
+typedef enum
+{
+ DWT_SIG_POWERON,
+ DWT_SIG_POWEROFF,
+ DWT_SIG_TIMEOUT,
+ DWT_SIG_UNLOAD,
+ DWT_SIG_ERROR
+} DWT_SIGNAL;
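/* Summary (added for clarity) of the transitions implemented below:
 *
 *   INIT           -> SLEEP_POWERON if any device is powered,
 *                     else SLEEP_POWEROFF
 *   SLEEP_POWERON  -> SLEEP_DEFERRED on POWEROFF; FINAL on UNLOAD;
 *                     health check and stay on TIMEOUT
 *   SLEEP_DEFERRED -> SLEEP_POWERON on POWERON; SLEEP_POWEROFF on
 *                     TIMEOUT (one extra short sleep so a health check
 *                     still runs before the indefinite sleep); FINAL
 *                     on UNLOAD
 *   SLEEP_POWEROFF -> SLEEP_POWERON on POWERON; FINAL on UNLOAD;
 *                     health check and stay on TIMEOUT
 *   FINAL          -> the watchdog loop exits
 * ERROR signals are deliberately ignored in every state.
 */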
+
+static inline IMG_BOOL _DwtIsPowerOn(PVRSRV_DATA *psPVRSRVData)
+{
+ return List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+ DevicesWatchdogThread_Powered_Any);
+}
+
+static inline void _DwtCheckHealthStatus(PVRSRV_DATA *psPVRSRVData,
+ PVRSRV_DEVICE_HEALTH_STATUS *peStatus)
+{
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+ DevicesWatchdogThread_ForEachVaCb,
+ peStatus);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+ SysPrintAndResetFaultStatusRegister();
+#endif
+}
+
+static DWT_SIGNAL _DwtWait(PVRSRV_DATA *psPVRSRVData, IMG_HANDLE hOSEvent,
+ IMG_UINT32 ui32Timeout)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64) ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+ psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+
+ if (eError == PVRSRV_OK)
+ {
+ if (psPVRSRVData->bUnload)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event"
+ " received."));
+ return DWT_SIG_UNLOAD;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state "
+ "change event received."));
+
+ if (_DwtIsPowerOn(psPVRSRVData))
+ {
+ return DWT_SIG_POWERON;
+ }
+ else
+ {
+ return DWT_SIG_POWEROFF;
+ }
+ }
+ }
+ else if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ return DWT_SIG_TIMEOUT;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: Error (%d) when"
+ " waiting for event!", eError));
+ return DWT_SIG_ERROR;
+}
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+
+static void DevicesWatchdogThread(void *pvData)
+{
+ PVRSRV_DATA *psPVRSRVData = pvData;
+ PVRSRV_DEVICE_HEALTH_STATUS ePreviousHealthStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+ DWT_STATE eState = DWT_ST_INIT;
+ const IMG_UINT32 ui32OnTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+ const IMG_UINT32 ui32OffTimeout = INFINITE_SLEEP_TIMEOUT;
+#else
+ IMG_UINT32 ui32Timeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+ /* Flag used to defer the sleep timeout change by 1 loop iteration.
+ * This helps to ensure at least two health checks are performed before a long sleep.
+ */
+ IMG_BOOL bDoDeferredTimeoutChange = IMG_FALSE;
+#endif
+
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power off sleep time: %d.",
+ DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT));
+
+ /* Open an event on the devices watchdog event object so we can listen on it
+ and abort the devices watchdog thread. */
+ eError = OSEventObjectOpen(psPVRSRVData->hDevicesWatchdogEvObj, &hOSEvent);
+ PVR_LOGRN_IF_ERROR(eError, "OSEventObjectOpen");
+
+ /* Loop continuously checking the device status every few seconds. */
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ while ((psPVRSRVData->eServicesState == PVRSRV_SERVICES_STATE_OK) &&
+ !psPVRSRVData->bUnload)
+#else
+ while (!psPVRSRVData->bUnload)
+#endif
+ {
+#if defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+
+ switch (eState)
+ {
+ case DWT_ST_INIT:
+ {
+ if (_DwtIsPowerOn(psPVRSRVData))
+ {
+ eState = DWT_ST_SLEEP_POWERON;
+ }
+ else
+ {
+ eState = DWT_ST_SLEEP_POWEROFF;
+ }
+
+ break;
+ }
+ case DWT_ST_SLEEP_POWERON:
+ {
+ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+ ui32OnTimeout);
+
+ switch (eSignal)
+ {
+ case DWT_SIG_POWERON:
+ /* self-transition, nothing to do */
+ break;
+ case DWT_SIG_POWEROFF:
+ eState = DWT_ST_SLEEP_DEFERRED;
+ break;
+ case DWT_SIG_TIMEOUT:
+ _DwtCheckHealthStatus(psPVRSRVData,
+ &ePreviousHealthStatus);
+ /* self-transition */
+ break;
+ case DWT_SIG_UNLOAD:
+ eState = DWT_ST_FINAL;
+ break;
+ case DWT_SIG_ERROR:
+ /* deliberately ignored */
+ break;
+ }
+
+ break;
+ }
+ case DWT_ST_SLEEP_POWEROFF:
+ {
+ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+ ui32OffTimeout);
+
+ switch (eSignal)
+ {
+ case DWT_SIG_POWERON:
+ eState = DWT_ST_SLEEP_POWERON;
+ _DwtCheckHealthStatus(psPVRSRVData,
+ &ePreviousHealthStatus);
+ break;
+ case DWT_SIG_POWEROFF:
+ /* self-transition, nothing to do */
+ break;
+ case DWT_SIG_TIMEOUT:
+ /* self-transition */
+ _DwtCheckHealthStatus(psPVRSRVData,
+ &ePreviousHealthStatus);
+ break;
+ case DWT_SIG_UNLOAD:
+ eState = DWT_ST_FINAL;
+ break;
+ case DWT_SIG_ERROR:
+ /* deliberately ignored */
+ break;
+ }
+
+ break;
+ }
+ case DWT_ST_SLEEP_DEFERRED:
+ {
+ DWT_SIGNAL eSignal = _DwtWait(psPVRSRVData, hOSEvent,
+ ui32OnTimeout);
+
+ switch (eSignal)
+ {
+ case DWT_SIG_POWERON:
+ eState = DWT_ST_SLEEP_POWERON;
+ _DwtCheckHealthStatus(psPVRSRVData,
+ &ePreviousHealthStatus);
+ break;
+ case DWT_SIG_POWEROFF:
+ /* self-transition, nothing to do */
+ break;
+ case DWT_SIG_TIMEOUT:
+ eState = DWT_ST_SLEEP_POWEROFF;
+ _DwtCheckHealthStatus(psPVRSRVData,
+ &ePreviousHealthStatus);
+ break;
+ case DWT_SIG_UNLOAD:
+ eState = DWT_ST_FINAL;
+ break;
+ case DWT_SIG_ERROR:
+ /* deliberately ignored */
+ break;
+ }
+
+ break;
+ }
+ case DWT_ST_FINAL:
+ /* The loop should terminate on the next iteration once this state
+ * is reached, so there is nothing to do here. */
+ break;
+ }
+
+#else /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+ IMG_BOOL bPwrIsOn = IMG_FALSE;
+
+ /* Wait time between polls (done at the start of the loop to allow devices
+ to initialise) or for the event signal (shutdown or power on). */
+ eError = OSEventObjectWaitKernel(hOSEvent, (IMG_UINT64)ui32Timeout * 1000);
+
+#ifdef PVR_TESTING_UTILS
+ psPVRSRVData->ui32DevicesWdWakeupCounter++;
+#endif
+ if (eError == PVRSRV_OK)
+ {
+ if (psPVRSRVData->bUnload)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Shutdown event received."));
+ break;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "DevicesWatchdogThread: Power state change event received."));
+ }
+ }
+ else if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ /* A timeout is expected here; any other error is abnormal, so log it. */
+ PVR_DPF((PVR_DBG_ERROR, "DevicesWatchdogThread: "
+ "Error (%d) when waiting for event!", eError));
+ }
+
+ bPwrIsOn = List_PVRSRV_DEVICE_NODE_IMG_BOOL_Any(psPVRSRVData->psDeviceNodeList,
+ DevicesWatchdogThread_Powered_Any);
+
+ if (bPwrIsOn || psPVRSRVData->ui32DevicesWatchdogPwrTrans)
+ {
+ psPVRSRVData->ui32DevicesWatchdogPwrTrans = 0;
+ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_ON_SLEEP_TIMEOUT;
+ bDoDeferredTimeoutChange = IMG_FALSE;
+ }
+ else
+ {
+ /* First, check if the previous loop iteration signalled a need to change the timeout period */
+ if (bDoDeferredTimeoutChange == IMG_TRUE)
+ {
+ ui32Timeout = psPVRSRVData->ui32DevicesWatchdogTimeout = DEVICES_WATCHDOG_POWER_OFF_SLEEP_TIMEOUT;
+ bDoDeferredTimeoutChange = IMG_FALSE;
+ }
+ else
+ {
+ /* Signal that we need to change the sleep timeout in the next loop iteration
+ * to allow the device health check code a further iteration at the current
+ * sleep timeout in order to determine bad health (e.g. stalled cCCB) by
+ * comparing past and current state snapshots */
+ bDoDeferredTimeoutChange = IMG_TRUE;
+ }
+ }
+
+ List_PVRSRV_DEVICE_NODE_ForEach_va(psPVRSRVData->psDeviceNodeList,
+ DevicesWatchdogThread_ForEachVaCb,
+ &ePreviousHealthStatus);
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+ SysPrintAndResetFaultStatusRegister();
+#endif
+
+#endif /* defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP) */
+ }
+
+ eError = OSEventObjectClose(hOSEvent);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectClose");
+}
+
+
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void)
+{
+ return gpsPVRSRVData;
+}
+
+static PVRSRV_ERROR _HostMemDeviceCreate(void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_CONFIG *psDevConfig = HostMemGetDeviceConfig();
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Assert ensures HostMemory device isn't already created and
+ * that data is initialized */
+ PVR_ASSERT(psPVRSRVData->psHostMemDeviceNode == NULL);
+
+ /* for now, we only know a single heap (UMA) config for host device */
+ PVR_ASSERT(psDevConfig->ui32PhysHeapCount == 1 &&
+ psDevConfig->pasPhysHeaps[0].eType == PHYS_HEAP_TYPE_UMA);
+
+ /* N.B. In case of any failure in this function, we simply return the error
+ to the caller, as clean-up is taken care of by _HostMemDeviceDestroy() */
+
+ psDeviceNode = OSAllocZMem(sizeof(*psDeviceNode));
+ PVR_LOGR_IF_NOMEM(psDeviceNode, "OSAllocZMem");
+
+ /* early save return pointer to aid clean-up */
+ psPVRSRVData->psHostMemDeviceNode = psDeviceNode;
+
+ psDeviceNode->psDevConfig = psDevConfig;
+ psDeviceNode->papsRegisteredPhysHeaps =
+ OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+ psDevConfig->ui32PhysHeapCount);
+ PVR_LOGR_IF_NOMEM(psDeviceNode->papsRegisteredPhysHeaps, "OSAllocZMem");
+
+ eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[0],
+ &psDeviceNode->papsRegisteredPhysHeaps[0]);
+ PVR_LOGR_IF_ERROR(eError, "PhysHeapRegister");
+ psDeviceNode->ui32RegisteredPhysHeaps = 1;
+
+ /* Only CPU local heap is valid on host-mem DevNode, so enable minimal callbacks */
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+ PVR_LOGR_IF_ERROR(eError, "PhysHeapAcquire");
+
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+
+ return PVRSRV_OK;
+}
+
+static void _HostMemDeviceDestroy(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode = psPVRSRVData->psHostMemDeviceNode;
+
+ if (!psDeviceNode)
+ {
+ return;
+ }
+
+ psPVRSRVData->psHostMemDeviceNode = NULL;
+ if (psDeviceNode->papsRegisteredPhysHeaps)
+ {
+ if (psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+ {
+ PhysHeapRelease(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+ }
+
+ if (psDeviceNode->papsRegisteredPhysHeaps[0])
+ {
+ /* clean-up function as well is aware of only one heap */
+ PVR_ASSERT(psDeviceNode->ui32RegisteredPhysHeaps == 1);
+ PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[0]);
+ }
+
+ OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+ }
+ OSFreeMem(psDeviceNode);
+}
+
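+/* Allocation and free callbacks handed to PVRSRVPoolCreate() in
+ * PVRSRVDriverInit() below. Each pooled buffer is one zeroed allocation big
+ * enough for both the bridge input and output packets, so a single pool
+ * entry services a whole bridge call. */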
+static PVRSRV_ERROR _BridgeBufferAlloc(void *pvPrivData, void **pvOut)
+{
+ PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+ *pvOut = OSAllocZMem(PVRSRV_MAX_BRIDGE_IN_SIZE +
+ PVRSRV_MAX_BRIDGE_OUT_SIZE);
+
+ if (*pvOut == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return PVRSRV_OK;
+}
+
+static void _BridgeBufferFree(void *pvPrivData, void *pvFreeData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvPrivData);
+
+ OSFreeMem(pvFreeData);
+}
+
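+/* One-time, device-independent driver initialisation. Sub-systems are brought
+ * up in dependency order and any failure jumps to the single Error label,
+ * which defers clean-up to PVRSRVDriverDeInit(); that function guards most of
+ * its teardown steps with handle checks, so no per-step unwind is needed
+ * here. */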
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDriverInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ PVRSRV_DATA *psPVRSRVData = NULL;
+
+ IMG_UINT32 ui32AppHintCleanupThreadPriority;
+ IMG_UINT32 ui32AppHintCleanupThreadWeight;
+ IMG_UINT32 ui32AppHintWatchdogThreadPriority;
+ IMG_UINT32 ui32AppHintWatchdogThreadWeight;
+
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault;
+
+ /*
+ * As this function performs one time driver initialisation, use the
+ * Services global device-independent data to determine whether or not
+ * this function has already been called.
+ */
+ if (gpsPVRSRVData)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Driver already initialised", __func__));
+ return PVRSRV_ERROR_ALREADY_EXISTS;
+ }
+
+ /*
+ * Initialise the server bridges
+ */
+ eError = CommonBridgeInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = PhysHeapInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = DevmemIntInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ /*
+ * Allocate the device-independent data
+ */
+ psPVRSRVData = OSAllocZMem(sizeof(*gpsPVRSRVData));
+ if (psPVRSRVData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto Error;
+ }
+
+ /* Now it is set up, point gpsPVRSRVData to the actual data */
+ gpsPVRSRVData = psPVRSRVData;
+
+#if defined(SUPPORT_PHYSMEM_TEST) && !defined(TC_MEMORY_CONFIG) && !defined(PLATO_MEMORY_CONFIG)
+ eError = PhysMemTest();
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(gpsPVRSRVData);
+ gpsPVRSRVData = NULL;
+ return eError;
+ }
+#endif
+
+ eError = PVRSRVPoolCreate(_BridgeBufferAlloc,
+ _BridgeBufferFree,
+ PVRSRV_MAX_POOLED_BRIDGE_BUFFERS,
+ "Bridge buffer pool",
+ NULL,
+ &psPVRSRVData->psBridgeBufferPool);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create bridge buffer pool: %s",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto Error;
+ }
+
+ /* Initialise any OS-specific data */
+ eError = OSInitEnvData();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ /* Early initialisation of server cache maintenance */
+ eError = CacheOpInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+#if defined(PVR_RI_DEBUG)
+ RIInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = DevicememHistoryInitKM();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to initialise DevicememHistoryInitKM", __func__));
+ goto Error;
+ }
+#endif
+
+ eError = BridgeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to initialise bridge",
+ __func__));
+ goto Error;
+ }
+
+ eError = PMRInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = DCInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+#endif
+
+ /* Initialise overall system state */
+ gpsPVRSRVData->eServicesState = PVRSRV_SERVICES_STATE_OK;
+
+ /* Create an event object */
+ eError = OSEventObjectCreate("PVRSRV_GLOBAL_EVENTOBJECT", &gpsPVRSRVData->hGlobalEventObject);
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+ gpsPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+
+ eError = PVRSRVCmdCompleteInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ /* Initialise pdump */
+ eError = PDUMPINIT();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ g_ui32InitFlags |= INIT_DATA_ENABLE_PDUMPINIT;
+
+ eError = PVRSRVHandleInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = _CleanupThreadPrepare(gpsPVRSRVData);
+ PVR_LOGG_IF_ERROR(eError, "_CleanupThreadPrepare", Error);
+
+ /* Create a thread which is used to do the deferred cleanup */
+ eError = OSThreadCreatePriority(&gpsPVRSRVData->hCleanupThread,
+ "pvr_defer_free",
+ CleanupThread,
+ CleanupThreadDumpInfo,
+ IMG_TRUE,
+ gpsPVRSRVData,
+ OS_THREAD_LOWEST_PRIORITY);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create deferred cleanup thread",
+ __func__));
+ goto Error;
+ }
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADPRIORITY;
+ OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadPriority,
+ &ui32AppHintDefault, &ui32AppHintCleanupThreadPriority);
+ ui32AppHintDefault = PVRSRV_APPHINT_CLEANUPTHREADWEIGHT;
+ OSGetKMAppHintUINT32(pvAppHintState, CleanupThreadWeight,
+ &ui32AppHintDefault, &ui32AppHintCleanupThreadWeight);
+ ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADPRIORITY;
+ OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadPriority,
+ &ui32AppHintDefault, &ui32AppHintWatchdogThreadPriority);
+ ui32AppHintDefault = PVRSRV_APPHINT_WATCHDOGTHREADWEIGHT;
+ OSGetKMAppHintUINT32(pvAppHintState, WatchdogThreadWeight,
+ &ui32AppHintDefault, &ui32AppHintWatchdogThreadWeight);
+ OSFreeKMAppHintState(pvAppHintState);
+ pvAppHintState = NULL;
+
+ eError = OSSetThreadPriority(gpsPVRSRVData->hCleanupThread,
+ ui32AppHintCleanupThreadPriority,
+ ui32AppHintCleanupThreadWeight);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set thread priority of deferred cleanup thread.",
+ __func__));
+ goto Error;
+ }
+
+ /* Create the devices watchdog event object */
+ eError = OSEventObjectCreate("PVRSRV_DEVICESWATCHDOG_EVENTOBJECT", &gpsPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOGG_IF_ERROR(eError, "OSEventObjectCreate", Error);
+
+ /* Create a thread which is used to detect fatal errors */
+ eError = OSThreadCreate(&gpsPVRSRVData->hDevicesWatchdogThread,
+ "pvr_device_wdg",
+ DevicesWatchdogThread,
+ NULL,
+ IMG_TRUE,
+ gpsPVRSRVData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create devices watchdog thread",
+ __func__));
+ goto Error;
+ }
+
+ eError = OSSetThreadPriority(gpsPVRSRVData->hDevicesWatchdogThread,
+ ui32AppHintWatchdogThreadPriority,
+ ui32AppHintWatchdogThreadWeight);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set thread priority of the watchdog thread.",
+ __func__));
+ goto Error;
+ }
+
+ gpsPVRSRVData->psProcessHandleBase_Table = HASH_Create(PVRSRV_PROC_HANDLE_BASE_INIT);
+
+ if (gpsPVRSRVData->psProcessHandleBase_Table == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create hash table for process handle base.",
+ __func__));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE;
+ goto Error;
+ }
+
+ eError = OSLockCreate(&gpsPVRSRVData->hProcessHandleBase_Lock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create lock for process handle base.",
+ __func__));
+ goto Error;
+ }
+
+ eError = _HostMemDeviceCreate();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ eError = InfoPageCreate(psPVRSRVData);
+ PVR_LOGG_IF_ERROR(eError, "InfoPageCreate", Error);
+
+ /* Initialise the Transport Layer */
+ eError = TLInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+ /* Initialise TL control stream */
+ eError = TLStreamCreate(&psPVRSRVData->hTLCtrlStream,
+ psPVRSRVData->psHostMemDeviceNode,
+ PVRSRV_TL_CTLR_STREAM, PVRSRV_TL_CTLR_STREAM_SIZE,
+ TL_OPMODE_DROP_OLDEST, NULL, NULL, NULL,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to create TL control plane stream"
+ " (%d).", eError));
+ psPVRSRVData->hTLCtrlStream = NULL;
+ }
+
+ /* Initialise the Host Trace Buffer */
+ eError = HTBInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ eError = PVRGpuTraceSupportInit();
+ if (eError != PVRSRV_OK)
+ {
+ goto Error;
+ }
+#endif
+
+#if defined(SUPPORT_RGX)
+ RGXHWPerfClientInitAppHintCallbacks();
+#endif
+
+ /* Late initialisation of client cache maintenance via the info page */
+ eError = CacheOpInit2();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to initialise the CacheOp framework (%d)",
+ __func__, eError));
+ goto Error;
+ }
+
+ eError = ServerSyncInitOnce(psPVRSRVData);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to initialise sync server",
+ __func__));
+ goto Error;
+ }
+
+ dllist_init(&psPVRSRVData->sConnections);
+ eError = OSLockCreate(&psPVRSRVData->hConnectionsLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", Error);
+
+ return PVRSRV_OK;
+
+Error:
+ PVRSRVDriverDeInit();
+ return eError;
+}
+
+void IMG_CALLCONV
+PVRSRVDriverDeInit(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (gpsPVRSRVData == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: missing device-independent data",
+ __func__));
+ return;
+ }
+
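+ /* Server threads such as the devices watchdog poll bUnload in their main
+ * loops; setting it before the event objects are signalled below lets them
+ * observe the flag as soon as they wake and exit cleanly. */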
+ gpsPVRSRVData->bUnload = IMG_TRUE;
+
+ if (gpsPVRSRVData->hProcessHandleBase_Lock)
+ {
+ OSLockDestroy(gpsPVRSRVData->hProcessHandleBase_Lock);
+ gpsPVRSRVData->hProcessHandleBase_Lock = NULL;
+ }
+
+ if (gpsPVRSRVData->psProcessHandleBase_Table)
+ {
+ HASH_Delete(gpsPVRSRVData->psProcessHandleBase_Table);
+ gpsPVRSRVData->psProcessHandleBase_Table = NULL;
+ }
+
+ if (gpsPVRSRVData->hGlobalEventObject)
+ {
+ OSEventObjectSignal(gpsPVRSRVData->hGlobalEventObject);
+ }
+
+ /* Stop and cleanup the devices watchdog thread */
+ if (gpsPVRSRVData->hDevicesWatchdogThread)
+ {
+ if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+ {
+ eError = OSEventObjectSignal(gpsPVRSRVData->hDevicesWatchdogEvObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ eError = OSThreadDestroy(gpsPVRSRVData->hDevicesWatchdogThread);
+ if (PVRSRV_OK == eError)
+ {
+ gpsPVRSRVData->hDevicesWatchdogThread = NULL;
+ break;
+ }
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+ }
+
+ if (gpsPVRSRVData->hDevicesWatchdogEvObj)
+ {
+ eError = OSEventObjectDestroy(gpsPVRSRVData->hDevicesWatchdogEvObj);
+ gpsPVRSRVData->hDevicesWatchdogEvObj = NULL;
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ }
+
+ /* Stop and cleanup the deferred clean up thread, event object and
+ * deferred context list.
+ */
+ if (gpsPVRSRVData->hCleanupThread)
+ {
+ if (gpsPVRSRVData->hCleanupEventObject)
+ {
+ eError = OSEventObjectSignal(gpsPVRSRVData->hCleanupEventObject);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectSignal");
+ }
+ LOOP_UNTIL_TIMEOUT(OS_THREAD_DESTROY_TIMEOUT_US)
+ {
+ eError = OSThreadDestroy(gpsPVRSRVData->hCleanupThread);
+ if (PVRSRV_OK == eError)
+ {
+ gpsPVRSRVData->hCleanupThread = NULL;
+ break;
+ }
+ OSWaitus(OS_THREAD_DESTROY_TIMEOUT_US/OS_THREAD_DESTROY_RETRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ PVR_LOG_IF_ERROR(eError, "OSThreadDestroy");
+ }
+
+ if (gpsPVRSRVData->hCleanupEventObject)
+ {
+ eError = OSEventObjectDestroy(gpsPVRSRVData->hCleanupEventObject);
+ gpsPVRSRVData->hCleanupEventObject = NULL;
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+ }
+
+ /* Tear down the HTB before PVRSRVHandleDeInit() removes its TL handle.
+ * (HTB de-init currently also happens in device de-registration.) */
+ eError = HTBDeInit();
+ PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ PVRGpuTraceSupportDeInit();
+#endif
+
+ /* Tear down CacheOp framework information page first */
+ CacheOpDeInit2();
+
+ ServerSyncDeinitOnce(gpsPVRSRVData);
+
+ /* Close the TL control plane stream. */
+ TLStreamClose(gpsPVRSRVData->hTLCtrlStream);
+
+ /* Clean up Transport Layer resources that remain */
+ TLDeInit();
+
+ /* Clean up information page */
+ InfoPageDestroy(gpsPVRSRVData);
+
+ _HostMemDeviceDestroy();
+
+ eError = PVRSRVHandleDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRVHandleDeInit failed", __func__));
+ }
+
+ /* deinitialise pdump */
+ if ((g_ui32InitFlags & INIT_DATA_ENABLE_PDUMPINIT) > 0)
+ {
+ PDUMPDEINIT();
+ }
+
+ /* destroy event object */
+ if (gpsPVRSRVData->hGlobalEventObject)
+ {
+ OSEventObjectDestroy(gpsPVRSRVData->hGlobalEventObject);
+ gpsPVRSRVData->hGlobalEventObject = NULL;
+ }
+
+ PVRSRVCmdCompleteDeinit();
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = DCDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: DCDeInit failed", __func__));
+ }
+#endif
+
+ eError = PMRDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PMRDeInit failed", __func__));
+ }
+
+ BridgeDeinit();
+
+#if defined(PVR_RI_DEBUG)
+ RIDeInitKM();
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ DevicememHistoryDeInitKM();
+#endif
+
+ CacheOpDeInit();
+
+ PVRSRVPoolDestroy(gpsPVRSRVData->psBridgeBufferPool);
+
+ OSDeInitEnvData();
+
+ (void) DevmemIntDeInit();
+
+ eError = CommonBridgeDeInit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: CommonBridgeDeinit failed", __func__));
+ }
+
+ eError = PhysHeapDeinit();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PhysHeapDeinit failed", __func__));
+ }
+
+ if (OSLockDestroy(gpsPVRSRVData->hConnectionsLock) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: ConnectionLock destruction failed", __func__));
+ }
+
+ OSFreeMem(gpsPVRSRVData);
+ gpsPVRSRVData = NULL;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
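+/* GPU virtualisation validation: carve the first LMA region into one RA
+ * sub-arena per OSID. Only sub-arena 0 is given memory here (the 64MB span
+ * added below); the remaining sub-arenas are populated later from the
+ * apphint-supplied [min, max] ranges by PopulateLMASubArenas(). */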
+static PVRSRV_ERROR CreateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT uiCounter=0;
+
+ for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ psDeviceNode->psOSidSubArena[uiCounter] =
+ RA_Create(psDeviceNode->apszRANames[0],
+ OSGetPageShift(), /* Use host page size, keeps things simple */
+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */
+ NULL, /* No Import */
+ NULL, /* No free import */
+ NULL, /* No import handle */
+ IMG_FALSE);
+
+ if (psDeviceNode->psOSidSubArena[uiCounter] == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"\n(GPU Virtualization Validation): Calling RA_Add with base %u and size %u \n",0, GPUVIRT_SIZEOF_ARENA0));
+
+ /* Arena creation happens before the client side reads the apphints and transfers them over the bridge. Since we don't yet
+ * know how the memory will be partitioned, and since we already need memory for the initial allocations, we populate the
+ * first sub-arena (0) with a 64 megabyte span. This has been shown to be enough even for cases where EWS is allocated
+ * memory in this sub-arena and a multi-app example is then executed. This pre-allocation also means that the apphints must
+ * stay consistent with reality: the OSid0 region must start at 0 and end at 0x3FFFFFF. */
+
+ if (!RA_Add(psDeviceNode->psOSidSubArena[0], 0, GPUVIRT_SIZEOF_ARENA0, 0 , NULL))
+ {
+ RA_Delete(psDeviceNode->psOSidSubArena[0]);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psDeviceNode->apsLocalDevMemArenas[0] = psDeviceNode->psOSidSubArena[0];
+
+ return PVRSRV_OK;
+}
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS])
+{
+ IMG_UINT uiCounter;
+
+ /* Sub-arena 0 has already been populated, so populate the rest starting from index 1. */
+
+ for (uiCounter = 1; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"\n[GPU Virtualization Validation]: Calling RA_Add with base %u and size %u \n",aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1));
+
+ if (!RA_Add(psDeviceNode->psOSidSubArena[uiCounter], aui32OSidMin[0][uiCounter], aui32OSidMax[0][uiCounter]-aui32OSidMin[0][uiCounter]+1, 0, NULL))
+ {
+ goto error;
+ }
+ }
+
+ #if defined(EMULATOR)
+ {
+ SysSetOSidRegisters(aui32OSidMin, aui32OSidMax);
+ }
+ #endif
+
+ return;
+
+error:
+ for (uiCounter = 0; uiCounter < GPUVIRT_VALIDATION_NUM_OS; uiCounter++)
+ {
+ RA_Delete(psDeviceNode->psOSidSubArena[uiCounter]);
+ }
+
+ return;
+}
+
+#endif
+
+static void _SysDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ /* Only dump info once */
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_LOW)
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode =
+ (PVRSRV_DEVICE_NODE *) hDebugRequestHandle;
+
+ switch (psDeviceNode->eCurrentSysPowerState)
+ {
+ case PVRSRV_SYS_POWER_STATE_OFF:
+ PVR_DUMPDEBUG_LOG("Device System Power State: OFF");
+ break;
+ case PVRSRV_SYS_POWER_STATE_ON:
+ PVR_DUMPDEBUG_LOG("Device System Power State: ON");
+ break;
+ default:
+ PVR_DUMPDEBUG_LOG("Device System Power State: UNKNOWN (%d)",
+ psDeviceNode->eCurrentSysPowerState);
+ break;
+ }
+
+ SysDebugInfo(psDeviceNode->psDevConfig, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+}
+
+static void _ThreadsDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgReqestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ if(ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_HIGH)
+ {
+ PVR_DUMPDEBUG_LOG("------[ Server Thread Summary ]------");
+ OSThreadDumpInfo(hDbgReqestHandle, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+}
+
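+/* Device lifecycle: PVRSRVDeviceCreate() registers a device with Services
+ * (device config, physical heaps, local memory arenas and debug hooks),
+ * PVRSRVDeviceInitialise() performs the device-specific bring-up which
+ * PVRSRVDeviceFinalise() completes, and PVRSRVDeviceDestroy() tears it all
+ * down again in reverse order. */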
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceCreate(void *pvOSDevice,
+ IMG_INT32 i32UMIdentifier,
+ PVRSRV_DEVICE_NODE **ppsDeviceNode)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo = NULL;
+ PVRSRV_DEVICE_PHYS_HEAP physHeapIndex;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32AppHintDefault;
+ IMG_UINT32 ui32AppHintDriverMode;
+ void *pvAppHintState = NULL;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ IMG_HANDLE hProcessStats;
+#endif
+
+ psDeviceNode = OSAllocZMemNoStats(sizeof(*psDeviceNode));
+ if (!psDeviceNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate device node",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Couldn't register process statistics (%d)",
+ __func__, eError));
+ goto ErrorFreeDeviceNode;
+ }
+#endif
+
+ psDeviceNode->sDevId.i32UMIdentifier = i32UMIdentifier;
+
+ /* Read driver mode (i.e. native, host or guest) AppHint early */
+ ui32AppHintDefault = PVRSRV_APPHINT_DRIVERMODE;
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(pvAppHintState, DriverMode,
+ &ui32AppHintDefault, &ui32AppHintDriverMode);
+ OSFreeKMAppHintState(pvAppHintState);
+ pvAppHintState = NULL;
+ psPVRSRVData->eDriverMode = PVRSRV_VZ_APPHINT_MODE(ui32AppHintDriverMode);
+
+ eError = SysDevInit(pvOSDevice, &psDevConfig);
+ if (eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to get device config (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+
+ goto ErrorDeregisterStats;
+ }
+
+ PVR_ASSERT(psDevConfig);
+ PVR_ASSERT(psDevConfig->pvOSDevice == pvOSDevice);
+ PVR_ASSERT(!psDevConfig->psDevNode);
+
+ /* Store the device node in the device config for the system layer to use */
+ psDevConfig->psDevNode = psDeviceNode;
+
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_INIT;
+ psDeviceNode->psDevConfig = psDevConfig;
+ psDeviceNode->eCurrentSysPowerState = PVRSRV_SYS_POWER_STATE_ON;
+
+ if (psDeviceNode->psDevConfig->pfnSysDriverMode)
+ {
+ if (! PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(ui32AppHintDriverMode))
+ {
+ /*
+ * The driver mode AppHint can be an override and non-override (default)
+ * value. If the system layer provides a callback in SysDevInit() to
+ * force the driver into a particular driver mode, then only comply
+ * if the apphint value provided is a non-override mode value.
+ */
+ psPVRSRVData->eDriverMode = psDeviceNode->psDevConfig->pfnSysDriverMode();
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Override driver mode specified, ignoring SysDriveMode"));
+ }
+ }
+
+ /*
+ * Ensure that the supplied driver execution mode is consistent with the number
+ * of OSIDs the firmware can support. Any failure here is (and should be)
+ * fatal, as the requested driver mode cannot be supported by the firmware.
+ */
+ switch (psPVRSRVData->eDriverMode)
+ {
+ case DRIVER_MODE_NATIVE:
+ /* Always supported mode */
+ break;
+
+ case DRIVER_MODE_HOST:
+ case DRIVER_MODE_GUEST:
+#if (RGXFW_NUM_OS == 1)
+ PVR_DPF((PVR_DBG_ERROR, "The number of firmware supported OSID(s) is 1"));
+ PVR_DPF((PVR_DBG_ERROR, "Halting initialisation, cannot transition to %s mode",
+ psPVRSRVData->eDriverMode == DRIVER_MODE_HOST ? "host" : "guest"));
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ goto ErrorSysDevDeInit;
+#endif
+ break;
+
+ default:
+ if ((IMG_INT32)psPVRSRVData->eDriverMode < (IMG_INT32)DRIVER_MODE_NATIVE ||
+ (IMG_INT32)psPVRSRVData->eDriverMode >= (IMG_INT32)RGXFW_NUM_OS)
+ {
+ /* Running on a non-VZ-capable BVNC, so the OSID is simulated using
+ eDriverMode, but the value is outside the permitted range */
+ PVR_DPF((PVR_DBG_ERROR,
+ "Halting initialisation, OSID %d is outside of range [0:%d] supported",
+ (IMG_INT)psPVRSRVData->eDriverMode, RGXFW_NUM_OS-1));
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ goto ErrorSysDevDeInit;
+ }
+ else
+ {
+ /* Invalid driver mode enumeration integer value */
+ PVR_DPF((PVR_DBG_ERROR, "Halting initialisation due to invalid driver mode %d",
+ (IMG_INT32)psPVRSRVData->eDriverMode));
+ eError = PVRSRV_ERROR_NOT_SUPPORTED;
+ goto ErrorSysDevDeInit;
+ }
+ break;
+ }
+
+ /* Perform additional VZ system initialisation */
+ eError = SysVzDevInit(psDevConfig);
+ if (eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed system virtualization initialisation (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorSysDevDeInit;
+ }
+
+ eError = PVRSRVRegisterDbgTable(psDeviceNode,
+ g_aui32DebugOrderTable,
+ ARRAY_SIZE(g_aui32DebugOrderTable));
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorSysVzDevDeInit;
+ }
+
+ eError = OSLockCreate(&psDeviceNode->hPowerLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorUnregisterDbgTable;
+ }
+
+ /* Register the physical memory heaps */
+ psDeviceNode->papsRegisteredPhysHeaps =
+ OSAllocZMem(sizeof(*psDeviceNode->papsRegisteredPhysHeaps) *
+ psDevConfig->ui32PhysHeapCount);
+ if (!psDeviceNode->papsRegisteredPhysHeaps)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorPowerLockDestroy;
+ }
+
+ for (i = 0; i < psDevConfig->ui32PhysHeapCount; i++)
+ {
+ /* No real device should register a heap with the same ID as the host device's heap */
+ PVR_ASSERT(psDevConfig->pasPhysHeaps[i].ui32PhysHeapID != PHYS_HEAP_ID_HOSTMEM);
+
+ eError = PhysHeapRegister(&psDevConfig->pasPhysHeaps[i],
+ &psDeviceNode->papsRegisteredPhysHeaps[i]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to register physical heap %d (%s)",
+ __func__, psDevConfig->pasPhysHeaps[i].ui32PhysHeapID,
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorPhysHeapsUnregister;
+ }
+
+ psDeviceNode->ui32RegisteredPhysHeaps++;
+ }
+
+ /*
+ * The physical backing storage for the following physical heaps
+ * [CPU,GPU,FW] may or may not come from the same underlying source
+ */
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL physical memory heap",
+ __func__));
+ goto ErrorPhysHeapsUnregister;
+ }
+
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL physical memory heap",
+ __func__));
+ goto ErrorPhysHeapsRelease;
+ }
+
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL physical memory heap",
+ __func__));
+ goto ErrorPhysHeapsRelease;
+ }
+
+ eError = PhysHeapAcquire(psDevConfig->aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL],
+ &psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL physical memory heap",
+ __func__));
+ goto ErrorPhysHeapsRelease;
+ }
+
+#if defined(SUPPORT_RGX)
+ /* Requires registered GPU local heap */
+ /* Requires debug table */
+ /* Initialises psDevInfo */
+ eError = RGXRegisterDevice(psDeviceNode, &psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to register device", __func__));
+ eError = PVRSRV_ERROR_DEVICE_REGISTER_FAILED;
+ goto ErrorPhysHeapsRelease;
+ }
+#endif
+
+ /* Do we have card memory? If so create RAs to manage it */
+ if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+ {
+ RA_BASE_T uBase;
+ RA_LENGTH_T uSize;
+ IMG_UINT64 ui64Size;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+
+ IMG_UINT32 ui32NumOfLMARegions;
+ IMG_UINT32 ui32RegionId;
+ PHYS_HEAP* psLMAHeap;
+
+ psLMAHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ ui32NumOfLMARegions = PhysHeapNumberOfRegions(psLMAHeap);
+
+ if (ui32NumOfLMARegions == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: LMA heap has no memory regions defined.", __func__));
+ eError = PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP;
+ goto ErrorDeInitRgx;
+ }
+
+ /* Allocate memory for RA pointers and name strings */
+ psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*) * ui32NumOfLMARegions);
+ psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfLMARegions;
+ psDeviceNode->apszRANames = OSAllocMem(ui32NumOfLMARegions * sizeof(IMG_PCHAR));
+
+ for (ui32RegionId = 0; ui32RegionId < ui32NumOfLMARegions; ui32RegionId++)
+ {
+ eError = PhysHeapRegionGetSize(psLMAHeap, ui32RegionId, &ui64Size);
+ if (eError != PVRSRV_OK)
+ {
+ /* We can only get here if there is a bug in this module */
+ PVR_ASSERT(IMG_FALSE);
+ return eError;
+ }
+
+ eError = PhysHeapRegionGetCpuPAddr(psLMAHeap, ui32RegionId, &sCpuPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ /* We can only get here if there is a bug in this module */
+ PVR_ASSERT(IMG_FALSE);
+ return eError;
+ }
+
+ eError = PhysHeapRegionGetDevPAddr(psLMAHeap, ui32RegionId, &sDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ /* We can only get here if there is a bug in this module */
+ PVR_ASSERT(IMG_FALSE);
+ return eError;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "Creating RA for card memory - region %d - 0x%016"
+ IMG_UINT64_FMTSPECx"-0x%016" IMG_UINT64_FMTSPECx,
+ ui32RegionId, (IMG_UINT64) sCpuPAddr.uiAddr,
+ sCpuPAddr.uiAddr + ui64Size));
+
+ psDeviceNode->apszRANames[ui32RegionId] =
+ OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+ OSSNPrintf(psDeviceNode->apszRANames[ui32RegionId],
+ PVRSRV_MAX_RA_NAME_LENGTH,
+ "%s card mem",
+ psDevConfig->pszName);
+
+ uBase = sDevPAddr.uiAddr;
+ uSize = (RA_LENGTH_T) ui64Size;
+ PVR_ASSERT(uSize == ui64Size);
+
+ /* Use host page size, keeps things simple */
+ psDeviceNode->apsLocalDevMemArenas[ui32RegionId] =
+ RA_Create(psDeviceNode->apszRANames[ui32RegionId],
+ OSGetPageShift(), RA_LOCKCLASS_0, NULL, NULL, NULL,
+ IMG_FALSE);
+
+ if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId] == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create LMA memory arena",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorRAsDelete;
+ }
+
+ if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[ui32RegionId],
+ uBase, uSize, 0, NULL))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to add memory to LMA memory arena",
+ __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorRAsDelete;
+ }
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ eError = CreateLMASubArenas(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create LMA memory sub-arenas", __func__));
+ goto ErrorRAsDelete;
+ }
+#endif
+
+ /* If additional psDeviceNode->pfnDevPx* callbacks are added,
+ update the corresponding virtualization-specific override
+ in pvrsrv_vz.c:_VzDeviceCreate() */
+ psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+ psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+ psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+ psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+ psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory only, no local card memory"));
+
+ /* else we only have OS system memory */
+ psDeviceNode->pfnDevPxAlloc = OSPhyContigPagesAlloc;
+ psDeviceNode->pfnDevPxFree = OSPhyContigPagesFree;
+ psDeviceNode->pfnDevPxMap = OSPhyContigPagesMap;
+ psDeviceNode->pfnDevPxUnMap = OSPhyContigPagesUnmap;
+ psDeviceNode->pfnDevPxClean = OSPhyContigPagesClean;
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+ }
+
+ if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 2nd phys heap"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL] = PhysmemNewOSRamBackedPMR;
+ }
+
+ if (PhysHeapGetType(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]) == PHYS_HEAP_TYPE_LMA)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== Local card memory only, no OS system memory"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "===== OS System memory, 3rd phys heap"));
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewOSRamBackedPMR;
+ }
+
+ psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+
+ eError = ServerSyncInit(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorRAsDelete;
+ }
+
+ eError = SyncCheckpointInit(psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "SyncCheckpointInit");
+
+ /* Perform additional vz initialisation */
+ eError = _VzDeviceCreate(psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "_VzDeviceCreate");
+
+ /*
+ * This is registered before doing device specific initialisation to ensure
+ * generic device information is dumped first during a debug request.
+ */
+ eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hDbgReqNotify,
+ psDeviceNode,
+ _SysDebugRequestNotify,
+ DEBUG_REQUEST_SYS,
+ psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDeviceNode->hThreadsDbgReqNotify,
+ psDeviceNode,
+ _ThreadsDebugRequestNotify,
+ DEBUG_REQUEST_SYS,
+ psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ eError = HTBDeviceCreate(psDeviceNode);
+ PVR_LOG_IF_ERROR(eError, "HTBDeviceCreate");
+
+ psPVRSRVData->ui32RegisteredDevices++;
+
+#if defined(PVR_DVFS) && !defined(NO_HARDWARE)
+ eError = InitDVFS(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to start DVFS", __func__));
+ goto ErrorDecrementDeviceCount;
+ }
+#endif
+
+ OSAtomicWrite(&psDeviceNode->iNumClockSpeedChanges, 0);
+
+#if defined(PVR_TESTING_UTILS)
+ TUtilsInit(psDeviceNode);
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSWRLockCreate(&psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+ if (psDeviceNode->hMemoryContextPageFaultNotifyListLock == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for PF notify list",
+ __func__));
+ goto ErrorDecrementDeviceCount;
+ }
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+ dllist_init(&psDeviceNode->sMemoryContextPageFaultNotifyListHead);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Registered device %p", psDeviceNode));
+ PVR_DPF((PVR_DBG_MESSAGE, "Register bank address = 0x%08lx",
+ (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+ PVR_DPF((PVR_DBG_MESSAGE, "IRQ = %d", psDevConfig->ui32IRQ));
+
+#if defined(SUPPORT_ALT_REGBASE)
+ PVR_LOG(("%s: Using alternate Register bank address: 0x%08lx (orig: 0x%08lx)", __func__,
+ (unsigned long)psDevConfig->sAltRegsCpuPBase.uiAddr,
+ (unsigned long)psDevConfig->sRegsCpuPBase.uiAddr));
+#endif
+
+ /* Finally insert the device into the dev-list and set it as active */
+ List_PVRSRV_DEVICE_NODE_InsertTail(&psPVRSRVData->psDeviceNodeList,
+ psDeviceNode);
+
+ *ppsDeviceNode = psDeviceNode;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* Close the process statistics */
+ PVRSRVStatsDeregisterProcess(hProcessStats);
+#endif
+
+#if defined(SUPPORT_VALIDATION) && !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockCreateNoStats(&psDeviceNode->hValidationLock, LOCK_TYPE_PASSIVE);
+#endif
+
+ return PVRSRV_OK;
+
+#if (defined(PVR_DVFS) && !defined(NO_HARDWARE)) || !defined(PVRSRV_USE_BRIDGE_LOCK)
+ErrorDecrementDeviceCount:
+#endif
+ psPVRSRVData->ui32RegisteredDevices--;
+
+ if (psDeviceNode->hDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+ }
+
+ if (psDeviceNode->hThreadsDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify);
+ }
+
+ /* Perform vz deinitialisation */
+ _VzDeviceDestroy(psDeviceNode);
+
+ ServerSyncDeinit(psDeviceNode);
+
+ErrorRAsDelete:
+ {
+ IMG_UINT32 ui32RegionId;
+
+ for (ui32RegionId = 0;
+ ui32RegionId < psDeviceNode->ui32NumOfLocalMemArenas;
+ ui32RegionId++)
+ {
+ if (psDeviceNode->apsLocalDevMemArenas[ui32RegionId])
+ {
+ RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionId]);
+ }
+ }
+ }
+
+ErrorDeInitRgx:
+#if defined(SUPPORT_RGX)
+ DevDeInitRGX(psDeviceNode);
+#endif
+ErrorPhysHeapsRelease:
+ for (physHeapIndex = 0;
+ physHeapIndex < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
+ physHeapIndex++)
+ {
+ if (psDeviceNode->apsPhysHeap[physHeapIndex])
+ {
+ PhysHeapRelease(psDeviceNode->apsPhysHeap[physHeapIndex]);
+ }
+ }
+ErrorPhysHeapsUnregister:
+ for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+ {
+ PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+ }
+
+ OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+ErrorPowerLockDestroy:
+ OSLockDestroy(psDeviceNode->hPowerLock);
+ErrorUnregisterDbgTable:
+ PVRSRVUnregisterDbgTable(psDeviceNode);
+ErrorSysVzDevDeInit:
+ psDevConfig->psDevNode = NULL;
+ SysVzDevDeInit(psDevConfig);
+ErrorSysDevDeInit:
+ SysDevDeInit(psDevConfig);
+ErrorDeregisterStats:
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* Close the process statistics */
+ PVRSRVStatsDeregisterProcess(hProcessStats);
+ErrorFreeDeviceNode:
+#endif
+ OSFreeMemNoStats(psDeviceNode);
+
+ return eError;
+}
+
+#if defined(SUPPORT_RGX)
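+/* AppHint accessor callbacks. The flag a handler operates on is passed at
+ * registration time through the private-data pointer as a plain integer,
+ * e.g. (void *)(uintptr_t)RGXFWIF_INICFG_HWR_EN, and recovered with the
+ * (IMG_UINT32)(uintptr_t) casts below; see the
+ * PVRSRVAppHintRegisterHandlersBOOL() calls in PVRSRVDeviceInitialise(). */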
+static PVRSRV_ERROR _SetDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL bValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eResult = RGXSetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ ui32Flag, bValue);
+
+ return eResult;
+}
+
+static PVRSRV_ERROR _ReadDeviceFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL *pbValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+ IMG_UINT32 ui32State;
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eResult = RGXGetDeviceFlags((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ &ui32State);
+
+ if (PVRSRV_OK == eResult)
+ {
+ *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE;
+ }
+
+ return eResult;
+}
+
+static PVRSRV_ERROR _SetStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL bValue)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* EnableHWR is a special case
+ * only possible to disable after FW is running
+ */
+ if (bValue && RGXFWIF_INICFG_HWR_EN == ui32Flag)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+ eResult = RGXStateFlagCtrl((PVRSRV_RGXDEV_INFO *)psDevice->pvDevice,
+ ui32Flag, NULL, bValue);
+
+ return eResult;
+}
+
+static PVRSRV_ERROR _ReadStateFlag(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivate, IMG_BOOL *pbValue)
+{
+ IMG_UINT32 ui32Flag = (IMG_UINT32)((uintptr_t)psPrivate);
+ IMG_UINT32 ui32State;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDevice->pvDevice;
+
+ if (!ui32Flag)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ ui32State = psDevInfo->psFWIfOSConfig->ui32ConfigFlags;
+
+ if (pbValue)
+ {
+ *pbValue = (ui32State & ui32Flag)? IMG_TRUE: IMG_FALSE;
+ }
+
+ return PVRSRV_OK;
+}
+#endif
+
+PVRSRV_ERROR PVRSRVDeviceInitialise(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_BOOL bInitSuccessful = IMG_FALSE;
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ IMG_HANDLE hProcessStats;
+#endif
+ PVRSRV_ERROR eError;
+
+ if (psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_INIT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Device already initialised", __func__));
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ /* Allocate process statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ eError = PVRSRVStatsRegisterProcess(&hProcessStats);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Couldn't register process statistics (%d)",
+ __func__, eError));
+ return eError;
+ }
+#endif
+
+#if defined(SUPPORT_RGX)
+ eError = RGXInit(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Initialisation of Rogue device failed (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto Exit;
+ }
+#endif
+
+ bInitSuccessful = IMG_TRUE;
+
+#if defined(SUPPORT_RGX)
+Exit:
+#endif
+ eError = PVRSRVDeviceFinalise(psDeviceNode, bInitSuccessful);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Services failed to finalise the device (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+
+#if defined(SUPPORT_RGX)
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableClockGating,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_CLKGATING_EN));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableDMOverlap,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_DISABLE_DM_OVERLAP));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOnHWRTrigger,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_AssertOutOfMemory,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_CheckMList,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_CHECK_MLIST_EN));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableHWR,
+ _ReadStateFlag, _SetStateFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXFWIF_INICFG_HWR_EN));
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisableFEDLogging,
+ _ReadDeviceFlag, _SetDeviceFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_ZeroFreelist,
+ _ReadDeviceFlag, _SetDeviceFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_ZERO_FREELIST));
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DustRequestInject,
+ _ReadDeviceFlag, _SetDeviceFlag,
+ psDeviceNode,
+ (void*)((uintptr_t)RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN));
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_DisablePDumpPanic,
+ RGXQueryPdumpPanicEnable, RGXSetPdumpPanicEnable,
+ psDeviceNode,
+ NULL);
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS) && !defined(PVRSRV_DEBUG_LINUX_MEMORY_STATS)
+ /* Close the process statistics */
+ PVRSRVStatsDeregisterProcess(hProcessStats);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeapIdx;
+ IMG_UINT32 ui32RegionIdx;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ IMG_BOOL bForceUnload = IMG_FALSE;
+
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ bForceUnload = IMG_TRUE;
+ }
+#endif
+
+ psPVRSRVData->ui32RegisteredDevices--;
+
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_DEINIT;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (psDeviceNode->hMemoryContextPageFaultNotifyListLock != NULL)
+ {
+ OSWRLockDestroy(psDeviceNode->hMemoryContextPageFaultNotifyListLock);
+ }
+#endif /* !defined(PVRSRV_USE_BRIDGE_LOCK) */
+
+#if defined(SUPPORT_VALIDATION) && !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroyNoStats(psDeviceNode->hValidationLock);
+ psDeviceNode->hValidationLock = NULL;
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+ TUtilsDeinit(psDeviceNode);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+ SyncFbDeregisterDevice(psDeviceNode);
+#endif
+ /* Counter part to what gets done in PVRSRVDeviceFinalise */
+ if (psDeviceNode->hSyncCheckpointContext)
+ {
+ SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+ psDeviceNode->hSyncCheckpointContext = NULL;
+ }
+ if (psDeviceNode->hSyncPrimContext)
+ {
+ if (psDeviceNode->psSyncPrim)
+ {
+ /* Free general purpose sync primitive */
+ SyncPrimFree(psDeviceNode->psSyncPrim);
+ psDeviceNode->psSyncPrim = NULL;
+ }
+
+ if (psDeviceNode->psMMUCacheSyncPrim)
+ {
+ PVRSRV_CLIENT_SYNC_PRIM *psSync = psDeviceNode->psMMUCacheSyncPrim;
+
+ /* Ensure there are no pending MMU Cache Ops in progress before freeing this sync. */
+ eError = PVRSRVPollForValueKM(psSync->pui32LinAddr,
+ psDeviceNode->ui16NextMMUInvalidateUpdate-1,
+ 0xFFFFFFFF);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to wait for MMU Cache op", __func__));
+ return eError;
+ }
+
+ /* Important to set the device node pointer to NULL
+ * before we free the sync-prim to make sure we don't
+ * defer the freeing of the sync-prim's page tables itself.
+ * The sync is used to defer the MMU page table
+ * freeing. */
+ psDeviceNode->psMMUCacheSyncPrim = NULL;
+
+ /* Free general purpose sync primitive */
+ SyncPrimFree(psSync);
+
+ }
+
+ SyncPrimContextDestroy(psDeviceNode->hSyncPrimContext);
+ psDeviceNode->hSyncPrimContext = NULL;
+ }
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock", __func__));
+ return eError;
+ }
+
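+ /* Try to force the device idle, retrying for up to MAX_HW_TIME_US. An
+ * idle-request-denied response drops the power lock, waits a short
+ * interval and retries so that in-flight work can drain; any other
+ * error aborts the destroy. */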
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (bForceUnload)
+ {
+ /*
+ * Firmware probably not responding but we still want to unload the
+ * driver.
+ */
+ break;
+ }
+#endif
+ /* Force idle device */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode, NULL, IMG_TRUE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRV_ERROR eError2;
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+ eError2 = PVRSRVPowerLock(psDeviceNode);
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock",
+ __func__));
+ return eError2;
+ }
+ }
+ else
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+ PVRSRVPowerUnlock(psDeviceNode);
+ return eError;
+ }
+
+ /* Power down the device if necessary */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ IMG_TRUE);
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed PVRSRVSetDevicePowerStateKM call (%s). Dump debug.",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+
+ /*
+ * If the driver is okay then return the error, otherwise we can ignore
+ * this error.
+ */
+ if (PVRSRVGetPVRSRVData()->eServicesState == PVRSRV_SERVICES_STATE_OK)
+ {
+ return eError;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Will continue to unregister as driver status is not OK",
+ __func__));
+ }
+ }
+
+#if defined(SUPPORT_RGX)
+ DevDeInitRGX(psDeviceNode);
+#endif
+
+ HTBDeviceDestroy(psDeviceNode);
+
+ if (psDeviceNode->hDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hDbgReqNotify);
+ }
+
+ if (psDeviceNode->hThreadsDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDeviceNode->hThreadsDbgReqNotify);
+ }
+
+ SyncCheckpointDeinit(psDeviceNode);
+
+ ServerSyncDeinit(psDeviceNode);
+
+ /* Remove RAs and RA names for local card memory */
+ for (ui32RegionIdx = 0;
+ ui32RegionIdx < psDeviceNode->ui32NumOfLocalMemArenas;
+ ui32RegionIdx++)
+ {
+ if (psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx])
+ {
+ RA_Delete(psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx]);
+ psDeviceNode->apsLocalDevMemArenas[ui32RegionIdx] = NULL;
+ }
+
+ if (psDeviceNode->apszRANames[ui32RegionIdx])
+ {
+ OSFreeMem(psDeviceNode->apszRANames[ui32RegionIdx]);
+ psDeviceNode->apszRANames[ui32RegionIdx] = NULL;
+ }
+ }
+
+ if (psDeviceNode->apsLocalDevMemArenas)
+ {
+ OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+ psDeviceNode->apsLocalDevMemArenas = NULL;
+ }
+ if (psDeviceNode->apszRANames)
+ {
+ OSFreeMem(psDeviceNode->apszRANames);
+ psDeviceNode->apszRANames = NULL;
+ }
+
+ /* Perform vz deinitialisation */
+ _VzDeviceDestroy(psDeviceNode);
+
+ List_PVRSRV_DEVICE_NODE_Remove(psDeviceNode);
+
+ for (ePhysHeapIdx = 0;
+ ePhysHeapIdx < ARRAY_SIZE(psDeviceNode->apsPhysHeap);
+ ePhysHeapIdx++)
+ {
+ if (psDeviceNode->apsPhysHeap[ePhysHeapIdx])
+ {
+ PhysHeapRelease(psDeviceNode->apsPhysHeap[ePhysHeapIdx]);
+ }
+ }
+
+ for (i = 0; i < psDeviceNode->ui32RegisteredPhysHeaps; i++)
+ {
+ PhysHeapUnregister(psDeviceNode->papsRegisteredPhysHeaps[i]);
+ }
+
+ OSFreeMem(psDeviceNode->papsRegisteredPhysHeaps);
+
+#if defined(PVR_DVFS) && !defined(NO_HARDWARE)
+ DeinitDVFS(psDeviceNode);
+#endif
+
+ OSLockDestroy(psDeviceNode->hPowerLock);
+
+ PVRSRVUnregisterDbgTable(psDeviceNode);
+
+ psDeviceNode->psDevConfig->psDevNode = NULL;
+ SysVzDevDeInit(psDeviceNode->psDevConfig);
+ SysDevDeInit(psDeviceNode->psDevConfig);
+
+ OSFreeMemNoStats(psDeviceNode);
+
+ return PVRSRV_OK;
+}
+
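+/* Contiguous physical page allocation for LMA systems. The request is rounded
+ * up to a power-of-two number of host pages: assuming OSGetOrder() returns
+ * the smallest n such that (1 << n) pages cover uiSize, a request of three
+ * pages gets order 2 and therefore four pages. The order is stored in the
+ * page handle so the free and map/unmap paths can recompute the real size. */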
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr)
+{
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 ui32OSid = 0;
+#endif
+ RA_BASE_T uiCardAddr;
+ RA_LENGTH_T uiActualSize;
+ PVRSRV_ERROR eError;
+
+ RA_ARENA *pArena=psDevNode->apsLocalDevMemArenas[0];
+ IMG_UINT32 ui32Log2NumPages = 0;
+
+ PVR_ASSERT(uiSize != 0);
+ ui32Log2NumPages = OSGetOrder(uiSize);
+ uiSize = (1 << ui32Log2NumPages) * OSGetPageSize();
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OSidReg = 0;
+ IMG_BOOL bOSidAxiProt;
+
+ IMG_PID pId = OSGetCurrentClientProcessIDKM();
+
+ RetrieveOSidsfromPidList(pId, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ pArena = psDevNode->psOSidSubArena[ui32OSid];
+}
+#endif
+
+ eError = RA_Alloc(pArena,
+ uiSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0, /* No flags */
+ uiSize,
+ "LMA_PhyContigPagesAlloc",
+ &uiCardAddr,
+ &uiActualSize,
+ NULL); /* No private handle */
+
+ PVR_ASSERT(uiSize == uiActualSize);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVR_DPF((PVR_DBG_MESSAGE,"(GPU Virtualization Validation): LMA_PhyContigPagesAlloc: Address:%llu, size:%llu", uiCardAddr,uiActualSize));
+}
+#endif
+
+ psMemHandle->u.ui64Handle = uiCardAddr;
+ psDevPAddr->uiAddr = (IMG_UINT64) uiCardAddr;
+
+ if (PVRSRV_OK == eError)
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStatAndTrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ uiSize,
+ (IMG_UINT64)(uintptr_t) psMemHandle,
+ OSGetCurrentClientProcessIDKM());
+#else
+ IMG_CPU_PHYADDR sCpuPAddr;
+ sCpuPAddr.uiAddr = psDevPAddr->uiAddr;
+
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ NULL,
+ sCpuPAddr,
+ uiSize,
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ psMemHandle->ui32Order = ui32Log2NumPages;
+ }
+
+ return eError;
+}
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle)
+{
+ RA_BASE_T uiCardAddr = (RA_BASE_T) psMemHandle->u.ui64Handle;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStatAndUntrack(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ (IMG_UINT64)(uintptr_t) psMemHandle);
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_ALLOC_PAGES_PT_LMA,
+ (IMG_UINT64)uiCardAddr,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+ RA_Free(psDevNode->apsLocalDevMemArenas[0], uiCardAddr);
+ psMemHandle->ui32Order = 0;
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr)
+{
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+ PVR_UNREFERENCED_PARAMETER(uiSize);
+
+ PhysHeapDevPAddrToCpuPAddr(psDevNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL], 1, &sCpuPAddr, psDevPAddr);
+ *pvPtr = OSMapPhysToLin(sCpuPAddr,
+ ui32NumPages * OSGetPageSize(),
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE);
+ if (*pvPtr == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsIncrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+ ui32NumPages * OSGetPageSize(),
+ OSGetCurrentClientProcessIDKM());
+#else
+ {
+ PVRSRVStatsAddMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+ *pvPtr,
+ sCpuPAddr,
+ ui32NumPages * OSGetPageSize(),
+ NULL,
+ OSGetCurrentClientProcessIDKM());
+ }
+#endif
+#endif
+ return PVRSRV_OK;
+ }
+}
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ void *pvPtr)
+{
+ IMG_UINT32 ui32NumPages = (1 << psMemHandle->ui32Order);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#if !defined(PVRSRV_ENABLE_MEMORY_STATS)
+ PVRSRVStatsDecrMemAllocStat(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+ ui32NumPages * OSGetPageSize(),
+ OSGetCurrentClientProcessIDKM());
+#else
+ PVRSRVStatsRemoveMemAllocRecord(PVRSRV_MEM_ALLOC_TYPE_IOREMAP_PT_LMA,
+ (IMG_UINT64)(uintptr_t)pvPtr,
+ OSGetCurrentClientProcessIDKM());
+#endif
+#endif
+
+ OSUnMapPhysToLin(pvPtr, ui32NumPages * OSGetPageSize(),
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+}
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength)
+{
+ /* No need to flush because we map as uncached */
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(psMemHandle);
+ PVR_UNREFERENCED_PARAMETER(uiOffset);
+ PVR_UNREFERENCED_PARAMETER(uiLength);
+
+ return PVRSRV_OK;
+}
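+
+/* Editorial example (not part of the original change): a minimal sketch of the
+ * LMA contiguous page API lifecycle above, assuming a valid psDevNode whose
+ * GPU-local heap is RA-managed; OSCachedMemSet is assumed available from
+ * osfunc.h and error handling is abbreviated.
+ *
+ *     PG_HANDLE sMemHandle;
+ *     IMG_DEV_PHYADDR sDevPAddr;
+ *     void *pvCpuVAddr = NULL;
+ *     size_t uiSize = OSGetPageSize();
+ *
+ *     if (LMA_PhyContigPagesAlloc(psDevNode, uiSize, &sMemHandle, &sDevPAddr) == PVRSRV_OK)
+ *     {
+ *         if (LMA_PhyContigPagesMap(psDevNode, &sMemHandle, uiSize, &sDevPAddr, &pvCpuVAddr) == PVRSRV_OK)
+ *         {
+ *             OSCachedMemSet(pvCpuVAddr, 0, uiSize);                      // populate the pages
+ *             LMA_PhyContigPagesClean(psDevNode, &sMemHandle, 0, uiSize); // no-op: mapping is uncached
+ *             LMA_PhyContigPagesUnmap(psDevNode, &sMemHandle, pvCpuVAddr);
+ *         }
+ *         LMA_PhyContigPagesFree(psDevNode, &sMemHandle);
+ *     }
+ */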
+
+/**************************************************************************/ /*!
+@Function PVRSRVDeviceFinalise
+@Description Performs the final parts of device initialisation.
+@Input psDeviceNode Device node of the device to finish
+ initialising
+@Input bInitSuccessful Whether or not device specific
+ initialisation was successful
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDeviceFinalise(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bInitSuccessful)
+{
+ PVRSRV_ERROR eError;
+
+ if (bInitSuccessful)
+ {
+ eError = SyncCheckpointContextCreate(psDeviceNode,
+ &psDeviceNode->hSyncCheckpointContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create sync checkpoint context (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+
+ goto ErrorExit;
+ }
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+ eError = SyncFbRegisterDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto ErrorExit;
+ }
+#endif
+ eError = SyncPrimContextCreate(psDeviceNode,
+ &psDeviceNode->hSyncPrimContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to create sync prim context (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ SyncCheckpointContextDestroy(psDeviceNode->hSyncCheckpointContext);
+ goto ErrorExit;
+ }
+
+ /* Allocate general purpose sync primitive */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psDeviceNode->psSyncPrim,
+ "pvrsrv dev general");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate sync primitive with error (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /* Allocate MMU cache invalidate sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psDeviceNode->psMMUCacheSyncPrim,
+ "pvrsrv dev MMU cache");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to allocate sync primitive with error (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /* Next update value will be 1 since sync prim starts with 0 */
+ psDeviceNode->ui16NextMMUInvalidateUpdate = 1;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire power lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ /*
+ * Always ensure a single power on command appears in the pdump. This
+ * should be the only power related call outside of PDUMPPOWCMDSTART
+ * and PDUMPPOWCMDEND.
+ */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON, IMG_TRUE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to set device %p power state to 'on' (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+
+ /* Verify firmware compatibility for device */
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ /* In Guest mode, defer the compatibility checks until after the
+ * first kick has been submitted, as the firmware only fills in the
+ * compatibility data at that point. */
+ eError = PVRSRV_OK;
+ }
+ else
+ {
+ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed compatibility check for device %p (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ goto ErrorExit;
+ }
+
+ PDUMPPOWCMDSTART();
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ /* Force the device to idle if its default power state is off */
+ eError = PVRSRVDeviceIdleRequestKM(psDeviceNode,
+ &PVRSRVDeviceIsDefaultStateOFF,
+ IMG_TRUE);
+ if (eError == PVRSRV_OK)
+ {
+ break;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire power lock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to idle device %p (%s)",
+ __func__, psDeviceNode,
+ PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError == PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forced idle DENIED", __func__));
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+
+ /* Place device into its default power state. */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_DEFAULT,
+ IMG_TRUE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to set device %p into its default power state (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ goto ErrorExit;
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ /*
+ * If PDUMP is enabled and RGX device is supported, then initialise the
+ * performance counters that can be further modified in PDUMP. Then,
+ * before ending the init phase of the pdump, drain the commands put in
+ * the kCCB during the init phase.
+ */
+#if defined(SUPPORT_RGX) && defined(PDUMP)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo =
+ (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+ eError = PVRSRVRGXInitHWPerfCountersKM(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to init hwperf counters (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+
+ eError = RGXPdumpDrainKCCB(psDevInfo,
+ psDevInfo->psKernelCCBCtl->ui32WriteOffset, PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Problem draining kCCB (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto ErrorExit;
+ }
+ }
+#endif
+
+ /* Now that the device(s) are fully initialised set them as active */
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_ACTIVE;
+ eError = PVRSRV_OK;
+
+#if defined(SUPPORT_RGX)
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ eError = RGXFWOSConfig((PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to kick initialisation configuration to the device (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+
+ goto ErrorExit;
+ }
+
+ eError = PVRSRVDevInitCompatCheck(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed compatibility check for device %p (%s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ PVRSRVPowerUnlock(psDeviceNode);
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ goto ErrorExit;
+ }
+
+ }
+#endif
+ }
+ else
+ {
+ /* Initialisation failed so set the device(s) into a bad state */
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+ eError = PVRSRV_ERROR_NOT_INITIALISED;
+ }
+
+ /* Give PDump control a chance to end the init phase, depends on OS */
+ PDumpStopInitPhase(IMG_FALSE, IMG_TRUE);
+
+ return eError;
+
+ErrorExit:
+ /* Initialisation failed so set the device(s) into a bad state */
+ psDeviceNode->eDevState = PVRSRV_DEVICE_STATE_BAD;
+
+ return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* Only check devices which specify a compatibility check callback */
+ if (psDeviceNode->pfnInitDeviceCompatCheck)
+ return psDeviceNode->pfnInitDeviceCompatCheck(psDeviceNode);
+ else
+ return PVRSRV_OK;
+}
+
+/*
+ PollForValueKM
+*/
+static
+PVRSRV_ERROR IMG_CALLCONV PollForValueKM (volatile IMG_UINT32 __iomem * pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_UINT32 ui32Timeoutus,
+ IMG_UINT32 ui32PollPeriodus,
+ IMG_BOOL bAllowPreemption)
+{
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(ui32Timeoutus);
+ PVR_UNREFERENCED_PARAMETER(ui32PollPeriodus);
+ PVR_UNREFERENCED_PARAMETER(bAllowPreemption);
+ return PVRSRV_OK;
+#else
+ IMG_UINT32 ui32ActualValue = 0xFFFFFFFFU; /* Initialiser only required to prevent incorrect warning */
+
+ if (bAllowPreemption)
+ {
+ PVR_ASSERT(ui32PollPeriodus >= 1000);
+ }
+
+ LOOP_UNTIL_TIMEOUT(ui32Timeoutus)
+ {
+ ui32ActualValue = OSReadHWReg32(pui32LinMemAddr, 0) & ui32Mask;
+
+ if (ui32ActualValue == ui32Value)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (gpsPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ if (bAllowPreemption)
+ {
+ OSSleepms(ui32PollPeriodus / 1000);
+ }
+ else
+ {
+ OSWaitus(ui32PollPeriodus);
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ PVR_DPF((PVR_DBG_ERROR, "PollForValueKM: Timeout. Expected 0x%x but found 0x%x (mask 0x%x).",
+ ui32Value, ui32ActualValue, ui32Mask));
+
+ return PVRSRV_ERROR_TIMEOUT;
+#endif /* NO_HARDWARE */
+}
+
+
+/*
+ PVRSRVPollForValueKM
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM (volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+ return PollForValueKM(pui32LinMemAddr, ui32Value, ui32Mask,
+ MAX_HW_TIME_US,
+ MAX_HW_TIME_US/WAIT_TRY_COUNT,
+ IMG_FALSE);
+}
+
+static
+PVRSRV_ERROR IMG_CALLCONV WaitForValueKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ IMG_BOOL bHoldBridgeLock)
+{
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(pui32LinMemAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ return PVRSRV_OK;
+#else
+
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ IMG_HANDLE hOSEvent;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eErrorWait;
+ IMG_UINT32 ui32ActualValue;
+
+ eError = OSEventObjectOpen(psPVRSRVData->hGlobalEventObject, &hOSEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVWaitForValueKM: Failed to set up EventObject with error (%d)", eError));
+ goto EventObjectOpenError;
+ }
+
+ eError = PVRSRV_ERROR_TIMEOUT;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ ui32ActualValue = (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask);
+
+ if (ui32ActualValue == ui32Value)
+ {
+ /* Expected value has been found */
+ eError = PVRSRV_OK;
+ break;
+ }
+ else if (psPVRSRVData->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ /* Services in bad state, don't wait any more */
+ eError = PVRSRV_ERROR_NOT_READY;
+ break;
+ }
+ else
+ {
+ /* wait for event and retry */
+ eErrorWait = bHoldBridgeLock ? OSEventObjectWaitAndHoldBridgeLock(hOSEvent) : OSEventObjectWait(hOSEvent);
+ if (eErrorWait != PVRSRV_OK && eErrorWait != PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVWaitForValueKM: Waiting for value failed with error %d. Expected 0x%x but found 0x%x (Mask 0x%08x). Retrying",
+ eErrorWait,
+ ui32Value,
+ ui32ActualValue,
+ ui32Mask));
+ }
+ }
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ OSEventObjectClose(hOSEvent);
+
+ /* One last check in case the object wait ended after the loop timeout... */
+ if (eError != PVRSRV_OK && (OSReadDeviceMem32(pui32LinMemAddr) & ui32Mask) == ui32Value)
+ {
+ eError = PVRSRV_OK;
+ }
+
+ /* Provide event timeout information to aid the Device Watchdog Thread... */
+ if (eError == PVRSRV_OK)
+ {
+ psPVRSRVData->ui32GEOConsecutiveTimeouts = 0;
+ }
+ else if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ psPVRSRVData->ui32GEOConsecutiveTimeouts++;
+ }
+
+EventObjectOpenError:
+
+ return eError;
+
+#endif /* NO_HARDWARE */
+}
+
+/*
+ PVRSRVWaitForValueKM
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM (volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+ /* In this case we are NOT retaining the bridge lock while waiting
+ for the value. */
+ return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_FALSE);
+}
+
+/*
+ PVRSRVWaitForValueKMAndHoldBridgeLock
+*/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask)
+{
+ return WaitForValueKM(pui32LinMemAddr, ui32Value, ui32Mask, IMG_TRUE);
+}
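+
+/* Editorial example (not part of the original change): a hedged sketch
+ * contrasting the two completion primitives above. Poll busy-waits (or sleeps)
+ * between reads until MAX_HW_TIME_US elapses; Wait blocks on the global event
+ * object between reads and is preferable when the value is updated by an
+ * event-signalling party such as the firmware. pui32Flags is illustrative.
+ *
+ *     // Poll until bit 0 of *pui32Flags is set, or time out:
+ *     eError = PVRSRVPollForValueKM(pui32Flags, 0x1, 0x1);
+ *
+ *     // Same condition, but sleep on the global event object between checks:
+ *     eError = PVRSRVWaitForValueKM(pui32Flags, 0x1, 0x1);
+ */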
+
+int PVRSRVGetDriverStatus(void)
+{
+ return PVRSRVGetPVRSRVData()->eServicesState;
+}
+
+/*!
+ ******************************************************************************
+
+ @Function PVRSRVGetErrorStringKM
+
+ @Description Returns a text string relating to the PVRSRV_ERROR enum.
+
+ @Note case statement used rather than an indexed array to ensure text is
+ synchronised with the correct enum
+
+ @Input eError : PVRSRV_ERROR enum
+
+ @Return const IMG_CHAR * : Text string
+
+ @Note Must be kept in sync with servicesext.h
+
+******************************************************************************/
+
+const IMG_CHAR *PVRSRVGetErrorStringKM(PVRSRV_ERROR eError)
+{
+ switch(eError)
+ {
+ case PVRSRV_OK:
+ return "PVRSRV_OK";
+#define PVRE(x) \
+ case x: \
+ return #x;
+#include "pvrsrv_errors.h"
+#undef PVRE
+ default:
+ return "Unknown PVRSRV error number";
+ }
+}
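+
+/* Editorial note (not part of the original change): the PVRE() definition in
+ * the function above is an X-macro; pvrsrv_errors.h is assumed to contain one
+ * PVRE(...) entry per error code, so including it expands each entry into a
+ * case label, e.g.
+ *
+ *     PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+ *     // expands to:
+ *     case PVRSRV_ERROR_OUT_OF_MEMORY:
+ *         return "PVRSRV_ERROR_OUT_OF_MEMORY";
+ *
+ * which keeps the string table in sync with the enum without an indexed array.
+ */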
+
+/*
+ PVRSRVSystemHasCacheSnooping
+*/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if ((psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_NONE) &&
+ (psDevConfig->eCacheSnoopingMode != PVRSRV_DEVICE_SNOOP_EMULATED))
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_EMULATED)
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CPU_ONLY) ||
+ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ if ((psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_DEVICE_ONLY) ||
+ (psDevConfig->eCacheSnoopingMode == PVRSRV_DEVICE_SNOOP_CROSS))
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ return psDevConfig->bHasNonMappableLocalMemory;
+}
+
+/*
+ PVRSRVSystemWaitCycles
+*/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles)
+{
+ /* Delay in us */
+ IMG_UINT32 ui32Delayus = 1;
+
+ /* obtain the device freq */
+ if (psDevConfig->pfnClockFreqGet != NULL)
+ {
+ IMG_UINT32 ui32DeviceFreq;
+
+ ui32DeviceFreq = psDevConfig->pfnClockFreqGet(psDevConfig->hSysData);
+
+ ui32Delayus = (ui32Cycles*1000000)/ui32DeviceFreq;
+
+ if (ui32Delayus == 0)
+ {
+ ui32Delayus = 1;
+ }
+ }
+
+ OSWaitus(ui32Delayus);
+}
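+
+/* Editorial example (not part of the original change): a worked instance of the
+ * delay computation above, assuming an illustrative 100 MHz device clock:
+ *
+ *     ui32Cycles  = 256
+ *     ui32Delayus = (256 * 1000000) / 100000000 = 2 (integer division)
+ *
+ * For fast clocks or small cycle counts the division can truncate to 0, which
+ * is why the result is clamped to a minimum of 1us before OSWaitus().
+ */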
+
+static void *
+PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb(PVRSRV_DEVICE_NODE *psDeviceNode,
+ va_list va)
+{
+ void *pvOSDevice = va_arg(va, void *);
+
+ if (psDeviceNode->psDevConfig->pvOSDevice == pvOSDevice)
+ {
+ return psDeviceNode;
+ }
+
+ return NULL;
+}
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ psDeviceNode =
+ List_PVRSRV_DEVICE_NODE_Any_va(psPVRSRVData->psDeviceNodeList,
+ &PVRSRVSystemInstallDeviceLISR_Match_AnyVaCb,
+ pvOSDevice);
+ if (!psDeviceNode)
+ {
+ /* Device can't be found in the list so it isn't in the system */
+ PVR_DPF((PVR_DBG_ERROR, "%s: device %p with irq %d is not present",
+ __func__, pvOSDevice, ui32IRQ));
+ return PVRSRV_ERROR_INVALID_DEVICE;
+ }
+
+ return SysInstallDeviceLISR(psDeviceNode->psDevConfig->hSysData, ui32IRQ,
+ pszName, pfnLISR, pvData, phLISRData);
+}
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData)
+{
+ return SysUninstallDeviceLISR(hLISRData);
+}
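+
+/* Editorial example (not part of the original change): a hedged sketch of LISR
+ * installation with the two functions above. The PFN_LISR signature shown,
+ * IMG_BOOL (*)(void *), is an assumption; pvOSDevice, ui32IRQ and pvData are
+ * placeholders.
+ *
+ *     static IMG_BOOL DeviceLISR(void *pvData)
+ *     {
+ *         // Acknowledge the interrupt and schedule deferred work here;
+ *         // return IMG_TRUE if the interrupt was handled.
+ *         return IMG_TRUE;
+ *     }
+ *
+ *     IMG_HANDLE hLISRData;
+ *     eError = PVRSRVSystemInstallDeviceLISR(pvOSDevice, ui32IRQ, "mydevice",
+ *                                            DeviceLISR, pvData, &hLISRData);
+ *     ...
+ *     PVRSRVSystemUninstallDeviceLISR(hLISRData);
+ */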
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 uiHeapNum,
+ IMG_UINT32 *puiXStride)
+{
+ PVR_ASSERT(puiXStride != NULL);
+
+ if (uiHeapNum < 1 || uiHeapNum > psDevConfig->ui32BIFTilingHeapCount)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *puiXStride = psDevConfig->pui32BIFTilingHeapConfigs[uiHeapNum - 1];
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+ IMG_UINT32 *puiNumHeaps)
+{
+ *peBifTilingMode = psDevConfig->eBIFTilingMode;
+ *puiNumHeaps = psDevConfig->ui32BIFTilingHeapCount;
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState)
+{
+ SysSetAxiProtOSid(ui32OSid, bState);
+}
+
+void SetTrustedDeviceAceEnabled(void)
+{
+ SysSetTrustedDeviceAceEnabled();
+}
+#endif
+
+static PVRSRV_ERROR _VzDeviceCreate(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ RA_BASE_T uBase;
+ RA_LENGTH_T uSize;
+ IMG_UINT ui32OSID;
+ IMG_UINT64 ui64Size;
+ PVRSRV_ERROR eError;
+ PHYS_HEAP *psPhysHeap;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ PHYS_HEAP_TYPE eHeapType;
+ IMG_UINT32 ui32NumOfHeapRegions;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+ /* First, register device GPU physical heap based on physheap config */
+ psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+ eHeapType = PhysHeapGetType(psPhysHeap);
+
+ /* Normally the GPU UMA physheap uses OS services, but we override this when
+ the physheap is a DMA/UMA carve-out; in that case create an RA to manage it */
+ if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_DMA)
+ {
+ if (ui32NumOfHeapRegions)
+ {
+ eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ goto e0;
+ }
+
+ eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ goto e0;
+ }
+
+ eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ goto e0;
+ }
+ }
+ else
+ {
+ sDevPAddr.uiAddr = (IMG_UINT64)0;
+ sCpuPAddr.uiAddr = (IMG_UINT64)0;
+ ui64Size = (IMG_UINT64)0;
+ }
+
+ if (sCpuPAddr.uiAddr && sDevPAddr.uiAddr && ui64Size)
+ {
+ psDeviceNode->ui32NumOfLocalMemArenas = ui32NumOfHeapRegions;
+ PVR_ASSERT(ui32NumOfHeapRegions == 1);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "===== UMA (carve-out) memory, 1st phys heap (gpu)"));
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for gpu memory 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX,
+ (IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size - 1));
+
+ uBase = sDevPAddr.uiAddr;
+ uSize = (RA_LENGTH_T) ui64Size;
+ PVR_ASSERT(uSize == ui64Size);
+
+ psDeviceNode->apsLocalDevMemArenas = OSAllocMem(sizeof(RA_ARENA*));
+ PVR_ASSERT(psDeviceNode->apsLocalDevMemArenas);
+ psDeviceNode->apszRANames = OSAllocMem(sizeof(IMG_PCHAR));
+ PVR_ASSERT(psDeviceNode->apszRANames);
+ psDeviceNode->apszRANames[0] = OSAllocMem(PVRSRV_MAX_RA_NAME_LENGTH);
+ PVR_ASSERT(psDeviceNode->apszRANames[0]);
+
+ OSSNPrintf(psDeviceNode->apszRANames[0], PVRSRV_MAX_RA_NAME_LENGTH,
+ "%s gpu mem", psDeviceNode->psDevConfig->pszName);
+
+ psDeviceNode->apsLocalDevMemArenas[0] =
+ RA_Create(psDeviceNode->apszRANames[0],
+ OSGetPageShift(), /* Use OS page size, keeps things simple */
+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */
+ NULL, /* No Import */
+ NULL, /* No free import */
+ NULL, /* No import handle */
+ IMG_FALSE);
+ if (psDeviceNode->apsLocalDevMemArenas[0] == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ if (!RA_Add(psDeviceNode->apsLocalDevMemArenas[0], uBase, uSize, 0, NULL))
+ {
+ RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /* Replace the UMA allocator with LMA allocator */
+ psDeviceNode->pfnDevPxAlloc = LMA_PhyContigPagesAlloc;
+ psDeviceNode->pfnDevPxFree = LMA_PhyContigPagesFree;
+ psDeviceNode->pfnDevPxMap = LMA_PhyContigPagesMap;
+ psDeviceNode->pfnDevPxUnMap = LMA_PhyContigPagesUnmap;
+ psDeviceNode->pfnDevPxClean = LMA_PhyContigPagesClean;
+ psDeviceNode->uiMMUPxLog2AllocGran = OSGetPageShift();
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+ }
+ else
+ {
+ /* LMA heap sanity check */
+ PVR_ASSERT(ui32NumOfHeapRegions);
+ }
+
+ /* Next, register device firmware physical heap based on heap config */
+ psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+ eHeapType = PhysHeapGetType(psPhysHeap);
+ PVR_ASSERT(eHeapType != PHYS_HEAP_TYPE_UNKNOWN);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "===== LMA/DMA/UMA (carve-out) memory, 2nd phys heap (fw)"));
+
+ if (ui32NumOfHeapRegions)
+ {
+ eError = PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ goto e0;
+ }
+
+ eError = PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ goto e0;
+ }
+
+ eError = PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ goto e0;
+ }
+ }
+ else
+ {
+ sDevPAddr.uiAddr = (IMG_UINT64)0;
+ sCpuPAddr.uiAddr = (IMG_UINT64)0;
+ ui64Size = (IMG_UINT64)0;
+ }
+
+ if (ui32NumOfHeapRegions)
+ {
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ RA_LENGTH_T uConfigSize = RGX_FIRMWARE_CONFIG_HEAP_SIZE;
+ RA_LENGTH_T uMainSize = 0;
+
+#if defined(SUPPORT_RGX)
+ uMainSize = (RA_LENGTH_T) RGXGetFwMainHeapSize(psDeviceNode->pvDevice);
+#endif
+
+ SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eHeapOrigin);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for fw memory 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX,
+ (IMG_UINT64) sCpuPAddr.uiAddr, sCpuPAddr.uiAddr + ui64Size - 1));
+
+ /* Now we construct RA to manage FW heap */
+ uBase = sDevPAddr.uiAddr;
+ uSize = (RA_LENGTH_T) ui64Size;
+ PVR_ASSERT(sCpuPAddr.uiAddr && uSize == ui64Size);
+ if (eHeapType != PHYS_HEAP_TYPE_LMA)
+ {
+ /* On some LMA configs the fw base starts at zero, so only require a non-zero base for non-LMA heaps */
+ PVR_ASSERT(sDevPAddr.uiAddr);
+ }
+
+ /* All VZ drivers go through this loop; it terminates early for guest driver(s) */
+ for (ui32OSID = 0; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+ {
+ RA_BASE_T uOSIDConfigBase, uOSIDMainBase;
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) && ui32OSID == 0)
+ {
+ uOSIDMainBase = uBase;
+ uOSIDConfigBase = uOSIDMainBase + RGXGetFwMainHeapSize((PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice);
+ }
+ else
+ {
+ uOSIDConfigBase = uBase + (ui32OSID * RGX_FIRMWARE_RAW_HEAP_SIZE);
+ uOSIDMainBase = uOSIDConfigBase + uConfigSize;
+ }
+
+ OSSNPrintf(psDeviceNode->szKernelFwConfigRAName[ui32OSID], sizeof(psDeviceNode->szKernelFwConfigRAName[ui32OSID]),
+ "%s fw mem", psDeviceNode->psDevConfig->pszName);
+
+ psDeviceNode->psKernelFwConfigMemArena[ui32OSID] =
+ RA_Create(psDeviceNode->szKernelFwConfigRAName[ui32OSID],
+ OSGetPageShift(), /* Use OS page size, keeps things simple */
+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */
+ NULL, /* No Import */
+ NULL, /* No free import */
+ NULL, /* No import handle */
+ IMG_FALSE);
+ if (psDeviceNode->psKernelFwConfigMemArena[ui32OSID] == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ if (!RA_Add(psDeviceNode->psKernelFwConfigMemArena[ui32OSID], uOSIDConfigBase, uConfigSize, 0, NULL))
+ {
+ RA_Delete(psDeviceNode->psKernelFwConfigMemArena[ui32OSID]);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ OSSNPrintf(psDeviceNode->szKernelFwMainRAName[ui32OSID], sizeof(psDeviceNode->szKernelFwMainRAName[ui32OSID]),
+ "%s fw mem", psDeviceNode->psDevConfig->pszName);
+
+ psDeviceNode->psKernelFwMainMemArena[ui32OSID] =
+ RA_Create(psDeviceNode->szKernelFwMainRAName[ui32OSID],
+ OSGetPageShift(), /* Use OS page size, keeps things simple */
+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas. */
+ NULL, /* No Import */
+ NULL, /* No free import */
+ NULL, /* No import handle */
+ IMG_FALSE);
+ if (psDeviceNode->psKernelFwMainMemArena[ui32OSID] == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ if (!RA_Add(psDeviceNode->psKernelFwMainMemArena[ui32OSID], uOSIDMainBase, uMainSize, 0, NULL))
+ {
+ RA_Delete(psDeviceNode->psKernelFwMainMemArena[ui32OSID]);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ /* Guest drivers must not initialise subsequent array entries, as the driver depends on only the first being set up */
+ if (eHeapOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST || PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ break;
+ }
+ }
+
+ /* Fw physheap is always managed by LMA PMR factory */
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL] = PhysmemNewLocalRamBackedPMR;
+ }
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+ {
+ /* The guest FW physheap is a pseudo-heap which is always managed by the LMA PMR factory and used
+ exclusively by the host driver. For this pseudo-heap we do not create actual heap meta-data,
+ since it is only used during guest driver FW initialisation; this saves us having to provide
+ pfnCpuPAddrToDevPAddr/pfnDevPAddrToCpuPAddr callbacks here, which are not needed as the host
+ driver will _never_ access this guest firmware heap - instead we reuse the real FW heap meta-data */
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = PhysmemNewLocalRamBackedPMR;
+ psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] =
+ psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ }
+
+ return PVRSRV_OK;
+e1:
+ _VzDeviceDestroy(psDeviceNode);
+e0:
+ return eError;
+}
+
+static void _VzDeviceDestroy(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ IMG_UINT ui32OSID;
+ IMG_UINT64 ui64Size;
+ PHYS_HEAP *psPhysHeap;
+ IMG_CPU_PHYADDR sCpuPAddr;
+ IMG_DEV_PHYADDR sDevPAddr;
+ PHYS_HEAP_TYPE eHeapType;
+ IMG_UINT32 ui32NumOfHeapRegions;
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+
+ /* First, unregister device firmware physical heap based on heap config */
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+ {
+ /* Remove pseudo-heap pointer, rest of heap deinitialization is unaffected */
+ psDeviceNode->pfnCreateRamBackedPMR[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = NULL;
+ psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST] = NULL;
+ }
+
+ psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+ ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+
+ if (ui32NumOfHeapRegions)
+ {
+ for (ui32OSID = 0; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+ {
+ if (psDeviceNode->psKernelFwMainMemArena[ui32OSID])
+ {
+ RA_Delete(psDeviceNode->psKernelFwMainMemArena[ui32OSID]);
+ psDeviceNode->psKernelFwMainMemArena[ui32OSID] = NULL;
+ }
+
+ if (psDeviceNode->psKernelFwConfigMemArena[ui32OSID])
+ {
+ RA_Delete(psDeviceNode->psKernelFwConfigMemArena[ui32OSID]);
+ psDeviceNode->psKernelFwConfigMemArena[ui32OSID] = NULL;
+ }
+ }
+ }
+
+ /* Next, unregister device GPU physical heap based on heap config */
+ psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL];
+ ui32NumOfHeapRegions = PhysHeapNumberOfRegions(psPhysHeap);
+ eHeapType = PhysHeapGetType(psPhysHeap);
+
+ if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_DMA)
+ {
+ if (ui32NumOfHeapRegions)
+ {
+ if (PhysHeapRegionGetCpuPAddr(psPhysHeap, 0, &sCpuPAddr) != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ if (PhysHeapRegionGetSize(psPhysHeap, 0, &ui64Size) != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ if (PhysHeapRegionGetDevPAddr(psPhysHeap, 0, &sDevPAddr) != PVRSRV_OK)
+ {
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+ }
+ else
+ {
+ sDevPAddr.uiAddr = (IMG_UINT64)0;
+ sCpuPAddr.uiAddr = (IMG_UINT64)0;
+ ui64Size = (IMG_UINT64)0;
+ }
+
+ if (sCpuPAddr.uiAddr && sDevPAddr.uiAddr && ui64Size)
+ {
+ if (psDeviceNode->apsLocalDevMemArenas && psDeviceNode->apsLocalDevMemArenas[0])
+ {
+ RA_Delete(psDeviceNode->apsLocalDevMemArenas[0]);
+ psDeviceNode->apsLocalDevMemArenas[0] = NULL;
+ OSFreeMem(psDeviceNode->apsLocalDevMemArenas);
+ psDeviceNode->apsLocalDevMemArenas = NULL;
+ }
+
+ if (psDeviceNode->apszRANames)
+ {
+ OSFreeMem(psDeviceNode->apszRANames[0]);
+ psDeviceNode->apszRANames[0] = NULL;
+ OSFreeMem(psDeviceNode->apszRANames);
+ psDeviceNode->apszRANames = NULL;
+ }
+ }
+ }
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT64 ui64DevPSize,
+ IMG_UINT32 uiOSID)
+{
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PHYS_HEAP *psPhysHeap;
+ PVRSRV_ERROR eError;
+
+ /*
+ This is called by the host driver only; it creates an RA to manage the guest firmware
+ physheap, so we fail the call if an invalid guest OSID is supplied.
+ */
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+ PVR_DPF((PVR_DBG_MESSAGE, "===== Registering OSID: %d fw physheap memory", uiOSID));
+ PVR_LOGR_IF_FALSE(((uiOSID > 0) && (uiOSID < RGXFW_NUM_OS)), "Invalid guest OSID", PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Verify guest size with host size (support only same sized FW heaps) */
+ psPhysHeap = psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL];
+
+ if (ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "OSID: %d fw physheap size 0x%"IMG_UINT64_FMTSPECX" differs from host fw phyheap size 0x%X",
+ uiOSID,
+ ui64DevPSize,
+ RGX_FIRMWARE_RAW_HEAP_SIZE));
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "Truncating OSID: %d requested fw physheap to: 0x%X\n",
+ uiOSID,
+ RGX_FIRMWARE_RAW_HEAP_SIZE));
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Creating RA for fw 0x%016"IMG_UINT64_FMTSPECX"-0x%016"IMG_UINT64_FMTSPECX" [DEV/PA]",
+ (IMG_UINT64) sDevPAddr.uiAddr, sDevPAddr.uiAddr + RGX_FIRMWARE_RAW_HEAP_SIZE - 1));
+
+ SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eHeapOrigin);
+ PVR_LOGR_IF_FALSE((eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST),
+ "PVRSRVVzRegisterFirmwarePhysHeap: Host PVZ config: Invalid PVZ setup\n"
+ "=>: all driver types (i.e. host/guest) must use same FW heap origin",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ OSSNPrintf(psDeviceNode->szKernelFwRawRAName[uiOSID],
+ sizeof(psDeviceNode->szKernelFwRawRAName[uiOSID]),
+ "[OSID: %d]: raw guest fw mem", uiOSID);
+
+ eError = _VzConstructRAforFwHeap(&psDeviceNode->psKernelFwRawMemArena[uiOSID],
+ psDeviceNode->szKernelFwRawRAName[uiOSID],
+ sDevPAddr.uiAddr,
+ RGX_FIRMWARE_RAW_HEAP_SIZE);
+ if (eError == PVRSRV_OK)
+ {
+ psDeviceNode->ui64RABase[uiOSID] = sDevPAddr.uiAddr;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR IMG_CALLCONV PVRSRVVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiOSID)
+{
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+ PVR_DPF((PVR_DBG_MESSAGE, "===== Unregistering OSID: %d fw physheap memory", uiOSID));
+ PVR_LOGR_IF_FALSE(((uiOSID > 0) && (uiOSID < RGXFW_NUM_OS)), "Invalid guest OSID", PVRSRV_ERROR_INVALID_PARAMS);
+
+ SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eHeapOrigin);
+ PVR_LOGR_IF_FALSE((eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST),
+ "PVRSRVVzUnregisterFirmwarePhysHeap: Host PVZ config: Invalid PVZ setup\n"
+ "=>: all driver types (i.e. host/guest) must use same FW heap origin",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ _VzTearDownRAforFwHeap(&psDeviceNode->psKernelFwRawMemArena[uiOSID], (IMG_UINT64)psDeviceNode->ui64RABase[uiOSID]);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _VzConstructRAforFwHeap(RA_ARENA **ppsArena, IMG_CHAR *szName,
+ IMG_UINT64 uBase, RA_LENGTH_T uSize)
+{
+ PVRSRV_ERROR eError;
+
+ /* Construct RA to manage FW Raw heap */
+ *ppsArena = RA_Create(szName,
+ OSGetPageShift(), /* Use host page size, keeps things simple */
+ RA_LOCKCLASS_0, /* This arena doesn't use any other arenas */
+ NULL, /* No Import */
+ NULL, /* No free import */
+ NULL, /* No import handle */
+ IMG_FALSE);
+ eError = (*ppsArena == NULL) ? (PVRSRV_ERROR_OUT_OF_MEMORY) : (PVRSRV_OK);
+
+ if (eError == PVRSRV_OK && !RA_Add(*ppsArena, uBase, uSize, 0, NULL))
+ {
+ RA_Delete(*ppsArena);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ return eError;
+}
+
+static void _VzTearDownRAforFwHeap(RA_ARENA **ppsArena, IMG_UINT64 uBase)
+{
+ RA_Free(*ppsArena, uBase);
+ RA_Delete(*ppsArena);
+ *ppsArena = NULL;
+}
+
+/*****************************************************************************
+ End of file (pvrsrv.c)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv.h
new file mode 100644
index 00000000000000..f072f2f7264781
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv.h
@@ -0,0 +1,490 @@
+/**************************************************************************/ /*!
+@File
+@Title PowerVR services server header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef PVRSRV_H
+#define PVRSRV_H
+
+
+#include "connection_server.h"
+#include "device.h"
+#include "power.h"
+#include "syscommon.h"
+#include "sysinfo.h"
+#include "physheap.h"
+#include "cache_ops.h"
+#include "pvr_notifier.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_physheap.h"
+
+/*!
+ * For OSThreadDestroy(), which may require a retry
+ * Try for 100 ms to destroy an OS thread before failing
+ */
+#define OS_THREAD_DESTROY_TIMEOUT_US 100000ULL
+#define OS_THREAD_DESTROY_RETRY_COUNT 10
+
+typedef enum _VMM_CONF_PARAM_
+{
+ VMM_CONF_PRIO_OSID0 = 0,
+ VMM_CONF_PRIO_OSID1 = 1,
+ VMM_CONF_PRIO_OSID2 = 2,
+ VMM_CONF_PRIO_OSID3 = 3,
+ VMM_CONF_PRIO_OSID4 = 4,
+ VMM_CONF_PRIO_OSID5 = 5,
+ VMM_CONF_PRIO_OSID6 = 6,
+ VMM_CONF_PRIO_OSID7 = 7,
+ VMM_CONF_ISOL_THRES = 8,
+ VMM_CONF_HCS_DEADLINE = 9
+} VMM_CONF_PARAM;
+
+typedef struct _BUILD_INFO_
+{
+ IMG_UINT32 ui32BuildOptions;
+ IMG_UINT32 ui32BuildVersion;
+ IMG_UINT32 ui32BuildRevision;
+ IMG_UINT32 ui32BuildType;
+#define BUILD_TYPE_DEBUG 0
+#define BUILD_TYPE_RELEASE 1
+ /* The above fields are self-explanatory */
+ /* B.V.N.C can be added later if required */
+} BUILD_INFO;
+
+typedef struct _DRIVER_INFO_
+{
+ BUILD_INFO sUMBuildInfo;
+ BUILD_INFO sKMBuildInfo;
+ IMG_UINT8 ui8UMSupportedArch;
+ IMG_UINT8 ui8KMBitArch;
+
+#define BUILD_ARCH_64BIT (1 << 0)
+#define BUILD_ARCH_32BIT (1 << 1)
+#define BUILD_ARCH_BOTH (BUILD_ARCH_32BIT | BUILD_ARCH_64BIT)
+ IMG_BOOL bIsNoMatch;
+} DRIVER_INFO;
+
+typedef struct PVRSRV_DATA_TAG
+{
+ PVRSRV_DRIVER_MODE eDriverMode; /*!< Driver mode (i.e. native, host or guest) */
+ DRIVER_INFO sDriverInfo;
+ IMG_UINT32 ui32RegisteredDevices;
+ PVRSRV_DEVICE_NODE *psDeviceNodeList; /*!< List head of device nodes */
+ PVRSRV_DEVICE_NODE *psHostMemDeviceNode; /*!< DeviceNode to be used for device independent
+ host based memory allocations where the DevMem
+ framework is to be used e.g. TL */
+ PVRSRV_SERVICES_STATE eServicesState; /*!< global driver state */
+
+ HASH_TABLE *psProcessHandleBase_Table; /*!< Hash table with process handle bases */
+ POS_LOCK hProcessHandleBase_Lock; /*!< Lock for the process handle base table */
+
+ IMG_HANDLE hGlobalEventObject; /*!< OS Global Event Object */
+ IMG_UINT32 ui32GEOConsecutiveTimeouts; /*!< OS Global Event Object Timeouts */
+
+ IMG_HANDLE hCleanupThread; /*!< Cleanup thread */
+ IMG_HANDLE hCleanupEventObject; /*!< Event object to drive cleanup thread */
+ POS_LOCK hCleanupThreadWorkListLock; /*!< Lock protecting the cleanup thread work list */
+ DLLIST_NODE sCleanupThreadWorkList; /*!< List of work for the cleanup thread */
+ IMG_PID cleanupThreadPid; /*!< Cleanup thread process id */
+ ATOMIC_T i32NumCleanupItems; /*!< Number of items in cleanup thread work list */
+
+ IMG_HANDLE hDevicesWatchdogThread; /*!< Devices watchdog thread */
+ IMG_HANDLE hDevicesWatchdogEvObj; /*! Event object to drive devices watchdog thread */
+ volatile IMG_UINT32 ui32DevicesWatchdogPwrTrans;/*! Number of off -> on power state transitions */
+#if !defined(PVRSRV_SERVER_THREADS_INDEFINITE_SLEEP)
+ volatile IMG_UINT32 ui32DevicesWatchdogTimeout; /*! Timeout for the Devices watchdog Thread */
+#endif
+#ifdef PVR_TESTING_UTILS
+ volatile IMG_UINT32 ui32DevicesWdWakeupCounter; /* Need this for the unit tests. */
+#endif
+
+ IMG_HANDLE hPvzConnection; /*!< PVZ connection used for cross-VM hyper-calls */
+ POS_LOCK hPvzConnectionLock; /*!< Lock protecting PVZ connection */
+ IMG_BOOL abVmOnline[RGXFW_NUM_OS];
+
+ IMG_BOOL bUnload; /*!< Driver unload is in progress */
+
+ IMG_HANDLE hTLCtrlStream; /*! Control plane for TL streams */
+
+ PVRSRV_POOL *psBridgeBufferPool; /*! Pool of bridge buffers */
+ IMG_HANDLE hDriverThreadEventObject; /*! Event object relating to multi-threading in the Server */
+ IMG_BOOL bDriverSuspended; /*! if TRUE, the driver is suspended and new threads should not enter */
+ ATOMIC_T iNumActiveDriverThreads; /*! Number of threads active in the Server */
+
+ PMR *psInfoPagePMR; /*! Handle to exportable PMR of the information page. */
+ IMG_UINT32 *pui32InfoPage; /*! CPU memory mapping for information page. */
+ DEVMEM_MEMDESC *psInfoPageMemDesc; /*! Memory descriptor of the information page. */
+ POS_LOCK hInfoPageLock; /*! Lock guarding access to information page. */
+
+ POS_LOCK hConnectionsLock; /*!< Lock protecting sConnections */
+ DLLIST_NODE sConnections; /*!< The list of currently active connection objects */
+} PVRSRV_DATA;
+
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVGetPVRSRVData
+
+ @Description Get a pointer to the global data
+
+ @Return PVRSRV_DATA *
+
+******************************************************************************/
+PVRSRV_DATA *PVRSRVGetPVRSRVData(void);
+
+/*!
+******************************************************************************************
+@Note Kernel code must always query the driver mode using the PVRSRV_VZ_MODE_IS() macro
+ _only_; PVRSRV_DATA->eDriverMode should not be read directly, as the field is also
+ overloaded as the driver OSID (i.e. not to be confused with the hardware kick register
+ OSID) when running on a non-VZ-capable BVNC, where the driver has to simulate OSID
+ propagation to the firmware in the absence of a hardware kick register that would
+ propagate this OSID.
+******************************************************************************************/
+#define PVRSRV_VZ_MODE_IS(_expr) (((((IMG_INT)_expr)>0)&&((IMG_INT)PVRSRVGetPVRSRVData()->eDriverMode>0)) ? \
+ (IMG_TRUE) : ((_expr) == (PVRSRVGetPVRSRVData()->eDriverMode)))
+#define PVRSRV_VZ_RETN_IF_MODE(_expr) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return; } } while(0)
+#define PVRSRV_VZ_RETN_IF_NOT_MODE(_expr) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return; } } while(0)
+#define PVRSRV_VZ_RET_IF_MODE(_expr, _rc) do { if ( PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while(0)
+#define PVRSRV_VZ_RET_IF_NOT_MODE(_expr, _rc) do { if (! PVRSRV_VZ_MODE_IS(_expr)) { return (_rc); } } while(0)
+#define PVRSRV_VZ_DRIVER_OSID (((IMG_INT)PVRSRVGetPVRSRVData()->eDriverMode) > (0) ? \
+ ((IMG_UINT32)(PVRSRVGetPVRSRVData()->eDriverMode)) : (0))
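+
+/* Editorial example (not part of the original change): how the macros above
+ * evaluate, assuming the -1 native / 0 host / +1 guest encoding described in
+ * the AppHint note below:
+ *
+ *     eDriverMode == DRIVER_MODE_HOST (0):
+ *         PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST)  -> IMG_TRUE  (exact match)
+ *         PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) -> IMG_FALSE
+ *
+ *     eDriverMode == 5 (OSID-overloaded guest value, > 0):
+ *         PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) -> IMG_TRUE  (both sides > 0)
+ *         PVRSRV_VZ_DRIVER_OSID                -> 5
+ */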
+
+/*!
+************************************************************************************************
+@Note The driver execution mode AppHint (i.e. PVRSRV_APPHINT_DRIVERMODE) can be an override or
+ non-override 32-bit value. An override value has the MSB bit set & a non-override value
+ has this MSB bit cleared. Excluding this MSB bit & interpreting the remaining 31-bit as
+ a signed 31-bit integer, the mode values are [-1 native <default>: 0 host : +1 guest ].
+************************************************************************************************/
+#define PVRSRV_VZ_APPHINT_MODE_IS_OVERRIDE(_expr) ((IMG_UINT32)(_expr)&(IMG_UINT32)(1<<31))
+#define PVRSRV_VZ_APPHINT_MODE(_expr) \
+ ((((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) == (IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_NATIVE : \
+ !((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF) ? DRIVER_MODE_HOST : \
+ ((IMG_UINT32)((IMG_UINT32)(_expr)&(IMG_UINT)0x7FFFFFFF)==(IMG_UINT32)0x1) ? DRIVER_MODE_GUEST : \
+ ((IMG_UINT32)(_expr)&(IMG_UINT32)0x7FFFFFFF))
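+
+/* Editorial example (not part of the original change): decoding sample 32-bit
+ * AppHint values with the macros above:
+ *
+ *     0x7FFFFFFF -> low 31 bits all ones (-1) -> DRIVER_MODE_NATIVE, no override
+ *     0x80000000 -> MSB set, low 31 bits == 0 -> DRIVER_MODE_HOST,   override
+ *     0x80000001 -> MSB set, low 31 bits == 1 -> DRIVER_MODE_GUEST,  override
+ */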
+
+/*!
+******************************************************************************
+
+ @Function LMA memory management API
+
+******************************************************************************/
+PVRSRV_ERROR LMA_PhyContigPagesAlloc(PVRSRV_DEVICE_NODE *psDevNode, size_t uiSize,
+ PG_HANDLE *psMemHandle, IMG_DEV_PHYADDR *psDevPAddr);
+
+void LMA_PhyContigPagesFree(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle);
+
+PVRSRV_ERROR LMA_PhyContigPagesMap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ size_t uiSize, IMG_DEV_PHYADDR *psDevPAddr,
+ void **pvPtr);
+
+void LMA_PhyContigPagesUnmap(PVRSRV_DEVICE_NODE *psDevNode, PG_HANDLE *psMemHandle,
+ void *pvPtr);
+
+PVRSRV_ERROR LMA_PhyContigPagesClean(PVRSRV_DEVICE_NODE *psDevNode,
+ PG_HANDLE *psMemHandle,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 uiLength);
+
+
+/*!
+******************************************************************************
+ @Function PVRSRVPollForValueKM
+
+ @Description
+ Polls for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVPollForValueKM(
+ volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+/*!
+******************************************************************************
+ @Function PVRSRVWaitForValueKM
+
+ @Description
+ Waits (using EventObjects) for a value to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKM(
+ volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+/*!
+******************************************************************************
+ @Function PVRSRVWaitForValueKMAndHoldBridgeLockKM
+
+ @Description
+ Waits without releasing bridge lock (using EventObjects) for a value
+ to match a masked read
+
+ @Input pui32LinMemAddr : CPU linear address to poll
+ @Input ui32Value : required value
+ @Input ui32Mask : Mask
+
+ @Return PVRSRV_ERROR :
+******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVWaitForValueKMAndHoldBridgeLockKM(
+ volatile IMG_UINT32 __iomem *pui32LinMemAddr,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemHasCacheSnooping
+
+ @Description : Returns whether the system has cache snooping
+
+ @Return : IMG_TRUE if the system has cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasCacheSnooping(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemSnoopingIsEmulated
+
+ @Description : Returns whether system cache snooping support is emulated
+
+ @Return : IMG_TRUE if the system cache snooping is emulated in software
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingIsEmulated(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemSnoopingOfCPUCache
+
+ @Description : Returns whether the system supports snooping of the CPU cache
+
+ @Return : IMG_TRUE if the system has CPU cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfCPUCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemSnoopingOfDeviceCache
+
+ @Description : Returns whether the system supports snooping of the device cache
+
+ @Return : IMG_TRUE if the system has device cache snooping
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemSnoopingOfDeviceCache(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemHasNonMappableLocalMemory
+
+ @Description : Returns whether the device has non-mappable part of local memory
+
+ @Return : IMG_TRUE if the device has non-mappable part of local memory
+*****************************************************************************/
+IMG_BOOL PVRSRVSystemHasNonMappableLocalMemory(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemWaitCycles
+
+ @Description : Waits for at least ui32Cycles of the Device clk.
+
+*****************************************************************************/
+void PVRSRVSystemWaitCycles(PVRSRV_DEVICE_CONFIG *psDevConfig, IMG_UINT32 ui32Cycles);
+
+PVRSRV_ERROR PVRSRVSystemInstallDeviceLISR(void *pvOSDevice,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData);
+
+PVRSRV_ERROR PVRSRVSystemUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+int PVRSRVGetDriverStatus(void);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVIsBridgeEnabled
+
+ @Description : Returns whether the given bridge group is enabled
+
+ @Return : IMG_TRUE if the given bridge group is enabled
+*****************************************************************************/
+static inline IMG_BOOL PVRSRVIsBridgeEnabled(IMG_HANDLE hServices, IMG_UINT32 ui32BridgeGroup)
+{
+ PVR_UNREFERENCED_PARAMETER(hServices);
+
+#if defined(SUPPORT_RGX)
+ if (ui32BridgeGroup >= PVRSRV_BRIDGE_RGX_FIRST)
+ {
+ return ((1U << (ui32BridgeGroup - PVRSRV_BRIDGE_RGX_FIRST)) &
+ gui32RGXBridges) != 0;
+ }
+ else
+#endif /* SUPPORT_RGX */
+ {
+ return ((1U << (ui32BridgeGroup - PVRSRV_BRIDGE_FIRST)) &
+ gui32PVRBridges) != 0;
+ }
+}
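+
+/* Editorial example (not part of the original change): the check above is a
+ * plain bitmask test. For instance, if PVRSRV_BRIDGE_FIRST were 1 (illustrative
+ * value only), bridge group 3 maps to bit (3 - 1) == 2, so the group is enabled
+ * iff (gui32PVRBridges & 0x4) != 0.
+ */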
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemBIFTilingHeapGetXStride
+
+ @Description : return the default x-stride configuration for the given
+ BIF tiling heap number
+
+ @Input psDevConfig: Pointer to a device config
+
+ @Input uiHeapNum: BIF tiling heap number, starting from 1
+
+ @Output puiXStride: pointer to x-stride output of the requested heap
+
+*****************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingHeapGetXStride(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 uiHeapNum,
+ IMG_UINT32 *puiXStride);
+
+/*!
+*****************************************************************************
+ @Function : PVRSRVSystemBIFTilingGetConfig
+
+ @Description : return the BIF tiling mode and number of BIF
+ tiling heaps for the given device config
+
+ @Input psDevConfig : Pointer to a device config
+
+ @Output peBifTilingMode: Pointer to a BIF tiling mode enum
+
+ @Output puiNumHeaps : pointer to uint to hold number of heaps
+
+*****************************************************************************/
+PVRSRV_ERROR
+PVRSRVSystemBIFTilingGetConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ RGXFWIF_BIFTILINGMODE *peBifTilingMode,
+ IMG_UINT32 *puiNumHeaps);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*!
+***********************************************************************************
+ @Function : PopulateLMASubArenas
+
+ @Description : Uses the Apphints passed by the client at initialization
+ time to add bases and sizes in the various arenas in the
+ LMA memory
+
+ @Input psDeviceNode : Pointer to the device node struct containing all the
+ arena information
+
+ @Input ui32OSidMin : Single dimensional array containing the minimum values
+ for each OSid area
+
+ @Input ui32OSidMax : Single dimensional array containing the maximum values
+ for each OSid area
+***********************************************************************************/
+
+void PopulateLMASubArenas(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS], IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+
+#if defined(EMULATOR)
+ void SetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+ void SetTrustedDeviceAceEnabled(void);
+#endif
+
+#endif
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVVzRegisterFirmwarePhysHeap
+
+ @Description Request to map a physical heap to kernel FW memory context
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR PVRSRVVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT64 ui64DevPSize,
+ IMG_UINT32 uiOSID);
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVVzUnregisterFirmwarePhysHeap
+
+ @Description Request to unmap a physical heap from kernel FW memory context
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR PVRSRVVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 uiOSID);
+
+#endif /* PVRSRV_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_apphint.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_apphint.h
new file mode 100644
index 00000000000000..43eccef18beda9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_apphint.h
@@ -0,0 +1,66 @@
+/**************************************************************************/ /*!
+@File
+@Title PowerVR AppHint generic interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRV_APPHINT_H__)
+#define __PVRSRV_APPHINT_H__
+
+#if defined(LINUX)
+
+#include "km_apphint.h"
+#define PVRSRVAppHintDumpState() pvr_apphint_dump_state()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p) pvr_apphint_register_handlers_uint64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p) pvr_apphint_register_handlers_uint32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p) pvr_apphint_register_handlers_bool(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p) pvr_apphint_register_handlers_string(i,q,s,d,p)
+
+#else
+
+#define PVRSRVAppHintDumpState()
+#define PVRSRVAppHintRegisterHandlersUINT64(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersUINT32(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersBOOL(i,q,s,d,p)
+#define PVRSRVAppHintRegisterHandlersSTRING(i,q,s,d,p)
+
+#endif
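+
+/* Illustrative call sites (a sketch, not part of the original header). On
+ * Linux the macros above expand to the km_apphint implementations; on other
+ * build environments they compile away, so callers need no #ifdef guards:
+ *
+ *     PVRSRVAppHintDumpState();
+ *     PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_Foo,
+ *                                         pfnQuery, pfnSet,
+ *                                         psDevice, pvPrivate);
+ *
+ * APPHINT_ID_Foo and the handler/device names are hypothetical; the real
+ * IDs and handler signatures come from km_apphint.h.
+ */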
+
+#endif /* !defined(__PVRSRV_APPHINT_H__) */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.c b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.c
new file mode 100644
index 00000000000000..47539959c50365
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.c
@@ -0,0 +1,515 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR Common Bridge Init/Deinit Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements common PVR Bridge init/deinit code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv_bridge_init.h"
+#include "srvcore.h"
+
+/* These will go when full bridge gen comes in */
+#if defined(PDUMP)
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+#endif
+#if defined(SUPPORT_DISPLAY_CLASS)
+PVRSRV_ERROR InitDCBridge(void);
+PVRSRV_ERROR DeinitDCBridge(void);
+#endif
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+#endif
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR InitSYNCEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCEXPORTBridge(void);
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSYNCSEXPORTBridge(void);
+PVRSRV_ERROR DeinitSYNCSEXPORTBridge(void);
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined (SUPPORT_RGX)
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+#endif
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+#endif
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+#endif /* SUPPORT_RGX */
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR InitSMMBridge(void);
+PVRSRV_ERROR DeinitSMMBridge(void);
+#endif
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+#endif
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+#if defined(PVR_RI_DEBUG)
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+#endif
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+#endif
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+PVRSRV_ERROR InitVALIDATIONBridge(void);
+PVRSRV_ERROR DeinitVALIDATIONBridge(void);
+#endif
+#if defined(PVR_TESTING_UTILS)
+PVRSRV_ERROR InitTUTILSBridge(void);
+PVRSRV_ERROR DeinitTUTILSBridge(void);
+#endif
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+#endif
+#if defined(SUPPORT_WRAP_EXTMEM)
+PVRSRV_ERROR InitMMEXTMEMBridge(void);
+PVRSRV_ERROR DeinitMMEXTMEMBridge(void);
+#endif
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+PVRSRV_ERROR InitSYNCFALLBACKBridge(void);
+PVRSRV_ERROR DeinitSYNCFALLBACKBridge(void);
+#endif
+
+
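+/* Note on the error handling convention below: PVR_LOG_IF_ERROR() logs a
+ * failure and carries on, so CommonBridgeInit() attempts every bridge and
+ * returns the status of the last call, while the deinit path relies on
+ * PVR_LOGR_IF_ERROR(), which logs and returns on the first failure.
+ */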
+PVRSRV_ERROR
+CommonBridgeInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ BridgeDispatchTableStartOffsetsInit();
+
+ eError = InitSRVCOREBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSRVCOREBridge");
+
+ eError = InitSYNCBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSYNCBridge");
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+ eError = InitSYNCEXPORTBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSYNCEXPORTBridge");
+#endif
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = InitSYNCSEXPORTBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSYNCSEXPORTBridge");
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+ eError = InitPDUMPCTRLBridge();
+ PVR_LOG_IF_ERROR(eError, "InitPDUMPCTRLBridge");
+#endif
+
+ eError = InitMMBridge();
+ PVR_LOG_IF_ERROR(eError, "InitMMBridge");
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+ eError = InitCMMBridge();
+ PVR_LOG_IF_ERROR(eError, "InitCMMBridge");
+#endif
+
+#if defined(PDUMP)
+ eError = InitPDUMPMMBridge();
+ PVR_LOG_IF_ERROR(eError, "InitPDUMPMMBridge");
+
+ eError = InitPDUMPBridge();
+ PVR_LOG_IF_ERROR(eError, "InitPDUMPBridge");
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = InitDCBridge();
+ PVR_LOG_IF_ERROR(eError, "InitDCBridge");
+#endif
+
+ eError = InitCACHEBridge();
+ PVR_LOG_IF_ERROR(eError, "InitCACHEBridge");
+
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = InitSMMBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSMMBridge");
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+ eError = InitHTBUFFERBridge();
+ PVR_LOG_IF_ERROR(eError, "InitHTBUFFERBridge");
+#endif
+
+ eError = InitPVRTLBridge();
+ PVR_LOG_IF_ERROR(eError, "InitPVRTLBridge");
+
+#if defined(PVR_RI_DEBUG)
+ eError = InitRIBridge();
+ PVR_LOG_IF_ERROR(eError, "InitRIBridge");
+#endif
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+ eError = InitVALIDATIONBridge();
+ PVR_LOG_IF_ERROR(eError, "InitVALIDATIONBridge");
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+ eError = InitTUTILSBridge();
+ PVR_LOG_IF_ERROR(eError, "InitTUTILSBridge");
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = InitDEVICEMEMHISTORYBridge();
+ PVR_LOG_IF_ERROR(eError, "InitDEVICEMEMHISTORYBridge");
+#endif
+
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ eError = InitSYNCTRACKINGBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSYNCTRACKINGBridge");
+#endif
+
+#if defined (SUPPORT_RGX)
+
+ eError = InitRGXTQBridge();
+ PVR_LOG_IF_ERROR(eError, "InitRGXTQBridge");
+
+ eError = InitRGXTA3DBridge();
+ PVR_LOG_IF_ERROR(eError, "InitRGXTA3DBridge");
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+ eError = InitBREAKPOINTBridge();
+ PVR_LOG_IF_ERROR(eError, "InitBREAKPOINTBridge");
+#endif
+
+ eError = InitDEBUGMISCBridge();
+ PVR_LOG_IF_ERROR(eError, "InitDEBUGMISCBridge");
+
+#if defined(PDUMP)
+ eError = InitRGXPDUMPBridge();
+ PVR_LOG_IF_ERROR(eError, "InitRGXPDUMPBridge");
+#endif
+
+ eError = InitRGXHWPERFBridge();
+ PVR_LOG_IF_ERROR(eError, "InitRGXHWPERFBridge");
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+ eError = InitREGCONFIGBridge();
+ PVR_LOG_IF_ERROR(eError, "InitREGCONFIGBridge");
+#endif
+
+ eError = InitTIMERQUERYBridge();
+ PVR_LOG_IF_ERROR(eError, "InitTIMERQUERYBridge");
+
+ eError = InitRGXKICKSYNCBridge();
+ PVR_LOG_IF_ERROR(eError, "InitRGXKICKSYNCBridge");
+
+#endif /* SUPPORT_RGX */
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+ eError = InitMMEXTMEMBridge();
+ PVR_LOG_IF_ERROR(eError, "InitMMEXTMEMBridge");
+#endif
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+ eError = InitSYNCFALLBACKBridge();
+ PVR_LOG_IF_ERROR(eError, "InitSYNCFALLBACKBridge");
+#endif
+
+ eError = OSPlatformBridgeInit();
+ PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeInit");
+
+ return eError;
+}
+
+PVRSRV_ERROR
+CommonBridgeDeInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSPlatformBridgeDeInit();
+ PVR_LOG_IF_ERROR(eError, "OSPlatformBridgeDeInit");
+
+#if defined(SUPPORT_FALLBACK_FENCE_SYNC)
+ eError = DeinitSYNCFALLBACKBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSYNCFALLBACKBridge");
+#endif
+
+#if defined(SUPPORT_WRAP_EXTMEM)
+ eError = DeinitMMEXTMEMBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitMMEXTMEMBridge");
+#endif
+
+ eError = DeinitSRVCOREBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSRVCOREBridge");
+
+ eError = DeinitSYNCBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSYNCBridge");
+
+#if defined(SUPPORT_SERVER_SYNC)
+#if defined(SUPPORT_INSECURE_EXPORT)
+ eError = DeinitSYNCEXPORTBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSYNCEXPORTBridge");
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = DeinitSYNCSEXPORTBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSYNCSEXPORTBridge");
+#endif
+#endif /* defined(SUPPORT_SERVER_SYNC) */
+
+#if defined(PDUMP)
+ eError = DeinitPDUMPCTRLBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitPDUMPCTRLBridge");
+#endif
+
+ eError = DeinitMMBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitMMBridge");
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+ eError = DeinitCMMBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitCMMBridge");
+#endif
+
+#if defined(PDUMP)
+ eError = DeinitPDUMPMMBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitPDUMPMMBridge");
+
+ eError = DeinitPDUMPBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitPDUMPBridge");
+#endif
+
+#if defined(PVR_TESTING_UTILS)
+ eError = DeinitTUTILSBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitTUTILSBridge");
+#endif
+
+#if defined(SUPPORT_DISPLAY_CLASS)
+ eError = DeinitDCBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitDCBridge");
+#endif
+
+ eError = DeinitCACHEBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitCACHEBridge");
+
+#if defined(SUPPORT_SECURE_EXPORT)
+ eError = DeinitSMMBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSMMBridge");
+#endif
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+ eError = DeinitHTBUFFERBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitHTBUFFERBridge");
+#endif
+
+ eError = DeinitPVRTLBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitPVRTLBridge");
+
+#if defined(SUPPORT_VALIDATION_BRIDGE)
+ eError = DeinitVALIDATIONBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitVALIDATIONBridge");
+#endif
+
+#if defined(PVR_RI_DEBUG)
+ eError = DeinitRIBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRIBridge");
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = DeinitDEVICEMEMHISTORYBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitDEVICEMEMHISTORYBridge");
+#endif
+
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ eError = DeinitSYNCTRACKINGBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitSYNCTRACKINGBridge");
+#endif
+
+#if defined (SUPPORT_RGX)
+
+ eError = DeinitRGXTQBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXTQBridge");
+
+ eError = DeinitRGXTA3DBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXTA3DBridge");
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+ eError = DeinitBREAKPOINTBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitBREAKPOINTBridge");
+#endif
+
+ eError = DeinitDEBUGMISCBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitDEBUGMISCBridge");
+
+#if defined(PDUMP)
+ eError = DeinitRGXPDUMPBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXPDUMPBridge");
+#endif
+
+ eError = DeinitRGXHWPERFBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXHWPERFBridge");
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+ eError = DeinitREGCONFIGBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitREGCONFIGBridge");
+#endif
+
+ eError = DeinitTIMERQUERYBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitTIMERQUERYBridge");
+
+ eError = DeinitRGXKICKSYNCBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXKICKSYNCBridge");
+
+#endif /* SUPPORT_RGX */
+
+ return eError;
+}
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR
+DeviceDepBridgeInit(IMG_UINT64 ui64Features)
+{
+ PVRSRV_ERROR eError;
+
+ if(ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ eError = InitRGXCMPBridge();
+ PVR_LOGR_IF_ERROR(eError, "InitRGXCMPBridge");
+ }
+
+ if(ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+ {
+ eError = InitRGXSIGNALSBridge();
+		PVR_LOGR_IF_ERROR(eError, "InitRGXSIGNALSBridge");
+ }
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(ui64Features & RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK)
+ {
+ eError = InitRGXRAYBridge();
+ PVR_LOGR_IF_ERROR(eError, "InitRGXRAYBridge");
+ }
+#endif
+
+ if(ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+ {
+ eError = InitRGXTQ2Bridge();
+ PVR_LOGR_IF_ERROR(eError, "InitRGXTQ2Bridge");
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+DeviceDepBridgeDeInit(IMG_UINT64 ui64Features)
+{
+ PVRSRV_ERROR eError;
+
+ if(ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ eError = DeinitRGXCMPBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXCMPBridge");
+ }
+
+ if(ui64Features & RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK)
+ {
+ eError = DeinitRGXSIGNALSBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXSIGNALSBridge");
+ }
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(ui64Features & RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK)
+ {
+ eError = DeinitRGXRAYBridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXRAYBridge");
+ }
+#endif
+
+ if(ui64Features & RGX_FEATURE_FASTRENDER_DM_BIT_MASK)
+ {
+ eError = DeinitRGXTQ2Bridge();
+ PVR_LOGR_IF_ERROR(eError, "DeinitRGXTQ2Bridge");
+ }
+
+ return PVRSRV_OK;
+}
+#endif /* SUPPORT_RGX */
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.h
new file mode 100644
index 00000000000000..b14e8e0bbc14df
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_bridge_init.h
@@ -0,0 +1,57 @@
+/**************************************************************************/ /*!
+@File
+@Title PVR Common Bridge Init/Deinit Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the common PVR Bridge init/deinit code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_BRIDGE_INIT_H_
+#define _PVRSRV_BRIDGE_INIT_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+PVRSRV_ERROR CommonBridgeInit(void);
+PVRSRV_ERROR DeviceDepBridgeInit(IMG_UINT64 ui64Features);
+
+PVRSRV_ERROR CommonBridgeDeInit(void);
+PVRSRV_ERROR DeviceDepBridgeDeInit(IMG_UINT64 ui64Features);
+
+
+#endif /* _PVRSRV_BRIDGE_INIT_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_cleanup.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_cleanup.h
new file mode 100644
index 00000000000000..18f088eb38539e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_cleanup.h
@@ -0,0 +1,158 @@
+/**************************************************************************/ /*!
+@File
+@Title PowerVR SrvKM cleanup thread deferred work interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_CLEANUP_H
+#define _PVRSRV_CLEANUP_H
+
+
+/**************************************************************************/ /*!
+@Brief CLEANUP_THREAD_FN
+
+@Description This is the function prototype for the pfnFree member found in
+ the structure PVRSRV_CLEANUP_THREAD_WORK. The function is
+                     responsible for carrying out the cleanup work and, if
+                     successful, freeing the memory originally supplied to
+                     the call to PVRSRVCleanupThreadAddWork().
+
+@Input pvParam This is private data originally supplied by the caller
+ to PVRSRVCleanupThreadAddWork() when registering the
+                       cleanup work item, psData->pvData. It can be cast
+ to a relevant type within the using module.
+
+@Return PVRSRV_OK if the cleanup operation was successful and the
+ callback has freed the PVRSRV_CLEANUP_THREAD_WORK* work item
+                       memory originally supplied to PVRSRVCleanupThreadAddWork().
+ Any other error code will lead to the work item
+ being re-queued and hence the original
+ PVRSRV_CLEANUP_THREAD_WORK* must not be freed.
+*/ /***************************************************************************/
+
+typedef PVRSRV_ERROR (*CLEANUP_THREAD_FN)(void *pvParam);
+
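+/* A minimal conforming callback might look like the sketch below. The
+ * MY_DEFERRED_FREE structure and the ResourceIsIdle()/DestroyResource()
+ * helpers are hypothetical; PVRSRV_ERROR_RETRY and OSFreeMem() are the
+ * real driver facilities assumed here:
+ *
+ *     typedef struct _MY_DEFERRED_FREE_
+ *     {
+ *         PVRSRV_CLEANUP_THREAD_WORK sCleanupWork;
+ *         SOME_RESOURCE *psResource;
+ *     } MY_DEFERRED_FREE;
+ *
+ *     static PVRSRV_ERROR MyCleanupFn(void *pvParam)
+ *     {
+ *         MY_DEFERRED_FREE *psWork = pvParam;
+ *
+ *         if (!ResourceIsIdle(psWork->psResource))
+ *         {
+ *             return PVRSRV_ERROR_RETRY;  // work item is re-queued
+ *         }
+ *
+ *         DestroyResource(psWork->psResource);
+ *         OSFreeMem(psWork);              // callback owns the memory
+ *         return PVRSRV_OK;
+ *     }
+ */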
+
+/* Typical number of times a caller should want the work to be retried in case
+ * of the callback function (pfnFree) returning an error.
+ * Callers to PVRSRVCleanupThreadAddWork should provide this value as the retry
+ * count (ui32RetryCount) unless there are special requirements.
+ * A value of 200 corresponds to around ~20s (200 * 100ms). If it is not
+ * successful by then give up as an unrecoverable problem has occurred.
+ */
+#define CLEANUP_THREAD_RETRY_COUNT_DEFAULT 200u
+/* Like CLEANUP_THREAD_RETRY_COUNT_DEFAULT, but the caller specifies an
+ * amount of time to keep retrying for rather than a number of retries.
+ */
+#define CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT 2000u /* 2s */
+
+/* Use to set retry count on a cleanup item.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * _count - retry count
+ */
+#define CLEANUP_THREAD_SET_RETRY_COUNT(_item,_count) \
+ do { \
+ (_item)->ui32RetryCount = (_count); \
+ (_item)->ui32TimeStart = 0; \
+ (_item)->ui32TimeEnd = 0; \
+ } while (0)
+
+/* Use to set timeout deadline on a cleanup item.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * _timeout - timeout in milliseconds, if 0
+ * CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT is used
+ */
+#define CLEANUP_THREAD_SET_RETRY_TIMEOUT(_item,_timeout) \
+ do { \
+ (_item)->ui32RetryCount = 0; \
+ (_item)->ui32TimeStart = OSClockms(); \
+		(_item)->ui32TimeEnd = (_item)->ui32TimeStart + ((_timeout) > 0 ? \
+ (_timeout) : CLEANUP_THREAD_RETRY_TIMEOUT_MS_DEFAULT); \
+ } while (0)
+
+/* Indicates if the timeout on a given item has been reached.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ */
+#define CLEANUP_THREAD_RETRY_TIMEOUT_REACHED(_item) \
+	((OSClockms() - (_item)->ui32TimeStart) >= \
+	 ((_item)->ui32TimeEnd - (_item)->ui32TimeStart))
+
+/* Indicates if the current item is waiting on timeout or retry count.
+ * _item - pointer to the PVRSRV_CLEANUP_THREAD_WORK
+ * */
+#define CLEANUP_THREAD_IS_RETRY_TIMEOUT(_item) \
+	((_item)->ui32TimeStart != (_item)->ui32TimeEnd)
+
+/* Clean up work item specifics so that the task can be managed by the
+ * pvr_defer_free cleanup thread in the Server.
+ */
+typedef struct _PVRSRV_CLEANUP_THREAD_WORK_
+{
+ DLLIST_NODE sNode; /*!< List node used internally by the cleanup
+ thread */
+ CLEANUP_THREAD_FN pfnFree; /*!< Pointer to the function to be called to
+ carry out the deferred cleanup */
+ void *pvData; /*!< private data for pfnFree, usually a way back
+ to the original PVRSRV_CLEANUP_THREAD_WORK*
+ pointer supplied in the call to
+ PVRSRVCleanupThreadAddWork(). */
+ IMG_UINT32 ui32TimeStart; /*!< Timestamp in ms of the moment when
+                               the cleanup item was created. */
+ IMG_UINT32 ui32TimeEnd; /*!< Time in ms after which no further retry
+                               attempts will be made; the item is discarded
+                               and an error logged when this is reached. */
+ IMG_UINT32 ui32RetryCount; /*!< Number of times the callback should be
+ re-tried when it returns error. */
+ IMG_BOOL bDependsOnHW; /*!< Retry again after the RGX interrupt signals
+ the global event object */
+} PVRSRV_CLEANUP_THREAD_WORK;
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVCleanupThreadAddWork
+
+@Description Add a work item to be called from the cleanup thread
+
+@Input psData : The function pointer and private data for the callback
+
+@Return None
+*/ /***************************************************************************/
+void PVRSRVCleanupThreadAddWork(PVRSRV_CLEANUP_THREAD_WORK *psData);
+
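+/* Typical queueing sequence (sketch, continuing the hypothetical
+ * MY_DEFERRED_FREE example above; psWork was allocated with OSAllocMem()):
+ *
+ *     psWork->sCleanupWork.pfnFree = MyCleanupFn;
+ *     psWork->sCleanupWork.pvData = psWork;
+ *     psWork->sCleanupWork.bDependsOnHW = IMG_FALSE;
+ *     CLEANUP_THREAD_SET_RETRY_COUNT(&psWork->sCleanupWork,
+ *                                    CLEANUP_THREAD_RETRY_COUNT_DEFAULT);
+ *     PVRSRVCleanupThreadAddWork(&psWork->sCleanupWork);
+ */
+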
+#endif /* _PVRSRV_CLEANUP_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_device.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_device.h
new file mode 100644
index 00000000000000..ec5c2e09ab2021
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_device.h
@@ -0,0 +1,328 @@
+/**************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __PVRSRV_DEVICE_H__
+#define __PVRSRV_DEVICE_H__
+
+#include "img_types.h"
+#include "physheap.h"
+#include "pvrsrv_error.h"
+#include "rgx_fwif_km.h"
+#include "servicesext.h"
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+#include "pvr_dvfs.h"
+#endif
+
+typedef struct _PVRSRV_DEVICE_CONFIG_ PVRSRV_DEVICE_CONFIG;
+typedef enum _DRIVER_MODE_
+{
+/* Do not use these enumerations directly, to query the
+ current driver mode, use the PVRSRV_VZ_MODE_IS()
+ macro */
+ DRIVER_MODE_NATIVE = -1,
+ DRIVER_MODE_HOST = 0,
+ DRIVER_MODE_GUEST
+} PVRSRV_DRIVER_MODE;
+
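+/* For example, a guest-mode check in driver code is expected to read
+ * PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) rather than comparing these values
+ * directly (the exact argument form of that macro is an assumption here;
+ * it is defined elsewhere in the driver).
+ */
+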
+/*
+ * All the heaps from which regular device memory allocations can be made in
+ * terms of their locality to the respective device.
+ */
+typedef enum
+{
+ PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL = 0,
+ PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL = 1,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL = 2,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_GUEST = 3,
+ PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL = 4,
+ PVRSRV_DEVICE_PHYS_HEAP_LAST
+} PVRSRV_DEVICE_PHYS_HEAP;
+
+typedef enum
+{
+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_MAPPABLE = 0,
+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_NON_MAPPABLE = 1,
+ PVRSRV_DEVICE_LOCAL_MEMORY_ARENA_LAST
+} PVRSRV_DEVICE_LOCAL_MEMORY_ARENA;
+
+typedef enum _PVRSRV_DEVICE_SNOOP_MODE_
+{
+ PVRSRV_DEVICE_SNOOP_NONE = 0,
+ PVRSRV_DEVICE_SNOOP_CPU_ONLY,
+ PVRSRV_DEVICE_SNOOP_DEVICE_ONLY,
+ PVRSRV_DEVICE_SNOOP_CROSS,
+ PVRSRV_DEVICE_SNOOP_EMULATED,
+} PVRSRV_DEVICE_SNOOP_MODE;
+
+#if defined(SUPPORT_SOC_TIMER)
+typedef IMG_UINT64
+(*PFN_SYS_DEV_SOC_TIMER_READ)(IMG_HANDLE hSysData);
+#endif
+
+typedef IMG_UINT32
+(*PFN_SYS_DEV_CLK_FREQ_GET)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_PRE_POWER)(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_POST_POWER)(IMG_HANDLE hSysData,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+typedef void
+(*PFN_SYS_DEV_INTERRUPT_HANDLED)(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+typedef PVRSRV_ERROR
+(*PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE)(IMG_HANDLE hSysData,
+ IMG_UINT64 ui64MemSize);
+
+typedef void (*PFN_SYS_DEV_FEAT_DEP_INIT)(PVRSRV_DEVICE_CONFIG *, IMG_UINT64);
+
+typedef PVRSRV_DRIVER_MODE (*PFN_SYS_DRIVER_MODE)(void);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+
+#define PVRSRV_DEVICE_FW_CODE_REGION (0)
+#define PVRSRV_DEVICE_FW_COREMEM_CODE_REGION (1)
+
+typedef PVRSRV_ERROR
+(*PFN_TD_GET_FW_CODE_PARAMS)(IMG_HANDLE hSysData,
+ IMG_CPU_PHYADDR **pasCpuPAddr,
+ IMG_DEV_PHYADDR **pasDevPAddr,
+ IMG_UINT32 *pui32Log2Align,
+ IMG_UINT32 *pui32NumPages,
+ IMG_UINT64 *pui64FWSize);
+
+typedef struct _PVRSRV_TD_FW_PARAMS_
+{
+ const void *pvFirmware;
+ IMG_UINT32 ui32FirmwareSize;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+ RGXFWIF_DEV_VIRTADDR sFWInitFWAddr;
+} PVRSRV_TD_FW_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SEND_FW_IMAGE)(IMG_HANDLE hSysData,
+ PVRSRV_TD_FW_PARAMS *psTDFWParams);
+
+typedef struct _PVRSRV_TD_POWER_PARAMS_
+{
+ IMG_DEV_PHYADDR sPCAddr; /* META only used param */
+
+ /* MIPS only used fields */
+ IMG_DEV_PHYADDR sGPURegAddr;
+ IMG_DEV_PHYADDR sBootRemapAddr;
+ IMG_DEV_PHYADDR sCodeRemapAddr;
+ IMG_DEV_PHYADDR sDataRemapAddr;
+} PVRSRV_TD_POWER_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SET_POWER_PARAMS)(IMG_HANDLE hSysData,
+ PVRSRV_TD_POWER_PARAMS *psTDPowerParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTART)(IMG_HANDLE hSysData);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_RGXSTOP)(IMG_HANDLE hSysData);
+
+typedef struct _PVRSRV_TD_SECBUF_PARAMS_
+{
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_DEVMEM_ALIGN_T uiAlign;
+ IMG_CPU_PHYADDR *psSecBufAddr;
+ IMG_UINT64 *pui64SecBufHandle;
+} PVRSRV_TD_SECBUF_PARAMS;
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_ALLOC)(IMG_HANDLE hSysData,
+ PVRSRV_TD_SECBUF_PARAMS *psTDSecBufParams);
+
+typedef PVRSRV_ERROR
+(*PFN_TD_SECUREBUF_FREE)(IMG_HANDLE hSysData,
+ IMG_UINT64 ui64SecBufHandle);
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+struct _PVRSRV_DEVICE_CONFIG_
+{
+ /*! OS device passed to SysDevInit (linux: 'struct device') */
+ void *pvOSDevice;
+
+ /*!
+ *! Service representation of pvOSDevice. Should be set to NULL when the
+ *! config is created in SysDevInit. Set by Services once a device node has
+ *! been created for this config and unset before SysDevDeInit is called.
+ */
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode;
+
+ /*! Name of the device */
+ IMG_CHAR *pszName;
+
+ /*! Version of the device (optional) */
+ IMG_CHAR *pszVersion;
+
+ /*! Register bank address */
+ IMG_CPU_PHYADDR sRegsCpuPBase;
+ /*! Register bank size */
+ IMG_UINT32 ui32RegsSize;
+ /*! Device interrupt number */
+ IMG_UINT32 ui32IRQ;
+
+ PVRSRV_DEVICE_SNOOP_MODE eCacheSnoopingMode;
+
+ /*! Device specific data handle */
+ IMG_HANDLE hDevData;
+
+ /*! System specific data that gets passed into system callback functions. */
+ IMG_HANDLE hSysData;
+
+ IMG_BOOL bHasNonMappableLocalMemory;
+
+ PHYS_HEAP_CONFIG *pasPhysHeaps;
+ IMG_UINT32 ui32PhysHeapCount;
+
+ /*!
+ *! ID of the Physical memory heap to use.
+ *!
+ *! The first entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL])
+ *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+ *! flag is not set. Normally this will be the PhysHeapID of an LMA heap
+ *! but the configuration could specify a UMA heap here (if desired).
+ *!
+ *! The second entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL])
+ *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+ *! flag is set. Normally this will be the PhysHeapID of a UMA heap but
+ *! the configuration could specify an LMA heap here (if desired).
+ *!
+ *! The third entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL])
+ *! will be used for allocations where the PVRSRV_MEMALLOCFLAG_FW_LOCAL
+ *! flag is set.
+ *!
+ *! The fourth entry (aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_EXTERNAL])
+ *! will be used for allocations that are imported into the driver and
+ *! are local to other devices, e.g. a display controller.
+ *!
+ *! In the event of there being only one Physical Heap, the configuration
+ *! should specify the same heap details in all entries.
+ */
+ IMG_UINT32 aui32PhysHeapID[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+
+ RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+ IMG_UINT32 *pui32BIFTilingHeapConfigs;
+ IMG_UINT32 ui32BIFTilingHeapCount;
+
+ /*!
+ *! Callbacks to change system device power state at the beginning and end
+ *! of a power state change (optional).
+ */
+ PFN_SYS_DEV_PRE_POWER pfnPrePowerState;
+ PFN_SYS_DEV_POST_POWER pfnPostPowerState;
+
+ /*! Callback to obtain the clock frequency from the device (optional). */
+ PFN_SYS_DEV_CLK_FREQ_GET pfnClockFreqGet;
+
+#if defined(SUPPORT_SOC_TIMER)
+ /*! Callback to read SoC timer register value (mandatory). */
+ PFN_SYS_DEV_SOC_TIMER_READ pfnSoCTimerRead;
+#endif
+
+ /*!
+ *! Callback to handle memory budgeting. Can be used to reject allocations
+ *! over a certain size (optional).
+ */
+ PFN_SYS_DEV_CHECK_MEM_ALLOC_SIZE pfnCheckMemAllocSize;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /*!
+ *! Callback to get FW code parameters (physical address, size) from
+ *! the trusted device.
+ */
+ PFN_TD_GET_FW_CODE_PARAMS pfnTDGetFWCodeParams;
+
+ /*!
+ *! Callback to send FW image and FW boot time parameters to the trusted
+ *! device.
+ */
+ PFN_TD_SEND_FW_IMAGE pfnTDSendFWImage;
+
+ /*!
+ *! Callback to send parameters needed in a power transition to the trusted
+ *! device.
+ */
+ PFN_TD_SET_POWER_PARAMS pfnTDSetPowerParams;
+
+ /*! Callbacks to ping the trusted device to securely run RGXStart/Stop() */
+ PFN_TD_RGXSTART pfnTDRGXStart;
+ PFN_TD_RGXSTOP pfnTDRGXStop;
+
+ /*! Callback to request allocation/freeing of secure buffers */
+ PFN_TD_SECUREBUF_ALLOC pfnTDSecureBufAlloc;
+ PFN_TD_SECUREBUF_FREE pfnTDSecureBufFree;
+#endif /* defined(SUPPORT_TRUSTED_DEVICE) */
+
+ /*! Function that does device feature specific system layer initialisation */
+ PFN_SYS_DEV_FEAT_DEP_INIT pfnSysDevFeatureDepInit;
+
+ /*! Function returns system layer execution environment */
+ PFN_SYS_DRIVER_MODE pfnSysDriverMode;
+
+#if defined(PVR_DVFS) || defined(SUPPORT_PDVFS)
+ PVRSRV_DVFS sDVFS;
+#endif
+
+#if defined(SUPPORT_ALT_REGBASE)
+ IMG_CPU_PHYADDR sAltRegsCpuPBase;
+#endif
+
+#if defined(SUPPORT_DEVICE_PA0_AS_VALID)
+ IMG_BOOL bDevicePA0IsValid;
+#endif
+};
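+
+/* A system-layer sketch filling in the minimum of this structure. The
+ * MY_GPU_* constants and the gasPhysHeaps array are illustrative, not a
+ * reference port:
+ *
+ *     static PVRSRV_DEVICE_CONFIG sDevConfig;
+ *
+ *     sDevConfig.pvOSDevice           = pvOSDevice;
+ *     sDevConfig.pszName              = "my_soc_gpu";
+ *     sDevConfig.sRegsCpuPBase.uiAddr = MY_GPU_REG_BASE;
+ *     sDevConfig.ui32RegsSize         = MY_GPU_REG_SIZE;
+ *     sDevConfig.ui32IRQ              = MY_GPU_IRQ;
+ *     sDevConfig.eCacheSnoopingMode   = PVRSRV_DEVICE_SNOOP_NONE;
+ *     sDevConfig.pasPhysHeaps         = gasPhysHeaps;
+ *     sDevConfig.ui32PhysHeapCount    = 1;
+ *
+ *     // With a single UMA heap, every slot points at the same heap ID:
+ *     for (i = 0; i < PVRSRV_DEVICE_PHYS_HEAP_LAST; i++)
+ *         sDevConfig.aui32PhysHeapID[i] = 0;
+ */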
+
+#endif /* __PVRSRV_DEVICE_H__*/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_device_types.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_device_types.h
new file mode 100644
index 00000000000000..0439c349e9fe44
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_device_types.h
@@ -0,0 +1,56 @@
+/*************************************************************************/ /*!
+@File
+@Title PowerVR device type definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__PVRSRV_DEVICE_TYPES_H__)
+#define __PVRSRV_DEVICE_TYPES_H__
+
+#include "img_types.h"
+
+#define PVRSRV_MAX_DEVICES 16 /*!< Largest supported number of devices on the system */
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#endif /* __PVRSRV_DEVICE_TYPES_H__ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_devmem.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_devmem.h
new file mode 100644
index 00000000000000..9e2b796c20de02
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_devmem.h
@@ -0,0 +1,912 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management core
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Client side part of device memory management -- This
+ file defines the exposed Services API to core memory management
+ functions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_DEVMEM_H
+#define PVRSRV_DEVMEM_H
+
+#if defined __cplusplus
+extern "C" {
+#endif
+
+#include "img_types.h"
+#include "devicemem_typedefs.h"
+#include "pdumpdefs.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_memallocflags.h"
+#include <powervr/sync_external.h>
+#include "services_km.h" /* for PVRSRV_DEV_CONNECTION */
+
+
+/*
+ Device memory contexts, heaps and memory descriptors are passed
+ through to underlying memory APIs directly, but are to be regarded
+ as an opaque handle externally.
+*/
+typedef struct _PVRSRV_DEVMEMCTX_ *PVRSRV_DEVMEMCTX; /*!< Device-Mem Client-Side Interface: Typedef for Context Ptr */
+typedef DEVMEM_HEAP *PVRSRV_HEAP; /*!< Device-Mem Client-Side Interface: Typedef for Heap Ptr */
+typedef DEVMEM_MEMDESC *PVRSRV_MEMDESC; /*!< Device-Mem Client-Side Interface: Typedef for Memory Descriptor Ptr */
+typedef DEVMEM_EXPORTCOOKIE PVRSRV_DEVMEM_EXPORTCOOKIE; /*!< Device-Mem Client-Side Interface: Typedef for Export Cookie */
+typedef DEVMEM_FLAGS_T PVRSRV_MEMMAP_FLAGS_T; /*!< Device-Mem Client-Side Interface: Typedef for Memory-Mapping Flags Enum */
+typedef IMG_HANDLE PVRSRV_REMOTE_DEVMEMCTX; /*!< Type to use with context export import */
+typedef struct _PVRSRV_EXPORT_DEVMEMCTX_ *PVRSRV_EXPORT_DEVMEMCTX;
+
+/* To use with PVRSRVSubAllocDeviceMem() as the default factor if no
+ * over-allocation is desired. */
+#define PVRSRV_DEVMEM_PRE_ALLOC_MULTIPLIER_NONE DEVMEM_NO_PRE_ALLOCATE_MULTIPLIER
+
+/* N.B. Flags are now defined in pvrsrv_memallocflags.h as they need
+ to be omnipresent. */
+
+/*
+ *
+ * API functions
+ *
+ */
+
+/**************************************************************************/ /*!
+@Function PVRSRVCreateDeviceMemContext
+@Description Creates a device memory context. There is a one-to-one
+ correspondence between this context data structure and the top
+ level MMU page table (known as the Page Catalogue, in the case of a
+ 3-tier MMU). It is intended that a process with its own virtual
+ space on the CPU will also have its own virtual space on the GPU.
+ Thus there is loosely a one-to-one correspondence between process
+ and device memory context, but this is not enforced at this API.
+
+ Every process must create the device memory context before any
+ memory allocations are made, and is responsible for freeing all
+ such allocations before destroying the context
+
+ This is a wrapper function above the "bare-metal" device memory
+ context creation function which would create just a context and no
+ heaps. This function will also create the heaps, according to the
+ heap config that the device specific initialization code has
+ nominated for use by this API.
+
+ The number of heaps thus created is returned to the caller, such
+                that the caller can allocate an array and then call in to fetch
+ details of each heap, or look up the heap with the "Find Heap" API
+ described below.
+
+ In order to derive the details of the MMU configuration for the
+ device, and for retrieving the "bridge handle" for communication
+ internally in services, it is necessary to pass in a
+ PVRSRV_DEV_CONNECTION.
+@Input          psDevConnection Services device connection
+@Output phCtxOut On success, the returned DevMem Context. The
+ caller is responsible for providing storage
+ for this.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVCreateDeviceMemContext(PVRSRV_DEV_CONNECTION *psDevConnection,
+ PVRSRV_DEVMEMCTX *phCtxOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDestroyDeviceMemContext
+@Description    Destroys a device memory context. This should not fail,
+                provided the caller has obeyed the protocol, i.e. has freed
+                all allocations made within the context beforehand.
+@Input hCtx Handle to a DevMem Context
+@Return None
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVDestroyDeviceMemContext(PVRSRV_DEVMEMCTX hCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVFindHeapByName
+@Description Returns the heap handle for the named heap which is assumed to
+                exist in this context.
+
+ N.B. No need for acquire/release semantics here, as when using
+ this wrapper layer, the heaps are automatically instantiated at
+ context creation time and destroyed when the context is
+ destroyed.
+
+ The caller is required to know the heap names already as these
+ will vary from device to device and from purpose to purpose.
+@Input hCtx Handle to a DevMem Context
+@Input pszHeapName Name of the heap to look for
+@Output phHeapOut a handle to the heap, for use in future calls
+ to OpenAllocation / AllocDeviceMemory / Map
+ DeviceClassMemory, etc. (The PVRSRV_HEAP type
+ to be regarded by caller as an opaque, but
+ strongly typed, handle)
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVFindHeapByName(PVRSRV_DEVMEMCTX hCtx,
+ const IMG_CHAR *pszHeapName,
+ PVRSRV_HEAP *phHeapOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemGetHeapBaseDevVAddr
+@Description returns the device virtual address of the base of the heap.
+@Input hHeap Handle to a Heap
+@Output pDevVAddr On success, the device virtual address of the
+ base of the heap.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevmemGetHeapBaseDevVAddr(PVRSRV_HEAP hHeap,
+ IMG_DEV_VIRTADDR *pDevVAddr);
+
+/**************************************************************************/ /*!
+@Function PVRSRVSubAllocDeviceMem
+@Description Allocate memory from the specified heap, acquiring physical
+ memory from OS as we go and mapping this into
+ the GPU (required) and CPU (optional)
+
+ Size must be a positive integer multiple of alignment, or, to
+ put it another way, the uiLog2Align LSBs should all be zero, but
+ at least one other bit should not be.
+
+ Caller to take charge of the PVRSRV_MEMDESC (the memory
+ descriptor) which is to be regarded as an opaque handle.
+
+ If the allocation is supposed to be used with PVRSRVDevmemUnpin()
+ the size must be a page multiple.
+ This is a general rule when suballocations are to
+ be avoided.
+
+@Input uiPreAllocMultiplier Size factor for internal pre-allocation of
+ memory to make subsequent calls with the
+                            same flags faster. Whatever value is set,
+                            the function will first try to allocate
+                            from any pre-allocated memory and, if
+                            successful, will not pre-allocate anything more.
+ That means the factor can always be set and
+ the correct thing will be done internally.
+@Input hHeap Handle to the heap from which memory will be
+ allocated
+@Input uiSize Amount of memory to be allocated.
+@Input uiLog2Align LOG2 of the required alignment
+@Input uiMemAllocFlags Allocation Flags
+@Input pszText Allocation descriptive name, this will
+ be truncated to the number of characters
+ specified in the PVR_ANNOTATION_MAX_LEN.
+@Output phMemDescOut On success, the resulting memory descriptor
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVSubAllocDeviceMem(IMG_UINT8 uiPreAllocMultiplier,
+ PVRSRV_HEAP hHeap,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ PVRSRV_MEMALLOCFLAGS_T uiMemAllocFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *phMemDescOut);
+
+#define PVRSRVAllocDeviceMem(...) \
+ PVRSRVSubAllocDeviceMem(PVRSRV_DEVMEM_PRE_ALLOC_MULTIPLIER_NONE, __VA_ARGS__)
+
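+/* Example allocation through the wrapper above (a sketch: the flag choice
+ * and annotation are illustrative, and hHeap would come from
+ * PVRSRVFindHeapByName()):
+ *
+ *     PVRSRV_MEMDESC hMemDesc;
+ *     eError = PVRSRVAllocDeviceMem(hHeap,
+ *                                   uiSize,  // multiple of the alignment
+ *                                   12,      // log2 alignment, i.e. 4KB
+ *                                   PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                   PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ *                                   "ExampleBuffer",
+ *                                   &hMemDesc);
+ */
+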
+/**************************************************************************/ /*!
+@Function PVRSRVGetMaxDevMemSize
+@Description Get the amount of device memory on current platform
+ (Memory size in Bytes)
+ (Consider scaling down the values returned by this API)
+@Input          psConnection    Services device connection
+@Output         puiLMASize      LMA memory size
+@Output puiUMASize UMA memory size
+@Return None
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVGetMaxDevMemSize(PVRSRV_DEV_CONNECTION *psConnection,
+ IMG_DEVMEM_SIZE_T *puiLMASize,
+ IMG_DEVMEM_SIZE_T *puiUMASize);
+
+/**************************************************************************/ /*!
+@Function PVRSRVFreeDeviceMem
+@Description    Frees memory allocated by PVRSRVSubAllocDeviceMem() (the
+                memory descriptor will be destroyed)
+@Input hMemDesc Handle to the descriptor of the memory to be
+ freed
+@Return None
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVFreeDeviceMem(PVRSRV_MEMDESC hMemDesc);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireCPUMapping
+@Description Causes the allocation referenced by this memory descriptor to be
+ mapped into cpu virtual memory, if it wasn't already, and the
+ CPU virtual address returned in the caller-provided location.
+
+                The caller must call PVRSRVReleaseCPUMapping to advise when
+                they have finished with the mapping.
+
+ Does not accept unpinned allocations.
+ Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+ MemDesc is passed in.
+
+@Input hMemDesc Handle to the memory descriptor for which a
+ CPU mapping is required
+@Output ppvCpuVirtAddrOut On success, the caller's ptr is set to the
+ new CPU mapping
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAcquireCPUMapping(PVRSRV_MEMDESC hMemDesc,
+ void **ppvCpuVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseCPUMapping
+@Description Relinquishes the cpu mapping acquired with
+ PVRSRVAcquireCPUMapping()
+@Input hMemDesc Handle of the memory descriptor
+@Return None
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVReleaseCPUMapping(PVRSRV_MEMDESC hMemDesc);
+
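+/* The expected acquire/use/release pattern (sketch; the memset() is just
+ * an illustrative use of the mapping):
+ *
+ *     void *pvCpuVirt;
+ *
+ *     if (PVRSRVAcquireCPUMapping(hMemDesc, &pvCpuVirt) == PVRSRV_OK)
+ *     {
+ *         memset(pvCpuVirt, 0, uiSize);
+ *         PVRSRVReleaseCPUMapping(hMemDesc);
+ *     }
+ */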
+
+/**************************************************************************/ /*!
+@Function PVRSRVMapToDevice
+@Description Map allocation into the device MMU. This function must only be
+ called once, any further calls will return
+ PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED
+
+ The caller must call PVRSRVReleaseDeviceMapping when they
+ are finished with the mapping.
+
+ Does not accept unpinned allocations.
+ Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+ MemDesc is passed in.
+
+@Input hMemDesc Handle of the memory descriptor
+@Input hHeap Device heap to map the allocation into
+@Output psDevVirtAddrOut Device virtual address
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVMapToDevice(PVRSRV_MEMDESC hMemDesc,
+ PVRSRV_HEAP hHeap,
+ IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVMapToDeviceAddress
+@Description Same as PVRSRVMapToDevice but caller chooses the address to
+ map into.
+
+ The caller is able to overwrite existing mappings so never use
+ this function on a heap where PVRSRVMapToDevice() has been
+ used before or will be used in the future.
+
+ In general the caller has to know which regions of the heap have
+ been mapped already and should avoid overlapping mappings.
+
+@Input hMemDesc Handle of the memory descriptor
+@Input hHeap Device heap to map the allocation into
+@Input          sDevVirtAddr    Device virtual address to map to
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVMapToDeviceAddress(DEVMEM_MEMDESC *psMemDesc,
+ DEVMEM_HEAP *psHeap,
+ IMG_DEV_VIRTADDR sDevVirtAddr);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireDeviceMapping
+@Description    Acquire a reference on the device mapping of the allocation
+                and return the device virtual address in the caller-provided
+                location. If the allocation wasn't mapped into the device,
+                PVRSRV_ERROR_DEVICEMEM_NO_MAPPING will be returned, as
+                PVRSRVMapToDevice must be called first.
+
+ The caller must call PVRSRVReleaseDeviceMapping when they
+ are finished with the mapping.
+
+ Does not accept unpinned allocations.
+ Returns PVRSRV_ERROR_INVALID_MAP_REQUEST if an unpinned
+ MemDesc is passed in.
+
+@Input hMemDesc Handle to the memory descriptor for which a
+ device mapping is required
+@Output psDevVirtAddrOut On success, the caller's ptr is set to the
+ new device mapping
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAcquireDeviceMapping(PVRSRV_MEMDESC hMemDesc,
+ IMG_DEV_VIRTADDR *psDevVirtAddrOut);
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseDeviceMapping
+@Description Relinquishes the device mapping acquired with
+ PVRSRVAcquireDeviceMapping or PVRSRVMapToDevice
+@Input hMemDesc Handle of the memory descriptor
+@Return None
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVReleaseDeviceMapping(PVRSRV_MEMDESC hMemDesc);
+
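+/* Map-then-release pattern on the device side (sketch; programming the
+ * returned address into hardware is outside this API):
+ *
+ *     IMG_DEV_VIRTADDR sDevVAddr;
+ *
+ *     eError = PVRSRVMapToDevice(hMemDesc, hHeap, &sDevVAddr);
+ *     // ... hand sDevVAddr to the GPU/firmware ...
+ *     PVRSRVReleaseDeviceMapping(hMemDesc);
+ *
+ * Later users take further references with PVRSRVAcquireDeviceMapping()
+ * and drop them with PVRSRVReleaseDeviceMapping().
+ */
+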
+/*************************************************************************/ /*!
+@Function PVRSRVDevmemLocalImport
+
+@Description Import a PMR that was created with this connection.
+ The general usage of this function is as follows:
+ 1) Create a devmem allocation on server side.
+ 2) Pass back the PMR of that allocation to client side by
+ creating a handle of type PMR_LOCAL_EXPORT_HANDLE.
+ 3) Pass the PMR_LOCAL_EXPORT_HANDLE to
+ PVRSRVMakeLocalImportHandle() to create a new handle type
+ (DEVMEM_MEM_IMPORT) that can be used with this function.
+
+@Input hExtHandle External memory handle
+
+@Input uiFlags Import flags
+
+@Output phMemDescPtr Created MemDesc
+
+@Output puiSizePtr Size of the created MemDesc
+
+@Input pszAnnotation Allocation descriptive name, this will
+ be truncated to the number of characters
+ specified by PVR_ANNOTATION_MAX_LEN.
+
+@Return PVRSRV_OK if successful
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevmemLocalImport(const PVRSRV_DEV_CONNECTION *psDevConnection,
+ IMG_HANDLE hExtHandle,
+ PVRSRV_MEMMAP_FLAGS_T uiFlags,
+ PVRSRV_MEMDESC *phMemDescPtr,
+ IMG_DEVMEM_SIZE_T *puiSizePtr,
+ const IMG_CHAR *pszAnnotation);
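+
+/* Illustrative sketch of the import flow described above; psConnection,
+ * hLocalExportHandle (a PMR_LOCAL_EXPORT_HANDLE obtained from the server)
+ * and uiFlags are assumed to exist:
+ *
+ *     IMG_HANDLE hImport;
+ *     PVRSRV_MEMDESC hMemDesc;
+ *     IMG_DEVMEM_SIZE_T uiSize;
+ *
+ *     PVRSRVMakeLocalImportHandle(psConnection, hLocalExportHandle, &hImport);
+ *     PVRSRVDevmemLocalImport(psConnection, hImport, uiFlags,
+ *                             &hMemDesc, &uiSize, "local-import");
+ */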
+
+/*************************************************************************/ /*!
+@Function PVRSRVDevmemGetImportUID
+
+@Description Get the UID of the import that backs this MemDesc
+
+@Input hMemDesc MemDesc
+
+@Return UID of import
+*/
+/*****************************************************************************/
+IMG_EXPORT PVRSRV_ERROR PVRSRVDevmemGetImportUID(PVRSRV_MEMDESC hMemDesc,
+ IMG_UINT64 *pui64UID);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAllocExportableDevMem
+@Description Allocate memory without mapping into device memory context. This
+ memory is exported and ready to be mapped into the device memory
+ context of other processes, or to CPU only with
+ PVRSRVMapMemoryToCPUOnly(). The caller agrees to later call
+ PVRSRVFreeUnmappedExportedMemory(). The caller must give the page
+ size of the heap into which this memory may be subsequently
+ mapped, or the largest of such page sizes if it may be mapped
+ into multiple places. This information is to be communicated in
+ the Log2Align field.
+
+ Size must be a positive integer multiple of the page size
+@Input uiLog2Align Log2 of the alignment required
+@Input uiLog2HeapPageSize Log2 of the page size to allocate. Must
+ match the page size of the heap that this
+ is going to be mapped into.
+@Input uiSize the amount of memory to be allocated
+@Input uiFlags Allocation flags
+@Input pszText Text to describe the allocation, this will
+ be truncated to the number of characters
+ specified by PVR_ANNOTATION_MAX_LEN.
+@Output hMemDesc
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAllocExportableDevMem(const PVRSRV_DEV_CONNECTION *psDevConnection,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiLog2HeapPageSize,
+ PVRSRV_MEMALLOCFLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *hMemDesc);
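+
+/* Illustrative sketch: allocating one exportable 64 KiB block for a heap
+ * with 4 KiB pages (the size, alignment and flags below are example
+ * choices, not API requirements):
+ *
+ *     PVRSRV_MEMDESC hMemDesc;
+ *     PVRSRVAllocExportableDevMem(psDevConnection,
+ *                                 64 * 1024,  // multiple of the page size
+ *                                 12,         // log2 alignment (4 KiB)
+ *                                 12,         // log2 heap page size
+ *                                 PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ *                                 PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ *                                 "exportable-buf",
+ *                                 &hMemDesc);
+ */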
+
+/**************************************************************************/ /*!
+@Function PVRSRVChangeSparseDevMem
+@Description This function alters the underlying memory layout of the given
+ allocation by allocating/removing pages as requested
+ This function also re-writes the GPU & CPU Maps accordingly
+ The specific actions can be controlled by corresponding flags
+
+@Input psMemDesc The memory layout that needs to be modified
+@Input ui32AllocPageCount New page allocation count
+@Input pai32AllocIndices New page allocation indices (page granularity)
+@Input ui32FreePageCount Number of pages that need to be freed
+@Input pai32FreeIndices Indices of the pages that need to be freed
+@Input uiFlags Flags that control the behaviour of the call
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVChangeSparseDevMem(PVRSRV_MEMDESC psMemDesc,
+ IMG_UINT32 ui32AllocPageCount,
+ IMG_UINT32 *pai32AllocIndices,
+ IMG_UINT32 ui32FreePageCount,
+ IMG_UINT32 *pai32FreeIndices,
+ SPARSE_MEM_RESIZE_FLAGS uiFlags);
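+
+/* Illustrative sketch: backing two new pages and releasing one in a sparse
+ * allocation; hMemDesc is assumed to be a sparse PVRSRV_MEMDESC and
+ * SPARSE_RESIZE_BOTH is assumed to be the flag requesting both actions in
+ * a single call:
+ *
+ *     IMG_UINT32 aui32Alloc[] = { 4, 5 };  // page indices to back
+ *     IMG_UINT32 aui32Free[]  = { 0 };     // page index to release
+ *     PVRSRVChangeSparseDevMem(hMemDesc, 2, aui32Alloc,
+ *                              1, aui32Free, SPARSE_RESIZE_BOTH);
+ */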
+
+/**************************************************************************/ /*!
+@Function PVRSRVAllocSparseDevMem2
+@Description Allocate sparse memory without mapping into device memory
+ context. Sparse memory is used where you have an allocation
+ that has a logical size (i.e. the amount of VM space it will
+ need when mapping it into a device) that is larger than the
+ amount of physical memory that allocation will use. An example
+ of this is an NPOT texture where the twiddling algorithm requires
+ you to round the width and height to the next POT and so you know
+ there will be pages that are never accessed.
+
+ This memory can be exported and mapped into the device
+ memory context of other processes, or to CPU address space.
+
+ Size must be a positive integer multiple of the page size, see
+ PVRSRVGetHeapLog2PageSize().
+
+ Mapping Table array has ui32NumPhysChunks elements. Each
+ element holds the page index of the VM space where the physical
+ memory page will be mapped. All elements in this array are
+ valid.
+
+@Input psDevMemCtx Device memory context to allocate the memory for
+@Input uiSize The logical size of allocation
+@Input uiChunkSize The size of the chunk (== page size in bytes)
+@Input ui32NumPhysChunks The number of physical chunks required
+@Input ui32NumVirtChunks The number of virtual chunks required
+@Input pui32MappingTable VM space page index table
+@Input uiLog2Align Log2 of the required alignment
+@Input uiLog2HeapPageSize Log2 page size of the target heap
+@Input uiFlags Allocation flags
+@Input pszText Text to describe the allocation, this will
+ be truncated to the number of characters
+ specified in PVR_ANNOTATION_MAX_LEN.
+@Output hMemDesc
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAllocSparseDevMem2(const PVRSRV_DEVMEMCTX psDevMemCtx,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_SIZE_T uiChunkSize,
+ IMG_UINT32 ui32NumPhysChunks,
+ IMG_UINT32 ui32NumVirtChunks,
+ IMG_UINT32 *pui32MappingTable,
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiLog2HeapPageSize,
+ PVRSRV_MEMMAP_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ PVRSRV_MEMDESC *hMemDesc);
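+
+/* Illustrative sketch: a sparse allocation with 8 virtual chunks, of which
+ * only 3 are physically backed, at virtual page indices 0, 1 and 4
+ * (4 KiB pages, i.e. log2 = 12, are an example choice):
+ *
+ *     IMG_UINT32 aui32Map[3] = { 0, 1, 4 };
+ *     PVRSRV_MEMDESC hMemDesc;
+ *     PVRSRVAllocSparseDevMem2(psDevMemCtx,
+ *                              8 * 4096,  // logical size
+ *                              4096,      // chunk size == page size
+ *                              3,         // physical chunks
+ *                              8,         // virtual chunks
+ *                              aui32Map,
+ *                              12, 12,    // log2 align / heap page size
+ *                              uiFlags, "sparse-buf", &hMemDesc);
+ */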
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetOSLog2PageSize
+@Description Call this only AFTER setting up the connection to the kernel module,
+ otherwise it will run into an assert.
+ Gives the log2 of the page size that is utilised by the OS.
+
+@Return The log2 page size
+*/ /***************************************************************************/
+
+IMG_EXPORT IMG_UINT32 PVRSRVGetOSLog2PageSize(void);
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetHeapLog2PageSize
+@Description Queries the page size of a passed heap.
+
+@Input hHeap Heap that is queried
+@Output puiLog2PageSize Log2 page size will be returned in this
+
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVGetHeapLog2PageSize(PVRSRV_HEAP hHeap, IMG_UINT32* puiLog2PageSize);
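+
+/* Illustrative sketch: rounding an arbitrary byte count up to a whole
+ * number of heap pages before allocating from hHeap:
+ *
+ *     IMG_UINT32 uiLog2PageSize;
+ *     if (PVRSRVGetHeapLog2PageSize(hHeap, &uiLog2PageSize) == PVRSRV_OK)
+ *     {
+ *         IMG_DEVMEM_SIZE_T uiPageSize = 1ULL << uiLog2PageSize;
+ *         uiSize = (uiSize + uiPageSize - 1) & ~(uiPageSize - 1);
+ *     }
+ */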
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetHeapTilingProperties
+@Description Queries the import alignment and tiling stride conversion
+ factor of a passed heap.
+
+@Input hHeap Heap that is queried
+@Output puiLog2ImportAlignment Log2 import alignment will be
+ returned in this
+@Output puiLog2TilingStrideFactor Log2 alignment to tiling stride
+ conversion factor will be returned
+ in this
+
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVGetHeapTilingProperties(PVRSRV_HEAP hHeap,
+ IMG_UINT32* puiLog2ImportAlignment,
+ IMG_UINT32* puiLog2TilingStrideFactor);
+
+/**************************************************************************/ /*!
+@Function PVRSRVMakeLocalImportHandle
+@Description This is a "special case" function for making a local import
+ handle. The server handle is a handle to a PMR of bridge type
+ PMR_LOCAL_EXPORT_HANDLE. The returned local import handle will
+ be of the bridge type DEVMEM_MEM_IMPORT that can be used with
+ PVRSRVDevmemLocalImport().
+@Input psConnection Services connection
+@Input hServerHandle Server export handle
+@Output hLocalImportHandle Returned client import handle
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVMakeLocalImportHandle(const PVRSRV_DEV_CONNECTION *psConnection,
+ IMG_HANDLE hServerHandle,
+ IMG_HANDLE *hLocalImportHandle);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnmakeLocalImportHandle
+@Description Destroy the hLocalImportHandle created with
+ PVRSRVMakeLocalImportHandle().
+@Input psConnection Services connection
+@Output hLocalImportHandle Local import handle
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVUnmakeLocalImportHandle(const PVRSRV_DEV_CONNECTION *psConnection,
+ IMG_HANDLE hLocalImportHandle);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+/**************************************************************************/ /*!
+@Function PVRSRVExport
+@Description Given a memory allocation allocated with Devmem_Allocate(),
+ create a "cookie" that can be passed intact by the caller's own
+ choice of secure IPC to another process and used as the argument
+ to "map" to map this memory into a heap in the target process.
+ N.B. This can also be used to map into multiple heaps in one
+ process, though that's not the intention.
+
+ Note, the caller must later call Unexport before freeing the
+ memory.
+@Input hMemDesc handle to the descriptor of the memory to be
+ exported
+@Output phExportCookie On success, a handle to the exported cookie
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR PVRSRVExportDevMem(PVRSRV_MEMDESC hMemDesc,
+ PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnexport
+@Description Undo the export caused by "PVRSRVExport" - note - it doesn't
+ actually tear down any mapping made by processes that received
+ the export cookie. It will simply make the cookie null and void
+ and prevent further mappings.
+@Input hMemDesc handle to the descriptor of the memory which
+ will no longer be exported
+@Output phExportCookie On success, the export cookie provided will be
+ set to null
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR PVRSRVUnexportDevMem(PVRSRV_MEMDESC hMemDesc,
+ PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie);
+
+/**************************************************************************/ /*!
+@Function PVRSRVImportDevMem
+@Description Import memory that was previously exported with PVRSRVExport()
+ into the current process.
+
+ Note: This call only makes the memory accessible to this
+ process, it doesn't map it into the device or CPU.
+
+@Input psConnection Connection to services
+@Input phExportCookie Ptr to the handle of the export-cookie
+ identifying the memory to be imported
+@Output phMemDescOut On Success, a handle to a new memory descriptor
+ representing the memory as mapped into the
+ local process address space.
+@Input uiFlags Device memory mapping flags
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR PVRSRVImportDevMem(const PVRSRV_DEV_CONNECTION *psConnection,
+ PVRSRV_DEVMEM_EXPORTCOOKIE *phExportCookie,
+ PVRSRV_MEMMAP_FLAGS_T uiFlags,
+ PVRSRV_MEMDESC *phMemDescOut);
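+
+/* Illustrative sketch of the export/import round trip (the secure IPC
+ * transport of the cookie between the two processes is assumed and not
+ * shown):
+ *
+ *     // exporting process
+ *     PVRSRV_DEVMEM_EXPORTCOOKIE hCookie;
+ *     PVRSRVExportDevMem(hMemDesc, &hCookie);
+ *     // ... pass hCookie to the other process ...
+ *
+ *     // importing process
+ *     PVRSRV_MEMDESC hImported;
+ *     PVRSRVImportDevMem(psConnection, &hCookie, uiFlags, &hImported);
+ */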
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+/**************************************************************************/ /*!
+@Function PVRSRVIsDeviceMemAddrValid
+@Description Checks if given device virtual memory address is valid
+ from the GPU's point of view.
+
+ This method is intended to be called by a process that imported
+ another process' memory context, hence the expected
+ PVRSRV_REMOTE_DEVMEMCTX parameter.
+
+ See PVRSRVAcquireRemoteDevMemContext for details about
+ importing memory contexts.
+
+@Input hContext handle to memory context
+@Input sDevVAddr device 40-bit virtual memory address
+@Return PVRSRV_OK if address is valid or
+ PVRSRV_ERROR_INVALID_GPU_ADDR when address is invalid
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR PVRSRVIsDeviceMemAddrValid(PVRSRV_REMOTE_DEVMEMCTX hContext,
+ IMG_DEV_VIRTADDR sDevVAddr);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemPin
+@Description This is the counterpart to PVRSRVDevmemUnpin. It is meant to be
+ called after unpinning an allocation.
+
+ It will make an unpinned allocation available again and
+ unregister it from the OS shrinker. If the shrinker
+ was invoked by the OS while the allocation was unpinned, it will
+ allocate new physical pages.
+
+ If any GPU mapping existed before, the same virtual address
+ range will be valid again.
+
+@Input hMemDesc The MemDesc that is going to be pinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success and the pre-unpin content
+ is still present and can be reused.
+
+ PVRSRV_ERROR_PMR_NEW_MEMORY if the memory has
+ been pinned successfully but the pre-unpin
+ content was lost.
+
+ PVRSRV_ERROR_INVALID_PARAMS if the MemDesc is
+ invalid e.g. NULL.
+
+ PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES if the
+ memory of the allocation is lost and we failed
+ to allocate a new one.
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevmemPin(PVRSRV_MEMDESC hMemDesc);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemUnpin
+@Description Unpins an allocation. Unpinning means that the
+ memory must no longer be accessed by either CPU or GPU.
+ The physical memory pages will be registered with a shrinker
+ and the OS is able to reclaim them in OOM situations when the
+ shrinker is invoked.
+
+ The counterpart to this is PVRSRVDevmemPin() which
+ checks if the physical pages were reclaimed by the OS and then
+ either allocates new physical pages or just unregisters the
+ allocation from the shrinker. The device virtual address range
+ (if any existed) will be kept.
+
+ The GPU mapping will be kept but is going to be invalidated.
+ It is allowed to free an unpinned allocation or remove the GPU
+ mapping.
+
+ RESTRICTIONS:
+ - Unpinning should only be done if the caller is sure that
+ the GPU finished all pending/running operations on the allocation.
+
+ - The caller must ensure that no other process than the calling
+ one itself has imported or mapped the allocation, otherwise the
+ unpinning will fail.
+
+ - All CPU mappings have to be removed beforehand by the caller.
+
+ - Any attempts to map the allocation while it is unpinned are
+ forbidden.
+
+ - When using PVRSRVAllocDeviceMem() the caller must allocate
+ whole pages from the chosen heap to avoid suballocations.
+
+@Input hMemDesc The MemDesc that is going to be unpinned.
+
+@Return PVRSRV_ERROR: PVRSRV_OK on success.
+
+ PVRSRV_ERROR_INVALID_PARAMS if the passed
+ allocation is not a multiple of the heap page
+ size but was allocated with
+ PVRSRVAllocDeviceMem(), or if it is NULL.
+
+ PVRSRV_ERROR_PMR_STILL_REFERENCED if the passed
+ allocation is still referenced i.e. is still
+ exported or mapped somewhere else.
+
+ PVRSRV_ERROR_STILL_MAPPED will be returned if the
+ calling process still has CPU mappings set up
+ or the GPU mapping was acquired more than once.
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevmemUnpin(PVRSRV_MEMDESC hMemDesc);
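+
+/* Illustrative sketch of an unpin/pin cycle; hMemDesc is assumed to be a
+ * whole-page allocation with no CPU mappings and no other importers:
+ *
+ *     PVRSRVDevmemUnpin(hMemDesc);  // pages may now be reclaimed by the OS
+ *     // ... later, before the memory is accessed again ...
+ *     PVRSRV_ERROR eError = PVRSRVDevmemPin(hMemDesc);
+ *     if (eError == PVRSRV_ERROR_PMR_NEW_MEMORY)
+ *     {
+ *         // pinned OK, but the previous content was lost: re-initialise
+ *     }
+ */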
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemGetSize
+@Description Returns the allocated size for this device-memory.
+
+@Input hMemDesc handle to memory allocation
+@Output puiSize return value for size
+@Return PVRSRV_OK on success or
+ PVRSRV_ERROR_INVALID_PARAMS
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevmemGetSize(PVRSRV_MEMDESC hMemDesc, IMG_DEVMEM_SIZE_T* puiSize);
+
+/**************************************************************************/ /*!
+@Function PVRSRVDevmemGetAnnotation
+@Description Returns the annotation for this device-memory
+
+@Input hMemDesc handle to memory allocation
+@Output pszAnnotation return value for annotation
+@Return PVRSRV_OK on success or
+ PVRSRV_ERROR_INVALID_PARAMS
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVDevmemGetAnnotation(PVRSRV_MEMDESC hMemDesc, IMG_CHAR **pszAnnotation);
+
+/**************************************************************************/ /*!
+@Function PVRSRVExportDevMemContext
+@Description Makes the given memory context available to other processes that
+ can get a handle to it via PVRSRVAcquireRemoteDevMemContext.
+ This handle can be used for e.g. the breakpoint functions.
+
+ The context will only be available to other processes that are able
+ to pass in a memory descriptor that is shared between this and the
+ importing process. We use the memory descriptor to identify the
+ correct context and verify that the caller is allowed to request
+ the context.
+
+ The whole mechanism is intended to be used with a debugger which,
+ for example, can load USC breakpoint handlers into the shared allocation
+ and then use the acquired remote context (the one exported here)
+ to set/clear breakpoints in USC code.
+
+@Input hLocalDevmemCtx Context to export
+@Input hSharedAllocation A memory descriptor that points to a shared allocation
+ between the two processes. Must be in the given context.
+@Output phExportCtx A handle to the exported context that is needed for
+ the destruction with PVRSRVUnexportDevMemContext().
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVExportDevMemContext(PVRSRV_DEVMEMCTX hLocalDevmemCtx,
+ PVRSRV_MEMDESC hSharedAllocation,
+ PVRSRV_EXPORT_DEVMEMCTX *phExportCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVUnexportDevMemContext
+@Description Removes the context from the list of sharable contexts
+ that can be imported via PVRSRVAcquireRemoteDevMemContext.
+
+@Input hExportCtx An export context retrieved from
+ PVRSRVExportDevMemContext.
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVUnexportDevMemContext(PVRSRV_EXPORT_DEVMEMCTX hExportCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireRemoteDevMemContext
+@Description Retrieves an exported context that has been made available with
+ PVRSRVExportDevMemContext in the remote process.
+
+ hSharedMemDesc must be a memory descriptor pointing to the same
+ physical resource as the one passed to PVRSRVExportDevMemContext
+ in the remote process.
+ The memory descriptor has to be retrieved from the remote process
+ via a secure buffer export/import mechanism like DMABuf.
+
+@Input hDevmemCtx Memory context of the calling process.
+@Input hSharedAllocation The memory descriptor used to export the context
+@Output phRemoteCtx Handle to the remote context.
+@Return PVRSRV_ERROR: PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVAcquireRemoteDevMemContext(PVRSRV_DEVMEMCTX hDevmemCtx,
+ PVRSRV_MEMDESC hSharedAllocation,
+ PVRSRV_REMOTE_DEVMEMCTX *phRemoteCtx);
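+
+/* Illustrative sketch: process A exports its context keyed on a shared
+ * allocation; process B, holding a memory descriptor for the same physical
+ * resource (e.g. obtained via DMABuf), acquires and later releases it:
+ *
+ *     // process A
+ *     PVRSRV_EXPORT_DEVMEMCTX hExportCtx;
+ *     PVRSRVExportDevMemContext(hCtxA, hSharedMemDescA, &hExportCtx);
+ *
+ *     // process B
+ *     PVRSRV_REMOTE_DEVMEMCTX hRemoteCtx;
+ *     PVRSRVAcquireRemoteDevMemContext(hCtxB, hSharedMemDescB, &hRemoteCtx);
+ *     // ... use hRemoteCtx ...
+ *     PVRSRVReleaseRemoteDevMemContext(hRemoteCtx);
+ */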
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseRemoteDevMemContext
+@Description Releases the remote context and destroys it if this is the last
+ reference.
+
+@Input hRemoteCtx Handle to the remote context that will be removed.
+*/ /***************************************************************************/
+IMG_EXPORT void
+PVRSRVReleaseRemoteDevMemContext(PVRSRV_REMOTE_DEVMEMCTX hRemoteCtx);
+
+/*************************************************************************/ /*!
+@Function PVRSRVRegisterDevmemPageFaultNotify
+@Description Registers to be notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be notified about.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVRegisterDevmemPageFaultNotify(PVRSRV_DEVMEMCTX psDevmemCtx);
+
+/*************************************************************************/ /*!
+@Function PVRSRVUnregisterDevmemPageFaultNotify
+@Description Unregisters from being notified when a page fault occurs on a
+ specific device memory context.
+@Input psDevmemCtx The context to be unregistered from.
+@Return PVRSRV_ERROR.
+*/ /**************************************************************************/
+IMG_EXPORT PVRSRV_ERROR
+PVRSRVUnregisterDevmemPageFaultNotify(PVRSRV_DEVMEMCTX psDevmemCtx);
+
+/**************************************************************************/ /*!
+@Function PVRSRVGetRemoteDeviceMemFaultAddress
+@Description Returns the device virtual address of a page fault
+ on a given remote memory context.
+ Only one address is stored at a time until consumed.
+
+ This method is intended to be called by a process that imported
+ another process' memory context, hence the expected
+ PVRSRV_REMOTE_DEVMEMCTX parameter.
+
+ See PVRSRVAcquireRemoteDevMemContext for details about
+ importing memory contexts.
+
+@Input hContext handle to memory context.
+@Output psFaultAddress device 40-bit virtual memory address.
+@Return PVRSRV_OK if an address is returned,
+ PVRSRV_ERROR_RESOURCE_UNAVAILABLE otherwise.
+*/ /***************************************************************************/
+IMG_EXPORT PVRSRV_ERROR PVRSRVGetRemoteDeviceMemFaultAddress(PVRSRV_REMOTE_DEVMEMCTX hContext,
+ IMG_DEV_VIRTADDR *psFaultAddress);
+
+#if defined __cplusplus
+};
+#endif
+#endif /* PVRSRV_DEVMEM_H */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_error.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_error.h
new file mode 100644
index 00000000000000..82ef82a464404f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_error.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File pvrsrv_error.h
+@Title services error enumerant
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines error codes used by any/all services modules
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__PVRSRV_ERROR_H__)
+#define __PVRSRV_ERROR_H__
+
+/*!
+ *****************************************************************************
+ * Error values
+ *****************************************************************************/
+typedef enum PVRSRV_ERROR
+{
+ PVRSRV_OK,
+#define PVRE(x) x,
+#include "pvrsrv_errors.h"
+#undef PVRE
+ PVRSRV_ERROR_FORCE_I32 = 0x7fffffff
+
+} PVRSRV_ERROR;
+
+#endif /* !defined (__PVRSRV_ERROR_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_errors.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_errors.h
new file mode 100644
index 00000000000000..c44147abd20024
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_errors.h
@@ -0,0 +1,383 @@
+/*************************************************************************/ /*!
+@File pvrsrv_errors.h
+@Title services error codes
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines error codes used by any/all services modules
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Don't add include guards to this file! */
+
+PVRE(PVRSRV_ERROR_OUT_OF_MEMORY)
+PVRE(PVRSRV_ERROR_TOO_FEW_BUFFERS)
+PVRE(PVRSRV_ERROR_INVALID_PARAMS)
+PVRE(PVRSRV_ERROR_INIT_FAILURE)
+PVRE(PVRSRV_ERROR_CANT_REGISTER_CALLBACK)
+PVRE(PVRSRV_ERROR_INVALID_DEVICE)
+PVRE(PVRSRV_ERROR_NOT_OWNER)
+PVRE(PVRSRV_ERROR_BAD_MAPPING)
+PVRE(PVRSRV_ERROR_TIMEOUT)
+PVRE(PVRSRV_ERROR_NOT_IMPLEMENTED)
+PVRE(PVRSRV_ERROR_FLIP_CHAIN_EXISTS)
+PVRE(PVRSRV_ERROR_INVALID_SWAPINTERVAL)
+PVRE(PVRSRV_ERROR_SCENE_INVALID)
+PVRE(PVRSRV_ERROR_STREAM_ERROR)
+PVRE(PVRSRV_ERROR_FAILED_DEPENDENCIES)
+PVRE(PVRSRV_ERROR_CMD_NOT_PROCESSED)
+PVRE(PVRSRV_ERROR_CMD_TOO_BIG)
+PVRE(PVRSRV_ERROR_DEVICE_REGISTER_FAILED)
+PVRE(PVRSRV_ERROR_TOOMANYBUFFERS)
+PVRE(PVRSRV_ERROR_NOT_SUPPORTED)
+PVRE(PVRSRV_ERROR_PROCESSING_BLOCKED)
+PVRE(PVRSRV_ERROR_CANNOT_FLUSH_QUEUE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_QUEUE_SPACE)
+PVRE(PVRSRV_ERROR_CANNOT_GET_RENDERDETAILS)
+PVRE(PVRSRV_ERROR_RETRY)
+PVRE(PVRSRV_ERROR_DDK_VERSION_MISMATCH)
+PVRE(PVRSRV_ERROR_DDK_BUILD_MISMATCH)
+PVRE(PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH)
+PVRE(PVRSRV_ERROR_BVNC_MISMATCH)
+PVRE(PVRSRV_ERROR_FWPROCESSOR_MISMATCH)
+PVRE(PVRSRV_ERROR_UPLOAD_TOO_BIG)
+PVRE(PVRSRV_ERROR_INVALID_FLAGS)
+PVRE(PVRSRV_ERROR_FAILED_TO_REGISTER_PROCESS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOAD_LIBRARY)
+PVRE(PVRSRV_ERROR_UNABLE_GET_FUNC_ADDR)
+PVRE(PVRSRV_ERROR_UNLOAD_LIBRARY_FAILED)
+PVRE(PVRSRV_ERROR_BRIDGE_CALL_FAILED)
+PVRE(PVRSRV_ERROR_IOCTL_CALL_FAILED)
+PVRE(PVRSRV_ERROR_MMU_API_PROTOCOL_ERROR)
+PVRE(PVRSRV_ERROR_MMU_CONFIG_IS_WRONG)
+PVRE(PVRSRV_ERROR_MMU_CONTEXT_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_ALLOCATE_PAGETABLES)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_CREATE_HEAP)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_FAILED_TO_UNMAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE)
+PVRE(PVRSRV_ERROR_MMU_LIVE_ALLOCATIONS_IN_HEAP)
+PVRE(PVRSRV_ERROR_MMU_RESERVATION_NOT_INSIDE_HEAP)
+PVRE(PVRSRV_ERROR_PMR_NEW_MEMORY)
+PVRE(PVRSRV_ERROR_PMR_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_PMR_CLIENT_NOT_TRUSTED)
+PVRE(PVRSRV_ERROR_PMR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_PMR_INCOMPATIBLE_CONTIGUITY)
+PVRE(PVRSRV_ERROR_PMR_MISMATCHED_ATTRIBUTES)
+PVRE(PVRSRV_ERROR_PMR_NOT_PAGE_MULTIPLE)
+PVRE(PVRSRV_ERROR_PMR_NOT_PERMITTED)
+PVRE(PVRSRV_ERROR_PMR_ALREADY_OCCUPIED)
+PVRE(PVRSRV_ERROR_PMR_UNRECOVERABLE_ERROR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PASSWORD_OR_STALE_PMR)
+PVRE(PVRSRV_ERROR_PMR_WRONG_PMR_TYPE)
+PVRE(PVRSRV_ERROR_PMR_MAPPING_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_PMR_BAD_MAPPINGTABLE_SIZE)
+PVRE(PVRSRV_ERROR_PMR_BAD_CHUNK_SIZE)
+PVRE(PVRSRV_ERROR_PMR_MAPPINGTABLE_MISMATCH)
+PVRE(PVRSRV_ERROR_PMR_INVALID_CHUNK)
+PVRE(PVRSRV_ERROR_PMR_NO_KERNEL_MAPPING)
+PVRE(PVRSRV_ERROR_PMR_EMPTY)
+PVRE(PVRSRV_ERROR_PMR_NO_CPU_MAP_FOUND)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_UNMAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_CPU_PAGE_MAP_FAILED)
+PVRE(PVRSRV_ERROR_PMR_PAGE_POISONING_FAILED)
+PVRE(PVRSRV_ERROR_PMR_INVALID_MAP_INDEX_ARRAY)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP)
+PVRE(PVRSRV_ERROR_DEVICEMEM_BAD_IMPORT_SIZE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_CANT_EXPORT_SUBALLOCATION)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_CONFIG_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_HEAP_INDEX)
+PVRE(PVRSRV_ERROR_DEVICEMEM_MAP_FAILED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NON_ZERO_USAGE_COUNT)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_UNABLE_TO_CREATE_ARENA)
+PVRE(PVRSRV_ERROR_DEVICEMEM_OUT_OF_DEVICE_VM)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ALREADY_MAPPED)
+PVRE(PVRSRV_ERROR_DEVICEMEM_NO_MAPPING)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_PMR_FLAGS)
+PVRE(PVRSRV_ERROR_DEVICEMEM_INVALID_LMA_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_MMU_TYPE)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_BUFFER_DEVICE_ALREADY_PRESENT)
+PVRE(PVRSRV_ERROR_PCI_DEVICE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PCI_CALL_FAILED)
+PVRE(PVRSRV_ERROR_PCI_REGION_TOO_SMALL)
+PVRE(PVRSRV_ERROR_PCI_REGION_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_BAD_REGION_SIZE_MISMATCH)
+PVRE(PVRSRV_ERROR_REGISTER_BASE_NOT_SET)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_USER_MEM)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VP_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_SHARED_PBDESC)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_KERNELVIRTUAL)
+PVRE(PVRSRV_ERROR_FAILED_TO_GET_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_VIRT_MEMORY)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_FREE_PAGES)
+PVRE(PVRSRV_ERROR_FAILED_TO_COPY_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_LOCK_PAGES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNLOCK_PAGES)
+PVRE(PVRSRV_ERROR_STILL_MAPPED)
+PVRE(PVRSRV_ERROR_MAPPING_NOT_FOUND)
+PVRE(PVRSRV_ERROR_PHYS_ADDRESS_EXCEEDS_32BIT)
+PVRE(PVRSRV_ERROR_FAILED_TO_MAP_PAGE_TABLE)
+PVRE(PVRSRV_ERROR_INVALID_SEGMENT_BLOCK)
+PVRE(PVRSRV_ERROR_INVALID_GFXDEVDEVDATA)
+PVRE(PVRSRV_ERROR_INVALID_DEVINFO)
+PVRE(PVRSRV_ERROR_INVALID_MEMINFO)
+PVRE(PVRSRV_ERROR_INVALID_MISCINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_IOCTL)
+PVRE(PVRSRV_ERROR_INVALID_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT)
+PVRE(PVRSRV_ERROR_INVALID_HEAP)
+PVRE(PVRSRV_ERROR_INVALID_KERNELINFO)
+PVRE(PVRSRV_ERROR_UNKNOWN_POWER_STATE)
+PVRE(PVRSRV_ERROR_INVALID_HANDLE_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_WRAP_TYPE)
+PVRE(PVRSRV_ERROR_INVALID_PHYS_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_CPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_PERPROC)
+PVRE(PVRSRV_ERROR_FAILED_TO_RETRIEVE_HEAPINFO)
+PVRE(PVRSRV_ERROR_INVALID_MAP_REQUEST)
+PVRE(PVRSRV_ERROR_INVALID_UNMAP_REQUEST)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_MAPPING_HEAP)
+PVRE(PVRSRV_ERROR_MAPPING_STILL_IN_USE)
+PVRE(PVRSRV_ERROR_EXCEEDED_HW_LIMITS)
+PVRE(PVRSRV_ERROR_NO_STAGING_BUFFER_ALLOCATED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_PERPROC_AREA)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_THREAD)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_THREAD)
+PVRE(PVRSRV_ERROR_THREAD_READ_ERROR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_ISR_HANDLER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSTALL_ISR)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNINSTALL_ISR)
+PVRE(PVRSRV_ERROR_ISR_ALREADY_INSTALLED)
+PVRE(PVRSRV_ERROR_ISR_NOT_INSTALLED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INITIALISE_INTERRUPT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_INFO)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DO_BACKWARDS_BLIT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CLOSE_SERVICES)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_CONTEXT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REGISTER_RESOURCE)
+PVRE(PVRSRV_ERROR_INVALID_CCB_COMMAND)
+PVRE(PVRSRV_ERROR_KERNEL_CCB_FULL)
+PVRE(PVRSRV_ERROR_FLIP_FAILED)
+PVRE(PVRSRV_ERROR_UNBLANK_DISPLAY_FAILED)
+PVRE(PVRSRV_ERROR_TIMEOUT_POLLING_FOR_VALUE)
+PVRE(PVRSRV_ERROR_TIMEOUT_WAITING_FOR_CLIENT_CCB)
+PVRE(PVRSRV_ERROR_CREATE_RENDER_CONTEXT_FAILED)
+PVRE(PVRSRV_ERROR_UNKNOWN_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_SECONDARY_FRAG)
+PVRE(PVRSRV_ERROR_UNEXPECTED_PRIMARY_FRAG)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_FENCE_ID)
+PVRE(PVRSRV_ERROR_BLIT_SETUP_FAILED)
+PVRE(PVRSRV_ERROR_SUBMIT_NEEDED)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_PDUMP_BUFFER_FULL)
+PVRE(PVRSRV_ERROR_PDUMP_BUF_OVERFLOW)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ACTIVE)
+PVRE(PVRSRV_ERROR_INCOMPLETE_LINE_OVERLAPS_PAGES)
+PVRE(PVRSRV_ERROR_MUTEX_DESTROY_FAILED)
+PVRE(PVRSRV_ERROR_MUTEX_INTERRUPTIBLE_ERROR)
+PVRE(PVRSRV_ERROR_INSUFFICIENT_SPACE_FOR_COMMAND)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_PROCESS_NOT_FOUND)
+PVRE(PVRSRV_ERROR_SRV_CONNECT_FAILED)
+PVRE(PVRSRV_ERROR_SRV_DISCONNECT_FAILED)
+PVRE(PVRSRV_ERROR_DEINT_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_INIT2_PHASE_FAILED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_FIND_RESOURCE)
+PVRE(PVRSRV_ERROR_NO_DC_DEVICES_FOUND)
+PVRE(PVRSRV_ERROR_DC_DEVICE_INACCESSIBLE)
+PVRE(PVRSRV_ERROR_DC_INVALID_MAXDEPTH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_OPEN_DC_DEVICE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_UNREGISTER_DEVICE)
+PVRE(PVRSRV_ERROR_NO_DEVICEDATA_FOUND)
+PVRE(PVRSRV_ERROR_NO_DEVICENODE_FOUND)
+PVRE(PVRSRV_ERROR_NO_CLIENTNODE_FOUND)
+PVRE(PVRSRV_ERROR_FAILED_TO_PROCESS_QUEUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INIT_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SCHEDULE_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_KILL_TASK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DISABLE_TIMER)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_TIMER)
+PVRE(PVRSRV_ERROR_UNKNOWN_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_HANDLE_INDEX_OUT_OF_RANGE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_HANDLE_TYPE_MISMATCH)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_HANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_SHAREABLE)
+PVRE(PVRSRV_ERROR_HANDLE_NOT_FOUND)
+PVRE(PVRSRV_ERROR_INVALID_SUBHANDLE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_IN_USE)
+PVRE(PVRSRV_ERROR_HANDLE_BATCH_COMMIT_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_CREATE_HASH_TABLE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RETRIEVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_REMOVE_HASH_VALUE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_INSERT_HASH_VALUE)
+PVRE(PVRSRV_ERROR_INSERT_HASH_TABLE_DATA_FAILED)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_BACKING_STORE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_DESTROY_BM_HEAP)
+PVRE(PVRSRV_ERROR_UNKNOWN_INIT_SERVER_STATE)
+PVRE(PVRSRV_ERROR_NO_FREE_DEVICEIDS_AVAILABLE)
+PVRE(PVRSRV_ERROR_INVALID_DEVICEID)
+PVRE(PVRSRV_ERROR_DEVICEID_NOT_FOUND)
+PVRE(PVRSRV_ERROR_MEMORY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_CPUPADDR_TEST_FAILED)
+PVRE(PVRSRV_ERROR_COPY_TEST_FAILED)
+PVRE(PVRSRV_ERROR_SEMAPHORE_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CLOCK)
+PVRE(PVRSRV_ERROR_CLOCK_REQUEST_FAILED)
+PVRE(PVRSRV_ERROR_DISABLE_CLOCK_FAILURE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ROUND_CLOCK_RATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ENABLE_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_PARENT_CLOCK)
+PVRE(PVRSRV_ERROR_UNABLE_TO_GET_SYSTEM_CLOCK)
+PVRE(PVRSRV_ERROR_UNKNOWN_SGL_ERROR)
+PVRE(PVRSRV_ERROR_SYSTEM_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE)
+PVRE(PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+PVRE(PVRSRV_ERROR_BAD_SYNC_STATE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SET_CACHE_MODE)
+PVRE(PVRSRV_ERROR_FAILED_TO_ALLOC_MMUCONTEXT_ID)
+PVRE(PVRSRV_ERROR_PARAMETER_BUFFER_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ACQUIRE_CONNECTION)
+PVRE(PVRSRV_ERROR_UNABLE_TO_RELEASE_CONNECTION)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_IN_USE)
+PVRE(PVRSRV_ERROR_PHYSHEAP_ID_INVALID)
+PVRE(PVRSRV_ERROR_HP_REQUEST_TOO_LONG)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_PRIM_OP)
+PVRE(PVRSRV_ERROR_INVALID_SYNC_CONTEXT)
+PVRE(PVRSRV_ERROR_BP_NOT_SET)
+PVRE(PVRSRV_ERROR_BP_ALREADY_SET)
+PVRE(PVRSRV_ERROR_FEATURE_DISABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_ENABLED)
+PVRE(PVRSRV_ERROR_REG_CONFIG_FULL)
+PVRE(PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE)
+PVRE(PVRSRV_ERROR_MEMORY_ACCESS)
+PVRE(PVRSRV_ERROR_NO_SYSTEM_BUFFER)
+PVRE(PVRSRV_ERROR_DC_INVALID_CONFIG)
+PVRE(PVRSRV_ERROR_DC_INVALID_CROP_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_DISPLAY_RECT)
+PVRE(PVRSRV_ERROR_DC_INVALID_BUFFER_DIMS)
+PVRE(PVRSRV_ERROR_DC_INVALID_TRANSFORM)
+PVRE(PVRSRV_ERROR_DC_INVALID_SCALE)
+PVRE(PVRSRV_ERROR_DC_INVALID_CUSTOM)
+PVRE(PVRSRV_ERROR_DC_TOO_MANY_PIPES)
+PVRE(PVRSRV_ERROR_DC_INVALID_PLANE_ALPHA)
+PVRE(PVRSRV_ERROR_NOT_READY)
+PVRE(PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_PIXEL_FORMAT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_DIMS)
+PVRE(PVRSRV_ERROR_UNSUPPORTED_CACHE_MODE)
+PVRE(PVRSRV_ERROR_UNABLE_TO_ADD_TIMER)
+PVRE(PVRSRV_ERROR_NOT_FOUND)
+PVRE(PVRSRV_ERROR_ALREADY_OPEN)
+PVRE(PVRSRV_ERROR_STREAM_MISUSE)
+PVRE(PVRSRV_ERROR_STREAM_FULL)
+PVRE(PVRSRV_ERROR_PHYSMEM_NOT_ALLOCATED)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_PBSIZE_ALREADY_MIN)
+PVRE(PVRSRV_ERROR_INVALID_PB_CONFIG)
+PVRE(PVRSRV_ERROR_META_THREAD0_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_NOT_AUTHENTICATED)
+PVRE(PVRSRV_ERROR_REQUEST_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDFWCODE_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_REQUEST_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_INIT_TDSECUREBUF_PAGES_FAIL)
+PVRE(PVRSRV_ERROR_MUTEX_ALREADY_CREATED)
+PVRE(PVRSRV_ERROR_DBGTABLE_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_ALREADY_EXISTS)
+PVRE(PVRSRV_ERROR_UNABLE_TO_SEND_PULSE)
+PVRE(PVRSRV_ERROR_TASK_FAILED)
+PVRE(PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED)
+PVRE(PVRSRV_ERROR_INVALID_GPU_ADDR)
+PVRE(PVRSRV_ERROR_INVALID_OFFSET)
+PVRE(PVRSRV_ERROR_CCCB_STALLED)
+PVRE(PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE)
+PVRE(PVRSRV_ERROR_NOT_ENABLED)
+PVRE(PVRSRV_ERROR_SYSTEM_LOCAL_MEMORY_INIT_FAIL)
+PVRE(PVRSRV_ERROR_FW_IMAGE_MISMATCH)
+PVRE(PVRSRV_ERROR_PDUMP_NOT_ALLOWED)
+PVRE(PVRSRV_ERROR_BRIDGE_BUFFER_TOO_SMALL)
+PVRE(PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX)
+PVRE(PVRSRV_ERROR_NONZERO_REFCOUNT)
+PVRE(PVRSRV_ERROR_SETAFFINITY_FAILED)
+PVRE(PVRSRV_ERROR_INTERNAL_ERROR)
+PVRE(PVRSRV_ERROR_BRIDGE_EFAULT)
+PVRE(PVRSRV_ERROR_BRIDGE_EINVAL)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOMEM)
+PVRE(PVRSRV_ERROR_BRIDGE_ERANGE)
+PVRE(PVRSRV_ERROR_BRIDGE_EPERM)
+PVRE(PVRSRV_ERROR_BRIDGE_ENOTTY)
+PVRE(PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_PROBE_DEFER)
+PVRE(PVRSRV_ERROR_INVALID_ALIGNMENT)
+PVRE(PVRSRV_ERROR_CLOSE_FAILED)
+PVRE(PVRSRV_ERROR_NOT_INITIALISED)
+PVRE(PVRSRV_ERROR_CONVERSION_FAILED)
+PVRE(PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL)
+PVRE(PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED)
+PVRE(PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED)
+PVRE(PVRSRV_ERROR_OBJECT_STILL_REFERENCED)
+PVRE(PVRSRV_ERROR_BVNC_UNSUPPORTED)
+PVRE(PVRSRV_ERROR_INVALID_BVNC_PARAMS)
+PVRE(PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE)
+PVRE(PVRSRV_ERROR_DEVICEMEM_ADDITIONAL_HEAPS_IN_CONTEXT)
+PVRE(PVRSRV_ERROR_PID_ALREADY_REGISTERED)
+PVRE(PVRSRV_ERROR_PID_NOT_REGISTERED)
+PVRE(PVRSRV_ERROR_SIGNAL_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_NOTIF_STREAM)
+PVRE(PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED)
+PVRE(PVRSRV_ERROR_INVALID_PVZ_CONFIG)
+PVRE(PVRSRV_ERROR_INVALID_PVZ_OSID)
+PVRE(PVRSRV_ERROR_PVZ_OSID_IS_ONLINE)
+PVRE(PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE)
+PVRE(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED)
+PVRE(PVRSRV_ERROR_NOT_SW_TIMELINE)
+PVRE(PVRSRV_ERROR_SW_TIMELINE_AT_LATEST_POINT)
+PVRE(PVRSRV_ERROR_PDUMP_INVALID_BLOCKLEN)
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_memallocflags.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_memallocflags.h
new file mode 100644
index 00000000000000..a96b53c8491b34
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_memallocflags.h
@@ -0,0 +1,622 @@
+/*************************************************************************/ /*!
+@File
+@Title Device Memory Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This file defines flags used on memory allocations and mappings
+ These flags are relevant throughout the memory management
+ software stack and are specified by users of services and
+ understood by all levels of the memory management in both
+ client and server.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef PVRSRV_MEMALLOCFLAGS_H
+#define PVRSRV_MEMALLOCFLAGS_H
+
+#include "img_types.h"
+#include "rgx_memallocflags.h"
+typedef IMG_UINT32 PVRSRV_MEMALLOCFLAGS_T;
+
+/*
+ * --- MAPPING FLAGS ---
+ * | 0-3 | 4-7 | 8-10 | 11-13 | 14 |
+ * | GPU-RW | CPU-RW | GPU-Caching | CPU-Caching | KM-Mappable |
+ *
+ * --- MISC FLAGS ---
+ * | 15 | 16 | 17 | 18 | 19 | 20 |
+ * | Defer | CPU-Local | FW-Local | SVM | Sparse-Dummy-Page | CPU-Cache-Clean |
+ *
+ * --- DEV CONTROL FLAGS ---
+ * | 24-27 |
+ * | Device-Flags |
+ *
+ * --- MEMSET FLAGS ---
+ * | 29 | 30 | 31 |
+ * | Poison-On-Free | P.-On-Alloc | Zero-On-Alloc |
+ *
+ */
+
+/*!
+ * **********************************************************
+ * * *
+ * * MAPPING FLAGS *
+ * * *
+ * **********************************************************
+ */
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be read by the GPU (is this always true?)
+ *
+ * Typically all device memory allocations would specify this flag.
+ *
+ * At the moment, memory allocations without this flag are not supported
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag. It specifies that memory is permitted
+ * to be read by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a readable mapping
+ *
+ * To be clear:
+ * - When used as an argument on PMR creation; it specifies
+ * that GPU readable mappings will be _permitted_
+ * - When used as an argument to a "map" function: it specifies
+ * that a GPU readable mapping is _desired_
+ * - When used as an argument to "AllocDeviceMem": it specifies
+ * that the PMR will be created with permission to be mapped
+ * with a GPU readable mapping, _and_ that this PMR will be
+ * mapped with a GPU readable mapping.
+ * This distinction becomes important when (a) we export allocations;
+ * and (b) when we separate the creation of the PMR from the mapping.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_READABLE (1U<<0)
+#define PVRSRV_CHECK_GPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READABLE) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ *
+ * This flag affects the device MMU protection flags, and specifies
+ * that the memory may be written by the GPU
+ *
+ * Using this flag on an allocation signifies that the allocation is
+ * intended to be written by the GPU.
+ *
+ * Omitting this flag causes a read-only mapping.
+ *
+ * This flag will live with the PMR, thus subsequent mappings would
+ * honour this flag.
+ *
+ * This is a dual purpose flag. It specifies that memory is permitted
+ * to be written by the GPU, and also requests that the allocation is
+ * mapped into the GPU as a writable mapping (see note above about
+ * permission vs. mapping mode, and why this flag causes permissions
+ * to be inferred from mapping mode on first allocation)
+ *
+ * N.B. This flag has no relevance to the CPU's MMU mapping, if any,
+ * and would therefore not enforce read-only mapping on CPU.
+ */
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE (1U<<1) /*!< mapped as writable to the GPU */
+#define PVRSRV_CHECK_GPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED (1U<<2) /*!< can be mapped as GPU readable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED (1U<<3) /*!< can be mapped as GPU writable in another GPU mem context */
+#define PVRSRV_CHECK_GPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READABLE (1U<<4) /*!< mapped as readable to the CPU */
+#define PVRSRV_CHECK_CPU_READABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE (1U<<5) /*!< mapped as writable to the CPU */
+#define PVRSRV_CHECK_CPU_WRITEABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED (1U<<6) /*!< can be mapped as CPU readable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_READ_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED) != 0)
+
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED (1U<<7) /*!< can be mapped as CPU writable in another CPU mem context */
+#define PVRSRV_CHECK_CPU_WRITE_PERMITTED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED) != 0)
+
+
+/*
+ * **********************************************************
+ * * *
+ * * CACHE CONTROL FLAGS *
+ * * *
+ * **********************************************************
+ */
+
+/*
+ GPU domain
+ ==========
+
+ The following defines are used to control the GPU cache bit field.
+ The defines are mutually exclusive.
+
+ A helper macro, PVRSRV_GPU_CACHE_MODE, is provided to obtain just the GPU cache
+ bit field from the flags. This should be used whenever the GPU cache mode
+ needs to be determined.
+*/
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_UNCACHED
+
+ GPU domain. Request uncached memory. This means that any writes to memory
+ allocated with this flag are written straight to memory and thus are coherent
+ for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_UNCACHED (0U<<8)
+#define PVRSRV_CHECK_GPU_UNCACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE
+
+ GPU domain. Use write combiner (if supported) to combine sequential writes
+ together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE (1U<<8)
+#define PVRSRV_CHECK_GPU_WRITE_COMBINE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT
+
+ GPU domain. This flag affects the GPU MMU protection flags.
+ The allocation will be cached.
+ Services will try to set the coherent bit in the GPU MMU tables so the
+ GPU cache is snooping the CPU cache. If coherency is not supported the
+ caller is responsible to ensure the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT (2U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_COHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT
+
+ GPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+ Services will flush the GPU internal caches after every GPU task so no
+ cache maintenance requests from the users are necessary.
+
+ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+ expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT (3U<<8)
+#define PVRSRV_CHECK_GPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHED
+
+ GPU domain. This flag is for internal use only and is used to indicate
+ that the underlying allocation should be cached on the GPU
+ after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHED (7U<<8)
+#define PVRSRV_CHECK_GPU_CACHED(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_GPU_CACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK
+
+ GPU domain. GPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK (7U<<8)
+#define PVRSRV_GPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+
+/*
+ CPU domain
+ ==========
+
+ The following defines are used to control the CPU cache bit field.
+ The defines are mutually exclusive.
+
+ A helper macro, PVRSRV_CPU_CACHE_MODE, is provided to obtain just the CPU cache
+ bit field from the flags. This should be used whenever the CPU cache mode
+ needs to be determined.
+*/
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_UNCACHED
+
+ CPU domain. Request uncached memory. This means that any writes to memory
+ allocated with this flag are written straight to memory and thus are coherent
+ for any device in the system.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_UNCACHED (0U<<11)
+#define PVRSRV_CHECK_CPU_UNCACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE
+
+ CPU domain. Use write combiner (if supported) to combine sequential writes
+ together to reduce memory access by doing burst writes.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE (1U<<11)
+#define PVRSRV_CHECK_CPU_WRITE_COMBINE(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT
+
+ CPU domain. This flag affects the CPU MMU protection flags.
+ The allocation will be cached.
+ Services will try to set the coherent bit in the CPU MMU tables so the
+ CPU cache is snooping the GPU cache. If coherency is not supported the
+ caller is responsible to ensure the caches are up to date.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT (2U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_COHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT
+
+ CPU domain. Request cached memory, but not coherent (i.e. no cache snooping).
+ This means that if the allocation needs to transition from one device
+ to another services has to be informed so it can flush/invalidate the
+ appropriate caches.
+
+ Note: We reserve 3 bits in the CPU/GPU cache mode to allow for future
+ expansion.
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT (3U<<11)
+#define PVRSRV_CHECK_CPU_CACHE_INCOHERENT(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHED
+
+ CPU domain. This flag is for internal use only and is used to indicate
+ that the underlying allocation should be cached on the CPU
+ after all the snooping and coherent checks have been done
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHED (7U<<11)
+#define PVRSRV_CHECK_CPU_CACHED(uiFlags) (PVRSRV_CPU_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CPU_CACHED)
+
+/*!
+ CPU domain. CPU cache mode mask
+*/
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK (7U<<11)
+#define PVRSRV_CPU_CACHE_MODE(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/* Helper flags for usual cases */
+
+/*! PVRSRV_MEMALLOCFLAG_UNCACHED
+ * Memory will be uncached on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_UNCACHED (PVRSRV_MEMALLOCFLAG_GPU_UNCACHED | PVRSRV_MEMALLOCFLAG_CPU_UNCACHED)
+#define PVRSRV_CHECK_UNCACHED(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_UNCACHED)
+
+/*! PVRSRV_MEMALLOCFLAG_WRITE_COMBINE
+ * Memory will be write-combined on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_WRITE_COMBINE (PVRSRV_MEMALLOCFLAG_GPU_WRITE_COMBINE | PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE)
+#define PVRSRV_CHECK_WRITE_COMBINE(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_WRITE_COMBINE)
+
+/*! PVRSRV_MEMALLOCFLAG_CACHE_COHERENT
+ * Memory will be cached on CPU and GPU
+ * Services will try to set the correct flags in the MMU tables.
+ * If coherency is not supported, the caller must ensure the caches are kept up to date.
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_COHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_COHERENT)
+#define PVRSRV_CHECK_CACHE_COHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_COHERENT)
+
+/*! PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT
+ * Memory will be cache-incoherent on CPU and GPU
+ */
+#define PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT (PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT)
+#define PVRSRV_CHECK_CACHE_INCOHERENT(uiFlags) (PVRSRV_CACHE_MODE(uiFlags) == PVRSRV_MEMALLOCFLAG_CACHE_INCOHERENT)
+
+/*!
+ Cache mode mask
+*/
+#define PVRSRV_CACHE_MODE(uiFlags) (PVRSRV_GPU_CACHE_MODE(uiFlags) | PVRSRV_CPU_CACHE_MODE(uiFlags))
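+
+/* Illustrative sketch (not part of this patch): the GPU and CPU cache
+ * settings are 3-bit mutually exclusive mode fields, not independent bits,
+ * so callers compare the masked field against a mode value rather than
+ * testing single bits. Assuming a 32-bit flags word and a GPU-side
+ * PVRSRV_CHECK_GPU_UNCACHED macro defined symmetrically earlier in this
+ * header:
+ *
+ *   static IMG_BOOL IsFullyUncached(IMG_UINT32 uiFlags)
+ *   {
+ *       return PVRSRV_CHECK_GPU_UNCACHED(uiFlags) &&
+ *              PVRSRV_CHECK_CPU_UNCACHED(uiFlags);
+ *   }
+ *
+ * Assuming the GPU UNCACHED mode value is zero like the CPU one, this is
+ * equivalent to PVRSRV_CHECK_UNCACHED(uiFlags), since PVRSRV_CACHE_MODE()
+ * ORs the two extracted mode fields together.
+ */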
+
+
+/*!
+ CPU MMU Flags mask -- intended for use internal to services only
+ */
+#define PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK)
+
+/*!
+ MMU Flags mask -- intended for use internal to services only - used
+ for partitioning the flag bits and determining which flags to pass
+ down to mmu_common.c
+ */
+#define PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK (PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_MODE_MASK)
+
+/*!
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+
+ Indicates that the PMR created due to this allocation will support
+ in-kernel CPU mappings. Only privileged processes may use this
+ flag as it may cause wastage of precious kernel virtual memory on
+ some platforms.
+ */
+#define PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE (1U<<14)
+#define PVRSRV_CHECK_KERNEL_CPU_MAPPABLE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE) != 0)
+
+
+
+/*
+ *
+ * **********************************************************
+ * * *
+ * * ALLOC MEMORY FLAGS *
+ * * *
+ * **********************************************************
+ *
+ * (Bits 15)
+ *
+ */
+#define PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC (1U<<15)
+#define PVRSRV_CHECK_ON_DEMAND(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC) != 0)
+
+/*!
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL
+
+ Indicates that the allocation will primarily be accessed by
+ the CPU, so a UMA allocation (if available) is preferable.
+ If not set, the allocation will primarily be accessed by
+ the GPU, so LMA allocation (if available) is preferable.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_LOCAL (1U<<16)
+#define PVRSRV_CHECK_CPU_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_LOCAL) != 0)
+
+
+/*!
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL
+
+ Indicates that the allocation will primarily be accessed by
+ the FW.
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_LOCAL (1U<<17)
+#define PVRSRV_CHECK_FW_LOCAL(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_LOCAL) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SVM
+
+ Indicates that the allocation will be accessed by the
+ CPU and GPU using the same virtual address, i.e. for
+ all SVM allocs, IMG_CPU_VIRTADDR == IMG_DEV_VIRTADDR
+ */
+#define PVRSRV_MEMALLOCFLAG_SVM_ALLOC (1U<<18)
+#define PVRSRV_CHECK_SVM_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SVM_ALLOC) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING
+
+ Indicates that the memory being allocated is sparse and that the sparse
+ regions should not be backed by a dummy page. */
+#define PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING (1U << 19)
+#define PVRSRV_IS_SPARSE_DUMMY_BACKING_REQUIRED(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING) == 0)
+
+/*! PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN
+
+ Services will clean the cache for the allocated memory.
+ For performance reasons, avoid this flag if the allocation is written to
+ by the CPU anyway before the next GPU kick.
+ */
+#define PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN (1U<<20)
+#define PVRSRV_CHECK_CPU_CACHE_CLEAN(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_FW_CONFIG
+ *
+ * Indicates that the allocation will live in the FW Config heap, which
+ * resides right after the end of the FW Main heap.
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_CONFIG (1U<<21)
+#define PVRSRV_CHECK_FW_CONFIG(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_CONFIG) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_FW_GUEST
+ *
+ * Indicates that the particular allocation is being mapped into FW by
+ * the privileged OSID-0 (i.e. host/primary) driver on behalf of an
+ * unprivileged guest OSID-x (i.e. OSID-1 up to OSID-7) driver
+ */
+#define PVRSRV_MEMALLOCFLAG_FW_GUEST (1U<<22)
+#define PVRSRV_CHECK_FW_GUEST(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_FW_GUEST) != 0)
+
+/*
+ *
+ * **********************************************************
+ * * *
+ * * MEMORY ZEROING AND POISONING FLAGS *
+ * * *
+ * **********************************************************
+ *
+ * Zero / Poison, on alloc/free
+ *
+ * We think the following use cases are required:
+ *
+ * don't poison or zero on alloc or free
+ * (normal operation, also most efficient)
+ * poison on alloc
+ * (for helping to highlight bugs)
+ * poison on alloc and free
+ * (for helping to highlight bugs)
+ * zero on alloc
+ * (avoids exposing stale data from previous uses of the memory)
+ * zero on alloc and poison on free
+ * (avoids exposing stale data from previous uses of the memory,
+ * while helping to highlight a subset of bugs, e.g. memory
+ * freed prematurely)
+ *
+ * Since there are more than 4, we can't encode this in just two bits,
+ * so we might as well have a separate flag for each of the three
+ * actions.
+ */
+
+/*! PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+
+ Ensures that the memory allocated is initialised with zeroes.
+ */
+#define PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC (1U<<31)
+#define PVRSRV_CHECK_ZERO_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC) != 0)
+#define PVRSRV_GET_ZERO_ON_ALLOC_FLAG(uiFlags) ((uiFlags) & PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
+
+/*! PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+
+ Scribbles over the allocated memory with a poison value
+
+ Not compatible with ZERO_ON_ALLOC
+
+ Poisoning is very deliberately _not_ reflected in PDump, as we want a
+ simulation to cry loudly if the poison-initialised data propagates to a
+ result.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC (1U<<30)
+#define PVRSRV_CHECK_POISON_ON_ALLOC(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC) != 0)
+
+/*! PVRSRV_MEMALLOCFLAG_POISON_ON_FREE
+
+ Causes memory to be trashed when freed, as a lazy man's security
+ measure.
+ */
+#define PVRSRV_MEMALLOCFLAG_POISON_ON_FREE (1U<<29)
+#define PVRSRV_CHECK_POISON_ON_FREE(uiFlags) (((uiFlags) & PVRSRV_MEMALLOCFLAG_POISON_ON_FREE) != 0)
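+
+/* Illustrative sketch (not part of this patch): a hypothetical debug build
+ * switch could select the "zero on alloc and poison on free" use case
+ * described above, which avoids exposing stale data while still catching
+ * use-after-free bugs:
+ *
+ *   #if defined(MY_DEBUG_BUILD)   // hypothetical build flag
+ *   #define MY_ALLOC_SAFETY_FLAGS (PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ *                                  PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+ *   #else
+ *   #define MY_ALLOC_SAFETY_FLAGS (0U)
+ *   #endif
+ *
+ * Note that ZERO_ON_ALLOC and POISON_ON_ALLOC must not be combined, as
+ * stated above.
+ */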
+
+/*
+ *
+ * **********************************************************
+ * * *
+ * * Device specific MMU flags *
+ * * *
+ * **********************************************************
+ *
+ * (Bits 24 to 27)
+ *
+ * Some Services-controlled devices have device-specific control
+ * bits in their page table entries; these flags must be allowed
+ * to pass down through the memory management layers so the user
+ * can control those bits.
+ */
+
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET 24
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK 0x0f000000UL
+#define PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(n) \
+ (((n) << PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_OFFSET) & \
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK)
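+
+/* Worked example (illustrative): PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(1) yields
+ * 0x01000000 and PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(8) yields 0x08000000;
+ * values of n wider than 4 bits are masked off by
+ * PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK rather than spilling into
+ * neighbouring flag bits.
+ */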
+
+
+/*!
+ * Secure buffer mask -- Flags in the mask are allowed for secure buffers
+ * because they are not related to CPU mappings.
+ */
+#define PVRSRV_MEMALLOCFLAGS_SECBUFMASK ~(PVRSRV_MEMALLOCFLAG_CPU_CACHE_MODE_MASK | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED)
+
+
+
+/*!
+ PMR flags mask -- for internal services use only. This is the set
+ of flags that will be passed down and stored with the PMR; it also
+ includes the MMU flags which the PMR has to pass down to mmu_common.c
+ at PMRMap time.
+*/
+#define PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE | \
+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | \
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL | \
+ PVRSRV_MEMALLOCFLAG_FW_CONFIG | \
+ PVRSRV_MEMALLOCFLAG_FW_GUEST | \
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL)
+
+/*!
+ RA differentiation mask
+
+ for use internal to services
+
+ this is the set of flag bits that determine whether a
+ pair of allocations are permitted to live in the same page table.
+ Allocations whose flags differ in any of these bits would be
+ allocated from separate RA Imports and therefore would never coexist
+ in the same page.
+ Zeroing and poisoning of memory are special cases: the caller is
+ responsible for setting the sub-allocations to the desired values.
+ Differentiating between zeroed and poisoned RA Imports would not
+ make sense because the memory might be reused.
+
+*/
+#define PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK (PVRSRV_MEMALLOCFLAGS_PMRFLAGSMASK \
+ & \
+ ~(PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC))
+
+/*!
+ Flags that affect _allocation_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERALLOCFLAGSMASK (0xFFFFFFFFU)
+
+/*!
+ Flags that affect _mapping_
+*/
+#define PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK (PVRSRV_MEMALLOCFLAG_DEVICE_FLAGS_MASK | \
+ PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_NO_OSPAGES_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SVM_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING)
+
+#if ((~(PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK) & PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK) != 0)
+#error PVRSRV_MEMALLOCFLAGS_PERMAPPINGFLAGSMASK is not a subset of PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK
+#endif
+
+
+/*!
+ Flags that affect _physical allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_PHYSICAL_MASK (PVRSRV_MEMALLOCFLAGS_CPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_CPU_READ_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_CLEAN | \
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL | \
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC | \
+ PVRSRV_MEMALLOCFLAG_POISON_ON_FREE)
+
+/*!
+ Flags that affect _virtual allocations_ in the DevMemX API
+ */
+#define PVRSRV_MEMALLOCFLAGS_DEVMEMX_VIRTUAL_MASK (PVRSRV_MEMALLOCFLAGS_GPU_MMUFLAGSMASK | \
+ PVRSRV_MEMALLOCFLAG_GPU_READ_PERMITTED | \
+ PVRSRV_MEMALLOCFLAG_GPU_WRITE_PERMITTED)
+
+#endif /* #ifndef PVRSRV_MEMALLOCFLAGS_H */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.c b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.c
new file mode 100644
index 00000000000000..3889912e694a9a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.c
@@ -0,0 +1,266 @@
+/**************************************************************************/ /*!
+@File
+@Title Services pool implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides a generic pool implementation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "lock.h"
+#include "dllist.h"
+#include "allocmem.h"
+
+struct _PVRSRV_POOL_
+{
+ POS_LOCK hLock;
+ /* total max number of permitted entries in the pool */
+ IMG_UINT uiMaxEntries;
+ /* number of entries currently in use, i.e. handed out via
+ * PVRSRVPoolGet and not yet returned via PVRSRVPoolPut
+ */
+ IMG_UINT uiNumBusy;
+ /* number of not-in-use entries currently free in the pool */
+ IMG_UINT uiNumFree;
+
+ DLLIST_NODE sFreeList;
+
+ const IMG_CHAR *pszName;
+
+ PVRSRV_POOL_ALLOC_FUNC *pfnAlloc;
+ PVRSRV_POOL_FREE_FUNC *pfnFree;
+ void *pvPrivData;
+};
+
+typedef struct _PVRSRV_POOL_ENTRY_
+{
+ DLLIST_NODE sNode;
+ void *pvData;
+} PVRSRV_POOL_ENTRY;
+
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+ PVRSRV_POOL_FREE_FUNC *pfnFree,
+ IMG_UINT32 ui32MaxEntries,
+ const IMG_CHAR *pszName,
+ void *pvPrivData,
+ PVRSRV_POOL **ppsPool)
+{
+ PVRSRV_POOL *psPool;
+ PVRSRV_ERROR eError;
+
+ psPool = OSAllocMem(sizeof(PVRSRV_POOL));
+
+ if (psPool == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_alloc;
+ }
+
+ eError = OSLockCreate(&psPool->hLock, LOCK_TYPE_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto err_lock_create;
+ }
+
+ psPool->uiMaxEntries = ui32MaxEntries;
+ psPool->uiNumBusy = 0;
+ psPool->uiNumFree = 0;
+ psPool->pfnAlloc = pfnAlloc;
+ psPool->pfnFree = pfnFree;
+ psPool->pvPrivData = pvPrivData;
+ psPool->pszName = pszName;
+
+ dllist_init(&psPool->sFreeList);
+
+ *ppsPool = psPool;
+
+ return PVRSRV_OK;
+
+err_lock_create:
+ OSFreeMem(psPool);
+err_alloc:
+ return eError;
+}
+
+static PVRSRV_ERROR _DestroyPoolEntry(PVRSRV_POOL *psPool,
+ PVRSRV_POOL_ENTRY *psEntry)
+{
+ psPool->pfnFree(psPool->pvPrivData, psEntry->pvData);
+ OSFreeMem(psEntry);
+
+ return PVRSRV_OK;
+}
+
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool)
+{
+ if (psPool->uiNumBusy != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Attempt to destroy pool %s "
+ "with %u entries still in use",
+ __func__,
+ psPool->pszName,
+ psPool->uiNumBusy));
+ return;
+ }
+
+ OSLockDestroy(psPool->hLock);
+
+ while (psPool->uiNumFree)
+ {
+ PVRSRV_POOL_ENTRY *psEntry;
+ DLLIST_NODE *psChosenNode;
+
+ psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+ dllist_remove_node(psChosenNode);
+
+ psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+ _DestroyPoolEntry(psPool, psEntry);
+
+ psPool->uiNumFree--;
+ }
+
+ OSFreeMem(psPool);
+}
+
+static PVRSRV_ERROR _CreateNewPoolEntry(PVRSRV_POOL *psPool,
+ PVRSRV_POOL_ENTRY **ppsEntry)
+{
+ PVRSRV_POOL_ENTRY *psNewEntry;
+ PVRSRV_ERROR eError;
+
+ psNewEntry = OSAllocMem(sizeof(PVRSRV_POOL_ENTRY));
+
+ if (psNewEntry == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_allocmem;
+ }
+
+ dllist_init(&psNewEntry->sNode);
+
+ eError = psPool->pfnAlloc(psPool->pvPrivData, &psNewEntry->pvData);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto err_pfn_alloc;
+ }
+
+ *ppsEntry = psNewEntry;
+
+ return PVRSRV_OK;
+
+err_pfn_alloc:
+ OSFreeMem(psNewEntry);
+err_allocmem:
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+ PVRSRV_POOL_TOKEN *hToken,
+ void **ppvDataOut)
+{
+ PVRSRV_POOL_ENTRY *psEntry;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ OSLockAcquire(psPool->hLock);
+
+ /* check if we already have a free element ready */
+ if (psPool->uiNumFree)
+ {
+ DLLIST_NODE *psChosenNode;
+ psChosenNode = dllist_get_next_node(&psPool->sFreeList);
+ dllist_remove_node(psChosenNode);
+
+ psEntry = IMG_CONTAINER_OF(psChosenNode, PVRSRV_POOL_ENTRY, sNode);
+
+ psPool->uiNumFree--;
+ }
+ else
+ {
+ /* no available elements in the pool. try to create one */
+
+ eError = _CreateNewPoolEntry(psPool, &psEntry);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto out_unlock;
+ }
+ }
+
+ psPool->uiNumBusy++;
+ *hToken = psEntry;
+ *ppvDataOut = psEntry->pvData;
+
+out_unlock:
+ OSLockRelease(psPool->hLock);
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool, PVRSRV_POOL_TOKEN hToken)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_POOL_ENTRY *psEntry = hToken;
+
+ PVR_ASSERT(psPool->uiNumBusy > 0);
+
+ OSLockAcquire(psPool->hLock);
+
+ /* put this entry in the pool if the pool has space,
+ * otherwise free it
+ */
+ if (psPool->uiNumFree < psPool->uiMaxEntries)
+ {
+ dllist_add_to_tail(&psPool->sFreeList, &psEntry->sNode);
+ psPool->uiNumFree++;
+ }
+ else
+ {
+ eError = _DestroyPoolEntry(psPool, psEntry);
+ }
+
+ psPool->uiNumBusy--;
+
+ OSLockRelease(psPool->hLock);
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.h
new file mode 100644
index 00000000000000..71a204fe4e5137
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_pool.h
@@ -0,0 +1,135 @@
+/**************************************************************************/ /*!
+@File
+@Title Services pool implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides a generic pool implementation.
+ The pool allows entries to be retrieved and returned
+ dynamically using the PVRSRVPoolGet/PVRSRVPoolPut function
+ pair. Entries are created lazily, i.e. not until first use.
+ The pool API takes an allocation/free function pair that
+ allocates an entry's private data and returns it to the
+ caller on every 'Get'. The pool keeps up to ui32MaxEntries
+ entries allocated; any entry 'Put' back beyond that limit
+ is freed on the spot instead of being returned to the pool.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__PVRSRVPOOL_H__)
+#define __PVRSRVPOOL_H__
+
+/**************************************************************************/ /*!
+ @Description Callback function called during creation of the new element. This
+ function allocates an object that will be stored in the pool.
+ The object can be retrieved from the pool by calling
+ PVRSRVPoolGet.
+ @Input pvPrivData Private data passed to the alloc function.
+ @Output pvOut Allocated object.
+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+typedef PVRSRV_ERROR (PVRSRV_POOL_ALLOC_FUNC)(void *pvPrivData, void **pvOut);
+
+/**************************************************************************/ /*!
+ @Description Callback function called to free the object allocated by
+ the counterpart alloc function.
+ @Input pvPrivData Private data passed to the free function.
+ @Output pvFreeData Object allocated by PVRSRV_POOL_ALLOC_FUNC.
+*/ /***************************************************************************/
+typedef void (PVRSRV_POOL_FREE_FUNC)(void *pvPrivData, void *pvFreeData);
+
+typedef IMG_HANDLE PVRSRV_POOL_TOKEN;
+
+typedef struct _PVRSRV_POOL_ PVRSRV_POOL;
+
+/**************************************************************************/ /*!
+ @Function PVRSRVPoolCreate
+ @Description Creates a new buffer pool.
+ @Input pfnAlloc Allocation function pointer. Function is used
+ to allocate new pool entries' data.
+ @Input pfnFree Free function pointer. Function is used to
+ free memory allocated by pfnAlloc function.
+ @Input ui32MaxEntries Total maximum number of entries in the pool.
+ @Input pszName Name of the pool. String has to be NULL
+ terminated.
+ @Input pvPrivData Private data that will be passed to pfnAlloc and
+ pfnFree functions.
+ @Output ppsPool New buffer pool object.
+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolCreate(PVRSRV_POOL_ALLOC_FUNC *pfnAlloc,
+ PVRSRV_POOL_FREE_FUNC *pfnFree,
+ IMG_UINT32 ui32MaxEntries,
+ const IMG_CHAR *pszName,
+ void *pvPrivData,
+ PVRSRV_POOL **ppsPool);
+
+/**************************************************************************/ /*!
+ @Function PVRSRVPoolDestroy
+ @Description Destroys a pool created by PVRSRVPoolCreate.
+ @Input psPool Buffer pool object meant to be destroyed.
+*/ /***************************************************************************/
+void PVRSRVPoolDestroy(PVRSRV_POOL *psPool);
+
+/**************************************************************************/ /*!
+ @Function PVRSRVPoolGet
+ @Description Retrieves an entry from the pool. If no free elements are
+ available, a new entry will be allocated.
+ @Input psPool Pointer to the pool.
+ @Output hToken Pointer to the entry handle.
+ @Output ppvDataOut Pointer to data stored in the entry (the data
+ allocated by the pfnAlloc function).
+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolGet(PVRSRV_POOL *psPool,
+ PVRSRV_POOL_TOKEN *hToken,
+ void **ppvDataOut);
+
+/**************************************************************************/ /*!
+ @Function PVRSRVPoolPut
+ @Description Returns an entry to the pool. If the number of free entries
+ has already reached the ui32MaxEntries limit set at pool
+ creation, the entry is freed instead.
+ @Input psPool Pointer to the pool.
+ @Input hToken Entry handle.
+ @Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR PVRSRVPoolPut(PVRSRV_POOL *psPool,
+ PVRSRV_POOL_TOKEN hToken);
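+
+/**************************************************************************/ /*!
+ Illustrative usage sketch (not part of this API). MyItemAlloc, MyItemFree,
+ the item size and the pool name below are hypothetical, shown only to
+ demonstrate the intended create/get/put/destroy lifecycle:
+
+   static PVRSRV_ERROR MyItemAlloc(void *pvPriv, void **ppvOut)
+   {
+       (void)pvPriv;                  // private data unused in this sketch
+       *ppvOut = OSAllocMem(1024);    // entry payload; size is arbitrary
+       return (*ppvOut != NULL) ? PVRSRV_OK : PVRSRV_ERROR_OUT_OF_MEMORY;
+   }
+
+   static void MyItemFree(void *pvPriv, void *pvData)
+   {
+       (void)pvPriv;
+       OSFreeMem(pvData);
+   }
+
+   PVRSRV_POOL *psPool;
+   PVRSRV_POOL_TOKEN hToken;
+   void *pvItem;
+   PVRSRV_ERROR eError;
+
+   eError = PVRSRVPoolCreate(MyItemAlloc, MyItemFree, 8, "my_pool",
+                             NULL, &psPool);
+   eError = PVRSRVPoolGet(psPool, &hToken, &pvItem);
+   // ... use pvItem ...
+   eError = PVRSRVPoolPut(psPool, hToken);
+   PVRSRVPoolDestroy(psPool);
+*/ /***************************************************************************/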
+
+#endif /* __PVRSRVPOOL_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_km.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_km.h
new file mode 100644
index 00000000000000..50dbc0959f47c7
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_km.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR synchronization interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Types for server side code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef PVRSRV_SYNC_KM_H
+#define PVRSRV_SYNC_KM_H
+
+#include <powervr/pvrsrv_sync_ext.h>
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* By default the fence-sync module emits into HWPerf (if enabled) and
+ * assumes a process (sleepable) context. */
+#define PVRSRV_FENCE_FLAG_NONE (0U)
+#define PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT (1U << 0)
+#define PVRSRV_FENCE_FLAG_CTX_ATOMIC (1U << 1)
+
+/*! Possible states for a PVRSRV_FENCE */
+typedef enum
+{
+ PVRSRV_FENCE_NOT_SIGNALLED, /*!< fence has not yet signalled (not all components have signalled) */
+ PVRSRV_FENCE_SIGNALLED /*!< fence has signalled (all components have signalled/errored) */
+} PVRSRV_FENCE_STATE;
+
+/* Typedefs for opaque pointers to implementation-specific structures
+ */
+typedef void *SYNC_TIMELINE_OBJ;
+typedef void *SYNC_FENCE_OBJ;
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* PVRSRV_SYNC_KM_H */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_server.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_server.h
new file mode 100644
index 00000000000000..e31c98a54e2e34
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_sync_server.h
@@ -0,0 +1,78 @@
+/**************************************************************************/ /*!
+@File
+@Title Software sync interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _PVRSRV_SYNC_SERVER_H_
+#define _PVRSRV_SYNC_SERVER_H_
+
+#include "img_types.h"
+#include "pvrsrv_sync_km.h"
+
+#define SYNC_SW_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_SW_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+/*****************************************************************************/
+/* */
+/* SW TIMELINE SPECIFIC FUNCTIONS */
+/* */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncSWTimelineFenceCreateKM(PVRSRV_TIMELINE iSWTimeline,
+ IMG_UINT32 ui32NextSyncPtVal,
+ const IMG_CHAR *pszFenceName,
+ PVRSRV_FENCE *piOutputFence);
+
+PVRSRV_ERROR SyncSWTimelineAdvanceKM(SYNC_TIMELINE_OBJ pvSWTimelineObj);
+
+PVRSRV_ERROR SyncSWTimelineReleaseKM(SYNC_TIMELINE_OBJ pvSWTimelineObj);
+
+PVRSRV_ERROR SyncSWTimelineFenceReleaseKM(SYNC_FENCE_OBJ pvSWFenceObj);
+
+PVRSRV_ERROR SyncSWTimelineFenceWaitKM(SYNC_FENCE_OBJ pvSWFenceObj,
+ IMG_UINT32 uiTimeout);
+
+PVRSRV_ERROR SyncSWGetTimelineObj(PVRSRV_TIMELINE iSWTimeline,
+ SYNC_TIMELINE_OBJ *ppvSWTimelineObj);
+
+PVRSRV_ERROR SyncSWGetFenceObj(PVRSRV_FENCE iSWFence,
+ SYNC_FENCE_OBJ *ppvSWFenceObj);
+
+#endif /* _PVRSRV_SYNC_SERVER_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlcommon.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlcommon.h
new file mode 100644
index 00000000000000..0e034a5f82f7bc
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlcommon.h
@@ -0,0 +1,240 @@
+/*************************************************************************/ /*!
+@File
+@Title Services Transport Layer common types and definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common types and definitions included in
+ both user mode and kernel mode source.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __PVR_TLCOMMON_H__
+#define __PVR_TLCOMMON_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+
+/*! Handle type for stream descriptor objects as created by this API */
+typedef IMG_HANDLE PVRSRVTL_SD;
+
+/*! Maximum stream name length including the null byte */
+#define PRVSRVTL_MAX_STREAM_NAME_SIZE 40U
+
+/*! Packet lengths are always rounded up to a multiple of 8 bytes */
+#define PVRSRVTL_PACKET_ALIGNMENT 8U
+#define PVRSRVTL_ALIGN(x) ((x+PVRSRVTL_PACKET_ALIGNMENT-1) & ~(PVRSRVTL_PACKET_ALIGNMENT-1))
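+
+/* Worked example (illustrative): PVRSRVTL_ALIGN(1) == 8,
+ * PVRSRVTL_ALIGN(8) == 8 and PVRSRVTL_ALIGN(13) == 16. */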
+
+
+/*! A packet is made up of a header structure followed by the data bytes.
+ * There are three types of packet: normal (has data), data-lost and
+ * padding; see the packet flags. The header is kept small to reduce data
+ * overhead.
+ *
+ * If the ORDER of the structure members is changed, please UPDATE the
+ * PVRSRVTL_PACKET_FLAG_OFFSET macro.
+ *
+ * Layout of uiTypeSize member is :
+ *
+ * |<---------------------------32-bits------------------------------>|
+ * |<----8---->|<-----1----->|<----7--->|<------------16------------->|
+ * | Type | Drop-Oldest | UNUSED | Size |
+ *
+ */
+typedef struct _PVRSRVTL_PACKETHDR_
+{
+ IMG_UINT32 uiTypeSize; /*!< Type, Drop-Oldest flag & number of bytes following header */
+ IMG_UINT32 uiReserved; /*!< Reserved; packets and data must be 8-byte aligned */
+
+ /* First bytes of TL packet data follow header ... */
+} PVRSRVTL_PACKETHDR, *PVRSRVTL_PPACKETHDR;
+
+/* The structure size must always be a multiple of 8, as the stream buffer
+ * is still an array of IMG_UINT32s.
+ */
+static_assert((sizeof(PVRSRVTL_PACKETHDR) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(PVRSRVTL_PACKETHDR) must be a multiple of 8");
+
+/*! Packet header reserved word fingerprint "TLP1" */
+#define PVRSRVTL_PACKETHDR_RESERVED 0x31504C54U
+
+/*! Packet header mask used to extract the size from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_SIZE_MASK 0x0000FFFFU
+#define PVRSRVTL_MAX_PACKET_SIZE (PVRSRVTL_PACKETHDR_SIZE_MASK & ~0xFU)
+
+
+/*! Packet header mask used to extract the type from the uiTypeSize member.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_TYPE_MASK 0xFF000000U
+#define PVRSRVTL_PACKETHDR_TYPE_OFFSET 24U
+
+/*! Packet header mask used to check if packets before this one were dropped or not.
+ * Do not use directly, see GET macros.
+ */
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK 0x00800000U
+#define PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET 23U
+
+/*! Packet type enumeration.
+ */
+typedef enum _PVRSRVTL_PACKETTYPE_
+{
+ /*! Undefined packet */
+ PVRSRVTL_PACKETTYPE_UNDEF = 0,
+
+ /*! Normal packet type. Indicates data follows the header.
+ */
+ PVRSRVTL_PACKETTYPE_DATA = 1,
+
+ /*! When seen, this packet type indicates that at this point in the stream
+ * one or more packets could not be accepted due to space constraints and
+ * that recent data may be lost, depending on how the producer handles the
+ * error. Such packets carry no data; the data length is 0.
+ */
+ PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED = 2,
+
+ /*! Packets with this type set are padding packets that contain undefined
+ * data and must be ignored/skipped by the client. They are used when the
+ * circular stream buffer wraps around and there is not enough space for
+ * the data at the end of the buffer. Such packets have a length of 0 or
+ * more.
+ */
+ PVRSRVTL_PACKETTYPE_PADDING = 3,
+
+ /*! This packet type conveys to the stream consumer that the stream producer
+ * has reached the end of data for that data sequence. The TLDaemon
+ * has several options for processing these packets that can be selected
+ * on a per stream basis.
+ */
+ PVRSRVTL_PACKETTYPE_MARKER_EOS = 4,
+
+ /*! Packet emitted on first stream opened by writer. Packet carries a name
+ * of the opened stream in the form of a null-terminated string.
+ */
+ PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE = 5,
+
+ /*! Packet emitted on last stream closed by writer. Packet carries a name
+ * of the closed stream in the form of a null-terminated string.
+ */
+ PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE = 6,
+
+ PVRSRVTL_PACKETTYPE_LAST
+} PVRSRVTL_PACKETTYPE;
+
+/* The SET_PACKET_* macros rely on the order in which the PVRSRVTL_PACKETHDR
+ * members are declared: the type and flag bits occupy the upper 16 bits of
+ * uiTypeSize and the size occupies the lower 16 bits.
+ */
+#define PVRSRVTL_SET_PACKET_DATA(len) ((len) | (PVRSRVTL_PACKETTYPE_DATA << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+#define PVRSRVTL_SET_PACKET_PADDING(len) ((len) | (PVRSRVTL_PACKETTYPE_PADDING << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+#define PVRSRVTL_SET_PACKET_WRITE_FAILED ((0) | (PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+#define PVRSRVTL_SET_PACKET_HDR(len,type) ((len) | ((type) << PVRSRVTL_PACKETHDR_TYPE_OFFSET))
+
+/*! Returns the number of bytes of data in the packet. p may be any address
+ * type. */
+#define GET_PACKET_DATA_LEN(p) \
+ ((IMG_UINT32) ((PVRSRVTL_PPACKETHDR)(p))->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK)
+
+
+/*! Returns a IMG_BYTE* pointer to the first byte of data in the packet */
+#define GET_PACKET_DATA_PTR(p) \
+ ((IMG_PBYTE) ( ((size_t)p) + sizeof(PVRSRVTL_PACKETHDR)) )
+
+/*! Given a PVRSRVTL_PPACKETHDR address, return the address of the next packet.
+ * It is up to the caller to determine if the new address is within the packet
+ * buffer.
+ */
+#define GET_NEXT_PACKET_ADDR(p) \
+ ((PVRSRVTL_PPACKETHDR) ( ((IMG_UINT8 *)p) + sizeof(PVRSRVTL_PACKETHDR) + \
+ (((((PVRSRVTL_PPACKETHDR)p)->uiTypeSize & PVRSRVTL_PACKETHDR_SIZE_MASK) + \
+ (PVRSRVTL_PACKET_ALIGNMENT-1)) & (~(PVRSRVTL_PACKET_ALIGNMENT-1)) ) ))
+
+/*! Turns the packet address p into a PVRSRVTL_PPACKETHDR pointer type
+ */
+#define GET_PACKET_HDR(p) ((PVRSRVTL_PPACKETHDR)(p))
+
+/*! Get the type of the packet. p is of type PVRSRVTL_PPACKETHDR
+ */
+#define GET_PACKET_TYPE(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_TYPE_MASK)>>PVRSRVTL_PACKETHDR_TYPE_OFFSET)
+
+/*! Set PACKETS_DROPPED flag in packet header as a part of uiTypeSize. p is of type PVRSRVTL_PPACKETHDR.
+ */
+#define SET_PACKETS_DROPPED(p) (((p)->uiTypeSize) | (1<<PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET))
+
+/*! Check if packets were dropped before this packet. p is of type PVRSRVTL_PPACKETHDR
+ */
+#define CHECK_PACKETS_DROPPED(p) (((p)->uiTypeSize & PVRSRVTL_PACKETHDR_OLDEST_DROPPED_MASK)>>PVRSRVTL_PACKETHDR_OLDEST_DROPPED_OFFSET)
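+
+/* Illustrative sketch (not part of this header): walking the packets in an
+ * acquired buffer using the macros above. pvBuf, uiBufLen and ProcessData
+ * are hypothetical names:
+ *
+ *   PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(pvBuf);
+ *
+ *   while ((IMG_UINT8 *)psHdr < ((IMG_UINT8 *)pvBuf) + uiBufLen)
+ *   {
+ *       if (GET_PACKET_TYPE(psHdr) == PVRSRVTL_PACKETTYPE_DATA)
+ *       {
+ *           ProcessData(GET_PACKET_DATA_PTR(psHdr),
+ *                       GET_PACKET_DATA_LEN(psHdr));
+ *       }
+ *       // Padding and marker packets are skipped by the same stride rule.
+ *       psHdr = GET_NEXT_PACKET_ADDR(psHdr);
+ *   }
+ */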
+
+/*! Flags for use with PVRSRVTLOpenStream
+ * 0x01 - Do not block in PVRSRVTLAcquireData() when no bytes are available
+ * 0x02 - When the stream does not exist, wait for a while (2s) in
+ * PVRSRVTLOpenStream() and then exit with a timeout error if it still
+ * does not exist.
+ * 0x04 - Open the stream for write-only operations.
+ * If this flag is not used, the stream is opened read-only. This flag
+ * is required in order to call the reserve/commit/write functions on
+ * the stream descriptor. Reads on a stream descriptor opened with
+ * this flag will fail.
+ * 0x08 - Disable Producer Callback.
+ * If this flag is set and the stream becomes empty, do not call any
+ * associated producer callback to generate more data from the reader
+ * context.
+ * 0x10 - Reset stream on open.
+ * When this flag is used the stream will drop all of the stored data.
+ * 0x40 - Ignore Open Callback.
+ * When this flag is set ignore any OnReaderOpenCallback setting for
+ * the stream. This allows access to the stream to be made without
+ * generating any extra packets into the stream.
+ */
+#define PVRSRV_STREAM_FLAG_NONE (0U)
+#define PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING (1U<<0)
+#define PVRSRV_STREAM_FLAG_OPEN_WAIT (1U<<1)
+#define PVRSRV_STREAM_FLAG_OPEN_WO (1U<<2)
+#define PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK (1U<<3)
+#define PVRSRV_STREAM_FLAG_RESET_ON_OPEN (1U<<4)
+#define PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK (1U<<6)
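+
+/* Illustrative sketch (not part of this header): a polling reader that must
+ * not block and must not trigger producer callbacks might open with
+ *
+ *   PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+ *   PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK
+ *
+ * while a writer must include PVRSRV_STREAM_FLAG_OPEN_WO to be permitted to
+ * reserve/commit/write. Note that bit 5 (1U<<5) is currently unassigned.
+ */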
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __PVR_TLCOMMON_H__ */
+/******************************************************************************
+ End of file (pvrsrv_tlcommon.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlstreams.h b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlstreams.h
new file mode 100644
index 00000000000000..a4ead13899e64c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrv_tlstreams.h
@@ -0,0 +1,62 @@
+/*************************************************************************/ /*!
+@File
+@Title Services Transport Layer stream names
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer stream names, included in both user mode
+ and kernel mode source.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRSRV_TLSTREAMS_H_
+#define _PVRSRV_TLSTREAMS_H_
+
+#define PVRSRV_TL_CTLR_STREAM "tlctrl"
+
+#define PVRSRV_TL_HWPERF_RGX_FW_STREAM "hwperf_fw_"
+#define PVRSRV_TL_HWPERF_HOST_SERVER_STREAM "hwperf_host_"
+
+/* Host HWPerf client stream names are of the form 'hwperf_client_<pid>' */
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM "hwperf_client_"
+#define PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC "hwperf_client_%u_%u"
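+
+/* Illustrative sketch (not part of this header): formatting a client stream
+ * name. The two fields are presumably the client PID and a per-process
+ * stream index (assumption); acName, uiPid and uiIndex are hypothetical, and
+ * PRVSRVTL_MAX_STREAM_NAME_SIZE comes from pvrsrv_tlcommon.h:
+ *
+ *   IMG_CHAR acName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ *   OSSNPrintf(acName, sizeof(acName),
+ *              PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC, uiPid, uiIndex);
+ */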
+
+#endif /* _PVRSRV_TLSTREAMS_H_ */
+
+/******************************************************************************
+ End of file (pvrsrv_tlstreams.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrsrvkm.mk b/drivers/gpu/drm/img-rogue/1.10/pvrsrvkm.mk
new file mode 100644
index 00000000000000..11341a9c01e0b8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrsrvkm.mk
@@ -0,0 +1,166 @@
+pvrsrvkm_1_10-y += \
+ server_breakpoint_bridge.o \
+ client_cache_direct_bridge.o \
+ server_cache_bridge.o \
+ server_cmm_bridge.o \
+ server_debugmisc_bridge.o \
+ server_dmabuf_bridge.o \
+ client_htbuffer_direct_bridge.o \
+ server_htbuffer_bridge.o \
+ client_mm_direct_bridge.o \
+ server_mm_bridge.o \
+ client_pvrtl_direct_bridge.o \
+ server_pvrtl_bridge.o \
+ server_regconfig_bridge.o \
+ server_rgxcmp_bridge.o \
+ server_rgxhwperf_bridge.o \
+ server_rgxkicksync_bridge.o \
+ server_rgxray_bridge.o \
+ server_rgxsignals_bridge.o \
+ server_rgxta3d_bridge.o \
+ server_rgxtq2_bridge.o \
+ server_rgxtq_bridge.o \
+ server_srvcore_bridge.o \
+ client_sync_direct_bridge.o \
+ server_sync_bridge.o \
+ server_timerquery_bridge.o \
+ pvr_buffer_sync.o \
+ pvr_drm.o \
+ pvr_fence.o \
+ pvr_platform_drv.o \
+ cache_km.o \
+ connection_server.o \
+ devicemem_heapcfg.o \
+ devicemem_server.o \
+ handle.o \
+ htbserver.o \
+ info_page_km.o \
+ lists.o \
+ mmu_common.o \
+ physheap.o \
+ physmem.o \
+ physmem_hostmem.o \
+ physmem_lma.o \
+ physmem_tdsecbuf.o \
+ pmr.o \
+ power.o \
+ process_stats.o \
+ pvr_notifier.o \
+ pvrsrv.o \
+ pvrsrv_bridge_init.o \
+ pvrsrv_pool.o \
+ srvcore.o \
+ sync_checkpoint.o \
+ sync_server.o \
+ tlintern.o \
+ tlserver.o \
+ tlstream.o \
+ debugmisc_server.o \
+ rgxfwload.o \
+ rgxbreakpoint.o \
+ rgxbvnc.o \
+ rgxccb.o \
+ rgxcompute.o \
+ rgxdebug.o \
+ rgxfwimageutils.o \
+ rgxfwutils.o \
+ rgxhwperf.o \
+ rgxinit.o \
+ rgxkicksync.o \
+ rgxlayer_impl.o \
+ rgxmem.o \
+ rgxmipsmmuinit.o \
+ rgxmmuinit.o \
+ rgxpower.o \
+ rgxray.o \
+ rgxregconfig.o \
+ rgxsignals.o \
+ rgxsrvinit.o \
+ rgxstartstop.o \
+ rgxsyncutils.o \
+ rgxta3d.o \
+ rgxtdmtransfer.o \
+ rgxtimecorr.o \
+ rgxtimerquery.o \
+ rgxtransfer.o \
+ rgxutils.o \
+ allocmem.o \
+ event.o \
+ handle_idr.o \
+ htb_debug.o \
+ km_apphint.o \
+ module_common.o \
+ osconnection_server.o \
+ osfunc.o \
+ osmmap_stub.o \
+ pdump.o \
+ physmem_dmabuf.o \
+ physmem_osmem_linux.o \
+ pmr_os.o \
+ pvr_bridge_k.o \
+ pvr_debug.o \
+ pvr_debugfs.o \
+ pvr_dvfs_device.o \
+ pvr_gputrace.o \
+ devicemem.o \
+ devicemem_utils.o \
+ hash.o \
+ htbuffer.o \
+ mem_utils.o \
+ ra.o \
+ sync.o \
+ tlclient.o \
+ uniq_key_splay_tree.o \
+ rgx_compat_bvnc.o \
+ rgx_hwperf_table.o \
+ system/dma_support.o \
+ system/vmm_pvz_client.o \
+ system/vmm_pvz_server.o \
+ system/vmm_type_stub.o \
+ system/vz_physheap_common.o \
+ system/vz_physheap_generic.o \
+ system/vz_support.o \
+ system/vz_vmm_pvz.o \
+ system/vz_vmm_vm.o
+pvrsrvkm_1_10-$(CONFIG_DRM_POWERVR_ROGUE_DEBUG) += \
+ client_devicememhistory_direct_bridge.o \
+ server_devicememhistory_bridge.o \
+ client_ri_direct_bridge.o \
+ server_ri_bridge.o \
+ client_synctracking_direct_bridge.o \
+ server_synctracking_bridge.o \
+ devicemem_history_server.o \
+ ri_server.o
+pvrsrvkm_1_10-$(CONFIG_DRM_POWERVR_ROGUE_PDUMP) += \
+ client_pdump_direct_bridge.o \
+ server_pdump_bridge.o \
+ client_pdumpctrl_direct_bridge.o \
+ server_pdumpctrl_bridge.o \
+ client_pdumpmm_direct_bridge.o \
+ server_pdumpmm_bridge.o \
+ client_rgxpdump_direct_bridge.o \
+ server_rgxpdump_bridge.o \
+ pdump_common.o \
+ pdump_mmu.o \
+ pdump_physmem.o \
+ rgxpdump.o \
+ devicemem_pdump.o \
+ devicememx_pdump.o \
+ dbgdriv.o \
+ dbgdriv_handle.o \
+ ioctl.o \
+ hostfunc.o \
+ main.o
+ifneq ($(CONFIG_DRM_POWERVR_ROGUE_PDUMP),y)
+pvrsrvkm_1_10-y += \
+ pvr_sync_file.o \
+ pvr_counting_timeline.o \
+ pvr_sw_fence.o \
+ pvr_fence.o \
+ dma_fence_sync_native_server.o
+endif
+pvrsrvkm_1_10-$(CONFIG_ARM) += osfunc_arm.o
+pvrsrvkm_1_10-$(CONFIG_ARM64) += osfunc_arm64.o
+pvrsrvkm_1_10-$(CONFIG_EVENT_TRACING) += trace_events.o
+pvrsrvkm_1_10-$(CONFIG_MIPS) += osfunc_mips.o
+pvrsrvkm_1_10-$(CONFIG_X86) += osfunc_x86.o
diff --git a/drivers/gpu/drm/img-rogue/1.10/pvrversion.h b/drivers/gpu/drm/img-rogue/1.10/pvrversion.h
new file mode 100644
index 00000000000000..fb4cbe2b6b8943
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/pvrversion.h
@@ -0,0 +1,72 @@
+/*************************************************************************/ /*!
+@File
+@Title Version numbers and strings.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Version numbers and strings for PVR Consumer services
+ components.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _PVRVERSION_H_
+#define _PVRVERSION_H_
+
+#define PVR_STR(X) #X
+#define PVR_STR2(X) PVR_STR(X)
+
+#define PVRVERSION_MAJ 1
+#define PVRVERSION_MIN 10
+
+#define PVRVERSION_FAMILY "rogueddk"
+#define PVRVERSION_BRANCHNAME "1.10"
+#define PVRVERSION_BUILD 5221057
+#define PVRVERSION_BSCONTROL "Rogue_DDK_ChromiumOS"
+
+#define PVRVERSION_STRING "Rogue_DDK_ChromiumOS rogueddk 1.10@" PVR_STR2(PVRVERSION_BUILD)
+#define PVRVERSION_STRING_SHORT "1.10@" PVR_STR2(PVRVERSION_BUILD) ""
+
+#define COPYRIGHT_TXT "Copyright (c) Imagination Technologies Ltd. All Rights Reserved."
+
+#define PVRVERSION_BUILD_HI 522
+#define PVRVERSION_BUILD_LO 1057
+#define PVRVERSION_STRING_NUMERIC PVR_STR2(PVRVERSION_MAJ) "." PVR_STR2(PVRVERSION_MIN) "." PVR_STR2(PVRVERSION_BUILD_HI) "." PVR_STR2(PVRVERSION_BUILD_LO)
+
+#define PVRVERSION_PACK(MAJ,MIN) ((((MAJ)&0xFFFF) << 16) | (((MIN)&0xFFFF) << 0))
+#define PVRVERSION_UNPACK_MAJ(VERSION) (((VERSION) >> 16) & 0xFFFF)
+#define PVRVERSION_UNPACK_MIN(VERSION) (((VERSION) >> 0) & 0xFFFF)
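+
+/* Worked example (illustrative): PVRVERSION_PACK(1,10) == 0x0001000A, and
+ * PVRVERSION_UNPACK_MAJ/PVRVERSION_UNPACK_MIN recover 1 and 10 from that
+ * value, so the pack/unpack pair round-trips for any MAJ/MIN that fit in
+ * 16 bits. */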
+
+#endif /* _PVRVERSION_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/ra.c b/drivers/gpu/drm/img-rogue/1.10/ra.c
new file mode 100644
index 00000000000000..eb71401cc7ecd3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/ra.c
@@ -0,0 +1,1388 @@
+/*************************************************************************/ /*!
+@File
+@Title Resource Allocator
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+@Description
+ Implements generic resource allocation. The resource
+ allocator was originally intended to manage address spaces. In
+ practice the resource allocator is generic and can manage arbitrary
+ sets of integers.
+
+	Resources are allocated from arenas. Arenas can be created with an
+	initial span of resources. Further resource spans can be added to
+	arenas. A callback mechanism allows an arena to request further
+	resource spans on demand.
+
+	Each arena maintains an ordered list of resource segments, each
+	described by a boundary tag. Each boundary tag describes a segment
+	of resources which is either 'free' (available for allocation) or
+	'busy' (currently allocated). Adjacent 'free' segments are always
+	coalesced to avoid fragmentation.
+
+	For allocation, all 'free' segments are kept on lists of 'free'
+	segments in a table indexed by pvr_log2(segment size), i.e. each
+	table index n holds 'free' segments in the size range 2^n -> 2^(n+1) - 1.
+
+ Allocation policy is based on an *almost* good fit strategy.
+
+	Allocated segments are inserted into a self-scaling hash table which
+	maps the base resource of the span to the relevant boundary
+	tag. This allows the code to get back to the boundary tag without
+	exporting explicit boundary tag references through the API.
+
+	Each arena has an associated quantum size; all allocations from the
+	arena are made in multiples of the basic quantum.
+
+	On resource exhaustion in an arena, a callback, if provided, will be
+	used to request further resources. Resource spans allocated by the
+	callback mechanism will be returned when freed (through one of the
+	two callbacks).
+*/ /**************************************************************************/
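For orientation, here is a minimal sketch of the arena lifecycle using the API declared in ra.h (added later in this patch); the base, size, and quantum values are illustrative only, error handling is omitted, and the NULL import callbacks make this a purely static arena:

RA_BASE_T uBase;
RA_ARENA *pArena = RA_Create("example-arena",
                             12,              /* log2 quantum: 4 KiB */
                             RA_LOCKCLASS_0,
                             NULL, NULL,      /* no import callbacks */
                             NULL,            /* no per-arena handle */
                             IMG_FALSE);      /* allow splitting */

/* hand the arena a 1 MiB span to manage */
RA_Add(pArena, 0x80000000ULL, 0x100000, 0, NULL);

/* carve out 8 KiB, aligned to 4 KiB */
RA_Alloc(pArena, 0x2000, RA_NO_IMPORT_MULTIPLIER, 0, 0x1000,
         "example allocation", &uBase, NULL, NULL);

RA_Free(pArena, uBase);
RA_Delete(pArena);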
+
+/* Issues:
+ * - flags: flags are passed into the resource allocator but are not currently used.
+ * - determination of import size is currently braindead.
+ * - debug code should be moved out to its own module and #ifdef'd
+ */
+
+#include "img_types.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "uniq_key_splay_tree.h"
+
+#include "hash.h"
+#include "ra.h"
+#include "pvrsrv_memallocflags.h"
+
+#include "osfunc.h"
+#include "allocmem.h"
+#include "lock.h"
+#include "pvr_intrinsics.h"
+
+/* The initial, and minimum size of the live address -> boundary tag
+ structure hash table. The value 64 is a fairly arbitrary
+ choice. The hash table resizes on demand so the value chosen is
+ not critical. */
+#define MINIMUM_HASH_SIZE (64)
+
+
+/* #define RA_VALIDATE */
+
+#if defined(__KLOCWORK__)
+	/* make sure Klocwork analyses all the code (including the debug code) */
+ #if !defined(RA_VALIDATE)
+ #define RA_VALIDATE
+ #endif
+#endif
+
+#if (!defined(PVRSRV_NEED_PVR_ASSERT)) || (!defined(RA_VALIDATE))
+	/* Disable the asserts unless explicitly told otherwise. They slow the
+	   driver down too much for everyone else */
+
+ #undef PVR_ASSERT
+	/* let's use a macro that really does nothing when compiling in
+	   release mode! */
+ #define PVR_ASSERT(x)
+#endif
+
+/* boundary tags, used to describe a resource segment */
+struct _BT_
+{
+ enum bt_type
+ {
+ btt_free, /* free resource segment */
+ btt_live /* allocated resource segment */
+ } type;
+
+ unsigned int is_leftmost;
+ unsigned int is_rightmost;
+ unsigned int free_import;
+
+ /* The base resource and extent of this segment */
+ RA_BASE_T base;
+ RA_LENGTH_T uSize;
+
+ /* doubly linked ordered list of all segments within the arena */
+ struct _BT_ *pNextSegment;
+ struct _BT_ *pPrevSegment;
+
+ /* doubly linked un-ordered list of free segments with the same flags. */
+ struct _BT_ * next_free;
+ struct _BT_ * prev_free;
+
+ /* a user reference associated with this span, user references are
+ * currently only provided in the callback mechanism */
+ IMG_HANDLE hPriv;
+
+ /* Flags to match on this span */
+ IMG_UINT32 uFlags;
+
+};
+typedef struct _BT_ BT;
+
+
+/* resource allocation arena */
+struct _RA_ARENA_
+{
+ /* arena name for diagnostics output */
+ IMG_CHAR *name;
+
+ /* allocations within this arena are quantum sized */
+ RA_LENGTH_T uQuantum;
+
+ /* import interface, if provided */
+ PVRSRV_ERROR (*pImportAlloc)(RA_PERARENA_HANDLE h,
+ RA_LENGTH_T uSize,
+ IMG_UINT32 uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv);
+ void (*pImportFree) (RA_PERARENA_HANDLE,
+ RA_BASE_T,
+ RA_PERISPAN_HANDLE hPriv);
+
+ /* arbitrary handle provided by arena owner to be passed into the
+ * import alloc and free hooks */
+ void *pImportHandle;
+
+ IMG_PSPLAY_TREE per_flags_buckets;
+
+ /* resource segment list */
+ BT *pHeadSegment;
+
+ /* segment address to boundary tag hash table */
+ HASH_TABLE *pSegmentHash;
+
+ /* Lock for this arena */
+ POS_LOCK hLock;
+
+ /* LockClass of this arena. This is used within lockdep to decide if a
+ * recursive call sequence with the same lock class is allowed or not. */
+ IMG_UINT32 ui32LockClass;
+
+ /* If TRUE, imports will not be split up. Allocations will always get their
+ * own import
+ */
+ IMG_BOOL bNoSplit;
+};
+
+/*************************************************************************/ /*!
+@Function _RequestAllocFail
+@Description Default callback allocator used if no callback is
+ specified, always fails to allocate further resources to the
+ arena.
+@Input _h - callback handle
+@Input _uSize - requested allocation size
+@Input _uFlags - allocation flags
+@Input _pszAnnotation - annotation string for the allocation
+@Output _pBase - receives the allocated base
+@Output _pActualSize - actual allocation size
+@Output _phPriv - receives the per-import private data
+@Return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL, this function always fails to allocate.
+*/ /**************************************************************************/
+static PVRSRV_ERROR
+_RequestAllocFail (RA_PERARENA_HANDLE _h,
+ RA_LENGTH_T _uSize,
+ IMG_UINT32 _uFlags,
+ const IMG_CHAR *_pszAnnotation,
+ RA_BASE_T *_pBase,
+ RA_LENGTH_T *_pActualSize,
+ RA_PERISPAN_HANDLE *_phPriv)
+{
+ PVR_UNREFERENCED_PARAMETER (_h);
+ PVR_UNREFERENCED_PARAMETER (_uSize);
+ PVR_UNREFERENCED_PARAMETER (_pActualSize);
+ PVR_UNREFERENCED_PARAMETER (_phPriv);
+ PVR_UNREFERENCED_PARAMETER (_uFlags);
+ PVR_UNREFERENCED_PARAMETER (_pBase);
+ PVR_UNREFERENCED_PARAMETER (_pszAnnotation);
+
+ return PVRSRV_ERROR_RA_REQUEST_ALLOC_FAIL;
+}
+
+
+#if defined (PVR_CTZLL)
+	/* make sure to trigger an error if someone changes the buckets or the bHasEltsMapping size:
+	   bHasEltsMapping is used to quickly determine the smallest bucket containing elements,
+	   therefore it must have at least as many bits as the buckets array has buckets. The RA
+	   implementation actually uses one more bit. */
+ static_assert((sizeof(((IMG_PSPLAY_TREE) 0)->buckets) / sizeof(((IMG_PSPLAY_TREE) 0)->buckets[0]))
+ < 8 * sizeof(((IMG_PSPLAY_TREE) 0)->bHasEltsMapping),
+ "Too many buckets for bHasEltsMapping bitmap");
+#endif
+
+
+/*************************************************************************/ /*!
+@Function pvr_log2
+@Description    Computes the floor of the log base 2 of an unsigned integer
+@Input n Unsigned integer
+@Return Floor(Log2(n))
+*/ /**************************************************************************/
+#if defined(PVR_CLZLL)
+/* make sure to trigger a problem if someone changes the RA_LENGTH_T type;
+   indeed, __builtin_clzll is for unsigned long long variables.
+
+   if someone changes RA_LENGTH_T to unsigned long, then use __builtin_clzl;
+   if it changes to unsigned int, use __builtin_clz;
+
+   if it changes to something bigger than unsigned long long,
+   then revert pvr_log2 to the classic implementation */
+static_assert(sizeof(RA_LENGTH_T) == sizeof(unsigned long long),
+ "RA log routines not tuned for sizeof(RA_LENGTH_T)");
+
+static inline IMG_UINT32 pvr_log2(RA_LENGTH_T n)
+{
+ PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+ return (8 * sizeof(RA_LENGTH_T)) - 1 - PVR_CLZLL(n);
+}
+#else
+static IMG_UINT32
+pvr_log2 (RA_LENGTH_T n)
+{
+ IMG_UINT32 l = 0;
+
+ PVR_ASSERT( n != 0 ); /* Log2 is not defined on 0 */
+
+ n>>=1;
+ while (n>0)
+ {
+ n>>=1;
+ l++;
+ }
+ return l;
+}
+#endif
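Both branches compute floor(log2(n)); a one-value check of the CLZ identity used above (editor's sketch, GCC/Clang builtin assumed):

#include <assert.h>

int main(void)
{
	/* 4096 == 2^12: a 64-bit value has 51 leading zeros, and
	   64 - 1 - 51 == 12, matching the 12 shifts of the fallback loop */
	assert(64 - 1 - __builtin_clzll(4096ULL) == 12);
	return 0;
}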
+
+
+#if defined(RA_VALIDATE)
+/*************************************************************************/ /*!
+@Function _IsInSegmentList
+@Description Tests if a BT is in the segment list.
+@Input pArena The arena.
+@Input pBT The boundary tag to look for.
+@Return IMG_FALSE BT was not in the arena's segment list.
+ IMG_TRUE BT was in the arena's segment list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInSegmentList (RA_ARENA *pArena, BT *pBT)
+{
+ BT* pBTScan;
+
+ PVR_ASSERT (pArena != NULL);
+ PVR_ASSERT (pBT != NULL);
+
+ /* Walk the segment list until we see the BT pointer... */
+ pBTScan = pArena->pHeadSegment;
+ while (pBTScan != NULL && pBTScan != pBT)
+ {
+ pBTScan = pBTScan->pNextSegment;
+ }
+
+ /* Test if we found it and then return */
+ return (pBTScan == pBT);
+}
+
+/*************************************************************************/ /*!
+@Function _IsInFreeList
+@Description Tests if a BT is in the free list.
+@Input pArena The arena.
+@Input pBT The boundary tag to look for.
+@Return IMG_FALSE BT was not in the arena's free list.
+ IMG_TRUE BT was in the arena's free list.
+*/ /**************************************************************************/
+static IMG_BOOL
+_IsInFreeList (RA_ARENA *pArena, BT *pBT)
+{
+ BT* pBTScan;
+ IMG_UINT32 uIndex;
+
+ PVR_ASSERT (pArena != NULL);
+ PVR_ASSERT (pBT != NULL);
+
+ /* Look for the free list that holds BTs of this size... */
+ uIndex = pvr_log2 (pBT->uSize);
+ PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+
+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != pBT->uFlags))
+ {
+ return 0;
+ }
+ else
+ {
+ pBTScan = pArena->per_flags_buckets->buckets[uIndex];
+ while (pBTScan != NULL && pBTScan != pBT)
+ {
+ pBTScan = pBTScan->next_free;
+ }
+
+ /* Test if we found it and then return */
+ return (pBTScan == pBT);
+ }
+}
+
+/* is_arena_valid should only be used in debug mode.
+   it checks that the invariants an arena must maintain actually hold */
+static int is_arena_valid(struct _RA_ARENA_ * arena)
+{
+ struct _BT_ * chunk;
+#if defined(PVR_CTZLL)
+ unsigned int i;
+#endif
+
+ for (chunk = arena->pHeadSegment; chunk != NULL; chunk = chunk->pNextSegment)
+ {
+ /* if next segment is NULL, then it must be a rightmost */
+ PVR_ASSERT((chunk->pNextSegment != NULL) || (chunk->is_rightmost));
+ /* if prev segment is NULL, then it must be a leftmost */
+ PVR_ASSERT((chunk->pPrevSegment != NULL) || (chunk->is_leftmost));
+
+ if (chunk->type == btt_free)
+ {
+ /* checks the correctness of the type field */
+ PVR_ASSERT(_IsInFreeList(arena, chunk));
+
+			/* check that there can't be two consecutive free chunks.
+			   Indeed, instead of having two consecutive free chunks,
+			   there should be only one that spans the size of the two. */
+ PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->type != btt_free));
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->pNextSegment->type != btt_free));
+ }
+ else
+ {
+ /* checks the correctness of the type field */
+ PVR_ASSERT(!_IsInFreeList(arena, chunk));
+ }
+
+ PVR_ASSERT((chunk->is_leftmost) || (chunk->pPrevSegment->base + chunk->pPrevSegment->uSize == chunk->base));
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->base + chunk->uSize == chunk->pNextSegment->base));
+
+ /* all segments of the same imports must have the same flags ... */
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->uFlags == chunk->pNextSegment->uFlags));
+ /* ... and the same import handle */
+ PVR_ASSERT((chunk->is_rightmost) || (chunk->hPriv == chunk->pNextSegment->hPriv));
+
+
+		/* if a free chunk spans a whole import, then it must be a 'not to free'
+		   import. Otherwise it should have been freed. */
+ PVR_ASSERT((!chunk->is_leftmost) || (!chunk->is_rightmost) || (chunk->type == btt_live) || (!chunk->free_import));
+ }
+
+#if defined(PVR_CTZLL)
+ if (arena->per_flags_buckets != NULL)
+ {
+ for (i = 0; i < FREE_TABLE_LIMIT; ++i)
+ {
+ /* verify that the bHasEltsMapping is correct for this flags bucket */
+ PVR_ASSERT(
+ ((arena->per_flags_buckets->buckets[i] == NULL) &&
+ (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) == 0)))
+ ||
+ ((arena->per_flags_buckets->buckets[i] != NULL) &&
+ (( (arena->per_flags_buckets->bHasEltsMapping & ((IMG_ELTS_MAPPINGS) 1 << i)) != 0)))
+ );
+ }
+ }
+#endif
+
+ /* if arena was not valid, one of the assert before should have triggered */
+ return 1;
+}
+#endif
+/*************************************************************************/ /*!
+@Function _SegmentListInsertAfter
+@Description Insert a boundary tag into an arena segment list after a
+ specified boundary tag.
+@Input pInsertionPoint The insertion point.
+@Input pBT The boundary tag to insert.
+@Return PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsertAfter (BT *pInsertionPoint,
+ BT *pBT)
+{
+ PVR_ASSERT (pBT != NULL);
+ PVR_ASSERT (pInsertionPoint != NULL);
+
+ pBT->pNextSegment = pInsertionPoint->pNextSegment;
+ pBT->pPrevSegment = pInsertionPoint;
+ if (pInsertionPoint->pNextSegment != NULL)
+ {
+ pInsertionPoint->pNextSegment->pPrevSegment = pBT;
+ }
+ pInsertionPoint->pNextSegment = pBT;
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function _SegmentListInsert
+@Description Insert a boundary tag into an arena segment list
+@Input pArena The arena.
+@Input pBT The boundary tag to insert.
+@Return PVRSRV_OK (doesn't fail)
+*/ /**************************************************************************/
+static INLINE PVRSRV_ERROR
+_SegmentListInsert (RA_ARENA *pArena, BT *pBT)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_ASSERT (!_IsInSegmentList(pArena, pBT));
+
+ /* insert into the segment chain */
+ pBT->pNextSegment = pArena->pHeadSegment;
+ pArena->pHeadSegment = pBT;
+ if (pBT->pNextSegment != NULL)
+ {
+ pBT->pNextSegment->pPrevSegment = pBT;
+ }
+
+ pBT->pPrevSegment = NULL;
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function _SegmentListRemove
+@Description Remove a boundary tag from an arena segment list.
+@Input pArena The arena.
+@Input pBT The boundary tag to remove.
+*/ /**************************************************************************/
+static void
+_SegmentListRemove (RA_ARENA *pArena, BT *pBT)
+{
+ PVR_ASSERT (_IsInSegmentList(pArena, pBT));
+
+ if (pBT->pPrevSegment == NULL)
+ pArena->pHeadSegment = pBT->pNextSegment;
+ else
+ pBT->pPrevSegment->pNextSegment = pBT->pNextSegment;
+
+ if (pBT->pNextSegment != NULL)
+ pBT->pNextSegment->pPrevSegment = pBT->pPrevSegment;
+}
+
+
+/*************************************************************************/ /*!
+@Function _BuildBT
+@Description Construct a boundary tag for a free segment.
+@Input base The base of the resource segment.
+@Input          uSize The extent of the resource segment.
+@Input uFlags The flags to give to the boundary tag
+@Return Boundary tag or NULL
+*/ /**************************************************************************/
+static BT *
+_BuildBT (RA_BASE_T base, RA_LENGTH_T uSize, RA_FLAGS_T uFlags)
+{
+ BT *pBT;
+
+ pBT = OSAllocZMem(sizeof(BT));
+ if (pBT == NULL)
+ {
+ return NULL;
+ }
+
+ pBT->is_leftmost = 1;
+ pBT->is_rightmost = 1;
+ /* pBT->free_import = 0; */
+ pBT->type = btt_live;
+ pBT->base = base;
+ pBT->uSize = uSize;
+ pBT->uFlags = uFlags;
+
+ return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function _SegmentSplit
+@Description Split a segment into two, maintaining the arena segment list. The
+             boundary tag should not be in the free table. Neither the
+             original nor the new neighbour boundary tag will be in the free
+             table.
+@Input pBT The boundary tag to split.
+@Input uSize The required segment size of boundary tag after
+ splitting.
+@Return New neighbour boundary tag or NULL.
+*/ /**************************************************************************/
+static BT *
+_SegmentSplit (BT *pBT, RA_LENGTH_T uSize)
+{
+ BT *pNeighbour;
+
+ pNeighbour = _BuildBT(pBT->base + uSize, pBT->uSize - uSize, pBT->uFlags);
+ if (pNeighbour == NULL)
+ {
+ return NULL;
+ }
+
+ _SegmentListInsertAfter(pBT, pNeighbour);
+
+ pNeighbour->is_leftmost = 0;
+ pNeighbour->is_rightmost = pBT->is_rightmost;
+ pNeighbour->free_import = pBT->free_import;
+ pBT->is_rightmost = 0;
+ pNeighbour->hPriv = pBT->hPriv;
+ pBT->uSize = uSize;
+ pNeighbour->uFlags = pBT->uFlags;
+
+ return pNeighbour;
+}
+
+/*************************************************************************/ /*!
+@Function _FreeListInsert
+@Description Insert a boundary tag into an arena free table.
+@Input pArena The arena.
+@Input pBT The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListInsert (RA_ARENA *pArena, BT *pBT)
+{
+ IMG_UINT32 uIndex;
+ uIndex = pvr_log2 (pBT->uSize);
+
+ PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+ PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+ pBT->type = btt_free;
+
+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+	/* the flags item in the splay tree must have been created beforehand
+	   by _InsertResource */
+ PVR_ASSERT(pArena->per_flags_buckets != NULL);
+ PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+ pBT->next_free = pArena->per_flags_buckets->buckets[uIndex];
+ if (pBT->next_free != NULL)
+ {
+ pBT->next_free->prev_free = pBT;
+ }
+ pBT->prev_free = NULL;
+ pArena->per_flags_buckets->buckets[uIndex] = pBT;
+
+#if defined(PVR_CTZLL)
+ /* tells that bucket[index] now contains elements */
+ pArena->per_flags_buckets->bHasEltsMapping |= ((IMG_ELTS_MAPPINGS) 1 << uIndex);
+#endif
+}
+
+/*************************************************************************/ /*!
+@Function _FreeListRemove
+@Description Remove a boundary tag from an arena free table.
+@Input pArena The arena.
+@Input pBT The boundary tag.
+*/ /**************************************************************************/
+static void
+_FreeListRemove (RA_ARENA *pArena, BT *pBT)
+{
+ IMG_UINT32 uIndex;
+ uIndex = pvr_log2 (pBT->uSize);
+
+ PVR_ASSERT (uIndex < FREE_TABLE_LIMIT);
+ PVR_ASSERT (_IsInFreeList(pArena, pBT));
+
+ if (pBT->next_free != NULL)
+ {
+ pBT->next_free->prev_free = pBT->prev_free;
+ }
+
+ if (pBT->prev_free != NULL)
+ {
+ pBT->prev_free->next_free = pBT->next_free;
+ }
+ else
+ {
+ pArena->per_flags_buckets = PVRSRVSplay(pBT->uFlags, pArena->per_flags_buckets);
+		/* the flags item in the splay tree must have already been created
+		   (otherwise how could there be a segment with these flags?) */
+ PVR_ASSERT(pArena->per_flags_buckets != NULL);
+ PVR_ASSERT(pArena->per_flags_buckets->buckets != NULL);
+
+ pArena->per_flags_buckets->buckets[uIndex] = pBT->next_free;
+#if defined(PVR_CTZLL)
+ if (pArena->per_flags_buckets->buckets[uIndex] == NULL)
+ {
+			/* there are no more elements in this bucket. Update the mapping. */
+ pArena->per_flags_buckets->bHasEltsMapping &= ~((IMG_ELTS_MAPPINGS) 1 << uIndex);
+ }
+#endif
+ }
+
+ PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+ pBT->type = btt_live;
+}
+
+
+/*************************************************************************/ /*!
+@Function _InsertResource
+@Description Add a free resource segment to an arena.
+@Input pArena The arena.
+@Input base The base of the resource segment.
+@Input uSize The extent of the resource segment.
+@Input uFlags The flags of the new resources.
+@Return New bucket pointer
+ NULL on failure
+*/ /**************************************************************************/
+static BT *
+_InsertResource (RA_ARENA *pArena, RA_BASE_T base, RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags)
+{
+ BT *pBT;
+ PVR_ASSERT (pArena!=NULL);
+
+ pBT = _BuildBT (base, uSize, uFlags);
+
+ if (pBT != NULL)
+ {
+ IMG_PSPLAY_TREE tmp = PVRSRVInsert(pBT->uFlags, pArena->per_flags_buckets);
+ if (tmp == NULL)
+ {
+ OSFreeMem(pBT);
+ return NULL;
+ }
+
+ pArena->per_flags_buckets = tmp;
+ _SegmentListInsert (pArena, pBT);
+ _FreeListInsert (pArena, pBT);
+ }
+ return pBT;
+}
+
+/*************************************************************************/ /*!
+@Function _InsertResourceSpan
+@Description Add a free resource span to an arena, marked for free_import.
+@Input pArena The arena.
+@Input base The base of the resource segment.
+@Input uSize The extent of the resource segment.
+@Return The boundary tag representing the free resource segment,
+ or NULL on failure.
+*/ /**************************************************************************/
+static INLINE BT *
+_InsertResourceSpan (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags)
+{
+ BT *pBT = _InsertResource(pArena, base, uSize, uFlags);
+ if (pBT != NULL)
+ {
+ pBT->free_import = 1;
+ }
+ return pBT;
+}
+
+
+/*************************************************************************/ /*!
+@Function _RemoveResourceSpan
+@Description Frees a resource span from an arena, returning the imported
+ span via the callback.
+@Input pArena The arena.
+@Input pBT The boundary tag to free.
+@Return IMG_FALSE failure - span was still in use
+ IMG_TRUE success - span was removed and returned
+*/ /**************************************************************************/
+static INLINE IMG_BOOL
+_RemoveResourceSpan (RA_ARENA *pArena, BT *pBT)
+{
+ PVR_ASSERT (pArena!=NULL);
+ PVR_ASSERT (pBT!=NULL);
+
+ if (pBT->free_import &&
+ pBT->is_leftmost &&
+ pBT->is_rightmost)
+ {
+ _SegmentListRemove (pArena, pBT);
+ pArena->pImportFree (pArena->pImportHandle, pBT->base, pBT->hPriv);
+ OSFreeMem(pBT);
+
+ return IMG_TRUE;
+ }
+
+
+ return IMG_FALSE;
+}
+
+
+/*************************************************************************/ /*!
+@Function _FreeBT
+@Description Free a boundary tag taking care of the segment list and the
+ boundary tag free table.
+@Input pArena The arena.
+@Input pBT The boundary tag to free.
+*/ /**************************************************************************/
+static void
+_FreeBT (RA_ARENA *pArena, BT *pBT)
+{
+ BT *pNeighbour;
+
+ PVR_ASSERT (pArena!=NULL);
+ PVR_ASSERT (pBT!=NULL);
+ PVR_ASSERT (!_IsInFreeList(pArena, pBT));
+
+ /* try and coalesce with left neighbour */
+ pNeighbour = pBT->pPrevSegment;
+ if ((!pBT->is_leftmost) && (pNeighbour->type == btt_free))
+ {
+ /* Sanity check. */
+ PVR_ASSERT(pNeighbour->base + pNeighbour->uSize == pBT->base);
+
+ _FreeListRemove (pArena, pNeighbour);
+ _SegmentListRemove (pArena, pNeighbour);
+ pBT->base = pNeighbour->base;
+
+ pBT->uSize += pNeighbour->uSize;
+ pBT->is_leftmost = pNeighbour->is_leftmost;
+ OSFreeMem(pNeighbour);
+ }
+
+ /* try to coalesce with right neighbour */
+ pNeighbour = pBT->pNextSegment;
+ if ((!pBT->is_rightmost) && (pNeighbour->type == btt_free))
+ {
+ /* sanity check */
+ PVR_ASSERT(pBT->base + pBT->uSize == pNeighbour->base);
+
+ _FreeListRemove (pArena, pNeighbour);
+ _SegmentListRemove (pArena, pNeighbour);
+ pBT->uSize += pNeighbour->uSize;
+ pBT->is_rightmost = pNeighbour->is_rightmost;
+ OSFreeMem(pNeighbour);
+ }
+
+ if (_RemoveResourceSpan(pArena, pBT) == IMG_FALSE)
+ {
+ _FreeListInsert (pArena, pBT);
+ PVR_ASSERT( (!pBT->is_rightmost) || (!pBT->is_leftmost) || (!pBT->free_import) );
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+}
+
+
+/*
+  This function returns the first element in a bucket that can be split
+  in such a way that one of the subsegments meets the size and alignment
+  criteria.
+
+ The first_elt is the bucket to look into. Remember that a bucket is
+ implemented as a pointer to the first element of the linked list.
+
+ nb_max_try is used to limit the number of elements considered.
+ This is used to only consider the first nb_max_try elements in the
+  free-list. The special value ~0 means unlimited, i.e. consider all
+  elements in the free list
+ */
+static INLINE
+struct _BT_ * find_chunk_in_bucket(struct _BT_ * first_elt,
+ RA_LENGTH_T uSize,
+ RA_LENGTH_T uAlignment,
+ unsigned int nb_max_try)
+{
+ struct _BT_ * walker;
+
+ for (walker = first_elt; (walker != NULL) && (nb_max_try != 0); walker = walker->next_free)
+ {
+ const RA_BASE_T aligned_base = (uAlignment > 1) ?
+ (walker->base + uAlignment - 1) & ~(uAlignment - 1)
+ : walker->base;
+
+ if (walker->base + walker->uSize >= aligned_base + uSize)
+ {
+ return walker;
+ }
+
+		/* 0xFFFF...FFFF is used as nb_max_try = infinity. */
+ if (nb_max_try != (unsigned int) ~0)
+ {
+ nb_max_try--;
+ }
+ }
+
+ return NULL;
+}
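The aligned_base computation in the loop above is the usual round-up-to-a-power-of-two idiom; a self-contained check (editor's sketch; align_up is a hypothetical helper name):

#include <assert.h>
#include <stdint.h>

/* round base up to the next multiple of align (align must be a power of 2) */
static uint64_t align_up(uint64_t base, uint64_t align)
{
	return (base + align - 1) & ~(align - 1);
}

int main(void)
{
	assert(align_up(0x1003, 0x100) == 0x1100);
	assert(align_up(0x1000, 0x100) == 0x1000);	/* already aligned */
	return 0;
}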
+
+
+/*************************************************************************/ /*!
+@Function _AttemptAllocAligned
+@Description Attempt an allocation from an arena.
+@Input pArena The arena.
+@Input uSize The requested allocation size.
+@Output         phPriv The user reference associated with
+ the imported segment. (optional)
+@Input flags Allocation flags
+@Input uAlignment Required uAlignment, or 0.
+ Must be a power of 2 if not 0
+@Output base Allocated resource base (non optional, must not be NULL)
+@Return IMG_FALSE failure
+ IMG_TRUE success
+*/ /**************************************************************************/
+static IMG_BOOL
+_AttemptAllocAligned (RA_ARENA *pArena,
+ RA_LENGTH_T uSize,
+ IMG_UINT32 uFlags,
+ RA_LENGTH_T uAlignment,
+ RA_BASE_T *base,
+ RA_PERISPAN_HANDLE *phPriv) /* this is the "per-import" private data */
+{
+
+ IMG_UINT32 index_low;
+ IMG_UINT32 index_high;
+ IMG_UINT32 i;
+ struct _BT_ * pBT = NULL;
+ RA_BASE_T aligned_base;
+
+ PVR_ASSERT (pArena!=NULL);
+ PVR_ASSERT (base != NULL);
+
+ pArena->per_flags_buckets = PVRSRVSplay(uFlags, pArena->per_flags_buckets);
+ if ((pArena->per_flags_buckets == NULL) || (pArena->per_flags_buckets->ui32Flags != uFlags))
+ {
+ /* no chunks with these flags. */
+ return IMG_FALSE;
+ }
+
+ index_low = pvr_log2(uSize);
+ index_high = pvr_log2(uSize + uAlignment - 1);
+
+ PVR_ASSERT(index_low < FREE_TABLE_LIMIT);
+ PVR_ASSERT(index_high < FREE_TABLE_LIMIT);
+ PVR_ASSERT(index_low <= index_high);
+
+#if defined(PVR_CTZLL)
+ i = PVR_CTZLL((IMG_ELTS_MAPPINGS) (~((1 << (index_high + 1)) - 1)) & pArena->per_flags_buckets->bHasEltsMapping);
+#else
+ for (i = index_high + 1; (i < FREE_TABLE_LIMIT) && (pArena->per_flags_buckets->buckets[i] == NULL); ++i)
+ {
+ }
+#endif
+ PVR_ASSERT(i <= FREE_TABLE_LIMIT);
+
+ if (i != FREE_TABLE_LIMIT)
+ {
+		/* since we start at index_high + 1, we are guaranteed to exit */
+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, 1);
+ }
+ else
+ {
+ for (i = index_high; (i != index_low - 1) && (pBT == NULL); --i)
+ {
+ pBT = find_chunk_in_bucket(pArena->per_flags_buckets->buckets[i], uSize, uAlignment, (unsigned int) ~0);
+ }
+ }
+
+ if (pBT == NULL)
+ {
+ return IMG_FALSE;
+ }
+
+ aligned_base = (uAlignment > 1) ? (pBT->base + uAlignment - 1) & ~(uAlignment - 1) : pBT->base;
+
+ _FreeListRemove (pArena, pBT);
+
+ if(pArena->bNoSplit)
+ {
+ goto nosplit;
+ }
+
+ /* with uAlignment we might need to discard the front of this segment */
+ if (aligned_base > pBT->base)
+ {
+ BT *pNeighbour;
+ pNeighbour = _SegmentSplit (pBT, (RA_LENGTH_T)(aligned_base - pBT->base));
+ /* partition the buffer, create a new boundary tag */
+ if (pNeighbour == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Front split failed", __FUNCTION__));
+ /* Put pBT back in the list */
+ _FreeListInsert (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ _FreeListInsert(pArena, pBT);
+ pBT = pNeighbour;
+ }
+
+ /* the segment might be too big, if so, discard the back of the segment */
+ if (pBT->uSize > uSize)
+ {
+ BT *pNeighbour;
+ pNeighbour = _SegmentSplit(pBT, uSize);
+ /* partition the buffer, create a new boundary tag */
+ if (pNeighbour == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Back split failed", __FUNCTION__));
+ /* Put pBT back in the list */
+ _FreeListInsert (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ _FreeListInsert (pArena, pNeighbour);
+ }
+nosplit:
+ pBT->type = btt_live;
+
+ if (!HASH_Insert_Extended (pArena->pSegmentHash, &pBT->base, (uintptr_t)pBT))
+ {
+ _FreeBT (pArena, pBT);
+ return IMG_FALSE;
+ }
+
+ if (phPriv != NULL)
+ *phPriv = pBT->hPriv;
+
+ *base = pBT->base;
+
+ return IMG_TRUE;
+}
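To make the two-phase bucket search above concrete, a worked example (editor's note; the numbers follow directly from the code):

/* for uSize = 5000 and uAlignment = 64:
 *   index_low  = pvr_log2(5000)      = 12   (5000 lies in [4096, 8192))
 *   index_high = pvr_log2(5000 + 63) = 12
 * Phase 1 scans buckets[13..] and may stop at the first element found
 * (nb_max_try == 1): every chunk there is at least 8192 bytes, which is
 * guaranteed to cover 5000 bytes plus up to 63 bytes of alignment slack.
 * Only if all those buckets are empty does phase 2 scan bucket[12]
 * exhaustively (nb_max_try == ~0). */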
+
+
+
+/*************************************************************************/ /*!
+@Function RA_Create
+@Description To create a resource arena.
+@Input name The name of the arena for diagnostic purposes.
+@Input          uLog2Quantum The arena allocation quantum, as a log2 value.
+@Input          ui32LockClass The lock class level this arena uses.
+@Input          imp_alloc A resource allocation callback or 0.
+@Input          imp_free A resource de-allocation callback or 0.
+@Input          arena_handle Handle passed to alloc and free or 0.
+@Input bNoSplit Disable splitting up imports.
+@Return arena handle, or NULL.
+*/ /**************************************************************************/
+IMG_INTERNAL RA_ARENA *
+RA_Create (IMG_CHAR *name,
+ RA_LOG2QUANTUM_T uLog2Quantum,
+ IMG_UINT32 ui32LockClass,
+ PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE h,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T _flags,
+ const IMG_CHAR *pszAnnotation,
+ /* returned data */
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv),
+ void (*imp_free) (RA_PERARENA_HANDLE,
+ RA_BASE_T,
+ RA_PERISPAN_HANDLE),
+ RA_PERARENA_HANDLE arena_handle,
+ IMG_BOOL bNoSplit)
+{
+ RA_ARENA *pArena;
+ PVRSRV_ERROR eError;
+
+ if (name == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "RA_Create: invalid parameter 'name' (NULL not accepted)"));
+ return NULL;
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Create: name='%s'", name));
+
+ pArena = OSAllocMem(sizeof (*pArena));
+ if (pArena == NULL)
+ {
+ goto arena_fail;
+ }
+
+ eError = OSLockCreate(&pArena->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto lock_fail;
+ }
+
+ pArena->pSegmentHash = HASH_Create_Extended(MINIMUM_HASH_SIZE, sizeof(RA_BASE_T), HASH_Func_Default, HASH_Key_Comp_Default);
+
+ if (pArena->pSegmentHash==NULL)
+ {
+ goto hash_fail;
+ }
+
+ pArena->name = name;
+ pArena->pImportAlloc = (imp_alloc!=NULL) ? imp_alloc : &_RequestAllocFail;
+ pArena->pImportFree = imp_free;
+ pArena->pImportHandle = arena_handle;
+ pArena->pHeadSegment = NULL;
+	pArena->uQuantum = (IMG_UINT64)1 << uLog2Quantum;
+ pArena->per_flags_buckets = NULL;
+ pArena->ui32LockClass = ui32LockClass;
+ pArena->bNoSplit = bNoSplit;
+
+ PVR_ASSERT(is_arena_valid(pArena));
+ return pArena;
+
+hash_fail:
+ OSLockDestroy(pArena->hLock);
+lock_fail:
+ OSFreeMem(pArena);
+ /*not nulling pointer, out of scope*/
+arena_fail:
+ return NULL;
+}
+
+/*************************************************************************/ /*!
+@Function RA_Delete
+@Description To delete a resource arena. All resources allocated from
+ the arena must be freed before deleting the arena.
+@Input pArena The arena to delete.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Delete (RA_ARENA *pArena)
+{
+ IMG_UINT32 uIndex;
+ IMG_BOOL bWarn = IMG_TRUE;
+
+ PVR_ASSERT(pArena != NULL);
+
+ if (pArena == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Delete: invalid parameter - pArena"));
+ return;
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Delete: name='%s'", pArena->name));
+
+ while (pArena->pHeadSegment != NULL)
+ {
+ BT *pBT = pArena->pHeadSegment;
+
+ if (pBT->type != btt_free)
+ {
+ if (bWarn)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Allocations still exist in the arena that is being destroyed", __func__));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: Likely Cause: client drivers not freeing allocations before destroying devmem context", __func__));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: base = 0x%llx size=0x%llx", __func__,
+ (unsigned long long)pBT->base, (unsigned long long)pBT->uSize));
+ PVR_DPF ((PVR_DBG_ERROR, "%s: This warning will be issued only once for the first allocation found!", __func__));
+ bWarn = IMG_FALSE;
+ }
+ }
+ else
+ {
+ _FreeListRemove(pArena, pBT);
+ }
+
+ _SegmentListRemove (pArena, pBT);
+ OSFreeMem(pBT);
+ /*not nulling original pointer, it has changed*/
+ }
+
+ while (pArena->per_flags_buckets != NULL)
+ {
+ for (uIndex=0; uIndex<FREE_TABLE_LIMIT; uIndex++)
+ {
+ PVR_ASSERT(pArena->per_flags_buckets->buckets[uIndex] == NULL);
+ }
+
+ pArena->per_flags_buckets = PVRSRVDelete(pArena->per_flags_buckets->ui32Flags, pArena->per_flags_buckets);
+ }
+
+ HASH_Delete (pArena->pSegmentHash);
+ OSLockDestroy(pArena->hLock);
+ OSFreeMem(pArena);
+ /*not nulling pointer, copy on stack*/
+}
+
+/*************************************************************************/ /*!
+@Function RA_Add
+@Description To add a resource span to an arena. The span must not
+             overlap with any span previously added to the arena.
+@Input pArena The arena to add a span into.
+@Input base The base of the span.
+@Input uSize The extent of the span.
+@Input uFlags the flags of the new import
+@Input hPriv a private handle associated with the span (reserved for the user)
+@Return IMG_TRUE - Success
+ IMG_FALSE - failure
+*/ /**************************************************************************/
+IMG_INTERNAL IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ RA_PERISPAN_HANDLE hPriv)
+{
+ struct _BT_* bt;
+ PVR_ASSERT (pArena != NULL);
+ PVR_ASSERT (uSize != 0);
+
+ if (pArena == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Add: invalid parameter - pArena"));
+ return IMG_FALSE;
+ }
+
+ if(uSize == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RA_Add: invalid size 0 added to arena %s", pArena->name));
+ return IMG_FALSE;
+ }
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ PVR_ASSERT(is_arena_valid(pArena));
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Add: name='%s', "
+ "base=0x%llx, size=0x%llx", pArena->name,
+ (unsigned long long)base, (unsigned long long)uSize));
+
+ uSize = (uSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+ bt = _InsertResource(pArena, base, uSize, uFlags);
+ if (bt != NULL)
+ {
+ bt->hPriv = hPriv;
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+ OSLockRelease(pArena->hLock);
+
+ return bt != NULL;
+}
+
+/*************************************************************************/ /*!
+@Function RA_Alloc
+@Description To allocate resource from an arena.
+@Input pArena The arena
+@Input uRequestSize The size of resource segment requested.
+@Input uImportMultiplier Import x-times more for future requests if
+ we have to import new memory.
+@Output pActualSize The actual size of resource segment
+                                allocated, typically rounded up by quantum.
+@Output phPriv The user reference associated with allocated resource span.
+@Input uImportFlags Flags influencing allocation policy.
+@Input uAlignment The uAlignment constraint required for the
+ allocated segment, use 0 if uAlignment not required, otherwise
+ must be a power of 2.
+@Output base Allocated base resource
+@Return PVRSRV_OK - success
+*/ /**************************************************************************/
+IMG_INTERNAL PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+ RA_LENGTH_T uRequestSize,
+ IMG_UINT8 uImportMultiplier,
+ RA_FLAGS_T uImportFlags,
+ RA_LENGTH_T uAlignment,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *base,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bResult;
+ RA_LENGTH_T uSize = uRequestSize;
+ RA_FLAGS_T uFlags = (uImportFlags & PVRSRV_MEMALLOCFLAGS_RA_DIFFERENTIATION_MASK);
+
+ if (pArena == NULL || uImportMultiplier == 0 || uSize == 0)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "RA_Alloc: One of the necessary parameters is 0"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ if (pActualSize != NULL)
+ {
+ *pActualSize = uSize;
+ }
+
+ /* Must be a power of 2 or 0 */
+ PVR_ASSERT((uAlignment == 0) || (uAlignment & (uAlignment - 1)) == 0);
+
+ PVR_DPF ((PVR_DBG_MESSAGE,
+ "RA_Alloc: arena='%s', size=0x%llx(0x%llx), "
+ "alignment=0x%llx", pArena->name,
+ (unsigned long long)uSize, (unsigned long long)uRequestSize,
+ (unsigned long long)uAlignment));
+
+	/* if the allocation fails then we might have an import source which
+	   can provide more resource, else we will have to fail the
+	   allocation back to the caller. */
+ bResult = _AttemptAllocAligned (pArena, uSize, uFlags, uAlignment, base, phPriv);
+ if (!bResult)
+ {
+ IMG_HANDLE hPriv;
+ RA_BASE_T import_base;
+ RA_LENGTH_T uImportSize = uSize;
+
+ /*
+ Ensure that we allocate sufficient space to meet the uAlignment
+ constraint
+ */
+ if (uAlignment > pArena->uQuantum)
+ {
+ uImportSize += (uAlignment - pArena->uQuantum);
+ }
+
+ /* apply over-allocation multiplier after all alignment adjustments */
+ uImportSize *= uImportMultiplier;
+
+ /* ensure that we import according to the quanta of this arena */
+ uImportSize = (uImportSize + pArena->uQuantum - 1) & ~(pArena->uQuantum - 1);
+
+ eError = pArena->pImportAlloc (pArena->pImportHandle,
+ uImportSize, uImportFlags,
+ pszAnnotation,
+ &import_base, &uImportSize,
+ &hPriv);
+ if (PVRSRV_OK != eError)
+ {
+ OSLockRelease(pArena->hLock);
+ return eError;
+ }
+ else
+ {
+ BT *pBT;
+ pBT = _InsertResourceSpan (pArena, import_base, uImportSize, uFlags);
+ /* successfully import more resource, create a span to
+ represent it and retry the allocation attempt */
+ if (pBT == NULL)
+ {
+ /* insufficient resources to insert the newly acquired span,
+ so free it back again */
+ pArena->pImportFree(pArena->pImportHandle, import_base, hPriv);
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', "
+ "size=0x%llx failed!", pArena->name,
+ (unsigned long long)uSize));
+ /* RA_Dump (arena); */
+
+ OSLockRelease(pArena->hLock);
+ return PVRSRV_ERROR_RA_INSERT_RESOURCE_SPAN_FAILED;
+ }
+
+ pBT->hPriv = hPriv;
+
+ bResult = _AttemptAllocAligned(pArena, uSize, uFlags, uAlignment, base, phPriv);
+ if (!bResult)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "RA_Alloc: name='%s' second alloc failed!",
+ pArena->name));
+
+ /*
+					On failure of _AttemptAllocAligned(), depending on the exact
+					point of failure, the imported segment may have been used and
+					freed, or left untouched. If the latter, we need to return it.
+ */
+ _FreeBT(pArena, pBT);
+
+ OSLockRelease(pArena->hLock);
+ return PVRSRV_ERROR_RA_ATTEMPT_ALLOC_ALIGNED_FAILED;
+ }
+ else
+ {
+ /* Check if the new allocation was in the span we just added... */
+ if (*base < import_base || *base > (import_base + uImportSize))
+ {
+ PVR_DPF ((PVR_DBG_ERROR,
+ "RA_Alloc: name='%s' alloc did not occur in the imported span!",
+ pArena->name));
+
+ /*
+						Remove the imported span, which should not be in use (if it is,
+						that is okay, but essentially no unused span should be left behind).
+ */
+ _FreeBT(pArena, pBT);
+ }
+ }
+ }
+ }
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Alloc: name='%s', size=0x%llx, "
+ "*base=0x%llx = %d",pArena->name, (unsigned long long)uSize,
+ (unsigned long long)*base, bResult));
+
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ OSLockRelease(pArena->hLock);
+ return PVRSRV_OK;
+}
+
+
+
+
+/*************************************************************************/ /*!
+@Function RA_Free
+@Description To free a resource segment.
+@Input pArena The arena the segment was originally allocated from.
+@Input base The base of the resource span to free.
+*/ /**************************************************************************/
+IMG_INTERNAL void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base)
+{
+ BT *pBT;
+
+ PVR_ASSERT (pArena != NULL);
+
+ if (pArena == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR,"RA_Free: invalid parameter - pArena"));
+ return;
+ }
+
+ OSLockAcquireNested(pArena->hLock, pArena->ui32LockClass);
+ PVR_ASSERT(is_arena_valid(pArena));
+
+ PVR_DPF ((PVR_DBG_MESSAGE, "RA_Free: name='%s', base=0x%llx", pArena->name,
+ (unsigned long long)base));
+
+ pBT = (BT *) HASH_Remove_Extended (pArena->pSegmentHash, &base);
+ PVR_ASSERT (pBT != NULL);
+
+ if (pBT)
+ {
+ PVR_ASSERT (pBT->base == base);
+ _FreeBT (pArena, pBT);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RA_Free: no resource span found for given base (0x%llX) in arena %s",
+ (unsigned long long) base,
+ pArena->name));
+ }
+
+ PVR_ASSERT(is_arena_valid(pArena));
+ OSLockRelease(pArena->hLock);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/ra.h b/drivers/gpu/drm/img-rogue/1.10/ra.h
new file mode 100644
index 00000000000000..cfccd595fda598
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/ra.h
@@ -0,0 +1,206 @@
+/*************************************************************************/ /*!
+@File
+@Title Resource Allocator API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RA_H_
+#define _RA_H_
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/** Resource arena.
+ * struct _RA_ARENA_ deliberately opaque
+ */
+typedef struct _RA_ARENA_ RA_ARENA; //PRQA S 3313
+
+/*
+ * Per-Arena handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data. It is supplied at RA_Create
+ * time, and the RA promises to pass it to calls to the ImportAlloc and
+ * ImportFree callbacks.
+ */
+typedef IMG_HANDLE RA_PERARENA_HANDLE;
+/*
+ * Per-Import handle - this is private data for the caller of the RA.
+ * The RA knows nothing about this data. It is supplied on a
+ * per-import basis, either the "initial" import at RA_Create time, or
+ * further imports via the ImportAlloc callback. It sends it back via
+ * the ImportFree callback, and also provides it in answer to any
+ * RA_Alloc request to signify from which "import" the allocation came
+ */
+typedef IMG_HANDLE RA_PERISPAN_HANDLE;
+
+typedef IMG_UINT64 RA_BASE_T;
+typedef IMG_UINT32 RA_LOG2QUANTUM_T;
+typedef IMG_UINT64 RA_LENGTH_T;
+
+/* Lock classes: describe the level of nesting between different arenas. */
+#define RA_LOCKCLASS_0 0
+#define RA_LOCKCLASS_1 1
+#define RA_LOCKCLASS_2 2
+
+#define RA_NO_IMPORT_MULTIPLIER 1
+
+/*
+ * Flags in an "import" must match the flags for an allocation
+ */
+typedef IMG_UINT32 RA_FLAGS_T;
+
+/**
+ * @Function RA_Create
+ *
+ * @Description
+ *
+ * To create a resource arena.
+ *
+ * @Input name - the name of the arena for diagnostic purposes.
+ * @Input uLog2Quantum - the arena allocation quantum, as a log2 value.
+ * @Input ui32LockClass - the lock class level this arena uses.
+ * @Input alloc - a resource allocation callback or 0.
+ * @Input free - a resource de-allocation callback or 0.
+ * @Input per_arena_handle - user private handle passed to alloc and free or 0.
+ * @Input bNoSplit - Disable splitting up imports.
+ * @Return pointer to arena, or NULL.
+ */
+RA_ARENA *
+RA_Create (IMG_CHAR *name,
+ /* subsequent imports: */
+ RA_LOG2QUANTUM_T uLog2Quantum,
+ IMG_UINT32 ui32LockClass,
+ PVRSRV_ERROR (*imp_alloc)(RA_PERARENA_HANDLE _h,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv),
+ void (*imp_free) (RA_PERARENA_HANDLE,
+ RA_BASE_T,
+ RA_PERISPAN_HANDLE),
+ RA_PERARENA_HANDLE per_arena_handle,
+ IMG_BOOL bNoSplit);
+
+/**
+ * @Function RA_Delete
+ *
+ * @Description
+ *
+ * To delete a resource arena. All resources allocated from the arena
+ * must be freed before deleting the arena.
+ *
+ * @Input pArena - the arena to delete.
+ * @Return None
+ */
+void
+RA_Delete (RA_ARENA *pArena);
+
+/**
+ * @Function RA_Add
+ *
+ * @Description
+ *
+ * To add a resource span to an arena. The span must not overlap with
+ * any span previously added to the arena.
+ *
+ * @Input pArena - the arena to add a span into.
+ * @Input base - the base of the span.
+ * @Input uSize - the extent of the span.
+ * @Input hPriv - handle associated to the span (reserved to user uses)
+ * @Return IMG_TRUE - success, IMG_FALSE - failure
+ */
+IMG_BOOL
+RA_Add (RA_ARENA *pArena,
+ RA_BASE_T base,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ RA_PERISPAN_HANDLE hPriv);
+
+/**
+ * @Function RA_Alloc
+ *
+ * @Description
+ *
+ * To allocate resource from an arena.
+ *
+ * @Input pArena - the arena
+ * @Input uRequestSize - the size of resource segment requested.
+ * @Input uImportMultiplier - Import x-times of the uRequestSize
+ * for future RA_Alloc calls.
+ * Use RA_NO_IMPORT_MULTIPLIER to import the exact size.
+ * @Output pActualSize - the actual size of resource segment allocated,
+ *                       typically rounded up by quantum.
+ * @Input uImportFlags - flags influencing allocation policy.
+ * @Input uAlignment - the alignment constraint required for the
+ * allocated segment, use 0 if alignment not required.
+ * @Input pszAnnotation - a string to describe the allocation
+ * @Output pBase - allocated base resource
+ * @Output phPriv - the user reference associated with allocated
+ * resource span.
+ * @Return PVRSRV_OK - success
+ */
+PVRSRV_ERROR
+RA_Alloc (RA_ARENA *pArena,
+ RA_LENGTH_T uSize,
+ IMG_UINT8 uImportMultiplier,
+ RA_FLAGS_T uFlags,
+ RA_LENGTH_T uAlignment,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *pBase,
+ RA_LENGTH_T *pActualSize,
+ RA_PERISPAN_HANDLE *phPriv);
+
+/**
+ * @Function RA_Free
+ *
+ * @Description To free a resource segment.
+ *
+ * @Input pArena - the arena the segment was originally allocated from.
+ * @Input base - the base of the resource span to free.
+ *
+ * @Return None
+ */
+void
+RA_Free (RA_ARENA *pArena, RA_BASE_T base);
+
+#endif
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_bridge.h b/drivers/gpu/drm/img-rogue/1.10/rgx_bridge.h
new file mode 100644
index 00000000000000..e47515ea3e3e62
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_bridge.h
@@ -0,0 +1,235 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the rgx Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_BRIDGE_H__)
+#define __RGX_BRIDGE_H__
+
+#include "pvr_bridge.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "rgx_fwif.h"
+
+#define RGXFWINITPARAMS_VERSION 1
+#define RGXFWINITPARAMS_EXTENSION 128
+
+#include "common_rgxta3d_bridge.h"
+#include "common_rgxcmp_bridge.h"
+
+#include "common_rgxtq2_bridge.h"
+#include "common_rgxtq_bridge.h"
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+#include "common_breakpoint_bridge.h"
+#endif
+#include "common_debugmisc_bridge.h"
+#if defined(PDUMP)
+#include "common_rgxpdump_bridge.h"
+#endif
+#include "common_rgxhwperf_bridge.h"
+#include "common_rgxray_bridge.h"
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+#include "common_regconfig_bridge.h"
+#endif
+#include "common_timerquery_bridge.h"
+#include "common_rgxkicksync_bridge.h"
+
+#include "common_rgxsignals_bridge.h"
+
+
+/*
+ * Bridge Cmd Ids
+ */
+
+/* *REMEMBER* to update PVRSRV_BRIDGE_RGX_LAST if you add/remove a bridge
+ * group!
+ * Also you need to ensure all PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST
+ * offsets follow on from the previous bridge group's commands!
+ *
+ * If a bridge group is optional, ensure you *ALWAYS* define its index
+ * (e.g. PVRSRV_BRIDGE_RGXCMP is always 129, even if the feature is
+ * not defined). If an optional bridge group is not defined you must
+ * still define PVRSRV_BRIDGE_RGX_xxx_DISPATCH_FIRST for it with an
+ * assigned value of 0.
+ */
+
+/* The RGX bridge groups start at 128 (PVRSRV_BRIDGE_RGX_FIRST) rather than
+ * following on from the other non-device bridge groups (meaning that they
+ * then won't be displaced if other non-device bridge groups are added)
+ */
+
+#define PVRSRV_BRIDGE_RGX_FIRST 128UL
+
+/* 128: RGX TQ interface functions */
+#define PVRSRV_BRIDGE_RGXTQ 128UL
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST (PVRSRV_BRIDGE_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ_CMD_LAST)
+
+
+/* 129: RGX Compute interface functions */
+#define PVRSRV_BRIDGE_RGXCMP 129UL
+# define PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST + 1)
+# define PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXCMP_CMD_LAST)
+
+
+/* 130: RGX TA/3D interface functions */
+#define PVRSRV_BRIDGE_RGXTA3D 130UL
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTA3D_CMD_LAST)
+
+/* 131: RGX Breakpoint interface functions */
+#define PVRSRV_BRIDGE_BREAKPOINT 131UL
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST + PVRSRV_BRIDGE_BREAKPOINT_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST)
+#endif
+
+/* 132: RGX Debug/Misc interface functions */
+#define PVRSRV_BRIDGE_DEBUGMISC 132UL
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST (PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST + PVRSRV_BRIDGE_DEBUGMISC_CMD_LAST)
+
+/* 133: RGX PDump interface functions */
+#define PVRSRV_BRIDGE_RGXPDUMP 133UL
+#if defined(PDUMP)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST +1)
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXPDUMP_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST (PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST)
+#endif
+
+/* 134: RGX HWPerf interface functions */
+#define PVRSRV_BRIDGE_RGXHWPERF 134UL
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXHWPERF_CMD_LAST)
+
+/* 135: RGX Ray Tracing interface functions */
+#define PVRSRV_BRIDGE_RGXRAY 135UL
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXRAY_CMD_LAST)
+
+/* 136: RGX Register Configuration interface functions */
+#define PVRSRV_BRIDGE_REGCONFIG 136UL
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST + PVRSRV_BRIDGE_REGCONFIG_CMD_LAST)
+#else
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST 0
+#define PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST (PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST)
+#endif
+
+/* 137: RGX Timer Query interface functions */
+#define PVRSRV_BRIDGE_TIMERQUERY 137UL
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST (PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST + PVRSRV_BRIDGE_TIMERQUERY_CMD_LAST)
+
+/* 138: RGX kicksync interface */
+#define PVRSRV_BRIDGE_RGXKICKSYNC 138UL
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST (PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXKICKSYNC_CMD_LAST)
+
+/* 139: RGX signals interface */
+#define PVRSRV_BRIDGE_RGXSIGNALS 139UL
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXSIGNALS_CMD_LAST)
+
+
+#define PVRSRV_BRIDGE_RGXTQ2 140UL
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST (PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST + 1)
+#define PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST + PVRSRV_BRIDGE_RGXTQ2_CMD_LAST)
+
+#define PVRSRV_BRIDGE_RGX_LAST (PVRSRV_BRIDGE_RGXTQ2)
+#define PVRSRV_BRIDGE_RGX_DISPATCH_LAST (PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST)
+
+/* bit mask representing the enabled RGX bridges */
+
+static const IMG_UINT32 gui32RGXBridges =
+ (1U << (PVRSRV_BRIDGE_RGXTQ - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_COMPUTE)
+ | (1U << (PVRSRV_BRIDGE_RGXCMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXTA3D - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(SUPPORT_BREAKPOINT)
+ | (1U << (PVRSRV_BRIDGE_BREAKPOINT - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_DEBUGMISC)
+ | (1U << (PVRSRV_BRIDGE_DEBUGMISC - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(PDUMP)
+ | (1U << (PVRSRV_BRIDGE_RGXPDUMP - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXHWPERF - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_RAY_TRACING)
+ | (1U << (PVRSRV_BRIDGE_RGXRAY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_REGCONFIG)
+ | (1U << (PVRSRV_BRIDGE_REGCONFIG - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+#if defined(SUPPORT_TIMERQUERY)
+ | (1U << (PVRSRV_BRIDGE_TIMERQUERY - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXKICKSYNC - PVRSRV_BRIDGE_RGX_FIRST))
+#if defined(RGX_FEATURE_SIGNAL_SNOOPING)
+ | (1U << (PVRSRV_BRIDGE_RGXSIGNALS - PVRSRV_BRIDGE_RGX_FIRST))
+#endif
+ | (1U << (PVRSRV_BRIDGE_RGXTQ2 - PVRSRV_BRIDGE_RGX_FIRST));
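+/* Illustrative sketch (hypothetical helper, not part of the driver): testing
+ * whether a given RGX bridge group is compiled into the mask above.
+ *
+ *   static IMG_BOOL IsRGXBridgeEnabled(IMG_UINT32 ui32BridgeGroup)
+ *   {
+ *       return (gui32RGXBridges &
+ *               (1U << (ui32BridgeGroup - PVRSRV_BRIDGE_RGX_FIRST))) != 0;
+ *   }
+ *
+ * e.g. IsRGXBridgeEnabled(PVRSRV_BRIDGE_RGXHWPERF) is always IMG_TRUE, while
+ * the RGXRAY result depends on RGX_FEATURE_RAY_TRACING.
+ */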
+
+/* bit field representing which RGX bridge groups may optionally not
+ * be present in the server
+ */
+
+#define RGX_BRIDGES_OPTIONAL \
+ ( \
+ 0 /* no RGX bridges are currently optional */ \
+ )
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* __RGX_BRIDGE_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_common.h b/drivers/gpu/drm/img-rogue/1.10/rgx_common.h
new file mode 100644
index 00000000000000..49ef13b6baa6a3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_common.h
@@ -0,0 +1,219 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Common Types and Defines Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common types and definitions for RGX software
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_COMMON_H_
+#define RGX_COMMON_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+
+/* Included to get the BVNC_KM_N defined and other feature defs */
+#include "km/rgxdefs_km.h"
+
+/*! This macro represents a mask of LSBs that must be zero on data structure
+ * sizes and offsets to ensure they are 8-byte granular on types shared between
+ * the FW and host driver */
+#define RGX_FW_ALIGNMENT_LSB (7)
+
+/*! Macro to test structure size alignment */
+#define RGX_FW_STRUCT_SIZE_ASSERT(_a) \
+ static_assert((sizeof(_a) & RGX_FW_ALIGNMENT_LSB) == 0, \
+ "Size of " #_a " is not properly aligned")
+
+/*! Macro to test structure member alignment */
+#define RGX_FW_STRUCT_OFFSET_ASSERT(_a, _b) \
+ static_assert((offsetof(_a, _b) & RGX_FW_ALIGNMENT_LSB) == 0, \
+ "Offset of " #_a "." #_b " is not properly aligned")
+
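+/* Example usage (hypothetical FW-shared structure): the asserts are placed
+ * next to a type definition so a misaligned layout fails at compile time.
+ *
+ *   typedef struct
+ *   {
+ *       IMG_UINT64 RGXFW_ALIGN ui64Timestamp;
+ *       IMG_UINT32             ui32Flags;
+ *       IMG_UINT32             ui32Padding;
+ *   } EXAMPLE_SHARED_STRUCT;
+ *   RGX_FW_STRUCT_SIZE_ASSERT(EXAMPLE_SHARED_STRUCT);
+ *   RGX_FW_STRUCT_OFFSET_ASSERT(EXAMPLE_SHARED_STRUCT, ui64Timestamp);
+ */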
+
+/* The following enum assumes that only one of RGX_FEATURE_TLA and
+ * RGX_FEATURE_FASTRENDER_DM is present. If that ever stops being true,
+ * fail the build so the code can be fixed */
+#if defined (RGX_FEATURE_TLA) && defined (RGX_FEATURE_FASTRENDER_DM)
+#error "Both RGX_FEATURE_TLA and RGX_FEATURE_FASTRENDER_DM defined. Fix code to handle this!"
+#endif
+
+/*! The master definition for the data masters known to the RGX firmware.
+ * When a new DM is added to this enum, a corresponding entry must be added
+ * to the RGX_HWPERF_DM enum.
+ * The DM in a V1 HWPerf packet uses this definition. */
+typedef enum _RGXFWIF_DM_
+{
+ RGXFWIF_DM_GP = 0,
+
+	/* Either the TDM or the 2D DM is present; the build-time error above verifies this */
+ RGXFWIF_DM_2D = 1, /* when RGX_FEATURE_TLA defined */
+ RGXFWIF_DM_TDM = 1, /* when RGX_FEATURE_FASTRENDER_DM defined */
+
+ RGXFWIF_DM_TA = 2,
+ RGXFWIF_DM_3D = 3,
+ RGXFWIF_DM_CDM = 4,
+
+ /* present on Ray cores only */
+ RGXFWIF_DM_RTU = 5,
+ RGXFWIF_DM_SHG = 6,
+
+ RGXFWIF_DM_LAST,
+
+ RGXFWIF_DM_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
+} RGXFWIF_DM;
+
+typedef enum _RGX_KICK_TYPE_DM_
+{
+ RGX_KICK_TYPE_DM_GP = 1 << 0,
+ RGX_KICK_TYPE_DM_TDM_2D = 1 << 1,
+ RGX_KICK_TYPE_DM_TA = 1 << 2,
+ RGX_KICK_TYPE_DM_3D = 1 << 3,
+ RGX_KICK_TYPE_DM_CDM = 1 << 4,
+ RGX_KICK_TYPE_DM_RTU = 1 << 5,
+ RGX_KICK_TYPE_DM_SHG = 1 << 6,
+ RGX_KICK_TYPE_DM_TQ2D = 1 << 7,
+ RGX_KICK_TYPE_DM_TQ3D = 1 << 8,
+ RGX_KICK_TYPE_DM_LAST = 1 << 9
+} RGX_KICK_TYPE_DM;
+
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_DEFAULT_MAX (7)
+
+#if !defined(__KERNEL__)
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFWIF_DM_MAX_MTS 8
+#else
+#define RGXFWIF_DM_MAX_MTS 6
+#endif
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM, SHG, RTU */
+#define RGXFWIF_DM_MAX (7)
+#else
+/* Maximum number of DMs in use: GP, 2D/TDM, TA, 3D, CDM */
+#define RGXFWIF_DM_MAX (5)
+#endif
+#define RGXFWIF_HWDM_MAX (RGXFWIF_DM_MAX)
+#else
+ #define RGXFWIF_DM_MIN_MTS_CNT (6)
+ #define RGXFWIF_RAY_TRACING_DM_MTS_CNT (2)
+ #define RGXFWIF_DM_MIN_CNT (5)
+ #define RGXFWIF_RAY_TRACING_DM_CNT (2)
+ #define RGXFWIF_DM_MAX (RGXFWIF_DM_MIN_CNT + RGXFWIF_RAY_TRACING_DM_CNT)
+#endif
+
+/* Min/Max number of HW DMs (all but GP) */
+#if defined(RGX_FEATURE_TLA) || defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFWIF_HWDM_MIN (1)
+#else
+#define RGXFWIF_HWDM_MIN (2)
+#endif
+
+/*
+ * Data Master Tags to be appended to resources created on behalf of each RGX
+ * Context.
+ */
+#define RGX_RI_DM_TAG_KS 'K'
+#define RGX_RI_DM_TAG_CDM 'C'
+#define RGX_RI_DM_TAG_RC        'R' /* To be removed once TA/3D Timelines are split */
+#define RGX_RI_DM_TAG_TA 'V'
+#define RGX_RI_DM_TAG_3D 'P'
+#define RGX_RI_DM_TAG_TDM 'T'
+#define RGX_RI_DM_TAG_TQ2D '2'
+#define RGX_RI_DM_TAG_TQ3D 'Q'
+
+/*
+ * Client API Tags to be appended to resources created on behalf of each
+ * Client API.
+ */
+#define RGX_RI_CLIENT_API_GLES1 '1'
+#define RGX_RI_CLIENT_API_GLES3 '3'
+#define RGX_RI_CLIENT_API_VULKAN 'V'
+#define RGX_RI_CLIENT_API_EGL 'E'
+#define RGX_RI_CLIENT_API_OPENCL 'C'
+#define RGX_RI_CLIENT_API_OPENGL 'G'
+#define RGX_RI_CLIENT_API_SERVICES 'S'
+#define RGX_RI_CLIENT_API_WSEGL 'W'
+#define RGX_RI_CLIENT_API_ANDROID 'A'
+#define RGX_RI_CLIENT_API_LWS 'L'
+
+/*
+ * Format a RI annotation for a given RGX Data Master context
+ */
+#define RGX_RI_FORMAT_DM_ANNOTATION(annotation, dmTag, clientAPI) do \
+ { \
+ annotation[0] = dmTag; \
+ annotation[1] = clientAPI; \
+ annotation[2] = '\0'; \
+ } while (0)
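+/* Example usage (hypothetical buffer): annotating a CDM context created by an
+ * OpenCL client produces the two-character string "CC".
+ *
+ *   IMG_CHAR aszAnnotation[3];
+ *   RGX_RI_FORMAT_DM_ANNOTATION(aszAnnotation,
+ *                               RGX_RI_DM_TAG_CDM,
+ *                               RGX_RI_CLIENT_API_OPENCL);
+ */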
+
+/*!
+ ******************************************************************************
+ * RGXFW Compiler alignment definitions
+ *****************************************************************************/
+#if defined(__GNUC__) || defined(HAS_GNUC_ATTRIBUTES)
+#define RGXFW_ALIGN __attribute__ ((aligned (8)))
+#elif defined(_MSC_VER)
+#define RGXFW_ALIGN __declspec(align(8))
+#pragma warning (disable : 4324)
+#else
+#error "Alignment macros need to be defined for this compiler"
+#endif
+
+/*!
+ ******************************************************************************
+ * Force 8-byte alignment for structures allocated uncached.
+ *****************************************************************************/
+#define UNCACHED_ALIGN RGXFW_ALIGN
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_COMMON_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.c b/drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.c
new file mode 100644
index 00000000000000..aa61e22f01e3bd
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.c
@@ -0,0 +1,218 @@
+/*************************************************************************/ /*!
+@File           rgx_compat_bvnc.c
+@Title BVNC compatibility check utilities
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used for packing BNC and V.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_compat_bvnc.h"
+#if defined(RGX_FIRMWARE)
+#include "rgxfw_utils.h"
+#elif !defined(RGX_BUILD_BINARY)
+#include "pvr_debug.h"
+#endif
+
+#if defined(RGX_FIRMWARE)
+#define PVR_COMPAT_ASSERT RGXFW_ASSERT
+#elif !defined(RGX_BUILD_BINARY)
+#define PVR_COMPAT_ASSERT PVR_ASSERT
+#else
+#include <assert.h>
+#define PVR_COMPAT_ASSERT assert
+#endif
+
+/**************************************************************************//**
+ * Local implementation of the C library strlen function.
+ *****************************************************************************/
+static INLINE __maybe_unused IMG_UINT32 OSStringLength(const IMG_CHAR* pszInput)
+{
+ const IMG_CHAR* pszTemp = pszInput;
+
+ while (*pszTemp)
+ pszTemp++;
+
+ return (pszTemp - pszInput);
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC
+ *****************************************************************************/
+static INLINE IMG_UINT64 rgx_bnc_pack(IMG_UINT32 ui32B, IMG_UINT32 ui32N,
+ IMG_UINT32 ui32C)
+{
+ /*
+ * Test for input B, N and C exceeding max bit width.
+ */
+ PVR_COMPAT_ASSERT((ui32B & (~(RGX_BVNC_PACK_MASK_B >> RGX_BVNC_PACK_SHIFT_B))) == 0);
+ PVR_COMPAT_ASSERT((ui32N & (~(RGX_BVNC_PACK_MASK_N >> RGX_BVNC_PACK_SHIFT_N))) == 0);
+ PVR_COMPAT_ASSERT((ui32C & (~(RGX_BVNC_PACK_MASK_C >> RGX_BVNC_PACK_SHIFT_C))) == 0);
+
+ return (((IMG_UINT64)ui32B << RGX_BVNC_PACK_SHIFT_B) |
+ ((IMG_UINT64)ui32N << RGX_BVNC_PACK_SHIFT_N) |
+ ((IMG_UINT64)ui32C << RGX_BVNC_PACK_SHIFT_C));
+}
+
+/**************************************************************************//**
+ * Utility function for packing BNC and V, used by the compatibility check.
+ * BNC is packed into 48-bit format.
+ * If the array pointed to by pszV is a string that is shorter than
+ * ui32OutVMaxLen characters, null characters are appended to the copy in the
+ * array pointed to by pszOutV, until 'ui32OutVMaxLen' characters in all have
+ * been written.
+ *
+ * @param pui64OutBNC     Output containing the packed BNC.
+ * @param pszOutV         Output containing the version string.
+ * @param ui32OutVMaxLen  Maximum number of characters that can be written to
+ *                        pszOutV (excluding the terminating null character)
+ * @param ui32B Input 'B' value
+ * @param pszV Input 'V' string
+ * @param ui32N Input 'N' value
+ * @param ui32C Input 'C' value
+ * @return None
+ *****************************************************************************/
+void rgx_bvnc_packed(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+ *pui64OutBNC = rgx_bnc_pack(ui32B, ui32N, ui32C);
+
+ if (!pszOutV)
+ return;
+
+ if (pszV)
+ {
+ /*
+		 * The assert can fail for two reasons:
+		 *  1. the caller passed an invalid 'V' string, or
+		 *  2. the destination buffer is too small for the maximum 'V' size.
+		 */
+		PVR_COMPAT_ASSERT(OSStringLength(pszV) <= ui32OutVMaxLen);
+
+ for (; ui32OutVMaxLen > 0 && *pszV != '\0'; --ui32OutVMaxLen)
+ {
+			/* When copying the V, omit any non-numeric characters as
+			 * these would cause the compatibility check against the V
+			 * read from HW to fail
+			 */
+ if (*pszV && (*pszV >= '0') && (*pszV <='9'))
+ {
+ *pszOutV++ = *pszV++;
+ }
+ else
+ {
+ pszV++;
+ }
+ }
+ }
+
+ do
+ {
+ *pszOutV++ = '\0';
+	} while (ui32OutVMaxLen-- > 0);
+}
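+/* Example usage (hypothetical B.V.N.C values): packing 4.31.2.51 for the
+ * compatibility check; only the numeric characters of the V string are kept.
+ *
+ *   IMG_UINT64 ui64BNC;
+ *   IMG_CHAR aszV[8];
+ *   rgx_bvnc_packed(&ui64BNC, aszV, sizeof(aszV) - 1, 4, "31", 2, 51);
+ */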
+
+/**************************************************************************//**
+ * Utility function for packing BNC and V, used by the compatibility check.
+ * Input B, N and C are packed into 48-bit format.
+ * Input V is converted into a string. If the number of characters required to
+ * represent the 16-bit wide version number is less than ui32OutVMaxLen, then
+ * null characters are appended to pszOutV until ui32OutVMaxLen characters in
+ * all have been written.
+ *
+ * @param pui64OutBNC     Output containing the packed BNC.
+ * @param pszOutV         Output containing the version string.
+ * @param ui32OutVMaxLen  Maximum number of characters that can be written to
+ *                        pszOutV (excluding the terminating null character)
+ * @param ui32B Input 'B' value (16 bit wide)
+ * @param ui32V Input 'V' value (16 bit wide)
+ * @param ui32N Input 'N' value (16 bit wide)
+ * @param ui32C Input 'C' value (16 bit wide)
+ * @return None
+ *****************************************************************************/
+void rgx_bvnc_pack_hw(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C)
+{
+ /*
+	 * Allocate space for the maximum number of digits required to represent
+	 * a 16-bit wide version number (including the NULL terminating character).
+ */
+ IMG_CHAR aszBuf[6];
+ IMG_CHAR *pszPointer = aszBuf;
+
+ *pui64OutBNC = rgx_bnc_pack(ui32B, ui32N, ui32C);
+
+ if (!pszOutV)
+ return;
+
+ /*
+	 * This function only supports 16-bit wide version numbers.
+ */
+ PVR_COMPAT_ASSERT((ui32V & ~0xFFFF) == 0);
+
+	if (ui32V > 9999)
+		pszPointer += 5;
+	else if (ui32V > 999)
+		pszPointer += 4;
+	else if (ui32V > 99)
+		pszPointer += 3;
+	else if (ui32V > 9)
+		pszPointer += 2;
+	else
+		pszPointer += 1;
+
+ *pszPointer-- = '\0';
+ *pszPointer = '0';
+
+ while (ui32V > 0)
+ {
+ *pszPointer-- = (ui32V % 10) + '0';
+ ui32V /= 10;
+ }
+
+ for (pszPointer = aszBuf; ui32OutVMaxLen > 0 && *pszPointer != '\0'; --ui32OutVMaxLen)
+ *pszOutV++ = *pszPointer++;
+
+ /*
+ * Append NULL characters.
+ */
+ do
+ {
+ *pszOutV++ = '\0';
+	} while (ui32OutVMaxLen-- > 0);
+}
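+/* Example usage (hypothetical values read back from hardware registers):
+ * V is rendered as its decimal string, "31" here.
+ *
+ *   IMG_UINT64 ui64HWBNC;
+ *   IMG_CHAR aszHWV[8];
+ *   rgx_bvnc_pack_hw(&ui64HWBNC, aszHWV, sizeof(aszHWV) - 1, 4, 31, 2, 51);
+ */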
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.h b/drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.h
new file mode 100644
index 00000000000000..51e5d76d93e651
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_compat_bvnc.h
@@ -0,0 +1,136 @@
+/*************************************************************************/ /*!
+@File
+@Title          Functions for BVNC manipulation
+
+@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Utility functions for packing and comparing BVNC version
+                information, used by the driver/firmware compatibility check.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_COMPAT_BVNC_H__)
+#define __RGX_COMPAT_BVNC_H__
+
+#include "img_types.h"
+
+/* 32/64-bit endianness conversion macros */
+#if defined(__BIG_ENDIAN__)
+#define RGX_INT64_TO_BE(N) (N)
+#define RGX_INT64_FROM_BE(N) (N)
+#define RGX_INT32_TO_BE(N) (N)
+#define RGX_INT32_FROM_BE(N) (N)
+#else
+#define RGX_INT64_TO_BE(N) \
+ ((((N) >> 56) & 0xff) \
+ | (((N) >> 40) & 0xff00) \
+ | (((N) >> 24) & 0xff0000) \
+ | (((N) >> 8) & 0xff000000) \
+ | ((N) << 56) \
+ | (((N) & 0xff00) << 40) \
+ | (((N) & 0xff0000) << 24) \
+ | (((N) & 0xff000000) << 8))
+#define RGX_INT64_FROM_BE(N) RGX_INT64_TO_BE(N)
+
+#define RGX_INT32_TO_BE(N) \
+ ((((N) >> 24) & 0xff) \
+ | (((N) >> 8) & 0xff00) \
+ | ((N) << 24) \
+	 | (((N) & 0xff00) << 8))
+#define RGX_INT32_FROM_BE(N) RGX_INT32_TO_BE(N)
+#endif
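+/* Worked example: on a little-endian build, RGX_INT32_TO_BE(0x11223344)
+ * evaluates to 0x44332211, so a value stored with RGX_INT64_TO_BE can be
+ * read back with RGX_INT64_FROM_BE whatever the host endianness. */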
+
+/******************************************************************************
+ * RGX version packed into 48-bit (BNC) and string (V) formats, used by the Compatibility Check
+ *****************************************************************************/
+
+#define RGX_BVNC_PACK_SHIFT_B 32
+#define RGX_BVNC_PACK_SHIFT_N 16
+#define RGX_BVNC_PACK_SHIFT_C 0
+
+#define RGX_BVNC_PACK_MASK_B (IMG_UINT64_C(0x0000FFFF00000000))
+#define RGX_BVNC_PACK_MASK_N (IMG_UINT64_C(0x00000000FFFF0000))
+#define RGX_BVNC_PACK_MASK_C (IMG_UINT64_C(0x000000000000FFFF))
+
+#define RGX_BVNC_PACKED_EXTR_B(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_B) >> RGX_BVNC_PACK_SHIFT_B))
+#define RGX_BVNC_PACKED_EXTR_V(BVNC) ((BVNC).aszV)
+#define RGX_BVNC_PACKED_EXTR_N(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_N) >> RGX_BVNC_PACK_SHIFT_N))
+#define RGX_BVNC_PACKED_EXTR_C(BVNC) ((IMG_UINT32)(((BVNC).ui64BNC & RGX_BVNC_PACK_MASK_C) >> RGX_BVNC_PACK_SHIFT_C))
+
+#if !defined(RGX_SKIP_BVNC_CHECK)
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do { \
+ (lenmax) = IMG_FALSE; \
+ (bnc) = IMG_FALSE; \
+ (v) = IMG_FALSE; \
+ (version) = ((L).ui32LayoutVersion == (R).ui32LayoutVersion); \
+ if (version) \
+ { \
+ (lenmax) = ((L).ui32VLenMax == (R).ui32VLenMax); \
+ } \
+ if (lenmax) \
+ { \
+ (bnc) = ((L).ui64BNC == (R).ui64BNC); \
+ } \
+ if (bnc) \
+ { \
+ (L).aszV[(L).ui32VLenMax] = '\0'; \
+ (R).aszV[(R).ui32VLenMax] = '\0'; \
+ (v) = (OSStringCompare((L).aszV, (R).aszV)==0); \
+ } \
+ (all) = (version) && (lenmax) && (bnc) && (v); \
+ } while (0)
+#else
+#define RGX_BVNC_EQUAL(L,R,all,version,lenmax,bnc,v) do { \
+		(all) = IMG_TRUE; \
+		(version) = IMG_TRUE; \
+		(lenmax) = IMG_TRUE; \
+		(bnc) = IMG_TRUE; \
+		(v) = IMG_TRUE; \
+	} while (0)
+
+#endif
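+/* Example usage (hypothetical BVNC structs with the ui32LayoutVersion,
+ * ui32VLenMax, ui64BNC and aszV fields referenced above):
+ *
+ *   IMG_BOOL bAll, bVersion, bLenMax, bBNC, bV;
+ *   RGX_BVNC_EQUAL(sFWBVNC, sHWBVNC, bAll, bVersion, bLenMax, bBNC, bV);
+ *   // if bAll is IMG_FALSE, the individual flags identify which of the
+ *   // layout version, V length, BNC or V string comparisons failed.
+ */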
+
+void rgx_bvnc_packed(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_CHAR *pszV, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+void rgx_bvnc_pack_hw(IMG_UINT64 *pui64OutBNC, IMG_CHAR *pszOutV, IMG_UINT32 ui32OutVMaxLen,
+ IMG_UINT32 ui32B, IMG_UINT32 ui32V, IMG_UINT32 ui32N, IMG_UINT32 ui32C);
+
+#endif /* __RGX_COMPAT_BVNC_H__ */
+
+/******************************************************************************
+ End of file (rgx_compat_bvnc.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_firmware_processor.h b/drivers/gpu/drm/img-rogue/1.10/rgx_firmware_processor.h
new file mode 100644
index 00000000000000..2b2daef3e561c0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_firmware_processor.h
@@ -0,0 +1,92 @@
+/*************************************************************************/ /*!
+@File rgx_firmware_processor.h
+@Title          RGX firmware processor definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform RGX
+@Description Generic include file for firmware processors (META and MIPS)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#if !defined(RGX_FIRMWARE_PROCESSOR_H)
+#define RGX_FIRMWARE_PROCESSOR_H
+
+#include "km/rgxdefs_km.h"
+
+#include "rgx_meta.h"
+#include "rgx_mips.h"
+
+/* Processor-independent definitions common to all firmware processors are defined here */
+typedef enum
+{
+ FW_PERF_CONF_NONE = 0,
+ FW_PERF_CONF_ICACHE = 1,
+ FW_PERF_CONF_DCACHE = 2,
+ FW_PERF_CONF_POLLS = 3,
+ FW_PERF_CONF_CUSTOM_TIMER = 4,
+ FW_PERF_CONF_JTLB_INSTR = 5,
+ FW_PERF_CONF_INSTRUCTIONS = 6
+} FW_PERF_CONF;
+
+#if !defined(__KERNEL__)
+ #if defined(RGX_FEATURE_MIPS)
+
+ #define FW_CORE_ID_VALUE RGXMIPSFW_CORE_ID_VALUE
+ #define RGXFW_PROCESSOR RGXFW_PROCESSOR_MIPS
+
+ /* Firmware to host interrupts defines */
+ #define RGXFW_CR_IRQ_STATUS RGX_CR_MIPS_WRAPPER_IRQ_STATUS
+ #define RGXFW_CR_IRQ_STATUS_EVENT_EN RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN
+ #define RGXFW_CR_IRQ_CLEAR RGX_CR_MIPS_WRAPPER_IRQ_CLEAR
+ #define RGXFW_CR_IRQ_CLEAR_MASK RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN
+
+ #else
+
+ #define RGXFW_PROCESSOR RGXFW_PROCESSOR_META
+
+ /* Firmware to host interrupts defines */
+ #define RGXFW_CR_IRQ_STATUS RGX_CR_META_SP_MSLVIRQSTATUS
+ #define RGXFW_CR_IRQ_STATUS_EVENT_EN RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN
+ #define RGXFW_CR_IRQ_CLEAR RGX_CR_META_SP_MSLVIRQSTATUS
+ #define RGXFW_CR_IRQ_CLEAR_MASK (RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK & \
+ RGX_CR_META_SP_MSLVIRQSTATUS_MASKFULL)
+
+ #endif
+#endif
+
+#endif /* RGX_FIRMWARE_PROCESSOR_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif.h
new file mode 100644
index 00000000000000..d7cc3711ac5a56
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif.h
@@ -0,0 +1,562 @@
+/*************************************************************************/ /*!
+@File rgx_fwif.h
+@Title RGX firmware interface structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by srvinit and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_H__)
+#define __RGX_FWIF_H__
+
+#include "rgx_firmware_processor.h"
+#include "rgx_fwif_shared.h"
+
+/*************************************************************************/ /*!
+ Logging type
+*/ /**************************************************************************/
+#define RGXFWIF_LOG_TYPE_NONE 0x00000000
+#define RGXFWIF_LOG_TYPE_TRACE 0x00000001
+#define RGXFWIF_LOG_TYPE_GROUP_MAIN 0x00000002
+#define RGXFWIF_LOG_TYPE_GROUP_MTS 0x00000004
+#define RGXFWIF_LOG_TYPE_GROUP_CLEANUP 0x00000008
+#define RGXFWIF_LOG_TYPE_GROUP_CSW 0x00000010
+#define RGXFWIF_LOG_TYPE_GROUP_BIF 0x00000020
+#define RGXFWIF_LOG_TYPE_GROUP_PM 0x00000040
+#define RGXFWIF_LOG_TYPE_GROUP_RTD 0x00000080
+#define RGXFWIF_LOG_TYPE_GROUP_SPM 0x00000100
+#define RGXFWIF_LOG_TYPE_GROUP_POW 0x00000200
+#define RGXFWIF_LOG_TYPE_GROUP_HWR 0x00000400
+#define RGXFWIF_LOG_TYPE_GROUP_HWP 0x00000800
+#define RGXFWIF_LOG_TYPE_GROUP_RPM 0x00001000
+#define RGXFWIF_LOG_TYPE_GROUP_DMA 0x00002000
+#define RGXFWIF_LOG_TYPE_GROUP_MISC 0x00004000
+#define RGXFWIF_LOG_TYPE_GROUP_DEBUG 0x80000000
+#define RGXFWIF_LOG_TYPE_GROUP_MASK 0x80007FFE
+#define RGXFWIF_LOG_TYPE_MASK 0x80007FFF
+
+/* String used in pvrdebug -h output */
+#define RGXFWIF_LOG_GROUPS_STRING_LIST "main,mts,cleanup,csw,bif,pm,rtd,spm,pow,hwr,hwp,rpm,dma,misc,debug"
+
+/* Table entry to map log group strings to log type value */
+typedef struct {
+ const IMG_CHAR* pszLogGroupName;
+ IMG_UINT32 ui32LogGroupType;
+} RGXFWIF_LOG_GROUP_MAP_ENTRY;
+
+/*
+ Macro for use with the RGXFWIF_LOG_GROUP_MAP_ENTRY type to create a lookup
+ table where needed. Keep log group names short, no more than 20 chars.
+*/
+#define RGXFWIF_LOG_GROUP_NAME_VALUE_MAP { "none", RGXFWIF_LOG_TYPE_NONE }, \
+ { "main", RGXFWIF_LOG_TYPE_GROUP_MAIN }, \
+ { "mts", RGXFWIF_LOG_TYPE_GROUP_MTS }, \
+ { "cleanup", RGXFWIF_LOG_TYPE_GROUP_CLEANUP }, \
+ { "csw", RGXFWIF_LOG_TYPE_GROUP_CSW }, \
+ { "bif", RGXFWIF_LOG_TYPE_GROUP_BIF }, \
+ { "pm", RGXFWIF_LOG_TYPE_GROUP_PM }, \
+ { "rtd", RGXFWIF_LOG_TYPE_GROUP_RTD }, \
+ { "spm", RGXFWIF_LOG_TYPE_GROUP_SPM }, \
+ { "pow", RGXFWIF_LOG_TYPE_GROUP_POW }, \
+ { "hwr", RGXFWIF_LOG_TYPE_GROUP_HWR }, \
+ { "hwp", RGXFWIF_LOG_TYPE_GROUP_HWP }, \
+ { "rpm", RGXFWIF_LOG_TYPE_GROUP_RPM }, \
+ { "dma", RGXFWIF_LOG_TYPE_GROUP_DMA }, \
+ { "debug", RGXFWIF_LOG_TYPE_GROUP_DEBUG }
+
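+/* Example usage (hypothetical table): instantiating the lookup table used to
+ * translate log group names into RGXFWIF_LOG_TYPE_* bits.
+ *
+ *   static const RGXFWIF_LOG_GROUP_MAP_ENTRY asLogGroups[] = {
+ *       RGXFWIF_LOG_GROUP_NAME_VALUE_MAP
+ *   };
+ *   // asLogGroups[1] maps "main" to RGXFWIF_LOG_TYPE_GROUP_MAIN
+ */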
+
+/* Used in print statements to display log group state, one %s per group defined */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+
+/* Used in a print statement to display log group state, one per group */
+#define RGXFWIF_LOG_ENABLED_GROUPS_LIST(types) (((types) & RGXFWIF_LOG_TYPE_GROUP_MAIN) ?("main ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_MTS) ?("mts ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_CLEANUP) ?("cleanup ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_CSW) ?("csw ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_BIF) ?("bif ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_PM) ?("pm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_RTD) ?("rtd ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_SPM) ?("spm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_POW) ?("pow ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWR) ?("hwr ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_HWP) ?("hwp ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_RPM) ?("rpm ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_DMA) ?("dma ") :("")), \
+ (((types) & RGXFWIF_LOG_TYPE_GROUP_DEBUG) ?("debug ") :(""))
+
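+/* Example usage (hypothetical printf-style logger): the format-specifier and
+ * argument-list macros above are designed to be used as a pair.
+ *
+ *   printf("FW log groups: " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC "\n",
+ *          RGXFWIF_LOG_ENABLED_GROUPS_LIST(ui32LogType));
+ */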
+
+/*! Logging function */
+typedef void (*PFN_RGXFW_LOG) (const IMG_CHAR* pszFmt, ...);
+
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+#define RGXFW_SIG_BUFFER_SIZE_MIN (8192)
+
+/*!
+ ******************************************************************************
+ * HWPERF
+ *****************************************************************************/
+/* Size limits for the Firmware L1 HWPerf buffer, in KB (the default is 2MB).
+ * The buffer is accessed by the Firmware and host driver. */
+#define RGXFW_HWPERF_L1_SIZE_MIN (16U)
+#define RGXFW_HWPERF_L1_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFFWBUFSIZEINKB
+#define RGXFW_HWPERF_L1_SIZE_MAX (12288U)
+
+/* This padding value must always be large enough to hold the biggest
+ * variable sized packet. */
+#define RGXFW_HWPERF_L1_PADDING_DEFAULT (RGX_HWPERF_MAX_PACKET_SIZE)
+
+
+/*!
+ ******************************************************************************
+ * Trace Buffer
+ *****************************************************************************/
+
+/*! Number of elements on each line when dumping the trace buffer */
+#define RGXFW_TRACE_BUFFER_LINESIZE (30)
+
+/*! Total size of the RGXFWIF_TRACEBUF trace buffer, in dwords (must be a multiple of RGXFW_TRACE_BUFFER_LINESIZE) */
+#define RGXFW_TRACE_BUFFER_SIZE (400*RGXFW_TRACE_BUFFER_LINESIZE)
+#define RGXFW_TRACE_BUFFER_ASSERT_SIZE 200
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+#define RGXFW_THREAD_NUM 2
+#else
+#define RGXFW_THREAD_NUM 1
+#endif
+
+#define RGXFW_POLL_TYPE_SET 0x80000000
+
+typedef struct _RGXFWIF_FILE_INFO_BUF_
+{
+ IMG_CHAR szPath[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+ IMG_CHAR szInfo[RGXFW_TRACE_BUFFER_ASSERT_SIZE];
+ IMG_UINT32 ui32LineNum;
+} UNCACHED_ALIGN RGXFWIF_FILE_INFO_BUF;
+
+typedef struct _RGXFWIF_TRACEBUF_SPACE_
+{
+ IMG_UINT32 ui32TracePointer;
+
+#if defined (RGX_FIRMWARE)
+ IMG_UINT32 *pui32RGXFWIfTraceBuffer; /* To be used by firmware for writing into trace buffer */
+#else
+ RGXFWIF_DEV_VIRTADDR pui32RGXFWIfTraceBuffer;
+#endif
+ IMG_PUINT32 pui32TraceBuffer; /* To be used by host when reading from trace buffer */
+
+ RGXFWIF_FILE_INFO_BUF sAssertBuf;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF_SPACE;
+
+
+#define RGXFWIF_FWFAULTINFO_MAX (8) /* Total number of FW fault logs stored */
+
+typedef struct _RGX_FWFAULTINFO_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer;
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer;
+ IMG_UINT32 RGXFW_ALIGN ui32Data;
+ RGXFWIF_FILE_INFO_BUF sFaultBuf;
+} UNCACHED_ALIGN RGX_FWFAULTINFO;
+
+
+#define RGXFWIF_POW_STATES \
+ X(RGXFWIF_POW_OFF) /* idle and handshaked with the host (ready to full power down) */ \
+ X(RGXFWIF_POW_ON) /* running HW commands */ \
+ X(RGXFWIF_POW_FORCED_IDLE) /* forced idle */ \
+ X(RGXFWIF_POW_IDLE) /* idle waiting for host handshake */
+
+typedef enum _RGXFWIF_POW_STATE_
+{
+#define X(NAME) NAME,
+ RGXFWIF_POW_STATES
+#undef X
+} RGXFWIF_POW_STATE;
+
+/* Firmware HWR states */
+#define RGXFWIF_HWR_HARDWARE_OK (0x1 << 0) /*!< The HW state is ok or locked up */
+#define RGXFWIF_HWR_ANALYSIS_DONE (0x1 << 2) /*!< The analysis of a GPU lockup has been performed */
+#define RGXFWIF_HWR_GENERAL_LOCKUP (0x1 << 3) /*!< A DM unrelated lockup has been detected */
+#define RGXFWIF_HWR_DM_RUNNING_OK (0x1 << 4) /*!< At least one DM is running without being close to a lockup */
+#define RGXFWIF_HWR_DM_STALLING (0x1 << 5) /*!< At least one DM is close to lockup */
+#define RGXFWIF_HWR_FW_FAULT (0x1 << 6) /*!< The FW has faulted and needs to restart */
+#define RGXFWIF_HWR_RESTART_REQUESTED (0x1 << 7) /*!< The FW has requested the host to restart it */
+typedef IMG_UINT32 RGXFWIF_HWR_STATEFLAGS;
+
+/* Firmware per-DM HWR states */
+#define RGXFWIF_DM_STATE_WORKING (0x00) /*!< DM is working if all flags are cleared */
+#define RGXFWIF_DM_STATE_READY_FOR_HWR (0x1 << 0) /*!< DM is idle and ready for HWR */
+#define RGXFWIF_DM_STATE_NEEDS_SKIP (0x1 << 2) /*!< DM need to skip to next cmd before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP (0x1 << 3) /*!< DM need partial render cleanup before resuming processing */
+#define RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR (0x1 << 4) /*!< DM need to increment Recovery Count once fully recovered */
+#define RGXFWIF_DM_STATE_GUILTY_LOCKUP (0x1 << 5) /*!< DM was identified as locking up and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_LOCKUP (0x1 << 6) /*!< DM was innocently affected by another lockup which caused HWR */
+#define RGXFWIF_DM_STATE_GUILTY_OVERRUNING (0x1 << 7) /*!< DM was identified as over-running and causing HWR */
+#define RGXFWIF_DM_STATE_INNOCENT_OVERRUNING (0x1 << 8) /*!< DM was innocently affected by another DM over-running which caused HWR */
+#define RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH (0x1 << 9) /*!< DM was forced into HWR as it delayed more important workloads */
+
+/* Per-OSid States */
+#define RGXFW_OS_STATE_ACTIVE_OS (1 << 0) /*!< Non active operating systems should not be served by the FW */
+#define RGXFW_OS_STATE_FREELIST_OK (1 << 1) /*!< Pending freelist reconstruction from that particular OS */
+#define RGXFW_OS_STATE_OFFLOADING (1 << 2) /*!< Transient state while all the OS resources in the FW are cleaned up */
+#define RGXFW_OS_STATE_GROW_REQUEST_PENDING (1 << 3) /*!< Signifies whether a request to grow a freelist is pending completion */
+
+typedef IMG_UINT32 RGXFWIF_HWR_RECOVERYFLAGS;
+
+typedef struct _RGXFWIF_TRACEBUF_
+{
+ IMG_UINT32 ui32LogType;
+ volatile RGXFWIF_POW_STATE ePowState;
+ RGXFWIF_TRACEBUF_SPACE sTraceBuf[RGXFW_THREAD_NUM];
+
+ IMG_UINT32 aui32HwrDmLockedUpCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 aui32HwrDmOverranCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 aui32HwrDmRecoveredCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 aui32HwrDmFalseDetectCount[RGXFWIF_DM_DEFAULT_MAX];
+ IMG_UINT32 ui32HwrCounter;
+
+ IMG_UINT32 aui32CrPollAddr[RGXFW_THREAD_NUM];
+ IMG_UINT32 aui32CrPollMask[RGXFW_THREAD_NUM];
+
+ RGXFWIF_HWR_STATEFLAGS ui32HWRStateFlags;
+ RGXFWIF_HWR_RECOVERYFLAGS aui32HWRRecoveryFlags[RGXFWIF_DM_DEFAULT_MAX];
+
+ volatile IMG_UINT32 ui32HWPerfRIdx;
+ volatile IMG_UINT32 ui32HWPerfWIdx;
+ volatile IMG_UINT32 ui32HWPerfWrapCount;
+ IMG_UINT32 ui32HWPerfSize; /* Constant after setup, needed in FW */
+ IMG_UINT32 ui32HWPerfDropCount; /* The number of times the FW drops a packet due to buffer full */
+
+ /* These next three items are only valid at runtime when the FW is built
+ * with RGX_HWPERF_UTILIZATION & RGX_HWPERF_DROP_TRACKING defined
+ * in rgxfw_hwperf.c */
+ IMG_UINT32 ui32HWPerfUt; /* Buffer utilisation, high watermark of bytes in use */
+ IMG_UINT32 ui32FirstDropOrdinal;/* The ordinal of the first packet the FW dropped */
+ IMG_UINT32 ui32LastDropOrdinal; /* The ordinal of the last packet the FW dropped */
+
+ volatile IMG_UINT32 aui32InterruptCount[RGXFW_THREAD_NUM]; /*!< Interrupt count from Threads > */
+ IMG_UINT32 ui32KCCBCmdsExecuted;
+ IMG_UINT64 RGXFW_ALIGN ui64StartIdleTime;
+ IMG_UINT32 ui32PowMonEnergy; /* Non-volatile power monitor energy count */
+
+#define RGXFWIF_MAX_PCX 16
+ IMG_UINT32 ui32T1PCX[RGXFWIF_MAX_PCX];
+ IMG_UINT32 ui32T1PCXWOff;
+
+ IMG_UINT32 ui32OSStateFlags[RGXFW_NUM_OS]; /*!< State flags for each Operating System > */
+
+ IMG_UINT32 ui32MMUFlushCounter;
+
+ RGX_FWFAULTINFO sFaultInfo[RGXFWIF_FWFAULTINFO_MAX];
+ IMG_UINT32 ui32FWFaults;
+} UNCACHED_ALIGN RGXFWIF_TRACEBUF;
+
+
+/*!
+ ******************************************************************************
+ * GPU Utilisation
+ *****************************************************************************/
+#define RGXFWIF_GPU_STATS_MAX_VALUE_OF_STATE 10000
+
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW (0U)
+#define RGXFWIF_GPU_UTIL_STATE_IDLE (1U)
+#define RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH (2U)
+#define RGXFWIF_GPU_UTIL_STATE_BLOCKED (3U)
+#define RGXFWIF_GPU_UTIL_STATE_NUM (4U)
+
+#define RGXFWIF_GPU_UTIL_TIME_MASK IMG_UINT64_C(0xFFFFFFFFFFFFFFFC)
+#define RGXFWIF_GPU_UTIL_STATE_MASK IMG_UINT64_C(0x0000000000000003)
+
+#define RGXFWIF_GPU_UTIL_GET_TIME(word) ((word) & RGXFWIF_GPU_UTIL_TIME_MASK)
+#define RGXFWIF_GPU_UTIL_GET_STATE(word) ((word) & RGXFWIF_GPU_UTIL_STATE_MASK)
+
+/* The OS timestamps computed by the FW are approximations of the real time,
+ * which means they could be slightly behind or ahead of the real timer on the
+ * Host. In some cases we can perform subtractions between FW-approximated
+ * timestamps and real OS timestamps, so we need a form of protection against
+ * negative results, for instance when the FW timestamp is slightly ahead.
+ */
+#define RGXFWIF_GPU_UTIL_GET_PERIOD(newtime,oldtime) \
+ ((newtime) > (oldtime) ? ((newtime) - (oldtime)) : 0)
+
+#define RGXFWIF_GPU_UTIL_MAKE_WORD(time,state) \
+ (RGXFWIF_GPU_UTIL_GET_TIME(time) | RGXFWIF_GPU_UTIL_GET_STATE(state))
+
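+/* Worked example (hypothetical timestamp): a state word carries the OS time
+ * in its top 62 bits and the state in its bottom 2 bits.
+ *
+ *   IMG_UINT64 ui64Word =
+ *       RGXFWIF_GPU_UTIL_MAKE_WORD(ui64OSTime, RGXFWIF_GPU_UTIL_STATE_IDLE);
+ *   // RGXFWIF_GPU_UTIL_GET_STATE(ui64Word) == RGXFWIF_GPU_UTIL_STATE_IDLE
+ *   // RGXFWIF_GPU_UTIL_GET_TIME(ui64Word)  == (ui64OSTime & ~3)
+ */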
+
+/* The timer correlation array must be big enough to ensure old entries won't be
+ * overwritten before all the HWPerf events linked to those entries are processed
+ * by the MISR. The update frequency of this array depends on how fast the system
+ * can change state (basically how small the APM latency is) and perform DVFS transitions.
+ *
+ * The minimum size is 2 (not 1) to avoid races where the FW reads an entry
+ * while the Host is updating it. With 2 entries, in the worst case the FW
+ * will read stale data, which is still acceptable if the Host is updating
+ * the timer correlation at that time.
+ */
+#define RGXFWIF_TIME_CORR_ARRAY_SIZE 256
+#define RGXFWIF_TIME_CORR_CURR_INDEX(seqcount) ((seqcount) % RGXFWIF_TIME_CORR_ARRAY_SIZE)
+
+/* Make sure the timer correlation array size is a power of 2 */
+static_assert((RGXFWIF_TIME_CORR_ARRAY_SIZE & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1)) == 0,
+ "RGXFWIF_TIME_CORR_ARRAY_SIZE must be a power of two");
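+/* Because the size is a power of two, the modulo in
+ * RGXFWIF_TIME_CORR_CURR_INDEX is equivalent to a cheap bitwise AND:
+ * (seqcount) & (RGXFWIF_TIME_CORR_ARRAY_SIZE - 1). */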
+
+typedef struct _RGXFWIF_GPU_UTIL_FWCB_
+{
+ RGXFWIF_TIME_CORR sTimeCorr[RGXFWIF_TIME_CORR_ARRAY_SIZE];
+ IMG_UINT32 ui32TimeCorrSeqCount;
+
+ /* Last GPU state + OS time of the last state update */
+ IMG_UINT64 RGXFW_ALIGN ui64LastWord;
+
+ /* Counters for the amount of time the GPU was active/idle/blocked */
+ IMG_UINT64 RGXFW_ALIGN aui64StatsCounters[RGXFWIF_GPU_UTIL_STATE_NUM];
+} UNCACHED_ALIGN RGXFWIF_GPU_UTIL_FWCB;
+
+
+/*!
+ ******************************************************************************
+ * HWR Data
+ *****************************************************************************/
+typedef enum _RGX_HWRTYPE_
+{
+ RGX_HWRTYPE_UNKNOWNFAILURE = 0,
+ RGX_HWRTYPE_OVERRUN = 1,
+ RGX_HWRTYPE_POLLFAILURE = 2,
+ RGX_HWRTYPE_BIF0FAULT = 3,
+ RGX_HWRTYPE_BIF1FAULT = 4,
+ RGX_HWRTYPE_TEXASBIF0FAULT = 5,
+ RGX_HWRTYPE_DPXMMUFAULT = 6,
+ RGX_HWRTYPE_MMUFAULT = 7,
+ RGX_HWRTYPE_MMUMETAFAULT = 8,
+} RGX_HWRTYPE;
+
+#define RGXFWIF_HWRTYPE_BIF_BANK_GET(eHWRType) (((eHWRType) == RGX_HWRTYPE_BIF0FAULT) ? 0 : 1)
+
+#define RGXFWIF_HWRTYPE_PAGE_FAULT_GET(eHWRType) ((eHWRType == RGX_HWRTYPE_BIF0FAULT || \
+ eHWRType == RGX_HWRTYPE_BIF1FAULT || \
+ eHWRType == RGX_HWRTYPE_TEXASBIF0FAULT || \
+ eHWRType == RGX_HWRTYPE_MMUFAULT || \
+ eHWRType == RGX_HWRTYPE_MMUMETAFAULT) ? 1 : 0 )
+
+typedef struct _RGX_BIFINFO_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64BIFReqStatus;
+ IMG_UINT64 RGXFW_ALIGN ui64BIFMMUStatus;
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_BIFINFO;
+
+typedef struct _RGX_MMUINFO_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64MMUStatus;
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< phys address of the page catalogue */
+} RGX_MMUINFO;
+
+typedef struct _RGX_POLLINFO_
+{
+ IMG_UINT32 ui32ThreadNum;
+ IMG_UINT32 ui32CrPollAddr;
+ IMG_UINT32 ui32CrPollMask;
+ IMG_UINT32 ui32CrPollLastValue;
+} UNCACHED_ALIGN RGX_POLLINFO;
+
+typedef struct _RGX_HWRINFO_
+{
+ union
+ {
+ RGX_BIFINFO sBIFInfo;
+ RGX_MMUINFO sMMUInfo;
+ RGX_POLLINFO sPollInfo;
+ } uHWRData;
+
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimer;
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimer;
+ IMG_UINT32 ui32FrameNum;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ActiveHWRTData;
+ IMG_UINT32 ui32HWRNumber;
+ IMG_UINT32 ui32EventStatus;
+ IMG_UINT32 ui32HWRRecoveryFlags;
+ RGX_HWRTYPE eHWRType;
+ RGXFWIF_DM eDM;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeOfKick;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetStart;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeHWResetFinish;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeFreelistReady;
+} UNCACHED_ALIGN RGX_HWRINFO;
+
+#define RGXFWIF_HWINFO_MAX_FIRST 8 /* Number of first HWR logs recorded (never overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX_LAST 8 /* Number of latest HWR logs (older logs are overwritten by newer logs) */
+#define RGXFWIF_HWINFO_MAX (RGXFWIF_HWINFO_MAX_FIRST + RGXFWIF_HWINFO_MAX_LAST) /* Total number of HWR logs stored in a buffer */
+#define RGXFWIF_HWINFO_LAST_INDEX (RGXFWIF_HWINFO_MAX - 1) /* Index of the last log in the HWR log buffer */
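+/* Illustrative sketch (hypothetical write-index handling, not the FW code):
+ * with the defaults above the buffer holds 16 logs; the first 8 entries are
+ * written once and preserved, the remaining 8 wrap as a circular buffer.
+ *
+ *   ui32WriteIndex++;
+ *   if (ui32WriteIndex > RGXFWIF_HWINFO_LAST_INDEX)
+ *       ui32WriteIndex = RGXFWIF_HWINFO_MAX_FIRST;  // wrap the "last" logs
+ */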
+typedef struct _RGXFWIF_HWRINFOBUF_
+{
+ RGX_HWRINFO sHWRInfo[RGXFWIF_HWINFO_MAX];
+
+ IMG_UINT32 ui32FirstCrPollAddr[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32FirstCrPollMask[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32FirstCrPollLastValue[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32WriteIndex;
+ IMG_UINT32 ui32DDReqCount;
+} UNCACHED_ALIGN RGXFWIF_HWRINFOBUF;
+
+
+#define RGXFWIF_CTXSWITCH_PROFILE_FAST_EN (1)
+#define RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN (2)
+#define RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN (3)
+#define RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN (4)
+
+/*!
+ ******************************************************************************
+ * RGX firmware Init Config Data
+ *****************************************************************************/
+#define RGXFWIF_INICFG_CTXSWITCH_TA_EN (0x1 << 0)
+#define RGXFWIF_INICFG_CTXSWITCH_3D_EN (0x1 << 1)
+#define RGXFWIF_INICFG_CTXSWITCH_CDM_EN (0x1 << 2)
+#define RGXFWIF_INICFG_CTXSWITCH_MODE_RAND (0x1 << 3)
+#define RGXFWIF_INICFG_CTXSWITCH_SRESET_EN (0x1 << 4)
+#define RGXFWIF_INICFG_USE_EXTENDED (0x1 << 5)
+#define RGXFWIF_INICFG_POW_RASCALDUST (0x1 << 6)
+#define RGXFWIF_INICFG_HWPERF_EN (0x1 << 7)
+#define RGXFWIF_INICFG_HWR_EN (0x1 << 8)
+#define RGXFWIF_INICFG_CHECK_MLIST_EN (0x1 << 9)
+#define RGXFWIF_INICFG_DISABLE_CLKGATING_EN (0x1 << 10)
+#define RGXFWIF_INICFG_POLL_COUNTERS_EN (0x1 << 11)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INDEX << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_INSTANCE << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST (RGX_CR_VDM_CONTEXT_STORE_MODE_MODE_LIST << 12)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_CLRMSK (0xFFFFCFFFU)
+#define RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT (12)
+#define RGXFWIF_INICFG_SHG_BYPASS_EN (0x1 << 14)
+/*
+ * #define RGXFWIF_INICFG_RTU_BYPASS_EN (0x1 << 15)
+ * Removed this obsolete flag from DDK.
+ */
+#define RGXFWIF_INICFG_REGCONFIG_EN (0x1 << 16)
+#define RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY (0x1 << 17)
+#define RGXFWIF_INICFG_HWP_DISABLE_FILTER (0x1 << 18)
+#define RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN (0x1 << 19)
+#define RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN (0x1 << 20)
+#define RGXFWIF_INICFG_DISABLE_DM_OVERLAP (0x1 << 21)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT (22)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST (RGXFWIF_CTXSWITCH_PROFILE_FAST_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM (RGXFWIF_CTXSWITCH_PROFILE_MEDIUM_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW (RGXFWIF_CTXSWITCH_PROFILE_SLOW_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY (RGXFWIF_CTXSWITCH_PROFILE_NODELAY_EN << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK (0x7 << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT)
+#define RGXFWIF_INICFG_METAT1_SHIFT (25)
+#define RGXFWIF_INICFG_METAT1_MAIN (RGX_META_T1_MAIN << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_DUMMY (RGX_META_T1_DUMMY << RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_METAT1_ENABLED (RGXFWIF_INICFG_METAT1_MAIN | RGXFWIF_INICFG_METAT1_DUMMY)
+#define RGXFWIF_INICFG_METAT1_MASK (RGXFWIF_INICFG_METAT1_ENABLED >> RGXFWIF_INICFG_METAT1_SHIFT)
+#define RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER (0x1 << 27)
+#define RGXFWIF_INICFG_WORKEST_V1 (0x1 << 28)
+#define RGXFWIF_INICFG_WORKEST_V2 (0x1 << 29)
+#define RGXFWIF_INICFG_PDVFS_V1 (0x1 << 30)
+#define RGXFWIF_INICFG_PDVFS_V2 (0x1 << 31)
+#define RGXFWIF_INICFG_ALL (0xFFFFFFFFU)
+
+#define RGXFWIF_INICFG_EXT_USE_EXTENDED (0x1 << 31)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM (0x1 << 0)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA (0x1 << 1)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D (0x1 << 2)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM (0x1 << 3)
+#define RGXFWIF_INICFG_EXT_LOW_PRIO_CS_SHG (0x1 << 4)
+#define RGXFWIF_INICFG_EXT_TRACEBUF_FIELD (0x1 << 5) /*!< The tracebuffer and HWRInfoBufCtl fields are included in the OSConfig Struct > */
+#define RGXFWIF_INICFG_EXT_HWPERF_FEATURE_FLAGS (0x1 << 6) /*!< The BvncKmFeatureFlags are included in the init struct > */
+
+#define RGXFWIF_SRVCFG_DISABLE_PDP_EN (0x1 << 31)
+#define RGXFWIF_SRVCFG_ALL (0x80000000U)
+#define RGXFWIF_FILTCFG_TRUNCATE_HALF (0x1 << 3)
+#define RGXFWIF_FILTCFG_TRUNCATE_INT (0x1 << 2)
+#define RGXFWIF_FILTCFG_NEW_FILTER_MODE (0x1 << 1)
+
+#define RGXFWIF_INICFG_CTXSWITCH_DM_ALL (RGXFWIF_INICFG_CTXSWITCH_TA_EN | \
+ RGXFWIF_INICFG_CTXSWITCH_3D_EN | \
+ RGXFWIF_INICFG_CTXSWITCH_CDM_EN)
+
+#define RGXFWIF_INICFG_CTXSWITCH_CLRMSK ~(RGXFWIF_INICFG_CTXSWITCH_DM_ALL | \
+ RGXFWIF_INICFG_CTXSWITCH_MODE_RAND | \
+ RGXFWIF_INICFG_CTXSWITCH_SRESET_EN)
+
+typedef enum
+{
+ RGX_ACTIVEPM_FORCE_OFF = 0,
+ RGX_ACTIVEPM_FORCE_ON = 1,
+ RGX_ACTIVEPM_DEFAULT = 2
+} RGX_ACTIVEPM_CONF;
+
+typedef enum
+{
+ RGX_RD_POWER_ISLAND_FORCE_OFF = 0,
+ RGX_RD_POWER_ISLAND_FORCE_ON = 1,
+ RGX_RD_POWER_ISLAND_DEFAULT = 2
+} RGX_RD_POWER_ISLAND_CONF;
+
+typedef enum
+{
+ RGX_META_T1_OFF = 0x0, /*!< No thread 1 running (unless 2nd thread is used for HWPerf) */
+ RGX_META_T1_MAIN = 0x1, /*!< Run the main thread 0 code on thread 1 (and vice versa if 2nd thread is used for HWPerf) */
+ RGX_META_T1_DUMMY = 0x2 /*!< Run dummy test code on thread 1 */
+} RGX_META_T1_CONF;
+
+/*!
+ ******************************************************************************
+ * Querying DM state
+ *****************************************************************************/
+
+typedef enum _RGXFWIF_DM_STATE_
+{
+ RGXFWIF_DM_STATE_NORMAL = 0,
+ RGXFWIF_DM_STATE_LOCKEDUP = 1
+} RGXFWIF_DM_STATE;
+
+typedef struct
+{
+ IMG_UINT16 ui16RegNum; /*!< Register number */
+ IMG_UINT16 ui16IndirectRegNum; /*!< Indirect register number (or 0 if not used) */
+ IMG_UINT16 ui16IndirectStartVal; /*!< Start value for indirect register */
+ IMG_UINT16 ui16IndirectEndVal; /*!< End value for indirect register */
+} RGXFW_REGISTER_LIST;
+
+#endif /* __RGX_FWIF_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_alignchecks.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_alignchecks.h
new file mode 100644
index 00000000000000..2786b89f79a012
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_alignchecks.h
@@ -0,0 +1,194 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX fw interface alignment checks
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Checks to avoid disalignment in RGX fw data structures
+ shared with the host
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_ALIGNCHECKS_H__)
+#define __RGX_FWIF_ALIGNCHECKS_H__
+
+/* for the offsetof macro */
+#if defined(__KERNEL__) && defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+/*!
+ ******************************************************************************
+ * Alignment UM/FW checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_UM_MAX 128
+
+#define RGXFW_ALIGN_CHECKS_INIT0 \
+ sizeof(RGXFWIF_TRACEBUF), \
+ offsetof(RGXFWIF_TRACEBUF, ui32LogType), \
+ offsetof(RGXFWIF_TRACEBUF, sTraceBuf), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmLockedUpCount), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmOverranCount), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmRecoveredCount), \
+ offsetof(RGXFWIF_TRACEBUF, aui32HwrDmFalseDetectCount), \
+ \
+ /* RGXFWIF_CMDTA checks */ \
+ sizeof(RGXFWIF_CMDTA), \
+ offsetof(RGXFWIF_CMDTA, sTARegs), \
+ \
+ /* RGXFWIF_CMD3D checks */ \
+ sizeof(RGXFWIF_CMD3D), \
+ offsetof(RGXFWIF_CMD3D, s3DRegs), \
+ \
+ /* RGXFWIF_CMDTRANSFER checks */ \
+ sizeof(RGXFWIF_CMDTRANSFER), \
+ offsetof(RGXFWIF_CMDTRANSFER, sTransRegs), \
+ \
+ \
+ /* RGXFWIF_CMD_COMPUTE checks */ \
+ sizeof(RGXFWIF_CMD_COMPUTE), \
+ offsetof(RGXFWIF_CMD_COMPUTE, sCDMRegs), \
+ \
+ sizeof(RGXFWIF_FREELIST), \
+ offsetof(RGXFWIF_FREELIST, psFreeListDevVAddr),\
+ offsetof(RGXFWIF_FREELIST, ui32MaxPages),\
+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),\
+ offsetof(RGXFWIF_FREELIST, ui32HWRCounter),\
+ \
+ sizeof(RGXFWIF_RENDER_TARGET),\
+ offsetof(RGXFWIF_RENDER_TARGET, psVHeapTableDevVAddr), \
+ \
+ sizeof(RGXFWIF_HWRTDATA), \
+ offsetof(RGXFWIF_HWRTDATA, psPMMListDevVAddr), \
+ offsetof(RGXFWIF_HWRTDATA, apsFreeLists),\
+ offsetof(RGXFWIF_HWRTDATA, ui64VCECatBase), \
+ offsetof(RGXFWIF_HWRTDATA, psParentRenderTarget), \
+ offsetof(RGXFWIF_HWRTDATA, eState), \
+ offsetof(RGXFWIF_HWRTDATA, ui32NumPartialRenders), \
+ \
+ sizeof(RGXFWIF_HWPERF_CTL_BLK), \
+ offsetof(RGXFWIF_HWPERF_CTL_BLK, aui64CounterCfg), \
+ \
+ sizeof(RGXFWIF_REGISTER_GUESTOS_OFFSETS), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, ui32OSid), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sKCCBCtl), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sKCCB), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sFirmwareCCBCtl), \
+ offsetof(RGXFWIF_REGISTER_GUESTOS_OFFSETS, sFirmwareCCB), \
+\
+ sizeof(RGXFWIF_HWPERF_CTL), \
+ offsetof(RGXFWIF_HWPERF_CTL, SelCntr)
+
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+#define RGXFW_ALIGN_CHECKS_INIT1 \
+ RGXFW_ALIGN_CHECKS_INIT0, \
+ sizeof(RGXFWIF_RPM_FREELIST), \
+ offsetof(RGXFWIF_RPM_FREELIST, sFreeListDevVAddr), \
+ offsetof(RGXFWIF_RPM_FREELIST, ui32MaxPages), \
+ offsetof(RGXFWIF_RPM_FREELIST, ui32CurrentPages), \
+ offsetof(RGXFWIF_RPM_FREELIST, ui32GrowPages)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT1 RGXFW_ALIGN_CHECKS_INIT0
+#endif /* RGX_FEATURE_RAY_TRACING */
+
+
+#if defined(RGX_FEATURE_TLA)
+#define RGXFW_ALIGN_CHECKS_INIT2 \
+ RGXFW_ALIGN_CHECKS_INIT1, \
+ /* RGXFWIF_CMD2D checks */ \
+ sizeof(RGXFWIF_CMD2D), \
+ offsetof(RGXFWIF_CMD2D, s2DRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT2 RGXFW_ALIGN_CHECKS_INIT1
+#endif /* RGX_FEATURE_TLA */
+
+
+#if defined(RGX_FEATURE_FASTRENDER_DM)
+#define RGXFW_ALIGN_CHECKS_INIT \
+ RGXFW_ALIGN_CHECKS_INIT2, \
+ /* RGXFWIF_CMDTDM checks */ \
+ sizeof(RGXFWIF_CMDTDM), \
+ offsetof(RGXFWIF_CMDTDM, sTDMRegs)
+#else
+#define RGXFW_ALIGN_CHECKS_INIT RGXFW_ALIGN_CHECKS_INIT2
+#endif /* RGX_FEATURE_FASTRENDER_DM */
+
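+/* Illustrative sketch of how the list above is consumed (helper and error
+ * names hypothetical): the driver and the firmware each expand
+ * RGXFW_ALIGN_CHECKS_INIT into an IMG_UINT32 array and compare the arrays
+ * element by element at initialisation, so any structure-layout drift
+ * between the two builds is caught before commands are exchanged:
+ *
+ *   static const IMG_UINT32 aui32Checks[] = { RGXFW_ALIGN_CHECKS_INIT };
+ *   for (i = 0; i < ui32NumChecks; i++)
+ *   {
+ *       if (aui32Checks[i] != paui32FWChecks[i])
+ *           return ALIGNMENT_MISMATCH_ERROR;
+ *   }
+ */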
+
+
+/*!
+ ******************************************************************************
+ * Alignment KM checks array
+ *****************************************************************************/
+
+#define RGXFW_ALIGN_CHECKS_INIT_KM \
+ sizeof(RGXFWIF_INIT), \
+ offsetof(RGXFWIF_INIT, sFaultPhysAddr), \
+ offsetof(RGXFWIF_INIT, sPDSExecBase), \
+ offsetof(RGXFWIF_INIT, sUSCExecBase), \
+ offsetof(RGXFWIF_INIT, psKernelCCBCtl), \
+ offsetof(RGXFWIF_INIT, psKernelCCB), \
+ offsetof(RGXFWIF_INIT, psFirmwareCCBCtl), \
+ offsetof(RGXFWIF_INIT, psFirmwareCCB), \
+ offsetof(RGXFWIF_INIT, asSigBufCtl), \
+ offsetof(RGXFWIF_INIT, sTraceBufCtl), \
+ offsetof(RGXFWIF_INIT, sRGXCompChecks), \
+ \
+ /* RGXFWIF_FWRENDERCONTEXT checks */ \
+ sizeof(RGXFWIF_FWRENDERCONTEXT), \
+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext), \
+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext), \
+ \
+ sizeof(RGXFWIF_FWCOMMONCONTEXT), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psFWMemContext), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, sRunNode), \
+ offsetof(RGXFWIF_FWCOMMONCONTEXT, psCCB), \
+ \
+ sizeof(RGXFWIF_MMUCACHEDATA), \
+ offsetof(RGXFWIF_MMUCACHEDATA,sMemoryContext), \
+ offsetof(RGXFWIF_MMUCACHEDATA,ui32Flags), \
+ offsetof(RGXFWIF_MMUCACHEDATA,sMMUCacheSync), \
+ offsetof(RGXFWIF_MMUCACHEDATA,ui16MMUCacheSyncUpdateValue)
+
+
+#endif /* __RGX_FWIF_ALIGNCHECKS_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_alignchecks.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_hwperf.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_hwperf.h
new file mode 100644
index 00000000000000..155e44a3a32b57
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_hwperf.h
@@ -0,0 +1,242 @@
+/*************************************************************************/ /*!
+@File rgx_fwif_hwperf.h
+@Title RGX HWPerf support
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared header between RGX firmware and Init process
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_HWPERF_H
+#define RGX_FWIF_HWPERF_H
+
+#include "rgx_fwif_shared.h"
+#include "rgx_hwperf.h"
+#include "rgxdefs_km.h"
+
+
+/*****************************************************************************/
+
+/* Structure to hold a block's parameters for passing between the BG context
+ * and the IRQ context when applying a configuration request. */
+typedef struct _RGXFWIF_HWPERF_CTL_BLK_
+{
+ IMG_BOOL bValid;
+ IMG_BOOL bEnabled;
+ IMG_UINT32 eBlockID;
+ IMG_UINT32 uiCounterMask;
+ IMG_UINT64 RGXFW_ALIGN aui64CounterCfg[RGX_CNTBLK_COUNTERS_MAX];
+} RGXFWIF_HWPERF_CTL_BLK;
+
+/* Structure used to hold the configuration of the non-mux counters blocks */
+typedef struct _RGXFW_HWPERF_SELECT_
+{
+ IMG_UINT32 ui32NumSelectedCounters;
+ IMG_UINT32 aui32SelectedCountersIDs[RGX_HWPERF_MAX_CUSTOM_CNTRS];
+} RGXFW_HWPERF_SELECT;
+
+/* Structure to hold the full configuration request details for all blocks.
+ * The block masks and counts are used to optimise reading of this data. */
+typedef struct _RGXFWIF_HWPERF_CTL_
+{
+ IMG_BOOL bResetOrdinal;
+
+ IMG_UINT32 ui32SelectedCountersBlockMask;
+ RGXFW_HWPERF_SELECT RGXFW_ALIGN SelCntr[RGX_HWPERF_MAX_CUSTOM_BLKS];
+
+ IMG_UINT32 ui32EnabledBlksCount;
+ RGXFWIF_HWPERF_CTL_BLK RGXFW_ALIGN sBlkCfg[RGX_HWPERF_MAX_DEFINED_BLKS];
+} UNCACHED_ALIGN RGXFWIF_HWPERF_CTL;
+
+/* NOTE: The switch statement in this function must be kept in step with the
+ * RGX_HWPERF_CNTBLK_ID enumeration defined in rgx_hwperf.h; ASSERTs may fire
+ * if it is not.
+ * The function performs a hash-style lookup that maps a block ID to a handle
+ * on that block's configuration in the global store.
+ */
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(rgxfw_hwperf_get_block_ctl)
+#endif
+static INLINE RGXFWIF_HWPERF_CTL_BLK* rgxfw_hwperf_get_block_ctl(
+ RGX_HWPERF_CNTBLK_ID eBlockID, RGXFWIF_HWPERF_CTL *psHWPerfInitData)
+{
+ IMG_INT32 i32Idx = -1;
+
+ /* Hash the block ID into a control configuration array index */
+ switch(eBlockID)
+ {
+ case RGX_CNTBLK_ID_TA:
+ case RGX_CNTBLK_ID_RASTER:
+ case RGX_CNTBLK_ID_HUB:
+ case RGX_CNTBLK_ID_TORNADO:
+ case RGX_CNTBLK_ID_JONES:
+ case RGX_CNTBLK_ID_BF:
+ case RGX_CNTBLK_ID_BT:
+ case RGX_CNTBLK_ID_RT:
+ case RGX_CNTBLK_ID_SH:
+ {
+ i32Idx = eBlockID;
+ break;
+ }
+ case RGX_CNTBLK_ID_TPU_MCU0:
+ case RGX_CNTBLK_ID_TPU_MCU1:
+ case RGX_CNTBLK_ID_TPU_MCU2:
+ case RGX_CNTBLK_ID_TPU_MCU3:
+ case RGX_CNTBLK_ID_TPU_MCU4:
+ case RGX_CNTBLK_ID_TPU_MCU5:
+ case RGX_CNTBLK_ID_TPU_MCU6:
+ case RGX_CNTBLK_ID_TPU_MCU7:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_USC0:
+ case RGX_CNTBLK_ID_USC1:
+ case RGX_CNTBLK_ID_USC2:
+ case RGX_CNTBLK_ID_USC3:
+ case RGX_CNTBLK_ID_USC4:
+ case RGX_CNTBLK_ID_USC5:
+ case RGX_CNTBLK_ID_USC6:
+ case RGX_CNTBLK_ID_USC7:
+ case RGX_CNTBLK_ID_USC8:
+ case RGX_CNTBLK_ID_USC9:
+ case RGX_CNTBLK_ID_USC10:
+ case RGX_CNTBLK_ID_USC11:
+ case RGX_CNTBLK_ID_USC12:
+ case RGX_CNTBLK_ID_USC13:
+ case RGX_CNTBLK_ID_USC14:
+ case RGX_CNTBLK_ID_USC15:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_TEXAS0:
+ case RGX_CNTBLK_ID_TEXAS1:
+ case RGX_CNTBLK_ID_TEXAS2:
+ case RGX_CNTBLK_ID_TEXAS3:
+ case RGX_CNTBLK_ID_TEXAS4:
+ case RGX_CNTBLK_ID_TEXAS5:
+ case RGX_CNTBLK_ID_TEXAS6:
+ case RGX_CNTBLK_ID_TEXAS7:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_RASTER0:
+ case RGX_CNTBLK_ID_RASTER1:
+ case RGX_CNTBLK_ID_RASTER2:
+ case RGX_CNTBLK_ID_RASTER3:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_BLACKPEARL0:
+ case RGX_CNTBLK_ID_BLACKPEARL1:
+ case RGX_CNTBLK_ID_BLACKPEARL2:
+ case RGX_CNTBLK_ID_BLACKPEARL3:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_PBE0:
+ case RGX_CNTBLK_ID_PBE1:
+ case RGX_CNTBLK_ID_PBE2:
+ case RGX_CNTBLK_ID_PBE3:
+ case RGX_CNTBLK_ID_PBE4:
+ case RGX_CNTBLK_ID_PBE5:
+ case RGX_CNTBLK_ID_PBE6:
+ case RGX_CNTBLK_ID_PBE7:
+ case RGX_CNTBLK_ID_PBE8:
+ case RGX_CNTBLK_ID_PBE9:
+ case RGX_CNTBLK_ID_PBE10:
+ case RGX_CNTBLK_ID_PBE11:
+ case RGX_CNTBLK_ID_PBE12:
+ case RGX_CNTBLK_ID_PBE13:
+ case RGX_CNTBLK_ID_PBE14:
+ case RGX_CNTBLK_ID_PBE15:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ case RGX_CNTBLK_ID_BX_TU0:
+ case RGX_CNTBLK_ID_BX_TU1:
+ case RGX_CNTBLK_ID_BX_TU2:
+ case RGX_CNTBLK_ID_BX_TU3:
+ {
+ i32Idx = RGX_CNTBLK_ID_DIRECT_LAST +
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15) +
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7) +
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3) +
+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3) +
+ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15) +
+ (eBlockID & RGX_CNTBLK_ID_UNIT_MASK);
+ break;
+ }
+ default:
+ {
+ return NULL;
+ }
+ }
+ if ((i32Idx < 0) || (i32Idx >= RGX_HWPERF_MAX_DEFINED_BLKS))
+ {
+ return NULL;
+ }
+ return &psHWPerfInitData->sBlkCfg[i32Idx];
+}
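+
+/* Worked example (illustrative): for eBlockID == RGX_CNTBLK_ID_USC2 the
+ * switch above yields
+ *   i32Idx = RGX_CNTBLK_ID_DIRECT_LAST
+ *          + RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)  // all TPU_MCU slots
+ *          + 2;                // RGX_CNTBLK_ID_USC2 & RGX_CNTBLK_ID_UNIT_MASK
+ * i.e. the USC entries sit immediately after the TPU_MCU entries in
+ * sBlkCfg[], with the unit number selecting the slot within the USC range.
+ */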
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_km.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_km.h
new file mode 100644
index 00000000000000..223a409b47db86
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_km.h
@@ -0,0 +1,1090 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware interface structures used by pvrsrvkm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by pvrsrvkm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_KM_H__)
+#define __RGX_FWIF_KM_H__
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdefs_km.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "rgx_firmware_processor.h"
+
+#if !defined(ALIGN)
+#define ALIGN(val, align) (((val) + ((align) - 1)) & ~((align) - 1))
+#endif
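+/* e.g. ALIGN(13, 8) == 16; note that the mask arithmetic above assumes
+ * 'align' is a power of two. */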
+
+#if defined(RGX_FIRMWARE)
+typedef DLLIST_NODE RGXFWIF_DLLIST_NODE;
+#else
+typedef struct {
+	RGXFWIF_DEV_VIRTADDR p;
+	RGXFWIF_DEV_VIRTADDR n;
+} RGXFWIF_DLLIST_NODE;
+#endif
+
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_SIGBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TRACEBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TBIBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERFBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRINFOBUF;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RUNTIME_CFG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_GPU_UTIL_FWCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_REG_CFG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWPERF_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_CONFIG_CNTBLK;
+typedef RGXFWIF_DEV_VIRTADDR PRGX_HWPERF_SELECT_CUSTOM_CNTRS;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWMEMCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FWCOMMONCONTEXT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ZSBUFFER;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_INIT;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMMONCTX_STATE;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RF_CMD;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COMPCHECKS;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_ALIGNCHECK;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CORE_CLK_RATE;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_OS_CONFIG;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_COUNTERBUFFER;
+
+/*!
+ * This number is used to represent an invalid page catalogue physical address
+ */
+#define RGXFWIF_INVALID_PC_PHYADDR 0xFFFFFFFFFFFFFFFFLLU
+
+/*!
+ Firmware memory context.
+*/
+typedef struct _RGXFWIF_FWMEMCONTEXT_
+{
+ IMG_DEV_PHYADDR RGXFW_ALIGN sPCDevPAddr; /*!< device physical address of context's page catalogue */
+ IMG_INT32 uiPageCatBaseRegID; /*!< associated page catalog base register (-1 == unallocated) */
+ IMG_UINT32 uiBreakpointAddr; /*!< breakpoint address */
+ IMG_UINT32 uiBPHandlerAddr; /*!< breakpoint handler address */
+ IMG_UINT32 uiBreakpointCtl; /*!< DM and enable control for BP */
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 ui32OSid;
+ IMG_BOOL bOSidAxiProt;
+#endif
+
+} UNCACHED_ALIGN RGXFWIF_FWMEMCONTEXT;
+
+/*!
+ * FW context state flags
+ */
+#define RGXFWIF_CONTEXT_TAFLAGS_NEED_RESUME (0x00000001)
+#define RGXFWIF_CONTEXT_RENDERFLAGS_NEED_RESUME (0x00000002)
+#define RGXFWIF_CONTEXT_CDMFLAGS_NEED_RESUME (0x00000004)
+#define RGXFWIF_CONTEXT_SHGFLAGS_NEED_RESUME (0x00000008)
+#define RGXFWIF_CONTEXT_TDMFLAGS_CONTEXT_STORED (0x00000010)
+#define RGXFWIF_CONTEXT_ALLFLAGS_NEED_RESUME (0x0000001F)
+
+/*
+ * Fast scale blit renders can be divided into smaller slices.
+ * The maximum screen size is 8192x8192 pixels, i.e. 256x256 tiles.
+ * The blit is sliced into 512x512-pixel (16x16-tile) slices.
+ * There are therefore at most 256 slices of 16x16 tiles, so 8 bits are
+ * enough to record how far through the slices the blit has progressed.
+ */
+#define RGXFWIF_CONTEXT_SLICE_BLIT_X_MASK (0x00000F00)
+#define RGXFWIF_CONTEXT_SLICE_BLIT_X_SHIFT (8)
+#define RGXFWIF_CONTEXT_SLICE_BLIT_Y_MASK (0x0000F000)
+#define RGXFWIF_CONTEXT_SLICE_BLIT_Y_SHIFT (12)
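+/* Illustrative decode of the slice position (ui32Flags stands for the
+ * per-context flags word; the name is illustrative):
+ *   x = (ui32Flags & RGXFWIF_CONTEXT_SLICE_BLIT_X_MASK)
+ *           >> RGXFWIF_CONTEXT_SLICE_BLIT_X_SHIFT;
+ *   y = (ui32Flags & RGXFWIF_CONTEXT_SLICE_BLIT_Y_MASK)
+ *           >> RGXFWIF_CONTEXT_SLICE_BLIT_Y_SHIFT;
+ */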
+
+typedef struct _RGXFWIF_TACTX_STATE_
+{
+ /* FW-accessible TA state which must be written out to memory on context store */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER; /* To store in mid-TA */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_CALL_STACK_POINTER_Init; /* Initial value (in case it is 'lost' due to a lock-up) */
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VDM_BATCH;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM0;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM1;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM2;
+ IMG_UINT64 RGXFW_ALIGN uTAReg_VBS_SO_PRIM3;
+ IMG_UINT16 RGXFW_ALIGN ui16TACurrentIdx;
+} UNCACHED_ALIGN RGXFWIF_TACTX_STATE;
+
+typedef struct _RGXFWIF_3DCTX_STATE_
+{
+ /* FW-accessible ISP state which must be written out to memory on context store */
+ IMG_UINT64 RGXFW_ALIGN u3DReg_PM_DEALLOCATED_MASK_STATUS;
+ IMG_UINT64 RGXFW_ALIGN u3DReg_PM_PDS_MTILEFREE_STATUS;
+ /* au3DReg_ISP_STORE should be the last element of the structure,
+ * as this is an array whose size is determined at runtime
+ * after the RGX core has been detected */
+ IMG_UINT32 RGXFW_ALIGN au3DReg_ISP_STORE[];
+} UNCACHED_ALIGN RGXFWIF_3DCTX_STATE;
+
+typedef struct _RGXFWIF_COMPUTECTX_STATE_
+{
+ IMG_BOOL RGXFW_ALIGN bBufferB;
+} RGXFWIF_COMPUTECTX_STATE;
+
+
+typedef struct _RGXFWIF_VRDMCTX_STATE_
+{
+ /* FW-accessible TA state which must be written out to memory on context store */
+ IMG_UINT64 RGXFW_ALIGN uVRDMReg_VRM_CALL_STACK_POINTER;
+ IMG_UINT64 RGXFW_ALIGN uVRDMReg_VRM_BATCH;
+} UNCACHED_ALIGN RGXFWIF_VRDMCTX_STATE;
+
+
+typedef struct _RGXFWIF_FWCOMMONCONTEXT_
+{
+ /*
+ Used by bg and irq context
+ */
+ /* CCB details for this firmware context */
+ PRGXFWIF_CCCB_CTL psCCBCtl; /*!< CCB control */
+ PRGXFWIF_CCCB psCCB; /*!< CCB base */
+ RGXFWIF_DMA_ADDR sCCBMetaDMAAddr;
+
+ /*
+ Used by the bg context only
+ */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sWaitingNode; /*!< List entry for the waiting list */
+ RGXFWIF_UFO sLastFailedUFO; /*!< UFO that last failed (or NULL) */
+
+ /*
+ Used by the irq context only
+ */
+ RGXFWIF_DLLIST_NODE sRunNode; /*!< List entry for the run list */
+
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+
+ /* Context suspend state */
+ PRGXFWIF_COMMONCTX_STATE RGXFW_ALIGN psContextState; /*!< TA/3D context suspend state, read/written by FW */
+
+ /* Framework state
+ */
+ PRGXFWIF_RF_CMD RGXFW_ALIGN psRFCmd; /*!< Register updates for Framework */
+
+ /*
+ * Flags e.g. for context switching
+ */
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32Priority;
+ IMG_UINT32 ui32PrioritySeqNum;
+
+ /* References to the host side originators */
+ IMG_UINT32 ui32ServerCommonContextID; /*!< the Server Common Context */
+ IMG_UINT32 ui32PID; /*!< associated process ID */
+
+ /* Statistic updates waiting to be passed back to the host... */
+ IMG_BOOL bStatsPending; /*!< True when some stats are pending */
+ IMG_INT32 i32StatsNumStores; /*!< Number of stores on this context since last update */
+ IMG_INT32 i32StatsNumOutOfMemory; /*!< Number of OOMs on this context since last update */
+ IMG_INT32 i32StatsNumPartialRenders; /*!< Number of PRs on this context since last update */
+ RGXFWIF_DM eDM; /*!< Data Master type */
+ IMG_UINT64 RGXFW_ALIGN ui64WaitSignalAddress; /*!< Device Virtual Address of the signal the context is waiting on */
+ RGXFWIF_DLLIST_NODE sWaitSignalNode; /*!< List entry for the wait-signal list */
+ RGXFWIF_DLLIST_NODE RGXFW_ALIGN sBufStalledNode; /*!< List entry for the buffer stalled list */
+ IMG_UINT64 RGXFW_ALIGN ui64CBufQueueCtrlAddr; /*!< Address of the circular buffer queue pointers */
+ IMG_UINT64 RGXFW_ALIGN ui64ResumeSignalAddr; /*!< Address of the Services Signal for resuming the buffer */
+ IMG_BOOL bReadOffsetNeedsReset; /*!< Following HWR circular buffer read-offset needs resetting */
+} UNCACHED_ALIGN RGXFWIF_FWCOMMONCONTEXT;
+
+/*!
+ Firmware render context.
+*/
+typedef struct _RGXFWIF_FWRENDERCONTEXT_
+{
+ RGXFWIF_FWCOMMONCONTEXT sTAContext; /*!< Firmware context for the TA */
+ RGXFWIF_FWCOMMONCONTEXT s3DContext; /*!< Firmware context for the 3D */
+
+ /*
+ * Note: The following fields keep track of OOM and partial render statistics.
+ * Because these data structures are allocated cache-incoherent,
+ * and because these fields are updated by the firmware,
+ * the host will read valid values only after an SLC flush/inval.
+ * This is only guaranteed to happen while destroying the render-context.
+ */
+
+ /* The following field has been reused to avoid breaking compatibility.
+ *
+ * It was previously:
+ * IMG_UINT32 ui32TotalNumPartialRenders; Total number of partial renders
+ *
+ * and is now:
+ */
+ IMG_UINT32 ui32WorkEstCCBSubmitted; /*!< Number of commands submitted to the WorkEst FW CCB */
+
+ IMG_UINT32 ui32TotalNumOutOfMemory; /*!< Total number of OOMs */
+
+} UNCACHED_ALIGN RGXFWIF_FWRENDERCONTEXT;
+
+/*!
+ Firmware ray context.
+*/
+typedef struct _RGXFWIF_FWRAYCONTEXT_
+{
+ IMG_UINT32 ui32ActiveFCMask; /* placed here so that fwrayctx and shgctx do not end up at the same address */
+ IMG_UINT32 ui32NextFC;
+ RGXFWIF_FWCOMMONCONTEXT sSHGContext; /*!< Firmware context for the SHG */
+ RGXFWIF_FWCOMMONCONTEXT sRTUContext; /*!< Firmware context for the RTU */
+ PRGXFWIF_CCCB_CTL psCCBCtl[DPX_MAX_RAY_CONTEXTS];
+ PRGXFWIF_CCCB psCCB[DPX_MAX_RAY_CONTEXTS];
+} UNCACHED_ALIGN RGXFWIF_FWRAYCONTEXT;
+
+#define RGXFWIF_INVALID_FRAME_CONTEXT (0xFFFFFFFF)
+
+/*!
+ BIF tiling mode
+*/
+typedef enum _RGXFWIF_BIFTILINGMODE_
+{
+ RGXFWIF_BIFTILINGMODE_NONE = 0,
+ RGXFWIF_BIFTILINGMODE_256x16 = 0,
+ RGXFWIF_BIFTILINGMODE_512x8 = 1,
+ RGXFWIF_BIFTILINGMODE_MAX = 4
+} RGXFWIF_BIFTILINGMODE;
+
+/*!
+ BIF requester selection
+*/
+typedef enum _RGXFWIF_BIFREQ_
+{
+ RGXFWIF_BIFREQ_TA = 0,
+ RGXFWIF_BIFREQ_3D = 1,
+ RGXFWIF_BIFREQ_CDM = 2,
+ RGXFWIF_BIFREQ_2D = 3,
+ RGXFWIF_BIFREQ_TDM = 3,
+ RGXFWIF_BIFREQ_HOST = 4,
+ RGXFWIF_BIFREQ_RTU = 5,
+ RGXFWIF_BIFREQ_SHG = 6,
+ RGXFWIF_BIFREQ_MAX = 7
+} RGXFWIF_BIFREQ;
+
+typedef enum _RGXFWIF_PM_DM_
+{
+ RGXFWIF_PM_DM_TA = 0,
+ RGXFWIF_PM_DM_3D = 1,
+} RGXFWIF_PM_DM;
+
+typedef enum _RGXFWIF_RPM_DM_
+{
+ RGXFWIF_RPM_DM_SHF = 0,
+ RGXFWIF_RPM_DM_SHG = 1,
+ RGXFWIF_RPM_DM_MAX,
+} RGXFWIF_RPM_DM;
+
+/*!
+ ******************************************************************************
+ * Kernel CCB control for RGX
+ *****************************************************************************/
+typedef struct _RGXFWIF_CCB_CTL_
+{
+ volatile IMG_UINT32 ui32WriteOffset; /*!< write offset into array of commands (MUST be aligned to 16 bytes!) */
+ volatile IMG_UINT32 ui32ReadOffset; /*!< read offset into array of commands */
+ IMG_UINT32 ui32WrapMask; /*!< Offset wrapping mask (Total capacity of the CCB - 1) */
+ IMG_UINT32 ui32CmdSize; /*!< size of each command in bytes */
+} UNCACHED_ALIGN RGXFWIF_CCB_CTL;
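+
+/* Illustrative sketch of the circular-buffer convention implied by the
+ * fields above (consumer logic hypothetical): the CCB is empty when the two
+ * offsets are equal, and an offset advances one command at a time, wrapping
+ * through the mask:
+ *
+ *   bEmpty = (psCtl->ui32ReadOffset == psCtl->ui32WriteOffset);
+ *   psCtl->ui32WriteOffset = (psCtl->ui32WriteOffset + 1)
+ *                                & psCtl->ui32WrapMask;
+ */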
+
+/*!
+ ******************************************************************************
+ * Kernel CCB command structure for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PT (0x1) /* MMU_CTRL_INVAL_PT_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PD (0x2) /* MMU_CTRL_INVAL_PD_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PC (0x4) /* MMU_CTRL_INVAL_PC_EN */
+
+#if !defined(__KERNEL__)
+
+#if !defined(RGX_FEATURE_SLC_VIVT)
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x0) /* not used */
+
+#else /* RGX_FEATURE_SLC_VIVT */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (0x0) /* not used */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#else
+#define RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB (0x10) /* can't use PM_TLB0 bit from BIFPM_CTRL reg because it collides with PT bit from BIF_CTRL reg */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_TLB (RGXFWIF_MMUCACHEDATA_FLAGS_PMTLB | 0x8) /* BIF_CTRL_INVAL_TLB1_EN */
+#define RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL (0x800) /* MMU_CTRL_INVAL_ALL_CONTEXTS_EN */
+#endif
+
+#define RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT (0x4000000) /* indicates FW should interrupt the host */
+
+typedef struct _RGXFWIF_MMUCACHEDATA_
+{
+ PRGXFWIF_FWMEMCONTEXT sMemoryContext;
+ IMG_UINT32 ui32Flags;
+ RGXFWIF_DEV_VIRTADDR sMMUCacheSync;
+ IMG_UINT16 ui16MMUCacheSyncUpdateValue;
+} __attribute__ ((packed)) RGXFWIF_MMUCACHEDATA;
+
+typedef struct _RGXFWIF_SLCBPCTLDATA_
+{
+ IMG_BOOL bSetBypassed; /*!< Should SLC be/not be bypassed for indicated units? */
+ IMG_UINT32 uiFlags; /*!< Units to enable/disable */
+} RGXFWIF_SLCBPCTLDATA;
+
+#define RGXFWIF_BPDATA_FLAGS_WRITE (1 << 0)
+#define RGXFWIF_BPDATA_FLAGS_CTL (1 << 1)
+#define RGXFWIF_BPDATA_FLAGS_REGS (1 << 2)
+
+typedef struct _RGXFWIF_FWBPDATA_
+{
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+ IMG_UINT32 ui32BPAddr; /*!< Breakpoint address */
+ IMG_UINT32 ui32HandlerAddr; /*!< Breakpoint handler */
+ IMG_UINT32 ui32BPDM; /*!< Breakpoint control */
+ IMG_BOOL bEnable;
+ IMG_UINT32 ui32Flags;
+ IMG_UINT32 ui32TempRegs; /*!< Number of temporary registers to overallocate */
+ IMG_UINT32 ui32SharedRegs; /*!< Number of shared registers to overallocate */
+} RGXFWIF_BPDATA;
+
+#define RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS 4
+
+typedef struct _RGXFWIF_KCCB_CMD_KICK_DATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+ IMG_UINT32 ui32CWoffUpdate; /*!< Client CCB woff update */
+ IMG_UINT32 ui32NumCleanupCtl; /*!< number of CleanupCtl pointers attached */
+ PRGXFWIF_CLEANUP_CTL apsCleanupCtl[RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS]; /*!< CleanupCtl structures associated with command */
+ IMG_UINT32 ui32WorkEstCmdHeaderOffset; /*!< offset to the CmdHeader which houses the workload estimation kick data. */
+} RGXFWIF_KCCB_CMD_KICK_DATA;
+
+typedef struct _RGXFWIF_KCCB_CMD_FENCE_DATA_
+{
+ RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr;
+ IMG_UINT32 uiUpdateVal;
+} RGXFWIF_KCCB_CMD_SYNC_DATA;
+
+typedef struct _RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< address of the firmware context */
+ IMG_UINT32 ui32CCBFenceOffset; /*!< Client CCB fence offset */
+} RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA;
+
+typedef enum _RGXFWIF_CLEANUP_TYPE_
+{
+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT, /*!< FW common context cleanup */
+ RGXFWIF_CLEANUP_HWRTDATA, /*!< FW HW RT data cleanup */
+ RGXFWIF_CLEANUP_FREELIST, /*!< FW freelist cleanup */
+ RGXFWIF_CLEANUP_ZSBUFFER, /*!< FW ZS Buffer cleanup */
+ RGXFWIF_CLEANUP_HWFRAMEDATA, /*!< FW RPM/RTU frame data */
+ RGXFWIF_CLEANUP_RPM_FREELIST, /*!< FW RPM freelist */
+} RGXFWIF_CLEANUP_TYPE;
+
+#define RGXFWIF_CLEANUP_RUN (1 << 0) /*!< The requested cleanup command has run on the FW */
+#define RGXFWIF_CLEANUP_BUSY (1 << 1) /*!< The requested resource is busy */
+
+typedef struct _RGXFWIF_CLEANUP_REQUEST_
+{
+ RGXFWIF_CLEANUP_TYPE eCleanupType; /*!< Cleanup type */
+ union {
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< FW common context to cleanup */
+ PRGXFWIF_HWRTDATA psHWRTData; /*!< HW RT to cleanup */
+ PRGXFWIF_FREELIST psFreelist; /*!< Freelist to cleanup */
+ PRGXFWIF_ZSBUFFER psZSBuffer; /*!< ZS Buffer to cleanup */
+ PRGXFWIF_RAY_FRAME_DATA psHWFrameData; /*!< RPM/RTU frame data to cleanup */
+ PRGXFWIF_RPM_FREELIST psRPMFreelist; /*!< RPM Freelist to cleanup */
+ } uCleanupData;
+ RGXFWIF_DEV_VIRTADDR sSyncObjDevVAddr; /*!< sync primitive used to indicate state of the request */
+} RGXFWIF_CLEANUP_REQUEST;
+
+typedef enum _RGXFWIF_POWER_TYPE_
+{
+ RGXFWIF_POW_OFF_REQ = 1,
+ RGXFWIF_POW_FORCED_IDLE_REQ,
+ RGXFWIF_POW_NUMDUST_CHANGE,
+ RGXFWIF_POW_APM_LATENCY_CHANGE
+} RGXFWIF_POWER_TYPE;
+
+typedef enum
+{
+ RGXFWIF_OS_ONLINE = 1,
+ RGXFWIF_OS_OFFLINE
+} RGXFWIF_OS_STATE_CHANGE;
+
+typedef enum
+{
+ RGXFWIF_PWR_COUNTER_DUMP_START = 1,
+ RGXFWIF_PWR_COUNTER_DUMP_STOP,
+ RGXFWIF_PWR_COUNTER_DUMP_SAMPLE,
+} RGXFWIF_COUNTER_DUMP_REQUEST;
+
+typedef enum
+{
+ RGXFWIF_POWER_FORCE_IDLE = 1,
+ RGXFWIF_POWER_CANCEL_FORCED_IDLE,
+ RGXFWIF_POWER_HOST_TIMEOUT,
+} RGXFWIF_POWER_FORCE_IDLE_TYPE;
+
+typedef struct _RGXFWIF_POWER_REQUEST_
+{
+ RGXFWIF_POWER_TYPE ePowType; /*!< Type of power request */
+ union
+ {
+ IMG_UINT32 ui32NumOfDusts; /*!< Number of active Dusts */
+ IMG_BOOL bForced; /*!< If the operation is mandatory */
+ RGXFWIF_POWER_FORCE_IDLE_TYPE ePowRequestType; /*!< Type of request: Force Idle, Cancel Forced Idle, or Host Timeout */
+ IMG_UINT32 ui32ActivePMLatencyms; /*!< Number of milliseconds to set APM latency */
+ } uPoweReqData;
+} RGXFWIF_POWER_REQUEST;
+
+typedef struct _RGXFWIF_SLCFLUSHINVALDATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context to fence on (only useful when bDMContext == TRUE) */
+ IMG_BOOL bInval; /*!< Invalidate the cache as well as flushing */
+ IMG_BOOL bDMContext; /*!< The data to flush/invalidate belongs to a specific DM context */
+ RGXFWIF_DM eDM; /*!< DM to flush entries for (only useful when bDMContext == TRUE) */
+} RGXFWIF_SLCFLUSHINVALDATA;
+
+typedef struct _RGXFWIF_HCS_CTL_
+{
+ IMG_UINT32 ui32HCSDeadlineMS; /* New number of milliseconds C/S is allowed to last */
+} RGXFWIF_HCS_CTL;
+
+typedef struct _RGXFWIF_HWPERF_CTRL_
+{
+ IMG_BOOL bToggle; /*!< Toggle masked bits or apply full mask? */
+ IMG_UINT64 RGXFW_ALIGN ui64Mask; /*!< Mask of events to toggle */
+} RGXFWIF_HWPERF_CTRL;
+
+typedef struct _RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS_
+{
+ IMG_UINT32 ui32NumBlocks; /*!< Number of RGX_HWPERF_CONFIG_CNTBLK in the array */
+ PRGX_HWPERF_CONFIG_CNTBLK sBlockConfigs; /*!< Address of the RGX_HWPERF_CONFIG_CNTBLK array */
+} RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS;
+
+typedef struct _RGXFWIF_CORECLKSPEEDCHANGE_DATA_
+{
+ IMG_UINT32 ui32NewClockSpeed; /*!< New clock speed */
+} RGXFWIF_CORECLKSPEEDCHANGE_DATA;
+
+#define RGXFWIF_HWPERF_CTRL_BLKS_MAX 16
+
+typedef struct _RGXFWIF_HWPERF_CTRL_BLKS_
+{
+ IMG_BOOL bEnable;
+ IMG_UINT32 ui32NumBlocks; /*!< Number of block IDs in the array */
+ IMG_UINT16 aeBlockIDs[RGXFWIF_HWPERF_CTRL_BLKS_MAX]; /*!< Array of RGX_HWPERF_CNTBLK_ID values */
+} RGXFWIF_HWPERF_CTRL_BLKS;
+
+
+typedef struct _RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS_
+{
+ IMG_UINT16 ui16CustomBlock;
+ IMG_UINT16 ui16NumCounters;
+ PRGX_HWPERF_SELECT_CUSTOM_CNTRS sCustomCounterIDs;
+} RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS;
+
+typedef struct _RGXFWIF_ZSBUFFER_BACKING_DATA_
+{
+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr; /*!< ZS-Buffer FW address */
+ IMG_UINT32 bDone; /*!< Whether the backing/unbacking action succeeded */
+} RGXFWIF_ZSBUFFER_BACKING_DATA;
+
+typedef struct
+{
+ IMG_UINT32 ui32IsolationPriorityThreshold;
+} RGXFWIF_OSID_ISOLATION_GROUP_DATA;
+
+/*
+ * Flags to pass in the unused bits of the page size grow request
+ */
+#define RGX_FREELIST_GSDATA_RPM_RESTART_EN (1 << 31) /*!< Restart RPM after freelist grow command */
+#define RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK (0x3FFFFFU) /*!< Mask for page count. */
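+
+/* Illustrative encoding (field name hypothetical): a grow request for 4096
+ * pages that also restarts the RPM folds both into a single word:
+ *   ui32GrowData = (4096 & RGX_FREELIST_GSDATA_RPM_PAGECNT_MASK)
+ *                | RGX_FREELIST_GSDATA_RPM_RESTART_EN;
+ */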
+
+typedef struct _RGXFWIF_FREELIST_GS_DATA_
+{
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr; /*!< Freelist FW address */
+ IMG_UINT32 ui32DeltaPages; /*!< Amount of the Freelist change */
+ IMG_UINT32 ui32NewPages; /*!< New amount of pages on the freelist (including ready pages) */
+ IMG_UINT32 ui32ReadyPages; /*!< Number of ready pages to be held in reserve until OOM */
+} RGXFWIF_FREELIST_GS_DATA;
+
+#define RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG 0x80000000
+
+typedef struct _RGXFWIF_FREELISTS_RECONSTRUCTION_DATA_
+{
+ IMG_UINT32 ui32FreelistsCount;
+ IMG_UINT32 aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FREELISTS_RECONSTRUCTION_DATA;
+
+
+typedef struct _RGXFWIF_SIGNAL_UPDATE_DATA_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sDevSignalAddress; /*!< device virtual address of the updated signal */
+ PRGXFWIF_FWMEMCONTEXT psFWMemContext; /*!< Memory context */
+} UNCACHED_ALIGN RGXFWIF_SIGNAL_UPDATE_DATA;
+
+
+typedef struct _RGXFWIF_WRITE_OFFSET_UPDATE_DATA_
+{
+ PRGXFWIF_FWCOMMONCONTEXT psContext; /*!< Context that may need to be resumed following the write offset update */
+} UNCACHED_ALIGN RGXFWIF_WRITE_OFFSET_UPDATE_DATA;
+
+typedef struct _RGXFWIF_WORKEST_FWCCB_CMD_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex; /*!< Index for return data array */
+ IMG_UINT64 RGXFW_ALIGN ui64CyclesTaken; /*!< The cycles the workload took on the hardware */
+} RGXFWIF_WORKEST_FWCCB_CMD;
+
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+#define NUM_OPP_VALUES 16
+
+typedef struct _PDVFS_OPP_
+{
+ IMG_UINT32 ui32Volt; /* V */
+ IMG_UINT32 ui32Freq; /* Hz */
+} UNCACHED_ALIGN PDVFS_OPP;
+
+typedef struct _RGXFWIF_PDVFS_OPP_
+{
+ PDVFS_OPP asOPPValues[NUM_OPP_VALUES];
+ IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP;
+
+typedef struct _RGXFWIF_PDVFS_OPP_DATA_
+{
+ RGXFWIF_PDVFS_OPP sPDFVSOppInfo;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_OPP_DATA;
+
+typedef struct _RGXFWIF_PDVFS_MAX_FREQ_DATA_
+{
+ IMG_UINT32 ui32MaxOPPPoint;
+} UNCACHED_ALIGN RGXFWIF_PDVFS_MAX_FREQ_DATA;
+
+/*!
+ ******************************************************************************
+ * Register configuration structures
+ *****************************************************************************/
+
+#define RGXFWIF_REG_CFG_MAX_SIZE 512
+
+typedef enum _RGXFWIF_REGDATA_CMD_TYPE_
+{
+ RGXFWIF_REGCFG_CMD_ADD = 101,
+ RGXFWIF_REGCFG_CMD_CLEAR = 102,
+ RGXFWIF_REGCFG_CMD_ENABLE = 103,
+ RGXFWIF_REGCFG_CMD_DISABLE = 104
+} RGXFWIF_REGDATA_CMD_TYPE;
+
+typedef struct _RGXFWIF_REGCONFIG_DATA_
+{
+ RGXFWIF_REGDATA_CMD_TYPE eCmdType;
+ RGXFWIF_REG_CFG_TYPE eRegConfigType;
+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN sRegConfig;
+
+} RGXFWIF_REGCONFIG_DATA;
+
+typedef struct _RGXFWIF_REG_CFG_
+{
+ /**
+ * PDump WRW command write granularity is 32 bits.
+ * Add padding to ensure array size is 32 bit granular.
+ */
+ IMG_UINT8 RGXFW_ALIGN aui8NumRegsType[ALIGN(RGXFWIF_REG_CFG_TYPE_ALL,sizeof(IMG_UINT32))];
+ RGXFWIF_REG_CFG_REC RGXFW_ALIGN asRegConfigs[RGXFWIF_REG_CFG_MAX_SIZE];
+} UNCACHED_ALIGN RGXFWIF_REG_CFG;
+
+typedef struct _RGXFWIF_REGISTER_GUESTOS_OFFSETS_
+{
+ IMG_UINT32 ui32OSid;
+ RGXFWIF_DEV_VIRTADDR RGXFW_ALIGN sKCCBCtl;
+ RGXFWIF_DEV_VIRTADDR sKCCB;
+ RGXFWIF_DEV_VIRTADDR sFirmwareCCBCtl;
+ RGXFWIF_DEV_VIRTADDR sFirmwareCCB;
+} UNCACHED_ALIGN RGXFWIF_REGISTER_GUESTOS_OFFSETS;
+
+/* OSid Scheduling Priority Change */
+typedef struct _RGXFWIF_OSID_PRIORITY_DATA_
+{
+ IMG_UINT32 ui32OSidNum;
+ IMG_UINT32 ui32Priority;
+} RGXFWIF_OSID_PRIORITY_DATA;
+
+typedef struct
+{
+ IMG_UINT32 ui32OSid;
+ RGXFWIF_OS_STATE_CHANGE eNewOSState;
+} UNCACHED_ALIGN RGXFWIF_OS_STATE_CHANGE_DATA;
+
+typedef struct
+{
+ PRGXFWIF_OS_CONFIG sOSConfig;
+} RGXFW_ALIGN RGXFWIF_OS_CONFIG_DATA;
+
+typedef struct
+{
+ RGXFWIF_COUNTER_DUMP_REQUEST eCounterDumpRequest;
+} RGXFW_ALIGN RGXFWIF_COUNTER_DUMP_DATA;
+
+typedef enum _RGXFWIF_KCCB_CMD_TYPE_
+{
+ RGXFWIF_KCCB_CMD_KICK = 101 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_MMUCACHE = 102 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_BP = 104 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_SLCBPCTL = 106 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< slc bypass control. Requires sSLCBPCtlData. For validation */
+ RGXFWIF_KCCB_CMD_SYNC = 107 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< host sync command. Requires sSyncData. */
+ RGXFWIF_KCCB_CMD_SLCFLUSHINVAL = 108 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< slc flush and invalidation request */
+ RGXFWIF_KCCB_CMD_CLEANUP = 109 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests cleanup of a FW resource (type specified in the command data) */
+ RGXFWIF_KCCB_CMD_POW = 110 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Power request */
+ RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG = 111 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure HWPerf events (to be generated) and HWPerf buffer address (if required) */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS = 112 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks */
+ RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS = 113 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Enable or disable multiple HWPerf blocks (reusing existing configuration) */
+ RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE = 114 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< CORE clock speed change event */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE = 115 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Backing for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE = 116 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Unbacking for on-demand ZS-Buffer done */
+ RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE = 117 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Grow done */
+ RGXFWIF_KCCB_CMD_FREELIST_SHRINK_UPDATE = 118 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelist Shrink done */
+ RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE = 119 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Freelists Reconstruction done */
+ RGXFWIF_KCCB_CMD_HEALTH_CHECK = 120 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Health check request */
+ RGXFWIF_KCCB_CMD_REGCONFIG = 121 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS = 122 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure the custom counters for HWPerf */
+ RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT = 123 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Configure, clear and enable multiple HWPerf blocks during the init process*/
+ RGXFWIF_KCCB_CMD_LOGTYPE_UPDATE = 124 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Ask the firmware to update its cached ui32LogType value from the (shared) tracebuf control structure */
+ RGXFWIF_KCCB_CMD_WORKEST_CLEAR_BUFFER = 125 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_PDVFS_LIMIT_MAX_FREQ = 127 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_PDVFS_REQUEST_REACTIVE_UPDATE = 129 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW = 130 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+
+ RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE = 131 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has performed a signal update */
+
+ RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE = 132 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the firmware that the host has added more data to a CDM2 Circular Buffer */
+
+ RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE = 133 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the relative scheduling priority for a particular OSid. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL = 134 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set or clear firmware state flags */
+ RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE = 135 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Set hard context switching deadline */
+ RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE = 136 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Changes the configuration of (or even disables) the OSid Isolation scheduling group. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE = 137 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Informs the FW that a Guest OS has come online / offline. It can only be serviced for the Host DDK */
+ RGXFWIF_KCCB_CMD_OS_CFG_INIT = 138 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< First kick of the DDK which initializes all OS specific data on the FW */
+ RGXFWIF_KCCB_CMD_COUNTER_DUMP = 139 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Controls counter dumping in the FW */
+ RGXFWIF_KCCB_CMD_FORCE_UPDATE = 140 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Forcing signalling of all unmet UFOs for a given CCB offset */
+ RGXFWIF_KCCB_CMD_HWPERF_BVNC_FEATURES = 141 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Request HWPerf Feature Packet in Firmware */
+} RGXFWIF_KCCB_CMD_TYPE;
+
+/* Kernel CCB command packet */
+typedef struct _RGXFWIF_KCCB_CMD_
+{
+ RGXFWIF_KCCB_CMD_TYPE eCmdType; /*!< Command type */
+ RGXFWIF_DM eDM; /*!< DM associated with the command */
+
+ union
+ {
+ RGXFWIF_KCCB_CMD_KICK_DATA sCmdKickData; /*!< Data for Kick command */
+ RGXFWIF_MMUCACHEDATA sMMUCacheData; /*!< Data for MMUCACHE command */
+ RGXFWIF_BPDATA sBPData; /*!< Data for Breakpoint Commands */
+ RGXFWIF_SLCBPCTLDATA sSLCBPCtlData; /*!< Data for SLC Bypass Control */
+ RGXFWIF_KCCB_CMD_SYNC_DATA sSyncData; /*!< Data for host sync commands */
+ RGXFWIF_SLCFLUSHINVALDATA sSLCFlushInvalData; /*!< Data for SLC Flush/Inval commands */
+ RGXFWIF_CLEANUP_REQUEST sCleanupData; /*!< Data for cleanup commands */
+ RGXFWIF_POWER_REQUEST sPowData; /*!< Data for power request commands */
+ RGXFWIF_HWPERF_CTRL sHWPerfCtrl; /*!< Data for HWPerf control command */
+ RGXFWIF_HWPERF_CONFIG_ENABLE_BLKS sHWPerfCfgEnableBlks; /*!< Data for HWPerf configure, clear and enable performance counter block command */
+ RGXFWIF_HWPERF_CTRL_BLKS sHWPerfCtrlBlks; /*!< Data for HWPerf enable or disable performance counter block commands */
+ RGXFWIF_HWPERF_SELECT_CUSTOM_CNTRS sHWPerfSelectCstmCntrs; /*!< Data for HWPerf configure the custom counters to read */
+ RGXFWIF_CORECLKSPEEDCHANGE_DATA sCORECLKSPEEDCHANGEData;/*!< Data for CORE clock speed change */
+ RGXFWIF_ZSBUFFER_BACKING_DATA sZSBufferBackingData; /*!< Feedback for Z/S Buffer backing/unbacking */
+ RGXFWIF_FREELIST_GS_DATA sFreeListGSData; /*!< Feedback for Freelist grow/shrink */
+ RGXFWIF_FREELISTS_RECONSTRUCTION_DATA sFreeListsReconstructionData; /*!< Feedback for Freelists reconstruction */
+ RGXFWIF_REGCONFIG_DATA sRegConfigData; /*!< Data for custom register configuration */
+ RGXFWIF_REGISTER_GUESTOS_OFFSETS sRegisterGuestOsOffests;/*!< Data for registering a guest OS with the FW */
+ RGXFWIF_SIGNAL_UPDATE_DATA sSignalUpdateData; /*!< Data for informing the FW about the signal update */
+ RGXFWIF_WRITE_OFFSET_UPDATE_DATA sWriteOffsetUpdateData; /*!< Data for informing the FW about the write offset update */
+ RGXFWIF_PDVFS_OPP_DATA sPDVFSOppData;
+ RGXFWIF_PDVFS_MAX_FREQ_DATA sPDVFSMaxFreqData;
+ RGXFWIF_OSID_PRIORITY_DATA sCmdOSidPriorityData; /*!< Data for updating an OSid priority */
+ RGXFWIF_HCS_CTL sHCSCtrl; /*!< Data for Hard Context Switching */
+ RGXFWIF_OSID_ISOLATION_GROUP_DATA sCmdOSidIsolationData; /*!< Data for updating the OSid isolation group */
+ RGXFWIF_OS_STATE_CHANGE_DATA sCmdOSOnlineStateData; /*!< Data for updating the Guest Online states */
+ RGXFWIF_OS_CONFIG_DATA sCmdOSConfigData; /*!< Data for the OS-specific initialization part of the FW */
+ RGXFWIF_DEV_VIRTADDR sTBIBuffer; /*!< Dev address for TBI buffer allocated on demand */
+ RGXFWIF_COUNTER_DUMP_DATA sCounterDumpConfigData; /*!< Data for dumping of register ranges */
+ RGXFWIF_KCCB_CMD_FORCE_UPDATE_DATA sForceUpdateData; /*!< Data for signalling all unmet fences for a given CCB */
+ } UNCACHED_ALIGN uCmdData;
+} UNCACHED_ALIGN RGXFWIF_KCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_KCCB_CMD);
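+
+/* Illustrative (hypothetical) host-side population of a kernel CCB command;
+ * the actual submission path lives outside this header:
+ *
+ *   RGXFWIF_KCCB_CMD sCmd = { 0 };
+ *   sCmd.eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+ *   sCmd.uCmdData.sCleanupData.eCleanupType = RGXFWIF_CLEANUP_FREELIST;
+ *   sCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+ */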
+
+/*!
+ ******************************************************************************
+ * Firmware CCB command structure for RGX
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA_
+{
+ IMG_UINT32 ui32ZSBufferID;
+ IMG_BOOL bPopulate;
+} RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA_
+{
+ IMG_UINT32 ui32FreelistID;
+} RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA_
+{
+ IMG_UINT32 ui32FreelistsCount;
+ IMG_UINT32 ui32HwrCounter;
+ IMG_UINT32 aui32FreelistIDs[MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS];
+} RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA;
+
+/* If set, the FW will send the page fault address in a separate command */
+#define RGXFWIF_FWCCB_CMD_CONTEXT_RESET_PAGE_FAULT_ADDRESS_FLAG (1U << 31)
+
+typedef struct _RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA_
+{
+ IMG_UINT32 ui32ServerCommonContextID; /*!< Context affected by the reset */
+ RGXFWIF_CONTEXT_RESET_REASON eResetReason; /*!< Reason for reset */
+ IMG_UINT32 ui32ResetJobRef; /*!< Job ref running at the time of reset */
+ IMG_BOOL bPageFault; /*!< Did a page fault happen */
+ IMG_UINT64 RGXFW_ALIGN ui64PCAddress; /*!< At what page catalog address */
+} RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA;
+
+typedef enum _RGXFWIF_FWCCB_CMD_TYPE_
+{
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING = 101 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be backed with physical pages */
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING = 102 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests ZSBuffer to be unbacked */
+ RGXFWIF_FWCCB_CMD_FREELIST_GROW = 103 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand freelist grow/shrink */
+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION = 104 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests freelists reconstruction */
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION = 105 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Notifies host of a HWR event on a context */
+ RGXFWIF_FWCCB_CMD_DEBUG_DUMP = 106 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand debug dump */
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS = 107 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand update on process stats */
+
+ RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW = 108 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Requests an on-demand RPM freelist grow */
+ RGXFWIF_FWCCB_CMD_WORKLOAD_FINISHED = 109 | RGX_CMD_MAGIC_DWORD_SHIFTED, /*!< Supplies data for the workload matching algorithm */
+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE = 110 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM = 111 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART = 112 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+} RGXFWIF_FWCCB_CMD_TYPE;
+
+typedef enum
+{
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS=1, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumPartialRenders stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32TotalNumOutOfMemory stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumTAStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32Num3DStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES, /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumSHStores stat */
+ RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES /*!< PVRSRVStatsUpdateRenderContextStats should increase the value of the ui32NumCDMStores stat */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE;
+
+
+/* Firmware CCB command packet */
+
+typedef struct
+{
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_TYPE eElementToUpdate; /*!< Element to update */
+ IMG_PID pidOwner; /*!< The pid of the process whose stats are being updated */
+ IMG_INT32 i32AdjustmentValue; /*!< Adjustment to be made to the statistic */
+} RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA;
+/*!
+ ******************************************************************************
+ * Workload Estimation Structures
+ *****************************************************************************/
+
+/*!
+ ******************************************************************************
+ * Proactive DVFS Structures
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA_
+{
+ IMG_UINT32 ui32CoreClkRate;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA;
+
+typedef struct _RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64MemDesc;
+} UNCACHED_ALIGN RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA;
+
+/* These values must appear as the first 32 bits of a partial FWCCB command
+ * (excluding NONE, which is not used in a partial FWCCB command) */
+#define RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_NONE (0U)
+#define RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_CONTEXT_RESET_DATA (1U)
+
+typedef struct _RGXFWIF_FWCCB_CMD_PARTIAL_CONTEXT_RESET_DATA_
+{
+ IMG_UINT32 ui32PartialCmdType;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFaultAddress;
+} RGXFWIF_FWCCB_CMD_PARTIAL_CONTEXT_RESET_DATA;
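+
+/* Illustrative dispatch (pointer name hypothetical): a partial command is
+ * identified by its leading 32-bit type word:
+ *
+ *   if (psPartialCmd->ui32PartialCmdType ==
+ *       RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_CONTEXT_RESET_DATA)
+ *   {
+ *       // sFaultAddress carries the page fault address
+ *   }
+ */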
+
+typedef struct _RGXFWIF_FWCCB_CMD_
+{
+ RGXFWIF_FWCCB_CMD_TYPE eCmdType; /*!< Command type */
+ union
+ {
+ RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING_DATA sCmdZSBufferBacking; /*!< Data for Z/S-Buffer on-demand (un)backing*/
+ RGXFWIF_FWCCB_CMD_FREELIST_GS_DATA sCmdFreeListGS; /*!< Data for on-demand freelist grow/shrink */
+ RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION_DATA sCmdFreeListsReconstruction; /*!< Data for freelists reconstruction */
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA sCmdContextResetNotification; /*!< Data for context reset notification */
+ RGXFWIF_FWCCB_CMD_UPDATE_STATS_DATA sCmdUpdateStatsData; /*!< Data for updating process stats */
+ RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE_DATA sCmdCoreClkRateChange;
+ RGXFWIF_FWCCB_CMD_PDVFS_FREEMEM_DATA sCmdPDVFSFreeMem;
+ RGXFWIF_FWCCB_CMD_PARTIAL_CONTEXT_RESET_DATA sCmdPartialContextResetNotification; /*!< Additional data for context reset notification */
+ } RGXFW_ALIGN uCmdData;
+} RGXFW_ALIGN RGXFWIF_FWCCB_CMD;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGXFWIF_FWCCB_CMD);
+
+/*!
+ ******************************************************************************
+ * Signature and Checksums Buffer
+ *****************************************************************************/
+typedef struct _RGXFWIF_SIGBUF_CTL_
+{
+ PRGXFWIF_SIGBUFFER sBuffer; /*!< Ptr to Signature Buffer memory */
+ IMG_UINT32 ui32LeftSizeInRegs; /*!< Amount of space left for storing regs in the buffer */
+} UNCACHED_ALIGN RGXFWIF_SIGBUF_CTL;
+
+typedef struct _PRGXFWIF_COUNTER_DUMP_CTL_
+{
+ PRGXFWIF_COUNTERBUFFER sBuffer; /*!< Ptr to counter dump buffer */
+ IMG_UINT32 ui32SizeInDwords; /*!< Amount of space for storing in the buffer */
+} UNCACHED_ALIGN RGXFWIF_COUNTER_DUMP_CTL;
+
+/*!
+ ******************************************************************************
+ * Updated configuration post FW data init.
+ *****************************************************************************/
+typedef struct _RGXFWIF_RUNTIME_CFG_
+{
+ IMG_UINT32 ui32ActivePMLatencyms; /* APM latency in ms before signalling IDLE to the host */
+ IMG_BOOL bActivePMLatencyPersistant; /* If set, APM latency does not reset to the system default on each GPU power transition */
+ IMG_UINT32 ui32CoreClockSpeed; /* Core clock speed, currently only used to calculate timer ticks */
+ IMG_UINT32 ui32DefaultDustsNumInit; /* Last number of dusts change requested by the host */
+ PRGXFWIF_HWPERFBUF sHWPerfBuf; /* On-demand allocated HWPerf buffer address, to be passed to the FW */
+} RGXFWIF_RUNTIME_CFG;
+
+/*!
+ *****************************************************************************
+ * Control data for RGX
+ *****************************************************************************/
+
+#define RGXFWIF_HWR_DEBUG_DUMP_ALL (99999)
+
+#if defined(PDUMP)
+
+#define RGXFWIF_PID_FILTER_MAX_NUM_PIDS 32
+
+typedef enum _RGXFWIF_PID_FILTER_MODE_
+{
+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT
+} RGXFWIF_PID_FILTER_MODE;
+
+typedef struct _RGXFWIF_PID_FILTER_ITEM_
+{
+ IMG_PID uiPID;
+ IMG_UINT32 ui32OSID;
+} RGXFW_ALIGN RGXFWIF_PID_FILTER_ITEM;
+
+typedef struct _RGXFWIF_PID_FILTER_
+{
+ RGXFWIF_PID_FILTER_MODE eMode;
+ /* Each process in the filter list is specified by a PID and OS ID pair;
+ * each such pair is an item in the items array (asItems).
+ * If the array contains fewer than RGXFWIF_PID_FILTER_MAX_NUM_PIDS entries,
+ * it must be terminated by an item with a PID of zero.
+ */
+ RGXFWIF_PID_FILTER_ITEM asItems[RGXFWIF_PID_FILTER_MAX_NUM_PIDS];
+} RGXFW_ALIGN RGXFWIF_PID_FILTER;
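+
+/* Illustrative set-up (values hypothetical): capture only PID 1234 on OS 0;
+ * the zero-PID item terminates the list early:
+ *
+ *   RGXFWIF_PID_FILTER sFilter = { 0 };
+ *   sFilter.eMode = RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT;
+ *   sFilter.asItems[0].uiPID = 1234;
+ *   sFilter.asItems[0].ui32OSID = 0;
+ *   sFilter.asItems[1].uiPID = 0;   // terminator
+ */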
+#endif
+
+typedef struct
+{
+ IMG_UINT32 ui32ConfigFlags; /*!< Configuration flags from host */
+ RGXFWIF_DEV_VIRTADDR sPowerSync;
+ IMG_UINT32 ui32ConfigFlagsExt; /*!< Extended configuration flags from host */
+ PRGXFWIF_TRACEBUF sTraceBufCtl; /*!< structure containing trace control data and actual trace buffer */
+ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl;
+ PRGXFWIF_TRACEBUF sGuestTraceBufCtl; /*!< structure containing trace control data and actual trace buffer for the Guest OSes */
+ PRGXFWIF_HWRINFOBUF sRGXFWIfGuestHWRInfoBufCtl;
+
+} RGXFWIF_OS_CONFIG;
+
+typedef enum
+{
+ RGXFWIF_GPIO_VAL_OFF = 0, /*!< No GPIO validation */
+ RGXFWIF_GPIO_VAL_GENERAL = 1, /*!< Simple test case that
+ initiates by sending data via the
+ GPIO and then sends back any data
+ received over the GPIO */
+ RGXFWIF_GPIO_VAL_AP = 2, /*!< More complex test case that writes
+ and reads data across the entire
+ GPIO AP address range.*/
+#if defined(SUPPORT_STRIP_RENDERING)
+ RGXFWIF_GPIO_VAL_SR_BASIC = 3, /*!< Strip Rendering AP based basic test.*/
+ RGXFWIF_GPIO_VAL_SR_COMPLEX = 4, /*!< Strip Rendering AP based complex test.*/
+#endif
+ RGXFWIF_GPIO_VAL_LAST
+} RGXFWIF_GPIO_VAL_MODE;
+
+typedef struct _RGXFWIF_INIT_
+{
+
+ PRGXFWIF_OS_CONFIG sOSConfig; /*!< OS configuration data for the FW initialization */
+
+ IMG_DEV_PHYADDR RGXFW_ALIGN sFaultPhysAddr;
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sPDSExecBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sUSCExecBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sResultDumpBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sDPXControlStreamBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sRTUHeapBase;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTDMTPUYUVCeoffsHeapBase;
+
+ IMG_BOOL bFirstTA;
+ IMG_BOOL bFirstRender;
+ IMG_BOOL bFrameworkAfterInit;
+ IMG_BOOL bDisableFilterHWPerfCustomCounter;
+
+ IMG_UINT32 ui32FilterFlags;
+
+ /* Kernel CCB */
+ PRGXFWIF_CCB_CTL psKernelCCBCtl;
+ PRGXFWIF_CCB psKernelCCB;
+
+ /* Firmware CCB */
+ PRGXFWIF_CCB_CTL psFirmwareCCBCtl;
+ PRGXFWIF_CCB psFirmwareCCB;
+
+ RGXFWIF_SIGBUF_CTL asSigBufCtl[RGXFWIF_DM_DEFAULT_MAX];
+
+ IMG_UINT32 ui32BreakpointTemps;
+ IMG_UINT32 ui32BreakpointShareds;
+ IMG_UINT32 ui32HWRDebugDumpLimit;
+
+ RGXFWIF_BIFTILINGMODE eBifTilingMode;
+ struct
+ {
+ IMG_UINT64 uiBase;
+ IMG_UINT64 uiLen;
+ IMG_UINT64 uiXStride;
+ } RGXFW_ALIGN sBifTilingCfg[RGXFWIF_NUM_BIF_TILING_CONFIGS];
+
+ PRGXFWIF_RUNTIME_CFG sRuntimeCfg;
+
+ PRGXFWIF_TRACEBUF sTraceBufCtl;
+ PRGXFWIF_TBIBUF sTBIBuf;
+ IMG_UINT64 RGXFW_ALIGN ui64HWPerfFilter;
+
+ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl;
+ PRGXFWIF_GPU_UTIL_FWCB sGpuUtilFWCbCtl;
+ PRGXFWIF_REG_CFG sRegCfg;
+ PRGXFWIF_HWPERF_CTL sHWPerfCtl;
+
+ RGXFWIF_COUNTER_DUMP_CTL sCounterDumpCtl;
+
+ RGXFWIF_DEV_VIRTADDR sAlignChecks;
+
+ /* Core clock speed at FW boot time */
+ IMG_UINT32 ui32InitialCoreClockSpeed;
+
+ /* APM latency in ms before signalling IDLE to the host */
+ IMG_UINT32 ui32ActivePMLatencyms;
+
+ /* Flag to be set by the Firmware after successful start */
+ IMG_BOOL bFirmwareStarted;
+
+ IMG_UINT32 ui32MarkerVal;
+
+ IMG_UINT32 ui32FirmwareStartedTimeStamp;
+
+ IMG_UINT32 ui32JonesDisableMask;
+
+ /* Compatibility checks to be populated by the Firmware */
+ RGXFWIF_COMPCHECKS sRGXCompChecks;
+
+ RGXFWIF_DMA_ADDR sCorememDataStore;
+
+ FW_PERF_CONF eFirmwarePerf;
+
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sSLC3FenceDevVAddr;
+
+ RGXFWIF_DEV_VIRTADDR sT1Stack;
+
+ RGXFWIF_PDVFS_OPP sPDVFSOPPInfo;
+
+ /**
+ * FW pointer to memory containing the core clock rate in Hz.
+ * The firmware (PDVFS) updates this memory when running on a non-primary
+ * FW thread in order to communicate the rate to the host driver.
+ */
+ PRGXFWIF_CORE_CLK_RATE sCoreClockRate;
+
+#if defined(PDUMP)
+ RGXFWIF_PID_FILTER sPIDFilter;
+#endif
+
+ /* Workload Estimation Firmware CCB */
+ PRGXFWIF_CCB_CTL psWorkEstFirmwareCCBCtl;
+ PRGXFWIF_CCB psWorkEstFirmwareCCB;
+
+ RGXFWIF_GPIO_VAL_MODE eGPIOValidationMode;
+
+ /* Used in HWPerf for decoding BVNC Features */
+ IMG_UINT32 ui32BvncKmFeatureFlags;
+
+} UNCACHED_ALIGN RGXFWIF_INIT;
+
+
+/*!
+ ******************************************************************************
+ * Client CCB commands which are only required by the kernel
+ *****************************************************************************/
+typedef struct _RGXFWIF_CMD_PRIORITY_
+{
+ IMG_UINT32 ui32Priority;
+} RGXFWIF_CMD_PRIORITY;
+
+#endif /* __RGX_FWIF_KM_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_resetframework.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_resetframework.h
new file mode 100644
index 00000000000000..dceeb485834b8f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_resetframework.h
@@ -0,0 +1,74 @@
+/*************************************************************************/ /*!
+@File rgx_fwif_resetframework.h
+@Title Post-reset work-around framework FW interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_FWIF_RESETFRAMEWORK_H)
+#define _RGX_FWIF_RESETFRAMEWORK_H
+
+#include "img_types.h"
+#include "rgx_fwif_shared.h"
+
+typedef struct _RGXFWIF_RF_REGISTERS_
+{
+#if RGX_FEATURE_CDM_CONTROL_STREAM_FORMAT == 2
+ IMG_UINT64 uCDMReg_CDM_CB_QUEUE;
+ IMG_UINT64 uCDMReg_CDM_CB_BASE;
+ IMG_UINT64 uCDMReg_CDM_CB;
+#else
+ IMG_UINT64 uCDMReg_CDM_CTRL_STREAM_BASE;
+#endif
+} RGXFWIF_RF_REGISTERS;
+
+#define RGXFWIF_RF_FLAG_ENABLE 0x00000001 /*!< enables the reset framework in the firmware */
+
+typedef struct _RGXFWIF_RF_CMD_
+{
+ IMG_UINT32 ui32Flags;
+
+ /* THIS MUST BE THE LAST MEMBER OF THE CONTAINING STRUCTURE */
+ RGXFWIF_RF_REGISTERS RGXFW_ALIGN sFWRegisters;
+
+} RGXFWIF_RF_CMD;
+
+/* size used to opaquely allocate and copy the command in the kernel */
+#define RGXFWIF_RF_CMD_SIZE sizeof(RGXFWIF_RF_CMD)
+
+#endif /* _RGX_FWIF_RESETFRAMEWORK_H */
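RGXFWIF_RF_CMD_SIZE lets callers stay layout-agnostic: because sFWRegisters is the last member, the one size covers the whole command whichever CDM_CONTROL_STREAM_FORMAT branch was compiled in. A minimal sketch of such an opaque copy (CopyRFCmdOpaque is a hypothetical helper, not a name taken from this driver):

    #include <string.h> /* memcpy; a kernel build would use its own copy helper */

    static void CopyRFCmdOpaque(void *pvDest, const RGXFWIF_RF_CMD *psSrc)
    {
        /* No knowledge of the register layout is needed here; the size
         * macro already accounts for the build-dependent tail. */
        memcpy(pvDest, psSrc, RGXFWIF_RF_CMD_SIZE);
    }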
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sf.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sf.h
new file mode 100644
index 00000000000000..5159d160e184aa
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sf.h
@@ -0,0 +1,724 @@
+/*************************************************************************/ /*!
+@File rgx_fwif_sf.h
+@Title RGX firmware interface string format specifiers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the rgx firmware logging messages. The following
+ list contains the messages the firmware prints. Changing
+ anything except the first column (or fixing spelling mistakes
+ in the strings) will break compatibility with log files
+ created with older/newer firmware versions.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_FWIF_SF_H
+#define RGX_FWIF_SF_H
+
+/*****************************************************************************
+ * *DO*NOT* rearrange or delete lines in SFIDLIST or SFGROUPLIST or you
+ * WILL BREAK fw tracing message compatibility with previous
+ * fw versions. Only add new entries, if required.
+ ****************************************************************************/
+/* Available log groups */
+#define RGXFW_LOG_SFGROUPLIST \
+ X(RGXFW_GROUP_NULL,NULL) \
+ X(RGXFW_GROUP_MAIN,MAIN) \
+ X(RGXFW_GROUP_CLEANUP,CLEANUP) \
+ X(RGXFW_GROUP_CSW,CSW) \
+ X(RGXFW_GROUP_PM, PM) \
+ X(RGXFW_GROUP_RTD,RTD) \
+ X(RGXFW_GROUP_SPM,SPM) \
+ X(RGXFW_GROUP_MTS,MTS) \
+ X(RGXFW_GROUP_BIF,BIF) \
+ X(RGXFW_GROUP_MISC,MISC) \
+ X(RGXFW_GROUP_POW,POW) \
+ X(RGXFW_GROUP_HWR,HWR) \
+ X(RGXFW_GROUP_HWP,HWP) \
+ X(RGXFW_GROUP_RPM,RPM) \
+ X(RGXFW_GROUP_DMA,DMA) \
+ X(RGXFW_GROUP_DBG,DBG)
+
+enum RGXFW_LOG_SFGROUPS {
+#define X(A,B) A,
+ RGXFW_LOG_SFGROUPLIST
+#undef X
+};
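This #define X / expand / #undef X sequence is the X-macro idiom: the list is written once and expanded several times into different artefacts. Purely as an illustration (this second expansion is not in the file; apszGroupName is an illustrative name), the same list can also generate a printable name per group:

    static const char *const apszGroupName[] = {
    #define X(A,B) #B,
        RGXFW_LOG_SFGROUPLIST
    #undef X
    };
    /* e.g. apszGroupName[RGXFW_GROUP_MAIN] == "MAIN" */

Because the enum above is generated from the same list, the array indices and enum values stay in sync automatically.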
+
+#define IMG_SF_STRING_MAX_SIZE 256
+
+typedef struct _TUPLE_ {
+ IMG_UINT32 ui32Id;
+ IMG_CHAR sName[IMG_SF_STRING_MAX_SIZE];
+} RGXFW_STID_FMT; /* pair of string format id and its format string */
+
+
+/* Table of String Format specifiers, the group they belong to and the number
+ * of arguments each expects. X-macro style macros are used to generate what
+ * is needed without requiring hand editing.
+ *
+ * id : id within a group
+ * gid : group id
+ * Sym name : name of enumerations used to identify message strings
+ * String : Actual string
+ * #args : number of arguments the string format requires
+ */
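As a hedged sketch of how a decoder might consume the list that follows, expanding it into the RGXFW_STID_FMT pairs declared above (the expansion would have to appear after the RGXFW_LOG_SFIDLIST definition below, and the (gid << 12) | id packing is an assumption for illustration, not necessarily this driver's real ID encoding):

    static const RGXFW_STID_FMT asSFIDTable[] = {
    #define X(id, gid, name, str, nargs) { ((IMG_UINT32)(gid) << 12) | (id), str },
        RGXFW_LOG_SFIDLIST
    #undef X
    };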
+#define RGXFW_LOG_SFIDLIST \
+/*id, gid, id name, string, # arguments */ \
+X( 0, RGXFW_GROUP_NULL, RGXFW_SF_FIRST, "You should not use this string\n", 0) \
+\
+X( 1, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D_DEPRECATED, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x. Partial render:%d, CSW resume:%d, prio:%d\n", 6) \
+X( 2, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_FINISHED, "3D finished, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X( 3, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK3D_TQ_DEPRECATED, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d, prio: %d\n", 4) \
+X( 4, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_TQ_FINISHED, "3D Transfer finished\n", 0) \
+X( 5, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE_DEPRECATED, "Kick Compute: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X( 6, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_FINISHED, "Compute finished\n", 0) \
+X( 7, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA_DEPRECATED, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x. First kick:%d, Last kick:%d, CSW resume:%d, prio:%d\n", 7) \
+X( 8, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_FINISHED, "TA finished\n", 0) \
+X( 9, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESTART_AFTER_PRENDER, "Restart TA after partial render\n", 0) \
+X(10, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_RESUME_WOUT_PRENDER, "Resume TA without partial render\n", 0) \
+X(11, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OOM, "Out of memory! Context 0x%08x, HWRTData 0x%x\n", 2) \
+X(12, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA_DEPRECATED, "Kick TLA: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(13, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TLA_FINISHED, "TLA finished\n", 0) \
+X(14, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CCCB_WOFF_UPDATE, "cCCB Woff update = %d, DM = %d, FWCtx = 0x%08.8X\n", 3) \
+X(16, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_START, "UFO Checks for FWCtx %08.8X @ %d\n", 2) \
+X(17, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK, "UFO Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(18, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_SUCCEEDED, "UFO Checks succeeded\n", 0) \
+X(19, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_PR_CHECK, "UFO PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(20, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_START, "UFO SPM PR-Checks for FWCtx %08.8X\n", 1) \
+X(21, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK_DEPRECATED, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= ????????, [%08.8X] is ???????? requires %08.8X\n", 4) \
+X(22, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE_START, "UFO Updates for FWCtx %08.8X @ %d\n", 2) \
+X(23, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(24, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ASSERT_FAILED, "ASSERT Failed: line %d of: \n", 1) \
+X(25, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_LOCKUP_DEPRECATED, "HWR: Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(26, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_FW_DEPRECATED, "HWR: Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(27, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_RESET_HW_DEPRECATED, "HWR: Reset HW\n", 0) \
+X(28, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_TERMINATED_DEPRECATED, "HWR: Lockup recovered.\n", 0) \
+X(29, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_FALSE_LOCKUP_DEPRECATED, "HWR: False lockup detected for DM%u\n", 1) \
+X(30, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ALIGN_FAILED, "Alignment check %d failed: host = %X, fw = %X\n", 3) \
+X(31, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_GP_USC_TRIGGERED, "GP USC triggered\n", 0) \
+X(32, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_OVERALLOC_REGS, "Overallocating %u temporary registers and %u shared registers for breakpoint handler\n", 2) \
+X(33, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED, "Setting breakpoint: Addr 0x%08.8X\n", 1) \
+X(34, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_STORE, "Store breakpoint state\n", 0) \
+X(35, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_UNSET, "Unsetting BP Registers\n", 0) \
+X(36, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NONZERO_RT, "Active RTs expected to be zero, actually %u\n", 1) \
+X(37, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTC_PRESENT, "RTC present, %u active render targets\n", 1) \
+X(38, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_EST_POWER, "Estimated Power 0x%x\n", 1) \
+X(39, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_TARGET, "RTA render target %u\n", 1) \
+X(40, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTA_KICK_RENDER, "Kick RTA render %u of %u\n", 2) \
+X(41, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SIZES_CHECK, "HWR sizes check %d failed: addresses = %d, sizes = %d\n", 3) \
+X(42, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_ENABLE_DEPRECATED, "Pow: DUSTS_ENABLE = 0x%X\n", 1) \
+X(43, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_HWREQ_DEPRECATED, "Pow: On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X(44, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_DEPRECATED, "Pow: Changing number of dusts from %d to %d\n", 2) \
+X(45, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_SIDEKICK_IDLE_DEPRECATED, "Pow: Sidekick ready to be powered down\n", 0) \
+X(46, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_POW_DUSTS_CHANGE_REQ_DEPRECATED, "Pow: Request to change num of dusts to %d (bPowRascalDust=%d)\n", 2) \
+X(47, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_STORE, "No ZS Buffer used for partial render (store)\n", 0) \
+X(48, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_PARTIALRENDER_WITHOUT_ZSBUFFER_LOAD, "No Depth/Stencil Buffer used for partial render (load)\n", 0) \
+X(49, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_SET_LOCKUP_DEPRECATED, "HWR: Lock-up DM%d FWCtx: %08.8X \n", 2) \
+X(50, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE_DEPRECATED, "MLIST%d checker: CatBase TE=0x%08x (%d Pages), VCE=0x%08x (%d Pages), ALIST=0x%08x, IsTA=%d\n", 7) \
+X(51, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_MLIST_VALUE, "MLIST%d checker: MList[%d] = 0x%08x\n", 3) \
+X(52, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_OK, "MLIST%d OK\n", 1) \
+X(53, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_EMPTY, "MLIST%d is empty\n", 1) \
+X(54, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_MLIST_CHECKER_REG_VALUE, "MLIST%d checker: CatBase TE=0x%08X%08X, VCE=0x%08x%08X, ALIST=0x%08x%08X, IsTA=%d\n", 8) \
+X(55, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_40480KICK, "3D OQ flush kick\n", 0) \
+X(56, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWP_UNSUPPORTED_BLOCK, "HWPerf block ID (0x%x) unsupported by device\n", 1) \
+X(57, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET_DEPRECATED2, "Setting breakpoint: Addr 0x%08.8X DM%u\n", 2) \
+X(58, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(59, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_FINISHED, "RDM finished on context %u\n", 1) \
+X(60, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG_DEPRECATED, "Kick SHG: FWCtx 0x%08.8X @ %d, prio: %d\n", 3) \
+X(61, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SHG_FINISHED, "SHG finished\n", 0) \
+X(62, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FBA_FINISHED, "FBA finished on context %u\n", 1) \
+X(63, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHECK_FAILED, "UFO Checks failed\n", 0) \
+X(64, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_START, "Kill DM%d start\n", 1) \
+X(65, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_COMPLETE, "Kill DM%d complete\n", 1) \
+X(66, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FC_CCB_UPDATE, "FC%u cCCB Woff update = %u\n", 2) \
+X(67, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU_DEPRECATED2, "Kick RTU: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(68, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_INIT, "Sidekick init\n", 0) \
+X(69, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_INIT, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(70, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGTIMES, "Register access cycles: read: %d cycles, write: %d cycles, iterations: %d\n", 3) \
+X(71, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_ADD, "Register configuration added. Address: 0x%x Value: 0x%x%x\n", 3) \
+X(72, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_REGCONFIG_SET, "Register configuration applied to type %d. (0:pow on, 1:Rascal/dust init, 2-5: TA,3D,CDM,TLA, 6:All)\n", 1) \
+X(73, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TPC_FLUSH, "Perform TPC flush.\n", 0) \
+X(74, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_LOCKUP, "GPU has locked up (see HWR logs for more info)\n", 0) \
+X(75, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_OUTOFTIME, "HWR has been triggered - GPU has overrun its deadline (see HWR logs)\n", 0) \
+X(76, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HWR_HIT_POLLFAILURE, "HWR has been triggered - GPU has failed a poll (see HWR logs)\n", 0) \
+X(77, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DOPPLER_OOM, "Doppler out of memory event for FC %u\n", 1) \
+X(78, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK1, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires >= %08.8X\n", 3) \
+X(79, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_SPM_PR_CHECK2, "UFO SPM special PR-Check: [%08.8X] is %08.8X requires %08.8X\n", 3) \
+X(80, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TIMESTAMP, "TIMESTAMP -> [%08.8X]\n", 1) \
+X(81, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE_START, "UFO RMW Updates for FWCtx %08.8X @ %d\n", 2) \
+X(82, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_RMW_UPDATE, "UFO Update: [%08.8X] = %08.8X\n", 2) \
+X(83, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NULLCMD, "Kick Null cmd: FWCtx 0x%08.8X @ %d\n", 2) \
+X(84, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RPM_OOM, "RPM Out of memory! Context 0x%08x, SH requestor %d\n", 2) \
+X(85, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RTU_ABORT_DISCARD, "Discard RTU due to RPM abort: FWCtx 0x%08.8X @ %d, prio: %d, Frame Context: %d\n", 4) \
+X(86, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED, "Deferring DM%u from running context 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(87, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_WAITING_TURN, "Deferring DM%u from running context 0x%08x @ %d to let other deferred DMs run (deferred DMs = 0x%08x)\n", 4) \
+X(88, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_DEFERRED_NO_LONGER, "No longer deferring DM%u from running context = 0x%08x @ %d (deferred DMs = 0x%08x)\n", 4) \
+X(89, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB_DEPRECATED, "FWCCB for DM%u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(90, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_FWCCB, "FWCCB for OSid %u is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 3) \
+X(91, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART, "Host Sync Partition marker: %d\n", 1) \
+X(92, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SYNC_PART_RPT, "Host Sync Partition repeat: %d\n", 1) \
+X(93, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CLOCK_SPEED_CHANGE, "Core clock set to %d Hz\n", 1) \
+X(94, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_OFFSETS, "Compute Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(95, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE, "Signal check failed, Required Data: 0x%X, Address: 0x%08x%08x\n", 3) \
+X(96, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE, "Signal update, Snoop Filter: %u, MMU Ctx: %u, Signal Id: %u, Signals Base: 0x%08x%08x\n", 5) \
+X(97, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNALED, "Signalled the previously waiting FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(98, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED_DEPRECATED, "Compute stalled\n", 0) \
+X(99, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_STALLED, "Compute stalled (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(100, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED_FROM_STALL, "Compute resumed (Roff = %u, Woff = %u, Size = %u)\n", 3) \
+X(101, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_SIGNAL_UPDATE, "Signal update notification from the host, PC Physical Address: 0x%08x%08x, Signal Virtual Address: 0x%08x%08x\n", 4) \
+X(102, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_UPDATE_OSID_DM, "Signal update from DM: %u, OSId: %u, PC Physical Address: 0x%08x%08x\n", 4) \
+X(103, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_DM, "DM: %u signal check failed\n", 1) \
+X(104, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM_DEPRECATED, "Kick TDM: FWCtx 0x%08.8X @ %d, prio:%d\n", 3) \
+X(105, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_FINISHED, "TDM finished\n", 0) \
+X(106, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TE_PIPE_STATUS, "MMU_PM_CAT_BASE_TE[%d]_PIPE[%d]: 0x%08X 0x%08X)\n", 4) \
+X(107, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_HIT, "BRN 54141 HIT\n", 0) \
+X(108, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_APPLYING_DUMMY_TA, "BRN 54141 Dummy TA kicked\n", 0) \
+X(109, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_RESUME_TA, "BRN 54141 resume TA\n", 0) \
+X(110, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DOUBLE_HIT, "BRN 54141 double hit after applying WA\n", 0) \
+X(111, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BRN_54141_DUMMY_TA_VDM_BASE, "BRN 54141 Dummy TA VDM base address: 0x%08x%08x\n", 2) \
+X(112, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIGNAL_WAIT_FAILURE_WITH_CURRENT, "Signal check failed, Required Data: 0x%X, Current Data: 0x%X, Address: 0x%08x%08x\n", 4) \
+X(113, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BUFFER_STALL, "TDM stalled (Roff = %u, Woff = %u)\n", 2) \
+X(114, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_NOTIFY_WRITE_OFFSET_UPDATE, "Write Offset update notification for stalled FWCtx %08.8X\n", 1) \
+X(115, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_PRIORITY_CHANGE, "Changing OSid %d's priority from %u to %u \n", 3) \
+X(116, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_COMPUTE_RESUMED, "Compute resumed\n", 0) \
+X(117, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TLA, "Kick TLA: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(118, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TDM, "Kick TDM: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(119, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_TA, "Kick TA: FWCtx 0x%08.8X @ %d, RTD 0x%08x, First kick:%d, Last kick:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 11) \
+X(120, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3D, "Kick 3D: FWCtx 0x%08.8X @ %d, RTD 0x%08x, Partial render:%d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 10) \
+X(121, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_3DTQ, "Kick 3D TQ: FWCtx 0x%08.8X @ %d, CSW resume:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(122, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_COMPUTE, "Kick Compute: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, ext:0x%08X, int:0x%08X)\n", 6) \
+X(123, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_RTU, "Kick RTU: FWCtx 0x%08.8X @ %d, Frame Context:%d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 8) \
+X(124, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KICK_SHG, "Kick SHG: FWCtx 0x%08.8X @ %d. (PID:%d, prio:%d, frame:%d, ext:0x%08X, int:0x%08X)\n", 7) \
+X(125, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_CSRM_RECONFIG, "Reconfigure CSRM: special coeff support enable %d.\n", 1) \
+X(127, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TA_REQ_MAX_COEFFS, "TA requires max coeff mode, deferring: %d.\n", 1) \
+X(128, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_REQ_MAX_COEFFS, "3D requires max coeff mode, deferring: %d.\n", 1) \
+X(129, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_KILLDM_FAILED, "Kill DM%d failed\n", 1) \
+X(130, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE, "Thread Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(131, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_WAITING_FOR_QUEUE_FENCE, "Thread Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(132, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_HCS_TRIGGERED, "DM %d failed to Context Switch on time. Triggered HCS (see HWR logs).\n", 1) \
+X(133, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_HCS_SET, "HCS changed to %d ms\n", 1) \
+X(134, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UPDATE_TILES_IN_FLIGHT, "Updating Tiles In Flight (Dusts=%d, PartitionMask=0x%08x, ISPCtl=0x%08x%08x)\n", 4) \
+X(135, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SET_TILES_IN_FLIGHT, " Phantom %d: USCTiles=%d\n", 2) \
+X(136, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF_OFF, "Isolation grouping is disabled \n", 0) \
+X(137, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_ISOLATION_CONF, "Isolation group configured with a priority threshold of %d\n", 1) \
+X(138, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_ONLINE, "OS %d has come online \n", 1) \
+X(139, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_OFFLINE, "OS %d has gone offline \n", 1) \
+X(140, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWCONTEXT_SIGNAL_REKICK, "Signalled the previously stalled FWCtx: 0x%08.8X, OSId: %u, Signal Address: 0x%08x%08x\n", 4) \
+X(141, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSETS, "TDM Queue: FWCtx 0x%08.8X, prio: %d, queue: 0x%08X%08X (Roff = %u, Woff = %u, Size = %u)\n", 7) \
+X(142, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_OFFSET_READ_RESET, "Reset TDM Queue Read Offset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes 0, Woff = %u, Size = %u)\n", 6) \
+X(143, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UMQ_MISMATCHED_READ_OFFSET, "User Mode Queue mismatched stream start: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u, StreamStartOffset = %u)\n", 5) \
+X(144, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_SIDEKICK_DEINIT, "Sidekick deinit\n", 0) \
+X(145, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_RD_DEINIT, "Rascal+Dusts deinit\n", 0) \
+X(146, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG_DEPRECATED, "Initialised OS %d with config flags 0x%08x\n", 2) \
+X(147, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_CHKPT_LIMIT, "Fence checkpoint UFO limit exceeded %d/%d\n", 2) \
+X(148, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_3D_62850KICK, "3D Dummy stencil store\n", 0) \
+X(149, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CONFIG, "Initialised OS %d with config flags 0x%08x and extended config flags 0x%08x\n", 3) \
+X(150, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UNKNOWN_COMMAND, "Unknown Command (eCmdType=0x%08x)\n", 1) \
+X(151, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE, "UFO forced update: FWCtx %08.8X @ %d [%08.8X] = %08.8X\n", 4) \
+X(152, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_UFO_FORCED_UPDATE_NOP, "UFO forced update NOP: FWCtx %08.8X @ %d [%08.8X] = %08.8X, reason %d\n", 5) \
+X(153, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_TDM_BRN66075_CHECK, "TDM context switch check: Roff %u points to 0x%08x, Match=%u\n", 3) \
+X(154, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_OS_INIT_CCBS, "OSid %d CCB init status: %d (1-ok 0-fail): kCCBCtl@0x%x kCCB@0x%x fwCCBCtl@0x%x fwCCB@0x%x \n", 6) \
+X(155, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FWIRQ, "FW IRQ # %u @ %u\n", 2) \
+X(156, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_SET, "Setting breakpoint: Addr 0x%08.8X DM%u usc_breakpoint_ctrl_dm = %u\n", 3) \
+X(157, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB, "Invalid KCCB setup for OSid %u: KCCB 0x%08x, KCCB Ctrl 0x%08x\n", 3) \
+X(158, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KERNEL_CCB_CMD, "Invalid KCCB cmd (%u) for OSid %u @ KCCB 0x%08x\n", 3) \
+X(159, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FW_FAULT, "FW FAULT: At line %d in file 0x%08x%08x, additional data=0x%08x\n", 4) \
+X(160, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_BREAKPOINT_INVALID, "Invalid breakpoint: MemCtx 0x%08x Addr 0x%08.8X DM%u usc_breakpoint_ctrl_dm = %u\n", 4) \
+X(161, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_FLUSHINVAL_CMD_INVALID, "Discarding invalid SLC flushinval command for OSid %u: DM %u, FWCtx 0x%08x\n", 3) \
+X(162, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_NOTIFY_WRITE_OFFSET_UPDATE, "Invalid Write Offset update notification from OSid %u to DM %u: FWCtx 0x%08x, MemCtx 0x%08x\n", 4) \
+X(163, RGXFW_GROUP_MAIN, RGXFW_SF_MAIN_INVALID_KCCB_KICK_CMD, "Null FWCtx in KCCB kick cmd for OSid %u: KCCB 0x%08x, ROff %u, WOff %u\n", 4) \
+\
+X( 1, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK_DEPRECATED, "Bg Task DM = %u, counted = %d\n", 2) \
+X( 2, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE_DEPRECATED, "Bg Task complete DM = %u\n", 1) \
+X( 3, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_KICK, "Irq Task DM = %u, Breq = %d, SBIrq = 0x%X\n", 3) \
+X( 4, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE_DEPRECATED, "Irq Task complete DM = %u\n", 1) \
+X( 5, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_BG_ALL, "Kick MTS Bg task DM=All\n", 0) \
+X( 6, RGXFW_GROUP_MTS, RGXFW_SF_MTS_KICK_MTS_IRQ, "Kick MTS Irq task DM=%d \n", 1) \
+X( 7, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE_DEPRECATED, "Ready queue debug DM = %u, celltype = %d\n", 2) \
+X( 8, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN_DEPRECATED, "Ready-to-run debug DM = %u, item = 0x%x\n", 2) \
+X( 9, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMDHEADER, "Client command header DM = %u, client CCB = %x, cmd = %x\n", 3) \
+X(10, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYTORUN, "Ready-to-run debug OSid = %u, DM = %u, item = 0x%x\n", 3) \
+X(11, RGXFW_GROUP_MTS, RGXFW_SF_MTS_READYCELLTYPE, "Ready queue debug DM = %u, celltype = %d, OSid = %u\n", 3) \
+X(12, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_KICK, "Bg Task DM = %u, counted = %d, OSid = %u\n", 3) \
+X(13, RGXFW_GROUP_MTS, RGXFW_SF_MTS_BG_COMPLETE, "Bg Task complete DM Bitfield: %u\n", 1) \
+X(14, RGXFW_GROUP_MTS, RGXFW_SF_MTS_IRQ_COMPLETE, "Irq Task complete.\n", 0) \
+X(15, RGXFW_GROUP_MTS, RGXFW_SF_MTS_CMD_DISCARD, "Discarded Command Type: %d OS ID = %d PID = %d context = 0x%08x cccb ROff = %x, due to USC breakpoint hit by OS ID = %d PID = %d.\n", 7) \
+\
+X( 1, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_CLEANUP, "FwCommonContext [0x%08x] cleaned\n", 1) \
+X( 2, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FWCTX_BUSY, "FwCommonContext [0x%08x] is busy: ReadOffset = %d, WriteOffset = %d\n", 3) \
+X( 3, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANUP, "HWRTData [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X( 4, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED_FOR_DM, "HWRTData [0x%08x] HW Context cleaned for DM%u, executed commands = %d\n", 3) \
+X( 5, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY_DEPRECATED, "HWRTData [0x%08x] HW Context for DM%u is busy\n", 2) \
+X( 6, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_CLEANED, "HWRTData [0x%08x] HW Context %u cleaned\n", 2) \
+X( 7, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_FL_CLEANED, "Freelist [0x%08x] cleaned\n", 1) \
+X( 8, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_CLEANED, "ZSBuffer [0x%08x] cleaned\n", 1) \
+X( 9, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_ZSBUFFER_BUSY, "ZSBuffer [0x%08x] is busy: submitted = %d, executed = %d\n", 3) \
+X(10, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRTD_BUSY, "HWRTData [0x%08x] HW Context for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(11, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANUP, "HW Ray Frame data [0x%08x] for DM=%d, received cleanup request\n", 2) \
+X(12, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED_FOR_DM, "HW Ray Frame Data [0x%08x] cleaned for DM%u, executed commands = %d\n", 3) \
+X(13, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_BUSY, "HW Ray Frame Data [0x%08x] for DM%u is busy: submitted = %d, executed = %d\n", 4) \
+X(14, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_HWRFD_CLEANED, "HW Ray Frame Data [0x%08x] HW Context %u cleaned\n", 2) \
+X(15, RGXFW_GROUP_CLEANUP, RGXFW_SF_CLEANUP_INVALID_REQUEST, "Discarding invalid cleanup request of type 0x%x\n", 1) \
+\
+X( 1, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_NEEDS_RESUME, "CDM FWCtx 0x%08.8X needs resume\n", 1) \
+X( 2, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X( 3, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SHARED, "CDM FWCtx shared alloc size load 0x%X\n", 1) \
+X( 4, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_COMPLETE, "*** CDM FWCtx store complete\n", 0) \
+X( 5, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_START, "*** CDM FWCtx store start\n", 0) \
+X( 6, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_SOFT_RESET, "CDM Soft Reset\n", 0) \
+X( 7, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_NEEDS_RESUME, "3D FWCtx 0x%08.8X needs resume\n", 1) \
+X( 8, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME, "*** 3D FWCtx 0x%08.8X resume\n", 1) \
+X( 9, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_COMPLETE, "*** 3D context store complete\n", 0) \
+X(10, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED, "3D context store pipe state: 0x%08.8X 0x%08.8X 0x%08.8X\n", 3) \
+X(11, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_START, "*** 3D context store start\n", 0) \
+X(12, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_TQ_RESUME, "*** 3D TQ FWCtx 0x%08.8X resume\n", 1) \
+X(13, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_NEEDS_RESUME, "TA FWCtx 0x%08.8X needs resume\n", 1) \
+X(14, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_RESUME, "*** TA FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(15, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_SHARED, "TA context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(16, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_COMPLETE, "*** TA context store complete\n", 0) \
+X(17, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_START, "*** TA context store start\n", 0) \
+X(18, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED_AGAIN, "Higher priority context scheduled for DM %u, old prio:%d, new prio:%d\n", 3) \
+X(19, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SET_CONTEXT_PRIORITY, "Set FWCtx 0x%x priority to %u\n", 2) \
+X(20, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE_DEPRECATED2, "3D context store pipe%d state: 0x%08.8X\n", 2) \
+X(21, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE_DEPRECATED, "3D context resume pipe%d state: 0x%08.8X\n", 2) \
+X(22, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_NEEDS_RESUME, "SHG FWCtx 0x%08.8X needs resume\n", 1) \
+X(23, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_RESUME, "*** SHG FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X\n", 3) \
+X(24, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_SHARED, "SHG context shared alloc size store 0x%X, load 0x%X\n", 2) \
+X(25, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_COMPLETE, "*** SHG context store complete\n", 0) \
+X(26, RGXFW_GROUP_CSW, RGXFW_SF_CSW_SHG_STORE_START, "*** SHG context store start\n", 0) \
+X(27, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_PIPE_INDIRECT, "Performing TA indirection, last used pipe %d\n", 1) \
+X(28, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_STORE_CTRL_STREAM_TERMINATE, "CDM context store hit ctrl stream terminate. Skip resume.\n", 0) \
+X(29, RGXFW_GROUP_CSW, RGXFW_SF_CSW_CDM_RESUME_AB_BUFFER, "*** CDM FWCtx 0x%08.8X resume from snapshot buffer 0x%08X%08X, shader state %u\n", 4) \
+X(30, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STATE_BUFFER_FLIP, "TA PDS/USC state buffer flip (%d->%d)\n", 2) \
+X(31, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_52563_HIT, "TA context store hit BRN 52563: vertex store tasks outstanding\n", 0) \
+X(32, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_USC_POLL_FAILED, "TA USC poll failed (USC vertex task count: %d)\n", 1) \
+X(33, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TA_STORE_DEFERRED, "TA context store deferred due to BRN 54141.", 0) \
+X(34, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED_DEPRECATED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u\n", 7) \
+X(35, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_START, "*** TDM context store start\n", 0) \
+X(36, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_COMPLETE, "*** TDM context store complete\n", 0) \
+X(37, RGXFW_GROUP_CSW, RGXFW_SF_CSW_TDM_STORE_NEEDS_RESUME, "TDM context needs resume, header [%08.8X, %08.8X]\n", 2) \
+X(38, RGXFW_GROUP_CSW, RGXFW_SF_CSW_HIGHER_PRIORITY_SCHEDULED, "Higher priority context scheduled for DM %u. Prios (OSid, OSid Prio, Context Prio): Current: %u, %u, %u New: %u, %u, %u. Hard Context Switching: %u\n", 8) \
+X(39, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_STORE_PIPE_STATE, "3D context store pipe %2d (%2d) state: 0x%08.8X\n", 3) \
+X(40, RGXFW_GROUP_CSW, RGXFW_SF_CSW_3D_RESUME_PIPE_STATE, "3D context resume pipe %2d (%2d) state: 0x%08.8X\n", 3) \
+\
+X( 1, RGXFW_GROUP_BIF, RGXFW_SF_BIF_ACTIVATE, "Activate MemCtx=0x%08x BIFreq=%d secure=%d\n", 3) \
+X( 2, RGXFW_GROUP_BIF, RGXFW_SF_BIF_DEACTIVATE, "Deactivate MemCtx=0x%08x \n", 1) \
+X( 3, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_ALLOC, "Alloc PC reg %d\n", 1) \
+X( 4, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_GRAB, "Grab reg %d refcount now %d\n", 2) \
+X( 5, RGXFW_GROUP_BIF, RGXFW_SF_BIF_PCREG_UNGRAB, "Ungrab reg %d refcount now %d\n", 2) \
+X( 6, RGXFW_GROUP_BIF, RGXFW_SF_BIF_SETUP_REG, "Setup reg=%d BIFreq=%d, expect=0x%08x%08x, actual=0x%08x%08x\n", 6) \
+X( 7, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TRUST, "Trust enabled:%d, for BIFreq=%d\n", 2) \
+X( 8, RGXFW_GROUP_BIF, RGXFW_SF_BIF_TILECFG, "BIF Tiling Cfg %d base %08x%08x len %08x%08x enable %d stride %d --> %08x%08x\n", 9) \
+X( 9, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID0, "Wrote the Value %d to OSID0, Cat Base %d, Register's contents are now %08x %08x\n", 4) \
+X(10, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSID1, "Wrote the Value %d to OSID1, Context %d, Register's contents are now %04x\n", 3) \
+X(11, RGXFW_GROUP_BIF, RGXFW_SF_BIF_OSIDx, "ui32OSid = %u, Catbase = %u, Reg Address = %x, Reg index = %u, Bitshift index = %u, Val = %08x%08x\n", 7) \
+X(12, RGXFW_GROUP_BIF, RGXFW_SF_BIF_MAP_GPU_MEMORY, "Map GPU memory DevVAddr 0x%x%08x, Size %u, Context ID %u, BIFREQ %u\n", 5) \
+X(13, RGXFW_GROUP_BIF, RGXFW_SF_BIF_UNMAP_GPU_MEMORY, "Unmap GPU memory (event status 0x%x)\n", 1) \
+\
+X( 1, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_WRITE, "GPIO write 0x%02x\n", 1) \
+X( 2, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_READ, "GPIO read 0x%02x\n", 1) \
+X( 3, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ENABLED, "GPIO enabled\n", 0) \
+X( 4, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_DISABLED, "GPIO disabled\n", 0) \
+X( 5, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_STATUS, "GPIO status=%d (0=OK, 1=Disabled)\n", 1) \
+X( 6, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_READ, "GPIO_AP: Read address=0x%02x (%d byte(s))\n", 2) \
+X( 7, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_WRITE, "GPIO_AP: Write address=0x%02x (%d byte(s))\n", 2) \
+X( 8, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_TIMEOUT, "GPIO_AP timeout!\n", 0) \
+X( 9, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_AP_ERROR, "GPIO_AP error. GPIO status=%d (0=OK, 1=Disabled)\n", 1) \
+X(10, RGXFW_GROUP_MISC, RGXFW_SF_MISC_GPIO_ALREADY_READ, "GPIO already read 0x%02x\n", 1) \
+X(11, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_CHECK_BUFFER_AVAILABLE, "SR: Check buffer %d available returned %d\n", 2) \
+X(12, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAITING_BUFFER_AVAILABLE, "SR: Waiting for buffer %d\n", 1) \
+X(13, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_WAIT_BUFFER_TIMEOUT, "SR: Timeout waiting for buffer %d (after %d ticks)\n", 2) \
+X(14, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_CHECK, "SR: Skip frame check for strip %d returned %d (0=No skip, 1=Skip frame)\n", 2) \
+X(15, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_REMAINING_STRIPS, "SR: Skip remaining strip %d in frame\n", 1) \
+X(16, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_FRAME_SKIP_NEW_FRAME, "SR: Inform HW that strip %d is a new frame\n", 1) \
+X(17, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_SKIP_FRAME_TIMEOUT, "SR: Timeout waiting for INTERRUPT_FRAME_SKIP (after %d ticks)\n", 1) \
+X(18, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_MODE, "SR: Strip mode is %d\n", 1) \
+X(19, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_STRIP_INDEX, "SR: Strip Render start (strip %d)\n", 1) \
+X(20, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_RENDERED, "SR: Strip Render complete (buffer %d)\n", 1) \
+X(21, RGXFW_GROUP_MISC, RGXFW_SF_MISC_SR_BUFFER_FAULT, "SR: Strip Render fault (buffer %d)\n", 1) \
+\
+X( 1, RGXFW_GROUP_PM, RGXFW_SF_PM_AMLIST, "ALIST%d SP = %u, MLIST%d SP = %u (VCE 0x%08x%08x, TE 0x%08x%08x, ALIST 0x%08x%08x)\n", 10) \
+X( 2, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED_DEPRECATED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d, mmu:%d\n", 8) \
+X( 3, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE_DEPRECATED, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-3D-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 4, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE_DEPRECATED, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), MFL-TA-Base: 0x%08x%08x (SP = %u, 4PT = %u)\n", 14) \
+X( 5, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_COMPLETE, "Freelist grow completed [0x%08x]: added pages 0x%08x, total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 6, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_GROW_DENIED, "Grow for freelist ID=0x%08x denied by host\n", 1) \
+X( 7, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_UPDATE_COMPLETE, "Freelist update completed [0x%08x]: old total pages 0x%08x, new total pages 0x%08x, new DevVirtAddr 0x%08x%08x\n", 5) \
+X( 8, RGXFW_GROUP_PM, RGXFW_SF_PM_FL_RECONSTRUCTION_FAILED_DEPRECATED, "Reconstruction of freelist ID=0x%08x failed\n", 1) \
+X( 9, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_WARNING, "Ignored attempt to pause or unpause the DM while there is no relevant operation in progress (0-TA,1-3D): %d, operation(0-unpause, 1-pause): %d\n", 2) \
+X( 10, RGXFW_GROUP_PM, RGXFW_SF_PM_3D_TIMEOUT_STATUS, "Force free 3D Context memory, FWCtx: %08x, status(1:success, 0:fail): %d\n", 2) \
+X( 11, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_ALLOC, "PM pause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 12, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_ALLOC, "PM unpause TA ALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 13, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_DALLOC, "PM pause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 14, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_UNPAUSE_DALLOC, "PM unpause 3D DALLOC: PM_PAGE_MANAGEOP set to 0x%x\n", 1) \
+X( 15, RGXFW_GROUP_PM, RGXFW_SF_PM_DM_PAUSE_FAILED, "PM ALLOC/DALLOC change was not actioned: PM_PAGE_MANAGEOP_STATUS=0x%x\n", 1) \
+X( 16, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_SHARED, "Is TA: %d, finished: %d on HW %u (HWRTData = 0x%08x, MemCtx = 0x%08x). FL different between TA/3D: global:%d, local:%d\n", 7) \
+X( 17, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_3DBASE, "UFL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-3D-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)\n", 10) \
+X( 18, RGXFW_GROUP_PM, RGXFW_SF_PM_UFL_TABASE, "UFL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u), FL-TA-Base: 0x%08x%08x (SP = %u, 4PB = %u, 4PT = %u)\n", 10) \
+\
+X( 1, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_DYNAMIC_STATUS, "Global link list dynamic page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 2, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GLL_STATIC_STATUS, "Global link list static page count: vertex 0x%x, varying 0x%x, node 0x%x\n", 3) \
+X( 3, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_GROW, "RPM request failed. Waiting for freelist grow.\n", 0) \
+X( 4, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_ABORT, "RPM request failed. Aborting the current frame.\n", 0) \
+X( 5, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_WAIT_FOR_PENDING_GROW, "RPM waiting for pending grow on freelist 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_RPM, RGXFW_SF_RPM_REQUEST_HOST_GROW, "Request freelist grow [0x%08x] current pages %d, grow size %d\n", 3) \
+X( 7, RGXFW_GROUP_RPM, RGXFW_SF_RPM_FREELIST_LOAD, "Freelist load: SHF = 0x%08x, SHG = 0x%08x\n", 2) \
+X( 8, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_DEPRECATED, "SHF FPL register: 0x%08X.%08X\n", 2) \
+X( 9, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_DEPRECATED, "SHG FPL register: 0x%08X.%08X\n", 2) \
+X(10, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_FREELIST, "Kernel requested RPM grow on freelist (type %d) at 0x%08x from current size %d to new size %d, RPM restart: %d (1=Yes)\n", 5) \
+X(11, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_RESTART, "Restarting SHG\n", 0) \
+X(12, RGXFW_GROUP_RPM, RGXFW_SF_RPM_GROW_ABORTED, "Grow failed, aborting the current frame.\n", 0) \
+X(13, RGXFW_GROUP_RPM, RGXFW_SF_RPM_ABORT_COMPLETE, "RPM abort complete on HWFrameData [0x%08x].\n", 1) \
+X(14, RGXFW_GROUP_RPM, RGXFW_SF_RPM_CLEANUP_NEEDS_ABORT, "RPM freelist cleanup [0x%08x] requires abort to proceed.\n", 1) \
+X(15, RGXFW_GROUP_RPM, RGXFW_SF_RPM_RPM_PT, "RPM page table base register: 0x%08X.%08X\n", 2) \
+X(16, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_ABORT, "Issuing RPM abort.\n", 0) \
+X(17, RGXFW_GROUP_RPM, RGXFW_SF_RPM_OOM_TOGGLE_CHECK_FULL, "RPM OOM received but toggle bits indicate free pages available\n", 0) \
+X(18, RGXFW_GROUP_RPM, RGXFW_SF_RPM_STATE_HW_TIMEOUT, "RPM hardware timeout. Unable to process OOM event.\n", 0) \
+X(19, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_LOAD, "SHF FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(20, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_LOAD, "SHG FL (0x%08x) load, FPL: 0x%08X.%08X, roff: 0x%08X, woff: 0x%08X\n", 5) \
+X(21, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHF_FPL_STORE, "SHF FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+X(22, RGXFW_GROUP_RPM, RGXFW_SF_RPM_SHG_FPL_STORE, "SHG FL (0x%08x) store, roff: 0x%08X, woff: 0x%08X\n", 3) \
+\
+X( 1, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_FINISHED, "3D RTData 0x%08x finished on HW context %u\n", 2) \
+X( 2, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_READY, "3D RTData 0x%08x ready on HW context %u\n", 2) \
+X( 3, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO_DEPRECATED, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d, mmu: %d\n", 4) \
+X( 4, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_3D, "Loading VFP table 0x%08x%08x for 3D\n", 2) \
+X( 5, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOADVFP_TA, "Loading VFP table 0x%08x%08x for TA\n", 2) \
+X( 6, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED, "Load Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X( 7, RGXFW_GROUP_RTD, RGXFW_SF_RTD_VHEAP_STORE, "Perform VHEAP table store\n", 0) \
+X( 8, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_MATCH_FOUND, "RTData 0x%08x: found match in Context=%d: Load=No, Store=No\n", 2) \
+X( 9, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_NULL_FOUND, "RTData 0x%08x: found NULL in Context=%d: Load=Yes, Store=No\n", 2) \
+X(10, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_3D_FINISHED, "RTData 0x%08x: found state 3D finished (0x%08x) in Context=%d: Load=Yes, Store=Yes \n", 3) \
+X(11, RGXFW_GROUP_RTD, RGXFW_SF_RTD_RTDATA_TA_FINISHED, "RTData 0x%08x: found state TA finished (0x%08x) in Context=%d: Load=Yes, Store=Yes \n", 3) \
+X(12, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_STACK_POINTERS, "Loading stack-pointers for %d (0:MidTA,1:3D) on context %d, MLIST = 0x%08x, ALIST = 0x%08x%08x\n", 5) \
+X(13, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: TotalPMPages = %d, FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 10) \
+X(14, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_FINISHED, "TA RTData 0x%08x finished on HW context %u\n", 2) \
+X(15, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED, "TA RTData 0x%08x loaded on HW context %u\n", 2) \
+X(16, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_PB_DEPRECATED2, "Store Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(17, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL_DEPRECATED2, "Load Freelist 0x%X type: %d (0:local,1:global,2:mmu) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(18, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG, "Freelist 0x%X RESET!!!!!!!!\n", 1) \
+X(19, RGXFW_GROUP_RTD, RGXFW_SF_RTD_DEBUG2, "Freelist 0x%X stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 5) \
+X(20, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_DEPRECATED, "Request reconstruction of Freelist 0x%X type: %d (0:local,1:global,2:mmu) on HW context %u\n", 3) \
+X(21, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED, "Freelist reconstruction ACK from host (HWR state :%u)\n", 1) \
+X(22, RGXFW_GROUP_RTD, RGXFW_SF_RTD_FL_RECON_ACK_DEPRECATED2, "Freelist reconstruction completed\n", 0) \
+X(23, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_LOADED_DEPRECATED, "TA RTData 0x%08x loaded on HW context %u HWRTDataNeedsLoading=%d\n", 3) \
+X(24, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TE_RGNHDR_INFO, "TE Region headers base 0x%08x%08x (RGNHDR Init: %d)\n", 3) \
+X(25, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RTDATA_BUFFER_ADDRS, "TA Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x, VHeap 0x%08x%08x, TPC 0x%08x%08x (MemCtx 0x%08x)\n", 8) \
+X(26, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_LOADED, "3D RTData 0x%08x loaded on HW context %u\n", 2) \
+X(27, RGXFW_GROUP_RTD, RGXFW_SF_RTD_3D_RTDATA_BUFFER_ADDRS, "3D Buffers: FWCtx 0x%08x, RT 0x%08x, RTData 0x%08x (MemCtx 0x%08x)\n", 4) \
+X(28, RGXFW_GROUP_RTD, RGXFW_SF_RTD_TA_RESTART_AFTER_PR_EXECUTED, "Restarting TA after partial render, HWRTData0State=%d, HWRTData1State=%d\n", 2) \
+X(29, RGXFW_GROUP_RTD, RGXFW_SF_RTD_PB_SET_TO, "CONTEXT_PB_BASE set to %X, FL different between TA/3D: local: %d, global: %d\n", 3) \
+X(30, RGXFW_GROUP_RTD, RGXFW_SF_RTD_STORE_FL, "Store Freelist 0x%X type: %d (0:local,1:global) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+X(31, RGXFW_GROUP_RTD, RGXFW_SF_RTD_LOAD_FL, "Load Freelist 0x%X type: %d (0:local,1:global) for DM%d: FL Total Pages %u (max=%u,grow size=%u), FL-addr = 0x%08x%08x, stacktop = 0x%08x%08x, Alloc Page Count = %u, Alloc MMU Page Count = %u\n", 12) \
+\
+X( 1, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_DEPRECATED, "Force Z-Load for partial render\n", 0) \
+X( 2, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_DEPRECATED, "Force Z-Store for partial render\n", 0) \
+X( 3, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_LOCAL, "3D MemFree: Local FL 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_MMU, "3D MemFree: MMU FL 0x%08x\n", 1) \
+X( 5, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_GLOBAL, "3D MemFree: Global FL 0x%08x\n", 1) \
+X( 6, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD, "OOM TA/3D PR Check: [%08.8X] is %08.8X requires %08.8X, HardwareSync Fence [%08.8X] is %08.8X requires %08.8X\n", 6) \
+X( 7, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_FL, "OOM TA_cmd=0x%08x, U-FL 0x%08x, N-FL 0x%08x\n", 3) \
+X( 8, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OOM_TACMD_UN_MMU_FL, "OOM TA_cmd=0x%08x, OOM MMU:%d, U-FL 0x%08x, N-FL 0x%08x, MMU-FL 0x%08x\n", 5) \
+X( 9, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_AVOIDED, "Partial render avoided\n", 0) \
+X(10, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_DISCARDED, "Partial render discarded\n", 0) \
+X(11, RGXFW_GROUP_SPM, RGXFW_SF_SPM_PRENDER_FINISHED, "Partial Render finished\n", 0) \
+X(12, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DBG, "SPM Owner = 3D-BG\n", 0) \
+X(13, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_3DIRQ, "SPM Owner = 3D-IRQ\n", 0) \
+X(14, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_NONE, "SPM Owner = NONE\n", 0) \
+X(15, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TABG, "SPM Owner = TA-BG\n", 0) \
+X(16, RGXFW_GROUP_SPM, RGXFW_SF_SPM_OWNER_TAIRQ, "SPM Owner = TA-IRQ\n", 0) \
+X(17, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSTORE_ADDRESS, "ZStore address 0x%08x%08x\n", 2) \
+X(18, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SSTORE_ADDRESS, "SStore address 0x%08x%08x\n", 2) \
+X(19, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZLOAD_ADDRESS, "ZLoad address 0x%08x%08x\n", 2) \
+X(20, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SLOAD_ADDRESS, "SLoad address 0x%08x%08x\n", 2) \
+X(21, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_ZSBUFFER_DEPRECATED, "No deferred ZS Buffer provided\n", 0) \
+X(22, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POPULATED, "ZS Buffer successfully populated (ID=0x%08x)\n", 1) \
+X(23, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_POP_UNNEEDED_DEPRECATED, "No need to populate ZS Buffer (ID=0x%08x)\n", 1) \
+X(24, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOPULATED, "ZS Buffer successfully unpopulated (ID=0x%08x)\n", 1) \
+X(25, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNPOP_UNNEEDED_DEPRECATED, "No need to unpopulate ZS Buffer (ID=0x%08x)\n", 1) \
+X(26, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_DEPRECATED, "Send ZS-Buffer backing request to host (ID=0x%08x)\n", 1) \
+X(27, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_DEPRECATED, "Send ZS-Buffer unbacking request to host (ID=0x%08x)\n", 1) \
+X(28, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer backing request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(29, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_UNBACKING_REQUEST_PENDING_DEPRECATED, "Don't send ZS-Buffer unbacking request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(30, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for ZBuffer to be backed (ID=0x%08x)\n", 1) \
+X(31, RGXFW_GROUP_SPM, RGXFW_SF_SPM_SBUFFER_NOT_READY_DEPRECATED, "Partial Render waiting for SBuffer to be backed (ID=0x%08x)\n", 1) \
+X(32, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_NONE, "SPM State = none\n", 0) \
+X(33, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_BLOCKED, "SPM State = PR blocked\n", 0) \
+X(34, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_GROW, "SPM State = wait for grow\n", 0) \
+X(35, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_HW, "SPM State = wait for HW\n", 0) \
+X(36, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_RUNNING, "SPM State = PR running\n", 0) \
+X(37, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_AVOIDED, "SPM State = PR avoided\n", 0) \
+X(38, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_PR_EXECUTED, "SPM State = PR executed\n", 0) \
+X(39, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FREELIST_MATCH, "3DMemFree matches freelist 0x%08x (FL type = %u)\n", 2) \
+X(40, RGXFW_GROUP_SPM, RGXFW_SF_SPM_3DMEMFREE_FLAG_SET, "Raise the 3DMemFreeDetected flag\n", 0) \
+X(41, RGXFW_GROUP_SPM, RGXFW_SF_SPM_STATE_WAIT_FOR_PENDING_GROW, "Wait for pending grow on Freelist 0x%08x\n", 1) \
+X(42, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ZSBUFFER_BACKING_REQUEST_FAILED, "ZS Buffer failed to be populated (ID=0x%08x)\n", 1) \
+X(43, RGXFW_GROUP_SPM, RGXFW_SF_SPM_FL_GROW_DEBUG, "Grow update inconsistency: FL addr: 0x%02x%08x, curr pages: %u, ready: %u, new: %u\n", 5) \
+X(44, RGXFW_GROUP_SPM, RGXFW_SF_SPM_RESUMED_TA, "OOM: Resumed TA with ready pages, FL addr: 0x%02x%08x, current pages: %u, SP : %u, \n", 4) \
+X(45, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE_DEPRECATED, "Received grow update, FL addr: 0x%02x%08x, current pages: %u, ready pages: %u, threshold: %u\n", 5) \
+X(46, RGXFW_GROUP_SPM, RGXFW_SF_SPM_NO_DEFERRED_PRBUFFER, "No deferred partial render FW (Type=%d) Buffer provided\n", 1) \
+X(47, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_POP_UNNEEDED, "No need to populate PR Buffer (ID=0x%08x)\n", 1) \
+X(48, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNPOP_UNNEEDED, "No need to unpopulate PR Buffer (ID=0x%08x)\n", 1) \
+X(49, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST, "Send PR Buffer backing request to host (ID=0x%08x)\n", 1) \
+X(50, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST, "Send PR Buffer unbacking request to host (ID=0x%08x)\n", 1) \
+X(51, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_BACKING_REQUEST_PENDING, "Don't send PR Buffer backing request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(52, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_UNBACKING_REQUEST_PENDING, "Don't send PR Buffer unbacking request. Previous request still pending (ID=0x%08x)\n", 1) \
+X(53, RGXFW_GROUP_SPM, RGXFW_SF_SPM_BUFFER_NOT_READY, "Partial Render waiting for Buffer %d type to be backed (ID=0x%08x)\n", 2) \
+X(54, RGXFW_GROUP_SPM, RGXFW_SF_SPM_ACK_GROW_UPDATE, "Received grow update, FL addr: 0x%02x%08x, new pages: %u, ready pages: %u\n", 4) \
+\
+X( 1, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED_AGAIN, "Check Pow state DM%d int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 4) \
+X( 2, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_IDLE, "Sidekick idle (might be powered down). Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 3, RGXFW_GROUP_POW, RGXFW_SF_POW_OSREQ, "OS requested pow off (forced = %d), DM%d, pow flags: 0x%8.8X\n", 3) \
+X( 4, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_DEPRECATED, "Initiate powoff query. Inactive DMs: %d %d %d %d\n", 4) \
+X( 5, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECKOFF_DEPRECATED, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X( 6, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_OFF, "Sidekick ready to be powered down. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X( 7, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ, "HW Request On(1)/Off(0): %d, Units: 0x%08.8X\n", 2) \
+X( 8, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_REQ, "Request to change num of dusts to %d (Power flags=%d)\n", 2) \
+X( 9, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE, "Changing number of dusts from %d to %d\n", 2) \
+X(11, RGXFW_GROUP_POW, RGXFW_SF_POW_SIDEKICK_INIT_DEPRECATED, "Sidekick init\n", 0) \
+X(12, RGXFW_GROUP_POW, RGXFW_SF_POW_RD_INIT_DEPRECATED, "Rascal+Dusts init (# dusts mask: %X)\n", 1) \
+X(13, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_RD, "Initiate powoff query for RD-DMs.\n", 0) \
+X(14, RGXFW_GROUP_POW, RGXFW_SF_POW_INIOFF_TLA, "Initiate powoff query for TLA-DM.\n", 0) \
+X(15, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_RD, "Any RD-DM pending? %d, Any RD-DM Active? %d\n", 2) \
+X(16, RGXFW_GROUP_POW, RGXFW_SF_POW_REQUESTEDOFF_TLA, "TLA-DM pending? %d, TLA-DM Active? %d\n", 2) \
+X(17, RGXFW_GROUP_POW, RGXFW_SF_POW_BRN37566, "Request power up due to BRN37566. Pow stat int: 0x%X\n", 1) \
+X(18, RGXFW_GROUP_POW, RGXFW_SF_POW_REQ_CANCEL, "Cancel power off request int: 0x%X, ext: 0x%X, pow flags: 0x%X\n", 3) \
+X(19, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_IDLE, "OS requested forced IDLE, pow flags: 0x%X\n", 1) \
+X(20, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE, "OS cancelled forced IDLE, pow flags: 0x%X\n", 1) \
+X(21, RGXFW_GROUP_POW, RGXFW_SF_POW_IDLE_TIMER, "Idle timer start. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(22, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_IDLE_TIMER, "Cancel idle timer. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(23, RGXFW_GROUP_POW, RGXFW_SF_POW_APM_LATENCY_CHANGE, "Active PM latency set to %dms. Core clock: %d Hz\n", 2) \
+X(24, RGXFW_GROUP_POW, RGXFW_SF_POW_CDM_CLUSTERS, "Compute cluster mask change to 0x%X, %d dusts powered.\n", 2) \
+X(25, RGXFW_GROUP_POW, RGXFW_SF_POW_NULL_CMD_INIOFF_RD, "Null command executed, repeating initiate powoff query for RD-DMs.\n", 0) \
+X(26, RGXFW_GROUP_POW, RGXFW_SF_POW_POWMON_ENERGY, "Power monitor: Estimate of dynamic energy %u\n", 1) \
+X(27, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK_DEPRECATED, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X\n", 3) \
+X(28, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_DEADLINE, "Proactive DVFS: New deadline, time = 0x%08x%08x\n", 2) \
+X(29, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_NEW_WORKLOAD, "Proactive DVFS: New workload, cycles = 0x%08x%08x\n", 2) \
+X(30, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_CALCULATE, "Proactive DVFS: Proactive frequency calculated = 0x%08x\n", 1) \
+X(31, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UTILISATION, "Proactive DVFS: Reactive utilisation = 0x%08x\n", 1) \
+X(32, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_REACT, "Proactive DVFS: Reactive frequency calculated = 0x%08x%08x\n", 2) \
+X(33, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND_DEPRECATED, "Proactive DVFS: OPP Point Sent = 0x%x\n", 1) \
+X(34, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DEADLINE_REMOVED, "Proactive DVFS: Deadline removed = 0x%08x%08x\n", 2) \
+X(35, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_WORKLOAD_REMOVED, "Proactive DVFS: Workload removed = 0x%08x%08x\n", 2) \
+X(36, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_THROTTLE, "Proactive DVFS: Throttle to a maximum = 0x%x\n", 1) \
+X(37, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_FAILURE, "Proactive DVFS: Failed to pass OPP point via GPIO.\n", 0) \
+X(38, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_INVALID_NODE, "Proactive DVFS: Invalid node passed to function.\n", 0) \
+X(39, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GUEST_BAD_ACCESS, "Proactive DVFS: Guest OS attempted to do a privileged action. OSid = %u\n", 1) \
+X(40, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_STARTED, "Proactive DVFS: Unprofiled work started. Total unprofiled work present: 0x%x\n", 1) \
+X(41, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_UNPROFILED_FINISHED, "Proactive DVFS: Unprofiled work finished. Total unprofiled work present: 0x%x\n", 1) \
+X(42, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_DISABLED, "Proactive DVFS: Disabled: Not enabled by host.\n", 0) \
+X(43, RGXFW_GROUP_POW, RGXFW_SF_POW_HWREQ_RESULT, "HW Request Completed(1)/Aborted(0): %d, Ticks: %d\n", 2) \
+X(44, RGXFW_GROUP_POW, RGXFW_SF_POW_DUSTS_CHANGE_FIX_59042, "Allowed number of dusts is %d due to BRN59042.\n", 1) \
+X(45, RGXFW_GROUP_POW, RGXFW_SF_POW_HOST_TIMEOUT_NOTIFICATION, "Host timed out while waiting for a forced idle state. Pow state int: 0x%X, ext: 0x%X, flags: 0x%X\n", 3) \
+X(46, RGXFW_GROUP_POW, RGXFW_SF_POW_CHECK, "Check Pow state: Int: 0x%X, Ext: 0x%X, Pow flags: 0x%X, Fence Counters: Check: %u - Update: %u\n", 5) \
+X(47, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_GPIO_SEND, "Proactive DVFS: OPP Point Sent = 0x%x, Success = %x\n", 2) \
+X(48, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_IDLE, "Proactive DVFS: GPU transitioned to idle\n", 0) \
+X(49, RGXFW_GROUP_POW, RGXFW_SF_POW_PDVFS_TO_ACTIVE, "Proactive DVFS: GPU transitioned to active\n", 0) \
+X(50, RGXFW_GROUP_POW, RGXFW_SF_POW_POWDUMP_BUFFER_SIZE, "Power counter dumping: Data truncated writing register %u. Buffer too small.\n", 1) \
+X(51, RGXFW_GROUP_POW, RGXFW_SF_POW_POWCTRL_ABORT, "Power controller returned ABORT for last request so retrying.\n", 0) \
+X(52, RGXFW_GROUP_POW, RGXFW_SF_POW_INVALID_POWER_REQUEST, "Discarding invalid power request: type 0x%x, DM %u\n", 2) \
+X(53, RGXFW_GROUP_POW, RGXFW_SF_POW_CANCEL_FORCED_IDLE_NOT_IDLE, "Detected attempt to cancel forced idle while not forced idle (pow state 0x%x, pow flags 0x%x)\n", 2) \
+X(54, RGXFW_GROUP_POW, RGXFW_SF_POW_FORCED_POW_OFF_NOT_IDLE, "Detected attempt to force power off while not forced idle (pow state 0x%x, pow flags 0x%x)\n", 2) \
+X(55, RGXFW_GROUP_POW, RGXFW_SF_POW_NUMDUST_CHANGE_NOT_IDLE, "Detected attempt to change dust count while not forced idle (pow state 0x%x)\n", 1) \
+\
+X(1, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DEPRECATED, "Lockup detected on DM%d, FWCtx: %08.8X\n", 2) \
+X(2, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_FW_DEPRECATED, "Reset fw state for DM%d, FWCtx: %08.8X, MemCtx: %08.8X\n", 3) \
+X(3, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED, "Reset HW\n", 0) \
+X(4, RGXFW_GROUP_HWR, RGXFW_SF_HWR_TERMINATED_DEPRECATED, "Lockup recovered.\n", 0) \
+X(5, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP_DEPRECATED, "Lock-up DM%d FWCtx: %08.8X\n", 2) \
+X(6, RGXFW_GROUP_HWR, RGXFW_SF_HWR_LOCKUP_DETECTED_DEPRECATED, "Lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(7, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EARLY_FAULT_DETECTION_DEPRECATED, "Early fault detection: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(8, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP_DEPRECATED, "Hold scheduling due to lockup: GLB(%d), PER-DM(0x%08X->0x%08X)\n", 3) \
+X(9, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FALSE_LOCKUP_DEPRECATED, "False lockup detected: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(10, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729_DEPRECATED, "BRN37729: GLB(%d->%d), PER-DM(0x%08X->0x%08X)\n", 4) \
+X(11, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED_DEPRECATED, "Freelists reconstructed: GLB(%d->%d), PER-DM(0x%08X)\n", 3) \
+X(12, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RECONSTRUCTING_FREELISTS_DEPRECATED, "Reconstructing freelists: %u (0-No, 1-Yes): GLB(%d->%d), PER-DM(0x%08X)\n", 4) \
+X(13, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FAILED_HW_POLL, "HW poll %u (0-Unset 1-Set) failed (reg:0x%08X val:0x%08X)\n", 3) \
+X(14, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED_DEPRECATED, "Discarded cmd on DM%u FWCtx=0x%08X\n", 2) \
+X(15, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_DISCARDED, "Discarded cmd on DM%u (reason=%u) HWRTData=0x%08X (st: %d), FWCtx 0x%08X @ %d\n", 6) \
+X(16, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PM_FENCE, "PM fence WA could not be applied, Valid TA Setup: %d, RD powered off: %d\n", 2) \
+X(17, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_SNAPSHOT, "FL snapshot RTD 0x%08.8X - local (0x%08.8X): %d, global (0x%08.8X): %d\n", 5) \
+X(18, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_CHECK, "FL check RTD 0x%08.8X, discard: %d - local (0x%08.8X): s%d?=c%d, global (0x%08.8X): s%d?=c%d\n", 8) \
+X(19, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_DEPRECATED, "FL reconstruction 0x%08.8X c%d\n", 2) \
+X(20, RGXFW_GROUP_HWR, RGXFW_SF_HWR_3D_CHECK, "3D check: missing TA FWCtx 0x%08.8X @ %d, RTD 0x%08x.\n", 3) \
+X(21, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW_DEPRECATED2, "Reset HW (mmu:%d, extmem: %d)\n", 2) \
+X(22, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_TA_CACHES, "Zero TA caches for FWCtx: %08.8X (TPC addr: %08X%08X, size: %d bytes)\n", 4) \
+X(23, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FREELISTS_RECONSTRUCTED, "Recovery DM%u: Freelists reconstructed. New R-Flags=0x%08X\n", 2) \
+X(24, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SKIPPED_CMD, "Recovery DM%u: FWCtx 0x%08x skipped to command @ %u. PR=%u. New R-Flags=0x%08X\n", 5) \
+X(25, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_RECOVERED, "Recovery DM%u: DM fully recovered\n", 1) \
+X(26, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_LOCKUP, "DM%u: Hold scheduling due to R-Flag = 0x%08x\n", 2) \
+X(27, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_RECONSTRUCTION, "Analysis: Need freelist reconstruction\n", 0) \
+X(28, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP, "Analysis DM%u: Lockup FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(29, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_SKIP_OOM_TA, "Analysis DM%u: Lockup while TA is OOM FWCtx: %08.8X. Need to skip to next command\n", 2) \
+X(30, RGXFW_GROUP_HWR, RGXFW_SF_HWR_NEEDS_PR_CLEANUP, "Analysis DM%u: Lockup while partial render FWCtx: %08.8X. Need PR cleanup\n", 2) \
+X(31, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_LOCKUP, "GPU has locked up\n", 0) \
+X(32, RGXFW_GROUP_HWR, RGXFW_SF_HWR_READY, "DM%u ready for HWR\n", 1) \
+X(33, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_UPDATE_RECOVERY, "Recovery DM%u: Updated Recovery counter. New R-Flags=0x%08X\n", 2) \
+X(34, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BRN37729, "Analysis: BRN37729 detected, reset TA and re-kicked (0x%08X)\n", 1) \
+X(35, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_TIMED_OUT, "DM%u timed out\n", 1) \
+X(36, RGXFW_GROUP_HWR, RGXFW_SF_HWR_EVENT_STATUS_REG, "RGX_CR_EVENT_STATUS=0x%08x\n", 1) \
+X(37, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DM_FALSE_LOCKUP, "DM%u lockup falsely detected, R-Flags=0x%08X\n", 2) \
+X(38, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_OUTOFTIME, "GPU has overrun its deadline\n", 0) \
+X(39, RGXFW_GROUP_HWR, RGXFW_SF_HWR_SET_POLLFAILURE, "GPU has failed a poll\n", 0) \
+X(40, RGXFW_GROUP_HWR, RGXFW_SF_HWR_PERF_PHASE_REG, "RGX DM%u phase count=0x%08x\n", 2) \
+X(41, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_HW, "Reset HW (loop:%d, poll failures: 0x%08X)\n", 2) \
+X(42, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_FAULT_EVENT, "MMU fault event: 0x%08X\n", 1) \
+X(43, RGXFW_GROUP_HWR, RGXFW_SF_HWR_BIF1_FAULT, "BIF1 page fault detected (Bank1 MMU Status: 0x%08X)\n", 1) \
+X(44, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK_TRUE_DEPRECATED, "Fast CRC Failed. Proceeding to full register checking (DM: %u).\n", 1) \
+X(45, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MMU_META_FAULT, "Meta MMU page fault detected (Meta MMU Status: 0x%08X%08X)\n", 2) \
+X(46, RGXFW_GROUP_HWR, RGXFW_SF_HWR_CRC_CHECK, "Fast CRC Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(47, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FULL_CHECK, "Full Signature Check result for DM%u is HWRNeeded=%u\n", 2) \
+X(48, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINAL_RESULT, "Final result for DM%u is HWRNeeded=%u with HWRChecksToGo=%u\n", 3) \
+X(49, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_CHECK, "USC Slots result for DM%u is HWRNeeded=%u USCSlotsUsedByDM=%d\n", 3) \
+X(50, RGXFW_GROUP_HWR, RGXFW_SF_HWR_DEADLINE_CHECK, "Deadline counter for DM%u is HWRDeadline=%u\n", 2) \
+X(51, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST_DEPRECATED, "Holding Scheduling on OSid %u due to pending freelist reconstruction\n", 1) \
+X(52, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_REQUEST, "Requesting reconstruction for freelist 0x%X (ID=%d)\n", 2) \
+X(53, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_PASSED, "Reconstruction of freelist ID=%d complete\n", 1) \
+X(54, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED_DEPRECATED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global,2:mmu) on HW context %u\n", 4) \
+X(55, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FAILED, "Reconstruction of freelist ID=%d failed\n", 1) \
+X(56, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESTRICTING_PDS_TASKS, "Restricting PDS Tasks to help other stalling DMs (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(57, RGXFW_GROUP_HWR, RGXFW_SF_HWR_UNRESTRICTING_PDS_TASKS, "Unrestricting PDS Tasks again (RunningMask=0x%02X, StallingMask=0x%02X, PDS_CTRL=0x%08X%08X)\n", 4) \
+X(58, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_USED, "USC slots: %u used by DM%u\n", 2) \
+X(59, RGXFW_GROUP_HWR, RGXFW_SF_HWR_USC_SLOTS_EMPTY, "USC slots: %u empty\n", 1) \
+X(60, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HCS_FIRE, "HCS DM%d's Context Switch failed to meet deadline. Current time: %08x%08x, deadline: %08x%08x\n", 5) \
+X(61, RGXFW_GROUP_HWR, RGXFW_SF_HWR_START_HW_RESET, "Begin hardware reset (HWR Counter=%d)\n", 1) \
+X(62, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FINISH_HW_RESET, "Finished hardware reset (HWR Counter=%d)\n", 1) \
+X(63, RGXFW_GROUP_HWR, RGXFW_SF_HWR_HOLD_SCHEDULING_DUE_TO_FREELIST, "Holding Scheduling on DM %u for OSid %u due to pending freelist reconstruction\n", 2) \
+X(64, RGXFW_GROUP_HWR, RGXFW_SF_HWR_RESET_UMQ_READ_OFFSET, "User Mode Queue ROff reset: FWCtx 0x%08.8X, queue: 0x%08X%08X (Roff = %u becomes StreamStartOffset = %u)\n", 5) \
+X(65, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_NEEDED, "Reconstruction needed for freelist 0x%X (ID=%d) type: %d (0:local,1:global) on HW context %u\n", 4) \
+X(66, RGXFW_GROUP_HWR, RGXFW_SF_HWR_MIPS_FAULT, "MIPS page fault detected (BadVAddr: 0x%08x, EntryLo0: 0x%08x, EntryLo1: 0x%08x)\n", 3) \
+X(67, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ANOTHER_CHANCE, "At least one other DM is running okay so DM%u will get another chance\n", 1) \
+X(68, RGXFW_GROUP_HWR, RGXFW_SF_HWR_FL_RECON_FW, "Reconstructing in FW, FL: 0x%X (ID=%d)\n", 2) \
+X(69, RGXFW_GROUP_HWR, RGXFW_SF_HWR_ZERO_RTC, "Zero RTC for FWCtx: %08.8X (RTC addr: %08X%08X, size: %d bytes)\n", 4) \
+\
+X( 1, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGBLK, "Block 0x%x mapped to Config Idx %u\n", 2) \
+X( 2, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_OMTBLK, "Block 0x%x omitted from event - not enabled in HW\n", 1) \
+X( 3, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INCBLK, "Block 0x%x included in event - enabled in HW\n", 1) \
+X( 4, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELREG, "Select register state hi_0x%x lo_0x%x\n", 2) \
+X( 5, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CSBHDR, "Counter stream block header word 0x%x\n", 1) \
+X( 6, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CTROFF, "Counter register offset 0x%x\n", 1) \
+X( 7, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CFGSKP, "Block 0x%x config unset, skipping\n", 1) \
+X( 8, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INDBLK, "Accessing Indirect block 0x%x\n", 1) \
+X( 9, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DIRBLK, "Accessing Direct block 0x%x\n", 1) \
+X(10, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CNTPRG, "Programmed counter select register at offset 0x%x\n", 1) \
+X(11, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKPRG, "Block register offset 0x%x and value 0x%x\n", 2) \
+X(12, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKCG, "Reading config block from driver 0x%x\n", 1) \
+X(13, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKRG, "Reading block range 0x%x to 0x%x\n", 2) \
+X(14, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_BLKREC, "Recording block 0x%x config from driver\n", 1) \
+X(15, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_UBLKED, "Finished reading config block from driver\n", 0) \
+X(16, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_COUNTER, "Custom Counter offset: %x value: %x\n", 2) \
+X(17, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_SELECT_CNTR, "Select counter n:%u ID:%x\n", 2) \
+X(18, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_SELECT_PACK, "The counter ID %x is not allowed. The package [b:%u, n:%u] will be discarded\n", 3) \
+X(19, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHANGE_FILTER_STATUS, "Custom Counters filter status %d\n", 1) \
+X(20, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_WRONG_BLOCK, "The Custom block %d is not allowed. Use only blocks lower than %d. The package will be discarded\n", 2) \
+X(21, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_DROP_TOO_MANY_ID, "The package will be discarded because it contains %d counter IDs while the upper limit is %d\n", 2) \
+X(22, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CHECK_FILTER, "Check Filter %x is %x?\n", 2) \
+X(23, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_RESET_CUSTOM_BLOCK, "The custom block %u is reset\n", 1) \
+X(24, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_INVALID_CMD, "Encountered an invalid command (%d)\n", 1) \
+X(25, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_DEPRECATED, "HWPerf Queue is full, we will have to wait for space! (Roff = %u, Woff = %u)\n", 2) \
+X(26, RGXFW_GROUP_HWP, RGXFW_SF_HWP_WAITING_FOR_QUEUE_FENCE_DEPRECATED, "HWPerf Queue is fencing, we are waiting for Roff = %d (Roff = %u, Woff = %u)\n", 3) \
+X(27, RGXFW_GROUP_HWP, RGXFW_SF_HWP_I_CUSTOM_BLOCK, "Custom Counter block: %d\n", 1) \
+\
+X( 1, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_REQUEST, "Transfer 0x%02x request: 0x%02x%08x -> 0x%08x, size %u\n", 5) \
+X( 2, RGXFW_GROUP_DMA, RGXFW_SF_DMA_TRANSFER_COMPLETE, "Transfer of type 0x%02x expected on channel %u, 0x%02x found, status %u\n", 4) \
+X( 3, RGXFW_GROUP_DMA, RGXFW_SF_DMA_INT_REG, "DMA Interrupt register 0x%08x\n", 1) \
+X( 4, RGXFW_GROUP_DMA, RGXFW_SF_DMA_WAIT, "Waiting for transfer ID %u completion...\n", 1) \
+X( 5, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOADING_FAILED, "Loading of cCCB data from FW common context 0x%08x (offset: %u, size: %u) failed\n", 3) \
+X( 6, RGXFW_GROUP_DMA, RGXFW_SF_DMA_CCB_LOAD_INVALID, "Invalid load of cCCB data from FW common context 0x%08x (offset: %u, size: %u)\n", 3) \
+X( 7, RGXFW_GROUP_DMA, RGXFW_SF_DMA_POLL_FAILED, "Transfer 0x%02x request poll failure\n", 1) \
+X( 8, RGXFW_GROUP_DMA, RGXFW_SF_DMA_BOOT_TRANSFER_FAILED, "Boot transfer(s) failed (code? %u, data? %u), used slower memcpy instead\n", 2) \
+\
+X( 1, RGXFW_GROUP_DBG, RGXFW_SF_DBG_INTPAIR, "0x%08x 0x%08x\n", 2) \
+X( 2, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1HEX, "0x%08x\n", 1) \
+X( 3, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2HEX, "0x%08x 0x%08x\n", 2) \
+X( 4, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3HEX, "0x%08x 0x%08x 0x%08x\n", 3) \
+X( 5, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4HEX, "0x%08x 0x%08x 0x%08x 0x%08x\n", 4) \
+X( 6, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 5) \
+X( 7, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 6) \
+X( 8, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 7) \
+X( 9, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8HEX, "0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n", 8) \
+X(10, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1SIGNED, "%d\n", 1) \
+X(11, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2SIGNED, "%d %d\n", 2) \
+X(12, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3SIGNED, "%d %d %d\n", 3) \
+X(13, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4SIGNED, "%d %d %d %d\n", 4) \
+X(14, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5SIGNED, "%d %d %d %d %d\n", 5) \
+X(15, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6SIGNED, "%d %d %d %d %d %d\n", 6) \
+X(16, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7SIGNED, "%d %d %d %d %d %d %d\n", 7) \
+X(17, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8SIGNED, "%d %d %d %d %d %d %d %d\n", 8) \
+X(18, RGXFW_GROUP_DBG, RGXFW_SF_DBG_1UNSIGNED, "%u\n", 1) \
+X(19, RGXFW_GROUP_DBG, RGXFW_SF_DBG_2UNSIGNED, "%u %u\n", 2) \
+X(20, RGXFW_GROUP_DBG, RGXFW_SF_DBG_3UNSIGNED, "%u %u %u\n", 3) \
+X(21, RGXFW_GROUP_DBG, RGXFW_SF_DBG_4UNSIGNED, "%u %u %u %u\n", 4) \
+X(22, RGXFW_GROUP_DBG, RGXFW_SF_DBG_5UNSIGNED, "%u %u %u %u %u\n", 5) \
+X(23, RGXFW_GROUP_DBG, RGXFW_SF_DBG_6UNSIGNED, "%u %u %u %u %u %u\n", 6) \
+X(24, RGXFW_GROUP_DBG, RGXFW_SF_DBG_7UNSIGNED, "%u %u %u %u %u %u %u\n", 7) \
+X(25, RGXFW_GROUP_DBG, RGXFW_SF_DBG_8UNSIGNED, "%u %u %u %u %u %u %u %u\n", 8) \
+\
+X(65535, RGXFW_GROUP_NULL, RGXFW_SF_LAST, "You should not use this string\n", 15)
+
+
+/* The symbolic names found in the table above are assigned a ui32 value of
+ * the following format:
+ * 31 30 28 27 20 19 16 15 12 11 0 bits
+ * - --- ---- ---- ---- ---- ---- ---- ----
+ *  0-11: id number
+ * 12-15: group id number
+ * 16-19: number of parameters
+ * 20-27: unused
+ * 28-30: active: identifies an SF packet (otherwise a regular int32)
+ *    31: reserved for signed/unsigned compatibility
+ *
+ * The following macro assigns those values to the enum-generated SF id list.
+ */
+#define RGXFW_LOG_IDMARKER (0x70000000)
+#define RGXFW_LOG_CREATESFID(a,b,e) (((a) | ((b) << 12) | ((e) << 16)) | RGXFW_LOG_IDMARKER)
+
+#define RGXFW_LOG_IDMASK (0xFFF00000)
+#define RGXFW_LOG_VALIDID(I) (((I) & RGXFW_LOG_IDMASK) == RGXFW_LOG_IDMARKER)
+
+typedef enum RGXFW_LOG_SFids {
+#define X(a, b, c, d, e) c = RGXFW_LOG_CREATESFID(a,b,e),
+ RGXFW_LOG_SFIDLIST
+#undef X
+} RGXFW_LOG_SFids;
+
+/* Return the group id that the given (enum generated) id belongs to */
+#define RGXFW_SF_GID(x) (((x)>>12) & 0xf)
+/* Returns how many arguments the SF(string format) for the given (enum generated) id requires */
+#define RGXFW_SF_PARAMNUM(x) (((x)>>16) & 0xf)
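+
+/* Worked example (illustrative only; the group value is hypothetical):
+ * if RGXFW_GROUP_POW expanded to 3, an entry X(23, RGXFW_GROUP_POW, SFID,
+ * "...", 2) would generate
+ *   SFID = RGXFW_LOG_CREATESFID(23, 3, 2)
+ *        = 23 | (3 << 12) | (2 << 16) | 0x70000000 = 0x70023017
+ * which decodes back via RGXFW_SF_GID(SFID) == 3 and
+ * RGXFW_SF_PARAMNUM(SFID) == 2, while RGXFW_LOG_VALIDID(SFID) holds because
+ * bits 20-31 equal 0x700. */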
+
+#endif /* RGX_FWIF_SF_H */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_shared.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_shared.h
new file mode 100644
index 00000000000000..b5329280dc00c5
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_shared.h
@@ -0,0 +1,641 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware interface structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures shared by both host client
+ and host server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SHARED_H__)
+#define __RGX_FWIF_SHARED_H__
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+#include "img_types.h"
+#include "rgx_common.h"
+#include "devicemem_typedefs.h"
+
+/*
+ * Firmware binary block unit in bytes.
+ * Raw data stored in FW binary will be aligned on this size.
+ */
+#define FW_BLOCK_SIZE 4096L
+
+/* Offset for BVNC struct from the end of the FW binary */
+#define FW_BVNC_BACKWARDS_OFFSET (FW_BLOCK_SIZE)
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKMIF_DEVICE_STATE_ZERO_FREELIST (0x1 << 0) /*!< Zeroing the physical pages of reconstructed free lists */
+/*
+ * #define RGXKMIF_DEVICE_STATE_FTRACE_EN (0x1 << 1)
+ * Removed this obsolete flag from the DDK.
+ */
+#define RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x1 << 2) /*!< Used to disable the Devices Watchdog logging */
+#define RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN (0x1 << 3) /*!< Used for validation to inject dust requests every TA/3D kick */
+#define RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN (0x1 << 4) /*!< Used to enable host-side-only HWPerf stream */
+
+/* Required memory alignment for 64-bit variables accessible by Meta
+  (the Meta gcc aligns 64-bit vars to 64 bits; therefore, memory shared between
+  the host and Meta that contains 64-bit vars has to maintain this alignment) */
+#define RGXFWIF_FWALLOC_ALIGN sizeof(IMG_UINT64)
+
+typedef struct _RGXFWIF_DEV_VIRTADDR_
+{
+ IMG_UINT32 ui32Addr;
+} RGXFWIF_DEV_VIRTADDR;
+
+typedef struct _RGXFWIF_DMA_ADDR_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psDevVirtAddr;
+ RGXFWIF_DEV_VIRTADDR pbyFWAddr;
+} UNCACHED_ALIGN RGXFWIF_DMA_ADDR;
+
+typedef IMG_UINT8 RGXFWIF_CCCB;
+
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CCCB_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RENDER_TARGET;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_HWRTDATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RAY_FRAME_DATA;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RPM_FREELIST;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_RTA_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_UFO_ADDR;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_CLEANUP_CTL;
+typedef RGXFWIF_DEV_VIRTADDR PRGXFWIF_TIMESTAMP_ADDR;
+
+/* FIXME PRGXFWIF_UFO_ADDR and RGXFWIF_UFO should move back into rgx_fwif_client.h */
+typedef struct _RGXFWIF_UFO_
+{
+ PRGXFWIF_UFO_ADDR puiAddrUFO;
+ IMG_UINT32 ui32Value;
+} RGXFWIF_UFO;
+
+
+/*!
+ Last reset reason for a context.
+*/
+typedef enum _RGXFWIF_CONTEXT_RESET_REASON_
+{
+ RGXFWIF_CONTEXT_RESET_REASON_NONE = 0, /*!< No reset reason recorded */
+ RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1, /*!< Caused a reset due to locking up */
+ RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2, /*!< Affected by another context locking up */
+ RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3, /*!< Overran the global deadline */
+ RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4, /*!< Affected by another context overrunning */
+ RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5, /*!< Forced reset to ensure scheduling requirements */
+} RGXFWIF_CONTEXT_RESET_REASON;
+
+
+/*!
+ HWRTData state the render is in
+*/
+typedef enum
+{
+ RGXFWIF_RTDATA_STATE_NONE = 0,
+ RGXFWIF_RTDATA_STATE_KICKTA,
+ RGXFWIF_RTDATA_STATE_KICKTAFIRST,
+ RGXFWIF_RTDATA_STATE_TAFINISHED,
+ RGXFWIF_RTDATA_STATE_KICK3D,
+ RGXFWIF_RTDATA_STATE_3DFINISHED,
+ RGXFWIF_RTDATA_STATE_TAOUTOFMEM,
+ RGXFWIF_RTDATA_STATE_PARTIALRENDERFINISHED,
+	RGXFWIF_RTDATA_STATE_HWR            /*!< In case of HWR, we can't set the RTDATA state to NONE,
+	                                         as this will cause any TA to become a first TA.
+	                                         To ensure all related TAs are skipped, we use the HWR state */
+} RGXFWIF_RTDATA_STATE;
+
+typedef struct _RGXFWIF_CLEANUP_CTL_
+{
+ IMG_UINT32 ui32SubmittedCommands; /*!< Number of commands received by the FW */
+ IMG_UINT32 ui32ExecutedCommands; /*!< Number of commands executed by the FW */
+} UNCACHED_ALIGN RGXFWIF_CLEANUP_CTL;
+
+
+/*!
+ * Client Circular Command Buffer (CCCB) control structure.
+ * This is shared between the Server and the Firmware and holds byte offsets
+ * into the CCCB as well as the wrapping mask to aid wrap around. A given
+ * snapshot of this queue with Cmd 1 running on the GPU might be:
+ *
+ * Roff Doff Woff
+ * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
+ * < runnable commands >< !ready to run >
+ *
+ * Cmd 1 : Currently executing on the GPU data master.
+ * Cmd 2,3,4: Fence dependencies met, commands runnable.
+ * Cmd 5... : Fence dependency not met yet.
+ */
+typedef struct _RGXFWIF_CCCB_CTL_
+{
+ IMG_UINT32 ui32WriteOffset; /*!< Host write offset into CCB. This
+ * must be aligned to 16 bytes. */
+	IMG_UINT32  ui32ReadOffset;     /*!< Firmware read offset into CCB.
+	                                 * Points to the command that is
+	                                 * runnable on the GPU, if R!=W */
+	IMG_UINT32  ui32DepOffset;      /*!< Firmware fence dependency offset.
+	                                 * Points to commands not ready, i.e.
+	                                 * fence dependencies are not met. */
+	IMG_UINT32  ui32WrapMask;       /*!< Offset wrapping mask, i.e. the
+	                                 * total CCB capacity in bytes, minus 1 */
+} UNCACHED_ALIGN RGXFWIF_CCCB_CTL;
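+
+/* Worked example (illustrative only): for a 4KiB CCB, ui32WrapMask is 0xFFF
+ * and, in the snapshot above, ui32ReadOffset points at Cmd 1, ui32DepOffset
+ * at Cmd 5 and ui32WriteOffset just past Cmd 7: commands in
+ * [ReadOffset, DepOffset) are runnable, commands in [DepOffset, WriteOffset)
+ * still wait on fences, and the rest of the buffer is free space. */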
+
+typedef enum
+{
+ RGXFW_LOCAL_FREELIST = 0,
+ RGXFW_GLOBAL_FREELIST = 1,
+ RGXFW_FREELIST_TYPE_LAST = RGXFW_GLOBAL_FREELIST,
+} RGXFW_FREELIST_TYPE;
+
+#define RGXFW_MAX_FREELISTS (RGXFW_FREELIST_TYPE_LAST + 1)
+
+typedef struct _RGXFWIF_RTA_CTL_
+{
+ IMG_UINT32 ui32RenderTargetIndex; //Render number
+ IMG_UINT32 ui32CurrentRenderTarget; //index in RTA
+ IMG_UINT32 ui32ActiveRenderTargets; //total active RTs
+ IMG_UINT32 ui32CumulActiveRenderTargets; //total active RTs from the first TA kick, for OOM
+ RGXFWIF_DEV_VIRTADDR sValidRenderTargets; //Array of valid RT indices
+ RGXFWIF_DEV_VIRTADDR sNumRenders; //Array of number of occurred partial renders per render target
+ IMG_UINT16 ui16MaxRTs; //Number of render targets in the array
+} UNCACHED_ALIGN RGXFWIF_RTA_CTL;
+
+typedef struct _RGXFWIF_FREELIST_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psFreeListDevVAddr;
+ IMG_UINT64 RGXFW_ALIGN ui64CurrentDevVAddr;
+ IMG_UINT32 ui32CurrentStackTop;
+ IMG_UINT32 ui32MaxPages;
+ IMG_UINT32 ui32GrowPages;
+ IMG_UINT32 ui32CurrentPages; /* HW pages */
+ IMG_UINT32 ui32AllocatedPageCount;
+ IMG_UINT32 ui32AllocatedMMUPageCount;
+ IMG_UINT32 ui32HWRCounter;
+ IMG_UINT32 ui32FreeListID;
+ IMG_BOOL bGrowPending;
+ IMG_UINT32 ui32ReadyPages; /* Pages that should be used only when OOM is reached */
+} UNCACHED_ALIGN RGXFWIF_FREELIST;
+
+typedef enum
+{
+ RGXFW_RPM_SHF_FREELIST = 0,
+ RGXFW_RPM_SHG_FREELIST = 1,
+} RGXFW_RPM_FREELIST_TYPE;
+
+#define RGXFW_MAX_RPM_FREELISTS (2)
+
+typedef struct _RGXFWIF_RPM_FREELIST_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sFreeListDevVAddr; /*!< device base address */
+ //IMG_DEV_VIRTADDR RGXFW_ALIGN sRPMPageListDevVAddr; /*!< device base address for RPM pages in-use */
+ IMG_UINT32 sSyncAddr; /*!< Free list sync object for OOM event */
+ IMG_UINT32 ui32MaxPages; /*!< maximum size */
+ IMG_UINT32 ui32GrowPages; /*!< grow size = maximum pages which may be added later */
+ IMG_UINT32 ui32CurrentPages; /*!< number of pages */
+ IMG_UINT32 ui32ReadOffset; /*!< head: where to read alloc'd pages */
+ IMG_UINT32 ui32WriteOffset; /*!< tail: where to write de-alloc'd pages */
+ IMG_BOOL bReadToggle; /*!< toggle bit for circular buffer */
+ IMG_BOOL bWriteToggle;
+ IMG_UINT32 ui32AllocatedPageCount; /*!< TODO: not sure yet if this is useful */
+ IMG_UINT32 ui32HWRCounter;
+ IMG_UINT32 ui32FreeListID; /*!< unique ID per device, e.g. rolling counter */
+ IMG_BOOL bGrowPending; /*!< FW is waiting for host to grow the freelist */
+} UNCACHED_ALIGN RGXFWIF_RPM_FREELIST;
+
+typedef struct _RGXFWIF_RAY_FRAME_DATA_
+{
+ /* state manager for shared state between vertex and ray processing */
+
+ /* TODO: not sure if this will be useful, link it here for now */
+ IMG_UINT32 sRPMFreeLists[RGXFW_MAX_RPM_FREELISTS];
+
+ IMG_BOOL bAbortOccurred;
+
+ /* cleanup state.
+ * Both the SHG and RTU must complete or discard any outstanding work
+ * which references this frame data.
+ */
+ RGXFWIF_CLEANUP_CTL sCleanupStateSHG;
+ RGXFWIF_CLEANUP_CTL sCleanupStateRTU;
+ IMG_UINT32 ui32CleanupStatus;
+#define HWFRAMEDATA_SHG_CLEAN (1 << 0)
+#define HWFRAMEDATA_RTU_CLEAN (1 << 1)
+
+} UNCACHED_ALIGN RGXFWIF_RAY_FRAME_DATA;
+
+
+typedef struct _RGXFWIF_RENDER_TARGET_
+{
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psVHeapTableDevVAddr; /*!< VHeap Data Store */
+ IMG_BOOL bTACachesNeedZeroing; /*!< Whether RTC and TPC caches (on mem) need to be zeroed on next first TA kick */
+
+} UNCACHED_ALIGN RGXFWIF_RENDER_TARGET;
+
+
+typedef struct _RGXFWIF_HWRTDATA_
+{
+ RGXFWIF_RTDATA_STATE eState;
+
+ IMG_UINT32 ui32NumPartialRenders; /*!< Number of partial renders. Used to setup ZLS bits correctly */
+ IMG_BOOL bLastWasPartial; /*!< Whether the last render was a partial render */
+ IMG_DEV_VIRTADDR RGXFW_ALIGN psPMMListDevVAddr; /*!< MList Data Store */
+
+ IMG_UINT64 RGXFW_ALIGN ui64VCECatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64VCELastCatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64TECatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64TELastCatBase[4];
+ IMG_UINT64 RGXFW_ALIGN ui64AlistCatBase;
+ IMG_UINT64 RGXFW_ALIGN ui64AlistLastCatBase;
+
+ IMG_UINT64 RGXFW_ALIGN ui64PMAListStackPointer;
+ IMG_UINT32 ui32PMMListStackPointer;
+
+ PRGXFWIF_FREELIST RGXFW_ALIGN apsFreeLists[RGXFW_MAX_FREELISTS];
+ IMG_UINT32 aui32FreeListHWRSnapshot[RGXFW_MAX_FREELISTS];
+
+ PRGXFWIF_RENDER_TARGET psParentRenderTarget;
+
+ RGXFWIF_CLEANUP_CTL sTACleanupState;
+ RGXFWIF_CLEANUP_CTL s3DCleanupState;
+ IMG_UINT32 ui32CleanupStatus;
+#define HWRTDATA_TA_CLEAN (1 << 0)
+#define HWRTDATA_3D_CLEAN (1 << 1)
+
+ PRGXFWIF_RTA_CTL psRTACtl;
+
+ IMG_UINT32 bHasLastTA;
+ IMG_BOOL bPartialRendered;
+
+ IMG_UINT32 ui32PPPScreen;
+ IMG_UINT32 ui32PPPGridOffset;
+ IMG_UINT64 RGXFW_ALIGN ui64PPPMultiSampleCtl;
+ IMG_UINT32 ui32TPCStride;
+ IMG_DEV_VIRTADDR RGXFW_ALIGN sTailPtrsDevVAddr;
+ IMG_UINT32 ui32TPCSize;
+ IMG_UINT32 ui32TEScreen;
+ IMG_UINT32 ui32MTileStride;
+ IMG_UINT32 ui32TEAA;
+ IMG_UINT32 ui32TEMTILE1;
+ IMG_UINT32 ui32TEMTILE2;
+ IMG_UINT32 ui32ISPMergeLowerX;
+ IMG_UINT32 ui32ISPMergeLowerY;
+ IMG_UINT32 ui32ISPMergeUpperX;
+ IMG_UINT32 ui32ISPMergeUpperY;
+ IMG_UINT32 ui32ISPMergeScaleX;
+ IMG_UINT32 ui32ISPMergeScaleY;
+ IMG_BOOL bDisableTileReordering;
+#if defined(RGX_FIRMWARE)
+ struct _RGXFWIF_FWCOMMONCONTEXT_* psOwnerTA;
+#else
+ RGXFWIF_DEV_VIRTADDR pui32OwnerTANotUsedByHost;
+#endif
+#if defined(FIX_HW_BRN_65101)
+ IMG_BOOL bNeedBRN65101Blit;
+#endif
+#if defined(FIX_HW_BRN_67182)
+ IMG_BOOL bNeedBRN67182SecondRender;
+#endif
+} UNCACHED_ALIGN RGXFWIF_HWRTDATA;
+
+typedef enum
+{
+ RGXFWIF_PRBUFFER_START = 0,
+ RGXFWIF_PRBUFFER_ZBUFFER = 0,
+ RGXFWIF_PRBUFFER_SBUFFER,
+ RGXFWIF_PRBUFFER_MSAABUFFER,
+ RGXFWIF_PRBUFFER_MAXSUPPORTED,
+} RGXFWIF_PRBUFFER_TYPE;
+
+typedef enum
+{
+ RGXFWIF_PRBUFFER_UNBACKED = 0,
+ RGXFWIF_PRBUFFER_BACKED,
+ RGXFWIF_PRBUFFER_BACKING_PENDING,
+ RGXFWIF_PRBUFFER_UNBACKING_PENDING,
+} RGXFWIF_PRBUFFER_STATE;
+
+typedef struct _RGXFWIF_PRBUFFER_
+{
+	IMG_UINT32              ui32BufferID;   /*!< Buffer ID */
+ IMG_BOOL bOnDemand; /*!< Needs On-demand Z/S/MSAA Buffer allocation */
+	RGXFWIF_PRBUFFER_STATE  eState;         /*!< Z/S/MSAA buffer state */
+ RGXFWIF_CLEANUP_CTL sCleanupState; /*!< Cleanup state */
+} UNCACHED_ALIGN RGXFWIF_PRBUFFER;
+
+/* Number of BIF tiling configurations / heaps */
+#define RGXFWIF_NUM_BIF_TILING_CONFIGS 4
+
+/*!
+ *****************************************************************************
+ * RGX Compatibility checks
+ *****************************************************************************/
+/* WARNING: RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX may only be increased, and must
+   always equal (N * sizeof(IMG_UINT32) - 1) */
+#define RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX 7
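+
+/* e.g. (illustrative arithmetic) with N = 2: 2 * sizeof(IMG_UINT32) - 1 = 7,
+ * so the aszV[7 + 1] array below occupies exactly two 32-bit words,
+ * including the NUL terminator. */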
+
+/* WARNING: Whenever the layout of RGXFWIF_COMPCHECKS_BVNC changes, the
+   following define must be incremented by 1 to signal the layout change to
+   the compatibility logic */
+#define RGXFWIF_COMPCHECKS_LAYOUT_VERSION 2
+
+typedef struct _RGXFWIF_COMPCHECKS_BVNC_
+{
+	IMG_UINT32  ui32LayoutVersion;  /* WARNING: This must be the first field of this structure */
+ IMG_UINT32 ui32VLenMax;
+ IMG_UINT64 RGXFW_ALIGN ui64BNC;
+ IMG_CHAR aszV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX + 1];
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS_BVNC;
+
+#define RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(name) \
+ RGXFWIF_COMPCHECKS_BVNC name = { \
+ RGXFWIF_COMPCHECKS_LAYOUT_VERSION, \
+ RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX, \
+ 0, \
+ { 0 }, \
+ }
+#define RGXFWIF_COMPCHECKS_BVNC_INIT(name) \
+ do { \
+ (name).ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION; \
+ (name).ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX; \
+ (name).ui64BNC = 0; \
+ (name).aszV[0] = 0; \
+ } while (0)
+
+typedef struct _RGXFWIF_COMPCHECKS_
+{
+ RGXFWIF_COMPCHECKS_BVNC sHWBVNC; /*!< hardware BNC (from the RGX registers) */
+ RGXFWIF_COMPCHECKS_BVNC sFWBVNC; /*!< firmware BNC */
+ IMG_UINT32 ui32FWProcessorVersion; /*!< identifier of the MIPS/META version */
+ IMG_UINT32 ui32DDKVersion; /*!< software DDK version */
+ IMG_UINT32 ui32DDKBuild; /*!< software DDK build no. */
+ IMG_UINT32 ui32BuildOptions; /*!< build options bit-field */
+ IMG_BOOL bUpdated; /*!< Information is valid */
+} UNCACHED_ALIGN RGXFWIF_COMPCHECKS;
+
+
+#define GET_CCB_SPACE(WOff, ROff, CCBSize) \
+ ((((ROff) - (WOff)) + ((CCBSize) - 1)) & ((CCBSize) - 1))
+
+#define UPDATE_CCB_OFFSET(Off, PacketSize, CCBSize) \
+ (Off) = (((Off) + (PacketSize)) & ((CCBSize) - 1))
+
+#define RESERVED_CCB_SPACE (sizeof(IMG_UINT32))
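+
+/* Worked example (illustrative only): for CCBSize = 1024 with WOff = 900 and
+ * ROff = 100,
+ *   GET_CCB_SPACE(900, 100, 1024) = ((100 - 900) + 1023) & 1023 = 223
+ * i.e. one byte less than the raw 224-byte gap, the usual trick that keeps a
+ * full buffer distinguishable from an empty one; and with Off = 900,
+ *   UPDATE_CCB_OFFSET(Off, 200, 1024) wraps Off to (900 + 200) & 1023 = 76.
+ */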
+
+
+/* Defines relating to the per-context CCBs */
+
+/* This size is used when a client CCB is found to consume negligible space
+ * (e.g. a few hundred bytes to a few KB - less than a page). In such a case,
+ * instead of allocating a CCB of only a few KB, we allocate at least this
+ * much to leave headroom for future growth. */
+#define MIN_SAFE_CCB_SIZE_LOG2 13 /* 8K (2 Pages) */
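+/* e.g. (illustrative) a client whose CCB would otherwise need only ~1KB is
+ * still given 1 << MIN_SAFE_CCB_SIZE_LOG2 = 8192 bytes, i.e. two 4K pages. */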
+
+#define RGX_TQ3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ3D
+static_assert(RGX_TQ3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "TQ3D CCB size is too small");
+#define RGX_TQ2D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TQ2D
+static_assert(RGX_TQ2D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "TQ2D CCB size is too small");
+#define RGX_CDM_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_CDM
+static_assert(RGX_CDM_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "CDM CCB size is too small");
+#define RGX_TA_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_TA
+static_assert(RGX_TA_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "TA CCB size is too small");
+#define RGX_3D_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_3D
+static_assert(RGX_3D_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "3D CCB size is too small");
+#define RGX_KICKSYNC_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_KICKSYNC
+static_assert(RGX_KICKSYNC_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "KickSync CCB size is too small");
+#define RGX_RTU_CCB_SIZE_LOG2 PVRSRV_RGX_LOG2_CLIENT_CCB_SIZE_RTU
+static_assert(RGX_RTU_CCB_SIZE_LOG2 >= MIN_SAFE_CCB_SIZE_LOG2, "RTU CCB size is too small");
+
+/*!
+ ******************************************************************************
+ * Defines for CMD_TYPE corruption detection and forward compatibility check
+ *****************************************************************************/
+
+/* CMD_TYPE 32bit contains:
+ * 31:16 Reserved for magic value to detect corruption (16 bits)
+ * 15 Reserved for RGX_CCB_TYPE_TASK (1 bit)
+ * 14:0 Bits available for CMD_TYPEs (15 bits) */
+
+
+/* Magic value to detect corruption */
+#define RGX_CMD_MAGIC_DWORD (0x2ABCU)
+#define RGX_CMD_MAGIC_DWORD_MASK (0xFFFF0000U)
+#define RGX_CMD_MAGIC_DWORD_SHIFT (16U)
+#define RGX_CMD_MAGIC_DWORD_SHIFTED (RGX_CMD_MAGIC_DWORD << RGX_CMD_MAGIC_DWORD_SHIFT)
+
+/* Maximum number of CMD_TYPEs supported = 32767 (i.e. 15 bits length) */
+#define RGX_CMD_TYPE_LENGTH (15U)
+#define RGX_CMD_TYPE_MASK (0x00007FFFU)
+#define RGX_CMD_TYPE_SHIFT (0U)
+
+/*!
+ ******************************************************************************
+ * Client CCB commands for RGX
+ *****************************************************************************/
+
+#define RGX_CCB_TYPE_TASK (1U << 15)
+#define RGX_CCB_FWALLOC_ALIGN(size) (((size) + (RGXFWIF_FWALLOC_ALIGN-1)) & ~(RGXFWIF_FWALLOC_ALIGN - 1))
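+/* e.g. (illustrative) with RGXFWIF_FWALLOC_ALIGN == sizeof(IMG_UINT64) == 8,
+ * RGX_CCB_FWALLOC_ALIGN(13) = (13 + 7) & ~7 = 16, the usual round-up to a
+ * power-of-two multiple. */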
+
+typedef enum _RGXFWIF_CCB_CMD_TYPE_
+{
+ RGXFWIF_CCB_CMD_TYPE_TA = 201 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_3D = 202 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_CDM = 203 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_TQ_3D = 204 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_TQ_2D = 205 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_3D_PR = 206 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_NULL = 207 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_SHG = 208 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_RTU = 209 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_RTU_FC = 210 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP = 211 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+ RGXFWIF_CCB_CMD_TYPE_TQ_TDM = 212 | RGX_CMD_MAGIC_DWORD_SHIFTED | RGX_CCB_TYPE_TASK,
+
+/* Leave a gap between CCB specific commands and generic commands */
+ RGXFWIF_CCB_CMD_TYPE_FENCE = 213 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_CCB_CMD_TYPE_UPDATE = 214 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE = 215 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_CCB_CMD_TYPE_FENCE_PR = 216 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_CCB_CMD_TYPE_PRIORITY = 217 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+/* Pre and post timestamp commands are expected to sandwich the DM command.
+   The CCB-wrap padding code upsets the FW if the task type bit is not
+   cleared for POST_TIMESTAMPs; that is why there are two distinct cmd types.
+*/
+ RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP = 218 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE = 219 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+ RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE = 220 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+
+ RGXFWIF_CCB_CMD_TYPE_PADDING = 221 | RGX_CMD_MAGIC_DWORD_SHIFTED,
+} RGXFWIF_CCB_CMD_TYPE;
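+
+/* Worked example (illustrative only): RGXFWIF_CCB_CMD_TYPE_TA encodes as
+ *   201 | (0x2ABC << 16) | (1 << 15) = 0x2ABC80C9
+ * so corruption checking can verify that
+ *   (eCmdType & RGX_CMD_MAGIC_DWORD_MASK) == RGX_CMD_MAGIC_DWORD_SHIFTED
+ * and the raw command type is recovered with
+ *   (eCmdType & RGX_CMD_TYPE_MASK) == 201
+ */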
+
+typedef struct _RGXFWIF_WORKEST_KICK_DATA_
+{
+ /* Index for the KM Workload estimation return data array */
+ IMG_UINT64 RGXFW_ALIGN ui64ReturnDataIndex;
+ /* Deadline for the workload */
+ IMG_UINT64 RGXFW_ALIGN ui64Deadline;
+ /* Predicted time taken to do the work in cycles */
+ IMG_UINT64 RGXFW_ALIGN ui64CyclesPrediction;
+} RGXFWIF_WORKEST_KICK_DATA;
+
+typedef struct _RGXFWIF_CCB_CMD_HEADER_
+{
+ RGXFWIF_CCB_CMD_TYPE eCmdType;
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT32 ui32ExtJobRef; /*!< external job reference - provided by client and used in debug for tracking submitted work */
+ IMG_UINT32 ui32IntJobRef; /*!< internal job reference - generated by services and used in debug for tracking submitted work */
+ RGXFWIF_WORKEST_KICK_DATA sWorkEstKickData; /*!< Workload Estimation - Workload Estimation Data */
+ IMG_DEV_VIRTADDR sRobustnessResetReason; /*!< Address to write reset reason to */
+ IMG_UINT32 ui32SubmissionOrdinal; /*!< 'Timestamp' indicating order of command submission */
+} RGXFWIF_CCB_CMD_HEADER;
+
+typedef enum _RGXFWIF_REG_CFG_TYPE_
+{
+ RGXFWIF_REG_CFG_TYPE_PWR_ON=0, /* Sidekick power event */
+ RGXFWIF_REG_CFG_TYPE_DUST_CHANGE, /* Rascal / dust power event */
+ RGXFWIF_REG_CFG_TYPE_TA, /* TA kick */
+ RGXFWIF_REG_CFG_TYPE_3D, /* 3D kick */
+ RGXFWIF_REG_CFG_TYPE_CDM, /* Compute kick */
+ RGXFWIF_REG_CFG_TYPE_TLA, /* TLA kick */
+ RGXFWIF_REG_CFG_TYPE_TDM, /* TDM kick */
+ RGXFWIF_REG_CFG_TYPE_ALL /* Applies to all types. Keep as last element */
+} RGXFWIF_REG_CFG_TYPE;
+
+typedef struct _RGXFWIF_REG_CFG_REC_
+{
+ IMG_UINT64 ui64Addr;
+ IMG_UINT64 ui64Mask;
+ IMG_UINT64 ui64Value;
+} RGXFWIF_REG_CFG_REC;
+
+
+typedef struct _RGXFWIF_TIME_CORR_
+{
+ IMG_UINT64 RGXFW_ALIGN ui64OSTimeStamp;
+ IMG_UINT64 RGXFW_ALIGN ui64OSMonoTimeStamp;
+ IMG_UINT64 RGXFW_ALIGN ui64CRTimeStamp;
+
+	/* Utility variable used to convert CR timer deltas to OS timer deltas (ns),
+	 * where the deltas are relative to the timestamps above:
+	 * deltaOS = (deltaCR * K) >> decimal_shift, see full explanation below */
+ IMG_UINT64 RGXFW_ALIGN ui64CRDeltaToOSDeltaKNs;
+
+ IMG_UINT32 ui32CoreClockSpeed;
+} UNCACHED_ALIGN RGXFWIF_TIME_CORR;
+
+
+/* These macros are used to help converting FW timestamps to the Host time domain.
+ * On the FW the RGX_CR_TIMER counter is used to keep track of the time;
+ * it increments by 1 every 256 GPU clock ticks, so the general formula
+ * to perform the conversion is:
+ *
+ * [ GPU clock speed in Hz, if (scale == 10^9) then deltaOS is in ns,
+ *   otherwise if (scale == 10^6) then deltaOS is in us ]
+ *
+ * deltaCR * 256 256 * scale
+ * deltaOS = --------------- * scale = deltaCR * K [ K = --------------- ]
+ * GPUclockspeed GPUclockspeed
+ *
+ * The actual K is multiplied by 2^20 (and deltaCR * K is divided by 2^20)
+ * to get some better accuracy and to avoid returning 0 in the integer
+ * division 256000000/GPUfreq if GPUfreq is greater than 256MHz.
+ * This is the same as keeping K as a decimal number.
+ *
+ * The maximum deltaOS is slightly more than 5 hours for all GPU frequencies
+ * (deltaCR * K is more or less a constant), and it's relative to
+ * the base OS timestamp sampled as a part of the timer correlation data.
+ * This base is refreshed on GPU power-on, DVFS transition and
+ * periodic frequency calibration (executed every few seconds if the FW is
+ * doing some work), so as long as the GPU is doing something and one of these
+ * events is triggered then deltaCR * K will not overflow and deltaOS will be
+ * correct.
+ */
+
+#define RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT (20)
+
+/*
+ * Calibrated GPU frequencies are rounded to the nearest multiple of 1 KHz
+ * before use, to reduce the noise introduced by calculations done with
+ * imperfect operands (correlated timers not sampled at exactly the same time,
+ * GPU CR timer incrementing only once every 256 GPU cycles).
+ * This also helps reducing the variation between consecutive calculations.
+ */
+#define RGXFWIF_CONVERT_TO_KHZ(freq) (((freq) + 500) / 1000)
+#define RGXFWIF_ROUND_TO_KHZ(freq) ((((freq) + 500) / 1000) * 1000)
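+/* e.g. (illustrative) a calibrated reading of 600123456 Hz gives
+ * RGXFWIF_CONVERT_TO_KHZ(600123456) = 600123 and
+ * RGXFWIF_ROUND_TO_KHZ(600123456) = 600123000, i.e. the nearest kHz. */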
+
+#define RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(clockfreq, remainder) \
+ OSDivide64r64((256000000ULL << RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT), \
+ RGXFWIF_CONVERT_TO_KHZ(clockfreq), \
+ &(remainder))
+
+#define RGXFWIF_GET_DELTA_OSTIME_NS(deltaCR, K) \
+ ( ((deltaCR) * (K)) >> RGXFWIF_CRDELTA_TO_OSDELTA_ACCURACY_SHIFT)
+
+#define RGXFWIF_GET_DELTA_OSTIME_US(deltacr, clockfreq, remainder) \
+ OSDivide64r64((deltacr) * 256000, \
+ RGXFWIF_CONVERT_TO_KHZ(clockfreq), \
+ &(remainder))
+
+/* Use this macro to get a more realistic GPU core clock speed than
+ * the one given by the upper layers (used when doing GPU frequency
+ * calibration)
+ */
+#define RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(deltacr_us, deltaos_us, remainder) \
+ OSDivide64((deltacr_us) * 256000000, (deltaos_us), &(remainder))
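+
+/* Worked example (illustrative only, assuming a 600 MHz GPU clock):
+ *   K = RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(600000000, rem)
+ *     = (256000000 << 20) / 600000 = 447392426
+ * i.e. ~426.67 ns per CR tick, kept with 20 fractional bits, so a delta of
+ * 1000000 CR ticks converts to
+ *   RGXFWIF_GET_DELTA_OSTIME_NS(1000000, 447392426) ~= 426666666 ns,
+ * about 0.43 s of OS time. ('rem' is a hypothetical remainder variable.) */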
+
+/*
+	The maximum size configurable via RGX_FW_HEAP_SHIFT is 32MiB (1<<25)
+	and the minimum is 4MiB (1<<22); the default firmware heap size is
+	set to the maximum, 32MiB.
+*/
+#if (RGX_FW_HEAP_SHIFT < 22 || RGX_FW_HEAP_SHIFT > 25)
+#error "RGX_FW_HEAP_SHIFT is outside valid range [22, 25]"
+#endif
+
+#endif /* __RGX_FWIF_SHARED_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_shared.h)
+******************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sig.h b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sig.h
new file mode 100644
index 00000000000000..4a6af31fac4e3b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_fwif_sig.h
@@ -0,0 +1,168 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware signature checks
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware interface structures used by srvinit and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_FWIF_SIG_H__)
+#define __RGX_FWIF_SIG_H__
+
+#include "rgxdefs_km.h"
+
+/************************************************************************
+* RGX FW signature checks
+************************************************************************/
+
+#if defined(__KERNEL__)
+
+#if defined(PDUMP)
+
+#define SIG_REG_TA_MAX_COUNT (12)
+static RGXFW_REGISTER_LIST asTASigRegList[SIG_REG_TA_MAX_COUNT];
+static IMG_UINT32 gui32TASigRegCount = 0;
+
+#define SIG_REG_3D_MAX_COUNT (6)
+static RGXFW_REGISTER_LIST as3DSigRegList[SIG_REG_3D_MAX_COUNT];
+static IMG_UINT32 gui323DSigRegCount = 0;
+
+#endif /* PDUMP */
+
+#else
+
+/* List of TA signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asTASigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+#if defined(RGX_FEATURE_SCALABLE_VDM_GPP)
+ {RGX_CR_USC_UVB_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+#else
+ {RGX_CR_USC_UVS0_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS1_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS2_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS3_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS4_CHECKSUM, 0, 0, 0},
+ {RGX_CR_USC_UVS5_CHECKSUM, 0, 0, 0},
+#endif
+#if defined(RGX_FEATURE_SCALABLE_TE_ARCH)
+#if defined(RGX_FEATURE_SCALABLE_VDM_GPP)
+ {RGX_CR_PPP_CLIP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, RGX_NUM_PHANTOMS-1},
+#else
+ {RGX_CR_PPP, 0, 0, 0},
+#endif
+ {RGX_CR_TE_CHECKSUM, 0, 0, 0},
+#else
+ {RGX_CR_PPP_SIGNATURE, 0, 0, 0},
+ {RGX_CR_TE_SIGNATURE, 0, 0, 0},
+#endif
+ {RGX_CR_VCE_CHECKSUM, 0, 0, 0},
+#if !defined(RGX_FEATURE_PDS_PER_DUST) || !defined(FIX_HW_BRN_62204)
+ {RGX_CR_PDS_DOUTM_STM_SIGNATURE, 0, 0, 0},
+#endif
+};
+
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_ROGUEXE)
+#define HWR_SIG_RAST_INDIRECT (0)
+#define HWR_SIG_RAST_INDIRECT_NUM (0)
+
+#if defined(RGX_FEATURE_PBE2_IN_XE) && RGX_FEATURE_NUM_CLUSTERS > 1
+#define HWR_SIG_PBE_INDIRECT (0)
+#define HWR_SIG_PBE_INDIRECT_NUM (0)
+#else
+#define HWR_SIG_PBE_INDIRECT (RGX_CR_PBE_INDIRECT)
+#define HWR_SIG_PBE_INDIRECT_NUM (RGX_FEATURE_NUM_CLUSTERS-1)
+#endif /* !(defined(RGX_FEATURE_PBE2_IN_XE) && RGX_FEATURE_NUM_CLUSTERS > 1) */
+
+#else
+
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_ROGUEXE)
+#define HWR_SIG_RAST_INDIRECT (RGX_CR_RASTERISATION_INDIRECT)
+#define HWR_SIG_RAST_INDIRECT_NUM (RGX_NUM_RASTERISATION_MODULES-1)
+#else
+#define HWR_SIG_RAST_INDIRECT (RGX_CR_BLACKPEARL_INDIRECT)
+#define HWR_SIG_RAST_INDIRECT_NUM (RGX_NUM_PHANTOMS-1)
+#endif /* !(defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(RGX_FEATURE_ROGUEXE)) */
+
+#define HWR_SIG_PBE_INDIRECT (RGX_CR_PBE_INDIRECT)
+#define HWR_SIG_PBE_INDIRECT_NUM (RGX_FEATURE_NUM_CLUSTERS-1)
+
+#endif /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && !defined(RGX_FEATURE_ROGUEXE) */
+
+/* List of 3D signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST as3DSigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+ {RGX_CR_ISP_PDS_CHECKSUM, HWR_SIG_RAST_INDIRECT, 0, HWR_SIG_RAST_INDIRECT_NUM},
+ {RGX_CR_ISP_TPF_CHECKSUM, HWR_SIG_RAST_INDIRECT, 0, HWR_SIG_RAST_INDIRECT_NUM},
+ {RGX_CR_TFPU_PLANE0_CHECKSUM, HWR_SIG_RAST_INDIRECT, 0, HWR_SIG_RAST_INDIRECT_NUM},
+ {RGX_CR_TFPU_PLANE1_CHECKSUM, HWR_SIG_RAST_INDIRECT, 0, HWR_SIG_RAST_INDIRECT_NUM},
+ {RGX_CR_PBE_CHECKSUM, HWR_SIG_PBE_INDIRECT, 0, HWR_SIG_PBE_INDIRECT_NUM},
+ {RGX_CR_IFPU_ISP_CHECKSUM, HWR_SIG_RAST_INDIRECT, 0, HWR_SIG_RAST_INDIRECT_NUM},
+};
+#endif /* !__KERNEL__ */
+
+#if defined (RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+/* List of RTU signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asRTUSigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+ {DPX_CR_RS_PDS_RR_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC0_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC1_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC2_CHECKSUM, 0, 0, 0},
+ {RGX_CR_FBA_FC3_CHECKSUM, 0, 0, 0},
+ {DPX_CR_RQ_USC_DEBUG, 0, 0, 0},
+};
+
+/* List of SHG signature and checksum register addresses */
+static const RGXFW_REGISTER_LIST asSHGSigRegList[] =
+{ /* Register */ /* Indirect_Reg */ /* Start, End */
+ {RGX_CR_SHF_SHG_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHF_VERTEX_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHF_VARY_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_RPM_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHG_BIF_CHECKSUM, 0, 0, 0},
+ {RGX_CR_SHG_FE_BE_CHECKSUM, 0, 0, 0},
+};
+#endif /* RGX_FEATURE_RAY_TRACING || __KERNEL__ */
+
+#endif /* __RGX_FWIF_SIG_H__ */
+
+/******************************************************************************
+ End of file (rgx_fwif_sig.h)
+******************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_heaps.h b/drivers/gpu/drm/img-rogue/1.10/rgx_heaps.h
new file mode 100644
index 00000000000000..2d6409b7feaae1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_heaps.h
@@ -0,0 +1,188 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX heap definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGX_HEAPS_H__)
+#define __RGX_HEAPS_H__
+
+#include "km/rgxdefs_km.h"
+#include "img_defs.h"
+#include "log2.h"
+#include "pvr_debug.h"
+
+/* RGX Heap IDs, note: not all heaps are available to clients */
+/* N.B. The old heap identifiers are deprecated now that the old memory
+   management code has been retired. New heap identifiers should be renamed
+   accordingly. */
+#define RGX_UNDEFINED_HEAP_ID (~0LU) /*!< RGX Undefined Heap ID */
+#define RGX_GENERAL_SVM_HEAP_ID 0 /*!< RGX General SVM (shared virtual memory) Heap ID */
+#define RGX_GENERAL_HEAP_ID 1 /*!< RGX General Heap ID */
+#define RGX_GENERAL_NON4K_HEAP_ID 2 /*!< RGX General non-4K Heap ID */
+#define RGX_RGNHDR_BRN_63142_HEAP_ID 3 /*!< RGX RgnHdr BRN63142 Heap ID */
+#define RGX_MMU_INIA_BRN_65273_ID 4 /*!< RGX MMU INIA Heap ID */
+#define RGX_MMU_INIB_BRN_65273_ID 5 /*!< RGX MMU INIB Heap ID */
+#define RGX_PDSCODEDATA_HEAP_ID 6 /*!< RGX PDS Code/Data Heap ID */
+#define RGX_USCCODE_HEAP_ID 7 /*!< RGX USC Code Heap ID */
+#define RGX_FIRMWARE_MAIN_HEAP_ID 8 /*!< RGX Main Firmware Heap ID */
+#define RGX_TQ3DPARAMETERS_HEAP_ID 9 /*!< RGX TQ 3D Parameters Heap ID */
+#define RGX_BIF_TILING_HEAP_1_ID 10 /*!< RGX BIF Tiling Heap 1 ID */
+#define RGX_BIF_TILING_HEAP_2_ID 11 /*!< RGX BIF Tiling Heap 2 ID */
+#define RGX_BIF_TILING_HEAP_3_ID 12 /*!< RGX BIF Tiling Heap 3 ID */
+#define RGX_BIF_TILING_HEAP_4_ID 13 /*!< RGX BIF Tiling Heap 4 ID */
+#define RGX_DOPPLER_HEAP_ID 14 /*!< Doppler Heap ID */
+#define RGX_DOPPLER_OVERFLOW_HEAP_ID 15 /*!< Doppler Overflow Heap ID */
+#define RGX_SERVICES_SIGNALS_HEAP_ID 16 /*!< Services Signals Heap ID */
+#define RGX_SIGNALS_HEAP_ID 17 /*!< Signals Heap ID */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_ID 18
+#define RGX_FIRMWARE_CONFIG_HEAP_ID 19 /*!< RGX Firmware Config Heap ID */
+#define RGX_GUEST_FIRMWARE_RAW_HEAP_ID 21 /*!< Additional OSIDs Firmware */
+#define RGX_MAX_HEAP_ID (RGX_GUEST_FIRMWARE_RAW_HEAP_ID + RGXFW_NUM_OS) /*!< Max Valid Heap ID */
+
+
+
+/*
+ Identify heaps by their names
+*/
+#define RGX_GENERAL_SVM_HEAP_IDENT "General SVM" /*!< RGX General SVM (shared virtual memory) Heap Identifier */
+#define RGX_GENERAL_HEAP_IDENT "General" /*!< RGX General Heap Identifier */
+#define RGX_GENERAL_NON4K_HEAP_IDENT "General NON-4K" /*!< RGX General non-4K Heap Identifier */
+#define RGX_RGNHDR_BRN_63142_HEAP_IDENT "RgnHdr BRN63142" /*!< RGX RgnHdr BRN63142 Heap Identifier */
+#define RGX_MMU_INIA_BRN_65273_HEAP_IDENT "MMU INIA BRN65273" /*!< MMU BRN65273 Heap A Identifier */
+#define RGX_MMU_INIB_BRN_65273_HEAP_IDENT "MMU INIB BRN65273" /*!< MMU BRN65273 Heap B Identifier */
+#define RGX_PDSCODEDATA_HEAP_IDENT "PDS Code and Data" /*!< RGX PDS Code/Data Heap Identifier */
+#define RGX_USCCODE_HEAP_IDENT "USC Code" /*!< RGX USC Code Heap Identifier */
+#define RGX_TQ3DPARAMETERS_HEAP_IDENT "TQ3DParameters" /*!< RGX TQ 3D Parameters Heap Identifier */
+#define RGX_BIF_TILING_HEAP_1_IDENT "BIF Tiling Heap 1" /*!< RGX BIF Tiling Heap 1 identifier */
+#define RGX_BIF_TILING_HEAP_2_IDENT "BIF Tiling Heap 2" /*!< RGX BIF Tiling Heap 2 identifier */
+#define RGX_BIF_TILING_HEAP_3_IDENT "BIF Tiling Heap 3" /*!< RGX BIF Tiling Heap 3 identifier */
+#define RGX_BIF_TILING_HEAP_4_IDENT "BIF Tiling Heap 4" /*!< RGX BIF Tiling Heap 4 identifier */
+#define RGX_DOPPLER_HEAP_IDENT "Doppler" /*!< Doppler Heap Identifier */
+#define RGX_DOPPLER_OVERFLOW_HEAP_IDENT "Doppler Overflow" /*!< Doppler Overflow Heap Identifier */
+#define RGX_SERVICES_SIGNALS_HEAP_IDENT "Services Signals" /*!< Services Signals Heap Identifier */
+#define RGX_SIGNALS_HEAP_IDENT "Signals" /*!< Signals Heap Identifier */
+#define RGX_VISTEST_HEAP_IDENT "VisTest" /*!< VisTest heap */
+#define RGX_TDM_TPU_YUV_COEFFS_HEAP_IDENT "TDM TPU YUV Coeffs"
+#define RGX_FIRMWARE_MAIN_HEAP_IDENT "Firmware Main"
+#define RGX_FIRMWARE_CONFIG_HEAP_IDENT "Firmware Config"
+#define RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT "Firmware Raw Guest %d"
+
+/* BIF tiling heaps have specific buffer requirements based on their XStride
+ * configuration. This is detailed in the BIF tiling documentation and ensures
+ * that the bits swapped by the BIF tiling algorithm do not result in addresses
+ * outside the allocated buffer. The representation here reflects the diagram
+ * in the BIF tiling documentation for tiling mode '0'.
+ *
+ * For tiling mode '1', the overall tile size does not change, width increases
+ * to 2^9 but the height drops to 2^3.
+ * This means the RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE macro can be
+ * used for both modes.
+ *
+ * Previous TILING_HEAP_STRIDE macros are retired in favour of storing an
+ * alignment-to-stride factor, derived from the tiling mode, with the tiling
+ * heap configuration data.
+ *
+ * XStride is defined for a platform in sysconfig.h, but the resulting
+ * alignment and stride factor can be queried through the
+ * PVRSRVGetHeapLog2ImportAlignmentAndTilingStrideFactor() API.
+ * For reference:
+ * Log2BufferStride = Log2Alignment - Log2AlignmentToTilingStrideFactor
+ */
+#define RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(X) (4+X+1+8)
+#define RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE (4)
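+
+/* Worked example (illustrative only): with XStride = 3 in tiling mode '0',
+ * RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(3) = 4+3+1+8 = 16, i.e. a
+ * 64KB buffer alignment. Applying the base alignment-to-stride factor of 4
+ * gives Log2BufferStride = 16 - 4 = 12, i.e. a 4KB buffer stride.
+ */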
+
+/*
+ * Supported log2 page size values for RGX_GENERAL_NON_4K_HEAP_ID
+ */
+#define RGX_HEAP_4KB_PAGE_SHIFT (12)
+#define RGX_HEAP_16KB_PAGE_SHIFT (14)
+#define RGX_HEAP_64KB_PAGE_SHIFT (16)
+#define RGX_HEAP_256KB_PAGE_SHIFT (18)
+#define RGX_HEAP_1MB_PAGE_SHIFT (20)
+#define RGX_HEAP_2MB_PAGE_SHIFT (21)
+
+/* Takes a log2 page size parameter and derives a suitable log2 page size
+ * for the RGX heaps. Returns 0 if the parameter is invalid. */
+static INLINE IMG_UINT32 RGXHeapDerivePageSize(IMG_UINT32 uiLog2PageSize)
+{
+ IMG_BOOL bFound = IMG_FALSE;
+
+ /* The OS page shift must be non-zero and lie between
+ * RGX_HEAP_4KB_PAGE_SHIFT and RGX_HEAP_2MB_PAGE_SHIFT, i.e. the
+ * page size is a power of two between 4KB and 2MB. */
+ if ( uiLog2PageSize == 0 ||
+ (uiLog2PageSize < RGX_HEAP_4KB_PAGE_SHIFT) ||
+ (uiLog2PageSize > RGX_HEAP_2MB_PAGE_SHIFT))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Provided incompatible log2 page size %u",
+ __func__,
+ uiLog2PageSize));
+ PVR_ASSERT(0);
+ return 0;
+ }
+
+ do
+ {
+ switch (uiLog2PageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ /* All good, RGX page size equals given page size
+ * => use it as default for heaps */
+ bFound = IMG_TRUE;
+ break;
+ default:
+ /* No exact match among the supported sizes, so
+ * fall back to the next smaller device page size
+ * and retry. */
+ uiLog2PageSize -= 1;
+ break;
+ }
+ } while (!bFound);
+
+ return uiLog2PageSize;
+}
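+
+/* Usage sketch (illustrative only): an OS using 8KB pages (log2 = 13) has no
+ * exact RGX match, so RGXHeapDerivePageSize() falls back to the next smaller
+ * supported size:
+ *
+ *   IMG_UINT32 uiLog2 = RGXHeapDerivePageSize(13);
+ *   // uiLog2 == RGX_HEAP_4KB_PAGE_SHIFT (12), i.e. 4KB device pages
+ */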
+
+
+#endif /* __RGX_HEAPS_H__ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf.h b/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf.h
new file mode 100644
index 00000000000000..61f3c6c49b1235
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf.h
@@ -0,0 +1,1265 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX HWPerf and Debug Types and Defines Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Common data types definitions for hardware performance API
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef RGX_HWPERF_H_
+#define RGX_HWPERF_H_
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* These structures are used on both GPU and CPU and must be a size that is a
+ * multiple of 64 bits, 8 bytes to allow the FW to write 8 byte quantities
+ * at 8 byte aligned addresses. RGX_FW_STRUCT_*_ASSERT() is used to check this.
+ */
+
+/******************************************************************************
+ * Includes and Defines
+ *****************************************************************************/
+
+#include "img_types.h"
+#include "img_defs.h"
+
+#include "rgx_common.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_sync_km.h"
+
+
+/* HWPerf interface assumption checks */
+static_assert(RGX_FEATURE_NUM_CLUSTERS <= 16, "Cluster count too large for HWPerf protocol definition");
+
+
+#if !defined(__KERNEL__)
+/* User-mode and Firmware definitions only */
+
+/*! The number of indirectly addressable TPU_MSC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST MAX((RGX_FEATURE_NUM_CLUSTERS>>1),1)
+
+/*! The number of indirectly addressable USC blocks in the GPU */
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER (RGX_FEATURE_NUM_CLUSTERS)
+
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map for S7. */
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 1 /* JONES */
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS)
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 1 /* BLACKPEARL */
+# define RGX_HWPERF_PHANTOM_DUST_BLKS 2 /* TPU, TEXAS */
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 2 /* USC, PBE */
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0
+
+# elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+
+# if defined(RGX_FEATURE_RAY_TRACING)
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map for Series 6XT. */
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 6 /* TORNADO, TA, BF, BT, RT, SH */
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 4 /* Doppler unit unconditionally has 4 instances of BX_TU */
+# else /* !defined(RGX_FEATURE_RAY_TRACING) */
+ /*! Defines the number of performance counter blocks that are directly
+ * addressable in the RGX register map. */
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 2 /* TORNADO, TA */
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0
+# endif /* defined(RGX_FEATURE_RAY_TRACING) */
+
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM (RGX_NUM_PHANTOMS)
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 2 /* RASTER, TEXAS */
+# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+
+# else /* !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && ! defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) i.e. S6 */
+
+ /*! Defines the number of performance counter blocks that are
+ * addressable in the RGX register map for Series 6. */
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 3 /* TA, RASTER, HUB */
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM 0 /* There is no PHANTOM in Rogue 1 (Series 6); the name is kept only for consistency with the later Rogue XT and Rogue XT+ series */
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0
+# define RGX_HWPERF_PHANTOM_DUST_BLKS 1 /* TPU */
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 1 /* USC */
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0
+
+# endif
+
+/*! The number of performance counters in each layout block defined for UM/FW code */
+#if defined(RGX_FEATURE_CLUSTER_GROUPING)
+ #define RGX_HWPERF_CNTRS_IN_BLK 6
+ #else
+ #define RGX_HWPERF_CNTRS_IN_BLK 4
+#endif
+
+#else /* defined(__KERNEL__) */
+/* Kernel/server definitions - not used at run-time, hence set to invalid values */
+
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST 0xFF
+# define RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER 0xFF
+
+# define RGX_HWPERF_MAX_DIRECT_ADDR_BLKS 0xFF
+# define RGX_HWPERF_INDIRECT_BY_PHANTOM 0xFF
+# define RGX_HWPERF_PHANTOM_NONDUST_BLKS 0xFF
+# define RGX_HWPERF_PHANTOM_DUST_BLKS 0xFF
+# define RGX_HWPERF_PHANTOM_DUST_CLUSTER_BLKS 0xFF
+
+# if defined(RGX_FEATURE_RAY_TRACING)
+ /* Exception case, must have valid value since ray-tracing BX_TU unit does
+ * not vary by feature. Always read by rgx_hwperf_blk_present_raytracing()
+ * regardless of call context */
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 4
+# else
+# define RGX_HWPERF_DOPPLER_BX_TU_BLKS 0
+# endif
+
+#endif
+
+/*! The number of custom non-mux counter blocks supported */
+#define RGX_HWPERF_MAX_CUSTOM_BLKS 5
+
+/*! The number of counters supported in each non-mux counter block */
+#define RGX_HWPERF_MAX_CUSTOM_CNTRS 8
+
+
+/******************************************************************************
+ * Packet Event Type Enumerations
+ *****************************************************************************/
+
+/*! Type used to encode the event that generated the packet.
+ * NOTE: When this type is updated the corresponding hwperfbin2json tool source
+ * needs to be updated as well. The RGX_HWPERF_EVENT_MASK_* macros will also need
+ * updating when adding new types.
+ */
+typedef enum
+{
+ RGX_HWPERF_INVALID = 0x00,
+
+ /* FW types 0x01..0x06 */
+ RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE = 0x01,
+
+ RGX_HWPERF_FW_BGSTART = 0x01,
+ RGX_HWPERF_FW_BGEND = 0x02,
+ RGX_HWPERF_FW_IRQSTART = 0x03,
+
+ RGX_HWPERF_FW_IRQEND = 0x04,
+ RGX_HWPERF_FW_DBGSTART = 0x05,
+ RGX_HWPERF_FW_DBGEND = 0x06,
+
+ RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE = 0x06,
+
+ /* HW types 0x07..0x19 */
+ RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE = 0x07,
+
+ RGX_HWPERF_HW_PMOOM_TAPAUSE = 0x07,
+ RGX_HWPERF_HW_TAKICK = 0x08,
+/* RGX_HWPERF_HW_PMOOM_TAPAUSE = 0x07, */
+/* RGX_HWPERF_HW_PMOOM_TARESUME = 0x19, */
+ RGX_HWPERF_HW_TAFINISHED = 0x09,
+ RGX_HWPERF_HW_3DTQKICK = 0x0A,
+/* RGX_HWPERF_HW_3DTQFINISHED = 0x17, */
+/* RGX_HWPERF_HW_3DSPMKICK = 0x11, */
+/* RGX_HWPERF_HW_3DSPMFINISHED = 0x18, */
+ RGX_HWPERF_HW_3DKICK = 0x0B,
+ RGX_HWPERF_HW_3DFINISHED = 0x0C,
+ RGX_HWPERF_HW_CDMKICK = 0x0D,
+ RGX_HWPERF_HW_CDMFINISHED = 0x0E,
+ RGX_HWPERF_HW_TLAKICK = 0x0F,
+ RGX_HWPERF_HW_TLAFINISHED = 0x10,
+ RGX_HWPERF_HW_3DSPMKICK = 0x11,
+ RGX_HWPERF_HW_PERIODIC = 0x12,
+ RGX_HWPERF_HW_RTUKICK = 0x13,
+ RGX_HWPERF_HW_RTUFINISHED = 0x14,
+ RGX_HWPERF_HW_SHGKICK = 0x15,
+ RGX_HWPERF_HW_SHGFINISHED = 0x16,
+ RGX_HWPERF_HW_3DTQFINISHED = 0x17,
+ RGX_HWPERF_HW_3DSPMFINISHED = 0x18,
+ RGX_HWPERF_HW_PMOOM_TARESUME = 0x19,
+
+ /* HW_EVENT_RANGE0 used up. Use next empty range below to add new hardware events */
+ RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE = 0x19,
+
+ /* other types 0x1A..0x1F */
+ RGX_HWPERF_CLKS_CHG = 0x1A,
+ RGX_HWPERF_GPU_STATE_CHG = 0x1B,
+
+ /* power types 0x20..0x27 */
+ RGX_HWPERF_PWR_EST_RANGE_FIRST_TYPE = 0x20,
+ RGX_HWPERF_PWR_EST_REQUEST = 0x20,
+ RGX_HWPERF_PWR_EST_READY = 0x21,
+ RGX_HWPERF_PWR_EST_RESULT = 0x22,
+ RGX_HWPERF_PWR_EST_RANGE_LAST_TYPE = 0x22,
+
+ RGX_HWPERF_PWR_CHG = 0x23,
+
+ /* HW_EVENT_RANGE1 0x28..0x2F, for accommodating new hardware events */
+ RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE = 0x28,
+
+ RGX_HWPERF_HW_TDMKICK = 0x28,
+ RGX_HWPERF_HW_TDMFINISHED = 0x29,
+
+ RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE = 0x29,
+
+ /* context switch types 0x30..0x31 */
+ RGX_HWPERF_CSW_START = 0x30,
+ RGX_HWPERF_CSW_FINISHED = 0x31,
+
+ /* firmware misc 0x38..0x39 */
+ RGX_HWPERF_UFO = 0x38,
+ RGX_HWPERF_FWACT = 0x39,
+
+ /* last */
+ RGX_HWPERF_LAST_TYPE,
+
+ /* This enumeration must have a value that is a power of two as it is
+ * used in masks and a filter bit field (currently 64 bits long).
+ */
+ RGX_HWPERF_MAX_TYPE = 0x40
+} RGX_HWPERF_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 63 event types.
+ */
+static_assert(RGX_HWPERF_LAST_TYPE < RGX_HWPERF_MAX_TYPE, "Too many HWPerf event types");
+
+/* Macro used to check if an event type ID is present in the known set of hardware type events */
+#define HWPERF_PACKET_IS_HW_TYPE(_etype) (((_etype) >= RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE) || \
+ ((_etype) >= RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE && (_etype) <= RGX_HWPERF_HW_EVENT_RANGE1_LAST_TYPE))
+
+#define HWPERF_PACKET_IS_FW_TYPE(_etype) \
+ ((_etype) >= RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE && \
+ (_etype) <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE)
+
+
+typedef enum {
+ RGX_HWPERF_HOST_INVALID = 0x00,
+ RGX_HWPERF_HOST_ENQ = 0x01,
+ RGX_HWPERF_HOST_UFO = 0x02,
+ RGX_HWPERF_HOST_ALLOC = 0x03,
+ RGX_HWPERF_HOST_CLK_SYNC = 0x04,
+ RGX_HWPERF_HOST_FREE = 0x05,
+ RGX_HWPERF_HOST_MODIFY = 0x06,
+
+ /* last */
+ RGX_HWPERF_HOST_LAST_TYPE,
+
+ /* This enumeration must have a value that is a power of two as it is
+ * used in masks and a filter bit field (currently 32 bits long).
+ */
+ RGX_HWPERF_HOST_MAX_TYPE = 0x20
+} RGX_HWPERF_HOST_EVENT_TYPE;
+
+/* The event type values are incrementing integers for use as a shift ordinal
+ * in the event filtering process at the point events are generated.
+ * This scheme thus implies a limit of 31 event types.
+ */
+static_assert(RGX_HWPERF_HOST_LAST_TYPE < RGX_HWPERF_HOST_MAX_TYPE, "Too many HWPerf host event types");
+
+
+/******************************************************************************
+ * Packet Header Format Version 2 Types
+ *****************************************************************************/
+
+/*! Major version number of the protocol in operation
+ */
+#define RGX_HWPERF_V2_FORMAT 2
+
+/*! Signature ASCII pattern 'HWP2' found in the first word of a HWPerfV2 packet
+ */
+#define HWPERF_PACKET_V2_SIG 0x48575032
+
+/*! Signature ASCII pattern 'HWPA' found in the first word of a HWPerfV2a packet
+ */
+#define HWPERF_PACKET_V2A_SIG 0x48575041
+
+/*! Signature ASCII pattern 'HWPB' found in the first word of a HWPerfV2b packet
+ */
+#define HWPERF_PACKET_V2B_SIG 0x48575042
+
+#define HWPERF_PACKET_ISVALID(_sig) (((_sig) == HWPERF_PACKET_V2_SIG) || ((_sig) == HWPERF_PACKET_V2A_SIG) || ((_sig) == HWPERF_PACKET_V2B_SIG))
+
+/*! Type defines the HWPerf packet header common to all events. */
+typedef struct
+{
+ IMG_UINT32 ui32Sig; /*!< One of the HWPERF_PACKET_V2*_SIG signature values */
+ IMG_UINT32 ui32Size; /*!< Overall packet size in bytes */
+ IMG_UINT32 eTypeId; /*!< Event type information field */
+ IMG_UINT32 ui32Ordinal; /*!< Sequential number of the packet */
+ IMG_UINT64 ui64Timestamp; /*!< Event timestamp */
+} RGX_HWPERF_V2_PACKET_HDR, *RGX_PHWPERF_V2_PACKET_HDR;
+
+#ifndef __CHECKER__
+RGX_FW_STRUCT_OFFSET_ASSERT(RGX_HWPERF_V2_PACKET_HDR, ui64Timestamp);
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_V2_PACKET_HDR);
+#endif
+
+
+/*! Mask for use with the IMG_UINT32 ui32Size header field */
+#define RGX_HWPERF_SIZE_MASK 0xFFFFU
+
+/*! This macro defines the upper limit that the size of the largest
+ * variable-length HWPerf packet must fall within, currently 3KB. This
+ * constant may be used to allocate a buffer to hold one packet.
+ * This upper limit is policed by packet-producing code.
+ */
+#define RGX_HWPERF_MAX_PACKET_SIZE 0xC00U
+
+/*! Defines an upper limit to the size of a variable length packet payload.
+ */
+#define RGX_HWPERF_MAX_PAYLOAD_SIZE ((IMG_UINT32)(RGX_HWPERF_MAX_PACKET_SIZE-\
+ sizeof(RGX_HWPERF_V2_PACKET_HDR)))
+
+
+/*! Macro which takes a structure name and provides the packet size for
+ * a fixed size payload packet, rounded up to 8 bytes to align packets
+ * for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_FIXED(_struct) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(sizeof(_struct), PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro which takes the number of bytes written in the data payload of a
+ * packet for a variable size payload packet, rounded up to 8 bytes to
+ * align packets for 64 bit architectures. */
+#define RGX_HWPERF_MAKE_SIZE_VARIABLE(_size) ((IMG_UINT32)(RGX_HWPERF_SIZE_MASK&(sizeof(RGX_HWPERF_V2_PACKET_HDR)+PVR_ALIGN(_size, PVRSRVTL_PACKET_ALIGNMENT))))
+
+/*! Macro to obtain the size of the packet */
+#define RGX_HWPERF_GET_SIZE(_packet_addr) ((IMG_UINT16)(((_packet_addr)->ui32Size) & RGX_HWPERF_SIZE_MASK))
+
+/*! Macro to obtain the size of the packet data */
+#define RGX_HWPERF_GET_DATA_SIZE(_packet_addr) (RGX_HWPERF_GET_SIZE(_packet_addr) - sizeof(RGX_HWPERF_V2_PACKET_HDR))
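+
+/* Worked example (assuming PVRSRVTL_PACKET_ALIGNMENT is 8, as implied by the
+ * 8-byte alignment asserts in this file): the packet header is 24 bytes, so
+ * RGX_HWPERF_MAKE_SIZE_VARIABLE(20) gives 24 + PVR_ALIGN(20, 8) = 24 + 24 =
+ * 48 bytes, and RGX_HWPERF_GET_DATA_SIZE() on such a packet returns 24.
+ */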
+
+
+
+/*! Masks for use with the IMG_UINT32 eTypeId header field */
+#define RGX_HWPERF_TYPEID_MASK 0x7FFFFU
+#define RGX_HWPERF_TYPEID_EVENT_MASK 0x07FFFU
+#define RGX_HWPERF_TYPEID_THREAD_MASK 0x08000U
+#define RGX_HWPERF_TYPEID_STREAM_MASK 0x70000U
+#define RGX_HWPERF_TYPEID_OSID_MASK 0xFF000000U
+
+/*! Meta thread macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_META_THREAD_SHIFT 15U
+#define RGX_HWPERF_META_THREAD_ID0 0x0U
+#define RGX_HWPERF_META_THREAD_ID1 0x1U
+/*! Obsolete, kept for source compatibility */
+#define RGX_HWPERF_META_THREAD_MASK 0x1U
+/*! Stream ID macros for encoding the ID into the type field of a packet */
+#define RGX_HWPERF_STREAM_SHIFT 16U
+/*! OSID bit-shift macro used for encoding OSID into type field of a packet */
+#define RGX_HWPERF_OSID_SHIFT 24U
+typedef enum {
+ RGX_HWPERF_STREAM_ID0_FW, /*!< Events from the Firmware/GPU */
+ RGX_HWPERF_STREAM_ID1_HOST, /*!< Events from the Server host driver component */
+ RGX_HWPERF_STREAM_ID2_CLIENT, /*!< Events from the Client host driver component */
+ RGX_HWPERF_STREAM_ID_LAST,
+} RGX_HWPERF_STREAM_ID;
+
+/* Checks if all stream IDs can fit under RGX_HWPERF_TYPEID_STREAM_MASK. */
+static_assert((RGX_HWPERF_STREAM_ID_LAST - 1) < (RGX_HWPERF_TYPEID_STREAM_MASK >> RGX_HWPERF_STREAM_SHIFT),
+ "To many HWPerf stream IDs.");
+
+/*! Macros used to set the packet type and encode meta thread ID (0|1), HWPerf stream ID, and OSID within */
+#define RGX_HWPERF_MAKE_TYPEID(_stream,_type,_thread,_osid)\
+ ((IMG_UINT32) ((RGX_HWPERF_TYPEID_STREAM_MASK&((_stream)<<RGX_HWPERF_STREAM_SHIFT)) | \
+ (RGX_HWPERF_TYPEID_THREAD_MASK&((_thread)<<RGX_HWPERF_META_THREAD_SHIFT)) | \
+ (RGX_HWPERF_TYPEID_EVENT_MASK&(_type)) | \
+ (RGX_HWPERF_TYPEID_OSID_MASK & ((_osid) << RGX_HWPERF_OSID_SHIFT))))
+
+/*! Obtains the event type that generated the packet */
+#define RGX_HWPERF_GET_TYPE(_packet_addr) (((_packet_addr)->eTypeId) & RGX_HWPERF_TYPEID_EVENT_MASK)
+
+/*! Obtains the META Thread number that generated the packet */
+#define RGX_HWPERF_GET_THREAD_ID(_packet_addr) (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_THREAD_MASK) >> RGX_HWPERF_META_THREAD_SHIFT))
+
+/*! Obtains the guest OSID which resulted in packet generation */
+#define RGX_HWPERF_GET_OSID(_packet_addr) (((_packet_addr)->eTypeId & RGX_HWPERF_TYPEID_OSID_MASK) >> RGX_HWPERF_OSID_SHIFT)
+
+/*! Obtain stream id */
+#define RGX_HWPERF_GET_STREAM_ID(_packet_addr) (((((_packet_addr)->eTypeId)&RGX_HWPERF_TYPEID_STREAM_MASK) >> RGX_HWPERF_STREAM_SHIFT))
+
+/*! Macros to obtain a typed pointer to a packet or data structure given a packet address */
+#define RGX_HWPERF_GET_PACKET(_buffer_addr) ((RGX_HWPERF_V2_PACKET_HDR*) (_buffer_addr))
+#define RGX_HWPERF_GET_PACKET_DATA_BYTES(_packet_addr) ((IMG_BYTE*) ( ((IMG_BYTE*)(_packet_addr)) +sizeof(RGX_HWPERF_V2_PACKET_HDR) ) )
+#define RGX_HWPERF_GET_NEXT_PACKET(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR*) ( ((IMG_BYTE*)(_packet_addr))+(RGX_HWPERF_SIZE_MASK&(_packet_addr)->ui32Size)) )
+
+/*! Obtains a typed pointer to a packet header given the packed data address */
+#define RGX_HWPERF_GET_PACKET_HEADER(_packet_addr) ((RGX_HWPERF_V2_PACKET_HDR*) ( ((IMG_BYTE*)(_packet_addr)) - sizeof(RGX_HWPERF_V2_PACKET_HDR) ))
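+
+/* Encode/decode sketch (hypothetical field values): packing a firmware-stream
+ * TA kick event generated by META thread 1 on guest OSID 0:
+ *
+ *   IMG_UINT32 ui32TypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID0_FW,
+ *                                                  RGX_HWPERF_HW_TAKICK,
+ *                                                  RGX_HWPERF_META_THREAD_ID1,
+ *                                                  0); // == 0x8008
+ *
+ * A packet carrying this eTypeId decodes as RGX_HWPERF_GET_TYPE() == 0x08,
+ * RGX_HWPERF_GET_THREAD_ID() == 1 and RGX_HWPERF_GET_STREAM_ID() == 0.
+ */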
+
+
+/******************************************************************************
+ * Other Common Defines
+ *****************************************************************************/
+
+/* This macro is not a real array size; it indicates that the array has a
+ * variable length known only at run-time and always contains at least one
+ * element. The final size of the array is deduced from the size field of a
+ * packet header. */
+#define RGX_HWPERF_ONE_OR_MORE_ELEMENTS 1U
+
+/* This macro is not a real array size; it indicates that the array is
+ * optional and, if present, has a variable length known only at run-time.
+ * The final size of the array is deduced from the size field of a packet
+ * header. */
+#define RGX_HWPERF_ZERO_OR_MORE_ELEMENTS 1U
+
+
+/*! Masks for use with the IMG_UINT32 ui32BlkInfo field */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_MASK 0xFFFF0000U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the NumBlocks and counter block offset field in ui32BlkInfo */
+#define RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT 16U
+#define RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT 0U
+
+/*! Macro used to set the block info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_BLKINFO(_numblks,_blkoffset) ((IMG_UINT32) ((RGX_HWPERF_BLKINFO_BLKCOUNT_MASK&((_numblks) << RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)) | (RGX_HWPERF_BLKINFO_BLKOFFSET_MASK&((_blkoffset) << RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT))))
+
+/*! Macro used to obtain the number of counter blocks present in the packet */
+#define RGX_HWPERF_GET_BLKCOUNT(_blkinfo) ((_blkinfo & RGX_HWPERF_BLKINFO_BLKCOUNT_MASK) >> RGX_HWPERF_BLKINFO_BLKCOUNT_SHIFT)
+
+/*! Obtains the offset of the counter block stream in the packet */
+#define RGX_HWPERF_GET_BLKOFFSET(_blkinfo) ((_blkinfo & RGX_HWPERF_BLKINFO_BLKOFFSET_MASK) >> RGX_HWPERF_BLKINFO_BLKOFFSET_SHIFT)
+
+/* This macro gets the number of blocks depending on the packet version */
+#define RGX_HWPERF_GET_NUMBLKS(_sig, _packet_data, _numblocks) \
+{\
+ if(HWPERF_PACKET_V2B_SIG == (_sig))\
+ {\
+ (_numblocks) = RGX_HWPERF_GET_BLKCOUNT((_packet_data)->ui32BlkInfo);\
+ }\
+ else\
+ {\
+ IMG_UINT32 ui32VersionOffset = (((_sig) == HWPERF_PACKET_V2_SIG) ? 1 : 3);\
+ (_numblocks) = *(IMG_UINT16 *)(&((_packet_data)->ui32WorkTarget) + ui32VersionOffset);\
+ }\
+}
+
+/* This macro gets the counter stream pointer depending on the packet version */
+#define RGX_HWPERF_GET_CNTSTRM(_sig, _hw_packet_data, _cntstream_ptr) \
+{\
+ if(HWPERF_PACKET_V2B_SIG == _sig)\
+ {\
+ (_cntstream_ptr) = (IMG_UINT32 *)((IMG_BYTE *)(_hw_packet_data) + RGX_HWPERF_GET_BLKOFFSET((_hw_packet_data)->ui32BlkInfo));\
+ }\
+ else\
+ {\
+ IMG_UINT32 ui32BlkStreamOffsetInWords = ((_sig == HWPERF_PACKET_V2_SIG) ? 6 : 8);\
+ (_cntstream_ptr) = ((IMG_UINT32 *)_hw_packet_data) + ui32BlkStreamOffsetInWords;\
+ }\
+}
+
+/* The maximum number of frame contexts currently supported by the driver */
+#define RGX_HWPERF_HW_MAX_WORK_CONTEXT 2
+
+/*! Masks for use with the RGX_HWPERF_UFO_EV eEvType field */
+#define RGX_HWPERF_UFO_STREAMSIZE_MASK 0xFFFF0000U
+#define RGX_HWPERF_UFO_STREAMOFFSET_MASK 0x0000FFFFU
+
+/*! Shift for the UFO count and data stream fields */
+#define RGX_HWPERF_UFO_STREAMSIZE_SHIFT 16U
+#define RGX_HWPERF_UFO_STREAMOFFSET_SHIFT 0U
+
+/*! Macro used to set UFO stream info word as a combination of two 16-bit integers */
+#define RGX_HWPERF_MAKE_UFOPKTINFO(_ssize,_soff)\
+ ((IMG_UINT32) ((RGX_HWPERF_UFO_STREAMSIZE_MASK&((_ssize) << RGX_HWPERF_UFO_STREAMSIZE_SHIFT)) |\
+ (RGX_HWPERF_UFO_STREAMOFFSET_MASK&((_soff) << RGX_HWPERF_UFO_STREAMOFFSET_SHIFT))))
+
+/*! Macro used to obtain UFO count*/
+#define RGX_HWPERF_GET_UFO_STREAMSIZE(_streaminfo)\
+ ((_streaminfo & RGX_HWPERF_UFO_STREAMSIZE_MASK) >> RGX_HWPERF_UFO_STREAMSIZE_SHIFT)
+
+/*! Obtains the offset of the UFO stream in the packet */
+#define RGX_HWPERF_GET_UFO_STREAMOFFSET(_streaminfo)\
+ ((_streaminfo & RGX_HWPERF_UFO_STREAMOFFSET_MASK) >> RGX_HWPERF_UFO_STREAMOFFSET_SHIFT)
+
+
+
+/******************************************************************************
+ * Data Stream Common Types
+ *****************************************************************************/
+
+/* All the Data Masters HWPerf is aware of. When a new DM is added to this list,
+ * it should be appended at the end to maintain backward compatibility of HWPerf data */
+typedef enum _RGX_HWPERF_DM {
+
+ RGX_HWPERF_DM_GP,
+ RGX_HWPERF_DM_2D,
+ RGX_HWPERF_DM_TA,
+ RGX_HWPERF_DM_3D,
+ RGX_HWPERF_DM_CDM,
+ RGX_HWPERF_DM_RTU,
+ RGX_HWPERF_DM_SHG,
+ RGX_HWPERF_DM_TDM,
+
+ RGX_HWPERF_DM_LAST,
+
+ RGX_HWPERF_DM_INVALID = 0x1FFFFFFF
+} RGX_HWPERF_DM;
+
+/* Enumeration of the 32-bit feature flag bits used in HWPerf and the API */
+typedef enum {
+ RGX_HWPERF_FEATURE_PERFBUS_FLAG = 0x001,
+ RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG = 0x002,
+ RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG = 0x004,
+ RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG = 0x008,
+ RGX_HWPERF_FEATURE_ROGUEXE_FLAG = 0x010,
+ RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG = 0x020,
+ RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG = 0x040,
+ RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION = 0x080
+} RGX_HWPERF_FEATURE_FLAGS;
+
+/*! This structure holds the data of a firmware packet. */
+typedef struct
+{
+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ IMG_UINT32 ui32FWPerfCount0; /*!< Meta/MIPS PERF_COUNT0 register */
+ IMG_UINT32 ui32FWPerfCount1; /*!< Meta/MIPS PERF_COUNT1 register */
+ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */
+ IMG_UINT32 ui32Padding; /*!< Reserved, aligns structure size to 8 bytes */
+} RGX_HWPERF_FW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FW_DATA);
+
+/*! This structure holds the data of a hardware packet, including counters. */
+typedef struct
+{
+ IMG_UINT32 ui32DMCyc; /*!< DataMaster cycle count register, 0 if none */
+ IMG_UINT32 ui32FrameNum; /*!< Frame number, undefined on some DataMasters */
+ IMG_UINT32 ui32PID; /*!< Process identifier */
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32WorkTarget; /*!< RenderTarget for a TA,3D; Frame context for RTU, 0x0 otherwise */
+ IMG_UINT32 ui32ExtJobRef; /*!< Client driver context job reference used for tracking/debugging */
+ IMG_UINT32 ui32IntJobRef; /*!< RGX Data master context job reference used for tracking/debugging */
+ IMG_UINT32 ui32TimeCorrIndex; /*!< Index to the time correlation at the time the packet was generated */
+ IMG_UINT32 ui32BlkInfo; /*!< <31..16> NumBlocks <15..0> Counter block stream offset */
+ IMG_UINT32 ui32WorkCtx; /*!< Work context: Render Context for TA/3D; RayTracing Context for RTU/SHG; 0x0 otherwise */
+ IMG_UINT32 ui32CtxPriority; /*!< Context priority */
+ IMG_UINT32 ui32Padding1; /* To ensure correct alignment */
+ IMG_UINT32 aui32CountBlksStream[RGX_HWPERF_ZERO_OR_MORE_ELEMENTS]; /*!< Counter data */
+ IMG_UINT32 ui32Padding2; /* To ensure correct alignment */
+} RGX_HWPERF_HW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_HW_DATA);
+
+/*! Mask for use with the aui32CountBlksStream field when decoding the
+ * counter block ID and mask word. */
+#define RGX_HWPERF_CNTBLK_ID_MASK 0xFFFF0000U
+#define RGX_HWPERF_CNTBLK_ID_SHIFT 16U
+
+/*! Obtains the counter block ID from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words of
+ * a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNTBLK_ID(_data_addr, _idx) ((IMG_UINT16)(((_data_addr)->aui32CountBlksStream[(_idx)]&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+#define RGX_HWPERF_GET_CNTBLK_IDW(_word) ((IMG_UINT16)(((_word)&RGX_HWPERF_CNTBLK_ID_MASK)>>RGX_HWPERF_CNTBLK_ID_SHIFT))
+
+/*! Obtains the counter mask from the supplied RGX_HWPERF_HW_DATA address
+ * and stream index. May be used in decoding the counter block stream words
+ * of a RGX_HWPERF_HW_DATA structure. */
+#define RGX_HWPERF_GET_CNT_MASK(_data_addr, _idx) ((IMG_UINT16)((_data_addr)->aui32CountBlksStream[(_idx)]&((1<<RGX_CNTBLK_COUNTERS_MAX)-1)))
+#define RGX_HWPERF_GET_CNT_MASKW(_word) ((IMG_UINT16)((_word)&((1<<RGX_CNTBLK_COUNTERS_MAX)-1)))
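+
+/* Decode sketch (assumes each block header word in the stream is followed by
+ * one 32-bit value per counter enabled in its mask; psHWData is a
+ * hypothetical const RGX_HWPERF_HW_DATA pointer):
+ *
+ *   IMG_UINT32 ui32Idx = 0;
+ *   IMG_UINT32 ui32Blks = RGX_HWPERF_GET_BLKCOUNT(psHWData->ui32BlkInfo);
+ *   while (ui32Blks--)
+ *   {
+ *       IMG_UINT32 ui32Hdr = psHWData->aui32CountBlksStream[ui32Idx++];
+ *       IMG_UINT16 ui16BlkId = RGX_HWPERF_GET_CNTBLK_IDW(ui32Hdr);
+ *       IMG_UINT16 ui16Mask = RGX_HWPERF_GET_CNT_MASKW(ui32Hdr);
+ *       // consume one stream word per bit set in ui16Mask, then continue
+ *   }
+ */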
+
+
+typedef struct
+{
+ RGX_HWPERF_DM eDM; /*!< DataMaster identifier, see RGX_HWPERF_DM */
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32FrameNum; /*!< Frame number */
+ IMG_UINT32 ui32TxtActCyc; /*!< Meta TXTACTCYC register value */
+ IMG_UINT32 ui32PerfCycle; /*!< Cycle count. Used to measure HW context store latency */
+ IMG_UINT32 ui32PerfPhase; /*!< Phase. Used to determine geometry content */
+ IMG_UINT32 ui32Padding[2]; /*!< Padding to 8 DWords */
+} RGX_HWPERF_CSW_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CSW_DATA);
+
+/*! Enumeration of clocks supporting this event */
+typedef enum
+{
+ RGX_HWPERF_CLKS_CHG_INVALID = 0,
+
+ RGX_HWPERF_CLKS_CHG_NAME_CORE = 1,
+
+ RGX_HWPERF_CLKS_CHG_LAST,
+} RGX_HWPERF_CLKS_CHG_NAME;
+
+/*! This structure holds the data of a clocks change packet. */
+typedef struct
+{
+ IMG_UINT64 ui64NewClockSpeed; /*!< New Clock Speed (in Hz) */
+ RGX_HWPERF_CLKS_CHG_NAME eClockName; /*!< Clock name */
+ IMG_UINT32 ui32CalibratedClockSpeed; /*!< Calibrated new GPU clock speed (in Hz) */
+ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
+ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
+ correlated to OSTimeStamp */
+} RGX_HWPERF_CLKS_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CLKS_CHG_DATA);
+
+/*! Enumeration of GPU utilisation states supported by this event */
+typedef enum
+{
+ RGX_HWPERF_GPU_STATE_ACTIVE_LOW = 0,
+ RGX_HWPERF_GPU_STATE_IDLE = 1,
+ RGX_HWPERF_GPU_STATE_ACTIVE_HIGH = 2,
+ RGX_HWPERF_GPU_STATE_BLOCKED = 3,
+ RGX_HWPERF_GPU_STATE_LAST,
+} RGX_HWPERF_GPU_STATE;
+
+/*! This structure holds the data of a GPU utilisation state change packet. */
+typedef struct
+{
+ RGX_HWPERF_GPU_STATE eState; /*!< New GPU utilisation state */
+ IMG_UINT32 uiUnused1; /*!< Padding */
+ IMG_UINT32 uiUnused2; /*!< Padding */
+ IMG_UINT32 uiUnused3; /*!< Padding */
+} RGX_HWPERF_GPU_STATE_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_GPU_STATE_CHG_DATA);
+
+
+/*! Signature pattern 'HPE1' found in the first word of a PWR_EST packet data */
+#define HWPERF_PWR_EST_V1_SIG 0x48504531
+
+/*! Macros to obtain a component field from a counter ID word */
+#define RGX_HWPERF_GET_PWR_EST_HIGH_FLAG(_word) (((_word)&0x80000000)>>31)
+#define RGX_HWPERF_GET_PWR_EST_UNIT(_word) (((_word)&0x0F000000)>>24)
+#define RGX_HWPERF_GET_PWR_EST_NUMBER(_word) ((_word)&0x0000FFFF)
+
+/*! This macro constructs a counter ID for a power estimate data stream from
+ * the component parts of: high word flag, unit id, counter number */
+#define RGX_HWPERF_MAKE_PWR_EST_COUNTERID(_high, _unit, _number) \
+ ((IMG_UINT32)((((_high)&0x1)<<31) | (((_unit)&0xF)<<24) | \
+ ((_number)&0x0000FFFF)))
+
+/*! This structure holds the data for a power estimate packet. */
+typedef struct
+{
+ IMG_UINT32 ui32StreamVersion; /*!< HWPERF_PWR_EST_V1_SIG */
+ IMG_UINT32 ui32StreamSize; /*!< Size in bytes of the stream data held
+ in the aui32StreamData member */
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS]; /*!< Counter data */
+ IMG_UINT32 ui32Padding; /* To ensure correct alignment */
+} RGX_HWPERF_PWR_EST_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_EST_DATA);
+
+/*! Enumeration of the kinds of power change events that can occur */
+typedef enum
+{
+ RGX_HWPERF_PWR_UNDEFINED = 0,
+ RGX_HWPERF_PWR_ON = 1, /*!< Whole device powered on */
+ RGX_HWPERF_PWR_OFF = 2, /*!< Whole device powered off */
+ RGX_HWPERF_PWR_UP = 3, /*!< Power turned on to a HW domain */
+ RGX_HWPERF_PWR_DOWN = 4, /*!< Power turned off to a HW domain */
+
+ RGX_HWPERF_PWR_LAST,
+} RGX_HWPERF_PWR;
+
+/*! This structure holds the data of a power packet. */
+typedef struct
+{
+ RGX_HWPERF_PWR eChange; /*!< Defines the type of power change */
+ IMG_UINT32 ui32Domains; /*!< HW Domains affected */
+ IMG_UINT64 ui64OSTimeStamp; /*!< OSTimeStamp sampled by the host */
+ IMG_UINT64 ui64CRTimeStamp; /*!< CRTimeStamp sampled by the host and
+ correlated to OSTimeStamp */
+ IMG_UINT32 ui32CalibratedClockSpeed; /*!< GPU clock speed (in Hz) at the time
+ the two timers were correlated */
+ IMG_UINT32 ui32Unused1; /*!< Padding */
+} RGX_HWPERF_PWR_CHG_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_PWR_CHG_DATA);
+
+
+
+/*! Firmware Activity event. */
+typedef enum
+{
+ RGX_HWPERF_FWACT_EV_INVALID, /*! Invalid value. */
+ RGX_HWPERF_FWACT_EV_REGS_SET, /*! Registers set. */
+ RGX_HWPERF_FWACT_EV_HWR_DETECTED, /*! HWR detected. */
+ RGX_HWPERF_FWACT_EV_HWR_RESET_REQUIRED, /*! Reset required. */
+ RGX_HWPERF_FWACT_EV_HWR_RECOVERED, /*! HWR recovered. */
+ RGX_HWPERF_FWACT_EV_HWR_FREELIST_READY, /*! Freelist ready. */
+ RGX_HWPERF_FWACT_EV_FEATURES, /*! Features present */
+
+ RGX_HWPERF_FWACT_EV_LAST /*! Number of elements. */
+} RGX_HWPERF_FWACT_EV;
+
+/*! Cause of the HWR event. */
+typedef enum
+{
+ RGX_HWPERF_HWR_REASON_INVALID, /*! Invalid value.*/
+ RGX_HWPERF_HWR_REASON_LOCKUP, /*! Lockup. */
+ RGX_HWPERF_HWR_REASON_PAGEFAULT, /*! Page fault. */
+ RGX_HWPERF_HWR_REASON_POLLFAIL, /*! Poll fail. */
+ RGX_HWPERF_HWR_REASON_DEADLINE_OVERRUN, /*! Deadline overrun. */
+ RGX_HWPERF_HWR_REASON_CSW_DEADLINE_OVERRUN, /*! Hard Context Switch deadline overrun. */
+
+ RGX_HWPERF_HWR_REASON_LAST /*! Number of elements. */
+} RGX_HWPERF_HWR_REASON;
+
+/*! Sub-event's data. */
+typedef union
+{
+ struct
+ {
+ RGX_HWPERF_DM eDM; /*!< Data Master ID. */
+ RGX_HWPERF_HWR_REASON eReason; /*!< Reason of the HWR. */
+ IMG_UINT32 ui32DMContext; /*!< FW render context */
+ } sHWR; /*!< HWR sub-event data. */
+
+ struct
+ {
+ IMG_UINT32 ui32BvncKmFeatureFlags; /*!< BVNC Feature Flags */
+ } sBVNC;
+} RGX_HWPERF_FWACT_DETAIL;
+
+/*! This structure holds the data of a FW activity event packet */
+typedef struct
+{
+ RGX_HWPERF_FWACT_EV eEvType; /*!< Event type. */
+ RGX_HWPERF_FWACT_DETAIL uFwActDetail; /*!< Data of the sub-event. */
+} RGX_HWPERF_FWACT_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_FWACT_DATA);
+
+
+
+typedef enum {
+ RGX_HWPERF_UFO_EV_UPDATE,
+ RGX_HWPERF_UFO_EV_CHECK_SUCCESS,
+ RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS,
+ RGX_HWPERF_UFO_EV_CHECK_FAIL,
+ RGX_HWPERF_UFO_EV_PRCHECK_FAIL,
+ RGX_HWPERF_UFO_EV_FORCE_UPDATE,
+
+ RGX_HWPERF_UFO_EV_LAST
+} RGX_HWPERF_UFO_EV;
+
+/*! Data stream tuple. */
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32Value;
+ } sCheckSuccess;
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32Value;
+ IMG_UINT32 ui32Required;
+ } sCheckFail;
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32OldValue;
+ IMG_UINT32 ui32NewValue;
+ } sUpdate;
+} RGX_HWPERF_UFO_DATA_ELEMENT;
+
+/*! This structure holds the packet payload data for UFO event. */
+typedef struct
+{
+ RGX_HWPERF_UFO_EV eEvType;
+ IMG_UINT32 ui32TimeCorrIndex;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32StreamInfo;
+ RGX_HWPERF_DM eDM;
+ IMG_UINT32 ui32Padding;
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+} RGX_HWPERF_UFO_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_UFO_DATA);
+
+
+
+typedef enum
+{
+ RGX_HWPERF_KICK_TYPE_TA3D, /*!< Replaced by separate TA and 3D types */
+ RGX_HWPERF_KICK_TYPE_TQ2D,
+ RGX_HWPERF_KICK_TYPE_TQ3D,
+ RGX_HWPERF_KICK_TYPE_CDM,
+ RGX_HWPERF_KICK_TYPE_RS,
+ RGX_HWPERF_KICK_TYPE_VRDM,
+ RGX_HWPERF_KICK_TYPE_TQTDM,
+ RGX_HWPERF_KICK_TYPE_SYNC,
+ RGX_HWPERF_KICK_TYPE_TA,
+ RGX_HWPERF_KICK_TYPE_3D,
+ RGX_HWPERF_KICK_TYPE_LAST
+} RGX_HWPERF_KICK_TYPE;
+
+typedef struct
+{
+ RGX_HWPERF_KICK_TYPE ui32EnqType;
+ IMG_UINT32 ui32PID;
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32DMContext; /*!< GPU Data Master (FW) Context */
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ IMG_UINT64 ui64CheckFence_UID;
+ IMG_UINT64 ui64UpdateFence_UID;
+ IMG_UINT64 ui64DeadlineInus; /*!< Workload deadline in system monotonic time */
+ IMG_UINT64 ui64CycleEstimate; /*!< Estimated cycle time for the workload */
+} RGX_HWPERF_HOST_ENQ_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ENQ_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_ENQ_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+ RGX_HWPERF_UFO_EV eEvType;
+ IMG_UINT32 ui32StreamInfo;
+ IMG_UINT32 aui32StreamData[RGX_HWPERF_ONE_OR_MORE_ELEMENTS];
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_UFO_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_UFO_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_UFO_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef enum
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE_INVALID,
+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC, /* PRIM */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE,
+ RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR, /* Fence for use on GPU (SYNCP backed) */
+ RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP,
+
+ RGX_HWPERF_HOST_RESOURCE_TYPE_LAST
+} RGX_HWPERF_HOST_RESOURCE_TYPE;
+
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 uiPid;
+ IMG_UINT64 ui64Timeline_UID1;
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ } sTimelineAlloc;
+
+ struct
+ {
+ IMG_UINT64 ui64Fence_UID;
+ IMG_UINT32 ui32CheckPt_FWAddr;
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ } sFenceAlloc;
+
+ struct
+ {
+ IMG_UINT32 ui32CheckPt_FWAddr;
+ IMG_UINT64 ui64Timeline_UID;
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of original fence synCP created for */
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ } sSyncCheckPointAlloc;
+
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ } sSyncAlloc;
+} RGX_HWPERF_HOST_ALLOC_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32AllocType;
+ RGX_HWPERF_HOST_ALLOC_DETAIL RGXFW_ALIGN uAllocDetail;
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_ALLOC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_ALLOC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_ALLOC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef union
+{
+ struct
+ {
+ IMG_UINT32 uiPid;
+ IMG_UINT64 ui64Timeline_UID1;
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ } sTimelineDestroy;
+
+ struct
+ {
+ IMG_UINT64 ui64Fence_UID;
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ } sFenceDestroy;
+
+ struct
+ {
+ IMG_UINT32 ui32CheckPt_FWAddr;
+ } sSyncCheckPointFree;
+
+ struct
+ {
+ IMG_UINT32 ui32FWAddr;
+ } sSyncFree;
+} RGX_HWPERF_HOST_FREE_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32FreeType;
+ RGX_HWPERF_HOST_FREE_DETAIL uFreeDetail;
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_FREE_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_FREE_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_FREE_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+typedef struct
+{
+ IMG_UINT64 ui64CRTimestamp;
+ IMG_UINT64 ui64OSTimestamp;
+ IMG_UINT32 ui32ClockSpeed;
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+} RGX_HWPERF_HOST_CLK_SYNC_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_CLK_SYNC_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+
+typedef union
+{
+ struct
+ {
+ IMG_UINT64 ui64NewFence_UID;
+ IMG_UINT64 ui64InFence1_UID;
+ IMG_UINT64 ui64InFence2_UID;
+ IMG_CHAR acName[PVRSRV_SYNC_NAME_LENGTH];
+ IMG_UINT32 ui32Padding; /* Align structure size to 8 bytes */
+ } sFenceMerge;
+} RGX_HWPERF_HOST_MODIFY_DETAIL;
+
+typedef struct
+{
+ RGX_HWPERF_HOST_RESOURCE_TYPE ui32ModifyType;
+ RGX_HWPERF_HOST_MODIFY_DETAIL uModifyDetail;
+} RGX_HWPERF_HOST_MODIFY_DATA;
+
+/* Payload size must be multiple of 8 bytes to align start of next packet. */
+static_assert((sizeof(RGX_HWPERF_HOST_MODIFY_DATA) & (PVRSRVTL_PACKET_ALIGNMENT-1)) == 0,
+ "sizeof(RGX_HWPERF_HOST_MODIFY_DATA) must be a multiple PVRSRVTL_PACKET_ALIGNMENT");
+
+
+/*! This type is a union of packet payload data structures associated with
+ * various FW and Host events */
+typedef union
+{
+ RGX_HWPERF_FW_DATA sFW; /*!< Firmware event packet data */
+ RGX_HWPERF_HW_DATA sHW; /*!< Hardware event packet data */
+ RGX_HWPERF_CLKS_CHG_DATA sCLKSCHG; /*!< Clock change event packet data */
+ RGX_HWPERF_GPU_STATE_CHG_DATA sGPUSTATECHG; /*!< GPU utilisation state change event packet data */
+ RGX_HWPERF_PWR_EST_DATA sPWREST; /*!< Power estimate event packet data */
+ RGX_HWPERF_PWR_CHG_DATA sPWR; /*!< Power event packet data */
+ RGX_HWPERF_CSW_DATA sCSW; /*!< Context switch packet data */
+ RGX_HWPERF_UFO_DATA sUFO; /*!< UFO data */
+ RGX_HWPERF_FWACT_DATA sFWACT; /*!< Firmware activity event packet data */
+ /* Host driver event packet data */
+ RGX_HWPERF_HOST_ENQ_DATA sENQ; /*!< Host ENQ data */
+ RGX_HWPERF_HOST_UFO_DATA sHUFO; /*!< Host UFO data */
+ RGX_HWPERF_HOST_ALLOC_DATA sHALLOC; /*!< Host Alloc data */
+ RGX_HWPERF_HOST_CLK_SYNC_DATA sHCLKSYNC; /*!< Host CLK_SYNC data */
+ RGX_HWPERF_HOST_FREE_DATA sHFREE; /*!< Host Free data */
+ RGX_HWPERF_HOST_MODIFY_DATA sHMOD; /*!< Host Modify data */
+} _RGX_HWPERF_V2_PACKET_DATA_, *RGX_PHWPERF_V2_PACKET_DATA;
+
+RGX_FW_STRUCT_SIZE_ASSERT(_RGX_HWPERF_V2_PACKET_DATA_);
+
+#define RGX_HWPERF_GET_PACKET_DATA(_packet_addr) ((RGX_PHWPERF_V2_PACKET_DATA) ( ((IMG_BYTE*)(_packet_addr)) +sizeof(RGX_HWPERF_V2_PACKET_HDR) ) )
+
+
+/******************************************************************************
+ * API Types
+ *****************************************************************************/
+
+/*! Counter block IDs for all the hardware blocks with counters.
+ * Directly addressable blocks must have a value between 0..15.
+ * First hex digit represents a group number and the second hex digit represents
+ * the unit within the group. Group 0 is the direct group, all others are
+ * indirect groups.
+ */
+typedef enum
+{
+ /* Directly addressable counter blocks */
+ RGX_CNTBLK_ID_TA = 0x0000,
+ RGX_CNTBLK_ID_RASTER = 0x0001, /* Non-cluster grouping cores */
+ RGX_CNTBLK_ID_HUB = 0x0002, /* Non-cluster grouping cores */
+ RGX_CNTBLK_ID_TORNADO = 0x0003, /* XT cores */
+ RGX_CNTBLK_ID_JONES = 0x0004, /* S7 cores */
+ RGX_CNTBLK_ID_BF = 0x0005, /* Doppler unit */
+ RGX_CNTBLK_ID_BT = 0x0006, /* Doppler unit */
+ RGX_CNTBLK_ID_RT = 0x0007, /* Doppler unit */
+ RGX_CNTBLK_ID_SH = 0x0008, /* Ray tracing unit */
+
+ RGX_CNTBLK_ID_DIRECT_LAST,
+
+ /* Indirectly addressable counter blocks */
+ RGX_CNTBLK_ID_TPU_MCU0 = 0x0010, /* Addressable by Dust */
+ RGX_CNTBLK_ID_TPU_MCU1 = 0x0011,
+ RGX_CNTBLK_ID_TPU_MCU2 = 0x0012,
+ RGX_CNTBLK_ID_TPU_MCU3 = 0x0013,
+ RGX_CNTBLK_ID_TPU_MCU4 = 0x0014,
+ RGX_CNTBLK_ID_TPU_MCU5 = 0x0015,
+ RGX_CNTBLK_ID_TPU_MCU6 = 0x0016,
+ RGX_CNTBLK_ID_TPU_MCU7 = 0x0017,
+ RGX_CNTBLK_ID_TPU_MCU_ALL = 0x4010,
+
+ RGX_CNTBLK_ID_USC0 = 0x0020, /* Addressable by Cluster */
+ RGX_CNTBLK_ID_USC1 = 0x0021,
+ RGX_CNTBLK_ID_USC2 = 0x0022,
+ RGX_CNTBLK_ID_USC3 = 0x0023,
+ RGX_CNTBLK_ID_USC4 = 0x0024,
+ RGX_CNTBLK_ID_USC5 = 0x0025,
+ RGX_CNTBLK_ID_USC6 = 0x0026,
+ RGX_CNTBLK_ID_USC7 = 0x0027,
+ RGX_CNTBLK_ID_USC8 = 0x0028,
+ RGX_CNTBLK_ID_USC9 = 0x0029,
+ RGX_CNTBLK_ID_USC10 = 0x002A,
+ RGX_CNTBLK_ID_USC11 = 0x002B,
+ RGX_CNTBLK_ID_USC12 = 0x002C,
+ RGX_CNTBLK_ID_USC13 = 0x002D,
+ RGX_CNTBLK_ID_USC14 = 0x002E,
+ RGX_CNTBLK_ID_USC15 = 0x002F,
+ RGX_CNTBLK_ID_USC_ALL = 0x4020,
+
+ RGX_CNTBLK_ID_TEXAS0 = 0x0030, /* Addressable by Phantom in XT, Dust in S7 */
+ RGX_CNTBLK_ID_TEXAS1 = 0x0031,
+ RGX_CNTBLK_ID_TEXAS2 = 0x0032,
+ RGX_CNTBLK_ID_TEXAS3 = 0x0033,
+ RGX_CNTBLK_ID_TEXAS4 = 0x0034,
+ RGX_CNTBLK_ID_TEXAS5 = 0x0035,
+ RGX_CNTBLK_ID_TEXAS6 = 0x0036,
+ RGX_CNTBLK_ID_TEXAS7 = 0x0037,
+ RGX_CNTBLK_ID_TEXAS_ALL = 0x4030,
+
+ RGX_CNTBLK_ID_RASTER0 = 0x0040, /* Addressable by Phantom, XT only */
+ RGX_CNTBLK_ID_RASTER1 = 0x0041,
+ RGX_CNTBLK_ID_RASTER2 = 0x0042,
+ RGX_CNTBLK_ID_RASTER3 = 0x0043,
+ RGX_CNTBLK_ID_RASTER_ALL = 0x4040,
+
+ RGX_CNTBLK_ID_BLACKPEARL0 = 0x0050, /* Addressable by Phantom, S7 only */
+ RGX_CNTBLK_ID_BLACKPEARL1 = 0x0051,
+ RGX_CNTBLK_ID_BLACKPEARL2 = 0x0052,
+ RGX_CNTBLK_ID_BLACKPEARL3 = 0x0053,
+ RGX_CNTBLK_ID_BLACKPEARL_ALL= 0x4050,
+
+ RGX_CNTBLK_ID_PBE0 = 0x0060, /* Addressable by Cluster, S7 only */
+ RGX_CNTBLK_ID_PBE1 = 0x0061,
+ RGX_CNTBLK_ID_PBE2 = 0x0062,
+ RGX_CNTBLK_ID_PBE3 = 0x0063,
+ RGX_CNTBLK_ID_PBE4 = 0x0064,
+ RGX_CNTBLK_ID_PBE5 = 0x0065,
+ RGX_CNTBLK_ID_PBE6 = 0x0066,
+ RGX_CNTBLK_ID_PBE7 = 0x0067,
+ RGX_CNTBLK_ID_PBE8 = 0x0068,
+ RGX_CNTBLK_ID_PBE9 = 0x0069,
+ RGX_CNTBLK_ID_PBE10 = 0x006A,
+ RGX_CNTBLK_ID_PBE11 = 0x006B,
+ RGX_CNTBLK_ID_PBE12 = 0x006C,
+ RGX_CNTBLK_ID_PBE13 = 0x006D,
+ RGX_CNTBLK_ID_PBE14 = 0x006E,
+ RGX_CNTBLK_ID_PBE15 = 0x006F,
+ RGX_CNTBLK_ID_PBE_ALL = 0x4060,
+
+ RGX_CNTBLK_ID_BX_TU0 = 0x0070, /* Doppler unit, XT only */
+ RGX_CNTBLK_ID_BX_TU1 = 0x0071,
+ RGX_CNTBLK_ID_BX_TU2 = 0x0072,
+ RGX_CNTBLK_ID_BX_TU3 = 0x0073,
+ RGX_CNTBLK_ID_BX_TU_ALL = 0x4070,
+
+ RGX_CNTBLK_ID_LAST = 0x0074,
+
+ RGX_CNTBLK_ID_CUSTOM0 = 0x7FF0,
+ RGX_CNTBLK_ID_CUSTOM1 = 0x7FF1,
+ RGX_CNTBLK_ID_CUSTOM2 = 0x7FF2,
+ RGX_CNTBLK_ID_CUSTOM3 = 0x7FF3,
+ RGX_CNTBLK_ID_CUSTOM4_FW = 0x7FF4 /* Custom block used for getting statistics held in the FW */
+
+} RGX_HWPERF_CNTBLK_ID;
+
+/* Masks for the counter block ID*/
+#define RGX_CNTBLK_ID_GROUP_MASK (0x00F0U)
+#define RGX_CNTBLK_ID_GROUP_SHIFT (4)
+#define RGX_CNTBLK_ID_UNIT_ALL_MASK (0x4000U)
+#define RGX_CNTBLK_ID_UNIT_MASK (0xf)
+
+#define RGX_CNTBLK_INDIRECT_COUNT(_class, _n) ((RGX_CNTBLK_ID_ ## _class ## _n) - (RGX_CNTBLK_ID_ ## _class ## 0) +1)
+
+/*! The number of layout blocks defined with configurable multiplexed
+ * performance counters, hence excludes custom counter blocks.
+ */
+#define RGX_HWPERF_MAX_DEFINED_BLKS (\
+ RGX_CNTBLK_ID_DIRECT_LAST +\
+ RGX_CNTBLK_INDIRECT_COUNT(TPU_MCU, 7)+\
+ RGX_CNTBLK_INDIRECT_COUNT(USC, 15)+\
+ RGX_CNTBLK_INDIRECT_COUNT(TEXAS, 7)+\
+ RGX_CNTBLK_INDIRECT_COUNT(RASTER, 3)+\
+ RGX_CNTBLK_INDIRECT_COUNT(BLACKPEARL, 3)+\
+ RGX_CNTBLK_INDIRECT_COUNT(PBE, 15)+\
+ RGX_CNTBLK_INDIRECT_COUNT(BX_TU, 3) )
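+
+/* For example, RGX_CNTBLK_INDIRECT_COUNT(USC, 15) expands to
+ * (RGX_CNTBLK_ID_USC15 - RGX_CNTBLK_ID_USC0 + 1) = (0x002F - 0x0020 + 1) = 16,
+ * so the USC group contributes 16 layout blocks to the total above.
+ */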
+
+#define RGX_HWPERF_EVENT_MASK_VALUE(e) (((IMG_UINT64)1)<<(e))
+
+#define RGX_CUSTOM_FW_CNTRS \
+ X(TA_LOCAL_FL_SIZE, 0x0, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
+ X(TA_GLOBAL_FL_SIZE, 0x1, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TAPAUSE) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PMOOM_TARESUME) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_TAFINISHED)) \
+ X(3D_LOCAL_FL_SIZE, 0x2, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED)) \
+ X(3D_GLOBAL_FL_SIZE, 0x3, RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DKICK) | \
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_3DFINISHED))
+
+/*! Counter IDs for the firmware held statistics */
+typedef enum
+{
+#define X(ctr, id, allow_mask) RGX_CUSTOM_FW_CNTR_##ctr = id,
+ RGX_CUSTOM_FW_CNTRS
+#undef X
+
+ /* always the last entry in the list */
+ RGX_CUSTOM_FW_CNTR_LAST
+} RGX_HWPERF_CUSTOM_FW_CNTR_ID;
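+
+/* The X-macro expansion above yields one enumerator per table row, e.g.:
+ *   RGX_CUSTOM_FW_CNTR_TA_LOCAL_FL_SIZE  = 0x0,
+ *   RGX_CUSTOM_FW_CNTR_TA_GLOBAL_FL_SIZE = 0x1,
+ * and so on; the allow_mask column is ignored by this particular expansion
+ * of RGX_CUSTOM_FW_CNTRS.
+ */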
+
+/*! Identifier for each counter in a performance counting module */
+typedef enum
+{
+ RGX_CNTBLK_COUNTER0_ID = 0,
+ RGX_CNTBLK_COUNTER1_ID = 1,
+ RGX_CNTBLK_COUNTER2_ID = 2,
+ RGX_CNTBLK_COUNTER3_ID = 3,
+ RGX_CNTBLK_COUNTER4_ID = 4,
+ RGX_CNTBLK_COUNTER5_ID = 5,
+ /* MAX value used in server handling of counter config arrays */
+ RGX_CNTBLK_COUNTERS_MAX
+} RGX_HWPERF_CNTBLK_COUNTER_ID;
+
+/* Sets all the bits from bit _b1 to _b2, inclusive, in an IMG_UINT64 value */
+#define _MASK_RANGE(_b1, _b2) (((IMG_UINT64_C(1) << ((_b2)-(_b1)+1)) - 1) << (_b1))
+#define MASK_RANGE(R) _MASK_RANGE(R##_FIRST_TYPE, R##_LAST_TYPE)
+#define RGX_HWPERF_HOST_EVENT_MASK_VALUE(e) ((IMG_UINT32)(1<<(e)))
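+
+/* Worked example: MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE) expands to
+ * _MASK_RANGE(0x01, 0x06) = ((1 << 6) - 1) << 1 = 0x7E, i.e. bits 1..6 set,
+ * one bit per FW event type in that range.
+ */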
+
+/*! Mask macros for use with RGXCtrlHWPerf() API.
+ */
+#define RGX_HWPERF_EVENT_MASK_NONE (IMG_UINT64_C(0x0000000000000000))
+#define RGX_HWPERF_EVENT_MASK_ALL (IMG_UINT64_C(0xFFFFFFFFFFFFFFFF))
+
+/*! HWPerf Firmware event masks
+ * Next macro covers all FW Start/End/Debug (SED) events.
+ */
+#define RGX_HWPERF_EVENT_MASK_FW_SED (MASK_RANGE(RGX_HWPERF_FW_EVENT_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_FW_UFO (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO))
+#define RGX_HWPERF_EVENT_MASK_FW_CSW (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_START) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CSW_FINISHED))
+#define RGX_HWPERF_EVENT_MASK_ALL_FW (RGX_HWPERF_EVENT_MASK_FW_SED |\
+ RGX_HWPERF_EVENT_MASK_FW_UFO |\
+ RGX_HWPERF_EVENT_MASK_FW_CSW)
+
+#define RGX_HWPERF_EVENT_MASK_HW_PERIODIC (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_HW_PERIODIC))
+#define RGX_HWPERF_EVENT_MASK_HW_KICKFINISH ((MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE0) |\
+ MASK_RANGE(RGX_HWPERF_HW_EVENT_RANGE1)) &\
+ ~(RGX_HWPERF_EVENT_MASK_HW_PERIODIC))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_HW (RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |\
+ RGX_HWPERF_EVENT_MASK_HW_PERIODIC)
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR_EST (MASK_RANGE(RGX_HWPERF_PWR_EST_RANGE))
+
+#define RGX_HWPERF_EVENT_MASK_ALL_PWR (RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_CLKS_CHG) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_GPU_STATE_CHG) |\
+ RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_PWR_CHG))
+
+/*! HWPerf Host event masks
+ */
+#define RGX_HWPERF_EVENT_MASK_HOST_WORK_ENQ (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_ENQ))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_UFO (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_UFO))
+#define RGX_HWPERF_EVENT_MASK_HOST_ALL_PWR (RGX_HWPERF_HOST_EVENT_MASK_VALUE(RGX_HWPERF_HOST_CLK_SYNC))
+
+
+/*! Type used in the RGX API RGXConfigureAndEnableHWPerfCounters() */
+typedef struct _RGX_HWPERF_CONFIG_CNTBLK_
+{
+ /*! Counter block ID, see RGX_HWPERF_CNTBLK_ID */
+ IMG_UINT16 ui16BlockID;
+
+ /*! 4 or 6 LSBs used to select counters to configure in this block. */
+ IMG_UINT8 ui8CounterSelect;
+
+ /*! 4 or 6 LSBs used as MODE bits for the counters in the group. */
+ IMG_UINT8 ui8Mode;
+
+ /*! 5 or 6 LSBs used as the GROUP_SELECT value for the counter. */
+ IMG_UINT8 aui8GroupSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+ /*! 16 LSBs used as the BIT_SELECT value for the counter. */
+ IMG_UINT16 aui16BitSelect[RGX_CNTBLK_COUNTERS_MAX];
+
+ /*! 14 LSBs used as the BATCH_MAX value for the counter. */
+ IMG_UINT32 aui32BatchMax[RGX_CNTBLK_COUNTERS_MAX];
+
+ /*! 14 LSBs used as the BATCH_MIN value for the counter. */
+ IMG_UINT32 aui32BatchMin[RGX_CNTBLK_COUNTERS_MAX];
+} UNCACHED_ALIGN RGX_HWPERF_CONFIG_CNTBLK;
+
+RGX_FW_STRUCT_SIZE_ASSERT(RGX_HWPERF_CONFIG_CNTBLK);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* RGX_HWPERF_H_ */
+
+/******************************************************************************
+ End of file
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.c b/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.c
new file mode 100644
index 00000000000000..17f12c897ea821
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.c
@@ -0,0 +1,609 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX HW Performance counter table
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX HW Performance counters table
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgx_fwif_hwperf.h"
+#include "rgxdefs_km.h"
+#include "rgx_hwperf_table.h"
+
+/* Includes needed for PVRSRVKM (Server) context */
+# include "rgx_bvnc_defs_km.h"
+# if defined(__KERNEL__)
+# include "rgxdevice.h"
+# endif
+
+/* Shared compile-time context ASSERT macro */
+#if defined(RGX_FIRMWARE)
+# include "rgxfw_utils.h"
+/* firmware context */
+# define DBG_ASSERT(_c) RGXFW_ASSERT((_c))
+#else
+# include "pvr_debug.h"
+/* host client/server context */
+# define DBG_ASSERT(_c) PVR_ASSERT((_c))
+#endif
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered()
+
+ Referenced in the gasCntBlkTypeModel[] table below and only called from the
+ RGX_FIRMWARE run-time context, so compile-time configuration is used.
+ *****************************************************************************/
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_PERFBUS)
+# include "rgxfw_pow.h"
+# include "rgxfw_utils.h"
+
+static IMG_BOOL rgxfw_hwperf_pow_st_direct(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ PVR_UNREFERENCED_PARAMETER(eBlkType);
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ /* S7XT: JONES */
+ return (eBlkType == RGX_CNTBLK_ID_JONES) ? IMG_TRUE : IMG_FALSE;
+#elif defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+ /* S6XT: TA, TORNADO */
+ return IMG_TRUE;
+#else
+ /* S6 : TA, HUB, RASTER (RASCAL) */
+ return (gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) ? IMG_TRUE : IMG_FALSE;
+#endif
+}
+
+/* Only use conditional compilation when counter blocks appear in different
+ * islands for different Rogue families.
+ */
+static IMG_BOOL rgxfw_hwperf_pow_st_indirect(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ IMG_UINT32 ui32NumDustsEnabled = rgxfw_pow_get_enabled_dusts_num();
+
+ if ((gsPowCtl.ePowState & RGXFW_POW_ST_RD_ON) &&
+ (ui32NumDustsEnabled > 0))
+ {
+#if defined(RGX_FEATURE_DYNAMIC_DUST_POWER)
+ IMG_UINT32 ui32NumUscEnabled = ui32NumDustsEnabled*2;
+
+ switch (eBlkType)
+ {
+ case RGX_CNTBLK_ID_TPU_MCU0: /* S6 and S6XT */
+#if defined (RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ case RGX_CNTBLK_ID_TEXAS0: /* S7 */
+#endif
+ if (ui8UnitId >= ui32NumDustsEnabled)
+ {
+ return IMG_FALSE;
+ }
+ break;
+ case RGX_CNTBLK_ID_USC0: /* S6, S6XT, S7 */
+ case RGX_CNTBLK_ID_PBE0: /* S7 */
+ /* Handle single cluster cores */
+ if (ui8UnitId >= ((ui32NumUscEnabled > RGX_FEATURE_NUM_CLUSTERS) ? RGX_FEATURE_NUM_CLUSTERS : ui32NumUscEnabled))
+ {
+ return IMG_FALSE;
+ }
+ break;
+ case RGX_CNTBLK_ID_BLACKPEARL0: /* S7 */
+ case RGX_CNTBLK_ID_RASTER0: /* S6XT */
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE)
+ case RGX_CNTBLK_ID_TEXAS0: /* S6XT */
+#endif
+ if (ui8UnitId >= (RGX_REQ_NUM_PHANTOMS(ui32NumUscEnabled)))
+ {
+ return IMG_FALSE;
+ }
+ break;
+ default:
+ RGXFW_ASSERT(IMG_FALSE); /* should never get here, table error */
+ break;
+ }
+#else
+ /* Always true: no fused DUSTs, all powered, so do not check the unit */
+ PVR_UNREFERENCED_PARAMETER(eBlkType);
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+#endif
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+ return IMG_TRUE;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+# define rgxfw_hwperf_pow_st_direct ((void*)NULL)
+# define rgxfw_hwperf_pow_st_indirect ((void*)NULL)
+# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_PERFBUS) */
+
+#if defined(RGX_FIRMWARE) && defined(RGX_FEATURE_RAY_TRACING)
+
+/* There is currently no power-island control in the firmware for ray
+ * tracing, so we assume these blocks are always powered. */
+static IMG_BOOL rgxfw_hwperf_pow_st_gandalf(RGX_HWPERF_CNTBLK_ID eBlkType, IMG_UINT8 ui8UnitId)
+{
+ PVR_UNREFERENCED_PARAMETER(eBlkType);
+ PVR_UNREFERENCED_PARAMETER(ui8UnitId);
+
+ return IMG_TRUE;
+}
+
+#else /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_RAY_TRACING) */
+
+# define rgxfw_hwperf_pow_st_gandalf ((void*)NULL)
+
+#endif /* !defined(RGX_FIRMWARE) || !defined(RGX_FEATURE_RAY_TRACING) */
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPowered() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() start
+
+ Referenced in gasCntBlkTypeModel[] table below and called from all build
+ contexts:
+ RGX_FIRMWARE, PVRSRVCTL (UM) and PVRSRVKM (Server).
+
+ Therefore each function has two implementations, one for compile-time and
+ one for run-time configuration, depending on the context. Each function
+ informs the caller whether the block is valid for the RGX device in
+ question. Other run-time dependent data is returned in psRtInfo for the
+ caller to use.
+ *****************************************************************************/
+
+/* Used for block types: USC */
+static IMG_BOOL rgx_hwperf_blk_present_perfbus(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT(psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_USC0);
+
+#if defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+ {
+ psRtInfo->uiNumUnits = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0;
+ return IMG_TRUE;
+ }
+ }
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+#else /* FW context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: Direct RASTERISATION, HUB */
+static IMG_BOOL rgx_hwperf_blk_present_not_clustergrouping(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_HUB));
+
+#if defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if ((!RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING)) &&
+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS)))
+ {
+ psRtInfo->uiNumUnits = 1;
+ return IMG_TRUE;
+ }
+ }
+#else /* FW context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiNumUnits = 1;
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: BF, BT, RT, SH, BX_TU */
+static IMG_BOOL rgx_hwperf_blk_present_raytracing(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BF) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BT) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RT) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_SH) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BX_TU0));
+
+#if defined(RGX_FEATURE_RAY_TRACING) && defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ /* Exception case, read from table as ray-tracing units do not vary by feature. */
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+ }
+ }
+#else /* FW context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_RAY_TRACING)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ DBG_ASSERT(psBlkTypeDesc->uiPerfReg != 0); /* Check for broken config */
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+#if defined(__KERNEL__) /* Server context */
+static INLINE IMG_UINT32 rgx_units_indirect_by_phantom(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+ /* Run-time math for RGX_HWPERF_INDIRECT_BY_PHANTOM */
+ return ((psFeatCfg->ui64Features & RGX_FEATURE_CLUSTER_GROUPING_BIT_MASK) == 0) ? 1
+ : (psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]+3)/4;
+}
+
+static INLINE IMG_UINT32 rgx_units_phantom_indirect_by_dust(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST */
+ return MAX((psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX]>>1),1);
+}
+
+static INLINE IMG_UINT32 rgx_units_phantom_indirect_by_cluster(PVRSRV_DEVICE_FEATURE_CONFIG *psFeatCfg)
+{
+ /* Run-time math for RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER */
+ return psFeatCfg->ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX];
+}
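+
+/* Worked example (illustrative): for a core reporting NUM_CLUSTERS = 6 with
+ * CLUSTER_GROUPING present, the helpers above evaluate to
+ *   rgx_units_indirect_by_phantom()         -> (6+3)/4 = 2 (phantoms)
+ *   rgx_units_phantom_indirect_by_dust()    -> MAX(6>>1, 1) = 3 (dusts)
+ *   rgx_units_phantom_indirect_by_cluster() -> 6 (clusters)
+ */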
+#endif /* defined(__KERNEL__) */
+
+/* Used for block types: TORNADO, TEXAS, Indirect RASTERISATION */
+static IMG_BOOL rgx_hwperf_blk_present_xttop(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_RASTER0));
+
+#if defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+ {
+ psRtInfo->uiNumUnits =
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TORNADO) ? 1
+ : rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg); // Texas, Ind. Raster
+ return IMG_TRUE;
+ }
+ }
+#else /* FW context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: JONES, TPU_MCU, TEXAS, BLACKPEARL, PBE */
+static IMG_BOOL rgx_hwperf_blk_present_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0));
+
+#if defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0)
+ {
+ psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TEXAS0)
+ {
+ psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_BLACKPEARL0)
+ {
+ psRtInfo->uiNumUnits = rgx_units_indirect_by_phantom(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_PBE0)
+ {
+ psRtInfo->uiNumUnits = rgx_units_phantom_indirect_by_cluster(&psDevInfo->sDevFeatureCfg);
+ return IMG_TRUE;
+ }
+ else if (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_JONES)
+ {
+ psRtInfo->uiNumUnits = 1;
+ return IMG_TRUE;
+ }
+ }
+ }
+#else /* FW context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+/* Used for block types: TA, TPU_MCU */
+static IMG_BOOL rgx_hwperf_blk_present_not_s7top(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ DBG_ASSERT(psBlkTypeDesc != NULL);
+ DBG_ASSERT(psRtInfo != NULL);
+ DBG_ASSERT((psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ||
+ (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TPU_MCU0));
+
+#if defined(__KERNEL__) /* Server context */
+ PVR_ASSERT(pvDev_km != NULL);
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)pvDev_km;
+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) &&
+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+ {
+ psRtInfo->uiNumUnits = (psBlkTypeDesc->uiCntBlkIdBase == RGX_CNTBLK_ID_TA) ? 1
+ : rgx_units_phantom_indirect_by_dust(&psDevInfo->sDevFeatureCfg); // TPU_MCU0
+ return IMG_TRUE;
+ }
+ }
+#else /* FW context */
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+# if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS)
+ psRtInfo->uiNumUnits = psBlkTypeDesc->uiNumUnits;
+ return IMG_TRUE;
+# else
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+# endif
+#endif
+ return IMG_FALSE;
+}
+
+#if !defined(__KERNEL__) /* Firmware or User-mode context */
+static IMG_BOOL rgx_hwperf_blk_present_false(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc, void *pvDev_km, RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo)
+{
+ PVR_UNREFERENCED_PARAMETER(psBlkTypeDesc);
+ PVR_UNREFERENCED_PARAMETER(pvDev_km);
+ PVR_UNREFERENCED_PARAMETER(psRtInfo);
+
+ /* Some functions are not used on some BVNCs; silence compiler warnings */
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_perfbus);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_clustergrouping);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_raytracing);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_xttop);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_s7top);
+ PVR_UNREFERENCED_PARAMETER(rgx_hwperf_blk_present_not_s7top);
+
+ return IMG_FALSE;
+}
+
+/* Used to instantiate a null row in the block type model table below where
+ * the block is not supported for a given build BVNC in the firmware/user-mode
+ * context. This is needed because the block-ID to block-type lookup also uses
+ * this table and clients may try to access blocks not present in the
+ * hardware. */
+#define RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(_blkid) {_blkid, 0, 0, 0, 0, 0, 0, 0, 0, #_blkid, NULL, rgx_hwperf_blk_present_false}
+
+#endif
+
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL struct PFNs pfnIsBlkPresent() end
+ *****************************************************************************/
+
+/*****************************************************************************
+ RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] table
+
+ This table holds the entries for the performance counter block type model.
+ Where a block is not present on the RGX device in question,
+ pfnIsBlkPresent() returns false; where it is valid and present, it returns
+ true. Columns marked ** hold default values: at run time the caller should
+ instead use the value returned in RGX_HWPERF_CNTBLK_RT_INFO by
+ pfnIsBlkPresent(). These columns are only valid for compile-time
+ BVNC-configured contexts.
+
+ Order of table rows must match order of counter block IDs in the enumeration
+ RGX_HWPERF_CNTBLK_ID.
+ *****************************************************************************/
+
+static const RGXFW_HWPERF_CNTBLK_TYPE_MODEL gasCntBlkTypeModel[] =
+{
+ /* uiCntBlkIdBase, uiIndirectReg, uiPerfReg, uiSelect0BaseReg,
+ * uiCounter0BaseReg, uiNumCounters, uiNumUnits**, uiSelectRegModeShift,
+ * uiSelectRegOffsetShift, pszBlockNameComment, pfnIsBlkPowered,
+ * pfnIsBlkPresent */
+ /*RGX_CNTBLK_ID_TA*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TA, 0, /* direct */ RGX_CR_TA_PERF, RGX_CR_TA_PERF_SELECT0, RGX_CR_TA_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_TA_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TA),
+#endif
+
+ /*RGX_CNTBLK_ID_RASTER*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_RASTER, 0, /* direct */ RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER),
+#endif
+
+ /*RGX_CNTBLK_ID_HUB*/
+#if !defined(RGX_FEATURE_CLUSTER_GROUPING) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_HUB, 0, /* direct */ RGX_CR_HUB_BIFPMCACHE_PERF, RGX_CR_HUB_BIFPMCACHE_PERF_SELECT0, RGX_CR_HUB_BIFPMCACHE_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_HUB_BIFPMCACHE_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_not_clustergrouping },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_HUB),
+#endif
+
+ /*RGX_CNTBLK_ID_TORNADO*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TORNADO, 0, /* direct */ RGX_CR_TORNADO_PERF, RGX_CR_TORNADO_PERF_SELECT0, RGX_CR_TORNADO_PERF_COUNTER_0, 4, 1, 21, 4, "RGX_CR_TORNADO_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_xttop },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TORNADO),
+#endif
+
+ /*RGX_CNTBLK_ID_JONES*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_JONES, 0, /* direct */ RGX_CR_JONES_PERF, RGX_CR_JONES_PERF_SELECT0, RGX_CR_JONES_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_JONES_PERF", rgxfw_hwperf_pow_st_direct, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_JONES),
+#endif
+
+ /*RGX_CNTBLK_ID_BF RGX_CNTBLK_ID_BT RGX_CNTBLK_ID_RT RGX_CNTBLK_ID_SH*/
+#if defined(RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_BF, 0, /* direct */ DPX_CR_BF_PERF, DPX_CR_BF_PERF_SELECT0, DPX_CR_BF_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_BF_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+ {RGX_CNTBLK_ID_BT, 0, /* direct */ DPX_CR_BT_PERF, DPX_CR_BT_PERF_SELECT0, DPX_CR_BT_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_BT_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+ {RGX_CNTBLK_ID_RT, 0, /* direct */ DPX_CR_RT_PERF, DPX_CR_RT_PERF_SELECT0, DPX_CR_RT_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_RT_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+ {RGX_CNTBLK_ID_SH, 0, /* direct */ RGX_CR_SH_PERF, RGX_CR_SH_PERF_SELECT0, RGX_CR_SH_PERF_COUNTER_0, 4, 1, 21, 3, "RGX_CR_SH_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BF),
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BT),
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RT),
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_SH),
+#endif
+
+ /*RGX_CNTBLK_ID_TPU_MCU0*/
+#if !defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) && defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_MCU_L0_PERF_INDIRECT, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_not_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TPU_MCU0, RGX_CR_TPU_PERF_INDIRECT, RGX_CR_TPU_MCU_L0_PERF, RGX_CR_TPU_MCU_L0_PERF_SELECT0, RGX_CR_TPU_MCU_L0_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 21, 3, "RGX_CR_TPU_MCU_L0_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TPU_MCU0),
+#endif
+
+ /*RGX_CNTBLK_ID_USC0*/
+#if defined(RGX_FEATURE_PERFBUS) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_USC0, RGX_CR_USC_PERF_INDIRECT, RGX_CR_USC_PERF, RGX_CR_USC_PERF_SELECT0, RGX_CR_USC_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_USC_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_perfbus },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_USC0),
+#endif
+
+ /*RGX_CNTBLK_ID_TEXAS0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS3_PERF_INDIRECT, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_PHANTOM_INDIRECT_BY_DUST, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_TEXAS0, RGX_CR_TEXAS_PERF_INDIRECT, RGX_CR_TEXAS_PERF, RGX_CR_TEXAS_PERF_SELECT0, RGX_CR_TEXAS_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 31, 3, "RGX_CR_TEXAS_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_TEXAS0),
+#endif
+
+ /*RGX_CNTBLK_ID_RASTER0*/
+#if defined(RGX_FEATURE_XT_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_RASTER0, RGX_CR_RASTERISATION_PERF_INDIRECT, RGX_CR_RASTERISATION_PERF, RGX_CR_RASTERISATION_PERF_SELECT0, RGX_CR_RASTERISATION_PERF_COUNTER_0, 4, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_RASTERISATION_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_xttop },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_RASTER0),
+#endif
+
+ /*RGX_CNTBLK_ID_BLACKPEARL0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_BLACKPEARL0, RGX_CR_BLACKPEARL_PERF_INDIRECT, RGX_CR_BLACKPEARL_PERF, RGX_CR_BLACKPEARL_PERF_SELECT0, RGX_CR_BLACKPEARL_PERF_COUNTER_0, 6, RGX_HWPERF_INDIRECT_BY_PHANTOM, 21, 3, "RGX_CR_BLACKPEARL_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BLACKPEARL0),
+#endif
+
+ /*RGX_CNTBLK_ID_PBE0*/
+#if defined(RGX_FEATURE_S7_TOP_INFRASTRUCTURE) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_PBE0, RGX_CR_PBE_PERF_INDIRECT, RGX_CR_PBE_PERF, RGX_CR_PBE_PERF_SELECT0, RGX_CR_PBE_PERF_COUNTER_0, 4, RGX_HWPERF_PHANTOM_INDIRECT_BY_CLUSTER, 21, 3, "RGX_CR_PBE_PERF", rgxfw_hwperf_pow_st_indirect, rgx_hwperf_blk_present_s7top },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_PBE0),
+#endif
+
+ /*RGX_CNTBLK_ID_BX_TU0*/
+#if defined(RGX_FEATURE_RAY_TRACING) || defined(__KERNEL__)
+ {RGX_CNTBLK_ID_BX_TU0, RGX_CR_BX_TU_PERF_INDIRECT, DPX_CR_BX_TU_PERF, DPX_CR_BX_TU_PERF_SELECT0, DPX_CR_BX_TU_PERF_COUNTER_0, 4, RGX_HWPERF_DOPPLER_BX_TU_BLKS, 21, 3, "RGX_CR_BX_TU_PERF", rgxfw_hwperf_pow_st_gandalf, rgx_hwperf_blk_present_raytracing },
+#else
+ RGXFW_HWPERF_CNTBLK_TYPE_UNSUPPORTED(RGX_CNTBLK_ID_BX_TU0),
+#endif
+};
+
+
+IMG_INTERNAL IMG_UINT32
+RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel)
+{
+ *ppsModel = gasCntBlkTypeModel;
+ return ARRAY_SIZE(gasCntBlkTypeModel);
+}
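+
+/* Illustrative sketch of a caller walking the returned block type model.
+ * Variable names (and the pvDev_km handle) are hypothetical.
+ *
+ *   const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *psModel;
+ *   IMG_UINT32 i, uiCount = RGXGetHWPerfBlockConfig(&psModel);
+ *   for (i = 0; i < uiCount; i++)
+ *   {
+ *       RGX_HWPERF_CNTBLK_RT_INFO sRtInfo;
+ *       if (psModel[i].pfnIsBlkPresent(&psModel[i], pvDev_km, &sRtInfo))
+ *       {
+ *           block is present with sRtInfo.uiNumUnits instances
+ *       }
+ *   }
+ */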
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.h b/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.h
new file mode 100644
index 00000000000000..5f563c8fd5085b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_hwperf_table.h
@@ -0,0 +1,111 @@
+/*************************************************************************/ /*!
+@File
+@Title HWPerf counter table header
+
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Utility functions used internally for HWPerf data retrieval
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_HWPERF_TABLE_H__)
+#define __RGX_HWPERF_TABLE_H__
+
+#include "img_types.h"
+#include "rgx_fwif_hwperf.h"
+
+
+/*****************************************************************************/
+
+/* Forward declaration */
+typedef struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_ RGXFW_HWPERF_CNTBLK_TYPE_MODEL;
+
+/* Function pointer type for functions to check dynamic power state of
+ * counter block instance. Used only in firmware. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_POWERED)(
+ RGX_HWPERF_CNTBLK_ID eBlkType,
+ IMG_UINT8 ui8UnitId);
+
+/* Counter block run-time info */
+typedef struct _RGX_HWPERF_CNTBLK_RT_INFO_
+{
+ IMG_UINT32 uiNumUnits; /* Number of instances of this block type in the core */
+} RGX_HWPERF_CNTBLK_RT_INFO;
+
+/* Function pointer type for functions to check block is valid and present
+ * on that RGX Device at runtime. It may have compile logic or run-time
+ * logic depending on where the code executes: server, srvinit or firmware.
+ * Values in the psRtInfo output parameter are only valid if true returned. */
+typedef IMG_BOOL (*PFN_RGXFW_HWPERF_CNTBLK_PRESENT)(
+ const struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_* psBlkTypeDesc,
+ void *pvDev_km,
+ RGX_HWPERF_CNTBLK_RT_INFO *psRtInfo);
+
+/* This structure encodes properties of a type of performance counter block.
+ * The structure is sometimes referred to as a block type descriptor. The
+ * properties contained in this structure represent the columns in the
+ * block type model table below. Their values vary depending on
+ * the build BVNC and core type.
+ * Each direct block has a unique type descriptor and each indirect group has
+ * a type descriptor. */
+struct _RGXFW_HWPERF_CNTBLK_TYPE_MODEL_
+{
+ /* Could use RGXFW_ALIGN_DCACHEL here but then we would waste 40% of the cache line? */
+ IMG_UINT32 uiCntBlkIdBase; /* The starting block id for this block type */
+ IMG_UINT32 uiIndirectReg; /* 0 if direct type otherwise the indirect control register to select indirect unit */
+ IMG_UINT32 uiPerfReg; /* RGX_CR_*_PERF register for this block type */
+ IMG_UINT32 uiSelect0BaseReg; /* RGX_CR_*_PERF_SELECT0 register for this block type */
+ IMG_UINT32 uiCounter0BaseReg; /* RGX_CR_*_PERF_COUNTER_0 register for this block type */
+ IMG_UINT8 uiNumCounters; /* Number of counters in this block type */
+ IMG_UINT8 uiNumUnits; /* Number of instances of this block type in the core */
+ IMG_UINT8 uiSelectRegModeShift; /* Mode field shift value of select registers */
+ IMG_UINT8 uiSelectRegOffsetShift; /* Interval between select registers, either 8 bytes or 16, hence << 3 or << 4 */
+ IMG_CHAR pszBlockNameComment[30]; /* Name of the PERF register. Used while dumping the perf counters to pdumps */
+ PFN_RGXFW_HWPERF_CNTBLK_POWERED pfnIsBlkPowered; /* A function to determine dynamic power state for the block type */
+ PFN_RGXFW_HWPERF_CNTBLK_PRESENT pfnIsBlkPresent; /* A function to determine presence on RGX Device at run-time */
+};
+
+/*****************************************************************************/
+
+IMG_INTERNAL IMG_UINT32 RGXGetHWPerfBlockConfig(const RGXFW_HWPERF_CNTBLK_TYPE_MODEL **ppsModel);
+
+
+#endif /* __RGX_HWPERF_TABLE_H__ */
+
+/******************************************************************************
+ End of file (rgx_hwperf_table.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_memallocflags.h b/drivers/gpu/drm/img-rogue/1.10/rgx_memallocflags.h
new file mode 100644
index 00000000000000..130eb44c5dc667
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_memallocflags.h
@@ -0,0 +1,49 @@
+/**************************************************************************/ /*!
+@File
+@Title RGX memory allocation flags
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RGX_MEMALLOCFLAGS_H_
+#define _RGX_MEMALLOCFLAGS_H_
+
+#define PMMETA_PROTECT (1 << 0) /* Memory that only the PM and Meta can access */
+#define FIRMWARE_CACHED (1 << 1) /* Memory that is cached in META/MIPS */
+
+#endif
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_meta.h b/drivers/gpu/drm/img-rogue/1.10/rgx_meta.h
new file mode 100644
index 00000000000000..fda169d3e561b4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_meta.h
@@ -0,0 +1,456 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX META definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX META helper definitions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_META_H__)
+#define __RGX_META_H__
+
+
+/***** The META HW register definitions in this file are updated manually *****/
+
+
+#include "img_defs.h"
+#include "km/rgxdefs_km.h"
+
+
+/************************************************************************
+* META registers and MACROS
+************************************************************************/
+#define META_CR_CTRLREG_BASE(T) (0x04800000 + 0x1000*(T))
+
+#define META_CR_TXPRIVEXT (0x048000E8)
+#define META_CR_TXPRIVEXT_MINIM_EN (0x1<<7)
+
+#define META_CR_SYSC_JTAG_THREAD (0x04830030)
+#define META_CR_SYSC_JTAG_THREAD_PRIV_EN (0x00000004)
+
+#define META_CR_PERF_COUNT0 (0x0480FFE0)
+#define META_CR_PERF_COUNT1 (0x0480FFE8)
+#define META_CR_PERF_COUNT_CTRL_SHIFT (28)
+#define META_CR_PERF_COUNT_CTRL_MASK (0xF0000000)
+#define META_CR_PERF_COUNT_CTRL_DCACHEHITS (0x8 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEHITS (0x9 << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICACHEMISS (0xA << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_CTRL_ICORE (0xD << META_CR_PERF_COUNT_CTRL_SHIFT)
+#define META_CR_PERF_COUNT_THR_SHIFT (24)
+#define META_CR_PERF_COUNT_THR_MASK (0x0F000000)
+#define META_CR_PERF_COUNT_THR_0 (0x1 << META_CR_PERF_COUNT_THR_SHIFT)
+#define META_CR_PERF_COUNT_THR_1 (0x2 << META_CR_PERF_COUNT_THR_SHIFT)
+
+#define META_CR_TxVECINT_BHALT (0x04820500)
+#define META_CR_PERF_ICORE0 (0x0480FFD0)
+#define META_CR_PERF_ICORE1 (0x0480FFD8)
+#define META_CR_PERF_ICORE_DCACHEMISS (0x8)
+
+/* The CTRL defines above already include META_CR_PERF_COUNT_CTRL_SHIFT, so
+ * they are ORed in directly rather than shifted a second time. */
+#define META_CR_PERF_COUNT(CTRL, THR) ((META_CR_PERF_COUNT_CTRL_##CTRL) | \
+ ((THR) << META_CR_PERF_COUNT_THR_SHIFT))
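+
+/* Illustrative use of the macro above: META_CR_PERF_COUNT(DCACHEHITS, 1)
+ * yields a PERF_COUNT register value that counts data cache hits on
+ * thread 1. */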
+
+#define META_CR_TXUXXRXDT_OFFSET (META_CR_CTRLREG_BASE(0) + 0x0000FFF0)
+#define META_CR_TXUXXRXRQ_OFFSET (META_CR_CTRLREG_BASE(0) + 0x0000FFF8)
+
+#define META_CR_TXUXXRXRQ_DREADY_BIT (0x80000000) /* Poll for done */
+#define META_CR_TXUXXRXRQ_RDnWR_BIT (0x00010000) /* Set for read */
+#define META_CR_TXUXXRXRQ_TX_S (12)
+#define META_CR_TXUXXRXRQ_RX_S (4)
+#define META_CR_TXUXXRXRQ_UXX_S (0)
+
+#define META_CR_TXUIN_ID (0x0) /* Internal ctrl regs */
+#define META_CR_TXUD0_ID (0x1) /* Data unit regs */
+#define META_CR_TXUD1_ID (0x2) /* Data unit regs */
+#define META_CR_TXUA0_ID (0x3) /* Address unit regs */
+#define META_CR_TXUA1_ID (0x4) /* Address unit regs */
+#define META_CR_TXUPC_ID (0x5) /* PC registers */
+
+/* Macros to calculate register access values */
+#define META_CR_CORE_REG(Thr, RegNum, Unit) (((Thr) << META_CR_TXUXXRXRQ_TX_S ) | \
+ ((RegNum) << META_CR_TXUXXRXRQ_RX_S ) | \
+ ((Unit) << META_CR_TXUXXRXRQ_UXX_S))
+
+#define META_CR_THR0_PC META_CR_CORE_REG(0, 0, META_CR_TXUPC_ID)
+#define META_CR_THR0_PCX META_CR_CORE_REG(0, 1, META_CR_TXUPC_ID)
+#define META_CR_THR0_SP META_CR_CORE_REG(0, 0, META_CR_TXUA0_ID)
+
+#define META_CR_THR1_PC META_CR_CORE_REG(1, 0, META_CR_TXUPC_ID)
+#define META_CR_THR1_PCX META_CR_CORE_REG(1, 1, META_CR_TXUPC_ID)
+#define META_CR_THR1_SP META_CR_CORE_REG(1, 0, META_CR_TXUA0_ID)
+
+#define SP_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUA0_ID)
+#define PC_ACCESS(Thread) META_CR_CORE_REG(Thread, 0, META_CR_TXUPC_ID)
+
+#define META_CR_COREREG_ENABLE (0x0000000)
+#define META_CR_COREREG_STATUS (0x0000010)
+#define META_CR_COREREG_DEFR (0x00000A0)
+#define META_CR_COREREG_PRIVEXT (0x00000E8)
+
+#define META_CR_T0ENABLE_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_ENABLE)
+#define META_CR_T0STATUS_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_STATUS)
+#define META_CR_T0DEFR_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_DEFR)
+#define META_CR_T0PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(0) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_T1ENABLE_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_ENABLE)
+#define META_CR_T1STATUS_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_STATUS)
+#define META_CR_T1DEFR_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_DEFR)
+#define META_CR_T1PRIVEXT_OFFSET (META_CR_CTRLREG_BASE(1) + META_CR_COREREG_PRIVEXT)
+
+#define META_CR_TXENABLE_ENABLE_BIT (0x00000001) /* Set if running */
+#define META_CR_TXSTATUS_PRIV (0x00020000)
+#define META_CR_TXPRIVEXT_MINIM (0x00000080)
+
+#define META_MEM_GLOBAL_RANGE_BIT (0x80000000)
+
+
+/************************************************************************
+* META LDR Format
+************************************************************************/
+/* Block header structure */
+typedef struct
+{
+ IMG_UINT32 ui32DevID;
+ IMG_UINT32 ui32SLCode;
+ IMG_UINT32 ui32SLData;
+ IMG_UINT16 ui16PLCtrl;
+ IMG_UINT16 ui16CRC;
+
+} RGX_META_LDR_BLOCK_HDR;
+
+/* High level data stream block structure */
+typedef struct
+{
+ IMG_UINT16 ui16Cmd;
+ IMG_UINT16 ui16Length;
+ IMG_UINT32 ui32Next;
+ IMG_UINT32 aui32CmdData[4];
+
+} RGX_META_LDR_L1_DATA_BLK;
+
+/* High level data stream block structure */
+typedef struct
+{
+ IMG_UINT16 ui16Tag;
+ IMG_UINT16 ui16Length;
+ IMG_UINT32 aui32BlockData[4];
+
+} RGX_META_LDR_L2_DATA_BLK;
+
+/* Config command structure */
+typedef struct
+{
+ IMG_UINT32 ui32Type;
+ IMG_UINT32 aui32BlockData[4];
+
+} RGX_META_LDR_CFG_BLK;
+
+/* Block type definitions */
+#define RGX_META_LDR_COMMENT_TYPE_MASK (0x0010)
+#define RGX_META_LDR_BLK_IS_COMMENT(X) (((X) & RGX_META_LDR_COMMENT_TYPE_MASK) != 0)
+
+/* Command definitions
+ Value Name Description
+ 0 LoadMem Load memory with binary data.
+ 1 LoadCore Load a set of core registers.
+ 2 LoadMMReg Load a set of memory mapped registers.
+ 3 StartThreads Set each thread PC and SP, then enable threads.
+ 4 ZeroMem Zero a memory region.
+ 5 Config Perform a configuration command. */
+#define RGX_META_LDR_CMD_MASK (0x000F)
+
+#define RGX_META_LDR_CMD_LOADMEM (0x0000)
+#define RGX_META_LDR_CMD_LOADCORE (0x0001)
+#define RGX_META_LDR_CMD_LOADMMREG (0x0002)
+#define RGX_META_LDR_CMD_START_THREADS (0x0003)
+#define RGX_META_LDR_CMD_ZEROMEM (0x0004)
+#define RGX_META_LDR_CMD_CONFIG (0x0005)
+
+/* Config Command definitions
+ Value Name Description
+ 0 Pause Pause for x times 100 instructions.
+ 1 Read Read a value from a register - no value return needed.
+ Utilises the side effects of issuing reads to certain registers.
+ 2 Write Write to a memory location.
+ 3 MemSet Set memory to a value.
+ 4 MemCheck Check memory for a specific value. */
+#define RGX_META_LDR_CFG_PAUSE (0x0000)
+#define RGX_META_LDR_CFG_READ (0x0001)
+#define RGX_META_LDR_CFG_WRITE (0x0002)
+#define RGX_META_LDR_CFG_MEMSET (0x0003)
+#define RGX_META_LDR_CFG_MEMCHECK (0x0004)
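+
+/* Illustrative sketch: decoding an L1 data stream block header. The pointer
+ * name is hypothetical.
+ *
+ *   RGX_META_LDR_L1_DATA_BLK *psBlk = ...;
+ *   if (!RGX_META_LDR_BLK_IS_COMMENT(psBlk->ui16Cmd) &&
+ *       ((psBlk->ui16Cmd & RGX_META_LDR_CMD_MASK) == RGX_META_LDR_CMD_LOADMEM))
+ *   {
+ *       load memory with the binary data addressed by this block
+ *   }
+ */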
+
+
+/************************************************************************
+* RGX FW segmented MMU definitions
+************************************************************************/
+/* All threads can access the segment */
+#define RGXFW_SEGMMU_ALLTHRS (0xf << 8)
+/* Writable */
+#define RGXFW_SEGMMU_WRITEABLE (0x1 << 1)
+/* All threads can access and writable */
+#define RGXFW_SEGMMU_ALLTHRS_WRITEABLE (RGXFW_SEGMMU_ALLTHRS | RGXFW_SEGMMU_WRITEABLE)
+
+/* Direct map region 11 used for mapping GPU memory */
+#define RGXFW_SEGMMU_DMAP_GPU_ID (11)
+#define RGXFW_SEGMMU_DMAP_GPU_ADDR_START (0x07800000U)
+
+/* Segment IDs */
+#define RGXFW_SEGMMU_DATA_ID (1)
+#define RGXFW_SEGMMU_BOOTLDR_ID (2)
+#define RGXFW_SEGMMU_TEXT_ID (RGXFW_SEGMMU_BOOTLDR_ID)
+
+#define RGXFW_SEGMMU_META_DM_ID (0x7)
+
+
+/*
+ * The SLC caching strategy in S7 is selected through the segment MMU. All
+ * segments configured through the macro RGXFW_SEGMMU_OUTADDR_TOP are CACHED
+ * in the SLC. The interface has been kept the same to simplify the code
+ * changes. The bifdm argument is ignored (no longer relevant) in S7.
+ */
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(pers, coheren, mmu_ctx) ( (((IMG_UINT64) ((pers) & 0x3)) << 52) | \
+ (((IMG_UINT64) ((mmu_ctx) & 0xFF)) << 44) | \
+ (((IMG_UINT64) ((coheren) & 0x1)) << 40) )
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(0x3, 0x0, mmu_ctx)
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914(mmu_ctx) RGXFW_SEGMMU_OUTADDR_TOP_S7_ERN_45914(0x0, 0x1, mmu_ctx)
+/* Set FW code/data cached in the SLC as default */
+#define RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914(mmu_ctx, bifdm) RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(mmu_ctx | (bifdm&0x0))
+
+/* To configure the Page Catalog and BIF-DM fed into the BIF for Garten accesses through this segment */
+#define RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(pc, bifdm) ( (((IMG_UINT64) ((pc) & 0xF)) << 44) | \
+ (((IMG_UINT64) ((bifdm) & 0xF)) << 40) )
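+
+/* Worked example (illustrative): RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(2, 7)
+ * places page catalogue 2 in bits 47..44 and BIF-DM 7 in bits 43..40 of
+ * the segment output address. */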
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+#if defined(HW_ERN_45914)
+#define RGXFW_SEGMMU_OUTADDR_TOP RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914
+#define RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914
+#else
+#define RGXFW_SEGMMU_OUTADDR_TOP RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7
+#endif
+#endif
+
+
+/* META segments have 4kB minimum size */
+#define RGXFW_SEGMMU_ALIGN (0x1000)
+
+/* Segmented MMU registers (n = segment id) */
+#define META_CR_MMCU_SEGMENTn_BASE(n) (0x04850000 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_LIMIT(n) (0x04850004 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA0(n) (0x04850008 + (n)*0x10)
+#define META_CR_MMCU_SEGMENTn_OUTA1(n) (0x0485000C + (n)*0x10)
+
+/* The following defines must be recalculated if the Meta MMU segments
+ * used to access Host-FW data are changed.
+ * The current combinations are:
+ * - SLC uncached, META cached, FW base address 0x70000000
+ * - SLC uncached, META uncached, FW base address 0xF0000000
+ * - SLC cached, META cached, FW base address 0x10000000
+ * - SLC cached, META uncached, FW base address 0x90000000
+ */
+#define RGXFW_SEGMMU_DATA_BASE_ADDRESS (0x10000000)
+#define RGXFW_SEGMMU_DATA_META_CACHED (0x0)
+#define RGXFW_SEGMMU_DATA_META_UNCACHED (META_MEM_GLOBAL_RANGE_BIT) // 0x80000000
+#define RGXFW_SEGMMU_DATA_META_CACHE_MASK (META_MEM_GLOBAL_RANGE_BIT)
+/* For non-VIVT SLCs the cacheability of the FW data in the SLC is selected
+ * in the PTEs for the FW data, not in the Meta Segment MMU,
+ * which means these defines have no real effect in those cases */
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED (0x0)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED (0x60000000)
+#define RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK (0x60000000)
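+
+/* Worked example (illustrative): the combinations listed above follow from
+ * ORing these defines, e.g.
+ *   RGXFW_SEGMMU_DATA_BASE_ADDRESS | RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED
+ *     = 0x10000000 | 0x60000000 = 0x70000000 (SLC uncached, META cached)
+ * and ORing in RGXFW_SEGMMU_DATA_META_UNCACHED as well gives
+ *     0xF0000000 (SLC uncached, META uncached). */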
+
+
+/************************************************************************
+* RGX FW RGX MMU definitions
+************************************************************************/
+#if defined(RGX_FEATURE_SLC_VIVT) && defined(SUPPORT_TRUSTED_DEVICE)
+
+#define META_MMU_CONTEXT_MAPPING (0x1) /* fw data */
+#define META_MMU_CONTEXT_MAPPING_CODE (0x0) /* fw code */
+
+#else
+
+#define META_MMU_CONTEXT_MAPPING (0x0)
+
+#endif
+
+/************************************************************************
+* RGX FW Bootloader defaults
+************************************************************************/
+#define RGXFW_BOOTLDR_META_ADDR (0x40000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_0 (0xC0000000)
+#define RGXFW_BOOTLDR_DEVV_ADDR_1 (0x000000E1)
+#define RGXFW_BOOTLDR_DEVV_ADDR ((((IMG_UINT64) RGXFW_BOOTLDR_DEVV_ADDR_1) << 32) | RGXFW_BOOTLDR_DEVV_ADDR_0)
+#define RGXFW_BOOTLDR_LIMIT (0x1FFFF000)
+
+/* Bootloader configuration offset is in dwords (0x80 dwords = 512 bytes) */
+#define RGXFW_BOOTLDR_CONF_OFFSET (0x80)
+
+
+/************************************************************************
+* RGX META Stack
+************************************************************************/
+#define RGX_META_STACK_SIZE (0x1000)
+
+/************************************************************************
+ RGX META Core memory
+ ====================
+ Sections:
+ * Stack: Thread internal stack
+ * BSS: Internal/private FW memory (rgxfw_ctl.h and static vars)
+ * CCB Buf: DMA buffer to request CCB data
+ * Code: Functions marked with RGXFW_COREMEM_CODE_<xx>
+
+ +---------+ 0
+ | |
+ | Stack |
+ | |
+ +---------+- RGX_META_COREMEM_2ND_STACK_ADDR
+ * *
+ * 2nd Thr * #if RGXFW_META_SUPPORT_2ND_THREAD
+ * Stack *
+ * *
+ +---------+- RGX_META_COREMEM_BSS_ADDR
+ | |
+ | BSS |
+ | |
+ +---------+- RGX_META_COREMEM_CCBBUF_ADDR
+ * *
+ * CCB Buf * #if RGX_FEATURE_META_DMA
+ * *
+ +---------+- RGX_META_COREMEM_DATA_SIZE
+ | |
+ | Code |
+ | |
+ +---------+
+ / /
+ / Unused /
+ / /
+ +---------+ RGX_META_COREMEM_SIZE
+************************************************************************/
+/* code and data both map to the same physical memory */
+#define RGX_META_COREMEM_CODE_ADDR (0x80000000)
+#define RGX_META_COREMEM_DATA_ADDR (0x82000000)
+#define RGX_META_COREMEM_OFFSET_MASK (0x01ffffff)
+
+#define RGX_META_COREMEM_STACK_ADDR (RGX_META_COREMEM_DATA_ADDR)
+
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ #define RGX_META_COREMEM_STACK_SIZE (RGX_META_STACK_SIZE*2)
+ #define RGX_META_COREMEM_BSS_SIZE (0xF40)
+ #define RGX_META_COREMEM_2ND_STACK_ADDR (RGX_META_COREMEM_STACK_ADDR + RGX_META_STACK_SIZE)
+#else
+ #define RGX_META_COREMEM_STACK_SIZE (RGX_META_STACK_SIZE)
+ #define RGX_META_COREMEM_BSS_SIZE (0xE00)
+#endif
+
+#define RGX_META_COREMEM_BSS_ADDR (RGX_META_COREMEM_STACK_ADDR + RGX_META_COREMEM_STACK_SIZE)
+
+#if defined(RGX_FEATURE_META_DMA)
+ #define RGX_META_COREMEM_CCBBUF_ADDR (RGX_META_COREMEM_BSS_ADDR + RGX_META_COREMEM_BSS_SIZE)
+ #define RGX_META_COREMEM_CCBBUF_SIZE (0x3C0)
+ #define RGXFW_DMA_BLOCK_SIZE (32U)
+ #define RGXFW_DMA_BLOCK_ALIGNMENT_MASK (0xFFFFFFE0)
+#else
+ #define RGX_META_COREMEM_CCBBUF_SIZE (0x0)
+ #define RGXFW_DMA_BLOCK_SIZE (0x0)
+ #define RGXFW_DMA_BLOCK_ALIGNMENT_MASK (0x0)
+#endif
+
+#define RGX_META_COREMEM_DATA_SIZE (RGX_META_COREMEM_STACK_SIZE + RGX_META_COREMEM_BSS_SIZE + RGX_META_COREMEM_CCBBUF_SIZE)
+
+#if defined (RGX_META_COREMEM_CODE)
+#define RGX_META_COREMEM_CODE_SIZE (RGX_META_COREMEM_SIZE - RGX_META_COREMEM_DATA_SIZE)
+#endif
+
+/* Because data and code share the same memory, the base address for code is offset by the size of the data */
+#define RGX_META_COREMEM_CODE_BADDR (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_DATA_SIZE)
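+
+/* Worked example (illustrative): with RGXFW_META_SUPPORT_2ND_THREAD and
+ * RGX_FEATURE_META_DMA both defined,
+ *   RGX_META_COREMEM_DATA_SIZE = 0x2000 + 0xF40 + 0x3C0 = 0x3300
+ * so coremem code starts at RGX_META_COREMEM_CODE_BADDR
+ *   = 0x80000000 + 0x3300 = 0x80003300. */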
+
+#if !defined(__KERNEL__)
+#define RGX_META_IS_COREMEM_CODE(A) ((((IMG_UINT32)A) >= RGX_META_COREMEM_CODE_BADDR) && (((IMG_UINT32)A) < (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_SIZE)))
+#define RGX_META_IS_COREMEM_DATA(A) ((((IMG_UINT32)A) >= RGX_META_COREMEM_DATA_ADDR) && (((IMG_UINT32)A) < (RGX_META_COREMEM_DATA_ADDR + RGX_META_COREMEM_DATA_SIZE)))
+#define RGX_META_IS_COREMEM_FUNC(A) ((((IMG_UINT32)&A) >= RGX_META_COREMEM_CODE_ADDR) && (((IMG_UINT32)&A) < (RGX_META_COREMEM_CODE_ADDR + RGX_META_COREMEM_SIZE*2)))
+#else
+#define RGX_META_IS_COREMEM_CODE(A, B) (((A) >= RGX_META_COREMEM_CODE_ADDR) && ((A) < (RGX_META_COREMEM_CODE_ADDR + (B))))
+#define RGX_META_IS_COREMEM_DATA(A, B) (((A) >= RGX_META_COREMEM_DATA_ADDR) && ((A) < (RGX_META_COREMEM_DATA_ADDR + (B))))
+#endif
+
+/************************************************************************
+* 2nd thread
+************************************************************************/
+#define RGXFW_THR1_PC (0x18930000)
+#define RGXFW_THR1_SP (0x78890000)
+
+/************************************************************************
+* META compatibility
+************************************************************************/
+
+#define META_CR_CORE_ID (0x04831000)
+#define META_CR_CORE_ID_VER_SHIFT (16U)
+#define META_CR_CORE_ID_VER_CLRMSK (0XFF00FFFFU)
+
+#if !defined(__KERNEL__) && defined(RGX_FEATURE_META)
+
+ #if (RGX_FEATURE_META == MTP218)
+ #define RGX_CR_META_CORE_ID_VALUE 0x19
+ #elif (RGX_FEATURE_META == MTP219)
+ #define RGX_CR_META_CORE_ID_VALUE 0x1E
+ #elif (RGX_FEATURE_META == LTP218)
+ #define RGX_CR_META_CORE_ID_VALUE 0x1C
+ #elif (RGX_FEATURE_META == LTP217)
+ #define RGX_CR_META_CORE_ID_VALUE 0x1F
+ #else
+ #error "Unknown META ID"
+ #endif
+#else
+
+ #define RGX_CR_META_MTP218_CORE_ID_VALUE 0x19
+ #define RGX_CR_META_MTP219_CORE_ID_VALUE 0x1E
+ #define RGX_CR_META_LTP218_CORE_ID_VALUE 0x1C
+ #define RGX_CR_META_LTP217_CORE_ID_VALUE 0x1F
+
+#endif
+#define RGXFW_PROCESSOR_META "META"
+
+
+#endif /* __RGX_META_H__ */
+
+/******************************************************************************
+ End of file (rgx_meta.h)
+******************************************************************************/
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_mips.h b/drivers/gpu/drm/img-rogue/1.10/rgx_mips.h
new file mode 100644
index 00000000000000..6f7e2f9a925d2e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_mips.h
@@ -0,0 +1,478 @@
+/*************************************************************************/ /*!
+@File rgx_mips.h
+@Title RGX MIPS definitions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform RGX
+@Description RGX MIPS definitions, user space
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGX_MIPS_H__)
+#define __RGX_MIPS_H__
+
+/*
+ * Utility defines for memory management
+ */
+#define RGXMIPSFW_LOG2_PAGE_SIZE (12)
+#define RGXMIPSFW_LOG2_PAGE_SIZE_64K (16)
+#define RGXMIPSFW_PAGE_SIZE (0x1 << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_PAGE_MASK (RGXMIPSFW_PAGE_SIZE - 1)
+#define RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE (15)
+#define RGXMIPSFW_LOG2_PTE_ENTRY_SIZE (2)
+/* Page mask MIPS register setting for bigger pages */
+#define RGXMIPSFW_PAGE_MASK_16K (0x00007800)
+#define RGXMIPSFW_PAGE_MASK_64K (0x0001F800)
+/* Total number of TLB entries */
+#define RGXMIPSFW_NUMBER_OF_TLB_ENTRIES (16)
+/* "Uncached" caching policy */
+#define RGXMIPSFW_UNCACHED_CACHE_POLICY (0X00000002)
+/* "Write-back write-allocate" caching policy */
+#define RGXMIPSFW_WRITEBACK_CACHE_POLICY (0X00000003)
+/* "Write-through no write-allocate" caching policy */
+#define RGXMIPSFW_WRITETHROUGH_CACHE_POLICY (0X00000001)
+/* Cache policy used by the MIPS when the physical bus is 32 bits wide */
+#define RGXMIPSFW_CACHED_POLICY (RGXMIPSFW_WRITEBACK_CACHE_POLICY)
+/* Cache policy used by the MIPS when the physical bus is wider than 32 bits */
+#define RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT (RGXMIPSFW_WRITETHROUGH_CACHE_POLICY)
+/* Total number of Remap entries */
+#define RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES (2 * RGXMIPSFW_NUMBER_OF_TLB_ENTRIES)
+
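+/* Usage sketch (illustrative only; ui32Addr is a hypothetical byte address):
+ * the masks above allow page-granular arithmetic, e.g. rounding an address
+ * down to its 4k page base and extracting the offset within the page.
+ *
+ *     IMG_UINT32 ui32PageBase   = ui32Addr & ~RGXMIPSFW_PAGE_MASK;
+ *     IMG_UINT32 ui32PageOffset = ui32Addr &  RGXMIPSFW_PAGE_MASK;
+ */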
+
+/*
+ * MIPS EntryLo/PTE format
+ */
+
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_SHIFT (31U)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_CLRMSK (0X7FFFFFFF)
+#define RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN (0X80000000)
+
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_SHIFT (30U)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_CLRMSK (0XBFFFFFFF)
+#define RGXMIPSFW_ENTRYLO_EXEC_INHIBIT_EN (0X40000000)
+
+/* Page Frame Number */
+#define RGXMIPSFW_ENTRYLO_PFN_SHIFT (6)
+#define RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT (12)
+/* Mask used for the MIPS page table when the physical bus is 32 bits wide */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK (0x03FFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE (20)
+/* Mask used for the MIPS page table when the physical bus is wider than 32 bits */
+#define RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT (0x3FFFFFC0)
+#define RGXMIPSFW_ENTRYLO_PFN_SIZE_ABOVE_32BIT (24)
+#define RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT (RGXMIPSFW_ENTRYLO_PFN_ALIGNSHIFT - \
+ RGXMIPSFW_ENTRYLO_PFN_SHIFT)
+
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT (3U)
+#define RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK (0XFFFFFFC7)
+
+#define RGXMIPSFW_ENTRYLO_DIRTY_SHIFT (2U)
+#define RGXMIPSFW_ENTRYLO_DIRTY_CLRMSK (0XFFFFFFFB)
+#define RGXMIPSFW_ENTRYLO_DIRTY_EN (0X00000004)
+
+#define RGXMIPSFW_ENTRYLO_VALID_SHIFT (1U)
+#define RGXMIPSFW_ENTRYLO_VALID_CLRMSK (0XFFFFFFFD)
+#define RGXMIPSFW_ENTRYLO_VALID_EN (0X00000002)
+
+#define RGXMIPSFW_ENTRYLO_GLOBAL_SHIFT (0U)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_CLRMSK (0XFFFFFFFE)
+#define RGXMIPSFW_ENTRYLO_GLOBAL_EN (0X00000001)
+
+#define RGXMIPSFW_ENTRYLO_DVG (RGXMIPSFW_ENTRYLO_DIRTY_EN | \
+ RGXMIPSFW_ENTRYLO_VALID_EN | \
+ RGXMIPSFW_ENTRYLO_GLOBAL_EN)
+#define RGXMIPSFW_ENTRYLO_UNCACHED (RGXMIPSFW_UNCACHED_CACHE_POLICY << \
+ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT)
+#define RGXMIPSFW_ENTRYLO_DVG_UNCACHED (RGXMIPSFW_ENTRYLO_DVG | RGXMIPSFW_ENTRYLO_UNCACHED)
+
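+/* Usage sketch (illustrative only; ui32PhysAddr is a hypothetical 4k-aligned
+ * physical address): a dirty/valid/global uncached EntryLo value can be
+ * built from a physical address with the fields defined above.
+ *
+ *     IMG_UINT32 ui32EntryLo =
+ *         (ui32PhysAddr >> RGXMIPSFW_ADDR_TO_ENTRYLO_PFN_RSHIFT) |
+ *         RGXMIPSFW_ENTRYLO_DVG_UNCACHED;
+ */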
+
+/* Remap Range Config Addr Out */
+/* These defines refer to the upper half of the Remap Range Config register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK (0x0FFFFFF0)
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT (4) /* wrt upper half of the register */
+#define RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT (12)
+#define RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT (RGXMIPSFW_REMAP_RANGE_ADDR_OUT_ALIGNSHIFT - \
+ RGXMIPSFW_REMAP_RANGE_ADDR_OUT_SHIFT)
+
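+/* Usage sketch (illustrative only; ui32PhysAddr is a hypothetical 4k-aligned
+ * physical address): deriving the AddrOut field for the upper half of the
+ * Remap Range Config register from a physical address.
+ *
+ *     IMG_UINT32 ui32AddrOut =
+ *         (ui32PhysAddr >> RGXMIPSFW_ADDR_TO_RR_ADDR_OUT_RSHIFT) &
+ *         RGXMIPSFW_REMAP_RANGE_ADDR_OUT_MASK;
+ */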
+
+/*
+ * Firmware physical layout
+ */
+#define RGXMIPSFW_CODE_BASE_PAGE (0x0)
+#define RGXMIPSFW_CODE_OFFSET (RGXMIPSFW_CODE_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_CODE) || defined(SUPPORT_MIPS_64K_PAGE_KERNEL)
+/* Clean way of getting a 256K allocation (62 + 1 + 1 pages) without using too many ifdefs */
+/* This will need to be changed if the non-contiguous builds reach this amount of pages */
+#define RGXMIPSFW_CODE_NUMPAGES (62)
+#else
+#define RGXMIPSFW_CODE_NUMPAGES (44)
+#endif
+#define RGXMIPSFW_CODE_SIZE (RGXMIPSFW_CODE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE (RGXMIPSFW_CODE_BASE_PAGE + RGXMIPSFW_CODE_NUMPAGES)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_OFFSET (RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES (1)
+#define RGXMIPSFW_EXCEPTIONSVECTORS_SIZE (RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE (RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE + RGXMIPSFW_EXCEPTIONSVECTORS_NUMPAGES)
+#define RGXMIPSFW_BOOT_NMI_CODE_OFFSET (RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_BOOT_NMI_CODE_NUMPAGES (1)
+#define RGXMIPSFW_BOOT_NMI_CODE_SIZE (RGXMIPSFW_BOOT_NMI_CODE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+
+#define RGXMIPSFW_DATA_BASE_PAGE (0x0)
+#define RGXMIPSFW_DATA_OFFSET (RGXMIPSFW_DATA_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#if defined(SUPPORT_MIPS_64K_PAGE_KERNEL)
+/* Clean way of getting a 64K allocation (14 + 1 + 1 pages) without using too many ifdefs */
+#define RGXMIPSFW_DATA_NUMPAGES (14)
+#else
+#define RGXMIPSFW_DATA_NUMPAGES (7)
+#endif
+#define RGXMIPSFW_DATA_SIZE (RGXMIPSFW_DATA_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE (RGXMIPSFW_DATA_BASE_PAGE + RGXMIPSFW_DATA_NUMPAGES)
+#define RGXMIPSFW_BOOT_NMI_DATA_OFFSET (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES (1)
+#define RGXMIPSFW_BOOT_NMI_DATA_SIZE (RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_STACK_BASE_PAGE (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE + RGXMIPSFW_BOOT_NMI_DATA_NUMPAGES)
+#define RGXMIPSFW_STACK_OFFSET (RGXMIPSFW_STACK_BASE_PAGE << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_STACK_NUMPAGES (1)
+#define RGXMIPSFW_STACK_SIZE (RGXMIPSFW_STACK_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+
+/*
+ * Pages to trampoline problematic physical addresses:
+ * - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN : 0x1FC0_0000
+ * - RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN : 0x1FC0_1000
+ * - RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN : 0x1FC0_2000
+ * - (benign trampoline) : 0x1FC0_3000
+ * that would otherwise be erroneously remapped by the MIPS wrapper
+ * (see "Firmware virtual layout and remap configuration" section below)
+ */
+
+#define RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES (2)
+#define RGXMIPSFW_TRAMPOLINE_NUMPAGES (1 << RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES)
+#define RGXMIPSFW_TRAMPOLINE_SIZE (RGXMIPSFW_TRAMPOLINE_NUMPAGES << RGXMIPSFW_LOG2_PAGE_SIZE)
+#define RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE (RGXMIPSFW_TRAMPOLINE_LOG2_NUMPAGES + RGXMIPSFW_LOG2_PAGE_SIZE)
+
+#define RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+#define RGXMIPSFW_TRAMPOLINE_OFFSET(a) (a - RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN)
+
+#define RGXMIPSFW_SENSITIVE_ADDR(a) (RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN == (~((1<<RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE)-1) & a))
+
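+/* Worked example: the trampoline segment spans 2^(2 + 12) = 16k, so
+ * RGXMIPSFW_SENSITIVE_ADDR(0x1FC01000) is true because
+ * 0x1FC01000 & ~0x3FFF == 0x1FC00000 == RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN,
+ * whereas RGXMIPSFW_SENSITIVE_ADDR(0x1FC04000) is false. */
+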
+/*
+ * Firmware virtual layout and remap configuration
+ */
+/*
+ * For each remap region we define:
+ * - the virtual base used by the Firmware to access code/data through that region
+ * - the microAptivAP physical address corresponding to the virtual base address,
+ * used as input address and remapped to the actual physical address
+ * - log2 of size of the region remapped by the MIPS wrapper, i.e. number of bits from
+ * the bottom of the base input address that survive onto the output address
+ * (this defines both the alignment and the maximum size of the remapped region)
+ * - one or more code/data segments within the remapped region
+ */
+
+/* Boot remap setup */
+#define RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE (0xBFC00000)
+#define RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN (0x1FC00000)
+#define RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE (12)
+#define RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE (RGXMIPSFW_BOOT_REMAP_VIRTUAL_BASE)
+
+/* Data remap setup */
+#define RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE (0xBFC01000)
+#define RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN (0x1FC01000)
+#define RGXMIPSFW_DATA_REMAP_LOG2_SEGMENT_SIZE (12)
+#define RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE (RGXMIPSFW_DATA_REMAP_VIRTUAL_BASE)
+
+/* Code remap setup */
+#define RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE (0x9FC02000)
+#define RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN (0x1FC02000)
+#define RGXMIPSFW_CODE_REMAP_LOG2_SEGMENT_SIZE (12)
+#define RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE (RGXMIPSFW_CODE_REMAP_VIRTUAL_BASE)
+
+/* Fixed TLB setup */
+#define RGXMIPSFW_PT_VIRTUAL_BASE (0xCF000000)
+#define RGXMIPSFW_REGISTERS_VIRTUAL_BASE (0xCF400000)
+#define RGXMIPSFW_STACK_VIRTUAL_BASE (0xCF600000)
+
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_CODE)
+#define RGXMIPSFW_NUMBER_OF_RESERVED_TLB (5)
+#else
+#define RGXMIPSFW_NUMBER_OF_RESERVED_TLB (3)
+#endif
+
+/* Firmware heap setup */
+#define RGXMIPSFW_FIRMWARE_HEAP_BASE (0xC0000000)
+#define RGXMIPSFW_CODE_VIRTUAL_BASE (RGXMIPSFW_FIRMWARE_HEAP_BASE)
+/* The data virtual base takes into account the exception vectors page
+ * and the boot code page mapped in the FW heap together with the FW code
+ * (we can only map Firmware code allocation as a whole) */
+#define RGXMIPSFW_DATA_VIRTUAL_BASE (RGXMIPSFW_CODE_VIRTUAL_BASE + RGXMIPSFW_CODE_SIZE + \
+ RGXMIPSFW_EXCEPTIONSVECTORS_SIZE + RGXMIPSFW_BOOT_NMI_CODE_SIZE)
+
+
+/*
+ * Bootloader configuration data
+ */
+/* Bootloader configuration offset within the bootloader/NMI data page */
+#define RGXMIPSFW_BOOTLDR_CONF_OFFSET (0x0)
+/* Offsets of bootloader configuration parameters in 64-bit words */
+#define RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET (0x0)
+#define RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET (0x1)
+#define RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET (0x2)
+#define RGXMIPSFW_RESERVED_FUTURE_OFFSET (0x3)
+#define RGXMIPSFW_FWINIT_VIRTADDR_OFFSET (0x4)
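+
+/* Usage sketch (illustrative only; pui64BootConf is a hypothetical pointer
+ * to the configuration area at RGXMIPSFW_BOOTLDR_CONF_OFFSET within the
+ * bootloader/NMI data page): each parameter is read as the 64-bit word at
+ * its offset, e.g.
+ *
+ *     IMG_UINT64 ui64StackPhyAddr =
+ *         pui64BootConf[RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET];
+ */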
+
+/*
+ * MIPS Fence offset in the bootloader/NMI data page
+ */
+#define RGXMIPSFW_FENCE_OFFSET (0x80)
+
+/*
+ * NMI shared data
+ */
+/* Base address of the shared data within the bootloader/NMI data page */
+#define RGXMIPSFW_NMI_SHARED_DATA_BASE (0x100)
+/* Size used by Debug dump data */
+#define RGXMIPSFW_NMI_SHARED_SIZE (0x2B0)
+/* Offsets in the NMI shared area in 32-bit words */
+#define RGXMIPSFW_NMI_SYNC_FLAG_OFFSET (0x0)
+#define RGXMIPSFW_NMI_STATE_OFFSET (0x1)
+#define RGXMIPSFW_NMI_ERROR_STATE_SET (0x1)
+
+/*
+ * MIPS fault data
+ */
+/* Base address of the fault data within the bootloader/NMI data page */
+#define RGXMIPSFW_FAULT_DATA_BASE (0x404)
+
+/* The definitions that follow are excluded when compiling assembly sources */
+#if !defined (RGXMIPSFW_ASSEMBLY_CODE)
+#include "img_types.h"
+#include "km/rgxdefs_km.h"
+
+#define RGXMIPSFW_GET_OFFSET_IN_DWORDS(offset) ((offset) / sizeof(IMG_UINT32))
+#define RGXMIPSFW_GET_OFFSET_IN_QWORDS(offset) ((offset) / sizeof(IMG_UINT64))
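+
+/* Worked example: RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_FENCE_OFFSET)
+ * yields 0x80 / sizeof(IMG_UINT64) = 16, i.e. the fence is the 64-bit word
+ * at index 16 of the bootloader/NMI data page. */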
+
+/* Used for compatibility checks */
+#define RGXMIPSFW_ARCHTYPE_VER_CLRMSK (0xFFFFE3FFU)
+#define RGXMIPSFW_ARCHTYPE_VER_SHIFT (10U)
+#define RGXMIPSFW_CORE_ID_VALUE (0x001U)
+#define RGXFW_PROCESSOR_MIPS "MIPS"
+
+/* microAptivAP cache line size */
+#define RGXMIPSFW_MICROAPTIVEAP_CACHELINE_SIZE (16U)
+
+/* The SOCIF transactions are identified with the top 16 bits of the physical address emitted by the MIPS */
+#define RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN (16U)
+
+/* Values to put in the MIPS selectors for performance counters */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_ACCESSES_C0 (9U) /* Icache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_ICACHE_MISSES_C1 (9U) /* Icache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_ACCESSES_C0 (10U) /* Dcache accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_DCACHE_MISSES_C1 (11U) /* Dcache misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_ITLB_INSTR_ACCESSES_C0 (5U) /* ITLB instruction accesses in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_INSTR_MISSES_C1 (7U) /* JTLB instruction accesses misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_INSTR_COMPLETED_C0 (1U) /* Instructions completed in COUNTER0 */
+#define RGXMIPSFW_PERF_COUNT_CTRL_JTLB_DATA_MISSES_C1 (8U) /* JTLB data misses in COUNTER1 */
+
+#define RGXMIPSFW_PERF_COUNT_CTRL_EVENT_SHIFT (5U) /* Shift for the Event field in the MIPS perf ctrl registers */
+/* Additional flags for performance counters. See MIPS manual for further reference */
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_USER_MODE (8U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_KERNEL_MODE (2U)
+#define RGXMIPSFW_PERF_COUNT_CTRL_COUNT_EXL (1U)
+
+
+#define RGXMIPSFW_C0_NBHWIRQ 8
+
+/* Macros to decode C0_Cause register */
+#define RGXMIPSFW_C0_CAUSE_EXCCODE(CAUSE) (((CAUSE) & 0x7c) >> 2)
+#define RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR 9
+/* Use only for the Coprocessor Unusable exception */
+#define RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(CAUSE) (((CAUSE) >> 28) & 0x3)
+#define RGXMIPSFW_C0_CAUSE_PENDING_HWIRQ(CAUSE) (((CAUSE) & 0x3fc00) >> 10)
+#define RGXMIPSFW_C0_CAUSE_FDCIPENDING (1 << 21)
+#define RGXMIPSFW_C0_CAUSE_IV (1 << 23)
+#define RGXMIPSFW_C0_CAUSE_IC (1 << 25)
+#define RGXMIPSFW_C0_CAUSE_PCIPENDING (1 << 26)
+#define RGXMIPSFW_C0_CAUSE_TIPENDING (1 << 30)
+#define RGXMIPSFW_C0_CAUSE_BRANCH_DELAY (1 << 31)
+
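+/* Usage sketch (illustrative only; ui32Cause is a hypothetical copy of the
+ * C0_Cause register): detecting a firmware error exception.
+ *
+ *     IMG_BOOL bFWError =
+ *         (RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause) ==
+ *          RGXMIPSFW_C0_CAUSE_EXCCODE_FWERROR) ? IMG_TRUE : IMG_FALSE;
+ */
+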
+/* Macros to decode C0_Debug register */
+#define RGXMIPSFW_C0_DEBUG_EXCCODE(DEBUG) (((DEBUG) >> 10) & 0x1f)
+#define RGXMIPSFW_C0_DEBUG_DSS (1 << 0)
+#define RGXMIPSFW_C0_DEBUG_DBP (1 << 1)
+#define RGXMIPSFW_C0_DEBUG_DDBL (1 << 2)
+#define RGXMIPSFW_C0_DEBUG_DDBS (1 << 3)
+#define RGXMIPSFW_C0_DEBUG_DIB (1 << 4)
+#define RGXMIPSFW_C0_DEBUG_DINT (1 << 5)
+#define RGXMIPSFW_C0_DEBUG_DIBIMPR (1 << 6)
+#define RGXMIPSFW_C0_DEBUG_DDBLIMPR (1 << 18)
+#define RGXMIPSFW_C0_DEBUG_DDBSIMPR (1 << 19)
+#define RGXMIPSFW_C0_DEBUG_IEXI (1 << 20)
+#define RGXMIPSFW_C0_DEBUG_DBUSEP (1 << 21)
+#define RGXMIPSFW_C0_DEBUG_CACHEEP (1 << 22)
+#define RGXMIPSFW_C0_DEBUG_MCHECKP (1 << 23)
+#define RGXMIPSFW_C0_DEBUG_IBUSEP (1 << 24)
+#define RGXMIPSFW_C0_DEBUG_DM (1 << 30)
+#define RGXMIPSFW_C0_DEBUG_DBD (1 << 31)
+
+/* ELF format defines */
+#define ELF_PT_LOAD (0x1U) /* Program header identifier as Load */
+#define ELF_SHT_SYMTAB (0x2U) /* Section identifier as Symbol Table */
+#define ELF_SHT_STRTAB (0x3U) /* Section identifier as String Table */
+#define MAX_STRTAB_NUM (0x8U) /* Maximum number of string table in the firmware ELF file */
+
+
+/* Local redefinitions of the ELF format structures */
+typedef struct
+{
+ IMG_UINT8 ui32Eident[16];
+ IMG_UINT16 ui32Etype;
+ IMG_UINT16 ui32Emachine;
+ IMG_UINT32 ui32Eversion;
+ IMG_UINT32 ui32Eentry;
+ IMG_UINT32 ui32Ephoff;
+ IMG_UINT32 ui32Eshoff;
+ IMG_UINT32 ui32Eflags;
+ IMG_UINT16 ui32Eehsize;
+ IMG_UINT16 ui32Ephentsize;
+ IMG_UINT16 ui32Ephnum;
+ IMG_UINT16 ui32Eshentsize;
+ IMG_UINT16 ui32Eshnum;
+ IMG_UINT16 ui32Eshtrndx;
+} RGX_MIPS_ELF_HDR;
+
+
+typedef struct
+{
+ IMG_UINT32 ui32Stname;
+ IMG_UINT32 ui32Stvalue;
+ IMG_UINT32 ui32Stsize;
+ IMG_UINT8 ui32Stinfo;
+ IMG_UINT8 ui32Stother;
+ IMG_UINT16 ui32Stshndx;
+} RGX_MIPS_ELF_SYM;
+
+
+typedef struct
+{
+ IMG_UINT32 ui32Shname;
+ IMG_UINT32 ui32Shtype;
+ IMG_UINT32 ui32Shflags;
+ IMG_UINT32 ui32Shaddr;
+ IMG_UINT32 ui32Shoffset;
+ IMG_UINT32 ui32Shsize;
+ IMG_UINT32 ui32Shlink;
+ IMG_UINT32 ui32Shinfo;
+ IMG_UINT32 ui32Shaddralign;
+ IMG_UINT32 ui32Shentsize;
+} RGX_MIPS_ELF_SHDR;
+
+typedef struct
+{
+ IMG_UINT32 ui32Ptype;
+ IMG_UINT32 ui32Poffset;
+ IMG_UINT32 ui32Pvaddr;
+ IMG_UINT32 ui32Ppaddr;
+ IMG_UINT32 ui32Pfilesz;
+ IMG_UINT32 ui32Pmemsz;
+ IMG_UINT32 ui32Pflags;
+ IMG_UINT32 ui32Palign;
+} RGX_MIPS_ELF_PROGRAM_HDR;
+
+#define RGXMIPSFW_TLB_GET_MASK(PAGE_MASK) (((PAGE_MASK) >> 13) & 0XFFFFU)
+#define RGXMIPSFW_TLB_GET_PAGE_SIZE(PAGE_MASK) ((((PAGE_MASK) | 0x1FFF) + 1) >> 11)
+#define RGXMIPSFW_TLB_GET_VPN2(ENTRY_HI) ((ENTRY_HI) >> 13)
+#define RGXMIPSFW_TLB_GET_COHERENCY(ENTRY_LO) (((ENTRY_LO) >> 3) & 0x7U)
+#define RGXMIPSFW_TLB_GET_PFN(ENTRY_LO) (((ENTRY_LO) >> 6) & 0XFFFFFU)
+#define RGXMIPSFW_TLB_GET_PA(ENTRY_LO) (((ENTRY_LO) & 0x03FFFFC0) << 6)
+#define RGXMIPSFW_TLB_GET_INHIBIT(ENTRY_LO) (((ENTRY_LO) >> 30) & 0x3U)
+#define RGXMIPSFW_TLB_GET_DGV(ENTRY_LO) ((ENTRY_LO) & 0x7U)
+#define RGXMIPSFW_TLB_GLOBAL (1U)
+#define RGXMIPSFW_TLB_VALID (1U << 1)
+#define RGXMIPSFW_TLB_DIRTY (1U << 2)
+#define RGXMIPSFW_TLB_XI (1U << 30)
+#define RGXMIPSFW_TLB_RI (1U << 31)
+
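+/* Worked example: RGXMIPSFW_TLB_GET_PAGE_SIZE returns the page size in
+ * kilobytes, e.g. for RGXMIPSFW_PAGE_MASK_16K it evaluates to
+ * ((0x00007800 | 0x1FFF) + 1) >> 11 = 16, while a page mask of 0 (4k pages)
+ * gives 4. */
+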
+#define RGXMIPSFW_REMAP_GET_REGION_SIZE(REGION_SIZE_ENCODING) (1 << ((REGION_SIZE_ENCODING + 1) << 1))
+
+typedef struct {
+ IMG_UINT32 ui32TLBPageMask;
+ IMG_UINT32 ui32TLBHi;
+ IMG_UINT32 ui32TLBLo0;
+ IMG_UINT32 ui32TLBLo1;
+} RGX_MIPS_TLB_ENTRY;
+
+typedef struct {
+ IMG_UINT32 ui32RemapAddrIn; /* always 4k aligned */
+ IMG_UINT32 ui32RemapAddrOut; /* always 4k aligned */
+ IMG_UINT32 ui32RemapRegionSize;
+} RGX_MIPS_REMAP_ENTRY;
+
+typedef struct {
+ IMG_UINT32 ui32ErrorState; /* This must come first in the structure */
+ IMG_UINT32 ui32ErrorEPC;
+ IMG_UINT32 ui32StatusRegister;
+ IMG_UINT32 ui32CauseRegister;
+ IMG_UINT32 ui32BadRegister;
+ IMG_UINT32 ui32EPC;
+ IMG_UINT32 ui32SP;
+ IMG_UINT32 ui32Debug;
+ IMG_UINT32 ui32DEPC;
+ IMG_UINT32 ui32BadInstr;
+ IMG_UINT32 ui32UnmappedAddress;
+ RGX_MIPS_TLB_ENTRY asTLB[RGXMIPSFW_NUMBER_OF_TLB_ENTRIES];
+ RGX_MIPS_REMAP_ENTRY asRemap[RGXMIPSFW_NUMBER_OF_REMAP_ENTRIES];
+} RGX_MIPS_STATE;
+
+typedef struct {
+ IMG_UINT32 ui32FaultPageInfo;
+ IMG_UINT32 ui32BadVAddr;
+ IMG_UINT32 ui32EntryLo0;
+ IMG_UINT32 ui32EntryLo1;
+} RGX_MIPS_FAULT_DATA;
+
+#endif /* RGXMIPSFW_ASSEMBLY_CODE */
+
+
+#endif /*__RGX_MIPS_H__*/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_options.h b/drivers/gpu/drm/img-rogue/1.10/rgx_options.h
new file mode 100644
index 00000000000000..8ed11958dda3b2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_options.h
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX build options
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* Each build option listed here is packed into a dword which
+ * provides up to log2(RGX_BUILD_OPTIONS_MASK_KM + 1) flags for KM
+ * and (32 - log2(RGX_BUILD_OPTIONS_MASK_KM + 1)) flags for UM.
+ * The corresponding bit is set if the build option
+ * was enabled at compile time.
+ *
+ * To extract the enabled build flags, enable the INTERNAL_TEST switch in a
+ * client program that includes this header. The client can then test a
+ * specific build flag by reading the bit value at ##OPTIONNAME##_SET_OFFSET
+ * in RGX_BUILD_OPTIONS_KM or RGX_BUILD_OPTIONS.
+ *
+ * IMPORTANT: add new options to unused bits or define a new dword
+ * (e.g. RGX_BUILD_OPTIONS_KM2 or RGX_BUILD_OPTIONS2) so that the bitfield
+ * remains backwards compatible.
+ */
+
+#define RGX_BUILD_OPTIONS_MASK_KM 0x0000FFFFUL
+
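+/* With RGX_BUILD_OPTIONS_MASK_KM = 0x0000FFFF the packing described above
+ * gives log2(0xFFFF + 1) = 16 KM flags (bits 0..15) and 32 - 16 = 16 UM
+ * flags (bits 16..31). Usage sketch (illustrative only) for a client built
+ * with INTERNAL_TEST:
+ *
+ *     IMG_BOOL bPDump =
+ *         (RGX_BUILD_OPTIONS_KM & PDUMP_SET_OFFSET) ? IMG_TRUE : IMG_FALSE;
+ */
+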
+#if defined(NO_HARDWARE) || defined (INTERNAL_TEST)
+ #define NO_HARDWARE_SET_OFFSET OPTIONS_BIT0
+ #define OPTIONS_BIT0 (0x1ul << 0)
+ #if OPTIONS_BIT0 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT0 0x0
+#endif /* NO_HARDWARE */
+
+
+#if defined(PDUMP) || defined (INTERNAL_TEST)
+ #define PDUMP_SET_OFFSET OPTIONS_BIT1
+ #define OPTIONS_BIT1 (0x1ul << 1)
+ #if OPTIONS_BIT1 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT1 0x0
+#endif /* PDUMP */
+
+
+#if defined (INTERNAL_TEST)
+ #define UNUSED_SET_OFFSET OPTIONS_BIT2
+ #define OPTIONS_BIT2 (0x1ul << 2)
+ #if OPTIONS_BIT2 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT2 0x0
+#endif /* INTERNAL_TEST */
+
+/* No longer used */
+#if defined (INTERNAL_TEST)
+ #define OPTIONS_BIT3 (0x1ul << 3)
+ #if OPTIONS_BIT3 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT3 0x0
+#endif
+
+
+#if defined(SUPPORT_RGX) || defined (INTERNAL_TEST)
+ #define SUPPORT_RGX_SET_OFFSET OPTIONS_BIT4
+ #define OPTIONS_BIT4 (0x1ul << 4)
+ #if OPTIONS_BIT4 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT4 0x0
+#endif /* SUPPORT_RGX */
+
+
+#if defined(SUPPORT_SECURE_EXPORT) || defined (INTERNAL_TEST)
+ #define SUPPORT_SECURE_EXPORT_SET_OFFSET OPTIONS_BIT5
+ #define OPTIONS_BIT5 (0x1ul << 5)
+ #if OPTIONS_BIT5 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT5 0x0
+#endif /* SUPPORT_SECURE_EXPORT */
+
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined (INTERNAL_TEST)
+ #define SUPPORT_INSECURE_EXPORT_SET_OFFSET OPTIONS_BIT6
+ #define OPTIONS_BIT6 (0x1ul << 6)
+ #if OPTIONS_BIT6 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT6 0x0
+#endif /* SUPPORT_INSECURE_EXPORT */
+
+
+#if defined(SUPPORT_VFP) || defined (INTERNAL_TEST)
+ #define SUPPORT_VFP_SET_OFFSET OPTIONS_BIT7
+ #define OPTIONS_BIT7 (0x1ul << 7)
+ #if OPTIONS_BIT7 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT7 0x0
+#endif /* SUPPORT_VFP */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION) || defined (INTERNAL_TEST)
+ #define SUPPORT_WORKLOAD_ESTIMATION_OFFSET OPTIONS_BIT8
+ #define OPTIONS_BIT8 (0x1ul << 8)
+ #if OPTIONS_BIT8 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT8 0x0
+#endif /* SUPPORT_WORKLOAD_ESTIMATION */
+#define OPTIONS_WORKLOAD_ESTIMATION_MASK (0x1ul << 8)
+
+#if defined(SUPPORT_PDVFS) || defined (INTERNAL_TEST)
+ #define SUPPORT_PDVFS_OFFSET OPTIONS_BIT9
+ #define OPTIONS_BIT9 (0x1ul << 9)
+ #if OPTIONS_BIT9 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT9 0x0
+#endif /* SUPPORT_PDVFS */
+#define OPTIONS_PDVFS_MASK (0x1ul << 9)
+
+#if defined(DEBUG) || defined (INTERNAL_TEST)
+ #define DEBUG_SET_OFFSET OPTIONS_BIT10
+ #define OPTIONS_BIT10 (0x1ul << 10)
+ #if OPTIONS_BIT10 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT10 0x0
+#endif /* DEBUG */
+/* The bit position of this mask must match DEBUG_SET_OFFSET
+ * whenever that option is defined */
+#define OPTIONS_DEBUG_MASK (0x1ul << 10)
+
+#if defined(SUPPORT_BUFFER_SYNC) || defined (INTERNAL_TEST)
+ #define SUPPORT_BUFFER_SYNC_SET_OFFSET OPTIONS_BIT11
+ #define OPTIONS_BIT11 (0x1ul << 11)
+ #if OPTIONS_BIT11 > RGX_BUILD_OPTIONS_MASK_KM
+ #error "Bit exceeds reserved range"
+ #endif
+#else
+ #define OPTIONS_BIT11 0x0
+#endif /* SUPPORT_BUFFER_SYNC */
+
+#define RGX_BUILD_OPTIONS_KM \
+ (OPTIONS_BIT0 |\
+ OPTIONS_BIT1 |\
+ OPTIONS_BIT2 |\
+ OPTIONS_BIT3 |\
+ OPTIONS_BIT4 |\
+ OPTIONS_BIT6 |\
+ OPTIONS_BIT7 |\
+ OPTIONS_BIT8 |\
+ OPTIONS_BIT9 |\
+ OPTIONS_BIT10 |\
+ OPTIONS_BIT11)
+
+
+#if defined(SUPPORT_PERCONTEXT_FREELIST) || defined (INTERNAL_TEST)
+ #define OPTIONS_BIT31 (0x1ul << 31)
+ #if OPTIONS_BIT31 <= RGX_BUILD_OPTIONS_MASK_KM
+	#error "Bit must not lie within the KM reserved range"
+ #endif
+ #define SUPPORT_PERCONTEXT_FREELIST_SET_OFFSET OPTIONS_BIT31
+#else
+ #define OPTIONS_BIT31 0x0
+#endif /* SUPPORT_PERCONTEXT_FREELIST */
+
+#define _KM_RGX_BUILD_OPTIONS_ RGX_BUILD_OPTIONS
+
+#define RGX_BUILD_OPTIONS (RGX_BUILD_OPTIONS_KM | OPTIONS_BIT31)
+
+#define OPTIONS_STRICT (RGX_BUILD_OPTIONS & \
+ ~(OPTIONS_DEBUG_MASK | \
+ OPTIONS_WORKLOAD_ESTIMATION_MASK | \
+ OPTIONS_PDVFS_MASK))
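+
+/* Usage sketch (illustrative only; ui32OtherOptions is a hypothetical
+ * options word reported by another component): OPTIONS_STRICT masks out the
+ * debug-class bits, so two builds can be compared for compatibility with
+ *
+ *     IMG_BOOL bCompatible =
+ *         ((ui32OtherOptions & OPTIONS_STRICT) ==
+ *          (RGX_BUILD_OPTIONS & OPTIONS_STRICT)) ? IMG_TRUE : IMG_FALSE;
+ */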
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_pdump_panics.h b/drivers/gpu/drm/img-rogue/1.10/rgx_pdump_panics.h
new file mode 100644
index 00000000000000..828e886433bf5f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_pdump_panics.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX PDump panic definitions header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX PDump panic definitions header
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (RGX_PDUMP_PANICS_H_)
+#define RGX_PDUMP_PANICS_H_
+
+
+/*! Unique device-specific IMG_UINT16 panic IDs to identify the cause of an
+ * RGX PDump panic in a PDump script. */
+typedef enum
+{
+ RGX_PDUMP_PANIC_UNDEFINED = 0,
+
+ /* These panics occur when test parameters and driver configuration
+ * enable features that require the firmware and host driver to
+ * communicate. Such features are not supported with off-line playback.
+ */
+ RGX_PDUMP_PANIC_ZSBUFFER_BACKING = 101, /*!< Requests ZSBuffer to be backed with physical pages */
+ RGX_PDUMP_PANIC_ZSBUFFER_UNBACKING = 102, /*!< Requests ZSBuffer to be unbacked */
+ RGX_PDUMP_PANIC_FREELIST_GROW = 103, /*!< Requests an on-demand freelist grow/shrink */
+ RGX_PDUMP_PANIC_FREELISTS_RECONSTRUCTION = 104, /*!< Requests freelists reconstruction */
+ RGX_PDUMP_PANIC_SPARSEMEM_SWAP = 105, /*!< Requests sparse remap memory swap feature */
+} RGX_PDUMP_PANIC;
+
+
+#endif /* RGX_PDUMP_PANICS_H_ */
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgx_tq_shared.h b/drivers/gpu/drm/img-rogue/1.10/rgx_tq_shared.h
new file mode 100644
index 00000000000000..bd3460cca1a895
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgx_tq_shared.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX transfer queue shared
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Shared definitions between client and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGX_TQ_SHARED_H__
+#define __RGX_TQ_SHARED_H__
+
+#define TQ_MAX_PREPARES_PER_SUBMIT 16
+
+#define TQ_PREP_FLAGS_COMMAND_3D 0x0
+#define TQ_PREP_FLAGS_COMMAND_2D 0x1
+#define TQ_PREP_FLAGS_COMMAND_MASK (0xf)
+#define TQ_PREP_FLAGS_COMMAND_SHIFT 0
+#define TQ_PREP_FLAGS_PDUMPCONTINUOUS (1 << 4)
+#define TQ_PREP_FLAGS_START (1 << 5)
+#define TQ_PREP_FLAGS_END (1 << 6)
+
+#define TQ_PREP_FLAGS_COMMAND_SET(m) \
+ ((TQ_PREP_FLAGS_COMMAND_##m << TQ_PREP_FLAGS_COMMAND_SHIFT) & TQ_PREP_FLAGS_COMMAND_MASK)
+
+#define TQ_PREP_FLAGS_COMMAND_IS(m,n) \
+	((((m) & TQ_PREP_FLAGS_COMMAND_MASK) >> TQ_PREP_FLAGS_COMMAND_SHIFT) == TQ_PREP_FLAGS_COMMAND_##n)
+
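+/* Usage sketch (illustrative only): composing the prepare flags for a single
+ * 2D command that both starts and ends a submit, then testing the command
+ * type with the accessor macro.
+ *
+ *     IMG_UINT32 ui32Flags = TQ_PREP_FLAGS_COMMAND_SET(2D) |
+ *                            TQ_PREP_FLAGS_START | TQ_PREP_FLAGS_END;
+ *     IMG_BOOL   bIs2D     = TQ_PREP_FLAGS_COMMAND_IS(ui32Flags, 2D)
+ *                                ? IMG_TRUE : IMG_FALSE;
+ */
+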
+#endif /* __RGX_TQ_SHARED_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxapi_km.h b/drivers/gpu/drm/img-rogue/1.10/rgxapi_km.h
new file mode 100644
index 00000000000000..b1206b0a09481d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxapi_km.h
@@ -0,0 +1,320 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX API Header kernel mode
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exported RGX API details
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXAPI_KM_H__
+#define __RGXAPI_KM_H__
+
+#if defined(SUPPORT_SHARED_SLC)
+/*!
+******************************************************************************
+
+ @Function RGXInitSLC
+
+ @Description   Initialises the SLC after a power up. Calling this function
+                is required when SUPPORT_SHARED_SLC is in use; otherwise it
+                must not be called.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle);
+#endif
+
+#include "rgx_hwperf.h"
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Control API(s)
+ *****************************************************************************/
+
+typedef struct _RGX_HWPERF_DEVICE_
+{
+ IMG_CHAR pszName[20]; /* Helps identify this device uniquely */
+ IMG_HANDLE hDevData; /* Handle for the server */
+
+ struct _RGX_HWPERF_DEVICE_ *psNext;
+} RGX_HWPERF_DEVICE;
+
+typedef struct
+{
+ RGX_HWPERF_DEVICE *psHWPerfDevList;
+} RGX_HWPERF_CONNECTION;
+
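+/* Usage sketch (illustrative only; psConnection is a hypothetical pointer
+ * obtained from RGXHWPerfConnect() below, and ProcessDevice is a
+ * hypothetical callback): the per-device records form a singly linked list
+ * that is walked via psNext.
+ *
+ *     RGX_HWPERF_DEVICE *psDev;
+ *     for (psDev = psConnection->psHWPerfDevList;
+ *          psDev != NULL;
+ *          psDev = psDev->psNext)
+ *     {
+ *         ProcessDevice(psDev->pszName, psDev->hDevData);
+ *     }
+ */
+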
+/**************************************************************************/ /*!
+@Function RGXHWPerfLazyConnect
+@Description    Obtain a HWPerf connection object to the RGX device(s). The
+                connections to the devices are not actually opened until
+                RGXHWPerfOpen() is called.
+@Output ppsHWPerfConnection Address of a HWPerf connection object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfOpen
+@Description    Opens connection(s) to the RGX device(s). A valid handle to
+                the connection object must be provided, which means this
+                function must be preceded by a call to
+                RGXHWPerfLazyConnect().
+@Input psHWPerfConnection HWPerf connection object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION* psHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfConnect
+@Description    Obtain a connection object to the RGX HWPerf module. The
+                allocated connection object(s) reference opened
+                connection(s). Calling this function is equivalent to
+                calling RGXHWPerfLazyConnect() followed by RGXHWPerfOpen().
+                Use this connect when the caller will be retrieving event
+                data.
+@Output ppsHWPerfConnection Address of HWPerf connection object
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfFreeConnection
+@Description Frees the HWPerf connection object
+@Input ppsHWPerfConnection Pointer to connection object as returned
+ from RGXHWPerfLazyConnect()
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfClose
+@Description Closes all the opened connection(s) to RGX device(s)
+@Input psHWPerfConnection Pointer to HWPerf connection object as
+ returned from RGXHWPerfConnect() or RGXHWPerfOpen()
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfDisconnect
+@Description    Disconnect from the RGX device. Calling this function is
+                equivalent to calling RGXHWPerfClose() followed by
+                RGXHWPerfFreeConnection().
+@Input          ppsHWPerfConnection Pointer to HWPerf connection object as
+                                    returned from RGXHWPerfConnect() or
+                                    RGXHWPerfOpen().
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfControl
+@Description Enable or disable the generation of RGX HWPerf event packets.
+ See RGXCtrlHWPerf().
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input eStreamId ID of the HWPerf stream
+@Input bToggle Switch to toggle or apply mask.
+@Input ui64Mask Mask of events to control.
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfControl(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfGetFilter
+@Description Reads HWPerf stream filter where stream is identified by
+ the given stream ID.
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Output         ui64Filter          HWPerf filter value
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfGetFilter(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_UINT64 *ui64Filter
+);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfConfigureAndEnableCounters
+@Description Enable and configure the performance counter block for
+ one or more device layout modules.
+ See RGXConfigureAndEnableHWPerfCounters().
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input ui32NumBlocks Number of elements in the array
+@Input asBlockConfigs Address of the array of configuration blocks
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32NumBlocks,
+ RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs);
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfConfigureAndEnableCustomCounters
+@Description Enable and configure custom performance counters
+@Input psHWPerfConnection Pointer to connection object
+@Input ui16CustomBlockID ID of the custom block to configure
+@Input ui16NumCustomCounters Number of custom counters
+@Input pui32CustomCounterIDs pointer to array containing
+ custom counter IDs
+@Return PVRSRV_ERROR for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfConfigureAndEnableCustomCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT16 ui16CustomBlockID,
+ IMG_UINT16 ui16NumCustomCounters,
+ IMG_UINT32 *pui32CustomCounterIDs);
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfDisableCounters
+@Description Disable the performance counter block for one or more
+ device layout modules. See RGXDisableHWPerfCounters().
+@Input psHWPerfConnection Pointer to HWPerf connection object
+@Input ui32NumBlocks Number of elements in the array
+@Input aeBlockIDs An array of bytes with values taken from
+ the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfDisableCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs);
+
+/**************************************************************************/ /*!
+@Function       RGXHWPerfEnableCounters
+@Description Enable the performance counter block for one or more
+ device layout modules. See RGXEnableHWPerfCounters().
+@Input hDevData Handle to connection/device object
+@Input ui32NumBlocks Number of elements in the array
+@Input aeBlockIDs An array of bytes with values taken from
+ the RGX_HWPERF_CNTBLK_ID enumeration.
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXHWPerfEnableCounters(
+ IMG_HANDLE hDevData,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs);
+
+/******************************************************************************
+ * RGX HW Performance Profiling Retrieval API(s)
+ *
+ * The client must ensure that its use of this acquire/release API for a
+ * single connection/stream is not shared between multiple execution
+ * contexts, e.g. between a kernel thread and an ISR handler. It is the
+ * client's responsibility to ensure this API is not interrupted by a
+ * higher priority thread/ISR.
+ *****************************************************************************/
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfAcquireEvents
+@Description When there is data available to read this call returns with OK
+ and the address and length of the data buffer the
+ client can safely read. This buffer may contain one or more
+ event packets.
+                When there is no data to read, this call returns with OK
+                and sets *pui32BufLen to 0 on exit.
+ Clients must pair this call with a ReleaseEvents call.
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Output ppBuf Address of a pointer to a byte buffer. On exit
+ it contains the address of buffer to read from
+@Output pui32BufLen Pointer to an integer. On exit it is the size
+ of the data to read from the buffer
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_PBYTE* ppBuf,
+ IMG_UINT32* pui32BufLen);
+
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfReleaseEvents
+@Description Called after client has read the event data out of the buffer
+ retrieved from the Acquire Events call to release resources.
+@Input hDevData Handle to connection/device object
+@Input eStreamId ID of the HWPerf stream
+@Return PVRSRV_ERROR: for system error codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId);
+
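+/* Usage sketch (illustrative only; hDevData and eStreamId are assumed to
+ * come from an open connection, and ProcessEventPackets is a hypothetical
+ * callback): a typical drain loop pairs every acquire with a release and
+ * stops once the returned length is 0.
+ *
+ *     IMG_PBYTE  pBuf;
+ *     IMG_UINT32 ui32Len;
+ *
+ *     for (;;)
+ *     {
+ *         if (RGXHWPerfAcquireEvents(hDevData, eStreamId,
+ *                                    &pBuf, &ui32Len) != PVRSRV_OK)
+ *             break;
+ *         if (ui32Len > 0)
+ *             ProcessEventPackets(pBuf, ui32Len);
+ *         RGXHWPerfReleaseEvents(hDevData, eStreamId);
+ *         if (ui32Len == 0)
+ *             break;
+ *     }
+ */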
+
+/**************************************************************************/ /*!
+@Function RGXHWPerfConvertCRTimeStamp
+@Description Converts the timestamp given by FW events to the common OS
+ timestamp. The first three inputs are obtained via
+ a CLK_SYNC event, ui64CRTimeStamp is the CR timestamp
+ from the FW event to be converted.
+@Input ui32ClkSpeed Clock speed given by sync event
+@Input ui64CorrCRTimeStamp CR Timestamp given by sync event
+@Input ui64CorrOSTimeStamp Correlating OS Timestamp given by sync
+ event
+@Input ui64CRTimeStamp CR Timestamp to convert
+@Return IMG_UINT64: Calculated OS Timestamp
+ */ /**************************************************************************/
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+ IMG_UINT32 ui32ClkSpeed,
+ IMG_UINT64 ui64CorrCRTimeStamp,
+ IMG_UINT64 ui64CorrOSTimeStamp,
+ IMG_UINT64 ui64CRTimeStamp);
+
+#endif /* __RGXAPI_KM_H__ */
+
+/******************************************************************************
+ End of file (rgxapi_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.c b/drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.c
new file mode 100644
index 00000000000000..dba81c6304d4dc
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.c
@@ -0,0 +1,347 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Breakpoint routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Breakpoint routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbreakpoint.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxmem.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGXFWIF_DM eFWDataMaster,
+ IMG_UINT32 ui32BPAddr,
+ IMG_UINT32 ui32HandlerAddr,
+ IMG_UINT32 ui32DataMaster)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+ if (psDevInfo->bBPSet == IMG_TRUE)
+ {
+ eError = PVRSRV_ERROR_BP_ALREADY_SET;
+ goto unlock;
+ }
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.ui32BPAddr = ui32BPAddr;
+ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = ui32HandlerAddr;
+ sBPCmd.uCmdData.sBPData.ui32BPDM = ui32DataMaster;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+                          0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ eFWDataMaster,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ goto unlock;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, eFWDataMaster, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXSetBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ goto unlock;
+ }
+
+ psDevInfo->eBPDM = eFWDataMaster;
+ psDevInfo->bBPSet = IMG_TRUE;
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.ui32BPAddr = 0;
+ sBPCmd.uCmdData.sBPData.ui32HandlerAddr = 0;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_WRITE | RGXFWIF_BPDATA_FLAGS_CTL;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+                          0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ psDevInfo->eBPDM,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ goto unlock;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXClearBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ goto unlock;
+ }
+
+ psDevInfo->bBPSet = IMG_FALSE;
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+ if (psDevInfo->bBPSet == IMG_FALSE)
+ {
+ eError = PVRSRV_ERROR_BP_NOT_SET;
+ goto unlock;
+ }
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_TRUE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+                          0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ psDevInfo->eBPDM,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ goto unlock;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXEnableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ goto unlock;
+ }
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+ if (psDevInfo->bBPSet == IMG_FALSE)
+ {
+ eError = PVRSRV_ERROR_BP_NOT_SET;
+ goto unlock;
+ }
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.bEnable = IMG_FALSE;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_CTL;
+
+ RGXSetFirmwareAddress(&sBPCmd.uCmdData.sBPData.psFWMemContext,
+ psFWMemContextMemDesc,
+                          0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ eError = RGXScheduleCommand(psDevInfo,
+ psDevInfo->eBPDM,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableBreakpointKM: RGXScheduleCommand failed. Error:%u", eError));
+ goto unlock;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDevInfo, psDevInfo->eBPDM, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDisableBreakpointKM: Wait for completion aborted with error (%u)", eError));
+ goto unlock;
+ }
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32TempRegs,
+ IMG_UINT32 ui32SharedRegs)
+{
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sBPCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ sBPCmd.eCmdType = RGXFWIF_KCCB_CMD_BP;
+ sBPCmd.uCmdData.sBPData.ui32Flags = RGXFWIF_BPDATA_FLAGS_REGS;
+ sBPCmd.uCmdData.sBPData.ui32TempRegs = ui32TempRegs;
+ sBPCmd.uCmdData.sBPData.ui32SharedRegs = ui32SharedRegs;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hBPLock);
+#endif
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sBPCmd,
+ sizeof(sBPCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXOverallocateBPRegistersKM: RGXScheduleCommand failed. Error:%u", eError));
+ goto unlock;
+ }
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXOverallocateBPRegistersKM: Wait for completion aborted with error (%u)", eError));
+ goto unlock;
+ }
+
+unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hBPLock);
+#endif
+
+ return eError;
+}
+
+
+/******************************************************************************
+ End of file (rgxbreakpoint.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.h b/drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.h
new file mode 100644
index 00000000000000..fc665688700998
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxbreakpoint.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX breakpoint functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX breakpoint functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBREAKPOINT_H__)
+#define __RGXBREAKPOINT_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXSetBreakpointKM
+
+ @Description
+ Server-side implementation of RGXSetBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input          hMemCtxPrivData - memory context private data
+ @Input          eFWDataMaster - Firmware Data Master to schedule the command for
+ @Input          ui32BPAddr - Address of breakpoint
+ @Input          ui32HandlerAddr - Address of breakpoint handler
+ @Input          ui32DataMaster - Data Master to which the breakpoint applies
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGXFWIF_DM eFWDataMaster,
+ IMG_UINT32 ui32BPAddr,
+ IMG_UINT32 ui32HandlerAddr,
+ IMG_UINT32 ui32DataMaster);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXClearBreakpointKM
+
+ @Description
+ Server-side implementation of RGXClearBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXEnableBreakpointKM
+
+ @Description
+ Server-side implementation of RGXEnableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDisableBreakpointKM
+
+ @Description
+ Server-side implementation of RGXDisableBreakpoint
+
+ @Input psDeviceNode - RGX Device node
+ @Input hMemCtxPrivData - memory context private data
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableBreakpointKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXOverallocateBPRegistersKM
+
+ @Description
+ Server-side implementation of RGXOverallocateBPRegisters
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32TempRegs - Number of temporary registers to overallocate
+ @Input ui32SharedRegs - Number of shared registers to overallocate
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXOverallocateBPRegistersKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32TempRegs,
+ IMG_UINT32 ui32SharedRegs);
+#endif /* __RGXBREAKPOINT_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxbvnc.c b/drivers/gpu/drm/img-rogue/1.10/rgxbvnc.c
new file mode 100644
index 00000000000000..c63e6588123013
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxbvnc.c
@@ -0,0 +1,581 @@
+/*************************************************************************/ /*!
+@File
+@Title BVNC handling specific routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Functions used for BVNC related work
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxbvnc.h"
+#define _RGXBVNC_C_
+#include "rgx_bvnc_table_km.h"
+#undef _RGXBVNC_C_
+#include "oskm_apphint.h"
+#include "pvrsrv.h"
+
+#define MAX_BVNC_LEN (12)
+#define RGXBVNC_BUFFER_SIZE (((PVRSRV_MAX_DEVICES)*(MAX_BVNC_LEN))+1)
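+/* Sizing sketch: one "B.V.N.C" entry needs at most MAX_BVNC_LEN (12)
+ * characters including its separating comma, so assuming, for illustration,
+ * a PVRSRV_MAX_DEVICES of 16 (the real value is build-configuration
+ * dependent) the AppHint buffer is 16 * 12 + 1 = 193 bytes, the +1 covering
+ * the NUL terminator. */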
+
+/* List of BVNC strings given as module param & count */
+static IMG_PCHAR gazRGXBVNCList[PVRSRV_MAX_DEVICES];
+static IMG_UINT32 gui32RGXLoadTimeDevCount;
+
+/* This function binary-searches the given table for the search value; each
+ * row is uiRowCount IMG_UINT64 words wide, sorted ascending on its first
+ * word, which holds the key */
+static IMG_UINT64* _RGXSearchBVNCTable( IMG_UINT64 *pui64Array,
+ IMG_UINT uiEnd,
+ IMG_UINT64 ui64SearchValue,
+ IMG_UINT uiRowCount)
+{
+ IMG_UINT uiStart = 0, index;
+ IMG_UINT64 value, *pui64Ptr = NULL;
+
+ while (uiStart < uiEnd)
+ {
+ index = (uiStart + uiEnd)/2;
+ pui64Ptr = pui64Array + (index * uiRowCount);
+ value = *(pui64Ptr);
+
+ if (value == ui64SearchValue)
+ {
+ return pui64Ptr;
+ }
+
+ if (value > ui64SearchValue)
+ {
+ uiEnd = index;
+		}
+		else
+		{
+ uiStart = index + 1;
+ }
+ }
+ return NULL;
+}
+#define RGX_SEARCH_BVNC_TABLE(t, b) (_RGXSearchBVNCTable((IMG_UINT64*)(t), \
+ sizeof((t))/sizeof((t)[0]), (b), \
+ sizeof((t)[0])/sizeof(IMG_UINT64)) )
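+/* Usage sketch (values invented for illustration): the table is a flat array
+ * of rows, each uiRowCount IMG_UINT64 words wide, sorted ascending on the
+ * first word (the packed BVNC key), e.g. with three-word rows of
+ * { key, features, feature-values }:
+ *
+ *     static const IMG_UINT64 aui64Example[][3] = {
+ *         { 0x0004002B0006003EULL, 0x1ULL, 0x2ULL },
+ *         { 0x0016002E00360142ULL, 0x3ULL, 0x4ULL },
+ *     };
+ *     IMG_UINT64 *pui64Row = RGX_SEARCH_BVNC_TABLE(aui64Example, ui64BVNC);
+ *
+ * A hit returns a pointer to the first word of the matching row; a miss
+ * returns NULL. */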
+
+
+#if defined(DEBUG)
+
+#define PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, szShortName, Feature) \
+ if ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] != RGX_FEATURE_VALUE_DISABLED ) \
+ { PVR_LOG(("%s %d", szShortName, psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX])); } \
+ else \
+ { PVR_LOG(("%s N/A", szShortName)); }
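+
+/* Expansion sketch: PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS)
+ * reads ui32FeaturesValues[RGX_FEATURE_NUM_CLUSTERS_IDX] and logs either
+ * "NC: <value>" or "NC: N/A" when that feature value is disabled. */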
+
+static void _RGXBvncDumpParsedConfig(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ IMG_UINT64 ui64Mask = 0, ui32IdOrNameIdx = 1;
+
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NC: ", NUM_CLUSTERS);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "CSF: ", CDM_CONTROL_STREAM_FORMAT);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "FBCDCA: ", FBCDC_ARCHITECTURE);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMB: ", META_COREMEM_BANKS);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MCMS: ", META_COREMEM_SIZE);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "MDMACnt: ", META_DMA_CHANNEL_COUNT);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "NIIP: ", NUM_ISP_IPP_PIPES);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "PBW: ", PHYS_BUS_WIDTH);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "STEArch: ", SCALABLE_TE_ARCH);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SVCEA: ", SCALABLE_VCE);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCBanks: ", SLC_BANKS);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCCLS: ", SLC_CACHE_LINE_SIZE_BITS);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "SLCSize: ", SLC_SIZE_IN_BYTES);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "VASB: ", VIRTUAL_ADDRESS_SPACE_BITS);
+ PVR_LOG_DUMP_FEATURE_VALUE(psDevInfo, "META: ", META);
+
+#if defined(FEATURE_NO_VALUES_NAMES_MAX_IDX)
+ /* Dump the features with no values */
+ ui64Mask = psDevInfo->sDevFeatureCfg.ui64Features;
+ while (ui64Mask)
+ {
+ if (ui64Mask & 0x01)
+ {
+ if (ui32IdOrNameIdx <= FEATURE_NO_VALUES_NAMES_MAX_IDX)
+ {
+ PVR_LOG(("%s", gaszFeaturesNoValuesNames[ui32IdOrNameIdx - 1]));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Feature with Mask doesn't exist: 0x%016" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1))));
+ }
+ }
+ ui64Mask >>= 1;
+ ui32IdOrNameIdx++;
+ }
+#endif
+
+#if defined(ERNSBRNS_IDS_MAX_IDX)
+ /* Dump the ERN and BRN flags for this core */
+ ui64Mask = psDevInfo->sDevFeatureCfg.ui64ErnsBrns;
+ ui32IdOrNameIdx = 1;
+
+ while (ui64Mask)
+ {
+ if (ui64Mask & 0x1)
+ {
+ if (ui32IdOrNameIdx <= ERNSBRNS_IDS_MAX_IDX)
+ {
+				PVR_LOG(("ERN/BRN : %" IMG_UINT64_FMTSPEC, gaui64ErnsBrnsIDs[ui32IdOrNameIdx - 1]));
+ }
+ else
+ {
+ PVR_LOG(("Unknown ErnBrn bit: 0x%0" IMG_UINT64_FMTSPECx, ((IMG_UINT64)1 << (ui32IdOrNameIdx - 1))));
+ }
+ }
+ ui64Mask >>= 1;
+ ui32IdOrNameIdx++;
+ }
+#endif
+
+}
+#endif
+
+static void _RGXBvncParseFeatureValues(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT64 ui64PackedValues)
+{
+ IMG_UINT32 ui32Index;
+
+ /* Read the feature values for the runtime BVNC */
+ for (ui32Index = 0; ui32Index < RGX_FEATURE_WITH_VALUES_MAX_IDX; ui32Index++)
+ {
+ IMG_UINT16 ui16ValueIndex = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[ui32Index]) >> aui16FeaturesWithValuesBitPositions[ui32Index];
+
+ if (ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index])
+ {
+ if (gaFeaturesValues[ui32Index][ui16ValueIndex] == (IMG_UINT16)RGX_FEATURE_VALUE_DISABLED)
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_DISABLED;
+ }
+ else
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = gaFeaturesValues[ui32Index][ui16ValueIndex];
+ }
+ }
+ else
+ {
+ /* This case should never be reached */
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[ui32Index] = RGX_FEATURE_VALUE_INVALID;
+ PVR_DPF((PVR_DBG_ERROR, "%s: Feature with index (%d) decoded wrong value index (%d)", __func__, ui32Index, ui16ValueIndex));
+ PVR_ASSERT(ui16ValueIndex < gaFeaturesValuesMaxIndexes[ui32Index]);
+ }
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_IDX] = RGX_FEATURE_VALUE_DISABLED;
+ }
+
+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount = RGXFWIF_DM_MIN_CNT;
+ psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount = RGXFWIF_DM_MIN_MTS_CNT;
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ /* ui64Features must be already initialized */
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ psDevInfo->sDevFeatureCfg.ui32MAXDMCount += RGXFWIF_RAY_TRACING_DM_CNT;
+ psDevInfo->sDevFeatureCfg.ui32MAXDMMTSCount += RGXFWIF_RAY_TRACING_DM_MTS_CNT;
+ }
+#endif
+
+ /* Get the max number of dusts in the core */
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS))
+ {
+ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = MAX(1, (RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) / 2));
+ }
+ else
+ {
+ /* This case should never be reached as all cores have clusters */
+ psDevInfo->sDevFeatureCfg.ui32MAXDustCount = RGX_FEATURE_VALUE_INVALID;
+ PVR_DPF((PVR_DBG_ERROR, "%s: Number of clusters feature value missing!", __func__));
+ PVR_ASSERT(0);
+ }
+
+
+	/* Convert the SLC size value into bytes (the table stores kilobytes) */
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_BYTES))
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_SLC_SIZE_IN_BYTES_IDX] *= 1024;
+ }
+
+	/* Convert the META coremem size value into bytes (the table stores kilobytes) */
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+ {
+ psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_META_COREMEM_SIZE_IDX] *= 1024;
+ }
+}
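+
+/* Decoding sketch (bit layout invented for illustration): each feature with a
+ * value occupies a bit-field of the packed word, and the mask/position tables
+ * recover an index into that feature's value list:
+ *
+ *     idx   = (ui64PackedValues & aui64FeaturesWithValuesBitMasks[i])
+ *                 >> aui16FeaturesWithValuesBitPositions[i];
+ *     value = gaFeaturesValues[i][idx];
+ *
+ * so a two-bit field holding 1 might select, say, the second entry of the
+ * NUM_CLUSTERS value list. */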
+
+static void _RGXBvncAcquireAppHint(IMG_CHAR *pszBVNCAppHint,
+ IMG_CHAR **apszRGXBVNCList,
+ IMG_UINT32 ui32BVNCListCount,
+ IMG_UINT32 *pui32BVNCCount)
+{
+ IMG_CHAR *pszAppHintDefault = NULL;
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32BVNCIndex = 0;
+ IMG_BOOL bRet;
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ pszAppHintDefault = PVRSRV_APPHINT_RGXBVNC;
+
+ bRet = (IMG_BOOL)OSGetKMAppHintSTRING(pvAppHintState,
+ RGXBVNC,
+ &pszAppHintDefault,
+ pszBVNCAppHint,
+ RGXBVNC_BUFFER_SIZE);
+
+ OSFreeKMAppHintState(pvAppHintState);
+
+ if (!bRet)
+ {
+ *pui32BVNCCount = 0;
+ return;
+ }
+
+ while (*pszBVNCAppHint != '\0')
+ {
+ if (ui32BVNCIndex >= ui32BVNCListCount)
+ {
+ break;
+ }
+ apszRGXBVNCList[ui32BVNCIndex++] = pszBVNCAppHint;
+ while (1)
+ {
+ if (*pszBVNCAppHint == ',')
+ {
+ pszBVNCAppHint[0] = '\0';
+ pszBVNCAppHint++;
+ break;
+			}
+			else if (*pszBVNCAppHint == '\0')
+ {
+ break;
+ }
+ pszBVNCAppHint++;
+ }
+ }
+ *pui32BVNCCount = ui32BVNCIndex;
+}
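+
+/* Usage sketch (values invented for illustration): an AppHint string of
+ * "4.43.6.62,22.46.54.330" is split in place into two NUL-terminated entries,
+ * apszRGXBVNCList[0] = "4.43.6.62" and apszRGXBVNCList[1] = "22.46.54.330",
+ * with *pui32BVNCCount set to 2. */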
+
+/* Function that parses the BVNC List passed as module parameter */
+static PVRSRV_ERROR _RGXBvncParseList(IMG_UINT64 *pB,
+ IMG_UINT64 *pV,
+ IMG_UINT64 *pN,
+ IMG_UINT64 *pC,
+ const IMG_UINT32 ui32RGXDevCount)
+{
+ unsigned int ui32ScanCount = 0;
+ IMG_CHAR *pszBVNCString = NULL;
+
+	if (ui32RGXDevCount == 0)
+	{
+ IMG_CHAR pszBVNCAppHint[RGXBVNC_BUFFER_SIZE];
+ pszBVNCAppHint[0] = '\0';
+ _RGXBvncAcquireAppHint(pszBVNCAppHint, gazRGXBVNCList, PVRSRV_MAX_DEVICES, &gui32RGXLoadTimeDevCount);
+ }
+
+	/* The four components of a BVNC string are B, V, N & C */
+#define RGX_BVNC_INFO_PARAMS (4)
+
+	/* If only one BVNC parameter is specified, it is applied to all RGX
+	 * devices detected */
+ if (1 == gui32RGXLoadTimeDevCount)
+ {
+ pszBVNCString = gazRGXBVNCList[0];
+	}
+	else
+ {
+
+#if defined(DEBUG)
+		int i = 0;
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: No. of BVNC module params : %u", __func__, gui32RGXLoadTimeDevCount));
+		PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC module param list ... ", __func__));
+		for (i = 0; i < gui32RGXLoadTimeDevCount; i++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s, ", gazRGXBVNCList[i]));
+ }
+#endif
+
+		if (gui32RGXLoadTimeDevCount == 0)
+		{
+			return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+		}
+
+ /* Total number of RGX devices detected should always be
+ * less than the gazRGXBVNCList count */
+ if (ui32RGXDevCount < gui32RGXLoadTimeDevCount)
+ {
+ pszBVNCString = gazRGXBVNCList[ui32RGXDevCount];
+		}
+		else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Given module parameters list is shorter than "
+ "number of actual devices", __func__));
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ }
+
+ if (NULL == pszBVNCString)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+
+ /* Parse the given RGX_BVNC string */
+ ui32ScanCount = OSVSScanf(pszBVNCString, "%llu.%llu.%llu.%llu", pB, pV, pN, pC);
+ if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+ {
+ ui32ScanCount = OSVSScanf(pszBVNCString, "%llu.%llup.%llu.%llu", pB, pV, pN, pC);
+ }
+ if (RGX_BVNC_INFO_PARAMS != ui32ScanCount)
+ {
+ return PVRSRV_ERROR_INVALID_BVNC_PARAMS;
+ }
+ PVR_LOG(("BVNC module parameter honoured: %s", pszBVNCString));
+
+ return PVRSRV_OK;
+}
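+
+/* Format sketch (values invented for illustration): the parser first tries
+ * plain "B.V.N.C", e.g. "4.43.6.62" giving B=4, V=43, N=6, C=62, and then
+ * falls back to "B.Vp.N.C" where the version component carries a 'p' suffix,
+ * e.g. "1.82p.4.5" giving B=1, V=82, N=4, C=5. */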
+
+/* This function detects the Rogue variant and configures the
+ * essential config info associated with such a device.
+ * The config info includes features, errata, etc */
+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ static IMG_UINT32 ui32RGXDevCnt;
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT64 ui64BVNC=0, B=0, V=0, N=0, C=0;
+ IMG_UINT64 *pui64Cfg = NULL;
+
+ /* Check for load time RGX BVNC parameter */
+ eError = _RGXBvncParseList(&B,&V,&N,&C, ui32RGXDevCnt);
+ if (PVRSRV_OK == eError)
+ {
+ PVR_LOG(("Read BVNC %" IMG_UINT64_FMTSPEC ".%"
+ IMG_UINT64_FMTSPEC ".%" IMG_UINT64_FMTSPEC ".%"
+ IMG_UINT64_FMTSPEC " from driver load parameter", B, V, N, C));
+
+ /* Extract the BVNC config from the Features table */
+ ui64BVNC = BVNC_PACK(B,0,N,C);
+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Driver parameter BVNC configuration not found!");
+ }
+
+#if !defined(NO_HARDWARE) && defined(SUPPORT_MULTIBVNC_RUNTIME_BVNC_ACQUISITION)
+
+ /* Try to detect the RGX BVNC from the HW device */
+ if ((NULL == pui64Cfg) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ IMG_UINT64 ui32ID;
+ IMG_HANDLE hSysData;
+
+ hSysData = psDeviceNode->psDevConfig->hSysData;
+
+ /* Power-up the device as required to read the registers */
+ if (psDeviceNode->psDevConfig->pfnPrePowerState)
+ {
+ eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+ PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+ PVR_LOGR_IF_ERROR(eError, "pfnPrePowerState ON");
+ }
+
+ if (psDeviceNode->psDevConfig->pfnPostPowerState)
+ {
+ eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_ON,
+ PVRSRV_DEV_POWER_STATE_OFF, IMG_FALSE);
+ PVR_LOGR_IF_ERROR(eError, "pfnPostPowerState ON");
+ }
+
+		/* Read the BVNC: try the new register layout first; if the B field
+		 * is not set, fall back to the old scheme */
+ ui32ID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID__PBVNC);
+
+ if (GET_B(ui32ID))
+ {
+ B = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__BRANCH_ID_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__BRANCH_ID_SHIFT;
+ V = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__VERSION_ID_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__VERSION_ID_SHIFT;
+ N = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS_SHIFT;
+ C = (ui32ID & ~RGX_CR_CORE_ID__PBVNC__CONFIG_ID_CLRMSK) >>
+ RGX_CR_CORE_ID__PBVNC__CONFIG_ID_SHIFT;
+
+ }
+ else
+ {
+ IMG_UINT64 ui32CoreID, ui32CoreRev;
+ ui32CoreRev = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_REVISION);
+ ui32CoreID = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_CORE_ID);
+ B = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MAJOR_CLRMSK) >>
+ RGX_CR_CORE_REVISION_MAJOR_SHIFT;
+ V = (ui32CoreRev & ~RGX_CR_CORE_REVISION_MINOR_CLRMSK) >>
+ RGX_CR_CORE_REVISION_MINOR_SHIFT;
+ N = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_N_CLRMSK) >>
+ RGX_CR_CORE_ID_CONFIG_N_SHIFT;
+ C = (ui32CoreID & ~RGX_CR_CORE_ID_CONFIG_C_CLRMSK) >>
+ RGX_CR_CORE_ID_CONFIG_C_SHIFT;
+ }
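+		/* Field extraction sketch: each CLRMSK constant has the field's bits
+		 * clear and all other bits set, so (reg & ~CLRMSK) >> SHIFT isolates
+		 * one field; e.g. (illustrative values only) a 16-bit field at bit 48
+		 * would use a CLRMSK of 0x0000FFFFFFFFFFFF and a SHIFT of 48.
+		 */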
+ PVR_LOG(("Read BVNC %" IMG_UINT64_FMTSPEC ".%"
+ IMG_UINT64_FMTSPEC ".%" IMG_UINT64_FMTSPEC ".%"
+ IMG_UINT64_FMTSPEC " from HW device registers", B, V, N, C));
+
+ /* Power-down the device */
+ if (psDeviceNode->psDevConfig->pfnPrePowerState)
+ {
+ eError = psDeviceNode->psDevConfig->pfnPrePowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+ PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+ PVR_LOGR_IF_ERROR(eError, "pfnPrePowerState OFF");
+ }
+
+ if (psDeviceNode->psDevConfig->pfnPostPowerState)
+ {
+ eError = psDeviceNode->psDevConfig->pfnPostPowerState(hSysData, PVRSRV_DEV_POWER_STATE_OFF,
+ PVRSRV_DEV_POWER_STATE_ON, IMG_FALSE);
+ PVR_LOGR_IF_ERROR(eError, "pfnPostPowerState OFF");
+ }
+
+ /* Extract the BVNC config from the Features table */
+ ui64BVNC = BVNC_PACK(B,0,N,C);
+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "HW device BVNC configuration not found!");
+ }
+#endif
+
+	/* We reach here if the HW is not present, we are running in a guest OS,
+	 * the HW was unstable during the register read and gave invalid values,
+	 * or runtime detection has been disabled - fall back to the compile time
+	 * BVNC */
+ if (NULL == pui64Cfg)
+ {
+ B = RGX_BVNC_KM_B;
+ N = RGX_BVNC_KM_N;
+ C = RGX_BVNC_KM_C;
+ {
+ IMG_UINT32 ui32ScanCount = 0;
+ ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%llu", &V);
+ if (1 != ui32ScanCount)
+ {
+ ui32ScanCount = OSVSScanf(RGX_BVNC_KM_V_ST, "%llup", &V);
+ if (1 != ui32ScanCount)
+ {
+ V = 0;
+ }
+ }
+ }
+ PVR_LOG(("Reverting to compile time BVNC %s", RGX_BVNC_KM));
+
+ /* Extract the BVNC config from the Features table */
+ ui64BVNC = BVNC_PACK(B,0,N,C);
+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaFeatures, ui64BVNC);
+ PVR_LOG_IF_FALSE((pui64Cfg != NULL), "Compile time BVNC configuration not found!");
+ }
+
+ /* Have we failed to identify the BVNC to use? */
+ if (NULL == pui64Cfg)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC Detection and feature lookup failed. "
+ "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
+ return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC Feature found config: 0x%016"
+ IMG_UINT64_FMTSPECx " 0x%016" IMG_UINT64_FMTSPECx " 0x%016"
+ IMG_UINT64_FMTSPECx "\n",__func__, pui64Cfg[0], pui64Cfg[1],
+ pui64Cfg[2]));
+
+	/* Parsing the feature config depends on the features available on the
+	 * core, hence this parsing must always follow the feature assignment
+	 * above */
+ psDevInfo->sDevFeatureCfg.ui64Features = pui64Cfg[1];
+ _RGXBvncParseFeatureValues(psDevInfo, pui64Cfg[2]);
+
+ /* Add 'V' to the packed BVNC value to get the BVNC ERN and BRN config. */
+ ui64BVNC = BVNC_PACK(B,V,N,C);
+ pui64Cfg = RGX_SEARCH_BVNC_TABLE(gaErnsBrns, ui64BVNC);
+ if (NULL == pui64Cfg)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: BVNC ERN/BRN lookup failed. "
+ "Unsupported BVNC: 0x%016" IMG_UINT64_FMTSPECx, __func__, ui64BVNC));
+ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = 0;
+ return PVRSRV_ERROR_BVNC_UNSUPPORTED;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: BVNC ERN/BRN Cfg: 0x%016" IMG_UINT64_FMTSPECx
+ " 0x%016" IMG_UINT64_FMTSPECx " \n", __func__, *pui64Cfg, pui64Cfg[1]));
+ psDevInfo->sDevFeatureCfg.ui64ErnsBrns = pui64Cfg[1];
+
+ psDevInfo->sDevFeatureCfg.ui32B = (IMG_UINT32)B;
+ psDevInfo->sDevFeatureCfg.ui32V = (IMG_UINT32)V;
+ psDevInfo->sDevFeatureCfg.ui32N = (IMG_UINT32)N;
+ psDevInfo->sDevFeatureCfg.ui32C = (IMG_UINT32)C;
+
+ /* Message to confirm configuration look up was a success */
+ PVR_LOG(("RGX Device initialised with BVNC %" IMG_UINT64_FMTSPEC ".%"
+ IMG_UINT64_FMTSPEC ".%" IMG_UINT64_FMTSPEC ".%"
+ IMG_UINT64_FMTSPEC, B, V, N, C));
+
+ ui32RGXDevCnt++;
+
+#if defined(DEBUG)
+ _RGXBvncDumpParsedConfig(psDeviceNode);
+#endif
+ return PVRSRV_OK;
+}
+
+/*
+ * This function checks if a particular feature is available on the given rgx device */
+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ /* FIXME: need to implement a bounds check for passed feature mask */
+ if (psDevInfo->sDevFeatureCfg.ui64Features & ui64FeatureMask)
+ {
+ return IMG_TRUE;
+ }
+ return IMG_FALSE;
+}
+
+/*
+ * This function returns the value of a feature on the given rgx device */
+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+	/* FIXME: need to implement a bounds check for passed feature mask */
+
+ if (eFeatureIndex >= RGX_FEATURE_WITH_VALUES_MAX_IDX)
+ {
+ return -1;
+ }
+
+ if (psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex] == RGX_FEATURE_VALUE_DISABLED)
+ {
+ return -1;
+ }
+
+ return psDevInfo->sDevFeatureCfg.ui32FeaturesValues[eFeatureIndex];
+}
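+
+/* Usage sketch (illustrative; the exact index name comes from the generated
+ * headers): callers treat a negative return as "value unavailable", e.g.
+ *
+ *     IMG_INT32 i32Clusters =
+ *         RGXBvncGetSupportedFeatureValue(psDeviceNode,
+ *                                         RGX_FEATURE_NUM_CLUSTERS_IDX);
+ *     if (i32Clusters < 0) { ... feature value not present on this core ... }
+ */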
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxbvnc.h b/drivers/gpu/drm/img-rogue/1.10/rgxbvnc.h
new file mode 100644
index 00000000000000..8f419c105b6e53
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxbvnc.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title BVNC handling specific header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the BVNC related work
+ (see hwdefs/km/rgx_bvnc_table_km.h and
+                hwdefs/km/rgx_bvnc_defs_km.h)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXBVNC_H__)
+#define __RGXBVNC_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "rgxdevice.h"
+
+/*************************************************************************/ /*!
+@brief This function detects the Rogue variant and configures the
+ essential config info associated with such a device.
+ The config info includes features, errata, etc
+@param psDeviceNode - Device Node pointer
+@return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXBvncInitialiseConfiguration(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@brief This function checks if a particular feature is available on
+ the given rgx device
+@param psDeviceNode - Device Node pointer
+@param ui64FeatureMask - feature to be checked
+@return true if feature is supported, false otherwise
+*/ /**************************************************************************/
+IMG_BOOL RGXBvncCheckFeatureSupported(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64FeatureMask);
+
+/*************************************************************************/ /*!
+@brief This function returns the value of a feature on the given
+ rgx device
+@param psDeviceNode - Device Node pointer
+@param          eFeatureIndex - index of the feature whose value is required
+@return         the value for the specified feature, or -1 if it is not available
+*/ /**************************************************************************/
+IMG_INT32 RGXBvncGetSupportedFeatureValue(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_FEATURE_WITH_VALUE_INDEX eFeatureIndex);
+
+#endif /* __RGXBVNC_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxccb.c b/drivers/gpu/drm/img-rogue/1.10/rgxccb.c
new file mode 100644
index 00000000000000..0e7cf827be45b0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxccb.c
@@ -0,0 +1,2286 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX CCB routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX CCB routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pdump_km.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgx_memallocflags.h"
+#include "devicemem_pdump.h"
+#include "dllist.h"
+#include "rgx_fwif_shared.h"
+#include "rgxtimerquery.h"
+#if defined(LINUX)
+#include "trace_events.h"
+#endif
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "rgxutils.h"
+
+/*
+ * Defines the number of fence updates to record so that future fences in the
+ * CCB can be checked to see if they are already known to be satisfied.
+ */
+#define RGX_CCCB_FENCE_UPDATE_LIST_SIZE (32)
+
+#define RGX_UFO_PTR_ADDR(ufoptr) (((ufoptr)->puiAddrUFO.ui32Addr) & 0xFFFFFFFC)
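+
+/* Sketch: the mask above rounds a UFO firmware address down to 4-byte
+ * alignment, e.g. (illustrative value) 0x0000A467 & 0xFFFFFFFC == 0x0000A464. */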
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD 0x1
+#define PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED 0x2
+
+typedef struct _RGX_CLIENT_CCB_UTILISATION_
+{
+ /* the threshold in bytes.
+ * when the CCB utilisation hits the threshold then we will print
+ * a warning message.
+ */
+ IMG_UINT32 ui32ThresholdBytes;
+ /* Maximum cCCB usage at some point in time */
+ IMG_UINT32 ui32HighWaterMark;
+ /* keep track of the warnings already printed.
+ * bit mask of PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_xyz
+ */
+ IMG_UINT32 ui32Warnings;
+} RGX_CLIENT_CCB_UTILISATION;
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+struct _RGX_CLIENT_CCB_ {
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl; /*!< CPU mapping of the CCB control structure used by the fw */
+ IMG_UINT8 *pui8ClientCCB; /*!< CPU mapping of the CCB */
+ DEVMEM_MEMDESC *psClientCCBMemDesc; /*!< MemDesc for the CCB */
+ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc; /*!< MemDesc for the CCB control */
+ IMG_UINT32 ui32HostWriteOffset; /*!< CCB write offset from the driver side */
+ IMG_UINT32 ui32LastPDumpWriteOffset; /*!< CCB write offset from the last time we submitted a command in capture range */
+ IMG_UINT32 ui32FinishedPDumpWriteOffset; /*!< Trails LastPDumpWriteOffset for last finished command, used for HW CB driven DMs */
+ IMG_BOOL bStateOpen; /*!< Commands will be appended to a non finished CCB */
+ IMG_UINT32 ui32LastROff; /*!< Last CCB Read offset to help detect any CCB wedge */
+ IMG_UINT32 ui32LastWOff; /*!< Last CCB Write offset to help detect any CCB wedge */
+ IMG_UINT32 ui32ByteCount; /*!< Count of the number of bytes written to CCCB */
+ IMG_UINT32 ui32LastByteCount; /*!< Last value of ui32ByteCount to help detect any CCB wedge */
+ IMG_UINT32 ui32Size; /*!< Size of the CCB */
+ DLLIST_NODE sNode; /*!< Node used to store this CCB on the per connection list */
+ PDUMP_CONNECTION_DATA *psPDumpConnectionData; /*!< Pointer to the per connection data in which we reside */
+ void *hTransition; /*!< Handle for Transition callback */
+ IMG_CHAR szName[MAX_CLIENT_CCB_NAME]; /*!< Name of this client CCB */
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext; /*!< Parent server common context that this CCB belongs to */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor;
+ RGX_CLIENT_CCB_UTILISATION sUtilisation; /*!< CCB utilisation data */
+#endif
+#if defined(DEBUG)
+ IMG_UINT32 ui32UpdateEntries; /*!< Number of Fence Updates in asFenceUpdateList */
+ RGXFWIF_UFO asFenceUpdateList[RGX_CCCB_FENCE_UPDATE_LIST_SIZE]; /*!< List of recent updates written in this CCB */
+#endif
+};
+
+/* Forms a table, with array of strings for each requestor type (listed in RGX_CCB_REQUESTORS X macro), to be used for
+ DevMemAllocation comments and PDump comments. Each tuple in the table consists of 3 strings:
+ { "FwClientCCB:" <requestor_name>, "FwClientCCBControl:" <requestor_name>, <requestor_name> },
+ The first string being used as comment when allocating ClientCCB for the given requestor, the second for CCBControl
+ structure, and the 3rd one for use in PDUMP comments. The number of tuples in the table must adhere to the following
+ build assert. */
+IMG_CHAR *const aszCCBRequestors[][3] =
+{
+#define REQUESTOR_STRING(prefix,req) #prefix ":" #req
+#define FORM_REQUESTOR_TUPLE(req) { REQUESTOR_STRING(FwClientCCB,req), REQUESTOR_STRING(FwClientCCBControl,req), #req },
+ RGX_CCB_REQUESTORS(FORM_REQUESTOR_TUPLE)
+#undef FORM_REQUESTOR_TUPLE
+};
+
+/* The number of tuples in the above table is always equal to those provided in the RGX_CCB_REQUESTORS X macro list.
+ In an event of change in value of DPX_MAX_RAY_CONTEXTS to say 'n', appropriate entry/entries up to FC[n-1] must be added to
+ the RGX_CCB_REQUESTORS list. */
+static_assert((sizeof(aszCCBRequestors)/(3*sizeof(aszCCBRequestors[0][0]))) == (REQ_TYPE_FIXED_COUNT + DPX_MAX_RAY_CONTEXTS + 1),
+ "Mismatch between aszCCBRequestors table and DPX_MAX_RAY_CONTEXTS");
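+
+/* Expansion sketch: for a requestor named TA (assumed here to be one of the
+ * RGX_CCB_REQUESTORS entries), FORM_REQUESTOR_TUPLE(TA) expands to
+ * { "FwClientCCB:TA", "FwClientCCBControl:TA", "TA" }, i.e. the CCB
+ * allocation comment, the CCB control allocation comment and the PDump name. */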
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32PDumpFlags)
+{
+
+ IMG_UINT32 ui32PollOffset;
+
+ if (psClientCCB->bStateOpen)
+ {
+ /* Draining CCB on a command that hasn't finished, and FW isn't expected
+ * to have updated Roff up to Woff. Only drain to the first
+ * finished command prior to this. The Roff for this
+ * is stored in ui32FinishedPDumpWriteOffset.
+ */
+ ui32PollOffset = psClientCCB->ui32FinishedPDumpWriteOffset;
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "cCCB(%s@%p): Draining open CCB rgxfw_roff < woff (%d)",
+ psClientCCB->szName,
+ psClientCCB,
+ ui32PollOffset);
+ }
+ else
+ {
+ /* Command to a finished CCB stream and FW is drained to empty
+ * out remaining commands until R==W.
+ */
+ ui32PollOffset = psClientCCB->ui32LastPDumpWriteOffset;
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "cCCB(%s@%p): Draining CCB rgxfw_roff == woff (%d)",
+ psClientCCB->szName,
+ psClientCCB,
+ ui32PollOffset);
+ }
+
+ return DevmemPDumpDevmemPol32(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ ui32PollOffset,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+}
+
+static PVRSRV_ERROR _RGXCCBPDumpTransition(void **pvData, IMG_BOOL bInto, IMG_UINT32 ui32PDumpFlags)
+{
+ RGX_CLIENT_CCB *psClientCCB = (RGX_CLIENT_CCB *) pvData;
+
+ /* We're about to transition into capture range and we've submitted
+ * new commands since the last time we entered capture range so drain
+ * the live CCB and simulation (sim) CCB as required, i.e. leave CCB
+ * idle in both live and sim contexts.
+ * This requires the host driver to ensure the live FW & the sim FW
+ * have both emptied out the remaining commands until R==W (CCB empty).
+ */
+ if (bInto)
+ {
+ volatile RGXFWIF_CCCB_CTL *psCCBCtl = psClientCCB->psClientCCBCtrl;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bIsFirstFrameInBlock;
+ IMG_UINT32 ui32CurrentBlock;
+
+ /* Wait for the live FW to catch up/empty CCB. This is done by returning
+ * retry which will get pushed back out to Services client where it
+ * waits on the event object and then resubmits the command.
+ */
+ if (psClientCCB->psClientCCBCtrl->ui32ReadOffset != psClientCCB->ui32HostWriteOffset)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Wait for the sim FW to catch up/empty sim CCB.
+ * We drain whenever capture range is entered, even if no commands
+ * have been issued on this CCB when out of capture range. We have to
+ * wait for commands that might have been issued in the last capture
+ * range to finish so the connection's sync block snapshot dumped after
+	 * all the PDumpTransition callbacks have been executed doesn't clobber
+ * syncs which the sim FW is currently working on.
+ *
+ * Although this is sub-optimal for play-back - while out of capture
+ * range for every continuous operation we synchronise the sim
+ * play-back processing the script and the sim FW, there is no easy
+ * solution. Not all modules that work with syncs register a
+ * PDumpTransition callback and thus we have no way of knowing if we
+ * can skip this sim CCB drain and sync block dump or not.
+ */
+
+ PDumpGetCurrentBlockKM(&ui32CurrentBlock);
+ /* In non-block-mode of pdump, ui32CurrentBlock will always be PDUMP_BLOCKNUM_INVALID */
+
+		if (ui32CurrentBlock == 1) /* Is this the second block in block-mode PDump? */
+ {
+ ui32PDumpFlags |= PDUMP_FLAGS_BLKDATA;
+ }
+
+		/* In block-mode PDump, drain only at the start of the second
+		 * (ui32CurrentBlock == 1) pdump-block; in non-block-mode PDump,
+		 * always drain if we reach here.
+		 */
+		if ((ui32CurrentBlock == PDUMP_BLOCKNUM_INVALID) || (ui32CurrentBlock == 1))
+ {
+ /* We have already synchronised app-thread and live-FW thread in
+ * services client where we wait on event object
+ */
+
+ /* Here is what this code is doing in short,
+ *
+ * if(block-mode)
+ * {
+ * if(ui32CurrentBlock == 1)
+ * {
+ * As we are keeping first (ui32CurrentBlock == 0) pdump-block, drain CCCB at start of
+ * second (ui32CurrentBlock == 1) pdump-block to synchronise sim-FW thread and script-thread
+ * at end of first pdump-block and before starting last pdump-block at playback time.
+ * }
+ * else
+ * {
+ * We already synced sim-FW thread and script-thread after first pdump-block
+ * (in 'if' statement above), so we can skip synchronisation for all other intermediate
+ * pdump-blocks and start with last pdump-block directly at playback time.
+ * }
+ * }
+ * else
+ * {
+			 *     Always drain the cCCB if we reach here, as it's a non-block-mode capture.
+ * }
+ * */
+
+ eError = RGXCCBPDumpDrainCCB(psClientCCB, ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "_RGXCCBPDumpTransition: problem pdumping POL for cCCBCtl (%d)", eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* clear block data flag */
+ ui32PDumpFlags &= ~PDUMP_FLAGS_BLKDATA;
+ }
+
+ /* Live CCB and simulation CCB now empty, FW idle on CCB in both
+ * contexts.
+ */
+ PDumpIsFirstFrameInBlockKM(&bIsFirstFrameInBlock);
+
+ if ((psClientCCB->ui32LastPDumpWriteOffset != psClientCCB->ui32HostWriteOffset) || bIsFirstFrameInBlock)
+ {
+ /* If new commands have been written when out of capture range in
+ * the live CCB then we need to fast forward the sim CCBCtl
+ * offsets past uncaptured commands. This is done by PDUMPing
+ * the CCBCtl memory to align sim values with the live CCBCtl
+ * values. Both live and sim FWs can start with the first
+ * command which is in the new capture range.
+ *
+ * In case of block-mode:
+ *
+ * We already synchronised live-FW thread and app-thread above, so
+ * now fast-forward sim-FW CCBCtrl read/write offsets to that of
+ * live-FW offsets at start of each new pdump-block.
+ *
+ * At playback time, after sim-FW thread and script-thread
+ * synchronisation at the end of first pdump-block, CCBCtrl
+ * offsets from last pdump-block will be loaded directly. So
+ * this will allow us to skip all commands from intermediate
+ * pdump-blocks and start with last pdump-block immediately
+ * after the first pdump-block.
+ */
+ psCCBCtl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+ psCCBCtl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+ psCCBCtl->ui32WriteOffset = psClientCCB->ui32HostWriteOffset;
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "cCCB(%s@%p): Fast-forward from %d to %d",
+ psClientCCB->szName,
+ psClientCCB,
+ psClientCCB->ui32LastPDumpWriteOffset,
+ psClientCCB->ui32HostWriteOffset);
+
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCCB_CTL),
+ ui32PDumpFlags);
+
+ /* Although we've entered capture range for this process
+ * connection we might not do any work on this CCB, so update the
+ * ui32LastPDumpWriteOffset to reflect where we got to for next
+ * time so we start the drain from where we got to last time.
+ */
+ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+
+static INLINE void _RGXInitCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+ psClientCCB->sUtilisation.ui32HighWaterMark = 0; /* Initialize ui32HighWaterMark level to zero */
+ psClientCCB->sUtilisation.ui32ThresholdBytes = (psClientCCB->ui32Size *
+ PVRSRV_ENABLE_CCCB_UTILISATION_INFO_THRESHOLD) / 100;
+ psClientCCB->sUtilisation.ui32Warnings = 0;
+}
+
+static INLINE void _RGXPrintCCBUtilisationWarning(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32WarningType,
+ IMG_UINT32 ui32CmdSize)
+{
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+ if(ui32WarningType == PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED)
+ {
+ PVR_LOG(("Failed to acquire CCB space for %u byte command:", ui32CmdSize));
+ }
+
+ PVR_LOG(("%s: Client CCB (%s) watermark (%u) hit %d%% of its allocation size (%u)",
+ __func__,
+ psClientCCB->szName,
+ psClientCCB->sUtilisation.ui32HighWaterMark,
+ psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size,
+ psClientCCB->ui32Size));
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32WarningType);
+ PVR_UNREFERENCED_PARAMETER(ui32CmdSize);
+
+ PVR_LOG(("GPU %s command buffer usage high (%u). This is not an error but the application may not run optimally.",
+ aszCCBRequestors[psClientCCB->eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ psClientCCB->sUtilisation.ui32HighWaterMark * 100 / psClientCCB->ui32Size));
+#endif
+}
+
+static INLINE void _RGXCCBUtilisationEvent(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32WarningType,
+ IMG_UINT32 ui32CmdSize)
+{
+	/* In VERBOSE mode we print a message for each different event type as
+	 * it happens, but by default we only issue one message.
+	 */
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO_VERBOSE)
+ if(!(psClientCCB->sUtilisation.ui32Warnings & ui32WarningType))
+#else
+ if(!psClientCCB->sUtilisation.ui32Warnings)
+#endif
+ {
+ _RGXPrintCCBUtilisationWarning(psClientCCB,
+ ui32WarningType,
+ ui32CmdSize);
+ /* record that we have issued a warning of this type */
+ psClientCCB->sUtilisation.ui32Warnings |= ui32WarningType;
+ }
+}
+
+/* Check the current CCB utilisation. Print a one-time warning message if it is above the
+ * specified threshold
+ */
+static INLINE void _RGXCheckCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+ /* Print a warning message if the cCCB watermark is above the threshold value */
+ if(psClientCCB->sUtilisation.ui32HighWaterMark >= psClientCCB->sUtilisation.ui32ThresholdBytes)
+ {
+ _RGXCCBUtilisationEvent(psClientCCB,
+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_THRESHOLD,
+ 0);
+ }
+}
+
+/* Update the cCCB high watermark level if necessary */
+static void _RGXUpdateCCBUtilisation(RGX_CLIENT_CCB *psClientCCB)
+{
+ IMG_UINT32 ui32FreeSpace, ui32MemCurrentUsage;
+
+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+ psClientCCB->ui32Size);
+ ui32MemCurrentUsage = psClientCCB->ui32Size - ui32FreeSpace;
+
+ if (ui32MemCurrentUsage > psClientCCB->sUtilisation.ui32HighWaterMark)
+ {
+ psClientCCB->sUtilisation.ui32HighWaterMark = ui32MemCurrentUsage;
+
+ /* The high water mark has increased. Check if it is above the
+ * threshold so we can print a warning if necessary.
+ */
+ _RGXCheckCCBUtilisation(psClientCCB);
+ }
+}
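+
+/* Worked example (illustrative numbers, assuming the usual circular-buffer
+ * definition of GET_CCB_SPACE that reserves one byte to tell full from
+ * empty): with a 4 KiB CCB, woff = 0x300 and roff = 0x100, free space is
+ * 0xDFF bytes and the usage computed above is 0x201 bytes. */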
+
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CCBSizeLog2,
+ CONNECTION_DATA *psConnectionData,
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ RGX_CLIENT_CCB **ppsClientCCB,
+ DEVMEM_MEMDESC **ppsClientCCBMemDesc,
+ DEVMEM_MEMDESC **ppsClientCCBCtrlMemDesc)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiClientCCBMemAllocFlags, uiClientCCBCtlMemAllocFlags;
+ IMG_UINT32 ui32AllocSize = (1U << ui32CCBSizeLog2);
+ RGX_CLIENT_CCB *psClientCCB;
+
+ /* All client CCBs should be at-least of the "minimum" size declared by the API */
+ PVR_ASSERT (ui32CCBSizeLog2 >= MIN_SAFE_CCB_SIZE_LOG2);
+
+ psClientCCB = OSAllocMem(sizeof(*psClientCCB));
+ if (psClientCCB == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+ psClientCCB->psServerCommonContext = psServerCommonContext;
+
+ uiClientCCBMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+ uiClientCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+ PDUMPCOMMENT("Allocate RGXFW cCCB");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32AllocSize,
+ uiClientCCBMemAllocFlags,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_STRING],
+ &psClientCCB->psClientCCBMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_alloc_ccb;
+ }
+
+
+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBMemDesc,
+ (void **) &psClientCCB->pui8ClientCCB);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_map_ccb;
+ }
+
+ PDUMPCOMMENT("Allocate RGXFW cCCB control");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_CCCB_CTL),
+ uiClientCCBCtlMemAllocFlags,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING],
+ &psClientCCB->psClientCCBCtrlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to allocate RGX client CCB control (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_alloc_ccbctrl;
+ }
+
+
+ eError = DevmemAcquireCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc,
+ (void **) &psClientCCB->psClientCCBCtrl);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateCCBKM: Failed to map RGX client CCB (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_map_ccbctrl;
+ }
+
+ psClientCCB->psClientCCBCtrl->ui32WriteOffset = 0;
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset = 0;
+ psClientCCB->psClientCCBCtrl->ui32DepOffset = 0;
+ psClientCCB->psClientCCBCtrl->ui32WrapMask = ui32AllocSize - 1;
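+	/* ui32AllocSize is a power of two (1U << ui32CCBSizeLog2), so the wrap
+	 * mask set above is all-ones below it; e.g. a log2 size of 16 gives a
+	 * 64 KiB CCB and a wrap mask of 0xFFFF, letting offsets wrap with a
+	 * single AND. */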
+ OSSNPrintf(psClientCCB->szName, MAX_CLIENT_CCB_NAME, "%s-P%lu-T%lu-%s",
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ (unsigned long) OSGetCurrentClientProcessIDKM(),
+ (unsigned long) OSGetCurrentClientThreadIDKM(),
+ OSGetCurrentClientProcessNameKM());
+
+ PDUMPCOMMENT("cCCB control");
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBCtrlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCCB_CTL),
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psClientCCB->ui32HostWriteOffset = 0;
+ psClientCCB->ui32LastPDumpWriteOffset = 0;
+ psClientCCB->ui32FinishedPDumpWriteOffset = 0;
+ psClientCCB->ui32Size = ui32AllocSize;
+ psClientCCB->ui32LastROff = ui32AllocSize - 1;
+ psClientCCB->ui32ByteCount = 0;
+ psClientCCB->ui32LastByteCount = 0;
+ psClientCCB->bStateOpen = IMG_FALSE;
+
+#if defined(DEBUG)
+ psClientCCB->ui32UpdateEntries = 0;
+#endif
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ _RGXInitCCBUtilisation(psClientCCB);
+ psClientCCB->eRGXCCBRequestor = eRGXCCBRequestor;
+#endif
+ eError = PDumpRegisterTransitionCallback(psConnectionData->psPDumpConnectionData,
+ _RGXCCBPDumpTransition,
+ psClientCCB,
+ &psClientCCB->hTransition);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_pdumpreg;
+ }
+
+ /*
+ * Note:
+ * Save the PDump specific structure, which is ref counted unlike
+ * the connection data, to ensure it's not freed too early
+ */
+ psClientCCB->psPDumpConnectionData = psConnectionData->psPDumpConnectionData;
+ PDUMPCOMMENT("New RGXFW cCCB(%s@%p) created",
+ psClientCCB->szName,
+ psClientCCB);
+
+ *ppsClientCCB = psClientCCB;
+ *ppsClientCCBMemDesc = psClientCCB->psClientCCBMemDesc;
+ *ppsClientCCBCtrlMemDesc = psClientCCB->psClientCCBCtrlMemDesc;
+ return PVRSRV_OK;
+
+fail_pdumpreg:
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+fail_map_ccbctrl:
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+fail_alloc_ccbctrl:
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+fail_map_ccb:
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+fail_alloc_ccb:
+ OSFreeMem(psClientCCB);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB)
+{
+ PDumpUnregisterTransitionCallback(psClientCCB->hTransition);
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBCtrlMemDesc);
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBCtrlMemDesc);
+ DevmemReleaseCpuVirtAddr(psClientCCB->psClientCCBMemDesc);
+ DevmemFwFree(psDevInfo, psClientCCB->psClientCCBMemDesc);
+ OSFreeMem(psClientCCB);
+}
+
+/******************************************************************************
+ FUNCTION : RGXAcquireCCB
+
+ PURPOSE : Obtains access to write some commands to a CCB
+
+ PARAMETERS : psClientCCB - The client CCB
+ ui32CmdSize - How much space is required
+ ppvBufferSpace - Pointer to space in the buffer
+ ui32PDumpFlags - Should this be PDump continuous?
+
+ RETURNS : PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ void **ppvBufferSpace,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_BOOL bInCaptureRange;
+ IMG_BOOL bPdumpEnabled;
+ IMG_UINT64 ui64PDumpState = 0;
+
+ PDumpGetStateKM(&ui64PDumpState);
+ PDumpIsCaptureFrameKM(&bInCaptureRange);
+ bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+ && (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+ /*
+ PDumpSetFrame will detect as we Transition into capture range for
+ frame based data but if we are PDumping continuous data then we
+ need to inform the PDump layer ourselves
+ */
+ if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+ && PDUMP_IS_CONTINUOUS(ui32PDumpFlags)
+ && !bInCaptureRange)
+ {
+ eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_TRUE, ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ /* Check that the CCB can hold this command + padding */
+ if ((ui32CmdSize + PADDING_COMMAND_SIZE + 1) > psClientCCB->ui32Size)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Command size (%d bytes) too big for CCB (%d bytes)",
+ ui32CmdSize, psClientCCB->ui32Size));
+ return PVRSRV_ERROR_CMD_TOO_BIG;
+ }
+
+	/*
+	 Check that we don't overflow the end of the buffer and that we have
+	 enough space for the padding command. If we don't have enough space
+	 (including the minimum amount for the padding command) we insert a
+	 padding command now and wrap before adding the main command.
+	 */
+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) <= psClientCCB->ui32Size)
+ {
+ /*
+ The command can fit without wrapping...
+ */
+ IMG_UINT32 ui32FreeSpace;
+
+#if defined(PDUMP)
+ /* Wait for sufficient CCB space to become available */
+		PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+ ui32CmdSize, psClientCCB->ui32HostWriteOffset,
+ psClientCCB->szName);
+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ psClientCCB->ui32HostWriteOffset,
+ ui32CmdSize,
+ psClientCCB->ui32Size);
+#endif
+
+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+ psClientCCB->ui32Size);
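+		/*
+		   Rationale for the strict '>' below: if every byte could be used,
+		   a read offset equal to the write offset would be ambiguous
+		   between "empty" and "full", so at least one byte is always left
+		   unused.
+		*/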
+
+ /* Don't allow all the space to be used */
+ if (ui32FreeSpace > ui32CmdSize)
+ {
+ *ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+ psClientCCB->ui32HostWriteOffset);
+ return PVRSRV_OK;
+ }
+
+ goto e_retry;
+ }
+ else
+ {
+ /*
+ We're at the end of the buffer without enough contiguous space.
+ The command cannot fit without wrapping, we need to insert a
+ padding command and wrap. We need to do this in one go otherwise
+ we would be leaving unflushed commands and forcing the client to
+ deal with flushing the padding command but not the command they
+ wanted to write. Therefore we either do all or nothing.
+ */
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+ IMG_UINT32 ui32FreeSpace;
+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+#if defined(PDUMP)
+ /* Wait for sufficient CCB space to become available */
+		PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+ ui32Remain, psClientCCB->ui32HostWriteOffset,
+ psClientCCB->szName);
+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ psClientCCB->ui32HostWriteOffset,
+ ui32Remain,
+ psClientCCB->ui32Size);
+		PDUMPCOMMENTWITHFLAGS(0, "Wait for %u bytes to become available according to cCCB Ctl (woff=%x) for %s",
+ ui32CmdSize, 0 /*ui32HostWriteOffset after wrap */,
+ psClientCCB->szName);
+ DevmemPDumpCBP(psClientCCB->psClientCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCCB_CTL, ui32ReadOffset),
+ 0 /*ui32HostWriteOffset after wrap */,
+ ui32CmdSize,
+ psClientCCB->ui32Size);
+#endif
+
+ ui32FreeSpace = GET_CCB_SPACE(psClientCCB->ui32HostWriteOffset,
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset,
+ psClientCCB->ui32Size);
+
+ /* Don't allow all the space to be used */
+ if (ui32FreeSpace > ui32Remain + ui32CmdSize)
+ {
+ psHeader = (void *) (psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PADDING;
+ psHeader->ui32CmdSize = ui32Remain - sizeof(RGXFWIF_CCB_CMD_HEADER);
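+			/*
+			   Illustrative arithmetic (a 16-byte RGXFWIF_CCB_CMD_HEADER is
+			   assumed for the example only): with 128 bytes left before the
+			   end of the buffer, the padding payload is 128 - 16 = 112
+			   bytes, so header plus payload exactly consume the tail.
+			*/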
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags, "cCCB(%p): Padding cmd %d", psClientCCB, psHeader->ui32CmdSize);
+ if (bPdumpEnabled)
+ {
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+ psClientCCB->ui32HostWriteOffset,
+ ui32Remain,
+ ui32PDumpFlags);
+ }
+
+ *ppvBufferSpace = (void *) (psClientCCB->pui8ClientCCB +
+ 0 /*ui32HostWriteOffset after wrap */);
+ return PVRSRV_OK;
+ }
+
+ goto e_retry;
+ }
+e_retry:
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ _RGXCCBUtilisationEvent(psClientCCB,
+ PVRSRV_CLIENT_CCCB_UTILISATION_WARNING_ACQUIRE_FAILED,
+ ui32CmdSize);
+#endif /* PVRSRV_ENABLE_CCCB_UTILISATION_INFO */
+ return PVRSRV_ERROR_RETRY;
+}
+
+/******************************************************************************
+ FUNCTION : RGXReleaseCCB
+
+ PURPOSE : Release a CCB that we have been writing to.
+
+ PARAMETERS	: psClientCCB		- The client CCB
+		  ui32CmdSize		- How much space was reserved in the CCB
+		  ui32PDumpFlags	- PDump flags for this command
+
+ RETURNS : None
+******************************************************************************/
+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ IMG_BOOL bInCaptureRange;
+ IMG_BOOL bPdumpEnabled;
+ IMG_UINT64 ui64PDumpState = 0;
+
+ PDumpGetStateKM(&ui64PDumpState);
+ PDumpIsCaptureFrameKM(&bInCaptureRange);
+ bPdumpEnabled = (ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+ && (bInCaptureRange || PDUMP_IS_CONTINUOUS(ui32PDumpFlags));
+
+ /*
+ * If a padding command was needed then we should now move ui32HostWriteOffset
+	 * forward. The command has already been dumped (if bPdumpEnabled).
+ */
+ if ((psClientCCB->ui32HostWriteOffset + ui32CmdSize + PADDING_COMMAND_SIZE) > psClientCCB->ui32Size)
+ {
+ IMG_UINT32 ui32Remain = psClientCCB->ui32Size - psClientCCB->ui32HostWriteOffset;
+
+ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+ ui32Remain,
+ psClientCCB->ui32Size);
+ psClientCCB->ui32ByteCount += ui32Remain;
+ }
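+	/*
+	 * This mirrors the wrap test in RGXAcquireCCB: if Acquire inserted a
+	 * padding command, the same inequality holds here, so the write offset
+	 * first consumes the padded tail before the command itself is counted
+	 * at the start of the buffer.
+	 */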
+
+ /* Dump the CCB data */
+ if (bPdumpEnabled)
+ {
+ DevmemPDumpLoadMem(psClientCCB->psClientCCBMemDesc,
+ psClientCCB->ui32HostWriteOffset,
+ ui32CmdSize,
+ ui32PDumpFlags);
+ }
+
+ /*
+	 * Check if there are any fences being written that will already be
+ * satisfied by the last written update command in this CCB. At the
+ * same time we can ASSERT that all sync addresses are not NULL.
+ */
+#if defined(DEBUG)
+ {
+ IMG_UINT8 *pui8BufferStart = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset);
+ IMG_UINT8 *pui8BufferEnd = (void *)((uintptr_t)psClientCCB->pui8ClientCCB + psClientCCB->ui32HostWriteOffset + ui32CmdSize);
+ IMG_BOOL bMessagePrinted = IMG_FALSE;
+
+ /* Walk through the commands in this section of CCB being released... */
+ while (pui8BufferStart < pui8BufferEnd)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8BufferStart;
+
+ if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+ {
+ /* If an UPDATE then record the values in case an adjacent fence uses it. */
+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ psClientCCB->ui32UpdateEntries = 0;
+ while (ui32NumUFOs-- > 0)
+ {
+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+ if (psClientCCB->ui32UpdateEntries < RGX_CCCB_FENCE_UPDATE_LIST_SIZE)
+ {
+ psClientCCB->asFenceUpdateList[psClientCCB->ui32UpdateEntries++] = *psUFOPtr++;
+ }
+ }
+ }
+ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE)
+ {
+ /* If a FENCE then check the values against the last UPDATE issued. */
+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ while (ui32NumUFOs-- > 0)
+ {
+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+
+ if (bMessagePrinted == IMG_FALSE)
+ {
+ RGXFWIF_UFO *psUpdatePtr = psClientCCB->asFenceUpdateList;
+ IMG_UINT32 ui32UpdateIndex;
+
+ for (ui32UpdateIndex = 0; ui32UpdateIndex < psClientCCB->ui32UpdateEntries; ui32UpdateIndex++)
+ {
+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+ {
+ if (RGX_UFO_PTR_ADDR(psUFOPtr) == RGX_UFO_PTR_ADDR(psUpdatePtr))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Redundant sync checkpoint check found in cCCB(%p) - 0x%x -> 0x%x",
+ psClientCCB, RGX_UFO_PTR_ADDR(psUFOPtr), psUFOPtr->ui32Value));
+ bMessagePrinted = IMG_TRUE;
+ break;
+ }
+ }
+ else
+ {
+ if (psUFOPtr->puiAddrUFO.ui32Addr == psUpdatePtr->puiAddrUFO.ui32Addr &&
+ psUFOPtr->ui32Value == psUpdatePtr->ui32Value)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Redundant fence check found in cCCB(%p) - 0x%x -> 0x%x",
+ psClientCCB, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+ bMessagePrinted = IMG_TRUE;
+ break;
+ }
+ }
+ psUpdatePtr++;
+ }
+ }
+
+ psUFOPtr++;
+ }
+ }
+ else if (psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR ||
+ psCmdHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE)
+ {
+ /* For all other UFO ops check the UFO address is not NULL. */
+ IMG_UINT32 ui32NumUFOs = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO*)(pui8BufferStart + sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ while (ui32NumUFOs-- > 0)
+ {
+ PVR_ASSERT(psUFOPtr->puiAddrUFO.ui32Addr != 0);
+ psUFOPtr++;
+ }
+ }
+
+ /* Move to the next command in this section of CCB being released... */
+ pui8BufferStart += sizeof(RGXFWIF_CCB_CMD_HEADER) + psCmdHeader->ui32CmdSize;
+ }
+ }
+#endif /* DEBUG */
+
+ /*
+ * Update the CCB write offset.
+ */
+ UPDATE_CCB_OFFSET(psClientCCB->ui32HostWriteOffset,
+ ui32CmdSize,
+ psClientCCB->ui32Size);
+ psClientCCB->ui32ByteCount += ui32CmdSize;
+
+#if defined(PVRSRV_ENABLE_CCCB_UTILISATION_INFO)
+ _RGXUpdateCCBUtilisation(psClientCCB);
+#endif
+ /*
+	   PDumpSetFrame will detect the Transition out of capture range for
+	   frame-based data, but if we are PDumping continuous data then we
+	   need to inform the PDump layer ourselves.
+ */
+ if ((ui64PDumpState & PDUMP_STATE_CONNECTED) != 0
+ && PDUMP_IS_CONTINUOUS(ui32PDumpFlags)
+ && !bInCaptureRange)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Only Transitioning into capture range can cause an error */
+ eError = PDumpTransition(psClientCCB->psPDumpConnectionData, IMG_FALSE, ui32PDumpFlags);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ if (bPdumpEnabled)
+ {
+ if (!psClientCCB->bStateOpen)
+ {
+ /* Store offset to last finished CCB command. This offset can
+ * be needed when appending commands to a non finished CCB.
+ */
+ psClientCCB->ui32FinishedPDumpWriteOffset = psClientCCB->ui32LastPDumpWriteOffset;
+ }
+
+ /* Update the PDump write offset to show we PDumped this command */
+ psClientCCB->ui32LastPDumpWriteOffset = psClientCCB->ui32HostWriteOffset;
+ }
+
+#if defined(NO_HARDWARE)
+ /*
+	   The firmware is not running and cannot update these, so we do it here instead.
+ */
+ psClientCCB->psClientCCBCtrl->ui32ReadOffset = psClientCCB->ui32HostWriteOffset;
+ psClientCCB->psClientCCBCtrl->ui32DepOffset = psClientCCB->ui32HostWriteOffset;
+#endif
+}
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB)
+{
+ return psClientCCB->ui32HostWriteOffset;
+}
+
+#define SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL PVR_DBG_ERROR
+#define CHECK_COMMAND(cmd, fenceupdate) \
+ case RGXFWIF_CCB_CMD_TYPE_##cmd: \
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, #cmd " command (%d bytes)", psHeader->ui32CmdSize)); \
+ bFenceUpdate = fenceupdate; \
+ break
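+/*
+   For example, CHECK_COMMAND(FENCE, IMG_TRUE) expands (after string literal
+   concatenation) to:
+
+	case RGXFWIF_CCB_CMD_TYPE_FENCE:
+		PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "FENCE command (%d bytes)", psHeader->ui32CmdSize));
+		bFenceUpdate = IMG_TRUE;
+		break;
+*/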
+
+static void _RGXClientCCBDumpCommands(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32ByteCount)
+{
+#if defined(SUPPORT_DUMP_CLIENT_CCB_COMMANDS)
+ IMG_UINT8 *pui8Ptr = psClientCCB->pui8ClientCCB + ui32Offset;
+ IMG_UINT32 ui32ConsumeSize = ui32ByteCount;
+
+ while (ui32ConsumeSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8Ptr;
+ IMG_BOOL bFenceUpdate = IMG_FALSE;
+
+		PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "@offset 0x%08lx", (unsigned long)(pui8Ptr - psClientCCB->pui8ClientCCB)));
+ switch(psHeader->eCmdType)
+ {
+ CHECK_COMMAND(TA, IMG_FALSE);
+ CHECK_COMMAND(3D, IMG_FALSE);
+ CHECK_COMMAND(CDM, IMG_FALSE);
+ CHECK_COMMAND(TQ_3D, IMG_FALSE);
+ CHECK_COMMAND(TQ_2D, IMG_FALSE);
+ CHECK_COMMAND(3D_PR, IMG_FALSE);
+ CHECK_COMMAND(NULL, IMG_FALSE);
+ CHECK_COMMAND(SHG, IMG_FALSE);
+ CHECK_COMMAND(RTU, IMG_FALSE);
+ CHECK_COMMAND(RTU_FC, IMG_FALSE);
+ CHECK_COMMAND(PRE_TIMESTAMP, IMG_FALSE);
+ CHECK_COMMAND(POST_TIMESTAMP, IMG_FALSE);
+ CHECK_COMMAND(FENCE, IMG_TRUE);
+ CHECK_COMMAND(UPDATE, IMG_TRUE);
+ CHECK_COMMAND(UNFENCED_UPDATE, IMG_FALSE);
+ CHECK_COMMAND(RMW_UPDATE, IMG_TRUE);
+ CHECK_COMMAND(FENCE_PR, IMG_TRUE);
+ CHECK_COMMAND(UNFENCED_RMW_UPDATE, IMG_FALSE);
+ CHECK_COMMAND(PADDING, IMG_FALSE);
+ CHECK_COMMAND(TQ_TDM, IMG_FALSE);
+ default:
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Unknown command!"));
+ break;
+ }
+ pui8Ptr += sizeof(*psHeader);
+ if (bFenceUpdate)
+ {
+ IMG_UINT32 j;
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8Ptr;
+ for (j=0;j<psHeader->ui32CmdSize/sizeof(RGXFWIF_UFO);j++)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "Addr = 0x%08x, value = 0x%08x",
+ psUFOPtr[j].puiAddrUFO.ui32Addr, psUFOPtr[j].ui32Value));
+ }
+ }
+ else
+ {
+ IMG_UINT32 *pui32Ptr = (IMG_UINT32 *) pui8Ptr;
+ IMG_UINT32 ui32Remain = psHeader->ui32CmdSize/sizeof(IMG_UINT32);
+ while(ui32Remain)
+ {
+ if (ui32Remain >= 4)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x 0x%08x",
+ pui32Ptr[0], pui32Ptr[1], pui32Ptr[2], pui32Ptr[3]));
+ pui32Ptr += 4;
+ ui32Remain -= 4;
+ }
+ if (ui32Remain == 3)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x 0x%08x",
+ pui32Ptr[0], pui32Ptr[1], pui32Ptr[2]));
+ pui32Ptr += 3;
+ ui32Remain -= 3;
+ }
+ if (ui32Remain == 2)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x 0x%08x",
+ pui32Ptr[0], pui32Ptr[1]));
+ pui32Ptr += 2;
+ ui32Remain -= 2;
+ }
+ if (ui32Remain == 1)
+ {
+ PVR_DPF((SUPPORT_DUMP_CLIENT_CCB_COMMANDS_DBG_LEVEL, "0x%08x",
+ pui32Ptr[0]));
+ pui32Ptr += 1;
+ ui32Remain -= 1;
+ }
+ }
+ }
+ pui8Ptr += psHeader->ui32CmdSize;
+ ui32ConsumeSize -= sizeof(*psHeader) + psHeader->ui32CmdSize;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psClientCCB);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32ByteCount);
+#endif
+}
+
+/*
+	Work out how much space this command will require
+*/
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32ClientFenceCount,
+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ IMG_UINT32 ui32ServerSyncFlagMask,
+ SERVER_SYNC_PRIMITIVE **papsServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE eType,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT32 ui32PDumpFlags,
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+ IMG_CHAR *pszCommandName,
+ IMG_BOOL bCCBStateOpen,
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData,
+ IMG_DEV_VIRTADDR sRobustnessResetReason)
+{
+ IMG_UINT32 ui32FenceCount;
+ IMG_UINT32 ui32UpdateCount;
+ IMG_UINT32 i;
+
+ /* Job reference values */
+ psCmdHelperData->ui32ExtJobRef = ui32ExtJobRef;
+ psCmdHelperData->ui32IntJobRef = ui32IntJobRef;
+
+ /* Save the data we require in the submit call */
+ psCmdHelperData->psClientCCB = psClientCCB;
+ psCmdHelperData->ui32PDumpFlags = ui32PDumpFlags;
+ psCmdHelperData->pszCommandName = pszCommandName;
+ psCmdHelperData->psClientCCB->bStateOpen = bCCBStateOpen;
+
+ /* Client sync data */
+ psCmdHelperData->ui32ClientFenceCount = ui32ClientFenceCount;
+ psCmdHelperData->pauiFenceUFOAddress = pauiFenceUFOAddress;
+ psCmdHelperData->paui32FenceValue = paui32FenceValue;
+ psCmdHelperData->ui32ClientUpdateCount = ui32ClientUpdateCount;
+ psCmdHelperData->pauiUpdateUFOAddress = pauiUpdateUFOAddress;
+ psCmdHelperData->paui32UpdateValue = paui32UpdateValue;
+
+ /* Server sync data */
+ psCmdHelperData->ui32ServerSyncCount = ui32ServerSyncCount;
+ psCmdHelperData->paui32ServerSyncFlags = paui32ServerSyncFlags;
+ psCmdHelperData->ui32ServerSyncFlagMask = ui32ServerSyncFlagMask;
+ psCmdHelperData->papsServerSyncs = papsServerSyncs;
+
+ /* Command data */
+ psCmdHelperData->ui32CmdSize = ui32CmdSize;
+ psCmdHelperData->pui8DMCmd = pui8DMCmd;
+ psCmdHelperData->eType = eType;
+
+ /* Robustness reset reason address */
+ psCmdHelperData->sRobustnessResetReason = sRobustnessResetReason;
+
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "%s Command Server Init on FWCtx %08x", pszCommandName,
+ FWCommonContextGetFWAddress(psClientCCB->psServerCommonContext).ui32Addr);
+
+ /* Init the generated data members */
+ psCmdHelperData->ui32ServerFenceCount = 0;
+ psCmdHelperData->ui32ServerUpdateCount = 0;
+ psCmdHelperData->ui32ServerUnfencedUpdateCount = 0;
+ psCmdHelperData->ui32PreTimeStampCmdSize = 0;
+ psCmdHelperData->ui32PostTimeStampCmdSize = 0;
+ psCmdHelperData->ui32RMWUFOCmdSize = 0;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Workload Data added */
+ psCmdHelperData->psWorkEstKickData = psWorkEstKickData;
+#endif
+
+ if (ppPreAddr && (ppPreAddr->ui32Addr != 0))
+ {
+ psCmdHelperData->pPreTimestampAddr = *ppPreAddr;
+ psCmdHelperData->ui32PreTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+ + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+ }
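+	/*
+	   The expression above rounds the timestamp payload up to the firmware
+	   allocation granularity. Illustrative arithmetic with assumed values
+	   (an 8-byte RGXFWIF_DEV_VIRTADDR and RGXFWIF_FWALLOC_ALIGN of 16):
+	   (8 + 15) & ~15 = 16, i.e. one header plus one aligned payload slot.
+	*/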
+
+ if (ppPostAddr && (ppPostAddr->ui32Addr != 0))
+ {
+ psCmdHelperData->pPostTimestampAddr = *ppPostAddr;
+ psCmdHelperData->ui32PostTimeStampCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER)
+ + ((sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1));
+ }
+
+ if (ppRMWUFOAddr && (ppRMWUFOAddr->ui32Addr != 0))
+ {
+		psCmdHelperData->pRMWUFOAddr = *ppRMWUFOAddr;
+ psCmdHelperData->ui32RMWUFOCmdSize = sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_UFO);
+ }
+
+
+ /* Workout how many fences and updates this command will have */
+ for (i = 0; i < ui32ServerSyncCount; i++)
+ {
+ IMG_UINT32 ui32Flag = paui32ServerSyncFlags[i] & ui32ServerSyncFlagMask;
+
+ if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+ {
+ /* Server syncs must fence */
+ psCmdHelperData->ui32ServerFenceCount++;
+ }
+
+ /* If it is an update */
+ if (ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+ {
+			/* Is it a fenced update or a progress update (a.k.a. unfenced update)? */
+ if ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+ {
+ /* it is a progress update */
+ psCmdHelperData->ui32ServerUnfencedUpdateCount++;
+ }
+ else
+ {
+ /* it is a fenced update */
+ psCmdHelperData->ui32ServerUpdateCount++;
+ }
+ }
+ }
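+	/*
+	   Worked example (illustrative): a server sync whose masked flags
+	   contain both PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK and
+	   PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE (but not the unfenced variant)
+	   increments ui32ServerFenceCount and ui32ServerUpdateCount once each,
+	   so it is both waited on and signalled by this command.
+	*/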
+
+
+ /* Total fence command size (header plus command data) */
+ ui32FenceCount = ui32ClientFenceCount + psCmdHelperData->ui32ServerFenceCount;
+ if (ui32FenceCount)
+ {
+ psCmdHelperData->ui32FenceCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32FenceCount * sizeof(RGXFWIF_UFO)) +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+ }
+ else
+ {
+ psCmdHelperData->ui32FenceCmdSize = 0;
+ }
+
+ /* Total DM command size (header plus command data) */
+ psCmdHelperData->ui32DMCmdSize = RGX_CCB_FWALLOC_ALIGN(ui32CmdSize +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+
+ /* Total update command size (header plus command data) */
+ ui32UpdateCount = ui32ClientUpdateCount + psCmdHelperData->ui32ServerUpdateCount;
+ if (ui32UpdateCount)
+ {
+ psCmdHelperData->ui32UpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((ui32UpdateCount * sizeof(RGXFWIF_UFO)) +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+ }
+ else
+ {
+ psCmdHelperData->ui32UpdateCmdSize = 0;
+ }
+
+ /* Total unfenced update command size (header plus command data) */
+ if (psCmdHelperData->ui32ServerUnfencedUpdateCount != 0)
+ {
+ psCmdHelperData->ui32UnfencedUpdateCmdSize = RGX_CCB_FWALLOC_ALIGN((psCmdHelperData->ui32ServerUnfencedUpdateCount * sizeof(RGXFWIF_UFO)) +
+ sizeof(RGXFWIF_CCB_CMD_HEADER));
+ }
+ else
+ {
+ psCmdHelperData->ui32UnfencedUpdateCmdSize = 0;
+ }
+
+ return PVRSRV_OK;
+}
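+
+/*
+   Typical use of these helpers, sketched from this file alone; the real
+   per-DM kick paths supply their own parameters and error handling, and
+   sCmdHelperData is a caller-owned RGX_CCB_CMD_HELPER_DATA (name
+   illustrative):
+
+	RGXCmdHelperInitCmdCCB(...);                        - size and record the command
+	RGXCmdHelperAcquireCmdCCB(1, &sCmdHelperData);      - reserve CCB space, write client syncs
+	RGXCmdHelperReleaseCmdCCB(1, &sCmdHelperData, ...); - write server syncs, release the space
+*/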
+
+/*
+ Reserve space in the CCB and fill in the command and client sync data
+*/
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+ IMG_UINT32 ui32AllocSize = 0;
+ IMG_UINT32 i;
+ IMG_UINT8 *pui8StartPtr;
+ PVRSRV_ERROR eError;
+
+ /*
+	  Work out how much space we need for all the command(s)
+ */
+ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+
+
+ for (i = 0; i < ui32CmdCount; i++)
+ {
+ if ((asCmdHelperData[0].ui32PDumpFlags ^ asCmdHelperData[i].ui32PDumpFlags) & PDUMP_FLAGS_CONTINUOUS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: PDump continuous is not consistent (%s != %s) for command %d",
+ __func__,
+ PDUMP_IS_CONTINUOUS(asCmdHelperData[0].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+ PDUMP_IS_CONTINUOUS(asCmdHelperData[i].ui32PDumpFlags)?"IMG_TRUE":"IMG_FALSE",
+					 i));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ /*
+ Acquire space in the CCB for all the command(s).
+ */
+ eError = RGXAcquireCCB(asCmdHelperData[0].psClientCCB,
+ ui32AllocSize,
+ (void **)&pui8StartPtr,
+ asCmdHelperData[0].ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /*
+ For each command fill in the fence, DM, and update command
+
+ Note:
+ We only fill in the client fences here, the server fences (and updates)
+ will be filled in together at the end. This is because we might fail the
+	  kernel CCB alloc and would then have to roll back the server syncs if
+	  we had performed the operation here.
+ */
+ for (i = 0; i < ui32CmdCount; i++)
+ {
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = & asCmdHelperData[i];
+ IMG_UINT8 *pui8CmdPtr;
+ IMG_UINT8 *pui8ServerFenceStart = NULL;
+ IMG_UINT8 *pui8ServerUpdateStart = NULL;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CtxAddr = FWCommonContextGetFWAddress(asCmdHelperData->psClientCCB->psServerCommonContext).ui32Addr;
+ IMG_UINT32 ui32CcbWoff = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(asCmdHelperData->psClientCCB->psServerCommonContext));
+#endif
+
+ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+ {
+ PDUMPCOMMENT("Start of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+ }
+
+ /*
+ Create the fence command.
+ */
+ if (psCmdHelperData->ui32FenceCmdSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+ IMG_UINT k, uiNextValueIndex;
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
+
+ /* Fences are at the start of the command */
+ pui8CmdPtr = pui8StartPtr;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_FENCE;
+ /* Assign this Fence a 'timestamp' (and increment it) */
+ psHeader->ui32SubmissionOrdinal = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+
+ psHeader->ui32CmdSize = psCmdHelperData->ui32FenceCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ /* Fill in the client fences */
+ uiNextValueIndex = 0;
+ for (k = 0; k < psCmdHelperData->ui32ClientFenceCount; k++)
+ {
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+
+ psUFOPtr->puiAddrUFO = psCmdHelperData->pauiFenceUFOAddress[k];
+
+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+ {
+ psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ }
+ else
+ {
+ /* Only increment uiNextValueIndex for non-sync checkpoints
+ * (as paui32FenceValue only contains values for sync prims)
+ */
+ psUFOPtr->ui32Value = psCmdHelperData->paui32FenceValue[uiNextValueIndex++];
+ }
+ pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+ PVR_DPF((PVR_DBG_ERROR, "%s client sync fence - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+ PDUMPCOMMENT(".. %s client sync fence - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+
+ }
+ pui8ServerFenceStart = pui8CmdPtr;
+ }
+
+ /* jump over the Server fences */
+ pui8CmdPtr = pui8StartPtr + psCmdHelperData->ui32FenceCmdSize;
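+		/*
+		   Layout of one command within the reserved CCB space (matching the
+		   offsets accumulated at the end of this loop); every region is
+		   optional and contributes zero bytes when its size is 0:
+
+		   [FENCE][PRE_TIMESTAMP][DM][POST_TIMESTAMP][RMW_UPDATE][UPDATE][UNFENCED_UPDATE]
+		*/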
+
+
+ /*
+ Create the pre DM timestamp commands. Pre and Post timestamp commands are supposed to
+ sandwich the DM cmd. The padding code with the CCB wrap upsets the FW if we don't have
+ the task type bit cleared for POST_TIMESTAMPs. That's why we have 2 different cmd types.
+ */
+ if (psCmdHelperData->ui32PreTimeStampCmdSize != 0)
+ {
+ RGXWriteTimestampCommand(& pui8CmdPtr,
+ RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP,
+ psCmdHelperData->pPreTimestampAddr);
+ }
+
+ /*
+ Create the DM command
+ */
+ if (psCmdHelperData->ui32DMCmdSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = psCmdHelperData->eType;
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCmdHelperData->psClientCCB->psServerCommonContext);
+
+ if (psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR)
+ {
+ /* Assign this PR Fence a timestamp (and increment it) */
+ psHeader->ui32SubmissionOrdinal = OSAtomicIncrement(&psDevInfo->iCCBSubmissionOrdinal);
+ }
+ }
+
+ psHeader->ui32CmdSize = psCmdHelperData->ui32DMCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (psCmdHelperData->psWorkEstKickData != NULL)
+ {
+ PVR_ASSERT(psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_TA ||
+ psCmdHelperData->eType == RGXFWIF_CCB_CMD_TYPE_3D);
+ psHeader->sWorkEstKickData = *psCmdHelperData->psWorkEstKickData;
+ }
+ else
+ {
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+ }
+#endif
+
+ psHeader->sRobustnessResetReason = psCmdHelperData->sRobustnessResetReason;
+
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ /* The buffer is write-combine, so no special device memory treatment required. */
+ OSCachedMemCopy(pui8CmdPtr, psCmdHelperData->pui8DMCmd, psCmdHelperData->ui32CmdSize);
+ pui8CmdPtr += psCmdHelperData->ui32CmdSize;
+ }
+
+ if (psCmdHelperData->ui32PostTimeStampCmdSize != 0)
+ {
+ RGXWriteTimestampCommand(& pui8CmdPtr,
+ RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP,
+ psCmdHelperData->pPostTimestampAddr);
+ }
+
+
+ if (psCmdHelperData->ui32RMWUFOCmdSize != 0)
+ {
+ RGXFWIF_CCB_CMD_HEADER * psHeader;
+ RGXFWIF_UFO * psUFO;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32RMWUFOCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ psUFO = (RGXFWIF_UFO *) pui8CmdPtr;
+ psUFO->puiAddrUFO = psCmdHelperData->pRMWUFOAddr;
+
+ pui8CmdPtr += sizeof(RGXFWIF_UFO);
+ }
+
+
+ /*
+ Create the update command.
+
+ Note:
+ We only fill in the client updates here, the server updates (and fences)
+ will be filled in together at the end
+ */
+ if (psCmdHelperData->ui32UpdateCmdSize)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psHeader;
+ IMG_UINT k, uiNextValueIndex;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UPDATE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32UpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+ pui8CmdPtr += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ /* Fill in the client updates */
+ uiNextValueIndex = 0;
+ for (k = 0; k < psCmdHelperData->ui32ClientUpdateCount; k++)
+ {
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *) pui8CmdPtr;
+
+ psUFOPtr->puiAddrUFO = psCmdHelperData->pauiUpdateUFOAddress[k];
+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+ {
+ psUFOPtr->ui32Value = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ }
+ else
+ {
+ /* Only increment uiNextValueIndex for non-sync checkpoints
+ * (as paui32UpdateValue only contains values for sync prims)
+ */
+ psUFOPtr->ui32Value = psCmdHelperData->paui32UpdateValue[uiNextValueIndex++];
+ }
+ pui8CmdPtr += sizeof(RGXFWIF_UFO);
+
+#if defined SYNC_COMMAND_DEBUG
+ PVR_DPF((PVR_DBG_ERROR, "%s client sync update - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value));
+#endif
+ PDUMPCOMMENT(".. %s client sync update - 0x%x -> 0x%x",
+ psCmdHelperData->psClientCCB->szName, psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value);
+
+ }
+ pui8ServerUpdateStart = pui8CmdPtr;
+ }
+
+ /* Save the server sync fence & update offsets for submit time */
+ psCmdHelperData->pui8ServerFenceStart = pui8ServerFenceStart;
+ psCmdHelperData->pui8ServerUpdateStart = pui8ServerUpdateStart;
+
+ /* jump over the fenced update */
+ if (psCmdHelperData->ui32UnfencedUpdateCmdSize != 0)
+ {
+			RGXFWIF_CCB_CMD_HEADER * const psHeader = (RGXFWIF_CCB_CMD_HEADER *) (pui8StartPtr +
+			                                          psCmdHelperData->ui32FenceCmdSize +
+			                                          psCmdHelperData->ui32PreTimeStampCmdSize +
+			                                          psCmdHelperData->ui32DMCmdSize +
+			                                          psCmdHelperData->ui32PostTimeStampCmdSize +
+			                                          psCmdHelperData->ui32RMWUFOCmdSize +
+			                                          psCmdHelperData->ui32UpdateCmdSize);
+ /* set up the header for unfenced updates */
+ PVR_ASSERT(psHeader); /* Could be zero if ui32UpdateCmdSize is 0 which is never expected */
+ psHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE;
+ psHeader->ui32CmdSize = psCmdHelperData->ui32UnfencedUpdateCmdSize - sizeof(RGXFWIF_CCB_CMD_HEADER);
+ psHeader->ui32ExtJobRef = psCmdHelperData->ui32ExtJobRef;
+ psHeader->ui32IntJobRef = psCmdHelperData->ui32IntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ psHeader->sWorkEstKickData.ui64ReturnDataIndex = 0;
+ psHeader->sWorkEstKickData.ui64Deadline = 0;
+ psHeader->sWorkEstKickData.ui64CyclesPrediction = 0;
+#endif
+
+ /* jump over the header */
+ psCmdHelperData->pui8ServerUnfencedUpdateStart = ((IMG_UINT8*) psHeader) + sizeof(RGXFWIF_CCB_CMD_HEADER);
+ }
+ else
+ {
+ psCmdHelperData->pui8ServerUnfencedUpdateStart = NULL;
+ }
+
+ /* Save start for sanity checking at submit time */
+ psCmdHelperData->pui8StartPtr = pui8StartPtr;
+
+ /* Set the start pointer for the next iteration around the loop */
+ pui8StartPtr +=
+ psCmdHelperData->ui32FenceCmdSize +
+ psCmdHelperData->ui32PreTimeStampCmdSize +
+ psCmdHelperData->ui32DMCmdSize +
+ psCmdHelperData->ui32PostTimeStampCmdSize +
+ psCmdHelperData->ui32RMWUFOCmdSize +
+ psCmdHelperData->ui32UpdateCmdSize +
+ psCmdHelperData->ui32UnfencedUpdateCmdSize;
+
+ if (psCmdHelperData->ui32ClientFenceCount+psCmdHelperData->ui32ClientUpdateCount != 0)
+ {
+ PDUMPCOMMENT("End of %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+ }
+ else
+ {
+ PDUMPCOMMENT("No %s client syncs for cmd[%d] on FWCtx %08x Woff 0x%x bytes",
+ psCmdHelperData->psClientCCB->szName, i, ui32CtxAddr, ui32CcbWoff);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+/*
+ Fill in the server syncs data and release the CCB space
+*/
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ const IMG_CHAR *pcszDMName,
+ IMG_UINT32 ui32CtxAddr)
+{
+ IMG_UINT32 ui32AllocSize = 0;
+ IMG_UINT32 i;
+#if defined(LINUX)
+ IMG_BOOL bTraceChecks = trace_rogue_are_fence_checks_traced();
+ IMG_BOOL bTraceUpdates = trace_rogue_are_fence_updates_traced();
+#endif
+
+ /*
+	  Work out how much space we need for all the command(s)
+ */
+ ui32AllocSize = RGXCmdHelperGetCommandSize(ui32CmdCount, asCmdHelperData);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVLockServerSync();
+#endif
+
+ /*
+ For each command fill in the server sync info
+ */
+ for (i=0;i<ui32CmdCount;i++)
+ {
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData = &asCmdHelperData[i];
+ IMG_UINT8 *pui8ServerFenceStart = psCmdHelperData->pui8ServerFenceStart;
+ IMG_UINT8 *pui8ServerUpdateStart = psCmdHelperData->pui8ServerUpdateStart;
+ IMG_UINT8 *pui8ServerUnfencedUpdateStart = psCmdHelperData->pui8ServerUnfencedUpdateStart;
+ IMG_UINT32 j;
+
+ /* Now fill in the server fence and updates together */
+ for (j = 0; j < psCmdHelperData->ui32ServerSyncCount; j++)
+ {
+ RGXFWIF_UFO *psUFOPtr;
+ IMG_UINT32 ui32UpdateValue;
+ IMG_UINT32 ui32FenceValue;
+ IMG_UINT32 ui32SyncAddr;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32Flag = psCmdHelperData->paui32ServerSyncFlags[j] & psCmdHelperData->ui32ServerSyncFlagMask;
+ IMG_BOOL bFence = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)!=0)?IMG_TRUE:IMG_FALSE;
+ IMG_BOOL bUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)!=0)?IMG_TRUE:IMG_FALSE;
+ const IMG_BOOL bUnfencedUpdate = ((ui32Flag & PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE) == PVRSRV_CLIENT_SYNC_PRIM_OP_UNFENCED_UPDATE)
+ ? IMG_TRUE
+ : IMG_FALSE;
+
+ eError = PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(psCmdHelperData->papsServerSyncs[j],
+ bUpdate,
+ &ui32FenceValue,
+ &ui32UpdateValue);
+ /* This function can't fail */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /*
+			   As server syncs always fence (we have a check in RGXCmdHelperInitCmdCCB
+ which ensures the client is playing ball) the filling in of the fence
+ is unconditional.
+ */
+ eError = ServerSyncGetFWAddr(psCmdHelperData->papsServerSyncs[j], &ui32SyncAddr);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to read Server Sync FW address (%d)",
+ __func__, eError));
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ if (bFence)
+ {
+ PVR_ASSERT(pui8ServerFenceStart != NULL);
+
+ psUFOPtr = (RGXFWIF_UFO *) pui8ServerFenceStart;
+ psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+ psUFOPtr->ui32Value = ui32FenceValue;
+ pui8ServerFenceStart += sizeof(RGXFWIF_UFO);
+
+#if defined(LINUX)
+ if (bTraceChecks)
+ {
+ trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ 1,
+ &psUFOPtr->puiAddrUFO,
+ &psUFOPtr->ui32Value);
+ }
+#endif
+ }
+
+ /* If there is an update then fill that in as well */
+ if (bUpdate)
+ {
+ if (bUnfencedUpdate)
+ {
+ PVR_ASSERT(pui8ServerUnfencedUpdateStart != NULL);
+
+ psUFOPtr = (RGXFWIF_UFO *) pui8ServerUnfencedUpdateStart;
+ psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+ psUFOPtr->ui32Value = ui32UpdateValue;
+ pui8ServerUnfencedUpdateStart += sizeof(RGXFWIF_UFO);
+ }
+ else
+ {
+ /* fenced update */
+ PVR_ASSERT(pui8ServerUpdateStart != NULL);
+
+ psUFOPtr = (RGXFWIF_UFO *) pui8ServerUpdateStart;
+ psUFOPtr->puiAddrUFO.ui32Addr = ui32SyncAddr;
+ psUFOPtr->ui32Value = ui32UpdateValue;
+ pui8ServerUpdateStart += sizeof(RGXFWIF_UFO);
+ }
+#if defined(LINUX)
+ if (bTraceUpdates)
+ {
+ trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ 1,
+ &psUFOPtr->puiAddrUFO,
+ &psUFOPtr->ui32Value);
+ }
+#endif
+
+#if defined(NO_HARDWARE)
+ /*
+ There is no FW so the host has to do any Sync updates
+			   (client sync updates are done in the client).
+ */
+ PVRSRVServerSyncPrimSetKM(psCmdHelperData->papsServerSyncs[j], ui32UpdateValue);
+#endif
+ }
+ }
+
+#if defined(LINUX)
+ if (bTraceChecks)
+ {
+ trace_rogue_fence_checks(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ psCmdHelperData->ui32ClientFenceCount,
+ psCmdHelperData->pauiFenceUFOAddress,
+ psCmdHelperData->paui32FenceValue);
+ }
+ if (bTraceUpdates)
+ {
+ trace_rogue_fence_updates(psCmdHelperData->pszCommandName,
+ pcszDMName,
+ ui32CtxAddr,
+ psCmdHelperData->psClientCCB->ui32HostWriteOffset + ui32AllocSize,
+ psCmdHelperData->ui32ClientUpdateCount,
+ psCmdHelperData->pauiUpdateUFOAddress,
+ psCmdHelperData->paui32UpdateValue);
+ }
+#endif
+
+ if (psCmdHelperData->ui32ServerSyncCount)
+ {
+ /*
+ Do some sanity checks to ensure we did the pointer math right
+ */
+ if (pui8ServerFenceStart != NULL)
+ {
+ PVR_ASSERT(pui8ServerFenceStart ==
+ (psCmdHelperData->pui8StartPtr +
+ psCmdHelperData->ui32FenceCmdSize));
+ }
+
+ if (pui8ServerUpdateStart != NULL)
+ {
+ PVR_ASSERT(pui8ServerUpdateStart ==
+ psCmdHelperData->pui8StartPtr +
+ psCmdHelperData->ui32FenceCmdSize +
+ psCmdHelperData->ui32PreTimeStampCmdSize +
+ psCmdHelperData->ui32DMCmdSize +
+ psCmdHelperData->ui32RMWUFOCmdSize +
+ psCmdHelperData->ui32PostTimeStampCmdSize +
+ psCmdHelperData->ui32UpdateCmdSize);
+ }
+
+ if (pui8ServerUnfencedUpdateStart != NULL)
+ {
+ PVR_ASSERT(pui8ServerUnfencedUpdateStart ==
+ psCmdHelperData->pui8StartPtr +
+ psCmdHelperData->ui32FenceCmdSize +
+ psCmdHelperData->ui32PreTimeStampCmdSize +
+ psCmdHelperData->ui32DMCmdSize +
+ psCmdHelperData->ui32RMWUFOCmdSize +
+ psCmdHelperData->ui32PostTimeStampCmdSize +
+ psCmdHelperData->ui32UpdateCmdSize +
+ psCmdHelperData->ui32UnfencedUpdateCmdSize);
+ }
+ }
+
+ /*
+ All the commands have been filled in so release the CCB space.
+ The FW still won't run this command until we kick it
+ */
+ PDUMPCOMMENTWITHFLAGS(psCmdHelperData->ui32PDumpFlags,
+ "%s Command Server Release on FWCtx %08x",
+ psCmdHelperData->pszCommandName, ui32CtxAddr);
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVUnlockServerSync();
+#endif
+
+ _RGXClientCCBDumpCommands(asCmdHelperData[0].psClientCCB,
+ asCmdHelperData[0].psClientCCB->ui32HostWriteOffset,
+ ui32AllocSize);
+
+ RGXReleaseCCB(asCmdHelperData[0].psClientCCB,
+ ui32AllocSize,
+ asCmdHelperData[0].ui32PDumpFlags);
+
+ asCmdHelperData[0].psClientCCB->bStateOpen = IMG_FALSE;
+}
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData)
+{
+ IMG_UINT32 ui32AllocSize = 0;
+ IMG_UINT32 i;
+
+ /*
+	  Work out how much space we need for all the command(s)
+ */
+ for (i = 0; i < ui32CmdCount; i++)
+ {
+ ui32AllocSize +=
+ asCmdHelperData[i].ui32FenceCmdSize +
+ asCmdHelperData[i].ui32DMCmdSize +
+ asCmdHelperData[i].ui32UpdateCmdSize +
+ asCmdHelperData[i].ui32UnfencedUpdateCmdSize +
+ asCmdHelperData[i].ui32PreTimeStampCmdSize +
+ asCmdHelperData[i].ui32PostTimeStampCmdSize +
+ asCmdHelperData[i].ui32RMWUFOCmdSize;
+ }
+
+ return ui32AllocSize;
+}
+
+/* Work out how much of an offset there is to a specific command. */
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ IMG_UINT32 ui32Cmdindex)
+{
+ IMG_UINT32 ui32Offset = 0;
+ IMG_UINT32 i;
+
+ for (i = 0; i < ui32Cmdindex; i++)
+ {
+ ui32Offset +=
+ asCmdHelperData[i].ui32FenceCmdSize +
+ asCmdHelperData[i].ui32DMCmdSize +
+ asCmdHelperData[i].ui32UpdateCmdSize +
+ asCmdHelperData[i].ui32UnfencedUpdateCmdSize +
+ asCmdHelperData[i].ui32PreTimeStampCmdSize +
+ asCmdHelperData[i].ui32PostTimeStampCmdSize +
+ asCmdHelperData[i].ui32RMWUFOCmdSize;
+ }
+
+ return ui32Offset;
+}
+
+/* Returns the offset of the data master command from a write offset */
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData)
+{
+ return psCmdHelperData->ui32FenceCmdSize + psCmdHelperData->ui32PreTimeStampCmdSize;
+}
+
+static const char *_CCBCmdTypename(RGXFWIF_CCB_CMD_TYPE cmdType)
+{
+ switch (cmdType)
+ {
+ case RGXFWIF_CCB_CMD_TYPE_TA: return "TA";
+ case RGXFWIF_CCB_CMD_TYPE_3D: return "3D";
+ case RGXFWIF_CCB_CMD_TYPE_CDM: return "CDM";
+ case RGXFWIF_CCB_CMD_TYPE_TQ_3D: return "TQ_3D";
+ case RGXFWIF_CCB_CMD_TYPE_TQ_2D: return "TQ_2D";
+ case RGXFWIF_CCB_CMD_TYPE_3D_PR: return "3D_PR";
+ case RGXFWIF_CCB_CMD_TYPE_NULL: return "NULL";
+ case RGXFWIF_CCB_CMD_TYPE_SHG: return "SHG";
+ case RGXFWIF_CCB_CMD_TYPE_RTU: return "RTU";
+ case RGXFWIF_CCB_CMD_TYPE_RTU_FC: return "RTU_FC";
+ case RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP: return "PRE_TIMESTAMP";
+ case RGXFWIF_CCB_CMD_TYPE_TQ_TDM: return "TQ_TDM";
+
+ case RGXFWIF_CCB_CMD_TYPE_FENCE: return "FENCE";
+ case RGXFWIF_CCB_CMD_TYPE_UPDATE: return "UPDATE";
+ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE: return "RMW_UPDATE";
+ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR: return "FENCE_PR";
+ case RGXFWIF_CCB_CMD_TYPE_PRIORITY: return "PRIORITY";
+
+ case RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP: return "POST_TIMESTAMP";
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE: return "UNFENCED_UPDATE";
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE: return "UNFENCED_RMW_UPDATE";
+
+ case RGXFWIF_CCB_CMD_TYPE_PADDING: return "PADDING";
+
+ default:
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return "INVALID";
+}
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl;
+ IMG_UINT32 ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psCurrentClientCCB == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB is NULL"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+ ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+ ui32SampledDpOff = psClientCCBCtrl->ui32DepOffset;
+ ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+ if (ui32SampledRdOff > psClientCCBCtrl->ui32WrapMask ||
+ ui32SampledDpOff > psClientCCBCtrl->ui32WrapMask ||
+ ui32SampledWrOff > psClientCCBCtrl->ui32WrapMask)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "CheckForStalledCCB: CCCB has invalid offset (ROFF=%d DOFF=%d WOFF=%d)",
+ ui32SampledRdOff, ui32SampledDpOff, ui32SampledWrOff));
+ return PVRSRV_ERROR_INVALID_OFFSET;
+ }
+
+ if (ui32SampledRdOff != ui32SampledWrOff &&
+ psCurrentClientCCB->ui32LastROff != psCurrentClientCCB->ui32LastWOff &&
+ ui32SampledRdOff == psCurrentClientCCB->ui32LastROff &&
+ (psCurrentClientCCB->ui32ByteCount - psCurrentClientCCB->ui32LastByteCount) < psCurrentClientCCB->ui32Size)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDevNode->pvDevice;
+
+ /* Only log a stalled CCB if GPU is idle (any state other than POW_ON is considered idle) */
+ if (psDevInfo->psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_ON)
+ {
+ static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+ "force";
+#else
+ "warn";
+#endif
+ /* Don't log this by default unless debugging since a higher up
+ * function will log the stalled condition. Helps avoid double
+ * messages in the log.
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s (%s): CCCB has not progressed (ROFF=%d DOFF=%d WOFF=%d) for \"%s\"",
+ __func__, pszStalledAction, ui32SampledRdOff,
+ ui32SampledDpOff, ui32SampledWrOff,
+ (IMG_PCHAR)&psCurrentClientCCB->szName));
+ eError = PVRSRV_ERROR_CCCB_STALLED;
+
+ {
+ IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+ RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext);
+
+					/* Only try to recover a 'stalled' context (i.e. one waiting on a fence), as some work (e.g. compute) could
+ * take a long time to complete, during which time the CCB ptrs would not advance.
+ */
+ if ((psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE) ||
+ (psCommandHeader->eCmdType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+ {
+ /* Acquire the cCCB recovery lock */
+ OSLockAcquire(psDevInfo->hCCBRecoveryLock);
+
+ if (!psDevInfo->pvEarliestStalledClientCCB)
+ {
+ psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB;
+ psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32SubmissionOrdinal;
+ }
+ else
+ {
+ /* Check if this fence cmd header has an older submission stamp than the one we are currently considering unblocking
+ * (account for submission stamp wrap by checking diff is less than 0x80000000) - if it is older, then this becomes
+					 * our preferred fence to be unblocked.
+ */
+ if ((psCommandHeader->ui32SubmissionOrdinal < psDevInfo->ui32OldestSubmissionOrdinal) &&
+					    ((psDevInfo->ui32OldestSubmissionOrdinal - psCommandHeader->ui32SubmissionOrdinal) < 0x80000000))
+ {
+ psDevInfo->pvEarliestStalledClientCCB = (void*)psCurrentClientCCB;
+ psDevInfo->ui32OldestSubmissionOrdinal = psCommandHeader->ui32SubmissionOrdinal;
+ }
+ }
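+
+					/*
+					   Wrap handling, illustrated with assumed ordinals: a
+					   sampled ordinal of 0x00000010 against an oldest of
+					   0xFFFFFFF0 passes the '<' test, but the difference
+					   0xFFFFFFE0 is not below the half-range threshold, so
+					   this post-wrap (newer) fence is correctly rejected.
+					*/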
+
+ /* Release the cCCB recovery lock */
+ OSLockRelease(psDevInfo->hCCBRecoveryLock);
+ }
+ }
+ }
+ }
+
+ psCurrentClientCCB->ui32LastROff = ui32SampledRdOff;
+ psCurrentClientCCB->ui32LastWOff = ui32SampledWrOff;
+ psCurrentClientCCB->ui32LastByteCount = psCurrentClientCCB->ui32ByteCount;
+
+ return eError;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+#endif
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+ IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+ IMG_UINT32 ui32Offset = psClientCCBCtrl->ui32ReadOffset;
+ IMG_UINT32 ui32DepOffset = psClientCCBCtrl->ui32DepOffset;
+ IMG_UINT32 ui32EndOffset = psCurrentClientCCB->ui32HostWriteOffset;
+ IMG_UINT32 ui32WrapMask = psClientCCBCtrl->ui32WrapMask;
+ IMG_CHAR * pszState = "Ready";
+
+ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X (%s)", sFWCommonContext.ui32Addr,
+ (IMG_PCHAR)&psCurrentClientCCB->szName);
+ if (ui32Offset == ui32EndOffset)
+ {
+ PVR_DUMPDEBUG_LOG(" `--<Empty>");
+ }
+
+ while (ui32Offset != ui32EndOffset)
+ {
+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader = (RGXFWIF_CCB_CMD_HEADER*)(pui8ClientCCBBuff + ui32Offset);
+ IMG_UINT32 ui32NextOffset = (ui32Offset + psCmdHeader->ui32CmdSize + sizeof(RGXFWIF_CCB_CMD_HEADER)) & ui32WrapMask;
+ IMG_BOOL bLastCommand = (ui32NextOffset == ui32EndOffset)? IMG_TRUE: IMG_FALSE;
+ IMG_BOOL bLastUFO;
+ #define CCB_SYNC_INFO_LEN 80
+ IMG_CHAR pszSyncInfo[CCB_SYNC_INFO_LEN];
+ IMG_UINT32 ui32NoOfUpdates, i;
+ RGXFWIF_UFO *psUFOPtr;
+
+ ui32NoOfUpdates = psCmdHeader->ui32CmdSize / sizeof(RGXFWIF_UFO);
+ psUFOPtr = (RGXFWIF_UFO*)(pui8ClientCCBBuff + ui32Offset + sizeof(RGXFWIF_CCB_CMD_HEADER));
+ pszSyncInfo[0] = '\0';
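+
+		/*
+		   ui32NextOffset above wraps with a mask rather than a modulo.
+		   Illustrative example, assuming a power-of-two CCB where
+		   ui32WrapMask == ui32Size - 1 == 0xFFF: (0xFF0 + 0x20) & 0xFFF = 0x10.
+		*/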
+
+ if (ui32Offset == ui32DepOffset)
+ {
+ pszState = "Waiting";
+ }
+
+ PVR_DUMPDEBUG_LOG(" %s--%s %s @ %u Int=%u Ext=%u",
+ bLastCommand? "`": "|",
+ pszState, _CCBCmdTypename(psCmdHeader->eCmdType),
+ ui32Offset, psCmdHeader->ui32IntJobRef, psCmdHeader->ui32ExtJobRef
+ );
+
+ /* switch on type and write checks and updates */
+ switch (psCmdHeader->eCmdType)
+ {
+ case RGXFWIF_CCB_CMD_TYPE_UPDATE:
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_UPDATE:
+ case RGXFWIF_CCB_CMD_TYPE_FENCE:
+ case RGXFWIF_CCB_CMD_TYPE_FENCE_PR:
+ {
+ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+ {
+ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+ {
+ SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo, CCB_SYNC_INFO_LEN);
+ }
+ else
+ {
+ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo, CCB_SYNC_INFO_LEN);
+ }
+#endif
+ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val=0x%08x %s",
+ bLastCommand? " ": "|",
+ bLastUFO? "`": "|",
+ psUFOPtr->puiAddrUFO.ui32Addr, psUFOPtr->ui32Value,
+ pszSyncInfo
+ );
+ }
+ break;
+ }
+
+ case RGXFWIF_CCB_CMD_TYPE_RMW_UPDATE:
+ case RGXFWIF_CCB_CMD_TYPE_UNFENCED_RMW_UPDATE:
+ {
+ for (i = 0; i < ui32NoOfUpdates; i++, psUFOPtr++)
+ {
+ bLastUFO = (ui32NoOfUpdates-1 == i)? IMG_TRUE: IMG_FALSE;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT(psUFOPtr))
+ {
+ SyncCheckpointRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo, CCB_SYNC_INFO_LEN);
+ }
+ else
+ {
+ SyncRecordLookup(psDeviceNode, psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo, CCB_SYNC_INFO_LEN);
+ }
+#endif
+ PVR_DUMPDEBUG_LOG(" %s %s--Addr:0x%08x Val++ %s",
+ bLastCommand? " ": "|",
+ bLastUFO? "`": "|",
+ psUFOPtr->puiAddrUFO.ui32Addr,
+ pszSyncInfo
+ );
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ ui32Offset = ui32NextOffset;
+ }
+
+}
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP) */
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psCurrentClientCCB->psClientCCBCtrl;
+ IMG_UINT8 *pui8ClientCCBBuff = psCurrentClientCCB->pui8ClientCCB;
+ volatile IMG_UINT8 *pui8Ptr;
+ IMG_UINT32 ui32SampledRdOff = psClientCCBCtrl->ui32ReadOffset;
+ IMG_UINT32 ui32SampledDepOff = psClientCCBCtrl->ui32DepOffset;
+ IMG_UINT32 ui32SampledWrOff = psCurrentClientCCB->ui32HostWriteOffset;
+
+ pui8Ptr = pui8ClientCCBBuff + ui32SampledRdOff;
+
+ if ((ui32SampledRdOff == ui32SampledDepOff) &&
+ (ui32SampledRdOff != ui32SampledWrOff))
+ {
+ volatile RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8ClientCCBBuff + ui32SampledRdOff);
+ RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType;
+ volatile IMG_UINT8 *pui8Ptr = (IMG_UINT8 *)psCommandHeader;
+
+ /* CCB is stalled on a fence... */
+ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+ {
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVRSRV_RGXDEV_INFO *psDevInfo = FWCommonContextGetRGXDevInfo(psCurrentClientCCB->psServerCommonContext);
+ IMG_UINT32 ui32Val;
+#endif
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+ IMG_UINT32 jj;
+
+ /* Display details of the fence object on which the context is pending */
+ PVR_DUMPDEBUG_LOG("FWCtx 0x%08X @ %d (%s) pending on %s:",
+ sFWCommonContext.ui32Addr,
+ ui32SampledRdOff,
+ (IMG_PCHAR)&psCurrentClientCCB->szName,
+ _CCBCmdTypename(eCommandType));
+ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+ {
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+ ui32Val = 0;
+ RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val);
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+ psUFOPtr[jj].puiAddrUFO.ui32Addr,
+ psUFOPtr[jj].ui32Value, ui32Val);
+#endif
+ }
+
+ /* Advance psCommandHeader past the FENCE to the next command header (this will be the TA/3D command that is fenced) */
+ pui8Ptr = (IMG_UINT8 *)psUFOPtr + psCommandHeader->ui32CmdSize;
+ psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+ if( (uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+ {
+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X fenced command is of type %s",sFWCommonContext.ui32Addr, _CCBCmdTypename(psCommandHeader->eCmdType));
+ /* Advance psCommandHeader past the TA/3D to the next command header (this will possibly be an UPDATE) */
+ pui8Ptr += sizeof(*psCommandHeader) + psCommandHeader->ui32CmdSize;
+ psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)pui8Ptr;
+ /* If the next command is an update, display details of that so we can see what would then become unblocked */
+ if( (uintptr_t)psCommandHeader != ((uintptr_t)pui8ClientCCBBuff + ui32SampledWrOff))
+ {
+ eCommandType = psCommandHeader->eCmdType;
+
+ if (eCommandType == RGXFWIF_CCB_CMD_TYPE_UPDATE)
+ {
+ psUFOPtr = (RGXFWIF_UFO *)((IMG_UINT8 *)psCommandHeader + sizeof(*psCommandHeader));
+ PVR_DUMPDEBUG_LOG(" preventing %s:",_CCBCmdTypename(eCommandType));
+ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+ {
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value=0x%08x",psUFOPtr[jj].puiAddrUFO.ui32Addr, psUFOPtr[jj].ui32Value);
+#else
+ ui32Val = 0;
+ RGXReadWithSP(psDevInfo, psUFOPtr[jj].puiAddrUFO.ui32Addr, &ui32Val);
+ PVR_DUMPDEBUG_LOG(" Addr:0x%08x Value(Host)=0x%08x Value(FW)=0x%08x",
+ psUFOPtr[jj].puiAddrUFO.ui32Addr,
+ psUFOPtr[jj].ui32Value,
+ ui32Val);
+#endif
+ }
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" FWCtx 0x%08X has no further commands",sFWCommonContext.ui32Addr);
+ }
+ }
+ }
+}
+
+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGX_CLIENT_CCB *psStalledClientCCB;
+
+ PVR_ASSERT(psDevInfo);
+
+ psStalledClientCCB = (RGX_CLIENT_CCB *)psDevInfo->pvEarliestStalledClientCCB;
+
+ if (psStalledClientCCB)
+ {
+ volatile RGXFWIF_CCCB_CTL *psClientCCBCtrl = psStalledClientCCB->psClientCCBCtrl;
+ IMG_UINT32 ui32SampledReadOffset = psClientCCBCtrl->ui32ReadOffset;
+ IMG_UINT8 *pui8Ptr = (psStalledClientCCB->pui8ClientCCB + ui32SampledReadOffset);
+ RGXFWIF_CCB_CMD_HEADER *psCommandHeader = (RGXFWIF_CCB_CMD_HEADER *)(pui8Ptr);
+ RGXFWIF_CCB_CMD_TYPE eCommandType = psCommandHeader->eCmdType;
+
+ if ((eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE) || (eCommandType == RGXFWIF_CCB_CMD_TYPE_FENCE_PR))
+ {
+ RGXFWIF_UFO *psUFOPtr = (RGXFWIF_UFO *)(pui8Ptr + sizeof(*psCommandHeader));
+ IMG_UINT32 jj;
+ IMG_UINT32 ui32NumUnsignalledUFOs = 0;
+ IMG_UINT32 ui32UnsignalledUFOVaddrs[PVRSRV_MAX_SYNC_PRIMS];
+
+ PVR_LOG(("Fence found on context 0x%x '%s' has %d UFOs", FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext).ui32Addr, psStalledClientCCB->szName, (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO))));
+ for (jj=0; jj<psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO); jj++)
+ {
+ if (PVRSRV_UFO_IS_SYNC_CHECKPOINT((RGXFWIF_UFO *)&psUFOPtr[jj]))
+ {
+ IMG_UINT32 ui32ReadValue = SyncCheckpointStateFromUFO(psDevInfo->psDeviceNode,
+ psUFOPtr[jj].puiAddrUFO.ui32Addr);
+ PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x (currently 0x%x)", jj+1,
+ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)),
+ psUFOPtr[jj].puiAddrUFO.ui32Addr,
+ psUFOPtr[jj].ui32Value,
+ ui32ReadValue));
+ /* If fence is unmet, dump debug info on it */
+ if (ui32ReadValue != psUFOPtr[jj].ui32Value)
+ {
+ /* Add to our list to pass to pvr_sync */
+ ui32UnsignalledUFOVaddrs[ui32NumUnsignalledUFOs] = psUFOPtr[jj].puiAddrUFO.ui32Addr;
+ ui32NumUnsignalledUFOs++;
+ }
+ }
+ else
+ {
+ PVR_LOG((" %d/%d FWAddr 0x%x requires 0x%x", jj+1,
+ (IMG_UINT32)(psCommandHeader->ui32CmdSize/sizeof(RGXFWIF_UFO)),
+ psUFOPtr[jj].puiAddrUFO.ui32Addr,
+ psUFOPtr[jj].ui32Value));
+ }
+ }
+#if defined(SUPPORT_NATIVE_FENCE_SYNC) || defined (SUPPORT_FALLBACK_FENCE_SYNC)
+ if (ui32NumUnsignalledUFOs > 0)
+ {
+ IMG_UINT32 ui32NumSyncsOwned;
+ PVRSRV_ERROR eErr = SyncCheckpointDumpInfoOnStalledUFOs(ui32NumUnsignalledUFOs, &ui32UnsignalledUFOVaddrs[0], &ui32NumSyncsOwned);
+
+ PVR_LOG_IF_ERROR(eErr, "SyncCheckpointDumpInfoOnStalledUFOs() call failed.");
+ PVR_LOG(("%d sync checkpoint%s owned by pvr_sync in stalled context", ui32NumSyncsOwned, ui32NumSyncsOwned==1 ? "" : "s"));
+ }
+#endif
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+ if (ui32NumUnsignalledUFOs > 0)
+ {
+ RGXFWIF_KCCB_CMD sSignalFencesCmd;
+
+ sSignalFencesCmd.eCmdType = RGXFWIF_KCCB_CMD_FORCE_UPDATE;
+ sSignalFencesCmd.eDM = RGXFWIF_DM_GP;
+ sSignalFencesCmd.uCmdData.sForceUpdateData.psContext = FWCommonContextGetFWAddress(psStalledClientCCB->psServerCommonContext);
+ sSignalFencesCmd.uCmdData.sForceUpdateData.ui32CCBFenceOffset = ui32SampledReadOffset;
+
+ PVR_LOG(("Forced update command issued for FWCtx 0x%08X", sSignalFencesCmd.uCmdData.sForceUpdateData.psContext.ui32Addr));
+
+ RGXScheduleCommand(FWCommonContextGetRGXDevInfo(psStalledClientCCB->psServerCommonContext),
+ RGXFWIF_DM_GP,
+ &sSignalFencesCmd,
+ sizeof(sSignalFencesCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+ }
+ psDevInfo->pvEarliestStalledClientCCB = NULL;
+ }
+}
+
+/******************************************************************************
+ End of file (rgxccb.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxccb.h b/drivers/gpu/drm/img-rogue/1.10/rgxccb.h
new file mode 100644
index 00000000000000..48d7f0bd1cef69
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxccb.h
@@ -0,0 +1,261 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Circular Command Buffer functionality.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Circular Command Buffer functionality.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCCB_H__)
+#define __RGXCCB_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgx_fwif_shared.h"
+#include "rgxdebug.h"
+#include "rgxdefs_km.h"
+#include "pvr_notifier.h"
+
+#define MAX_CLIENT_CCB_NAME 30
+#define SYNC_FLAG_MASK_ALL IMG_UINT32_MAX
+
+typedef struct _RGX_CLIENT_CCB_ RGX_CLIENT_CCB;
+
+/*
+ This structure is declared here as it's allocated on the heap by
+ the callers
+*/
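+/* (For a single command, callers may equally place it on the stack, as
+   rgxcompute.c does with its asCmdHelperData[1] array.) */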
+
+typedef struct _RGX_CCB_CMD_HELPER_DATA_ {
+ /* Data setup at command init time */
+ RGX_CLIENT_CCB *psClientCCB;
+ IMG_CHAR *pszCommandName;
+ IMG_UINT32 ui32PDumpFlags;
+
+ IMG_UINT32 ui32ClientFenceCount;
+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress;
+ IMG_UINT32 *paui32FenceValue;
+ IMG_UINT32 ui32ClientUpdateCount;
+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress;
+ IMG_UINT32 *paui32UpdateValue;
+
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_UINT32 *paui32ServerSyncFlags;
+ IMG_UINT32 ui32ServerSyncFlagMask;
+ SERVER_SYNC_PRIMITIVE **papsServerSyncs;
+
+ RGXFWIF_CCB_CMD_TYPE eType;
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT8 *pui8DMCmd;
+ IMG_UINT32 ui32FenceCmdSize;
+ IMG_UINT32 ui32DMCmdSize;
+ IMG_UINT32 ui32UpdateCmdSize;
+ IMG_UINT32 ui32UnfencedUpdateCmdSize;
+
+ /* timestamp commands */
+ PRGXFWIF_TIMESTAMP_ADDR pPreTimestampAddr;
+ IMG_UINT32 ui32PreTimeStampCmdSize;
+ PRGXFWIF_TIMESTAMP_ADDR pPostTimestampAddr;
+ IMG_UINT32 ui32PostTimeStampCmdSize;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+ IMG_UINT32 ui32RMWUFOCmdSize;
+
+ /* Data setup at command acquire time */
+ IMG_UINT8 *pui8StartPtr;
+ IMG_UINT8 *pui8ServerUpdateStart;
+ IMG_UINT8 *pui8ServerUnfencedUpdateStart;
+ IMG_UINT8 *pui8ServerFenceStart;
+ IMG_UINT32 ui32ServerFenceCount;
+ IMG_UINT32 ui32ServerUpdateCount;
+ IMG_UINT32 ui32ServerUnfencedUpdateCount;
+
+ /* Job reference fields */
+ IMG_UINT32 ui32ExtJobRef;
+ IMG_UINT32 ui32IntJobRef;
+
+ /* Workload kick information */
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData;
+
+ /* Robustness reset reason address */
+ IMG_DEV_VIRTADDR sRobustnessResetReason;
+} RGX_CCB_CMD_HELPER_DATA;
+
+#define PADDING_COMMAND_SIZE (sizeof(RGXFWIF_CCB_CMD_HEADER))
+
+
+#define RGX_CCB_REQUESTORS(TYPE) \
+ /* for debugging purposes */ TYPE(UNDEF) \
+ TYPE(TA) \
+ TYPE(3D) \
+ TYPE(CDM) \
+ TYPE(SH) \
+ TYPE(RS) \
+ TYPE(TQ_3D) \
+ TYPE(TQ_2D) \
+ TYPE(TQ_TDM) \
+ TYPE(KICKSYNC) \
+ /* Only used for validating the number of entries in this list */ TYPE(FIXED_COUNT) \
+ TYPE(FC0) \
+ TYPE(FC1) \
+ TYPE(FC2) \
+ TYPE(FC3) \
+
+/* Forms an enum constant for each type present in the RGX_CCB_REQUESTORS list. The enum is mainly used
+   as an index into the aszCCBRequestors table defined in rgxccb.c. The total number of enum constants
+   must satisfy the build assert that follows the enum.
+*/
+typedef enum _RGX_CCB_REQUESTOR_TYPE_
+{
+#define CONSTRUCT_ENUM(req) REQ_TYPE_##req,
+ RGX_CCB_REQUESTORS (CONSTRUCT_ENUM)
+#undef CONSTRUCT_ENUM
+
+ /* should always be at the end */
+ REQ_TYPE_TOTAL_COUNT,
+} RGX_CCB_REQUESTOR_TYPE;
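+
+/* Illustrative expansion (not literal source): the X macro above generates
+
+       REQ_TYPE_UNDEF, REQ_TYPE_TA, REQ_TYPE_3D, REQ_TYPE_CDM, ...,
+       REQ_TYPE_FC3, REQ_TYPE_TOTAL_COUNT
+
+   so each requestor name becomes an index into the aszCCBRequestors table
+   defined in rgxccb.c. */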
+
+/* The number of enum constants above is always equal to the number of entries in the RGX_CCB_REQUESTORS X macro list.
+   If the value of DPX_MAX_RAY_CONTEXTS changes to, say, 'n', the corresponding entries up to FC[n-1] must be added to
+   the RGX_CCB_REQUESTORS list.
+*/
+static_assert(REQ_TYPE_TOTAL_COUNT == REQ_TYPE_FIXED_COUNT + DPX_MAX_RAY_CONTEXTS + 1,
+ "Mismatch between DPX_MAX_RAY_CONTEXTS and RGX_CCB_REQUESTOR_TYPE enum");
+
+/* Tuple describing the columns of the following table */
+typedef enum _RGX_CCB_REQUESTOR_TUPLE_
+{
+ REQ_RGX_FW_CLIENT_CCB_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCB for this requestor */
+ REQ_RGX_FW_CLIENT_CCB_CONTROL_STRING, /* Index to comment to be dumped in DevMemAllocs when allocating FirmwareClientCCBControl for this requestor */
+ REQ_PDUMP_COMMENT, /* Index to comment to be dumped in PDUMPs */
+
+ /* should always be at the end */
+ REQ_TUPLE_CARDINALITY,
+} RGX_CCB_REQUESTOR_TUPLE;
+
+/* Table containing an array of strings for each requestor type in the RGX_CCB_REQUESTORS list. In addition to its use
+   in this module (rgxccb.c), this table is also used to look up the strings dumped in PDUMP comments; hence it is
+   marked extern for use in other modules.
+*/
+extern IMG_CHAR *const aszCCBRequestors[][REQ_TUPLE_CARDINALITY];
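+
+/* Example lookup (illustrative): the PDump comment string for the CDM
+   requestor would be obtained as
+
+       const IMG_CHAR *psz = aszCCBRequestors[REQ_TYPE_CDM][REQ_PDUMP_COMMENT];
+*/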
+
+PVRSRV_ERROR RGXCCBPDumpDrainCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXCreateCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CCBSizeLog2,
+ CONNECTION_DATA *psConnectionData,
+ RGX_CCB_REQUESTOR_TYPE eCCBRequestor,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ RGX_CLIENT_CCB **ppsClientCCB,
+ DEVMEM_MEMDESC **ppsClientCCBMemDesc,
+ DEVMEM_MEMDESC **ppsClientCCBCtlMemDesc);
+
+void RGXDestroyCCB(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXAcquireCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ void **ppvBufferSpace,
+ IMG_UINT32 ui32PDumpFlags);
+
+void RGXReleaseCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+IMG_UINT32 RGXGetHostWriteOffsetCCB(RGX_CLIENT_CCB *psClientCCB);
+
+PVRSRV_ERROR RGXCmdHelperInitCmdCCB(RGX_CLIENT_CCB *psClientCCB,
+ IMG_UINT32 ui32ClientFenceCount,
+ PRGXFWIF_UFO_ADDR *pauiFenceUFOAddress,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ PRGXFWIF_UFO_ADDR *pauiUpdateUFOAddress,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ IMG_UINT32 ui32ServerSyncFlagMask,
+ SERVER_SYNC_PRIMITIVE **papsServerSyncs,
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR *ppPostAddr,
+ PRGXFWIF_UFO_ADDR *ppRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE eType,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT32 ui32PDumpFlags,
+ RGXFWIF_WORKEST_KICK_DATA *psWorkEstKickData,
+ IMG_CHAR *pszCommandName,
+ IMG_BOOL bCCBStateOpen,
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelperData,
+ IMG_DEV_VIRTADDR sRobustnessResetReason);
+
+PVRSRV_ERROR RGXCmdHelperAcquireCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+void RGXCmdHelperReleaseCmdCCB(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ const IMG_CHAR *pcszDMName,
+ IMG_UINT32 ui32CtxAddr);
+
+IMG_UINT32 RGXCmdHelperGetCommandSize(IMG_UINT32 ui32CmdCount,
+ RGX_CCB_CMD_HELPER_DATA *asCmdHelperData);
+
+IMG_UINT32 RGXCmdHelperGetCommandOffset(RGX_CCB_CMD_HELPER_DATA *asCmdHelperData,
+ IMG_UINT32 ui32Cmdindex);
+
+IMG_UINT32 RGXCmdHelperGetDMCommandHeaderOffset(RGX_CCB_CMD_HELPER_DATA *psCmdHelperData);
+
+void DumpStalledCCBCommand(PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+void DumpCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext,
+ RGX_CLIENT_CCB *psCurrentClientCCB,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+#endif
+
+PVRSRV_ERROR CheckForStalledCCB(PVRSRV_DEVICE_NODE *psDevNode, RGX_CLIENT_CCB *psCurrentClientCCB, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpStalledContextInfo(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif /* __RGXCCB_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxcompute.c b/drivers/gpu/drm/img-rogue/1.10/rgxcompute.c
new file mode 100644
index 00000000000000..b7c7026662b9af
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxcompute.c
@@ -0,0 +1,1037 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Compute routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Compute routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvkm.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxcompute.h"
+#include "rgx_bvnc_defs_km.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_memallocflags.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_CMP_UFO_DUMP 0
+
+//#define CMP_CHECKPOINT_DEBUG 1
+
+#if defined(CMP_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_COMPUTE_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ DEVMEM_MEMDESC *psFWComputeContextStateMemDesc;
+ PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hIntJobRef;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+};
+
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pbyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sServicesSignalAddr,
+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext;
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Prepare cleanup struct */
+ *ppsComputeContext = NULL;
+ psComputeContext = OSAllocZMem(sizeof(*psComputeContext));
+ if (psComputeContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psComputeContext->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_createlock;
+ }
+#endif
+
+ psComputeContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psComputeContext->psSync,
+ "compute cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware compute context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_COMPUTECTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwComputeContextState",
+ &psComputeContext->psFWComputeContextStateMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_contextsuspendalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psComputeContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psComputeContext->psFWFrameworkMemDesc,
+ pbyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateComputeContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psComputeContext->psFWFrameworkMemDesc;
+
+ if(RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+ RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT) == 2 &&
+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING))
+ {
+ sInfo.psResumeSignalAddr = &sServicesSignalAddr;
+	}
+	else
+ {
+ PVR_UNREFERENCED_PARAMETER(sServicesSignalAddr);
+ }
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_CDM,
+ RGXFWIF_DM_CDM,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ psComputeContext->psFWComputeContextStateMemDesc,
+ RGX_CDM_CCB_SIZE_LOG2,
+ ui32Priority,
+ &sInfo,
+ &psComputeContext->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ SyncAddrListInit(&psComputeContext->sSyncAddrListFence);
+ SyncAddrListInit(&psComputeContext->sSyncAddrListUpdate);
+
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sComputeCtxtListHead), &(psComputeContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+ }
+
+ *ppsComputeContext = psComputeContext;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+fail_contextsuspendalloc:
+ SyncPrimFree(psComputeContext->psSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psComputeContext->hLock);
+fail_createlock:
+#endif
+ OSFreeMem(psComputeContext);
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psComputeContext->psDeviceNode,
+ psComputeContext->psServerCommonContext,
+ psComputeContext->psSync,
+ RGXFWIF_DM_CDM,
+ PDUMP_FLAGS_NONE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+
+ OSWRLockAcquireWrite(psDevInfo->hComputeCtxListLock);
+ dllist_remove_node(&(psComputeContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hComputeCtxListLock);
+
+ FWCommonContextFree(psComputeContext->psServerCommonContext);
+ DevmemFwFree(psDevInfo, psComputeContext->psFWFrameworkMemDesc);
+ DevmemFwFree(psDevInfo, psComputeContext->psFWComputeContextStateMemDesc);
+ SyncPrimFree(psComputeContext->psSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psComputeContext->hLock);
+#endif
+ OSFreeMem(psComputeContext);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR pszUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_DEV_VIRTADDR sRobustnessResetReason)
+{
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32CDMCmdOffset = 0;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32FWCtx;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+ IMG_UINT32 *paui32IntFenceValue = NULL;
+ IMG_UINT32 ui32IntClientUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32IntUpdateValue = NULL;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ void *pvUpdateFenceFinaliseData = NULL;
+
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ if (iUpdateTimeline >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (%d) in non-supporting driver",
+ __func__, iUpdateTimeline));
+ }
+ if (iCheckFence >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (%d) in non-supporting driver",
+ __func__, iCheckFence));
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ pszUpdateFenceName[31] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psComputeContext->hLock);
+#endif
+
+ ui32IntJobRef = OSAtomicIncrement(&psComputeContext->hIntJobRef);
+
+ ui32IntClientFenceCount = ui32ClientFenceCount;
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+	if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ paui32IntFenceValue = paui32ClientFenceValue;
+
+ ui32IntClientUpdateCount = ui32ClientUpdateCount;
+
+ eError = SyncAddrListPopulate(&psComputeContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+	if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ paui32IntUpdateValue = paui32ClientUpdateValue;
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerSyncPrims;i++)
+ {
+ if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on CDM) must fence", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ goto err_populate_sync_addr_list;
+ }
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psComputeContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+ goto fail_resolve_input_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint));
+ }
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psComputeContext->psDeviceNode->hSyncCheckpointContext=<%p>)...", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psComputeContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psComputeContext->psDeviceNode,
+ pszUpdateFenceName,
+ iUpdateTimeline,
+ psComputeContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __FUNCTION__, eError));
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __FUNCTION__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u, psFenceTimelineUpdateSync=<%p>", __FUNCTION__, ui32IntClientUpdateCount, (void*)psFenceTimelineUpdateSync));
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __FUNCTION__, ui32IntClientUpdateCount));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32IntClientUpdateCount++;
+ /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+ paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: append the timeline sync prim addr <%p> to the compute context update list", __FUNCTION__, (void*)psFenceTimelineUpdateSync));
+ /* Now append the timeline sync prim addr to the compute context update list */
+ SyncAddrListAppendSyncPrim(&psComputeContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32IntClientUpdateCount=%u:", __FUNCTION__, ui32IntClientUpdateCount));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ }
+
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Compute CDM Fence (&psComputeContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psComputeContext->sSyncAddrListFence));
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psComputeContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ }
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)paui32IntUpdateValue;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Dumping %d update values (paui32IntUpdateValue=<%p>)...", __FUNCTION__, ui32IntClientUpdateCount, (void*)paui32IntUpdateValue));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: paui32IntUpdateValue[%d] = <%p>", __FUNCTION__, iii, (void*)pui32Tmp));
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: *paui32IntUpdateValue[%d] = 0x%x", __FUNCTION__, iii, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Compute CDM Update (&psComputeContext->sSyncAddrListUpdate=<%p>, psUpdateSyncCheckpoint=<%p>)...", __FUNCTION__, (void*)&psComputeContext->sSyncAddrListUpdate , (void*)psUpdateSyncCheckpoint));
+ SyncAddrListAppendCheckpoints(&psComputeContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psComputeContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(CMP_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress=<%p>, pui32Tmp=<%p>, ui32IntClientUpdateCount=%u", __FUNCTION__, (void*)pauiIntUpdateUFOAddress, (void*)pui32Tmp, ui32IntClientUpdateCount));
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __FUNCTION__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_CMP_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping Compute (CDM) fence/updates syncs...", __FUNCTION__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) fence syncs (&psComputeContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __FUNCTION__, ui32IntClientFenceCount, (void*)&psComputeContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+ pui32TmpIntFenceValue++;
+ }
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Compute (CDM) update syncs (&psComputeContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __FUNCTION__, ui32IntClientUpdateCount, (void*)&psComputeContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psComputeContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext),
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_CDM,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "Compute",
+ bCCBStateOpen,
+ asCmdHelperData,
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdinit;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData),
+ asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdaquire;
+ }
+
+
+	/*
+	 We should reserve space in the kernel CCB here and fill in the command
+	 directly.
+	 This is so that, if there isn't space in the kernel CCB, we can return
+	 with retry back to the services client before we perform any operations.
+	*/
+
+	/*
+	 We might only be kicking to flush out a padding packet, so only submit
+	 the command if the create was successful.
+	*/
+ if (eError == PVRSRV_OK)
+ {
+		/*
+		 All the required resources are ready at this point; we can't fail, so
+		 take the required server sync operations and commit all the resources.
+		*/
+
+ ui32CDMCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(1, asCmdHelperData, "CDM", FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr);
+ }
+
+ /* Construct the kernel compute CCB command. */
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psComputeContext->psServerCommonContext));
+ sCmpKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext).ui32Addr;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_CDM,
+ sCmpKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32CDMCmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psComputeContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_CDM,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ /*
+ * Submit the compute command to the firmware.
+ */
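+	/* The firmware may ask us to retry (PVRSRV_ERROR_RETRY, e.g. while the
+	 * kernel CCB has no space); keep retrying with a short wait until the
+	 * command is accepted or MAX_HW_TIME_US elapses. */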
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sCmpKCCBCmd,
+ sizeof(sCmpKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError2 != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickCDMKM failed to schedule kernel CCB command. (0x%x)", eError2));
+ }
+ else
+ {
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psComputeContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_CDM);
+#endif
+ }
+	/*
+	 * Now check eError (which may hold an error from our earlier call to
+	 * RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+	 * first, so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdaquire;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ *piUpdateFence = iUpdateFence;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psComputeContext->hLock);
+#endif
+
+ return PVRSRV_OK;
+
+fail_cmdinit:
+fail_cmdaquire:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psComputeContext->psDeviceNode, &psComputeContext->sSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+	if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+fail_resolve_input_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+err_populate_sync_addr_list:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psComputeContext->hLock);
+#endif
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ RGXFWIF_KCCB_CMD sFlushCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit Compute flush");
+#endif
+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_FALSE;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_TRUE;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.eDM = RGXFWIF_DM_CDM;
+ sFlushCmd.uCmdData.sSLCFlushInvalData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psComputeContext->hLock);
+#endif
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sFlushCmd,
+ sizeof(sFlushCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Failed to schedule SLC flush command with error (%u)", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ psComputeContext->psSync,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFlushComputeDataKM: Compute flush aborted with error (%u)", eError));
+ }
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psComputeContext->hLock);
+#endif
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psComputeContext->psDeviceNode->pvDevice;
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+ 2 == RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT))
+ {
+
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psComputeContext->hLock);
+#endif
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psComputeContext->psServerCommonContext);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psComputeContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_CDM,
+ &sKCCBCmd,
+ sizeof(sKCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXNotifyComputeWriteOffsetUpdateKM: Failed to schedule the FW command %d (%s)",
+ eError, PVRSRVGETERRORSTRING(eError)));
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psComputeContext->hLock);
+#endif
+ return eError;
+	}
+	else
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psComputeContext->hLock);
+#endif
+
+ eError = ContextSetPriority(psComputeContext->psServerCommonContext,
+ psConnection,
+ psComputeContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_CDM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the compute context (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psComputeContext->hLock);
+#endif
+ return eError;
+}
+
+/*
+ * PVRSRVRGXGetLastComputeContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef)
+{
+ PVR_ASSERT(psComputeContext != NULL);
+ PVR_ASSERT(peLastResetReason != NULL);
+ PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+ *peLastResetReason = FWCommonContextGetLastResetReason(psComputeContext->psServerCommonContext,
+ pui32LastResetJobRef);
+
+ return PVRSRV_OK;
+}
+
+void CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+ DumpStalledFWCommonContext(psCurrentServerComputeCtx->psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32ContextBitMask = 0;
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hComputeCtxListLock);
+ dllist_foreach_node(&psDevInfo->sComputeCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMPUTE_CONTEXT *psCurrentServerComputeCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMPUTE_CONTEXT, sListNode);
+
+ if (CheckStalledClientCommonContext(psCurrentServerComputeCtx->psServerCommonContext, RGX_KICK_TYPE_DM_CDM)
+ == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_CDM;
+ }
+ }
+ OSWRLockReleaseRead(psDevInfo->hComputeCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxcompute.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxcompute.h b/drivers/gpu/drm/img-rogue/1.10/rgxcompute.h
new file mode 100644
index 00000000000000..d5a4d038504158
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxcompute.h
@@ -0,0 +1,178 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX compute functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX compute functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXCOMPUTE_H__)
+#define __RGXCOMPUTE_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+
+
+typedef struct _RGX_SERVER_COMPUTE_CONTEXT_ RGX_SERVER_COMPUTE_CONTEXT;
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXCreateComputeContextKM
+
+ @Description
+	Server-side implementation of RGXCreateComputeContext
+
+ @Input psConnection - Connection data
+ @Input psDeviceNode - RGX device node
+ @Input ui32Priority - Priority of the compute context
+ @Input ui32FrameworkRegisterSize - Size of the framework register command
+ @Input pbyFrameworkRegisters - Framework register command data
+ @Input hMemCtxPrivData - Private data of the memory context
+ @Input sServicesSignalAddr - Services signal address (used when signal snooping is supported)
+ @Output ppsComputeContext - Created compute context
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateComputeContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32FrameworkRegisterSize,
+ IMG_PBYTE pbyFrameworkRegisters,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sServicesSignalAddr,
+ RGX_SERVER_COMPUTE_CONTEXT **ppsComputeContext);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDestroyComputeContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyComputeContext
+
+ @Input psCleanupData -
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyComputeContextKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXKickCDMKM
+
+ @Description
+ Server-side implementation of RGXKickCDM
+
+ @Input psComputeContext - Compute context to which the CDM command is submitted
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickCDMKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR pcszUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_DEV_VIRTADDR sRobustnessResetReason);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXFlushComputeDataKM
+
+ @Description
+ Server-side implementation of RGXFlushComputeData
+
+ @Input psComputeContext - Compute context to flush
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFlushComputeDataKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
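+
+/* Illustrative call sequence for a compute context (a sketch only; most
+   argument lists are abbreviated with "..."):
+
+       RGX_SERVER_COMPUTE_CONTEXT *psCtx;
+       eError = PVRSRVRGXCreateComputeContextKM(psConnection, psDeviceNode,
+                                                ..., &psCtx);
+       eError = PVRSRVRGXKickCDMKM(psCtx, ...);           // submit CDM work
+       eError = PVRSRVRGXFlushComputeDataKM(psCtx);       // SLC flush + wait
+       eError = PVRSRVRGXDestroyComputeContextKM(psCtx);  // may return RETRY
+*/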
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXNotifyComputeWriteOffsetUpdateKM
+ @Description Server-side implementation of RGXNotifyComputeWriteOffsetUpdate
+
+ @Input psComputeContext - Compute context to notify
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext);
+
+PVRSRV_ERROR PVRSRVRGXSetComputeContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastComputeContextResetReasonKM(RGX_SERVER_COMPUTE_CONTEXT *psComputeContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef);
+
+/* Debug - check if compute context is waiting on a fence */
+void CheckForStalledComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client compute contexts are stalled */
+IMG_UINT32 CheckForStalledClientComputeCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXCOMPUTE_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxdebug.c b/drivers/gpu/drm/img-rogue/1.10/rgxdebug.c
new file mode 100644
index 00000000000000..b75a57a7c1195a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxdebug.c
@@ -0,0 +1,5269 @@
+/*************************************************************************/ /*!
+@File
+@Title Rgx debug information
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX debugging functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "cache_km.h"
+#include "osfunc.h"
+
+#include "lists.h"
+
+#include "rgxdebug.h"
+#include "pvrversion.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+#include "rgxutils.h"
+#include "tlstream.h"
+#include "rgxfwutils.h"
+#include "pvrsrv.h"
+#include "services_km.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_utils.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_sf.h"
+#include "rgxfw_log_helper.h"
+#include "fwtrace_string.h"
+
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxtdmtransfer.h"
+#include "rgxray.h"
+#include "rgxtimecorr.h"
+#include "rgx_options.h"
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+#include "devicemem_history_server.h"
+#endif
+#include "rgx_bvnc_defs_km.h"
+#define PVR_DUMP_DRIVER_INFO(x, y) \
+ PVR_DUMPDEBUG_LOG("%s info: %d.%d @ %8d (%s) build options: 0x%08x ", \
+ (x), \
+ PVRVERSION_UNPACK_MAJ((y).ui32BuildVersion), \
+ PVRVERSION_UNPACK_MIN((y).ui32BuildVersion), \
+ (y).ui32BuildRevision, \
+ (BUILD_TYPE_DEBUG == (y).ui32BuildType) ? "debug":"release", \
+ (y).ui32BuildOptions);
+
+#define PVR_DUMP_FIRMWARE_INFO(x) \
+ PVR_DUMPDEBUG_LOG("FW info: %d.%d @ %8d (%s) build options: 0x%08x ", \
+ PVRVERSION_UNPACK_MAJ((x).ui32DDKVersion), \
+ PVRVERSION_UNPACK_MIN((x).ui32DDKVersion), \
+ (x).ui32DDKBuild, \
+ ((x).ui32BuildOptions & OPTIONS_DEBUG_MASK) ? "debug":"release",\
+ (x).ui32BuildOptions);
+
+
+#define RGX_DEBUG_STR_SIZE (150)
+#define MAX_FW_DESCRIPTION_LENGTH (500u)
+
+#define RGX_CR_BIF_CAT_BASE0 (0x1200U)
+#define RGX_CR_BIF_CAT_BASE1 (0x1208U)
+
+#define RGX_CR_BIF_CAT_BASEN(n) \
+	(RGX_CR_BIF_CAT_BASE0 + \
+	 ((RGX_CR_BIF_CAT_BASE1 - RGX_CR_BIF_CAT_BASE0) * (n)))
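+
+/* For example, RGX_CR_BIF_CAT_BASEN(1) evaluates to
+   0x1200 + (0x1208 - 0x1200) * 1 = 0x1208, i.e. RGX_CR_BIF_CAT_BASE1. */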
+
+
+#define RGXDBG_BIF_IDS \
+ X(BIF0)\
+ X(BIF1)\
+ X(TEXAS_BIF)\
+ X(DPX_BIF)
+
+#define RGXDBG_SIDEBAND_TYPES \
+ X(META)\
+ X(TLA)\
+ X(DMA)\
+ X(VDMM)\
+ X(CDM)\
+ X(IPP)\
+ X(PM)\
+ X(TILING)\
+ X(MCU)\
+ X(PDS)\
+ X(PBE)\
+ X(VDMS)\
+ X(IPF)\
+ X(ISP)\
+ X(TPF)\
+ X(USCS)\
+ X(PPP)\
+ X(VCE)\
+ X(TPF_CPF)\
+ X(IPF_CPF)\
+ X(FBCDC)
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+ RGXDBG_BIF_IDS
+#undef X
+} RGXDBG_BIF_ID;
+
+typedef enum
+{
+#define X(NAME) RGXDBG_##NAME,
+ RGXDBG_SIDEBAND_TYPES
+#undef X
+} RGXDBG_SIDEBAND_TYPE;
+
+static const IMG_CHAR *const pszPowStateName[] =
+{
+#define X(NAME) #NAME,
+ RGXFWIF_POW_STATES
+#undef X
+};
+
+static const IMG_CHAR *const pszBIFNames[] =
+{
+#define X(NAME) #NAME,
+ RGXDBG_BIF_IDS
+#undef X
+};
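+
+/* The X-macro lists above are expanded twice: once with X(NAME) producing an
+ * enumerator and once producing a string literal, which keeps each enum and
+ * its name table in lockstep. RGXDBG_BIF_IDS, for example, expands to:
+ *
+ *   typedef enum { RGXDBG_BIF0, RGXDBG_BIF1,
+ *                  RGXDBG_TEXAS_BIF, RGXDBG_DPX_BIF } RGXDBG_BIF_ID;
+ *   static const IMG_CHAR *const pszBIFNames[] =
+ *   { "BIF0", "BIF1", "TEXAS_BIF", "DPX_BIF" };
+ *
+ * so pszBIFNames[eBankID] is always the printable name of eBankID.
+ */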
+
+#if !defined(SUPPORT_PAGE_FAULT_DEBUG)
+static IMG_UINT32 gui32FaultIndex = 0;
+static MMU_FAULT_DATA gsMMUFaultData[RGXFWIF_HWINFO_MAX];
+#endif
+
+typedef struct _IMG_FLAGS2DESC_
+{
+ IMG_UINT32 uiFlag;
+ IMG_CHAR *pszLabel;
+} IMG_FLAGS2DESC;
+
+static const IMG_FLAGS2DESC asCSW2Description[] =
+{
+ {RGXFWIF_INICFG_CTXSWITCH_TA_EN, "TA; "},
+ {RGXFWIF_INICFG_CTXSWITCH_3D_EN, "3D; "},
+ {RGXFWIF_INICFG_CTXSWITCH_CDM_EN, "CDM; "},
+ {RGXFWIF_INICFG_CTXSWITCH_MODE_RAND, "Random; "},
+ {RGXFWIF_INICFG_CTXSWITCH_SRESET_EN, "SoftReset; "},
+ {RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX, "VDM CS INDEX mode; "},
+ {RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE, "VDM CS INSTANCE mode; "},
+ {RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST, "VDM CS LIST mode; "},
+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_FAST, "Fast CSW profile; "},
+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_MEDIUM, "Medium CSW profile; "},
+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_SLOW, "Slow CSW profile; "},
+ {RGXFWIF_INICFG_CTXSWITCH_PROFILE_NODELAY, "No Delay CSW profile; "}
+};
+
+static const IMG_FLAGS2DESC asMisc2Description[] =
+{
+ {RGXFWIF_INICFG_USE_EXTENDED, "Use extended; "},
+ {RGXFWIF_INICFG_POW_RASCALDUST, "Power Rascal/Dust; "},
+ {RGXFWIF_INICFG_HWPERF_EN, "HwPerf EN; "},
+ {RGXFWIF_INICFG_HWR_EN, "HWR EN; "},
+ {RGXFWIF_INICFG_CHECK_MLIST_EN, "Check MList; "},
+ {RGXFWIF_INICFG_DISABLE_CLKGATING_EN, "ClockGating Off; "},
+ {RGXFWIF_INICFG_POLL_COUNTERS_EN, "Poll Counters; "},
+ {RGXFWIF_INICFG_SHG_BYPASS_EN, "SHG Bypass; "},
+ {RGXFWIF_INICFG_REGCONFIG_EN, "Register Config; "},
+ {RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY, "Assert on OOM; "},
+ {RGXFWIF_INICFG_HWP_DISABLE_FILTER, "HWP Filter Off; "},
+ {RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN, "Custom PerfTimer; "},
+ {RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN, "CDM Random kill; "},
+ {RGXFWIF_INICFG_DISABLE_DM_OVERLAP, "DM Overlap Off; "},
+ {RGXFWIF_INICFG_METAT1_MAIN, "Main; "},
+ {RGXFWIF_INICFG_METAT1_DUMMY, "Dummy; "},
+ {RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER, "Assert on HWR; "},
+ {RGXFWIF_INICFG_WORKEST_V1, "Workload Estim v1; "},
+ {RGXFWIF_INICFG_WORKEST_V2, "Workload Estim v2; "},
+ {RGXFWIF_INICFG_PDVFS_V1, "PDVFS v1; "},
+ {RGXFWIF_INICFG_PDVFS_V2, "PDVFS v2; "}
+};
+
+static const IMG_FLAGS2DESC asHwrState2Description[] =
+{
+ {RGXFWIF_HWR_HARDWARE_OK, "HWR OK; "},
+ {RGXFWIF_HWR_ANALYSIS_DONE, "Analysis done; "},
+ {RGXFWIF_HWR_GENERAL_LOCKUP, "General lockup; "},
+ {RGXFWIF_HWR_DM_RUNNING_OK, "DM running ok; "},
+ {RGXFWIF_HWR_DM_STALLING, "DM stalling; "},
+ {RGXFWIF_HWR_FW_FAULT, "FW fault; "},
+ {RGXFWIF_HWR_RESTART_REQUESTED, "Restarting; "},
+};
+
+static const IMG_FLAGS2DESC asDmState2Description[] =
+{
+ {RGXFWIF_DM_STATE_WORKING, "working; "},
+ {RGXFWIF_DM_STATE_READY_FOR_HWR, "ready for hwr; "},
+ {RGXFWIF_DM_STATE_NEEDS_SKIP, "needs skip; "},
+ {RGXFWIF_DM_STATE_NEEDS_PR_CLEANUP, "needs PR cleanup; "},
+ {RGXFWIF_DM_STATE_NEEDS_TRACE_CLEAR, "needs trace clear; "},
+ {RGXFWIF_DM_STATE_GUILTY_LOCKUP, "guilty lockup; "},
+ {RGXFWIF_DM_STATE_INNOCENT_LOCKUP, "innocent lockup; "},
+ {RGXFWIF_DM_STATE_GUILTY_OVERRUNING, "guilty overrunning; "},
+ {RGXFWIF_DM_STATE_INNOCENT_OVERRUNING, "innocent overrunning; "},
+ {RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH, "hard context switching; "},
+};
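+
+/* The IMG_FLAGS2DESC tables above map individual configuration/state bits to
+ * log fragments. A minimal sketch of how a caller can fold one of them into
+ * a message (the helper name and the use of strncat() here are illustrative
+ * only, not the exact routine used by the dump code):
+ *
+ *   static void _Flags2Desc(IMG_CHAR *pszBuf, size_t uiBufSize,
+ *                           const IMG_FLAGS2DESC *psTbl, size_t uiCount,
+ *                           IMG_UINT32 ui32Flags)
+ *   {
+ *       size_t i;
+ *       for (i = 0; i < uiCount; i++)
+ *       {
+ *           if ((ui32Flags & psTbl[i].uiFlag) == psTbl[i].uiFlag)
+ *           {
+ *               strncat(pszBuf, psTbl[i].pszLabel,
+ *                       uiBufSize - strlen(pszBuf) - 1);
+ *           }
+ *       }
+ *   }
+ */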
+
+#if !defined(NO_HARDWARE)
+/* Translation of MIPS exception encoding */
+static const IMG_CHAR * const apszMIPSExcCodes[32] =
+{
+ "Interrupt",
+ "TLB modified exception",
+ "TLB exception (load/instruction fetch)",
+ "TLB exception (store)",
+ "Address error exception (load/instruction fetch)",
+ "Address error exception (store)",
+ "Bus error exception (instruction fetch)",
+ "Bus error exception (load/store)",
+ "Syscall exception",
+ "Breakpoint exception (FW assert)",
+ "Reserved instruction exception",
+ "Coprocessor Unusable exception",
+ "Arithmetic Overflow exception",
+ "Trap exception",
+ NULL,
+ NULL,
+ "Implementation-Specific Exception 1 (COP2)",
+ "CorExtend Unusable",
+ "Coprocessor 2 exceptions",
+ "TLB Read-Inhibit",
+ "TLB Execute-Inhibit",
+ NULL,
+ NULL,
+ "Reference to WatchHi/WatchLo address",
+ "Machine check",
+ NULL,
+ "DSP Module State Disabled exception",
+ NULL,
+ NULL,
+ NULL,
+ /* Can only happen in MIPS debug mode */
+ "Parity error",
+ NULL
+};
+
+static IMG_CHAR const *_GetMIPSExcString(IMG_UINT32 ui32ExcCode)
+{
+ if (ui32ExcCode >= 32)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Only 32 exceptions available in MIPS, %u is not a valid exception code",
+ ui32ExcCode));
+ return NULL;
+ }
+
+ return apszMIPSExcCodes[ui32ExcCode];
+}
+#endif
+
+typedef struct _RGXMIPSFW_C0_DEBUG_TBL_ENTRY_
+{
+ IMG_UINT32 ui32Mask;
+ const IMG_CHAR * pszExplanation;
+} RGXMIPSFW_C0_DEBUG_TBL_ENTRY;
+
+#if !defined(NO_HARDWARE)
+static const RGXMIPSFW_C0_DEBUG_TBL_ENTRY sMIPS_C0_DebugTable[] =
+{
+ { RGXMIPSFW_C0_DEBUG_DSS, "Debug single-step exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DBP, "Debug software breakpoint exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DDBL, "Debug data break exception occurred on a load" },
+ { RGXMIPSFW_C0_DEBUG_DDBS, "Debug data break exception occurred on a store" },
+ { RGXMIPSFW_C0_DEBUG_DIB, "Debug instruction break exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DINT, "Debug interrupt exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DIBIMPR, "Imprecise debug instruction break exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DDBLIMPR, "Imprecise debug data break load exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DDBSIMPR, "Imprecise debug data break store exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_IEXI, "Imprecise error exception inhibit controls exception occurred" },
+ { RGXMIPSFW_C0_DEBUG_DBUSEP, "Data access Bus Error exception pending" },
+ { RGXMIPSFW_C0_DEBUG_CACHEEP, "Imprecise Cache Error pending" },
+ { RGXMIPSFW_C0_DEBUG_MCHECKP, "Imprecise Machine Check exception pending" },
+ { RGXMIPSFW_C0_DEBUG_IBUSEP, "Instruction fetch Bus Error exception pending" },
+ { RGXMIPSFW_C0_DEBUG_DBD, "Debug exception occurred in branch delay slot" }
+};
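+
+/* Each entry above pairs one bit of the MIPS CP0 Debug register with its
+ * meaning; a decoder walks the table and reports every set bit. Illustrative
+ * loop shape (assumed, not the exact dump code):
+ *
+ *   IMG_UINT32 i;
+ *   for (i = 0; i < sizeof(sMIPS_C0_DebugTable)/sizeof(sMIPS_C0_DebugTable[0]); i++)
+ *   {
+ *       if (ui32DebugReg & sMIPS_C0_DebugTable[i].ui32Mask)
+ *           PVR_DPF((PVR_DBG_ERROR, "%s", sMIPS_C0_DebugTable[i].pszExplanation));
+ *   }
+ */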
+#endif
+
+static PVRSRV_ERROR
+RGXPollMetaRegThroughSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegOffset,
+ IMG_UINT32 ui32PollValue, IMG_UINT32 ui32Mask)
+{
+ IMG_UINT32 ui32RegValue, ui32NumPolls = 0;
+ PVRSRV_ERROR eError;
+
+ do
+ {
+ eError = RGXReadWithSP(psDevInfo, ui32RegOffset, &ui32RegValue);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+	} while (((ui32RegValue & ui32Mask) != ui32PollValue) && (ui32NumPolls++ < 1000));
+
+ return ((ui32RegValue & ui32Mask) == ui32PollValue) ? PVRSRV_OK : PVRSRV_ERROR_RETRY;
+}
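+
+/* RGXPollMetaRegThroughSP() busy-polls (up to 1000 reads, with no delay
+ * between them) until (value & mask) == expected, returning
+ * PVRSRV_ERROR_RETRY on timeout. Typical use, as in RGXReadMetaCoreReg()
+ * below, is waiting for the META slave port to signal "data ready":
+ *
+ *   eError = RGXPollMetaRegThroughSP(psDevInfo,
+ *                                    META_CR_TXUXXRXRQ_OFFSET,
+ *                                    META_CR_TXUXXRXRQ_DREADY_BIT,
+ *                                    META_CR_TXUXXRXRQ_DREADY_BIT);
+ */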
+
+static PVRSRV_ERROR
+RGXReadMetaCoreReg(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32RegAddr, IMG_UINT32 *pui32RegVal)
+{
+ PVRSRV_ERROR eError;
+
+ /* Core Read Ready? */
+ eError = RGXPollMetaRegThroughSP(psDevInfo,
+ META_CR_TXUXXRXRQ_OFFSET,
+ META_CR_TXUXXRXRQ_DREADY_BIT,
+ META_CR_TXUXXRXRQ_DREADY_BIT);
+ PVR_LOGR_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+ /* Set the reg we are interested in reading */
+ eError = RGXWriteWithSP(psDevInfo, META_CR_TXUXXRXRQ_OFFSET,
+ ui32RegAddr | META_CR_TXUXXRXRQ_RDnWR_BIT);
+ PVR_LOGR_IF_ERROR(eError, "RGXWriteWithSP");
+
+ /* Core Read Done? */
+ eError = RGXPollMetaRegThroughSP(psDevInfo,
+ META_CR_TXUXXRXRQ_OFFSET,
+ META_CR_TXUXXRXRQ_DREADY_BIT,
+ META_CR_TXUXXRXRQ_DREADY_BIT);
+ PVR_LOGR_IF_ERROR(eError, "RGXPollMetaRegThroughSP");
+
+ /* Read the value */
+ return RGXReadWithSP(psDevInfo, META_CR_TXUXXRXDT_OFFSET, pui32RegVal);
+}
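+
+/* The function above implements the META slave-port register read handshake:
+ *   1. poll TXUXXRXRQ until DREADY is set (port idle);
+ *   2. write the target register address, with RDnWR set to request a
+ *      read, into TXUXXRXRQ;
+ *   3. poll TXUXXRXRQ until DREADY is set again (data available);
+ *   4. read the returned value from TXUXXRXDT.
+ */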
+
+PVRSRV_ERROR
+RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value)
+{
+ PVRSRV_ERROR eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, pui32Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXReadWithSP error: %s", PVRSRVGetErrorStringKM(eError)));
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR
+RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXWriteMETAAddr error: %s", PVRSRVGetErrorStringKM(eError)));
+ }
+ return eError;
+}
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+static PVRSRV_ERROR _ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DEVMEM_MEMDESC *psMemDesc,
+ RGXFWIF_DEV_VIRTADDR *psFWAddr,
+ const IMG_CHAR *pszDesc)
+{
+ PMR *psFWImagePMR;
+ IMG_UINT32 *pui32HostCodeAddr;
+ PVRSRV_ERROR eError;
+	IMG_UINT32 ui32FWCodeAddr, ui32FWImageLen, ui32Value, i;
+	size_t uiFWImageLen;
+ IMG_HANDLE hFWImage;
+
+ eError = DevmemServerGetImportHandle(psMemDesc,
+ (void **)&psFWImagePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Error getting %s PMR (%u)",
+ pszDesc,
+ eError));
+ return eError;
+ }
+
+ /* Get a pointer to the FW code and the allocation size */
+	/* Pass a real size_t: casting &ui32FWImageLen to size_t* would write
+	 * 8 bytes into a 4-byte variable on 64-bit builds. */
+	eError = PMRAcquireKernelMappingData(psFWImagePMR,
+	                                     0,
+	                                     0, /* Map whole PMR */
+	                                     (void**)&pui32HostCodeAddr,
+	                                     &uiFWImageLen,
+	                                     &hFWImage);
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Acquire mapping for %s failed (%u)",
+ pszDesc,
+ eError));
+ return eError;
+ }
+
+	ui32FWCodeAddr = psFWAddr->ui32Addr;
+	ui32FWImageLen = (IMG_UINT32)(uiFWImageLen / sizeof(IMG_UINT32)); /* Bytes -> 32-bit words */
+
+ for (i = 0; i < ui32FWImageLen; i++)
+ {
+ eError = RGXReadMETAAddr(psDevInfo, ui32FWCodeAddr, &ui32Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP error: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ goto validatefwimage_release;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "0x%x: CPU 0x%08x, FW 0x%08x",
+ i * 4, pui32HostCodeAddr[i], ui32Value));
+
+ if (pui32HostCodeAddr[i] != ui32Value)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Mismatch while validating %s at offset 0x%x: CPU 0x%08x, FW 0x%08x",
+ pszDesc,
+ i * 4, pui32HostCodeAddr[i], ui32Value));
+ eError = PVRSRV_ERROR_FW_IMAGE_MISMATCH;
+ goto validatefwimage_release;
+ }
+
+ ui32FWCodeAddr += 4;
+ }
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "ValidateFWImageWithSP: Match between Host and Meta views of the %s",
+ pszDesc));
+
+validatefwimage_release:
+ PMRReleaseKernelMappingData(psFWImagePMR, hFWImage);
+
+ return eError;
+}
+#endif
+
+PVRSRV_ERROR ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+#if !defined(NO_HARDWARE) && !defined(SUPPORT_TRUSTED_DEVICE)
+ RGXFWIF_DEV_VIRTADDR sFWAddr;
+ PVRSRV_ERROR eError;
+
+#define VALIDATEFWIMAGEWITHSP_NUM_CHECKS (1U)
+ static IMG_UINT32 ui32NumChecks;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ if (ui32NumChecks == VALIDATEFWIMAGEWITHSP_NUM_CHECKS)
+ {
+ return PVRSRV_OK;
+ }
+ ui32NumChecks++;
+
+ if (psDevInfo->pvRegsBaseKM == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "ValidateFWImageWithSP: RGX registers not mapped yet!"));
+ return PVRSRV_ERROR_BAD_MAPPING;
+ }
+
+ sFWAddr.ui32Addr = RGXFW_BOOTLDR_META_ADDR;
+ eError = _ValidateFWImageWithSP(psDevInfo,
+ psDevInfo->psRGXFWCodeMemDesc,
+ &sFWAddr,
+ "FW code");
+ if (eError != PVRSRV_OK) return eError;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+ {
+ RGXSetFirmwareAddress(&sFWAddr,
+ psDevInfo->psRGXFWCorememMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = _ValidateFWImageWithSP(psDevInfo,
+ psDevInfo->psRGXFWCorememMemDesc,
+ &sFWAddr,
+ "FW coremem code");
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+#else
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+#endif
+
+ return PVRSRV_OK;
+}
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodePMPC
+
+ @Description
+
+ Return the name of the PM-managed Page Catalogue.
+
+ @Input ui32PC - Page Catalogue number
+
+ @Return const IMG_CHAR* - Name string for the Page Catalogue
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodePMPC(IMG_UINT32 ui32PC)
+{
+ const IMG_CHAR* pszPMPC = " (-)";
+
+ switch (ui32PC)
+ {
+ case 0x8: pszPMPC = " (PM-VCE0)"; break;
+ case 0x9: pszPMPC = " (PM-TE0)"; break;
+ case 0xA: pszPMPC = " (PM-ZLS0)"; break;
+ case 0xB: pszPMPC = " (PM-ALIST0)"; break;
+ case 0xC: pszPMPC = " (PM-VCE1)"; break;
+ case 0xD: pszPMPC = " (PM-TE1)"; break;
+ case 0xE: pszPMPC = " (PM-ZLS1)"; break;
+ case 0xF: pszPMPC = " (PM-ALIST1)"; break;
+ }
+
+ return pszPMPC;
+}
+
+/*!
+*******************************************************************************
+
+ @Function _DPXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from DPX_CR_BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID - BIF identifier
+ @Input ui32TagID - Tag ID value
+ @Input ui32TagSB - Tag Sideband data
+ @Output ppszTagID - Decoded string from the Tag ID
+ @Output ppszTagSB - Decoded string from the Tag SB
+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize - Size of the provided buffer
+
+ @Return void
+
+******************************************************************************/
+#if defined(RGX_FEATURE_RAY_TRACING)
+static void _DPXDecodeBIFReqTags(RGXDBG_BIF_ID eBankID,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ /* default to unknown */
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(eBankID == RGXDBG_DPX_BIF);
+	PVR_ASSERT(ppszTagID != NULL);
+	PVR_ASSERT(ppszTagSB != NULL);
+
+ PVR_UNREFERENCED_PARAMETER(ui32TagSB);
+ PVR_UNREFERENCED_PARAMETER(pszScratchBuf);
+ PVR_UNREFERENCED_PARAMETER(ui32ScratchBufSize);
+
+ switch (ui32TagID)
+ {
+ case 0x0:
+ {
+ pszTagID = "MMU";
+ break;
+ }
+ case 0x1:
+ {
+ pszTagID = "RS_READ";
+ break;
+ }
+ case 0x2:
+ {
+ pszTagID = "RS_WRITE";
+ break;
+ }
+ case 0x3:
+ {
+ pszTagID = "RQ";
+ break;
+ }
+ case 0x4:
+ {
+ pszTagID = "PU";
+ break;
+ }
+ } /* switch(TagID) */
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodeBIFReqTags
+
+ @Description
+
+ Decode the BIF Tag ID and sideband data fields from BIF_FAULT_BANK_REQ_STATUS regs
+
+ @Input eBankID - BIF identifier
+ @Input ui32TagID - Tag ID value
+ @Input ui32TagSB - Tag Sideband data
+ @Output ppszTagID - Decoded string from the Tag ID
+ @Output ppszTagSB - Decoded string from the Tag SB
+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize - Size of the provided buffer
+
+ @Return void
+
+******************************************************************************/
+#include "rgxmhdefs_km.h"
+
+static void _RGXDecodeBIFReqTagsXE(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ /* default to unknown */
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(ppszTagID != NULL);
+ PVR_ASSERT(ppszTagSB != NULL);
+
+ switch (ui32TagID)
+ {
+ /* MMU tags */
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT:
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD:
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC:
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM:
+ {
+ switch (ui32TagID)
+ {
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PT: pszTagID = "MMU PT"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PD: pszTagID = "MMU PD"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PC: pszTagID = "MMU PC"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MMU_PM: pszTagID = "MMU PM"; break;
+ }
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PT_REQUEST: pszTagSB = "PT"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PD_REQUEST: pszTagSB = "PD"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PC_REQUEST: pszTagSB = "PC"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PT_REQUEST: pszTagSB = "PM PT"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_REQUEST: pszTagSB = "PM PD"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_REQUEST: pszTagSB = "PM PC"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PD_WREQUEST: pszTagSB = "PM PD W"; break;
+ case RGX_MH_TAG_SB_MMU_ENCODING_MMU_TAG_PM_PC_WREQUEST: pszTagSB = "PM PC W"; break;
+ }
+ break;
+ }
+
+ /* MIPS */
+ case RGX_MH_TAG_ENCODING_MH_TAG_MIPS:
+ {
+ pszTagID = "MIPS";
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_OPCODE_FETCH: pszTagSB = "Opcode"; break;
+ case RGX_MH_TAG_SB_MIPS_ENCODING_MIPS_TAG_DATA_ACCESS: pszTagSB = "Data"; break;
+ }
+ break;
+ }
+
+ /* CDM tags */
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0:
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1:
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2:
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3:
+ {
+ switch (ui32TagID)
+ {
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG0: pszTagID = "CDM Stage 0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG1: pszTagID = "CDM Stage 1"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG2: pszTagID = "CDM Stage 2"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_CDM_STG3: pszTagID = "CDM Stage 3"; break;
+ }
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTROL_STREAM: pszTagSB = "Control"; break;
+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_INDIRECT_DATA: pszTagSB = "Indirect"; break;
+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_EVENT_DATA: pszTagSB = "Event"; break;
+ case RGX_MH_TAG_SB_CDM_ENCODING_CDM_TAG_CONTEXT_STATE: pszTagSB = "Context"; break;
+ }
+ break;
+ }
+
+ /* VDM tags */
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0:
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1:
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2:
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3:
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4:
+ {
+ switch (ui32TagID)
+ {
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG0: pszTagID = "VDM Stage 0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG1: pszTagID = "VDM Stage 1"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG2: pszTagID = "VDM Stage 2"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG3: pszTagID = "VDM Stage 3"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG4: pszTagID = "VDM Stage 4"; break;
+ }
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTROL: pszTagSB = "Control"; break;
+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STATE: pszTagSB = "State"; break;
+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_INDEX: pszTagSB = "Index"; break;
+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_STACK: pszTagSB = "Stack"; break;
+ case RGX_MH_TAG_SB_VDM_ENCODING_VDM_TAG_CONTEXT: pszTagSB = "Context"; break;
+ }
+ break;
+ }
+
+ /* PDS */
+ case RGX_MH_TAG_ENCODING_MH_TAG_PDS_0:
+ pszTagID = "PDS req 0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_PDS_1:
+ pszTagID = "PDS req 1"; break;
+
+ /* MCU */
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCA:
+ pszTagID = "MCU USCA"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCB:
+ pszTagID = "MCU USCB"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCC:
+ pszTagID = "MCU USCC"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_USCD:
+ pszTagID = "MCU USCD"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCA:
+ pszTagID = "MCU PDS USCA"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCB:
+ pszTagID = "MCU PDS USCB"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCC:
+ pszTagID = "MCU PDS USCC"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDS_USCD:
+			pszTagID = "MCU PDS USCD"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_MCU_PDSRW:
+ pszTagID = "PDS PDSRW"; break;
+
+ /* TCU */
+ case RGX_MH_TAG_ENCODING_MH_TAG_TCU_0:
+ pszTagID = "TCU req 0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TCU_1:
+ pszTagID = "TCU req 1"; break;
+
+ /* FBCDC */
+ case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_0:
+ pszTagID = "FBCDC0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_FBCDC_1:
+ pszTagID = "FBCDC1"; break;
+
+ /* USC Shared */
+ case RGX_MH_TAG_ENCODING_MH_TAG_USC:
+ pszTagID = "USCS"; break;
+
+ /* ISP */
+ case RGX_MH_TAG_ENCODING_MH_TAG_ISP_ZLS:
+ pszTagID = "ISP0 ZLS"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_ISP_DS:
+ pszTagID = "ISP0 DS"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_ISP1_ZLS:
+ pszTagID = "ISP1 ZLS"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_ISP1_DS:
+ pszTagID = "ISP1 DS"; break;
+
+ /* TPF */
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF:
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS:
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF:
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF1:
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_PBCDBIAS:
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_SPF:
+ {
+ switch (ui32TagID)
+ {
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF: pszTagID = "TPF0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_PBCDBIAS: pszTagID = "TPF0 DBIAS"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF_SPF: pszTagID = "TPF0 SPF"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF1: pszTagID = "TPF1"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_PBCDBIAS: pszTagID = "TPF1 DBIAS"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TPF1_SPF: pszTagID = "TPF1 SPF"; break;
+ }
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_PDS_STATE: pszTagSB = "PDS state"; break;
+ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DEPTH_BIAS: pszTagSB = "Depth bias"; break;
+ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_FLOOR_OFFSET_DATA: pszTagSB = "Floor offset"; break;
+ case RGX_MH_TAG_SB_TPF_ENCODING_TPF_TAG_DELTA_DATA: pszTagSB = "Delta"; break;
+ }
+ break;
+ }
+
+ /* IPF */
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ:
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS:
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_CREQ:
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_OTHERS:
+ {
+ switch (ui32TagID)
+ {
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_CREQ: pszTagID = "IPF0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF_OTHERS: pszTagID = "IPF0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_CREQ: pszTagID = "IPF1"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPF1_OTHERS: pszTagID = "IPF1"; break;
+ }
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_ISP_IPP_PIPES))
+ {
+			/* The sideband encodes one CReq/PReq slot per ISP/IPP pipe,
+			 * followed by four fixed request types (decode from ui32TagSB,
+			 * not the tag ID). */
+			if (ui32TagSB < RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+			{
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "CReq%d", ui32TagSB);
+				pszTagSB = pszScratchBuf;
+			}
+			else if (ui32TagSB < 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+			{
+				OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "PReq%d",
+				           ui32TagSB - RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES));
+				pszTagSB = pszScratchBuf;
+			}
+			else
+			{
+				switch (ui32TagSB - 2 * RGX_GET_FEATURE_VALUE(psDevInfo, NUM_ISP_IPP_PIPES))
+ {
+ case 0: pszTagSB = "RReq"; break;
+ case 1: pszTagSB = "DBSC"; break;
+ case 2: pszTagSB = "CPF"; break;
+ case 3: pszTagSB = "Delta"; break;
+ }
+ }
+ }
+ break;
+ }
+
+ /* VDM Stage 5 (temporary) */
+ case RGX_MH_TAG_ENCODING_MH_TAG_VDM_STG5:
+ pszTagID = "VDM Stage 5"; break;
+
+ /* TA */
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PPP:
+ pszTagID = "PPP"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPWRTC:
+ pszTagID = "TPW RTC"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TEACRTC:
+ pszTagID = "TEAC RTC"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGRTC:
+ pszTagID = "PSG RTC"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGREGION:
+ pszTagID = "PSG Region"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_PSGSTREAM:
+ pszTagID = "PSG Stream"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPW:
+ pszTagID = "TPW"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_TA_TPC:
+ pszTagID = "TPC"; break;
+
+ /* PM */
+ case RGX_MH_TAG_ENCODING_MH_TAG_PM_ALLOC:
+ {
+ pszTagID = "PMA";
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAFSTACK: pszTagSB = "TA Fstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMLIST: pszTagSB = "TA MList"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DFSTACK: pszTagSB = "3D Fstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMLIST: pszTagSB = "3D MList"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX0: pszTagSB = "Context0"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_PMCTX1: pszTagSB = "Context1"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_MAVP: pszTagSB = "MAVP"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_UFSTACK: pszTagSB = "UFstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAMMUSTACK: pszTagSB = "TA MMUstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DMMUSTACK: pszTagSB = "3D MMUstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAUFSTACK: pszTagSB = "TA UFstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_3DUFSTACK: pszTagSB = "3D UFstack"; break;
+ case RGX_MH_TAG_SB_PMA_ENCODING_PM_TAG_PMA_TAVFP: pszTagSB = "TA VFP"; break;
+ }
+ break;
+ }
+ case RGX_MH_TAG_ENCODING_MH_TAG_PM_DEALLOC:
+ {
+ pszTagID = "PMD";
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAFSTACK: pszTagSB = "TA Fstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMLIST: pszTagSB = "TA MList"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DFSTACK: pszTagSB = "3D Fstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMLIST: pszTagSB = "3D MList"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX0: pszTagSB = "Context0"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_PMCTX1: pszTagSB = "Context1"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_UFSTACK: pszTagSB = "UFstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAMMUSTACK: pszTagSB = "TA MMUstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DMMUSTACK: pszTagSB = "3D MMUstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAUFSTACK: pszTagSB = "TA UFstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DUFSTACK: pszTagSB = "3D UFstack"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_TAVFP: pszTagSB = "TA VFP"; break;
+ case RGX_MH_TAG_SB_PMD_ENCODING_PM_TAG_PMD_3DVFP: pszTagSB = "3D VFP"; break;
+ }
+ break;
+ }
+
+ /* TDM */
+ case RGX_MH_TAG_ENCODING_MH_TAG_TDM_DMA:
+ {
+ pszTagID = "TDM DMA";
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTL_STREAM: pszTagSB = "Ctl stream"; break;
+ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_CTX_BUFFER: pszTagSB = "Ctx buffer"; break;
+ case RGX_MH_TAG_SB_TDM_DMA_ENCODING_TDM_DMA_TAG_QUEUE_CTL: pszTagSB = "Queue ctl"; break;
+ }
+ break;
+ }
+ case RGX_MH_TAG_ENCODING_MH_TAG_TDM_CTL:
+ {
+ pszTagID = "TDM CTL";
+ switch (ui32TagSB)
+ {
+ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_FENCE: pszTagSB = "Fence"; break;
+ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_CONTEXT: pszTagSB = "Context"; break;
+ case RGX_MH_TAG_SB_TDM_CTL_ENCODING_TDM_CTL_TAG_QUEUE: pszTagSB = "Queue"; break;
+ }
+ break;
+ }
+
+ /* PBE */
+ case RGX_MH_TAG_ENCODING_MH_TAG_PBE0:
+ pszTagID = "PBE0"; break;
+ case RGX_MH_TAG_ENCODING_MH_TAG_PBE1:
+ pszTagID = "PBE1"; break;
+
+ /* IPP */
+ case RGX_MH_TAG_ENCODING_MH_TAG_IPP:
+ pszTagID = "IPP"; break;
+ }
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+
+
+static void _RGXDecodeBIFReqTags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXDBG_BIF_ID eBankID,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ /* default to unknown */
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(ppszTagID != NULL);
+ PVR_ASSERT(ppszTagSB != NULL);
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED) && (eBankID == RGXDBG_DPX_BIF))
+ {
+ _DPXDecodeBIFReqTags(eBankID, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+ return;
+ }
+#endif
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+ {
+ _RGXDecodeBIFReqTagsXE(psDevInfo, ui32TagID, ui32TagSB, ppszTagID, ppszTagSB, pszScratchBuf, ui32ScratchBufSize);
+ return;
+ }
+
+ switch (ui32TagID)
+ {
+ case 0x0:
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ if (eBankID == RGXDBG_BIF0)
+ {
+ pszTagID = "VRDM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "SHF State"; break;
+ case 0x2: pszTagSB = "Index Data"; break;
+ case 0x4: pszTagSB = "Call Stack"; break;
+ case 0x8: pszTagSB = "Context State"; break;
+ }
+ }
+ else
+ {
+ pszTagID = "MMU";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Table"; break;
+ case 0x1: pszTagSB = "Directory"; break;
+ case 0x2: pszTagSB = "Catalogue"; break;
+ }
+ }
+		}
+		else
+#endif
+ {
+ pszTagID = "MMU";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Table"; break;
+ case 0x1: pszTagSB = "Directory"; break;
+ case 0x2: pszTagSB = "Catalogue"; break;
+ }
+ }
+ break;
+ }
+ case 0x1:
+ {
+ pszTagID = "TLA";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Pixel data"; break;
+ case 0x1: pszTagSB = "Command stream data"; break;
+ case 0x2: pszTagSB = "Fence or flush"; break;
+ }
+ break;
+ }
+ case 0x2:
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+			if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED) && (eBankID == RGXDBG_BIF0))
+			{
+				pszTagID = "SHF";
+			}
+			else
+#endif
+ {
+ pszTagID = "HOST";
+ }
+ break;
+ }
+ case 0x3:
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ if (eBankID == RGXDBG_BIF0)
+ {
+ pszTagID = "SHG";
+ }
+ }
+ else
+#endif
+ if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ pszTagID = "META";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "DCache - Thread 0"; break;
+ case 0x1: pszTagSB = "ICache - Thread 0"; break;
+ case 0x2: pszTagSB = "JTag - Thread 0"; break;
+ case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+				case 0x4: pszTagSB = "DCache - Thread 1"; break;
+ case 0x5: pszTagSB = "ICache - Thread 1"; break;
+ case 0x6: pszTagSB = "JTag - Thread 1"; break;
+ case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+ }
+ }
+ else if (RGX_IS_ERN_SUPPORTED(psDevInfo, 57596))
+ {
+ pszTagID="TCU";
+ }
+ else
+ {
+ /* Unreachable code */
+ PVR_ASSERT(IMG_FALSE);
+ }
+ break;
+ }
+ case 0x4:
+ {
+ pszTagID = "USC";
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "Cache line %d", (ui32TagSB & 0x3f));
+ pszTagSB = pszScratchBuf;
+ break;
+ }
+ case 0x5:
+ {
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ if (eBankID == RGXDBG_TEXAS_BIF)
+ {
+ pszTagID = "PBE";
+ }
+ else
+ {
+ pszTagID = "RPM";
+ }
+ }
+ else
+#endif
+ {
+ pszTagID = "PBE";
+ }
+		}
+		else
+		{
+			pszTagID = "PBE";
+		}
+		break;
+ }
+ case 0x6:
+ {
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ if (eBankID == RGXDBG_TEXAS_BIF)
+ {
+ pszTagID = "ISP";
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS"; break;
+ case 0x20: pszTagSB = "Occlusion Query"; break;
+ }
+				}
+				else
+ {
+ pszTagID = "FBA";
+ }
+			}
+			else
+#endif
+ {
+ pszTagID = "ISP";
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS"; break;
+ case 0x20: pszTagSB = "Occlusion Query"; break;
+ }
+ }
+		}
+		else
+ {
+ pszTagID = "ISP";
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS"; break;
+ case 0x20: pszTagSB = "Occlusion Query"; break;
+ }
+ }
+ break;
+ }
+ case 0x7:
+ {
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+ {
+ if (eBankID == RGXDBG_TEXAS_BIF)
+ {
+ pszTagID = "IPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "CPF"; break;
+ case 0x1: pszTagSB = "DBSC"; break;
+ case 0x2:
+ case 0x4:
+ case 0x6:
+ case 0x8: pszTagSB = "Control Stream"; break;
+ case 0x3:
+ case 0x5:
+ case 0x7:
+ case 0x9: pszTagSB = "Primitive Block"; break;
+ }
+ }
+ else
+ {
+ pszTagID = "IPP";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Macrotile Header"; break;
+ case 0x1: pszTagSB = "Region Header"; break;
+ }
+ }
+ }
+ else if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIMPLE_INTERNAL_PARAMETER_FORMAT))
+ {
+ pszTagID = "IPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Region Header"; break;
+ case 0x1: pszTagSB = "DBSC"; break;
+ case 0x2: pszTagSB = "CPF"; break;
+ case 0x3: pszTagSB = "Control Stream"; break;
+ case 0x4: pszTagSB = "Primitive Block"; break;
+ }
+ }
+ else
+ {
+ pszTagID = "IPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Macrotile Header"; break;
+ case 0x1: pszTagSB = "Region Header"; break;
+ case 0x2: pszTagSB = "DBSC"; break;
+ case 0x3: pszTagSB = "CPF"; break;
+ case 0x4:
+ case 0x6:
+ case 0x8: pszTagSB = "Control Stream"; break;
+ case 0x5:
+ case 0x7:
+ case 0x9: pszTagSB = "Primitive Block"; break;
+ }
+ }
+ break;
+ }
+ case 0x8:
+ {
+ pszTagID = "CDM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "Indirect Data"; break;
+ case 0x2: pszTagSB = "Event Write"; break;
+ case 0x3: pszTagSB = "Context State"; break;
+ }
+ break;
+ }
+ case 0x9:
+ {
+ pszTagID = "VDM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "PPP State"; break;
+ case 0x2: pszTagSB = "Index Data"; break;
+ case 0x4: pszTagSB = "Call Stack"; break;
+ case 0x8: pszTagSB = "Context State"; break;
+ }
+ break;
+ }
+ case 0xA:
+ {
+ pszTagID = "PM";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+ case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+ case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+ case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+ case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+ case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+ case 0x6: pszTagSB = "PMA_MAVP"; break;
+ case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+ case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+ case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+ case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+ case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+ case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+ case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+ case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+ case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+ case 0x18: pszTagSB = "PMA_TAVFP"; break;
+ case 0x19: pszTagSB = "PMD_3DVFP"; break;
+ case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+ }
+ break;
+ }
+ case 0xB:
+ {
+ pszTagID = "TA";
+ switch (ui32TagSB)
+ {
+ case 0x1: pszTagSB = "VCE"; break;
+ case 0x2: pszTagSB = "TPC"; break;
+ case 0x3: pszTagSB = "TE Control Stream"; break;
+ case 0x4: pszTagSB = "TE Region Header"; break;
+ case 0x5: pszTagSB = "TE Render Target Cache"; break;
+ case 0x6: pszTagSB = "TEAC Render Target Cache"; break;
+ case 0x7: pszTagSB = "VCE Render Target Cache"; break;
+ case 0x8: pszTagSB = "PPP Context State"; break;
+ }
+ break;
+ }
+ case 0xC:
+ {
+ pszTagID = "TPF";
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+ case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+ case 0x3: pszTagSB = "CPF - Tables"; break;
+ case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+ case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+ case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+ case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+ case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+ }
+ break;
+ }
+ case 0xD:
+ {
+ pszTagID = "PDS";
+ break;
+ }
+ case 0xE:
+ {
+ pszTagID = "MCU";
+ {
+ IMG_UINT32 ui32Burst = (ui32TagSB >> 5) & 0x7;
+ IMG_UINT32 ui32GroupEnc = (ui32TagSB >> 2) & 0x7;
+ IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+ IMG_CHAR* pszBurst = "";
+ IMG_CHAR* pszGroupEnc = "";
+ IMG_CHAR* pszGroup = "";
+
+ switch (ui32Burst)
+ {
+ case 0x0:
+ case 0x1: pszBurst = "128bit word within the Lower 256bits"; break;
+ case 0x2:
+ case 0x3: pszBurst = "128bit word within the Upper 256bits"; break;
+ case 0x4: pszBurst = "Lower 256bits"; break;
+ case 0x5: pszBurst = "Upper 256bits"; break;
+ case 0x6: pszBurst = "512 bits"; break;
+ }
+ switch (ui32GroupEnc)
+ {
+ case 0x0: pszGroupEnc = "TPUA_USC"; break;
+ case 0x1: pszGroupEnc = "TPUB_USC"; break;
+ case 0x2: pszGroupEnc = "USCA_USC"; break;
+ case 0x3: pszGroupEnc = "USCB_USC"; break;
+ case 0x4: pszGroupEnc = "PDS_USC"; break;
+				case 0x5:
+					if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+					    RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) < 6)
+					{
+						pszGroupEnc = "PDSRW";
+					}
+					else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+					         RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) == 6)
+					{
+						pszGroupEnc = "UPUC_USC";
+					}
+					break;
+				case 0x6:
+					if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+					    RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) == 6)
+					{
+						pszGroupEnc = "TPUC_USC";
+					}
+					break;
+				case 0x7:
+					if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+					    RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) == 6)
+					{
+						pszGroupEnc = "PDSRW";
+					}
+					break;
+ }
+ switch (ui32Group)
+ {
+ case 0x0: pszGroup = "Banks 0-3"; break;
+ case 0x1: pszGroup = "Banks 4-7"; break;
+ case 0x2: pszGroup = "Banks 8-11"; break;
+ case 0x3: pszGroup = "Banks 12-15"; break;
+ }
+
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "%s, %s, %s", pszBurst, pszGroupEnc, pszGroup);
+ pszTagSB = pszScratchBuf;
+ }
+ break;
+ }
+ case 0xF:
+ {
+ pszTagID = "FB_CDC";
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+ {
+ IMG_UINT32 ui32Req = (ui32TagSB >> 0) & 0xf;
+ IMG_UINT32 ui32MCUSB = (ui32TagSB >> 4) & 0x3;
+ IMG_CHAR* pszReqOrig = "";
+
+ switch (ui32Req)
+ {
+ case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+ case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+ case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+ case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+ case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+ case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+ case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+ case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+ case 0x8: pszReqOrig = "FBC Request, originator ZLS Requester Fence"; break;
+ case 0x9: pszReqOrig = "FBC Request, originator PBE Requester Fence"; break;
+ case 0xa: pszReqOrig = "FBC Request, originator Host Requester Fence"; break;
+ case 0xb: pszReqOrig = "FBC Request, originator TLA Requester Fence"; break;
+ case 0xc: pszReqOrig = "Reserved"; break;
+ case 0xd: pszReqOrig = "Reserved"; break;
+ case 0xe: pszReqOrig = "FBDC Request, originator FBCDC(Host) Memory Fence"; break;
+ case 0xf: pszReqOrig = "FBDC Request, originator FBCDC(TLA) Memory Fence"; break;
+ }
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+ pszTagSB = pszScratchBuf;
+ }
+ else
+ {
+ IMG_UINT32 ui32Req = (ui32TagSB >> 2) & 0x7;
+ IMG_UINT32 ui32MCUSB = (ui32TagSB >> 0) & 0x3;
+ IMG_CHAR* pszReqOrig = "";
+
+ switch (ui32Req)
+ {
+ case 0x0: pszReqOrig = "FBC Request, originator ZLS"; break;
+ case 0x1: pszReqOrig = "FBC Request, originator PBE"; break;
+ case 0x2: pszReqOrig = "FBC Request, originator Host"; break;
+ case 0x3: pszReqOrig = "FBC Request, originator TLA"; break;
+ case 0x4: pszReqOrig = "FBDC Request, originator ZLS"; break;
+ case 0x5: pszReqOrig = "FBDC Request, originator MCU"; break;
+ case 0x6: pszReqOrig = "FBDC Request, originator Host"; break;
+ case 0x7: pszReqOrig = "FBDC Request, originator TLA"; break;
+ }
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "%s, MCU sideband 0x%X", pszReqOrig, ui32MCUSB);
+ pszTagSB = pszScratchBuf;
+ }
+ break;
+ }
+ } /* switch(TagID) */
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodeMMULevel
+
+ @Description
+
+ Return the name for the MMU level that faulted.
+
+ @Input ui32MMULevel - MMU level
+
+ @Return  const IMG_CHAR* - String describing the MMU level that faulted.
+
+******************************************************************************/
+static const IMG_CHAR* _RGXDecodeMMULevel(IMG_UINT32 ui32MMULevel)
+{
+ const IMG_CHAR* pszMMULevel = "";
+
+ switch (ui32MMULevel)
+ {
+ case 0x0: pszMMULevel = " (Page Table)"; break;
+ case 0x1: pszMMULevel = " (Page Directory)"; break;
+ case 0x2: pszMMULevel = " (Page Catalog)"; break;
+ case 0x3: pszMMULevel = " (Cat Base)"; break;
+ }
+
+ return pszMMULevel;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDecodeMMUReqTags
+
+ @Description
+
+ Decodes the MMU Tag ID and Sideband data fields from RGX_CR_MMU_FAULT_META_STATUS and
+ RGX_CR_MMU_FAULT_STATUS regs.
+
+ @Input ui32TagID - Tag ID value
+ @Input ui32TagSB - Tag Sideband data
+ @Input bRead - Read flag
+ @Output ppszTagID - Decoded string from the Tag ID
+ @Output ppszTagSB - Decoded string from the Tag SB
+ @Output pszScratchBuf - Buffer provided to the function to generate the debug strings
+ @Input ui32ScratchBufSize - Size of the provided buffer
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDecodeMMUReqTags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32TagID,
+ IMG_UINT32 ui32TagSB,
+ IMG_BOOL bRead,
+ IMG_CHAR **ppszTagID,
+ IMG_CHAR **ppszTagSB,
+ IMG_CHAR *pszScratchBuf,
+ IMG_UINT32 ui32ScratchBufSize)
+{
+ IMG_INT32 i32SideBandType = -1;
+ IMG_CHAR *pszTagID = "-";
+ IMG_CHAR *pszTagSB = "-";
+
+ PVR_ASSERT(ppszTagID != NULL);
+ PVR_ASSERT(ppszTagSB != NULL);
+
+ switch (ui32TagID)
+ {
+ case 0: pszTagID = "META (Jones)"; i32SideBandType = RGXDBG_META; break;
+ case 1: pszTagID = "TLA (Jones)"; i32SideBandType = RGXDBG_TLA; break;
+ case 2: pszTagID = "DMA (Jones)"; i32SideBandType = RGXDBG_DMA; break;
+ case 3: pszTagID = "VDMM (Jones)"; i32SideBandType = RGXDBG_VDMM; break;
+ case 4: pszTagID = "CDM (Jones)"; i32SideBandType = RGXDBG_CDM; break;
+ case 5: pszTagID = "IPP (Jones)"; i32SideBandType = RGXDBG_IPP; break;
+ case 6: pszTagID = "PM (Jones)"; i32SideBandType = RGXDBG_PM; break;
+ case 7: pszTagID = "Tiling (Jones)"; i32SideBandType = RGXDBG_TILING; break;
+ case 8: pszTagID = "MCU (Texas 0)"; i32SideBandType = RGXDBG_MCU; break;
+ case 12: pszTagID = "VDMS (Black Pearl 0)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 13: pszTagID = "IPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF; break;
+ case 14: pszTagID = "ISP (Black Pearl 0)"; i32SideBandType = RGXDBG_ISP; break;
+ case 15: pszTagID = "TPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF; break;
+ case 16: pszTagID = "USCS (Black Pearl 0)"; i32SideBandType = RGXDBG_USCS; break;
+ case 17: pszTagID = "PPP (Black Pearl 0)"; i32SideBandType = RGXDBG_PPP; break;
+ case 20: pszTagID = "MCU (Texas 1)"; i32SideBandType = RGXDBG_MCU; break;
+ case 24: pszTagID = "MCU (Texas 2)"; i32SideBandType = RGXDBG_MCU; break;
+ case 28: pszTagID = "VDMS (Black Pearl 1)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 29: pszTagID = "IPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF; break;
+ case 30: pszTagID = "ISP (Black Pearl 1)"; i32SideBandType = RGXDBG_ISP; break;
+ case 31: pszTagID = "TPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF; break;
+ case 32: pszTagID = "USCS (Black Pearl 1)"; i32SideBandType = RGXDBG_USCS; break;
+ case 33: pszTagID = "PPP (Black Pearl 1)"; i32SideBandType = RGXDBG_PPP; break;
+ case 36: pszTagID = "MCU (Texas 3)"; i32SideBandType = RGXDBG_MCU; break;
+ case 40: pszTagID = "MCU (Texas 4)"; i32SideBandType = RGXDBG_MCU; break;
+ case 44: pszTagID = "VDMS (Black Pearl 2)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 45: pszTagID = "IPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF; break;
+ case 46: pszTagID = "ISP (Black Pearl 2)"; i32SideBandType = RGXDBG_ISP; break;
+ case 47: pszTagID = "TPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF; break;
+ case 48: pszTagID = "USCS (Black Pearl 2)"; i32SideBandType = RGXDBG_USCS; break;
+ case 49: pszTagID = "PPP (Black Pearl 2)"; i32SideBandType = RGXDBG_PPP; break;
+ case 52: pszTagID = "MCU (Texas 5)"; i32SideBandType = RGXDBG_MCU; break;
+ case 56: pszTagID = "MCU (Texas 6)"; i32SideBandType = RGXDBG_MCU; break;
+ case 60: pszTagID = "VDMS (Black Pearl 3)"; i32SideBandType = RGXDBG_VDMS; break;
+ case 61: pszTagID = "IPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF; break;
+ case 62: pszTagID = "ISP (Black Pearl 3)"; i32SideBandType = RGXDBG_ISP; break;
+ case 63: pszTagID = "TPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF; break;
+ case 64: pszTagID = "USCS (Black Pearl 3)"; i32SideBandType = RGXDBG_USCS; break;
+ case 65: pszTagID = "PPP (Black Pearl 3)"; i32SideBandType = RGXDBG_PPP; break;
+ case 68: pszTagID = "MCU (Texas 7)"; i32SideBandType = RGXDBG_MCU; break;
+ }
+	if (('-' == pszTagID[0]) && ('\0' == pszTagID[1]))
+	{
+		/* Tag not decoded by the common table above ("-" is the default
+		 * "-"-terminated string, so test for '\0', not '\n'); try the
+		 * BVNC-specific encodings. */
+		if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539) ||
+ (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, FBCDC_ARCHITECTURE) && RGX_GET_FEATURE_VALUE(psDevInfo, FBCDC_ARCHITECTURE) >= 3))
+ {
+			switch (ui32TagID)
+ {
+ case 18: pszTagID = "TPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 19: pszTagID = "IPF_CPF (Black Pearl 0)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ case 34: pszTagID = "TPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 35: pszTagID = "IPF_CPF (Black Pearl 1)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ case 50: pszTagID = "TPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 51: pszTagID = "IPF_CPF (Black Pearl 2)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ case 66: pszTagID = "TPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_TPF_CPF; break;
+ case 67: pszTagID = "IPF_CPF (Black Pearl 3)"; i32SideBandType = RGXDBG_IPF_CPF; break;
+ }
+
+			if (RGX_IS_ERN_SUPPORTED(psDevInfo, 50539))
+			{
+				switch (ui32TagID)
+ {
+ case 9: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 10: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 21: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 22: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 25: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 26: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 37: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 38: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 41: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 42: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 53: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 54: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 57: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 58: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 69: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ case 70: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+ }
+			}
+			else
+			{
+				switch (ui32TagID)
+ {
+ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+ case 10: pszTagID = "PBE (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 11: pszTagID = "FBCDC (Texas 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+ case 22: pszTagID = "PBE (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 23: pszTagID = "FBCDC (Texas 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+ case 26: pszTagID = "PBE (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 27: pszTagID = "FBCDC (Texas 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+ case 38: pszTagID = "PBE (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 39: pszTagID = "FBCDC (Texas 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+ case 42: pszTagID = "PBE (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 43: pszTagID = "FBCDC (Texas 4)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+ case 54: pszTagID = "PBE (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 55: pszTagID = "FBCDC (Texas 5)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+ case 58: pszTagID = "PBE (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 59: pszTagID = "FBCDC (Texas 6)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+ case 70: pszTagID = "PBE (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ case 71: pszTagID = "FBCDC (Texas 7)"; i32SideBandType = RGXDBG_FBCDC; break;
+ }
+ }
+		}
+		else
+		{
+			switch (ui32TagID)
+ {
+ case 9: pszTagID = "PDS (Texas 0)"; i32SideBandType = RGXDBG_PDS; break;
+ case 10: pszTagID = "PBE0 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 11: pszTagID = "PBE1 (Texas 0)"; i32SideBandType = RGXDBG_PBE; break;
+ case 18: pszTagID = "VCE (Black Pearl 0)"; i32SideBandType = RGXDBG_VCE; break;
+ case 19: pszTagID = "FBCDC (Black Pearl 0)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 21: pszTagID = "PDS (Texas 1)"; i32SideBandType = RGXDBG_PDS; break;
+ case 22: pszTagID = "PBE0 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 23: pszTagID = "PBE1 (Texas 1)"; i32SideBandType = RGXDBG_PBE; break;
+ case 25: pszTagID = "PDS (Texas 2)"; i32SideBandType = RGXDBG_PDS; break;
+ case 26: pszTagID = "PBE0 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 27: pszTagID = "PBE1 (Texas 2)"; i32SideBandType = RGXDBG_PBE; break;
+ case 34: pszTagID = "VCE (Black Pearl 1)"; i32SideBandType = RGXDBG_VCE; break;
+ case 35: pszTagID = "FBCDC (Black Pearl 1)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 37: pszTagID = "PDS (Texas 3)"; i32SideBandType = RGXDBG_PDS; break;
+ case 38: pszTagID = "PBE0 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 39: pszTagID = "PBE1 (Texas 3)"; i32SideBandType = RGXDBG_PBE; break;
+ case 41: pszTagID = "PDS (Texas 4)"; i32SideBandType = RGXDBG_PDS; break;
+ case 42: pszTagID = "PBE0 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 43: pszTagID = "PBE1 (Texas 4)"; i32SideBandType = RGXDBG_PBE; break;
+ case 50: pszTagID = "VCE (Black Pearl 2)"; i32SideBandType = RGXDBG_VCE; break;
+ case 51: pszTagID = "FBCDC (Black Pearl 2)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 53: pszTagID = "PDS (Texas 5)"; i32SideBandType = RGXDBG_PDS; break;
+ case 54: pszTagID = "PBE0 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 55: pszTagID = "PBE1 (Texas 5)"; i32SideBandType = RGXDBG_PBE; break;
+ case 57: pszTagID = "PDS (Texas 6)"; i32SideBandType = RGXDBG_PDS; break;
+ case 58: pszTagID = "PBE0 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 59: pszTagID = "PBE1 (Texas 6)"; i32SideBandType = RGXDBG_PBE; break;
+ case 66: pszTagID = "VCE (Black Pearl 3)"; i32SideBandType = RGXDBG_VCE; break;
+ case 67: pszTagID = "FBCDC (Black Pearl 3)"; i32SideBandType = RGXDBG_FBCDC; break;
+ case 69: pszTagID = "PDS (Texas 7)"; i32SideBandType = RGXDBG_PDS; break;
+ case 70: pszTagID = "PBE0 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ case 71: pszTagID = "PBE1 (Texas 7)"; i32SideBandType = RGXDBG_PBE; break;
+ }
+ }
+
+ }
+
+ switch (i32SideBandType)
+ {
+ case RGXDBG_META:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "DCache - Thread 0"; break;
+ case 0x1: pszTagSB = "ICache - Thread 0"; break;
+ case 0x2: pszTagSB = "JTag - Thread 0"; break;
+ case 0x3: pszTagSB = "Slave bus - Thread 0"; break;
+ case 0x4: pszTagSB = "DCache - Thread 1"; break;
+ case 0x5: pszTagSB = "ICache - Thread 1"; break;
+ case 0x6: pszTagSB = "JTag - Thread 1"; break;
+ case 0x7: pszTagSB = "Slave bus - Thread 1"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_TLA:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Pixel data"; break;
+ case 0x1: pszTagSB = "Command stream data"; break;
+ case 0x2: pszTagSB = "Fence or flush"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_VDMM:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream - Read Only"; break;
+ case 0x1: pszTagSB = "PPP State - Read Only"; break;
+ case 0x2: pszTagSB = "Indices - Read Only"; break;
+ case 0x4: pszTagSB = "Call Stack - Read/Write"; break;
+ case 0x6: pszTagSB = "DrawIndirect - Read Only"; break;
+ case 0xA: pszTagSB = "Context State - Write Only"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_CDM:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Control Stream"; break;
+ case 0x1: pszTagSB = "Indirect Data"; break;
+ case 0x2: pszTagSB = "Event Write"; break;
+ case 0x3: pszTagSB = "Context State"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_IPP:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Macrotile Header"; break;
+ case 0x1: pszTagSB = "Region Header"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_PM:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "PMA_TAFSTACK"; break;
+ case 0x1: pszTagSB = "PMA_TAMLIST"; break;
+ case 0x2: pszTagSB = "PMA_3DFSTACK"; break;
+ case 0x3: pszTagSB = "PMA_3DMLIST"; break;
+ case 0x4: pszTagSB = "PMA_PMCTX0"; break;
+ case 0x5: pszTagSB = "PMA_PMCTX1"; break;
+ case 0x6: pszTagSB = "PMA_MAVP"; break;
+ case 0x7: pszTagSB = "PMA_UFSTACK"; break;
+ case 0x8: pszTagSB = "PMD_TAFSTACK"; break;
+ case 0x9: pszTagSB = "PMD_TAMLIST"; break;
+ case 0xA: pszTagSB = "PMD_3DFSTACK"; break;
+ case 0xB: pszTagSB = "PMD_3DMLIST"; break;
+ case 0xC: pszTagSB = "PMD_PMCTX0"; break;
+ case 0xD: pszTagSB = "PMD_PMCTX1"; break;
+ case 0xF: pszTagSB = "PMD_UFSTACK"; break;
+ case 0x10: pszTagSB = "PMA_TAMMUSTACK"; break;
+ case 0x11: pszTagSB = "PMA_3DMMUSTACK"; break;
+ case 0x12: pszTagSB = "PMD_TAMMUSTACK"; break;
+ case 0x13: pszTagSB = "PMD_3DMMUSTACK"; break;
+ case 0x14: pszTagSB = "PMA_TAUFSTACK"; break;
+ case 0x15: pszTagSB = "PMA_3DUFSTACK"; break;
+ case 0x16: pszTagSB = "PMD_TAUFSTACK"; break;
+ case 0x17: pszTagSB = "PMD_3DUFSTACK"; break;
+ case 0x18: pszTagSB = "PMA_TAVFP"; break;
+ case 0x19: pszTagSB = "PMD_3DVFP"; break;
+ case 0x1A: pszTagSB = "PMD_TAVFP"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_TILING:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "PSG Control Stream TP0"; break;
+ case 0x1: pszTagSB = "TPC TP0"; break;
+ case 0x2: pszTagSB = "VCE0"; break;
+ case 0x3: pszTagSB = "VCE1"; break;
+ case 0x4: pszTagSB = "PSG Control Stream TP1"; break;
+ case 0x5: pszTagSB = "TPC TP1"; break;
+ case 0x8: pszTagSB = "PSG Region Header TP0"; break;
+ case 0xC: pszTagSB = "PSG Region Header TP1"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_VDMS:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "Context State - Write Only"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_IPF:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x00:
+ case 0x20: pszTagSB = "CPF"; break;
+ case 0x01: pszTagSB = "DBSC"; break;
+ case 0x02:
+ case 0x04:
+ case 0x06:
+ case 0x08:
+ case 0x0A:
+ case 0x0C:
+ case 0x0E:
+ case 0x10: pszTagSB = "Control Stream"; break;
+ case 0x03:
+ case 0x05:
+ case 0x07:
+ case 0x09:
+ case 0x0B:
+ case 0x0D:
+ case 0x0F:
+ case 0x11: pszTagSB = "Primitive Block"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_ISP:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "ZLS read/write"; break;
+ case 0x20: pszTagSB = "Occlusion query read/write"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_TPF:
+ {
+ switch (ui32TagSB)
+ {
+ case 0x0: pszTagSB = "TPF0: Primitive Block"; break;
+ case 0x1: pszTagSB = "TPF0: Depth Bias"; break;
+ case 0x2: pszTagSB = "TPF0: Per Primitive IDs"; break;
+ case 0x3: pszTagSB = "CPF - Tables"; break;
+ case 0x4: pszTagSB = "TPF1: Primitive Block"; break;
+ case 0x5: pszTagSB = "TPF1: Depth Bias"; break;
+ case 0x6: pszTagSB = "TPF1: Per Primitive IDs"; break;
+ case 0x7: pszTagSB = "CPF - Data: Pipe 0"; break;
+ case 0x8: pszTagSB = "TPF2: Primitive Block"; break;
+ case 0x9: pszTagSB = "TPF2: Depth Bias"; break;
+ case 0xA: pszTagSB = "TPF2: Per Primitive IDs"; break;
+ case 0xB: pszTagSB = "CPF - Data: Pipe 1"; break;
+ case 0xC: pszTagSB = "TPF3: Primitive Block"; break;
+ case 0xD: pszTagSB = "TPF3: Depth Bias"; break;
+ case 0xE: pszTagSB = "TPF3: Per Primitive IDs"; break;
+ case 0xF: pszTagSB = "CPF - Data: Pipe 2"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_FBCDC:
+ {
+ /*
+			 * FBC faults on a 4-cluster phantom do not always set SB
+ * bit 5, but since FBC is write-only and FBDC is read-only,
+ * we can set bit 5 if this is a write fault, before decoding.
+ */
+ if (bRead == IMG_FALSE)
+ {
+ ui32TagSB |= 0x20;
+ }
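+			/*
+			 * Worked example (illustrative): a write fault reported with
+			 * SB = 0x02 is adjusted to 0x22 by the OR above and therefore
+			 * decodes as "FBC Request, originator PBE Dust 0, Cluster 0".
+			 */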
+
+ switch (ui32TagSB)
+ {
+ case 0x00: pszTagSB = "FBDC Request, originator ZLS"; break;
+ case 0x02: pszTagSB = "FBDC Request, originator MCU Dust 0"; break;
+ case 0x03: pszTagSB = "FBDC Request, originator MCU Dust 1"; break;
+ case 0x20: pszTagSB = "FBC Request, originator ZLS"; break;
+ case 0x22: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0"; break;
+ case 0x23: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1"; break;
+ case 0x24: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0"; break;
+ case 0x25: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1"; break;
+ case 0x28: pszTagSB = "FBC Request, originator ZLS Fence"; break;
+ case 0x2a: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 0, Fence"; break;
+ case 0x2b: pszTagSB = "FBC Request, originator PBE Dust 0, Cluster 1, Fence"; break;
+ case 0x2c: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 0, Fence"; break;
+ case 0x2d: pszTagSB = "FBC Request, originator PBE Dust 1, Cluster 1, Fence"; break;
+ }
+ break;
+ }
+
+ case RGXDBG_MCU:
+ {
+ IMG_UINT32 ui32SetNumber = (ui32TagSB >> 5) & 0x7;
+ IMG_UINT32 ui32WayNumber = (ui32TagSB >> 2) & 0x7;
+ IMG_UINT32 ui32Group = ui32TagSB & 0x3;
+
+ IMG_CHAR* pszGroup = "";
+
+ switch (ui32Group)
+ {
+ case 0x0: pszGroup = "Banks 0-1"; break;
+ case 0x1: pszGroup = "Banks 2-3"; break;
+ case 0x2: pszGroup = "Banks 4-5"; break;
+ case 0x3: pszGroup = "Banks 6-7"; break;
+ }
+
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize,
+ "Set=%d, Way=%d, %s", ui32SetNumber, ui32WayNumber, pszGroup);
+ pszTagSB = pszScratchBuf;
+ break;
+ }
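+		/*
+		 * Worked example (illustrative): ui32TagSB = 0x6B gives
+		 * Set = (0x6B >> 5) & 0x7 = 3, Way = (0x6B >> 2) & 0x7 = 2 and
+		 * Group = 0x6B & 0x3 = 3, so the scratch buffer would read
+		 * "Set=3, Way=2, Banks 6-7".
+		 */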
+
+ default:
+ {
+ OSSNPrintf(pszScratchBuf, ui32ScratchBufSize, "SB=0x%02x", ui32TagSB);
+ pszTagSB = pszScratchBuf;
+ break;
+ }
+ }
+
+ *ppszTagID = pszTagID;
+ *ppszTagSB = pszTagSB;
+}
+
+static void ConvertOSTimestampToSAndNS(IMG_UINT64 ui64OSTimer,
+ IMG_UINT64 *pui64Seconds,
+ IMG_UINT64 *pui64Nanoseconds)
+{
+ IMG_UINT32 ui32Remainder;
+
+ *pui64Seconds = OSDivide64r64(ui64OSTimer, 1000000000, &ui32Remainder);
+ *pui64Nanoseconds = ui64OSTimer - (*pui64Seconds * 1000000000ULL);
+}
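+
+/*
+ * Worked example (illustrative): ui64OSTimer = 1234567890123 ns splits into
+ * *pui64Seconds = 1234 and *pui64Nanoseconds = 567890123, printed by the
+ * callers below as an OS time of 1234.567890123 s.
+ */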
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+
+typedef enum _DEVICEMEM_HISTORY_QUERY_INDEX_
+{
+ DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING,
+ DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED,
+ DEVICEMEM_HISTORY_QUERY_INDEX_NEXT,
+ DEVICEMEM_HISTORY_QUERY_INDEX_COUNT,
+} DEVICEMEM_HISTORY_QUERY_INDEX;
+
+/*!
+*******************************************************************************
+
+ @Function _PrintDevicememHistoryQueryResult
+
+ @Description
+
+ Print details of a single result from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf - Debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psFaultProcessInfo - The process info derived from the page fault
+ @Input psResult - The DevicememHistory result to be printed
+ @Input ui32Index - The index of the result
+
+ @Return void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryResult(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+ DEVICEMEM_HISTORY_QUERY_OUT_RESULT *psResult,
+ IMG_UINT32 ui32Index)
+{
+ IMG_UINT32 ui32Remainder;
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+ ConvertOSTimestampToSAndNS(psResult->ui64When,
+ &ui64Seconds,
+ &ui64Nanoseconds);
+
+ if(psFaultProcessInfo->uiPID != RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ PVR_DUMPDEBUG_LOG(" [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+ " Operation: %s Modified: %llu us ago (OS time %llu.%09llu s)",
+ ui32Index,
+ psResult->szString,
+ (unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+ (unsigned long long) psResult->uiSize,
+ psResult->bMap ? "Map": "Unmap",
+ (unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" [%u] Name: %s Base address: " IMG_DEV_VIRTADDR_FMTSPEC
+ " Size: " IMG_DEVMEM_SIZE_FMTSPEC
+ " Operation: %s Modified: %llu us ago (OS time %llu.%09llu) PID: %u (%s)",
+ ui32Index,
+ psResult->szString,
+ (unsigned long long) psResult->sBaseDevVAddr.uiAddr,
+ (unsigned long long) psResult->uiSize,
+ psResult->bMap ? "Map": "Unmap",
+ (unsigned long long) OSDivide64r64(psResult->ui64Age, 1000, &ui32Remainder),
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds,
+ (unsigned int) psResult->sProcessInfo.uiPID,
+ psResult->sProcessInfo.szProcessName);
+ }
+
+ if(!psResult->bRange)
+ {
+ PVR_DUMPDEBUG_LOG(" Whole allocation was %s", psResult->bMap ? "mapped": "unmapped");
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" Pages %u to %u (" IMG_DEV_VIRTADDR_FMTSPEC "-" IMG_DEV_VIRTADDR_FMTSPEC ") %s%s",
+ psResult->ui32StartPage,
+ psResult->ui32StartPage + psResult->ui32PageCount - 1,
+ psResult->sMapStartAddr.uiAddr,
+ psResult->sMapEndAddr.uiAddr,
+ psResult->bAll ? "(whole allocation) " : "",
+ psResult->bMap ? "mapped": "unmapped");
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function _PrintDevicememHistoryQueryOut
+
+ @Description
+
+ Print details of all the results from a DevicememHistory query
+
+ @Input pfnDumpDebugPrintf - Debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psFaultProcessInfo - The process info derived from the page fault
+ @Input psQueryOut - Storage for the query results
+
+ @Return void
+
+******************************************************************************/
+static void _PrintDevicememHistoryQueryOut(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXMEM_PROCESS_INFO *psFaultProcessInfo,
+ DEVICEMEM_HISTORY_QUERY_OUT *psQueryOut)
+{
+ IMG_UINT32 i;
+
+ if(psQueryOut->ui32NumResults == 0)
+ {
+ PVR_DUMPDEBUG_LOG(" No results");
+ }
+ else
+ {
+ for(i = 0; i < psQueryOut->ui32NumResults; i++)
+ {
+ _PrintDevicememHistoryQueryResult(pfnDumpDebugPrintf, pvDumpDebugFile,
+ psFaultProcessInfo,
+ &psQueryOut->sResults[i],
+ i);
+ }
+ }
+}
+
+/* table of HW page size values and the equivalent page sizes in bytes */
+static const unsigned int aui32HWPageSizeTable[][2] =
+{
+ { 0, PVRSRV_4K_PAGE_SIZE },
+ { 1, PVRSRV_16K_PAGE_SIZE },
+ { 2, PVRSRV_64K_PAGE_SIZE },
+ { 3, PVRSRV_256K_PAGE_SIZE },
+ { 4, PVRSRV_1M_PAGE_SIZE },
+ { 5, PVRSRV_2M_PAGE_SIZE }
+};
+
+/*!
+*******************************************************************************
+
+ @Function _PageSizeHWToBytes
+
+ @Description
+
+ Convert a HW page size value to its size in bytes
+
+ @Input ui32PageSizeHW - The HW page size value
+
+ @Return IMG_UINT32 The page size in bytes
+
+******************************************************************************/
+static IMG_UINT32 _PageSizeHWToBytes(IMG_UINT32 ui32PageSizeHW)
+{
+ if (ui32PageSizeHW > 5)
+ {
+ /* This is invalid, so return a default value as we cannot ASSERT in this code! */
+ return PVRSRV_4K_PAGE_SIZE;
+ }
+
+ return aui32HWPageSizeTable[ui32PageSizeHW][1];
+}
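+
+/*
+ * Worked example (illustrative): a HW page size field of 2 maps to
+ * PVRSRV_64K_PAGE_SIZE via aui32HWPageSizeTable above, while an out-of-range
+ * value such as 7 falls back to PVRSRV_4K_PAGE_SIZE.
+ */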
+
+/*!
+*******************************************************************************
+
+ @Function _GetDevicememHistoryData
+
+ @Description
+
+ Get the DevicememHistory results for the given PID and faulting device virtual address.
+ The function will query DevicememHistory for information about the faulting page, as well
+ as the page before and after.
+
+ @Input uiPID - The process ID to search for allocations belonging to
+ @Input sFaultDevVAddr - The device address to search for allocations at/before/after
+ @Input asQueryOut - Storage for the query results
+ @Input ui32PageSizeBytes - Faulted page size in bytes
+
+ @Return IMG_BOOL - IMG_TRUE if any results were found for this page fault
+
+******************************************************************************/
+static IMG_BOOL _GetDevicememHistoryData(IMG_PID uiPID, IMG_DEV_VIRTADDR sFaultDevVAddr,
+ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT],
+ IMG_UINT32 ui32PageSizeBytes)
+{
+ IMG_UINT32 i;
+ DEVICEMEM_HISTORY_QUERY_IN sQueryIn;
+ IMG_BOOL bAnyHits = IMG_FALSE;
+
+ /* if the page fault originated in the firmware then the allocation may
+ * appear to belong to any PID, because FW allocations are attributed
+ * to the client process creating the allocation, so instruct the
+ * devicemem_history query to search all available PIDs
+ */
+ if(uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ sQueryIn.uiPID = DEVICEMEM_HISTORY_PID_ANY;
+ }
+ else
+ {
+ sQueryIn.uiPID = uiPID;
+ }
+
+ /* query the DevicememHistory about the preceding / faulting / next page */
+
+ for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+ {
+ IMG_BOOL bHits;
+
+ switch(i)
+ {
+ case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) - 1;
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+ sQueryIn.sDevVAddr = sFaultDevVAddr;
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+ sQueryIn.sDevVAddr.uiAddr = (sFaultDevVAddr.uiAddr & ~(IMG_UINT64)(ui32PageSizeBytes - 1)) + ui32PageSizeBytes;
+ break;
+ }
+
+ /* First try matching any record at the exact address... */
+ bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_FALSE);
+ if (!bHits)
+ {
+ /* If not matched then try matching any record in the same page... */
+ bHits = DevicememHistoryQuery(&sQueryIn, &asQueryOut[i], ui32PageSizeBytes, IMG_TRUE);
+ }
+
+ if(bHits)
+ {
+ bAnyHits = IMG_TRUE;
+ }
+ }
+
+ return bAnyHits;
+}
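+
+/*
+ * Worked example (illustrative): for sFaultDevVAddr.uiAddr = 0x12345678 and a
+ * 4KB page, the three queries above target 0x12344FFF (the last byte of the
+ * preceding page), 0x12345678 (the faulting address itself) and 0x12346000
+ * (the first byte of the next page).
+ */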
+
+/* stored data about one page fault */
+typedef struct _FAULT_INFO_
+{
+ /* the process info of the memory context that page faulted */
+ RGXMEM_PROCESS_INFO sProcessInfo;
+ IMG_DEV_VIRTADDR sFaultDevVAddr;
+ MMU_FAULT_DATA sMMUFaultData;
+ DEVICEMEM_HISTORY_QUERY_OUT asQueryOut[DEVICEMEM_HISTORY_QUERY_INDEX_COUNT];
+	/* the CR timer value at the time of the fault, recorded by the FW.
+	 * Used to distinguish between page faults.
+	 */
+ IMG_UINT64 ui64CRTimer;
+ /* time when this FAULT_INFO entry was added. used for timing
+ * reference against the map/unmap information
+ */
+ IMG_UINT64 ui64When;
+} FAULT_INFO;
+
+/* history list of page faults.
+ * Keeps the first `n` page faults and the last `n` page faults, like the FW
+ * HWR log
+ */
+typedef struct _FAULT_INFO_LOG_
+{
+ IMG_UINT32 ui32Head;
+ IMG_UINT32 ui32NumWrites;
+ /* the number of faults in this log need not correspond exactly to
+ * the HWINFO number of the FW, as the FW HWINFO log may contain
+ * non-page fault HWRs
+ */
+ FAULT_INFO asFaults[RGXFWIF_HWINFO_MAX];
+} FAULT_INFO_LOG;
+
+static FAULT_INFO_LOG gsFaultInfoLog = { 0 };
+
+/*!
+*******************************************************************************
+
+ @Function _QueryFaultInfo
+
+ @Description
+
+ Searches the local list of previously analysed page faults to see if the given
+ fault has already been analysed and if so, returns a pointer to the analysis
+ object (FAULT_INFO *), otherwise returns NULL.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input sFaultDevVAddr - The faulting device virtual address
+ @Input ui64CRTimer - The CR timer value recorded by the FW at the time of the fault
+
+ @Return FAULT_INFO* Pointer to an existing fault analysis structure if found, otherwise NULL
+
+******************************************************************************/
+static FAULT_INFO *_QueryFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
+ IMG_UINT64 ui64CRTimer)
+{
+ IMG_UINT32 i;
+
+ for(i = 0; i < MIN(gsFaultInfoLog.ui32NumWrites, RGXFWIF_HWINFO_MAX); i++)
+ {
+ if((gsFaultInfoLog.asFaults[i].ui64CRTimer == ui64CRTimer) &&
+ (gsFaultInfoLog.asFaults[i].sFaultDevVAddr.uiAddr == sFaultDevVAddr.uiAddr))
+ {
+ return &gsFaultInfoLog.asFaults[i];
+ }
+ }
+
+ return NULL;
+}
+
+/*!
+*******************************************************************************
+
+ @Function	_AcquireNextFaultInfoElement
+
+ @Description
+
+ Gets a pointer to the next element in the fault info log
+ (requires the fault info lock to be held)
+
+ @Return FAULT_INFO* Pointer to the next record for writing
+
+******************************************************************************/
+static FAULT_INFO *_AcquireNextFaultInfoElement(void)
+{
+ IMG_UINT32 ui32Head = gsFaultInfoLog.ui32Head;
+ FAULT_INFO *psInfo = &gsFaultInfoLog.asFaults[ui32Head];
+
+ return psInfo;
+}
+
+static void _CommitFaultInfo(PVRSRV_RGXDEV_INFO *psDevInfo,
+ FAULT_INFO *psInfo,
+ RGXMEM_PROCESS_INFO *psProcessInfo,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
+ IMG_UINT64 ui64CRTimer,
+ MMU_FAULT_DATA *psMMUFaultData)
+{
+ IMG_UINT32 i, j;
+
+ /* commit the page fault details */
+
+ psInfo->sProcessInfo = *psProcessInfo;
+ psInfo->sFaultDevVAddr = sFaultDevVAddr;
+ psInfo->ui64CRTimer = ui64CRTimer;
+ psInfo->ui64When = OSClockns64();
+ if(psMMUFaultData != NULL)
+ {
+ OSDeviceMemCopy(&psInfo->sMMUFaultData, psMMUFaultData, sizeof(MMU_FAULT_DATA));
+ }
+
+ /* if the page fault was caused by the firmware then get information about
+ * which client application created the related allocations.
+ *
+ * Fill in the process info data for each query result.
+ */
+
+ if(psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ for(i = 0; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+ {
+ for(j = 0; j < DEVICEMEM_HISTORY_QUERY_OUT_MAX_RESULTS; j++)
+ {
+ IMG_BOOL bFound;
+
+ RGXMEM_PROCESS_INFO *psProcInfo = &psInfo->asQueryOut[i].sResults[j].sProcessInfo;
+ bFound = RGXPCPIDToProcessInfo(psDevInfo,
+ psProcInfo->uiPID,
+ psProcInfo);
+ if(!bFound)
+ {
+ OSStringLCopy(psProcInfo->szProcessName,
+ "(unknown)",
+ sizeof(psProcInfo->szProcessName));
+ }
+ }
+ }
+ }
+
+	/* assert that the circular fault buffer head has not moved while this
+	 * entry was being filled in, then advance the head
+	 */
+
+ PVR_ASSERT(psInfo == &gsFaultInfoLog.asFaults[gsFaultInfoLog.ui32Head]);
+
+ if(gsFaultInfoLog.ui32Head < RGXFWIF_HWINFO_MAX - 1)
+ {
+ gsFaultInfoLog.ui32Head++;
+ }
+ else
+ {
+ /* wrap back to the first of the 'LAST' entries */
+ gsFaultInfoLog.ui32Head = RGXFWIF_HWINFO_MAX_FIRST;
+ }
+
+	gsFaultInfoLog.ui32NumWrites++;
+}
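+
+/*
+ * Illustrative head progression, assuming RGXFWIF_HWINFO_MAX = 16 and
+ * RGXFWIF_HWINFO_MAX_FIRST = 8 (the real values come from the FW interface
+ * headers): the head advances 0,1,...,15 and then wraps back to 8, so slots
+ * 0-7 keep the first faults seen while slots 8-15 cycle through the most
+ * recent ones, mirroring the FW HWR log behaviour.
+ */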
+
+/*!
+*******************************************************************************
+
+ @Function _PrintFaultInfo
+
+ @Description
+
+ Print all the details of a page fault from a FAULT_INFO structure
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psInfo - The page fault occurrence to print
+ @Input pui32Index - (optional) index value to include in the print output
+
+ @Return void
+
+******************************************************************************/
+static void _PrintFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ FAULT_INFO *psInfo,
+ const IMG_UINT32 *pui32Index)
+{
+ IMG_UINT32 i;
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+ IMG_PID uiPID;
+
+ uiPID = (psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_FIRMWARE || psInfo->sProcessInfo.uiPID == RGXMEM_SERVER_PID_PM) ?
+ 0 : psInfo->sProcessInfo.uiPID;
+
+ ConvertOSTimestampToSAndNS(psInfo->ui64When, &ui64Seconds, &ui64Nanoseconds);
+
+ if(pui32Index)
+ {
+ PVR_DUMPDEBUG_LOG("(%u) Device memory history for page fault address 0x%010llX, CRTimer: 0x%016llX, "
+ "PID: %u (%s, unregistered: %u) OS time: %llu.%09llu",
+ *pui32Index,
+ (unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+ psInfo->ui64CRTimer,
+ (unsigned int) uiPID,
+ psInfo->sProcessInfo.szProcessName,
+ psInfo->sProcessInfo.bUnregistered,
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("Device memory history for page fault address 0x%010llX, PID: %u "
+ "(%s, unregistered: %u) OS time: %llu.%09llu",
+ (unsigned long long) psInfo->sFaultDevVAddr.uiAddr,
+ (unsigned int) uiPID,
+ psInfo->sProcessInfo.szProcessName,
+ psInfo->sProcessInfo.bUnregistered,
+ (unsigned long long) ui64Seconds,
+ (unsigned long long) ui64Nanoseconds);
+ }
+
+ if (psInfo->sProcessInfo.uiPID != RGXMEM_SERVER_PID_PM)
+ {
+ for(i = DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING; i < DEVICEMEM_HISTORY_QUERY_INDEX_COUNT; i++)
+ {
+ const IMG_CHAR *pszWhich;
+
+ switch(i)
+ {
+ case DEVICEMEM_HISTORY_QUERY_INDEX_PRECEDING:
+ pszWhich = "Preceding page";
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_FAULTED:
+ pszWhich = "Faulted page";
+ break;
+ case DEVICEMEM_HISTORY_QUERY_INDEX_NEXT:
+ pszWhich = "Next page";
+ break;
+ }
+
+ PVR_DUMPDEBUG_LOG("%s:", pszWhich);
+ _PrintDevicememHistoryQueryOut(pfnDumpDebugPrintf, pvDumpDebugFile,
+ &psInfo->sProcessInfo,
+ &psInfo->asQueryOut[i]);
+ }
+ }
+}
+
+static void _RecordFaultInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEV_VIRTADDR sFaultDevVAddr,
+ IMG_DEV_PHYADDR sPCDevPAddr,
+ IMG_UINT64 ui64CRTimer,
+ IMG_UINT32 ui32PageSizeBytes,
+ const IMG_CHAR *pszIndent,
+ MMU_FAULT_DATA *psMMUFaultData)
+{
+ IMG_BOOL bFound = IMG_FALSE, bIsPMFault = IMG_FALSE;
+ RGXMEM_PROCESS_INFO sProcessInfo;
+ FAULT_INFO *psInfo;
+
+ /* look to see if we have already processed this fault.
+ * if so then use the previously acquired information.
+ */
+ OSLockAcquire(psDevInfo->hDebugFaultInfoLock);
+ psInfo = _QueryFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, sFaultDevVAddr, ui64CRTimer);
+
+ if(psInfo == NULL)
+ {
+ if(sPCDevPAddr.uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+ {
+ /* Check if this is PM fault */
+ if (psMMUFaultData != NULL && psMMUFaultData->eType == MMU_FAULT_TYPE_PM)
+ {
+ bIsPMFault = IMG_TRUE;
+ bFound = IMG_TRUE;
+ }
+ else
+ {
+ /* look up the process details for the faulting page catalogue */
+ bFound = RGXPCAddrToProcessInfo(psDevInfo, sPCDevPAddr, &sProcessInfo);
+ }
+
+ if(bFound)
+ {
+ IMG_BOOL bHits;
+
+ psInfo = _AcquireNextFaultInfoElement();
+
+ if (bIsPMFault)
+ {
+ sProcessInfo.uiPID = RGXMEM_SERVER_PID_PM;
+ OSStringNCopy(sProcessInfo.szProcessName, "PM", sizeof(sProcessInfo.szProcessName));
+ sProcessInfo.szProcessName[sizeof(sProcessInfo.szProcessName) - 1] = '\0';
+ sProcessInfo.bUnregistered = IMG_FALSE;
+ bHits = IMG_TRUE;
+ }
+ else
+ {
+ /* get any DevicememHistory data for the faulting address */
+ bHits = _GetDevicememHistoryData(sProcessInfo.uiPID,
+ sFaultDevVAddr,
+ psInfo->asQueryOut,
+ ui32PageSizeBytes);
+ }
+
+ if(bHits)
+ {
+ _CommitFaultInfo(psDevInfo,
+ psInfo,
+ &sProcessInfo,
+ sFaultDevVAddr,
+ ui64CRTimer,
+ psMMUFaultData);
+ }
+ else
+ {
+ /* no hits, so no data to present */
+ PVR_DUMPDEBUG_LOG("%sNo matching Devmem History for fault address", pszIndent);
+ psInfo = NULL;
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sCould not find PID for PC 0x%016llX", pszIndent, sPCDevPAddr.uiAddr);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%sPage fault not applicable to Devmem History", pszIndent);
+ }
+ }
+
+ if(psInfo != NULL)
+ {
+ _PrintFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psInfo, NULL);
+ }
+
+ OSLockRelease(psDevInfo->hDebugFaultInfoLock);
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function _DumpFWHWRHostView
+
+ @Description
+
+ Dump FW HWR fault status in human readable form.
+
+ @Input psFaultData - The MMU fault data to dump
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+						'printf' function if required
+ @Input psOutFaultData - Optional storage to receive a copy of the fault data
+ @Return void
+
+******************************************************************************/
+static void _DumpFWHWRHostView(MMU_FAULT_DATA *psFaultData,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ MMU_FAULT_DATA *psOutFaultData)
+{
+ MMU_LEVEL eLevel;
+ const IMG_CHAR szPageLevel[][4] = {"", "PTE", "PDE", "PCE" };
+ const IMG_CHAR szPageError[][3] = {"", "PT", "PD", "PC" };
+
+ eLevel = psFaultData->eTopLevel;
+
+ if (psFaultData->eType == MMU_FAULT_TYPE_UNKNOWN)
+ {
+ return;
+ }
+ else if (psFaultData->eType == MMU_FAULT_TYPE_PM)
+ {
+ PVR_DUMPDEBUG_LOG("PM faulted at PC address = 0x%016" IMG_UINT64_FMTSPECx, psFaultData->sLevelData[MMU_LEVEL_0].ui64Address);
+ }
+ else
+ {
+ PVR_ASSERT(eLevel < MMU_LEVEL_LAST);
+ while(eLevel >= MMU_LEVEL_0)
+ {
+ MMU_LEVEL_DATA *psMMULevelData = &psFaultData->sLevelData[eLevel];
+ if (psMMULevelData->ui64Address)
+ {
+ if (psMMULevelData->uiBytesPerEntry == 4)
+ {
+ PVR_DUMPDEBUG_LOG("%s for index %d = 0x%08x and is %s",
+ szPageLevel[eLevel],
+ psMMULevelData->ui32Index,
+ (IMG_UINT) psMMULevelData->ui64Address,
+ psMMULevelData->psDebugStr);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%s for index %d = 0x%016" IMG_UINT64_FMTSPECx " and is %s",
+ szPageLevel[eLevel],
+ psMMULevelData->ui32Index,
+ psMMULevelData->ui64Address,
+ psMMULevelData->psDebugStr);
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("%s index (%d) out of bounds (%d)",
+ szPageError[eLevel],
+ psMMULevelData->ui32Index,
+ psMMULevelData->ui32NumOfEntries);
+ break;
+ }
+ eLevel--;
+ }
+ }
+
+ if (psOutFaultData)
+ {
+ OSDeviceMemCopy(psOutFaultData, psFaultData, sizeof(MMU_FAULT_DATA));
+ }
+}
+
+#if !defined(SUPPORT_PAGE_FAULT_DEBUG)
+static inline void _UpdateFaultInfo(MMU_FAULT_DATA *psDestData, MMU_FAULT_DATA *psSrcData)
+{
+ OSDeviceMemCopy(psDestData, psSrcData, sizeof(MMU_FAULT_DATA));
+
+ /* Update count for next entry */
+ if (gui32FaultIndex < RGXFWIF_HWINFO_MAX - 1)
+ {
+ gui32FaultIndex++;
+ }
+ else
+ {
+ gui32FaultIndex = RGXFWIF_HWINFO_MAX_FIRST;
+ }
+}
+#endif
+
+static void _HostFaultAnalysis(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_UINT64 ui64MMUStatus,
+ IMG_BOOL bPMFault,
+ IMG_DEV_PHYADDR *psPCDevPAddr,
+ IMG_DEV_VIRTADDR *psFaultAddr,
+ IMG_UINT64 *pui64CRTimer,
+ MMU_FAULT_DATA *psFaultData)
+{
+ IMG_UINT32 ui32Index = RGXFWIF_HWINFO_MAX;
+ IMG_UINT32 ui32LatestHWRNumber = 0;
+ IMG_UINT64 ui64LatestMMUStatus = 0;
+ IMG_UINT64 ui64LatestPCAddress = RGXFWIF_INVALID_PC_PHYADDR;
+ const IMG_CHAR *pszIndent = " ";
+
+ /*
+	 * A few cat bases are memory contexts used for the PM
+	 * or firmware; the rest are application contexts.
+ *
+ * It is not possible for the host to obtain the cat base
+ * address while the FW is running (since the cat bases are
+ * indirectly accessed), but in the case of the 'live' PC
+ * we can see if the FW has already logged it in the HWR log.
+ */
+
+ for (ui32Index = 0; ui32Index < RGXFWIF_HWINFO_MAX; ui32Index++)
+ {
+ RGX_HWRINFO *psHWRInfo = &psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32Index];
+
+ if (psHWRInfo->ui32HWRNumber > ui32LatestHWRNumber && psHWRInfo->eHWRType == RGX_HWRTYPE_MMUFAULT)
+ {
+ ui32LatestHWRNumber = psHWRInfo->ui32HWRNumber;
+ ui64LatestMMUStatus = psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus;
+ ui64LatestPCAddress = psHWRInfo->uHWRData.sMMUInfo.ui64PCAddress;
+ *pui64CRTimer = psHWRInfo->ui64CRTimer;
+ }
+ }
+
+ if (ui64LatestMMUStatus == ui64MMUStatus && ui64LatestPCAddress != RGXFWIF_INVALID_PC_PHYADDR)
+ {
+ psPCDevPAddr->uiAddr = ui64LatestPCAddress;
+ PVR_DUMPDEBUG_LOG("%sLocated PC address: 0x%016" IMG_UINT64_FMTSPECX, pszIndent, psPCDevPAddr->uiAddr);
+ }
+ else
+ {
+ psPCDevPAddr->uiAddr = RGXFWIF_INVALID_PC_PHYADDR;
+ }
+
+ if (psPCDevPAddr->uiAddr != RGXFWIF_INVALID_PC_PHYADDR)
+ {
+ if (!bPMFault)
+ {
+ PVR_DUMPDEBUG_LOG("%sChecking faulting address " IMG_DEV_VIRTADDR_FMTSPEC, pszIndent, psFaultAddr->uiAddr);
+ RGXCheckFaultAddress(psDevInfo, psFaultAddr, psPCDevPAddr, pfnDumpDebugPrintf, pvDumpDebugFile, psFaultData);
+ }
+ else
+ {
+			/* PM fault: dump PC details only */
+ psFaultData->eTopLevel = MMU_LEVEL_0;
+ psFaultData->eType = MMU_FAULT_TYPE_PM;
+ psFaultData->sLevelData[MMU_LEVEL_0].ui64Address = psPCDevPAddr->uiAddr;
+ }
+#if !defined(SUPPORT_PAGE_FAULT_DEBUG)
+ _UpdateFaultInfo(&gsMMUFaultData[gui32FaultIndex], psFaultData);
+#endif
+ }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpRGXBIFBank
+
+ @Description
+
+ Dump BIF Bank state in human readable form.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input eBankID - BIF identifier
+ @Input ui64MMUStatus - MMU Status register value
+ @Input ui64ReqStatus - BIF request Status register value
+ @Input ui32HWRIndex - Index of FW HWR info if function is called
+ as a part of the debug dump summary else
+ RGXFWIF_HWINFO_MAX
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpRGXBIFBank(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXDBG_BIF_ID eBankID,
+ IMG_UINT64 ui64MMUStatus,
+ IMG_UINT64 ui64ReqStatus,
+ IMG_UINT32 ui32HWRIndex)
+{
+ IMG_BOOL bExistingHWR = ui32HWRIndex < RGXFWIF_HWINFO_MAX;
+ IMG_CHAR *pszIndent = (bExistingHWR ? "" : " ");
+
+ if (ui64MMUStatus == 0x0)
+ {
+ PVR_DUMPDEBUG_LOG("%s - OK", pszBIFNames[eBankID]);
+ }
+ else
+ {
+ IMG_DEV_VIRTADDR sFaultDevVAddr;
+ IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+ IMG_UINT32 ui32PageSize;
+ IMG_UINT64 ui64CRTimer = 0;
+ IMG_UINT32 ui32PC =
+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_CAT_BASE_SHIFT;
+ MMU_FAULT_DATA sFaultData = { 0 };
+
+ /* Bank 0 & 1 share the same fields */
+ PVR_DUMPDEBUG_LOG("%s%s - FAULT:",
+ pszIndent,
+ pszBIFNames[eBankID]);
+
+ /* MMU Status */
+ {
+ IMG_UINT32 ui32MMUDataType =
+ (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_DATA_TYPE_SHIFT;
+
+ IMG_BOOL bROFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_RO_EN) != 0;
+ IMG_BOOL bProtFault = (ui64MMUStatus & RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_FAULT_PM_META_RO_EN) != 0;
+
+ ui32PageSize = (ui64MMUStatus & ~RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_MMU_STATUS_PAGE_SIZE_SHIFT;
+
+ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECX "): PC = %d%s, Page Size = %d, MMU data type = %d%s%s.",
+ pszIndent,
+ ui64MMUStatus,
+ ui32PC,
+ (ui32PC < 0x8)?"":_RGXDecodePMPC(ui32PC),
+ ui32PageSize,
+ ui32MMUDataType,
+ (bROFault)?", Read Only fault":"",
+ (bProtFault)?", PM/META protection fault":"");
+ }
+
+ /* Req Status */
+ {
+ IMG_CHAR *pszTagID;
+ IMG_CHAR *pszTagSB;
+ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+ IMG_BOOL bRead;
+ IMG_UINT32 ui32TagSB, ui32TagID;
+ IMG_UINT64 ui64Addr;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+ {
+ bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__RNW_EN) != 0;
+ ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_SB_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_SB_SHIFT;
+ ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_ID_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS__XE_MEMORY_HIERARCHY__TAG_ID_SHIFT;
+ }
+ else
+ {
+ bRead = (ui64ReqStatus & RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_RNW_EN) != 0;
+ ui32TagSB = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_SB_SHIFT;
+ ui32TagID = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_TAG_ID_SHIFT;
+ }
+ ui64Addr = ((ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK) >>
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_SHIFT) <<
+ RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_ALIGNSHIFT;
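+
+			/*
+			 * Illustrative decode, assuming ADDRESS_ALIGNSHIFT is 4 (the
+			 * real value comes from the register headers): an ADDRESS
+			 * field of 0x0123456 would yield a fault address of 0x01234560.
+			 */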
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ /* RNW bit offset is different. The TAG_SB, TAG_ID and address fields are the same. */
+ if( (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED) && (eBankID == RGXDBG_DPX_BIF)))
+ {
+ bRead = (ui64ReqStatus & DPX_CR_BIF_FAULT_BANK_REQ_STATUS_RNW_EN) != 0;
+ }
+#endif
+ _RGXDecodeBIFReqTags(psDevInfo, eBankID, ui32TagID, ui32TagSB, &pszTagID, &pszTagSB, &aszScratch[0], RGX_DEBUG_STR_SIZE);
+
+ PVR_DUMPDEBUG_LOG("%s * Request (0x%016" IMG_UINT64_FMTSPECX
+ "): %s (%s), %s " IMG_DEV_VIRTADDR_FMTSPEC ".",
+ pszIndent,
+ ui64ReqStatus,
+ pszTagID,
+ pszTagSB,
+ (bRead)?"Reading from":"Writing to",
+ ui64Addr);
+ }
+
+ /* Check if the host thinks this fault is valid */
+
+ sFaultDevVAddr.uiAddr = (ui64ReqStatus & ~RGX_CR_BIF_FAULT_BANK0_REQ_STATUS_ADDRESS_CLRMSK);
+
+ if (bExistingHWR)
+ {
+ /* Called from debug dump summary */
+ sPCDevPAddr.uiAddr = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].uHWRData.sBIFInfo.ui64PCAddress;
+ ui64CRTimer = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui64CRTimer;
+
+ PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016" IMG_UINT64_FMTSPECX, pszIndent, sPCDevPAddr.uiAddr);
+ if (psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui32HWRNumber < psDevInfo->psRGXFWIfHWRInfoBuf->ui32DDReqCount)
+ {
+ /* check if fault is already analysed from host */
+ _DumpFWHWRHostView(
+#if !defined(SUPPORT_PAGE_FAULT_DEBUG)
+ &gsMMUFaultData[ui32HWRIndex],
+#else
+ &(gsFaultInfoLog.asFaults[ui32HWRIndex].sMMUFaultData),
+#endif
+ pfnDumpDebugPrintf, pvDumpDebugFile, &sFaultData);
+ }
+ }
+ else
+ {
+ /* Only the first 8 cat bases are application memory contexts which we can validate... */
+ IMG_BOOL bPMFault = (ui32PC >= 8);
+			_HostFaultAnalysis(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui64MMUStatus, bPMFault,
+							&sPCDevPAddr, &sFaultDevVAddr, &ui64CRTimer, &sFaultData);
+ }
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		_RecordFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, sFaultDevVAddr, sPCDevPAddr, ui64CRTimer,
+						 _PageSizeHWToBytes(ui32PageSize), pszIndent, &sFaultData);
+#endif
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpRGXMMUFaultStatus
+
+ @Description
+
+ Dump MMU Fault status in human readable form.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input ui64MMUStatus - MMU Status register value
+ @Input ui32HWRIndex - Index of FW HWR info if function is called
+ as a part of the debug dump summary else
+ RGXFWIF_HWINFO_MAX
+ @Input pszMetaOrCore - string representing call is for META or MMU core
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpRGXMMUFaultStatus(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT64 ui64MMUStatus,
+ IMG_UINT32 ui32HWRIndex,
+ const IMG_PCHAR pszMetaOrCore)
+{
+ IMG_BOOL bExistingHWR = ui32HWRIndex < RGXFWIF_HWINFO_MAX;
+ IMG_CHAR *pszIndent = (!bExistingHWR ? "" : " ");
+
+ if (ui64MMUStatus == 0x0)
+ {
+ PVR_DUMPDEBUG_LOG("%sMMU (%s) - OK", pszIndent, pszMetaOrCore);
+ }
+ else
+ {
+ IMG_UINT32 ui32PC = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT;
+ IMG_UINT64 ui64Addr = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT) << 4; /* align shift */
+ IMG_UINT32 ui32Requester = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT;
+ IMG_UINT32 ui32SideBand = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT;
+ IMG_UINT32 ui32MMULevel = (ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT;
+ IMG_BOOL bRead = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_RNW_EN) != 0;
+ IMG_BOOL bFault = (ui64MMUStatus & RGX_CR_MMU_FAULT_STATUS_FAULT_EN) != 0;
+ IMG_BOOL bROFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x2;
+ IMG_BOOL bProtFault = ((ui64MMUStatus & ~RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK) >>
+ RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT) == 0x3;
+ IMG_CHAR aszScratch[RGX_DEBUG_STR_SIZE];
+ IMG_CHAR *pszTagID;
+ IMG_CHAR *pszTagSB;
+ IMG_UINT64 ui64CRTimer = 0;
+ IMG_DEV_VIRTADDR sFaultDevVAddr;
+ IMG_DEV_PHYADDR sPCDevPAddr = { 0 };
+		MMU_FAULT_DATA sFaultData = { 0 };
+
+ _RGXDecodeMMUReqTags(psDevInfo, ui32Requester, ui32SideBand, bRead, &pszTagID, &pszTagSB, aszScratch, RGX_DEBUG_STR_SIZE);
+
+ PVR_DUMPDEBUG_LOG("%sMMU (%s) - FAULT:", pszIndent, pszMetaOrCore);
+ PVR_DUMPDEBUG_LOG("%s * MMU status (0x%016" IMG_UINT64_FMTSPECX "): PC = %d, %s 0x%010" IMG_UINT64_FMTSPECX ", %s (%s)%s%s%s%s.",
+ pszIndent,
+ ui64MMUStatus,
+ ui32PC,
+ (bRead)?"Reading from":"Writing to",
+ ui64Addr,
+ pszTagID,
+ pszTagSB,
+ (bFault)?", Fault":"",
+ (bROFault)?", Read Only fault":"",
+ (bProtFault)?", PM/META protection fault":"",
+ _RGXDecodeMMULevel(ui32MMULevel));
+
+ /* Check if the host thinks this fault is valid */
+ sFaultDevVAddr.uiAddr = ui64Addr;
+
+ if (bExistingHWR)
+ {
+ /* Called from debug dump summary */
+ sPCDevPAddr.uiAddr = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].uHWRData.sMMUInfo.ui64PCAddress;
+ ui64CRTimer = psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui64CRTimer;
+
+ PVR_DUMPDEBUG_LOG("%sFW logged fault using PC Address: 0x%016" IMG_UINT64_FMTSPECX, pszIndent, sPCDevPAddr.uiAddr);
+ if (psDevInfo->psRGXFWIfHWRInfoBuf->sHWRInfo[ui32HWRIndex].ui32HWRNumber < psDevInfo->psRGXFWIfHWRInfoBuf->ui32DDReqCount)
+ {
+ /* check if Fault is already analysed from host */
+ _DumpFWHWRHostView(
+#if !defined(SUPPORT_PAGE_FAULT_DEBUG)
+ &gsMMUFaultData[ui32HWRIndex],
+#else
+ &(gsFaultInfoLog.asFaults[ui32HWRIndex].sMMUFaultData),
+#endif
+ pfnDumpDebugPrintf, pvDumpDebugFile, &sFaultData);
+ }
+ }
+ else
+ {
+ IMG_BOOL bPMFault;
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ ui32PC = ui32PC - 1;
+#endif
+ bPMFault = (ui32PC <= 8);
+			_HostFaultAnalysis(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile, ui64MMUStatus, bPMFault,
+							&sPCDevPAddr, &sFaultDevVAddr, &ui64CRTimer, &sFaultData);
+ }
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+		_RecordFaultInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, sFaultDevVAddr, sPCDevPAddr, ui64CRTimer,
+						 _PageSizeHWToBytes(0), pszIndent, &sFaultData);
+#endif
+ }
+}
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_CONTEXT_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_CONTEXT_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_ADDRESS_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_ADDRESS_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TAG_SB_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TAG_SB_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_REQ_ID_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_REQ_ID_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_LEVEL_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_LEVEL_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_RNW_EN == RGX_CR_MMU_FAULT_STATUS_META_RNW_EN),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_FAULT_EN == RGX_CR_MMU_FAULT_STATUS_META_FAULT_EN),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_CLRMSK == RGX_CR_MMU_FAULT_STATUS_META_TYPE_CLRMSK),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+static_assert((RGX_CR_MMU_FAULT_STATUS_TYPE_SHIFT == RGX_CR_MMU_FAULT_STATUS_META_TYPE_SHIFT),
+ "RGX_CR_MMU_FAULT_STATUS_META mismatch!");
+
+#if !defined(NO_HARDWARE)
+static PVRSRV_ERROR _RGXMipsExtraDebug(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_MIPS_STATE *psMIPSState)
+{
+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+ IMG_UINT32 ui32RegRead;
+	PVRSRV_ERROR eError = PVRSRV_OK;
+ /* This pointer contains a kernel mapping of a particular memory area shared
+ between the driver and the firmware. This area is used for exchanging info
+	   about the internal state of the MIPS. */
+ IMG_UINT32 *pui32NMIMemoryPointer;
+ IMG_UINT32 *pui32NMIPageBasePointer;
+ IMG_DEVMEM_OFFSET_T uiNMIMemoryBootOffset;
+ PMR *psPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+
+	/* Map the FW data area into the kernel */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+ (void **)&pui32NMIMemoryPointer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXMipsExtraDebug: Failed to acquire NMI shared memory area (%u)", eError));
+ goto map_error_fail;
+ }
+ else
+ {
+ pui32NMIPageBasePointer = pui32NMIMemoryPointer;
+ }
+
+ /* Calculate offset to the boot/NMI data page */
+ uiNMIMemoryBootOffset = RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+
+ /* Jump to the NMI shared data area within the page above */
+ pui32NMIMemoryPointer += uiNMIMemoryBootOffset + RGXMIPSFW_GET_OFFSET_IN_DWORDS(RGXMIPSFW_NMI_SHARED_DATA_BASE);
+
+ /* Acquire the NMI operations lock */
+ OSLockAcquire(psDevInfo->hNMILock);
+
+ /* Make sure the synchronisation flag is set to 0 */
+ pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET] = 0;
+
+ /* Flush out the dirty locations of the NMI page */
+ CacheOpValExec(psPMR,
+ (IMG_UINT64)(uintptr_t)pui32NMIPageBasePointer,
+ uiNMIMemoryBootOffset,
+ RGXMIPSFW_PAGE_SIZE/(sizeof(IMG_UINT32)),
+ PVRSRV_CACHE_OP_FLUSH);
+
+ /* Enable NMI issuing in the MIPS wrapper */
+ OSWriteHWReg64(pvRegsBaseKM,
+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE_EVENT_EN);
+
+ /* Check the MIPS is not in error state already (e.g. it is booting or an NMI has already been requested) */
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN) || (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN))
+ {
+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+ goto fail;
+ }
+ ui32RegRead = 0;
+
+ /* Issue NMI */
+ OSWriteHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_WRAPPER_NMI_EVENT,
+ RGX_CR_MIPS_WRAPPER_NMI_EVENT_TRIGGER_EN);
+
+ /* Wait for NMI Taken to be asserted */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if ((ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_NMI_TAKEN_EN) == 0)
+ {
+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+ goto fail;
+ }
+ ui32RegRead = 0;
+
+ /* Allow the firmware to proceed */
+ pui32NMIMemoryPointer[RGXMIPSFW_NMI_SYNC_FLAG_OFFSET] = 1;
+
+ /* Flush out the dirty locations of the NMI page */
+ CacheOpValExec(psPMR,
+ (IMG_UINT64)(uintptr_t)pui32NMIPageBasePointer,
+ uiNMIMemoryBootOffset,
+ RGXMIPSFW_PAGE_SIZE/(sizeof(IMG_UINT32)),
+ PVRSRV_CACHE_OP_FLUSH);
+
+ /* Wait for the FW to have finished the NMI routine */
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ ui32RegRead = OSReadHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_EXCEPTION_STATUS);
+ if (!(ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN))
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ if (ui32RegRead & RGX_CR_MIPS_EXCEPTION_STATUS_SI_ERL_EN)
+ {
+ eError = PVRSRV_ERROR_MIPS_STATUS_UNAVAILABLE;
+ goto fail;
+ }
+ ui32RegRead = 0;
+
+ /* Copy state */
+ OSDeviceMemCopy(psMIPSState, pui32NMIMemoryPointer + RGXMIPSFW_NMI_STATE_OFFSET, sizeof(*psMIPSState));
+
+ --(psMIPSState->ui32ErrorEPC);
+ --(psMIPSState->ui32EPC);
+
+ /* Disable NMI issuing in the MIPS wrapper */
+ OSWriteHWReg32(pvRegsBaseKM,
+ RGX_CR_MIPS_WRAPPER_NMI_ENABLE,
+ 0);
+
+fail:
+ /* Release the NMI operations lock */
+ OSLockRelease(psDevInfo->hNMILock);
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+map_error_fail:
+ return eError;
+}
+
+/* Print decoded information from cause register */
+static void _RGXMipsDumpCauseDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ IMG_UINT32 ui32Cause,
+ IMG_UINT32 ui32ErrorState)
+{
+#define INDENT " "
+ const IMG_UINT32 ui32ExcCode = RGXMIPSFW_C0_CAUSE_EXCCODE(ui32Cause);
+ const IMG_CHAR * const pszException = _GetMIPSExcString(ui32ExcCode);
+
+ if (ui32ErrorState == RGXMIPSFW_NMI_ERROR_STATE_SET &&
+ pszException != NULL)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Cause exception: %s", pszException);
+ }
+
+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_FDCIPENDING)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "FDC interrupt pending");
+ }
+
+ if (!(ui32Cause & RGXMIPSFW_C0_CAUSE_IV))
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Interrupt uses general interrupt vector");
+ }
+
+ if (ui32Cause & RGXMIPSFW_C0_CAUSE_PCIPENDING)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Performance Counter Interrupt pending");
+ }
+
+ /* Unusable Coproc exception */
+ if (ui32ExcCode == 11)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Unusable Coprocessor: %d", RGXMIPSFW_C0_CAUSE_UNUSABLE_UNIT(ui32Cause));
+ }
+
+#undef INDENT
+}
+
+static void _RGXMipsDumpDebugDecode(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf, void *pvDumpDebugFile, IMG_UINT32 ui32Debug, IMG_UINT32 ui32DEPC)
+{
+ const IMG_CHAR *pszDException = NULL;
+ IMG_UINT32 i;
+#define INDENT " "
+
+ if (!(ui32Debug & RGXMIPSFW_C0_DEBUG_DM))
+ {
+ return;
+ }
+
+ PVR_DUMPDEBUG_LOG("DEBUG :");
+
+ pszDException = _GetMIPSExcString(RGXMIPSFW_C0_DEBUG_EXCCODE(ui32Debug));
+
+ if (pszDException != NULL)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "Debug exception: %s", pszDException);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sMIPS_C0_DebugTable); ++i)
+ {
+ const RGXMIPSFW_C0_DEBUG_TBL_ENTRY * const psDebugEntry = &sMIPS_C0_DebugTable[i];
+
+ if (ui32Debug & psDebugEntry->ui32Mask)
+ {
+ PVR_DUMPDEBUG_LOG(INDENT "%s", psDebugEntry->pszExplanation);
+ }
+ }
+#undef INDENT
+ PVR_DUMPDEBUG_LOG("DEPC :0x%08X", ui32DEPC);
+}
+
+static inline void _GetMipsTLBPARanges(const RGX_MIPS_TLB_ENTRY *psTLBEntry,
+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry0,
+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry1,
+ IMG_UINT64 *pui64PA0Start,
+ IMG_UINT64 *pui64PA0End,
+ IMG_UINT64 *pui64PA1Start,
+ IMG_UINT64 *pui64PA1End)
+{
+ IMG_BOOL bUseRemapOutput = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE;
+ IMG_UINT64 ui64PageSize = RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask);
+
+ if ((psTLBEntry->ui32TLBLo0 & RGXMIPSFW_TLB_VALID) == 0)
+ {
+ /* Dummy values to fail the range checks later */
+ *pui64PA0Start = -1ULL;
+ *pui64PA0End = -1ULL;
+ }
+ else if (bUseRemapOutput)
+ {
+ *pui64PA0Start = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12;
+ *pui64PA0End = *pui64PA0Start + ui64PageSize - 1;
+ }
+ else
+ {
+ *pui64PA0Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0);
+ *pui64PA0End = *pui64PA0Start + ui64PageSize - 1;
+ }
+
+ if ((psTLBEntry->ui32TLBLo1 & RGXMIPSFW_TLB_VALID) == 0)
+ {
+ /* Dummy values to fail the range checks later */
+ *pui64PA1Start = -1ULL;
+ *pui64PA1End = -1ULL;
+ }
+ else if (bUseRemapOutput)
+ {
+ *pui64PA1Start = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12;
+ *pui64PA1End = *pui64PA1Start + ui64PageSize - 1;
+ }
+ else
+ {
+ *pui64PA1Start = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1);
+ *pui64PA1End = *pui64PA1Start + ui64PageSize - 1;
+ }
+}
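+
+/*
+ * Worked example (illustrative): for a valid TLBLo0 entry remapped with
+ * ui32RemapAddrOut = 0x00012 and a 4KB page, the output range computed above
+ * is 0x12000..0x12FFF (0x00012 << 12, plus the page size minus one).
+ */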
+
+static void _CheckMipsTLBDuplicatePAs(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ const RGX_MIPS_TLB_ENTRY *psTLB,
+ const RGX_MIPS_REMAP_ENTRY *psRemap)
+{
+ IMG_UINT64 ui64PA0StartI, ui64PA1StartI, ui64PA0StartJ, ui64PA1StartJ;
+ IMG_UINT64 ui64PA0EndI, ui64PA1EndI, ui64PA0EndJ, ui64PA1EndJ;
+ IMG_UINT32 i, j;
+
+#define RANGES_OVERLAP(start0,end0,start1,end1) ((start0) < (end1) && (start1) < (end0))
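+
+/*
+ * Worked example (illustrative): with the inclusive end addresses produced by
+ * _GetMipsTLBPARanges(), ranges 0x1000..0x1FFF and 0x1800..0x27FF satisfy the
+ * macro (0x1000 < 0x27FF and 0x1800 < 0x1FFF), while the dummy -1ULL ranges
+ * used for invalid entries can never overlap anything.
+ */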
+
+ for (i = 0; i < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; i++)
+ {
+ _GetMipsTLBPARanges(&psTLB[i],
+ psRemap ? &psRemap[i] : NULL,
+ psRemap ? &psRemap[i + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
+ &ui64PA0StartI, &ui64PA0EndI,
+ &ui64PA1StartI, &ui64PA1EndI);
+
+ for (j = i + 1; j < RGXMIPSFW_NUMBER_OF_TLB_ENTRIES; j++)
+ {
+ _GetMipsTLBPARanges(&psTLB[j],
+ psRemap ? &psRemap[j] : NULL,
+ psRemap ? &psRemap[j + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES] : NULL,
+ &ui64PA0StartJ, &ui64PA0EndJ,
+ &ui64PA1StartJ, &ui64PA1EndJ);
+
+ if (RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA0StartJ, ui64PA0EndJ) ||
+ RANGES_OVERLAP(ui64PA0StartI, ui64PA0EndI, ui64PA1StartJ, ui64PA1EndJ) ||
+ RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA0StartJ, ui64PA0EndJ) ||
+ RANGES_OVERLAP(ui64PA1StartI, ui64PA1EndI, ui64PA1StartJ, ui64PA1EndJ) )
+ {
+				PVR_DUMPDEBUG_LOG("Overlap between TLB entries %u and %u", i, j);
+ }
+ }
+ }
+}
+
+static inline void _RGXMipsDumpTLBEntry(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ const RGX_MIPS_TLB_ENTRY *psTLBEntry,
+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry0,
+ const RGX_MIPS_REMAP_ENTRY *psRemapEntry1,
+ IMG_UINT32 ui32Index)
+{
+ IMG_BOOL bDumpRemapEntries = (psRemapEntry0 != NULL && psRemapEntry1 != NULL) ? IMG_TRUE : IMG_FALSE;
+ IMG_UINT64 ui64PA0 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo0);
+ IMG_UINT64 ui64PA1 = RGXMIPSFW_TLB_GET_PA(psTLBEntry->ui32TLBLo1);
+ IMG_UINT64 ui64Remap0AddrOut = 0, ui64Remap1AddrOut = 0;
+ IMG_UINT32 ui32Remap0AddrIn = 0, ui32Remap1AddrIn = 0;
+
+ static const IMG_CHAR * const apszPermissionInhibit[4] =
+ {
+ "",
+ "XI",
+ "RI",
+ "RIXI"
+ };
+
+ static const IMG_CHAR * const apszCoherencyTLB[8] =
+ {
+ "C",
+ "C",
+ " ",
+ "C",
+ "C",
+ "C",
+ "C",
+ " "
+ };
+
+ static const IMG_CHAR * const apszDirtyGlobalValid[8] =
+ {
+ " ",
+ " G",
+ " V ",
+ " VG",
+ "D ",
+ "D G",
+ "DV ",
+ "DVG"
+ };
+
+ if (bDumpRemapEntries)
+ {
+		/* RemapAddrIn is always 4k aligned and 32 bits wide */
+ ui32Remap0AddrIn = psRemapEntry0->ui32RemapAddrIn << 12;
+ ui32Remap1AddrIn = psRemapEntry1->ui32RemapAddrIn << 12;
+
+		/* RemapAddrOut is always 4k aligned and 32 or 36 bits wide */
+ ui64Remap0AddrOut = (IMG_UINT64)psRemapEntry0->ui32RemapAddrOut << 12;
+ ui64Remap1AddrOut = (IMG_UINT64)psRemapEntry1->ui32RemapAddrOut << 12;
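+
+		/*
+		 * Worked example (illustrative): ui32RemapAddrIn = 0x00345 becomes
+		 * input address 0x00345000 after the 4K shift, and
+		 * ui32RemapAddrOut = 0x10345 becomes output PA 0x10345000.
+		 */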
+
+		/* If the TLB and remap entries match then merge them, else print them separately */
+ if ((IMG_UINT32)ui64PA0 == ui32Remap0AddrIn &&
+ (IMG_UINT32)ui64PA1 == ui32Remap1AddrIn)
+ {
+ ui64PA0 = ui64Remap0AddrOut;
+ ui64PA1 = ui64Remap1AddrOut;
+ bDumpRemapEntries = IMG_FALSE;
+ }
+ }
+
+ PVR_DUMPDEBUG_LOG("%2u) VA 0x%08X (%3uk) -> PA0 0x%08" IMG_UINT64_FMTSPECX " %s%s%s, "
+ "PA1 0x%08" IMG_UINT64_FMTSPECX " %s%s%s",
+ ui32Index,
+ psTLBEntry->ui32TLBHi,
+ RGXMIPSFW_TLB_GET_PAGE_SIZE(psTLBEntry->ui32TLBPageMask),
+ ui64PA0,
+ apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo0)],
+ apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo0)],
+ apszCoherencyTLB[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo0)],
+ ui64PA1,
+ apszPermissionInhibit[RGXMIPSFW_TLB_GET_INHIBIT(psTLBEntry->ui32TLBLo1)],
+ apszDirtyGlobalValid[RGXMIPSFW_TLB_GET_DGV(psTLBEntry->ui32TLBLo1)],
+ apszCoherencyTLB[RGXMIPSFW_TLB_GET_COHERENCY(psTLBEntry->ui32TLBLo1)]);
+
+ if (bDumpRemapEntries)
+ {
+ PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECX,
+ ui32Index,
+ ui32Remap0AddrIn,
+ RGXMIPSFW_REMAP_GET_REGION_SIZE(psRemapEntry0->ui32RemapRegionSize),
+ ui64Remap0AddrOut);
+
+ PVR_DUMPDEBUG_LOG(" Remap %2u : IN 0x%08X (%3uk) => OUT 0x%08" IMG_UINT64_FMTSPECX,
+ ui32Index + RGXMIPSFW_NUMBER_OF_TLB_ENTRIES,
+ ui32Remap1AddrIn,
+ RGXMIPSFW_REMAP_GET_REGION_SIZE(psRemapEntry1->ui32RemapRegionSize),
+ ui64Remap1AddrOut);
+ }
+}
+
+#endif /* !defined(NO_HARDWARE) */
+
+static void _Flags2Description(IMG_CHAR *sDesc, const IMG_FLAGS2DESC *psConvTable, IMG_UINT32 ui32TableSize, IMG_UINT32 ui32Flags)
+{
+ IMG_UINT32 ui32Idx;
+
+ for (ui32Idx = 0; ui32Idx < ui32TableSize; ui32Idx++)
+ {
+ if ((ui32Flags & psConvTable[ui32Idx].uiFlag) == psConvTable[ui32Idx].uiFlag)
+ {
+ strcat(sDesc, psConvTable[ui32Idx].pszLabel);
+ }
+ }
+}
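+
+/*
+ * Illustrative use with hypothetical table entries { 0x1, "GLOBAL " } and
+ * { 0x2, "TA " }: _Flags2Description(szBuf, psTable, 2, 0x3) appends
+ * "GLOBAL TA " to szBuf. The caller must pass a NUL-terminated buffer large
+ * enough for every matching label, since strcat() does no bounds checking.
+ */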
+
+static void _GetFwFlagsDescription(IMG_CHAR *psDesc, IMG_UINT32 ui32RawFlags)
+{
+ const IMG_CHAR *psCswLabel = "Ctx switch: ";
+ strcat(psDesc, psCswLabel);
+ _Flags2Description(psDesc, asCSW2Description, ARRAY_SIZE(asCSW2Description), ui32RawFlags);
+ _Flags2Description(psDesc, asMisc2Description, ARRAY_SIZE(asMisc2Description), ui32RawFlags);
+}
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpFWAssert
+
+ @Description
+
+ Dump FW assert strings when a thread asserts.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpFWAssert(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+ IMG_CHAR *pszTraceAssertPath;
+ IMG_CHAR *pszTraceAssertInfo;
+ IMG_INT32 ui32TraceAssertLine;
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXFW_THREAD_NUM; i++)
+ {
+ pszTraceAssertPath = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szPath;
+ pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.szInfo;
+ ui32TraceAssertLine = psRGXFWIfTraceBufCtl->sTraceBuf[i].sAssertBuf.ui32LineNum;
+
+ /* print non null assert strings */
+ if (*pszTraceAssertInfo)
+ {
+ PVR_DUMPDEBUG_LOG("FW-T%d Assert: %s (%s:%d)",
+ i, pszTraceAssertInfo, pszTraceAssertPath, ui32TraceAssertLine);
+ }
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function _RGXDumpFWFaults
+
+ @Description
+
+ Dump FW fault information recorded by the firmware.
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psRGXFWIfTraceBufCtl - RGX FW trace buffer
+
+ @Return void
+
+******************************************************************************/
+static void _RGXDumpFWFaults(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+ if (psRGXFWIfTraceBufCtl->ui32FWFaults > 0)
+ {
+ IMG_UINT32 ui32StartFault = psRGXFWIfTraceBufCtl->ui32FWFaults - RGXFWIF_FWFAULTINFO_MAX;
+ IMG_UINT32 ui32EndFault = psRGXFWIfTraceBufCtl->ui32FWFaults - 1;
+ IMG_UINT32 ui32Index;
+
+ if (psRGXFWIfTraceBufCtl->ui32FWFaults < RGXFWIF_FWFAULTINFO_MAX)
+ {
+ ui32StartFault = 0;
+ }
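+
+		/*
+		 * Worked example (illustrative): assuming RGXFWIF_FWFAULTINFO_MAX
+		 * is 8 and ui32FWFaults is 10, the loop below walks ui32Index 2..9
+		 * and the modulo maps those onto buffer slots 2..7,0,1, i.e. the
+		 * eight most recent faults in order.
+		 */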
+
+ for (ui32Index = ui32StartFault; ui32Index <= ui32EndFault; ui32Index++)
+ {
+ RGX_FWFAULTINFO *psFaultInfo = &psRGXFWIfTraceBufCtl->sFaultInfo[ui32Index % RGXFWIF_FWFAULTINFO_MAX];
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+			/* Split OS timestamp into seconds and nanoseconds */
+ ConvertOSTimestampToSAndNS(psFaultInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+ PVR_DUMPDEBUG_LOG("FW Fault %d: %s (%s:%d)",
+ ui32Index+1, psFaultInfo->sFaultBuf.szInfo,
+ psFaultInfo->sFaultBuf.szPath,
+ psFaultInfo->sFaultBuf.ui32LineNum);
+ PVR_DUMPDEBUG_LOG(" Data = 0x%08x, CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC,
+ psFaultInfo->ui32Data,
+ psFaultInfo->ui64CRTimer,
+ ui64Seconds, ui64Nanoseconds);
+ }
+ }
+}
+
+static void _RGXDumpFWPoll(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl)
+{
+ IMG_UINT32 i;
+ for (i = 0; i < RGXFW_THREAD_NUM; i++)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[i])
+ {
+ PVR_DUMPDEBUG_LOG("T%u polling %s (reg:0x%08X mask:0x%08X)",
+ i,
+ ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+ psRGXFWIfTraceBufCtl->aui32CrPollAddr[i] & ~RGXFW_POLL_TYPE_SET,
+ psRGXFWIfTraceBufCtl->aui32CrPollMask[i]);
+ }
+ }
+}
+
+static void _RGXDumpFWHWRInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile, RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl,
+ RGXFWIF_HWRINFOBUF *psHWInfoBuf, PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_BOOL bAnyLocked = IMG_FALSE;
+ IMG_UINT32 dm, i;
+ IMG_UINT32 ui32LineSize;
+ IMG_CHAR *pszLine, *pszTemp;
+ IMG_CHAR *apszDmNames[] = { "GP(", "TDM(", "TA(", "3D(", "CDM(",
+								   "RTU(", "SHG(", NULL };
+
+ const IMG_CHAR *pszMsgHeader = "Number of HWR: ";
+ IMG_CHAR *pszLockupType = "";
+ RGX_HWRINFO *psHWRInfo;
+ IMG_UINT32 ui32MsgHeaderSize = OSStringLength(pszMsgHeader);
+ IMG_UINT32 ui32HWRRecoveryFlags;
+ IMG_UINT32 ui32ReadIndex;
+
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM)))
+ {
+ apszDmNames[RGXFWIF_DM_TDM] = "2D(";
+ }
+
+ for (dm = 0; dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount; dm++)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm] ||
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm])
+ {
+ bAnyLocked = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (!bAnyLocked && (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_HARDWARE_OK) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ /* No HWR situation, print nothing */
+ return;
+ }
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ||
+ (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) && psRGXFWIfTraceBufCtl == psDevInfo->psRGXFWIfGuestTraceBuf))
+ {
+ IMG_BOOL bAnyHWROccured = IMG_FALSE;
+
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[dm] != 0 ||
+ psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm] != 0 ||
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm] !=0)
+ {
+ bAnyHWROccured = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (!bAnyHWROccured)
+ {
+			return;
+ }
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+ {
+ PVR_DUMPDEBUG_LOG("\nUnaccounted Guest OS' HWR list:");
+ }
+ }
+
+ ui32LineSize = sizeof(IMG_CHAR) * ( ui32MsgHeaderSize +
+ (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*( 4/*DM name + left parenthesis*/ +
+ 10/*UINT32 max num of digits*/ +
+ 1/*slash*/ +
+ 10/*UINT32 max num of digits*/ +
+ 3/*right parenthesis + comma + space*/)) +
+ 7 + (psDevInfo->sDevFeatureCfg.ui32MAXDMCount*6)/* FALSE() + (UINT16 max num + comma) per DM */ +
+ 1/* \0 */);
+
+ pszLine = OSAllocMem(ui32LineSize);
+ if (pszLine == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXDumpFWHWRInfo: Out of mem allocating line string (size: %d)", ui32LineSize));
+ return;
+ }
+
+ OSStringCopy(pszLine,pszMsgHeader);
+ pszTemp = pszLine + ui32MsgHeaderSize;
+
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ OSStringCopy(pszTemp,apszDmNames[dm]);
+ pszTemp += OSStringLength(apszDmNames[dm]);
+ pszTemp += OSSNPrintf(pszTemp,
+ 10 + 1 + 10 + 1 + 10 + 1 + 1 + 1 + 1 /* UINT32 + slash + UINT32 + plus + UINT32 + right parenthesis + comma + space + \0 */,
+ "%u/%u+%u), ",
+ psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[dm],
+ psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[dm],
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[dm]);
+ }
+
+ OSStringCopy(pszTemp, "FALSE(");
+ pszTemp += 6;
+
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ pszTemp += OSSNPrintf(pszTemp,
+ 10 + 1 + 1 /* UINT32 max num + comma + \0 */,
+ (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount-1 ? "%u," : "%u)"),
+ psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[dm]);
+ }
+
+ PVR_DUMPDEBUG_LOG("%s", pszLine);
+
+ OSFreeMem(pszLine);
+
+ /* Print out per HWR info */
+ for (dm = 0; (dm < psDevInfo->sDevFeatureCfg.ui32MAXDMCount) && (apszDmNames[dm] != NULL); dm++)
+ {
+ if (dm == RGXFWIF_DM_GP)
+ {
+ PVR_DUMPDEBUG_LOG("DM %d (GP)", dm);
+ }
+ else
+ {
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ IMG_CHAR sPerDmHwrDescription[RGX_DEBUG_STR_SIZE] = "";
+
+ _Flags2Description(sPerDmHwrDescription, asDmState2Description, ARRAY_SIZE(asDmState2Description), psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm]);
+ PVR_DUMPDEBUG_LOG("DM %d (HWRflags 0x%08x: %s)", dm, psRGXFWIfTraceBufCtl->aui32HWRRecoveryFlags[dm], sPerDmHwrDescription);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("DM %d", dm);
+ }
+ }
+
+ ui32ReadIndex = 0;
+ for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+ {
+ psHWRInfo = &psHWInfoBuf->sHWRInfo[ui32ReadIndex];
+
+ if((psHWRInfo->eDM == dm) && (psHWRInfo->ui32HWRNumber != 0))
+ {
+ IMG_CHAR aui8RecoveryNum[10+10+1];
+ IMG_UINT64 ui64Seconds, ui64Nanoseconds;
+
+ /* Split OS timestamp in seconds and nanoseconds */
+ ConvertOSTimestampToSAndNS(psHWRInfo->ui64OSTimer, &ui64Seconds, &ui64Nanoseconds);
+
+					ui32HWRRecoveryFlags = psHWRInfo->ui32HWRRecoveryFlags;
+					pszLockupType = "";
+ if(ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_LOCKUP) { pszLockupType = ", Guilty Lockup"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_LOCKUP) { pszLockupType = ", Innocent Lockup"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_GUILTY_OVERRUNING) { pszLockupType = ", Guilty Overrun"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_HARD_CONTEXT_SWITCH) { pszLockupType = ", Hard Context Switch"; }
+ else if (ui32HWRRecoveryFlags & RGXFWIF_DM_STATE_INNOCENT_OVERRUNING) { pszLockupType = ", Innocent Overrun"; }
+
+ OSSNPrintf(aui8RecoveryNum, sizeof(aui8RecoveryNum), "Recovery %d:", psHWRInfo->ui32HWRNumber);
+ PVR_DUMPDEBUG_LOG(" %s PID = %d, frame = %d, HWRTData = 0x%08X, EventStatus = 0x%08X%s",
+ aui8RecoveryNum,
+ psHWRInfo->ui32PID,
+ psHWRInfo->ui32FrameNum,
+ psHWRInfo->ui32ActiveHWRTData,
+ psHWRInfo->ui32EventStatus,
+ pszLockupType);
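+				/* Blank out the "Recovery N:" label so the continuation lines
+				 * for this record align under its first line. */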
+ pszTemp = &aui8RecoveryNum[0];
+ while (*pszTemp != '\0')
+ {
+ *pszTemp++ = ' ';
+ }
+
+				/* There's currently no time correlation for the Guest OSes on the Firmware, so there's no point printing OS timestamps on Guests */
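+				/* The CR timer deltas below are scaled by 256 to convert timer
+				 * ticks to GPU cycles: the CR timer advances once every 256
+				 * core clocks. */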
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", OSTimer = %" IMG_UINT64_FMTSPEC ".%09" IMG_UINT64_FMTSPEC ", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+ aui8RecoveryNum,
+ psHWRInfo->ui64CRTimer,
+ ui64Seconds,
+ ui64Nanoseconds,
+ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" %s CRTimer = 0x%012"IMG_UINT64_FMTSPECX", CyclesElapsed = %" IMG_INT64_FMTSPECd,
+ aui8RecoveryNum,
+ psHWRInfo->ui64CRTimer,
+ (psHWRInfo->ui64CRTimer-psHWRInfo->ui64CRTimeOfKick)*256);
+ }
+
+ if (psHWRInfo->ui64CRTimeHWResetFinish != 0)
+ {
+ if (psHWRInfo->ui64CRTimeFreelistReady != 0)
+ {
+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", FreelistReconTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+ aui8RecoveryNum,
+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimeHWResetFinish)*256,
+ (psHWRInfo->ui64CRTimeFreelistReady-psHWRInfo->ui64CRTimer)*256);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" %s PreResetTimeInCycles = %" IMG_INT64_FMTSPECd ", HWResetTimeInCycles = %" IMG_INT64_FMTSPECd ", TotalRecoveryTimeInCycles = %" IMG_INT64_FMTSPECd,
+ aui8RecoveryNum,
+ (psHWRInfo->ui64CRTimeHWResetStart-psHWRInfo->ui64CRTimer)*256,
+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimeHWResetStart)*256,
+ (psHWRInfo->ui64CRTimeHWResetFinish-psHWRInfo->ui64CRTimer)*256);
+ }
+ }
+
+ switch(psHWRInfo->eHWRType)
+ {
+ case RGX_HWRTYPE_BIF0FAULT:
+ case RGX_HWRTYPE_BIF1FAULT:
+ {
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+ {
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXFWIF_HWRTYPE_BIF_BANK_GET(psHWRInfo->eHWRType),
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+ ui32ReadIndex);
+ }
+ }
+ break;
+ case RGX_HWRTYPE_TEXASBIF0FAULT:
+ {
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+ {
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+ {
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+ ui32ReadIndex);
+ }
+ }
+ }
+ break;
+ case RGX_HWRTYPE_DPXMMUFAULT:
+ {
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE)))
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_DPX_BIF,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFMMUStatus,
+ psHWRInfo->uHWRData.sBIFInfo.ui64BIFReqStatus,
+ ui32ReadIndex);
+ }
+#endif
+ }
+ }
+ break;
+ case RGX_HWRTYPE_MMUFAULT:
+ {
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+ psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+ ui32ReadIndex,
+ "Core");
+ }
+ }
+ break;
+ case RGX_HWRTYPE_MMUMETAFAULT:
+ {
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo,
+ psHWRInfo->uHWRData.sMMUInfo.ui64MMUStatus,
+ ui32ReadIndex,
+ "Meta");
+ }
+ }
+ break;
+
+
+ case RGX_HWRTYPE_POLLFAILURE:
+ {
+ PVR_DUMPDEBUG_LOG(" T%u polling %s (reg:0x%08X mask:0x%08X last:0x%08X)",
+ psHWRInfo->uHWRData.sPollInfo.ui32ThreadNum,
+ ((psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollAddr & ~RGXFW_POLL_TYPE_SET,
+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollMask,
+ psHWRInfo->uHWRData.sPollInfo.ui32CrPollLastValue);
+ }
+ break;
+
+ case RGX_HWRTYPE_OVERRUN:
+ case RGX_HWRTYPE_UNKNOWNFAILURE:
+ {
+ /* Nothing to dump */
+ }
+ break;
+
+ default:
+ {
+ PVR_ASSERT(IMG_FALSE);
+ }
+ break;
+ }
+ }
+
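+			/* Advance to the next record: the first RGXFWIF_HWINFO_MAX_FIRST
+			 * entries hold the earliest HWRs and are read once; after that,
+			 * reading continues from the write index and wraps within the
+			 * circular RGXFWIF_HWINFO_MAX_LAST section. */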
+ if(ui32ReadIndex == RGXFWIF_HWINFO_MAX_FIRST - 1)
+ ui32ReadIndex = psHWInfoBuf->ui32WriteIndex;
+ else
+ ui32ReadIndex = (ui32ReadIndex + 1) - (ui32ReadIndex / RGXFWIF_HWINFO_LAST_INDEX) * RGXFWIF_HWINFO_MAX_LAST;
+ }
+ }
+}
+
+#if !defined(NO_HARDWARE)
+
+/*!
+*******************************************************************************
+
+ @Function _CheckForPendingPage
+
+ @Description
+
+ Check if the MMU indicates it is blocked on a pending page
+
+ @Input psDevInfo - RGX device info
+
+ @Return IMG_BOOL - IMG_TRUE if there is a pending page
+
+******************************************************************************/
+static INLINE IMG_BOOL _CheckForPendingPage(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32BIFMMUEntry;
+
+ ui32BIFMMUEntry = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY);
+
+ if(ui32BIFMMUEntry & RGX_CR_BIF_MMU_ENTRY_PENDING_EN)
+ {
+ return IMG_TRUE;
+ }
+ else
+ {
+ return IMG_FALSE;
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function _GetPendingPageInfo
+
+ @Description
+
+ Get information about the pending page from the MMU status registers
+
+ @Input psDevInfo - RGX device info
+ @Output psDevVAddr - The device virtual address of the pending MMU address translation
+ @Output pui32CatBase - The page catalog base
+ @Output pui32DataType - The MMU entry data type
+
+ @Return void
+
+******************************************************************************/
+static void _GetPendingPageInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_UINT32 *pui32CatBase,
+ IMG_UINT32 *pui32DataType)
+{
+ IMG_UINT64 ui64BIFMMUEntryStatus;
+
+ ui64BIFMMUEntryStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_MMU_ENTRY_STATUS);
+
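+	/* Each field is extracted using its CLRMSK/SHIFT register definitions:
+	 * ANDing with ~CLRMSK isolates the field bits, then SHIFT right-aligns
+	 * them. */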
+ psDevVAddr->uiAddr = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_ADDRESS_CLRMSK);
+
+ *pui32CatBase = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_CLRMSK) >>
+ RGX_CR_BIF_MMU_ENTRY_STATUS_CAT_BASE_SHIFT;
+
+ *pui32DataType = (ui64BIFMMUEntryStatus & ~RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_CLRMSK) >>
+ RGX_CR_BIF_MMU_ENTRY_STATUS_DATA_TYPE_SHIFT;
+}
+
+#endif
+
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_BOOL bRGXPoweredON)
+{
+ IMG_CHAR *pszState, *pszReason;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 ui32OSid;
+ IMG_CHAR sHwrStateDescription[RGX_DEBUG_STR_SIZE] = "";
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+ /* space for the current clock speed and 3 previous */
+ RGXFWIF_TIME_CORR asTimeCorrs[4];
+ IMG_UINT32 ui32NumClockSpeedChanges;
+
+#if defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(bRGXPoweredON);
+#else
+ if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+
+ IMG_UINT64 ui64RegValMMUStatus;
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS);
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, RGXFWIF_HWINFO_MAX, "Core");
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_MMU_FAULT_STATUS_META);
+ _RGXDumpRGXMMUFaultStatus(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui64RegValMMUStatus, RGXFWIF_HWINFO_MAX, "Meta");
+		}
+		else
+ {
+ IMG_UINT64 ui64RegValMMUStatus, ui64RegValREQStatus;
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK0_REQ_STATUS);
+
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF0, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SINGLE_BIF)))
+ {
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_FAULT_BANK1_REQ_STATUS);
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_BIF1, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+ }
+
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+ {
+ IMG_UINT32 ui32PhantomCnt = RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_REQ_NUM_PHANTOMS(RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)) : 0;
+
+ if(ui32PhantomCnt > 1)
+ {
+ IMG_UINT32 ui32Phantom;
+ for (ui32Phantom = 0; ui32Phantom < ui32PhantomCnt; ui32Phantom++)
+ {
+ /* This can't be done as it may interfere with the FW... */
+ /*OSWriteHWReg64(RGX_CR_TEXAS_INDIRECT, ui32Phantom);*/
+
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+ }
+				}
+				else
+ {
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TEXAS_BIF_FAULT_BANK0_REQ_STATUS);
+
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_TEXAS_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+ }
+ }
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ ui64RegValMMUStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_MMU_STATUS);
+ ui64RegValREQStatus = OSReadHWReg64(psDevInfo->pvRegsBaseKM, DPX_CR_BIF_FAULT_BANK_REQ_STATUS);
+ _RGXDumpRGXBIFBank(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, RGXDBG_DPX_BIF, ui64RegValMMUStatus, ui64RegValREQStatus, RGXFWIF_HWINFO_MAX);
+ }
+#endif
+ }
+
+ if(_CheckForPendingPage(psDevInfo))
+ {
+ IMG_UINT32 ui32CatBase;
+ IMG_UINT32 ui32DataType;
+ IMG_DEV_VIRTADDR sDevVAddr;
+
+ PVR_DUMPDEBUG_LOG("MMU Pending page: Yes");
+
+ _GetPendingPageInfo(psDevInfo, &sDevVAddr, &ui32CatBase, &ui32DataType);
+
+ if(ui32CatBase >= 8)
+ {
+ PVR_DUMPDEBUG_LOG("Cannot check address on PM cat base %u", ui32CatBase);
+ }
+ else
+ {
+ IMG_DEV_PHYADDR sPCDevPAddr;
+
+ sPCDevPAddr.uiAddr = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_BIF_CAT_BASEN(ui32CatBase));
+
+ PVR_DUMPDEBUG_LOG("Checking device virtual address " IMG_DEV_VIRTADDR_FMTSPEC
+ " on cat base %u. PC Addr = 0x%" IMG_UINT64_FMTSPECX,
+ sDevVAddr.uiAddr,
+ ui32CatBase,
+ sPCDevPAddr.uiAddr);
+ RGXCheckFaultAddress(psDevInfo, &sDevVAddr, &sPCDevPAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile, NULL);
+ }
+ }
+ }
+#endif /* NO_HARDWARE */
+
+ /* Firmware state */
+ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthStatus))
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK: pszState = "OK"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING: pszState = "NOT RESPONDING"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD: pszState = "DEAD"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT: pszState = "FAULT"; break;
+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED: pszState = "UNDEFINED"; break;
+ default: pszState = "UNKNOWN"; break;
+ }
+
+ switch (OSAtomicRead(&psDevInfo->psDeviceNode->eHealthReason))
+ {
+ case PVRSRV_DEVICE_HEALTH_REASON_NONE: pszReason = ""; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_ASSERTED: pszReason = " - Asserted"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING: pszReason = " - Poll failure"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS: pszReason = " - Global Event Object timeouts rising"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT: pszReason = " - KCCB offset invalid"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED: pszReason = " - KCCB stalled"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_IDLING: pszReason = " - Idling"; break;
+ case PVRSRV_DEVICE_HEALTH_REASON_RESTARTING: pszReason = " - Restarting"; break;
+ default: pszReason = " - Unknown reason"; break;
+ }
+
+ if (psRGXFWIfTraceBuf == NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s", pszState, pszReason);
+
+ /* can't dump any more information */
+ return;
+ }
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ _Flags2Description(sHwrStateDescription, asHwrState2Description, ARRAY_SIZE(asHwrState2Description), psRGXFWIfTraceBuf->ui32HWRStateFlags);
+ PVR_DUMPDEBUG_LOG("RGX FW State: %s%s (HWRState 0x%08x: %s)", pszState, pszReason, psRGXFWIfTraceBuf->ui32HWRStateFlags, sHwrStateDescription);
+ PVR_DUMPDEBUG_LOG("RGX FW Power State: %s (APM %s: %d ok, %d denied, %d non-idle, %d other, %d total. Latency: %u ms)",
+ pszPowStateName[psRGXFWIfTraceBuf->ePowState],
+ (psDevInfo->pvAPMISRData)?"enabled":"disabled",
+ psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqDenied,
+ psDevInfo->ui32ActivePMReqNonIdle,
+ psDevInfo->ui32ActivePMReqTotal - psDevInfo->ui32ActivePMReqOk - psDevInfo->ui32ActivePMReqDenied,
+ psDevInfo->ui32ActivePMReqTotal,
+ psRuntimeCfg->ui32ActivePMLatencyms);
+
+ ui32NumClockSpeedChanges = (IMG_UINT32) OSAtomicRead(&psDevInfo->psDeviceNode->iNumClockSpeedChanges);
+ RGXGetTimeCorrData(psDevInfo->psDeviceNode, asTimeCorrs, ARRAY_SIZE(asTimeCorrs));
+
+ PVR_DUMPDEBUG_LOG("RGX DVFS: %u frequency changes. Current frequency: %u MHz (sampled at %llx)",
+ ui32NumClockSpeedChanges,
+ asTimeCorrs[0].ui32CoreClockSpeed / 1000000,
+ (unsigned long long) asTimeCorrs[0].ui64OSTimeStamp);
+ if(ui32NumClockSpeedChanges > 0)
+ {
+ PVR_DUMPDEBUG_LOG(" Previous frequencies: %u, %u, %u MHz (Sampled at %llx, %llx, %llx)",
+ asTimeCorrs[1].ui32CoreClockSpeed / 1000000,
+ asTimeCorrs[2].ui32CoreClockSpeed / 1000000,
+ asTimeCorrs[3].ui32CoreClockSpeed / 1000000,
+ (unsigned long long) asTimeCorrs[1].ui64OSTimeStamp,
+ (unsigned long long) asTimeCorrs[2].ui64OSTimeStamp,
+ (unsigned long long) asTimeCorrs[3].ui64OSTimeStamp);
+ }
+
+ for (ui32OSid = 0; ui32OSid < RGXFW_NUM_OS; ui32OSid++)
+ {
+ IMG_UINT32 ui32OSStateFlags = psRGXFWIfTraceBuf->ui32OSStateFlags[ui32OSid];
+
+ PVR_DUMPDEBUG_LOG("RGX FW OS %u State: 0x%08x (Active: %s%s, Freelists: %s, Grow Request Pending: %s)", ui32OSid, ui32OSStateFlags,
+ ((ui32OSStateFlags & RGXFW_OS_STATE_ACTIVE_OS) != 0)?"Yes":"No",
+ ((ui32OSStateFlags & RGXFW_OS_STATE_OFFLOADING) != 0)?"- offloading":"",
+ ((ui32OSStateFlags & RGXFW_OS_STATE_FREELIST_OK) != 0)?"Ok":"Not Ok",
+ ((ui32OSStateFlags & RGXFW_OS_STATE_GROW_REQUEST_PENDING) != 0)?"Yes":"No"
+ );
+ }
+
+ _RGXDumpFWAssert(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+ _RGXDumpFWFaults(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+ _RGXDumpFWPoll(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW State: Unavailable under Guest Mode of operation");
+ PVR_DUMPDEBUG_LOG("RGX FW Power State: Unavailable under Guest Mode of operation");
+ }
+
+ _RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psRGXFWIfTraceBuf, psDevInfo->psRGXFWIfHWRInfoBuf, psDevInfo);
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+ {
+		_RGXDumpFWHWRInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo->psRGXFWIfGuestTraceBuf,
+		                  psDevInfo->psRGXFWIfGuestHWRInfoBuf, psDevInfo);
+ }
+}
+
+static void _RGXDumpMetaSPExtraDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+/* List of extra META Slave Port debug registers */
+#define RGX_META_SP_EXTRA_DEBUG \
+ X(RGX_CR_META_SP_MSLVCTRL0) \
+ X(RGX_CR_META_SP_MSLVCTRL1) \
+ X(RGX_CR_META_SP_MSLVDATAX) \
+ X(RGX_CR_META_SP_MSLVIRQSTATUS) \
+ X(RGX_CR_META_SP_MSLVIRQENABLE) \
+ X(RGX_CR_META_SP_MSLVIRQLEVEL)
+
+ IMG_UINT32 ui32Idx, ui32RegIdx;
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT32 ui32RegAddr;
+
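+	/* The X-macro list above is expanded twice, once with X(A) = A and once
+	 * with X(A) = #A, building parallel tables of register addresses and
+	 * register names. */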
+ const IMG_UINT32 aui32DebugRegAddr [] = {
+#define X(A) A,
+ RGX_META_SP_EXTRA_DEBUG
+#undef X
+ };
+
+ const IMG_CHAR* apszDebugRegName [] = {
+#define X(A) #A,
+ RGX_META_SP_EXTRA_DEBUG
+#undef X
+ };
+
+	const IMG_UINT32 aui32Debug2RegAddr [] = {0x0A28, 0x0A30, 0x0A38};
+
+ PVR_DUMPDEBUG_LOG("META Slave Port extra debug:");
+
+ /* dump first set of Slave Port debug registers */
+ for (ui32Idx = 0; ui32Idx < sizeof(aui32DebugRegAddr)/sizeof(IMG_UINT32); ui32Idx++)
+ {
+ const IMG_CHAR* pszRegName = apszDebugRegName[ui32Idx];
+
+ ui32RegAddr = aui32DebugRegAddr[ui32Idx];
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_DUMPDEBUG_LOG(" * %s: 0x%8.8X", pszRegName, ui32RegVal);
+ }
+
+ /* dump second set of Slave Port debug registers */
+ for (ui32Idx = 0; ui32Idx < 4; ui32Idx++)
+ {
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, 0xA20, ui32Idx);
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, 0xA20);
+ PVR_DUMPDEBUG_LOG(" * 0xA20[%d]: 0x%8.8X", ui32Idx, ui32RegVal);
+
+ }
+
+ for (ui32RegIdx = 0; ui32RegIdx < sizeof(aui32Debug2RegAddr)/sizeof(IMG_UINT32); ui32RegIdx++)
+ {
+ ui32RegAddr = aui32Debug2RegAddr[ui32RegIdx];
+ for (ui32Idx = 0; ui32Idx < 2; ui32Idx++)
+ {
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr, ui32Idx);
+ ui32RegVal = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32RegAddr);
+ PVR_DUMPDEBUG_LOG(" * 0x%X[%d]: 0x%8.8X", ui32RegAddr, ui32Idx, ui32RegVal);
+ }
+ }
+}
+
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 i;
+
+	for (i = 0; i <= DEBUG_REQUEST_VERBOSITY_MAX; i++)
+ {
+ RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile,
+ psDevInfo, i);
+ }
+}
+
+/*
+ * Array of all the Firmware Trace log IDs used to convert the trace data.
+ */
+typedef struct _TRACEBUF_LOG_ {
+ RGXFW_LOG_SFids eSFId;
+ IMG_CHAR *pszName;
+ IMG_CHAR *pszFmt;
+ IMG_UINT32 ui32ArgNum;
+} TRACEBUF_LOG;
+
+static TRACEBUF_LOG aLogDefinitions[] =
+{
+#define X(a, b, c, d, e) {RGXFW_LOG_CREATESFID(a,b,e), #c, d, e},
+ RGXFW_LOG_SFIDLIST
+#undef X
+};
+
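+/* The argument count is encoded in bits 16-19 of each SFid (see
+ * RGXFW_LOG_CREATESFID); NARGS_MASK clears those bits so IDs can be compared
+ * for uniqueness regardless of their argument count. */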
+#define NARGS_MASK ~(0xF<<16)
+static IMG_BOOL _FirmwareTraceIntegrityCheck(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ TRACEBUF_LOG *psLogDef = &aLogDefinitions[0];
+ IMG_BOOL bIntegrityOk = IMG_TRUE;
+
+ /*
+	 * For every log ID, check that the format string and number of arguments are valid.
+ */
+ while (psLogDef->eSFId != RGXFW_SF_LAST)
+ {
+ IMG_UINT32 ui32Count;
+ IMG_CHAR *pszString;
+ TRACEBUF_LOG *psLogDef2;
+
+		/*
+		 * Check that the number of arguments matches the number of '%' specifiers
+		 * in the string, and that no string uses %s, which is not supported since
+		 * it requires a pointer to memory that will not be valid when the trace
+		 * is decoded.
+		 */
+ pszString = psLogDef->pszFmt;
+ ui32Count = 0;
+
+ while (*pszString != '\0')
+ {
+ if (*pszString++ == '%')
+ {
+ ui32Count++;
+ if (*pszString == 's')
+ {
+ bIntegrityOk = IMG_FALSE;
+					PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s uses an unsupported format specifier (%%%c). Please fix.",
+ psLogDef->pszName, *pszString);
+ }
+ else if (*pszString == '%')
+ {
+ /* Double % is a printable % sign and not a format string... */
+ ui32Count--;
+ }
+ }
+ }
+
+ if (ui32Count != psLogDef->ui32ArgNum)
+ {
+ bIntegrityOk = IMG_FALSE;
+			PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s uses %d arguments in its format string but declares %d. Please fix.",
+ psLogDef->pszName, ui32Count, psLogDef->ui32ArgNum);
+ }
+
+ /* RGXDumpFirmwareTrace() has a hardcoded limit of supporting up to 20 arguments... */
+ if (ui32Count > 20)
+ {
+ bIntegrityOk = IMG_FALSE;
+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s has %d arguments but a maximum of 20 are supported. Please fix.",
+ psLogDef->pszName, ui32Count);
+ }
+
+		/* Check that the ID number is unique (ignoring the argument count encoded in the ID) */
+ ui32Count = 0;
+ psLogDef2 = &aLogDefinitions[0];
+
+ while (psLogDef2->eSFId != RGXFW_SF_LAST)
+ {
+ if ((psLogDef->eSFId & NARGS_MASK) == (psLogDef2->eSFId & NARGS_MASK))
+ {
+ ui32Count++;
+ }
+ psLogDef2++;
+ }
+
+ if (ui32Count != 1)
+ {
+ bIntegrityOk = IMG_FALSE;
+ PVR_DUMPDEBUG_LOG("Integrity Check FAIL: %s id %x is not unique, there are %d more. Please fix.",
+ psLogDef->pszName, psLogDef->eSFId, ui32Count - 1);
+ }
+
+ /* Move to the next log ID... */
+ psLogDef++;
+ }
+
+ return bIntegrityOk;
+}
+
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ static IMG_BOOL bIntegrityCheckPassed = IMG_FALSE;
+
+ /* Check that the firmware trace is correctly defined... */
+ if (!bIntegrityCheckPassed)
+ {
+ bIntegrityCheckPassed = _FirmwareTraceIntegrityCheck(pfnDumpDebugPrintf, pvDumpDebugFile);
+ if (!bIntegrityCheckPassed)
+ {
+ return;
+ }
+ }
+
+ /* Dump FW trace information... */
+ if (psRGXFWIfTraceBufCtl != NULL)
+ {
+ IMG_UINT32 tid;
+
+ /* Print the log type settings... */
+ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: none");
+ }
+
+ /* Print the decoded log for each thread... */
+ for (tid = 0; tid < RGXFW_THREAD_NUM; tid++)
+ {
+ IMG_UINT32 *pui32TraceBuf = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+ IMG_UINT32 ui32TracePtr = psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer;
+ IMG_UINT32 ui32Count = 0;
+
+ if (pui32TraceBuf == NULL)
+ {
+ /* trace buffer not yet allocated */
+ continue;
+ }
+
+ while (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+ {
+ IMG_UINT32 ui32Data, ui32DataToId;
+
+				/* Find the first valid log ID, skipping over any invalid or padding data... */
+ do
+ {
+ ui32Data = pui32TraceBuf[ui32TracePtr];
+ ui32DataToId = idToStringID(ui32Data, SFs);
+
+					/* If an unrecognized id is found, it may be inconsistent data or a firmware trace error. */
+ if (ui32DataToId == RGXFW_SF_LAST && RGXFW_LOG_VALIDID(ui32Data))
+ {
+ PVR_DUMPDEBUG_LOG("WARNING: Unrecognized id (%x). From here on the trace might be wrong!", ui32Data);
+ }
+
+ /* Update the trace pointer... */
+ ui32TracePtr = (ui32TracePtr + 1) % RGXFW_TRACE_BUFFER_SIZE;
+ ui32Count++;
+ } while ((RGXFW_SF_LAST == ui32DataToId || ui32DataToId >= RGXFW_SF_FIRST) &&
+ ui32Count < RGXFW_TRACE_BUFFER_SIZE);
+
+ if (ui32Count < RGXFW_TRACE_BUFFER_SIZE)
+ {
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN] = "%llu:T%u-%s> ";
+ IMG_UINT64 ui64Timestamp;
+ IMG_UINT uiLen;
+
+ /* If we hit the ASSERT message then this is the end of the log... */
+ if (ui32Data == RGXFW_SF_MAIN_ASSERT_FAILED)
+ {
+ PVR_DUMPDEBUG_LOG("ASSERTION %s failed at %s:%u",
+ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szInfo,
+ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.szPath,
+ psRGXFWIfTraceBufCtl->sTraceBuf[tid].sAssertBuf.ui32LineNum);
+ break;
+ }
+
+ /*
+ * Print the trace string and provide up to 20 arguments which
+					 * the printf function will be able to use. We have already checked
+ * that no string uses more than this.
+ */
+ OSStringCopy(&szBuffer[OSStringLength(szBuffer)], SFs[ui32DataToId].sName);
+ uiLen = OSStringLength(szBuffer);
+ szBuffer[uiLen ? uiLen - 1 : 0] = '\0';
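+					/* The 64-bit timestamp occupies the next two 32-bit words
+					 * in the trace buffer, most significant word first. */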
+ ui64Timestamp = (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 0) % RGXFW_TRACE_BUFFER_SIZE]) << 32 |
+ (IMG_UINT64)(pui32TraceBuf[(ui32TracePtr + 1) % RGXFW_TRACE_BUFFER_SIZE]);
+ PVR_DUMPDEBUG_LOG(szBuffer, ui64Timestamp, tid, groups[RGXFW_SF_GID(ui32Data)],
+ pui32TraceBuf[(ui32TracePtr + 2) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 3) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 4) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 5) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 6) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 7) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 8) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 9) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 10) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 11) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 12) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 13) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 14) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 15) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 16) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 17) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 18) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 19) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 20) % RGXFW_TRACE_BUFFER_SIZE],
+ pui32TraceBuf[(ui32TracePtr + 21) % RGXFW_TRACE_BUFFER_SIZE]);
+
+ /* Update the trace pointer... */
+ ui32TracePtr = (ui32TracePtr + 2 + RGXFW_SF_PARAMNUM(ui32Data)) % RGXFW_TRACE_BUFFER_SIZE;
+ ui32Count = (ui32Count + 2 + RGXFW_SF_PARAMNUM(ui32Data));
+ }
+ }
+ }
+ }
+}
+
+static const IMG_CHAR *_RGXGetDebugDevStateString(PVRSRV_DEVICE_STATE eDevState)
+{
+ switch (eDevState)
+ {
+ case PVRSRV_DEVICE_STATE_INIT:
+ return "Initialising";
+ case PVRSRV_DEVICE_STATE_ACTIVE:
+ return "Active";
+ case PVRSRV_DEVICE_STATE_DEINIT:
+ return "De-initialising";
+ case PVRSRV_DEVICE_STATE_BAD:
+ return "Bad";
+ case PVRSRV_DEVICE_STATE_UNDEFINED:
+ PVR_ASSERT(!"Device has undefined state");
+ /* fallthrough */
+ default:
+ return "Unknown";
+ }
+}
+
+static const IMG_CHAR* _RGXGetDebugDevPowerStateString(PVRSRV_DEV_POWER_STATE ePowerState)
+{
+ switch (ePowerState)
+ {
+ case PVRSRV_DEV_POWER_STATE_DEFAULT: return "DEFAULT";
+ case PVRSRV_DEV_POWER_STATE_OFF: return "OFF";
+ case PVRSRV_DEV_POWER_STATE_ON: return "ON";
+ default: return "UNKNOWN";
+ }
+}
+
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32Meta = (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META)) ? RGX_GET_FEATURE_VALUE(psDevInfo, META) : 0;
+ IMG_UINT32 ui32TACycles, ui323DCycles, ui32TAOr3DCycles, ui32TAAnd3DCycles;
+ IMG_UINT32 ui32RegVal;
+ IMG_BOOL bFirmwarePerf;
+ IMG_BOOL bS7Infra = RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE);
+ void __iomem *pvRegsBaseKM = psDevInfo->pvRegsBaseKM;
+ PVRSRV_ERROR eError;
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+
+ PVR_DUMPDEBUG_LOG("------[ RGX registers ]------");
+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Linear): 0x%p", psDevInfo->pvRegsBaseKM);
+ PVR_DUMPDEBUG_LOG("RGX Register Base Address (Physical): 0x%08lX", (unsigned long)psDevInfo->sRegsPhysBase.uiAddr);
+
+ /* Check if firmware perf was set at Init time */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc, (void**)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to acquire kernel FW IF Init struct"));
+ return eError;
+ }
+ bFirmwarePerf = (psRGXFWInit->eFirmwarePerf != FW_PERF_CONF_NONE);
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+/* Helper macros to emit data */
+#define REG32_FMTSPEC "%-30s: 0x%08X"
+#define REG64_FMTSPEC "%-30s: 0x%016" IMG_UINT64_FMTSPECX
+#define DDLOG32(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG64(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, RGX_CR_##R));
+#define DDLOG32_DPX(R) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, #R, OSReadHWReg32(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOG64_DPX(R) PVR_DUMPDEBUG_LOG(REG64_FMTSPEC, #R, OSReadHWReg64(pvRegsBaseKM, DPX_CR_##R));
+#define DDLOGVAL32(S,V) PVR_DUMPDEBUG_LOG(REG32_FMTSPEC, S, V);
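+/* For example, DDLOG32(CORE_ID) reads RGX_CR_CORE_ID and prints its value
+ * under the label "CORE_ID"; the #R stringification supplies the label. */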
+
+#if defined(NO_HARDWARE)
+ /* OSReadHWReg variants don't use params passed in NoHW builds */
+ PVR_UNREFERENCED_PARAMETER(pvRegsBaseKM);
+#endif
+
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBVNC_COREID_REG))
+ {
+ DDLOG64(CORE_ID);
+ }
+ else
+ {
+ DDLOG32(CORE_ID);
+ }
+ DDLOG32(CORE_REVISION);
+ DDLOG32(DESIGNER_REV_FIELD1);
+ DDLOG32(DESIGNER_REV_FIELD2);
+ DDLOG64(CHANGESET_NUMBER);
+ if(ui32Meta)
+ {
+ DDLOG32(META_SP_MSLVIRQSTATUS);
+ }
+
+ DDLOG64(CLK_CTRL);
+ DDLOG64(CLK_STATUS);
+ DDLOG64(CLK_CTRL2);
+ DDLOG64(CLK_STATUS2);
+
+ if (bS7Infra)
+ {
+ DDLOG64(CLK_XTPLUS_CTRL);
+ DDLOG64(CLK_XTPLUS_STATUS);
+ }
+ DDLOG32(EVENT_STATUS);
+ DDLOG64(TIMER);
+ if (bS7Infra)
+ {
+ DDLOG64(MMU_FAULT_STATUS);
+ DDLOG64(MMU_FAULT_STATUS_META);
+ }
+ else
+ {
+ DDLOG32(BIF_FAULT_BANK0_MMU_STATUS);
+ DDLOG64(BIF_FAULT_BANK0_REQ_STATUS);
+ DDLOG32(BIF_FAULT_BANK1_MMU_STATUS);
+ DDLOG64(BIF_FAULT_BANK1_REQ_STATUS);
+ }
+ DDLOG32(BIF_MMU_STATUS);
+ DDLOG32(BIF_MMU_ENTRY);
+ DDLOG64(BIF_MMU_ENTRY_STATUS);
+
+ if (bS7Infra)
+ {
+ DDLOG32(BIF_JONES_OUTSTANDING_READ);
+ DDLOG32(BIF_BLACKPEARL_OUTSTANDING_READ);
+ DDLOG32(BIF_DUST_OUTSTANDING_READ);
+ }
+ else
+ {
+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE)))
+ {
+ DDLOG32(BIF_STATUS_MMU);
+ DDLOG32(BIF_READS_EXT_STATUS);
+ DDLOG32(BIF_READS_INT_STATUS);
+ }
+ DDLOG32(BIFPM_STATUS_MMU);
+ DDLOG32(BIFPM_READS_EXT_STATUS);
+ DDLOG32(BIFPM_READS_INT_STATUS);
+ }
+
+ if(RGX_IS_BRN_SUPPORTED(psDevInfo, 44871))
+ {
+ PVR_DUMPDEBUG_LOG("Warning: BRN44871 is present");
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+ {
+ DDLOG64(CONTEXT_MAPPING0);
+ DDLOG64(CONTEXT_MAPPING1);
+ DDLOG64(CONTEXT_MAPPING2);
+ DDLOG64(CONTEXT_MAPPING3);
+ DDLOG64(CONTEXT_MAPPING4);
+ }
+ else
+ {
+ DDLOG64(BIF_CAT_BASE_INDEX);
+ DDLOG64(BIF_CAT_BASE0);
+ DDLOG64(BIF_CAT_BASE1);
+ DDLOG64(BIF_CAT_BASE2);
+ DDLOG64(BIF_CAT_BASE3);
+ DDLOG64(BIF_CAT_BASE4);
+ DDLOG64(BIF_CAT_BASE5);
+ DDLOG64(BIF_CAT_BASE6);
+ DDLOG64(BIF_CAT_BASE7);
+ }
+
+ DDLOG32(BIF_CTRL_INVAL);
+ DDLOG32(BIF_CTRL);
+
+ DDLOG64(BIF_PM_CAT_BASE_VCE0);
+ DDLOG64(BIF_PM_CAT_BASE_TE0);
+ DDLOG64(BIF_PM_CAT_BASE_ALIST0);
+ DDLOG64(BIF_PM_CAT_BASE_VCE1);
+ DDLOG64(BIF_PM_CAT_BASE_TE1);
+ DDLOG64(BIF_PM_CAT_BASE_ALIST1);
+
+ DDLOG32(PERF_TA_PHASE);
+ DDLOG32(PERF_TA_CYCLE);
+ DDLOG32(PERF_3D_PHASE);
+ DDLOG32(PERF_3D_CYCLE);
+
+ ui32TACycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_CYCLE);
+ ui323DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_3D_CYCLE);
+ ui32TAOr3DCycles = OSReadHWReg32(pvRegsBaseKM, RGX_CR_PERF_TA_OR_3D_CYCLE);
+ ui32TAAnd3DCycles = ((ui32TACycles + ui323DCycles) > ui32TAOr3DCycles) ? (ui32TACycles + ui323DCycles - ui32TAOr3DCycles) : 0;
+ DDLOGVAL32("PERF_TA_OR_3D_CYCLE", ui32TAOr3DCycles);
+ DDLOGVAL32("PERF_TA_AND_3D_CYCLE", ui32TAAnd3DCycles);
+
+ DDLOG32(PERF_COMPUTE_PHASE);
+ DDLOG32(PERF_COMPUTE_CYCLE);
+
+ DDLOG32(PM_PARTIAL_RENDER_ENABLE);
+
+ DDLOG32(ISP_RENDER);
+ DDLOG64(TLA_STATUS);
+ DDLOG64(MCU_FENCE);
+
+ DDLOG32(VDM_CONTEXT_STORE_STATUS);
+ DDLOG64(VDM_CONTEXT_STORE_TASK0);
+ DDLOG64(VDM_CONTEXT_STORE_TASK1);
+ DDLOG64(VDM_CONTEXT_STORE_TASK2);
+ DDLOG64(VDM_CONTEXT_RESUME_TASK0);
+ DDLOG64(VDM_CONTEXT_RESUME_TASK1);
+ DDLOG64(VDM_CONTEXT_RESUME_TASK2);
+
+ DDLOG32(ISP_CTL);
+ DDLOG32(ISP_STATUS);
+ DDLOG32(MTS_INTCTX);
+ DDLOG32(MTS_BGCTX);
+ DDLOG32(MTS_BGCTX_COUNTED_SCHEDULE);
+ DDLOG32(MTS_SCHEDULE);
+ DDLOG32(MTS_GPU_INT_STATUS);
+
+ DDLOG32(CDM_CONTEXT_STORE_STATUS);
+ DDLOG64(CDM_CONTEXT_PDS0);
+ DDLOG64(CDM_CONTEXT_PDS1);
+ DDLOG64(CDM_TERMINATE_PDS);
+ DDLOG64(CDM_TERMINATE_PDS1);
+
+ if(RGX_IS_ERN_SUPPORTED(psDevInfo, 47025))
+ {
+ DDLOG64(CDM_CONTEXT_LOAD_PDS0);
+ DDLOG64(CDM_CONTEXT_LOAD_PDS1);
+ }
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ DDLOG32_DPX(BIF_MMU_STATUS);
+ DDLOG64_DPX(BIF_FAULT_BANK_MMU_STATUS);
+ DDLOG64_DPX(BIF_FAULT_BANK_REQ_STATUS);
+
+ DDLOG64(RPM_SHF_FPL);
+ DDLOG32(RPM_SHF_FPL_READ);
+ DDLOG32(RPM_SHF_FPL_WRITE);
+ DDLOG64(RPM_SHG_FPL);
+ DDLOG32(RPM_SHG_FPL_READ);
+ DDLOG32(RPM_SHG_FPL_WRITE);
+ }
+#endif
+ if (bS7Infra)
+ {
+ DDLOG32(JONES_IDLE);
+ }
+
+ DDLOG32(SIDEKICK_IDLE);
+
+ if (!bS7Infra)
+ {
+ DDLOG32(SLC_IDLE);
+ DDLOG32(SLC_STATUS0);
+ DDLOG64(SLC_STATUS1);
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS) && RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS))
+ {
+ DDLOG64(SLC_STATUS2);
+ }
+
+ DDLOG32(SLC_CTRL_BYPASS);
+ DDLOG64(SLC_CTRL_MISC);
+ }
+ else
+ {
+ DDLOG32(SLC3_IDLE);
+ DDLOG64(SLC3_STATUS);
+ DDLOG32(SLC3_FAULT_STOP_STATUS);
+ }
+
+ if (ui32Meta)
+ {
+		/* Force bit 6 of MSLVCTRL1 to 0 so that internal register reads do not go through the core */
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL1, 0x0);
+
+ eError = RGXReadWithSP(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("T0 TXENABLE", ui32RegVal);
+
+ eError = RGXReadWithSP(psDevInfo, META_CR_T0STATUS_OFFSET, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("T0 TXSTATUS", ui32RegVal);
+
+ eError = RGXReadWithSP(psDevInfo, META_CR_T0DEFR_OFFSET, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("T0 TXDEFR", ui32RegVal);
+
+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PC, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+ DDLOGVAL32("T0 PC", ui32RegVal);
+
+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_PCX, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+ DDLOGVAL32("T0 PCX", ui32RegVal);
+
+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR0_SP, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+ DDLOGVAL32("T0 SP", ui32RegVal);
+ }
+
+ if ((ui32Meta == MTP218) || (ui32Meta == MTP219))
+ {
+ eError = RGXReadWithSP(psDevInfo, META_CR_T1ENABLE_OFFSET, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("T1 TXENABLE", ui32RegVal);
+
+ eError = RGXReadWithSP(psDevInfo, META_CR_T1STATUS_OFFSET, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("T1 TXSTATUS", ui32RegVal);
+
+ eError = RGXReadWithSP(psDevInfo, META_CR_T1DEFR_OFFSET, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("T1 TXDEFR", ui32RegVal);
+
+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PC, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+ DDLOGVAL32("T1 PC", ui32RegVal);
+
+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_PCX, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+ DDLOGVAL32("T1 PCX", ui32RegVal);
+
+ eError = RGXReadMetaCoreReg(psDevInfo, META_CR_THR1_SP, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadMetaCoreReg", _METASPError);
+ DDLOGVAL32("T1 SP", ui32RegVal);
+ }
+
+ if (ui32Meta && bFirmwarePerf)
+ {
+ eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT0, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("PERF_COUNT0", ui32RegVal);
+
+ eError = RGXReadWithSP(psDevInfo, META_CR_PERF_COUNT1, &ui32RegVal);
+ PVR_LOGG_IF_ERROR(eError, "RGXReadWithSP", _METASPError);
+ DDLOGVAL32("PERF_COUNT1", ui32RegVal);
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ DDLOG32(MIPS_ADDR_REMAP1_CONFIG1);
+ DDLOG64(MIPS_ADDR_REMAP1_CONFIG2);
+ DDLOG32(MIPS_ADDR_REMAP2_CONFIG1);
+ DDLOG64(MIPS_ADDR_REMAP2_CONFIG2);
+ DDLOG32(MIPS_ADDR_REMAP3_CONFIG1);
+ DDLOG64(MIPS_ADDR_REMAP3_CONFIG2);
+ DDLOG32(MIPS_ADDR_REMAP4_CONFIG1);
+ DDLOG64(MIPS_ADDR_REMAP4_CONFIG2);
+ DDLOG32(MIPS_ADDR_REMAP5_CONFIG1);
+ DDLOG64(MIPS_ADDR_REMAP5_CONFIG2);
+ DDLOG64(MIPS_WRAPPER_CONFIG);
+ DDLOG32(MIPS_EXCEPTION_STATUS);
+
+#if !defined(NO_HARDWARE)
+ {
+ RGX_MIPS_STATE sMIPSState = {0};
+
+ eError = _RGXMipsExtraDebug(psDevInfo, &sMIPSState);
+ PVR_DUMPDEBUG_LOG("---- [ MIPS internal state ] ----");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DUMPDEBUG_LOG("MIPS extra debug not available");
+ }
+ else
+ {
+ DDLOGVAL32("PC", sMIPSState.ui32ErrorEPC);
+ DDLOGVAL32("STATUS_REGISTER", sMIPSState.ui32StatusRegister);
+ DDLOGVAL32("CAUSE_REGISTER", sMIPSState.ui32CauseRegister);
+ _RGXMipsDumpCauseDecode(pfnDumpDebugPrintf, pvDumpDebugFile,
+ sMIPSState.ui32CauseRegister, sMIPSState.ui32ErrorState);
+ DDLOGVAL32("BAD_REGISTER", sMIPSState.ui32BadRegister);
+ DDLOGVAL32("EPC", sMIPSState.ui32EPC);
+ DDLOGVAL32("SP", sMIPSState.ui32SP);
+ DDLOGVAL32("BAD_INSTRUCTION", sMIPSState.ui32BadInstr);
+ _RGXMipsDumpDebugDecode(pfnDumpDebugPrintf, pvDumpDebugFile,
+ sMIPSState.ui32Debug, sMIPSState.ui32DEPC);
+
+ {
+ IMG_UINT32 ui32Idx;
+
+ IMG_BOOL bCheckBRN63553WA =
+ RGX_IS_BRN_SUPPORTED(psDevInfo, 63553) &&
+ (OSReadHWReg32(pvRegsBaseKM, RGX_CR_MIPS_ADDR_REMAP5_CONFIG1) == (0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN));
+
+ IMG_BOOL bUseRemapRanges = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+
+ PVR_DUMPDEBUG_LOG("TLB :");
+
+ for (ui32Idx = 0; ui32Idx < ARRAY_SIZE(sMIPSState.asTLB); ui32Idx++)
+ {
+ RGX_MIPS_REMAP_ENTRY *psRemapEntry0 = NULL;
+ RGX_MIPS_REMAP_ENTRY *psRemapEntry1 = NULL;
+
+ if (bUseRemapRanges)
+ {
+ psRemapEntry0 = &sMIPSState.asRemap[ui32Idx];
+ psRemapEntry1 = &sMIPSState.asRemap[ui32Idx+16];
+ }
+
+
+ _RGXMipsDumpTLBEntry(pfnDumpDebugPrintf,
+ pvDumpDebugFile,
+ &sMIPSState.asTLB[ui32Idx],
+ psRemapEntry0,
+ psRemapEntry1,
+ ui32Idx);
+
+ if (bCheckBRN63553WA)
+ {
+ const RGX_MIPS_TLB_ENTRY *psTLBEntry = &sMIPSState.asTLB[ui32Idx];
+
+ #define BRN63553_TLB_IS_NUL(X) (((X) & RGXMIPSFW_TLB_VALID) && (RGXMIPSFW_TLB_GET_PA(X) == 0x0))
+
+ if (BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo0) || BRN63553_TLB_IS_NUL(psTLBEntry->ui32TLBLo1))
+ {
+ PVR_DUMPDEBUG_LOG("BRN63553 WA present with a valid TLB entry mapping address 0x0.");
+ }
+ }
+ }
+
+ /* This implicitly also checks for overlaps between memory and regbank addresses */
+ _CheckMipsTLBDuplicatePAs(pfnDumpDebugPrintf,
+ pvDumpDebugFile,
+ sMIPSState.asTLB,
+ bUseRemapRanges ? sMIPSState.asRemap : NULL);
+
+ if (bUseRemapRanges)
+ {
+ /* Dump unmapped address if it was dumped in FW, otherwise it will be 0 */
+ if(sMIPSState.ui32UnmappedAddress)
+ {
+ PVR_DUMPDEBUG_LOG("Remap unmapped address => 0x%08X",
+ sMIPSState.ui32UnmappedAddress );
+ }
+ }
+ }
+ }
+ PVR_DUMPDEBUG_LOG("--------------------------------");
+ }
+#endif
+ }
+
+ return PVRSRV_OK;
+
+_METASPError:
+	PVR_DPF((PVR_DBG_ERROR, "Dumping Slave Port debug information"));
+ _RGXDumpMetaSPExtraDebugInfo(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+
+ return eError;
+}
+
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32VerbLevel)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: failed to acquire power lock, error: 0x%x", eError));
+ return;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc, (void**)&psRGXFWInit);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "Failed to acquire kernel FW IF Init struct"));
+		/* Don't leak the power lock acquired above */
+		PVRSRVPowerUnlock(psDeviceNode);
+		return;
+	}
+
+ switch (ui32VerbLevel)
+ {
+ case DEBUG_REQUEST_VERBOSITY_LOW :
+ {
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ IMG_BOOL bRGXPoweredON;
+ IMG_CHAR *Bit32 = "32 Bit", *Bit64 = "64 Bit";
+
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+ goto Exit;
+ }
+
+ bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+ PVR_DUMPDEBUG_LOG("------[ Driver Info ]------");
+ PVR_DUMP_DRIVER_INFO("UM", psPVRSRVData->sDriverInfo.sUMBuildInfo);
+ PVR_DUMP_DRIVER_INFO("KM", psPVRSRVData->sDriverInfo.sKMBuildInfo);
+ if (psRGXFWInit->sRGXCompChecks.bUpdated)
+ {
+ PVR_DUMP_FIRMWARE_INFO(psRGXFWInit->sRGXCompChecks);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("FW info: UNINITIALIZED");
+ }
+ PVR_DUMPDEBUG_LOG("Comparison of UM/KM components: %s", (psPVRSRVData->sDriverInfo.bIsNoMatch) ? ("MISMATCH") : ("MATCHING"));
+
+ PVR_DUMPDEBUG_LOG("KM Arch: %s", (psPVRSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT)?
+ Bit64 : Bit32);
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE))
+ {
+ PVR_DUMPDEBUG_LOG("Driver Mode: %s", (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))?"Host":"Guest");
+ }
+
+ if(psPVRSRVData->sDriverInfo.ui8UMSupportedArch)
+ {
+ if((psPVRSRVData->sDriverInfo.ui8UMSupportedArch & BUILD_ARCH_BOTH) ==
+ BUILD_ARCH_BOTH)
+ {
+ PVR_DUMPDEBUG_LOG("UM Connected Clients Arch: %s and %s", Bit64, Bit32);
+
+				}
+				else
+				{
+					PVR_DUMPDEBUG_LOG("UM Connected Clients: %s", (psPVRSRVData->sDriverInfo.ui8UMSupportedArch &
+ BUILD_ARCH_64BIT)? Bit64 : Bit32);
+ }
+ }
+
+ PVR_DUMPDEBUG_LOG("------[ RGX summary ]------");
+			PVR_DUMPDEBUG_LOG("RGX BVNC: %d.%d.%d.%d", psDevInfo->sDevFeatureCfg.ui32B,
+							psDevInfo->sDevFeatureCfg.ui32V,
+							psDevInfo->sDevFeatureCfg.ui32N,
+							psDevInfo->sDevFeatureCfg.ui32C);
+ PVR_DUMPDEBUG_LOG("RGX Device State: %s", _RGXGetDebugDevStateString(psDeviceNode->eDevState));
+ PVR_DUMPDEBUG_LOG("RGX Power State: %s", _RGXGetDebugDevPowerStateString(ePowerState));
+
+ RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+
+ if ((bRGXPoweredON) && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+
+ eError = RGXDumpRGXRegisters(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: RGXDumpRGXRegisters failed (%d)", eError));
+ }
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG(" (!) %s. No registers dumped", PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest Mode of operation" : "RGX power is down");
+ }
+
+ /* Dump out the kernel CCB. */
+ {
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+ if (psKCCBCtl != NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX Kernel CCB WO:0x%X RO:0x%X",
+ psKCCBCtl->ui32WriteOffset,
+ psKCCBCtl->ui32ReadOffset);
+ }
+ }
+
+ /* Dump out the firmware CCB. */
+ {
+ RGXFWIF_CCB_CTL *psFCCBCtl = psDevInfo->psFirmwareCCBCtl;
+
+ if (psFCCBCtl != NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX Firmware CCB WO:0x%X RO:0x%X",
+ psFCCBCtl->ui32WriteOffset,
+ psFCCBCtl->ui32ReadOffset);
+ }
+ }
+
+ /* Dump the KCCB commands executed */
+ {
+ PVR_DUMPDEBUG_LOG("RGX Kernel CCB commands executed = %d",
+ psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted);
+ }
+
+ /* Dump the IRQ info for threads*/
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ IMG_UINT32 ui32TID;
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u",
+ ui32TID,
+ psDevInfo->psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID],
+ psDevInfo->aui32SampleIRQCount[ui32TID]);
+ }
+ }
+
+ /* Dump the FW config flags */
+ {
+ RGXFWIF_OS_CONFIG *psOSConfig = psDevInfo->psFWIfOSConfig;
+ IMG_CHAR sFwFlagsDescription[MAX_FW_DESCRIPTION_LENGTH] = "";
+
+ if (!psOSConfig)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: OS Config is not mapped into CPU space"));
+ goto Exit;
+ }
+
+ _GetFwFlagsDescription(sFwFlagsDescription, psOSConfig->ui32ConfigFlags);
+ PVR_DUMPDEBUG_LOG("FW OS config flags = 0x%X (%s)", psOSConfig->ui32ConfigFlags, sFwFlagsDescription);
+ }
+
+ break;
+
+ }
+ case DEBUG_REQUEST_VERBOSITY_MEDIUM :
+ {
+ IMG_INT tid;
+ /* Dump FW trace information */
+ if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+ {
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ for (tid = 0 ; tid < RGXFW_THREAD_NUM ; tid++)
+ {
+ IMG_UINT32 i;
+ IMG_BOOL bPrevLineWasZero = IMG_FALSE;
+ IMG_BOOL bLineIsAllZeros = IMG_FALSE;
+ IMG_UINT32 ui32CountLines = 0;
+ IMG_UINT32 *pui32TraceBuffer;
+ IMG_CHAR *pszLine;
+
+ if (psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: %s ( " RGXFWIF_LOG_ENABLED_GROUPS_LIST_PFSPEC ")",
+ ((psRGXFWIfTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)?("trace"):("tbi")),
+ RGXFWIF_LOG_ENABLED_GROUPS_LIST(psRGXFWIfTraceBufCtl->ui32LogType)
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("Debug log type: none");
+ }
+
+ pui32TraceBuffer = psRGXFWIfTraceBufCtl->sTraceBuf[tid].pui32TraceBuffer;
+
+ /* Skip if trace buffer is not allocated */
+ if (pui32TraceBuffer == NULL)
+ {
+ PVR_DUMPDEBUG_LOG("RGX FW thread %d: Trace buffer not yet allocated",tid);
+ continue;
+ }
+
+ /* each element in the line is 8 characters plus a space. The '+1' is because of the final trailing '\0'. */
+ pszLine = OSAllocMem(9*RGXFW_TRACE_BUFFER_LINESIZE+1);
+ if (pszLine == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: Out of mem allocating line string (size: %d)", 9*RGXFW_TRACE_BUFFER_LINESIZE));
+ goto Exit;
+ }
+
+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace START ]------", tid);
+ PVR_DUMPDEBUG_LOG("FWT[traceptr]: %X", psRGXFWIfTraceBufCtl->sTraceBuf[tid].ui32TracePointer);
+ PVR_DUMPDEBUG_LOG("FWT[tracebufsize]: %X", RGXFW_TRACE_BUFFER_SIZE);
+
+ for (i = 0; i < RGXFW_TRACE_BUFFER_SIZE; i += RGXFW_TRACE_BUFFER_LINESIZE)
+ {
+ IMG_UINT32 k = 0;
+ IMG_UINT32 ui32Line = 0x0;
+ IMG_UINT32 ui32LineOffset = i*sizeof(IMG_UINT32);
+ IMG_CHAR *pszBuf = pszLine;
+
+ for (k = 0; k < RGXFW_TRACE_BUFFER_LINESIZE; k++)
+ {
+ ui32Line |= pui32TraceBuffer[i + k];
+
+ /* prepare the line to print it. The '+1' is because of the trailing '\0' added */
+ OSSNPrintf(pszBuf, 9 + 1, " %08x", pui32TraceBuffer[i + k]);
+ pszBuf += 9; /* write over the '\0' */
+ }
+
+ bLineIsAllZeros = (ui32Line == 0x0);
+
+ if (bLineIsAllZeros)
+ {
+ if (bPrevLineWasZero)
+ {
+ ui32CountLines++;
+ }
+ else
+ {
+ bPrevLineWasZero = IMG_TRUE;
+ ui32CountLines = 1;
+ PVR_DUMPDEBUG_LOG("FWT[%08x]: 00000000 ... 00000000", ui32LineOffset);
+ }
+ }
+ else
+ {
+ if (bPrevLineWasZero && ui32CountLines > 1)
+ {
+ PVR_DUMPDEBUG_LOG("FWT[...]: %d lines were all zero", ui32CountLines);
+ }
+ bPrevLineWasZero = IMG_FALSE;
+
+ PVR_DUMPDEBUG_LOG("FWT[%08x]:%s", ui32LineOffset, pszLine);
+ }
+
+ }
+ if (bPrevLineWasZero)
+ {
+ PVR_DUMPDEBUG_LOG("FWT[END]: %d lines were all zero", ui32CountLines);
+ }
+
+ PVR_DUMPDEBUG_LOG("------[ RGX FW thread %d trace END ]------", tid);
+
+ OSFreeMem(pszLine);
+ }
+
+ if(RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ {
+ RGXFWIF_OS_CONFIG *psOSConfig = psDevInfo->psFWIfOSConfig;
+
+ if (!psOSConfig)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDebugRequestProcess: OS Config is not mapped into CPU space"));
+ goto Exit;
+ }
+
+ if ((psOSConfig->ui32ConfigFlags & RGXFWIF_INICFG_METAT1_DUMMY) != 0)
+ {
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 *pui32T1PCX = &psRGXFWIfTraceBufCtl->ui32T1PCX[0];
+ IMG_UINT32 ui32T1PCXWOff = psRGXFWIfTraceBufCtl->ui32T1PCXWOff;
+ IMG_UINT32 i = ui32T1PCXWOff;
+
+ PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list (most recent first) ]------");
+ do
+ {
+ PVR_DUMPDEBUG_LOG(" 0x%08x", pui32T1PCX[i]);
+ i = (i == 0) ? (RGXFWIF_MAX_PCX - 1) : (i - 1);
+
+ } while (i != ui32T1PCXWOff);
+
+ PVR_DUMPDEBUG_LOG("------[ FW Thread 1 PCX list [END] ]------");
+ }
+
+ }
+ }
+
+ {
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+ PVR_DUMPDEBUG_LOG("------[ Full CCB Status ]------");
+#else
+ PVR_DUMPDEBUG_LOG("------[ Stalled FWCtxs ]------");
+#endif
+ CheckForStalledTransferCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ CheckForStalledRenderCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ CheckForStalledKickSyncCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, COMPUTE))
+ {
+ CheckForStalledComputeCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, FASTRENDER_DM))
+ {
+ CheckForStalledTDMTransferCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ CheckForStalledRayCtxt(psDevInfo, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+#endif
+ }
+ break;
+ }
+ case DEBUG_REQUEST_VERBOSITY_HIGH:
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ IMG_BOOL bRGXPoweredON;
+
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDebugRequestProcess: Error retrieving RGX power state. No debug info dumped."));
+				/* Release the FW Init mapping and power lock via the common exit path */
+				goto Exit;
+ }
+
+ bRGXPoweredON = (ePowerState == PVRSRV_DEV_POWER_STATE_ON);
+
+ PVR_DUMPDEBUG_LOG("------[ Debug summary ]------");
+
+ RGXDumpRGXDebugSummary(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, bRGXPoweredON);
+		}
+		break;
+ default:
+ break;
+ }
+
+Exit:
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/******************************************************************************
+ End of file (rgxdebug.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxdebug.h b/drivers/gpu/drm/img-rogue/1.10/rgxdebug.h
new file mode 100644
index 00000000000000..4bacfaf7e6b223
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxdebug.h
@@ -0,0 +1,236 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX debug header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX debugging functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEBUG_H__)
+#define __RGXDEBUG_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "rgxdevice.h"
+
+
+/**
+ * Debug utility macro for printing FW IRQ count and Last sampled IRQ count in
+ * LISR for each RGX FW thread.
+ * Macro takes pointer to PVRSRV_RGXDEV_INFO as input.
+ */
+#define RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo) \
+ do \
+ { \
+ IMG_UINT32 ui32TID; \
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++) \
+ { \
+ PVR_DPF((DBGPRIV_VERBOSE, \
+				 "RGX FW thread %u: FW IRQ count = %u, Last sampled IRQ count in LISR = %u", \
+ ui32TID, \
+ (psRgxDevInfo)->psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID], \
+ (psRgxDevInfo)->aui32SampleIRQCount[ui32TID])); \
+ } \
+ } while(0)
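+
+/* Example (illustrative): a typical call site logs the per-thread IRQ
+ * counters after servicing an interrupt:
+ *
+ *     RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo);
+ */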
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpDebugInfo
+
+ @Description
+
+ Dumps useful debugging info. Dumps less information than PVRSRVDebugRequest
+ and does not dump debugging information for all requester types (SysDebug,
+ ServerSync info).
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+
+ @Return void
+
+******************************************************************************/
+void RGXDumpDebugInfo(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo);
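+
+/* Example (illustrative): both the print function and the file identifier
+ * are described above as optional, so a minimal call that dumps through the
+ * default logger would be:
+ *
+ *     RGXDumpDebugInfo(NULL, NULL, psDevInfo);
+ */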
+
+/*!
+*******************************************************************************
+
+ @Function RGXDebugRequestProcess
+
+ @Description
+
+ This function will print out the debug information for the specified
+ verbosity level.
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input ui32VerbLevel - Verbosity level
+
+ @Return void
+
+******************************************************************************/
+void RGXDebugRequestProcess(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32VerbLevel);
+/*!
+*******************************************************************************
+
+ @Function RGXDumpRGXRegisters
+
+ @Description
+
+ Dumps an extensive list of RGX registers required for debugging
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success, error code otherwise
+
+******************************************************************************/
+PVRSRV_ERROR RGXDumpRGXRegisters(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpFirmwareTrace
+
+ @Description Dumps the decoded version of the firmware trace buffer.
+
+ @Input pfnDumpDebugPrintf - Optional replacement print function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+
+ @Return void
+
+******************************************************************************/
+void RGXDumpFirmwareTrace(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function RGXReadWithSP
+
+ @Description
+
+ Reads data from a memory location (FW memory map) using the META Slave Port
+
+ @Input psDevInfo - Pointer to RGX DevInfo to be used while reading
+ @Input ui32FWAddr - 32 bit FW address
+ @Output pui32Value - When the read is successful, the value at the above
+ FW address is returned at this location
+
+ @Return PVRSRV_ERROR PVRSRV_OK if read success, error code otherwise.
+******************************************************************************/
+PVRSRV_ERROR RGXReadWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 *pui32Value);
+
+/*!
+*******************************************************************************
+
+ @Function RGXWriteWithSP
+
+ @Description
+
+ Writes data to a memory location (FW memory map) using the META Slave Port
+
+ @Input psDevInfo - Pointer to RGX DevInfo to be used while writing
+ @Input ui32FWAddr - 32 bit FW address
+ @Input ui32Value - 32 bit value to write
+
+ @Return PVRSRV_ERROR PVRSRV_OK if write success, error code otherwise.
+******************************************************************************/
+PVRSRV_ERROR RGXWriteWithSP(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FWAddr, IMG_UINT32 ui32Value);
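+
+/*
+ * Usage sketch (illustrative only): a read-modify-write of a 32 bit FW word
+ * over the META Slave Port, with error propagation. ui32SomeFWAddr is a
+ * hypothetical FW address supplied by the caller:
+ *
+ *     IMG_UINT32 ui32Value;
+ *     PVRSRV_ERROR eError;
+ *
+ *     eError = RGXReadWithSP(psDevInfo, ui32SomeFWAddr, &ui32Value);
+ *     if (eError == PVRSRV_OK)
+ *     {
+ *         eError = RGXWriteWithSP(psDevInfo, ui32SomeFWAddr, ui32Value | 0x1);
+ *     }
+ */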
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+/*!
+*******************************************************************************
+
+ @Function ValidateFWImageWithSP
+
+ @Description Compare the Firmware image as seen from the CPU point of view
+ against the same memory area as seen from the META point of view
+
+ @Input psDevInfo - Device Info
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR ValidateFWImageWithSP(PVRSRV_RGXDEV_INFO *psDevInfo);
+#endif /* defined(SUPPORT_EXTRA_METASP_DEBUG) */
+
+/*!
+*******************************************************************************
+
+ @Function RGXDumpRGXDebugSummary
+
+ @Description
+
+ Dump a summary of the RGX state in human readable form
+
+ @Input pfnDumpDebugPrintf - The debug printf function
+ @Input pvDumpDebugFile - Optional file identifier to be passed to the
+ 'printf' function if required
+ @Input psDevInfo - RGX device info
+ @Input bRGXPoweredON - IMG_TRUE if RGX device is on
+
+ @Return void
+
+******************************************************************************/
+void RGXDumpRGXDebugSummary(DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_BOOL bRGXPoweredON);
+
+#endif /* __RGXDEBUG_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxdevice.h b/drivers/gpu/drm/img-rogue/1.10/rgxdevice.h
new file mode 100644
index 00000000000000..7890177d4381fd
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxdevice.h
@@ -0,0 +1,648 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX device node header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX device node
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXDEVICE_H__)
+#define __RGXDEVICE_H__
+
+#include "img_types.h"
+#include "pvrsrv_device_types.h"
+#include "mmu_common.h"
+#include "rgx_fwif_km.h"
+#include "rgx_fwif.h"
+#include "cache_ops.h"
+#include "device.h"
+#include "osfunc.h"
+#include "rgxlayer_impl.h"
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#endif
+typedef struct _RGX_SERVER_COMMON_CONTEXT_ RGX_SERVER_COMMON_CONTEXT;
+
+typedef struct {
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ IMG_DEV_VIRTADDR *psResumeSignalAddr;
+} RGX_COMMON_CONTEXT_INFO;
+
+
+/*!
+ ******************************************************************************
+ * Device state flags
+ *****************************************************************************/
+#define RGXKM_DEVICE_STATE_ZERO_FREELIST (0x1 << 0) /*!< Zeroing the physical pages of reconstructed free lists */
+#define RGXKM_DEVICE_STATE_FTRACE_EN (0x1 << 1) /*!< Used to enable device FTrace thread to consume HWPerf data */
+#define RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN (0x1 << 2) /*!< Used to disable the Devices Watchdog logging */
+#define RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN (0x1 << 3) /*!< Used for validation to inject dust requests every TA/3D kick */
+
+/*!
+ ******************************************************************************
+ * GPU DVFS Table
+ *****************************************************************************/
+
+#define RGX_GPU_DVFS_TABLE_SIZE 16
+#define RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US 25000 /* Time required to calibrate a clock frequency the first time */
+#define RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US 150000 /* Time required for a recalibration after a DVFS transition */
+#define RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US 10000000 /* Time before the next periodic calibration and correlation */
+
+typedef struct _GPU_FREQ_TRACKING_DATA_
+{
+ /* Core clock speed estimated by the driver */
+ IMG_UINT32 ui32EstCoreClockSpeed;
+
+ /* Amount of successful calculations of the estimated core clock speed */
+ IMG_UINT32 ui32CalibrationCount;
+} GPU_FREQ_TRACKING_DATA;
+
+typedef struct _RGX_GPU_DVFS_TABLE_
+{
+ /* Beginning of current calibration period (in us) */
+ IMG_UINT64 ui64CalibrationCRTimestamp;
+ IMG_UINT64 ui64CalibrationOSTimestamp;
+
+ /* Calculated calibration period (in us) */
+ IMG_UINT64 ui64CalibrationCRTimediff;
+ IMG_UINT64 ui64CalibrationOSTimediff;
+
+ /* Current calibration period (in us) */
+ IMG_UINT32 ui32CalibrationPeriod;
+
+ /* System layer frequency table and frequency tracking data */
+ IMG_UINT32 ui32FreqIndex;
+ IMG_UINT32 aui32GPUFrequency[RGX_GPU_DVFS_TABLE_SIZE];
+ GPU_FREQ_TRACKING_DATA asTrackingData[RGX_GPU_DVFS_TABLE_SIZE];
+} RGX_GPU_DVFS_TABLE;
+
+
+/*!
+ ******************************************************************************
+ * GPU utilisation statistics
+ *****************************************************************************/
+
+typedef struct _RGXFWIF_GPU_UTIL_STATS_
+{
+ IMG_BOOL bValid; /* If TRUE, statistics are valid.
+ FALSE if the driver couldn't get reliable stats. */
+ IMG_UINT64 ui64GpuStatActiveHigh; /* GPU active high statistic */
+ IMG_UINT64 ui64GpuStatActiveLow; /* GPU active low (i.e. TLA active only) statistic */
+ IMG_UINT64 ui64GpuStatBlocked; /* GPU blocked statistic */
+ IMG_UINT64 ui64GpuStatIdle; /* GPU idle statistic */
+ IMG_UINT64 ui64GpuStatCumulative; /* Sum of active/blocked/idle stats */
+ IMG_UINT64 ui64TimeStamp; /* Timestamp of the most recent sample of the GPU stats */
+} RGXFWIF_GPU_UTIL_STATS;
+
+
+typedef struct _RGX_REG_CONFIG_
+{
+ IMG_BOOL bEnabled;
+ RGXFWIF_REG_CFG_TYPE eRegCfgTypeToPush;
+ IMG_UINT32 ui32NumRegRecords;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+} RGX_REG_CONFIG;
+
+typedef struct _PVRSRV_STUB_PBDESC_ PVRSRV_STUB_PBDESC;
+
+typedef struct
+{
+ IMG_UINT32 ui32DustCount1;
+ IMG_UINT32 ui32DustCount2;
+ IMG_BOOL bToggle;
+} RGX_DUST_STATE;
+
+typedef struct _PVRSRV_DEVICE_FEATURE_CONFIG_
+{
+ IMG_UINT64 ui64ErnsBrns;
+ IMG_UINT64 ui64Features;
+ IMG_UINT32 ui32B;
+ IMG_UINT32 ui32V;
+ IMG_UINT32 ui32N;
+ IMG_UINT32 ui32C;
+ IMG_UINT32 ui32FeaturesValues[RGX_FEATURE_WITH_VALUES_MAX_IDX];
+ IMG_UINT32 ui32MAXDMCount;
+ IMG_UINT32 ui32MAXDMMTSCount;
+ IMG_UINT32 ui32MAXDustCount;
+#define MAX_BVNC_STRING_LEN (50)
+ IMG_PCHAR pszBVNCString;
+} PVRSRV_DEVICE_FEATURE_CONFIG;
+
+/* This is used to get the value of a specific feature.
+ * Note that it will assert if the feature is disabled or its value is invalid. */
+#define RGX_GET_FEATURE_VALUE(psDevInfo, Feature) \
+ ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] )
+
+/* This is used to check whether a feature with a value is available on the currently running BVNC */
+#define RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, Feature) \
+ ( psDevInfo->sDevFeatureCfg.ui32FeaturesValues[RGX_FEATURE_##Feature##_IDX] < RGX_FEATURE_VALUE_DISABLED )
+
+/* This is used to check whether a feature WITHOUT a value is available on the currently running BVNC */
+#define RGX_IS_FEATURE_SUPPORTED(psDevInfo, Feature) \
+ ( psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_##Feature##_BIT_MASK)
+
+/* This is used to check whether the ERN is available on the currently running BVNC */
+#define RGX_IS_ERN_SUPPORTED(psDevInfo, ERN) \
+ ( psDevInfo->sDevFeatureCfg.ui64ErnsBrns & HW_ERN_##ERN##_BIT_MASK)
+
+/* This is used to check whether the BRN is available on the currently running BVNC */
+#define RGX_IS_BRN_SUPPORTED(psDevInfo, BRN) \
+ ( psDevInfo->sDevFeatureCfg.ui64ErnsBrns & FIX_HW_BRN_##BRN##_BIT_MASK)
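+
+/*
+ * Usage sketch (illustrative only): guarding code on a feature and then
+ * querying its value. "META" stands in for any generated RGX_FEATURE_* name:
+ *
+ *     if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ *     {
+ *         IMG_UINT32 ui32MetaVersion = RGX_GET_FEATURE_VALUE(psDevInfo, META);
+ *         ...
+ *     }
+ */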
+
+/* there is a corresponding define in rgxapi.h */
+#define RGX_MAX_TIMER_QUERIES 16
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+/*
+ For the workload estimation return data array, the maximum number of
+ commands the MTS can have is 255, therefore 512 (LOG2 = 9) is large enough
+ to account for all corner cases.
+*/
+#define RETURN_DATA_ARRAY_SIZE_LOG2 (9)
+#define RETURN_DATA_ARRAY_SIZE ((1UL) << RETURN_DATA_ARRAY_SIZE_LOG2)
+#define RETURN_DATA_ARRAY_WRAP_MASK (RETURN_DATA_ARRAY_SIZE - 1)
+
+#define WORKLOAD_HASH_SIZE_LOG2 6
+#define WORKLOAD_HASH_SIZE ((1UL) << WORKLOAD_HASH_SIZE_LOG2)
+#define WORKLOAD_HASH_WRAP_MASK (WORKLOAD_HASH_SIZE - 1)
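+
+/*
+ * Because the sizes above are powers of two, ring and hash indices can wrap
+ * with a cheap bitwise AND instead of a modulo. A sketch with hypothetical
+ * index variables:
+ *
+ *     ui32WriteOffset = (ui32WriteOffset + 1) & RETURN_DATA_ARRAY_WRAP_MASK;
+ *     ui32HashSlot    = ui32HashValue & WORKLOAD_HASH_WRAP_MASK;
+ */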
+
+typedef struct _RGX_WORKLOAD_TA3D_
+{
+ IMG_UINT32 ui32RenderTargetSize;
+ IMG_UINT32 ui32NumberOfDrawCalls;
+ IMG_UINT32 ui32NumberOfIndices;
+ IMG_UINT32 ui32NumberOfMRTs;
+} RGX_WORKLOAD_TA3D;
+
+typedef struct _WORKLOAD_MATCHING_DATA_
+{
+ POS_LOCK psHashLock;
+ HASH_TABLE *psHashTable;
+ RGX_WORKLOAD_TA3D asHashKeys[WORKLOAD_HASH_SIZE];
+ IMG_UINT64 aui64HashData[WORKLOAD_HASH_SIZE];
+ IMG_UINT32 ui32HashArrayWO;
+
+} WORKLOAD_MATCHING_DATA;
+
+typedef struct _WORKEST_HOST_DATA_
+{
+ WORKLOAD_MATCHING_DATA sWorkloadMatchingDataTA;
+ WORKLOAD_MATCHING_DATA sWorkloadMatchingData3D;
+ IMG_UINT32 ui32WorkEstCCBReceived;
+} WORKEST_HOST_DATA;
+
+typedef struct _WORKEST_RETURN_DATA_
+{
+ WORKEST_HOST_DATA *psWorkEstHostData;
+ WORKLOAD_MATCHING_DATA *psWorkloadMatchingData;
+ RGX_WORKLOAD_TA3D sWorkloadCharacteristics;
+} WORKEST_RETURN_DATA;
+#endif
+
+
+typedef struct
+{
+#if defined(PDUMP)
+ IMG_HANDLE hPdumpPages;
+#endif
+ PG_HANDLE sPages;
+ IMG_DEV_PHYADDR sPhysAddr;
+} RGX_MIPS_ADDRESS_TRAMPOLINE;
+
+
+/*!
+ ******************************************************************************
+ * RGX Device info
+ *****************************************************************************/
+
+typedef struct _PVRSRV_RGXDEV_INFO_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ PVRSRV_DEVICE_FEATURE_CONFIG sDevFeatureCfg;
+
+ /* FIXME: This is a workaround due to having 2 inits but only 1 deinit */
+ IMG_BOOL bDevInit2Done;
+
+ IMG_BOOL bFirmwareInitialised;
+ IMG_BOOL bPDPEnabled;
+
+ IMG_HANDLE hDbgReqNotify;
+
+ /* Kernel mode linear address of device registers */
+ void __iomem *pvRegsBaseKM;
+
+ /* FIXME: The alloc for this should go through OSAllocMem in future */
+ IMG_HANDLE hRegMapping;
+
+ /* System physical address of device registers*/
+ IMG_CPU_PHYADDR sRegsPhysBase;
+ /* Register region size in bytes */
+ IMG_UINT32 ui32RegSize;
+
+ PVRSRV_STUB_PBDESC *psStubPBDescListKM;
+
+ /* Firmware memory context info */
+ DEVMEM_CONTEXT *psKernelDevmemCtx;
+ DEVMEM_HEAP *psFirmwareMainHeap;
+ DEVMEM_HEAP *psFirmwareConfigHeap;
+ MMU_CONTEXT *psKernelMMUCtx;
+
+ void *pvDeviceMemoryHeap;
+
+ /* Kernel CCB */
+ DEVMEM_MEMDESC *psKernelCCBCtlMemDesc; /*!< memdesc for Kernel CCB control */
+ RGXFWIF_CCB_CTL *psKernelCCBCtl; /*!< kernel mapping for Kernel CCB control */
+ DEVMEM_MEMDESC *psKernelCCBMemDesc; /*!< memdesc for Kernel CCB */
+ IMG_UINT8 *psKernelCCB; /*!< kernel mapping for Kernel CCB */
+
+ /* Firmware CCB */
+ DEVMEM_MEMDESC *psFirmwareCCBCtlMemDesc; /*!< memdesc for Firmware CCB control */
+ RGXFWIF_CCB_CTL *psFirmwareCCBCtl; /*!< kernel mapping for Firmware CCB control */
+ DEVMEM_MEMDESC *psFirmwareCCBMemDesc; /*!< memdesc for Firmware CCB */
+ IMG_UINT8 *psFirmwareCCB; /*!< kernel mapping for Firmware CCB */
+
+ /* Workload Estimation Firmware CCB */
+ DEVMEM_MEMDESC *psWorkEstFirmwareCCBCtlMemDesc; /*!< memdesc for Workload Estimation Firmware CCB control */
+ RGXFWIF_CCB_CTL *psWorkEstFirmwareCCBCtl; /*!< kernel mapping for Workload Estimation Firmware CCB control */
+ DEVMEM_MEMDESC *psWorkEstFirmwareCCBMemDesc; /*!< memdesc for Workload Estimation Firmware CCB */
+ IMG_UINT8 *psWorkEstFirmwareCCB; /*!< kernel mapping for Workload Estimation Firmware CCB */
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ /* Counter dumping */
+ DEVMEM_MEMDESC *psCounterBufferMemDesc; /*!< mem desc for counter dumping buffer */
+ POS_LOCK hCounterDumpingLock; /*!< Lock for guarding access to counter dumping buffer */
+#endif
+
+ IMG_BOOL bEnableFWPoisonOnFree; /*!< Enable poisoning of FW allocations when freed */
+ IMG_BYTE ubFWPoisonOnFreeValue; /*!< Byte value used when poisoning FW allocations */
+
+ /*
+ if we don't preallocate the pagetables we must
+ insert newly allocated page tables dynamically
+ */
+ void *pvMMUContextList;
+
+ IMG_UINT32 ui32ClkGateStatusReg;
+ IMG_UINT32 ui32ClkGateStatusMask;
+
+ DEVMEM_MEMDESC *psRGXFWCodeMemDesc;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ DEVMEM_MEMDESC *psRGXFWDataMemDesc;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ RGX_MIPS_ADDRESS_TRAMPOLINE *psTrampoline;
+
+ DEVMEM_MEMDESC *psRGXFWCorememMemDesc;
+ IMG_DEV_VIRTADDR sFWCorememCodeDevVAddrBase;
+ RGXFWIF_DEV_VIRTADDR sFWCorememCodeFWAddr;
+
+#if defined(RGXFW_ALIGNCHECKS)
+ DEVMEM_MEMDESC *psRGXFWAlignChecksMemDesc;
+#endif
+
+ DEVMEM_MEMDESC *psRGXFWSigTAChecksMemDesc;
+ IMG_UINT32 ui32SigTAChecksSize;
+
+ DEVMEM_MEMDESC *psRGXFWSig3DChecksMemDesc;
+ IMG_UINT32 ui32Sig3DChecksSize;
+
+ DEVMEM_MEMDESC *psRGXFWSigRTChecksMemDesc;
+ IMG_UINT32 ui32SigRTChecksSize;
+
+ DEVMEM_MEMDESC *psRGXFWSigSHChecksMemDesc;
+ IMG_UINT32 ui32SigSHChecksSize;
+
+#if defined (PDUMP)
+ IMG_BOOL bDumpedKCCBCtlAlready;
+ IMG_UINT32 ui32LastBlockKCCBCtrlDumped; /* To be used in block-mode of pdump - Last pdump-block where we dumped KCCBCtrl */
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hRGXFWIfBufInitLock; /*!< trace buffer lock for initialisation phase */
+#endif
+
+ DEVMEM_MEMDESC *psRGXFWIfTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */
+ DEVMEM_MEMDESC *psRGXFWIfTraceBufferMemDesc[RGXFW_THREAD_NUM]; /*!< memdesc of actual FW trace (log) buffer(s) */
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf; /* structure containing trace control data and actual trace buffer */
+
+ DEVMEM_MEMDESC *psRGXFWIfGuestTraceBufCtlMemDesc; /*!< memdesc of trace buffer control structure */
+ RGXFWIF_TRACEBUF *psRGXFWIfGuestTraceBuf;
+
+ DEVMEM_MEMDESC *psRGXFWIfTBIBufferMemDesc; /*!< memdesc of actual FW TBI buffer */
+ RGXFWIF_DEV_VIRTADDR sRGXFWIfTBIBuffer; /* TBI buffer data */
+
+ DEVMEM_MEMDESC *psRGXFWIfGuestHWRInfoBufCtlMemDesc;
+ RGXFWIF_HWRINFOBUF *psRGXFWIfGuestHWRInfoBuf;
+
+ DEVMEM_MEMDESC *psRGXFWIfHWRInfoBufCtlMemDesc;
+ RGXFWIF_HWRINFOBUF *psRGXFWIfHWRInfoBuf;
+
+ DEVMEM_MEMDESC *psRGXFWIfGpuUtilFWCbCtlMemDesc;
+ RGXFWIF_GPU_UTIL_FWCB *psRGXFWIfGpuUtilFWCb;
+
+ DEVMEM_MEMDESC *psRGXFWIfHWPerfBufMemDesc;
+ IMG_BYTE *psRGXFWIfHWPerfBuf;
+ IMG_UINT32 ui32RGXFWIfHWPerfBufSize; /* in bytes */
+
+ DEVMEM_MEMDESC *psRGXFWIfCorememDataStoreMemDesc;
+
+ DEVMEM_MEMDESC *psRGXFWIfRegCfgMemDesc;
+
+ DEVMEM_MEMDESC *psRGXFWIfHWPerfCountersMemDesc;
+ DEVMEM_MEMDESC *psRGXFWIfInitMemDesc;
+ DEVMEM_MEMDESC *psRGXFWIfOSConfigDesc;
+ RGXFWIF_OS_CONFIG *psFWIfOSConfig;
+ RGXFWIF_DEV_VIRTADDR sFWInitFWAddr;
+
+ DEVMEM_MEMDESC *psRGXFWIfRuntimeCfgMemDesc;
+ RGXFWIF_RUNTIME_CFG *psRGXFWIfRuntimeCfg;
+
+ /* Additional guest firmware memory context info */
+ DEVMEM_HEAP *psGuestFirmwareRawHeap[RGXFW_NUM_OS];
+ DEVMEM_MEMDESC *psGuestFirmwareRawMemDesc[RGXFW_NUM_OS];
+ DEVMEM_MEMDESC *psGuestFirmwareMainMemDesc[RGXFW_NUM_OS];
+ DEVMEM_MEMDESC *psGuestFirmwareConfigMemDesc[RGXFW_NUM_OS];
+
+ DEVMEM_MEMDESC *psMETAT1StackMemDesc;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Array to store data needed for workload estimation when a workload
+ has finished and its cycle time is returned to the host. */
+ WORKEST_RETURN_DATA asReturnData[RETURN_DATA_ARRAY_SIZE];
+ IMG_UINT32 ui32ReturnDataWO;
+#endif
+
+#if defined (SUPPORT_PDVFS)
+ /**
+ * Host memdesc and pointer to memory containing core clock rate in Hz.
+ * The firmware updates this memory when changing the core clock rate over
+ * GPIO.
+ * Note: the shared memory needs atomic access from both the host driver and
+ * the firmware, hence its size should not be greater than the memory
+ * transaction granularity. Currently it is chosen to be 32 bits.
+ */
+ DEVMEM_MEMDESC *psRGXFWIFCoreClkRateMemDesc;
+ volatile IMG_UINT32 *pui32RGXFWIFCoreClkRate;
+ /**
+ * Last sampled core clk rate.
+ */
+ volatile IMG_UINT32 ui32CoreClkRateSnapshot;
+#endif
+
+ /*
+ HWPerf data for the RGX device
+ */
+
+ POS_LOCK hHWPerfLock; /*! Critical section lock that protects the HWPerf
+ * code from duplicate init/deinit by multiple
+ * threads, and from loss/freeing of FW & Host
+ * resources while in use by another thread,
+ * e.g. the MISR. */
+
+ IMG_UINT64 ui64HWPerfFilter; /*! Event filter for FW events (settable by AppHint) */
+ IMG_HANDLE hHWPerfStream; /*! TL Stream buffer (L2) for firmware event stream */
+ IMG_UINT32 ui32MaxPacketSize;/*!< Max allowed packet size */
+
+ IMG_UINT32 ui32HWPerfHostFilter; /*! Event filter for HWPerfHost stream (settable by AppHint) */
+ POS_LOCK hLockHWPerfHostStream; /*! Lock guarding access to HWPerfHost stream from multiple threads */
+ IMG_HANDLE hHWPerfHostStream; /*! TL Stream buffer for host only event stream */
+ IMG_UINT32 ui32HWPerfHostBufSize; /*! Host side buffer size in bytes */
+ IMG_UINT32 ui32HWPerfHostLastOrdinal; /*! Ordinal of the last packet emitted in HWPerfHost TL stream.
+ * Guarded by hLockHWPerfHostStream */
+ IMG_UINT32 ui32HWPerfHostNextOrdinal; /*! Ordinal number for HWPerfHost events. Guarded by hHWPerfHostSpinLock */
+ IMG_UINT8 *pui8DeferredEvents; /*! List of HWPerfHost events yet to be emitted in the TL stream.
+ * Events generated from atomic context are deferred and
+ * "emitted" later, as the "emission" code can sleep */
+ IMG_UINT16 ui16DEReadIdx; /*! Read index in the above deferred events buffer */
+ IMG_UINT16 ui16DEWriteIdx; /*! Write index in the above deferred events buffer */
+ void *pvHostHWPerfMISR; /*! MISR to emit pending/deferred events in HWPerfHost TL stream */
+ POS_SPINLOCK hHWPerfHostSpinLock; /*! Guards data shared between an atomic & sleepable-context */
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ IMG_UINT32 ui32DEHighWatermark; /*! High watermark of deferred events buffer usage. Protected by
+ *! hHWPerfHostSpinLock */
+ /* Max number of times DeferredEmission waited for an atomic context to "finish" a packet write */
+ IMG_UINT32 ui32WaitForAtomicCtxPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+ /* Whether a warning has been logged about an atomic-context packet loss (due to waiting too long for a "write" to finish) */
+ IMG_BOOL bWarnedAtomicCtxPktLost;
+ /* Max number of times DeferredEmission scheduled out to give the right-ordinal packet a chance to be emitted */
+ IMG_UINT32 ui32WaitForRightOrdPktHighWatermark; /*! Protected by hLockHWPerfHostStream */
+ /* Whether a warning has been logged about a packet loss (due to waiting too long for the right ordinal to emit) */
+ IMG_BOOL bWarnedPktOrdinalBroke;
+#endif
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ void *pvGpuFtraceData;
+#endif
+
+ /* Poll data for detecting firmware fatal errors */
+ IMG_UINT32 aui32CrLastPollAddr[RGXFW_THREAD_NUM];
+ IMG_UINT32 ui32KCCBCmdsExecutedLastTime;
+ IMG_BOOL bKCCBCmdsWaitingLastTime;
+ IMG_UINT32 ui32GEOTimeoutsLastTime;
+
+ /* Client stall detection */
+ IMG_UINT32 ui32StalledClientMask;
+
+ IMG_BOOL bWorkEstEnabled;
+ IMG_BOOL bPDVFSEnabled;
+
+ void *pvLISRData;
+ void *pvMISRData;
+ void *pvAPMISRData;
+ RGX_ACTIVEPM_CONF eActivePMConf;
+
+ volatile IMG_UINT32 aui32SampleIRQCount[RGXFW_THREAD_NUM];
+
+ DEVMEM_MEMDESC *psRGXFaultAddressMemDesc;
+
+ DEVMEM_MEMDESC *psSLC3FenceMemDesc;
+
+ /* If we do 10 deferred memory allocations per second, then the ID would wrap around after 13 years */
+ IMG_UINT32 ui32ZSBufferCurrID; /*!< ID assigned to the next deferred devmem allocation */
+ IMG_UINT32 ui32FreelistCurrID; /*!< ID assigned to the next freelist */
+ IMG_UINT32 ui32RPMFreelistCurrID; /*!< ID assigned to the next RPM freelist */
+
+ POS_LOCK hLockZSBuffer; /*!< Lock to protect simultaneous access to ZSBuffers */
+ DLLIST_NODE sZSBufferHead; /*!< List of on-demand ZSBuffers */
+ POS_LOCK hLockFreeList; /*!< Lock to protect simultaneous access to Freelists */
+ DLLIST_NODE sFreeListHead; /*!< List of growable Freelists */
+ POS_LOCK hLockRPMFreeList; /*!< Lock to protect simultaneous access to RPM Freelists */
+ DLLIST_NODE sRPMFreeListHead; /*!< List of growable RPM Freelists */
+ POS_LOCK hLockRPMContext; /*!< Lock to protect simultaneous access to RPM contexts */
+ PSYNC_PRIM_CONTEXT hSyncPrimContext;
+ PVRSRV_CLIENT_SYNC_PRIM *psPowSyncPrim;
+
+ IMG_UINT32 ui32ActivePMReqOk;
+ IMG_UINT32 ui32ActivePMReqDenied;
+ IMG_UINT32 ui32ActivePMReqNonIdle;
+ IMG_UINT32 ui32ActivePMReqTotal;
+
+ IMG_HANDLE hProcessQueuesMISR;
+
+ IMG_UINT32 ui32DeviceFlags; /*!< Flags to track general device state */
+
+ /* Timer Queries */
+ IMG_UINT32 ui32ActiveQueryId; /*!< ID of the active timer query slot */
+ IMG_BOOL bSaveStart; /*!< save the start time of the next kick on the device*/
+ IMG_BOOL bSaveEnd; /*!< save the end time of the next kick on the device*/
+
+ DEVMEM_MEMDESC *psStartTimeMemDesc; /*!< memdesc for Start Times */
+ IMG_UINT64 *pui64StartTimeById; /*!< CPU mapping of the above */
+
+ DEVMEM_MEMDESC *psEndTimeMemDesc; /*!< memdesc for End Timer */
+ IMG_UINT64 *pui64EndTimeById; /*!< CPU mapping of the above */
+
+ IMG_UINT32 aui32ScheduledOnId[RGX_MAX_TIMER_QUERIES]; /*!< kicks Scheduled on QueryId */
+ DEVMEM_MEMDESC *psCompletedMemDesc; /*!< kicks Completed on QueryId */
+ IMG_UINT32 *pui32CompletedById; /*!< CPU mapping of the above */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hTimerQueryLock; /*!< lock to protect simultaneous access to timer query members */
+#endif
+
+ /* GPU DVFS Table */
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable;
+
+ /* Pointer to function returning the GPU utilisation statistics since the last
+ * time the function was called. Supports different users at the same time.
+ *
+ * psReturnStats [out]: GPU utilisation statistics (active high/active low/idle/blocked)
+ * in microseconds since the last time the function was called
+ * by a specific user (identified by hGpuUtilUser)
+ *
+ * Returns PVRSRV_OK in case the call completed without errors,
+ * some other value otherwise.
+ */
+ PVRSRV_ERROR (*pfnGetGpuUtilStats) (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hGpuUtilUser,
+ RGXFWIF_GPU_UTIL_STATS *psReturnStats);
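+
+ /* Caller sketch (illustrative only), where hGpuUtilUser is a hypothetical
+ * handle previously registered with the utilisation stats machinery:
+ *
+ * RGXFWIF_GPU_UTIL_STATS sStats;
+ * eError = psDevInfo->pfnGetGpuUtilStats(psDevInfo->psDeviceNode,
+ * hGpuUtilUser, &sStats);
+ *
+ * The returned numbers are only meaningful when eError == PVRSRV_OK and
+ * sStats.bValid is IMG_TRUE.
+ */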
+
+ POS_LOCK hGPUUtilLock;
+
+ /* Register configuration */
+ RGX_REG_CONFIG sRegCongfig;
+
+ IMG_BOOL bRGXPowered;
+ DLLIST_NODE sMemoryContextList;
+
+ POSWR_LOCK hRenderCtxListLock;
+ POSWR_LOCK hComputeCtxListLock;
+ POSWR_LOCK hTransferCtxListLock;
+ POSWR_LOCK hTDMCtxListLock;
+ POSWR_LOCK hRaytraceCtxListLock;
+ POSWR_LOCK hMemoryCtxListLock;
+ POSWR_LOCK hKickSyncCtxListLock;
+
+ /* Linked list of deferred KCCB commands due to a full KCCB */
+ POS_LOCK hLockKCCBDeferredCommandsList;
+ DLLIST_NODE sKCCBDeferredCommandsListHead;
+
+ /* Linked lists of contexts on this device */
+ DLLIST_NODE sRenderCtxtListHead;
+ DLLIST_NODE sComputeCtxtListHead;
+ DLLIST_NODE sTransferCtxtListHead;
+ DLLIST_NODE sTDMCtxtListHead;
+ DLLIST_NODE sRaytraceCtxtListHead;
+ DLLIST_NODE sKickSyncCtxtListHead;
+
+ DLLIST_NODE sCommonCtxtListHead;
+ POSWR_LOCK hCommonCtxtListLock;
+ IMG_UINT32 ui32CommonCtxtCurrentID; /*!< ID assigned to the next common context */
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ POS_LOCK hDebugFaultInfoLock; /*!< Lock to protect the debug fault info list */
+ POS_LOCK hMMUCtxUnregLock; /*!< Lock to protect list of unregistered MMU contexts */
+#endif
+
+ POS_LOCK hNMILock; /*!< Lock to protect NMI operations */
+
+ RGX_DUST_STATE sDustReqState;
+
+ RGX_LAYER_PARAMS sLayerParams;
+
+ RGXFWIF_DM eBPDM; /*!< Current breakpoint data master */
+ IMG_BOOL bBPSet; /*!< A Breakpoint has been set */
+ POS_LOCK hBPLock; /*!< Lock for break point operations */
+
+ IMG_UINT32 ui32CoherencyTestsDone;
+
+ ATOMIC_T iCCBSubmissionOrdinal; /* Rolling count used to indicate CCB submission order (all CCBs) */
+ POS_LOCK hCCBRecoveryLock; /* Lock to protect pvEarliestStalledClientCCB and ui32OldestSubmissionOrdinal variables*/
+ void *pvEarliestStalledClientCCB; /* Will point to cCCB command to unblock in the event of a stall */
+ IMG_UINT32 ui32OldestSubmissionOrdinal; /* Earliest submission ordinal of CCB entry found so far */
+
+ POS_LOCK hCCBStallCheckLock; /* Lock used to guard against multiple threads simultaneously checking for stalled CCBs */
+
+ IMG_UINT32 ui32ExpectedPartialFWCCBCmd; /* Partial FWCCB command expected from the FW */
+} PVRSRV_RGXDEV_INFO;
+
+
+
+typedef struct _RGX_TIMING_INFORMATION_
+{
+ /*! GPU default core clock speed in Hz */
+ IMG_UINT32 ui32CoreClockSpeed;
+
+ /*! Active Power Management: GPU actively requests the host driver to be powered off */
+ IMG_BOOL bEnableActivePM;
+
+ /*! Enable the GPU to power off internal Power Islands independently from the host driver */
+ IMG_BOOL bEnableRDPowIsland;
+
+ /*! Active Power Management: Delay between the GPU idle and the request to the host */
+ IMG_UINT32 ui32ActivePMLatencyms;
+
+} RGX_TIMING_INFORMATION;
+
+typedef struct _RGX_DATA_
+{
+ /*! Timing information */
+ RGX_TIMING_INFORMATION *psRGXTimingInfo;
+ IMG_BOOL bHasTDFWCodePhysHeap;
+ IMG_UINT32 uiTDFWCodePhysHeapID;
+ IMG_BOOL bHasTDSecureBufPhysHeap;
+ IMG_UINT32 uiTDSecureBufPhysHeapID;
+} RGX_DATA;
+
+
+/*
+ RGX PDUMP register bank name (prefix)
+*/
+#define RGX_PDUMPREG_NAME "RGXREG"
+
+#endif /* __RGXDEVICE_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfw_log_helper.h b/drivers/gpu/drm/img-rogue/1.10/rgxfw_log_helper.h
new file mode 100644
index 00000000000000..05f990fc8b7183
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfw_log_helper.h
@@ -0,0 +1,76 @@
+/*************************************************************************/ /*!
+@File rgxfw_log_helper.h
+@Title Firmware TBI logging helper function
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Platform Generic
+@Description This file contains some helper code to make TBI logging possible.
+ Specifically, it uses the SFIDLIST xmacro to trace ids back to
+ the original strings.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef _RGXFW_LOG_HELPER_H_
+#define _RGXFW_LOG_HELPER_H_
+
+#include "rgx_fwif_sf.h"
+
+static IMG_CHAR *const groups[] = {
+#define X(A,B) #B,
+ RGXFW_LOG_SFGROUPLIST
+#undef X
+};
+
+/* idToStringID: Search the SFs tuples {id, string} for a matching id.
+ * Returns the index into the array if found, or RGXFW_SF_LAST if not.
+ * bsearch could be used, as the ids are in increasing order. */
+static IMG_UINT32 idToStringID(IMG_UINT32 ui32CheckData, const RGXFW_STID_FMT *const psSFs)
+{
+ IMG_UINT32 i = 0, ui32Id = RGXFW_SF_LAST;
+
+ for ( i = 0 ; psSFs[i].ui32Id != RGXFW_SF_LAST ; i++)
+ {
+ if ( ui32CheckData == psSFs[i].ui32Id )
+ {
+ ui32Id = i;
+ break;
+ }
+ }
+ return ui32Id;
+}
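+
+/* Usage sketch (illustrative only): mapping a trace word back to its format
+ * string, where SFs is the generated {id, string} table from rgx_fwif_sf.h
+ * (the table name is assumed here for illustration):
+ *
+ *     IMG_UINT32 ui32Idx = idToStringID(ui32TraceWord, SFs);
+ *     if (ui32Idx != RGXFW_SF_LAST)
+ *     {
+ *         ... use SFs[ui32Idx] to format the trace entry ...
+ *     }
+ */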
+
+#endif /* _RGXFW_LOG_HELPER_H_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.c b/drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.c
new file mode 100644
index 00000000000000..db2a800690ba87
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.c
@@ -0,0 +1,1010 @@
+/*************************************************************************/ /*!
+@File
+@Title Services Firmware image utilities used at init time
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Services Firmware image utilities used at init time
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxfwimageutils.h"
+
+
+/************************************************************************
+* FW Segments configuration
+************************************************************************/
+typedef struct _RGX_FW_SEGMENT_
+{
+ IMG_UINT32 ui32SegId; /*!< Segment Id */
+ IMG_UINT32 ui32SegStartAddr; /*!< Segment Start Addr */
+ IMG_UINT32 ui32SegAllocSize; /*!< Amount of memory to allocate for that segment */
+ IMG_UINT32 ui32FWMemOffset; /*!< Offset of this segment in the collated FW mem allocation */
+ const IMG_CHAR *pszSegName;
+} RGX_FW_SEGMENT;
+
+typedef struct _RGX_FW_SEGMENT_LIST_
+{
+ RGX_FW_SEGMENT *psRGXFWCodeSeg;
+ RGX_FW_SEGMENT *psRGXFWDataSeg;
+ IMG_UINT32 ui32CodeSegCount;
+ IMG_UINT32 ui32DataSegCount;
+} RGX_FW_SEGMENT_LIST;
+
+
+static RGX_FW_SEGMENT asRGXMetaFWCodeSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{RGXFW_SEGMMU_TEXT_ID, RGXFW_BOOTLDR_META_ADDR, 0x31000, 0, "Bootldr and Code"}, /* Has to be the first one to get the proper DevV addr */
+};
+static RGX_FW_SEGMENT asRGXMetaFWDataSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{RGXFW_SEGMMU_DATA_ID, 0x38880000, 0x17000, 0, "Local Shared and Data"},
+};
+#define RGXFW_META_NUM_CODE_SEGMENTS (sizeof(asRGXMetaFWCodeSegments)/sizeof(asRGXMetaFWCodeSegments[0]))
+#define RGXFW_META_NUM_DATA_SEGMENTS (sizeof(asRGXMetaFWDataSegments)/sizeof(asRGXMetaFWDataSegments[0]))
+
+static RGX_FW_SEGMENT asRGXMipsFWCodeSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{ 0, RGXMIPSFW_BOOT_NMI_CODE_VIRTUAL_BASE, RGXMIPSFW_BOOT_NMI_CODE_SIZE, RGXMIPSFW_BOOT_NMI_CODE_OFFSET, "Bootldr and NMI code"},
+{ 1, RGXMIPSFW_EXCEPTIONS_VIRTUAL_BASE, RGXMIPSFW_EXCEPTIONSVECTORS_SIZE, RGXMIPSFW_EXCEPTIONSVECTORS_OFFSET, "Exception vectors"},
+{ 2, RGXMIPSFW_CODE_VIRTUAL_BASE, RGXMIPSFW_CODE_SIZE, RGXMIPSFW_CODE_OFFSET, "Text"},
+};
+static RGX_FW_SEGMENT asRGXMipsFWDataSegments[] = {
+/* Seg ID Seg Start Addr Alloc size FWMem offset Name */
+{ 3, RGXMIPSFW_BOOT_NMI_DATA_VIRTUAL_BASE, RGXMIPSFW_BOOT_NMI_DATA_SIZE, RGXMIPSFW_BOOT_NMI_DATA_OFFSET, "Bootldr and NMI data"},
+{ 4, RGXMIPSFW_DATA_VIRTUAL_BASE, RGXMIPSFW_DATA_SIZE, RGXMIPSFW_DATA_OFFSET, "Local Data"},
+{ 5, RGXMIPSFW_STACK_VIRTUAL_BASE, RGXMIPSFW_STACK_SIZE, RGXMIPSFW_DATA_SIZE, "Stack"},
+};
+
+#define RGXFW_MIPS_NUM_CODE_SEGMENTS (sizeof(asRGXMipsFWCodeSegments)/sizeof(asRGXMipsFWCodeSegments[0]))
+#define RGXFW_MIPS_NUM_DATA_SEGMENTS (sizeof(asRGXMipsFWDataSegments)/sizeof(asRGXMipsFWDataSegments[0]))
+
+/*!
+*******************************************************************************
+
+ @Function FindMMUSegment
+
+ @Description Given a 32 bit FW address, attempt to find the corresponding
+ pointer into the FW allocation
+
+ @Input ui32OffsetIn : 32 bit FW address
+ @Input pvHostFWCodeAddr : Pointer to FW code
+ @Input pvHostFWDataAddr : Pointer to FW data
+ @Input psRGXFWSegList : List of FW code and data segments
+ @Output uiHostAddrOut : CPU pointer equivalent to ui32OffsetIn
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR FindMMUSegment(IMG_UINT32 ui32OffsetIn,
+ void *pvHostFWCodeAddr,
+ void *pvHostFWDataAddr,
+ void **uiHostAddrOut,
+ RGX_FW_SEGMENT_LIST *psRGXFWSegList)
+{
+ RGX_FW_SEGMENT *psSegArr;
+ IMG_UINT32 i;
+
+ psSegArr = psRGXFWSegList->psRGXFWCodeSeg;
+ for (i = 0; i < psRGXFWSegList->ui32CodeSegCount; i++)
+ {
+ if ((ui32OffsetIn >= psSegArr[i].ui32SegStartAddr) &&
+ (ui32OffsetIn < (psSegArr[i].ui32SegStartAddr + psSegArr[i].ui32SegAllocSize)))
+ {
+ *uiHostAddrOut = pvHostFWCodeAddr;
+ goto found;
+ }
+ }
+
+ psSegArr = psRGXFWSegList->psRGXFWDataSeg;
+ for (i = 0; i < psRGXFWSegList->ui32DataSegCount; i++)
+ {
+ if ((ui32OffsetIn >= psSegArr[i].ui32SegStartAddr) &&
+ (ui32OffsetIn < (psSegArr[i].ui32SegStartAddr + psSegArr[i].ui32SegAllocSize)))
+ {
+ *uiHostAddrOut = pvHostFWDataAddr;
+ goto found;
+ }
+ }
+
+ return PVRSRV_ERROR_INIT_FAILURE;
+
+found:
+ /* Direct Mem write to mapped memory */
+ ui32OffsetIn -= psSegArr[i].ui32SegStartAddr;
+ ui32OffsetIn += psSegArr[i].ui32FWMemOffset;
+
+ /* Add offset to pointer to FW allocation only if
+ * that allocation is available
+ */
+ if (*uiHostAddrOut)
+ {
+ *(IMG_UINT8 **)uiHostAddrOut += ui32OffsetIn;
+ }
+
+ return PVRSRV_OK;
+}
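+
+/* Illustrative call (hypothetical values): FW address 0x38880010 falls in the
+ * META data segment (start 0x38880000), so on success pvWriteAddr would point
+ * at offset 0x10 into the host FW data allocation:
+ *
+ *     void *pvWriteAddr;
+ *     eError = FindMMUSegment(0x38880010, pvHostFWCodeAddr, pvHostFWDataAddr,
+ *                             &pvWriteAddr, &sRGXFWSegList);
+ */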
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXFWConfigureSegID
+
+ @Description Configures a single segment of the Segment MMU
+ (base, limit and out_addr)
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64SegOutAddr : Segment output base address (40 bit devVaddr)
+ @Input ui32SegBase : Segment input base address (32 bit FW address)
+ @Input ui32SegLimit : Segment size
+ @Input ui32SegID : Segment ID
+ @Input pszName : Segment name
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return void
+
+******************************************************************************/
+static void RGXFWConfigureSegID(const void *hPrivate,
+ IMG_UINT64 ui64SegOutAddr,
+ IMG_UINT32 ui32SegBase,
+ IMG_UINT32 ui32SegLimit,
+ IMG_UINT32 ui32SegID,
+ const IMG_CHAR *pszName,
+ IMG_UINT32 **ppui32BootConf)
+{
+ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+ IMG_UINT32 ui32SegOutAddr0 = ui64SegOutAddr & 0x00000000FFFFFFFFUL;
+ IMG_UINT32 ui32SegOutAddr1 = (ui64SegOutAddr >> 32) & 0x00000000FFFFFFFFUL;
+
+ /* META segments have a minimum size */
+ IMG_UINT32 ui32LimitOff = (ui32SegLimit < RGXFW_SEGMMU_ALIGN) ?
+ RGXFW_SEGMMU_ALIGN : ui32SegLimit;
+ /* the limit is an offset, therefore off = size - 1 */
+ ui32LimitOff -= 1;
+
+ RGXCommentLog(hPrivate,
+ "* FW %s - seg%d: meta_addr = 0x%08x, devv_addr = 0x%llx, limit = 0x%x",
+ pszName, ui32SegID,
+ ui32SegBase, (unsigned long long)ui64SegOutAddr,
+ ui32LimitOff);
+
+ ui32SegBase |= RGXFW_SEGMMU_ALLTHRS_WRITEABLE;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_BASE(ui32SegID);
+ *pui32BootConf++ = ui32SegBase;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_LIMIT(ui32SegID);
+ *pui32BootConf++ = ui32LimitOff;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA0(ui32SegID);
+ *pui32BootConf++ = ui32SegOutAddr0;
+
+ *pui32BootConf++ = META_CR_MMCU_SEGMENTn_OUTA1(ui32SegID);
+ *pui32BootConf++ = ui32SegOutAddr1;
+
+ *ppui32BootConf = pui32BootConf;
+}
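+
+/* The bootloader configuration stream written above is a flat sequence of
+ * {register, value} pairs, consumed later by the META bootloader. For one
+ * segment the layout is:
+ *
+ *     META_CR_MMCU_SEGMENTn_BASE(segID),  ui32SegBase | RGXFW_SEGMMU_ALLTHRS_WRITEABLE
+ *     META_CR_MMCU_SEGMENTn_LIMIT(segID), ui32LimitOff
+ *     META_CR_MMCU_SEGMENTn_OUTA0(segID), ui32SegOutAddr0
+ *     META_CR_MMCU_SEGMENTn_OUTA1(segID), ui32SegOutAddr1
+ */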
+
+/*!
+*******************************************************************************
+
+ @Function RGXFWConfigureSegMMU
+
+ @Description Configures META's Segment MMU
+
+ @Input hPrivate : Implementation specific data
+ @Input psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input psFWDataDevVAddrBase : FW data base device virtual address
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return void
+
+******************************************************************************/
+static void RGXFWConfigureSegMMU(const void *hPrivate,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_UINT32 **ppui32BootConf)
+{
+ IMG_UINT64 ui64SegOutAddrTop;
+ IMG_UINT32 i;
+
+ PVR_UNREFERENCED_PARAMETER(psFWCodeDevVAddrBase);
+
+ /* Configure Segment MMU */
+ RGXCommentLog(hPrivate, "********** FW configure Segment MMU **********");
+
+ if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+ {
+ ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_ERN_45914(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+ }
+ else
+ {
+ ui64SegOutAddrTop = RGXFW_SEGMMU_OUTADDR_TOP_PRE_S7(META_MMU_CONTEXT_MAPPING, RGXFW_SEGMMU_META_DM_ID);
+ }
+
+ for (i = 0; i < RGXFW_META_NUM_DATA_SEGMENTS ; i++)
+ {
+ IMG_UINT64 ui64SegOutAddr;
+
+ ui64SegOutAddr = (psFWDataDevVAddrBase->uiAddr | ui64SegOutAddrTop) +
+ asRGXMetaFWDataSegments[i].ui32FWMemOffset;
+
+ RGXFWConfigureSegID(hPrivate,
+ ui64SegOutAddr,
+ asRGXMetaFWDataSegments[i].ui32SegStartAddr,
+ asRGXMetaFWDataSegments[i].ui32SegAllocSize,
+ asRGXMetaFWDataSegments[i].ui32SegId,
+ asRGXMetaFWDataSegments[i].pszSegName,
+ ppui32BootConf); /* write the sequence to the bootldr */
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXFWConfigureMetaCaches
+
+ @Description Configure and enable the Meta instruction and data caches
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32NumThreads : Number of FW threads in use
+ @Input ui32MainThreadID : ID of the FW thread in use
+ (only meaningful if ui32NumThreads == 1)
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return void
+
+******************************************************************************/
+static void RGXFWConfigureMetaCaches(const void *hPrivate,
+ IMG_UINT32 ui32NumThreads,
+ IMG_UINT32 ui32MainThreadID,
+ IMG_UINT32 **ppui32BootConf)
+{
+ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+ IMG_UINT32 ui32DCacheT0, ui32ICacheT0;
+ IMG_UINT32 ui32DCacheT1, ui32ICacheT1;
+ IMG_UINT32 ui32DCacheT2, ui32ICacheT2;
+ IMG_UINT32 ui32DCacheT3, ui32ICacheT3;
+
+#define META_CR_MMCU_LOCAL_EBCTRL (0x04830600)
+#define META_CR_MMCU_LOCAL_EBCTRL_ICWIN (0x3 << 14)
+#define META_CR_MMCU_LOCAL_EBCTRL_DCWIN (0x3 << 6)
+#define META_CR_SYSC_DCPART(n) (0x04830200 + (n)*0x8)
+#define META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE (0x1 << 31)
+#define META_CR_SYSC_ICPART(n) (0x04830220 + (n)*0x8)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF (0x8 << 16)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE (0xF)
+#define META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE (0x7)
+#define META_CR_MMCU_DCACHE_CTRL (0x04830018)
+#define META_CR_MMCU_ICACHE_CTRL (0x04830020)
+#define META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN (0x1)
+
+ RGXCommentLog(hPrivate, "********** Meta caches configuration *********");
+
+ /* Initialise I/Dcache settings */
+ ui32DCacheT0 = ui32DCacheT1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ ui32DCacheT2 = ui32DCacheT3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE;
+ ui32ICacheT0 = ui32ICacheT1 = ui32ICacheT2 = ui32ICacheT3 = 0;
+
+ if (ui32NumThreads == 1)
+ {
+ if (ui32MainThreadID == 0)
+ {
+ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ }
+ else
+ {
+ ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE;
+ }
+ }
+ else
+ {
+ ui32DCacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+ ui32ICacheT0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE;
+
+ ui32DCacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+ ui32ICacheT1 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_HALF_CACHE |
+ META_CR_SYSC_XCPARTX_LOCAL_ADDR_OFFSET_TOP_HALF;
+ }
+
+ /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */
+ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL;
+ *pui32BootConf++ = META_CR_MMCU_LOCAL_EBCTRL_ICWIN |
+ META_CR_MMCU_LOCAL_EBCTRL_DCWIN;
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_MMCU_LOCAL_EBCTRL,
+ META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN);
+
+ /* Data cache partitioning thread 0 to 3 */
+ *pui32BootConf++ = META_CR_SYSC_DCPART(0);
+ *pui32BootConf++ = ui32DCacheT0;
+ *pui32BootConf++ = META_CR_SYSC_DCPART(1);
+ *pui32BootConf++ = ui32DCacheT1;
+ *pui32BootConf++ = META_CR_SYSC_DCPART(2);
+ *pui32BootConf++ = ui32DCacheT2;
+ *pui32BootConf++ = META_CR_SYSC_DCPART(3);
+ *pui32BootConf++ = ui32DCacheT3;
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(0), ui32DCacheT0);
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(1), ui32DCacheT1);
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(2), ui32DCacheT2);
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_DCPART(3), ui32DCacheT3);
+
+ /* Enable data cache hits */
+ *pui32BootConf++ = META_CR_MMCU_DCACHE_CTRL;
+ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_MMCU_DCACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ /* Instruction cache partitioning thread 0 to 3 */
+ *pui32BootConf++ = META_CR_SYSC_ICPART(0);
+ *pui32BootConf++ = ui32ICacheT0;
+ *pui32BootConf++ = META_CR_SYSC_ICPART(1);
+ *pui32BootConf++ = ui32ICacheT1;
+ *pui32BootConf++ = META_CR_SYSC_ICPART(2);
+ *pui32BootConf++ = ui32ICacheT2;
+ *pui32BootConf++ = META_CR_SYSC_ICPART(3);
+ *pui32BootConf++ = ui32ICacheT3;
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(0), ui32ICacheT0);
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(1), ui32ICacheT1);
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(2), ui32ICacheT2);
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_SYSC_ICPART(3), ui32ICacheT3);
+
+ /* Enable instruction cache hits */
+ *pui32BootConf++ = META_CR_MMCU_ICACHE_CTRL;
+ *pui32BootConf++ = META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN;
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ META_CR_MMCU_ICACHE_CTRL,
+ META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN);
+
+ *pui32BootConf++ = 0x040000C0;
+ *pui32BootConf++ = 0;
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x", 0x040000C0, 0);
+
+ *ppui32BootConf = pui32BootConf;
+}
+
+/*!
+*******************************************************************************
+
+ @Function GetCoreMemCodeSizeInUse
+
+ @Description Process the output of the Meta toolchain in the .LDR format
+ to calculate the core mem code size in use
+
+ @Input ui32MaxCorememSize : Max core mem size on core
+ @Input pbLDR : Pointer to FW blob
+
+ @Return Size of coremem in use
+
+******************************************************************************/
+static IMG_UINT32 GetCoreMemCodeSizeInUse(IMG_UINT32 ui32MaxCorememSize, const IMG_BYTE* pbLDR)
+{
+ RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+ RGX_META_LDR_L1_DATA_BLK *psL1Data =
+ (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+ IMG_UINT32 ui32CorememSizeInUse = 0;
+
+ while (psL1Data != NULL)
+ {
+ if (!RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd) &&
+ (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK) == RGX_META_LDR_CMD_LOADMEM)
+ {
+ RGX_META_LDR_L2_DATA_BLK *psL2Block =
+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+ IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+
+ if (RGX_META_IS_COREMEM_CODE(ui32Offset, ui32MaxCorememSize))
+ {
+ ui32CorememSizeInUse += ui32DataSize;
+ }
+ }
+
+ if (psL1Data->ui32Next == 0xFFFFFFFF)
+ {
+ psL1Data = NULL;
+ }
+ else
+ {
+ psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+ }
+ }
+
+ return ui32CorememSizeInUse;
+}
+
+/*!
+*******************************************************************************
+
+ @Function ProcessLDRCommandStream
+
+ @Description Process the output of the Meta toolchain in the .LDR format
+ copying code and data sections into their final location and
+ passing some information to the Meta bootloader
+
+ @Input hPrivate : Implementation specific data
+ @Input pbLDR : Pointer to FW blob
+ @Input pvHostFWCodeAddr : Pointer to FW code
+ @Input pvHostFWDataAddr : Pointer to FW data
+ @Input pvHostFWCorememAddr : Pointer to FW coremem code
+ @Input ppui32BootConf : Pointer to bootloader data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ProcessLDRCommandStream(const void *hPrivate,
+ const IMG_BYTE* pbLDR,
+ void* pvHostFWCodeAddr,
+ void* pvHostFWDataAddr,
+ void* pvHostFWCorememAddr,
+ IMG_UINT32 **ppui32BootConf)
+{
+ RGX_META_LDR_BLOCK_HDR *psHeader = (RGX_META_LDR_BLOCK_HDR *) pbLDR;
+ RGX_META_LDR_L1_DATA_BLK *psL1Data =
+ (RGX_META_LDR_L1_DATA_BLK*) ((IMG_UINT8 *) pbLDR + psHeader->ui32SLData);
+
+ IMG_UINT32 *pui32BootConf = *ppui32BootConf;
+ IMG_UINT32 ui32CorememSize = RGXGetFWCorememSize(hPrivate);
+ IMG_UINT32 ui32CorememCodeStartAddr = 0xFFFFFFFF;
+
+ RGXCommentLog(hPrivate, "**********************************************");
+ RGXCommentLog(hPrivate, "************** Begin LDR Parsing *************");
+ RGXCommentLog(hPrivate, "**********************************************");
+
+ while (psL1Data != NULL)
+ {
+ RGX_FW_SEGMENT_LIST sRGXFWSegList;
+ sRGXFWSegList.psRGXFWCodeSeg = asRGXMetaFWCodeSegments;
+ sRGXFWSegList.psRGXFWDataSeg = asRGXMetaFWDataSegments;
+ sRGXFWSegList.ui32CodeSegCount = RGXFW_META_NUM_CODE_SEGMENTS;
+ sRGXFWSegList.ui32DataSegCount = RGXFW_META_NUM_DATA_SEGMENTS;
+
+ if (RGX_META_LDR_BLK_IS_COMMENT(psL1Data->ui16Cmd))
+ {
+ /* Don't process comment blocks */
+ goto NextBlock;
+ }
+
+ switch (psL1Data->ui16Cmd & RGX_META_LDR_CMD_MASK)
+ {
+ case RGX_META_LDR_CMD_LOADMEM:
+ {
+ RGX_META_LDR_L2_DATA_BLK *psL2Block =
+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[1]);
+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+ IMG_UINT32 ui32DataSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+ void *pvWriteAddr;
+ PVRSRV_ERROR eError;
+
+ if (RGX_META_IS_COREMEM_CODE(ui32Offset, ui32CorememSize))
+ {
+ if (ui32Offset < ui32CorememCodeStartAddr)
+ {
+ if (ui32CorememCodeStartAddr == 0xFFFFFFFF)
+ {
+ /* Take the first coremem code address as the coremem code start address */
+ ui32CorememCodeStartAddr = ui32Offset;
+
+ /* Also check that there is a valid allocation for the coremem code */
+ if (pvHostFWCorememAddr == NULL)
+ {
+ RGXErrorLog(hPrivate,
+ "ProcessLDRCommandStream: Coremem code found"
+ "but no coremem allocation available!");
+
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+ else
+ {
+ /* The coremem addresses should be ordered in the LDR command stream */
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+ /* Copy coremem data to buffer. The FW copies it to the actual coremem */
+ ui32Offset -= ui32CorememCodeStartAddr;
+
+ RGXMemCopy(hPrivate,
+ (void*)((IMG_UINT8 *)pvHostFWCorememAddr + ui32Offset),
+ psL2Block->aui32BlockData,
+ ui32DataSize);
+ }
+ else
+ {
+ /* Global range is aliased to local range */
+ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+ eError = FindMMUSegment(ui32Offset,
+ pvHostFWCodeAddr,
+ pvHostFWDataAddr,
+ &pvWriteAddr,
+ &sRGXFWSegList);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLog(hPrivate,
+ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+ ui32Offset, ui32DataSize);
+ return eError;
+ }
+
+ /* Write to FW allocation only if available */
+ if (pvWriteAddr)
+ {
+ RGXMemCopy(hPrivate,
+ pvWriteAddr,
+ psL2Block->aui32BlockData,
+ ui32DataSize);
+ }
+ }
+
+ break;
+ }
+ case RGX_META_LDR_CMD_LOADCORE:
+ case RGX_META_LDR_CMD_LOADMMREG:
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ case RGX_META_LDR_CMD_START_THREADS:
+ {
+ /* Don't process this block */
+ break;
+ }
+ case RGX_META_LDR_CMD_ZEROMEM:
+ {
+ IMG_UINT32 ui32Offset = psL1Data->aui32CmdData[0];
+ IMG_UINT32 ui32ByteCount = psL1Data->aui32CmdData[1];
+ void *pvWriteAddr;
+ PVRSRV_ERROR eError;
+
+ if (RGX_META_IS_COREMEM_DATA(ui32Offset, ui32CorememSize))
+ {
+ /* cannot zero coremem directly */
+ break;
+ }
+
+ /* Global range is aliased to local range */
+ ui32Offset &= ~META_MEM_GLOBAL_RANGE_BIT;
+
+ eError = FindMMUSegment(ui32Offset,
+ pvHostFWCodeAddr,
+ pvHostFWDataAddr,
+ &pvWriteAddr,
+ &sRGXFWSegList);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLog(hPrivate,
+ "ProcessLDRCommandStream: Addr 0x%x (size: %d) not found in any segment",
+ ui32Offset, ui32ByteCount);
+ return eError;
+ }
+
+ /* Write to FW allocation only if available */
+ if (pvWriteAddr)
+ {
+ RGXMemSet(hPrivate, pvWriteAddr, 0, ui32ByteCount);
+ }
+
+ break;
+ }
+ case RGX_META_LDR_CMD_CONFIG:
+ {
+ RGX_META_LDR_L2_DATA_BLK *psL2Block =
+ (RGX_META_LDR_L2_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->aui32CmdData[0]);
+ RGX_META_LDR_CFG_BLK *psConfigCommand = (RGX_META_LDR_CFG_BLK*) psL2Block->aui32BlockData;
+ IMG_UINT32 ui32L2BlockSize = psL2Block->ui16Length - 6 /* L2 Tag length and checksum */;
+ IMG_UINT32 ui32CurrBlockSize = 0;
+
+ while (ui32L2BlockSize)
+ {
+ switch (psConfigCommand->ui32Type)
+ {
+ case RGX_META_LDR_CFG_PAUSE:
+ case RGX_META_LDR_CFG_READ:
+ {
+ ui32CurrBlockSize = 8;
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ case RGX_META_LDR_CFG_WRITE:
+ {
+ IMG_UINT32 ui32RegisterOffset = psConfigCommand->aui32BlockData[0];
+ IMG_UINT32 ui32RegisterValue = psConfigCommand->aui32BlockData[1];
+
+ /* Only write to bootloader if we got a valid
+ * pointer to the FW code allocation
+ */
+ if (pui32BootConf)
+ {
+ /* Do register write */
+ *pui32BootConf++ = ui32RegisterOffset;
+ *pui32BootConf++ = ui32RegisterValue;
+ }
+
+ RGXCommentLog(hPrivate, "Meta SP: [0x%08x] = 0x%08x",
+ ui32RegisterOffset, ui32RegisterValue);
+
+ ui32CurrBlockSize = 12;
+ break;
+ }
+ case RGX_META_LDR_CFG_MEMSET:
+ case RGX_META_LDR_CFG_MEMCHECK:
+ {
+ ui32CurrBlockSize = 20;
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ default:
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+ ui32L2BlockSize -= ui32CurrBlockSize;
+ psConfigCommand = (RGX_META_LDR_CFG_BLK*) (((IMG_UINT8*) psConfigCommand) + ui32CurrBlockSize);
+ }
+
+ break;
+ }
+ default:
+ {
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+NextBlock:
+
+ if (psL1Data->ui32Next == 0xFFFFFFFF)
+ {
+ psL1Data = NULL;
+ }
+ else
+ {
+ psL1Data = (RGX_META_LDR_L1_DATA_BLK*) (((IMG_UINT8 *) pbLDR) + psL1Data->ui32Next);
+ }
+ }
+
+ *ppui32BootConf = pui32BootConf;
+
+ RGXCommentLog(hPrivate, "**********************************************");
+ RGXCommentLog(hPrivate, "************** End Loader Parsing ************");
+ RGXCommentLog(hPrivate, "**********************************************");
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function ProcessELFCommandStream
+
+ @Description     Process the output of the MIPS toolchain in ELF format,
+                  copying code and data sections into their final locations
+
+ @Input hPrivate : Implementation specific data
+ @Input pbELF : Pointer to FW blob
+ @Input pvHostFWCodeAddr : Pointer to FW code
+ @Input pvHostFWDataAddr : Pointer to FW data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR ProcessELFCommandStream(const void *hPrivate,
+ const IMG_BYTE *pbELF,
+ void *pvHostFWCodeAddr,
+ void *pvHostFWDataAddr)
+{
+ IMG_UINT32 ui32Entry;
+ RGX_MIPS_ELF_HDR *psHeader = (RGX_MIPS_ELF_HDR *)pbELF;
+ RGX_MIPS_ELF_PROGRAM_HDR *psProgramHeader =
+ (RGX_MIPS_ELF_PROGRAM_HDR *)(pbELF + psHeader->ui32Ephoff);
+ PVRSRV_ERROR eError;
+
+ for (ui32Entry = 0; ui32Entry < psHeader->ui32Ephnum; ui32Entry++, psProgramHeader++)
+ {
+ void *pvWriteAddr;
+ RGX_FW_SEGMENT_LIST sRGXFWSegList;
+ sRGXFWSegList.psRGXFWCodeSeg = asRGXMipsFWCodeSegments;
+ sRGXFWSegList.psRGXFWDataSeg = asRGXMipsFWDataSegments;
+ sRGXFWSegList.ui32CodeSegCount = RGXFW_MIPS_NUM_CODE_SEGMENTS;
+ sRGXFWSegList.ui32DataSegCount = RGXFW_MIPS_NUM_DATA_SEGMENTS;
+
+ /* Only consider loadable entries in the ELF segment table */
+ if (psProgramHeader->ui32Ptype != ELF_PT_LOAD) continue;
+
+ eError = FindMMUSegment(psProgramHeader->ui32Pvaddr,
+ pvHostFWCodeAddr,
+ pvHostFWDataAddr,
+ &pvWriteAddr,
+ &sRGXFWSegList);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLog(hPrivate,
+		            "%s: Addr 0x%x (size: %d) not found in any segment", __func__,
+ psProgramHeader->ui32Pvaddr,
+ psProgramHeader->ui32Pfilesz);
+ return eError;
+ }
+
+ /* Write to FW allocation only if available */
+ if (pvWriteAddr)
+ {
+ RGXMemCopy(hPrivate,
+ pvWriteAddr,
+ (IMG_PBYTE)(pbELF + psProgramHeader->ui32Poffset),
+ psProgramHeader->ui32Pfilesz);
+
+ RGXMemSet(hPrivate,
+ (IMG_PBYTE)pvWriteAddr + psProgramHeader->ui32Pfilesz,
+ 0,
+ psProgramHeader->ui32Pmemsz - psProgramHeader->ui32Pfilesz);
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+ const IMG_BYTE *pbRGXFirmware,
+ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWCorememAllocSize)
+{
+ IMG_UINT32 i, ui32NumCodeSegments = 0, ui32NumDataSegments = 0;
+ RGX_FW_SEGMENT *pasRGXFWCodeSegments = NULL, *pasRGXFWDataSegments = NULL;
+
+ IMG_BOOL bMIPS = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+
+ if (!bMIPS)
+ {
+ pasRGXFWCodeSegments = asRGXMetaFWCodeSegments;
+ pasRGXFWDataSegments = asRGXMetaFWDataSegments;
+ ui32NumCodeSegments = RGXFW_META_NUM_CODE_SEGMENTS;
+ ui32NumDataSegments = RGXFW_META_NUM_DATA_SEGMENTS;
+ }
+
+ if (bMIPS)
+ {
+ pasRGXFWCodeSegments = asRGXMipsFWCodeSegments;
+ pasRGXFWDataSegments = asRGXMipsFWDataSegments;
+ ui32NumCodeSegments = RGXFW_MIPS_NUM_CODE_SEGMENTS;
+ ui32NumDataSegments = RGXFW_MIPS_NUM_DATA_SEGMENTS;
+ }
+
+ *puiFWCodeAllocSize = 0;
+ *puiFWDataAllocSize = 0;
+ *puiFWCorememAllocSize = 0;
+
+ /* Calculate how much memory the FW needs for its code and data segments */
+
+	for (i = 0; i < ui32NumCodeSegments; i++)
+	{
+		*puiFWCodeAllocSize += ((pasRGXFWCodeSegments + i)->ui32SegAllocSize);
+	}
+
+	for (i = 0; i < ui32NumDataSegments; i++)
+	{
+		*puiFWDataAllocSize += ((pasRGXFWDataSegments + i)->ui32SegAllocSize);
+	}
+
+ *puiFWCorememAllocSize = RGXGetFWCorememSize(hPrivate);
+
+ if (*puiFWCorememAllocSize != 0)
+ {
+ if (pbRGXFirmware != NULL)
+ {
+ /* Calculate actual core mem in use by parsing FW binary blob */
+ *puiFWCorememAllocSize = GetCoreMemCodeSizeInUse(*puiFWCorememAllocSize, pbRGXFirmware);
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA))
+ {
+ /* Align size to DMA block size */
+ *puiFWCorememAllocSize = (*puiFWCorememAllocSize + (RGXFW_DMA_BLOCK_SIZE - 1)) & RGXFW_DMA_BLOCK_ALIGNMENT_MASK;
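+			/* Illustrative arithmetic (block size hypothetical): with a
+			 * 128-byte DMA block and a mask of ~(block size - 1), a
+			 * 1000-byte coremem image rounds up to (1000 + 127) & ~127
+			 * = 1024 bytes. */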
+ }
+ }
+ else
+ {
+ *puiFWCorememAllocSize = *puiFWCorememAllocSize - RGX_META_COREMEM_DATA_SIZE;
+ }
+ }
+
+ if (bMIPS)
+ {
+ if ((*puiFWCodeAllocSize % RGXMIPSFW_PAGE_SIZE) != 0)
+ {
+ RGXErrorLog(hPrivate,
+ "%s: The MIPS FW code allocation is not"
+ " a multiple of the page size!", __func__);
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+
+ if ((*puiFWDataAllocSize % RGXMIPSFW_PAGE_SIZE) != 0)
+ {
+ RGXErrorLog(hPrivate,
+ "%s: The MIPS FW data allocation is not"
+ " a multiple of the page size!", __func__);
+ return PVRSRV_ERROR_INIT_FAILURE;
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+ const IMG_BYTE *pbRGXFirmware,
+ void *pvFWCode,
+ void *pvFWData,
+ void *pvFWCorememCode,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememFWAddr,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_UINT32 ui32NumThreads,
+ IMG_UINT32 ui32MainThreadID,
+ IMG_UINT32 uiFWCorememCodeSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bMIPS = RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+
+ if (!bMIPS)
+ {
+ IMG_UINT32 *pui32BootConf = NULL;
+ /* Skip bootloader configuration if a pointer to the FW code
+ * allocation is not available
+ */
+ if (pvFWCode)
+ {
+ /* This variable points to the bootloader code which is mostly
+ * a sequence of <register address,register value> pairs
+ */
+ pui32BootConf = ((IMG_UINT32*) pvFWCode) + RGXFW_BOOTLDR_CONF_OFFSET;
+
+ /* Slave port and JTAG accesses are privileged */
+ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD;
+ *pui32BootConf++ = META_CR_SYSC_JTAG_THREAD_PRIV_EN;
+
+ RGXFWConfigureSegMMU(hPrivate,
+ psFWCodeDevVAddrBase,
+ psFWDataDevVAddrBase,
+ &pui32BootConf);
+ }
+
+ /* Process FW image data stream */
+ eError = ProcessLDRCommandStream(hPrivate,
+ pbRGXFirmware,
+ pvFWCode,
+ pvFWData,
+ pvFWCorememCode,
+ &pui32BootConf);
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+ return eError;
+ }
+
+ /* Skip bootloader configuration if a pointer to the FW code
+ * allocation is not available
+ */
+ if (pvFWCode)
+ {
+ if ((ui32NumThreads == 0) || (ui32NumThreads > 2) || (ui32MainThreadID >= 2))
+ {
+ RGXErrorLog(hPrivate,
+				            "RGXProcessFWImage: Wrong Meta threads configuration, using one thread only");
+
+ ui32NumThreads = 1;
+ ui32MainThreadID = 0;
+ }
+
+ RGXFWConfigureMetaCaches(hPrivate,
+ ui32NumThreads,
+ ui32MainThreadID,
+ &pui32BootConf);
+
+ /* Signal the end of the conf sequence */
+ *pui32BootConf++ = 0x0;
+ *pui32BootConf++ = 0x0;
+
+ /* The FW main argv arguments start here */
+ *pui32BootConf++ = psRGXFwInit->ui32Addr;
+
+ if (uiFWCorememCodeSize && (psFWCorememFWAddr != NULL))
+ {
+ *pui32BootConf++ = psFWCorememFWAddr->ui32Addr;
+ *pui32BootConf++ = uiFWCorememCodeSize;
+ }
+ else
+ {
+ *pui32BootConf++ = 0;
+ *pui32BootConf++ = 0;
+ }
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, META_DMA))
+ {
+ *pui32BootConf++ = (IMG_UINT32) (psFWCorememDevVAddrBase->uiAddr >> 32);
+ *pui32BootConf++ = (IMG_UINT32) psFWCorememDevVAddrBase->uiAddr;
+ }
+ else
+ {
+ *pui32BootConf++ = 0;
+ *pui32BootConf++ = 0;
+ }
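+
+			/* At this point the bootloader configuration is, in sketch form:
+			 *   <reg,value> pairs ... <0x0,0x0> terminator,
+			 *   RGXFwInit FW address,
+			 *   coremem FW address + size (or two zeros),
+			 *   coremem device virtual address hi/lo (or two zeros). */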
+
+ }
+ }
+
+ if (bMIPS)
+ {
+ /* Process FW image data stream */
+ eError = ProcessELFCommandStream(hPrivate,
+ pbRGXFirmware,
+ pvFWCode,
+ pvFWData);
+ if (eError != PVRSRV_OK)
+ {
+ RGXErrorLog(hPrivate, "RGXProcessFWImage: Processing FW image failed (%d)", eError);
+ return eError;
+ }
+ }
+
+ return eError;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.h b/drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.h
new file mode 100644
index 00000000000000..c579b01f13f607
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfwimageutils.h
@@ -0,0 +1,122 @@
+/*************************************************************************/ /*!
+@File
+@Title Header for Services Firmware image utilities used at init time
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for Services Firmware image utilities used at init time
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWIMAGEUTILS_H__
+#define __RGXFWIMAGEUTILS_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetFWImageAllocSize
+
+ @Description Return size of Firmware code/data/coremem code allocations
+
+ @Input       hPrivate                  : Implementation specific data
+ @Input       pbRGXFirmware             : Pointer to FW blob
+ @Output      puiFWCodeAllocSize        : Returned code size
+ @Output      puiFWDataAllocSize        : Returned data size
+ @Output      puiFWCorememCodeAllocSize : Returned coremem code size (0 if N/A)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetFWImageAllocSize(const void *hPrivate,
+ const IMG_BYTE *pbRGXFirmware,
+ IMG_DEVMEM_SIZE_T *puiFWCodeAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWDataAllocSize,
+ IMG_DEVMEM_SIZE_T *puiFWCorememCodeAllocSize);
+
+/*!
+*******************************************************************************
+
+ @Function RGXProcessFWImage
+
+ @Description Process the Firmware binary blob copying code and data
+ sections into their final location and passing some
+ information to the Firmware bootloader.
+ If a pointer to the final memory location for FW code or data
+ is not valid (NULL) then the relative section will not be
+ processed.
+
+ @Input hPrivate : Implementation specific data
+ @Input pbRGXFirmware : Pointer to FW blob
+ @Input pvFWCode : Pointer to FW code
+ @Input pvFWData : Pointer to FW data
+ @Input pvFWCorememCode : Pointer to FW coremem code
+ @Input psFWCodeDevVAddrBase : FW code base device virtual address
+ @Input psFWDataDevVAddrBase : FW data base device virtual address
+ @Input psFWCorememDevVAddrBase : FW coremem code base device virtual address
+ @Input psFWCorememFWAddr : FW coremem code allocation 32 bit (FW) address
+ @Input psRGXFwInit : FW init structure 32 bit (FW) address
+ @Input ui32NumThreads : Number of FW threads in use
+ @Input ui32MainThreadID : ID of the FW thread in use
+ (only meaningful if ui32NumThreads == 1)
+ @Input ui32FWCoreMemCodeSize : FW coremem code size in use
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXProcessFWImage(const void *hPrivate,
+ const IMG_BYTE *pbRGXFirmware,
+ void *pvFWCode,
+ void *pvFWData,
+ void *pvFWCorememCode,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememFWAddr,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_UINT32 ui32NumThreads,
+ IMG_UINT32 ui32MainThreadID,
+ IMG_UINT32 ui32FWCoreMemCodeSize);
+
+#endif /* __RGXFWIMAGEUTILS_H__ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfwload.c b/drivers/gpu/drm/img-rogue/1.10/rgxfwload.c
new file mode 100644
index 00000000000000..0c6042944e9df9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfwload.c
@@ -0,0 +1,317 @@
+/*************************************************************************/ /*!
+@File
+@Title Services firmware load and access routines for Linux
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/firmware.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "device.h"
+#include "module_common.h"
+#include "rgxfwload.h"
+#include "pvr_debug.h"
+#include "srvkm.h"
+
+struct RGXFW
+{
+ const struct firmware sFW;
+};
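+
+/* Note: the struct above is a thin wrapper whose only member is the kernel's
+ * struct firmware at offset zero, which is why RGXLoadFirmware() below can
+ * safely cast the (const struct firmware *) obtained from request_firmware()
+ * to (struct RGXFW *), and RGXUnloadFirmware() can cast back via &sFW. */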
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)) && defined(RGX_FW_SIGNED)
+
+/* The Linux kernel does not support the RSA PSS padding mode. It only
+ * supports the legacy PKCS#1 padding mode.
+ */
+#if defined(RGX_FW_PKCS1_PSS_PADDING)
+#error Linux does not support verification of RSA PSS padded signatures
+#endif
+
+#include <crypto/public_key.h>
+#include <crypto/hash_info.h>
+#include <crypto/hash.h>
+
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "signfw.h"
+
+static bool VerifyFirmware(const struct firmware *psFW)
+{
+ struct FirmwareSignatureHeader *psHeader;
+ struct public_key_signature *psPKS;
+ unsigned char *szKeyID, *pcKeyID;
+ size_t uDigestSize, uDescSize;
+ void *pvSignature, *pvSigner;
+ struct crypto_shash *psTFM;
+ struct shash_desc *psDesc;
+ uint32_t ui32SignatureLen;
+ bool bVerified = false;
+ key_ref_t hKey;
+ uint8_t i;
+ int res;
+
+ if (psFW->size < FW_SIGN_BACKWARDS_OFFSET)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+ __func__, psFW->size));
+ goto err_release_firmware;
+ }
+
+ psHeader = (struct FirmwareSignatureHeader *)
+ (psFW->data + (psFW->size - FW_SIGN_BACKWARDS_OFFSET));
+
+ /* All derived from u8 so can't be exploited to flow out of this page */
+ pvSigner = (u8 *)psHeader + sizeof(struct FirmwareSignatureHeader);
+ pcKeyID = (unsigned char *)((u8 *)pvSigner + psHeader->ui8SignerLen);
+ pvSignature = (u8 *)pcKeyID + psHeader->ui8KeyIDLen;
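+
+	/* Resulting layout, as implied by the pointer arithmetic above:
+	 *   [FW image ... | FirmwareSignatureHeader | signer | key ID | signature]
+	 *                   ^psHeader                ^pvSigner ^pcKeyID ^pvSignature */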
+
+ /* We cannot update KERNEL_RO in-place, so we must copy the len */
+ ui32SignatureLen = ntohl(psHeader->ui32SignatureLen);
+
+ if (psHeader->ui8Algo >= PKEY_ALGO__LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Public key algorithm %u is not supported",
+ __func__, psHeader->ui8Algo));
+ goto err_release_firmware;
+ }
+
+ if (psHeader->ui8HashAlgo >= PKEY_HASH__LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Hash algorithm %u is not supported",
+ __func__, psHeader->ui8HashAlgo));
+ goto err_release_firmware;
+ }
+
+ if (psHeader->ui8IDType != PKEY_ID_X509)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Only asymmetric X.509 PKI certificates "
+ "are supported", __func__));
+ goto err_release_firmware;
+ }
+
+ /* Generate a hash of the fw data (including the padding) */
+
+ psTFM = crypto_alloc_shash(hash_algo_name[psHeader->ui8HashAlgo], 0, 0);
+ if (IS_ERR(psTFM))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: crypto_alloc_shash() failed (%ld)",
+ __func__, PTR_ERR(psTFM)));
+ goto err_release_firmware;
+ }
+
+ uDescSize = crypto_shash_descsize(psTFM) + sizeof(*psDesc);
+ uDigestSize = crypto_shash_digestsize(psTFM);
+
+ psPKS = kzalloc(sizeof(*psPKS) + uDescSize + uDigestSize, GFP_KERNEL);
+ if (!psPKS)
+ goto err_free_crypto_shash;
+
+ psDesc = (struct shash_desc *)((u8 *)psPKS + sizeof(*psPKS));
+ psDesc->tfm = psTFM;
+ psDesc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ psPKS->pkey_algo = psHeader->ui8Algo;
+ psPKS->pkey_hash_algo = psHeader->ui8HashAlgo;
+
+ psPKS->digest = (u8 *)psPKS + sizeof(*psPKS) + uDescSize;
+ psPKS->digest_size = uDigestSize;
+
+ res = crypto_shash_init(psDesc);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_init() failed (%d)",
+ __func__, res));
+ goto err_free_pks;
+ }
+
+ res = crypto_shash_finup(psDesc, psFW->data, psFW->size - FW_SIGN_BACKWARDS_OFFSET,
+ psPKS->digest);
+ if (res < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: crypto_shash_finup() failed (%d)",
+ __func__, res));
+ goto err_free_pks;
+ }
+
+ /* Populate the MPI with the signature payload */
+
+ psPKS->nr_mpi = 1;
+ psPKS->rsa.s = mpi_read_raw_data(pvSignature, ui32SignatureLen);
+ if (!psPKS->rsa.s)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: mpi_read_raw_data() failed", __func__));
+ goto err_free_pks;
+ }
+
+ /* Look up the key we'll use to verify this signature */
+
+ szKeyID = kmalloc(psHeader->ui8SignerLen + 2 +
+ psHeader->ui8KeyIDLen * 2 + 1, GFP_KERNEL);
+ if (!szKeyID)
+ goto err_free_mpi;
+
+ memcpy(szKeyID, pvSigner, psHeader->ui8SignerLen);
+
+ szKeyID[psHeader->ui8SignerLen + 0] = ':';
+ szKeyID[psHeader->ui8SignerLen + 1] = ' ';
+
+ for (i = 0; i < psHeader->ui8KeyIDLen; i++)
+ sprintf(&szKeyID[psHeader->ui8SignerLen + 2 + i * 2],
+ "%02x", pcKeyID[i]);
+
+ szKeyID[psHeader->ui8SignerLen + 2 + psHeader->ui8KeyIDLen * 2] = 0;
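+
+	/* The search string built above is "<signer>: <key ID as lowercase hex>",
+	 * e.g. (purely hypothetical) "Example Vendor: 0a1b2c3d". */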
+
+ hKey = keyring_search(make_key_ref(system_trusted_keyring, 1),
+ &key_type_asymmetric, szKeyID);
+ if (IS_ERR(hKey))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Request for unknown key '%s' (%ld)",
+ szKeyID, PTR_ERR(hKey)));
+ goto err_free_keyid_string;
+ }
+
+ res = verify_signature(key_ref_to_ptr(hKey), psPKS);
+ if (res)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware digital signature verification "
+ "failed (%d)", __func__, res));
+ goto err_put_key;
+ }
+
+ PVR_LOG(("Digital signature for '%s' verified successfully.",
+ RGX_FW_FILENAME));
+ bVerified = true;
+err_put_key:
+ key_put(key_ref_to_ptr(hKey));
+err_free_keyid_string:
+ kfree(szKeyID);
+err_free_mpi:
+ mpi_free(psPKS->rsa.s);
+err_free_pks:
+ kfree(psPKS);
+err_free_crypto_shash:
+ crypto_free_shash(psTFM);
+err_release_firmware:
+ return bVerified;
+}
+
+#else /* defined(RGX_FW_SIGNED) */
+
+static inline bool VerifyFirmware(const struct firmware *psFW)
+{
+ return true;
+}
+
+#endif /* defined(RGX_FW_SIGNED) */
+
+struct RGXFW *
+RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode, const IMG_CHAR *pszBVNCString, const IMG_CHAR *pszBVpNCString)
+{
+ const struct firmware *psFW;
+ int res;
+
+	if (pszBVNCString != NULL)
+ {
+ res = request_firmware(&psFW, pszBVNCString, psDeviceNode->psDevConfig->pvOSDevice);
+ if (res != 0)
+ {
+			if (pszBVpNCString != NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: request_firmware('%s') failed (%d), trying '%s'",
+ __func__, pszBVNCString, res, pszBVpNCString));
+ res = request_firmware(&psFW, pszBVpNCString, psDeviceNode->psDevConfig->pvOSDevice);
+ }
+ if (res != 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: request_firmware('%s') failed (%d), trying '%s'",
+ __func__, pszBVpNCString, res, RGX_FW_FILENAME));
+ res = request_firmware(&psFW, RGX_FW_FILENAME, psDeviceNode->psDevConfig->pvOSDevice);
+ }
+ }
+ }
+ else
+ {
+ res = request_firmware(&psFW, RGX_FW_FILENAME, psDeviceNode->psDevConfig->pvOSDevice);
+ }
+ if (res != 0)
+ {
+ PVR_DPF((PVR_DBG_FATAL, "%s: request_firmware('%s') failed (%d)",
+ __func__, RGX_FW_FILENAME, res));
+ return NULL;
+ }
+
+ if (!VerifyFirmware(psFW))
+ {
+ release_firmware(psFW);
+ return NULL;
+ }
+
+ return (struct RGXFW *)psFW;
+}
+
+void
+RGXUnloadFirmware(struct RGXFW *psRGXFW)
+{
+ const struct firmware *psFW = &psRGXFW->sFW;
+
+ release_firmware(psFW);
+}
+
+size_t
+RGXFirmwareSize(struct RGXFW *psRGXFW)
+{
+ const struct firmware *psFW = &psRGXFW->sFW;
+ return psFW->size;
+}
+
+const void *
+RGXFirmwareData(struct RGXFW *psRGXFW)
+{
+ const struct firmware *psFW = &psRGXFW->sFW;
+
+ return psFW->data;
+}
+
+/******************************************************************************
+ End of file (rgxfwload.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfwload.h b/drivers/gpu/drm/img-rogue/1.10/rgxfwload.h
new file mode 100644
index 00000000000000..231ef72e6d389c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfwload.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@File
+@Title Services RGX OS Interface for loading the firmware
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This file defines the OS interface through which the RGX
+ device initialisation code in the kernel/server will obtain
+ the RGX firmware binary image. The API is used during the
+ initialisation of an RGX device via the PVRSRVDeviceInitialise()
+ call sequence.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXFWLOAD_H__
+#define __RGXFWLOAD_H__
+
+#include "img_defs.h"
+#include "rgxdefs_km.h"
+#include "device_connection.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+/*! Opaque type handle defined and known to the OS layer implementation of this
+ * rgxfwload.h OS API. This private data is allocated in the implementation
+ * of RGXLoadFirmware() and contains whatever data and information needed to
+ * be able to acquire and return the firmware binary image to the Services
+ * kernel/server during initialisation.
+ * It is no longer required and may be freed when RGXUnloadFirmware()
+ * is called.
+ */
+typedef struct RGXFW RGXFW_t;
+
+
+/*************************************************************************/ /*!
+@Function RGXLoadFirmware
+@Description The OS implementation must load or acquire the
+ firmware (FW) image binary needed by the RGX driver stack.
+ A handle to the common layer device node is given
+ to identify which device instance in the system is being
+ initialised. The BVNC string is also supplied so that the
+ implementation knows which FW image to retrieve
+ since each FW image only supports one GPU type/revision.
+ The calling server code supports multiple GPU types and revisions
+ and will detect the specific GPU type and revision before calling
+ this API. It will also have runtime configuration of the VZ mode,
+ hence this API must be able to retrieve different FW binary images
+                  based on the pszBVNCString given. The intended use of the
+                  end platform/system determines which FW images must be
+                  available to the kernel server.
+ On exit the implementation must return a pointer to some private
+ data it uses to hold the FW image information and data. It will
+ be passed onto later API calls by the kernel server code.
+ NULL should be returned if the FW image could not be retrieved.
+ The format of the BVNC string is as follows ([x] denotes
+ optional field):
+ "rgx.fw[.signed].B.V[p].N.C[.vz]"
+                  The implementation first tries to load the FW identified
+                  by the pszBVNCString parameter. If this is not available,
+                  it falls back to the FW identified by the pszBVpNCString
+                  parameter, and finally to the default RGX_FW_FILENAME
+                  image. The fields in the string are:
+                  B, V, N, C are all unsigned integers identifying the GPU
+                  type/revision,
+ [.signed] is present when RGX_FW_SIGNED=1 is defined in the
+ server build,
+ [p] is present for provisional GPU configurations (pre-silicon),
+ [.vz] is present when the kernel server is loaded on the HOST
+ of a virtualised platform. See the DriverMode server
+ AppHint for details.
+
+@Input psDeviceNode Device instance identifier.
+@Input          pszBVNCString   Identifier string of the FW image matching
+                                the detected GPU type/revision.
+@Input          pszBVpNCString  Identifier string of the fallback FW image
+                                for provisional (pre-silicon) configurations.
+@Return RGXFW* Ptr to private data on success, NULL otherwise.
+*/ /**************************************************************************/
+struct RGXFW* RGXLoadFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+ const IMG_CHAR *pszBVNCString,
+ const IMG_CHAR *pszBVpNCString);
+
+/*************************************************************************/ /*!
+@Function RGXFirmwareData
+@Description This function returns a pointer to the start of the FW image
+ binary data held in memory. It must remain valid until
+ RGXUnloadFirmware() is called.
+@Input psRGXFW Private data opaque handle
+@Return void* Ptr to FW binary image to start on GPU.
+*/ /**************************************************************************/
+const void* RGXFirmwareData(struct RGXFW *psRGXFW);
+
+/*************************************************************************/ /*!
+@Function RGXFirmwareSize
+@Description This function returns the size of the FW image binary data.
+@Input psRGXFW Private data opaque handle
+@Return size_t Size in bytes of the firmware binary image
+*/ /**************************************************************************/
+size_t RGXFirmwareSize(struct RGXFW *psRGXFW);
+
+/*************************************************************************/ /*!
+@Function RGXUnloadFirmware
+@Description    This is called when the server has completed firmware
+                initialisation and no longer needs the private data
+                allocated by RGXLoadFirmware().
+@Input psRGXFW Private data opaque handle
+*/ /**************************************************************************/
+void RGXUnloadFirmware(struct RGXFW *psRGXFW);
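+
+/*
+ * Minimal usage sketch (illustrative only, not part of the API): the server
+ * is expected to load the image, read its data/size while processing it, and
+ * release it once initialisation is complete. The BVNC string below is a
+ * made-up example.
+ *
+ *     struct RGXFW *psRGXFW = RGXLoadFirmware(psDeviceNode,
+ *                                             "rgx.fw.4.31.4.55", NULL);
+ *     if (psRGXFW != NULL)
+ *     {
+ *         const void *pvFWImage = RGXFirmwareData(psRGXFW);
+ *         size_t uFWSize = RGXFirmwareSize(psRGXFW);
+ *         ... process the image, e.g. via RGXProcessFWImage() ...
+ *         RGXUnloadFirmware(psRGXFW);
+ *     }
+ */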
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif /* __RGXFWLOAD_H__ */
+
+/******************************************************************************
+ End of file (rgxfwload.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfwutils.c b/drivers/gpu/drm/img-rogue/1.10/rgxfwutils.c
new file mode 100644
index 00000000000000..01c85908833ba1
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfwutils.c
@@ -0,0 +1,6256 @@
+/*************************************************************************/ /*!
+@File
+@Title Rogue firmware utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Rogue firmware utility routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "lists.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "oskm_apphint.h"
+#include "cache_km.h"
+#include "allocmem.h"
+#include "physheap.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "rgxfwutils.h"
+#include "rgx_options.h"
+#include "rgx_fwif.h"
+#include "rgx_fwif_alignchecks.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_pdump_panics.h"
+#include "fwtrace_string.h"
+#include "rgxheapconfig.h"
+#include "pvrsrv.h"
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#include "rgxccb.h"
+#include "rgxcompute.h"
+#include "rgxtransfer.h"
+#include "rgxpower.h"
+#include "rgxtdmtransfer.h"
+#include "rgxray.h"
+#if defined(SUPPORT_DISPLAY_CLASS)
+#include "dc_server.h"
+#endif
+#include "rgxmem.h"
+#include "rgxta3d.h"
+#include "rgxkicksync.h"
+#include "rgxutils.h"
+#include "rgxtimecorr.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "tlstream.h"
+#include "devicemem_server_utils.h"
+#include "htbuffer.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include "physmem_lma.h"
+#include "physmem_osmem.h"
+
+#ifdef __linux__
+#include <linux/kernel.h> /* sprintf */
+#include <linux/string.h> /* strncpy, strlen */
+#include "rogue_trace_events.h"
+#else
+#include <stdio.h>
+#include <string.h>
+#endif
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#include "vz_support.h"
+#include "vz_physheap.h"
+#include "rgx_heaps.h"
+
+/* Kernel CCB length */
+/* Reducing the size of the KCCB in an attempt to avoid flooding and overflowing the FW kick queue
+ * in the case of multiple OSes */
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE (6)
+#define RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT (7)
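+/* i.e. a queue depth of 2^6 = 64 kick commands in the reduced multi-OS case
+ * and 2^7 = 128 by default */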
+
+
+/* Firmware CCB length */
+#if defined(SUPPORT_PDVFS)
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (8)
+#else
+#define RGXFWIF_FWCCB_NUMCMDS_LOG2 (5)
+#endif
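+/* i.e. 2^8 = 256 FW CCB entries when PDVFS support is built in,
+ * 2^5 = 32 otherwise */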
+
+/* Workload Estimation Firmware CCB length */
+#define RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2 (7)
+
+typedef struct
+{
+ RGXFWIF_KCCB_CMD sKCCBcmd;
+ DLLIST_NODE sListNode;
+ PDUMP_FLAGS_T uiPdumpFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_DM eDM;
+} RGX_DEFERRED_KCCB_CMD;
+
+#if defined(PDUMP)
+/* ensure PIDs are 32-bit because a 32-bit PDump load is generated for the
+ * PID filter example entries
+ */
+static_assert(sizeof(IMG_PID) == sizeof(IMG_UINT32),
+ "FW PID filtering assumes the IMG_PID type is 32-bits wide as it "
+ "generates WRW commands for loading the PID values");
+#endif
+
+static PVRSRV_ERROR _AllocateSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo, RGXFWIF_INIT* psRGXFWInit)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_MEMDESC** ppsSLC3FenceMemDesc = &psDevInfo->psSLC3FenceMemDesc;
+ IMG_UINT32 ui32CacheLineSize = GET_ROGUE_CACHE_LINE_SIZE(
+ RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+
+ PVR_DPF_ENTERED;
+
+ eError = DevmemAllocate(psDevInfo->psFirmwareMainHeap,
+ 1,
+ ui32CacheLineSize,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+ "FwSLC3FenceWA",
+ ppsSLC3FenceMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ /*
+ We need to map it so the heap for this allocation
+ is set
+ */
+ eError = DevmemMapToDevice(*ppsSLC3FenceMemDesc,
+ psDevInfo->psFirmwareMainHeap,
+ &psRGXFWInit->sSLC3FenceDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ DevmemFwFree(psDevInfo, *ppsSLC3FenceMemDesc);
+ *ppsSLC3FenceMemDesc = NULL;
+ }
+
+ PVR_DPF_RETURN_RC1(eError, *ppsSLC3FenceMemDesc);
+}
+
+static void _FreeSLC3Fence(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+ DEVMEM_MEMDESC* psSLC3FenceMemDesc = psDevInfo->psSLC3FenceMemDesc;
+
+ if (psSLC3FenceMemDesc)
+ {
+ DevmemReleaseDevVirtAddr(psSLC3FenceMemDesc);
+ DevmemFree(psSLC3FenceMemDesc);
+ }
+}
+
+static void __MTSScheduleWrite(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32Value)
+{
+ /* ensure memory is flushed before kicking MTS */
+ OSWriteMemoryBarrier();
+
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_MTS_SCHEDULE, ui32Value);
+
+ /* ensure the MTS kick goes through before continuing */
+ OSMemoryBarrier();
+}
+
+
+/*!
+ *******************************************************************************
+ @Function RGXFWSetupSignatureChecks
+ @Description
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupSignatureChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+ DEVMEM_MEMDESC** ppsSigChecksMemDesc,
+ IMG_UINT32 ui32SigChecksBufSize,
+ RGXFWIF_SIGBUF_CTL* psSigBufCtl,
+ const IMG_CHAR* pszBufferName)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for %s signature checks", pszBufferName);
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32SigChecksBufSize,
+ uiMemAllocFlags,
+ "FwSignatureChecks",
+ ppsSigChecksMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for signature checks (%u)",
+ ui32SigChecksBufSize,
+ eError));
+ return eError;
+ }
+
+ /* Prepare the pointer for the fw to access that memory */
+ RGXSetFirmwareAddress(&psSigBufCtl->sBuffer,
+ *ppsSigChecksMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ DevmemPDumpLoadMem( *ppsSigChecksMemDesc,
+ 0,
+ ui32SigChecksBufSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psSigBufCtl->ui32LeftSizeInRegs = ui32SigChecksBufSize / sizeof(IMG_UINT32);
+
+ return PVRSRV_OK;
+}
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+/*!
+ *******************************************************************************
+ @Function RGXFWSetupCounterBuffer
+ @Description
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupCounterBuffer(PVRSRV_RGXDEV_INFO* psDevInfo,
+ DEVMEM_MEMDESC** ppsBufferMemDesc,
+ IMG_UINT32 ui32CounterDataBufferSize,
+ RGXFWIF_COUNTER_DUMP_CTL* psCounterDumpCtl,
+ const IMG_CHAR* pszBufferName)
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for %s power counter buffer", pszBufferName);
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32CounterDataBufferSize,
+ uiMemAllocFlags,
+ "FwCounterBuffer",
+ ppsBufferMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for counter buffer (%u)",
+ ui32CounterDataBufferSize,
+ eError));
+ return eError;
+ }
+
+ /* Prepare the pointer for the fw to access that memory */
+ RGXSetFirmwareAddress(&psCounterDumpCtl->sBuffer,
+ *ppsBufferMemDesc,
+ 0,
+ RFW_FWADDR_NOREF_FLAG);
+
+ psCounterDumpCtl->ui32SizeInDwords = ui32CounterDataBufferSize >> 2;
+
+ return PVRSRV_OK;
+}
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+/*!
+ *******************************************************************************
+ @Function RGXFWSetupAlignChecks
+ @Description This function allocates and fills the memory needed for the
+              alignment checks of the UM and KM structures shared with the
+              firmware. The format of the data in the memory is as follows:
+                  <number of elements in the KM array>
+                  <array of KM structures' sizes and members' offsets>
+                  <number of elements in the UM array>
+                  <array of UM structures' sizes and members' offsets>
+              The UM array is passed from the user side. The firmware is
+              responsible for filling in this part of the memory. If that
+              happens, the check of the UM structures will be performed
+              by the host driver on the client's connect.
+ If the macro is not defined the client driver fills the memory
+ and the firmware checks for the alignment of all structures.
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXFWSetupAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo,
+ RGXFWIF_DEV_VIRTADDR *psAlignChecksDevFW,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength)
+{
+ IMG_UINT32 aui32RGXFWAlignChecksKM[] = { RGXFW_ALIGN_CHECKS_INIT_KM };
+	IMG_UINT32 ui32RGXFWAlignChecksTotal;
+ IMG_UINT32* paui32AlignChecks;
+ PVRSRV_ERROR eError;
+
+ /* In this case we don't know the number of elements in UM array.
+ * We have to assume something so we assume RGXFW_ALIGN_CHECKS_UM_MAX. */
+ PVR_ASSERT(ui32RGXFWAlignChecksArrLength == 0);
+	ui32RGXFWAlignChecksTotal = sizeof(aui32RGXFWAlignChecksKM)
+	                            + RGXFW_ALIGN_CHECKS_UM_MAX * sizeof(IMG_UINT32)
+	                            + 2 * sizeof(IMG_UINT32);
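+	/* e.g. with a KM array of N words this reserves
+	 * (N + RGXFW_ALIGN_CHECKS_UM_MAX + 2) * sizeof(IMG_UINT32) bytes:
+	 * two count words plus both arrays. */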
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for alignment checks");
+ eError = DevmemFwAllocate(psDevInfo,
+	                          ui32RGXFWAlignChecksTotal,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | PVRSRV_MEMALLOCFLAG_UNCACHED,
+ "FwAlignmentChecks",
+ &psDevInfo->psRGXFWAlignChecksMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for alignment checks (%u)",
+		         ui32RGXFWAlignChecksTotal,
+ eError));
+ goto failAlloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+ (void **)&paui32AlignChecks);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel addr for alignment checks (%u)",
+ eError));
+ goto failAqCpuAddr;
+ }
+
+ /* Copy the values */
+ *paui32AlignChecks++ = sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+ OSDeviceMemCopy(paui32AlignChecks, &aui32RGXFWAlignChecksKM[0], sizeof(aui32RGXFWAlignChecksKM));
+ paui32AlignChecks += sizeof(aui32RGXFWAlignChecksKM)/sizeof(IMG_UINT32);
+
+ *paui32AlignChecks = 0;
+
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWAlignChecksMemDesc,
+ 0,
+	                    ui32RGXFWAlignChecksTotal,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Prepare the pointer for the fw to access that memory */
+ RGXSetFirmwareAddress(psAlignChecksDevFW,
+ psDevInfo->psRGXFWAlignChecksMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ return PVRSRV_OK;
+
+ failAqCpuAddr:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+ psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+ failAlloc:
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void RGXFWFreeAlignChecks(PVRSRV_RGXDEV_INFO* psDevInfo)
+{
+ if (psDevInfo->psRGXFWAlignChecksMemDesc != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWAlignChecksMemDesc);
+ psDevInfo->psRGXFWAlignChecksMemDesc = NULL;
+ }
+}
+#endif
+
+static void
+RGXVzDevMemFreeGuestFwHeap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32OSID)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_VZ_RETN_IF_NOT_MODE(DRIVER_MODE_HOST);
+
+ if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+ {
+ /* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+ PVR_DPF((PVR_DBG_ERROR,
+ "Deallocating guest fw heap with invalid OSID:%u, MAX:%u",
+ ui32OSID, RGXFW_NUM_OS - 1));
+ return;
+ }
+
+ if (psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID])
+ {
+ psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+ DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+ DevmemFree(psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+ psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID] = NULL;
+ }
+
+ if (psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID])
+ {
+ psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+ DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID]);
+ DevmemFree(psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID]);
+ psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID] = NULL;
+ }
+
+ if (psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID])
+ {
+ psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+ DevmemReleaseDevVirtAddr(psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID]);
+ DevmemFree(psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID]);
+ psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID] = NULL;
+ }
+}
+
+static PVRSRV_ERROR
+RGXVzDevMemAllocateGuestFwHeap(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT32 ui32OSID)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR szHeapName[32];
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32CacheLineSize =
+ GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS));
+ IMG_UINT32 ui32FwHeapAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL |
+ PVRSRV_MEMALLOCFLAG_FW_GUEST;
+
+ /*
+ * This is called by the host driver only, it pre-allocates and maps
+ * into the firmware kernel memory context all guest firmware physheaps
+	 * so the call fails if an invalid OSID (i.e. either the host OSID or
+	 * an OSID outside the valid range) is supplied, as this would indicate
+	 * an internal error.
+ */
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_ERROR_INTERNAL_ERROR);
+ if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+ {
+ /* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+ PVR_DPF((PVR_DBG_ERROR,
+ "Allocating guest fw heap with invalid OSID:%u, MAX:%u",
+ ui32OSID, RGXFW_NUM_OS - 1));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail;
+ }
+
+ PDUMPCOMMENT("Mapping firmware physheaps for OSID: [%d]", ui32OSID);
+
+ SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eHeapOrigin);
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ /* Target OSID physheap for allocation */
+ psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+ OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmwareConfig%d", ui32OSID);
+ /* This allocates the memory for guest Fw Config heap */
+ eError = DevmemAllocate(psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+ RGX_FIRMWARE_CONFIG_HEAP_SIZE,
+ ui32CacheLineSize,
+ ui32FwHeapAllocFlags | PVRSRV_MEMALLOCFLAG_FW_CONFIG,
+ szHeapName,
+ &psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemAllocate() failed for Firmware Config heap (%u)", eError));
+ goto fail;
+ }
+
+ /* If allocation is successful, permanently map this into device */
+ eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareConfigMemDesc[ui32OSID],
+ psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemMapToDevice() failed for Firmware Config heap (%u)", eError));
+ goto fail;
+ }
+
+ /* Target OSID physheap for allocation */
+ psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+ OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmwareMain%d", ui32OSID);
+ /* This allocates the memory for guest Fw Main heap */
+ eError = DevmemAllocate(psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+ RGXGetFwMainHeapSize(psDevInfo),
+ ui32CacheLineSize,
+ ui32FwHeapAllocFlags,
+ szHeapName,
+ &psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemAllocate() failed for Firmware Main heap (%u)", eError));
+ goto fail;
+ }
+
+ /* If allocation is successful, permanently map this into device */
+ eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareMainMemDesc[ui32OSID],
+ psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemMapToDevice() failed for Firmware Main heap (%u)", eError));
+ goto fail;
+ }
+ }
+ else
+ {
+ /* Target OSID physheap for allocation */
+ psDeviceNode->uiKernelFwRAIdx = ui32OSID;
+
+ OSSNPrintf(szHeapName, sizeof(szHeapName), "GuestFirmwareRaw%d", ui32OSID);
+ /* This allocates the memory for guest Fw Raw heap */
+ eError = DevmemAllocate(psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ ui32CacheLineSize,
+ ui32FwHeapAllocFlags,
+ szHeapName,
+ &psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemAllocate() failed for Firmware Raw heap (%u)", eError));
+ goto fail;
+ }
+
+ /* If allocation is successful, permanently map this into device */
+ eError = DevmemMapToDevice(psDevInfo->psGuestFirmwareRawMemDesc[ui32OSID],
+ psDevInfo->psGuestFirmwareRawHeap[ui32OSID],
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemMapToDevice() failed for Firmware Raw heap (%u)", eError));
+ goto fail;
+ }
+ }
+
+ return eError;
+
+fail:
+ RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXVzSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+ eError = SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig, eHeapType, &eHeapOrigin);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+#if (RGXFW_GUEST_OSID_START < RGXFW_NUM_OS)
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ IMG_UINT32 ui32OSID;
+ /* Guest OSID(s) in range [1 up to (RGXFW_NUM_OS-1)] */
+ for (ui32OSID = RGXFW_GUEST_OSID_START; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+ {
+ eError = RGXVzDevMemAllocateGuestFwHeap(psDeviceNode, ui32OSID);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ }
+#endif
+
+ return eError;
+}
+
+static void
+RGXVzFreeFirmware(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ PVRSRV_VZ_RETN_IF_NOT_MODE(DRIVER_MODE_HOST);
+
+ eError = SysVzGetPhysHeapOrigin(psDeviceNode->psDevConfig, eHeapType, &eHeapOrigin);
+ if (eError != PVRSRV_OK)
+ {
+ return;
+ }
+#if (RGXFW_GUEST_OSID_START < RGXFW_NUM_OS)
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ IMG_UINT32 ui32OSID;
+ /* Guest OSID(s) in range [1 up to (RGXFW_NUM_OS-1)] */
+ for (ui32OSID = RGXFW_GUEST_OSID_START; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+ {
+ RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+ }
+ }
+#endif
+}
+
+void RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest,
+ DEVMEM_MEMDESC *psSrc,
+ IMG_UINT32 uiExtraOffset,
+ IMG_UINT32 ui32Flags)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR psDevVirtAddr;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ psDeviceNode = (PVRSRV_DEVICE_NODE *) DevmemGetConnection(psSrc);
+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ {
+ IMG_UINT32 ui32Offset;
+ IMG_BOOL bCachedInMETA;
+ DEVMEM_FLAGS_T uiDevFlags;
+ IMG_UINT32 uiGPUCacheMode;
+
+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* Convert to an address in META memmap */
+ ui32Offset = psDevVirtAddr.uiAddr + uiExtraOffset - RGX_FIRMWARE_RAW_HEAP_BASE;
+
+ /* Check in the devmem flags whether this memory is cached/uncached */
+ DevmemGetFlags(psSrc, &uiDevFlags);
+
+ /* Honour the META cache flags */
+ bCachedInMETA = (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) & uiDevFlags) != 0;
+
+ /* Honour the SLC cache flags */
+ eError = DevmemDeviceCacheMode(psDeviceNode, uiDevFlags, &uiGPUCacheMode);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ ui32Offset += RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+ if (bCachedInMETA)
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_META_CACHED;
+ }
+ else
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+ }
+
+ if (PVRSRV_CHECK_GPU_CACHED(uiGPUCacheMode))
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_CACHED;
+ }
+ else
+ {
+ ui32Offset |= RGXFW_SEGMMU_DATA_VIVT_SLC_UNCACHED;
+ }
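+
+		/* The 32-bit FW address now encodes the offset within the FW heap,
+		 * rebased onto the META data segment base, with the META and SLC
+		 * cache-policy bits OR'd in (a summary of the steps above). */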
+ ppDest->ui32Addr = ui32Offset;
+	}
+	else
+ {
+ eError = DevmemAcquireDevVirtAddr(psSrc, &psDevVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ ppDest->ui32Addr = (IMG_UINT32)((psDevVirtAddr.uiAddr + uiExtraOffset) & 0xFFFFFFFF);
+ }
+
+ if (ui32Flags & RFW_FWADDR_NOREF_FLAG)
+ {
+ DevmemReleaseDevVirtAddr(psSrc);
+ }
+}
+
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest,
+ DEVMEM_MEMDESC *psSrcMemDesc,
+ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr,
+ IMG_UINT32 uiOffset)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEV_VIRTADDR sDevVirtAddr;
+
+ eError = DevmemAcquireDevVirtAddr(psSrcMemDesc, &sDevVirtAddr);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psDest->psDevVirtAddr.uiAddr = sDevVirtAddr.uiAddr;
+ psDest->psDevVirtAddr.uiAddr += uiOffset;
+ psDest->pbyFWAddr.ui32Addr = psSrcFWDevVAddr->ui32Addr;
+
+ DevmemReleaseDevVirtAddr(psSrcMemDesc);
+}
+
+
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc)
+{
+ DevmemReleaseDevVirtAddr(psSrc);
+}
+
+struct _RGX_SERVER_COMMON_CONTEXT_ {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ DEVMEM_MEMDESC *psFWCommonContextMemDesc;
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContextFWAddr;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_CLIENT_CCB *psClientCCB;
+ DEVMEM_MEMDESC *psClientCCBMemDesc;
+ DEVMEM_MEMDESC *psClientCCBCtrlMemDesc;
+ IMG_BOOL bCommonContextMemProvided;
+ IMG_UINT32 ui32ContextID;
+ DLLIST_NODE sListNode;
+ RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+ IMG_UINT32 ui32LastResetJobRef;
+};
+
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+ RGXFWIF_DM eDM,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ DEVMEM_MEMDESC *psContextStateMemDesc,
+ IMG_UINT32 ui32CCBAllocSize,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ RGXFWIF_FWCOMMONCONTEXT *psFWCommonContext;
+ IMG_UINT32 ui32FWCommonContextOffset;
+ IMG_UINT8 *pui8Ptr;
+ PVRSRV_ERROR eError;
+
+ /*
+ * Allocate all the resources that are required
+ */
+ psServerCommonContext = OSAllocMem(sizeof(*psServerCommonContext));
+ if (psServerCommonContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psServerCommonContext->psDevInfo = psDevInfo;
+
+ if (psAllocatedMemDesc)
+ {
+ PDUMPCOMMENT("Using existing MemDesc for Rogue firmware %s context (offset = %d)",
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ ui32AllocatedOffset);
+ ui32FWCommonContextOffset = ui32AllocatedOffset;
+ psServerCommonContext->psFWCommonContextMemDesc = psAllocatedMemDesc;
+ psServerCommonContext->bCommonContextMemProvided = IMG_TRUE;
+ }
+ else
+ {
+ /* Allocate device memory for the firmware context */
+ PDUMPCOMMENT("Allocate Rogue firmware %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWCommonContext),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwContext",
+ &psServerCommonContext->psFWCommonContextMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate firmware %s context (%s)",
+ __FUNCTION__,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_contextalloc;
+ }
+ ui32FWCommonContextOffset = 0;
+ psServerCommonContext->bCommonContextMemProvided = IMG_FALSE;
+ }
+
+ /* Record this context so we can refer to it if the FW needs to tell us it was reset. */
+ psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+ psServerCommonContext->ui32LastResetJobRef = 0;
+ psServerCommonContext->ui32ContextID = psDevInfo->ui32CommonCtxtCurrentID++;
+
+ /*
+ * Temporarily map the firmware context to the kernel and init it
+ */
+ eError = DevmemAcquireCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc,
+ (void **)&pui8Ptr);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s context (%s) to CPU",
+ __FUNCTION__,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_cpuvirtacquire;
+ }
+
+ /* Allocate the client CCB */
+ eError = RGXCreateCCB(psDevInfo,
+ ui32CCBAllocSize,
+ psConnection,
+ eRGXCCBRequestor,
+ psServerCommonContext,
+ &psServerCommonContext->psClientCCB,
+ &psServerCommonContext->psClientCCBMemDesc,
+ &psServerCommonContext->psClientCCBCtrlMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for %s context (%s)",
+ __FUNCTION__,
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_allocateccb;
+ }
+
+ psFWCommonContext = (RGXFWIF_FWCOMMONCONTEXT *) (pui8Ptr + ui32FWCommonContextOffset);
+ psFWCommonContext->eDM = eDM;
+
+ /* Set the firmware CCB device addresses in the firmware common context */
+ RGXSetFirmwareAddress(&psFWCommonContext->psCCB,
+ psServerCommonContext->psClientCCBMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+ RGXSetFirmwareAddress(&psFWCommonContext->psCCBCtl,
+ psServerCommonContext->psClientCCBCtrlMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+ {
+ RGXSetMetaDMAAddress(&psFWCommonContext->sCCBMetaDMAAddr,
+ psServerCommonContext->psClientCCBMemDesc,
+ &psFWCommonContext->psCCB,
+ 0);
+ }
+
+ /* Set the memory context device address */
+ psServerCommonContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+ RGXSetFirmwareAddress(&psFWCommonContext->psFWMemContext,
+ psFWMemContextMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ /* Set the framework register updates address */
+ psServerCommonContext->psFWFrameworkMemDesc = psInfo->psFWFrameworkMemDesc;
+ if (psInfo->psFWFrameworkMemDesc != NULL)
+ {
+ RGXSetFirmwareAddress(&psFWCommonContext->psRFCmd,
+ psInfo->psFWFrameworkMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+ }
+ else
+ {
+		/* This should never be touched in a context without a framework
+		 * memdesc, but ensure it is zero so we see crashes if it is.
+ */
+ psFWCommonContext->psRFCmd.ui32Addr = 0;
+ }
+
+ psFWCommonContext->ui32Priority = ui32Priority;
+ psFWCommonContext->ui32PrioritySeqNum = 0;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, CDM_CONTROL_STREAM_FORMAT) &&
+		(RGX_GET_FEATURE_VALUE(psDevInfo, CDM_CONTROL_STREAM_FORMAT) == 2) &&
+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING)))
+ {
+ if (eDM == RGXFWIF_DM_CDM)
+ {
+ if (psInfo->psResumeSignalAddr != NULL)
+ {
+ psFWCommonContext->ui64ResumeSignalAddr = psInfo->psResumeSignalAddr->uiAddr;
+ }
+ }
+ }
+
+	/* Store references to the Server Common Context and PID for notifications back from the FW. */
+ psFWCommonContext->ui32ServerCommonContextID = psServerCommonContext->ui32ContextID;
+ psFWCommonContext->ui32PID = OSGetCurrentClientProcessIDKM();
+
+ /* Set the firmware GPU context state buffer */
+ psServerCommonContext->psContextStateMemDesc = psContextStateMemDesc;
+ if (psContextStateMemDesc)
+ {
+ RGXSetFirmwareAddress(&psFWCommonContext->psContextState,
+ psContextStateMemDesc,
+ 0,
+ RFW_FWADDR_FLAG_NONE);
+ }
+
+ /*
+ * Dump the created context
+ */
+ PDUMPCOMMENT("Dump %s context", aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT]);
+ DevmemPDumpLoadMem(psServerCommonContext->psFWCommonContextMemDesc,
+ ui32FWCommonContextOffset,
+ sizeof(*psFWCommonContext),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* We've finished the setup so release the CPU mapping */
+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+
+ /* Map this allocation into the FW */
+ RGXSetFirmwareAddress(&psServerCommonContext->sFWCommonContextFWAddr,
+ psServerCommonContext->psFWCommonContextMemDesc,
+ ui32FWCommonContextOffset,
+ RFW_FWADDR_FLAG_NONE);
+
+#if defined(LINUX)
+ {
+ IMG_UINT32 ui32FWAddr;
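+		/* For TA/3D report the FW address of the enclosing render context
+		 * rather than the per-DM sub-context embedded within it. */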
+ switch (eDM) {
+ case RGXFWIF_DM_TA:
+ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, sTAContext));
+ break;
+ case RGXFWIF_DM_3D:
+ ui32FWAddr = (IMG_UINT32) ((uintptr_t) IMG_CONTAINER_OF((void *) ((uintptr_t)
+ psServerCommonContext->sFWCommonContextFWAddr.ui32Addr), RGXFWIF_FWRENDERCONTEXT, s3DContext));
+ break;
+ default:
+ ui32FWAddr = psServerCommonContext->sFWCommonContextFWAddr.ui32Addr;
+ break;
+ }
+
+ trace_rogue_create_fw_context(OSGetCurrentClientProcessNameKM(),
+ aszCCBRequestors[eRGXCCBRequestor][REQ_PDUMP_COMMENT],
+ ui32FWAddr);
+ }
+#endif
+	/* Add the node to the list when finalised */
+ OSWRLockAcquireWrite(psDevInfo->hCommonCtxtListLock);
+ dllist_add_to_tail(&(psDevInfo->sCommonCtxtListHead), &(psServerCommonContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hCommonCtxtListLock);
+
+ *ppsServerCommonContext = psServerCommonContext;
+ return PVRSRV_OK;
+
+ fail_allocateccb:
+ DevmemReleaseCpuVirtAddr(psServerCommonContext->psFWCommonContextMemDesc);
+ fail_cpuvirtacquire:
+ if (!psServerCommonContext->bCommonContextMemProvided)
+ {
+ DevmemFwFree(psDevInfo, psServerCommonContext->psFWCommonContextMemDesc);
+ psServerCommonContext->psFWCommonContextMemDesc = NULL;
+ }
+ fail_contextalloc:
+ OSFreeMem(psServerCommonContext);
+ fail_alloc:
+ return eError;
+}
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+
+ OSWRLockAcquireWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+ /* Remove the context from the list of all contexts. */
+ dllist_remove_node(&psServerCommonContext->sListNode);
+ OSWRLockReleaseWrite(psServerCommonContext->psDevInfo->hCommonCtxtListLock);
+
+ /*
+ Unmap the context itself and then all its resources
+ */
+
+ /* Unmap the FW common context */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWCommonContextMemDesc);
+	/* Unmap the context state buffer (if there was one) */
+ if (psServerCommonContext->psContextStateMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psServerCommonContext->psContextStateMemDesc);
+ }
+ /* Unmap the framework buffer */
+ if (psServerCommonContext->psFWFrameworkMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWFrameworkMemDesc);
+ }
+ /* Unmap client CCB and CCB control */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBCtrlMemDesc);
+ RGXUnsetFirmwareAddress(psServerCommonContext->psClientCCBMemDesc);
+ /* Unmap the memory context */
+ RGXUnsetFirmwareAddress(psServerCommonContext->psFWMemContextMemDesc);
+
+ /* Destroy the client CCB */
+ RGXDestroyCCB(psServerCommonContext->psDevInfo, psServerCommonContext->psClientCCB);
+
+
+ /* Free the FW common context (if there was one) */
+ if (!psServerCommonContext->bCommonContextMemProvided)
+ {
+ DevmemFwFree(psServerCommonContext->psDevInfo,
+ psServerCommonContext->psFWCommonContextMemDesc);
+ psServerCommonContext->psFWCommonContextMemDesc = NULL;
+ }
+ /* Free the hosts representation of the common context */
+ OSFreeMem(psServerCommonContext);
+}
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->sFWCommonContextFWAddr;
+}
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->psClientCCB;
+}
+
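+/* Return the most recent reset reason and job reference recorded for this
+ * context, clearing them so that the next query only reports new resets. */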
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ IMG_UINT32 *pui32LastResetJobRef)
+{
+ RGXFWIF_CONTEXT_RESET_REASON eLastResetReason;
+
+ PVR_ASSERT(psServerCommonContext != NULL);
+ PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+ /* Take the most recent reason & job ref and reset for next time... */
+ eLastResetReason = psServerCommonContext->eLastResetReason;
+ *pui32LastResetJobRef = psServerCommonContext->ui32LastResetJobRef;
+ psServerCommonContext->eLastResetReason = RGXFWIF_CONTEXT_RESET_REASON_NONE;
+ psServerCommonContext->ui32LastResetJobRef = 0;
+
+ if (eLastResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"A Hard Context Switch was triggered on the GPU to ensure Quality of Service."));
+ }
+
+ return eLastResetReason;
+}
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext)
+{
+ return psServerCommonContext->psDevInfo;
+}
+
+/*!
+ *******************************************************************************
+ @Function RGXFreeCCB
+ @Description Free a kernel/firmware CCB and its control structure
+ @Input psDevInfo
+
+ @Return void
+ ******************************************************************************/
+static void RGXFreeCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_CCB_CTL **ppsCCBCtl,
+ DEVMEM_MEMDESC **ppsCCBCtlMemDesc,
+ IMG_UINT8 **ppui8CCB,
+ DEVMEM_MEMDESC **ppsCCBMemDesc)
+{
+ if (*ppsCCBMemDesc != NULL)
+ {
+ if (*ppui8CCB != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(*ppsCCBMemDesc);
+ *ppui8CCB = NULL;
+ }
+ DevmemFwFree(psDevInfo, *ppsCCBMemDesc);
+ *ppsCCBMemDesc = NULL;
+ }
+ if (*ppsCCBCtlMemDesc != NULL)
+ {
+ if (*ppsCCBCtl != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(*ppsCCBCtlMemDesc);
+ *ppsCCBCtl = NULL;
+ }
+ DevmemFwFree(psDevInfo, *ppsCCBCtlMemDesc);
+ *ppsCCBCtlMemDesc = NULL;
+ }
+}
+
+/*!
+ *******************************************************************************
+ @Function RGXSetupCCB
+ @Description Allocate and initialise a kernel/firmware CCB and its control structure
+ @Input psDevInfo
+ @Input ppsCCBCtl
+ @Input ppsCCBCtlMemDesc
+ @Input ppui8CCB
+ @Input ppsCCBMemDesc
+ @Input psCCBCtlFWAddr
+ @Input psCCBFWAddr
+ @Input ui32NumCmdsLog2
+ @Input ui32CmdSize
+ @Input uiCCBMemAllocFlags
+ @Input pszName
+
+ @Return PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXSetupCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_CCB_CTL **ppsCCBCtl,
+ DEVMEM_MEMDESC **ppsCCBCtlMemDesc,
+ IMG_UINT8 **ppui8CCB,
+ DEVMEM_MEMDESC **ppsCCBMemDesc,
+ PRGXFWIF_CCB_CTL *psCCBCtlFWAddr,
+ PRGXFWIF_CCB *psCCBFWAddr,
+ IMG_UINT32 ui32NumCmdsLog2,
+ IMG_UINT32 ui32CmdSize,
+ DEVMEM_FLAGS_T uiCCBMemAllocFlags,
+ const IMG_CHAR *pszName)
+{
+ const IMG_UINT32 ui32MaxInputStrSize = 13;
+ const IMG_UINT32 ui32AppendStrSize = 7;
+	const IMG_UINT32 ui32MaxTotalStrSize = ui32MaxInputStrSize + ui32AppendStrSize + 1;
+ const IMG_CHAR sAppend[] = "Control";
+ PVRSRV_ERROR eError;
+ RGXFWIF_CCB_CTL *psCCBCtl;
+ DEVMEM_FLAGS_T uiCCBCtlMemAllocFlags;
+ IMG_UINT32 ui32CCBSize = (1U << ui32NumCmdsLog2);
+ IMG_CHAR sCCBCtlName[ui32MaxTotalStrSize];
+
+ PVR_ASSERT(strlen(sAppend) == ui32AppendStrSize);
+ PVR_ASSERT(strlen(pszName) <= ui32MaxInputStrSize);
+
+ uiCCBCtlMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Append "Control" to the name for the control struct. */
+ strcpy(sCCBCtlName, pszName);
+ strncat(sCCBCtlName, sAppend, ui32AppendStrSize);
+
+ /* Allocate memory for the CCB control.*/
+ PDUMPCOMMENT("Allocate memory for %s", sCCBCtlName);
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_CCB_CTL),
+ uiCCBCtlMemAllocFlags,
+ sCCBCtlName,
+ ppsCCBCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %s (%u)", __func__, sCCBCtlName, eError));
+ goto fail;
+ }
+
+ /*
+ * Allocate memory for the CCB.
+ * (this will reference further command data in non-shared CCBs)
+ */
+ PDUMPCOMMENT("Allocate memory for %s", pszName);
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32CCBSize * ui32CmdSize,
+ uiCCBMemAllocFlags,
+ pszName,
+ ppsCCBMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %s (%u)", __func__, pszName, eError));
+ goto fail;
+ }
+
+ /*
+ Map the CCB control to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(*ppsCCBCtlMemDesc,
+ (void **)ppsCCBCtl);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire cpu %s (%u)", __func__, sCCBCtlName, eError));
+ goto fail;
+ }
+
+ /*
+ * Map the CCB to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(*ppsCCBMemDesc,
+ (void **)ppui8CCB);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire cpu %s (%u)", __func__, pszName, eError));
+ goto fail;
+ }
+
+ /*
+ * Initialise the CCB control.
+ */
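+	/* ui32CCBSize is 1 << ui32NumCmdsLog2, so (ui32CCBSize - 1) is a valid
+	 * power-of-two wrap mask for the read/write offsets. */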
+ psCCBCtl = *ppsCCBCtl;
+ psCCBCtl->ui32WriteOffset = 0;
+ psCCBCtl->ui32ReadOffset = 0;
+ psCCBCtl->ui32WrapMask = ui32CCBSize - 1;
+ psCCBCtl->ui32CmdSize = ui32CmdSize;
+
+ /*
+	 * Set up the RGXFWIf pointers used by the FW to access this CCB
+ */
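+	/* RFW_FWADDR_NOREF_FLAG: the device virtual address reference taken by
+	 * the mapping is dropped immediately, so no matching
+	 * RGXUnsetFirmwareAddress() is needed at free time (see RGXFreeCCB). */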
+ RGXSetFirmwareAddress(psCCBCtlFWAddr,
+ *ppsCCBCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ RGXSetFirmwareAddress(psCCBFWAddr,
+ *ppsCCBMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ /*
+ * Pdump the CCB control.
+ */
+ PDUMPCOMMENT("Initialise %s", sCCBCtlName);
+ DevmemPDumpLoadMem(*ppsCCBCtlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCB_CTL),
+ 0);
+
+ return PVRSRV_OK;
+
+ fail:
+ RGXFreeCCB(psDevInfo,
+ ppsCCBCtl,
+ ppsCCBCtlMemDesc,
+ ppui8CCB,
+ ppsCCBMemDesc);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static void RGXSetupFaultReadRegisterRollback(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PMR *psPMR;
+
+ if (psDevInfo->psRGXFaultAddressMemDesc)
+ {
+		if (DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR) == PVRSRV_OK)
+ {
+ PMRUnlockSysPhysAddresses(psPMR);
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+ psDevInfo->psRGXFaultAddressMemDesc = NULL;
+ }
+}
+
+static PVRSRV_ERROR RGXSetupFaultReadRegister(PVRSRV_DEVICE_NODE *psDeviceNode, RGXFWIF_INIT *psRGXFWInit)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 *pui32MemoryVirtAddr;
+ IMG_UINT32 i;
+ size_t ui32PageSize = OSGetPageSize();
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PMR *psPMR;
+
+ /* Allocate page of memory to use for page faults on non-blocking memory transactions */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED;
+
+ psDevInfo->psRGXFaultAddressMemDesc = NULL;
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ ui32PageSize,
+ ui32PageSize,
+ uiMemAllocFlags,
+ "FwExFaultAddress",
+ &psDevInfo->psRGXFaultAddressMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate mem for fault address (%u)",
+ eError));
+ goto failFaultAddressDescAlloc;
+ }
+
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc,
+ (void **)&pui32MemoryVirtAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire mem for fault address (%u)",
+ eError));
+ goto failFaultAddressDescAqCpuVirt;
+ }
+
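+	/* Fill the page with a recognisable poison value so that stray reads of
+	 * the dummy fault page are easy to spot in memory dumps. */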
+ for (i = 0; i < ui32PageSize/sizeof(IMG_UINT32); i++)
+ {
+ *(pui32MemoryVirtAddr + i) = 0xDEADBEEF;
+ }
+
+	eError = DevmemServerGetImportHandle(psDevInfo->psRGXFaultAddressMemDesc, (void **)&psPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting PMR for fault address (%u)",
+ eError));
+
+ goto failFaultAddressDescGetPMR;
+ }
+ else
+ {
+ IMG_BOOL bValid;
+ IMG_UINT32 ui32Log2PageSize = OSGetPageShift();
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error locking physical address for fault address MemDesc (%u)",
+ eError));
+
+ goto failFaultAddressDescLockPhys;
+ }
+
+		eError = PMR_DevPhysAddr(psPMR, ui32Log2PageSize, 1, 0, &(psRGXFWInit->sFaultPhysAddr), &bValid);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Error getting physical address for fault address MemDesc (%u)",
+ eError));
+
+ goto failFaultAddressDescGetPhys;
+ }
+
+ if (!bValid)
+ {
+ psRGXFWInit->sFaultPhysAddr.uiAddr = 0;
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed getting physical address for fault address MemDesc - invalid page (0x%" IMG_UINT64_FMTSPECX ")",
+ psRGXFWInit->sFaultPhysAddr.uiAddr));
+
+ goto failFaultAddressDescGetPhys;
+ }
+ }
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+ return PVRSRV_OK;
+
+ failFaultAddressDescGetPhys:
+ PMRUnlockSysPhysAddresses(psPMR);
+
+ failFaultAddressDescLockPhys:
+
+ failFaultAddressDescGetPMR:
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFaultAddressMemDesc);
+
+ failFaultAddressDescAqCpuVirt:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFaultAddressMemDesc);
+ psDevInfo->psRGXFaultAddressMemDesc = NULL;
+
+ failFaultAddressDescAlloc:
+
+ return eError;
+}
+
+#if defined(PDUMP)
+/* Replace the DevPhy address with the one PDump allocates at PDump player run time */
+static PVRSRV_ERROR RGXPDumpFaultReadRegister(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ PMR *psFWInitPMR, *psFaultAddrPMR;
+ IMG_UINT32 ui32Dstoffset;
+
+ psFWInitPMR = (PMR *)(psDevInfo->psRGXFWIfInitMemDesc->psImport->hPMR);
+ ui32Dstoffset = psDevInfo->psRGXFWIfInitMemDesc->uiOffset + offsetof(RGXFWIF_INIT, sFaultPhysAddr.uiAddr);
+
+ psFaultAddrPMR = (PMR *)(psDevInfo->psRGXFaultAddressMemDesc->psImport->hPMR);
+
+ eError = PDumpMemLabelToMem64(psFaultAddrPMR,
+ psFWInitPMR,
+ 0,
+ ui32Dstoffset,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "RGXPDumpFaultReadRegister: Dump of Fault Page Phys address failed (%u)", eError));
+ }
+ return eError;
+}
+#endif
+
+/*************************************************************************/ /*!
+@Function RGXTBIBufferIsInitRequired
+
+@Description Returns true if the firmware TBI buffer is not yet allocated
+             and might soon be required by the firmware. The TBI buffer is
+             allocated on demand to reduce the RAM footprint on systems not
+             needing TBI.
+
+@Input psDevInfo RGX device info
+
+@Return IMG_BOOL Whether on-demand allocation(s) is/are needed
+ or not
+ */ /**************************************************************************/
+INLINE IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+	/* The firmware expects a TBI buffer only when:
+	 * - Logtype is "tbi" AND
+	 * - at least one LogGroup is configured
+	 */
+ if ((psDevInfo->psRGXFWIfTBIBufferMemDesc == NULL)
+ && (psTraceBufCtl->ui32LogType & ~RGXFWIF_LOG_TYPE_TRACE)
+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK))
+ {
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function RGXTBIBufferDeinit
+
+@Description Deinitialises all the allocations and references that are made
+ for the FW tbi buffer
+
+@Input psDevInfo RGX device info
+@Return void
+ */ /**************************************************************************/
+static void RGXTBIBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTBIBufferMemDesc);
+ psDevInfo->psRGXFWIfTBIBufferMemDesc = NULL;
+}
+
+/*************************************************************************/ /*!
+@Function RGXTBIBufferInitOnDemandResources
+
+@Description Allocates the firmware TBI buffer required for reading SFs
+             strings and initialises it with the SFs data.
+
+@Input psDevInfo RGX device info
+
+@Return PVRSRV_OK if all went well, PVRSRV_ERROR otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32FWTBIBufsize = sizeof(SFs);
+ IMG_PUINT32 pui32TBIBuffer = NULL;
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw tbi buffer");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32FWTBIBufsize,
+ uiMemAllocFlags,
+ "FwTBIBuffer",
+ &psDevInfo->psRGXFWIfTBIBufferMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %u bytes for fw TBI buffer (Error code:%u)",
+ __FUNCTION__,
+ ui32FWTBIBufsize,
+ eError));
+ goto fail;
+ }
+
+ /* Firmware address should not be already set */
+ if (psDevInfo->sRGXFWIfTBIBuffer.ui32Addr)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"%s: FW address for FWTBI is already set. Resetting it to the newly allocated one", __FUNCTION__));
+ }
+
+ /* for the FW to use this address when reading strings from tbi buffer */
+ RGXSetFirmwareAddress(&psDevInfo->sRGXFWIfTBIBuffer,
+ psDevInfo->psRGXFWIfTBIBufferMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ /* Set an address for the host to be able to write SFs strings in buffer */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc,
+ (void **)&pui32TBIBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel tbibuf ctl (Error code: %u)",
+ __FUNCTION__, eError));
+ goto fail;
+ }
+
+ /* Write SFs data in buffer */
+ OSDeviceMemCopy(pui32TBIBuffer, SFs, ui32FWTBIBufsize);
+
+ /* release CPU mapping */
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTBIBufferMemDesc);
+
+ return PVRSRV_OK;
+ fail:
+ RGXTBIBufferDeinit(psDevInfo);
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXTraceBufferIsInitRequired
+
+@Description Returns true if the firmware trace buffer is not yet allocated
+             and might soon be required by the firmware. The trace buffer is
+             allocated on demand to reduce the RAM footprint on systems not
+             needing firmware trace.
+
+@Input psDevInfo RGX device info
+
+@Return IMG_BOOL Whether on-demand allocation(s) is/are needed
+ or not
+ */ /**************************************************************************/
+INLINE IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* The firmware expects a trace buffer only when:
+ * - Logtype is "trace" AND
+	 * - at least one LogGroup is configured AND
+ * - the Driver Mode is not Guest
+ */
+ if ((psDevInfo->psRGXFWIfTraceBufferMemDesc[0] == NULL)
+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_TRACE)
+ && (psTraceBufCtl->ui32LogType & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ && !PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ return IMG_TRUE;
+ }
+
+ return IMG_FALSE;
+}
+
+/*************************************************************************/ /*!
+@Function RGXTraceBufferDeinit
+
+@Description Deinitialises all the allocations and references that are made
+ for the FW trace buffer(s)
+
+@Input psDevInfo RGX device info
+@Return void
+ */ /**************************************************************************/
+static void RGXTraceBufferDeinit(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGXFW_THREAD_NUM; i++)
+ {
+ if (psDevInfo->psRGXFWIfTraceBufferMemDesc[i])
+ {
+ if (psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+ psTraceBufCtl->sTraceBuf[i].pui32TraceBuffer = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufferMemDesc[i]);
+ psDevInfo->psRGXFWIfTraceBufferMemDesc[i] = NULL;
+ }
+ }
+}
+
+/*************************************************************************/ /*!
+@Function RGXTraceBufferInitOnDemandResources
+
+@Description Allocates the firmware trace buffer required for dumping trace
+ info from the firmware.
+
+@Input psDevInfo RGX device info
+
+@Return PVRSRV_OK if all went well, PVRSRV_ERROR otherwise.
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF* psTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32FwThreadNum;
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ for (ui32FwThreadNum = 0; ui32FwThreadNum < RGXFW_THREAD_NUM; ui32FwThreadNum++)
+ {
+ /* Ensure allocation API is only called when not already allocated */
+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum] == NULL);
+
+ PDUMPCOMMENT("Allocate rgxfw trace buffer(%u)", ui32FwThreadNum);
+ eError = DevmemFwAllocate(psDevInfo,
+ RGXFW_TRACE_BUFFER_SIZE * sizeof(*(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer)),
+ uiMemAllocFlags,
+ "FwTraceBuffer",
+ &psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate %zu bytes for fw trace buffer %u (Error code:%u)",
+ __FUNCTION__,
+ RGXFW_TRACE_BUFFER_SIZE * sizeof(*(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer)),
+ ui32FwThreadNum,
+ eError));
+ goto fail;
+ }
+
+ /* Firmware address should not be already set */
+ PVR_ASSERT(psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer.ui32Addr == 0x0);
+
+ /* for the FW to use this address when dumping in log (trace) buffer */
+ RGXSetFirmwareAddress(&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32RGXFWIfTraceBuffer,
+ psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+ 0, RFW_FWADDR_NOREF_FLAG);
+ /* Set an address for the host to be able to read fw trace buffer */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32FwThreadNum],
+ (void **)&psTraceBufCtl->sTraceBuf[ui32FwThreadNum].pui32TraceBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel tracebuf (%u) ctl (Error code: %u)",
+ __FUNCTION__, ui32FwThreadNum, eError));
+ goto fail;
+ }
+ }
+
+ return PVRSRV_OK;
+ fail:
+ RGXTraceBufferDeinit(psDevInfo);
+ return eError;
+}
+
+static PVRSRV_ERROR RGXSetupOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32ConfigFlagsExt,
+ RGXFWIF_DEV_VIRTADDR sTracebufCtl,
+ PRGXFWIF_HWRINFOBUF sRGXFWIfHWRInfoBufCtl,
+ RGXFWIF_DEV_VIRTADDR sGuestTracebufCtl,
+ RGXFWIF_DEV_VIRTADDR sGuestHWRInfoBufCtl)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ RGXFWIF_OS_CONFIG *psOSConfig;
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+
+ PDUMPCOMMENT("Allocate RGXFW_OS_CONFIG structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_OS_CONFIG),
+ uiMemAllocFlags,
+ "FwOSConfigStructure",
+ &psDevInfo->psRGXFWIfOSConfigDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupOSConfig: Failed to allocate %u bytes for OS Config (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_OS_CONFIG),
+ eError));
+ goto fail1;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInit->sOSConfig,
+ psDevInfo->psRGXFWIfOSConfigDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc,
+ (void **)&psOSConfig);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupOSConfig: Failed to acquire OS Config (%u)",
+ eError));
+ goto fail2;
+ }
+
+ psOSConfig->ui32ConfigFlags = ui32ConfigFlags & RGXFWIF_INICFG_ALL;
+ psOSConfig->ui32ConfigFlagsExt = ui32ConfigFlagsExt;
+
+ eError = SyncPrimGetFirmwareAddr(psDevInfo->psPowSyncPrim, &psOSConfig->sPowerSync.ui32Addr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get Sync Prim FW address with error (%u)",
+ __FUNCTION__, eError));
+ goto fail2;
+ }
+
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Dump initial state of RGXFW_OS_CONFIG structure");
+ DevmemPDumpLoadMem(psDevInfo->psRGXFWIfOSConfigDesc,
+ 0,
+ sizeof(RGXFWIF_OS_CONFIG),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+ psDevInfo->psFWIfOSConfig = psOSConfig;
+
+ /* Inform the FW that there is also an extended field */
+ psOSConfig->ui32ConfigFlags |= RGXFWIF_INICFG_USE_EXTENDED;
+
+ /* Inform the FW that the TracebufCtl and HWRInfoBufCtl structs are kicked with OSConfig */
+ psOSConfig->ui32ConfigFlagsExt |= RGXFWIF_INICFG_EXT_TRACEBUF_FIELD;
+
+ /* Set the Tracebuf and HWRInfoBufCtl offsets */
+ psOSConfig->sTraceBufCtl = sTracebufCtl;
+ psOSConfig->sRGXFWIfHWRInfoBufCtl = sRGXFWIfHWRInfoBufCtl;
+
+ /* Set the Guest Tracebuf and HWRInfoBuf offsets */
+ psOSConfig->sGuestTraceBufCtl = sGuestTracebufCtl;
+ psOSConfig->sRGXFWIfGuestHWRInfoBufCtl = sGuestHWRInfoBufCtl;
+
+ return PVRSRV_OK;
+
+ fail2:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfOSConfigDesc);
+ fail1:
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXSetupFirmware
+
+ @Description
+
+ Sets up all the firmware-related data
+
+ @Input psDevInfo
+
+ @Return PVRSRV_ERROR
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ RGXFWIF_BIFTILINGMODE eBifTilingMode,
+ IMG_UINT32 ui32NumTilingCfgs,
+ IMG_UINT32 *pui32BIFTilingXStrides,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGXFWIF_DEV_VIRTADDR *psRGXFWInitFWAddr,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf,
+ IMG_UINT32 ui32ConfigFlagsExt)
+
+{
+ PVRSRV_ERROR eError;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ RGXFWIF_INIT *psRGXFWInitScratch = NULL;
+ RGXFWIF_INIT *psRGXFWInitActual = NULL;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 dm;
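+	/* Select the kernel CCB size: virtualised drivers running on cores
+	 * without the GPU virtualisation feature use a dedicated log2 size;
+	 * all other configurations use the default. */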
+ IMG_UINT32 ui32kCCBSize = (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+ !(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_GPU_VIRTUALISATION_BIT_MASK)) ?\
+ (RGXFWIF_KCCB_NUMCMDS_LOG2_GPUVIRT_WITHOUT_FEATURE) : (RGXFWIF_KCCB_NUMCMDS_LOG2_DEFAULT);
+#if defined(SUPPORT_PDVFS)
+ RGXFWIF_PDVFS_OPP *psPDVFSOPPInfo;
+ IMG_DVFS_DEVICE_CFG *psDVFSDeviceCfg;
+#endif
+
+ /* Fw init data */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_FW_LOCAL |
+ PVRSRV_MEMALLOCFLAG_FW_CONFIG;
+ /* FIXME: Change to Cached */
+
+
+ PDUMPCOMMENT("Allocate RGXFWIF_INIT structure");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_INIT),
+ uiMemAllocFlags,
+ "FwInitStructure",
+ &psDevInfo->psRGXFWIfInitMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw if ctl (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_INIT),
+ eError));
+ goto fail;
+ }
+
+ psRGXFWInitScratch = OSAllocZMem(sizeof(*psRGXFWInitScratch));
+
+	if (psRGXFWInitScratch == NULL)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate RGXFWInit scratch structure"));
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psDevInfo->sFWInitFWAddr,
+ psDevInfo->psRGXFWIfInitMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+ *psRGXFWInitFWAddr = psDevInfo->sFWInitFWAddr;
+
+ /* FW coremem data */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE) &&
+ (0 == (RGX_IS_BRN_SUPPORTED(psDevInfo, 50767))))
+ {
+ IMG_BOOL bMetaDMA = RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ if (bMetaDMA)
+ {
+ IMG_UINT64 ui64SecBufHandle;
+
+ PDUMPCOMMENT("Import secure buffer to store FW coremem data");
+ eError = DevmemImportTDSecureBuf(psDeviceNode,
+ RGX_META_COREMEM_BSS_SIZE,
+ OSGetPageShift(),
+ uiMemAllocFlags,
+ &psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ &ui64SecBufHandle);
+ }
+ else
+#endif
+ {
+ PDUMPCOMMENT("Allocate buffer to store FW coremem data");
+ eError = DevmemFwAllocate(psDevInfo,
+ RGX_META_COREMEM_BSS_SIZE,
+ uiMemAllocFlags,
+ "FwCorememDataStore",
+ &psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate coremem data store (%u)",
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sCorememDataStore.pbyFWAddr,
+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ if (bMetaDMA)
+ {
+ RGXSetMetaDMAAddress(&psRGXFWInitScratch->sCorememDataStore,
+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ &psRGXFWInitScratch->sCorememDataStore.pbyFWAddr,
+ 0);
+ }
+ }
+
+ /* init HW frame info */
+ PDUMPCOMMENT("Allocate rgxfw HW info buffer");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_HWRINFOBUF),
+ uiMemAllocFlags,
+ "FwHWInfoBuffer",
+ &psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for HW info (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sRGXFWIfHWRInfoBufCtl,
+ psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfHWRInfoBuf);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel HWR info buf ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* Might be uncached. Be conservative and use a DeviceMemSet */
+ OSDeviceMemSet(psDevInfo->psRGXFWIfHWRInfoBuf, 0, sizeof(RGXFWIF_HWRINFOBUF));
+
+ /* Allocate a sync for power management */
+ eError = SyncPrimContextCreate(psDevInfo->psDeviceNode,
+ &psDevInfo->hSyncPrimContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive context with error (%u)", eError));
+ goto fail;
+ }
+
+ eError = SyncPrimAlloc(psDevInfo->hSyncPrimContext, &psDevInfo->psPowSyncPrim, "fw power ack");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate sync primitive with error (%u)", eError));
+ goto fail;
+ }
+
+ /* Setup Fault read register */
+ eError = RGXSetupFaultReadRegister(psDeviceNode, psRGXFWInitScratch);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup fault read register"));
+ goto fail;
+ }
+
+ /* Allocation flags for the kernel CCB */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Set up kernel CCB */
+ eError = RGXSetupCCB(psDevInfo,
+ &psDevInfo->psKernelCCBCtl,
+ &psDevInfo->psKernelCCBCtlMemDesc,
+ &psDevInfo->psKernelCCB,
+ &psDevInfo->psKernelCCBMemDesc,
+ &psRGXFWInitScratch->psKernelCCBCtl,
+ &psRGXFWInitScratch->psKernelCCB,
+ ui32kCCBSize,
+ sizeof(RGXFWIF_KCCB_CMD),
+ uiMemAllocFlags,
+ "FwKernelCCB");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate Kernel CCB", __func__));
+ goto fail;
+ }
+
+ /* Allocation flags for the firmware CCB */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Set up firmware CCB */
+ eError = RGXSetupCCB(psDevInfo,
+ &psDevInfo->psFirmwareCCBCtl,
+ &psDevInfo->psFirmwareCCBCtlMemDesc,
+ &psDevInfo->psFirmwareCCB,
+ &psDevInfo->psFirmwareCCBMemDesc,
+ &psRGXFWInitScratch->psFirmwareCCBCtl,
+ &psRGXFWInitScratch->psFirmwareCCB,
+ RGXFWIF_FWCCB_NUMCMDS_LOG2,
+ sizeof(RGXFWIF_FWCCB_CMD),
+ uiMemAllocFlags,
+ "FwCCB");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to allocate Firmware CCB", __func__));
+ goto fail;
+ }
+
+ /* RD Power Island */
+ {
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+ IMG_BOOL bSysEnableRDPowIsland = psRGXData->psRGXTimingInfo->bEnableRDPowIsland;
+ IMG_BOOL bEnableRDPowIsland = ((eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_DEFAULT) && bSysEnableRDPowIsland) ||
+ (eRGXRDPowerIslandConf == RGX_RD_POWER_ISLAND_FORCE_ON);
+
+		ui32ConfigFlags |= bEnableRDPowIsland ? RGXFWIF_INICFG_POW_RASCALDUST : 0;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ ui32ConfigFlags |= RGXFWIF_INICFG_WORKEST_V2;
+#if defined(SUPPORT_PDVFS)
+ /* Pro-active DVFS depends on Workload Estimation */
+ psPDVFSOPPInfo = &psRGXFWInitScratch->sPDVFSOPPInfo;
+ psDVFSDeviceCfg = &psDeviceNode->psDevConfig->sDVFS.sDVFSDeviceCfg;
+ PVR_LOG_IF_FALSE(psDVFSDeviceCfg->pasOPPTable, "RGXSetupFirmware: Missing OPP Table");
+
+ if (psDVFSDeviceCfg->pasOPPTable != NULL)
+ {
+ if (psDVFSDeviceCfg->ui32OPPTableSize >
+ sizeof(psPDVFSOPPInfo->asOPPValues)/sizeof(psPDVFSOPPInfo->asOPPValues[0]))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXSetupFirmware: OPP Table too large: Size = %u, Maximum size = %lu",
+ psDVFSDeviceCfg->ui32OPPTableSize,
+ (unsigned long)(sizeof(psPDVFSOPPInfo->asOPPValues)/sizeof(psPDVFSOPPInfo->asOPPValues[0]))));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail;
+ }
+
+ OSDeviceMemCopy(psPDVFSOPPInfo->asOPPValues,
+ psDVFSDeviceCfg->pasOPPTable,
+ sizeof(psPDVFSOPPInfo->asOPPValues));
+
+ psPDVFSOPPInfo->ui32MaxOPPPoint = psDVFSDeviceCfg->ui32OPPTableSize - 1;
+
+ ui32ConfigFlags |= RGXFWIF_INICFG_PDVFS_V2;
+ }
+#endif
+#endif
+
+ /* FW trace control structure */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw trace control structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_TRACEBUF),
+ uiMemAllocFlags,
+ "FwTraceCtlStruct",
+ &psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw trace (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_TRACEBUF),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sTraceBufCtl,
+ psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfTraceBuf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel tracebuf ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* Set initial firmware log type/group(s) */
+ if (ui32LogType & ~RGXFWIF_LOG_TYPE_MASK)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Invalid initial log type (0x%X)",ui32LogType));
+ goto fail;
+ }
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType = ui32LogType;
+
+#if !defined(PDUMP)
+ /* When PDUMP is enabled, ALWAYS allocate on-demand trace buffer resource
+ * (irrespective of loggroup(s) enabled), given that logtype/loggroups can
+	 * be set during PDump playback in logconfig at any point in time.
+	 * Otherwise, allocate only if required. */
+ if (RGXTraceBufferIsInitRequired(psDevInfo))
+#endif
+ {
+ eError = RGXTraceBufferInitOnDemandResources(psDevInfo);
+ }
+
+ PVR_LOGG_IF_ERROR(eError, "RGXTraceBufferInitOnDemandResources", fail);
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST))
+ {
+ RGXFWIF_DEV_VIRTADDR sGuestHWRInfoBuf;
+ RGXFWIF_DEV_VIRTADDR sGuestTraceBuf;
+
+		/* Allocation flags for the Guest buffers */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* init HW frame info */
+ PDUMPCOMMENT("Allocate Guest rgxfw HW info buffer");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_HWRINFOBUF),
+ uiMemAllocFlags,
+ "FwGuestHWInfoBuffer",
+ &psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %d bytes for Guest HW info (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_HWRINFOBUF),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&sGuestHWRInfoBuf,
+ psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfGuestHWRInfoBuf);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel Guest HWR info buf ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* FW trace control structure */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw trace control structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_TRACEBUF),
+ uiMemAllocFlags,
+ "FwGuestTraceCtlStruct",
+ &psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for Guest fw trace (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_TRACEBUF),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&sGuestTraceBuf,
+ psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfGuestTraceBuf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel tracebuf ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ eError = RGXSetupOSConfig(psDevInfo, psRGXFWInitScratch, ui32ConfigFlags, ui32ConfigFlagsExt,
+ psRGXFWInitScratch->sTraceBufCtl, psRGXFWInitScratch->sRGXFWIfHWRInfoBufCtl,
+ sGuestTraceBuf, sGuestHWRInfoBuf);
+ }
+ else
+ {
+ RGXFWIF_DEV_VIRTADDR sDummy;
+
+ sDummy.ui32Addr = 0;
+
+ eError = RGXSetupOSConfig(psDevInfo, psRGXFWInitScratch, ui32ConfigFlags, ui32ConfigFlagsExt,
+ psRGXFWInitScratch->sTraceBufCtl, psRGXFWInitScratch->sRGXFWIfHWRInfoBufCtl,
+ sDummy, sDummy);
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Failed to set up the per-OS configuration"));
+ goto fail;
+ }
+
+ psRGXFWInitScratch->eGPIOValidationMode = RGXFWIF_GPIO_VAL_OFF;
+#if defined(SUPPORT_VALIDATION)
+ {
+ IMG_INT32 ui32AppHintDefault;
+ IMG_INT32 ui32GPIOValidationMode;
+ void *pvAppHintState = NULL;
+
+ /* Check AppHint for GPIO validation mode */
+ OSCreateKMAppHintState(&pvAppHintState);
+ ui32AppHintDefault = PVRSRV_APPHINT_GPIOVALIDATIONMODE;
+ OSGetKMAppHintUINT32(pvAppHintState,
+ GPIOValidationMode,
+ &ui32AppHintDefault,
+ &ui32GPIOValidationMode);
+ OSFreeKMAppHintState(pvAppHintState);
+ pvAppHintState = NULL;
+
+ if (ui32GPIOValidationMode >= RGXFWIF_GPIO_VAL_LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Invalid GPIO validation mode: %d, only valid if smaller than %d. Disabling GPIO validation.",
+ __func__,
+ ui32GPIOValidationMode,
+ RGXFWIF_GPIO_VAL_LAST));
+ }
+ else
+ {
+ psRGXFWInitScratch->eGPIOValidationMode = (RGXFWIF_GPIO_VAL_MODE) ui32GPIOValidationMode;
+ }
+ }
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /* Set up Workload Estimation firmware CCB */
+ eError = RGXSetupCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc,
+ &psRGXFWInitScratch->psWorkEstFirmwareCCBCtl,
+ &psRGXFWInitScratch->psWorkEstFirmwareCCB,
+ RGXFWIF_WORKEST_FWCCB_NUMCMDS_LOG2,
+ sizeof(RGXFWIF_WORKEST_FWCCB_CMD),
+ uiMemAllocFlags,
+ "FwWEstCCB");
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate Workload Estimation Firmware CCB"));
+ goto fail;
+ }
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+
+ eError = RGXFWSetupCounterBuffer(psDevInfo,
+ &psDevInfo->psCounterBufferMemDesc,
+ PAGE_SIZE,
+ &psRGXFWInitScratch->sCounterDumpCtl,
+ "CounterBuffer");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate counter buffer"));
+ goto fail;
+ }
+
+#endif
+
+ /* Require a minimum amount of memory for the signature buffers */
+ if (ui32SignatureChecksBufSize < RGXFW_SIG_BUFFER_SIZE_MIN)
+ {
+ ui32SignatureChecksBufSize = RGXFW_SIG_BUFFER_SIZE_MIN;
+ }
+
+ /* Setup Signature and Checksum Buffers for TA and 3D */
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSigTAChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TA],
+ "TA");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup TA signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32SigTAChecksSize = ui32SignatureChecksBufSize;
+
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSig3DChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_3D],
+ "3D");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup 3D signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32Sig3DChecksSize = ui32SignatureChecksBufSize;
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSigRTChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_RTU],
+ "RTU");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup RTU signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32SigRTChecksSize = ui32SignatureChecksBufSize;
+
+ eError = RGXFWSetupSignatureChecks(psDevInfo,
+ &psDevInfo->psRGXFWSigSHChecksMemDesc,
+ ui32SignatureChecksBufSize,
+ &psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_SHG],
+ "SHG");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup SHG signature checks"));
+ goto fail;
+ }
+ psDevInfo->ui32SigSHChecksSize = ui32SignatureChecksBufSize;
+ }
+#endif
+#if defined(RGXFW_ALIGNCHECKS)
+ eError = RGXFWSetupAlignChecks(psDevInfo,
+ &psRGXFWInitScratch->sAlignChecks,
+ pui32RGXFWAlignChecks,
+ ui32RGXFWAlignChecksArrLength);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to setup alignment checks"));
+ goto fail;
+ }
+#endif
+
+ psRGXFWInitScratch->ui32FilterFlags = ui32FilterFlags;
+
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+ {
+		/* Fill in the remaining bits of the fw init data */
+ psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_BRN_65273_HEAP_BASE;
+ psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_BRN_65273_HEAP_BASE;
+ }
+ else
+ {
+		/* Fill in the remaining bits of the fw init data */
+ psRGXFWInitScratch->sPDSExecBase.uiAddr = RGX_PDSCODEDATA_HEAP_BASE;
+ psRGXFWInitScratch->sUSCExecBase.uiAddr = RGX_USCCODE_HEAP_BASE;
+ }
+
+ psRGXFWInitScratch->sDPXControlStreamBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+ psRGXFWInitScratch->sResultDumpBase.uiAddr = RGX_DOPPLER_OVERFLOW_HEAP_BASE;
+ psRGXFWInitScratch->sRTUHeapBase.uiAddr = RGX_DOPPLER_HEAP_BASE;
+ psRGXFWInitScratch->sTDMTPUYUVCeoffsHeapBase.uiAddr = RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ psRGXFWInitScratch->ui32JonesDisableMask = ui32JonesDisableMask;
+ }
+ psDevInfo->bPDPEnabled = (ui32ConfigFlags & RGXFWIF_SRVCFG_DISABLE_PDP_EN)
+ ? IMG_FALSE : IMG_TRUE;
+ psRGXFWInitScratch->ui32HWRDebugDumpLimit = ui32HWRDebugDumpLimit;
+
+ psRGXFWInitScratch->eFirmwarePerf = eFirmwarePerf;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+ {
+ eError = _AllocateSLC3Fence(psDevInfo, psRGXFWInitScratch);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate memory for SLC3Fence"));
+ goto fail;
+ }
+ }
+
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) &&
+ ((ui32ConfigFlags & RGXFWIF_INICFG_METAT1_ENABLED) != 0))
+ {
+ /* Allocate a page for T1 stack */
+ eError = DevmemFwAllocate(psDevInfo,
+ RGX_META_STACK_SIZE,
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwMETAT1Stack",
+ & psDevInfo->psMETAT1StackMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate T1 Stack"));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sT1Stack,
+ psDevInfo->psMETAT1StackMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXSetupFirmware: T1 Stack Frame allocated at %x",
+ psRGXFWInitScratch->sT1Stack.ui32Addr));
+ }
+
+#if defined(SUPPORT_PDVFS)
+ /* Core clock rate */
+ uiMemAllocFlags =
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT32),
+ uiMemAllocFlags,
+ "FwCoreClkRate",
+ &psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate PDVFS core clock rate"));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sCoreClockRate,
+ psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXSetupFirmware: PDVFS core clock rate allocated at %x",
+ psRGXFWInitScratch->sCoreClockRate.ui32Addr));
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc,
+ (void **)&psDevInfo->pui32RGXFWIFCoreClkRate);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire core clk rate (%u)",
+ eError));
+ goto fail;
+ }
+#endif
+
+ /* Timestamps */
+ uiMemAllocFlags =
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | /* XXX ?? */
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ /*
+ the timer query arrays
+ */
+ PDUMPCOMMENT("Allocate timer query arrays (FW)");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+ uiMemAllocFlags,
+ "FwStartTimesArray",
+ & psDevInfo->psStartTimeMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate start times array"));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psStartTimeMemDesc,
+ (void **)& psDevInfo->pui64StartTimeById);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map start times array"));
+ goto fail;
+ }
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT64) * RGX_MAX_TIMER_QUERIES,
+ uiMemAllocFlags,
+ "FwEndTimesArray",
+ & psDevInfo->psEndTimeMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate end times array"));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psEndTimeMemDesc,
+ (void **)& psDevInfo->pui64EndTimeById);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map end times array"));
+ goto fail;
+ }
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(IMG_UINT32) * RGX_MAX_TIMER_QUERIES,
+ uiMemAllocFlags,
+ "FwCompletedOpsArray",
+ & psDevInfo->psCompletedMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate completed ops array"));
+ goto fail;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psCompletedMemDesc,
+ (void **)& psDevInfo->pui32CompletedById);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to map completed ops array"));
+ goto fail;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psDevInfo->hTimerQueryLock, LOCK_TYPE_DISPATCH);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetupFirmware: Failed to allocate log for timer query"));
+ goto fail;
+ }
+#endif
+
+#if !defined(PDUMP)
+ /* allocate only if required */
+ if (RGXTBIBufferIsInitRequired(psDevInfo))
+#endif
+ {
+ /* When PDUMP is enabled, ALWAYS allocate on-demand TBI buffer resource
+ * (irrespective of loggroup(s) enabled), given that logtype/loggroups can
+		 * be set during PDump playback in logconfig at any point in time. */
+ eError = RGXTBIBufferInitOnDemandResources(psDevInfo);
+ }
+
+ PVR_LOGG_IF_ERROR(eError, "RGXTBIBufferInitOnDemandResources", fail);
+ psRGXFWInitScratch->sTBIBuf = psDevInfo->sRGXFWIfTBIBuffer;
+
+ /* Allocate shared buffer for GPU utilisation */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate shared buffer for GPU utilisation");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_GPU_UTIL_FWCB),
+ uiMemAllocFlags,
+ "FwGPUUtilisationBuffer",
+ &psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for GPU utilisation buffer ctl (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_GPU_UTIL_FWCB),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sGpuUtilFWCbCtl,
+ psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc,
+ (void **)&psDevInfo->psRGXFWIfGpuUtilFWCb);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel GPU utilisation buffer ctl (%u)",
+ eError));
+ goto fail;
+ }
+
+ /* Initialise GPU utilisation buffer */
+ psDevInfo->psRGXFWIfGpuUtilFWCb->ui64LastWord =
+ RGXFWIF_GPU_UTIL_MAKE_WORD(OSClockns64(),RGXFWIF_GPU_UTIL_STATE_IDLE);
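+	/* RGXFWIF_GPU_UTIL_MAKE_WORD packs the current OS timestamp and the
+	 * utilisation state (IDLE at start of day) into one 64-bit word, so
+	 * both can be read together. */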
+
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw FW runtime configuration (FW)");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_RUNTIME_CFG),
+ uiMemAllocFlags,
+ "FwRuntimeCfg",
+ &psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for FW runtime configuration (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_RUNTIME_CFG),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sRuntimeCfg,
+ psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ (void **)&psDevInfo->psRGXFWIfRuntimeCfg);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel FW runtime configuration (%u)",
+ eError));
+ goto fail;
+ }
+ /* HWPerf: Determine the size of the FW buffer */
+ if (ui32HWPerfFWBufSizeKB == 0 ||
+ ui32HWPerfFWBufSizeKB == RGXFW_HWPERF_L1_SIZE_DEFAULT)
+ {
+		/* Under pvrsrvctl, a size of 0 means the AppHint is unset or was
+		 * explicitly set to zero; use the default size from the driver
+		 * constant, without logging.
+		 */
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_DEFAULT<<10;
+ }
+ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MAX))
+ {
+		/* Size specified as an AppHint but it is too big */
+ PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too big, using maximum (%u)",
+ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MAX));
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MAX<<10;
+ }
+ else if (ui32HWPerfFWBufSizeKB > (RGXFW_HWPERF_L1_SIZE_MIN))
+ {
+		/* Size specified in the AppHint HWPerfFWBufSizeInKB */
+ PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: Using HWPerf FW buffer size of %u KB",
+ ui32HWPerfFWBufSizeKB));
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = ui32HWPerfFWBufSizeKB<<10;
+ }
+ else
+ {
+		/* Size specified as an AppHint but it is too small */
+ PVR_DPF((PVR_DBG_WARNING,"RGXSetupFirmware: HWPerfFWBufSizeInKB value (%u) too small, using minimum (%u)",
+ ui32HWPerfFWBufSizeKB, RGXFW_HWPERF_L1_SIZE_MIN));
+ psDevInfo->ui32RGXFWIfHWPerfBufSize = RGXFW_HWPERF_L1_SIZE_MIN<<10;
+ }
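+	/* For illustration: if the build's limits were, say, 16 KB minimum and
+	 * 2048 KB maximum, an AppHint of 4096 would be clamped to 2048 and an
+	 * AppHint of 4 raised to 16; the <<10 above converts KB to bytes. */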
+
+	/* Initialise HWPerf data */
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfRIdx = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWIdx = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfWrapCount = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfSize = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+ psRGXFWInitScratch->bDisableFilterHWPerfCustomCounter = (ui32ConfigFlags & RGXFWIF_INICFG_HWP_DISABLE_FILTER) ? IMG_TRUE : IMG_FALSE;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfUt = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWPerfDropCount = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32FirstDropOrdinal = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32LastDropOrdinal = 0;
+ psDevInfo->psRGXFWIfTraceBuf->ui32PowMonEnergy = 0;
+
+	/* Second stage initialisation of HWPerf; hHWPerfLock was created in the
+	 * first stage. See the RGXHWPerfInit() call in RGXRegisterDevice(). */
+ if (psDevInfo->ui64HWPerfFilter == 0)
+ {
+ psDevInfo->ui64HWPerfFilter = ui64HWPerfFilter;
+ psRGXFWInitScratch->ui64HWPerfFilter = ui64HWPerfFilter;
+ }
+ else
+ {
+ /* The filter has already been modified. This can happen if
+ * pvr/apphint/EnableFTraceGPU was enabled. */
+ psRGXFWInitScratch->ui64HWPerfFilter = psDevInfo->ui64HWPerfFilter;
+ }
+
+	/* Send through the BVNC feature flags */
+ eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, &psRGXFWInitScratch->ui32BvncKmFeatureFlags);
+ PVR_LOGG_IF_ERROR(eError, "RGXServerFeatureFlagsToHWPerfFlags", fail);
+
+#if !defined(PDUMP)
+ /* Allocate if HWPerf filter has already been set. This is possible either
+ * by setting a proper AppHint or enabling GPU ftrace events. */
+ if (psDevInfo->ui64HWPerfFilter != 0)
+#endif
+ {
+ /* When PDUMP is enabled, ALWAYS allocate on-demand HWPerf resources
+ * (irrespective of HWPerf enabled or not), given that HWPerf can be
+ * enabled during PDump playback via RTCONF at any point of time. */
+ eError = RGXHWPerfInitOnDemandResources(psDevInfo);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInitOnDemandResources", fail);
+ }
+
+ RGXHWPerfInitAppHintCallbacks(psDeviceNode);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PDUMPCOMMENT("Allocate rgxfw register configuration structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_REG_CFG),
+ uiMemAllocFlags | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE,
+ "FwRegisterConfigStructure",
+ &psDevInfo->psRGXFWIfRegCfgMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to allocate %u bytes for fw register configurations (%u)",
+ (IMG_UINT32)sizeof(RGXFWIF_REG_CFG),
+ eError));
+ goto fail;
+ }
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sRegCfg,
+ psDevInfo->psRGXFWIfRegCfgMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+#endif
+
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate rgxfw hwperfctl structure");
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ ui32HWPerfCountersDataSize,
+ OSGetPageSize(),
+ uiMemAllocFlags,
+ "FwExHWPerfControlStructure",
+ &psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitHWPerfCounters: Failed to allocate %u bytes for fw hwperf control (%u)",
+ ui32HWPerfCountersDataSize,
+ eError));
+ goto fail;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWIfHWPerfCountersMemDesc, (void**) ppsHWPerfPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto fail;
+ }
+
+
+ RGXSetFirmwareAddress(&psRGXFWInitScratch->sHWPerfCtl,
+ psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+ 0, 0);
+
+ /* Required info by FW to calculate the ActivePM idle timer latency */
+ {
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+
+ psRGXFWInitScratch->ui32InitialCoreClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+ psRGXFWInitScratch->ui32ActivePMLatencyms = psRGXData->psRGXTimingInfo->ui32ActivePMLatencyms;
+
+ /* Initialise variable runtime configuration to the system defaults */
+ psRuntimeCfg->ui32CoreClockSpeed = psRGXFWInitScratch->ui32InitialCoreClockSpeed;
+ psRuntimeCfg->ui32ActivePMLatencyms = psRGXFWInitScratch->ui32ActivePMLatencyms;
+ psRuntimeCfg->bActivePMLatencyPersistant = IMG_TRUE;
+
+ /* Initialize the DefaultDustsNumInit Field to Max Dusts */
+ psRuntimeCfg->ui32DefaultDustsNumInit = psDevInfo->sDevFeatureCfg.ui32MAXDustCount;
+ }
+#if defined(PDUMP)
+ PDUMPCOMMENT("Dump initial state of FW runtime configuration");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfRuntimeCfgMemDesc,
+ 0,
+ sizeof(RGXFWIF_RUNTIME_CFG),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+ /* Initialize FW started flag */
+ psRGXFWInitScratch->bFirmwareStarted = IMG_FALSE;
+ psRGXFWInitScratch->ui32MarkerVal = 1;
+
+ /* Initialise the compatibility check data */
+ RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInitScratch->sRGXCompChecks.sFWBVNC);
+ RGXFWIF_COMPCHECKS_BVNC_INIT(psRGXFWInitScratch->sRGXCompChecks.sHWBVNC);
+
+ PDUMPCOMMENT("Dump RGXFW Init data");
+ if (!bEnableSignatureChecks)
+ {
+#if defined(PDUMP)
+ PDUMPCOMMENT("(to enable rgxfw signatures place the following line after the RTCONF line)");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, asSigBufCtl),
+ sizeof(RGXFWIF_SIGBUF_CTL)*(psDevInfo->sDevFeatureCfg.ui32MAXDMCount),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+ psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_3D].sBuffer.ui32Addr = 0x0;
+ psRGXFWInitScratch->asSigBufCtl[RGXFWIF_DM_TA].sBuffer.ui32Addr = 0x0;
+ }
+
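+	/* Clear the per-DM hardware recovery (HWR) statistics counters. */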
+ for (dm = 0; dm < (psDevInfo->sDevFeatureCfg.ui32MAXDMCount); dm++)
+ {
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmLockedUpCount[dm] = 0;
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmOverranCount[dm] = 0;
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmRecoveredCount[dm] = 0;
+ psDevInfo->psRGXFWIfTraceBuf->aui32HwrDmFalseDetectCount[dm] = 0;
+ }
+
+ /*
+ * BIF Tiling configuration
+ */
+
+ psRGXFWInitScratch->eBifTilingMode = eBifTilingMode;
+
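+	/* Four fixed-size BIF tiling heaps; each takes its X-stride from the
+	 * caller-supplied pui32BIFTilingXStrides array. */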
+ psRGXFWInitScratch->sBifTilingCfg[0].uiBase = RGX_BIF_TILING_HEAP_1_BASE;
+ psRGXFWInitScratch->sBifTilingCfg[0].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInitScratch->sBifTilingCfg[0].uiXStride = pui32BIFTilingXStrides[0];
+ psRGXFWInitScratch->sBifTilingCfg[1].uiBase = RGX_BIF_TILING_HEAP_2_BASE;
+ psRGXFWInitScratch->sBifTilingCfg[1].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInitScratch->sBifTilingCfg[1].uiXStride = pui32BIFTilingXStrides[1];
+ psRGXFWInitScratch->sBifTilingCfg[2].uiBase = RGX_BIF_TILING_HEAP_3_BASE;
+ psRGXFWInitScratch->sBifTilingCfg[2].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInitScratch->sBifTilingCfg[2].uiXStride = pui32BIFTilingXStrides[2];
+ psRGXFWInitScratch->sBifTilingCfg[3].uiBase = RGX_BIF_TILING_HEAP_4_BASE;
+ psRGXFWInitScratch->sBifTilingCfg[3].uiLen = RGX_BIF_TILING_HEAP_SIZE;
+ psRGXFWInitScratch->sBifTilingCfg[3].uiXStride = pui32BIFTilingXStrides[3];
+
+ /* update the FW structure proper */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInitActual);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed to acquire kernel fw if ctl (%u)",
+ eError));
+ goto fail;
+ }
+
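+	/* The init structure was built in the host-side scratch buffer; copy the
+	 * complete image into the FW-visible allocation in a single pass. */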
+ OSDeviceMemCopy(psRGXFWInitActual, psRGXFWInitScratch, sizeof(*psRGXFWInitActual));
+
+ /* We don't need access to the fw init data structure anymore */
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ psRGXFWInitActual = NULL;
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Dump rgxfw hwperfctl structure");
+ DevmemPDumpLoadZeroMem (psDevInfo->psRGXFWIfHWPerfCountersMemDesc,
+ 0,
+ ui32HWPerfCountersDataSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("Dump rgxfw trace control structure");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ 0,
+ sizeof(RGXFWIF_TRACEBUF),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("Dump rgx TBI buffer");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfTBIBufferMemDesc,
+ 0,
+ sizeof(SFs),
+ PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PDUMPCOMMENT("Dump rgxfw register configuration buffer");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfRegCfgMemDesc,
+ 0,
+ sizeof(RGXFWIF_REG_CFG),
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+ PDUMPCOMMENT("Dump rgxfw init structure");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfInitMemDesc,
+ 0,
+ sizeof(RGXFWIF_INIT),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* RGXFW Init structure needs to be loaded before we overwrite FaultPhysAddr, else this address patching won't have any effect */
+ PDUMPCOMMENT("Overwrite FaultPhysAddr of FWInit in pdump with actual physical address");
+ RGXPDumpFaultReadRegister(psDevInfo);
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE) &&
+ (0 == RGX_IS_BRN_SUPPORTED(psDevInfo, 50767)))
+ {
+ PDUMPCOMMENT("Dump rgxfw coremem data store");
+ DevmemPDumpLoadMem( psDevInfo->psRGXFWIfCorememDataStoreMemDesc,
+ 0,
+ RGX_META_COREMEM_BSS_SIZE,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ PDUMPCOMMENT("RTCONF: run-time configuration");
+
+
+ /* Dump the config options so they can be edited.
+ *
+ * FIXME: Need new DevmemPDumpWRW API which writes a WRW to load ui32ConfigFlags
+ */
+ PDUMPCOMMENT("(Set the FW config options here)");
+ PDUMPCOMMENT("( Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_TA_EN);
+ PDUMPCOMMENT("( Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_3D_EN);
+ PDUMPCOMMENT("( Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_CDM_EN);
+ PDUMPCOMMENT("( Ctx Switch Rand mode: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_MODE_RAND);
+ PDUMPCOMMENT("( Ctx Switch Soft Reset Enable: 0x%08x)", RGXFWIF_INICFG_CTXSWITCH_SRESET_EN);
+ PDUMPCOMMENT("( Use Extended FW Config flags: 0x%08x)", RGXFWIF_INICFG_USE_EXTENDED);
+ PDUMPCOMMENT("( Rascal+Dust Power Island: 0x%08x)", RGXFWIF_INICFG_POW_RASCALDUST);
+ PDUMPCOMMENT("( Enable HWPerf: 0x%08x)", RGXFWIF_INICFG_HWPERF_EN);
+ PDUMPCOMMENT("( Enable HWR: 0x%08x)", RGXFWIF_INICFG_HWR_EN);
+ PDUMPCOMMENT("( Check MList: 0x%08x)", RGXFWIF_INICFG_CHECK_MLIST_EN);
+ PDUMPCOMMENT("( Disable Auto Clock Gating: 0x%08x)", RGXFWIF_INICFG_DISABLE_CLKGATING_EN);
+ PDUMPCOMMENT("( Enable HWPerf Polling Perf Counter: 0x%08x)", RGXFWIF_INICFG_POLL_COUNTERS_EN);
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, VDM_OBJECT_LEVEL_LLS))
+ {
+ PDUMPCOMMENT("( Ctx Switch Object mode Index: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INDEX);
+ PDUMPCOMMENT("( Ctx Switch Object mode Instance: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_INSTANCE);
+ PDUMPCOMMENT("( Ctx Switch Object mode List: 0x%08x)", RGXFWIF_INICFG_VDM_CTX_STORE_MODE_LIST);
+ }
+
+ PDUMPCOMMENT("( Enable SHG Bypass mode: 0x%08x)", RGXFWIF_INICFG_SHG_BYPASS_EN);
+ PDUMPCOMMENT("( Enable register configuration: 0x%08x)", RGXFWIF_INICFG_REGCONFIG_EN);
+ PDUMPCOMMENT("( Assert on TA Out-of-Memory: 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY);
+ PDUMPCOMMENT("( Disable HWPerf custom counter filter: 0x%08x)", RGXFWIF_INICFG_HWP_DISABLE_FILTER);
+ PDUMPCOMMENT("( Enable HWPerf custom performance timer: 0x%08x)", RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN);
+ PDUMPCOMMENT("( Enable CDM Killing Rand mode: 0x%08x)", RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN);
+ PDUMPCOMMENT("( Enable Ctx Switch profile mode: 0x%08x (none=b'000, fast=b'001, medium=b'010, slow=b'011, nodelay=b'100))", RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK);
+ PDUMPCOMMENT("( Disable DM overlap (except TA during SPM): 0x%08x)", RGXFWIF_INICFG_DISABLE_DM_OVERLAP);
+ PDUMPCOMMENT("( Enable Meta T1 running main code: 0x%08x)", RGXFWIF_INICFG_METAT1_MAIN);
+ PDUMPCOMMENT("( Enable Meta T1 running dummy code: 0x%08x)", RGXFWIF_INICFG_METAT1_DUMMY);
+ PDUMPCOMMENT("( Assert on HWR trigger (page fault, lockup, overrun or poll failure): 0x%08x)", RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER);
+
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfOSConfigDesc,
+ offsetof(RGXFWIF_OS_CONFIG, ui32ConfigFlags),
+ ui32ConfigFlags,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("( Extended FW config options start here )");
+ PDUMPCOMMENT("( Lower Priority Ctx Switch 2D Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TDM);
+ PDUMPCOMMENT("( Lower Priority Ctx Switch TA Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_TA);
+ PDUMPCOMMENT("( Lower Priority Ctx Switch 3D Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_3D);
+ PDUMPCOMMENT("( Lower Priority Ctx Switch CDM Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_CDM);
+ PDUMPCOMMENT("( Lower Priority Ctx Switch SHG Enable: 0x%08x)", RGXFWIF_INICFG_EXT_LOW_PRIO_CS_SHG);
+
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfOSConfigDesc,
+ offsetof(RGXFWIF_OS_CONFIG, ui32ConfigFlagsExt),
+ ui32ConfigFlagsExt,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* default: no filter */
+ psRGXFWInitScratch->sPIDFilter.eMode = RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT;
+ psRGXFWInitScratch->sPIDFilter.asItems[0].uiPID = 0;
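+	/* INCLUDE_ALL_EXCEPT with an empty, zero-terminated exclusion list
+	 * means no PID is filtered out. */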
+
+ PDUMPCOMMENT("( PID filter type: %X=INCLUDE_ALL_EXCEPT, %X=EXCLUDE_ALL_EXCEPT)",
+ RGXFW_PID_FILTER_INCLUDE_ALL_EXCEPT,
+ RGXFW_PID_FILTER_EXCLUDE_ALL_EXCEPT);
+
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sPIDFilter.eMode),
+ psRGXFWInitScratch->sPIDFilter.eMode,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("( PID filter PID/OSID list (Up to %u entries. Terminate with a zero PID))",
+ RGXFWIF_PID_FILTER_MAX_NUM_PIDS);
+ {
+ IMG_UINT32 i;
+
+ /* generate a few WRWs in the pdump stream as an example */
+ for (i = 0; i < MIN(RGXFWIF_PID_FILTER_MAX_NUM_PIDS, 8); i++)
+ {
+ /*
+ * Some compilers cannot cope with the uses of offsetof() below - the specific problem being the use of
+ * a non-const variable in the expression, which it needs to be const. Typical compiler output is
+ * "expression must have a constant value".
+ */
+ const IMG_DEVMEM_OFFSET_T uiPIDOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].uiPID);
+
+ const IMG_DEVMEM_OFFSET_T uiOSIDOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_INIT *)0)->sPIDFilter.asItems[i].ui32OSID);
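+			/* This is the classic manual expansion of offsetof(): the address
+			 * of the member within a struct "placed" at address zero. */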
+
+ PDUMPCOMMENT("(PID and OSID pair %u)", i);
+
+ PDUMPCOMMENT("(PID)");
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ uiPIDOff,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("(OSID)");
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfInitMemDesc,
+ uiOSIDOff,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+
+ /*
+ * Dump the log config so it can be edited.
+ */
+ PDUMPCOMMENT("(Set the log config here)");
+ PDUMPCOMMENT("( Log Type: set bit 0 for TRACE, reset for TBI)");
+ PDUMPCOMMENT("( MAIN Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MAIN);
+ PDUMPCOMMENT("( MTS Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_MTS);
+ PDUMPCOMMENT("( CLEANUP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CLEANUP);
+ PDUMPCOMMENT("( CSW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_CSW);
+ PDUMPCOMMENT("( BIF Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_BIF);
+ PDUMPCOMMENT("( PM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_PM);
+ PDUMPCOMMENT("( RTD Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RTD);
+ PDUMPCOMMENT("( SPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_SPM);
+ PDUMPCOMMENT("( POW Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_POW);
+ PDUMPCOMMENT("( HWR Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWR);
+ PDUMPCOMMENT("( HWP Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_HWP);
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ PDUMPCOMMENT("( RPM Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_RPM);
+ }
+#endif
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, META_DMA))
+ {
+ PDUMPCOMMENT("( DMA Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DMA);
+ }
+ PDUMPCOMMENT("( DEBUG Group Enable: 0x%08x)", RGXFWIF_LOG_TYPE_GROUP_DEBUG);
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ offsetof(RGXFWIF_TRACEBUF, ui32LogType),
+ psDevInfo->psRGXFWIfTraceBuf->ui32LogType,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("Set the HWPerf Filter config here");
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, ui64HWPerfFilter),
+ psRGXFWInitScratch->ui64HWPerfFilter,
+ PDUMP_FLAGS_CONTINUOUS);
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+	PDUMPCOMMENT("(Number of register configurations for types(byte index): pow on(%d), dust change(%d), ta(%d), 3d(%d), cdm(%d), tla(%d), TDM(%d))",
+				RGXFWIF_REG_CFG_TYPE_PWR_ON,
+				RGXFWIF_REG_CFG_TYPE_DUST_CHANGE,
+				RGXFWIF_REG_CFG_TYPE_TA,
+				RGXFWIF_REG_CFG_TYPE_3D,
+				RGXFWIF_REG_CFG_TYPE_CDM,
+				RGXFWIF_REG_CFG_TYPE_TLA,
+				RGXFWIF_REG_CFG_TYPE_TDM);
+
+ {
+ IMG_UINT32 i;
+
+		/* Write 32 bits in each iteration, as required by the PDUMP WRW command. */
+ for (i = 0; i < RGXFWIF_REG_CFG_TYPE_ALL; i += sizeof(IMG_UINT32))
+ {
+ DevmemPDumpLoadMemValue32(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, aui8NumRegsType[i]),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+
+ PDUMPCOMMENT("(Set registers here: address, mask, value)");
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Addr),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Mask),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ DevmemPDumpLoadMemValue64(psDevInfo->psRGXFWIfRegCfgMemDesc,
+ offsetof(RGXFWIF_REG_CFG, asRegConfigs[0].ui64Value),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+#endif /* SUPPORT_USER_REGISTER_CONFIGURATION */
+#endif /* PDUMP */
+
+ /* Perform additional virtualisation initialisation */
+ eError = RGXVzSetupFirmware(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXSetupFirmware: Failed RGXVzSetupFirmware"));
+ goto fail;
+ }
+
+ OSFreeMem(psRGXFWInitScratch);
+
+ psDevInfo->bFirmwareInitialised = IMG_TRUE;
+
+ return PVRSRV_OK;
+
+ fail:
+ if (psDevInfo->psRGXFWIfInitMemDesc != NULL && psRGXFWInitActual != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ }
+
+	if (psRGXFWInitScratch)
+ {
+ OSFreeMem(psRGXFWInitScratch);
+ }
+
+ RGXFreeFirmware(psDevInfo);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXFreeFirmware
+
+ @Description
+
+ Frees all the firmware-related allocations
+
+ @Input psDevInfo
+
+ @Return        None
+
+ ******************************************************************************/
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ psDevInfo->bFirmwareInitialised = IMG_FALSE;
+
+ RGXVzFreeFirmware(psDevInfo->psDeviceNode);
+
+ RGXFreeCCB(psDevInfo,
+ &psDevInfo->psKernelCCBCtl,
+ &psDevInfo->psKernelCCBCtlMemDesc,
+ &psDevInfo->psKernelCCB,
+ &psDevInfo->psKernelCCBMemDesc);
+
+ RGXFreeCCB(psDevInfo,
+ &psDevInfo->psFirmwareCCBCtl,
+ &psDevInfo->psFirmwareCCBCtlMemDesc,
+ &psDevInfo->psFirmwareCCB,
+ &psDevInfo->psFirmwareCCBMemDesc);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFreeCCB(psDevInfo,
+ &psDevInfo->psWorkEstFirmwareCCBCtl,
+ &psDevInfo->psWorkEstFirmwareCCBCtlMemDesc,
+ &psDevInfo->psWorkEstFirmwareCCB,
+ &psDevInfo->psWorkEstFirmwareCCBMemDesc);
+#endif
+
+#if defined(RGXFW_ALIGNCHECKS)
+ if (psDevInfo->psRGXFWAlignChecksMemDesc)
+ {
+ RGXFWFreeAlignChecks(psDevInfo);
+ }
+#endif
+
+ if (psDevInfo->psRGXFWIfOSConfigDesc)
+ {
+		if (psDevInfo->psFWIfOSConfig)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfOSConfigDesc);
+ psDevInfo->psFWIfOSConfig = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfOSConfigDesc);
+ psDevInfo->psRGXFWIfOSConfigDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWSigTAChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigTAChecksMemDesc);
+ psDevInfo->psRGXFWSigTAChecksMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWSig3DChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSig3DChecksMemDesc);
+ psDevInfo->psRGXFWSig3DChecksMemDesc = NULL;
+ }
+
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ if (psDevInfo->psRGXFWSigRTChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigRTChecksMemDesc);
+ psDevInfo->psRGXFWSigRTChecksMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWSigSHChecksMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWSigSHChecksMemDesc);
+ psDevInfo->psRGXFWSigSHChecksMemDesc = NULL;
+ }
+ }
+#endif
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ if (psDevInfo->psCounterBufferMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psCounterBufferMemDesc);
+ psDevInfo->psCounterBufferMemDesc = NULL;
+ }
+#endif
+
+ RGXSetupFaultReadRegisterRollback(psDevInfo);
+
+ if (psDevInfo->psPowSyncPrim != NULL)
+ {
+ SyncPrimFree(psDevInfo->psPowSyncPrim);
+ psDevInfo->psPowSyncPrim = NULL;
+ }
+
+ if (psDevInfo->hSyncPrimContext != (IMG_HANDLE) NULL)
+ {
+ SyncPrimContextDestroy(psDevInfo->hSyncPrimContext);
+ psDevInfo->hSyncPrimContext = (IMG_HANDLE) NULL;
+ }
+
+ if (psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfGpuUtilFWCb != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+ psDevInfo->psRGXFWIfGpuUtilFWCb = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc);
+ psDevInfo->psRGXFWIfGpuUtilFWCbCtlMemDesc = NULL;
+ }
+
+ RGXHWPerfDeinit(psDevInfo);
+
+ if (psDevInfo->psRGXFWIfRuntimeCfgMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfRuntimeCfg != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+ psDevInfo->psRGXFWIfRuntimeCfg = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRuntimeCfgMemDesc);
+ psDevInfo->psRGXFWIfRuntimeCfgMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfHWRInfoBuf != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+ psDevInfo->psRGXFWIfHWRInfoBuf = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc);
+ psDevInfo->psRGXFWIfHWRInfoBufCtlMemDesc = NULL;
+ }
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE) &&
+ (0 == RGX_IS_BRN_SUPPORTED(psDevInfo, 50767)))
+ {
+ if (psDevInfo->psRGXFWIfCorememDataStoreMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfCorememDataStoreMemDesc);
+ psDevInfo->psRGXFWIfCorememDataStoreMemDesc = NULL;
+ }
+ }
+
+ if (psDevInfo->psRGXFWIfTraceBufCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfTraceBuf != NULL)
+ {
+ /* first deinit/free the tracebuffer allocation */
+ RGXTraceBufferDeinit(psDevInfo);
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+ psDevInfo->psRGXFWIfTraceBuf = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfTraceBufCtlMemDesc);
+ psDevInfo->psRGXFWIfTraceBufCtlMemDesc = NULL;
+ }
+
+ if (psDevInfo->psRGXFWIfTBIBufferMemDesc)
+ {
+ RGXTBIBufferDeinit(psDevInfo);
+ }
+
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ if (psDevInfo->psRGXFWIfRegCfgMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfRegCfgMemDesc);
+ psDevInfo->psRGXFWIfRegCfgMemDesc = NULL;
+ }
+#endif
+ if (psDevInfo->psRGXFWIfHWPerfCountersMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfHWPerfCountersMemDesc);
+ psDevInfo->psRGXFWIfHWPerfCountersMemDesc = NULL;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT))
+ {
+ _FreeSLC3Fence(psDevInfo);
+ }
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META) && (psDevInfo->psMETAT1StackMemDesc))
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psMETAT1StackMemDesc);
+ psDevInfo->psMETAT1StackMemDesc = NULL;
+ }
+
+#if defined(SUPPORT_PDVFS)
+ if (psDevInfo->psRGXFWIFCoreClkRateMemDesc)
+ {
+ if (psDevInfo->pui32RGXFWIFCoreClkRate != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+ psDevInfo->pui32RGXFWIFCoreClkRate = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIFCoreClkRateMemDesc);
+ psDevInfo->psRGXFWIFCoreClkRateMemDesc = NULL;
+ }
+#endif
+
+ if (psDevInfo->psRGXFWIfInitMemDesc)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfInitMemDesc);
+ psDevInfo->psRGXFWIfInitMemDesc = NULL;
+ }
+ if (psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfGuestHWRInfoBuf)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc);
+ psDevInfo->psRGXFWIfGuestHWRInfoBuf = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc);
+ psDevInfo->psRGXFWIfGuestHWRInfoBufCtlMemDesc = NULL;
+ }
+ if (psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc)
+ {
+ if (psDevInfo->psRGXFWIfGuestTraceBuf)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc);
+ psDevInfo->psRGXFWIfGuestTraceBuf = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc);
+ psDevInfo->psRGXFWIfGuestTraceBufCtlMemDesc = NULL;
+ }
+ if (psDevInfo->psCompletedMemDesc)
+ {
+ if (psDevInfo->pui32CompletedById)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psCompletedMemDesc);
+ psDevInfo->pui32CompletedById = NULL;
+ }
+ DevmemFwFree(psDevInfo, psDevInfo->psCompletedMemDesc);
+ psDevInfo->psCompletedMemDesc = NULL;
+ }
+ if (psDevInfo->psEndTimeMemDesc)
+ {
+ if (psDevInfo->pui64EndTimeById)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psEndTimeMemDesc);
+ psDevInfo->pui64EndTimeById = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psEndTimeMemDesc);
+ psDevInfo->psEndTimeMemDesc = NULL;
+ }
+ if (psDevInfo->psStartTimeMemDesc)
+ {
+ if (psDevInfo->pui64StartTimeById)
+ {
+ DevmemReleaseCpuVirtAddr(psDevInfo->psStartTimeMemDesc);
+ psDevInfo->pui64StartTimeById = NULL;
+ }
+
+ DevmemFwFree(psDevInfo, psDevInfo->psStartTimeMemDesc);
+ psDevInfo->psStartTimeMemDesc = NULL;
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (psDevInfo->hTimerQueryLock)
+ {
+ OSLockDestroy(psDevInfo->hTimerQueryLock);
+ psDevInfo->hTimerQueryLock = NULL;
+ }
+#endif
+}
+
+
+/******************************************************************************
+ FUNCTION : RGXAcquireKernelCCBSlot
+
+ PURPOSE : Attempts to obtain a slot in the Kernel CCB
+
+ PARAMETERS : psKCCBCtrlMemDesc - memdesc for the kernel CCB control structure
+            : psKCCBCtl - the kernel CCB control structure
+            : pui32Offset - receives the next write offset on success
+
+ RETURNS : PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXAcquireKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+ RGXFWIF_CCB_CTL *psKCCBCtl,
+ IMG_UINT32 *pui32Offset)
+{
+ IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset;
+
+ ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+ ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
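+	/* The kernel CCB is a power-of-two ring, so masking with the wrap mask
+	 * is a cheap modulo; e.g. with a 16-slot ring (wrap mask 0xF) a write
+	 * offset of 15 advances to 0. */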
+
+ /*
+ * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1
+ * executing kick), hence the kernel CCB should not queue more than
+ * 254 commands.
+ */
+ PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+
+#if defined(PDUMP)
+ /* Wait for sufficient CCB space to become available */
+ PDUMPCOMMENTWITHFLAGS(0, "Wait for kCCB woff=%u", ui32NextWriteOffset);
+ DevmemPDumpCBP(psKCCBCtrlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+ ui32NextWriteOffset,
+ 1,
+ (psKCCBCtl->ui32WrapMask + 1));
+#endif
+
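+	/* read == write denotes an empty ring, so one slot is always left
+	 * unused: the ring is full when advancing write would land on read. */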
+ if (ui32NextWriteOffset == psKCCBCtl->ui32ReadOffset)
+ {
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+ }
+ *pui32Offset = ui32NextWriteOffset;
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION : RGXPollKernelCCBSlot
+
+ PURPOSE : Poll for space in Kernel CCB
+
+ PARAMETERS : psKCCBCtrlMemDesc - memdesc for the kernel CCB control structure
+            : psKCCBCtl - the kernel CCB control structure
+
+ RETURNS : PVRSRV_ERROR
+ ******************************************************************************/
+static PVRSRV_ERROR RGXPollKernelCCBSlot(DEVMEM_MEMDESC *psKCCBCtrlMemDesc,
+ RGXFWIF_CCB_CTL *psKCCBCtl)
+{
+ IMG_UINT32 ui32OldWriteOffset, ui32NextWriteOffset;
+
+ ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+ ui32NextWriteOffset = (ui32OldWriteOffset + 1) & psKCCBCtl->ui32WrapMask;
+
+ /*
+ * Note: The MTS can queue up to 255 kicks (254 pending kicks and 1
+ * executing kick), hence the kernel CCB should not queue more than
+ * 254 commands.
+ */
+ PVR_ASSERT(psKCCBCtl->ui32WrapMask < 255);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+
+ if (ui32NextWriteOffset != psKCCBCtl->ui32ReadOffset)
+ {
+ return PVRSRV_OK;
+ }
+ {
+ /*
+ * The following sanity check doesn't impact performance,
+ * since the CPU has to wait for the GPU anyway (full kernel CCB).
+ */
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+ }
+ }
+
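+		/* Back off between polls; WAIT_TRY_COUNT slices the overall
+		 * MAX_HW_TIME_US budget into equal-length waits. */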
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+}
+
+static PVRSRV_ERROR RGXSendCommandRaw(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 uiPdumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ IMG_UINT8 *pui8KCCB = psDevInfo->psKernelCCB;
+ IMG_UINT32 ui32NewWriteOffset;
+ IMG_UINT32 ui32OldWriteOffset = psKCCBCtl->ui32WriteOffset;
+
+#if !defined(PDUMP)
+ PVR_UNREFERENCED_PARAMETER(uiPdumpFlags);
+#else
+ IMG_BOOL bIsInCaptureRange;
+ IMG_BOOL bFirstCommandInBlock;
+ IMG_UINT32 ui32CurrentBlock;
+ IMG_BOOL bPDumpCapturing;
+ IMG_BOOL bPDumpPowTrans = PDUMPPOWCMDINTRANS();
+
+ PDumpIsCaptureFrameKM(&bIsInCaptureRange);
+ bPDumpCapturing = (bIsInCaptureRange || PDUMP_IS_CONTINUOUS(uiPdumpFlags)) && !bPDumpPowTrans;
+
+ PDumpGetCurrentBlockKM(&ui32CurrentBlock);
+	/* In non-block mode pdump, ui32CurrentBlock is always PDUMP_BLOCKNUM_INVALID */
+
+	/* Is this the first command in a new pdump-block? (The first block,
+	 * ui32CurrentBlock == 0, is excluded.) */
+ bFirstCommandInBlock = ((ui32CurrentBlock != PDUMP_BLOCKNUM_INVALID) && (psDevInfo->ui32LastBlockKCCBCtrlDumped != ui32CurrentBlock) && (ui32CurrentBlock > 0));
+
+ /* Drain KCCB before starting FIRST command in NEW pdump-block */
+	if (bFirstCommandInBlock)
+ {
+ /* If power transition has happened in last command, no need to drain KCCB once again */
+		if ((ui32CurrentBlock == 1) && (psDevInfo->bDumpedKCCBCtlAlready))
+ {
+			/* As only the first (ui32CurrentBlock == 0) pdump-block is kept,
+			 * drain the KCCB at the start of the second (ui32CurrentBlock == 1)
+			 * pdump-block alone. This synchronises the script-thread and the
+			 * sim-FW thread at the end of the first pdump-block at playback time.
+			 */
+
+ /* Force the sim-FW to drain the KCCB to catch-up with live positions */
+ eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset, PDUMP_FLAGS_BLKDATA);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandRaw: Problem draining kCCB (%d) in block-mode", eError));
+ goto _RGXSendCommandRaw_Exit;
+ }
+ }
+ else
+ {
+			/* We already synchronised the script-thread and the sim-FW thread
+			 * above, after the first pdump-block. As all other intermediate
+			 * pdump-blocks are discarded, synchronisation can be skipped in them
+			 * and playback can start directly with the last pdump-block.
+			 */
+
+			/* Always force the live-FW thread and the driver-thread to
+			 * synchronise before re-dumping the KCCBCtrl read/write offsets at
+			 * the start of each pdump-block.
+			 */
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+ }
+
+		/* Record the last block number at which we drained and re-dumped the KCCBCtrl */
+ psDevInfo->ui32LastBlockKCCBCtrlDumped = ui32CurrentBlock;
+
+ }
+
+ /* in capture range */
+ if (bPDumpCapturing)
+ {
+ if (!psDevInfo->bDumpedKCCBCtlAlready)
+ {
+ /* entering capture range */
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_TRUE;
+
+ /* Wait for the live FW to catch up */
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXSendCommandRaw: waiting on fw to catch-up, roff: %d, woff: %d",
+ psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset));
+ PVRSRVPollForValueKM((IMG_UINT32 __iomem *)&psKCCBCtl->ui32ReadOffset, ui32OldWriteOffset, 0xFFFFFFFF);
+
+ /* Dump Init state of Kernel CCB control (read and write offset) */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Initial state of kernel CCB Control, roff: %d, woff: %d",
+ psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset);
+
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+ 0,
+ sizeof(RGXFWIF_CCB_CTL),
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+#endif
+
+ psKCCBCmd->eDM = eKCCBType;
+
+ PVR_ASSERT(ui32CmdSize == psKCCBCtl->ui32CmdSize);
+ if (!OSLockIsLocked(psDeviceNode->hPowerLock))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandRaw called without power lock held!"));
+ PVR_ASSERT(OSLockIsLocked(psDeviceNode->hPowerLock));
+ }
+
+ /*
+ * Acquire a slot in the CCB.
+ */
+ eError = RGXAcquireKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psKCCBCtl, &ui32NewWriteOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto _RGXSendCommandRaw_Exit;
+ }
+
+ /*
+ * Copy the command into the CCB.
+ */
+ OSDeviceMemCopy(&pui8KCCB[ui32OldWriteOffset * psKCCBCtl->ui32CmdSize],
+ psKCCBCmd, psKCCBCtl->ui32CmdSize);
+
+ /* ensure kCCB data is written before the offsets */
+ OSWriteMemoryBarrier();
+
+ /* Move past the current command */
+ psKCCBCtl->ui32WriteOffset = ui32NewWriteOffset;
+ /* Force a read-back to memory to avoid posted writes on certain buses */
+ (void) psKCCBCtl->ui32WriteOffset;
+
+
+#if defined(PDUMP)
+ /* in capture range */
+ if (bPDumpCapturing)
+ {
+ /* Dump new Kernel CCB content */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump kCCB cmd for DM %d, woff = %d",
+ eKCCBType,
+ ui32OldWriteOffset);
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBMemDesc,
+ ui32OldWriteOffset * psKCCBCtl->ui32CmdSize,
+ psKCCBCtl->ui32CmdSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Dump new kernel CCB write offset */
+ PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "Dump kCCBCtl woff (added new cmd for DM %d): %d",
+ eKCCBType,
+ ui32NewWriteOffset);
+ DevmemPDumpLoadMem(psDevInfo->psKernelCCBCtlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+ sizeof(IMG_UINT32),
+ uiPdumpFlags);
+
+ /* mimic the read-back of the write from above */
+ DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32WriteOffset),
+ ui32NewWriteOffset,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ uiPdumpFlags);
+
+ }
+
+ /* out of capture range */
+ if (!bPDumpCapturing)
+ {
+ eError = RGXPdumpDrainKCCB(psDevInfo, ui32OldWriteOffset, PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandRaw: problem draining kCCB (%d)", eError));
+ goto _RGXSendCommandRaw_Exit;
+ }
+ }
+#endif
+
+
+ PDUMPCOMMENTWITHFLAGS(uiPdumpFlags, "MTS kick for kernel CCB");
+ /*
+ * Kick the MTS to schedule the firmware.
+ */
+ {
+ IMG_UINT32 ui32MTSRegVal;
+
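+		/* Build the MTS schedule value: the DM field selects the GP DM
+		 * (offset by the driver OSID when running virtualised without the
+		 * GPU_VIRTUALISATION feature); TASK_COUNTED, when present, marks
+		 * the kick as a counted task. */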
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+ !(RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)))
+ {
+#if defined(SUPPORT_STRIP_RENDERING)
+ ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
+#else
+ ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+#endif
+ }
+ else
+ {
+#if defined(SUPPORT_STRIP_RENDERING)
+ ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK);
+#else
+ ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_COUNTED;
+#endif
+ }
+
+
+ __MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+
+ PDUMPREG32(RGX_PDUMPREG_NAME, RGX_CR_MTS_SCHEDULE, ui32MTSRegVal, uiPdumpFlags);
+ }
+
+#if defined(NO_HARDWARE)
+ /* keep the roff updated because fw isn't there to update it */
+ psKCCBCtl->ui32ReadOffset = psKCCBCtl->ui32WriteOffset;
+#endif
+
+ _RGXSendCommandRaw_Exit:
+ return eError;
+}
+
+/******************************************************************************
+ FUNCTION : _AllocDeferredCommand
+
+ PURPOSE : Allocate a KCCB command and add it to KCCB deferred list
+
+ PARAMETERS : psDevInfo RGX device info
+ : eKCCBType Firmware Command type
+ : psKCCBCmd Firmware Command
+ : uiPdumpFlags Pdump flags
+
+ RETURNS : PVRSRV_OK if all went well, PVRSRV_ERROR_RETRY otherwise.
+ ******************************************************************************/
+static PVRSRV_ERROR _AllocDeferredCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 uiPdumpFlags)
+{
+ RGX_DEFERRED_KCCB_CMD *psDeferredCommand;
+
+ psDeferredCommand = OSAllocMem(sizeof(*psDeferredCommand));
+
+ if (!psDeferredCommand)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Deferring a KCCB command failed: allocation failure: requesting retry"));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ psDeferredCommand->sKCCBcmd = *psKCCBCmd;
+ psDeferredCommand->eDM = eKCCBType;
+ psDeferredCommand->uiPdumpFlags = uiPdumpFlags;
+ psDeferredCommand->psDevInfo = psDevInfo;
+
+ if (eKCCBType == RGXFWIF_DM_GP)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Deferring a KCCB GP command. This is not expected"));
+ }
+ OSLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList);
+ dllist_add_to_tail(&(psDevInfo->sKCCBDeferredCommandsListHead), &(psDeferredCommand->sListNode));
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ FUNCTION : _FreeDeferredCommand
+
+ PURPOSE : Remove from the deferred list the sent deferred KCCB command
+
+ PARAMETERS : psNode Node in deferred list
+ : psDeferredKCCBCmd KCCB Command to free
+
+ RETURNS : None
+ ******************************************************************************/
+static void _FreeDeferredCommand(DLLIST_NODE *psNode, RGX_DEFERRED_KCCB_CMD *psDeferredKCCBCmd)
+{
+ dllist_remove_node(psNode);
+ OSFreeMem(psDeferredKCCBCmd);
+}
+
+/******************************************************************************
+ FUNCTION : RGXSendCommandsFromDeferredList
+
+ PURPOSE : Try to send the KCCB commands in the deferred list to the KCCB
+
+ PARAMETERS : psDevInfo RGX device info
+ : bPoll Poll for space in KCCB
+
+ RETURNS : PVRSRV_OK if all commands in the deferred list were sent to the KCCB,
+ PVRSRV_ERROR_KERNEL_CCB_FULL otherwise.
+ ******************************************************************************/
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DLLIST_NODE *psNode, *psNext;
+ RGX_DEFERRED_KCCB_CMD *psTempDeferredKCCBCmd;
+
+ OSLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
+ {
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+ return PVRSRV_OK;
+ }
+
+		/* For every deferred KCCB command, try to send it */
+ dllist_foreach_node(&psDevInfo->sKCCBDeferredCommandsListHead, psNode, psNext)
+ {
+ psTempDeferredKCCBCmd = IMG_CONTAINER_OF(psNode, RGX_DEFERRED_KCCB_CMD, sListNode);
+ eError = RGXSendCommandRaw(psTempDeferredKCCBCmd->psDevInfo,
+ psTempDeferredKCCBCmd->eDM,
+ &(psTempDeferredKCCBCmd->sKCCBcmd),
+ sizeof(psTempDeferredKCCBCmd->sKCCBcmd),
+ psTempDeferredKCCBCmd->uiPdumpFlags);
+ if (eError == PVRSRV_OK)
+ {
+ _FreeDeferredCommand(psNode, psTempDeferredKCCBCmd);
+ }
+ else
+ {
+ if (bPoll)
+ {
+ break;
+ }
+ else
+ {
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+ }
+ }
+ }
+
+ if (bPoll)
+ {
+ eError = RGXPollKernelCCBSlot(psDevInfo->psKernelCCBCtlMemDesc, psDevInfo->psKernelCCBCtl);
+ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+ {
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+ return PVRSRV_ERROR_KERNEL_CCB_FULL;
+ }
+ }
+
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 uiPdumpFlags)
+{
+
+ PVRSRV_ERROR eError;
+ IMG_BOOL bPoll = IMG_FALSE;
+
+ if (eKCCBType == RGXFWIF_DM_GP)
+ {
+ /* Do not defer GP cmds as server will poll for its completion anyway */
+ bPoll = IMG_TRUE;
+ }
+
+ /* First try to Flush all the cmds in deferred list */
+ eError = RGXSendCommandsFromDeferredList(psDevInfo, bPoll);
+ if (eError == PVRSRV_OK)
+ {
+ eError = RGXSendCommandRaw(psDevInfo,
+ eKCCBType,
+ psKCCBCmd,
+ ui32CmdSize,
+ uiPdumpFlags);
+ }
+ /*
+ * If we don't manage to enqueue one of the deferred commands or the command
+ * passed as argument because the KCCB is full, insert the latter into the deferred commands list.
+ * The deferred commands will also be flushed eventually by:
+ * - one more KCCB command sent for any DM
+ * - RGX_MISRHandler_CheckFWActivePowerState
+ */
+ if (eError == PVRSRV_ERROR_KERNEL_CCB_FULL)
+ {
+ eError = _AllocDeferredCommand(psDevInfo, eKCCBType, psKCCBCmd, uiPdumpFlags);
+ }
+ return eError;
+}
+
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+
+ /* Ensure Rogue is powered up before kicking MTS */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to acquire powerlock (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVPowerLock_Exit;
+ }
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXSendCommandWithPowLock: failed to transition Rogue to ON (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ eError = RGXSendCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32CmdSize, ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSendCommandWithPowLock: failed to schedule command (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+#if defined(DEBUG)
+ /* PVRSRVDebugRequest must be called without powerlock */
+ PVRSRVPowerUnlock(psDeviceNode);
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ goto _PVRSRVPowerLock_Exit;
+#endif
+ }
+
+ _PVRSRVSetDevicePowerStateKM_Exit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ _PVRSRVPowerLock_Exit:
+ return eError;
+}
+
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE*) hCmdCompHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSScheduleMISR(psDevInfo->hProcessQueuesMISR);
+}
+
+/*!
+ ******************************************************************************
+
+ @Function RGX_MISRHandler_ScheduleProcessQueues
+
+ @Description - Sends uncounted kick to all the DMs (the FW will process all
+ the queue for all the DMs)
+ ******************************************************************************/
+static void RGX_MISRHandler_ScheduleProcessQueues(void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+
+ /* We don't need to acquire the BridgeLock as this power transition won't
+ send a command to the FW */
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_WARNING, "RGX_MISRHandler_ScheduleProcessQueues: failed to acquire powerlock (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ return;
+ }
+
+ /* Check whether it's worth waking up the GPU */
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) &&
+ (eError == PVRSRV_OK) && (ePowerState == PVRSRV_DEV_POWER_STATE_OFF))
+ {
+ /* For now, guest drivers will always wake-up the GPU */
+ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_BOOL bGPUHasWorkWaiting;
+
+ bGPUHasWorkWaiting =
+ (RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord) == RGXFWIF_GPU_UTIL_STATE_BLOCKED);
+
+ if (!bGPUHasWorkWaiting)
+ {
+ /* all queues are empty, don't wake up the GPU */
+ PVRSRVPowerUnlock(psDeviceNode);
+ return;
+ }
+ }
+
+ PDUMPPOWCMDSTART();
+ /* wake up the GPU */
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_WARNING, "RGX_MISRHandler_ScheduleProcessQueues: failed to transition Rogue to ON (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVPowerUnlock(psDeviceNode);
+ return;
+ }
+
+ /* uncounted kick to the FW */
+ {
+ IMG_UINT32 ui32MTSRegVal;
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) &&
+ !(RGX_IS_FEATURE_SUPPORTED(psDevInfo, GPU_VIRTUALISATION)))
+ {
+ ui32MTSRegVal = ((RGXFWIF_DM_GP + PVRSRV_VZ_DRIVER_OSID) & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+ }
+ else
+ {
+ ui32MTSRegVal = (RGXFWIF_DM_GP & ~RGX_CR_MTS_SCHEDULE_DM_CLRMSK) | RGX_CR_MTS_SCHEDULE_TASK_NON_COUNTED;
+ }
+
+ HTBLOGK(HTB_SF_MAIN_KICK_UNCOUNTED);
+ __MTSScheduleWrite(psDevInfo, ui32MTSRegVal);
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+}
+
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ return OSInstallMISR(phMISR,
+ RGX_MISRHandler_ScheduleProcessQueues,
+ psDeviceNode);
+}
+
+/*!
+ ******************************************************************************
+
+ @Function RGXScheduleCommand
+
+ @Description - Submits a CCB command and kicks the firmware, but first
+                schedules any commands which have to happen beforehand
+
+ @Input psDevInfo - pointer to device info
+ @Input eKCCBType - see RGXFWIF_CMD_*
+ @Input psKCCBCmd - kernel CCB command
+ @Input ui32CmdSize - size of the kernel CCB command
+ @Input ui32CacheOpFence - CPU dcache operation fence
+ @Input ui32PDumpFlags - PDUMP_FLAGS_CONTINUOUS bit set if the pdump flags should be continuous
+
+
+ @Return PVRSRV_ERROR
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32CacheOpFence,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT16 uiMMUSyncUpdate;
+
+ eError = CacheOpFence(eKCCBType, ui32CacheOpFence);
+ if (eError != PVRSRV_OK) goto RGXScheduleCommand_exit;
+
+#if defined(SUPPORT_VALIDATION)
+ /* For validation, force the core to different dust count states with each kick */
+ if ((eKCCBType == RGXFWIF_DM_TA) || (eKCCBType == RGXFWIF_DM_CDM))
+ {
+ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+ {
+ IMG_UINT32 ui32NumDusts = RGXGetNextDustCount(&psDevInfo->sDustReqState, psDevInfo->sDevFeatureCfg.ui32MAXDustCount);
+ PVRSRVDeviceDustCountChange(psDevInfo->psDeviceNode, ui32NumDusts);
+ }
+ }
+#endif
+
+ /* PVRSRVPowerLock guarantees atomicity between commands. This is helpful
+ in a scenario with several applications allocating resources. */
+ eError = PVRSRVPowerLock(psDevInfo->psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto RGXScheduleCommand_exit;
+ }
+
+ /* Ensure device is powered up before sending any commands */
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDevInfo->psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ eError = RGXPreKickCacheCommand(psDevInfo, eKCCBType, &uiMMUSyncUpdate, IMG_FALSE);
+ if (eError != PVRSRV_OK) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+ eError = RGXSendCommand(psDevInfo, eKCCBType, psKCCBCmd, ui32CmdSize, ui32PDumpFlags);
+ if (eError != PVRSRV_OK) goto _PVRSRVSetDevicePowerStateKM_Exit;
+
+ _PVRSRVSetDevicePowerStateKM_Exit:
+ PVRSRVPowerUnlock(psDevInfo->psDeviceNode);
+
+ RGXScheduleCommand_exit:
+ return eError;
+}
+
+/*
+ * RGXCheckFirmwareCCB
+ */
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_FWCCB_CMD *psFwCCBCmd;
+
+ RGXFWIF_CCB_CTL *psFWCCBCtl = psDevInfo->psFirmwareCCBCtl;
+ IMG_UINT8 *psFWCCB = psDevInfo->psFirmwareCCB;
+
+ while (psFWCCBCtl->ui32ReadOffset != psFWCCBCtl->ui32WriteOffset)
+ {
+ /* Point to the next command */
+ psFwCCBCmd = ((RGXFWIF_FWCCB_CMD *)psFWCCB) + psFWCCBCtl->ui32ReadOffset;
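+		/* The FW CCB read offset counts commands, not bytes, hence the
+		 * typed pointer arithmetic above. */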
+
+ HTBLOGK(HTB_SF_MAIN_FWCCB_CMD, psFwCCBCmd->eCmdType);
+		switch (psFwCCBCmd->eCmdType)
+ {
+ case RGXFWIF_FWCCB_CMD_ZSBUFFER_BACKING:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(ZSBUFFER_BACKING, "Request to add backing to ZSBuffer");
+ }
+ RGXProcessRequestZSBufferBacking(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_ZSBUFFER_UNBACKING:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(ZSBUFFER_UNBACKING, "Request to remove backing from ZSBuffer");
+ }
+ RGXProcessRequestZSBufferUnbacking(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdZSBufferBacking.ui32ZSBufferID);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_FREELIST_GROW:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(FREELIST_GROW, "Request to grow the free list");
+ }
+ RGXProcessRequestGrow(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_FREELISTS_RECONSTRUCTION:
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(FREELISTS_RECONSTRUCTION, "Request to reconstruct free lists");
+ }
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d) for %d freelists",
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Freelist reconstruction request (%d/%d) for %d freelists",
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32HwrCounter+1,
+ psDevInfo->psRGXFWIfTraceBuf->ui32HwrCounter+1,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount));
+ }
+
+ RGXProcessRequestFreelistsReconstruction(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.ui32FreelistsCount,
+ psFwCCBCmd->uCmdData.sCmdFreeListsReconstruction.aui32FreelistIDs);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_DOPPLER_MEMORY_GROW:
+ {
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ if (psDevInfo->bPDPEnabled)
+ {
+ PDUMP_PANIC(FREELIST_GROW, "Request to grow the RPM free list");
+ }
+ RGXProcessRequestRPMGrow(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdFreeListGS.ui32FreelistID);
+ }
+#endif
+ /* This is a partial FWCCB command, read partial command type.
+ * All partial commands start with this same information. */
+ RGXFWIF_FWCCB_CMD_PARTIAL_CONTEXT_RESET_DATA *psPartialCmd =
+ &psFwCCBCmd->uCmdData.sCmdPartialContextResetNotification;
+ IMG_UINT32 ui32PartialCmdType = psPartialCmd->ui32PartialCmdType;
+
+ /* Check that the Host expected this command */
+ if ((psDevInfo->ui32ExpectedPartialFWCCBCmd == RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_NONE) ||
+ (psDevInfo->ui32ExpectedPartialFWCCBCmd != ui32PartialCmdType))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Unexpected partial command: expected %u, found %u",
+ __func__, psDevInfo->ui32ExpectedPartialFWCCBCmd, ui32PartialCmdType));
+ }
+ else if (ui32PartialCmdType == RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_CONTEXT_RESET_DATA)
+ {
+ /* Complete processing of the context reset notification command */
+ DevmemIntPFNotify(psDevInfo->psDeviceNode,
+ psDevInfo->psDeviceNode->ui64ContextResetPCAddress,
+ psPartialCmd->sFaultAddress);
+
+ psDevInfo->psDeviceNode->ui64ContextResetPCAddress = 0ULL;
+ }
+
+ psDevInfo->ui32ExpectedPartialFWCCBCmd = RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_NONE;
+
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_CONTEXT_RESET_NOTIFICATION:
+ {
+ DLLIST_NODE *psNode, *psNext;
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_DATA *psCmdContextResetNotification =
+ &psFwCCBCmd->uCmdData.sCmdContextResetNotification;
+ IMG_UINT32 ui32ServerCommonContextID =
+ psCmdContextResetNotification->ui32ServerCommonContextID;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext = NULL;
+
+ OSWRLockAcquireRead(psDevInfo->hCommonCtxtListLock);
+ dllist_foreach_node(&psDevInfo->sCommonCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_COMMON_CONTEXT *psThisContext =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_COMMON_CONTEXT, sListNode);
+
+ if (psThisContext->ui32ContextID == ui32ServerCommonContextID)
+ {
+ psServerCommonContext = psThisContext;
+ break;
+ }
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXCheckFirmwareCCBs: Context 0x%p reset (ID=0x%08x, Reason=%d, JobRef=0x%08x)",
+ psServerCommonContext,
+ psCmdContextResetNotification->ui32ServerCommonContextID,
+ (IMG_UINT32)(psCmdContextResetNotification->eResetReason),
+ psCmdContextResetNotification->ui32ResetJobRef));
+
+ if (psServerCommonContext != NULL)
+ {
+ psServerCommonContext->eLastResetReason = psCmdContextResetNotification->eResetReason;
+ psServerCommonContext->ui32LastResetJobRef = psCmdContextResetNotification->ui32ResetJobRef;
+ }
+ OSWRLockReleaseRead(psDevInfo->hCommonCtxtListLock);
+
+ if (psCmdContextResetNotification->bPageFault)
+ {
+ if (psCmdContextResetNotification->bPageFault &
+ RGXFWIF_FWCCB_CMD_CONTEXT_RESET_PAGE_FAULT_ADDRESS_FLAG)
+ {
+ /* Delay notification until the last part of this command has been received */
+ psDevInfo->psDeviceNode->ui64ContextResetPCAddress =
+ psCmdContextResetNotification->ui64PCAddress;
+
+ psDevInfo->ui32ExpectedPartialFWCCBCmd = RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_CONTEXT_RESET_DATA;
+ }
+ else
+ {
+ IMG_DEV_VIRTADDR sTmpAddr = {0};
+
+ /* Older FW therefore we can't expect more information. Proceed as usual. */
+ DevmemIntPFNotify(psDevInfo->psDeviceNode,
+ psCmdContextResetNotification->ui64PCAddress,
+ sTmpAddr);
+ }
+ }
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_DEBUG_DUMP:
+ {
+			RGXDumpDebugInfo(NULL, NULL, psDevInfo);
+ /* Notify the OS of an issue that triggered a debug dump */
+ OSWarnOn(IMG_TRUE);
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_UPDATE_STATS:
+ {
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ IMG_PID pidTmp = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.pidOwner;
+ IMG_INT32 i32AdjustmentValue = psFwCCBCmd->uCmdData.sCmdUpdateStatsData.i32AdjustmentValue;
+
+ switch (psFwCCBCmd->uCmdData.sCmdUpdateStatsData.eElementToUpdate)
+ {
+				case RGXFWIF_FWCCB_CMD_UPDATE_NUM_PARTIAL_RENDERS:
+				{
+					PVRSRVStatsUpdateRenderContextStats(i32AdjustmentValue, 0, 0, 0, 0, 0, pidTmp);
+					break;
+				}
+				case RGXFWIF_FWCCB_CMD_UPDATE_NUM_OUT_OF_MEMORY:
+				{
+					PVRSRVStatsUpdateRenderContextStats(0, i32AdjustmentValue, 0, 0, 0, 0, pidTmp);
+					break;
+				}
+				case RGXFWIF_FWCCB_CMD_UPDATE_NUM_TA_STORES:
+				{
+					PVRSRVStatsUpdateRenderContextStats(0, 0, i32AdjustmentValue, 0, 0, 0, pidTmp);
+					break;
+				}
+				case RGXFWIF_FWCCB_CMD_UPDATE_NUM_3D_STORES:
+				{
+					PVRSRVStatsUpdateRenderContextStats(0, 0, 0, i32AdjustmentValue, 0, 0, pidTmp);
+					break;
+				}
+				case RGXFWIF_FWCCB_CMD_UPDATE_NUM_SH_STORES:
+				{
+					PVRSRVStatsUpdateRenderContextStats(0, 0, 0, 0, i32AdjustmentValue, 0, pidTmp);
+					break;
+				}
+				case RGXFWIF_FWCCB_CMD_UPDATE_NUM_CDM_STORES:
+				{
+					PVRSRVStatsUpdateRenderContextStats(0, 0, 0, 0, 0, i32AdjustmentValue, pidTmp);
+					break;
+				}
+ }
+#endif
+ break;
+ }
+ case RGXFWIF_FWCCB_CMD_CORE_CLK_RATE_CHANGE:
+ {
+#if defined(SUPPORT_PDVFS)
+ PDVFSProcessCoreClkRateChange(psDevInfo,
+ psFwCCBCmd->uCmdData.sCmdCoreClkRateChange.ui32CoreClkRate);
+#endif
+ break;
+ }
+
+ case RGXFWIF_FWCCB_CMD_REQUEST_GPU_RESTART:
+ {
+ if (psDevInfo->psRGXFWIfTraceBuf != NULL &&
+ psDevInfo->psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_OFF)
+ {
+ PVRSRV_ERROR eError;
+
+ /* Power down... */
+ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_OFF);
+ if (eError == PVRSRV_OK)
+ {
+ /* Clear the FW faulted flags... */
+ psDevInfo->psRGXFWIfTraceBuf->ui32HWRStateFlags &= ~(RGXFWIF_HWR_FW_FAULT|RGXFWIF_HWR_RESTART_REQUESTED);
+
+ /* Power back up again... */
+ eError = PVRSRVSetDeviceSystemPowerState(psDevInfo->psDeviceNode,
+ PVRSRV_SYS_POWER_STATE_ON);
+
+ /* Send a dummy KCCB command to ensure the FW wakes up and checks the queues... */
+ if (eError == PVRSRV_OK)
+ {
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sCmpKCCBCmd,
+ sizeof(sCmpKCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ }
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed firmware restart (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+ }
+ break;
+ }
+
+ default:
+ {
+ /* unknown command */
+ PVR_DPF((PVR_DBG_WARNING, "RGXCheckFirmwareCCB: Unknown Command (eCmdType=0x%08x)", psFwCCBCmd->eCmdType));
+ /* Assert on magic value corruption */
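+			/* Every valid eCmdType carries RGX_CMD_MAGIC_DWORD in its upper
+			 * bits, so a mismatch below indicates FWCCB corruption rather
+			 * than merely an unhandled command. */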
+ PVR_ASSERT((((IMG_UINT32)psFwCCBCmd->eCmdType & RGX_CMD_MAGIC_DWORD_MASK) >> RGX_CMD_MAGIC_DWORD_SHIFT) == RGX_CMD_MAGIC_DWORD);
+ }
+ }
+
+ /* Update read offset */
+ psFWCCBCtl->ui32ReadOffset = (psFWCCBCtl->ui32ReadOffset + 1) & psFWCCBCtl->ui32WrapMask;
+ }
+}
+
+/*
+ * PVRSRVRGXFrameworkCopyCommand
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc,
+ IMG_PBYTE pbyGPUFRegisterList,
+ IMG_UINT32 ui32FrameworkRegisterSize)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_RF_REGISTERS *psRFReg;
+
+ eError = DevmemAcquireCpuVirtAddr(psFWFrameworkMemDesc,
+ (void **)&psRFReg);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkCopyCommand: Failed to map firmware framework memory (%u)",
+				eError));
+ return eError;
+ }
+
+ OSDeviceMemCopy(psRFReg, pbyGPUFRegisterList, ui32FrameworkRegisterSize);
+
+ /* Release the CPU mapping */
+ DevmemReleaseCpuVirtAddr(psFWFrameworkMemDesc);
+
+ /*
+ * Dump the FW framework buffer
+ */
+ PDUMPCOMMENT("Dump FWFramework buffer");
+ DevmemPDumpLoadMem(psFWFrameworkMemDesc, 0, ui32FrameworkRegisterSize, PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXFrameworkCreateKM
+ */
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC **ppsFWFrameworkMemDesc,
+ IMG_UINT32 ui32FrameworkCommandSize)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /*
+ Allocate device memory for the firmware GPU framework state.
+ Sufficient info to kick one or more DMs should be contained in this buffer
+ */
+ PDUMPCOMMENT("Allocate Rogue firmware framework state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ ui32FrameworkCommandSize,
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwGPUFrameworkState",
+ ppsFWFrameworkMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXFrameworkCreateKM: Failed to allocate firmware framework state (%u)",
+				eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_KCCB_CMD sCmdSyncPrim;
+
+ /* Ensure Rogue is powered up before kicking MTS */
+ eError = PVRSRVPowerLock(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to acquire powerlock (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVPowerLock_Exit;
+ }
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to transition Rogue to ON (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ /* Setup sync primitive */
+ eError = SyncPrimSet(psSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set SyncPrim (%u)",
+ __FUNCTION__, eError));
+ goto _SyncPrimSet_Exit;
+ }
+
+ /* prepare a sync command */
+ eError = SyncPrimGetFirmwareAddr(psSyncPrim,
+ &sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to get SyncPrim FW address(%u)",
+ __FUNCTION__, eError));
+ goto _SyncPrimGetFirmwareAddr_Exit;
+ }
+ sCmdSyncPrim.eCmdType = RGXFWIF_KCCB_CMD_SYNC;
+ sCmdSyncPrim.uCmdData.sSyncData.uiUpdateVal = 1;
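+	/* SyncPrimSet() above cleared the sync to 0; the FW writes the update
+	 * value (1) when it processes this command, and the host polls for that
+	 * value below. */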
+
+ PDUMPCOMMENT("RGXWaitForFWOp: Submit Kernel SyncPrim [0x%08x] to DM %d",
+ sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+ /* submit the sync primitive to the kernel CCB */
+ eError = RGXSendCommand(psDevInfo,
+ eDM,
+ &sCmdSyncPrim,
+ sizeof(RGXFWIF_KCCB_CMD),
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to schedule Kernel SyncPrim with error (%u)",
+ __FUNCTION__,
+ eError));
+ goto _RGXSendCommandRaw_Exit;
+ }
+
+ /* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+	PDUMPCOMMENT("RGXWaitForFWOp: Poll for Kernel SyncPrim [0x%08x] on DM %d",
+	             sCmdSyncPrim.uCmdData.sSyncData.sSyncObjDevVAddr.ui32Addr, eDM);
+
+ SyncPrimPDumpPol(psSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+#endif
+
+ {
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
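+		/* Occupancy of the circular kernel CCB: adding WrapMask+1 before
+		 * subtracting keeps the unsigned arithmetic non-negative, and the
+		 * final mask folds the result back into range. The retry budget
+		 * below allows roughly three wait timeouts per outstanding entry. */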
+ IMG_UINT32 ui32CurrentQueueLength =
+ (psKCCBCtl->ui32WrapMask+1 +
+ psKCCBCtl->ui32WriteOffset -
+ psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+ IMG_UINT32 ui32MaxRetries;
+
+ for (ui32MaxRetries = (ui32CurrentQueueLength + 1) * 3;
+ ui32MaxRetries > 0;
+ ui32MaxRetries--)
+ {
+ eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, 1, 0xffffffff);
+
+ if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ break;
+ }
+ }
+
+ if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+			PVR_DPF((PVR_DBG_ERROR,"%s: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dumping debug information.",
+ __FUNCTION__));
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ PVR_ASSERT(eError != PVRSRV_ERROR_TIMEOUT);
+ goto _PVRSRVDebugRequest_Exit;
+ }
+ }
+
+ _RGXSendCommandRaw_Exit:
+ _SyncPrimGetFirmwareAddr_Exit:
+ _SyncPrimSet_Exit:
+ _PVRSRVSetDevicePowerStateKM_Exit:
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ _PVRSRVDebugRequest_Exit:
+ _PVRSRVPowerLock_Exit:
+ return eError;
+}
+
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_UINT32 *pui32ConfigState,
+ IMG_BOOL bSetNotClear)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ RGXFWIF_KCCB_CMD sStateFlagCmd;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ RGXFWIF_OS_CONFIG *psOSConfig;
+
+ if (!psDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psDeviceNode = psDevInfo->psDeviceNode;
+ psOSConfig = psDevInfo->psFWIfOSConfig;
+
+ if (NULL == psOSConfig)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: OS Config is not mapped into CPU space", __func__));
+ return PVRSRV_ERROR_INVALID_CPU_ADDR;
+ }
+
+ /* apply change and ensure the new data is written to memory
+ * before requesting the FW to read it
+ */
+ ui32Config = ui32Config & RGXFWIF_INICFG_ALL;
+ if (bSetNotClear)
+ {
+ psOSConfig->ui32ConfigFlags |= ui32Config;
+ }
+ else
+ {
+ psOSConfig->ui32ConfigFlags &= ~ui32Config;
+ }
+
+ /* return current/new value to caller */
+ if (pui32ConfigState)
+ {
+ *pui32ConfigState = psOSConfig->ui32ConfigFlags;
+ }
+
+ OSMemoryBarrier();
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire power lock (%u)", __func__, eError));
+ goto error_lock;
+ }
+
+ /* notify FW to update setting */
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+ {
+ /* Ask the FW to update its cached version of the value */
+ sStateFlagCmd.eCmdType = RGXFWIF_KCCB_CMD_STATEFLAGS_CTRL;
+
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sStateFlagCmd,
+ sizeof(sStateFlagCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: RGXSendCommand failed. Error:%u", __func__, eError));
+ goto error_cmd;
+ }
+ else
+ {
+			/* Give up the power lock as it is acquired again in RGXWaitForFWOp */
+ PVRSRVPowerUnlock(psDeviceNode);
+
+			/* Wait for the value to be updated: the FW validates
+			 * the parameters and modifies ui32ConfigFlags
+			 * accordingly (for completeness; registered callbacks
+			 * should also reject invalid transitions).
+			 */
+ eError = RGXWaitForFWOp(psDevInfo, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Waiting for value aborted with error (%u)", __func__, eError));
+ }
+ goto error_lock;
+ }
+ }
+
+ error_cmd:
+ PVRSRVPowerUnlock(psDeviceNode);
+ error_lock:
+ return eError;
+}
+
+static
+PVRSRV_ERROR RGXScheduleCleanupCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ RGXFWIF_CLEANUP_TYPE eCleanupType,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ psKCCBCmd->eCmdType = RGXFWIF_KCCB_CMD_CLEANUP;
+
+ psKCCBCmd->uCmdData.sCleanupData.eCleanupType = eCleanupType;
+ eError = SyncPrimGetFirmwareAddr(psSyncPrim, &psKCCBCmd->uCmdData.sCleanupData.sSyncObjDevVAddr.ui32Addr);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_command;
+ }
+
+ eError = SyncPrimSet(psSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_command;
+ }
+
+ /*
+ Send the cleanup request to the firmware. If the resource is still busy
+ the firmware will tell us and we'll drop out with a retry.
+ */
+ eError = RGXScheduleCommand(psDevInfo,
+ eDM,
+ psKCCBCmd,
+ ui32CmdSize,
+ 0,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_command;
+ }
+
+ /* Wait for sync primitive to be updated */
+#if defined(PDUMP)
+ PDUMPCOMMENT("Wait for the firmware to reply to the cleanup command");
+ SyncPrimPDumpPol(psSyncPrim,
+ RGXFWIF_CLEANUP_RUN,
+ RGXFWIF_CLEANUP_RUN,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+
+ /*
+ * The cleanup request to the firmware will tell us if a given resource is busy or not.
+ * If the RGXFWIF_CLEANUP_BUSY flag is set, this means that the resource is still in use.
+	 * In this case we return a PVRSRV_ERROR_RETRY error to the client drivers and they will
+	 * re-issue the cleanup request until it succeeds.
+	 *
+	 * Since this retry mechanism doesn't work for pdumps, client drivers should ensure
+	 * that cleanup requests are only submitted if the resource is unused.
+	 * If this is not the case, the following poll will block indefinitely, making sure
+	 * the issue doesn't go unnoticed.
+ */
+ PDUMPCOMMENT("Cleanup: If this poll fails, the following resource is still in use (DM=%u, type=%u, address=0x%08x), which is incorrect in pdumps",
+ eDM,
+ psKCCBCmd->uCmdData.sCleanupData.eCleanupType,
+ psKCCBCmd->uCmdData.sCleanupData.uCleanupData.psContext.ui32Addr);
+ SyncPrimPDumpPol(psSyncPrim,
+ 0,
+ RGXFWIF_CLEANUP_BUSY,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+#endif
+
+ {
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
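+		/* Same circular-buffer occupancy calculation as in RGXWaitForFWOp;
+		 * here the budget is one wait timeout per outstanding entry. */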
+ IMG_UINT32 ui32CurrentQueueLength = (psKCCBCtl->ui32WrapMask+1 +
+ psKCCBCtl->ui32WriteOffset -
+ psKCCBCtl->ui32ReadOffset) & psKCCBCtl->ui32WrapMask;
+ IMG_UINT32 ui32MaxRetries;
+
+ for (ui32MaxRetries = ui32CurrentQueueLength + 1;
+ ui32MaxRetries > 0;
+ ui32MaxRetries--)
+ {
+ eError = PVRSRVWaitForValueKMAndHoldBridgeLockKM(psSyncPrim->pui32LinAddr, RGXFWIF_CLEANUP_RUN, RGXFWIF_CLEANUP_RUN);
+
+ if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ break;
+ }
+ }
+
+ /*
+ If the firmware hasn't got back to us in a timely manner
+ then bail and let the caller retry the command.
+ */
+ if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+			PVR_DPF((PVR_DBG_WARNING,"RGXScheduleCleanupCommand: PVRSRVWaitForValueKMAndHoldBridgeLock timed out. Dumping debug information."));
+
+ eError = PVRSRV_ERROR_RETRY;
+#if defined(DEBUG)
+ PVRSRVDebugRequest(psDevInfo->psDeviceNode,
+ DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+#endif
+ goto fail_poll;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ goto fail_poll;
+ }
+ }
+
+ /*
+	   If the command was run but a resource was busy, then the request
+	   will need to be retried.
+ */
+ if (OSReadDeviceMem32(psSyncPrim->pui32LinAddr) & RGXFWIF_CLEANUP_BUSY)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ goto fail_requestbusy;
+ }
+
+ return PVRSRV_OK;
+
+ fail_requestbusy:
+ fail_poll:
+ fail_command:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+	RGXFWRequestCommonContextCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ RGXFWIF_DM eDM,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ RGXFWIF_KCCB_CMD sRCCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+ PRGXFWIF_FWCOMMONCONTEXT psFWCommonContextFWAddr;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+
+ /* Force retry if this context's CCB is currently being dumped
+ * as part of the stalled CCB debug */
+ if (psDevInfo->pvEarliestStalledClientCCB == (void*)psServerCommonContext->psClientCCB)
+ {
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ psFWCommonContextFWAddr = FWCommonContextGetFWAddress(psServerCommonContext);
+
+ PDUMPCOMMENT("Common ctx cleanup Request DM%d [context = 0x%08x]",
+ eDM, psFWCommonContextFWAddr.ui32Addr);
+ PDUMPCOMMENT("Wait for CCB to be empty before common ctx cleanup");
+
+ RGXCCBPDumpDrainCCB(FWCommonContextGetClientCCB(psServerCommonContext), ui32PDumpFlags);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sRCCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psContext = psFWCommonContextFWAddr;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+ eDM,
+ &sRCCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_FWCOMMONCONTEXT,
+ psSyncPrim,
+ ui32PDumpFlags);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestCommonContextCleanUp: Failed to schedule a common context cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ * RGXFWRequestHWRTDataCleanUp
+ */
+
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_HWRTDATA psHWRTData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM)
+{
+ RGXFWIF_KCCB_CMD sHWRTDataCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("HW RTData cleanup Request DM%d [HWRTData = 0x%08x]", eDM, psHWRTData.ui32Addr);
+
+ sHWRTDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWRTData = psHWRTData;
+
+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+ eDM,
+ &sHWRTDataCleanUpCmd,
+ sizeof(sHWRTDataCleanUpCmd),
+ RGXFWIF_CLEANUP_HWRTDATA,
+ psSync,
+ PDUMP_FLAGS_NONE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestHWRTDataCleanUp: Failed to schedule a HWRTData cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ RGXFWRequestFreeListCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_FREELIST psFWFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("Free list cleanup Request [FreeList = 0x%08x]", psFWFreeList.ui32Addr);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psFreelist = psFWFreeList;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sFLCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_FREELIST,
+ psSync,
+ PDUMP_FLAGS_NONE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestFreeListCleanUp: Failed to schedule a free list cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ RGXFWRequestZSBufferCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_ZSBUFFER psFWZSBuffer,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ RGXFWIF_KCCB_CMD sZSBufferCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("ZS Buffer cleanup Request [ZS Buffer = 0x%08x]", psFWZSBuffer.ui32Addr);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sZSBufferCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psZSBuffer = psFWZSBuffer;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDevInfo,
+ RGXFWIF_DM_3D,
+ &sZSBufferCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_ZSBUFFER,
+ psSync,
+ PDUMP_FLAGS_NONE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestZSBufferCleanUp: Failed to schedule a ZS buffer cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+
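+/*
+	RGXFWRequestRayFrameDataCleanUp
+ */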
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM)
+{
+ RGXFWIF_KCCB_CMD sHWFrameDataCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("HW FrameData cleanup Request DM%d [HWFrameData = 0x%08x]", eDM, psHWFrameData.ui32Addr);
+
+ sHWFrameDataCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psHWFrameData = psHWFrameData;
+
+ eError = RGXScheduleCleanupCommand(psDeviceNode->pvDevice,
+ eDM,
+ &sHWFrameDataCleanUpCmd,
+ sizeof(sHWFrameDataCleanUpCmd),
+ RGXFWIF_CLEANUP_HWFRAMEDATA,
+ psSync,
+ PDUMP_FLAGS_NONE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRayFrameDataCleanUp: Failed to schedule a HWFrameData cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+/*
+ RGXFWRequestRPMFreeListCleanUp
+ */
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ RGXFWIF_KCCB_CMD sFLCleanUpCmd = {0};
+ PVRSRV_ERROR eError;
+
+ PDUMPCOMMENT("RPM Free list cleanup Request [RPM FreeList = 0x%08x]", psFWRPMFreeList.ui32Addr);
+
+ /* Setup our command data, the cleanup call will fill in the rest */
+ sFLCleanUpCmd.uCmdData.sCleanupData.uCleanupData.psRPMFreelist = psFWRPMFreeList;
+
+ /* Request cleanup of the firmware resource */
+ eError = RGXScheduleCleanupCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sFLCleanUpCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ RGXFWIF_CLEANUP_RPM_FREELIST,
+ psSync,
+ PDUMP_FLAGS_NONE);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+		PVR_DPF((PVR_DBG_ERROR,"RGXFWRequestRPMFreeListCleanUp: Failed to schedule an RPM free list cleanup with error (%u)", eError));
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32HCSDeadlineMs)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sSetHCSDeadline;
+
+ sSetHCSDeadline.eCmdType = RGXFWIF_KCCB_CMD_HCS_SET_DEADLINE;
+ sSetHCSDeadline.eDM = RGXFWIF_DM_GP;
+ sSetHCSDeadline.uCmdData.sHCSCtrl.ui32HCSDeadlineMS = ui32HCSDeadlineMs;
+
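+	/* RGXScheduleCommand() returns PVRSRV_ERROR_RETRY while the command
+	 * cannot be queued (typically a full kernel CCB); back off briefly and
+	 * retry until the overall hardware timeout expires. */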
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sSetHCSDeadline,
+ sizeof(sSetHCSDeadline),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sOSConfigCmdData;
+ PRGXFWIF_OS_CONFIG sOSConfigFWAddr;
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_INTERNAL_ERROR);
+
+ RGXSetFirmwareAddress(&sOSConfigFWAddr, psDevInfo->psRGXFWIfOSConfigDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+ sOSConfigCmdData.eCmdType = RGXFWIF_KCCB_CMD_OS_CFG_INIT;
+ sOSConfigCmdData.eDM = RGXFWIF_DM_GP;
+ sOSConfigCmdData.uCmdData.sCmdOSConfigData.sOSConfig = sOSConfigFWAddr;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSConfigCmdData,
+ sizeof(sOSConfigCmdData),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32IsolationPriorityThreshold)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sOSidIsoConfCmd;
+
+ sOSidIsoConfCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ISOLATION_GROUP_CHANGE;
+ sOSidIsoConfCmd.uCmdData.sCmdOSidIsolationData.ui32IsolationPriorityThreshold = ui32IsolationPriorityThreshold;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSidIsoConfCmd,
+ sizeof(sOSidIsoConfCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWSetVMOnlineState(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ RGXFWIF_OS_STATE_CHANGE eOSOnlineState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sOSOnlineStateCmd;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ volatile IMG_UINT32 *pui32OSStateFlags;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ sOSOnlineStateCmd.eCmdType = RGXFWIF_KCCB_CMD_OS_ONLINE_STATE_CONFIGURE;
+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.ui32OSid = ui32OSid;
+ sOSOnlineStateCmd.uCmdData.sCmdOSOnlineStateData.eNewOSState = eOSOnlineState;
+
+ if (eOSOnlineState == RGXFWIF_OS_ONLINE)
+ {
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSOnlineStateCmd,
+ sizeof(sOSOnlineStateCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+ }
+
+ if (psRGXFWIfTraceBuf == NULL)
+ {
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+ pui32OSStateFlags = (volatile IMG_UINT32*) &psRGXFWIfTraceBuf->ui32OSStateFlags[ui32OSid];
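+	/* The FW updates these flags asynchronously; reading them through a
+	 * volatile pointer (after the barrier below) ensures each iteration of
+	 * the poll observes the latest value. */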
+
+ /* Attempt several times until the FW manages to offload the OS */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ IMG_UINT32 ui32OSStateFlags;
+
+ /* Send request */
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSOnlineStateCmd,
+ sizeof(sOSOnlineStateCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (unlikely(eError == PVRSRV_ERROR_RETRY))
+ {
+ continue;
+ }
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", return_);
+
+ /* Wait for FW to process the cmd */
+ eError = RGXWaitForFWOp(psDevInfo,
+ RGXFWIF_DM_GP,
+ psDevInfo->psDeviceNode->psSyncPrim,
+ PDUMP_FLAGS_CONTINUOUS);
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+ /* read the OS state */
+ OSMemoryBarrier();
+ ui32OSStateFlags = *pui32OSStateFlags;
+
+ if ((ui32OSStateFlags & RGXFW_OS_STATE_ACTIVE_OS) == 0)
+ {
+ /* FW finished offloading the OSID */
+ eError = PVRSRV_OK;
+ break;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_TIMEOUT;
+ }
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+
+ } END_LOOP_UNTIL_TIMEOUT();
+
+	return_:
+ return eError;
+}
+
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sOSidPriorityCmd;
+
+ sOSidPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_OSID_PRIORITY_CHANGE;
+ sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32OSidNum = ui32OSid;
+ sOSidPriorityCmd.uCmdData.sCmdOSidPriorityData.ui32Priority = ui32Priority;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sOSidPriorityCmd,
+ sizeof(sOSidPriorityCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ return eError;
+}
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+ CONNECTION_DATA *psConnection,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Priority,
+ RGXFWIF_DM eDM)
+{
+ IMG_UINT32 ui32CmdSize;
+ IMG_UINT8 *pui8CmdPtr;
+ RGXFWIF_KCCB_CMD sPriorityCmd;
+ RGXFWIF_CCB_CMD_HEADER *psCmdHeader;
+ RGXFWIF_CMD_PRIORITY *psCmd;
+ PVRSRV_ERROR eError;
+
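+	/* The priority change is delivered in two stages: a PRIORITY command is
+	 * written into the context's client CCB, then a KCCB kick tells the
+	 * firmware to process that CCB up to the new write offset. */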
+ /*
+ Get space for command
+ */
+ ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CCB_CMD_HEADER) + sizeof(RGXFWIF_CMD_PRIORITY));
+
+ eError = RGXAcquireCCB(FWCommonContextGetClientCCB(psContext),
+ ui32CmdSize,
+ (void **) &pui8CmdPtr,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire space for client CCB", __FUNCTION__));
+ }
+ goto fail_ccbacquire;
+ }
+
+ /*
+ Write the command header and command
+ */
+ psCmdHeader = (RGXFWIF_CCB_CMD_HEADER *) pui8CmdPtr;
+ psCmdHeader->eCmdType = RGXFWIF_CCB_CMD_TYPE_PRIORITY;
+ psCmdHeader->ui32CmdSize = RGX_CCB_FWALLOC_ALIGN(sizeof(RGXFWIF_CMD_PRIORITY));
+ pui8CmdPtr += sizeof(*psCmdHeader);
+
+ psCmd = (RGXFWIF_CMD_PRIORITY *) pui8CmdPtr;
+ psCmd->ui32Priority = ui32Priority;
+ pui8CmdPtr += sizeof(*psCmd);
+
+	/*
+	   We should reserve space in the kernel CCB here and fill in the command
+	   directly.
+	   This is so that, if there is no space in the kernel CCB, we can return
+	   with a retry to the Services client before performing any operations.
+	*/
+
+ /*
+ Submit the command
+ */
+ RGXReleaseCCB(FWCommonContextGetClientCCB(psContext),
+ ui32CmdSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to release space in client CCB", __FUNCTION__));
+ return eError;
+ }
+
+ /* Construct the priority command. */
+ sPriorityCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sPriorityCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psContext);
+ sPriorityCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psContext));
+ sPriorityCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ sPriorityCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ eDM,
+ &sPriorityCmd,
+ sizeof(sPriorityCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,"ContextSetPriority: Failed to submit set priority command with error (%u)", eError));
+		return eError;
+	}
+
+	return PVRSRV_OK;
+
+ fail_ccbacquire:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ RGXReadMETAAddr
+ */
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 *pui32Value)
+{
+ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+ IMG_UINT32 ui32Value;
+
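+	/* The META slave port is an indirect register interface: wait for it to
+	 * go idle, write the target address (with RD_EN) to MSLVCTRL0, wait for
+	 * the read to complete, then fetch the data from MSLVDATAX. */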
+ /* Wait for Slave Port to be Ready */
+ if (PVRSRVPollForValueKM(
+ (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* Issue the Read */
+ OSWriteHWReg32(
+ psDevInfo->pvRegsBaseKM,
+ RGX_CR_META_SP_MSLVCTRL0,
+ ui32METAAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+ /* Wait for Slave Port to be Ready: read complete */
+ if (PVRSRVPollForValueKM(
+ (IMG_UINT32 __iomem *) (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* Read the value */
+ ui32Value = OSReadHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAX);
+
+ *pui32Value = ui32Value;
+
+ return PVRSRV_OK;
+}
+
+/*
+ RGXWriteMETAAddr
+ */
+PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32METAAddr, IMG_UINT32 ui32Value)
+{
+ IMG_UINT8 __iomem *pui8RegBase = psDevInfo->pvRegsBaseKM;
+
+ /* Wait for Slave Port to be Ready */
+ if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)
+ (pui8RegBase + RGX_CR_META_SP_MSLVCTRL1),
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN) != PVRSRV_OK)
+ {
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ /* Issue the Write */
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVCTRL0, ui32METAAddr);
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_META_SP_MSLVDATAT, ui32Value);
+
+ return PVRSRV_OK;
+}
+
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious)
+{
+ /* Attempt to detect and deal with any stalled client contexts.
+ * bIgnorePrevious may be set by the caller if they know a context to be
+ * stalled, as otherwise this function will only identify stalled
+ * contexts which have not been previously reported.
+ */
+
+ IMG_UINT32 ui32StalledClientMask = 0;
+
+ if (!(OSTryLockAcquire(psDevInfo->hCCBStallCheckLock)))
+ {
+ PVR_LOG(("RGXCheckForStalledClientContexts: Failed to acquire hCCBStallCheckLock, returning..."));
+ return;
+ }
+
+ ui32StalledClientMask |= CheckForStalledClientTransferCtxt(psDevInfo);
+
+ ui32StalledClientMask |= CheckForStalledClientRenderCtxt(psDevInfo);
+
+ ui32StalledClientMask |= CheckForStalledClientKickSyncCtxt(psDevInfo);
+
+#if !defined(UNDER_WDDM)
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_COMPUTE_BIT_MASK)
+ {
+ ui32StalledClientMask |= CheckForStalledClientComputeCtxt(psDevInfo);
+ }
+#endif
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK)
+ {
+ ui32StalledClientMask |= CheckForStalledClientRayCtxt(psDevInfo);
+ }
+
+ /* If at least one DM stalled bit is different than before */
+	if (bIgnorePrevious || (psDevInfo->ui32StalledClientMask != ui32StalledClientMask))
+ {
+ if (ui32StalledClientMask > 0)
+ {
+ static __maybe_unused const char *pszStalledAction =
+#if defined(PVRSRV_STALLED_CCB_ACTION)
+ "force";
+#else
+ "warn";
+#endif
+ /* Print all the stalled DMs */
+ PVR_LOG(("Possible stalled client RGX contexts detected: %s%s%s%s%s%s%s%s%s",
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_GP),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TDM_2D),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TA),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_3D),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_CDM),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_RTU),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_SHG),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ2D),
+ RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32StalledClientMask, RGX_KICK_TYPE_DM_TQ3D)));
+
+ PVR_LOG(("Trying to identify stalled context...(%s) [%d]",
+ pszStalledAction, bIgnorePrevious));
+
+ DumpStalledContextInfo(psDevInfo);
+ }
+ else
+ {
+			if (psDevInfo->ui32StalledClientMask > 0)
+ {
+ /* Indicate there are no stalled DMs */
+ PVR_LOG(("No further stalled client contexts exist"));
+ }
+ }
+ psDevInfo->ui32StalledClientMask = ui32StalledClientMask;
+ }
+ OSLockRelease(psDevInfo->hCCBStallCheckLock);
+}
+
+/*
+ RGXUpdateHealthStatus
+ */
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+ IMG_BOOL bCheckAfterTimePassed)
+{
+ PVRSRV_DATA* psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_HEALTH_STATUS eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_OK;
+ PVRSRV_DEVICE_HEALTH_REASON eNewReason = PVRSRV_DEVICE_HEALTH_REASON_NONE;
+ PVRSRV_RGXDEV_INFO* psDevInfo;
+ RGXFWIF_TRACEBUF* psRGXFWIfTraceBufCtl;
+ RGXFWIF_CCB_CTL *psKCCBCtl;
+ IMG_UINT32 ui32ThreadCount;
+ IMG_BOOL bKCCBCmdsWaiting;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ PVR_ASSERT(psDevNode != NULL);
+ psDevInfo = psDevNode->pvDevice;
+ psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* If the firmware is not initialised, there is not much point continuing! */
+ if (!psDevInfo->bFirmwareInitialised || psDevInfo->pvRegsBaseKM == NULL ||
+ psDevInfo->psDeviceNode == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* If this is a quick update, then include the last current value... */
+ if (!bCheckAfterTimePassed)
+ {
+ eNewStatus = OSAtomicRead(&psDevNode->eHealthStatus);
+ eNewReason = OSAtomicRead(&psDevNode->eHealthReason);
+ }
+
+ /* If Rogue is not powered on, just skip ahead and check for stalled client CCBs */
+ if (PVRSRVIsDevicePowered(psDevNode))
+ {
+ /*
+ Firmware thread checks...
+ */
+ for (ui32ThreadCount = 0; ui32ThreadCount < RGXFW_THREAD_NUM; ui32ThreadCount++)
+ {
+ if (psRGXFWIfTraceBufCtl != NULL)
+ {
+ IMG_CHAR* pszTraceAssertInfo = psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szInfo;
+
+ /*
+ Check if the FW has hit an assert...
+ */
+ if (*pszTraceAssertInfo != '\0')
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware thread %d has asserted: %s (%s:%d)",
+ __FUNCTION__, ui32ThreadCount, pszTraceAssertInfo,
+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.szPath,
+ psRGXFWIfTraceBufCtl->sTraceBuf[ui32ThreadCount].sAssertBuf.ui32LineNum));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_ASSERTED;
+ goto _RGXUpdateHealthStatus_Exit;
+ }
+
+ /*
+ Check the threads to see if they are in the same poll locations as last time...
+ */
+ if (bCheckAfterTimePassed)
+ {
+ if (psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] != 0 &&
+ psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] == psDevInfo->aui32CrLastPollAddr[ui32ThreadCount])
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Firmware stuck on CR poll: T%u polling %s (reg:0x%08X mask:0x%08X)",
+ __FUNCTION__, ui32ThreadCount,
+ ((psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & RGXFW_POLL_TYPE_SET)?("set"):("unset")),
+ psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount] & ~RGXFW_POLL_TYPE_SET,
+ psRGXFWIfTraceBufCtl->aui32CrPollMask[ui32ThreadCount]));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_POLL_FAILING;
+ goto _RGXUpdateHealthStatus_Exit;
+ }
+ psDevInfo->aui32CrLastPollAddr[ui32ThreadCount] = psRGXFWIfTraceBufCtl->aui32CrPollAddr[ui32ThreadCount];
+ }
+ }
+ }
+
+ /*
+ Check if the FW has faulted...
+ */
+ if (psRGXFWIfTraceBufCtl && (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_FW_FAULT))
+ {
+			PVR_DPF((PVR_DBG_WARNING, "%s: Firmware has faulted and needs to restart", __FUNCTION__));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_FAULT;
+ if (psRGXFWIfTraceBufCtl->ui32HWRStateFlags & RGXFWIF_HWR_RESTART_REQUESTED)
+ {
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_RESTARTING;
+ }
+ else
+ {
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_IDLING;
+ }
+ goto _RGXUpdateHealthStatus_Exit;
+ }
+
+ /*
+ Event Object Timeouts check...
+ */
+ if (!bCheckAfterTimePassed)
+ {
+ if (psDevInfo->ui32GEOTimeoutsLastTime > 1 && psPVRSRVData->ui32GEOConsecutiveTimeouts > psDevInfo->ui32GEOTimeoutsLastTime)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Global Event Object Timeouts have risen (from %d to %d)",
+ __FUNCTION__,
+ psDevInfo->ui32GEOTimeoutsLastTime, psPVRSRVData->ui32GEOConsecutiveTimeouts));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_TIMEOUTS;
+ }
+ psDevInfo->ui32GEOTimeoutsLastTime = psPVRSRVData->ui32GEOConsecutiveTimeouts;
+ }
+
+ /*
+ Check the Kernel CCB pointer is valid. If any commands were waiting last time, then check
+ that some have executed since then.
+ */
+ bKCCBCmdsWaiting = IMG_FALSE;
+ psKCCBCtl = psDevInfo->psKernelCCBCtl;
+
+ if (psKCCBCtl != NULL)
+ {
+ if (psKCCBCtl->ui32ReadOffset > psKCCBCtl->ui32WrapMask ||
+ psKCCBCtl->ui32WriteOffset > psKCCBCtl->ui32WrapMask)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: KCCB has invalid offset (ROFF=%d WOFF=%d)",
+ __FUNCTION__, psKCCBCtl->ui32ReadOffset, psKCCBCtl->ui32WriteOffset));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_DEAD;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_CORRUPT;
+ }
+
+ if (psKCCBCtl->ui32ReadOffset != psKCCBCtl->ui32WriteOffset)
+ {
+ bKCCBCmdsWaiting = IMG_TRUE;
+ }
+ }
+
+ if (bCheckAfterTimePassed && psDevInfo->psRGXFWIfTraceBuf != NULL)
+ {
+ IMG_UINT32 ui32KCCBCmdsExecuted = psDevInfo->psRGXFWIfTraceBuf->ui32KCCBCmdsExecuted;
+
+ if (psDevInfo->ui32KCCBCmdsExecutedLastTime == ui32KCCBCmdsExecuted)
+ {
+ /*
+ If something was waiting last time then the Firmware has stopped processing commands.
+ */
+ if (psDevInfo->bKCCBCmdsWaitingLastTime)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: No KCCB commands executed since check!",
+ __FUNCTION__));
+ eNewStatus = PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING;
+ eNewReason = PVRSRV_DEVICE_HEALTH_REASON_QUEUE_STALLED;
+ }
+
+ /*
+ If no commands are currently pending and nothing happened since the last poll, then
+ schedule a dummy command to ping the firmware so we know it is alive and processing.
+ */
+ if (!bKCCBCmdsWaiting)
+ {
+ RGXFWIF_KCCB_CMD sCmpKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ sCmpKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_HEALTH_CHECK;
+
+ /* Protect the PDumpLoadMem. RGXScheduleCommand() cannot take the
+ * PMR lock itself, because some bridge functions will take the PMR lock
+ * before calling RGXScheduleCommand
+ */
+ eError = RGXScheduleCommand(psDevNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sCmpKCCBCmd,
+ sizeof(sCmpKCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Cannot schedule Health Check command! (0x%x)",
+ __FUNCTION__, eError));
+ }
+ else
+ {
+ bKCCBCmdsWaiting = IMG_TRUE;
+ }
+ }
+ }
+
+ psDevInfo->bKCCBCmdsWaitingLastTime = bKCCBCmdsWaiting;
+ psDevInfo->ui32KCCBCmdsExecutedLastTime = ui32KCCBCmdsExecuted;
+ }
+ }
+
+ if (bCheckAfterTimePassed && (PVRSRV_DEVICE_HEALTH_STATUS_OK==eNewStatus))
+ {
+ RGXCheckForStalledClientContexts(psDevInfo, IMG_FALSE);
+ }
+
+ /*
+ Finished, save the new status...
+ */
+ _RGXUpdateHealthStatus_Exit:
+ OSAtomicWrite(&psDevNode->eHealthStatus, eNewStatus);
+ OSAtomicWrite(&psDevNode->eHealthReason, eNewReason);
+
+ /*
+ * Attempt to service the HWPerf buffer to regularly transport idle/periodic
+ * packets to host buffer.
+ */
+ if (psDevNode->pfnServiceHWPerf != NULL)
+ {
+ PVRSRV_ERROR eError = psDevNode->pfnServiceHWPerf(psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: "
+ "Error occurred when servicing HWPerf buffer (%d)",
+ __FUNCTION__, eError));
+ }
+ }
+
+ /* Attempt to refresh timer correlation data */
+ RGXTimeCorrRestartPeriodic(psDevNode);
+
+ return PVRSRV_OK;
+} /* RGXUpdateHealthStatus */
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM)
+{
+ RGX_CLIENT_CCB *psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+
+ return CheckForStalledCCB(psCurrentServerCommonContext->psDevInfo->psDeviceNode, psCurrentClientCCB, eKickTypeDM);
+}
+
+void DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ RGX_CLIENT_CCB *psCurrentClientCCB = psCurrentServerCommonContext->psClientCCB;
+ PRGXFWIF_FWCOMMONCONTEXT sFWCommonContext = psCurrentServerCommonContext->sFWCommonContextFWAddr;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) || defined(PVRSRV_ENABLE_FULL_CCB_DUMP)
+ DumpCCB(psCurrentServerCommonContext->psDevInfo, sFWCommonContext,
+ psCurrentClientCCB, pfnDumpDebugPrintf, pvDumpDebugFile);
+#else
+ DumpStalledCCBCommand(sFWCommonContext, psCurrentClientCCB, pfnDumpDebugPrintf, pvDumpDebugFile);
+#endif
+}
+
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+ IMG_UINT32 *pui32NumCleanupCtl,
+ RGXFWIF_DM eDM,
+ IMG_BOOL bKick,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer,
+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer)
+{
+ PRGXFWIF_CLEANUP_CTL *psCleanupCtlWrite = apsCleanupCtl;
+
+ PVR_ASSERT((eDM == RGXFWIF_DM_TA) || (eDM == RGXFWIF_DM_3D));
+
+ if (bKick)
+ {
+ if (eDM == RGXFWIF_DM_TA)
+ {
+ if (psRTDataCleanup)
+ {
+ PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+ RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+ offsetof(RGXFWIF_HWRTDATA, sTACleanupState),
+ RFW_FWADDR_NOREF_FLAG);
+
+ *(psCleanupCtlWrite++) = psCleanupCtl;
+ }
+ }
+ else
+ {
+ RGXFWIF_PRBUFFER_TYPE eBufferType;
+ RGX_ZSBUFFER_DATA *psBuffer = NULL;
+
+ if (psRTDataCleanup)
+ {
+ PRGXFWIF_CLEANUP_CTL psCleanupCtl;
+
+ RGXSetFirmwareAddress(&psCleanupCtl, psRTDataCleanup->psFWHWRTDataMemDesc,
+ offsetof(RGXFWIF_HWRTDATA, s3DCleanupState),
+ RFW_FWADDR_NOREF_FLAG);
+
+ *(psCleanupCtlWrite++) = psCleanupCtl;
+ }
+
+ for (eBufferType = RGXFWIF_PRBUFFER_START; eBufferType < RGXFWIF_PRBUFFER_MAXSUPPORTED; eBufferType++)
+ {
+				switch (eBufferType)
+ {
+ case RGXFWIF_PRBUFFER_ZBUFFER:
+ psBuffer = psZBuffer;
+ break;
+ case RGXFWIF_PRBUFFER_SBUFFER:
+ psBuffer = psSBuffer;
+ break;
+ case RGXFWIF_PRBUFFER_MSAABUFFER:
+ psBuffer = psMSAAScratchBuffer;
+ break;
+ case RGXFWIF_PRBUFFER_MAXSUPPORTED:
+ psBuffer = NULL;
+ break;
+ }
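+				/* The cleanup control is embedded in the FW PRBUFFER
+				 * structure, so its FW address is the buffer base plus
+				 * the member offset. */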
+ if (psBuffer)
+ {
+ (psCleanupCtlWrite++)->ui32Addr = psBuffer->sZSBufferFWDevVAddr.ui32Addr +
+ offsetof(RGXFWIF_PRBUFFER, sCleanupState);
+ psBuffer = NULL;
+ }
+ }
+ }
+ }
+
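+	/* The pointer difference gives the number of cleanup controls written. */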
+ *pui32NumCleanupCtl = psCleanupCtlWrite - apsCleanupCtl;
+
+ PVR_ASSERT(*pui32NumCleanupCtl <= RGXFWIF_KCCB_CMD_KICK_DATA_MAX_NUM_CLEANUP_CTLS);
+}
+
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_HWRINFOBUF *psHWRInfoBuf;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl;
+ IMG_UINT32 i;
+
+ if (psDevNode->pvDevice == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_DEVINFO;
+ }
+ psDevInfo = psDevNode->pvDevice;
+
+ psHWRInfoBuf = psDevInfo->psRGXFWIfHWRInfoBuf;
+ psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+
+ for (i = 0 ; i < psDevInfo->sDevFeatureCfg.ui32MAXDMCount ; i++)
+ {
+ /* Reset the HWR numbers */
+ psRGXFWIfTraceBufCtl->aui32HwrDmLockedUpCount[i] = 0;
+ psRGXFWIfTraceBufCtl->aui32HwrDmFalseDetectCount[i] = 0;
+ psRGXFWIfTraceBufCtl->aui32HwrDmRecoveredCount[i] = 0;
+ psRGXFWIfTraceBufCtl->aui32HwrDmOverranCount[i] = 0;
+ }
+
+ for (i = 0 ; i < RGXFWIF_HWINFO_MAX ; i++)
+ {
+ psHWRInfoBuf->sHWRInfo[i].ui32HWRNumber = 0;
+ }
+
+ for (i = 0 ; i < RGXFW_THREAD_NUM ; i++)
+ {
+ psHWRInfoBuf->ui32FirstCrPollAddr[i] = 0;
+ psHWRInfoBuf->ui32FirstCrPollMask[i] = 0;
+ psHWRInfoBuf->ui32FirstCrPollLastValue[i] = 0;
+ }
+
+ psHWRInfoBuf->ui32WriteIndex = 0;
+ psHWRInfoBuf->ui32DDReqCount = 0;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+ IMG_DEV_PHYADDR *psPhyAddr,
+ IMG_UINT32 ui32LogicalOffset,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_BOOL *bValid)
+{
+
+ PVRSRV_ERROR eError;
+
+ eError = PMRLockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMRLockSysPhysAddresses failed (%u)",
+ eError));
+ return eError;
+ }
+
+ eError = PMR_DevPhysAddr(psPMR,
+ ui32Log2PageSize,
+ ui32NumOfPages,
+ ui32LogicalOffset,
+ psPhyAddr,
+ bValid);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMR_DevPhysAddr failed (%u)",
+ eError));
+ return eError;
+ }
+
+
+ eError = PMRUnlockSysPhysAddresses(psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGetPhyAddr: PMRUnLockSysPhysAddresses failed (%u)",
+ eError));
+ return eError;
+ }
+
+ return eError;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32WriteOffset, IMG_UINT32 ui32PDumpFlags)
+{
+ RGXFWIF_CCB_CTL *psKCCBCtl = psDevInfo->psKernelCCBCtl;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ ui32PDumpFlags |= PDUMP_FLAGS_CONTINUOUS | PDUMP_FLAGS_POWER;
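+	/* Emit the drain to the PDump script unconditionally: CONTINUOUS keeps it
+	 * outside capture-range gating and POWER covers power transitions. */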
+
+ if (psDevInfo->bDumpedKCCBCtlAlready)
+ {
+ /* exiting capture range */
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+
+ /* make sure previous cmd is drained in pdump in case we will 'jump' over some future cmds */
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "kCCB(%p): Draining rgxfw_roff (0x%x) == woff (0x%x)",
+ psKCCBCtl,
+ ui32WriteOffset,
+ ui32WriteOffset);
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psKernelCCBCtlMemDesc,
+ offsetof(RGXFWIF_CCB_CTL, ui32ReadOffset),
+ ui32WriteOffset,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPdumpDrainKCCB: problem pdumping POL for kCCBCtl (%d)", eError));
+ }
+ }
+
+ return eError;
+
+}
+#endif
+
+/*!
+ *******************************************************************************
+
+ @Function RGXClientConnectCompatCheck_ClientAgainstFW
+
+ @Description
+
+ Check compatibility of client and firmware (build options)
+ at the connection time.
+
+ @Input psDeviceNode - device node
+ @Input ui32ClientBuildOptions - build options for the client
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV RGXClientConnectCompatCheck_ClientAgainstFW(PVRSRV_DEVICE_NODE * psDeviceNode, IMG_UINT32 ui32ClientBuildOptions)
+{
+ PVRSRV_ERROR eError;
+#if !defined(NO_HARDWARE) || defined(PDUMP)
+#if !defined(NO_HARDWARE)
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+ IMG_UINT32 ui32BuildOptionsMismatch;
+ IMG_UINT32 ui32BuildOptionsFW;
+#endif
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+#endif
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+ if (psDevInfo == NULL || psDevInfo->psRGXFWIfInitMemDesc == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Cannot acquire kernel fw compatibility check info, RGXFWIF_INIT structure not allocated.",
+ __FUNCTION__));
+ return PVRSRV_ERROR_NOT_INITIALISED;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+ __FUNCTION__, eError));
+ return eError;
+ }
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+ {
+ /* No need to wait if the FW has already updated the values */
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: client and FW build options");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32BuildOptions),
+ ui32ClientBuildOptions,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ ui32BuildOptionsFW = psRGXFWInit->sRGXCompChecks.ui32BuildOptions;
+ ui32BuildOptionsMismatch = ui32ClientBuildOptions ^ ui32BuildOptionsFW;
+
+ if (ui32BuildOptionsMismatch != 0)
+ {
+ if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+ "extra options present in client: (0x%x). Please check rgx_options.h",
+ ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ }
+
+ if ( (ui32BuildOptionsFW & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and client build options; "
+ "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+ ui32BuildOptionsFW & ui32BuildOptionsMismatch ));
+ }
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and client build options match. [ OK ]"));
+ }
+#endif
+
+ eError = PVRSRV_OK;
+#if !defined(NO_HARDWARE)
+ chk_exit:
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXVzRegisterFirmwarePhysHeap
+
+ @Description Register firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID - Guest OSID
+ @Input sDevPAddr - Heap address
+ @Input ui64DevPSize - Heap size
+
+ @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSID,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT64 ui64DevPSize)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+ if (!ui32OSID ||
+ !ui64DevPSize ||
+ !sDevPAddr.uiAddr ||
+ ui32OSID >= RGXFW_NUM_OS ||
+ ui64DevPSize != RGX_FIRMWARE_RAW_HEAP_SIZE)
+ {
+ /* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+ PVR_DPF((PVR_DBG_ERROR, "Invalid guest %d fw physheap spec.\n", ui32OSID));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+	/* Registration creates an internal RA to manage the guest(s) firmware heap */
+ eError = PVRSRVVzRegisterFirmwarePhysHeap (psDeviceNode, sDevPAddr, ui64DevPSize, ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Registering guest %d fw physheap failed\n", ui32OSID));
+ return eError;
+ }
+
+ /* Map guest DMA fw physheap into the fw kernel memory context */
+ eError = RGXVzDevMemAllocateGuestFwHeap(psDeviceNode, ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Mapping guest %d fw physheap failed\n", ui32OSID));
+ return eError;
+ }
+
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXVzUnregisterFirmwarePhysHeap
+
+ @Description Unregister firmware heap for the specified guest OSID
+
+ @Input psDeviceNode - device node
+ @Input ui32OSID - Guest OSID
+
+ @Return PVRSRV_ERROR - PVRSRV_OK if heap setup was successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSID)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_VZ_RET_IF_NOT_MODE(DRIVER_MODE_HOST, PVRSRV_OK);
+
+ if (!ui32OSID || ui32OSID >= RGXFW_NUM_OS)
+ {
+ /* Guest OSID(s) range [1 up to (RGXFW_NUM_OS-1)] */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Free guest fw physheap from fw kernel memory context */
+ RGXVzDevMemFreeGuestFwHeap(psDeviceNode, ui32OSID);
+
+ /* Unregistration deletes state required to maintain heap */
+ eError = PVRSRVVzUnregisterFirmwarePhysHeap (psDeviceNode, ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "Unregistering guest %d fw physheap failed\n", ui32OSID));
+ return eError;
+ }
+
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXVzCreateFWKernelMemoryContext
+
+ @Description Setup additional firmware state specific to VZ
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR - PVRSRV_OK if successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ eError = SysVzRegisterFwPhysHeap(psDeviceNode->psDevConfig);
+ }
+#if (RGXFW_GUEST_OSID_START < RGXFW_NUM_OS)
+ else
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_CHAR szHeapName[32];
+ IMG_UINT32 ui32OSID;
+		/* Initialise each guest OSID firmware physheap; note that the guest
+		   OSID(s) range is [1 up to (RGXFW_NUM_OS-1)] */
+ for (ui32OSID = RGXFW_GUEST_OSID_START; ui32OSID < RGXFW_NUM_OS; ui32OSID++)
+ {
+ OSSNPrintf(szHeapName, sizeof(szHeapName), RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, ui32OSID);
+
+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, szHeapName,
+ &psDevInfo->psGuestFirmwareRawHeap[ui32OSID]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "DevmemFindHeapByName() for guest %d failed", ui32OSID));
+ }
+ }
+ }
+#endif
+
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXVzDestroyFWKernelMemoryContext
+
+ @Description Destroy additional firmware state specific to VZ
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR - PVRSRV_OK if successful.
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ return SysVzUnregisterFwPhysHeap(psDeviceNode->psDevConfig);
+ }
+ return PVRSRV_OK;
+}
+
+
+IMG_UINT32 RGXGetFwMainHeapSize(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ if (psDevInfo == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid device info", __func__));
+ return 0;
+ }
+
+ if (psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_MIPS_BIT_MASK)
+ {
+ return RGX_FIRMWARE_MIPS_MAIN_HEAP_SIZE;
+ }
+ else
+ {
+ return RGX_FIRMWARE_META_MAIN_HEAP_SIZE;
+ }
+}
+
+/******************************************************************************
+ End of file (rgxfwutils.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxfwutils.h b/drivers/gpu/drm/img-rogue/1.10/rgxfwutils.h
new file mode 100644
index 00000000000000..bc4fd46ce296c8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxfwutils.h
@@ -0,0 +1,1174 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX firmware utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX firmware utility routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXFWUTILS_H__)
+#define __RGXFWUTILS_H__
+
+#include "log2.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "devicemem.h"
+#include "device.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "connection_server.h"
+#include "rgxta3d.h"
+#include "devicemem_utils.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "physmem_tdfwcode.h"
+#include "physmem_tdsecbuf.h"
+#endif
+
+
+/*
+ * Firmware-only allocations (which are initialised by the host) must be aligned to the SLC cache line size.
+ * This is because firmware-only allocations are GPU_CACHE_INCOHERENT, which causes problems
+ * if two allocations share the same cache line; e.g. the initialisation of the second allocation won't
+ * make it into the SLC cache because the line has already been loaded when accessing the content of the first allocation.
+ */
+static INLINE PVRSRV_ERROR DevmemFwAllocate(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEVMEM_SIZE_T uiSize,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ PVRSRV_ERROR eError;
+ DEVMEM_HEAP *psFwHeap;
+
+ PVR_DPF_ENTERED;
+
+	/* Enforce the standard prefix naming scheme that callers must follow */
+ PVR_ASSERT((pszText != NULL) && (pszText[0] == 'F') && (pszText[1] == 'w'));
+
+ psFwHeap = (PVRSRV_CHECK_FW_CONFIG(uiFlags)) ? (psDevInfo->psFirmwareConfigHeap) : (psDevInfo->psFirmwareMainHeap);
+
+ eError = DevmemAllocate(psFwHeap,
+ uiSize,
+ GET_ROGUE_CACHE_LINE_SIZE(RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS)),
+ uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+ pszText,
+ ppsMemDescPtr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ /*
+ We need to map it so the heap for this allocation
+ is set
+ */
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psFwHeap,
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+
+ PVR_DPF_RETURN_RC(eError);
+}
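+
+/* Usage sketch (illustrative only; the local names are hypothetical, and
+ * RGX_FWCOMCTX_ALLOCFLAGS is defined further down in this header): callers
+ * must honour the "Fw" name prefix asserted above, e.g.
+ *
+ *     DEVMEM_MEMDESC *psFwExampleMemDesc;
+ *     PVRSRV_ERROR eErr = DevmemFwAllocate(psDevInfo,
+ *                                          sizeof(IMG_UINT32),
+ *                                          RGX_FWCOMCTX_ALLOCFLAGS,
+ *                                          "FwExampleData",
+ *                                          &psFwExampleMemDesc);
+ *     if (eErr != PVRSRV_OK) { return eErr; }
+ */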
+
+static INLINE PVRSRV_ERROR DevmemFwAllocateExportable(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ IMG_DEVMEM_ALIGN_T uiAlign,
+ DEVMEM_FLAGS_T uiFlags,
+ const IMG_CHAR *pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+	/* Enforce the standard prefix naming scheme that callers must follow */
+ PVR_ASSERT((pszText != NULL) &&
+ (pszText[0] == 'F') && (pszText[1] == 'w') &&
+ (pszText[2] == 'E') && (pszText[3] == 'x'));
+
+ eError = DevmemAllocateExportable(psDeviceNode,
+ uiSize,
+ uiAlign,
+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) ?
+ ExactLog2(uiAlign) :
+ DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+ uiFlags | PVRSRV_MEMALLOCFLAG_FW_LOCAL,
+ pszText,
+ ppsMemDescPtr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"FW DevmemAllocateExportable failed (%u)", eError));
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ /*
+ We need to map it so the heap for this allocation
+ is set
+ */
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareMainHeap,
+ &sTmpDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"FW DevmemMapToDevice failed (%u)", eError));
+ }
+
+ PVR_DPF_RETURN_RC1(eError, *ppsMemDescPtr);
+}
+
+static INLINE void DevmemFWPoison(DEVMEM_MEMDESC *psMemDesc, IMG_BYTE ubPoisonValue)
+{
+ void *pvLinAddr;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemAcquireCpuVirtAddr(psMemDesc, &pvLinAddr);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire FW allocation mapping "
+ "to poison: %s",
+ __func__,
+ PVRSRVGETERRORSTRING(eError)));
+ return;
+ }
+
+ OSDeviceMemSet(pvLinAddr, ubPoisonValue, psMemDesc->uiAllocSize);
+
+ DevmemReleaseCpuVirtAddr(psMemDesc);
+}
+
+static INLINE void DevmemFwFree(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DEVMEM_MEMDESC *psMemDesc)
+{
+ PVR_DPF_ENTERED1(psMemDesc);
+
+ if(psDevInfo->bEnableFWPoisonOnFree)
+ {
+ DevmemFWPoison(psMemDesc, psDevInfo->ubFWPoisonOnFreeValue);
+ }
+
+ DevmemReleaseDevVirtAddr(psMemDesc);
+ DevmemFree(psMemDesc);
+
+ PVR_DPF_RETURN;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+static INLINE
+PVRSRV_ERROR DevmemImportTDFWCode(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiMemAllocFlags,
+ IMG_BOOL bFWCorememCode,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ PMR *psTDFWCodePMR;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = (IMG_DEVMEM_ALIGN_T)1 << uiLog2Align; /* widen before shifting to avoid 32-bit overflow */
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ppsMemDescPtr);
+
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+ &uiSize,
+ &uiAlign);
+
+ eError = PhysmemNewTDFWCodePMR(psDeviceNode,
+ uiSize,
+ uiLog2Align,
+ uiMemAllocFlags,
+ bFWCorememCode,
+ &psTDFWCodePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDFWCodePMR failed (%u)", eError));
+ goto PMRCreateError;
+ }
+
+ /* NB: TDFWCodePMR refcount: 1 -> 2 */
+ eError = DevmemLocalImport(psDeviceNode,
+ psTDFWCodePMR,
+ uiMemAllocFlags,
+ ppsMemDescPtr,
+ &uiMemDescSize,
+ "TDFWCode");
+ if(eError != PVRSRV_OK)
+ {
+ goto ImportError;
+ }
+
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareMainHeap,
+ &sTmpDevVAddr);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to map TD META code PMR (%u)", eError));
+ goto MapError;
+ }
+
+ /* NB: TDFWCodePMR refcount: 2 -> 1
+ * The PMR will be unreferenced again (and destroyed) when
+ * the memdesc tracking it is cleaned up
+ */
+ PMRUnrefPMR(psTDFWCodePMR);
+
+ return PVRSRV_OK;
+
+MapError:
+ DevmemFree(*ppsMemDescPtr);
+ *ppsMemDescPtr = NULL;
+ImportError:
+ /* Unref and destroy the PMR */
+ PMRUnrefPMR(psTDFWCodePMR);
+PMRCreateError:
+
+ return eError;
+}
+
+static INLINE
+PVRSRV_ERROR DevmemImportTDSecureBuf(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiSize,
+ PMR_LOG2ALIGN_T uiLog2Align,
+ IMG_UINT32 uiMemAllocFlags,
+ DEVMEM_MEMDESC **ppsMemDescPtr,
+ IMG_UINT64 *pui64SecBufHandle)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ PMR *psTDSecureBufPMR;
+ IMG_DEV_VIRTADDR sTmpDevVAddr;
+ IMG_DEVMEM_SIZE_T uiMemDescSize;
+	IMG_DEVMEM_ALIGN_T uiAlign = (IMG_DEVMEM_ALIGN_T)1 << uiLog2Align; /* widen before shifting to avoid 32-bit overflow */
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ppsMemDescPtr);
+
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+ &uiSize,
+ &uiAlign);
+
+ eError = PhysmemNewTDSecureBufPMR(NULL,
+ psDeviceNode,
+ uiSize,
+ uiLog2Align,
+ uiMemAllocFlags,
+ &psTDSecureBufPMR,
+ pui64SecBufHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PhysmemNewTDSecureBufPMR failed (%u)", eError));
+ goto PMRCreateError;
+ }
+
+ /* NB: psTDSecureBufPMR refcount: 1 -> 2 */
+ eError = DevmemLocalImport(psDeviceNode,
+ psTDSecureBufPMR,
+ uiMemAllocFlags,
+ ppsMemDescPtr,
+ &uiMemDescSize,
+ "TDSecureBuffer");
+ if(eError != PVRSRV_OK)
+ {
+ goto ImportError;
+ }
+
+ eError = DevmemMapToDevice(*ppsMemDescPtr,
+ psDevInfo->psFirmwareMainHeap,
+ &sTmpDevVAddr);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to map TD secure buffer PMR (%u)", eError));
+ goto MapError;
+ }
+
+ /* NB: psTDSecureBufPMR refcount: 2 -> 1
+ * The PMR will be unreferenced again (and destroyed) when
+ * the memdesc tracking it is cleaned up
+ */
+ PMRUnrefPMR(psTDSecureBufPMR);
+
+ return PVRSRV_OK;
+
+MapError:
+ DevmemFree(*ppsMemDescPtr);
+ *ppsMemDescPtr = NULL;
+ImportError:
+ /* Unref and destroy the PMR */
+ PMRUnrefPMR(psTDSecureBufPMR);
+PMRCreateError:
+
+ return eError;
+}
+#endif
+
+
+/*
+ * This function returns the value of the hardware register RGX_CR_TIMER
+ * which is a timer counting in ticks.
+ */
+
+static INLINE IMG_UINT64 RGXReadHWTimerReg(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT64 ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+
+ /*
+ * In order to avoid having to issue three 32-bit reads to detect the
+ * lower 32-bits wrapping, the MSB of the low 32-bit word is duplicated
+ * in the MSB of the high 32-bit word. If the wrap happens, we just read
+ * the register again (it will not wrap again so soon).
+ */
+ if ((ui64Time ^ (ui64Time << 32)) & ~RGX_CR_TIMER_BIT31_CLRMSK)
+ {
+ ui64Time = OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_TIMER);
+ }
+
+ return ((ui64Time & ~RGX_CR_TIMER_VALUE_CLRMSK) >> RGX_CR_TIMER_VALUE_SHIFT);
+}
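+
+/* Worked example of the wrap check above (assuming, as the shift by 32
+ * implies, that the duplicated low-word MSB occupies bit 63 of the
+ * register): if the low word wraps between the two halves being latched,
+ * bit 63 and bit 31 of the sampled value disagree, e.g.
+ *
+ *     ui64Time         = 0x8000000000000000   (bit 63 set, bit 31 clear)
+ *     ui64Time << 32   = 0x0000000000000000
+ *     XOR of the two   = 0x8000000000000000   -> BIT31 field set, re-read
+ *
+ * A consistent sample has equal bits 63 and 31, XORs to zero in that
+ * position, and is accepted without a second register read.
+ */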
+
+/*
+ * This FW Common Context is only mapped into the kernel for initialisation and cleanup purposes.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+ * and write-combining suffices on the CPU side (the WC buffer will be flushed at the first kick)
+ */
+#define RGX_FWCOMCTX_ALLOCFLAGS (PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) | \
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED)| \
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT | \
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE | \
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE | \
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE | \
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC)
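+
+/* A minimal sketch (illustrative) of how these flags are consumed: they are
+ * passed unchanged to DevmemFwAllocate() above, which ORs in
+ * PVRSRV_MEMALLOCFLAG_FW_LOCAL and applies the SLC cache line alignment:
+ *
+ *     eError = DevmemFwAllocate(psDevInfo,
+ *                               uiContextSize,
+ *                               RGX_FWCOMCTX_ALLOCFLAGS,
+ *                               "FwCommonContext",
+ *                               &psContextMemDesc);
+ */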
+
+/******************************************************************************
+ * RGXSetFirmwareAddress Flags
+ *****************************************************************************/
+#define RFW_FWADDR_FLAG_NONE (0) /*!< Void flag */
+#define RFW_FWADDR_NOREF_FLAG (1U << 0) /*!< It is safe to immediately release the reference to the pointer,
+									otherwise RGXUnsetFirmwareAddress() must be called when finished. */
+
+IMG_BOOL RGXTraceBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTraceBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+IMG_BOOL RGXTBIBufferIsInitRequired(PVRSRV_RGXDEV_INFO *psDevInfo);
+PVRSRV_ERROR RGXTBIBufferInitOnDemandResources(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXSetupFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ RGXFWIF_BIFTILINGMODE eBifTilingMode,
+ IMG_UINT32 ui32NumTilingCfgs,
+ IMG_UINT32 *pui32BIFTilingXStrides,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGXFWIF_DEV_VIRTADDR *psRGXFWInitFWAddr,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf,
+ FW_PERF_CONF eFirmwarePerf,
+ IMG_UINT32 ui32ConfigFlagsExt);
+
+
+
+void RGXFreeFirmware(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*************************************************************************/ /*!
+@Function RGXSetFirmwareAddress
+
+@Description Sets a pointer in a firmware data structure.
+
+@Input          ppDest           Address of the pointer to set
+@Input          psSrc            MemDesc describing the pointer
+@Input          uiOffset         Offset into the MemDesc
+@Input          ui32Flags        Any combination of RFW_FWADDR_*_FLAG
+
+@Return void
+*/ /**************************************************************************/
+void RGXSetFirmwareAddress(RGXFWIF_DEV_VIRTADDR *ppDest,
+ DEVMEM_MEMDESC *psSrc,
+ IMG_UINT32 uiOffset,
+ IMG_UINT32 ui32Flags);
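+
+/* Pairing sketch (illustrative): without RFW_FWADDR_NOREF_FLAG the set call
+ * takes a reference that must be dropped with RGXUnsetFirmwareAddress()
+ * (declared below) once the firmware no longer uses the pointer:
+ *
+ *     RGXFWIF_DEV_VIRTADDR sFWAddr;
+ *     RGXSetFirmwareAddress(&sFWAddr, psFwMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *     ...
+ *     RGXUnsetFirmwareAddress(psFwMemDesc);
+ */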
+
+
+/*************************************************************************/ /*!
+@Function RGXSetMetaDMAAddress
+
+@Description    Fills a firmware structure used to set up the META DMA with two
+                pointers to the same data: one 40-bit and one 32-bit (a pointer
+                in the FW memory space).
+
+@Input          psDest           Address of the structure to set
+@Input          psSrcMemDesc     MemDesc describing the pointer
+@Input          psSrcFWDevVAddr  Firmware memory space pointer
+@Input          uiOffset         Offset into the MemDesc
+
+@Return void
+*/ /**************************************************************************/
+void RGXSetMetaDMAAddress(RGXFWIF_DMA_ADDR *psDest,
+ DEVMEM_MEMDESC *psSrcMemDesc,
+ RGXFWIF_DEV_VIRTADDR *psSrcFWDevVAddr,
+ IMG_UINT32 uiOffset);
+
+
+/*************************************************************************/ /*!
+@Function RGXUnsetFirmwareAddress
+
+@Description Unsets a pointer in a firmware data structure
+
+@Input psSrc MemDesc describing the pointer
+
+@Return void
+*/ /**************************************************************************/
+void RGXUnsetFirmwareAddress(DEVMEM_MEMDESC *psSrc);
+
+/*************************************************************************/ /*!
+@Function FWCommonContextAllocate
+
+@Description Allocate a FW common context. This allocates the HW memory
+ for the context, the CCB and wires it all together.
+
+@Input psConnection Connection this context is being created on
+@Input psDeviceNode Device node to create the FW context on
+ (must be RGX device node)
+@Input          eRGXCCBRequestor RGX_CCB_REQUESTOR_TYPE enum constant which
+                                 represents the requestor of this FWCC
+@Input eDM Data Master type
+@Input psAllocatedMemDesc Pointer to pre-allocated MemDesc to use
+ as the FW context or NULL if this function
+ should allocate it
+@Input          ui32AllocatedOffset Offset into the pre-allocated MemDesc to use
+ as the FW context. If psAllocatedMemDesc
+ is NULL then this parameter is ignored
+@Input psFWMemContextMemDesc MemDesc of the FW memory context this
+ common context resides on
+@Input psContextStateMemDesc FW context state (context switch) MemDesc
+@Input ui32CCBAllocSize Size of the CCB for this context
+@Input ui32Priority Priority of the context
+@Input psInfo Structure that contains extra info
+ required for the creation of the context
+ (elements might change from core to core)
+@Return PVRSRV_OK if the context was successfully created
+*/ /**************************************************************************/
+PVRSRV_ERROR FWCommonContextAllocate(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_CCB_REQUESTOR_TYPE eRGXCCBRequestor,
+ RGXFWIF_DM eDM,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ DEVMEM_MEMDESC *psContextStateMemDesc,
+ IMG_UINT32 ui32CCBAllocSize,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_COMMON_CONTEXT **ppsServerCommonContext);
+
+
+
+void FWCommonContextFree(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+PRGXFWIF_FWCOMMONCONTEXT FWCommonContextGetFWAddress(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGX_CLIENT_CCB *FWCommonContextGetClientCCB(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+RGXFWIF_CONTEXT_RESET_REASON FWCommonContextGetLastResetReason(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_RGXDEV_INFO* FWCommonContextGetRGXDevInfo(RGX_SERVER_COMMON_CONTEXT *psServerCommonContext);
+
+/*!
+******************************************************************************
+
+ @Function RGXScheduleProcessQueuesKM
+
+ @Description - Software command complete handler
+ (sends uncounted kicks for all the DMs through the MISR)
+
+ @Input hCmdCompHandle - RGX device node
+
+******************************************************************************/
+void RGXScheduleProcessQueuesKM(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXInstallProcessQueuesMISR
+
+ @Description - Installs the MISR to handle Process Queues operations
+
+ @Input phMISR - Pointer to the MISR handler
+
+ @Input psDeviceNode - RGX Device node
+
+******************************************************************************/
+PVRSRV_ERROR RGXInstallProcessQueuesMISR(IMG_HANDLE *phMISR, PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function RGXSendCommandWithPowLock
+
+@Description Sends a command to a particular DM without honouring
+ pending cache operations but taking the power lock.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input ui32PDumpFlags Pdump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommandWithPowLock(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXSendCommandsFromDeferredList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bPoll);
+/*************************************************************************/ /*!
+@Function RGXSendCommand
+
+@Description Sends a command to a particular DM without honouring
+ pending cache operations or the power lock.
+ The function flushes any deferred KCCB commands first.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input uiPdumpFlags PDump flags.
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXSendCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ PDUMP_FLAGS_T uiPdumpFlags);
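+
+/* Minimal sending sketch (illustrative; the payload initialisation is
+ * omitted because the RGXFWIF_KCCB_CMD layout is not shown in this file,
+ * and the general-purpose DM enumerator RGXFWIF_DM_GP is assumed):
+ *
+ *     RGXFWIF_KCCB_CMD sKCCBCmd = { 0 };
+ *     ... fill in command type and payload ...
+ *     eError = RGXSendCommand(psDevInfo, RGXFWIF_DM_GP, &sKCCBCmd,
+ *                             sizeof(sKCCBCmd), PDUMP_FLAGS_CONTINUOUS);
+ */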
+
+
+/*************************************************************************/ /*!
+@Function RGXScheduleCommand
+
+@Description Sends a command to a particular DM
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input ui32CacheOpFence Pending cache op. fence value.
+@Input ui32PDumpFlags PDump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eKCCBType,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 ui32CacheOpFence,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function RGXScheduleCommandAndWait
+
+@Description Schedules the command with RGXScheduleCommand and then waits
+                for the FW to update a sync. The sync must be piggy-backed on
+ the cmd, either by passing a sync cmd or a cmd that contains the
+ sync which the FW will eventually update. The sync is created in
+ the function, therefore the function provides a FWAddr and
+ UpdateValue for that cmd.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input psKCCBCmd The cmd to send.
+@Input ui32CmdSize The cmd size.
+@Input puiSyncObjFWAddr Pointer to the location with the FWAddr of
+ the sync.
+@Input puiUpdateValue Pointer to the location with the update
+ value of the sync.
+@Input ui32PDumpFlags PDump flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXScheduleCommandAndWait(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ RGXFWIF_KCCB_CMD *psKCCBCmd,
+ IMG_UINT32 ui32CmdSize,
+ IMG_UINT32 *puiSyncObjDevVAddr,
+ IMG_UINT32 *puiUpdateValue,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR RGXFirmwareUnittests(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+/*! ***********************************************************************//**
+@brief Copy framework command into FW addressable buffer
+
+@param psFWFrameworkMemDesc
+@param pbyGPUFRegisterList
+@param ui32FrameworkRegisterSize
+
+@returns PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCopyCommand(DEVMEM_MEMDESC *psFWFrameworkMemDesc,
+ IMG_PBYTE pbyGPUFRegisterList,
+ IMG_UINT32 ui32FrameworkRegisterSize);
+
+
+/*! ***********************************************************************//**
+@brief Create FW addressable buffer for framework
+
+@param psDeviceNode
+@param ppsFWFrameworkMemDesc
+@param ui32FrameworkRegisterSize
+
+@returns PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXFrameworkCreateKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEM_MEMDESC ** ppsFWFrameworkMemDesc,
+ IMG_UINT32 ui32FrameworkRegisterSize);
+
+/*************************************************************************/ /*!
+@Function RGXWaitForFWOp
+
+@Description Send a sync command and wait to be signalled.
+
+@Input psDevInfo Device Info
+@Input eDM To which DM the cmd is sent.
+@Input ui32PDumpFlags PDump flags
+
+@Return void
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXWaitForFWOp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function RGXStateFlagCtrl
+
+@Description Set and return FW internal state flags.
+
+@Input psDevInfo Device Info
+@Input ui32Config AppHint config flags
+@Output pui32State Current AppHint state flag configuration
+@Input bSetNotClear Set or clear the provided config flags
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXStateFlagCtrl(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_UINT32 *pui32State,
+ IMG_BOOL bSetNotClear);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestCommonContextCleanUp
+
+ @Description   Schedules a FW common context cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWContext - firmware address of the context to be cleaned up
+
+ @Input eDM - Data master to which the cleanup command should be sent
+
+ @Input ui32PDumpFlags - PDump continuous flag
+
+******************************************************************************/
+PVRSRV_ERROR RGXFWRequestCommonContextCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim,
+ RGXFWIF_DM eDM,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestHWRTDataCleanUp
+
+ @Description   Schedules a FW HWRTData memory cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psHWRTData - firmware address of the HWRTData to be cleaned up
+
+ @Input eDM - Data master to which the cleanup command should be sent
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestHWRTDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_HWRTDATA psHWRTData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM);
+
+PVRSRV_ERROR RGXFWRequestRayFrameDataCleanUp(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PRGXFWIF_RAY_FRAME_DATA psHWFrameData,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestRPMFreeListCleanUp
+
+ @Description   Schedules a FW RPM FreeList cleanup. The firmware doesn't
+                block waiting for the resource to become idle but rather
+                notifies the host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWRPMFreeList - firmware address of the RPM freelist to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestRPMFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_RPM_FREELIST psFWRPMFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestFreeListCleanUp
+
+ @Description   Schedules a FW FreeList cleanup. The firmware doesn't block
+                waiting for the resource to become idle but rather notifies
+                the host that the resource is busy.
+
+ @Input psDeviceNode - pointer to device node
+
+ @Input psFWFreeList - firmware address of the freelist to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWRequestFreeListCleanUp(PVRSRV_RGXDEV_INFO *psDeviceNode,
+ PRGXFWIF_FREELIST psFWFreeList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWRequestZSBufferCleanUp
+
+ @Description   Schedules a FW ZS Buffer cleanup. The firmware doesn't block
+                waiting for the resource to become idle but rather notifies
+                the host that the resource is busy.
+
+ @Input psDevInfo - pointer to device info
+
+ @Input psFWZSBuffer - firmware address of the ZS Buffer to be cleaned up
+
+ @Input psSync - Sync object associated with cleanup
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXFWRequestZSBufferCleanUp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PRGXFWIF_ZSBUFFER psFWZSBuffer,
+ PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+PVRSRV_ERROR ContextSetPriority(RGX_SERVER_COMMON_CONTEXT *psContext,
+ CONNECTION_DATA *psConnection,
+ PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Priority,
+ RGXFWIF_DM eDM);
+
+/*!
+******************************************************************************
+
+ @Function RGXFWSetHCSDeadline
+
+ @Description Requests the Firmware to set a new Hard Context
+ Switch timeout deadline. Context switches that
+ surpass that deadline cause the system to kill
+ the currently running workloads.
+
+ @Input psDevInfo pointer to device info
+
+ @Input ui32HCSDeadlineMs The deadline in milliseconds.
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWSetHCSDeadline(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32HCSDeadlineMs);
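+
+/* Example (illustrative): ask the firmware to kill workloads whose hard
+ * context switch takes longer than half a second:
+ *
+ *     eError = RGXFWSetHCSDeadline(psDevInfo, 500);
+ */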
+
+/*!
+******************************************************************************
+
+ @Function RGXFWChangeOSidPriority
+
+ @Description Requests the Firmware to change the priority of an
+ operating system. Higher priority number equals
+ higher priority on the scheduling system.
+
+ @Input psDevInfo pointer to device info
+
+ @Input ui32OSid The OSid whose priority is to be altered
+
+ @Input ui32Priority The new priority number for the specified OSid
+ ******************************************************************************/
+PVRSRV_ERROR RGXFWChangeOSidPriority(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ IMG_UINT32 ui32Priority);
+
+/*!
+****************************************************************************
+
+ @Function RGXFWSetOSIsolationThreshold
+
+ @Description Requests the Firmware to change the priority
+ threshold of the OS Isolation group. Any OS with a
+                priority higher than or equal to the threshold is
+                considered to belong to the isolation group.
+
+ @Input psDevInfo pointer to device info
+
+ @Input ui32IsolationPriorityThreshold The new priority threshold
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetOSIsolationThreshold(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32IsolationPriorityThreshold);
+
+/*!
+****************************************************************************
+
+ @Function RGXFWOSConfig
+
+ @Description   Sends the OS Config structure to the FW to complete
+                the initialisation process. The FW will then set all
+                the OS-specific parameters for that DDK.
+
+ @Input psDevInfo pointer to device info
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWOSConfig(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+****************************************************************************
+
+ @Function RGXFWSetVMOnlineState
+
+ @Description   Requests the Firmware to change a guest OS's online
+                state. This should be initiated by the VMM when a
+ guest VM comes online or goes offline. If offline,
+ the FW offloads any current resource from that OSID.
+ The request is repeated until the FW has had time to
+ free all the resources or has waited for workloads
+ to finish.
+
+ @Input psDevInfo pointer to device info
+
+ @Input ui32OSid The Guest OSid whose state is being altered
+
+ @Input eOSOnlineState The new state (Online or Offline)
+ ***************************************************************************/
+PVRSRV_ERROR RGXFWSetVMOnlineState(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32OSid,
+ RGXFWIF_OS_STATE_CHANGE eOSOnlineState);
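+
+/* VMM usage sketch (illustrative; the offline enumerator name below is an
+ * assumption, as the RGXFWIF_OS_STATE_CHANGE values are not shown in this
+ * file). As described above, the request is repeated until the FW has freed
+ * the OSID's resources:
+ *
+ *     do
+ *     {
+ *         eError = RGXFWSetVMOnlineState(psDevInfo, ui32OSid,
+ *                                        RGXFWIF_OS_OFFLINE);
+ *     } while (eError == PVRSRV_ERROR_RETRY);
+ */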
+/*!
+******************************************************************************
+
+ @Function RGXReadMETAAddr
+
+ @Description    Reads a value at a given address in META memory space
+ (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Output pui32Value - value
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXReadMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32METAAddr,
+ IMG_UINT32 *pui32Value);
+
+/*!
+******************************************************************************
+
+ @Function RGXWriteMETAAddr
+
+ @Description Write a value to the given address in META memory space
+ (it can be either a memory location or a META register)
+
+ @Input psDevInfo - pointer to device info
+
+ @Input ui32METAAddr - address in META memory space
+
+ @Input ui32Value - Value to write to address in META memory space
+
+ ******************************************************************************/
+
+PVRSRV_ERROR RGXWriteMETAAddr(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32METAAddr,
+ IMG_UINT32 ui32Value);
+
+/*!
+******************************************************************************
+
+ @Function RGXCheckFirmwareCCB
+
+ @Description Processes all commands that are found in the Firmware CCB.
+
+ @Input psDevInfo - pointer to device
+
+ ******************************************************************************/
+void RGXCheckFirmwareCCB(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+******************************************************************************
+
+ @Function RGXCheckForStalledClientContexts
+
+ @Description Checks all client contexts, for the device with device info
+ provided, to see if any are waiting for a fence to signal and
+ optionally force signalling of the fence for the context which
+ has been waiting the longest.
+ This function is called by RGXUpdateHealthStatus() and also
+ may be invoked from other trigger points.
+
+ @Input psDevInfo - pointer to device info
+ @Input bIgnorePrevious - if IMG_TRUE, any stalled contexts will be indicated
+ immediately, rather than only checking against any
+ previous stalled contexts
+
+ ******************************************************************************/
+void RGXCheckForStalledClientContexts(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bIgnorePrevious);
+
+/*!
+******************************************************************************
+
+ @Function RGXUpdateHealthStatus
+
+ @Description Tests a number of conditions which might indicate a fatal error has
+                occurred in the firmware. The result is stored in the device
+                node's eHealthStatus field.
+
+ @Input psDevNode Pointer to device node structure.
+ @Input bCheckAfterTimePassed When TRUE, the function will also test for
+ firmware queues and polls not changing
+ since the previous test.
+
+ Note: if not enough time has passed since
+ the last call, false positives may occur.
+
+ @returns PVRSRV_ERROR
+ ******************************************************************************/
+PVRSRV_ERROR RGXUpdateHealthStatus(PVRSRV_DEVICE_NODE* psDevNode,
+ IMG_BOOL bCheckAfterTimePassed);
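+
+/* Invocation sketch (illustrative): typically called periodically, e.g.
+ * from a watchdog timer, with bCheckAfterTimePassed = IMG_TRUE so that
+ * firmware queue and poll progress is compared against the previous sample:
+ *
+ *     (void) RGXUpdateHealthStatus(psDevNode, IMG_TRUE);
+ */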
+
+
+PVRSRV_ERROR CheckStalledClientCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext, RGX_KICK_TYPE_DM eKickTypeDM);
+
+void DumpStalledFWCommonContext(RGX_SERVER_COMMON_CONTEXT *psCurrentServerCommonContext,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/*!
+******************************************************************************
+
+ @Function AttachKickResourcesCleanupCtls
+
+ @Description Attaches the cleanup structures to a kick command so that
+ submission reference counting can be performed when the
+ firmware processes the command
+
+ @Output apsCleanupCtl Array of CleanupCtl structure pointers to populate.
+ @Output pui32NumCleanupCtl Number of CleanupCtl structure pointers written out.
+ @Input eDM Which data master is the subject of the command.
+ @Input bKick TRUE if the client originally wanted to kick this DM.
+ @Input psRTDataCleanup Optional RTData cleanup associated with the command.
+ @Input psZBuffer Optional ZBuffer associated with the command.
+ @Input  psSBuffer           Optional SBuffer associated with the command.
+ @Input  psMSAAScratchBuffer Optional MSAA scratch buffer associated with the command.
+ ******************************************************************************/
+void AttachKickResourcesCleanupCtls(PRGXFWIF_CLEANUP_CTL *apsCleanupCtl,
+ IMG_UINT32 *pui32NumCleanupCtl,
+ RGXFWIF_DM eDM,
+ IMG_BOOL bKick,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer,
+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer);
+
+/*!
+******************************************************************************
+
+ @Function RGXResetHWRLogs
+
+ @Description Resets the HWR Logs buffer (the hardware recovery count is not reset)
+
+ @Input psDevInfo Pointer to the device
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXResetHWRLogs(PVRSRV_DEVICE_NODE *psDevNode);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXGetPhyAddr
+
+ @Description Get the physical address of a certain PMR at a certain offset within it
+
+ @Input psPMR PMR of the allocation
+
+ @Input ui32LogicalOffset Logical offset
+
+ @Output psPhyAddr Physical address of the allocation
+
+ @Input ui32Log2PageSize Log2 page size of the allocation
+
+ @Input ui32NumOfPages Number of pages to query
+
+ @Output bValid Whether the returned physical address is valid
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXGetPhyAddr(PMR *psPMR,
+ IMG_DEV_PHYADDR *psPhyAddr,
+ IMG_UINT32 ui32LogicalOffset,
+ IMG_UINT32 ui32Log2PageSize,
+ IMG_UINT32 ui32NumOfPages,
+ IMG_BOOL *bValid);
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function RGXPdumpDrainKCCB
+
+ @Description Wait for the firmware to execute all the commands in the kCCB
+
+ @Input psDevInfo Pointer to the device
+
+ @Input ui32WriteOffset Write offset (Woff) to poll (POL) until the read offset (Roff) equals it
+
+ @Input ui32PDumpFlags PDUMP flags provided
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXPdumpDrainKCCB(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32WriteOffset,
+ IMG_UINT32 ui32PDumpFlags);
+#endif /* PDUMP */
+
+/*!
+******************************************************************************
+
+ @Function RGXVzCreateFWKernelMemoryContext
+
+ @Description Performs additional firmware memory context creation
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function RGXVzDestroyFWKernelMemoryContext
+
+ @Description Performs additional firmware memory context destruction
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR RGXVzDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function RGXVzRegisterFirmwarePhysHeap
+
+ @Description Registers and maps to the device a guest firmware physheap
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ *****************************************************************************/
+PVRSRV_ERROR RGXVzRegisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSID,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT64 ui64DevPSize);
+
+/*!
+******************************************************************************
+
+ @Function RGXVzUnregisterFirmwarePhysHeap
+
+ @Description Unregisters and unmaps from the device a guest firmware physheap
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ *****************************************************************************/
+PVRSRV_ERROR RGXVzUnregisterFirmwarePhysHeap(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32OSID);
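+
+/* Host-side pairing sketch (illustrative; the physical address value is
+ * arbitrary and IMG_DEV_PHYADDR is assumed to carry it in a uiAddr field):
+ *
+ *     IMG_DEV_PHYADDR sHeapPAddr;
+ *     sHeapPAddr.uiAddr = IMG_UINT64_C(0x80000000);
+ *     eError = RGXVzRegisterFirmwarePhysHeap(psDeviceNode, ui32OSID,
+ *                                            sHeapPAddr,
+ *                                            RGX_FIRMWARE_RAW_HEAP_SIZE);
+ *     ...
+ *     eError = RGXVzUnregisterFirmwarePhysHeap(psDeviceNode, ui32OSID);
+ */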
+
+
+/*!
+******************************************************************************
+
+ @Function RGXGetFwMainHeapSize
+
+ @Description Return size of the main FW heap in bytes
+
+ @Return IMG_UINT32
+ *****************************************************************************/
+IMG_UINT32 RGXGetFwMainHeapSize(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXFWUTILS_H__ */
+/******************************************************************************
+ End of file (rgxfwutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxheapconfig.h b/drivers/gpu/drm/img-rogue/1.10/rgxheapconfig.h
new file mode 100644
index 00000000000000..b75394814eae85
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxheapconfig.h
@@ -0,0 +1,173 @@
+/*************************************************************************/ /*!
+@File
+@Title device configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Memory heaps device specific configuration
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __RGXHEAPCONFIG_H__
+#define __RGXHEAPCONFIG_H__
+
+#include "rgxdefs_km.h"
+
+/*
+ RGX Device Virtual Address Space Definitions
+ NOTES:
+ Base addresses have to be a multiple of 4MiB
+
+ RGX_PDSCODEDATA_HEAP_BASE and RGX_USCCODE_HEAP_BASE will be programmed,
+ on a global basis, into RGX_CR_PDS_EXEC_BASE and RGX_CR_USC_CODE_BASE_*
+ respectively. Therefore if clients use multiple configs they must still
+ be consistent with their definitions for these heaps.
+
+ Shared virtual memory (GENERAL_SVM) support requires half of the address
+ space be reserved for SVM allocations unless BRN fixes are required in
+ which case the SVM heap is disabled. This is reflected in the device
+ connection capability bits returned to userspace.
+
+ Variable page-size heap (GENERAL_NON4K) support reserves 64GiB from the
+ available 4K page-size heap (GENERAL) space. The actual heap page-size
+ defaults to 16K; AppHint PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE
+ can be used to force it to one of these values: 4K, 64K, 256K, 1M, 2M.
+*/
+
+	/* Start at 4 MiB. Size of 512 GiB less 4 MiB (managed by OS/Services) */
+ #define RGX_GENERAL_SVM_HEAP_BASE IMG_UINT64_C(0x0000400000)
+ #define RGX_GENERAL_SVM_HEAP_SIZE IMG_UINT64_C(0x7FFFC00000)
+
+ /* Start at 512GiB. Size of 256 GiB */
+ #define RGX_GENERAL_HEAP_BASE IMG_UINT64_C(0x8000000000)
+ #define RGX_GENERAL_HEAP_SIZE IMG_UINT64_C(0x4000000000)
+
+ /* HWBRN65273 workaround requires General Heap to use a unique single 1GB PCE entry. */
+ #define RGX_GENERAL_BRN_65273_HEAP_BASE IMG_UINT64_C(0x65C0000000)
+ #define RGX_GENERAL_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0080000000)
+
+ /* Start at 768GiB. Size of 64 GiB */
+ #define RGX_GENERAL_NON4K_HEAP_BASE IMG_UINT64_C(0xC000000000)
+ #define RGX_GENERAL_NON4K_HEAP_SIZE IMG_UINT64_C(0x1000000000)
+
+ /* HWBRN65273 workaround requires Non4K memory to use a unique single 1GB PCE entry. */
+ #define RGX_GENERAL_NON4K_BRN_65273_HEAP_BASE IMG_UINT64_C(0x73C0000000)
+ #define RGX_GENERAL_NON4K_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0080000000)
+
+ /* Start at 832 GiB. Size of 32 GiB */
+ #define RGX_BIF_TILING_NUM_HEAPS 4
+ #define RGX_BIF_TILING_HEAP_SIZE IMG_UINT64_C(0x0200000000)
+ #define RGX_BIF_TILING_HEAP_1_BASE IMG_UINT64_C(0xD000000000)
+ #define RGX_BIF_TILING_HEAP_2_BASE (RGX_BIF_TILING_HEAP_1_BASE + RGX_BIF_TILING_HEAP_SIZE)
+ #define RGX_BIF_TILING_HEAP_3_BASE (RGX_BIF_TILING_HEAP_2_BASE + RGX_BIF_TILING_HEAP_SIZE)
+ #define RGX_BIF_TILING_HEAP_4_BASE (RGX_BIF_TILING_HEAP_3_BASE + RGX_BIF_TILING_HEAP_SIZE)
+
+ /* Start at 872 GiB. Size of 4 GiB */
+ #define RGX_PDSCODEDATA_HEAP_BASE IMG_UINT64_C(0xDA00000000)
+ #define RGX_PDSCODEDATA_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* HWBRN65273 workaround requires PDS memory to use a unique single 1GB PCE entry. */
+ #define RGX_PDSCODEDATA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xA800000000)
+ #define RGX_PDSCODEDATA_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000)
+
+ /* HWBRN63142 workaround requires Region Header memory to be at the top
+ of a 16GB aligned range. This is so when masked with 0x03FFFFFFFF the
+ address will avoid aliasing PB addresses. Start at 879.75GB. Size of 256MB. */
+ #define RGX_RGNHDR_BRN_63142_HEAP_BASE IMG_UINT64_C(0xDBF0000000)
+ #define RGX_RGNHDR_BRN_63142_HEAP_SIZE IMG_UINT64_C(0x0010000000)
+
+ /* Start at 880 GiB, Size of 1 MiB */
+ #define RGX_VISTEST_HEAP_BASE IMG_UINT64_C(0xDC00000000)
+ #define RGX_VISTEST_HEAP_SIZE IMG_UINT64_C(0x0000100000)
+
+ /* HWBRN65273 workaround requires VisTest memory to use a unique single 1GB PCE entry. */
+ #define RGX_VISTEST_BRN_65273_HEAP_BASE IMG_UINT64_C(0xE400000000)
+ #define RGX_VISTEST_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0000100000)
+
+ /* Start at 896 GiB Size of 4 GiB */
+ #define RGX_USCCODE_HEAP_BASE IMG_UINT64_C(0xE000000000)
+ #define RGX_USCCODE_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* HWBRN65273 workaround requires USC memory to use a unique single 1GB PCE entry. */
+ #define RGX_USCCODE_BRN_65273_HEAP_BASE IMG_UINT64_C(0xBA00000000)
+ #define RGX_USCCODE_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000)
+
+ /* Start at 903GiB. Firmware heaps defined in rgxdefs_km.h
+ RGX_FIRMWARE_RAW_HEAP_BASE
+ RGX_FIRMWARE_HYPERV_MAIN_HEAP_BASE
+ RGX_FIRMWARE_GUEST_MAIN_HEAP_BASE
+ RGX_FIRMWARE_MAIN_HEAP_SIZE
+ RGX_FIRMWARE_CONFIG_HEAP_SIZE
+ RGX_FIRMWARE_RAW_HEAP_SIZE */
+
+ /* HWBRN65273 workaround requires TQ memory to start at 0GB and use a unique single 1GB PCE entry. */
+ #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_BASE IMG_UINT64_C(0x0000000000)
+ #define RGX_TQ3DPARAMETERS_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000)
+
+ /* Start at 912GiB. Size of 16 GiB. 16GB aligned to match RGX_CR_ISP_PIXEL_BASE */
+ #define RGX_TQ3DPARAMETERS_HEAP_BASE IMG_UINT64_C(0xE400000000)
+ #define RGX_TQ3DPARAMETERS_HEAP_SIZE IMG_UINT64_C(0x0400000000)
+
+ /* Start at 928GiB. Size of 4 GiB */
+ #define RGX_DOPPLER_HEAP_BASE IMG_UINT64_C(0xE800000000)
+ #define RGX_DOPPLER_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* Start at 932GiB. Size of 4 GiB */
+ #define RGX_DOPPLER_OVERFLOW_HEAP_BASE IMG_UINT64_C(0xE900000000)
+ #define RGX_DOPPLER_OVERFLOW_HEAP_SIZE IMG_UINT64_C(0x0100000000)
+
+ /* Start at 936GiB. Two groups of 128 KBytes that must follow each other in this order. */
+ #define RGX_SERVICES_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00000000)
+ #define RGX_SERVICES_SIGNALS_HEAP_SIZE IMG_UINT64_C(0x0000020000)
+
+ #define RGX_SIGNALS_HEAP_BASE IMG_UINT64_C(0xEA00020000)
+ #define RGX_SIGNALS_HEAP_SIZE IMG_UINT64_C(0x0000020000)
+
+ /* TDM TPU YUV coeffs - can be reduced to a single page */
+ #define RGX_TDM_TPU_YUV_COEFFS_HEAP_BASE IMG_UINT64_C(0xEA00080000)
+ #define RGX_TDM_TPU_YUV_COEFFS_HEAP_SIZE IMG_UINT64_C(0x0000040000)
+
+ /* HWBRN65273 workaround requires two Region Header buffers 4GB apart. */
+ #define RGX_MMU_INIA_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF800000000)
+ #define RGX_MMU_INIA_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000)
+ #define RGX_MMU_INIB_BRN_65273_HEAP_BASE IMG_UINT64_C(0xF900000000)
+ #define RGX_MMU_INIB_BRN_65273_HEAP_SIZE IMG_UINT64_C(0x0040000000)
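+
+	/* The 4MiB base-alignment rule noted at the top of this file can be
+	   checked at compile time; a sketch (illustrative, using the
+	   static_assert macro available elsewhere in this driver):
+
+	       static_assert((RGX_GENERAL_HEAP_BASE & ((1u << 22) - 1)) == 0,
+	                     "heap base must be a multiple of 4MiB");
+	*/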
+
+#endif /* __RGXHEAPCONFIG_H__ */
+
+/*****************************************************************************
+ End of file (rgxheapconfig.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxhwperf.c b/drivers/gpu/drm/img-rogue/1.10/rgxhwperf.c
new file mode 100644
index 00000000000000..31fa2e8accba68
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxhwperf.c
@@ -0,0 +1,4309 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX HW Performance implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX HW Performance implementation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "rgxdevice.h"
+#include "pvrsrv_error.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "allocmem.h"
+
+#include "pvrsrv.h"
+#include "pvrsrv_tlstreams.h"
+#include "pvrsrv_tlcommon.h"
+#include "tlclient.h"
+#include "tlstream.h"
+
+#include "rgxhwperf.h"
+#include "rgxapi_km.h"
+#include "rgxfwutils.h"
+#include "rgxtimecorr.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pdump_km.h"
+#include "pvrsrv_apphint.h"
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+
+/* This is defined by default to enable producer callbacks.
+ * Clients of the TL interface can disable the use of the callback
+ * with PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK. */
+#define SUPPORT_TL_PRODUCER_CALLBACK 1
+
+/* Maximum enum value to prevent access to RGX_HWPERF_STREAM_ID2_CLIENT stream */
+#define RGX_HWPERF_MAX_STREAM_ID (RGX_HWPERF_STREAM_ID2_CLIENT)
+
+/* Defines size of buffers returned from acquire/release calls */
+#define FW_STREAM_BUFFER_SIZE (0x80000)
+#define HOST_STREAM_BUFFER_SIZE (0x20000)
+
+/* Must be at least as large as two TL packets of maximum size */
+static_assert(HOST_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+ "HOST_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+static_assert(FW_STREAM_BUFFER_SIZE >= (PVRSRVTL_MAX_PACKET_SIZE<<1),
+ "FW_STREAM_BUFFER_SIZE is less than (PVRSRVTL_MAX_PACKET_SIZE<<1)");
+
+static inline IMG_UINT32
+RGXHWPerfGetPackets( IMG_UINT32 ui32BytesExp,
+ IMG_UINT32 ui32AllowedSize,
+ RGX_PHWPERF_V2_PACKET_HDR psCurPkt )
+{
+ IMG_UINT32 sizeSum = 0;
+
+ /* Traverse the array to find how many packets will fit in the available space. */
+ while ( sizeSum < ui32BytesExp &&
+ sizeSum + RGX_HWPERF_GET_SIZE(psCurPkt) < ui32AllowedSize )
+ {
+ sizeSum += RGX_HWPERF_GET_SIZE(psCurPkt);
+ psCurPkt = RGX_HWPERF_GET_NEXT_PACKET(psCurPkt);
+ }
+
+ return sizeSum;
+}
+
+/*
+ RGXHWPerfCopyDataL1toL2
+ */
+static IMG_UINT32 RGXHWPerfCopyDataL1toL2(PVRSRV_RGXDEV_INFO* psDeviceInfo,
+ IMG_BYTE *pbFwBuffer,
+ IMG_UINT32 ui32BytesExp)
+{
+ IMG_HANDLE hHWPerfStream = psDeviceInfo->hHWPerfStream;
+ IMG_BYTE * pbL2Buffer;
+ IMG_UINT32 ui32L2BufFree;
+ IMG_UINT32 ui32BytesCopied = 0;
+ IMG_UINT32 ui32BytesExpMin = RGX_HWPERF_GET_SIZE(RGX_HWPERF_GET_PACKET(pbFwBuffer));
+ PVRSRV_ERROR eError;
+
+ /* HWPERF_MISR_FUNC_DEBUG enables debug code for investigating HWPerf issues */
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ static IMG_UINT32 gui32Ordinal = IMG_UINT32_MAX;
+#endif
+
+ PVR_DPF_ENTERED;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ PVR_DPF((PVR_DBG_VERBOSE, "EVENTS to copy from 0x%p length:%05d",
+ pbFwBuffer, ui32BytesExp));
+#endif
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ {
+ /* Check the incoming buffer of data has not lost any packets */
+ IMG_BYTE *pbFwBufferIter = pbFwBuffer;
+ IMG_BYTE *pbFwBufferEnd = pbFwBuffer+ui32BytesExp;
+ do
+ {
+ RGX_HWPERF_V2_PACKET_HDR *asCurPos = RGX_HWPERF_GET_PACKET(pbFwBufferIter);
+ IMG_UINT32 ui32CurOrdinal = asCurPos->ui32Ordinal;
+ if (gui32Ordinal != IMG_UINT32_MAX)
+ {
+ if ((gui32Ordinal+1) != ui32CurOrdinal)
+ {
+ if (gui32Ordinal < ui32CurOrdinal)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "HWPerf [%p] packets lost (%u packets) between ordinal %u...%u",
+ pbFwBufferIter,
+ ui32CurOrdinal - gui32Ordinal - 1,
+ gui32Ordinal,
+ ui32CurOrdinal));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "HWPerf [%p] packet ordinal out of sequence last: %u, current: %u",
+ pbFwBufferIter,
+ gui32Ordinal,
+ ui32CurOrdinal));
+ }
+ }
+ }
+ gui32Ordinal = asCurPos->ui32Ordinal;
+ pbFwBufferIter += RGX_HWPERF_GET_SIZE(asCurPos);
+ } while( pbFwBufferIter < pbFwBufferEnd );
+ }
+#endif
+
+ if(ui32BytesExp > psDeviceInfo->ui32MaxPacketSize)
+ {
+ IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp,
+ psDeviceInfo->ui32MaxPacketSize,
+ RGX_HWPERF_GET_PACKET(pbFwBuffer));
+
+ if(0 != sizeSum)
+ {
+ ui32BytesExp = sizeSum;
+ }
+ else
+ {
+			PVR_DPF((PVR_DBG_ERROR, "Failed to write data into host buffer as "
+					"the packet is too big and breaches the TL "
+					"packet size limit (TLBufferSize / 2.5)"));
+ goto e0;
+ }
+ }
+
+ /* Try submitting all data in one TL packet. */
+ eError = TLStreamReserve2( hHWPerfStream,
+ &pbL2Buffer,
+ (size_t)ui32BytesExp, ui32BytesExpMin,
+ &ui32L2BufFree);
+ if ( eError == PVRSRV_OK )
+ {
+ OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)ui32BytesExp );
+ eError = TLStreamCommit(hHWPerfStream, (size_t)ui32BytesExp);
+ if ( eError != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+ eError, __func__));
+ goto e0;
+ }
+ /* Data were successfully written */
+ ui32BytesCopied = ui32BytesExp;
+ }
+ else if (eError == PVRSRV_ERROR_STREAM_FULL)
+ {
+ /* There was not enough space for all data, copy as much as possible */
+ IMG_UINT32 sizeSum = RGXHWPerfGetPackets(ui32BytesExp, ui32L2BufFree, RGX_HWPERF_GET_PACKET(pbFwBuffer));
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Unable to reserve space (%d) in host buffer on first attempt, remaining free space: %d", ui32BytesExp, ui32L2BufFree));
+
+ if ( 0 != sizeSum )
+ {
+ eError = TLStreamReserve( hHWPerfStream, &pbL2Buffer, (size_t)sizeSum);
+
+ if ( eError == PVRSRV_OK )
+ {
+ OSDeviceMemCopy( pbL2Buffer, pbFwBuffer, (size_t)sizeSum );
+ eError = TLStreamCommit(hHWPerfStream, (size_t)sizeSum);
+ if ( eError != PVRSRV_OK )
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "TLStreamCommit() failed (%d) in %s(), unable to copy packet from L1 to L2 buffer",
+ eError, __func__));
+ goto e0;
+ }
+ /* sizeSum bytes of hwperf packets have been successfully written */
+ ui32BytesCopied = sizeSum;
+ }
+ else if ( PVRSRV_ERROR_STREAM_FULL == eError )
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Cannot write HWPerf packet into host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Cannot find space in host buffer, check data in case of packet loss, remaining free space: %d", ui32L2BufFree));
+ }
+ }
+ if ( PVRSRV_OK != eError && /* Some other error occurred */
+ PVRSRV_ERROR_STREAM_FULL != eError ) /* A full stream is handled by the caller; we return the copied byte count */
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "HWPerf enabled: Unexpected Error ( %d ) while copying FW buffer to TL buffer.",
+ eError));
+ }
+
+ e0:
+ /* Return the remaining packets left to be transported. */
+ PVR_DPF_RETURN_VAL(ui32BytesCopied);
+}
+
+
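+/* Advance a read index ui32Pos by ui32Size bytes within a circular buffer of
+ * ui32BufSize bytes, wrapping back to the start once the end is reached.
+ * E.g. with ui32BufSize=16, ui32Pos=12 and ui32Size=4 the result is 0,
+ * because 12+4 is not strictly less than 16. */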
+static INLINE IMG_UINT32 RGXHWPerfAdvanceRIdx(
+ const IMG_UINT32 ui32BufSize,
+ const IMG_UINT32 ui32Pos,
+ const IMG_UINT32 ui32Size)
+{
+ return ( ui32Pos + ui32Size < ui32BufSize ? ui32Pos + ui32Size : 0 );
+}
+
+
+/*
+ RGXHWPerfDataStore
+ */
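+/* Worked example of the wrapped case below (hypothetical sizes):
+ * BufSize=1024, WrapCount=1024, RIdx=900, WIdx=100.
+ * Pass 1: copy bytes [900..1024) -> 124 bytes, RIdx advances and wraps to 0.
+ * Pass 2: RIdx==0 and WIdx>0 -> copy bytes [0..100), RIdx becomes 100.
+ * If pass 1 copies short (L2 full), RIdx stays mid-buffer and the remaining
+ * packets drain on a later call. */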
+static IMG_UINT32 RGXHWPerfDataStore(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBufCtl = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_BYTE* psHwPerfInfo = psDevInfo->psRGXFWIfHWPerfBuf;
+ IMG_UINT32 ui32SrcRIdx, ui32SrcWIdx, ui32SrcWrapCount;
+ IMG_UINT32 ui32BytesExp = 0, ui32BytesCopied = 0, ui32BytesCopiedSum = 0;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ IMG_UINT32 ui32BytesExpSum = 0;
+#endif
+
+ PVR_DPF_ENTERED;
+
+ /* Caller should check this member is valid before calling */
+ PVR_ASSERT(psDevInfo->hHWPerfStream);
+
+ /* Take a local copy of the current FW buffer indexes:
+ * read - first packet to read,
+ * write - empty location where the next write will be inserted,
+ * wrap count - size in bytes of the data in the buffer, at or past the
+ * nominal buffer end when the FW has wrapped. */
+ ui32SrcRIdx = psRGXFWIfTraceBufCtl->ui32HWPerfRIdx;
+ ui32SrcWIdx = psRGXFWIfTraceBufCtl->ui32HWPerfWIdx;
+ OSMemoryBarrier();
+ ui32SrcWrapCount = psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount;
+
+ /* Is there any data in the buffer not yet retrieved? */
+ if ( ui32SrcRIdx != ui32SrcWIdx )
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStore EVENTS found srcRIdx:%d srcWIdx: %d", ui32SrcRIdx, ui32SrcWIdx));
+
+ /* Is the write position higher than the read position? */
+ if ( ui32SrcWIdx > ui32SrcRIdx )
+ {
+ /* Yes, buffer has not wrapped */
+ ui32BytesExp = ui32SrcWIdx - ui32SrcRIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ ui32BytesExpSum += ui32BytesExp;
+#endif
+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+ psHwPerfInfo + ui32SrcRIdx,
+ ui32BytesExp);
+
+ /* Advance the read index and the free bytes counter by the number
+ * of bytes transported. Items will be left in buffer if not all data
+ * could be transported. Exit to allow buffer to drain. */
+ psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+ ui32BytesCopied);
+
+ ui32BytesCopiedSum += ui32BytesCopied;
+ }
+ /* No, buffer has wrapped and write position is behind read position */
+ else
+ {
+ /* Byte count equals the number of bytes from the read position to the
+ * end of the buffer, plus any data in the extra space at the end of
+ * the buffer. */
+ ui32BytesExp = ui32SrcWrapCount - ui32SrcRIdx;
+
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ ui32BytesExpSum += ui32BytesExp;
+#endif
+ /* Attempt to transfer the packets to the TL stream buffer */
+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+ psHwPerfInfo + ui32SrcRIdx,
+ ui32BytesExp);
+
+ /* Advance the read index as before, and update the local copy of the
+ * read index as it may be used in the final if branch below */
+ ui32SrcRIdx = RGXHWPerfAdvanceRIdx(
+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+ ui32BytesCopied);
+
+ /* Update Wrap Count */
+ if ( ui32SrcRIdx == 0)
+ {
+ psRGXFWIfTraceBufCtl->ui32HWPerfWrapCount = psDevInfo->ui32RGXFWIfHWPerfBufSize;
+ }
+ psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = ui32SrcRIdx;
+
+ ui32BytesCopiedSum += ui32BytesCopied;
+
+ /* If all the data in the end of the array was copied, try copying
+ * wrapped data in the beginning of the array, assuming there is
+ * any and the RIdx was wrapped. */
+ if ( (ui32BytesCopied == ui32BytesExp)
+ && (ui32SrcWIdx > 0)
+ && (ui32SrcRIdx == 0) )
+ {
+ ui32BytesExp = ui32SrcWIdx;
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ ui32BytesExpSum += ui32BytesExp;
+#endif
+ ui32BytesCopied = RGXHWPerfCopyDataL1toL2(psDevInfo,
+ psHwPerfInfo,
+ ui32BytesExp);
+ /* Advance the FW buffer read position. */
+ psRGXFWIfTraceBufCtl->ui32HWPerfRIdx = RGXHWPerfAdvanceRIdx(
+ psDevInfo->ui32RGXFWIfHWPerfBufSize, ui32SrcRIdx,
+ ui32BytesCopied);
+
+ ui32BytesCopiedSum += ui32BytesCopied;
+ }
+ }
+#ifdef HWPERF_MISR_FUNC_DEBUG
+ if (ui32BytesCopiedSum != ui32BytesExpSum)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfDataStore: FW L1 RIdx:%u. Not all bytes copied to L2: %u bytes out of %u expected", psRGXFWIfTraceBufCtl->ui32HWPerfRIdx, ui32BytesCopiedSum, ui32BytesExpSum));
+ }
+#endif
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfDataStore NO EVENTS to transport"));
+ }
+
+ PVR_DPF_RETURN_VAL(ui32BytesCopiedSum);
+}
+
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDevInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+ IMG_UINT32 ui32BytesCopied;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDevInfo);
+ psRgxDevInfo = psDevInfo->pvDevice;
+
+ /* Keep the HWPerf resource init check and the use of the
+ * resources atomic, so they cannot be freed while in use
+ */
+ OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+
+ if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL)
+ {
+ ui32BytesCopied = RGXHWPerfDataStore(psRgxDevInfo);
+ if ( ui32BytesCopied )
+ { /* Signal consumers that packets may be available to read. Only do
+ * this when running from a HW kick, not when called by the client
+ * APP thread via the transport layer CB, as the latter can lead to
+ * stream corruption. */
+ eError = TLStreamSync(psRgxDevInfo->hHWPerfStream);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfDataStoreCB: Zero bytes copied"));
+ RGXDEBUG_PRINT_IRQ_COUNT(psRgxDevInfo);
+ }
+ }
+
+ OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+ PVR_DPF_RETURN_OK;
+}
+
+
+/* Only compiled in when SUPPORT_TL_PRODUCER_CALLBACK is defined */
+#if defined(SUPPORT_TL_PRODUCER_CALLBACK)
+static PVRSRV_ERROR RGXHWPerfTLCB(IMG_HANDLE hStream,
+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*)pvUser;
+
+ PVR_UNREFERENCED_PARAMETER(hStream);
+ PVR_UNREFERENCED_PARAMETER(ui32Resp);
+
+ PVR_ASSERT(psRgxDevInfo);
+
+ switch (ui32ReqOp)
+ {
+ case TL_SOURCECB_OP_CLIENT_EOS:
+ /* Keep the HWPerf resource init check and the use of the
+ * resources atomic, so they cannot be freed while in use
+ */
+
+ /* This avoids a deadlock where the writer, in DoTLStreamReserve(),
+ * has acquired HWPerfLock and ReadLock and is waiting on ReadPending
+ * (which will be reset by the reader), while the reader, having set
+ * ReadPending in TLStreamAcquireReadPos(), is waiting for HWPerfLock
+ * in RGXHWPerfTLCB().
+ * So here in RGXHWPerfTLCB(), if HWPerfLock is already held we return
+ * to the reader without waiting to acquire it.
+ */
+ if( !OSTryLockAcquire(psRgxDevInfo->hHWPerfLock))
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "hHWPerfLock is already acquired, a write "
+ "operation might already be in process"));
+ return PVRSRV_OK;
+ }
+
+ if (psRgxDevInfo->hHWPerfStream != (IMG_HANDLE) NULL)
+ {
+ (void) RGXHWPerfDataStore(psRgxDevInfo);
+ }
+ OSLockRelease(psRgxDevInfo->hHWPerfLock);
+ break;
+
+ default:
+ break;
+ }
+
+ return eError;
+}
+#endif
+
+
+static void RGXHWPerfL1BufferDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc)
+ {
+ if (psRgxDevInfo->psRGXFWIfHWPerfBuf != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+ psRgxDevInfo->psRGXFWIfHWPerfBuf = NULL;
+ }
+ DevmemFwFree(psRgxDevInfo, psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+ }
+}
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfInit
+
+@Description Called during driver init to initialise the HWPerf module
+ in the Rogue device driver. This function allocates only the
+ minimal resources required for the HWPerf server module to
+ function.
+
+@Input psRgxDevInfo RGX Device Info
+
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ PVRSRV_ERROR eError;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ PVR_DPF_ENTERED;
+
+ /* expecting a valid device info */
+ PVR_ASSERT(psRgxDevInfo);
+
+ /* Create a lock for the HWPerf server module, used to serialise L1 to L2
+ * copy calls (e.g. from the TL producer callback) and L1/L2 resource
+ * allocation */
+ eError = OSLockCreate(&psRgxDevInfo->hHWPerfLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+ /* avoid uninitialised data */
+ psRgxDevInfo->hHWPerfStream = (IMG_HANDLE) NULL;
+ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc = NULL;
+
+ PVR_DPF_RETURN_OK;
+}
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfIsInitRequired
+
+@Description Returns true if the HWPerf firmware buffer (L1 buffer) and host
+ driver TL buffer (L2 buffer) are not already allocated. The
+ caller must hold the hHWPerfLock lock before calling this
+ function so the state tested is consistent.
+
+@Input psRgxDevInfo RGX Device Info, on which init requirement is
+ checked.
+
+@Return IMG_BOOL Whether initialization (allocation) is required
+ */ /**************************************************************************/
+static INLINE IMG_BOOL RGXHWPerfIsInitRequired(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hHWPerfLock));
+
+#if !defined (NO_HARDWARE)
+ /* Both L1 and L2 buffers are required (for HWPerf functioning) on driver
+ * built for actual hardware (TC, EMU, etc.)
+ */
+ if (psRgxDevInfo->hHWPerfStream == (IMG_HANDLE) NULL)
+ {
+ /* The allocation API (RGXHWPerfInitOnDemandResources) allocates
+ * device memory for both L1 and L2 without any checks. Hence,
+ * either both should be allocated or both should be NULL.
+ *
+ * In case this changes in future (e.g. a situation where one
+ * of the two buffers is already allocated and the other is required),
+ * add the required checks before the allocation calls to avoid
+ * memory leaks.
+ */
+ PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL);
+ return IMG_TRUE;
+ }
+ PVR_ASSERT(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc != NULL);
+#else
+ /* On a NO-HW driver L2 is not allocated. So, no point in checking its
+ * allocation */
+ if (psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc == NULL)
+ {
+ return IMG_TRUE;
+ }
+#endif
+ return IMG_FALSE;
+}
+#if !defined(NO_HARDWARE)
+static void _HWPerfFWOnReaderOpenCB(void *pvArg)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo = (PVRSRV_RGXDEV_INFO*) pvArg;
+ PVRSRV_DEVICE_NODE* psDevNode = (PVRSRV_DEVICE_NODE*) psRgxDevInfo->psDeviceNode;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_BVNC_FEATURES;
+
+ eError = RGXScheduleCommand(psDevNode->pvDevice, RGXFWIF_DM_GP,
+ &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to generate feature packet in "
+ "firmware (error = %d)", __func__, eError));
+ return;
+ }
+
+ eError = RGXWaitForFWOp(psDevNode->pvDevice, RGXFWIF_DM_GP,
+ psDevNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ PVR_LOGRN_IF_ERROR(eError, "RGXWaitForFWOp");
+
+}
+#endif
+/*************************************************************************/ /*!
+@Function RGXHWPerfInitOnDemandResources
+
+@Description This function allocates the HWPerf firmware buffer (L1 buffer)
+ and host driver TL buffer (L2 buffer) if HWPerf is enabled at
+ driver load time. Otherwise, these buffers are allocated
+ on demand when required. The caller must hold the hHWPerfLock
+ lock before calling this function so the state tested is
+ consistent if called outside of driver initialisation.
+
+@Input psRgxDevInfo RGX Device Info, on which init is done
+
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32L2BufferSize = 0;
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5]; /* 5 extra chars leave space for names
+ up to "hwperf_9999", which is enough */
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ PVR_DPF_ENTERED;
+
+ /* Create the L1 HWPerf buffer on demand */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT)
+ | PVRSRV_MEMALLOCFLAG_GPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE
+ | PVRSRV_MEMALLOCFLAG_CPU_READABLE
+ | PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE
+ | PVRSRV_MEMALLOCFLAG_UNCACHED
+#if defined(PDUMP) /* Helps show where the packet data ends */
+ | PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC
+#else /* Helps show corruption issues in driver-live */
+ | PVRSRV_MEMALLOCFLAG_POISON_ON_ALLOC
+#endif
+ ;
+
+ /* Allocate HWPerf FW L1 buffer */
+ eError = DevmemFwAllocate(psRgxDevInfo,
+ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize+RGXFW_HWPERF_L1_PADDING_DEFAULT,
+ uiMemAllocFlags,
+ "FwHWPerfBuffer",
+ &psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate kernel fw hwperf buffer (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+ /* Expect the RuntimeCfg structure to be mapped into CPU virtual memory.
+ * Also, make sure the FW address is not already set */
+ PVR_ASSERT(psRgxDevInfo->psRGXFWIfRuntimeCfg && psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf.ui32Addr == 0x0);
+
+ /* Meta cached flag removed from this allocation as it was found
+ * FW performance was better without it. */
+ RGXSetFirmwareAddress(&psRgxDevInfo->psRGXFWIfRuntimeCfg->sHWPerfBuf,
+ psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
+ eError = DevmemAcquireCpuVirtAddr(psRgxDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ (void**)&psRgxDevInfo->psRGXFWIfHWPerfBuf);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to acquire kernel hwperf buffer (%u)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+
+ /* On NO-HW driver, there is no MISR installed to copy data from L1 to L2. Hence,
+ * L2 buffer is not allocated */
+#if !defined(NO_HARDWARE)
+ /* The host L2 HWPerf buffer size in bytes must be bigger than the L1 buffer
+ * accessed by the FW, as the MISR may try to write one packet the size of the
+ * L1 buffer in some scenarios. When logging is enabled in the MISR, it can be
+ * seen whether the L2 buffer hits a full condition; the closer in size the L2
+ * and L1 buffers are, the more likely this is to happen.
+ * The size is chosen to allow the MISR to write an L1-sized packet while the
+ * client application/daemon drains an L1-sized packet, i.e. ~1.5*L1.
+ */
+ ui32L2BufferSize = psRgxDevInfo->ui32RGXFWIfHWPerfBufSize +
+ (psRgxDevInfo->ui32RGXFWIfHWPerfBufSize>>1);
+
+ /* Form the HWPerf stream name corresponding to this DevNode, so it is meaningful in the UM */
+ if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+ __FUNCTION__,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = TLStreamCreate(&psRgxDevInfo->hHWPerfStream,
+ psRgxDevInfo->psDeviceNode,
+ pszHWPerfStreamName,
+ ui32L2BufferSize,
+ TL_OPMODE_DROP_NEWER | TL_FLAG_NO_SIGNAL_ON_COMMIT,
+ _HWPerfFWOnReaderOpenCB, psRgxDevInfo,
+#if !defined(SUPPORT_TL_PRODUCER_CALLBACK)
+ NULL, NULL
+#else
+ /* Not enabled by default */
+ RGXHWPerfTLCB, psRgxDevInfo
+#endif
+ );
+ PVR_LOGG_IF_ERROR(eError, "TLStreamCreate", e1);
+
+ eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfStream,
+ PVRSRVGetPVRSRVData()->hTLCtrlStream);
+ /* The host stream can still be discovered, so leave it as-is and just log the error */
+ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+ /* send the event here because host stream is implicitly opened for write
+ * in TLStreamCreate and TLStreamOpen is never called (so the event is
+ * never emitted) */
+ TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfStream);
+
+ {
+ TL_STREAM_INFO sTLStreamInfo;
+
+ TLStreamInfo(psRgxDevInfo->hHWPerfStream, &sTLStreamInfo);
+ psRgxDevInfo->ui32MaxPacketSize = sTLStreamInfo.maxTLpacketSize;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf buffer size in bytes: L1: %d L2: %d",
+ psRgxDevInfo->ui32RGXFWIfHWPerfBufSize, ui32L2BufferSize));
+
+#else /* defined (NO_HARDWARE) */
+ PVR_UNREFERENCED_PARAMETER(ui32L2BufferSize);
+ PVR_UNREFERENCED_PARAMETER(RGXHWPerfTLCB);
+ PVR_UNREFERENCED_PARAMETER(pszHWPerfStreamName);
+ ui32L2BufferSize = 0;
+#endif
+
+ PVR_DPF_RETURN_OK;
+
+#if !defined(NO_HARDWARE)
+ e1: /* L2 buffer initialisation failures */
+ psRgxDevInfo->hHWPerfStream = NULL;
+#endif
+ e0: /* L1 buffer initialisation failures */
+ RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psRgxDevInfo);
+
+ /* Clean up the L2 buffer stream object if allocated */
+ if (psRgxDevInfo->hHWPerfStream)
+ {
+ /* send the event here because host stream is implicitly opened for
+ * write in TLStreamCreate and TLStreamClose is never called (so the
+ * event is never emitted) */
+ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfStream);
+ TLStreamClose(psRgxDevInfo->hHWPerfStream);
+ psRgxDevInfo->hHWPerfStream = NULL;
+ }
+
+ /* Cleanup L1 buffer resources */
+ RGXHWPerfL1BufferDeinit(psRgxDevInfo);
+
+ /* Cleanup the HWPerf server module lock resource */
+ if (psRgxDevInfo->hHWPerfLock)
+ {
+ OSLockDestroy(psRgxDevInfo->hHWPerfLock);
+ psRgxDevInfo->hHWPerfLock = NULL;
+ }
+
+ PVR_DPF_RETURN;
+}
+
+
+/******************************************************************************
+ * RGX HW Performance Profiling Server API(s)
+ *****************************************************************************/
+
+static PVRSRV_ERROR RGXHWPerfCtrlFwBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+
+ /* If this method is called, whether to enable or disable, then the
+ * HWPerf buffers (host and FW) are likely to be needed eventually, so
+ * create them now; this also helps unit testing. The buffers are
+ * allocated on demand to reduce the RAM footprint on systems not
+ * needing HWPerf resources.
+ * Obtain the lock first, then test and init if required. */
+ OSLockAcquire(psDevice->hHWPerfLock);
+
+ if (!psDevice->bFirmwareInitialised)
+ {
+ psDevice->ui64HWPerfFilter = ui64Mask; // at least set filter
+ eError = PVRSRV_ERROR_NOT_INITIALISED;
+
+ PVR_DPF((PVR_DBG_ERROR, "HWPerf has NOT been initialised yet."
+ " Mask has been SET to (%llx)", (long long) ui64Mask));
+
+ goto unlock_and_return;
+ }
+
+ if (RGXHWPerfIsInitRequired(psDevice))
+ {
+ eError = RGXHWPerfInitOnDemandResources(psDevice);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialisation of on-demand HWPerfFW "
+ "resources failed", __func__));
+ goto unlock_and_return;
+ }
+ }
+
+ /* Unlock here as no further HWPerf resources are used below that would be
+ * affected if freed by another thread */
+ OSLockRelease(psDevice->hHWPerfLock);
+
+ /* Return if the filter is the same */
+ if (!bToggle && psDevice->ui64HWPerfFilter == ui64Mask)
+ goto return_;
+
+ /* Prepare command parameters ... */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_UPDATE_CONFIG;
+ sKccbCmd.uCmdData.sHWPerfCtrl.bToggle = bToggle;
+ sKccbCmd.uCmdData.sHWPerfCtrl.ui64Mask = ui64Mask;
+
+ /* Ask the FW to carry out the HWPerf configuration command */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice, RGXFWIF_DM_GP,
+ &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set new HWPerfFW filter in "
+ "firmware (error = %d)", __func__, eError));
+ goto return_;
+ }
+
+ psDevice->ui64HWPerfFilter = bToggle ?
+ psDevice->ui64HWPerfFilter ^ ui64Mask : ui64Mask;
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP,
+ psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", return_);
+
+#if defined(DEBUG)
+ if (bToggle)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW events (%" IMG_UINT64_FMTSPECx ") have been TOGGLED",
+ ui64Mask));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%" IMG_UINT64_FMTSPECx ")",
+ ui64Mask));
+ }
+#endif
+
+ return PVRSRV_OK;
+
+ unlock_and_return:
+ OSLockRelease(psDevice->hHWPerfLock);
+
+ return_:
+ return eError;
+}
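The filter update above has two modes: a plain set (bToggle false) replaces the current filter with the supplied mask, while a toggle XORs the mask in, flipping only the named event bits. A minimal standalone sketch of that arithmetic, using made-up event bits rather than the real RGX_HWPERF_* mask values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t MASK_A = 0x1, MASK_B = 0x2; /* hypothetical event bits */
    uint64_t ui64Filter;

    /* SET (bToggle == IMG_FALSE): the filter is replaced by the mask */
    ui64Filter = MASK_A | MASK_B;
    assert(ui64Filter == 0x3);

    /* TOGGLE (bToggle == IMG_TRUE): filter ^= mask flips only those bits */
    ui64Filter ^= MASK_B;          /* disables B, leaves A enabled */
    assert(ui64Filter == MASK_A);
    ui64Filter ^= MASK_B;          /* re-enables B */
    assert(ui64Filter == 0x3);
    return 0;
}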
+
+#define HWPERF_HOST_MAX_DEFERRED_PACKETS 800
+
+static PVRSRV_ERROR RGXHWPerfCtrlHostBuffer(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bToggle,
+ IMG_UINT32 ui32Mask)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO* psDevice = psDeviceNode->pvDevice;
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ IMG_UINT32 ui32OldFilter = psDevice->ui32HWPerfHostFilter;
+#endif
+
+ OSLockAcquire(psDevice->hLockHWPerfHostStream);
+ if (psDevice->hHWPerfHostStream == NULL)
+ {
+ eError = RGXHWPerfHostInitOnDemandResources(psDevice);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfHost"
+ " resources failed", __FUNCTION__));
+ OSLockRelease(psDevice->hLockHWPerfHostStream);
+ return eError;
+ }
+ }
+
+ psDevice->ui32HWPerfHostFilter = bToggle ?
+ psDevice->ui32HWPerfHostFilter ^ ui32Mask : ui32Mask;
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ // Log deferred events stats if filter changed from non-zero to zero
+ if ((ui32OldFilter != 0) && (psDevice->ui32HWPerfHostFilter == 0))
+ {
+ PVR_LOG(("HWPerfHost deferred events buffer high-watermark / size: (%u / %u)",
+ psDevice->ui32DEHighWatermark, HWPERF_HOST_MAX_DEFERRED_PACKETS));
+
+ PVR_LOG(("HWPerfHost deferred event retries: WaitForAtomicCtxPktHighWatermark(%u) "\
+ "WaitForRightOrdPktHighWatermark(%u)",
+ psDevice->ui32WaitForAtomicCtxPktHighWatermark,
+ psDevice->ui32WaitForRightOrdPktHighWatermark));
+ }
+#endif
+
+ OSLockRelease(psDevice->hLockHWPerfHostStream);
+
+#if defined(DEBUG)
+ if (bToggle)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost events (%x) have been TOGGLED",
+ ui32Mask));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost mask has been SET to (%x)",
+ ui32Mask));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXHWPerfCtrlClientBuffer(IMG_BOOL bToggle,
+ IMG_UINT32 ui32InfoPageIdx,
+ IMG_UINT32 ui32Mask)
+{
+ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+
+ PVR_LOGR_IF_FALSE(ui32InfoPageIdx >= HWPERF_INFO_IDX_START &&
+ ui32InfoPageIdx < HWPERF_INFO_IDX_END, "invalid info"
+ " page index", PVRSRV_ERROR_INVALID_PARAMS);
+
+ OSLockAcquire(psData->hInfoPageLock);
+ psData->pui32InfoPage[ui32InfoPageIdx] = bToggle ?
+ psData->pui32InfoPage[ui32InfoPageIdx] ^ ui32Mask : ui32Mask;
+ OSLockRelease(psData->hInfoPageLock);
+
+#if defined(DEBUG)
+ if (bToggle)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) events (%x) have been TOGGLED",
+ ui32InfoPageIdx, ui32Mask));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfClient (%u) mask has been SET to (%x)",
+ ui32InfoPageIdx, ui32Mask));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 *pui32BvncKmFeatureFlags)
+{
+ PVR_LOGR_IF_FALSE((NULL != psDevInfo), "psDevInfo invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ *pui32BvncKmFeatureFlags = 0x0;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERFBUS))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERFBUS_FLAG;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_S7_TOP_INFRASTRUCTURE_FLAG;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_XT_TOP_INFRASTRUCTURE_FLAG;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PERF_COUNTER_BATCH))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PERF_COUNTER_BATCH_FLAG;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_ROGUEXE_FLAG;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, DUST_POWER_ISLAND_S7))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_DUST_POWER_ISLAND_S7_FLAG;
+ }
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+ {
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_PBE2_IN_XE_FLAG;
+ }
+
+#ifdef SUPPORT_WORKLOAD_ESTIMATION
+ /* Not a part of BVNC feature line and so doesn't need the feature supported check */
+ *pui32BvncKmFeatureFlags |= RGX_HWPERF_FEATURE_WORKLOAD_ESTIMATION;
+#endif
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32BvncKmFeatureFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_LOGR_IF_FALSE((NULL != psDeviceNode), "psDeviceNode invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ psDevInfo = psDeviceNode->pvDevice;
+ eError = RGXServerFeatureFlagsToHWPerfFlags(psDevInfo, pui32BvncKmFeatureFlags);
+
+ return eError;
+}
+
+/*
+ PVRSRVRGXCtrlHWPerfKM
+ */
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+ CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ PVR_DPF_ENTERED;
+ PVR_ASSERT(psDeviceNode);
+
+ if (eStreamId == RGX_HWPERF_STREAM_ID0_FW)
+ {
+ return RGXHWPerfCtrlFwBuffer(psDeviceNode, bToggle, ui64Mask);
+ }
+ else if (eStreamId == RGX_HWPERF_STREAM_ID1_HOST)
+ {
+ return RGXHWPerfCtrlHostBuffer(psDeviceNode, bToggle, (IMG_UINT32) ui64Mask);
+ }
+ else if (eStreamId == RGX_HWPERF_STREAM_ID2_CLIENT)
+ {
+ IMG_UINT32 ui32Index = (IMG_UINT32) (ui64Mask >> 32);
+ IMG_UINT32 ui32Mask = (IMG_UINT32) ui64Mask;
+
+ return RGXHWPerfCtrlClientBuffer(bToggle, ui32Index, ui32Mask);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXCtrlHWPerfKM: Unknown stream id."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_DPF_RETURN_OK;
+}
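For RGX_HWPERF_STREAM_ID2_CLIENT the 64-bit mask argument carries two values, as the unpacking above shows: the info-page index in the upper 32 bits and the 32-bit event mask in the lower half. A sketch of how a caller might pack that argument; the helper name is invented for illustration:

#include <stdint.h>

/* Hypothetical helper mirroring the unpack in PVRSRVRGXCtrlHWPerfKM above. */
static uint64_t PackClientFilterArg(uint32_t ui32InfoPageIdx, uint32_t ui32Mask)
{
    return ((uint64_t) ui32InfoPageIdx << 32) | (uint64_t) ui32Mask;
}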
+
+/*
+ AppHint interfaces
+ */
+static
+PVRSRV_ERROR RGXHWPerfSetFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT64 ui64Value)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ psDevNode = psPVRSRVData->psDeviceNodeList;
+ /* Control HWPerf on all the devices */
+ while (psDevNode)
+ {
+ eError = RGXHWPerfCtrlFwBuffer(psDevNode, IMG_FALSE, ui64Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
+ return eError;
+ }
+ psDevNode = psDevNode->psNext;
+ }
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadFwFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT64 *pui64Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* The configuration command is applied to all devices, so the filter value
+ * should be the same for all of them */
+ psDevice = psDeviceNode->pvDevice;
+ *pui64Value = psDevice->ui64HWPerfFilter;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfSetHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ psDevNode = psPVRSRVData->psDeviceNodeList;
+ /* Control HWPerf on all the devices */
+ while (psDevNode)
+ {
+ eError = RGXHWPerfCtrlHostBuffer(psDevNode, IMG_FALSE, ui32Value);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set HWPerf firmware filter for device (%d)", psDevNode->sDevId.i32UMIdentifier));
+ return eError;
+ }
+ psDevNode = psDevNode->psNext;
+ }
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXHWPerfReadHostFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevice = psDeviceNode->pvDevice;
+ *pui32Value = psDevice->ui32HWPerfHostFilter;
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReadClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivData,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_DATA *psData = PVRSRVGetPVRSRVData();
+ IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+ PVR_UNREFERENCED_PARAMETER(psDevice);
+
+ OSLockAcquire(psData->hInfoPageLock);
+ *pui32Value = psData->pui32InfoPage[ui32Idx];
+ OSLockRelease(psData->hInfoPageLock);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _WriteClientFilter(const PVRSRV_DEVICE_NODE *psDevice,
+ const void *psPrivData,
+ IMG_UINT32 ui32Value)
+{
+ IMG_UINT32 ui32Idx = (IMG_UINT32) (uintptr_t) psPrivData;
+ PVR_UNREFERENCED_PARAMETER(psDevice);
+
+ return RGXHWPerfCtrlClientBuffer(IMG_FALSE, ui32Idx, ui32Value);
+}
+
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRVAppHintRegisterHandlersUINT64(APPHINT_ID_HWPerfFWFilter,
+ RGXHWPerfReadFwFilter,
+ RGXHWPerfSetFwFilter,
+ psDeviceNode,
+ NULL);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfHostFilter,
+ RGXHWPerfReadHostFilter,
+ RGXHWPerfSetHostFilter,
+ psDeviceNode,
+ NULL);
+}
+
+void RGXHWPerfClientInitAppHintCallbacks(void)
+{
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Services,
+ _ReadClientFilter,
+ _WriteClientFilter,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) HWPERF_FILTER_SERVICES_IDX);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_EGL,
+ _ReadClientFilter,
+ _WriteClientFilter,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) HWPERF_FILTER_EGL_IDX);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenGLES,
+ _ReadClientFilter,
+ _WriteClientFilter,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) HWPERF_FILTER_OPENGLES_IDX);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenCL,
+ _ReadClientFilter,
+ _WriteClientFilter,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) HWPERF_FILTER_OPENCL_IDX);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_OpenRL,
+ _ReadClientFilter,
+ _WriteClientFilter,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) HWPERF_FILTER_OPENRL_IDX);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_HWPerfClientFilter_Vulkan,
+ _ReadClientFilter,
+ _WriteClientFilter,
+ APPHINT_OF_DRIVER_NO_DEVICE,
+ (void *) HWPERF_FILTER_VULKAN_IDX);
+}
+
+/*
+ PVRSRVRGXConfigEnableHWPerfCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32ArrayLen,
+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+ DEVMEM_MEMDESC* psFwBlkConfigsMemDesc;
+ RGX_HWPERF_CONFIG_CNTBLK* psFwArray;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDeviceNode);
+ PVR_ASSERT(ui32ArrayLen>0);
+ PVR_ASSERT(psBlockConfigs);
+
+ /* Fill in the command structure with the parameters needed
+ */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS;
+ sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.ui32NumBlocks = ui32ArrayLen;
+
+ eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwHWPerfCountersConfigBlock",
+ &psFwBlkConfigsMemDesc);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+ RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfCfgEnableBlks.sBlockConfigs,
+ psFwBlkConfigsMemDesc, 0, 0);
+
+ eError = DevmemAcquireCpuVirtAddr(psFwBlkConfigsMemDesc, (void **)&psFwArray);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+ }
+
+ OSDeviceMemCopy(psFwArray, psBlockConfigs, sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen);
+ DevmemPDumpLoadMem(psFwBlkConfigsMemDesc,
+ 0,
+ sizeof(RGX_HWPERF_CONFIG_CNTBLK)*ui32ArrayLen,
+ 0);
+
+ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM parameters set, calling FW"));*/
+
+ /* Ask the FW to carry out the HWPerf configuration command
+ */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+ }
+
+ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM command scheduled for FW"));*/
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+ }
+
+ /* Release temporary memory used for block configuration
+ */
+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+ /*PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXConfigEnableHWPerfCountersKM firmware completed"));*/
+
+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks configured and ENABLED", ui32ArrayLen));
+
+ PVR_DPF_RETURN_OK;
+
+ fail2:
+ DevmemReleaseCpuVirtAddr(psFwBlkConfigsMemDesc);
+ fail1:
+ RGXUnsetFirmwareAddress(psFwBlkConfigsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwBlkConfigsMemDesc);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+
+/*
+ PVRSRVRGXConfigCustomCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT16 ui16CustomBlockID,
+ IMG_UINT16 ui16NumCustomCounters,
+ IMG_UINT32 * pui32CustomCounterIDs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+ DEVMEM_MEMDESC* psFwSelectCntrsMemDesc = NULL;
+ IMG_UINT32* psFwArray;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDeviceNode);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "PVRSRVRGXSelectCustomCountersKM: configure block %u to read %u counters", ui16CustomBlockID, ui16NumCustomCounters));
+
+ /* Fill in the command structure with the parameters needed */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_SELECT_CUSTOM_CNTRS;
+ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16NumCounters = ui16NumCustomCounters;
+ sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.ui16CustomBlock = ui16CustomBlockID;
+
+ if (ui16NumCustomCounters > 0)
+ {
+ PVR_ASSERT(pui32CustomCounterIDs);
+
+ eError = DevmemFwAllocate(psDeviceNode->pvDevice,
+ sizeof(IMG_UINT32) * ui16NumCustomCounters,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwHWPerfConfigCustomCounters",
+ &psFwSelectCntrsMemDesc);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "DevmemFwAllocate");
+
+ RGXSetFirmwareAddress(&sKccbCmd.uCmdData.sHWPerfSelectCstmCntrs.sCustomCounterIDs,
+ psFwSelectCntrsMemDesc, 0, 0);
+
+ eError = DevmemAcquireCpuVirtAddr(psFwSelectCntrsMemDesc, (void **)&psFwArray);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", fail1);
+ }
+
+ OSDeviceMemCopy(psFwArray, pui32CustomCounterIDs, sizeof(IMG_UINT32) * ui16NumCustomCounters);
+ DevmemPDumpLoadMem(psFwSelectCntrsMemDesc,
+ 0,
+ sizeof(IMG_UINT32) * ui16NumCustomCounters,
+ 0);
+ }
+
+ /* Push in the KCCB the command to configure the custom counters block */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXScheduleCommand", fail2);
+ }
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: Command scheduled"));
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "RGXWaitForFWOp", fail2);
+ }
+ PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXSelectCustomCountersKM: FW operation completed"));
+
+ if (ui16NumCustomCounters > 0)
+ {
+ /* Release temporary memory used for block configuration */
+ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+ DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf custom counters %u reading will be sent with the next HW events", ui16NumCustomCounters));
+
+ PVR_DPF_RETURN_OK;
+
+ fail2:
+ if (psFwSelectCntrsMemDesc) DevmemReleaseCpuVirtAddr(psFwSelectCntrsMemDesc);
+
+ fail1:
+ if (psFwSelectCntrsMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psFwSelectCntrsMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psFwSelectCntrsMemDesc);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+/*
+ PVRSRVRGXCtrlHWPerfCountersKM
+ */
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_BOOL bEnable,
+ IMG_UINT32 ui32ArrayLen,
+ IMG_UINT16 * psBlockIDs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDeviceNode);
+ PVR_ASSERT(ui32ArrayLen>0);
+ PVR_ASSERT(ui32ArrayLen<=RGXFWIF_HWPERF_CTRL_BLKS_MAX);
+ PVR_ASSERT(psBlockIDs);
+
+ /* Fill in the command structure with the parameters needed
+ */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CTRL_BLKS;
+ sKccbCmd.uCmdData.sHWPerfCtrlBlks.bEnable = bEnable;
+ sKccbCmd.uCmdData.sHWPerfCtrlBlks.ui32NumBlocks = ui32ArrayLen;
+ OSDeviceMemCopy(sKccbCmd.uCmdData.sHWPerfCtrlBlks.aeBlockIDs, psBlockIDs, sizeof(IMG_UINT16)*ui32ArrayLen);
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM parameters set, calling FW")); */
+
+ /* Ask the FW to carry out the HWPerf configuration command
+ */
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP, &sKccbCmd, sizeof(sKccbCmd), 0, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "RGXScheduleCommand");
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM command scheduled for FW")); */
+
+ /* Wait for FW to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ PVR_LOGR_IF_ERROR(eError, "RGXWaitForFWOp");
+
+ /* PVR_DPF((PVR_DBG_VERBOSE, "PVRSRVRGXCtrlHWPerfCountersKM firmware completed")); */
+
+#if defined(DEBUG)
+ if (bEnable)
+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been ENABLED", ui32ArrayLen));
+ else
+ PVR_DPF((PVR_DBG_WARNING, "HWPerf %d counter blocks have been DISABLED", ui32ArrayLen));
+#endif
+
+ PVR_DPF_RETURN_OK;
+}
+
+static INLINE IMG_UINT32 _RGXHWPerfFixBufferSize(IMG_UINT32 ui32BufSizeKB)
+{
+ if (ui32BufSizeKB > HWPERF_HOST_TL_STREAM_SIZE_MAX)
+ {
+ /* Size specified as an AppHint but it is too big */
+ PVR_DPF((PVR_DBG_WARNING,"RGXHWPerfHostInit: HWPerf Host buffer size "
+ "value (%u) too big, using maximum (%u)", ui32BufSizeKB,
+ HWPERF_HOST_TL_STREAM_SIZE_MAX));
+ return HWPERF_HOST_TL_STREAM_SIZE_MAX<<10;
+ }
+ else if (ui32BufSizeKB >= HWPERF_HOST_TL_STREAM_SIZE_MIN)
+ {
+ return ui32BufSizeKB<<10;
+ }
+ else if (ui32BufSizeKB > 0)
+ {
+ /* Size specified as an AppHint but it is too small */
+ PVR_DPF((PVR_DBG_WARNING,"RGXHWPerfHostInit: HWPerf Host buffer size "
+ "value (%u) too small, using minimum (%u)", ui32BufSizeKB,
+ HWPERF_HOST_TL_STREAM_SIZE_MIN));
+ return HWPERF_HOST_TL_STREAM_SIZE_MIN<<10;
+ }
+ else
+ {
+ /* A size of 0 implies the AppHint is not set, or is set to zero;
+ * use the default size from the driver constant. */
+ return HWPERF_HOST_TL_STREAM_SIZE_DEFAULT<<10;
+ }
+}
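The helper above clamps a KB-sized AppHint value into the [minimum, maximum] range and converts it to bytes with a <<10 shift. A standalone sketch of the same clamping logic, assuming illustrative minimum/maximum/default values of 32, 2048 and 128 KB; the real HWPERF_HOST_TL_STREAM_SIZE_* constants are defined in the driver headers:

#include <assert.h>
#include <stdint.h>

#define SIZE_MIN_KB     32u   /* assumed stand-ins for the real constants */
#define SIZE_MAX_KB     2048u
#define SIZE_DEFAULT_KB 128u

static uint32_t FixBufferSize(uint32_t ui32BufSizeKB)
{
    if (ui32BufSizeKB > SIZE_MAX_KB)  return SIZE_MAX_KB << 10;   /* clamp down */
    if (ui32BufSizeKB >= SIZE_MIN_KB) return ui32BufSizeKB << 10; /* in range */
    if (ui32BufSizeKB > 0)            return SIZE_MIN_KB << 10;   /* clamp up */
    return SIZE_DEFAULT_KB << 10;     /* AppHint unset or zero: use default */
}

int main(void)
{
    assert(FixBufferSize(0)    == 128u  << 10); /* default */
    assert(FixBufferSize(8)    == 32u   << 10); /* too small, clamped up */
    assert(FixBufferSize(256)  == 256u  << 10); /* accepted as-is */
    assert(FixBufferSize(4096) == 2048u << 10); /* too big, clamped down */
    return 0;
}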
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfHostInit
+
+@Description Called during driver init to initialise the HWPerfHost
+ stream in the Rogue device driver. This function allocates only
+ the minimal resources required for the HWPerf server module to
+ function.
+
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB)
+{
+ PVRSRV_ERROR eError;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ PVR_ASSERT(psRgxDevInfo != NULL);
+
+ eError = OSLockCreate(&psRgxDevInfo->hLockHWPerfHostStream, LOCK_TYPE_PASSIVE);
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", error);
+
+ psRgxDevInfo->hHWPerfHostStream = NULL;
+ psRgxDevInfo->ui32HWPerfHostFilter = 0; /* disable all events */
+ psRgxDevInfo->ui32HWPerfHostNextOrdinal = 1;
+ psRgxDevInfo->ui32HWPerfHostBufSize = _RGXHWPerfFixBufferSize(ui32BufSizeKB);
+ psRgxDevInfo->pvHostHWPerfMISR = NULL;
+ psRgxDevInfo->pui8DeferredEvents = NULL;
+ /* First packet has ordinal=1, so LastOrdinal=0 will ensure ordering logic
+ * is maintained */
+ psRgxDevInfo->ui32HWPerfHostLastOrdinal = 0;
+ psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+
+ error:
+ return eError;
+}
+
+static void _HWPerfHostOnConnectCB(void *pvArg)
+{
+ RGX_HWPERF_HOST_CLK_SYNC(pvArg);
+}
+
+/* We avoid a holder struct for the fields below, as a struct is subject to
+ * padding, packing, and other compiler dependencies, and we want a contiguous
+ * stream of bytes (header+data) for use in TLStreamWrite. See
+ * _HWPerfHostDeferredEventsEmitter().
+ *
+ * A deferred (UFO) packet is represented in memory as:
+ * - IMG_BOOL --> Indicates whether a packet write is
+ * "complete" by atomic context or not.
+ * - RGX_HWPERF_V2_PACKET_HDR --.
+ * |--> Fed together to TLStreamWrite for
+ * | deferred packet to be written to
+ * | HWPerfHost buffer
+ * - RGX_HWPERF_UFO_DATA--------`
+ *
+ * PS: Currently only UFO events are supported in deferred list */
+#define HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE (sizeof(IMG_BOOL) +\
+ sizeof(RGX_HWPERF_V2_PACKET_HDR) +\
+ sizeof(RGX_HWPERF_UFO_DATA))
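The layout comment above rejects a holder struct because the compiler may insert padding between the members, which would break the assumption that the write-status flag, packet header and payload form one contiguous run of bytes for TLStreamWrite. A small illustration of that hazard, using stand-in types rather than the real RGX_HWPERF_* structures:

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t  ui8Flag; } FAKE_BOOL; /* 1 byte, stand-in */
typedef struct { uint64_t ui64Ts;  } FAKE_HDR;  /* 8-byte aligned, stand-in */

typedef struct {
    FAKE_BOOL bWritten; /* compiler may insert 7 bytes of padding here */
    FAKE_HDR  sHeader;
} HOLDER;

int main(void)
{
    /* On typical ABIs sizeof(HOLDER) is 16, not 1 + 8 = 9, so streaming
     * sizeof-member bytes back to back out of the struct would be wrong. */
    printf("holder=%zu, sum of members=%zu\n",
           sizeof(HOLDER), sizeof(FAKE_BOOL) + sizeof(FAKE_HDR));
    return 0;
}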
+
+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData);
+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT32 ui32MaxOrdinal);
+
+/*************************************************************************/ /*!
+@Function RGXHWPerfHostInitOnDemandResources
+
+@Description This function allocates the HWPerfHost buffer if HWPerf is
+ enabled at driver load time. Otherwise, the buffer is
+ allocated on demand when required.
+
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ PVRSRV_ERROR eError;
+ IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5]; /* 5 leaves space for names up to "hwperf_host_9999" */
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Form the HWPerf host stream name corresponding to this DevNode, so it is meaningful in the UM */
+ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf host stream name for device %d",
+ __FUNCTION__,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = TLStreamCreate(&psRgxDevInfo->hHWPerfHostStream,
+ psRgxDevInfo->psDeviceNode,
+ pszHWPerfHostStreamName, psRgxDevInfo->ui32HWPerfHostBufSize,
+ TL_OPMODE_DROP_NEWER,
+ _HWPerfHostOnConnectCB, psRgxDevInfo,
+ NULL, NULL);
+ PVR_LOGR_IF_ERROR(eError, "TLStreamCreate");
+
+ eError = TLStreamSetNotifStream(psRgxDevInfo->hHWPerfHostStream,
+ PVRSRVGetPVRSRVData()->hTLCtrlStream);
+ /* The host stream can still be discovered, so leave it as-is and just log the error */
+ PVR_LOG_IF_ERROR(eError, "TLStreamSetNotifStream");
+
+ /* send the event here because host stream is implicitly opened for write
+ * in TLStreamCreate and TLStreamOpen is never called (so the event is
+ * never emitted) */
+ eError = TLStreamMarkStreamOpen(psRgxDevInfo->hHWPerfHostStream);
+ PVR_LOG_IF_ERROR(eError, "TLStreamMarkStreamOpen");
+
+ /* HWPerfHost deferred events specific initialization */
+ eError = OSInstallMISR(&psRgxDevInfo->pvHostHWPerfMISR,
+ RGX_MISRHandler_HWPerfPostDeferredHostEvents, psRgxDevInfo);
+ PVR_LOGG_IF_ERROR(eError, "OSInstallMISR", err_install_misr);
+
+ eError = OSSpinLockCreate(&psRgxDevInfo->hHWPerfHostSpinLock);
+ PVR_LOGG_IF_ERROR(eError, "OSSpinLockCreate", err_spinlock_create);
+
+ psRgxDevInfo->pui8DeferredEvents = OSAllocMem(HWPERF_HOST_MAX_DEFERRED_PACKETS
+ * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE);
+ if(NULL == psRgxDevInfo->pui8DeferredEvents)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OUT OF MEMORY. Could not allocate memory for "\
+ "HWPerfHost deferred events array", __func__));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_alloc_deferred_events;
+ }
+ psRgxDevInfo->ui16DEReadIdx = 0;
+ psRgxDevInfo->ui16DEWriteIdx = 0;
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ psRgxDevInfo->ui32DEHighWatermark = 0;
+ psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = 0;
+ psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = 0;
+#endif
+
+ PVR_DPF((PVR_DBG_MESSAGE, "HWPerf Host buffer size is %uKB",
+ psRgxDevInfo->ui32HWPerfHostBufSize));
+
+ return PVRSRV_OK;
+
+ err_alloc_deferred_events:
+ OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock);
+ psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+
+ err_spinlock_create:
+ (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR);
+ psRgxDevInfo->pvHostHWPerfMISR = NULL;
+
+ err_install_misr:
+ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+ TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+ psRgxDevInfo->hHWPerfHostStream = NULL;
+
+ return eError;
+}
+
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ PVR_ASSERT (psRgxDevInfo);
+
+ if (psRgxDevInfo->pui8DeferredEvents)
+ {
+ OSFreeMem(psRgxDevInfo->pui8DeferredEvents);
+ psRgxDevInfo->pui8DeferredEvents = NULL;
+ }
+
+ if (psRgxDevInfo->hHWPerfHostSpinLock)
+ {
+ OSSpinLockDestroy(psRgxDevInfo->hHWPerfHostSpinLock);
+ psRgxDevInfo->hHWPerfHostSpinLock = NULL;
+ }
+
+ if (psRgxDevInfo->pvHostHWPerfMISR)
+ {
+ (void) OSUninstallMISR(psRgxDevInfo->pvHostHWPerfMISR);
+ psRgxDevInfo->pvHostHWPerfMISR = NULL;
+ }
+
+ if (psRgxDevInfo->hHWPerfHostStream)
+ {
+ /* send the event here because host stream is implicitly opened for
+ * write in TLStreamCreate and TLStreamClose is never called (so the
+ * event is never emitted) */
+ TLStreamMarkStreamClose(psRgxDevInfo->hHWPerfHostStream);
+ TLStreamClose(psRgxDevInfo->hHWPerfHostStream);
+ psRgxDevInfo->hHWPerfHostStream = NULL;
+ }
+
+ if (psRgxDevInfo->hLockHWPerfHostStream)
+ {
+ OSLockDestroy(psRgxDevInfo->hLockHWPerfHostStream);
+ psRgxDevInfo->hLockHWPerfHostStream = NULL;
+ }
+}
+
+inline void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Filter)
+{
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+ psRgxDevInfo->ui32HWPerfHostFilter = ui32Filter;
+}
+
+inline IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent)
+{
+ PVR_ASSERT(psRgxDevInfo);
+ return (psRgxDevInfo->ui32HWPerfHostFilter & RGX_HWPERF_EVENT_MASK_VALUE(eEvent)) ? IMG_TRUE : IMG_FALSE;
+}
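The test above is a single-bit probe of the host event filter. Assuming RGX_HWPERF_EVENT_MASK_VALUE expands to a one-bit mask per event, something like 1U << eEvent (the real definition lives in the HWPerf headers), enabling two events and probing others looks like:

#include <assert.h>
#include <stdint.h>

#define EVENT_MASK_VALUE(e) (1U << (e)) /* assumed expansion, for illustration */

int main(void)
{
    uint32_t ui32Filter = EVENT_MASK_VALUE(2) | EVENT_MASK_VALUE(5);

    assert((ui32Filter & EVENT_MASK_VALUE(2)) != 0); /* event 2 enabled  */
    assert((ui32Filter & EVENT_MASK_VALUE(7)) == 0); /* event 7 disabled */
    return 0;
}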
+
+#define MAX_RETRY_COUNT 80
+static inline void _PostFunctionPrologue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT32 ui32CurrentOrdinal)
+{
+ IMG_UINT32 ui32Retry = MAX_RETRY_COUNT;
+
+ PVR_ASSERT(psRgxDevInfo->hLockHWPerfHostStream != NULL);
+ PVR_ASSERT(psRgxDevInfo->hHWPerfHostStream != NULL);
+
+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+ /* First, flush pending events (if any) */
+ _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, ui32CurrentOrdinal);
+
+ while((ui32CurrentOrdinal != psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)
+ && (--ui32Retry != 0))
+ {
+ /* Release lock and give a chance to a waiting context to emit the
+ * expected packet */
+ OSLockRelease (psRgxDevInfo->hLockHWPerfHostStream);
+ OSSleepms(100);
+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+ }
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ if ((ui32Retry == 0) && !(psRgxDevInfo->bWarnedPktOrdinalBroke))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Will warn only once! Potential packet(s) lost after ordinal"\
+ " %u (Current ordinal = %u)", __FUNCTION__,
+ psRgxDevInfo->ui32HWPerfHostLastOrdinal, ui32CurrentOrdinal));
+ psRgxDevInfo->bWarnedPktOrdinalBroke = IMG_TRUE;
+ }
+
+ if (psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
+ {
+ psRgxDevInfo->ui32WaitForRightOrdPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
+ }
+#endif
+}
+
+static inline void _PostFunctionEpilogue(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT32 ui32CurrentOrdinal)
+{
+ /* update last ordinal emitted */
+ psRgxDevInfo->ui32HWPerfHostLastOrdinal = ui32CurrentOrdinal;
+
+ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
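Taken together, the prologue/epilogue pair brackets every packet emission from a sleepable context: the prologue takes the stream lock, flushes any ready deferred events, and waits (bounded by MAX_RETRY_COUNT) until the caller's ordinal is the next in sequence; the epilogue records the emitted ordinal and drops the lock. A sketch of the intended call pattern, assuming the driver's internal headers are in scope; the emitting function and the packet-writing step are stand-ins:

/* Sketch only, not a real driver function. */
static void _EmitSomeHostEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
{
    IMG_UINT32 ui32Ordinal;
    IMG_UINT64 ui64Timestamp;

    /* Sleepable context: no deferred-buffer slot is needed, so the
     * ppui8Dest output can be NULL (bSleepAllowed = IMG_TRUE). */
    _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal,
                                  &ui64Timestamp, NULL, IMG_TRUE);

    _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal); /* wait for our turn */
    /* ... reserve, fill and commit the packet here ... */
    _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal); /* publish the ordinal */
}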
+
+static inline IMG_UINT8 *_ReserveHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+ IMG_UINT8 *pui8Dest;
+
+ PVRSRV_ERROR eError = TLStreamReserve(psRgxDevInfo->hHWPerfHostStream,
+ &pui8Dest, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not reserve space in %s buffer"
+ " (%d). Dropping packet.",
+ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+ return NULL;
+ }
+ PVR_ASSERT(pui8Dest != NULL);
+
+ return pui8Dest;
+}
+
+static inline void _CommitHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError = TLStreamCommit(psRgxDevInfo->hHWPerfHostStream,
+ ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not commit data to %s"
+ " (%d)", __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+ }
+}
+
+/* Returns IMG_TRUE if packet write passes, IMG_FALSE otherwise */
+static inline IMG_BOOL _WriteHWPerfStream(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR *psHeader)
+{
+ PVRSRV_ERROR eError = TLStreamWrite(psRgxDevInfo->hHWPerfHostStream,
+ (IMG_UINT8*) psHeader, psHeader->ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Could not write packet in %s buffer"
+ " (%d). Dropping packet.",
+ __func__, PVRSRV_TL_HWPERF_HOST_SERVER_STREAM, eError));
+ }
+
+ /* Regardless of whether write passed/failed, we consider it "written" */
+ psRgxDevInfo->ui32HWPerfHostLastOrdinal = psHeader->ui32Ordinal;
+
+ return (eError == PVRSRV_OK);
+}
+
+/* Helper macros for deferred events operations */
+#define GET_DE_NEXT_IDX(_curridx) ((_curridx + 1) % HWPERF_HOST_MAX_DEFERRED_PACKETS)
+#define GET_DE_EVENT_BASE(_idx) (psRgxDevInfo->pui8DeferredEvents +\
+ _idx * HWPERF_HOST_DEFERRED_UFO_PACKET_SIZE)
+
+#define GET_DE_EVENT_WRITE_STATUS(_base) ((IMG_BOOL*) _base)
+#define GET_DE_EVENT_DATA(_base) (_base + sizeof(IMG_BOOL))
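+/* Worked example of the ring arithmetic above, with
+ * HWPERF_HOST_MAX_DEFERRED_PACKETS = 800:
+ * GET_DE_NEXT_IDX(0) == 1
+ * GET_DE_NEXT_IDX(798) == 799
+ * GET_DE_NEXT_IDX(799) == 0 (wraps, since 800 % 800 == 0)
+ * Each slot holds a write-status IMG_BOOL followed by the packet header and
+ * UFO payload, which is why GET_DE_EVENT_DATA skips sizeof(IMG_BOOL) from
+ * the slot base. */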
+
+/* Emits HWPerfHost event packets present in the deferred list stopping when one
+ * of the following cases is hit:
+ * case 1: Packet ordering breaks i.e. a packet found doesn't meet ordering
+ * criteria (ordinal == last_ordinal + 1)
+ *
+ * case 2: A packet with ordinal > ui32MaxOrdinal is found
+ *
+ * case 3: Deferred list's (read == write) i.e. no more deferred packets.
+ *
+ * NOTE: Caller must possess the hLockHWPerfHostStream lock before calling
+ * this function.*/
+static void _HWPerfHostDeferredEventsEmitter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT32 ui32MaxOrdinal)
+{
+ RGX_HWPERF_V2_PACKET_HDR *psHeader;
+ IMG_UINT32 ui32Retry;
+ IMG_UINT8 *pui8DeferredEvent;
+ IMG_BOOL *pbPacketWritten;
+ IMG_BOOL bWritePassed;
+
+ PVR_ASSERT(OSLockIsLocked(psRgxDevInfo->hLockHWPerfHostStream));
+
+ while (psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx)
+ {
+ pui8DeferredEvent = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEReadIdx);
+ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8DeferredEvent);
+ psHeader = (RGX_HWPERF_V2_PACKET_HDR*) GET_DE_EVENT_DATA(pui8DeferredEvent);
+
+ for(ui32Retry = MAX_RETRY_COUNT; !(*pbPacketWritten) && (ui32Retry != 0); ui32Retry--)
+ {
+ /* Packet not yet written, re-check after a while. Wait for a short period as
+ * atomic contexts are generally expected to finish fast */
+ OSWaitus(10);
+ }
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ if((ui32Retry == 0) && !(psRgxDevInfo->bWarnedAtomicCtxPktLost))
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Will warn only once. Dropping a deferred packet as atomic context"\
+ " took too long to write it", __FUNCTION__));
+ psRgxDevInfo->bWarnedAtomicCtxPktLost = IMG_TRUE;
+ }
+
+ if (psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark < (MAX_RETRY_COUNT - ui32Retry))
+ {
+ psRgxDevInfo->ui32WaitForAtomicCtxPktHighWatermark = MAX_RETRY_COUNT - ui32Retry;
+ }
+#endif
+
+ if (*pbPacketWritten)
+ {
+ if ((psHeader->ui32Ordinal > ui32MaxOrdinal) ||
+ (psHeader->ui32Ordinal != (psRgxDevInfo->ui32HWPerfHostLastOrdinal + 1)))
+ {
+ /* Leave remaining events to be emitted by next call to this function */
+ break;
+ }
+ bWritePassed = _WriteHWPerfStream(psRgxDevInfo, psHeader);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Atomic context packet lost!", __func__));
+ bWritePassed = IMG_FALSE;
+ }
+
+ /* Move on to next packet */
+ psRgxDevInfo->ui16DEReadIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEReadIdx);
+
+ if (!bWritePassed // if write failed
+ && ui32MaxOrdinal == IMG_UINT32_MAX // and we are from MISR
+ && psRgxDevInfo->ui16DEReadIdx != psRgxDevInfo->ui16DEWriteIdx) // and there are more events
+ {
+ /* Stop emitting here and re-schedule MISR */
+ OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
+ break;
+ }
+ }
+}
+
+static void RGX_MISRHandler_HWPerfPostDeferredHostEvents(void *pvData)
+{
+ PVRSRV_RGXDEV_INFO *psRgxDevInfo = pvData;
+
+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+
+ /* Since we're called from MISR, there is no upper cap of ordinal to be emitted.
+ * Send IMG_UINT32_MAX to signify all possible packets. */
+ _HWPerfHostDeferredEventsEmitter(psRgxDevInfo, IMG_UINT32_MAX);
+
+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+}
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+static inline void _UpdateDEBufferHighWatermark(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ IMG_UINT32 ui32DEWatermark;
+ IMG_UINT16 ui16LRead = psRgxDevInfo->ui16DEReadIdx;
+ IMG_UINT16 ui16LWrite = psRgxDevInfo->ui16DEWriteIdx;
+
+ if (ui16LWrite >= ui16LRead)
+ {
+ ui32DEWatermark = ui16LWrite - ui16LRead;
+ }
+ else
+ {
+ ui32DEWatermark = (HWPERF_HOST_MAX_DEFERRED_PACKETS - ui16LRead) + (ui16LWrite);
+ }
+
+ if (ui32DEWatermark > psRgxDevInfo->ui32DEHighWatermark)
+ {
+ psRgxDevInfo->ui32DEHighWatermark = ui32DEWatermark;
+ }
+}
+#endif
+
+/* @Description Gets the data/members that concern the accuracy of a packet in the
+ HWPerfHost buffer. Since the data returned by this function is
+ required in both atomic and process/sleepable contexts, it is
+ protected by a spinlock.
+
+ @Output pui32Ordinal Pointer to the ordinal number assigned to this packet
+ @Output pui64Timestamp Timestamp value for this packet
+ @Output ppui8Dest If the current context cannot sleep, a pointer to the
+ place in the deferred events buffer where the packet data
+ should be written; otherwise unused.
+ */
+static void _GetHWPerfHostPacketSpecifics(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT32 *pui32Ordinal,
+ IMG_UINT64 *pui64Timestamp,
+ IMG_UINT8 **ppui8Dest,
+ IMG_BOOL bSleepAllowed)
+{
+ IMG_UINT64 ui64SpinLockFlags;
+
+	/* The spinlock is required to avoid being scheduled out by a higher
+	 * priority context while we claim header-specific details and, when in
+	 * an atomic context, a slot in the deferred events buffer for ourselves */
+ OSSpinLockAcquire(psRgxDevInfo->hHWPerfHostSpinLock, &ui64SpinLockFlags);
+
+ *pui32Ordinal = psRgxDevInfo->ui32HWPerfHostNextOrdinal++;
+ *pui64Timestamp = RGXTimeCorrGetClockus64();
+
+ if (!bSleepAllowed)
+ {
+ /* We're in an atomic context. So return the next position available in
+ * deferred events buffer */
+ IMG_UINT16 ui16NewWriteIdx;
+ IMG_BOOL *pbPacketWritten;
+
+ PVR_ASSERT(ppui8Dest != NULL);
+
+ ui16NewWriteIdx = GET_DE_NEXT_IDX(psRgxDevInfo->ui16DEWriteIdx);
+ if (ui16NewWriteIdx == psRgxDevInfo->ui16DEReadIdx)
+ {
+ /* This shouldn't happen. HWPERF_HOST_MAX_DEFERRED_PACKETS should be
+ * big enough to avoid any such scenario */
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ /* PVR_LOG/printk isn't recommended in atomic context. Perhaps we'll do
+ * this debug output here when trace_printk support is added to DDK */
+ // PVR_LOG(("%s: No more space in deferred events buffer (%u/%u) W=%u,R=%u",
+ // __FUNCTION__, psRgxDevInfo->ui32DEHighWatermark,
+ // HWPERF_HOST_MAX_DEFERRED_PACKETS, psRgxDevInfo->ui16DEWriteIdx,
+ // psRgxDevInfo->ui16DEReadIdx));
+#endif
+ *ppui8Dest = NULL;
+ }
+ else
+ {
+ /* Return the position where deferred event would be written */
+ *ppui8Dest = GET_DE_EVENT_BASE(psRgxDevInfo->ui16DEWriteIdx);
+
+ /* Make sure packet write "state" is "write-pending" _before_ moving write
+ * pointer forward */
+ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(*ppui8Dest);
+ *pbPacketWritten = IMG_FALSE;
+
+ psRgxDevInfo->ui16DEWriteIdx = ui16NewWriteIdx;
+
+#if defined (PVRSRV_HWPERF_HOST_DEBUG_DEFERRED_EVENTS)
+ _UpdateDEBufferHighWatermark(psRgxDevInfo);
+#endif
+ }
+ }
+
+ OSSpinLockRelease(psRgxDevInfo->hHWPerfHostSpinLock, ui64SpinLockFlags);
+}
+
+static inline void _SetupHostPacketHeader(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_EVENT_TYPE eEvType,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32Ordinal,
+ IMG_UINT64 ui64Timestamp)
+{
+ RGX_HWPERF_V2_PACKET_HDR *psHeader = (RGX_HWPERF_V2_PACKET_HDR *) pui8Dest;
+
+ PVR_ASSERT(ui32Size<=RGX_HWPERF_MAX_PACKET_SIZE);
+
+ psHeader->ui32Ordinal = ui32Ordinal;
+ psHeader->ui64Timestamp = ui64Timestamp;
+ psHeader->ui32Sig = HWPERF_PACKET_V2B_SIG;
+ psHeader->eTypeId = RGX_HWPERF_MAKE_TYPEID(RGX_HWPERF_STREAM_ID1_HOST,
+ eEvType, 0, 0);
+ psHeader->ui32Size = ui32Size;
+}
+
+static inline void _SetupHostEnqPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_KICK_TYPE eEnqType,
+ IMG_UINT32 ui32Pid,
+ IMG_UINT32 ui32FWDMContext,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT64 ui64CheckFenceUID,
+ IMG_UINT64 ui64UpdateFenceUID,
+ IMG_UINT64 ui64DeadlineInus,
+ IMG_UINT64 ui64CycleEstimate)
+{
+ RGX_HWPERF_HOST_ENQ_DATA *psData = (RGX_HWPERF_HOST_ENQ_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ psData->ui32EnqType = eEnqType;
+ psData->ui32PID = ui32Pid;
+ psData->ui32ExtJobRef = ui32ExtJobRef;
+ psData->ui32IntJobRef = ui32IntJobRef;
+ psData->ui32DMContext = ui32FWDMContext;
+ psData->ui32Padding = 0; /* Set to zero for future compatibility */
+ psData->ui64CheckFence_UID = ui64CheckFenceUID;
+ psData->ui64UpdateFence_UID = ui64UpdateFenceUID;
+ psData->ui64DeadlineInus = ui64DeadlineInus;
+ psData->ui64CycleEstimate = ui64CycleEstimate;
+}
+
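+/* The RGXHWPerfHostPost*Event() functions below follow a common pattern:
+ * fetch an ordinal and timestamp under the spinlock, run the post prologue,
+ * reserve space in the host TL stream, write the packet header and payload,
+ * then commit the data and run the epilogue */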
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_KICK_TYPE eEnqType,
+ IMG_UINT32 ui32Pid,
+ IMG_UINT32 ui32FWDMContext,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT64 ui64CheckFenceUID,
+ IMG_UINT64 ui64UpdateFenceUID,
+ IMG_UINT64 ui64DeadlineInus,
+ IMG_UINT64 ui64CycleEstimate )
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_ENQ_DATA);
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT64 ui64Timestamp;
+
+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+ NULL, IMG_TRUE);
+
+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ENQ, ui32Size,
+ ui32Ordinal, ui64Timestamp);
+ _SetupHostEnqPacketData(pui8Dest,
+ eEnqType,
+ ui32Pid,
+ ui32FWDMContext,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui64CheckFenceUID,
+ ui64UpdateFenceUID,
+ ui64DeadlineInus,
+ ui64CycleEstimate);
+
+ _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+ cleanup:
+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
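+/* UFO packets are variable-sized: the payload is the fixed part of
+ * RGX_HWPERF_UFO_DATA up to aui32StreamData, plus exactly one
+ * RGX_HWPERF_UFO_DATA_ELEMENT variant selected by the event type */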
+static inline IMG_UINT32 _CalculateHostUfoPacketSize(RGX_HWPERF_UFO_EV eUfoType)
+{
+ IMG_UINT32 ui32Size =
+ (IMG_UINT32) offsetof(RGX_HWPERF_UFO_DATA, aui32StreamData);
+ RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+ switch (eUfoType)
+ {
+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+ ui32Size += sizeof(puData->sCheckSuccess);
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+ ui32Size += sizeof(puData->sCheckFail);
+ break;
+ case RGX_HWPERF_UFO_EV_UPDATE:
+ ui32Size += sizeof(puData->sUpdate);
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+ " event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostUfoPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_UFO_EV eUfoType,
+ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData)
+{
+ RGX_HWPERF_HOST_UFO_DATA *psData = (RGX_HWPERF_HOST_UFO_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ RGX_HWPERF_UFO_DATA_ELEMENT *puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ psData->aui32StreamData;
+
+ psData->eEvType = eUfoType;
+	/* HWPerfHost always emits 1 UFO at a time, since each UFO has a 1-to-1
+	 * mapping with an underlying DevNode, and each DevNode has a dedicated
+	 * HWPerf buffer */
+ psData->ui32StreamInfo = RGX_HWPERF_MAKE_UFOPKTINFO(1,
+ offsetof(RGX_HWPERF_HOST_UFO_DATA, aui32StreamData));
+
+ switch (eUfoType)
+ {
+ case RGX_HWPERF_UFO_EV_CHECK_SUCCESS:
+ case RGX_HWPERF_UFO_EV_PRCHECK_SUCCESS:
+ puData->sCheckSuccess.ui32FWAddr =
+ psUFOData->sCheckSuccess.ui32FWAddr;
+ puData->sCheckSuccess.ui32Value =
+ psUFOData->sCheckSuccess.ui32Value;
+
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ (((IMG_BYTE *) puData) + sizeof(puData->sCheckSuccess));
+ break;
+ case RGX_HWPERF_UFO_EV_CHECK_FAIL:
+ case RGX_HWPERF_UFO_EV_PRCHECK_FAIL:
+ puData->sCheckFail.ui32FWAddr =
+ psUFOData->sCheckFail.ui32FWAddr;
+ puData->sCheckFail.ui32Value =
+ psUFOData->sCheckFail.ui32Value;
+ puData->sCheckFail.ui32Required =
+ psUFOData->sCheckFail.ui32Required;
+
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ (((IMG_BYTE *) puData) + sizeof(puData->sCheckFail));
+ break;
+ case RGX_HWPERF_UFO_EV_UPDATE:
+ puData->sUpdate.ui32FWAddr =
+ psUFOData->sUpdate.ui32FWAddr;
+ puData->sUpdate.ui32OldValue =
+ psUFOData->sUpdate.ui32OldValue;
+ puData->sUpdate.ui32NewValue =
+ psUFOData->sUpdate.ui32NewValue;
+
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *)
+ (((IMG_BYTE *) puData) + sizeof(puData->sUpdate));
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfHostPostUfoEvent: Invalid UFO"
+ " event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+}
+
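+/* UFO events may be posted from atomic context. In the sleepable path the
+ * packet is reserved and committed directly in the host TL stream; in the
+ * atomic path it is written into the deferred-events slot returned by
+ * _GetHWPerfHostPacketSpecifics(), marked as written, and the MISR is
+ * scheduled to copy it into the stream later */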
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_UFO_EV eUfoType,
+ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
+ const IMG_BOOL bSleepAllowed)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = _CalculateHostUfoPacketSize(eUfoType);
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT64 ui64Timestamp;
+ IMG_BOOL *pbPacketWritten = NULL;
+
+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+ &pui8Dest, bSleepAllowed);
+
+ if (bSleepAllowed)
+ {
+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+ }
+ else
+ {
+		if (pui8Dest == NULL)
+		{
+			// Give up if we couldn't get a slot in the deferred events buffer
+ goto cleanup;
+ }
+ pbPacketWritten = GET_DE_EVENT_WRITE_STATUS(pui8Dest);
+ pui8Dest = GET_DE_EVENT_DATA(pui8Dest);
+ }
+
+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_UFO, ui32Size,
+ ui32Ordinal, ui64Timestamp);
+ _SetupHostUfoPacketData(pui8Dest, eUfoType, psUFOData);
+
+ if (bSleepAllowed)
+ {
+ _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+ }
+ else
+ {
+ *pbPacketWritten = IMG_TRUE;
+ OSScheduleMISR(psRgxDevInfo->pvHostHWPerfMISR);
+ }
+
+ cleanup:
+ if (bSleepAllowed)
+ {
+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+ }
+}
+
+#define UNKNOWN_SYNC_NAME "UnknownSync"
+
+static inline IMG_UINT32 _FixNameAndCalculateHostAllocPacketSize(
+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ const IMG_CHAR **ppsName,
+ IMG_UINT32 *ui32NameSize)
+{
+ RGX_HWPERF_HOST_ALLOC_DATA *psData;
+ IMG_UINT32 ui32Size = offsetof(RGX_HWPERF_HOST_ALLOC_DATA, uAllocDetail);
+
+ if (*ppsName != NULL && *ui32NameSize > 0)
+ {
+		/* if the string is longer than the maximum, truncate it (leaving space for '\0') */
+ if (*ui32NameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ *ui32NameSize = SYNC_MAX_CLASS_NAME_LEN;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostAllocEvent: Invalid"
+ " resource name given."));
+ *ppsName = UNKNOWN_SYNC_NAME;
+ *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME);
+ }
+
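+	/* Each uAllocDetail member carries an acName[SYNC_MAX_CLASS_NAME_LEN]
+	 * array; subtract the full array and add back only the name bytes that
+	 * will actually be carried in the packet */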
+ switch (eAllocType)
+ {
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+ ui32Size += sizeof(psData->uAllocDetail.sSyncAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+ *ui32NameSize;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE:
+ ui32Size += sizeof(psData->uAllocDetail.sTimelineAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+ *ui32NameSize;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+ ui32Size += sizeof(psData->uAllocDetail.sFenceAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+ *ui32NameSize;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP:
+ ui32Size += sizeof(psData->uAllocDetail.sSyncCheckPointAlloc) - SYNC_MAX_CLASS_NAME_LEN +
+ *ui32NameSize;
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostAllocPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ IMG_UINT64 ui64UID,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32FWAddr,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize)
+{
+ RGX_HWPERF_HOST_ALLOC_DATA *psData = (RGX_HWPERF_HOST_ALLOC_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+ IMG_CHAR *acName = NULL;
+
+ psData->ui32AllocType = eAllocType;
+
+ switch (eAllocType)
+ {
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+ psData->uAllocDetail.sSyncAlloc.ui32FWAddr = ui32FWAddr;
+ acName = psData->uAllocDetail.sSyncAlloc.acName;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE:
+ psData->uAllocDetail.sTimelineAlloc.ui64Timeline_UID1 = ui64UID;
+ psData->uAllocDetail.sTimelineAlloc.uiPid = ui32PID;
+ acName = psData->uAllocDetail.sTimelineAlloc.acName;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+ psData->uAllocDetail.sFenceAlloc.ui64Fence_UID = ui64UID;
+ psData->uAllocDetail.sFenceAlloc.ui32CheckPt_FWAddr = ui32FWAddr;
+ acName = psData->uAllocDetail.sFenceAlloc.acName;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP:
+ psData->uAllocDetail.sSyncCheckPointAlloc.ui64Timeline_UID = ui64UID;
+ psData->uAllocDetail.sSyncCheckPointAlloc.ui32CheckPt_FWAddr = ui32FWAddr;
+ acName = psData->uAllocDetail.sSyncCheckPointAlloc.acName;
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXHWPerfHostPostAllocEvent: Invalid alloc event type"));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+
+ if (ui32NameSize)
+ {
+ OSStringLCopy(acName, psName, ui32NameSize);
+ }
+ else
+ {
+ /* In case no name was given make sure we don't access random memory */
+ acName[0] = '\0';
+ }
+}
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO* psRgxDevInfo,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ IMG_UINT64 ui64UID,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32FWAddr,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT64 ui64Timestamp;
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT32 ui32Size = _FixNameAndCalculateHostAllocPacketSize(eAllocType,
+ &psName,
+ &ui32NameSize);
+
+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+ NULL, IMG_TRUE);
+
+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_ALLOC, ui32Size,
+ ui32Ordinal, ui64Timestamp);
+
+ _SetupHostAllocPacketData(pui8Dest,
+ eAllocType,
+ ui64UID,
+ ui32PID,
+ ui32FWAddr,
+ psName,
+ ui32NameSize);
+
+ _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+ cleanup:
+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline void _SetupHostFreePacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+ IMG_UINT64 ui64UID,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32FWAddr)
+{
+ RGX_HWPERF_HOST_FREE_DATA *psData = (RGX_HWPERF_HOST_FREE_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+ psData->ui32FreeType = eFreeType;
+
+ switch (eFreeType)
+ {
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNC:
+ psData->uFreeDetail.sSyncFree.ui32FWAddr = ui32FWAddr;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_TIMELINE:
+ psData->uFreeDetail.sTimelineDestroy.ui64Timeline_UID1 = ui64UID;
+ psData->uFreeDetail.sTimelineDestroy.uiPid = ui32PID;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+ psData->uFreeDetail.sFenceDestroy.ui64Fence_UID = ui64UID;
+ break;
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_SYNCCP:
+ psData->uFreeDetail.sSyncCheckPointFree.ui32CheckPt_FWAddr = ui32FWAddr;
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXHWPerfHostPostFreeEvent: Invalid free event type"));
+ PVR_ASSERT(IMG_FALSE);
+ }
+}
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+ IMG_UINT64 ui64UID,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32FWAddr)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size = RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_FREE_DATA);
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT64 ui64Timestamp;
+
+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+ NULL, IMG_TRUE);
+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_FREE, ui32Size,
+ ui32Ordinal, ui64Timestamp);
+ _SetupHostFreePacketData(pui8Dest,
+ eFreeType,
+ ui64UID,
+ ui32PID,
+ ui32FWAddr);
+
+ _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+ cleanup:
+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline IMG_UINT32 _FixNameAndCalculateHostModifyPacketSize(
+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+ const IMG_CHAR **ppsName,
+ IMG_UINT32 *ui32NameSize)
+{
+ RGX_HWPERF_HOST_MODIFY_DATA *psData;
+ RGX_HWPERF_HOST_MODIFY_DETAIL *puData;
+ IMG_UINT32 ui32Size = sizeof(psData->ui32ModifyType);
+
+ if (*ppsName != NULL && *ui32NameSize > 0)
+ {
+ /* first strip the terminator */
+ if ((*ppsName)[*ui32NameSize - 1] == '\0')
+ *ui32NameSize -= 1;
+		/* if the string is longer than the maximum, truncate it (leaving space for '\0') */
+ if (*ui32NameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ *ui32NameSize = SYNC_MAX_CLASS_NAME_LEN - 1;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfHostPostModifyEvent: Invalid"
+ " resource name given."));
+ *ppsName = UNKNOWN_SYNC_NAME;
+ *ui32NameSize = sizeof(UNKNOWN_SYNC_NAME) - 1;
+ }
+
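+	/* Same size arithmetic as the alloc variant, except that the modify
+	 * packet stores a '\0'-terminated name, hence the extra byte below */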
+ switch (eModifyType)
+ {
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+ ui32Size += sizeof(puData->sFenceMerge) - SYNC_MAX_CLASS_NAME_LEN +
+ *ui32NameSize + 1; /* +1 for '\0' */
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+ PVR_ASSERT(IMG_FALSE);
+ break;
+ }
+
+ return RGX_HWPERF_MAKE_SIZE_VARIABLE(ui32Size);
+}
+
+static inline void _SetupHostModifyPacketData(IMG_UINT8 *pui8Dest,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+ IMG_UINT64 ui64NewUID,
+ IMG_UINT64 ui64UID1,
+ IMG_UINT64 ui64UID2,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize)
+{
+ RGX_HWPERF_HOST_MODIFY_DATA *psData = (RGX_HWPERF_HOST_MODIFY_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+
+ IMG_CHAR *acName = NULL;
+
+ psData->ui32ModifyType = eModifyType;
+
+ switch (eModifyType)
+ {
+ case RGX_HWPERF_HOST_RESOURCE_TYPE_FENCE_PVR:
+ psData->uModifyDetail.sFenceMerge.ui64NewFence_UID = ui64NewUID;
+ psData->uModifyDetail.sFenceMerge.ui64InFence1_UID = ui64UID1;
+ psData->uModifyDetail.sFenceMerge.ui64InFence2_UID = ui64UID2;
+ acName = psData->uModifyDetail.sFenceMerge.acName;
+ break;
+ default:
+ // unknown type - this should never happen
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXHWPerfHostPostModifyEvent: Invalid modify event type"));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ if (ui32NameSize)
+ {
+ OSStringLCopy(acName, psName, ui32NameSize);
+ }
+ else
+ {
+ /* In case no name was given make sure we don't access random memory */
+ acName[0] = '\0';
+ }
+}
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+ IMG_UINT64 ui64NewUID,
+ IMG_UINT64 ui64UID1,
+ IMG_UINT64 ui64UID2,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT64 ui64Timestamp;
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT32 ui32Size = _FixNameAndCalculateHostModifyPacketSize(eModifyType,
+ &psName,
+ &ui32NameSize);
+
+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+ NULL, IMG_TRUE);
+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_MODIFY, ui32Size,
+ ui32Ordinal, ui64Timestamp);
+ _SetupHostModifyPacketData(pui8Dest,
+ eModifyType,
+ ui64NewUID,
+ ui64UID1,
+ ui64UID2,
+ psName,
+ ui32NameSize);
+
+	_CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+ cleanup:
+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+static inline void _SetupHostClkSyncPacketData(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT8 *pui8Dest)
+{
+ RGX_HWPERF_HOST_CLK_SYNC_DATA *psData = (RGX_HWPERF_HOST_CLK_SYNC_DATA *)
+ (pui8Dest + sizeof(RGX_HWPERF_V2_PACKET_HDR));
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psRgxDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_UINT32 ui32CurrIdx =
+ RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32CurrIdx];
+
+ psData->ui64CRTimestamp = psTimeCorr->ui64CRTimeStamp;
+ psData->ui64OSTimestamp = psTimeCorr->ui64OSTimeStamp;
+ psData->ui32ClockSpeed = psTimeCorr->ui32CoreClockSpeed;
+}
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ IMG_UINT8 *pui8Dest;
+ IMG_UINT32 ui32Size =
+ RGX_HWPERF_MAKE_SIZE_FIXED(RGX_HWPERF_HOST_CLK_SYNC_DATA);
+ IMG_UINT32 ui32Ordinal;
+ IMG_UINT64 ui64Timestamp;
+
+ _GetHWPerfHostPacketSpecifics(psRgxDevInfo, &ui32Ordinal, &ui64Timestamp,
+ NULL, IMG_TRUE);
+ _PostFunctionPrologue(psRgxDevInfo, ui32Ordinal);
+
+ if ((pui8Dest = _ReserveHWPerfStream(psRgxDevInfo, ui32Size)) == NULL)
+ {
+ goto cleanup;
+ }
+
+ _SetupHostPacketHeader(psRgxDevInfo, pui8Dest, RGX_HWPERF_HOST_CLK_SYNC, ui32Size,
+ ui32Ordinal, ui64Timestamp);
+ _SetupHostClkSyncPacketData(psRgxDevInfo, pui8Dest);
+
+ _CommitHWPerfStream(psRgxDevInfo, ui32Size);
+
+ cleanup:
+ _PostFunctionEpilogue(psRgxDevInfo, ui32Ordinal);
+}
+
+/******************************************************************************
+ * SUPPORT_GPUTRACE_EVENTS
+ *
+ * Currently only implemented on Linux and Android. The feature can be
+ * enabled on Android builds, and also on Linux builds for testing, but this
+ * requires the gpu.h FTrace event header file to be present.
+ *****************************************************************************/
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+/* Saved value of the clock source before the trace was enabled. We're keeping
+ * it here so that we know which clock should be selected after we disable the
+ * gpu ftrace. */
+static RGXTIMECORR_CLOCK_TYPE geLastTimeCorrClock = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+/* This lock ensures that the reference counting operation on the FTrace UFO
+ * events and enable/disable operation on firmware event are performed as
+ * one atomic operation. This should ensure that there are no race conditions
+ * between reference counting and firmware event state change.
+ * See below comment for guiUfoEventRef.
+ */
+static POS_LOCK ghLockFTraceEventLock;
+
+/* Multiple FTrace UFO events are reflected in the firmware as a single event.
+ * When we enable an FTrace UFO event we also want to enable the firmware
+ * event at the same time. Since there is a many-to-one relation between those
+ * events we count how many FTrace UFO events are enabled. If at least one
+ * event is enabled we enable the firmware event; when all FTrace UFO events
+ * are disabled we disable the firmware event. */
+static IMG_UINT guiUfoEventRef;
+
+static void RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE);
+
+typedef struct RGX_HWPERF_FTRACE_DATA {
+ /* This lock ensures the HWPerf TL stream reading resources are not destroyed
+ * by one thread disabling it while another is reading from it. Keeps the
+ * state and resource create/destroy atomic and consistent. */
+ POS_LOCK hFTraceResourceLock;
+
+ IMG_HANDLE hGPUTraceCmdCompleteHandle;
+ IMG_HANDLE hGPUTraceTLStream;
+ IMG_UINT64 ui64LastSampledTimeCorrOSTimeStamp;
+ IMG_UINT32 ui32FTraceLastOrdinal;
+} RGX_HWPERF_FTRACE_DATA;
+
+/* The caller must hold hFTraceResourceLock before calling this method.
+ */
+static PVRSRV_ERROR RGXHWPerfFTraceGPUEnable(PVRSRV_RGXDEV_INFO *psRgxDevInfo)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+ PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+ IMG_CHAR pszHWPerfStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psRgxDevInfo);
+
+ psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+ /* return if already enabled */
+ if (psFtraceData->hGPUTraceTLStream)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Signal FW to enable event generation */
+ if (psRgxDevInfo->bFirmwareInitialised)
+ {
+ IMG_UINT64 ui64UFOFilter = psRgxDevInfo->ui64HWPerfFilter &
+ (RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO);
+
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+ RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+ RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+ ui64UFOFilter);
+ PVR_LOGG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM", err_out);
+ }
+ else
+ {
+ /* only set filter and exit */
+ psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_HW_KICKFINISH |
+ ((RGX_HWPERF_EVENT_MASK_FW_SED | RGX_HWPERF_EVENT_MASK_FW_UFO) &
+ psRgxDevInfo->ui64HWPerfFilter);
+
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%llx)",
+ (long long) psRgxDevInfo->ui64HWPerfFilter));
+
+ return PVRSRV_OK;
+ }
+
+	/* Form the HWPerf stream name corresponding to this DevNode, so that it can be identified in the UM */
+ if (OSSNPrintf(pszHWPerfStreamName, sizeof(pszHWPerfStreamName), "%s%d",
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM, psRgxDevNode->sDevId.i32UMIdentifier) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+ __FUNCTION__,
+ psRgxDevNode->sDevId.i32UMIdentifier));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Open the TL Stream for HWPerf data consumption */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ pszHWPerfStreamName,
+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+ &psFtraceData->hGPUTraceTLStream);
+ PVR_LOGG_IF_ERROR(eError, "TLClientOpenStream", err_out);
+
+ if (RGXTimeCorrGetClockSource() != RGXTIMECORR_CLOCK_SCHED)
+ {
+ /* Set clock source for timer correlation data to sched_clock */
+ geLastTimeCorrClock = RGXTimeCorrGetClockSource();
+ RGXTimeCorrSetClockSource(psRgxDevNode, RGXTIMECORR_CLOCK_SCHED);
+ }
+
+ /* Reset the OS timestamp coming from the timer correlation data
+ * associated with the latest HWPerf event we processed.
+ */
+ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = 0;
+
+ /* Register a notifier to collect HWPerf data whenever the HW completes
+ * an operation.
+ */
+ eError = PVRSRVRegisterCmdCompleteNotify(
+ &psFtraceData->hGPUTraceCmdCompleteHandle,
+ &RGXHWPerfFTraceCmdCompleteNotify,
+ psRgxDevInfo);
+ PVR_LOGG_IF_ERROR(eError, "PVRSRVRegisterCmdCompleteNotify", err_close_stream);
+
+err_out:
+ PVR_DPF_RETURN_RC(eError);
+
+err_close_stream:
+ TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream);
+ psFtraceData->hGPUTraceTLStream = NULL;
+ goto err_out;
+}
+
+/* The caller must hold hFTraceResourceLock before calling this method.
+ */
+static PVRSRV_ERROR RGXHWPerfFTraceGPUDisable(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bDeInit)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+ PVRSRV_DEVICE_NODE *psRgxDevNode = psRgxDevInfo->psDeviceNode;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psRgxDevInfo);
+
+ psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+ PVR_ASSERT(OSLockIsLocked(psFtraceData->hFTraceResourceLock));
+
+ /* if FW is not yet initialised, just set filter and exit */
+ if (!psRgxDevInfo->bFirmwareInitialised)
+ {
+ psRgxDevInfo->ui64HWPerfFilter = RGX_HWPERF_EVENT_MASK_NONE;
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfFW mask has been SET to (%llx)",
+ (long long) psRgxDevInfo->ui64HWPerfFilter));
+
+ return PVRSRV_OK;
+ }
+
+ if (NULL == psFtraceData->hGPUTraceTLStream)
+ {
+ /* Tracing already disabled, just return */
+ return PVRSRV_OK;
+ }
+
+ if (!bDeInit)
+ {
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psRgxDevNode,
+ RGX_HWPERF_STREAM_ID0_FW, IMG_FALSE,
+ (RGX_HWPERF_EVENT_MASK_NONE));
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+ }
+
+ if (psFtraceData->hGPUTraceCmdCompleteHandle)
+ {
+ /* Tracing is being turned off. Unregister the notifier. */
+ eError = PVRSRVUnregisterCmdCompleteNotify(
+ psFtraceData->hGPUTraceCmdCompleteHandle);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVUnregisterCmdCompleteNotify");
+ psFtraceData->hGPUTraceCmdCompleteHandle = NULL;
+ }
+
+ if (psFtraceData->hGPUTraceTLStream)
+ {
+ IMG_PBYTE pbTmp = NULL;
+ IMG_UINT32 ui32Tmp = 0;
+
+ /* We have to flush both the L1 (FW) and L2 (Host) buffers in case there
+ * are some events left unprocessed in this FTrace/systrace "session"
+ * (note that even if we have just disabled HWPerf on the FW some packets
+ * could have been generated and already copied to L2 by the MISR handler).
+ *
+ * With the following calls we will both copy new data to the Host buffer
+ * (done by the producer callback in TLClientAcquireData) and advance
+ * the read offset in the buffer to catch up with the latest events.
+ */
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream,
+ &pbTmp, &ui32Tmp);
+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+ /* Let close stream perform the release data on the outstanding acquired data */
+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+ psFtraceData->hGPUTraceTLStream);
+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+
+ psFtraceData->hGPUTraceTLStream = NULL;
+ }
+
+ if (geLastTimeCorrClock != RGXTIMECORR_CLOCK_SCHED)
+ {
+ RGXTimeCorrSetClockSource(psRgxDevNode, geLastTimeCorrClock);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_BOOL bNewValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psRgxDevInfo);
+ psFtraceData = psRgxDevInfo->pvGpuFtraceData;
+
+ /* About to create/destroy FTrace resources, lock critical section
+ * to avoid HWPerf MISR thread contention.
+ */
+ OSLockAcquire(psFtraceData->hFTraceResourceLock);
+
+ eError = (bNewValue ? RGXHWPerfFTraceGPUEnable(psRgxDevInfo)
+ : RGXHWPerfFTraceGPUDisable(psRgxDevInfo, IMG_FALSE));
+
+ OSLockRelease(psFtraceData->hFTraceResourceLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceEnabledSet(IMG_BOOL bNewValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+	/* This entry point from DebugFS must take the global bridge lock at this
+	 * outer level of the stack before calling into the RGX part of the
+	 * driver. The RGX code can change device data and communicate with the
+	 * FW, all of which requires the bridge lock.
+	 */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ /* enable/disable GPU trace on all devices */
+ while (psDeviceNode)
+ {
+ eError = RGXHWPerfFTraceGPUEventsEnabledSet(psDeviceNode->pvDevice, bNewValue);
+ if (eError != PVRSRV_OK)
+ {
+ break;
+ }
+ psDeviceNode = psDeviceNode->psNext;
+ }
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR PVRGpuTraceEnabledSetNoBridgeLock(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_BOOL bNewValue)
+{
+ return RGXHWPerfFTraceGPUEventsEnabledSet(psDeviceNode->pvDevice, bNewValue);
+}
+
+/* Calculate the OS timestamp given an RGX timestamp in the HWPerf event. */
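+/* In outline, the arithmetic below computes:
+ *   os_time = ui64OSTimeStamp +
+ *             RGXFWIF_GET_DELTA_OSTIME_NS(event_ts - ui64CRTimeStamp,
+ *                                         ui64CRDeltaToOSDeltaKNs)
+ * i.e. the CR-timer delta since the last time-correlation point, converted
+ * to nanoseconds and added to the OS timestamp of that point. */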
+static uint64_t
+CalculateEventTimestamp(PVRSRV_RGXDEV_INFO *psDevInfo,
+ uint32_t ui32TimeCorrIndex,
+ uint64_t ui64EventTimestamp)
+{
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32TimeCorrIndex];
+ uint64_t ui64CRTimeStamp = psTimeCorr->ui64CRTimeStamp;
+ uint64_t ui64OSTimeStamp = psTimeCorr->ui64OSTimeStamp;
+ uint64_t ui64CRDeltaToOSDeltaKNs = psTimeCorr->ui64CRDeltaToOSDeltaKNs;
+ uint64_t ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+ if (psFtraceData->ui64LastSampledTimeCorrOSTimeStamp > ui64OSTimeStamp)
+ {
+ /* The previous packet had a time reference (time correlation data) more
+ * recent than the one in the current packet, it means the timer
+ * correlation array wrapped too quickly (buffer too small) and in the
+ * previous call to RGXHWPerfFTraceGPUUfoEvent we read one of the
+ * newest timer correlations rather than one of the oldest ones.
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: The timestamps computed so far could be "
+ "wrong! The time correlation array size should be increased "
+ "to avoid this.", __func__));
+ }
+
+ psFtraceData->ui64LastSampledTimeCorrOSTimeStamp = ui64OSTimeStamp;
+
+ /* RGX CR timer ticks delta */
+ deltaRgxTimer = ui64EventTimestamp - ui64CRTimeStamp;
+ /* RGX time delta in nanoseconds */
+ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+ /* Calculate OS time of HWPerf event */
+ ui64EventOSTimestamp = ui64OSTimeStamp + delta_ns;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: psCurrentDvfs RGX %llu, OS %llu, DVFSCLK %u",
+ __func__, ui64CRTimeStamp, ui64OSTimeStamp,
+ psTimeCorr->ui32CoreClockSpeed));
+
+ return ui64EventOSTimestamp;
+}
+
+void RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32CtxId, IMG_UINT32 ui32JobId,
+ RGX_HWPERF_KICK_TYPE eKickType)
+{
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ PVR_DPF_ENTERED;
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUEnqueueEvent: ui32CtxId %u, "
+ "ui32JobId %u", ui32CtxId, ui32JobId));
+
+ PVRGpuTraceClientWork(psDevInfo->psDeviceNode, ui32CtxId, ui32JobId,
+ RGXHWPerfKickTypeToStr(eKickType));
+
+ PVR_DPF_RETURN;
+}
+
+
+static void RGXHWPerfFTraceGPUSwitchEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+{
+ IMG_UINT64 ui64Timestamp;
+ RGX_HWPERF_HW_DATA* psHWPerfPktData;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psHWPerfPkt);
+ PVR_ASSERT(pszWorkName);
+
+ psHWPerfPktData = (RGX_HWPERF_HW_DATA*) RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+ psHWPerfPkt->ui64Timestamp);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUSwitchEvent: %s ui32ExtJobRef=%d, ui32IntJobRef=%d, eSwType=%d",
+ pszWorkName, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32IntJobRef, eSwType));
+
+ PVRGpuTraceWorkSwitch(ui64Timestamp, psHWPerfPktData->ui32DMContext, psHWPerfPktData->ui32CtxPriority,
+ psHWPerfPktData->ui32IntJobRef, pszWorkName, eSwType);
+
+ PVR_DPF_RETURN;
+}
+
+static void RGXHWPerfFTraceGPUUfoEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+ IMG_UINT64 ui64Timestamp;
+ RGX_HWPERF_UFO_DATA *psHWPerfPktData;
+ IMG_UINT32 ui32UFOCount;
+ RGX_HWPERF_UFO_DATA_ELEMENT *puData;
+
+ psHWPerfPktData = (RGX_HWPERF_UFO_DATA *)
+ RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+ ui32UFOCount = RGX_HWPERF_GET_UFO_STREAMSIZE(psHWPerfPktData->ui32StreamInfo);
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) psHWPerfPktData)
+ + RGX_HWPERF_GET_UFO_STREAMOFFSET(psHWPerfPktData->ui32StreamInfo));
+
+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+ psHWPerfPkt->ui64Timestamp);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUUfoEvent: ui32ExtJobRef=%d, "
+ "ui32IntJobRef=%d", psHWPerfPktData->ui32ExtJobRef,
+ psHWPerfPktData->ui32IntJobRef));
+
+ PVRGpuTraceUfo(ui64Timestamp, psHWPerfPktData->eEvType,
+ psHWPerfPktData->ui32ExtJobRef, psHWPerfPktData->ui32DMContext,
+ psHWPerfPktData->ui32IntJobRef, ui32UFOCount, puData);
+}
+
+static void RGXHWPerfFTraceGPUFirmwareEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt, const IMG_CHAR* pszWorkName,
+ PVR_GPUTRACE_SWITCH_TYPE eSwType)
+
+{
+ uint64_t ui64Timestamp;
+ RGX_HWPERF_FW_DATA *psHWPerfPktData = (RGX_HWPERF_FW_DATA *)
+ RGX_HWPERF_GET_PACKET_DATA_BYTES(psHWPerfPkt);
+
+ ui64Timestamp = CalculateEventTimestamp(psDevInfo, psHWPerfPktData->ui32TimeCorrIndex,
+ psHWPerfPkt->ui64Timestamp);
+
+ PVRGpuTraceFirmware(ui64Timestamp, pszWorkName, eSwType);
+}
+
+static IMG_BOOL ValidAndEmitFTraceEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt)
+{
+ RGX_HWPERF_EVENT_TYPE eType;
+ RGX_HWPERF_FTRACE_DATA *psFtraceData = psDevInfo->pvGpuFtraceData;
+ IMG_UINT32 ui32HwEventTypeIndex;
+ static const struct {
+ IMG_CHAR* pszName;
+ PVR_GPUTRACE_SWITCH_TYPE eSwType;
+ } aszHwEventTypeMap[] = {
+ { /* RGX_HWPERF_FW_BGSTART */ "BG", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_FW_BGEND */ "BG", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_FW_IRQSTART */ "IRQ", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_FW_IRQEND */ "IRQ", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_FW_DBGSTART */ "DBG", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_FW_DBGEND */ "DBG", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_PMOOM_TAPAUSE */ "PMOOM_TAPAUSE", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_TAKICK */ "TA", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TAFINISHED */ "TA", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DTQKICK */ "TQ3D", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_3DKICK */ "3D", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_3DFINISHED */ "3D", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_CDMKICK */ "CDM", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_CDMFINISHED */ "CDM", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_TLAKICK */ "TQ2D", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TLAFINISHED */ "TQ2D", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DSPMKICK */ "3DSPM", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_PERIODIC */ NULL, 0 }, /* PERIODIC not supported */
+ { /* RGX_HWPERF_HW_RTUKICK */ "RTU", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_RTUFINISHED */ "RTU", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_SHGKICK */ "SHG", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_SHGFINISHED */ "SHG", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DTQFINISHED */ "TQ3D", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_3DSPMFINISHED */ "3DSPM", PVR_GPUTRACE_SWITCH_TYPE_END },
+ { /* RGX_HWPERF_HW_PMOOM_TARESUME */ "PMOOM_TARESUME", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TDMKICK */ "TDM", PVR_GPUTRACE_SWITCH_TYPE_BEGIN },
+ { /* RGX_HWPERF_HW_TDMFINISHED */ "TDM", PVR_GPUTRACE_SWITCH_TYPE_END },
+ };
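+	/* The table above is indexed by event type relative to
+	 * RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE, with range-1 HW events placed
+	 * straight after the range-0 slots; the assert below guards the
+	 * assumption that the FW and range-0 HW event IDs are contiguous */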
+ static_assert(RGX_HWPERF_HW_EVENT_RANGE0_FIRST_TYPE == RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE + 1,
+ "FW and HW events are not contiguous in RGX_HWPERF_EVENT_TYPE");
+
+ PVR_ASSERT(psHWPerfPkt);
+ eType = RGX_HWPERF_GET_TYPE(psHWPerfPkt);
+
+ if (psFtraceData->ui32FTraceLastOrdinal != psHWPerfPkt->ui32Ordinal - 1)
+ {
+ RGX_HWPERF_STREAM_ID eStreamId = RGX_HWPERF_GET_STREAM_ID(psHWPerfPkt);
+ PVRGpuTraceEventsLost(eStreamId,
+ psFtraceData->ui32FTraceLastOrdinal,
+ psHWPerfPkt->ui32Ordinal);
+ PVR_DPF((PVR_DBG_ERROR, "FTrace events lost (stream_id = %u, ordinal: last = %u, current = %u)",
+ eStreamId, psFtraceData->ui32FTraceLastOrdinal, psHWPerfPkt->ui32Ordinal));
+ }
+
+ psFtraceData->ui32FTraceLastOrdinal = psHWPerfPkt->ui32Ordinal;
+
+ /* Process UFO packets */
+ if (eType == RGX_HWPERF_UFO)
+ {
+ RGXHWPerfFTraceGPUUfoEvent(psDevInfo, psHWPerfPkt);
+ return IMG_TRUE;
+ }
+
+ if (eType <= RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE)
+ {
+ /* this ID belongs to range 0, so index directly in range 0 */
+ ui32HwEventTypeIndex = eType - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+ }
+ else
+ {
+ /* this ID belongs to range 1, so first index in range 1 and skip number of slots used up for range 0 */
+ ui32HwEventTypeIndex = (eType - RGX_HWPERF_HW_EVENT_RANGE1_FIRST_TYPE) +
+ (RGX_HWPERF_HW_EVENT_RANGE0_LAST_TYPE - RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE + 1);
+ }
+
+ if (ui32HwEventTypeIndex >= ARRAY_SIZE(aszHwEventTypeMap))
+ goto err_unsupported;
+
+ if (aszHwEventTypeMap[ui32HwEventTypeIndex].pszName == NULL)
+ {
+ /* Not supported map entry, ignore event */
+ goto err_unsupported;
+ }
+
+ if (HWPERF_PACKET_IS_HW_TYPE(eType))
+ {
+ RGXHWPerfFTraceGPUSwitchEvent(psDevInfo, psHWPerfPkt,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+ }
+ else if (HWPERF_PACKET_IS_FW_TYPE(eType))
+ {
+ RGXHWPerfFTraceGPUFirmwareEvent(psDevInfo, psHWPerfPkt,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].pszName,
+ aszHwEventTypeMap[ui32HwEventTypeIndex].eSwType);
+ }
+ else
+ {
+ goto err_unsupported;
+ }
+
+ return IMG_TRUE;
+
+err_unsupported:
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Unsupported event type %d", __func__, eType));
+ return IMG_FALSE;
+}
+
+
+static void RGXHWPerfFTraceGPUProcessPackets(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_PBYTE pBuffer, IMG_UINT32 ui32ReadLen)
+{
+ IMG_UINT32 ui32TlPackets = 0;
+ IMG_UINT32 ui32HWPerfPackets = 0;
+ IMG_UINT32 ui32HWPerfPacketsSent = 0;
+ IMG_PBYTE pBufferEnd;
+ PVRSRVTL_PPACKETHDR psHDRptr;
+ PVRSRVTL_PACKETTYPE ui16TlType;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDevInfo);
+ PVR_ASSERT(pBuffer);
+ PVR_ASSERT(ui32ReadLen);
+
+ /* Process the TL Packets
+ */
+ pBufferEnd = pBuffer+ui32ReadLen;
+ psHDRptr = GET_PACKET_HDR(pBuffer);
+ while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+ {
+ ui16TlType = GET_PACKET_TYPE(psHDRptr);
+ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+ {
+ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+ if (0 == ui16DataLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfFTraceGPUProcessPackets: ZERO Data in TL data packet: %p", psHDRptr));
+ }
+ else
+ {
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfPkt;
+ RGX_HWPERF_V2_PACKET_HDR* psHWPerfEnd;
+
+ /* Check for lost hwperf data packets */
+ psHWPerfEnd = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr)+ui16DataLen);
+ psHWPerfPkt = RGX_HWPERF_GET_PACKET(GET_PACKET_DATA_PTR(psHDRptr));
+ do
+ {
+ if (ValidAndEmitFTraceEvent(psDevInfo, psHWPerfPkt))
+ {
+ ui32HWPerfPacketsSent++;
+ }
+ ui32HWPerfPackets++;
+ psHWPerfPkt = RGX_HWPERF_GET_NEXT_PACKET(psHWPerfPkt);
+ }
+ while (psHWPerfPkt < psHWPerfEnd);
+ }
+ }
+ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUProcessPackets: Indication that the transport buffer was full"));
+ }
+ else
+ {
+ /* else Ignore padding packet type and others */
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfFTraceGPUProcessPackets: Ignoring TL packet, type %d", ui16TlType ));
+ }
+
+ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+ ui32TlPackets++;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUProcessPackets: TL "
+ "Packets processed %03d, HWPerf packets %03d, sent %03d",
+ ui32TlPackets, ui32HWPerfPackets, ui32HWPerfPacketsSent));
+
+ PVR_DPF_RETURN;
+}
+
+
+static
+void RGXHWPerfFTraceCmdCompleteNotify(PVRSRV_CMDCOMP_HANDLE hCmdCompHandle)
+{
+ PVRSRV_RGXDEV_INFO* psDeviceInfo = hCmdCompHandle;
+ RGX_HWPERF_FTRACE_DATA* psFtraceData;
+ PVRSRV_ERROR eError;
+ IMG_PBYTE pBuffer;
+ IMG_UINT32 ui32ReadLen;
+
+ PVR_DPF_ENTERED;
+
+	/* Exit if no HWPerf-enabled device exists */
+ PVR_ASSERT(psDeviceInfo != NULL);
+
+ psFtraceData = psDeviceInfo->pvGpuFtraceData;
+
+ /* Command-complete notifiers can run concurrently. If this is
+ * happening, just bail out and let the previous call finish.
+ * This is ok because we can process the queued packets on the next call.
+ */
+ if (!OSTryLockAcquire(psFtraceData->hFTraceResourceLock))
+ {
+ PVR_DPF_RETURN;
+ }
+
+	/* If this notifier is called, the TL resources will be valid at least
+	 * until the end of this call: the DeInit function waits on the
+	 * hFTraceResourceLock to clean up the TL resources and unregister the
+	 * notifier, so just assert here.
+	 */
+ PVR_ASSERT(psFtraceData->hGPUTraceTLStream);
+
+ /* If we have a valid stream attempt to acquire some data */
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream, &pBuffer, &ui32ReadLen);
+ if (eError == PVRSRV_OK)
+ {
+ /* Process the HWPerf packets and release the data */
+ if (ui32ReadLen > 0)
+ {
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfFTraceGPUThread: DATA AVAILABLE offset=%p, length=%d", pBuffer, ui32ReadLen));
+
+ /* Process the transport layer data for HWPerf packets... */
+ RGXHWPerfFTraceGPUProcessPackets(psDeviceInfo, pBuffer, ui32ReadLen);
+
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psFtraceData->hGPUTraceTLStream);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "TLClientReleaseData");
+
+ /* Serious error, disable FTrace GPU events */
+
+ /* Release TraceLock so we always have the locking
+ * order BridgeLock->TraceLock to prevent AB-BA deadlocks*/
+ OSLockRelease(psFtraceData->hFTraceResourceLock);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+ OSLockAcquire(psFtraceData->hFTraceResourceLock);
+ RGXHWPerfFTraceGPUDisable(psDeviceInfo, IMG_FALSE);
+ OSLockRelease(psFtraceData->hFTraceResourceLock);
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+ goto out;
+
+ }
+ } /* else no data, ignore */
+ }
+ else if (eError != PVRSRV_ERROR_TIMEOUT)
+ {
+ PVR_LOG_ERROR(eError, "TLClientAcquireData");
+ }
+
+ OSLockRelease(psFtraceData->hFTraceResourceLock);
+out:
+ PVR_DPF_RETURN;
+}
+
+inline PVRSRV_ERROR RGXHWPerfFTraceGPUInitSupport(void)
+{
+ PVRSRV_ERROR eError;
+
+ if (ghLockFTraceEventLock != NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "FTrace Support is already initialized"));
+ return PVRSRV_OK;
+ }
+
+ /* common module params initialization */
+ eError = OSLockCreate(&ghLockFTraceEventLock, LOCK_TYPE_PASSIVE);
+ PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+ return PVRSRV_OK;
+}
+
+inline void RGXHWPerfFTraceGPUDeInitSupport(void)
+{
+ if (ghLockFTraceEventLock)
+ {
+ OSLockDestroy(ghLockFTraceEventLock);
+ ghLockFTraceEventLock = NULL;
+ }
+}
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ RGX_HWPERF_FTRACE_DATA *psData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ psData = OSAllocZMem(sizeof(RGX_HWPERF_FTRACE_DATA));
+ psDevInfo->pvGpuFtraceData = psData;
+ if (psData == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* We initialise it only once because we want to track if any
+ * packets were dropped. */
+ psData->ui32FTraceLastOrdinal = IMG_UINT32_MAX - 1;
+
+ eError = OSLockCreate(&psData->hFTraceResourceLock, LOCK_TYPE_DISPATCH);
+ PVR_LOGR_IF_ERROR(eError, "OSLockCreate");
+
+ return PVRSRV_OK;
+}
+
+void RGXHWPerfFTraceGPUDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_HWPERF_FTRACE_DATA *psData = psDevInfo->pvGpuFtraceData;
+
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+ if (psData)
+ {
+ /* first disable the tracing, to free up TL resources */
+ if (psData->hFTraceResourceLock)
+ {
+ OSLockAcquire(psData->hFTraceResourceLock);
+ RGXHWPerfFTraceGPUDisable(psDeviceNode->pvDevice, IMG_TRUE);
+ OSLockRelease(psData->hFTraceResourceLock);
+
+ /* now free all the FTrace resources */
+ OSLockDestroy(psData->hFTraceResourceLock);
+ }
+ OSFreeMem(psData);
+ psDevInfo->pvGpuFtraceData = NULL;
+ }
+}
+
+void PVRGpuTraceEnableUfoCallback(void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+ PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+
+ /* Lock down events state, for consistent value of guiUfoEventRef */
+ OSLockAcquire(ghLockFTraceEventLock);
+ if (guiUfoEventRef++ == 0)
+ {
+ /* make sure UFO events are enabled on all rogue devices */
+ while (psDeviceNode)
+ {
+ IMG_UINT64 ui64Filter;
+
+ psRgxDevInfo = psDeviceNode->pvDevice;
+ ui64Filter = RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO) |
+ psRgxDevInfo->ui64HWPerfFilter;
+			/* A small chance exists that ui64HWPerfFilter is changed concurrently
+			 * here, in which case the newest filter value would be replaced by
+			 * the old one plus the UFO event. This is not a critical problem. */
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter);
+ if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+ {
+ /* If we land here that means that the FW is not initialised yet.
+ * We stored the filter and it will be passed to the firmware
+ * during its initialisation phase. So ignore. */
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not enable UFO HWPerf events on device %d", psDeviceNode->sDevId.i32UMIdentifier));
+ }
+
+ psDeviceNode = psDeviceNode->psNext;
+ }
+ }
+ OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableUfoCallback(void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+
+ /* We have to check if lock is valid because on driver unload
+ * RGXHWPerfFTraceGPUDeInit is called before kernel disables the ftrace
+ * events. This means that the lock will be destroyed before this callback
+ * is called.
+ * We can safely return if that situation happens because driver will be
+ * unloaded so we don't care about HWPerf state anymore. */
+ if (ghLockFTraceEventLock == NULL)
+ return;
+
+ psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+
+ /* Lock down events state, for consistent value of guiUfoEventRef */
+ OSLockAcquire(ghLockFTraceEventLock);
+ if (--guiUfoEventRef == 0)
+ {
+ /* make sure UFO events are disabled on all rogue devices */
+ while (psDeviceNode)
+ {
+ IMG_UINT64 ui64Filter;
+ PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+
+ ui64Filter = ~(RGX_HWPERF_EVENT_MASK_VALUE(RGX_HWPERF_UFO)) &
+ psRgxDevInfo->ui64HWPerfFilter;
+			/* A small chance exists that ui64HWPerfFilter is changed concurrently
+			 * here, in which case the newest filter value would be replaced by
+			 * the old one plus the UFO event. This is not a critical problem. */
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter);
+ if (eError == PVRSRV_ERROR_NOT_INITIALISED)
+ {
+ /* If we land here that means that the FW is not initialised yet.
+ * We stored the filter and it will be passed to the firmware
+ * during its initialisation phase. So ignore. */
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not disable UFO HWPerf events on device %d",
+ psDeviceNode->sDevId.i32UMIdentifier));
+ }
+ psDeviceNode = psDeviceNode->psNext;
+ }
+ }
+ OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+ PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+ uint64_t ui64Filter, ui64FWEventsFilter = 0;
+ int i;
+
+ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+ {
+ ui64FWEventsFilter |= RGX_HWPERF_EVENT_MASK_VALUE(i);
+ }
+
+ OSLockAcquire(ghLockFTraceEventLock);
+ /* Enable all FW events on all the devices */
+ while (psDeviceNode)
+ {
+ PVRSRV_ERROR eError;
+ psRgxDevInfo = psDeviceNode->pvDevice;
+ ui64Filter = psRgxDevInfo->ui64HWPerfFilter | ui64FWEventsFilter;
+
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not enable HWPerf event for firmware"
+ " task timings (%s).", PVRSRVGetErrorStringKM(eError)));
+ }
+ psDeviceNode = psDeviceNode->psNext;
+ }
+ OSLockRelease(ghLockFTraceEventLock);
+}
+
+void PVRGpuTraceDisableFirmwareActivityCallback(void)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ IMG_UINT64 ui64FWEventsFilter = ~0;
+ int i;
+
+ /* We have to check if lock is valid because on driver unload
+ * RGXHWPerfFTraceGPUDeInit is called before kernel disables the ftrace
+ * events. This means that the lock will be destroyed before this callback
+ * is called.
+ * We can safely return if that situation happens because driver will be
+ * unloaded so we don't care about HWPerf state anymore. */
+ if (ghLockFTraceEventLock == NULL)
+ return;
+
+ psDeviceNode = PVRSRVGetPVRSRVData()->psDeviceNodeList;
+
+ for (i = RGX_HWPERF_FW_EVENT_RANGE_FIRST_TYPE;
+ i <= RGX_HWPERF_FW_EVENT_RANGE_LAST_TYPE; i++)
+ {
+ ui64FWEventsFilter &= ~RGX_HWPERF_EVENT_MASK_VALUE(i);
+ }
+
+ OSLockAcquire(ghLockFTraceEventLock);
+
+ /* Disable all FW events on all the devices */
+ while (psDeviceNode)
+ {
+ PVRSRV_RGXDEV_INFO *psRgxDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT64 ui64Filter = psRgxDevInfo->ui64HWPerfFilter & ui64FWEventsFilter;
+
+ if (PVRSRVRGXCtrlHWPerfKM(NULL, psDeviceNode, RGX_HWPERF_STREAM_ID0_FW,
+ IMG_FALSE, ui64Filter) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not disable HWPerf event for firmware task timings."));
+ }
+ psDeviceNode = psDeviceNode->psNext;
+ }
+
+ OSLockRelease(ghLockFTraceEventLock);
+}
+
+#endif /* SUPPORT_GPUTRACE_EVENTS */
+
+/******************************************************************************
+ * Currently only implemented on Linux. The feature can be enabled to provide
+ * an interface to 3rd-party kernel modules that wish to access the
+ * HWPerf data. The API is documented in the rgxapi_km.h header and
+ * the rgx_hwperf* headers.
+ *****************************************************************************/
+
+/* Internal HWPerf kernel connection/device data object to track the state
+ * of a client session.
+ */
+typedef struct
+{
+ PVRSRV_DEVICE_NODE* psRgxDevNode;
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+ /* TL Open/close state */
+ IMG_HANDLE hSD[RGX_HWPERF_MAX_STREAM_ID];
+
+ /* TL Acquire/release state */
+ IMG_PBYTE pHwpBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer returned to user in acquire call */
+ IMG_PBYTE pHwpBufEnd[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to end of HwpBuf */
+ IMG_PBYTE pTlBuf[RGX_HWPERF_MAX_STREAM_ID]; /*!< buffer obtained via TlAcquireData */
+ IMG_PBYTE pTlBufPos[RGX_HWPERF_MAX_STREAM_ID]; /*!< initial position in TlBuf to acquire packets */
+ IMG_PBYTE pTlBufRead[RGX_HWPERF_MAX_STREAM_ID]; /*!< pointer to the last packet read */
+ IMG_UINT32 ui32AcqDataLen[RGX_HWPERF_MAX_STREAM_ID]; /*!< length of acquired TlBuf */
+ IMG_BOOL bRelease[RGX_HWPERF_MAX_STREAM_ID]; /*!< used to determine whether or not to release currently held TlBuf */
+
+
+} RGX_KM_HWPERF_DEVDATA;
+
+PVRSRV_ERROR RGXHWPerfLazyConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ RGX_KM_HWPERF_DEVDATA *psDevData;
+ RGX_HWPERF_DEVICE *psNewHWPerfDevice;
+ RGX_HWPERF_CONNECTION* psHWPerfConnection;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* avoid uninitialised data */
+ PVR_ASSERT(*ppsHWPerfConnection == NULL);
+ PVR_ASSERT(psPVRSRVData);
+
+ /* Allocate connection object */
+ psHWPerfConnection = OSAllocZMem(sizeof(*psHWPerfConnection));
+ if (!psHWPerfConnection)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+	/* Save the return pointer early to aid clean-up if a failure occurs */
+ *ppsHWPerfConnection = psHWPerfConnection;
+
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+	while (psDeviceNode)
+ {
+ /* Create a list node to be attached to connection object's list */
+ psNewHWPerfDevice = OSAllocMem(sizeof(*psNewHWPerfDevice));
+ if (!psNewHWPerfDevice)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ /* Insert node at head of the list */
+ psNewHWPerfDevice->psNext = psHWPerfConnection->psHWPerfDevList;
+ psHWPerfConnection->psHWPerfDevList = psNewHWPerfDevice;
+
+ /* create a device data object for kernel server */
+ psDevData = OSAllocZMem(sizeof(*psDevData));
+ psNewHWPerfDevice->hDevData = (IMG_HANDLE)psDevData;
+ if (!psDevData)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ if (OSSNPrintf(psNewHWPerfDevice->pszName, sizeof(psNewHWPerfDevice->pszName),
+ "hwperf_device_%d", psDeviceNode->sDevId.i32UMIdentifier) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf device name for device %d",
+ __FUNCTION__,
+ psDeviceNode->sDevId.i32UMIdentifier));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevData->psRgxDevNode = psDeviceNode;
+ psDevData->psRgxDevInfo = psDeviceNode->pvDevice;
+
+ psDeviceNode = psDeviceNode->psNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXHWPerfOpen(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+ RGX_KM_HWPERF_DEVDATA *psDevData;
+ RGX_HWPERF_DEVICE *psHWPerfDev;
+ PVRSRV_RGXDEV_INFO *psRgxDevInfo;
+ PVRSRV_ERROR eError;
+ IMG_CHAR pszHWPerfFwStreamName[sizeof(PVRSRV_TL_HWPERF_RGX_FW_STREAM) + 5];
+ IMG_CHAR pszHWPerfHostStreamName[sizeof(PVRSRV_TL_HWPERF_HOST_SERVER_STREAM) + 5];
+ IMG_UINT32 ui32BufSize;
+
+ /* Disable producer callback by default for the Kernel API. */
+ IMG_UINT32 ui32StreamFlags = PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING |
+ PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Validate input argument values supplied by the caller */
+ if (!psHWPerfConnection)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+ while (psHWPerfDev)
+ {
+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+ psRgxDevInfo = psDevData->psRgxDevInfo;
+
+ /* In the case where the AppHint has not been set we need to
+ * initialise the HWPerf resources here. Allocated on-demand
+ * to reduce RAM foot print on systems not needing HWPerf.
+ */
+ OSLockAcquire(psRgxDevInfo->hHWPerfLock);
+ if (RGXHWPerfIsInitRequired(psRgxDevInfo))
+ {
+ eError = RGXHWPerfInitOnDemandResources(psRgxDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfFW"
+ " resources failed", __FUNCTION__));
+ OSLockRelease(psRgxDevInfo->hHWPerfLock);
+ return eError;
+ }
+ }
+ OSLockRelease(psRgxDevInfo->hHWPerfLock);
+
+ OSLockAcquire(psRgxDevInfo->hLockHWPerfHostStream);
+ if (psRgxDevInfo->hHWPerfHostStream == NULL)
+ {
+ eError = RGXHWPerfHostInitOnDemandResources(psRgxDevInfo);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Initialization of on-demand HWPerfHost"
+ " resources failed", __FUNCTION__));
+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+ return eError;
+ }
+ }
+ OSLockRelease(psRgxDevInfo->hLockHWPerfHostStream);
+
+	/* Form the HWPerf stream name corresponding to this DevNode, so that it is meaningful in the UM */
+ if (OSSNPrintf(pszHWPerfFwStreamName, sizeof(pszHWPerfFwStreamName), "%s%d",
+ PVRSRV_TL_HWPERF_RGX_FW_STREAM,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf stream name for device %d",
+ __FUNCTION__,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ /* Open the RGX TL stream for reading in this session */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ pszHWPerfFwStreamName,
+ ui32StreamFlags,
+ &psDevData->hSD[RGX_HWPERF_STREAM_ID0_FW]);
+ PVR_LOGR_IF_ERROR(eError, "TLClientOpenStream(RGX_HWPerf)");
+
+	/* Form the HWPerf host stream name corresponding to this DevNode, so that it is meaningful in the UM */
+ if (OSSNPrintf(pszHWPerfHostStreamName, sizeof(pszHWPerfHostStreamName), "%s%d",
+ PVRSRV_TL_HWPERF_HOST_SERVER_STREAM,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier) < 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to form HWPerf host stream name for device %d",
+ __FUNCTION__,
+ psRgxDevInfo->psDeviceNode->sDevId.i32UMIdentifier));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Open the host TL stream for reading in this session */
+ eError = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ pszHWPerfHostStreamName,
+ PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING,
+ &psDevData->hSD[RGX_HWPERF_STREAM_ID1_HOST]);
+ PVR_LOGR_IF_ERROR(eError, "TLClientOpenStream(Host_HWPerf)");
+
+	/* Allocate a buffer large enough for use during the entire session, to
+	 * avoid the need to resize it in the Acquire call, as that might run in
+	 * an ISR. Choose a size that can contain at least one packet.
+	 */
+ /* Allocate buffer for FW Stream */
+ ui32BufSize = FW_STREAM_BUFFER_SIZE;
+ psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] = OSAllocMem(ui32BufSize);
+ if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW] == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID0_FW] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]+ui32BufSize;
+
+ /* Allocate buffer for Host Stream */
+ ui32BufSize = HOST_STREAM_BUFFER_SIZE;
+ psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] = OSAllocMem(ui32BufSize);
+ if (psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST] == NULL)
+ {
+ OSFreeMem(psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID0_FW]);
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ psDevData->pHwpBufEnd[RGX_HWPERF_STREAM_ID1_HOST] = psDevData->pHwpBuf[RGX_HWPERF_STREAM_ID1_HOST]+ui32BufSize;
+
+ psHWPerfDev = psHWPerfDev->psNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+ PVRSRV_ERROR eError;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ eError = RGXHWPerfLazyConnect(ppsHWPerfConnection);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfLazyConnect", e0);
+
+ eError = RGXHWPerfOpen(*ppsHWPerfConnection);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfOpen", e1);
+
+ return PVRSRV_OK;
+
+ e1: /* HWPerfOpen might have opened some, and then failed */
+ RGXHWPerfClose(*ppsHWPerfConnection);
+ e0: /* LazyConnect might have allocated some resources and then failed,
+ * make sure they are cleaned up */
+ RGXHWPerfFreeConnection(ppsHWPerfConnection);
+ return eError;
+}
+
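+/* Illustrative sketch (excluded from the build): one way a kernel-side
+ * client could drive the connection API above. The event mask value and
+ * the function name ExampleHWPerfSession are placeholder assumptions; only
+ * the RGXHWPerf* calls are the real API.
+ */
+#if 0
+static void ExampleHWPerfSession(void)
+{
+	RGX_HWPERF_CONNECTION *psConnection = NULL;
+
+	/* Allocates the per-device objects and opens both TL streams */
+	if (RGXHWPerfConnect(&psConnection) != PVRSRV_OK)
+	{
+		return;
+	}
+
+	/* Set (bToggle == IMG_FALSE) a placeholder FW event mask on all devices */
+	(void) RGXHWPerfControl(psConnection, RGX_HWPERF_STREAM_ID0_FW,
+	                        IMG_FALSE, 0x1ULL);
+
+	/* ... acquire and release event data here, see RGXHWPerfAcquireEvents ... */
+
+	/* Closes the streams and frees the connection objects */
+	(void) RGXHWPerfDisconnect(&psConnection);
+}
+#endif
+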
+
+PVRSRV_ERROR RGXHWPerfControl(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData;
+ RGX_HWPERF_DEVICE* psHWPerfDev;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Validate input argument values supplied by the caller */
+ if (!psHWPerfConnection)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+ while (psHWPerfDev)
+ {
+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+ /* Call the internal server API */
+ eError = PVRSRVRGXCtrlHWPerfKM(NULL, psDevData->psRgxDevNode, eStreamId, bToggle, ui64Mask);
+ PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+
+ psHWPerfDev = psHWPerfDev->psNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32NumBlocks,
+ RGX_HWPERF_CONFIG_CNTBLK* asBlockConfigs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_KM_HWPERF_DEVDATA* psDevData;
+ RGX_HWPERF_DEVICE *psHWPerfDev;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Validate input argument values supplied by the caller */
+ if (!psHWPerfConnection || ui32NumBlocks==0 || !asBlockConfigs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+ while (psHWPerfDev)
+ {
+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+ /* Call the internal server API */
+ eError = PVRSRVRGXConfigEnableHWPerfCountersKM(NULL,
+ psDevData->psRgxDevNode, ui32NumBlocks, asBlockConfigs);
+ PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfKM");
+
+ psHWPerfDev = psHWPerfDev->psNext;
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfConfigureAndEnableCustomCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT16 ui16CustomBlockID,
+ IMG_UINT16 ui16NumCustomCounters,
+ IMG_UINT32 *pui32CustomCounterIDs)
+{
+ PVRSRV_ERROR eError;
+ RGX_HWPERF_DEVICE *psHWPerfDev;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Validate input arguments supplied by the caller */
+ PVR_LOGR_IF_FALSE((NULL != psHWPerfConnection), "psHWPerfConnection invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((0 != ui16NumCustomCounters), "ui16NumCustomCounters invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+	PVR_LOGR_IF_FALSE((NULL != pui32CustomCounterIDs), "pui32CustomCounterIDs invalid",
+	                  PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Check # of blocks */
+	PVR_LOGR_IF_FALSE((ui16CustomBlockID <= RGX_HWPERF_MAX_CUSTOM_BLKS), "ui16CustomBlockID invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ /* Check # of counters */
+	PVR_LOGR_IF_FALSE((ui16NumCustomCounters <= RGX_HWPERF_MAX_CUSTOM_CNTRS), "ui16NumCustomCounters invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+ while (psHWPerfDev)
+ {
+ RGX_KM_HWPERF_DEVDATA *psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+ eError = PVRSRVRGXConfigCustomCountersKM(NULL,
+ psDevData->psRgxDevNode,
+ ui16CustomBlockID, ui16NumCustomCounters, pui32CustomCounterIDs);
+ PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlCustHWPerfKM");
+
+ psHWPerfDev = psHWPerfDev->psNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisableCounters(
+ RGX_HWPERF_CONNECTION *psHWPerfConnection,
+ IMG_UINT32 ui32NumBlocks,
+ IMG_UINT16* aeBlockIDs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_KM_HWPERF_DEVDATA* psDevData;
+ RGX_HWPERF_DEVICE* psHWPerfDev;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Validate input argument values supplied by the caller */
+ if (!psHWPerfConnection || ui32NumBlocks==0 || !aeBlockIDs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32NumBlocks > RGXFWIF_HWPERF_CTRL_BLKS_MAX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+
+ while (psHWPerfDev)
+ {
+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+
+ /* Call the internal server API */
+ eError = PVRSRVRGXCtrlHWPerfCountersKM(NULL,
+ psDevData->psRgxDevNode, IMG_FALSE, ui32NumBlocks, aeBlockIDs);
+ PVR_LOGR_IF_ERROR(eError, "PVRSRVRGXCtrlHWPerfCountersKM");
+
+ psHWPerfDev = psHWPerfDev->psNext;
+ }
+
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfAcquireEvents(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_PBYTE* ppBuf,
+ IMG_UINT32* pui32BufLen)
+{
+ PVRSRV_ERROR eError;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+ IMG_PBYTE pDataDest;
+ IMG_UINT32 ui32TlPackets = 0;
+ IMG_PBYTE pBufferEnd;
+ PVRSRVTL_PPACKETHDR psHDRptr;
+ PVRSRVTL_PACKETTYPE ui16TlType;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ /* Reset the output arguments in case we discover an error */
+ *ppBuf = NULL;
+ *pui32BufLen = 0;
+
+	/* Validate the input argument values supplied by the caller */
+ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (psDevData->pTlBuf[eStreamId] == NULL)
+ {
+ /* Acquire some data to read from the HWPerf TL stream */
+ eError = TLClientAcquireData(DIRECT_BRIDGE_HANDLE,
+ psDevData->hSD[eStreamId],
+ &psDevData->pTlBuf[eStreamId],
+ &psDevData->ui32AcqDataLen[eStreamId]);
+ PVR_LOGR_IF_ERROR(eError, "TLClientAcquireData");
+
+ psDevData->pTlBufPos[eStreamId] = psDevData->pTlBuf[eStreamId];
+ }
+
+ /* TL indicates no data exists so return OK and zero. */
+ if ((psDevData->pTlBufPos[eStreamId] == NULL) || (psDevData->ui32AcqDataLen[eStreamId] == 0))
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Process each TL packet in the data buffer we have acquired */
+ pBufferEnd = psDevData->pTlBuf[eStreamId]+psDevData->ui32AcqDataLen[eStreamId];
+ pDataDest = psDevData->pHwpBuf[eStreamId];
+ psHDRptr = GET_PACKET_HDR(psDevData->pTlBufPos[eStreamId]);
+ psDevData->pTlBufRead[eStreamId] = psDevData->pTlBufPos[eStreamId];
+ while ( psHDRptr < (PVRSRVTL_PPACKETHDR)pBufferEnd )
+ {
+ ui16TlType = GET_PACKET_TYPE(psHDRptr);
+ if (ui16TlType == PVRSRVTL_PACKETTYPE_DATA)
+ {
+ IMG_UINT16 ui16DataLen = GET_PACKET_DATA_LEN(psHDRptr);
+ if (0 == ui16DataLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXHWPerfAcquireEvents: ZERO Data in TL data packet: %p", psHDRptr));
+ }
+ else
+ {
+			/* Stop if the next packet would not fit in the client buffer */
+ if (pDataDest + ui16DataLen > psDevData->pHwpBufEnd[eStreamId])
+ {
+ break;
+ }
+
+			/* Copy valid data into the client buffer and
+			 * advance the write position */
+ OSDeviceMemCopy(pDataDest, GET_PACKET_DATA_PTR(psHDRptr), ui16DataLen);
+ pDataDest += ui16DataLen;
+ }
+ }
+ else if (ui16TlType == PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Indication that the transport buffer was full"));
+ }
+ else
+ {
+		/* Ignore padding packets and any other types */
+		PVR_DPF((PVR_DBG_MESSAGE, "RGXHWPerfAcquireEvents: Ignoring TL packet, type %d", ui16TlType));
+ }
+
+ /* Update loop variable to the next packet and increment counts */
+ psHDRptr = GET_NEXT_PACKET_ADDR(psHDRptr);
+		/* Update the record of the next packet to be read. */
+ psDevData->pTlBufRead[eStreamId] = (IMG_PBYTE) psHDRptr;
+ ui32TlPackets++;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE, "RGXHWPerfAcquireEvents: TL Packets processed %03d", ui32TlPackets));
+
+ psDevData->bRelease[eStreamId] = IMG_FALSE;
+ if (psHDRptr >= (PVRSRVTL_PPACKETHDR) pBufferEnd)
+ {
+ psDevData->bRelease[eStreamId] = IMG_TRUE;
+ }
+
+ /* Update output arguments with client buffer details and true length */
+ *ppBuf = psDevData->pHwpBuf[eStreamId];
+ *pui32BufLen = pDataDest - psDevData->pHwpBuf[eStreamId];
+
+ return PVRSRV_OK;
+}
+
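+/* Illustrative sketch (excluded from the build) of the intended pairing of
+ * the Acquire/Release calls above when draining one device's FW stream.
+ * The loop shape is an assumption; hDevData is the per-device handle from
+ * the connection's device list.
+ */
+#if 0
+static void ExampleDrainFwStream(IMG_HANDLE hDevData)
+{
+	IMG_PBYTE pBuf;
+	IMG_UINT32 ui32Len;
+
+	for (;;)
+	{
+		if (RGXHWPerfAcquireEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW,
+		                           &pBuf, &ui32Len) != PVRSRV_OK)
+		{
+			break;
+		}
+		if (ui32Len == 0)
+		{
+			break; /* TL reported no data to read */
+		}
+
+		/* Process pBuf[0 .. ui32Len) here, then hand the buffer back */
+
+		(void) RGXHWPerfReleaseEvents(hDevData, RGX_HWPERF_STREAM_ID0_FW);
+	}
+}
+#endif
+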
+
+PVRSRV_ERROR RGXHWPerfReleaseEvents(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGX_KM_HWPERF_DEVDATA* psDevData = (RGX_KM_HWPERF_DEVDATA*)hDevData;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate the input argument values supplied by the caller */
+ if (!psDevData || eStreamId >= RGX_HWPERF_MAX_STREAM_ID)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (psDevData->bRelease[eStreamId])
+ {
+ /* Inform the TL that we are done with reading the data. */
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[eStreamId]);
+ psDevData->ui32AcqDataLen[eStreamId] = 0;
+ psDevData->pTlBuf[eStreamId] = NULL;
+ }
+ else
+ {
+ psDevData->pTlBufPos[eStreamId] = psDevData->pTlBufRead[eStreamId];
+ }
+ return eError;
+}
+
+
+PVRSRV_ERROR RGXHWPerfGetFilter(
+ IMG_HANDLE hDevData,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_UINT64 *ui64Filter)
+{
+ PVRSRV_RGXDEV_INFO* psRgxDevInfo;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+	/* Validate the input argument values supplied by the caller */
+ psRgxDevInfo = hDevData ? ((RGX_KM_HWPERF_DEVDATA*) hDevData)->psRgxDevInfo : NULL;
+ if (!psRgxDevInfo)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid pointer to the RGX device",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+	/* No need to take hHWPerfLock here since we are only reading
+	 * always-present integers to return to debugfs, which is an
+	 * atomic operation.
+	 */
+ switch (eStreamId) {
+ case RGX_HWPERF_STREAM_ID0_FW:
+ *ui64Filter = psRgxDevInfo->ui64HWPerfFilter;
+ break;
+ case RGX_HWPERF_STREAM_ID1_HOST:
+ *ui64Filter = psRgxDevInfo->ui32HWPerfHostFilter;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid stream ID",
+ __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfFreeConnection(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+ RGX_HWPERF_DEVICE *psHWPerfDev, *psHWPerfNextDev;
+ RGX_HWPERF_CONNECTION *psHWPerfConnection = *ppsHWPerfConnection;
+
+ /* if connection object itself is NULL, nothing to free */
+ if (psHWPerfConnection == NULL)
+ {
+ return PVRSRV_OK;
+ }
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ psHWPerfNextDev = psHWPerfConnection->psHWPerfDevList;
+ while (psHWPerfNextDev)
+ {
+ psHWPerfDev = psHWPerfNextDev;
+ psHWPerfNextDev = psHWPerfNextDev->psNext;
+
+ /* Free the session memory */
+ if (psHWPerfDev->hDevData)
+ OSFreeMem(psHWPerfDev->hDevData);
+ OSFreeMem(psHWPerfDev);
+ }
+ OSFreeMem(psHWPerfConnection);
+ *ppsHWPerfConnection = NULL;
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfClose(RGX_HWPERF_CONNECTION *psHWPerfConnection)
+{
+ RGX_HWPERF_DEVICE *psHWPerfDev;
+ RGX_KM_HWPERF_DEVDATA* psDevData;
+ IMG_UINT uiStreamId;
+ PVRSRV_ERROR eError;
+
+	/* Check the session connection is not NULL */
+ if (!psHWPerfConnection)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ psHWPerfDev = psHWPerfConnection->psHWPerfDevList;
+ while (psHWPerfDev)
+ {
+ psDevData = (RGX_KM_HWPERF_DEVDATA *) psHWPerfDev->hDevData;
+ for (uiStreamId = 0; uiStreamId < RGX_HWPERF_MAX_STREAM_ID; uiStreamId++)
+ {
+			/* If the TL buffer still exists, the client did not call
+			 * ReleaseData before disconnecting, so clean it up */
+ if (psDevData->pTlBuf[uiStreamId])
+ {
+				/* Call TLClientReleaseData and null out the buffer fields
+				 * and length */
+ eError = TLClientReleaseData(DIRECT_BRIDGE_HANDLE, psDevData->hSD[uiStreamId]);
+ psDevData->ui32AcqDataLen[uiStreamId] = 0;
+ psDevData->pTlBuf[uiStreamId] = NULL;
+ PVR_LOG_IF_ERROR(eError, "TLClientReleaseData");
+ /* Packets may be lost if release was not required */
+ if (!psDevData->bRelease[uiStreamId])
+ {
+ PVR_DPF((PVR_DBG_WARNING, "RGXHWPerfClose: Events in buffer waiting to be read, remaining events may be lost."));
+ }
+ }
+
+ /* Close the TL stream, ignore the error if it occurs as we
+ * are disconnecting */
+ if (psDevData->hSD[uiStreamId])
+ {
+ eError = TLClientCloseStream(DIRECT_BRIDGE_HANDLE,
+ psDevData->hSD[uiStreamId]);
+ PVR_LOG_IF_ERROR(eError, "TLClientCloseStream");
+ psDevData->hSD[uiStreamId] = NULL;
+ }
+
+ /* Free the client buffer used in session */
+ if (psDevData->pHwpBuf[uiStreamId])
+ {
+ OSFreeMem(psDevData->pHwpBuf[uiStreamId]);
+ psDevData->pHwpBuf[uiStreamId] = NULL;
+ }
+ }
+ psHWPerfDev = psHWPerfDev->psNext;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR RGXHWPerfDisconnect(RGX_HWPERF_CONNECTION** ppsHWPerfConnection)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_ERROR_NOT_IMPLEMENTED);
+
+ eError = RGXHWPerfClose(*ppsHWPerfConnection);
+ PVR_LOG_IF_ERROR(eError, "RGXHWPerfClose");
+
+ eError = RGXHWPerfFreeConnection(ppsHWPerfConnection);
+ PVR_LOG_IF_ERROR(eError, "RGXHWPerfFreeConnection");
+
+ return eError;
+}
+
+
+const IMG_CHAR *RGXHWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType)
+{
+ static const IMG_CHAR *aszKickType[RGX_HWPERF_KICK_TYPE_LAST+1] = {
+ "TA3D", "TQ2D", "TQ3D", "CDM", "RS", "VRDM", "TQTDM", "SYNC", "LAST"
+ };
+
+ /* cast in case of negative value */
+ if (((IMG_UINT32) eKickType) >= RGX_HWPERF_KICK_TYPE_LAST)
+ {
+ return "<UNKNOWN>";
+ }
+
+ return aszKickType[eKickType];
+}
+
+
+IMG_UINT64 RGXHWPerfConvertCRTimeStamp(
+ IMG_UINT32 ui32ClkSpeed,
+ IMG_UINT64 ui64CorrCRTimeStamp,
+ IMG_UINT64 ui64CorrOSTimeStamp,
+ IMG_UINT64 ui64CRTimeStamp)
+{
+ IMG_UINT32 ui32Remainder;
+ IMG_UINT64 ui64CRDeltaToOSDeltaKNs;
+ IMG_UINT64 ui64EventOSTimestamp, deltaRgxTimer, delta_ns;
+
+ if (!(ui64CRTimeStamp) || !(ui32ClkSpeed) || !(ui64CorrCRTimeStamp) || !(ui64CorrOSTimeStamp))
+ {
+ return 0;
+ }
+
+ ui64CRDeltaToOSDeltaKNs = RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(ui32ClkSpeed,
+ ui32Remainder);
+
+ /* RGX CR timer ticks delta */
+ deltaRgxTimer = ui64CRTimeStamp - ui64CorrCRTimeStamp;
+ /* RGX time delta in nanoseconds */
+ delta_ns = RGXFWIF_GET_DELTA_OSTIME_NS(deltaRgxTimer, ui64CRDeltaToOSDeltaKNs);
+ /* Calculate OS time of HWPerf event */
+ ui64EventOSTimestamp = ui64CorrOSTimeStamp + delta_ns;
+
+ return ui64EventOSTimestamp;
+}
+
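+/* In effect the conversion above computes, in nanosecond-based OS time:
+ *
+ *   EventOSTime = CorrOSTime + (CRTimeStamp - CorrCRTimeStamp) * K
+ *
+ * where K is the CR-tick-to-nanosecond scale factor derived from the GPU
+ * clock speed by RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS. The correlation pair
+ * (CorrCRTimeStamp, CorrOSTimeStamp) is a sample taken when both timers
+ * were read together, so only the delta since that sample needs scaling.
+ */
+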
+/******************************************************************************
+ End of file (rgxhwperf.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxhwperf.h b/drivers/gpu/drm/img-rogue/1.10/rgxhwperf.h
new file mode 100644
index 00000000000000..633c2894424c67
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxhwperf.h
@@ -0,0 +1,387 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX HW Performance header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX HWPerf functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXHWPERF_H_
+#define RGXHWPERF_H_
+
+#include "img_types.h"
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "connection_server.h"
+#include "rgxdevice.h"
+#include "rgx_hwperf.h"
+
+/* HWPerf host buffer size constraints in KBs */
+#define HWPERF_HOST_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFHOSTBUFSIZEINKB
+#define HWPERF_HOST_TL_STREAM_SIZE_MIN (32U)
+#define HWPERF_HOST_TL_STREAM_SIZE_MAX (3072U)
+
+/******************************************************************************
+ * RGX HW Performance decode Bvnc Features for HWPerf
+ *****************************************************************************/
+PVRSRV_ERROR RGXServerFeatureFlagsToHWPerfFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 *pui32BvncKmFeatureFlags);
+
+PVRSRV_ERROR PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32BvncKmFeatureFlags);
+
+/******************************************************************************
+ * RGX HW Performance Data Transport Routines
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfDataStoreCB(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR RGXHWPerfInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+PVRSRV_ERROR RGXHWPerfInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfDeinit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+void RGXHWPerfInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfClientInitAppHintCallbacks(void);
+
+/******************************************************************************
+ * RGX HW Performance Profiling API(s)
+ *****************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_HWPERF_STREAM_ID eStreamId,
+ IMG_BOOL bToggle,
+ IMG_UINT64 ui64Mask);
+
+
+PVRSRV_ERROR PVRSRVRGXConfigEnableHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32ArrayLen,
+ RGX_HWPERF_CONFIG_CNTBLK * psBlockConfigs);
+
+PVRSRV_ERROR PVRSRVRGXCtrlHWPerfCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_BOOL bEnable,
+ IMG_UINT32 ui32ArrayLen,
+ IMG_UINT16 * psBlockIDs);
+
+PVRSRV_ERROR PVRSRVRGXConfigCustomCountersKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT16 ui16CustomBlockID,
+ IMG_UINT16 ui16NumCustomCounters,
+ IMG_UINT32 * pui32CustomCounterIDs);
+
+/******************************************************************************
+ * RGX HW Performance Host Stream API
+ *****************************************************************************/
+
+PVRSRV_ERROR RGXHWPerfHostInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo, IMG_UINT32 ui32BufSizeKB);
+PVRSRV_ERROR RGXHWPerfHostInitOnDemandResources(PVRSRV_RGXDEV_INFO* psRgxDevInfo);
+void RGXHWPerfHostDeInit(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+
+void RGXHWPerfHostSetEventFilter(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ IMG_UINT32 ui32Filter);
+
+void RGXHWPerfHostPostEnqEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_KICK_TYPE eEnqType,
+ IMG_UINT32 ui32Pid,
+ IMG_UINT32 ui32FWDMContext,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32IntJobRef,
+ IMG_UINT64 ui64CheckFenceUID,
+ IMG_UINT64 ui64UpdateFenceUID,
+ IMG_UINT64 ui64DeadlineInus,
+ IMG_UINT64 ui64CycleEstimate);
+
+void RGXHWPerfHostPostAllocEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eAllocType,
+ IMG_UINT64 ui64UID,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32FWAddr,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostFreeEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eFreeType,
+ IMG_UINT64 ui64UID,
+ IMG_UINT32 ui32PID,
+ IMG_UINT32 ui32FWAddr);
+
+void RGXHWPerfHostPostModifyEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_HOST_RESOURCE_TYPE eModifyType,
+ IMG_UINT64 ui64NewUID,
+ IMG_UINT64 ui64UID1,
+ IMG_UINT64 ui64UID2,
+ const IMG_CHAR *psName,
+ IMG_UINT32 ui32NameSize);
+
+void RGXHWPerfHostPostUfoEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo,
+ RGX_HWPERF_UFO_EV eUfoType,
+ RGX_HWPERF_UFO_DATA_ELEMENT *psUFOData,
+ const IMG_BOOL bSleepAllowed);
+
+void RGXHWPerfHostPostClkSyncEvent(PVRSRV_RGXDEV_INFO *psRgxDevInfo);
+
+IMG_BOOL RGXHWPerfHostIsEventEnabled(PVRSRV_RGXDEV_INFO *psRgxDevInfo, RGX_HWPERF_HOST_EVENT_TYPE eEvent);
+
+#define _RGX_HWPERF_HOST_FILTER(CTX, EV) \
+ (((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)->ui32HWPerfHostFilter \
+ & RGX_HWPERF_EVENT_MASK_VALUE(EV))
+
+#define _RGX_DEVICE_INFO_FROM_CTX(CTX) \
+ ((PVRSRV_RGXDEV_INFO *)CTX->psDeviceNode->pvDevice)
+
+#define _RGX_DEVICE_INFO_FROM_NODE(DEVNODE) \
+ ((PVRSRV_RGXDEV_INFO *)DEVNODE->pvDevice)
+
+/* Deadline and cycle estimate is not supported for all ENQ events */
+#define NO_DEADLINE 0
+#define NO_CYCEST 0
+
+
+#if defined(SUPPORT_RGX)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param C Kick context
+ * @param P Pid of kicking process
+ * @param X Related FW context
+ * @param E External job reference
+ * @param I Job ID
+ * @param K Kick type
+ * @param CHKUID Check fence UID
+ * @param UPDUID Update fence UID
+ * @param D Deadline
+ * @param CE Cycle estimate
+ */
+#define RGX_HWPERF_HOST_ENQ(C, P, X, E, I, K, CHKUID, UPDUID, D, CE) \
+ do { \
+ if (_RGX_HWPERF_HOST_FILTER(C, RGX_HWPERF_HOST_ENQ)) \
+ { \
+ RGXHWPerfHostPostEnqEvent(_RGX_DEVICE_INFO_FROM_CTX(C), \
+ (K), (P), (X), (E), (I), \
+ (CHKUID), (UPDUID), (D), (CE)); \
+ } \
+ } while (0)
+
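+/* Illustrative sketch (excluded from the build): posting an ENQ event for
+ * a TA/3D kick. All argument values are placeholder assumptions for
+ * whatever the kick context has to hand; NO_DEADLINE and NO_CYCEST are the
+ * real defines from above, and RGX_HWPERF_KICK_TYPE_TA3D is assumed from
+ * the RGX_HWPERF_KICK_TYPE enumeration.
+ */
+#if 0
+	RGX_HWPERF_HOST_ENQ(psKickContext, ui32Pid, ui32FWContext,
+	                    ui32ExtJobRef, ui32IntJobRef,
+	                    RGX_HWPERF_KICK_TYPE_TA3D,
+	                    ui64CheckFenceUID, ui64UpdateFenceUID,
+	                    NO_DEADLINE, NO_CYCEST);
+#endif
+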
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param I Device Info pointer
+ * @param T Host UFO event type
+ * @param D Pointer to UFO data
+ * @param S Is sleeping allowed?
+ */
+#define RGX_HWPERF_HOST_UFO(I, T, D, S) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_UFO)) \
+ { \
+ RGXHWPerfHostPostUfoEvent((I), (T), (D), (S)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param D Device node pointer
+ * @param T Host ALLOC event type
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_ALLOC(D, T, FWADDR, N, Z) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+ { \
+ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, 0, 0, \
+ (FWADDR), (N), (Z)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host ALLOC event type
+ * @param UID ID of input object
+ * @param PID ID of allocating process
+ * @param FWADDR sync firmware address
+ * @param N string containing sync name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_ALLOC_FENCE_SYNC(D, T, UID, PID, FWADDR, N, Z) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_ALLOC)) \
+ { \
+ RGXHWPerfHostPostAllocEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+ (UID), (PID), (FWADDR), (N), (Z)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host FREE event type
+ * @param FWADDR sync firmware address
+ */
+#define RGX_HWPERF_HOST_FREE(D, T, FWADDR) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+ { \
+ RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+ (0), (0), (FWADDR)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host FREE event type
+ * @param UID ID of input object
+ * @param PID ID of allocating process
+ * @param FWADDR sync firmware address
+ */
+#define RGX_HWPERF_HOST_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_FREE)) \
+ { \
+ RGXHWPerfHostPostFreeEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+ (UID), (PID), (FWADDR)); \
+ } \
+ } while (0)
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param D Device Node pointer
+ * @param T Host MODIFY event type
+ * @param NEWUID ID of output object
+ * @param UID1 ID of first input object
+ * @param UID2 ID of second input object
+ * @param N string containing new object's name
+ * @param Z string size including null terminating character
+ */
+#define RGX_HWPERF_HOST_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled(_RGX_DEVICE_INFO_FROM_NODE(D), RGX_HWPERF_HOST_MODIFY)) \
+ { \
+ RGXHWPerfHostPostModifyEvent(_RGX_DEVICE_INFO_FROM_NODE(D), \
+ RGX_HWPERF_HOST_RESOURCE_TYPE_##T, \
+ (NEWUID), (UID1), (UID2), N, Z); \
+ } \
+ } while (0)
+
+
+/**
+ * This macro checks whether HWPerfHost and the event are enabled and, if
+ * they are, posts the event to the HWPerfHost stream.
+ *
+ * @param I Device info pointer
+ */
+#define RGX_HWPERF_HOST_CLK_SYNC(I) \
+ do { \
+ if (RGXHWPerfHostIsEventEnabled((I), RGX_HWPERF_HOST_CLK_SYNC)) \
+ { \
+ RGXHWPerfHostPostClkSyncEvent((I)); \
+ } \
+ } while (0)
+
+
+#else
+
+#define RGX_HWPERF_HOST_ENQ(C, P, X, E, I, K, CHKUID, UPDUID, D, CE)
+#define RGX_HWPERF_HOST_UFO(I, T, D, S)
+#define RGX_HWPERF_HOST_ALLOC(D, T, FWADDR, N, Z)
+#define RGX_HWPERF_HOST_ALLOC_FENCE_SYNC(D, T, UID, PID, FWADDR, N, Z)
+#define RGX_HWPERF_HOST_FREE(D, T, FWADDR)
+#define RGX_HWPERF_HOST_FREE_FENCE_SYNC(D, T, UID, PID, FWADDR)
+#define RGX_HWPERF_HOST_MODIFY_FENCE_SYNC(D, T, NEWUID, UID1, UID2, N, Z)
+#define RGX_HWPERF_HOST_CLK_SYNC(I)
+
+#endif
+
+
+/******************************************************************************
+ * RGX HW Performance To FTrace Profiling API(s)
+ *****************************************************************************/
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInitSupport(void);
+void RGXHWPerfFTraceGPUDeInitSupport(void);
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXHWPerfFTraceGPUDeInitDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+void RGXHWPerfFTraceGPUEnqueueEvent(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ExternalJobRef, IMG_UINT32 ui32InternalJobRef,
+ RGX_HWPERF_KICK_TYPE eKickType);
+
+PVRSRV_ERROR RGXHWPerfFTraceGPUEventsEnabledSet(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_BOOL bNewValue);
+
+void RGXHWPerfFTraceGPUThread(void *pvData);
+
+#endif
+
+/******************************************************************************
+ * RGX HW utils functions
+ *****************************************************************************/
+
+const IMG_CHAR *RGXHWPerfKickTypeToStr(RGX_HWPERF_KICK_TYPE eKickType);
+
+#endif /* RGXHWPERF_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxinit.c b/drivers/gpu/drm/img-rogue/1.10/rgxinit.c
new file mode 100644
index 00000000000000..3c54f7b267608f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxinit.c
@@ -0,0 +1,4648 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "img_defs.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "pvrsrv_bridge_init.h"
+#include "syscommon.h"
+#include "rgx_heaps.h"
+#include "rgxheapconfig.h"
+#include "rgxpower.h"
+#include "tlstream.h"
+#include "pvrsrv_tlstreams.h"
+
+#include "rgxinit.h"
+#include "rgxbvnc.h"
+
+#include "pdump_km.h"
+#include "handle.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxmem.h"
+#include "sync_internal.h"
+#include "pvrsrv_apphint.h"
+#include "oskm_apphint.h"
+#include "debugmisc_server.h"
+
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+#include "rgxmmuinit.h"
+#include "rgxmipsmmuinit.h"
+#include "physmem.h"
+#include "devicemem_utils.h"
+#include "devicemem_server.h"
+#include "physmem_osmem.h"
+
+#include "rgxdebug.h"
+#include "rgxhwperf.h"
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+#include "pvr_gputrace.h"
+#endif
+#include "htbserver.h"
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgx_heaps.h"
+
+#include "rgxta3d.h"
+#include "rgxtimecorr.h"
+
+#include "rgx_bvnc_defs_km.h"
+#if defined(PDUMP)
+#include "rgxstartstop.h"
+#endif
+
+#include "rgx_fwif_alignchecks.h"
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "rgxworkest.h"
+#endif
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_CHAR **ppszVersionString);
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_PUINT32 pui32RGXClockSpeed);
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode, IMG_UINT64 ui64ResetValue1, IMG_UINT64 ui64ResetValue2);
+static PVRSRV_ERROR RGXVzInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+static void RGXVzDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode);
+static PVRSRV_ERROR RGXVzInitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo,
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor, PVRSRV_RGXDEV_INFO *psDevInfo);
+static void RGXVzDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#define RGX_MMU_LOG2_PAGE_SIZE_4KB (12)
+#define RGX_MMU_LOG2_PAGE_SIZE_16KB (14)
+#define RGX_MMU_LOG2_PAGE_SIZE_64KB (16)
+#define RGX_MMU_LOG2_PAGE_SIZE_256KB (18)
+#define RGX_MMU_LOG2_PAGE_SIZE_1MB (20)
+#define RGX_MMU_LOG2_PAGE_SIZE_2MB (21)
+
+#define RGX_MMU_PAGE_SIZE_4KB ( 4 * 1024)
+#define RGX_MMU_PAGE_SIZE_16KB ( 16 * 1024)
+#define RGX_MMU_PAGE_SIZE_64KB ( 64 * 1024)
+#define RGX_MMU_PAGE_SIZE_256KB ( 256 * 1024)
+#define RGX_MMU_PAGE_SIZE_1MB (1024 * 1024)
+#define RGX_MMU_PAGE_SIZE_2MB (2048 * 1024)
+#define RGX_MMU_PAGE_SIZE_MIN RGX_MMU_PAGE_SIZE_4KB
+#define RGX_MMU_PAGE_SIZE_MAX RGX_MMU_PAGE_SIZE_2MB
+
+#define VAR(x) #x
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+
+/* bits used by the LISR to provide a trace of its last execution */
+#define RGX_LISR_DEVICE_NOT_POWERED (1 << 0)
+#define RGX_LISR_FWIF_POW_OFF (1 << 1)
+#define RGX_LISR_EVENT_EN (1 << 2)
+#define RGX_LISR_COUNTS_EQUAL (1 << 3)
+#define RGX_LISR_PROCESSED (1 << 4)
+
+typedef struct _LISR_EXECUTION_INFO_
+{
+ /* bit mask showing execution flow of last LISR invocation */
+ IMG_UINT32 ui32State;
+ /* snapshot from the last LISR invocation, regardless of
+ * whether an interrupt was handled
+ */
+ IMG_UINT32 aui32InterruptCountSnapshot[RGXFW_THREAD_NUM];
+ /* time of the last LISR invocation */
+ IMG_UINT64 ui64Clockns;
+} LISR_EXECUTION_INFO;
+
+/* information about the last execution of the LISR */
+static LISR_EXECUTION_INFO g_sLISRExecutionInfo;
+
+#endif
+
+#if !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+@Function SampleIRQCount
+@Description Utility function taking snapshots of RGX FW interrupt count.
+@Input paui32Input A pointer to RGX FW IRQ count array.
+ Size of the array should be equal to RGX FW thread
+ count.
+@Input paui32Output A pointer to array containing sampled RGX FW
+ IRQ counts
+@Return         IMG_BOOL        Returns IMG_TRUE if the current RGX FW IRQ
+                                count differs from the sampled count for
+                                any RGX FW thread.
+ */ /**************************************************************************/
+static INLINE IMG_BOOL SampleIRQCount(volatile IMG_UINT32 *paui32Input,
+ volatile IMG_UINT32 *paui32Output)
+{
+ IMG_UINT32 ui32TID;
+ IMG_BOOL bReturnVal = IMG_FALSE;
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ if (paui32Output[ui32TID] != paui32Input[ui32TID])
+ {
+ /**
+ * we are handling any unhandled interrupts here so align the host
+ * count with the FW count
+ */
+
+ /* Sample the current count from the FW _after_ we've cleared the interrupt. */
+ paui32Output[ui32TID] = paui32Input[ui32TID];
+ bReturnVal = IMG_TRUE;
+ }
+ }
+
+ return bReturnVal;
+}
+
+static IMG_BOOL _WaitForInterruptsTimeoutCheck(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_BOOL bScheduleMISR = IMG_FALSE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ IMG_UINT32 ui32TID;
+#endif
+
+ RGXDEBUG_PRINT_IRQ_COUNT(psDevInfo);
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ PVR_DPF((PVR_DBG_ERROR, "Last RGX_LISRHandler State: 0x%08X Clock: %llu",
+ g_sLISRExecutionInfo.ui32State,
+ g_sLISRExecutionInfo.ui64Clockns));
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+		PVR_DPF((PVR_DBG_ERROR,
+		         "RGX FW thread %u: InterruptCountSnapshot: 0x%X",
+		         ui32TID, g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID]));
+ }
+#else
+ PVR_DPF((PVR_DBG_ERROR, "No further information available. Please enable PVRSRV_DEBUG_LISR_EXECUTION"));
+#endif
+
+
+ if (psRGXFWIfTraceBuf->ePowState != RGXFWIF_POW_OFF)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_WaitForInterruptsTimeout: FW pow state is not OFF (is %u)",
+ (unsigned int) psRGXFWIfTraceBuf->ePowState));
+ }
+
+ bScheduleMISR = SampleIRQCount(psRGXFWIfTraceBuf->aui32InterruptCount,
+ psDevInfo->aui32SampleIRQCount);
+ return bScheduleMISR;
+}
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_BOOL bScheduleMISR;
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ bScheduleMISR = IMG_TRUE;
+ }
+ else
+ {
+ bScheduleMISR = _WaitForInterruptsTimeoutCheck(psDevInfo);
+ }
+
+ if (bScheduleMISR)
+ {
+ OSScheduleMISR(psDevInfo->pvMISRData);
+
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ OSScheduleMISR(psDevInfo->pvAPMISRData);
+ }
+ }
+}
+
+/*
+ RGX LISR Handler
+ */
+static IMG_BOOL RGX_LISRHandler (void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_BOOL bInterruptProcessed;
+ RGXFWIF_TRACEBUF *psRGXFWIfTraceBuf;
+ IMG_UINT32 ui32IRQStatus, ui32IRQStatusReg, ui32IRQStatusEventMsk, ui32IRQClearReg, ui32IRQClearMask;
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ if (! psDevInfo->bRGXPowered)
+ {
+ return IMG_FALSE;
+ }
+
+ OSScheduleMISR(psDevInfo->pvMISRData);
+ return IMG_TRUE;
+ }
+ else
+ {
+ bInterruptProcessed = IMG_FALSE;
+ psRGXFWIfTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ ui32IRQStatusReg = RGX_CR_MIPS_WRAPPER_IRQ_STATUS;
+ ui32IRQStatusEventMsk = RGX_CR_MIPS_WRAPPER_IRQ_STATUS_EVENT_EN;
+ ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+ ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+	} else
+ {
+ ui32IRQStatusReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+ ui32IRQStatusEventMsk = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN;
+ ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+ ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+ }
+
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ IMG_UINT32 ui32TID;
+
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+		g_sLISRExecutionInfo.aui32InterruptCountSnapshot[ui32TID] =
+ psRGXFWIfTraceBuf->aui32InterruptCount[ui32TID];
+ }
+ g_sLISRExecutionInfo.ui32State = 0;
+ g_sLISRExecutionInfo.ui64Clockns = OSClockns64();
+#endif
+
+ if (psDevInfo->bRGXPowered == IMG_FALSE)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_DEVICE_NOT_POWERED;
+#endif
+ if (psRGXFWIfTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_FWIF_POW_OFF;
+#endif
+ return bInterruptProcessed;
+ }
+ }
+
+ ui32IRQStatus = OSReadHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQStatusReg);
+ if (ui32IRQStatus & ui32IRQStatusEventMsk)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_EVENT_EN;
+#endif
+
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+
+ bInterruptProcessed = SampleIRQCount(psRGXFWIfTraceBuf->aui32InterruptCount,
+ psDevInfo->aui32SampleIRQCount);
+
+ if (!bInterruptProcessed)
+ {
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_COUNTS_EQUAL;
+#endif
+ return bInterruptProcessed;
+ }
+
+ bInterruptProcessed = IMG_TRUE;
+#if defined(PVRSRV_DEBUG_LISR_EXECUTION)
+ g_sLISRExecutionInfo.ui32State |= RGX_LISR_PROCESSED;
+#endif
+
+ OSScheduleMISR(psDevInfo->pvMISRData);
+
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ OSScheduleMISR(psDevInfo->pvAPMISRData);
+ }
+ }
+
+ return bInterruptProcessed;
+}
+
+static void RGX_MISR_ProcessKCCBDeferredList(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* First check whether there are pending commands in Deferred KCCB List */
+ OSLockAcquire(psDevInfo->hLockKCCBDeferredCommandsList);
+ if (dllist_is_empty(&psDevInfo->sKCCBDeferredCommandsListHead))
+ {
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+ return;
+ }
+ OSLockRelease(psDevInfo->hLockKCCBDeferredCommandsList);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+
+ /* Powerlock to avoid further Power transition requests
+ while KCCB deferred list is being processed */
+ eError = PVRSRVPowerLock(psDeviceNode);
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PowerLock (device: %p, error: %s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ goto _RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed;
+ }
+
+	/* Try to send the deferred KCCB commands. Do not poll from here. */
+ eError = RGXSendCommandsFromDeferredList(psDevInfo, IMG_FALSE);
+	if (eError != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_WARNING,
+		         "RGX_MISR_ProcessKCCBDeferredList could not flush the deferred KCCB list, the KCCB is full."));
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ _RGX_MISR_ProcessKCCBDeferredList_PowerLock_failed:
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+ return;
+}
+
+static void RGX_MISRHandler_CheckFWActivePowerState(void *psDevice)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevice;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_ON || psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+ {
+ RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+ }
+
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+ {
+ /* The FW is IDLE and therefore could be shut down */
+ eError = RGXActivePowerRequest(psDeviceNode);
+
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED))
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Failed RGXActivePowerRequest call (device: %p) with %s",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MAX, NULL, NULL);
+ }
+ }
+
+}
+
+/* Shorter defines to keep the code below more compact */
+#define GPU_ACTIVE_LOW RGXFWIF_GPU_UTIL_STATE_ACTIVE_LOW
+#define GPU_IDLE RGXFWIF_GPU_UTIL_STATE_IDLE
+#define GPU_ACTIVE_HIGH RGXFWIF_GPU_UTIL_STATE_ACTIVE_HIGH
+#define GPU_BLOCKED RGXFWIF_GPU_UTIL_STATE_BLOCKED
+#define MAX_ITERATIONS 64
+
+static PVRSRV_ERROR RGXGetGpuUtilStats(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hGpuUtilUser,
+ RGXFWIF_GPU_UTIL_STATS *psReturnStats)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ volatile RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+ IMG_UINT64 ui64TimeNow;
+ IMG_UINT32 ui32Attempts;
+ IMG_UINT32 ui32Remainder;
+
+
+ /***** (1) Initialise return stats *****/
+
+ psReturnStats->bValid = IMG_FALSE;
+ psReturnStats->ui64GpuStatActiveLow = 0;
+ psReturnStats->ui64GpuStatIdle = 0;
+ psReturnStats->ui64GpuStatActiveHigh = 0;
+ psReturnStats->ui64GpuStatBlocked = 0;
+ psReturnStats->ui64GpuStatCumulative = 0;
+
+ if (hGpuUtilUser == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psAggregateStats = hGpuUtilUser;
+
+
+ /* Try to acquire GPU utilisation counters and repeat if the FW is in the middle of an update */
+ for (ui32Attempts = 0; ui32Attempts < 4; ui32Attempts++)
+ {
+ IMG_UINT64 aui64TmpCounters[RGXFWIF_GPU_UTIL_STATE_NUM] = {0};
+ IMG_UINT64 ui64LastPeriod = 0, ui64LastWord = 0, ui64LastState = 0, ui64LastTime = 0;
+ IMG_UINT32 i = 0;
+
+
+ /***** (2) Get latest data from shared area *****/
+
+ OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+ /*
+ * First attempt at detecting if the FW is in the middle of an update.
+ * This should also help if the FW is in the middle of a 64 bit variable update.
+ */
+		while (((ui64LastWord != psUtilFWCb->ui64LastWord) ||
+ (aui64TmpCounters[ui64LastState] !=
+ psUtilFWCb->aui64StatsCounters[ui64LastState])) &&
+ (i < MAX_ITERATIONS))
+ {
+ ui64LastWord = psUtilFWCb->ui64LastWord;
+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(ui64LastWord);
+ aui64TmpCounters[GPU_ACTIVE_LOW] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_LOW];
+ aui64TmpCounters[GPU_IDLE] = psUtilFWCb->aui64StatsCounters[GPU_IDLE];
+ aui64TmpCounters[GPU_ACTIVE_HIGH] = psUtilFWCb->aui64StatsCounters[GPU_ACTIVE_HIGH];
+ aui64TmpCounters[GPU_BLOCKED] = psUtilFWCb->aui64StatsCounters[GPU_BLOCKED];
+ i++;
+ }
+
+ OSLockRelease(psDevInfo->hGPUUtilLock);
+
+ if (i == MAX_ITERATIONS)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "RGXGetGpuUtilStats could not get reliable data after trying %u times", i));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+
+ /***** (3) Compute return stats *****/
+
+ /* Update temp counters to account for the time since the last update to the shared ones */
+ OSMemoryBarrier(); /* Ensure the current time is read after the loop above */
+ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64());
+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(ui64LastWord);
+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+ aui64TmpCounters[ui64LastState] += ui64LastPeriod;
+
+ /* Get statistics for a user since its last request */
+ psReturnStats->ui64GpuStatActiveLow = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_LOW],
+ psAggregateStats->ui64GpuStatActiveLow);
+ psReturnStats->ui64GpuStatIdle = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_IDLE],
+ psAggregateStats->ui64GpuStatIdle);
+ psReturnStats->ui64GpuStatActiveHigh = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_ACTIVE_HIGH],
+ psAggregateStats->ui64GpuStatActiveHigh);
+ psReturnStats->ui64GpuStatBlocked = RGXFWIF_GPU_UTIL_GET_PERIOD(aui64TmpCounters[GPU_BLOCKED],
+ psAggregateStats->ui64GpuStatBlocked);
+ psReturnStats->ui64GpuStatCumulative = psReturnStats->ui64GpuStatActiveLow + psReturnStats->ui64GpuStatIdle +
+ psReturnStats->ui64GpuStatActiveHigh + psReturnStats->ui64GpuStatBlocked;
+
+ if (psAggregateStats->ui64TimeStamp != 0)
+ {
+ IMG_UINT64 ui64TimeSinceLastCall = ui64TimeNow - psAggregateStats->ui64TimeStamp;
+ /* We expect to return at least 75% of the time since the last call in GPU stats */
+ IMG_UINT64 ui64MinReturnedStats = ui64TimeSinceLastCall - (ui64TimeSinceLastCall / 4);
+
+ /*
+ * If the returned stats are substantially lower than the time since
+ * the last call, then the Host might have read a partial update from the FW.
+ * If this happens, try sampling the shared counters again.
+ */
+ if (psReturnStats->ui64GpuStatCumulative < ui64MinReturnedStats)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Return stats (%" IMG_UINT64_FMTSPEC ") too low "
+ "(call period %" IMG_UINT64_FMTSPEC ")",
+ __func__, psReturnStats->ui64GpuStatCumulative, ui64TimeSinceLastCall));
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Attempt #%u has failed, trying again",
+ __func__, ui32Attempts));
+ continue;
+ }
+ }
+
+ break;
+ }
+
+
+ /***** (4) Update aggregate stats for the current user *****/
+
+ psAggregateStats->ui64GpuStatActiveLow += psReturnStats->ui64GpuStatActiveLow;
+ psAggregateStats->ui64GpuStatIdle += psReturnStats->ui64GpuStatIdle;
+ psAggregateStats->ui64GpuStatActiveHigh += psReturnStats->ui64GpuStatActiveHigh;
+ psAggregateStats->ui64GpuStatBlocked += psReturnStats->ui64GpuStatBlocked;
+ psAggregateStats->ui64TimeStamp = ui64TimeNow;
+
+
+ /***** (5) Convert return stats to microseconds *****/
+
+ psReturnStats->ui64GpuStatActiveLow = OSDivide64(psReturnStats->ui64GpuStatActiveLow, 1000, &ui32Remainder);
+ psReturnStats->ui64GpuStatIdle = OSDivide64(psReturnStats->ui64GpuStatIdle, 1000, &ui32Remainder);
+ psReturnStats->ui64GpuStatActiveHigh = OSDivide64(psReturnStats->ui64GpuStatActiveHigh, 1000, &ui32Remainder);
+ psReturnStats->ui64GpuStatBlocked = OSDivide64(psReturnStats->ui64GpuStatBlocked, 1000, &ui32Remainder);
+ psReturnStats->ui64GpuStatCumulative = OSDivide64(psReturnStats->ui64GpuStatCumulative, 1000, &ui32Remainder);
+
+ /* Check that the return stats make sense */
+ if (psReturnStats->ui64GpuStatCumulative == 0)
+ {
+ /* We can enter here only if all the RGXFWIF_GPU_UTIL_GET_PERIOD
+ * returned 0. This could happen if the GPU frequency value
+ * is not well calibrated and the FW is updating the GPU state
+ * while the Host is reading it.
+ * When such an event happens frequently, timers or the aggregate
+ * stats might not be accurate...
+ */
+ PVR_DPF((PVR_DBG_WARNING, "RGXGetGpuUtilStats could not get reliable data."));
+ return PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ psReturnStats->bValid = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
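+/* Illustrative sketch (excluded from the build): turning the stats
+ * returned above into a busy percentage. The register/query pairing uses
+ * the real API; only the arithmetic and the function name are additions.
+ */
+#if 0
+static IMG_UINT32 ExampleGpuBusyPercent(PVRSRV_DEVICE_NODE *psDeviceNode,
+                                        IMG_HANDLE hGpuUtilUser)
+{
+	RGXFWIF_GPU_UTIL_STATS sStats;
+
+	if (RGXGetGpuUtilStats(psDeviceNode, hGpuUtilUser, &sStats) != PVRSRV_OK ||
+	    !sStats.bValid || sStats.ui64GpuStatCumulative == 0)
+	{
+		return 0;
+	}
+
+	/* Active time (low + high) as a share of all time since the last query */
+	return (IMG_UINT32)(((sStats.ui64GpuStatActiveLow +
+	                      sStats.ui64GpuStatActiveHigh) * 100) /
+	                    sStats.ui64GpuStatCumulative);
+}
+#endif
+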
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser)
+{
+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+ /* NoStats used since this may be called outside of the register/de-register
+ * process calls which track memory use. */
+ psAggregateStats = OSAllocMemNoStats(sizeof(RGXFWIF_GPU_UTIL_STATS));
+ if (psAggregateStats == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psAggregateStats->ui64GpuStatActiveLow = 0;
+ psAggregateStats->ui64GpuStatIdle = 0;
+ psAggregateStats->ui64GpuStatActiveHigh = 0;
+ psAggregateStats->ui64GpuStatBlocked = 0;
+ psAggregateStats->ui64TimeStamp = 0;
+
+ /* Not used */
+ psAggregateStats->bValid = IMG_FALSE;
+ psAggregateStats->ui64GpuStatCumulative = 0;
+
+ *phGpuUtilUser = psAggregateStats;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser)
+{
+ RGXFWIF_GPU_UTIL_STATS *psAggregateStats;
+
+ if (hGpuUtilUser == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psAggregateStats = hGpuUtilUser;
+ OSFreeMemNoStats(psAggregateStats);
+
+ return PVRSRV_OK;
+}
+
+/*
+ RGX MISR Handler
+ */
+static void RGX_MISRHandler_Main (void *pvData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = pvData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* Give the HWPerf service a chance to transfer some data from the FW
+ * buffer to the host driver transport layer buffer.
+ */
+ RGXHWPerfDataStoreCB(psDeviceNode);
+
+	/* Inform the other Services devices that we have finished an operation */
+ PVRSRVCheckStatus(psDeviceNode);
+
+#if defined(SUPPORT_PDVFS) && defined(RGXFW_META_SUPPORT_2ND_THREAD)
+	/* Normally the firmware CCB only exists for the primary FW thread; when
+	   PDVFS runs on the secondary FW thread, we process its CCB here */
+ RGXPDVFSCheckCoreClkRateChange(psDeviceNode->pvDevice);
+#endif
+
+ /* Process the Firmware CCB for pending commands */
+ RGXCheckFirmwareCCB(psDeviceNode->pvDevice);
+
+ /* Calibrate the GPU frequency and recorrelate Host and GPU timers (done every few seconds) */
+ RGXTimeCorrRestartPeriodic(psDeviceNode);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Process Workload Estimation Specific commands from the FW */
+ WorkEstCheckFirmwareCCB(psDeviceNode->pvDevice);
+#endif
+
+ if (psDevInfo->pvAPMISRData == NULL)
+ {
+ RGX_MISR_ProcessKCCBDeferredList(psDeviceNode);
+ }
+}
+#endif /* !defined(NO_HARDWARE) */
+
+
+/* This function writes some parameters for the initial boot into the firmware image */
+static PVRSRV_ERROR RGXBootldrDataInit(PVRSRV_DEVICE_NODE *psDeviceNode,
+ void *pvFWImage)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*) psDeviceNode->pvDevice;
+ IMG_UINT64 *pui64BootConfig;
+ IMG_DEV_PHYADDR sPhyAddr;
+ IMG_BOOL bValid;
+
+ /* To get a pointer to the bootloader configuration data start from a pointer to the FW image... */
+ pui64BootConfig = (IMG_UINT64 *) pvFWImage;
+
+ /* ... jump to the boot/NMI data page... */
+ pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+
+ /* ... and then jump to the bootloader data offset within the page */
+ pui64BootConfig += RGXMIPSFW_GET_OFFSET_IN_QWORDS(RGXMIPSFW_BOOTLDR_CONF_OFFSET);
+
+#if defined(SUPPORT_ALT_REGBASE)
+ pui64BootConfig[RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET] = psDeviceNode->psDevConfig->sAltRegsCpuPBase.uiAddr;
+#else
+ /* Rogue Registers physical address */
+ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+ 1, &sPhyAddr, &(psDeviceNode->psDevConfig->sRegsCpuPBase));
+ pui64BootConfig[RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+#endif
+
+ /* MIPS Page Table physical address. There are 16 pages for a firmware heap of 32 MB */
+ MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPhyAddr);
+ pui64BootConfig[RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+ /* MIPS Stack Pointer Physical Address */
+ eError = RGXGetPhyAddr(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+ &sPhyAddr,
+ RGXMIPSFW_STACK_OFFSET,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXBootldrDataInit: RGXGetPhyAddr failed (%u)",
+ eError));
+ return eError;
+ }
+ pui64BootConfig[RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET] = sPhyAddr.uiAddr;
+
+ /* Reserved for future use */
+ pui64BootConfig[RGXMIPSFW_RESERVED_FUTURE_OFFSET] = 0;
+
+ /* FW Init Data Structure Virtual Address */
+ pui64BootConfig[RGXMIPSFW_FWINIT_VIRTADDR_OFFSET] = psDevInfo->psRGXFWIfInitMemDesc->sDeviceMemDesc.sDevVAddr.uiAddr;
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static PVRSRV_ERROR RGXPDumpBootldrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PMR *psFWDataPMR;
+ IMG_DEV_PHYADDR sTmpAddr;
+ IMG_UINT32 ui32BootConfOffset, ui32ParamOffset;
+ PVRSRV_ERROR eError;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+ ui32BootConfOffset = (RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE);
+ ui32BootConfOffset += RGXMIPSFW_BOOTLDR_CONF_OFFSET;
+
+ /* The physical addresses used by a pdump player will be different
+ * from the ones we have put in the MIPS bootloader configuration data,
+ * so we have to tell the pdump player to replace the original values with the real ones.
+ */
+ PDUMPCOMMENT("Pass new boot parameters to the FW");
+
+ /* Rogue Registers physical address */
+ ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_ROGUE_REGS_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+ eError = PDumpRegLabelToMem64(RGX_PDUMPREG_NAME,
+ 0x0,
+ psFWDataPMR,
+ ui32ParamOffset,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of Rogue registers phy address failed (%u)", eError));
+ return eError;
+ }
+
+ /* Page Table physical Address */
+ ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_PAGE_TABLE_BASE_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+ MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sTmpAddr);
+
+ eError = PDumpPTBaseObjectToMem64(psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ psFWDataPMR,
+ 0,
+ ui32ParamOffset,
+ PDUMP_FLAGS_CONTINUOUS,
+ MMU_LEVEL_1,
+ sTmpAddr.uiAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of page tables phy address failed (%u)", eError));
+ return eError;
+ }
+
+ /* Stack physical address */
+ ui32ParamOffset = ui32BootConfOffset + (RGXMIPSFW_STACKPOINTER_PHYADDR_OFFSET * sizeof(IMG_UINT64));
+
+ eError = PDumpMemLabelToMem64(psFWDataPMR,
+ psFWDataPMR,
+ RGXMIPSFW_STACK_OFFSET,
+ ui32ParamOffset,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPDumpBootldrData: Dump of stack phy address failed (%u)", eError));
+ return eError;
+ }
+
+ return eError;
+}
+#endif /* PDUMP */
+
+
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_BOOL bEnableTrustedDeviceAceConfig)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ {
+ IMG_UINT32 ui32OS, ui32Region;
+
+ for (ui32OS = 0; ui32OS < GPUVIRT_VALIDATION_NUM_OS; ui32OS++)
+ {
+ for (ui32Region = 0; ui32Region < GPUVIRT_VALIDATION_NUM_REGIONS; ui32Region++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,"OS=%u, Region=%u, Min=%u, Max=%u", ui32OS, ui32Region, aui32OSidMin[ui32OS][ui32Region], aui32OSidMax[ui32OS][ui32Region]));
+ }
+ }
+
+ PopulateLMASubArenas(psDeviceNode, aui32OSidMin, aui32OSidMax);
+
+#if defined(EMULATOR)
+ if ((bEnableTrustedDeviceAceConfig) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE)))
+ {
+ SetTrustedDeviceAceEnabled();
+ }
+#else
+ {
+ PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+ }
+#endif
+ }
+#else
+ {
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(aui32OSidMin);
+ PVR_UNREFERENCED_PARAMETER(aui32OSidMax);
+ PVR_UNREFERENCED_PARAMETER(bEnableTrustedDeviceAceConfig);
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXSetPowerParams(PVRSRV_RGXDEV_INFO *psDevInfo,
+ PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+
+ /* Save information used on power transitions for later
+ * (when RGXStart and RGXStop are executed)
+ */
+ psDevInfo->sLayerParams.psDevInfo = psDevInfo;
+ psDevInfo->sLayerParams.psDevConfig = psDevConfig;
+#if defined(PDUMP)
+ psDevInfo->sLayerParams.ui32PdumpFlags = PDUMP_FLAGS_CONTINUOUS;
+#endif
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ {
+ IMG_DEV_PHYADDR sKernelMMUCtxPCAddr;
+
+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx,
+ &sKernelMMUCtxPCAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire Kernel MMU Ctx page catalog"));
+ return eError;
+ }
+
+ psDevInfo->sLayerParams.sPCAddr = sKernelMMUCtxPCAddr;
+ }
+ else
+ {
+ PMR *psFWCodePMR = (PMR *)(psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR);
+ PMR *psFWDataPMR = (PMR *)(psDevInfo->psRGXFWDataMemDesc->psImport->hPMR);
+ IMG_DEV_PHYADDR sPhyAddr;
+ IMG_BOOL bValid;
+
+#if defined(SUPPORT_ALT_REGBASE)
+ psDevInfo->sLayerParams.sGPURegAddr.uiAddr = psDevConfig->sAltRegsCpuPBase.uiAddr;
+#else
+ /* The physical address of the GPU registers needs to be translated
+ * in case we are in an LMA scenario
+ */
+ PhysHeapCpuPAddrToDevPAddr(psDevInfo->psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL],
+ 1,
+ &sPhyAddr,
+ &(psDevConfig->sRegsCpuPBase));
+
+ psDevInfo->sLayerParams.sGPURegAddr = sPhyAddr;
+#endif
+
+ eError = RGXGetPhyAddr(psFWCodePMR,
+ &sPhyAddr,
+ RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI code address"));
+ return eError;
+ }
+
+ psDevInfo->sLayerParams.sBootRemapAddr = sPhyAddr;
+
+ eError = RGXGetPhyAddr(psFWDataPMR,
+ &sPhyAddr,
+ RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW boot/NMI data address"));
+ return eError;
+ }
+
+ psDevInfo->sLayerParams.sDataRemapAddr = sPhyAddr;
+
+ eError = RGXGetPhyAddr(psFWCodePMR,
+ &sPhyAddr,
+ RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE * RGXMIPSFW_PAGE_SIZE,
+ RGXMIPSFW_LOG2_PAGE_SIZE,
+ 1,
+ &bValid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: Failed to acquire FW exceptions address"));
+ return eError;
+ }
+
+ psDevInfo->sLayerParams.sCodeRemapAddr = sPhyAddr;
+
+ psDevInfo->sLayerParams.sTrampolineRemapAddr.uiAddr = psDevInfo->psTrampoline->sPhysAddr.uiAddr;
+
+#if defined(SUPPORT_DEVICE_PA0_AS_VALID)
+ psDevInfo->sLayerParams.bDevicePA0IsValid = psDevConfig->bDevicePA0IsValid;
+#else
+#if defined(LMA) || defined(TC_MEMORY_CONFIG)
+ /*
+ * On an LMA system there is a high chance that address 0x0 is used by the GPU, e.g. on the TC.
+ * In that case we don't need to protect against spurious MIPS accesses to address 0x0,
+ * since that is a valid address to access.
+ * The TC is usually built with HYBRID memory, but even in UMA configurations the
+ * workaround is not needed on that system, so disable it to keep things simple.
+ */
+ psDevInfo->sLayerParams.bDevicePA0IsValid = IMG_TRUE;
+#else
+ psDevInfo->sLayerParams.bDevicePA0IsValid = IMG_FALSE;
+#endif
+#endif
+
+
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ /* Send the information used on power transitions to the trusted device,
+ * as in this setup the driver cannot start/stop the GPU or perform resets.
+ */
+ if (psDevConfig->pfnTDSetPowerParams)
+ {
+ PVRSRV_TD_POWER_PARAMS sTDPowerParams;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ {
+ sTDPowerParams.sPCAddr = psDevInfo->sLayerParams.sPCAddr;
+ }
+ else
+ {
+ sTDPowerParams.sGPURegAddr = psDevInfo->sLayerParams.sGPURegAddr;
+ sTDPowerParams.sBootRemapAddr = psDevInfo->sLayerParams.sBootRemapAddr;
+ sTDPowerParams.sCodeRemapAddr = psDevInfo->sLayerParams.sCodeRemapAddr;
+ sTDPowerParams.sDataRemapAddr = psDevInfo->sLayerParams.sDataRemapAddr;
+ }
+ eError = psDevConfig->pfnTDSetPowerParams(psDevConfig->hSysData,
+ &sTDPowerParams);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXSetPowerParams: TDSetPowerParams not implemented!"));
+ eError = PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXInitReleaseFWInitResourcesKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PMR *psFWCodePMR,
+ PMR *psFWDataPMR,
+ PMR *psFWCorePMR,
+ PMR *psHWPerfPMR)
+{
+ /* provide a stub interface for the direct bridge */
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psFWCodePMR);
+ PVR_UNREFERENCED_PARAMETER(psFWDataPMR);
+ PVR_UNREFERENCED_PARAMETER(psFWCorePMR);
+ PVR_UNREFERENCED_PARAMETER(psHWPerfPMR);
+
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXInitDevPart2KM
+ */
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DeviceFlags,
+ IMG_UINT32 ui32HWPerfHostBufSizeKB,
+ IMG_UINT32 ui32HWPerfHostFilter,
+ RGX_ACTIVEPM_CONF eActivePMConf)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_DEV_POWER_STATE eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+#if defined(PDUMP)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ RGXPDumpBootldrData(psDeviceNode, psDevInfo);
+ }
+#endif
+#if defined(TIMING) || defined(DEBUG)
+ OSUserModeAccessToPerfCountersEn();
+#endif
+
+ PDUMPCOMMENT("RGX Initialisation Part 2");
+
+ psDevInfo->ui32RegSize = psDevConfig->ui32RegsSize;
+ psDevInfo->sRegsPhysBase = psDevConfig->sRegsCpuPBase;
+
+ /* Initialise Device Flags */
+ psDevInfo->ui32DeviceFlags = 0;
+ RGXSetDeviceFlags(psDevInfo, ui32DeviceFlags, IMG_TRUE);
+
+ /* Allocate DVFS Table (needs to be allocated before SUPPORT_GPUTRACE_EVENTS
+ * is initialised because there is a dependency between them) */
+ psDevInfo->psGpuDVFSTable = OSAllocZMem(sizeof(*(psDevInfo->psGpuDVFSTable)));
+ if (psDevInfo->psGpuDVFSTable == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to allocate gpu dvfs table storage"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Initialise HWPerfHost buffer. */
+ if (RGXHWPerfHostInit(psDevInfo, ui32HWPerfHostBufSizeKB) == PVRSRV_OK)
+ {
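+ /* Only seed the filter from the caller when nothing has set it yet
+ * (the pre-existing value is assumed to come from an AppHint or a
+ * similar earlier configuration path). */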
+ if (psDevInfo->ui32HWPerfHostFilter == 0)
+ {
+ RGXHWPerfHostSetEventFilter(psDevInfo, ui32HWPerfHostFilter);
+ }
+
+ /* If HWPerf enabled allocate all resources for the host side buffer. */
+ if (psDevInfo->ui32HWPerfHostFilter != 0)
+ {
+ if (RGXHWPerfHostInitOnDemandResources(psDevInfo) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer on demand"
+ " initialisation failed."));
+ }
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING, "HWPerfHost buffer initialisation failed."));
+ }
+
+ /* Initialise lists of ZSBuffers */
+ eError = OSLockCreate(&psDevInfo->hLockZSBuffer,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ dllist_init(&psDevInfo->sZSBufferHead);
+ psDevInfo->ui32ZSBufferCurrID = 1;
+
+ /* Initialise lists of growable Freelists */
+ eError = OSLockCreate(&psDevInfo->hLockFreeList,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ dllist_init(&psDevInfo->sFreeListHead);
+ psDevInfo->ui32FreelistCurrID = 1;
+
+#if defined(SUPPORT_RAY_TRACING)
+ eError = OSLockCreate(&psDevInfo->hLockRPMFreeList,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ dllist_init(&psDevInfo->sRPMFreeListHead);
+ psDevInfo->ui32RPMFreelistCurrID = 1;
+ eError = OSLockCreate(&psDevInfo->hLockRPMContext,LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+#endif
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ eError = OSLockCreate(&psDevInfo->hDebugFaultInfoLock, LOCK_TYPE_PASSIVE);
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = OSLockCreate(&psDevInfo->hMMUCtxUnregLock, LOCK_TYPE_PASSIVE);
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+#endif
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ eError = OSLockCreate(&psDevInfo->hNMILock, LOCK_TYPE_DISPATCH);
+
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ /* Setup GPU utilisation stats update callback */
+#if !defined(NO_HARDWARE)
+ psDevInfo->pfnGetGpuUtilStats = RGXGetGpuUtilStats;
+#endif
+
+ eError = OSLockCreate(&psDevInfo->hGPUUtilLock, LOCK_TYPE_PASSIVE);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_ON;
+ psDevInfo->eActivePMConf = eActivePMConf;
+
+ /* set-up the Active Power Mgmt callback */
+#if !defined(NO_HARDWARE)
+ {
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+ IMG_BOOL bSysEnableAPM = psRGXData->psRGXTimingInfo->bEnableActivePM;
+ IMG_BOOL bEnableAPM = ((eActivePMConf == RGX_ACTIVEPM_DEFAULT) && bSysEnableAPM) ||
+ (eActivePMConf == RGX_ACTIVEPM_FORCE_ON);
+ /* Disable APM if in VZ mode */
+ bEnableAPM = bEnableAPM && PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE);
+
+ if (bEnableAPM)
+ {
+ eError = OSInstallMISR(&psDevInfo->pvAPMISRData, RGX_MISRHandler_CheckFWActivePowerState, psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* Prevent the device being woken up before there is something to do. */
+ eDefaultPowerState = PVRSRV_DEV_POWER_STATE_OFF;
+ }
+ }
+#endif
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableAPM,
+ RGXQueryAPMState,
+ RGXSetAPMState,
+ psDeviceNode,
+ NULL);
+
+ RGXTimeCorrInitAppHintCallbacks(psDeviceNode);
+
+ /*
+ Register the device with the power manager.
+ Normal/Hyperv Drivers: Support power management
+ Guest Drivers: Do not currently support power management
+ */
+ eError = PVRSRVRegisterPowerDevice(psDeviceNode,
+ &RGXPrePowerState, &RGXPostPowerState,
+ psDevConfig->pfnPrePowerState, psDevConfig->pfnPostPowerState,
+ &RGXPreClockSpeedChange, &RGXPostClockSpeedChange,
+ &RGXForcedIdleRequest, &RGXCancelForcedIdleRequest,
+ &RGXDustCountChange,
+ (IMG_HANDLE)psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ eDefaultPowerState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitDevPart2KM: failed to register device with power manager"));
+ return eError;
+ }
+
+ eError = RGXSetPowerParams(psDevInfo, psDevConfig);
+ if (eError != PVRSRV_OK) return eError;
+
+#if defined(PDUMP)
+ /* Run RGXStop with the correct PDump flags to feed the last-frame deinit buffer */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_DEINIT, "RGX deinitialisation commands");
+
+ psDevInfo->sLayerParams.ui32PdumpFlags |= PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW;
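+
+ /* With PDUMP_FLAGS_NOHW set, the RGXStop below is only captured into the
+ * deinit pdump stream; the real hardware is left untouched (behaviour
+ * assumed from the flag semantics). */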
+
+ if (! PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ eError = RGXStop(&psDevInfo->sLayerParams);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+ psDevInfo->sLayerParams.ui32PdumpFlags &= ~(PDUMP_FLAGS_DEINIT | PDUMP_FLAGS_NOHW);
+#endif
+
+#if !defined(NO_HARDWARE)
+ eError = RGXInstallProcessQueuesMISR(&psDevInfo->hProcessQueuesMISR, psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+ return eError;
+ }
+
+ /* Register the interrupt handlers */
+ eError = OSInstallMISR(&psDevInfo->pvMISRData, RGX_MISRHandler_Main, psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+ return eError;
+ }
+
+ eError = SysInstallDeviceLISR(psDevConfig->hSysData,
+ psDevConfig->ui32IRQ,
+ PVRSRV_MODNAME,
+ RGX_LISRHandler,
+ psDeviceNode,
+ &psDevInfo->pvLISRData);
+ if (eError != PVRSRV_OK)
+ {
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+ (void) OSUninstallMISR(psDevInfo->pvMISRData);
+ return eError;
+ }
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer =
+ OSAddTimer((PFN_TIMER_FUNC)PDVFSRequestReactiveUpdate,
+ psDevInfo,
+ PDVFS_REACTIVE_INTERVAL_MS);
+
+ OSEnableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+#endif
+
+#if defined(PDUMP)
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_CACHE_HIERARCHY)))
+ {
+ if (!PVRSRVSystemSnoopingOfCPUCache(psDevConfig) &&
+ !PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has NO cache snooping");
+ }
+ else
+ {
+ if (PVRSRVSystemSnoopingOfCPUCache(psDevConfig))
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has CPU cache snooping");
+ }
+ if (PVRSRVSystemSnoopingOfDeviceCache(psDevConfig))
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "System has DEVICE cache snooping");
+ }
+ }
+ }
+#endif
+
+ psDevInfo->bDevInit2Done = IMG_TRUE;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sKccbCmd;
+
+ /* Fill in the command structure with the parameters needed */
+ sKccbCmd.eCmdType = RGXFWIF_KCCB_CMD_HWPERF_CONFIG_ENABLE_BLKS_DIRECT;
+
+ eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sKccbCmd,
+ sizeof(sKccbCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Propagate any send failure instead of silently discarding it */
+ return eError;
+}
+
+static PVRSRV_ERROR RGXInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* set up fw memory contexts */
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ /* Register callbacks for creation of device memory contexts */
+ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+ /* Create the memory context for the firmware. */
+ eError = DevmemCreateContext(psDeviceNode, DEVMEM_HEAPCFG_META,
+ &psDevInfo->psKernelDevmemCtx);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemCreateContext (%u)", eError));
+ goto failed_to_create_ctx;
+ }
+
+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_MAIN_HEAP_IDENT,
+ &psDevInfo->psFirmwareMainHeap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemFindHeapByName (%u)", eError));
+ goto failed_to_find_heap;
+ }
+
+ eError = DevmemFindHeapByName(psDevInfo->psKernelDevmemCtx, RGX_FIRMWARE_CONFIG_HEAP_IDENT,
+ &psDevInfo->psFirmwareConfigHeap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitCreateFWKernelMemoryContext: Failed DevmemFindHeapByName (%u)", eError));
+ goto failed_to_find_heap;
+ }
+
+ /* Perform additional vz specific initialization */
+ eError = RGXVzInitCreateFWKernelMemoryContext(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXInitCreateFWKernelMemoryContext: Failed RGXVzInitCreateFWKernelMemoryContext (%u)",
+ eError));
+ goto failed_to_find_heap;
+ }
+
+ return eError;
+
+ failed_to_find_heap:
+ /*
+ * Clear the mem context create callbacks before destroying the RGX firmware
+ * context to avoid a spurious callback.
+ */
+ psDeviceNode->pfnRegisterMemoryContext = NULL;
+ psDeviceNode->pfnUnregisterMemoryContext = NULL;
+ DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+ psDevInfo->psKernelDevmemCtx = NULL;
+ failed_to_create_ctx:
+ return eError;
+}
+
+static void RGXDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ RGXVzDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+ /*
+ * Clear the mem context create callbacks before destroying the RGX firmware
+ * context to avoid a spurious callback.
+ */
+ psDeviceNode->pfnRegisterMemoryContext = NULL;
+ psDeviceNode->pfnUnregisterMemoryContext = NULL;
+
+ if (psDevInfo->psKernelDevmemCtx)
+ {
+ eError = DevmemDestroyContext(psDevInfo->psKernelDevmemCtx);
+ /* FIXME - this should return void */
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+}
+
+#if defined(RGXFW_ALIGNCHECKS)
+static PVRSRV_ERROR RGXAlignmentCheck(PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32AlignChecksSize,
+ IMG_UINT32 aui32AlignChecks[])
+{
+ static IMG_UINT32 aui32AlignChecksKM[] = {RGXFW_ALIGN_CHECKS_INIT_KM};
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+ IMG_UINT32 i, *paui32FWAlignChecks;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Skip the alignment check if the driver is guest
+ since there is no firmware to check against */
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, eError);
+
+ if (psDevInfo->psRGXFWAlignChecksMemDesc == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: FW Alignment Check"
+ " Mem Descriptor is NULL"));
+ return PVRSRV_ERROR_ALIGNMENT_ARRAY_NOT_AVAILABLE;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc,
+ (void **) &paui32FWAlignChecks);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVAlignmentCheckKM: Failed to acquire"
+ " kernel address for alignment checks (%u)", eError));
+ return eError;
+ }
+
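+ /* Layout assumed from the walk below: the shared buffer holds the KM
+ * checks plus one extra word (taken to be their count), then the UM count
+ * followed by the UM check values; skip the KM section, validate the
+ * count, then compare the entries one by one. */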
+ paui32FWAlignChecks += ARRAY_SIZE(aui32AlignChecksKM) + 1;
+ if (*paui32FWAlignChecks++ != ui32AlignChecksSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Mismatch"
+ " in number of structures to check."));
+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+ goto return_;
+ }
+
+ for (i = 0; i < ui32AlignChecksSize; i++)
+ {
+ if (aui32AlignChecks[i] != paui32FWAlignChecks[i])
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVAlignmentCheckKM: Check for"
+ " structured alignment failed."));
+ eError = PVRSRV_ERROR_INVALID_ALIGNMENT;
+ goto return_;
+ }
+ }
+
+ return_:
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWAlignChecksMemDesc);
+
+ return eError;
+}
+#endif
+
+static
+PVRSRV_ERROR RGXAllocateFWCodeRegion(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T ui32FWCodeAllocSize,
+ IMG_UINT32 uiMemAllocFlags,
+ IMG_BOOL bFWCorememCode,
+ const IMG_PCHAR pszText,
+ DEVMEM_MEMDESC **ppsMemDescPtr)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_LOG2ALIGN_T uiLog2Align = OSGetPageShift();
+
+#if defined(SUPPORT_MIPS_CONTIGUOUS_FW_CODE)
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ uiLog2Align = RGXMIPSFW_LOG2_PAGE_SIZE_64K;
+ }
+#endif
+
+#if !defined(SUPPORT_TRUSTED_DEVICE)
+ uiMemAllocFlags |= PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PVR_UNREFERENCED_PARAMETER(bFWCorememCode);
+
+ PDUMPCOMMENT("Allocate and export FW %s memory",
+ bFWCorememCode? "coremem code" : "code");
+
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ ui32FWCodeAllocSize,
+ 1 << uiLog2Align,
+ uiMemAllocFlags,
+ pszText,
+ ppsMemDescPtr);
+ return eError;
+#else
+ PDUMPCOMMENT("Import secure FW %s memory",
+ bFWCorememCode? "coremem code" : "code");
+
+ eError = DevmemImportTDFWCode(psDeviceNode,
+ ui32FWCodeAllocSize,
+ uiLog2Align,
+ uiMemAllocFlags,
+ bFWCorememCode,
+ ppsMemDescPtr);
+ return eError;
+#endif
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver
+
+ @Description
+
+ Validate the FW build options against the KM driver build options (KM build options only).
+
+ The following check is redundant, because the next check covers the same bits.
+ The redundancy arises because if client and server are build-compatible, and client and
+ firmware are build-compatible, then server and firmware are build-compatible as well.
+
+ The check is kept so that error messages are clearer if any incompatibility occurs.
+
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(RGXFWIF_INIT *psRGXFWInit)
+{
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsFWKMPart, ui32BuildOptionsMismatch;
+
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+
+ ui32BuildOptionsFWKMPart = psRGXFWInit->sRGXCompChecks.ui32BuildOptions & RGX_BUILD_OPTIONS_MASK_KM;
+
+ if (ui32BuildOptions != ui32BuildOptionsFWKMPart)
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32BuildOptionsFWKMPart;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+ /* Mask out the debug flag option, as combinations of debug vs release in UM & KM are supported */
+ ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
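+
+ /* Worked example (hypothetical bit values): a KM driver built with options
+ * 0x0005 and a FW reporting 0x0004 in its KM mask give a mismatch of
+ * 0x0001; the first branch below then flags that bit as an option extra
+ * to the KM driver. */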
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware and KM driver build options; "
+ "extra options present in the KM driver: (0x%x). Please check rgx_options.h",
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ }
+
+ if ( (ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in Firmware-side and KM driver build options; "
+ "extra options present in Firmware: (0x%x). Please check rgx_options.h",
+ ui32BuildOptionsFWKMPart & ui32BuildOptionsMismatch ));
+ return PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ }
+ PVR_DPF((PVR_DBG_WARNING, "RGXDevInitCompatCheck: Firmware and KM driver build options differ."));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware and KM driver build options match. [ OK ]"));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK version against driver DDK version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ IMG_UINT32 ui32DDKVersion;
+ PVRSRV_ERROR eError;
+
+ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: KM driver and FW DDK version");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32DDKVersion),
+ ui32DDKVersion,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (psRGXFWInit->sRGXCompChecks.ui32DDKVersion != ui32DDKVersion)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible driver DDK version (%u.%u) / Firmware version (%u.%u).",
+ PVRVERSION_MAJ, PVRVERSION_MIN,
+ PVRVERSION_UNPACK_MAJ(psRGXFWInit->sRGXCompChecks.ui32DDKVersion),
+ PVRVERSION_UNPACK_MIN(psRGXFWInit->sRGXCompChecks.ui32DDKVersion)));
+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+ PVR_DBG_BREAK;
+ return eError;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK version (%u.%u) and Firmware version (%u.%u) match. [ OK ]",
+ PVRVERSION_MAJ, PVRVERSION_MIN,
+ PVRVERSION_MAJ, PVRVERSION_MIN));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver
+
+ @Description
+
+ Validate FW DDK build against driver DDK build
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ IMG_UINT32 ui32DDKBuild;
+
+ ui32DDKBuild = PVRVERSION_BUILD;
+#endif
+
+#if defined(PDUMP) && defined(PVRSRV_STRICT_COMPAT_CHECK)
+ PDUMPCOMMENT("Compatibility check: KM driver and FW DDK build");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32DDKBuild),
+ ui32DDKBuild,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (psRGXFWInit->sRGXCompChecks.ui32DDKBuild != ui32DDKBuild)
+ {
+ PVR_LOG(("(WARN) RGXDevInitCompatCheck: Different driver DDK build version (%d) / Firmware DDK build version (%d).",
+ ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+ eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+ PVR_DBG_BREAK;
+ return eError;
+#endif
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: driver DDK build version (%d) and Firmware DDK build version (%d) match. [ OK ]",
+ ui32DDKBuild, psRGXFWInit->sRGXCompChecks.ui32DDKBuild));
+ }
+#endif
+ return eError;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_BVNC_FWAgainstDriver
+
+ @Description
+
+ Validate FW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_FWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)
+ IMG_UINT32 i;
+#endif
+#if !defined(NO_HARDWARE)
+ IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ IMG_UINT32 ui32B, ui32V, ui32N, ui32C;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+ PVRSRV_ERROR eError;
+ IMG_CHAR szV[8];
+
+ ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+ ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+ ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+ ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+ OSSNPrintf(szV, sizeof(szV),"%u",ui32V);
+
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (struct version)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+ sBVNC.ui32LayoutVersion,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (maxlen)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+ sBVNC.ui32VLenMax,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - lower 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC),
+ (IMG_UINT32)sBVNC.ui64BNC,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (BNC part - Higher 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC) +
+ sizeof(IMG_UINT32),
+ (IMG_UINT32)(sBVNC.ui64BNC >> 32),
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+
+ for (i = 0; i < sBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+ {
+ PDUMPCOMMENT("Compatibility check: KM driver and FW BVNC (V part)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sFWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) +
+ i,
+ *((IMG_UINT32 *)(sBVNC.aszV + i)),
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ }
+ }
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ RGX_BVNC_EQUAL(sBVNC, psRGXFWInit->sRGXCompChecks.sFWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%u) and firmware (%u).",
+ __FUNCTION__,
+ sBVNC.ui32LayoutVersion,
+ psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%u) and firmware (%u).",
+ __FUNCTION__,
+ sBVNC.ui32VLenMax,
+ psRGXFWInit->sRGXCompChecks.sFWBVNC.ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BNC (%u._.%u.%u) and Firmware BNC (%u._.%u.%u)",
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Mismatch in KM driver BVNC (%u.%s.%u.%u) and Firmware BVNC (%u.%s.%u.%u)",
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Firmware BVNC and KM driver BNVC match. [ OK ]"));
+ }
+#endif
+ return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_BVNC_HWAgainstDriver
+
+ @Description
+
+ Validate HW BVNC against driver BVNC
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+#if ((!defined(NO_HARDWARE))&&(!defined(EMULATOR)))
+#define TARGET_SILICON /* definition for everything that is not emu and not nohw configuration */
+#endif
+
+static PVRSRV_ERROR RGXDevInitCompatCheck_BVNC_HWAgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP) || defined(TARGET_SILICON)
+ IMG_UINT64 ui64MaskBNC = RGX_BVNC_PACK_MASK_B |
+ RGX_BVNC_PACK_MASK_N |
+ RGX_BVNC_PACK_MASK_C;
+
+ IMG_BOOL bMaskV = IMG_FALSE;
+
+ PVRSRV_ERROR eError;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sSWBVNC);
+#endif
+
+#if defined(TARGET_SILICON)
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sHWBVNC);
+ IMG_BOOL bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV;
+#endif
+
+#if defined(PDUMP) || defined(TARGET_SILICON)
+ IMG_UINT32 ui32B, ui32V, ui32N, ui32C;
+ IMG_CHAR szV[8];
+
+ /*if(RGX_DEVICE_HAS_BRN(psDevInfo, 38835))
+ {
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_B;
+ bMaskV = IMG_TRUE;
+ }*/
+#if defined(COMPAT_BVNC_MASK_N)
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_N;
+#endif
+#if defined(COMPAT_BVNC_MASK_C)
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+#endif
+ ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+ ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+ ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+ ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+ OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+ rgx_bvnc_packed(&sSWBVNC.ui64BNC, sSWBVNC.aszV, sSWBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 38344) && (ui32C >= 10))
+ {
+ ui64MaskBNC &= ~RGX_BVNC_PACK_MASK_C;
+ }
+
+ if ((ui64MaskBNC != (RGX_BVNC_PACK_MASK_B | RGX_BVNC_PACK_MASK_N | RGX_BVNC_PACK_MASK_C)) || bMaskV)
+ {
+ PVR_LOG(("Compatibility checks: Ignoring fields: '%s%s%s%s' of HW BVNC.",
+ ((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_B))?("B"):("")),
+ ((bMaskV)?("V"):("")),
+ ((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_N))?("N"):("")),
+ ((!(ui64MaskBNC & RGX_BVNC_PACK_MASK_C))?("C"):(""))));
+ }
+#endif
+
+#if defined(EMULATOR)
+ PVR_LOG(("Compatibility checks for emu target: Ignoring HW BVNC checks."));
+#endif
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Compatibility check: Layout version of compchecks struct");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32LayoutVersion),
+ sSWBVNC.ui32LayoutVersion,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ PDUMPCOMMENT("Compatibility check: HW V max len and FW V max len");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui32VLenMax),
+ sSWBVNC.ui32VLenMax,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ if (ui64MaskBNC != 0)
+ {
+ PDUMPIF("DISABLE_HWBNC_CHECK");
+ PDUMPELSE("DISABLE_HWBNC_CHECK");
+ PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC (Lower 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC),
+ (IMG_UINT32)sSWBVNC.ui64BNC ,
+ (IMG_UINT32)ui64MaskBNC,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ PDUMPCOMMENT("Compatibility check: HW BNC and FW BNC (Higher 32 bits)");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, ui64BNC) +
+ sizeof(IMG_UINT32),
+ (IMG_UINT32)(sSWBVNC.ui64BNC >> 32),
+ (IMG_UINT32)(ui64MaskBNC >> 32),
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+
+ PDUMPFI("DISABLE_HWBNC_CHECK");
+ }
+ if (!bMaskV)
+ {
+ IMG_UINT32 i;
+ PDUMPIF("DISABLE_HWV_CHECK");
+ PDUMPELSE("DISABLE_HWV_CHECK");
+ for (i = 0; i < sSWBVNC.ui32VLenMax; i += sizeof(IMG_UINT32))
+ {
+ PDUMPCOMMENT("Compatibility check: HW V and FW V");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, sHWBVNC) +
+ offsetof(RGXFWIF_COMPCHECKS_BVNC, aszV) +
+ i,
+ *((IMG_UINT32 *)(sSWBVNC.aszV + i)),
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+ }
+ PDUMPFI("DISABLE_HWV_CHECK");
+ }
+#endif
+
+#if defined(TARGET_SILICON)
+ if (psRGXFWInit == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ sHWBVNC = psRGXFWInit->sRGXCompChecks.sHWBVNC;
+
+ sHWBVNC.ui64BNC &= ui64MaskBNC;
+ sSWBVNC.ui64BNC &= ui64MaskBNC;
+
+ if (bMaskV)
+ {
+ sHWBVNC.aszV[0] = '\0';
+ sSWBVNC.aszV[0] = '\0';
+ }
+
+ RGX_BVNC_EQUAL(sSWBVNC, sHWBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 42480))
+ {
+ if (!bCompatibleAll && bCompatibleVersion)
+ {
+ if ((RGX_BVNC_PACKED_EXTR_B(sSWBVNC) == 1) &&
+ !(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sSWBVNC),"76")) &&
+ (RGX_BVNC_PACKED_EXTR_N(sSWBVNC) == 4) &&
+ (RGX_BVNC_PACKED_EXTR_C(sSWBVNC) == 6))
+ {
+ if ((RGX_BVNC_PACKED_EXTR_B(sHWBVNC) == 1) &&
+ !(OSStringCompare(RGX_BVNC_PACKED_EXTR_V(sHWBVNC),"69")) &&
+ (RGX_BVNC_PACKED_EXTR_N(sHWBVNC) == 4) &&
+ (RGX_BVNC_PACKED_EXTR_C(sHWBVNC) == 4))
+ {
+ bCompatibleBNC = IMG_TRUE;
+ bCompatibleLenMax = IMG_TRUE;
+ bCompatibleV = IMG_TRUE;
+ bCompatibleAll = IMG_TRUE;
+ }
+ }
+ }
+ }
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of HW (%d) and FW (%d).",
+ __FUNCTION__,
+ sHWBVNC.ui32LayoutVersion,
+ sSWBVNC.ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of HW (%d) and FW (%d).",
+ __FUNCTION__,
+ sHWBVNC.ui32VLenMax,
+ sSWBVNC.ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BNC (%d._.%d.%d) and FW BNC (%d._.%d.%d).",
+ RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) RGXDevInitCompatCheck: Incompatible HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d).",
+ RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ return eError;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: HW BVNC (%d.%s.%d.%d) and FW BVNC (%d.%s.%d.%d) match. [ OK ]",
+ RGX_BVNC_PACKED_EXTR_B(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sHWBVNC),
+ RGX_BVNC_PACKED_EXTR_B(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sSWBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sSWBVNC)));
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_METACoreVersion_AgainstDriver
+
+ @Description
+
+ Validate HW META version against driver META version
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+#if defined(PDUMP)||(!defined(NO_HARDWARE))
+ PVRSRV_ERROR eError;
+#endif
+ IMG_UINT32 ui32FWCoreIDValue = 0;
+ IMG_CHAR *pcRGXFW_PROCESSOR = NULL;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ ui32FWCoreIDValue = RGXMIPSFW_CORE_ID_VALUE;
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+ }
+ else if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ {
+ switch(RGX_GET_FEATURE_VALUE(psDevInfo, META))
+ {
+ case MTP218: ui32FWCoreIDValue = RGX_CR_META_MTP218_CORE_ID_VALUE; break;
+ case MTP219: ui32FWCoreIDValue = RGX_CR_META_MTP219_CORE_ID_VALUE; break;
+ case LTP218: ui32FWCoreIDValue = RGX_CR_META_LTP218_CORE_ID_VALUE; break;
+ case LTP217: ui32FWCoreIDValue = RGX_CR_META_LTP217_CORE_ID_VALUE; break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+ PVR_ASSERT(0);
+ }
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Undefined FW_CORE_ID_VALUE", __func__));
+ PVR_ASSERT(0);
+ }
+
+#if defined(PDUMP)
+ PDUMPIF("DISABLE_HWMETA_CHECK");
+ PDUMPELSE("DISABLE_HWMETA_CHECK");
+ PDUMPCOMMENT("Compatibility check: KM driver and HW FW Processor version");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, sRGXCompChecks) +
+ offsetof(RGXFWIF_COMPCHECKS, ui32FWProcessorVersion),
+ ui32FWCoreIDValue,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDevInitCompatCheck: problem pdumping POL for psRGXFWIfInitMemDesc (%d)", eError));
+ return eError;
+ }
+ PDUMPFI("DISABLE_HWMETA_CHECK");
+#endif
+
+#if !defined(NO_HARDWARE)
+ if (psRGXFWInit == NULL)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion != ui32FWCoreIDValue)
+ {
+ PVR_LOG(("RGXDevInitCompatCheck: Incompatible driver %s version (%d) / HW %s version (%d).",
+ pcRGXFW_PROCESSOR,
+ ui32FWCoreIDValue,
+ pcRGXFW_PROCESSOR,
+ psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+ eError = PVRSRV_ERROR_FWPROCESSOR_MISMATCH;
+ PVR_DBG_BREAK;
+ return eError;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXDevInitCompatCheck: Compatible driver %s version (%d) / HW %s version (%d) [OK].",
+ pcRGXFW_PROCESSOR,
+ ui32FWCoreIDValue,
+ pcRGXFW_PROCESSOR,
+ psRGXFWInit->sRGXCompChecks.ui32FWProcessorVersion));
+ }
+#endif
+ return PVRSRV_OK;
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXDevInitCompatCheck_StoreBVNCInUMSharedMem
+
+ @Description
+
+ Store the BVNC of the core being handled in memory shared with UM, for the
+ compatibility check performed by the UM part of the driver.
+
+ @Input psDevInfo - device info
+ @Input psRGXFWInit - FW init data
+
+ @Return PVRSRV_ERROR - PVRSRV_OK on success or appropriate error code
+
+******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck_StoreBVNCInUMSharedMem(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_INIT *psRGXFWInit)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 *pui32InfoPage = PVRSRVGetPVRSRVData()->pui32InfoPage;
+ PVR_ASSERT(pui32InfoPage);
+
+#if !defined(NO_HARDWARE)
+ PVR_UNREFERENCED_PARAMETER(psDevInfo);
+ PVR_ASSERT(psRGXFWInit);
+
+ pui32InfoPage[CORE_ID_BRANCH] = RGX_BVNC_PACKED_EXTR_B(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+ eError = OSStringToUINT32(RGX_BVNC_PACKED_EXTR_V(psRGXFWInit->sRGXCompChecks.sFWBVNC), 10, &pui32InfoPage[CORE_ID_VERSION]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to obtain core version (%u)",
+ __FUNCTION__, eError));
+ }
+ pui32InfoPage[CORE_ID_NUMBER_OF_SCALABLE_UNITS] = RGX_BVNC_PACKED_EXTR_N(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+ pui32InfoPage[CORE_ID_CONFIG] = RGX_BVNC_PACKED_EXTR_C(psRGXFWInit->sRGXCompChecks.sFWBVNC);
+#else
+ PVR_UNREFERENCED_PARAMETER(psRGXFWInit);
+ PVR_ASSERT(psDevInfo);
+
+ pui32InfoPage[CORE_ID_BRANCH] = psDevInfo->sDevFeatureCfg.ui32B;
+ pui32InfoPage[CORE_ID_VERSION] = psDevInfo->sDevFeatureCfg.ui32V;
+ pui32InfoPage[CORE_ID_NUMBER_OF_SCALABLE_UNITS] = psDevInfo->sDevFeatureCfg.ui32N;
+ pui32InfoPage[CORE_ID_CONFIG] = psDevInfo->sDevFeatureCfg.ui32C;
+#endif /* !defined(NO_HARDWARE) */
+
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevInitCompatCheck
+
+ @Description
+
+ Check compatibility of host driver and firmware (DDK and build options)
+ for RGX devices at services/device initialisation
+
+ @Input psDeviceNode - device node
+
+ @Return PVRSRV_ERROR - depending on mismatch found
+
+ ******************************************************************************/
+static PVRSRV_ERROR RGXDevInitCompatCheck(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_INIT *psRGXFWInit = NULL;
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32RegValue;
+
+ /* Retrieve the FW information */
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to acquire kernel fw compatibility check info (%u)",
+ __FUNCTION__, eError));
+ return eError;
+ }
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ if (*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+ {
+ /* No need to wait if the FW has already updated the values */
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ ui32RegValue = 0;
+
+ if ((!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST)) &&
+ RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META))
+ {
+ eError = RGXReadMETAAddr(psDevInfo, META_CR_T0ENABLE_OFFSET, &ui32RegValue);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Reading RGX META register failed. Is the GPU correctly powered up? (%u)",
+ __FUNCTION__, eError));
+ goto chk_exit;
+ }
+
+ if (!(ui32RegValue & META_CR_TXENABLE_ENABLE_BIT))
+ {
+ eError = PVRSRV_ERROR_META_THREAD0_NOT_ENABLED;
+ PVR_DPF((PVR_DBG_ERROR,"%s: RGX META is not running. Is the GPU correctly powered up? %d (%u)",
+ __FUNCTION__, psRGXFWInit->sRGXCompChecks.bUpdated, eError));
+ goto chk_exit;
+ }
+ }
+
+ if (!*((volatile IMG_BOOL *)&psRGXFWInit->sRGXCompChecks.bUpdated))
+ {
+ eError = PVRSRV_ERROR_TIMEOUT;
+ PVR_DPF((PVR_DBG_ERROR,"%s: Missing compatibility info from FW (%u)",
+ __FUNCTION__, eError));
+ goto chk_exit;
+ }
+#endif /* !defined(NO_HARDWARE) */
+
+ eError = RGXDevInitCompatCheck_KMBuildOptions_FWAgainstDriver(psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_DDKVersion_FWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_DDKBuild_FWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ eError = RGXDevInitCompatCheck_BVNC_FWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_BVNC_HWAgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+ }
+ eError = RGXDevInitCompatCheck_FWProcessorVersion_AgainstDriver(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ goto chk_exit;
+ }
+
+ eError = RGXDevInitCompatCheck_StoreBVNCInUMSharedMem(psDevInfo, psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to store compatibility info for UM consumption (%u)",
+ __FUNCTION__, eError));
+ goto chk_exit;
+ }
+
+ eError = PVRSRV_OK;
+ chk_exit:
+#if !defined(NO_HARDWARE)
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+#endif
+ return eError;
+}
+
+/**************************************************************************/ /*!
+@Function RGXSoftReset
+@Description Resets some modules of the RGX device
+@Input psDeviceNode Device node
+@Input ui64ResetValue1 A mask for which each bit set corresponds
+ to a module to reset (via the SOFT_RESET
+ register).
+@Input ui64ResetValue2 A mask for which each bit set corresponds
+ to a module to reset (via the SOFT_RESET2
+ register).
+@Return PVRSRV_ERROR
+ */ /***************************************************************************/
+static PVRSRV_ERROR RGXSoftReset(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT64 ui64ResetValue1,
+ IMG_UINT64 ui64ResetValue2)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_BOOL bSoftReset = IMG_FALSE;
+ IMG_UINT64 ui64SoftResetMask = 0;
+
+ PVR_ASSERT(psDeviceNode != NULL);
+ PVR_ASSERT(psDeviceNode->pvDevice != NULL);
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ /* the device info */
+ psDevInfo = psDeviceNode->pvDevice;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE))
+ {
+ ui64SoftResetMask = RGX_CR_SOFT_RESET__PBE2_XE__MASKFULL;
+ }
+ else
+ {
+ ui64SoftResetMask = RGX_CR_SOFT_RESET_MASKFULL;
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE) &&
+ ((ui64ResetValue2 & RGX_CR_SOFT_RESET2_MASKFULL) != ui64ResetValue2))
+ {
+ bSoftReset = IMG_TRUE;
+ }
+
+ if (((ui64ResetValue1 & ui64SoftResetMask) != ui64ResetValue1) || bSoftReset)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Set in soft-reset */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, ui64ResetValue1);
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, ui64ResetValue2);
+ }
+
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+ }
+
+ /* Take the modules out of reset... */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET, 0);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2, 0);
+ }
+
+ /* ...and fence again */
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ (void) OSReadHWReg64(psDevInfo->pvRegsBaseKM, RGX_CR_SOFT_RESET2);
+ }
+
+ return PVRSRV_OK;
+}
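+
+/* Usage sketch (hypothetical mask bit): a caller resetting a single module
+ * might do
+ *
+ *     eError = RGXSoftReset(psDeviceNode, RGX_CR_SOFT_RESET_GARTEN_EN, 0);
+ *
+ * Any bit outside the per-core SOFT_RESET mask is rejected with
+ * PVRSRV_ERROR_INVALID_PARAMS before any register is written; the bit name
+ * above is an assumption for illustration only.
+ */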
+
+/*!
+ ******************************************************************************
+
+ @Function RGXDebugRequestNotify
+
+ @Description Dump the debug data for RGX
+
+ ******************************************************************************/
+static void RGXDebugRequestNotify(PVRSRV_DBGREQ_HANDLE hDbgRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = hDbgRequestHandle;
+
+ /* Only action the request if we've fully init'ed */
+ if (psDevInfo->bDevInit2Done)
+ {
+ RGXDebugRequestProcess(pfnDumpDebugPrintf, pvDumpDebugFile, psDevInfo, ui32VerbLevel);
+ }
+}
+
+static const RGX_MIPS_ADDRESS_TRAMPOLINE sNullTrampoline;
+
+static void RGXFreeTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+ psDevInfo->psTrampoline->hPdumpPages,
+#endif
+ &psDevInfo->psTrampoline->sPages);
+
+ if (psDevInfo->psTrampoline != &sNullTrampoline)
+ {
+ OSFreeMem(psDevInfo->psTrampoline);
+ }
+ psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
+}
+
+#define RANGES_OVERLAP(x, y, size) ((x) < ((y) + (size)) && (y) < ((x) + (size)))
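+/* Example: with size 0x4000, bases 0x10000 and 0x13000 overlap because each
+ * base lies below the other's end (0x10000 < 0x17000 and 0x13000 < 0x14000);
+ * bases 0x10000 and 0x14000 do not (0x14000 is not < 0x14000). */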
+#define TRAMPOLINE_ALLOC_MAX_RETRIES (3)
+
+static PVRSRV_ERROR RGXAllocTrampoline(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ IMG_INT32 i, j;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_MIPS_ADDRESS_TRAMPOLINE *pasTrampoline[TRAMPOLINE_ALLOC_MAX_RETRIES];
+
+ PDUMPCOMMENT("Allocate pages for trampoline");
+
+	/* Retry the allocation of the trampoline block (16KB), retaining any
+	 * previous allocations that overlap the target range, until we get an
+	 * allocation that is clear of it.
+	 * At most 3 tries are needed because we allocate one physically
+	 * contiguous block of memory, not individual pages.
+	 * Free the unused allocations only once the desired range has been
+	 * obtained, to prevent the alloc function from returning the same bad
+	 * range repeatedly.
+	 */
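+	/* (Why 3 tries suffice, assuming each allocation is aligned to a segment
+	 * no smaller than the trampoline: the target range can straddle at most
+	 * two such segments, so at most two held allocations can overlap it and
+	 * the third attempt must land clear of it.)
+	 */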
+	for (i = 0; i < TRAMPOLINE_ALLOC_MAX_RETRIES; i++)
+ {
+ pasTrampoline[i] = OSAllocMem(sizeof(RGX_MIPS_ADDRESS_TRAMPOLINE));
+ eError = DevPhysMemAlloc(psDeviceNode,
+ RGXMIPSFW_TRAMPOLINE_SIZE,
+ RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE,
+ 0, // (init) u8Value
+ IMG_FALSE, // bInitPage,
+#if defined(PDUMP)
+ psDeviceNode->psFirmwareMMUDevAttrs->pszMMUPxPDumpMemSpaceName,
+ "TrampolineRegion",
+ &pasTrampoline[i]->hPdumpPages,
+#endif
+ &pasTrampoline[i]->sPages,
+ &pasTrampoline[i]->sPhysAddr);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s failed (%u)",
+ __func__, eError));
+ goto fail;
+ }
+
+ if (!RANGES_OVERLAP(pasTrampoline[i]->sPhysAddr.uiAddr,
+ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+ RGXMIPSFW_TRAMPOLINE_SIZE))
+ {
+ break;
+ }
+ }
+	if (TRAMPOLINE_ALLOC_MAX_RETRIES == i)
+ {
+ /* Failed to find a physical allocation after 3 attempts */
+ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s failed to allocate non-overlapping pages (%u)",
+ __func__, eError));
+ /* Fall through, clean up and return error. */
+ }
+ else
+ {
+ /* Remember the last physical block allocated, it will not be freed */
+ psDevInfo->psTrampoline = pasTrampoline[i];
+ }
+
+fail:
+ /* free all unused allocations */
+ for (j = 0; j < i; j++)
+ {
+ DevPhysMemFree(psDeviceNode,
+#if defined(PDUMP)
+ pasTrampoline[j]->hPdumpPages,
+#endif
+ &pasTrampoline[j]->sPages);
+ OSFreeMem(pasTrampoline[j]);
+ }
+
+ return eError;
+}
+
+#undef RANGES_OVERLAP
+
+
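+/*!
+ ******************************************************************************
+
+ @Function       PVRSRVRGXInitAllocFWImgMemKM
+
+ @Description    Create the FW kernel memory context and allocate the FW
+                 code, data and (optional) coremem sections, returning their
+                 PMRs and device virtual base addresses
+
+ ******************************************************************************/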
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T uiFWCodeLen,
+ IMG_DEVMEM_SIZE_T uiFWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCorememLen,
+ PMR **ppsFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ PMR **ppsFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ PMR **ppsFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase)
+{
+ DEVMEM_FLAGS_T uiMemAllocFlags;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+
+ eError = RGXInitCreateFWKernelMemoryContext(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitAllocFWImgMemKM: Failed RGXInitCreateFWKernelMemoryContext (%u)", eError));
+ goto failFWMemoryContextAlloc;
+ }
+
+ /*
+ * Set up Allocation for FW code section
+ */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+
+ eError = RGXAllocateFWCodeRegion(psDeviceNode,
+ uiFWCodeLen,
+ uiMemAllocFlags,
+ IMG_FALSE,
+ "FwExCodeRegion",
+ &psDevInfo->psRGXFWCodeMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw code mem (%u)",
+ eError));
+ goto failFWCodeMemDescAlloc;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWCodeMemDesc, (void**) ppsFWCodePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto failFWCodeMemDescAqDevVirt;
+ }
+
+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc,
+ &psDevInfo->sFWCodeDevVAddrBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw code mem (%u)",
+ eError));
+ goto failFWCodeMemDescAqDevVirt;
+ }
+ *psFWCodeDevVAddrBase = psDevInfo->sFWCodeDevVAddrBase;
+
+ if (!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS) || (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))))
+ {
+ /*
+ * The FW code must be the first allocation in the firmware heap, otherwise
+ * the bootloader will not work (META will not be able to find the bootloader).
+ */
+ PVR_ASSERT(psFWCodeDevVAddrBase->uiAddr == RGX_FIRMWARE_RAW_HEAP_BASE);
+ }
+
+ /*
+ * Set up Allocation for FW data section
+ */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ PDUMPCOMMENT("Allocate and export data memory for fw");
+
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ uiFWDataLen,
+ OSGetPageSize(),
+ uiMemAllocFlags,
+ "FwExDataRegion",
+ &psDevInfo->psRGXFWDataMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw data mem (%u)",
+ eError));
+ goto failFWDataMemDescAlloc;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWDataMemDesc, (void **) ppsFWDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto failFWDataMemDescAqDevVirt;
+ }
+
+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWDataMemDesc,
+ &psDevInfo->sFWDataDevVAddrBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw data mem (%u)",
+ eError));
+ goto failFWDataMemDescAqDevVirt;
+ }
+ *psFWDataDevVAddrBase = psDevInfo->sFWDataDevVAddrBase;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ eError = RGXAllocTrampoline(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "Failed to allocate trampoline region (%u)",
+ eError));
+ goto failTrampolineMemDescAlloc;
+ }
+ }
+
+ if (uiFWCorememLen != 0)
+ {
+ /*
+ * Set up Allocation for FW coremem section
+ */
+ uiMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC;
+
+ eError = RGXAllocateFWCodeRegion(psDeviceNode,
+ uiFWCorememLen,
+ uiMemAllocFlags,
+ IMG_TRUE,
+ "FwExCorememRegion",
+ &psDevInfo->psRGXFWCorememMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to allocate fw coremem mem, size: %" IMG_INT64_FMTSPECd ", flags: %x (%u)",
+ uiFWCorememLen, uiMemAllocFlags, eError));
+ goto failFWCorememMemDescAlloc;
+ }
+
+ eError = DevmemLocalGetImportHandle(psDevInfo->psRGXFWCorememMemDesc, (void**) ppsFWCorememPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevmemLocalGetImportHandle failed (%u)", eError));
+ goto failFWCorememMemDescAqDevVirt;
+ }
+
+ eError = DevmemAcquireDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc,
+ &psDevInfo->sFWCorememCodeDevVAddrBase);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Failed to acquire devVAddr for fw coremem mem (%u)",
+ eError));
+ goto failFWCorememMemDescAqDevVirt;
+ }
+
+ RGXSetFirmwareAddress(&psDevInfo->sFWCorememCodeFWAddr,
+ psDevInfo->psRGXFWCorememMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+ }
+ else
+ {
+ *ppsFWCorememPMR = NULL;
+ psDevInfo->sFWCorememCodeDevVAddrBase.uiAddr = 0;
+ psDevInfo->sFWCorememCodeFWAddr.ui32Addr = 0;
+ }
+
+ *psFWCorememDevVAddrBase = psDevInfo->sFWCorememCodeDevVAddrBase;
+ *psFWCorememMetaVAddrBase = psDevInfo->sFWCorememCodeFWAddr;
+
+ return PVRSRV_OK;
+
+ failFWCorememMemDescAqDevVirt:
+ if (uiFWCorememLen != 0)
+ {
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememMemDesc);
+ psDevInfo->psRGXFWCorememMemDesc = NULL;
+ }
+ failFWCorememMemDescAlloc:
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ RGXFreeTrampoline(psDeviceNode);
+ }
+ failTrampolineMemDescAlloc:
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+ failFWDataMemDescAqDevVirt:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+ psDevInfo->psRGXFWDataMemDesc = NULL;
+failFWDataMemDescAlloc:
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+ failFWCodeMemDescAqDevVirt:
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+ psDevInfo->psRGXFWCodeMemDesc = NULL;
+ failFWCodeMemDescAlloc:
+ failFWMemoryContextAlloc:
+ return eError;
+}
+
+/*
+ AppHint parameter interface
+ */
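+
+/* The FirmwareLogType apphint exposes a simple enum (0 = none, 1 = TBI,
+ * 2 = trace) while EnableLogGroup exposes the raw log-group filter bits.
+ * The query/set handlers below translate between those two views and the
+ * combined RGXFWIF_LOG_TYPE_* word used by the firmware.
+ */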
+static
+PVRSRV_ERROR RGXFWTraceQueryFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_ERROR eResult;
+
+ eResult = PVRSRVRGXDebugMiscQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+ *pui32Value &= RGXFWIF_LOG_TYPE_GROUP_MASK;
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceQueryLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_ERROR eResult;
+
+ eResult = PVRSRVRGXDebugMiscQueryFWLogKM(NULL, psDeviceNode, pui32Value);
+ if (PVRSRV_OK == eResult)
+ {
+ if (*pui32Value & RGXFWIF_LOG_TYPE_TRACE)
+ {
+ *pui32Value = 2; /* Trace */
+ }
+ else if (*pui32Value & RGXFWIF_LOG_TYPE_GROUP_MASK)
+ {
+ *pui32Value = 1; /* TBI */
+ }
+ else
+ {
+ *pui32Value = 0; /* None */
+ }
+ }
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetFilter(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eResult;
+ IMG_UINT32 ui32RGXFWLogType;
+
+ eResult = RGXFWTraceQueryLogType(psDeviceNode, NULL, &ui32RGXFWLogType);
+ if (PVRSRV_OK == eResult)
+ {
+ if (ui32Value && 1 != ui32RGXFWLogType)
+ {
+ ui32Value |= RGXFWIF_LOG_TYPE_TRACE;
+ }
+ eResult = PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode, ui32Value);
+ }
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXFWTraceSetLogType(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eResult;
+ IMG_UINT32 ui32RGXFWLogType = ui32Value;
+
+ /* 0 - none, 1 - tbi, 2 - trace */
+ if (ui32Value)
+ {
+ eResult = RGXFWTraceQueryFilter(psDeviceNode, NULL, &ui32RGXFWLogType);
+ if (PVRSRV_OK != eResult)
+ {
+ return eResult;
+ }
+ if (!ui32RGXFWLogType)
+ {
+ ui32RGXFWLogType = RGXFWIF_LOG_TYPE_GROUP_MAIN;
+ }
+ if (2 == ui32Value)
+ {
+ ui32RGXFWLogType |= RGXFWIF_LOG_TYPE_TRACE;
+ }
+ }
+
+ eResult = PVRSRVRGXDebugMiscSetFWLogKM(NULL, psDeviceNode, ui32RGXFWLogType);
+ return eResult;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_BOOL *pbValue)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+ *pbValue = psDevInfo->bEnableFWPoisonOnFree;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFree(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_BOOL bValue)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+
+ psDevInfo->bEnableFWPoisonOnFree = bValue;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXQueryFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ *pui32Value = psDevInfo->ubFWPoisonOnFreeValue;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR RGXSetFWPoisonOnFreeValue(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *) psDeviceNode->pvDevice;
+ psDevInfo->ubFWPoisonOnFreeValue = (IMG_BYTE) ui32Value;
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXInitFirmwareKM
+ */
+PVRSRV_ERROR
+PVRSRVRGXInitFirmwareKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ RGXFWIF_COMPCHECKS_BVNC *psFirmwareBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+ FW_PERF_CONF eFirmwarePerf,
+ IMG_UINT32 ui32ConfigFlagsExt)
+{
+ PVRSRV_ERROR eError;
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+ IMG_BOOL bCompatibleAll=IMG_TRUE, bCompatibleVersion=IMG_TRUE, bCompatibleLenMax=IMG_TRUE, bCompatibleBNC=IMG_TRUE, bCompatibleV=IMG_TRUE;
+ IMG_UINT32 ui32NumBIFTilingConfigs, *pui32BIFTilingXStrides, i, ui32B, ui32V, ui32N, ui32C;
+ RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+ IMG_CHAR szV[8];
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
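+	/* The BVNC is the four-part RGX core version (printed as B.V.N.C). The
+	 * driver, firmware and client each carry a packed copy, and all three
+	 * must match before initialisation is allowed to proceed.
+	 */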
+ OSSNPrintf(szV, sizeof(szV),"%d",psDevInfo->sDevFeatureCfg.ui32V);
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, psDevInfo->sDevFeatureCfg.ui32B, szV, psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+ /* Check if BVNC numbers of firmware and driver are compatible */
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ bCompatibleAll = IMG_TRUE;
+ }
+ else
+ {
+ RGX_BVNC_EQUAL(sBVNC, *psFirmwareBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+ }
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and firmware (%d).",
+ __FUNCTION__,
+ sBVNC.ui32LayoutVersion,
+ psFirmwareBVNC->ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and firmware (%d).",
+ __FUNCTION__,
+ sBVNC.ui32VLenMax,
+ psFirmwareBVNC->ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / firmware BNC (%d._.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / firmware BVNC (%d.%s.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and firmware BVNC (%d.%s.%d.%d) match. [ OK ]",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psFirmwareBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psFirmwareBVNC)));
+ }
+
+ ui32B = psDevInfo->sDevFeatureCfg.ui32B;
+ ui32V = psDevInfo->sDevFeatureCfg.ui32V;
+ ui32N = psDevInfo->sDevFeatureCfg.ui32N;
+ ui32C = psDevInfo->sDevFeatureCfg.ui32C;
+
+ OSSNPrintf(szV, sizeof(szV),"%d",ui32V);
+
+ /* Check if BVNC numbers of client and driver are compatible */
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, ui32B, szV, ui32N, ui32C);
+
+ RGX_BVNC_EQUAL(sBVNC, *psClientBVNC, bCompatibleAll, bCompatibleVersion, bCompatibleLenMax, bCompatibleBNC, bCompatibleV);
+
+ if (!bCompatibleAll)
+ {
+ if (!bCompatibleVersion)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible compatibility struct version of driver (%d) and client (%d).",
+ __FUNCTION__,
+ sBVNC.ui32LayoutVersion,
+ psClientBVNC->ui32LayoutVersion));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleLenMax)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible V maxlen of driver (%d) and client (%d).",
+ __FUNCTION__,
+ sBVNC.ui32VLenMax,
+ psClientBVNC->ui32VLenMax));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleBNC)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BNC (%d._.%d.%d) / client BNC (%d._.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+
+ if (!bCompatibleV)
+ {
+ PVR_LOG(("(FAIL) %s: Incompatible driver BVNC (%d.%s.%d.%d) / client BVNC (%d.%s.%d.%d).",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+ eError = PVRSRV_ERROR_BVNC_MISMATCH;
+ PVR_DBG_BREAK;
+ goto failed_to_pass_compatibility_check;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver BVNC (%d.%s.%d.%d) and client BVNC (%d.%s.%d.%d) match. [ OK ]",
+ __FUNCTION__,
+ RGX_BVNC_PACKED_EXTR_B(sBVNC),
+ RGX_BVNC_PACKED_EXTR_V(sBVNC),
+ RGX_BVNC_PACKED_EXTR_N(sBVNC),
+ RGX_BVNC_PACKED_EXTR_C(sBVNC),
+ RGX_BVNC_PACKED_EXTR_B(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_V(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_N(*psClientBVNC),
+ RGX_BVNC_PACKED_EXTR_C(*psClientBVNC)));
+ }
+
+ PVRSRVSystemBIFTilingGetConfig(psDeviceNode->psDevConfig,
+ &eBIFTilingMode,
+ &ui32NumBIFTilingConfigs);
+ pui32BIFTilingXStrides = OSAllocMem(sizeof(IMG_UINT32) * ui32NumBIFTilingConfigs);
+ if (pui32BIFTilingXStrides == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: OSAllocMem failed (%u)", eError));
+ goto failed_BIF_tiling_alloc;
+ }
+ for(i = 0; i < ui32NumBIFTilingConfigs; i++)
+ {
+ eError = PVRSRVSystemBIFTilingHeapGetXStride(psDeviceNode->psDevConfig,
+ i+1,
+ &pui32BIFTilingXStrides[i]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to get BIF tiling X stride for heap %u (%u)",
+ __func__, i + 1, eError));
+ goto failed_BIF_heap_init;
+ }
+ }
+
+ eError = RGXSetupFirmware(psDeviceNode,
+ bEnableSignatureChecks,
+ ui32SignatureChecksBufSize,
+ ui32HWPerfFWBufSizeKB,
+ ui64HWPerfFilter,
+ ui32RGXFWAlignChecksArrLength,
+ pui32RGXFWAlignChecks,
+ ui32ConfigFlags,
+ ui32LogType,
+ eBIFTilingMode,
+ ui32NumBIFTilingConfigs,
+ pui32BIFTilingXStrides,
+ ui32FilterFlags,
+ ui32JonesDisableMask,
+ ui32HWRDebugDumpLimit,
+ ui32HWPerfCountersDataSize,
+ ppsHWPerfPMR,
+ psRGXFwInit,
+ eRGXRDPowerIslandingConf,
+ eFirmwarePerf,
+ ui32ConfigFlagsExt);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXInitFirmwareKM: RGXSetupFirmware failed (%u)", eError));
+ goto failed_init_firmware;
+ }
+
+ OSFreeMem(pui32BIFTilingXStrides);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_EnableLogGroup,
+ RGXFWTraceQueryFilter,
+ RGXFWTraceSetFilter,
+ psDeviceNode,
+ NULL);
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FirmwareLogType,
+ RGXFWTraceQueryLogType,
+ RGXFWTraceSetLogType,
+ psDeviceNode,
+ NULL);
+
+ /* FW Poison values are not passed through from the init code
+ * so grab them here */
+ OSCreateKMAppHintState(&pvAppHintState);
+
+ ui32AppHintDefault = PVRSRV_APPHINT_ENABLEFWPOISONONFREE;
+ OSGetKMAppHintBOOL(pvAppHintState,
+ EnableFWPoisonOnFree,
+ &ui32AppHintDefault,
+ &psDevInfo->bEnableFWPoisonOnFree);
+
+ ui32AppHintDefault = PVRSRV_APPHINT_FWPOISONONFREEVALUE;
+ OSGetKMAppHintUINT32(pvAppHintState,
+ FWPoisonOnFreeValue,
+ &ui32AppHintDefault,
+ (IMG_UINT32*)&psDevInfo->ubFWPoisonOnFreeValue);
+
+ OSFreeKMAppHintState(pvAppHintState);
+
+ PVRSRVAppHintRegisterHandlersBOOL(APPHINT_ID_EnableFWPoisonOnFree,
+ RGXQueryFWPoisonOnFree,
+ RGXSetFWPoisonOnFree,
+ psDeviceNode,
+ NULL);
+
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_FWPoisonOnFreeValue,
+ RGXQueryFWPoisonOnFreeValue,
+ RGXSetFWPoisonOnFreeValue,
+ psDeviceNode,
+ NULL);
+
+ return PVRSRV_OK;
+
+ failed_init_firmware:
+ failed_BIF_heap_init:
+ OSFreeMem(pui32BIFTilingXStrides);
+failed_BIF_tiling_alloc:
+failed_to_pass_compatibility_check:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/* See device.h for function declaration */
+static PVRSRV_ERROR RGXAllocUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC **psMemDesc,
+ IMG_UINT32 *puiSyncPrimVAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+ IMG_DEVMEM_SIZE_T uiUFOBlockSize = sizeof(IMG_UINT32);
+ IMG_DEVMEM_ALIGN_T ui32UFOBlockAlign = sizeof(IMG_UINT32);
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ /* Size and align are 'expanded' because we request an Exportalign allocation */
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+ &uiUFOBlockSize,
+ &ui32UFOBlockAlign);
+
+ eError = DevmemFwAllocateExportable(psDeviceNode,
+ uiUFOBlockSize,
+ ui32UFOBlockAlign,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_CACHE_COHERENT |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ "FwExUFOBlock",
+ psMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ RGXSetFirmwareAddress(&pFirmwareAddr, *psMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *puiSyncPrimVAddr = pFirmwareAddr.ui32Addr;
+ *puiSyncPrimBlockSize = TRUNCATE_64BITS_TO_32BITS(uiUFOBlockSize);
+
+ return PVRSRV_OK;
+
+ e0:
+ return eError;
+}
+
+/* See device.h for function declaration */
+static void RGXFreeUFOBlock(PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psMemDesc)
+{
+ /*
+ If the system has snooping of the device cache then the UFO block
+ might be in the cache so we need to flush it out before freeing
+ the memory
+
+	   When the device is being shut down/destroyed we no longer care;
+	   several data structures needed to issue a flush have already been
+	   destroyed.
+ */
+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig) &&
+ psDeviceNode->eDevState != PVRSRV_DEVICE_STATE_DEINIT)
+ {
+ RGXFWIF_KCCB_CMD sFlushInvalCmd;
+ PVRSRV_ERROR eError;
+
+ /* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+ eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sFlushInvalCmd,
+ sizeof(sFlushInvalCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: Failed to schedule SLC flush command with error (%u)", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXFreeUFOBlock: SLC flush and invalidate aborted with error (%u)", eError));
+ }
+ }
+ }
+
+ RGXUnsetFirmwareAddress(psMemDesc);
+ DevmemFwFree(psDeviceNode->pvDevice, psMemDesc);
+}
+
+/*
+ DevDeInitRGX
+ */
+PVRSRV_ERROR DevDeInitRGX (PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO*)psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ IMG_UINT32 ui32Temp=0;
+
+ if (!psDevInfo)
+ {
+ /* Can happen if DevInitRGX failed */
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Null DevInfo"));
+ return PVRSRV_OK;
+ }
+
+ eError = DeviceDepBridgeDeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+ PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeDeInit");
+
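+	/* Note: the 'else' below is deliberately left open across the #endif:
+	 * when PVRSRV_FORCE_UNLOAD_IF_BAD_STATE is defined the dummy-page check
+	 * that follows only runs in the healthy-services case, otherwise it
+	 * always runs.
+	 */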
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ OSAtomicWrite(&psDeviceNode->sDummyPage.atRefCounter, 0);
+ PVR_UNREFERENCED_PARAMETER(ui32Temp);
+ }
+ else
+#endif
+ {
+ /*Delete the Dummy page related info */
+ ui32Temp = (IMG_UINT32)OSAtomicRead(&psDeviceNode->sDummyPage.atRefCounter);
+ if (0 != ui32Temp)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Dummy page reference counter is non zero (%u)",
+ __func__,
+ ui32Temp));
+ PVR_ASSERT(0);
+ }
+ }
+#if defined(PDUMP)
+ if (NULL != psDeviceNode->sDummyPage.hPdumpDummyPg)
+ {
+ PDUMPCOMMENT("Error dummy page handle is still active");
+ }
+#endif
+
+#if defined(SUPPORT_PDVFS) && !defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ if (psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer)
+ {
+ OSDisableTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+ OSRemoveTimer(psDeviceNode->psDevConfig->sDVFS.sPDVFSData.hReactiveTimer);
+ }
+#endif
+
+	/* The lock needs to be a dispatch-type lock here because it can be acquired from the MISR (Z-buffer) path */
+ OSLockDestroy(psDeviceNode->sDummyPage.psDummyPgLock);
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ OSLockDestroy(psDevInfo->hCounterDumpingLock);
+#endif
+
+ /* Unregister debug request notifiers first as they could depend on anything. */
+ if (psDevInfo->hDbgReqNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDevInfo->hDbgReqNotify);
+ }
+
+ /* Cancel notifications to this device */
+ PVRSRVUnregisterCmdCompleteNotify(psDeviceNode->hCmdCompNotify);
+ psDeviceNode->hCmdCompNotify = NULL;
+
+ /*
+ * De-initialise in reverse order, so stage 2 init is undone first.
+ */
+ if (psDevInfo->bDevInit2Done)
+ {
+ psDevInfo->bDevInit2Done = IMG_FALSE;
+
+#if !defined(NO_HARDWARE)
+ (void) SysUninstallDeviceLISR(psDevInfo->pvLISRData);
+ (void) OSUninstallMISR(psDevInfo->pvMISRData);
+ (void) OSUninstallMISR(psDevInfo->hProcessQueuesMISR);
+ if (psDevInfo->pvAPMISRData != NULL)
+ {
+ (void) OSUninstallMISR(psDevInfo->pvAPMISRData);
+ }
+#endif /* !NO_HARDWARE */
+
+ /* Remove the device from the power manager */
+ eError = PVRSRVRemovePowerDevice(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSLockDestroy(psDevInfo->hGPUUtilLock);
+
+ /* Free DVFS Table */
+ if (psDevInfo->psGpuDVFSTable != NULL)
+ {
+ OSFreeMem(psDevInfo->psGpuDVFSTable);
+ psDevInfo->psGpuDVFSTable = NULL;
+ }
+
+#if defined(SUPPORT_RAY_TRACING)
+ OSLockDestroy(psDevInfo->hLockRPMContext);
+ OSLockDestroy(psDevInfo->hLockRPMFreeList);
+#endif
+
+ /* De-init Freelists/ZBuffers... */
+ OSLockDestroy(psDevInfo->hLockFreeList);
+ OSLockDestroy(psDevInfo->hLockZSBuffer);
+
+ /* Unregister MMU related stuff */
+ eError = RGXMMUInit_Unregister(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMMUInit_Unregister (0x%x)", eError));
+ return eError;
+ }
+
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ /* Unregister MMU related stuff */
+ eError = RGXMipsMMUInit_Unregister(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed RGXMipsMMUInit_Unregister (0x%x)", eError));
+ return eError;
+ }
+ }
+ }
+
+ /* UnMap Regs */
+ if (psDevInfo->pvRegsBaseKM != NULL)
+ {
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+#endif /* !NO_HARDWARE */
+ psDevInfo->pvRegsBaseKM = NULL;
+ }
+
+#if 0 /* not required at this time */
+ if (psDevInfo->hTimer)
+ {
+ eError = OSRemoveTimer(psDevInfo->hTimer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevDeInitRGX: Failed to remove timer"));
+ return eError;
+ }
+ psDevInfo->hTimer = NULL;
+ }
+#endif
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+
+ RGXDeInitHeaps(psDevMemoryInfo);
+
+ if (psDevInfo->psRGXFWCodeMemDesc)
+ {
+ /* Free fw code */
+ PDUMPCOMMENT("Freeing FW code memory");
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCodeMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCodeMemDesc);
+ psDevInfo->psRGXFWCodeMemDesc = NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"No firmware code memory to free"));
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ if (psDevInfo->psTrampoline->sPages.u.pvHandle)
+ {
+ /* Free trampoline region */
+ PDUMPCOMMENT("Freeing trampoline memory");
+ RGXFreeTrampoline(psDeviceNode);
+ }
+ }
+
+ if (psDevInfo->psRGXFWDataMemDesc)
+ {
+ /* Free fw data */
+ PDUMPCOMMENT("Freeing FW data memory");
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWDataMemDesc);
+ psDevInfo->psRGXFWDataMemDesc = NULL;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"No firmware data memory to free"));
+ }
+
+ if (psDevInfo->psRGXFWCorememMemDesc)
+ {
+		/* Free fw coremem */
+ PDUMPCOMMENT("Freeing FW coremem memory");
+ DevmemReleaseDevVirtAddr(psDevInfo->psRGXFWCorememMemDesc);
+ DevmemFwFree(psDevInfo, psDevInfo->psRGXFWCorememMemDesc);
+ psDevInfo->psRGXFWCorememMemDesc = NULL;
+ }
+
+ /*
+ Free the firmware allocations.
+ */
+ RGXFreeFirmware(psDevInfo);
+ RGXDeInitDestroyFWKernelMemoryContext(psDeviceNode);
+
+ /* De-initialise non-device specific (TL) users of RGX device memory */
+ RGXHWPerfHostDeInit(psDevInfo);
+ eError = HTBDeInit();
+ PVR_LOG_IF_ERROR(eError, "HTBDeInit");
+
+ /* destroy the stalled CCB locks */
+ OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+ OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+
+ /* destroy the context list locks */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+ OSLockDestroy(psDevInfo->hBPLock);
+ OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+#endif
+ OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+ OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+ OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+ OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+ OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+ OSLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+ OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+
+
+ if ((psDevInfo->hNMILock != NULL) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS)))
+ {
+ OSLockDestroy(psDevInfo->hNMILock);
+ }
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if (psDevInfo->hDebugFaultInfoLock != NULL)
+ {
+ OSLockDestroy(psDevInfo->hDebugFaultInfoLock);
+ }
+ if (psDevInfo->hMMUCtxUnregLock != NULL)
+ {
+ OSLockDestroy(psDevInfo->hMMUCtxUnregLock);
+ }
+#endif
+
+ /* Free device BVNC string */
+ if (NULL != psDevInfo->sDevFeatureCfg.pszBVNCString)
+ {
+ OSFreeMem(psDevInfo->sDevFeatureCfg.pszBVNCString);
+ }
+
+ /* DeAllocate devinfo */
+ OSFreeMem(psDevInfo);
+
+ psDeviceNode->pvDevice = NULL;
+
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static
+PVRSRV_ERROR RGXResetPDump(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)(psDeviceNode->pvDevice);
+
+ psDevInfo->bDumpedKCCBCtlAlready = IMG_FALSE;
+ psDevInfo->ui32LastBlockKCCBCtrlDumped = PDUMP_BLOCKNUM_INVALID;
+
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+static INLINE DEVMEM_HEAP_BLUEPRINT _blueprint_init(IMG_CHAR *name,
+ IMG_UINT64 heap_base,
+ IMG_DEVMEM_SIZE_T heap_length,
+ IMG_UINT32 log2_import_alignment,
+ IMG_UINT32 tiling_mode,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DEVMEM_HEAP_BLUEPRINT b = {
+ .pszName = name,
+ .sHeapBaseAddr.uiAddr = heap_base,
+ .uiHeapLength = heap_length,
+ .uiLog2DataPageSize = RGXHeapDerivePageSize(OSGetPageShift()),
+ .uiLog2ImportAlignment = log2_import_alignment,
+ .uiLog2TilingStrideFactor = (RGX_BIF_TILING_HEAP_LOG2_ALIGN_TO_STRIDE_BASE - tiling_mode)
+ };
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+ IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+ if (!OSStringCompare(name, RGX_GENERAL_NON4K_HEAP_IDENT))
+ {
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+ &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+ switch (ui32GeneralNon4KHeapPageSize)
+ {
+ case (1 << RGX_HEAP_4KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+ break;
+ case (1 << RGX_HEAP_16KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+ break;
+ case (1 << RGX_HEAP_64KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+ break;
+ case (1 << RGX_HEAP_256KB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+ break;
+ case (1 << RGX_HEAP_1MB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+ break;
+ case (1 << RGX_HEAP_2MB_PAGE_SHIFT):
+ b.uiLog2DataPageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+ break;
+ default:
+ b.uiLog2DataPageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+
+ PVR_DPF((PVR_DBG_ERROR,"Invalid AppHint GeneralAltHeapPageSize [%d] value, using 16KB",
+ ui32AppHintDefault));
+ break;
+ }
+ OSFreeKMAppHintState(pvAppHintState);
+ }
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ /* MIPS FW must use 4K pages even when kernel is using 64K pages */
+ if (OSStringCompare(name, RGX_FIRMWARE_MAIN_HEAP_IDENT) == 0 ||
+ OSStringCompare(name, RGX_FIRMWARE_CONFIG_HEAP_IDENT) == 0 ||
+ OSStringCompare(name, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT) == 0 )
+ {
+ b.uiLog2DataPageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+ }
+ }
+
+ return b;
+}
+
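+/* Each INIT_* macro below writes one heap blueprint at the cursor and then
+ * advances it; RGXInitHeaps later derives the heap count from the final
+ * cursor position.
+ */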
+#define INIT_HEAP(NAME) \
+ do { \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ RGX_ ## NAME ## _HEAP_IDENT, \
+ RGX_ ## NAME ## _HEAP_BASE, \
+ RGX_ ## NAME ## _HEAP_SIZE, \
+ 0, 0, psDevInfo); \
+ psDeviceMemoryHeapCursor++; \
+ } while (0)
+
+#define INIT_FW_MAIN_HEAP(MODE, FWCORE) \
+ do { \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ RGX_FIRMWARE_MAIN_HEAP_IDENT, \
+ RGX_FIRMWARE_ ## MODE ## _MAIN_HEAP_BASE, \
+ RGX_FIRMWARE_ ## FWCORE ## _MAIN_HEAP_SIZE, \
+ 0, 0, psDevInfo); \
+ psDeviceMemoryHeapCursor++; \
+ } while (0)
+
+#define INIT_FW_CONFIG_HEAP(MODE) \
+ do { \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ RGX_FIRMWARE_CONFIG_HEAP_IDENT, \
+ RGX_FIRMWARE_ ## MODE ## _CONFIG_HEAP_BASE, \
+ RGX_FIRMWARE_CONFIG_HEAP_SIZE, \
+ 0, 0, psDevInfo); \
+ psDeviceMemoryHeapCursor++; \
+ } while (0)
+
+#define INIT_HEAP_NAME(STR, NAME) \
+ do { \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ STR, \
+ RGX_ ## NAME ## _HEAP_BASE, \
+ RGX_ ## NAME ## _HEAP_SIZE, \
+ 0, 0, psDevInfo); \
+ psDeviceMemoryHeapCursor++; \
+ } while (0)
+
+#define INIT_TILING_HEAP(D, N, M) \
+ do { \
+ IMG_UINT32 xstride; \
+ PVRSRVSystemBIFTilingHeapGetXStride((D)->psDeviceNode->psDevConfig, N, &xstride); \
+ *psDeviceMemoryHeapCursor = _blueprint_init( \
+ RGX_BIF_TILING_HEAP_ ## N ## _IDENT, \
+ RGX_BIF_TILING_HEAP_ ## N ## _BASE, \
+ RGX_BIF_TILING_HEAP_SIZE, \
+ RGX_BIF_TILING_HEAP_ALIGN_LOG2_FROM_XSTRIDE(xstride), \
+ (IMG_UINT32)M, \
+ psDevInfo); \
+ psDeviceMemoryHeapCursor++; \
+ } while (0)
+
+static PVRSRV_ERROR RGXInitHeaps(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DEVICE_MEMORY_INFO *psNewMemoryInfo,
+ IMG_UINT32 *pui32Log2DummyPgSize)
+{
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+ RGXFWIF_BIFTILINGMODE eBIFTilingMode;
+ IMG_UINT32 uiNumHeaps;
+ void *pvAppHintState = NULL;
+ IMG_UINT32 ui32AppHintDefault = PVRSRV_APPHINT_GENERAL_NON4K_HEAP_PAGE_SIZE;
+ IMG_UINT32 ui32GeneralNon4KHeapPageSize;
+
+#if defined(SUPPORT_VALIDATION)
+ IMG_UINT32 ui32BIFTilingMode, ui32AppHintDefaultTilingMode = RGXFWIF_BIFTILINGMODE_MAX;
+
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(pvAppHintState, BIFTilingMode,
+ &ui32AppHintDefaultTilingMode, &ui32BIFTilingMode);
+ OSFreeKMAppHintState(pvAppHintState);
+ if (ui32BIFTilingMode == RGXFWIF_BIFTILINGMODE_256x16 || ui32BIFTilingMode == RGXFWIF_BIFTILINGMODE_512x8)
+ {
+ psDevInfo->psDeviceNode->psDevConfig->eBIFTilingMode = ui32BIFTilingMode;
+ }
+ else if (ui32BIFTilingMode != RGXFWIF_BIFTILINGMODE_MAX)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXInitHeaps: BIF Tiling mode apphint is invalid"));
+ }
+#endif
+
+ /* FIXME - consider whether this ought not to be on the device node itself */
+ psNewMemoryInfo->psDeviceMemoryHeap = OSAllocMem(sizeof(DEVMEM_HEAP_BLUEPRINT) * RGX_MAX_HEAP_ID);
+ if (psNewMemoryInfo->psDeviceMemoryHeap == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_BLUEPRINT"));
+ goto e0;
+ }
+
+ PVRSRVSystemBIFTilingGetConfig(psDevInfo->psDeviceNode->psDevConfig, &eBIFTilingMode, &uiNumHeaps);
+
+ /* Get the page size for the dummy page from the NON4K heap apphint */
+ OSCreateKMAppHintState(&pvAppHintState);
+ OSGetKMAppHintUINT32(pvAppHintState, GeneralNon4KHeapPageSize,
+ &ui32AppHintDefault, &ui32GeneralNon4KHeapPageSize);
+ *pui32Log2DummyPgSize = ExactLog2(ui32GeneralNon4KHeapPageSize);
+ OSFreeKMAppHintState(pvAppHintState);
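+	/* (Presumably the dummy page has to be as large as the biggest data page
+	 * size in use, i.e. the NON4K heap's, so that a single dummy page can
+	 * back a mapping in any heap.)
+	 */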
+
+ /* Initialise the heaps */
+ psDeviceMemoryHeapCursor = psNewMemoryInfo->psDeviceMemoryHeap;
+
+ INIT_HEAP(GENERAL_SVM);
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+ {
+ INIT_HEAP_NAME(RGX_GENERAL_HEAP_IDENT, GENERAL_BRN_65273);
+ }
+ else
+ {
+ INIT_HEAP(GENERAL);
+ }
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 63142))
+ {
+ /* BRN63142 heap must be at the top of an aligned 16GB range. */
+ INIT_HEAP(RGNHDR_BRN_63142);
+ PVR_ASSERT((RGX_RGNHDR_BRN_63142_HEAP_BASE & IMG_UINT64_C(0x3FFFFFFFF)) +
+ RGX_RGNHDR_BRN_63142_HEAP_SIZE == IMG_UINT64_C(0x400000000));
+ }
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+ {
+ INIT_HEAP_NAME(RGX_GENERAL_NON4K_HEAP_IDENT, GENERAL_NON4K_BRN_65273);
+ INIT_HEAP_NAME(RGX_VISTEST_HEAP_IDENT, VISTEST_BRN_65273);
+
+ /* HWBRN65273 workaround also requires two Region Header buffers 4GB apart. */
+ INIT_HEAP(MMU_INIA_BRN_65273);
+ INIT_HEAP(MMU_INIB_BRN_65273);
+ }
+ else
+ {
+ INIT_HEAP(GENERAL_NON4K);
+ INIT_HEAP(VISTEST);
+ }
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+ {
+ INIT_HEAP_NAME(RGX_PDSCODEDATA_HEAP_IDENT, PDSCODEDATA_BRN_65273);
+ INIT_HEAP_NAME(RGX_USCCODE_HEAP_IDENT, USCCODE_BRN_65273);
+ }
+ else
+ {
+ INIT_HEAP(PDSCODEDATA);
+ INIT_HEAP(USCCODE);
+ }
+
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+ {
+ INIT_HEAP_NAME(RGX_TQ3DPARAMETERS_HEAP_IDENT, TQ3DPARAMETERS_BRN_65273);
+ }
+ else
+ {
+ INIT_HEAP(TQ3DPARAMETERS);
+ }
+
+ INIT_TILING_HEAP(psDevInfo, 1, eBIFTilingMode);
+ INIT_TILING_HEAP(psDevInfo, 2, eBIFTilingMode);
+ INIT_TILING_HEAP(psDevInfo, 3, eBIFTilingMode);
+ INIT_TILING_HEAP(psDevInfo, 4, eBIFTilingMode);
+ INIT_HEAP(DOPPLER);
+ INIT_HEAP(DOPPLER_OVERFLOW);
+ INIT_HEAP(TDM_TPU_YUV_COEFFS);
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SIGNAL_SNOOPING))
+ {
+ INIT_HEAP(SERVICES_SIGNALS);
+ INIT_HEAP(SIGNALS);
+ }
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ INIT_FW_CONFIG_HEAP(GUEST);
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ INIT_FW_MAIN_HEAP(GUEST, MIPS);
+ }
+ else
+ {
+ INIT_FW_MAIN_HEAP(GUEST, META);
+ }
+ }
+ else
+ {
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ INIT_FW_MAIN_HEAP(HYPERV, MIPS);
+ }
+ else
+ {
+ INIT_FW_MAIN_HEAP(HYPERV, META);
+ }
+
+ INIT_FW_CONFIG_HEAP(HYPERV);
+ }
+
+ /* set the heap count */
+ psNewMemoryInfo->ui32HeapCount = (IMG_UINT32)(psDeviceMemoryHeapCursor - psNewMemoryInfo->psDeviceMemoryHeap);
+
+ PVR_ASSERT(psNewMemoryInfo->ui32HeapCount <= RGX_MAX_HEAP_ID);
+
+	/*
+	   In the new heap setup we initialise 2 configurations:
+	   1 - One is for the firmware only (index 1 in the array)
+	       a. This primarily contains the firmware heap.
+	       b. It also contains additional guest OSID firmware heap(s),
+	          but only if the number of supported firmware OSIDs > 1.
+	   2 - The other is for clients only (index 0 in the array)
+	       a. This contains all the other client heaps.
+	 */
+ psNewMemoryInfo->uiNumHeapConfigs = 2;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray = OSAllocMem(sizeof(DEVMEM_HEAP_CONFIG) * psNewMemoryInfo->uiNumHeapConfigs);
+ if (psNewMemoryInfo->psDeviceMemoryHeapConfigArray == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterDevice : Failed to alloc memory for DEVMEM_HEAP_CONFIG"));
+ goto e1;
+ }
+
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].pszName = "Default Heap Configuration";
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].uiNumHeaps = psNewMemoryInfo->ui32HeapCount-2;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[0].psHeapBlueprintArray = psNewMemoryInfo->psDeviceMemoryHeap;
+
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].pszName = "Firmware Heap Configuration";
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps = 2;
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].psHeapBlueprintArray = psDeviceMemoryHeapCursor-2;
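+	/* Note: this relies on the FW main and config heaps being the last two
+	 * blueprints initialised above, so that 'cursor - 2' addresses exactly
+	 * the two firmware heaps and the first 'count - 2' entries are the
+	 * client heaps.
+	 */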
+
+ /* Perform additional virtualization initialization */
+ if (RGXVzInitHeaps(psNewMemoryInfo, psDeviceMemoryHeapCursor, psDevInfo) != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ return PVRSRV_OK;
+ e1:
+ OSFreeMem(psNewMemoryInfo->psDeviceMemoryHeap);
+ e0:
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+}
+
+#undef INIT_HEAP
+#undef INIT_HEAP_NAME
+#undef INIT_TILING_HEAP
+
+static void RGXDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+ RGXVzDeInitHeaps(psDevMemoryInfo);
+ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeapConfigArray);
+ OSFreeMem(psDevMemoryInfo->psDeviceMemoryHeap);
+}
+
+/*
+ RGXRegisterDevice
+ */
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_RGXDEV_INFO **ppsDevInfo)
+{
+ PVRSRV_ERROR eError;
+ DEVICE_MEMORY_INFO *psDevMemoryInfo;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PDUMPCOMMENT("Device Name: %s", psDeviceNode->psDevConfig->pszName);
+
+ if (psDeviceNode->psDevConfig->pszVersion)
+ {
+ PDUMPCOMMENT("Device Version: %s", psDeviceNode->psDevConfig->pszVersion);
+ }
+
+#if defined(RGX_FEATURE_SYSTEM_CACHE)
+ PDUMPCOMMENT("RGX System Level Cache is present");
+#endif /* RGX_FEATURE_SYSTEM_CACHE */
+
+ PDUMPCOMMENT("RGX Initialisation (Part 1)");
+
+ /*********************
+ * Device node setup *
+ *********************/
+ /* Setup static data and callbacks on the device agnostic device node */
+#if defined(PDUMP)
+ psDeviceNode->sDevId.pszPDumpRegName = RGX_PDUMPREG_NAME;
+ /*
+	   FIXME: This should not be required as PMRs should give the memspace
+	   name. However, due to limitations within PDump we need a memspace name
+	   when pdumping with an MMU context with a virtual address, in which
+	   case we don't have a PMR to get the name from.
+
+	   There is also the issue of obtaining a namespace name for the catbase,
+	   which is required when we PDump the write of the physical catbase into
+	   the FW structure.
+ */
+ psDeviceNode->sDevId.pszPDumpDevName = PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+ psDeviceNode->pfnPDumpInitDevice = &RGXResetPDump;
+#endif /* PDUMP */
+
+ OSAtomicWrite(&psDeviceNode->eHealthStatus, PVRSRV_DEVICE_HEALTH_STATUS_OK);
+ OSAtomicWrite(&psDeviceNode->eHealthReason, PVRSRV_DEVICE_HEALTH_REASON_NONE);
+
+ /* Configure MMU specific stuff */
+ RGXMMUInit_Register(psDeviceNode);
+
+ psDeviceNode->pfnMMUCacheInvalidate = RGXMMUCacheInvalidate;
+
+ psDeviceNode->pfnMMUCacheInvalidateKick = RGXMMUCacheInvalidateKick;
+
+ /* Register RGX to receive notifies when other devices complete some work */
+ PVRSRVRegisterCmdCompleteNotify(&psDeviceNode->hCmdCompNotify, &RGXScheduleProcessQueuesKM, psDeviceNode);
+
+ psDeviceNode->pfnInitDeviceCompatCheck = &RGXDevInitCompatCheck;
+
+ /* Register callbacks for creation of device memory contexts */
+ psDeviceNode->pfnRegisterMemoryContext = RGXRegisterMemoryContext;
+ psDeviceNode->pfnUnregisterMemoryContext = RGXUnregisterMemoryContext;
+
+ /* Register callbacks for Unified Fence Objects */
+ psDeviceNode->pfnAllocUFOBlock = RGXAllocUFOBlock;
+ psDeviceNode->pfnFreeUFOBlock = RGXFreeUFOBlock;
+
+ /* Register callback for checking the device's health */
+ psDeviceNode->pfnUpdateHealthStatus = RGXUpdateHealthStatus;
+
+ /* Register method to service the FW HWPerf buffer */
+ psDeviceNode->pfnServiceHWPerf = RGXHWPerfDataStoreCB;
+
+ /* Register callback for getting the device version information string */
+ psDeviceNode->pfnDeviceVersionString = RGXDevVersionString;
+
+ /* Register callback for getting the device clock speed */
+ psDeviceNode->pfnDeviceClockSpeed = RGXDevClockSpeed;
+
+ /* Register callback for soft resetting some device modules */
+ psDeviceNode->pfnSoftReset = RGXSoftReset;
+
+ /* Register callback for resetting the HWR logs */
+ psDeviceNode->pfnResetHWRLogs = RGXResetHWRLogs;
+
+#if defined(RGXFW_ALIGNCHECKS)
+ /* Register callback for checking alignment of UM structures */
+ psDeviceNode->pfnAlignmentCheck = RGXAlignmentCheck;
+#endif
+
+ /*Register callback for checking the supported features and getting the
+ * corresponding values */
+ psDeviceNode->pfnCheckDeviceFeature = RGXBvncCheckFeatureSupported;
+ psDeviceNode->pfnGetDeviceFeatureValue = RGXBvncGetSupportedFeatureValue;
+
+ /*Set up required support for dummy page */
+ OSAtomicWrite(&(psDeviceNode->sDummyPage.atRefCounter), 0);
+
+ /*Set the order to 0 */
+ psDeviceNode->sDummyPage.sDummyPageHandle.ui32Order = 0;
+
+ /*Set the size of the Dummy page to zero */
+ psDeviceNode->sDummyPage.ui32Log2DummyPgSize = 0;
+
+ /*Set the Dummy page phys addr */
+ psDeviceNode->sDummyPage.ui64DummyPgPhysAddr = MMU_BAD_PHYS_ADDR;
+
+	/* The lock needs to be a dispatch-type lock here because it can be acquired from the MISR (Z-buffer) path */
+ eError = OSLockCreate(&psDeviceNode->sDummyPage.psDummyPgLock ,LOCK_TYPE_DISPATCH);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create dummy page lock", __func__));
+ return eError;
+ }
+#if defined(PDUMP)
+ psDeviceNode->sDummyPage.hPdumpDummyPg = NULL;
+#endif
+
+ /*********************
+ * Device info setup *
+ *********************/
+ /* Allocate device control block */
+ psDevInfo = OSAllocZMem(sizeof(*psDevInfo));
+ if (psDevInfo == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"DevInitRGXPart1 : Failed to alloc memory for DevInfo"));
+ return (PVRSRV_ERROR_OUT_OF_MEMORY);
+ }
+ /* Default psTrampoline to point to null struct */
+ psDevInfo->psTrampoline = (RGX_MIPS_ADDRESS_TRAMPOLINE *)&sNullTrampoline;
+
+ /* create locks for the context lists stored in the DevInfo structure.
+ * these lists are modified on context create/destroy and read by the
+ * watchdog thread
+ */
+
+ eError = OSWRLockCreate(&(psDevInfo->hRenderCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create render context list lock", __func__));
+ goto e0;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hComputeCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create compute context list lock", __func__));
+ goto e1;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hTransferCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create transfer context list lock", __func__));
+ goto e2;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hTDMCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create TDM context list lock", __func__));
+ goto e3;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hRaytraceCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create raytrace context list lock", __func__));
+ goto e4;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hKickSyncCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create kick sync context list lock", __func__));
+ goto e5;
+ }
+
+ eError = OSWRLockCreate(&(psDevInfo->hMemoryCtxListLock));
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create memory context list lock", __func__));
+ goto e6;
+ }
+
+ eError = OSLockCreate(&psDevInfo->hLockKCCBDeferredCommandsList,LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to KCCB deferred commands list lock", __func__));
+ goto e7;
+ }
+ dllist_init(&(psDevInfo->sKCCBDeferredCommandsListHead));
+
+ dllist_init(&(psDevInfo->sRenderCtxtListHead));
+ dllist_init(&(psDevInfo->sComputeCtxtListHead));
+ dllist_init(&(psDevInfo->sTransferCtxtListHead));
+ dllist_init(&(psDevInfo->sTDMCtxtListHead));
+ dllist_init(&(psDevInfo->sRaytraceCtxtListHead));
+ dllist_init(&(psDevInfo->sKickSyncCtxtListHead));
+
+ dllist_init(&(psDevInfo->sCommonCtxtListHead));
+ psDevInfo->ui32CommonCtxtCurrentID = 1;
+
+
+ eError = OSWRLockCreate(&psDevInfo->hCommonCtxtListLock);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create common context list lock", __func__));
+ goto e8;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psDevInfo->sRegCongfig.hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create register configuration lock", __func__));
+ goto e9;
+ }
+
+ eError = OSLockCreate(&psDevInfo->hBPLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for break points", __func__));
+ goto e10;
+ }
+
+ eError = OSLockCreate(&psDevInfo->hRGXFWIfBufInitLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for trace buffers", __func__));
+ goto e11;
+ }
+#endif
+
+ eError = OSLockCreate(&psDevInfo->hCCBStallCheckLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB checking lock", __func__));
+ goto e12;
+ }
+ eError = OSLockCreate(&psDevInfo->hCCBRecoveryLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create stalled CCB recovery lock", __func__));
+ goto e13;
+ }
+
+ dllist_init(&psDevInfo->sMemoryContextList);
+
+ /* Setup static data and callbacks on the device specific device info */
+ psDevInfo->psDeviceNode = psDeviceNode;
+
+ psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
+ psDevInfo->pvDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;
+
+ /*
+ * Map RGX Registers
+ */
+#if !defined(NO_HARDWARE)
+ psDevInfo->pvRegsBaseKM = (void __iomem *) OSMapPhysToLin(psDeviceNode->psDevConfig->sRegsCpuPBase,
+ psDeviceNode->psDevConfig->ui32RegsSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+
+ if (psDevInfo->pvRegsBaseKM == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to create RGX register mapping", __func__));
+ eError = PVRSRV_ERROR_BAD_MAPPING;
+ goto e14;
+ }
+#endif
+
+ psDeviceNode->pvDevice = psDevInfo;
+
+ eError = RGXBvncInitialiseConfiguration(psDeviceNode);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Unsupported HW device detected by driver", __func__));
+ goto e15;
+ }
+
+ /* pdump info about the core */
+ PDUMPCOMMENT("RGX Version Information (KM): %d.%d.%d.%d",
+ psDevInfo->sDevFeatureCfg.ui32B,
+ psDevInfo->sDevFeatureCfg.ui32V,
+ psDevInfo->sDevFeatureCfg.ui32N,
+ psDevInfo->sDevFeatureCfg.ui32C);
+
+ eError = RGXInitHeaps(psDevInfo, psDevMemoryInfo,
+ &psDeviceNode->sDummyPage.ui32Log2DummyPgSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e15;
+ }
+
+ eError = RGXHWPerfInit(psDevInfo);
+ PVR_LOGG_IF_ERROR(eError, "RGXHWPerfInit", e15);
+
+ /* Register callback for dumping debug info */
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevInfo->hDbgReqNotify,
+ psDeviceNode,
+ RGXDebugRequestNotify,
+ DEBUG_REQUEST_SYS,
+ psDevInfo);
+ PVR_LOG_IF_ERROR(eError, "PVRSRVRegisterDbgRequestNotify");
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ RGXMipsMMUInit_Register(psDeviceNode);
+ }
+
+ /* The device shared-virtual-memory heap address-space size is stored here for faster
+ look-up without having to walk the device heap configuration structures during
+ client device connection (i.e. this size is relative to a zero-based offset) */
+ if (RGX_IS_BRN_SUPPORTED(psDevInfo, 65273))
+ {
+ psDeviceNode->ui64GeneralSVMHeapTopVA = 0;
+	}
+	else
+	{
+ psDeviceNode->ui64GeneralSVMHeapTopVA = RGX_GENERAL_SVM_HEAP_BASE + RGX_GENERAL_SVM_HEAP_SIZE;
+ }
+
+ if (NULL != psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit)
+ {
+		psDeviceNode->psDevConfig->pfnSysDevFeatureDepInit(psDeviceNode->psDevConfig,
+				psDevInfo->sDevFeatureCfg.ui64Features);
+ }
+
+ /* Initialise the device dependent bridges */
+ eError = DeviceDepBridgeInit(psDevInfo->sDevFeatureCfg.ui64Features);
+ PVR_LOG_IF_ERROR(eError, "DeviceDepBridgeInit");
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+ eError = OSLockCreate(&psDevInfo->hCounterDumpingLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock for counter sampling.", __func__));
+ goto e16;
+ }
+#endif
+
+ /* No partial FWCCB commands expected from the FW */
+ psDevInfo->ui32ExpectedPartialFWCCBCmd = RGXFWIF_FWCCB_CMD_PARTIAL_TYPE_NONE;
+
+ *ppsDevInfo = psDevInfo;
+
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_POWER_SAMPLING_VIA_DEBUGFS)
+e16:
+ RGXHWPerfDeinit(psDevInfo);
+#endif
+e15:
+#if !defined(NO_HARDWARE)
+ OSUnMapPhysToLin((void __force *) psDevInfo->pvRegsBaseKM,
+ psDevInfo->ui32RegSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+e14:
+#endif /* !NO_HARDWARE */
+ OSLockDestroy(psDevInfo->hCCBRecoveryLock);
+e13:
+ OSLockDestroy(psDevInfo->hCCBStallCheckLock);
+e12:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psDevInfo->hRGXFWIfBufInitLock);
+e11:
+ OSLockDestroy(psDevInfo->hBPLock);
+e10:
+ OSLockDestroy(psDevInfo->sRegCongfig.hLock);
+e9:
+#endif
+ OSWRLockDestroy(psDevInfo->hCommonCtxtListLock);
+ e8:
+ OSWRLockDestroy(psDevInfo->hMemoryCtxListLock);
+ e7:
+ OSLockDestroy(psDevInfo->hLockKCCBDeferredCommandsList);
+ e6:
+ OSWRLockDestroy(psDevInfo->hKickSyncCtxListLock);
+ e5:
+ OSWRLockDestroy(psDevInfo->hRaytraceCtxListLock);
+ e4:
+ OSWRLockDestroy(psDevInfo->hTDMCtxListLock);
+ e3:
+ OSWRLockDestroy(psDevInfo->hTransferCtxListLock);
+ e2:
+ OSWRLockDestroy(psDevInfo->hComputeCtxListLock);
+ e1:
+ OSWRLockDestroy(psDevInfo->hRenderCtxListLock);
+ e0:
+ OSFreeMem(psDevInfo);
+
+ /*Destroy the dummy page lock created above */
+ OSLockDestroy(psDeviceNode->sDummyPage.psDummyPgLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVRGXInitFinaliseFWImageKM(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, MIPS))
+ {
+ void *pvFWImage;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc, &pvFWImage);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXInitFinaliseFWImageKM: Acquire mapping for FW data failed (%u)",
+ eError));
+ return eError;
+ }
+
+ eError = RGXBootldrDataInit(psDeviceNode, pvFWImage);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXInitLoadFWImageKM: ELF parameters injection failed (%u)",
+ eError));
+ return eError;
+ }
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWDataMemDesc);
+
+ }
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDevVersionString
+@Description Gets the version string for the given device node and returns
+ a pointer to it in ppszVersionString. It is then the
+ responsibility of the caller to free this memory.
+@Input psDeviceNode Device node from which to obtain the
+ version string
+@Output ppszVersionString Contains the version string upon return
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXDevVersionString(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_CHAR **ppszVersionString)
+{
+#if defined(NO_HARDWARE) || defined(EMULATOR)
+ IMG_PCHAR pszFormatString = "Rogue Version: %s (SW)";
+#else
+ IMG_PCHAR pszFormatString = "Rogue Version: %s (HW)";
+#endif
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ size_t uiStringLength;
+
+ if (psDeviceNode == NULL || ppszVersionString == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ if (NULL == psDevInfo->sDevFeatureCfg.pszBVNCString)
+ {
+ IMG_CHAR pszBVNCInfo[MAX_BVNC_STRING_LEN];
+ size_t uiBVNCStringSize;
+
+		OSSNPrintf(pszBVNCInfo, MAX_BVNC_STRING_LEN, "%d.%d.%d.%d",
+				psDevInfo->sDevFeatureCfg.ui32B,
+				psDevInfo->sDevFeatureCfg.ui32V,
+				psDevInfo->sDevFeatureCfg.ui32N,
+				psDevInfo->sDevFeatureCfg.ui32C);
+
+ uiBVNCStringSize = (OSStringLength(pszBVNCInfo) + 1) * sizeof(IMG_CHAR);
+ psDevInfo->sDevFeatureCfg.pszBVNCString = OSAllocMem(uiBVNCStringSize);
+ if (NULL == psDevInfo->sDevFeatureCfg.pszBVNCString)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: Allocating memory for BVNC Info string failed",
+ __func__));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+		OSCachedMemCopy(psDevInfo->sDevFeatureCfg.pszBVNCString, pszBVNCInfo, uiBVNCStringSize);
+ }
+
+ uiStringLength = OSStringLength(psDevInfo->sDevFeatureCfg.pszBVNCString) +
+ OSStringLength(pszFormatString);
+ *ppszVersionString = OSAllocMem(uiStringLength);
+ if (*ppszVersionString == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ OSSNPrintf(*ppszVersionString, uiStringLength, pszFormatString,
+ psDevInfo->sDevFeatureCfg.pszBVNCString);
+
+ return PVRSRV_OK;
+}
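+
+/* Illustrative usage sketch (not part of the driver): per the description
+ * above, the caller owns the returned string and must release it. The
+ * ExampleLogDeviceVersion() helper below is hypothetical.
+ */
+#if 0
+static void ExampleLogDeviceVersion(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+	IMG_CHAR *pszVersion = NULL;
+
+	if (RGXDevVersionString(psDeviceNode, &pszVersion) == PVRSRV_OK)
+	{
+		PVR_LOG(("GPU: %s", pszVersion));
+		OSFreeMem(pszVersion); /* caller-owned memory */
+	}
+}
+#endif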
+
+/**************************************************************************/ /*!
+@Function RGXDevClockSpeed
+@Description Gets the clock speed for the given device node and returns
+ it in pui32RGXClockSpeed.
+@Input psDeviceNode Device node
+@Output pui32RGXClockSpeed Variable for storing the clock speed
+@Return PVRSRV_ERROR
+ */ /***************************************************************************/
+static PVRSRV_ERROR RGXDevClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_PUINT32 pui32RGXClockSpeed)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*) psDeviceNode->psDevConfig->hDevData;
+
+ /* get clock speed */
+ *pui32RGXClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+
+ return PVRSRV_OK;
+}
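+
+/* Illustrative usage sketch (not part of the driver): the query cannot fail
+ * in this implementation, so a caller may simply do:
+ *
+ *     IMG_UINT32 ui32ClockSpeed;
+ *     (void) RGXDevClockSpeed(psDeviceNode, &ui32ClockSpeed);
+ */
+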
+/*!
+ *******************************************************************************
+
+ @Function RGXVzInitCreateFWKernelMemoryContext
+
+ @Description Called to perform additional initialisation during firmware
+ kernel context creation.
+ ******************************************************************************/
+
+static PVRSRV_ERROR RGXVzInitCreateFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+ return RGXVzCreateFWKernelMemoryContext(psDeviceNode);
+}
+/*!
+ *******************************************************************************
+
+ @Function RGXVzDeInitDestroyFWKernelMemoryContext
+
+ @Description Called to perform additional deinitialisation during firmware
+ kernel context destruction.
+ ******************************************************************************/
+
+static void RGXVzDeInitDestroyFWKernelMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+ RGXVzDestroyFWKernelMemoryContext(psDeviceNode);
+}
+
+/*!
+ *******************************************************************************
+
+ @Function RGXVzInitHeaps
+
+ @Description Called to perform additional initialisation
+ ******************************************************************************/
+static PVRSRV_ERROR RGXVzInitHeaps(DEVICE_MEMORY_INFO *psNewMemoryInfo,
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor,
+ PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+#if defined(RGXFW_NUM_OS) && (1 < RGXFW_NUM_OS)
+ {
+ IMG_UINT32 uiIdx;
+ IMG_UINT32 uiStringLength;
+ IMG_UINT32 uiStringLengthMax = 32;
+
+ uiStringLength = MIN(sizeof(RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT), uiStringLengthMax + 1);
+
+ /* Create additional guest OSID firmware heaps */
+ for (uiIdx = 1; uiIdx < RGXFW_NUM_OS; uiIdx++)
+ {
+ /* Start by allocating memory for this guest OSID heap identification string */
+ psDeviceMemoryHeapCursor->pszName = OSAllocMem(uiStringLength * sizeof(IMG_CHAR));
+ if (psDeviceMemoryHeapCursor->pszName == NULL)
+ {
+				for (uiIdx = uiIdx - 1; uiIdx > 0; uiIdx--)
+				{
+					/* Step back before freeing so the last
+					   successfully-allocated name is released too */
+					psDeviceMemoryHeapCursor--;
+					OSFreeMem((void *)psDeviceMemoryHeapCursor->pszName);
+				}
+
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Append the guest OSID number to the RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT string */
+ OSSNPrintf((IMG_CHAR *)psDeviceMemoryHeapCursor->pszName, uiStringLength, RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT, uiIdx);
+
+ /* Use the common blueprint template support function to initialise the heap */
+ *psDeviceMemoryHeapCursor = _blueprint_init((IMG_CHAR *)psDeviceMemoryHeapCursor->pszName,
+ RGX_FIRMWARE_RAW_HEAP_BASE + (uiIdx * RGX_FIRMWARE_RAW_HEAP_SIZE),
+ RGX_FIRMWARE_RAW_HEAP_SIZE,
+ 0,
+ 0,
+ psDevInfo);
+
+ /* Append additional guest(s) firmware heap to host driver firmware context heap configuration */
+ psNewMemoryInfo->psDeviceMemoryHeapConfigArray[1].uiNumHeaps += 1;
+
+ /* advance to the next heap */
+ psDeviceMemoryHeapCursor++;
+ }
+ }
+#endif
+ return PVRSRV_OK;
+}
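+
+/* Illustrative note (not part of the driver): each guest OSID heap created
+ * above derives its identifier by printing the OSID into the
+ * RGX_FIRMWARE_GUEST_RAW_HEAP_IDENT template, and its base address is
+ * stepped per OSID, e.g. for OSID 2 (assuming the macros used above):
+ *
+ *     base = RGX_FIRMWARE_RAW_HEAP_BASE + (2 * RGX_FIRMWARE_RAW_HEAP_SIZE);
+ */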
+
+/*!
+ *******************************************************************************
+
+ @Function RGXVzDeInitHeaps
+
+ @Description Called to perform additional deinitialisation
+ ******************************************************************************/
+static void RGXVzDeInitHeaps(DEVICE_MEMORY_INFO *psDevMemoryInfo)
+{
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_NATIVE);
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+#if defined(RGXFW_NUM_OS) && (1 < RGXFW_NUM_OS)
+ {
+ IMG_UINT32 uiIdx;
+ IMG_UINT64 uiBase, uiSpan;
+ DEVMEM_HEAP_BLUEPRINT *psDeviceMemoryHeapCursor;
+ psDeviceMemoryHeapCursor = psDevMemoryInfo->psDeviceMemoryHeap;
+ uiBase = RGX_FIRMWARE_RAW_HEAP_BASE + RGX_FIRMWARE_RAW_HEAP_SIZE;
+ uiSpan = uiBase + ((RGXFW_NUM_OS - 1) * RGX_FIRMWARE_RAW_HEAP_SIZE);
+
+		for (uiIdx = 1; uiIdx < RGXFW_NUM_OS; psDeviceMemoryHeapCursor++)
+		{
+			/* Safe to do as the guest firmware heaps are last in the list;
+			   count only the heaps actually freed so that every guest heap
+			   identifier is released */
+			if (psDeviceMemoryHeapCursor->sHeapBaseAddr.uiAddr >= uiBase &&
+			    psDeviceMemoryHeapCursor->sHeapBaseAddr.uiAddr < uiSpan)
+			{
+				void *pszName = (void*)psDeviceMemoryHeapCursor->pszName;
+				OSFreeMem(pszName);
+				uiIdx += 1;
+			}
+		}
+ }
+#endif
+}
+
+/******************************************************************************
+ End of file (rgxinit.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxinit.h b/drivers/gpu/drm/img-rogue/1.10/rgxinit.h
new file mode 100644
index 00000000000000..624d909b788034
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxinit.h
@@ -0,0 +1,275 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX initialisation header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXINIT_H__)
+#define __RGXINIT_H__
+
+#include "connection_server.h"
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_bridge.h"
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitDevPart2KM
+
+ @Description
+
+ Second part of server-side RGX initialisation
+
+ @Input   psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXInitDevPart2KM (PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32DeviceFlags,
+ IMG_UINT32 ui32HWPerfHostBufSizeKB,
+ IMG_UINT32 ui32HWPerfHostFilter,
+ RGX_ACTIVEPM_CONF eActivePMConf);
+
+PVRSRV_ERROR PVRSRVRGXInitAllocFWImgMemKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEVMEM_SIZE_T ui32FWCodeLen,
+ IMG_DEVMEM_SIZE_T ui32FWDataLen,
+ IMG_DEVMEM_SIZE_T uiFWCorememLen,
+ PMR **ppsFWCodePMR,
+ IMG_DEV_VIRTADDR *psFWCodeDevVAddrBase,
+ PMR **ppsFWDataPMR,
+ IMG_DEV_VIRTADDR *psFWDataDevVAddrBase,
+ PMR **ppsFWCorememPMR,
+ IMG_DEV_VIRTADDR *psFWCorememDevVAddrBase,
+ RGXFWIF_DEV_VIRTADDR *psFWCorememMetaVAddrBase);
+
+PVRSRV_ERROR PVRSRVRGXInitMipsWrapperRegistersKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Remap1Config1Offset,
+ IMG_UINT32 ui32Remap1Config2Offset,
+ IMG_UINT32 ui32WrapperConfigOffset,
+ IMG_UINT32 ui32BootCodeOffset);
+PVRSRV_ERROR PVRSRVRGXPdumpBootldrDataInitKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32BootConfOffset,
+ IMG_UINT32 ui32ExceptionVectorsBaseAddress);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitFirmwareKM
+
+ @Description
+
+ Server-side RGX firmware initialisation
+
+ @Input   psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXInitFirmwareKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXFWIF_DEV_VIRTADDR *psRGXFwInit,
+ IMG_BOOL bEnableSignatureChecks,
+ IMG_UINT32 ui32SignatureChecksBufSize,
+ IMG_UINT32 ui32HWPerfFWBufSizeKB,
+ IMG_UINT64 ui64HWPerfFilter,
+ IMG_UINT32 ui32RGXFWAlignChecksArrLength,
+ IMG_UINT32 *pui32RGXFWAlignChecks,
+ IMG_UINT32 ui32ConfigFlags,
+ IMG_UINT32 ui32LogType,
+ IMG_UINT32 ui32FilterFlags,
+ IMG_UINT32 ui32JonesDisableMask,
+ IMG_UINT32 ui32HWRDebugDumpLimit,
+ RGXFWIF_COMPCHECKS_BVNC *psClientBVNC,
+ RGXFWIF_COMPCHECKS_BVNC *psFirmwareBVNC,
+ IMG_UINT32 ui32HWPerfCountersDataSize,
+ PMR **ppsHWPerfPMR,
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandingConf,
+ FW_PERF_CONF eFirmwarePerf,
+ IMG_UINT32 ui32ConfigFlagsExt);
+
+PVRSRV_ERROR PVRSRVRGXInitReleaseFWInitResourcesKM(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PMR *psFWCodePMR,
+ PMR *psFWDataPMR,
+ PMR *psFWCorePMR,
+ PMR *psHWPerfPMR);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitFinaliseFWImageKM
+
+ @Description
+
+ Perform final steps of FW code setup when necessary
+
+ @Input psDeviceNode - Device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+
+PVRSRV_ERROR
+PVRSRVRGXInitFinaliseFWImageKM(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXInitHWPerfCountersKM
+
+ @Description
+
+ Initialisation of the performance counters
+
+ @Input   psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXInitHWPerfCountersKM (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+
+ @Function RGXRegisterDevice
+
+ @Description
+
+ Registers the device with the system
+
+ @Input   psDeviceNode - device node
+ @Output  ppsDevInfo - device info
+
+ @Return  PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_RGXDEV_INFO **ppsDevInfo);
+
+
+/*!
+*******************************************************************************
+
+ @Function DevDeInitRGX
+
+ @Description
+
+ Reset and de-initialise the chip
+
+ @Input   psDeviceNode - device node
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR DevDeInitRGX(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#if !defined(NO_HARDWARE)
+
+void RGX_WaitForInterruptsTimeout(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/*!
+*******************************************************************************
+
+ @Function SORgxGpuUtilStatsRegister
+
+ @Description SO Interface function called from the OS layer implementation.
+ Initialise data used to compute GPU utilisation statistics
+ for a particular user (identified by the handle passed as
+ argument). This function must be called only once for each
+ different user/handle.
+
+ @Input phGpuUtilUser - Pointer to handle used to identify a user of
+ RGXGetGpuUtilStats
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function SORgxGpuUtilStatsUnregister
+
+ @Description SO Interface function called from the OS layer implementation.
+ Free data previously used to compute GPU utilisation statistics
+ for a particular user (identified by the handle passed as
+ argument).
+
+ @Input hGpuUtilUser - Handle used to identify a user of
+ RGXGetGpuUtilStats
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
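+
+/* Illustrative usage sketch (not part of the driver): the two SO interface
+ * calls above are expected to be paired per user; RGXGetGpuUtilStats() is
+ * the query they serve (declared elsewhere).
+ */
+#if 0
+static void ExampleGpuUtilStatsUser(void)
+{
+	IMG_HANDLE hGpuUtilUser = NULL;
+
+	if (SORgxGpuUtilStatsRegister(&hGpuUtilUser) == PVRSRV_OK)
+	{
+		/* ... query utilisation via RGXGetGpuUtilStats(hGpuUtilUser) ... */
+		SORgxGpuUtilStatsUnregister(hGpuUtilUser);
+	}
+}
+#endif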
+#endif /* !defined(NO_HARDWARE) */
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVGPUVIRTPopulateLMASubArenasKM
+
+ @Description Populates the LMA arenas based on the min max values passed by
+ the client during initialization. GPU Virtualisation Validation
+ only.
+
+ @Input pvDeviceNode : Pointer to a device info structure.
+ ui32NumElements : Total number of min / max values passed by
+ the client
+ pui32Elements : The array containing all the min / max values
+ passed by the client, all bundled together
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVGPUVIRTPopulateLMASubArenasKM(PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_BOOL bEnableTrustedDeviceAceConfig);
+#endif /* __RGXINIT_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxkicksync.c b/drivers/gpu/drm/img-rogue/1.10/rgxkicksync.c
new file mode 100644
index 00000000000000..562fb7fcd4f00d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxkicksync.c
@@ -0,0 +1,745 @@
+/*************************************************************************/ /*!
+@File rgxkicksync.c
+@Title Server side of the sync only kick API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "rgxkicksync.h"
+
+#include "rgxdevice.h"
+#include "rgxmem.h"
+#include "rgxfwutils.h"
+#include "allocmem.h"
+#include "sync.h"
+#include "rgxhwperf.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_KICKSYNC_UFO_DUMP 0
+
+//#define KICKSYNC_CHECKPOINT_DEBUG 1
+
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+struct _RGX_SERVER_KICKSYNC_CONTEXT_
+{
+ PVRSRV_DEVICE_NODE * psDeviceNode;
+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+ PVRSRV_CLIENT_SYNC_PRIM * psSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hIntJobRef;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+};
+
+
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_KICKSYNC_CONTEXT ** ppsKickSyncContext)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext;
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Prepare cleanup struct */
+ * ppsKickSyncContext = NULL;
+ psKickSyncContext = OSAllocZMem(sizeof(*psKickSyncContext));
+ if (psKickSyncContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psKickSyncContext->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto err_lockcreate;
+ }
+#endif
+
+ psKickSyncContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ & psKickSyncContext->psSync,
+ "kick sync cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXCreateKickSyncContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ sInfo.psFWFrameworkMemDesc = NULL;
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_KICKSYNC,
+ RGXFWIF_DM_GP,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_KICKSYNC_CCB_SIZE_LOG2,
+ 0, /* priority */
+ & sInfo,
+ & psKickSyncContext->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sKickSyncCtxtListHead), &(psKickSyncContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+ SyncAddrListInit(&psKickSyncContext->sSyncAddrListFence);
+ SyncAddrListInit(&psKickSyncContext->sSyncAddrListUpdate);
+
+ * ppsKickSyncContext = psKickSyncContext;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psKickSyncContext->hLock);
+err_lockcreate:
+#endif
+ OSFreeMem(psKickSyncContext);
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO * psDevInfo = psKickSyncContext->psDeviceNode->pvDevice;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psKickSyncContext->psDeviceNode,
+ psKickSyncContext->psServerCommonContext,
+ psKickSyncContext->psSync,
+ RGXFWIF_DM_3D,
+ PDUMP_FLAGS_NONE);
+
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has so we can free its resources */
+
+ OSWRLockAcquireWrite(psDevInfo->hKickSyncCtxListLock);
+ dllist_remove_node(&(psKickSyncContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hKickSyncCtxListLock);
+
+ FWCommonContextFree(psKickSyncContext->psServerCommonContext);
+ SyncPrimFree(psKickSyncContext->psSync);
+
+ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListFence);
+ SyncAddrListDeinit(&psKickSyncContext->sSyncAddrListUpdate);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psKickSyncContext->hLock);
+#endif
+
+ OSFreeMem(psKickSyncContext);
+
+ return PVRSRV_OK;
+}
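+
+/* Note: PVRSRV_ERROR_RETRY from the destroy path above means the firmware
+ * has not yet finished with the context; the caller is expected to call
+ * PVRSRVRGXDestroyKickSyncContextKM() again later rather than freeing any
+ * resources itself.
+ */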
+
+void CheckForStalledKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
+ dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);
+
+ if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
+ {
+ DumpStalledFWCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+ OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hKickSyncCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sKickSyncCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_KICKSYNC_CONTEXT *psCurrentServerKickSyncCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_KICKSYNC_CONTEXT, sListNode);
+
+ if (NULL != psCurrentServerKickSyncCtx->psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerKickSyncCtx->psServerCommonContext, RGX_KICK_TYPE_DM_GP) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_GP;
+ }
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hKickSyncCtxListLock);
+ return ui32ContextBitMask;
+}
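+
+/* Illustrative usage sketch (not part of the driver): the returned bitmask
+ * can be tested against the kick type of interest, e.g.
+ *
+ *     if (CheckForStalledClientKickSyncCtxt(psDevInfo) & RGX_KICK_TYPE_DM_GP)
+ *     {
+ *         // at least one kick sync context has a stalled client CCB
+ *     }
+ */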
+
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContext,
+
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** pasServerSyncs,
+
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE * piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+
+ IMG_UINT32 ui32ExtJobRef)
+{
+ RGXFWIF_KCCB_CMD sKickSyncKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asCmdHelperData[1];
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ IMG_UINT32 i;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+ PRGXFWIF_UFO_ADDR *pauiClientFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress = NULL;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ IMG_DEV_VIRTADDR sRobustnessResetReason = {0};
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psKickSyncContext->hLock);
+#endif
+
+ ui32IntJobRef = OSAtomicIncrement(&psKickSyncContext->hIntJobRef);
+
+ eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceOffset);
+
+	if (eError != PVRSRV_OK)
+ {
+ goto fail_syncaddrlist;
+ }
+
+ if (ui32ClientFenceCount > 0)
+ {
+ pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+ }
+
+ eError = SyncAddrListPopulate(&psKickSyncContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateOffset);
+
+	if (eError != PVRSRV_OK)
+ {
+ goto fail_syncaddrlist;
+ }
+
+ if (ui32ClientUpdateCount > 0)
+ {
+ pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+
+ /* Sanity check the server fences */
+ for (i = 0; i < ui32ServerSyncPrims; i++)
+ {
+ if (0 == (paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+			PVR_DPF((PVR_DBG_ERROR, "%s: Server sync passed to Kick Sync must have the check (fence) flag set", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ goto out_unlock;
+ }
+ }
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szUpdateFenceName[31] = '\0';
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* This will never be true if called from the bridge since piUpdateFence will always be valid */
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto out_unlock;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psKickSyncContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psKickSyncContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_resolve_fence;
+ }
+
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateTimeline=%d)...", __FUNCTION__, iUpdateTimeline));
+ eError = SyncCheckpointCreateFence(psKickSyncContext->psDeviceNode,
+ szUpdateFenceName,
+ iUpdateTimeline,
+ psKickSyncContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned error (%d)", __FUNCTION__, eError));
+ goto fail_create_output_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=%u)", __FUNCTION__, iUpdateFence, psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*paui32ClientUpdateValue) * (ui32ClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32ClientUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32ClientUpdateCount);
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32ClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32ClientUpdateCount++;
+ /* Now make sure paui32ClientUpdateValue points to pui32IntAllocatedUpdateValues */
+ paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32ClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now append the timeline sync prim addr to the kicksync context update list */
+ SyncAddrListAppendSyncPrim(&psKickSyncContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+ }
+ }
+
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ /* Append the checks (from input fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to KickSync Fence (&psKickSyncContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psKickSyncContext->sSyncAddrListFence));
+ SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiClientFenceUFOAddress)
+ {
+ pauiClientFenceUFOAddress = psKickSyncContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32ClientFenceCount += ui32FenceSyncCheckpointCount;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientFenceUFOAddress;
+
+ for (iii=0; iii<ui32ClientFenceCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientFenceUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+
+ if (psUpdateSyncCheckpoint)
+ {
+ PVRSRV_ERROR eErr;
+
+ /* Append the update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to KickSync Update (&psKickSyncContext->sSyncAddrListUpdate=<%p>)...", __FUNCTION__, (void*)&psKickSyncContext->sSyncAddrListUpdate));
+ eErr = SyncAddrListAppendCheckpoints(&psKickSyncContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (eErr != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done. SyncAddrListAppendCheckpoints() returned error (%d)", __FUNCTION__, eErr));
+ }
+ else
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done.", __FUNCTION__));
+ }
+ if (!pauiClientUpdateUFOAddress)
+ {
+ pauiClientUpdateUFOAddress = psKickSyncContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32ClientUpdateCount++;
+#if defined(KICKSYNC_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiClientUpdateUFOAddress;
+
+ for (iii=0; iii<ui32ClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientUpdateUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_KICKSYNC_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping KICKSYNC fence/updates syncs...", __FUNCTION__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiClientFenceUFOAddress;
+ IMG_UINT32 *pui32TmpIntFenceValue = paui32ClientFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiClientUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32ClientUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d KickSync fence syncs (&psKickSyncContext->sSyncAddrListFence=<%p>, pauiClientFenceUFOAddress=<%p>):", __FUNCTION__, ui32ClientFenceCount, (void*)&psKickSyncContext->sSyncAddrListFence, (void*)pauiClientFenceUFOAddress));
+ for (ii=0; ii<ui32ClientFenceCount; ii++)
+ {
+ if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+ pui32TmpIntFenceValue++;
+ }
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d KickSync update syncs (&psKickSyncContext->sSyncAddrListUpdate=<%p>, pauiClientUpdateUFOAddress=<%p>):", __FUNCTION__, ui32ClientUpdateCount, (void*)&psKickSyncContext->sSyncAddrListUpdate, (void*)pauiClientUpdateUFOAddress));
+ for (ii=0; ii<ui32ClientUpdateCount; ii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Line %d, psTmpIntUpdateUFOAddress=<%p>", __FUNCTION__, __LINE__, (void*)psTmpIntUpdateUFOAddress));
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Line %d, pui32TmpIntUpdateValue=<%p>", __FUNCTION__, __LINE__, (void*)pui32TmpIntUpdateValue));
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32ClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext),
+ ui32ClientFenceCount,
+ pauiClientFenceUFOAddress,
+ paui32ClientFenceValue,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOAddress,
+ paui32ClientUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_NULL,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ PDUMP_FLAGS_NONE,
+ NULL,
+ "KickSync",
+ bCCBStateOpen,
+ asCmdHelperData,
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdinit;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asCmdHelperData), asCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdaquire;
+ }
+
+	/*
+	 * We should reserve space in the kernel CCB here and fill in the command
+	 * directly.
+	 * That way, if there isn't space in the kernel CCB, we can return with
+	 * retry to the services client before committing any operations.
+	 */
+
+	/*
+	 * We might only be kicking to flush out a padding packet, so only submit
+	 * the command if the create was successful.
+	 */
+ if (eError == PVRSRV_OK)
+ {
+ /*
+ * All the required resources are ready at this point, we can't fail so
+ * take the required server sync operations and commit all the resources
+ */
+ RGXCmdHelperReleaseCmdCCB(1,
+ asCmdHelperData,
+ "KickSync",
+ FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext).ui32Addr);
+ }
+
+ /* Construct the kernel kicksync CCB command. */
+ sKickSyncKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psKickSyncContext->psServerCommonContext);
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psKickSyncContext->psServerCommonContext));
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ sKickSyncKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+
+ /*
+ * Submit the kicksync command to the firmware.
+ */
+ RGX_HWPERF_HOST_ENQ(psKickSyncContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_SYNC,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psKickSyncContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ & sKickSyncKCCBCmd,
+ sizeof(sKickSyncKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ PDUMP_FLAGS_NONE);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psKickSyncContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_SYNC);
+#endif
+
+	if (eError2 != PVRSRV_OK)
+	{
+		PVR_DPF((PVR_DBG_ERROR,
+		        "PVRSRVRGXKickSync failed to schedule kernel CCB command. (0x%x)",
+		        eError2));
+	}
+
+ /*
+ * Now check eError (which may have returned an error from our earlier call
+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+ * so we check it now...
+ */
+	if (eError != PVRSRV_OK)
+ {
+ goto fail_cmdaquire;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ *piUpdateFence = iUpdateFence;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psKickSyncContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+fail_cmdaquire:
+fail_cmdinit:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psKickSyncContext->psDeviceNode, &psKickSyncContext->sSyncAddrListUpdate);
+	if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+
+ /* Free memory allocated to hold update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ }
+fail_alloc_update_values_mem:
+fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free memory allocated to hold the resolved fence's checkpoints */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+fail_resolve_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+fail_syncaddrlist:
+out_unlock:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psKickSyncContext->hLock);
+#endif
+ return eError;
+}
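+
+/* Note: the kick path above resolves the input fence into sync checkpoints,
+ * optionally creates an output fence on iUpdateTimeline, submits a NULL CCB
+ * command to the GP data master, and finalises the output fence before
+ * returning it through piUpdateFence.
+ */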
+
+
+/**************************************************************************//**
+ End of file (rgxkicksync.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxkicksync.h b/drivers/gpu/drm/img-rogue/1.10/rgxkicksync.h
new file mode 100644
index 00000000000000..77bd1d53736544
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxkicksync.h
@@ -0,0 +1,125 @@
+/*************************************************************************/ /*!
+@File rgxkicksync.h
+@Title Server side of the sync only kick API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXKICKSYNC_H__)
+#define __RGXKICKSYNC_H__
+
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "sync_server.h"
+#include "rgxdevice.h"
+
+
+typedef struct _RGX_SERVER_KICKSYNC_CONTEXT_ RGX_SERVER_KICKSYNC_CONTEXT;
+
+/**************************************************************************/ /*!
+@Function CheckForStalledKickSyncCtxt
+@Description Function that checks if a kick sync ctx is stalled
+@Return none
+ */ /**************************************************************************/
+void CheckForStalledKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/**************************************************************************/ /*!
+@Function CheckForStalledClientKickSyncCtxt
+@Description Function that checks if a kick sync client is stalled
+@Return RGX_KICK_TYPE_DM_GP on stalled context. Otherwise, 0
+ */ /**************************************************************************/
+IMG_UINT32 CheckForStalledClientKickSyncCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXCreateKickSyncContextKM
+@Description Server-side implementation of RGXCreateKicksyncContext
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateKickSyncContextKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_KICKSYNC_CONTEXT ** ppsKicksyncContext);
+
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXDestroyKickSyncContextKM
+@Description Server-side implementation of RGXDestroyKicksyncContext
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyKickSyncContextKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXKickSyncKM
+@Description Kicks a sync only command
+@Return PVRSRV_OK on success. Otherwise, a PVRSRV_ error code
+ */ /**************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickSyncKM(RGX_SERVER_KICKSYNC_CONTEXT * psKicksyncContext,
+
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceSyncOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** pasServerSyncs,
+
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE * piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+
+ IMG_UINT32 ui32ExtJobRef);
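+
+/* Note: pass PVRSRV_NO_TIMELINE as iUpdateTimeline when no output fence is
+ * required; szUpdateFenceName is truncated to 31 characters plus the NUL
+ * terminator by the implementation.
+ */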
+
+#endif /* __RGXKICKSYNC_H__ */
+
+/**************************************************************************//**
+ End of file (rgxkicksync.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxlayer.h b/drivers/gpu/drm/img-rogue/1.10/rgxlayer.h
new file mode 100644
index 00000000000000..35d3e5dc39f07f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxlayer.h
@@ -0,0 +1,740 @@
+/*************************************************************************/ /*!
+@File
+@Title Header for Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Declaration of an interface layer used to abstract code that
+ can be compiled outside of the DDK, potentially in a
+ completely different OS.
+ All the headers included by this file must also be copied to
+ the alternative source tree.
+ All the functions declared here must have a DDK implementation
+ inside the DDK source tree (e.g. rgxlayer_impl.h/.c) and
+ another different implementation in case they are used outside
+ of the DDK.
+ All of the functions accept as a first parameter a
+ "const void *hPrivate" argument. It should be used to pass
+ around any implementation specific data required.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXLAYER_H__)
+#define __RGXLAYER_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#include "img_defs.h"
+#include "img_types.h"
+#include "pvrsrv_error.h" /* includes pvrsrv_errors.h */
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgx_firmware_processor.h"
+/* includes:
+ * rgx_meta.h and rgx_mips.h,
+ * rgxdefs_km.h,
+ * rgx_cr_defs_km.h,
+ * RGX_BVNC_CORE_KM_HEADER (rgxcore_km_B.V.N.C.h),
+ * RGX_BNC_CONFIG_KM_HEADER (rgxconfig_km_B.V.N.C.h)
+ */
+
+#include "rgx_fwif_shared.h"
+/* FIXME: required because of RGXFWIF_DEV_VIRTADDR, but this header
+ * includes a lot of other headers. RGXFWIF_DEV_VIRTADDR should be moved
+ * somewhere else (either img_types.h or a new header). */
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXMemCopy
+
+ @Description MemCopy implementation
+
+ @Input hPrivate : Implementation specific data
+ @Input pvDst : Pointer to the destination
+ @Input pvSrc : Pointer to the source location
+ @Input uiSize : The amount of memory to copy in bytes
+
+ @Return void
+
+******************************************************************************/
+void RGXMemCopy(const void *hPrivate,
+ void *pvDst,
+ void *pvSrc,
+ size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function RGXMemSet
+
+ @Description MemSet implementation
+
+ @Input hPrivate : Implementation specific data
+ @Input pvDst : Pointer to the start of the memory region
+ @Input ui8Value : The value to be written
+ @Input uiSize : The number of bytes to be set to ui8Value
+
+ @Return void
+
+******************************************************************************/
+void RGXMemSet(const void *hPrivate,
+ void *pvDst,
+ IMG_UINT8 ui8Value,
+ size_t uiSize);
+
+/*!
+*******************************************************************************
+
+ @Function RGXCommentLog
+
+ @Description Generic log function used for debugging or other purposes
+
+ @Input hPrivate : Implementation specific data
+ @Input pszString : Message to be printed
+ @Input ... : Variadic arguments
+
+ @Return void
+
+******************************************************************************/
+void RGXCommentLog(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...) __printf(2, 3);
+
+/*!
+*******************************************************************************
+
+ @Function RGXErrorLog
+
+ @Description Generic error log function used for debugging or other purposes
+
+ @Input hPrivate : Implementation specific data
+ @Input pszString : Message to be printed
+ @Input ... : Variadic arguments
+
+ @Return void
+
+******************************************************************************/
+void RGXErrorLog(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...) __printf(2, 3);
+
+/* This is used to get the value of a specific feature from hprivate.
+ * Should be used instead of calling RGXDeviceHasFeature. */
+#define RGX_DEVICE_HAS_FEATURE(hPrivate, Feature) \
+ RGXDeviceHasFeature(hPrivate, RGX_FEATURE_##Feature##_BIT_MASK)
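+
+/* Illustrative usage sketch (not part of the driver): the macro expands the
+ * feature name into its RGX_FEATURE_..._BIT_MASK constant, so callers write
+ * e.g.
+ *
+ *     if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+ *     {
+ *         // core has a MIPS firmware processor
+ *     }
+ */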
+
+/*!
+*******************************************************************************
+
+ @Function RGXDeviceHasFeature
+
+ @Description Checks if a device has a particular feature
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64Feature : Feature to check
+
+ @Return IMG_TRUE if the given feature is available, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetFWCorememSize
+
+ @Description Get the FW coremem size
+
+ @Input hPrivate : Implementation specific data
+
+ @Return FW coremem size
+
+******************************************************************************/
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXWriteReg32/64
+
+ @Description Write a value to a 32/64 bit RGX register
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui32/64RegValue : New register value
+
+ @Return void
+
+******************************************************************************/
+void RGXWriteReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue);
+
+void RGXWriteReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue);
+
+/*!
+*******************************************************************************
+
+ @Function RGXReadReg32/64
+
+ @Description Read a 32/64 bit RGX register
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+
+ @Return Register value
+
+******************************************************************************/
+IMG_UINT32 RGXReadReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr);
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr);
+
+/*!
+*******************************************************************************
+
+ @Function        RGXReadModifyWriteReg64
+
+ @Description     Read-modify-write a 64 bit RGX register
+
+ @Input           hPrivate : Implementation specific data.
+ @Input           ui32RegAddr : Register offset inside the register bank.
+ @Input           ui64RegValue : New register value.
+ @Input           ui64RegKeepMask : Bits set in this mask keep their current
+                                    register value.
+
+ @Return          Always returns PVRSRV_OK
+
+******************************************************************************/
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegKeepMask);
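+
+/* Illustrative note (not part of the driver): with ui64RegKeepMask naming
+ * the bits to preserve, the expected effect of the call is
+ *
+ *     new = (old & ui64RegKeepMask) | (ui64RegValue & ~ui64RegKeepMask);
+ *
+ * i.e. masked bits keep their current value and all other bits are replaced
+ * by ui64RegValue.
+ */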
+
+/*!
+*******************************************************************************
+
+ @Function RGXPollReg32/64
+
+ @Description Poll on a 32/64 bit RGX register until some bits are set/unset
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui32/64RegValue : Value expected from the register
+ @Input ui32/64RegMask : Only the bits set in this mask will be
+ checked against uiRegValue
+
+ @Return PVRSRV_OK if the poll succeeds,
+ PVRSRV_ERROR_TIMEOUT if the poll takes too long
+
+******************************************************************************/
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask);
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask);
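+
+/* Illustrative usage (adapted from RGXWriteMetaRegThroughSP in
+ * rgxlayer_impl.c): wait for the META slave port READY bit, ignoring
+ * every other bit of the register:
+ *
+ *   eError = RGXPollReg32(hPrivate,
+ *                         RGX_CR_META_SP_MSLVCTRL1,
+ *                         RGX_CR_META_SP_MSLVCTRL1_READY_EN,
+ *                         RGX_CR_META_SP_MSLVCTRL1_READY_EN);
+ */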
+
+/*!
+*******************************************************************************
+
+ @Function RGXWaitCycles
+
+ @Description Wait for a number of GPU cycles and/or microseconds
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Cycles : Number of GPU cycles to wait for in pdumps;
+ it can also be used when running driver-live
+ if desired (in which case the next parameter
+ is ignored)
+ @Input ui32WaitUs : Number of microseconds to wait for when running
+ driver-live
+
+ @Return void
+
+******************************************************************************/
+void RGXWaitCycles(const void *hPrivate,
+ IMG_UINT32 ui32Cycles,
+ IMG_UINT32 ui32WaitUs);
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireKernelMMUPC
+
+ @Description Acquire the Kernel MMU Page Catalogue device physical address
+
+ @Input hPrivate : Implementation specific data
+ @Input psPCAddr : Returned page catalog address
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXWriteKernelMMUPC32/64
+
+ @Description Write the Kernel MMU Page Catalogue to the 32/64 bit
+ RGX register passed as argument.
+ In a driver-live scenario without PDump these functions
+ are the same as RGXWriteReg32/64 and they don't need
+ to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32PCReg : Register offset inside the register bank
+ @Input ui32AlignShift : PC register alignshift
+ @Input ui32Shift : PC register shift
+ @Input ui32/64PCVal : Page catalog value (aligned and shifted)
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT64 ui64PCVal);
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT32 ui32PCVal);
+#else /* defined(PDUMP) */
+
+#define RGXWriteKernelMMUPC64(priv, pcreg, alignshift, shift, pcval) \
+ RGXWriteReg64(priv, pcreg, pcval)
+
+#define RGXWriteKernelMMUPC32(priv, pcreg, alignshift, shift, pcval) \
+ RGXWriteReg32(priv, pcreg, pcval)
+
+#endif /* defined(PDUMP) */
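+
+/* Note: in the non-PDump variants above, the alignshift and shift arguments
+ * are deliberately unused; they are only needed by the PDump variants to
+ * capture the page catalogue base (see MMU_PDumpWritePageCatBase in
+ * rgxlayer_impl.c). */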
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireGPURegsAddr
+
+ @Description Acquire the GPU registers base device physical address
+
+ @Input hPrivate : Implementation specific data
+ @Input psGPURegsAddr : Returned GPU registers base address
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXMIPSWrapperConfig
+
+ @Description Write GPU register bank transaction ID and MIPS boot mode
+ to the MIPS wrapper config register (passed as argument).
+ In a driver-live scenario without PDump this is the same as
+ RGXWriteReg64 and it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32RegAddr : Register offset inside the register bank
+ @Input ui64GPURegsAddr : GPU registers base address
+ @Input ui32GPURegsAlign : Register bank transactions alignment
+ @Input ui32BootMode : MIPS boot ISA mode
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64GPURegsAddr,
+ IMG_UINT32 ui32GPURegsAlign,
+ IMG_UINT32 ui32BootMode);
+#else
+#define RGXMIPSWrapperConfig(priv, regaddr, gpuregsaddr, gpuregsalign, bootmode) \
+ RGXWriteReg64(priv, regaddr, ((gpuregsaddr) >> (gpuregsalign)) | (bootmode))
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireBootRemapAddr
+
+ @Description Acquire the device physical address of the MIPS bootloader
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psBootRemapAddr : Base address of the remapped bootloader
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXBootRemapConfig
+
+ @Description Configure the bootloader remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 calls, so it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXBootRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXBootRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireCodeRemapAddr
+
+ @Description Acquire the device physical address of the MIPS code
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psCodeRemapAddr : Base address of the remapped code
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXCodeRemapConfig
+
+ @Description Configure the code remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 calls, so it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXCodeRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXCodeRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireDataRemapAddr
+
+ @Description Acquire the device physical address of the MIPS data
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psDataRemapAddr : Base address of the remapped data
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDataRemapConfig
+
+ @Description Configure the data remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 calls, so it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXDataRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXDataRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXAcquireTrampolineRemapAddr
+
+ @Description Acquire the device physical address of the MIPS trampoline
+ accessed through remap region
+
+ @Input hPrivate : Implementation specific data
+ @Output psTrampolineRemapAddr: Base address of the remapped trampoline
+
+ @Return void
+
+******************************************************************************/
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr);
+
+/*!
+*******************************************************************************
+
+ @Function RGXTrampolineRemapConfig
+
+ @Description Configure the trampoline remap registers passed as arguments.
+ In a driver-live scenario without PDump this is the same as
+ two RGXWriteReg64 calls, so it doesn't need to be reimplemented.
+
+ @Input hPrivate : Implementation specific data
+ @Input ui32Config1RegAddr : Remap config1 register offset
+ @Input ui64Config1RegValue : Remap config1 register value
+ @Input ui32Config2RegAddr : Remap config2 register offset
+ @Input ui64Config2PhyAddr : Output remapped aligned physical address
+ @Input ui64Config2PhyMask : Mask for the output physical address
+ @Input ui64Config2Settings : Extra settings for this remap region
+
+ @Return void
+
+******************************************************************************/
+#if defined(PDUMP)
+void RGXTrampolineRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings);
+#else
+#define RGXTrampolineRemapConfig(priv, c1reg, c1val, c2reg, c2phyaddr, c2phymask, c2settings) do { \
+ RGXWriteReg64(priv, c1reg, (c1val)); \
+ RGXWriteReg64(priv, c2reg, ((c2phyaddr) & (c2phymask)) | (c2settings)); \
+ } while (0)
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXDoFWSlaveBoot
+
+ @Description Returns whether or not a FW Slave Boot is required
+ while powering on
+
+ @Input hPrivate : Implementation specific data
+
+ @Return IMG_BOOL
+
+******************************************************************************/
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXFabricCoherencyTest
+
+ @Description Performs a coherency test
+
+ @Input hPrivate : Implementation specific data
+
+ @Return PVRSRV_OK if the test succeeds,
+ PVRSRV_ERROR_INIT_FAILURE if the test fails at some point
+
+******************************************************************************/
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate);
+
+/* These are used to check whether a specific ERN/BRN is present, based on
+ * hPrivate. Should be used instead of calling RGXDeviceHasErnBrn directly. */
+#define RGX_DEVICE_HAS_ERN(hPrivate, ERN) \
+ RGXDeviceHasErnBrn(hPrivate, HW_ERN_##ERN##_BIT_MASK)
+
+#define RGX_DEVICE_HAS_BRN(hPrivate, BRN) \
+ RGXDeviceHasErnBrn(hPrivate, FIX_HW_BRN_##BRN##_BIT_MASK)
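+
+/* Illustrative usage (as in RGXFabricCoherencyTest in rgxlayer_impl.c):
+ *
+ *   if (RGX_DEVICE_HAS_ERN(hPrivate, 45914)) { ... }
+ *
+ * which expands to RGXDeviceHasErnBrn(hPrivate, HW_ERN_45914_BIT_MASK).
+ */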
+
+/*!
+*******************************************************************************
+
+ @Function RGXDeviceHasErnBrn
+
+ @Description Checks if a device has a particular erratum/enhancement (ERN/BRN)
+
+ @Input hPrivate : Implementation specific data
+ @Input ui64ErnsBrns : Flags to check
+
+ @Return IMG_TRUE if the given ERN/BRN is present, IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDeviceSLCBanks
+
+ @Description Returns the number of SLC banks used by the device
+
+ @Input hPrivate : Implementation specific data
+
+ @Return Number of SLC banks
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDeviceSLCSize
+
+ @Description Returns the device SLC size
+
+ @Input hPrivate : Implementation specific data
+
+ @Return SLC size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDeviceCacheLineSize
+
+ @Description Returns the device cache line size
+
+ @Input hPrivate : Implementation specific data
+
+ @Return Cache line size
+
+******************************************************************************/
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXGetDevicePhysBusWidth
+
+ @Description Returns the device physical bus width
+
+ @Input hPrivate : Implementation specific data
+
+ @Return Physical bus width
+
+******************************************************************************/
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXDevicePA0IsValid
+
+ @Description Returns true if the device physical address 0x0 is a valid
+ address and can be accessed by the GPU.
+
+ @Input hPrivate : Implementation specific data
+
+ @Return IMG_TRUE if device physical address 0x0 is a valid address,
+ IMG_FALSE otherwise
+
+******************************************************************************/
+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* !defined (__RGXLAYER_H__) */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.c b/drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.c
new file mode 100644
index 00000000000000..53a7892a43630d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.c
@@ -0,0 +1,1186 @@
+/*************************************************************************/ /*!
+@File
+@Title DDK implementation of the Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description DDK implementation of the Services abstraction layer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "rgxlayer_impl.h"
+#include "osfunc.h"
+#include "pdump_km.h"
+#include "rgxfwutils.h"
+#include "devicemem.h"
+#include "cache_km.h"
+#include "pmr.h"
+
+#if defined(PDUMP)
+#include <stdarg.h>
+#endif
+
+void RGXMemCopy(const void *hPrivate,
+ void *pvDst,
+ void *pvSrc,
+ size_t uiSize)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ OSDeviceMemCopy(pvDst, pvSrc, uiSize);
+}
+
+void RGXMemSet(const void *hPrivate,
+ void *pvDst,
+ IMG_UINT8 ui8Value,
+ size_t uiSize)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ OSDeviceMemSet(pvDst, ui8Value, uiSize);
+}
+
+void RGXCommentLog(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...)
+{
+#if defined(PDUMP)
+ va_list argList;
+ va_start(argList, pszString);
+ PDumpCommentWithFlagsVA(PDUMP_FLAGS_CONTINUOUS, pszString, argList);
+ va_end(argList);
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+#else
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ PVR_UNREFERENCED_PARAMETER(pszString);
+#endif
+}
+
+void RGXErrorLog(const void *hPrivate,
+ const IMG_CHAR *pszString,
+ ...)
+{
+ IMG_CHAR szBuffer[PVR_MAX_DEBUG_MESSAGE_LEN];
+ va_list argList;
+
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+
+ va_start(argList, pszString);
+ vsnprintf(szBuffer, sizeof(szBuffer), pszString, argList);
+ va_end(argList);
+
+ PVR_DPF((PVR_DBG_ERROR, "%s", szBuffer));
+}
+
+IMG_UINT32 RGXGetFWCorememSize(const void *hPrivate)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32CorememSize = 0;
+
+ PVR_ASSERT(hPrivate != NULL);
+
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, META_COREMEM_SIZE))
+ {
+ ui32CorememSize = RGX_GET_FEATURE_VALUE(psDevInfo, META_COREMEM_SIZE);
+ }
+
+ return ui32CorememSize;
+}
+
+void RGXWriteReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+ {
+ OSWriteHWReg32(pvRegsBase, ui32RegAddr, ui32RegValue);
+ }
+
+ PDUMPREG32(RGX_PDUMPREG_NAME, ui32RegAddr, ui32RegValue, psParams->ui32PdumpFlags);
+}
+
+void RGXWriteReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT64 ui64RegValue)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+ {
+ OSWriteHWReg64(pvRegsBase, ui32RegAddr, ui64RegValue);
+ }
+
+ PDUMPREG64(RGX_PDUMPREG_NAME, ui32RegAddr, ui64RegValue, psParams->ui32PdumpFlags);
+}
+
+IMG_UINT32 RGXReadReg32(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+ IMG_UINT32 ui32RegValue;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+ {
+ ui32RegValue = IMG_UINT32_MAX;
+ }
+ else
+#endif
+ {
+ ui32RegValue = OSReadHWReg32(pvRegsBase, ui32RegAddr);
+ }
+
+ PDUMPREGREAD32(RGX_PDUMPREG_NAME, ui32RegAddr, psParams->ui32PdumpFlags);
+
+ return ui32RegValue;
+}
+
+IMG_UINT64 RGXReadReg64(const void *hPrivate, IMG_UINT32 ui32RegAddr)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+ IMG_UINT64 ui64RegValue;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if (psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW)
+ {
+ ui64RegValue = IMG_UINT64_MAX;
+ }
+ else
+#endif
+ {
+ ui64RegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+ }
+
+ PDUMPREGREAD64(RGX_PDUMPREG_NAME, ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+ return ui64RegValue;
+}
+
+IMG_UINT32 RGXReadModifyWriteReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 uiRegValueNew,
+ IMG_UINT64 uiRegKeepMask)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+ /* Clear the keep-mask bits in the new value; those bits are preserved from the current register value */
+ uiRegValueNew &= ~uiRegKeepMask;
+
+#if defined(PDUMP)
+ /* Store register offset to temp PDump variable */
+ PDumpRegRead64ToInternalVar(RGX_PDUMPREG_NAME, ":SYSMEM:$1", ui32RegAddr, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Keep the bits set in the mask */
+ PDumpWriteVarANDValueOp(":SYSMEM:$1", uiRegKeepMask, PDUMP_FLAGS_CONTINUOUS);
+
+ /* OR the new values */
+ PDumpWriteVarORValueOp(":SYSMEM:$1", uiRegValueNew, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Do the actual register write */
+ PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", 0);
+
+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+
+ {
+ IMG_UINT64 uiRegValue = OSReadHWReg64(pvRegsBase, ui32RegAddr);
+ uiRegValue &= uiRegKeepMask;
+ OSWriteHWReg64(pvRegsBase, ui32RegAddr, uiRegValue | uiRegValueNew);
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg32(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32 ui32RegValue,
+ IMG_UINT32 ui32RegMask)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+ ui32RegValue,
+ ui32RegMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg32: Poll for Reg (0x%x) failed", ui32RegAddr));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ ui32RegAddr,
+ ui32RegValue,
+ ui32RegMask,
+ psParams->ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXPollReg64(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void __iomem *pvRegsBase;
+
+ /* Split lower and upper words */
+ IMG_UINT32 ui32UpperValue = (IMG_UINT32) (ui64RegValue >> 32);
+ IMG_UINT32 ui32LowerValue = (IMG_UINT32) (ui64RegValue);
+ IMG_UINT32 ui32UpperMask = (IMG_UINT32) (ui64RegMask >> 32);
+ IMG_UINT32 ui32LowerMask = (IMG_UINT32) (ui64RegMask);
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+ pvRegsBase = psDevInfo->pvRegsBaseKM;
+
+#if defined(PDUMP)
+ if (!(psParams->ui32PdumpFlags & PDUMP_FLAGS_NOHW))
+#endif
+ {
+ if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr + 4),
+ ui32UpperValue,
+ ui32UpperMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)((IMG_UINT8 __iomem *)pvRegsBase + ui32RegAddr),
+ ui32LowerValue,
+ ui32LowerMask) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPollReg64: Poll for upper part of Reg (0x%x) failed", ui32RegAddr));
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+ }
+
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ ui32RegAddr + 4,
+ ui32UpperValue,
+ ui32UpperMask,
+ psParams->ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+
+ PDUMPREGPOL(RGX_PDUMPREG_NAME,
+ ui32RegAddr,
+ ui32LowerValue,
+ ui32LowerMask,
+ psParams->ui32PdumpFlags,
+ PDUMP_POLL_OPERATOR_EQUAL);
+
+ return PVRSRV_OK;
+}
+
+void RGXWaitCycles(const void *hPrivate, IMG_UINT32 ui32Cycles, IMG_UINT32 ui32WaitUs)
+{
+ PVR_UNREFERENCED_PARAMETER(hPrivate);
+ OSWaitus(ui32WaitUs);
+ PDUMPIDLWITHFLAGS(ui32Cycles, PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXAcquireKernelMMUPC(const void *hPrivate, IMG_DEV_PHYADDR *psPCAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psPCAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sPCAddr;
+}
+
+#if defined(PDUMP)
+void RGXWriteKernelMMUPC64(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT64 ui64PCVal)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write the cat-base address */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM, ui32PCReg, ui64PCVal);
+
+ /* Pdump catbase address */
+ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+ RGX_PDUMPREG_NAME,
+ ui32PCReg,
+ 8,
+ ui32PCRegAlignShift,
+ ui32PCRegShift,
+ PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXWriteKernelMMUPC32(const void *hPrivate,
+ IMG_UINT32 ui32PCReg,
+ IMG_UINT32 ui32PCRegAlignShift,
+ IMG_UINT32 ui32PCRegShift,
+ IMG_UINT32 ui32PCVal)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write the cat-base address */
+ OSWriteHWReg32(psDevInfo->pvRegsBaseKM, ui32PCReg, ui32PCVal);
+
+ /* Pdump catbase address */
+ MMU_PDumpWritePageCatBase(psDevInfo->psKernelMMUCtx,
+ RGX_PDUMPREG_NAME,
+ ui32PCReg,
+ 4,
+ ui32PCRegAlignShift,
+ ui32PCRegShift,
+ PDUMP_FLAGS_CONTINUOUS);
+}
+#endif /* defined(PDUMP) */
+
+void RGXAcquireGPURegsAddr(const void *hPrivate, IMG_DEV_PHYADDR *psGPURegsAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psGPURegsAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sGPURegAddr;
+}
+
+#if defined(PDUMP)
+void RGXMIPSWrapperConfig(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64GPURegsAddr,
+ IMG_UINT32 ui32GPURegsAlign,
+ IMG_UINT32 ui32BootMode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+ ui32RegAddr,
+ (ui64GPURegsAddr >> ui32GPURegsAlign) | ui32BootMode);
+
+ /* Store register offset to temp PDump variable */
+ PDumpRegLabelToInternalVar(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+
+ /* Align register transactions identifier */
+ PDumpWriteVarSHRValueOp(":SYSMEM:$1", ui32GPURegsAlign, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Enable micromips instruction encoding */
+ PDumpWriteVarORValueOp(":SYSMEM:$1", ui32BootMode, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Do the actual register write */
+ PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", 0);
+}
+#endif
+
+void RGXAcquireBootRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psBootRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psBootRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sBootRemapAddr;
+}
+
+void RGXAcquireCodeRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psCodeRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psCodeRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sCodeRemapAddr;
+}
+
+void RGXAcquireDataRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psDataRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psDataRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sDataRemapAddr;
+}
+
+void RGXAcquireTrampolineRemapAddr(const void *hPrivate, IMG_DEV_PHYADDR *psTrampolineRemapAddr)
+{
+ PVR_ASSERT(hPrivate != NULL);
+ *psTrampolineRemapAddr = ((RGX_LAYER_PARAMS*)hPrivate)->sTrampolineRemapAddr;
+}
+
+#if defined(PDUMP)
+static inline
+void RGXWriteRemapConfig2Reg(void __iomem *pvRegs,
+ PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64PhyAddr,
+ IMG_UINT64 ui64PhyMask,
+ IMG_UINT64 ui64Settings)
+{
+ OSWriteHWReg64(pvRegs, ui32RegAddr, (ui64PhyAddr & ui64PhyMask) | ui64Settings);
+
+ /* Store memory offset to temp PDump variable */
+ PDumpMemLabelToInternalVar64(":SYSMEM:$1", psPMR, uiLogicalOffset, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Keep only the relevant bits of the output physical address */
+ PDumpWriteVarANDValueOp(":SYSMEM:$1", ui64PhyMask, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Extra settings for this remapped region */
+ PDumpWriteVarORValueOp(":SYSMEM:$1", ui64Settings, PDUMP_FLAGS_CONTINUOUS);
+
+ /* Do the actual register write */
+ PDumpInternalVarToReg64(RGX_PDUMPREG_NAME, ui32RegAddr, ":SYSMEM:$1", PDUMP_FLAGS_CONTINUOUS);
+}
+
+void RGXBootRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32BootRemapMemOffset = RGXMIPSFW_BOOT_NMI_CODE_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write remap config1 register */
+ RGXWriteReg64(hPrivate,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Write remap config2 register */
+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32BootRemapMemOffset,
+ ui32Config2RegAddr,
+ ui64Config2PhyAddr,
+ ui64Config2PhyMask,
+ ui64Config2Settings);
+}
+
+void RGXCodeRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32CodeRemapMemOffset = RGXMIPSFW_EXCEPTIONSVECTORS_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write remap config1 register */
+ RGXWriteReg64(hPrivate,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Write remap config2 register */
+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+ psDevInfo->psRGXFWCodeMemDesc->psImport->hPMR,
+ psDevInfo->psRGXFWCodeMemDesc->uiOffset + ui32CodeRemapMemOffset,
+ ui32Config2RegAddr,
+ ui64Config2PhyAddr,
+ ui64Config2PhyMask,
+ ui64Config2Settings);
+}
+
+void RGXDataRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 ui32DataRemapMemOffset = RGXMIPSFW_BOOT_NMI_DATA_BASE_PAGE * (IMG_UINT32)RGXMIPSFW_PAGE_SIZE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* Write remap config1 register */
+ RGXWriteReg64(hPrivate,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Write remap config2 register */
+ RGXWriteRemapConfig2Reg(psDevInfo->pvRegsBaseKM,
+ psDevInfo->psRGXFWDataMemDesc->psImport->hPMR,
+ psDevInfo->psRGXFWDataMemDesc->uiOffset + ui32DataRemapMemOffset,
+ ui32Config2RegAddr,
+ ui64Config2PhyAddr,
+ ui64Config2PhyMask,
+ ui64Config2Settings);
+}
+
+void RGXTrampolineRemapConfig(const void *hPrivate,
+ IMG_UINT32 ui32Config1RegAddr,
+ IMG_UINT64 ui64Config1RegValue,
+ IMG_UINT32 ui32Config2RegAddr,
+ IMG_UINT64 ui64Config2PhyAddr,
+ IMG_UINT64 ui64Config2PhyMask,
+ IMG_UINT64 ui64Config2Settings)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ /* write the register for real, without PDump */
+ OSWriteHWReg64(psDevInfo->pvRegsBaseKM,
+ ui32Config1RegAddr,
+ ui64Config1RegValue);
+
+ /* Store the memory address in a PDump variable */
+ PDumpPhysHandleToInternalVar64(":SYSMEM:$1",
+ psDevInfo->psTrampoline->hPdumpPages,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Keep only the relevant bits of the input physical address */
+ PDumpWriteVarANDValueOp(":SYSMEM:$1",
+ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_BASE_ADDR_IN_CLRMSK,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Enable bit */
+ PDumpWriteVarORValueOp(":SYSMEM:$1",
+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* Do the PDump register write */
+ PDumpInternalVarToReg64(RGX_PDUMPREG_NAME,
+ ui32Config1RegAddr,
+ ":SYSMEM:$1",
+ PDUMP_FLAGS_CONTINUOUS);
+
+ /* this can be written directly */
+ RGXWriteReg64(hPrivate,
+ ui32Config2RegAddr,
+ (ui64Config2PhyAddr & ui64Config2PhyMask) | ui64Config2Settings);
+}
+#endif
+
+#define MAX_NUM_COHERENCY_TESTS (10)
+IMG_BOOL RGXDoFWSlaveBoot(const void *hPrivate)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ if (psDevInfo->ui32CoherencyTestsDone >= MAX_NUM_COHERENCY_TESTS)
+ {
+ return IMG_FALSE;
+ }
+
+ psDevConfig = ((RGX_LAYER_PARAMS*)hPrivate)->psDevConfig;
+
+ return PVRSRVSystemSnoopingOfCPUCache(psDevConfig);
+}
+
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Issue a Write */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+ return eError;
+}
+
+/*
+ * The fabric coherency test is performed when platform supports fabric coherency
+ * either in the form of ACE-lite or Full-ACE. This test is done quite early
+ * with the firmware processor quiescent and makes exclusive use of the slave
+ * port interface for reading/writing through the device memory hierarchy. The
+ * rationale for the test is to ensure that what the CPU writes to its dcache
+ * is visible to the GPU via coherency snoop miss/hit and vice-versa without
+ * any intervening cache maintenance by the writing agent.
+ */
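+/*
+ * Test matrix exercised below (derived from the ui32TestType switch):
+ *   0: CPU write / GPU read, snoop miss
+ *   1: GPU write / CPU read, snoop miss
+ *   2: CPU write / GPU read, snoop hit
+ *   3: GPU write / CPU read, snoop hit
+ * Each test type is run twice (ui32OddEven) with a different seed value.
+ */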
+PVRSRV_ERROR RGXFabricCoherencyTest(const void *hPrivate)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ IMG_UINT32 *pui32FabricCohTestBufferCpuVA;
+ DEVMEM_MEMDESC *psFabricCohTestBufferMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFabricCohTestBufferDevVA;
+ IMG_DEVMEM_SIZE_T uiFabricCohTestBlockSize = sizeof(IMG_UINT64);
+ IMG_DEVMEM_ALIGN_T uiFabricCohTestBlockAlign = sizeof(IMG_UINT64);
+ IMG_UINT64 ui64SegOutAddrTopCached = 0;
+ IMG_UINT64 ui64SegOutAddrTopUncached = 0;
+ IMG_UINT32 ui32SLCCTRL = 0;
+ IMG_UINT32 ui32OddEven;
+ IMG_BOOL bFeatureS7;
+ IMG_UINT32 ui32TestType;
+ IMG_UINT32 ui32OddEvenSeed = 1;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bFullTestPassed = IMG_TRUE;
+ IMG_BOOL bSubTestPassed = IMG_FALSE;
+ IMG_BOOL bExit = IMG_FALSE;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psDevInfo = ((RGX_LAYER_PARAMS*)hPrivate)->psDevInfo;
+
+ PVR_LOG(("Starting fabric coherency test ....."));
+
+ bFeatureS7 = RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE);
+
+ if (bFeatureS7)
+ {
+ if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+ {
+ ui64SegOutAddrTopCached = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_CACHED_ERN_45914(META_MMU_CONTEXT_MAPPING);
+ ui64SegOutAddrTopUncached = RGXFW_SEGMMU_OUTADDR_TOP_S7_SLC_UNCACHED_ERN_45914(META_MMU_CONTEXT_MAPPING);
+ }
+
+ /* Configure META to use SLC force-linefill for the bootloader segment */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+ (ui64SegOutAddrTopUncached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+ }
+ else
+ {
+ /* Bypass the SLC when IO coherency is enabled */
+ ui32SLCCTRL = RGXReadReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_SLC_CTRL_BYPASS,
+ ui32SLCCTRL | RGX_CR_SLC_CTRL_BYPASS_BYP_CC_EN);
+ }
+
+ /* Size and align are 'expanded' because we request an export align allocation */
+ DevmemExportalignAdjustSizeAndAlign(DevmemGetHeapLog2PageSize(psDevInfo->psFirmwareMainHeap),
+ &uiFabricCohTestBlockSize,
+ &uiFabricCohTestBlockAlign);
+
+ /* Allocate, acquire cpu address and set firmware address */
+ eError = DevmemFwAllocateExportable(psDevInfo->psDeviceNode,
+ uiFabricCohTestBlockSize,
+ uiFabricCohTestBlockAlign,
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_COHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ "FwExFabricCoherencyTestBuffer",
+ &psFabricCohTestBufferMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "DevmemFwAllocateExportable() error: %s, exiting",
+ PVRSRVGetErrorStringKM(eError)));
+ goto e0;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psFabricCohTestBufferMemDesc, (void **) &pui32FabricCohTestBufferCpuVA);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "DevmemAcquireCpuVirtAddr() error: %s, exiting",
+ PVRSRVGetErrorStringKM(eError)));
+ goto e0;
+ }
+
+ /* Create a FW address which is uncached in the Meta DCache and in the SLC
+ * using the Meta bootloader segment.
+ * This segment is the only one configured correctly out of reset
+ * (when this test is meant to be executed).
+ */
+ RGXSetFirmwareAddress(&sFabricCohTestBufferDevVA,
+ psFabricCohTestBufferMemDesc,
+ 0,
+ RFW_FWADDR_FLAG_NONE);
+
+ /* Undo most of the FW mappings done by RGXSetFirmwareAddress */
+ sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_META_CACHE_MASK;
+ sFabricCohTestBufferDevVA.ui32Addr &= ~RGXFW_SEGMMU_DATA_VIVT_SLC_CACHE_MASK;
+ sFabricCohTestBufferDevVA.ui32Addr -= RGXFW_SEGMMU_DATA_BASE_ADDRESS;
+
+ /* Map the buffer in the bootloader segment as uncached */
+ sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_BOOTLDR_META_ADDR;
+ sFabricCohTestBufferDevVA.ui32Addr |= RGXFW_SEGMMU_DATA_META_UNCACHED;
+
+ for (ui32TestType = 0; ui32TestType < 4 && bExit == IMG_FALSE; ui32TestType++)
+ {
+ IMG_CPU_PHYADDR sCpuPhyAddr;
+ IMG_BOOL bValid;
+ PMR *psPMR;
+
+ /* Acquire underlying PMR CpuPA in preparation for cache maintenance */
+ (void) DevmemLocalGetImportHandle(psFabricCohTestBufferMemDesc, (void**)&psPMR);
+ eError = PMR_CpuPhysAddr(psPMR, OSGetPageShift(), 1, 0, &sCpuPhyAddr, &bValid);
+ if (eError != PVRSRV_OK || bValid != IMG_TRUE)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PMR_CpuPhysAddr error: %s, exiting",
+ PVRSRVGetErrorStringKM(eError)));
+ bExit = IMG_TRUE;
+ continue;
+ }
+
+ /* Here we do two passes [runs], mostly to account for the effects of
+ using a different seed value (i.e. ui32OddEvenSeed) for each write/read */
+ for (ui32OddEven = 1; ui32OddEven < 3 && bExit == IMG_FALSE; ui32OddEven++)
+ {
+ IMG_UINT32 i;
+
+#if defined(DEBUG)
+ switch(ui32TestType)
+ {
+ case 0:
+ PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven));
+ break;
+ case 1:
+ PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: starting [run #%u]", ui32OddEven));
+ break;
+ case 2:
+ PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven));
+ break;
+ case 3:
+ PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: starting [run #%u]", ui32OddEven));
+ break;
+ default:
+ PVR_LOG(("Internal error, exiting test"));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ bExit = IMG_TRUE;
+ continue;
+ break;
+ }
+#endif
+
+ for (i = 0; i < 2 && bExit == IMG_FALSE; i++)
+ {
+ IMG_UINT32 ui32FWAddr;
+ IMG_UINT32 ui32FWValue;
+ IMG_UINT32 ui32FWValue2;
+ IMG_CPU_PHYADDR sCpuPhyAddrStart;
+ IMG_CPU_PHYADDR sCpuPhyAddrEnd;
+ IMG_UINT32 ui32LastFWValue = ~0;
+ IMG_UINT32 ui32Offset = i * sizeof(IMG_UINT32);
+
+ /* Calculate next address and seed value to write/read from slave-port */
+ ui32FWAddr = sFabricCohTestBufferDevVA.ui32Addr + ui32Offset;
+ sCpuPhyAddrStart.uiAddr = sCpuPhyAddr.uiAddr + ui32Offset;
+ sCpuPhyAddrEnd.uiAddr = sCpuPhyAddrStart.uiAddr;
+ ui32OddEvenSeed += 1;
+
+ if (ui32TestType & 0x1)
+ {
+ ui32FWValue = i + ui32OddEvenSeed;
+
+ switch(ui32TestType)
+ {
+ case 1:
+ case 3:
+ /* Clean the dcache to ensure no stale dirty line can over-write what
+ we are about to write via the slave-port: if such a line drained from
+ the CPU dcache after the slave-port write, it would corrupt the value
+ we later read back via the CPU */
+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+ CacheOpExec(psDevInfo->psDeviceNode,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+ sCpuPhyAddrStart,
+ sCpuPhyAddrEnd,
+ PVRSRV_CACHE_OP_CLEAN);
+ break;
+ }
+
+ /* Write the value using the RGX slave-port interface */
+ eError = RGXWriteMETAAddr(psDevInfo, ui32FWAddr, ui32FWValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXWriteMETAAddr error: %s, exiting",
+ PVRSRVGetErrorStringKM(eError)));
+ bExit = IMG_TRUE;
+ continue;
+ }
+
+ /* Read back value using RGX slave-port interface, this is used
+ as a sort of memory barrier for the above write */
+ eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue2);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXReadMETAAddr error: %s, exiting",
+ PVRSRVGetErrorStringKM(eError)));
+ bExit = IMG_TRUE;
+ continue;
+ }
+ else if (ui32FWValue != ui32FWValue2)
+ {
+ /* Fatal error, we should abort */
+ PVR_DPF((PVR_DBG_ERROR,
+ "At Offset: %d, RAW via SlavePort failed: expected: %x, got: %x",
+ i,
+ ui32FWValue,
+ ui32FWValue2));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ bExit = IMG_TRUE;
+ continue;
+ }
+
+ if (! PVRSRVSystemSnoopingOfDeviceCache(psDevInfo->psDeviceNode->psDevConfig))
+ {
+ /* Invalidate the dcache to ensure that any data prefetched by the CPU
+ from this memory region is discarded before we read (i.e. the next read
+ must trigger a cache miss). If the device cache is snooped, any
+ prefetching done by the CPU already reflects the most up-to-date data
+ written by the GPU to that location; that is, prefetching is coherent,
+ so no CPU d-cache flush is needed */
+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+ CacheOpExec(psDevInfo->psDeviceNode,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+ sCpuPhyAddrStart,
+ sCpuPhyAddrEnd,
+ PVRSRV_CACHE_OP_INVALIDATE);
+ }
+ }
+ else
+ {
+ IMG_UINT32 ui32RAWCpuValue;
+
+ /* Ensures line is in dcache */
+ ui32FWValue = pui32FabricCohTestBufferCpuVA[i];
+ ui32FWValue = ~0;
+
+ /* Dirty allocation in dcache */
+ ui32RAWCpuValue = i + ui32OddEvenSeed;
+ pui32FabricCohTestBufferCpuVA[i] = i + ui32OddEvenSeed;
+
+ /* Flush possible cpu store-buffer(ing) on LMA */
+ OSWriteMemoryBarrier();
+
+ switch(ui32TestType)
+ {
+ case 0:
+ /* Flush dcache to force subsequent incoming CPU-bound snoop to miss so
+ memory is coherent before the SlavePort reads */
+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+ CacheOpExec(psDevInfo->psDeviceNode,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+ sCpuPhyAddrStart,
+ sCpuPhyAddrEnd,
+ PVRSRV_CACHE_OP_FLUSH);
+ break;
+ }
+
+ /* Read back value using RGX slave-port interface */
+ eError = RGXReadMETAAddr(psDevInfo, ui32FWAddr, &ui32FWValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXReadWithSP error: %s, exiting",
+ PVRSRVGetErrorStringKM(eError)));
+ bExit = IMG_TRUE;
+ continue;
+ }
+
+ /* We are being mostly paranoid here, just to account for CPU RAW operations */
+ sCpuPhyAddrEnd.uiAddr += sizeof(IMG_UINT32);
+ CacheOpExec(psDevInfo->psDeviceNode,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset,
+ (IMG_CHAR *)pui32FabricCohTestBufferCpuVA + ui32Offset + sizeof(IMG_UINT32),
+ sCpuPhyAddrStart,
+ sCpuPhyAddrEnd,
+ PVRSRV_CACHE_OP_FLUSH);
+ if (pui32FabricCohTestBufferCpuVA[i] != ui32RAWCpuValue)
+ {
+ /* Fatal error, we should abort */
+ PVR_DPF((PVR_DBG_ERROR,
+ "At Offset: %d, RAW by CPU failed: expected: %x, got: %x",
+ i,
+ ui32RAWCpuValue,
+ pui32FabricCohTestBufferCpuVA[i]));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ bExit = IMG_TRUE;
+ continue;
+ }
+ }
+
+ /* Compare to see if sub-test passed */
+ if (pui32FabricCohTestBufferCpuVA[i] == ui32FWValue)
+ {
+ bSubTestPassed = IMG_TRUE;
+ }
+ else
+ {
+ bSubTestPassed = IMG_FALSE;
+ bFullTestPassed = IMG_FALSE;
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ if (ui32LastFWValue != ui32FWValue)
+ {
+#if defined(DEBUG)
+ PVR_LOG(("At Offset: %d, Expected: %x, Got: %x",
+ i,
+ (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i],
+ (ui32TestType & 0x1) ? pui32FabricCohTestBufferCpuVA[i] : ui32FWValue));
+#endif
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "test encountered unexpected error, exiting"));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ bExit = IMG_TRUE;
+ continue;
+ }
+ }
+
+ ui32LastFWValue = (ui32TestType & 0x1) ? ui32FWValue : pui32FabricCohTestBufferCpuVA[i];
+ }
+
+#if defined(DEBUG)
+ if (bExit)
+ {
+ continue;
+ }
+
+ switch(ui32TestType)
+ {
+ case 0:
+ PVR_LOG(("CPU:Write/GPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+ break;
+ case 1:
+ PVR_LOG(("GPU:Write/CPU:Read Snoop Miss Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+ break;
+ case 2:
+ PVR_LOG(("CPU:Write/GPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+ break;
+ case 3:
+ PVR_LOG(("GPU:Write/CPU:Read Snoop Hit Test: completed [run #%u]: %s", ui32OddEven, bSubTestPassed ? "PASSED" : "FAILED"));
+ break;
+ default:
+ PVR_LOG(("Internal error, exiting test"));
+ bExit = IMG_TRUE;
+ continue;
+ break;
+ }
+#endif
+ }
+ }
+
+ RGXUnsetFirmwareAddress(psFabricCohTestBufferMemDesc);
+ DevmemReleaseCpuVirtAddr(psFabricCohTestBufferMemDesc);
+ DevmemFwFree(psDevInfo, psFabricCohTestBufferMemDesc);
+
+ e0:
+ if (bFeatureS7)
+ {
+ /* Restore bootloader segment settings */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_MMCU_SEGMENTn_OUTA1(6),
+ (ui64SegOutAddrTopCached | RGXFW_BOOTLDR_DEVV_ADDR) >> 32);
+ }
+ else
+ {
+ /* Restore SLC bypass settings */
+ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_BYPASS, ui32SLCCTRL);
+ }
+
+ bFullTestPassed = bExit ? IMG_FALSE: bFullTestPassed;
+ if (bFullTestPassed)
+ {
+ PVR_LOG(("fabric coherency test: PASSED"));
+ psDevInfo->ui32CoherencyTestsDone = MAX_NUM_COHERENCY_TESTS + 1;
+ }
+ else
+ {
+ PVR_LOG(("fabric coherency test: FAILED"));
+ psDevInfo->ui32CoherencyTestsDone++;
+ }
+
+ return eError;
+}
+
+IMG_BOOL RGXDeviceHasFeature(const void *hPrivate, IMG_UINT64 ui64Feature)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ return (psDevInfo->sDevFeatureCfg.ui64Features & ui64Feature) != 0;
+}
+
+IMG_BOOL RGXDeviceHasErnBrn(const void *hPrivate, IMG_UINT64 ui64ErnsBrns)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ return (psDevInfo->sDevFeatureCfg.ui64ErnsBrns & ui64ErnsBrns) != 0;
+}
+
+IMG_UINT32 RGXGetDeviceSLCBanks(const void *hPrivate)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_BANKS))
+ {
+ return 0;
+ }
+ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_BANKS);
+}
+
+IMG_UINT32 RGXGetDeviceSLCSize(const void *hPrivate)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_SIZE_IN_BYTES))
+ {
+ return 0;
+ }
+ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_SIZE_IN_BYTES);
+}
+
+IMG_UINT32 RGXGetDeviceCacheLineSize(const void *hPrivate)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SLC_CACHE_LINE_SIZE_BITS))
+ {
+ return 0;
+ }
+ return RGX_GET_FEATURE_VALUE(psDevInfo, SLC_CACHE_LINE_SIZE_BITS);
+}
+
+IMG_UINT32 RGXGetDevicePhysBusWidth(const void *hPrivate)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ if (!RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH))
+ {
+ return 0;
+ }
+ return RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH);
+}
+
+IMG_BOOL RGXDevicePA0IsValid(const void *hPrivate)
+{
+ RGX_LAYER_PARAMS *psParams;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_ASSERT(hPrivate != NULL);
+ psParams = (RGX_LAYER_PARAMS*)hPrivate;
+ psDevInfo = psParams->psDevInfo;
+
+ return psDevInfo->sLayerParams.bDevicePA0IsValid;
+}
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.h b/drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.h
new file mode 100644
index 00000000000000..a14e1ab23d8adb
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxlayer_impl.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title Header for DDK implementation of the Services abstraction layer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for DDK implementation of the Services abstraction layer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__RGXLAYER_IMPL_H__)
+#define __RGXLAYER_IMPL_H__
+
+#include "rgxlayer.h"
+#include "device_connection.h"
+
+typedef struct _RGX_LAYER_PARAMS_
+{
+ void *psDevInfo;
+ void *psDevConfig;
+#if defined(PDUMP)
+ IMG_UINT32 ui32PdumpFlags;
+#endif
+
+ IMG_DEV_PHYADDR sPCAddr;
+ IMG_DEV_PHYADDR sGPURegAddr;
+ IMG_DEV_PHYADDR sBootRemapAddr;
+ IMG_DEV_PHYADDR sCodeRemapAddr;
+ IMG_DEV_PHYADDR sDataRemapAddr;
+ IMG_DEV_PHYADDR sTrampolineRemapAddr;
+ IMG_BOOL bDevicePA0IsValid;
+} RGX_LAYER_PARAMS;
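+
+/* A minimal usage sketch (hypothetical caller code, not part of this file):
+ * the implementation in rgxlayer_impl.c casts hPrivate back to
+ * RGX_LAYER_PARAMS, so callers are expected to pass a pointer to this
+ * struct:
+ *
+ *   RGX_LAYER_PARAMS sParams = {0};
+ *   sParams.psDevInfo = psDevInfo;
+ *   sParams.psDevConfig = psDevConfig;
+ *   RGXWriteReg32(&sParams, ui32RegAddr, ui32RegValue);
+ */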
+
+#endif /* !defined (__RGXLAYER_IMPL_H__) */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxmem.c b/drivers/gpu/drm/img-rogue/1.10/rgxmem.c
new file mode 100644
index 00000000000000..fe7c090af61d98
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxmem.c
@@ -0,0 +1,721 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX memory context management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX memory context management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvr_debug.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_server_utils.h"
+#include "devicemem_pdump.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pdump_physmem.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgx_memallocflags.h"
+#include "rgx_bvnc_defs_km.h"
+/*
+ FIXME:
+ For now just get global state, but what we really want is to do
+ this per memory context
+*/
+static IMG_UINT32 gui32CacheOpps;
+/* FIXME: End */
+
+typedef struct _SERVER_MMU_CONTEXT_ {
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ MMU_CONTEXT *psMMUContext;
+ IMG_PID uiPID;
+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+ DLLIST_NODE sNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+} SERVER_MMU_CONTEXT;
+
+
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDeviceData,
+ MMU_LEVEL eMMULevel,
+ IMG_BOOL bUnmap)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(hDeviceData);
+ PVR_UNREFERENCED_PARAMETER(bUnmap);
+
+ switch (eMMULevel)
+ {
+ case MMU_LEVEL_3: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PC;
+ break;
+ case MMU_LEVEL_2: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PD;
+ break;
+ case MMU_LEVEL_1: gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_PT;
+ if(!(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT)))
+ {
+ gui32CacheOpps |= RGXFWIF_MMUCACHEDATA_FLAGS_TLB;
+ }
+ break;
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+}
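+
+/* Example: invalidating a single 4kB page mapping on a core without a
+ * virtually-indexed SLC accumulates RGXFWIF_MMUCACHEDATA_FLAGS_PT and
+ * RGXFWIF_MMUCACHEDATA_FLAGS_TLB in gui32CacheOpps; the flags are only
+ * consumed (and cleared) by the next RGXPreKickCacheCommand() kick.
+ */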
+
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT16 *pui16MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt)
+{
+ PVRSRV_ERROR eError;
+
+ if (!gui32CacheOpps)
+ {
+ /* if CacheOps aren't dirty, do nothing */
+ return PVRSRV_OK;
+ }
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to acquire powerlock (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto RGXMMUCacheInvalidateKick_exit;
+ }
+
+ /* Ensure device is powered up before sending any commands */
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON,
+ IMG_FALSE);
+ PDUMPPOWCMDEND();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: failed to transition RGX to ON (%s)",
+ __func__, PVRSRVGetErrorStringKM(eError)));
+ goto _PVRSRVSetDevicePowerStateKM_Exit;
+ }
+
+ eError = RGXPreKickCacheCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ pui16MMUInvalidateUpdate,
+ bInterrupt);
+_PVRSRVSetDevicePowerStateKM_Exit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+RGXMMUCacheInvalidateKick_exit:
+ return eError;
+}
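+
+/* Usage sketch: RGXMMUCacheInvalidateKick() is the self-contained variant,
+ * taking the power lock and powering RGX on before delegating to
+ * RGXPreKickCacheCommand(). A caller already inside a powered, locked kick
+ * path calls RGXPreKickCacheCommand() directly instead:
+ *
+ *     IMG_UINT16 ui16Update;
+ *     eError = RGXPreKickCacheCommand(psDevInfo, RGXFWIF_DM_GP,
+ *                                     &ui16Update, IMG_TRUE);
+ */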
+
+/* Caller should ensure that power lock is held before calling this function */
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ IMG_UINT16 *pui16MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psDevInfo->psDeviceNode;
+ RGXFWIF_KCCB_CMD sFlushCmd;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32CacheOps = gui32CacheOpps; /* Shadow copy of the global cache
+                                              ops, to avoid working on a value
+                                              that may change underneath us */
+
+ if (!ui32CacheOps)
+ {
+ return PVRSRV_OK;
+ }
+
+ *pui16MMUInvalidateUpdate = psDeviceNode->ui16NextMMUInvalidateUpdate;
+
+ /* Setup the command and add the device node's sync object */
+ sFlushCmd.eCmdType = RGXFWIF_KCCB_CMD_MMUCACHE;
+ sFlushCmd.uCmdData.sMMUCacheData.ui16MMUCacheSyncUpdateValue = psDeviceNode->ui16NextMMUInvalidateUpdate;
+ SyncPrimGetFirmwareAddr(psDeviceNode->psMMUCacheSyncPrim,
+ &sFlushCmd.uCmdData.sMMUCacheData.sMMUCacheSync.ui32Addr);
+
+ /* Set the update value for the next kick */
+ psDeviceNode->ui16NextMMUInvalidateUpdate++;
+
+ sFlushCmd.uCmdData.sMMUCacheData.ui32Flags =
+ ui32CacheOps |
+ /* Set which memory context this command is for (all ctxs for now) */
+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, SLC_VIVT) ? RGXFWIF_MMUCACHEDATA_FLAGS_CTX_ALL : 0) |
+ (bInterrupt ? RGXFWIF_MMUCACHEDATA_FLAGS_INTERRUPT : 0);
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Submit MMU flush and invalidate (flags = 0x%08x)",
+ sFlushCmd.uCmdData.sMMUCacheData.ui32Flags);
+#endif
+
+ /* Clear the cache ops we just scheduled from the global state; the XOR
+    clears only the bits in ui32CacheOps, preserving any requests added
+    since the shadow copy was taken */
+ gui32CacheOpps ^= ui32CacheOps;
+
+ /* Schedule MMU cache command */
+ eError = RGXSendCommand(psDevInfo,
+ eDM,
+ &sFlushCmd,
+ sizeof(RGXFWIF_KCCB_CMD),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPreKickCacheCommand: Failed to schedule MMU "
+ "cache command to DM=%d with error (%u)", eDM, eError));
+ }
+
+ return eError;
+}
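+
+/* Worked example of the shadow-copy/XOR pattern above: if gui32CacheOpps is
+ * (PC | PT) when sampled into ui32CacheOps, and a concurrent caller then ORs
+ * in PD, the final "gui32CacheOpps ^= ui32CacheOps" clears only PC and PT,
+ * leaving PD pending for the next kick.
+ */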
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+/* Page-fault debug is currently the only use case that needs to find process
+ * info after that process's device memory context has been destroyed
+ */
+
+typedef struct _UNREGISTERED_MEMORY_CONTEXT_
+{
+ IMG_PID uiPID;
+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+ IMG_DEV_PHYADDR sPCDevPAddr;
+} UNREGISTERED_MEMORY_CONTEXT;
+
+/* must be a power of two */
+#define UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE (1 << 3)
+
+static UNREGISTERED_MEMORY_CONTEXT gasUnregisteredMemCtxs[UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE];
+static IMG_UINT32 gui32UnregisteredMemCtxsHead;
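+
+/* The power-of-two size lets the head wrap with a mask instead of a modulo:
+ * with a size of 8, (7 + 1) & 7 == 0. If the size were not a power of two,
+ * the "& (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1)" arithmetic below
+ * would skip slots.
+ */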
+
+/* Record a device memory context being unregistered.
+ * The list of unregistered contexts can be used to find the PID and process
+ * name belonging to a memory context which has been destroyed
+ */
+static void _RecordUnregisteredMemoryContext(PVRSRV_RGXDEV_INFO *psDevInfo, SERVER_MMU_CONTEXT *psServerMMUContext)
+{
+ UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+ psRecord = &gasUnregisteredMemCtxs[gui32UnregisteredMemCtxsHead];
+
+ gui32UnregisteredMemCtxsHead = (gui32UnregisteredMemCtxsHead + 1)
+ & (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1);
+
+ psRecord->uiPID = psServerMMUContext->uiPID;
+ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &psRecord->sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("_RecordUnregisteredMemoryContext: Failed to get PC address for memory context"));
+ }
+ OSStringLCopy(psRecord->szProcessName, psServerMMUContext->szProcessName, sizeof(psRecord->szProcessName));
+
+ /* Release only once the record is fully written, so readers holding this
+    lock never observe a half-initialised entry */
+ OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+}
+
+#endif
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData)
+{
+ SERVER_MMU_CONTEXT *psServerMMUContext = hPrivData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psServerMMUContext->psDevInfo;
+
+ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+ dllist_remove_node(&psServerMMUContext->sNode);
+ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ _RecordUnregisteredMemoryContext(psDevInfo, psServerMMUContext);
+#endif
+
+ /*
+ * Release the page catalogue address acquired in RGXRegisterMemoryContext().
+ */
+ MMU_ReleaseBaseAddr(NULL /* FIXME */);
+
+ /*
+ * Free the firmware memory context.
+ */
+ DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+
+ OSFreeMem(psServerMMUContext);
+}
+
+
+/*
+ * RGXRegisterMemoryContext
+ */
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+ MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE *hPrivData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_FLAGS_T uiFWMemContextMemAllocFlags;
+ RGXFWIF_FWMEMCONTEXT *psFWMemContext;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ SERVER_MMU_CONTEXT *psServerMMUContext;
+
+ if (psDevInfo->psKernelMMUCtx == NULL)
+ {
+ /*
+ * This must be the creation of the Kernel memory context. Take a copy
+ * of the MMU context for use when programming the BIF.
+ */
+ psDevInfo->psKernelMMUCtx = psMMUContext;
+ }
+ else
+ {
+ psServerMMUContext = OSAllocMem(sizeof(*psServerMMUContext));
+ if (psServerMMUContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_server_ctx;
+ }
+
+ psServerMMUContext->psDevInfo = psDevInfo;
+
+ /*
+ * This FW MemContext is only mapped into the kernel for initialisation
+ * purposes; otherwise the allocation is used solely by the FW.
+ * The GPU cache therefore doesn't need coherency, and write-combine
+ * suffices on the CPU side (the WC buffer is flushed at every kick)
+ */
+ uiFWMemContextMemAllocFlags = PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE;
+
+ /*
+ Allocate device memory for the firmware memory context for the new
+ application.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware memory context");
+ /* FIXME: why cache-consistent? */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWMemContext),
+ uiFWMemContextMemAllocFlags,
+ "FwMemoryContext",
+ &psFWMemContextMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to allocate firmware memory context (%u)",
+ eError));
+ goto fail_alloc_fw_ctx;
+ }
+
+ /*
+ Temporarily map the firmware memory context to the kernel.
+ */
+ eError = DevmemAcquireCpuVirtAddr(psFWMemContextMemDesc,
+ (void **)&psFWMemContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to map firmware memory context (%u)",
+ eError));
+ goto fail_acquire_cpu_addr;
+ }
+
+ /*
+ * Write the new memory context's page catalogue into the firmware memory
+ * context for the client.
+ */
+ eError = MMU_AcquireBaseAddr(psMMUContext, &psFWMemContext->sPCDevPAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+ goto fail_acquire_base_addr;
+ }
+
+ /*
+ * Set default values for the rest of the structure.
+ */
+ psFWMemContext->uiPageCatBaseRegID = -1;
+ psFWMemContext->uiBreakpointAddr = 0;
+ psFWMemContext->uiBPHandlerAddr = 0;
+ psFWMemContext->uiBreakpointCtl = 0;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+ IMG_BOOL bOSidAxiProt;
+
+ MMU_GetOSids(psMMUContext, &ui32OSid, &ui32OSidReg, &bOSidAxiProt);
+
+ psFWMemContext->ui32OSid = ui32OSidReg;
+ psFWMemContext->bOSidAxiProt = bOSidAxiProt;
+}
+#endif
+
+#if defined(PDUMP)
+ {
+ IMG_CHAR aszName[PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH];
+ IMG_DEVMEM_OFFSET_T uiOffset = 0;
+
+ /*
+ * Dump the Mem context allocation
+ */
+ DevmemPDumpLoadMem(psFWMemContextMemDesc, 0, sizeof(*psFWMemContext), PDUMP_FLAGS_CONTINUOUS);
+
+
+ /*
+ * Obtain a symbolic addr of the mem context structure
+ */
+ eError = DevmemPDumpPageCatBaseToSAddr(psFWMemContextMemDesc,
+ &uiOffset,
+ aszName,
+ PHYSMEM_PDUMP_MEMSPNAME_SYMB_ADDR_MAX_LENGTH);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to generate a Dump Page Catalogue address (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+ goto fail_pdump_cat_base_addr;
+ }
+
+ /*
+ * Dump the Page Cat tag in the mem context (symbolic address)
+ */
+ eError = MMU_PDumpWritePageCatBase(psMMUContext,
+ aszName,
+ uiOffset,
+ 8, /* 64-bit register write */
+ 0,
+ 0,
+ 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXRegisterMemoryContext: Failed to acquire Page Catalogue address (%u)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+ goto fail_pdump_cat_base;
+ }
+ }
+#endif
+
+ /*
+ * Release kernel address acquired above.
+ */
+ DevmemReleaseCpuVirtAddr(psFWMemContextMemDesc);
+
+ /*
+ * Store the process information for this device memory context
+ * for use with the host page-fault analysis.
+ */
+ psServerMMUContext->uiPID = OSGetCurrentClientProcessIDKM();
+ psServerMMUContext->psMMUContext = psMMUContext;
+ psServerMMUContext->psFWMemContextMemDesc = psFWMemContextMemDesc;
+ /* OSSNPrintf returns the untruncated length, so treat any return value
+    >= the buffer size as truncation */
+ if (OSSNPrintf(psServerMMUContext->szProcessName,
+ RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME,
+ "%s",
+ OSGetCurrentClientProcessNameKM()) >= RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME)
+ {
+ psServerMMUContext->szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME-1] = '\0';
+ }
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "New memory context: Process Name: %s PID: %u (0x%08X)",
+ psServerMMUContext->szProcessName,
+ psServerMMUContext->uiPID,
+ psServerMMUContext->uiPID);
+
+ OSWRLockAcquireWrite(psDevInfo->hMemoryCtxListLock);
+ dllist_add_to_tail(&psDevInfo->sMemoryContextList, &psServerMMUContext->sNode);
+ OSWRLockReleaseWrite(psDevInfo->hMemoryCtxListLock);
+
+ MMU_SetDeviceData(psMMUContext, psFWMemContextMemDesc);
+ *hPrivData = psServerMMUContext;
+ }
+
+ return PVRSRV_OK;
+
+#if defined(PDUMP)
+fail_pdump_cat_base:
+fail_pdump_cat_base_addr:
+ MMU_ReleaseBaseAddr(NULL);
+#endif
+fail_acquire_base_addr:
+ /* The CPU virtual address has already been released at each failure site
+    above, so there is nothing to undo here */
+fail_acquire_cpu_addr:
+ DevmemFwFree(psDevInfo, psServerMMUContext->psFWMemContextMemDesc);
+fail_alloc_fw_ctx:
+ OSFreeMem(psServerMMUContext);
+fail_alloc_server_ctx:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv)
+{
+ SERVER_MMU_CONTEXT *psMMUContext = (SERVER_MMU_CONTEXT *) hPriv;
+
+ return psMMUContext->psFWMemContextMemDesc;
+}
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ MMU_FAULT_DATA *psOutFaultData)
+{
+ IMG_DEV_PHYADDR sPCDevPAddr;
+ DLLIST_NODE *psNode, *psNext;
+
+ OSWRLockAcquireRead(psDevInfo->hMemoryCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+ {
+ SERVER_MMU_CONTEXT *psServerMMUContext =
+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+ if (MMU_AcquireBaseAddr(psServerMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for memory context"));
+ continue;
+ }
+
+ if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+ {
+ PVR_DUMPDEBUG_LOG("Found memory context (PID = %d, %s)",
+ psServerMMUContext->uiPID,
+ psServerMMUContext->szProcessName);
+
+ MMU_CheckFaultAddress(psServerMMUContext->psMMUContext, psDevVAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile, psOutFaultData);
+ goto out_unlock;
+ }
+ }
+
+ /* Lastly check for a fault in the kernel-allocated memory */
+ if (MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for kernel memory context"));
+ }
+ else if (psDevPAddr->uiAddr == sPCDevPAddr.uiAddr)
+ {
+ MMU_CheckFaultAddress(psDevInfo->psKernelMMUCtx, psDevVAddr,
+ pfnDumpDebugPrintf, pvDumpDebugFile, psOutFaultData);
+ }
+
+out_unlock:
+ OSWRLockReleaseRead(psDevInfo->hMemoryCtxListLock);
+}
+
+/* Given the physical address of a page catalogue, searches for a corresponding
+ * MMU context and, if found, provides the caller with details of the owning process.
+ * Returns IMG_TRUE if a process is found.
+ */
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+ RGXMEM_PROCESS_INFO *psInfo)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ DLLIST_NODE *psNode, *psNext;
+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+ /* check if the input PC addr corresponds to an active memory context */
+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+ {
+ SERVER_MMU_CONTEXT *psThisMMUContext =
+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+ IMG_DEV_PHYADDR sPCDevPAddr;
+
+ if (MMU_AcquireBaseAddr(psThisMMUContext->psMMUContext, &sPCDevPAddr) != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for memory context"));
+ continue;
+ }
+
+ if (sPCAddress.uiAddr == sPCDevPAddr.uiAddr)
+ {
+ psServerMMUContext = psThisMMUContext;
+ break;
+ }
+ }
+
+ if(psServerMMUContext != NULL)
+ {
+ psInfo->uiPID = psServerMMUContext->uiPID;
+ OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+ /* else check if the input PC addr corresponds to the firmware */
+ else
+ {
+ IMG_DEV_PHYADDR sKernelPCDevPAddr;
+ PVRSRV_ERROR eError;
+
+ eError = MMU_AcquireBaseAddr(psDevInfo->psKernelMMUCtx, &sKernelPCDevPAddr);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_LOG(("Failed to get PC address for kernel memory context"));
+ }
+ else
+ {
+ if(sPCAddress.uiAddr == sKernelPCDevPAddr.uiAddr)
+ {
+ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+ OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+ }
+ }
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ if(bRet == IMG_FALSE)
+ {
+ /* no active memory context found with the given PC address.
+ * Check the list of most recently freed memory contexts.
+ */
+ IMG_UINT32 i;
+
+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+ /* iterate through the list of unregistered memory contexts
+ * from newest (one before the head) to the oldest (the current head)
+ */
+ i = gui32UnregisteredMemCtxsHead;
+
+ do
+ {
+ UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+ i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+ psRecord = &gasUnregisteredMemCtxs[i];
+
+ if(psRecord->sPCDevPAddr.uiAddr == sPCAddress.uiAddr)
+ {
+ psInfo->uiPID = psRecord->uiPID;
+ OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
+ psInfo->bUnregistered = IMG_TRUE;
+ bRet = IMG_TRUE;
+ break;
+ }
+ } while(i != gui32UnregisteredMemCtxsHead);
+
+ OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+ }
+#endif
+ return bRet;
+}
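+
+/* Illustrative traversal: with a history size of 8 and
+ * gui32UnregisteredMemCtxsHead == 3, the loop above visits records in the
+ * order 2, 1, 0, 7, 6, 5, 4, 3, i.e. newest to oldest.
+ */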
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+ RGXMEM_PROCESS_INFO *psInfo)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ DLLIST_NODE *psNode, *psNext;
+ SERVER_MMU_CONTEXT *psServerMMUContext = NULL;
+
+ /* check if the input PID corresponds to an active memory context */
+ dllist_foreach_node(&psDevInfo->sMemoryContextList, psNode, psNext)
+ {
+ SERVER_MMU_CONTEXT *psThisMMUContext =
+ IMG_CONTAINER_OF(psNode, SERVER_MMU_CONTEXT, sNode);
+
+ if (psThisMMUContext->uiPID == uiPID)
+ {
+ psServerMMUContext = psThisMMUContext;
+ break;
+ }
+ }
+
+ if(psServerMMUContext != NULL)
+ {
+ psInfo->uiPID = psServerMMUContext->uiPID;
+ OSStringLCopy(psInfo->szProcessName, psServerMMUContext->szProcessName, sizeof(psInfo->szProcessName));
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+ /* else check if the input PID corresponds to the firmware */
+ else if(uiPID == RGXMEM_SERVER_PID_FIRMWARE)
+ {
+ psInfo->uiPID = RGXMEM_SERVER_PID_FIRMWARE;
+ OSStringLCopy(psInfo->szProcessName, "Firmware", sizeof(psInfo->szProcessName));
+ psInfo->bUnregistered = IMG_FALSE;
+ bRet = IMG_TRUE;
+ }
+#if defined(SUPPORT_PAGE_FAULT_DEBUG)
+ /* if the PID didn't correspond to an active context or the
+ * FW address then see if it matches a recently unregistered context
+ */
+ if(bRet == IMG_FALSE)
+ {
+ IMG_UINT32 i;
+
+ OSLockAcquire(psDevInfo->hMMUCtxUnregLock);
+
+ /* Iterate from the newest record (one before the head) back to the
+    oldest (the head itself), wrapping at zero as the matching
+    traversal in RGXPCAddrToProcessInfo() does */
+ i = gui32UnregisteredMemCtxsHead;
+
+ do
+ {
+ UNREGISTERED_MEMORY_CONTEXT *psRecord;
+
+ i ? i-- : (i = (UNREGISTERED_MEMORY_CONTEXTS_HISTORY_SIZE - 1));
+
+ psRecord = &gasUnregisteredMemCtxs[i];
+
+ if(psRecord->uiPID == uiPID)
+ {
+ psInfo->uiPID = psRecord->uiPID;
+ OSStringLCopy(psInfo->szProcessName, psRecord->szProcessName, sizeof(psInfo->szProcessName));
+ psInfo->bUnregistered = IMG_TRUE;
+ bRet = IMG_TRUE;
+ break;
+ }
+ } while(i != gui32UnregisteredMemCtxsHead);
+
+ OSLockRelease(psDevInfo->hMMUCtxUnregLock);
+
+ }
+#endif
+ return bRet;
+}
+
+/******************************************************************************
+ End of file (rgxmem.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxmem.h b/drivers/gpu/drm/img-rogue/1.10/rgxmem.h
new file mode 100644
index 00000000000000..3eeacd087df2ec
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxmem.h
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX memory context management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for RGX memory context management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXMEM_H__)
+#define __RGXMEM_H__
+
+#include "pvrsrv_error.h"
+#include "device.h"
+#include "mmu_common.h"
+#include "rgxdevice.h"
+
+#define RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME 16
+
+/* this PID denotes the firmware */
+#define RGXMEM_SERVER_PID_FIRMWARE 0xFFFFFFFF
+
+/* this PID denotes the PM */
+#define RGXMEM_SERVER_PID_PM 0xEFFFFFFF
+
+typedef struct _RGXMEM_PROCESS_INFO_
+{
+ IMG_PID uiPID;
+ IMG_CHAR szProcessName[RGXMEM_SERVER_MMU_CONTEXT_MAX_NAME];
+ IMG_BOOL bUnregistered;
+} RGXMEM_PROCESS_INFO;
+
+IMG_DEV_PHYADDR GetPC(MMU_CONTEXT *psContext);
+
+/* FIXME: SyncPrim should be stored on the memory context */
+void RGXMMUSyncPrimAlloc(PVRSRV_DEVICE_NODE *psDeviceNode);
+void RGXMMUSyncPrimFree(void);
+
+void RGXMMUCacheInvalidate(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hDeviceData,
+ MMU_LEVEL eMMULevel,
+ IMG_BOOL bUnmap);
+
+/*************************************************************************/ /*!
+@Function RGXMMUCacheInvalidateKick
+
+@Description Sends a flush command to a particular DM but first takes
+ the power lock.
+
+@Input psDeviceNode Device node
+@Input pui16MMUInvalidateUpdate Receives the sync update value associated
+ with this kick
+@Input bInterrupt Whether the firmware should signal command
+ completion to the host
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXMMUCacheInvalidateKick(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT16 *pui16MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt);
+
+/*************************************************************************/ /*!
+@Function RGXPreKickCacheCommand
+
+@Description Sends a cache flush command to a particular DM without
+ taking the power lock. It is the caller's responsibility
+ to ensure the power lock is held when calling this function.
+
+@Input psDevInfo Device Info
+@Input eDM The DM to which the command is sent.
+@Input pui16MMUInvalidateUpdate Receives the sync update value associated
+ with this kick
+@Input bInterrupt Whether the firmware should signal command
+ completion to the host
+
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+PVRSRV_ERROR RGXPreKickCacheCommand(PVRSRV_RGXDEV_INFO *psDevInfo,
+ RGXFWIF_DM eDM,
+ IMG_UINT16 *pui16MMUInvalidateUpdate,
+ IMG_BOOL bInterrupt);
+
+void RGXUnregisterMemoryContext(IMG_HANDLE hPrivData);
+PVRSRV_ERROR RGXRegisterMemoryContext(PVRSRV_DEVICE_NODE *psDeviceNode,
+ MMU_CONTEXT *psMMUContext,
+ IMG_HANDLE *hPrivData);
+
+DEVMEM_MEMDESC *RGXGetFWMemDescFromMemoryContextHandle(IMG_HANDLE hPriv);
+
+void RGXCheckFaultAddress(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_DEV_VIRTADDR *psDevVAddr,
+ IMG_DEV_PHYADDR *psDevPAddr,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile,
+ MMU_FAULT_DATA *psOutFaultData);
+
+IMG_BOOL RGXPCAddrToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_DEV_PHYADDR sPCAddress,
+ RGXMEM_PROCESS_INFO *psInfo);
+
+IMG_BOOL RGXPCPIDToProcessInfo(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_PID uiPID,
+ RGXMEM_PROCESS_INFO *psInfo);
+
+#endif /* __RGXMEM_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.c b/drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.c
new file mode 100644
index 00000000000000..430afb62ffdf39
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.c
@@ -0,0 +1,940 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxmipsmmuinit.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+#include "rgxheapconfig.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "pdump_km.h"
+#include "rgxdevice.h"
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+/* Currently there is no page directory for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PDE_PROTMASK 0
+/* Currently there is no page catalog for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PCE_PROTMASK 0
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ * Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ * Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+ the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+/* Cached policy */
+static IMG_UINT32 gui32CachedPolicy;
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_BOOL bPhysBusAbove32Bit = IMG_FALSE;
+
+ if (RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, PHYS_BUS_WIDTH))
+ {
+ bPhysBusAbove32Bit = RGX_GET_FEATURE_VALUE(psDevInfo, PHYS_BUS_WIDTH) > 32;
+ }
+
+ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL]);
+
+ /*
+ * Setup sRGXMMUPCEConfig, no PC in MIPS MMU currently
+ */
+ sRGXMMUPCEConfig.uiBytesPerEntry = 0; /* No PC entries on the MIPS MMU */
+ sRGXMMUPCEConfig.uiAddrMask = 0; /* Mask to get significant address bits of PC entry */
+
+ sRGXMMUPCEConfig.uiAddrShift = 0; /* Shift this many bits to get PD address in PC entry */
+ sRGXMMUPCEConfig.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE; /* Alignment of PD AND PC */
+
+ sRGXMMUPCEConfig.uiProtMask = RGX_MIPS_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits of the PC */
+ sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to have status bits starting with bit 0 */
+
+ sRGXMMUPCEConfig.uiValidEnMask = RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+ sRGXMMUPCEConfig.uiValidEnShift = RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to have entry valid bit starting with bit 0 */
+
+ /*
+ * Setup sRGXMMUTopLevelDevVAddrConfig
+ */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = 0; /* Get the PC address bits from a 40 bit virt. address (in a 64bit UINT) */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = 0;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = 0;
+
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = 0; /* Get the PD address bits from a 40 bit virt. address (in a 64bit UINT) */
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = 0;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = 0;
+
+ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexMask = IMG_UINT64_C(0xfffffff000); /* Get the PT address bits from a 40 bit virt. address (in a 64bit UINT) */
+ sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPT = RGX_FIRMWARE_RAW_HEAP_SIZE >> sRGXMMUTopLevelDevVAddrConfig.uiPTIndexShift;
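+
+ /* Example decode under this top-level config: for a firmware virtual
+    address within the 4kB-page raw heap, bits [11:0] are the page offset
+    and bits [39:12] (masked by uiPTIndexMask) select the page-table entry;
+    there is no PC or PD level to walk on the MIPS MMU */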
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_4KBDP. No PD in MIPS MMU currently
+ */
+ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 0;
+
+ /* No PD used for MIPS */
+ sRGXMMUPDEConfig_4KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x0);
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MIPS_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_4KBDP.
+ */
+ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 1 << RGXMIPSFW_LOG2_PTE_ENTRY_SIZE;
+
+
+ if (bPhysBusAbove32Bit)
+ {
+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK_ABOVE_32BIT;
+ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY_ABOVE_32BIT;
+ }
+ else
+ {
+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = RGXMIPSFW_ENTRYLO_PFN_MASK;
+ gui32CachedPolicy = RGXMIPSFW_CACHED_POLICY;
+ }
+
+ sRGXMMUPTEConfig_4KBDP.uiAddrShift = RGXMIPSFW_ENTRYLO_PFN_SHIFT;
+ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = (IMG_UINT32)RGXMIPSFW_LOG2_PAGE_SIZE;
+
+ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGXMIPSFW_ENTRYLO_DVG | ~RGXMIPSFW_ENTRYLO_CACHE_POLICY_CLRMSK;
+ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGXMIPSFW_ENTRYLO_VALID_EN;
+ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGXMIPSFW_ENTRYLO_VALID_SHIFT;
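+
+ /* Sketch of the resulting 4kB PTE layout (MIPS EntryLo-style; the exact
+    bit positions come from the RGXMIPSFW_ENTRYLO_* constants): the physical
+    frame number sits under uiAddrMask at uiAddrShift, with the cache
+    policy, dirty, valid and global bits packed below it */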
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_4KBDP
+ */
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = 0;
+
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = RGX_FIRMWARE_RAW_HEAP_SIZE >> sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift;
+
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = RGX_FIRMWARE_RAW_HEAP_BASE & IMG_UINT64_C(0x00ffffffff);
+
+ /*
+ * Setup gsPageSizeConfig4KB
+ */
+ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+ gsPageSizeConfig4KB.uiRefCount = 0;
+ gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_16KBDP
+ */
+ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 0; /* These are for a page directory ENTRY, meaning the address of a PT cropped to suit the PD */
+ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the page tables NOT directories */
+
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_16KBDP. Not supported yet
+ */
+ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_16KBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 0; /* These are for a page table ENTRY, meaning the address of a PAGE cropped to suit the PD */
+ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 0; /* Alignment of the pages NOT tables */
+
+ sRGXMMUPTEConfig_16KBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_16KBDP
+ */
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig16KB
+ */
+ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+ gsPageSizeConfig16KB.uiRefCount = 0;
+ gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size. Not supported yet
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_64KBDP
+ */
+ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_64KBDP.
+ *
+ */
+ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_64KBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_64KBDP.
+ */
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig64KB.
+ */
+ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+ gsPageSizeConfig64KB.uiRefCount = 0;
+ gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size. Not supported yet
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_256KBDP
+ */
+ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP
+ */
+ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_256KBDP
+ */
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig256KB
+ */
+ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+ gsPageSizeConfig256KB.uiRefCount = 0;
+ gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_1MBDP. Not supported yet
+ */
+ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_1MBDP
+ */
+ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_1MBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_1MBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_1MBDP
+ */
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig1MB
+ */
+ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+ gsPageSizeConfig1MB.uiRefCount = 0;
+ gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_2MBDP. Not supported yet
+ */
+ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiAddrMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 0;
+ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiProtMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = 0;
+ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUPTEConfig_2MBDP
+ */
+ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiAddrMask = 0;
+ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 0;
+ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiProtMask = 0;
+ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = 0;
+ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = 0;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_2MBDP
+ */
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = 0;
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = 0;
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = 0;
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig2MB
+ */
+ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+ gsPageSizeConfig2MB.uiRefCount = 0;
+ gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUDeviceAttributes
+ */
+ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_MIPS_MICROAPTIV;
+ sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_1;
+ /* The page table fits in a single physical page whose size equals that
+    of the page table itself */
+ sRGXMMUDeviceAttributes.ui32BaseAlign = RGXMIPSFW_LOG2_PAGETABLE_PAGE_SIZE;
+ /* The base configuration is set to 4kB pages */
+ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPTEConfig_4KBDP;
+ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+ /* Functions for deriving page table/dir/cat protection bits */
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+ /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+ on per-heap basis */
+ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+ psDeviceNode->psFirmwareMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+ return PVRSRV_OK;
+}
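+
+/* Note that every structure registered above is static file-scope state, so
+ * this init path implicitly assumes a single MIPS-firmware device instance
+ * per driver; a second RGXMipsMMUInit_Register() call would overwrite the
+ * same configuration.
+ */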
+
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+ psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+ psDeviceNode->psFirmwareMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+ gsPageSizeConfig4KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+ gsPageSizeConfig4KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+ gsPageSizeConfig16KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+ gsPageSizeConfig16KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+ gsPageSizeConfig64KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+ gsPageSizeConfig64KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+ gsPageSizeConfig256KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+ gsPageSizeConfig256KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+ gsPageSizeConfig1MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+ gsPageSizeConfig1MB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+ gsPageSizeConfig2MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+ gsPageSizeConfig2MB.uiRefCount));
+#endif
+ if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+ gsPageSizeConfig16KB.uiRefCount > 0 ||
+ gsPageSizeConfig64KB.uiRefCount > 0 ||
+ gsPageSizeConfig256KB.uiRefCount > 0 ||
+ gsPageSizeConfig1MB.uiRefCount > 0 ||
+ gsPageSizeConfig2MB.uiRefCount > 0
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt4
+@Description calculate the PCE protection flags based on a 4 byte entry
+@Return IMG_UINT32 PCE protection bits
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt8
+@Description calculate the PCE protection flags based on an 8 byte entry
+@Return IMG_UINT64 PCE protection bits
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "Page Catalog not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt4
+@Description derive the PDE protection flags based on a 4 byte entry
+@Return IMG_UINT32 PDE protection bits
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt8
+@Description derive the PDE protection flags based on an 8 byte entry
+
+@Input uiLog2DataPageSize The log2 of the required page size.
+ E.g., for 4KiB pages, this parameter must be 12.
+ For 2MiB pages, it must be set to 21.
+
+@Return IMG_UINT64 PDE protection bits
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+ PVR_DPF((PVR_DBG_ERROR, "Page Directory not supported on MIPS MMU"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt4
+@Description calculate the PTE protection flags based on a 4 byte entry
+@Return IMG_UINT32 PTE protection bits
+*/ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+ IMG_UINT32 ui32MMUFlags = 0;
+
+ if(((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+ {
+ /* read/write */
+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_DIRTY_EN;
+ }
+ else if(MMU_PROTFLAGS_READABLE & uiProtFlags)
+ {
+ /* read only */
+ }
+ else if(MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+ {
+ /* write only */
+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_READ_INHIBIT_EN;
+ }
+ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: neither read nor write specified..."));
+ }
+
+ /* cache coherency */
+ if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt4: cache coherency not supported for MIPS caches"));
+ }
+
+ /* cache setup */
+ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+ {
+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_UNCACHED;
+ }
+ else
+ {
+ ui32MMUFlags |= gui32CachedPolicy <<
+ RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT;
+ }
+
+ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+ {
+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_VALID_EN;
+ ui32MMUFlags |= RGXMIPSFW_ENTRYLO_GLOBAL_EN;
+ }
+
+ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+ {
+ /* PVR_DPF((PVR_DBG_WARNING, "RGXDerivePTEProt4: PMMETA Protect not existent for MIPS, option discarded")); */
+ }
+
+ return ui32MMUFlags;
+}
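+
+/* Example: a readable+writeable, cached, valid mapping request yields
+ * RGXMIPSFW_ENTRYLO_DIRTY_EN |
+ * (gui32CachedPolicy << RGXMIPSFW_ENTRYLO_CACHE_POLICY_SHIFT) |
+ * RGXMIPSFW_ENTRYLO_VALID_EN | RGXMIPSFW_ENTRYLO_GLOBAL_EN,
+ * the "dirty" bit doubling as the MIPS write-enable bit.
+ */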
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt8
+@Description calculate the PTE protection flags based on an 8 byte entry
+@Return IMG_UINT64 PTE protection bits
+*/ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "8-byte PTE not supported on this device"));
+
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXGetPageSizeConfigCB
+@Description Set up configuration for variable sized data pages.
+ RGXPutPageSizeConfigCB has to be called to ensure correct
+ refcounting.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv)
+{
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
+ /* FW will use 4k and GPU 64k */
+ case RGXMIPSFW_LOG2_PAGE_SIZE:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Refer caller's pointers to the data */
+ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ /* Increment ref-count - not that we're allocating anything here
+ (these are static structs), but one day we might, so we want
+ the Get/Put code to be balanced properly */
+	psPageSizeConfig->uiRefCount++;
+
+ /* This is purely for debug statistics */
+ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+ psPageSizeConfig->uiRefCount);
+#endif
+
+ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+                configurations set in RGXGetPageSizeConfigCB. This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+ IMG_UINT32 uiLog2DataPageSize;
+
+ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGXMIPSFW_LOG2_PAGE_SIZE_64K:
+ /* FW will use 4k and GPU 64k */
+ case RGXMIPSFW_LOG2_PAGE_SIZE:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Ref-count here is not especially useful, but it's an extra
+ check that the API is being used correctly */
+	psPageSizeConfig->uiRefCount--;
+#else
+ PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32PDE);
+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui64PDE);
+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+ PVR_DPF((PVR_DBG_ERROR, "PDE not supported on MIPS"));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.h b/drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.h
new file mode 100644
index 00000000000000..8d59c189b3e8ac
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxmipsmmuinit.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation for the MIPS firmware
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily. It exists solely
+   for the linkage between rgxinit.c and rgxmipsmmuinit.c, the former
+ being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMIPSMMUINIT_H_
+#define _SRVKM_RGXMIPSMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+#include "rgx_mips.h"
+
+/*
+
+	Labelling of fields within the virtual address. No PD or PC is currently
+	used for the MIPS MMU.
+*/
+/*
+Page Table entry #
+*/
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_SHIFT (12U)
+#define RGX_MIPS_MMUCTRL_VADDR_PT_INDEX_CLRMSK		(IMG_UINT64_C(0xFFFFFFFF00000FFF))
+
+
+/* PC entries related definitions */
+/* No PC is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_EN (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_VALID_CLRMSK (0U)
+
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_CLRMSK (0U)
+#define RGX_MIPS_MMUCTRL_PC_DATA_READ_ONLY_EN (0U)
+
+/* PD entries related definitions */
+/* No PD is currently used for MIPS MMU */
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_EN (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_VALID_CLRMSK (0U)
+
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_SHIFT (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_CLRMSK (0U)
+#define RGX_MIPS_MMUCTRL_PD_DATA_READ_ONLY_EN (0U)
+
+
+PVRSRV_ERROR RGXMipsMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMipsMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMIPSMMUINIT_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.c b/drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.c
new file mode 100644
index 00000000000000..0e7dd464bb0db0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.c
@@ -0,0 +1,1076 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+#include "rgxmmuinit.h"
+#include "rgxmmudefs_km.h"
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "pdump_mmu.h"
+
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "rgx_memallocflags.h"
+#include "rgx_heaps.h"
+#include "pdump_km.h"
+
+
+/* useful macros */
+/* units represented in a bitfield */
+#define UNITS_IN_BITFIELD(Mask, Shift)	(((Mask) >> (Shift)) + 1)
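+/* Illustrative example: a hypothetical index field occupying bits 20..12 has
+   Mask 0x001ff000 and Shift 12, so UNITS_IN_BITFIELD(0x001ff000, 12)
+   evaluates to (0x1ff + 1) == 512 entries. */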
+
+
+/*
+ * Bits of PT, PD and PC not involving addresses
+ */
+
+#define RGX_MMUCTRL_PTE_PROTMASK (RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN | \
+ RGX_MMUCTRL_PT_DATA_ENTRY_PENDING_EN | \
+ RGX_MMUCTRL_PT_DATA_PM_SRC_EN | \
+ RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN | \
+ RGX_MMUCTRL_PT_DATA_CC_EN | \
+ RGX_MMUCTRL_PT_DATA_READ_ONLY_EN | \
+ RGX_MMUCTRL_PT_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PDE_PROTMASK (RGX_MMUCTRL_PD_DATA_ENTRY_PENDING_EN | \
+ ~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK | \
+ RGX_MMUCTRL_PD_DATA_VALID_EN)
+
+#define RGX_MMUCTRL_PCE_PROTMASK (RGX_MMUCTRL_PC_DATA_ENTRY_PENDING_EN | \
+ RGX_MMUCTRL_PC_DATA_VALID_EN)
+
+
+
+static MMU_PxE_CONFIG sRGXMMUPCEConfig;
+static MMU_DEVVADDR_CONFIG sRGXMMUTopLevelDevVAddrConfig;
+
+
+/*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_4KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_4KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_4KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig4KB;
+
+
+/*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_16KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_16KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_16KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig16KB;
+
+
+/*
+ *
+ * Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_64KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_64KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_64KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig64KB;
+
+
+/*
+ *
+ * Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_256KBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_256KBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_256KBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig256KB;
+
+
+/*
+ *
+ * Configuration for heaps with 1MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_1MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_1MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_1MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig1MB;
+
+
+/*
+ *
+ * Configuration for heaps with 2MB Data-Page size
+ *
+ */
+
+static MMU_PxE_CONFIG sRGXMMUPDEConfig_2MBDP;
+static MMU_PxE_CONFIG sRGXMMUPTEConfig_2MBDP;
+static MMU_DEVVADDR_CONFIG sRGXMMUDevVAddrConfig_2MBDP;
+static MMU_PAGESIZECONFIG gsPageSizeConfig2MB;
+
+
+/* Forward declaration of protection bits derivation functions, for
+ the following structure */
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags);
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize);
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags);
+
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv);
+
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv);
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize);
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize);
+
+static MMU_DEVICEATTRIBS sRGXMMUDeviceAttributes;
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ /* Setup of Px Entries:
+ *
+ *
+ * PAGE TABLE (8 Byte):
+ *
+ * | 62 | 61...40 | 39...12 (varies) | 11...6 | 5 | 4 | 3 | 2 | 1 | 0 |
+ * | PM/Meta protect | VP Page (39:18) | Physical Page | VP Page (17:12) | Entry Pending | PM src | SLC Bypass Ctrl | Cache Coherency | Read Only | Valid |
+ *
+ *
+ * PAGE DIRECTORY (8 Byte):
+ *
+ * | 40 | 39...5 (varies) | 4 | 3...1 | 0 |
+ * | Entry Pending | Page Table base address | (reserved) | Page Size | Valid |
+ *
+ *
+ * PAGE CATALOGUE (4 Byte):
+ *
+ * | 31...4 | 3...2 | 1 | 0 |
+ * | Page Directory base address | (reserved) | Entry Pending | Valid |
+ *
+ */
+
+
+ /* Example how to get the PD address from a PC entry.
+ * The procedure is the same for PD and PT entries to retrieve PT and Page addresses:
+ *
+ * 1) sRGXMMUPCEConfig.uiAddrMask applied to PC entry with '&':
+ * | 31...4 | 3...2 | 1 | 0 |
+ * | PD Addr | 0 | 0 | 0 |
+ *
+ * 2) sRGXMMUPCEConfig.uiAddrShift applied with '>>':
+ * | 27...0 |
+ * | PD Addr |
+ *
+ * 3) sRGXMMUPCEConfig.uiAddrLog2Align applied with '<<':
+ * | 39...0 |
+ * | PD Addr |
+ *
+ */
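+	/* Worked example of the three steps above, using the sRGXMMUPCEConfig
+	 * values set below (uiAddrMask 0xfffffff0, uiAddrShift 4,
+	 * uiAddrLog2Align 12) and a hypothetical PC entry value 0x00abcde1
+	 * (valid bit set):
+	 *
+	 *   0x00abcde1 & 0xfffffff0  ->  0x00abcde0   (step 1)
+	 *   0x00abcde0 >> 4          ->  0x000abcde   (step 2)
+	 *   0x000abcde << 12         ->  0xabcde000   (step 3: 40-bit PD base)
+	 */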
+
+
+ sRGXMMUDeviceAttributes.pszMMUPxPDumpMemSpaceName =
+ PhysHeapPDumpMemspaceName(psDeviceNode->apsPhysHeap[PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL]);
+
+ /*
+ * Setup sRGXMMUPCEConfig
+ */
+ sRGXMMUPCEConfig.uiBytesPerEntry = 4; /* 32 bit entries */
+ sRGXMMUPCEConfig.uiAddrMask = 0xfffffff0; /* Mask to get significant address bits of PC entry i.e. the address of the PD */
+
+ sRGXMMUPCEConfig.uiAddrShift = 4; /* Shift this many bits to get PD address */
+ sRGXMMUPCEConfig.uiAddrLog2Align = 12; /* Alignment of PD physical addresses. */
+
+	sRGXMMUPCEConfig.uiProtMask = RGX_MMUCTRL_PCE_PROTMASK; /* Mask to get the status bits (pending | valid) */
+	sRGXMMUPCEConfig.uiProtShift = 0; /* Shift this many bits to get the status bits */
+
+ sRGXMMUPCEConfig.uiValidEnMask = RGX_MMUCTRL_PC_DATA_VALID_EN; /* Mask to get entry valid bit of the PC */
+ sRGXMMUPCEConfig.uiValidEnShift = RGX_MMUCTRL_PC_DATA_VALID_SHIFT; /* Shift this many bits to get entry valid bit */
+
+ /*
+ * Setup sRGXMMUTopLevelDevVAddrConfig
+ */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK; /* Mask to get PC index applied to a 40 bit virt. device address */
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PC index */
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPCIndexMask,
+ sRGXMMUTopLevelDevVAddrConfig.uiPCIndexShift));
+
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK; /* Mask to get PD index applied to a 40 bit virt. device address */
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT; /* Shift a 40 bit virt. device address by this amount to get the PD index */
+ sRGXMMUTopLevelDevVAddrConfig.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUTopLevelDevVAddrConfig.uiPDIndexMask,
+ sRGXMMUTopLevelDevVAddrConfig.uiPDIndexShift));
+
+ /*
+ *
+ * Configuration for heaps with 4kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_4KBDP
+ */
+ sRGXMMUPDEConfig_4KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_4KBDP.uiAddrShift = 12;
+ sRGXMMUPDEConfig_4KBDP.uiAddrLog2Align = 12;
+
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_4KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_4KBDP
+ */
+ sRGXMMUPTEConfig_4KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_4KBDP.uiAddrMask = IMG_UINT64_C(0xfffffff000);
+ sRGXMMUPTEConfig_4KBDP.uiAddrShift = 12;
+ sRGXMMUPTEConfig_4KBDP.uiAddrLog2Align = 12; /* Alignment of the physical addresses of the pages NOT PTs */
+
+ sRGXMMUPTEConfig_4KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_4KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_4KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_4KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_4KBDP
+ */
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_4KBDP.uiPCIndexShift));
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_4KBDP.uiPDIndexShift));
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask = ~RGX_MMUCTRL_VADDR_PT_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift = RGX_MMUCTRL_VADDR_PT_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_4KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_4KBDP.uiPTIndexShift));
+
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000000fff);
+ sRGXMMUDevVAddrConfig_4KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_4KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig4KB
+ */
+ gsPageSizeConfig4KB.psPDEConfig = &sRGXMMUPDEConfig_4KBDP;
+ gsPageSizeConfig4KB.psPTEConfig = &sRGXMMUPTEConfig_4KBDP;
+ gsPageSizeConfig4KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_4KBDP;
+ gsPageSizeConfig4KB.uiRefCount = 0;
+ gsPageSizeConfig4KB.uiMaxRefCount = 0;
+
+
+ /*
+ *
+ * Configuration for heaps with 16kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_16KBDP
+ */
+ sRGXMMUPDEConfig_16KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_16KBDP.uiAddrShift = 10;
+ sRGXMMUPDEConfig_16KBDP.uiAddrLog2Align = 10;
+
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_16KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_16KBDP
+ */
+ sRGXMMUPTEConfig_16KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_16KBDP.uiAddrMask = IMG_UINT64_C(0xffffffc000);
+ sRGXMMUPTEConfig_16KBDP.uiAddrShift = 14;
+ sRGXMMUPTEConfig_16KBDP.uiAddrLog2Align = 14;
+
+ sRGXMMUPTEConfig_16KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_16KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_16KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_16KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_16KBDP
+ */
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_16KBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_16KBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001fc000);
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift = 14;
+ sRGXMMUDevVAddrConfig_16KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_16KBDP.uiPTIndexShift));
+
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetMask = IMG_UINT64_C(0x0000003fff);
+ sRGXMMUDevVAddrConfig_16KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_16KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig16KB
+ */
+ gsPageSizeConfig16KB.psPDEConfig = &sRGXMMUPDEConfig_16KBDP;
+ gsPageSizeConfig16KB.psPTEConfig = &sRGXMMUPTEConfig_16KBDP;
+ gsPageSizeConfig16KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_16KBDP;
+ gsPageSizeConfig16KB.uiRefCount = 0;
+ gsPageSizeConfig16KB.uiMaxRefCount = 0;
+
+
+ /*
+ *
+ * Configuration for heaps with 64kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_64KBDP
+ */
+ sRGXMMUPDEConfig_64KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_64KBDP.uiAddrShift = 8;
+ sRGXMMUPDEConfig_64KBDP.uiAddrLog2Align = 8;
+
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_64KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_64KBDP
+ */
+ sRGXMMUPTEConfig_64KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_64KBDP.uiAddrMask = IMG_UINT64_C(0xffffff0000);
+	sRGXMMUPTEConfig_64KBDP.uiAddrShift = 16;
+ sRGXMMUPTEConfig_64KBDP.uiAddrLog2Align = 16;
+
+ sRGXMMUPTEConfig_64KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_64KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_64KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_64KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_64KBDP
+ */
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_64KBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_64KBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001f0000);
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift = 16;
+ sRGXMMUDevVAddrConfig_64KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_64KBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000000ffff);
+ sRGXMMUDevVAddrConfig_64KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_64KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig64KB
+ */
+ gsPageSizeConfig64KB.psPDEConfig = &sRGXMMUPDEConfig_64KBDP;
+ gsPageSizeConfig64KB.psPTEConfig = &sRGXMMUPTEConfig_64KBDP;
+ gsPageSizeConfig64KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_64KBDP;
+ gsPageSizeConfig64KB.uiRefCount = 0;
+ gsPageSizeConfig64KB.uiMaxRefCount = 0;
+
+
+ /*
+ *
+ * Configuration for heaps with 256kB Data-Page size
+ *
+ */
+
+ /*
+ * Setup sRGXMMUPDEConfig_256KBDP
+ */
+ sRGXMMUPDEConfig_256KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ sRGXMMUPDEConfig_256KBDP.uiAddrShift = 6;
+ sRGXMMUPDEConfig_256KBDP.uiAddrLog2Align = 6;
+
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_256KBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+	 * Setup sRGXMMUPTEConfig_256KBDP
+ */
+ sRGXMMUPTEConfig_256KBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_256KBDP.uiAddrMask = IMG_UINT64_C(0xfffffc0000);
+ sRGXMMUPTEConfig_256KBDP.uiAddrShift = 18;
+ sRGXMMUPTEConfig_256KBDP.uiAddrLog2Align = 18;
+
+ sRGXMMUPTEConfig_256KBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_256KBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_256KBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_256KBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_256KBDP
+ */
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_256KBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_256KBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask = IMG_UINT64_C(0x00001c0000);
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift = 18;
+ sRGXMMUDevVAddrConfig_256KBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_256KBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetMask = IMG_UINT64_C(0x000003ffff);
+ sRGXMMUDevVAddrConfig_256KBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_256KBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig256KB
+ */
+ gsPageSizeConfig256KB.psPDEConfig = &sRGXMMUPDEConfig_256KBDP;
+ gsPageSizeConfig256KB.psPTEConfig = &sRGXMMUPTEConfig_256KBDP;
+ gsPageSizeConfig256KB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_256KBDP;
+ gsPageSizeConfig256KB.uiRefCount = 0;
+ gsPageSizeConfig256KB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_1MBDP
+ */
+ sRGXMMUPDEConfig_1MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ /*
+ * The hardware requires page tables to be 1<<6 = 64-byte aligned even
+ * if they contain fewer entries.
+ */
+ sRGXMMUPDEConfig_1MBDP.uiAddrShift = 6;
+ sRGXMMUPDEConfig_1MBDP.uiAddrLog2Align = 6;
+
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_1MBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_1MBDP
+ */
+ sRGXMMUPTEConfig_1MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_1MBDP.uiAddrMask = IMG_UINT64_C(0xfffff00000);
+ sRGXMMUPTEConfig_1MBDP.uiAddrShift = 20;
+ sRGXMMUPTEConfig_1MBDP.uiAddrLog2Align = 20;
+
+ sRGXMMUPTEConfig_1MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_1MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_1MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_1MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_1MBDP
+ */
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_1MBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_1MBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000100000);
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift = 20;
+ sRGXMMUDevVAddrConfig_1MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_1MBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00000fffff);
+ sRGXMMUDevVAddrConfig_1MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_1MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig1MB
+ */
+ gsPageSizeConfig1MB.psPDEConfig = &sRGXMMUPDEConfig_1MBDP;
+ gsPageSizeConfig1MB.psPTEConfig = &sRGXMMUPTEConfig_1MBDP;
+ gsPageSizeConfig1MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_1MBDP;
+ gsPageSizeConfig1MB.uiRefCount = 0;
+ gsPageSizeConfig1MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUPDEConfig_2MBDP
+ */
+ sRGXMMUPDEConfig_2MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPDEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xfffffffff0);
+ /*
+ * The hardware requires page tables to be 1<<6 = 64-byte aligned even
+ * if they contain fewer entries.
+ */
+ sRGXMMUPDEConfig_2MBDP.uiAddrShift = 6;
+ sRGXMMUPDEConfig_2MBDP.uiAddrLog2Align = 6;
+
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlMask = IMG_UINT64_C(0x000000000e);
+ sRGXMMUPDEConfig_2MBDP.uiVarCtrlShift = 1;
+
+ sRGXMMUPDEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PDE_PROTMASK;
+ sRGXMMUPDEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPDEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PD_DATA_VALID_EN;
+ sRGXMMUPDEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PD_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUPTEConfig_2MBDP
+ */
+ sRGXMMUPTEConfig_2MBDP.uiBytesPerEntry = 8;
+
+ sRGXMMUPTEConfig_2MBDP.uiAddrMask = IMG_UINT64_C(0xffffe00000);
+ sRGXMMUPTEConfig_2MBDP.uiAddrShift = 21;
+ sRGXMMUPTEConfig_2MBDP.uiAddrLog2Align = 21;
+
+ sRGXMMUPTEConfig_2MBDP.uiProtMask = RGX_MMUCTRL_PTE_PROTMASK;
+ sRGXMMUPTEConfig_2MBDP.uiProtShift = 0;
+
+ sRGXMMUPTEConfig_2MBDP.uiValidEnMask = RGX_MMUCTRL_PT_DATA_VALID_EN;
+ sRGXMMUPTEConfig_2MBDP.uiValidEnShift = RGX_MMUCTRL_PT_DATA_VALID_SHIFT;
+
+ /*
+ * Setup sRGXMMUDevVAddrConfig_2MBDP
+ */
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask = ~RGX_MMUCTRL_VADDR_PC_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift = RGX_MMUCTRL_VADDR_PC_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPC = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexMask,
+ sRGXMMUDevVAddrConfig_2MBDP.uiPCIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask = ~RGX_MMUCTRL_VADDR_PD_INDEX_CLRMSK;
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift = RGX_MMUCTRL_VADDR_PD_INDEX_SHIFT;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPD = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexMask,
+ sRGXMMUDevVAddrConfig_2MBDP.uiPDIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask = IMG_UINT64_C(0x0000000000);
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift = 21;
+ sRGXMMUDevVAddrConfig_2MBDP.uiNumEntriesPT = TRUNCATE_64BITS_TO_32BITS(UNITS_IN_BITFIELD(sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexMask,
+ sRGXMMUDevVAddrConfig_2MBDP.uiPTIndexShift));
+
+
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetMask = IMG_UINT64_C(0x00001fffff);
+ sRGXMMUDevVAddrConfig_2MBDP.uiPageOffsetShift = 0;
+ sRGXMMUDevVAddrConfig_2MBDP.uiOffsetInBytes = 0;
+
+ /*
+ * Setup gsPageSizeConfig2MB
+ */
+ gsPageSizeConfig2MB.psPDEConfig = &sRGXMMUPDEConfig_2MBDP;
+ gsPageSizeConfig2MB.psPTEConfig = &sRGXMMUPTEConfig_2MBDP;
+ gsPageSizeConfig2MB.psDevVAddrConfig = &sRGXMMUDevVAddrConfig_2MBDP;
+ gsPageSizeConfig2MB.uiRefCount = 0;
+ gsPageSizeConfig2MB.uiMaxRefCount = 0;
+
+ /*
+ * Setup sRGXMMUDeviceAttributes
+ */
+ sRGXMMUDeviceAttributes.eMMUType = PDUMP_MMU_TYPE_VARPAGE_40BIT;
+ sRGXMMUDeviceAttributes.eTopLevel = MMU_LEVEL_3;
+ sRGXMMUDeviceAttributes.ui32BaseAlign = RGX_MMUCTRL_PC_DATA_PD_BASE_ALIGNSHIFT;
+ sRGXMMUDeviceAttributes.psBaseConfig = &sRGXMMUPCEConfig;
+ sRGXMMUDeviceAttributes.psTopLevelDevVAddrConfig = &sRGXMMUTopLevelDevVAddrConfig;
+
+ /* Functions for deriving page table/dir/cat protection bits */
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt8 = RGXDerivePCEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePCEProt4 = RGXDerivePCEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt8 = RGXDerivePDEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePDEProt4 = RGXDerivePDEProt4;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt8 = RGXDerivePTEProt8;
+ sRGXMMUDeviceAttributes.pfnDerivePTEProt4 = RGXDerivePTEProt4;
+
+ /* Functions for establishing configurations for PDE/PTE/DEVVADDR
+	   on a per-heap basis */
+ sRGXMMUDeviceAttributes.pfnGetPageSizeConfiguration = RGXGetPageSizeConfigCB;
+ sRGXMMUDeviceAttributes.pfnPutPageSizeConfiguration = RGXPutPageSizeConfigCB;
+
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE4 = RGXGetPageSizeFromPDE4;
+ sRGXMMUDeviceAttributes.pfnGetPageSizeFromPDE8 = RGXGetPageSizeFromPDE8;
+
+ psDeviceNode->psMMUDevAttrs = &sRGXMMUDeviceAttributes;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRV_OK;
+
+#if defined(PDUMP)
+ psDeviceNode->pfnMMUGetContextID = NULL;
+#endif
+
+ psDeviceNode->psMMUDevAttrs = NULL;
+
+#if defined(DEBUG)
+ PVR_DPF((PVR_DBG_MESSAGE, "Variable Page Size Heap Stats:"));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 4K page heaps: %d",
+ gsPageSizeConfig4KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 4K page heaps (should be 0): %d",
+ gsPageSizeConfig4KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 16K page heaps: %d",
+ gsPageSizeConfig16KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 16K page heaps (should be 0): %d",
+ gsPageSizeConfig16KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 64K page heaps: %d",
+ gsPageSizeConfig64KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 64K page heaps (should be 0): %d",
+ gsPageSizeConfig64KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 256K page heaps: %d",
+ gsPageSizeConfig256KB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 256K page heaps (should be 0): %d",
+ gsPageSizeConfig256KB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 1M page heaps: %d",
+ gsPageSizeConfig1MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 1M page heaps (should be 0): %d",
+ gsPageSizeConfig1MB.uiRefCount));
+ PVR_DPF((PVR_DBG_MESSAGE, "Max 2M page heaps: %d",
+ gsPageSizeConfig2MB.uiMaxRefCount));
+ PVR_DPF((PVR_DBG_VERBOSE, "Current 2M page heaps (should be 0): %d",
+ gsPageSizeConfig2MB.uiRefCount));
+#endif
+ if (gsPageSizeConfig4KB.uiRefCount > 0 ||
+ gsPageSizeConfig16KB.uiRefCount > 0 ||
+ gsPageSizeConfig64KB.uiRefCount > 0 ||
+ gsPageSizeConfig256KB.uiRefCount > 0 ||
+ gsPageSizeConfig1MB.uiRefCount > 0 ||
+ gsPageSizeConfig2MB.uiRefCount > 0
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXMMUInit_Unregister: Unbalanced MMU API Usage (Internal error)"));
+ }
+
+ return eError;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt4
+@Description    Calculate the PCE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePCEProt4(IMG_UINT32 uiProtFlags)
+{
+	return (uiProtFlags & MMU_PROTFLAGS_INVALID) ? 0 : RGX_MMUCTRL_PC_DATA_VALID_EN;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePCEProt8
+@Description    Calculate the PCE protection flags based on an 8-byte entry
+@Return         IMG_UINT64 Always 0 (8-byte PCEs are not supported on this device)
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePCEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ PVR_DPF((PVR_DBG_ERROR, "8-byte PCE not supported on this device"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt4
+@Description    Derive the PDE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 Always 0 (4-byte PDEs are not supported on this device)
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePDEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+ return 0;
+}
+
+
+/*************************************************************************/ /*!
+@Function RGXDerivePDEProt8
+@Description    Derive the PDE protection flags based on an 8-byte entry
+
+@Input uiLog2DataPageSize The log2 of the required page size.
+ E.g, for 4KiB pages, this parameter must be 12.
+ For 2MiB pages, it must be set to 21.
+
+@Return         IMG_UINT64 The derived protection flags (0 if the entry is invalid)
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePDEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT64 ret_value = 0; /* 0 means invalid */
+
+	if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0) /* if not invalid */
+ {
+ switch (uiLog2DataPageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB;
+ break;
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB;
+ break;
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB;
+ break;
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB;
+ break;
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB;
+ break;
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ ret_value = RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+			         "%s:%d: in function <%s>: invalid log2 page size; expected one of {12, 14, 16, 18, 20, 21}, got %u",
+ __FILE__, __LINE__, __FUNCTION__, uiLog2DataPageSize));
+ }
+ }
+ return ret_value;
+}
+
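+/* Illustrative example: for a valid 4 KiB mapping (uiProtFlags without
+ * MMU_PROTFLAGS_INVALID, uiLog2DataPageSize == 12) the function above
+ * returns RGX_MMUCTRL_PD_DATA_VALID_EN | RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB. */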
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt4
+@Description    Calculate the PTE protection flags based on a 4-byte entry
+@Return         IMG_UINT32 Always 0 (4-byte PTEs are not supported on this device)
+ */ /**************************************************************************/
+static IMG_UINT32 RGXDerivePTEProt4(IMG_UINT32 uiProtFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(uiProtFlags);
+ PVR_DPF((PVR_DBG_ERROR, "4-byte PTE not supported on this device"));
+
+ return 0;
+}
+
+/*************************************************************************/ /*!
+@Function RGXDerivePTEProt8
+@Description    Calculate the PTE protection flags based on an 8-byte entry
+@Return         IMG_UINT64 The derived protection flags
+ */ /**************************************************************************/
+static IMG_UINT64 RGXDerivePTEProt8(IMG_UINT32 uiProtFlags, IMG_UINT32 uiLog2DataPageSize)
+{
+	IMG_UINT64 ui64MMUFlags = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uiLog2DataPageSize);
+
+ if(((MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE) & uiProtFlags) == (MMU_PROTFLAGS_READABLE|MMU_PROTFLAGS_WRITEABLE))
+ {
+ /* read/write */
+ }
+ else if(MMU_PROTFLAGS_READABLE & uiProtFlags)
+ {
+ /* read only */
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_READ_ONLY_EN;
+ }
+ else if(MMU_PROTFLAGS_WRITEABLE & uiProtFlags)
+ {
+ /* write only */
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: write-only is not possible on this device"));
+ }
+ else if ((MMU_PROTFLAGS_INVALID & uiProtFlags) == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXDerivePTEProt8: neither read nor write specified..."));
+ }
+
+ /* cache coherency */
+ if(MMU_PROTFLAGS_CACHE_COHERENT & uiProtFlags)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_CC_EN;
+ }
+
+ /* cache setup */
+ if ((MMU_PROTFLAGS_CACHED & uiProtFlags) == 0)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_SLC_BYPASS_CTRL_EN;
+ }
+
+ if ((uiProtFlags & MMU_PROTFLAGS_INVALID) == 0)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_VALID_EN;
+ }
+
+ if (MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT) & uiProtFlags)
+ {
+ ui64MMUFlags |= RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN;
+ }
+
+ return ui64MMUFlags;
+}
+
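+/* Illustrative worked example: assuming uiProtFlags contains
+ * MMU_PROTFLAGS_READABLE, MMU_PROTFLAGS_WRITEABLE, MMU_PROTFLAGS_CACHED
+ * and MMU_PROTFLAGS_DEVICE(PMMETA_PROTECT), the derivation above yields
+ *
+ *     RGX_MMUCTRL_PT_DATA_VALID_EN | RGX_MMUCTRL_PT_DATA_PM_META_PROTECT_EN
+ *
+ * i.e. no READ_ONLY bit (the page is writeable), no SLC-bypass bit (the
+ * page is cached) and no CC bit (cache coherency was not requested).
+ */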
+
+/*************************************************************************/ /*!
+@Function       RGXGetPageSizeConfigCB
+@Description Set up configuration for variable sized data pages.
+ RGXPutPageSizeConfigCB has to be called to ensure correct
+ refcounting.
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXGetPageSizeConfigCB(IMG_UINT32 uiLog2DataPageSize,
+ const MMU_PxE_CONFIG **ppsMMUPDEConfig,
+ const MMU_PxE_CONFIG **ppsMMUPTEConfig,
+ const MMU_DEVVADDR_CONFIG **ppsMMUDevVAddrConfig,
+ IMG_HANDLE *phPriv)
+{
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig16KB;
+ break;
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig64KB;
+ break;
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig256KB;
+ break;
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig1MB;
+ break;
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig2MB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGetPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Refer caller's pointers to the data */
+ *ppsMMUPDEConfig = psPageSizeConfig->psPDEConfig;
+ *ppsMMUPTEConfig = psPageSizeConfig->psPTEConfig;
+ *ppsMMUDevVAddrConfig = psPageSizeConfig->psDevVAddrConfig;
+
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ /* Increment ref-count - not that we're allocating anything here
+ (I'm using static structs), but one day we might, so we want
+ the Get/Put code to be balanced properly */
+	psPageSizeConfig->uiRefCount++;
+
+ /* This is purely for debug statistics */
+ psPageSizeConfig->uiMaxRefCount = MAX(psPageSizeConfig->uiMaxRefCount,
+ psPageSizeConfig->uiRefCount);
+#endif
+
+ *phPriv = (IMG_HANDLE)(uintptr_t)uiLog2DataPageSize;
+ PVR_ASSERT (uiLog2DataPageSize == (IMG_UINT32)(uintptr_t)*phPriv);
+
+ return PVRSRV_OK;
+}
+
+/*************************************************************************/ /*!
+@Function       RGXPutPageSizeConfigCB
+@Description    Tells this code that the MMU module is done with the
+                configurations set in RGXGetPageSizeConfigCB. This can
+                be a no-op.
+                Called after RGXGetPageSizeConfigCB.
+@Return PVRSRV_ERROR
+ */ /**************************************************************************/
+static PVRSRV_ERROR RGXPutPageSizeConfigCB(IMG_HANDLE hPriv)
+{
+#if defined(SUPPORT_MMU_PAGESIZECONFIG_REFCOUNT)
+ MMU_PAGESIZECONFIG *psPageSizeConfig;
+ IMG_UINT32 uiLog2DataPageSize;
+
+ uiLog2DataPageSize = (IMG_UINT32)(uintptr_t) hPriv;
+
+ switch (uiLog2DataPageSize)
+ {
+ case RGX_HEAP_4KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig4KB;
+ break;
+ case RGX_HEAP_16KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig16KB;
+ break;
+ case RGX_HEAP_64KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig64KB;
+ break;
+ case RGX_HEAP_256KB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig256KB;
+ break;
+ case RGX_HEAP_1MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig1MB;
+ break;
+ case RGX_HEAP_2MB_PAGE_SHIFT:
+ psPageSizeConfig = &gsPageSizeConfig2MB;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPutPageSizeConfigCB: Invalid Data Page Size 1<<0x%x",
+ uiLog2DataPageSize));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+
+ /* Ref-count here is not especially useful, but it's an extra
+ check that the API is being used correctly */
+	psPageSizeConfig->uiRefCount--;
+#else
+ PVR_UNREFERENCED_PARAMETER(hPriv);
+#endif
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE4(IMG_UINT32 ui32PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32PDE);
+ PVR_UNREFERENCED_PARAMETER(pui32Log2PageSize);
+ PVR_DPF((PVR_DBG_ERROR, "4-byte PDE not supported on this device"));
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+}
+
+static PVRSRV_ERROR RGXGetPageSizeFromPDE8(IMG_UINT64 ui64PDE, IMG_UINT32 *pui32Log2PageSize)
+{
+ switch (ui64PDE & (~RGX_MMUCTRL_PD_DATA_PAGE_SIZE_CLRMSK))
+ {
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_4KB:
+ *pui32Log2PageSize = RGX_HEAP_4KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_16KB:
+ *pui32Log2PageSize = RGX_HEAP_16KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_64KB:
+ *pui32Log2PageSize = RGX_HEAP_64KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_256KB:
+ *pui32Log2PageSize = RGX_HEAP_256KB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_1MB:
+ *pui32Log2PageSize = RGX_HEAP_1MB_PAGE_SHIFT;
+ break;
+ case RGX_MMUCTRL_PD_DATA_PAGE_SIZE_2MB:
+ *pui32Log2PageSize = RGX_HEAP_2MB_PAGE_SHIFT;
+ break;
+ default:
+ return PVRSRV_ERROR_MMU_INVALID_PAGE_SIZE_FOR_DEVICE;
+ }
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.h b/drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.h
new file mode 100644
index 00000000000000..48fd722eaaf66f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxmmuinit.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific MMU initialisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* NB: this file is not to be included arbitrarily. It exists solely
+ for the linkage between rgxinit.c and rgxmmuinit.c, the former
+ being otherwise cluttered by the contents of the latter */
+
+#ifndef _SRVKM_RGXMMUINIT_H_
+#define _SRVKM_RGXMMUINIT_H_
+
+#include "device.h"
+#include "img_types.h"
+#include "mmu_common.h"
+#include "img_defs.h"
+
+PVRSRV_ERROR RGXMMUInit_Register(PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR RGXMMUInit_Unregister(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* #ifndef _SRVKM_RGXMMUINIT_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxpdump.c b/drivers/gpu/drm/img-rogue/1.10/rgxpdump.c
new file mode 100644
index 00000000000000..46077d9650577b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxpdump.c
@@ -0,0 +1,439 @@
+/*************************************************************************/ /*!
+@File rgxpdump.c
+@Title Device specific pdump routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific pdump functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(PDUMP)
+#include "pvrsrv.h"
+#include "devicemem_pdump.h"
+#include "rgxpdump.h"
+#include "rgx_bvnc_defs_km.h"
+#include <pdumpdesc.h>
+
+/*
+ * There are two different sets of functions, one for META and one for MIPS,
+ * because the PDump player does not yet implement support for the MIPS MMU.
+ * So for MIPS builds we cannot use DevmemPDumpSaveToFileVirtual and have to
+ * use DevmemPDumpSaveToFile instead.
+ */
+static PVRSRV_ERROR _MetaDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ /* TA signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigTAChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigTAChecksSize,
+ "out.tasig",
+ 0,
+ ui32PDumpFlags);
+
+ /* 3D signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSig3DChecksMemDesc,
+ 0,
+ psDevInfo->ui32Sig3DChecksSize,
+ "out.3dsig",
+ 0,
+ ui32PDumpFlags);
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ /* RT signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigRTChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigRTChecksSize,
+ "out.rtsig",
+ 0,
+ ui32PDumpFlags);
+ /* SH signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWSigSHChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigSHChecksSize,
+ "out.shsig",
+ 0,
+ ui32PDumpFlags);
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _MetaDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ /* Dump trace buffers */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+ for(ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+ {
+ /*
+ * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+ * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is
+ * "expression must have a constant value".
+ */
+ const IMG_DEVMEM_OFFSET_T uiTraceBufThreadNumOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+		/* trace pointer (ui32TracePointer) */
+ ui32Size = sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ uiTraceBufThreadNumOff,
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset,
+ ui32PDumpFlags);
+ ui32OutFileOffset += ui32Size;
+
+ /* trace buffer */
+ ui32Size = RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32);
+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+ 0, /* 0 offset in the trace buffer mem desc */
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset,
+ ui32PDumpFlags);
+ ui32OutFileOffset += ui32Size;
+
+ /* assert info buffer */
+ ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ offsetof(RGXFWIF_TRACEBUF, sTraceBuf) /* move to first element of sTraceBuf */
+ + ui32ThreadNum * sizeof(RGXFWIF_TRACEBUF_SPACE) /* skip required number of sTraceBuf elements */
+ + offsetof(RGXFWIF_TRACEBUF_SPACE, sAssertBuf), /* offset into its sAssertBuf, to be pdumped */
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset,
+ ui32PDumpFlags);
+ ui32OutFileOffset += ui32Size;
+ }
+
+ /* FW HWPerf buffer is always allocated when PDUMP is defined, irrespective of HWPerf events being enabled/disabled */
+ PVR_ASSERT(psDevInfo->psRGXFWIfHWPerfBufMemDesc);
+
+ /* Dump hwperf buffer */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+ DevmemPDumpSaveToFileVirtual(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ 0,
+ psDevInfo->ui32RGXFWIfHWPerfBufSize,
+ "out.hwperf",
+ 0,
+ ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
+
+static PVRSRV_ERROR _MipsDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* TA signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump TA signatures and checksums Buffer");
+
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigTAChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigTAChecksSize,
+ "out.tasig",
+ 0);
+
+ /* 3D signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump 3D signatures and checksums Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSig3DChecksMemDesc,
+ 0,
+ psDevInfo->ui32Sig3DChecksSize,
+ "out.3dsig",
+ 0);
+#if defined(RGX_FEATURE_RAY_TRACING)
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ /* RT signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump RTU signatures and checksums Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigRTChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigRTChecksSize,
+ "out.rtsig",
+ 0);
+
+ /* SH signatures */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump SHG signatures and checksums Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWSigSHChecksMemDesc,
+ 0,
+ psDevInfo->ui32SigSHChecksSize,
+ "out.shsig",
+ 0);
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _MipsDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ IMG_UINT32 ui32ThreadNum, ui32Size, ui32OutFileOffset;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ /* Dump trace buffers */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump trace buffers");
+ for (ui32ThreadNum = 0, ui32OutFileOffset = 0; ui32ThreadNum < RGXFW_THREAD_NUM; ui32ThreadNum++)
+ {
+ /*
+ * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+ * a non-const variable in the expression, which needs to be const. The typical compiler error produced is
+ * "expression must have a constant value".
+ */
+ const IMG_DEVMEM_OFFSET_T uiTraceBufOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF *)0)->sTraceBuf[ui32ThreadNum]);
+
+ /* Same again... */
+ const IMG_DEVMEM_OFFSET_T uiTraceBufSpaceAssertBufOff
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_TRACEBUF_SPACE *)0)->sAssertBuf);
+
+ /* per-thread trace pointer (ui32TracePointer) */
+ ui32Size = sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ uiTraceBufOff,
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset);
+ ui32OutFileOffset += ui32Size;
+
+ /* trace buffer */
+ ui32Size = RGXFW_TRACE_BUFFER_SIZE * sizeof(IMG_UINT32);
+ PVR_ASSERT(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum]);
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufferMemDesc[ui32ThreadNum],
+ 0, /* 0 offset in the trace buffer mem desc */
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset);
+ ui32OutFileOffset += ui32Size;
+
+ /* assert info buffer (two message strings plus a line number) */
+ ui32Size = RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + RGXFW_TRACE_BUFFER_ASSERT_SIZE * sizeof(IMG_CHAR)
+ + sizeof(IMG_UINT32);
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfTraceBufCtlMemDesc,
+ uiTraceBufOff + uiTraceBufSpaceAssertBufOff,
+ ui32Size,
+ "out.trace",
+ ui32OutFileOffset);
+ ui32OutFileOffset += ui32Size;
+ }
+
+ /* Dump hwperf buffer */
+ PDumpCommentWithFlags(ui32PDumpFlags, "** Dump HWPerf Buffer");
+ DevmemPDumpSaveToFile(psDevInfo->psRGXFWIfHWPerfBufMemDesc,
+ 0,
+ psDevInfo->ui32RGXFWIfHWPerfBufSize,
+ "out.hwperf",
+ 0);
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVPDumpSignatureBufferKM
+ */
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ if( (psDeviceNode->pfnCheckDeviceFeature) &&
+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+ {
+ return _MipsDumpSignatureBufferKM(psConnection,
+ psDeviceNode,
+ ui32PDumpFlags);
+ }
+ else
+ {
+ return _MetaDumpSignatureBufferKM(psConnection,
+ psDeviceNode,
+ ui32PDumpFlags);
+ }
+}
+
+
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ if( (psDeviceNode->pfnCheckDeviceFeature) &&
+ PVRSRV_IS_FEATURE_SUPPORTED(psDeviceNode, MIPS))
+ {
+ return _MipsDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+ }
+ else
+ {
+ return _MetaDumpTraceBufferKM(psConnection, psDeviceNode, ui32PDumpFlags);
+ }
+}
+
+PVRSRV_ERROR RGXPDumpOutputImageHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_PBYTE pbyPDumpImageHdr)
+{
+ IMG_PUINT32 pui32Word;
+ IMG_UINT32 ui32HeaderDataSize;
+
+ pui32Word = (IMG_PUINT32) pbyPDumpImageHdr;
+ pui32Word[0] = (IMAGE_HEADER_TYPE << HEADER_WORD0_TYPE_SHIFT);
+ pui32Word[1] = (IMAGE_HEADER_SIZE << HEADER_WORD1_SIZE_SHIFT) |
+ (IMAGE_HEADER_VERSION << HEADER_WORD1_VERSION_SHIFT);
+
+ ui32HeaderDataSize = ui32DataSize;
+ if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+ {
+ ui32HeaderDataSize += ui32HeaderSize;
+ }
+ pui32Word[2] = ui32HeaderDataSize << HEADER_WORD2_DATA_SIZE_SHIFT;
+
+ pui32Word[3] = ui32LogicalWidth << IMAGE_HEADER_WORD3_LOGICAL_WIDTH_SHIFT;
+ pui32Word[4] = ui32LogicalHeight << IMAGE_HEADER_WORD4_LOGICAL_HEIGHT_SHIFT;
+
+ pui32Word[5] = ePixFmt << IMAGE_HEADER_WORD5_FORMAT_SHIFT;
+
+ pui32Word[6] = ui32PhysicalWidth << IMAGE_HEADER_WORD6_PHYSICAL_WIDTH_SHIFT;
+ pui32Word[7] = ui32PhysicalHeight << IMAGE_HEADER_WORD7_PHYSICAL_HEIGHT_SHIFT;
+
+ pui32Word[8] = IMAGE_HEADER_WORD8_STRIDE_POSITIVE | IMAGE_HEADER_WORD8_BIFTYPE_NONE;
+
+ switch (eMemLayout)
+ {
+ case IMG_MEMLAYOUT_STRIDED:
+ pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_STRIDED;
+ break;
+ case IMG_MEMLAYOUT_TWIDDLED:
+ pui32Word[8] |= IMAGE_HEADER_WORD8_TWIDDLING_NTWIDDLE;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "Unsupported memory layout - %d", eMemLayout));
+ return PVRSRV_ERROR_UNSUPPORTED_MEMORY_LAYOUT;
+ }
+
+ pui32Word[9] = 0;
+ if (eFBCompression != IMG_FB_COMPRESSION_NONE)
+ {
+ switch (PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FBCDC_ALGORITHM))
+ {
+ case 1:
+ pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_BASE;
+ break;
+ case 2:
+ pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V2;
+ break;
+ case 3:
+ pui32Word[9] |= IMAGE_HEADER_WORD9_FBCCOMPAT_V3_REMAP;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "Unsupported algorithm - %d",
+ PVRSRV_GET_DEVICE_FEATURE_VALUE(psDeviceNode, FBCDC_ALGORITHM)));
+ return PVRSRV_ERROR_NOT_ENABLED;
+ }
+ }
+
+ switch (eFBCompression)
+ {
+ case IMG_FB_COMPRESSION_NONE:
+ break;
+ case IMG_FB_COMPRESSION_DIRECT_8x8:
+ pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_8X8;
+ pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+ break;
+ case IMG_FB_COMPRESSION_DIRECT_16x4:
+ pui32Word[8] |= IMAGE_HEADER_WORD8_FBCTYPE_16x4;
+ pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+ break;
+ case IMG_FB_COMPRESSION_DIRECT_32x2:
+ pui32Word[9] |= IMAGE_HEADER_WORD9_FBCDECOR_ENABLE;
+ break;
+ default:
+ PVR_DPF((PVR_DBG_ERROR, "Unsupported compression mode - %d", eFBCompression));
+ return PVRSRV_ERROR_UNSUPPORTED_FB_COMPRESSION_MODE;
+ }
+
+ pui32Word[10] = paui32FBCClearColour[0];
+ pui32Word[11] = paui32FBCClearColour[1];
+ pui32Word[12] = paui32FBCClearColour[2];
+ pui32Word[13] = paui32FBCClearColour[3];
+
+ return PVRSRV_OK;
+}
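+
+#if 0
+/*
+ * Illustrative sketch (hypothetical caller, not from the driver): emitting a
+ * header for a simple strided, uncompressed 1024x768 32bpp image. The pixel
+ * format is taken as a parameter to avoid assuming a particular enum value.
+ */
+static PVRSRV_ERROR DemoOutputImageHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PDUMP_PIXEL_FORMAT ePixFmt)
+{
+ const IMG_UINT32 aui32ClearColour[4] = { 0, 0, 0, 0 };
+ IMG_UINT32 aui32Hdr[14]; /* words 0..13 are written by the function */
+
+ return RGXPDumpOutputImageHdr(psDeviceNode,
+ 0, /* no compression header */
+ 1024 * 768 * 4, /* data size in bytes */
+ 1024, 768, /* logical width/height */
+ 1024, 768, /* physical width/height */
+ ePixFmt,
+ IMG_MEMLAYOUT_STRIDED,
+ IMG_FB_COMPRESSION_NONE,
+ aui32ClearColour,
+ (IMG_PBYTE)aui32Hdr);
+}
+#endif
+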
+#endif /* PDUMP */
+
+/******************************************************************************
+ End of file (rgxpdump.c)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxpdump.h b/drivers/gpu/drm/img-rogue/1.10/rgxpdump.h
new file mode 100644
index 00000000000000..e9d860313ce038
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxpdump.h
@@ -0,0 +1,141 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX pdump Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX pdump functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "rgxdevice.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+
+#if defined(PDUMP)
+/*!
+******************************************************************************
+
+ @Function PVRSRVPDumpSignatureBufferKM
+
+ @Description
+
+ Dumps TA and 3D signature and checksum buffers
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function PVRSRVPDumpTraceBufferKM
+
+ @Description
+
+ Dumps the firmware trace buffers and the HWPerf buffer
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*!
+******************************************************************************
+
+ @Function RGXPDumpOutputImageHdr
+
+ @Description
+
+ Dumps the header for an OutputImage command
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXPDumpOutputImageHdr(PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32HeaderSize,
+ IMG_UINT32 ui32DataSize,
+ IMG_UINT32 ui32LogicalWidth,
+ IMG_UINT32 ui32LogicalHeight,
+ IMG_UINT32 ui32PhysicalWidth,
+ IMG_UINT32 ui32PhysicalHeight,
+ PDUMP_PIXEL_FORMAT ePixFmt,
+ IMG_MEMLAYOUT eMemLayout,
+ IMG_FB_COMPRESSION eFBCompression,
+ const IMG_UINT32 *paui32FBCClearColour,
+ IMG_PBYTE pbyPDumpImageHdr);
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpSignatureBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpSignatureBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVPDumpTraceBufferKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVPDumpTraceBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+
+#endif /* __RGXPDUMP_H__ */
+/******************************************************************************
+ End of file (rgxpdump.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxpower.c b/drivers/gpu/drm/img-rogue/1.10/rgxpower.c
new file mode 100644
index 00000000000000..a9c78764695231
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxpower.c
@@ -0,0 +1,969 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific power routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "rgxpower.h"
+#include "rgxinit.h"
+#include "rgx_fwif_km.h"
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "rgxdebug.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "rgxtimecorr.h"
+#include "devicemem_utils.h"
+#include "htbserver.h"
+#include "rgxstartstop.h"
+#include "sync.h"
+#include "lists.h"
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+#include "process_stats.h"
+#endif
+#if defined(PVR_DVFS)
+#include "pvr_dvfs_device.h"
+#endif
+
+static PVRSRV_ERROR RGXFWNotifyHostTimeout(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sCmd;
+
+ /* Send the Timeout notification to the FW */
+ sCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+ sCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_HOST_TIMEOUT;
+
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sCmd,
+ sizeof(sCmd),
+ PDUMP_FLAGS_NONE);
+
+ return eError;
+}
+
+static void _RGXUpdateGPUUtilStats(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGXFWIF_GPU_UTIL_FWCB *psUtilFWCb;
+ IMG_UINT64 *paui64StatsCounters;
+ IMG_UINT64 ui64LastPeriod;
+ IMG_UINT64 ui64LastState;
+ IMG_UINT64 ui64LastTime;
+ IMG_UINT64 ui64TimeNow;
+
+ psUtilFWCb = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ paui64StatsCounters = &psUtilFWCb->aui64StatsCounters[0];
+
+ OSLockAcquire(psDevInfo->hGPUUtilLock);
+
+ ui64TimeNow = RGXFWIF_GPU_UTIL_GET_TIME(RGXTimeCorrGetClockns64());
+
+ /* Update counters to account for the time since the last update */
+ ui64LastState = RGXFWIF_GPU_UTIL_GET_STATE(psUtilFWCb->ui64LastWord);
+ ui64LastTime = RGXFWIF_GPU_UTIL_GET_TIME(psUtilFWCb->ui64LastWord);
+ ui64LastPeriod = RGXFWIF_GPU_UTIL_GET_PERIOD(ui64TimeNow, ui64LastTime);
+ paui64StatsCounters[ui64LastState] += ui64LastPeriod;
+
+ /* Update state and time of the latest update */
+ psUtilFWCb->ui64LastWord = RGXFWIF_GPU_UTIL_MAKE_WORD(ui64TimeNow, ui64LastState);
+
+ OSLockRelease(psDevInfo->hGPUUtilLock);
+}
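+
+#if 0
+/*
+ * Illustrative sketch (assumed layout, for the example only): the host and FW
+ * share a single 64-bit "last word" packing the timestamp of the last state
+ * change together with the state itself, so both are updated atomically. The
+ * real bit layout is defined by the RGXFWIF_GPU_UTIL_* macros; here the low
+ * two bits are assumed to carry the state.
+ */
+#define DEMO_GPU_UTIL_STATE_MASK 0x3ULL
+
+static IMG_UINT64 DemoMakeWord(IMG_UINT64 ui64Time, IMG_UINT64 ui64State)
+{
+ return (ui64Time & ~DEMO_GPU_UTIL_STATE_MASK) | (ui64State & DEMO_GPU_UTIL_STATE_MASK);
+}
+
+static IMG_UINT64 DemoGetState(IMG_UINT64 ui64Word)
+{
+ return ui64Word & DEMO_GPU_UTIL_STATE_MASK;
+}
+#endif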
+
+static INLINE PVRSRV_ERROR RGXDoStop(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ if (psDevConfig->pfnTDRGXStop == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPrePowerState: TDRGXStop not implemented!"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psDevConfig->pfnTDRGXStop(psDevConfig->hSysData);
+#else
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ eError = RGXStop(&psDevInfo->sLayerParams);
+#endif
+
+ return eError;
+}
+
+/*
+ RGXPrePowerState
+*/
+PVRSRV_ERROR RGXPrePowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eNewPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sPowCmd;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* Send the Power off request to the FW */
+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_OFF_REQ;
+ sPowCmd.uCmdData.sPowData.uPoweReqData.bForced = bForced;
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ return eError;
+ }
+
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sPowCmd,
+ sizeof(sPowCmd),
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to send Power off request"));
+ return eError;
+ }
+
+ /* Wait for the firmware to complete processing. PVRSRVWaitForValueKM cannot be used here as it
+ relies on the EventObject, which is signalled by this MISR */
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+ /* Check the Power state after the answer */
+ if (eError == PVRSRV_OK)
+ {
+ /* Finally, de-initialise some registers. */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+#if !defined(NO_HARDWARE)
+ IMG_UINT32 ui32TID;
+ for (ui32TID = 0; ui32TID < RGXFW_THREAD_NUM; ui32TID++)
+ {
+ /* Wait for the pending META/MIPS-to-host interrupts to come back. */
+ eError = PVRSRVPollForValueKM((IMG_UINT32 __iomem *)&psDevInfo->aui32SampleIRQCount[ui32TID],
+ psFWTraceBuf->aui32InterruptCount[ui32TID],
+ 0xffffffff);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, \
+ "RGXPrePowerState: Wait for pending interrupts failed. Thread %u: Host:%u, FW: %u", \
+ ui32TID, \
+ psDevInfo->aui32SampleIRQCount[ui32TID], \
+ psFWTraceBuf->aui32InterruptCount[ui32TID]));
+
+ RGX_WaitForInterruptsTimeout(psDevInfo);
+ break;
+ }
+ }
+#endif /* NO_HARDWARE */
+
+ /* Update GPU frequency and timer correlation related data */
+ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+ /* Update GPU state counters */
+ _RGXUpdateGPUUtilStats(psDevInfo);
+
+#if defined(PVR_DVFS)
+ eError = SuspendDVFS();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failed to suspend DVFS"));
+ return eError;
+ }
+#endif
+
+ psDevInfo->bRGXPowered = IMG_FALSE;
+
+ eError = RGXDoStop(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ /* Power-down failures are logged, but treated as successful since the power was removed. */
+ PVR_DPF((PVR_DBG_WARNING, "RGXPrePowerState: RGXDoStop failed (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ psDevInfo->ui32ActivePMReqNonIdle++;
+ eError = PVRSRV_OK;
+ }
+ }
+ else
+ {
+ /* the sync was updated but the pow state isn't off -> the FW denied the transition */
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+
+ if (bForced)
+ { /* It is an error for a forced request to be denied */
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Failure to power off during a forced power off. FW: %d", psFWTraceBuf->ePowState));
+ }
+ }
+ }
+ else if (eError == PVRSRV_ERROR_TIMEOUT)
+ {
+ /* timeout waiting for the FW to ack the request: return timeout */
+ PVR_DPF((PVR_DBG_WARNING,"RGXPrePowerState: Timeout waiting for power off ack from the FW"));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPrePowerState: Error waiting for power off ack from the FW (%s)", PVRSRVGetErrorStringKM(eError)));
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_FAILURE;
+ }
+
+ }
+
+ return eError;
+}
+
+static INLINE PVRSRV_ERROR RGXDoStart(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+
+ if (psDevConfig->pfnTDRGXStart == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: TDRGXStart not implemented!"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ eError = psDevConfig->pfnTDRGXStart(psDevConfig->hSysData);
+#else
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ eError = RGXStart(&psDevInfo->sLayerParams);
+#endif
+
+ return eError;
+}
+
+/*
+ RGXPostPowerState
+*/
+PVRSRV_ERROR RGXPostPowerState (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced)
+{
+ if ((eNewPowerState != eCurrentPowerState) &&
+ (eCurrentPowerState != PVRSRV_DEV_POWER_STATE_ON))
+ {
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_INIT *psRGXFWInit;
+
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ psDevInfo->bRGXPowered = IMG_TRUE;
+ return PVRSRV_OK;
+ }
+
+ if (eCurrentPowerState == PVRSRV_DEV_POWER_STATE_OFF)
+ {
+ /* Update timer correlation related data */
+ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_POWER);
+
+ /* Update GPU state counters */
+ _RGXUpdateGPUUtilStats(psDevInfo);
+
+ eError = RGXDoStart(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: RGXDoStart failed"));
+ return eError;
+ }
+
+ OSMemoryBarrier();
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+ eError = ValidateFWImageWithSP(psDevInfo);
+ if (eError != PVRSRV_OK) return eError;
+#endif
+
+ eError = DevmemAcquireCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc,
+ (void **)&psRGXFWInit);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPostPowerState: Failed to acquire kernel fw if ctl (%u)",
+ eError));
+ return eError;
+ }
+
+ /*
+ * Check whether the FW has started by polling on bFirmwareStarted flag
+ */
+ if (PVRSRVPollForValueKM((IMG_UINT32 __iomem *)&psRGXFWInit->bFirmwareStarted,
+ IMG_TRUE,
+ 0xFFFFFFFF) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXPostPowerState: Polling for 'FW started' flag failed."));
+ eError = PVRSRV_ERROR_TIMEOUT;
+
+ /*
+ * If the poll on bFirmwareStarted fails, some information may be gained from
+ * the debug dump below. Unfortunately this is potentially dangerous if the
+ * reason for not booting is that the GPU power is not on. However, since we
+ * have reached this point the System Layer has returned without errors, so we
+ * assume the GPU power is indeed on.
+ */
+ RGXDumpRGXDebugSummary(NULL, NULL, psDeviceNode->pvDevice, IMG_TRUE);
+ RGXDumpRGXRegisters(NULL, NULL, psDeviceNode->pvDevice);
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ return eError;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Wait for the Firmware to start.");
+ eError = DevmemPDumpDevmemPol32(psDevInfo->psRGXFWIfInitMemDesc,
+ offsetof(RGXFWIF_INIT, bFirmwareStarted),
+ IMG_TRUE,
+ 0xFFFFFFFFU,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXPostPowerState: problem pdumping POL for psRGXFWIfInitMemDesc (%d)",
+ eError));
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+ return eError;
+ }
+#endif
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ SetFirmwareStartTime(psRGXFWInit->ui32FirmwareStartedTimeStamp);
+#endif
+
+ HTBSyncPartitionMarker(psRGXFWInit->ui32MarkerVal);
+
+ DevmemReleaseCpuVirtAddr(psDevInfo->psRGXFWIfInitMemDesc);
+
+ psDevInfo->bRGXPowered = IMG_TRUE;
+
+#if defined(PVR_DVFS)
+ eError = ResumeDVFS();
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXPostPowerState: Failed to resume DVFS"));
+ return eError;
+ }
+#endif
+ }
+ }
+
+ PDUMPCOMMENT("RGXPostPowerState: Current state: %d, New state: %d", eCurrentPowerState, eNewPowerState);
+
+ return PVRSRV_OK;
+}
+
+/*
+ RGXPreClockSpeedChange
+*/
+PVRSRV_ERROR RGXPreClockSpeedChange (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+ PVR_UNREFERENCED_PARAMETER(psRGXData);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RGXPreClockSpeedChange: RGX clock speed was %uHz",
+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+
+ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+ && (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+ {
+ /* Update GPU frequency and timer correlation related data */
+ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+ }
+
+ return eError;
+}
+
+/*
+ RGXPostClockSpeedChange
+*/
+PVRSRV_ERROR RGXPostClockSpeedChange (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ IMG_UINT32 ui32NewClockSpeed = psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ /* Update runtime configuration with the new value */
+ psDevInfo->psRGXFWIfRuntimeCfg->ui32CoreClockSpeed = ui32NewClockSpeed;
+
+ if ((eCurrentPowerState != PVRSRV_DEV_POWER_STATE_OFF)
+ && (psFWTraceBuf->ePowState != RGXFWIF_POW_OFF))
+ {
+ RGXFWIF_KCCB_CMD sCOREClkSpeedChangeCmd;
+
+ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+
+ sCOREClkSpeedChangeCmd.eCmdType = RGXFWIF_KCCB_CMD_CORECLKSPEEDCHANGE;
+ sCOREClkSpeedChangeCmd.uCmdData.sCORECLKSPEEDCHANGEData.ui32NewClockSpeed = ui32NewClockSpeed;
+
+ /* Ensure the new clock speed is written to memory before requesting the FW to read it */
+ OSMemoryBarrier();
+
+ PDUMPCOMMENT("Scheduling CORE clock speed change command");
+
+ PDUMPPOWCMDSTART();
+ eError = RGXSendCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sCOREClkSpeedChangeCmd,
+ sizeof(sCOREClkSpeedChangeCmd),
+ PDUMP_FLAGS_NONE);
+ PDUMPPOWCMDEND();
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("Scheduling CORE clock speed change command failed");
+ PVR_DPF((PVR_DBG_ERROR, "RGXPostClockSpeedChange: Scheduling KCCB command failed. Error:%u", eError));
+ return eError;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RGXPostClockSpeedChange: RGX clock speed changed to %uHz",
+ psRGXData->psRGXTimingInfo->ui32CoreClockSpeed));
+ }
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function RGXDustCountChange
+
+ @Description
+
+ Changes the number of DUSTs
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32NumberOfDusts)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sDustCountChange;
+ IMG_UINT32 ui32MaxAvailableDusts = psDevInfo->sDevFeatureCfg.ui32MAXDustCount;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ if (ui32NumberOfDusts > ui32MaxAvailableDusts)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDustCountChange: Invalid number of DUSTs (%u) while expecting value within <0,%u>. Error:%u",
+ ui32NumberOfDusts,
+ ui32MaxAvailableDusts,
+ eError));
+ return eError;
+ }
+
+ #if defined(FIX_HW_BRN_59042)
+ if (ui32NumberOfDusts < ui32MaxAvailableDusts && (ui32NumberOfDusts & 0x1))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDustCountChange: Invalid number of DUSTs (%u) due to HW restriction. Allowed values are :-",
+ ui32NumberOfDusts));
+ switch (ui32MaxAvailableDusts)
+ {
+ case 2: PVR_DPF((PVR_DBG_ERROR, "0, 2")); break;
+ case 3: PVR_DPF((PVR_DBG_ERROR, "0, 2, 3")); break;
+ case 4: PVR_DPF((PVR_DBG_ERROR, "0, 2, 4")); break;
+ case 5: PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 5")); break;
+ case 6: PVR_DPF((PVR_DBG_ERROR, "0, 2, 4, 6")); break;
+ default: break;
+ }
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ #endif
+
+ psRuntimeCfg->ui32DefaultDustsNumInit = ui32NumberOfDusts;
+
+ #if !defined(NO_HARDWARE)
+ {
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+ return PVRSRV_OK;
+ }
+
+ if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+ {
+ eError = PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED;
+ PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Attempt to change dust count when not IDLE"));
+ return eError;
+ }
+ }
+ #endif
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ return eError;
+ }
+
+ sDustCountChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sDustCountChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_NUMDUST_CHANGE;
+ sDustCountChange.uCmdData.sPowData.uPoweReqData.ui32NumOfDusts = ui32NumberOfDusts;
+
+ PDUMPCOMMENT("Scheduling command to change Dust Count to %u", ui32NumberOfDusts);
+ eError = RGXSendCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sDustCountChange,
+ sizeof(sDustCountChange),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("Scheduling command to change Dust Count failed. Error:%u", eError);
+ PVR_DPF((PVR_DBG_ERROR, "RGXDustCountChange: Scheduling KCCB to change Dust Count failed. Error:%u", eError));
+ return eError;
+ }
+
+ /* Wait for the firmware to answer. */
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDustCountChange: Timeout waiting for idle request"));
+ return eError;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXDustCountChange: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*
+ @Function RGXAPMLatencyChange
+*/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ActivePMLatencyms,
+ IMG_BOOL bActivePMLatencyPersistant)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ RGXFWIF_RUNTIME_CFG *psRuntimeCfg = psDevInfo->psRGXFWIfRuntimeCfg;
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXAPMLatencyChange: Failed to acquire power lock"));
+ return eError;
+ }
+
+ /* Update runtime configuration with the new values and ensure the
+ * new APM latency is written to memory before requesting the FW to
+ * read it
+ */
+ psRuntimeCfg->ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+ psRuntimeCfg->bActivePMLatencyPersistant = bActivePMLatencyPersistant;
+ OSMemoryBarrier();
+
+ eError = PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+
+ if ((eError == PVRSRV_OK) && (ePowerState != PVRSRV_DEV_POWER_STATE_OFF))
+ {
+ RGXFWIF_KCCB_CMD sActivePMLatencyChange;
+ sActivePMLatencyChange.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sActivePMLatencyChange.uCmdData.sPowData.ePowType = RGXFWIF_POW_APM_LATENCY_CHANGE;
+ sActivePMLatencyChange.uCmdData.sPowData.uPoweReqData.ui32ActivePMLatencyms = ui32ActivePMLatencyms;
+
+ PDUMPCOMMENT("Scheduling command to change APM latency to %u", ui32ActivePMLatencyms);
+ eError = RGXSendCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sActivePMLatencyChange,
+ sizeof(sActivePMLatencyChange),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("Scheduling command to change APM latency failed. Error:%u", eError);
+ PVR_DPF((PVR_DBG_ERROR, "RGXAPMLatencyChange: Scheduling KCCB to change APM latency failed. Error:%u", eError));
+ goto ErrorExit;
+ }
+ }
+
+ErrorExit:
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ return eError;
+}
+
+/*
+ RGXActivePowerRequest
+*/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TRACEBUF *psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+ /* NOTE: If this function were to wait on an event object, an attempt should
+ be made to prevent the bridge lock from being released during the sleep.
+ The bridge lock should be held during the sleep. */
+
+ /* Powerlock to avoid further requests from racing with the FW hand-shake from now on
+ (previous kicks to this point are detected by the FW) */
+ eError = PVRSRVPowerLock(psDeviceNode);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to acquire PowerLock (device: %p, error: %s)",
+ __func__, psDeviceNode, PVRSRVGetErrorStringKM(eError)));
+ goto _RGXActivePowerRequest_PowerLock_failed;
+ }
+
+ /* Check again for IDLE once we have the power lock */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_IDLE)
+ {
+
+ psDevInfo->ui32ActivePMReqTotal++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ SetFirmwareHandshakeIdleTime(RGXReadHWTimerReg(psDevInfo)-psFWTraceBuf->ui64StartIdleTime);
+#endif
+
+ PDUMPPOWCMDSTART();
+ eError = PVRSRVSetDevicePowerStateKM(psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_OFF,
+ IMG_FALSE); /* forced */
+ PDUMPPOWCMDEND();
+
+ if (eError == PVRSRV_OK)
+ {
+ psDevInfo->ui32ActivePMReqOk++;
+ }
+ else if (eError == PVRSRV_ERROR_DEVICE_POWER_CHANGE_DENIED)
+ {
+ psDevInfo->ui32ActivePMReqDenied++;
+ }
+
+ }
+
+ PVRSRVPowerUnlock(psDeviceNode);
+
+ _RGXActivePowerRequest_PowerLock_failed:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+
+ return eError;
+}
+
+/*
+ RGXForcedIdleRequest
+*/
+
+#define RGX_FORCED_IDLE_RETRY_COUNT 10
+
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sPowCmd;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32RetryCount = 0;
+#if !defined(NO_HARDWARE)
+ RGXFWIF_TRACEBUF *psFWTraceBuf;
+#endif
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+#if !defined(NO_HARDWARE)
+ psFWTraceBuf = psDevInfo->psRGXFWIfTraceBuf;
+
+ /* Firmware already forced idle */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_FORCED_IDLE)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Firmware is not powered. Sometimes this is permitted, for instance when we were forcing idle in order to power down. */
+ if (psFWTraceBuf->ePowState == RGXFWIF_POW_OFF)
+ {
+ return (bDeviceOffPermitted) ? PVRSRV_OK : PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+ }
+#endif
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ return eError;
+ }
+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+ sPowCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_FORCE_IDLE;
+
+ PDUMPCOMMENT("RGXForcedIdleRequest: Sending forced idle command");
+
+ /* Send one forced IDLE command to GP */
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sPowCmd,
+ sizeof(sPowCmd),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Failed to send idle request"));
+ return eError;
+ }
+
+ /* Wait for GPU to finish current workload */
+ do {
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 0x1, 0xFFFFFFFF);
+ if ((eError == PVRSRV_OK) || (ui32RetryCount == RGX_FORCED_IDLE_RETRY_COUNT))
+ {
+ break;
+ }
+ ui32RetryCount++;
+ PVR_DPF((PVR_DBG_WARNING,"RGXForcedIdleRequest: Request timeout. Retry %d of %d", ui32RetryCount, RGX_FORCED_IDLE_RETRY_COUNT));
+ } while (IMG_TRUE);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXFWNotifyHostTimeout(psDevInfo);
+ PVR_DPF((PVR_DBG_ERROR,"RGXForcedIdleRequest: Idle request failed. Firmware potentially left in forced idle state"));
+ return eError;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+#endif
+
+#if !defined(NO_HARDWARE)
+ /* Check the firmware state for idleness */
+ if (psFWTraceBuf->ePowState != RGXFWIF_POW_FORCED_IDLE)
+ {
+ return PVRSRV_ERROR_DEVICE_IDLE_REQUEST_DENIED;
+ }
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*
+ RGXCancelForcedIdleRequest
+*/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_KCCB_CMD sPowCmd;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_GUEST, PVRSRV_OK);
+
+ eError = SyncPrimSet(psDevInfo->psPowSyncPrim, 0);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to set Power sync prim",
+ __FUNCTION__));
+ goto ErrorExit;
+ }
+
+ /* Send the IDLE request to the FW */
+ sPowCmd.eCmdType = RGXFWIF_KCCB_CMD_POW;
+ sPowCmd.uCmdData.sPowData.ePowType = RGXFWIF_POW_FORCED_IDLE_REQ;
+ sPowCmd.uCmdData.sPowData.uPoweReqData.ePowRequestType = RGXFWIF_POWER_CANCEL_FORCED_IDLE;
+
+ PDUMPCOMMENT("RGXForcedIdleRequest: Sending cancel forced idle command");
+
+ /* Send cancel forced IDLE command to GP */
+ eError = RGXSendCommand(psDevInfo,
+ RGXFWIF_DM_GP,
+ &sPowCmd,
+ sizeof(sPowCmd),
+ PDUMP_FLAGS_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PDUMPCOMMENT("RGXCancelForcedIdleRequest: Failed to send cancel IDLE request for DM%d", RGXFWIF_DM_GP);
+ goto ErrorExit;
+ }
+
+ /* Wait for the firmware to answer. */
+ eError = PVRSRVPollForValueKM(psDevInfo->psPowSyncPrim->pui32LinAddr, 1, 0xFFFFFFFF);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Timeout waiting for cancel idle request"));
+ goto ErrorExit;
+ }
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("RGXCancelForcedIdleRequest: Poll for Kernel SyncPrim [0x%p] on DM %d", psDevInfo->psPowSyncPrim->pui32LinAddr, RGXFWIF_DM_GP);
+
+ SyncPrimPDumpPol(psDevInfo->psPowSyncPrim,
+ 1,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+#endif
+
+ return eError;
+
+ErrorExit:
+ PVR_DPF((PVR_DBG_ERROR,"RGXCancelForcedIdleRequest: Firmware potentially left in forced idle state"));
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function RGXGetNextDustCount
+
+ @Description
+
+ Calculate a sequence of dust counts to achieve full transition coverage.
+ We increment two counts of dusts and switch up and down between them.
+ It contains a few redundant transitions. If two dusts exist, the
+ output transitions should be as follows.
+
+ 0->1, 0<-1, 0->2, 0<-2, (0->1)
+ 1->1, 1->2, 1<-2, (1->2)
+ 2->2, (2->0),
+ 0->0. Repeat.
+
+ Redundant transitions in brackets.
+
+ @Input psDustReqState : Counter state used to calculate next dust count
+ @Input ui32DustCount : Number of dusts in the core
+
+ @Return IMG_UINT32 : Next dust count in the sequence
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustReqState, IMG_UINT32 ui32DustCount)
+{
+ if (psDustReqState->bToggle)
+ {
+ psDustReqState->ui32DustCount2++;
+ }
+
+ if (psDustReqState->ui32DustCount2 > ui32DustCount)
+ {
+ psDustReqState->ui32DustCount1++;
+ psDustReqState->ui32DustCount2 = psDustReqState->ui32DustCount1;
+ }
+
+ if (psDustReqState->ui32DustCount1 > ui32DustCount)
+ {
+ psDustReqState->ui32DustCount1 = 0;
+ psDustReqState->ui32DustCount2 = 0;
+ }
+
+ psDustReqState->bToggle = !psDustReqState->bToggle;
+
+ return (psDustReqState->bToggle) ? psDustReqState->ui32DustCount1 : psDustReqState->ui32DustCount2;
+}
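+
+#if 0
+/*
+ * Illustrative sketch (hypothetical test code, not part of the driver):
+ * enumerating the sequence produced above for a two-dust core. Assumes only
+ * the RGX_DUST_STATE fields used by RGXGetNextDustCount (ui32DustCount1,
+ * ui32DustCount2, bToggle), zero-initialised.
+ */
+static void DemoDustSequence(void)
+{
+ RGX_DUST_STATE sState = { 0 };
+ IMG_UINT32 i;
+
+ for (i = 0; i < 12; i++)
+ {
+ /* Yields 0, 1, 0, 2, 0, 1, 1, 2, 1, 2, 2, 0 and then repeats,
+ matching the transition table in the function header above */
+ PVR_DPF((PVR_DBG_MESSAGE, "dust count: %u", RGXGetNextDustCount(&sState, 2)));
+ }
+}
+#endif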
+
+/******************************************************************************
+ End of file (rgxpower.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxpower.h b/drivers/gpu/drm/img-rogue/1.10/rgxpower.h
new file mode 100644
index 00000000000000..073dd47164b8b2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxpower.h
@@ -0,0 +1,245 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX power header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX power
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXPOWER_H__)
+#define __RGXPOWER_H__
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "servicesext.h"
+#include "rgxdevice.h"
+
+
+/*!
+******************************************************************************
+
+ @Function RGXPrePowerState
+
+ @Description
+
+ Does necessary preparation before a power state transition.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eNewPowerState : New power state
+ @Input eCurrentPowerState : Current power state
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPrePowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+/*!
+******************************************************************************
+
+ @Function RGXPostPowerState
+
+ @Description
+
+ Does necessary processing after a power state transition.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eNewPowerState : New power state
+ @Input eCurrentPowerState : Current power state
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostPowerState(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXPreClockSpeedChange
+
+ @Description
+
+ Does processing required before an RGX clock speed change.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eCurrentPowerState : Power state of the device
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPreClockSpeedChange(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+/*!
+******************************************************************************
+
+ @Function RGXPostClockSpeedChange
+
+ @Description
+
+ Does processing required after an RGX clock speed change.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eCurrentPowerState : Power state of the device
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXPostClockSpeedChange(IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+
+/*!
+******************************************************************************
+
+ @Function RGXDustCountChange
+
+ @Description Changes the number of DUSTs
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32NumberOfDusts : Number of DUSTs to make transition to
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXDustCountChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32NumberOfDusts);
+
+/*!
+******************************************************************************
+
+ @Function RGXAPMLatencyChange
+
+ @Description
+
+ Changes the wait duration used before firmware indicates IDLE.
+ Reducing this value will cause the firmware to shut off faster and
+ more often but may increase bubbles in GPU scheduling due to the added
+ power management activity. If bActivePMLatencyPersistant is NOT set, the
+ APM latency will revert to the system default on power up.
+
+ @Input hDevHandle : RGX Device Node
+ @Input ui32ActivePMLatencyms : Number of milliseconds to wait
+ @Input bActivePMLatencyPersistant : Set to ensure the new value is not reset
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXAPMLatencyChange(IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32ActivePMLatencyms,
+ IMG_BOOL bActivePMLatencyPersistant);
+
+/*!
+******************************************************************************
+
+ @Function RGXActivePowerRequest
+
+ @Description Initiate a handshake with the FW to power off the GPU
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXActivePowerRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXForcedIdleRequest
+
+ @Description Initiate a handshake with the FW to idle the GPU
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXForcedIdleRequest(IMG_HANDLE hDevHandle, IMG_BOOL bDeviceOffPermitted);
+
+/*!
+******************************************************************************
+
+ @Function RGXCancelForcedIdleRequest
+
+ @Description Send a request to cancel idle to the firmware.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return PVRSRV_ERROR :
+
+******************************************************************************/
+PVRSRV_ERROR RGXCancelForcedIdleRequest(IMG_HANDLE hDevHandle);
+
+/*!
+******************************************************************************
+
+ @Function RGXGetNextDustCount
+
+ @Description
+
+ Calculate a sequence of dust counts to achieve full transition coverage.
+ We increment two counts of dusts and switch up and down between them.
+ It contains a few redundant transitions. If two dusts exist, the
+ output transitions should be as follows.
+
+ 0->1, 0<-1, 0->2, 0<-2, (0->1)
+ 1->1, 1->2, 1<-2, (1->2)
+ 2->2, (2->0),
+ 0->0. Repeat.
+
+ Redundant transitions in brackets.
+
+ @Input psDustReqState : Counter state used to calculate next dust count
+ @Input ui32DustCount : Number of dusts in the core
+
+ @Return IMG_UINT32 : Next dust count in the sequence
+
+******************************************************************************/
+IMG_UINT32 RGXGetNextDustCount(RGX_DUST_STATE *psDustState, IMG_UINT32 ui32DustCount);
+
+
+#endif /* __RGXPOWER_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxray.c b/drivers/gpu/drm/img-rogue/1.10/rgxray.c
new file mode 100644
index 00000000000000..a38c4b554174d2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxray.c
@@ -0,0 +1,3764 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX ray tracing routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX ray tracing routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+/* for the offsetof macro */
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#if defined(INTEGRITY_OS)
+#include <string.h>
+#endif
+
+#include "img_defs.h"
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxray.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "devicemem_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_RAY_UFO_DUMP 0
+
+//#define RAY_CHECKPOINT_DEBUG 1
+
+#if defined(RAY_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/*
+ * FIXME: Defs copied from "rgxrpmdefs.h"
+ */
+
+typedef struct _RGX_RPM_DATA_RTU_FREE_PAGE_LIST {
+ IMG_UINT32 u32_0;
+} RGX_RPM_DATA_RTU_FREE_PAGE_LIST;
+
+/*
+Page table index.
+ The field is a pointer to a free page
+ */
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_WOFF (0U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_SHIFT (0U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_CLRMSK (0XFFC00000U)
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_FREE_PAGE_LIST_PTI_CLRMSK ) | (((_x_) & (0x003fffff)) << 0)))
+#define RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(_ft_) (((_ft_).u32_0 >> (0)) & 0x003fffff)
+
+typedef struct _RGX_RPM_DATA_RTU_PAGE_TABLE {
+ IMG_UINT32 u32_0;
+} RGX_RPM_DATA_RTU_PAGE_TABLE;
+
+/*
+ Page Table State
+ <br> 00: Empty Block
+ <br> 01: Full Block
+ <br> 10: Fragmented Block: Partially full page
+ */
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_WOFF (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_SHIFT (30U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_CLRMSK (0X3FFFFFFFU)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_PTS(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_PTS_CLRMSK ) | (((_x_) & (0x00000003)) << 30)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_PTS(_ft_) (((_ft_).u32_0 >> (30)) & 0x00000003)
+/*
+ Primitives in Page.
+ Number of unique primitives stored in this page.
+ The memory manager will re-use this page when the RCNT drops to zero.
+ */
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_WOFF (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_SHIFT (22U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_CLRMSK (0XC03FFFFFU)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_RCNT(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_RCNT_CLRMSK ) | (((_x_) & (0x000000ff)) << 22)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_RCNT(_ft_) (((_ft_).u32_0 >> (22)) & 0x000000ff)
+/*
+Next page table index.
+ The field is a pointer to the next page for this primitive.
+ */
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_WOFF (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_SHIFT (0U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_CLRMSK (0XFFC00000U)
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_SET_NPTI(_ft_,_x_) ((_ft_).u32_0 = (((_ft_).u32_0 & RGX_RPM_DATA_RTU_PAGE_TABLE_NPTI_CLRMSK ) | (((_x_) & (0x003fffff)) << 0)))
+#define RGX_RPM_DATA_RTU_PAGE_TABLE_GET_NPTI(_ft_) (((_ft_).u32_0 >> (0)) & 0x003fffff)
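+
+/*
+ * Usage sketch for the accessor macros above (illustrative only): each SET_
+ * macro masks out its field with the _CLRMSK constant and ORs in the shifted
+ * value, so the three page table fields pack into one 32-bit word in any order:
+ *
+ *   RGX_RPM_DATA_RTU_PAGE_TABLE sPTE = { 0 };
+ *   RGX_RPM_DATA_RTU_PAGE_TABLE_SET_PTS(sPTE, 1);    // 01: full block
+ *   RGX_RPM_DATA_RTU_PAGE_TABLE_SET_RCNT(sPTE, 16);  // 16 primitives in page
+ *   RGX_RPM_DATA_RTU_PAGE_TABLE_SET_NPTI(sPTE, 42);  // next page table index
+ *
+ *   RGX_RPM_DATA_RTU_PAGE_TABLE_GET_RCNT(sPTE);      // yields 16
+ */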
+
+
+#define RGX_CR_RPM_PAGE_TABLE_BASE_VALUE_ALIGNSHIFT (2U)
+#define RGX_CR_RPM_SHF_FPL_BASE_ALIGNSHIFT (2U)
+
+
+typedef struct {
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+#if 0
+ /* FIXME - multiple frame contexts? */
+ RGX_RPM_FREELIST *psSHFFreeList;
+ RGX_RPM_FREELIST *psSHGFreeList;
+#endif
+} RGX_SERVER_RAY_SH_DATA;
+
+
+typedef enum {
+ NODE_EMPTY = 0,
+ NODE_SCENE_HIERARCHY,
+ NODE_RPM_PAGE_TABLE,
+ NODE_RPM_FREE_PAGE_LIST
+} RGX_DEVMEM_NODE_TYPE;
+
+typedef struct _RGX_DEVMEM_NODE_ {
+ RGX_DEVMEM_NODE_TYPE eNodeType; /*!< Alloc type */
+ PMR *psPMR; /*!< Scene hierarchy/page table/free page list phys pages */
+ DEVMEMINT_HEAP *psDevMemHeap; /*!< Heap where the virtual mapping is made */
+ IMG_DEV_VIRTADDR sAddr; /*!< GPU virtual address where the phys pages are mapped into */
+ IMG_UINT32 ui32NumPhysPages; /*!< Number of physical pages mapped in for this node */
+ IMG_UINT32 ui32StartOfMappingIndex; /*!< Start of mapping index (i.e. OS page offset from virtual base) */
+ IMG_BOOL bInternal;
+} RGX_DEVMEM_NODE;
+
+typedef struct _RGX_RPM_DEVMEM_DESC_ {
+ DLLIST_NODE sMemoryDescBlock; /*!< the hierarchy scene memory block */
+ RGX_RPM_FREELIST *psFreeList; /*!< Free list this allocation is associated with */
+ IMG_UINT32 ui32NumPages; /*!< Number of RPM pages added */
+ RGX_DEVMEM_NODE sSceneHierarchyNode; /*!< scene hierarchy block descriptor */
+ RGX_DEVMEM_NODE sRPMPageListNode; /*!< RPM page list block descriptor */
+ RGX_DEVMEM_NODE sRPMFreeListNode; /*!< RPM free list block descriptor */
+} RGX_RPM_DEVMEM_DESC;
+
+typedef struct _DEVMEM_RPM_FREELIST_LOOKUP_
+{
+ IMG_UINT32 ui32FreeListID;
+ RGX_RPM_FREELIST *psFreeList;
+} DEVMEM_RPM_FREELIST_LOOKUP;
+
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+ RGX_CLIENT_CCB *psFCClientCCB[DPX_MAX_RAY_CONTEXTS];
+ DEVMEM_MEMDESC *psFCClientCCBMemDesc[DPX_MAX_RAY_CONTEXTS];
+ DEVMEM_MEMDESC *psFCClientCCBCtrlMemDesc[DPX_MAX_RAY_CONTEXTS];
+} RGX_SERVER_RAY_RS_DATA;
+
+
+struct _RGX_SERVER_RAY_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWRayContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ RGX_SERVER_RAY_SH_DATA sSHData;
+ RGX_SERVER_RAY_RS_DATA sRSData;
+ IMG_UINT32 ui32CleanupStatus;
+#define RAY_CLEANUP_SH_COMPLETE (1 << 0)
+#define RAY_CLEANUP_RS_COMPLETE (1 << 1)
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hIntJobRef;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+};
+
+
+#if 0
+static
+#ifdef __GNUC__
+__attribute__((noreturn))
+#endif
+void sleep_for_ever(void)
+{
+#if defined(__KLOCWORK__) // Klocwork would report an infinite loop because of while(1).
+ PVR_ASSERT(0);
+#else
+ while(1)
+ {
+ OSSleepms(~0); // sleep the maximum amount of time possible
+ }
+#endif
+}
+#endif
+
+static
+PVRSRV_ERROR _RGXCreateRPMSparsePMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ IMG_UINT32 ui32NumPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ PMR **ppsPMR);
+
+static PVRSRV_ERROR _RGXMapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEV_VIRTADDR sDevVAddrBase);
+
+static
+PVRSRV_ERROR _RGXUnmapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ IMG_DEV_VIRTADDR sDevVAddrBase);
+
+static
+PVRSRV_ERROR _CreateSHContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RAY_SH_DATA *psSHData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_VRDMCTX_STATE *psContextState;
+ PVRSRV_ERROR eError;
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware SHG context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_VRDMCTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwRaySHGContextSuspendState",
+ &psSHData->psContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_shcontextsuspendalloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psSHData->psContextStateMemDesc,
+ (void **)&psContextState);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to map firmware SH context suspend state (%u)",
+ eError));
+ goto fail_suspendcpuvirtacquire;
+ }
+ psContextState->uVRDMReg_VRM_CALL_STACK_POINTER = sVRMCallStackAddr.uiAddr;
+ DevmemReleaseCpuVirtAddr(psSHData->psContextStateMemDesc);
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_SH,
+ RGXFWIF_DM_SHG,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ psSHData->psContextStateMemDesc,
+ RGX_RTU_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psSHData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to init SH fw common context (%u)",
+ eError));
+ goto fail_shcommoncontext;
+ }
+
+ /*
+ * Dump the FW SH context suspend state buffer
+ */
+ PDUMPCOMMENT("Dump the SH context suspend state buffer");
+ DevmemPDumpLoadMem(psSHData->psContextStateMemDesc,
+ 0,
+ sizeof(RGXFWIF_VRDMCTX_STATE),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psSHData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+ fail_shcommoncontext:
+ fail_suspendcpuvirtacquire:
+ DevmemFwFree(psDevInfo, psSHData->psContextStateMemDesc);
+ fail_shcontextsuspendalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+static
+PVRSRV_ERROR _CreateRSContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RAY_RS_DATA *psRSData)
+{
+ PVRSRV_ERROR eError;
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_RS,
+ RGXFWIF_DM_RTU,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_RTU_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psRSData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to init RS fw common context (%u)",
+ eError));
+ goto fail_rscommoncontext;
+ }
+
+ psRSData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+ fail_rscommoncontext:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*
+ Static functions used by ray context code
+ */
+
+static
+PVRSRV_ERROR _DestroySHContext(RGX_SERVER_RAY_SH_DATA *psSHData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ psSHData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_SHG,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+ FWCommonContextFree(psSHData->psServerCommonContext);
+ DevmemFwFree(psDeviceNode->pvDevice, psSHData->psContextStateMemDesc);
+ psSHData->psContextStateMemDesc = NULL;
+ psSHData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _DestroyRSContext(RGX_SERVER_RAY_RS_DATA *psRSData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ psRSData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_RTU,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+
+ FWCommonContextFree(psRSData->psServerCommonContext);
+ psRSData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+
+/*
+ * RPM driver management rev 2
+ *
+ * The RPM freelists are opaque to the client driver. Scene Hierarchy pages
+ * are managed in Blocks (analogous to PB blocks) which are alloc'd in KM
+ * and mapped into the client MMU context.
+ *
+ * Page tables are set up for each existing Scene Memory Block.
+ *
+ * Freelist entries are also updated according to the list of Scene Memory Blocks.
+ *
+ * NOTES:
+ *
+ * (1) Scene Hierarchy shrink is not expected to be used.
+ * (2) The RPM FreeLists are Circular buffers and must be contiguous in virtual space
+ * (3) Each PMR is created with no phys backing pages. Pages are mapped in on-demand
+ * via RGXGrowRPMFreeList.
+ *
+ */
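+
+/*
+ * Sketch of the call sequence implied by the notes above (illustrative, not a
+ * verbatim client flow):
+ *
+ *   RGXCreateRPMContext(...);      // sparse PMRs for scene hierarchy + RPM page table
+ *   RGXCreateRPMFreeList(...);     // sparse PMR for the FPL, then the initial grow
+ *   ...
+ *   RGXProcessRequestRPMGrow(...); // FW-requested grow on an RPM out-of-memory event
+ *   ...
+ *   RGXDestroyRPMFreeList(...);    // shrinks away all blocks, then unrefs the PMR
+ */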
+#if defined(DEBUG)
+static PVRSRV_ERROR _ReadRPMFreePageList(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiIdx, j;
+ size_t uNumBytesCopied;
+ RGX_RPM_DATA_RTU_FREE_PAGE_LIST *psFreeListBuffer;
+ IMG_UINT32 ui32PTI[4];
+
+ /* Allocate scratch area for setting up Page table indices */
+ psFreeListBuffer = OSAllocMem(ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST));
+ if (psFreeListBuffer == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "_ReadRPMFreePageList: failed to allocate scratch page table"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /* Read scratch buffer from PMR (FPL entries must be contiguous) */
+ eError = PMR_ReadBytes(psPMR,
+ uiLogicalOffset,
+ (IMG_UINT8 *) psFreeListBuffer,
+ ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+ &uNumBytesCopied);
+
+ if (eError == PVRSRV_OK)
+ {
+		for (uiIdx = 0; uiIdx < ui32PageCount; uiIdx += 4)
+		{
+			for (j = 0; j < 4; j++)
+ {
+ ui32PTI[j] = RGX_RPM_DATA_RTU_FREE_PAGE_LIST_GET_PTI(psFreeListBuffer[uiIdx + j]);
+ }
+ PVR_DPF((PVR_DBG_MESSAGE, "%4d: %7d %7d %7d %7d", uiIdx,
+ ui32PTI[0], ui32PTI[1], ui32PTI[2], ui32PTI[3]));
+ }
+ }
+
+ /* Free scratch buffer */
+ OSFreeMem(psFreeListBuffer);
+
+ return eError;
+}
+
+static IMG_BOOL RGXDumpRPMFreeListPageList(RGX_RPM_FREELIST *psFreeList)
+{
+ PVR_LOG(("RPM Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32FreelistID,
+ psFreeList->ui64FreelistChecksum));
+
+ /* Dump FreeList page list */
+ _ReadRPMFreePageList(psFreeList->psFreeListPMR, 0, psFreeList->ui32CurrentFLPages);
+
+ return IMG_TRUE;
+}
+#endif
+
+static PVRSRV_ERROR _UpdateFwRPMFreelistSize(RGX_RPM_FREELIST *psFreeList,
+ IMG_BOOL bGrow,
+ IMG_BOOL bRestartRPM,
+ IMG_UINT32 ui32DeltaSize)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_KCCB_CMD sGPCCBCmd;
+
+	if (!bGrow)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_UpdateFwRPMFreelistSize: RPM freelist shrink not supported."));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* send feedback */
+ sGPCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW;
+ sGPCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ sGPCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32DeltaSize;
+ sGPCCBCmd.uCmdData.sFreeListGSData.ui32NewPages =
+ ((bRestartRPM) ? RGX_FREELIST_GSDATA_RPM_RESTART_EN : 0) |
+ psFreeList->ui32CurrentFLPages;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Send FW update: RPM freelist [FWAddr=0x%08x] has 0x%08x pages",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32CurrentFLPages));
+
+ /* Submit command to the firmware. */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psFreeList->psDevInfo,
+ RGXFWIF_DM_GP,
+ &sGPCCBCmd,
+ sizeof(sGPCCBCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
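+	/*
+	 * Note: LOOP_UNTIL_TIMEOUT retries the KCCB submission only while the
+	 * firmware queue reports PVRSRV_ERROR_RETRY, waiting
+	 * MAX_HW_TIME_US/WAIT_TRY_COUNT between attempts; any other status,
+	 * including success, breaks out immediately.
+	 */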
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_UpdateFwRPMFreelistSize: failed to update FW freelist size. (error = %u)", eError));
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+#if 0
+static void _CheckRPMFreelist(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumOfPagesToCheck,
+ IMG_UINT64 ui64ExpectedCheckSum,
+ IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+ /* No checksum needed as we have all information in the pdumps */
+ PVR_UNREFERENCED_PARAMETER(psFreeList);
+ PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+ PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+ *pui64CalculatedCheckSum = 0;
+#else
+ PVRSRV_ERROR eError;
+ size_t uiNumBytes;
+ IMG_UINT8* pui8Buffer;
+ IMG_UINT32* pui32Buffer;
+ IMG_UINT32 ui32CheckSumAdd = 0;
+ IMG_UINT32 ui32CheckSumXor = 0;
+ IMG_UINT32 ui32Entry;
+ IMG_UINT32 ui32Entry2;
+ IMG_BOOL bFreelistBad = IMG_FALSE;
+
+ *pui64CalculatedCheckSum = 0;
+
+ /* Allocate Buffer of the size of the freelist */
+ pui8Buffer = OSAllocMem(psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+ if (pui8Buffer == NULL)
+ {
+ PVR_LOG(("_CheckRPMFreelist: Failed to allocate buffer to check freelist %p!", psFreeList));
+ sleep_for_ever();
+ //PVR_ASSERT(0);
+ return;
+ }
+
+ /* Copy freelist content into Buffer */
+ eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+ psFreeList->uiFreeListPMROffset + (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages) * sizeof(IMG_UINT32),
+ pui8Buffer,
+ psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32),
+ &uiNumBytes);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(pui8Buffer);
+ PVR_LOG(("_CheckRPMFreelist: Failed to get freelist data for RPM freelist %p!", psFreeList));
+ sleep_for_ever();
+ //PVR_ASSERT(0);
+ return;
+ }
+
+ PVR_ASSERT(uiNumBytes == psFreeList->ui32CurrentFLPages * sizeof(IMG_UINT32));
+ PVR_ASSERT(ui32NumOfPagesToCheck <= psFreeList->ui32CurrentFLPages);
+
+ /* Generate checksum */
+ pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+ for(ui32Entry = 0; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+ {
+ ui32CheckSumAdd += pui32Buffer[ui32Entry];
+ ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+ /* Check for double entries */
+ for (ui32Entry2 = 0; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+ {
+ if ((ui32Entry != ui32Entry2) &&
+ (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2]))
+ {
+ PVR_LOG(("_CheckRPMFreelist: RPM Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ pui32Buffer[ui32Entry2],
+ ui32Entry,
+ ui32Entry2,
+ psFreeList->ui32CurrentFLPages));
+ bFreelistBad = IMG_TRUE;
+ }
+ }
+ }
+
+ OSFreeMem(pui8Buffer);
+
+ /* Check the calculated checksum against the expected checksum... */
+ *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
+
+ if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+ {
+ PVR_LOG(("_CheckRPMFreelist: Checksum mismatch for RPM freelist %p! Expected 0x%016llx calculated 0x%016llx",
+ psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+ bFreelistBad = IMG_TRUE;
+ }
+
+ if (bFreelistBad)
+ {
+ PVR_LOG(("_CheckRPMFreelist: Sleeping for ever!"));
+ sleep_for_ever();
+ // PVR_ASSERT(!bFreelistBad);
+ }
+#endif
+}
+#endif
+
+static PVRSRV_ERROR _WriteRPMFreePageList(PMR *psPMR,
+ IMG_DEVMEM_OFFSET_T uiLogicalOffset,
+ IMG_UINT32 ui32NextPageIndex,
+ IMG_UINT32 ui32PageCount)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 uiIdx;
+ size_t uNumBytesCopied;
+ RGX_RPM_DATA_RTU_FREE_PAGE_LIST *psFreeListBuffer;
+
+ /* Allocate scratch area for setting up Page table indices */
+ psFreeListBuffer = OSAllocMem(ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST));
+ if (psFreeListBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_WriteRPMPageList: failed to allocate scratch page table"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+	for (uiIdx = 0; uiIdx < ui32PageCount; uiIdx++, ui32NextPageIndex++)
+ {
+ psFreeListBuffer[uiIdx].u32_0 = 0;
+ RGX_RPM_DATA_RTU_FREE_PAGE_LIST_SET_PTI(psFreeListBuffer[uiIdx], ui32NextPageIndex);
+ }
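+
+	/*
+	 * Illustrative example: with ui32NextPageIndex == 8 and ui32PageCount == 4
+	 * the scratch buffer now holds PTI values 8, 9, 10, 11, i.e. the new
+	 * Doppler pages enter the free page list in ascending, contiguous order.
+	 */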
+
+ /* Copy scratch buffer to PMR */
+ eError = PMR_WriteBytes(psPMR,
+ uiLogicalOffset,
+ (IMG_UINT8 *) psFreeListBuffer,
+ ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+ &uNumBytesCopied);
+
+ /* Free scratch buffer */
+ OSFreeMem(psFreeListBuffer);
+
+#if defined(PDUMP)
+ /* Pdump the Page tables */
+ PDUMPCOMMENT("Dump %u RPM free page list entries.", ui32PageCount);
+ PMRPDumpLoadMem(psPMR,
+ uiLogicalOffset,
+ ui32PageCount * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST),
+ PDUMP_FLAGS_CONTINUOUS,
+ IMG_FALSE);
+#endif
+ return eError;
+}
+
+
+static RGX_RPM_FREELIST* FindRPMFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGX_RPM_FREELIST *psFreeList = NULL;
+
+ OSLockAcquire(psDevInfo->hLockRPMFreeList);
+ dllist_foreach_node(&psDevInfo->sRPMFreeListHead, psNode, psNext)
+ {
+ RGX_RPM_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_RPM_FREELIST, sNode);
+
+ if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+ {
+ psFreeList = psThisFreeList;
+ break;
+ }
+ }
+ OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+ return psFreeList;
+}
+
+void RGXProcessRequestRPMGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID)
+{
+ RGX_RPM_FREELIST *psFreeList = NULL;
+ RGXFWIF_KCCB_CMD sVRDMCCBCmd;
+ IMG_UINT32 ui32GrowValue;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bRestartRPM = IMG_TRUE; /* FIXME */
+
+ PVR_ASSERT(psDevInfo);
+
+ /* find the freelist with the corresponding ID */
+ psFreeList = FindRPMFreeList(psDevInfo, ui32FreelistID);
+
+ if (psFreeList)
+ {
+ /* Try to grow the freelist */
+ eError = RGXGrowRPMFreeList(psFreeList,
+ psFreeList->ui32GrowFLPages,
+ &psFreeList->sMemoryBlockHead);
+ if (eError == PVRSRV_OK)
+ {
+			/* Grow succeeded; record the number of pages added so it can be reported back to the FW */
+ ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+ psFreeList->ui32NumGrowReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(0,
+ 1, /* Add 1 to the appropriate counter (Requests by FW) */
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+#endif
+
+ }
+ else
+ {
+ /* Grow failed */
+ ui32GrowValue = 0;
+ PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p [ID %d] failed (error %u)",
+ psFreeList,
+ psFreeList->ui32FreelistID,
+ eError));
+ }
+
+ /* send feedback */
+ sVRDMCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_DOPPLER_MEMORY_GROW;
+ sVRDMCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ sVRDMCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+ sVRDMCCBCmd.uCmdData.sFreeListGSData.ui32NewPages =
+ ((bRestartRPM) ? RGX_FREELIST_GSDATA_RPM_RESTART_EN : 0) |
+ (psFreeList->ui32CurrentFLPages);
+
+		PVR_DPF((PVR_DBG_MESSAGE,"Send feedback to RPM after grow on freelist [ID %d]", ui32FreelistID));
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_SHG,
+ &sVRDMCCBCmd,
+ sizeof(sVRDMCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ else
+ {
+ /* Should never happen */
+ PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID));
+ PVR_ASSERT(IMG_FALSE);
+ }
+}
+
+
+/*!
+ * RGXGrowRPMFreeList
+ *
+ * Allocate and map physical backing pages for RPM buffers. A new
+ * RGX_RPM_DEVMEM_DESC, representing the new Scene memory block and its
+ * associated RPM page table and free page list entries, is allocated
+ * internally and added to pListHeader.
+ *
+ * @param psFreeList - RPM freelist descriptor (its parent context provides the RPM context)
+ * @param ui32RequestNumPages - number of RPM pages to add to the Doppler scene hierarchy
+ * @param pListHeader - linked list of RGX_RPM_DEVMEM_DESC blocks
+ *
+ */
+PVRSRV_ERROR RGXGrowRPMFreeList(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32RequestNumPages,
+ PDLLIST_NODE pListHeader)
+{
+ PVRSRV_ERROR eError;
+ RGX_SERVER_RPM_CONTEXT *psRPMContext = psFreeList->psParentCtx;
+ RGX_RPM_DEVMEM_DESC *psRPMDevMemDesc;
+ IMG_DEVMEM_OFFSET_T uiPMROffset;
+ IMG_UINT32 ui32NextPageIndex;
+
+ /* Are we allowed to grow ? */
+ if (ui32RequestNumPages > psFreeList->psParentCtx->ui32UnallocatedPages)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Scene Hierarchy buffer exceeded (0x%x pages required, 0x%x pages available).",
+ ui32RequestNumPages, psFreeList->psParentCtx->ui32UnallocatedPages));
+ return PVRSRV_ERROR_RPM_PBSIZE_ALREADY_MAX;
+ }
+
+ /* Allocate descriptor */
+ psRPMDevMemDesc = OSAllocZMem(sizeof(*psRPMDevMemDesc));
+ if (psRPMDevMemDesc == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: failed to allocate host data structure"));
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages
+ * - the context's ui32UnallocatedPages
+ */
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMContext);
+
+ /* Update the sparse PMRs */
+ psRPMDevMemDesc->psFreeList = psFreeList;
+ psRPMDevMemDesc->ui32NumPages = ui32RequestNumPages;
+ psRPMDevMemDesc->sSceneHierarchyNode.psPMR = psRPMContext->psSceneHierarchyPMR;
+ psRPMDevMemDesc->sRPMPageListNode.psPMR = psRPMContext->psRPMPageTablePMR;
+ psRPMDevMemDesc->sRPMFreeListNode.psPMR = psFreeList->psFreeListPMR;
+
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RGXGrowRPMFreeList: mapping %d pages for Doppler scene memory to VA 0x%" IMG_UINT64_FMTSPECx " with heap ID %p",
+ ui32RequestNumPages, psRPMContext->sSceneMemoryBaseAddr.uiAddr, psRPMContext->psSceneHeap));
+
+ /*
+ * 1. Doppler scene hierarchy
+ */
+ PDUMPCOMMENT("Allocate %d pages with mapping index %d for Doppler scene memory.",
+ ui32RequestNumPages,
+ psRPMContext->ui32SceneMemorySparseMappingIndex);
+ eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sSceneHierarchyNode,
+ psFreeList,
+ NODE_SCENE_HIERARCHY,
+ psRPMContext->psSceneHeap,
+ ui32RequestNumPages,
+ psRPMContext->sSceneMemoryBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM scene hierarchy block (status %d)", eError));
+ goto ErrorSceneBlock;
+ }
+
+ /*
+ * 2. RPM page list
+ */
+ if (ui32RequestNumPages > psRPMContext->ui32RPMEntriesInPage)
+ {
+ /* we need to map in phys pages for RPM page table */
+ PDUMPCOMMENT("Allocate %d (%d requested) page table entries with mapping index %d for RPM page table.",
+ ui32RequestNumPages - psRPMContext->ui32RPMEntriesInPage,
+ ui32RequestNumPages,
+ psRPMContext->ui32RPMPageTableSparseMappingIndex);
+ eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sRPMPageListNode,
+ psFreeList,
+ NODE_RPM_PAGE_TABLE,
+ psRPMContext->psRPMPageTableHeap,
+ ui32RequestNumPages - psRPMContext->ui32RPMEntriesInPage,
+ psRPMContext->sRPMPageTableBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM page table block (status %d)", eError));
+ goto ErrorPageTableBlock;
+ }
+ }
+
+ /*
+ * 3. Free page list (FPL)
+ */
+ if (ui32RequestNumPages > psFreeList->ui32EntriesInPage)
+ {
+ /* we need to map in phys pages for RPM free page list */
+ PDUMPCOMMENT("Allocate %d (%d requested) FPL entries with mapping index %d for RPM free page list.",
+ ui32RequestNumPages - psFreeList->ui32EntriesInPage,
+ ui32RequestNumPages,
+ psFreeList->ui32RPMFreeListSparseMappingIndex);
+ eError = _RGXMapRPMPBBlock(&psRPMDevMemDesc->sRPMFreeListNode,
+ psFreeList,
+ NODE_RPM_FREE_PAGE_LIST,
+ psRPMContext->psRPMPageTableHeap,
+ ui32RequestNumPages - psFreeList->ui32EntriesInPage,
+ psFreeList->sBaseDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: Unable to map RPM free page list (status %d)", eError));
+ goto ErrorFreeListBlock;
+ }
+ }
+
+ /*
+ * Update FPL entries
+ */
+
+ /* Calculate doppler page index from base of Doppler heap */
+ ui32NextPageIndex = (psRPMDevMemDesc->sSceneHierarchyNode.sAddr.uiAddr -
+ psRPMContext->sDopplerHeapBaseAddr.uiAddr) >> psFreeList->uiLog2DopplerPageSize;
+
+ /* Calculate write offset into FPL PMR assuming pages are mapped in order with no gaps */
+ uiPMROffset = (size_t)psFreeList->ui32CurrentFLPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+
+ eError = _WriteRPMFreePageList(psFreeList->psFreeListPMR, uiPMROffset, ui32NextPageIndex, ui32RequestNumPages);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowRPMFreeList: error writing RPM free list entries (%d)", eError));
+ goto ErrorFreeListWriteEntries;
+ }
+
+ {
+ /*
+ * Update the entries remaining in the last mapped RPM and FPL pages.
+ *
+		 * psRPMDevMemDesc->sRPMPageListNode.ui32NumPhysPages * ui32PTEntriesPerChunk entries are added (can be zero)
+ * ui32RequestNumPages entries are committed
+ *
+ * The number of entries remaining should always be less than a full page.
+ */
+ IMG_UINT32 ui32PTEntriesPerChunk = OSGetPageSize() / sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+ IMG_UINT32 ui32PTEntriesPerChunkClearMask = ~(ui32PTEntriesPerChunk - 1);
+
+ psRPMContext->ui32RPMEntriesInPage = psRPMContext->ui32RPMEntriesInPage +
+ (psRPMDevMemDesc->sRPMPageListNode.ui32NumPhysPages * ui32PTEntriesPerChunk) - ui32RequestNumPages;
+ PVR_ASSERT((psRPMContext->ui32RPMEntriesInPage & ui32PTEntriesPerChunkClearMask) == 0);
+
+ psFreeList->ui32EntriesInPage = psFreeList->ui32EntriesInPage +
+ (psRPMDevMemDesc->sRPMFreeListNode.ui32NumPhysPages * ui32PTEntriesPerChunk) - ui32RequestNumPages;
+ PVR_ASSERT((psFreeList->ui32EntriesInPage & ui32PTEntriesPerChunkClearMask) == 0);
+ }
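+
+	/*
+	 * Worked example for the accounting above (illustrative; assumes 4 KiB OS
+	 * pages and 4-byte FPL entries, i.e. ui32PTEntriesPerChunk == 1024):
+	 * starting with no spare entries, a grow of 1500 pages maps in two
+	 * physical chunks (2048 entries), leaving 2048 - 1500 = 548 entries spare
+	 * in the last mapped page, which satisfies the asserts (548 < 1024).
+	 */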
+
+ /* Add node to link list */
+ dllist_add_to_head(pListHeader, &psRPMDevMemDesc->sMemoryDescBlock);
+
+ /* Update number of available pages */
+ psFreeList->ui32CurrentFLPages += ui32RequestNumPages;
+ psRPMContext->ui32UnallocatedPages -= ui32RequestNumPages;
+
+#if defined(DEBUG)
+ RGXDumpRPMFreeListPageList(psFreeList);
+#endif
+
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMContext);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RPM Freelist [%p, ID %d]: grow by %u pages (current pages %u/%u, unallocated pages %u)",
+ psFreeList,
+ psFreeList->ui32FreelistID,
+ ui32RequestNumPages,
+ psFreeList->ui32CurrentFLPages,
+ psRPMContext->ui32TotalRPMPages,
+ psRPMContext->ui32UnallocatedPages));
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+ ErrorFreeListWriteEntries:
+ /* TODO: unmap sparse block for RPM FPL */
+ ErrorFreeListBlock:
+ /* TODO: unmap sparse block for RPM page table */
+ ErrorPageTableBlock:
+ /* TODO: unmap sparse block for scene hierarchy */
+
+ ErrorSceneBlock:
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMContext);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+ OSFreeMem(psRPMDevMemDesc);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+static PVRSRV_ERROR RGXShrinkRPMFreeList(PDLLIST_NODE pListHeader,
+ RGX_RPM_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode;
+ RGX_RPM_DEVMEM_DESC *psRPMDevMemNode;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32OldValue;
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages value
+ */
+ PVR_ASSERT(pListHeader);
+ PVR_ASSERT(psFreeList);
+ PVR_ASSERT(psFreeList->psDevInfo);
+ PVR_ASSERT(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ /********************************************************************
+ * All scene memory blocks must be freed together as non-contiguous
+ * virtual mappings are not yet supported.
+ ********************************************************************/
+
+ /* Get node from head of list and remove it */
+ psNode = dllist_get_next_node(pListHeader);
+ PVR_DPF((PVR_DBG_MESSAGE, "Found node %p", psNode));
+ if (psNode)
+ {
+ dllist_remove_node(psNode);
+
+ psRPMDevMemNode = IMG_CONTAINER_OF(psNode, RGX_RPM_DEVMEM_DESC, sMemoryDescBlock);
+ PVR_ASSERT(psRPMDevMemNode);
+ PVR_ASSERT(psRPMDevMemNode->psFreeList);
+ PVR_ASSERT(psRPMDevMemNode->sSceneHierarchyNode.psPMR);
+
+ /* remove scene hierarchy block */
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing scene hierarchy node"));
+ eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sSceneHierarchyNode,
+ psRPMDevMemNode->psFreeList,
+ psFreeList->psParentCtx->sSceneMemoryBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+ psRPMDevMemNode->sSceneHierarchyNode.ui32NumPhysPages,
+ psRPMDevMemNode->sSceneHierarchyNode.ui32StartOfMappingIndex,
+ eError));
+ goto UnMapError;
+ }
+
+ /*
+		 * If the grow size is smaller than the OS page size, the page lists may not need updating
+ */
+ if (psRPMDevMemNode->sRPMPageListNode.eNodeType != NODE_EMPTY)
+ {
+ /* unmap the RPM page table backing pages */
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing RPM page list node"));
+ PVR_ASSERT(psRPMDevMemNode->sRPMPageListNode.psPMR);
+ eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sRPMPageListNode,
+ psRPMDevMemNode->psFreeList,
+ psFreeList->psParentCtx->sRPMPageTableBaseAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+ psRPMDevMemNode->sRPMPageListNode.ui32NumPhysPages,
+ psRPMDevMemNode->sRPMPageListNode.ui32StartOfMappingIndex,
+ eError));
+ goto UnMapError;
+ }
+ }
+
+ if (psRPMDevMemNode->sRPMFreeListNode.eNodeType != NODE_EMPTY)
+ {
+ /* unmap the RPM free page list backing pages */
+ PVR_DPF((PVR_DBG_MESSAGE, "Removing RPM free list node"));
+ PVR_ASSERT(psRPMDevMemNode->sRPMFreeListNode.psPMR);
+ eError = _RGXUnmapRPMPBBlock(&psRPMDevMemNode->sRPMFreeListNode,
+ psRPMDevMemNode->psFreeList,
+ psFreeList->sBaseDevVAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXShrinkRPMFreeList: Failed to unmap %d pages with mapping index %d (status %d)",
+ psRPMDevMemNode->sRPMFreeListNode.ui32NumPhysPages,
+ psRPMDevMemNode->sRPMFreeListNode.ui32StartOfMappingIndex,
+ eError));
+ goto UnMapError;
+ }
+ }
+
+ /* update available RPM pages in freelist (NOTE: may be different from phys page count) */
+ ui32OldValue = psFreeList->ui32CurrentFLPages;
+ psFreeList->ui32CurrentFLPages -= psRPMDevMemNode->ui32NumPages;
+
+ /* check underflow */
+ PVR_ASSERT(ui32OldValue > psFreeList->ui32CurrentFLPages);
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p, ID %d]: shrink by %u pages (current pages %u/%u)",
+ psFreeList,
+ psFreeList->ui32FreelistID,
+ psRPMDevMemNode->ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->psParentCtx->ui32UnallocatedPages));
+
+ OSFreeMem(psRPMDevMemNode);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at zero PB size (%u pages)",
+ psFreeList,
+ psFreeList->ui32CurrentFLPages));
+ eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+ }
+
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+	return eError;	/* PVRSRV_OK on success, PVRSRV_ERROR_PBSIZE_ALREADY_MIN if the list was already empty */
+
+ UnMapError:
+ OSFreeMem(psRPMDevMemNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*!
+ * _RGXCreateRPMSparsePMR
+ *
+ * Creates a PMR container with no phys pages initially. Phys pages will be allocated
+ * and mapped later when requested by client or by HW RPM Out of Memory event.
+ * The PMR is created with zero phys backing pages.
+ * The sparse PMR is associated to either the RPM context or to the RPM freelist(s):
+ *
+ * RGX_SERVER_RPM_CONTEXT - Scene hierarchy, page table
+ * RGX_RPM_FREELIST - free page list PMR
+ *
+ * @param eBlockType - whether block is for scene hierarchy pages or page
+ * tables. This parameter is used to calculate size.
+ * @param ui32NumPages - total number of pages
+ * @param uiLog2DopplerPageSize - log2 Doppler/RPM page size
+ * @param ppsPMR - (Output) new PMR container.
+ *
+ * See the documentation for more details.
+ */
+static
+PVRSRV_ERROR _RGXCreateRPMSparsePMR(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ IMG_UINT32 ui32NumPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ PMR **ppsPMR)
+{
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_SIZE_T uiMaxSize = 0;
+ IMG_UINT32 ui32NumVirtPages = 0; /*!< number of virtual pages to cover virtual range */
+ IMG_UINT32 ui32Log2OSPageSize = OSGetPageShift();
+ IMG_UINT32 ui32ChunkSize = OSGetPageSize();
+ PVRSRV_MEMALLOCFLAGS_T uiCustomFlags = 0;
+
+ /* Work out the allocation logical size = virtual size */
+ switch(eBlockType)
+ {
+ case NODE_EMPTY:
+ PVR_ASSERT(IMG_FALSE);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ case NODE_SCENE_HIERARCHY:
+ PDUMPCOMMENT("Allocate Scene Hierarchy PMR (Pages %08X)", ui32NumPages);
+ uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * (1 << uiLog2DopplerPageSize);
+ break;
+ case NODE_RPM_PAGE_TABLE:
+ PDUMPCOMMENT("Allocate RPM Page Table PMR (Page entries %08X)", ui32NumPages);
+ uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_PAGE_TABLE);
+ break;
+ case NODE_RPM_FREE_PAGE_LIST:
+ /*
+ * Each RPM free page list (FPL) supports the maximum range.
+			 * In practice the maximum range is divided between the allocations in each FPL.
+ */
+ PDUMPCOMMENT("Allocate RPM Free Page List PMR (Page entries %08X)", ui32NumPages);
+ uiMaxSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+
+ /* Needed to write page indices into the freelist */
+ uiCustomFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE;
+
+ break;
+ /* no default case because the build should error out if a case is unhandled */
+ }
+
+	uiMaxSize = (uiMaxSize + ui32ChunkSize - 1) & ~((IMG_DEVMEM_SIZE_T)ui32ChunkSize - 1);	/* widened mask so the 64-bit size is not truncated */
+ ui32NumVirtPages = uiMaxSize >> ui32Log2OSPageSize;
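+
+	/*
+	 * Worked example (illustrative, assuming 4 KiB OS pages): an FPL sized for
+	 * 100 pages needs 100 * 4 = 400 bytes, which the round-up above turns into
+	 * 4096 bytes, i.e. one virtual chunk; the PMR still has no physical
+	 * backing at this point.
+	 */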
+
+ eError = PhysmemNewRamBackedPMR(psConnection,
+ psDeviceNode,
+ uiMaxSize, /* the maximum size which should match num virtual pages * page size */
+ ui32ChunkSize,
+ 0,
+ ui32NumVirtPages,
+ NULL,
+ ui32Log2OSPageSize,
+ (PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | PVRSRV_MEMALLOCFLAG_SPARSE_NO_DUMMY_BACKING | uiCustomFlags),
+ strlen("RPM Buffer") + 1,
+ "RPM Buffer",
+ OSGetCurrentClientProcessIDKM(),
+ ppsPMR);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "_RGXCreateRPMSparsePMR: Failed to allocate sparse PMR of size: 0x%016" IMG_UINT64_FMTSPECX,
+ (IMG_UINT64)uiMaxSize));
+ }
+
+ return eError;
+}
+
+/*!
+ * _RGXMapRPMPBBlock
+ *
+ * Maps in a block of phys pages for one of the following:
+ *
+ * NODE_SCENE_HIERARCHY - scene hierarchy
+ * NODE_RPM_PAGE_TABLE - RPM page table entries
+ * NODE_RPM_FREE_PAGE_LIST - RPM free page list entries
+ *
+ * @param psDevMemNode - device mem block descriptor (allocated by caller)
+ * @param psFreeList - free list descriptor
+ * @param eBlockType - block type: scene memory, RPM page table or RPM page free list
+ * @param psDevmemHeap - heap for GPU virtual mapping
+ * @param ui32NumPages - number of pages for scene memory, OR
+ * number of PT entries for RPM page table or page free list
+ * @param sDevVAddrBase - GPU virtual base address i.e. base address at start of sparse allocation
+ *
+ * @return PVRSRV_OK if no error occurred
+ */
+static
+PVRSRV_ERROR _RGXMapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ RGX_DEVMEM_NODE_TYPE eBlockType,
+ DEVMEMINT_HEAP *psDevmemHeap,
+ IMG_UINT32 ui32NumPages,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 sCpuVAddrNULL = 0; /* no CPU mapping needed */
+ IMG_UINT32 *paui32AllocPageIndices; /* table of virtual indices for sparse mapping */
+ IMG_PUINT32 pui32MappingIndex = NULL; /* virtual index where next physical chunk is mapped */
+ IMG_UINT32 i;
+ size_t uiSize = 0;
+ IMG_UINT32 ui32Log2OSPageSize = OSGetPageShift();
+ IMG_UINT32 ui32ChunkSize = OSGetPageSize();
+ IMG_UINT32 ui32NumPhysPages = 0; /*!< number of physical pages for data pages or RPM PTs */
+ PVRSRV_MEMALLOCFLAGS_T uiCustomFlags = 0;
+
+
+ /* Allocate Memory Block for scene hierarchy */
+ switch(eBlockType)
+ {
+ case NODE_EMPTY:
+ PVR_ASSERT(IMG_FALSE);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ case NODE_SCENE_HIERARCHY:
+ PDUMPCOMMENT("Allocate Scene Hierarchy Block (Pages %08X)", ui32NumPages);
+ uiSize = (size_t)ui32NumPages * (1 << psFreeList->psParentCtx->uiLog2DopplerPageSize);
+ pui32MappingIndex = &psFreeList->psParentCtx->ui32SceneMemorySparseMappingIndex;
+ break;
+ case NODE_RPM_PAGE_TABLE:
+ PDUMPCOMMENT("Allocate RPM Page Table Block (Page entries %08X)", ui32NumPages);
+ uiSize = (size_t)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_PAGE_TABLE);
+ pui32MappingIndex = &psFreeList->psParentCtx->ui32RPMPageTableSparseMappingIndex;
+ break;
+ case NODE_RPM_FREE_PAGE_LIST:
+ PDUMPCOMMENT("Allocate RPM Free Page List Block (Page entries %08X)", ui32NumPages);
+ uiSize = (size_t)ui32NumPages * sizeof(RGX_RPM_DATA_RTU_FREE_PAGE_LIST);
+ pui32MappingIndex = &psFreeList->ui32RPMFreeListSparseMappingIndex;
+
+ /* Needed to write page indices into the freelist */
+ uiCustomFlags |= PVRSRV_MEMALLOCFLAG_CPU_READABLE | PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE;
+
+ break;
+ /* no default case because the build should error out if a case is unhandled */
+ }
+
+ /*
+ * Round size up to multiple of the sparse chunk size = OS page size.
+ */
+	uiSize = (uiSize + ui32ChunkSize - 1) & ~((size_t)ui32ChunkSize - 1);	/* widened mask so a 64-bit size is not truncated */
+ ui32NumPhysPages = uiSize >> ui32Log2OSPageSize;
+
+ paui32AllocPageIndices = OSAllocMem(ui32NumPhysPages * sizeof(IMG_UINT32));
+ if (paui32AllocPageIndices == NULL)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: failed to allocate sparse mapping index list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+	for (i = 0; i < ui32NumPhysPages; i++)
+ {
+ paui32AllocPageIndices[i] = *pui32MappingIndex + i;
+ }
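+
+	/*
+	 * Illustrative example: if a previous grow left *pui32MappingIndex at 16
+	 * and this block needs 4 physical pages, indices 16..19 are requested, so
+	 * each block's virtual mapping stays contiguous with the last;
+	 * *pui32MappingIndex advances to 20 once the sparse change succeeds.
+	 */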
+
+ /* Set up some state */
+ psDevMemNode->eNodeType = eBlockType;
+ psDevMemNode->psDevMemHeap = psDevmemHeap;
+ if (eBlockType == NODE_SCENE_HIERARCHY)
+ {
+ /* the mapped-in scene hierarchy device address will be used to set up the FPL entries */
+ psDevMemNode->sAddr.uiAddr = sDevVAddrBase.uiAddr + (*pui32MappingIndex * ui32ChunkSize);
+ }
+ psDevMemNode->ui32NumPhysPages = ui32NumPhysPages;
+ psDevMemNode->ui32StartOfMappingIndex = *pui32MappingIndex;
+
+ {
+ if ((eBlockType == NODE_SCENE_HIERARCHY) &&
+ (ui32NumPhysPages > psFreeList->psParentCtx->ui32UnallocatedPages))
+ {
+			PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: virtual address space exceeded (0x%x pages required, 0x%x pages available).",
+ ui32NumPhysPages, psFreeList->psParentCtx->ui32UnallocatedPages));
+ OSFreeMem(paui32AllocPageIndices);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = PMRLockSysPhysAddresses(psDevMemNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: unable to lock PMR physical pages (status %d)", eError));
+ goto ErrorLockPhys;
+ }
+
+ eError = DevmemIntChangeSparse(psDevmemHeap,
+ psDevMemNode->psPMR,
+ ui32NumPhysPages,
+ paui32AllocPageIndices,
+ 0,
+ NULL,
+ SPARSE_RESIZE_ALLOC,
+ (PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE | uiCustomFlags),
+ sDevVAddrBase,
+ sCpuVAddrNULL);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_ERROR, "_RGXMapRPMPBBlock: change sparse mapping failed with %d pages starting at %d (status %d)",
+ ui32NumPhysPages, *pui32MappingIndex, eError));
+ goto ErrorSparseMapping;
+ }
+
+ /* FIXME: leave locked until destroy */
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+ }
+
+ /*
+ * Update the mapping index for the next allocation.
+ * The virtual pages should be contiguous.
+ */
+ *pui32MappingIndex += ui32NumPhysPages;
+
+ OSFreeMem(paui32AllocPageIndices);
+
+ return PVRSRV_OK;
+
+ ErrorSparseMapping:
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+
+ ErrorLockPhys:
+ OSFreeMem(paui32AllocPageIndices);
+
+ ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*!
+ * _RGXUnmapRPMPBBlock
+ *
+ * NOTE: because the SHF and SHG requests for memory are interleaved, the
+ * page mapping offset cannot be updated (non-contiguous virtual mapping
+ * is not supported).
+ *
+ * So either
+ * (i) the allocated virtual address range is unusable after unmap, or
+ * (ii) all of the scene memory must be freed.
+ *
+ * @param psDevMemNode - block to free
+ * @param psFreeList - RPM free list
+ * @param sDevVAddrBase - the virtual base address (i.e. where page 1 of the PMR is mapped)
+ */
+static
+PVRSRV_ERROR _RGXUnmapRPMPBBlock(RGX_DEVMEM_NODE *psDevMemNode,
+ RGX_RPM_FREELIST *psFreeList,
+ IMG_DEV_VIRTADDR sDevVAddrBase)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT64 sCpuVAddrNULL = 0; /* no CPU mapping needed */
+ IMG_UINT32 *paui32FreePageIndices; /* table of virtual indices for sparse unmapping */
+ IMG_UINT32 i;
+ IMG_UINT32 ui32NumPhysPages = psDevMemNode->ui32NumPhysPages; /*!< number of physical pages for data pages or RPM PTs */
+
+#if defined(PDUMP)
+ /* Free Memory Block for scene hierarchy */
+ switch(psDevMemNode->eNodeType)
+ {
+ case NODE_EMPTY:
+ PVR_ASSERT(IMG_FALSE);
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ case NODE_SCENE_HIERARCHY:
+ PDUMPCOMMENT("Free Scene Hierarchy Block (Pages %08X)", ui32NumPhysPages);
+ break;
+ case NODE_RPM_PAGE_TABLE:
+ PDUMPCOMMENT("Free RPM Page Table Block (Page entries %08X)", ui32NumPhysPages);
+ break;
+ case NODE_RPM_FREE_PAGE_LIST:
+ PDUMPCOMMENT("Free RPM Free Page List Block (Page entries %08X)", ui32NumPhysPages);
+ break;
+ /* no default case because the build should error out if a case is unhandled */
+ }
+#endif
+
+ paui32FreePageIndices = OSAllocMem(ui32NumPhysPages * sizeof(IMG_UINT32));
+ if (paui32FreePageIndices == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: failed to allocate sparse mapping index list"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+	for (i = 0; i < ui32NumPhysPages; i++)
+ {
+ paui32FreePageIndices[i] = psDevMemNode->ui32StartOfMappingIndex + i;
+ }
+
+ {
+ eError = PMRLockSysPhysAddresses(psDevMemNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: unable to lock PMR physical pages (status %d)", eError));
+ goto ErrorLockPhys;
+ }
+
+ eError = DevmemIntChangeSparse(psDevMemNode->psDevMemHeap,
+ psDevMemNode->psPMR,
+ 0, /* no pages are mapped here */
+ NULL,
+ ui32NumPhysPages,
+ paui32FreePageIndices,
+ SPARSE_RESIZE_FREE,
+ (PVRSRV_MEMALLOCFLAG_GPU_READABLE | PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE),
+ sDevVAddrBase,
+ sCpuVAddrNULL);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_RGXUnmapRPMPBBlock: free sparse mapping failed with %d pages starting at %d (status %d)",
+ ui32NumPhysPages, psDevMemNode->ui32StartOfMappingIndex, eError));
+ goto ErrorSparseMapping;
+ }
+
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+ }
+
+ OSFreeMem(paui32FreePageIndices);
+
+ return PVRSRV_OK;
+
+ ErrorSparseMapping:
+ PMRUnlockSysPhysAddresses(psDevMemNode->psPMR);
+
+ ErrorLockPhys:
+ OSFreeMem(paui32FreePageIndices);
+
+ ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*!
+ * RGXCreateRPMFreeList
+ *
+ * @param ui32InitFLPages - initial allocation of mapped-in physical pages
+ * @param ui32GrowFLPages - physical pages to add to scene hierarchy if RPM OOM occurs
+ * @param sFreeListDevVAddr - virtual base address of free list
+ * @param sRPMPageListDevVAddr (DEPRECATED -- cached in RPM Context)
+ * @param ui32FLSyncAddr (DEPRECATED)
+ * @param ppsFreeList - returns a RPM freelist handle to client
+ * @param puiHWFreeList - 'handle' to FW freelist, passed in VRDM kick (FIXME)
+ * @param bIsExternal - flag which marks if the freelist is an external one
+ */
+PVRSRV_ERROR RGXCreateRPMFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT *psRPMContext,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ RGX_RPM_FREELIST **ppsFreeList,
+ IMG_UINT32 *puiHWFreeList,
+ IMG_BOOL bIsExternal)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_RPM_FREELIST *psFWRPMFreeList;
+ DEVMEM_MEMDESC *psFWRPMFreelistMemDesc;
+ RGX_RPM_FREELIST *psFreeList;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* Allocate kernel freelist struct */
+ psFreeList = OSAllocZMem(sizeof(*psFreeList));
+ if (psFreeList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psFreeList->psCleanupSync,
+ "RPM free list cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateRPMFreeList: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto ErrorSyncAlloc;
+ }
+
+ /*
+ * This FW FreeList context is only mapped into kernel for initialisation.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine is sufficient on the CPU side (the WC buffer is flushed at the first TA kick).
+ *
+ * TODO - RPM freelist will be modified after creation, but only from host-side.
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWRPMFreeList),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE,
+ "FwRPMFreeList",
+ &psFWRPMFreelistMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: DevmemFwAllocate for RGXFWIF_RPM_FREELIST failed"));
+ goto ErrorFWFreeListAlloc;
+ }
+
+ /* Initialise host data structures */
+ psFreeList->psConnection = psConnection;
+ psFreeList->psDevInfo = psDevInfo;
+ psFreeList->psParentCtx = psRPMContext;
+ psFreeList->psFWFreelistMemDesc = psFWRPMFreelistMemDesc;
+ psFreeList->sBaseDevVAddr = sFreeListDevVAddr;
+ RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWRPMFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ psFreeList->ui32FreelistID = psDevInfo->ui32RPMFreelistCurrID++;
+ //psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+ /* TODO: is it really needed? */
+ if(bIsExternal == IMG_FALSE)
+ {
+ psFreeList->ui32InitFLPages = ui32InitFLPages;
+ psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+ }
+ //psFreeList->ui32CurrentFLPages = ui32InitFLPages;
+ psFreeList->ui32RefCount = 0;
+ dllist_init(&psFreeList->sMemoryBlockHead);
+
+ /* Wizard2 -- support per-freelist Doppler virtual page size */
+ psFreeList->uiLog2DopplerPageSize = psRPMContext->uiLog2DopplerPageSize;
+
+ /* Initialise FW data structure */
+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWRPMFreeList);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", ErrorFWFreeListCpuMap);
+
+ /*
+ * FIXME - the max pages are shared with the other freelists so this
+ * over-estimates the number of free pages. The full check is
+ * implemented in RGXGrowRPMFreeList.
+ */
+ if(bIsExternal == IMG_TRUE)
+ {
+ /* An external RPM FreeList will never grow */
+ psFWRPMFreeList->ui32MaxPages = ui32InitFLPages;
+ }
+ else
+ {
+ psFWRPMFreeList->ui32MaxPages = psFreeList->psParentCtx->ui32TotalRPMPages;
+ }
+ psFWRPMFreeList->ui32CurrentPages = ui32InitFLPages;
+ psFWRPMFreeList->ui32GrowPages = ui32GrowFLPages;
+ psFWRPMFreeList->ui32ReadOffset = 0;
+ psFWRPMFreeList->ui32WriteOffset = RGX_CR_RPM_SHG_FPL_WRITE_TOGGLE_EN; /* FL is full */
+ psFWRPMFreeList->bReadToggle = IMG_FALSE;
+ psFWRPMFreeList->bWriteToggle = IMG_TRUE;
+ psFWRPMFreeList->sFreeListDevVAddr.uiAddr = sFreeListDevVAddr.uiAddr;
+ psFWRPMFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+ psFWRPMFreeList->bGrowPending = IMG_FALSE;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "RPM Freelist %p created: FW freelist: %p, Init pages 0x%08x, Max FL base address " IMG_DEVMEM_SIZE_FMTSPEC ", Init FL base address " IMG_DEVMEM_SIZE_FMTSPEC,
+ psFreeList,
+ psFWRPMFreeList,
+ ui32InitFLPages,
+ sFreeListDevVAddr.uiAddr,
+ psFWRPMFreeList->sFreeListDevVAddr.uiAddr));
+
+ PVR_DPF((PVR_DBG_MESSAGE,"RPM FW Freelist %p created: sync FW addr 0x%08x", psFWRPMFreeList, psFWRPMFreeList->sSyncAddr));
+
+ PDUMPCOMMENT("Dump FW RPM FreeList");
+ DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWRPMFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+ /*
+ * Separate dump of the Freelist's number of Pages and stack pointer.
+	 * This makes it easy to modify the PB size in the out2.txt files.
+ */
+ PDUMPCOMMENT("RPM FreeList TotalPages");
+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_RPM_FREELIST, ui32CurrentPages),
+ psFWRPMFreeList->ui32CurrentPages,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ PDUMPCOMMENT("RPM FreeList device virtual base address");
+ DevmemPDumpLoadMemValue64(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_RPM_FREELIST, sFreeListDevVAddr),
+ psFWRPMFreeList->sFreeListDevVAddr.uiAddr,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+ if (bIsExternal == IMG_TRUE)
+ {
+ /* Mark the freelist as an external */
+ psFreeList->bIsExternal = IMG_TRUE;
+
+		/* For an external RPM FreeList there is no need to:
+		 * - create a sparse PMR
+		 * - allocate physical memory for the freelist
+		 * - add it to the list of freelists
+		 */
+
+ /* return values */
+ *puiHWFreeList = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ *ppsFreeList = psFreeList;
+
+ return PVRSRV_OK;
+ }
+
+ psFreeList->bIsExternal = IMG_FALSE;
+
+ /*
+ * Create the sparse PMR for the RPM free page list
+ */
+ eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+ NODE_RPM_FREE_PAGE_LIST,
+ psRPMContext->ui32TotalRPMPages,
+ psRPMContext->uiLog2DopplerPageSize,
+ &psFreeList->psFreeListPMR);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: failed to allocate PMR for RPM Free page list (%d)", eError));
+ goto ErrorSparsePMR;
+ }
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages
+ */
+ /* Add to list of freelists */
+ OSLockAcquire(psDevInfo->hLockRPMFreeList);
+ psFreeList->psParentCtx->uiFLRefCount++;
+ dllist_add_to_tail(&psDevInfo->sRPMFreeListHead, &psFreeList->sNode);
+ OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+ /*
+ * Add initial scene hierarchy block
+ * Allocate phys memory for scene hierarchy, free page list and RPM page-in-use list
+ */
+ eError = RGXGrowRPMFreeList(psFreeList, ui32InitFLPages, &psFreeList->sMemoryBlockHead);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMFreeList: error during phys memory allocation and mapping (%d)", eError));
+ goto ErrorGrowFreeList;
+ }
+
+ /* return values */
+ *puiHWFreeList = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ *ppsFreeList = psFreeList;
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+	ErrorGrowFreeList:
+	/* Remove freelist from list */
+	OSLockAcquire(psDevInfo->hLockRPMFreeList);
+	dllist_remove_node(&psFreeList->sNode);
+	psFreeList->psParentCtx->uiFLRefCount--;
+	OSLockRelease(psDevInfo->hLockRPMFreeList);
+
+	/* The free page list PMR only exists on this path */
+	PMRUnrefPMR(psFreeList->psFreeListPMR);
+
+	ErrorSparsePMR:
+	ErrorFWFreeListCpuMap:
+	RGXUnsetFirmwareAddress(psFWRPMFreelistMemDesc);
+	DevmemFwFree(psDevInfo, psFWRPMFreelistMemDesc);
+
+	ErrorFWFreeListAlloc:
+	SyncPrimFree(psFreeList->psCleanupSync);
+
+	ErrorSyncAlloc:
+	OSFreeMem(psFreeList);
+
+ ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ * RGXDestroyRPMFreeList
+ */
+PVRSRV_ERROR RGXDestroyRPMFreeList(RGX_RPM_FREELIST *psFreeList)
+{
+ PVRSRV_ERROR eError;
+ //IMG_UINT64 ui64CheckSum;
+
+ PVR_ASSERT(psFreeList);
+
+ if(psFreeList->ui32RefCount != 0 && psFreeList->bIsExternal == IMG_FALSE)
+ {
+ /* Freelist still busy */
+ PVR_DPF((PVR_DBG_WARNING, "Freelist %p is busy", psFreeList));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Freelist is not in use => start firmware cleanup */
+ eError = RGXFWRequestRPMFreeListCleanUp(psFreeList->psDevInfo,
+ psFreeList->sFreeListFWDevVAddr,
+ psFreeList->psCleanupSync);
+ if(eError != PVRSRV_OK)
+ {
+ /* Can happen if the firmware took too long to handle the cleanup request,
+		 * or if SLC flushes didn't go through (due to some GPU lockup) */
+ return eError;
+ }
+
+ /* update the statistics */
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsUpdateFreelistStats(psFreeList->ui32NumGrowReqByApp,
+ psFreeList->ui32NumGrowReqByFW,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ 0); /* FIXME - owner PID */
+#endif
+
+ /* Destroy FW structures */
+ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+ DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+ if(psFreeList->bIsExternal == IMG_FALSE)
+ {
+ /* Free the phys mem block descriptors. */
+ PVR_DPF((PVR_DBG_WARNING, "Cleaning RPM freelist index %d", psFreeList->ui32FreelistID));
+ while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+ {
+ eError = RGXShrinkRPMFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ psFreeList->psParentCtx->uiFLRefCount--;
+
+ /* consistency checks */
+ PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockHead));
+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+ /* Free RPM Free page list PMR */
+ eError = PMRUnrefPMR(psFreeList->psFreeListPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDestroyRPMFreeList: Failed to free RPM free page list PMR %p (error %u)",
+ psFreeList->psFreeListPMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* Remove RPM FreeList from list */
+ OSLockAcquire(psFreeList->psDevInfo->hLockRPMFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockRPMFreeList);
+ }
+
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+ /* free Freelist */
+ OSFreeMem(psFreeList);
+
+ return eError;
+}
+
+
+/*!
+ * RGXAddBlockToRPMFreeListKM
+ *
+ * NOTE: This API isn't used but it's provided for symmetry with the parameter
+ * management API.
+ */
+PVRSRV_ERROR RGXAddBlockToRPMFreeListKM(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if we have reference to freelist's PMR */
+ if (psFreeList->psFreeListPMR == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RPM Freelist is not configured for grow"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psFreeList->psParentCtx->hLock);
+#endif
+ /* grow freelist */
+ eError = RGXGrowRPMFreeList(psFreeList,
+ ui32NumPages,
+ &psFreeList->sMemoryBlockHead);
+ if(eError == PVRSRV_OK)
+ {
+ /* update freelist data in firmware */
+ _UpdateFwRPMFreelistSize(psFreeList, IMG_TRUE, IMG_TRUE, ui32NumPages);
+
+ psFreeList->ui32NumGrowReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+ 0,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+#endif
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psFreeList->psParentCtx->hLock);
+#endif
+ return eError;
+}
+
+
+/*
+ * RGXCreateRPMContext
+ */
+PVRSRV_ERROR RGXCreateRPMContext(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT **ppsRPMContext,
+ IMG_UINT32 ui32TotalRPMPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr,
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr,
+ DEVMEMINT_HEAP *psSceneHeap,
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr,
+ DEVMEMINT_HEAP *psRPMPageTableHeap,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *puiHWFrameData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ //DEVMEM_MEMDESC *psFWRPMContextMemDesc;
+ RGX_SERVER_RPM_CONTEXT *psRPMContext;
+ RGXFWIF_RAY_FRAME_DATA *psFrameData;
+ RGXFWIF_DEV_VIRTADDR sFirmwareAddr;
+
+ /* Allocate kernel RPM context */
+ psRPMContext = OSAllocZMem(sizeof(*psRPMContext));
+ if (psRPMContext == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ *ppsRPMContext = psRPMContext;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psRPMContext->hLock, LOCK_TYPE_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto ErrorCreateLock;
+ }
+#endif
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psRPMContext->psCleanupSync,
+ "RPM context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateRPMContext: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto ErrorSyncAlloc;
+ }
+
+ /*
+ * 1. Create the sparse PMR for scene hierarchy
+ */
+ eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+ NODE_SCENE_HIERARCHY,
+ ui32TotalRPMPages,
+ uiLog2DopplerPageSize,
+ &psRPMContext->psSceneHierarchyPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for Scene hierarchy (%d)", eError));
+ goto ErrorSparsePMR1;
+ }
+
+ /*
+ * 2. Create the sparse PMR for the RPM page list
+ */
+ eError = _RGXCreateRPMSparsePMR(psConnection, psDeviceNode,
+ NODE_RPM_PAGE_TABLE,
+ ui32TotalRPMPages,
+ uiLog2DopplerPageSize,
+ &psRPMContext->psRPMPageTablePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: failed to allocate PMR for RPM Page list (%d)", eError));
+ goto ErrorSparsePMR2;
+ }
+
+ /* Allocate FW structure and return FW address to client */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFrameData),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwRPMContext",
+ ppsMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRPMContext: DevmemAllocate for RGXFWIF_FREELIST failed"));
+ goto ErrorFWRPMContextAlloc;
+ }
+
+ /* Update the unallocated pages, which are shared between the RPM freelists */
+ psRPMContext->ui32UnallocatedPages = psRPMContext->ui32TotalRPMPages = ui32TotalRPMPages;
+ psRPMContext->psDeviceNode = psDeviceNode;
+ psRPMContext->psFWRPMContextMemDesc = *ppsMemDesc;
+ psRPMContext->uiLog2DopplerPageSize = uiLog2DopplerPageSize;
+
+ /* Cache the virtual alloc state for future phys page mapping */
+ psRPMContext->sDopplerHeapBaseAddr = sDopplerHeapBaseAddr;
+ psRPMContext->sSceneMemoryBaseAddr = sSceneMemoryBaseAddr;
+ psRPMContext->psSceneHeap = psSceneHeap;
+ psRPMContext->sRPMPageTableBaseAddr = sRPMPageTableBaseAddr;
+ psRPMContext->psRPMPageTableHeap = psRPMPageTableHeap;
+
+ /*
+ * TODO - implement RPM abort control using HW frame data to track
+ * abort status in RTU.
+ */
+ RGXSetFirmwareAddress(&sFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *puiHWFrameData = sFirmwareAddr.ui32Addr;
+
+ //eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psFrameData);
+ //PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", ErrorFrameDataCpuMap);
+
+ /*
+ * TODO: pdumping
+ */
+
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+
+ //DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+
+ ErrorFWRPMContextAlloc:
+ PMRUnrefPMR(psRPMContext->psRPMPageTablePMR);
+
+ ErrorSparsePMR2:
+ PMRUnrefPMR(psRPMContext->psSceneHierarchyPMR);
+
+ ErrorSparsePMR1:
+ SyncPrimFree(psRPMContext->psCleanupSync);
+
+ ErrorSyncAlloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psRPMContext->hLock);
+
+ ErrorCreateLock:
+#endif
+ OSFreeMem(psRPMContext);
+
+ ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
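+
+/*
+ * Illustrative sketch (not built): a minimal call into RGXCreateRPMContext.
+ * The page count (1024) and log2 Doppler page size (12, i.e. 4KB) are
+ * hypothetical placeholders; the heaps and base addresses are assumed to
+ * come from the caller's devmem setup.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleCreateRPMContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr,
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr,
+ DEVMEMINT_HEAP *psSceneHeap,
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr,
+ DEVMEMINT_HEAP *psRPMPageTableHeap)
+{
+ RGX_SERVER_RPM_CONTEXT *psRPMContext = NULL;
+ DEVMEM_MEMDESC *psMemDesc = NULL;
+ IMG_UINT32 uiHWFrameData = 0;
+
+ return RGXCreateRPMContext(psConnection, psDeviceNode,
+ &psRPMContext,
+ 1024, /* hypothetical total RPM pages */
+ 12, /* hypothetical log2 page size */
+ sSceneMemoryBaseAddr,
+ sDopplerHeapBaseAddr,
+ psSceneHeap,
+ sRPMPageTableBaseAddr,
+ psRPMPageTableHeap,
+ &psMemDesc,
+ &uiHWFrameData);
+}
+#endif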
+
+
+/*
+ * RGXDestroyRPMContext
+ */
+PVRSRV_ERROR RGXDestroyRPMContext(RGX_SERVER_RPM_CONTEXT *psCleanupData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PRGXFWIF_RAY_FRAME_DATA psFrameData;
+
+ /* Wait for FW to process all commands */
+
+ PVR_ASSERT(psCleanupData);
+
+ RGXSetFirmwareAddress(&psFrameData, psCleanupData->psFWRPMContextMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+ /* Cleanup frame data in SHG */
+ eError = RGXFWRequestRayFrameDataCleanUp(psCleanupData->psDeviceNode,
+ psFrameData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_SHG);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "FrameData busy in SHG"));
+ return eError;
+ }
+
+ psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+ /* Cleanup frame data in RTU */
+ eError = RGXFWRequestRayFrameDataCleanUp(psCleanupData->psDeviceNode,
+ psFrameData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_RTU);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "FrameData busy in RTU"));
+ return eError;
+ }
+
+ /* Free Scene hierarchy PMR (We should be the only one that holds a ref on the PMR) */
+ eError = PMRUnrefPMR(psCleanupData->psSceneHierarchyPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDestroyRPMContext: Failed to free scene hierarchy PMR %p (error %u)",
+ psCleanupData->psSceneHierarchyPMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* Free RPM Page list PMR */
+ eError = PMRUnrefPMR(psCleanupData->psRPMPageTablePMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXDestroyRPMContext: Failed to free RPM page list PMR %p (error %u)",
+ psCleanupData->psRPMPageTablePMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ if (psCleanupData->uiFLRefCount > 0)
+ {
+ /* Kernel RPM freelists hold reference to RPM context */
+ PVR_DPF((PVR_DBG_WARNING, "RGXDestroyRPMContext: Free list ref count non-zero."));
+ return PVRSRV_ERROR_NONZERO_REFCOUNT;
+ }
+
+ /* If we got here then SHG and RTU operations on this FrameData have finished */
+ SyncPrimFree(psCleanupData->psCleanupSync);
+
+ /* Free the FW RPM descriptor */
+ RGXUnsetFirmwareAddress(psCleanupData->psFWRPMContextMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psFWRPMContextMemDesc);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psCleanupData->hLock);
+#endif
+
+ OSFreeMem(psCleanupData);
+
+ return PVRSRV_OK;
+}
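+
+/*
+ * Illustrative sketch (not built): RGXDestroyRPMContext reports
+ * PVRSRV_ERROR_NONZERO_REFCOUNT while kernel RPM freelists still
+ * reference the context (uiFLRefCount above), so freelists are destroyed
+ * first. The ordering below is an assumption drawn from that check.
+ */
+#if 0
+static void _ExampleRPMTeardownOrder(RGX_RPM_FREELIST *psFreeList,
+ RGX_SERVER_RPM_CONTEXT *psRPMContext)
+{
+ /* 1. Destroy every freelist created against the context */
+ (void) RGXDestroyRPMFreeList(psFreeList);
+
+ /* 2. Only then destroy the context itself */
+ (void) RGXDestroyRPMContext(psRPMContext);
+}
+#endif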
+
+
+/*
+ * PVRSRVRGXCreateRayContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32FrameworkRegisterSize,
+ IMG_PBYTE pabyFrameworkRegisters,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RAY_CONTEXT **ppsRayContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_SERVER_RAY_CONTEXT *psRayContext;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ RGXFWIF_FWRAYCONTEXT *pFWRayContext;
+ IMG_UINT32 i;
+
+ /* Prepare cleanup structure */
+ *ppsRayContext = NULL;
+ psRayContext = OSAllocZMem(sizeof(*psRayContext));
+ if (psRayContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psRayContext->hLock, LOCK_TYPE_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_createlock;
+ }
+#endif
+
+ psRayContext->psDeviceNode = psDeviceNode;
+
+ /*
+ Allocate device memory for the firmware ray context.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware ray context");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_FWRAYCONTEXT),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwRayContext",
+ &psRayContext->psFWRayContextMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware ray context (%u)",
+ eError));
+ goto fail_fwraycontext;
+ }
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psRayContext->psCleanupSync,
+ "Ray context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode, &psRayContext->psFWFrameworkMemDesc, ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psRayContext->psFWFrameworkMemDesc, pabyFrameworkRegisters, ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRayContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psRayContext->psFWFrameworkMemDesc;
+
+ eError = _CreateSHContext(psConnection,
+ psDeviceNode,
+ psRayContext->psFWRayContextMemDesc,
+ offsetof(RGXFWIF_FWRAYCONTEXT, sSHGContext),
+ psFWMemContextMemDesc,
+ sVRMCallStackAddr,
+ ui32Priority,
+ &sInfo,
+ &psRayContext->sSHData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_shcontext;
+ }
+
+ eError = _CreateRSContext(psConnection,
+ psDeviceNode,
+ psRayContext->psFWRayContextMemDesc,
+ offsetof(RGXFWIF_FWRAYCONTEXT, sRTUContext),
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psRayContext->sRSData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_rscontext;
+ }
+
+ /*
+ Temporarily map the firmware context to the kernel and init it
+ */
+ eError = DevmemAcquireCpuVirtAddr(psRayContext->psFWRayContextMemDesc,
+ (void **)&pFWRayContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware %s ray context to CPU",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_rscontext;
+ }
+
+
+ for (i = 0; i < DPX_MAX_RAY_CONTEXTS; i++)
+ {
+ /* Allocate the frame context client CCB */
+ eError = RGXCreateCCB(psDevInfo,
+ RGX_RTU_CCB_SIZE_LOG2,
+ psConnection,
+ REQ_TYPE_FC0 + i,
+ psRayContext->sRSData.psServerCommonContext,
+ &psRayContext->sRSData.psFCClientCCB[i],
+ &psRayContext->sRSData.psFCClientCCBMemDesc[i],
+ &psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i]);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to create CCB for frame context %u (%s)",
+ __FUNCTION__,
+ i,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_rscontext;
+ }
+
+ /* Set the firmware CCB device addresses in the firmware common context */
+ RGXSetFirmwareAddress(&pFWRayContext->psCCB[i],
+ psRayContext->sRSData.psFCClientCCBMemDesc[i],
+ 0, RFW_FWADDR_FLAG_NONE);
+ RGXSetFirmwareAddress(&pFWRayContext->psCCBCtl[i],
+ psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i],
+ 0, RFW_FWADDR_FLAG_NONE);
+ }
+
+ pFWRayContext->ui32ActiveFCMask = 0;
+ pFWRayContext->ui32NextFC = RGXFWIF_INVALID_FRAME_CONTEXT;
+
+ /* We've finished the setup so release the CPU mapping */
+ DevmemReleaseCpuVirtAddr(psRayContext->psFWRayContextMemDesc);
+
+ /*
+ As the common context alloc will dump the SH and RS common contexts
+ after they've been set up, we skip over the 2 common contexts and dump
+ the rest of the structure
+ */
+ PDUMPCOMMENT("Dump shared part of ray context");
+ DevmemPDumpLoadMem(psRayContext->psFWRayContextMemDesc,
+ (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+ sizeof(RGXFWIF_FWRAYCONTEXT) - (sizeof(RGXFWIF_FWCOMMONCONTEXT) * 2),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRaytraceCtxtListHead), &(psRayContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+ }
+
+ *ppsRayContext = psRayContext;
+ return PVRSRV_OK;
+
+ fail_rscontext:
+ _DestroySHContext(&psRayContext->sSHData,
+ psDeviceNode,
+ psRayContext->psCleanupSync);
+ fail_shcontext:
+ fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psRayContext->psFWFrameworkMemDesc);
+ fail_frameworkcreate:
+ SyncPrimFree(psRayContext->psCleanupSync);
+ fail_syncalloc:
+ DevmemFwFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psRayContext->hLock);
+ fail_createlock:
+#endif
+ fail_fwraycontext:
+ OSFreeMem(psRayContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
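+
+/*
+ * Illustrative sketch (not built): typical pairing of ray context
+ * creation and destruction at the bridge level. The priority value,
+ * framework register blob and memory context handle are hypothetical
+ * placeholders; only the signatures are taken from this file.
+ */
+#if 0
+static PVRSRV_ERROR _ExampleRayContextLifetime(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32FWRegSize,
+ IMG_PBYTE pabyFWRegs,
+ IMG_HANDLE hMemCtxPrivData)
+{
+ RGX_SERVER_RAY_CONTEXT *psRayContext = NULL;
+ PVRSRV_ERROR eError;
+
+ eError = PVRSRVRGXCreateRayContextKM(psConnection, psDeviceNode,
+ 0, /* hypothetical priority */
+ sVRMCallStackAddr,
+ ui32FWRegSize,
+ pabyFWRegs,
+ hMemCtxPrivData,
+ &psRayContext);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ /* ... kick SH/RS work here, then tear the context down ... */
+ return PVRSRVRGXDestroyRayContextKM(psRayContext);
+}
+#endif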
+
+
+/*
+ * PVRSRVRGXDestroyRayContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psRayContext->psDeviceNode->pvDevice;
+
+ /* Remove the node from the list before calling destroy - destroy, if
+ * successful, will invalidate the node, so it must be re-added if
+ * destroy fails
+ */
+ OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+ dllist_remove_node(&(psRayContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+
+ /* Clean up the SH context if we haven't already */
+ if ((psRayContext->ui32CleanupStatus & RAY_CLEANUP_SH_COMPLETE) == 0)
+ {
+ eError = _DestroySHContext(&psRayContext->sSHData,
+ psRayContext->psDeviceNode,
+ psRayContext->psCleanupSync);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ psRayContext->ui32CleanupStatus |= RAY_CLEANUP_SH_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+ /* Clean up the RS context if we haven't already */
+ if ((psRayContext->ui32CleanupStatus & RAY_CLEANUP_RS_COMPLETE) == 0)
+ {
+ eError = _DestroyRSContext(&psRayContext->sRSData,
+ psRayContext->psDeviceNode,
+ psRayContext->psCleanupSync);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ psRayContext->ui32CleanupStatus |= RAY_CLEANUP_RS_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+#if 0
+ /*
+ * FIXME - De-allocate RPM freelists (should be called from UM)
+ */
+ RGXDestroyRPMFreeList(psRayContext->sSHData.psSHFFreeList);
+ RGXDestroyRPMFreeList(psRayContext->sSHData.psSHGFreeList);
+#endif
+
+ for (i = 0; i < DPX_MAX_RAY_CONTEXTS; i++)
+ {
+ RGXUnsetFirmwareAddress(psRayContext->sRSData.psFCClientCCBMemDesc[i]);
+ RGXUnsetFirmwareAddress(psRayContext->sRSData.psFCClientCCBCtrlMemDesc[i]);
+ RGXDestroyCCB(psDevInfo, psRayContext->sRSData.psFCClientCCB[i]);
+ }
+
+ /*
+ Only if both the SH and RS contexts have been cleaned up can we
+ free the shared resources
+ */
+ if (psRayContext->ui32CleanupStatus == (RAY_CLEANUP_RS_COMPLETE | RAY_CLEANUP_SH_COMPLETE))
+ {
+ /* Free the framework buffer */
+ DevmemFwFree(psDevInfo, psRayContext->psFWFrameworkMemDesc);
+
+ /* Free the firmware ray context */
+ DevmemFwFree(psDevInfo, psRayContext->psFWRayContextMemDesc);
+
+ /* Free the cleanup sync */
+ SyncPrimFree(psRayContext->psCleanupSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psRayContext->hLock);
+#endif
+
+ OSFreeMem(psRayContext);
+ }
+
+ return PVRSRV_OK;
+
+ e0:
+ OSWRLockAcquireWrite(psDevInfo->hRaytraceCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRaytraceCtxtListHead), &(psRayContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRaytraceCtxListLock);
+ return eError;
+}
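+
+/*
+ * Illustrative sketch (not built): destruction is two-stage (SH then RS)
+ * and returns PVRSRV_ERROR_RETRY while the firmware still owns either
+ * context; ui32CleanupStatus records which stages completed so a retried
+ * call skips work already done. The polling loop is an assumption - real
+ * callers normally defer and retry from a cleanup path instead.
+ */
+#if 0
+static void _ExampleDeferredRayContextDestroy(RGX_SERVER_RAY_CONTEXT *psRayContext)
+{
+ while (PVRSRVRGXDestroyRayContextKM(psRayContext) == PVRSRV_ERROR_RETRY)
+ {
+ OSWaitus(100); /* hypothetical back-off */
+ }
+}
+#endif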
+
+/*
+ * PVRSRVRGXKickRSKM
+ */
+PVRSRV_ERROR PVRSRVRGXKickRSKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32FCCmdSize,
+ IMG_PBYTE pui8FCDMCmd,
+ IMG_UINT32 ui32FrameContextID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_DEV_VIRTADDR sRobustnessResetReason)
+{
+ RGXFWIF_KCCB_CMD sRSKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA asRSCmdHelperData[1] = { };
+ RGX_CCB_CMD_HELPER_DATA asFCCmdHelperData[1] = { };
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError1;
+ PVRSRV_ERROR eError2;
+ RGX_SERVER_RAY_RS_DATA *psRSData = &psRayContext->sRSData;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32FCWoff;
+ IMG_UINT32 ui32RTUCmdOffset = 0;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32FWCtx;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+ IMG_UINT32 *paui32IntFenceValue = NULL;
+ IMG_UINT32 ui32IntClientUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32IntUpdateValue = NULL;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#if !defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iUpdateTimeline >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+ __func__, iUpdateTimeline));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (iCheckFence >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+ __func__, iCheckFence));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif /* !defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerSyncPrims;i++)
+ {
+ if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on RS) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szUpdateFenceName[31] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRayContext->hLock);
+#endif
+
+ ui32IntJobRef = OSAtomicIncrement(&psRayContext->hIntJobRef);
+
+ ui32IntClientFenceCount = ui32ClientFenceCount;
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+ }
+
+ paui32IntFenceValue = paui32ClientFenceValue;
+ ui32IntClientUpdateCount = ui32ClientUpdateCount;
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ paui32IntUpdateValue = paui32ClientUpdateValue;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psRayContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+ goto fail_resolve_input_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(RAY_CHECKPOINT_DEBUG)
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<32; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
+ }
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (piUpdateFence)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psRayContext->psDeviceNode,
+ szUpdateFenceName,
+ iUpdateTimeline,
+ psRayContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d)", __FUNCTION__, iUpdateFence));
+
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now append the timeline sync prim addr to the ray context update list */
+ SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ }
+
+ if (ui32FenceSyncCheckpointCount)
+ {
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Ray RS Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence));
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ }
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Ray RS Update (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)&psRayContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+ SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __FUNCTION__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_RAY_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (RS) fence/updates syncs...", __FUNCTION__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RS) fence syncs (&psRayContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __FUNCTION__, ui32IntClientFenceCount, (void*)&psRayContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+ pui32TmpIntFenceValue++;
+ }
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (RS) update syncs (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __FUNCTION__, ui32IntClientUpdateCount, (void*)&psRayContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+ &pPreAddr,
+ &pPostAddr,
+ &pRMWUFOAddr);
+
+
+ if (pui8DMCmd != NULL)
+ {
+ eError = RGXCmdHelperInitCmdCCB(psRSData->psFCClientCCB[ui32FrameContextID],
+ 0,
+ NULL,
+ NULL,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_RTU,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "FC",
+ bCCBStateOpen,
+ asFCCmdHelperData,
+ sRobustnessResetReason);
+ }
+ else
+ {
+ eError = RGXCmdHelperInitCmdCCB(psRSData->psFCClientCCB[ui32FrameContextID],
+ 0,
+ NULL,
+ NULL,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_NULL,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "FC",
+ bCCBStateOpen,
+ asFCCmdHelperData,
+ sRobustnessResetReason);
+
+ }
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_initcmd;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asFCCmdHelperData),
+ asFCCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_acquireRScmd;
+ }
+
+ ui32FCWoff = RGXCmdHelperGetCommandSize(ARRAY_SIZE(asFCCmdHelperData),
+ asFCCmdHelperData);
+
+ *(IMG_UINT32*)pui8FCDMCmd = RGXGetHostWriteOffsetCCB(psRSData->psFCClientCCB[ui32FrameContextID]) + ui32FCWoff;
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that if there isn't space in the kernel CCB we can return
+ retry back to the services client before we perform any operations
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet, so only submit
+ the command if the create was successful
+ */
+ eError1 = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext),
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ 0,
+ NULL,
+ NULL,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32FCCmdSize,
+ pui8FCDMCmd,
+ NULL,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_RTU_FC,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "RS",
+ bCCBStateOpen,
+ asRSCmdHelperData,
+ sRobustnessResetReason);
+ if (eError1 != PVRSRV_OK)
+ {
+ goto fail_acquireRScmd;
+ }
+
+ eError1 = RGXCmdHelperAcquireCmdCCB(ARRAY_SIZE(asRSCmdHelperData),
+ asRSCmdHelperData);
+ if (eError1 != PVRSRV_OK)
+ {
+ goto fail_acquireRScmd;
+ }
+
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that if there isn't space in the kernel CCB we can return
+ retry back to the services client before we perform any operations
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet, so only submit
+ the command if the create was successful
+ */
+ if (eError == PVRSRV_OK)
+ {
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+ RGXCmdHelperReleaseCmdCCB(ARRAY_SIZE(asFCCmdHelperData),
+ asFCCmdHelperData, "FC", 0);
+ }
+
+ if (eError1 == PVRSRV_OK)
+ {
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+ ui32RTUCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ARRAY_SIZE(asRSCmdHelperData),
+ asRSCmdHelperData, "RS",
+ FWCommonContextGetFWAddress(psRSData->psServerCommonContext).ui32Addr);
+ }
+
+ /*
+ * Construct the kernel RTU CCB command.
+ * (Safe to release reference to ray context virtual address because
+ * ray context destruction must flush the firmware).
+ */
+ sRSKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sRSKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRSData->psServerCommonContext);
+ sRSKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRSData->psServerCommonContext));
+ sRSKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psRSData->psServerCommonContext).ui32Addr;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_RTU,
+ sRSKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32RTUCmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psRayContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_RS,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ /*
+ * Submit the RTU command to the firmware.
+ */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_RTU,
+ &sRSKCCBCmd,
+ sizeof(sRSKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickRSKM failed to schedule kernel RTU command. Error:%u", eError));
+ if (eError == PVRSRV_OK)
+ {
+ eError = eError2;
+ }
+ goto fail_acquireRScmd;
+ }
+ else
+ {
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_RS);
+#endif
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ if (piUpdateFence)
+ {
+ *piUpdateFence = iUpdateFence;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRayContext->hLock);
+#endif
+ return eError;
+
+ fail_initcmd:
+ fail_acquireRScmd:
+ SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ fail_alloc_update_values_mem:
+ if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+ fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ fail_resolve_input_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ err_populate_sync_addr_list:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRayContext->hLock);
+#endif
+ return eError;
+}
+
+/*
+ * PVRSRVRGXKickVRDMKM
+ */
+PVRSRV_ERROR PVRSRVRGXKickVRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceSyncOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_DEV_VIRTADDR sRobustnessResetReason)
+{
+ RGXFWIF_KCCB_CMD sSHKCCBCmd;
+ RGX_CCB_CMD_HELPER_DATA sCmdHelperData;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ RGX_SERVER_RAY_SH_DATA *psSHData = &psRayContext->sSHData;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32SHGCmdOffset = 0;
+ IMG_UINT32 ui32IntJobRef;
+ IMG_UINT32 ui32FWCtx;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+ IMG_UINT32 *paui32IntFenceValue = NULL;
+ IMG_UINT32 ui32IntClientUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32IntUpdateValue = NULL;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#if !defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iUpdateTimeline >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+ __func__, iUpdateTimeline));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (iCheckFence >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+ __func__, iCheckFence));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif /* !defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Sanity check the server fences */
+ for (i=0;i<ui32ServerSyncPrims;i++)
+ {
+ if (!(paui32ServerSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on SH) must fence", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szUpdateFenceName[31] = '\0';
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRayContext->hLock);
+#endif
+
+ ui32IntJobRef = OSAtomicIncrement(&psRayContext->hIntJobRef);
+
+ ui32IntClientFenceCount = ui32ClientFenceCount;
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientFenceCount && !pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ paui32IntFenceValue = paui32ClientFenceValue;
+
+ ui32IntClientUpdateCount = ui32ClientUpdateCount;
+ eError = SyncAddrListPopulate(&psRayContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list;
+ }
+ if (ui32IntClientUpdateCount && !pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ paui32IntUpdateValue = paui32ClientUpdateValue;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, iCheckFence, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psRayContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+ goto fail_resolve_input_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<32; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __FUNCTION__, ii, (void*)psNextCheckpoint)); //psFenceSyncCheckpoints[ii]));
+ }
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (piUpdateFence)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psRayContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, iUpdateFence, iUpdateTimeline, (void*)psRayContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psRayContext->psDeviceNode,
+ szUpdateFenceName,
+ (PVRSRV_TIMELINE)iUpdateTimeline,
+ psRayContext->psDeviceNode->hSyncCheckpointContext,
+ (PVRSRV_FENCE*)&iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d)", __FUNCTION__, iUpdateFence));
+
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now append the timeline sync prim addr to the ray context update list */
+ SyncAddrListAppendSyncPrim(&psRayContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ }
+
+ if (ui32FenceSyncCheckpointCount)
+ {
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to Ray VRDM Fence (&psRayContext->sSyncAddrListFence=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)&psRayContext->sSyncAddrListFence));
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psRayContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ }
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to Ray VRDM Update (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)&psRayContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+ SyncAddrListAppendCheckpoints(&psRayContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psRayContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(RAY_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __FUNCTION__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __FUNCTION__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_RAY_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping Ray (VRDM) fence/updates syncs...", __FUNCTION__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (VRDM) fence syncs (&psRayContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __FUNCTION__, ui32IntClientFenceCount, (void*)&psRayContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+ pui32TmpIntFenceValue++;
+ }
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d Ray (VRDM) update syncs (&psRayContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __FUNCTION__, ui32IntClientUpdateCount, (void*)&psRayContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRayContext->psDeviceNode->pvDevice,
+ &pPreAddr,
+ &pPostAddr,
+ &pRMWUFOAddr);
+
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext),
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32ServerSyncPrims,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerSyncs,
+ ui32CmdSize,
+ pui8DMCmd,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_SHG,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "SH",
+ bCCBStateOpen,
+ &sCmdHelperData,
+ sRobustnessResetReason);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_initcmd;
+ }
+
+ eError = RGXCmdHelperAcquireCmdCCB(1, &sCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ goto PVRSRVRGXKickSHKM_Exit;
+ }
+
+
+ /*
+ We should reserve space in the kernel CCB here and fill in the command
+ directly.
+ This is so that if there isn't space in the kernel CCB we can return
+ retry back to the services client before we perform any operations
+ */
+
+ /*
+ We might only be kicking to flush out a padding packet, so only submit
+ the command if the create was successful
+ */
+ if (eError == PVRSRV_OK)
+ {
+ /*
+ All the required resources are ready at this point, we can't fail so
+ take the required server sync operations and commit all the resources
+ */
+ ui32SHGCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(1, &sCmdHelperData, "SH", FWCommonContextGetFWAddress(psSHData->psServerCommonContext).ui32Addr);
+ }
+
+ /*
+ * Construct the kernel SHG CCB command.
+ * (Safe to release reference to ray context virtual address because
+ * ray context destruction must flush the firmware).
+ */
+ sSHKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sSHKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psSHData->psServerCommonContext);
+ sSHKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psSHData->psServerCommonContext));
+ sSHKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ ui32FWCtx = FWCommonContextGetFWAddress(psSHData->psServerCommonContext).ui32Addr;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_SHG,
+ sSHKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32SHGCmdOffset
+ );
+ RGX_HWPERF_HOST_ENQ(psRayContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_VRDM,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ /*
+ * Submit the SHG command to the firmware.
+ */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRayContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_SHG,
+ &sSHKCCBCmd,
+ sizeof(sSHKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError2 != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXKickSHKM failed to schedule kernel RTU command. Error:%u", eError));
+ if (eError == PVRSRV_OK)
+ {
+ eError = eError2;
+ }
+ goto PVRSRVRGXKickSHKM_Exit;
+ }
+ else
+ {
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psRayContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_VRDM);
+#endif
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ if (piUpdateFence)
+ {
+ *piUpdateFence = iUpdateFence;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRayContext->hLock);
+#endif
+ return eError;
+
+ fail_initcmd:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psRayContext->psDeviceNode, &psRayContext->sSyncAddrListUpdate);
+ fail_alloc_update_values_mem:
+ if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+ fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ fail_resolve_input_fence:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ PVRSRVRGXKickSHKM_Exit:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ err_populate_sync_addr_list:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRayContext->hLock);
+#endif
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRayContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
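+	/* A ray context wraps two FW common contexts (SHG and RTU); a priority
+	 * change must be applied to whichever of the two does not already
+	 * match. */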
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRayContext->hLock);
+#endif
+
+ if (psRayContext->sSHData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRayContext->sSHData.psServerCommonContext,
+ psConnection,
+ psRayContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_SHG);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the SH part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_shcontext;
+ }
+
+ psRayContext->sSHData.ui32Priority = ui32Priority;
+ }
+
+ if (psRayContext->sRSData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRayContext->sRSData.psServerCommonContext,
+ psConnection,
+ psRayContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_RTU);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the RS part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_rscontext;
+ }
+
+ psRayContext->sRSData.ui32Priority = ui32Priority;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRayContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+ fail_rscontext:
+ fail_shcontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRayContext->hLock);
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void CheckForStalledRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hRaytraceCtxListLock);
+ dllist_foreach_node(&psDevInfo->sRaytraceCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RAY_CONTEXT *psCurrentServerRayCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RAY_CONTEXT, sListNode);
+
+ DumpStalledFWCommonContext(psCurrentServerRayCtx->sSHData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ DumpStalledFWCommonContext(psCurrentServerRayCtx->sRSData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSWRLockReleaseRead(psDevInfo->hRaytraceCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hRaytraceCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sRaytraceCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RAY_CONTEXT *psCurrentServerRayCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RAY_CONTEXT, sListNode);
+		if (NULL != psCurrentServerRayCtx->sSHData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRayCtx->sSHData.psServerCommonContext, RGX_KICK_TYPE_DM_SHG) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_SHG;
+			}
+		}
+
+		if (NULL != psCurrentServerRayCtx->sRSData.psServerCommonContext)
+		{
+			if (CheckStalledClientCommonContext(psCurrentServerRayCtx->sRSData.psServerCommonContext, RGX_KICK_TYPE_DM_RTU) == PVRSRV_ERROR_CCCB_STALLED)
+			{
+				ui32ContextBitMask |= RGX_KICK_TYPE_DM_RTU;
+			}
+		}
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hRaytraceCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/******************************************************************************
+ End of file (rgxray.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxray.h b/drivers/gpu/drm/img-rogue/1.10/rgxray.h
new file mode 100644
index 00000000000000..56454bb2fb3c7d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxray.h
@@ -0,0 +1,368 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX ray tracing functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX ray tracing functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXRAY_H__)
+#define __RGXRAY_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxfwutils.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RAY_CONTEXT_ RGX_SERVER_RAY_CONTEXT;
+typedef struct _RGX_SERVER_RPM_CONTEXT_ RGX_SERVER_RPM_CONTEXT;
+typedef struct _RGX_RPM_FREELIST_ RGX_RPM_FREELIST;
+
+
+struct _RGX_SERVER_RPM_CONTEXT_
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWRPMContextMemDesc;
+ //DEVMEM_MEMDESC *psRTACtlMemDesc;
+ //DEVMEM_MEMDESC *psRTArrayMemDesc;
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ IMG_UINT32 uiFLRefCount; /*!< increments each time a free list references this parent context */
+
+ DEVMEMINT_HEAP *psSceneHeap;
+ DEVMEMINT_HEAP *psRPMPageTableHeap;
+ DEVMEMINT_HEAP *psRPMFreeListHeap;
+
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr;
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr; /*!< Base address of the virtual heap where Doppler scene is mapped */
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr;
+
+ IMG_UINT32 ui32TotalRPMPages; /*!< Total virtual pages available */
+ IMG_UINT32 uiLog2DopplerPageSize; /*!< Doppler virtual page size, may be sub-4KB */
+ IMG_UINT32 ui32UnallocatedPages; /*!< Unmapped pages which may be mapped and added to a RPM free list */
+ IMG_UINT32 ui32RPMEntriesInPage; /*!< Number of remaining RPM page entries (dwords) in current mapped pages */
+
+ /* Sparse mappings */
+ PMR *psSceneHierarchyPMR; /*!< Scene hierarchy phys page resource */
+ PMR *psRPMPageTablePMR; /*!< RPM pages in use by scene hierarchy phys page resource */
+
+ /* Current page offset at the end of the physical allocation (PMR)
+ * for the scene memory and RPM page tables. This is where new phys pages
+ * will be mapped when the grow occurs (using sparse dev mem API). */
+ IMG_UINT32 ui32SceneMemorySparseMappingIndex;
+ IMG_UINT32 ui32RPMPageTableSparseMappingIndex;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+};
+
+/*
+ * RPM host freelist (analogous to PM host freelist)
+ */
+struct _RGX_RPM_FREELIST_ {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ CONNECTION_DATA *psConnection;
+ RGX_SERVER_RPM_CONTEXT *psParentCtx;
+
+ /* Free list PMR. Used for grow */
+ PMR *psFreeListPMR;
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset;
+
+ IMG_DEV_VIRTADDR sBaseDevVAddr;
+
+ /* Current page offset at the end of the physical allocation (PMR)
+ * for the scene memory and RPM page tables. This is where new phys pages
+ * will be mapped when the grow occurs (using sparse dev mem API). */
+ IMG_UINT32 ui32RPMFreeListSparseMappingIndex;
+
+ IMG_UINT32 ui32ReadOffset; /*!< FPL circular buffer read offset */
+ IMG_UINT32 ui32WriteOffset; /*!< FPL circular buffer write offset */
+
+ /* Freelist config */
+ IMG_UINT32 ui32MaxFLPages;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32CurrentFLPages;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_UINT32 ui32FreelistID;
+ IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */
+ IMG_BOOL bCheckFreelist; /* freelist check enabled */
+ IMG_UINT32 ui32RefCount; /* freelist reference counting */
+ IMG_UINT32 uiLog2DopplerPageSize; /*!< Doppler virtual page size, may be sub-4KB */
+ IMG_UINT32 ui32EntriesInPage; /*!< Number of remaining FPL page entries (dwords) in current mapped pages */
+
+ IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application*/
+ IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */
+ IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */
+
+ IMG_PID ownerPid; /* Pid of the owner of the list */
+
+ /*
+ * External freelists don't use common RPM memory and are not added to global list of freelists.
+ * They're created and destroyed on demand, e.g. when loading offline hierarchies.
+ */
+ IMG_BOOL bIsExternal; /* Mark if the freelist is external */
+
+ /* Memory Blocks */
+ DLLIST_NODE sMemoryBlockHead; /* head of list of RGX_RPM_DEVMEM_DESC block descriptors */
+ DLLIST_NODE sNode; /* node used to reference list of freelists on device */
+
+ /* FW data structures */
+ DEVMEM_MEMDESC *psFWFreelistMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+} ;
+
+
+/*!
+ * RGXCreateRPMFreeList
+ *
+ * @param psConnection
+ * @param psDeviceNode
+ * @param psRPMContext
+ * @param ui32InitFLPages
+ * @param ui32GrowFLPages
+ * @param sFreeListDevVAddr
+ * @param ppsFreeList
+ * @param puiHWFreeList
+ * @param bIsExternal
+ */
+PVRSRV_ERROR RGXCreateRPMFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT *psRPMContext,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ RGX_RPM_FREELIST **ppsFreeList,
+ IMG_UINT32 *puiHWFreeList,
+ IMG_BOOL bIsExternal);
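+/*
+ * Example (illustrative only; identifiers are placeholders): a caller with an
+ * existing RPM context might create an internal freelist and later destroy it:
+ *
+ *   RGX_RPM_FREELIST *psFreeList;
+ *   IMG_UINT32 uiHWFreeList;
+ *   eError = RGXCreateRPMFreeList(psConnection, psDeviceNode, psRPMContext,
+ *                                 ui32InitFLPages, ui32GrowFLPages,
+ *                                 sFreeListDevVAddr, &psFreeList,
+ *                                 &uiHWFreeList, IMG_FALSE);
+ *   ...
+ *   eError = RGXDestroyRPMFreeList(psFreeList);
+ */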
+
+/*!
+ * RGXGrowRPMFreeList
+ */
+PVRSRV_ERROR RGXGrowRPMFreeList(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32RequestNumPages,
+ PDLLIST_NODE pListHeader);
+
+/*!
+ * RGXDestroyRPMFreeList
+ */
+PVRSRV_ERROR RGXDestroyRPMFreeList(RGX_RPM_FREELIST *psFreeList);
+
+/*!
+ * RGXCreateRPMContext
+ */
+PVRSRV_ERROR RGXCreateRPMContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SERVER_RPM_CONTEXT **ppsRPMContext,
+ IMG_UINT32 ui32TotalRPMPages,
+ IMG_UINT32 uiLog2DopplerPageSize,
+ IMG_DEV_VIRTADDR sSceneMemoryBaseAddr,
+ IMG_DEV_VIRTADDR sDopplerHeapBaseAddr,
+ DEVMEMINT_HEAP *psSceneHeap,
+ IMG_DEV_VIRTADDR sRPMPageTableBaseAddr,
+ DEVMEMINT_HEAP *psRPMPageTableHeap,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *puiHWFrameData);
+
+/*!
+ * RGXDestroyRPMContext
+ */
+PVRSRV_ERROR RGXDestroyRPMContext(RGX_SERVER_RPM_CONTEXT *psCleanupData);
+
+/*!
+ RGXProcessRequestRPMGrow
+*/
+void RGXProcessRequestRPMGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID);
+
+
+/*!
+ RGXAddBlockToRPMFreeListKM
+*/
+PVRSRV_ERROR RGXAddBlockToRPMFreeListKM(RGX_RPM_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages);
+
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXCreateRayContextKM
+
+ @Description
+	Server-side implementation of RGXCreateRayContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sVRMCallStackAddr - VRM call stack device virtual address
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsRayContext - created server-side ray context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateRayContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sVRMCallStackAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RAY_CONTEXT **ppsRayContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXDestroyRayContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyRayContext
+
+ @Input psRayContext - Ray context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyRayContextKM(RGX_SERVER_RAY_CONTEXT *psRayContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXKickRSKM
+
+ @Description
+ Server-side implementation of RGXKickRS
+
+ @Input psRayContext - ray context to kick the RS (RTU) command on
+ @Input iCheckFence - fence to check before the command executes
+ @Output piUpdateFence - fence updated when the command completes
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickRSKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32FCCmdSize,
+ IMG_PBYTE pui8FCDMCmd,
+ IMG_UINT32 ui32FrameContextID,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_DEV_VIRTADDR sRobustnessResetReason);
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXKickVRDMKM
+
+ @Description
+	Server-side implementation of RGXKickVRDM
+
+ @Input psRayContext - ray context to kick the VRDM (SHG) command on
+ @Input iCheckFence - fence to check before the command executes
+ @Output piUpdateFence - fence updated when the command completes
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickVRDMKM(RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientFenceOffset,
+ IMG_UINT32 *paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateOffset,
+ IMG_UINT32 *paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncPrims,
+ IMG_UINT32 *paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+ IMG_UINT32 ui32CmdSize,
+ IMG_PBYTE pui8DMCmd,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_DEV_VIRTADDR sRobustnessResetReason);
+
+PVRSRV_ERROR PVRSRVRGXSetRayContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ RGX_SERVER_RAY_CONTEXT *psRayContext,
+ IMG_UINT32 ui32Priority);
+
+/* Debug - check if ray context is waiting on a fence */
+void CheckForStalledRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client ray contexts are stalled */
+IMG_UINT32 CheckForStalledClientRayCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+#endif /* __RGXRAY_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxregconfig.c b/drivers/gpu/drm/img-rogue/1.10/rgxregconfig.c
new file mode 100644
index 00000000000000..342413972042a0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxregconfig.c
@@ -0,0 +1,327 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Register configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Regconfig routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxregconfig.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "device.h"
+#include "sync_internal.h"
+#include "pdump_km.h"
+#include "pvrsrv.h"
+
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT8 ui8RegCfgType)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+ RGXFWIF_REG_CFG_TYPE eRegCfgType = (RGXFWIF_REG_CFG_TYPE) ui8RegCfgType;
+
+ PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRegCfg->hLock);
+#endif
+
+ if (eRegCfgType < psRegCfg->eRegCfgTypeToPush)
+ {
+		PVR_DPF((PVR_DBG_ERROR,
+			"PVRSRVRGXSetRegConfigTypeKM: Requested register configuration type (%d) is invalid: it must be at least %d."
+			" Configurations of different types must be pushed in order",
+			eRegCfgType,
+			psRegCfg->eRegCfgTypeToPush));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return PVRSRV_ERROR_REG_CONFIG_INVALID_TYPE;
+ }
+
+ psRegCfg->eRegCfgTypeToPush = eRegCfgType;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(psDevConnection);
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXSetRegConfigTypeKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT64 ui64RegValue,
+ IMG_UINT64 ui64RegMask)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRegCfg->hLock);
+#endif
+
+ if (psRegCfg->bEnabled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Cannot add record whilst register configuration active."));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+ }
+ if (psRegCfg->ui32NumRegRecords == RGXFWIF_REG_CFG_MAX_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Register configuration full."));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return PVRSRV_ERROR_REG_CONFIG_FULL;
+ }
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Addr = (IMG_UINT64) ui32RegAddr;
+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Value = ui64RegValue;
+ sRegCfgCmd.uCmdData.sRegConfigData.sRegConfig.ui64Mask = ui64RegMask;
+ sRegCfgCmd.uCmdData.sRegConfigData.eRegConfigType = psRegCfg->eRegCfgTypeToPush;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ADD;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return eError;
+ }
+
+ psRegCfg->ui32NumRegRecords++;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+	PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXAddRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRegCfg->hLock);
+#endif
+
+ if (psRegCfg->bEnabled)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Attempt to clear register configuration whilst active."));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return PVRSRV_ERROR_REG_CONFIG_ENABLED;
+ }
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_CLEAR;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return eError;
+ }
+
+ psRegCfg->ui32NumRegRecords = 0;
+ psRegCfg->eRegCfgTypeToPush = RGXFWIF_REG_CFG_TYPE_PWR_ON;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+
+ return eError;
+#else
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXClearRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRegCfg->hLock);
+#endif
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_ENABLE;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return eError;
+ }
+
+ psRegCfg->bEnabled = IMG_TRUE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXEnableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(SUPPORT_USER_REGISTER_CONFIGURATION)
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_KCCB_CMD sRegCfgCmd;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_REG_CONFIG *psRegCfg = &psDevInfo->sRegCongfig;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRegCfg->hLock);
+#endif
+
+ sRegCfgCmd.eCmdType = RGXFWIF_KCCB_CMD_REGCONFIG;
+ sRegCfgCmd.uCmdData.sRegConfigData.eCmdType = RGXFWIF_REGCFG_CMD_DISABLE;
+
+ eError = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sRegCfgCmd,
+ sizeof(sRegCfgCmd),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: RGXScheduleCommand failed. Error:%u", eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+ return eError;
+ }
+
+ psRegCfg->bEnabled = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRegCfg->hLock);
+#endif
+
+ return eError;
+#else
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXDisableRegConfigKM: Feature disabled. Compile with SUPPORT_USER_REGISTER_CONFIGURATION"));
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return PVRSRV_ERROR_FEATURE_DISABLED;
+#endif
+}
+
+
+/******************************************************************************
+ End of file (rgxregconfig.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxregconfig.h b/drivers/gpu/drm/img-rogue/1.10/rgxregconfig.h
new file mode 100644
index 00000000000000..5edb2b96e765cb
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxregconfig.h
@@ -0,0 +1,130 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX register configuration functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX register configuration functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXREGCONFIG_H__)
+#define __RGXREGCONFIG_H__
+
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_km.h"
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXSetRegConfigTypeKM
+
+ @Description
+	Server-side implementation of RGXSetRegConfigType
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui8RegCfgType - Register configuration type (RGXFWIF_REG_CFG_TYPE)
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSetRegConfigTypeKM(CONNECTION_DATA * psDevConnection,
+					PVRSRV_DEVICE_NODE *psDeviceNode,
+					IMG_UINT8 ui8RegCfgType);
+/*!
+*******************************************************************************
+ @Function	PVRSRVRGXAddRegConfigKM
+
+ @Description
+	Server-side implementation of RGXAddRegConfig
+
+ @Input psDeviceNode - RGX Device node
+ @Input ui32RegAddr - Register address
+ @Input ui64RegValue - Register value
+ @Input ui64RegMask - Register mask
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXAddRegConfigKM(CONNECTION_DATA * psConnection,
+					PVRSRV_DEVICE_NODE *psDeviceNode,
+					IMG_UINT32 ui32RegAddr,
+					IMG_UINT64 ui64RegValue,
+					IMG_UINT64 ui64RegMask);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXClearRegConfigKM
+
+ @Description
+ Server-side implementation of RGXClearRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXClearRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXEnableRegConfigKM
+
+ @Description
+ Server-side implementation of RGXEnableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXEnableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+*******************************************************************************
+ @Function PVRSRVRGXDisableRegConfigKM
+
+ @Description
+ Server-side implementation of RGXDisableRegConfig
+
+ @Input psDeviceNode - RGX Device node
+
+ @Return PVRSRV_ERROR
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDisableRegConfigKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
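+/*
+ * Example (illustrative only): configurations must be pushed in increasing
+ * type order and cannot be modified while enabled. A typical sequence:
+ *
+ *   PVRSRVRGXSetRegConfigTypeKM(psConnection, psDeviceNode,
+ *                               RGXFWIF_REG_CFG_TYPE_PWR_ON);
+ *   PVRSRVRGXAddRegConfigKM(psConnection, psDeviceNode,
+ *                           ui32RegAddr, ui64RegValue, ui64RegMask);
+ *   PVRSRVRGXEnableRegConfigKM(psConnection, psDeviceNode);
+ *   ...
+ *   PVRSRVRGXDisableRegConfigKM(psConnection, psDeviceNode);
+ *   PVRSRVRGXClearRegConfigKM(psConnection, psDeviceNode);
+ */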
+#endif /* __RGXREGCONFIG_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxsignals.c b/drivers/gpu/drm/img-rogue/1.10/rgxsignals.c
new file mode 100644
index 00000000000000..1db7e9132856ff
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxsignals.c
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File rgxsignals.c
+@Title RGX Signals routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Signals routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxsignals.h"
+
+#include "rgxmem.h"
+#include "rgx_fwif_km.h"
+#include "mmu_common.h"
+#include "devicemem.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sDevSignalAddress)
+{
+ DEVMEM_MEMDESC *psFWMemContextMemDesc;
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_SIGNAL_UPDATE;
+ sKCCBCmd.uCmdData.sSignalUpdateData.sDevSignalAddress = sDevSignalAddress;
+ RGXSetFirmwareAddress(&sKCCBCmd.uCmdData.sSignalUpdateData.psFWMemContext,
+ psFWMemContextMemDesc,
+ 0, RFW_FWADDR_NOREF_FLAG);
+
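+	/* Retry while the firmware command cannot be queued; RGXScheduleCommand
+	 * reports this with PVRSRV_ERROR_RETRY. */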
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand((PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sKCCBCmd,
+ sizeof(sKCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXNotifySignalUpdateKM: Failed to schedule the FW command %d (%s)",
+ eError, PVRSRVGETERRORSTRING(eError)));
+ }
+
+ return eError;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxsignals.h b/drivers/gpu/drm/img-rogue/1.10/rgxsignals.h
new file mode 100644
index 00000000000000..509d960c5622aa
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxsignals.h
@@ -0,0 +1,71 @@
+/*************************************************************************/ /*!
+@File rgxsignals.h
+@Title RGX Signals routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Signals routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_SIGNALS_H)
+#define _RGX_SIGNALS_H
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+#include "device.h"
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXNotifySignalUpdateKM
+
+ @Description Server-side implementation of RGXNotifySignalUpdate
+
+ @Input hMemCtxPrivData - memory context private data
+ @Input sDevSignalAddress - device virtual address of the updated signal
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+
+PVRSRV_ERROR PVRSRVRGXNotifySignalUpdateKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_HANDLE hMemCtxPrivData,
+ IMG_DEV_VIRTADDR sDevSignalAddress);
+
+#endif /* _RGX_SIGNALS_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxsrvinit.c b/drivers/gpu/drm/img-rogue/1.10/rgxsrvinit.c
new file mode 100644
index 00000000000000..a8309ed549b4d8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxsrvinit.c
@@ -0,0 +1,1678 @@
+/*************************************************************************/ /*!
+@File
+@Title Services initialisation routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "srvinit.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+#include "km_apphint_defs.h"
+#include "htbuffer_types.h"
+#include "htbuffer_init.h"
+
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+
+#include "rgx_fwif.h"
+#include "pdump_km.h"
+
+#include "rgx_fwif_sig.h"
+#include "rgxinit.h"
+
+#include "rgx_compat_bvnc.h"
+
+#include "rgxdefs_km.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+#include "rgx_fwif_hwperf.h"
+#include "rgx_hwperf_table.h"
+
+#include "rgxfwload.h"
+#include "rgxlayer_impl.h"
+#include "rgxfwimageutils.h"
+
+#include "rgx_hwperf.h"
+#include "rgx_bvnc_defs_km.h"
+
+#include "rgxdevice.h"
+
+#include "pvrsrv.h"
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#endif
+
+#define DRIVER_MODE_HOST 0 /* AppHint value for host driver mode */
+
+#define HW_PERF_FILTER_DEFAULT 0x00000000 /* Default to no HWPerf */
+#define HW_PERF_FILTER_DEFAULT_ALL_ON 0xFFFFFFFF /* All events */
+
+#define VZ_RGX_FW_FILENAME_SUFFIX ".vz"
+
+#if defined(SUPPORT_VALIDATION)
+#include "pvrsrv_apphint.h"
+#endif
+
+#if defined(LINUX)
+#include "km_apphint.h"
+#include "os_srvinit_param.h"
+#else
+#include "srvinit_param.h"
+/*!
+*******************************************************************************
+ * AppHint mnemonic data type helper tables
+******************************************************************************/
+/* apphint map of name vs. enable flag */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_loggroup_tbl[] = {
+#define X(a, b) { #b, HTB_LOG_GROUP_FLAG(a) },
+ HTB_LOG_SFGROUPLIST
+#undef X
+};
+/* apphint map of arg vs. OpMode */
+static SRV_INIT_PARAM_UINT32_LOOKUP htb_opmode_tbl[] = {
+ { "droplatest", HTB_OPMODE_DROPLATEST},
+ { "dropoldest", HTB_OPMODE_DROPOLDEST},
+ /* HTB should never be started in HTB_OPMODE_BLOCK
+ * as this can lead to deadlocks
+ */
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_logtype_tbl[] = {
+ { "trace", 2},
+ { "tbi", 1},
+ { "none", 0}
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP timecorr_clk_tbl[] = {
+ { "mono", 0 },
+ { "mono_raw", 1 },
+ { "sched", 2 }
+};
+
+static SRV_INIT_PARAM_UINT32_LOOKUP fwt_loggroup_tbl[] = { RGXFWIF_LOG_GROUP_NAME_VALUE_MAP };
+
+/*
+ * Services AppHints initialisation
+ */
+#define X(a, b, c, d, e) SrvInitParamInit ## b( a, d, e )
+APPHINT_LIST_ALL
+#undef X
+#endif /* LINUX */
+
+/*
+ * Container for all the apphints used by this module
+ */
+typedef struct _RGX_SRVINIT_APPHINTS_
+{
+ IMG_UINT32 ui32DriverMode;
+ IMG_BOOL bDustRequestInject;
+ IMG_BOOL bEnableSignatureChecks;
+ IMG_UINT32 ui32SignatureChecksBufSize;
+
+#if defined(DEBUG)
+ IMG_BOOL bAssertOnOutOfMem;
+#endif
+ IMG_BOOL bAssertOnHWRTrigger;
+ IMG_BOOL bCheckMlist;
+ IMG_BOOL bDisableClockGating;
+ IMG_BOOL bDisableDMOverlap;
+ IMG_BOOL bDisableFEDLogging;
+ IMG_BOOL bDisablePDP;
+ IMG_BOOL bEnableCDMKillRand;
+ IMG_BOOL bEnableHWR;
+ IMG_BOOL bFilteringMode;
+ IMG_BOOL bHWPerfDisableCustomCounterFilter;
+ IMG_BOOL bZeroFreelist;
+ IMG_UINT32 ui32EnableFWContextSwitch;
+ IMG_UINT32 ui32FWContextSwitchProfile;
+ IMG_UINT32 ui32VDMContextSwitchMode;
+ IMG_UINT32 ui32HWPerfFWBufSize;
+ IMG_UINT32 ui32HWPerfHostBufSize;
+ IMG_UINT32 ui32HWPerfFilter0;
+ IMG_UINT32 ui32HWPerfFilter1;
+ IMG_UINT32 ui32HWPerfHostFilter;
+ IMG_UINT32 ui32TimeCorrClock;
+ IMG_UINT32 ui32HWRDebugDumpLimit;
+ IMG_UINT32 ui32JonesDisableMask;
+ IMG_UINT32 ui32LogType;
+ IMG_UINT32 ui32TruncateMode;
+ FW_PERF_CONF eFirmwarePerf;
+ RGX_ACTIVEPM_CONF eRGXActivePMConf;
+ RGX_META_T1_CONF eUseMETAT1;
+ RGX_RD_POWER_ISLAND_CONF eRGXRDPowerIslandConf;
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS];
+#endif
+ IMG_BOOL bEnableTrustedDeviceAceConfig;
+ IMG_UINT32 ui32FWContextSwitchCrossDM;
+} RGX_SRVINIT_APPHINTS;
+
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+/*
+ * Parses the dot('.')-separated OSID regions in a string and stores the integer results
+ * in an array. Numbers can be decimal or hex (starting with 0x) and there must be a '.' between
+ * each (example: 1.2.3.4.5.6.7.8)
+ */
+static void _ParseOSidRegionString(IMG_CHAR *apszBuffer, IMG_UINT32 *pui32ApphintArray)
+{
+ IMG_UINT32 ui32OSid;
+ IMG_CHAR *pui8StringParsingBase=apszBuffer;
+ IMG_UINT32 ui32StringLength = OSStringLength(apszBuffer);
+
+ /* Initialize all apphints to 0 */
+ for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++)
+ {
+ pui32ApphintArray[ui32OSid] = 0;
+ }
+
+ /* Parse the string. Even if it fails, apphints will have been initialized */
+ for (ui32OSid = 0; ui32OSid < GPUVIRT_VALIDATION_NUM_OS; ui32OSid++)
+ {
+ IMG_UINT32 ui32Base=10;
+ IMG_CHAR *pui8StringParsingNextDelimiter;
+
+ /* Find the next character in the string that's not a ',' '.' or ' ' */
+ while ((*pui8StringParsingBase == '.' ||
+ *pui8StringParsingBase == ',' ||
+ *pui8StringParsingBase == ' ') &&
+ pui8StringParsingBase - apszBuffer <= ui32StringLength)
+ {
+ pui8StringParsingBase++;
+ }
+
+ if (pui8StringParsingBase - apszBuffer > ui32StringLength)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Reached the end of the apphint string while trying to parse it.\nBuffer: %s, OSid: %d", pui8StringParsingBase, ui32OSid));
+			return;
+ }
+
+ /* If the substring begins with "0x" move the pointer 2 bytes forward and set the base to 16 */
+ if (*pui8StringParsingBase == '0' && *(pui8StringParsingBase+1) =='x')
+ {
+ ui32Base=16;
+ pui8StringParsingBase+=2;
+ }
+
+ /* Find the next delimiter in the string or the end of the string itself if we're parsing the final number */
+ pui8StringParsingNextDelimiter = pui8StringParsingBase;
+
+ while(*pui8StringParsingNextDelimiter!='.' &&
+ *pui8StringParsingNextDelimiter!=',' &&
+ *pui8StringParsingNextDelimiter!=' ' &&
+ *pui8StringParsingNextDelimiter!='\0' &&
+ (pui8StringParsingNextDelimiter - apszBuffer <= ui32StringLength))
+ {
+ pui8StringParsingNextDelimiter++;
+ }
+
+		/*
+		 * Each number is followed by a '.' except for the last one. If a string terminator is
+		 * found where one is not expected, the function returns.
+		 */
+
+ if (*pui8StringParsingNextDelimiter=='\0' && ui32OSid < GPUVIRT_VALIDATION_NUM_OS - 1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "There was an error parsing the OSid Region Apphint Strings"));
+			return;
+ }
+
+		/* Replace the '.' with a string terminator so that the number can be parsed as an integer */
+ *pui8StringParsingNextDelimiter = '\0';
+
+ /* Parse the number. The fact that it is followed by '\0' means that the string parsing utility
+ * will finish there and not try to parse the rest */
+
+ OSStringToUINT32(pui8StringParsingBase, ui32Base, &pui32ApphintArray[ui32OSid]);
+
+ pui8StringParsingBase = pui8StringParsingNextDelimiter + 1;
+ }
+}
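+/*
+ * Example (illustrative): with GPUVIRT_VALIDATION_NUM_OS == 8, parsing
+ * "0x1000.2.3.4.5.6.7.8" stores { 0x1000, 2, 3, 4, 5, 6, 7, 8 } in
+ * pui32ApphintArray[0..7]; decimal and hex values may be mixed freely.
+ */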
+
+#endif
+/*!
+*******************************************************************************
+
+ @Function GetApphints
+
+ @Description Read init time apphints and initialise internal variables
+
+ @Input psHints : Pointer to apphints container
+
+ @Return void
+
+******************************************************************************/
+static INLINE void GetApphints(PVRSRV_RGXDEV_INFO *psDevInfo, RGX_SRVINIT_APPHINTS *psHints)
+{
+ void *pvParamState = SrvInitParamOpen();
+ IMG_UINT32 ui32ParamTemp;
+	IMG_BOOL bS7TopInfra = IMG_FALSE, bE42290 = IMG_FALSE, bTPUFiltermodeCtrl = IMG_FALSE,
+			bE42606 = IMG_FALSE, bAXIACELite = IMG_FALSE;
+
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ bS7TopInfra = IMG_TRUE;
+ }
+
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, TPU_FILTERING_MODE_CONTROL))
+ {
+ bTPUFiltermodeCtrl = IMG_TRUE;
+ }
+
+ if(RGX_IS_ERN_SUPPORTED(psDevInfo, 42290))
+ {
+ bE42290 = IMG_TRUE;
+ }
+
+ if(RGX_IS_ERN_SUPPORTED(psDevInfo, 42606))
+ {
+ bE42606 = IMG_TRUE;
+ }
+
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, AXI_ACELITE))
+ {
+ bAXIACELite = IMG_TRUE;
+ }
+
+ /*
+ * NB AppHints initialised to a default value via SrvInitParamInit* macros above
+ */
+ SrvInitParamGetUINT32(pvParamState, DriverMode, psHints->ui32DriverMode);
+ SrvInitParamGetBOOL(pvParamState, DustRequestInject, psHints->bDustRequestInject);
+ SrvInitParamGetBOOL(pvParamState, EnableSignatureChecks, psHints->bEnableSignatureChecks);
+ SrvInitParamGetUINT32(pvParamState, SignatureChecksBufSize, psHints->ui32SignatureChecksBufSize);
+
+#if defined(DEBUG)
+ SrvInitParamGetBOOL(pvParamState, AssertOutOfMemory, psHints->bAssertOnOutOfMem);
+#endif
+ SrvInitParamGetBOOL(pvParamState, AssertOnHWRTrigger, psHints->bAssertOnHWRTrigger);
+ SrvInitParamGetBOOL(pvParamState, CheckMList, psHints->bCheckMlist);
+ SrvInitParamGetBOOL(pvParamState, DisableClockGating, psHints->bDisableClockGating);
+ SrvInitParamGetBOOL(pvParamState, DisableDMOverlap, psHints->bDisableDMOverlap);
+ SrvInitParamGetBOOL(pvParamState, DisableFEDLogging, psHints->bDisableFEDLogging);
+ SrvInitParamGetUINT32(pvParamState, EnableAPM, ui32ParamTemp);
+ psHints->eRGXActivePMConf = ui32ParamTemp;
+ SrvInitParamGetBOOL(pvParamState, EnableCDMKillingRandMode, psHints->bEnableCDMKillRand);
+ SrvInitParamGetUINT32(pvParamState, EnableFWContextSwitch, psHints->ui32EnableFWContextSwitch);
+ SrvInitParamGetUINT32(pvParamState, VDMContextSwitchMode, psHints->ui32VDMContextSwitchMode);
+ SrvInitParamGetBOOL(pvParamState, EnableHWR, psHints->bEnableHWR);
+ SrvInitParamGetUINT32(pvParamState, EnableRDPowerIsland, ui32ParamTemp);
+ psHints->eRGXRDPowerIslandConf = ui32ParamTemp;
+ SrvInitParamGetUINT32(pvParamState, FirmwarePerf, ui32ParamTemp);
+ psHints->eFirmwarePerf = ui32ParamTemp;
+ SrvInitParamGetUINT32(pvParamState, FWContextSwitchProfile, psHints->ui32FWContextSwitchProfile);
+ SrvInitParamGetBOOL(pvParamState, HWPerfDisableCustomCounterFilter, psHints->bHWPerfDisableCustomCounterFilter);
+ SrvInitParamGetUINT32(pvParamState, HWPerfHostBufSizeInKB, psHints->ui32HWPerfHostBufSize);
+ SrvInitParamGetUINT32(pvParamState, HWPerfFWBufSizeInKB, psHints->ui32HWPerfFWBufSize);
+#if defined(LINUX)
+ /* name changes */
+ {
+ IMG_UINT64 ui64Tmp;
+ SrvInitParamGetBOOL(pvParamState, DisablePDumpPanic, psHints->bDisablePDP);
+ SrvInitParamGetUINT64(pvParamState, HWPerfFWFilter, ui64Tmp);
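+		/* The 64-bit HWPerf FW filter apphint is carried to the FW as two
+		 * 32-bit halves. */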
+ psHints->ui32HWPerfFilter0 = (IMG_UINT32)(ui64Tmp & 0xffffffffllu);
+ psHints->ui32HWPerfFilter1 = (IMG_UINT32)((ui64Tmp >> 32) & 0xffffffffllu);
+ }
+#else
+ SrvInitParamUnreferenced(DisablePDumpPanic);
+ SrvInitParamUnreferenced(HWPerfFWFilter);
+ SrvInitParamUnreferenced(RGXBVNC);
+#endif
+ SrvInitParamGetUINT32(pvParamState, HWPerfHostFilter, psHints->ui32HWPerfHostFilter);
+ SrvInitParamGetUINT32List(pvParamState, TimeCorrClock, psHints->ui32TimeCorrClock);
+ SrvInitParamGetUINT32(pvParamState, HWRDebugDumpLimit, ui32ParamTemp);
+ psHints->ui32HWRDebugDumpLimit = MIN(ui32ParamTemp, RGXFWIF_HWR_DEBUG_DUMP_ALL);
+
+ if(bS7TopInfra)
+ {
+ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK (0XFFFFFFCFU)
+ #define RGX_CR_JONES_FIX_MT_ORDER_ISP_EN (0X00000020U)
+ #define RGX_CR_JONES_FIX_MT_ORDER_TE_EN (0X00000010U)
+
+ SrvInitParamGetUINT32(pvParamState, JonesDisableMask, ui32ParamTemp);
+ if (((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_ISP_EN) ||
+ ((ui32ParamTemp & ~RGX_CR_JONES_FIX_MT_ORDER_ISP_TE_CLRMSK) == RGX_CR_JONES_FIX_MT_ORDER_TE_EN))
+ {
+ ui32ParamTemp |= (RGX_CR_JONES_FIX_MT_ORDER_TE_EN |
+ RGX_CR_JONES_FIX_MT_ORDER_ISP_EN);
+ PVR_DPF((PVR_DBG_WARNING, "Tile reordering mode requires both TE and ISP enabled. Forcing JonesDisableMask = %d",
+ ui32ParamTemp));
+ }
+ psHints->ui32JonesDisableMask = ui32ParamTemp;
+ }
+
+ if ( (bE42290) && (bTPUFiltermodeCtrl))
+ {
+ SrvInitParamGetBOOL(pvParamState, NewFilteringMode, psHints->bFilteringMode);
+ }
+
+ if(bE42606)
+ {
+ SrvInitParamGetUINT32(pvParamState, TruncateMode, psHints->ui32TruncateMode);
+ }
+#if defined(EMULATOR)
+ if(bAXIACELite)
+ {
+ SrvInitParamGetBOOL(pvParamState, EnableTrustedDeviceAceConfig, psHints->bEnableTrustedDeviceAceConfig);
+ }
+#endif
+
+ SrvInitParamGetUINT32(pvParamState, UseMETAT1, ui32ParamTemp);
+ psHints->eUseMETAT1 = ui32ParamTemp & RGXFWIF_INICFG_METAT1_MASK;
+
+ SrvInitParamGetBOOL(pvParamState, ZeroFreelist, psHints->bZeroFreelist);
+
+#if defined(LINUX)
+ SrvInitParamGetUINT32(pvParamState, FWContextSwitchCrossDM, psHints->ui32FWContextSwitchCrossDM);
+#else
+ SrvInitParamUnreferenced(FWContextSwitchCrossDM);
+#endif
+
+ /*
+ * FW logs apphints
+ */
+ {
+ IMG_UINT32 ui32LogType;
+ IMG_BOOL bAnyLogGroupConfigured;
+
+ SrvInitParamGetUINT32BitField(pvParamState, EnableLogGroup, ui32LogType);
+ bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+ SrvInitParamGetUINT32List(pvParamState, FirmwareLogType, ui32ParamTemp);
+
+ /* Defaulting to TRACE */
+ ui32LogType |= RGXFWIF_LOG_TYPE_TRACE;
+
+ if (ui32ParamTemp == 2 /* TRACE */)
+ {
+ if (!bAnyLogGroupConfigured)
+ {
+ /* No groups configured - defaulting to MAIN group */
+ ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+ }
+ }
+ else if (ui32ParamTemp == 1 /* TBI */)
+ {
+ if (!bAnyLogGroupConfigured)
+ {
+ /* No groups configured - defaulting to MAIN group */
+ ui32LogType |= RGXFWIF_LOG_TYPE_GROUP_MAIN;
+ }
+ ui32LogType &= ~RGXFWIF_LOG_TYPE_TRACE;
+ }
+ else if (ui32ParamTemp == 0 /* NONE */)
+ {
+ /* "NONE" means "TRACE without any log groups enabled */
+ ui32LogType = RGXFWIF_LOG_TYPE_TRACE;
+ }
+
+ psHints->ui32LogType = ui32LogType;
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+ /*
+ * GPU virtualisation validation apphints
+ */
+ {
+ IMG_CHAR pszOSidRegionBuffer[GPUVIRT_VALIDATION_MAX_STRING_LENGTH];
+
+ SrvInitParamGetSTRING(pvParamState, OSidRegion0Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+ _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[0]);
+
+ SrvInitParamGetSTRING(pvParamState, OSidRegion0Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+ _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[0]);
+
+ SrvInitParamGetSTRING(pvParamState, OSidRegion1Min, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+ _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMin[1]);
+
+ SrvInitParamGetSTRING(pvParamState, OSidRegion1Max, pszOSidRegionBuffer, GPUVIRT_VALIDATION_MAX_STRING_LENGTH);
+ _ParseOSidRegionString(pszOSidRegionBuffer, psHints->aui32OSidMax[1]);
+ }
+#else
+#if !defined(LINUX)
+ SrvInitParamUnreferenced(OSidRegion0Min);
+ SrvInitParamUnreferenced(OSidRegion0Max);
+ SrvInitParamUnreferenced(OSidRegion1Min);
+ SrvInitParamUnreferenced(OSidRegion1Max);
+#endif /* !defined(LINUX) */
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+
+ SrvInitParamClose(pvParamState);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function GetFWConfigFlags
+
+ @Description Initialise and return FW config flags
+
+ @Input psHints : Apphints container
+ @Input pui32FWConfigFlags : Pointer to config flags
+
+ @Return void
+
+******************************************************************************/
+static INLINE void GetFWConfigFlags(RGX_SRVINIT_APPHINTS *psHints,
+ IMG_UINT32 *pui32FWConfigFlags,
+ IMG_UINT32 *pui32FWConfigFlagsExt)
+{
+ IMG_UINT32 ui32FWConfigFlags = 0;
+
+#if defined(DEBUG)
+ ui32FWConfigFlags |= psHints->bAssertOnOutOfMem ? RGXFWIF_INICFG_ASSERT_ON_OUTOFMEMORY : 0;
+#endif
+ ui32FWConfigFlags |= psHints->bAssertOnHWRTrigger ? RGXFWIF_INICFG_ASSERT_ON_HWR_TRIGGER : 0;
+ ui32FWConfigFlags |= psHints->bCheckMlist ? RGXFWIF_INICFG_CHECK_MLIST_EN : 0;
+ ui32FWConfigFlags |= psHints->bDisableClockGating ? RGXFWIF_INICFG_DISABLE_CLKGATING_EN : 0;
+ ui32FWConfigFlags |= psHints->bDisableDMOverlap ? RGXFWIF_INICFG_DISABLE_DM_OVERLAP : 0;
+ ui32FWConfigFlags |= psHints->bDisablePDP ? RGXFWIF_SRVCFG_DISABLE_PDP_EN : 0;
+ ui32FWConfigFlags |= psHints->bEnableCDMKillRand ? RGXFWIF_INICFG_CDM_KILL_MODE_RAND_EN : 0;
+ ui32FWConfigFlags |= (psHints->ui32HWPerfFilter0 != 0 || psHints->ui32HWPerfFilter1 != 0) ? RGXFWIF_INICFG_HWPERF_EN : 0;
+#if !defined(NO_HARDWARE)
+ ui32FWConfigFlags |= psHints->bEnableHWR ? RGXFWIF_INICFG_HWR_EN : 0;
+#endif
+ ui32FWConfigFlags |= psHints->bHWPerfDisableCustomCounterFilter ? RGXFWIF_INICFG_HWP_DISABLE_FILTER : 0;
+ ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_CUSTOM_TIMER) ? RGXFWIF_INICFG_CUSTOM_PERF_TIMER_EN : 0;
+ ui32FWConfigFlags |= (psHints->eFirmwarePerf == FW_PERF_CONF_POLLS) ? RGXFWIF_INICFG_POLL_COUNTERS_EN : 0;
+ ui32FWConfigFlags |= psHints->eUseMETAT1 << RGXFWIF_INICFG_METAT1_SHIFT;
+ ui32FWConfigFlags |= psHints->ui32EnableFWContextSwitch & ~RGXFWIF_INICFG_CTXSWITCH_CLRMSK;
+ ui32FWConfigFlags |= (psHints->ui32VDMContextSwitchMode << RGXFWIF_INICFG_VDM_CTX_STORE_MODE_SHIFT) & ~RGXFWIF_INICFG_VDM_CTX_STORE_MODE_CLRMSK;
+
+ ui32FWConfigFlags |= (psHints->ui32FWContextSwitchProfile << RGXFWIF_INICFG_CTXSWITCH_PROFILE_SHIFT) & RGXFWIF_INICFG_CTXSWITCH_PROFILE_MASK;
+
+ *pui32FWConfigFlags = ui32FWConfigFlags;
+ *pui32FWConfigFlagsExt = psHints->ui32FWContextSwitchCrossDM;
+ *pui32FWConfigFlagsExt |= RGXFWIF_INICFG_EXT_HWPERF_FEATURE_FLAGS;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function GetFilterFlags
+
+ @Description Initialise and return filter flags
+
+ @Input psHints : Apphints container
+
+ @Return Filter flags
+
+******************************************************************************/
+static INLINE IMG_UINT32 GetFilterFlags(RGX_SRVINIT_APPHINTS *psHints)
+{
+ IMG_UINT32 ui32FilterFlags = 0;
+
+ ui32FilterFlags |= psHints->bFilteringMode ? RGXFWIF_FILTCFG_NEW_FILTER_MODE : 0;
+ if (psHints->ui32TruncateMode == 2)
+ {
+ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_INT;
+ }
+ else if (psHints->ui32TruncateMode == 3)
+ {
+ ui32FilterFlags |= RGXFWIF_FILTCFG_TRUNCATE_HALF;
+ }
+
+ return ui32FilterFlags;
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function GetDeviceFlags
+
+ @Description Initialise and return device flags
+
+ @Input psHints : Apphints container
+ @Input pui32DeviceFlags : Pointer to device flags
+
+ @Return void
+
+******************************************************************************/
+static INLINE void GetDeviceFlags(RGX_SRVINIT_APPHINTS *psHints,
+ IMG_UINT32 *pui32DeviceFlags)
+{
+ IMG_UINT32 ui32DeviceFlags = 0;
+
+ ui32DeviceFlags |= psHints->bDustRequestInject? RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN : 0;
+
+ ui32DeviceFlags |= psHints->bZeroFreelist ? RGXKMIF_DEVICE_STATE_ZERO_FREELIST : 0;
+ ui32DeviceFlags |= psHints->bDisableFEDLogging ? RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN : 0;
+ ui32DeviceFlags |= (psHints->ui32HWPerfHostFilter != 0) ? RGXKMIF_DEVICE_STATE_HWPERF_HOST_EN : 0;
+
+ *pui32DeviceFlags = ui32DeviceFlags;
+}
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+/*************************************************************************/ /*!
+ @Function RGXTDProcessFWImage
+
+ @Description Fetch and send data used by the trusted device to complete
+ the FW image setup
+
+ @Input psDeviceNode - Device node
+ @Input psRGXFW - Firmware blob
+
+ @Return PVRSRV_ERROR
+*/ /**************************************************************************/
+static PVRSRV_ERROR RGXTDProcessFWImage(PVRSRV_DEVICE_NODE *psDeviceNode,
+ struct RGXFW *psRGXFW)
+{
+ PVRSRV_DEVICE_CONFIG *psDevConfig = psDeviceNode->psDevConfig;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_TD_FW_PARAMS sTDFWParams;
+ PVRSRV_ERROR eError;
+
+ if (psDevConfig->pfnTDSendFWImage == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXTDProcessFWImage: TDProcessFWImage not implemented!"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+ }
+
+ sTDFWParams.pvFirmware = RGXFirmwareData(psRGXFW);
+ sTDFWParams.ui32FirmwareSize = RGXFirmwareSize(psRGXFW);
+ sTDFWParams.sFWCodeDevVAddrBase = psDevInfo->sFWCodeDevVAddrBase;
+ sTDFWParams.sFWDataDevVAddrBase = psDevInfo->sFWDataDevVAddrBase;
+ sTDFWParams.sFWCorememCodeFWAddr = psDevInfo->sFWCorememCodeFWAddr;
+ sTDFWParams.sFWInitFWAddr = psDevInfo->sFWInitFWAddr;
+
+ eError = psDevConfig->pfnTDSendFWImage(psDevConfig->hSysData, &sTDFWParams);
+
+ return eError;
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function AcquireHostData
+
+ @Description Acquire Device MemDesc and CPU pointer for a given PMR
+
+ @Input psDeviceNode : Device Node
+ @Input hPMR : PMR
+ @Output ppsHostMemDesc : Returned MemDesc
+ @Output ppvHostAddr : Returned CPU pointer
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static INLINE
+PVRSRV_ERROR AcquireHostData(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PMR* pPMR,
+ DEVMEM_MEMDESC **ppsHostMemDesc,
+ void **ppvHostAddr)
+{
+ IMG_HANDLE hImportHandle;
+ IMG_DEVMEM_SIZE_T uiImportSize;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemMakeLocalImportHandle(psDeviceNode,
+ pPMR,
+ &hImportHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemMakeLocalImportHandle failed (%d)", eError));
+ goto acquire_failmakehandle;
+ }
+
+ eError = DevmemLocalImport(psDeviceNode,
+ hImportHandle,
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE,
+ ppsHostMemDesc,
+ &uiImportSize,
+ "AcquireHostData");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemLocalImport failed (%d)", eError));
+ goto acquire_failimport;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(*ppsHostMemDesc,
+ ppvHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "AcquireHostData: DevmemAcquireCpuVirtAddr failed (%d)", eError));
+ goto acquire_failcpuaddr;
+ }
+
+ /* We don't need the import handle anymore */
+ DevmemUnmakeLocalImportHandle(psDeviceNode, hImportHandle);
+
+ return PVRSRV_OK;
+
+
+acquire_failcpuaddr:
+ DevmemFree(*ppsHostMemDesc);
+
+acquire_failimport:
+ DevmemUnmakeLocalImportHandle(psDeviceNode, hImportHandle);
+
+acquire_failmakehandle:
+ return eError;
+}
+
+/*!
+*******************************************************************************
+
+ @Function ReleaseHostData
+
+ @Description Releases resources associated with a Device MemDesc
+
+ @Input psHostMemDesc : MemDesc to free
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static INLINE void ReleaseHostData(DEVMEM_MEMDESC *psHostMemDesc)
+{
+ DevmemReleaseCpuVirtAddr(psHostMemDesc);
+ DevmemFree(psHostMemDesc);
+}
+
+/*!
+*******************************************************************************
+
+ @Function GetFirmwareBVNC
+
+ @Description Retrieves FW BVNC information from binary data
+
+ @Input psRGXFW : Firmware binary handle to get BVNC from
+
+ @Output psRGXFWBVNC : structure store BVNC info
+
+ @Return IMG_TRUE upon success, IMG_FALSE otherwise
+
+******************************************************************************/
+static INLINE IMG_BOOL GetFirmwareBVNC(struct RGXFW *psRGXFW,
+ RGXFWIF_COMPCHECKS_BVNC *psFWBVNC)
+{
+#if defined(LINUX)
+ const size_t FWSize = RGXFirmwareSize(psRGXFW);
+ const RGXFWIF_COMPCHECKS_BVNC * psBinBVNC;
+#endif
+
+#if !defined(LINUX)
+ /* Check not available in non linux OSes. Just fill the struct and return true */
+ psFWBVNC->ui32LayoutVersion = RGXFWIF_COMPCHECKS_LAYOUT_VERSION;
+ psFWBVNC->ui32VLenMax = RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX;
+
+ rgx_bvnc_packed(&psFWBVNC->ui64BNC, psFWBVNC->aszV, psFWBVNC->ui32VLenMax,
+ RGX_BNC_KM_B, RGX_BVNC_KM_V_ST, RGX_BNC_KM_N, RGX_BNC_KM_C);
+
+#else
+
+ if (FWSize < FW_BVNC_BACKWARDS_OFFSET)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Firmware is too small (%zu bytes)",
+ __func__, FWSize));
+ return IMG_FALSE;
+ }
+
+ psBinBVNC = (RGXFWIF_COMPCHECKS_BVNC *) ((IMG_UINT8 *) (RGXFirmwareData(psRGXFW)) +
+ (FWSize - FW_BVNC_BACKWARDS_OFFSET));
+
+ psFWBVNC->ui32LayoutVersion = RGX_INT32_FROM_BE(psBinBVNC->ui32LayoutVersion);
+
+ psFWBVNC->ui32VLenMax = RGX_INT32_FROM_BE(psBinBVNC->ui32VLenMax);
+
+ psFWBVNC->ui64BNC = RGX_INT64_FROM_BE(psBinBVNC->ui64BNC);
+
+ strncpy(psFWBVNC->aszV, psBinBVNC->aszV, sizeof(psFWBVNC->aszV));
+#endif /* defined(LINUX) */
+
+ return IMG_TRUE;
+}
+
+/*!
+*******************************************************************************
+
+ @Function InitFirmware
+
+ @Description Allocate, initialise and pdump Firmware code and data memory
+
+ @Input psDeviceNode : Device Node
+ @Input psHints : Apphints
+ @Input psBVNC : Compatibility checks
+ @Output phFWCodePMR : FW code PMR handle
+ @Output phFWDataPMR : FW data PMR handle
+ @Output phFWCorememPMR : FW coremem code PMR handle
+ @Output phHWPerfDataPMR : HWPerf control PMR handle
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitFirmware(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_SRVINIT_APPHINTS *psHints,
+ RGXFWIF_COMPCHECKS_BVNC *psBVNC,
+ PMR **phFWCodePMR,
+ PMR **phFWDataPMR,
+ PMR **phFWCorememPMR,
+ PMR **phHWPerfDataPMR)
+{
+ struct RGXFW *psRGXFW = NULL;
+ const IMG_BYTE *pbRGXFirmware = NULL;
+ RGXFWIF_COMPCHECKS_BVNC sFWBVNC;
+
+ /* FW code memory */
+ IMG_DEVMEM_SIZE_T uiFWCodeAllocSize;
+ IMG_DEV_VIRTADDR sFWCodeDevVAddrBase;
+ DEVMEM_MEMDESC *psFWCodeHostMemDesc;
+ void *pvFWCodeHostAddr;
+
+ /* FW data memory */
+ IMG_DEVMEM_SIZE_T uiFWDataAllocSize;
+ IMG_DEV_VIRTADDR sFWDataDevVAddrBase;
+ DEVMEM_MEMDESC *psFWDataHostMemDesc;
+ void *pvFWDataHostAddr;
+
+ /* FW coremem code memory */
+ IMG_DEVMEM_SIZE_T uiFWCorememCodeAllocSize;
+ IMG_DEV_VIRTADDR sFWCorememDevVAddrBase;
+
+ /*
+ * Only declare psFWCorememHostMemDesc where used (PVR_UNREFERENCED_PARAMETER doesn't
+ * help for local vars when using certain compilers)
+ */
+ DEVMEM_MEMDESC *psFWCorememHostMemDesc;
+ void *pvFWCorememHostAddr = NULL;
+
+ RGXFWIF_DEV_VIRTADDR sFWCorememFWAddr; /* FW coremem data */
+ RGXFWIF_DEV_VIRTADDR sRGXFwInit; /* FW init struct */
+ RGX_LAYER_PARAMS sLayerParams;
+ IMG_UINT32 ui32FWConfigFlags, ui32FWConfigFlagsExt;
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ const IMG_CHAR * const pszFWFilenameSuffix =
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_NATIVE) ? "" : VZ_RGX_FW_FILENAME_SUFFIX;
+ IMG_CHAR aszFWFilenameStr[sizeof(RGX_FW_FILENAME) +
+ MAX_BVNC_STRING_LEN +
+ sizeof(VZ_RGX_FW_FILENAME_SUFFIX)];
+ IMG_CHAR aszFWpFilenameStr[ARRAY_SIZE(aszFWFilenameStr)];
+
+ OSSNPrintf(aszFWFilenameStr, ARRAY_SIZE(aszFWFilenameStr),
+ "%s.%d.%d.%d.%d%s",
+ RGX_FW_FILENAME,
+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+ pszFWFilenameSuffix);
+
+ OSSNPrintf(aszFWpFilenameStr, ARRAY_SIZE(aszFWpFilenameStr),
+ "%s.%d.%dp.%d.%d%s",
+ RGX_FW_FILENAME,
+ psDevInfo->sDevFeatureCfg.ui32B, psDevInfo->sDevFeatureCfg.ui32V,
+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C,
+ pszFWFilenameSuffix);
+
+ /*
+ * Get pointer to Firmware image
+ */
+ psRGXFW = RGXLoadFirmware(psDeviceNode, aszFWFilenameStr, aszFWpFilenameStr);
+ if (psRGXFW == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXLoadFirmware failed"));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ goto cleanup_initfw;
+ }
+ pbRGXFirmware = RGXFirmwareData(psRGXFW);
+
+ if (!GetFirmwareBVNC(psRGXFW, &sFWBVNC))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXLoadFirmware failed to get Firmware BVNC"));
+ eError = PVRSRV_ERROR_INIT_FAILURE;
+ goto cleanup_initfw;
+ }
+
+ }
+ sLayerParams.psDevInfo = psDevInfo;
+
+ /*
+ * Allocate Firmware memory
+ */
+
+ eError = RGXGetFWImageAllocSize(&sLayerParams,
+ pbRGXFirmware,
+ &uiFWCodeAllocSize,
+ &uiFWDataAllocSize,
+ &uiFWCorememCodeAllocSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXGetFWImageAllocSize failed"));
+ goto cleanup_initfw;
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Disable META core memory allocation unless the META DMA is available */
+ if (!RGX_DEVICE_HAS_FEATURE(&sLayerParams, META_DMA))
+ {
+ uiFWCorememCodeAllocSize = 0;
+ }
+#endif
+ eError = PVRSRVRGXInitAllocFWImgMemKM(psDeviceNode,
+ uiFWCodeAllocSize,
+ uiFWDataAllocSize,
+ uiFWCorememCodeAllocSize,
+ phFWCodePMR,
+ &sFWCodeDevVAddrBase,
+ phFWDataPMR,
+ &sFWDataDevVAddrBase,
+ phFWCorememPMR,
+ &sFWCorememDevVAddrBase,
+ &sFWCorememFWAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitAllocFWImgMem failed (%d)", eError));
+ goto cleanup_initfw;
+ }
+
+
+ /*
+ * Setup Firmware initialisation data
+ */
+
+ GetFWConfigFlags(psHints, &ui32FWConfigFlags, &ui32FWConfigFlagsExt);
+
+ eError = PVRSRVRGXInitFirmwareKM(psDeviceNode,
+ &sRGXFwInit,
+ psHints->bEnableSignatureChecks,
+ psHints->ui32SignatureChecksBufSize,
+ psHints->ui32HWPerfFWBufSize,
+ (IMG_UINT64)psHints->ui32HWPerfFilter0 |
+ ((IMG_UINT64)psHints->ui32HWPerfFilter1 << 32),
+ 0,
+ NULL,
+ ui32FWConfigFlags,
+ psHints->ui32LogType,
+ GetFilterFlags(psHints),
+ psHints->ui32JonesDisableMask,
+ psHints->ui32HWRDebugDumpLimit,
+ psBVNC,
+ &sFWBVNC,
+ sizeof(RGXFWIF_HWPERF_CTL),
+ phHWPerfDataPMR,
+ psHints->eRGXRDPowerIslandConf,
+ psHints->eFirmwarePerf,
+ ui32FWConfigFlagsExt);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: PVRSRVRGXInitFirmware failed (%d)", eError));
+ goto cleanup_initfw;
+ }
+
+ /*
+ * Acquire pointers to Firmware allocations
+ */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ eError = AcquireHostData(psDeviceNode,
+ *phFWCodePMR,
+ &psFWCodeHostMemDesc,
+ &pvFWCodeHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW code failed (%d)", eError));
+ goto release_code;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psFWCodeHostMemDesc);
+
+ /* We can't get a pointer to a secure FW allocation from within the DDK */
+ pvFWCodeHostAddr = NULL;
+#endif
+
+ eError = AcquireHostData(psDeviceNode,
+ *phFWDataPMR,
+ &psFWDataHostMemDesc,
+ &pvFWDataHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW data failed (%d)", eError));
+ goto release_data;
+ }
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ if (uiFWCorememCodeAllocSize)
+ {
+ eError = AcquireHostData(psDeviceNode,
+ *phFWCorememPMR,
+ &psFWCorememHostMemDesc,
+ &pvFWCorememHostAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: AcquireHostData for FW coremem code failed (%d)", eError));
+ goto release_corememcode;
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psFWCorememHostMemDesc);
+
+ /* We can't get a pointer to a secure FW allocation from within the DDK */
+ pvFWCorememHostAddr = NULL;
+#endif
+
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ /*
+ * Process the Firmware image and setup code and data segments.
+ *
+ * When the trusted device is enabled and the FW code lives
+ * in secure memory we will only setup the data segments here,
+ * while the code segments will be loaded to secure memory
+ * by the trusted device.
+ */
+ eError = RGXProcessFWImage(&sLayerParams,
+ pbRGXFirmware,
+ pvFWCodeHostAddr,
+ pvFWDataHostAddr,
+ pvFWCorememHostAddr,
+ &sFWCodeDevVAddrBase,
+ &sFWDataDevVAddrBase,
+ &sFWCorememDevVAddrBase,
+ &sFWCorememFWAddr,
+ &sRGXFwInit,
+#if defined(RGXFW_META_SUPPORT_2ND_THREAD)
+ 2,
+#else
+ psHints->eUseMETAT1 == RGX_META_T1_OFF ? 1 : 2,
+#endif
+ psHints->eUseMETAT1 == RGX_META_T1_MAIN ? 1 : 0,
+ uiFWCorememCodeAllocSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXProcessFWImage failed (%d)", eError));
+ goto release_fw_allocations;
+ }
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE) && !defined(NO_HARDWARE)
+ RGXTDProcessFWImage(psDeviceNode, psRGXFW);
+#endif
+
+ /*
+ * Perform final steps (if any) on the kernel
+ * before pdumping the Firmware allocations
+ */
+ eError = PVRSRVRGXInitFinaliseFWImageKM(psDeviceNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "InitFirmware: RGXInitFinaliseFWImage failed (%d)", eError));
+ goto release_fw_allocations;
+ }
+
+ /*
+ * PDump Firmware allocations
+ */
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware code image");
+ DevmemPDumpLoadMem(psFWCodeHostMemDesc,
+ 0,
+ uiFWCodeAllocSize,
+ PDUMP_FLAGS_CONTINUOUS);
+#endif
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware data image");
+ DevmemPDumpLoadMem(psFWDataHostMemDesc,
+ 0,
+ uiFWDataAllocSize,
+ PDUMP_FLAGS_CONTINUOUS);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+ if (uiFWCorememCodeAllocSize)
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Dump firmware coremem image");
+ DevmemPDumpLoadMem(psFWCorememHostMemDesc,
+ 0,
+ uiFWCorememCodeAllocSize,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+#endif
+
+
+ /*
+ * Release Firmware allocations and clean up
+ */
+
+release_fw_allocations:
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_corememcode:
+ if (uiFWCorememCodeAllocSize)
+ {
+ ReleaseHostData(psFWCorememHostMemDesc);
+ }
+#endif
+
+release_data:
+ ReleaseHostData(psFWDataHostMemDesc);
+
+#if !defined(SUPPORT_TRUSTED_DEVICE) || defined(NO_HARDWARE)
+release_code:
+ ReleaseHostData(psFWCodeHostMemDesc);
+#endif
+cleanup_initfw:
+ if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) && psRGXFW != NULL)
+ {
+ RGXUnloadFirmware(psRGXFW);
+ }
+
+ return eError;
+}
+
+
+#if defined(PDUMP)
+/*!
+*******************************************************************************
+
+ @Function InitialiseHWPerfCounters
+
+ @Description
+
+ Initialisation of hardware performance counters and dumping them out to pdump, so that they can be modified at a later point.
+
+ @Input psDeviceNode
+
+ @Input psHWPerfDataMemDesc
+
+ @Input psHWPerfInitDataInt
+
+ @Return void
+
+******************************************************************************/
+
+static void InitialiseHWPerfCounters(PVRSRV_DEVICE_NODE *psDeviceNode, DEVMEM_MEMDESC *psHWPerfDataMemDesc, RGXFWIF_HWPERF_CTL *psHWPerfInitDataInt)
+{
+ RGXFWIF_HWPERF_CTL_BLK *psHWPerfInitBlkData;
+ IMG_UINT32 ui32CntBlkModelLen;
+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL *asCntBlkTypeModel;
+ const RGXFW_HWPERF_CNTBLK_TYPE_MODEL* psBlkTypeDesc;
+ IMG_UINT32 ui32BlockID, ui32BlkCfgIdx, ui32CounterIdx ;
+ RGX_HWPERF_CNTBLK_RT_INFO sCntBlkRtInfo;
+ void *pvDev = psDeviceNode->pvDevice;
+
+ ui32CntBlkModelLen = RGXGetHWPerfBlockConfig(&asCntBlkTypeModel);
+ for(ui32BlkCfgIdx = 0; ui32BlkCfgIdx < ui32CntBlkModelLen; ui32BlkCfgIdx++)
+ {
+ /* Exit early if this core does not have any of these counter blocks
+ * due to core type/BVNC features.... */
+ psBlkTypeDesc = &asCntBlkTypeModel[ui32BlkCfgIdx];
+ if (psBlkTypeDesc->pfnIsBlkPresent(psBlkTypeDesc, pvDev, &sCntBlkRtInfo) == IMG_FALSE)
+ {
+ continue;
+ }
+
+ /* Program all counters in one block so those already on may
+ * be configured off and vice-a-versa. */
+ for (ui32BlockID = psBlkTypeDesc->uiCntBlkIdBase;
+ ui32BlockID < psBlkTypeDesc->uiCntBlkIdBase+sCntBlkRtInfo.uiNumUnits;
+ ui32BlockID++)
+ {
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Unit %d Block : %s", ui32BlockID-psBlkTypeDesc->uiCntBlkIdBase, psBlkTypeDesc->pszBlockNameComment);
+ /* Get the block configure store to update from the global store of
+ * block configuration. This is used to remember the configuration
+ * between configurations and core power on in APM */
+ psHWPerfInitBlkData = rgxfw_hwperf_get_block_ctl(ui32BlockID, psHWPerfInitDataInt);
+ /* Assert to check for HWPerf block mis-configuration */
+ PVR_ASSERT(psHWPerfInitBlkData);
+
+ psHWPerfInitBlkData->bValid = IMG_TRUE;
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bValid: This specifies if the layout block is valid for the given BVNC.");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->bValid) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->bValid,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psHWPerfInitBlkData->bEnabled = IMG_FALSE;
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "bEnabled: Set to 0x1 if the block needs to be enabled during playback. ");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->bEnabled) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->bEnabled,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psHWPerfInitBlkData->eBlockID = ui32BlockID;
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "eBlockID: The Block ID for the layout block. See RGX_HWPERF_CNTBLK_ID for further information.");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->eBlockID) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->eBlockID,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psHWPerfInitBlkData->uiCounterMask = 0x00;
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "uiCounterMask: Bitmask for selecting the counters that need to be configured.(Bit 0 - counter0, bit 1 - counter1 and so on. ");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->uiCounterMask) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->uiCounterMask,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ for(ui32CounterIdx = RGX_CNTBLK_COUNTER0_ID; ui32CounterIdx < psBlkTypeDesc->uiNumCounters; ui32CounterIdx++)
+ {
+ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx] = IMG_UINT64_C(0x0000000000000000);
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "%s_COUNTER_%d", psBlkTypeDesc->pszBlockNameComment,ui32CounterIdx);
+ DevmemPDumpLoadMemValue64(psHWPerfDataMemDesc,
+ (size_t)&(psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx]) - (size_t)(psHWPerfInitDataInt),
+ psHWPerfInitBlkData->aui64CounterCfg[ui32CounterIdx],
+ PDUMP_FLAGS_CONTINUOUS);
+
+ }
+ }
+ }
+}
+/*!
+*******************************************************************************
+
+ @Function InitialiseCustomCounters
+
+ @Description
+
+ Initialisation of custom counters and dumping them out to pdump, so that they can be modified at a later point.
+
+ @Input psDeviceNode
+
+ @Input psHWPerfDataMemDesc
+
+ @Return void
+
+******************************************************************************/
+
+static void InitialiseCustomCounters(PVRSRV_DEVICE_NODE *psDeviceNode, DEVMEM_MEMDESC *psHWPerfDataMemDesc)
+{
+ IMG_UINT32 ui32CustomBlock, ui32CounterID;
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32SelectedCountersBlockMask - The Bitmask of the custom counters that are to be selected");
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ offsetof(RGXFWIF_HWPERF_CTL, ui32SelectedCountersBlockMask),
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ for( ui32CustomBlock = 0; ui32CustomBlock < RGX_HWPERF_MAX_CUSTOM_BLKS; ui32CustomBlock++ )
+ {
+ /*
+ * Some compilers cannot cope with the use of offsetof() below - the specific problem being the use of
+ * a non-const variable in the expression, which it needs to be const. Typical compiler error produced is
+ * "expression must have a constant value".
+ */
+ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounters
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].ui32NumSelectedCounters);
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "ui32NumSelectedCounters - The Number of counters selected for this Custom Block: %d",ui32CustomBlock );
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ uiOffsetOfCustomBlockSelectedCounters,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ for(ui32CounterID = 0; ui32CounterID < RGX_HWPERF_MAX_CUSTOM_CNTRS; ui32CounterID++ )
+ {
+ const IMG_DEVMEM_OFFSET_T uiOffsetOfCustomBlockSelectedCounterIDs
+ = (IMG_DEVMEM_OFFSET_T)(uintptr_t)&(((RGXFWIF_HWPERF_CTL *)0)->SelCntr[ui32CustomBlock].aui32SelectedCountersIDs[ui32CounterID]);
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "CUSTOMBLK_%d_COUNTERID_%d",ui32CustomBlock, ui32CounterID);
+ DevmemPDumpLoadMemValue32(psHWPerfDataMemDesc,
+ uiOffsetOfCustomBlockSelectedCounterIDs,
+ 0,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+}
+
+/*!
+*******************************************************************************
+
+ @Function InitialiseAllCounters
+
+ @Description Initialise HWPerf and custom counters
+
+ @Input psDeviceNode : Device Node
+ @Input psHWPerfDataPMR : HWPerf control PMR handle
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR InitialiseAllCounters(PVRSRV_DEVICE_NODE *psDeviceNode,
+ PMR *psHWPerfDataPMR)
+{
+ RGXFWIF_HWPERF_CTL *psHWPerfInitData;
+ DEVMEM_MEMDESC *psHWPerfDataMemDesc;
+ PVRSRV_ERROR eError;
+
+ eError = AcquireHostData(psDeviceNode,
+ psHWPerfDataPMR,
+ &psHWPerfDataMemDesc,
+ (void **)&psHWPerfInitData);
+
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", failHWPerfCountersMemDescAqCpuVirt);
+ }
+
+ InitialiseHWPerfCounters(psDeviceNode, psHWPerfDataMemDesc, psHWPerfInitData);
+ InitialiseCustomCounters(psDeviceNode, psHWPerfDataMemDesc);
+
+failHWPerfCountersMemDescAqCpuVirt:
+ ReleaseHostData(psHWPerfDataMemDesc);
+
+ return eError;
+}
+#endif /* PDUMP */
+
+/*
+ * _ParseHTBAppHints:
+ *
+ * Generate necessary references to the globally visible AppHints which are
+ * declared in the above #include "km_apphint_defs.h"
+ * Without these local references some compiler tool-chains will treat
+ * unreferenced declarations as fatal errors. This function duplicates the
+ * HTB_specific apphint references which are made in htbserver.c:HTBInit()
+ * However, it makes absolutely *NO* use of these hints.
+ */
+static void
+_ParseHTBAppHints(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ void *pvParamState = NULL;
+ IMG_UINT32 ui32LogType;
+ IMG_BOOL bAnyLogGroupConfigured;
+ IMG_UINT32 ui32BufferSize;
+ IMG_UINT32 ui32OpMode;
+
+ /* Services initialisation parameters */
+ pvParamState = SrvInitParamOpen();
+ if (pvParamState == NULL)
+ return;
+
+ SrvInitParamGetUINT32BitField(pvParamState, EnableHTBLogGroup, ui32LogType);
+ bAnyLogGroupConfigured = ui32LogType ? IMG_TRUE : IMG_FALSE;
+ SrvInitParamGetUINT32List(pvParamState, HTBOperationMode, ui32OpMode);
+ SrvInitParamGetUINT32(pvParamState, HTBufferSizeInKB, ui32BufferSize);
+
+ SrvInitParamClose(pvParamState);
+}
+
+#if defined(PDUMP) && defined(__KERNEL__)
+static void RGXInitFWSigRegisters(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ IMG_UINT32 ui32PhantomCnt = 0;
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, CLUSTER_GROUPING))
+ {
+ ui32PhantomCnt = RGX_REQ_NUM_PHANTOMS(RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) ? RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) : 0) - 1;
+ }
+
+ /*Initialise the TA related signature registers */
+ if(0 == gui32TASigRegCount)
+ {
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SCALABLE_VDM_GPP))
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVB_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT,0, ui32PhantomCnt};
+ }else
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS0_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS1_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS2_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS3_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS4_CHECKSUM, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_USC_UVS5_CHECKSUM, 0, 0, 0};
+ }
+
+ if(RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, SCALABLE_TE_ARCH))
+ {
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, SCALABLE_VDM_GPP))
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PPP_CLIP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT,0, ui32PhantomCnt};
+ }else
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PPP, 0, 0, 0};
+ }
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TE_CHECKSUM,0, 0, 0};
+ }else
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PPP_SIGNATURE, 0, 0, 0};
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TE_SIGNATURE, 0, 0, 0};
+ }
+
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_VCE_CHECKSUM, 0, 0, 0};
+
+ if(!RGX_IS_FEATURE_SUPPORTED(psDevInfo, PDS_PER_DUST) ||
+ !RGX_IS_BRN_SUPPORTED(psDevInfo, 62204))
+ {
+ asTASigRegList[gui32TASigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PDS_DOUTM_STM_SIGNATURE,0, 0, 0};
+ }
+ }
+
+ if(0 == gui323DSigRegCount)
+ {
+ /* List of 3D signature and checksum register addresses */
+ if(!RGX_IS_FEATURE_SUPPORTED(psDevInfo, S7_TOP_INFRASTRUCTURE))
+ {
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM, 0, 0, 0};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM, 0, 0, 0};
+
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, PBE2_IN_XE) && RGX_IS_FEATURE_VALUE_SUPPORTED(psDevInfo, NUM_CLUSTERS) &&
+ RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS) > 1)
+ {
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM, 0, 0, 0};
+ }
+ else
+ {
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM, RGX_CR_PBE_INDIRECT, 0, RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)-1};
+ }
+ }else
+ {
+ if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, XT_TOP_INFRASTRUCTURE) ||
+ RGX_IS_FEATURE_SUPPORTED(psDevInfo, ROGUEXE))
+ {
+ const IMG_UINT32 ui32RasterModCnt = RGX_GET_NUM_RASTERISATION_MODULES(psDevInfo->sDevFeatureCfg) - 1;
+
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM, RGX_CR_RASTERISATION_INDIRECT, 0, ui32RasterModCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM, RGX_CR_RASTERISATION_INDIRECT, 0, ui32RasterModCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM, RGX_CR_RASTERISATION_INDIRECT, 0, ui32RasterModCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM, RGX_CR_RASTERISATION_INDIRECT, 0, ui32RasterModCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM, RGX_CR_RASTERISATION_INDIRECT, 0, ui32RasterModCnt};
+ }
+ else
+ {
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_PDS_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_ISP_TPF_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE0_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_TFPU_PLANE1_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_IFPU_ISP_CHECKSUM, RGX_CR_BLACKPEARL_INDIRECT, 0, ui32PhantomCnt};
+ }
+
+ as3DSigRegList[gui323DSigRegCount++] = (RGXFW_REGISTER_LIST){RGX_CR_PBE_CHECKSUM, RGX_CR_PBE_INDIRECT, 0, RGX_GET_FEATURE_VALUE(psDevInfo, NUM_CLUSTERS)-1};
+
+ }
+
+ }
+
+}
+#endif
+
+/*!
+*******************************************************************************
+
+ @Function RGXInit
+
+ @Description
+
+ RGX Initialisation
+
+ @Input psDeviceNode
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_COMPCHECKS_BVNC_DECLARE_AND_INIT(sBVNC);
+
+ /* Services initialisation parameters */
+ RGX_SRVINIT_APPHINTS sApphints = {0};
+ IMG_UINT32 ui32DeviceFlags;
+
+ void *pvDevInfo = NULL;
+
+ /* FW allocations handles */
+ PMR *psFWCodePMR;
+ PMR *psFWDataPMR;
+ PMR *psFWCorememPMR;
+
+ /* HWPerf Ctl allocation handle */
+ PMR *psHWPerfDataPMR;
+
+ PVRSRV_RGXDEV_INFO *psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ IMG_CHAR sV[RGXFWIF_COMPCHECKS_BVNC_V_LEN_MAX];
+
+ OSSNPrintf(sV, sizeof(sV), "%d", psDevInfo->sDevFeatureCfg.ui32V);
+ /*
+ * FIXME:
+ * Is this check redundant for the kernel mode version of srvinit?
+ * How do we check the user mode BVNC in this case?
+ */
+ rgx_bvnc_packed(&sBVNC.ui64BNC, sBVNC.aszV, sBVNC.ui32VLenMax, psDevInfo->sDevFeatureCfg.ui32B, \
+ sV, \
+ psDevInfo->sDevFeatureCfg.ui32N, psDevInfo->sDevFeatureCfg.ui32C);
+
+ pvDevInfo = (void *)psDevInfo;
+
+ /* Services initialisation parameters */
+ _ParseHTBAppHints(psDeviceNode);
+ GetApphints(psDevInfo, &sApphints);
+ GetDeviceFlags(&sApphints, &ui32DeviceFlags);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVRSRVGPUVIRTPopulateLMASubArenasKM(psDeviceNode, sApphints.aui32OSidMin, sApphints.aui32OSidMax, sApphints.bEnableTrustedDeviceAceConfig);
+}
+#endif
+
+
+ eError = InitFirmware(psDeviceNode,
+ &sApphints,
+ &sBVNC,
+ &psFWCodePMR,
+ &psFWDataPMR,
+ &psFWCorememPMR,
+ &psHWPerfDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: InitFirmware failed (%d)", eError));
+ goto cleanup;
+ }
+
+#if defined(PDUMP)
+ eError = InitialiseAllCounters(psDeviceNode, psHWPerfDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: InitialiseAllCounters failed (%d)", eError));
+ goto cleanup;
+ }
+#endif
+
+ /* Done using PMR handles, now release them */
+ eError = PVRSRVRGXInitReleaseFWInitResourcesKM(psDeviceNode,
+ psFWCodePMR,
+ psFWDataPMR,
+ psFWCorememPMR,
+ psHWPerfDataPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: BridgeRGXInitReleaseFWInitResources failed (%d)", eError));
+ goto cleanup;
+ }
+
+ /*
+ * Perform second stage of RGX initialisation
+ */
+ eError = PVRSRVRGXInitDevPart2KM(psDeviceNode,
+ ui32DeviceFlags,
+ sApphints.ui32HWPerfHostBufSize,
+ sApphints.ui32HWPerfHostFilter,
+ sApphints.eRGXActivePMConf);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXInit: PVRSRVRGXInitDevPart2KM failed (%d)", eError));
+ goto cleanup;
+ }
+
+#if defined(SUPPORT_VALIDATION)
+ PVRSRVAppHintDumpState();
+#endif
+
+#if defined(PDUMP)
+ /*
+ * Dump the list of signature registers
+ */
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32TASigRegCount = 0, ui323DSigRegCount= 0;
+ IMG_BOOL bRayTracing = IMG_FALSE;
+
+#if defined(__KERNEL__)
+ RGXInitFWSigRegisters(psDevInfo);
+ ui32TASigRegCount = gui32TASigRegCount;
+ ui323DSigRegCount = gui323DSigRegCount;
+ #if defined(RGX_FEATURE_RAY_TRACING)
+ if(RGX_IS_FEATURE_SUPPORTED(psDevInfo, RAY_TRACING_DEPRECATED))
+ {
+ bRayTracing = IMG_TRUE;
+ }
+ #endif
+ #if defined(DEBUG)
+ if (gui32TASigRegCount > SIG_REG_TA_MAX_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: TA signature registers max count exceeded",__func__));
+ PVR_ASSERT(0);
+ }
+ if (gui323DSigRegCount > SIG_REG_3D_MAX_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: 3D signature registers max count exceeded",__func__));
+ PVR_ASSERT(0);
+ }
+ #endif
+#else
+ ui32TASigRegCount = sizeof(asTASigRegList)/sizeof(RGXFW_REGISTER_LIST);
+ ui323DSigRegCount = sizeof(as3DSigRegList)/sizeof(RGXFW_REGISTER_LIST);
+#if defined(RGX_FEATURE_RAY_TRACING)
+ bRayTracing = IMG_TRUE;
+#endif
+#endif
+
+
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature TA registers: ");
+ for (i = 0; i < ui32TASigRegCount; i++)
+ {
+ if (asTASigRegList[i].ui16IndirectRegNum != 0)
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ asTASigRegList[i].ui16RegNum, asTASigRegList[i].ui16IndirectRegNum,
+ asTASigRegList[i].ui16IndirectStartVal, asTASigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", asTASigRegList[i].ui16RegNum);
+ }
+ }
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature 3D registers: ");
+ for (i = 0; i < ui323DSigRegCount; i++)
+ {
+ if (as3DSigRegList[i].ui16IndirectRegNum != 0)
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ as3DSigRegList[i].ui16RegNum, as3DSigRegList[i].ui16IndirectRegNum,
+ as3DSigRegList[i].ui16IndirectStartVal, as3DSigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", as3DSigRegList[i].ui16RegNum);
+ }
+ }
+
+ if(bRayTracing)
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature RTU registers: ");
+ for (i = 0; i < sizeof(asRTUSigRegList)/sizeof(RGXFW_REGISTER_LIST); i++)
+ {
+ if (asRTUSigRegList[i].ui16IndirectRegNum != 0)
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ asRTUSigRegList[i].ui16RegNum, asRTUSigRegList[i].ui16IndirectRegNum,
+ asRTUSigRegList[i].ui16IndirectStartVal, asRTUSigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", asRTUSigRegList[i].ui16RegNum);
+ }
+ }
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Signature SHG registers: ");
+ for (i = 0; i < sizeof(asSHGSigRegList)/sizeof(RGXFW_REGISTER_LIST); i++)
+ {
+ if (asSHGSigRegList[i].ui16IndirectRegNum != 0)
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X (indirect via 0x%8.8X %d to %d)",
+ asSHGSigRegList[i].ui16RegNum, asSHGSigRegList[i].ui16IndirectRegNum,
+ asSHGSigRegList[i].ui16IndirectStartVal, asSHGSigRegList[i].ui16IndirectEndVal);
+ }
+ else
+ {
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, " * 0x%8.8X", asSHGSigRegList[i].ui16RegNum);
+ }
+ }
+ }
+
+ }
+#endif /* defined(PDUMP) */
+
+ eError = PVRSRV_OK;
+
+cleanup:
+ return eError;
+}
+
+/******************************************************************************
+ End of file (rgxsrvinit.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxstartstop.c b/drivers/gpu/drm/img-rogue/1.10/rgxstartstop.c
new file mode 100644
index 00000000000000..673ff68a2eebbe
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxstartstop.c
@@ -0,0 +1,1150 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific start/stop routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific start/stop routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* The routines implemented here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when trusted device is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxstartstop.h"
+
+#if defined(SUPPORT_SHARED_SLC)
+#include "rgxapi_km.h"
+#include "rgxdevice.h"
+#endif
+
+#define SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING
+
+
+#if !defined(FIX_HW_BRN_37453)
+/*!
+*******************************************************************************
+
+ @Function RGXEnableClocks
+
+ @Description Enable RGX Clocks
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXEnableClocks(const void *hPrivate)
+{
+ RGXCommentLog(hPrivate, "RGX clock: use default (automatic clock gating)");
+}
+#endif
+
+
+static PVRSRV_ERROR RGXWriteMetaRegThroughSP(const void *hPrivate, IMG_UINT32 ui32RegAddr, IMG_UINT32 ui32RegValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Issue a Write */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr);
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVDATAT, ui32RegValue);
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXReadMetaRegThroughSP(const void *hPrivate,
+ IMG_UINT32 ui32RegAddr,
+ IMG_UINT32* ui32RegValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Issue a Read */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVCTRL0, ui32RegAddr | RGX_CR_META_SP_MSLVCTRL0_RD_EN);
+
+ /* Wait for Slave Port to be Ready */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN|RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+
+#if !defined(NO_HARDWARE)
+ *ui32RegValue = RGXReadReg32(hPrivate, RGX_CR_META_SP_MSLVDATAX);
+#else
+ *ui32RegValue = 0xFFFFFFFF;
+#endif
+
+ return eError;
+}
+
+static PVRSRV_ERROR RGXWriteMetaCoreRegThoughSP(const void *hPrivate,
+ IMG_UINT32 ui32CoreReg,
+ IMG_UINT32 ui32Value)
+{
+ IMG_UINT32 i = 0;
+
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXDT_OFFSET, ui32Value);
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, ui32CoreReg & ~META_CR_TXUXXRXRQ_RDnWR_BIT);
+
+ do
+ {
+ RGXReadMetaRegThroughSP(hPrivate, META_CR_TXUXXRXRQ_OFFSET, &ui32Value);
+ } while (((ui32Value & META_CR_TXUXXRXRQ_DREADY_BIT) != META_CR_TXUXXRXRQ_DREADY_BIT) && (i++ < 1000));
+
+ if (i == 1000)
+ {
+ RGXCommentLog(hPrivate, "RGXWriteMetaCoreRegThoughSP: Timeout");
+ return PVRSRV_ERROR_TIMEOUT;
+ }
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR RGXStartFirmware(const void *hPrivate)
+{
+ PVRSRV_ERROR eError;
+
+ /* Give privilege to debug and slave port */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN);
+
+ /* Point Meta to the bootloader address, global (uncached) range */
+ eError = RGXWriteMetaCoreRegThoughSP(hPrivate,
+ PC_ACCESS(0),
+ RGXFW_BOOTLDR_META_ADDR | META_MEM_GLOBAL_RANGE_BIT);
+
+ if (eError != PVRSRV_OK)
+ {
+ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start failed!");
+ return eError;
+ }
+
+ /* Enable minim encoding */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_TXPRIVEXT, META_CR_TXPRIVEXT_MINIM_EN);
+
+ /* Enable Meta thread */
+ RGXWriteMetaRegThroughSP(hPrivate, META_CR_T0ENABLE_OFFSET, META_CR_TXENABLE_ENABLE_BIT);
+
+ return PVRSRV_OK;
+}
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitMetaProcWrapper
+
+ @Description Configures the hardware wrapper of the META processor
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXInitMetaProcWrapper(const void *hPrivate)
+{
+ IMG_UINT64 ui64GartenConfig;
+
+ /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address */
+
+ /* Garten IDLE bit controlled by META */
+ ui64GartenConfig = RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META;
+
+ /* The fence addr is set at the fw init sequence */
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ /* Set PC = 0 for fences */
+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_CLRMSK;
+ ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PC_BASE_SHIFT;
+
+ {
+ /* Ensure the META fences go all the way to external memory */
+ ui64GartenConfig |= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_SLC_COHERENT_EN; /* SLC Coherent 1 */
+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG__S7_TOP__FENCE_PERSISTENCE_CLRMSK; /* SLC Persistence 0 */
+ }
+ }
+ else
+ {
+ /* Set PC = 0 for fences */
+ ui64GartenConfig &= RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK;
+ ui64GartenConfig |= (IMG_UINT64)META_MMU_CONTEXT_MAPPING
+ << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT;
+
+ /* Set SLC DM=META */
+ ui64GartenConfig |= ((IMG_UINT64) RGXFW_SEGMMU_META_DM_ID) << RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT;
+ }
+
+ RGXCommentLog(hPrivate, "RGXStart: Configure META wrapper");
+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, ui64GartenConfig);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitMipsProcWrapper
+
+ @Description Configures the hardware wrapper of the MIPS processor
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXInitMipsProcWrapper(const void *hPrivate)
+{
+ IMG_DEV_PHYADDR sPhyAddr;
+ IMG_UINT64 ui64RemapSettings = RGXMIPSFW_BOOT_REMAP_LOG2_SEGMENT_SIZE; /* Same for all remap registers */
+
+ RGXCommentLog(hPrivate, "RGXStart: Configure MIPS wrapper");
+
+ /*
+ * MIPS wrapper (registers transaction ID and ISA mode) setup
+ */
+
+ RGXCommentLog(hPrivate, "RGXStart: Write wrapper config register");
+
+ if (RGXGetDevicePhysBusWidth(hPrivate) > 32)
+ {
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MIPS_WRAPPER_CONFIG,
+ (RGXMIPSFW_REGISTERS_VIRTUAL_BASE >>
+ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN) |
+ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+ }
+ else
+ {
+ RGXAcquireGPURegsAddr(hPrivate, &sPhyAddr);
+
+ RGXMIPSWrapperConfig(hPrivate,
+ RGX_CR_MIPS_WRAPPER_CONFIG,
+ sPhyAddr.uiAddr,
+ RGXMIPSFW_WRAPPER_CONFIG_REGBANK_ADDR_ALIGN,
+ RGX_CR_MIPS_WRAPPER_CONFIG_BOOT_ISA_MODE_MICROMIPS);
+ }
+
+ /*
+ * Boot remap setup
+ */
+
+ RGXAcquireBootRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Do not mark accesses to a FW code remap region as DRM accesses */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLog(hPrivate, "RGXStart: Write boot remap registers");
+ RGXBootRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP1_CONFIG1,
+ RGXMIPSFW_BOOT_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP1_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP1_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ if (RGX_DEVICE_HAS_BRN(hPrivate, 63553))
+ {
+ IMG_BOOL bPhysBusAbove32Bit = RGXGetDevicePhysBusWidth(hPrivate) > 32;
+ IMG_BOOL bDevicePA0IsValid = RGXDevicePA0IsValid(hPrivate);
+
+ /* WA always required on 36 bit cores, to avoid continuous unmapped memory accesses to address 0x0 */
+ if (bPhysBusAbove32Bit || !bDevicePA0IsValid)
+ {
+ RGXCodeRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP5_CONFIG1,
+ 0x0 | RGX_CR_MIPS_ADDR_REMAP5_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP5_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP5_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+ }
+ }
+
+ /*
+ * Data remap setup
+ */
+
+ RGXAcquireDataRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Remapped data in non-secure memory */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLog(hPrivate, "RGXStart: Write data remap registers");
+ RGXDataRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP2_CONFIG1,
+ RGXMIPSFW_DATA_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP2_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP2_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP2_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /*
+ * Code remap setup
+ */
+
+ RGXAcquireCodeRemapAddr(hPrivate, &sPhyAddr);
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Do not mark accesses to a FW code remap region as DRM accesses */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLog(hPrivate, "RGXStart: Write exceptions remap registers");
+ RGXCodeRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP3_CONFIG1,
+ RGXMIPSFW_CODE_REMAP_PHYS_ADDR_IN | RGX_CR_MIPS_ADDR_REMAP3_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP3_CONFIG2,
+ sPhyAddr.uiAddr,
+ ~RGX_CR_MIPS_ADDR_REMAP3_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /*
+ * Trampoline remap setup
+ */
+
+ RGXAcquireTrampolineRemapAddr(hPrivate, &sPhyAddr);
+ ui64RemapSettings = RGXMIPSFW_TRAMPOLINE_LOG2_SEGMENT_SIZE;
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Remapped data in non-secure memory */
+ ui64RemapSettings &= RGX_CR_MIPS_ADDR_REMAP1_CONFIG2_TRUSTED_CLRMSK;
+#endif
+
+ RGXCommentLog(hPrivate, "RGXStart: Write trampoline remap registers");
+ RGXTrampolineRemapConfig(hPrivate,
+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG1,
+ sPhyAddr.uiAddr | RGX_CR_MIPS_ADDR_REMAP4_CONFIG1_MODE_ENABLE_EN,
+ RGX_CR_MIPS_ADDR_REMAP4_CONFIG2,
+ RGXMIPSFW_TRAMPOLINE_TARGET_PHYS_ADDR,
+ ~RGX_CR_MIPS_ADDR_REMAP4_CONFIG2_ADDR_OUT_CLRMSK,
+ ui64RemapSettings);
+
+ /* Garten IDLE bit controlled by MIPS */
+ RGXCommentLog(hPrivate, "RGXStart: Set GARTEN_IDLE type to MIPS");
+ RGXWriteReg64(hPrivate, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG, RGX_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META);
+
+ /* Turn on the EJTAG probe (only useful driver live) */
+ RGXWriteReg32(hPrivate, RGX_CR_MIPS_DEBUG_CONFIG, 0);
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function __RGXInitSLC
+
+ @Description Initialise RGX SLC
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void __RGXInitSLC(const void *hPrivate)
+{
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_CACHE_HIERARCHY))
+ {
+ IMG_UINT32 ui32Reg;
+ IMG_UINT32 ui32RegVal;
+
+ {
+ /*
+ * SLC control
+ */
+ ui32Reg = RGX_CR_SLC3_CTRL_MISC;
+ ui32RegVal = RGX_CR_SLC3_CTRL_MISC_ADDR_DECODE_MODE_SCRAMBLE_PVR_HASH |
+ RGX_CR_SLC3_CTRL_MISC_WRITE_COMBINER_EN;
+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+
+ /*
+ * SLC scramble bits
+ */
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32Count=0;
+ IMG_UINT32 ui32SLCBanks = RGXGetDeviceSLCBanks(hPrivate);
+ IMG_UINT64 aui64ScrambleValues[4];
+ IMG_UINT32 aui32ScrambleRegs[] = {
+ RGX_CR_SLC3_SCRAMBLE,
+ RGX_CR_SLC3_SCRAMBLE2,
+ RGX_CR_SLC3_SCRAMBLE3,
+ RGX_CR_SLC3_SCRAMBLE4
+ };
+
+ if (2 == ui32SLCBanks)
+ {
+ aui64ScrambleValues[0] = IMG_UINT64_C(0x6965a99a55696a6a);
+ aui64ScrambleValues[1] = IMG_UINT64_C(0x6aa9aa66959aaa9a);
+ aui64ScrambleValues[2] = IMG_UINT64_C(0x9a5665965a99a566);
+ aui64ScrambleValues[3] = IMG_UINT64_C(0x5aa69596aa66669a);
+ ui32Count = 4;
+ }
+ else if (4 == ui32SLCBanks)
+ {
+ aui64ScrambleValues[0] = IMG_UINT64_C(0xc6788d722dd29ce4);
+ aui64ScrambleValues[1] = IMG_UINT64_C(0x7272e4e11b279372);
+ aui64ScrambleValues[2] = IMG_UINT64_C(0x87d872d26c6c4be1);
+ aui64ScrambleValues[3] = IMG_UINT64_C(0xe1b4878d4b36e478);
+ ui32Count = 4;
+
+ }
+ else if (8 == ui32SLCBanks)
+ {
+ aui64ScrambleValues[0] = IMG_UINT64_C(0x859d6569e8fac688);
+ aui64ScrambleValues[1] = IMG_UINT64_C(0xf285e1eae4299d33);
+ aui64ScrambleValues[2] = IMG_UINT64_C(0x1e1af2be3c0aa447);
+ ui32Count = 3;
+ }
+
+ for (i = 0; i < ui32Count; i++)
+ {
+ IMG_UINT32 ui32Reg = aui32ScrambleRegs[i];
+ IMG_UINT64 ui64Value = aui64ScrambleValues[i];
+ RGXWriteReg64(hPrivate, ui32Reg, ui64Value);
+ }
+ }
+ }
+
+ if (RGX_DEVICE_HAS_ERN(hPrivate, 45914))
+ {
+ /* Disable the forced SLC coherency which the hardware enables for compatibility with older pdumps */
+ RGXCommentLog(hPrivate, "Disable forced SLC coherency");
+ RGXWriteReg64(hPrivate, RGX_CR_GARTEN_SLC, 0);
+ }
+ }
+ else
+ {
+ IMG_UINT32 ui32Reg;
+ IMG_UINT32 ui32RegVal;
+ IMG_UINT64 ui64RegVal;
+
+#if defined(FIX_HW_BRN_36492)
+ /* The workaround for this BRN forbids using SLC reset, so invalidate the SLC instead */
+ RGXCommentLog(hPrivate, "Invalidate the SLC");
+ RGXWriteReg32(hPrivate, RGX_CR_SLC_CTRL_FLUSH_INVAL, RGX_CR_SLC_CTRL_FLUSH_INVAL_ALL_EN);
+
+ /* Poll for completion */
+ RGXPollReg32(hPrivate, RGX_CR_SLC_STATUS0, 0x0, RGX_CR_SLC_STATUS0_MASKFULL);
+#endif
+
+ /*
+ * SLC Bypass control
+ */
+ ui32Reg = RGX_CR_SLC_CTRL_BYPASS;
+ ui64RegVal = 0;
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLCSIZE8) ||
+ RGX_DEVICE_HAS_BRN(hPrivate, 61450))
+ {
+ RGXCommentLog(hPrivate, "Bypass SLC for IPF_OBJ and IPF_CPF");
+ ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_OBJ_EN |
+ (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_IPF_CPF_EN;
+ }
+
+ if (RGXGetDeviceSLCSize(hPrivate) < (128*1024))
+ {
+ /* Bypass SLC for textures if the SLC size is less than 128kB */
+ RGXCommentLog(hPrivate, "Bypass SLC for TPU");
+ ui64RegVal |= (IMG_UINT64) RGX_CR_SLC_CTRL_BYPASS_REQ_TPU_EN;
+ }
+
+ if (ui64RegVal != 0)
+ {
+ RGXReadModifyWriteReg64(hPrivate, ui32Reg, ui64RegVal, ~ui64RegVal);
+ }
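+ /* Read-modify-write: OR in the requested bypass bits while the
+ * ~ui64RegVal mask keeps every other bit of the register unchanged
+ * (assumed semantics of RGXReadModifyWriteReg64). */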
+
+
+ /*
+ * SLC Misc control.
+ *
+ * Note: This is a 64bit register and we set only the lower 32bits leaving the top
+ * 32bits (RGX_CR_SLC_CTRL_MISC_SCRAMBLE_BITS) unchanged from the HW default.
+ */
+ ui32Reg = RGX_CR_SLC_CTRL_MISC;
+ ui32RegVal = (RGXReadReg32(hPrivate, ui32Reg) & RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN) |
+ RGX_CR_SLC_CTRL_MISC_ADDR_DECODE_MODE_PVR_HASH1;
+
+ if (RGX_DEVICE_HAS_BRN(hPrivate, 60084))
+ {
+#if !defined(SOC_FEATURE_STRICT_SAME_ADDRESS_WRITE_ORDERING)
+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+#else
+ if (RGX_DEVICE_HAS_ERN(hPrivate, 61389))
+ {
+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_ENABLE_PSG_HAZARD_CHECK_EN;
+ }
+#endif
+ }
+ /* Bypass burst combiner if SLC line size is smaller than 1024 bits */
+ if (RGXGetDeviceCacheLineSize(hPrivate) < 1024)
+ {
+ ui32RegVal |= RGX_CR_SLC_CTRL_MISC_BYPASS_BURST_COMBINER_EN;
+ }
+
+ RGXWriteReg32(hPrivate, ui32Reg, ui32RegVal);
+ }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXInitBIF
+
+ @Description Initialise RGX BIF
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXInitBIF(const void *hPrivate)
+{
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+ {
+ IMG_DEV_PHYADDR sPCAddr;
+
+ /*
+ * Acquire the address of the Kernel Page Catalogue.
+ */
+ RGXAcquireKernelMMUPC(hPrivate, &sPCAddr);
+
+ /*
+ * Write the kernel catalogue base.
+ */
+ RGXCommentLog(hPrivate, "RGX firmware MMU Page Catalogue");
+
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+ {
+ /* Write the cat-base address */
+ RGXWriteKernelMMUPC64(hPrivate,
+ RGX_CR_BIF_CAT_BASE0,
+ RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT,
+ RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT,
+ ((sPCAddr.uiAddr
+ >> RGX_CR_BIF_CAT_BASE0_ADDR_ALIGNSHIFT)
+ << RGX_CR_BIF_CAT_BASE0_ADDR_SHIFT)
+ & ~RGX_CR_BIF_CAT_BASE0_ADDR_CLRMSK);
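+ /* As the expression above shows, the PC address is aligned down by
+ * ADDR_ALIGNSHIFT, repositioned at ADDR_SHIFT within the register and
+ * masked to the ADDR field, so only a suitably aligned page-catalogue
+ * base can be programmed. */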
+ /*
+ * Trusted Firmware boot
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+ }
+ else
+ {
+ IMG_UINT32 uiPCAddr;
+ uiPCAddr = (((sPCAddr.uiAddr >> RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT)
+ << RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT)
+ & ~RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_CLRMSK);
+ /* Set the mapping context */
+ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 0);
+
+ /* Write the cat-base address */
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+ uiPCAddr);
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ /* Set-up MMU ID 1 mapping to the same PC used by MMU ID 0 */
+ RGXWriteReg32(hPrivate, RGX_CR_MMU_CBASE_MAPPING_CONTEXT, 1);
+ RGXWriteKernelMMUPC32(hPrivate,
+ RGX_CR_MMU_CBASE_MAPPING,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_ALIGNSHIFT,
+ RGX_CR_MMU_CBASE_MAPPING_BASE_ADDR_SHIFT,
+ uiPCAddr);
+#endif /* SUPPORT_TRUSTED_DEVICE */
+ }
+ }
+ else
+ {
+ /*
+ * Trusted Firmware boot
+ */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ RGXCommentLog(hPrivate, "RGXInitBIF: Trusted Device enabled");
+ RGXWriteReg32(hPrivate, RGX_CR_BIF_TRUST, RGX_CR_BIF_TRUST_ENABLE_EN);
+#endif
+ }
+}
+
+
+/*!
+*******************************************************************************
+
+ @Function RGXAXIACELiteInit
+
+ @Description Initialise AXI-ACE Lite interface
+
+ @Input hPrivate : Implementation specific data
+
+ @Return void
+
+******************************************************************************/
+static void RGXAXIACELiteInit(const void *hPrivate)
+{
+ IMG_UINT32 ui32RegAddr;
+ IMG_UINT64 ui64RegVal;
+
+ ui32RegAddr = RGX_CR_AXI_ACE_LITE_CONFIGURATION;
+
+ /* Setup AXI-ACE config. Set everything to outer cache */
+ ui64RegVal = (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_NON_SNOOPING_SHIFT) |
+ (3U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_NON_SNOOPING_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_CACHE_MAINTENANCE_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWDOMAIN_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARDOMAIN_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_AWCACHE_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_COHERENT_SHIFT) |
+ (2U << RGX_CR_AXI_ACE_LITE_CONFIGURATION_ARCACHE_CACHE_MAINTENANCE_SHIFT);
+
+ if (RGX_DEVICE_HAS_BRN(hPrivate, 42321))
+ {
+ ui64RegVal |= (((IMG_UINT64) 1) << RGX_CR_AXI_ACE_LITE_CONFIGURATION_DISABLE_COHERENT_WRITELINEUNIQUE_SHIFT);
+ }
+
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SLC_VIVT))
+ {
+ RGXCommentLog(hPrivate, "OSID 0 and 1 are trusted");
+ ui64RegVal |= IMG_UINT64_C(0xFC)
+ << RGX_CR_AXI_ACE_LITE_CONFIGURATION_OSID_SECURITY_SHIFT;
+ }
+#endif
+
+ RGXCommentLog(hPrivate, "Init AXI-ACE interface");
+ RGXWriteReg64(hPrivate, ui32RegAddr, ui64RegVal);
+}
+
+
+PVRSRV_ERROR RGXStart(const void *hPrivate)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_BOOL bDoFWSlaveBoot;
+ IMG_CHAR *pcRGXFW_PROCESSOR;
+ IMG_BOOL bMetaFW;
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS))
+ {
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_MIPS;
+ bMetaFW = IMG_FALSE;
+ bDoFWSlaveBoot = IMG_FALSE;
+ }
+ else
+ {
+ pcRGXFW_PROCESSOR = RGXFW_PROCESSOR_META;
+ bMetaFW = IMG_TRUE;
+ bDoFWSlaveBoot = RGXDoFWSlaveBoot(hPrivate);
+ }
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, SYS_BUS_SECURE_RESET))
+ {
+ /* Disable the default sys_bus_secure protection to perform minimal setup */
+ RGXCommentLog(hPrivate, "RGXStart: Disable sys_bus_secure");
+ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, 0);
+ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+ }
+
+#if defined(FIX_HW_BRN_37453)
+ /* Force all clocks on */
+ RGXCommentLog(hPrivate, "RGXStart: force all clocks on");
+ RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_ON);
+#endif
+
+#if defined(SUPPORT_SHARED_SLC) && !defined(FIX_HW_BRN_36492)
+ /* When the SLC is shared, the SLC reset is performed by the System layer when calling
+ * RGXInitSLC (before any device uses it), therefore mask out the SLC bit to avoid
+ * soft-resetting it here. If HW_BRN_36492 applies, the bit is already masked out.
+ */
+#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL ^ RGX_CR_SOFT_RESET_SLC_EN)
+ RGXCommentLog(hPrivate, "RGXStart: Shared SLC (don't reset SLC as part of RGX reset)");
+#else
+#define RGX_CR_SOFT_RESET_ALL (RGX_CR_SOFT_RESET_MASKFULL)
+#endif
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ /* Set RGX in soft-reset */
+ RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 1");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS);
+
+ RGXCommentLog(hPrivate, "RGXStart: soft reset assert step 2");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_JONES_ALL | RGX_S7_SOFT_RESET_DUSTS);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, RGX_S7_SOFT_RESET2);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ /* Take everything out of reset but META/MIPS */
+ RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 1 excluding %s", pcRGXFW_PROCESSOR);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_S7_SOFT_RESET_DUSTS | RGX_CR_SOFT_RESET_GARTEN_EN);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET2, 0x0);
+
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ RGXCommentLog(hPrivate, "RGXStart: soft reset de-assert step 2 excluding %s", pcRGXFW_PROCESSOR);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+ }
+ else
+ {
+ /* Set RGX in soft-reset */
+ RGXCommentLog(hPrivate, "RGXStart: soft reset everything");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL);
+
+ /* Take Rascal and Dust out of reset */
+ RGXCommentLog(hPrivate, "RGXStart: Rascal and Dust out of reset");
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_ALL ^ RGX_CR_SOFT_RESET_RASCALDUSTS_EN);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ /* Take everything out of reset but META/MIPS */
+ RGXCommentLog(hPrivate, "RGXStart: Take everything out of reset but %s", pcRGXFW_PROCESSOR);
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_GARTEN_EN);
+ }
+
+
+#if !defined(FIX_HW_BRN_37453)
+ /* Enable clocks */
+ RGXEnableClocks(hPrivate);
+#endif
+
+ /*
+ * Initialise SLC.
+ */
+#if !defined(SUPPORT_SHARED_SLC)
+ __RGXInitSLC(hPrivate);
+#endif
+
+ if (bMetaFW)
+ {
+ if (bDoFWSlaveBoot)
+ {
+ /* Configure META to Slave boot */
+ RGXCommentLog(hPrivate, "RGXStart: META Slave boot");
+ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, 0);
+
+ }
+ else
+ {
+ /* Configure META to Master boot */
+ RGXCommentLog(hPrivate, "RGXStart: META Master boot");
+ RGXWriteReg32(hPrivate, RGX_CR_META_BOOT, RGX_CR_META_BOOT_MODE_EN);
+ }
+ }
+
+ /*
+ * Initialise Firmware wrapper
+ */
+ if (bMetaFW)
+ {
+ RGXInitMetaProcWrapper(hPrivate);
+ }
+ else
+ {
+ RGXInitMipsProcWrapper(hPrivate);
+ }
+
+ if (RGX_DEVICE_HAS_FEATURE(hPrivate, AXI_ACELITE))
+ {
+ /* We must init the AXI-ACE interface before 1st BIF transaction */
+ RGXAXIACELiteInit(hPrivate);
+ }
+
+ /*
+ * Initialise BIF.
+ */
+ RGXInitBIF(hPrivate);
+
+ RGXCommentLog(hPrivate, "RGXStart: Take %s out of reset", pcRGXFW_PROCESSOR);
+
+ /* Need to wait for at least 16 cycles before taking META/MIPS out of reset ... */
+ RGXWaitCycles(hPrivate, 32, 3);
+
+ RGXWriteReg64(hPrivate, RGX_CR_SOFT_RESET, 0x0);
+ (void) RGXReadReg64(hPrivate, RGX_CR_SOFT_RESET);
+
+ /* ... and afterwards */
+ RGXWaitCycles(hPrivate, 32, 3);
+
+#if defined(FIX_HW_BRN_37453)
+ /* We rely on the 32 clk sleep from above */
+
+ /* Switch clocks back to auto */
+ RGXCommentLog(hPrivate, "RGXStart: set clocks back to auto");
+ RGXWriteReg64(hPrivate, RGX_CR_CLK_CTRL, RGX_CR_CLK_CTRL_ALL_AUTO);
+#endif
+
+ if (bMetaFW && bDoFWSlaveBoot)
+ {
+ eError = RGXFabricCoherencyTest(hPrivate);
+ if (eError != PVRSRV_OK) return eError;
+
+ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Slave boot Start");
+ eError = RGXStartFirmware(hPrivate);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ else
+ {
+ RGXCommentLog(hPrivate, "RGXStart: RGX Firmware Master boot Start");
+ }
+
+ /* Enable Sys Bus security */
+#if defined(SUPPORT_TRUSTED_DEVICE)
+ RGXCommentLog(hPrivate, "RGXStart: Enable sys_bus_secure");
+ RGXWriteReg32(hPrivate, RGX_CR_SYS_BUS_SECURE, RGX_CR_SYS_BUS_SECURE_ENABLE_EN);
+ (void) RGXReadReg32(hPrivate, RGX_CR_SYS_BUS_SECURE); /* Fence write */
+#endif
+
+ return eError;
+}
+
+static INLINE void ClearIRQStatusRegister(const void *hPrivate, IMG_BOOL bMetaFW)
+{
+ IMG_UINT32 ui32IRQClearReg;
+ IMG_UINT32 ui32IRQClearMask;
+
+ if (bMetaFW)
+ {
+ ui32IRQClearReg = RGX_CR_META_SP_MSLVIRQSTATUS;
+ ui32IRQClearMask = RGX_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK;
+ }
+ else
+ {
+ ui32IRQClearReg = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR;
+ ui32IRQClearMask = RGX_CR_MIPS_WRAPPER_IRQ_CLEAR_EVENT_EN;
+ }
+
+ RGXWriteReg32(hPrivate, ui32IRQClearReg, ui32IRQClearMask);
+
+#if defined(RGX_FEATURE_OCPBUS)
+ RGXWriteReg32(hPrivate, RGX_CR_OCP_IRQSTATUS_2, RGX_CR_OCP_IRQSTATUS_2_RGX_IRQ_STATUS_EN);
+#endif
+}
+
+PVRSRV_ERROR RGXStop(const void *hPrivate)
+{
+ IMG_BOOL bMetaFW = !RGX_DEVICE_HAS_FEATURE(hPrivate, MIPS);
+ PVRSRV_ERROR eError;
+
+ ClearIRQStatusRegister(hPrivate, bMetaFW);
+
+ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_JONES_IDLE,
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+ }
+
+ if (eError != PVRSRV_OK) return eError;
+
+
+#if !defined(SUPPORT_SHARED_SLC)
+ /* Wait for SLC to signal IDLE */
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC_IDLE,
+ RGX_CR_SLC_IDLE_MASKFULL,
+ RGX_CR_SLC_IDLE_MASKFULL);
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC3_IDLE,
+ RGX_CR_SLC3_IDLE_MASKFULL,
+ RGX_CR_SLC3_IDLE_MASKFULL);
+ }
+#endif /* SUPPORT_SHARED_SLC */
+ if (eError != PVRSRV_OK) return eError;
+
+
+ /* Unset MTS DM association with threads */
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC,
+ RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_INTCTX_THREAD0_DM_ASSOC_MASKFULL);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC,
+ RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_BGCTX_THREAD0_DM_ASSOC_MASKFULL);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC,
+ RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_INTCTX_THREAD1_DM_ASSOC_MASKFULL);
+ RGXWriteReg32(hPrivate,
+ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC,
+ RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_DM_ASSOC_CLRMSK
+ & RGX_CR_MTS_BGCTX_THREAD1_DM_ASSOC_MASKFULL);
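+
+ /* Writing the CLRMSK value (all DM_ASSOC bits clear, limited to the
+ * register's MASKFULL) detaches every data master from both MTS
+ * threads, in both the interrupt and background contexts. */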
+
+
+#if defined(PDUMP)
+ if (bMetaFW)
+ {
+ /* Disabling the threads is only required for PDumps, to stop the FW gracefully */
+
+ /* Disable thread 0 */
+ eError = RGXWriteMetaRegThroughSP(hPrivate,
+ META_CR_T0ENABLE_OFFSET,
+ ~META_CR_TXENABLE_ENABLE_BIT);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Disable thread 1 */
+ eError = RGXWriteMetaRegThroughSP(hPrivate,
+ META_CR_T1ENABLE_OFFSET,
+ ~META_CR_TXENABLE_ENABLE_BIT);
+ if (eError != PVRSRV_OK) return eError;
+
+ /* Clear down any irq raised by META (done after disabling the FW
+ * threads to avoid a race condition).
+ * This is only really needed for PDumps, but we do it anyway when the driver is live.
+ */
+ RGXWriteReg32(hPrivate, RGX_CR_META_SP_MSLVIRQSTATUS, 0x0);
+
+ /* Wait for the Slave Port to finish all the transactions */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_META_SP_MSLVCTRL1,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN,
+ RGX_CR_META_SP_MSLVCTRL1_READY_EN | RGX_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+#endif
+
+
+ /* Extra Idle checks */
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIF_STATUS_MMU,
+ 0,
+ RGX_CR_BIF_STATUS_MMU_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIFPM_STATUS_MMU,
+ 0,
+ RGX_CR_BIFPM_STATUS_MMU_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE) &&
+ !RGX_DEVICE_HAS_FEATURE(hPrivate, XT_TOP_INFRASTRUCTURE))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIF_READS_EXT_STATUS,
+ 0,
+ RGX_CR_BIF_READS_EXT_STATUS_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_BIFPM_READS_EXT_STATUS,
+ 0,
+ RGX_CR_BIFPM_READS_EXT_STATUS_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+
+ {
+ IMG_UINT64 ui64SLCMask = RGX_CR_SLC_STATUS1_MASKFULL;
+ eError = RGXPollReg64(hPrivate,
+ RGX_CR_SLC_STATUS1,
+ 0,
+ ui64SLCMask);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+ if (4 == RGXGetDeviceSLCBanks(hPrivate))
+ {
+ eError = RGXPollReg64(hPrivate,
+ RGX_CR_SLC_STATUS2,
+ 0,
+ RGX_CR_SLC_STATUS2_MASKFULL);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+#if !defined(SUPPORT_SHARED_SLC)
+ /* Wait for SLC to signal IDLE */
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC_IDLE,
+ RGX_CR_SLC_IDLE_MASKFULL,
+ RGX_CR_SLC_IDLE_MASKFULL);
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SLC3_IDLE,
+ RGX_CR_SLC3_IDLE_MASKFULL,
+ RGX_CR_SLC3_IDLE_MASKFULL);
+ }
+#endif /* SUPPORT_SHARED_SLC */
+ if (eError != PVRSRV_OK) return eError;
+
+
+ /* Wait for Sidekick/Jones to signal IDLE except for the Garten Wrapper */
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN),
+ RGX_CR_SIDEKICK_IDLE_MASKFULL^(RGX_CR_SIDEKICK_IDLE_GARTEN_EN|RGX_CR_SIDEKICK_IDLE_SOCIF_EN|RGX_CR_SIDEKICK_IDLE_HOSTIF_EN));
+ }
+ else
+ {
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, FASTRENDER_DM))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_JONES_IDLE,
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN),
+ RGX_CR_JONES_IDLE_MASKFULL^(RGX_CR_JONES_IDLE_GARTEN_EN|RGX_CR_JONES_IDLE_SOCIF_EN|RGX_CR_JONES_IDLE_HOSTIF_EN));
+ }
+ }
+
+ if (eError != PVRSRV_OK) return eError;
+
+
+ if (bMetaFW)
+ {
+ IMG_UINT32 ui32RegValue;
+
+ eError = RGXReadMetaRegThroughSP(hPrivate,
+ META_CR_TxVECINT_BHALT,
+ &ui32RegValue);
+ if (eError != PVRSRV_OK) return eError;
+
+ if ((ui32RegValue & 0xFFFFFFFFU) == 0x0)
+ {
+ /* Wait for Sidekick/Jones to signal IDLE including
+ * the Garten Wrapper if there is no debugger attached
+ * (TxVECINT_BHALT = 0x0) */
+ if (!RGX_DEVICE_HAS_FEATURE(hPrivate, S7_TOP_INFRASTRUCTURE))
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_JONES_IDLE,
+ RGX_CR_JONES_IDLE_GARTEN_EN,
+ RGX_CR_JONES_IDLE_GARTEN_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+ }
+ }
+ else
+ {
+ eError = RGXPollReg32(hPrivate,
+ RGX_CR_SIDEKICK_IDLE,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN,
+ RGX_CR_SIDEKICK_IDLE_GARTEN_EN);
+ if (eError != PVRSRV_OK) return eError;
+ }
+
+ return eError;
+}
+
+
+/*
+ * RGXInitSLC
+ */
+#if defined(SUPPORT_SHARED_SLC)
+PVRSRV_ERROR RGXInitSLC(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ void *pvPowerParams;
+
+ if (psDeviceNode == NULL)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psDevInfo = psDeviceNode->pvDevice;
+ pvPowerParams = &psDevInfo->sLayerParams;
+
+#if !defined(FIX_HW_BRN_36492)
+ /* reset the SLC */
+ RGXCommentLog(pvPowerParams, "RGXInitSLC: soft reset SLC");
+ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, RGX_CR_SOFT_RESET_SLC_EN);
+
+ /* Read soft-reset to fence previous write in order to clear the SOCIF pipeline */
+ (void) RGXReadReg64(pvPowerParams, RGX_CR_SOFT_RESET);
+
+ /* Take everything out of reset */
+ RGXWriteReg64(pvPowerParams, RGX_CR_SOFT_RESET, 0x0);
+#endif
+
+ __RGXInitSLC(pvPowerParams);
+
+ return PVRSRV_OK;
+}
+#endif
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxstartstop.h b/drivers/gpu/drm/img-rogue/1.10/rgxstartstop.h
new file mode 100644
index 00000000000000..ac1411864b93df
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxstartstop.h
@@ -0,0 +1,84 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX start/stop header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX start/stop functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXSTARTSTOP_H__)
+#define __RGXSTARTSTOP_H__
+
+/* The routines declared here are built on top of an abstraction layer to
+ * hide DDK/OS-specific details in case they are used outside of the DDK
+ * (e.g. when DRM security is enabled).
+ * Any new dependency should be added to rgxlayer.h.
+ * Any new code should be built on top of the existing abstraction layer,
+ * which should be extended when necessary. */
+#include "rgxlayer.h"
+
+/*!
+*******************************************************************************
+
+ @Function RGXStart
+
+ @Description Perform GPU reset and initialisation
+
+ @Input hPrivate : Implementation specific data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStart(const void *hPrivate);
+
+/*!
+*******************************************************************************
+
+ @Function RGXStop
+
+ @Description Stop Rogue in preparation for power down
+
+ @Input hPrivate : Implementation specific data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXStop(const void *hPrivate);
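+
+/* Illustrative usage (not part of this file): the power management layer is
+ * expected to bracket GPU power transitions with these calls, passing its
+ * layer parameters as the opaque handle, e.g.
+ *
+ *     eError = RGXStart(&psDevInfo->sLayerParams);    (power up)
+ *     ...
+ *     eError = RGXStop(&psDevInfo->sLayerParams);     (power down)
+ *
+ * (sLayerParams as used by RGXInitSLC() in rgxstartstop.c.)
+ */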
+
+#endif /* __RGXSTARTSTOP_H__ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.c b/drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.c
new file mode 100644
index 00000000000000..34cf8954c528f0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.c
@@ -0,0 +1,175 @@
+/*************************************************************************/ /*!
+@File rgxsyncutils.c
+@Title RGX Sync Utilities
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Sync helper functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "rgxsyncutils.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "allocmem.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static
+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues,
+ IMG_UINT32 ui32Count)
+{
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+ for (iii=0; iii<ui32Count; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+}
+#else
+#define CHKPT_DBG(X)
+#endif
+
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+ SYNC_ADDR_LIST *psSyncList,
+ SYNC_ADDR_LIST *psPRSyncList,
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+ RGX_SYNC_DATA *psSyncData,
+ IMG_BOOL bKick3D)
+{
+ IMG_UINT32 *pui32TimelineUpdateWOff = NULL;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+
+ IMG_UINT32 ui32ClientUpdateValueCount = psSyncData->ui32ClientUpdateValueCount;
+
+ /* Space for original client updates, and the one new update */
+ size_t uiUpdateSize = sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientUpdateValueCount + 1);
+
+ if (!bKick3D)
+ {
+ /* Additional space for one PR update, only the newest one */
+ uiUpdateSize += sizeof(*pui32IntAllocatedUpdateValues) * 1;
+ }
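+
+ /* Resulting layout of pui32IntAllocatedUpdateValues (see the code below):
+ *
+ *   [0 .. N-1]  the original client update values
+ *               (N = ui32ClientUpdateValueCount on entry)
+ *   [N]         ui32FenceTimelineUpdateValue, for the TA/3D update
+ *   [N+1]       ui32FenceTimelineUpdateValue again, for the PR update
+ *               (only present when !bKick3D)
+ */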
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(<%p>)", __func__, \
+ (void*)pui32IntAllocatedUpdateValues));
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(uiUpdateSize);
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, uiUpdateSize);
+ pui32TimelineUpdateWOff = pui32IntAllocatedUpdateValues;
+
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Copying %d %s update values into pui32IntAllocatedUpdateValues(<%p>)", __func__, \
+ ui32ClientUpdateValueCount, bKick3D ? "TA/3D" : "TA/PR", (void*)pui32IntAllocatedUpdateValues));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32TimelineUpdateWOff, psSyncData->paui32ClientUpdateValue, ui32ClientUpdateValueCount * sizeof(*psSyncData->paui32ClientUpdateValue));
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ _DebugSyncValues(pui32TimelineUpdateWOff, ui32ClientUpdateValueCount);
+#endif
+
+ pui32TimelineUpdateWOff += ui32ClientUpdateValueCount;
+ }
+
+ /* Now set the additional update value and append the timeline sync prim addr to either the
+ * render context 3D (or TA) update list
+ */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", __func__, \
+ ui32FenceTimelineUpdateValue, bKick3D ? "TA/3D" : "TA/PR"));
+
+ /* Append the TA/3D update */
+ {
+ *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue;
+ psSyncData->ui32ClientUpdateValueCount++;
+ psSyncData->ui32ClientUpdateCount++;
+ SyncAddrListAppendSyncPrim(psSyncList, psFenceTimelineUpdateSync);
+
+ if (!psSyncData->pauiClientUpdateUFOAddress)
+ {
+ psSyncData->pauiClientUpdateUFOAddress = psSyncList->pasFWAddrs;
+ }
+ /* Update paui32ClientUpdateValue to point to our new list of update values */
+ psSyncData->paui32ClientUpdateValue = pui32IntAllocatedUpdateValues;
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ _DebugSyncValues(pui32IntAllocatedUpdateValues, psSyncData->ui32ClientUpdateValueCount);
+#endif
+ }
+
+ if (!bKick3D)
+ {
+ /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */
+ *pui32TimelineUpdateWOff++ = ui32FenceTimelineUpdateValue;
+ psSyncData->ui32ClientPRUpdateValueCount = 1;
+ psSyncData->ui32ClientPRUpdateCount = 1;
+ SyncAddrListAppendSyncPrim(psPRSyncList, psFenceTimelineUpdateSync);
+
+ if (!psSyncData->pauiClientPRUpdateUFOAddress)
+ {
+ psSyncData->pauiClientPRUpdateUFOAddress = psPRSyncList->pasFWAddrs;
+ }
+ /* Update paui32ClientPRUpdateValue to point to our new list of update values */
+ psSyncData->paui32ClientPRUpdateValue = &pui32IntAllocatedUpdateValues[psSyncData->ui32ClientUpdateValueCount];
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ _DebugSyncValues(psSyncData->paui32ClientPRUpdateValue, psSyncData->ui32ClientPRUpdateValueCount);
+#endif
+ }
+
+ /* Do not free the old psSyncData->paui32ClientUpdateValue array,
+ * as it was constant data passed through the bridge down to PVRSRVRGXKickTA3DKM() */
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.h b/drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.h
new file mode 100644
index 00000000000000..7ce3a893d12209
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxsyncutils.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File rgxsyncutils.h
+@Title RGX Sync Utilities
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Sync helper functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RGXSYNCUTILS_H
+#define RGXSYNCUTILS_H
+
+#include "rgxdevice.h"
+#include "sync_server.h"
+#include "rgxdebug.h"
+#include "rgx_fwif_km.h"
+
+typedef struct _RGX_SYNC_DATA_
+{
+ PRGXFWIF_UFO_ADDR *pauiClientUpdateUFOAddress;
+ IMG_UINT32 *paui32ClientUpdateValue;
+ IMG_UINT32 ui32ClientUpdateValueCount;
+ IMG_UINT32 ui32ClientUpdateCount;
+
+ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress;
+ IMG_UINT32 *paui32ClientPRUpdateValue;
+ IMG_UINT32 ui32ClientPRUpdateValueCount;
+ IMG_UINT32 ui32ClientPRUpdateCount;
+} RGX_SYNC_DATA;
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if 0 //defined(TA3D_CHECKPOINT_DEBUG)
+void _DebugSyncValues(IMG_UINT32 *pui32UpdateValues,
+ IMG_UINT32 ui32Count);
+
+void _DebugSyncCheckpoints(PSYNC_CHECKPOINT *apsSyncCheckpoints,
+ IMG_UINT32 ui32Count);
+#endif
+
+PVRSRV_ERROR RGXSyncAppendTimelineUpdate(IMG_UINT32 ui32FenceTimelineUpdateValue,
+ SYNC_ADDR_LIST *psSyncList,
+ SYNC_ADDR_LIST *psPRSyncList, /* FIXME -- is this required? */
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync,
+ RGX_SYNC_DATA *psSyncData,
+ IMG_BOOL bKick3D);
+
+#endif /* RGXSYNCUTILS_H */
+
+/******************************************************************************
+ End of file (rgxsyncutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxta3d.c b/drivers/gpu/drm/img-rogue/1.10/rgxta3d.c
new file mode 100644
index 00000000000000..ae62628bc98616
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxta3d.c
@@ -0,0 +1,5183 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX TA/3D routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX TA/3D routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+/* for the offsetof macro */
+#if defined(LINUX)
+#include <linux/stddef.h>
+#else
+#include <stddef.h>
+#endif
+
+#include "pdump_km.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxta3d.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "ri_server.h"
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "rgx_memallocflags.h"
+#include "rgxccb.h"
+#include "rgxhwperf.h"
+#include "rgxtimerquery.h"
+#include "rgxsyncutils.h"
+#include "htbuffer.h"
+
+#include "rgxdefs_km.h"
+#include "rgx_fwif_km.h"
+#include "physmem.h"
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "process_stats.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+#if defined(SUPPORT_PDVFS)
+#include "rgxpdvfs.h"
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#include "hash.h"
+#include "rgxworkest.h"
+
+#define HASH_CLEAN_LIMIT 6
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TA3D_UFO_DUMP 0
+
+//#define TA3D_CHECKPOINT_DEBUG
+
+#if defined(TA3D_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+static INLINE
+void _DebugSyncValues(const IMG_CHAR *pszFunction,
+ const IMG_UINT32 *pui32UpdateValues,
+ const IMG_UINT32 ui32Count)
+{
+ IMG_UINT32 i;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32UpdateValues;
+
+ for (i=0; i<ui32Count; i++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", pszFunction, i, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+}
+
+static INLINE
+void _DebugSyncCheckpoints(const IMG_CHAR *pszFunction,
+ const IMG_CHAR *pszDMName,
+ const PSYNC_CHECKPOINT *apsSyncCheckpoints,
+ const IMG_UINT32 ui32Count)
+{
+ IMG_UINT32 i;
+ for (i=0; i<ui32Count; i++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFence%sSyncCheckpoints[%d]=<%p>", pszFunction, pszDMName, i, *(apsSyncCheckpoints + i)));
+ }
+}
+
+#else
+#define CHKPT_DBG(X)
+#endif
+
+/* define the number of commands required to be set up by the CCB helper */
+/* 1 command for the TA */
+#define CCB_CMD_HELPER_NUM_TA_COMMANDS 1
+/* Up to 3 commands for the 3D (partial render fence, partial render, and render) */
+#define CCB_CMD_HELPER_NUM_3D_COMMANDS 3
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+#define WORKEST_CYCLES_PREDICTION_GET(x) ((x).ui64CyclesPrediction)
+#else
+#define WORKEST_CYCLES_PREDICTION_GET(x) (NO_CYCEST)
+#endif
+
+typedef struct _DEVMEM_REF_LOOKUP_
+{
+ IMG_UINT32 ui32ZSBufferID;
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+} DEVMEM_REF_LOOKUP;
+
+typedef struct _DEVMEM_FREELIST_LOOKUP_
+{
+ IMG_UINT32 ui32FreeListID;
+ RGX_FREELIST *psFreeList;
+} DEVMEM_FREELIST_LOOKUP;
+
+typedef struct {
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_RC_TA_DATA;
+
+typedef struct {
+ DEVMEM_MEMDESC *psContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+} RGX_SERVER_RC_3D_DATA;
+
+struct _RGX_SERVER_RENDER_CONTEXT_ {
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ /* this lock protects usage of the render context.
+ * it ensures only one kick is being prepared and/or submitted on
+ * this render context at any time
+ */
+ POS_LOCK hLock;
+ RGX_CCB_CMD_HELPER_DATA asTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+ RGX_CCB_CMD_HELPER_DATA as3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+#endif
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWRenderContextMemDesc;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ RGX_SERVER_RC_TA_DATA sTAData;
+ RGX_SERVER_RC_3D_DATA s3DData;
+ IMG_UINT32 ui32CleanupStatus;
+#define RC_CLEANUP_TA_COMPLETE (1 << 0)
+#define RC_CLEANUP_3D_COMPLETE (1 << 1)
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListTAFence;
+ SYNC_ADDR_LIST sSyncAddrListTAUpdate;
+ SYNC_ADDR_LIST sSyncAddrList3DFence;
+ SYNC_ADDR_LIST sSyncAddrList3DUpdate;
+ ATOMIC_T hIntJobRef;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WORKEST_HOST_DATA sWorkEstData;
+#endif
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+};
+
+
+/*
+ Static functions used by render context code
+ */
+
+static
+PVRSRV_ERROR _DestroyTAContext(RGX_SERVER_RC_TA_DATA *psTAData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ psTAData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_TA,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has, so we can free its resources */
+#if defined(DEBUG)
+ /* Log the number of TA context stores which occurred */
+ {
+ RGXFWIF_TACTX_STATE *psFWTAState;
+
+ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+ (void**)&psFWTAState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+ __FUNCTION__, eError));
+ }
+ else
+ {
+ /* Release the CPU virt addr */
+ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+ }
+ }
+#endif
+ FWCommonContextFree(psTAData->psServerCommonContext);
+ DevmemFwFree(psDeviceNode->pvDevice, psTAData->psContextStateMemDesc);
+ psTAData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static
+PVRSRV_ERROR _Destroy3DContext(RGX_SERVER_RC_3D_DATA *ps3DData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ ps3DData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_3D,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+ /* ... it has, so we can free its resources */
+#if defined(DEBUG)
+ /* Log the number of 3D context stores which occurred */
+ {
+ RGXFWIF_3DCTX_STATE *psFW3DState;
+
+ eError = DevmemAcquireCpuVirtAddr(ps3DData->psContextStateMemDesc,
+ (void**)&psFW3DState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"%s: Failed to map firmware render context state (%u)",
+ __FUNCTION__, eError));
+ }
+ else
+ {
+ /* Release the CPU virt addr */
+ DevmemReleaseCpuVirtAddr(ps3DData->psContextStateMemDesc);
+ }
+ }
+#endif
+
+ FWCommonContextFree(ps3DData->psServerCommonContext);
+ DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psContextStateMemDesc);
+ ps3DData->psServerCommonContext = NULL;
+ return PVRSRV_OK;
+}
+
+static void _RGXDumpPMRPageList(DLLIST_NODE *psNode)
+{
+ RGX_PMR_NODE *psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+ PVRSRV_ERROR eError;
+
+ eError = PMRDumpPageList(psPMRNode->psPMR,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Error (%u) printing pmr %p", eError, psPMRNode->psPMR));
+ }
+}
+
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ PVR_LOG(("Freelist FWAddr 0x%08x, ID = %d, CheckSum 0x%016" IMG_UINT64_FMTSPECx,
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ psFreeList->ui32FreelistID,
+ psFreeList->ui64FreelistChecksum));
+
+ /* Dump Init FreeList page list */
+ PVR_LOG((" Initial Memory block"));
+ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+ {
+ _RGXDumpPMRPageList(psNode);
+ }
+
+ /* Dump Grow FreeList page list */
+ PVR_LOG((" Grow Memory blocks"));
+ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+ {
+ _RGXDumpPMRPageList(psNode);
+ }
+
+ return IMG_TRUE;
+}
+
+static void _CheckFreelist(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumOfPagesToCheck,
+ IMG_UINT64 ui64ExpectedCheckSum,
+ IMG_UINT64 *pui64CalculatedCheckSum)
+{
+#if defined(NO_HARDWARE)
+ /* No checksum needed as we have all information in the pdumps */
+ PVR_UNREFERENCED_PARAMETER(psFreeList);
+ PVR_UNREFERENCED_PARAMETER(ui32NumOfPagesToCheck);
+ PVR_UNREFERENCED_PARAMETER(ui64ExpectedCheckSum);
+ *pui64CalculatedCheckSum = 0;
+#else
+ PVRSRV_ERROR eError;
+ size_t uiNumBytes;
+ IMG_UINT8* pui8Buffer;
+ IMG_UINT32* pui32Buffer;
+ IMG_UINT32 ui32CheckSumAdd = 0;
+ IMG_UINT32 ui32CheckSumXor = 0;
+ IMG_UINT32 ui32Entry;
+ IMG_UINT32 ui32Entry2;
+ IMG_BOOL bFreelistBad = IMG_FALSE;
+
+ *pui64CalculatedCheckSum = 0;
+
+ PVR_ASSERT(ui32NumOfPagesToCheck <= (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+ /* Allocate Buffer of the size of the freelist */
+ pui8Buffer = OSAllocMem(ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+ if (pui8Buffer == NULL)
+ {
+ PVR_LOG(("_CheckFreelist: Failed to allocate buffer to check freelist %p!", psFreeList));
+ PVR_ASSERT(0);
+ return;
+ }
+
+ /* Copy freelist content into Buffer */
+ eError = PMR_ReadBytes(psFreeList->psFreeListPMR,
+ psFreeList->uiFreeListPMROffset +
+ (((psFreeList->ui32MaxFLPages -
+ psFreeList->ui32CurrentFLPages -
+ psFreeList->ui32ReadyFLPages) * sizeof(IMG_UINT32)) &
+ ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1)),
+ pui8Buffer,
+ ui32NumOfPagesToCheck * sizeof(IMG_UINT32),
+ &uiNumBytes);
+ if (eError != PVRSRV_OK)
+ {
+ OSFreeMem(pui8Buffer);
+ PVR_LOG(("_CheckFreelist: Failed to get freelist data for freelist %p!", psFreeList));
+ PVR_ASSERT(0);
+ return;
+ }
+
+ PVR_ASSERT(uiNumBytes == ui32NumOfPagesToCheck * sizeof(IMG_UINT32));
+
+ /* Generate checksum (skipping the first page if not allocated) */
+ pui32Buffer = (IMG_UINT32 *)pui8Buffer;
+ ui32Entry = ((psFreeList->ui32GrowFLPages == 0 && psFreeList->ui32CurrentFLPages > 1) ? 1 : 0);
+ for(/*ui32Entry*/ ; ui32Entry < ui32NumOfPagesToCheck; ui32Entry++)
+ {
+ ui32CheckSumAdd += pui32Buffer[ui32Entry];
+ ui32CheckSumXor ^= pui32Buffer[ui32Entry];
+
+ /* Check for double entries */
+ for (ui32Entry2 = ui32Entry+1; ui32Entry2 < ui32NumOfPagesToCheck; ui32Entry2++)
+ {
+ if (pui32Buffer[ui32Entry] == pui32Buffer[ui32Entry2])
+ {
+ PVR_LOG(("_CheckFreelist: Freelist consistency failure: FW addr: 0x%08X, Double entry found 0x%08x on idx: %d and %d of %d",
+ psFreeList->sFreeListFWDevVAddr.ui32Addr,
+ pui32Buffer[ui32Entry2],
+ ui32Entry,
+ ui32Entry2,
+ psFreeList->ui32CurrentFLPages));
+ bFreelistBad = IMG_TRUE;
+ break;
+ }
+ }
+ }
+
+ OSFreeMem(pui8Buffer);
+
+ /* Check the calculated checksum against the expected checksum... */
+ *pui64CalculatedCheckSum = ((IMG_UINT64)ui32CheckSumXor << 32) | ui32CheckSumAdd;
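+ /* i.e. the 64-bit checksum packs the XOR of all entries into the top
+ * 32 bits and the wrapping 32-bit sum into the bottom 32 bits. */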
+
+ if (ui64ExpectedCheckSum != 0 && ui64ExpectedCheckSum != *pui64CalculatedCheckSum)
+ {
+ PVR_LOG(("_CheckFreelist: Checksum mismatch for freelist %p! Expected 0x%016" IMG_UINT64_FMTSPECx " calculated 0x%016" IMG_UINT64_FMTSPECx,
+ psFreeList, ui64ExpectedCheckSum, *pui64CalculatedCheckSum));
+ bFreelistBad = IMG_TRUE;
+ }
+
+ if (bFreelistBad)
+ {
+ PVR_LOG(("_CheckFreelist: Sleeping for ever!"));
+ PVR_ASSERT(!bFreelistBad);
+ }
+#endif
+}
+
+
+/*
+ * Function to work out the number of freelist pages to reserve for growing
+ * within the FW without having to wait for the host to progress a grow
+ * request.
+ *
+ * The number of pages must be a multiple of 4 to align the PM addresses
+ * for the initial freelist allocation and also be less than the grow size.
+ *
+ * If the threshold or grow size means less than 4 pages, then the feature
+ * is not used.
+ */
+static IMG_UINT32 _CalculateFreelistReadyPages(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32FLPages)
+{
+ IMG_UINT32 ui32ReadyFLPages = ((ui32FLPages * psFreeList->ui32GrowThreshold) / 100) &
+ ~((RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32))-1);
+
+ if (ui32ReadyFLPages > psFreeList->ui32GrowFLPages)
+ {
+ ui32ReadyFLPages = psFreeList->ui32GrowFLPages;
+ }
+
+ return ui32ReadyFLPages;
+}
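+
+/* Worked example (illustrative numbers only): with ui32FLPages = 1024, a
+ * ui32GrowThreshold of 10 (percent), and an alignment granule of
+ * RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE/sizeof(IMG_UINT32) = 4 entries,
+ * the raw value is 1024 * 10 / 100 = 102, rounded down to a multiple of 4
+ * to give 100 ready pages, which is then clamped to ui32GrowFLPages. */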
+
+
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages,
+ PDLLIST_NODE pListHeader,
+ IMG_BOOL bForCreate)
+{
+ RGX_PMR_NODE *psPMRNode;
+ IMG_DEVMEM_SIZE_T uiSize;
+ IMG_UINT32 ui32MappingTable = 0;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_DEVMEM_SIZE_T uistartPage;
+ PVRSRV_ERROR eError;
+ const IMG_CHAR * pszAllocName = "Free List";
+
+ /* Are we allowed to grow ? */
+ if (psFreeList->ui32MaxFLPages - (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) < ui32NumPages)
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: grow by %u pages denied. Max PB size reached (current pages %u+%u/%u)",
+ psFreeList,
+ ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->ui32ReadyFLPages,
+ psFreeList->ui32MaxFLPages));
+ return PVRSRV_ERROR_PBSIZE_ALREADY_MAX;
+ }
+
+ /* Allocate kernel memory block structure */
+ psPMRNode = OSAllocMem(sizeof(*psPMRNode));
+ if (psPMRNode == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXGrowFreeList: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages
+ */
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+
+ /*
+ * The PM never takes the last page in a freelist, so if this block
+ * of pages is the first one and there is no ability to grow, then
+ * we can skip allocating one 4K page for the lowest entry.
+ */
+ if (OSGetPageSize() > RGX_BIF_PM_PHYSICAL_PAGE_SIZE)
+ {
+ /*
+ * Allocation size will be rounded up to the OS page size,
+ * so any adjustment made here would be undone by that rounding.
+ */
+ psPMRNode->bFirstPageMissing = IMG_FALSE;
+ }
+ else
+ {
+ psPMRNode->bFirstPageMissing = (psFreeList->ui32GrowFLPages == 0 && ui32NumPages > 1);
+ }
+
+ psPMRNode->ui32NumPages = ui32NumPages;
+ psPMRNode->psFreeList = psFreeList;
+
+ /* Allocate Memory Block */
+ PDUMPCOMMENT("Allocate PB Block (Pages %08X)", ui32NumPages);
+ uiSize = (IMG_DEVMEM_SIZE_T)ui32NumPages * RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+ if (psPMRNode->bFirstPageMissing)
+ {
+ uiSize -= RGX_BIF_PM_PHYSICAL_PAGE_SIZE;
+ }
+ eError = PhysmemNewRamBackedPMR(NULL,
+ psFreeList->psDevInfo->psDeviceNode,
+ uiSize,
+ uiSize,
+ 1,
+ 1,
+ &ui32MappingTable,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE,
+ OSStringLength(pszAllocName) + 1,
+ pszAllocName,
+ psFreeList->ownerPid,
+ &psPMRNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGrowFreeList: Failed to allocate PB block of size: 0x%016" IMG_UINT64_FMTSPECX,
+ (IMG_UINT64)uiSize));
+ goto ErrorBlockAlloc;
+ }
+
+ /* Zeroing physical pages pointed by the PMR */
+ if (psFreeList->psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+ {
+ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXGrowFreeList: Failed to zero PMR %p of freelist %p with Error %d",
+ psPMRNode->psPMR,
+ psFreeList,
+ eError));
+ PVR_ASSERT(0);
+ }
+ }
+
+ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+ uistartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+ uiOffset = psFreeList->uiFreeListPMROffset + ((uistartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
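+
+ /* The freelist PMR is populated from the top down: uistartPage shrinks as
+ * ui32CurrentFLPages grows, so each new block of page addresses is written
+ * immediately below the blocks added before it. */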
+
+#if defined(PVR_RI_DEBUG)
+
+ eError = RIWritePMREntryWithOwnerKM(psPMRNode->psPMR,
+ psFreeList->ownerPid);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: call to RIWritePMREntryWithOwnerKM failed (eError=%d)",
+ __func__,
+ eError));
+ }
+
+ /* Attach RI information */
+ eError = RIWriteMEMDESCEntryKM(psPMRNode->psPMR,
+ OSStringNLength(pszAllocName, DEVMEM_ANNOTATION_MAX_LEN),
+ pszAllocName,
+ 0,
+ uiSize,
+ IMG_FALSE,
+ IMG_FALSE,
+ &psPMRNode->hRIHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: call to RIWriteMEMDESCEntryKM failed (eError=%d)",
+ __func__,
+ eError));
+ }
+
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ /* write Freelist with Memory Block physical addresses */
+ eError = PMRWritePMPageList(
+ /* Target PMR, offset, and length */
+ psFreeList->psFreeListPMR,
+ (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+ (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+ /* Referenced PMR, and "page" granularity */
+ psPMRNode->psPMR,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ &psPMRNode->psPageList);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGrowFreeList: Failed to write pages of Node %p",
+ psPMRNode));
+ goto ErrorPopulateFreelist;
+ }
+
+#if defined(SUPPORT_SHADOW_FREELISTS)
+ /* Copy freelist memory to shadow freelist */
+ {
+ const IMG_UINT32 ui32FLMaxSize = psFreeList->ui32MaxFLPages * sizeof (IMG_UINT32);
+ const IMG_UINT32 ui32MapSize = ui32FLMaxSize * 2;
+ const IMG_UINT32 ui32CopyOffset = uiOffset - psFreeList->uiFreeListPMROffset;
+ IMG_BYTE *pFLMapAddr;
+ size_t uiNumBytes;
+ PVRSRV_ERROR res;
+ IMG_HANDLE hMapHandle;
+
+ /* Map both the FL and the shadow FL */
+ res = PMRAcquireKernelMappingData(psFreeList->psFreeListPMR, psFreeList->uiFreeListPMROffset, ui32MapSize,
+ (void**) &pFLMapAddr, &uiNumBytes, &hMapHandle);
+ if (res != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGrowFreeList: Failed to map freelist (ID=%d)",
+ psFreeList->ui32FreelistID));
+ goto ErrorPopulateFreelist;
+ }
+
+ /* Copy only the newly added memory */
+ memcpy(pFLMapAddr + ui32FLMaxSize + ui32CopyOffset, pFLMapAddr + ui32CopyOffset, uiLength);
+
+#if defined(PDUMP)
+ PDUMPCOMMENT("Initialize shadow freelist");
+
+ /* Translate memcpy to pdump */
+ {
+ IMG_DEVMEM_OFFSET_T uiCurrOffset;
+
+ for (uiCurrOffset = uiOffset; (uiCurrOffset - uiOffset) < uiLength; uiCurrOffset += sizeof (IMG_UINT32))
+ {
+ PMRPDumpCopyMem32(psFreeList->psFreeListPMR,
+ uiCurrOffset + ui32FLMaxSize,
+ psFreeList->psFreeListPMR,
+ uiCurrOffset,
+ ":SYSMEM:$1",
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+ }
+#endif
+
+
+ res = PMRReleaseKernelMappingData(psFreeList->psFreeListPMR, hMapHandle);
+
+ if (res != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXGrowFreeList: Failed to release freelist mapping (ID=%d)",
+ psFreeList->ui32FreelistID));
+ goto ErrorPopulateFreelist;
+ }
+ }
+#endif
+
+ /* It must be added to the tail, otherwise the freelist population won't work */
+ dllist_add_to_head(pListHeader, &psPMRNode->sMemoryBlock);
+
+ /* Update number of available pages */
+ psFreeList->ui32CurrentFLPages += ui32NumPages;
+
+ /* Reserve a number of ready pages so the FW can handle OOM quickly and asynchronously request a grow. */
+ psFreeList->ui32ReadyFLPages = _CalculateFreelistReadyPages(psFreeList, psFreeList->ui32CurrentFLPages);
+ psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
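+
+	/*
+	 * For example, if the grow added 128 pages and _CalculateFreelistReadyPages
+	 * reserves, say, 16 of them (the exact split is that helper's policy),
+	 * only 112 are reported as current; the hidden 16 are handed over when
+	 * the FW hits OOM and requests a grow (see RGXProcessRequestGrow).
+	 */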
+
+ /* Update statistics */
+ if (psFreeList->ui32NumHighPages < psFreeList->ui32CurrentFLPages)
+ {
+ psFreeList->ui32NumHighPages = psFreeList->ui32CurrentFLPages;
+ }
+
+ if (psFreeList->bCheckFreelist)
+ {
+ /*
+ * We can only calculate the freelist checksum when the list is full
+ * (e.g. at initial creation time). At other times the checksum cannot
+ * be calculated and has to be disabled for this freelist.
+ */
+ if ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages)
+ {
+ _CheckFreelist(psFreeList, ui32NumPages, 0, &psFreeList->ui64FreelistChecksum);
+ }
+ else
+ {
+ psFreeList->ui64FreelistChecksum = 0;
+ }
+ }
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Freelist [%p]: %s %u pages (pages=%u+%u/%u checksum=0x%016" IMG_UINT64_FMTSPECx "%s)",
+ psFreeList,
+ ((psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages) == ui32NumPages ? "Create initial" : "Grow by"),
+ ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->ui32ReadyFLPages,
+ psFreeList->ui32MaxFLPages,
+ psFreeList->ui64FreelistChecksum,
+ (psPMRNode->bFirstPageMissing ? " - lowest page not allocated" : "")));
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+ ErrorPopulateFreelist:
+ PMRUnrefPMR(psPMRNode->psPMR);
+
+ ErrorBlockAlloc:
+ OSFreeMem(psPMRNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+
+}
+
+static PVRSRV_ERROR RGXShrinkFreeList(PDLLIST_NODE pListHeader,
+ RGX_FREELIST *psFreeList)
+{
+ DLLIST_NODE *psNode;
+ RGX_PMR_NODE *psPMRNode;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32OldValue;
+
+ /*
+ * Lock protects simultaneous manipulation of:
+ * - the memory block list
+ * - the freelist's ui32CurrentFLPages value
+ */
+ PVR_ASSERT(pListHeader);
+ PVR_ASSERT(psFreeList);
+ PVR_ASSERT(psFreeList->psDevInfo);
+ PVR_ASSERT(psFreeList->psDevInfo->hLockFreeList);
+
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+
+ /* Get node from head of list and remove it */
+ psNode = dllist_get_next_node(pListHeader);
+ if (psNode)
+ {
+ dllist_remove_node(psNode);
+
+ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+ PVR_ASSERT(psPMRNode);
+ PVR_ASSERT(psPMRNode->psPMR);
+ PVR_ASSERT(psPMRNode->psFreeList);
+
+ /* remove block from freelist list */
+
+ /* Unwrite Freelist with Memory Block physical addresses */
+ eError = PMRUnwritePMPageList(psPMRNode->psPageList);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXRemoveBlockFromFreeListKM: Failed to unwrite pages of Node %p",
+ psPMRNode));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+#if defined(PVR_RI_DEBUG)
+
+ if (psPMRNode->hRIHandle)
+ {
+ PVRSRV_ERROR eError;
+
+ eError = RIDeleteMEMDESCEntryKM(psPMRNode->hRIHandle);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: call to RIDeleteMEMDESCEntryKM failed (eError=%d)", __func__, eError));
+ }
+ }
+
+#endif /* if defined(PVR_RI_DEBUG) */
+
+ /* Free PMR (We should be the only one that holds a ref on the PMR) */
+ eError = PMRUnrefPMR(psPMRNode->psPMR);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXRemoveBlockFromFreeListKM: Failed to free PB block %p (error %u)",
+ psPMRNode->psPMR,
+ eError));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* update available pages in freelist */
+ ui32OldValue = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+
+ /*
+		 * Deallocated pages should first be deducted from the ReadyPages bank;
+		 * once none are left, start deducting them from the CurrentPages bank.
+ */
+ if (psPMRNode->ui32NumPages > psFreeList->ui32ReadyFLPages)
+ {
+ psFreeList->ui32CurrentFLPages -= psPMRNode->ui32NumPages - psFreeList->ui32ReadyFLPages;
+ psFreeList->ui32ReadyFLPages = 0;
+ }
+ else
+ {
+ psFreeList->ui32ReadyFLPages -= psPMRNode->ui32NumPages;
+ }
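+
+		/*
+		 * For example, removing a 5-page block while 3 ready pages remain
+		 * deducts those 3 first and the other 2 from the current bank;
+		 * a 2-page block would come entirely out of the ready bank.
+		 */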
+
+ /* check underflow */
+ PVR_ASSERT(ui32OldValue > (psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages));
+
+ PVR_DPF((PVR_DBG_MESSAGE, "Freelist [%p]: shrink by %u pages (current pages %u/%u)",
+ psFreeList,
+ psPMRNode->ui32NumPages,
+ psFreeList->ui32CurrentFLPages,
+ psFreeList->ui32MaxFLPages));
+
+ OSFreeMem(psPMRNode);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,"Freelist [0x%p]: shrink denied. PB already at initial PB size (%u pages)",
+ psFreeList,
+ psFreeList->ui32InitFLPages));
+ eError = PVRSRV_ERROR_PBSIZE_ALREADY_MIN;
+ }
+
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ return eError;
+}
+
+static RGX_FREELIST *FindFreeList(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32FreelistID)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGX_FREELIST *psFreeList = NULL;
+
+ OSLockAcquire(psDevInfo->hLockFreeList);
+
+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+ {
+ RGX_FREELIST *psThisFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+
+ if (psThisFreeList->ui32FreelistID == ui32FreelistID)
+ {
+ psFreeList = psThisFreeList;
+ break;
+ }
+ }
+
+ OSLockRelease(psDevInfo->hLockFreeList);
+ return psFreeList;
+}
+
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID)
+{
+ RGX_FREELIST *psFreeList = NULL;
+ RGXFWIF_KCCB_CMD s3DCCBCmd;
+ IMG_UINT32 ui32GrowValue;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psDevInfo);
+
+ psFreeList = FindFreeList(psDevInfo, ui32FreelistID);
+
+ if (psFreeList)
+ {
+ /* Since the FW made the request, it has already consumed the ready pages, update the host struct */
+ psFreeList->ui32CurrentFLPages += psFreeList->ui32ReadyFLPages;
+ psFreeList->ui32ReadyFLPages = 0;
+
+ /* Try to grow the freelist */
+ eError = RGXGrowFreeList(psFreeList,
+ psFreeList->ui32GrowFLPages,
+ &psFreeList->sMemoryBlockHead,
+ IMG_FALSE);
+
+ if (eError == PVRSRV_OK)
+ {
+			/* Grow successful, report the grow size */
+ ui32GrowValue = psFreeList->ui32GrowFLPages;
+
+ psFreeList->ui32NumGrowReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(0,
+ 1, /* Add 1 to the appropriate counter (Requests by FW) */
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+#endif
+
+ }
+ else
+ {
+ /* Grow failed */
+ ui32GrowValue = 0;
+ PVR_DPF((PVR_DBG_ERROR,"Grow for FreeList %p failed (error %u)",
+ psFreeList,
+ eError));
+ }
+
+ /* send feedback */
+ s3DCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELIST_GROW_UPDATE;
+ s3DCCBCmd.uCmdData.sFreeListGSData.sFreeListFWDevVAddr.ui32Addr = psFreeList->sFreeListFWDevVAddr.ui32Addr;
+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32DeltaPages = ui32GrowValue;
+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32NewPages = psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages;
+ s3DCCBCmd.uCmdData.sFreeListGSData.ui32ReadyPages = psFreeList->ui32ReadyFLPages;
+
+
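+		/*
+		 * RGXScheduleCommand returns PVRSRV_ERROR_RETRY while the kernel
+		 * CCB has no space; retry with a short wait until the command is
+		 * accepted or MAX_HW_TIME_US elapses.
+		 */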
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_3D,
+ &s3DCCBCmd,
+ sizeof(s3DCCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ else
+ {
+ /* Should never happen */
+ PVR_DPF((PVR_DBG_ERROR,"FreeList Lookup for FreeList ID 0x%08x failed (Populate)", ui32FreelistID));
+ PVR_ASSERT(IMG_FALSE);
+ }
+}
+
+static void _RGXFreeListReconstruction(PDLLIST_NODE psNode)
+{
+
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGX_FREELIST *psFreeList;
+ RGX_PMR_NODE *psPMRNode;
+ PVRSRV_ERROR eError;
+ IMG_DEVMEM_OFFSET_T uiOffset;
+ IMG_DEVMEM_SIZE_T uiLength;
+ IMG_UINT32 ui32StartPage;
+
+ psPMRNode = IMG_CONTAINER_OF(psNode, RGX_PMR_NODE, sMemoryBlock);
+ psFreeList = psPMRNode->psFreeList;
+ PVR_ASSERT(psFreeList);
+ psDevInfo = psFreeList->psDevInfo;
+ PVR_ASSERT(psDevInfo);
+
+ uiLength = psPMRNode->ui32NumPages * sizeof(IMG_UINT32);
+ ui32StartPage = (psFreeList->ui32MaxFLPages - psFreeList->ui32CurrentFLPages - psPMRNode->ui32NumPages);
+ uiOffset = psFreeList->uiFreeListPMROffset + ((ui32StartPage * sizeof(IMG_UINT32)) & ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1));
+
+ PMRUnwritePMPageList(psPMRNode->psPageList);
+ psPMRNode->psPageList = NULL;
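+
+	/*
+	 * Re-write this block's FL entries. As in RGXGrowFreeList, if the
+	 * lowest physical page was never allocated (bFirstPageMissing) the
+	 * first one-word slot is skipped and the write shortened to match.
+	 */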
+ eError = PMRWritePMPageList(
+ /* Target PMR, offset, and length */
+ psFreeList->psFreeListPMR,
+ (psPMRNode->bFirstPageMissing ? uiOffset + sizeof(IMG_UINT32) : uiOffset),
+ (psPMRNode->bFirstPageMissing ? uiLength - sizeof(IMG_UINT32) : uiLength),
+ /* Referenced PMR, and "page" granularity */
+ psPMRNode->psPMR,
+ RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT,
+ &psPMRNode->psPageList);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Error (%u) writing FL 0x%08x", eError, (IMG_UINT32)psFreeList->ui32FreelistID));
+ }
+
+ /* Zeroing physical pages pointed by the reconstructed freelist */
+ if (psDevInfo->ui32DeviceFlags & RGXKM_DEVICE_STATE_ZERO_FREELIST)
+ {
+ eError = PMRZeroingPMR(psPMRNode->psPMR, RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXFreeListReconstruction: Failed to zero PMR %p of freelist %p with Error %d",
+ psPMRNode->psPMR,
+ psFreeList,
+ eError));
+ PVR_ASSERT(0);
+ }
+ }
+
+
+ psFreeList->ui32CurrentFLPages += psPMRNode->ui32NumPages;
+}
+
+
+static PVRSRV_ERROR RGXReconstructFreeList(RGX_FREELIST *psFreeList)
+{
+ IMG_UINT32 ui32OriginalFLPages;
+ DLLIST_NODE *psNode, *psNext;
+ RGXFWIF_FREELIST *psFWFreeList;
+ PVRSRV_ERROR eError;
+
+ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: Reconstructing freelist %p (ID=%u)", psFreeList, psFreeList->ui32FreelistID));
+
+ /* Do the FreeList Reconstruction */
+ ui32OriginalFLPages = psFreeList->ui32CurrentFLPages;
+ psFreeList->ui32CurrentFLPages = 0;
+
+ /* Reconstructing Init FreeList pages */
+ dllist_foreach_node(&psFreeList->sMemoryBlockInitHead, psNode, psNext)
+ {
+ _RGXFreeListReconstruction(psNode);
+ }
+
+ /* Reconstructing Grow FreeList pages */
+ dllist_foreach_node(&psFreeList->sMemoryBlockHead, psNode, psNext)
+ {
+ _RGXFreeListReconstruction(psNode);
+ }
+
+ /* Ready pages are allocated but kept hidden until OOM occurs. */
+ psFreeList->ui32CurrentFLPages -= psFreeList->ui32ReadyFLPages;
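+
+	/*
+	 * Every block (init + grow) has just been re-added, so after hiding
+	 * the ready pages again the total must land exactly on the count we
+	 * started with; any mismatch means pages were lost or double-counted.
+	 */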
+ if (psFreeList->ui32CurrentFLPages != ui32OriginalFLPages)
+ {
+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == ui32OriginalFLPages);
+ return PVRSRV_ERROR_FREELIST_RECONSTRUCTION_FAILED;
+ }
+
+ /* Reset the firmware freelist structure */
+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+ psFWFreeList->ui32AllocatedPageCount = 0;
+ psFWFreeList->ui32AllocatedMMUPageCount = 0;
+ psFWFreeList->ui32HWRCounter++;
+
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+ /* Check the Freelist checksum if required (as the list is fully populated) */
+ if (psFreeList->bCheckFreelist)
+ {
+ IMG_UINT64 ui64CheckSum;
+
+ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+ }
+
+ return eError;
+}
+
+
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistsCount,
+ IMG_UINT32 *paui32Freelists)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32Loop;
+ RGXFWIF_KCCB_CMD sTACCBCmd;
+
+ PVR_ASSERT(psDevInfo != NULL);
+ PVR_ASSERT(ui32FreelistsCount <= (MAX_HW_TA3DCONTEXTS * RGXFW_MAX_FREELISTS));
+
+ //PVR_DPF((PVR_DBG_ERROR, "FreeList RECONSTRUCTION: %u freelist(s) requested for reconstruction", ui32FreelistsCount));
+
+ /*
+ * Initialise the response command (in case we don't find a freelist ID)...
+ */
+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_FREELISTS_RECONSTRUCTION_UPDATE;
+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.ui32FreelistsCount = ui32FreelistsCount;
+
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] = paui32Freelists[ui32Loop] |
+ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+ }
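+
+	/*
+	 * Every ID is marked as failed up front; the flag is cleared for a
+	 * freelist below only once its reconstruction succeeds.
+	 */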
+
+ /*
+ * The list of freelists we have been given for reconstruction will
+ * consist of local and global freelists (maybe MMU as well). Any
+ * local freelists will have their global list specified as well.
+	 * However there may be other local freelists not listed, which are
+	 * going to have their global freelist reconstructed. Therefore we also
+	 * have to find those freelists, which means iterating the entire list
+	 * of freelists to determine which must be reconstructed.
+ */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_foreach_node(&psDevInfo->sFreeListHead, psNode, psNext)
+ {
+ RGX_FREELIST *psFreeList = IMG_CONTAINER_OF(psNode, RGX_FREELIST, sNode);
+ IMG_BOOL bReconstruct = IMG_FALSE;
+
+ /*
+ * Check if this freelist needs to be reconstructed (was it requested
+ * or was its global freelist requested)...
+ */
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID ||
+ paui32Freelists[ui32Loop] == psFreeList->ui32FreelistGlobalID)
+ {
+ bReconstruct = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (bReconstruct)
+ {
+ eError = RGXReconstructFreeList(psFreeList);
+ if (eError == PVRSRV_OK)
+ {
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ if (paui32Freelists[ui32Loop] == psFreeList->ui32FreelistID)
+ {
+ /* Reconstruction of this requested freelist was successful... */
+ sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &= ~RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG;
+ break;
+ }
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Reconstructing of FreeList %p failed (error %u)",
+ psFreeList,
+ eError));
+ }
+ }
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ /* Check that all freelists were found and reconstructed... */
+ for (ui32Loop = 0; ui32Loop < ui32FreelistsCount; ui32Loop++)
+ {
+ PVR_ASSERT((sTACCBCmd.uCmdData.sFreeListsReconstructionData.aui32FreelistIDs[ui32Loop] &
+ RGXFWIF_FREELISTS_RECONSTRUCTION_FAILED_FLAG) == 0);
+ }
+
+ /* send feedback */
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_TA,
+ &sTACCBCmd,
+ sizeof(sTACCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+/* Create HWRTDataSet */
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 psRenderTarget, /* FIXME this should not be IMG_UINT32 */
+ IMG_DEV_VIRTADDR psPMMListDevVAddr,
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS],
+ RGX_RTDATA_CLEANUP_DATA **ppsCleanupData,
+ DEVMEM_MEMDESC **ppsRTACtlMemDesc,
+ IMG_UINT32 ui32PPPScreen,
+ IMG_UINT32 ui32PPPGridOffset,
+ IMG_UINT64 ui64PPPMultiSampleCtl,
+ IMG_UINT32 ui32TPCStride,
+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr,
+ IMG_UINT32 ui32TPCSize,
+ IMG_UINT32 ui32TEScreen,
+ IMG_UINT32 ui32TEAA,
+ IMG_UINT32 ui32TEMTILE1,
+ IMG_UINT32 ui32TEMTILE2,
+ IMG_UINT32 ui32MTileStride,
+ IMG_UINT32 ui32ISPMergeLowerX,
+ IMG_UINT32 ui32ISPMergeLowerY,
+ IMG_UINT32 ui32ISPMergeUpperX,
+ IMG_UINT32 ui32ISPMergeUpperY,
+ IMG_UINT32 ui32ISPMergeScaleX,
+ IMG_UINT32 ui32ISPMergeScaleY,
+ IMG_UINT16 ui16MaxRTs,
+ DEVMEM_MEMDESC **ppsMemDesc,
+ IMG_UINT32 *puiHWRTData)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+ RGXFWIF_HWRTDATA *psHWRTData;
+ RGXFWIF_RTA_CTL *psRTACtl;
+ IMG_UINT32 ui32Loop;
+ RGX_RTDATA_CLEANUP_DATA *psTmpCleanup;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* Prepare cleanup struct */
+ psTmpCleanup = OSAllocZMem(sizeof(*psTmpCleanup));
+ if (psTmpCleanup == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto AllocError;
+ }
+
+ *ppsCleanupData = psTmpCleanup;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psTmpCleanup->psCleanupSync,
+ "HWRTData cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto SyncAlloc;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ /*
+ * This FW RT-Data is only mapped into kernel for initialisation.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_HWRTDATA),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwHWRTData",
+ ppsMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateHWRTData: DevmemAllocate for RGX_FWIF_HWRTDATA failed"));
+ goto FWRTDataAllocateError;
+ }
+
+ psTmpCleanup->psDeviceNode = psDeviceNode;
+ psTmpCleanup->psFWHWRTDataMemDesc = *ppsMemDesc;
+
+ RGXSetFirmwareAddress(&pFirmwareAddr, *ppsMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+ *puiHWRTData = pFirmwareAddr.ui32Addr;
+
+ eError = DevmemAcquireCpuVirtAddr(*ppsMemDesc, (void **)&psHWRTData);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTDataCpuMapError);
+
+	/* FIXME: MList is something that the PM writes physical addresses to,
+	 * so ideally it is best allocated in the kernel */
+ psHWRTData->psPMMListDevVAddr = psPMMListDevVAddr;
+ psHWRTData->psParentRenderTarget.ui32Addr = psRenderTarget;
+
+ psHWRTData->ui32PPPScreen = ui32PPPScreen;
+ psHWRTData->ui32PPPGridOffset = ui32PPPGridOffset;
+ psHWRTData->ui64PPPMultiSampleCtl = ui64PPPMultiSampleCtl;
+ psHWRTData->ui32TPCStride = ui32TPCStride;
+ psHWRTData->sTailPtrsDevVAddr = sTailPtrsDevVAddr;
+ psHWRTData->ui32TPCSize = ui32TPCSize;
+ psHWRTData->ui32TEScreen = ui32TEScreen;
+ psHWRTData->ui32TEAA = ui32TEAA;
+ psHWRTData->ui32TEMTILE1 = ui32TEMTILE1;
+ psHWRTData->ui32TEMTILE2 = ui32TEMTILE2;
+ psHWRTData->ui32MTileStride = ui32MTileStride;
+ psHWRTData->ui32ISPMergeLowerX = ui32ISPMergeLowerX;
+ psHWRTData->ui32ISPMergeLowerY = ui32ISPMergeLowerY;
+ psHWRTData->ui32ISPMergeUpperX = ui32ISPMergeUpperX;
+ psHWRTData->ui32ISPMergeUpperY = ui32ISPMergeUpperY;
+ psHWRTData->ui32ISPMergeScaleX = ui32ISPMergeScaleX;
+ psHWRTData->ui32ISPMergeScaleY = ui32ISPMergeScaleY;
+
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+ {
+ psTmpCleanup->apsFreeLists[ui32Loop] = apsFreeLists[ui32Loop];
+ psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount++;
+ psHWRTData->apsFreeLists[ui32Loop].ui32Addr = psTmpCleanup->apsFreeLists[ui32Loop]->sFreeListFWDevVAddr.ui32Addr;
+		/* Invalid initial snapshot value; the snapshot is always taken during
+		 * the first kick and hence gets replaced then anyway, so it's safe to
+		 * set it to 0.
+		 */
+ psHWRTData->aui32FreeListHWRSnapshot[ui32Loop] = 0;
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ PDUMPCOMMENT("Allocate RGXFW RTA control");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_RTA_CTL),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwRTAControl",
+ ppsRTACtlMemDesc);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate RGX RTA control (%u)",
+ eError));
+ goto FWRTAAllocateError;
+ }
+ psTmpCleanup->psRTACtlMemDesc = *ppsRTACtlMemDesc;
+ RGXSetFirmwareAddress(&psHWRTData->psRTACtl,
+ *ppsRTACtlMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ eError = DevmemAcquireCpuVirtAddr(*ppsRTACtlMemDesc, (void **)&psRTACtl);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWRTACpuMapError);
+ psRTACtl->ui32RenderTargetIndex = 0;
+ psRTACtl->ui32ActiveRenderTargets = 0;
+
+ if (ui16MaxRTs > 1)
+ {
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for shadow render target cache");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui16MaxRTs * sizeof(IMG_UINT32),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED|
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwShadowRTCache",
+ &psTmpCleanup->psRTArrayMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u)",
+ ui16MaxRTs, eError));
+ goto FWAllocateRTArryError;
+ }
+
+ RGXSetFirmwareAddress(&psRTACtl->sValidRenderTargets,
+ psTmpCleanup->psRTArrayMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+
+ /* Allocate memory for the checks */
+ PDUMPCOMMENT("Allocate memory for tracking renders accumulation");
+ eError = DevmemFwAllocate(psDevInfo,
+ ui16MaxRTs * sizeof(IMG_UINT32),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(FIRMWARE_CACHED) |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_UNCACHED|
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC,
+ "FwRendersAccumulation",
+ &psTmpCleanup->psRendersAccArrayMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateHWRTData: Failed to allocate %d bytes for render target array (%u) (renders accumulation)",
+ ui16MaxRTs, eError));
+ goto FWAllocateRTAccArryError;
+ }
+
+ RGXSetFirmwareAddress(&psRTACtl->sNumRenders,
+ psTmpCleanup->psRendersAccArrayMemDesc,
+ 0, RFW_FWADDR_FLAG_NONE);
+ psRTACtl->ui16MaxRTs = ui16MaxRTs;
+ }
+ else
+ {
+ psRTACtl->sValidRenderTargets.ui32Addr = 0;
+ psRTACtl->sNumRenders.ui32Addr = 0;
+ psRTACtl->ui16MaxRTs = 1;
+ }
+
+ PDUMPCOMMENT("Dump HWRTData 0x%08X", *puiHWRTData);
+ DevmemPDumpLoadMem(*ppsMemDesc, 0, sizeof(*psHWRTData), PDUMP_FLAGS_CONTINUOUS);
+ PDUMPCOMMENT("Dump RTACtl");
+ DevmemPDumpLoadMem(*ppsRTACtlMemDesc, 0, sizeof(*psRTACtl), PDUMP_FLAGS_CONTINUOUS);
+
+ DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+ DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+ return PVRSRV_OK;
+
+ FWAllocateRTAccArryError:
+ DevmemFwFree(psDevInfo, psTmpCleanup->psRTArrayMemDesc);
+ FWAllocateRTArryError:
+ DevmemReleaseCpuVirtAddr(*ppsRTACtlMemDesc);
+ FWRTACpuMapError:
+ RGXUnsetFirmwareAddress(*ppsRTACtlMemDesc);
+ DevmemFwFree(psDevInfo, *ppsRTACtlMemDesc);
+ FWRTAAllocateError:
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+ {
+ PVR_ASSERT(psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+ psTmpCleanup->apsFreeLists[ui32Loop]->ui32RefCount--;
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+ DevmemReleaseCpuVirtAddr(*ppsMemDesc);
+ FWRTDataCpuMapError:
+ RGXUnsetFirmwareAddress(*ppsMemDesc);
+ DevmemFwFree(psDevInfo, *ppsMemDesc);
+ FWRTDataAllocateError:
+ SyncPrimFree(psTmpCleanup->psCleanupSync);
+ SyncAlloc:
+ *ppsCleanupData = NULL;
+ OSFreeMem(psTmpCleanup);
+
+ AllocError:
+ return eError;
+}
+
+/* Destroy HWRTDataSet */
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ PVRSRV_ERROR eError;
+ PRGXFWIF_HWRTDATA psHWRTData;
+ IMG_UINT32 ui32Loop;
+
+ PVR_ASSERT(psCleanupData);
+
+ RGXSetFirmwareAddress(&psHWRTData, psCleanupData->psFWHWRTDataMemDesc, 0, RFW_FWADDR_NOREF_FLAG);
+
+ /* Cleanup HWRTData in TA */
+ eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+ psHWRTData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_TA);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+
+ psDevInfo = psCleanupData->psDeviceNode->pvDevice;
+
+ /* Cleanup HWRTData in 3D */
+ eError = RGXFWRequestHWRTDataCleanUp(psCleanupData->psDeviceNode,
+ psHWRTData,
+ psCleanupData->psCleanupSync,
+ RGXFWIF_DM_3D);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+
+ /* If we got here then TA and 3D operations on this RTData have finished */
+ if (psCleanupData->psRTACtlMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psCleanupData->psRTACtlMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psRTACtlMemDesc);
+ }
+
+ RGXUnsetFirmwareAddress(psCleanupData->psFWHWRTDataMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psFWHWRTDataMemDesc);
+
+ if (psCleanupData->psRTArrayMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psCleanupData->psRTArrayMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psRTArrayMemDesc);
+ }
+
+ if (psCleanupData->psRendersAccArrayMemDesc)
+ {
+ RGXUnsetFirmwareAddress(psCleanupData->psRendersAccArrayMemDesc);
+ DevmemFwFree(psDevInfo, psCleanupData->psRendersAccArrayMemDesc);
+ }
+
+ SyncPrimFree(psCleanupData->psCleanupSync);
+
+ /* decrease freelist refcount */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ for (ui32Loop = 0; ui32Loop < RGXFW_MAX_FREELISTS; ui32Loop++)
+ {
+ PVR_ASSERT(psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount > 0);
+ psCleanupData->apsFreeLists[ui32Loop]->ui32RefCount--;
+ }
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ OSFreeMem(psCleanupData);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MaxFLPages,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ IMG_UINT32 ui32GrowParamThreshold,
+ RGX_FREELIST *psGlobalFreeList,
+ IMG_BOOL bCheckFreelist,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ PMR *psFreeListPMR,
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset,
+ RGX_FREELIST **ppsFreeList)
+{
+ PVRSRV_ERROR eError;
+ RGXFWIF_FREELIST *psFWFreeList;
+ DEVMEM_MEMDESC *psFWFreelistMemDesc;
+ RGX_FREELIST *psFreeList;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ if (OSGetPageShift() > RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT)
+ {
+ IMG_UINT32 ui32Size, ui32NewInitFLPages, ui32NewMaxFLPages, ui32NewGrowFLPages;
+
+ /* Round up number of FL pages to the next multiple of the OS page size */
+
+ ui32Size = ui32InitFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+ ui32NewInitFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+ ui32Size = ui32GrowFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+ ui32NewGrowFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+ ui32Size = ui32MaxFLPages << RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+ ui32Size = PVR_ALIGN(ui32Size, (IMG_DEVMEM_SIZE_T)OSGetPageSize());
+ ui32NewMaxFLPages = ui32Size >> RGX_BIF_PM_PHYSICAL_PAGE_ALIGNSHIFT;
+
+ PVR_DPF((PVR_DBG_WARNING, "%s: Increased number of PB pages: Init %u -> %u, Grow %u -> %u, Max %u -> %u",
+ __func__, ui32InitFLPages, ui32NewInitFLPages, ui32GrowFLPages, ui32NewGrowFLPages, ui32MaxFLPages, ui32NewMaxFLPages));
+
+ ui32InitFLPages = ui32NewInitFLPages;
+ ui32GrowFLPages = ui32NewGrowFLPages;
+ ui32MaxFLPages = ui32NewMaxFLPages;
+ }
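+
+	/*
+	 * Worked example for the rounding above, assuming 4KB PM pages
+	 * (ALIGNSHIFT == 12) on a 16KB-page OS: InitFLPages = 10 maps to
+	 * 40KB, which rounds up to 48KB, i.e. 12 PM pages, so every FL
+	 * quantity becomes a multiple of 4.
+	 */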
+
+ /* Allocate kernel freelist struct */
+ psFreeList = OSAllocZMem(sizeof(*psFreeList));
+ if (psFreeList == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: failed to allocate host data structure"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocHost;
+ }
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psFreeList->psCleanupSync,
+ "ta3d free list cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateFreeList: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto SyncAlloc;
+ }
+
+ /*
+ * This FW FreeList context is only mapped into kernel for initialisation
+ * and reconstruction (at other times it is not mapped and only used by
+	 * the FW). Therefore the GPU cache doesn't need coherency, and write-combine
+	 * suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWFreeList),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwFreeList",
+ &psFWFreelistMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateFreeList: DevmemAllocate for RGXFWIF_FREELIST failed"));
+ goto FWFreeListAlloc;
+ }
+
+ /* Initialise host data structures */
+ psFreeList->psDevInfo = psDevInfo;
+ psFreeList->psFreeListPMR = psFreeListPMR;
+ psFreeList->uiFreeListPMROffset = uiFreeListPMROffset;
+ psFreeList->psFWFreelistMemDesc = psFWFreelistMemDesc;
+ RGXSetFirmwareAddress(&psFreeList->sFreeListFWDevVAddr, psFWFreelistMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ psFreeList->ui32FreelistID = psDevInfo->ui32FreelistCurrID++;
+ psFreeList->ui32FreelistGlobalID = (psGlobalFreeList ? psGlobalFreeList->ui32FreelistID : 0);
+ psFreeList->ui32MaxFLPages = ui32MaxFLPages;
+ psFreeList->ui32InitFLPages = ui32InitFLPages;
+ psFreeList->ui32GrowFLPages = ui32GrowFLPages;
+ psFreeList->ui32CurrentFLPages = 0;
+ psFreeList->ui32ReadyFLPages = 0;
+ psFreeList->ui32GrowThreshold = ui32GrowParamThreshold;
+ psFreeList->ui64FreelistChecksum = 0;
+ psFreeList->ui32RefCount = 0;
+ psFreeList->bCheckFreelist = bCheckFreelist;
+ dllist_init(&psFreeList->sMemoryBlockHead);
+ dllist_init(&psFreeList->sMemoryBlockInitHead);
+ psFreeList->ownerPid = OSGetCurrentClientProcessIDKM();
+
+
+ /* Add to list of freelists */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_add_to_tail(&psDevInfo->sFreeListHead, &psFreeList->sNode);
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+
+ /* Initialise FW data structure */
+ eError = DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", FWFreeListCpuMap);
+
+ {
+ const IMG_UINT32 ui32ReadyPages = _CalculateFreelistReadyPages(psFreeList, ui32InitFLPages);
+
+ psFWFreeList->ui32MaxPages = ui32MaxFLPages;
+ psFWFreeList->ui32CurrentPages = ui32InitFLPages - ui32ReadyPages;
+ psFWFreeList->ui32GrowPages = ui32GrowFLPages;
+ psFWFreeList->ui32CurrentStackTop = psFWFreeList->ui32CurrentPages - 1;
+ psFWFreeList->psFreeListDevVAddr = sFreeListDevVAddr;
+ psFWFreeList->ui64CurrentDevVAddr = (sFreeListDevVAddr.uiAddr +
+ ((ui32MaxFLPages - psFWFreeList->ui32CurrentPages) * sizeof(IMG_UINT32))) &
+ ~((IMG_UINT64)RGX_BIF_PM_FREELIST_BASE_ADDR_ALIGNSIZE-1);
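+
+		/*
+		 * The free-page stack occupies the tail of the freelist buffer:
+		 * its current base sits (MaxPages - CurrentPages) entries in,
+		 * aligned down to the PM base-address alignment, leaving the
+		 * lower slots free for future grows.
+		 */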
+ psFWFreeList->ui32FreeListID = psFreeList->ui32FreelistID;
+ psFWFreeList->bGrowPending = IMG_FALSE;
+ psFWFreeList->ui32ReadyPages = ui32ReadyPages;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"Freelist %p created: Max pages 0x%08x, Init pages 0x%08x, Max FL base address 0x%016" IMG_UINT64_FMTSPECx ", Init FL base address 0x%016" IMG_UINT64_FMTSPECx,
+ psFreeList,
+ ui32MaxFLPages,
+ ui32InitFLPages,
+ sFreeListDevVAddr.uiAddr,
+ psFWFreeList->ui64CurrentDevVAddr));
+
+ PDUMPCOMMENT("Dump FW FreeList");
+ DevmemPDumpLoadMem(psFreeList->psFWFreelistMemDesc, 0, sizeof(*psFWFreeList), PDUMP_FLAGS_CONTINUOUS);
+
+ /*
+ * Separate dump of the Freelist's number of Pages and stack pointer.
+	 * This makes it easy to modify the PB size in the out2.txt files.
+ */
+ PDUMPCOMMENT("FreeList TotalPages");
+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_FREELIST, ui32CurrentPages),
+ psFWFreeList->ui32CurrentPages,
+ PDUMP_FLAGS_CONTINUOUS);
+ PDUMPCOMMENT("FreeList StackPointer");
+ DevmemPDumpLoadMemValue32(psFreeList->psFWFreelistMemDesc,
+ offsetof(RGXFWIF_FREELIST, ui32CurrentStackTop),
+ psFWFreeList->ui32CurrentStackTop,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+
+ /* Add initial PB block */
+ eError = RGXGrowFreeList(psFreeList,
+ ui32InitFLPages,
+ &psFreeList->sMemoryBlockInitHead,
+ IMG_TRUE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "RGXCreateFreeList: failed to allocate initial memory block for free list 0x%016" IMG_UINT64_FMTSPECx " (error = %u)",
+ sFreeListDevVAddr.uiAddr,
+ eError));
+ goto FWFreeListCpuMap;
+ }
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ /* Update Stats */
+ PVRSRVStatsUpdateFreelistStats(1, /* Add 1 to the appropriate counter (Requests by App)*/
+ 0,
+ psFreeList->ui32InitFLPages,
+ psFreeList->ui32NumHighPages,
+ psFreeList->ownerPid);
+
+#endif
+
+ /* return values */
+ *ppsFreeList = psFreeList;
+
+ return PVRSRV_OK;
+
+ /* Error handling */
+
+ FWFreeListCpuMap:
+ /* Remove freelists from list */
+ OSLockAcquire(psDevInfo->hLockFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ OSLockRelease(psDevInfo->hLockFreeList);
+
+ RGXUnsetFirmwareAddress(psFWFreelistMemDesc);
+ DevmemFwFree(psDevInfo, psFWFreelistMemDesc);
+
+ FWFreeListAlloc:
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+ SyncAlloc:
+ OSFreeMem(psFreeList);
+
+ ErrorAllocHost:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ RGXDestroyFreeList
+ */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32RefCount;
+
+ PVR_ASSERT(psFreeList);
+
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+ ui32RefCount = psFreeList->ui32RefCount;
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ if (ui32RefCount != 0)
+ {
+ /* Freelist still busy */
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Freelist is not in use => start firmware cleanup */
+ eError = RGXFWRequestFreeListCleanUp(psFreeList->psDevInfo,
+ psFreeList->sFreeListFWDevVAddr,
+ psFreeList->psCleanupSync);
+ if (eError != PVRSRV_OK)
+ {
+ /* Can happen if the firmware took too long to handle the cleanup request,
+		 * or if SLC-flushes didn't go through (due to some GPU lockup) */
+ return eError;
+ }
+
+ /* Remove FreeList from linked list before we destroy it... */
+ OSLockAcquire(psFreeList->psDevInfo->hLockFreeList);
+ dllist_remove_node(&psFreeList->sNode);
+ OSLockRelease(psFreeList->psDevInfo->hLockFreeList);
+
+ if (psFreeList->bCheckFreelist)
+ {
+ RGXFWIF_FREELIST *psFWFreeList;
+		IMG_UINT32 ui32CurrentStackTop;
+ IMG_UINT64 ui64CheckSum;
+
+ /* Get the current stack pointer for this free list */
+ DevmemAcquireCpuVirtAddr(psFreeList->psFWFreelistMemDesc, (void **)&psFWFreeList);
+ ui32CurrentStackTop = psFWFreeList->ui32CurrentStackTop;
+ DevmemReleaseCpuVirtAddr(psFreeList->psFWFreelistMemDesc);
+
+ if (ui32CurrentStackTop == psFreeList->ui32CurrentFLPages-1)
+ {
+ /* Do consistency tests (as the list is fully populated) */
+ _CheckFreelist(psFreeList, psFreeList->ui32CurrentFLPages + psFreeList->ui32ReadyFLPages, psFreeList->ui64FreelistChecksum, &ui64CheckSum);
+ }
+ else
+ {
+ /* Check for duplicate pages, but don't check the checksum as the list is not fully populated */
+ _CheckFreelist(psFreeList, ui32CurrentStackTop+1, 0, &ui64CheckSum);
+ }
+ }
+
+ /* Destroy FW structures */
+ RGXUnsetFirmwareAddress(psFreeList->psFWFreelistMemDesc);
+ DevmemFwFree(psFreeList->psDevInfo, psFreeList->psFWFreelistMemDesc);
+
+ /* Remove grow shrink blocks */
+ while (!dllist_is_empty(&psFreeList->sMemoryBlockHead))
+ {
+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockHead, psFreeList);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+
+ /* Remove initial PB block */
+ eError = RGXShrinkFreeList(&psFreeList->sMemoryBlockInitHead, psFreeList);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ /* consistency checks */
+ PVR_ASSERT(dllist_is_empty(&psFreeList->sMemoryBlockInitHead));
+ PVR_ASSERT(psFreeList->ui32CurrentFLPages == 0);
+
+ SyncPrimFree(psFreeList->psCleanupSync);
+
+ /* free Freelist */
+ OSFreeMem(psFreeList);
+
+ return eError;
+}
+
+
+/*
+ RGXCreateRenderTarget
+ */
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR psVHeapTableDevVAddr,
+ RGX_RT_CLEANUP_DATA **ppsCleanupData,
+ IMG_UINT32 *sRenderTargetFWDevVAddr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ RGXFWIF_RENDER_TARGET *psRenderTarget;
+ RGXFWIF_DEV_VIRTADDR pFirmwareAddr;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_RT_CLEANUP_DATA *psCleanupData;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psCleanupData = OSAllocZMem(sizeof(*psCleanupData));
+ if (psCleanupData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_out;
+ }
+
+ psCleanupData->psDeviceNode = psDeviceNode;
+ /*
+ * This FW render target context is only mapped into kernel for initialisation.
+ * Otherwise this allocation is only used by the FW.
+ * Therefore the GPU cache doesn't need coherency,
+	 * and write-combine suffices on the CPU side (the WC buffer will be flushed at the first TA kick).
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psRenderTarget),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwRenderTarget",
+ &psCleanupData->psRenderTargetMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "RGXCreateRenderTarget: DevmemAllocate for Render Target failed"));
+ goto err_free;
+ }
+ RGXSetFirmwareAddress(&pFirmwareAddr, psCleanupData->psRenderTargetMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+ *sRenderTargetFWDevVAddr = pFirmwareAddr.ui32Addr;
+
+ eError = DevmemAcquireCpuVirtAddr(psCleanupData->psRenderTargetMemDesc, (void **)&psRenderTarget);
+ PVR_LOGG_IF_ERROR(eError, "Devmem AcquireCpuVirtAddr", err_fwalloc);
+
+ psRenderTarget->psVHeapTableDevVAddr = psVHeapTableDevVAddr;
+ psRenderTarget->bTACachesNeedZeroing = IMG_FALSE;
+ PDUMPCOMMENT("Dump RenderTarget");
+ DevmemPDumpLoadMem(psCleanupData->psRenderTargetMemDesc, 0, sizeof(*psRenderTarget), PDUMP_FLAGS_CONTINUOUS);
+ DevmemReleaseCpuVirtAddr(psCleanupData->psRenderTargetMemDesc);
+
+ *ppsCleanupData = psCleanupData;
+
+ err_out:
+ return eError;
+
+ err_free:
+ OSFreeMem(psCleanupData);
+ goto err_out;
+
+ err_fwalloc:
+ DevmemFwFree(psDevInfo, psCleanupData->psRenderTargetMemDesc);
+ goto err_free;
+
+}
+
+
+/*
+ RGXDestroyRenderTarget
+ */
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psCleanupData->psDeviceNode;
+
+ RGXUnsetFirmwareAddress(psCleanupData->psRenderTargetMemDesc);
+
+ /*
+ Note:
+ When we get RT cleanup in the FW call that instead
+ */
+ /* Flush the SLC before freeing */
+ {
+ RGXFWIF_KCCB_CMD sFlushInvalCmd;
+ PVRSRV_ERROR eError;
+
+ /* Schedule the SLC flush command ... */
+#if defined(PDUMP)
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Submit SLC flush and invalidate");
+#endif
+ sFlushInvalCmd.eCmdType = RGXFWIF_KCCB_CMD_SLCFLUSHINVAL;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bInval = IMG_TRUE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.bDMContext = IMG_FALSE;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.eDM = 0;
+ sFlushInvalCmd.uCmdData.sSLCFlushInvalData.psContext.ui32Addr = 0;
+
+ eError = RGXSendCommandWithPowLock(psDeviceNode->pvDevice,
+ RGXFWIF_DM_GP,
+ &sFlushInvalCmd,
+ sizeof(sFlushInvalCmd),
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: Failed to schedule SLC flush command with error (%u)", eError));
+ }
+ else
+ {
+ /* Wait for the SLC flush to complete */
+ eError = RGXWaitForFWOp(psDeviceNode->pvDevice, RGXFWIF_DM_GP, psDeviceNode->psSyncPrim, PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXDestroyRenderTarget: SLC flush and invalidate aborted with error (%u)", eError));
+ }
+ }
+ }
+
+ DevmemFwFree(psDeviceNode->pvDevice, psCleanupData->psRenderTargetMemDesc);
+ OSFreeMem(psCleanupData);
+ return PVRSRV_OK;
+}
+
+/*
+ RGXCreateZSBuffer
+ */
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ RGX_ZSBUFFER_DATA **ppsZSBuffer,
+ IMG_UINT32 *pui32ZSBufferFWDevVAddr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_PRBUFFER *psFWZSBuffer;
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+ DEVMEM_MEMDESC *psFWZSBufferMemDesc;
+ IMG_BOOL bOnDemand = PVRSRV_CHECK_ON_DEMAND(uiMapFlags) ? IMG_TRUE : IMG_FALSE;
+
+ /* Allocate host data structure */
+ psZSBuffer = OSAllocZMem(sizeof(*psZSBuffer));
+ if (psZSBuffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup data structure for ZS-Buffer"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ErrorAllocCleanup;
+ }
+
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psZSBuffer->psCleanupSync,
+ "ta3d zs buffer cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto ErrorSyncAlloc;
+ }
+
+ /* Populate Host data */
+ psZSBuffer->psDevInfo = psDevInfo;
+ psZSBuffer->psReservation = psReservation;
+ psZSBuffer->psPMR = psPMR;
+ psZSBuffer->uiMapFlags = uiMapFlags;
+ psZSBuffer->ui32RefCount = 0;
+ psZSBuffer->bOnDemand = bOnDemand;
+ if (bOnDemand)
+ {
+ psZSBuffer->ui32ZSBufferID = psDevInfo->ui32ZSBufferCurrID++;
+ psZSBuffer->psMapping = NULL;
+
+ OSLockAcquire(psDevInfo->hLockZSBuffer);
+ dllist_add_to_tail(&psDevInfo->sZSBufferHead, &psZSBuffer->sNode);
+ OSLockRelease(psDevInfo->hLockZSBuffer);
+ }
+
+ /* Allocate firmware memory for ZS-Buffer. */
+ PDUMPCOMMENT("Allocate firmware ZS-Buffer data structure");
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(*psFWZSBuffer),
+ PVRSRV_MEMALLOCFLAG_DEVICE_FLAG(PMMETA_PROTECT) |
+ PVRSRV_MEMALLOCFLAG_ZERO_ON_ALLOC |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_CACHE_INCOHERENT |
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE,
+ "FwZSBuffer",
+ &psFWZSBufferMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to allocate firmware ZS-Buffer (%u)", eError));
+ goto ErrorAllocFWZSBuffer;
+ }
+ psZSBuffer->psFWZSBufferMemDesc = psFWZSBufferMemDesc;
+
+ /* Temporarily map the firmware render context to the kernel. */
+ eError = DevmemAcquireCpuVirtAddr(psFWZSBufferMemDesc,
+ (void **)&psFWZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"RGXCreateZSBufferKM: Failed to map firmware ZS-Buffer (%u)", eError));
+ goto ErrorAcquireFWZSBuffer;
+ }
+
+ /* Populate FW ZS-Buffer data structure */
+ psFWZSBuffer->bOnDemand = bOnDemand;
+ psFWZSBuffer->eState = (bOnDemand) ? RGXFWIF_PRBUFFER_UNBACKED : RGXFWIF_PRBUFFER_BACKED;
+ psFWZSBuffer->ui32BufferID = psZSBuffer->ui32ZSBufferID;
+
+ /* Get firmware address of ZS-Buffer. */
+ RGXSetFirmwareAddress(&psZSBuffer->sZSBufferFWDevVAddr, psFWZSBufferMemDesc, 0, RFW_FWADDR_FLAG_NONE);
+
+ /* Dump the ZS-Buffer and the memory content */
+ PDUMPCOMMENT("Dump firmware ZS-Buffer");
+ DevmemPDumpLoadMem(psFWZSBufferMemDesc, 0, sizeof(*psFWZSBuffer), PDUMP_FLAGS_CONTINUOUS);
+
+ /* Release address acquired above. */
+ DevmemReleaseCpuVirtAddr(psFWZSBufferMemDesc);
+
+
+ /* define return value */
+ *ppsZSBuffer = psZSBuffer;
+ *pui32ZSBufferFWDevVAddr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS-Buffer [%p] created (%s)",
+ psZSBuffer,
+ (bOnDemand) ? "On-Demand": "Up-front"));
+
+	psZSBuffer->owner = OSGetCurrentClientProcessIDKM();
+
+ return PVRSRV_OK;
+
+ /* error handling */
+
+ ErrorAcquireFWZSBuffer:
+ DevmemFwFree(psDevInfo, psFWZSBufferMemDesc);
+
+ ErrorAllocFWZSBuffer:
+ SyncPrimFree(psZSBuffer->psCleanupSync);
+
+ ErrorSyncAlloc:
+ OSFreeMem(psZSBuffer);
+
+ ErrorAllocCleanup:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ RGXDestroyZSBuffer
+ */
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+ POS_LOCK hLockZSBuffer;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psZSBuffer);
+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+ /* Request ZS Buffer cleanup */
+ eError = RGXFWRequestZSBufferCleanUp(psZSBuffer->psDevInfo,
+ psZSBuffer->sZSBufferFWDevVAddr,
+ psZSBuffer->psCleanupSync);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ /* Free the firmware render context. */
+ RGXUnsetFirmwareAddress(psZSBuffer->psFWZSBufferMemDesc);
+ DevmemFwFree(psZSBuffer->psDevInfo, psZSBuffer->psFWZSBufferMemDesc);
+
+ /* Remove Deferred Allocation from list */
+ if (psZSBuffer->bOnDemand)
+ {
+ OSLockAcquire(hLockZSBuffer);
+ PVR_ASSERT(dllist_node_is_in_list(&psZSBuffer->sNode));
+ dllist_remove_node(&psZSBuffer->sNode);
+ OSLockRelease(hLockZSBuffer);
+ }
+
+ SyncPrimFree(psZSBuffer->psCleanupSync);
+
+ PVR_ASSERT(psZSBuffer->ui32RefCount == 0);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"ZS-Buffer [%p] destroyed",psZSBuffer));
+
+ /* Free ZS-Buffer host data structure */
+ OSFreeMem(psZSBuffer);
+
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+ POS_LOCK hLockZSBuffer;
+ PVRSRV_ERROR eError;
+
+ if (!psZSBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!psZSBuffer->bOnDemand)
+ {
+ /* Only deferred allocations can be populated */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing requested",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+ OSLockAcquire(hLockZSBuffer);
+
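+	/*
+	 * Only the first reference actually maps the PMR; subsequent callers
+	 * just take another reference on the existing backing.
+	 */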
+ if (psZSBuffer->ui32RefCount == 0)
+ {
+ if (psZSBuffer->bOnDemand)
+ {
+ IMG_HANDLE hDevmemHeap;
+
+ PVR_ASSERT(psZSBuffer->psMapping == NULL);
+
+ /* Get Heap */
+ eError = DevmemServerGetHeapHandle(psZSBuffer->psReservation, &hDevmemHeap);
+			PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = DevmemIntMapPMR(hDevmemHeap,
+ psZSBuffer->psReservation,
+ psZSBuffer->psPMR,
+ psZSBuffer->uiMapFlags,
+ &psZSBuffer->psMapping);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable populate ZS Buffer [%p, ID=0x%08x] with error %u",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID,
+ eError));
+ OSLockRelease(hLockZSBuffer);
+ return eError;
+
+ }
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing acquired",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+ }
+ }
+
+ /* Increase refcount*/
+ psZSBuffer->ui32RefCount++;
+
+ OSLockRelease(hLockZSBuffer);
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+ RGX_POPULATION **ppsPopulation)
+{
+ RGX_POPULATION *psPopulation;
+ PVRSRV_ERROR eError;
+
+ psZSBuffer->ui32NumReqByApp++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsUpdateZSBufferStats(1,0,psZSBuffer->owner);
+#endif
+
+ /* Do the backing */
+ eError = RGXBackingZSBuffer(psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ goto OnErrorBacking;
+ }
+
+ /* Create the handle to the backing */
+ psPopulation = OSAllocMem(sizeof(*psPopulation));
+ if (psPopulation == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto OnErrorAlloc;
+ }
+
+ psPopulation->psZSBuffer = psZSBuffer;
+
+ /* return value */
+ *ppsPopulation = psPopulation;
+
+ return PVRSRV_OK;
+
+ OnErrorAlloc:
+ RGXUnbackingZSBuffer(psZSBuffer);
+
+ OnErrorBacking:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer)
+{
+ POS_LOCK hLockZSBuffer;
+ PVRSRV_ERROR eError;
+
+ if (!psZSBuffer)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_ASSERT(psZSBuffer->ui32RefCount);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"ZS Buffer [%p, ID=0x%08x]: Physical backing removal requested",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+
+ hLockZSBuffer = psZSBuffer->psDevInfo->hLockZSBuffer;
+
+ OSLockAcquire(hLockZSBuffer);
+
+ if (psZSBuffer->bOnDemand)
+ {
+ if (psZSBuffer->ui32RefCount == 1)
+ {
+ PVR_ASSERT(psZSBuffer->psMapping);
+
+ eError = DevmemIntUnmapPMR(psZSBuffer->psMapping);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Unable to unpopulate ZS Buffer [%p, ID=0x%08x] with error %u",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID,
+ eError));
+ OSLockRelease(hLockZSBuffer);
+ return eError;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "ZS Buffer [%p, ID=0x%08x]: Physical backing removed",
+ psZSBuffer,
+ psZSBuffer->ui32ZSBufferID));
+ }
+ }
+
+ /* Decrease refcount*/
+ psZSBuffer->ui32RefCount--;
+
+ OSLockRelease(hLockZSBuffer);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation)
+{
+ PVRSRV_ERROR eError;
+
+ if (!psPopulation)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ eError = RGXUnbackingZSBuffer(psPopulation->psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ OSFreeMem(psPopulation);
+
+ return PVRSRV_OK;
+}
+
+static RGX_ZSBUFFER_DATA *FindZSBuffer(PVRSRV_RGXDEV_INFO *psDevInfo, IMG_UINT32 ui32ZSBufferID)
+{
+ DLLIST_NODE *psNode, *psNext;
+ RGX_ZSBUFFER_DATA *psZSBuffer = NULL;
+
+ OSLockAcquire(psDevInfo->hLockZSBuffer);
+
+ dllist_foreach_node(&psDevInfo->sZSBufferHead, psNode, psNext)
+ {
+ RGX_ZSBUFFER_DATA *psThisZSBuffer = IMG_CONTAINER_OF(psNode, RGX_ZSBUFFER_DATA, sNode);
+
+ if (psThisZSBuffer->ui32ZSBufferID == ui32ZSBufferID)
+ {
+ psZSBuffer = psThisZSBuffer;
+ break;
+ }
+ }
+
+ OSLockRelease(psDevInfo->hLockZSBuffer);
+ return psZSBuffer;
+}
+
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID)
+{
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+ RGXFWIF_KCCB_CMD sTACCBCmd;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psDevInfo);
+
+ /* scan all deferred allocations */
+ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+ if (psZSBuffer)
+ {
+ IMG_BOOL bBackingDone = IMG_TRUE;
+
+ /* Populate ZLS */
+ eError = RGXBackingZSBuffer(psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"Populating ZS-Buffer failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+ bBackingDone = IMG_FALSE;
+ }
+
+ /* send confirmation */
+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_BACKING_UPDATE;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = bBackingDone;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_TA,
+ &sTACCBCmd,
+ sizeof(sTACCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ psZSBuffer->ui32NumReqByFW++;
+
+#if defined(PVRSRV_ENABLE_PROCESS_STATS)
+ PVRSRVStatsUpdateZSBufferStats(0,1,psZSBuffer->owner);
+#endif
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (Populate)", ui32ZSBufferID));
+ }
+}
+
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID)
+{
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+ RGXFWIF_KCCB_CMD sTACCBCmd;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psDevInfo);
+
+ /* scan all deferred allocations */
+ psZSBuffer = FindZSBuffer(psDevInfo, ui32ZSBufferID);
+
+ if (psZSBuffer)
+ {
+ /* Unpopulate ZLS */
+ eError = RGXUnbackingZSBuffer(psZSBuffer);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"UnPopulating ZS-Buffer failed failed with error %u (ID = 0x%08x)", eError, ui32ZSBufferID));
+ PVR_ASSERT(IMG_FALSE);
+ }
+
+ /* send confirmation */
+ sTACCBCmd.eCmdType = RGXFWIF_KCCB_CMD_ZSBUFFER_UNBACKING_UPDATE;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.sZSBufferFWDevVAddr.ui32Addr = psZSBuffer->sZSBufferFWDevVAddr.ui32Addr;
+ sTACCBCmd.uCmdData.sZSBufferBackingData.bDone = IMG_TRUE;
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psDevInfo,
+ RGXFWIF_DM_TA,
+ &sTACCBCmd,
+ sizeof(sTACCBCmd),
+ 0,
+ PDUMP_FLAGS_NONE);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ /* Kernel CCB should never fill up, as the FW is processing them right away */
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"ZS Buffer Lookup for ZS Buffer ID 0x%08x failed (UnPopulate)", ui32ZSBufferID));
+ }
+}
+
+static
+PVRSRV_ERROR _CreateTAContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_DEV_VIRTADDR sVDMCallStackAddr,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RC_TA_DATA *psTAData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_TACTX_STATE *psContextState;
+ PVRSRV_ERROR eError;
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware TA context suspend state");
+
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_TACTX_STATE),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwTAContextState",
+ &psTAData->psContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_tacontextsuspendalloc;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psTAData->psContextStateMemDesc,
+ (void **)&psContextState);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to map firmware render context state (%u)",
+ eError));
+ goto fail_suspendcpuvirtacquire;
+ }
+ psContextState->uTAReg_VDM_CALL_STACK_POINTER_Init = sVDMCallStackAddr.uiAddr;
+ DevmemReleaseCpuVirtAddr(psTAData->psContextStateMemDesc);
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_TA,
+ RGXFWIF_DM_TA,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ psTAData->psContextStateMemDesc,
+ RGX_TA_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psTAData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init TA fw common context (%u)",
+ eError));
+ goto fail_tacommoncontext;
+ }
+
+ /*
+ * Dump the FW 3D context suspend state buffer
+ */
+ PDUMPCOMMENT("Dump the TA context suspend state buffer");
+ DevmemPDumpLoadMem(psTAData->psContextStateMemDesc,
+ 0,
+ sizeof(RGXFWIF_TACTX_STATE),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ psTAData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+ fail_tacommoncontext:
+ fail_suspendcpuvirtacquire:
+ DevmemFwFree(psDevInfo, psTAData->psContextStateMemDesc);
+ fail_tacontextsuspendalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+static
+PVRSRV_ERROR _Create3DContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psAllocatedMemDesc,
+ IMG_UINT32 ui32AllocatedOffset,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_RC_3D_DATA *ps3DData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ IMG_UINT uiNumISPStoreRegs = 1; /* default value 1 expected */
+ IMG_UINT ui3DRegISPStateStoreSize = 0;
+
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware 3D context suspend state");
+
+ if(!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+ {
+ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
+ }
+
+ /* Size of the CS buffer */
+ /* Calculate the size of the 3DCTX ISP state */
+ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+ uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
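+
+	/*
+	 * Flexible-array style sizing: the base 3D context state plus one
+	 * au3DReg_ISP_STORE slot per ISP pipe (just the single default slot
+	 * when the XE memory hierarchy feature is present).
+	 */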
+
+ eError = DevmemFwAllocate(psDevInfo,
+ ui3DRegISPStateStoreSize,
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "Fw3DContextState",
+ &ps3DData->psContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU context suspend state (%u)",
+ eError));
+ goto fail_3dcontextsuspendalloc;
+ }
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_3D,
+ RGXFWIF_DM_3D,
+ psAllocatedMemDesc,
+ ui32AllocatedOffset,
+ psFWMemContextMemDesc,
+ ps3DData->psContextStateMemDesc,
+ RGX_3D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &ps3DData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to init 3D fw common context (%u)",
+ eError));
+ goto fail_3dcommoncontext;
+ }
+
+ /*
+ * Dump the FW 3D context suspend state buffer
+ */
+ PDUMPCOMMENT("Dump the 3D context suspend state buffer");
+ DevmemPDumpLoadMem(ps3DData->psContextStateMemDesc,
+ 0,
+ sizeof(RGXFWIF_3DCTX_STATE),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ ps3DData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+ fail_3dcommoncontext:
+ DevmemFwFree(psDevInfo, ps3DData->psContextStateMemDesc);
+ fail_3dcontextsuspendalloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXCreateRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sVDMCallStackAddr,
+ IMG_UINT32 ui32FrameworkRegisterSize,
+ IMG_PBYTE pabyFrameworkRegisters,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_SERVER_RENDER_CONTEXT *psRenderContext;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+
+ /* Prepare cleanup structure */
+ *ppsRenderContext = NULL;
+ psRenderContext = OSAllocZMem(sizeof(*psRenderContext));
+ if (psRenderContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psRenderContext->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock;
+ }
+#endif
+
+ psRenderContext->psDeviceNode = psDeviceNode;
+
+ /*
+ Create the FW render context, this has the TA and 3D FW common
+ contexts embedded within it
+ */
+ eError = DevmemFwAllocate(psDevInfo,
+ sizeof(RGXFWIF_FWRENDERCONTEXT),
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwRenderContext",
+ &psRenderContext->psFWRenderContextMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_fwrendercontext;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
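+ /* Initialise workload estimation tracking for this render context */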
+ WorkEstInit(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psRenderContext->psCleanupSync,
+ "ta3d render context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate cleanup sync (0x%x)",
+ eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psRenderContext->psFWFrameworkMemDesc,
+ ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to allocate firmware GPU framework state (%u)",
+ eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psRenderContext->psFWFrameworkMemDesc,
+ pabyFrameworkRegisters,
+ ui32FrameworkRegisterSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXCreateRenderContextKM: Failed to populate the framework buffer (%u)",
+ eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psRenderContext->psFWFrameworkMemDesc;
+
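+ /* Initialise the TA and 3D common contexts in place at their offsets within the FW render context */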
+ eError = _CreateTAContext(psConnection,
+ psDeviceNode,
+ psRenderContext->psFWRenderContextMemDesc,
+ offsetof(RGXFWIF_FWRENDERCONTEXT, sTAContext),
+ psFWMemContextMemDesc,
+ sVDMCallStackAddr,
+ ui32Priority,
+ &sInfo,
+ &psRenderContext->sTAData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_tacontext;
+ }
+
+ eError = _Create3DContext(psConnection,
+ psDeviceNode,
+ psRenderContext->psFWRenderContextMemDesc,
+ offsetof(RGXFWIF_FWRENDERCONTEXT, s3DContext),
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psRenderContext->s3DData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcontext;
+ }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ psRenderContext->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+ "rogue-ta3d");
+ if (IS_ERR(psRenderContext->psBufferSyncContext))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to create buffer_sync context (err=%ld)",
+ __func__, PTR_ERR(psRenderContext->psBufferSyncContext)));
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_buffer_sync_context_create;
+ }
+#endif
+
+ SyncAddrListInit(&psRenderContext->sSyncAddrListTAFence);
+ SyncAddrListInit(&psRenderContext->sSyncAddrListTAUpdate);
+ SyncAddrListInit(&psRenderContext->sSyncAddrList3DFence);
+ SyncAddrListInit(&psRenderContext->sSyncAddrList3DUpdate);
+
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+ }
+
+ *ppsRenderContext = psRenderContext;
+ return PVRSRV_OK;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ fail_buffer_sync_context_create:
+ _Destroy3DContext(&psRenderContext->s3DData,
+ psRenderContext->psDeviceNode,
+ psRenderContext->psCleanupSync);
+#endif
+ fail_3dcontext:
+ _DestroyTAContext(&psRenderContext->sTAData,
+ psDeviceNode,
+ psRenderContext->psCleanupSync);
+ fail_tacontext:
+ fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+ fail_frameworkcreate:
+ SyncPrimFree(psRenderContext->psCleanupSync);
+ fail_syncalloc:
+ DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+ fail_fwrendercontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psRenderContext->hLock);
+ fail_lock:
+#endif
+ OSFreeMem(psRenderContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+
+ return eError;
+}
+
+/*
+ * PVRSRVRGXDestroyRenderContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psRenderContext->psDeviceNode->pvDevice;
+ RGXFWIF_FWRENDERCONTEXT *psFWRenderContext;
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ IMG_UINT32 ui32WorkEstCCBSubmitted;
+#endif
+
+ /* Remove the node from the list before calling destroy: a successful
+ * destroy will invalidate the node, so it must be re-added if destroy
+ * fails.
+ */
+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+ dllist_remove_node(&(psRenderContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(psRenderContext->psBufferSyncContext);
+#endif
+
+ /* Cleanup the TA if we haven't already */
+ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_TA_COMPLETE) == 0)
+ {
+ eError = _DestroyTAContext(&psRenderContext->sTAData,
+ psRenderContext->psDeviceNode,
+ psRenderContext->psCleanupSync);
+ if (eError == PVRSRV_OK)
+ {
+ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_TA_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+ /* Cleanup the 3D if we haven't already */
+ if ((psRenderContext->ui32CleanupStatus & RC_CLEANUP_3D_COMPLETE) == 0)
+ {
+ eError = _Destroy3DContext(&psRenderContext->s3DData,
+ psRenderContext->psDeviceNode,
+ psRenderContext->psCleanupSync);
+ if (eError == PVRSRV_OK)
+ {
+ psRenderContext->ui32CleanupStatus |= RC_CLEANUP_3D_COMPLETE;
+ }
+ else
+ {
+ goto e0;
+ }
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+ (void **)&psFWRenderContext);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+ eError));
+ goto e0;
+ }
+
+ ui32WorkEstCCBSubmitted = psFWRenderContext->ui32WorkEstCCBSubmitted;
+
+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+
+ /* Check if all of the workload estimation CCB commands for this workload are read */
+ if (ui32WorkEstCCBSubmitted != psRenderContext->sWorkEstData.ui32WorkEstCCBReceived)
+ {
+ eError = PVRSRV_ERROR_RETRY;
+ goto e0;
+ }
+#endif
+
+ /*
+ Only if both TA and 3D contexts have been cleaned up can we
+ free the shared resources
+ */
+ if (psRenderContext->ui32CleanupStatus == (RC_CLEANUP_3D_COMPLETE | RC_CLEANUP_TA_COMPLETE))
+ {
+
+ /* Update SPM statistics */
+ eError = DevmemAcquireCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc,
+ (void **)&psFWRenderContext);
+ if (eError == PVRSRV_OK)
+ {
+ DevmemReleaseCpuVirtAddr(psRenderContext->psFWRenderContextMemDesc);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXDestroyRenderContextKM: Failed to map firmware render context (%u)",
+ eError));
+ }
+
+ /* Free the framework buffer */
+ DevmemFwFree(psDevInfo, psRenderContext->psFWFrameworkMemDesc);
+
+ /* Free the firmware render context */
+ DevmemFwFree(psDevInfo, psRenderContext->psFWRenderContextMemDesc);
+
+ /* Free the cleanup sync */
+ SyncPrimFree(psRenderContext->psCleanupSync);
+
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAFence);
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrListTAUpdate);
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DFence);
+ SyncAddrListDeinit(&psRenderContext->sSyncAddrList3DUpdate);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ WorkEstDeInit(psDevInfo, &psRenderContext->sWorkEstData);
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psRenderContext->hLock);
+#endif
+
+ OSFreeMem(psRenderContext);
+ }
+
+ return PVRSRV_OK;
+
+ e0:
+ OSWRLockAcquireWrite(psDevInfo->hRenderCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sRenderCtxtListHead), &(psRenderContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hRenderCtxListLock);
+ return eError;
+}
+
+
+/* TODO: these helper arrays were local on the stack and managed to blow the
+ * kernel stack. This 46-argument function needs to be sorted out.
+ */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+static RGX_CCB_CMD_HELPER_DATA gasTACmdHelperData[CCB_CMD_HELPER_NUM_TA_COMMANDS];
+static RGX_CCB_CMD_HELPER_DATA gas3DCmdHelperData[CCB_CMD_HELPER_NUM_3D_COMMANDS];
+#endif
+
+/*
+ * PVRSRVRGXKickTA3DKM
+ */
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientTAFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock,
+ IMG_UINT32 *paui32ClientTAFenceSyncOffset,
+ IMG_UINT32 *paui32ClientTAFenceValue,
+ IMG_UINT32 ui32ClientTAUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientTAUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32ClientTAUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientTAUpdateValue,
+ IMG_UINT32 ui32ServerTASyncPrims,
+ IMG_UINT32 *paui32ServerTASyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerTASyncs,
+ IMG_UINT32 ui32Client3DFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DFenceSyncPrimBlock,
+ IMG_UINT32 *paui32Client3DFenceSyncOffset,
+ IMG_UINT32 *paui32Client3DFenceValue,
+ IMG_UINT32 ui32Client3DUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32Client3DUpdateSyncOffset,
+ IMG_UINT32 *paui32Client3DUpdateValue,
+ IMG_UINT32 ui32Server3DSyncPrims,
+ IMG_UINT32 *paui32Server3DSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServer3DSyncs,
+ SYNC_PRIMITIVE_BLOCK *psPRFenceSyncPrimBlock,
+ IMG_UINT32 ui32PRFenceSyncOffset,
+ IMG_UINT32 ui32PRFenceValue,
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ PVRSRV_FENCE iCheckTAFence,
+ PVRSRV_TIMELINE iUpdateTATimeline,
+ PVRSRV_FENCE *piUpdateTAFence,
+ IMG_CHAR szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH],
+#else
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH],
+#endif
+ PVRSRV_FENCE iCheck3DFence,
+ PVRSRV_TIMELINE iUpdate3DTimeline,
+ PVRSRV_FENCE *piUpdate3DFence,
+ IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+ IMG_UINT32 ui32TACmdSize,
+ IMG_PBYTE pui8TADMCmd,
+ IMG_UINT32 ui323DPRCmdSize,
+ IMG_PBYTE pui83DPRDMCmd,
+ IMG_UINT32 ui323DCmdSize,
+ IMG_PBYTE pui83DDMCmd,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_BOOL bLastTAInScene,
+ IMG_BOOL bKickTA,
+ IMG_BOOL bKickPR,
+ IMG_BOOL bKick3D,
+ IMG_BOOL bAbort,
+ IMG_UINT32 ui32PDumpFlags,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer,
+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer,
+ IMG_BOOL bCommitRefCountsTA,
+ IMG_BOOL bCommitRefCounts3D,
+ IMG_BOOL *pbCommittedRefCountsTA,
+ IMG_BOOL *pbCommittedRefCounts3D,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs,
+ IMG_UINT32 ui32RenderTargetSize,
+ IMG_UINT32 ui32NumberOfDrawCalls,
+ IMG_UINT32 ui32NumberOfIndices,
+ IMG_UINT32 ui32NumberOfMRTs,
+ IMG_UINT64 ui64DeadlineInus,
+ IMG_DEV_VIRTADDR sRobustnessResetReason)
+{
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ /* if the bridge lock is present then we use the singular/global helper structures */
+ RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = gasTACmdHelperData;
+ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = gas3DCmdHelperData;
+#else
+ /* if there is no bridge lock then we use the per-context helper structures */
+ RGX_CCB_CMD_HELPER_DATA *pasTACmdHelperData = psRenderContext->asTACmdHelperData;
+ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelperData = psRenderContext->as3DCmdHelperData;
+#endif
+
+ IMG_UINT32 ui32TACmdCount = 0;
+ IMG_UINT32 ui323DCmdCount = 0;
+ IMG_UINT32 ui32TACmdOffset = 0;
+ IMG_UINT32 ui323DCmdOffset = 0;
+ RGXFWIF_UFO sPRUFO;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+#if !defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+#endif
+ IMG_UINT32 ui32IntJobRef;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+ IMG_UINT32 ui32ClientPRUpdateCount = 0;
+ PRGXFWIF_UFO_ADDR *pauiClientPRUpdateUFOAddress = NULL;
+ IMG_UINT32 *paui32ClientPRUpdateValue = NULL;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ PRGXFWIF_UFO_ADDR *pauiClientTAFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR *pauiClientTAUpdateUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR *pauiClient3DFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR *pauiClient3DUpdateUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR uiPRFenceUFOAddress;
+
+#if !defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+#else
+ IMG_UINT64 uiCheckTAFenceUID = 0;
+ IMG_UINT64 uiCheck3DFenceUID = 0;
+ IMG_UINT64 uiUpdateTAFenceUID = 0;
+ IMG_UINT64 uiUpdate3DFenceUID = 0;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL) || defined(SUPPORT_BUFFER_SYNC)
+ IMG_BOOL bTAFenceOnSyncCheckpointsOnly = IMG_FALSE;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ IMG_BOOL b3DFenceOnSyncCheckpointsOnly = IMG_FALSE;
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ IMG_UINT32 ui32TAFenceTimelineUpdateValue = 0;
+ IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+
+ /*
+ * Count of the number of TA and 3D update values (may differ from number of
+ * TA and 3D updates later, as sync checkpoints do not need to specify a value)
+ */
+ IMG_UINT32 ui32ClientPRUpdateValueCount = 0;
+ IMG_UINT32 ui32ClientTAUpdateValueCount = ui32ClientTAUpdateCount;
+ IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount;
+ PSYNC_CHECKPOINT *apsFenceTASyncCheckpoints = NULL; /*!< TA fence checkpoints */
+ PSYNC_CHECKPOINT *apsFence3DSyncCheckpoints = NULL; /*!< 3D fence checkpoints */
+ IMG_UINT32 ui32FenceTASyncCheckpointCount = 0;
+ IMG_UINT32 ui32Fence3DSyncCheckpointCount = 0;
+ PSYNC_CHECKPOINT psUpdateTASyncCheckpoint = NULL; /*!< TA update checkpoint (output) */
+ PSYNC_CHECKPOINT psUpdate3DSyncCheckpoint = NULL; /*!< 3D update checkpoint (output) */
+ PVRSRV_CLIENT_SYNC_PRIM *psTAFenceTimelineUpdateSync = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+ void *pvTAUpdateFenceFinaliseData = NULL;
+ void *pv3DUpdateFenceFinaliseData = NULL;
+
+ RGX_SYNC_DATA sTASyncData = {0}; /*!< Contains internal update syncs for TA */
+ RGX_SYNC_DATA s3DSyncData = {0}; /*!< Contains internal update syncs for 3D */
+
+ PVRSRV_FENCE iUpdateTAFence = PVRSRV_NO_FENCE;
+ PVRSRV_FENCE iUpdate3DFence = PVRSRV_NO_FENCE;
+
+#else
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ /*
+ * Count of the number of TA and 3D update values (may differ from number of
+ * TA and 3D updates later, as sync checkpoints do not need to specify a value)
+ */
+ IMG_UINT32 ui32ClientPRUpdateValueCount = 0;
+ IMG_UINT32 ui32Client3DUpdateValueCount = ui32Client3DUpdateCount;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ void *pvUpdateFenceFinaliseData = NULL;
+
+#endif /* PVRSRV_SYNC_SEPARATE_TIMELINES */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if defined(SUPPORT_BUFFER_SYNC)
+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickDataTA;
+ RGXFWIF_WORKEST_KICK_DATA sWorkloadKickData3D;
+ IMG_UINT32 ui32TACommandOffset = 0;
+ IMG_UINT32 ui323DCommandOffset = 0;
+ IMG_UINT32 ui32TACmdHeaderOffset = 0;
+ IMG_UINT32 ui323DCmdHeaderOffset = 0;
+ IMG_UINT32 ui323DFullRenderCommandOffset = 0;
+ IMG_UINT32 ui32TACmdOffsetWrapCheck = 0;
+ IMG_UINT32 ui323DCmdOffsetWrapCheck = 0;
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if !defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ PVR_UNREFERENCED_PARAMETER(iCheck3DFence);
+ PVR_UNREFERENCED_PARAMETER(iUpdate3DTimeline);
+ PVR_UNREFERENCED_PARAMETER(piUpdate3DFence);
+ PVR_UNREFERENCED_PARAMETER(szFenceName3D);
+#endif
+
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iUpdateTATimeline >= 0 && !piUpdateTAFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (iUpdate3DTimeline >= 0 && !piUpdate3DFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ if (iUpdateTATimeline >= 0 || iUpdate3DTimeline >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (TA=%d, 3D=%d) in non-supporting driver",
+ __func__, iUpdateTATimeline, iUpdate3DTimeline));
+ }
+ if (iCheckTAFence >= 0 || iCheck3DFence >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (TA=%d, 3D=%d) in non-supporting driver",
+ __func__, iCheckTAFence, iCheck3DFence));
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceNameTA[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+ szFenceName3D[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+#else
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ if (iUpdateTimeline >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing update timeline (%d) in non-supporting driver",
+ __func__, iUpdateTimeline));
+ }
+ if (iCheckFence >= 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: Providing check fence (%d) in non-supporting driver",
+ __func__, iCheckFence));
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceName[PVRSRV_SYNC_NAME_LENGTH-1] = '\0';
+#endif /* PVRSRV_SYNC_SEPARATE_TIMELINES */
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ sWorkloadKickDataTA.ui64ReturnDataIndex = 0;
+ sWorkloadKickDataTA.ui64CyclesPrediction = 0;
+ sWorkloadKickData3D.ui64ReturnDataIndex = 0;
+ sWorkloadKickData3D.ui64CyclesPrediction = 0;
+#endif
+
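+ /* Generate a unique internal job reference for this kick */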
+ ui32IntJobRef = OSAtomicIncrement(&psRenderContext->hIntJobRef);
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d", __FUNCTION__,
+ ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount));
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ServerTASyncPrims=%d, ui32Server3DSyncPrims=%d", __FUNCTION__, ui32ServerTASyncPrims, ui32Server3DSyncPrims));
+
+ *pbCommittedRefCountsTA = IMG_FALSE;
+ *pbCommittedRefCounts3D = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRenderContext->hLock);
+#endif
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAFence, %d fences)...", __func__, ui32ClientTAFenceCount));
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAFence,
+ ui32ClientTAFenceCount,
+ apsClientTAFenceSyncPrimBlock,
+ paui32ClientTAFenceSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list_ta_fence;
+ }
+
+ if (ui32ClientTAFenceCount)
+ {
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientTAFenceUFOAddress=<%p> ", __func__, (void*)pauiClientTAFenceUFOAddress));
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrListTAUpdate, %d updates)...", __func__, ui32ClientTAUpdateCount));
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrListTAUpdate,
+ ui32ClientTAUpdateCount,
+ apsClientTAUpdateSyncPrimBlock,
+ paui32ClientTAUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list_ta_update;
+ }
+
+ if (ui32ClientTAUpdateCount)
+ {
+ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientTAUpdateUFOAddress=<%p> ", __func__, (void*)pauiClientTAUpdateUFOAddress));
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DFence, %d fences)...", __func__, ui32Client3DFenceCount));
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DFence,
+ ui32Client3DFenceCount,
+ apsClient3DFenceSyncPrimBlock,
+ paui32Client3DFenceSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list_3d_fence;
+ }
+
+ if (ui32Client3DFenceCount)
+ {
+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DFenceUFOAddress=<%p> ", __func__, (void*)pauiClient3DFenceUFOAddress));
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psRenderContext->sSyncAddrList3DUpdate, %d updates)...", __func__, ui32Client3DUpdateCount));
+ eError = SyncAddrListPopulate(&psRenderContext->sSyncAddrList3DUpdate,
+ ui32Client3DUpdateCount,
+ apsClient3DUpdateSyncPrimBlock,
+ paui32Client3DUpdateSyncOffset);
+ if (eError != PVRSRV_OK)
+ {
+ goto err_populate_sync_addr_list_3d_update;
+ }
+
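+ /* The 3D update UFO address is also needed when an output fence is requested on the 3D timeline, even if the client supplied no 3D updates */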
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (ui32Client3DUpdateCount || (iUpdate3DTimeline != PVRSRV_NO_TIMELINE && piUpdate3DFence && bKick3D))
+#else
+ if (ui32Client3DUpdateCount)
+#endif
+ {
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClient3DUpdateUFOAddress=<%p> ", __func__, (void*)pauiClient3DUpdateUFOAddress));
+
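+ /* Look up the FW address of the PR fence sync prim */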
+ eError = SyncPrimitiveBlockToFWAddr(psPRFenceSyncPrimBlock, ui32PRFenceSyncOffset, &uiPRFenceUFOAddress);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto err_pr_fence_address;
+ }
+
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+ IMG_UINT32 *pui32TmpClientTAFenceValue = paui32ClientTAFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpClientTAUpdateUFOAddress = pauiClientTAUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpClientTAUpdateValue = paui32ClientTAUpdateValue;
+ PRGXFWIF_UFO_ADDR *psTmpClient3DFenceUFOAddress = pauiClient3DFenceUFOAddress;
+ IMG_UINT32 *pui32TmpClient3DFenceValue = paui32Client3DFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpClient3DUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpClient3DUpdateValue = paui32Client3DUpdateValue;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After populating sync prims ~~~", __FUNCTION__));
+
+ /* Dump Fence syncs, Update syncs and PR Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", __FUNCTION__, ui32ClientTAFenceCount));
+ for (ii=0; ii<ui32ClientTAFenceCount; ii++)
+ {
+ if (psTmpClientTAFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr, *pui32TmpClientTAFenceValue, *pui32TmpClientTAFenceValue));
+ pui32TmpClientTAFenceValue++;
+ }
+ psTmpClientTAFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", __FUNCTION__, ui32ClientTAUpdateCount));
+ for (ii=0; ii<ui32ClientTAUpdateCount; ii++)
+ {
+ if (psTmpClientTAUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr, *pui32TmpClientTAUpdateValue));
+ pui32TmpClientTAUpdateValue++;
+ }
+ psTmpClientTAUpdateUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", __FUNCTION__, ui32Client3DFenceCount));
+ for (ii=0; ii<ui32Client3DFenceCount; ii++)
+ {
+ if (psTmpClient3DFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr, *pui32TmpClient3DFenceValue));
+ pui32TmpClient3DFenceValue++;
+ }
+ psTmpClient3DFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", __FUNCTION__, ui32Client3DUpdateCount));
+ for (ii=0; ii<ui32Client3DUpdateCount; ii++)
+ {
+ if (psTmpClient3DUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr, *pui32TmpClient3DUpdateValue));
+ pui32TmpClient3DUpdateValue++;
+ }
+ psTmpClient3DUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ /* Sanity check the server fences */
+ for (i=0; i<ui32ServerTASyncPrims; i++)
+ {
+ if (!(paui32ServerTASyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on TA) must fence", __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ for (i=0; i<ui32Server3DSyncPrims; i++)
+ {
+ if (!(paui32Server3DSyncFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Server fence (on 3D) must fence", __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
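+ /* Fetch the pre/post timestamp and RMW UFO addresses used when the CCB commands are built */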
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psRenderContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ /*
+ Sanity check we have a PR kick if there are client or server fences
+ */
+ if (!bKickPR && ((ui32Client3DFenceCount != 0) || (ui32Server3DSyncPrims != 0)))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: 3D fence (client or server) passed without a PR kick", __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
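+ /* Resolve any buffer sync PMRs into fence sync checkpoints and a single update checkpoint */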
+ if (ui32SyncPMRCount)
+ {
+#if defined(SUPPORT_BUFFER_SYNC)
+ int err;
+
+ if (!bKickTA)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a TA",
+ __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (!bKickPR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync only supported for kicks including a PR",
+ __FUNCTION__));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __FUNCTION__));
+ err = pvr_buffer_sync_resolve_and_create_fences(psRenderContext->psBufferSyncContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ &ui32BufferFenceSyncCheckpointCount,
+ &apsBufferFenceSyncCheckpoints,
+ &psBufferUpdateSyncCheckpoint,
+ &psBufferSyncData);
+ if (err)
+ {
+ eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%d)", __FUNCTION__, eError));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return eError;
+ }
+
+ /* Append buffer sync fences to TA fences */
+ if (ui32BufferFenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TA Fence (&psRenderContext->sSyncAddrListTAFence=<%p>, pauiClientTAFenceUFOAddress=<%p>)...", __FUNCTION__, ui32BufferFenceSyncCheckpointCount, (void*)&psRenderContext->sSyncAddrListTAFence , (void*)pauiClientTAFenceUFOAddress));
+ SyncAddrListAppendAndDeRefCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+ ui32BufferFenceSyncCheckpointCount,
+ apsBufferFenceSyncCheckpoints);
+ if (!pauiClientTAFenceUFOAddress)
+ {
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+ }
+ if (ui32ClientTAFenceCount == 0)
+ {
+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+ ui32ClientTAFenceCount += ui32BufferFenceSyncCheckpointCount;
+ }
+
+ if (psBufferUpdateSyncCheckpoint)
+ {
+ /* If we have a 3D kick append update to the 3D updates else append to the PR update */
+ if (bKick3D)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to 3D Update (&psRenderContext->sSyncAddrList3DUpdate=<%p>, pauiClient3DUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)psBufferUpdateSyncCheckpoint, (void*)&psRenderContext->sSyncAddrList3DUpdate , (void*)pauiClient3DUpdateUFOAddress));
+ /* Append buffer sync update to 3D updates */
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+ 1,
+ &psBufferUpdateSyncCheckpoint);
+ if (!pauiClient3DUpdateUFOAddress)
+ {
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ ui32Client3DUpdateCount++;
+ }
+ else
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to TA Update (&psRenderContext->sSyncAddrListTAUpdate=<%p>, pauiClient3DUpdateUFOAddress=<%p>)...", __FUNCTION__, (void*)psBufferUpdateSyncCheckpoint, (void*)&psRenderContext->sSyncAddrListTAUpdate , (void*)pauiClientTAUpdateUFOAddress));
+ /* Append buffer sync update to TA updates */
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate,
+ 1,
+ &psBufferUpdateSyncCheckpoint);
+ if (!pauiClientTAUpdateUFOAddress)
+ {
+ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+ }
+ ui32ClientTAUpdateCount++;
+ }
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after buffer_sync) ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount, ui32ClientPRUpdateCount));
+
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __FUNCTION__, ui32SyncPMRCount));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+ }
+
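+ /* With the native fence sync model, resolve input fences into sync checkpoints and create the requested output fences */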
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ if (iCheckTAFence >= 0 || iUpdateTATimeline >= 0 ||
+ iCheck3DFence >= 0 || iUpdate3DTimeline >= 0)
+ {
+ PRGXFWIF_UFO_ADDR *pauiClientTAIntUpdateUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR *pauiClient3DIntUpdateUFOAddress = NULL;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: [TA] iCheckFence = %d, iUpdateTimeline = %d", __FUNCTION__, iCheckTAFence, iUpdateTATimeline));
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: [3D] iCheckFence = %d, iUpdateTimeline = %d", __FUNCTION__, iCheck3DFence, iUpdate3DTimeline));
+
+ if (!bKickTA)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs only supported for kicks including a TA",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_fdsync;
+ }
+ if (!bKickPR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs require a PR for all kicks",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_fdsync;
+ }
+
+ if (iCheckTAFence != PVRSRV_NO_FENCE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[TA] (iCheckFence=%d), psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, \
+ iCheckTAFence, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckTAFence,
+ &ui32FenceTASyncCheckpointCount,
+ &apsFenceTASyncCheckpoints,
+ &uiCheckTAFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+ goto fail_resolve_input_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, \
+ iCheckTAFence, ui32FenceTASyncCheckpointCount, (void*)apsFenceTASyncCheckpoints));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ if (apsFenceTASyncCheckpoints)
+ {
+ _DebugSyncCheckpoints(__FUNCTION__, "TA", apsFenceTASyncCheckpoints, ui32FenceTASyncCheckpointCount);
+ }
+#endif
+ }
+
+ if (iCheck3DFence != PVRSRV_NO_FENCE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence[3D] (iCheckFence=%d), psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__, \
+ iCheck3DFence, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+ iCheck3DFence,
+ &ui32Fence3DSyncCheckpointCount,
+ &apsFence3DSyncCheckpoints,
+ &uiCheck3DFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+ goto fail_resolve_input_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__, \
+ iCheck3DFence, ui32Fence3DSyncCheckpointCount, (void*)apsFence3DSyncCheckpoints));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ if (apsFence3DSyncCheckpoints)
+ {
+ _DebugSyncCheckpoints(__FUNCTION__, "3D", apsFence3DSyncCheckpoints, ui32Fence3DSyncCheckpointCount);
+ }
+#endif
+ }
+
+ {
+ /* Create the output fence for TA (if required) */
+ if (iUpdateTATimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence[TA] (iUpdateFence=%d, iUpdateTimeline=%d, psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, \
+ iUpdateTAFence, iUpdateTATimeline, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+ szFenceNameTA,
+ iUpdateTATimeline,
+ psRenderContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateTAFence,
+ &uiUpdateTAFenceUID,
+ &pvTAUpdateFenceFinaliseData,
+ &psUpdateTASyncCheckpoint,
+ (void*)&psTAFenceTimelineUpdateSync,
+ &ui32TAFenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[TA] failed (%d)", __FUNCTION__, eError));
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence[TA] (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=0x%x)", __FUNCTION__, \
+ iUpdateTAFence, (void*)psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue));
+
+ /* Store the FW address of the update sync checkpoint in pauiClientTAIntUpdateUFOAddress */
+ pauiClientTAIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateTASyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientIntUpdateUFOAddress[TA]->ui32Addr=0x%x", __FUNCTION__, \
+ pauiClientTAIntUpdateUFOAddress->ui32Addr));
+ }
+
+ /* Append the sync prim update for the TA timeline (if required) */
+ if (psTAFenceTimelineUpdateSync)
+ {
+ sTASyncData.ui32ClientUpdateCount = ui32ClientTAUpdateCount;
+ sTASyncData.ui32ClientUpdateValueCount = ui32ClientTAUpdateValueCount;
+ sTASyncData.ui32ClientPRUpdateValueCount = (bKick3D) ? 0 : ui32ClientPRUpdateValueCount;
+ sTASyncData.paui32ClientUpdateValue = paui32ClientTAUpdateValue;
+
+ eError = RGXSyncAppendTimelineUpdate(ui32TAFenceTimelineUpdateValue,
+ &psRenderContext->sSyncAddrListTAUpdate,
+ (bKick3D) ? NULL : &psRenderContext->sSyncAddrList3DUpdate,
+ psTAFenceTimelineUpdateSync,
+ &sTASyncData,
+ bKick3D);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_alloc_update_values_mem_TA;
+ }
+
+ paui32ClientTAUpdateValue = sTASyncData.paui32ClientUpdateValue;
+ ui32ClientTAUpdateValueCount = sTASyncData.ui32ClientUpdateValueCount;
+ pauiClientTAUpdateUFOAddress = sTASyncData.pauiClientUpdateUFOAddress;
+ ui32ClientTAUpdateCount = sTASyncData.ui32ClientUpdateCount;
+ }
+
+ /* Create the output fence for 3D (if required) */
+ if (iUpdate3DTimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence[3D] (iUpdateFence=%d, iUpdateTimeline=%d, psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, \
+ iUpdate3DFence, iUpdate3DTimeline, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+ szFenceName3D,
+ iUpdate3DTimeline,
+ psRenderContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdate3DFence,
+ &uiUpdate3DFenceUID,
+ &pv3DUpdateFenceFinaliseData,
+ &psUpdate3DSyncCheckpoint,
+ (void*)&ps3DFenceTimelineUpdateSync,
+ &ui323DFenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence[3D] failed (%d)", __FUNCTION__, eError));
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence[3D] (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=0x%x)", __FUNCTION__, \
+ iUpdate3DFence, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue));
+
+ /* Store the FW address of the update sync checkpoint in pauiClient3DIntUpdateUFOAddress */
+ pauiClient3DIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdate3DSyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientIntUpdateUFOAddress[3D]->ui32Addr=0x%x", __FUNCTION__, \
+ pauiClient3DIntUpdateUFOAddress->ui32Addr));
+ }
+
+ /* Append the sync prim update for the 3D timeline (if required) */
+ if (ps3DFenceTimelineUpdateSync)
+ {
+ s3DSyncData.ui32ClientUpdateCount = ui32Client3DUpdateCount;
+ s3DSyncData.ui32ClientUpdateValueCount = ui32Client3DUpdateValueCount;
+ s3DSyncData.ui32ClientPRUpdateValueCount = ui32ClientPRUpdateValueCount;
+ s3DSyncData.paui32ClientUpdateValue = paui32Client3DUpdateValue;
+
+ eError = RGXSyncAppendTimelineUpdate(ui323DFenceTimelineUpdateValue,
+ &psRenderContext->sSyncAddrList3DUpdate,
+ &psRenderContext->sSyncAddrList3DUpdate, /*!< PR update: is this required? */
+ ps3DFenceTimelineUpdateSync,
+ &s3DSyncData,
+ bKick3D);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_alloc_update_values_mem_3D;
+ }
+
+ /* FIXME: can this be optimised? */
+ paui32Client3DUpdateValue = s3DSyncData.paui32ClientUpdateValue;
+ ui32Client3DUpdateValueCount = s3DSyncData.ui32ClientUpdateValueCount;
+ pauiClient3DUpdateUFOAddress = s3DSyncData.pauiClientUpdateUFOAddress;
+ ui32Client3DUpdateCount = s3DSyncData.ui32ClientUpdateCount;
+
+ if (!bKick3D)
+ {
+ paui32ClientPRUpdateValue = s3DSyncData.paui32ClientPRUpdateValue;
+ ui32ClientPRUpdateValueCount = s3DSyncData.ui32ClientPRUpdateValueCount;
+ pauiClientPRUpdateUFOAddress = s3DSyncData.pauiClientPRUpdateUFOAddress;
+ ui32ClientPRUpdateCount = s3DSyncData.ui32ClientPRUpdateCount;
+ }
+ }
+
+ /* If TA command present, attach TA synchronisation checks and updates */
+ if (bKickTA)
+ {
+
+ /* Checks (from input fence) */
+ if (ui32FenceTASyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", __FUNCTION__, \
+ ui32FenceTASyncCheckpointCount, (void*)apsFenceTASyncCheckpoints));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+ ui32FenceTASyncCheckpointCount,
+ apsFenceTASyncCheckpoints);
+ if (!pauiClientTAFenceUFOAddress)
+ {
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount was %d, now %d}", __FUNCTION__, \
+ ui32ClientTAFenceCount, ui32ClientTAFenceCount+ui32FenceTASyncCheckpointCount));
+ if (ui32ClientTAFenceCount == 0)
+ {
+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+ ui32ClientTAFenceCount += ui32FenceTASyncCheckpointCount;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount now %d}", __FUNCTION__, ui32ClientTAFenceCount));
+
+ if (psUpdateTASyncCheckpoint)
+ {
+ /* Update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Update...", __FUNCTION__, \
+ (void*)psUpdateTASyncCheckpoint, SyncCheckpointGetId(psUpdateTASyncCheckpoint)));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAUpdate,
+ 1,
+ &psUpdateTASyncCheckpoint);
+ if (!pauiClientTAUpdateUFOAddress)
+ {
+ pauiClientTAUpdateUFOAddress = psRenderContext->sSyncAddrListTAUpdate.pasFWAddrs;
+ }
+ ui32ClientTAUpdateCount++;
+ }
+
+ if (!bKick3D && psUpdate3DSyncCheckpoint)
+ {
+ /* Attach update to the 3D (used for PR) Updates */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D(PR) Update...", __FUNCTION__, \
+ (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+ 1,
+ &psUpdate3DSyncCheckpoint);
+ if (!pauiClientPRUpdateUFOAddress)
+ {
+ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ ui32ClientPRUpdateCount++;
+ }
+ }
+
+ if (bKick3D)
+ {
+ /* Attach checks and updates to the 3D */
+
+ /* Checks (from input fence) */
+ if (ui32Fence3DSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to 3D Fence...", __FUNCTION__, ui32Fence3DSyncCheckpointCount));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+ ui32Fence3DSyncCheckpointCount,
+ apsFence3DSyncCheckpoints);
+ if (!pauiClient3DFenceUFOAddress)
+ {
+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32Client3DFenceCount was %d, now %d}", __FUNCTION__, \
+ ui32Client3DFenceCount, ui32Client3DFenceCount+ui32Fence3DSyncCheckpointCount));
+ if (ui32Client3DFenceCount == 0)
+ {
+ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+ ui32Client3DFenceCount += ui32Fence3DSyncCheckpointCount;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32Client3DFenceCount was %d}", __FUNCTION__, ui32Client3DFenceCount));
+
+ if (psUpdate3DSyncCheckpoint)
+ {
+ /* Update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", __FUNCTION__, \
+ (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint)));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+ 1,
+ &psUpdate3DSyncCheckpoint);
+ if (!pauiClient3DUpdateUFOAddress)
+ {
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ ui32Client3DUpdateCount++;
+ }
+ }
+
+ {
+ IMG_PID uiCurrentProcess = OSGetCurrentClientProcessIDKM();
+
+ /* Search for foreign or cross process dependencies */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Checking for foreign checkpoints (%d sync points)...", __FUNCTION__, \
+ ui32Fence3DSyncCheckpointCount));
+ for (i=0; i<ui32Fence3DSyncCheckpointCount; i++)
+ {
+ /* Check to see if the checkpoint was created by another
+ * process */
+ if (SyncCheckpointGetCreator(apsFence3DSyncCheckpoints[i]) != uiCurrentProcess)
+ {
+ /* 3D Sync point represents foreign or cross process
+ * dependency, copy sync point to TA command fence. */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to TA Fence...", __FUNCTION__, \
+ (void*)apsFence3DSyncCheckpoints[i], SyncCheckpointGetId(apsFence3DSyncCheckpoints[i])));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+ 1,
+ &apsFence3DSyncCheckpoints[i]);
+
+ if (!pauiClientTAFenceUFOAddress)
+ {
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount was %d, now %d}", __FUNCTION__, \
+ ui32ClientTAFenceCount, ui32ClientTAFenceCount+1));
+
+ if (ui32ClientTAFenceCount == 0)
+ {
+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+
+ ui32ClientTAFenceCount++;
+ }
+ }
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount, ui32ClientPRUpdateCount));
+ }
+
+ if (ui32ClientTAFenceCount)
+ {
+ PVR_ASSERT(pauiClientTAFenceUFOAddress);
+ if (!bTAFenceOnSyncCheckpointsOnly)
+ {
+ PVR_ASSERT(paui32ClientTAFenceValue);
+ }
+ }
+ if (ui32ClientTAUpdateCount)
+ {
+ PVR_ASSERT(pauiClientTAUpdateUFOAddress);
+ if (ui32ClientTAUpdateValueCount > 0)
+ {
+ PVR_ASSERT(paui32ClientTAUpdateValue);
+ }
+ }
+ if (ui32Client3DFenceCount)
+ {
+ PVR_ASSERT(pauiClient3DFenceUFOAddress);
+ if (!b3DFenceOnSyncCheckpointsOnly)
+ {
+ PVR_ASSERT(paui32Client3DFenceValue);
+ }
+ }
+ if (ui32Client3DUpdateCount)
+ {
+ PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+ if (ui32Client3DUpdateValueCount > 0)
+ {
+ PVR_ASSERT(paui32Client3DUpdateValue);
+ }
+ }
+ if (ui32ClientPRUpdateCount)
+ {
+ PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+ if (ui32ClientPRUpdateValueCount > 0)
+ {
+ PVR_ASSERT(paui32ClientPRUpdateValue);
+ }
+ }
+
+ }
+#else /* PVRSRV_SYNC_SEPARATE_TIMELINES */
+ if (iCheckFence >= 0 || iUpdateTimeline >= 0)
+ {
+ PRGXFWIF_UFO_ADDR *pauiClientIntUpdateUFOAddress = NULL;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: iCheckFence = %d, iUpdateTimeline = %d", __FUNCTION__, iCheckFence, iUpdateTimeline));
+
+ if (!bKickTA)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs only supported for kicks including a TA",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_fdsync;
+ }
+ if (!bKickPR)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Native syncs require a PR for all kicks",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_fdsync;
+ }
+
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __FUNCTION__,
+ iCheckFence, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psRenderContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __FUNCTION__, eError));
+ goto fail_resolve_input_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __FUNCTION__,
+ iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ if (apsFenceSyncCheckpoints)
+ {
+ _DebugSyncCheckpoints(__FUNCTION__, "", apsFenceSyncCheckpoints, ui32FenceSyncCheckpointCount);
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (iUpdateFence=%d, iUpdateTimeline=%d, psRenderContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __FUNCTION__, iUpdateFence, \
+ iUpdateTimeline, (void*)psRenderContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psRenderContext->psDeviceNode,
+ szFenceName,
+ iUpdateTimeline,
+ psRenderContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence failed (%d)", __FUNCTION__, eError));
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (iUpdateFence=%d, psFenceTimelineUpdateSync=<%p>, ui32FenceTimelineUpdateValue=0x%x)", __FUNCTION__, \
+ iUpdateFence, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+
+ /* Store the FW address of the update sync checkpoint in pauiClientIntUpdateUFOAddress */
+ pauiClientIntUpdateUFOAddress = SyncCheckpointGetRGXFWIFUFOAddr(psUpdateSyncCheckpoint);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiClientIntUpdateUFOAddress->ui32Addr=0x%x", __FUNCTION__, pauiClientIntUpdateUFOAddress->ui32Addr));
+ }
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: About to allocate memory to hold updates in pui32IntAllocatedUpdateValues(currently <%p>)", __FUNCTION__,
+ (void*)pui32IntAllocatedUpdateValues));
+ if (bKick3D)
+ {
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32Client3DUpdateValueCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, sizeof(*pui32IntAllocatedUpdateValues) * (ui32Client3DUpdateValueCount+1));
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Copying %d 3D update values into pui32IntAllocatedUpdateValues(<%p>)", __FUNCTION__,
+ ui32Client3DUpdateCount, (void*)pui32IntAllocatedUpdateValues));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32Client3DUpdateValue, ui32Client3DUpdateValueCount * sizeof(*paui32Client3DUpdateValue));
+ }
+ else
+ {
+ /* Allocate memory to hold our timeline update value (for PR update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientPRUpdateValueCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xcc, sizeof(*pui32IntAllocatedUpdateValues) * (ui32ClientPRUpdateValueCount+1));
+ }
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ if (bKick3D)
+ {
+ _DebugSyncValues(__FUNCTION__, pui32IntAllocatedUpdateValues, ui32Client3DUpdateValueCount);
+ }
+#endif
+ /* Now set the additional update value and append the timeline sync prim addr to either the
+ * render context 3D (or TA) update list
+ */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value (0x%x) to psRenderContext->sSyncAddrList%sUpdate...", __FUNCTION__, ui32FenceTimelineUpdateValue, bKick3D ? "3D" : "TA"));
+ if (bKick3D)
+ {
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32Client3DUpdateValueCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32Client3DUpdateValueCount++;
+ ui32Client3DUpdateCount++;
+ SyncAddrListAppendSyncPrim(&psRenderContext->sSyncAddrList3DUpdate,
+ psFenceTimelineUpdateSync);
+ if (!pauiClient3DUpdateUFOAddress)
+ {
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ /* Update paui32Client3DUpdateValue to point to our new list of update values */
+ paui32Client3DUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ else
+ {
+ /* Use the sSyncAddrList3DUpdate for PR (as it doesn't have one of its own) */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32ClientPRUpdateValueCount++;
+ ui32ClientPRUpdateCount++;
+ SyncAddrListAppendSyncPrim(&psRenderContext->sSyncAddrList3DUpdate,
+ psFenceTimelineUpdateSync);
+ if (!pauiClientPRUpdateUFOAddress)
+ {
+ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ /* Update paui32ClientPRUpdateValue to point to our new list of update values */
+ paui32ClientPRUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+#if defined(TA3D_CHECKPOINT_DEBUG)
+ {
+ _DebugSyncValues(__FUNCTION__, pui32IntAllocatedUpdateValues, ui32Client3DUpdateValueCount);
+ }
+#endif
+ }
+
+ CHKPT_DBG((PVR_DBG_WARNING, "Attaching sync checkpoints: Kicking TA (%d), 3D (%d), %p", bKickTA, bKick3D, pauiClient3DUpdateUFOAddress));
+
+ /* Attach 3D and TA synchronisation checks and updates as needed */
+ if (bKick3D)
+ {
+ if (bKickTA)
+ {
+ /* we have a TA and 3D, attach checks to TA, updates to 3D */
+ /* Checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TA Fence (apsFenceSyncCheckpoints=<%p>)...", __FUNCTION__, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiClientTAFenceUFOAddress)
+ {
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount was %d, now %d}", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAFenceCount+ui32FenceSyncCheckpointCount));
+ if (ui32ClientTAFenceCount == 0)
+ {
+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+ ui32ClientTAFenceCount += ui32FenceSyncCheckpointCount;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount now %d}", __FUNCTION__, ui32ClientTAFenceCount));
+
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint)));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiClient3DUpdateUFOAddress)
+ {
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ ui32Client3DUpdateCount++;
+ }
+ }
+ else
+ {
+ /* we only have 3D, attach checks and updates to the 3D */
+ /* Checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to 3D Fence...", __FUNCTION__, ui32FenceSyncCheckpointCount));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiClient3DFenceUFOAddress)
+ {
+ pauiClient3DFenceUFOAddress = psRenderContext->sSyncAddrList3DFence.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32Client3DFenceCount was %d, now %d}", __FUNCTION__, ui32Client3DFenceCount, ui32Client3DFenceCount+ui32FenceSyncCheckpointCount));
+ if (ui32Client3DFenceCount == 0)
+ {
+ b3DFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+ ui32Client3DFenceCount += ui32FenceSyncCheckpointCount;
+ }
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32Client3DFenceCount now %d}", __FUNCTION__, ui32Client3DFenceCount));
+
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Update (from output fence) */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint<%p> (ID=%d) to 3D Update...", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint)));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiClient3DUpdateUFOAddress)
+ {
+ pauiClient3DUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ ui32Client3DUpdateCount++;
+ }
+ }
+ }
+ else
+ {
+ /* we only have TA, attach checks to the TA */
+ /* Checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TA Fence...", __FUNCTION__, ui32FenceSyncCheckpointCount));
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrListTAFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiClientTAFenceUFOAddress)
+ {
+ pauiClientTAFenceUFOAddress = psRenderContext->sSyncAddrListTAFence.pasFWAddrs;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount was %d, now %d}", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAFenceCount+ui32FenceSyncCheckpointCount));
+ if (ui32ClientTAFenceCount == 0)
+ {
+ bTAFenceOnSyncCheckpointsOnly = IMG_TRUE;
+ }
+ ui32ClientTAFenceCount += ui32FenceSyncCheckpointCount;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: {ui32ClientTAFenceCount now %d}", __FUNCTION__, ui32ClientTAFenceCount));
+
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Attach update to the 3D (used for PR) Updates */
+ SyncAddrListAppendCheckpoints(&psRenderContext->sSyncAddrList3DUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiClientPRUpdateUFOAddress)
+ {
+ pauiClientPRUpdateUFOAddress = psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs;
+ }
+ ui32ClientPRUpdateCount++;
+ }
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d, ui32Client3DFenceCount=%d, ui32Client3DUpdateCount=%d, ui32ClientPRUpdateCount=%d,", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount, ui32Client3DFenceCount, ui32Client3DUpdateCount, ui32ClientPRUpdateCount));
+ }
+ if (ui32ClientTAFenceCount)
+ {
+ PVR_ASSERT(pauiClientTAFenceUFOAddress);
+ if (!bTAFenceOnSyncCheckpointsOnly)
+ {
+ PVR_ASSERT(paui32ClientTAFenceValue);
+ }
+ }
+ if (ui32ClientTAUpdateCount)
+ {
+ PVR_ASSERT(pauiClientTAUpdateUFOAddress);
+ /* We don't have TA updates from fences, so there should always be a value
+ * (fence updates are attached to the PR)
+ */
+ PVR_ASSERT(paui32ClientTAUpdateValue);
+ }
+ if (ui32Client3DFenceCount)
+ {
+ PVR_ASSERT(pauiClient3DFenceUFOAddress);
+ if (!b3DFenceOnSyncCheckpointsOnly)
+ {
+ PVR_ASSERT(paui32Client3DFenceValue);
+ }
+ }
+ if (ui32Client3DUpdateCount)
+ {
+ PVR_ASSERT(pauiClient3DUpdateUFOAddress);
+			if (ui32Client3DUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32Client3DUpdateValue);
+			}
+ }
+ if (ui32ClientPRUpdateCount)
+ {
+ PVR_ASSERT(pauiClientPRUpdateUFOAddress);
+			if (ui32ClientPRUpdateValueCount > 0)
+			{
+				PVR_ASSERT(paui32ClientPRUpdateValue);
+			}
+ }
+ }
+#endif /* PVRSRV_SYNC_SEPARATE_TIMELINES */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
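+	/* At this point the client fence/update arrays are final. The assertions
+	 * above encode the invariant that any non-zero count has a matching UFO
+	 * address array, plus a value array wherever plain sync prims (rather
+	 * than sync checkpoints, whose update value is implied) are involved.
+	 */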
+
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ClientTAFenceCount=%d, pauiClientTAFenceUFOAddress=<%p>", __func__, ui32ClientTAFenceCount, (void*)pauiClientTAFenceUFOAddress));
+	CHKPT_DBG((PVR_DBG_ERROR, "%s: ui32ClientTAUpdateCount=%d, pauiClientTAUpdateUFOAddress=<%p>", __func__, ui32ClientTAUpdateCount, (void*)pauiClientTAUpdateUFOAddress));
+#if (ENABLE_TA3D_UFO_DUMP == 1)
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpClientTAFenceUFOAddress = pauiClientTAFenceUFOAddress;
+ IMG_UINT32 *pui32TmpClientTAFenceValue = paui32ClientTAFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpClientTAUpdateUFOAddress = pauiClientTAUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpClientTAUpdateValue = paui32ClientTAUpdateValue;
+ PRGXFWIF_UFO_ADDR *psTmpClient3DFenceUFOAddress = pauiClient3DFenceUFOAddress;
+ IMG_UINT32 *pui32TmpClient3DFenceValue = paui32Client3DFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpClient3DUpdateUFOAddress = pauiClient3DUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpClient3DUpdateValue = paui32Client3DUpdateValue;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: ~~~ After appending sync checkpoints ", __FUNCTION__));
+
+ /* Dump Fence syncs, Update syncs and PR Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA fence syncs:", __FUNCTION__, ui32ClientTAFenceCount));
+ for (ii=0; ii<ui32ClientTAFenceCount; ii++)
+ {
+ if (psTmpClientTAFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientTAFenceCount, (void*)psTmpClientTAFenceUFOAddress, psTmpClientTAFenceUFOAddress->ui32Addr, *pui32TmpClientTAFenceValue, *pui32TmpClientTAFenceValue));
+ pui32TmpClientTAFenceValue++;
+ }
+ psTmpClientTAFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TA update syncs:", __FUNCTION__, ui32ClientTAUpdateCount));
+ for (ii=0; ii<ui32ClientTAUpdateCount; ii++)
+ {
+ if (psTmpClientTAUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", __FUNCTION__, ii+1, ui32ClientTAUpdateCount, (void*)psTmpClientTAUpdateUFOAddress, psTmpClientTAUpdateUFOAddress->ui32Addr, *pui32TmpClientTAUpdateValue, *pui32TmpClientTAUpdateValue));
+ pui32TmpClientTAUpdateValue++;
+ }
+ psTmpClientTAUpdateUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D fence syncs:", __FUNCTION__, ui32Client3DFenceCount));
+ for (ii=0; ii<ui32Client3DFenceCount; ii++)
+ {
+ if (psTmpClient3DFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __FUNCTION__, ii+1, ui32Client3DFenceCount, (void*)psTmpClient3DFenceUFOAddress, psTmpClient3DFenceUFOAddress->ui32Addr, *pui32TmpClient3DFenceValue, *pui32TmpClient3DFenceValue));
+ pui32TmpClient3DFenceValue++;
+ }
+ psTmpClient3DFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d 3D update syncs:", __FUNCTION__, ui32Client3DUpdateCount));
+ for (ii=0; ii<ui32Client3DUpdateCount; ii++)
+ {
+ if (psTmpClient3DUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d(0x%x)", __FUNCTION__, ii+1, ui32Client3DUpdateCount, (void*)psTmpClient3DUpdateUFOAddress, psTmpClient3DUpdateUFOAddress->ui32Addr, *pui32TmpClient3DUpdateValue, *pui32TmpClient3DUpdateValue));
+ pui32TmpClient3DUpdateValue++;
+ }
+ psTmpClient3DUpdateUFOAddress++;
+ }
+ }
+#endif
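+	/* The dump loops above rely on the UFO address convention used throughout
+	 * this file: a firmware address with bit 0 set denotes a sync checkpoint,
+	 * whose check/update value is implicitly PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+	 * an even address is a classic sync prim whose explicit value is consumed
+	 * from the corresponding value array.
+	 */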
+
+	/* Init and acquire the TA command if required */
+ if (bKickTA)
+ {
+ RGX_SERVER_RC_TA_DATA *psTAData = &psRenderContext->sTAData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &psRenderContext->sWorkEstData,
+ &psRenderContext->sWorkEstData.sWorkloadMatchingDataTA,
+ ui32RenderTargetSize,
+ ui32NumberOfDrawCalls,
+ ui32NumberOfIndices,
+ ui32NumberOfMRTs,
+ ui64DeadlineInus,
+ &sWorkloadKickDataTA);
+#endif
+
+ /* Init the TA command helper */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientTAFenceCount=%d, ui32ClientTAUpdateCount=%d", __FUNCTION__, ui32ClientTAFenceCount, ui32ClientTAUpdateCount));
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(psTAData->psServerCommonContext),
+ ui32ClientTAFenceCount,
+ pauiClientTAFenceUFOAddress,
+ paui32ClientTAFenceValue,
+ ui32ClientTAUpdateCount,
+ pauiClientTAUpdateUFOAddress,
+ paui32ClientTAUpdateValue,
+ ui32ServerTASyncPrims,
+ paui32ServerTASyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ pasServerTASyncs,
+ ui32TACmdSize,
+ pui8TADMCmd,
+	                                &pPreAddr,
+	                                (bKick3D ? NULL : &pPostAddr),
+	                                (bKick3D ? NULL : &pRMWUFOAddr),
+ RGXFWIF_CCB_CMD_TYPE_TA,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ &sWorkloadKickDataTA,
+#else
+ NULL,
+#endif
+ "TA",
+ bCCBStateOpen,
+ pasTACmdHelperData,
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_tacmdinit;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* The following is used to determine the offset of the command header containing
+		   the workload estimation data so that it can be accessed when the KCCB is read */
+ ui32TACmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(pasTACmdHelperData);
+#endif
+
+ eError = RGXCmdHelperAcquireCmdCCB(CCB_CMD_HELPER_NUM_TA_COMMANDS, pasTACmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_taacquirecmd;
+ }
+ else
+ {
+ ui32TACmdCount++;
+ }
+ }
+
+	/* Prepare the PR fence and PR update commands on the 3D CCB if required */
+ if (bKickPR)
+ {
+ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+		/*
+		   The command helper doesn't know about the PR fence, so create
+		   the command with all the fences attached to it, and later create
+		   the PR command itself, which _must_ come after the PR fence.
+		*/
+ sPRUFO.puiAddrUFO = uiPRFenceUFOAddress;
+ sPRUFO.ui32Value = ui32PRFenceValue;
+
+ /* Init the PR fence command helper */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32Client3DFenceCount=%d", __FUNCTION__, ui32Client3DFenceCount));
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+ ui32Client3DFenceCount,
+ pauiClient3DFenceUFOAddress,
+ paui32Client3DFenceValue,
+ 0,
+ NULL,
+ NULL,
+ (bKick3D ? ui32Server3DSyncPrims : 0),
+ paui32Server3DSyncFlags,
+ PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK,
+ pasServer3DSyncs,
+ sizeof(sPRUFO),
+ (IMG_UINT8*) &sPRUFO,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_FENCE_PR,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "3D-PR-Fence",
+ bCCBStateOpen,
+ &pas3DCmdHelperData[ui323DCmdCount++],
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_prfencecmdinit;
+ }
+
+ /* Init the 3D PR command helper */
+		/* Init the 3D PR command helper */
+		/*
+		   Updates for Android (the fence sync and the timeline sync prim) are provided
+		   in the PR-update if no 3D is present. This ensures the timeline update cannot
+		   happen out of order with any other 3D already in flight for the same timeline
+		   (PR-updates are done in the 3D cCCB). Such an out-of-order timeline sync prim
+		   update could happen if we attached it to the TA update instead.
+		*/
+ if (ui32ClientPRUpdateCount)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Line %d, ui32ClientPRUpdateCount=%d, pauiClientPRUpdateUFOAddress=0x%x, ui32ClientPRUpdateValueCount=%d, paui32ClientPRUpdateValue=0x%x", __FUNCTION__, __LINE__,
+ ui32ClientPRUpdateCount, pauiClientPRUpdateUFOAddress->ui32Addr, ui32ClientPRUpdateValueCount, (ui32ClientPRUpdateValueCount == 0) ? PVRSRV_SYNC_CHECKPOINT_SIGNALLED : *paui32ClientPRUpdateValue));
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling RGXCmdHelperInitCmdCCB(), ui32ClientPRUpdateCount=%d", __FUNCTION__, ui32ClientPRUpdateCount));
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+ 0,
+ NULL,
+ NULL,
+ ui32ClientPRUpdateCount,
+ pauiClientPRUpdateUFOAddress,
+ paui32ClientPRUpdateValue,
+ 0,
+ NULL,
+ SYNC_FLAG_MASK_ALL,
+ NULL,
+ ui323DPRCmdSize,
+ pui83DPRDMCmd,
+ NULL,
+ NULL,
+ NULL,
+ RGXFWIF_CCB_CMD_TYPE_3D_PR,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ "3D-PR",
+ bCCBStateOpen,
+ &pas3DCmdHelperData[ui323DCmdCount++],
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_prcmdinit;
+ }
+ }
+
+ if (bKick3D || bAbort)
+ {
+ RGX_SERVER_RC_3D_DATA *ps3DData = &psRenderContext->s3DData;
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Prepare workload estimation */
+ WorkEstPrepare(psRenderContext->psDeviceNode->pvDevice,
+ &psRenderContext->sWorkEstData,
+ &psRenderContext->sWorkEstData.sWorkloadMatchingData3D,
+ ui32RenderTargetSize,
+ ui32NumberOfDrawCalls,
+ ui32NumberOfIndices,
+ ui32NumberOfMRTs,
+ ui64DeadlineInus,
+ &sWorkloadKickData3D);
+#endif
+
+ /* Init the 3D command helper */
+ eError = RGXCmdHelperInitCmdCCB(FWCommonContextGetClientCCB(ps3DData->psServerCommonContext),
+ 0,
+ NULL,
+ NULL,
+ ui32Client3DUpdateCount,
+ pauiClient3DUpdateUFOAddress,
+ paui32Client3DUpdateValue,
+ ui32Server3DSyncPrims,
+ paui32Server3DSyncFlags,
+ PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE,
+ pasServer3DSyncs,
+ ui323DCmdSize,
+ pui83DDMCmd,
+	                                (bKickTA ? NULL : &pPreAddr),
+	                                &pPostAddr,
+	                                &pRMWUFOAddr,
+ RGXFWIF_CCB_CMD_TYPE_3D,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ &sWorkloadKickData3D,
+#else
+ NULL,
+#endif
+ "3D",
+ bCCBStateOpen,
+ &pas3DCmdHelperData[ui323DCmdCount++],
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_3dcmdinit;
+ }
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+		/* The following are used to determine the offset of the command header containing the workload estimation
+		   data so that it can be accessed when the KCCB is read */
+ ui323DCmdHeaderOffset = RGXCmdHelperGetDMCommandHeaderOffset(&pas3DCmdHelperData[ui323DCmdCount - 1]);
+ ui323DFullRenderCommandOffset = RGXCmdHelperGetCommandOffset(pas3DCmdHelperData, ui323DCmdCount - 1);
+#endif
+ }
+
+ /* Protect against array overflow in RGXCmdHelperAcquireCmdCCB() */
+ if (ui323DCmdCount > CCB_CMD_HELPER_NUM_3D_COMMANDS)
+ {
+		eError = PVRSRV_ERROR_INVALID_PARAMS; /* ensure we don't bail out with a stale PVRSRV_OK */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_3dcmdinit;
+ }
+
+ if (ui323DCmdCount)
+ {
+ PVR_ASSERT(bKickPR || bKick3D);
+
+ /* Acquire space for all the 3D command(s) */
+ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount, pas3DCmdHelperData);
+ if (eError != PVRSRV_OK)
+ {
+ /* If RGXCmdHelperAcquireCmdCCB fails we skip the scheduling
+ * of a new TA command with the same Write offset in Kernel CCB.
+ */
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_3dacquirecmd;
+ }
+ }
+
+	/*
+	   The kernel CCB space should be acquired here, because past this point
+	   we release the commands, which applies operations to server syncs that
+	   cannot be undone.
+	*/
+
+ /*
+ Everything is ready to go now, release the commands
+ */
+ if (ui32TACmdCount)
+ {
+ ui32TACmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui32TACmdCount,
+ pasTACmdHelperData,
+ "TA",
+ FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ ui32TACmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+ /* This checks if the command would wrap around at the end of the CCB and therefore would start at an
+ offset of 0 rather than the current command offset */
+ if (ui32TACmdOffset < ui32TACmdOffsetWrapCheck)
+ {
+ ui32TACommandOffset = ui32TACmdOffset;
+ }
+ else
+ {
+ ui32TACommandOffset = 0;
+ }
+#endif
+ }
+
+ if (ui323DCmdCount)
+ {
+ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+ pas3DCmdHelperData,
+ "3D",
+ FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr);
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ ui323DCmdOffsetWrapCheck = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+ if (ui323DCmdOffset < ui323DCmdOffsetWrapCheck)
+ {
+ ui323DCommandOffset = ui323DCmdOffset;
+ }
+ else
+ {
+ ui323DCommandOffset = 0;
+ }
+#endif
+ }
+
+ if (ui32TACmdCount)
+ {
+ RGXFWIF_KCCB_CMD sTAKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel TA CCB command. */
+ sTAKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sTAKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->sTAData.psServerCommonContext);
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->sTAData.psServerCommonContext));
+
+ /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui32TACommandOffset + ui32TACmdHeaderOffset;
+#else
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+ if (bCommitRefCountsTA)
+ {
+ AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &sTAKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+ &sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+ RGXFWIF_DM_TA,
+ bKickTA,
+ psRTDataCleanup,
+ psZBuffer,
+ psSBuffer,
+ psMSAAScratchBuffer);
+ *pbCommittedRefCountsTA = IMG_TRUE;
+ }
+ else
+ {
+ sTAKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ }
+
+ HTBLOGK(HTB_SF_MAIN_KICK_TA,
+ sTAKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui32TACmdOffset
+ );
+
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ RGX_HWPERF_HOST_ENQ(psRenderContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_TA,
+ uiCheckTAFenceUID,
+ uiUpdateTAFenceUID,
+ ui64DeadlineInus,
+ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA));
+#else
+ RGX_HWPERF_HOST_ENQ(psRenderContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_TA,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ ui64DeadlineInus,
+ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickDataTA));
+#endif
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_TA,
+ &sTAKCCBCmd,
+ sizeof(sTAKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
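+		/* RGXScheduleCommand returns PVRSRV_ERROR_RETRY while the kernel CCB
+		 * has no space for the kick; the loop above backs off for
+		 * MAX_HW_TIME_US/WAIT_TRY_COUNT between attempts until MAX_HW_TIME_US
+		 * has elapsed. The same retry policy is applied to the 3D kick below.
+		 */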
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psRenderContext->psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TA3D);
+#endif
+ }
+
+ if (ui323DCmdCount)
+ {
+ RGXFWIF_KCCB_CMD s3DKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel 3D CCB command. */
+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psRenderContext->s3DData.psServerCommonContext);
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psRenderContext->s3DData.psServerCommonContext));
+
+ /* Add the Workload data into the KCCB kick */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Store the offset to the CCCB command header so that it can be referenced when the KCCB command reaches the FW */
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = ui323DCommandOffset + ui323DCmdHeaderOffset + ui323DFullRenderCommandOffset;
+#else
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+#endif
+
+ if (bCommitRefCounts3D)
+ {
+ AttachKickResourcesCleanupCtls((PRGXFWIF_CLEANUP_CTL *) &s3DKCCBCmd.uCmdData.sCmdKickData.apsCleanupCtl,
+ &s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl,
+ RGXFWIF_DM_3D,
+ bKick3D,
+ psRTDataCleanup,
+ psZBuffer,
+ psSBuffer,
+ psMSAAScratchBuffer);
+ *pbCommittedRefCounts3D = IMG_TRUE;
+ }
+ else
+ {
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ }
+
+
+ HTBLOGK(HTB_SF_MAIN_KICK_3D,
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui323DCmdOffset);
+
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ RGX_HWPERF_HOST_ENQ(psRenderContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_3D,
+ uiCheck3DFenceUID,
+ uiUpdate3DFenceUID,
+ ui64DeadlineInus,
+ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D));
+#else
+ RGX_HWPERF_HOST_ENQ(psRenderContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_3D,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ ui64DeadlineInus,
+ WORKEST_CYCLES_PREDICTION_GET(sWorkloadKickData3D));
+#endif
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psRenderContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_3D,
+ &s3DKCCBCmd,
+ sizeof(s3DKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+ }
+
+	/*
+	 * Now check eError (which may hold an error returned by our earlier calls
+	 * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command
+	 * first, so we check it now...
+	 */
+	if (eError != PVRSRV_OK)
+	{
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Failed, eError=%d", __FUNCTION__, eError));
+ goto fail_3dacquirecmd;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ if (psUpdateTASyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint [TA] <%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, \
+ (void*)psUpdateTASyncCheckpoint, SyncCheckpointGetId(psUpdateTASyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateTASyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateTASyncCheckpoint);
+ }
+ if (psTAFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim [TA] <%p> to %d", __FUNCTION__, (void*)psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psTAFenceTimelineUpdateSync, ui32TAFenceTimelineUpdateValue);
+ }
+
+ if (psUpdate3DSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint [3D] <%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, \
+ (void*)psUpdate3DSyncCheckpoint, SyncCheckpointGetId(psUpdate3DSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdate3DSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdate3DSyncCheckpoint);
+ }
+ if (ps3DFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim [3D] <%p> to %d", __FUNCTION__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+
+#else
+ if (psUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Signalling NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __FUNCTION__, (void*)psUpdateSyncCheckpoint, SyncCheckpointGetId(psUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(psUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Updating NOHW sync prim<%p> to %d", __FUNCTION__, (void*)psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+
+#endif /* defined(PVRSRV_SYNC_SEPARATE_TIMELINES) */
+#endif /* defined(NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling pvr_buffer_sync_kick_succeeded(psBufferSyncData=<%p>)...", __FUNCTION__, (void*)psBufferSyncData));
+ pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+	if (piUpdateTAFence)
+ {
+ *piUpdateTAFence = iUpdateTAFence;
+ }
+	if (piUpdate3DFence)
+ {
+ *piUpdate3DFence = iUpdate3DFence;
+ }
+#else
+ *piUpdateFence = iUpdateFence;
+#endif
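+	/* With PVRSRV_SYNC_SEPARATE_TIMELINES the TA and 3D output fences are
+	 * returned independently; the NULL checks above allow a caller to request
+	 * only one of them.
+	 */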
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence.
+ * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+ */
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ if (bKickTA)
+ {
+ SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+ }
+ SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+
+ if (pvTAUpdateFenceFinaliseData && (iUpdateTAFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateTAFence, pvTAUpdateFenceFinaliseData);
+ }
+ if (pv3DUpdateFenceFinaliseData && (iUpdate3DFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdate3DFence, pv3DUpdateFenceFinaliseData);
+ }
+#else
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, apsFenceSyncCheckpoints);
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+#endif
+
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ if (apsFenceTASyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+ }
+ if (apsFence3DSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+ }
+
+ if (sTASyncData.paui32ClientUpdateValue)
+ {
+ OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+ }
+ if (s3DSyncData.paui32ClientUpdateValue)
+ {
+ OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+ }
+
+#else
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ }
+#endif
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+ fail_3dacquirecmd:
+ fail_3dcmdinit:
+ fail_prcmdinit:
+ fail_prfencecmdinit:
+ fail_taacquirecmd:
+ fail_tacmdinit:
+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAFence);
+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrListTAUpdate);
+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DFence);
+ SyncAddrListRollbackCheckpoints(psRenderContext->psDeviceNode, &psRenderContext->sSyncAddrList3DUpdate);
+ /* Where a TA-only kick (ie no 3D) is submitted, the PR update will make use of the unused 3DUpdate list.
+ * If this has happened, performing a rollback on pauiClientPRUpdateUFOAddress will simply repeat what
+ * has already been done for the sSyncAddrList3DUpdate above and result in a double decrement of the
+ * sync checkpoint's hEnqueuedCCBCount, so we need to check before rolling back the PRUpdate.
+ */
+ if (pauiClientPRUpdateUFOAddress && (pauiClientPRUpdateUFOAddress != psRenderContext->sSyncAddrList3DUpdate.pasFWAddrs))
+ {
+ SyncCheckpointRollbackFromUFO(psRenderContext->psDeviceNode, pauiClientPRUpdateUFOAddress->ui32Addr);
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ fail_alloc_update_values_mem_3D:
+ /* FIXME: sTASyncData.paui32ClientPRUpdateValue points to the same buffer, needs a review */
+ fail_alloc_update_values_mem_TA:
+ if (iUpdateTAFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateTAFence, pvTAUpdateFenceFinaliseData);
+ }
+ if (iUpdate3DFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdate3DFence, pv3DUpdateFenceFinaliseData);
+ }
+#else
+ fail_alloc_update_values_mem:
+ if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+#endif
+ fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence.
+ * NOTE: 3D fence is always submitted, either via 3D or TA(PR).
+ */
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ if (bKickTA)
+ {
+ SyncAddrListDeRefCheckpoints(ui32FenceTASyncCheckpointCount, apsFenceTASyncCheckpoints);
+ }
+ SyncAddrListDeRefCheckpoints(ui32Fence3DSyncCheckpointCount, apsFence3DSyncCheckpoints);
+#else
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount, apsFenceSyncCheckpoints);
+#endif
+ fail_resolve_input_fence:
+ fail_fdsync:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_failed(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ err_pr_fence_address:
+ err_populate_sync_addr_list_3d_update:
+ err_populate_sync_addr_list_3d_fence:
+ err_populate_sync_addr_list_ta_update:
+ err_populate_sync_addr_list_ta_fence:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+#if defined(PVRSRV_SYNC_SEPARATE_TIMELINES)
+ if (apsFenceTASyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceTASyncCheckpoints);
+ }
+ if (apsFence3DSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFence3DSyncCheckpoints);
+ }
+ if (sTASyncData.paui32ClientUpdateValue)
+ {
+ OSFreeMem(sTASyncData.paui32ClientUpdateValue);
+ }
+ if (s3DSyncData.paui32ClientUpdateValue)
+ {
+ OSFreeMem(s3DSyncData.paui32ClientUpdateValue);
+ }
+#else
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ }
+#endif
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ PVR_ASSERT(eError != PVRSRV_OK);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psRenderContext->hLock);
+#endif
+
+ if (psRenderContext->sTAData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRenderContext->sTAData.psServerCommonContext,
+ psConnection,
+ psRenderContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_TA);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the TA part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_tacontext;
+ }
+ psRenderContext->sTAData.ui32Priority = ui32Priority;
+ }
+
+ if (psRenderContext->s3DData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psRenderContext->s3DData.psServerCommonContext,
+ psConnection,
+ psRenderContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_3D);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the rendercontext (%s)", __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_3dcontext;
+ }
+ psRenderContext->s3DData.ui32Priority = ui32Priority;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+ fail_3dcontext:
+ fail_tacontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psRenderContext->hLock);
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXGetLastRenderContextResetReasonKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef)
+{
+ RGX_SERVER_RC_TA_DATA *psRenderCtxTAData;
+ RGX_SERVER_RC_3D_DATA *psRenderCtx3DData;
+ RGX_SERVER_COMMON_CONTEXT *psCurrentServerTACommonCtx, *psCurrentServer3DCommonCtx;
+ RGXFWIF_CONTEXT_RESET_REASON eLastTAResetReason, eLast3DResetReason;
+ IMG_UINT32 ui32LastTAResetJobRef, ui32Last3DResetJobRef;
+
+ PVR_ASSERT(psRenderContext != NULL);
+ PVR_ASSERT(peLastResetReason != NULL);
+ PVR_ASSERT(pui32LastResetJobRef != NULL);
+
+ psRenderCtxTAData = &(psRenderContext->sTAData);
+ psCurrentServerTACommonCtx = psRenderCtxTAData->psServerCommonContext;
+ psRenderCtx3DData = &(psRenderContext->s3DData);
+ psCurrentServer3DCommonCtx = psRenderCtx3DData->psServerCommonContext;
+
+	/* Get the last reset reasons from both the TA and 3D so that they are cleared... */
+ eLastTAResetReason = FWCommonContextGetLastResetReason(psCurrentServerTACommonCtx, &ui32LastTAResetJobRef);
+ eLast3DResetReason = FWCommonContextGetLastResetReason(psCurrentServer3DCommonCtx, &ui32Last3DResetJobRef);
+
+	/* Combine the reset reasons from the TA and 3D into one: default to the 3D reason,
+	 * but report the TA reason where the 3D was unaffected or merely an innocent victim... */
+ *peLastResetReason = (IMG_UINT32) eLast3DResetReason;
+ *pui32LastResetJobRef = ui32Last3DResetJobRef;
+ if (eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_NONE ||
+ ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP ||
+ eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING) &&
+ (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_LOCKUP ||
+ eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_GUILTY_OVERRUNING)) ||
+ ((eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_LOCKUP ||
+ eLast3DResetReason == RGXFWIF_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING) &&
+ (eLastTAResetReason == RGXFWIF_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH)))
+ {
+ *peLastResetReason = eLastTAResetReason;
+ *pui32LastResetJobRef = ui32LastTAResetJobRef;
+ }
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVRGXGetPartialRenderCountKM
+ */
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+ IMG_UINT32 *pui32NumPartialRenders)
+{
+ RGXFWIF_HWRTDATA *psHWRTData;
+ PVRSRV_ERROR eError;
+
+ eError = DevmemAcquireCpuVirtAddr(psHWRTDataMemDesc, (void **)&psHWRTData);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVRGXGetPartialRenderCountKM: Failed to map Firmware Render Target Data (%u)", eError));
+ return eError;
+ }
+
+ *pui32NumPartialRenders = psHWRTData->ui32NumPartialRenders;
+
+ DevmemReleaseCpuVirtAddr(psHWRTDataMemDesc);
+
+ return PVRSRV_OK;
+}
+
+void CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+
+ DumpStalledFWCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ DumpStalledFWCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hRenderCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sRenderCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_RENDER_CONTEXT *psCurrentServerRenderCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_RENDER_CONTEXT, sListNode);
+ if (NULL != psCurrentServerRenderCtx->sTAData.psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->sTAData.psServerCommonContext, RGX_KICK_TYPE_DM_TA) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TA;
+ }
+ }
+
+ if (NULL != psCurrentServerRenderCtx->s3DData.psServerCommonContext)
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerRenderCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_3D) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_3D;
+ }
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hRenderCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/*
+ * RGXRenderContextStalledKM
+ */
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext)
+{
+ RGXCheckForStalledClientContexts((PVRSRV_RGXDEV_INFO *) psRenderContext->psDeviceNode->pvDevice, IMG_TRUE);
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (rgxta3d.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxta3d.h b/drivers/gpu/drm/img-rogue/1.10/rgxta3d.h
new file mode 100644
index 00000000000000..cb20a3cd519c5c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxta3d.h
@@ -0,0 +1,452 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX TA and 3D Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX TA and 3D Functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTA3D_H__)
+#define __RGXTA3D_H__
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgx_fwif_shared.h"
+#include "rgx_fwif_resetframework.h"
+#include "sync_server.h"
+#include "connection_server.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+typedef struct _RGX_SERVER_RENDER_CONTEXT_ RGX_SERVER_RENDER_CONTEXT;
+typedef struct _RGX_FREELIST_ RGX_FREELIST;
+typedef struct _RGX_PMR_NODE_ RGX_PMR_NODE;
+
+typedef struct {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWHWRTDataMemDesc;
+ DEVMEM_MEMDESC *psRTACtlMemDesc;
+ DEVMEM_MEMDESC *psRTArrayMemDesc;
+ DEVMEM_MEMDESC *psRendersAccArrayMemDesc;
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS];
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+} RGX_RTDATA_CLEANUP_DATA;
+
+struct _RGX_FREELIST_ {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ /* Free list PMR */
+ PMR *psFreeListPMR;
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset;
+
+ /* Freelist config */
+ IMG_UINT32 ui32MaxFLPages;
+ IMG_UINT32 ui32InitFLPages;
+ IMG_UINT32 ui32CurrentFLPages;
+ IMG_UINT32 ui32GrowFLPages;
+ IMG_UINT32 ui32ReadyFLPages;
+ IMG_UINT32 ui32GrowThreshold; /* Percentage of FL memory used that should trigger a new grow request */
+ IMG_UINT32 ui32FreelistID;
+ IMG_UINT32 ui32FreelistGlobalID; /* related global freelist for this freelist */
+ IMG_UINT64 ui64FreelistChecksum; /* checksum over freelist content */
+ IMG_BOOL bCheckFreelist; /* freelist check enabled */
+ IMG_UINT32 ui32RefCount; /* freelist reference counting */
+
+ IMG_UINT32 ui32NumGrowReqByApp; /* Total number of grow requests by Application*/
+ IMG_UINT32 ui32NumGrowReqByFW; /* Total Number of grow requests by Firmware */
+ IMG_UINT32 ui32NumHighPages; /* High Mark of pages in the freelist */
+
+ IMG_PID ownerPid; /* Pid of the owner of the list */
+
+ /* Memory Blocks */
+ DLLIST_NODE sMemoryBlockHead;
+ DLLIST_NODE sMemoryBlockInitHead;
+ DLLIST_NODE sNode;
+
+ /* FW data structures */
+ DEVMEM_MEMDESC *psFWFreelistMemDesc;
+ RGXFWIF_DEV_VIRTADDR sFreeListFWDevVAddr;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+};
+
+struct _RGX_PMR_NODE_ {
+ RGX_FREELIST *psFreeList;
+ PMR *psPMR;
+ PMR_PAGELIST *psPageList;
+ DLLIST_NODE sMemoryBlock;
+ IMG_UINT32 ui32NumPages;
+ IMG_BOOL bFirstPageMissing;
+#if defined(PVR_RI_DEBUG)
+ RI_HANDLE hRIHandle;
+#endif
+};
+
+typedef struct {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psRenderTargetMemDesc;
+} RGX_RT_CLEANUP_DATA;
+
+typedef struct {
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+ DEVMEM_MEMDESC *psFWZSBufferMemDesc;
+ RGXFWIF_DEV_VIRTADDR sZSBufferFWDevVAddr;
+
+ DEVMEMINT_RESERVATION *psReservation;
+ PMR *psPMR;
+ DEVMEMINT_MAPPING *psMapping;
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags;
+ IMG_UINT32 ui32ZSBufferID;
+ IMG_UINT32 ui32RefCount;
+ IMG_BOOL bOnDemand;
+
+	IMG_UINT32 ui32NumReqByApp; /* Number of Backing Requests from Application */
+	IMG_UINT32 ui32NumReqByFW; /* Number of Backing Requests from Firmware */
+
+ IMG_PID owner;
+
+ DLLIST_NODE sNode;
+
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+} RGX_ZSBUFFER_DATA;
+
+typedef struct {
+ RGX_ZSBUFFER_DATA *psZSBuffer;
+} RGX_POPULATION;
+
+/* Dump the physical pages of a freelist */
+IMG_BOOL RGXDumpFreeListPageList(RGX_FREELIST *psFreeList);
+
+
+/* Create HWRTDataSet */
+PVRSRV_ERROR RGXCreateHWRTData(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 psRenderTarget,
+ IMG_DEV_VIRTADDR psPMMListDevVAddr,
+ RGX_FREELIST *apsFreeLists[RGXFW_MAX_FREELISTS],
+ RGX_RTDATA_CLEANUP_DATA **ppsCleanupData,
+ DEVMEM_MEMDESC **ppsRTACtlMemDesc,
+ IMG_UINT32 ui32PPPScreen,
+ IMG_UINT32 ui32PPPGridOffset,
+ IMG_UINT64 ui64PPPMultiSampleCtl,
+ IMG_UINT32 ui32TPCStride,
+ IMG_DEV_VIRTADDR sTailPtrsDevVAddr,
+ IMG_UINT32 ui32TPCSize,
+ IMG_UINT32 ui32TEScreen,
+ IMG_UINT32 ui32TEAA,
+ IMG_UINT32 ui32TEMTILE1,
+ IMG_UINT32 ui32TEMTILE2,
+ IMG_UINT32 ui32MTileStride,
+ IMG_UINT32 ui32ISPMergeLowerX,
+ IMG_UINT32 ui32ISPMergeLowerY,
+ IMG_UINT32 ui32ISPMergeUpperX,
+ IMG_UINT32 ui32ISPMergeUpperY,
+ IMG_UINT32 ui32ISPMergeScaleX,
+ IMG_UINT32 ui32ISPMergeScaleY,
+ IMG_UINT16 ui16MaxRTs,
+ DEVMEM_MEMDESC **psMemDesc,
+ IMG_UINT32 *puiHWRTData);
+
+/* Destroy HWRTData */
+PVRSRV_ERROR RGXDestroyHWRTData(RGX_RTDATA_CLEANUP_DATA *psCleanupData);
+
+/* Create Render Target */
+PVRSRV_ERROR RGXCreateRenderTarget(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_DEV_VIRTADDR psVHeapTableDevVAddr,
+ RGX_RT_CLEANUP_DATA **ppsCleanupData,
+ IMG_UINT32 *sRenderTargetFWDevVAddr);
+
+/* Destroy render target */
+PVRSRV_ERROR RGXDestroyRenderTarget(RGX_RT_CLEANUP_DATA *psCleanupData);
+
+
+/*
+ RGXCreateZSBuffer
+*/
+PVRSRV_ERROR RGXCreateZSBufferKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEMINT_RESERVATION *psReservation,
+ PMR *psPMR,
+ PVRSRV_MEMALLOCFLAGS_T uiMapFlags,
+ RGX_ZSBUFFER_DATA **ppsZSBuffer,
+ IMG_UINT32 *sRenderTargetFWDevVAddr);
+
+/*
+ RGXDestroyZSBuffer
+*/
+PVRSRV_ERROR RGXDestroyZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+
+/*
+ * RGXBackingZSBuffer()
+ *
+ * Backs ZS-Buffer with physical pages
+ */
+PVRSRV_ERROR
+RGXBackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXPopulateZSBufferKM()
+ *
+ * Backs ZS-Buffer with physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXPopulateZSBufferKM(RGX_ZSBUFFER_DATA *psZSBuffer,
+ RGX_POPULATION **ppsPopulation);
+
+/*
+ * RGXUnbackingZSBuffer()
+ *
+ * Frees ZS-Buffer's physical pages
+ */
+PVRSRV_ERROR RGXUnbackingZSBuffer(RGX_ZSBUFFER_DATA *psZSBuffer);
+
+/*
+ * RGXUnpopulateZSBufferKM()
+ *
+ * Frees ZS-Buffer's physical pages (called by Bridge calls)
+ */
+PVRSRV_ERROR RGXUnpopulateZSBufferKM(RGX_POPULATION *psPopulation);
+
+/*
+ RGXProcessRequestZSBufferBacking
+*/
+void RGXProcessRequestZSBufferBacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID);
+
+/*
+ RGXProcessRequestZSBufferUnbacking
+*/
+void RGXProcessRequestZSBufferUnbacking(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32ZSBufferID);
+
+/*
+ RGXGrowFreeList
+*/
+PVRSRV_ERROR RGXGrowFreeList(RGX_FREELIST *psFreeList,
+ IMG_UINT32 ui32NumPages,
+ PDLLIST_NODE pListHeader,
+ IMG_BOOL bForCreate);
+
+/* Create free list */
+PVRSRV_ERROR RGXCreateFreeList(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32MaxFLPages,
+ IMG_UINT32 ui32InitFLPages,
+ IMG_UINT32 ui32GrowFLPages,
+ IMG_UINT32 ui32GrowParamThreshold,
+ RGX_FREELIST *psGlobalFreeList,
+ IMG_BOOL bCheckFreelist,
+ IMG_DEV_VIRTADDR sFreeListDevVAddr,
+ PMR *psFreeListPMR,
+ IMG_DEVMEM_OFFSET_T uiFreeListPMROffset,
+ RGX_FREELIST **ppsFreeList);
+
+/* Destroy free list */
+PVRSRV_ERROR RGXDestroyFreeList(RGX_FREELIST *psFreeList);
+
+/*
+ RGXProcessRequestGrow
+*/
+void RGXProcessRequestGrow(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistID);
+
+
+/* Reconstruct free list after Hardware Recovery */
+void RGXProcessRequestFreelistsReconstruction(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32FreelistsCount,
+ IMG_UINT32 *paui32Freelists);
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXCreateRenderContextKM
+
+ @Description
+ Server-side implementation of RGXCreateRenderContext
+
+ @Input psConnection - connection data
+ @Input psDeviceNode - device node
+ @Input ui32Priority - context priority
+ @Input sVDMCallStackAddr - VDM call stack device virtual address
+ @Input ui32FrameworkCommandSize - framework command size
+ @Input pabyFrameworkCommand - ptr to framework command
+ @Input hMemCtxPrivData - memory context private data
+ @Output ppsRenderContext - server-side render context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateRenderContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_DEV_VIRTADDR sVDMCallStackAddr,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_RENDER_CONTEXT **ppsRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXDestroyRenderContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyRenderContext
+
+ @Input psCleanupData - clean up data
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyRenderContextKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXKickTA3DKM
+
+ @Description
+ Server-side implementation of RGXKickTA3D
+
+ @Input psRTDataCleanup - RT data associated with the kick (or NULL)
+ @Input psZBuffer - Z-buffer associated with the kick (or NULL)
+ @Input psSBuffer - S-buffer associated with the kick (or NULL)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXKickTA3DKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientTAFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientTAFenceSyncPrimBlock,
+ IMG_UINT32 *paui32ClientTAFenceSyncOffset,
+ IMG_UINT32 *paui32ClientTAFenceValue,
+ IMG_UINT32 ui32ClientTAUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClientUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32ClientUpdateSyncOffset,
+ IMG_UINT32 *paui32ClientTAUpdateValue,
+ IMG_UINT32 ui32ServerTASyncPrims,
+ IMG_UINT32 *paui32ServerTASyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServerTASyncs,
+ IMG_UINT32 ui32Client3DFenceCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DFenceSyncPrimBlock,
+ IMG_UINT32 *pauiClient3DFenceSyncOffset,
+ IMG_UINT32 *paui32Client3DFenceValue,
+ IMG_UINT32 ui32Client3DUpdateCount,
+ SYNC_PRIMITIVE_BLOCK **apsClient3DUpdateSyncPrimBlock,
+ IMG_UINT32 *paui32Client3DUpdateSyncOffset,
+ IMG_UINT32 *paui32Client3DUpdateValue,
+ IMG_UINT32 ui32Server3DSyncPrims,
+ IMG_UINT32 *paui32Server3DSyncFlags,
+ SERVER_SYNC_PRIMITIVE **pasServer3DSyncs,
+ SYNC_PRIMITIVE_BLOCK *psPRSyncPrimBlock,
+ IMG_UINT32 ui32PRSyncOffset,
+ IMG_UINT32 ui32PRFenceValue,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE *piUpdateFence,
+ IMG_CHAR szFenceName[PVRSRV_SYNC_NAME_LENGTH],
+ PVRSRV_FENCE iCheckFence3D,
+ PVRSRV_TIMELINE iUpdateTimeline3D,
+ PVRSRV_FENCE *piUpdateFence3D,
+ IMG_CHAR szFenceName3D[PVRSRV_SYNC_NAME_LENGTH],
+ IMG_UINT32 ui32TACmdSize,
+ IMG_PBYTE pui8TADMCmd,
+ IMG_UINT32 ui323DPRCmdSize,
+ IMG_PBYTE pui83DPRDMCmd,
+ IMG_UINT32 ui323DCmdSize,
+ IMG_PBYTE pui83DDMCmd,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_BOOL bLastTAInScene,
+ IMG_BOOL bKickTA,
+ IMG_BOOL bKickPR,
+ IMG_BOOL bKick3D,
+ IMG_BOOL bAbort,
+ IMG_UINT32 ui32PDumpFlags,
+ RGX_RTDATA_CLEANUP_DATA *psRTDataCleanup,
+ RGX_ZSBUFFER_DATA *psZBuffer,
+ RGX_ZSBUFFER_DATA *psSBuffer,
+ RGX_ZSBUFFER_DATA *psMSAAScratchBuffer,
+ IMG_BOOL bCommitRefCountsTA,
+ IMG_BOOL bCommitRefCounts3D,
+ IMG_BOOL *pbCommittedRefCountsTA,
+ IMG_BOOL *pbCommittedRefCounts3D,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs,
+ IMG_UINT32 ui32RenderTargetSize,
+ IMG_UINT32 ui32NumberOfDrawCalls,
+ IMG_UINT32 ui32NumberOfIndices,
+ IMG_UINT32 ui32NumberOfMRTs,
+ IMG_UINT64 ui64DeadlineInus,
+ IMG_DEV_VIRTADDR sRobustnessResetReason);
+
+
+PVRSRV_ERROR PVRSRVRGXSetRenderContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR PVRSRVRGXGetLastRenderContextResetReasonKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext,
+ IMG_UINT32 *peLastResetReason,
+ IMG_UINT32 *pui32LastResetJobRef);
+
+PVRSRV_ERROR PVRSRVRGXGetPartialRenderCountKM(DEVMEM_MEMDESC *psHWRTDataMemDesc,
+ IMG_UINT32 *pui32NumPartialRenders);
+
+/* Debug - check if render context is waiting on a fence */
+void CheckForStalledRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client contexts are stalled */
+IMG_UINT32 CheckForStalledClientRenderCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+PVRSRV_ERROR RGXRenderContextStalledKM(RGX_SERVER_RENDER_CONTEXT *psRenderContext);
+
+#endif /* __RGXTA3D_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.c b/drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.c
new file mode 100644
index 00000000000000..781460796f19f3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.c
@@ -0,0 +1,1168 @@
+/*************************************************************************/ /*!
+@File rgxtdmtransfer.c
+@Title Device specific TDM transfer queue routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtdmtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TDM_UFO_DUMP 0
+
+//#define TDM_CHECKPOINT_DEBUG 1
+
+#if defined(TDM_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
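+/* Note the double parentheses: CHKPT_DBG((...)) forwards an entire
+ * printf-style argument list to PVR_DPF when TDM_CHECKPOINT_DEBUG is defined,
+ * and compiles away to nothing when it is not.
+ */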
+
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT * psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_TDM_DATA;
+
+
+struct _RGX_SERVER_TQ_TDM_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ IMG_UINT32 ui32Flags;
+ RGX_SERVER_TQ_TDM_DATA sTDMData;
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ SYNC_ADDR_LIST sSyncAddrListFence;
+ SYNC_ADDR_LIST sSyncAddrListUpdate;
+ ATOMIC_T hIntJobRef;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+};
+
+static PVRSRV_ERROR _CreateTDMTransferContext(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ DEVMEM_MEMDESC * psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO * psInfo,
+ RGX_SERVER_TQ_TDM_DATA * psTDMData)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ psTDMData->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+ "rogue-tdm");
+ if (IS_ERR(psTDMData->psBufferSyncContext))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to create buffer_sync context (err=%ld)",
+ __func__, PTR_ERR(psTDMData->psBufferSyncContext)));
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_buffer_sync_context_create;
+ }
+#endif
+
+ eError = FWCommonContextAllocate(
+ psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_TDM,
+ RGXFWIF_DM_TDM,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_TQ2D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &psTDMData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ psTDMData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+ fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+ psTDMData->psBufferSyncContext = NULL;
+ fail_buffer_sync_context_create:
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR _DestroyTDMTransferContext(
+ RGX_SERVER_TQ_TDM_DATA * psTDMData,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM * psCleanupSync)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(
+ psDeviceNode,
+ psTDMData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_TDM,
+ PDUMP_FLAGS_NONE);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+ FWCommonContextFree(psTDMData->psServerCommonContext);
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(psTDMData->psBufferSyncContext);
+ psTDMData->psBufferSyncContext = NULL;
+#endif
+
+ return PVRSRV_OK;
+}
+
+/*
+ * PVRSRVRGXTDMCreateTransferContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_TDM_CONTEXT ** ppsTransferContext)
+{
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext;
+
+ DEVMEM_MEMDESC * psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ /* Allocate the server side structure */
+ *ppsTransferContext = NULL;
+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+ if (psTransferContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psTransferContext->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_lockcreate;
+ }
+#endif
+
+ psTransferContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psTransferContext->psCleanupSync,
+ "transfer context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate cleanup sync (0x%x)",
+		         __func__, eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psTransferContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate firmware GPU framework state (%u)",
+		         __func__, eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+ pabyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to populate the framework buffer (%u)",
+		         __func__, eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+
+ eError = _CreateTDMTransferContext(psConnection,
+ psDeviceNode,
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psTransferContext->sTDMData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_tdmtransfercontext;
+ }
+
+ SyncAddrListInit(&psTransferContext->sSyncAddrListFence);
+ SyncAddrListInit(&psTransferContext->sSyncAddrListUpdate);
+
+	OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+ return PVRSRV_OK;
+
+ fail_tdmtransfercontext:
+ fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ fail_frameworkcreate:
+ SyncPrimFree(psTransferContext->psCleanupSync);
+ fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psTransferContext->hLock);
+ fail_lockcreate:
+#endif
+ OSFreeMem(psTransferContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ *ppsTransferContext = NULL;
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+
+	/* Remove the node from the list before calling destroy: destroy, if
+	 * successful, will invalidate the node. If destroy fails, the node must
+	 * be re-added (see fail_destroyTDM below).
+	 */
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_remove_node(&(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+
+
+ eError = _DestroyTDMTransferContext(&psTransferContext->sTDMData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroyTDM;
+ }
+
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ SyncPrimFree(psTransferContext->psCleanupSync);
+
+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListFence);
+ SyncAddrListDeinit(&psTransferContext->sSyncAddrListUpdate);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psTransferContext->hLock);
+#endif
+
+ OSFreeMem(psTransferContext);
+
+ return PVRSRV_OK;
+
+ fail_destroyTDM:
+
+ OSWRLockAcquireWrite(psDevInfo->hTDMCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTDMCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTDMCtxListLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+/*
+ * PVRSRVRGXTDMSubmitTransferKM
+ */
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceSyncOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** papsServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE * piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+ IMG_UINT32 ui32FWCommandSize,
+ IMG_UINT8 * pui8FWCommand,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 * paui32SyncPMRFlags,
+ PMR ** ppsSyncPMRs)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+ PRGXFWIF_UFO_ADDR * pauiIntFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR * pauiIntUpdateUFOAddress = NULL;
+ IMG_UINT32 * paui32IntFenceValue = paui32ClientFenceValue;
+ IMG_UINT32 ui32IntClientFenceCount = ui32ClientFenceCount;
+ IMG_UINT32 * paui32IntUpdateValue = paui32ClientUpdateValue;
+ IMG_UINT32 ui32IntClientUpdateCount = ui32ClientUpdateCount;
+ PVRSRV_ERROR eError;
+ PVRSRV_ERROR eError2;
+ PVRSRV_FENCE iUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT32 ui32IntJobRef;
+
+ IMG_UINT32 ui32CmdOffset = 0;
+ IMG_BOOL bCCBStateOpen;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 uiUpdateFenceUID = 0;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PSYNC_CHECKPOINT psUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui32IntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *psFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui32FenceTimelineUpdateValue = 0;
+ void *pvUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ IMG_DEV_VIRTADDR sRobustnessResetReason = {0};
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (iUpdateTimeline >= 0 && !piUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ if (iUpdateTimeline >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d) in non-supporting driver",
+ __func__, iUpdateTimeline));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (iCheckFence >= 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+ __func__, iCheckFence));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szUpdateFenceName[31] = '\0';
+
+ if (ui32SyncPMRCount != 0)
+ {
+ if (!ppsSyncPMRs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
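+
+	/* Synchronisation inputs handled below, in summary:
+	 * - client fences/updates: sync prim blocks and offsets passed over the
+	 *   bridge, populated into sSyncAddrListFence / sSyncAddrListUpdate;
+	 * - SUPPORT_BUFFER_SYNC: sync checkpoints resolved from the supplied
+	 *   PMRs and appended to the same lists;
+	 * - PVR_USE_FENCE_SYNC_MODEL: iCheckFence is resolved into checkpoints
+	 *   to wait on, and iUpdateTimeline may create an output fence (returned
+	 *   in piUpdateFence) plus one update sync checkpoint.
+	 */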
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psTransferContext->hLock);
+#endif
+
+ ui32IntJobRef = OSAtomicIncrement(&psTransferContext->hIntJobRef);
+
+ /* We can't allocate the required amount of stack space on all consumer architectures */
+ psCmdHelper = OSAllocMem(sizeof(RGX_CCB_CMD_HELPER_DATA));
+ if (psCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_allochelper;
+ }
+
+
+ /*
+ Init the command helper commands for all the prepares
+ */
+ {
+ RGX_CLIENT_CCB *psClientCCB;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+ IMG_CHAR *pszCommandName;
+ RGXFWIF_CCB_CMD_TYPE eType;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+ psServerCommonCtx = psTransferContext->sTDMData.psServerCommonContext;
+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+ pszCommandName = "TQ-TDM";
+
+ if (ui32FWCommandSize == 0)
+ {
+ /* A NULL CMD for TDM is used to append updates to a non finished
+ * FW command. bCCBStateOpen is used in case capture range is
+ * entered on this command, to not drain CCB up to the Roff for this
+ * command, but the finished command prior to this.
+ */
+ bCCBStateOpen = IMG_TRUE;
+ eType = RGXFWIF_CCB_CMD_TYPE_NULL;
+ }
+ else
+ {
+ bCCBStateOpen = IMG_FALSE;
+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_TDM;
+ }
+#if defined(SUPPORT_BUFFER_SYNC)
+ psBufferSyncContext = psTransferContext->sTDMData.psBufferSyncContext;
+#endif
+
+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListFence,
+ ui32ClientFenceCount,
+ pauiClientFenceUFOSyncPrimBlock,
+ paui32ClientFenceSyncOffset);
+		if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ paui32IntFenceValue = paui32ClientFenceValue;
+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+
+ eError = SyncAddrListPopulate(&psTransferContext->sSyncAddrListUpdate,
+ ui32ClientUpdateCount,
+ pauiClientUpdateUFOSyncPrimBlock,
+ paui32ClientUpdateSyncOffset);
+		if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list;
+ }
+ paui32IntUpdateValue = paui32ClientUpdateValue;
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+
+
+ if (ui32SyncPMRCount)
+ {
+#if defined(SUPPORT_BUFFER_SYNC)
+ int err;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ &ui32BufferFenceSyncCheckpointCount,
+ &apsBufferFenceSyncCheckpoints,
+ &psBufferUpdateSyncCheckpoint,
+ &psBufferSyncData);
+ if (err)
+ {
+ eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%d)", __func__, eError));
+ goto fail_resolve_input_fence;
+ }
+
+ /* Append buffer sync fences */
+ if (ui32BufferFenceSyncCheckpointCount > 0)
+ {
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ SyncAddrListAppendAndDeRefCheckpoints(&psTransferContext->sSyncAddrListFence,
+ ui32BufferFenceSyncCheckpointCount,
+ apsBufferFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+ }
+
+ if (psBufferUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+ 1,
+ &psBufferUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+ }
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_populate_sync_addr_list;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_resolve_input_fence;
+ }
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 ii;
+		for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+		{
+			PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+ }
+ }
+#endif
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+ szUpdateFenceName,
+ iUpdateTimeline,
+ psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ &iUpdateFence,
+ &uiUpdateFenceUID,
+ &pvUpdateFenceFinaliseData,
+ &psUpdateSyncCheckpoint,
+ (void*)&psFenceTimelineUpdateSync,
+ &ui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_create_output_fence;
+ }
+
+ /* Append the sync prim update for the timeline (if required) */
+ if (psFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ pui32IntAllocatedUpdateValues = OSAllocMem(sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!pui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(pui32IntAllocatedUpdateValues, 0xbb, sizeof(*pui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(pui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(*pui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+ /* Now set the additional update value */
+ pui32TimelineUpdateWp = pui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ *pui32TimelineUpdateWp = ui32FenceTimelineUpdateValue;
+ ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now append the timeline sync prim addr to the transfer context update list */
+ SyncAddrListAppendSyncPrim(&psTransferContext->sSyncAddrListUpdate,
+ psFenceTimelineUpdateSync);
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ paui32IntUpdateValue = pui32IntAllocatedUpdateValues;
+ }
+ }
+
+ if (ui32FenceSyncCheckpointCount)
+ {
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (&psTransferContext->sSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)&psTransferContext->sSyncAddrListFence));
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+				for (iii=0; iii<ui32IntClientFenceCount; iii++)
+				{
+					CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntFenceUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psTransferContext->sSyncAddrListFence.pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ }
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ if (psUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ SyncAddrListAppendCheckpoints(&psTransferContext->sSyncAddrListUpdate,
+ 1,
+ &psUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psTransferContext->sSyncAddrListUpdate.pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(TDM_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_TDM_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping TDM fence/updates syncs...", __func__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM fence syncs (&psTransferContext->sSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->sSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+ pui32TmpIntFenceValue++;
+ }
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TDM update syncs (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->sSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ /*
+ Create the command helper data for this command
+ */
+ eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ ui32ServerSyncCount,
+ paui32ServerSyncFlags,
+ SYNC_FLAG_MASK_ALL,
+ papsServerSyncs,
+ ui32FWCommandSize,
+ pui8FWCommand,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ eType,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ pszCommandName,
+ bCCBStateOpen,
+ psCmdHelper,
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_initcmd;
+ }
+ }
+
+ /*
+ Acquire space for all the commands in one go
+ */
+
+ eError = RGXCmdHelperAcquireCmdCCB(1, psCmdHelper);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcmdacquire;
+ }
+
+
+ /*
+ We should acquire the kernel CCB(s) space here as the schedule could fail
+ and we would have to roll back all the syncs
+ */
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded.
+	*/
+ ui32CmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(1,
+ psCmdHelper,
+ "TQ_TDM",
+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr);
+
+
+ /*
+ Even if we failed to acquire the client CCB space we might still need
+ to kick the HW to process a padding packet to release space for us next
+ time round
+ */
+ {
+ RGXFWIF_KCCB_CMD sTDMKCCBCmd;
+
+ /* Construct the kernel 3D CCB command. */
+ sTDMKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ sTDMKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->sTDMData.psServerCommonContext));
+ sTDMKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ /* HTBLOGK(HTB_SF_MAIN_KICK_TDM, */
+ /* s3DKCCBCmd.uCmdData.sCmdKickData.psContext, */
+ /* ui323DCmdOffset); */
+ RGX_HWPERF_HOST_ENQ(psTransferContext,
+ OSGetCurrentClientProcessIDKM(),
+ FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext).ui32Addr,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_TQTDM,
+ uiCheckFenceUID,
+ uiUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_TDM,
+ & sTDMKCCBCmd,
+ sizeof(sTDMKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ FWCommonContextGetFWAddress(psTransferContext->
+ sTDMData.psServerCommonContext).ui32Addr,
+ ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQTDM);
+#endif
+ }
+
+ /*
+ * Now check eError (which may have returned an error from our earlier calls
+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+ * so we check it now...
+ */
+	if (eError != PVRSRV_OK)
+ {
+ goto fail_2dcmdacquire;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (psUpdateSyncCheckpoint)
+ {
+ SyncCheckpointSignalNoHW(psUpdateSyncCheckpoint);
+ }
+ if (psFenceTimelineUpdateSync)
+ {
+ SyncPrimNoHwUpdate(psFenceTimelineUpdateSync, ui32FenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+	*piUpdateFence = iUpdateFence;
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (pvUpdateFenceFinaliseData && (iUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ OSFreeMem(psCmdHelper);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui32IntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui32IntAllocatedUpdateValues);
+ pui32IntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+ /*
+ No resources are created in this function so there is nothing to free
+ unless we had to merge syncs.
+ If we fail after the client CCB acquire there is still nothing to do
+ as only the client CCB release will modify the client CCB
+ */
+ fail_2dcmdacquire:
+ fail_3dcmdacquire:
+
+ fail_initcmd:
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, &psTransferContext->sSyncAddrListUpdate);
+ fail_alloc_update_values_mem:
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* fail_pdumpcheck: */
+ /* fail_cmdtype: */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+	if (iUpdateFence != PVRSRV_NO_FENCE)
+ {
+ SyncCheckpointRollbackFenceData(iUpdateFence, pvUpdateFenceFinaliseData);
+ }
+ fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL) || defined(SUPPORT_BUFFER_SYNC)
+ fail_resolve_input_fence:
+#endif
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_failed(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ fail_populate_sync_addr_list:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ OSFreeMem(psCmdHelper);
+ fail_allochelper:
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+#endif
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ RGXFWIF_KCCB_CMD sKCCBCmd;
+ PVRSRV_ERROR eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psTransferContext->hLock);
+#endif
+
+ /* Schedule the firmware command */
+ sKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_NOTIFY_WRITE_OFFSET_UPDATE;
+ sKCCBCmd.uCmdData.sWriteOffsetUpdateData.psContext = FWCommonContextGetFWAddress(psTransferContext->sTDMData.psServerCommonContext);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError = RGXScheduleCommand(psTransferContext->psDeviceNode->pvDevice,
+ RGXFWIF_DM_TDM,
+ &sKCCBCmd,
+ sizeof(sKCCBCmd),
+ 0,
+ ui32PDumpFlags);
+ if (eError != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVRGXTDMNotifyWriteOffsetUpdateKM: Failed to schedule the FW command %d (%s)",
+ eError, PVRSRVGETERRORSTRING(eError)));
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psTransferContext->hLock);
+#endif
+
+ if (psTransferContext->sTDMData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psTransferContext->sTDMData.psServerCommonContext,
+ psConnection,
+ psTransferContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_TDM);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return eError;
+ }
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return PVRSRV_OK;
+}
+
+void CheckForStalledTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+ DumpStalledFWCommonContext(psCurrentServerTransferCtx->sTDMData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+}
+
+
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hTDMCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTDMCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_TDM_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_TDM_CONTEXT, sListNode);
+
+		if (CheckStalledClientCommonContext(
+			psCurrentServerTransferCtx->sTDMData.psServerCommonContext, RGX_KICK_TYPE_DM_TDM_2D)
+			== PVRSRV_ERROR_CCCB_STALLED)
+		{
+			ui32ContextBitMask = RGX_KICK_TYPE_DM_TDM_2D;
+		}
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTDMCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtdmtransfer.c)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.h b/drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.h
new file mode 100644
index 00000000000000..ce756ad1685535
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtdmtransfer.h
@@ -0,0 +1,118 @@
+/*************************************************************************/ /*!
+@File rgxtdmtransfer.h
+@Title RGX Transfer queue 2 Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Transfer queue Functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTDMTRANSFER_H__)
+#define __RGXTDMTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_TDM_CONTEXT_ RGX_SERVER_TQ_TDM_CONTEXT;
+
+
+PVRSRV_ERROR PVRSRVRGXTDMCreateTransferContextKM(
+ CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_TDM_CONTEXT **ppsTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMDestroyTransferContextKM(RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext);
+
+
+PVRSRV_ERROR PVRSRVRGXTDMSubmitTransferKM(
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContext,
+ IMG_UINT32 ui32PDumpFlags,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientFenceSyncOffset,
+ IMG_UINT32 * paui32ClientFenceValue,
+ IMG_UINT32 ui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ** pauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 * paui32ClientUpdateSyncOffset,
+ IMG_UINT32 * paui32ClientUpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 * paui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ** papsServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE iUpdateTimeline,
+ PVRSRV_FENCE * piUpdateFence,
+ IMG_CHAR szUpdateFenceName[32],
+ IMG_UINT32 ui32FWCommandSize,
+ IMG_UINT8 * pui8FWCommand,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+	IMG_UINT32 * paui32SyncPMRFlags,
+ PMR ** ppsSyncPMRs);
+
+PVRSRV_ERROR PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32PDumpFlags);
+
+PVRSRV_ERROR PVRSRVRGXTDMSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ RGX_SERVER_TQ_TDM_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+void CheckForStalledTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTDMTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
+
+
+#endif /* __RGXTDMTRANSFER_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.c b/drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.c
new file mode 100644
index 00000000000000..bbdd349ac92db3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.c
@@ -0,0 +1,555 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific time correlation and calibration routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific time correlation and calibration routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimecorr.h"
+#include "rgxfwutils.h"
+#include "htbserver.h"
+#include "pvrsrv_apphint.h"
+
+/******************************************************************************
+ *
+ * - A calibration period is started on power-on and after a DVFS transition,
+ *   and it is closed before a power-off and before a DVFS transition
+ *   (so power-on -> dvfs -> dvfs -> power-off, power-on -> dvfs -> dvfs ...,
+ *   where each arrow is a calibration period).
+ *
+ * - The timers on the Host and on the FW are correlated at the beginning of
+ * each period together with the current GPU frequency.
+ *
+ * - Correlation and calibration are also done at regular intervals using
+ * a best effort approach.
+ *
+ *****************************************************************************/
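+
+/*
+ * Illustrative timeline of the behaviour described above: each bracketed
+ * span below is one calibration period, opened by RGXTimeCorrBegin() and
+ * closed by RGXTimeCorrEnd():
+ *
+ *   power-on [-----] dvfs [-----] dvfs [-----] power-off
+ *
+ * RGXTimeCorrRestartPeriodic() additionally closes and reopens the current
+ * period on a best-effort basis (see its definition later in this file).
+ */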
+
+static IMG_UINT32 g_ui32ClockSource = PVRSRV_APPHINT_TIMECORRCLOCK;
+
+/*
+ AppHint interfaces
+*/
+
+static PVRSRV_ERROR _SetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 ui32Value)
+{
+ static const IMG_CHAR *apszClocks[] = {
+ "mono", "mono_raw", "sched"
+ };
+
+ if (ui32Value >= RGXTIMECORR_CLOCK_LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Invalid clock source type (%u)", ui32Value));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ RGXTimeCorrEnd((PVRSRV_DEVICE_NODE *) psDeviceNode,
+ RGXTIMECORR_EVENT_CLOCK_CHANGE);
+
+ PVR_DPF((PVR_DBG_WARNING, "Setting time correlation clock from \"%s\" to \"%s\"",
+ apszClocks[g_ui32ClockSource],
+ apszClocks[ui32Value]));
+
+ g_ui32ClockSource = ui32Value;
+
+ RGXTimeCorrBegin((PVRSRV_DEVICE_NODE *) psDeviceNode,
+ RGXTIMECORR_EVENT_CLOCK_CHANGE);
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+ PVR_UNREFERENCED_PARAMETER(apszClocks);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _GetClock(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *psPrivate,
+ IMG_UINT32 *pui32Value)
+{
+ *pui32Value = g_ui32ClockSource;
+
+ PVR_UNREFERENCED_PARAMETER(psPrivate);
+
+ return PVRSRV_OK;
+}
+
+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ PVRSRVAppHintRegisterHandlersUINT32(APPHINT_ID_TimeCorrClock, _GetClock,
+ _SetClock, psDeviceNode, NULL);
+}
+
+/*
+ End of AppHint interface
+*/
+
+IMG_UINT64 RGXTimeCorrGetClockns64(void)
+{
+ IMG_UINT64 ui64Clock;
+
+	switch (g_ui32ClockSource)
+	{
+		case RGXTIMECORR_CLOCK_MONO:
+			return ((void) OSClockMonotonicns64(&ui64Clock), ui64Clock);
+		case RGXTIMECORR_CLOCK_MONO_RAW:
+			return OSClockMonotonicRawns64();
+		case RGXTIMECORR_CLOCK_SCHED:
+			return OSClockns64();
+		default:
+			PVR_ASSERT(IMG_FALSE);
+			return 0;
+	}
+}
+
+IMG_UINT64 RGXTimeCorrGetClockus64(void)
+{
+ IMG_UINT32 rem;
+ return OSDivide64r64(RGXTimeCorrGetClockns64(), 1000, &rem);
+}
+
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXFWIF_TIME_CORR *psTimeCorrs,
+ IMG_UINT32 ui32NumOut)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_UINT32 ui32CurrentIndex = psGpuUtilFWCB->ui32TimeCorrSeqCount;
+
+	while (ui32NumOut--)
+ {
+ *(psTimeCorrs++) = psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32CurrentIndex)];
+ ui32CurrentIndex--;
+ }
+}
+
+static __maybe_unused const IMG_CHAR* _EventToString(RGXTIMECORR_EVENT eEvent)
+{
+ switch (eEvent)
+ {
+ case RGXTIMECORR_EVENT_POWER:
+ return "power";
+ case RGXTIMECORR_EVENT_DVFS:
+ return "dvfs";
+ case RGXTIMECORR_EVENT_PERIODIC:
+ return "periodic";
+ case RGXTIMECORR_EVENT_CLOCK_CHANGE:
+ return "clock source";
+ default:
+ return "n/a";
+ }
+}
+
+static inline IMG_UINT32 _RGXGetSystemLayerGPUClockSpeed(PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+ RGX_DATA *psRGXData = (RGX_DATA*)psDeviceNode->psDevConfig->hDevData;
+
+ return psRGXData->psRGXTimingInfo->ui32CoreClockSpeed;
+}
+
+static inline IMG_UINT32 _RGXGetEstimatedGPUClockSpeed(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ GPU_FREQ_TRACKING_DATA *psTrackingData;
+
+ psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
+
+ return psTrackingData->ui32EstCoreClockSpeed;
+}
+
+static void _RGXMakeTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode, RGXTIMECORR_EVENT eEvent)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_UINT32 ui32NewSeqCount = psGpuUtilFWCB->ui32TimeCorrSeqCount + 1;
+ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[RGXFWIF_TIME_CORR_CURR_INDEX(ui32NewSeqCount)];
+ IMG_UINT32 ui32Remainder;
+
+ /*
+ * The following reads must be done as close together as possible, because
+ * they represent the same current time sampled from different clock sources.
+ */
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ if (OSClockMonotonicns64(&psTimeCorr->ui64OSMonoTimeStamp) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,"_RGXMakeTimeCorrData: System Monotonic Clock not available."));
+ PVR_ASSERT(0);
+ }
+#endif
+ psTimeCorr->ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+ psTimeCorr->ui64OSTimeStamp = RGXTimeCorrGetClockns64();
+ psTimeCorr->ui32CoreClockSpeed = _RGXGetEstimatedGPUClockSpeed(psDevInfo);
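+	/* ui64CRDeltaToOSDeltaKNs is the scaled factor used to convert a delta of
+	 * the GPU CR timer into an OS-time delta in nanoseconds; it is consumed
+	 * via RGXFWIF_GET_DELTA_OSTIME_NS() in _RGXCheckTimeCorrData() below. The
+	 * "K" pre-scaling is assumed to be there to keep precision in the integer
+	 * arithmetic.
+	 */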
+ psTimeCorr->ui64CRDeltaToOSDeltaKNs = RGXFWIF_GET_CRDELTA_TO_OSDELTA_K_NS(psTimeCorr->ui32CoreClockSpeed, ui32Remainder);
+
+ /* Make sure the values are written to memory before updating the index of the current entry */
+ OSWriteMemoryBarrier();
+
+ /* Update the index of the current entry in the timer correlation array */
+ psGpuUtilFWCB->ui32TimeCorrSeqCount = ui32NewSeqCount;
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "Timer correlation data (post %s event): OS %" IMG_UINT64_FMTSPEC " ns, "
+ "CR %" IMG_UINT64_FMTSPEC ", GPU freq. %u Hz (given as %u Hz)",
+ _EventToString(eEvent),
+ psTimeCorr->ui64OSTimeStamp,
+ psTimeCorr->ui64CRTimeStamp,
+ RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed),
+ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode)));
+
+ /*
+ * Don't log timing data to the HTB log after a power(-on) event.
+ * Otherwise this will be logged before the HTB partition marker, breaking
+ * the log sync grammar. This data will be automatically repeated when the
+ * partition marker is written.
+ */
+ HTBSyncScale(eEvent != RGXTIMECORR_EVENT_POWER,
+ psTimeCorr->ui64OSTimeStamp,
+ psTimeCorr->ui64CRTimeStamp,
+ psTimeCorr->ui32CoreClockSpeed);
+}
+
+static void _RGXCheckTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+#if !defined(NO_HARDWARE) && defined(DEBUG)
+#define SCALING_FACTOR (10)
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGXFWIF_GPU_UTIL_FWCB *psGpuUtilFWCB = psDevInfo->psRGXFWIfGpuUtilFWCb;
+ IMG_UINT32 ui32Index = RGXFWIF_TIME_CORR_CURR_INDEX(psGpuUtilFWCB->ui32TimeCorrSeqCount);
+ RGXFWIF_TIME_CORR *psTimeCorr = &psGpuUtilFWCB->sTimeCorr[ui32Index];
+ IMG_UINT64 ui64EstimatedTime, ui64CRTimeStamp, ui64OSTimeStamp;
+ IMG_UINT64 ui64CRTimeDiff, ui64OSTimeDiff;
+ IMG_INT64 i64Diff;
+ IMG_UINT32 ui32Ratio, ui32Remainder;
+
+ /*
+ * The following reads must be done as close together as possible, because
+ * they represent the same current time sampled from different clock sources.
+ */
+ ui64CRTimeStamp = RGXReadHWTimerReg(psDevInfo);
+ ui64OSTimeStamp = RGXTimeCorrGetClockns64();
+
+ if ((ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) < (1 << SCALING_FACTOR))
+ {
+		/*
+		 * Less than (1 << SCALING_FACTOR) = 1024 ns (~1us) has passed since
+		 * the timer correlation data was generated. A time frame this short
+		 * is probably not enough to get an estimate of how good the timer
+		 * correlation data was. Skip calculations for the above reason and
+		 * to avoid a division by 0 below.
+		 */
+ return;
+ }
+
+ /* Calculate an estimated timestamp based on the latest timer correlation data */
+ ui64CRTimeDiff = ui64CRTimeStamp - psTimeCorr->ui64CRTimeStamp;
+ ui64OSTimeDiff = RGXFWIF_GET_DELTA_OSTIME_NS(ui64CRTimeDiff,
+ psTimeCorr->ui64CRDeltaToOSDeltaKNs);
+ ui64EstimatedTime = psTimeCorr->ui64OSTimeStamp + ui64OSTimeDiff;
+
+ /* Get difference between estimated timestamp and current timestamp, in ns */
+	i64Diff = (IMG_INT64) (ui64EstimatedTime - ui64OSTimeStamp);
+
+ /*
+ * Calculate ratio between estimated time diff and real time diff:
+ * ratio% : 100% = (OSestimate - OStimecorr) : (OSreal - OStimecorr)
+ *
+ * The operands are scaled down (approximately from ns to us) so at least
+ * the divisor fits on 32 bit.
+ */
+ ui32Ratio = OSDivide64(((ui64EstimatedTime - psTimeCorr->ui64OSTimeStamp) * 100ULL) >> SCALING_FACTOR,
+ (ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp) >> SCALING_FACTOR,
+ &ui32Remainder);
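+	/*
+	 * Worked example with illustrative numbers: if the correlation point was
+	 * taken 100,000,000 ns ago and the estimated time advanced 103,000,000 ns
+	 * over the same interval, then ui32Ratio ~= (103,000,000 * 100) /
+	 * 100,000,000 = 103, i.e. the estimate ran ~3% fast and the warning
+	 * below is emitted.
+	 */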
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "Estimated timestamp check: diff %" IMG_INT64_FMTSPECd " ns over "
+ "period %" IMG_UINT64_FMTSPEC " ns, estimated timer speed %u%%",
+ i64Diff,
+ ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp,
+ ui32Ratio));
+
+ /* Warn if the estimated timestamp is not within +/- 1% of the current time */
+ if (ui32Ratio < 99 || ui32Ratio > 101)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Estimated timestamps generated in the last %" IMG_UINT64_FMTSPEC " ns "
+ "were %s the real time (increasing at %u%% speed)",
+ ui64OSTimeStamp - psTimeCorr->ui64OSTimeStamp,
+ i64Diff > 0 ? "ahead of" : "behind",
+ ui32Ratio));
+
+		/* Higher ratio == higher delta OS == higher delta CR == frequency higher than expected (and vice versa) */
+ PVR_DPF((PVR_DBG_WARNING,
+ "Current GPU frequency %u Hz (given as %u Hz) is probably %s than expected",
+ RGXFWIF_ROUND_TO_KHZ(psTimeCorr->ui32CoreClockSpeed),
+ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+ i64Diff > 0 ? "lower" : "higher"));
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable);
+#endif
+}
+
+static inline IMG_UINT32 _RGXGPUFreqGetIndex(RGX_GPU_DVFS_TABLE *psGpuDVFSTable, IMG_UINT32 ui32CoreClockSpeed)
+{
+ IMG_UINT32 *paui32GPUFrequencies = psGpuDVFSTable->aui32GPUFrequency;
+ IMG_UINT32 i;
+
+ for (i = 0; i < RGX_GPU_DVFS_TABLE_SIZE; i++)
+ {
+ if (paui32GPUFrequencies[i] == ui32CoreClockSpeed)
+ {
+ return i;
+ }
+
+ if (paui32GPUFrequencies[i] == 0)
+ {
+ paui32GPUFrequencies[i] = ui32CoreClockSpeed;
+ return i;
+ }
+ }
+
+ i--;
+
+ PVR_DPF((PVR_DBG_ERROR, "GPU frequency table in the driver is full! "
+ "Table size should be increased! Overriding last entry (%u) with %u",
+ paui32GPUFrequencies[i], ui32CoreClockSpeed));
+
+ paui32GPUFrequencies[i] = ui32CoreClockSpeed;
+
+ return i;
+}
+
+static void _RGXGPUFreqCalibrationPeriodStart(PVRSRV_DEVICE_NODE *psDeviceNode, RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ GPU_FREQ_TRACKING_DATA *psTrackingData;
+ IMG_UINT32 ui32CoreClockSpeed, ui32Index;
+
+ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+ IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64();
+
+ psGpuDVFSTable->ui64CalibrationCRTimestamp = ui64CRTimestamp;
+ psGpuDVFSTable->ui64CalibrationOSTimestamp = ui64OSTimestamp;
+
+ ui32CoreClockSpeed = _RGXGetSystemLayerGPUClockSpeed(psDeviceNode);
+ ui32Index = _RGXGPUFreqGetIndex(psGpuDVFSTable, ui32CoreClockSpeed);
+ psTrackingData = &psGpuDVFSTable->asTrackingData[ui32Index];
+
+ /* Set the time needed to (re)calibrate the GPU frequency */
+ if (psTrackingData->ui32CalibrationCount == 0) /* We never met this frequency */
+ {
+ psTrackingData->ui32EstCoreClockSpeed = ui32CoreClockSpeed;
+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_FIRST_CALIBRATION_TIME_US;
+ }
+ else if (psTrackingData->ui32CalibrationCount == 1) /* We calibrated this frequency only once */
+ {
+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_TRANSITION_CALIBRATION_TIME_US;
+ }
+ else
+ {
+ psGpuDVFSTable->ui32CalibrationPeriod = RGX_GPU_DVFS_PERIODIC_CALIBRATION_TIME_US;
+ }
+
+ /* Update the index to the DVFS table */
+ psGpuDVFSTable->ui32FreqIndex = ui32Index;
+}
+
+static void _RGXGPUFreqCalibrationPeriodStop(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+
+ IMG_UINT64 ui64CRTimestamp = RGXReadHWTimerReg(psDevInfo);
+ IMG_UINT64 ui64OSTimestamp = RGXTimeCorrGetClockus64();
+
+ psGpuDVFSTable->ui64CalibrationCRTimediff =
+ ui64CRTimestamp - psGpuDVFSTable->ui64CalibrationCRTimestamp;
+ psGpuDVFSTable->ui64CalibrationOSTimediff =
+ ui64OSTimestamp - psGpuDVFSTable->ui64CalibrationOSTimestamp;
+
+ /* Check if the current timer correlation data is good enough */
+ _RGXCheckTimeCorrData(psDeviceNode, psGpuDVFSTable);
+}
+
+static void _RGXGPUFreqCalibrationCalculate(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable,
+ RGXTIMECORR_EVENT eEvent)
+{
+#if !defined(NO_HARDWARE)
+ GPU_FREQ_TRACKING_DATA *psTrackingData;
+ IMG_UINT32 ui32EstCoreClockSpeed, ui32PrevCoreClockSpeed;
+ IMG_INT32 i32Diff;
+ IMG_UINT32 ui32Remainder;
+
+ /*
+ * Find out what the GPU frequency was in the last period.
+ * This should return a value very close to the frequency passed by the system layer.
+ */
+ ui32EstCoreClockSpeed =
+ RGXFWIF_GET_GPU_CLOCK_FREQUENCY_HZ(psGpuDVFSTable->ui64CalibrationCRTimediff,
+ psGpuDVFSTable->ui64CalibrationOSTimediff,
+ ui32Remainder);
+
+ /* Update GPU frequency used by the driver for a given system layer frequency */
+ psTrackingData = &psGpuDVFSTable->asTrackingData[psGpuDVFSTable->ui32FreqIndex];
+
+ ui32PrevCoreClockSpeed = psTrackingData->ui32EstCoreClockSpeed;
+ psTrackingData->ui32EstCoreClockSpeed = ui32EstCoreClockSpeed;
+ psTrackingData->ui32CalibrationCount++;
+
+ i32Diff = (IMG_INT32) (ui32EstCoreClockSpeed - ui32PrevCoreClockSpeed);
+
+ if ((i32Diff < -1000000) || (i32Diff > 1000000))
+ {
+ /* Warn if the frequency changed by more than 1 MHz between recalculations */
+ PVR_DPF((PVR_DBG_WARNING,
+ "GPU frequency calibration of system layer frequency %u Hz (pre %s event): "
+ "more than 1 MHz difference between old and new value "
+ "(%u Hz -> %u Hz over %" IMG_UINT64_FMTSPEC " us)",
+ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+ _EventToString(eEvent),
+ RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed),
+ RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed),
+ psGpuDVFSTable->ui64CalibrationOSTimediff));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "GPU frequency calibration of system layer frequency %u Hz (pre %s event): "
+ "%u Hz -> %u Hz done over %" IMG_UINT64_FMTSPEC " us",
+ _RGXGetSystemLayerGPUClockSpeed(psDeviceNode),
+ _EventToString(eEvent),
+ RGXFWIF_ROUND_TO_KHZ(ui32PrevCoreClockSpeed),
+ RGXFWIF_ROUND_TO_KHZ(ui32EstCoreClockSpeed),
+ psGpuDVFSTable->ui64CalibrationOSTimediff));
+ }
+
+ /* Reset time deltas to avoid recalibrating the same frequency over and over again */
+ psGpuDVFSTable->ui64CalibrationCRTimediff = 0;
+ psGpuDVFSTable->ui64CalibrationOSTimediff = 0;
+#else
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(psGpuDVFSTable);
+ PVR_UNREFERENCED_PARAMETER(eEvent);
+#endif
+}
+
+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ _RGXGPUFreqCalibrationPeriodStart(psDeviceNode, psGpuDVFSTable);
+ _RGXMakeTimeCorrData(psDeviceNode, eEvent);
+}
+
+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ _RGXGPUFreqCalibrationPeriodStop(psDeviceNode, psGpuDVFSTable);
+
+ if (psGpuDVFSTable->ui64CalibrationOSTimediff >= psGpuDVFSTable->ui32CalibrationPeriod)
+ {
+ _RGXGPUFreqCalibrationCalculate(psDeviceNode, psGpuDVFSTable, eEvent);
+ }
+}
+
+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = hDevHandle;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_GPU_DVFS_TABLE *psGpuDVFSTable = psDevInfo->psGpuDVFSTable;
+ IMG_UINT64 ui64TimeNow = RGXTimeCorrGetClockus64();
+ PVRSRV_DEV_POWER_STATE ePowerState;
+ PVRSRV_VZ_RETN_IF_MODE(DRIVER_MODE_GUEST);
+
+ if (psDevInfo->psGpuDVFSTable == NULL)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Required data not initialised yet", __func__));
+ return;
+ }
+
+ /* Check if it's the right time to recalibrate the GPU clock frequency */
+	if ((ui64TimeNow - psGpuDVFSTable->ui64CalibrationOSTimestamp) < psGpuDVFSTable->ui32CalibrationPeriod)
+	{
+		return;
+	}
+
+	/* Try to acquire the power lock; if it is contended, don't wait */
+	if (!OSTryLockAcquire(psDeviceNode->hPowerLock))
+	{
+		return;
+	}
+
+ /* If the GPU is off then we can't do anything */
+ PVRSRVGetDevicePowerState(psDeviceNode, &ePowerState);
+ if (ePowerState != PVRSRV_DEV_POWER_STATE_ON)
+ {
+ PVRSRVPowerUnlock(psDeviceNode);
+ return;
+ }
+
+ /* All checks passed, we can calibrate and correlate */
+ RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC);
+ RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_PERIODIC);
+
+ PVRSRVPowerUnlock(psDeviceNode);
+}
+
+/*
+ RGXTimeCorrGetClockSource
+*/
+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void)
+{
+ return g_ui32ClockSource;
+}
+
+/*
+ RGXTimeCorrSetClockSource
+*/
+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXTIMECORR_CLOCK_TYPE eClockType)
+{
+ return _SetClock(psDeviceNode, NULL, eClockType);
+}
+
+/******************************************************************************
+ End of file (rgxtimecorr.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.h b/drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.h
new file mode 100644
index 00000000000000..b876d70bc3e4df
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtimecorr.h
@@ -0,0 +1,204 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX time correlation and calibration header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX time correlation and calibration routines
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTIMECORR_H__)
+#define __RGXTIMECORR_H__
+
+#include "img_types.h"
+#include "device.h"
+
+typedef enum
+{
+ RGXTIMECORR_CLOCK_MONO,
+ RGXTIMECORR_CLOCK_MONO_RAW,
+ RGXTIMECORR_CLOCK_SCHED,
+
+ RGXTIMECORR_CLOCK_LAST
+} RGXTIMECORR_CLOCK_TYPE;
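+
+/* Sketch of the expected mapping (an assumption, for orientation only):
+ * MONO and MONO_RAW are expected to follow the OS monotonic and raw
+ * monotonic clocks, and SCHED the scheduler clock.
+ */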
+
+typedef enum
+{
+ RGXTIMECORR_EVENT_POWER,
+ RGXTIMECORR_EVENT_DVFS,
+ RGXTIMECORR_EVENT_PERIODIC,
+ RGXTIMECORR_EVENT_CLOCK_CHANGE
+} RGXTIMECORR_EVENT;
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrBegin
+
+ @Description Generate new timer correlation data, and start tracking
+ the current GPU frequency.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eEvent : Event associated with the beginning of a timer
+ correlation period
+
+ @Return void
+
+******************************************************************************/
+void RGXTimeCorrBegin(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent);
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrEnd
+
+ @Description Stop tracking the CPU and GPU timers, and if possible
+ recalculate the GPU frequency to a value which makes the timer
+ correlation data more accurate.
+
+ @Input hDevHandle : RGX Device Node
+ @Input eEvent : Event associated with the end of a timer
+ correlation period
+
+ @Return void
+
+******************************************************************************/
+void RGXTimeCorrEnd(IMG_HANDLE hDevHandle, RGXTIMECORR_EVENT eEvent);
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrRestartPeriodic
+
+ @Description Perform actions from RGXTimeCorrEnd and RGXTimeCorrBegin,
+ but only if enough time has passed since the last timer
+ correlation data was generated.
+
+ @Input hDevHandle : RGX Device Node
+
+ @Return void
+
+******************************************************************************/
+void RGXTimeCorrRestartPeriodic(IMG_HANDLE hDevHandle);
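+
+/* Illustrative call pattern (a sketch, not a prescribed sequence): power and
+ * DVFS code is expected to bracket a frequency transition so that a fresh
+ * correlation period starts with accurate data, e.g.
+ *
+ *     RGXTimeCorrEnd(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+ *     ... apply the new core clock frequency ...
+ *     RGXTimeCorrBegin(psDeviceNode, RGXTIMECORR_EVENT_DVFS);
+ */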
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrGetClockns64
+
+ @Description    Returns the value of the currently selected clock (in ns).
+
+ @Return clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXTimeCorrGetClockns64(void);
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrGetClockus64
+
+ @Description    Returns the value of the currently selected clock (in us).
+
+ @Return clock value from currently selected clock source
+
+******************************************************************************/
+IMG_UINT64 RGXTimeCorrGetClockus64(void);
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrGetClockSource
+
+ @Description    Returns the currently selected clock source.
+
+ @Return clock source type
+
+******************************************************************************/
+RGXTIMECORR_CLOCK_TYPE RGXTimeCorrGetClockSource(void);
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrSetClockSource
+
+ @Description Sets clock source for correlation data.
+
+ @Input psDeviceNode : RGX Device Node
+ @Input eClockType : clock source type
+
+ @Return error code
+
+******************************************************************************/
+PVRSRV_ERROR RGXTimeCorrSetClockSource(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXTIMECORR_CLOCK_TYPE eClockType);
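+
+/* For example (a sketch): selecting the raw monotonic clock for new
+ * correlation data would be
+ *
+ *     eError = RGXTimeCorrSetClockSource(psDeviceNode,
+ *                                        RGXTIMECORR_CLOCK_MONO_RAW);
+ */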
+
+/*!
+******************************************************************************
+
+ @Function RGXTimeCorrInitAppHintCallbacks
+
+ @Description Initialise apphint callbacks for timer correlation
+ related apphints.
+
+ @Input psDeviceNode : RGX Device Node
+
+ @Return void
+
+******************************************************************************/
+void RGXTimeCorrInitAppHintCallbacks(const PVRSRV_DEVICE_NODE *psDeviceNode);
+
+/*!
+******************************************************************************
+
+ @Function RGXGetTimeCorrData
+
+ @Description    Get the requested number of most recent time correlation data points
+
+ @Input psDeviceNode : RGX Device Node
+ @Output psTimeCorrs : Output array of RGXFWIF_TIME_CORR elements
+ for data to be written to
+ @Input ui32NumOut : Number of elements to be written out
+
+ @Return void
+
+******************************************************************************/
+void RGXGetTimeCorrData(PVRSRV_DEVICE_NODE *psDeviceNode,
+ RGXFWIF_TIME_CORR *psTimeCorrs,
+ IMG_UINT32 ui32NumOut);
+
+#endif /* __RGXTIMECORR_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.c b/drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.c
new file mode 100644
index 00000000000000..103cfa23301e7b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.c
@@ -0,0 +1,255 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Timer queries
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description RGX Timer queries
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgxtimerquery.h"
+#include "rgxdevice.h"
+#include "rgxtimecorr.h"
+
+#include "rgxfwutils.h"
+#include "pdump_km.h"
+
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hTimerQueryLock);
+#endif
+
+ psDevInfo->bSaveStart = IMG_TRUE;
+ psDevInfo->bSaveEnd = IMG_TRUE;
+
+ /* clear the stamps, in case there is no Kick */
+ psDevInfo->pui64StartTimeById[ui32QueryId] = 0UL;
+ psDevInfo->pui64EndTimeById[ui32QueryId] = 0UL;
+
+	/* save the active query index */
+ psDevInfo->ui32ActiveQueryId = ui32QueryId;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hTimerQueryLock);
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hTimerQueryLock);
+#endif
+
+	/* Clear the flags set by Begin(). Note that bSaveStart has probably
+	 * already been cleared by the first Kick()
+	 */
+ psDevInfo->bSaveStart = IMG_FALSE;
+ psDevInfo->bSaveEnd = IMG_FALSE;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hTimerQueryLock);
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId,
+ IMG_UINT64 * pui64StartTime,
+ IMG_UINT64 * pui64EndTime)
+{
+ PVRSRV_RGXDEV_INFO * psDevInfo = (PVRSRV_RGXDEV_INFO *)psDeviceNode->pvDevice;
+ IMG_UINT32 ui32Scheduled;
+ IMG_UINT32 ui32Completed;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32QueryId >= RGX_MAX_TIMER_QUERIES)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psDevInfo->hTimerQueryLock);
+#endif
+
+ ui32Scheduled = psDevInfo->aui32ScheduledOnId[ui32QueryId];
+ ui32Completed = psDevInfo->pui32CompletedById[ui32QueryId];
+
+	/* If there was no Kick since the Begin() on this query id we return zeros,
+	 * as Begin() cleared the stamps. If there was no Begin() the returned data
+	 * is undefined, but still safe from the Services point of view.
+	 */
+ if (ui32Completed >= ui32Scheduled)
+ {
+		*pui64StartTime = psDevInfo->pui64StartTimeById[ui32QueryId];
+		*pui64EndTime   = psDevInfo->pui64EndTimeById[ui32QueryId];
+
+ eError = PVRSRV_OK;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psDevInfo->hTimerQueryLock);
+#endif
+ return eError;
+}
+
+
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT64 * pui64Time)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+
+ *pui64Time = RGXTimeCorrGetClockns64();
+
+ return PVRSRV_OK;
+}
+
+
+
+/******************************************************************************
+ NOT BRIDGED/EXPORTED FUNCS
+******************************************************************************/
+/* Writes a timestamp command into the client CCB */
+void
+RGXWriteTimestampCommand(IMG_PBYTE * ppbyPtr,
+ RGXFWIF_CCB_CMD_TYPE eCmdType,
+ PRGXFWIF_TIMESTAMP_ADDR pAddr)
+{
+ RGXFWIF_CCB_CMD_HEADER * psHeader;
+
+ psHeader = (RGXFWIF_CCB_CMD_HEADER *) (*ppbyPtr);
+
+ PVR_ASSERT(eCmdType == RGXFWIF_CCB_CMD_TYPE_PRE_TIMESTAMP
+ || eCmdType == RGXFWIF_CCB_CMD_TYPE_POST_TIMESTAMP);
+
+ psHeader->eCmdType = eCmdType;
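+	/* Round the payload (a single FW virtual address) up to the FW
+	 * allocation alignment; e.g. assuming RGXFWIF_FWALLOC_ALIGN were 16,
+	 * a 4-byte payload would be padded to 16 bytes (illustrative value).
+	 */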
+ psHeader->ui32CmdSize = (sizeof(RGXFWIF_DEV_VIRTADDR) + RGXFWIF_FWALLOC_ALIGN - 1) & ~(RGXFWIF_FWALLOC_ALIGN - 1);
+
+ (*ppbyPtr) += sizeof(RGXFWIF_CCB_CMD_HEADER);
+
+ (*(PRGXFWIF_TIMESTAMP_ADDR*)*ppbyPtr) = pAddr;
+
+ (*ppbyPtr) += psHeader->ui32CmdSize;
+}
+
+
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+ PRGXFWIF_UFO_ADDR * ppUpdate)
+{
+ if (ppPreAddr != NULL)
+ {
+ if (psDevInfo->bSaveStart)
+ {
+ /* drop the SaveStart on the first Kick */
+ psDevInfo->bSaveStart = IMG_FALSE;
+
+ RGXSetFirmwareAddress(ppPreAddr,
+ psDevInfo->psStartTimeMemDesc,
+ sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+ RFW_FWADDR_NOREF_FLAG);
+ }
+ else
+ {
+ ppPreAddr->ui32Addr = 0;
+ }
+ }
+
+ if (ppPostAddr != NULL && ppUpdate != NULL)
+ {
+ if (psDevInfo->bSaveEnd)
+ {
+ RGXSetFirmwareAddress(ppPostAddr,
+ psDevInfo->psEndTimeMemDesc,
+ sizeof(IMG_UINT64) * psDevInfo->ui32ActiveQueryId,
+ RFW_FWADDR_NOREF_FLAG);
+
+ psDevInfo->aui32ScheduledOnId[psDevInfo->ui32ActiveQueryId]++;
+
+ RGXSetFirmwareAddress(ppUpdate,
+ psDevInfo->psCompletedMemDesc,
+ sizeof(IMG_UINT32) * psDevInfo->ui32ActiveQueryId,
+ RFW_FWADDR_NOREF_FLAG);
+ }
+ else
+ {
+ ppUpdate->ui32Addr = 0;
+ ppPostAddr->ui32Addr = 0;
+ }
+ }
+}
+
+
+/******************************************************************************
+ End of file (rgxtimerquery.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.h b/drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.h
new file mode 100644
index 00000000000000..fe65f5fe99b028
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtimerquery.h
@@ -0,0 +1,135 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Timer queries
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Timer queries functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(_RGX_TIMERQUERIES_H_)
+#define _RGX_TIMERQUERIES_H_
+
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "device.h"
+#include "rgxdevice.h"
+
+#include "connection_server.h"
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXBeginTimerQuery
+@Description Opens a new timer query.
+
+@Input          ui32QueryId    an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Return PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXBeginTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId);
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXEndTimerQuery
+@Description    Closes a timer query.
+
+                The lack of a ui32QueryId argument reflects the fact that
+                overlapping queries cannot be open.
+@Return PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXEndTimerQueryKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode);
+
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXQueryTimer
+@Description Queries the state of the specified timer
+
+@Input          ui32QueryId    an identifier in the range [0, RGX_MAX_TIMER_QUERIES - 1]
+@Out pui64StartTime
+@Out pui64EndTime
+@Return PVRSRV_OK on success.
+                PVRSRV_ERROR_RESOURCE_UNAVAILABLE if the device is still busy
+                with operations from the queried period;
+                another error code otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXQueryTimerKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32QueryId,
+ IMG_UINT64 * pui64StartTime,
+ IMG_UINT64 * pui64EndTime);
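+
+/* Illustrative call pattern (a sketch, not part of the bridge interface;
+ * ui64Start and ui64End are caller-provided IMG_UINT64 variables):
+ *
+ *     PVRSRVRGXBeginTimerQueryKM(psConnection, psDeviceNode, 0);
+ *     ... kick the GPU work to be measured ...
+ *     PVRSRVRGXEndTimerQueryKM(psConnection, psDeviceNode);
+ *
+ *     while (PVRSRVRGXQueryTimerKM(psConnection, psDeviceNode, 0,
+ *                                  &ui64Start, &ui64End) ==
+ *            PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ *     {
+ *         ... wait and retry until the queried period has completed ...
+ *     }
+ */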
+
+
+/**************************************************************************/ /*!
+@Function PVRSRVRGXCurrentTime
+@Description Returns the current state of the timer used in timer queries
+@Input          psDeviceNode   Device node.
+@Out pui64Time
+@Return PVRSRV_OK on success.
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVRGXCurrentTime(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT64 * pui64Time);
+
+
+/******************************************************************************
+ NON BRIDGED/EXPORTED interface
+******************************************************************************/
+
+/* Write the timestamp command from the helper */
+void
+RGXWriteTimestampCommand(IMG_PBYTE * ppui8CmdPtr,
+ RGXFWIF_CCB_CMD_TYPE eCmdType,
+ PRGXFWIF_TIMESTAMP_ADDR pAddr);
+
+/* Pass the relevant data from the Kick to the helper */
+void
+RGX_GetTimestampCmdHelper(PVRSRV_RGXDEV_INFO * psDevInfo,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPreAddr,
+ PRGXFWIF_TIMESTAMP_ADDR * ppPostAddr,
+ PRGXFWIF_UFO_ADDR * ppUpdate);
+
+#endif /* _RGX_TIMERQUERIES_H_ */
+
+/******************************************************************************
+ End of file (rgxtimerquery.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtransfer.c b/drivers/gpu/drm/img-rogue/1.10/rgxtransfer.c
new file mode 100644
index 00000000000000..ae235d3c91b7c8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtransfer.c
@@ -0,0 +1,1688 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific transfer queue routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pdump_km.h"
+#include "rgxdevice.h"
+#include "rgxccb.h"
+#include "rgxutils.h"
+#include "rgxfwutils.h"
+#include "rgxtransfer.h"
+#include "rgx_tq_shared.h"
+#include "rgxmem.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pvr_debug.h"
+#include "pvrsrv.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgx_memallocflags.h"
+#include "rgxtimerquery.h"
+#include "rgxhwperf.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "sync_server.h"
+#include "sync_internal.h"
+#include "sync.h"
+#include "rgx_bvnc_defs_km.h"
+
+#if defined(SUPPORT_BUFFER_SYNC)
+#include "pvr_buffer_sync.h"
+#endif
+
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+
+/* Enable this to dump the compiled list of UFOs prior to kick call */
+#define ENABLE_TQ_UFO_DUMP 0
+
+//#define TRANSFER_CHECKPOINT_DEBUG 1
+
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+#define CHKPT_DBG(X) PVR_DPF(X)
+#else
+#define CHKPT_DBG(X)
+#endif
+
+typedef struct {
+ DEVMEM_MEMDESC *psFWContextStateMemDesc;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_3D_DATA;
+
+
+typedef struct {
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonContext;
+ IMG_UINT32 ui32Priority;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+} RGX_SERVER_TQ_2D_DATA;
+
+struct _RGX_SERVER_TQ_CONTEXT_ {
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ DEVMEM_MEMDESC *psFWFrameworkMemDesc;
+ IMG_UINT32 ui32Flags;
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_2D (1<<0)
+#define RGX_SERVER_TQ_CONTEXT_FLAGS_3D (1<<1)
+ RGX_SERVER_TQ_3D_DATA s3DData;
+ RGX_SERVER_TQ_2D_DATA s2DData;
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync;
+ DLLIST_NODE sListNode;
+ ATOMIC_T hIntJobRef;
+ IMG_UINT32 ui32PDumpFlags;
+ /* per-prepare sync address lists */
+ SYNC_ADDR_LIST asSyncAddrListFence[TQ_MAX_PREPARES_PER_SUBMIT];
+ SYNC_ADDR_LIST asSyncAddrListUpdate[TQ_MAX_PREPARES_PER_SUBMIT];
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ POS_LOCK hLock;
+#endif
+};
+
+/*
+ Static functions used by transfer context code
+*/
+static PVRSRV_ERROR _Create3DTransferContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_TQ_3D_DATA *ps3DData)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ PVRSRV_ERROR eError;
+ IMG_UINT ui3DRegISPStateStoreSize = 0;
+ IMG_UINT uiNumISPStoreRegs = 1; /* default value 1 expected */
+ /*
+ Allocate device memory for the firmware GPU context suspend state.
+ Note: the FW reads/writes the state to memory by accessing the GPU register interface.
+ */
+ PDUMPCOMMENT("Allocate RGX firmware TQ/3D context suspend state");
+
+	if (!RGX_IS_FEATURE_SUPPORTED(psDevInfo, XE_MEMORY_HIERARCHY))
+ {
+ uiNumISPStoreRegs = psDeviceNode->pfnGetDeviceFeatureValue(psDeviceNode,
+ RGX_FEATURE_NUM_ISP_IPP_PIPES_IDX);
+ }
+
+ /* Calculate the size of the 3DCTX ISP state */
+ ui3DRegISPStateStoreSize = sizeof(RGXFWIF_3DCTX_STATE) +
+ uiNumISPStoreRegs * sizeof(((RGXFWIF_3DCTX_STATE *)0)->au3DReg_ISP_STORE[0]);
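+	/* For illustration: on a core with, say, 8 ISP/IPP pipes this state
+	 * carries 8 au3DReg_ISP_STORE entries, while cores with the XE memory
+	 * hierarchy keep the default single entry (example figure only).
+	 */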
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ ps3DData->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+ "rogue-tq3d");
+ if (IS_ERR(ps3DData->psBufferSyncContext))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to create buffer_sync context (err=%ld)",
+ __func__, PTR_ERR(ps3DData->psBufferSyncContext)));
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_buffer_sync_context_create;
+ }
+#endif
+
+ eError = DevmemFwAllocate(psDevInfo,
+ ui3DRegISPStateStoreSize,
+ RGX_FWCOMCTX_ALLOCFLAGS,
+ "FwTQ3DContext",
+ &ps3DData->psFWContextStateMemDesc);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextswitchstate;
+ }
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_3D,
+ RGXFWIF_DM_3D,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ ps3DData->psFWContextStateMemDesc,
+ RGX_TQ3D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &ps3DData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+
+ PDUMPCOMMENT("Dump 3D context suspend state buffer");
+ DevmemPDumpLoadMem(ps3DData->psFWContextStateMemDesc, 0, sizeof(RGXFWIF_3DCTX_STATE), PDUMP_FLAGS_CONTINUOUS);
+
+ ps3DData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+ DevmemFwFree(psDevInfo, ps3DData->psFWContextStateMemDesc);
+fail_contextswitchstate:
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext);
+ ps3DData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR _Create2DTransferContext(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ DEVMEM_MEMDESC *psFWMemContextMemDesc,
+ IMG_UINT32 ui32Priority,
+ RGX_COMMON_CONTEXT_INFO *psInfo,
+ RGX_SERVER_TQ_2D_DATA *ps2DData)
+{
+ PVRSRV_ERROR eError;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ ps2DData->psBufferSyncContext =
+ pvr_buffer_sync_context_create(psDeviceNode->psDevConfig->pvOSDevice,
+ "rogue-tqtla");
+ if (IS_ERR(ps2DData->psBufferSyncContext))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to create buffer_sync context (err=%ld)",
+ __func__, PTR_ERR(ps2DData->psBufferSyncContext)));
+
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_buffer_sync_context_create;
+ }
+#endif
+
+ eError = FWCommonContextAllocate(psConnection,
+ psDeviceNode,
+ REQ_TYPE_TQ_2D,
+ RGXFWIF_DM_2D,
+ NULL,
+ 0,
+ psFWMemContextMemDesc,
+ NULL,
+ RGX_TQ2D_CCB_SIZE_LOG2,
+ ui32Priority,
+ psInfo,
+ &ps2DData->psServerCommonContext);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_contextalloc;
+ }
+
+ ps2DData->ui32Priority = ui32Priority;
+ return PVRSRV_OK;
+
+fail_contextalloc:
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext);
+ ps2DData->psBufferSyncContext = NULL;
+fail_buffer_sync_context_create:
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+
+static PVRSRV_ERROR _Destroy2DTransferContext(RGX_SERVER_TQ_2D_DATA *ps2DData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ ps2DData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_2D,
+ ui32PDumpFlags);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+ FWCommonContextFree(ps2DData->psServerCommonContext);
+ ps2DData->psServerCommonContext = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(ps2DData->psBufferSyncContext);
+ ps2DData->psBufferSyncContext = NULL;
+#endif
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _Destroy3DTransferContext(RGX_SERVER_TQ_3D_DATA *ps3DData,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ PVRSRV_CLIENT_SYNC_PRIM *psCleanupSync,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ /* Check if the FW has finished with this resource ... */
+ eError = RGXFWRequestCommonContextCleanUp(psDeviceNode,
+ ps3DData->psServerCommonContext,
+ psCleanupSync,
+ RGXFWIF_DM_3D,
+ ui32PDumpFlags);
+ if (eError == PVRSRV_ERROR_RETRY)
+ {
+ return eError;
+ }
+ else if (eError != PVRSRV_OK)
+ {
+ PVR_LOG(("%s: Unexpected error from RGXFWRequestCommonContextCleanUp (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ return eError;
+ }
+
+	/* ... it has, so we can free its resources */
+ DevmemFwFree(psDeviceNode->pvDevice, ps3DData->psFWContextStateMemDesc);
+ FWCommonContextFree(ps3DData->psServerCommonContext);
+ ps3DData->psServerCommonContext = NULL;
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ pvr_buffer_sync_context_destroy(ps3DData->psBufferSyncContext);
+ ps3DData->psBufferSyncContext = NULL;
+#endif
+
+ return PVRSRV_OK;
+}
+
+
+/*
+ * PVRSRVCreateTransferContextKM
+ */
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext)
+{
+ RGX_SERVER_TQ_CONTEXT *psTransferContext;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ DEVMEM_MEMDESC *psFWMemContextMemDesc = RGXGetFWMemDescFromMemoryContextHandle(hMemCtxPrivData);
+ RGX_COMMON_CONTEXT_INFO sInfo;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Allocate the server side structure */
+ *ppsTransferContext = NULL;
+ psTransferContext = OSAllocZMem(sizeof(*psTransferContext));
+ if (psTransferContext == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&psTransferContext->hLock, LOCK_TYPE_NONE);
+
+	if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create lock (%s)",
+ __func__,
+ PVRSRVGetErrorStringKM(eError)));
+ goto fail_createlock;
+ }
+#endif
+
+ psTransferContext->psDeviceNode = psDeviceNode;
+
+ /* Allocate cleanup sync */
+ eError = SyncPrimAlloc(psDeviceNode->hSyncPrimContext,
+ &psTransferContext->psCleanupSync,
+ "transfer context cleanup");
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate cleanup sync (0x%x)",
+				__func__,
+				eError));
+ goto fail_syncalloc;
+ }
+
+ /*
+ * Create the FW framework buffer
+ */
+ eError = PVRSRVRGXFrameworkCreateKM(psDeviceNode,
+ &psTransferContext->psFWFrameworkMemDesc,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to allocate firmware GPU framework state (%u)",
+				__func__,
+				eError));
+ goto fail_frameworkcreate;
+ }
+
+ /* Copy the Framework client data into the framework buffer */
+ eError = PVRSRVRGXFrameworkCopyCommand(psTransferContext->psFWFrameworkMemDesc,
+ pabyFrameworkCommand,
+ ui32FrameworkCommandSize);
+ if (eError != PVRSRV_OK)
+ {
+		PVR_DPF((PVR_DBG_ERROR, "%s: Failed to populate the framework buffer (%u)",
+				__func__,
+				eError));
+ goto fail_frameworkcopy;
+ }
+
+ sInfo.psFWFrameworkMemDesc = psTransferContext->psFWFrameworkMemDesc;
+
+ eError = _Create3DTransferContext(psConnection,
+ psDeviceNode,
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psTransferContext->s3DData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dtransfercontext;
+ }
+ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))
+ {
+ eError = _Create2DTransferContext(psConnection,
+ psDeviceNode,
+ psFWMemContextMemDesc,
+ ui32Priority,
+ &sInfo,
+ &psTransferContext->s2DData);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_2dtransfercontext;
+ }
+ psTransferContext->ui32Flags |= RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+ }
+
+	OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+	dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+	OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	*ppsTransferContext = psTransferContext;
+
+ return PVRSRV_OK;
+
+
+fail_2dtransfercontext:
+	if (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA))
+ {
+ _Destroy3DTransferContext(&psTransferContext->s3DData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync,
+ psTransferContext->ui32PDumpFlags);
+ }
+
+fail_3dtransfercontext:
+fail_frameworkcopy:
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+fail_frameworkcreate:
+ SyncPrimFree(psTransferContext->psCleanupSync);
+fail_syncalloc:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psTransferContext->hLock);
+fail_createlock:
+#endif
+ OSFreeMem(psTransferContext);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ *ppsTransferContext = NULL;
+ return eError;
+}
+
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psTransferContext->psDeviceNode->pvDevice;
+ IMG_UINT32 i;
+
+	/* Remove the node from the list before calling destroy, as a successful
+	 * destroy will invalidate the node; it must be re-added if destroy fails.
+	 */
+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+ dllist_remove_node(&(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+
+	if ((psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+	    (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ eError = _Destroy2DTransferContext(&psTransferContext->s2DData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroy2d;
+ }
+ /* We've freed the 2D context, don't try to free it again */
+ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_2D;
+ }
+
+ if (psTransferContext->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+ {
+ eError = _Destroy3DTransferContext(&psTransferContext->s3DData,
+ psTransferContext->psDeviceNode,
+ psTransferContext->psCleanupSync,
+ PDUMP_FLAGS_CONTINUOUS);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_destroy3d;
+ }
+ /* We've freed the 3D context, don't try to free it again */
+ psTransferContext->ui32Flags &= ~RGX_SERVER_TQ_CONTEXT_FLAGS_3D;
+ }
+
+ /* free any resources within the per-prepare UFO address stores */
+	for (i = 0; i < TQ_MAX_PREPARES_PER_SUBMIT; i++)
+ {
+ SyncAddrListDeinit(&psTransferContext->asSyncAddrListFence[i]);
+ SyncAddrListDeinit(&psTransferContext->asSyncAddrListUpdate[i]);
+ }
+
+ DevmemFwFree(psDevInfo, psTransferContext->psFWFrameworkMemDesc);
+ SyncPrimFree(psTransferContext->psCleanupSync);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(psTransferContext->hLock);
+#endif
+
+ OSFreeMem(psTransferContext);
+
+ return PVRSRV_OK;
+
+fail_destroy3d:
+
+fail_destroy2d:
+ OSWRLockAcquireWrite(psDevInfo->hTransferCtxListLock);
+ dllist_add_to_tail(&(psDevInfo->sTransferCtxtListHead), &(psTransferContext->sListNode));
+ OSWRLockReleaseWrite(psDevInfo->hTransferCtxListLock);
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/*
+ * PVRSRVSubmitTQ3DKickKM
+ */
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32PrepareCount,
+ IMG_UINT32 *paui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientFenceSyncOffset,
+ IMG_UINT32 **papaui32ClientFenceValue,
+ IMG_UINT32 *paui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientUpdateSyncOffset,
+ IMG_UINT32 **papaui32ClientUpdateValue,
+ IMG_UINT32 *paui32ServerSyncCount,
+ IMG_UINT32 **papaui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ***papapsServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE i2DUpdateTimeline,
+ PVRSRV_FENCE *pi2DUpdateFence,
+ PVRSRV_TIMELINE i3DUpdateTimeline,
+ PVRSRV_FENCE *pi3DUpdateFence,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 *paui32FWCommandSize,
+ IMG_UINT8 **papaui8FWCommand,
+ IMG_UINT32 *pui32TQPrepareFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode = psTransferContext->psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ RGX_CCB_CMD_HELPER_DATA *pas3DCmdHelper;
+ RGX_CCB_CMD_HELPER_DATA *pas2DCmdHelper;
+ IMG_UINT32 ui323DCmdCount = 0;
+ IMG_UINT32 ui322DCmdCount = 0;
+ IMG_UINT32 ui323DCmdLast = 0;
+ IMG_UINT32 ui322DCmdLast = 0;
+ IMG_UINT32 ui323DCmdOffset = 0;
+ IMG_UINT32 ui322DCmdOffset = 0;
+ IMG_UINT32 ui32PDumpFlags = PDUMP_FLAGS_NONE;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32IntClientFenceCount = 0;
+ IMG_UINT32 *paui32IntFenceValue = NULL;
+ IMG_UINT32 ui32IntClientUpdateCount = 0;
+ IMG_UINT32 *paui32IntUpdateValue = NULL;
+ SYNC_ADDR_LIST *psSyncAddrListFence;
+ SYNC_ADDR_LIST *psSyncAddrListUpdate;
+ IMG_UINT64 uiCheckFenceUID = 0;
+ IMG_UINT64 ui2DUpdateFenceUID = 0;
+ IMG_UINT64 ui3DUpdateFenceUID = 0;
+ IMG_BOOL bCCBStateOpen = IMG_FALSE;
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PSYNC_CHECKPOINT ps2DUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT ps3DUpdateSyncCheckpoint = NULL;
+ PSYNC_CHECKPOINT *apsFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32FenceSyncCheckpointCount = 0;
+ IMG_UINT32 *pui322DIntAllocatedUpdateValues = NULL;
+ IMG_UINT32 *pui323DIntAllocatedUpdateValues = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *ps2DFenceTimelineUpdateSync = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM *ps3DFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 ui322DFenceTimelineUpdateValue = 0;
+ IMG_UINT32 ui323DFenceTimelineUpdateValue = 0;
+ void *pv2DUpdateFenceFinaliseData = NULL;
+ void *pv3DUpdateFenceFinaliseData = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+#if defined(SUPPORT_BUFFER_SYNC)
+ PSYNC_CHECKPOINT psBufferUpdateSyncCheckpoint = NULL;
+ struct pvr_buffer_sync_append_data *psBufferSyncData = NULL;
+ PSYNC_CHECKPOINT *apsBufferFenceSyncCheckpoints = NULL;
+ IMG_UINT32 ui32BufferFenceSyncCheckpointCount = 0;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eError2;
+ PVRSRV_FENCE i2DUpdateFence = PVRSRV_NO_FENCE;
+ PVRSRV_FENCE i3DUpdateFence = PVRSRV_NO_FENCE;
+ IMG_UINT32 ui32IntJobRef;
+
+ PRGXFWIF_TIMESTAMP_ADDR pPreAddr;
+ PRGXFWIF_TIMESTAMP_ADDR pPostAddr;
+ PRGXFWIF_UFO_ADDR pRMWUFOAddr;
+
+ IMG_DEV_VIRTADDR sRobustnessResetReason = {0};
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi2DUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (i3DUpdateTimeline != PVRSRV_NO_TIMELINE && !pi3DUpdateFence)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#else /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+	if (i2DUpdateTimeline != PVRSRV_NO_TIMELINE || i3DUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing update timeline (%d, %d) in non-supporting driver",
+ __func__, i2DUpdateTimeline, i3DUpdateTimeline));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (iCheckFence != PVRSRV_NO_FENCE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Providing check fence (%d) in non-supporting driver",
+ __func__, iCheckFence));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ /* Ensure the string is null-terminated (Required for safety) */
+ szFenceName[31] = '\0';
+
+ if ((ui32PrepareCount == 0) || (ui32PrepareCount > TQ_MAX_PREPARES_PER_SUBMIT))
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32SyncPMRCount != 0)
+ {
+ if (!ppsSyncPMRs)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if defined(SUPPORT_BUFFER_SYNC)
+		/* PMR sync is valid only when there is no batching */
+		if (ui32PrepareCount != 1)
+#endif
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psTransferContext->hLock);
+#endif
+
+ ui32IntJobRef = OSAtomicIncrement(&psTransferContext->hIntJobRef);
+
+ /* We can't allocate the required amount of stack space on all consumer architectures */
+ pas3DCmdHelper = OSAllocMem(sizeof(*pas3DCmdHelper) * ui32PrepareCount);
+ if (pas3DCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc3dhelper;
+ }
+ pas2DCmdHelper = OSAllocMem(sizeof(*pas2DCmdHelper) * ui32PrepareCount);
+ if (pas2DCmdHelper == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc2dhelper;
+ }
+
+ /*
+ Ensure we do the right thing for server syncs which cross call boundaries
+ */
+	for (i = 0; i < ui32PrepareCount; i++)
+ {
+ IMG_BOOL bHaveStartPrepare = pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_START;
+ IMG_BOOL bHaveEndPrepare = IMG_FALSE;
+
+ if (bHaveStartPrepare)
+ {
+ IMG_UINT32 k;
+			/*
+				We're at the start of a transfer operation (which might be made
+				up of multiple HW operations) so check if we also have the
+				end of the transfer operation in the batch
+			*/
+			for (k = i; k < ui32PrepareCount; k++)
+ {
+ if (pui32TQPrepareFlags[k] & TQ_PREP_FLAGS_END)
+ {
+ bHaveEndPrepare = IMG_TRUE;
+ break;
+ }
+ }
+
+ if (!bHaveEndPrepare)
+ {
+ /*
+ We don't have the complete command passed in this call
+ so drop the update request. When we get called again with
+ the last HW command in this transfer operation we'll do
+ the update at that point.
+ */
+				for (k = 0; k < paui32ServerSyncCount[i]; k++)
+ {
+ papaui32ServerSyncFlags[i][k] &= ~PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE;
+ }
+ }
+ }
+
+ if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+ {
+ ui323DCmdLast++;
+		}
+		else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
+		         (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ ui322DCmdLast++;
+ }
+ }
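+
+	/*
+		For example (sketch of the intended semantics): in a three-prepare
+		batch flagged {START, 0, END} the END prepare is present, so server
+		sync updates are kept; in a batch flagged {START, 0} the updates are
+		dropped and deferred to the later call that carries the END flag.
+	*/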
+
+
+ /*
+ Init the command helper commands for all the prepares
+ */
+	for (i = 0; i < ui32PrepareCount; i++)
+ {
+ RGX_CLIENT_CCB *psClientCCB;
+ RGX_SERVER_COMMON_CONTEXT *psServerCommonCtx;
+ IMG_CHAR *pszCommandName;
+ RGX_CCB_CMD_HELPER_DATA *psCmdHelper;
+ RGXFWIF_CCB_CMD_TYPE eType;
+ PRGXFWIF_UFO_ADDR *pauiIntFenceUFOAddress = NULL;
+ PRGXFWIF_UFO_ADDR *pauiIntUpdateUFOAddress = NULL;
+#if defined(SUPPORT_BUFFER_SYNC)
+ struct pvr_buffer_sync_context *psBufferSyncContext;
+#endif
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ PVRSRV_FENCE *piUpdateFence = NULL;
+ PVRSRV_TIMELINE iUpdateTimeline = PVRSRV_NO_TIMELINE;
+ void **ppvUpdateFenceFinaliseData = NULL;
+ PSYNC_CHECKPOINT * ppsUpdateSyncCheckpoint = NULL;
+ PVRSRV_CLIENT_SYNC_PRIM **ppsFenceTimelineUpdateSync = NULL;
+ IMG_UINT32 *pui32FenceTimelineUpdateValue = NULL;
+ IMG_UINT32 **ppui32IntAllocatedUpdateValues = NULL;
+ IMG_BOOL bCheckFence = IMG_FALSE;
+ IMG_UINT64 *puiUpdateFenceUID = NULL;
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 3D))
+ {
+ psServerCommonCtx = psTransferContext->s3DData.psServerCommonContext;
+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+ pszCommandName = "TQ-3D";
+ psCmdHelper = &pas3DCmdHelper[ui323DCmdCount++];
+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_3D;
+#if defined(SUPPORT_BUFFER_SYNC)
+ psBufferSyncContext = psTransferContext->s3DData.psBufferSyncContext;
+#endif
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (ui323DCmdCount == 1)
+ {
+ bCheckFence = IMG_TRUE;
+ }
+ if (ui323DCmdCount == ui323DCmdLast)
+ {
+ piUpdateFence = &i3DUpdateFence;
+ iUpdateTimeline = i3DUpdateTimeline;
+ ppvUpdateFenceFinaliseData = &pv3DUpdateFenceFinaliseData;
+ ppsUpdateSyncCheckpoint = &ps3DUpdateSyncCheckpoint;
+ ppsFenceTimelineUpdateSync = &ps3DFenceTimelineUpdateSync;
+ pui32FenceTimelineUpdateValue = &ui323DFenceTimelineUpdateValue;
+ ppui32IntAllocatedUpdateValues = &pui323DIntAllocatedUpdateValues;
+ puiUpdateFenceUID = &ui3DUpdateFenceUID;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ }
+		else if (TQ_PREP_FLAGS_COMMAND_IS(pui32TQPrepareFlags[i], 2D) &&
+		         (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ psServerCommonCtx = psTransferContext->s2DData.psServerCommonContext;
+ psClientCCB = FWCommonContextGetClientCCB(psServerCommonCtx);
+ pszCommandName = "TQ-2D";
+ psCmdHelper = &pas2DCmdHelper[ui322DCmdCount++];
+ eType = RGXFWIF_CCB_CMD_TYPE_TQ_2D;
+#if defined(SUPPORT_BUFFER_SYNC)
+ psBufferSyncContext = psTransferContext->s2DData.psBufferSyncContext;
+#endif
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (ui322DCmdCount == 1)
+ {
+ bCheckFence = IMG_TRUE;
+ }
+ if (ui322DCmdCount == ui322DCmdLast)
+ {
+ piUpdateFence = &i2DUpdateFence;
+ iUpdateTimeline = i2DUpdateTimeline;
+ ppvUpdateFenceFinaliseData = &pv2DUpdateFenceFinaliseData;
+ ppsUpdateSyncCheckpoint = &ps2DUpdateSyncCheckpoint;
+ ppsFenceTimelineUpdateSync = &ps2DFenceTimelineUpdateSync;
+ pui32FenceTimelineUpdateValue = &ui322DFenceTimelineUpdateValue;
+ ppui32IntAllocatedUpdateValues = &pui322DIntAllocatedUpdateValues;
+ puiUpdateFenceUID = &ui2DUpdateFenceUID;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto fail_cmdtype;
+ }
+
+ if (i == 0)
+ {
+ ui32PDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+ PDUMPCOMMENTWITHFLAGS(ui32PDumpFlags,
+ "%s Command Server Submit on FWCtx %08x", pszCommandName, FWCommonContextGetFWAddress(psServerCommonCtx).ui32Addr);
+ psTransferContext->ui32PDumpFlags |= ui32PDumpFlags;
+ }
+ else
+ {
+ IMG_UINT32 ui32NewPDumpFlags = ((pui32TQPrepareFlags[i] & TQ_PREP_FLAGS_PDUMPCONTINUOUS) != 0) ? PDUMP_FLAGS_CONTINUOUS : PDUMP_FLAGS_NONE;
+ if (ui32NewPDumpFlags != ui32PDumpFlags)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR, "%s: Mixing of continuous and non-continuous command in a batch is not permitted", __func__));
+ goto fail_pdumpcheck;
+ }
+ }
+
+ psSyncAddrListFence = &psTransferContext->asSyncAddrListFence[i];
+ ui32IntClientFenceCount = paui32ClientFenceCount[i];
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->sSyncAddrListFence, %d fences)", __func__, ui32IntClientFenceCount));
+ eError = SyncAddrListPopulate(psSyncAddrListFence,
+ ui32IntClientFenceCount,
+ papauiClientFenceUFOSyncPrimBlock[i],
+ papaui32ClientFenceSyncOffset[i]);
+		if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list_fence;
+ }
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+ }
+
+ paui32IntFenceValue = papaui32ClientFenceValue[i];
+ psSyncAddrListUpdate = &psTransferContext->asSyncAddrListUpdate[i];
+ ui32IntClientUpdateCount = paui32ClientUpdateCount[i];
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: SyncAddrListPopulate(psTransferContext->asSyncAddrListUpdate[], %d updates)", __func__, ui32IntClientUpdateCount));
+ eError = SyncAddrListPopulate(psSyncAddrListUpdate,
+ ui32IntClientUpdateCount,
+ papauiClientUpdateUFOSyncPrimBlock[i],
+ papaui32ClientUpdateSyncOffset[i]);
+		if (eError != PVRSRV_OK)
+ {
+ goto fail_populate_sync_addr_list_update;
+ }
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+ }
+ paui32IntUpdateValue = papaui32ClientUpdateValue[i];
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after sync prims) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+ if (ui32SyncPMRCount)
+ {
+#if defined(SUPPORT_BUFFER_SYNC)
+ int err;
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Calling pvr_buffer_sync_resolve_and_create_fences", __func__));
+ err = pvr_buffer_sync_resolve_and_create_fences(psBufferSyncContext,
+ ui32SyncPMRCount,
+ ppsSyncPMRs,
+ paui32SyncPMRFlags,
+ &ui32BufferFenceSyncCheckpointCount,
+ &apsBufferFenceSyncCheckpoints,
+ &psBufferUpdateSyncCheckpoint,
+ &psBufferSyncData);
+ if (err)
+ {
+ eError = (err == -ENOMEM) ? PVRSRV_ERROR_OUT_OF_MEMORY : PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_DPF((PVR_DBG_ERROR, "%s: pvr_buffer_sync_resolve_and_create_fences failed (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+ goto fail_resolve_input_fence;
+ }
+
+ /* Append buffer sync fences */
+ if (ui32BufferFenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d buffer sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>)...", __func__, ui32BufferFenceSyncCheckpointCount, (void*)psSyncAddrListFence , (void*)pauiIntFenceUFOAddress));
+ SyncAddrListAppendAndDeRefCheckpoints(psSyncAddrListFence,
+ ui32BufferFenceSyncCheckpointCount,
+ apsBufferFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32BufferFenceSyncCheckpointCount;
+ }
+
+ if (psBufferUpdateSyncCheckpoint)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 buffer sync checkpoint<%p> to TQ Update (&psTransferContext->sSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)psBufferUpdateSyncCheckpoint, (void*)&psTransferContext->sSyncAddrListUpdate , (void*)pauiIntUpdateUFOAddress));
+ /* Append the update (from output fence) */
+ SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+ 1,
+ &psBufferUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after buffer_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#else /* defined(SUPPORT_BUFFER_SYNC) */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Buffer sync not supported but got %u buffers", __func__, ui32SyncPMRCount));
+ PVR_DPF((PVR_DBG_ERROR, "%s: <--EXIT(%d)", __func__, PVRSRV_ERROR_INVALID_PARAMS));
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return PVRSRV_ERROR_INVALID_PARAMS;
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (bCheckFence)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointResolveFence (iCheckFence=%d), psTransferContext->psDeviceNode->hSyncCheckpointContext=<%p>...", __func__, iCheckFence, (void*)psTransferContext->psDeviceNode->hSyncCheckpointContext));
+ /* Resolve the sync checkpoints that make up the input fence */
+ eError = SyncCheckpointResolveFence(psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ iCheckFence,
+ &ui32FenceSyncCheckpointCount,
+ &apsFenceSyncCheckpoints,
+ &uiCheckFenceUID);
+ if (eError != PVRSRV_OK)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, returned ERROR (eError=%d)", __func__, eError));
+ goto fail_resolve_input_fence;
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: ...done, fence %d contained %d checkpoints (apsFenceSyncCheckpoints=<%p>)", __func__, iCheckFence, ui32FenceSyncCheckpointCount, (void*)apsFenceSyncCheckpoints));
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ IMG_UINT32 ii;
+ for (ii=0; ii<ui32FenceSyncCheckpointCount; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(apsFenceSyncCheckpoints + ii);
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: apsFenceSyncCheckpoints[%d]=<%p>", __func__, ii, (void*)psNextCheckpoint));
+ }
+ }
+#endif
+ }
+ /* Create the output fence (if required) */
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: calling SyncCheckpointCreateFence (piUpdateFence=%p, iUpdateTimeline=%d, psTranserContext->psDeviceNode->hSyncCheckpointContext=<%p>)", __func__, piUpdateFence, iUpdateTimeline, (void*)psTransferContext->psDeviceNode->hSyncCheckpointContext));
+ eError = SyncCheckpointCreateFence(psTransferContext->psDeviceNode,
+ szFenceName,
+ iUpdateTimeline,
+ psTransferContext->psDeviceNode->hSyncCheckpointContext,
+ piUpdateFence,
+ puiUpdateFenceUID,
+ ppvUpdateFenceFinaliseData,
+ ppsUpdateSyncCheckpoint,
+ (void*)ppsFenceTimelineUpdateSync,
+ pui32FenceTimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCreateFence failed (%d)", __func__, eError));
+ goto fail_create_output_fence;
+ }
+
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: returned from SyncCheckpointCreateFence (piUpdateFence=%p)", __func__, piUpdateFence));
+
+ /* Append the sync prim update for the timeline (if required) */
+ if (*ppsFenceTimelineUpdateSync)
+ {
+ IMG_UINT32 *pui32TimelineUpdateWp = NULL;
+
+ /* Allocate memory to hold the list of update values (including our timeline update) */
+ *ppui32IntAllocatedUpdateValues = OSAllocMem(sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+ if (!*ppui32IntAllocatedUpdateValues)
+ {
+ /* Failed to allocate memory */
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc_update_values_mem;
+ }
+ OSCachedMemSet(*ppui32IntAllocatedUpdateValues, 0xbb, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount+1));
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferUpdateSyncCheckpoint)
+ {
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * (ui32IntClientUpdateCount-1));
+ pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + (ui32IntClientUpdateCount-1);
+ }
+ else
+#endif
+ {
+ /* Copy the update values into the new memory, then append our timeline update value */
+ OSCachedMemCopy(*ppui32IntAllocatedUpdateValues, paui32IntUpdateValue, sizeof(**ppui32IntAllocatedUpdateValues) * ui32IntClientUpdateCount);
+ pui32TimelineUpdateWp = *ppui32IntAllocatedUpdateValues + ui32IntClientUpdateCount;
+ }
+			CHKPT_DBG((PVR_DBG_ERROR, "%s: Appending the additional update value 0x%x", __func__, *pui32FenceTimelineUpdateValue));
+ /* Now set the additional update value */
+ *pui32TimelineUpdateWp = *pui32FenceTimelineUpdateValue;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Now append the timeline sync prim addr to the transfer context update list */
+ SyncAddrListAppendSyncPrim(psSyncAddrListUpdate,
+ *ppsFenceTimelineUpdateSync);
+ ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+ if (ui32IntClientUpdateCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)*ppui32IntAllocatedUpdateValues;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: *ppui32IntAllocatedUpdateValues[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ /* Ensure paui32IntUpdateValue is now pointing to our new array of update values */
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: set paui32IntUpdateValue<%p> to point to *ppui32IntAllocatedUpdateValues<%p>", __func__, (void*)paui32IntUpdateValue, (void*)*ppui32IntAllocatedUpdateValues));
+ paui32IntUpdateValue = *ppui32IntAllocatedUpdateValues;
+ }
+ }
+
+ if (ui32FenceSyncCheckpointCount)
+ {
+ /* Append the checks (from input fence) */
+ if (ui32FenceSyncCheckpointCount > 0)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: Append %d sync checkpoints to TQ Fence (psSyncAddrListFence=<%p>)...", __func__, ui32FenceSyncCheckpointCount, (void*)psSyncAddrListFence));
+ SyncAddrListAppendCheckpoints(psSyncAddrListFence,
+ ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ if (!pauiIntFenceUFOAddress)
+ {
+ pauiIntFenceUFOAddress = psSyncAddrListFence->pasFWAddrs;
+ }
+ ui32IntClientFenceCount += ui32FenceSyncCheckpointCount;
+ }
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+ if (ui32IntClientFenceCount > 0)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntFenceUFOAddress;
+
+ for (iii=0; iii<ui32IntClientFenceCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: psSyncAddrListFence->pasFWAddrs[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ if (iUpdateTimeline != PVRSRV_NO_TIMELINE && *ppsUpdateSyncCheckpoint)
+ {
+ /* Append the update (from output fence) */
+		CHKPT_DBG((PVR_DBG_ERROR, "%s: Append 1 sync checkpoint to TQ Update (psSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>)...", __func__, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ SyncAddrListAppendCheckpoints(psSyncAddrListUpdate,
+ 1,
+ ppsUpdateSyncCheckpoint);
+ if (!pauiIntUpdateUFOAddress)
+ {
+ pauiIntUpdateUFOAddress = psSyncAddrListUpdate->pasFWAddrs;
+ }
+ ui32IntClientUpdateCount++;
+#if defined(TRANSFER_CHECKPOINT_DEBUG)
+ {
+ IMG_UINT32 iii;
+ IMG_UINT32 *pui32Tmp = (IMG_UINT32*)pauiIntUpdateUFOAddress;
+
+ for (iii=0; iii<ui32IntClientUpdateCount; iii++)
+ {
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: pauiIntUpdateUFOAddress[%d](<%p>) = 0x%x", __func__, iii, (void*)pui32Tmp, *pui32Tmp));
+ pui32Tmp++;
+ }
+ }
+#endif
+ }
+ CHKPT_DBG((PVR_DBG_ERROR, "%s: (after pvr_sync) ui32IntClientFenceCount=%d, ui32IntClientUpdateCount=%d", __func__, ui32IntClientFenceCount, ui32IntClientUpdateCount));
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if (ENABLE_TQ_UFO_DUMP == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: dumping TQ fence/updates syncs...", __func__));
+ {
+ IMG_UINT32 ii;
+ PRGXFWIF_UFO_ADDR *psTmpIntFenceUFOAddress = pauiIntFenceUFOAddress;
+ IMG_UINT32 *pui32TmpIntFenceValue = paui32IntFenceValue;
+ PRGXFWIF_UFO_ADDR *psTmpIntUpdateUFOAddress = pauiIntUpdateUFOAddress;
+ IMG_UINT32 *pui32TmpIntUpdateValue = paui32IntUpdateValue;
+
+ /* Dump Fence syncs and Update syncs */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ fence syncs (&psTransferContext->asSyncAddrListFence=<%p>, pauiIntFenceUFOAddress=<%p>):", __func__, ui32IntClientFenceCount, (void*)&psTransferContext->asSyncAddrListFence, (void*)pauiIntFenceUFOAddress));
+ for (ii=0; ii<ui32IntClientFenceCount; ii++)
+ {
+ if (psTmpIntFenceUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, CheckValue=%d(0x%x)", __func__, ii+1, ui32IntClientFenceCount, (void*)psTmpIntFenceUFOAddress, psTmpIntFenceUFOAddress->ui32Addr, *pui32TmpIntFenceValue, *pui32TmpIntFenceValue));
+ pui32TmpIntFenceValue++;
+ }
+ psTmpIntFenceUFOAddress++;
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: Prepared %d TQ update syncs (&psTransferContext->asSyncAddrListUpdate=<%p>, pauiIntUpdateUFOAddress=<%p>):", __func__, ui32IntClientUpdateCount, (void*)&psTransferContext->asSyncAddrListUpdate, (void*)pauiIntUpdateUFOAddress));
+ for (ii=0; ii<ui32IntClientUpdateCount; ii++)
+ {
+ if (psTmpIntUpdateUFOAddress->ui32Addr & 0x1)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=PVRSRV_SYNC_CHECKPOINT_SIGNALLED", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: %d/%d<%p>. FWAddr=0x%x, UpdateValue=%d", __func__, ii+1, ui32IntClientUpdateCount, (void*)psTmpIntUpdateUFOAddress, psTmpIntUpdateUFOAddress->ui32Addr, *pui32TmpIntUpdateValue));
+ pui32TmpIntUpdateValue++;
+ }
+ psTmpIntUpdateUFOAddress++;
+ }
+ }
+#endif
+
+ RGX_GetTimestampCmdHelper((PVRSRV_RGXDEV_INFO*) psTransferContext->psDeviceNode->pvDevice,
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr);
+
+ /*
+ Create the command helper data for this command
+ */
+ eError = RGXCmdHelperInitCmdCCB(psClientCCB,
+ ui32IntClientFenceCount,
+ pauiIntFenceUFOAddress,
+ paui32IntFenceValue,
+ ui32IntClientUpdateCount,
+ pauiIntUpdateUFOAddress,
+ paui32IntUpdateValue,
+ paui32ServerSyncCount[i],
+ papaui32ServerSyncFlags[i],
+ SYNC_FLAG_MASK_ALL,
+ papapsServerSyncs[i],
+ paui32FWCommandSize[i],
+ papaui8FWCommand[i],
+ & pPreAddr,
+ & pPostAddr,
+ & pRMWUFOAddr,
+ eType,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ ui32PDumpFlags,
+ NULL,
+ pszCommandName,
+ bCCBStateOpen,
+ psCmdHelper,
+ sRobustnessResetReason);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_initcmd;
+ }
+ }
+
+ /*
+ Acquire space for all the commands in one go
+ */
+ if (ui323DCmdCount)
+ {
+ eError = RGXCmdHelperAcquireCmdCCB(ui323DCmdCount,
+ &pas3DCmdHelper[0]);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_3dcmdacquire;
+ }
+ }
+
+ if (ui322DCmdCount)
+ {
+ eError = RGXCmdHelperAcquireCmdCCB(ui322DCmdCount,
+ &pas2DCmdHelper[0]);
+ if (eError != PVRSRV_OK)
+ {
+ if (ui323DCmdCount)
+ {
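+				/* The 3D acquire already succeeded, so zero both command
+				 * counts: neither command is then released or kicked below,
+				 * and the failure held in eError is picked up by the deferred
+				 * check performed after the kicks. */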
+ ui323DCmdCount = 0;
+ ui322DCmdCount = 0;
+ }
+ else
+ {
+ goto fail_2dcmdacquire;
+ }
+ }
+ }
+
+ /*
+ We should acquire the kernel CCB(s) space here as the schedule could fail
+ and we would have to roll back all the syncs
+ */
+
+	/*
+		Only do the command helper release (which takes the server sync
+		operations) if the acquire succeeded
+	*/
+ if (ui323DCmdCount)
+ {
+ ui323DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui323DCmdCount,
+ &pas3DCmdHelper[0],
+ "TQ_3D",
+ FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr);
+ }
+
+ if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ ui322DCmdOffset = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+ RGXCmdHelperReleaseCmdCCB(ui322DCmdCount,
+ &pas2DCmdHelper[0],
+ "TQ_2D",
+ FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr);
+ }
+
+ if (ui323DCmdCount)
+ {
+ RGXFWIF_KCCB_CMD s3DKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel 3D CCB command. */
+ s3DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s3DData.psServerCommonContext);
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s3DData.psServerCommonContext));
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+ s3DKCCBCmd.uCmdData.sCmdKickData.ui32WorkEstCmdHeaderOffset = 0;
+ HTBLOGK(HTB_SF_MAIN_KICK_3D,
+ s3DKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui323DCmdOffset);
+ RGX_HWPERF_HOST_ENQ(psTransferContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_TQ3D,
+ uiCheckFenceUID,
+ ui3DUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
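+		/* Submit the kick, retrying for up to MAX_HW_TIME_US if the firmware
+		 * cannot accept it yet (e.g. the kernel CCB is temporarily full and
+		 * RGXScheduleCommand returns PVRSRV_ERROR_RETRY). */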
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_3D,
+ &s3DKCCBCmd,
+ sizeof(s3DKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ3D);
+#endif
+ }
+
+ if ((ui322DCmdCount) && (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ RGXFWIF_KCCB_CMD s2DKCCBCmd;
+ IMG_UINT32 ui32FWCtx = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext).ui32Addr;
+
+ /* Construct the kernel 2D CCB command. */
+ s2DKCCBCmd.eCmdType = RGXFWIF_KCCB_CMD_KICK;
+ s2DKCCBCmd.uCmdData.sCmdKickData.psContext = FWCommonContextGetFWAddress(psTransferContext->s2DData.psServerCommonContext);
+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32CWoffUpdate = RGXGetHostWriteOffsetCCB(FWCommonContextGetClientCCB(psTransferContext->s2DData.psServerCommonContext));
+ s2DKCCBCmd.uCmdData.sCmdKickData.ui32NumCleanupCtl = 0;
+
+ HTBLOGK(HTB_SF_MAIN_KICK_2D,
+ s2DKCCBCmd.uCmdData.sCmdKickData.psContext,
+ ui322DCmdOffset);
+ RGX_HWPERF_HOST_ENQ(psTransferContext,
+ OSGetCurrentClientProcessIDKM(),
+ ui32FWCtx,
+ ui32ExtJobRef,
+ ui32IntJobRef,
+ RGX_HWPERF_KICK_TYPE_TQ2D,
+ uiCheckFenceUID,
+ ui2DUpdateFenceUID,
+ NO_DEADLINE,
+ NO_CYCEST);
+
+ LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+ {
+ eError2 = RGXScheduleCommand(psDeviceNode->pvDevice,
+ RGXFWIF_DM_2D,
+ &s2DKCCBCmd,
+ sizeof(s2DKCCBCmd),
+ ui32ClientCacheOpSeqNum,
+ ui32PDumpFlags);
+ if (eError2 != PVRSRV_ERROR_RETRY)
+ {
+ break;
+ }
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+ } END_LOOP_UNTIL_TIMEOUT();
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+ RGXHWPerfFTraceGPUEnqueueEvent(psDeviceNode->pvDevice,
+ ui32FWCtx, ui32IntJobRef, RGX_HWPERF_KICK_TYPE_TQ2D);
+#endif
+ }
+
+ /*
+ * Now check eError (which may have returned an error from our earlier calls
+ * to RGXCmdHelperAcquireCmdCCB) - we needed to process any flush command first
+ * so we check it now...
+ */
+	if (eError != PVRSRV_OK)
+ {
+ goto fail_2dcmdacquire;
+ }
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+#if defined(NO_HARDWARE)
+ /* If NO_HARDWARE, signal the output fence's sync checkpoint and sync prim */
+ if (ps2DUpdateSyncCheckpoint)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Signalling TLA NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps2DUpdateSyncCheckpoint, SyncCheckpointGetId(ps2DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps2DUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(ps2DUpdateSyncCheckpoint);
+ }
+ if (ps2DFenceTimelineUpdateSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Updating TLA NOHW sync prim<%p> to %d", __func__, (void*)ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(ps2DFenceTimelineUpdateSync, ui322DFenceTimelineUpdateValue);
+ }
+ if (ps3DUpdateSyncCheckpoint)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Signalling TQ3D NOHW sync checkpoint<%p>, ID:%d, FwAddr=0x%x", __func__, (void*)ps3DUpdateSyncCheckpoint, SyncCheckpointGetId(ps3DUpdateSyncCheckpoint), SyncCheckpointGetFirmwareAddr(ps3DUpdateSyncCheckpoint)));
+ SyncCheckpointSignalNoHW(ps3DUpdateSyncCheckpoint);
+ }
+ if (ps3DFenceTimelineUpdateSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Updating TQ3D NOHW sync prim<%p> to %d", __func__, (void*)ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue));
+ SyncPrimNoHwUpdate(ps3DFenceTimelineUpdateSync, ui323DFenceTimelineUpdateValue);
+ }
+ SyncCheckpointNoHWUpdateTimelines(NULL);
+#endif /* defined (NO_HARDWARE) */
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_succeeded(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+ if (pi2DUpdateFence)
+ {
+ *pi2DUpdateFence = i2DUpdateFence;
+ }
+ if (pi3DUpdateFence)
+ {
+ *pi3DUpdateFence = i3DUpdateFence;
+ }
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ if (pv2DUpdateFenceFinaliseData && (i2DUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(i2DUpdateFence, pv2DUpdateFenceFinaliseData);
+ }
+ if (pv3DUpdateFenceFinaliseData && (i3DUpdateFence != PVRSRV_NO_FENCE))
+ {
+ SyncCheckpointFinaliseFence(i3DUpdateFence, pv3DUpdateFenceFinaliseData);
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+ OSFreeMem(pas2DCmdHelper);
+ OSFreeMem(pas3DCmdHelper);
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui322DIntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui322DIntAllocatedUpdateValues);
+ pui322DIntAllocatedUpdateValues = NULL;
+ }
+ if (pui323DIntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui323DIntAllocatedUpdateValues);
+ pui323DIntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+/*
+	No resources are created in this function so there is nothing to free
+	unless we had to merge syncs.
+	If we fail after the client CCB acquire there is still nothing to do,
+	as only the client CCB release will modify the client CCB.
+*/
+fail_2dcmdacquire:
+fail_3dcmdacquire:
+fail_initcmd:
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, psSyncAddrListFence);
+ SyncAddrListRollbackCheckpoints(psTransferContext->psDeviceNode, psSyncAddrListUpdate);
+fail_alloc_update_values_mem:
+	if (i2DUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(i2DUpdateFence, pv2DUpdateFenceFinaliseData);
+	}
+	if (i3DUpdateFence != PVRSRV_NO_FENCE)
+	{
+		SyncCheckpointRollbackFenceData(i3DUpdateFence, pv3DUpdateFenceFinaliseData);
+	}
+fail_create_output_fence:
+ /* Drop the references taken on the sync checkpoints in the
+ * resolved input fence */
+ SyncAddrListDeRefCheckpoints(ui32FenceSyncCheckpointCount,
+ apsFenceSyncCheckpoints);
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL) || defined(SUPPORT_BUFFER_SYNC)
+fail_resolve_input_fence:
+#endif
+
+fail_pdumpcheck:
+fail_cmdtype:
+
+#if defined(SUPPORT_BUFFER_SYNC)
+ if (psBufferSyncData)
+ {
+ pvr_buffer_sync_kick_failed(psBufferSyncData);
+ }
+ if (apsBufferFenceSyncCheckpoints)
+ {
+ kfree(apsBufferFenceSyncCheckpoints);
+ }
+#endif /* defined(SUPPORT_BUFFER_SYNC) */
+
+fail_populate_sync_addr_list_update:
+fail_populate_sync_addr_list_fence:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ OSFreeMem(pas2DCmdHelper);
+fail_alloc2dhelper:
+ OSFreeMem(pas3DCmdHelper);
+fail_alloc3dhelper:
+
+#if defined(PVR_USE_FENCE_SYNC_MODEL)
+ /* Free the memory that was allocated for the sync checkpoint list returned by ResolveFence() */
+ if (apsFenceSyncCheckpoints)
+ {
+ SyncCheckpointFreeCheckpointListMem(apsFenceSyncCheckpoints);
+ }
+ /* Free memory allocated to hold the internal list of update values */
+ if (pui322DIntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui322DIntAllocatedUpdateValues);
+ pui322DIntAllocatedUpdateValues = NULL;
+ }
+ if (pui323DIntAllocatedUpdateValues)
+ {
+ OSFreeMem(pui323DIntAllocatedUpdateValues);
+ pui323DIntAllocatedUpdateValues = NULL;
+ }
+#endif /* defined(PVR_USE_FENCE_SYNC_MODEL) */
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return eError;
+}
+
+
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDevNode->pvDevice;
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockAcquire(psTransferContext->hLock);
+#endif
+
+ if ((psTransferContext->s2DData.ui32Priority != ui32Priority) &&
+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ eError = ContextSetPriority(psTransferContext->s2DData.psServerCommonContext,
+ psConnection,
+ psTransferContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_2D);
+ if (eError != PVRSRV_OK)
+ {
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 2D part of the transfer context (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+ goto fail_2dcontext;
+ }
+ psTransferContext->s2DData.ui32Priority = ui32Priority;
+ }
+
+ if (psTransferContext->s3DData.ui32Priority != ui32Priority)
+ {
+ eError = ContextSetPriority(psTransferContext->s3DData.psServerCommonContext,
+ psConnection,
+ psTransferContext->psDeviceNode->pvDevice,
+ ui32Priority,
+ RGXFWIF_DM_3D);
+ if (eError != PVRSRV_OK)
+ {
+			if (eError != PVRSRV_ERROR_RETRY)
+			{
+				PVR_DPF((PVR_DBG_ERROR, "%s: Failed to set the priority of the 3D part of the transfer context (%s)", __func__, PVRSRVGetErrorStringKM(eError)));
+ }
+ goto fail_3dcontext;
+ }
+ psTransferContext->s3DData.ui32Priority = ui32Priority;
+ }
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ return PVRSRV_OK;
+
+fail_3dcontext:
+
+fail_2dcontext:
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockRelease(psTransferContext->hLock);
+#endif
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+void CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) &&
+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ DumpStalledFWCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ if (psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D)
+ {
+ DumpStalledFWCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+}
+
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_UINT32 ui32ContextBitMask = 0;
+
+ OSWRLockAcquireRead(psDevInfo->hTransferCtxListLock);
+
+ dllist_foreach_node(&psDevInfo->sTransferCtxtListHead, psNode, psNext)
+ {
+ RGX_SERVER_TQ_CONTEXT *psCurrentServerTransferCtx =
+ IMG_CONTAINER_OF(psNode, RGX_SERVER_TQ_CONTEXT, sListNode);
+
+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_2D) && \
+ (NULL != psCurrentServerTransferCtx->s2DData.psServerCommonContext) && \
+ (RGX_IS_FEATURE_SUPPORTED(psDevInfo, TLA)))
+ {
+ if (CheckStalledClientCommonContext(psCurrentServerTransferCtx->s2DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ2D) == PVRSRV_ERROR_CCCB_STALLED)
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ2D;
+ }
+ }
+
+ if ((psCurrentServerTransferCtx->ui32Flags & RGX_SERVER_TQ_CONTEXT_FLAGS_3D) && (NULL != psCurrentServerTransferCtx->s3DData.psServerCommonContext))
+ {
+ if ((CheckStalledClientCommonContext(psCurrentServerTransferCtx->s3DData.psServerCommonContext, RGX_KICK_TYPE_DM_TQ3D) == PVRSRV_ERROR_CCCB_STALLED))
+ {
+ ui32ContextBitMask |= RGX_KICK_TYPE_DM_TQ3D;
+ }
+ }
+ }
+
+ OSWRLockReleaseRead(psDevInfo->hTransferCtxListLock);
+ return ui32ContextBitMask;
+}
+
+/**************************************************************************//**
+ End of file (rgxtransfer.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxtransfer.h b/drivers/gpu/drm/img-rogue/1.10/rgxtransfer.h
new file mode 100644
index 00000000000000..839ab3f4bffaec
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxtransfer.h
@@ -0,0 +1,152 @@
+/*************************************************************************/ /*!
+@File
+@Title RGX Transfer queue Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the RGX Transfer queue Functionality
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__RGXTRANSFER_H__)
+#define __RGXTRANSFER_H__
+
+#include "devicemem.h"
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxfwutils.h"
+#include "rgx_fwif_resetframework.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+
+#include "sync_server.h"
+#include "connection_server.h"
+
+typedef struct _RGX_SERVER_TQ_CONTEXT_ RGX_SERVER_TQ_CONTEXT;
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXCreateTransferContextKM
+
+ @Description
+ Server-side implementation of RGXCreateTransferContext
+
+ @Input		psConnection - connection data
+ @Input		psDeviceNode - device node
+ @Input		ui32Priority - priority of the transfer context
+ @Input		ui32FrameworkCommandSize - size of the framework command
+ @Input		pabyFrameworkCommand - framework command
+ @Input		hMemCtxPrivData - memory context private data
+ @Output	ppsTransferContext - created transfer context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXCreateTransferContextKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32Priority,
+ IMG_UINT32 ui32FrameworkCommandSize,
+ IMG_PBYTE pabyFrameworkCommand,
+ IMG_HANDLE hMemCtxPrivData,
+ RGX_SERVER_TQ_CONTEXT **ppsTransferContext);
+
+
+/*!
+*******************************************************************************
+
+ @Function PVRSRVRGXDestroyTransferContextKM
+
+ @Description
+ Server-side implementation of RGXDestroyTransferContext
+
+ @Input psTransferContext - Transfer context
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXDestroyTransferContextKM(RGX_SERVER_TQ_CONTEXT *psTransferContext);
+
+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXSubmitTransferKM
+
+ @Description
+	Schedules one or more 2D or 3D HW commands on the firmware
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR PVRSRVRGXSubmitTransferKM(RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32ClientCacheOpSeqNum,
+ IMG_UINT32 ui32PrepareCount,
+ IMG_UINT32 *paui32ClientFenceCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientFenceUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientFenceSyncOffset,
+ IMG_UINT32 **papaui32ClientFenceValue,
+ IMG_UINT32 *paui32ClientUpdateCount,
+ SYNC_PRIMITIVE_BLOCK ***papauiClientUpdateUFOSyncPrimBlock,
+ IMG_UINT32 **papaui32ClientUpdateSyncOffset,
+ IMG_UINT32 **papaui32ClientUpdateValue,
+ IMG_UINT32 *paui32ServerSyncCount,
+ IMG_UINT32 **papaui32ServerSyncFlags,
+ SERVER_SYNC_PRIMITIVE ***papapsServerSyncs,
+ PVRSRV_FENCE iCheckFence,
+ PVRSRV_TIMELINE i2DUpdateTimeline,
+ PVRSRV_FENCE *pi2DUpdateFence,
+ PVRSRV_TIMELINE i3DUpdateTimeline,
+ PVRSRV_FENCE *pi3DUpdateFence,
+ IMG_CHAR szFenceName[32],
+ IMG_UINT32 *paui32FWCommandSize,
+ IMG_UINT8 **papaui8FWCommand,
+ IMG_UINT32 *pui32TQPrepareFlags,
+ IMG_UINT32 ui32ExtJobRef,
+ IMG_UINT32 ui32SyncPMRCount,
+ IMG_UINT32 *paui32SyncPMRFlags,
+ PMR **ppsSyncPMRs);
+
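+/*!
+*******************************************************************************
+
+ @Function	PVRSRVRGXSetTransferContextPriorityKM
+
+ @Description
+	Sets the priority of the 2D and 3D parts of the transfer context
+
+ @Input		psTransferContext - Transfer context
+ @Input		ui32Priority - Priority to set
+
+ @Return	PVRSRV_ERROR
+
+******************************************************************************/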
+PVRSRV_ERROR PVRSRVRGXSetTransferContextPriorityKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ RGX_SERVER_TQ_CONTEXT *psTransferContext,
+ IMG_UINT32 ui32Priority);
+
+/* Debug - check if transfer context is waiting on a fence */
+void CheckForStalledTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/* Debug/Watchdog - check if client transfer contexts are stalled */
+IMG_UINT32 CheckForStalledClientTransferCtxt(PVRSRV_RGXDEV_INFO *psDevInfo);
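+
+/* Example (hypothetical caller, sketch only): the return value is a bitmask of
+ * RGX_KICK_TYPE_DM_* bits, so a watchdog might test it as follows:
+ *
+ *   if (CheckForStalledClientTransferCtxt(psDevInfo) & RGX_KICK_TYPE_DM_TQ3D)
+ *   {
+ *       PVR_DPF((PVR_DBG_WARNING, "TQ3D client CCB stalled"));
+ *   }
+ */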
+
+#endif /* __RGXTRANSFER_H__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxutils.c b/drivers/gpu/drm/img-rogue/1.10/rgxutils.c
new file mode 100644
index 00000000000000..22a21e63836cae
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxutils.c
@@ -0,0 +1,249 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific utility routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Device specific functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "rgx_fwif_km.h"
+#include "pdump_km.h"
+#include "osfunc.h"
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "rgxutils.h"
+#include "power.h"
+#include "pvrsrv.h"
+#include "sync_internal.h"
+#include "rgxfwutils.h"
+
+
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 *pui32State)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ psDevInfo = psDeviceNode->pvDevice;
+ *pui32State = psDevInfo->eActivePMConf;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 ui32State)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
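+	/* Only a request to force APM off is supported, and only while an APM
+	 * MISR is currently installed; any other request is rejected. */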
+ if (RGX_ACTIVEPM_FORCE_OFF != ui32State
+ || !psDevInfo->pvAPMISRData)
+ {
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+ }
+
+#if !defined(NO_HARDWARE)
+ eError = OSUninstallMISR(psDevInfo->pvAPMISRData);
+ if (PVRSRV_OK == eError)
+ {
+ psDevInfo->eActivePMConf = RGX_ACTIVEPM_FORCE_OFF;
+ psDevInfo->pvAPMISRData = NULL;
+ eError = PVRSRVSetDeviceDefaultPowerState((const PPVRSRV_DEVICE_NODE)psDeviceNode,
+ PVRSRV_DEV_POWER_STATE_ON);
+ }
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR RGXQueryPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL *pbEnabled)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ *pbEnabled = psDevInfo->bPDPEnabled;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL bEnable)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ PVR_UNREFERENCED_PARAMETER(pvPrivateData);
+
+ if (!psDeviceNode || !psDeviceNode->pvDevice)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevInfo = psDeviceNode->pvDevice;
+
+ psDevInfo->bPDPEnabled = bEnable;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 *pui32DeviceFlags)
+{
+ if (!pui32DeviceFlags || !psDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *pui32DeviceFlags = psDevInfo->ui32DeviceFlags;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_BOOL bSetNotClear)
+{
+ IMG_UINT32 ui32DeviceFlags = 0;
+
+ if (!psDevInfo)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (ui32Config & RGXKMIF_DEVICE_STATE_ZERO_FREELIST)
+ {
+ ui32DeviceFlags |= RGXKM_DEVICE_STATE_ZERO_FREELIST;
+ }
+
+ if (ui32Config & RGXKMIF_DEVICE_STATE_DISABLE_DW_LOGGING_EN)
+ {
+ ui32DeviceFlags |= RGXKM_DEVICE_STATE_DISABLE_DW_LOGGING_EN;
+ }
+
+ if (ui32Config & RGXKMIF_DEVICE_STATE_DUST_REQUEST_INJECT_EN)
+ {
+ ui32DeviceFlags |= RGXKM_DEVICE_STATE_DUST_REQUEST_INJECT_EN;
+ }
+
+ if (bSetNotClear)
+ {
+ psDevInfo->ui32DeviceFlags |= ui32DeviceFlags;
+ }
+ else
+ {
+ psDevInfo->ui32DeviceFlags &= ~ui32DeviceFlags;
+ }
+
+ return PVRSRV_OK;
+}
+
+inline const char * RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM)
+{
+ /*
+ * This is based on the currently defined DMs.
+ * If you need to modify the enum in include/rgx_common.h
+ * please keep this function up-to-date too.
+ *
+ * typedef enum _RGXFWIF_DM_
+ * {
+ * RGXFWIF_DM_GP = 0,
+ * RGXFWIF_DM_2D = 1,
+ * RGXFWIF_DM_TDM = 1,
+ * RGXFWIF_DM_TA = 2,
+ * RGXFWIF_DM_3D = 3,
+ * RGXFWIF_DM_CDM = 4,
+ * RGXFWIF_DM_RTU = 5,
+ * RGXFWIF_DM_SHG = 6,
+ * RGXFWIF_DM_LAST,
+ * RGXFWIF_DM_FORCE_I32 = 0x7fffffff
+ * } RGXFWIF_DM;
+ */
+ PVR_ASSERT(eKickTypeDM < RGX_KICK_TYPE_DM_LAST);
+
+	switch (eKickTypeDM) {
+ case RGX_KICK_TYPE_DM_GP:
+ return "GP ";
+ case RGX_KICK_TYPE_DM_TDM_2D:
+ return "TDM/2D ";
+ case RGX_KICK_TYPE_DM_TA:
+ return "TA ";
+ case RGX_KICK_TYPE_DM_3D:
+ return "3D ";
+ case RGX_KICK_TYPE_DM_CDM:
+ return "CDM ";
+ case RGX_KICK_TYPE_DM_RTU:
+ return "RTU ";
+ case RGX_KICK_TYPE_DM_SHG:
+ return "SHG ";
+ case RGX_KICK_TYPE_DM_TQ2D:
+ return "TQ2D ";
+ case RGX_KICK_TYPE_DM_TQ3D:
+ return "TQ3D ";
+ default:
+ return "Invalid DM ";
+ }
+}
+
+/******************************************************************************
+ End of file (rgxutils.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/rgxutils.h b/drivers/gpu/drm/img-rogue/1.10/rgxutils.h
new file mode 100644
index 00000000000000..c2f798036af65c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rgxutils.h
@@ -0,0 +1,185 @@
+/*************************************************************************/ /*!
+@File
+@Title Device specific utility routines declarations
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Inline functions/structures specific to RGX
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "device.h"
+#include "rgxdevice.h"
+#include "rgxdebug.h"
+#include "pvr_notifier.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+
+ @Function RGXQueryAPMState
+
+ @Description Query the state of the APM configuration
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Output pui32State : The APM configuration state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 *pui32State);
+
+/*!
+******************************************************************************
+
+ @Function RGXSetAPMState
+
+ @Description Set the APM configuration state. Currently only 'OFF' is
+ supported
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Input ui32State : The requested APM configuration state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetAPMState(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_UINT32 ui32State);
+
+/*!
+******************************************************************************
+
+ @Function RGXQueryPdumpPanicEnable
+
+ @Description Get the PDump Panic Enable configuration state.
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Output	pbEnabled : IMG_TRUE if PDump Panic is enabled
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXQueryPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL *pbEnabled);
+
+/*!
+******************************************************************************
+
+ @Function RGXSetPdumpPanicEnable
+
+ @Description Set the PDump Panic Enable flag
+
+ @Input psDeviceNode : The device node
+
+ @Input pvPrivateData: Unused (required for AppHint callback)
+
+ @Input bEnable : The requested configuration state
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetPdumpPanicEnable(const PVRSRV_DEVICE_NODE *psDeviceNode,
+ const void *pvPrivateData,
+ IMG_BOOL bEnable);
+
+/*!
+******************************************************************************
+
+ @Function RGXGetDeviceFlags
+
+ @Description Get the device flags for a given device
+
+ @Input		psDevInfo : The device descriptor to query
+
+ @Output pui32DeviceFlags : The current state of the device flags
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXGetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 *pui32DeviceFlags);
+
+/*!
+******************************************************************************
+
+ @Function RGXSetDeviceFlags
+
+ @Description Set the device flags for a given device
+
+ @Input psDevInfo : The device descriptor to modify
+
+ @Input ui32Config : The device flags to modify
+
+ @Input bSetNotClear : Set or clear the specified flags
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RGXSetDeviceFlags(PVRSRV_RGXDEV_INFO *psDevInfo,
+ IMG_UINT32 ui32Config,
+ IMG_BOOL bSetNotClear);
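+
+/* Example (hypothetical caller, sketch only): enable the zero-freelist debug
+ * behaviour on a device:
+ *
+ *   eError = RGXSetDeviceFlags(psDevInfo,
+ *                              RGXKMIF_DEVICE_STATE_ZERO_FREELIST,
+ *                              IMG_TRUE);
+ */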
+
+/*!
+******************************************************************************
+
+ @Function RGXStringifyKickTypeDM
+
+ @Description	Returns the stringified name of the given kick type DM
+
+ @Input		eKickTypeDM : Kick type DM
+
+ @Return	String containing the kick type DM name
+
+******************************************************************************/
+const char* RGXStringifyKickTypeDM(RGX_KICK_TYPE_DM eKickTypeDM);
+
+#define RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(bitmask, eKickTypeDM) (((bitmask) & (eKickTypeDM)) ? RGXStringifyKickTypeDM(eKickTypeDM) : "")
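+
+/* Example (hypothetical caller, sketch only): print only the DM names present
+ * in a stalled-context bitmask:
+ *
+ *   PVR_LOG(("Stalled DMs: %s%s",
+ *            RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32Mask, RGX_KICK_TYPE_DM_TQ2D),
+ *            RGX_STRINGIFY_KICK_TYPE_DM_IF_SET(ui32Mask, RGX_KICK_TYPE_DM_TQ3D)));
+ */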
+/******************************************************************************
+ End of file (rgxutils.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/ri_server.c b/drivers/gpu/drm/img-rogue/1.10/ri_server.c
new file mode 100644
index 00000000000000..39ec9795f55b9c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/ri_server.c
@@ -0,0 +1,2131 @@
+/*************************************************************************/ /*!
+@File ri_server.c
+@Title Resource Information (RI) server implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Resource Information (RI) server functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+
+#include <stdarg.h>
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+
+#include "srvkm.h"
+#include "lock.h"
+
+/* services/include */
+#include "pvr_ricommon.h"
+
+/* services/server/include/ */
+#include "ri_server.h"
+
+/* services/include/shared/ */
+#include "hash.h"
+/* services/shared/include/ */
+#include "dllist.h"
+
+#include "pmr.h"
+
+/* include/device.h */
+#include "device.h"
+
+#if !defined(RI_UNIT_TEST)
+#include "pvrsrv.h"
+#endif
+
+
+#if defined(PVR_RI_DEBUG)
+
+#define USE_RI_LOCK 1
+
+/*
+ * Initial size used for the Hash table.
+ * (Used to index the RI list entries.)
+ */
+#define _RI_INITIAL_HASH_TABLE_SIZE 64
+
+/*
+ * Values written to the 'valid' field of
+ * RI structures when created and cleared
+ * prior to being destroyed.
+ * The code can then check this value
+ * before accessing the provided pointer
+ * contents as a valid RI structure.
+ */
+#define _VALID_RI_LIST_ENTRY 0x66bccb66
+#define _VALID_RI_SUBLIST_ENTRY 0x77cddc77
+#define _INVALID 0x00000000
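+
+/* Guard pattern (sketch): code handed an RI structure is expected to verify
+ * the marker before trusting the remaining fields, e.g.
+ *
+ *   if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+ *   {
+ *       return PVRSRV_ERROR_INVALID_PARAMS;
+ *   }
+ */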
+
+/*
+ * If this define is set to 1, details of
+ * the linked lists (addresses, prev/next
+ * ptrs, etc) are also output when function
+ * RIDumpList() is called
+ */
+#define _DUMP_LINKEDLIST_INFO 0
+
+
+typedef IMG_UINT64 _RI_BASE_T;
+
+
+
+
+/* No +1 in SIZE macros since sizeof includes \0 byte in size */
+
+#define RI_PROC_BUF_SIZE 16
+
+#define RI_MEMDESC_SUM_FRMT "PID %d %s MEMDESCs Alloc'd:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) + "\
+ "Imported:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K) = "\
+ "Total:0x%010" IMG_UINT64_FMTSPECx " (%" IMG_UINT64_FMTSPEC "K)\n"
+#define RI_MEMDESC_SUM_BUF_SIZE (sizeof(RI_MEMDESC_SUM_FRMT)+5+RI_PROC_BUF_SIZE+60)
+
+
+#define RI_PMR_SUM_FRMT "PID %d %s PMRs Alloc'd:0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K "\
+ "[Physical: 0x%010" IMG_UINT64_FMTSPECx ", %" IMG_UINT64_FMTSPEC "K]\n"
+#define RI_PMR_SUM_BUF_SIZE (sizeof(RI_PMR_SUM_FRMT)+(40))
+
+
+#define RI_PMR_ENTRY_FRMT "%%s<%%p>\t%%-%ds\t0x%%010" IMG_UINT64_FMTSPECx "\t[0x%%010" IMG_UINT64_FMTSPECx "]\t%%c"
+#define RI_PMR_ENTRY_BUF_SIZE (sizeof(RI_PMR_ENTRY_FRMT)+(3+16+PVR_ANNOTATION_MAX_LEN+10+10))
+#define RI_PMR_ENTRY_FRMT_SIZE (sizeof(RI_PMR_ENTRY_FRMT))
+
+/* Use %5d rather than %d so the output aligns in server/kernel.log, debugFS sees extra spaces */
+#define RI_MEMDESC_ENTRY_PROC_FRMT "[%5d:%s]"
+#define RI_MEMDESC_ENTRY_PROC_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_PROC_FRMT)+5+16)
+
+#define RI_SYS_ALLOC_IMPORT_FRMT "{Import from PID %d}"
+#define RI_SYS_ALLOC_IMPORT_FRMT_SIZE (sizeof(RI_SYS_ALLOC_IMPORT_FRMT)+5)
+static IMG_CHAR g_szSysAllocImport[RI_SYS_ALLOC_IMPORT_FRMT_SIZE];
+
+#define RI_MEMDESC_ENTRY_IMPORT_FRMT "{Import from PID %d}"
+#define RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_IMPORT_FRMT)+5)
+
+#define RI_MEMDESC_ENTRY_UNPINNED_FRMT "{Unpinned}"
+#define RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_UNPINNED_FRMT))
+
+#define RI_MEMDESC_ENTRY_FRMT "%%s0x%%010" IMG_UINT64_FMTSPECx "\t%%-%ds %%s\t0x%%010" IMG_UINT64_FMTSPECx "\t<%%p> %%s%%s%%s%%c"
+#define RI_MEMDESC_ENTRY_BUF_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT)+(3+10+PVR_ANNOTATION_MAX_LEN+RI_MEMDESC_ENTRY_PROC_BUF_SIZE+16+\
+ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE+RI_SYS_ALLOC_IMPORT_FRMT_SIZE+RI_MEMDESC_ENTRY_UNPINNED_BUF_SIZE))
+#define RI_MEMDESC_ENTRY_FRMT_SIZE (sizeof(RI_MEMDESC_ENTRY_FRMT))
+
+
+#define RI_FRMT_SIZE_MAX (MAX(RI_MEMDESC_ENTRY_BUF_SIZE,\
+ MAX(RI_PMR_ENTRY_BUF_SIZE,\
+ MAX(RI_MEMDESC_SUM_BUF_SIZE,\
+ RI_PMR_SUM_BUF_SIZE))))
+
+
+
+
+/*
+ * Structure used to make linked sublist of
+ * memory allocations (MEMDESC)
+ */
+struct _RI_SUBLIST_ENTRY_
+{
+ DLLIST_NODE sListNode;
+ struct _RI_LIST_ENTRY_ *psRI;
+ IMG_UINT32 valid;
+ IMG_BOOL bIsImport;
+ IMG_BOOL bIsSuballoc;
+ IMG_PID pid;
+ IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE];
+ IMG_DEV_VIRTADDR sVAddr;
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64Size;
+ IMG_CHAR ai8TextB[DEVMEM_ANNOTATION_MAX_LEN+1];
+ DLLIST_NODE sProcListNode;
+};
+
+/*
+ * Structure used to make linked list of
+ * PMRs. Sublists of allocations (MEMDESCs) made
+ * from these PMRs are chained off these entries.
+ */
+struct _RI_LIST_ENTRY_
+{
+ DLLIST_NODE sListNode;
+ DLLIST_NODE sSysAllocListNode;
+ DLLIST_NODE sSubListFirst;
+ IMG_UINT32 valid;
+ PMR *psPMR;
+ IMG_PID pid;
+ IMG_CHAR ai8ProcName[RI_PROC_BUF_SIZE];
+ IMG_UINT16 ui16SubListCount;
+ IMG_UINT16 ui16MaxSubListCount;
+ IMG_UINT32 ui32RIPMRFlags; /* Flags used to indicate the type of allocation */
+ IMG_UINT32 ui32Flags; /* Flags used to indicate if PMR appears in ri debugfs output */
+};
+
+typedef struct _RI_LIST_ENTRY_ RI_LIST_ENTRY;
+typedef struct _RI_SUBLIST_ENTRY_ RI_SUBLIST_ENTRY;
+
+static IMG_UINT16 g_ui16RICount;
+static HASH_TABLE *g_pRIHashTable;
+static IMG_UINT16 g_ui16ProcCount;
+static HASH_TABLE *g_pProcHashTable;
+
+static POS_LOCK g_hRILock;
+
+/* linked list of PMR allocations made against the PVR_SYS_ALLOC_PID and
+ lock to prevent concurrent access to it */
+static POS_LOCK g_hSysAllocPidListLock;
+static DLLIST_NODE g_sSysAllocPidListHead;
+
+/*
+ * Flag used to indicate if RILock should be destroyed when final PMR entry
+ * is deleted, i.e. if RIDeInitKM() has already been called before that point
+ * but the handle manager has deferred deletion of RI entries.
+ */
+static IMG_BOOL bRIDeInitDeferred = IMG_FALSE;
+
+/*
+ * Used as head of linked-list of PMR RI entries -
+ * this is useful when we wish to iterate all PMR
+ * list entries (when we don't have a PMR ref)
+ */
+static DLLIST_NODE sListFirst;
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+/* Function used to produce string containing info for PMR RI entries (used for both debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry, IMG_BOOL bDebugFs, IMG_UINT16 ui16MaxStrLen, IMG_CHAR *pszEntryString);
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v);
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid);
+#define _RIOutput(x) PVR_LOG(x)
+
+#define RI_FLAG_PARSED_BY_DEBUGFS 0x1
+#define RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS 0x2
+#define RI_FLAG_SYSALLOC_PMR 0x4
+
+static IMG_UINT32
+_ProcHashFunc (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen);
+
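+/* Jenkins-style shift/xor/add mixing rounds folded over each 32-bit word of
+ * the key; the table-length parameter is deliberately unused here. */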
+static IMG_UINT32
+_ProcHashFunc (size_t uKeySize, void *pKey, IMG_UINT32 uHashTabLen)
+{
+ IMG_UINT32 *p = (IMG_UINT32 *)pKey;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+ IMG_UINT32 ui;
+ IMG_UINT32 uHashKey = 0;
+
+ PVR_UNREFERENCED_PARAMETER(uHashTabLen);
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ IMG_UINT32 uHashPart = *p++;
+
+ uHashPart += (uHashPart << 12);
+ uHashPart ^= (uHashPart >> 22);
+ uHashPart += (uHashPart << 4);
+ uHashPart ^= (uHashPart >> 9);
+ uHashPart += (uHashPart << 10);
+ uHashPart ^= (uHashPart >> 2);
+ uHashPart += (uHashPart << 7);
+ uHashPart ^= (uHashPart >> 12);
+
+ uHashKey += uHashPart;
+ }
+
+ return uHashKey;
+}
+
+static IMG_BOOL
+_ProcHashComp (size_t uKeySize, void *pKey1, void *pKey2);
+
+static IMG_BOOL
+_ProcHashComp (size_t uKeySize, void *pKey1, void *pKey2)
+{
+ IMG_UINT32 *p1 = (IMG_UINT32 *)pKey1;
+ IMG_UINT32 *p2 = (IMG_UINT32 *)pKey2;
+ IMG_UINT32 uKeyLen = uKeySize / sizeof(IMG_UINT32);
+ IMG_UINT32 ui;
+
+ for (ui = 0; ui < uKeyLen; ui++)
+ {
+ if (*p1++ != *p2++)
+ return IMG_FALSE;
+ }
+
+ return IMG_TRUE;
+}
+
+static void _RILock(void)
+{
+#if (USE_RI_LOCK == 1)
+ OSLockAcquire(g_hRILock);
+#endif
+}
+
+static void _RIUnlock(void)
+{
+#if (USE_RI_LOCK == 1)
+ OSLockRelease(g_hRILock);
+#endif
+}
+
+/* This value maintains a count of the number of PMRs attributed to the
+ * PVR_SYS_ALLOC_PID. Access to this value is protected by g_hRILock,
+ * so it does not need to be an ATOMIC_T.
+ */
+static IMG_UINT32 g_ui32SysAllocPMRCount;
+
+
+PVRSRV_ERROR RIInitKM(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_INT iCharsWritten;
+
+ bRIDeInitDeferred = IMG_FALSE;
+
+ iCharsWritten = OSSNPrintf(g_szSysAllocImport,
+	                           RI_SYS_ALLOC_IMPORT_FRMT_SIZE,
+ RI_SYS_ALLOC_IMPORT_FRMT,
+ PVR_SYS_ALLOC_PID);
+ PVR_LOG_IF_FALSE((iCharsWritten>0 && iCharsWritten<(IMG_INT32)RI_SYS_ALLOC_IMPORT_FRMT_SIZE), \
+ "OSSNPrintf failed to initialise g_szSysAllocImport");
+
+ eError = OSLockCreate(&g_hSysAllocPidListLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSLockCreate (g_hSysAllocPidListLock) failed (returned %d)",__func__,eError));
+ }
+ dllist_init(&(g_sSysAllocPidListHead));
+#if (USE_RI_LOCK == 1)
+ eError = OSLockCreate(&g_hRILock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: OSLockCreate (g_hRILock) failed (returned %d)",__func__,eError));
+ }
+#endif
+ return eError;
+}
+void RIDeInitKM(void)
+{
+#if (USE_RI_LOCK == 1)
+ if (g_ui16RICount > 0)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "%s: called with %d entries remaining - deferring OSLockDestroy()",__func__,g_ui16RICount));
+ bRIDeInitDeferred = IMG_TRUE;
+ }
+ else
+ {
+ OSLockDestroy(g_hRILock);
+ OSLockDestroy(g_hSysAllocPidListLock);
+ }
+#endif
+}
+
+/*!
+******************************************************************************
+
+ @Function RILockAcquireKM
+
+ @Description
+ Acquires the RI Lock (which protects the integrity of the RI
+ linked lists). Caller will be suspended until lock is acquired.
+
+ @Return None
+
+******************************************************************************/
+void RILockAcquireKM(void)
+{
+ _RILock();
+}
+
+/*!
+******************************************************************************
+
+ @Function RILockReleaseKM
+
+ @Description
+ Releases the RI Lock (which protects the integrity of the RI
+ linked lists).
+
+ @Return None
+
+******************************************************************************/
+void RILockReleaseKM(void)
+{
+ _RIUnlock();
+}
+
+/*!
+******************************************************************************
+
+ @Function RIWritePMREntryWithOwnerKM
+
+ @Description
+ Writes a new Resource Information list entry.
+ The new entry will be inserted at the head of the list of
+ PMR RI entries and assigned the values provided.
+
+ @input psPMR - Reference (handle) to the PMR to which this reference relates
+
+ @input ui32Owner - PID of the process which owns the allocation. This
+ may not be the current process (e.g. a request to
+ grow a buffer may happen in the context of a kernel
+ thread, or we may import further resource for a
+ suballocation made from the FW heap which can then
+ also be utilized by other processes)
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR,
+ IMG_PID ui32Owner)
+{
+ uintptr_t hashData = 0;
+ PMR *pPMRHashKey = psPMR;
+ RI_LIST_ENTRY *psRIEntry = NULL;
+
+ /* if Hash table has not been created, create it now */
+ if (!g_pRIHashTable)
+ {
+ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+ }
+ if (!g_pRIHashTable || !g_pProcHashTable)
+ {
+ /* Error - no memory to allocate for Hash table(s) */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ if (!psPMR)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else
+ {
+ /* Acquire RI Lock */
+ _RILock();
+
+ /* look-up psPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+ if (!psRIEntry)
+ {
+ /*
+ * If failed to find a matching existing entry, create a new one
+ */
+ psRIEntry = (RI_LIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_LIST_ENTRY));
+ if (!psRIEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ /* Error - no memory to allocate for new RI entry */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ IMG_UINT32 ui32PMRFlags = PMR_Flags(psPMR);
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psPMR);
+
+ /*
+ * Add new RI Entry
+ */
+ if (g_ui16RICount == 0)
+ {
+ /* Initialise PMR entry linked-list head */
+ dllist_init(&sListFirst);
+ }
+ g_ui16RICount++;
+
+ dllist_init (&(psRIEntry->sSysAllocListNode));
+ dllist_init (&(psRIEntry->sSubListFirst));
+ psRIEntry->ui16SubListCount = 0;
+ psRIEntry->ui16MaxSubListCount = 0;
+ psRIEntry->valid = _VALID_RI_LIST_ENTRY;
+
+ /* Check if this PMR should be accounted for under the
+ * PVR_SYS_ALLOC_PID debugFS entry. This should happen if
+ * we are in the driver init phase, the flags indicate
+ * this is a FW local allocation (made from FW heap)
+ * or the owner PID is PVR_SYS_ALLOC_PID.
+ * Also record host dev node allocs on the system PID.
+ */
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+ PVRSRV_CHECK_FW_LOCAL(ui32PMRFlags) ||
+ ui32Owner == PVR_SYS_ALLOC_PID ||
+ psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
+ {
+ psRIEntry->ui32RIPMRFlags = RI_FLAG_SYSALLOC_PMR;
+ OSSNPrintf(psRIEntry->ai8ProcName,
+ RI_PROC_BUF_SIZE,
+ "SysProc");
+ psRIEntry->pid = PVR_SYS_ALLOC_PID;
+ OSLockAcquire(g_hSysAllocPidListLock);
+ /* Add this psRIEntry to the list of entries for PVR_SYS_ALLOC_PID */
+ dllist_add_to_tail(&g_sSysAllocPidListHead,(PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
+ OSLockRelease(g_hSysAllocPidListLock);
+ g_ui32SysAllocPMRCount++;
+ }
+ else
+ {
+ psRIEntry->ui32RIPMRFlags = 0;
+ psRIEntry->pid = ui32Owner;
+ }
+
+ OSSNPrintf(psRIEntry->ai8ProcName,
+ RI_PROC_BUF_SIZE,
+ "%s",
+ OSGetCurrentClientProcessNameKM());
+ /* Add PMR entry to linked-list of all PMR entries */
+ dllist_init (&(psRIEntry->sListNode));
+ dllist_add_to_tail(&sListFirst,(PDLLIST_NODE)&(psRIEntry->sListNode));
+ }
+
+ psRIEntry->psPMR = psPMR;
+ psRIEntry->ui32Flags = 0;
+
+ /* Create index entry in Hash Table */
+ HASH_Insert_Extended (g_pRIHashTable, (void *)&pPMRHashKey, (uintptr_t)psRIEntry);
+
+ /* Store phRIHandle in PMR structure, so it can delete the associated RI entry when it destroys the PMR */
+ PMRStoreRIHandle(psPMR, psRIEntry);
+ }
+ /* Release RI Lock */
+ _RIUnlock();
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIWritePMREntryKM
+
+ @Description
+    Writes a new Resource Information list entry.
+    The new entry will be added to the list of PMR RI entries
+    and assigned the values provided.
+
+ @input psPMR - Reference (handle) to the PMR to which this reference relates
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR)
+{
+ return RIWritePMREntryWithOwnerKM(psPMR,
+ OSGetCurrentClientProcessIDKM());
+}
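+
+/* Illustrative call site (a sketch, not part of this file): a caller that
+ * has just created a PMR records it against the current process. psPMR is
+ * assumed to come from the caller's PMR creation path.
+ *
+ *     PVRSRV_ERROR eError = RIWritePMREntryKM(psPMR);
+ *     PVR_LOG_IF_ERROR(eError, "RIWritePMREntryKM");
+ */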
+
+/*!
+******************************************************************************
+
+ @Function RIWriteMEMDESCEntryKM
+
+ @Description
+ Writes a new Resource Information sublist entry.
+ The new entry will be inserted at the head of the sublist of
+ the indicated PMR list entry, and assigned the values provided.
+
+ @input psPMR - Reference (handle) to the PMR to which this MEMDESC RI entry relates
+ @input ui32TextBSize - Length of string provided in psz8TextB parameter
+ @input psz8TextB - String describing this secondary reference (may be null)
+ @input     ui64Offset - Offset from the start of the PMR at which this allocation begins
+ @input     ui64Size - Size of this allocation
+ @input bIsImport - Flag indicating if this is an allocation or an import
+ @input bIsSuballoc - Flag indicating if this is a sub-allocation
+ @output phRIHandle - Handle to the created RI entry
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+ IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *psz8TextB,
+ IMG_UINT64 ui64Offset,
+ IMG_UINT64 ui64Size,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsSuballoc,
+ RI_HANDLE *phRIHandle)
+{
+ uintptr_t hashData = 0;
+ PMR *pPMRHashKey = psPMR;
+ IMG_PID pid;
+ IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ /* check Hash tables have been created (meaning at least one PMR has been defined) */
+ if (!g_pRIHashTable || !g_pProcHashTable)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (!psPMR || !phRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else
+ {
+ /* Acquire RI Lock */
+ _RILock();
+
+ *phRIHandle = NULL;
+
+ /* look-up psPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+ if (!psRIEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY));
+ if (!psRISubEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ /* Error - no memory to allocate for new RI sublist entry */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ else
+ {
+ /*
+ * Insert new entry in sublist
+ */
+ PDLLIST_NODE currentNode = dllist_get_next_node(&(psRIEntry->sSubListFirst));
+
+ /*
+ * Insert new entry before currentNode
+ */
+ if (!currentNode)
+ {
+ currentNode = &(psRIEntry->sSubListFirst);
+ }
+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sListNode));
+
+ psRISubEntry->psRI = psRIEntry;
+
+ /* Increment number of entries in sublist */
+ psRIEntry->ui16SubListCount++;
+ if (psRIEntry->ui16SubListCount > psRIEntry->ui16MaxSubListCount)
+ {
+ psRIEntry->ui16MaxSubListCount = psRIEntry->ui16SubListCount;
+ }
+ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+ }
+
+ /* If allocation is made during device or driver initialisation,
+ * track the MEMDESC entry under PVR_SYS_ALLOC_PID, otherwise use
+ * the current PID.
+ * Record host dev node allocations on the system PID.
+ */
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = (PVRSRV_DEVICE_NODE *)PMR_DeviceNode(psRISubEntry->psRI->psPMR);
+
+ if (psDeviceNode->eDevState == PVRSRV_DEVICE_STATE_INIT ||
+ psDeviceNode == PVRSRVGetPVRSRVData()->psHostMemDeviceNode)
+ {
+ psRISubEntry->pid = psRISubEntry->psRI->pid;
+ }
+ else
+ {
+ psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+ }
+ }
+
+ if (ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)
+ ui32TextBSize = DEVMEM_ANNOTATION_MAX_LEN - 1;
+ /* copy ai8TextB field data */
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+ /* ensure string is NUL-terminated */
+ psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+ psRISubEntry->ui64Offset = ui64Offset;
+ psRISubEntry->ui64Size = ui64Size;
+ psRISubEntry->bIsImport = bIsImport;
+ psRISubEntry->bIsSuballoc = bIsSuballoc;
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+ dllist_init (&(psRISubEntry->sProcListNode));
+
+ /*
+ * Now insert this MEMDESC into the proc list
+ */
+ /* look-up pid in Hash Table */
+ pid = psRISubEntry->pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+ if (!hashData)
+ {
+ /*
+ * No allocations for this pid yet
+ */
+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+ /* Increment number of entries in proc hash table */
+ g_ui16ProcCount++;
+ }
+ else
+ {
+ /*
+ * Insert allocation into pid allocations linked list
+ */
+ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+ /*
+ * Insert new entry
+ */
+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+ }
+ *phRIHandle = (RI_HANDLE)psRISubEntry;
+ /* Release RI Lock */
+ _RIUnlock();
+ }
+ return PVRSRV_OK;
+}
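+
+/* Illustrative call site (a sketch, not part of this file): recording a
+ * suballocation made at ui64Offset within psPMR. All variables are assumed
+ * to exist in the caller (OSStringLength is assumed available); the
+ * IMG_FALSE/IMG_TRUE pair selects a non-imported suballocation.
+ *
+ *     RI_HANDLE hRI = NULL;
+ *     eError = RIWriteMEMDESCEntryKM(psPMR,
+ *                                    OSStringLength(pszAnnotation),
+ *                                    pszAnnotation,
+ *                                    ui64Offset,
+ *                                    ui64Size,
+ *                                    IMG_FALSE,
+ *                                    IMG_TRUE,
+ *                                    &hRI);
+ */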
+
+/*!
+******************************************************************************
+
+ @Function RIWriteProcListEntryKM
+
+ @Description
+    Write a new entry in the process list directly. We have to do this
+    because an entry may have no PMR handle, several PMR handles, or a
+    changing set of them.
+
+    In the common case we have a PMR that will be added to the PMR list
+    and one or several MemDescs that are associated with it in a sub-list.
+    Additionally, these MemDescs will be inserted in the per-process list.
+
+    There might be special descriptors, e.g. from new user APIs, that
+    are associated with no PMR or with multiple PMRs rather than just one.
+    These can now be added to the per-process list (as RI_SUBLIST_ENTRY)
+    directly with this function; they won't be listed in the PMR list
+    (RIEntry) because there might be no PMR.
+
+ To remove entries from the per-process list, just use
+ RIDeleteMEMDESCEntryKM().
+
+ @input     ui32TextBSize - Length of string provided in psz8TextB parameter
+ @input     psz8TextB - String describing this entry (may be null)
+ @input     ui64Size - Size of this allocation
+ @input ui64DevVAddr - Virtual address of this entry
+ @output phRIHandle - Handle to the created RI entry
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *psz8TextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64DevVAddr,
+ RI_HANDLE *phRIHandle)
+{
+ uintptr_t hashData = 0;
+ IMG_PID pid;
+ IMG_PCHAR pszText = (IMG_PCHAR)psz8TextB;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ if (!g_pRIHashTable)
+ {
+ g_pRIHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(PMR*), HASH_Func_Default, HASH_Key_Comp_Default);
+ g_pProcHashTable = HASH_Create_Extended(_RI_INITIAL_HASH_TABLE_SIZE, sizeof(IMG_PID), _ProcHashFunc, _ProcHashComp);
+
+ if (!g_pRIHashTable || !g_pProcHashTable)
+ {
+ /* Error - no memory to allocate for Hash table(s) */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ /* Acquire RI Lock */
+ _RILock();
+
+ *phRIHandle = NULL;
+
+ psRISubEntry = (RI_SUBLIST_ENTRY *)OSAllocZMemNoStats(sizeof(RI_SUBLIST_ENTRY));
+ if (!psRISubEntry)
+ {
+ /* Release RI Lock */
+ _RIUnlock();
+ /* Error - no memory to allocate for new RI sublist entry */
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ psRISubEntry->valid = _VALID_RI_SUBLIST_ENTRY;
+
+ psRISubEntry->pid = OSGetCurrentClientProcessIDKM();
+
+ if (ui32TextBSize > DEVMEM_ANNOTATION_MAX_LEN)
+ ui32TextBSize = DEVMEM_ANNOTATION_MAX_LEN - 1;
+ /* copy ai8TextB field data */
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8TextB, ui32TextBSize+1, "%s", pszText);
+ /* ensure string is NUL-terminated */
+ psRISubEntry->ai8TextB[ui32TextBSize] = '\0';
+
+ psRISubEntry->ui64Offset = 0;
+ psRISubEntry->ui64Size = ui64Size;
+ psRISubEntry->sVAddr.uiAddr = ui64DevVAddr;
+ psRISubEntry->bIsImport = IMG_FALSE;
+ psRISubEntry->bIsSuballoc = IMG_FALSE;
+ OSSNPrintf((IMG_CHAR *)psRISubEntry->ai8ProcName, RI_PROC_BUF_SIZE, "%s", OSGetCurrentClientProcessNameKM());
+ dllist_init (&(psRISubEntry->sProcListNode));
+
+ /*
+ * Now insert this MEMDESC into the proc list
+ */
+ /* look-up pid in Hash Table */
+ pid = psRISubEntry->pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+ if (!hashData)
+ {
+ /*
+ * No allocations for this pid yet
+ */
+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)&(psRISubEntry->sProcListNode));
+ /* Increment number of entries in proc hash table */
+ g_ui16ProcCount++;
+ }
+ else
+ {
+ /*
+ * Insert allocation into pid allocations linked list
+ */
+ PDLLIST_NODE currentNode = (PDLLIST_NODE)hashData;
+
+ /*
+ * Insert new entry
+ */
+ dllist_add_to_tail(currentNode, (PDLLIST_NODE)&(psRISubEntry->sProcListNode));
+ }
+ *phRIHandle = (RI_HANDLE)psRISubEntry;
+ /* Release RI Lock */
+ _RIUnlock();
+
+ return PVRSRV_OK;
+}
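+
+/* Illustrative call site (a sketch, not part of this file): recording a
+ * PMR-less resource directly against the calling process. The variables
+ * shown are assumed to exist in the caller.
+ *
+ *     RI_HANDLE hRI = NULL;
+ *     eError = RIWriteProcListEntryKM(OSStringLength(pszAnnotation),
+ *                                     pszAnnotation,
+ *                                     ui64Size,
+ *                                     sDevVAddr.uiAddr,
+ *                                     &hRI);
+ */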
+
+/*!
+******************************************************************************
+
+ @Function RIUpdateMEMDESCAddrKM
+
+ @Description
+ Update a Resource Information entry.
+
+ @input hRIHandle - Handle of object whose reference info is to be updated
+ @input     sVAddr - New device virtual address for the RI entry
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sVAddr)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ psRISubEntry->sVAddr.uiAddr = sVAddr.uiAddr;
+
+ /* Release RI lock */
+ _RIUnlock();
+
+ return PVRSRV_OK;
+}
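+
+/* Illustrative call site (a sketch, not part of this file): once the
+ * allocation has been mapped into the device address space, the recorded
+ * entry can be updated with its final device virtual address.
+ *
+ *     IMG_DEV_VIRTADDR sVAddr;
+ *     sVAddr.uiAddr = ui64MappedAddr;
+ *     eError = RIUpdateMEMDESCAddrKM(hRI, sVAddr);
+ */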
+
+/*!
+******************************************************************************
+
+ @Function RIDeletePMREntryKM
+
+ @Description
+ Delete a Resource Information entry.
+
+ @input hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ PMR *pPMRHashKey;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ else
+ {
+ psRIEntry = (RI_LIST_ENTRY *)hRIHandle;
+
+ if (psRIEntry->valid != _VALID_RI_LIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+		if (psRIEntry->ui16SubListCount == 0)
+ {
+ /* Acquire RI lock*/
+ _RILock();
+
+ /* Remove the HASH table index entry */
+ pPMRHashKey = psRIEntry->psPMR;
+ HASH_Remove_Extended(g_pRIHashTable, (void *)&pPMRHashKey);
+
+ psRIEntry->valid = _INVALID;
+
+ /* Remove PMR entry from linked-list of PMR entries */
+ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sListNode));
+
+ if (psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)
+ {
+ dllist_remove_node((PDLLIST_NODE)&(psRIEntry->sSysAllocListNode));
+ g_ui32SysAllocPMRCount--;
+ }
+
+ /* Now, free the memory used to store the RI entry */
+ OSFreeMemNoStats(psRIEntry);
+ psRIEntry = NULL;
+
+ /*
+ * Decrement number of RI entries - if this is now zero,
+ * we can delete the RI hash table
+ */
+			if (--g_ui16RICount == 0)
+ {
+ HASH_Delete(g_pRIHashTable);
+ g_pRIHashTable = NULL;
+
+ _RIUnlock();
+
+ /* If deInit has been deferred, we can now destroy the RI Lock */
+ if (bRIDeInitDeferred)
+ {
+ OSLockDestroy(g_hRILock);
+ }
+ }
+ else
+ {
+ /* Release RI lock*/
+ _RIUnlock();
+ }
+ /*
+ * Make the handle NULL once PMR RI entry is deleted
+ */
+ hRIHandle = NULL;
+ }
+ else
+ {
+ eResult = PVRSRV_ERROR_DEVICEMEM_ALLOCATIONS_REMAIN_IN_HEAP;
+ }
+ }
+
+ return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDeleteMEMDESCEntryKM
+
+ @Description
+ Delete a Resource Information entry.
+ Entry can be from RIEntry list or ProcList.
+
+ @input hRIHandle - Handle of object whose reference info is to be deleted
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ uintptr_t hashData = 0;
+ IMG_PID pid;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+ if (!hRIHandle)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psRISubEntry = (RI_SUBLIST_ENTRY *)hRIHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ /* Pointer does not point to valid structure */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ /* For entries which do have a parent PMR remove the node from the sublist */
+ if (psRISubEntry->psRI)
+ {
+ psRIEntry = (RI_LIST_ENTRY *)psRISubEntry->psRI;
+
+ /* Now, remove entry from the sublist */
+ dllist_remove_node(&(psRISubEntry->sListNode));
+ }
+
+ psRISubEntry->valid = _INVALID;
+
+ /* Remove the entry from the proc allocations linked list */
+ pid = psRISubEntry->pid;
+ /* If this is the only allocation for this pid, just remove it from the hash table */
+ if (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL)
+ {
+ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+ /* Decrement number of entries in proc hash table, and delete the hash table if there are now none */
+		if (--g_ui16ProcCount == 0)
+ {
+ HASH_Delete(g_pProcHashTable);
+ g_pProcHashTable = NULL;
+ }
+ }
+ else
+ {
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&pid);
+ if ((PDLLIST_NODE)hashData == &(psRISubEntry->sProcListNode))
+ {
+ HASH_Remove_Extended(g_pProcHashTable, (void *)&pid);
+ HASH_Insert_Extended (g_pProcHashTable, (void *)&pid, (uintptr_t)dllist_get_next_node(&(psRISubEntry->sProcListNode)));
+ }
+ }
+ dllist_remove_node(&(psRISubEntry->sProcListNode));
+
+ /* Now, free the memory used to store the sublist entry */
+ OSFreeMemNoStats(psRISubEntry);
+ psRISubEntry = NULL;
+
+ /*
+ * Decrement number of entries in sublist if this MemDesc had a parent entry.
+ */
+ if (psRIEntry)
+ {
+ psRIEntry->ui16SubListCount--;
+ }
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ /*
+ * Make the handle NULL once MEMDESC RI entry is deleted
+ */
+ hRIHandle = NULL;
+
+ return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDeleteListKM
+
+ @Description
+ Delete all Resource Information entries and free associated
+ memory.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDeleteListKM(void)
+{
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+ _RILock();
+
+ if (g_pRIHashTable)
+ {
+ eResult = HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DeleteAllEntries);
+ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ {
+ /*
+ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+ * the hash table gets deleted as a result of deleting the final PMR entry,
+ * so this is not a real error condition...
+ */
+ eResult = PVRSRV_OK;
+ }
+ }
+
+ /* After the run through the RIHashTable that holds the PMR entries there might be
+ * still entries left in the per-process hash table because they were added with
+ * RIWriteProcListEntryKM() and have no PMR parent associated.
+ */
+ if (g_pProcHashTable)
+ {
+ eResult = HASH_Iterate(g_pProcHashTable, (HASH_pfnCallback) _DeleteAllProcEntries);
+ if (eResult == PVRSRV_ERROR_RESOURCE_UNAVAILABLE)
+ {
+ /*
+ * PVRSRV_ERROR_RESOURCE_UNAVAILABLE is used to stop the Hash iterator when
+ * the hash table gets deleted as a result of deleting the final PMR entry,
+ * so this is not a real error condition...
+ */
+ eResult = PVRSRV_OK;
+ }
+ }
+
+ _RIUnlock();
+
+ return eResult;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpListKM
+
+ @Description
+ Dumps out the contents of the RI List entry for the
+ specified PMR, and all MEMDESC allocation entries
+ in the associated sub linked list.
+ At present, output is directed to Kernel log
+ via PVR_DPF.
+
+ @input psPMR - PMR for which RI entry details are to be output
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ eError = _DumpList(psPMR,0);
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIGetListEntryKM
+
+ @Description
+    Returns a pointer to a formatted string with details of the specified
+    list entry. If no entry exists (e.g. it may have been deleted
+    since the previous call), NULL is returned via ppszEntryString.
+
+ @input     pid - pid for which RI entry details are to be output
+ @input     ppHandle - handle to the current entry; if it points to NULL,
+            the first entry will be returned. On return it is updated to
+            point to the next entry (or NULL if there is no next entry).
+ @output    ppszEntryString - string to be output for the entry
+
+ @Return	IMG_BOOL - IMG_TRUE if an entry was returned, IMG_FALSE when
+            the end of the data has been reached
+
+******************************************************************************/
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+ IMG_HANDLE **ppHandle,
+ IMG_CHAR **ppszEntryString)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ uintptr_t hashData = 0;
+ IMG_PID hashKey = pid;
+
+ static IMG_CHAR acStringBuffer[RI_FRMT_SIZE_MAX];
+
+ static IMG_UINT64 ui64TotalMemdescAlloc;
+ static IMG_UINT64 ui64TotalImport;
+ static IMG_UINT64 ui64TotalPMRAlloc;
+ static IMG_UINT64 ui64TotalPMRBacked;
+ static enum {
+ RI_GET_STATE_MEMDESCS_LIST_START,
+ RI_GET_STATE_MEMDESCS_SUMMARY,
+ RI_GET_STATE_PMR_LIST,
+ RI_GET_STATE_PMR_SUMMARY,
+ RI_GET_STATE_END,
+ RI_GET_STATE_LAST
+ } g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
+
+ static DLLIST_NODE *psNode;
+ static DLLIST_NODE *psSysAllocNode;
+ static IMG_CHAR szProcName[RI_PROC_BUF_SIZE];
+ static IMG_UINT32 ui32ProcessedSysAllocPMRCount;
+
+ acStringBuffer[0] = '\0';
+
+ switch (g_bNextGetState)
+ {
+ case RI_GET_STATE_MEMDESCS_LIST_START:
+ /* look-up pid in Hash Table, to obtain first entry for pid */
+ hashData = HASH_Retrieve_Extended(g_pProcHashTable, (void *)&hashKey);
+ if (hashData)
+ {
+ if (*ppHandle)
+ {
+ psRISubEntry = (RI_SUBLIST_ENTRY *)*ppHandle;
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ psRISubEntry = NULL;
+ }
+ }
+ else
+ {
+ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry->valid != _VALID_RI_SUBLIST_ENTRY)
+ {
+ psRISubEntry = NULL;
+ }
+ }
+ }
+
+ if (psRISubEntry)
+ {
+ PDLLIST_NODE psNextProcListNode = dllist_get_next_node(&psRISubEntry->sProcListNode);
+
+ if (psRISubEntry->bIsImport)
+ {
+ ui64TotalImport += psRISubEntry->ui64Size;
+ }
+ else
+ {
+ ui64TotalMemdescAlloc += psRISubEntry->ui64Size;
+ }
+
+ _GenerateMEMDESCEntryString(psRISubEntry,
+ IMG_TRUE,
+ RI_MEMDESC_ENTRY_BUF_SIZE,
+ acStringBuffer);
+
+			/* If not an imported PMR, flag 'parent' PMR as having been listed in MEMDESCs */
+ if (!psRISubEntry->bIsImport && !(psRISubEntry->psRI->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR))
+ {
+ psRISubEntry->psRI->ui32RIPMRFlags |= RI_FLAG_PARSED_BY_DEBUGFS;
+ }
+
+ if (szProcName[0] == '\0')
+ {
+ OSStringCopy(szProcName, (pid == PVR_SYS_ALLOC_PID) ?
+ PVRSRV_MODNAME : psRISubEntry->ai8ProcName);
+ }
+
+ *ppszEntryString = acStringBuffer;
+ *ppHandle = (IMG_HANDLE)IMG_CONTAINER_OF(psNextProcListNode, RI_SUBLIST_ENTRY, sProcListNode);
+
+ if (psNextProcListNode == NULL ||
+ psNextProcListNode == (PDLLIST_NODE)hashData)
+ {
+ g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY;
+ }
+ /* else continue to list MEMDESCs */
+ }
+ else
+ {
+ if (ui64TotalMemdescAlloc == 0)
+ {
+ acStringBuffer[0] = '\0';
+				*ppszEntryString = acStringBuffer;
+ g_bNextGetState = RI_GET_STATE_MEMDESCS_SUMMARY;
+ }
+ /* else continue to list MEMDESCs */
+ }
+ break;
+
+ case RI_GET_STATE_MEMDESCS_SUMMARY:
+ OSSNPrintf( acStringBuffer,
+ RI_MEMDESC_SUM_BUF_SIZE,
+ RI_MEMDESC_SUM_FRMT,
+ pid,
+ szProcName,
+ ui64TotalMemdescAlloc,
+ ui64TotalMemdescAlloc >> 10,
+ ui64TotalImport,
+ ui64TotalImport >> 10,
+ (ui64TotalMemdescAlloc + ui64TotalImport),
+ (ui64TotalMemdescAlloc + ui64TotalImport) >> 10);
+
+ *ppszEntryString = acStringBuffer;
+ ui64TotalMemdescAlloc = 0;
+ ui64TotalImport = 0;
+ szProcName[0] = '\0';
+
+ g_bNextGetState = RI_GET_STATE_PMR_LIST;
+ break;
+
+ case RI_GET_STATE_PMR_LIST:
+ if (pid == PVR_SYS_ALLOC_PID)
+ {
+ OSLockAcquire(g_hSysAllocPidListLock);
+ acStringBuffer[0] = '\0';
+ if (!psSysAllocNode)
+ {
+ psSysAllocNode = &g_sSysAllocPidListHead;
+ ui32ProcessedSysAllocPMRCount = 0;
+ }
+ psSysAllocNode = dllist_get_next_node(psSysAllocNode);
+
+ if (szProcName[0] == '\0')
+ {
+ OSStringCopy(szProcName, PVRSRV_MODNAME);
+ }
+ if (psSysAllocNode != NULL && psSysAllocNode != &g_sSysAllocPidListHead)
+ {
+ IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0;
+
+ psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode);
+ _GeneratePMREntryString(psRIEntry,
+ IMG_TRUE,
+ RI_PMR_ENTRY_BUF_SIZE,
+ acStringBuffer);
+ PMR_LogicalSize(psRIEntry->psPMR,
+ &uiPMRLogicalSize);
+ ui64TotalPMRAlloc += uiPMRLogicalSize;
+ PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking);
+ ui64TotalPMRBacked += uiPMRPhysicalBacking;
+
+ ui32ProcessedSysAllocPMRCount++;
+ if (ui32ProcessedSysAllocPMRCount > g_ui32SysAllocPMRCount+1)
+ {
+ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+ }
+ /* else continue to list PMRs */
+ }
+ else
+ {
+ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+ }
+ *ppszEntryString = (IMG_CHAR *)acStringBuffer;
+ OSLockRelease(g_hSysAllocPidListLock);
+ }
+ else
+ {
+ IMG_BOOL bPMRToDisplay = IMG_FALSE;
+
+ /* Iterate through the 'touched' PMRs and display details */
+ if (!psNode)
+ {
+ psNode = dllist_get_next_node(&sListFirst);
+ }
+ else
+ {
+ psNode = dllist_get_next_node(psNode);
+ }
+
+ while ((psNode != NULL && psNode != &sListFirst) &&
+ !bPMRToDisplay)
+ {
+ psRIEntry = IMG_CONTAINER_OF(psNode, RI_LIST_ENTRY, sListNode);
+ if (psRIEntry->ui32RIPMRFlags & RI_FLAG_PARSED_BY_DEBUGFS)
+ {
+ IMG_DEVMEM_SIZE_T uiPMRPhysicalBacking, uiPMRLogicalSize = 0;
+
+ /* This PMR was 'touched', so display details and unflag it*/
+ _GeneratePMREntryString(psRIEntry,
+ IMG_TRUE,
+ RI_PMR_ENTRY_BUF_SIZE,
+ acStringBuffer);
+ psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PARSED_BY_DEBUGFS;
+ PMR_LogicalSize(psRIEntry->psPMR, &uiPMRLogicalSize);
+ ui64TotalPMRAlloc += uiPMRLogicalSize;
+ PMR_PhysicalSize(psRIEntry->psPMR, &uiPMRPhysicalBacking);
+ ui64TotalPMRBacked += uiPMRPhysicalBacking;
+
+ /* Remember the name of the process for 1 PMR for the summary */
+ if (szProcName[0] == '\0')
+ {
+ OSStringCopy(szProcName, psRIEntry->ai8ProcName);
+ }
+ bPMRToDisplay = IMG_TRUE;
+ }
+ else
+ {
+ psNode = dllist_get_next_node(psNode);
+ }
+ }
+
+ if (psNode == NULL || (psNode == &sListFirst))
+ {
+ g_bNextGetState = RI_GET_STATE_PMR_SUMMARY;
+ }
+ /* else continue listing PMRs */
+ }
+ break;
+
+ case RI_GET_STATE_PMR_SUMMARY:
+ OSSNPrintf( acStringBuffer,
+ RI_PMR_SUM_BUF_SIZE,
+ RI_PMR_SUM_FRMT,
+ pid,
+ szProcName,
+ ui64TotalPMRAlloc,
+ ui64TotalPMRAlloc >> 10,
+ ui64TotalPMRBacked,
+ ui64TotalPMRBacked >> 10);
+
+ *ppszEntryString = acStringBuffer;
+ ui64TotalPMRAlloc = 0;
+ ui64TotalPMRBacked = 0;
+ szProcName[0] = '\0';
+ psSysAllocNode = NULL;
+
+ g_bNextGetState = RI_GET_STATE_END;
+ break;
+
+ default:
+		PVR_DPF((PVR_DBG_ERROR, "%s: Bad state (%d)",__func__, g_bNextGetState));
+
+ case RI_GET_STATE_END:
+ /* Reset state ready for the next ri_mem_area file to display */
+ *ppszEntryString = NULL;
+ *ppHandle = NULL;
+ psNode = NULL;
+ szProcName[0] = '\0';
+
+ g_bNextGetState = RI_GET_STATE_MEMDESCS_LIST_START;
+ return IMG_FALSE;
+ break;
+ }
+
+ return IMG_TRUE;
+}
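+
+/* Illustrative consumer loop (a sketch, not part of this file): a debugfs
+ * reader is expected to call RIGetListEntryKM() repeatedly, feeding the
+ * handle back in, until IMG_FALSE signals the end of the state machine:
+ *
+ *     IMG_HANDLE *phEntry = NULL;
+ *     IMG_CHAR *pszEntry = NULL;
+ *
+ *     while (RIGetListEntryKM(pid, &phEntry, &pszEntry))
+ *     {
+ *         if (pszEntry != NULL && pszEntry[0] != '\0')
+ *         {
+ *             ... emit pszEntry to the output buffer ...
+ *         }
+ *     }
+ */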
+
+/* Function used to produce string containing info for MEMDESC RI entries (used for both debugfs and kernel log output) */
+static void _GenerateMEMDESCEntryString(RI_SUBLIST_ENTRY *psRISubEntry,
+ IMG_BOOL bDebugFs,
+ IMG_UINT16 ui16MaxStrLen,
+ IMG_CHAR *pszEntryString)
+{
+ IMG_CHAR szProc[RI_MEMDESC_ENTRY_PROC_BUF_SIZE];
+ IMG_CHAR szImport[RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE];
+ const IMG_CHAR *pszAnnotationText;
+ IMG_CHAR szEntryFormat[RI_MEMDESC_ENTRY_FRMT_SIZE];
+
+ OSSNPrintf(szEntryFormat,
+ RI_MEMDESC_ENTRY_FRMT_SIZE,
+ RI_MEMDESC_ENTRY_FRMT,
+ DEVMEM_ANNOTATION_MAX_LEN);
+
+ if (!bDebugFs)
+ {
+ /* we don't include process ID info for debugfs output */
+ OSSNPrintf(szProc,
+ RI_MEMDESC_ENTRY_PROC_BUF_SIZE,
+ RI_MEMDESC_ENTRY_PROC_FRMT,
+ psRISubEntry->pid,
+ psRISubEntry->ai8ProcName);
+ }
+
+ if (psRISubEntry->bIsImport)
+ {
+ OSSNPrintf( (IMG_CHAR *)&szImport,
+ RI_MEMDESC_ENTRY_IMPORT_BUF_SIZE,
+ RI_MEMDESC_ENTRY_IMPORT_FRMT,
+ psRISubEntry->psRI->pid);
+ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+ pszAnnotationText = PMR_GetAnnotation(psRISubEntry->psRI->psPMR);
+ }
+ else if (!psRISubEntry->bIsSuballoc)
+ {
+ /* Set pszAnnotationText to that of the 'parent' PMR RI entry */
+ pszAnnotationText = PMR_GetAnnotation(psRISubEntry->psRI->psPMR);
+ }
+ else
+ {
+ /* Set pszAnnotationText to that of the MEMDESC RI entry */
+ pszAnnotationText = psRISubEntry->ai8TextB;
+ }
+
+ /* Don't print memdescs if they are local imports
+ * (i.e. imported PMRs allocated by this process)
+ */
+ if (bDebugFs &&
+ ((psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset) == 0) &&
+ (psRISubEntry->bIsImport && ((psRISubEntry->pid == psRISubEntry->psRI->pid) || (psRISubEntry->psRI->pid == PVR_SYS_ALLOC_PID))))
+ {
+ /* Don't print this entry */
+ pszEntryString[0] = '\0';
+ }
+ else
+ {
+ OSSNPrintf(pszEntryString,
+ ui16MaxStrLen,
+ szEntryFormat,
+ (bDebugFs ? "" : " "),
+ (psRISubEntry->sVAddr.uiAddr + psRISubEntry->ui64Offset),
+ pszAnnotationText,
+ (bDebugFs ? "" : (char *)szProc),
+ psRISubEntry->ui64Size,
+ psRISubEntry->psRI->psPMR,
+ (psRISubEntry->bIsImport ? (char *)&szImport : ""),
+ (!psRISubEntry->bIsImport && (psRISubEntry->psRI->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR) && (psRISubEntry->pid != PVR_SYS_ALLOC_PID)) ? g_szSysAllocImport : "",
+ (PMR_IsUnpinned(psRISubEntry->psRI->psPMR) ? RI_MEMDESC_ENTRY_UNPINNED_FRMT : ""),
+ (bDebugFs ? '\n' : ' '));
+ }
+}
+
+/* Function used to produce string containing info for PMR RI entries (used for debugfs and kernel log output) */
+static void _GeneratePMREntryString(RI_LIST_ENTRY *psRIEntry,
+ IMG_BOOL bDebugFs,
+ IMG_UINT16 ui16MaxStrLen,
+ IMG_CHAR *pszEntryString)
+{
+ const IMG_CHAR* pszAnnotationText;
+ IMG_DEVMEM_SIZE_T uiLogicalSize = 0;
+ IMG_DEVMEM_SIZE_T uiPhysicalSize = 0;
+ IMG_CHAR szEntryFormat[RI_PMR_ENTRY_FRMT_SIZE];
+
+ PMR_LogicalSize(psRIEntry->psPMR, &uiLogicalSize);
+
+ PMR_PhysicalSize(psRIEntry->psPMR, &uiPhysicalSize);
+
+ OSSNPrintf(szEntryFormat,
+ RI_PMR_ENTRY_FRMT_SIZE,
+ RI_PMR_ENTRY_FRMT,
+ DEVMEM_ANNOTATION_MAX_LEN);
+
+	/* Set pszAnnotationText to that of the PMR RI entry */
+ pszAnnotationText = (IMG_PCHAR) PMR_GetAnnotation(psRIEntry->psPMR);
+
+ OSSNPrintf(pszEntryString,
+ ui16MaxStrLen,
+ szEntryFormat,
+ (bDebugFs ? "" : " "),
+ (void*)psRIEntry->psPMR,
+ pszAnnotationText,
+ uiLogicalSize,
+ uiPhysicalSize,
+ (bDebugFs ? '\n' : ' '));
+}
+
+/*!
+******************************************************************************
+
+ @Function _DumpList
+ @Description
+ Dumps out RI List entries according to parameters passed.
+
+ @input psPMR - If not NULL, function will output the RI entries for
+ the specified PMR only
+ @input pid - If non-zero, the function will only output MEMDESC RI
+ entries made by the process with ID pid.
+ If zero, all MEMDESC RI entries will be output.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpList(PMR *psPMR, IMG_PID pid)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ IMG_UINT16 ui16SubEntriesParsed = 0;
+ uintptr_t hashData = 0;
+ IMG_PID hashKey;
+ PMR *pPMRHashKey = psPMR;
+ IMG_BOOL bDisplayedThisPMR = IMG_FALSE;
+ IMG_UINT64 ui64LogicalSize = 0;
+
+ if (!psPMR)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ if (g_pRIHashTable && g_pProcHashTable)
+ {
+ if (pid != 0)
+ {
+ /* look-up pid in Hash Table */
+ hashKey = pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+ if (hashData)
+ {
+ psRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry)
+ {
+ psRIEntry = psRISubEntry->psRI;
+ }
+ }
+ }
+ else
+ {
+ /* look-up psPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+ }
+ if (!psRIEntry)
+ {
+ /* No entry found in hash table */
+ return PVRSRV_ERROR_NOT_FOUND;
+ }
+ while (psRIEntry)
+ {
+ bDisplayedThisPMR = IMG_FALSE;
+ /* Output details for RI entry */
+ if (!pid)
+ {
+ PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize);
+
+ _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx,
+ PMR_GetAnnotation(psRIEntry->psPMR),
+ psRIEntry->psPMR,
+ (IMG_UINT)psRIEntry->ui16SubListCount,
+ ui64LogicalSize));
+ bDisplayedThisPMR = IMG_TRUE;
+ }
+ ui16SubEntriesParsed = 0;
+			if (psRIEntry->ui16SubListCount)
+ {
+#if _DUMP_LINKEDLIST_INFO
+ _RIOutput (("RI LIST: {sSubListFirst.psNextNode:0x%x}\n",
+ (IMG_UINT)psRIEntry->sSubListFirst.psNextNode));
+#endif /* _DUMP_LINKEDLIST_INFO */
+ if (!pid)
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+ RI_SUBLIST_ENTRY, sListNode);
+ }
+ /* Traverse RI sublist and output details for each entry */
+ while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+ {
+ if (!bDisplayedThisPMR)
+ {
+ PMR_LogicalSize(psPMR, (IMG_DEVMEM_SIZE_T*)&ui64LogicalSize);
+
+ _RIOutput (("%s <%p> suballocs:%d size:0x%010" IMG_UINT64_FMTSPECx,
+ PMR_GetAnnotation(psRIEntry->psPMR),
+ psRIEntry->psPMR,
+ (IMG_UINT)psRIEntry->ui16SubListCount,
+ ui64LogicalSize));
+ bDisplayedThisPMR = IMG_TRUE;
+ }
+#if _DUMP_LINKEDLIST_INFO
+ _RIOutput (("RI LIST: [this subentry:0x%x]\n",(IMG_UINT)psRISubEntry));
+ _RIOutput (("RI LIST: psRI:0x%x\n",(IMG_UINT32)psRISubEntry->psRI));
+#endif /* _DUMP_LINKEDLIST_INFO */
+
+ {
+ IMG_CHAR szEntryString[RI_MEMDESC_ENTRY_BUF_SIZE];
+
+ _GenerateMEMDESCEntryString(psRISubEntry,
+ IMG_FALSE,
+ RI_MEMDESC_ENTRY_BUF_SIZE,
+ szEntryString);
+ _RIOutput (("%s",szEntryString));
+ }
+
+ if (pid)
+ {
+					if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+ {
+ psRISubEntry = NULL;
+ }
+ else
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+ RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry)
+ {
+ if (psRIEntry != psRISubEntry->psRI)
+ {
+ /*
+ * The next MEMDESC in the process linked list is in a different PMR
+ */
+ psRIEntry = psRISubEntry->psRI;
+ bDisplayedThisPMR = IMG_FALSE;
+ }
+ }
+ }
+ }
+ else
+ {
+ ui16SubEntriesParsed++;
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+ RI_SUBLIST_ENTRY, sListNode);
+ }
+ }
+ }
+ if (!pid)
+ {
+ if (ui16SubEntriesParsed != psRIEntry->ui16SubListCount)
+ {
+ /*
+ * Output error message as sublist does not contain the
+ * number of entries indicated by sublist count
+ */
+ _RIOutput (("RI ERROR: RI sublist contains %d entries, not %d entries\n",
+ ui16SubEntriesParsed,psRIEntry->ui16SubListCount));
+ }
+ else if (psRIEntry->ui16SubListCount && !dllist_get_next_node(&(psRIEntry->sSubListFirst)))
+ {
+ /*
+ * Output error message as sublist is empty but sublist count
+ * is not zero
+ */
+ _RIOutput (("RI ERROR: ui16SubListCount=%d for empty RI sublist\n",
+ psRIEntry->ui16SubListCount));
+ }
+ }
+ psRIEntry = NULL;
+ }
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpAllKM
+
+ @Description
+ Dumps out the contents of all RI List entries (i.e. for all
+ MEMDESC allocations for each PMR).
+ At present, output is directed to Kernel log
+ via PVR_DPF.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpAllKM(void)
+{
+ if (g_pRIHashTable)
+ {
+ return HASH_Iterate(g_pRIHashTable, (HASH_pfnCallback)_DumpAllEntries);
+ }
+ return PVRSRV_OK;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpProcessKM
+
+ @Description
+ Dumps out the contents of all MEMDESC RI List entries (for every
+    PMR) which have been allocated by the specified process only.
+ At present, output is directed to Kernel log
+ via PVR_DPF.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 dummyPMR;
+
+ if (!g_pProcHashTable)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ eError = _DumpList((PMR *)&dummyPMR,pid);
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+
+ @Function _TotalAllocsForProcess
+ @Description
+            Totals all PMR physical backing for the given process.
+
+ @input pid - ID of process.
+
+ @input ePhysHeapType - type of Physical Heap for which to total allocs
+
+ @Return Size of all physical backing for PID's PMRs allocated from the
+ specified heap type (in bytes).
+
+******************************************************************************/
+static IMG_INT32 _TotalAllocsForProcess(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psInitialRISubEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ uintptr_t hashData = 0;
+ IMG_PID hashKey;
+ IMG_INT32 i32TotalPhysical = 0;
+
+ if (g_pRIHashTable && g_pProcHashTable)
+ {
+ if (pid == PVR_SYS_ALLOC_PID)
+ {
+ IMG_UINT32 ui32ProcessedSysAllocPMRCount = 0;
+ DLLIST_NODE *psSysAllocNode = NULL;
+
+ OSLockAcquire(g_hSysAllocPidListLock);
+ psSysAllocNode = dllist_get_next_node(&g_sSysAllocPidListHead);
+ while (psSysAllocNode && psSysAllocNode != &g_sSysAllocPidListHead)
+ {
+ psRIEntry = IMG_CONTAINER_OF((PDLLIST_NODE)psSysAllocNode, RI_LIST_ENTRY, sSysAllocListNode);
+ ui32ProcessedSysAllocPMRCount++;
+ if (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType)
+ {
+ IMG_UINT64 ui64PhysicalSize;
+
+ PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize);
+					if ((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)
+					{
+						PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeds the range of IMG_INT32",__func__));
+					}
+ i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff);
+ }
+ psSysAllocNode = dllist_get_next_node(psSysAllocNode);
+ }
+ OSLockRelease(g_hSysAllocPidListLock);
+ }
+ else
+ {
+ if (pid != 0)
+ {
+ /* look-up pid in Hash Table */
+ hashKey = pid;
+ hashData = HASH_Retrieve_Extended (g_pProcHashTable, (void *)&hashKey);
+ if (hashData)
+ {
+ psInitialRISubEntry = IMG_CONTAINER_OF((PDLLIST_NODE)hashData, RI_SUBLIST_ENTRY, sProcListNode);
+ psRISubEntry = psInitialRISubEntry;
+ if (psRISubEntry)
+ {
+ psRIEntry = psRISubEntry->psRI;
+ }
+ }
+ }
+
+ while (psRISubEntry && psRIEntry)
+ {
+ if (!psRISubEntry->bIsImport && !(psRIEntry->ui32RIPMRFlags & RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS) &&
+ (pid == PVR_SYS_ALLOC_PID || !(psRIEntry->ui32RIPMRFlags & RI_FLAG_SYSALLOC_PMR)) &&
+ (PhysHeapGetType(PMR_PhysHeap(psRIEntry->psPMR)) == ePhysHeapType))
+ {
+ IMG_UINT64 ui64PhysicalSize;
+
+ PMR_PhysicalSize(psRIEntry->psPMR, (IMG_DEVMEM_SIZE_T*)&ui64PhysicalSize);
+				if ((IMG_UINT64)i32TotalPhysical + ui64PhysicalSize > 0x7fffffff)
+				{
+					PVR_DPF((PVR_DBG_WARNING, "%s: i32TotalPhysical exceeds the range of IMG_INT32",__func__));
+				}
+ i32TotalPhysical += (IMG_INT32)(ui64PhysicalSize & 0x00000000ffffffff);
+ psRIEntry->ui32RIPMRFlags |= RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS;
+ }
+				if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+ {
+ psRISubEntry = NULL;
+ psRIEntry = NULL;
+ }
+ else
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+ RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry)
+ {
+ psRIEntry = psRISubEntry->psRI;
+ }
+ }
+ }
+ psRISubEntry = psInitialRISubEntry;
+ if (psRISubEntry)
+ {
+ psRIEntry = psRISubEntry->psRI;
+ }
+ while (psRISubEntry && psRIEntry)
+ {
+ psRIEntry->ui32RIPMRFlags &= ~RI_FLAG_PMR_PHYS_COUNTED_BY_DEBUGFS;
+				if ((dllist_get_next_node(&(psRISubEntry->sProcListNode)) == NULL) ||
+ (dllist_get_next_node(&(psRISubEntry->sProcListNode)) == (PDLLIST_NODE)hashData))
+ {
+ psRISubEntry = NULL;
+ psRIEntry = NULL;
+ }
+ else
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sProcListNode)),
+ RI_SUBLIST_ENTRY, sProcListNode);
+ if (psRISubEntry)
+ {
+ psRIEntry = psRISubEntry->psRI;
+ }
+ }
+ }
+ }
+ }
+ return i32TotalPhysical;
+}
+
+/*!
+******************************************************************************
+
+ @Function RITotalAllocProcessKM
+
+ @Description
+    Returns the total amount of allocated GPU memory (backing for PMRs)
+    which has been allocated from the specified heap type by the
+    specified process.
+
+ @Return Amount of physical backing allocated (in bytes)
+
+******************************************************************************/
+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType)
+{
+ IMG_INT32 i32BackingTotal = 0;
+
+ if (g_pProcHashTable)
+ {
+ /* Acquire RI lock*/
+ _RILock();
+
+ i32BackingTotal = _TotalAllocsForProcess(pid, ePhysHeapType);
+
+ /* Release RI lock*/
+ _RIUnlock();
+ }
+ return i32BackingTotal;
+}
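+
+/* Illustrative call site (a sketch, not part of this file): the process
+ * statistics code can query, for example, the LMA-backed total attributed
+ * to a process (PHYS_HEAP_TYPE_LMA is assumed to be defined in physheap.h):
+ *
+ *     IMG_INT32 i32Bytes = RITotalAllocProcessKM(pid, PHYS_HEAP_TYPE_LMA);
+ */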
+
+#if defined(DEBUG)
+/*!
+******************************************************************************
+
+ @Function	_DumpProcessList
+ @Description
+            Searches the MEMDESC RI entries of the specified PMR for an
+            entry created by the process with ID pid whose range contains
+            the given offset, and returns its device virtual address.
+
+ @input     psPMR - PMR whose RI entries are to be searched
+ @input     pid - ID of the process whose MEMDESC RI entries are to be
+            considered
+ @input     ui64Offset - Offset (from the start of the PMR) to look up
+ @output    psDevVAddr - Returned device virtual address
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+static PVRSRV_ERROR _DumpProcessList(PMR *psPMR,
+ IMG_PID pid,
+ IMG_UINT64 ui64Offset,
+ IMG_DEV_VIRTADDR *psDevVAddr)
+{
+ RI_LIST_ENTRY *psRIEntry = NULL;
+ RI_SUBLIST_ENTRY *psRISubEntry = NULL;
+ IMG_UINT16 ui16SubEntriesParsed = 0;
+ uintptr_t hashData = 0;
+ PMR *pPMRHashKey = psPMR;
+
+ psDevVAddr->uiAddr = 0;
+
+ if (!psPMR)
+ {
+ /* NULL handle provided */
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ if (g_pRIHashTable && g_pProcHashTable)
+ {
+ PVR_ASSERT(psPMR && pid);
+
+ /* look-up psPMR in Hash Table */
+ hashData = HASH_Retrieve_Extended (g_pRIHashTable, (void *)&pPMRHashKey);
+ psRIEntry = (RI_LIST_ENTRY *)hashData;
+
+ if (!psRIEntry)
+ {
+ /* No entry found in hash table */
+ return PVRSRV_ERROR_NOT_FOUND;
+ }
+
+ if (psRIEntry->ui16SubListCount)
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)),
+ RI_SUBLIST_ENTRY, sListNode);
+
+ /* Traverse RI sublist and output details for each entry */
+ while (psRISubEntry && (ui16SubEntriesParsed < psRIEntry->ui16SubListCount))
+ {
+ if (pid == psRISubEntry->pid)
+ {
+ IMG_UINT64 ui64StartOffset = psRISubEntry->ui64Offset;
+ IMG_UINT64 ui64EndOffset = psRISubEntry->ui64Offset + psRISubEntry->ui64Size;
+
+ if (ui64Offset >= ui64StartOffset && ui64Offset < ui64EndOffset)
+ {
+ psDevVAddr->uiAddr = psRISubEntry->sVAddr.uiAddr;
+ return PVRSRV_OK;
+ }
+ }
+
+ ui16SubEntriesParsed++;
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRISubEntry->sListNode)),
+ RI_SUBLIST_ENTRY, sListNode);
+ }
+ }
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+/*!
+******************************************************************************
+
+ @Function RIDumpProcessListKM
+
+ @Description
+ Dumps out selected contents of all MEMDESC RI List entries (for a
+    PMR) which have been allocated by the specified process only.
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+ IMG_PID pid,
+ IMG_UINT64 ui64Offset,
+ IMG_DEV_VIRTADDR *psDevVAddr)
+{
+ PVRSRV_ERROR eError;
+
+ if (!g_pProcHashTable)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Acquire RI lock*/
+ _RILock();
+
+ eError = _DumpProcessList(psPMR,
+ pid,
+ ui64Offset,
+ psDevVAddr);
+
+ /* Release RI lock*/
+ _RIUnlock();
+
+ return eError;
+}
+#endif
+
+static PVRSRV_ERROR _DumpAllEntries (uintptr_t k, uintptr_t v)
+{
+ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+
+ PVR_UNREFERENCED_PARAMETER (k);
+
+ return RIDumpListKM(psRIEntry->psPMR);
+}
+
+static PVRSRV_ERROR _DeleteAllEntries (uintptr_t k, uintptr_t v)
+{
+ RI_LIST_ENTRY *psRIEntry = (RI_LIST_ENTRY *)v;
+ RI_SUBLIST_ENTRY *psRISubEntry;
+ PVRSRV_ERROR eResult = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER (k);
+
+ while ((eResult == PVRSRV_OK) && (psRIEntry->ui16SubListCount > 0))
+ {
+ psRISubEntry = IMG_CONTAINER_OF(dllist_get_next_node(&(psRIEntry->sSubListFirst)), RI_SUBLIST_ENTRY, sListNode);
+ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE)psRISubEntry);
+ }
+ if (eResult == PVRSRV_OK)
+ {
+ eResult = RIDeletePMREntryKM((RI_HANDLE)psRIEntry);
+ /*
+ * If we've deleted the Hash table, return
+ * an error to stop the iterator...
+ */
+ if (!g_pRIHashTable)
+ {
+ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+ }
+ return eResult;
+}
+
+static PVRSRV_ERROR _DeleteAllProcEntries (uintptr_t k, uintptr_t v)
+{
+ RI_SUBLIST_ENTRY *psRISubEntry = (RI_SUBLIST_ENTRY *)v;
+ PVRSRV_ERROR eResult;
+
+ PVR_UNREFERENCED_PARAMETER (k);
+
+ eResult = RIDeleteMEMDESCEntryKM((RI_HANDLE) psRISubEntry);
+ if (eResult == PVRSRV_OK && !g_pProcHashTable)
+ {
+ /*
+ * If we've deleted the Hash table, return
+ * an error to stop the iterator...
+ */
+ eResult = PVRSRV_ERROR_RESOURCE_UNAVAILABLE;
+ }
+
+ return eResult;
+}
+
+#endif /* if defined(PVR_RI_DEBUG) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/ri_server.h b/drivers/gpu/drm/img-rogue/1.10/ri_server.h
new file mode 100644
index 00000000000000..a0c1bdc100b6fc
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/ri_server.h
@@ -0,0 +1,106 @@
+/*************************************************************************/ /*!
+@File ri_server.h
+@Title Resource Information abstraction
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Resource Information (RI) functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _RI_SERVER_H_
+#define _RI_SERVER_H_
+
+#include <img_defs.h>
+#include <ri_typedefs.h>
+#include <pmr.h>
+#include <pvrsrv_error.h>
+#include <physheap.h>
+
+PVRSRV_ERROR RIInitKM(void);
+void RIDeInitKM(void);
+
+void RILockAcquireKM(void);
+void RILockReleaseKM(void);
+
+PVRSRV_ERROR RIWritePMREntryKM(PMR *psPMR);
+
+PVRSRV_ERROR RIWritePMREntryWithOwnerKM(PMR *psPMR,
+ IMG_PID ui32Owner);
+
+PVRSRV_ERROR RIWriteMEMDESCEntryKM(PMR *psPMR,
+ IMG_UINT32 ui32TextBSize,
+                                   const IMG_CHAR *psz8TextB,
+                                   IMG_UINT64 ui64Offset,
+                                   IMG_UINT64 ui64Size,
+ IMG_BOOL bIsImport,
+ IMG_BOOL bIsSuballoc,
+ RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIWriteProcListEntryKM(IMG_UINT32 ui32TextBSize,
+ const IMG_CHAR *psz8TextB,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64DevVAddr,
+ RI_HANDLE *phRIHandle);
+
+PVRSRV_ERROR RIUpdateMEMDESCAddrKM(RI_HANDLE hRIHandle,
+ IMG_DEV_VIRTADDR sVAddr);
+
+PVRSRV_ERROR RIDeletePMREntryKM(RI_HANDLE hRIHandle);
+PVRSRV_ERROR RIDeleteMEMDESCEntryKM(RI_HANDLE hRIHandle);
+
+PVRSRV_ERROR RIDeleteListKM(void);
+
+PVRSRV_ERROR RIDumpListKM(PMR *psPMR);
+
+PVRSRV_ERROR RIDumpAllKM(void);
+
+PVRSRV_ERROR RIDumpProcessKM(IMG_PID pid);
+
+#if defined(DEBUG)
+PVRSRV_ERROR RIDumpProcessListKM(PMR *psPMR,
+ IMG_PID pid,
+ IMG_UINT64 ui64Offset,
+ IMG_DEV_VIRTADDR *psDevVAddr);
+#endif
+
+IMG_BOOL RIGetListEntryKM(IMG_PID pid,
+ IMG_HANDLE **ppHandle,
+ IMG_CHAR **ppszEntryString);
+
+IMG_INT32 RITotalAllocProcessKM(IMG_PID pid, PHYS_HEAP_TYPE ePhysHeapType);
+
+#endif /* #ifndef _RI_SERVER_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/ri_typedefs.h b/drivers/gpu/drm/img-rogue/1.10/ri_typedefs.h
new file mode 100644
index 00000000000000..2580b2025099a0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/ri_typedefs.h
@@ -0,0 +1,53 @@
+/*************************************************************************/ /*!
+@File
+@Title Resource Information (RI) Management
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Client side part of RI management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef RI_TYPEDEFS_H
+#define RI_TYPEDEFS_H
+
+#include "img_types.h"
+
+typedef struct RI_SUBLIST_ENTRY RI_ENTRY;
+typedef RI_ENTRY* RI_HANDLE;
+
+#endif /* #ifndef RI_TYPEDEFS_H */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/rogue_trace_events.h b/drivers/gpu/drm/img-rogue/1.10/rogue_trace_events.h
new file mode 100644
index 00000000000000..4975a1e539dee7
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/rogue_trace_events.h
@@ -0,0 +1,455 @@
+/*************************************************************************/ /*!
+@File
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rogue
+
+#if !defined(_ROGUE_TRACE_EVENTS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ROGUE_TRACE_EVENTS_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+ ({ \
+ u64 t = ns + (NSEC_PER_USEC / 2); \
+ do_div(t, NSEC_PER_SEC); \
+ t; \
+ })
+
+#define show_usecs_from_ns(ns) \
+ ({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+ u32 rem; \
+ do_div(t, NSEC_PER_USEC); \
+ rem = do_div(t, USEC_PER_SEC); \
+ })
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void);
+#else
+void trace_fence_update_enabled_callback(void);
+#endif
+void trace_fence_update_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_update,
+
+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+ u32 sync_fwaddr, u32 sync_value),
+
+ TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+ TP_STRUCT__entry(
+ __string( comm, comm )
+ __string( cmd, cmd )
+ __string( dm, dm )
+ __field( u32, ctx_id )
+ __field( u32, offset )
+ __field( u32, sync_fwaddr )
+ __field( u32, sync_value )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm, comm);
+ __assign_str(cmd, cmd);
+ __assign_str(dm, dm);
+ __entry->ctx_id = ctx_id;
+ __entry->offset = offset;
+ __entry->sync_fwaddr = sync_fwaddr;
+ __entry->sync_value = sync_value;
+ ),
+
+ TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+ __get_str(comm),
+ __get_str(cmd),
+ __get_str(dm),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->offset,
+ (unsigned long)__entry->sync_fwaddr,
+ (unsigned long)__entry->sync_value),
+
+ trace_fence_update_enabled_callback,
+ trace_fence_update_disabled_callback
+);
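+
+/*
+ * Illustrative only, not part of this header: TRACE_EVENT_FN() above
+ * generates a trace_rogue_fence_update() inline, which driver code would
+ * call along the lines of
+ *
+ *   trace_rogue_fence_update(current->comm, pszCmd, pszDM, ui32CtxId,
+ *                            ui32Offset, ui32FWAddr, ui32Value);
+ *
+ * (argument names here are hypothetical). The enabled/disabled callbacks
+ * registered above run when the event is toggled through the tracing
+ * filesystem, e.g. events/rogue/rogue_fence_update/enable.
+ */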
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void);
+#else
+void trace_fence_check_enabled_callback(void);
+#endif
+void trace_fence_check_disabled_callback(void);
+
+TRACE_EVENT_FN(rogue_fence_check,
+
+ TP_PROTO(const char *comm, const char *cmd, const char *dm, u32 ctx_id, u32 offset,
+ u32 sync_fwaddr, u32 sync_value),
+
+ TP_ARGS(comm, cmd, dm, ctx_id, offset, sync_fwaddr, sync_value),
+
+ TP_STRUCT__entry(
+ __string( comm, comm )
+ __string( cmd, cmd )
+ __string( dm, dm )
+ __field( u32, ctx_id )
+ __field( u32, offset )
+ __field( u32, sync_fwaddr )
+ __field( u32, sync_value )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm, comm);
+ __assign_str(cmd, cmd);
+ __assign_str(dm, dm);
+ __entry->ctx_id = ctx_id;
+ __entry->offset = offset;
+ __entry->sync_fwaddr = sync_fwaddr;
+ __entry->sync_value = sync_value;
+ ),
+
+ TP_printk("comm=%s cmd=%s dm=%s ctx_id=%lu offset=%lu sync_fwaddr=%#lx sync_value=%#lx",
+ __get_str(comm),
+ __get_str(cmd),
+ __get_str(dm),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->offset,
+ (unsigned long)__entry->sync_fwaddr,
+ (unsigned long)__entry->sync_value),
+
+ trace_fence_check_enabled_callback,
+ trace_fence_check_disabled_callback
+);
+
+TRACE_EVENT(rogue_create_fw_context,
+
+ TP_PROTO(const char *comm, const char *dm, u32 ctx_id),
+
+ TP_ARGS(comm, dm, ctx_id),
+
+ TP_STRUCT__entry(
+ __string( comm, comm )
+ __string( dm, dm )
+ __field( u32, ctx_id )
+ ),
+
+ TP_fast_assign(
+ __assign_str(comm, comm);
+ __assign_str(dm, dm);
+ __entry->ctx_id = ctx_id;
+ ),
+
+ TP_printk("comm=%s dm=%s ctx_id=%lu",
+ __get_str(comm),
+ __get_str(dm),
+ (unsigned long)__entry->ctx_id)
+);
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+void PVRGpuTraceEnableUfoCallback(void);
+void PVRGpuTraceDisableUfoCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableUfoCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableUfoCallbackWrapper \
+ PVRGpuTraceEnableUfoCallback
+#endif
+
+TRACE_EVENT_FN(rogue_ufo_update,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+ u32 old_value, u32 new_value),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, old_value, new_value),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, old_value )
+ __field( u32, new_value )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->old_value = old_value;
+ __entry->new_value = new_value;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+ "old_value=%#lx new_value=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->old_value,
+ (unsigned long)__entry->new_value),
+ PVRGpuTraceEnableUfoCallbackWrapper,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_fail,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+ u32 value, u32 required),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value, required),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ __field( u32, required )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ __entry->required = required;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+ "value=%#lx required=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value,
+ (unsigned long)__entry->required),
+ PVRGpuTraceEnableUfoCallbackWrapper,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_fail,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr,
+ u32 value, u32 required),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value, required),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ __field( u32, required )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ __entry->required = required;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx "
+ "value=%#lx required=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value,
+ (unsigned long)__entry->required),
+ PVRGpuTraceEnableUfoCallbackWrapper,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_check_success,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr, u32 value),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx value=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value),
+ PVRGpuTraceEnableUfoCallbackWrapper,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT_FN(rogue_ufo_pr_check_success,
+
+ TP_PROTO(u64 timestamp, u32 ctx_id, u32 job_id, u32 fwaddr, u32 value),
+
+ TP_ARGS(timestamp, ctx_id, job_id, fwaddr, value),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __field( u32, ctx_id )
+ __field( u32, job_id )
+ __field( u32, fwaddr )
+ __field( u32, value )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __entry->ctx_id = ctx_id;
+ __entry->job_id = job_id;
+ __entry->fwaddr = fwaddr;
+ __entry->value = value;
+ ),
+
+ TP_printk("ts=%llu.%06lu ctx_id=%lu job_id=%lu fwaddr=%#lx value=%#lx",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ (unsigned long)__entry->ctx_id,
+ (unsigned long)__entry->job_id,
+ (unsigned long)__entry->fwaddr,
+ (unsigned long)__entry->value),
+ PVRGpuTraceEnableUfoCallbackWrapper,
+ PVRGpuTraceDisableUfoCallback
+);
+
+TRACE_EVENT(rogue_events_lost,
+
+ TP_PROTO(u32 event_source, u32 last_ordinal, u32 curr_ordinal),
+
+ TP_ARGS(event_source, last_ordinal, curr_ordinal),
+
+ TP_STRUCT__entry(
+ __field( u32, event_source )
+ __field( u32, last_ordinal )
+ __field( u32, curr_ordinal )
+ ),
+
+ TP_fast_assign(
+ __entry->event_source = event_source;
+ __entry->last_ordinal = last_ordinal;
+ __entry->curr_ordinal = curr_ordinal;
+ ),
+
+ TP_printk("event_source=%s last_ordinal=%u curr_ordinal=%u",
+ __print_symbolic(__entry->event_source, {0, "GPU"}, {1, "Host"}),
+ __entry->last_ordinal,
+ __entry->curr_ordinal)
+);
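+
+/*
+ * Note: __print_symbolic() stores only the raw event_source value in the
+ * trace ring buffer; the {0, "GPU"}, {1, "Host"} mapping above is applied
+ * when the trace is read back, not at event time.
+ */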
+
+void PVRGpuTraceEnableFirmwareActivityCallback(void);
+void PVRGpuTraceDisableFirmwareActivityCallback(void);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void);
+#else
+#define PVRGpuTraceEnableFirmwareActivityCallbackWrapper \
+ PVRGpuTraceEnableFirmwareActivityCallback
+#endif
+
+TRACE_EVENT_FN(rogue_firmware_activity,
+
+ TP_PROTO(u64 timestamp, const char *task, u32 fw_event),
+
+ TP_ARGS(timestamp, task, fw_event),
+
+ TP_STRUCT__entry(
+ __field( u64, timestamp )
+ __string( task, task )
+ __field( u32, fw_event )
+ ),
+
+ TP_fast_assign(
+ __entry->timestamp = timestamp;
+ __assign_str(task, task);
+ __entry->fw_event = fw_event;
+ ),
+
+ TP_printk("ts=%llu.%06lu task=%s event=%s",
+ (unsigned long long)show_secs_from_ns(__entry->timestamp),
+ (unsigned long)show_usecs_from_ns(__entry->timestamp),
+ __get_str(task),
+ __print_symbolic(__entry->fw_event,
+ /* These values are from pvr_gputrace.h. */
+ { 1, "begin" },
+ { 2, "end" })),
+
+ PVRGpuTraceEnableFirmwareActivityCallbackWrapper,
+ PVRGpuTraceDisableFirmwareActivityCallback
+);
+
+#endif /* defined(SUPPORT_GPUTRACE_EVENTS) */
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _ROGUE_TRACE_EVENTS_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+
+/* This is needed because the name of this file doesn't match TRACE_SYSTEM. */
+#define TRACE_INCLUDE_FILE rogue_trace_events
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
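+
+/*
+ * Standard tracepoint convention (not specific to this file): exactly one
+ * compilation unit in the driver is expected to define CREATE_TRACE_POINTS
+ * before including this header, e.g.
+ *
+ *   #define CREATE_TRACE_POINTS
+ *   #include "rogue_trace_events.h"
+ *
+ * so that <trace/define_trace.h> emits the tracepoint bodies exactly once.
+ */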
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_breakpoint_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_breakpoint_bridge.c
new file mode 100644
index 00000000000000..e14d7a82944108
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_breakpoint_bridge.c
@@ -0,0 +1,457 @@
+/*******************************************************************************
+@File
+@Title Server bridge for breakpoint
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for breakpoint
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxbreakpoint.h"
+
+
+#include "common_breakpoint_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETBREAKPOINT *psRGXSetBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXSETBREAKPOINT *psRGXSetBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXSetBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXSetBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXSetBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetBreakpoint_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXSetBreakpointOUT->eError =
+ PVRSRVRGXSetBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt,
+ psRGXSetBreakpointIN->eFWDataMaster,
+ psRGXSetBreakpointIN->ui32BreakpointAddr,
+ psRGXSetBreakpointIN->ui32HandlerAddr,
+ psRGXSetBreakpointIN->ui32DM);
+
+
+
+
+RGXSetBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXClearBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCLEARBREAKPOINT *psRGXClearBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXCLEARBREAKPOINT *psRGXClearBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXClearBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXClearBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXClearBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXClearBreakpoint_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXClearBreakpointOUT->eError =
+ PVRSRVRGXClearBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt);
+
+
+
+
+RGXClearBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEnableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXENABLEBREAKPOINT *psRGXEnableBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXENABLEBREAKPOINT *psRGXEnableBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXEnableBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXEnableBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXEnableBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXEnableBreakpoint_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXEnableBreakpointOUT->eError =
+ PVRSRVRGXEnableBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt);
+
+
+
+
+RGXEnableBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDisableBreakpoint(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointIN,
+ PVRSRV_BRIDGE_OUT_RGXDISABLEBREAKPOINT *psRGXDisableBreakpointOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXDisableBreakpointIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXDisableBreakpointOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXDisableBreakpointOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXDisableBreakpoint_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXDisableBreakpointOUT->eError =
+ PVRSRVRGXDisableBreakpointKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt);
+
+
+
+
+RGXDisableBreakpoint_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXOverallocateBPRegisters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersIN,
+ PVRSRV_BRIDGE_OUT_RGXOVERALLOCATEBPREGISTERS *psRGXOverallocateBPRegistersOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXOverallocateBPRegistersOUT->eError =
+ PVRSRVRGXOverallocateBPRegistersKM(psConnection, OSGetDevData(psConnection),
+ psRGXOverallocateBPRegistersIN->ui32TempRegs,
+ psRGXOverallocateBPRegistersIN->ui32SharedRegs);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
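+/* bUseLock is passed to every SetDispatchTableEntry() call below; as far
+ * as the generated glue is concerned it requests that these entry points
+ * be dispatched under the global bridge lock.
+ */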
+#endif /* EXCLUDE_BREAKPOINT_BRIDGE */
+
+#if !defined(EXCLUDE_BREAKPOINT_BRIDGE)
+PVRSRV_ERROR InitBREAKPOINTBridge(void);
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void);
+
+/*
+ * Register all BREAKPOINT functions with services
+ */
+PVRSRV_ERROR InitBREAKPOINTBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT, PVRSRVBridgeRGXSetBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT, PVRSRVBridgeRGXClearBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT, PVRSRVBridgeRGXEnableBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT, PVRSRVBridgeRGXDisableBreakpoint,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS, PVRSRVBridgeRGXOverallocateBPRegisters,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all BREAKPOINT functions from services
+ */
+PVRSRV_ERROR DeinitBREAKPOINTBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXSETBREAKPOINT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXCLEARBREAKPOINT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXENABLEBREAKPOINT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXDISABLEBREAKPOINT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_BREAKPOINT, PVRSRV_BRIDGE_BREAKPOINT_RGXOVERALLOCATEBPREGISTERS);
+
+
+
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_BREAKPOINT_BRIDGE */
+/* This bridge is conditional on EXCLUDE_BREAKPOINT_BRIDGE: when it is
+ * defined, the dispatch table is not populated with this bridge's
+ * functions.
+ */
+#define InitBREAKPOINTBridge() \
+ PVRSRV_OK
+
+#define DeinitBREAKPOINTBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_BREAKPOINT_BRIDGE */
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_cache_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_cache_bridge.c
new file mode 100644
index 00000000000000..9b204231df12ad
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_cache_bridge.c
@@ -0,0 +1,504 @@
+/*******************************************************************************
+@File
+@Title Server bridge for cache
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for cache
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "cache_km.h"
+
+
+#include "common_cache_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeCacheOpQueue(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPQUEUE *psCacheOpQueueIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPQUEUE *psCacheOpQueueOUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR **psPMRInt = NULL;
+ IMG_HANDLE *hPMRInt2 = NULL;
+ IMG_UINT64 *ui64AddressInt = NULL;
+ IMG_DEVMEM_OFFSET_T *uiOffsetInt = NULL;
+ IMG_DEVMEM_SIZE_T *uiSizeInt = NULL;
+ PVRSRV_CACHE_OP *iuCacheOpInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) +
+ (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psCacheOpQueueIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psCacheOpQueueIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto CacheOpQueue_exit;
+ }
+ }
+ }
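+ /* Summary of the staging above (a property of all generated array
+ * bridges): the bridge input message occupies at most
+ * PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so when the array payload fits in the
+ * slack after the fixed-size input struct it is staged there and the
+ * OSAllocMemNoStats() fallback is skipped; bHaveEnoughSpace records which
+ * path was taken so the exit path frees only a heap copy.
+ */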
+
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ psPMRInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PMR *);
+ hPMRInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hPMRInt2, (const void __user *) psCacheOpQueueIN->phPMR, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ ui64AddressInt = (IMG_UINT64*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui64AddressInt, (const void __user *) psCacheOpQueueIN->pui64Address, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_UINT64)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ uiOffsetInt = (IMG_DEVMEM_OFFSET_T*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiOffsetInt, (const void __user *) psCacheOpQueueIN->puiOffset, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_OFFSET_T)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ uiSizeInt = (IMG_DEVMEM_SIZE_T*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiSizeInt, (const void __user *) psCacheOpQueueIN->puiSize, psCacheOpQueueIN->ui32NumCacheOps * sizeof(IMG_DEVMEM_SIZE_T)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+ if (psCacheOpQueueIN->ui32NumCacheOps != 0)
+ {
+ iuCacheOpInt = (PVRSRV_CACHE_OP*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP);
+ }
+
+ /* Copy the data over */
+ if (psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP) > 0)
+ {
+ if ( OSCopyFromUser(NULL, iuCacheOpInt, (const void __user *) psCacheOpQueueIN->piuCacheOp, psCacheOpQueueIN->ui32NumCacheOps * sizeof(PVRSRV_CACHE_OP)) != PVRSRV_OK )
+ {
+ psCacheOpQueueOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto CacheOpQueue_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psCacheOpQueueIN->ui32NumCacheOps;i++)
+ {
+ /* Look up the address from the handle */
+ psCacheOpQueueOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt[i],
+ hPMRInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psCacheOpQueueOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto CacheOpQueue_exit;
+ }
+ }
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psCacheOpQueueOUT->eError =
+ CacheOpQueue(
+ psCacheOpQueueIN->ui32NumCacheOps,
+ psPMRInt,
+ ui64AddressInt,
+ uiOffsetInt,
+ uiSizeInt,
+ iuCacheOpInt,
+ psCacheOpQueueIN->ui32OpTimeline,
+ psCacheOpQueueIN->ui32OpInfoPgGFSeqNum,
+ psCacheOpQueueIN->ui32CurrentFenceSeqNum,
+ &psCacheOpQueueOUT->ui32NextFenceSeqNum);
+
+
+
+
+CacheOpQueue_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ if (hPMRInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psCacheOpQueueIN->ui32NumCacheOps;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hPMRInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpExec(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPEXEC *psCacheOpExecIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPEXEC *psCacheOpExecOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psCacheOpExecIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psCacheOpExecOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psCacheOpExecOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto CacheOpExec_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psCacheOpExecOUT->eError =
+ CacheOpValExec(
+ psPMRInt,
+ psCacheOpExecIN->ui64Address,
+ psCacheOpExecIN->uiOffset,
+ psCacheOpExecIN->uiSize,
+ psCacheOpExecIN->iuCacheOp);
+
+
+
+
+CacheOpExec_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeCacheOpLog(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CACHEOPLOG *psCacheOpLogIN,
+ PVRSRV_BRIDGE_OUT_CACHEOPLOG *psCacheOpLogOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psCacheOpLogIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psCacheOpLogOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psCacheOpLogOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto CacheOpLog_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psCacheOpLogOUT->eError =
+ CacheOpLog(
+ psPMRInt,
+ psCacheOpLogIN->ui64Address,
+ psCacheOpLogIN->uiOffset,
+ psCacheOpLogIN->uiSize,
+ psCacheOpLogIN->i64QueuedTimeUs,
+ psCacheOpLogIN->i64ExecuteTimeUs,
+ psCacheOpLogIN->i32NumRBF,
+ psCacheOpLogIN->bIsDiscard,
+ psCacheOpLogIN->iuCacheOp);
+
+
+
+
+CacheOpLog_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitCACHEBridge(void);
+PVRSRV_ERROR DeinitCACHEBridge(void);
+
+/*
+ * Register all CACHE functions with services
+ */
+PVRSRV_ERROR InitCACHEBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE, PVRSRVBridgeCacheOpQueue,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC, PVRSRVBridgeCacheOpExec,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG, PVRSRVBridgeCacheOpLog,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CACHE functions from services
+ */
+PVRSRV_ERROR DeinitCACHEBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPQUEUE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPEXEC);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CACHE, PVRSRV_BRIDGE_CACHE_CACHEOPLOG);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_cmm_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_cmm_bridge.c
new file mode 100644
index 00000000000000..ee680748b97781
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_cmm_bridge.c
@@ -0,0 +1,478 @@
+/*******************************************************************************
+@File
+@Title Server bridge for cmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for cmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pmr.h"
+#include "devicemem_server.h"
+
+
+#include "common_cmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemIntExportCtx(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTEXPORTCTX *psDevmemIntExportCtxOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hContext = psDevmemIntExportCtxIN->hContext;
+ DEVMEMINT_CTX * psContextInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntExportCtxIN->hPMR;
+ PMR * psPMRInt = NULL;
+ DEVMEMINT_CTX_EXPORT * psContextExportInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntExportCtxOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psContextInt,
+ hContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntExportCtx_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntExportCtxOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntExportCtx_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psDevmemIntExportCtxOUT->eError =
+ DevmemIntExportCtx(
+ psContextInt,
+ psPMRInt,
+ &psContextExportInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntExportCtx_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
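+ /* The PFN_HANDLE_RELEASE registered below (DevmemIntUnexportCtx) is
+ * invoked by the handle manager when the handle is finally destroyed,
+ * tying the export's lifetime to the returned handle.
+ */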
+ psDevmemIntExportCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntExportCtxOUT->hContextExport,
+ (void *) psContextExportInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&DevmemIntUnexportCtx);
+ if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntExportCtx_exit;
+ }
+
+ /* Release now that we have created the handles. */
+ UnlockHandle();
+
+
+
+DevmemIntExportCtx_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psDevmemIntExportCtxOUT->eError != PVRSRV_OK)
+ {
+ if (psContextExportInt)
+ {
+ DevmemIntUnexportCtx(psContextExportInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnexportCtx(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNEXPORTCTX *psDevmemIntUnexportCtxOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntUnexportCtxOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnexportCtxIN->hContextExport,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX_EXPORT);
+ if ((psDevmemIntUnexportCtxOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnexportCtxOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntUnexportCtx: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntUnexportCtxOUT->eError)));
+ UnlockHandle();
+ goto DevmemIntUnexportCtx_exit;
+ }
+
+ /* Release now that we have destroyed the handles. */
+ UnlockHandle();
+
+
+
+DevmemIntUnexportCtx_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntAcquireRemoteCtx(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTACQUIREREMOTECTX *psDevmemIntAcquireRemoteCtxOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevmemIntAcquireRemoteCtxIN->hPMR;
+ PMR * psPMRInt = NULL;
+ DEVMEMINT_CTX * psContextInt = NULL;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+ psDevmemIntAcquireRemoteCtxOUT->hContext = NULL;
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntAcquireRemoteCtxOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+ /* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psDevmemIntAcquireRemoteCtxOUT->eError =
+ DevmemIntAcquireRemoteCtx(
+ psPMRInt,
+ &psContextInt,
+ &hPrivDataInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntAcquireRemoteCtxOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntAcquireRemoteCtxOUT->hContext,
+ (void *) psContextInt,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+
+
+
+
+
+
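+ /* hPrivData is allocated as a sub-handle of hContext below, so it
+ * cannot outlive the context handle it was acquired with.
+ */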
+ psDevmemIntAcquireRemoteCtxOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psDevmemIntAcquireRemoteCtxOUT->hPrivData,
+ (void *) hPrivDataInt,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psDevmemIntAcquireRemoteCtxOUT->hContext);
+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntAcquireRemoteCtx_exit;
+ }
+
+ /* Release now that we have created the handles. */
+ UnlockHandle();
+
+
+
+DevmemIntAcquireRemoteCtx_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psDevmemIntAcquireRemoteCtxOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psDevmemIntAcquireRemoteCtxOUT->hContext)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntAcquireRemoteCtxOUT->hContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntAcquireRemoteCtx: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psContextInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psContextInt)
+ {
+ DevmemIntCtxDestroy(psContextInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_CMM_BRIDGE */
+
+#if !defined(EXCLUDE_CMM_BRIDGE)
+PVRSRV_ERROR InitCMMBridge(void);
+PVRSRV_ERROR DeinitCMMBridge(void);
+
+/*
+ * Register all CMM functions with services
+ */
+PVRSRV_ERROR InitCMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX, PVRSRVBridgeDevmemIntExportCtx,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX, PVRSRVBridgeDevmemIntUnexportCtx,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX, PVRSRVBridgeDevmemIntAcquireRemoteCtx,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all CMM functions from services
+ */
+PVRSRV_ERROR DeinitCMMBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTEXPORTCTX);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTUNEXPORTCTX);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_CMM, PVRSRV_BRIDGE_CMM_DEVMEMINTACQUIREREMOTECTX);
+
+
+
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_CMM_BRIDGE */
+/* This bridge is conditional on EXCLUDE_CMM_BRIDGE: when it is defined,
+ * the dispatch table is not populated with this bridge's functions.
+ */
+#define InitCMMBridge() \
+ PVRSRV_OK
+
+#define DeinitCMMBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_CMM_BRIDGE */
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_debugmisc_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_debugmisc_bridge.c
new file mode 100644
index 00000000000000..e050a5f2238811
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_debugmisc_bridge.c
@@ -0,0 +1,314 @@
+/*******************************************************************************
+@File
+@Title Server bridge for debugmisc
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for debugmisc
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "debugmisc_server.h"
+#include "pmr.h"
+
+
+#include "common_debugmisc_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDebugMiscSLCSetBypassState(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateIN,
+ PVRSRV_BRIDGE_OUT_DEBUGMISCSLCSETBYPASSSTATE *psDebugMiscSLCSetBypassStateOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psDebugMiscSLCSetBypassStateOUT->eError =
+ PVRSRVDebugMiscSLCSetBypassStateKM(psConnection, OSGetDevData(psConnection),
+ psDebugMiscSLCSetBypassStateIN->ui32Flags,
+ psDebugMiscSLCSetBypassStateIN->bIsBypassed);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetFWLog(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETFWLOG *psRGXDebugMiscSetFWLogOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetFWLogOUT->eError =
+ PVRSRVRGXDebugMiscSetFWLogKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetFWLogIN->ui32RGXFWLogType);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscDumpFreelistPageList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCDUMPFREELISTPAGELIST *psRGXDebugMiscDumpFreelistPageListOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXDebugMiscDumpFreelistPageListIN);
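+ /* PVR_UNREFERENCED_PARAMETER() above just silences the unused-parameter
+ * warning: this bridge call carries no input payload.
+ */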
+
+
+
+
+
+ psRGXDebugMiscDumpFreelistPageListOUT->eError =
+ PVRSRVRGXDebugMiscDumpFreelistPageListKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetHCSDeadline(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETHCSDEADLINE *psRGXDebugMiscSetHCSDeadlineIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETHCSDEADLINE *psRGXDebugMiscSetHCSDeadlineOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetHCSDeadlineOUT->eError =
+ PVRSRVRGXDebugMiscSetHCSDeadlineKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetHCSDeadlineIN->ui32RGXHCSDeadline);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetOSidPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSIDPRIORITY *psRGXDebugMiscSetOSidPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSIDPRIORITY *psRGXDebugMiscSetOSidPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetOSidPriorityOUT->eError =
+ PVRSRVRGXDebugMiscSetOSidPriorityKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetOSidPriorityIN->ui32OSid,
+ psRGXDebugMiscSetOSidPriorityIN->ui32Priority);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDebugMiscSetOSNewOnlineState(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDEBUGMISCSETOSNEWONLINESTATE *psRGXDebugMiscSetOSNewOnlineStateIN,
+ PVRSRV_BRIDGE_OUT_RGXDEBUGMISCSETOSNEWONLINESTATE *psRGXDebugMiscSetOSNewOnlineStateOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXDebugMiscSetOSNewOnlineStateOUT->eError =
+ PVRSRVRGXDebugMiscSetOSNewOnlineStateKM(psConnection, OSGetDevData(psConnection),
+ psRGXDebugMiscSetOSNewOnlineStateIN->ui32OSid,
+ psRGXDebugMiscSetOSNewOnlineStateIN->ui32OSNewState);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitDEBUGMISCBridge(void);
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void);
+
+/*
+ * Register all DEBUGMISC functions with services
+ */
+PVRSRV_ERROR InitDEBUGMISCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE, PVRSRVBridgeDebugMiscSLCSetBypassState,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG, PVRSRVBridgeRGXDebugMiscSetFWLog,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST, PVRSRVBridgeRGXDebugMiscDumpFreelistPageList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE, PVRSRVBridgeRGXDebugMiscSetHCSDeadline,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY, PVRSRVBridgeRGXDebugMiscSetOSidPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE, PVRSRVBridgeRGXDebugMiscSetOSNewOnlineState,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all DEBUGMISC functions from services
+ */
+PVRSRV_ERROR DeinitDEBUGMISCBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_DEBUGMISCSLCSETBYPASSSTATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETFWLOG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCDUMPFREELISTPAGELIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETHCSDEADLINE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSIDPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEBUGMISC, PVRSRV_BRIDGE_DEBUGMISC_RGXDEBUGMISCSETOSNEWONLINESTATE);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_devicememhistory_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_devicememhistory_bridge.c
new file mode 100644
index 00000000000000..6a004c01a4e51c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_devicememhistory_bridge.c
@@ -0,0 +1,814 @@
+/*******************************************************************************
+@File
+@Title Server bridge for devicememhistory
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for devicememhistory
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_history_server.h"
+
+
+#include "common_devicememhistory_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMap(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAP *psDevicememHistoryMapIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAP *psDevicememHistoryMapOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevicememHistoryMapIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMap_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, (const void __user *) psDevicememHistoryMapIN->puiText, DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryMapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMap_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevicememHistoryMapOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevicememHistoryMapOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevicememHistoryMap_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevicememHistoryMapOUT->eError =
+ DevicememHistoryMapKM(
+ psPMRInt,
+ psDevicememHistoryMapIN->uiOffset,
+ psDevicememHistoryMapIN->sDevVAddr,
+ psDevicememHistoryMapIN->uiSize,
+ uiTextInt,
+ psDevicememHistoryMapIN->ui32Log2PageSize,
+ psDevicememHistoryMapIN->ui32AllocationIndex,
+ &psDevicememHistoryMapOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryMap_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
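
The allocation preamble above, repeated verbatim in every entry point that carries array arguments, implements a single optimization: if the fixed-size bridge input buffer has spare room after the input struct, rounded up to word alignment, the array arguments are staged in that tail instead of taking a heap allocation. A condensed sketch of the same decision, with hypothetical names standing in for PVR_ALIGN, PVRSRV_MAX_BRIDGE_IN_SIZE and OSAllocMemNoStats:

    #include <stdint.h>
    #include <stdlib.h>

    #define BRIDGE_IN_SIZE 0x800u  /* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */
    #define ALIGN_UP(x, a) (((x) + ((a) - 1u)) & ~((a) - 1u))

    /* Return scratch space for array args: the tail of the input buffer
     * if the request fits, otherwise a heap block the caller must free. */
    static uint8_t *get_args_buffer(void *in_msg, size_t in_struct_size,
                                    size_t needed, int *heap_allocated)
    {
        size_t off = ALIGN_UP(in_struct_size, sizeof(unsigned long));
        size_t excess = (off >= BRIDGE_IN_SIZE) ? 0 : BRIDGE_IN_SIZE - off;

        if (needed <= excess) {
            *heap_allocated = 0;
            return (uint8_t *)in_msg + off;  /* reuse the bridge buffer tail */
        }
        *heap_allocated = 1;
        return malloc(needed);               /* may be NULL; caller checks */
    }

The exit paths then free the scratch space only when the heap fallback was taken, which is exactly the `!bHaveEnoughSpace && pArrayArgsBuffer` test in the function above.
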
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmap(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAP *psDevicememHistoryUnmapOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevicememHistoryUnmapIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmap_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, (const void __user *) psDevicememHistoryUnmapIN->puiText, DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryUnmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmap_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevicememHistoryUnmapOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevicememHistoryUnmapOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevicememHistoryUnmap_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevicememHistoryUnmapOUT->eError =
+ DevicememHistoryUnmapKM(
+ psPMRInt,
+ psDevicememHistoryUnmapIN->uiOffset,
+ psDevicememHistoryUnmapIN->sDevVAddr,
+ psDevicememHistoryUnmapIN->uiSize,
+ uiTextInt,
+ psDevicememHistoryUnmapIN->ui32Log2PageSize,
+ psDevicememHistoryUnmapIN->ui32AllocationIndex,
+ &psDevicememHistoryUnmapOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryUnmap_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryMapVRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYMAPVRANGE *psDevicememHistoryMapVRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryMapVRangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryMapVRangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryMapVRange_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, (const void __user *) psDevicememHistoryMapVRangeIN->puiText, DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryMapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryMapVRange_exit;
+ }
+ }
+
+
+ psDevicememHistoryMapVRangeOUT->eError =
+ DevicememHistoryMapVRangeKM(
+ psDevicememHistoryMapVRangeIN->sBaseDevVAddr,
+ psDevicememHistoryMapVRangeIN->ui32ui32StartPage,
+ psDevicememHistoryMapVRangeIN->ui32NumPages,
+ psDevicememHistoryMapVRangeIN->uiAllocSize,
+ uiTextInt,
+ psDevicememHistoryMapVRangeIN->ui32Log2PageSize,
+ psDevicememHistoryMapVRangeIN->ui32AllocationIndex,
+ &psDevicememHistoryMapVRangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryMapVRange_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistoryUnmapVRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYUNMAPVRANGE *psDevicememHistoryUnmapVRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistoryUnmapVRangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistoryUnmapVRangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, (const void __user *) psDevicememHistoryUnmapVRangeIN->puiText, DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistoryUnmapVRangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistoryUnmapVRange_exit;
+ }
+ }
+
+
+ psDevicememHistoryUnmapVRangeOUT->eError =
+ DevicememHistoryUnmapVRangeKM(
+ psDevicememHistoryUnmapVRangeIN->sBaseDevVAddr,
+ psDevicememHistoryUnmapVRangeIN->ui32ui32StartPage,
+ psDevicememHistoryUnmapVRangeIN->ui32NumPages,
+ psDevicememHistoryUnmapVRangeIN->uiAllocSize,
+ uiTextInt,
+ psDevicememHistoryUnmapVRangeIN->ui32Log2PageSize,
+ psDevicememHistoryUnmapVRangeIN->ui32AllocationIndex,
+ &psDevicememHistoryUnmapVRangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistoryUnmapVRange_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevicememHistorySparseChange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeIN,
+ PVRSRV_BRIDGE_OUT_DEVICEMEMHISTORYSPARSECHANGE *psDevicememHistorySparseChangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevicememHistorySparseChangeIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiTextInt = NULL;
+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+ IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) +
+ (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+ (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevicememHistorySparseChangeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevicememHistorySparseChangeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ }
+
+
+ {
+ uiTextInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextInt, (const void __user *) psDevicememHistorySparseChangeIN->puiText, DEVMEM_ANNOTATION_MAX_LEN * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount != 0)
+ {
+ ui32AllocPageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32AllocPageIndicesInt, (const void __user *) psDevicememHistorySparseChangeIN->pui32AllocPageIndices, psDevicememHistorySparseChangeIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount != 0)
+ {
+ ui32FreePageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FreePageIndicesInt, (const void __user *) psDevicememHistorySparseChangeIN->pui32FreePageIndices, psDevicememHistorySparseChangeIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psDevicememHistorySparseChangeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevicememHistorySparseChange_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevicememHistorySparseChangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevicememHistorySparseChangeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevicememHistorySparseChange_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevicememHistorySparseChangeOUT->eError =
+ DevicememHistorySparseChangeKM(
+ psPMRInt,
+ psDevicememHistorySparseChangeIN->uiOffset,
+ psDevicememHistorySparseChangeIN->sDevVAddr,
+ psDevicememHistorySparseChangeIN->uiSize,
+ uiTextInt,
+ psDevicememHistorySparseChangeIN->ui32Log2PageSize,
+ psDevicememHistorySparseChangeIN->ui32AllocPageCount,
+ ui32AllocPageIndicesInt,
+ psDevicememHistorySparseChangeIN->ui32FreePageCount,
+ ui32FreePageIndicesInt,
+ psDevicememHistorySparseChangeIN->ui32AllocationIndex,
+ &psDevicememHistorySparseChangeOUT->ui32AllocationIndexOut);
+
+
+
+
+DevicememHistorySparseChange_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
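
The sparse-change handler above is the one entry point whose scratch size is a sum of three products of userspace-supplied 32-bit counts. Whether those counts are bounded elsewhere is outside this patch; as a point of comparison, a sketch of the same accumulation with an explicit 32-bit overflow check (hypothetical helper, not driver API):

    #include <stdbool.h>
    #include <stdint.h>

    /* acc += count * elem_size, failing instead of wrapping at 32 bits. */
    static bool add_sized(uint32_t *acc, uint32_t count, uint32_t elem_size)
    {
        uint64_t sum = (uint64_t)*acc + (uint64_t)count * elem_size;

        if (sum > UINT32_MAX)
            return false;        /* would wrap: reject the request */
        *acc = (uint32_t)sum;
        return true;
    }

Usage would mirror the three terms above, e.g. add_sized(&size, annotation_len, 1) && add_sized(&size, alloc_count, 4) && add_sized(&size, free_count, 4), with the names purely illustrative.
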
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pDEVICEMEMHISTORYBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
+
+#if defined(SUPPORT_DEVICEMEMHISTORY_BRIDGE)
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void);
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void);
+
+/*
+ * Register all DEVICEMEMHISTORY functions with services
+ */
+PVRSRV_ERROR InitDEVICEMEMHISTORYBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockCreate(&pDEVICEMEMHISTORYBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP, PVRSRVBridgeDevicememHistoryMap,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP, PVRSRVBridgeDevicememHistoryUnmap,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE, PVRSRVBridgeDevicememHistoryMapVRange,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE, PVRSRVBridgeDevicememHistoryUnmapVRange,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE, PVRSRVBridgeDevicememHistorySparseChange,
+ pDEVICEMEMHISTORYBridgeLock, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all devicememhistory functions from services
+ */
+PVRSRV_ERROR DeinitDEVICEMEMHISTORYBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockDestroy(pDEVICEMEMHISTORYBridgeLock), "OSLockDestroy");
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYMAPVRANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYUNMAPVRANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DEVICEMEMHISTORY, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DEVICEMEMHISTORYSPARSECHANGE);
+
+
+
+ return PVRSRV_OK;
+}
+#else /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
+/* This bridge is conditional on SUPPORT_DEVICEMEMHISTORY_BRIDGE - when it is
+ * not defined, the dispatch table is not populated with its functions.
+ */
+#define InitDEVICEMEMHISTORYBridge() \
+ PVRSRV_OK
+
+#define DeinitDEVICEMEMHISTORYBridge() \
+ PVRSRV_OK
+
+#endif /* SUPPORT_DEVICEMEMHISTORY_BRIDGE */
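
Because the stubbed variants above expand to the constant PVRSRV_OK, callers can invoke the init/deinit pair without their own #ifdef. A sketch of that caller-side shape, assuming a hypothetical registration function around the Init entry points this patch defines:

    /* No #ifdef needed at the call site: with the bridge compiled out,
     * InitDEVICEMEMHISTORYBridge() is simply the constant PVRSRV_OK. */
    static PVRSRV_ERROR register_optional_bridges(void)
    {
        PVRSRV_ERROR eError = InitDEVICEMEMHISTORYBridge();

        if (eError != PVRSRV_OK)
            return eError;

        return InitDMABUFBridge();   /* same convention for other bridges */
    }
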
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_dmabuf_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_dmabuf_bridge.c
new file mode 100644
index 00000000000000..182b07b5dfe622
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_dmabuf_bridge.c
@@ -0,0 +1,485 @@
+/*******************************************************************************
+@File
+@Title Server bridge for dmabuf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for dmabuf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "physmem_dmabuf.h"
+#include "pmr.h"
+
+
+#include "common_dmabuf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePhysmemImportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTDMABUF *psPhysmemImportDmaBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNameInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportDmaBufIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemImportDmaBufIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemImportDmaBuf_exit;
+ }
+ }
+ }
+
+ if (psPhysmemImportDmaBufIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNameInt, (const void __user *) psPhysmemImportDmaBufIN->puiName, psPhysmemImportDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPhysmemImportDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportDmaBuf_exit;
+ }
+ }
+
+
+ psPhysmemImportDmaBufOUT->eError =
+ PhysmemImportDmaBuf(psConnection, OSGetDevData(psConnection),
+ psPhysmemImportDmaBufIN->ifd,
+ psPhysmemImportDmaBufIN->uiFlags,
+ psPhysmemImportDmaBufIN->ui32NameSize,
+ uiNameInt,
+ &psPMRPtrInt,
+ &psPhysmemImportDmaBufOUT->uiSize,
+ &psPhysmemImportDmaBufOUT->sAlign);
+ /* Exit early if bridged call fails */
+ if(psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPhysmemImportDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPhysmemImportDmaBufOUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemImportDmaBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemImportDmaBuf_exit:
+
+
+
+ if (psPhysmemImportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
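
Above, the new PMR handle is registered together with PMRUnrefPMR as its PFN_HANDLE_RELEASE, so whoever destroys the handle also drops the PMR reference; that is why the error path only calls PMRUnrefPMR directly when handle creation itself failed. A generic sketch of that ownership transfer (hypothetical types, standard C):

    #include <stddef.h>

    typedef void (*RELEASE_FN)(void *resource);

    struct handle {
        void      *resource;
        RELEASE_FN release;
    };

    /* Bind a resource to a handle; the handle now owns one reference. */
    static void handle_bind(struct handle *h, void *res, RELEASE_FN rel)
    {
        h->resource = res;
        h->release  = rel;
    }

    /* Destroying the handle releases the resource exactly once. */
    static void handle_destroy(struct handle *h)
    {
        if (h->resource && h->release)
            h->release(h->resource);   /* e.g. the PMR unref routine */
        h->resource = NULL;
        h->release  = NULL;
    }
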
+
+
+static IMG_INT
+PVRSRVBridgePhysmemExportDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMEXPORTDMABUF *psPhysmemExportDmaBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPhysmemExportDmaBufIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psPhysmemExportDmaBufOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPhysmemExportDmaBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemExportDmaBuf_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPhysmemExportDmaBufOUT->eError =
+ PhysmemExportDmaBuf(psConnection, OSGetDevData(psConnection),
+ psPMRInt,
+ &psPhysmemExportDmaBufOUT->iFd);
+
+
+
+
+PhysmemExportDmaBuf_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
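
PhysmemExportDmaBuf above shows the locking discipline used by every lookup in these files: the global handle lock is held only while taking a reference, dropped for the KM call, and re-taken on exit to release the reference. A sketch of that shape (hypothetical lock and refcount; the real LockHandle/UnlockHandle and handle base are not shown in this patch):

    struct ref_obj { int refcount; /* ...payload... */ };

    static void lock_handles(void)   { /* acquire global handle lock */ }
    static void unlock_handles(void) { /* release global handle lock */ }

    /* Pin the object across a potentially slow call, without holding
     * the handle lock while that call runs. */
    static void bridge_call(struct ref_obj *obj,
                            void (*km_call)(struct ref_obj *))
    {
        lock_handles();
        obj->refcount++;      /* reference keeps the object alive */
        unlock_handles();

        km_call(obj);         /* runs unlocked */

        lock_handles();
        obj->refcount--;      /* matching unref in the exit path */
        unlock_handles();
    }
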
+
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSparseDmaBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSPARSEDMABUF *psPhysmemImportSparseDmaBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ IMG_CHAR *uiNameInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+ (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemImportSparseDmaBufIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemImportSparseDmaBufIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+ }
+
+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks != 0)
+ {
+ ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32MappingTableInt, (const void __user *) psPhysmemImportSparseDmaBufIN->pui32MappingTable, psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+ if (psPhysmemImportSparseDmaBufIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNameInt, (const void __user *) psPhysmemImportSparseDmaBufIN->puiName, psPhysmemImportSparseDmaBufIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+ }
+
+
+ psPhysmemImportSparseDmaBufOUT->eError =
+ PhysmemImportSparseDmaBuf(psConnection, OSGetDevData(psConnection),
+ psPhysmemImportSparseDmaBufIN->ifd,
+ psPhysmemImportSparseDmaBufIN->uiFlags,
+ psPhysmemImportSparseDmaBufIN->uiChunkSize,
+ psPhysmemImportSparseDmaBufIN->ui32NumPhysChunks,
+ psPhysmemImportSparseDmaBufIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ psPhysmemImportSparseDmaBufIN->ui32NameSize,
+ uiNameInt,
+ &psPMRPtrInt,
+ &psPhysmemImportSparseDmaBufOUT->uiSize,
+ &psPhysmemImportSparseDmaBufOUT->sAlign);
+ /* Exit early if bridged call fails */
+ if(psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psPhysmemImportSparseDmaBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psPhysmemImportSparseDmaBufOUT->hPMRPtr,
+ (void *) psPMRPtrInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemImportSparseDmaBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemImportSparseDmaBuf_exit:
+
+
+
+ if (psPhysmemImportSparseDmaBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitDMABUFBridge(void);
+PVRSRV_ERROR DeinitDMABUFBridge(void);
+
+/*
+ * Register all DMABUF functions with services
+ */
+PVRSRV_ERROR InitDMABUFBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF, PVRSRVBridgePhysmemImportDmaBuf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF, PVRSRVBridgePhysmemExportDmaBuf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF, PVRSRVBridgePhysmemImportSparseDmaBuf,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all dmabuf functions from services
+ */
+PVRSRV_ERROR DeinitDMABUFBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTDMABUF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMEXPORTDMABUF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_DMABUF, PVRSRV_BRIDGE_DMABUF_PHYSMEMIMPORTSPARSEDMABUF);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_htbuffer_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_htbuffer_bridge.c
new file mode 100644
index 00000000000000..622014b249972d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_htbuffer_bridge.c
@@ -0,0 +1,437 @@
+/*******************************************************************************
+@File
+@Title Server bridge for htbuffer
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for htbuffer
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "htbserver.h"
+
+
+#include "common_htbuffer_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+#if defined(PVR_NEVER_USED)
+static IMG_INT
+PVRSRVBridgeHTBConfigure(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HTBCONFIGURE *psHTBConfigureIN,
+ PVRSRV_BRIDGE_OUT_HTBCONFIGURE *psHTBConfigureOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBConfigureIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBConfigureIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHTBConfigureOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBConfigure_exit;
+ }
+ }
+ }
+
+ if (psHTBConfigureIN->ui32NameSize != 0)
+ {
+ uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNameInt, (const void __user *) psHTBConfigureIN->puiName, psHTBConfigureIN->ui32NameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psHTBConfigureOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBConfigure_exit;
+ }
+ }
+
+
+ psHTBConfigureOUT->eError =
+ PVRSRVHTBConfigureKM(
+ psHTBConfigureIN->ui32NameSize,
+ uiNameInt,
+ psHTBConfigureIN->ui32BufferSize);
+
+
+
+
+HTBConfigure_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeHTBConfigure NULL
+#endif
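
Defining PVRSRVBridgeHTBConfigure to NULL when the handler is compiled out keeps the HTBUFFER slot numbering stable while leaving that slot empty. How srvcore treats a NULL entry is not shown in this patch; assuming it is rejected at dispatch time, the check would look like:

    #include <stddef.h>

    typedef int (*BRIDGE_FN)(void *in, void *out);

    /* A NULL slot yields "call not implemented" rather than an
     * indirect jump through a NULL pointer. */
    static int dispatch(const BRIDGE_FN *table, unsigned n,
                        unsigned slot, void *in, void *out)
    {
        if (slot >= n || table[slot] == NULL)
            return -1;   /* stand-in for an "unsupported call" error */
        return table[slot](in, out);
    }
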
+
+static IMG_INT
+PVRSRVBridgeHTBControl(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HTBCONTROL *psHTBControlIN,
+ PVRSRV_BRIDGE_OUT_HTBCONTROL *psHTBControlOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32GroupEnableInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBControlIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBControlIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBControl_exit;
+ }
+ }
+ }
+
+ if (psHTBControlIN->ui32NumGroups != 0)
+ {
+ ui32GroupEnableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32GroupEnableInt, (const void __user *) psHTBControlIN->pui32GroupEnable, psHTBControlIN->ui32NumGroups * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psHTBControlOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBControl_exit;
+ }
+ }
+
+
+ psHTBControlOUT->eError =
+ HTBControlKM(
+ psHTBControlIN->ui32NumGroups,
+ ui32GroupEnableInt,
+ psHTBControlIN->ui32LogLevel,
+ psHTBControlIN->ui32EnablePID,
+ psHTBControlIN->ui32LogMode,
+ psHTBControlIN->ui32OpMode);
+
+
+
+
+HTBControl_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
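
Each "Copy the data over" step above guards an OSCopyFromUser of count times element-size bytes and maps any failure to PVRSRV_ERROR_INVALID_PARAMS. A condensed kernel-C sketch of that staging step, using copy_from_user directly as a stand-in for the OS abstraction:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Stage a user-supplied u32 array into kernel scratch space. */
    static int stage_u32_array(u32 *dst, const u32 __user *src, u32 count)
    {
        if (count == 0)
            return 0;        /* nothing to copy is not an error */

        if (copy_from_user(dst, src, (size_t)count * sizeof(u32)))
            return -EFAULT;  /* analogue of PVRSRV_ERROR_INVALID_PARAMS */

        return 0;
    }
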
+
+
+static IMG_INT
+PVRSRVBridgeHTBLog(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HTBLOG *psHTBLogIN,
+ PVRSRV_BRIDGE_OUT_HTBLOG *psHTBLogOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32ArgsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHTBLogIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHTBLogIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHTBLogOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HTBLog_exit;
+ }
+ }
+ }
+
+ if (psHTBLogIN->ui32NumArgs != 0)
+ {
+ ui32ArgsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ArgsInt, (const void __user *) psHTBLogIN->pui32Args, psHTBLogIN->ui32NumArgs * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psHTBLogOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HTBLog_exit;
+ }
+ }
+
+
+ psHTBLogOUT->eError =
+ HTBLogKM(
+ psHTBLogIN->ui32PID,
+ psHTBLogIN->ui32TimeStamp,
+ psHTBLogIN->ui32SF,
+ psHTBLogIN->ui32NumArgs,
+ ui32ArgsInt);
+
+
+
+
+HTBLog_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static POS_LOCK pHTBUFFERBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
+
+#if !defined(EXCLUDE_HTBUFFER_BRIDGE)
+PVRSRV_ERROR InitHTBUFFERBridge(void);
+PVRSRV_ERROR DeinitHTBUFFERBridge(void);
+
+/*
+ * Register all HTBUFFER functions with services
+ */
+PVRSRV_ERROR InitHTBUFFERBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockCreate(&pHTBUFFERBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE, PVRSRVBridgeHTBConfigure,
+ pHTBUFFERBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL, PVRSRVBridgeHTBControl,
+ pHTBUFFERBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG, PVRSRVBridgeHTBLog,
+ pHTBUFFERBridgeLock, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all htbuffer functions from services
+ */
+PVRSRV_ERROR DeinitHTBUFFERBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockDestroy(pHTBUFFERBridgeLock), "OSLockDestroy");
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONFIGURE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBCONTROL);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_HTBUFFER, PVRSRV_BRIDGE_HTBUFFER_HTBLOG);
+
+
+
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_HTBUFFER_BRIDGE */
+/* This bridge is conditional on EXCLUDE_HTBUFFER_BRIDGE - when it is defined,
+ * the dispatch table is not populated with its functions.
+ */
+#define InitHTBUFFERBridge() \
+ PVRSRV_OK
+
+#define DeinitHTBUFFERBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_HTBUFFER_BRIDGE */
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_mm_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_mm_bridge.c
new file mode 100644
index 00000000000000..d84eb2174a79fd
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_mm_bridge.c
@@ -0,0 +1,3435 @@
+/*******************************************************************************
+@File
+@Title Server bridge for mm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for mm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem.h"
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "devicemem_heapcfg.h"
+#include "physmem.h"
+#include "physmem_tdsecbuf.h"
+
+
+#include "common_mm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+static PVRSRV_ERROR ReleasePMRExport(void *pvData)
+{
+ PVR_UNREFERENCED_PARAMETER(pvData);
+
+ return PVRSRV_OK;
+}
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRExportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMREXPORTPMR *psPMRExportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMREXPORTPMR *psPMRExportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRExportPMRIN->hPMR;
+ PMR * psPMRInt = NULL;
+ PMR_EXPORT * psPMRExportInt = NULL;
+ IMG_HANDLE hPMRExportInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psPMRExportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRExportPMR_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRExportPMROUT->eError =
+ PMRExportPMR(
+ psPMRInt,
+ &psPMRExportInt,
+ &psPMRExportPMROUT->ui64Size,
+ &psPMRExportPMROUT->ui32Log2Contig,
+ &psPMRExportPMROUT->ui64Password);
+ /* Exit early if bridged call fails */
+ if(psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ goto PMRExportPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+ /*
+	 * For cases where we need a cross-process handle we actually allocate two.
+	 *
+	 * The first one is a connection-specific handle and it gets given the real
+	 * release function. This handle does *NOT* get returned to the caller. Its
+	 * purpose is to release any leaked resources when we have a bad or
+	 * abnormally terminated client. If we didn't do this then the resource
+	 * wouldn't be freed until driver unload. If the resource is freed normally,
+	 * this handle can be looked up via the cross-process handle and then
+	 * released accordingly.
+	 *
+	 * The second one is a cross-process handle and it gets given a no-op release
+	 * function. This handle does get returned to the caller.
+ */
+
+
+
+
+ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+
+ &hPMRExportInt,
+ (void *) psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PMRUnexportPMR);
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRExportPMR_exit;
+ }
+
+ psPMRExportPMROUT->eError = PVRSRVAllocHandleUnlocked(KERNEL_HANDLE_BASE,
+ &psPMRExportPMROUT->hPMRExport,
+ (void *) psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+ (PFN_HANDLE_RELEASE)&ReleasePMRExport);
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRExportPMR_exit;
+ }
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRExportPMR_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psPMRExportPMROUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psPMRExportPMROUT->hPMRExport)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+ (IMG_HANDLE) psPMRExportPMROUT->hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRExportPMR: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ }
+
+ if (hPMRExportInt)
+ {
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRExportPMR: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psPMRExportInt = NULL;
+ }
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psPMRExportInt)
+ {
+ PMRUnexportPMR(psPMRExportInt);
+ }
+ }
+
+
+ return 0;
+}
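
The block comment inside PVRSRVBridgePMRExportPMR describes the two-handle scheme in prose. Distilled into a sketch, the only difference between the two allocations is the release function and which handle is surfaced to the caller (hypothetical types; ReleasePMRExport above is the real no-op used here):

    typedef void (*RELEASE_FN)(void *data);

    struct export_handles {
        void *conn;   RELEASE_FN conn_release;   /* kept server-side */
        void *xproc;  RELEASE_FN xproc_release;  /* returned to caller */
    };

    static void noop_release(void *data) { (void)data; }

    /* The per-connection handle carries the real teardown (so a dying
     * client cannot leak the export); the cross-process handle the
     * caller sees releases nothing by itself. */
    static void export_bind(struct export_handles *h, void *export_data,
                            RELEASE_FN real_teardown)
    {
        h->conn          = export_data;
        h->conn_release  = real_teardown;   /* e.g. PMRUnexportPMR */
        h->xproc         = export_data;
        h->xproc_release = noop_release;
    }
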
+
+
+static IMG_INT
+PVRSRVBridgePMRUnexportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNEXPORTPMR *psPMRUnexportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRUNEXPORTPMR *psPMRUnexportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR_EXPORT * psPMRExportInt = NULL;
+ IMG_HANDLE hPMRExportInt = NULL;
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+ psPMRUnexportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+ (void **) &psPMRExportInt,
+ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ IMG_FALSE);
+ if (psPMRUnexportPMROUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ }
+ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+ /*
+	 * Find the connection-specific handle that represents the same data
+	 * as the cross-process handle, since releasing it will actually call the
+	 * data's real release function (see the function where the cross-process
+	 * handle is allocated for more details).
+ */
+ psPMRUnexportPMROUT->eError =
+ PVRSRVFindHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ &hPMRExportInt,
+ psPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if (psPMRUnexportPMROUT->eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ }
+ PVR_ASSERT(psPMRUnexportPMROUT->eError == PVRSRV_OK);
+
+ psPMRUnexportPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ hPMRExportInt,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ }
+ PVR_ASSERT((psPMRUnexportPMROUT->eError == PVRSRV_OK) ||
+ (psPMRUnexportPMROUT->eError == PVRSRV_ERROR_RETRY));
+
+ psPMRUnexportPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+ (IMG_HANDLE) psPMRUnexportPMRIN->hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+ if ((psPMRUnexportPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnexportPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnexportPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnexportPMROUT->eError)));
+ UnlockHandle();
+ goto PMRUnexportPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+PMRUnexportPMR_exit:
+
+ return 0;
+}
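/*
 * Editorial sketch (not part of the patch): the export/unexport pair above
 * keeps two handles to one PMR_EXPORT -- a per-process handle whose release
 * callback (PMRUnexportPMR) performs the real teardown, and a global
 * (KERNEL_HANDLE_BASE) handle that other processes import from. A minimal
 * model of that ownership split, with hypothetical names:
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Export { int alive; } Export;

/* release callback attached to the per-process handle */
static void export_teardown(Export *e) { e->alive = 0; free(e); }

int main(void)
{
	Export *e = malloc(sizeof *e);
	e->alive = 1;

	Export *process_handle = e;   /* owns the teardown callback */
	Export *global_handle  = e;   /* cross-process alias, no teardown */

	/* Unexport: drop the alias first, then the owning handle, which is
	 * what actually destroys the object -- mirroring the two
	 * PVRSRVReleaseHandleUnlocked() calls above. */
	global_handle = NULL;
	export_teardown(process_handle);
	process_handle = NULL;
	(void)global_handle;

	printf("export destroyed exactly once\n");
	return 0;
}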
+
+
+static IMG_INT
+PVRSRVBridgePMRGetUID(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRGETUID *psPMRGetUIDIN,
+ PVRSRV_BRIDGE_OUT_PMRGETUID *psPMRGetUIDOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRGetUIDIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psPMRGetUIDOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRGetUIDOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRGetUID_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRGetUIDOUT->eError =
+ PMRGetUID(
+ psPMRInt,
+ &psPMRGetUIDOUT->ui64UID);
+
+
+
+
+PMRGetUID_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
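/*
 * Editorial sketch (not part of the patch): every lookup-style bridge entry
 * above follows the same shape -- take the handle lock, look up and
 * reference the object, drop the lock for the real call, then re-take the
 * lock on the exit path to drop the reference. A minimal model with a
 * hypothetical refcounted object:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t handle_lock = PTHREAD_MUTEX_INITIALIZER;
static int refcount = 1;                 /* object owned by its handle */

static void do_real_work(void) { printf("refs held: %d\n", refcount); }

int main(void)
{
	pthread_mutex_lock(&handle_lock);    /* "LockHandle()" */
	refcount++;                          /* lookup takes a reference */
	pthread_mutex_unlock(&handle_lock);  /* never held over the real call */

	do_real_work();                      /* e.g. the PMRGetUID() call */

	pthread_mutex_lock(&handle_lock);    /* exit-path cleanup */
	refcount--;                          /* release the lookup reference */
	pthread_mutex_unlock(&handle_lock);
	return 0;
}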
+
+
+static IMG_INT
+PVRSRVBridgePMRMakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleIN,
+ PVRSRV_BRIDGE_OUT_PMRMAKELOCALIMPORTHANDLE *psPMRMakeLocalImportHandleOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hBuffer = psPMRMakeLocalImportHandleIN->hBuffer;
+ PMR * psBufferInt = NULL;
+ PMR * psExtMemInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psPMRMakeLocalImportHandleOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psBufferInt,
+ hBuffer,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ IMG_TRUE);
+ if(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRMakeLocalImportHandle_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRMakeLocalImportHandleOUT->eError =
+ PMRMakeLocalImportHandle(
+ psBufferInt,
+ &psExtMemInt);
+ /* Exit early if bridged call fails */
+ if(psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ goto PMRMakeLocalImportHandle_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psPMRMakeLocalImportHandleOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					&psPMRMakeLocalImportHandleOUT->hExtMem,
+					(void *) psExtMemInt,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&PMRUnmakeLocalImportHandle);
+ if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRMakeLocalImportHandle_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRMakeLocalImportHandle_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psBufferInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hBuffer,
+					PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ if (psPMRMakeLocalImportHandleOUT->eError != PVRSRV_OK)
+ {
+ if (psExtMemInt)
+ {
+ PMRUnmakeLocalImportHandle(psExtMemInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnmakeLocalImportHandle(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleIN,
+ PVRSRV_BRIDGE_OUT_PMRUNMAKELOCALIMPORTHANDLE *psPMRUnmakeLocalImportHandleOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psPMRUnmakeLocalImportHandleOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (IMG_HANDLE) psPMRUnmakeLocalImportHandleIN->hExtMem,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+ if ((psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_OK) &&
+ (psPMRUnmakeLocalImportHandleOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnmakeLocalImportHandle: %s",
+ PVRSRVGetErrorStringKM(psPMRUnmakeLocalImportHandleOUT->eError)));
+ UnlockHandle();
+ goto PMRUnmakeLocalImportHandle_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+PMRUnmakeLocalImportHandle_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRIMPORTPMR *psPMRImportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRIMPORTPMR *psPMRImportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRExport = psPMRImportPMRIN->hPMRExport;
+ PMR_EXPORT * psPMRExportInt = NULL;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psPMRImportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(KERNEL_HANDLE_BASE,
+ (void **) &psPMRExportInt,
+ hPMRExport,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT,
+ IMG_TRUE);
+ if(psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRImportPMR_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRImportPMROUT->eError =
+ PhysmemImportPMR(psConnection, OSGetDevData(psConnection),
+ psPMRExportInt,
+ psPMRImportPMRIN->ui64uiPassword,
+ psPMRImportPMRIN->ui64uiSize,
+ psPMRImportPMRIN->ui32uiLog2Contig,
+ &psPMRInt);
+ /* Exit early if bridged call fails */
+ if(psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ goto PMRImportPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psPMRImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psPMRImportPMROUT->hPMR,
+					(void *) psPMRInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRImportPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psPMRExportInt)
+	{
+		PVRSRVReleaseHandleUnlocked(KERNEL_HANDLE_BASE,
+					hPMRExport,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR_EXPORT);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ if (psPMRImportPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRInt)
+ {
+ PMRUnrefPMR(psPMRInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRLocalImportPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRLOCALIMPORTPMR *psPMRLocalImportPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRLOCALIMPORTPMR *psPMRLocalImportPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hExtHandle = psPMRLocalImportPMRIN->hExtHandle;
+ PMR * psExtHandleInt = NULL;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psPMRLocalImportPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (void **) &psExtHandleInt,
+ hExtHandle,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ IMG_TRUE);
+ if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRLocalImportPMR_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psPMRLocalImportPMROUT->eError =
+ PMRLocalImportPMR(
+ psExtHandleInt,
+ &psPMRInt,
+ &psPMRLocalImportPMROUT->uiSize,
+ &psPMRLocalImportPMROUT->sAlign);
+ /* Exit early if bridged call fails */
+ if(psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ goto PMRLocalImportPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psPMRLocalImportPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psPMRLocalImportPMROUT->hPMR,
+					(void *) psPMRInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRLocalImportPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PMRLocalImportPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psExtHandleInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+					hExtHandle,
+					PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ if (psPMRLocalImportPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRInt)
+ {
+ PMRUnrefPMR(psPMRInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnrefPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNREFPMR *psPMRUnrefPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRUNREFPMR *psPMRUnrefPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psPMRUnrefPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psPMRUnrefPMRIN->hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ if ((psPMRUnrefPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnrefPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnrefPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnrefPMROUT->eError)));
+ UnlockHandle();
+ goto PMRUnrefPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+PMRUnrefPMR_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRUnrefUnlockPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMRIN,
+ PVRSRV_BRIDGE_OUT_PMRUNREFUNLOCKPMR *psPMRUnrefUnlockPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psPMRUnrefUnlockPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psPMRUnrefUnlockPMRIN->hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ if ((psPMRUnrefUnlockPMROUT->eError != PVRSRV_OK) &&
+ (psPMRUnrefUnlockPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgePMRUnrefUnlockPMR: %s",
+ PVRSRVGetErrorStringKM(psPMRUnrefUnlockPMROUT->eError)));
+ UnlockHandle();
+ goto PMRUnrefUnlockPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+PMRUnrefUnlockPMR_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMRIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDPMR *psPhysmemNewRamBackedPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ IMG_CHAR *uiAnnotationInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) +
+ (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedPMRIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemNewRamBackedPMRIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+ }
+ }
+
+ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks != 0)
+ {
+ ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32MappingTableInt, (const void __user *) psPhysmemNewRamBackedPMRIN->pui32MappingTable, psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+ }
+ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength != 0)
+ {
+ uiAnnotationInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiAnnotationInt, (const void __user *) psPhysmemNewRamBackedPMRIN->puiAnnotation, psPhysmemNewRamBackedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+ }
+
+
+ psPhysmemNewRamBackedPMROUT->eError =
+ PhysmemNewRamBackedPMR(psConnection, OSGetDevData(psConnection),
+ psPhysmemNewRamBackedPMRIN->uiSize,
+ psPhysmemNewRamBackedPMRIN->uiChunkSize,
+ psPhysmemNewRamBackedPMRIN->ui32NumPhysChunks,
+ psPhysmemNewRamBackedPMRIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ psPhysmemNewRamBackedPMRIN->ui32Log2PageSize,
+ psPhysmemNewRamBackedPMRIN->uiFlags,
+ psPhysmemNewRamBackedPMRIN->ui32AnnotationLength,
+ uiAnnotationInt,
+ psPhysmemNewRamBackedPMRIN->ui32PID,
+ &psPMRPtrInt);
+ /* Exit early if bridged call fails */
+ if(psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psPhysmemNewRamBackedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psPhysmemNewRamBackedPMROUT->hPMRPtr,
+					(void *) psPMRPtrInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemNewRamBackedPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemNewRamBackedPMR_exit:
+
+
+
+ if (psPhysmemNewRamBackedPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
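/*
 * Editorial sketch (not part of the patch): the array-argument marshalling
 * above packs every variable-length input into one scratch buffer, reusing
 * the unused tail of the fixed-size bridge input buffer when it is large
 * enough and falling back to a heap allocation otherwise, then carves the
 * arrays out at increasing offsets. Hypothetical stand-ins for the bridge
 * types and PVRSRV_MAX_BRIDGE_IN_SIZE:
 */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define BRIDGE_IN_SIZE 128                    /* stand-in for the real limit */

struct in_msg { uint32_t nchunks; uint32_t annlen; };

int main(void)
{
	unsigned char in_buf[BRIDGE_IN_SIZE] = { 0 };
	struct in_msg *in = (struct in_msg *)in_buf;
	in->nchunks = 4;
	in->annlen = 16;

	size_t need = in->nchunks * sizeof(uint32_t) + in->annlen;
	/* word-align past the fixed header, as the generated code does */
	size_t used = (sizeof(*in) + sizeof(long) - 1) & ~(sizeof(long) - 1);
	unsigned char *scratch;
	int heap = 0;

	if (need <= BRIDGE_IN_SIZE - used) {
		scratch = in_buf + used;          /* reuse the input buffer tail */
	} else {
		scratch = malloc(need);           /* too big: fall back to heap */
		if (!scratch)
			return 1;
		heap = 1;
	}

	size_t off = 0;
	uint32_t *mapping = (uint32_t *)(scratch + off);
	off += in->nchunks * sizeof(uint32_t);
	char *annotation = (char *)(scratch + off);
	off += in->annlen;

	assert(off == need);   /* mirrors PVR_ASSERT(ui32BufferSize == ui32NextOffset) */
	memset(mapping, 0, in->nchunks * sizeof(uint32_t));
	memset(annotation, 0, in->annlen);

	if (heap)
		free(scratch);                    /* only the heap path frees */
	return 0;
}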
+
+
+static IMG_INT
+PVRSRVBridgePhysmemNewRamBackedLockedPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMRIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMNEWRAMBACKEDLOCKEDPMR *psPhysmemNewRamBackedLockedPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32MappingTableInt = NULL;
+ IMG_CHAR *uiAnnotationInt = NULL;
+ PMR * psPMRPtrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) +
+ (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPhysmemNewRamBackedLockedPMRIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPhysmemNewRamBackedLockedPMRIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+ }
+ }
+
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks != 0)
+ {
+ ui32MappingTableInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32MappingTableInt, (const void __user *) psPhysmemNewRamBackedLockedPMRIN->pui32MappingTable, psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+ }
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength != 0)
+ {
+ uiAnnotationInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiAnnotationInt, (const void __user *) psPhysmemNewRamBackedLockedPMRIN->puiAnnotation, psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+ }
+
+
+ psPhysmemNewRamBackedLockedPMROUT->eError =
+ PhysmemNewRamBackedLockedPMR(psConnection, OSGetDevData(psConnection),
+ psPhysmemNewRamBackedLockedPMRIN->uiSize,
+ psPhysmemNewRamBackedLockedPMRIN->uiChunkSize,
+ psPhysmemNewRamBackedLockedPMRIN->ui32NumPhysChunks,
+ psPhysmemNewRamBackedLockedPMRIN->ui32NumVirtChunks,
+ ui32MappingTableInt,
+ psPhysmemNewRamBackedLockedPMRIN->ui32Log2PageSize,
+ psPhysmemNewRamBackedLockedPMRIN->uiFlags,
+ psPhysmemNewRamBackedLockedPMRIN->ui32AnnotationLength,
+ uiAnnotationInt,
+ psPhysmemNewRamBackedLockedPMRIN->ui32PID,
+ &psPMRPtrInt);
+ /* Exit early if bridged call fails */
+ if(psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psPhysmemNewRamBackedLockedPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psPhysmemNewRamBackedLockedPMROUT->hPMRPtr,
+					(void *) psPMRPtrInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&PMRUnrefUnlockPMR);
+ if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemNewRamBackedLockedPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemNewRamBackedLockedPMR_exit:
+
+
+
+ if (psPhysmemNewRamBackedLockedPMROUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefUnlockPMR(psPMRPtrInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePhysmemImportSecBuf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufIN,
+ PVRSRV_BRIDGE_OUT_PHYSMEMIMPORTSECBUF *psPhysmemImportSecBufOUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR * psPMRPtrInt = NULL;
+
+
+
+
+
+
+
+
+ psPhysmemImportSecBufOUT->eError =
+ PhysmemImportSecBuf(psConnection, OSGetDevData(psConnection),
+ psPhysmemImportSecBufIN->uiSize,
+ psPhysmemImportSecBufIN->ui32Log2Align,
+ psPhysmemImportSecBufIN->uiFlags,
+ &psPMRPtrInt,
+ &psPhysmemImportSecBufOUT->ui64SecBufHandle);
+ /* Exit early if bridged call fails */
+ if(psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+ {
+ goto PhysmemImportSecBuf_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psPhysmemImportSecBufOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psPhysmemImportSecBufOUT->hPMRPtr,
+					(void *) psPMRPtrInt,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&PMRUnrefPMR);
+ if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PhysmemImportSecBuf_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+PhysmemImportSecBuf_exit:
+
+
+
+ if (psPhysmemImportSecBufOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRPtrInt)
+ {
+ PMRUnrefPMR(psPMRPtrInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPin(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTPIN *psDevmemIntPinIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPIN *psDevmemIntPinOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevmemIntPinIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntPinOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntPinOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPin_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntPinOUT->eError =
+ DevmemIntPin(
+ psPMRInt);
+
+
+
+
+DevmemIntPin_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpin(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNPIN *psDevmemIntUnpinIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNPIN *psDevmemIntUnpinOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psDevmemIntUnpinIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntUnpinOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntUnpinOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnpin_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntUnpinOUT->eError =
+ DevmemIntUnpin(
+ psPMRInt);
+
+
+
+
+DevmemIntUnpin_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPinValidate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPINVALIDATE *psDevmemIntPinValidateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hMapping = psDevmemIntPinValidateIN->hMapping;
+ DEVMEMINT_MAPPING * psMappingInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntPinValidateIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntPinValidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psMappingInt,
+ hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ IMG_TRUE);
+ if(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPinValidate_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntPinValidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntPinValidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPinValidate_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntPinValidateOUT->eError =
+ DevmemIntPinValidate(
+ psMappingInt,
+ psPMRInt);
+
+
+
+
+DevmemIntPinValidate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handles. */
+	if (psMappingInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hMapping,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	}
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnpinInvalidate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNPININVALIDATE *psDevmemIntUnpinInvalidateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hMapping = psDevmemIntUnpinInvalidateIN->hMapping;
+ DEVMEMINT_MAPPING * psMappingInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntUnpinInvalidateIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntUnpinInvalidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psMappingInt,
+ hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+ IMG_TRUE);
+ if(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnpinInvalidate_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntUnpinInvalidateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntUnpinInvalidateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnpinInvalidate_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntUnpinInvalidateOUT->eError =
+ DevmemIntUnpinInvalidate(
+ psMappingInt,
+ psPMRInt);
+
+
+
+
+DevmemIntUnpinInvalidate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handles. */
+	if (psMappingInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hMapping,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+	}
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxCreate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXCREATE *psDevmemIntCtxCreateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ DEVMEMINT_CTX * psDevMemServerContextInt = NULL;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+
+
+
+ psDevmemIntCtxCreateOUT->hDevMemServerContext = NULL;
+
+
+
+ psDevmemIntCtxCreateOUT->eError =
+ DevmemIntCtxCreate(psConnection, OSGetDevData(psConnection),
+ psDevmemIntCtxCreateIN->bbKernelMemoryCtx,
+ &psDevMemServerContextInt,
+ &hPrivDataInt,
+ &psDevmemIntCtxCreateOUT->ui32CPUCacheLineSize);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntCtxCreate_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psDevmemIntCtxCreateOUT->hDevMemServerContext,
+					(void *) psDevMemServerContextInt,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&DevmemIntCtxDestroy);
+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntCtxCreate_exit;
+ }
+
+
+
+
+
+
+	psDevmemIntCtxCreateOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+					&psDevmemIntCtxCreateOUT->hPrivData,
+					(void *) hPrivDataInt,
+					PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					psDevmemIntCtxCreateOUT->hDevMemServerContext);
+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntCtxCreate_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntCtxCreate_exit:
+
+
+
+ if (psDevmemIntCtxCreateOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psDevmemIntCtxCreateOUT->hDevMemServerContext)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntCtxCreateOUT->hDevMemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntCtxCreate: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psDevMemServerContextInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psDevMemServerContextInt)
+ {
+ DevmemIntCtxDestroy(psDevMemServerContextInt);
+ }
+ }
+
+
+ return 0;
+}
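/*
 * Editorial sketch (not part of the patch): DevmemIntCtxCreate returns two
 * handles, and the private-data handle is allocated as a *sub-handle* of
 * the context handle, so destroying the context automatically tears down
 * the private data too. A minimal parent/child model with hypothetical
 * names:
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct Handle {
	struct Handle *child;            /* sub-handle, owned by the parent */
	const char *name;
} Handle;

static void handle_destroy(Handle *h)
{
	if (h->child)
		handle_destroy(h->child);    /* sub-handles die with the parent */
	printf("destroyed %s\n", h->name);
	free(h);
}

int main(void)
{
	Handle *ctx = calloc(1, sizeof *ctx);
	Handle *priv = calloc(1, sizeof *priv);
	ctx->name = "DevMemServerContext";
	priv->name = "PrivData";
	ctx->child = priv;               /* like AllocSubHandle(..., parent) */

	handle_destroy(ctx);             /* one release cleans up both */
	return 0;
}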
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntCtxDestroy(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTCTXDESTROY *psDevmemIntCtxDestroyOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntCtxDestroyOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntCtxDestroyIN->hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ if ((psDevmemIntCtxDestroyOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntCtxDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntCtxDestroy: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntCtxDestroyOUT->eError)));
+ UnlockHandle();
+ goto DevmemIntCtxDestroy_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+DevmemIntCtxDestroy_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapCreate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPCREATE *psDevmemIntHeapCreateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemIntHeapCreateIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+ DEVMEMINT_HEAP * psDevmemHeapPtrInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntHeapCreateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntHeapCreate_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntHeapCreateOUT->eError =
+ DevmemIntHeapCreate(
+ psDevmemCtxInt,
+ psDevmemIntHeapCreateIN->sHeapBaseAddr,
+ psDevmemIntHeapCreateIN->uiHeapLength,
+ psDevmemIntHeapCreateIN->ui32Log2DataPageSize,
+ &psDevmemHeapPtrInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntHeapCreate_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psDevmemIntHeapCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psDevmemIntHeapCreateOUT->hDevmemHeapPtr,
+					(void *) psDevmemHeapPtrInt,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&DevmemIntHeapDestroy);
+ if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntHeapCreate_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntHeapCreate_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hDevmemCtx,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ if (psDevmemIntHeapCreateOUT->eError != PVRSRV_OK)
+ {
+ if (psDevmemHeapPtrInt)
+ {
+ DevmemIntHeapDestroy(psDevmemHeapPtrInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntHeapDestroy(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTHEAPDESTROY *psDevmemIntHeapDestroyOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntHeapDestroyOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntHeapDestroyIN->hDevmemHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ if ((psDevmemIntHeapDestroyOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntHeapDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntHeapDestroy: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntHeapDestroyOUT->eError)));
+ UnlockHandle();
+ goto DevmemIntHeapDestroy_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+DevmemIntHeapDestroy_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPMR *psDevmemIntMapPMRIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPMR *psDevmemIntMapPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemServerHeap = psDevmemIntMapPMRIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt = NULL;
+ IMG_HANDLE hReservation = psDevmemIntMapPMRIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntMapPMRIN->hPMR;
+ PMR * psPMRInt = NULL;
+ DEVMEMINT_MAPPING * psMappingInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPMROUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntMapPMROUT->eError =
+ DevmemIntMapPMR(
+ psDevmemServerHeapInt,
+ psReservationInt,
+ psPMRInt,
+ psDevmemIntMapPMRIN->uiMapFlags,
+ &psMappingInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psDevmemIntMapPMROUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psDevmemIntMapPMROUT->hMapping,
+					(void *) psMappingInt,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&DevmemIntUnmapPMR);
+ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPMR_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntMapPMR_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handles. */
+	if (psDevmemServerHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hDevmemServerHeap,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hReservation,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ if (psDevmemIntMapPMROUT->eError != PVRSRV_OK)
+ {
+ if (psMappingInt)
+ {
+ DevmemIntUnmapPMR(psMappingInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPMR(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMRIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPMR *psDevmemIntUnmapPMROUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntUnmapPMROUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnmapPMRIN->hMapping,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_MAPPING);
+ if ((psDevmemIntUnmapPMROUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnmapPMROUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntUnmapPMR: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntUnmapPMROUT->eError)));
+ UnlockHandle();
+ goto DevmemIntUnmapPMR_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+DevmemIntUnmapPMR_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntReserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTRESERVERANGE *psDevmemIntReserveRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemServerHeap = psDevmemIntReserveRangeIN->hDevmemServerHeap;
+ DEVMEMINT_HEAP * psDevmemServerHeapInt = NULL;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntReserveRangeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemServerHeapInt,
+ hDevmemServerHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntReserveRange_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntReserveRangeOUT->eError =
+ DevmemIntReserveRange(
+ psDevmemServerHeapInt,
+ psDevmemIntReserveRangeIN->sAddress,
+ psDevmemIntReserveRangeIN->uiLength,
+ &psReservationInt);
+ /* Exit early if bridged call fails */
+ if(psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ goto DevmemIntReserveRange_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psDevmemIntReserveRangeOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+					&psDevmemIntReserveRangeOUT->hReservation,
+					(void *) psReservationInt,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+					PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+					(PFN_HANDLE_RELEASE)&DevmemIntUnreserveRange);
+ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntReserveRange_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+DevmemIntReserveRange_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psDevmemServerHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hDevmemServerHeap,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ if (psDevmemIntReserveRangeOUT->eError != PVRSRV_OK)
+ {
+ if (psReservationInt)
+ {
+ DevmemIntUnreserveRange(psReservationInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnreserveRange(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNRESERVERANGE *psDevmemIntUnreserveRangeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psDevmemIntUnreserveRangeOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psDevmemIntUnreserveRangeIN->hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ if ((psDevmemIntUnreserveRangeOUT->eError != PVRSRV_OK) &&
+ (psDevmemIntUnreserveRangeOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeDevmemIntUnreserveRange: %s",
+ PVRSRVGetErrorStringKM(psDevmemIntUnreserveRangeOUT->eError)));
+ UnlockHandle();
+ goto DevmemIntUnreserveRange_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+DevmemIntUnreserveRange_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeChangeSparseMem(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CHANGESPARSEMEM *psChangeSparseMemIN,
+ PVRSRV_BRIDGE_OUT_CHANGESPARSEMEM *psChangeSparseMemOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSrvDevMemHeap = psChangeSparseMemIN->hSrvDevMemHeap;
+ DEVMEMINT_HEAP * psSrvDevMemHeapInt = NULL;
+ IMG_HANDLE hPMR = psChangeSparseMemIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_UINT32 *ui32AllocPageIndicesInt = NULL;
+ IMG_UINT32 *ui32FreePageIndicesInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) +
+ (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psChangeSparseMemIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psChangeSparseMemIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ChangeSparseMem_exit;
+ }
+ }
+ }
+
+ if (psChangeSparseMemIN->ui32AllocPageCount != 0)
+ {
+ ui32AllocPageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32AllocPageIndicesInt, (const void __user *) psChangeSparseMemIN->pui32AllocPageIndices, psChangeSparseMemIN->ui32AllocPageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ChangeSparseMem_exit;
+ }
+ }
+ if (psChangeSparseMemIN->ui32FreePageCount != 0)
+ {
+ ui32FreePageIndicesInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FreePageIndicesInt, (const void __user *) psChangeSparseMemIN->pui32FreePageIndices, psChangeSparseMemIN->ui32FreePageCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psChangeSparseMemOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ChangeSparseMem_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psChangeSparseMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSrvDevMemHeapInt,
+ hSrvDevMemHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psChangeSparseMemOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ChangeSparseMem_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psChangeSparseMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psChangeSparseMemOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ChangeSparseMem_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psChangeSparseMemOUT->eError =
+ DevmemIntChangeSparse(
+ psSrvDevMemHeapInt,
+ psPMRInt,
+ psChangeSparseMemIN->ui32AllocPageCount,
+ ui32AllocPageIndicesInt,
+ psChangeSparseMemIN->ui32FreePageCount,
+ ui32FreePageIndicesInt,
+ psChangeSparseMemIN->ui32SparseFlags,
+ psChangeSparseMemIN->uiFlags,
+ psChangeSparseMemIN->sDevVAddr,
+ psChangeSparseMemIN->ui64CPUVAddr);
+
+
+
+
+ChangeSparseMem_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handles. */
+	if (psSrvDevMemHeapInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hSrvDevMemHeap,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+	}
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntMapPages(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTMAPPAGES *psDevmemIntMapPagesIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTMAPPAGES *psDevmemIntMapPagesOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hReservation = psDevmemIntMapPagesIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+ IMG_HANDLE hPMR = psDevmemIntMapPagesIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPages_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntMapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psDevmemIntMapPagesOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntMapPages_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntMapPagesOUT->eError =
+ DevmemIntMapPages(
+ psReservationInt,
+ psPMRInt,
+ psDevmemIntMapPagesIN->ui32PageCount,
+ psDevmemIntMapPagesIN->ui32PhysicalPgOffset,
+ psDevmemIntMapPagesIN->uiFlags,
+ psDevmemIntMapPagesIN->sDevVAddr);
+
+
+
+
+DevmemIntMapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handles. */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hReservation,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+	if (psPMRInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hPMR,
+					PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntUnmapPages(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTUNMAPPAGES *psDevmemIntUnmapPagesOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hReservation = psDevmemIntUnmapPagesIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntUnmapPagesOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psDevmemIntUnmapPagesOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntUnmapPages_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntUnmapPagesOUT->eError =
+ DevmemIntUnmapPages(
+ psReservationInt,
+ psDevmemIntUnmapPagesIN->sDevVAddr,
+ psDevmemIntUnmapPagesIN->ui32PageCount);
+
+
+
+
+DevmemIntUnmapPages_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psReservationInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hReservation,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIsVDevAddrValid(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMISVDEVADDRVALID *psDevmemIsVDevAddrValidOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemIsVDevAddrValidIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIsVDevAddrValidOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIsVDevAddrValidOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIsVDevAddrValid_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIsVDevAddrValidOUT->eError =
+ DevmemIntIsVDevAddrValid(psConnection, OSGetDevData(psConnection),
+ psDevmemCtxInt,
+ psDevmemIsVDevAddrValidIN->sAddress);
+
+
+
+
+DevmemIsVDevAddrValid_exit:
+
+	/* Lock over handle lookup cleanup. */
+	LockHandle();
+
+	/* Unreference the previously looked-up handle. */
+	if (psDevmemCtxInt)
+	{
+		PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+					hDevmemCtx,
+					PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+	}
+	/* Release the lock now that the lookup handles have been cleaned up. */
+	UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigCount(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGCOUNT *psHeapCfgHeapConfigCountOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+	PVR_UNREFERENCED_PARAMETER(psHeapCfgHeapConfigCountIN);
+
+ psHeapCfgHeapConfigCountOUT->eError =
+ HeapCfgHeapConfigCount(psConnection, OSGetDevData(psConnection),
+ &psHeapCfgHeapConfigCountOUT->ui32NumHeapConfigs);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapCount(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCOUNT *psHeapCfgHeapCountOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psHeapCfgHeapCountOUT->eError =
+ HeapCfgHeapCount(psConnection, OSGetDevData(psConnection),
+ psHeapCfgHeapCountIN->ui32HeapConfigIndex,
+ &psHeapCfgHeapCountOUT->ui32NumHeaps);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapConfigName(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPCONFIGNAME *psHeapCfgHeapConfigNameOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *puiHeapConfigNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+ psHeapCfgHeapConfigNameOUT->puiHeapConfigName = psHeapCfgHeapConfigNameIN->puiHeapConfigName;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapConfigNameIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHeapCfgHeapConfigNameIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HeapCfgHeapConfigName_exit;
+ }
+ }
+ }
+
+ if (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz != 0)
+ {
+ puiHeapConfigNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR);
+ }
+
+
+
+ psHeapCfgHeapConfigNameOUT->eError =
+ HeapCfgHeapConfigName(psConnection, OSGetDevData(psConnection),
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigIndex,
+ psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz,
+ puiHeapConfigNameInt);
+
+
+
+ if ((psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psHeapCfgHeapConfigNameOUT->puiHeapConfigName, puiHeapConfigNameInt,
+ (psHeapCfgHeapConfigNameIN->ui32HeapConfigNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psHeapCfgHeapConfigNameOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HeapCfgHeapConfigName_exit;
+ }
+ }
+
+
+HeapCfgHeapConfigName_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHeapCfgHeapDetails(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsIN,
+ PVRSRV_BRIDGE_OUT_HEAPCFGHEAPDETAILS *psHeapCfgHeapDetailsOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *puiHeapNameOutInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) +
+ 0;
+
+ psHeapCfgHeapDetailsOUT->puiHeapNameOut = psHeapCfgHeapDetailsIN->puiHeapNameOut;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psHeapCfgHeapDetailsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psHeapCfgHeapDetailsIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto HeapCfgHeapDetails_exit;
+ }
+ }
+ }
+
+ if (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz != 0)
+ {
+ puiHeapNameOutInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR);
+ }
+
+ psHeapCfgHeapDetailsOUT->eError =
+ HeapCfgHeapDetails(psConnection, OSGetDevData(psConnection),
+ psHeapCfgHeapDetailsIN->ui32HeapConfigIndex,
+ psHeapCfgHeapDetailsIN->ui32HeapIndex,
+ psHeapCfgHeapDetailsIN->ui32HeapNameBufSz,
+ puiHeapNameOutInt,
+ &psHeapCfgHeapDetailsOUT->sDevVAddrBase,
+ &psHeapCfgHeapDetailsOUT->uiHeapLength,
+ &psHeapCfgHeapDetailsOUT->ui32Log2DataPageSizeOut,
+ &psHeapCfgHeapDetailsOUT->ui32Log2ImportAlignmentOut,
+ &psHeapCfgHeapDetailsOUT->ui32Log2TilingStrideFactorOut);
+
+ if ((psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psHeapCfgHeapDetailsOUT->puiHeapNameOut, puiHeapNameOutInt,
+ (psHeapCfgHeapDetailsIN->ui32HeapNameBufSz * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psHeapCfgHeapDetailsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto HeapCfgHeapDetails_exit;
+ }
+ }
+
+
+HeapCfgHeapDetails_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntRegisterPFNotifyKM(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTREGISTERPFNOTIFYKM *psDevmemIntRegisterPFNotifyKMOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemIntRegisterPFNotifyKMIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psDevmemIntRegisterPFNotifyKMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
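+	/* The trailing IMG_TRUE appears to take a reference on the looked-up
+	 * object; it is balanced by PVRSRVReleaseHandleUnlocked() at exit. */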
+ if(psDevmemIntRegisterPFNotifyKMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntRegisterPFNotifyKM_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psDevmemIntRegisterPFNotifyKMOUT->eError =
+ DevmemIntRegisterPFNotifyKM(
+ psDevmemCtxInt,
+ psDevmemIntRegisterPFNotifyKMIN->ui32PID,
+ psDevmemIntRegisterPFNotifyKMIN->bRegister);
+
+DevmemIntRegisterPFNotifyKM_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeGetMaxDevMemSize(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeIN,
+ PVRSRV_BRIDGE_OUT_GETMAXDEVMEMSIZE *psGetMaxDevMemSizeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psGetMaxDevMemSizeIN);
+
+ psGetMaxDevMemSizeOUT->eError =
+ PVRSRVGetMaxDevMemSizeKM(psConnection, OSGetDevData(psConnection),
+ &psGetMaxDevMemSizeOUT->uiLMASize,
+ &psGetMaxDevMemSizeOUT->uiUMASize);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemGetFaultAddress(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMGETFAULTADDRESS *psDevmemGetFaultAddressOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psDevmemGetFaultAddressIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psDevmemGetFaultAddressOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemGetFaultAddressOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemGetFaultAddress_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psDevmemGetFaultAddressOUT->eError =
+ DevmemIntGetFaultAddress(psConnection, OSGetDevData(psConnection),
+ psDevmemCtxInt,
+ &psDevmemGetFaultAddressOUT->sFaultAddress);
+
+DevmemGetFaultAddress_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
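+/* bUseLock requests that each entry below be dispatched with the bridge
+ * lock held; the NULL argument means no per-bridge lock is used here. */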
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitMMBridge(void);
+PVRSRV_ERROR DeinitMMBridge(void);
+
+/*
+ * Register all MM functions with services
+ */
+PVRSRV_ERROR InitMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR, PVRSRVBridgePMRExportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR, PVRSRVBridgePMRUnexportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID, PVRSRVBridgePMRGetUID,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE, PVRSRVBridgePMRMakeLocalImportHandle,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE, PVRSRVBridgePMRUnmakeLocalImportHandle,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR, PVRSRVBridgePMRImportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR, PVRSRVBridgePMRLocalImportPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR, PVRSRVBridgePMRUnrefPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR, PVRSRVBridgePMRUnrefUnlockPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR, PVRSRVBridgePhysmemNewRamBackedPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR, PVRSRVBridgePhysmemNewRamBackedLockedPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMIMPORTSECBUF, PVRSRVBridgePhysmemImportSecBuf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN, PVRSRVBridgeDevmemIntPin,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN, PVRSRVBridgeDevmemIntUnpin,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE, PVRSRVBridgeDevmemIntPinValidate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE, PVRSRVBridgeDevmemIntUnpinInvalidate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE, PVRSRVBridgeDevmemIntCtxCreate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY, PVRSRVBridgeDevmemIntCtxDestroy,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE, PVRSRVBridgeDevmemIntHeapCreate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY, PVRSRVBridgeDevmemIntHeapDestroy,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR, PVRSRVBridgeDevmemIntMapPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR, PVRSRVBridgeDevmemIntUnmapPMR,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE, PVRSRVBridgeDevmemIntReserveRange,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE, PVRSRVBridgeDevmemIntUnreserveRange,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM, PVRSRVBridgeChangeSparseMem,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES, PVRSRVBridgeDevmemIntMapPages,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES, PVRSRVBridgeDevmemIntUnmapPages,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID, PVRSRVBridgeDevmemIsVDevAddrValid,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT, PVRSRVBridgeHeapCfgHeapConfigCount,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT, PVRSRVBridgeHeapCfgHeapCount,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME, PVRSRVBridgeHeapCfgHeapConfigName,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS, PVRSRVBridgeHeapCfgHeapDetails,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM, PVRSRVBridgeDevmemIntRegisterPFNotifyKM,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE, PVRSRVBridgeGetMaxDevMemSize,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS, PVRSRVBridgeDevmemGetFaultAddress,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all MM functions from services
+ */
+PVRSRV_ERROR DeinitMMBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMREXPORTPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNEXPORTPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRGETUID);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRMAKELOCALIMPORTHANDLE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNMAKELOCALIMPORTHANDLE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRIMPORTPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRLOCALIMPORTPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PMRUNREFUNLOCKPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMNEWRAMBACKEDLOCKEDPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_PHYSMEMIMPORTSECBUF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPIN);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPIN);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTPINVALIDATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNPININVALIDATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXCREATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTCTXDESTROY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPCREATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTHEAPDESTROY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPMR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTRESERVERANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNRESERVERANGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_CHANGESPARSEMEM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTMAPPAGES);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTUNMAPPAGES);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMISVDEVADDRVALID);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGCOUNT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCOUNT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPCONFIGNAME);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_HEAPCFGHEAPDETAILS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMINTREGISTERPFNOTIFYKM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_GETMAXDEVMEMSIZE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_MM, PVRSRV_BRIDGE_MM_DEVMEMGETFAULTADDRESS);
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_pdump_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_pdump_bridge.c
new file mode 100644
index 00000000000000..aca4dd533688f8
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_pdump_bridge.c
@@ -0,0 +1,561 @@
+/*******************************************************************************
+@File
+@Title Server bridge for pdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pdump_km.h"
+
+
+#include "common_pdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeDevmemPDumpBitmap(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMPDUMPBITMAP *psDevmemPDumpBitmapOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiFileNameInt = NULL;
+ IMG_HANDLE hDevmemCtx = psDevmemPDumpBitmapIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemPDumpBitmapIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevmemPDumpBitmapIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevmemPDumpBitmap_exit;
+ }
+ }
+ }
+
+
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
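+	/* The filename is a fixed-size array (PVRSRV_PDUMP_MAX_FILENAME_SIZE),
+	 * so the copy length is constant rather than caller-supplied. */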
+ if (PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, (const void __user *) psDevmemPDumpBitmapIN->puiFileName, PVRSRV_PDUMP_MAX_FILENAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevmemPDumpBitmapOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevmemPDumpBitmap_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psDevmemPDumpBitmapOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemPDumpBitmapOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemPDumpBitmap_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psDevmemPDumpBitmapOUT->eError =
+ DevmemIntPDumpBitmap(psConnection, OSGetDevData(psConnection),
+ uiFileNameInt,
+ psDevmemPDumpBitmapIN->ui32FileOffset,
+ psDevmemPDumpBitmapIN->ui32Width,
+ psDevmemPDumpBitmapIN->ui32Height,
+ psDevmemPDumpBitmapIN->ui32StrideInBytes,
+ psDevmemPDumpBitmapIN->sDevBaseAddr,
+ psDevmemCtxInt,
+ psDevmemPDumpBitmapIN->ui32Size,
+ psDevmemPDumpBitmapIN->ePixelFormat,
+ psDevmemPDumpBitmapIN->ui32AddrMode,
+ psDevmemPDumpBitmapIN->ui32PDumpFlags);
+
+DevmemPDumpBitmap_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePDumpImageDescriptor(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorIN,
+ PVRSRV_BRIDGE_OUT_PDUMPIMAGEDESCRIPTOR *psPDumpImageDescriptorOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemCtx = psPDumpImageDescriptorIN->hDevmemCtx;
+ DEVMEMINT_CTX * psDevmemCtxInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+ IMG_UINT32 *ui32FBCClearColourInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) +
+ (4 * sizeof(IMG_UINT32)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPDumpImageDescriptorIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPDumpImageDescriptorIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PDumpImageDescriptor_exit;
+ }
+ }
+ }
+
+ if (psPDumpImageDescriptorIN->ui32StringSize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, (const void __user *) psPDumpImageDescriptorIN->puiFileName, psPDumpImageDescriptorIN->ui32StringSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PDumpImageDescriptor_exit;
+ }
+ }
+
+ {
+ ui32FBCClearColourInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 4 * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (4 * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FBCClearColourInt, (const void __user *) psPDumpImageDescriptorIN->pui32FBCClearColour, 4 * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psPDumpImageDescriptorOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PDumpImageDescriptor_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPDumpImageDescriptorOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemCtxInt,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psPDumpImageDescriptorOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PDumpImageDescriptor_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPDumpImageDescriptorOUT->eError =
+ DevmemIntPdumpImageDescriptor(psConnection, OSGetDevData(psConnection),
+ psDevmemCtxInt,
+ psPDumpImageDescriptorIN->ui32StringSize,
+ uiFileNameInt,
+ psPDumpImageDescriptorIN->sDataDevAddr,
+ psPDumpImageDescriptorIN->ui32DataSize,
+ psPDumpImageDescriptorIN->ui32LogicalWidth,
+ psPDumpImageDescriptorIN->ui32LogicalHeight,
+ psPDumpImageDescriptorIN->ui32PhysicalWidth,
+ psPDumpImageDescriptorIN->ui32PhysicalHeight,
+ psPDumpImageDescriptorIN->ePixelFormat,
+ psPDumpImageDescriptorIN->eMemLayout,
+ psPDumpImageDescriptorIN->eFBCompression,
+ ui32FBCClearColourInt,
+ psPDumpImageDescriptorIN->sHeaderDevAddr,
+ psPDumpImageDescriptorIN->ui32HeaderSize,
+ psPDumpImageDescriptorIN->ui32PDumpFlags);
+
+PDumpImageDescriptor_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psDevmemCtxInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemCtx,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpComment(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPCOMMENT *psPVRSRVPDumpCommentOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiCommentInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPVRSRVPDumpCommentIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPVRSRVPDumpCommentIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PVRSRVPDumpComment_exit;
+ }
+ }
+ }
+
+
+ {
+ uiCommentInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiCommentInt, (const void __user *) psPVRSRVPDumpCommentIN->puiComment, PVRSRV_PDUMP_MAX_COMMENT_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPVRSRVPDumpCommentOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PVRSRVPDumpComment_exit;
+ }
+ }
+
+
+ psPVRSRVPDumpCommentOUT->eError =
+ PDumpCommentKM(
+ uiCommentInt,
+ psPVRSRVPDumpCommentIN->ui32Flags);
+
+PVRSRVPDumpComment_exit:
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetFrame(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETFRAME *psPVRSRVPDumpSetFrameOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psPVRSRVPDumpSetFrameOUT->eError =
+ PDumpSetFrameKM(psConnection, OSGetDevData(psConnection),
+ psPVRSRVPDumpSetFrameIN->ui32Frame);
+
+ return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPBridge(void);
+PVRSRV_ERROR DeinitPDUMPBridge(void);
+
+/*
+ * Register all PDUMP functions with services
+ */
+PVRSRV_ERROR InitPDUMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP, PVRSRVBridgeDevmemPDumpBitmap,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR, PVRSRVBridgePDumpImageDescriptor,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT, PVRSRVBridgePVRSRVPDumpComment,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME, PVRSRVBridgePVRSRVPDumpSetFrame,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMP functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_DEVMEMPDUMPBITMAP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PDUMPIMAGEDESCRIPTOR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPCOMMENT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMP, PVRSRV_BRIDGE_PDUMP_PVRSRVPDUMPSETFRAME);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_pdumpctrl_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_pdumpctrl_bridge.c
new file mode 100644
index 00000000000000..738ccfd5f43107
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_pdumpctrl_bridge.c
@@ -0,0 +1,254 @@
+/*******************************************************************************
+@File
+@Title Server bridge for pdumpctrl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpctrl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "pdump_km.h"
+
+
+#include "common_pdumpctrl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+#include "lock.h"
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetState(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETSTATE *psPVRSRVPDumpGetStateOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetStateIN);
+
+ psPVRSRVPDumpGetStateOUT->eError =
+ PDumpGetStateKM(
+ &psPVRSRVPDumpGetStateOUT->ui64State);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpGetFrame(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPGETFRAME *psPVRSRVPDumpGetFrameOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpGetFrameIN);
+
+ psPVRSRVPDumpGetFrameOUT->eError =
+ PDumpGetFrameKM(psConnection, OSGetDevData(psConnection),
+ &psPVRSRVPDumpGetFrameOUT->ui32Frame);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS *psPVRSRVPDumpSetDefaultCaptureParamsOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psPVRSRVPDumpSetDefaultCaptureParamsOUT->eError =
+ PDumpSetDefaultCaptureParamsKM(
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Mode,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Start,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32End,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32Interval,
+ psPVRSRVPDumpSetDefaultCaptureParamsIN->ui32MaxParamFileSize);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameIN,
+ PVRSRV_BRIDGE_OUT_PVRSRVPDUMPISLASTCAPTUREFRAME *psPVRSRVPDumpIsLastCaptureFrameOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVPDumpIsLastCaptureFrameIN);
+
+ psPVRSRVPDumpIsLastCaptureFrameOUT->eError =
+ PDumpIsLastCaptureFrameKM(
+ &psPVRSRVPDumpIsLastCaptureFrameOUT->bpbIsLastCaptureFrame);
+
+ return 0;
+}
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
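+/* Unlike the MM and PDUMP bridges, the pdumpctrl entry points are also
+ * serialised with a dedicated passive lock passed to each dispatch entry. */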
+static POS_LOCK pPDUMPCTRLBridgeLock;
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPCTRLBridge(void);
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void);
+
+/*
+ * Register all PDUMPCTRL functions with services
+ */
+PVRSRV_ERROR InitPDUMPCTRLBridge(void)
+{
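+	/* PVR_LOGR_IF_ERROR is assumed to log the failing call and return its
+	 * error code, abandoning bridge registration if lock creation fails. */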
+ PVR_LOGR_IF_ERROR(OSLockCreate(&pPDUMPCTRLBridgeLock, LOCK_TYPE_PASSIVE), "OSLockCreate");
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE, PVRSRVBridgePVRSRVPDumpGetState,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME, PVRSRVBridgePVRSRVPDumpGetFrame,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS, PVRSRVBridgePVRSRVPDumpSetDefaultCaptureParams,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME, PVRSRVBridgePVRSRVPDumpIsLastCaptureFrame,
+ pPDUMPCTRLBridgeLock, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPCTRL functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPCTRLBridge(void)
+{
+ PVR_LOGR_IF_ERROR(OSLockDestroy(pPDUMPCTRLBridgeLock), "OSLockDestroy");
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETSTATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPGETFRAME);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPSETDEFAULTCAPTUREPARAMS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPCTRL, PVRSRV_BRIDGE_PDUMPCTRL_PVRSRVPDUMPISLASTCAPTUREFRAME);
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_pdumpmm_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_pdumpmm_bridge.c
new file mode 100644
index 00000000000000..b98fcd3097fa74
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_pdumpmm_bridge.c
@@ -0,0 +1,957 @@
+/*******************************************************************************
+@File
+@Title Server bridge for pdumpmm
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pdumpmm
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "devicemem_server.h"
+#include "pmr.h"
+#include "physmem.h"
+
+
+#include "common_pdumpmm_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMem(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEM *psPMRPDumpLoadMemIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEM *psPMRPDumpLoadMemOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpLoadMemOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpLoadMem_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpLoadMemOUT->eError =
+ PMRPDumpLoadMem(
+ psPMRInt,
+ psPMRPDumpLoadMemIN->uiOffset,
+ psPMRPDumpLoadMemIN->uiSize,
+ psPMRPDumpLoadMemIN->ui32PDumpFlags,
+ psPMRPDumpLoadMemIN->bbZero);
+
+PMRPDumpLoadMem_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue32(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32IN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE32 *psPMRPDumpLoadMemValue32OUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemValue32IN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemValue32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpLoadMemValue32OUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpLoadMemValue32_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpLoadMemValue32OUT->eError =
+ PMRPDumpLoadMemValue32(
+ psPMRInt,
+ psPMRPDumpLoadMemValue32IN->uiOffset,
+ psPMRPDumpLoadMemValue32IN->ui32Value,
+ psPMRPDumpLoadMemValue32IN->ui32PDumpFlags);
+
+PMRPDumpLoadMemValue32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpLoadMemValue64(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64IN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPLOADMEMVALUE64 *psPMRPDumpLoadMemValue64OUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpLoadMemValue64IN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpLoadMemValue64OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpLoadMemValue64OUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpLoadMemValue64_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpLoadMemValue64OUT->eError =
+ PMRPDumpLoadMemValue64(
+ psPMRInt,
+ psPMRPDumpLoadMemValue64IN->uiOffset,
+ psPMRPDumpLoadMemValue64IN->ui64Value,
+ psPMRPDumpLoadMemValue64IN->ui32PDumpFlags);
+
+PMRPDumpLoadMemValue64_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSaveToFile(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPSAVETOFILE *psPMRPDumpSaveToFileOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpSaveToFileIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSaveToFileIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPMRPDumpSaveToFileIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PMRPDumpSaveToFile_exit;
+ }
+ }
+ }
+
+ if (psPMRPDumpSaveToFileIN->ui32ArraySize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, (const void __user *) psPMRPDumpSaveToFileIN->puiFileName, psPMRPDumpSaveToFileIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psPMRPDumpSaveToFileOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSaveToFile_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpSaveToFileOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpSaveToFileOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpSaveToFile_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpSaveToFileOUT->eError =
+ PMRPDumpSaveToFile(
+ psPMRInt,
+ psPMRPDumpSaveToFileIN->uiOffset,
+ psPMRPDumpSaveToFileIN->uiSize,
+ psPMRPDumpSaveToFileIN->ui32ArraySize,
+ uiFileNameInt,
+ psPMRPDumpSaveToFileIN->ui32uiFileOffset);
+
+PMRPDumpSaveToFile_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpSymbolicAddr(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPSYMBOLICADDR *psPMRPDumpSymbolicAddrOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpSymbolicAddrIN->hPMR;
+ PMR * psPMRInt = NULL;
+ IMG_CHAR *puiMemspaceNameInt = NULL;
+ IMG_CHAR *puiSymbolicAddrInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) +
+ (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) +
+ 0;
+
+ psPMRPDumpSymbolicAddrOUT->puiMemspaceName = psPMRPDumpSymbolicAddrIN->puiMemspaceName;
+ psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr = psPMRPDumpSymbolicAddrIN->puiSymbolicAddr;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psPMRPDumpSymbolicAddrIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psPMRPDumpSymbolicAddrIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+ }
+
+ if (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen != 0)
+ {
+ puiMemspaceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR);
+ }
+
+ if (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen != 0)
+ {
+ puiSymbolicAddrInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR);
+ }
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpSymbolicAddrOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpSymbolicAddrOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpSymbolicAddrOUT->eError =
+ PMR_PDumpSymbolicAddr(
+ psPMRInt,
+ psPMRPDumpSymbolicAddrIN->uiOffset,
+ psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen,
+ puiMemspaceNameInt,
+ psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen,
+ puiSymbolicAddrInt,
+ &psPMRPDumpSymbolicAddrOUT->uiNewOffset,
+ &psPMRPDumpSymbolicAddrOUT->uiNextSymName);
+
+ if ((psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psPMRPDumpSymbolicAddrOUT->puiMemspaceName, puiMemspaceNameInt,
+ (psPMRPDumpSymbolicAddrIN->ui32MemspaceNameLen * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+
+ if ((psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psPMRPDumpSymbolicAddrOUT->puiSymbolicAddr, puiSymbolicAddrInt,
+ (psPMRPDumpSymbolicAddrIN->ui32SymbolicAddrLen * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psPMRPDumpSymbolicAddrOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto PMRPDumpSymbolicAddr_exit;
+ }
+ }
+
+
+PMRPDumpSymbolicAddr_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpPol32(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPPOL32 *psPMRPDumpPol32IN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPPOL32 *psPMRPDumpPol32OUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpPol32IN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpPol32OUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpPol32OUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpPol32_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpPol32OUT->eError =
+ PMRPDumpPol32(
+ psPMRInt,
+ psPMRPDumpPol32IN->uiOffset,
+ psPMRPDumpPol32IN->ui32Value,
+ psPMRPDumpPol32IN->ui32Mask,
+ psPMRPDumpPol32IN->eOperator,
+ psPMRPDumpPol32IN->ui32PDumpFlags);
+
+PMRPDumpPol32_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePMRPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PMRPDUMPCBP *psPMRPDumpCBPIN,
+ PVRSRV_BRIDGE_OUT_PMRPDUMPCBP *psPMRPDumpCBPOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMR = psPMRPDumpCBPIN->hPMR;
+ PMR * psPMRInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psPMRPDumpCBPOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psPMRPDumpCBPOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto PMRPDumpCBP_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psPMRPDumpCBPOUT->eError =
+ PMRPDumpCBP(
+ psPMRInt,
+ psPMRPDumpCBPIN->uiReadOffset,
+ psPMRPDumpCBPIN->uiWriteOffset,
+ psPMRPDumpCBPIN->uiPacketSize,
+ psPMRPDumpCBPIN->uiBufferSize);
+
+
+PMRPDumpCBP_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualIN,
+ PVRSRV_BRIDGE_OUT_DEVMEMINTPDUMPSAVETOFILEVIRTUAL *psDevmemIntPDumpSaveToFileVirtualOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hDevmemServerContext = psDevmemIntPDumpSaveToFileVirtualIN->hDevmemServerContext;
+ DEVMEMINT_CTX * psDevmemServerContextInt = NULL;
+ IMG_CHAR *uiFileNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psDevmemIntPDumpSaveToFileVirtualIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psDevmemIntPDumpSaveToFileVirtualIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ }
+ }
+
+ if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize != 0)
+ {
+ uiFileNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiFileNameInt, (const void __user *) psDevmemIntPDumpSaveToFileVirtualIN->puiFileName, psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psDevmemServerContextInt,
+ hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX,
+ IMG_TRUE);
+ if(psDevmemIntPDumpSaveToFileVirtualOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto DevmemIntPDumpSaveToFileVirtual_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psDevmemIntPDumpSaveToFileVirtualOUT->eError =
+ DevmemIntPDumpSaveToFileVirtual(
+ psDevmemServerContextInt,
+ psDevmemIntPDumpSaveToFileVirtualIN->sAddress,
+ psDevmemIntPDumpSaveToFileVirtualIN->uiSize,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32ArraySize,
+ uiFileNameInt,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32FileOffset,
+ psDevmemIntPDumpSaveToFileVirtualIN->ui32PDumpFlags);
+
+
+
+
+DevmemIntPDumpSaveToFileVirtual_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psDevmemServerContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hDevmemServerContext,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_CTX);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
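+
+/* Editorial sketch of the array-argument staging above: the generated code
+ * first tries to carve the arrays out of the unused tail of the kernel-side
+ * input packet (which is PVRSRV_MAX_BRIDGE_IN_SIZE bytes), and only falls back
+ * to OSAllocMemNoStats() when they do not fit. MAX_IN and the helper names are
+ * illustrative stand-ins.
+ */
+#include <stdint.h>
+#include <stdlib.h>
+
+#define MAX_IN 512u	/* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */
+#define ALIGN_UP(x, a) (((x) + ((a) - 1u)) & ~((a) - 1u))	/* like PVR_ALIGN */
+
+static uint8_t *sketch_args_space(void *pvInPacket, size_t uiInStructSize,
+				  size_t uiNeeded, int *pbAllocated)
+{
+	size_t uiOff   = ALIGN_UP(uiInStructSize, sizeof(unsigned long));
+	size_t uiSpare = uiOff >= MAX_IN ? 0 : MAX_IN - uiOff;
+
+	if (uiNeeded <= uiSpare)
+	{
+		*pbAllocated = 0;		/* reuse the packet tail */
+		return (uint8_t *)pvInPacket + uiOff;
+	}
+	*pbAllocated = 1;			/* heap fallback */
+	return malloc(uiNeeded);		/* OSAllocMemNoStats() in the driver */
+}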
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitPDUMPMMBridge(void);
+PVRSRV_ERROR DeinitPDUMPMMBridge(void);
+
+/*
+ * Register all PDUMPMM functions with services
+ */
+PVRSRV_ERROR InitPDUMPMMBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM, PVRSRVBridgePMRPDumpLoadMem,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32, PVRSRVBridgePMRPDumpLoadMemValue32,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64, PVRSRVBridgePMRPDumpLoadMemValue64,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE, PVRSRVBridgePMRPDumpSaveToFile,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR, PVRSRVBridgePMRPDumpSymbolicAddr,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32, PVRSRVBridgePMRPDumpPol32,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP, PVRSRVBridgePMRPDumpCBP,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL, PVRSRVBridgeDevmemIntPDumpSaveToFileVirtual,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PDUMPMM functions from services
+ */
+PVRSRV_ERROR DeinitPDUMPMMBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE32);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPLOADMEMVALUE64);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSAVETOFILE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPSYMBOLICADDR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPPOL32);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_PMRPDUMPCBP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PDUMPMM, PVRSRV_BRIDGE_PDUMPMM_DEVMEMINTPDUMPSAVETOFILEVIRTUAL);
+
+
+
+ return PVRSRV_OK;
+}
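+
+/* Editorial sketch: SetDispatchTableEntry()/UnsetDispatchTableEntry() above
+ * populate a table keyed by (bridge group, function id); the ioctl dispatcher
+ * then indexes straight into it. Sizes and the flat-index scheme here are
+ * illustrative, not the real services layout.
+ */
+#define SKETCH_GROUPS		16
+#define SKETCH_FUNCS_PER_GROUP	32
+
+typedef int (*SKETCH_BRIDGE_FN)(void *pvIn, void *pvOut);
+
+static SKETCH_BRIDGE_FN gasSketchTable[SKETCH_GROUPS * SKETCH_FUNCS_PER_GROUP];
+
+static void sketch_set_entry(unsigned uiGroup, unsigned uiFunc, SKETCH_BRIDGE_FN pfn)
+{
+	gasSketchTable[uiGroup * SKETCH_FUNCS_PER_GROUP + uiFunc] = pfn;
+}
+
+static int sketch_dispatch(unsigned uiGroup, unsigned uiFunc, void *pvIn, void *pvOut)
+{
+	SKETCH_BRIDGE_FN pfn = gasSketchTable[uiGroup * SKETCH_FUNCS_PER_GROUP + uiFunc];
+	return pfn ? pfn(pvIn, pvOut) : -1;	/* unknown entry -> error */
+}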
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_pvrtl_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_pvrtl_bridge.c
new file mode 100644
index 00000000000000..2feb8cebef0506
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_pvrtl_bridge.c
@@ -0,0 +1,916 @@
+/*******************************************************************************
+@File
+@Title Server bridge for pvrtl
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for pvrtl
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "tlserver.h"
+
+
+#include "common_pvrtl_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeTLOpenStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLOPENSTREAM *psTLOpenStreamIN,
+ PVRSRV_BRIDGE_OUT_TLOPENSTREAM *psTLOpenStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNameInt = NULL;
+ TL_STREAM_DESC * psSDInt = NULL;
+ PMR * psTLPMRInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+ psTLOpenStreamOUT->hSD = NULL;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLOpenStreamIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLOpenStreamIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLOpenStream_exit;
+ }
+ }
+ }
+
+
+ {
+ uiNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNameInt, (const void __user *) psTLOpenStreamIN->puiName, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psTLOpenStreamOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLOpenStream_exit;
+ }
+ }
+
+
+ psTLOpenStreamOUT->eError =
+ TLServerOpenStreamKM(
+ uiNameInt,
+ psTLOpenStreamIN->ui32Mode,
+ &psSDInt,
+ &psTLPMRInt);
+ /* Exit early if bridged call fails */
+ if(psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ goto TLOpenStream_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psTLOpenStreamOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psTLOpenStreamOUT->hSD,
+ (void *) psSDInt,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&TLServerCloseStreamKM);
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLOpenStream_exit;
+ }
+
+
+
+
+
+
+ psTLOpenStreamOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psTLOpenStreamOUT->hTLPMR,
+ (void *) psTLPMRInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,psTLOpenStreamOUT->hSD);
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLOpenStream_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+TLOpenStream_exit:
+
+
+
+ if (psTLOpenStreamOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psTLOpenStreamOUT->hSD)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psTLOpenStreamOUT->hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeTLOpenStream: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psSDInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psSDInt)
+ {
+ TLServerCloseStreamKM(psSDInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
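+
+/* Editorial sketch of the create-then-publish rollback in TLOpenStream above:
+ * until a handle owns the new resource the error path must destroy it
+ * directly; once PVRSRVAllocHandleUnlocked() has registered the destructor as
+ * the handle's release function, cleanup goes through the handle instead and
+ * the raw pointer is NULLed so nothing is freed twice. Types are stand-ins.
+ */
+#include <stdlib.h>
+
+typedef struct { int iUnused; } SKETCH_STREAM;
+
+static SKETCH_STREAM *sketch_stream_open(void)      { return malloc(sizeof(SKETCH_STREAM)); }
+static void sketch_stream_close(SKETCH_STREAM *ps)  { free(ps); }
+static void *sketch_handle_alloc(SKETCH_STREAM *ps) { return ps; /* real code also stores the release fn */ }
+
+static int sketch_open_bridge(void **phOut)
+{
+	SKETCH_STREAM *psStream = sketch_stream_open();
+	void *hStream;
+
+	if (!psStream)
+		return -1;
+
+	hStream = sketch_handle_alloc(psStream);
+	if (!hStream)
+	{
+		sketch_stream_close(psStream);	/* no handle yet: destroy directly */
+		return -1;
+	}
+	psStream = NULL;			/* ownership moved to the handle */
+	*phOut = hStream;
+	return 0;
+}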
+
+
+static IMG_INT
+PVRSRVBridgeTLCloseStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLCLOSESTREAM *psTLCloseStreamIN,
+ PVRSRV_BRIDGE_OUT_TLCLOSESTREAM *psTLCloseStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psTLCloseStreamOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psTLCloseStreamIN->hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ if ((psTLCloseStreamOUT->eError != PVRSRV_OK) &&
+ (psTLCloseStreamOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeTLCloseStream: %s",
+ PVRSRVGetErrorStringKM(psTLCloseStreamOUT->eError)));
+ UnlockHandle();
+ goto TLCloseStream_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+TLCloseStream_exit:
+
+
+
+
+ return 0;
+}
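+
+/* Editorial note, as a sketch: the destroy paths above deliberately treat
+ * PVRSRV_ERROR_RETRY as "not yet" rather than failure -- the release callback
+ * may refuse while the resource is still busy, and the caller re-issues the
+ * call -- so only other errors are logged. Error codes and the logger below
+ * are illustrative.
+ */
+enum { SKETCH_E_OK = 0, SKETCH_E_RETRY = 1, SKETCH_E_FAIL = 2 };
+
+static void sketch_log_error(int e) { (void)e; /* PVR_DPF() in the driver */ }
+
+static int sketch_destroy(int (*pfnRelease)(void *), void *hHandle)
+{
+	int e = pfnRelease(hHandle);
+
+	if (e == SKETCH_E_RETRY)
+		return e;		/* caller retries later; not an error */
+	if (e != SKETCH_E_OK)
+		sketch_log_error(e);
+	return e;
+}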
+
+
+static IMG_INT
+PVRSRVBridgeTLAcquireData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLACQUIREDATA *psTLAcquireDataIN,
+ PVRSRV_BRIDGE_OUT_TLACQUIREDATA *psTLAcquireDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLAcquireDataIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psTLAcquireDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLAcquireDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLAcquireData_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLAcquireDataOUT->eError =
+ TLServerAcquireDataKM(
+ psSDInt,
+ &psTLAcquireDataOUT->ui32ReadOffset,
+ &psTLAcquireDataOUT->ui32ReadLen);
+
+
+
+
+TLAcquireData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLReleaseData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLRELEASEDATA *psTLReleaseDataIN,
+ PVRSRV_BRIDGE_OUT_TLRELEASEDATA *psTLReleaseDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLReleaseDataIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psTLReleaseDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLReleaseDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLReleaseData_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLReleaseDataOUT->eError =
+ TLServerReleaseDataKM(
+ psSDInt,
+ psTLReleaseDataIN->ui32ReadOffset,
+ psTLReleaseDataIN->ui32ReadLen);
+
+
+
+
+TLReleaseData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLDiscoverStreams(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLDISCOVERSTREAMS *psTLDiscoverStreamsIN,
+ PVRSRV_BRIDGE_OUT_TLDISCOVERSTREAMS *psTLDiscoverStreamsOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiNamePatternInt = NULL;
+ IMG_CHAR *puiStreamsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) +
+ (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psTLDiscoverStreamsOUT->puiStreams = psTLDiscoverStreamsIN->puiStreams;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLDiscoverStreamsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLDiscoverStreamsIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLDiscoverStreams_exit;
+ }
+ }
+ }
+
+
+ {
+ uiNamePatternInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiNamePatternInt, (const void __user *) psTLDiscoverStreamsIN->puiNamePattern, PRVSRVTL_MAX_STREAM_NAME_SIZE * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLDiscoverStreams_exit;
+ }
+ }
+ if (psTLDiscoverStreamsIN->ui32Size != 0)
+ {
+ puiStreamsInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR);
+ }
+
+
+
+ psTLDiscoverStreamsOUT->eError =
+ TLServerDiscoverStreamsKM(
+ uiNamePatternInt,
+ psTLDiscoverStreamsIN->ui32Size,
+ puiStreamsInt,
+ &psTLDiscoverStreamsOUT->ui32NumFound);
+
+
+
+ if ((psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psTLDiscoverStreamsOUT->puiStreams, puiStreamsInt,
+ (psTLDiscoverStreamsIN->ui32Size * sizeof(IMG_CHAR))) != PVRSRV_OK )
+ {
+ psTLDiscoverStreamsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLDiscoverStreams_exit;
+ }
+ }
+
+
+TLDiscoverStreams_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
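+
+/* Editorial sketch of the copy-in/copy-out round trip above: TLDiscoverStreams
+ * is the one entry point in this file that returns array data, so it stages
+ * both the name pattern (copied in) and the result list (copied back out) in
+ * the same args buffer. memcpy() stands in for OSCopyFromUser/OSCopyToUser so
+ * the shape is visible; sizes are illustrative.
+ */
+#include <string.h>
+
+static int sketch_discover(const char acUserPattern[64],
+			   char *pcUserOut, size_t uiOutLen,
+			   unsigned *puiNumFound)
+{
+	char acPattern[64];
+	char acResults[256] = { 0 };
+
+	if (uiOutLen > sizeof(acResults))
+		return -1;				/* reject oversized requests */
+
+	memcpy(acPattern, acUserPattern, sizeof(acPattern));	/* copy-in */
+	(void)acPattern;
+	*puiNumFound = 0;		/* ...pattern-match the streams here... */
+	memcpy(pcUserOut, acResults, uiOutLen);			/* copy-out */
+	return 0;
+}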
+
+
+static IMG_INT
+PVRSRVBridgeTLReserveStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLRESERVESTREAM *psTLReserveStreamIN,
+ PVRSRV_BRIDGE_OUT_TLRESERVESTREAM *psTLReserveStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLReserveStreamIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psTLReserveStreamOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLReserveStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLReserveStream_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLReserveStreamOUT->eError =
+ TLServerReserveStreamKM(
+ psSDInt,
+ &psTLReserveStreamOUT->ui32BufferOffset,
+ psTLReserveStreamIN->ui32Size,
+ psTLReserveStreamIN->ui32SizeMin,
+ &psTLReserveStreamOUT->ui32Available);
+
+
+
+
+TLReserveStream_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLCommitStream(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLCOMMITSTREAM *psTLCommitStreamIN,
+ PVRSRV_BRIDGE_OUT_TLCOMMITSTREAM *psTLCommitStreamOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLCommitStreamIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psTLCommitStreamOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLCommitStreamOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLCommitStream_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLCommitStreamOUT->eError =
+ TLServerCommitStreamKM(
+ psSDInt,
+ psTLCommitStreamIN->ui32ReqSize);
+
+
+
+
+TLCommitStream_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeTLWriteData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_TLWRITEDATA *psTLWriteDataIN,
+ PVRSRV_BRIDGE_OUT_TLWRITEDATA *psTLWriteDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSD = psTLWriteDataIN->hSD;
+ TL_STREAM_DESC * psSDInt = NULL;
+ IMG_BYTE *psDataInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psTLWriteDataIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psTLWriteDataIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psTLWriteDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto TLWriteData_exit;
+ }
+ }
+ }
+
+ if (psTLWriteDataIN->ui32Size != 0)
+ {
+ psDataInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDataInt, (const void __user *) psTLWriteDataIN->psData, psTLWriteDataIN->ui32Size * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psTLWriteDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto TLWriteData_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psTLWriteDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSDInt,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD,
+ IMG_TRUE);
+ if(psTLWriteDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto TLWriteData_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psTLWriteDataOUT->eError =
+ TLServerWriteDataKM(
+ psSDInt,
+ psTLWriteDataIN->ui32Size,
+ psDataInt);
+
+
+
+
+TLWriteData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSDInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSD,
+ PVRSRV_HANDLE_TYPE_PVR_TL_SD);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_FALSE;
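+/* Editorial note: unlike the PDUMPMM and REGCONFIG glue elsewhere in this
+ * series, the PVRTL entries are registered with bUseLock = IMG_FALSE, i.e.
+ * they dispatch without the global bridge lock. Plausibly this is because
+ * transport-layer calls such as TLServerAcquireDataKM() can block waiting for
+ * stream data, and holding the bridge lock across that wait would stall every
+ * other bridge call; the generated source does not state the reason.
+ */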
+
+PVRSRV_ERROR InitPVRTLBridge(void);
+PVRSRV_ERROR DeinitPVRTLBridge(void);
+
+/*
+ * Register all PVRTL functions with services
+ */
+PVRSRV_ERROR InitPVRTLBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM, PVRSRVBridgeTLOpenStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM, PVRSRVBridgeTLCloseStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA, PVRSRVBridgeTLAcquireData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA, PVRSRVBridgeTLReleaseData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS, PVRSRVBridgeTLDiscoverStreams,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM, PVRSRVBridgeTLReserveStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM, PVRSRVBridgeTLCommitStream,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA, PVRSRVBridgeTLWriteData,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all PVRTL functions from services
+ */
+PVRSRV_ERROR DeinitPVRTLBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLOPENSTREAM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCLOSESTREAM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLACQUIREDATA);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRELEASEDATA);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLDISCOVERSTREAMS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLRESERVESTREAM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLCOMMITSTREAM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_PVRTL, PVRSRV_BRIDGE_PVRTL_TLWRITEDATA);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_regconfig_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_regconfig_bridge.c
new file mode 100644
index 00000000000000..881b2145f21754
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_regconfig_bridge.c
@@ -0,0 +1,293 @@
+/*******************************************************************************
+@File
+@Title Server bridge for regconfig
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for regconfig
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxregconfig.h"
+
+
+#include "common_regconfig_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXSetRegConfigType(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeIN,
+ PVRSRV_BRIDGE_OUT_RGXSETREGCONFIGTYPE *psRGXSetRegConfigTypeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXSetRegConfigTypeOUT->eError =
+ PVRSRVRGXSetRegConfigTypeKM(psConnection, OSGetDevData(psConnection),
+ psRGXSetRegConfigTypeIN->ui8RegPowerIsland);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXAddRegconfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXADDREGCONFIG *psRGXAddRegconfigIN,
+ PVRSRV_BRIDGE_OUT_RGXADDREGCONFIG *psRGXAddRegconfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXAddRegconfigOUT->eError =
+ PVRSRVRGXAddRegConfigKM(psConnection, OSGetDevData(psConnection),
+ psRGXAddRegconfigIN->ui32RegAddr,
+ psRGXAddRegconfigIN->ui64RegValue,
+ psRGXAddRegconfigIN->ui64RegMask);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXClearRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCLEARREGCONFIG *psRGXClearRegConfigIN,
+ PVRSRV_BRIDGE_OUT_RGXCLEARREGCONFIG *psRGXClearRegConfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXClearRegConfigIN);
+
+
+
+
+
+ psRGXClearRegConfigOUT->eError =
+ PVRSRVRGXClearRegConfigKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEnableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXENABLEREGCONFIG *psRGXEnableRegConfigIN,
+ PVRSRV_BRIDGE_OUT_RGXENABLEREGCONFIG *psRGXEnableRegConfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXEnableRegConfigIN);
+
+
+
+
+
+ psRGXEnableRegConfigOUT->eError =
+ PVRSRVRGXEnableRegConfigKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDisableRegConfig(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDISABLEREGCONFIG *psRGXDisableRegConfigIN,
+ PVRSRV_BRIDGE_OUT_RGXDISABLEREGCONFIG *psRGXDisableRegConfigOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXDisableRegConfigIN);
+
+
+
+
+
+ psRGXDisableRegConfigOUT->eError =
+ PVRSRVRGXDisableRegConfigKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+#endif /* EXCLUDE_REGCONFIG_BRIDGE */
+
+#if !defined(EXCLUDE_REGCONFIG_BRIDGE)
+PVRSRV_ERROR InitREGCONFIGBridge(void);
+PVRSRV_ERROR DeinitREGCONFIGBridge(void);
+
+/*
+ * Register all REGCONFIG functions with services
+ */
+PVRSRV_ERROR InitREGCONFIGBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE, PVRSRVBridgeRGXSetRegConfigType,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG, PVRSRVBridgeRGXAddRegconfig,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG, PVRSRVBridgeRGXClearRegConfig,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG, PVRSRVBridgeRGXEnableRegConfig,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG, PVRSRVBridgeRGXDisableRegConfig,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all REGCONFIG functions from services
+ */
+PVRSRV_ERROR DeinitREGCONFIGBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXSETREGCONFIGTYPE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXADDREGCONFIG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXCLEARREGCONFIG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXENABLEREGCONFIG);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_REGCONFIG, PVRSRV_BRIDGE_REGCONFIG_RGXDISABLEREGCONFIG);
+
+
+
+ return PVRSRV_OK;
+}
+#else /* EXCLUDE_REGCONFIG_BRIDGE */
+/* This bridge is conditional on EXCLUDE_REGCONFIG_BRIDGE: when that macro is
+ * defined, the dispatch table is not populated with this bridge's functions.
+ */
+#define InitREGCONFIGBridge() \
+ PVRSRV_OK
+
+#define DeinitREGCONFIGBridge() \
+ PVRSRV_OK
+
+#endif /* EXCLUDE_REGCONFIG_BRIDGE */
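+
+/* Editorial sketch of the compile-time exclusion idiom used above: when the
+ * EXCLUDE_* macro is defined, Init/Deinit collapse to constants, so the code
+ * that calls every bridge's init function needs no #ifdefs of its own. The
+ * FEATUREX names are illustrative.
+ */
+#if !defined(EXCLUDE_FEATUREX_BRIDGE)
+static int InitFEATUREXBridge(void)   { /* SetDispatchTableEntry() calls */   return 0; }
+static int DeinitFEATUREXBridge(void) { /* UnsetDispatchTableEntry() calls */ return 0; }
+#else
+#define InitFEATUREXBridge()   0	/* PVRSRV_OK */
+#define DeinitFEATUREXBridge() 0
+#endif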
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxcmp_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxcmp_bridge.c
new file mode 100644
index 00000000000000..1986aa69541ccf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxcmp_bridge.c
@@ -0,0 +1,1199 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxcmp
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxcmp
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxcompute.h"
+
+
+#include "common_rgxcmp_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATECOMPUTECONTEXT *psRGXCreateComputeContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateComputeContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateComputeContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateComputeContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, (const void __user *) psRGXCreateComputeContextIN->psFrameworkCmd, psRGXCreateComputeContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateComputeContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateComputeContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateComputeContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateComputeContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateComputeContextOUT->eError =
+ PVRSRVRGXCreateComputeContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateComputeContextIN->ui32Priority,
+ psRGXCreateComputeContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ psRGXCreateComputeContextIN->sResumeSignalAddr,
+ &psComputeContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateComputeContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateComputeContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateComputeContextOUT->hComputeContext,
+ (void *) psComputeContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyComputeContextKM);
+ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateComputeContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateComputeContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateComputeContextOUT->eError != PVRSRV_OK)
+ {
+ if (psComputeContextInt)
+ {
+ PVRSRVRGXDestroyComputeContextKM(psComputeContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
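+
+/* Editorial sketch of the feature gate at the top of every rgxcmp entry point
+ * above: if the device node exposes a feature-query hook and the compute
+ * feature bit is absent, the call fails with NOT_SUPPORTED before any
+ * arguments are touched. Names and the feature bit value are illustrative.
+ */
+#include <stdbool.h>
+#include <stdint.h>
+
+#define SKETCH_FEATURE_COMPUTE (1u << 3)	/* stand-in for RGX_FEATURE_COMPUTE_BIT_MASK */
+
+typedef struct SKETCH_DEV_NODE
+{
+	bool (*pfnCheckFeature)(const struct SKETCH_DEV_NODE *, uint32_t uiMask);
+} SKETCH_DEV_NODE;
+
+static int sketch_gate_on_compute(const SKETCH_DEV_NODE *psDev)
+{
+	if (psDev->pfnCheckFeature &&
+	    !psDev->pfnCheckFeature(psDev, SKETCH_FEATURE_COMPUTE))
+		return -1;		/* PVRSRV_ERROR_NOT_SUPPORTED analogue */
+	return 0;
+}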
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyComputeContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYCOMPUTECONTEXT *psRGXDestroyComputeContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXDestroyComputeContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyComputeContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyComputeContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyComputeContextIN->hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ if ((psRGXDestroyComputeContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyComputeContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyComputeContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyComputeContextOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyComputeContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyComputeContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickCDM(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKCDM *psRGXKickCDMIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKCDM *psRGXKickCDMOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXKickCDMIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientFenceOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+ IMG_HANDLE *hServerSyncsInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *psDMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickCDMIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickCDMIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+
+ if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+ {
+ psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, (const void __user *) psRGXKickCDMIN->phClientFenceUFOSyncPrimBlock, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceOffsetInt, (const void __user *) psRGXKickCDMIN->pui32ClientFenceOffset, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, (const void __user *) psRGXKickCDMIN->pui32ClientFenceValue, psRGXKickCDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, (const void __user *) psRGXKickCDMIN->phClientUpdateUFOSyncPrimBlock, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateOffsetInt, (const void __user *) psRGXKickCDMIN->pui32ClientUpdateOffset, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, (const void __user *) psRGXKickCDMIN->pui32ClientUpdateValue, psRGXKickCDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, (const void __user *) psRGXKickCDMIN->pui32ServerSyncFlags, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncsInt2, (const void __user *) psRGXKickCDMIN->phServerSyncs, psRGXKickCDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXKickCDMIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+ if (psRGXKickCDMIN->ui32CmdSize != 0)
+ {
+ psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDMCmdInt, (const void __user *) psRGXKickCDMIN->psDMCmd, psRGXKickCDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickCDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickCDM_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientFenceUFOSyncPrimBlockInt[i],
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickCDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncsInt[i],
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickCDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickCDM_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXKickCDMOUT->eError =
+ PVRSRVRGXKickCDMKM(
+ psComputeContextInt,
+ psRGXKickCDMIN->ui32ClientCacheOpSeqNum,
+ psRGXKickCDMIN->ui32ClientFenceCount,
+ psClientFenceUFOSyncPrimBlockInt,
+ ui32ClientFenceOffsetInt,
+ ui32ClientFenceValueInt,
+ psRGXKickCDMIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickCDMIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncsInt,
+ psRGXKickCDMIN->hCheckFenceFd,
+ psRGXKickCDMIN->hUpdateTimeline,
+ &psRGXKickCDMOUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickCDMIN->ui32CmdSize,
+ psDMCmdInt,
+ psRGXKickCDMIN->ui32PDumpFlags,
+ psRGXKickCDMIN->ui32ExtJobRef,
+ psRGXKickCDMIN->ssRobustnessResetReason);
+
+
+
+
+RGXKickCDM_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+
+
+
+
+
+
+ if (hClientFenceUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientFenceUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hClientUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ClientUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientUpdateUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hServerSyncsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickCDMIN->ui32ServerSyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
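+	/* Free the scratch argument buffer only when it was heap-allocated,
+	 * i.e. when the in-place bridge input buffer could not be reused. */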
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXFlushComputeData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataIN,
+ PVRSRV_BRIDGE_OUT_RGXFLUSHCOMPUTEDATA *psRGXFlushComputeDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXFlushComputeDataIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXFlushComputeDataOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXFlushComputeData_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXFlushComputeDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXFlushComputeDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXFlushComputeData_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXFlushComputeDataOUT->eError =
+ PVRSRVRGXFlushComputeDataKM(
+ psComputeContextInt);
+
+RGXFlushComputeData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetComputeContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETCOMPUTECONTEXTPRIORITY *psRGXSetComputeContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXSetComputeContextPriorityIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXSetComputeContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXSetComputeContextPriority_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXSetComputeContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetComputeContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetComputeContextPriority_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXSetComputeContextPriorityOUT->eError =
+ PVRSRVRGXSetComputeContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psComputeContextInt,
+ psRGXSetComputeContextPriorityIN->ui32Priority);
+
+RGXSetComputeContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastComputeContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETLASTCOMPUTECONTEXTRESETREASON *psRGXGetLastComputeContextResetReasonIN,
+ PVRSRV_BRIDGE_OUT_RGXGETLASTCOMPUTECONTEXTRESETREASON *psRGXGetLastComputeContextResetReasonOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXGetLastComputeContextResetReasonIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXGetLastComputeContextResetReasonOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXGetLastComputeContextResetReason_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXGetLastComputeContextResetReasonOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXGetLastComputeContextResetReasonOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXGetLastComputeContextResetReason_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXGetLastComputeContextResetReasonOUT->eError =
+ PVRSRVRGXGetLastComputeContextResetReasonKM(
+ psComputeContextInt,
+ &psRGXGetLastComputeContextResetReasonOUT->ui32LastResetReason,
+ &psRGXGetLastComputeContextResetReasonOUT->ui32LastResetJobRef);
+
+RGXGetLastComputeContextResetReason_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateIN,
+ PVRSRV_BRIDGE_OUT_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE *psRGXNotifyComputeWriteOffsetUpdateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hComputeContext = psRGXNotifyComputeWriteOffsetUpdateIN->hComputeContext;
+ RGX_SERVER_COMPUTE_CONTEXT * psComputeContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_COMPUTE_BIT_MASK))
+ {
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXNotifyComputeWriteOffsetUpdate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psComputeContextInt,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT,
+ IMG_TRUE);
+ if(psRGXNotifyComputeWriteOffsetUpdateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXNotifyComputeWriteOffsetUpdate_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXNotifyComputeWriteOffsetUpdateOUT->eError =
+ PVRSRVRGXNotifyComputeWriteOffsetUpdateKM(
+ psComputeContextInt);
+
+RGXNotifyComputeWriteOffsetUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psComputeContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hComputeContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_COMPUTE_CONTEXT);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
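+/* bUseLock asks SetDispatchTableEntry to dispatch each entry under the
+ * bridge lock. */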
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXCMPBridge(void);
+PVRSRV_ERROR DeinitRGXCMPBridge(void);
+
+/*
+ * Register all RGXCMP functions with services
+ */
+PVRSRV_ERROR InitRGXCMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT, PVRSRVBridgeRGXCreateComputeContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT, PVRSRVBridgeRGXDestroyComputeContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM, PVRSRVBridgeRGXKickCDM,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA, PVRSRVBridgeRGXFlushComputeData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY, PVRSRVBridgeRGXSetComputeContextPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON, PVRSRVBridgeRGXGetLastComputeContextResetReason,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE, PVRSRVBridgeRGXNotifyComputeWriteOffsetUpdate,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXCMP functions with services
+ */
+PVRSRV_ERROR DeinitRGXCMPBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXCREATECOMPUTECONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXDESTROYCOMPUTECONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXKICKCDM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXFLUSHCOMPUTEDATA);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXSETCOMPUTECONTEXTPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXGETLASTCOMPUTECONTEXTRESETREASON);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXCMP, PVRSRV_BRIDGE_RGXCMP_RGXNOTIFYCOMPUTEWRITEOFFSETUPDATE);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxhwperf_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxhwperf_bridge.c
new file mode 100644
index 00000000000000..c9c7984ad5b56a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxhwperf_bridge.c
@@ -0,0 +1,477 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxhwperf
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxhwperf
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxhwperf.h"
+
+
+#include "common_rgxhwperf_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerf(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCTRLHWPERF *psRGXCtrlHWPerfIN,
+ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERF *psRGXCtrlHWPerfOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psRGXCtrlHWPerfOUT->eError =
+ PVRSRVRGXCtrlHWPerfKM(psConnection, OSGetDevData(psConnection),
+ psRGXCtrlHWPerfIN->ui32StreamId,
+ psRGXCtrlHWPerfIN->bToggle,
+ psRGXCtrlHWPerfIN->ui64Mask);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXConfigEnableHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersIN,
+ PVRSRV_BRIDGE_OUT_RGXCONFIGENABLEHWPERFCOUNTERS *psRGXConfigEnableHWPerfCountersOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_HWPERF_CONFIG_CNTBLK *psBlockConfigsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
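+	/* ui32BufferSize totals the space needed for every user-supplied array,
+	 * packed back-to-back in one scratch buffer. */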
+ IMG_UINT32 ui32BufferSize =
+ (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigEnableHWPerfCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXConfigEnableHWPerfCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXConfigEnableHWPerfCounters_exit;
+ }
+ }
+ }
+
+ if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen != 0)
+ {
+ psBlockConfigsInt = (RGX_HWPERF_CONFIG_CNTBLK*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK);
+ }
+
+ /* Copy the data over */
+ if (psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psBlockConfigsInt, (const void __user *) psRGXConfigEnableHWPerfCountersIN->psBlockConfigs, psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen * sizeof(RGX_HWPERF_CONFIG_CNTBLK)) != PVRSRV_OK )
+ {
+ psRGXConfigEnableHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXConfigEnableHWPerfCounters_exit;
+ }
+ }
+
+
+ psRGXConfigEnableHWPerfCountersOUT->eError =
+ PVRSRVRGXConfigEnableHWPerfCountersKM(psConnection, OSGetDevData(psConnection),
+ psRGXConfigEnableHWPerfCountersIN->ui32ArrayLen,
+ psBlockConfigsInt);
+
+RGXConfigEnableHWPerfCounters_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCtrlHWPerfCounters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersIN,
+ PVRSRV_BRIDGE_OUT_RGXCTRLHWPERFCOUNTERS *psRGXCtrlHWPerfCountersOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT16 *ui16BlockIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCtrlHWPerfCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCtrlHWPerfCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCtrlHWPerfCounters_exit;
+ }
+ }
+ }
+
+ if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen != 0)
+ {
+ ui16BlockIDsInt = (IMG_UINT16*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16);
+ }
+
+ /* Copy the data over */
+ if (psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui16BlockIDsInt, (const void __user *) psRGXCtrlHWPerfCountersIN->pui16BlockIDs, psRGXCtrlHWPerfCountersIN->ui32ArrayLen * sizeof(IMG_UINT16)) != PVRSRV_OK )
+ {
+ psRGXCtrlHWPerfCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCtrlHWPerfCounters_exit;
+ }
+ }
+
+
+ psRGXCtrlHWPerfCountersOUT->eError =
+ PVRSRVRGXCtrlHWPerfCountersKM(psConnection, OSGetDevData(psConnection),
+ psRGXCtrlHWPerfCountersIN->bEnable,
+ psRGXCtrlHWPerfCountersIN->ui32ArrayLen,
+ ui16BlockIDsInt);
+
+RGXCtrlHWPerfCounters_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXConfigCustomCounters(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersIN,
+ PVRSRV_BRIDGE_OUT_RGXCONFIGCUSTOMCOUNTERS *psRGXConfigCustomCountersOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32CustomCounterIDsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
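+	/* The element count is only 16 bits wide, so this size calculation
+	 * cannot overflow a 32-bit total. */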
+ IMG_UINT32 ui32BufferSize =
+ (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXConfigCustomCountersIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXConfigCustomCountersIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXConfigCustomCounters_exit;
+ }
+ }
+ }
+
+ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters != 0)
+ {
+ ui32CustomCounterIDsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32CustomCounterIDsInt, (const void __user *) psRGXConfigCustomCountersIN->pui32CustomCounterIDs, psRGXConfigCustomCountersIN->ui16NumCustomCounters * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXConfigCustomCountersOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXConfigCustomCounters_exit;
+ }
+ }
+
+
+ psRGXConfigCustomCountersOUT->eError =
+ PVRSRVRGXConfigCustomCountersKM(psConnection, OSGetDevData(psConnection),
+ psRGXConfigCustomCountersIN->ui16CustomBlockID,
+ psRGXConfigCustomCountersIN->ui16NumCustomCounters,
+ ui32CustomCounterIDsInt);
+
+RGXConfigCustomCounters_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsIN,
+ PVRSRV_BRIDGE_OUT_RGXGETHWPERFBVNCFEATUREFLAGS *psRGXGetHWPerfBvncFeatureFlagsOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
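+	/* No input fields are used here; mark the structure unreferenced to
+	 * silence unused-parameter warnings. */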
+ PVR_UNREFERENCED_PARAMETER(psRGXGetHWPerfBvncFeatureFlagsIN);
+
+ psRGXGetHWPerfBvncFeatureFlagsOUT->eError =
+ PVRSRVRGXGetHWPerfBvncFeatureFlagsKM(psConnection, OSGetDevData(psConnection),
+ &psRGXGetHWPerfBvncFeatureFlagsOUT->ui32FeatureFlags);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXHWPERFBridge(void);
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void);
+
+/*
+ * Register all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR InitRGXHWPERFBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF, PVRSRVBridgeRGXCtrlHWPerf,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS, PVRSRVBridgeRGXConfigEnableHWPerfCounters,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS, PVRSRVBridgeRGXCtrlHWPerfCounters,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS, PVRSRVBridgeRGXConfigCustomCounters,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS, PVRSRVBridgeRGXGetHWPerfBvncFeatureFlags,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXHWPERF functions with services
+ */
+PVRSRV_ERROR DeinitRGXHWPERFBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERF);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGENABLEHWPERFCOUNTERS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCTRLHWPERFCOUNTERS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXCONFIGCUSTOMCOUNTERS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXHWPERF, PVRSRV_BRIDGE_RGXHWPERF_RGXGETHWPERFBVNCFEATUREFLAGS);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxkicksync_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxkicksync_bridge.c
new file mode 100644
index 00000000000000..59c61803c636b4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxkicksync_bridge.c
@@ -0,0 +1,721 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxkicksync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxkicksync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxkicksync.h"
+
+
+#include "common_rgxkicksync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEKICKSYNCCONTEXT *psRGXCreateKickSyncContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXCreateKickSyncContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXCreateKickSyncContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateKickSyncContext_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXCreateKickSyncContextOUT->eError =
+ PVRSRVRGXCreateKickSyncContextKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt,
+ &psKickSyncContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateKickSyncContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
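+	/* The KM destroy function is registered as the handle release callback,
+	 * so destroying the handle tears the context down. */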
+ psRGXCreateKickSyncContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateKickSyncContextOUT->hKickSyncContext,
+ (void *) psKickSyncContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyKickSyncContextKM);
+ if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateKickSyncContext_exit;
+ }
+
+	/* Release now that we have created the handles. */
+ UnlockHandle();
+
+
+
+RGXCreateKickSyncContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateKickSyncContextOUT->eError != PVRSRV_OK)
+ {
+ if (psKickSyncContextInt)
+ {
+ PVRSRVRGXDestroyKickSyncContextKM(psKickSyncContextInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyKickSyncContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYKICKSYNCCONTEXT *psRGXDestroyKickSyncContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+ psRGXDestroyKickSyncContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyKickSyncContextIN->hKickSyncContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
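+	/* PVRSRV_ERROR_RETRY is returned to the caller without logging so the
+	 * destroy can be retried later. */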
+ if ((psRGXDestroyKickSyncContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyKickSyncContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyKickSyncContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyKickSyncContextOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyKickSyncContext_exit;
+ }
+
+	/* Release now that we have destroyed the handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyKickSyncContext_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickSync(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKSYNC *psRGXKickSyncIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKSYNC *psRGXKickSyncOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hKickSyncContext = psRGXKickSyncIN->hKickSyncContext;
+ RGX_SERVER_KICKSYNC_CONTEXT * psKickSyncContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32FenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+ IMG_HANDLE *hServerSyncInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ 0;
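+	/* The final 32 IMG_CHARs are reserved for the fixed-length update
+	 * fence name. */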
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickSyncIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickSyncIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+
+ if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+ {
+ psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hFenceUFOSyncPrimBlockInt2, (const void __user *) psRGXKickSyncIN->phFenceUFOSyncPrimBlock, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceSyncOffsetInt, (const void __user *) psRGXKickSyncIN->pui32FenceSyncOffset, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceValueInt, (const void __user *) psRGXKickSyncIN->pui32FenceValue, psRGXKickSyncIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+ {
+ psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hUpdateUFOSyncPrimBlockInt2, (const void __user *) psRGXKickSyncIN->phUpdateUFOSyncPrimBlock, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateSyncOffsetInt, (const void __user *) psRGXKickSyncIN->pui32UpdateSyncOffset, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateValueInt, (const void __user *) psRGXKickSyncIN->pui32UpdateValue, psRGXKickSyncIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, (const void __user *) psRGXKickSyncIN->pui32ServerSyncFlags, psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+ if (psRGXKickSyncIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncInt2, (const void __user *) psRGXKickSyncIN->phServerSync, psRGXKickSyncIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXKickSyncIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickSyncOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickSync_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psKickSyncContextInt,
+ hKickSyncContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psFenceUFOSyncPrimBlockInt[i],
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psUpdateUFOSyncPrimBlockInt[i],
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ServerSyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickSyncOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i],
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickSyncOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickSync_exit;
+ }
+ }
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
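+	/* Dispatch the sync kick; the KM call consumes the check/timeline fence
+	 * FDs and returns an update fence FD to the caller. */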
+ psRGXKickSyncOUT->eError =
+ PVRSRVRGXKickSyncKM(
+ psKickSyncContextInt,
+ psRGXKickSyncIN->ui32ClientCacheOpSeqNum,
+ psRGXKickSyncIN->ui32ClientFenceCount,
+ psFenceUFOSyncPrimBlockInt,
+ ui32FenceSyncOffsetInt,
+ ui32FenceValueInt,
+ psRGXKickSyncIN->ui32ClientUpdateCount,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ psRGXKickSyncIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncInt,
+ psRGXKickSyncIN->hCheckFenceFD,
+ psRGXKickSyncIN->hTimelineFenceFD,
+ &psRGXKickSyncOUT->hUpdateFenceFD,
+ uiUpdateFenceNameInt,
+ psRGXKickSyncIN->ui32ExtJobRef);
+
+RGXKickSync_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psKickSyncContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hKickSyncContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_KICKSYNC_CONTEXT);
+ }
+
+ if (hFenceUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hFenceUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ClientUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hUpdateUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hServerSyncInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickSyncIN->ui32ServerSyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void);
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void);
+
+/*
+ * Register all RGXKICKSYNC functions with services
+ */
+PVRSRV_ERROR InitRGXKICKSYNCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT, PVRSRVBridgeRGXCreateKickSyncContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT, PVRSRVBridgeRGXDestroyKickSyncContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC, PVRSRVBridgeRGXKickSync,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXKICKSYNC functions with services
+ */
+PVRSRV_ERROR DeinitRGXKICKSYNCBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXCREATEKICKSYNCCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXDESTROYKICKSYNCCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXKICKSYNC, PVRSRV_BRIDGE_RGXKICKSYNC_RGXKICKSYNC);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxpdump_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxpdump_bridge.c
new file mode 100644
index 00000000000000..8dac96f198ac01
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxpdump_bridge.c
@@ -0,0 +1,172 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxpdump
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxpdump
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxpdump.h"
+
+
+#include "common_rgxpdump_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgePDumpTraceBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PDUMPTRACEBUFFER *psPDumpTraceBufferIN,
+ PVRSRV_BRIDGE_OUT_PDUMPTRACEBUFFER *psPDumpTraceBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psPDumpTraceBufferOUT->eError =
+ PVRSRVPDumpTraceBufferKM(psConnection, OSGetDevData(psConnection),
+ psPDumpTraceBufferIN->ui32PDumpFlags);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgePDumpSignatureBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferIN,
+ PVRSRV_BRIDGE_OUT_PDUMPSIGNATUREBUFFER *psPDumpSignatureBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ psPDumpSignatureBufferOUT->eError =
+ PVRSRVPDumpSignatureBufferKM(psConnection, OSGetDevData(psConnection),
+ psPDumpSignatureBufferIN->ui32PDumpFlags);
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXPDUMPBridge(void);
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void);
+
+/*
+ * Register all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR InitRGXPDUMPBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER, PVRSRVBridgePDumpTraceBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER, PVRSRVBridgePDumpSignatureBuffer,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXPDUMP functions with services
+ */
+PVRSRV_ERROR DeinitRGXPDUMPBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPTRACEBUFFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXPDUMP, PVRSRV_BRIDGE_RGXPDUMP_PDUMPSIGNATUREBUFFER);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxray_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxray_bridge.c
new file mode 100644
index 00000000000000..93ba78ed38b56c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxray_bridge.c
@@ -0,0 +1,1915 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxray
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxray
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxray.h"
+#include "devicemem_server.h"
+
+
+#include "common_rgxray_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRPMFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERPMFREELIST *psRGXCreateRPMFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERPMFREELIST *psRGXCreateRPMFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRPMContext = psRGXCreateRPMFreeListIN->hRPMContext;
+ RGX_SERVER_RPM_CONTEXT * psRPMContextInt = NULL;
+ RGX_RPM_FREELIST * psCleanupCookieInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXCreateRPMFreeListOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateRPMFreeList_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXCreateRPMFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRPMContextInt,
+ hRPMContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMFreeList_exit;
+ }
+	/* Release now that we have looked up the handles. */
+ UnlockHandle();
+
+ psRGXCreateRPMFreeListOUT->eError =
+ RGXCreateRPMFreeList(psConnection, OSGetDevData(psConnection),
+ psRPMContextInt,
+ psRGXCreateRPMFreeListIN->ui32InitFLPages,
+ psRGXCreateRPMFreeListIN->ui32GrowFLPages,
+ psRGXCreateRPMFreeListIN->sFreeListDevVAddr,
+ &psCleanupCookieInt,
+ &psRGXCreateRPMFreeListOUT->ui32HWFreeList,
+ psRGXCreateRPMFreeListIN->bIsExternal);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRPMFreeList_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+ psRGXCreateRPMFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateRPMFreeListOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyRPMFreeList);
+ if (psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMFreeList_exit;
+ }
+
+	/* Release now that we have created the handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRPMFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psRPMContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRPMContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateRPMFreeListOUT->eError != PVRSRV_OK)
+ {
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyRPMFreeList(psCleanupCookieInt);
+ }
+ }
+
+
+ return 0;
+}
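+
+/*
+ * Editor's note: the create path above always returns 0; the real status is
+ * carried in psRGXCreateRPMFreeListOUT->eError. Ownership of the freelist
+ * transfers the moment PVRSRVAllocHandleUnlocked() succeeds -- from then on
+ * the handle base frees it via the registered release callback, which is
+ * why the exit path only calls RGXDestroyRPMFreeList() by hand when eError
+ * is set. The same hand-off as a minimal standalone model (hypothetical
+ * names):
+ *
+ *     obj = create_object();                     // caller owns obj
+ *     err = alloc_handle(&h, obj, destroy_object);
+ *     if (err != OK)
+ *         destroy_object(obj);                   // never got a handle owner
+ *                                                // else: handle base owns obj
+ */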
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRPMFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRPMFREELIST *psRGXDestroyRPMFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRPMFREELIST *psRGXDestroyRPMFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXDestroyRPMFreeListOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyRPMFreeList_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRPMFreeListOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRPMFreeListIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_RPM_FREELIST);
+ if ((psRGXDestroyRPMFreeListOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRPMFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRPMFreeList: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRPMFreeListOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyRPMFreeList_exit;
+ }
+
+	/* Release now that we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRPMFreeList_exit:
+
+
+
+
+ return 0;
+}
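+
+/*
+ * Editor's note: PVRSRV_ERROR_RETRY is deliberately filtered out of the
+ * error log above -- the freelist may still be referenced by in-flight GPU
+ * work. The RETRY code is still passed back in eError, and the expectation
+ * (assumption, not verified in this file) is that the caller simply
+ * re-issues the destroy, e.g.:
+ *
+ *     do {                                       // hypothetical caller loop
+ *         err = BridgeDestroyRPMFreeList(h);
+ *     } while (err == PVRSRV_ERROR_RETRY);
+ */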
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRPMContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERPMCONTEXT *psRGXCreateRPMContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERPMCONTEXT *psRGXCreateRPMContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_SERVER_RPM_CONTEXT * psCleanupCookieInt = NULL;
+ IMG_HANDLE hSceneHeap = psRGXCreateRPMContextIN->hSceneHeap;
+ DEVMEMINT_HEAP * psSceneHeapInt = NULL;
+ IMG_HANDLE hRPMPageTableHeap = psRGXCreateRPMContextIN->hRPMPageTableHeap;
+ DEVMEMINT_HEAP * psRPMPageTableHeapInt = NULL;
+ DEVMEM_MEMDESC * psHWMemDescInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXCreateRPMContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateRPMContext_exit;
+ }
+ }
+
+
+
+ psRGXCreateRPMContextOUT->hCleanupCookie = NULL;
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateRPMContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSceneHeapInt,
+ hSceneHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateRPMContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRPMPageTableHeapInt,
+ hRPMPageTableHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP,
+ IMG_TRUE);
+ if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+	/* Release now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateRPMContextOUT->eError =
+ RGXCreateRPMContext(psConnection, OSGetDevData(psConnection),
+ &psCleanupCookieInt,
+ psRGXCreateRPMContextIN->ui32TotalRPMPages,
+ psRGXCreateRPMContextIN->ui32Log2DopplerPageSize,
+ psRGXCreateRPMContextIN->sSceneMemoryBaseAddr,
+ psRGXCreateRPMContextIN->sDopplerHeapBaseAddr,
+ psSceneHeapInt,
+ psRGXCreateRPMContextIN->sRPMPageTableBaseAddr,
+ psRPMPageTableHeapInt,
+ &psHWMemDescInt,
+ &psRGXCreateRPMContextOUT->ui32HWFrameData);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRPMContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psRGXCreateRPMContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRPMContextOUT->hCleanupCookie,
+							(void *) psCleanupCookieInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							(PFN_HANDLE_RELEASE)&RGXDestroyRPMContext);
+ if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+
+
+
+
+
+
+	psRGXCreateRPMContextOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRPMContextOUT->hHWMemDesc,
+							(void *) psHWMemDescInt,
+							PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+							PVRSRV_HANDLE_ALLOC_FLAG_NONE,
+							psRGXCreateRPMContextOUT->hCleanupCookie);
+ if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRPMContext_exit;
+ }
+
+	/* Release now that we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRPMContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSceneHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSceneHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psRPMPageTableHeapInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRPMPageTableHeap,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_HEAP);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateRPMContextOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psRGXCreateRPMContextOUT->hCleanupCookie)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXCreateRPMContextOUT->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXCreateRPMContext: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psCleanupCookieInt = NULL;
+ }
+
+
+	/* Release now that we have cleaned up the creation handles. */
+ UnlockHandle();
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyRPMContext(psCleanupCookieInt);
+ }
+ }
+
+
+ return 0;
+}
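+
+/*
+ * Editor's note: hHWMemDesc is allocated with PVRSRVAllocSubHandleUnlocked()
+ * as a child of hCleanupCookie, so destroying the parent handle tears the
+ * memdesc handle down with it and no separate release callback is needed.
+ * The unwind is correspondingly careful: if the context already acquired a
+ * handle, releasing that handle is what destroys it (hence the PVR_ASSERT
+ * and the psCleanupCookieInt = NULL, which stops the direct
+ * RGXDestroyRPMContext() call below from freeing it a second time).
+ */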
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRPMContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRPMCONTEXT *psRGXDestroyRPMContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRPMCONTEXT *psRGXDestroyRPMContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXDestroyRPMContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyRPMContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRPMContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRPMContextIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RPM_CONTEXT);
+ if ((psRGXDestroyRPMContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRPMContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRPMContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRPMContextOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyRPMContext_exit;
+ }
+
+	/* Release now that we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRPMContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickRS(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKRS *psRGXKickRSIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKRS *psRGXKickRSOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRayContext = psRGXKickRSIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+ IMG_HANDLE *hServerSyncsInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *psDMCmdInt = NULL;
+ IMG_BYTE *psFCDMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickRSIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE)) +
+ 0;
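+	/*
+	 * Editor's note: ui32BufferSize pre-computes one flat staging buffer
+	 * covering every variable-length argument; the carve-out code below
+	 * must advance ui32NextOffset through the same terms in the same
+	 * order, which PVR_ASSERT(ui32BufferSize == ui32NextOffset) checks at
+	 * exit. For example (hypothetical), ui32ClientFenceCount == 2 on a
+	 * 64-bit build contributes 2*8 + 2*8 + 2*4 + 2*4 = 48 bytes from the
+	 * first four terms.
+	 */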
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickRS_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickRSIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickRSIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickRS_exit;
+ }
+ }
+ }
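+	/*
+	 * Editor's note: when the staging arrays fit in the unused tail of
+	 * the fixed-size bridge input buffer, that tail is reused and no heap
+	 * allocation happens at all; bHaveEnoughSpace records which case was
+	 * taken so the exit path frees pArrayArgsBuffer only when it really
+	 * came from OSAllocMemNoStats().
+	 */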
+
+ if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+ {
+ psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, (const void __user *) psRGXKickRSIN->phClientFenceUFOSyncPrimBlock, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceSyncOffsetInt, (const void __user *) psRGXKickRSIN->pui32ClientFenceSyncOffset, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, (const void __user *) psRGXKickRSIN->pui32ClientFenceValue, psRGXKickRSIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, (const void __user *) psRGXKickRSIN->phClientUpdateUFOSyncPrimBlock, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateSyncOffsetInt, (const void __user *) psRGXKickRSIN->pui32ClientUpdateSyncOffset, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, (const void __user *) psRGXKickRSIN->pui32ClientUpdateValue, psRGXKickRSIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, (const void __user *) psRGXKickRSIN->pui32ServerSyncFlags, psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncsInt2, (const void __user *) psRGXKickRSIN->phServerSyncs, psRGXKickRSIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXKickRSIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
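+	/*
+	 * Editor's note: the update fence name is a fixed 32-character field,
+	 * staged and copied unconditionally (its term in ui32BufferSize has
+	 * no count factor). Nothing here forces a terminating NUL, so
+	 * presumably the server side treats it as fixed-width or terminates
+	 * it itself (assumption, not verified in this file).
+	 */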
+ if (psRGXKickRSIN->ui32CmdSize != 0)
+ {
+ psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDMCmdInt, (const void __user *) psRGXKickRSIN->psDMCmd, psRGXKickRSIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+ if (psRGXKickRSIN->ui32FCCmdSize != 0)
+ {
+ psFCDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFCDMCmdInt, (const void __user *) psRGXKickRSIN->psFCDMCmd, psRGXKickRSIN->ui32FCCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickRSOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickRS_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientFenceUFOSyncPrimBlockInt[i],
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ServerSyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickRSOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncsInt[i],
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickRSOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickRS_exit;
+ }
+ }
+ }
+	/* Release now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXKickRSOUT->eError =
+ PVRSRVRGXKickRSKM(
+ psRayContextInt,
+ psRGXKickRSIN->ui32ClientCacheOpSeqNum,
+ psRGXKickRSIN->ui32ClientFenceCount,
+ psClientFenceUFOSyncPrimBlockInt,
+ ui32ClientFenceSyncOffsetInt,
+ ui32ClientFenceValueInt,
+ psRGXKickRSIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateSyncOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickRSIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncsInt,
+ psRGXKickRSIN->hCheckFenceFD,
+ psRGXKickRSIN->hUpdateTimeline,
+ &psRGXKickRSOUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickRSIN->ui32CmdSize,
+ psDMCmdInt,
+ psRGXKickRSIN->ui32FCCmdSize,
+ psFCDMCmdInt,
+ psRGXKickRSIN->ui32FrameContext,
+ psRGXKickRSIN->ui32PDumpFlags,
+ psRGXKickRSIN->ui32ExtJobRef,
+ psRGXKickRSIN->ssRobustnessResetReason);
+
+
+
+
+RGXKickRS_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+
+
+
+
+
+
+ if (hClientFenceUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientFenceUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+
+
+
+
+ if (hClientUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ClientUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientUpdateUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+
+
+
+
+ if (hServerSyncsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickRSIN->ui32ServerSyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
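+
+/*
+ * Editor's note (illustration, not generated source): the copy-in section
+ * of RGXKickRS repeats one idiom per array -- reserve a slice of the
+ * staging buffer at ui32NextOffset, then OSCopyFromUser() into it. The same
+ * idiom in self-contained form, with hypothetical names and plain libc
+ * standing in for the PVR OS layer:
+ *
+ *     #include <stddef.h>
+ *     #include <stdint.h>
+ *
+ *     static size_t next_offset;
+ *
+ *     static void *carve(uint8_t *buf, size_t count, size_t elem_size)
+ *     {
+ *         void *slice = buf + next_offset;   // slice for 'count' elements
+ *         next_offset += count * elem_size;  // mirrors ui32NextOffset
+ *         return count ? slice : NULL;       // empty arrays stay NULL
+ *     }
+ *
+ * usage: fences = carve(buf, nfences, sizeof(uint32_t)); then copy the user
+ * data into the returned slice and fail the call on a short copy.
+ */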
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickVRDM(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKVRDM *psRGXKickVRDMIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKVRDM *psRGXKickVRDMOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRayContext = psRGXKickVRDMIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncsInt = NULL;
+ IMG_HANDLE *hServerSyncsInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_BYTE *psDMCmdInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickVRDMIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickVRDMIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+
+ if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+ {
+ psClientFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientFenceUFOSyncPrimBlockInt2, (const void __user *) psRGXKickVRDMIN->phClientFenceUFOSyncPrimBlock, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceSyncOffsetInt, (const void __user *) psRGXKickVRDMIN->pui32ClientFenceSyncOffset, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientFenceCount != 0)
+ {
+ ui32ClientFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceValueInt, (const void __user *) psRGXKickVRDMIN->pui32ClientFenceValue, psRGXKickVRDMIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+ {
+ psClientUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientUpdateUFOSyncPrimBlockInt2, (const void __user *) psRGXKickVRDMIN->phClientUpdateUFOSyncPrimBlock, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateSyncOffsetInt, (const void __user *) psRGXKickVRDMIN->pui32ClientUpdateSyncOffset, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount != 0)
+ {
+ ui32ClientUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateValueInt, (const void __user *) psRGXKickVRDMIN->pui32ClientUpdateValue, psRGXKickVRDMIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, (const void __user *) psRGXKickVRDMIN->pui32ServerSyncFlags, psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncsInt2, (const void __user *) psRGXKickVRDMIN->phServerSyncs, psRGXKickVRDMIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXKickVRDMIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+ if (psRGXKickVRDMIN->ui32CmdSize != 0)
+ {
+ psDMCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psDMCmdInt, (const void __user *) psRGXKickVRDMIN->psDMCmd, psRGXKickVRDMIN->ui32CmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickVRDMOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickVRDM_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientFenceUFOSyncPrimBlockInt[i],
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientUpdateUFOSyncPrimBlockInt[i],
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ServerSyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickVRDMOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncsInt[i],
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickVRDMOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickVRDM_exit;
+ }
+ }
+ }
+	/* Release now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXKickVRDMOUT->eError =
+ PVRSRVRGXKickVRDMKM(
+ psRayContextInt,
+ psRGXKickVRDMIN->ui32ClientCacheOpSeqNum,
+ psRGXKickVRDMIN->ui32ClientFenceCount,
+ psClientFenceUFOSyncPrimBlockInt,
+ ui32ClientFenceSyncOffsetInt,
+ ui32ClientFenceValueInt,
+ psRGXKickVRDMIN->ui32ClientUpdateCount,
+ psClientUpdateUFOSyncPrimBlockInt,
+ ui32ClientUpdateSyncOffsetInt,
+ ui32ClientUpdateValueInt,
+ psRGXKickVRDMIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncsInt,
+ psRGXKickVRDMIN->hCheckFenceFD,
+ psRGXKickVRDMIN->hUpdateTimeline,
+ &psRGXKickVRDMOUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickVRDMIN->ui32CmdSize,
+ psDMCmdInt,
+ psRGXKickVRDMIN->ui32PDumpFlags,
+ psRGXKickVRDMIN->ui32ExtJobRef,
+ psRGXKickVRDMIN->ssRobustnessResetReason);
+
+
+
+
+RGXKickVRDM_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+
+
+
+
+
+
+ if (hClientFenceUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientFenceUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+
+
+
+
+ if (hClientUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ClientUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientUpdateUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+
+
+
+
+ if (hServerSyncsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickVRDMIN->ui32ServerSyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
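+
+/*
+ * Editor's note: PVRSRVBridgeRGXKickVRDM above is a near-verbatim twin of
+ * PVRSRVBridgeRGXKickRS, minus the frame-context command (psFCDMCmd,
+ * ui32FCCmdSize) and ui32FrameContext arguments. The duplication is a
+ * property of the bridge generator, which emits one self-contained entry
+ * point per call rather than sharing helpers; a hand-written version might
+ * hoist the common sync-array staging into something like
+ *
+ *     static PVRSRV_ERROR StageKickSyncArrays(IMG_BYTE *pBuf, ...);  // hypothetical
+ *
+ * at the cost of diverging from the generated template.
+ */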
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRayContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERAYCONTEXT *psRGXCreateRayContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERAYCONTEXT *psRGXCreateRayContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateRayContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) +
+ 0;
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXCreateRayContext_exit;
+ }
+ }
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRayContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateRayContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateRayContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateRayContextIN->ui32FrameworkCmdSize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, (const void __user *) psRGXCreateRayContextIN->psFrameworkCmd, psRGXCreateRayContextIN->ui32FrameworkCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateRayContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateRayContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateRayContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRayContext_exit;
+ }
+	/* Release now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateRayContextOUT->eError =
+ PVRSRVRGXCreateRayContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateRayContextIN->ui32Priority,
+ psRGXCreateRayContextIN->sVRMCallStackAddr,
+ psRGXCreateRayContextIN->ui32FrameworkCmdSize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psRayContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRayContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+	psRGXCreateRayContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+							&psRGXCreateRayContextOUT->hRayContext,
+							(void *) psRayContextInt,
+							PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+							PVRSRV_HANDLE_ALLOC_FLAG_MULTI,
+							(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRayContextKM);
+ if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRayContext_exit;
+ }
+
+	/* Release now that we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRayContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateRayContextOUT->eError != PVRSRV_OK)
+ {
+ if (psRayContextInt)
+ {
+ PVRSRVRGXDestroyRayContextKM(psRayContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
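+
+/*
+ * Editor's note: unlike the RPM objects earlier in this file, the ray
+ * context handle is created with PVRSRV_HANDLE_ALLOC_FLAG_MULTI rather than
+ * _NONE, i.e. the same server object may be wrapped by more than one
+ * handle. The unwind is otherwise the standard pair: drop the looked-up
+ * hPrivData reference, then destroy the context directly only if it never
+ * acquired a handle owner.
+ */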
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRayContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRAYCONTEXT *psRGXDestroyRayContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXDestroyRayContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXDestroyRayContext_exit;
+ }
+ }
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRayContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRayContextIN->hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ if ((psRGXDestroyRayContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRayContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRayContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRayContextOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyRayContext_exit;
+ }
+
+	/* Release now that we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRayContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetRayContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETRAYCONTEXTPRIORITY *psRGXSetRayContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETRAYCONTEXTPRIORITY *psRGXSetRayContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRayContext = psRGXSetRayContextPriorityIN->hRayContext;
+ RGX_SERVER_RAY_CONTEXT * psRayContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_RAY_TRACING_DEPRECATED_BIT_MASK))
+ {
+ psRGXSetRayContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXSetRayContextPriority_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXSetRayContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRayContextInt,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetRayContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetRayContextPriority_exit;
+ }
+	/* Release now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetRayContextPriorityOUT->eError =
+ PVRSRVRGXSetRayContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psRayContextInt,
+ psRGXSetRayContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetRayContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psRayContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRayContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RAY_CONTEXT);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXRAYBridge(void);
+PVRSRV_ERROR DeinitRGXRAYBridge(void);
+
+/*
+ * Register all RGXRAY functions with services
+ */
+PVRSRV_ERROR InitRGXRAYBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST, PVRSRVBridgeRGXCreateRPMFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST, PVRSRVBridgeRGXDestroyRPMFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT, PVRSRVBridgeRGXCreateRPMContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT, PVRSRVBridgeRGXDestroyRPMContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRS, PVRSRVBridgeRGXKickRS,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM, PVRSRVBridgeRGXKickVRDM,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT, PVRSRVBridgeRGXCreateRayContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT, PVRSRVBridgeRGXDestroyRayContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY, PVRSRVBridgeRGXSetRayContextPriority,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
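+
+/*
+ * Editor's note: registration binds each (bridge group, function ID) pair
+ * to its entry point; bUseLock asks the dispatcher to serialise these calls
+ * under the global bridge lock, and the NULL argument is an auxiliary slot
+ * this bridge leaves unused (assumption: its meaning is defined by
+ * SetDispatchTableEntry(), not visible here). Dispatch then amounts to a
+ * table lookup, conceptually:
+ *
+ *     pfn = table[PVRSRV_BRIDGE_RGXRAY][ui32FunctionID];  // hypothetical shape
+ *     ret = pfn(ui32FunctionID, psIN, psOUT, psConnection);
+ */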
+
+/*
+ * Unregister all RGXRAY functions from services
+ */
+PVRSRV_ERROR DeinitRGXRAYBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMFREELIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMFREELIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERPMCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRPMCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKRS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXKICKVRDM);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXCREATERAYCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXDESTROYRAYCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXRAY, PVRSRV_BRIDGE_RGXRAY_RGXSETRAYCONTEXTPRIORITY);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxsignals_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxsignals_bridge.c
new file mode 100644
index 00000000000000..2990f4d5a20e46
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxsignals_bridge.c
@@ -0,0 +1,193 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxsignals
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxsignals
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxsignals.h"
+
+
+#include "common_rgxsignals_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXNotifySignalUpdate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateIN,
+ PVRSRV_BRIDGE_OUT_RGXNOTIFYSIGNALUPDATE *psRGXNotifySignalUpdateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPrivData = psRGXNotifySignalUpdateIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK))
+ {
+ psRGXNotifySignalUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXNotifySignalUpdate_exit;
+ }
+ }
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXNotifySignalUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXNotifySignalUpdateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXNotifySignalUpdate_exit;
+ }
+	/* Release now that we have looked up handles. */
+ UnlockHandle();
+
+ psRGXNotifySignalUpdateOUT->eError =
+ PVRSRVRGXNotifySignalUpdateKM(psConnection, OSGetDevData(psConnection),
+ hPrivDataInt,
+ psRGXNotifySignalUpdateIN->sDevSignalAddress);
+
+
+
+
+RGXNotifySignalUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+	/* Release now that we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
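+
+/*
+ * Editor's note: hPrivDataInt stays an opaque IMG_HANDLE rather than being
+ * cast to a typed server pointer -- the lookup's job here is to verify the
+ * client really owns a PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA handle before the
+ * resolved value is passed to PVRSRVRGXNotifySignalUpdateKM(). Note the
+ * feature gate keys on RGX_FEATURE_SIGNAL_SNOOPING_BIT_MASK, not the
+ * deprecated ray-tracing bit used throughout the rgxray bridge.
+ */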
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXSIGNALSBridge(void);
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void);
+
+/*
+ * Register all RGXSIGNALS functions with services
+ */
+PVRSRV_ERROR InitRGXSIGNALSBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE, PVRSRVBridgeRGXNotifySignalUpdate,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXSIGNALS functions from services
+ */
+PVRSRV_ERROR DeinitRGXSIGNALSBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXSIGNALS, PVRSRV_BRIDGE_RGXSIGNALS_RGXNOTIFYSIGNALUPDATE);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxta3d_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxta3d_bridge.c
new file mode 100644
index 00000000000000..215c26a18c8e80
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxta3d_bridge.c
@@ -0,0 +1,2823 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxta3d
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxta3d
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxta3d.h"
+
+
+#include "common_rgxta3d_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEHWRTDATA *psRGXCreateHWRTDataIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEHWRTDATA *psRGXCreateHWRTDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_FREELIST * *psapsFreeListsInt = NULL;
+ IMG_HANDLE *hapsFreeListsInt2 = NULL;
+ RGX_RTDATA_CLEANUP_DATA * psCleanupCookieInt = NULL;
+ DEVMEM_MEMDESC * psRTACtlMemDescInt = NULL;
+ DEVMEM_MEMDESC * pssHWRTDataMemDescInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *)) +
+ (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) +
+ 0;
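+
+ /* ui32BufferSize is the byte total of every array argument above; the
+ * arrays are carved out of one buffer sequentially via ui32NextOffset,
+ * which must land back on this total before the buffer is freed (see the
+ * PVR_ASSERT on the exit path). */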
+
+
+
+
+ psRGXCreateHWRTDataOUT->hCleanupCookie = NULL;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateHWRTDataIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateHWRTDataIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateHWRTData_exit;
+ }
+ }
+ }
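+
+ /* The branch above is an allocation-avoidance scheme: the bridge input
+ * buffer is PVRSRV_MAX_BRIDGE_IN_SIZE bytes, so any word-aligned slack
+ * after the fixed IN structure can hold the array arguments directly, and
+ * a heap buffer is taken only when they do not fit; the conditional
+ * OSFreeMemNoStats() at the end of the function frees only the heap case.
+ * Illustrative carving, for two hypothetical arrays A and B:
+ *     pA = pArrayArgsBuffer + 0;               ui32NextOffset += nA * sizeof(*pA);
+ *     pB = pArrayArgsBuffer + ui32NextOffset;  ui32NextOffset += nB * sizeof(*pB);
+ */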
+
+
+ {
+ psapsFreeListsInt = (RGX_FREELIST **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(RGX_FREELIST *);
+ hapsFreeListsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hapsFreeListsInt2, (const void __user *) psRGXCreateHWRTDataIN->phapsFreeLists, RGXFW_MAX_FREELISTS * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXCreateHWRTDataOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateHWRTData_exit;
+ }
+ }
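+ /* As with every copy-in in this file, a failed OSCopyFromUser() is
+ * reported uniformly as PVRSRV_ERROR_INVALID_PARAMS. */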
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXCreateHWRTDataOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psapsFreeListsInt[i],
+ hapsFreeListsInt2[i],
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ IMG_TRUE);
+ if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateHWRTDataOUT->eError =
+ RGXCreateHWRTData(psConnection, OSGetDevData(psConnection),
+ psRGXCreateHWRTDataIN->ui32RenderTarget,
+ psRGXCreateHWRTDataIN->sPMMlistDevVAddr,
+ psapsFreeListsInt,
+ &psCleanupCookieInt,
+ &psRTACtlMemDescInt,
+ psRGXCreateHWRTDataIN->ui32PPPScreen,
+ psRGXCreateHWRTDataIN->ui32PPPGridOffset,
+ psRGXCreateHWRTDataIN->ui64PPPMultiSampleCtl,
+ psRGXCreateHWRTDataIN->ui32TPCStride,
+ psRGXCreateHWRTDataIN->sTailPtrsDevVAddr,
+ psRGXCreateHWRTDataIN->ui32TPCSize,
+ psRGXCreateHWRTDataIN->ui32TEScreen,
+ psRGXCreateHWRTDataIN->ui32TEAA,
+ psRGXCreateHWRTDataIN->ui32TEMTILE1,
+ psRGXCreateHWRTDataIN->ui32TEMTILE2,
+ psRGXCreateHWRTDataIN->ui32MTileStride,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerX,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeLowerY,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperX,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeUpperY,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleX,
+ psRGXCreateHWRTDataIN->ui32ui32ISPMergeScaleY,
+ psRGXCreateHWRTDataIN->ui16MaxRTs,
+ &pssHWRTDataMemDescInt,
+ &psRGXCreateHWRTDataOUT->ui32FWHWRTData);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateHWRTData_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateHWRTDataOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateHWRTDataOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyHWRTData);
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
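+
+ /* The handle just allocated registered RGXDestroyHWRTData as its release
+ * function, so destroying the handle now tears down the RT data as well;
+ * the error path at the exit label therefore calls RGXDestroyHWRTData
+ * directly only when this allocation never happened. */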
+
+
+
+
+
+
+ psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateHWRTDataOUT->hRTACtlMemDesc,
+ (void *) psRTACtlMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psRGXCreateHWRTDataOUT->hCleanupCookie);
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+
+
+
+
+
+
+ psRGXCreateHWRTDataOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateHWRTDataOUT->hsHWRTDataMemDesc,
+ (void *) pssHWRTDataMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psRGXCreateHWRTDataOUT->hCleanupCookie);
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateHWRTData_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateHWRTData_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ if (hapsFreeListsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<RGXFW_MAX_FREELISTS;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hapsFreeListsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hapsFreeListsInt2[i],
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateHWRTDataOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psRGXCreateHWRTDataOUT->hCleanupCookie)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXCreateHWRTDataOUT->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXCreateHWRTData: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psCleanupCookieInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyHWRTData(psCleanupCookieInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
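+/*
+ * The destroy bridges in this file only release the caller's handle; the
+ * actual teardown runs through the PFN_HANDLE_RELEASE callback registered
+ * when the handle was created. PVRSRV_ERROR_RETRY is deliberately not
+ * treated as a failure, presumably so that cleanup deferred by the server
+ * can be retried later.
+ */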
+static IMG_INT
+PVRSRVBridgeRGXDestroyHWRTData(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYHWRTDATA *psRGXDestroyHWRTDataOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyHWRTDataOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyHWRTDataIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+ if ((psRGXDestroyHWRTDataOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyHWRTDataOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyHWRTData: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyHWRTDataOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyHWRTData_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyHWRTData_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERENDERTARGET *psRGXCreateRenderTargetIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERENDERTARGET *psRGXCreateRenderTargetOUT,
+ CONNECTION_DATA *psConnection)
+{
+ RGX_RT_CLEANUP_DATA * pssRenderTargetMemDescInt = NULL;
+
+
+
+
+
+
+
+
+ psRGXCreateRenderTargetOUT->eError =
+ RGXCreateRenderTarget(psConnection, OSGetDevData(psConnection),
+ psRGXCreateRenderTargetIN->spsVHeapTableDevVAddr,
+ &pssRenderTargetMemDescInt,
+ &psRGXCreateRenderTargetOUT->ui32sRenderTargetFWDevVAddr);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRenderTarget_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateRenderTargetOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateRenderTargetOUT->hsRenderTargetMemDesc,
+ (void *) pssRenderTargetMemDescInt,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyRenderTarget);
+ if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRenderTarget_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRenderTarget_exit:
+
+
+
+ if (psRGXCreateRenderTargetOUT->eError != PVRSRV_OK)
+ {
+ if (pssRenderTargetMemDescInt)
+ {
+ RGXDestroyRenderTarget(pssRenderTargetMemDescInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderTarget(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERTARGET *psRGXDestroyRenderTargetOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRenderTargetOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRenderTargetIN->hsRenderTargetMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_RENDERTARGET);
+ if ((psRGXDestroyRenderTargetOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRenderTargetOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRenderTarget: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRenderTargetOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyRenderTarget_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRenderTarget_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEZSBUFFER *psRGXCreateZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEZSBUFFER *psRGXCreateZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hReservation = psRGXCreateZSBufferIN->hReservation;
+ DEVMEMINT_RESERVATION * psReservationInt = NULL;
+ IMG_HANDLE hPMR = psRGXCreateZSBufferIN->hPMR;
+ PMR * psPMRInt = NULL;
+ RGX_ZSBUFFER_DATA * pssZSBufferKMInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psReservationInt,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION,
+ IMG_TRUE);
+ if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateZSBuffer_exit;
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRInt,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateZSBuffer_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
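+
+ /* Both lookups above passed IMG_TRUE, which appears to take a reference
+ * on the underlying object; the matching PVRSRVReleaseHandleUnlocked()
+ * calls on the exit path drop those references again. */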
+
+ psRGXCreateZSBufferOUT->eError =
+ RGXCreateZSBufferKM(psConnection, OSGetDevData(psConnection),
+ psReservationInt,
+ psPMRInt,
+ psRGXCreateZSBufferIN->uiMapFlags,
+ &pssZSBufferKMInt,
+ &psRGXCreateZSBufferOUT->ui32sZSBufferFWDevVAddr);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateZSBufferOUT->hsZSBufferKM,
+ (void *) pssZSBufferKMInt,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyZSBufferKM);
+ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateZSBuffer_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateZSBuffer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psReservationInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hReservation,
+ PVRSRV_HANDLE_TYPE_DEVMEMINT_RESERVATION);
+ }
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ if (pssZSBufferKMInt)
+ {
+ RGXDestroyZSBufferKM(pssZSBufferKMInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYZSBUFFER *psRGXDestroyZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyZSBufferOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyZSBufferIN->hsZSBufferMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ if ((psRGXDestroyZSBufferOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyZSBuffer: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyZSBufferOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyZSBuffer_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyZSBuffer_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXPopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXPOPULATEZSBUFFER *psRGXPopulateZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hsZSBufferKM = psRGXPopulateZSBufferIN->hsZSBufferKM;
+ RGX_ZSBUFFER_DATA * pssZSBufferKMInt = NULL;
+ RGX_POPULATION * pssPopulationInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXPopulateZSBufferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssZSBufferKMInt,
+ hsZSBufferKM,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXPopulateZSBuffer_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXPopulateZSBufferOUT->eError =
+ RGXPopulateZSBufferKM(
+ pssZSBufferKMInt,
+ &pssPopulationInt);
+ /* Exit early if bridged call fails */
+ if(psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ goto RGXPopulateZSBuffer_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXPopulateZSBufferOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXPopulateZSBufferOUT->hsPopulation,
+ (void *) pssPopulationInt,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXUnpopulateZSBufferKM);
+ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXPopulateZSBuffer_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXPopulateZSBuffer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(pssZSBufferKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsZSBufferKM,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXPopulateZSBufferOUT->eError != PVRSRV_OK)
+ {
+ if (pssPopulationInt)
+ {
+ RGXUnpopulateZSBufferKM(pssPopulationInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXUnpopulateZSBuffer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferIN,
+ PVRSRV_BRIDGE_OUT_RGXUNPOPULATEZSBUFFER *psRGXUnpopulateZSBufferOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXUnpopulateZSBufferOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXUnpopulateZSBufferIN->hsPopulation,
+ PVRSRV_HANDLE_TYPE_RGX_POPULATION);
+ if ((psRGXUnpopulateZSBufferOUT->eError != PVRSRV_OK) &&
+ (psRGXUnpopulateZSBufferOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXUnpopulateZSBuffer: %s",
+ PVRSRVGetErrorStringKM(psRGXUnpopulateZSBufferOUT->eError)));
+ UnlockHandle();
+ goto RGXUnpopulateZSBuffer_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXUnpopulateZSBuffer_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATEFREELIST *psRGXCreateFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATEFREELIST *psRGXCreateFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hsGlobalFreeList = psRGXCreateFreeListIN->hsGlobalFreeList;
+ RGX_FREELIST * pssGlobalFreeListInt = NULL;
+ IMG_HANDLE hsFreeListPMR = psRGXCreateFreeListIN->hsFreeListPMR;
+ PMR * pssFreeListPMRInt = NULL;
+ RGX_FREELIST * psCleanupCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ if (psRGXCreateFreeListIN->hsGlobalFreeList)
+ {
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssGlobalFreeListInt,
+ hsGlobalFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ IMG_TRUE);
+ if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateFreeList_exit;
+ }
+ }
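+
+ /* hsGlobalFreeList is optional: a NULL handle skips the lookup and leaves
+ * pssGlobalFreeListInt NULL, which the implementation presumably treats
+ * as "no parent free list". */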
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateFreeListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pssFreeListPMRInt,
+ hsFreeListPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateFreeList_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateFreeListOUT->eError =
+ RGXCreateFreeList(psConnection, OSGetDevData(psConnection),
+ psRGXCreateFreeListIN->ui32ui32MaxFLPages,
+ psRGXCreateFreeListIN->ui32ui32InitFLPages,
+ psRGXCreateFreeListIN->ui32ui32GrowFLPages,
+ psRGXCreateFreeListIN->ui32ui32GrowParamThreshold,
+ pssGlobalFreeListInt,
+ psRGXCreateFreeListIN->bbFreeListCheck,
+ psRGXCreateFreeListIN->spsFreeListDevVAddr,
+ pssFreeListPMRInt,
+ psRGXCreateFreeListIN->uiPMROffset,
+ &psCleanupCookieInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateFreeListOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateFreeListOUT->hCleanupCookie,
+ (void *) psCleanupCookieInt,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RGXDestroyFreeList);
+ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateFreeList_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateFreeList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+ if (psRGXCreateFreeListIN->hsGlobalFreeList)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(pssGlobalFreeListInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsGlobalFreeList,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ }
+ }
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(pssFreeListPMRInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hsFreeListPMR,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateFreeListOUT->eError != PVRSRV_OK)
+ {
+ if (psCleanupCookieInt)
+ {
+ RGXDestroyFreeList(psCleanupCookieInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyFreeList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYFREELIST *psRGXDestroyFreeListIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYFREELIST *psRGXDestroyFreeListOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyFreeListOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyFreeListIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_FREELIST);
+ if ((psRGXDestroyFreeListOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyFreeListOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyFreeList: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyFreeListOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyFreeList_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyFreeList_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCreateRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATERENDERCONTEXT *psRGXCreateRenderContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateRenderContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateRenderContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateRenderContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, (const void __user *) psRGXCreateRenderContextIN->psFrameworkCmd, psRGXCreateRenderContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateRenderContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateRenderContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXCreateRenderContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRenderContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateRenderContextOUT->eError =
+ PVRSRVRGXCreateRenderContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateRenderContextIN->ui32Priority,
+ psRGXCreateRenderContextIN->sVDMCallStackAddr,
+ psRGXCreateRenderContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psRenderContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateRenderContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psRGXCreateRenderContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRGXCreateRenderContextOUT->hRenderContext,
+ (void *) psRenderContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyRenderContextKM);
+ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateRenderContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateRenderContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+ /* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateRenderContextOUT->eError != PVRSRV_OK)
+ {
+ if (psRenderContextInt)
+ {
+ PVRSRVRGXDestroyRenderContextKM(psRenderContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyRenderContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYRENDERCONTEXT *psRGXDestroyRenderContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psRGXDestroyRenderContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyRenderContextIN->hCleanupCookie,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ if ((psRGXDestroyRenderContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyRenderContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyRenderContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyRenderContextOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyRenderContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyRenderContext_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXKickTA3D(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXKICKTA3D *psRGXKickTA3DIN,
+ PVRSRV_BRIDGE_OUT_RGXKICKTA3D *psRGXKickTA3DOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXKickTA3DIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientTAFenceSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientTAFenceSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientTAFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientTAFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClientTAUpdateSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClientTAUpdateSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32ClientTAUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32ClientTAUpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerTASyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerTASyncsInt = NULL;
+ IMG_HANDLE *hServerTASyncsInt2 = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClient3DFenceSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClient3DFenceSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32Client3DFenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32Client3DFenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psClient3DUpdateSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hClient3DUpdateSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32Client3DUpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32Client3DUpdateValueInt = NULL;
+ IMG_UINT32 *ui32Server3DSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServer3DSyncsInt = NULL;
+ IMG_HANDLE *hServer3DSyncsInt2 = NULL;
+ IMG_HANDLE hPRFenceUFOSyncPrimBlock = psRGXKickTA3DIN->hPRFenceUFOSyncPrimBlock;
+ SYNC_PRIMITIVE_BLOCK * psPRFenceUFOSyncPrimBlockInt = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_CHAR *uiUpdateFenceName3DInt = NULL;
+ IMG_BYTE *psTACmdInt = NULL;
+ IMG_BYTE *ps3DPRCmdInt = NULL;
+ IMG_BYTE *ps3DCmdInt = NULL;
+ IMG_HANDLE hRTDataCleanup = psRGXKickTA3DIN->hRTDataCleanup;
+ RGX_RTDATA_CLEANUP_DATA * psRTDataCleanupInt = NULL;
+ IMG_HANDLE hZBuffer = psRGXKickTA3DIN->hZBuffer;
+ RGX_ZSBUFFER_DATA * psZBufferInt = NULL;
+ IMG_HANDLE hSBuffer = psRGXKickTA3DIN->hSBuffer;
+ RGX_ZSBUFFER_DATA * psSBufferInt = NULL;
+ IMG_HANDLE hMSAAScratchBuffer = psRGXKickTA3DIN->hMSAAScratchBuffer;
+ RGX_ZSBUFFER_DATA * psMSAAScratchBufferInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+ PMR * *psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) +
+ (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) +
+ (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) +
+ (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *)) +
+ (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+ 0;
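+
+ /* This kick marshals many parallel arrays: each count above contributes a
+ * kernel-pointer array plus the matching handle array (and, for client
+ * syncs, offset and value arrays), all carved from the single buffer
+ * below. The two fence-name buffers are fixed at PVRSRV_SYNC_NAME_LENGTH
+ * characters and are always copied. */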
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXKickTA3DIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXKickTA3DIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+ {
+ psClientTAFenceSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientTAFenceSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientTAFenceSyncPrimBlockInt2, (const void __user *) psRGXKickTA3DIN->phClientTAFenceSyncPrimBlock, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+ {
+ ui32ClientTAFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAFenceSyncOffsetInt, (const void __user *) psRGXKickTA3DIN->pui32ClientTAFenceSyncOffset, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount != 0)
+ {
+ ui32ClientTAFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAFenceValueInt, (const void __user *) psRGXKickTA3DIN->pui32ClientTAFenceValue, psRGXKickTA3DIN->ui32ClientTAFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+ {
+ psClientTAUpdateSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClientTAUpdateSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClientTAUpdateSyncPrimBlockInt2, (const void __user *) psRGXKickTA3DIN->phClientTAUpdateSyncPrimBlock, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+ {
+ ui32ClientTAUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAUpdateSyncOffsetInt, (const void __user *) psRGXKickTA3DIN->pui32ClientTAUpdateSyncOffset, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount != 0)
+ {
+ ui32ClientTAUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientTAUpdateValueInt, (const void __user *) psRGXKickTA3DIN->pui32ClientTAUpdateValue, psRGXKickTA3DIN->ui32ClientTAUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+ {
+ ui32ServerTASyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerTASyncFlagsInt, (const void __user *) psRGXKickTA3DIN->pui32ServerTASyncFlags, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims != 0)
+ {
+ psServerTASyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerTASyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerTASyncsInt2, (const void __user *) psRGXKickTA3DIN->phServerTASyncs, psRGXKickTA3DIN->ui32ServerTASyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+ {
+ psClient3DFenceSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClient3DFenceSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClient3DFenceSyncPrimBlockInt2, (const void __user *) psRGXKickTA3DIN->phClient3DFenceSyncPrimBlock, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+ {
+ ui32Client3DFenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DFenceSyncOffsetInt, (const void __user *) psRGXKickTA3DIN->pui32Client3DFenceSyncOffset, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount != 0)
+ {
+ ui32Client3DFenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DFenceValueInt, (const void __user *) psRGXKickTA3DIN->pui32Client3DFenceValue, psRGXKickTA3DIN->ui32Client3DFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+ {
+ psClient3DUpdateSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hClient3DUpdateSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hClient3DUpdateSyncPrimBlockInt2, (const void __user *) psRGXKickTA3DIN->phClient3DUpdateSyncPrimBlock, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+ {
+ ui32Client3DUpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DUpdateSyncOffsetInt, (const void __user *) psRGXKickTA3DIN->pui32Client3DUpdateSyncOffset, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount != 0)
+ {
+ ui32Client3DUpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Client3DUpdateValueInt, (const void __user *) psRGXKickTA3DIN->pui32Client3DUpdateValue, psRGXKickTA3DIN->ui32Client3DUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+ {
+ ui32Server3DSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32Server3DSyncFlagsInt, (const void __user *) psRGXKickTA3DIN->pui32Server3DSyncFlags, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims != 0)
+ {
+ psServer3DSyncsInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServer3DSyncsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServer3DSyncsInt2, (const void __user *) psRGXKickTA3DIN->phServer3DSyncs, psRGXKickTA3DIN->ui32Server3DSyncPrims * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXKickTA3DIN->puiUpdateFenceName, PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+ {
+ uiUpdateFenceName3DInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceName3DInt, (const void __user *) psRGXKickTA3DIN->puiUpdateFenceName3D, PVRSRV_SYNC_NAME_LENGTH * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32TACmdSize != 0)
+ {
+ psTACmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psTACmdInt, (const void __user *) psRGXKickTA3DIN->psTACmd, psRGXKickTA3DIN->ui32TACmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui323DPRCmdSize != 0)
+ {
+ ps3DPRCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ps3DPRCmdInt, (const void __user *) psRGXKickTA3DIN->ps3DPRCmd, psRGXKickTA3DIN->ui323DPRCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui323DCmdSize != 0)
+ {
+ ps3DCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ps3DCmdInt, (const void __user *) psRGXKickTA3DIN->ps3DCmd, psRGXKickTA3DIN->ui323DCmdSize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, (const void __user *) psRGXKickTA3DIN->pui32SyncPMRFlags, psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
+ if (psRGXKickTA3DIN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncPMRsInt2, (const void __user *) psRGXKickTA3DIN->phSyncPMRs, psRGXKickTA3DIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXKickTA3DOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXKickTA3D_exit;
+ }
+ }
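+
+ /* All user-space copies are complete at this point; the handle lock is
+ * only taken afterwards, presumably so that user memory is never faulted
+ * in while the lock is held. */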
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientTAFenceSyncPrimBlockInt[i],
+ hClientTAFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClientTAUpdateSyncPrimBlockInt[i],
+ hClientTAUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerTASyncsInt[i],
+ hServerTASyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClient3DFenceSyncPrimBlockInt[i],
+ hClient3DFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psClient3DUpdateSyncPrimBlockInt[i],
+ hClient3DUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServer3DSyncsInt[i],
+ hServer3DSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+
+
+
+
+
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPRFenceUFOSyncPrimBlockInt,
+ hPRFenceUFOSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hRTDataCleanup)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRTDataCleanupInt,
+ hRTDataCleanup,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hZBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psZBufferInt,
+ hZBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hSBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSBufferInt,
+ hSBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ if (psRGXKickTA3DIN->hMSAAScratchBuffer)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psMSAAScratchBufferInt,
+ hMSAAScratchBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32SyncPMRCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXKickTA3DOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXKickTA3DOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXKickTA3D_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXKickTA3DOUT->eError =
+ PVRSRVRGXKickTA3DKM(
+ psRenderContextInt,
+ psRGXKickTA3DIN->ui32ClientCacheOpSeqNum,
+ psRGXKickTA3DIN->ui32ClientTAFenceCount,
+ psClientTAFenceSyncPrimBlockInt,
+ ui32ClientTAFenceSyncOffsetInt,
+ ui32ClientTAFenceValueInt,
+ psRGXKickTA3DIN->ui32ClientTAUpdateCount,
+ psClientTAUpdateSyncPrimBlockInt,
+ ui32ClientTAUpdateSyncOffsetInt,
+ ui32ClientTAUpdateValueInt,
+ psRGXKickTA3DIN->ui32ServerTASyncPrims,
+ ui32ServerTASyncFlagsInt,
+ psServerTASyncsInt,
+ psRGXKickTA3DIN->ui32Client3DFenceCount,
+ psClient3DFenceSyncPrimBlockInt,
+ ui32Client3DFenceSyncOffsetInt,
+ ui32Client3DFenceValueInt,
+ psRGXKickTA3DIN->ui32Client3DUpdateCount,
+ psClient3DUpdateSyncPrimBlockInt,
+ ui32Client3DUpdateSyncOffsetInt,
+ ui32Client3DUpdateValueInt,
+ psRGXKickTA3DIN->ui32Server3DSyncPrims,
+ ui32Server3DSyncFlagsInt,
+ psServer3DSyncsInt,
+ psPRFenceUFOSyncPrimBlockInt,
+ psRGXKickTA3DIN->ui32FRFenceUFOSyncOffset,
+ psRGXKickTA3DIN->ui32FRFenceValue,
+ psRGXKickTA3DIN->hCheckFence,
+ psRGXKickTA3DIN->hUpdateTimeline,
+ &psRGXKickTA3DOUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXKickTA3DIN->hCheckFence3D,
+ psRGXKickTA3DIN->hUpdateTimeline3D,
+ &psRGXKickTA3DOUT->hUpdateFence3D,
+ uiUpdateFenceName3DInt,
+ psRGXKickTA3DIN->ui32TACmdSize,
+ psTACmdInt,
+ psRGXKickTA3DIN->ui323DPRCmdSize,
+ ps3DPRCmdInt,
+ psRGXKickTA3DIN->ui323DCmdSize,
+ ps3DCmdInt,
+ psRGXKickTA3DIN->ui32ExtJobRef,
+ psRGXKickTA3DIN->bbLastTAInScene,
+ psRGXKickTA3DIN->bbKickTA,
+ psRGXKickTA3DIN->bbKickPR,
+ psRGXKickTA3DIN->bbKick3D,
+ psRGXKickTA3DIN->bbAbort,
+ psRGXKickTA3DIN->ui32PDumpFlags,
+ psRTDataCleanupInt,
+ psZBufferInt,
+ psSBufferInt,
+ psMSAAScratchBufferInt,
+ psRGXKickTA3DIN->bbCommitRefCountsTA,
+ psRGXKickTA3DIN->bbCommitRefCounts3D,
+ &psRGXKickTA3DOUT->bbCommittedRefCountsTA,
+ &psRGXKickTA3DOUT->bbCommittedRefCounts3D,
+ psRGXKickTA3DIN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt,
+ psRGXKickTA3DIN->ui32RenderTargetSize,
+ psRGXKickTA3DIN->ui32NumberOfDrawCalls,
+ psRGXKickTA3DIN->ui32NumberOfIndices,
+ psRGXKickTA3DIN->ui32NumberOfMRTs,
+ psRGXKickTA3DIN->ui64Deadline,
+ psRGXKickTA3DIN->ssRobustnessResetReason);
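+
+ /* Unlike the create bridges, no output handle is allocated here, so
+ * control falls straight through to the exit label whether or not the
+ * kick succeeded; the exit path only drops the lookup references and
+ * frees the marshalling buffer. */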
+
+
+
+
+RGXKickTA3D_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+
+
+
+
+
+
+ if (hClientTAFenceSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientTAFenceSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientTAFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+
+
+
+
+ if (hClientTAUpdateSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ClientTAUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClientTAUpdateSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClientTAUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hServerTASyncsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32ServerTASyncPrims;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerTASyncsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerTASyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+
+ if (hClient3DFenceSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClient3DFenceSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClient3DFenceSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hClient3DUpdateSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Client3DUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hClient3DUpdateSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hClient3DUpdateSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hServer3DSyncsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32Server3DSyncPrims;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServer3DSyncsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServer3DSyncsInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+
+ /* Unreference the previously looked up handle */
+ if(psPRFenceUFOSyncPrimBlockInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPRFenceUFOSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+
+ if (psRGXKickTA3DIN->hRTDataCleanup)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(psRTDataCleanupInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRTDataCleanup,
+ PVRSRV_HANDLE_TYPE_RGX_RTDATA_CLEANUP);
+ }
+ }
+
+ if (psRGXKickTA3DIN->hZBuffer)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(psZBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hZBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+ if (psRGXKickTA3DIN->hSBuffer)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(psSBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+ if (psRGXKickTA3DIN->hMSAAScratchBuffer)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(psMSAAScratchBufferInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hMSAAScratchBuffer,
+ PVRSRV_HANDLE_TYPE_RGX_FWIF_ZSBUFFER);
+ }
+ }
+
+ if (hSyncPMRsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXKickTA3DIN->ui32SyncPMRCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hSyncPMRsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
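+	/*
+	 * The scratch buffer is freed only when it was heap-allocated: on
+	 * INTEGRITY_OS the in-buffer reuse path is compiled out, so any
+	 * non-NULL pointer is a heap allocation; otherwise the buffer is
+	 * freed only if the arrays did not fit in the bridge input buffer.
+	 */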
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetRenderContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETRENDERCONTEXTPRIORITY *psRGXSetRenderContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXSetRenderContextPriorityIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXSetRenderContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetRenderContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetRenderContextPriority_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetRenderContextPriorityOUT->eError =
+ PVRSRVRGXSetRenderContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psRenderContextInt,
+ psRGXSetRenderContextPriorityIN->ui32Priority);
+
+RGXSetRenderContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetLastRenderContextResetReason(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonIN,
+ PVRSRV_BRIDGE_OUT_RGXGETLASTRENDERCONTEXTRESETREASON *psRGXGetLastRenderContextResetReasonOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXGetLastRenderContextResetReasonIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXGetLastRenderContextResetReasonOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXGetLastRenderContextResetReasonOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXGetLastRenderContextResetReason_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXGetLastRenderContextResetReasonOUT->eError =
+ PVRSRVRGXGetLastRenderContextResetReasonKM(
+ psRenderContextInt,
+ &psRGXGetLastRenderContextResetReasonOUT->ui32LastResetReason,
+ &psRGXGetLastRenderContextResetReasonOUT->ui32LastResetJobRef);
+
+RGXGetLastRenderContextResetReason_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXGetPartialRenderCount(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountIN,
+ PVRSRV_BRIDGE_OUT_RGXGETPARTIALRENDERCOUNT *psRGXGetPartialRenderCountOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hHWRTDataMemDesc = psRGXGetPartialRenderCountIN->hHWRTDataMemDesc;
+ DEVMEM_MEMDESC * psHWRTDataMemDescInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXGetPartialRenderCountOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psHWRTDataMemDescInt,
+ hHWRTDataMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC,
+ IMG_TRUE);
+ if(psRGXGetPartialRenderCountOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXGetPartialRenderCount_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXGetPartialRenderCountOUT->eError =
+ PVRSRVRGXGetPartialRenderCountKM(
+ psHWRTDataMemDescInt,
+ &psRGXGetPartialRenderCountOUT->ui32NumPartialRenders);
+
+RGXGetPartialRenderCount_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psHWRTDataMemDescInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hHWRTDataMemDesc,
+ PVRSRV_HANDLE_TYPE_RGX_FW_MEMDESC);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXRenderContextStalled(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledIN,
+ PVRSRV_BRIDGE_OUT_RGXRENDERCONTEXTSTALLED *psRGXRenderContextStalledOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRenderContext = psRGXRenderContextStalledIN->hRenderContext;
+ RGX_SERVER_RENDER_CONTEXT * psRenderContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXRenderContextStalledOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRenderContextInt,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT,
+ IMG_TRUE);
+ if(psRGXRenderContextStalledOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXRenderContextStalled_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXRenderContextStalledOUT->eError =
+ RGXRenderContextStalledKM(
+ psRenderContextInt);
+
+RGXRenderContextStalled_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psRenderContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRenderContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_RENDER_CONTEXT);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
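+/*
+ * bUseLock is passed to SetDispatchTableEntry() for every RGXTA3D entry
+ * point below; it appears to ask the dispatcher to serialise these calls
+ * under the global bridge lock, and the NULL argument in each registration
+ * looks like an unused optional hook in this generated table.
+ */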
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTA3DBridge(void);
+PVRSRV_ERROR DeinitRGXTA3DBridge(void);
+
+/*
+ * Register all RGXTA3D functions with services
+ */
+PVRSRV_ERROR InitRGXTA3DBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA, PVRSRVBridgeRGXCreateHWRTData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA, PVRSRVBridgeRGXDestroyHWRTData,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET, PVRSRVBridgeRGXCreateRenderTarget,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET, PVRSRVBridgeRGXDestroyRenderTarget,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER, PVRSRVBridgeRGXCreateZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER, PVRSRVBridgeRGXDestroyZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER, PVRSRVBridgeRGXPopulateZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER, PVRSRVBridgeRGXUnpopulateZSBuffer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST, PVRSRVBridgeRGXCreateFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST, PVRSRVBridgeRGXDestroyFreeList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT, PVRSRVBridgeRGXCreateRenderContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT, PVRSRVBridgeRGXDestroyRenderContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D, PVRSRVBridgeRGXKickTA3D,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY, PVRSRVBridgeRGXSetRenderContextPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON, PVRSRVBridgeRGXGetLastRenderContextResetReason,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT, PVRSRVBridgeRGXGetPartialRenderCount,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED, PVRSRVBridgeRGXRenderContextStalled,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTA3D functions from services
+ */
+PVRSRV_ERROR DeinitRGXTA3DBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEHWRTDATA);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYHWRTDATA);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERTARGET);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERTARGET);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEZSBUFFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYZSBUFFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXPOPULATEZSBUFFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXUNPOPULATEZSBUFFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATEFREELIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYFREELIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXCREATERENDERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXDESTROYRENDERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXKICKTA3D);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXSETRENDERCONTEXTPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETLASTRENDERCONTEXTRESETREASON);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXGETPARTIALRENDERCOUNT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTA3D, PVRSRV_BRIDGE_RGXTA3D_RGXRENDERCONTEXTSTALLED);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxtq2_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxtq2_bridge.c
new file mode 100644
index 00000000000000..3ae0f550763b31
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxtq2_bridge.c
@@ -0,0 +1,1111 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxtq2
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxtq2
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtdmtransfer.h"
+
+
+#include "common_rgxtq2_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+#include "rgx_bvnc_defs_km.h"
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXTDMCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMCREATETRANSFERCONTEXT *psRGXTDMCreateTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXTDMCreateTransferContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
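+	/*
+	 * Every RGXTQ2 entry point is gated on RGX_FEATURE_FASTRENDER_DM and
+	 * fails with PVRSRV_ERROR_NOT_SUPPORTED on cores that lack the
+	 * feature; devices that provide no pfnCheckDeviceFeature hook are
+	 * assumed to support it.
+	 */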
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+
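+	/*
+	 * Marshalling strategy: if the variable-length arrays fit in the
+	 * unused tail of the fixed-size bridge input buffer they are placed
+	 * there, avoiding an allocation; otherwise a heap buffer is used.
+	 * Illustrative layout (sizes hypothetical): with a 64-byte input
+	 * struct and 8-byte alignment the scratch area starts at offset 64,
+	 * leaving PVRSRV_MAX_BRIDGE_IN_SIZE - 64 bytes available for reuse.
+	 */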
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMCreateTransferContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXTDMCreateTransferContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+ }
+
+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, (const void __user *) psRGXTDMCreateTransferContextIN->psFrameworkCmd, psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMCreateTransferContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMCreateTransferContextOUT->eError =
+ PVRSRVRGXTDMCreateTransferContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXTDMCreateTransferContextIN->ui32Priority,
+ psRGXTDMCreateTransferContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psTransferContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
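+	/*
+	 * The KM destroy function is registered as the handle release
+	 * callback, so the transfer context is torn down through the handle
+	 * framework when the handle is finally released.
+	 */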
+ psRGXTDMCreateTransferContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXTDMCreateTransferContextOUT->hTransferContext,
+ (void *) psTransferContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXTDMDestroyTransferContextKM);
+ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMCreateTransferContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXTDMCreateTransferContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
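+	/*
+	 * If anything failed after the KM create succeeded (e.g. the handle
+	 * allocation), no handle owns the context yet, so it is destroyed
+	 * directly rather than through the handle release path.
+	 */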
+ if (psRGXTDMCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ if (psTransferContextInt)
+ {
+ PVRSRVRGXTDMDestroyTransferContextKM(psTransferContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMDESTROYTRANSFERCONTEXT *psRGXTDMDestroyTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMDestroyTransferContextOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMDestroyTransferContext_exit;
+ }
+ }
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
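+	/*
+	 * Destruction is driven through the handle framework: releasing the
+	 * handle invokes the release callback registered at creation time.
+	 * PVRSRV_ERROR_RETRY is tolerated below, presumably so a still-busy
+	 * context can be retried rather than reported as a failure.
+	 */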
+ psRGXTDMDestroyTransferContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXTDMDestroyTransferContextIN->hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ if ((psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_OK) &&
+ (psRGXTDMDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXTDMDestroyTransferContext: %s",
+ PVRSRVGetErrorStringKM(psRGXTDMDestroyTransferContextOUT->eError)));
+ UnlockHandle();
+ goto RGXTDMDestroyTransferContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXTDMDestroyTransferContext_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMSUBMITTRANSFER *psRGXTDMSubmitTransferIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMSUBMITTRANSFER *psRGXTDMSubmitTransferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXTDMSubmitTransferIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32FenceSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32FenceValueInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * *psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE *hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 *ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncFlagsInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+ IMG_HANDLE *hServerSyncInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_UINT8 *ui8FWCommandInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+ PMR * *psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) +
+ (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+ (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+ 0;
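+	/*
+	 * All variable-length inputs are packed back-to-back into a single
+	 * scratch buffer: ui32NextOffset advances as each array is carved out
+	 * below, and the PVR_ASSERT at the end checks that exactly
+	 * ui32BufferSize bytes were consumed. The trailing "+ 0" presumably
+	 * keeps the generated sum well-formed when no terms are emitted.
+	 */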
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXTDMSubmitTransferIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXTDMSubmitTransferIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+ {
+ psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hFenceUFOSyncPrimBlockInt2, (const void __user *) psRGXTDMSubmitTransferIN->phFenceUFOSyncPrimBlock, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceSyncOffsetInt, (const void __user *) psRGXTDMSubmitTransferIN->pui32FenceSyncOffset, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount != 0)
+ {
+ ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceValueInt, (const void __user *) psRGXTDMSubmitTransferIN->pui32FenceValue, psRGXTDMSubmitTransferIN->ui32ClientFenceCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+ {
+ psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hUpdateUFOSyncPrimBlockInt2, (const void __user *) psRGXTDMSubmitTransferIN->phUpdateUFOSyncPrimBlock, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateSyncOffsetInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateSyncOffsetInt, (const void __user *) psRGXTDMSubmitTransferIN->pui32UpdateSyncOffset, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount != 0)
+ {
+ ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateValueInt, (const void __user *) psRGXTDMSubmitTransferIN->pui32UpdateValue, psRGXTDMSubmitTransferIN->ui32ClientUpdateCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerSyncFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncFlagsInt, (const void __user *) psRGXTDMSubmitTransferIN->pui32ServerSyncFlags, psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncInt2, (const void __user *) psRGXTDMSubmitTransferIN->phServerSync, psRGXTDMSubmitTransferIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
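+	/* The update fence name is a fixed 32-character field (presumably
+	 * including NUL termination), so it is reserved and copied
+	 * unconditionally below, regardless of the other array counts. */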
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXTDMSubmitTransferIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32CommandSize != 0)
+ {
+ ui8FWCommandInt = (IMG_UINT8*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui8FWCommandInt, (const void __user *) psRGXTDMSubmitTransferIN->pui8FWCommand, psRGXTDMSubmitTransferIN->ui32CommandSize * sizeof(IMG_UINT8)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, (const void __user *) psRGXTDMSubmitTransferIN->pui32SyncPMRFlags, psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncPMRsInt2, (const void __user *) psRGXTDMSubmitTransferIN->phSyncPMRs, psRGXTDMSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXTDMSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientFenceCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psFenceUFOSyncPrimBlockInt[i],
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psUpdateUFOSyncPrimBlockInt[i],
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ServerSyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i],
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXTDMSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSubmitTransfer_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMSubmitTransferOUT->eError =
+ PVRSRVRGXTDMSubmitTransferKM(
+ psTransferContextInt,
+ psRGXTDMSubmitTransferIN->ui32PDumpFlags,
+ psRGXTDMSubmitTransferIN->ui32ClientCacheOpSeqNum,
+ psRGXTDMSubmitTransferIN->ui32ClientFenceCount,
+ psFenceUFOSyncPrimBlockInt,
+ ui32FenceSyncOffsetInt,
+ ui32FenceValueInt,
+ psRGXTDMSubmitTransferIN->ui32ClientUpdateCount,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ psRGXTDMSubmitTransferIN->ui32ServerSyncCount,
+ ui32ServerSyncFlagsInt,
+ psServerSyncInt,
+ psRGXTDMSubmitTransferIN->hCheckFenceFD,
+ psRGXTDMSubmitTransferIN->hUpdateTimeline,
+ &psRGXTDMSubmitTransferOUT->hUpdateFence,
+ uiUpdateFenceNameInt,
+ psRGXTDMSubmitTransferIN->ui32CommandSize,
+ ui8FWCommandInt,
+ psRGXTDMSubmitTransferIN->ui32ExternalJobReference,
+ psRGXTDMSubmitTransferIN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt);
+
+RGXTDMSubmitTransfer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+
+ if (hFenceUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientFenceCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hFenceUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hFenceUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+ if (hUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ClientUpdateCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hUpdateUFOSyncPrimBlockInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+ if (hServerSyncInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32ServerSyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+
+
+ if (hSyncPMRsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXTDMSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hSyncPMRsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMSETTRANSFERCONTEXTPRIORITY *psRGXTDMSetTransferContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXTDMSetTransferContextPriorityIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMSetTransferContextPriorityOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMSetTransferContextPriority_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXTDMSetTransferContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXTDMSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMSetTransferContextPriority_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMSetTransferContextPriorityOUT->eError =
+ PVRSRVRGXTDMSetTransferContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psTransferContextInt,
+ psRGXTDMSetTransferContextPriorityIN->ui32Priority);
+
+RGXTDMSetTransferContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateIN,
+ PVRSRV_BRIDGE_OUT_RGXTDMNOTIFYWRITEOFFSETUPDATE *psRGXTDMNotifyWriteOffsetUpdateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXTDMNotifyWriteOffsetUpdateIN->hTransferContext;
+ RGX_SERVER_TQ_TDM_CONTEXT * psTransferContextInt = NULL;
+
+
+ {
+ PVRSRV_DEVICE_NODE *psDeviceNode = OSGetDevData(psConnection);
+
+ /* Check that device supports the required feature */
+ if ((psDeviceNode->pfnCheckDeviceFeature) &&
+ !psDeviceNode->pfnCheckDeviceFeature(psDeviceNode, RGX_FEATURE_FASTRENDER_DM_BIT_MASK))
+ {
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError = PVRSRV_ERROR_NOT_SUPPORTED;
+
+ goto RGXTDMNotifyWriteOffsetUpdate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT,
+ IMG_TRUE);
+ if(psRGXTDMNotifyWriteOffsetUpdateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXTDMNotifyWriteOffsetUpdate_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXTDMNotifyWriteOffsetUpdateOUT->eError =
+ PVRSRVRGXTDMNotifyWriteOffsetUpdateKM(
+ psTransferContextInt,
+ psRGXTDMNotifyWriteOffsetUpdateIN->ui32PDumpFlags);
+
+RGXTDMNotifyWriteOffsetUpdate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_TDM_CONTEXT);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRGXTQ2Bridge(void);
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void);
+
+/*
+ * Register all RGXTQ2 functions with services
+ */
+PVRSRV_ERROR InitRGXTQ2Bridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT, PVRSRVBridgeRGXTDMCreateTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXTDMDestroyTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER, PVRSRVBridgeRGXTDMSubmitTransfer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXTDMSetTransferContextPriority,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE, PVRSRVBridgeRGXTDMNotifyWriteOffsetUpdate,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ2 functions from services
+ */
+PVRSRV_ERROR DeinitRGXTQ2Bridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMCREATETRANSFERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMDESTROYTRANSFERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSUBMITTRANSFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMSETTRANSFERCONTEXTPRIORITY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ2, PVRSRV_BRIDGE_RGXTQ2_RGXTDMNOTIFYWRITEOFFSETUPDATE);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_rgxtq_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_rgxtq_bridge.c
new file mode 100644
index 00000000000000..1580aa8412488a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_rgxtq_bridge.c
@@ -0,0 +1,1418 @@
+/*******************************************************************************
+@File
+@Title Server bridge for rgxtq
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for rgxtq
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtransfer.h"
+
+
+#include "common_rgxtq_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXCreateTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXCREATETRANSFERCONTEXT *psRGXCreateTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_BYTE *psFrameworkCmdInt = NULL;
+ IMG_HANDLE hPrivData = psRGXCreateTransferContextIN->hPrivData;
+ IMG_HANDLE hPrivDataInt = NULL;
+ RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXCreateTransferContextIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXCreateTransferContextIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXCreateTransferContext_exit;
+ }
+ }
+ }
+
+ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize != 0)
+ {
+ psFrameworkCmdInt = (IMG_BYTE*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE);
+ }
+
+ /* Copy the data over */
+ if (psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, psFrameworkCmdInt, (const void __user *) psRGXCreateTransferContextIN->psFrameworkCmd, psRGXCreateTransferContextIN->ui32FrameworkCmdize * sizeof(IMG_BYTE)) != PVRSRV_OK )
+ {
+ psRGXCreateTransferContextOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXCreateTransferContext_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXCreateTransferContextOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hPrivDataInt,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA,
+ IMG_TRUE);
+ if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateTransferContext_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXCreateTransferContextOUT->eError =
+ PVRSRVRGXCreateTransferContextKM(psConnection, OSGetDevData(psConnection),
+ psRGXCreateTransferContextIN->ui32Priority,
+ psRGXCreateTransferContextIN->ui32FrameworkCmdize,
+ psFrameworkCmdInt,
+ hPrivDataInt,
+ &psTransferContextInt);
+ /* Exit early if bridged call fails */
+ if(psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ goto RGXCreateTransferContext_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+ psRGXCreateTransferContextOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+ &psRGXCreateTransferContextOUT->hTransferContext,
+ (void *) psTransferContextInt,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVRGXDestroyTransferContextKM);
+ if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXCreateTransferContext_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RGXCreateTransferContext_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(hPrivDataInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPrivData,
+ PVRSRV_HANDLE_TYPE_DEV_PRIV_DATA);
+ }
+	/* Release now we have cleaned up the looked-up handles. */
+ UnlockHandle();
+
+ if (psRGXCreateTransferContextOUT->eError != PVRSRV_OK)
+ {
+ if (psTransferContextInt)
+ {
+ PVRSRVRGXDestroyTransferContextKM(psTransferContextInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXDestroyTransferContext(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextIN,
+ PVRSRV_BRIDGE_OUT_RGXDESTROYTRANSFERCONTEXT *psRGXDestroyTransferContextOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+ psRGXDestroyTransferContextOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRGXDestroyTransferContextIN->hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+ if ((psRGXDestroyTransferContextOUT->eError != PVRSRV_OK) &&
+ (psRGXDestroyTransferContextOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRGXDestroyTransferContext: %s",
+ PVRSRVGetErrorStringKM(psRGXDestroyTransferContextOUT->eError)));
+ UnlockHandle();
+ goto RGXDestroyTransferContext_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RGXDestroyTransferContext_exit:
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSubmitTransfer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSUBMITTRANSFER *psRGXSubmitTransferIN,
+ PVRSRV_BRIDGE_OUT_RGXSUBMITTRANSFER *psRGXSubmitTransferOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXSubmitTransferIN->hTransferContext;
+ RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+ IMG_UINT32 *ui32ClientFenceCountInt = NULL;
+ SYNC_PRIMITIVE_BLOCK * **psFenceUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE **hFenceUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 **ui32FenceSyncOffsetInt = NULL;
+ IMG_UINT32 **ui32FenceValueInt = NULL;
+ IMG_UINT32 *ui32ClientUpdateCountInt = NULL;
+	SYNC_PRIMITIVE_BLOCK ***psUpdateUFOSyncPrimBlockInt = NULL;
+ IMG_HANDLE **hUpdateUFOSyncPrimBlockInt2 = NULL;
+ IMG_UINT32 **ui32UpdateSyncOffsetInt = NULL;
+ IMG_UINT32 **ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerSyncCountInt = NULL;
+ IMG_UINT32 **ui32ServerSyncFlagsInt = NULL;
+	SERVER_SYNC_PRIMITIVE ***psServerSyncInt = NULL;
+ IMG_HANDLE **hServerSyncInt2 = NULL;
+ IMG_CHAR *uiUpdateFenceNameInt = NULL;
+ IMG_UINT32 *ui32CommandSizeInt = NULL;
+ IMG_UINT8 **ui8FWCommandInt = NULL;
+ IMG_UINT32 *ui32TQPrepareFlagsInt = NULL;
+ IMG_UINT32 *ui32SyncPMRFlagsInt = NULL;
+	PMR **psSyncPMRsInt = NULL;
+ IMG_HANDLE *hSyncPMRsInt2 = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+ IMG_BYTE *pArrayArgsBuffer2 = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (32 * sizeof(IMG_CHAR)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) +
+ (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *)) +
+ (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) +
+ 0;
+ IMG_UINT32 ui32BufferSize2 = 0;
+ IMG_UINT32 ui32NextOffset2 = 0;
+
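+	/* Array arguments are marshalled in two passes: pArrayArgsBuffer holds
+	 * the per-prepare counts and the first-dimension pointer tables, and
+	 * once those counts have been copied in, pArrayArgsBuffer2 is sized and
+	 * carved up for the per-prepare (second-dimension) element arrays.
+	 */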
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE **);
+ ui32BufferSize += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8*);
+ }
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRGXSubmitTransferIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRGXSubmitTransferIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
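+	/* Note: on the non-INTEGRITY path pArrayArgsBuffer may alias the unused
+	 * tail of the bridge input message rather than a fresh allocation, which
+	 * is why the exit path only frees it when bHaveEnoughSpace is false.
+	 */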
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32ClientFenceCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientFenceCountInt, (const void __user *) psRGXSubmitTransferIN->pui32ClientFenceCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+ psFenceUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ /* Assigning hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+ hFenceUFOSyncPrimBlockInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32FenceSyncOffsetInt to the right offset in the pool buffer for first dimension */
+ ui32FenceSyncOffsetInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32FenceValueInt to the right offset in the pool buffer for first dimension */
+ ui32FenceValueInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32ClientUpdateCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ClientUpdateCountInt, (const void __user *) psRGXSubmitTransferIN->pui32ClientUpdateCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer for first dimension */
+ psUpdateUFOSyncPrimBlockInt = (SYNC_PRIMITIVE_BLOCK ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SYNC_PRIMITIVE_BLOCK **);
+ /* Assigning hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer for first dimension */
+ hUpdateUFOSyncPrimBlockInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32UpdateSyncOffsetInt to the right offset in the pool buffer for first dimension */
+ ui32UpdateSyncOffsetInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32UpdateValueInt to the right offset in the pool buffer for first dimension */
+ ui32UpdateValueInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32ServerSyncCountInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerSyncCountInt, (const void __user *) psRGXSubmitTransferIN->pui32ServerSyncCount, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui32ServerSyncFlagsInt to the right offset in the pool buffer for first dimension */
+ ui32ServerSyncFlagsInt = (IMG_UINT32**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning psServerSyncInt to the right offset in the pool buffer for first dimension */
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE ***)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(SERVER_SYNC_PRIMITIVE **);
+ /* Assigning hServerSyncInt2 to the right offset in the pool buffer for first dimension */
+ hServerSyncInt2 = (IMG_HANDLE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_HANDLE);
+ }
+
+
+ {
+ uiUpdateFenceNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += 32 * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (32 * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiUpdateFenceNameInt, (const void __user *) psRGXSubmitTransferIN->puiUpdateFenceName, 32 * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32CommandSizeInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32CommandSizeInt, (const void __user *) psRGXSubmitTransferIN->pui32CommandSize, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ /* Assigning ui8FWCommandInt to the right offset in the pool buffer for first dimension */
+ ui8FWCommandInt = (IMG_UINT8**)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT8*);
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ ui32TQPrepareFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32TQPrepareFlagsInt, (const void __user *) psRGXSubmitTransferIN->pui32TQPrepareFlags, psRGXSubmitTransferIN->ui32PrepareCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ ui32SyncPMRFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncPMRFlagsInt, (const void __user *) psRGXSubmitTransferIN->pui32SyncPMRFlags, psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount != 0)
+ {
+ psSyncPMRsInt = (PMR **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(PMR *);
+ hSyncPMRsInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncPMRsInt2, (const void __user *) psRGXSubmitTransferIN->phSyncPMRs, psRGXSubmitTransferIN->ui32SyncPMRCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+
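+	/* The per-prepare element counts are now in kernel memory, so the size
+	 * of the second-level buffer can be computed from them.
+	 */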
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i = 0; i < psRGXSubmitTransferIN->ui32PrepareCount; i++)
+ {
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE *);
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE *);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+ ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);
+ ui32BufferSize2 += ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE *);
+ ui32BufferSize2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+ }
+ }
+
+ if (ui32BufferSize2 != 0)
+ {
+ pArrayArgsBuffer2 = OSAllocMemNoStats(ui32BufferSize2);
+
+ if(!pArrayArgsBuffer2)
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each psFenceUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+ psFenceUFOSyncPrimBlockInt[i] = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ /* Assigning each hFenceUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+ hFenceUFOSyncPrimBlockInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32FenceSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32FenceSyncOffsetInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32FenceValueInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32FenceValueInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each psUpdateUFOSyncPrimBlockInt to the right offset in the pool buffer (this is the second dimension) */
+ psUpdateUFOSyncPrimBlockInt[i] = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ /* Assigning each hUpdateUFOSyncPrimBlockInt2 to the right offset in the pool buffer (this is the second dimension) */
+ hUpdateUFOSyncPrimBlockInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32UpdateSyncOffsetInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32UpdateSyncOffsetInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32UpdateValueInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32UpdateValueInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui32ServerSyncFlagsInt to the right offset in the pool buffer (this is the second dimension) */
+ ui32ServerSyncFlagsInt[i] = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each psServerSyncInt to the right offset in the pool buffer (this is the second dimension) */
+ psServerSyncInt[i] = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(SERVER_SYNC_PRIMITIVE *);
+ /* Assigning each hServerSyncInt2 to the right offset in the pool buffer (this is the second dimension) */
+ hServerSyncInt2[i] = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE);
+ }
+ }
+ if (psRGXSubmitTransferIN->ui32PrepareCount != 0)
+ {
+ IMG_UINT32 i;
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Assigning each ui8FWCommandInt to the right offset in the pool buffer (this is the second dimension) */
+ ui8FWCommandInt[i] = (IMG_UINT8*)(((IMG_UINT8 *)pArrayArgsBuffer2) + ui32NextOffset2);
+ ui32NextOffset2 += ui32CommandSizeInt[i] * sizeof(IMG_UINT8);
+ }
+ }
+
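+	/* Each input field below is an array of user pointers, one per prepare:
+	 * the pointer itself is copied into the kernel first, then dereferenced
+	 * to copy that prepare's element array into the matching
+	 * second-dimension slot.
+	 */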
+ {
+ IMG_UINT32 i;
+ IMG_HANDLE **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->phFenceUFOSyncPrimBlock[i],
+ sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (hFenceUFOSyncPrimBlockInt2[i]), (const void __user *) psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->pui32FenceSyncOffset[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32FenceSyncOffsetInt[i]), (const void __user *) psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->pui32FenceValue[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32FenceValueInt[i]), (const void __user *) psPtr, (ui32ClientFenceCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_HANDLE **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->phUpdateUFOSyncPrimBlock[i],
+ sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (hUpdateUFOSyncPrimBlockInt2[i]), (const void __user *) psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->pui32UpdateSyncOffset[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32UpdateSyncOffsetInt[i]), (const void __user *) psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->pui32UpdateValue[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32UpdateValueInt[i]), (const void __user *) psPtr, (ui32ClientUpdateCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->pui32ServerSyncFlags[i],
+ sizeof(IMG_UINT32 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui32ServerSyncFlagsInt[i]), (const void __user *) psPtr, (ui32ServerSyncCountInt[i] * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_HANDLE **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->phServerSync[i],
+ sizeof(IMG_HANDLE **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (hServerSyncInt2[i]), (const void __user *) psPtr, (ui32ServerSyncCountInt[i] * sizeof(IMG_HANDLE))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+ IMG_UINT8 **psPtr;
+
+ /* Loop over all the pointers in the array copying the data into the kernel */
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ /* Copy the pointer over from the client side */
+ if ( OSCopyFromUser(NULL, &psPtr, (const void __user *) &psRGXSubmitTransferIN->pui8FWCommand[i],
+ sizeof(IMG_UINT8 **)) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+
+ /* Copy the data over */
+ if ((ui32CommandSizeInt[i] * sizeof(IMG_UINT8)) > 0)
+ {
+ if ( OSCopyFromUser(NULL, (ui8FWCommandInt[i]), (const void __user *) psPtr, (ui32CommandSizeInt[i] * sizeof(IMG_UINT8))) != PVRSRV_OK )
+ {
+ psRGXSubmitTransferOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientFenceCountInt[i];j++)
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psFenceUFOSyncPrimBlockInt[i][j],
+ hFenceUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientUpdateCountInt[i];j++)
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psUpdateUFOSyncPrimBlockInt[i][j],
+ hUpdateUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ServerSyncCountInt[i];j++)
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i][j],
+ hServerSyncInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ }
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+ /* Look up the address from the handle */
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncPMRsInt[i],
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRGXSubmitTransferOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSubmitTransfer_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
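+
+	/* The handles looked up above stay referenced while the transfer is
+	 * submitted below; they are released again in the exit path.
+	 */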
+
+ psRGXSubmitTransferOUT->eError =
+ PVRSRVRGXSubmitTransferKM(
+ psTransferContextInt,
+ psRGXSubmitTransferIN->ui32ClientCacheOpSeqNum,
+ psRGXSubmitTransferIN->ui32PrepareCount,
+ ui32ClientFenceCountInt,
+ psFenceUFOSyncPrimBlockInt,
+ ui32FenceSyncOffsetInt,
+ ui32FenceValueInt,
+ ui32ClientUpdateCountInt,
+ psUpdateUFOSyncPrimBlockInt,
+ ui32UpdateSyncOffsetInt,
+ ui32UpdateValueInt,
+ ui32ServerSyncCountInt,
+ ui32ServerSyncFlagsInt,
+ psServerSyncInt,
+ psRGXSubmitTransferIN->hCheckFenceFD,
+ psRGXSubmitTransferIN->h2DUpdateTimeline,
+ &psRGXSubmitTransferOUT->h2DUpdateFence,
+ psRGXSubmitTransferIN->h3DUpdateTimeline,
+ &psRGXSubmitTransferOUT->h3DUpdateFence,
+ uiUpdateFenceNameInt,
+ ui32CommandSizeInt,
+ ui8FWCommandInt,
+ ui32TQPrepareFlagsInt,
+ psRGXSubmitTransferIN->ui32ExtJobRef,
+ psRGXSubmitTransferIN->ui32SyncPMRCount,
+ ui32SyncPMRFlagsInt,
+ psSyncPMRsInt);
+
+
+
+
+RGXSubmitTransfer_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+ }
+
+ if (hFenceUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientFenceCountInt[i];j++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hFenceUFOSyncPrimBlockInt2[i][j])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hFenceUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+ if (hUpdateUFOSyncPrimBlockInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ClientUpdateCountInt[i];j++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hUpdateUFOSyncPrimBlockInt2[i][j])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hUpdateUFOSyncPrimBlockInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+ }
+
+ if (hServerSyncInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32PrepareCount;i++)
+ {
+ IMG_UINT32 j;
+ for (j=0;j<ui32ServerSyncCountInt[i];j++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncInt2[i][j])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i][j],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ }
+
+ if (hSyncPMRsInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psRGXSubmitTransferIN->ui32SyncPMRCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hSyncPMRsInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncPMRsInt2[i],
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize2 == ui32NextOffset2);
+
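+	/* Unlike pArrayArgsBuffer, pArrayArgsBuffer2 is always a separate heap
+	 * allocation, so it is freed unconditionally.
+	 */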
+ if(pArrayArgsBuffer2)
+ OSFreeMemNoStats(pArrayArgsBuffer2);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXSetTransferContextPriority(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityIN,
+ PVRSRV_BRIDGE_OUT_RGXSETTRANSFERCONTEXTPRIORITY *psRGXSetTransferContextPriorityOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hTransferContext = psRGXSetTransferContextPriorityIN->hTransferContext;
+ RGX_SERVER_TQ_CONTEXT * psTransferContextInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRGXSetTransferContextPriorityOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psTransferContextInt,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT,
+ IMG_TRUE);
+ if(psRGXSetTransferContextPriorityOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RGXSetTransferContextPriority_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRGXSetTransferContextPriorityOUT->eError =
+ PVRSRVRGXSetTransferContextPriorityKM(psConnection, OSGetDevData(psConnection),
+ psTransferContextInt,
+ psRGXSetTransferContextPriorityIN->ui32Priority);
+
+
+
+
+RGXSetTransferContextPriority_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psTransferContextInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hTransferContext,
+ PVRSRV_HANDLE_TYPE_RGX_SERVER_TQ_CONTEXT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
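+/* Each entry below is registered with bUseLock = IMG_TRUE, i.e. the
+ * dispatcher is asked to call it with the global bridge lock held.
+ */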
+
+PVRSRV_ERROR InitRGXTQBridge(void);
+PVRSRV_ERROR DeinitRGXTQBridge(void);
+
+/*
+ * Register all RGXTQ functions with services
+ */
+PVRSRV_ERROR InitRGXTQBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT, PVRSRVBridgeRGXCreateTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT, PVRSRVBridgeRGXDestroyTransferContext,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER, PVRSRVBridgeRGXSubmitTransfer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY, PVRSRVBridgeRGXSetTransferContextPriority,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RGXTQ functions from services
+ */
+PVRSRV_ERROR DeinitRGXTQBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXCREATETRANSFERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXDESTROYTRANSFERCONTEXT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSUBMITTRANSFER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RGXTQ, PVRSRV_BRIDGE_RGXTQ_RGXSETTRANSFERCONTEXTPRIORITY);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_ri_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_ri_bridge.c
new file mode 100644
index 00000000000000..0ea39cd536ce38
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_ri_bridge.c
@@ -0,0 +1,853 @@
+/*******************************************************************************
+@File
+@Title Server bridge for ri
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for ri
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "ri_server.h"
+
+
+#include "common_ri_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRY *psRIWritePMREntryIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRY *psRIWritePMREntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIWritePMREntryIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRIWritePMREntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIWritePMREntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWritePMREntry_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIWritePMREntryOUT->eError =
+ RIWritePMREntryKM(
+ psPMRHandleInt);
+
+
+
+
+RIWritePMREntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWriteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEMEMDESCENTRY *psRIWriteMEMDESCEntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIWriteMEMDESCEntryIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+ IMG_CHAR *uiTextBInt = NULL;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) +
+ 0;
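+	/* The only variable-length argument for this call is the TextB string. */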
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteMEMDESCEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWriteMEMDESCEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ }
+ }
+
+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize != 0)
+ {
+ uiTextBInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextBInt, (const void __user *) psRIWriteMEMDESCEntryIN->puiTextB, psRIWriteMEMDESCEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRIWriteMEMDESCEntryOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWriteMEMDESCEntry_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIWriteMEMDESCEntryOUT->eError =
+ RIWriteMEMDESCEntryKM(
+ psPMRHandleInt,
+ psRIWriteMEMDESCEntryIN->ui32TextBSize,
+ uiTextBInt,
+ psRIWriteMEMDESCEntryIN->ui64Offset,
+ psRIWriteMEMDESCEntryIN->ui64Size,
+ psRIWriteMEMDESCEntryIN->bIsImport,
+ psRIWriteMEMDESCEntryIN->bIsSuballoc,
+ &psRIHandleInt);
+ /* Exit early if bridged call fails */
+ if(psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+ psRIWriteMEMDESCEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRIWriteMEMDESCEntryOUT->hRIHandle,
+ (void *) psRIHandleInt,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWriteMEMDESCEntry_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RIWriteMEMDESCEntry_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psRIWriteMEMDESCEntryOUT->eError != PVRSRV_OK)
+ {
+ if (psRIHandleInt)
+ {
+ RIDeleteMEMDESCEntryKM(psRIHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWriteProcListEntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEPROCLISTENTRY *psRIWriteProcListEntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiTextBInt = NULL;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) +
+ 0;
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psRIWriteProcListEntryIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psRIWriteProcListEntryIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto RIWriteProcListEntry_exit;
+ }
+ }
+ }
+
+ if (psRIWriteProcListEntryIN->ui32TextBSize != 0)
+ {
+ uiTextBInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiTextBInt, (const void __user *) psRIWriteProcListEntryIN->puiTextB, psRIWriteProcListEntryIN->ui32TextBSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psRIWriteProcListEntryOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto RIWriteProcListEntry_exit;
+ }
+ }
+
+
+ psRIWriteProcListEntryOUT->eError =
+ RIWriteProcListEntryKM(
+ psRIWriteProcListEntryIN->ui32TextBSize,
+ uiTextBInt,
+ psRIWriteProcListEntryIN->ui64Size,
+ psRIWriteProcListEntryIN->ui64DevVAddr,
+ &psRIHandleInt);
+ /* Exit early if bridged call fails */
+ if(psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ goto RIWriteProcListEntry_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
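+	/* Proc-list RI entries are released with the same RIDeleteMEMDESCEntryKM
+	 * callback as MEMDESC entries.
+	 */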
+ psRIWriteProcListEntryOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psRIWriteProcListEntryOUT->hRIHandle,
+ (void *) psRIHandleInt,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&RIDeleteMEMDESCEntryKM);
+ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWriteProcListEntry_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+RIWriteProcListEntry_exit:
+
+
+
+ if (psRIWriteProcListEntryOUT->eError != PVRSRV_OK)
+ {
+ if (psRIHandleInt)
+ {
+ RIDeleteMEMDESCEntryKM(psRIHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIUpdateMEMDESCAddr(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrIN,
+ PVRSRV_BRIDGE_OUT_RIUPDATEMEMDESCADDR *psRIUpdateMEMDESCAddrOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hRIHandle = psRIUpdateMEMDESCAddrIN->hRIHandle;
+ RI_HANDLE psRIHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRIUpdateMEMDESCAddrOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psRIHandleInt,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE,
+ IMG_TRUE);
+ if(psRIUpdateMEMDESCAddrOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIUpdateMEMDESCAddr_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIUpdateMEMDESCAddrOUT->eError =
+ RIUpdateMEMDESCAddrKM(
+ psRIHandleInt,
+ psRIUpdateMEMDESCAddrIN->sAddr);
+
+
+
+
+RIUpdateMEMDESCAddr_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psRIHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDeleteMEMDESCEntry(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryIN,
+ PVRSRV_BRIDGE_OUT_RIDELETEMEMDESCENTRY *psRIDeleteMEMDESCEntryOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+ psRIDeleteMEMDESCEntryOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psRIDeleteMEMDESCEntryIN->hRIHandle,
+ PVRSRV_HANDLE_TYPE_RI_HANDLE);
+ if ((psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_OK) &&
+ (psRIDeleteMEMDESCEntryOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeRIDeleteMEMDESCEntry: %s",
+ PVRSRVGetErrorStringKM(psRIDeleteMEMDESCEntryOUT->eError)));
+ UnlockHandle();
+ goto RIDeleteMEMDESCEntry_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+RIDeleteMEMDESCEntry_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpList(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDUMPLIST *psRIDumpListIN,
+ PVRSRV_BRIDGE_OUT_RIDUMPLIST *psRIDumpListOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIDumpListIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRIDumpListOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIDumpListOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIDumpList_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIDumpListOUT->eError =
+ RIDumpListKM(
+ psPMRHandleInt);
+
+
+
+
+RIDumpList_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpAll(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDUMPALL *psRIDumpAllIN,
+ PVRSRV_BRIDGE_OUT_RIDUMPALL *psRIDumpAllOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psRIDumpAllIN);
+
+ psRIDumpAllOUT->eError =
+ RIDumpAllKM(
+ );
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIDumpProcess(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIDUMPPROCESS *psRIDumpProcessIN,
+ PVRSRV_BRIDGE_OUT_RIDUMPPROCESS *psRIDumpProcessOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psRIDumpProcessOUT->eError =
+ RIDumpProcessKM(
+ psRIDumpProcessIN->ui32Pid);
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRIWritePMREntryWithOwner(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerIN,
+ PVRSRV_BRIDGE_OUT_RIWRITEPMRENTRYWITHOWNER *psRIWritePMREntryWithOwnerOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hPMRHandle = psRIWritePMREntryWithOwnerIN->hPMRHandle;
+ PMR * psPMRHandleInt = NULL;
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+ /* Look up the address from the handle */
+ psRIWritePMREntryWithOwnerOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psPMRHandleInt,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR,
+ IMG_TRUE);
+ if(psRIWritePMREntryWithOwnerOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto RIWritePMREntryWithOwner_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psRIWritePMREntryWithOwnerOUT->eError =
+ RIWritePMREntryWithOwnerKM(
+ psPMRHandleInt,
+ psRIWritePMREntryWithOwnerIN->ui32Owner);
+
+
+
+
+RIWritePMREntryWithOwner_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+ /* Unreference the previously looked up handle */
+ if(psPMRHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hPMRHandle,
+ PVRSRV_HANDLE_TYPE_PHYSMEM_PMR);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitRIBridge(void);
+PVRSRV_ERROR DeinitRIBridge(void);
+
+/*
+ * Register all RI functions with services
+ */
+PVRSRV_ERROR InitRIBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY, PVRSRVBridgeRIWritePMREntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY, PVRSRVBridgeRIWriteMEMDESCEntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY, PVRSRVBridgeRIWriteProcListEntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR, PVRSRVBridgeRIUpdateMEMDESCAddr,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY, PVRSRVBridgeRIDeleteMEMDESCEntry,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST, PVRSRVBridgeRIDumpList,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL, PVRSRVBridgeRIDumpAll,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS, PVRSRVBridgeRIDumpProcess,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER, PVRSRVBridgeRIWritePMREntryWithOwner,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all RI functions from services
+ */
+PVRSRV_ERROR DeinitRIBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEMEMDESCENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPROCLISTENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIUPDATEMEMDESCADDR);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDELETEMEMDESCENTRY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPLIST);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPALL);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIDUMPPROCESS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_RI, PVRSRV_BRIDGE_RI_RIWRITEPMRENTRYWITHOWNER);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_srvcore_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_srvcore_bridge.c
new file mode 100644
index 00000000000000..92f595396dfb2f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_srvcore_bridge.c
@@ -0,0 +1,1098 @@
+/*******************************************************************************
+@File
+@Title Server bridge for srvcore
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for srvcore
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "srvcore.h"
+#include "info_page.h"
+
+
+#include "common_srvcore_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeConnect(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_CONNECT *psConnectIN,
+ PVRSRV_BRIDGE_OUT_CONNECT *psConnectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psConnectOUT->eError =
+ PVRSRVConnectKM(psConnection, OSGetDevData(psConnection),
+ psConnectIN->ui32Flags,
+ psConnectIN->ui32ClientBuildOptions,
+ psConnectIN->ui32ClientDDKVersion,
+ psConnectIN->ui32ClientDDKBuild,
+ &psConnectOUT->ui8KernelArch,
+ &psConnectOUT->ui32CapabilityFlags,
+ &psConnectOUT->ui32PVRBridges,
+ &psConnectOUT->ui32RGXBridges);
+
+
+
+
+
+
+
+
+ return 0;
+}
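
Every handler in this file shares the shape seen in PVRSRVBridgeConnect() above: a fixed four-argument signature, an IN packet the handler only reads, an OUT packet whose eError field carries the kernel-side result, and an IMG_INT return of 0 regardless of that result, since errors travel back to userspace in the OUT packet rather than the C return code. A minimal sketch under those assumptions (in_connect, out_connect, and server_connect are illustrative names, not the driver's types):

#include <stdio.h>

/* Hypothetical IN/OUT packets in the style of the generated bridge:
 * the kernel-side error travels in the OUT packet, not the return value. */
typedef struct { unsigned flags; } in_connect;
typedef struct { int error; unsigned caps; } out_connect;

static int server_connect(unsigned flags, unsigned *caps)
{
	(void)flags;
	*caps = 0xC0FFEE;   /* stand-in for real capability flags */
	return 0;           /* 0 == success, mirrors PVRSRV_OK    */
}

/* The entry point itself never fails at the C level; it reports
 * success or failure through out->error, as the real handlers do. */
static int bridge_connect(const in_connect *in, out_connect *out)
{
	out->error = server_connect(in->flags, &out->caps);
	return 0;
}

int main(void)
{
	in_connect in = { .flags = 1 };
	out_connect out;

	bridge_connect(&in, &out);
	printf("error=%d caps=0x%X\n", out.error, out.caps);
	return 0;
}
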
+
+
+static IMG_INT
+PVRSRVBridgeDisconnect(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DISCONNECT *psDisconnectIN,
+ PVRSRV_BRIDGE_OUT_DISCONNECT *psDisconnectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDisconnectIN);
+
+
+
+
+
+ psDisconnectOUT->eError =
+ PVRSRVDisconnectKM(
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeAcquireGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectIN,
+ PVRSRV_BRIDGE_OUT_ACQUIREGLOBALEVENTOBJECT *psAcquireGlobalEventObjectOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hGlobalEventObjectInt = NULL;
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psAcquireGlobalEventObjectIN);
+
+
+
+
+
+ psAcquireGlobalEventObjectOUT->eError =
+ PVRSRVAcquireGlobalEventObjectKM(
+ &hGlobalEventObjectInt);
+ /* Exit early if bridged call fails */
+ if(psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ goto AcquireGlobalEventObject_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psAcquireGlobalEventObjectOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psAcquireGlobalEventObjectOUT->hGlobalEventObject,
+ (void *) hGlobalEventObjectInt,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVReleaseGlobalEventObjectKM);
+ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AcquireGlobalEventObject_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+AcquireGlobalEventObject_exit:
+
+
+
+ if (psAcquireGlobalEventObjectOUT->eError != PVRSRV_OK)
+ {
+ if (hGlobalEventObjectInt)
+ {
+ PVRSRVReleaseGlobalEventObjectKM(hGlobalEventObjectInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeReleaseGlobalEventObject(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectIN,
+ PVRSRV_BRIDGE_OUT_RELEASEGLOBALEVENTOBJECT *psReleaseGlobalEventObjectOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psReleaseGlobalEventObjectOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psReleaseGlobalEventObjectIN->hGlobalEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ if ((psReleaseGlobalEventObjectOUT->eError != PVRSRV_OK) &&
+ (psReleaseGlobalEventObjectOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeReleaseGlobalEventObject: %s",
+ PVRSRVGetErrorStringKM(psReleaseGlobalEventObjectOUT->eError)));
+ UnlockHandle();
+ goto ReleaseGlobalEventObject_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+ReleaseGlobalEventObject_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectOpen(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTOPEN *psEventObjectOpenIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTOPEN *psEventObjectOpenOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hEventObject = psEventObjectOpenIN->hEventObject;
+ IMG_HANDLE hEventObjectInt = NULL;
+ IMG_HANDLE hOSEventInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psEventObjectOpenOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hEventObjectInt,
+ hEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT,
+ IMG_TRUE);
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectOpen_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psEventObjectOpenOUT->eError =
+ OSEventObjectOpen(
+ hEventObjectInt,
+ &hOSEventInt);
+ /* Exit early if bridged call fails */
+ if(psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ goto EventObjectOpen_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psEventObjectOpenOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psEventObjectOpenOUT->hOSEvent,
+ (void *) hOSEventInt,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&OSEventObjectClose);
+ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectOpen_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+EventObjectOpen_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hEventObjectInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hEventObject,
+ PVRSRV_HANDLE_TYPE_SHARED_EVENT_OBJECT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psEventObjectOpenOUT->eError != PVRSRV_OK)
+ {
+ if (hOSEventInt)
+ {
+ OSEventObjectClose(hOSEventInt);
+ }
+ }
+
+
+ return 0;
+}
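
PVRSRVBridgeEventObjectOpen() above shows the handle discipline used throughout: take the global handle lock, call the *Unlocked lookup/alloc primitives, drop the lock before calling into the server function, and, on any failure after the raw object already exists, close that object by hand so nothing leaks. A toy version of that bracketing, assuming a pthread mutex and an array-backed table (all names hypothetical; link with -lpthread):

#include <pthread.h>
#include <stdio.h>

/* A toy handle table guarded by one lock, mirroring the
 * LockHandle() / *Unlocked call / UnlockHandle() bracketing above. */
static pthread_mutex_t handle_lock = PTHREAD_MUTEX_INITIALIZER;

#define MAX_HANDLES 4
static void *handle_table[MAX_HANDLES];

static int alloc_handle_unlocked(void *res, unsigned *out)
{
	unsigned i;

	for (i = 0; i < MAX_HANDLES; i++) {
		if (!handle_table[i]) {
			handle_table[i] = res;
			*out = i;
			return 0;
		}
	}
	return -1;
}

static void *lookup_handle_unlocked(unsigned h)
{
	return (h < MAX_HANDLES) ? handle_table[h] : NULL;
}

int main(void)
{
	int dummy_resource = 42;
	unsigned h;
	int err;

	pthread_mutex_lock(&handle_lock);       /* LockHandle()   */
	err = alloc_handle_unlocked(&dummy_resource, &h);
	pthread_mutex_unlock(&handle_lock);     /* UnlockHandle() */
	if (err) {
		/* On failure the caller releases the raw resource itself,
		 * just as EventObjectOpen closes hOSEventInt on error. */
		return 1;
	}

	pthread_mutex_lock(&handle_lock);
	printf("handle %u -> %p\n", h, lookup_handle_unlocked(h));
	pthread_mutex_unlock(&handle_lock);
	return 0;
}
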
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectWait(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAIT *psEventObjectWaitIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAIT *psEventObjectWaitOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hOSEventKM = psEventObjectWaitIN->hOSEventKM;
+ IMG_HANDLE hOSEventKMInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psEventObjectWaitOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hOSEventKMInt,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ IMG_TRUE);
+ if(psEventObjectWaitOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectWait_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psEventObjectWaitOUT->eError =
+ OSEventObjectWait(
+ hOSEventKMInt);
+
+
+
+
+EventObjectWait_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hOSEventKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectClose(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTCLOSE *psEventObjectCloseIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTCLOSE *psEventObjectCloseOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psEventObjectCloseOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psEventObjectCloseIN->hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ if ((psEventObjectCloseOUT->eError != PVRSRV_OK) &&
+ (psEventObjectCloseOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeEventObjectClose: %s",
+ PVRSRVGetErrorStringKM(psEventObjectCloseOUT->eError)));
+ UnlockHandle();
+ goto EventObjectClose_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+EventObjectClose_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeDumpDebugInfo(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_DUMPDEBUGINFO *psDumpDebugInfoIN,
+ PVRSRV_BRIDGE_OUT_DUMPDEBUGINFO *psDumpDebugInfoOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psDumpDebugInfoOUT->eError =
+ PVRSRVDumpDebugInfoKM(psConnection, OSGetDevData(psConnection),
+ psDumpDebugInfoIN->ui32ui32VerbLevel);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeGetDevClockSpeed(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_GETDEVCLOCKSPEED *psGetDevClockSpeedIN,
+ PVRSRV_BRIDGE_OUT_GETDEVCLOCKSPEED *psGetDevClockSpeedOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psGetDevClockSpeedIN);
+
+
+
+
+
+ psGetDevClockSpeedOUT->eError =
+ PVRSRVGetDevClockSpeedKM(psConnection, OSGetDevData(psConnection),
+ &psGetDevClockSpeedOUT->ui32ui32ClockSpeed);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeHWOpTimeout(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_HWOPTIMEOUT *psHWOpTimeoutIN,
+ PVRSRV_BRIDGE_OUT_HWOPTIMEOUT *psHWOpTimeoutOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psHWOpTimeoutIN);
+
+
+
+
+
+ psHWOpTimeoutOUT->eError =
+ PVRSRVHWOpTimeoutKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeAlignmentCheck(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ALIGNMENTCHECK *psAlignmentCheckIN,
+ PVRSRV_BRIDGE_OUT_ALIGNMENTCHECK *psAlignmentCheckOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *ui32AlignChecksInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psAlignmentCheckIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psAlignmentCheckIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto AlignmentCheck_exit;
+ }
+ }
+ }
+
+ if (psAlignmentCheckIN->ui32AlignChecksSize != 0)
+ {
+ ui32AlignChecksInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32AlignChecksInt, (const void __user *) psAlignmentCheckIN->pui32AlignChecks, psAlignmentCheckIN->ui32AlignChecksSize * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psAlignmentCheckOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto AlignmentCheck_exit;
+ }
+ }
+
+
+ psAlignmentCheckOUT->eError =
+ PVRSRVAlignmentCheckKM(psConnection, OSGetDevData(psConnection),
+ psAlignmentCheckIN->ui32AlignChecksSize,
+ ui32AlignChecksInt);
+
+
+
+
+AlignmentCheck_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
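
The buffer-placement logic in PVRSRVBridgeAlignmentCheck() above, repeated in every handler that takes array arguments, avoids an allocation when the variable-length data fits in the unused tail of the fixed-size input packet and falls back to the heap otherwise; only the heap case is freed at exit. A compact sketch of that decision, with BRIDGE_IN_SIZE and place_args() as stand-ins for PVRSRV_MAX_BRIDGE_IN_SIZE and the inline logic:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ALIGN_UP(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))
#define BRIDGE_IN_SIZE  256u   /* stand-in for PVRSRV_MAX_BRIDGE_IN_SIZE */

/* Decide where a variable-length array argument lives: in the spare,
 * word-aligned tail of the fixed-size input packet if it fits (no
 * allocation), otherwise in a heap buffer the caller must free. */
static uint8_t *place_args(uint8_t *in_packet, size_t in_struct_size,
			   size_t needed, int *heap_allocated)
{
	size_t off = ALIGN_UP(in_struct_size, sizeof(unsigned long));
	size_t spare = (off >= BRIDGE_IN_SIZE) ? 0 : BRIDGE_IN_SIZE - off;

	if (needed <= spare) {
		*heap_allocated = 0;
		return in_packet + off;
	}
	*heap_allocated = 1;
	return malloc(needed);
}

int main(void)
{
	uint8_t packet[BRIDGE_IN_SIZE] = {0};
	int on_heap;
	uint8_t *args = place_args(packet, 24, 64, &on_heap);

	if (!args)
		return 1;
	memset(args, 0xAB, 64);
	printf("args live %s the input packet\n", on_heap ? "outside" : "inside");
	if (on_heap)    /* mirrors the !bHaveEnoughSpace free path */
		free(args);
	return 0;
}
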
+
+
+static IMG_INT
+PVRSRVBridgeGetDeviceStatus(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_GETDEVICESTATUS *psGetDeviceStatusIN,
+ PVRSRV_BRIDGE_OUT_GETDEVICESTATUS *psGetDeviceStatusOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psGetDeviceStatusIN);
+
+
+
+
+
+ psGetDeviceStatusOUT->eError =
+ PVRSRVGetDeviceStatusKM(psConnection, OSGetDevData(psConnection),
+ &psGetDeviceStatusOUT->ui32DeviceSatus);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeEventObjectWaitTimeout(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutIN,
+ PVRSRV_BRIDGE_OUT_EVENTOBJECTWAITTIMEOUT *psEventObjectWaitTimeoutOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hOSEventKM = psEventObjectWaitTimeoutIN->hOSEventKM;
+ IMG_HANDLE hOSEventKMInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psEventObjectWaitTimeoutOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &hOSEventKMInt,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT,
+ IMG_TRUE);
+ if(psEventObjectWaitTimeoutOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto EventObjectWaitTimeout_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psEventObjectWaitTimeoutOUT->eError =
+ OSEventObjectWaitTimeout(
+ hOSEventKMInt,
+ psEventObjectWaitTimeoutIN->ui64uiTimeoutus);
+
+
+
+
+EventObjectWaitTimeout_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(hOSEventKMInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hOSEventKM,
+ PVRSRV_HANDLE_TYPE_EVENT_OBJECT_CONNECT);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeFindProcessMemStats(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_FINDPROCESSMEMSTATS *psFindProcessMemStatsIN,
+ PVRSRV_BRIDGE_OUT_FINDPROCESSMEMSTATS *psFindProcessMemStatsOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_UINT32 *pui32MemStatsArrayInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) +
+ 0;
+
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psFindProcessMemStatsOUT->pui32MemStatsArray = psFindProcessMemStatsIN->pui32MemStatsArray;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psFindProcessMemStatsIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psFindProcessMemStatsIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto FindProcessMemStats_exit;
+ }
+ }
+ }
+
+ if (psFindProcessMemStatsIN->ui32ArrSize != 0)
+ {
+ pui32MemStatsArrayInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32);
+ }
+
+
+
+ psFindProcessMemStatsOUT->eError =
+ PVRSRVFindProcessMemStatsKM(
+ psFindProcessMemStatsIN->ui32PID,
+ psFindProcessMemStatsIN->ui32ArrSize,
+ psFindProcessMemStatsIN->bbAllProcessStats,
+ pui32MemStatsArrayInt);
+
+
+
+ if ((psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psFindProcessMemStatsOUT->pui32MemStatsArray, pui32MemStatsArrayInt,
+ (psFindProcessMemStatsIN->ui32ArrSize * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psFindProcessMemStatsOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto FindProcessMemStats_exit;
+ }
+ }
+
+
+FindProcessMemStats_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeAcquireInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ACQUIREINFOPAGE *psAcquireInfoPageIN,
+ PVRSRV_BRIDGE_OUT_ACQUIREINFOPAGE *psAcquireInfoPageOUT,
+ CONNECTION_DATA *psConnection)
+{
+ PMR * psPMRInt = NULL;
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psAcquireInfoPageIN);
+
+
+
+
+
+ psAcquireInfoPageOUT->eError =
+ PVRSRVAcquireInfoPageKM(
+ &psPMRInt);
+ /* Exit early if bridged call fails */
+ if(psAcquireInfoPageOUT->eError != PVRSRV_OK)
+ {
+ goto AcquireInfoPage_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psAcquireInfoPageOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+
+ &psAcquireInfoPageOUT->hPMR,
+ (void *) psPMRInt,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVReleaseInfoPageKM);
+ if (psAcquireInfoPageOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AcquireInfoPage_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+AcquireInfoPage_exit:
+
+
+
+ if (psAcquireInfoPageOUT->eError != PVRSRV_OK)
+ {
+ if (psPMRInt)
+ {
+ PVRSRVReleaseInfoPageKM(psPMRInt);
+ }
+ }
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeReleaseInfoPage(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RELEASEINFOPAGE *psReleaseInfoPageIN,
+ PVRSRV_BRIDGE_OUT_RELEASEINFOPAGE *psReleaseInfoPageOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psReleaseInfoPageOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psProcessHandleBase->psHandleBase,
+ (IMG_HANDLE) psReleaseInfoPageIN->hPMR,
+ PVRSRV_HANDLE_TYPE_DEVMEM_MEM_IMPORT);
+ if ((psReleaseInfoPageOUT->eError != PVRSRV_OK) &&
+ (psReleaseInfoPageOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeReleaseInfoPage: %s",
+ PVRSRVGetErrorStringKM(psReleaseInfoPageOUT->eError)));
+ UnlockHandle();
+ goto ReleaseInfoPage_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+ReleaseInfoPage_exit:
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSRVCOREBridge(void);
+PVRSRV_ERROR DeinitSRVCOREBridge(void);
+
+/*
+ * Register all SRVCORE functions with services
+ */
+PVRSRV_ERROR InitSRVCOREBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT, PVRSRVBridgeConnect,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT, PVRSRVBridgeDisconnect,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT, PVRSRVBridgeAcquireGlobalEventObject,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT, PVRSRVBridgeReleaseGlobalEventObject,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN, PVRSRVBridgeEventObjectOpen,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT, PVRSRVBridgeEventObjectWait,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE, PVRSRVBridgeEventObjectClose,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO, PVRSRVBridgeDumpDebugInfo,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED, PVRSRVBridgeGetDevClockSpeed,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT, PVRSRVBridgeHWOpTimeout,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK, PVRSRVBridgeAlignmentCheck,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS, PVRSRVBridgeGetDeviceStatus,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT, PVRSRVBridgeEventObjectWaitTimeout,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS, PVRSRVBridgeFindProcessMemStats,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE, PVRSRVBridgeAcquireInfoPage,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE, PVRSRVBridgeReleaseInfoPage,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SRVCORE functions from services
+ */
+PVRSRV_ERROR DeinitSRVCOREBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_CONNECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DISCONNECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREGLOBALEVENTOBJECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEGLOBALEVENTOBJECT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTOPEN);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAIT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTCLOSE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_DUMPDEBUGINFO);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVCLOCKSPEED);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_HWOPTIMEOUT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ALIGNMENTCHECK);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_GETDEVICESTATUS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_EVENTOBJECTWAITTIMEOUT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_FINDPROCESSMEMSTATS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_ACQUIREINFOPAGE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_SRVCORE_RELEASEINFOPAGE);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_sync_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_sync_bridge.c
new file mode 100644
index 00000000000000..901027dc38858f
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_sync_bridge.c
@@ -0,0 +1,2204 @@
+/*******************************************************************************
+@File
+@Title Server bridge for sync
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for sync
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+#include "pdump.h"
+#include "pvrsrv_sync_km.h"
+#include "sync_fallback_server.h"
+
+
+#include "common_sync_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeAllocSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockIN,
+ PVRSRV_BRIDGE_OUT_ALLOCSYNCPRIMITIVEBLOCK *psAllocSyncPrimitiveBlockOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+ PMR * pshSyncPMRInt = NULL;
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psAllocSyncPrimitiveBlockIN);
+
+
+ psAllocSyncPrimitiveBlockOUT->hSyncHandle = NULL;
+
+
+
+ psAllocSyncPrimitiveBlockOUT->eError =
+ PVRSRVAllocSyncPrimitiveBlockKM(psConnection, OSGetDevData(psConnection),
+ &psSyncHandleInt,
+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimVAddr,
+ &psAllocSyncPrimitiveBlockOUT->ui32SyncPrimBlockSize,
+ &pshSyncPMRInt);
+ /* Exit early if bridged call fails */
+ if(psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+ (void *) psSyncHandleInt,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVFreeSyncPrimitiveBlockKM);
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+
+
+
+
+
+ psAllocSyncPrimitiveBlockOUT->eError = PVRSRVAllocSubHandleUnlocked(psConnection->psHandleBase,
+
+ &psAllocSyncPrimitiveBlockOUT->hhSyncPMR,
+ (void *) pshSyncPMRInt,
+ PVRSRV_HANDLE_TYPE_PMR_LOCAL_EXPORT_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,psAllocSyncPrimitiveBlockOUT->hSyncHandle);
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto AllocSyncPrimitiveBlock_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+AllocSyncPrimitiveBlock_exit:
+
+
+
+ if (psAllocSyncPrimitiveBlockOUT->eError != PVRSRV_OK)
+ {
+ /* Lock over handle creation cleanup. */
+ LockHandle();
+ if (psAllocSyncPrimitiveBlockOUT->hSyncHandle)
+ {
+
+
+ PVRSRV_ERROR eError = PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psAllocSyncPrimitiveBlockOUT->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ if ((eError != PVRSRV_OK) && (eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeAllocSyncPrimitiveBlock: %s",
+ PVRSRVGetErrorStringKM(eError)));
+ }
+ /* Releasing the handle should free/destroy/release the resource.
+ * This should never fail... */
+ PVR_ASSERT((eError == PVRSRV_OK) || (eError == PVRSRV_ERROR_RETRY));
+
+ /* Avoid freeing/destroying/releasing the resource a second time below */
+ psSyncHandleInt = NULL;
+ }
+
+
+ /* Release now we have cleaned up creation handles. */
+ UnlockHandle();
+ if (psSyncHandleInt)
+ {
+ PVRSRVFreeSyncPrimitiveBlockKM(psSyncHandleInt);
+ }
+ }
+
+
+ return 0;
+}
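
The error path in PVRSRVBridgeAllocSyncPrimitiveBlock() above is careful about ownership: once PVRSRVAllocHandleUnlocked() has succeeded, releasing the handle also frees the sync block through its release callback, so the code NULLs psSyncHandleInt to keep the fallback PVRSRVFreeSyncPrimitiveBlockKM() call from freeing it a second time. A small sketch of that single-owner hand-off (resource and free_resource are illustrative names):

#include <stdio.h>
#include <stdlib.h>

/* Toy resource with a free routine, mirroring how handle release
 * (via PFN_HANDLE_RELEASE) frees the underlying object exactly once. */
typedef struct { int id; } resource;

static void free_resource(resource *r)
{
	printf("freeing resource %d\n", r->id);
	free(r);
}

int main(void)
{
	resource *res = malloc(sizeof(*res));
	int handle_created = 1;    /* pretend the handle alloc succeeded */
	int later_step_failed = 1; /* force the error path */

	if (!res)
		return 1;
	res->id = 7;

	if (later_step_failed) {
		if (handle_created) {
			/* Releasing the handle frees the resource... */
			free_resource(res);
			/* ...so NULL the raw pointer to skip the fallback
			 * free below, exactly as psSyncHandleInt is NULLed. */
			res = NULL;
		}
		if (res)
			free_resource(res); /* only if no handle owned it */
	}
	return 0;
}
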
+
+
+static IMG_INT
+PVRSRVBridgeFreeSyncPrimitiveBlock(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockIN,
+ PVRSRV_BRIDGE_OUT_FREESYNCPRIMITIVEBLOCK *psFreeSyncPrimitiveBlockOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psFreeSyncPrimitiveBlockOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psFreeSyncPrimitiveBlockIN->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ if ((psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_OK) &&
+ (psFreeSyncPrimitiveBlockOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeFreeSyncPrimitiveBlock: %s",
+ PVRSRVGetErrorStringKM(psFreeSyncPrimitiveBlockOUT->eError)));
+ UnlockHandle();
+ goto FreeSyncPrimitiveBlock_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+FreeSyncPrimitiveBlock_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMSET *psSyncPrimSetIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMSET *psSyncPrimSetOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimSetIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimSetOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimSetOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimSet_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimSetOUT->eError =
+ PVRSRVSyncPrimSetKM(
+ psSyncHandleInt,
+ psSyncPrimSetIN->ui32Index,
+ psSyncPrimSetIN->ui32Value);
+
+
+
+
+SyncPrimSet_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncPrimSet(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCPRIMSET *psServerSyncPrimSetIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCPRIMSET *psServerSyncPrimSetOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psServerSyncPrimSetIN->hSyncHandle;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psServerSyncPrimSetOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psServerSyncPrimSetOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncPrimSet_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psServerSyncPrimSetOUT->eError =
+ PVRSRVServerSyncPrimSetKM(
+ psSyncHandleInt,
+ psServerSyncPrimSetIN->ui32Value);
+
+
+
+
+ServerSyncPrimSet_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncAlloc(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCALLOC *psServerSyncAllocIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCALLOC *psServerSyncAllocOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psServerSyncAllocIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psServerSyncAllocIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psServerSyncAllocOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ServerSyncAlloc_exit;
+ }
+ }
+ }
+
+ if (psServerSyncAllocIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiClassNameInt, (const void __user *) psServerSyncAllocIN->puiClassName, psServerSyncAllocIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psServerSyncAllocOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncAlloc_exit;
+ }
+ }
+
+
+ psServerSyncAllocOUT->eError =
+ PVRSRVServerSyncAllocKM(psConnection, OSGetDevData(psConnection),
+ &psSyncHandleInt,
+ &psServerSyncAllocOUT->ui32SyncPrimVAddr,
+ psServerSyncAllocIN->ui32ClassNameSize,
+ uiClassNameInt);
+ /* Exit early if bridged call fails */
+ if(psServerSyncAllocOUT->eError != PVRSRV_OK)
+ {
+ goto ServerSyncAlloc_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psServerSyncAllocOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psServerSyncAllocOUT->hSyncHandle,
+ (void *) psSyncHandleInt,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVServerSyncFreeKM);
+ if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncAlloc_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+ServerSyncAlloc_exit:
+
+
+
+ if (psServerSyncAllocOUT->eError != PVRSRV_OK)
+ {
+ if (psSyncHandleInt)
+ {
+ PVRSRVServerSyncFreeKM(psSyncHandleInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncFree(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCFREE *psServerSyncFreeIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCFREE *psServerSyncFreeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psServerSyncFreeOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psServerSyncFreeIN->hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ if ((psServerSyncFreeOUT->eError != PVRSRV_OK) &&
+ (psServerSyncFreeOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeServerSyncFree: %s",
+ PVRSRVGetErrorStringKM(psServerSyncFreeOUT->eError)));
+ UnlockHandle();
+ goto ServerSyncFree_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+ServerSyncFree_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncQueueHWOp(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCQUEUEHWOP *psServerSyncQueueHWOpOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psServerSyncQueueHWOpIN->hSyncHandle;
+ SERVER_SYNC_PRIMITIVE * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psServerSyncQueueHWOpOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psServerSyncQueueHWOpOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncQueueHWOp_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psServerSyncQueueHWOpOUT->eError =
+ PVRSRVServerSyncQueueHWOpKM(
+ psSyncHandleInt,
+ psServerSyncQueueHWOpIN->bbUpdate,
+ &psServerSyncQueueHWOpOUT->ui32FenceValue,
+ &psServerSyncQueueHWOpOUT->ui32UpdateValue);
+
+
+
+
+ServerSyncQueueHWOp_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeServerSyncGetStatus(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SERVERSYNCGETSTATUS *psServerSyncGetStatusIN,
+ PVRSRV_BRIDGE_OUT_SERVERSYNCGETSTATUS *psServerSyncGetStatusOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SERVER_SYNC_PRIMITIVE * *psSyncHandleInt = NULL;
+ IMG_HANDLE *hSyncHandleInt2 = NULL;
+ IMG_UINT32 *pui32UIDInt = NULL;
+ IMG_UINT32 *pui32FWAddrInt = NULL;
+ IMG_UINT32 *pui32CurrentOpInt = NULL;
+ IMG_UINT32 *pui32NextOpInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+ psServerSyncGetStatusOUT->pui32UID = psServerSyncGetStatusIN->pui32UID;
+ psServerSyncGetStatusOUT->pui32FWAddr = psServerSyncGetStatusIN->pui32FWAddr;
+ psServerSyncGetStatusOUT->pui32CurrentOp = psServerSyncGetStatusIN->pui32CurrentOp;
+ psServerSyncGetStatusOUT->pui32NextOp = psServerSyncGetStatusIN->pui32NextOp;
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psServerSyncGetStatusIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psServerSyncGetStatusIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ psSyncHandleInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hSyncHandleInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hSyncHandleInt2, (const void __user *) psServerSyncGetStatusIN->phSyncHandle, psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32UIDInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32FWAddrInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32CurrentOpInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+ if (psServerSyncGetStatusIN->ui32SyncCount != 0)
+ {
+ pui32NextOpInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32);
+ }
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psServerSyncGetStatusIN->ui32SyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psServerSyncGetStatusOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt[i],
+ hSyncHandleInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psServerSyncGetStatusOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psServerSyncGetStatusOUT->eError =
+ PVRSRVServerSyncGetStatusKM(
+ psServerSyncGetStatusIN->ui32SyncCount,
+ psSyncHandleInt,
+ pui32UIDInt,
+ pui32FWAddrInt,
+ pui32CurrentOpInt,
+ pui32NextOpInt);
+
+
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psServerSyncGetStatusOUT->pui32UID, pui32UIDInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psServerSyncGetStatusOUT->pui32FWAddr, pui32FWAddrInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psServerSyncGetStatusOUT->pui32CurrentOp, pui32CurrentOpInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+ if ((psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32)) > 0)
+ {
+ if ( OSCopyToUser(NULL, (void __user *) psServerSyncGetStatusOUT->pui32NextOp, pui32NextOpInt,
+ (psServerSyncGetStatusIN->ui32SyncCount * sizeof(IMG_UINT32))) != PVRSRV_OK )
+ {
+ psServerSyncGetStatusOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto ServerSyncGetStatus_exit;
+ }
+ }
+
+
+ServerSyncGetStatus_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ if (hSyncHandleInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psServerSyncGetStatusIN->ui32SyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hSyncHandleInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandleInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
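
PVRSRVBridgeServerSyncGetStatus() above packs six per-sync arrays into one allocation by walking a running offset, and the PVR_ASSERT at the exit label checks that the offset consumed exactly the bytes sized up front. The same carve-up in miniature (three arrays instead of six; all names illustrative):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Pack several same-length arrays into one allocation using a running
 * offset, then assert the offset consumed exactly what was sized up
 * front -- the same invariant PVR_ASSERT checks in the handler above. */
int main(void)
{
	uint32_t count = 8;
	size_t total = count * sizeof(void *)    /* resolved pointers */
		     + count * sizeof(uint32_t)  /* UIDs              */
		     + count * sizeof(uint32_t); /* FW addresses      */
	uint8_t *buf = malloc(total);
	size_t off = 0;

	if (!buf)
		return 1;

	void **resolved = (void **)(buf + off);
	off += count * sizeof(void *);
	uint32_t *uids = (uint32_t *)(buf + off);
	off += count * sizeof(uint32_t);
	uint32_t *fw_addrs = (uint32_t *)(buf + off);
	off += count * sizeof(uint32_t);

	assert(off == total);  /* sized buffer fully and exactly consumed */
	resolved[0] = buf;
	uids[0] = 1;
	fw_addrs[0] = 0x1000;
	printf("packed 3 arrays into %zu bytes\n", total);
	free(buf);
	return 0;
}
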
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpCreate(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPCREATE *psSyncPrimOpCreateIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPCREATE *psSyncPrimOpCreateOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SYNC_PRIMITIVE_BLOCK * *psBlockListInt = NULL;
+ IMG_HANDLE *hBlockListInt2 = NULL;
+ IMG_UINT32 *ui32SyncBlockIndexInt = NULL;
+ IMG_UINT32 *ui32IndexInt = NULL;
+ SERVER_SYNC_PRIMITIVE * *psServerSyncInt = NULL;
+ IMG_HANDLE *hServerSyncInt2 = NULL;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *)) +
+ (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) +
+ (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *)) +
+ (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncPrimOpCreateIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncPrimOpCreateIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ }
+
+ if (psSyncPrimOpCreateIN->ui32SyncBlockCount != 0)
+ {
+ psBlockListInt = (SYNC_PRIMITIVE_BLOCK **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(SYNC_PRIMITIVE_BLOCK *);
+ hBlockListInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hBlockListInt2, (const void __user *) psSyncPrimOpCreateIN->phBlockList, psSyncPrimOpCreateIN->ui32SyncBlockCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+ {
+ ui32SyncBlockIndexInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32SyncBlockIndexInt, (const void __user *) psSyncPrimOpCreateIN->pui32SyncBlockIndex, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount != 0)
+ {
+ ui32IndexInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32IndexInt, (const void __user *) psSyncPrimOpCreateIN->pui32Index, psSyncPrimOpCreateIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ if (psSyncPrimOpCreateIN->ui32ServerSyncCount != 0)
+ {
+ psServerSyncInt = (SERVER_SYNC_PRIMITIVE **)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(SERVER_SYNC_PRIMITIVE *);
+ hServerSyncInt2 = (IMG_HANDLE *)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE) > 0)
+ {
+ if ( OSCopyFromUser(NULL, hServerSyncInt2, (const void __user *) psSyncPrimOpCreateIN->phServerSync, psSyncPrimOpCreateIN->ui32ServerSyncCount * sizeof(IMG_HANDLE)) != PVRSRV_OK )
+ {
+ psSyncPrimOpCreateOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32SyncBlockCount;i++)
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpCreateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psBlockListInt[i],
+ hBlockListInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ }
+
+
+
+
+
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32ServerSyncCount;i++)
+ {
+ /* Look up the address from the handle */
+ psSyncPrimOpCreateOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerSyncInt[i],
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE,
+ IMG_TRUE);
+ if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpCreate_exit;
+ }
+ }
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpCreateOUT->eError =
+ PVRSRVSyncPrimOpCreateKM(
+ psSyncPrimOpCreateIN->ui32SyncBlockCount,
+ psBlockListInt,
+ psSyncPrimOpCreateIN->ui32ClientSyncCount,
+ ui32SyncBlockIndexInt,
+ ui32IndexInt,
+ psSyncPrimOpCreateIN->ui32ServerSyncCount,
+ psServerSyncInt,
+ &psServerCookieInt);
+ /* Exit early if bridged call fails */
+ if(psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ goto SyncPrimOpCreate_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psSyncPrimOpCreateOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psSyncPrimOpCreateOUT->hServerCookie,
+ (void *) psServerCookieInt,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ PVRSRV_HANDLE_ALLOC_FLAG_MULTI
+ ,(PFN_HANDLE_RELEASE)&PVRSRVSyncPrimOpDestroyKM);
+ if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpCreate_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+SyncPrimOpCreate_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ if (hBlockListInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32SyncBlockCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hBlockListInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hBlockListInt2[i],
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ }
+ }
+
+
+
+
+
+
+ if (hServerSyncInt2)
+ {
+ IMG_UINT32 i;
+
+ for (i=0;i<psSyncPrimOpCreateIN->ui32ServerSyncCount;i++)
+ {
+
+ /* Unreference the previously looked up handle */
+ if(hServerSyncInt2[i])
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerSyncInt2[i],
+ PVRSRV_HANDLE_TYPE_SERVER_SYNC_PRIMITIVE);
+ }
+ }
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psSyncPrimOpCreateOUT->eError != PVRSRV_OK)
+ {
+ if (psServerCookieInt)
+ {
+ PVRSRVSyncPrimOpDestroyKM(psServerCookieInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpTake(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPTAKE *psSyncPrimOpTakeIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPTAKE *psSyncPrimOpTakeOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpTakeIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+ IMG_UINT32 *ui32FlagsInt = NULL;
+ IMG_UINT32 *ui32FenceValueInt = NULL;
+ IMG_UINT32 *ui32UpdateValueInt = NULL;
+ IMG_UINT32 *ui32ServerFlagsInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) +
+ (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncPrimOpTakeIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncPrimOpTakeIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ }
+
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+ {
+ ui32FlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FlagsInt, (const void __user *) psSyncPrimOpTakeIN->pui32Flags, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+ {
+ ui32FenceValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32FenceValueInt, (const void __user *) psSyncPrimOpTakeIN->pui32FenceValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount != 0)
+ {
+ ui32UpdateValueInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32UpdateValueInt, (const void __user *) psSyncPrimOpTakeIN->pui32UpdateValue, psSyncPrimOpTakeIN->ui32ClientSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+ if (psSyncPrimOpTakeIN->ui32ServerSyncCount != 0)
+ {
+ ui32ServerFlagsInt = (IMG_UINT32*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32);
+ }
+
+ /* Copy the data over */
+ if (psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32) > 0)
+ {
+ if ( OSCopyFromUser(NULL, ui32ServerFlagsInt, (const void __user *) psSyncPrimOpTakeIN->pui32ServerFlags, psSyncPrimOpTakeIN->ui32ServerSyncCount * sizeof(IMG_UINT32)) != PVRSRV_OK )
+ {
+ psSyncPrimOpTakeOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncPrimOpTake_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimOpTakeOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpTakeOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpTake_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpTakeOUT->eError =
+ PVRSRVSyncPrimOpTakeKM(
+ psServerCookieInt,
+ psSyncPrimOpTakeIN->ui32ClientSyncCount,
+ ui32FlagsInt,
+ ui32FenceValueInt,
+ ui32UpdateValueInt,
+ psSyncPrimOpTakeIN->ui32ServerSyncCount,
+ ui32ServerFlagsInt);
+
+
+
+
+SyncPrimOpTake_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
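+/*
+ * Descriptive note on the pattern above (an observation on the generated
+ * code, not generated text itself): each entry point copies its array
+ * arguments in, holds the handle lock only around lookups, drops it for
+ * the potentially blocking KM call, then retakes it to release the
+ * references the lookups took. The cleanup path is shared via the
+ * *_exit label, so the same release code runs on both success and
+ * failure paths.
+ */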
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpReady(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPREADY *psSyncPrimOpReadyIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPREADY *psSyncPrimOpReadyOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpReadyIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimOpReadyOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpReadyOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpReady_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpReadyOUT->eError =
+ PVRSRVSyncPrimOpReadyKM(
+ psServerCookieInt,
+ &psSyncPrimOpReadyOUT->bReady);
+
+
+
+
+SyncPrimOpReady_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpComplete(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPCOMPLETE *psSyncPrimOpCompleteOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpCompleteIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimOpCompleteOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpCompleteOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpComplete_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpCompleteOUT->eError =
+ PVRSRVSyncPrimOpCompleteKM(
+ psServerCookieInt);
+
+
+
+
+SyncPrimOpComplete_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncPrimOpDestroy(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPDESTROY *psSyncPrimOpDestroyOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psSyncPrimOpDestroyOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psSyncPrimOpDestroyIN->hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ if ((psSyncPrimOpDestroyOUT->eError != PVRSRV_OK) &&
+ (psSyncPrimOpDestroyOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeSyncPrimOpDestroy: %s",
+ PVRSRVGetErrorStringKM(psSyncPrimOpDestroyOUT->eError)));
+ UnlockHandle();
+ goto SyncPrimOpDestroy_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+SyncPrimOpDestroy_exit:
+
+
+
+
+ return 0;
+}
+
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDump(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMP *psSyncPrimPDumpIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMP *psSyncPrimPDumpOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDump_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpOUT->eError =
+ PVRSRVSyncPrimPDumpKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpIN->ui32Offset);
+
+
+
+
+SyncPrimPDump_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDump NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpValue(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPVALUE *psSyncPrimPDumpValueOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpValueIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpValueOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpValueOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDumpValue_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpValueOUT->eError =
+ PVRSRVSyncPrimPDumpValueKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpValueIN->ui32Offset,
+ psSyncPrimPDumpValueIN->ui32Value);
+
+
+
+
+SyncPrimPDumpValue_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpValue NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPPOL *psSyncPrimPDumpPolOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpPolIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpPolOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpPolOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDumpPol_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpPolOUT->eError =
+ PVRSRVSyncPrimPDumpPolKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpPolIN->ui32Offset,
+ psSyncPrimPDumpPolIN->ui32Value,
+ psSyncPrimPDumpPolIN->ui32Mask,
+ psSyncPrimPDumpPolIN->eOperator,
+ psSyncPrimPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimPDumpPol_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimOpPDumpPol(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMOPPDUMPPOL *psSyncPrimOpPDumpPolOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hServerCookie = psSyncPrimOpPDumpPolIN->hServerCookie;
+ SERVER_OP_COOKIE * psServerCookieInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimOpPDumpPolOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psServerCookieInt,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE,
+ IMG_TRUE);
+ if(psSyncPrimOpPDumpPolOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimOpPDumpPol_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimOpPDumpPolOUT->eError =
+ PVRSRVSyncPrimOpPDumpPolKM(
+ psServerCookieInt,
+ psSyncPrimOpPDumpPolIN->eOperator,
+ psSyncPrimOpPDumpPolIN->uiPDumpFlags);
+
+
+
+
+SyncPrimOpPDumpPol_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psServerCookieInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hServerCookie,
+ PVRSRV_HANDLE_TYPE_SERVER_OP_COOKIE);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimOpPDumpPol NULL
+#endif
+
+#if defined(PDUMP)
+static IMG_INT
+PVRSRVBridgeSyncPrimPDumpCBP(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPIN,
+ PVRSRV_BRIDGE_OUT_SYNCPRIMPDUMPCBP *psSyncPrimPDumpCBPOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_HANDLE hSyncHandle = psSyncPrimPDumpCBPIN->hSyncHandle;
+ SYNC_PRIMITIVE_BLOCK * psSyncHandleInt = NULL;
+
+
+
+
+
+
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncPrimPDumpCBPOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &psSyncHandleInt,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncPrimPDumpCBPOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncPrimPDumpCBP_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncPrimPDumpCBPOUT->eError =
+ PVRSRVSyncPrimPDumpCBPKM(
+ psSyncHandleInt,
+ psSyncPrimPDumpCBPIN->ui32Offset,
+ psSyncPrimPDumpCBPIN->uiWriteOffset,
+ psSyncPrimPDumpCBPIN->uiPacketSize,
+ psSyncPrimPDumpCBPIN->uiBufferSize);
+
+
+
+
+SyncPrimPDumpCBP_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(psSyncHandleInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hSyncHandle,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+
+ return 0;
+}
+
+#else
+#define PVRSRVBridgeSyncPrimPDumpCBP NULL
+#endif
+
+static IMG_INT
+PVRSRVBridgeSyncAllocEvent(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCALLOCEVENT *psSyncAllocEventIN,
+ PVRSRV_BRIDGE_OUT_SYNCALLOCEVENT *psSyncAllocEventOUT,
+ CONNECTION_DATA *psConnection)
+{
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncAllocEventIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncAllocEventIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncAllocEvent_exit;
+ }
+ }
+ }
+
+ if (psSyncAllocEventIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiClassNameInt, (const void __user *) psSyncAllocEventIN->puiClassName, psSyncAllocEventIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psSyncAllocEventOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncAllocEvent_exit;
+ }
+ }
+
+
+ psSyncAllocEventOUT->eError =
+ PVRSRVSyncAllocEventKM(psConnection, OSGetDevData(psConnection),
+ psSyncAllocEventIN->bServerSync,
+ psSyncAllocEventIN->ui32FWAddr,
+ psSyncAllocEventIN->ui32ClassNameSize,
+ uiClassNameInt);
+
+
+
+
+SyncAllocEvent_exit:
+
+
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}

+
+
+static IMG_INT
+PVRSRVBridgeSyncFreeEvent(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCFREEEVENT *psSyncFreeEventIN,
+ PVRSRV_BRIDGE_OUT_SYNCFREEEVENT *psSyncFreeEventOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psSyncFreeEventOUT->eError =
+ PVRSRVSyncFreeEventKM(psConnection, OSGetDevData(psConnection),
+ psSyncFreeEventIN->ui32FWAddr);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
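+/*
+ * Note on the registration below (an observation, not generated text):
+ * each SetDispatchTableEntry() call binds a (bridge group, function ID)
+ * pair to its handler; bUseLock, judging by its name and uniform use
+ * here, marks each handler as requiring the bridge lock. In non-PDUMP
+ * builds the PDump handlers above are #defined to NULL, so those slots
+ * are registered without a handler.
+ */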
+
+PVRSRV_ERROR InitSYNCBridge(void);
+PVRSRV_ERROR DeinitSYNCBridge(void);
+
+/*
+ * Register all SYNC functions with services
+ */
+PVRSRV_ERROR InitSYNCBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK, PVRSRVBridgeAllocSyncPrimitiveBlock,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK, PVRSRVBridgeFreeSyncPrimitiveBlock,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET, PVRSRVBridgeSyncPrimSet,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET, PVRSRVBridgeServerSyncPrimSet,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC, PVRSRVBridgeServerSyncAlloc,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE, PVRSRVBridgeServerSyncFree,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP, PVRSRVBridgeServerSyncQueueHWOp,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS, PVRSRVBridgeServerSyncGetStatus,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE, PVRSRVBridgeSyncPrimOpCreate,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE, PVRSRVBridgeSyncPrimOpTake,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY, PVRSRVBridgeSyncPrimOpReady,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE, PVRSRVBridgeSyncPrimOpComplete,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY, PVRSRVBridgeSyncPrimOpDestroy,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP, PVRSRVBridgeSyncPrimPDump,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE, PVRSRVBridgeSyncPrimPDumpValue,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL, PVRSRVBridgeSyncPrimPDumpPol,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL, PVRSRVBridgeSyncPrimOpPDumpPol,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP, PVRSRVBridgeSyncPrimPDumpCBP,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT, PVRSRVBridgeSyncAllocEvent,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT, PVRSRVBridgeSyncFreeEvent,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNC functions with services
+ */
+PVRSRV_ERROR DeinitSYNCBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_ALLOCSYNCPRIMITIVEBLOCK);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_FREESYNCPRIMITIVEBLOCK);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMSET);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCPRIMSET);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCALLOC);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCFREE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCQUEUEHWOP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SERVERSYNCGETSTATUS);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCREATE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPTAKE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPREADY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPCOMPLETE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPDESTROY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPVALUE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPPOL);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMOPPDUMPPOL);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCPRIMPDUMPCBP);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCALLOCEVENT);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNC, PVRSRV_BRIDGE_SYNC_SYNCFREEEVENT);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_synctracking_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_synctracking_bridge.c
new file mode 100644
index 00000000000000..02840c6a0e32fa
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_synctracking_bridge.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+@File
+@Title Server bridge for synctracking
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for synctracking
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "sync.h"
+#include "sync_server.h"
+
+
+#include "common_synctracking_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeSyncRecordRemoveByHandle(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleIN,
+ PVRSRV_BRIDGE_OUT_SYNCRECORDREMOVEBYHANDLE *psSyncRecordRemoveByHandleOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+
+ /* Lock over handle destruction. */
+ LockHandle();
+
+
+
+
+
+ psSyncRecordRemoveByHandleOUT->eError =
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ (IMG_HANDLE) psSyncRecordRemoveByHandleIN->hhRecord,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE);
+ if ((psSyncRecordRemoveByHandleOUT->eError != PVRSRV_OK) &&
+ (psSyncRecordRemoveByHandleOUT->eError != PVRSRV_ERROR_RETRY))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "PVRSRVBridgeSyncRecordRemoveByHandle: %s",
+ PVRSRVGetErrorStringKM(psSyncRecordRemoveByHandleOUT->eError)));
+ UnlockHandle();
+ goto SyncRecordRemoveByHandle_exit;
+ }
+
+ /* Release now we have destroyed handles. */
+ UnlockHandle();
+
+
+
+SyncRecordRemoveByHandle_exit:
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeSyncRecordAdd(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_SYNCRECORDADD *psSyncRecordAddIN,
+ PVRSRV_BRIDGE_OUT_SYNCRECORDADD *psSyncRecordAddOUT,
+ CONNECTION_DATA *psConnection)
+{
+ SYNC_RECORD_HANDLE pshRecordInt = NULL;
+ IMG_HANDLE hhServerSyncPrimBlock = psSyncRecordAddIN->hhServerSyncPrimBlock;
+ SYNC_PRIMITIVE_BLOCK * pshServerSyncPrimBlockInt = NULL;
+ IMG_CHAR *uiClassNameInt = NULL;
+
+ IMG_UINT32 ui32NextOffset = 0;
+ IMG_BYTE *pArrayArgsBuffer = NULL;
+#if !defined(INTEGRITY_OS)
+ IMG_BOOL bHaveEnoughSpace = IMG_FALSE;
+#endif
+
+ IMG_UINT32 ui32BufferSize =
+ (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) +
+ 0;
+
+
+
+
+
+ if (ui32BufferSize != 0)
+ {
+#if !defined(INTEGRITY_OS)
+ /* Try to use remainder of input buffer for copies if possible, word-aligned for safety. */
+ IMG_UINT32 ui32InBufferOffset = PVR_ALIGN(sizeof(*psSyncRecordAddIN), sizeof(unsigned long));
+ IMG_UINT32 ui32InBufferExcessSize = ui32InBufferOffset >= PVRSRV_MAX_BRIDGE_IN_SIZE ? 0 :
+ PVRSRV_MAX_BRIDGE_IN_SIZE - ui32InBufferOffset;
+
+ bHaveEnoughSpace = ui32BufferSize <= ui32InBufferExcessSize;
+ if (bHaveEnoughSpace)
+ {
+ IMG_BYTE *pInputBuffer = (IMG_BYTE *)psSyncRecordAddIN;
+
+ pArrayArgsBuffer = &pInputBuffer[ui32InBufferOffset];
+ }
+ else
+#endif
+ {
+ pArrayArgsBuffer = OSAllocMemNoStats(ui32BufferSize);
+
+ if(!pArrayArgsBuffer)
+ {
+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto SyncRecordAdd_exit;
+ }
+ }
+ }
+
+ if (psSyncRecordAddIN->ui32ClassNameSize != 0)
+ {
+ uiClassNameInt = (IMG_CHAR*)(((IMG_UINT8 *)pArrayArgsBuffer) + ui32NextOffset);
+ ui32NextOffset += psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR);
+ }
+
+ /* Copy the data over */
+ if (psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR) > 0)
+ {
+ if ( OSCopyFromUser(NULL, uiClassNameInt, (const void __user *) psSyncRecordAddIN->puiClassName, psSyncRecordAddIN->ui32ClassNameSize * sizeof(IMG_CHAR)) != PVRSRV_OK )
+ {
+ psSyncRecordAddOUT->eError = PVRSRV_ERROR_INVALID_PARAMS;
+
+ goto SyncRecordAdd_exit;
+ }
+ }
+
+ /* Lock over handle lookup. */
+ LockHandle();
+
+
+
+
+
+ /* Look up the address from the handle */
+ psSyncRecordAddOUT->eError =
+ PVRSRVLookupHandleUnlocked(psConnection->psHandleBase,
+ (void **) &pshServerSyncPrimBlockInt,
+ hhServerSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK,
+ IMG_TRUE);
+ if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncRecordAdd_exit;
+ }
+ /* Release now we have looked up handles. */
+ UnlockHandle();
+
+ psSyncRecordAddOUT->eError =
+ PVRSRVSyncRecordAddKM(psConnection, OSGetDevData(psConnection),
+ &pshRecordInt,
+ pshServerSyncPrimBlockInt,
+ psSyncRecordAddIN->ui32ui32FwBlockAddr,
+ psSyncRecordAddIN->ui32ui32SyncOffset,
+ psSyncRecordAddIN->bbServerSync,
+ psSyncRecordAddIN->ui32ClassNameSize,
+ uiClassNameInt);
+ /* Exit early if bridged call fails */
+ if(psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ goto SyncRecordAdd_exit;
+ }
+
+ /* Lock over handle creation. */
+ LockHandle();
+
+
+
+
+
+ psSyncRecordAddOUT->eError = PVRSRVAllocHandleUnlocked(psConnection->psHandleBase,
+
+ &psSyncRecordAddOUT->hhRecord,
+ (void *) pshRecordInt,
+ PVRSRV_HANDLE_TYPE_SYNC_RECORD_HANDLE,
+ PVRSRV_HANDLE_ALLOC_FLAG_NONE
+ ,(PFN_HANDLE_RELEASE)&PVRSRVSyncRecordRemoveByHandleKM);
+ if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ UnlockHandle();
+ goto SyncRecordAdd_exit;
+ }
+
+ /* Release now we have created handles. */
+ UnlockHandle();
+
+
+
+SyncRecordAdd_exit:
+
+ /* Lock over handle lookup cleanup. */
+ LockHandle();
+
+
+
+
+
+
+
+ /* Unreference the previously looked up handle */
+ if(pshServerSyncPrimBlockInt)
+ {
+ PVRSRVReleaseHandleUnlocked(psConnection->psHandleBase,
+ hhServerSyncPrimBlock,
+ PVRSRV_HANDLE_TYPE_SYNC_PRIMITIVE_BLOCK);
+ }
+ /* Release now we have cleaned up look up handles. */
+ UnlockHandle();
+
+ if (psSyncRecordAddOUT->eError != PVRSRV_OK)
+ {
+ if (pshRecordInt)
+ {
+ PVRSRVSyncRecordRemoveByHandleKM(pshRecordInt);
+ }
+ }
+
+ /* Allocated space should be equal to the last updated offset */
+ PVR_ASSERT(ui32BufferSize == ui32NextOffset);
+
+#if defined(INTEGRITY_OS)
+ if(pArrayArgsBuffer)
+#else
+ if(!bHaveEnoughSpace && pArrayArgsBuffer)
+#endif
+ OSFreeMemNoStats(pArrayArgsBuffer);
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void);
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void);
+
+/*
+ * Register all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR InitSYNCTRACKINGBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE, PVRSRVBridgeSyncRecordRemoveByHandle,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD, PVRSRVBridgeSyncRecordAdd,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all SYNCTRACKING functions with services
+ */
+PVRSRV_ERROR DeinitSYNCTRACKINGBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDREMOVEBYHANDLE);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_SYNCTRACKING, PVRSRV_BRIDGE_SYNCTRACKING_SYNCRECORDADD);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/server_timerquery_bridge.c b/drivers/gpu/drm/img-rogue/1.10/server_timerquery_bridge.c
new file mode 100644
index 00000000000000..3c5ed10eb850c2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/server_timerquery_bridge.c
@@ -0,0 +1,244 @@
+/*******************************************************************************
+@File
+@Title Server bridge for timerquery
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side of the bridge for timerquery
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+********************************************************************************/
+
+#include <linux/uaccess.h>
+
+#include "img_defs.h"
+
+#include "rgxtimerquery.h"
+
+
+#include "common_timerquery_bridge.h"
+
+#include "allocmem.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+#include "srvcore.h"
+#include "handle.h"
+
+#include <linux/slab.h>
+
+
+
+
+
+
+/* ***************************************************************************
+ * Server-side bridge entry points
+ */
+
+static IMG_INT
+PVRSRVBridgeRGXBeginTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryIN,
+ PVRSRV_BRIDGE_OUT_RGXBEGINTIMERQUERY *psRGXBeginTimerQueryOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXBeginTimerQueryOUT->eError =
+ PVRSRVRGXBeginTimerQueryKM(psConnection, OSGetDevData(psConnection),
+ psRGXBeginTimerQueryIN->ui32QueryId);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXEndTimerQuery(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXENDTIMERQUERY *psRGXEndTimerQueryIN,
+ PVRSRV_BRIDGE_OUT_RGXENDTIMERQUERY *psRGXEndTimerQueryOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXEndTimerQueryIN);
+
+
+
+
+
+ psRGXEndTimerQueryOUT->eError =
+ PVRSRVRGXEndTimerQueryKM(psConnection, OSGetDevData(psConnection)
+ );
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXQueryTimer(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXQUERYTIMER *psRGXQueryTimerIN,
+ PVRSRV_BRIDGE_OUT_RGXQUERYTIMER *psRGXQueryTimerOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+
+
+
+
+
+ psRGXQueryTimerOUT->eError =
+ PVRSRVRGXQueryTimerKM(psConnection, OSGetDevData(psConnection),
+ psRGXQueryTimerIN->ui32QueryId,
+ &psRGXQueryTimerOUT->ui64StartTime,
+ &psRGXQueryTimerOUT->ui64EndTime);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+static IMG_INT
+PVRSRVBridgeRGXCurrentTime(IMG_UINT32 ui32DispatchTableEntry,
+ PVRSRV_BRIDGE_IN_RGXCURRENTTIME *psRGXCurrentTimeIN,
+ PVRSRV_BRIDGE_OUT_RGXCURRENTTIME *psRGXCurrentTimeOUT,
+ CONNECTION_DATA *psConnection)
+{
+
+
+
+ PVR_UNREFERENCED_PARAMETER(psRGXCurrentTimeIN);
+
+
+
+
+
+ psRGXCurrentTimeOUT->eError =
+ PVRSRVRGXCurrentTime(psConnection, OSGetDevData(psConnection),
+ &psRGXCurrentTimeOUT->ui64Time);
+
+
+
+
+
+
+
+
+ return 0;
+}
+
+
+
+
+/* ***************************************************************************
+ * Server bridge dispatch related glue
+ */
+
+static IMG_BOOL bUseLock = IMG_TRUE;
+
+PVRSRV_ERROR InitTIMERQUERYBridge(void);
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void);
+
+/*
+ * Register all TIMERQUERY functions with services
+ */
+PVRSRV_ERROR InitTIMERQUERYBridge(void)
+{
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY, PVRSRVBridgeRGXBeginTimerQuery,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY, PVRSRVBridgeRGXEndTimerQuery,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER, PVRSRVBridgeRGXQueryTimer,
+ NULL, bUseLock);
+
+ SetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME, PVRSRVBridgeRGXCurrentTime,
+ NULL, bUseLock);
+
+
+ return PVRSRV_OK;
+}
+
+/*
+ * Unregister all TIMERQUERY functions with services
+ */
+PVRSRV_ERROR DeinitTIMERQUERYBridge(void)
+{
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXBEGINTIMERQUERY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXENDTIMERQUERY);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXQUERYTIMER);
+
+ UnsetDispatchTableEntry(PVRSRV_BRIDGE_TIMERQUERY, PVRSRV_BRIDGE_TIMERQUERY_RGXCURRENTTIME);
+
+
+
+ return PVRSRV_OK;
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/services_kernel_client.h b/drivers/gpu/drm/img-rogue/1.10/services_kernel_client.h
new file mode 100644
index 00000000000000..30620c7bad5a9c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/services_kernel_client.h
@@ -0,0 +1,346 @@
+/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
+/* vi: set ts=8 sw=8 sts=8: */
+/*************************************************************************/ /*!
+@File services_kernel_client.h
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* This file contains a partial redefinition of the PowerVR Services 5
+ * interface for use by components which are checkpatch clean. This
+ * header is included by the unrefined, non-checkpatch clean headers
+ * to ensure that prototype/typedef/macro changes break the build.
+ */
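+/* For example: if the full sync.h changed the signature of
+ * SyncPrimAlloc(), a translation unit that includes both headers would
+ * fail with a conflicting-declaration error, surfacing the mismatch at
+ * build time rather than at run time.
+ */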
+
+#ifndef __SERVICES_KERNEL_CLIENT__
+#define __SERVICES_KERNEL_CLIENT__
+
+#include "pvrsrv_error.h"
+
+#include <linux/types.h>
+
+#include "pvrsrv_sync_km.h"
+#include "sync_checkpoint_external.h"
+
+#ifndef __pvrsrv_defined_struct_enum__
+
+/* rgx_fwif_shared.h */
+
+struct _RGXFWIF_DEV_VIRTADDR_ {
+ __u32 ui32Addr;
+};
+
+/* sync_external.h */
+
+struct PVRSRV_CLIENT_SYNC_PRIM {
+ volatile __u32 *pui32LinAddr;
+};
+
+struct PVRSRV_CLIENT_SYNC_PRIM_OP {
+ __u32 ui32Flags;
+ struct pvrsrv_sync_prim *psSync;
+ __u32 ui32FenceValue;
+ __u32 ui32UpdateValue;
+};
+
+typedef enum tag_img_bool
+{
+ IMG_FALSE = 0,
+ IMG_TRUE = 1,
+ IMG_FORCE_ALIGN = 0x7FFFFFFF
+} IMG_BOOL, *IMG_PBOOL;
+
+#else /* __pvrsrv_defined_struct_enum__ */
+
+struct _RGXFWIF_DEV_VIRTADDR_;
+
+struct PVRSRV_CLIENT_SYNC_PRIM;
+struct PVRSRV_CLIENT_SYNC_PRIM_OP;
+
+enum tag_img_bool;
+
+#endif /* __pvrsrv_defined_struct_enum__ */
+
+struct _PMR_;
+struct _PVRSRV_DEVICE_NODE_;
+struct dma_buf;
+struct SYNC_PRIM_CONTEXT;
+
+/* pvr_notifier.h */
+
+#ifndef _CMDCOMPNOTIFY_PFN_
+typedef void (*PFN_CMDCOMP_NOTIFY)(void *hCmdCompHandle);
+#define _CMDCOMPNOTIFY_PFN_
+#endif
+enum PVRSRV_ERROR PVRSRVRegisterCmdCompleteNotify(void **phNotify,
+ PFN_CMDCOMP_NOTIFY pfnCmdCompleteNotify, void *hPrivData);
+enum PVRSRV_ERROR PVRSRVUnregisterCmdCompleteNotify(void *hNotify);
+void PVRSRVCheckStatus(void *hCmdCompCallerHandle);
+
+#define DEBUG_REQUEST_DC 0
+#define DEBUG_REQUEST_SERVERSYNC 1
+#define DEBUG_REQUEST_SYS 2
+#define DEBUG_REQUEST_ANDROIDSYNC 3
+#define DEBUG_REQUEST_LINUXFENCE 4
+#define DEBUG_REQUEST_SYNCCHECKPOINT 5
+#define DEBUG_REQUEST_HTB 6
+#define DEBUG_REQUEST_APPHINT 7
+#define DEBUG_REQUEST_FALLBACKSYNC 8
+
+#define DEBUG_REQUEST_VERBOSITY_LOW 0
+#define DEBUG_REQUEST_VERBOSITY_MEDIUM 1
+#define DEBUG_REQUEST_VERBOSITY_HIGH 2
+#define DEBUG_REQUEST_VERBOSITY_MAX DEBUG_REQUEST_VERBOSITY_HIGH
+
+#ifndef _DBGNOTIFY_PFNS_
+typedef void (DUMPDEBUG_PRINTF_FUNC)(void *pvDumpDebugFile,
+ const char *fmt, ...) __printf(2, 3);
+typedef void (*PFN_DBGREQ_NOTIFY) (void *hDebugRequestHandle,
+ __u32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+#define _DBGNOTIFY_PFNS_
+#endif
+enum PVRSRV_ERROR PVRSRVRegisterDbgRequestNotify(void **phNotify,
+ struct _PVRSRV_DEVICE_NODE_ *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ __u32 ui32RequesterID,
+ void *hDbgRequestHandle);
+enum PVRSRV_ERROR PVRSRVUnregisterDbgRequestNotify(void *hNotify);
+
+/* physmem_dmabuf.h */
+
+struct dma_buf *PhysmemGetDmaBuf(struct _PMR_ *psPMR);
+
+/* pvrsrv.h */
+
+enum PVRSRV_ERROR PVRSRVAcquireGlobalEventObjectKM(void **phGlobalEventObject);
+enum PVRSRV_ERROR PVRSRVReleaseGlobalEventObjectKM(void *hGlobalEventObject);
+
+/* sync.h */
+
+enum PVRSRV_ERROR SyncPrimContextCreate(
+ struct _PVRSRV_DEVICE_NODE_ *psDevConnection,
+ struct SYNC_PRIM_CONTEXT **phSyncPrimContext);
+void SyncPrimContextDestroy(struct SYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+enum PVRSRV_ERROR SyncPrimAlloc(struct SYNC_PRIM_CONTEXT *hSyncPrimContext,
+ struct PVRSRV_CLIENT_SYNC_PRIM **ppsSync, const char *pszClassName);
+enum PVRSRV_ERROR SyncPrimFree(struct PVRSRV_CLIENT_SYNC_PRIM *psSync);
+enum PVRSRV_ERROR SyncPrimGetFirmwareAddr(
+ struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ __u32 *sync_addr);
+enum PVRSRV_ERROR SyncPrimSet(struct PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ __u32 ui32Value);
+
+/* pdump_km.h */
+
+#ifdef PDUMP
+enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...);
+#else
+static inline enum PVRSRV_ERROR __printf(1, 2) PDumpComment(char *fmt, ...)
+{
+ return PVRSRV_OK;
+}
+#endif
+
+/* osfunc.h */
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+void OSAcquireBridgeLock(void);
+void OSReleaseBridgeLock(void);
+#endif
+enum PVRSRV_ERROR OSEventObjectWait(void *hOSEventKM);
+enum PVRSRV_ERROR OSEventObjectOpen(void *hEventObject, void **phOSEventKM);
+enum PVRSRV_ERROR OSEventObjectClose(void *hOSEventKM);
+__u32 OSGetCurrentClientProcessIDKM(void);
+
+/* srvkm.h */
+
+enum PVRSRV_ERROR PVRSRVDeviceCreate(void *pvOSDevice,
+ int i32UMIdentifier,
+ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+enum PVRSRV_ERROR PVRSRVDeviceDestroy(
+ struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+const char *PVRSRVGetErrorStringKM(enum PVRSRV_ERROR eError);
+
+
+/* This is the function that kick code will call in order to obtain a list of the PSYNC_CHECKPOINTs
+ * for a given PVRSRV_FENCE passed to a kick function.
+ * The OS native sync code will allocate the memory to hold the returned list of PSYNC_CHECKPOINT ptrs.
+ * The caller will free this memory once it has finished referencing it.
+ *
+ * Input: fence The input (check) fence
+ * Output: nr_checkpoints The number of PVRSRV_SYNC_CHECKPOINT ptrs returned in the
+ * checkpoint_handles parameter.
+ * Output: fence_uid Unique ID of the check fence
+ * Input/Output: checkpoint_handles The returned list of PVRSRV_SYNC_CHECKPOINTs.
+ */
+enum PVRSRV_ERROR
+pvr_sync_resolve_fence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid);
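+/*
+ * Illustrative usage only (variable names are hypothetical and error
+ * handling is elided):
+ *
+ *   PSYNC_CHECKPOINT *checkpoints = NULL;
+ *   u32 nr = 0;
+ *   u64 uid = 0;
+ *
+ *   if (pvr_sync_resolve_fence(ctx, check_fence, &nr, &checkpoints,
+ *                              &uid) == PVRSRV_OK) {
+ *           ...make the kick wait on checkpoints[0..nr)...
+ *           ...free the list via the registered
+ *              PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN callback...
+ *   }
+ */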
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext, PVRSRV_FENCE fence, u32 *nr_checkpoints, PSYNC_CHECKPOINT **checkpoint_handles, u64 *fence_uid);
+
+
+/* This is the function that kick code will call in order to obtain a new PVRSRV_FENCE from the
+ * OS native sync code and the PSYNC_CHECKPOINT used in that fence.
+ * The OS native sync code needs to implement a function meeting this specification.
+ *
+ * Input: fence_name A string to annotate the fence with (for debug).
+ * Input: timeline The timeline on which the new fence is to be created.
+ * Output: new_fence The new PVRSRV_FENCE to be returned by the kick call.
+ * Output: fence_uid Unique ID of the update fence.
+ * Output: fence_finalise_data Pointer to data needed to finalise the fence.
+ * Output: new_checkpoint_handle The PSYNC_CHECKPOINT used by the new fence.
+ */
+enum PVRSRV_ERROR
+pvr_sync_create_fence(const char *fence_name,
+ PVRSRV_TIMELINE timeline,
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE *new_fence,
+ u64 *fence_uid,
+ void **fence_finalise_data,
+ PSYNC_CHECKPOINT *new_checkpoint_handle,
+ void **timeline_update_sync,
+ __u32 *timeline_update_value);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(
+ const char *fence_name,
+ PVRSRV_TIMELINE timeline,
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE *new_fence,
+ u64 *fence_uid,
+ void **fence_finalise_data,
+ PSYNC_CHECKPOINT *new_checkpoint_handle,
+ void **timeline_update_sync,
+ __u32 *timeline_update_value);
+#endif
+
+/* This is the function that kick code will call in order to 'rollback' a created
+ * output fence should an error occur when submitting the kick.
+ * The OS native sync code needs to implement a function meeting this specification.
+ *
+ * Input: fence_to_rollback The PVRSRV_FENCE to be 'rolled back'. The fence
+ * should be destroyed and any actions taken due to
+ * its creation that need to be undone should be
+ * reverted.
+ * Input: finalise_data The finalise data for the fence to be 'rolled back'.
+ */
+enum PVRSRV_ERROR
+pvr_sync_rollback_fence_data(PVRSRV_FENCE fence_to_rollback, void* finalise_data);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+#endif
+
+/* This is the function that kick code will call in order to 'finalise' a created
+ * output fence just prior to returning from the kick function.
+ * The OS native sync code needs to implement a function meeting this
+ * specification - the implementation may be a nop if the OS does not need to
+ * perform any actions at this point.
+ *
+ * Input: fence_fd The PVRSRV_FENCE to be 'finalised'. This value
+ * will have been returned by an earlier call to
+ * pvr_sync_create_fence().
+ * Input: finalise_data The finalise data returned by an earlier call
+ * to pvr_sync_create_fence().
+ */
+enum PVRSRV_ERROR
+pvr_sync_finalise_fence (PVRSRV_FENCE fence_fd, void *finalise_data);
+#ifndef _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+#endif
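+/*
+ * Sketch of the expected call order around a kick, pieced together from
+ * the comments above (submit_kick and the variable names are
+ * hypothetical):
+ *
+ *   err = pvr_sync_create_fence("kick", timeline, ctx, &fence, &uid,
+ *                               &finalise_data, &checkpoint,
+ *                               &tl_sync, &tl_value);
+ *   if (submit_kick(checkpoint) == PVRSRV_OK)
+ *           pvr_sync_finalise_fence(fence, finalise_data);
+ *   else
+ *           pvr_sync_rollback_fence_data(fence, finalise_data);
+ */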
+
+/* This is the function that driver code will call in order to request the
+ * sync implementation to output debug information relating to any sync
+ * checkpoints it may have created which appear in the provided array of
+ * FW addresses of Unified Fence Objects (UFOs).
+ *
+ * Input: nr_ufos The number of FW addresses provided in the
+ * vaddrs parameter.
+ * Input: vaddrs The array of FW addresses of UFOs. The sync
+ * implementation should check each of these to
+ * see if any relate to sync checkpoints it has
+ * created and where they do output debug information
+ * pertaining to the native/fallback sync with
+ * which it is associated.
+ */
+u32
+pvr_sync_dump_info_on_stalled_ufos(u32 nr_ufos, u32 *vaddrs);
+#ifndef _CHECKPOINT_PFNS_
+typedef __u32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(__u32 num_ufos, __u32 *vaddrs);
+#endif
+
+/* This is the function that kick code will call in a NO_HARDWARE build only after
+ * sync checkpoints have been manually signalled, to allow the OS native sync
+ * implementation to update its timelines (as the usual callback notification
+ * of signalled checkpoints is not supported for NO_HARDWARE).
+ */
+#ifndef _CHECKPOINT_PFNS_
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+#define _CHECKPOINT_PFNS_
+#endif
+enum PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate,
+ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback,
+ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise,
+ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines,
+ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem,
+ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs);
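+/*
+ * Registration sketch: an OS-native sync implementation would register
+ * its callbacks once at load time. Five of the names below are declared
+ * in this header; the NoHW-update and free-list callbacks are
+ * hypothetical names for functions such an implementation would provide:
+ *
+ *   SyncCheckpointRegisterFunctions(pvr_sync_resolve_fence,
+ *                                   pvr_sync_create_fence,
+ *                                   pvr_sync_rollback_fence_data,
+ *                                   pvr_sync_finalise_fence,
+ *                                   pvr_sync_nohw_update_timelines,
+ *                                   pvr_sync_free_checkpoint_list_mem,
+ *                                   pvr_sync_dump_info_on_stalled_ufos);
+ */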
+
+/* sync_checkpoint.h */
+enum PVRSRV_ERROR SyncCheckpointContextCreate(struct _PVRSRV_DEVICE_NODE_ *psDevConnection, PSYNC_CHECKPOINT_CONTEXT *phSyncCheckpointContext);
+enum PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext);
+enum PVRSRV_ERROR SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext, PVRSRV_TIMELINE timeline, const char *pszCheckpointName, PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+void SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+void SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum tag_img_bool SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum tag_img_bool SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, u32 fence_sync_flags);
+enum PVRSRV_ERROR SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+enum PVRSRV_ERROR SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+void SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+__u32 SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+PVRSRV_TIMELINE SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+const char *SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+#endif
+
+#endif /* __SERVICES_KERNEL_CLIENT__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/services_km.h b/drivers/gpu/drm/img-rogue/1.10/services_km.h
new file mode 100644
index 00000000000000..ca495008cab67d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/services_km.h
@@ -0,0 +1,160 @@
+/*************************************************************************/ /*!
+@File
+@Title Services API Kernel mode Header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Exported services API details
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+*/ /**************************************************************************/
+
+#ifndef SERVICES_KM_H
+#define SERVICES_KM_H
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "virt_validation_defs.h"
+#endif
+
+/*! 4k page size definition */
+#define PVRSRV_4K_PAGE_SIZE 4096UL /*!< Size of a 4K Page */
+#define PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT 12 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 16k page size definition */
+#define PVRSRV_16K_PAGE_SIZE 16384UL /*!< Size of a 16K Page */
+#define PVRSRV_16K_PAGE_SIZE_ALIGNSHIFT 14 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 64k page size definition */
+#define PVRSRV_64K_PAGE_SIZE 65536UL /*!< Size of a 64K Page */
+#define PVRSRV_64K_PAGE_SIZE_ALIGNSHIFT 16 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 256k page size definition */
+#define PVRSRV_256K_PAGE_SIZE 262144UL /*!< Size of a 256K Page */
+#define PVRSRV_256K_PAGE_SIZE_ALIGNSHIFT 18 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 1MB page size definition */
+#define PVRSRV_1M_PAGE_SIZE 1048576UL /*!< Size of a 1M Page */
+#define PVRSRV_1M_PAGE_SIZE_ALIGNSHIFT 20 /*!< Amount to shift an address by so that
+ it is always page-aligned */
+/*! 2MB page size definition */
+#define PVRSRV_2M_PAGE_SIZE 2097152UL /*!< Size of a 2M Page */
+#define PVRSRV_2M_PAGE_SIZE_ALIGNSHIFT 21 /*!< Amount to shift an address by so that
+ it is always page-aligned */
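+
+/* Illustrative sketch, not part of the API above: each _ALIGNSHIFT value is
+ * the log2 of its page size, so an address can be page-aligned with plain
+ * shifts and masks. The names ui64Addr and ui64Aligned are hypothetical.
+ *
+ *   IMG_UINT64 ui64Aligned;
+ *   ui64Aligned = (ui64Addr >> PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT)
+ *                           << PVRSRV_4K_PAGE_SIZE_ALIGNSHIFT;    (round down)
+ *   ui64Aligned = (ui64Addr + PVRSRV_4K_PAGE_SIZE - 1)
+ *                           & ~(PVRSRV_4K_PAGE_SIZE - 1);         (round up)
+ */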
+
+/*!
+ * Forward declaration (see connection.h)
+ */
+typedef struct _PVRSRV_DEV_CONNECTION_ PVRSRV_DEV_CONNECTION;
+
+/*!
+ Flags for Services connection.
+ Allows a per-client policy to be defined for Services
+*/
+
+#define SRV_WORKEST_ENABLED (1U << 2) /*!< If Workload Estimation is enabled */
+#define SRV_PDVFS_ENABLED (1U << 3) /*!< If PDVFS is enabled */
+
+#define SRV_NO_HWPERF_CLIENT_STREAM (1U << 4) /*!< Don't create HWPerf for this connection */
+
+#define SRV_FLAGS_CLIENT_64BIT_COMPAT		(1U << 5) /*!< This flag is set if the client is
+														   64-bit compatible. */
+/* Size of pointer on a 64 bit machine */
+#define POINTER_SIZE_64BIT (8)
+
+/*
+ * Bits 20 - 27 are used to pass information needed for validation
+ * of the GPU Virtualisation Validation mechanism. In particular:
+ *
+ * Bits:
+ * [20 - 22]: OSid of the memory region that will be used for allocations
+ * [23 - 25]: OSid that will be emitted by the Firmware for all memory accesses
+ * regarding that memory context.
+ * [26]: If the AXI Protection register will be set to secure for that OSid
+ * [27]: If the Emulator Wrapper Register checking for protection violation
+ * will be set to secure for that OSid
+ */
+
+#define VIRTVAL_FLAG_OSID_SHIFT (20)
+#define SRV_VIRTVAL_FLAG_OSID_MASK (7U << VIRTVAL_FLAG_OSID_SHIFT)
+
+#define VIRTVAL_FLAG_OSIDREG_SHIFT (23)
+#define SRV_VIRTVAL_FLAG_OSIDREG_MASK (7U << VIRTVAL_FLAG_OSIDREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPREG_SHIFT (26)
+#define SRV_VIRTVAL_FLAG_AXIPREG_MASK (1U << VIRTVAL_FLAG_AXIPREG_SHIFT)
+
+#define VIRTVAL_FLAG_AXIPTD_SHIFT (27)
+#define SRV_VIRTVAL_FLAG_AXIPTD_MASK (1U << VIRTVAL_FLAG_AXIPTD_SHIFT)
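+
+/* Illustrative sketch (assumption): packing the validation fields into the
+ * connection flags word with the shifts/masks above; ui32OSid and ui32OSidReg
+ * are hypothetical caller-side values. The server side performs the inverse,
+ * e.g. (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> VIRTVAL_FLAG_OSID_SHIFT.
+ *
+ *   IMG_UINT32 ui32Flags = 0;
+ *   ui32Flags |= (ui32OSid    << VIRTVAL_FLAG_OSID_SHIFT)    & SRV_VIRTVAL_FLAG_OSID_MASK;
+ *   ui32Flags |= (ui32OSidReg << VIRTVAL_FLAG_OSIDREG_SHIFT) & SRV_VIRTVAL_FLAG_OSIDREG_MASK;
+ */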
+
+#define SRV_FLAGS_PDUMPCTRL (1U << 31) /*!< PDump Ctrl client flag */
+
+/*
+ Pdump flags which are accessible to Services clients
+*/
+#define PDUMP_NONE          0x00000000UL /*!< No flags */
+
+#define PDUMP_BLKDATA       0x10000000UL /*!< This flag indicates block-mode PDump data to be recorded in
+                                              Block script stream in addition to Main script stream. */
+
+#define PDUMP_CONT          0x40000000UL /*!< Output this entry always regardless of framed capture range,
+                                              used by client applications being dumped. */
+#define PDUMP_PERSIST       0x80000000UL /*!< Output this entry always regardless of app and range,
+                                              used by persistent resources created after
+                                              driver initialisation that must appear in
+                                              all PDump captures in that session. */
+
+/* Valid range of values for pdump block length used in 'block' mode of PDump */
+#define PDUMP_BLOCKLEN_MIN 10
+#define PDUMP_BLOCKLEN_MAX 1000
+
+#define PDUMP_FRAME_MIN 0
+#define PDUMP_FRAME_MAX (IMG_UINT32_MAX - 1)
+#define PDUMP_FRAME_UNSET IMG_UINT32_MAX
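+
+/* Illustrative note (assumption): clients typically OR these values into the
+ * PDump flags word passed to PDump calls, with PDUMP_NONE used when no
+ * special behaviour is required, e.g.
+ *
+ *   IMG_UINT32 ui32PDumpFlags = PDUMP_CONT | PDUMP_BLKDATA;
+ */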
+
+/* Status of the device. */
+typedef enum
+{
+ PVRSRV_DEVICE_STATUS_UNKNOWN, /* status of the device is unknown */
+ PVRSRV_DEVICE_STATUS_OK, /* the device is operational */
+ PVRSRV_DEVICE_STATUS_NOT_RESPONDING, /* the device is not responding */
+ PVRSRV_DEVICE_STATUS_DEVICE_ERROR /* the device is not operational */
+} PVRSRV_DEVICE_STATUS;
+
+#endif /* SERVICES_KM_H */
+/**************************************************************************//**
+End of file (services_km.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/servicesext.h b/drivers/gpu/drm/img-rogue/1.10/servicesext.h
new file mode 100644
index 00000000000000..79075e148124b4
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/servicesext.h
@@ -0,0 +1,172 @@
+/*************************************************************************/ /*!
+@File
+@Title Services definitions required by external drivers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides services data structures, defines and prototypes
+ required by external drivers
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined (__SERVICESEXT_H__)
+#define __SERVICESEXT_H__
+
+/* include/ */
+#include "pvrsrv_error.h"
+#include "img_types.h"
+#include "img_3dtypes.h"
+#include "pvrsrv_device_types.h"
+
+
+/*
+ * Lock buffer read/write flags
+ */
+#define PVRSRV_LOCKFLG_READONLY (1) /*!< The locking process will only read the locked surface */
+
+/*!
+ *****************************************************************************
+ * Services State
+ *****************************************************************************/
+typedef enum _PVRSRV_SERVICES_STATE_
+{
+ PVRSRV_SERVICES_STATE_UNDEFINED = 0,
+ PVRSRV_SERVICES_STATE_OK,
+ PVRSRV_SERVICES_STATE_BAD,
+} PVRSRV_SERVICES_STATE;
+
+
+/*!
+ *****************************************************************************
+ * States for power management
+ *****************************************************************************/
+/*!
+ System Power State Enum
+ */
+typedef enum _PVRSRV_SYS_POWER_STATE_
+{
+ PVRSRV_SYS_POWER_STATE_Unspecified = -1, /*!< Unspecified : Uninitialised */
+ PVRSRV_SYS_POWER_STATE_OFF = 0, /*!< Off */
+ PVRSRV_SYS_POWER_STATE_ON = 1, /*!< On */
+
+ PVRSRV_SYS_POWER_STATE_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_SYS_POWER_STATE, *PPVRSRV_SYS_POWER_STATE; /*!< Typedef for ptr to PVRSRV_SYS_POWER_STATE */
+
+/*!
+ Device Power State Enum
+ */
+typedef enum _PVRSRV_DEV_POWER_STATE_
+{
+ PVRSRV_DEV_POWER_STATE_DEFAULT = -1, /*!< Default state for the device */
+ PVRSRV_DEV_POWER_STATE_OFF = 0, /*!< Unpowered */
+ PVRSRV_DEV_POWER_STATE_ON = 1, /*!< Running */
+
+ PVRSRV_DEV_POWER_STATE_FORCE_I32 = 0x7fffffff /*!< Force enum to be at least 32-bits wide */
+
+} PVRSRV_DEV_POWER_STATE, *PPVRSRV_DEV_POWER_STATE; /*!< Typedef for ptr to PVRSRV_DEV_POWER_STATE */ /* PRQA S 3205 */
+
+
+/* Power transition handler prototypes */
+
+/*!
+ Typedef for a pointer to a function that will be called before a transition
+ from one power state to another. See also PFN_POST_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_POWER) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
+/*!
+ Typedef for a pointer to a function that will be called after a transition
+ from one power state to another. See also PFN_PRE_POWER.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_POWER) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eNewPowerState,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ IMG_BOOL bForced);
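+
+/* Illustrative sketch (assumption): a minimal PFN_PRE_POWER implementation.
+ * Only the signature comes from the typedef above; the function name and
+ * body are hypothetical.
+ *
+ *   static PVRSRV_ERROR SysDevPrePower(IMG_HANDLE hDevHandle,
+ *                                      PVRSRV_DEV_POWER_STATE eNewPowerState,
+ *                                      PVRSRV_DEV_POWER_STATE eCurrentPowerState,
+ *                                      IMG_BOOL bForced)
+ *   {
+ *       if ((eCurrentPowerState == PVRSRV_DEV_POWER_STATE_ON) &&
+ *           (eNewPowerState == PVRSRV_DEV_POWER_STATE_OFF))
+ *       {
+ *           (quiesce the device here, before power is removed)
+ *       }
+ *       return PVRSRV_OK;
+ *   }
+ */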
+
+/* Clock speed handler prototypes */
+
+/*!
+ Typedef for a pointer to a function that will be called before a transition
+ from one clock speed to another. See also PFN_POST_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_PRE_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+/*!
+ Typedef for a pointer to a function that will be called after a transition
+ from one clock speed to another. See also PFN_PRE_CLOCKSPEED_CHANGE.
+ */
+typedef PVRSRV_ERROR (*PFN_POST_CLOCKSPEED_CHANGE) (IMG_HANDLE hDevHandle,
+ PVRSRV_DEV_POWER_STATE eCurrentPowerState);
+
+/*!
+ Typedef for a pointer to a function that will be called to transition the device
+ to a forced idle state. Used in conjunction with (forced) power requests, DVFS and cluster count changes.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_REQUEST) (IMG_HANDLE hDevHandle,
+ IMG_BOOL bDeviceOffPermitted);
+
+/*!
+ Typedef for a pointer to a function that will be called to cancel a forced idle state
+ and return the firmware back to a state where the hardware can be scheduled.
+ */
+typedef PVRSRV_ERROR (*PFN_FORCED_IDLE_CANCEL_REQUEST) (IMG_HANDLE hDevHandle);
+
+typedef PVRSRV_ERROR (*PFN_DUST_COUNT_REQUEST) (IMG_HANDLE hDevHandle,
+ IMG_UINT32 ui32DustCount);
+
+/*!
+ *****************************************************************************
+ * This structure is used for OS independent registry (profile) access
+ *****************************************************************************/
+
+typedef struct _PVRSRV_REGISTRY_INFO
+{
+ IMG_UINT32 ui32DevCookie;
+ IMG_PCHAR pszKey;
+ IMG_PCHAR pszValue;
+ IMG_PCHAR pszBuf;
+ IMG_UINT32 ui32BufSize;
+} PVRSRV_REGISTRY_INFO, *PPVRSRV_REGISTRY_INFO;
+
+#endif /* __SERVICESEXT_H__ */
+/*****************************************************************************
+ End of file (servicesext.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/sofunc_pvr.h b/drivers/gpu/drm/img-rogue/1.10/sofunc_pvr.h
new file mode 100644
index 00000000000000..e9b3305e985903
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sofunc_pvr.h
@@ -0,0 +1,94 @@
+/*************************************************************************/ /*!
+@File
+@Title SO Interface header file for common PVR functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Contains SO interface functions. These functions are defined in
+ the common layer and are called from the env layer OS specific
+ implementation.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SOFUNC_PVR_H_)
+#define SOFUNC_PVR_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "device.h"
+#include "pvr_notifier.h"
+
+
+/**************************************************************************/ /*!
+ @Function SOPvrDbgRequestNotifyRegister
+ @Description SO Interface function called from the OS layer implementation.
+              Register a callback function that is called when a debug request
+              is made via a call to PVRSRVDebugRequest. There are a number of
+              verbosity levels ranging from DEBUG_REQUEST_VERBOSITY_LOW up to
+              DEBUG_REQUEST_VERBOSITY_MAX. The callback will be called once
+              for each level up to the highest level specified to
+              PVRSRVDebugRequest.
+@Output phNotify On success, points to debug notifier handle
+@Input psDevNode Device node for which the debug callback
+ should be registered
+@Input pfnDbgRequestNotify Function callback
+@Input ui32RequesterID Requester ID. This is used to determine
+ the order in which callbacks are called,
+ see DEBUG_REQUEST_*
+@Input          hDbgRequestHandle   Data to be passed back to the caller via
+ the callback function
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR SOPvrDbgRequestNotifyRegister(IMG_HANDLE *phNotify,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ PFN_DBGREQ_NOTIFY pfnDbgRequestNotify,
+ IMG_UINT32 ui32RequesterID,
+ PVRSRV_DBGREQ_HANDLE hDbgRequestHandle);
+
+/**************************************************************************/ /*!
+ @Function SOPvrDbgRequestNotifyUnregister
+ @Description SO Interface function called from the OS layer implementation.
+ Remove and clean up the specified notifier registration so that
+ it does not receive any further callbacks.
+ @Input hNotify Handle returned to caller from
+ SOPvrDbgRequestNotifyRegister().
+ @Return PVRSRV_ERROR
+*/ /***************************************************************************/
+PVRSRV_ERROR SOPvrDbgRequestNotifyUnregister(IMG_HANDLE hNotify);
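+
+/* Illustrative usage sketch (assumption): register a notifier, then remove it
+ * when no longer needed. psDevNode, MyDbgNotify, uiRequesterID and hMyData
+ * stand in for caller-provided values and are hypothetical.
+ *
+ *   IMG_HANDLE hNotify;
+ *   if (SOPvrDbgRequestNotifyRegister(&hNotify, psDevNode, MyDbgNotify,
+ *                                     uiRequesterID, hMyData) == PVRSRV_OK)
+ *   {
+ *       ...
+ *       (void) SOPvrDbgRequestNotifyUnregister(hNotify);
+ *   }
+ */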
+
+
+#endif /* SOFUNC_PVR_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sofunc_rgx.h b/drivers/gpu/drm/img-rogue/1.10/sofunc_rgx.h
new file mode 100644
index 00000000000000..be9594d9524205
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sofunc_rgx.h
@@ -0,0 +1,95 @@
+/*************************************************************************/ /*!
+@File
+@Title SO Interface header file for devices/RGX functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Contains SO interface functions. These functions are defined in
+ the common devices layer and are called from the env layer OS
+ specific implementation.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(SOFUNC_RGX_H_)
+#define SOFUNC_RGX_H_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+
+#if !defined(NO_HARDWARE)
+/*!
+*******************************************************************************
+
+ @Function SORgxGpuUtilStatsRegister
+
+ @Description SO Interface function called from the OS layer implementation.
+ Initialise data used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as an
+               argument). This function must be called only once for each
+ different user/handle.
+
+ @Input phGpuUtilUser - Pointer to handle used to identify a user of
+ RGXGetGpuUtilStats
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsRegister(IMG_HANDLE *phGpuUtilUser);
+
+
+/*!
+*******************************************************************************
+
+ @Function SORgxGpuUtilStatsUnregister
+
+ @Description SO Interface function called from the OS layer implementation.
+ Free data previously used to compute GPU utilisation statistics
+               for a particular user (identified by the handle passed as an
+               argument).
+
+ @Input hGpuUtilUser - Handle used to identify a user of
+ RGXGetGpuUtilStats
+
+ @Return PVRSRV_ERROR
+
+******************************************************************************/
+PVRSRV_ERROR SORgxGpuUtilStatsUnregister(IMG_HANDLE hGpuUtilUser);
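+
+/* Illustrative usage sketch (assumption): each user of RGXGetGpuUtilStats
+ * registers once for a handle and unregisters it when done.
+ *
+ *   IMG_HANDLE hGpuUtilUser;
+ *   if (SORgxGpuUtilStatsRegister(&hGpuUtilUser) == PVRSRV_OK)
+ *   {
+ *       (query utilisation via RGXGetGpuUtilStats(...) using hGpuUtilUser)
+ *       (void) SORgxGpuUtilStatsUnregister(hGpuUtilUser);
+ *   }
+ */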
+#endif /* !defined(NO_HARDWARE) */
+
+
+
+#endif /* SOFUNC_RGX_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/srvcore.c b/drivers/gpu/drm/img-rogue/1.10/srvcore.c
new file mode 100644
index 00000000000000..83c906878f1238
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/srvcore.c
@@ -0,0 +1,1336 @@
+/*************************************************************************/ /*!
+@File
+@Title PVR Common Bridge Module (kernel side)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements core PVRSRV API, server side
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "ra.h"
+#include "pvr_bridge.h"
+#include "connection_server.h"
+#include "device.h"
+#include "htbuffer.h"
+
+#include "pdump_km.h"
+
+#include "srvkm.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "log2.h"
+
+#include "srvcore.h"
+#include "pvrsrv.h"
+#include "power.h"
+#include "lists.h"
+
+#if defined(SUPPORT_RGX)
+#include "rgxdevice.h"
+#include "rgxinit.h"
+#endif
+
+#include "rgx_options.h"
+#include "pvrversion.h"
+#include "lock.h"
+#include "osfunc.h"
+#include "device_connection.h"
+#include "process_stats.h"
+#include "pvrsrv_pool.h"
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "physmem_lma.h"
+#include "services_km.h"
+#endif
+
+#include "pvrsrv_tlstreams.h"
+#include "tlstream.h"
+
+#if defined(PVRSRV_MISSING_NO_SPEC_IMPL)
+#pragma message ("There is no implementation of OSConfineArrayIndexNoSpeculation() - see osfunc.h")
+#endif
+
+/* For the purpose of maintainability, it is intended that this file should not
+ * contain any OS-specific #ifdefs. Please find a way to add e.g.
+ * an osfunc.c abstraction or override the entire function in question within
+ * env,*,pvr_bridge_k.c
+ */
+
+PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT] = { { .pfFunction = DummyBW }, };
+
+#define PVR_DISPATCH_OFFSET_FIRST_FUNC 0
+#define PVR_DISPATCH_OFFSET_LAST_FUNC 1
+#define PVR_DISPATCH_OFFSET_ARRAY_MAX 2
+
+#define PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT PVRSRV_APPHINT_HWPERFCLIENTBUFFERSIZE
+
+static IMG_UINT16 g_BridgeDispatchTableStartOffsets[BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT][PVR_DISPATCH_OFFSET_ARRAY_MAX];
+
+#if defined(DEBUG_BRIDGE_KM)
+/* a lock used for protecting bridge call timing calculations
+ * for calls which do not acquire a lock
+ */
+static POS_LOCK g_hStatsLock;
+PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+
+void BridgeGlobalStatsLock(void)
+{
+ OSLockAcquire(g_hStatsLock);
+}
+
+void BridgeGlobalStatsUnlock(void)
+{
+ OSLockRelease(g_hStatsLock);
+}
+#endif
+
+void BridgeDispatchTableStartOffsetsInit(void)
+{
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEFAULT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEFAULT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SRVCORE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SRVCORE_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCEXPORT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCSEXPORT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCSEXPORT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPCTRL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPCTRL_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMPLAT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CMM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMPMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMPMM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PDUMP_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DMABUF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DMABUF_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_CACHE][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_CACHE_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SMM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SMM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_PVRTL][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_PVRTL_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RI][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RI_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_VALIDATION][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TUTILS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEVICEMEMHISTORY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_HTBUFFER][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_HTBUFFER_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DCPLAT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DCPLAT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_MMEXTMEM][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_MMEXTMEM_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCTRACKING][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCTRACKING_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_SYNCFALLBACK][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_SYNCFALLBACK_DISPATCH_LAST;
+#if defined(SUPPORT_RGX)
+ /* Need a gap here to start next entry at element 128 */
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXCMP][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTA3D][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_BREAKPOINT][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_BREAKPOINT][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEBUGMISC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_DEBUGMISC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_FIRST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXPDUMP][PVR_DISPATCH_OFFSET_LAST_FUNC]= PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXHWPERF][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXRAY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_REGCONFIG][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_REGCONFIG][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TIMERQUERY][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_TIMERQUERY][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXKICKSYNC][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXKICKSYNC_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXSIGNALS][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXSIGNALS_DISPATCH_LAST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_FIRST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_FIRST;
+ g_BridgeDispatchTableStartOffsets[PVRSRV_BRIDGE_RGXTQ2][PVR_DISPATCH_OFFSET_LAST_FUNC] = PVRSRV_BRIDGE_RGXTQ2_DISPATCH_LAST;
+#endif
+}
+
+#if defined(DEBUG_BRIDGE_KM)
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void __user *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyFromUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyFromUserBytes+=ui32Size;
+ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void __user *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ g_BridgeDispatchTable[ui32DispatchTableEntry].ui32CopyToUserTotalBytes+=ui32Size;
+ g_BridgeGlobalStats.ui32TotalCopyToUserBytes+=ui32Size;
+ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#else
+INLINE PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void __user *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+ return OSBridgeCopyFromUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+INLINE PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void __user *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size)
+{
+ PVR_UNREFERENCED_PARAMETER (ui32DispatchTableEntry);
+ return OSBridgeCopyToUser(psConnection, pvDest, pvSrc, ui32Size);
+}
+#endif
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32ClientBuildOptions,
+ IMG_UINT32 ui32ClientDDKVersion,
+ IMG_UINT32 ui32ClientDDKBuild,
+ IMG_UINT8 *pui8KernelArch,
+ IMG_UINT32 *pui32CapabilityFlags,
+ IMG_UINT32 *ui32PVRBridges,
+ IMG_UINT32 *ui32RGXBridges)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32BuildOptions, ui32BuildOptionsMismatch;
+ IMG_UINT32 ui32DDKVersion, ui32DDKBuild;
+ PVRSRV_DATA *psSRVData = NULL;
+ IMG_UINT64 ui64ProcessVASpaceSize = OSGetCurrentProcessVASpaceSize();
+ static IMG_BOOL bIsFirstConnection=IMG_FALSE;
+
+ /* Clear the flags */
+ *pui32CapabilityFlags = 0;
+
+ psSRVData = PVRSRVGetPVRSRVData();
+
+ psConnection->ui32ClientFlags = ui32Flags;
+
+ /* output the available bridges */
+ *ui32PVRBridges = gui32PVRBridges;
+#if defined(SUPPORT_RGX)
+ *ui32RGXBridges = gui32RGXBridges;
+#else
+ *ui32RGXBridges = 0;
+#endif
+
+ /* Is the system snooping of caches emulated in software? */
+ if (PVRSRVSystemSnoopingIsEmulated(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_EMULATE_FLAG;
+ }
+ else
+ {
+		/* Set flags to pass back to the client showing which cache coherency is available. */
+		/* Is the system CPU cache coherent? */
+ if (PVRSRVSystemSnoopingOfCPUCache(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_DEVICE_FLAG;
+ }
+		/* Is the system device cache coherent? */
+ if (PVRSRVSystemSnoopingOfDeviceCache(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_CACHE_COHERENT_CPU_FLAG;
+ }
+ }
+
+	/* Does the system device have non-mappable local memory? */
+ if (PVRSRVSystemHasNonMappableLocalMemory(psDeviceNode->psDevConfig))
+ {
+ *pui32CapabilityFlags |= PVRSRV_NONMAPPABLE_MEMORY_PRESENT_FLAG;
+ }
+
+ /* Set flags to indicate shared-virtual-memory (SVM) allocation availability */
+ if (! psDeviceNode->ui64GeneralSVMHeapTopVA || ! ui64ProcessVASpaceSize)
+ {
+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_UNSUPPORTED;
+ }
+ else
+ {
+ if (ui64ProcessVASpaceSize <= psDeviceNode->ui64GeneralSVMHeapTopVA)
+ {
+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_SUPPORTED;
+ }
+ else
+ {
+			/* This can happen when the processor has more virtual address bits
+			   than the device (i.e. the alloc is not always guaranteed to succeed) */
+ *pui32CapabilityFlags |= PVRSRV_DEVMEM_SVM_ALLOC_CANFAIL;
+ }
+ }
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+{
+ PVRSRV_RGXDEV_INFO *psDevInfo = psDeviceNode->pvDevice;
+ IMG_UINT32 ui32OSid = 0, ui32OSidReg = 0;
+ IMG_BOOL bOSidAxiProtReg = IMG_FALSE;
+
+ IMG_PID pIDCurrent = OSGetCurrentClientProcessIDKM();
+
+ ui32OSid = (ui32Flags & SRV_VIRTVAL_FLAG_OSID_MASK) >> (VIRTVAL_FLAG_OSID_SHIFT);
+ ui32OSidReg = (ui32Flags & SRV_VIRTVAL_FLAG_OSIDREG_MASK) >> (VIRTVAL_FLAG_OSIDREG_SHIFT);
+
+#if defined(EMULATOR)
+
+ if(psDevInfo->sDevFeatureCfg.ui64Features & RGX_FEATURE_AXI_ACELITE_BIT_MASK)
+ {
+ IMG_UINT32 ui32OSidAxiProtReg = 0, ui32OSidAxiProtTD = 0;
+
+ ui32OSidAxiProtReg = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPREG_MASK) >> (VIRTVAL_FLAG_AXIPREG_SHIFT);
+ ui32OSidAxiProtTD = (ui32Flags & SRV_VIRTVAL_FLAG_AXIPTD_MASK) >> (VIRTVAL_FLAG_AXIPTD_SHIFT);
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "[AxiProt & Virt]: Setting bOSidAxiProt of Emulator's Trusted Device for Catbase %d to %s",
+ ui32OSidReg,
+ (ui32OSidAxiProtTD == 1)?"TRUE":"FALSE"));
+
+ bOSidAxiProtReg = ui32OSidAxiProtReg == 1;
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "[AxiProt & Virt]: Setting bOSidAxiProt of FW's Register for Catbase %d to %s",
+ ui32OSidReg,
+ bOSidAxiProtReg?"TRUE":"FALSE"));
+
+ SetAxiProtOSid(ui32OSidReg, ui32OSidAxiProtTD);
+ }
+
+#endif
+
+ InsertPidOSidsCoupling(pIDCurrent, ui32OSid, ui32OSidReg, bOSidAxiProtReg);
+
+ PVR_DPF((PVR_DBG_MESSAGE,"[GPU Virtualization Validation]: OSIDs: %d, %d\n",ui32OSid, ui32OSidReg));
+}
+#endif
+
+#if defined(SUPPORT_WORKLOAD_ESTIMATION)
+ /* Only enabled if enabled in the UM */
+ if(!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_WORKLOAD_ESTIMATION_MASK))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVConnectKM: Workload Estimation disabled. Not enabled in UM."));
+ }
+#endif
+
+#if defined(SUPPORT_PDVFS)
+ /* Only enabled if enabled in the UM */
+ if(!(ui32ClientBuildOptions & RGX_BUILD_OPTIONS_KM & OPTIONS_PDVFS_MASK))
+ {
+ PVR_DPF((PVR_DBG_ERROR,"PVRSRVConnectKM: Proactive DVFS disabled. Not enabled in UM."));
+ }
+#endif
+
+ ui32DDKVersion = PVRVERSION_PACK(PVRVERSION_MAJ, PVRVERSION_MIN);
+ ui32DDKBuild = PVRVERSION_BUILD;
+
+	if (ui32Flags & SRV_FLAGS_CLIENT_64BIT_COMPAT)
+	{
+		psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_64BIT;
+	}
+	else
+	{
+		psSRVData->sDriverInfo.ui8UMSupportedArch |= BUILD_ARCH_32BIT;
+	}
+
+ if(IMG_FALSE == bIsFirstConnection)
+ {
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildOptions = ui32ClientBuildOptions;
+
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildVersion = ui32DDKVersion;
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildVersion = ui32ClientDDKVersion;
+
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildRevision = ui32DDKBuild;
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildRevision = ui32ClientDDKBuild;
+
+ psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType = ((RGX_BUILD_OPTIONS_KM) & OPTIONS_DEBUG_MASK)? \
+ BUILD_TYPE_DEBUG:BUILD_TYPE_RELEASE;
+ psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType = (ui32ClientBuildOptions & OPTIONS_DEBUG_MASK)? \
+ BUILD_TYPE_DEBUG:BUILD_TYPE_RELEASE;
+
+ if (sizeof(void *) == POINTER_SIZE_64BIT)
+ {
+ psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_64BIT;
+ }
+ else
+ {
+ psSRVData->sDriverInfo.ui8KMBitArch |= BUILD_ARCH_32BIT;
+ }
+
+ }
+
+	/* Mask out every option that is not kernel-specific */
+ ui32ClientBuildOptions &= RGX_BUILD_OPTIONS_MASK_KM;
+
+ /*
+ * Validate the build options
+ */
+ ui32BuildOptions = (RGX_BUILD_OPTIONS_KM);
+ if (ui32BuildOptions != ui32ClientBuildOptions)
+ {
+ ui32BuildOptionsMismatch = ui32BuildOptions ^ ui32ClientBuildOptions;
+#if !defined(PVRSRV_STRICT_COMPAT_CHECK)
+		/* Mask the debug flag option out, as we do support combinations of debug vs release in UM & KM */
+ ui32BuildOptionsMismatch &= OPTIONS_STRICT;
+#endif
+ if ( (ui32ClientBuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+ "extra options present in client-side driver: (0x%x). Please check rgx_options.h",
+ __FUNCTION__,
+ ui32ClientBuildOptions & ui32BuildOptionsMismatch ));
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+
+ if ( (ui32BuildOptions & ui32BuildOptionsMismatch) != 0)
+ {
+ PVR_LOG(("(FAIL) %s: Mismatch in client-side and KM driver build options; "
+ "extra options present in KM driver: (0x%x). Please check rgx_options.h",
+ __FUNCTION__,
+ ui32BuildOptions & ui32BuildOptionsMismatch ));
+ eError = PVRSRV_ERROR_BUILD_OPTIONS_MISMATCH;
+ goto chk_exit;
+ }
+ if(IMG_FALSE == bIsFirstConnection)
+ {
+ PVR_LOG(("%s: COMPAT_TEST: Client-side (0x%04x) (%s) and KM driver (0x%04x) (%s) build options differ.",
+ __FUNCTION__,
+ ui32ClientBuildOptions,
+ (psSRVData->sDriverInfo.sUMBuildInfo.ui32BuildType)?"release":"debug",
+ ui32BuildOptions,
+ (psSRVData->sDriverInfo.sKMBuildInfo.ui32BuildType)?"release":"debug"));
+		}
+		else
+		{
+ PVR_DPF((PVR_DBG_WARNING, "%s: COMPAT_TEST: Client-side (0x%04x) and KM driver (0x%04x) build options differ.",
+ __FUNCTION__,
+ ui32ClientBuildOptions,
+ ui32BuildOptions));
+
+ }
+ if(!psSRVData->sDriverInfo.bIsNoMatch)
+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: Client-side and KM driver build options match. [ OK ]", __FUNCTION__));
+ }
+
+ /*
+ * Validate DDK version
+ */
+ if (ui32ClientDDKVersion != ui32DDKVersion)
+ {
+ if(!psSRVData->sDriverInfo.bIsNoMatch)
+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+ PVR_LOG(("(FAIL) %s: Incompatible driver DDK version (%u.%u) / client DDK version (%u.%u).",
+ __FUNCTION__,
+ PVRVERSION_MAJ, PVRVERSION_MIN,
+ PVRVERSION_UNPACK_MAJ(ui32ClientDDKVersion),
+ PVRVERSION_UNPACK_MIN(ui32ClientDDKVersion)));
+ eError = PVRSRV_ERROR_DDK_VERSION_MISMATCH;
+ PVR_DBG_BREAK;
+ goto chk_exit;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK version (%u.%u) and client DDK version (%u.%u) match. [ OK ]",
+ __FUNCTION__,
+ PVRVERSION_MAJ, PVRVERSION_MIN, PVRVERSION_MAJ, PVRVERSION_MIN));
+ }
+
+	/* Create a stream for every connection except for the special clients
+	 * that don't need it, e.g. recipients of HWPerf data. */
+ if (!(psConnection->ui32ClientFlags & SRV_NO_HWPERF_CLIENT_STREAM))
+ {
+ IMG_CHAR acStreamName[PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ OSSNPrintf(acStreamName, PRVSRVTL_MAX_STREAM_NAME_SIZE,
+ PVRSRV_TL_HWPERF_HOST_CLIENT_STREAM_FMTSPEC,
+ psDeviceNode->sDevId.i32UMIdentifier,
+ psConnection->pid);
+
+ eError = TLStreamCreate(&psConnection->hClientTLStream, psDeviceNode,
+ acStreamName,
+ PVRSRV_CLIENT_TL_STREAM_SIZE_DEFAULT,
+ TL_OPMODE_DROP_NEWER |
+ TL_FLAG_ALLOCATE_ON_FIRST_OPEN,
+ NULL, NULL, NULL, NULL);
+ if (eError != PVRSRV_OK && eError != PVRSRV_ERROR_ALREADY_EXISTS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Could not create private TL stream (%s)",
+ PVRSRVGetErrorStringKM(eError)));
+ psConnection->hClientTLStream = NULL;
+ }
+ else if (eError == PVRSRV_OK)
+ {
+			/* Set the "tlctrl" stream as a notification channel. This channel
+			 * is used to notify recipients about stream open/close (by writer)
+ * actions (and possibly other actions in the future). */
+ eError = TLStreamSetNotifStream(psConnection->hClientTLStream,
+ psSRVData->hTLCtrlStream);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to set notification stream"));
+ TLStreamClose(psConnection->hClientTLStream);
+ psConnection->hClientTLStream = NULL;
+ }
+ }
+
+ /* Reset error status. Don't want to propagate any errors from here */
+ eError = PVRSRV_OK;
+ PVR_DPF((PVR_DBG_MESSAGE, "Created stream \"%s\".", acStreamName));
+ }
+
+ /*
+ * Validate DDK build
+ */
+ if (ui32ClientDDKBuild != ui32DDKBuild)
+ {
+ if(!psSRVData->sDriverInfo.bIsNoMatch)
+ psSRVData->sDriverInfo.bIsNoMatch = IMG_TRUE;
+ PVR_DPF((PVR_DBG_WARNING, "%s: Mismatch in driver DDK revision (%d) / client DDK revision (%d).",
+ __FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+#if defined(PVRSRV_STRICT_COMPAT_CHECK)
+ eError = PVRSRV_ERROR_DDK_BUILD_MISMATCH;
+ PVR_DBG_BREAK;
+ goto chk_exit;
+#endif
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: COMPAT_TEST: driver DDK revision (%d) and client DDK revision (%d) match. [ OK ]",
+ __FUNCTION__, ui32DDKBuild, ui32ClientDDKBuild));
+ }
+
+ /* Success so far so is it the PDump client that is connecting? */
+ if (ui32Flags & SRV_FLAGS_PDUMPCTRL)
+ {
+ PDumpConnectionNotify();
+ }
+
+ PVR_ASSERT(pui8KernelArch != NULL);
+
+ if (psSRVData->sDriverInfo.ui8KMBitArch & BUILD_ARCH_64BIT)
+ {
+ *pui8KernelArch = 64;
+ }
+ else
+ {
+ *pui8KernelArch = 32;
+ }
+
+ bIsFirstConnection = IMG_TRUE;
+
+#if defined(DEBUG_BRIDGE_KM)
+ {
+ int ii;
+
+ /* dump dispatch table offset lookup table */
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: g_BridgeDispatchTableStartOffsets[0-%lu] entries:", __FUNCTION__, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT - 1));
+ for (ii=0; ii < BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT; ii++)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "g_BridgeDispatchTableStartOffsets[%d]: %u", ii, g_BridgeDispatchTableStartOffsets[ii][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+ }
+ }
+#endif
+
+#if defined(PDUMP)
+ if (!(ui32Flags & SRV_FLAGS_PDUMPCTRL))
+ {
+ IMG_UINT64 ui64PDumpState = 0;
+
+ PDumpGetStateKM(&ui64PDumpState);
+ if(ui64PDumpState & PDUMP_STATE_CONNECTED)
+ {
+ *pui32CapabilityFlags |= PVRSRV_PDUMP_IS_RECORDING;
+ }
+
+ PVR_LOG(("PDump: Client App connected"));
+ }
+#endif
+
+chk_exit:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void)
+{
+ /* just return OK, per-process data is cleaned up by resmgr */
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVAcquireGlobalEventObjectKM
+@Description Acquire the global event object.
+@Output phGlobalEventObject On success, points to the global event
+ object handle
+@Return PVRSRV_ERROR PVRSRV_OK on success or an error
+ otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ *phGlobalEventObject = psPVRSRVData->hGlobalEventObject;
+
+ return PVRSRV_OK;
+}
+
+/**************************************************************************/ /*!
+@Function PVRSRVReleaseGlobalEventObjectKM
+@Description Release the global event object.
+@Input          hGlobalEventObject    Global event object handle
+@Return PVRSRV_ERROR PVRSRV_OK on success or an error otherwise
+*/ /***************************************************************************/
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ PVR_ASSERT(psPVRSRVData->hGlobalEventObject == hGlobalEventObject);
+
+ return PVRSRV_OK;
+}
+
+/*
+ PVRSRVDumpDebugInfoKM
+*/
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32VerbLevel)
+{
+ if (ui32VerbLevel > DEBUG_REQUEST_VERBOSITY_MAX)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ PVR_LOG(("User requested PVR debug info"));
+
+ PVRSRVDebugRequest(psDeviceNode, ui32VerbLevel, NULL, NULL);
+
+ return PVRSRV_OK;
+}
+
+/*
+ PVRSRVGetDevClockSpeedKM
+*/
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_PUINT32 pui32RGXClockSpeed)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_ASSERT(psDeviceNode->pfnDeviceClockSpeed != NULL);
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ eError = psDeviceNode->pfnDeviceClockSpeed(psDeviceNode, pui32RGXClockSpeed);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDevClockSpeedKM: "
+ "Could not get device clock speed (%d)!",
+ eError));
+ }
+
+ return eError;
+}
+
+
+/*
+ PVRSRVHWOpTimeoutKM
+*/
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode)
+{
+#if defined(PVRSRV_RESET_ON_HWTIMEOUT)
+ PVR_LOG(("User requested OS reset"));
+ OSPanic();
+#endif
+ PVR_LOG(("HW operation timeout, dump server info"));
+ PVRSRVDebugRequest(psDeviceNode, DEBUG_REQUEST_VERBOSITY_MEDIUM, NULL, NULL);
+ return PVRSRV_OK;
+}
+
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+ void *psBridgeIn,
+ void *psBridgeOut,
+ CONNECTION_DATA *psConnection)
+{
+ PVR_UNREFERENCED_PARAMETER(psBridgeIn);
+ PVR_UNREFERENCED_PARAMETER(psBridgeOut);
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u (%s) mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32DispatchTableEntry, g_BridgeDispatchTable[ui32DispatchTableEntry].pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE ERROR: ui32DispatchTableEntry %u mapped to "
+ "Dummy Wrapper (probably not what you want!)",
+ __FUNCTION__, ui32DispatchTableEntry));
+#endif
+ return PVRSRV_ERROR_BRIDGE_ENOTTY;
+}
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32AlignChecksSize,
+ IMG_UINT32 aui32AlignChecks[])
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+#if !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS)
+
+ PVR_ASSERT(psDeviceNode->pfnAlignmentCheck != NULL);
+ return psDeviceNode->pfnAlignmentCheck(psDeviceNode, ui32AlignChecksSize,
+ aui32AlignChecks);
+
+#else
+
+ PVR_UNREFERENCED_PARAMETER(psDeviceNode);
+ PVR_UNREFERENCED_PARAMETER(ui32AlignChecksSize);
+ PVR_UNREFERENCED_PARAMETER(aui32AlignChecks);
+
+ return PVRSRV_OK;
+
+#endif /* !defined(NO_HARDWARE) && defined(RGXFW_ALIGNCHECKS) */
+
+}
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32DeviceStatus)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* First try to update the status. */
+ if (psDeviceNode->pfnUpdateHealthStatus != NULL)
+ {
+ PVRSRV_ERROR eError = psDeviceNode->pfnUpdateHealthStatus(psDeviceNode,
+ IMG_FALSE);
+ if (eError != PVRSRV_OK)
+ {
+			PVR_DPF((PVR_DBG_WARNING, "PVRSRVGetDeviceStatusKM: Failed to "
+			         "check for device status (%d)", eError));
+
+ /* Return unknown status and error because we don't know what
+ * happened and if the status is valid. */
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+ return eError;
+ }
+ }
+
+ switch (OSAtomicRead(&psDeviceNode->eHealthStatus))
+ {
+ case PVRSRV_DEVICE_HEALTH_STATUS_OK:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_OK;
+ return PVRSRV_OK;
+ case PVRSRV_DEVICE_HEALTH_STATUS_NOT_RESPONDING:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_NOT_RESPONDING;
+ return PVRSRV_OK;
+ case PVRSRV_DEVICE_HEALTH_STATUS_DEAD:
+ case PVRSRV_DEVICE_HEALTH_STATUS_FAULT:
+ case PVRSRV_DEVICE_HEALTH_STATUS_UNDEFINED:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_DEVICE_ERROR;
+ return PVRSRV_OK;
+ default:
+ *pui32DeviceStatus = PVRSRV_DEVICE_STATUS_UNKNOWN;
+ return PVRSRV_ERROR_INTERNAL_ERROR;
+ }
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for removing entries in the g_BridgeDispatchTable array.
+ * All this does is zero the entry to allow for a full table re-population
+ * later.
+ *
+ * @param ui32BridgeGroup  Bridge group the entry belongs to
+ * @param ui32Index        Index of the entry within its group
+ *
+ * @return None
+ ********************************************************************************/
+void
+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+ IMG_UINT32 ui32Index)
+{
+ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+ g_BridgeDispatchTable[ui32Index].pfFunction = NULL;
+ g_BridgeDispatchTable[ui32Index].hBridgeLock = NULL;
+ g_BridgeDispatchTable[ui32Index].bUseLock = 0;
+#if defined(DEBUG_BRIDGE_KM)
+ g_BridgeDispatchTable[ui32Index].pszIOCName = NULL;
+ g_BridgeDispatchTable[ui32Index].pszFunctionName = NULL;
+ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = NULL;
+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+}
+
+/*!
+ * *****************************************************************************
+ * @brief A wrapper for filling in the g_BridgeDispatchTable array that does
+ * error checking.
+ *
+ * @param ui32BridgeGroup
+ * @param ui32Index
+ * @param pszIOCName
+ * @param pfFunction
+ * @param pszFunctionName
+ * @param hBridgeLock
+ * @param pszBridgeLockName
+ * @param bUseLock
+ *
+ * @return None
+ ********************************************************************************/
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+ IMG_UINT32 ui32Index,
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName,
+ POS_LOCK hBridgeLock,
+ const IMG_CHAR *pszBridgeLockName,
+ IMG_BOOL bUseLock)
+{
+ static IMG_UINT32 ui32PrevIndex = IMG_UINT32_MAX; /* -1 */
+
+#if !defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE) && !defined(DEBUG_BRIDGE_KM)
+ PVR_UNREFERENCED_PARAMETER(pszFunctionName);
+ PVR_UNREFERENCED_PARAMETER(pszBridgeLockName);
+#endif
+
+ ui32Index += g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+ /* Enable this to dump out the dispatch table entries */
+ PVR_DPF((PVR_DBG_WARNING, "%s: g_BridgeDispatchTableStartOffsets[%d]=%d", __FUNCTION__, ui32BridgeGroup, g_BridgeDispatchTableStartOffsets[ui32BridgeGroup][PVR_DISPATCH_OFFSET_FIRST_FUNC]));
+ PVR_DPF((PVR_DBG_WARNING, "%s: %d %s %s %s", __FUNCTION__, ui32Index, pszIOCName, pszFunctionName, pszBridgeLockName));
+#endif
+
+	/* Any gaps are sub-optimal in terms of memory usage, but we are mainly
+ * interested in spotting any large gap of wasted memory that could be
+ * accidentally introduced.
+ *
+ * This will currently flag up any gaps > 5 entries.
+ *
+ * NOTE: This shouldn't be debug only since switching from debug->release
+ * etc is likely to modify the available ioctls and thus be a point where
+ * mistakes are exposed. This isn't run at a performance critical time.
+ */
+ if((ui32PrevIndex != IMG_UINT32_MAX) &&
+ ((ui32Index >= ui32PrevIndex + DISPATCH_TABLE_GAP_THRESHOLD) ||
+ (ui32Index <= ui32PrevIndex)))
+ {
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: There is a gap in the dispatch table between indices %u (%s) and %u (%s)",
+ __FUNCTION__, ui32PrevIndex, g_BridgeDispatchTable[ui32PrevIndex].pszIOCName,
+ ui32Index, pszIOCName));
+#else
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "%s: There is a gap in the dispatch table between indices %u and %u (%s)",
+ __FUNCTION__, (IMG_UINT)ui32PrevIndex, (IMG_UINT)ui32Index, pszIOCName));
+#endif
+ }
+
+ if (ui32Index >= BRIDGE_DISPATCH_TABLE_ENTRY_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Index %u (%s) out of range",
+ __FUNCTION__, (IMG_UINT)ui32Index, pszIOCName));
+
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_ERROR, "%s: BRIDGE_DISPATCH_TABLE_ENTRY_COUNT = %lu",
+ __FUNCTION__, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT));
+#if defined(SUPPORT_RGX)
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_TIMERQUERY_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_REGCONFIG_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXRAY_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXHWPERF_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXPDUMP_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_DEBUGMISC_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_BREAKPOINT_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXTA3D_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXCMP_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST = %lu\n",
+ __FUNCTION__, PVRSRV_BRIDGE_RGXTQ_DISPATCH_LAST));
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGX_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_RGX_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_RGX_LAST));
+#endif
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_DEVICEMEMHISTORY_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_TUTILS_DISPATCH_LAST));
+ PVR_DPF((PVR_DBG_ERROR, "%s: PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST = %lu",
+ __FUNCTION__, PVRSRV_BRIDGE_VALIDATION_DISPATCH_LAST));
+#endif
+
+ OSPanic();
+ }
+
+ /* Panic if the previous entry has been overwritten as this is not allowed!
+ * NOTE: This shouldn't be debug only since switching from debug->release
+ * etc is likely to modify the available ioctls and thus be a point where
+ * mistakes are exposed. This isn't run at a performance critical time.
+ */
+ if(g_BridgeDispatchTable[ui32Index].pfFunction)
+ {
+ if(g_BridgeDispatchTable[ui32Index].pfFunction != pfFunction)
+ {
+#if defined(DEBUG_BRIDGE_KM_DISPATCH_TABLE)
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Adding dispatch table entry for %s clobbers an existing entry for %s (current pfn=<%p>, new pfn=<%p>)",
+ __FUNCTION__, pszIOCName, g_BridgeDispatchTable[ui32Index].pszIOCName,
+ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+#else
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Adding dispatch table entry for %s clobbers an existing entry (index=%u). (current pfn=<%p>, new pfn=<%p>)",
+ __FUNCTION__, pszIOCName, ui32Index,
+ (void*)g_BridgeDispatchTable[ui32Index].pfFunction, (void*)pfFunction));
+ PVR_DPF((PVR_DBG_WARNING, "NOTE: Enabling DEBUG_BRIDGE_KM_DISPATCH_TABLE may help debug this issue."));
+#endif
+ OSPanic();
+ }
+ }
+ else
+ {
+ g_BridgeDispatchTable[ui32Index].pfFunction = pfFunction;
+ g_BridgeDispatchTable[ui32Index].hBridgeLock = hBridgeLock;
+ g_BridgeDispatchTable[ui32Index].bUseLock = bUseLock;
+#if defined(DEBUG_BRIDGE_KM)
+ g_BridgeDispatchTable[ui32Index].pszIOCName = pszIOCName;
+ g_BridgeDispatchTable[ui32Index].pszFunctionName = pszFunctionName;
+ g_BridgeDispatchTable[ui32Index].pszBridgeLockName = pszBridgeLockName;
+ g_BridgeDispatchTable[ui32Index].ui32CallCount = 0;
+ g_BridgeDispatchTable[ui32Index].ui32CopyFromUserTotalBytes = 0;
+ g_BridgeDispatchTable[ui32Index].ui64TotalTimeNS = 0;
+ g_BridgeDispatchTable[ui32Index].ui64MaxTimeNS = 0;
+#endif
+ }
+
+ ui32PrevIndex = ui32Index;
+}
+
+
+PVRSRV_ERROR BridgeInit(void)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+#if defined(DEBUG_BRIDGE_KM)
+ eError = OSLockCreate(&g_hStatsLock, LOCK_TYPE_PASSIVE);
+
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to create bridge stats lock"));
+ return eError;
+ }
+#endif
+
+ return eError;
+}
+
+void BridgeDeinit(void)
+{
+#if defined(DEBUG_BRIDGE_KM)
+ if(g_hStatsLock)
+ {
+ OSLockDestroy(g_hStatsLock);
+ g_hStatsLock = NULL;
+ }
+#endif
+}
+
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM)
+{
+
+ void *psBridgeIn = NULL;
+ void *psBridgeOut = NULL;
+ BridgeWrapperFunction pfBridgeHandler;
+ IMG_UINT32 ui32DispatchTableEntry, ui32GroupBoundary;
+ PVRSRV_ERROR err = PVRSRV_OK;
+ PVRSRV_POOL_TOKEN hBridgeBufferPoolToken = NULL;
+ IMG_UINT32 ui32Timestamp = OSClockus();
+#if defined(DEBUG_BRIDGE_KM)
+ IMG_UINT64 ui64TimeStart;
+ IMG_UINT64 ui64TimeEnd;
+ IMG_UINT64 ui64TimeDiff;
+#endif
+#if !defined(INTEGRITY_OS)
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+#endif
+ IMG_UINT32 ui32DispatchTableIndex, ui32DispatchTableEntryIndex;
+
+#if defined(DEBUG_BRIDGE_KM_STOP_AT_DISPATCH)
+ PVR_DBG_BREAK;
+#endif
+
+ if(psBridgePackageKM->ui32BridgeID >= BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Out of range dispatch table group ID: %d",
+ __FUNCTION__, psBridgePackageKM->ui32BridgeID));
+ err = PVRSRV_ERROR_BRIDGE_EINVAL;
+ goto return_error;
+ }
+
+ ui32DispatchTableIndex = OSConfineArrayIndexNoSpeculation(psBridgePackageKM->ui32BridgeID, BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT);
+
+ ui32DispatchTableEntry = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_FIRST_FUNC];
+ ui32GroupBoundary = g_BridgeDispatchTableStartOffsets[ui32DispatchTableIndex][PVR_DISPATCH_OFFSET_LAST_FUNC];
+
+ /* bridge function is not implemented in this build */
+ if(0 == ui32DispatchTableEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+ __FUNCTION__,
+ ui32DispatchTableEntry, ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+ /* this points to DummyBW() which returns PVRSRV_ERROR_ENOTTY */
+ err = g_BridgeDispatchTable[ui32DispatchTableEntry].pfFunction(ui32DispatchTableEntry,
+ psBridgeIn,
+ psBridgeOut,
+ psConnection);
+ goto return_error;
+ }
+ if((ui32DispatchTableEntry + psBridgePackageKM->ui32FunctionID) > ui32GroupBoundary)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, boundary = %d, (bridge module %d, function %d)",
+ __FUNCTION__,
+ ui32DispatchTableEntry, ui32GroupBoundary, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+ err = PVRSRV_ERROR_BRIDGE_EINVAL;
+ goto return_error;
+ }
+ ui32DispatchTableEntry += psBridgePackageKM->ui32FunctionID;
+ ui32DispatchTableEntryIndex = OSConfineArrayIndexNoSpeculation(ui32DispatchTableEntry, ui32GroupBoundary+1);
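+ /* For illustration: if this group's PVR_DISPATCH_OFFSET_FIRST_FUNC is S
+ * and the caller asked for function F, the handler lives at absolute
+ * table index S + F. The confine call above only clamps the value seen
+ * under speculation; the real range checks are the group-boundary test
+ * before it and the entry-count test below.
+ */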
+ if(BRIDGE_DISPATCH_TABLE_ENTRY_COUNT <= ui32DispatchTableEntry)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Dispatch table entry=%d, entry count = %lu,"
+ " (bridge module %d, function %d)", __FUNCTION__,
+ ui32DispatchTableEntry, BRIDGE_DISPATCH_TABLE_ENTRY_COUNT,
+ psBridgePackageKM->ui32BridgeID,
+ psBridgePackageKM->ui32FunctionID));
+ err = PVRSRV_ERROR_BRIDGE_EINVAL;
+ goto return_error;
+ }
+#if defined(DEBUG_BRIDGE_KM)
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Dispatch table entry index=%d, (bridge module %d, function %d)",
+ __FUNCTION__,
+ ui32DispatchTableEntryIndex, psBridgePackageKM->ui32BridgeID, psBridgePackageKM->ui32FunctionID));
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: %s",
+ __FUNCTION__,
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pszIOCName));
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui32CallCount++;
+ g_BridgeGlobalStats.ui32IOCTLCount++;
+#endif
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+ {
+ /* Acquire default global bridge lock if calling module has no independent lock */
+ OSAcquireBridgeLock();
+
+ /* Request for global bridge buffers */
+ OSGetGlobalBridgeBuffers(&psBridgeIn,
+ &psBridgeOut);
+ }
+ else
+#endif /* PVRSRV_USE_BRIDGE_LOCK */
+ {
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+ {
+ OSLockAcquire(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock);
+ }
+#endif
+#if !defined(INTEGRITY_OS)
+ /* try to acquire a bridge buffer from the pool */
+
+ err = PVRSRVPoolGet(psPVRSRVData->psBridgeBufferPool,
+ &hBridgeBufferPoolToken,
+ &psBridgeIn);
+
+ if(err != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to get bridge buffer from global pool"));
+ goto unlock_and_return_error;
+ }
+
+ psBridgeOut = ((IMG_BYTE *) psBridgeIn) + PVRSRV_MAX_BRIDGE_IN_SIZE;
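+ /* The pool token hands back a single allocation; the out-buffer is
+ * simply the tail of the same block, presumably sized to hold at
+ * least PVRSRV_MAX_BRIDGE_IN_SIZE + PVRSRV_MAX_BRIDGE_OUT_SIZE bytes
+ * (the pool's buffer size is not visible in this file).
+ */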
+#endif
+ }
+
+#if defined(DEBUG_BRIDGE_KM)
+ ui64TimeStart = OSClockns64();
+#endif
+
+ if (psBridgePackageKM->ui32InBufferSize > PVRSRV_MAX_BRIDGE_IN_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge input buffer too small "
+ "(data size %u, buffer size %u)!", __FUNCTION__,
+ psBridgePackageKM->ui32InBufferSize, PVRSRV_MAX_BRIDGE_IN_SIZE));
+ err = PVRSRV_ERROR_BRIDGE_ERANGE;
+ goto unlock_and_return_error;
+ }
+
+#if !defined(INTEGRITY_OS)
+ if (psBridgePackageKM->ui32OutBufferSize > PVRSRV_MAX_BRIDGE_OUT_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Bridge output buffer too small "
+ "(data size %u, buffer size %u)!", __FUNCTION__,
+ psBridgePackageKM->ui32OutBufferSize, PVRSRV_MAX_BRIDGE_OUT_SIZE));
+ err = PVRSRV_ERROR_BRIDGE_ERANGE;
+ goto unlock_and_return_error;
+ }
+
+ if((CopyFromUserWrapper (psConnection,
+ ui32DispatchTableEntryIndex,
+ psBridgeIn,
+ psBridgePackageKM->pvParamIn,
+ psBridgePackageKM->ui32InBufferSize) != PVRSRV_OK)
+#if defined __QNXNTO__
+/* For Neutrino, the output bridge buffer acts as an input as well */
+ || (CopyFromUserWrapper(psConnection,
+ ui32DispatchTableEntryIndex,
+ psBridgeOut,
+ (void *)((uintptr_t)psBridgePackageKM->pvParamIn + psBridgePackageKM->ui32InBufferSize),
+ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+#endif
+ ) /* end of if-condition */
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: CopyFromUserWrapper returned an error!", __FUNCTION__));
+ err = PVRSRV_ERROR_BRIDGE_EFAULT;
+ goto unlock_and_return_error;
+ }
+#else
+ psBridgeIn = psBridgePackageKM->pvParamIn;
+ psBridgeOut = psBridgePackageKM->pvParamOut;
+#endif
+
+ pfBridgeHandler =
+ (BridgeWrapperFunction)g_BridgeDispatchTable[ui32DispatchTableEntryIndex].pfFunction;
+
+ if (pfBridgeHandler == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: ui32DispatchTableEntry = %d is not a registered function!",
+ __FUNCTION__, ui32DispatchTableEntry));
+ err = PVRSRV_ERROR_BRIDGE_EFAULT;
+ goto unlock_and_return_error;
+ }
+
+ /* pfBridgeHandler functions are not expected to fail; they return an
+ * IMG_INT whose value is currently always 0 (i.e. PVRSRV_OK).
+ * In the event this changes an error may be +ve or -ve,
+ * so try to return something consistent here.
+ */
+ if (0 != pfBridgeHandler(ui32DispatchTableEntryIndex,
+ psBridgeIn,
+ psBridgeOut,
+ psConnection)
+ )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: pfBridgeHandler returned an error", __FUNCTION__));
+ err = PVRSRV_ERROR_BRIDGE_EPERM;
+ goto unlock_and_return_error;
+ }
+
+ /*
+ This should always be true as, at the moment, all bridge calls have
+ to return at least an error status in the output buffer, but this
+ could change, so we do this check to be safe.
+ */
+ if (psBridgePackageKM->ui32OutBufferSize > 0)
+ {
+#if !defined(INTEGRITY_OS)
+ if (CopyToUserWrapper (psConnection,
+ ui32DispatchTableEntryIndex,
+ psBridgePackageKM->pvParamOut,
+ psBridgeOut,
+ psBridgePackageKM->ui32OutBufferSize) != PVRSRV_OK)
+ {
+ err = PVRSRV_ERROR_BRIDGE_EFAULT;
+ goto unlock_and_return_error;
+ }
+#endif
+ }
+
+#if defined(DEBUG_BRIDGE_KM)
+ ui64TimeEnd = OSClockns64();
+
+ ui64TimeDiff = ui64TimeEnd - ui64TimeStart;
+
+ /* if there is no lock held then acquire the stats lock to
+ * ensure the calculations are done safely
+ */
+ if(!g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+ {
+ BridgeGlobalStatsLock();
+ }
+
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64TotalTimeNS += ui64TimeDiff;
+
+ if(ui64TimeDiff > g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS)
+ {
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].ui64MaxTimeNS = ui64TimeDiff;
+ }
+
+ if(!g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+ {
+ BridgeGlobalStatsUnlock();
+ }
+#endif
+
+unlock_and_return_error:
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock == NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+ {
+ OSReleaseBridgeLock();
+ }
+ else
+#endif
+ {
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ if (g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock != NULL &&
+ g_BridgeDispatchTable[ui32DispatchTableEntryIndex].bUseLock)
+ {
+ OSLockRelease(g_BridgeDispatchTable[ui32DispatchTableEntryIndex].hBridgeLock);
+ }
+#endif
+
+#if !defined(INTEGRITY_OS)
+ if (hBridgeBufferPoolToken != NULL)
+ {
+ err = PVRSRVPoolPut(psPVRSRVData->psBridgeBufferPool,
+ hBridgeBufferPoolToken);
+
+ if(err != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to return bridge buffer to global pool"));
+ }
+ }
+#endif
+ }
+
+return_error:
+ if (err)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: returning (err = %d)", __FUNCTION__, err));
+ }
+ /* ignore transport layer bridge to avoid HTB flooding */
+ if (psBridgePackageKM->ui32BridgeID != PVRSRV_BRIDGE_PVRTL)
+ {
+ if (err)
+ {
+ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL_ERR, ui32Timestamp,
+ psBridgePackageKM->ui32BridgeID,
+ psBridgePackageKM->ui32FunctionID, err);
+ }
+ else
+ {
+ HTBLOGK(HTB_SF_BRG_BRIDGE_CALL, ui32Timestamp,
+ psBridgePackageKM->ui32BridgeID,
+ psBridgePackageKM->ui32FunctionID);
+ }
+ }
+
+ return err;
+}
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid, IMG_UINT32 ui32ArrSize, IMG_BOOL bAllProcessStats, IMG_UINT32 *pui32MemStatArray)
+{
+#if !defined(__QNXNTO__)
+ return PVRSRVFindProcessMemStats(pid,
+ ui32ArrSize,
+ bAllProcessStats,
+ pui32MemStatArray);
+#else
+ PVR_DPF((PVR_DBG_ERROR, "This functionality is not yet implemented for this platform"));
+
+ return PVRSRV_ERROR_NOT_SUPPORTED;
+#endif
+
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/srvcore.h b/drivers/gpu/drm/img-rogue/1.10/srvcore.h
new file mode 100644
index 00000000000000..c3fa14f75c9bde
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/srvcore.h
@@ -0,0 +1,213 @@
+/**************************************************************************/ /*!
+@File
+@Title PVR Bridge Functionality
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header for the PVR Bridge code
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef __BRIDGED_PVR_BRIDGE_H__
+#define __BRIDGED_PVR_BRIDGE_H__
+
+#include "lock_types.h"
+#include "connection_server.h"
+#include "pvr_debug.h"
+
+#include "pvr_bridge.h"
+#if defined(SUPPORT_RGX)
+#include "rgx_bridge.h"
+#endif
+
+PVRSRV_ERROR
+CopyFromUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void *pvDest,
+ void __user *pvSrc,
+ IMG_UINT32 ui32Size);
+PVRSRV_ERROR
+CopyToUserWrapper(CONNECTION_DATA *psConnection,
+ IMG_UINT32 ui32DispatchTableEntry,
+ void __user *pvDest,
+ void *pvSrc,
+ IMG_UINT32 ui32Size);
+
+IMG_INT
+DummyBW(IMG_UINT32 ui32DispatchTableEntry,
+ void *psBridgeIn,
+ void *psBridgeOut,
+ CONNECTION_DATA *psConnection);
+
+typedef IMG_INT (*BridgeWrapperFunction)(IMG_UINT32 ui32DispatchTableEntry,
+ void *psBridgeIn,
+ void *psBridgeOut,
+ CONNECTION_DATA *psConnection);
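+
+/* Each wrapper receives the resolved dispatch table index plus the
+ * kernel-side copies of the ioctl in/out parameter blocks;
+ * BridgedDispatchKM() performs the user-space copies around the call, so a
+ * wrapper only validates the arguments before calling into srvkm proper.
+ */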
+
+typedef struct _PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY
+{
+ BridgeWrapperFunction pfFunction; /*!< The wrapper function that validates the ioctl
+ arguments before calling into srvkm proper */
+ POS_LOCK hBridgeLock; /*!< The bridge lock which needs to be acquired
+ before calling the above wrapper */
+ IMG_BOOL bUseLock; /*!< Specify whether to use a bridge lock at all */
+#if defined(DEBUG_BRIDGE_KM)
+ const IMG_CHAR *pszIOCName; /*!< Name of the ioctl: e.g. "PVRSRV_BRIDGE_CONNECT_SERVICES" */
+ const IMG_CHAR *pszFunctionName; /*!< Name of the wrapper function: e.g. "PVRSRVConnectBW" */
+ const IMG_CHAR *pszBridgeLockName; /*!< Name of bridge lock which will be acquired */
+ IMG_UINT32 ui32CallCount; /*!< The total number of times the ioctl has been called */
+ IMG_UINT32 ui32CopyFromUserTotalBytes; /*!< The total number of bytes copied from
+ userspace within this ioctl */
+ IMG_UINT32 ui32CopyToUserTotalBytes; /*!< The total number of bytes copied to
+ userspace within this ioctl */
+ IMG_UINT64 ui64TotalTimeNS; /*!< The total amount of time spent in this bridge function */
+ IMG_UINT64 ui64MaxTimeNS; /*!< The maximum amount of time for a single call to this bridge function */
+#endif
+}PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY;
+
+#if defined(SUPPORT_RGX)
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_DISPATCH_LAST+1)
+ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_RGX_LAST+1)
+#else
+ #define BRIDGE_DISPATCH_TABLE_ENTRY_COUNT (PVRSRV_BRIDGE_DISPATCH_LAST+1)
+ #define BRIDGE_DISPATCH_TABLE_START_ENTRY_COUNT (PVRSRV_BRIDGE_LAST+1)
+#endif
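+
+/* In other words: for a SUPPORT_RGX build the dispatch table has one slot
+ * per bridge function up to PVRSRV_BRIDGE_RGX_DISPATCH_LAST, and the
+ * start-offset table one row per bridge group up to PVRSRV_BRIDGE_RGX_LAST;
+ * non-RGX builds stop at the plain services equivalents.
+ */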
+
+extern PVRSRV_BRIDGE_DISPATCH_TABLE_ENTRY g_BridgeDispatchTable[BRIDGE_DISPATCH_TABLE_ENTRY_COUNT];
+
+void BridgeDispatchTableStartOffsetsInit(void);
+
+void
+_SetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+ IMG_UINT32 ui32Index,
+ const IMG_CHAR *pszIOCName,
+ BridgeWrapperFunction pfFunction,
+ const IMG_CHAR *pszFunctionName,
+ POS_LOCK hBridgeLock,
+ const IMG_CHAR* pszBridgeLockName,
+ IMG_BOOL bUseLock );
+void
+UnsetDispatchTableEntry(IMG_UINT32 ui32BridgeGroup,
+ IMG_UINT32 ui32Index);
+
+
+/* PRQA S 0884,3410 2*/ /* macro relies on the lack of brackets */
+#define SetDispatchTableEntry(ui32BridgeGroup, ui32Index, pfFunction,\
+ hBridgeLock, bUseLock) \
+ _SetDispatchTableEntry(ui32BridgeGroup, ui32Index, #ui32Index, (BridgeWrapperFunction)pfFunction, #pfFunction,\
+ (POS_LOCK)hBridgeLock, #hBridgeLock, bUseLock )
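+
+/* A hypothetical registration, as a bridge module's init code might issue
+ * it (the group and index names here are illustrative only; the struct
+ * comments above use "PVRSRV_BRIDGE_CONNECT_SERVICES"/"PVRSRVConnectBW" as
+ * examples):
+ *
+ * SetDispatchTableEntry(PVRSRV_BRIDGE_SRVCORE, PVRSRV_BRIDGE_CONNECT_SERVICES,
+ * PVRSRVConnectBW, NULL, IMG_TRUE);
+ *
+ * The stringified arguments then populate pszIOCName, pszFunctionName and
+ * pszBridgeLockName for the DEBUG_BRIDGE_KM bookkeeping.
+ */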
+
+#define DISPATCH_TABLE_GAP_THRESHOLD 5
+
+
+#if defined(DEBUG_BRIDGE_KM)
+typedef struct _PVRSRV_BRIDGE_GLOBAL_STATS
+{
+ IMG_UINT32 ui32IOCTLCount;
+ IMG_UINT32 ui32TotalCopyFromUserBytes;
+ IMG_UINT32 ui32TotalCopyToUserBytes;
+} PVRSRV_BRIDGE_GLOBAL_STATS;
+
+void BridgeGlobalStatsLock(void);
+void BridgeGlobalStatsUnlock(void);
+
+/* OS specific code may want to report the stats held here and within the
+ * BRIDGE_DISPATCH_TABLE_ENTRYs (E.g. on Linux we report these via a
+ * debugfs entry /sys/kernel/debug/pvr/bridge_stats) */
+extern PVRSRV_BRIDGE_GLOBAL_STATS g_BridgeGlobalStats;
+#endif
+
+PVRSRV_ERROR BridgeInit(void);
+void BridgeDeinit(void);
+
+PVRSRV_ERROR BridgedDispatchKM(CONNECTION_DATA * psConnection,
+ PVRSRV_BRIDGE_PACKAGE * psBridgePackageKM);
+
+
+PVRSRV_ERROR
+PVRSRVConnectKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32Flags,
+ IMG_UINT32 ui32ClientBuildOptions,
+ IMG_UINT32 ui32ClientDDKVersion,
+ IMG_UINT32 ui32ClientDDKBuild,
+ IMG_UINT8 *pui8KernelArch,
+ IMG_UINT32 *ui32CapabilityFlags,
+ IMG_UINT32 *ui32PVRBridges,
+ IMG_UINT32 *ui32RGXBridges);
+
+PVRSRV_ERROR
+PVRSRVDisconnectKM(void);
+
+PVRSRV_ERROR
+PVRSRVAcquireGlobalEventObjectKM(IMG_HANDLE *phGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVReleaseGlobalEventObjectKM(IMG_HANDLE hGlobalEventObject);
+
+PVRSRV_ERROR
+PVRSRVDumpDebugInfoKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 ui32VerbLevel);
+
+PVRSRV_ERROR
+PVRSRVGetDevClockSpeedKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_PUINT32 pui32RGXClockSpeed);
+
+PVRSRV_ERROR
+PVRSRVHWOpTimeoutKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR PVRSRVAlignmentCheckKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDeviceNode,
+ IMG_UINT32 ui32FWAlignChecksSize,
+ IMG_UINT32 aui32FWAlignChecks[]);
+
+PVRSRV_ERROR PVRSRVGetDeviceStatusKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDeviceNode,
+ IMG_UINT32 *pui32DeviceStatus);
+
+PVRSRV_ERROR PVRSRVFindProcessMemStatsKM(IMG_PID pid,
+ IMG_UINT32 ui32ArrSize,
+ IMG_BOOL bAllProcessStats,
+ IMG_UINT32 *ui32MemoryStats);
+
+#endif /* __BRIDGED_PVR_BRIDGE_H__ */
+
+/******************************************************************************
+ End of file (srvcore.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/srvinit.h b/drivers/gpu/drm/img-rogue/1.10/srvinit.h
new file mode 100644
index 00000000000000..5fe3ee2233c68a
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/srvinit.h
@@ -0,0 +1,68 @@
+/*************************************************************************/ /*!
+@File
+@Title Initialisation server internal header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the connections between the various parts of the
+ initialisation server.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SRVINIT_H__
+#define __SRVINIT_H__
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "device_connection.h"
+#include "device.h"
+
+#if defined(SUPPORT_RGX)
+PVRSRV_ERROR RGXInit(PVRSRV_DEVICE_NODE *psDeviceNode);
+#endif
+
+#if defined (__cplusplus)
+}
+#endif
+#endif /* __SRVINIT_H__ */
+
+/******************************************************************************
+ End of file (srvinit.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/srvkm.h b/drivers/gpu/drm/img-rogue/1.10/srvkm.h
new file mode 100644
index 00000000000000..1ce8218d6b3486
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/srvkm.h
@@ -0,0 +1,141 @@
+/**************************************************************************/ /*!
+@File
+@Title Services kernel module internal header file
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef SRVKM_H
+#define SRVKM_H
+
+#include "servicesext.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+struct _PVRSRV_DEVICE_NODE_;
+
+/*************************************************************************/ /*!
+@Function PVRSRVDriverInit
+@Description Performs one time initialisation of Services.
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV PVRSRVDriverInit(void);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDriverDeInit
+@Description Performs one time de-initialisation of Services.
+@Return void
+*/ /**************************************************************************/
+void IMG_CALLCONV PVRSRVDriverDeInit(void);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDeviceCreate
+@Description Creates a PVR Services device node for an OS native device.
+@Input pvOSDevice OS native device
+@Input i32UMIdentifier A unique identifier which helps recognize this
+ device in the UM (user-mode) space.
+@Output ppsDeviceNode Points to the new device node on success
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceCreate(void *pvOSDevice, IMG_INT32 i32UMIdentifier,
+ struct _PVRSRV_DEVICE_NODE_ **ppsDeviceNode);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDeviceInitialise
+@Description Initialises the given device, created by PVRSRVDeviceCreate, so
+ that it is in a functional state, ready to be used.
+@Input psDeviceNode Device node of the device to be initialised
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR PVRSRVDeviceInitialise(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+/*************************************************************************/ /*!
+@Function PVRSRVDeviceDestroy
+@Description Destroys a PVR Services device node.
+@Input psDeviceNode Device node to destroy
+@Return PVRSRV_ERROR PVRSRV_OK on success and an error otherwise
+*/ /**************************************************************************/
+PVRSRV_ERROR IMG_CALLCONV
+PVRSRVDeviceDestroy(struct _PVRSRV_DEVICE_NODE_ *psDeviceNode);
+
+/******************
+HIGHER LEVEL MACROS
+*******************/
+
+/*----------------------------------------------------------------------------
+Repeats the body of the loop for a certain minimum time, or until the body
+exits by its own means (break, return, goto, etc.)
+
+Example of usage:
+
+LOOP_UNTIL_TIMEOUT(MAX_HW_TIME_US)
+{
+ if(psQueueInfo->ui32ReadOffset == psQueueInfo->ui32WriteOffset)
+ {
+ bTimeout = IMG_FALSE;
+ break;
+ }
+
+ OSWaitus(MAX_HW_TIME_US/WAIT_TRY_COUNT);
+} END_LOOP_UNTIL_TIMEOUT();
+
+-----------------------------------------------------------------------------*/
+
+/* iNotLastLoop will remain at 1 until the timeout has expired, at which time
+ * it will be decremented and the loop executed one final time. This is necessary
+ * when preemption is enabled.
+ */
+/* PRQA S 3411,3431 12 */ /* critical format, leave alone */
+#define LOOP_UNTIL_TIMEOUT(TIMEOUT) \
+{\
+ IMG_UINT32 uiOffset, uiStart, uiCurrent; \
+ IMG_INT32 iNotLastLoop; \
+ for(uiOffset = 0, uiStart = OSClockus(), uiCurrent = uiStart + 1, iNotLastLoop = 1;\
+ ((uiCurrent - uiStart + uiOffset) < (TIMEOUT)) || iNotLastLoop--; \
+ uiCurrent = OSClockus(), \
+ uiOffset = uiCurrent < uiStart ? IMG_UINT32_MAX - uiStart : uiOffset, \
+ uiStart = uiCurrent < uiStart ? 0 : uiStart)
+
+#define END_LOOP_UNTIL_TIMEOUT() \
+}
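+
+/* Note on the wrap handling above: uiStart/uiCurrent hold a 32-bit
+ * microsecond clock, so if uiCurrent wraps below uiStart the time already
+ * spent (IMG_UINT32_MAX - uiStart) is banked in uiOffset and uiStart is
+ * reset to zero; the loop condition keeps comparing elapsed-plus-banked
+ * time against TIMEOUT, and iNotLastLoop buys one final iteration after
+ * the timeout expires.
+ */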
+
+#endif /* SRVKM_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync.c b/drivers/gpu/drm/img-rogue/1.10/sync.c
new file mode 100644
index 00000000000000..a6667d7b0846f9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync.c
@@ -0,0 +1,2090 @@
+/*************************************************************************/ /*!
+@File
+@Title Services synchronisation interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements client side code for services synchronisation
+ interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "client_sync_bridge.h"
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#include "client_synctracking_bridge.h"
+#endif
+#include "pvr_bridge.h"
+#include "allocmem.h"
+#include "osfunc.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "lock.h"
+#include "log2.h"
+/* FIXME */
+#if defined(__KERNEL__)
+#include "pvrsrv.h"
+#endif
+
+
+#define SYNC_BLOCK_LIST_CHUNCK_SIZE 10
+
+/*
+ This defines the maximum amount of synchronisation memory
+ that can be allocated per SyncPrim context.
+ In reality this number is meaningless as we would run out
+ of synchronisation memory before we reach this limit, but
+ we need to provide a size to the span RA.
+ */
+#define MAX_SYNC_MEM (4 * 1024 * 1024)
+
+typedef struct _SYNC_BLOCK_LIST_
+{
+ IMG_UINT32 ui32BlockCount; /*!< Number of blocks in the list */
+ IMG_UINT32 ui32BlockListSize; /*!< Size of the block array */
+ SYNC_PRIM_BLOCK **papsSyncPrimBlock; /*!< Array of syncprim blocks */
+} SYNC_BLOCK_LIST;
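+
+/* The papsSyncPrimBlock array starts at SYNC_BLOCK_LIST_CHUNCK_SIZE entries
+ * and is grown by another chunk whenever _SyncPrimBlockListAdd() runs out
+ * of space, copying the old array across and freeing it.
+ */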
+
+typedef struct _SYNC_OP_COOKIE_
+{
+ IMG_UINT32 ui32SyncCount;
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 ui32ServerSyncCount;
+ IMG_BOOL bHaveServerSync;
+ IMG_HANDLE hBridge;
+ IMG_HANDLE hServerCookie;
+
+ SYNC_BLOCK_LIST *psSyncBlockList;
+ PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim;
+ /*
+ Client sync(s) info.
+ If this changes update the calculation of ui32ClientAllocSize
+ */
+ IMG_UINT32 *paui32SyncBlockIndex;
+ IMG_UINT32 *paui32Index;
+ IMG_UINT32 *paui32Flags;
+ IMG_UINT32 *paui32FenceValue;
+ IMG_UINT32 *paui32UpdateValue;
+
+ /*
+ Server sync(s) info
+ If this changes update the calculation of ui32ServerAllocSize
+ */
+ IMG_HANDLE *pahServerSync;
+ IMG_UINT32 *paui32ServerFlags;
+} SYNC_OP_COOKIE;
+
+/* forward declaration */
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value);
+
+/*
+ Internal interfaces for management of SYNC_PRIM_CONTEXT
+ */
+static void
+_SyncPrimContextUnref(SYNC_PRIM_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextUnref context already freed"));
+ }
+ else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+ {
+ /* SyncPrimContextDestroy only when no longer referenced */
+ RA_Delete(psContext->psSpanRA);
+ RA_Delete(psContext->psSubAllocRA);
+ OSFreeMem(psContext);
+ }
+}
+
+static void
+_SyncPrimContextRef(SYNC_PRIM_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_SyncPrimContextRef context use after free"));
+ }
+ else
+ {
+ OSAtomicIncrement(&psContext->hRefCount);
+ }
+}
+
+/*
+ Internal interfaces for management of synchronisation block memory
+ */
+static PVRSRV_ERROR
+AllocSyncPrimitiveBlock(SYNC_PRIM_CONTEXT *psContext,
+ SYNC_PRIM_BLOCK **ppsSyncBlock)
+{
+ SYNC_PRIM_BLOCK *psSyncBlk;
+ IMG_HANDLE hSyncPMR;
+ IMG_HANDLE hSyncImportHandle;
+ IMG_DEVMEM_SIZE_T uiImportSize;
+ PVRSRV_ERROR eError;
+
+ psSyncBlk = OSAllocMem(sizeof(SYNC_PRIM_BLOCK));
+ if (psSyncBlk == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+ psSyncBlk->psContext = psContext;
+
+ /* Allocate sync prim block */
+ eError = BridgeAllocSyncPrimitiveBlock(psContext->hDevConnection,
+ &psSyncBlk->hServerSyncPrimBlock,
+ &psSyncBlk->ui32FirmwareAddr,
+ &psSyncBlk->ui32SyncBlockSize,
+ &hSyncPMR);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_blockalloc;
+ }
+
+ /* Make it mappable by the client */
+ eError = DevmemMakeLocalImportHandle(psContext->hDevConnection,
+ hSyncPMR,
+ &hSyncImportHandle);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_export;
+ }
+
+ /* Get CPU mapping of the memory block */
+ eError = DevmemLocalImport(psContext->hDevConnection,
+ hSyncImportHandle,
+ PVRSRV_MEMALLOCFLAG_CPU_READABLE,
+ &psSyncBlk->hMemDesc,
+ &uiImportSize,
+ "SyncPrimitiveBlock");
+
+ /*
+ Regardless of success or failure we "undo" the export
+ */
+ DevmemUnmakeLocalImportHandle(psContext->hDevConnection,
+ hSyncImportHandle);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_import;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+ (void **) &psSyncBlk->pui32LinAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_cpuvaddr;
+ }
+
+ *ppsSyncBlock = psSyncBlk;
+ return PVRSRV_OK;
+
+ fail_cpuvaddr:
+ DevmemFree(psSyncBlk->hMemDesc);
+ fail_import:
+ fail_export:
+ BridgeFreeSyncPrimitiveBlock(psContext->hDevConnection,
+ psSyncBlk->hServerSyncPrimBlock);
+ fail_blockalloc:
+ OSFreeMem(psSyncBlk);
+ fail_alloc:
+ return eError;
+}
+
+static void
+FreeSyncPrimitiveBlock(SYNC_PRIM_BLOCK *psSyncBlk)
+{
+ SYNC_PRIM_CONTEXT *psContext = psSyncBlk->psContext;
+
+ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+ DevmemFree(psSyncBlk->hMemDesc);
+ BridgeFreeSyncPrimitiveBlock(psContext->hDevConnection,
+ psSyncBlk->hServerSyncPrimBlock);
+ OSFreeMem(psSyncBlk);
+}
+
+static PVRSRV_ERROR
+SyncPrimBlockImport(RA_PERARENA_HANDLE hArena,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phImport)
+{
+ SYNC_PRIM_CONTEXT *psContext = hArena;
+ SYNC_PRIM_BLOCK *psSyncBlock = NULL;
+ RA_LENGTH_T uiSpanSize;
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(uFlags);
+
+ /* Check we've not been called with an unexpected size */
+ if (!hArena || sizeof(IMG_UINT32) != uSize)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /*
+ Ensure the syncprim context doesn't go away while we have sync blocks
+ attached to it
+ */
+ _SyncPrimContextRef(psContext);
+
+ /* Allocate the block of memory */
+ eError = AllocSyncPrimitiveBlock(psContext, &psSyncBlock);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate syncprim block (%d)", eError));
+ goto fail_syncblockalloc;
+ }
+
+ /* Allocate a span for it */
+ eError = RA_Alloc(psContext->psSpanRA,
+ psSyncBlock->ui32SyncBlockSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ psSyncBlock->ui32SyncBlockSize,
+ pszAnnotation,
+ &psSyncBlock->uiSpanBase,
+ &uiSpanSize,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_spanalloc;
+ }
+
+ /*
+ There is no reason the span RA should return an allocation larger
+ than we request
+ */
+ PVR_ASSERT(uiSpanSize == psSyncBlock->ui32SyncBlockSize);
+
+ *puiBase = psSyncBlock->uiSpanBase;
+ *puiActualSize = psSyncBlock->ui32SyncBlockSize;
+ *phImport = psSyncBlock;
+ return PVRSRV_OK;
+
+ fail_spanalloc:
+ FreeSyncPrimitiveBlock(psSyncBlock);
+ fail_syncblockalloc:
+ _SyncPrimContextUnref(psContext);
+ e0:
+ return eError;
+}
+
+static void
+SyncPrimBlockUnimport(RA_PERARENA_HANDLE hArena,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hImport)
+{
+ SYNC_PRIM_CONTEXT *psContext = hArena;
+ SYNC_PRIM_BLOCK *psSyncBlock = hImport;
+
+ if (!psContext || !psSyncBlock || uiBase != psSyncBlock->uiSpanBase)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input params", __FUNCTION__));
+ return;
+ }
+
+ /* Free the span this import is using */
+ RA_Free(psContext->psSpanRA, uiBase);
+
+ /* Free the syncprim block */
+ FreeSyncPrimitiveBlock(psSyncBlock);
+
+ /* Drop our reference to the syncprim context */
+ _SyncPrimContextUnref(psContext);
+}
+
+static INLINE IMG_UINT32 SyncPrimGetOffset(SYNC_PRIM *psSyncInt)
+{
+ IMG_UINT64 ui64Temp;
+
+ PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL);
+
+ /* FIXME: Subtracting one 64-bit address from another and then implicitly
+ * casting the result to a 32-bit number. We need to review all call
+ * sequences that use this function; explicit casting has been added for
+ * now.
+ */
+ ui64Temp = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+ PVR_ASSERT(ui64Temp<IMG_UINT32_MAX);
+ return (IMG_UINT32)ui64Temp;
+}
+
+static void SyncPrimGetCPULinAddr(SYNC_PRIM *psSyncInt)
+{
+ SYNC_PRIM_BLOCK *psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+
+ psSyncInt->sCommon.pui32LinAddr = psSyncBlock->pui32LinAddr +
+ (SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32));
+}
+
+static void SyncPrimLocalFree(SYNC_PRIM *psSyncInt)
+{
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ {
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hConn =
+ psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if(PVRSRVIsBridgeEnabled(hConn, PVRSRV_BRIDGE_SYNCTRACKING))
+ {
+ if(psSyncInt->u.sLocal.hRecord)
+ {
+ /* remove this sync record */
+ eError = BridgeSyncRecordRemoveByHandle(hConn,
+ psSyncInt->u.sLocal.hRecord);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to remove SyncRecord", __FUNCTION__));
+ }
+ }
+ }
+ else
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+ {
+ IMG_UINT32 ui32FWAddr = psSyncBlock->ui32FirmwareAddr +
+ SyncPrimGetOffset(psSyncInt);
+
+ eError = BridgeSyncFreeEvent(hConn, ui32FWAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "BridgeSyncAllocEvent failed with error:"
+ " %d", eError));
+ }
+ }
+ }
+#if defined(PVRSRV_ENABLE_SYNC_POISONING)
+ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_POISON_VALUE);
+#else
+ /* Reset the sync prim value as it is freed.
+ * This guarantees that a sync subsequently allocated to a client will
+ * have a value of zero and the client does not need to
+ * explicitly initialise the sync value to zero.
+ * The allocation of the backing memory for the sync prim block
+ * is done with ZERO_ON_ALLOC so the memory is initially all zero.
+ */
+ (void) _SyncPrimSetValue(psSyncInt, LOCAL_SYNC_PRIM_RESET_VALUE);
+#endif
+
+ RA_Free(psContext->psSubAllocRA, psSyncInt->u.sLocal.uiSpanAddr);
+ OSFreeMem(psSyncInt);
+ _SyncPrimContextUnref(psContext);
+}
+
+static void SyncPrimServerFree(SYNC_PRIM *psSyncInt)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeServerSyncFree(psSyncInt->u.sServer.hBridge,
+ psSyncInt->u.sServer.hServerSync);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimServerFree failed"));
+ }
+ OSFreeMem(psSyncInt);
+}
+
+static void SyncPrimLocalUnref(SYNC_PRIM *psSyncInt)
+{
+ if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalUnref sync already freed"));
+ }
+ else if (0 == OSAtomicDecrement(&psSyncInt->u.sLocal.hRefCount))
+ {
+ SyncPrimLocalFree(psSyncInt);
+ }
+}
+
+static void SyncPrimLocalRef(SYNC_PRIM *psSyncInt)
+{
+ if (!OSAtomicRead(&psSyncInt->u.sLocal.hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimLocalRef sync use after free"));
+ }
+ else
+ {
+ OSAtomicIncrement(&psSyncInt->u.sLocal.hRefCount);
+ }
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrLocal(SYNC_PRIM *psSyncInt)
+{
+ SYNC_PRIM_BLOCK *psSyncBlock;
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ return psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psSyncInt);
+}
+
+static IMG_UINT32 SyncPrimGetFirmwareAddrServer(SYNC_PRIM *psSyncInt)
+{
+ return psSyncInt->u.sServer.ui32FirmwareAddr;
+}
+
+#if !defined(__KERNEL__)
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleLocal(SYNC_PRIM *psSyncInt)
+{
+ return psSyncInt->u.sLocal.psSyncBlock->psContext->hDevConnection;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandleServer(SYNC_PRIM *psSyncInt)
+{
+ return psSyncInt->u.sServer.hBridge;
+}
+
+static SYNC_BRIDGE_HANDLE _SyncPrimGetBridgeHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ SYNC_PRIM *psSyncInt;
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ return _SyncPrimGetBridgeHandleLocal(psSyncInt);
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ return _SyncPrimGetBridgeHandleServer(psSyncInt);
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "_SyncPrimGetBridgeHandle: Invalid sync type"));
+ /*
+ Either the client has given us a bad pointer or there is an
+ error in this module
+ */
+ return 0;
+ }
+}
+#endif
+
+/*
+ Internal interfaces for management of syncprim block lists
+ */
+static SYNC_BLOCK_LIST *_SyncPrimBlockListCreate(void)
+{
+ SYNC_BLOCK_LIST *psBlockList;
+
+ psBlockList = OSAllocMem(sizeof(SYNC_BLOCK_LIST));
+ if (!psBlockList)
+ {
+ return NULL;
+ }
+
+ psBlockList->ui32BlockCount = 0;
+ psBlockList->ui32BlockListSize = SYNC_BLOCK_LIST_CHUNCK_SIZE;
+
+ psBlockList->papsSyncPrimBlock = OSAllocZMem(sizeof(SYNC_PRIM_BLOCK *)
+ * SYNC_BLOCK_LIST_CHUNCK_SIZE);
+ if (!psBlockList->papsSyncPrimBlock)
+ {
+ OSFreeMem(psBlockList);
+ return NULL;
+ }
+
+ return psBlockList;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListAdd(SYNC_BLOCK_LIST *psBlockList,
+ SYNC_PRIM_BLOCK *psSyncPrimBlock)
+{
+ IMG_UINT32 i;
+
+ /* Check the block isn't already on the list */
+ for (i=0;i<psBlockList->ui32BlockCount;i++)
+ {
+ if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+ {
+ return PVRSRV_OK;
+ }
+ }
+
+ /* Check we have space for a new item */
+ if (psBlockList->ui32BlockCount == psBlockList->ui32BlockListSize)
+ {
+ SYNC_PRIM_BLOCK **papsNewSyncPrimBlock;
+
+ papsNewSyncPrimBlock = OSAllocMem(sizeof(SYNC_PRIM_BLOCK *) *
+ (psBlockList->ui32BlockListSize +
+ SYNC_BLOCK_LIST_CHUNCK_SIZE));
+ if (!papsNewSyncPrimBlock)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ OSCachedMemCopy(papsNewSyncPrimBlock,
+ psBlockList->papsSyncPrimBlock,
+ sizeof(SYNC_PRIM_BLOCK *) *
+ psBlockList->ui32BlockListSize);
+
+ OSFreeMem(psBlockList->papsSyncPrimBlock);
+
+ psBlockList->papsSyncPrimBlock = papsNewSyncPrimBlock;
+ psBlockList->ui32BlockListSize += SYNC_BLOCK_LIST_CHUNCK_SIZE;
+ }
+
+ /* Add the block to the list */
+ psBlockList->papsSyncPrimBlock[psBlockList->ui32BlockCount++] = psSyncPrimBlock;
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListBlockToIndex(SYNC_BLOCK_LIST *psBlockList,
+ SYNC_PRIM_BLOCK *psSyncPrimBlock,
+ IMG_UINT32 *pui32Index)
+{
+ IMG_UINT32 i;
+
+ for (i=0;i<psBlockList->ui32BlockCount;i++)
+ {
+ if (psBlockList->papsSyncPrimBlock[i] == psSyncPrimBlock)
+ {
+ *pui32Index = i;
+ return PVRSRV_OK;
+ }
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+}
+
+static PVRSRV_ERROR _SyncPrimBlockListHandleArrayCreate(SYNC_BLOCK_LIST *psBlockList,
+ IMG_UINT32 *pui32BlockHandleCount,
+ IMG_HANDLE **ppahHandleList)
+{
+ IMG_HANDLE *pahHandleList;
+ IMG_UINT32 i;
+
+ pahHandleList = OSAllocMem(sizeof(IMG_HANDLE) *
+ psBlockList->ui32BlockCount);
+ if (!pahHandleList)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (i=0;i<psBlockList->ui32BlockCount;i++)
+ {
+ pahHandleList[i] = psBlockList->papsSyncPrimBlock[i]->hServerSyncPrimBlock;
+ }
+
+ *ppahHandleList = pahHandleList;
+ *pui32BlockHandleCount = psBlockList->ui32BlockCount;
+
+ return PVRSRV_OK;
+}
+
+static void _SyncPrimBlockListHandleArrayDestroy(IMG_HANDLE *pahHandleList)
+{
+ OSFreeMem(pahHandleList);
+}
+
+static IMG_UINT32 _SyncPrimBlockListGetClientValue(SYNC_BLOCK_LIST *psBlockList,
+ IMG_UINT32 ui32BlockIndex,
+ IMG_UINT32 ui32Index)
+{
+ return *((IMG_UINT32 __force *)(psBlockList->papsSyncPrimBlock[ui32BlockIndex]->pui32LinAddr)+ui32Index);
+}
+
+static void _SyncPrimBlockListDestroy(SYNC_BLOCK_LIST *psBlockList)
+{
+ OSFreeMem(psBlockList->papsSyncPrimBlock);
+ OSFreeMem(psBlockList);
+}
+
+
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+ PVR_ASSERT(IsPower2(ui32Align));
+ return ExactLog2(ui32Align);
+}
+
+/*
+ External interfaces
+ */
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+ PSYNC_PRIM_CONTEXT *phSyncPrimContext)
+{
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ psContext = OSAllocMem(sizeof(SYNC_PRIM_CONTEXT));
+ if (psContext == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psContext->hDevConnection = hDevConnection;
+
+ OSSNPrintf(psContext->azName, SYNC_PRIM_NAME_SIZE, "Sync Prim RA-%p", psContext);
+ OSSNPrintf(psContext->azSpanName, SYNC_PRIM_NAME_SIZE, "Sync Prim span RA-%p", psContext);
+
+ /*
+ Create the RA for sub-allocations of the SyncPrims
+
+ Note:
+ The import size doesn't matter here as the server will pass
+ back the block size when it does the import, which overrides
+ what we specify here.
+ */
+
+ psContext->psSubAllocRA = RA_Create(psContext->azName,
+ /* Params for imports */
+ _Log2(sizeof(IMG_UINT32)),
+ RA_LOCKCLASS_2,
+ SyncPrimBlockImport,
+ SyncPrimBlockUnimport,
+ psContext,
+ IMG_FALSE);
+ if (psContext->psSubAllocRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_suballoc;
+ }
+
+ /*
+ Create the span-management RA
+
+ The RA requires that we work with linear spans. For our use
+ here we don't require this behaviour as we're always working
+ within offsets of blocks (imports). However, we need to keep
+ the RA happy, so we create the "span" management RA, which
+ ensures that all our imports are added to the RA in a linear
+ fashion.
+ */
+ psContext->psSpanRA = RA_Create(psContext->azSpanName,
+ /* Params for imports */
+ 0,
+ RA_LOCKCLASS_1,
+ NULL,
+ NULL,
+ NULL,
+ IMG_FALSE);
+ if (psContext->psSpanRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_span;
+ }
+
+ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_MEM, 0, NULL))
+ {
+ RA_Delete(psContext->psSpanRA);
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_span;
+ }
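+
+ /* The resulting arrangement: SyncPrimAlloc() takes 4-byte sub-allocations
+ * from psSubAllocRA; when that RA needs more backing it calls
+ * SyncPrimBlockImport(), which allocates a real server sync block and
+ * then carves a linear span for it out of psSpanRA. The span RA never
+ * manages real memory, only the 0..MAX_SYNC_MEM bookkeeping added here.
+ */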
+
+ OSAtomicWrite(&psContext->hRefCount, 1);
+
+ *phSyncPrimContext = psContext;
+ return PVRSRV_OK;
+ fail_span:
+ RA_Delete(psContext->psSubAllocRA);
+ fail_suballoc:
+ OSFreeMem(psContext);
+ fail_alloc:
+ return eError;
+}
+
+IMG_INTERNAL void SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext)
+{
+ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+ if (1 != OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s attempted with active references, may be the result of a race", __FUNCTION__));
+ }
+#if defined(PVRSRV_FORCE_UNLOAD_IF_BAD_STATE)
+#if defined(__KERNEL__)
+ if (PVRSRVGetPVRSRVData()->eServicesState != PVRSRV_SERVICES_STATE_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Forcing context destruction due to bad driver state.", __FUNCTION__));
+ OSAtomicWrite(&psContext->hRefCount, 1);
+ }
+#endif
+#endif
+ _SyncPrimContextUnref(psContext);
+}
+
+static PVRSRV_ERROR _SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName,
+ IMG_BOOL bServerSync)
+{
+ SYNC_PRIM_CONTEXT *psContext = hSyncPrimContext;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM *psNewSync;
+ PVRSRV_ERROR eError;
+ RA_BASE_T uiSpanAddr;
+
+ if (!hSyncPrimContext)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid context", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psNewSync = OSAllocMem(sizeof(SYNC_PRIM));
+ if (psNewSync == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ eError = RA_Alloc(psContext->psSubAllocRA,
+ sizeof(IMG_UINT32),
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ sizeof(IMG_UINT32),
+ "Sync_Prim",
+ &uiSpanAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psSyncBlock);
+ if (PVRSRV_OK != eError)
+ {
+ goto fail_raalloc;
+ }
+ psNewSync->eType = SYNC_PRIM_TYPE_LOCAL;
+ OSAtomicWrite(&psNewSync->u.sLocal.hRefCount, 1);
+ psNewSync->u.sLocal.uiSpanAddr = uiSpanAddr;
+ psNewSync->u.sLocal.psSyncBlock = psSyncBlock;
+ SyncPrimGetCPULinAddr(psNewSync);
+ *ppsSync = &psNewSync->sCommon;
+ _SyncPrimContextRef(psContext);
+#if defined(PVRSRV_ENABLE_SYNC_POISONING)
+ (void) _SyncPrimSetValue(psNewSync, LOCAL_SYNC_PRIM_RESET_VALUE);
+#endif
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if(PVRSRVIsBridgeEnabled(psSyncBlock->psContext->hDevConnection, PVRSRV_BRIDGE_SYNCTRACKING))
+ {
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+ size_t uiSize;
+
+ if(pszClassName)
+ {
+ uiSize = OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN);
+ /* Copy the class name annotation into a fixed-size array */
+ OSCachedMemCopy(szClassName, pszClassName, uiSize);
+ if (uiSize == SYNC_MAX_CLASS_NAME_LEN)
+ szClassName[SYNC_MAX_CLASS_NAME_LEN-1] = '\0';
+ else
+ szClassName[uiSize++] = '\0';
+ }
+ else
+ {
+ /* No class name annotation */
+ uiSize = 0;
+ szClassName[0] = '\0';
+ }
+
+ /* record this sync */
+ eError = BridgeSyncRecordAdd(
+ psSyncBlock->psContext->hDevConnection,
+ &psNewSync->u.sLocal.hRecord,
+ psSyncBlock->hServerSyncPrimBlock,
+ psSyncBlock->ui32FirmwareAddr,
+ SyncPrimGetOffset(psNewSync),
+ bServerSync,
+ uiSize,
+ szClassName);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: failed to add SyncRecord \"%s\" (%s)",
+ __FUNCTION__,
+ szClassName,
+ PVRSRVGETERRORSTRING(eError)));
+ psNewSync->u.sLocal.hRecord = NULL;
+ }
+ }
+ else
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+ {
+ size_t uiSize;
+
+ uiSize = OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN);
+
+ if (uiSize < SYNC_MAX_CLASS_NAME_LEN)
+ uiSize++;
+ /* uiSize now reflects size used for pszClassName + NUL byte */
+
+ eError = BridgeSyncAllocEvent(hSyncPrimContext->hDevConnection,
+ bServerSync,
+ psSyncBlock->ui32FirmwareAddr + SyncPrimGetOffset(psNewSync),
+ uiSize,
+ pszClassName);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "BridgeSyncAllocEvent failed with error: %d",
+ eError));
+ }
+ }
+
+ return PVRSRV_OK;
+
+ fail_raalloc:
+ OSFreeMem(psNewSync);
+ fail_alloc:
+ return eError;
+}
+
+#if defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName)
+{
+ return _SyncPrimAlloc(hSyncPrimContext,
+ ppsSync,
+ pszClassName,
+ IMG_TRUE);
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName)
+{
+ return _SyncPrimAlloc(hSyncPrimContext,
+ ppsSync,
+ pszClassName,
+ IMG_FALSE);
+}
+
+static PVRSRV_ERROR
+_SyncPrimSetValue(SYNC_PRIM *psSyncInt, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError;
+
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimSet(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt)/sizeof(IMG_UINT32),
+ ui32Value);
+ }
+ else
+ {
+ eError = BridgeServerSyncPrimSet(psSyncInt->u.sServer.hBridge,
+ psSyncInt->u.sServer.hServerSync,
+ ui32Value);
+ }
+	/* These bridge calls are not expected to fail, but propagate any error regardless */
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ SyncPrimLocalUnref(psSyncInt);
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ SyncPrimServerFree(psSyncInt);
+ }
+ else
+ {
+ /*
+ Either the client has given us a bad pointer or there is an
+ error in this module
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_out;
+ }
+
+ err_out:
+ return eError;
+}
+
+#if defined(NO_HARDWARE)
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+	/* There is no check that psSyncInt is LOCAL, as this call
+	   substitutes for the Firmware updating a sync, and that sync
+	   could be a server one */
+
+ eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+ err_out:
+ return eError;
+}
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: null sync pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimSet: Invalid sync type"));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_out;
+ }
+
+ eError = _SyncPrimSetValue(psSyncInt, ui32Value);
+
+#if defined(PDUMP)
+ SyncPrimPDump(psSync);
+#endif
+ err_out:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_HANDLE *phBlock,
+ IMG_UINT32 *pui32Offset)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+	if (!psSync || !phBlock || !pui32Offset)
+	{
+		PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ *phBlock = psSyncInt->u.sLocal.psSyncBlock->hServerSyncPrimBlock;
+ *pui32Offset = psSyncInt->u.sLocal.uiSpanAddr - psSyncInt->u.sLocal.psSyncBlock->uiSpanBase;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: psSync not a Local sync prim (%d)",
+ __FUNCTION__, psSyncInt->eType));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ err_out:
+ return eError;
+}
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ *pui32FwAddr = 0;
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_out;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ *pui32FwAddr = SyncPrimGetFirmwareAddrLocal(psSyncInt);
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ *pui32FwAddr = SyncPrimGetFirmwareAddrServer(psSyncInt);
+ }
+ else
+ {
+ /* Either the client has given us a bad pointer or there is an
+ * error in this module
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_out;
+ }
+
+ err_out:
+ return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL PVRSRV_ERROR SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo)
+{
+#if defined(PVRSRV_NEED_PVR_DPF)
+ SYNC_PRIM *psSyncInt;
+ PVRSRV_CLIENT_SYNC_PRIM **papsServerSync;
+ IMG_UINT32 ui32ServerSyncs = 0;
+ IMG_UINT32 *pui32UID = NULL;
+ IMG_UINT32 *pui32FWAddr = NULL;
+ IMG_UINT32 *pui32CurrentOp = NULL;
+ IMG_UINT32 *pui32NextOp = NULL;
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ papsServerSync = OSAllocMem(ui32SyncCount * sizeof(PVRSRV_CLIENT_SYNC_PRIM *));
+ if (!papsServerSync)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ for (i = 0; i < ui32SyncCount; i++)
+ {
+ psSyncInt = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sync=local fw=0x%x curr=0x%04x",
+ pcszExtraInfo,
+ SyncPrimGetFirmwareAddrLocal(psSyncInt),
+ *psSyncInt->sCommon.pui32LinAddr));
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ papsServerSync[ui32ServerSyncs++] = papsSync[i];
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Invalid sync type"));
+ /*
+ Either the client has given us a bad pointer or there is an
+ error in this module
+ */
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto err_free;
+ }
+ }
+
+ if (ui32ServerSyncs > 0)
+ {
+ pui32UID = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32UID)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ pui32FWAddr = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32FWAddr)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ pui32CurrentOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32CurrentOp)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ pui32NextOp = OSAllocMem(ui32ServerSyncs * sizeof(IMG_UINT32));
+ if (!pui32NextOp)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto err_free;
+ }
+ eError = SyncPrimServerGetStatus(ui32ServerSyncs, papsServerSync,
+ pui32UID,
+ pui32FWAddr,
+ pui32CurrentOp,
+ pui32NextOp);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimDumpSyncs: Error querying server sync status (%d)",
+ eError));
+ goto err_free;
+ }
+ for (i = 0; i < ui32ServerSyncs; i++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sync=server fw=0x%x curr=0x%04x next=0x%04x id=%u%s",
+ pcszExtraInfo,
+ pui32FWAddr[i],
+ pui32CurrentOp[i],
+ pui32NextOp[i],
+ pui32UID[i],
+ (pui32NextOp[i] - pui32CurrentOp[i] == 1) ? " *" :
+ (pui32NextOp[i] - pui32CurrentOp[i] > 1) ? " **" :
+ ""));
+ }
+ }
+
+ err_free:
+ OSFreeMem(papsServerSync);
+ if (pui32UID)
+ {
+ OSFreeMem(pui32UID);
+ }
+ if (pui32FWAddr)
+ {
+ OSFreeMem(pui32FWAddr);
+ }
+ if (pui32CurrentOp)
+ {
+ OSFreeMem(pui32CurrentOp);
+ }
+ if (pui32NextOp)
+ {
+ OSFreeMem(pui32NextOp);
+ }
+ return eError;
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32SyncCount);
+ PVR_UNREFERENCED_PARAMETER(papsSync);
+ PVR_UNREFERENCED_PARAMETER(pcszExtraInfo);
+ return PVRSRV_OK;
+#endif
+}
+#endif
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+ PSYNC_OP_COOKIE *ppsCookie)
+{
+ SYNC_OP_COOKIE *psNewCookie;
+ SYNC_BLOCK_LIST *psSyncBlockList;
+ IMG_UINT32 ui32ServerSyncCount = 0;
+ IMG_UINT32 ui32ClientSyncCount = 0;
+ IMG_UINT32 ui32ServerAllocSize;
+ IMG_UINT32 ui32ClientAllocSize;
+ IMG_UINT32 ui32TotalAllocSize;
+ IMG_UINT32 ui32ServerIndex = 0;
+ IMG_UINT32 ui32ClientIndex = 0;
+ IMG_UINT32 i;
+ IMG_UINT32 ui32SyncBlockCount;
+ IMG_HANDLE hBridge;
+ IMG_HANDLE *pahHandleList;
+ IMG_CHAR *pcPtr;
+ PVRSRV_ERROR eError;
+ IMG_BOOL bServerSync;
+
+ psSyncBlockList = _SyncPrimBlockListCreate();
+
+ if (!psSyncBlockList)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e1;
+ if (bServerSync)
+ {
+ ui32ServerSyncCount++;
+ }
+ else
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+ ui32ClientSyncCount++;
+ eError = _SyncPrimBlockListAdd(psSyncBlockList, psSync->u.sLocal.psSyncBlock);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ }
+ }
+
+ ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(IMG_HANDLE) + sizeof(IMG_UINT32));
+ ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+ ui32TotalAllocSize = sizeof(SYNC_OP_COOKIE) +
+ (sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount) +
+ ui32ServerAllocSize +
+ ui32ClientAllocSize;
+
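+	/* The cookie and its arrays come from this one allocation, carved up
+	 * in order: SYNC_OP_COOKIE, papsSyncPrim[ui32SyncCount], five
+	 * IMG_UINT32 arrays of ui32ClientSyncCount entries each (sync block
+	 * index, index, flags, fence value, update value), then
+	 * pahServerSync[ui32ServerSyncCount] and
+	 * paui32ServerFlags[ui32ServerSyncCount].
+	 */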
+	psNewCookie = OSAllocMem(ui32TotalAllocSize);
+	if (!psNewCookie)
+	{
+		eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+		goto e1;
+	}
+	pcPtr = (IMG_CHAR *) psNewCookie;
+
+ /* Setup the pointers */
+ pcPtr += sizeof(SYNC_OP_COOKIE);
+ psNewCookie->papsSyncPrim = (PVRSRV_CLIENT_SYNC_PRIM **) pcPtr;
+
+ pcPtr += sizeof(PVRSRV_CLIENT_SYNC_PRIM *) * ui32SyncCount;
+ psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->pahServerSync = (IMG_HANDLE *) pcPtr;
+	pcPtr += sizeof(IMG_HANDLE) * ui32ServerSyncCount;
+
+	psNewCookie->paui32ServerFlags = (IMG_UINT32 *) pcPtr;
+	pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+	/* Check the pointer setup went ok */
+	if (pcPtr != (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: cookie setup failed", __FUNCTION__));
+ eError = PVRSRV_ERROR_INTERNAL_ERROR;
+ goto e2;
+ }
+
+ psNewCookie->ui32SyncCount = ui32SyncCount;
+ psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+ psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+ psNewCookie->psSyncBlockList = psSyncBlockList;
+
+ /*
+ Get the bridge handle from the 1st sync.
+
+	   Note: We assume that all syncs have been created with the same
+ services connection.
+ */
+ eError = SyncPrimIsServerSync(papsSyncPrim[0], &bServerSync);
+ if (PVRSRV_OK != eError) goto e2;
+ if (bServerSync)
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+ hBridge = psSync->u.sServer.hBridge;
+ }
+ else
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[0];
+
+ hBridge = psSync->u.sLocal.psSyncBlock->psContext->hDevConnection;
+ }
+
+ psNewCookie->hBridge = hBridge;
+
+ if (ui32ServerSyncCount)
+ {
+ psNewCookie->bHaveServerSync = IMG_TRUE;
+ }
+ else
+ {
+ psNewCookie->bHaveServerSync = IMG_FALSE;
+ }
+
+ /* Fill in the server and client sync data */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psSync = (SYNC_PRIM *) papsSyncPrim[i];
+
+ eError = SyncPrimIsServerSync(papsSyncPrim[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e2;
+ if (bServerSync)
+ {
+ psNewCookie->pahServerSync[ui32ServerIndex] = psSync->u.sServer.hServerSync;
+
+ ui32ServerIndex++;
+ }
+ else
+ {
+ /* Location of sync */
+ eError = _SyncPrimBlockListBlockToIndex(psSyncBlockList,
+ psSync->u.sLocal.psSyncBlock,
+ &psNewCookie->paui32SyncBlockIndex[ui32ClientIndex]);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+			/* Work out the index of the sync */
+ psNewCookie->paui32Index[ui32ClientIndex] =
+ SyncPrimGetOffset(psSync)/sizeof(IMG_UINT32);
+
+ ui32ClientIndex++;
+ }
+
+ psNewCookie->papsSyncPrim[i] = papsSyncPrim[i];
+ }
+
+ eError = _SyncPrimBlockListHandleArrayCreate(psSyncBlockList,
+ &ui32SyncBlockCount,
+ &pahHandleList);
+	if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /*
+ Create the server side cookie. Here we pass in all the unchanging
+	   data so we only need to pass in the minimum at take-op time
+ */
+ eError = BridgeSyncPrimOpCreate(hBridge,
+ ui32SyncBlockCount,
+ pahHandleList,
+ psNewCookie->ui32ClientSyncCount,
+ psNewCookie->paui32SyncBlockIndex,
+ psNewCookie->paui32Index,
+ psNewCookie->ui32ServerSyncCount,
+ psNewCookie->pahServerSync,
+ &psNewCookie->hServerCookie);
+
+ /* Free the handle list regardless of error */
+ _SyncPrimBlockListHandleArrayDestroy(pahHandleList);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ /* Increase the reference count on all referenced local sync prims
+ * so that they cannot be freed until this Op is finished with
+ */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psSyncInt;
+ psSyncInt = IMG_CONTAINER_OF(papsSyncPrim[i], SYNC_PRIM, sCommon);
+ if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+ {
+ SyncPrimLocalRef(psSyncInt);
+ }
+ }
+
+ *ppsCookie = psNewCookie;
+ return PVRSRV_OK;
+
+ e2:
+ OSFreeMem(psNewCookie);
+ e1:
+ _SyncPrimBlockListDestroy(psSyncBlockList);
+ e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32ServerIndex = 0;
+ IMG_UINT32 ui32ClientIndex = 0;
+ IMG_UINT32 i;
+ IMG_BOOL bServerSync;
+
+ /* Copy client sync operations */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+		/*
+		   Sanity check that the client passed in the same syncs as the
+		   ones we got at create time
+		*/
+ if (psCookie->papsSyncPrim[i] != pasSyncOp[i].psSync)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ eError = SyncPrimIsServerSync(pasSyncOp[i].psSync, &bServerSync);
+ if (PVRSRV_OK != eError) goto e0;
+ if (bServerSync)
+ {
+ psCookie->paui32ServerFlags[ui32ServerIndex] =
+ pasSyncOp[i].ui32Flags;
+
+ ui32ServerIndex++;
+ }
+ else
+ {
+ /* Client operation information */
+ psCookie->paui32Flags[ui32ClientIndex] =
+ pasSyncOp[i].ui32Flags;
+ psCookie->paui32FenceValue[ui32ClientIndex] =
+ pasSyncOp[i].ui32FenceValue;
+ psCookie->paui32UpdateValue[ui32ClientIndex] =
+ pasSyncOp[i].ui32UpdateValue;
+
+ ui32ClientIndex++;
+ }
+ }
+
+ eError = BridgeSyncPrimOpTake(psCookie->hBridge,
+ psCookie->hServerCookie,
+ psCookie->ui32ClientSyncCount,
+ psCookie->paui32Flags,
+ psCookie->paui32FenceValue,
+ psCookie->paui32UpdateValue,
+ psCookie->ui32ServerSyncCount,
+ psCookie->paui32ServerFlags);
+
+ e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+ IMG_BOOL *pbReady)
+{
+ PVRSRV_ERROR eError;
+ if (!psCookie)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ /*
+ If we have a server sync we have no choice
+ but to do the check in the server
+ */
+ if (psCookie->bHaveServerSync)
+ {
+ eError = BridgeSyncPrimOpReady(psCookie->hBridge,
+ psCookie->hServerCookie,
+ pbReady);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to do sync check in server (Error = %d)",
+ __FUNCTION__, eError));
+ goto e0;
+ }
+ }
+ else
+ {
+ IMG_UINT32 i;
+ IMG_UINT32 ui32SnapShot;
+ IMG_BOOL bReady = IMG_TRUE;
+
+ for (i=0;i<psCookie->ui32ClientSyncCount;i++)
+ {
+ if ((psCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+ {
+ continue;
+ }
+
+ ui32SnapShot = _SyncPrimBlockListGetClientValue(psCookie->psSyncBlockList,
+ psCookie->paui32SyncBlockIndex[i],
+ psCookie->paui32Index[i]);
+ if (ui32SnapShot != psCookie->paui32FenceValue[i])
+ {
+ bReady = IMG_FALSE;
+ break;
+ }
+ }
+
+ *pbReady = bReady;
+ }
+
+ return PVRSRV_OK;
+ e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie)
+{
+ PVRSRV_ERROR eError;
+
+ eError = BridgeSyncPrimOpComplete(psCookie->hBridge,
+ psCookie->hServerCookie);
+
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 i;
+
+ eError = BridgeSyncPrimOpDestroy(psCookie->hBridge, psCookie->hServerCookie);
+ if (PVRSRV_OK != eError)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Failed to destroy SyncPrimOp (Error = %d)",
+ __FUNCTION__, eError));
+ goto err_out;
+ }
+
+ /* Decrease the reference count on all referenced local sync prims
+ * so that they can be freed now this Op is finished with
+ */
+ for (i=0;i<psCookie->ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psSyncInt;
+ psSyncInt = IMG_CONTAINER_OF(psCookie->papsSyncPrim[i], SYNC_PRIM, sCommon);
+ if (SYNC_PRIM_TYPE_LOCAL == psSyncInt->eType)
+ {
+ SyncPrimLocalUnref(psSyncInt);
+ }
+ }
+
+ _SyncPrimBlockListDestroy(psCookie->psSyncBlockList);
+ OSFreeMem(psCookie);
+
+ err_out:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 *pui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp)
+{
+ IMG_UINT32 ui32ServerIndex = 0;
+ IMG_UINT32 ui32ClientIndex = 0;
+ PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOps;
+ IMG_UINT32 i;
+ IMG_BOOL bServerSync;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ psSyncOps = OSAllocMem(sizeof(PVRSRV_CLIENT_SYNC_PRIM_OP) *
+ psCookie->ui32SyncCount);
+ if (!psSyncOps)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ for (i=0; i<psCookie->ui32SyncCount; i++)
+ {
+ psSyncOps[i].psSync = psCookie->papsSyncPrim[i];
+ eError = SyncPrimIsServerSync(psCookie->papsSyncPrim[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e1;
+ if (bServerSync)
+ {
+ psSyncOps[i].ui32FenceValue = 0;
+ psSyncOps[i].ui32UpdateValue = 0;
+ psSyncOps[i].ui32Flags = psCookie->paui32ServerFlags[ui32ServerIndex];
+ ui32ServerIndex++;
+ }
+ else
+ {
+ psSyncOps[i].ui32FenceValue = psCookie->paui32FenceValue[ui32ClientIndex];
+ psSyncOps[i].ui32UpdateValue = psCookie->paui32UpdateValue[ui32ClientIndex];
+ psSyncOps[i].ui32Flags = psCookie->paui32Flags[ui32ClientIndex];
+ ui32ClientIndex++;
+ }
+ }
+
+ *ppsSyncOp = psSyncOps;
+ *pui32SyncCount = psCookie->ui32SyncCount;
+
+	return PVRSRV_OK;
+
+	e1:
+	OSFreeMem(psSyncOps);
+ e0:
+ return eError;
+}
+
+#if !defined(__KERNEL__)
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerAlloc(SYNC_BRIDGE_HANDLE hBridge,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName
+ PVR_DBG_FILELINE_PARAM)
+{
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+ SYNC_PRIM *psNewSync;
+ PVRSRV_ERROR eError;
+ size_t uiSize;
+
+#if !defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+ PVR_DBG_FILELINE_UNREF();
+#endif
+ psNewSync = OSAllocZMem(sizeof(SYNC_PRIM));
+ if (psNewSync == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ if (pszClassName)
+ {
+ uiSize = OSStringNLength(pszClassName, SYNC_MAX_CLASS_NAME_LEN);
+ /* Copy the class name annotation into a fixed-size array */
+ OSCachedMemCopy(szClassName, pszClassName, uiSize);
+ /* NUL-terminate the ClassName if it wasn't already */
+ if (uiSize == SYNC_MAX_CLASS_NAME_LEN)
+ szClassName[SYNC_MAX_CLASS_NAME_LEN-1] = '\0';
+ else
+ szClassName[uiSize++] = '\0';
+ }
+ else
+ {
+ /* No class name annotation */
+ uiSize = 0;
+ szClassName[0] = '\0';
+ }
+
+ eError = BridgeServerSyncAlloc(hBridge,
+ &psNewSync->u.sServer.hServerSync,
+ &psNewSync->u.sServer.ui32FirmwareAddr,
+ uiSize,
+ szClassName);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+#if defined(PVR_SYNC_PRIM_ALLOC_TRACE)
+ PVR_DPF((PVR_DBG_WARNING, "Allocated sync=server fw=0x%x [%p]" PVR_DBG_FILELINE_FMT,
+ psNewSync->u.sServer.ui32FirmwareAddr, &psNewSync->sCommon PVR_DBG_FILELINE_ARG));
+#endif
+
+ psNewSync->eType = SYNC_PRIM_TYPE_SERVER;
+ psNewSync->u.sServer.hBridge = hBridge;
+ *ppsSync = &psNewSync->sCommon;
+
+ return PVRSRV_OK;
+ e1:
+ OSFreeMem(psNewSync);
+ e0:
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 i;
+ SYNC_BRIDGE_HANDLE hBridge = NULL;
+ IMG_HANDLE *pahServerHandle;
+ IMG_BOOL bServerSync;
+
+ if (papsSync[0])
+ {
+ hBridge = _SyncPrimGetBridgeHandle(papsSync[0]);
+ }
+ if (!hBridge)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid Sync connection\n", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+
+ pahServerHandle = OSAllocMem(sizeof(IMG_HANDLE) * ui32SyncCount);
+ if (pahServerHandle == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /*
+	   Check that all the syncs we've been passed are server syncs
+	   and that they are all on the same connection.
+ */
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ SYNC_PRIM *psIntSync = IMG_CONTAINER_OF(papsSync[i], SYNC_PRIM, sCommon);
+
+ eError = SyncPrimIsServerSync(papsSync[i], &bServerSync);
+ if (PVRSRV_OK != eError) goto e1;
+ if (!bServerSync)
+ {
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e1;
+ }
+
+ if (!papsSync[i] || hBridge != _SyncPrimGetBridgeHandle(papsSync[i]))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncServerGetStatus: Sync connection is different\n"));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e1;
+ }
+
+ pahServerHandle[i] = psIntSync->u.sServer.hServerSync;
+ }
+
+ eError = BridgeServerSyncGetStatus(hBridge,
+ ui32SyncCount,
+ pahServerHandle,
+ pui32UID,
+ pui32FWAddr,
+ pui32CurrentOp,
+ pui32NextOp);
+ OSFreeMem(pahServerHandle);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+ return PVRSRV_OK;
+
+ e1:
+ OSFreeMem(pahServerHandle);
+ e0:
+ return eError;
+}
+
+#endif
+
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_LOCAL)
+ {
+ *pbServerSync = IMG_FALSE;
+ }
+ else if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ *pbServerSync = IMG_TRUE;
+ }
+ else
+ {
+ /* Either the client has given us a bad pointer or there is an
+ * error in this module
+ */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync type", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+
+ e0:
+ return eError;
+}
+
+IMG_INTERNAL
+IMG_HANDLE SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ SYNC_PRIM *psSyncInt;
+
+ if (!psSync)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ goto e0;
+ }
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType == SYNC_PRIM_TYPE_SERVER)
+ {
+ return psSyncInt->u.sServer.hServerSync;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+ __FUNCTION__, psSyncInt->eType));
+ goto e0;
+ }
+ e0:
+ return (IMG_HANDLE) NULL;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp)
+{
+ SYNC_PRIM *psSyncInt;
+ IMG_BOOL bUpdate;
+ PVRSRV_ERROR eError;
+
+ if (!psSyncOp)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid input pointer", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto e0;
+ }
+
+ psSyncInt = IMG_CONTAINER_OF(psSyncOp->psSync, SYNC_PRIM, sCommon);
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_SERVER)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: invalid sync type (%d)",
+ __FUNCTION__, psSyncInt->eType));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+ if (0 == psSyncOp->ui32Flags)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: no sync flags", __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_SYNC_PRIM;
+ goto e0;
+ }
+
+	if (psSyncOp->ui32Flags & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+	{
+		bUpdate = IMG_TRUE;
+	}
+	else
+	{
+		bUpdate = IMG_FALSE;
+	}
+
+ eError = BridgeServerSyncQueueHWOp(psSyncInt->u.sServer.hBridge,
+ psSyncInt->u.sServer.hServerSync,
+ bUpdate,
+ &psSyncOp->ui32FenceValue,
+ &psSyncOp->ui32UpdateValue);
+ e0:
+ return eError;
+}
+
+#if defined(PDUMP)
+IMG_INTERNAL void SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimPDump(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt));
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDump: Invalid sync type"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimPDumpValue(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt),
+ ui32Value);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpPol: Invalid sync type (expected SYNC_PRIM_TYPE_LOCAL)"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+ eError = BridgeSyncPrimPDumpPol(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt),
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psCookie != NULL);
+
+ eError = BridgeSyncPrimOpPDumpPol(psCookie->hBridge,
+ psCookie->hServerCookie,
+ eOperator,
+ ui32PDumpFlags);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+IMG_INTERNAL void SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT64 uiWriteOffset,
+ IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ SYNC_PRIM *psSyncInt;
+ SYNC_PRIM_BLOCK *psSyncBlock;
+ SYNC_PRIM_CONTEXT *psContext;
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(psSync != NULL);
+ psSyncInt = IMG_CONTAINER_OF(psSync, SYNC_PRIM, sCommon);
+
+ if (psSyncInt->eType != SYNC_PRIM_TYPE_LOCAL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncPrimPDumpCBP: Invalid sync type"));
+ PVR_ASSERT(IMG_FALSE);
+ return;
+ }
+
+ psSyncBlock = psSyncInt->u.sLocal.psSyncBlock;
+ psContext = psSyncBlock->psContext;
+
+	/* FIXME: uiWriteOffset, uiPacketSize and uiBufferSize were changed to
+	 * 64-bit quantities to resolve Windows compiler warnings.
+	 * However the bridge is only 32-bit, hence the compiler warns about
+	 * implicit casts and loss of data.
+	 * Explicit casts and asserts were added to remove the warnings.
+	 */
+#if (defined(_WIN32) && !defined(_WIN64)) || (defined(LINUX) && defined(__i386__))
+ PVR_ASSERT(uiWriteOffset<IMG_UINT32_MAX);
+ PVR_ASSERT(uiPacketSize<IMG_UINT32_MAX);
+ PVR_ASSERT(uiBufferSize<IMG_UINT32_MAX);
+#endif
+ eError = BridgeSyncPrimPDumpCBP(psContext->hDevConnection,
+ psSyncBlock->hServerSyncPrimBlock,
+ SyncPrimGetOffset(psSyncInt),
+ (IMG_UINT32)uiWriteOffset,
+ (IMG_UINT32)uiPacketSize,
+ (IMG_UINT32)uiBufferSize);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed with error %d",
+ __FUNCTION__, eError));
+ }
+ PVR_ASSERT(eError == PVRSRV_OK);
+}
+
+#endif
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync.h b/drivers/gpu/drm/img-rogue/1.10/sync.h
new file mode 100644
index 00000000000000..ccf91f690b08c6
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync.h
@@ -0,0 +1,400 @@
+/*************************************************************************/ /*!
+@File
+@Title Synchronisation interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the client side interface for synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_
+#define _SYNC_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include <powervr/sync_external.h>
+#include "pdumpdefs.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+
+#include "device_connection.h"
+
+#if defined(__KERNEL__) && defined(LINUX) && !defined(__GENKSYMS__)
+#define __pvrsrv_defined_struct_enum__
+#include <services_kernel_client.h>
+#endif
+
+/*************************************************************************/ /*!
+@Function SyncPrimContextCreate
+
+@Description Create a new synchronisation context
+
+@Input          hDevConnection          Device connection handle
+
+@Output hSyncPrimContext Handle to the created synchronisation
+ primitive context
+
+@Return PVRSRV_OK if the synchronisation primitive context was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimContextCreate(SHARED_DEV_CONNECTION hDevConnection,
+ PSYNC_PRIM_CONTEXT *hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function SyncPrimContextDestroy
+
+@Description Destroy a synchronisation context
+
+@Input hSyncPrimContext Handle to the synchronisation
+ primitive context to destroy
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimContextDestroy(PSYNC_PRIM_CONTEXT hSyncPrimContext);
+
+/*************************************************************************/ /*!
+@Function SyncPrimAlloc
+
+@Description Allocate a new synchronisation primitive on the specified
+ synchronisation context
+
+@Input hSyncPrimContext Handle to the synchronisation
+ primitive context
+
+@Output ppsSync Created synchronisation primitive
+
+@Input pszClassName Sync source annotation
+
+@Return PVRSRV_OK if the synchronisation primitive was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAlloc(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName);
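+
+/* A minimal usage sketch (illustrative only; error handling elided):
+ *
+ *     PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ *
+ *     eError = SyncPrimAlloc(hSyncPrimContext, &psSync, "MySyncClass");
+ *     eError = SyncPrimSet(psSync, 0);
+ *     ...
+ *     eError = SyncPrimFree(psSync);
+ */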
+
+#if defined(__KERNEL__)
+/*************************************************************************/ /*!
+@Function SyncPrimAllocForServerSync
+
+@Description Allocate a new synchronisation primitive on the specified
+ synchronisation context for a server sync
+
+@Input hSyncPrimContext Handle to the synchronisation
+ primitive context
+
+@Output ppsSync Created synchronisation primitive
+
+@Input pszClassName Sync source annotation
+
+@Return PVRSRV_OK if the synchronisation primitive was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimAllocForServerSync(PSYNC_PRIM_CONTEXT hSyncPrimContext,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName);
+#endif
+
+/*************************************************************************/ /*!
+@Function SyncPrimFree
+
+@Description Free a synchronisation primitive
+
+@Input psSync The synchronisation primitive to free
+
+@Return PVRSRV_OK if the synchronisation primitive was
+ successfully freed
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimFree(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function SyncPrimSet
+
+@Description Set the synchronisation primitive to a value
+
+@Input psSync The synchronisation primitive to set
+
+@Input ui32Value Value to set it to
+
+@Return PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimSet(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+#if defined(NO_HARDWARE)
+
+/*************************************************************************/ /*!
+@Function SyncPrimNoHwUpdate
+
+@Description Updates the synchronisation primitive value (in NoHardware drivers)
+
+@Input psSync The synchronisation primitive to update
+
+@Input ui32Value Value to update it to
+
+@Return PVRSRV_OK on success
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncPrimNoHwUpdate(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+#endif
+
+PVRSRV_ERROR
+SyncPrimServerAlloc(SHARED_DEV_CONNECTION hDevConnection,
+ PVRSRV_CLIENT_SYNC_PRIM **ppsSync,
+ const IMG_CHAR *pszClassName
+ PVR_DBG_FILELINE_PARAM);
+
+PVRSRV_ERROR
+SyncPrimServerGetStatus(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSync,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+SyncPrimServerQueueOp(PVRSRV_CLIENT_SYNC_PRIM_OP *psSyncOp);
+
+PVRSRV_ERROR
+SyncPrimIsServerSync(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_BOOL *pbServerSync);
+
+IMG_HANDLE
+SyncPrimGetServerHandle(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+
+
+PVRSRV_ERROR
+SyncPrimOpCreate(IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM **papsSyncPrim,
+ PSYNC_OP_COOKIE *ppsCookie);
+
+PVRSRV_ERROR
+SyncPrimOpTake(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 ui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP *pasSyncOp);
+
+PVRSRV_ERROR
+SyncPrimOpReady(PSYNC_OP_COOKIE psCookie,
+ IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+SyncPrimOpComplete(PSYNC_OP_COOKIE psCookie);
+
+IMG_INTERNAL
+PVRSRV_ERROR SyncPrimOpDestroy(PSYNC_OP_COOKIE psCookie);
+
+PVRSRV_ERROR
+SyncPrimOpResolve(PSYNC_OP_COOKIE psCookie,
+ IMG_UINT32 *pui32SyncCount,
+ PVRSRV_CLIENT_SYNC_PRIM_OP **ppsSyncOp);
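+
+/* Typical operation-cookie lifecycle (an illustrative sketch; error
+ * handling elided). SyncPrimOpTake() must be passed the same syncs, in
+ * the same order, as were given to SyncPrimOpCreate():
+ *
+ *     PSYNC_OP_COOKIE psCookie;
+ *     IMG_BOOL bReady;
+ *
+ *     eError = SyncPrimOpCreate(ui32SyncCount, papsSyncPrim, &psCookie);
+ *     eError = SyncPrimOpTake(psCookie, ui32SyncCount, pasSyncOp);
+ *     eError = SyncPrimOpReady(psCookie, &bReady);
+ *     eError = SyncPrimOpComplete(psCookie);
+ *     eError = SyncPrimOpDestroy(psCookie);
+ */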
+
+PVRSRV_ERROR
+SyncPrimDumpSyncs(IMG_UINT32 ui32SyncCount, PVRSRV_CLIENT_SYNC_PRIM **papsSync, const IMG_CHAR *pcszExtraInfo);
+
+#if defined(PDUMP)
+/*************************************************************************/ /*!
+@Function SyncPrimPDump
+
+@Description PDump the current value of the synchronisation primitive
+
+@Input psSync The synchronisation primitive to PDump
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync);
+
+/*************************************************************************/ /*!
+@Function SyncPrimPDumpValue
+
+@Description PDump the ui32Value as the value of the synchronisation
+ primitive (regardless of the current value).
+
+@Input psSync The synchronisation primitive to PDump
+@Input ui32Value Value to give to the sync prim on the pdump
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value);
+
+/*************************************************************************/ /*!
+@Function SyncPrimPDumpPol
+
+@Description Do a PDump poll of the synchronisation primitive
+
+@Input psSync The synchronisation primitive to PDump
+
+@Input ui32Value Value to poll for
+
+@Input          ui32Mask                PDump mask operator
+
+@Input          eOperator               PDump poll operator
+
+@Input ui32PDumpFlags PDump flags
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function SyncPrimOpPDumpPol
+
+@Description    Do a PDump poll of all the synchronisation primitives on
+                this Operation cookie.
+
+@Input psCookie Operation cookie
+
+@Input          eOperator               PDump poll operator
+
+@Input ui32PDumpFlags PDump flags
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimOpPDumpPol(PSYNC_OP_COOKIE psCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags);
+
+/*************************************************************************/ /*!
+@Function SyncPrimPDumpCBP
+
+@Description Do a PDump CB poll using the synchronisation primitive
+
+@Input psSync The synchronisation primitive to PDump
+
+@Input uiWriteOffset Current write offset of buffer
+
+@Input uiPacketSize Size of the packet to write into CB
+
+@Input uiBufferSize Size of the CB
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT64 uiWriteOffset,
+ IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize);
+
+#else
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpValue)
+#endif
+static INLINE void
+SyncPrimPDumpValue(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDump)
+#endif
+static INLINE void
+SyncPrimPDump(PVRSRV_CLIENT_SYNC_PRIM *psSync)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpPol)
+#endif
+static INLINE void
+SyncPrimPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT32 ui32Value,
+ IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimServerPDumpPol)
+#endif
+static INLINE void
+SyncPrimServerPDumpPol(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ PDUMP_POLL_OPERATOR eOperator,
+ IMG_UINT32 ui32PDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(ui32PDumpFlags);
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(SyncPrimPDumpCBP)
+#endif
+static INLINE void
+SyncPrimPDumpCBP(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_UINT64 uiWriteOffset,
+ IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psSync);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+}
+#endif /* PDUMP */
+#endif /* _SYNC_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.c b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.c
new file mode 100644
index 00000000000000..5569b9e894cade
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.c
@@ -0,0 +1,2593 @@
+/*************************************************************************/ /*!
+@File
+@Title Services synchronisation checkpoint interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements server side code for services synchronisation
+ interface
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "img_types.h"
+#include "allocmem.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "osfunc.h"
+#include "dllist.h"
+#include "sync.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint_internal_fw.h"
+#include "sync_checkpoint_init.h"
+#include "lock.h"
+#include "log2.h"
+#include "pvrsrv.h"
+#include "pdump_km.h"
+
+#include "pvrsrv_sync_km.h"
+#include "rgxhwperf.h"
+
+#if defined(PVRSRV_NEED_PVR_DPF)
+
+/* Enable this to turn on debug relating to the creation and
+ resolution of contexts */
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+
+/* Enable this to turn on debug relating to the creation and
+ resolution of fences */
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+ allocation and freeing */
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint
+ enqueuing and signalling */
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+
+/* Enable this to turn on debug relating to the sync checkpoint pool */
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+
+/* Enable this to turn on debug relating to sync checkpoint UFO
+ lookup */
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+
+/* Enable this to turn on sync checkpoint deferred cleanup debug
+ * (for syncs we have been told to free but which have some
+ * outstanding FW operations remaining, enqueued in CCBs).
+ */
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#else
+
+#define ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_POOL_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_UFO_DEBUG 0
+#define ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG 0
+
+#endif
+
+/* Set the size of the sync checkpoint pool (not used if 0).
+ * A pool will be maintained for each sync checkpoint context.
+ */
+#define SYNC_CHECKPOINT_POOL_SIZE 128
+
+#define SYNC_CHECKPOINT_BLOCK_LIST_CHUNK_SIZE 10
+#define LOCAL_SYNC_CHECKPOINT_RESET_VALUE PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED
+
+/*
+ This defines the maximum amount of synchronisation memory
+ that can be allocated per sync checkpoint context.
+ In reality this number is meaningless as we would run out
+ of synchronisation memory before we reach this limit, but
+ we need to provide a size to the span RA.
+ */
+#define MAX_SYNC_CHECKPOINT_MEM (4 * 1024 * 1024)
+
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_LIST_
+{
+	IMG_UINT32 ui32BlockCount;			/*!< Number of sync checkpoint blocks in the list */
+	IMG_UINT32 ui32BlockListSize;		/*!< Size of the sync checkpoint block array */
+ SYNC_CHECKPOINT_BLOCK **papsSyncCheckpointBlock; /*!< Array of sync checkpoint blocks */
+} SYNC_CHECKPOINT_BLOCK_LIST;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_
+{
+ SHARED_DEV_CONNECTION psDeviceNode;
+ PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve;
+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate;
+ /*
+ * Used as head of linked-list of sync checkpoints for which
+ * SyncCheckpointFree() has been called, but have outstanding
+ * FW operations (enqueued in CCBs)
+	 * This list will be checked whenever a SyncCheckpointFree() is
+ * called, and when SyncCheckpointContextDestroy() is called.
+ */
+ DLLIST_NODE sDeferredCleanupListHead;
+ /* Lock to protect the deferred cleanup list */
+ POS_LOCK hDeferredCleanupListLock;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ _SYNC_CHECKPOINT *psSyncCheckpointPool[SYNC_CHECKPOINT_POOL_SIZE];
+ IMG_BOOL bSyncCheckpointPoolFull;
+ IMG_BOOL bSyncCheckpointPoolValid;
+ IMG_UINT32 ui32SyncCheckpointPoolCount;
+ IMG_UINT32 ui32SyncCheckpointPoolWp;
+ IMG_UINT32 ui32SyncCheckpointPoolRp;
+ POS_LOCK hSyncCheckpointPoolLock;
+#endif
+} _SYNC_CHECKPOINT_CONTEXT_CTL;
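+
+/* Judging by the fields above, the checkpoint pool is used as a FIFO
+ * ring buffer: _PutCheckpointInPool() (declared later in this file)
+ * inserts at ui32SyncCheckpointPoolWp and _GetCheckpointFromPool()
+ * removes at ui32SyncCheckpointPoolRp, both indices wrapping at
+ * SYNC_CHECKPOINT_POOL_SIZE and protected by hSyncCheckpointPoolLock.
+ */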
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+
+/* this is the max number of sync checkpoint records we will search or dump
+ * at any time.
+ */
+#define SYNC_CHECKPOINT_RECORD_LIMIT 20000
+
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
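+/* e.g. DECREMENT_WITH_WRAP(0, sz) yields (sz - 1): the index steps
+ * backwards and wraps around at the start of the buffer. */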
+
+struct SYNC_CHECKPOINT_RECORD
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< handle to SYNC_CHECKPOINT_BLOCK */
+ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */
+ IMG_UINT32 ui32FwBlockAddr;
+ IMG_PID uiPID;
+ IMG_UINT32 ui32UID;
+ IMG_UINT64 ui64OSTime;
+ DLLIST_NODE sNode;
+ IMG_CHAR szClassName[PVRSRV_SYNC_NAME_LENGTH];
+ PSYNC_CHECKPOINT pSyncCheckpt;
+};
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static IMG_BOOL gbSyncCheckpointInit = IMG_FALSE;
+static PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN g_pfnFenceResolve;
+static PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN g_pfnFenceCreate;
+static PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN g_pfnFenceDataRollback;
+static PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN g_pfnFenceFinalise;
+static PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN g_pfnNoHWUpdateTimelines;
+static PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN g_pfnFreeChkptListMem;
+static PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN g_pfnDumpInfoOnStalledUFOs;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint);
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext);
+#endif
+
+/* Defined values to indicate status of sync checkpoint, which is
+ * stored in the memory of the structure */
+#define SYNC_CHECKPOINT_PATTERN_IN_USE 0x1a1aa
+#define SYNC_CHECKPOINT_PATTERN_IN_POOL 0x2b2bb
+#define SYNC_CHECKPOINT_PATTERN_FREED 0x3c3cc
+
+static inline void RGXSRVHWPerfSyncCheckpointUFOIsSignalled(PVRSRV_RGXDEV_INFO *psDevInfo,
+ _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags)
+{
+ if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO)
+ && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ {
+ RGX_HWPERF_UFO_EV eEv;
+ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+ if (psSyncCheckpointInt)
+ {
+ if ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+ (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED))
+ {
+ sSyncData.sCheckSuccess.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+ sSyncData.sCheckSuccess.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+ eEv = RGX_HWPERF_UFO_EV_CHECK_SUCCESS;
+ }
+ else
+ {
+ sSyncData.sCheckFail.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+ sSyncData.sCheckFail.ui32Value = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+ sSyncData.sCheckFail.ui32Required = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ eEv = RGX_HWPERF_UFO_EV_CHECK_FAIL;
+ }
+ RGXHWPerfHostPostUfoEvent(psDevInfo, eEv, &sSyncData,
+ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+ }
+ }
+}
+
+static inline void RGXSRVHWPerfSyncCheckpointUFOUpdate(PVRSRV_RGXDEV_INFO *psDevInfo,
+ _SYNC_CHECKPOINT *psSyncCheckpointInt, IMG_UINT32 ui32FenceSyncFlags)
+{
+ if (RGXHWPerfHostIsEventEnabled(psDevInfo, RGX_HWPERF_HOST_UFO)
+ && !(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ {
+ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+
+ if (psSyncCheckpointInt)
+ {
+ sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt);
+ sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+ sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ RGXHWPerfHostPostUfoEvent(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData,
+ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+ }
+ }
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+static PVRSRV_ERROR
+_SyncCheckpointRecordAdd(PSYNC_CHECKPOINT_RECORD_HANDLE *phRecord,
+ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_UINT32 ui32UID,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt);
+static PVRSRV_ERROR
+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord);
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode);
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+#endif
+
+#if defined(PDUMP)
+static PVRSRV_ERROR _SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+static PVRSRV_ERROR _SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint);
+#endif
+
+/* Unique incremental ID assigned to sync checkpoints when allocated */
+static IMG_UINT32 g_SyncCheckpointUID;
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext);
+
+/*
+ Internal interfaces for management of _SYNC_CHECKPOINT_CONTEXT
+ */
+static void
+_SyncCheckpointContextUnref(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+ "_SyncCheckpointContextUnref context already freed");
+ }
+ else if (0 == OSAtomicDecrement(&psContext->hRefCount))
+ {
+ /* SyncCheckpointContextDestroy only when no longer referenced */
+ OSLockDestroy(psContext->psContextCtl->hDeferredCleanupListLock);
+ psContext->psContextCtl->hDeferredCleanupListLock = NULL;
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ if (psContext->psContextCtl->ui32SyncCheckpointPoolCount)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called for context<%p> with %d sync checkpoints still in the pool",
+ __FUNCTION__,
+ (void*)psContext,
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount));
+ }
+ psContext->psContextCtl->bSyncCheckpointPoolValid = IMG_FALSE;
+ OSLockDestroy(psContext->psContextCtl->hSyncCheckpointPoolLock);
+ psContext->psContextCtl->hSyncCheckpointPoolLock = NULL;
+#endif
+ OSFreeMem(psContext->psContextCtl);
+ RA_Delete(psContext->psSpanRA);
+ RA_Delete(psContext->psSubAllocRA);
+ OSLockDestroy(psContext->hLock);
+ psContext->hLock = NULL;
+ OSFreeMem(psContext);
+ }
+}
+
+static void
+_SyncCheckpointContextRef(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ if (!OSAtomicRead(&psContext->hRefCount))
+ {
+ PVR_LOG_ERROR(PVRSRV_ERROR_INVALID_CONTEXT,
+ "_SyncCheckpointContextRef context use after free");
+ }
+ else
+ {
+ OSAtomicIncrement(&psContext->hRefCount);
+ }
+}
+
+/*
+ Internal interfaces for management of synchronisation block memory
+ */
+static PVRSRV_ERROR
+_AllocSyncCheckpointBlock(_SYNC_CHECKPOINT_CONTEXT *psContext,
+ SYNC_CHECKPOINT_BLOCK **ppsSyncBlock)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlk;
+ PVRSRV_ERROR eError;
+
+ psSyncBlk = OSAllocMem(sizeof(*psSyncBlk));
+ PVR_LOGG_IF_NOMEM(psSyncBlk, "OSAllocMem", eError, fail_alloc);
+
+ psSyncBlk->psContext = psContext;
+
+ /* Allocate sync checkpoint block */
+ psDevNode = psContext->psDevNode;
+ if (!psDevNode)
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_LOG_ERROR(eError, "context device node invalid");
+ goto fail_alloc_ufo_block;
+ }
+ psSyncBlk->psDevNode = psDevNode;
+
+ eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+ &psSyncBlk->hMemDesc,
+ &psSyncBlk->ui32FirmwareAddr,
+ &psSyncBlk->ui32SyncBlockSize);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "failed to allocate ufo block");
+ goto fail_alloc_ufo_block;
+ }
+
+ eError = DevmemAcquireCpuVirtAddr(psSyncBlk->hMemDesc,
+ (void **) &psSyncBlk->pui32LinAddr);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "DevmemAcquireCpuVirtAddr");
+ goto fail_devmem_acquire;
+ }
+
+ OSAtomicWrite(&psSyncBlk->hRefCount, 1);
+
+ OSLockCreate(&psSyncBlk->hLock, LOCK_TYPE_NONE);
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Allocated Sync Checkpoint UFO block (FirmwareVAddr = 0x%08x)",
+ psSyncBlk->ui32FirmwareAddr);
+
+ *ppsSyncBlock = psSyncBlk;
+ return PVRSRV_OK;
+
+ fail_devmem_acquire:
+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+ fail_alloc_ufo_block:
+ OSFreeMem(psSyncBlk);
+ fail_alloc:
+ return eError;
+}
+
+static void
+_FreeSyncCheckpointBlock(SYNC_CHECKPOINT_BLOCK *psSyncBlk)
+{
+ OSLockAcquire(psSyncBlk->hLock);
+ if (0 == OSAtomicDecrement(&psSyncBlk->hRefCount))
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+ DevmemReleaseCpuVirtAddr(psSyncBlk->hMemDesc);
+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->hMemDesc);
+ OSLockRelease(psSyncBlk->hLock);
+ OSLockDestroy(psSyncBlk->hLock);
+ psSyncBlk->hLock = NULL;
+ OSFreeMem(psSyncBlk);
+ }
+ else
+ {
+ OSLockRelease(psSyncBlk->hLock);
+ }
+}
+
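+/*
+ * RA import callback, invoked by the sub-allocation RA when it needs a
+ * fresh span of UFO memory to carve checkpoints from. Each import takes
+ * a reference on the context (so it cannot be destroyed while blocks
+ * remain attached), allocates a UFO block, and reserves a matching span
+ * in the span-management RA.
+ */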
+static PVRSRV_ERROR
+_SyncCheckpointBlockImport(RA_PERARENA_HANDLE hArena,
+ RA_LENGTH_T uSize,
+ RA_FLAGS_T uFlags,
+ const IMG_CHAR *pszAnnotation,
+ RA_BASE_T *puiBase,
+ RA_LENGTH_T *puiActualSize,
+ RA_PERISPAN_HANDLE *phImport)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlock = NULL;
+ RA_LENGTH_T uiSpanSize;
+ PVRSRV_ERROR eError;
+ PVR_UNREFERENCED_PARAMETER(uFlags);
+
+ PVR_LOG_IF_FALSE((hArena != NULL), "hArena is NULL");
+
+ /* Check we've not been called with an unexpected size */
+ PVR_LOG_IF_FALSE((uSize == sizeof(_SYNC_CHECKPOINT_FW_OBJ)),
+ "uSize is not the size of _SYNC_CHECKPOINT_FW_OBJ");
+
+ /*
+ Ensure the sync checkpoint context doesn't go away while we have sync blocks
+ attached to it
+ */
+ _SyncCheckpointContextRef(psContext);
+
+ /* Allocate the block of memory */
+ eError = _AllocSyncCheckpointBlock(psContext, &psSyncBlock);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_syncblockalloc;
+ }
+
+ /* Allocate a span for it */
+ eError = RA_Alloc(psContext->psSpanRA,
+ psSyncBlock->ui32SyncBlockSize,
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ psSyncBlock->ui32SyncBlockSize,
+ pszAnnotation,
+ &psSyncBlock->uiSpanBase,
+ &uiSpanSize,
+ NULL);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_spanalloc;
+ }
+
+ /*
+ There is no reason the span RA should return an allocation larger
+ than we requested
+ */
+ PVR_LOG_IF_FALSE((uiSpanSize == psSyncBlock->ui32SyncBlockSize),
+ "uiSpanSize invalid");
+
+ *puiBase = psSyncBlock->uiSpanBase;
+ *puiActualSize = psSyncBlock->ui32SyncBlockSize;
+ *phImport = psSyncBlock;
+ return PVRSRV_OK;
+
+ fail_spanalloc:
+ _FreeSyncCheckpointBlock(psSyncBlock);
+ fail_syncblockalloc:
+ _SyncCheckpointContextUnref(psContext);
+
+ return eError;
+}
+
+static void
+_SyncCheckpointBlockUnimport(RA_PERARENA_HANDLE hArena,
+ RA_BASE_T uiBase,
+ RA_PERISPAN_HANDLE hImport)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = hArena;
+ SYNC_CHECKPOINT_BLOCK *psSyncBlock = hImport;
+
+ PVR_LOG_IF_FALSE((psContext != NULL), "hArena invalid");
+ PVR_LOG_IF_FALSE((psSyncBlock != NULL), "hImport invalid");
+ PVR_LOG_IF_FALSE((uiBase == psSyncBlock->uiSpanBase), "uiBase invalid");
+
+ /* Free the span this import is using */
+ RA_Free(psContext->psSpanRA, uiBase);
+
+ /* Free the sync checkpoint block */
+ _FreeSyncCheckpointBlock(psSyncBlock);
+
+ /* Drop our reference to the sync checkpoint context */
+ _SyncCheckpointContextUnref(psContext);
+}
+
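+/* Byte offset of a checkpoint within its UFO block, derived from the
+ * span addresses; asserted to fit in 32 bits. */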
+static INLINE IMG_UINT32 _SyncCheckpointGetOffset(_SYNC_CHECKPOINT *psSyncInt)
+{
+ IMG_UINT64 ui64Temp;
+
+ ui64Temp = psSyncInt->uiSpanAddr - psSyncInt->psSyncCheckpointBlock->uiSpanBase;
+ PVR_ASSERT(ui64Temp < IMG_UINT32_MAX);
+ return (IMG_UINT32)ui64Temp;
+}
+
+/* Used by SyncCheckpointContextCreate() below */
+static INLINE IMG_UINT32 _Log2(IMG_UINT32 ui32Align)
+{
+ PVR_ASSERT(IsPower2(ui32Align));
+ return ExactLog2(ui32Align);
+}
+
+/*
+ External interfaces
+ */
+
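+/*
+ * Register the native-sync implementation's callbacks. These are held
+ * in module-scope function pointers; until registration has happened,
+ * the wrapper functions below fail with
+ * PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED. A sketch of a typical
+ * caller (names hypothetical):
+ *
+ *   eError = SyncCheckpointRegisterFunctions(pfnMyResolve, pfnMyCreate,
+ *                                            pfnMyRollback, pfnMyFinalise,
+ *                                            pfnMyNoHWUpdate, pfnMyFreeList,
+ *                                            pfnMyDumpStalled);
+ */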
+PVRSRV_ERROR
+SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate,
+ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback,
+ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise,
+ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines,
+ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem,
+ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ g_pfnFenceResolve = pfnFenceResolve;
+ g_pfnFenceCreate = pfnFenceCreate;
+ g_pfnFenceDataRollback = pfnFenceDataRollback;
+ g_pfnFenceFinalise = pfnFenceFinalise;
+ g_pfnNoHWUpdateTimelines = pfnNoHWUpdateTimelines;
+ g_pfnFreeChkptListMem = pfnFreeCheckpointListMem;
+ g_pfnDumpInfoOnStalledUFOs = pfnDumpInfoOnStalledUFOs;
+
+ return eError;
+}
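+
+/*
+ * Resolve a fence into the array of sync checkpoints it contains, via
+ * the registered callback. On success the caller holds a reference on
+ * each returned checkpoint; the over-limit error path below shows the
+ * required cleanup (drop each reference, then free the list memory).
+ */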
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE hFence, IMG_UINT32 *pui32NumSyncCheckpoints,
+ PSYNC_CHECKPOINT **papsSyncCheckpoints,
+ IMG_UINT64 *pui64FenceUID)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!g_pfnFenceResolve)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_LOG_ERROR(eError, "g_pfnFenceResolve is NULL");
+ return eError;
+ }
+
+ if (papsSyncCheckpoints)
+ {
+ eError = g_pfnFenceResolve(psSyncCheckpointContext,
+ hFence,
+ pui32NumSyncCheckpoints,
+ papsSyncCheckpoints,
+ pui64FenceUID);
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ PVR_LOGR_IF_ERROR(eError, "g_pfnFenceResolve");
+
+ if (*pui32NumSyncCheckpoints > MAX_SYNC_CHECKPOINTS_PER_FENCE)
+ {
+ IMG_UINT32 i;
+ PVR_DPF((PVR_DBG_ERROR, "%s: g_pfnFenceResolve() returned too many checkpoints (%u > MAX_SYNC_CHECKPOINTS_PER_FENCE=%u)",
+ __func__, *pui32NumSyncCheckpoints, MAX_SYNC_CHECKPOINTS_PER_FENCE));
+
+ /* Free resources after error */
+ if (*papsSyncCheckpoints)
+ {
+ for (i = 0; i < *pui32NumSyncCheckpoints; i++)
+ {
+ SyncCheckpointDropRef((*papsSyncCheckpoints)[i]);
+ }
+ SyncCheckpointFreeCheckpointListMem(*papsSyncCheckpoints);
+ }
+
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+ {
+ IMG_UINT32 ii;
+
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: g_pfnFenceResolve() for fence %d returned the following %d checkpoints:",
+ __FUNCTION__,
+ hFence,
+ *pui32NumSyncCheckpoints));
+
+ for (ii = 0; ii < *pui32NumSyncCheckpoints; ii++)
+ {
+ PSYNC_CHECKPOINT psNextCheckpoint = *(*papsSyncCheckpoints + ii);
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: *papsSyncCheckpoints[%d]:<%p>",
+ __FUNCTION__,
+ ii,
+ (void*)psNextCheckpoint));
+ }
+ }
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PVRSRV_DEVICE_NODE *psDevNode,
+ const IMG_CHAR *pszFenceName,
+ PVRSRV_TIMELINE hTimeline,
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE *phNewFence,
+ IMG_UINT64 *puiUpdateFenceUID,
+ void **ppvFenceFinaliseData,
+ PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+ void **ppvTimelineUpdateSyncPrim,
+ IMG_UINT32 *pui32TimelineUpdateValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+
+ if (!g_pfnFenceCreate)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_LOG_ERROR(eError, "g_pfnFenceCreate is NULL");
+ }
+ else
+ {
+ eError = g_pfnFenceCreate(pszFenceName,
+ hTimeline,
+ psSyncCheckpointContext,
+ phNewFence,
+ puiUpdateFenceUID,
+ ppvFenceFinaliseData,
+ psNewSyncCheckpoint,
+ ppvTimelineUpdateSyncPrim,
+ pui32TimelineUpdateValue);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s failed to create new fence<%p> for timeline<%d> using "
+ "sync checkpoint context<%p>, psNewSyncCheckpoint=<%p>, eError=%s",
+ __FUNCTION__,
+ (void*)phNewFence,
+ hTimeline,
+ (void*)psSyncCheckpointContext,
+ (void*)psNewSyncCheckpoint,
+ PVRSRVGetErrorStringKM(eError)));
+ }
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s created new fence<%d> for timeline<%d> using "
+ "sync checkpoint context<%p>, new sync_checkpoint=<%p>",
+ __FUNCTION__,
+ *phNewFence,
+ hTimeline,
+ (void*)psSyncCheckpointContext,
+ (void*)*psNewSyncCheckpoint));
+ }
+#endif
+ }
+ return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!g_pfnFenceDataRollback)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_LOG_ERROR(eError, "g_pfnFenceDataRollback is NULL");
+ }
+ else
+ {
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: called to rollback fence data <%p>",
+ __FUNCTION__,
+ pvFinaliseData));
+#endif
+ eError = g_pfnFenceDataRollback(hFence, pvFinaliseData);
+ PVR_LOG_IF_ERROR(eError, "g_pfnFenceDataRollback returned error");
+ }
+ return eError;
+}
+
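+/*
+ * Finalise a fence via the registered callback. Unlike the other
+ * wrappers, a missing callback is only a warning here since fence
+ * finalisation is optional for a native-sync implementation.
+ */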
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PVRSRV_FENCE hFence, void *pvFinaliseData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!g_pfnFenceFinalise)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: Warning (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED) (this is permitted)",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ }
+ else
+ {
+#if (ENABLE_SYNC_CHECKPOINT_FENCE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: called to finalise fence <%d>",
+ __FUNCTION__,
+ hFence));
+#endif
+ eError = g_pfnFenceFinalise(hFence, pvFinaliseData);
+ PVR_LOG_IF_ERROR(eError, "g_pfnFenceFinalise returned error");
+ }
+ return eError;
+}
+
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem)
+{
+ if (g_pfnFreeChkptListMem)
+ {
+ g_pfnFreeChkptListMem(pvCheckpointListMem);
+ }
+}
+
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!g_pfnNoHWUpdateTimelines)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_LOG_ERROR(eError, "g_pfnNoHWUpdateTimelines is NULL");
+ }
+ else
+ {
+ g_pfnNoHWUpdateTimelines(pvPrivateData);
+ }
+ return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs, IMG_UINT32 *pui32Vaddrs, IMG_UINT32 *pui32NumSyncOwnedUFOs)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_LOGR_IF_FALSE((pui32NumSyncOwnedUFOs != NULL), "pui32NumSyncOwnedUFOs invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ if (!g_pfnDumpInfoOnStalledUFOs)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: ERROR (eError=PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED)",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED;
+ PVR_LOG_ERROR(eError, "g_pfnDumpInfoOnStalledUFOs is NULL");
+ }
+ else
+ {
+ *pui32NumSyncOwnedUFOs = g_pfnDumpInfoOnStalledUFOs(ui32NumUFOs, pui32Vaddrs);
+ }
+ return eError;
+}
+
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = NULL;
+ _SYNC_CHECKPOINT_CONTEXT_CTL *psContextCtl = NULL;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_LOGR_IF_FALSE((ppsSyncCheckpointContext != NULL),
+ "ppsSyncCheckpointContext invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ psContext = OSAllocMem(sizeof(*psContext));
+ PVR_LOGG_IF_NOMEM(psContext, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+ psContextCtl = OSAllocMem(sizeof(*psContextCtl));
+ PVR_LOGG_IF_NOMEM(psContextCtl, "OSAllocMem", eError, fail_alloc2); /* Sets OOM error code */
+
+ eError = OSLockCreate(&psContext->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+ "to OSLockCreate(context lock) failed");
+ goto fail_create_context_lock;
+ }
+
+ eError = OSLockCreate(&psContextCtl->hDeferredCleanupListLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+ "to OSLockCreate(deferred cleanup list lock) failed");
+ goto fail_create_deferred_cleanup_lock;
+ }
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ eError = OSLockCreate(&psContextCtl->hSyncCheckpointPoolLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call "
+ "to OSLockCreate(sync checkpoint pool lock) failed");
+ goto fail_create_pool_lock;
+ }
+#endif
+
+ dllist_init(&psContextCtl->sDeferredCleanupListHead);
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ psContextCtl->ui32SyncCheckpointPoolCount = 0;
+ psContextCtl->ui32SyncCheckpointPoolWp = 0;
+ psContextCtl->ui32SyncCheckpointPoolRp = 0;
+ psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+ psContextCtl->bSyncCheckpointPoolValid = IMG_TRUE;
+#endif
+ psContext->psDevNode = psDevNode;
+
+ OSSNPrintf(psContext->azName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim RA-%p", psContext);
+ OSSNPrintf(psContext->azSpanName, PVRSRV_SYNC_NAME_LENGTH, "Sync Prim span RA-%p", psContext);
+
+ /*
+ Create the RA for sub-allocations of the sync checkpoints
+
+ Note:
+ The import size doesn't matter here as the server will pass
+ back the blocksize when it does the import which overrides
+ what we specify here.
+ */
+ psContext->psSubAllocRA = RA_Create(psContext->azName,
+ /* Params for imports */
+ _Log2(sizeof(IMG_UINT32)),
+ RA_LOCKCLASS_2,
+ _SyncCheckpointBlockImport,
+ _SyncCheckpointBlockUnimport,
+ psContext,
+ IMG_FALSE);
+ if (psContext->psSubAllocRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Create(subAlloc) failed");
+ goto fail_suballoc;
+ }
+
+ /*
+ Create the span-management RA
+
+ The RA requires that we work with linear spans. For our use
+ here we don't require this behaviour as we're always working
+ within offsets of blocks (imports). However, we need to keep
+ the RA happy so we create the "span" management RA which
+ ensures that all our imports are added to the RA in a linear
+ fashion
+ */
+ psContext->psSpanRA = RA_Create(psContext->azSpanName,
+ /* Params for imports */
+ 0,
+ RA_LOCKCLASS_1,
+ NULL,
+ NULL,
+ NULL,
+ IMG_FALSE);
+ if (psContext->psSpanRA == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Create(span) failed");
+ goto fail_span;
+ }
+
+ if (!RA_Add(psContext->psSpanRA, 0, MAX_SYNC_CHECKPOINT_MEM, 0, NULL))
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_LOG_ERROR(eError, "SyncCheckpointContextCreate call to RA_Add(span) failed");
+ goto fail_span_add;
+ }
+
+ OSAtomicWrite(&psContext->hRefCount, 1);
+ OSAtomicWrite(&psContext->hCheckpointCount, 0);
+
+ psContext->psContextCtl = psContextCtl;
+
+ *ppsSyncCheckpointContext = (PSYNC_CHECKPOINT_CONTEXT)psContext;
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: created psSyncCheckpointContext=<%p>",
+ __FUNCTION__,
+ (void*)*ppsSyncCheckpointContext));
+#endif
+ return PVRSRV_OK;
+
+ fail_span_add:
+ RA_Delete(psContext->psSpanRA);
+ fail_span:
+ RA_Delete(psContext->psSubAllocRA);
+ fail_suballoc:
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ OSLockDestroy(psContextCtl->hSyncCheckpointPoolLock);
+ psContextCtl->hSyncCheckpointPoolLock = NULL;
+ fail_create_pool_lock:
+#endif
+ OSLockDestroy(psContextCtl->hDeferredCleanupListLock);
+ psContextCtl->hDeferredCleanupListLock = NULL;
+ fail_create_deferred_cleanup_lock:
+ OSLockDestroy(psContext->hLock);
+ psContext->hLock = NULL;
+ fail_create_context_lock:
+ OSFreeMem(psContextCtl);
+ fail_alloc2:
+ OSFreeMem(psContext);
+ fail_alloc:
+ return eError;
+}
+
+/* Poisons and frees the checkpoint and lock.
+ * Decrements context refcount. */
+static void _FreeSyncCheckpoint(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+ psSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = 0;
+ psSyncCheckpoint->psSyncCheckpointFwObj = NULL;
+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_FREED;
+
+ RA_Free(psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA,
+ psSyncCheckpoint->uiSpanAddr);
+ psSyncCheckpoint->psSyncCheckpointBlock = NULL;
+
+ OSLockDestroy(psSyncCheckpoint->hLock);
+ OSFreeMem(psSyncCheckpoint);
+
+ OSAtomicDecrement(&psContext->hCheckpointCount);
+}
+
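+/*
+ * Destroy a sync checkpoint context. If checkpoints are still allocated
+ * on the context this fails with PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT
+ * (after dumping the stragglers); the caller may retry once the
+ * outstanding checkpoints have been freed.
+ */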
+PVRSRV_ERROR SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = (_SYNC_CHECKPOINT_CONTEXT*)psSyncCheckpointContext;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ IMG_INT iRf = 0;
+
+ /* Validate the context before it is dereferenced below */
+ PVR_LOGR_IF_FALSE((psSyncCheckpointContext != NULL),
+ "psSyncCheckpointContext invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+
+ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+#if (ENABLE_SYNC_CHECKPOINT_CONTEXT_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s: destroying psSyncCheckpointContext=<%p>",
+ __FUNCTION__,
+ (void*)psSyncCheckpointContext));
+#endif
+
+ _CheckDeferredCleanupList(psContext);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+ if (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0)
+ {
+ IMG_UINT32 ui32NumFreedFromPool = _CleanCheckpointPool(psContext);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s freed %d sync checkpoints that were still in the pool for context<%p>",
+ __FUNCTION__,
+ ui32NumFreedFromPool,
+ (void*)psContext));
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32NumFreedFromPool);
+#endif
+ }
+#endif
+
+ iRf = OSAtomicRead(&psContext->hCheckpointCount);
+
+ if (iRf != 0)
+ {
+ /* Note, this is not a permanent error as the caller may retry later */
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s <%p> attempted with active references (iRf=%d), "
+ "may be the result of a race",
+ __FUNCTION__,
+ (void*)psContext,
+ iRf));
+
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ {
+ DLLIST_NODE *psNode, *psNext;
+
+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+ {
+ _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+ IMG_BOOL bDeferredFree = dllist_node_is_in_list(&psSyncCheckpoint->sDeferredFreeListNode);
+
+ /* Line below avoids an unused-variable build error in release builds (where PVR_DPF compiles out) */
+ PVR_UNREFERENCED_PARAMETER(bDeferredFree);
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s syncCheckpoint<%p> ID=%d, %s, refs=%d, state=%s, fwaddr=%#08x, enqCount:%d, FWCount:%d %s",
+ __FUNCTION__,
+ (void*)psSyncCheckpoint,
+ psSyncCheckpoint->ui32UID,
+ psSyncCheckpoint->azName,
+ OSAtomicRead(&psSyncCheckpoint->hRefCount),
+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED ?
+ "PVRSRV_SYNC_CHECKPOINT_SIGNALLED" :
+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED ?
+ "PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED" : "PVRSRV_SYNC_CHECKPOINT_ERRORED",
+ psSyncCheckpoint->ui32FWAddr,
+ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+ bDeferredFree ? "(deferred free)" : ""));
+
+ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+ }
+ else
+ {
+ /* No checkpoints outstanding, drop the context's initial reference */
+ _SyncCheckpointContextUnref(psContext);
+ }
+
+ return eError;
+}
+
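+/*
+ * Allocate a sync checkpoint, preferring to recycle one from the
+ * context's pool (when SYNC_CHECKPOINT_POOL_SIZE > 0) and falling back
+ * to a fresh sub-allocation from the context's RA. In both cases the
+ * checkpoint is (re)initialised to NOT_SIGNALLED with a single
+ * reference held for the caller.
+ */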
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+ PVRSRV_TIMELINE hTimeline,
+ const IMG_CHAR *pszCheckpointName,
+ PSYNC_CHECKPOINT *ppsSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psNewSyncCheckpoint = NULL;
+ _SYNC_CHECKPOINT_CONTEXT *psSyncContextInt = (_SYNC_CHECKPOINT_CONTEXT*)psSyncContext;
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_ERROR eError;
+
+ PVR_LOGR_IF_FALSE((psSyncContext != NULL), "psSyncContext invalid", PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_LOGR_IF_FALSE((ppsSyncCheckpoint != NULL), "ppsSyncCheckpoint invalid", PVRSRV_ERROR_INVALID_PARAMS);
+
+ psDevNode = (PVRSRV_DEVICE_NODE *)psSyncContextInt->psDevNode;
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+ PVR_DPF((PVR_DBG_WARNING, "%s Entry, Getting checkpoint from pool", __FUNCTION__));
+#endif
+ psNewSyncCheckpoint = _GetCheckpointFromPool(psSyncContextInt);
+ if (!psNewSyncCheckpoint)
+ {
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+ PVR_DPF((PVR_DBG_WARNING, "%s checkpoint pool empty - will have to allocate", __FUNCTION__));
+#endif
+ }
+#endif
+ /* If the pool is empty (or not compiled in), allocate a new sync checkpoint */
+ if (!psNewSyncCheckpoint)
+ {
+ psNewSyncCheckpoint = OSAllocMem(sizeof(*psNewSyncCheckpoint));
+ PVR_LOGG_IF_NOMEM(psNewSyncCheckpoint, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+ eError = OSLockCreate(&psNewSyncCheckpoint->hLock, LOCK_TYPE_NONE);
+
+ PVR_LOGG_IF_ERROR(eError, "OSLockCreate", fail_create_checkpoint_lock);
+
+ eError = RA_Alloc(psSyncContextInt->psSubAllocRA,
+ sizeof(*psNewSyncCheckpoint->psSyncCheckpointFwObj),
+ RA_NO_IMPORT_MULTIPLIER,
+ 0,
+ sizeof(IMG_UINT32),
+ (IMG_CHAR*)pszCheckpointName,
+ &psNewSyncCheckpoint->uiSpanAddr,
+ NULL,
+ (RA_PERISPAN_HANDLE *) &psNewSyncCheckpoint->psSyncCheckpointBlock);
+ PVR_LOGG_IF_ERROR(eError, "RA_Alloc", fail_raalloc);
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s CALLED RA_Alloc(), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+ __FUNCTION__,
+ (void*)psSyncContextInt->psSubAllocRA,
+ psNewSyncCheckpoint->uiSpanAddr));
+#endif
+ psNewSyncCheckpoint->hTimeline = hTimeline;
+ psNewSyncCheckpoint->psSyncCheckpointFwObj =
+ (volatile _SYNC_CHECKPOINT_FW_OBJ*)(psNewSyncCheckpoint->psSyncCheckpointBlock->pui32LinAddr +
+ (_SyncCheckpointGetOffset(psNewSyncCheckpoint)/sizeof(IMG_UINT32)));
+ psNewSyncCheckpoint->ui32FWAddr = psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+ _SyncCheckpointGetOffset(psNewSyncCheckpoint) + 1;
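+ /* Note: the +1 above sets the LSB of the checkpoint's FW address;
+ * SyncCheckpointRecordLookup() applies the same +1 when matching a
+ * FW address back to a record. */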
+ OSAtomicIncrement(&psNewSyncCheckpoint->psSyncCheckpointBlock->psContext->hCheckpointCount);
+ psNewSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s called to allocate new sync checkpoint<%p> for context<%p>", __FUNCTION__, (void*)psNewSyncCheckpoint, (void*)psSyncContext));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpointFwObj<%p>", __FUNCTION__, (void*)psNewSyncCheckpoint->psSyncCheckpointFwObj));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint FwAddr=0x%x", __FUNCTION__, SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint)));
+ PVR_DPF((PVR_DBG_WARNING, "%s pszCheckpointName = %s", __FUNCTION__, pszCheckpointName));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint Timeline=%d", __FUNCTION__, psNewSyncCheckpoint->hTimeline));
+#endif
+ }
+
+ OSAtomicWrite(&psNewSyncCheckpoint->hRefCount, 1);
+ OSAtomicWrite(&psNewSyncCheckpoint->hEnqueuedCCBCount, 0);
+ psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount = 0;
+ psNewSyncCheckpoint->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED;
+ psNewSyncCheckpoint->uiProcess = OSGetCurrentClientProcessIDKM();
+ OSCachedMemSet(&psNewSyncCheckpoint->sDeferredFreeListNode, 0, sizeof(psNewSyncCheckpoint->sDeferredFreeListNode));
+
+ if (pszCheckpointName)
+ {
+ /* Copy over the checkpoint name annotation */
+ OSStringLCopy(psNewSyncCheckpoint->azName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+ }
+ else
+ {
+ /* No sync checkpoint name annotation */
+ psNewSyncCheckpoint->azName[0] = '\0';
+ }
+
+ /* Store sync checkpoint FW address in PRGXFWIF_UFO_ADDR struct */
+ psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr = SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+
+ /* Assign unique ID to this sync checkpoint */
+ psNewSyncCheckpoint->ui32UID = g_SyncCheckpointUID++;
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Allocated Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+ psNewSyncCheckpoint->azName,
+ psNewSyncCheckpoint->ui32UID, psNewSyncCheckpoint->hTimeline,
+ psNewSyncCheckpoint->sCheckpointUFOAddr.ui32Addr);
+
+ RGX_HWPERF_HOST_ALLOC(psDevNode, SYNCCP, psNewSyncCheckpoint->ui32FWAddr, psNewSyncCheckpoint->azName, sizeof(psNewSyncCheckpoint->azName));
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ {
+ IMG_CHAR szChkptName[PVRSRV_SYNC_NAME_LENGTH];
+
+ if (pszCheckpointName)
+ {
+ /* Copy the checkpoint name annotation into a fixed-size array */
+ OSStringLCopy(szChkptName, pszCheckpointName, PVRSRV_SYNC_NAME_LENGTH);
+ }
+ else
+ {
+ /* No checkpoint name annotation */
+ szChkptName[0] = 0;
+ }
+ /* record this sync */
+ eError = _SyncCheckpointRecordAdd(&psNewSyncCheckpoint->hRecord,
+ psNewSyncCheckpoint->psSyncCheckpointBlock,
+ psNewSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr,
+ _SyncCheckpointGetOffset(psNewSyncCheckpoint),
+ psNewSyncCheckpoint->ui32UID,
+ OSStringNLength(szChkptName, PVRSRV_SYNC_NAME_LENGTH),
+ szChkptName, (PSYNC_CHECKPOINT)psNewSyncCheckpoint);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\" (%s)",
+ __func__,
+ szChkptName,
+ PVRSRVGetErrorStringKM(eError)));
+ psNewSyncCheckpoint->hRecord = NULL;
+ /* note the error but continue without affecting driver operation */
+ }
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(pszCheckpointName);
+#endif /* if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+ /* Add the sync checkpoint to the device list */
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_add_to_head(&psDevNode->sSyncCheckpointSyncsList,
+ &psNewSyncCheckpoint->sListNode);
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+ *ppsSyncCheckpoint = (PSYNC_CHECKPOINT)psNewSyncCheckpoint;
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s Exit(Ok), psNewSyncCheckpoint->ui32UID=%d <%p>",
+ __FUNCTION__,
+ psNewSyncCheckpoint->ui32UID,
+ (void*)psNewSyncCheckpoint));
+#endif
+ return PVRSRV_OK;
+
+ fail_raalloc:
+ OSLockDestroy(psNewSyncCheckpoint->hLock);
+ psNewSyncCheckpoint->hLock = NULL;
+ fail_create_checkpoint_lock:
+ OSFreeMem(psNewSyncCheckpoint);
+ fail_alloc:
+ return eError;
+}
+
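+/*
+ * Drop a reference on a sync checkpoint. When the last reference is
+ * dropped and the firmware has serviced every enqueued CCB reference,
+ * the checkpoint is returned to the pool (if space allows) or freed;
+ * otherwise it is parked on the deferred cleanup list until the FW
+ * reference count catches up with the enqueued count.
+ */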
+static void SyncCheckpointUnref(_SYNC_CHECKPOINT *psSyncCheckpointInt)
+{
+ _SYNC_CHECKPOINT_CONTEXT *psContext;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ psContext = psSyncCheckpointInt->psSyncCheckpointBlock->psContext;
+ psDevNode = (PVRSRV_DEVICE_NODE *)psContext->psDevNode;
+
+ /*
+ * Without this reference, the context may be destroyed as soon
+ * as _FreeSyncCheckpoint is called, but the context is still
+ * needed when _CheckDeferredCleanupList is called at the end
+ * of this function.
+ */
+ _SyncCheckpointContextRef(psContext);
+
+ PVR_ASSERT(psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE);
+ if (!OSAtomicRead(&psSyncCheckpointInt->hRefCount))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "SyncCheckpointUnref sync checkpoint already freed"));
+ }
+ else if (0 == OSAtomicDecrement(&psSyncCheckpointInt->hRefCount))
+ {
+ /* If the firmware has serviced all enqueued references to the sync checkpoint, free it */
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+ {
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s No outstanding FW ops and hRef is zero, deleting SyncCheckpoint..",
+ __FUNCTION__));
+#endif
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if (psSyncCheckpointInt->hRecord)
+ {
+ PVRSRV_ERROR eError;
+ /* remove this sync record */
+ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+ PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
+ }
+#endif
+ /* Remove the sync checkpoint from the global list */
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_remove_node(&psSyncCheckpointInt->sListNode);
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+ RGX_HWPERF_HOST_FREE(psDevNode, SYNCCP, psSyncCheckpointInt->ui32FWAddr);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s attempting to return sync checkpoint to the pool",
+ __FUNCTION__));
+#endif
+ if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+ {
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if ((ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s pool is full, so just free it",
+ __FUNCTION__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt,
+ (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+ psSyncCheckpointInt->uiSpanAddr));
+#endif
+ _FreeSyncCheckpoint(psSyncCheckpointInt);
+ }
+ }
+ else
+ {
+#if ((ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1))
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s Outstanding FW ops hEnqueuedCCBCount=%d != FwObj->ui32FwRefCount=%d "
+ "- DEFERRING CLEANUP psSyncCheckpoint(ID:%d)<%p>",
+ __FUNCTION__,
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt));
+#endif
+ /* Add the sync checkpoint to the deferred free list */
+ OSLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock);
+ dllist_add_to_tail(&psContext->psContextCtl->sDeferredCleanupListHead,
+ &psSyncCheckpointInt->sDeferredFreeListNode);
+ OSLockRelease(psContext->psContextCtl->hDeferredCleanupListLock);
+ }
+ }
+ else
+ {
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s psSyncCheckpoint(ID:%d)<%p>, hRefCount decremented to %d",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt,
+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount))));
+#endif
+ }
+
+ /* See if any sync checkpoints in the deferred cleanup list can be freed */
+ _CheckDeferredCleanupList(psContext);
+
+ _SyncCheckpointContextUnref(psContext);
+}
+
+void SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s Entry, psSyncCheckpoint(ID:%d)<%p>, hRefCount=%d, psSyncCheckpoint->ui32ValidationCheck=0x%x",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpoint,
+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hRefCount)),
+ psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+ SyncCheckpointUnref(psSyncCheckpointInt);
+}
+
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED),
+ "psSyncCheckpoint already signalled");
+
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+ RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+
+#if defined(PDUMP)
+ /* We may need to temporarily disable the posting of PDump events here, as the caller can be
+ * in interrupt context and PDUMPCOMMENTWITHFLAGS takes the PDUMP_LOCK mutex
+ */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Signalled Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+ psSyncCheckpointInt->azName,
+ psSyncCheckpointInt->ui32UID, psSyncCheckpointInt->hTimeline,
+ (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr +
+ _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+ _SyncCheckpointSignalPDump(psSyncCheckpointInt);
+#endif
+ }
+ else
+ {
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+ "when value is already %d",
+ __FUNCTION__,
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+ psSyncCheckpointInt->ui32UID,
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+#endif
+ }
+ }
+}
+
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED),
+ "psSyncCheckpoint already signalled");
+
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+ RGXSRVHWPerfSyncCheckpointUFOUpdate(psDevInfo, psSyncCheckpointInt, PVRSRV_FENCE_FLAG_NONE);
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_SIGNALLED;
+ }
+ else
+ {
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s asked to set PVRSRV_SYNC_CHECKPOINT_SIGNALLED(%d) for (psSyncCheckpointInt->ui32UID=%d), "
+ "when value is already %d",
+ __FUNCTION__,
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+ psSyncCheckpointInt->ui32UID,
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State));
+#endif
+ }
+ }
+}
+
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ PVR_LOG_IF_FALSE((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED),
+ "psSyncCheckpoint already signalled");
+
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ if (!(ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_SUPPRESS_HWP_PKT))
+ {
+ RGX_HWPERF_UFO_DATA_ELEMENT sSyncData;
+ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+ sSyncData.sUpdate.ui32FWAddr = SyncCheckpointGetFirmwareAddr(psSyncCheckpoint);
+ sSyncData.sUpdate.ui32OldValue = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+ sSyncData.sUpdate.ui32NewValue = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+ RGX_HWPERF_HOST_UFO(psDevInfo, RGX_HWPERF_UFO_EV_UPDATE, &sSyncData,
+ (ui32FenceSyncFlags & PVRSRV_FENCE_FLAG_CTX_ATOMIC) ? IMG_FALSE : IMG_TRUE);
+ }
+
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State = PVRSRV_SYNC_CHECKPOINT_ERRORED;
+
+#if defined(PDUMP)
+ /* We may need to temporarily disable the posting of PDump events here, as the caller can be
+ * in interrupt context and PDUMPCOMMENTWITHFLAGS takes the PDUMP_LOCK mutex
+ */
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Errored Sync Checkpoint %s (ID:%d, TL:%d, FirmwareVAddr = 0x%08x)",
+ psSyncCheckpointInt->azName,
+ psSyncCheckpointInt->ui32UID, psSyncCheckpointInt->hTimeline,
+ (psSyncCheckpointInt->psSyncCheckpointBlock->ui32FirmwareAddr +
+ _SyncCheckpointGetOffset(psSyncCheckpointInt)));
+ _SyncCheckpointErrorPDump(psSyncCheckpointInt);
+#endif
+ }
+ }
+}
+
+IMG_BOOL SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+ bRet = ((psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ||
+ (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED));
+
+ RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called for psSyncCheckpoint<%p>, returning %d",
+ __FUNCTION__,
+ (void*)psSyncCheckpoint,
+ bRet));
+#endif
+ }
+ return bRet;
+}
+
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags)
+{
+ IMG_BOOL bRet = IMG_FALSE;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+ PVRSRV_RGXDEV_INFO *psDevInfo = psSyncCheckpointInt->psSyncCheckpointBlock->psDevNode->pvDevice;
+
+ bRet = (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_ERRORED);
+
+ RGXSRVHWPerfSyncCheckpointUFOIsSignalled(psDevInfo, psSyncCheckpointInt, ui32FenceSyncFlags);
+
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called for psSyncCheckpoint<%p>, returning %d",
+ __FUNCTION__,
+ (void*)psSyncCheckpoint,
+ bRet));
+#endif
+ }
+ return bRet;
+}
+
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", "Null");
+
+ switch (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State)
+ {
+ case PVRSRV_SYNC_CHECKPOINT_SIGNALLED:
+ return "Signalled";
+ case PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED:
+ return "Active";
+ case PVRSRV_SYNC_CHECKPOINT_ERRORED:
+ return "Errored";
+ default:
+ return "Unknown";
+ }
+}
+
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ PVRSRV_ERROR eRet = PVRSRV_OK;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL),
+ "psSyncCheckpoint invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+ __func__,
+ psSyncCheckpointInt,
+ OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+ OSAtomicRead(&psSyncCheckpointInt->hRefCount)+1,
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+ OSAtomicIncrement(&psSyncCheckpointInt->hRefCount);
+
+ return eRet;
+}
+
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ PVRSRV_ERROR eRet = PVRSRV_OK;
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOGR_IF_FALSE((psSyncCheckpoint != NULL),
+ "psSyncCheckpoint invalid",
+ PVRSRV_ERROR_INVALID_PARAMS);
+#if (ENABLE_SYNC_CHECKPOINT_ALLOC_AND_FREE_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+ __func__,
+ psSyncCheckpointInt,
+ OSAtomicRead(&psSyncCheckpointInt->hRefCount),
+ OSAtomicRead(&psSyncCheckpointInt->hRefCount)-1,
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+ SyncCheckpointUnref(psSyncCheckpointInt);
+
+ return eRet;
+}
+
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid");
+
+ if (psSyncCheckpointInt)
+ {
+#if !defined(NO_HARDWARE)
+#if (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s called for psSyncCheckpoint<%p> %d->%d (FWRef %u)",
+ __FUNCTION__,
+ (void*)psSyncCheckpoint,
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)+1,
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+#endif
+ OSAtomicIncrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+#endif
+ }
+}
+
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+ if (psSyncCheckpointInt)
+ {
+ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+ {
+ return &psSyncCheckpointInt->sCheckpointUFOAddr;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+ __FUNCTION__,
+ (void*)psSyncCheckpoint,
+ psSyncCheckpointInt->ui32ValidationCheck));
+ }
+ }
+
+ invalid_chkpt:
+ return NULL;
+}
+
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ IMG_UINT32 ui32Ret = 0;
+
+ PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+ if (psSyncCheckpointInt)
+ {
+ if (psSyncCheckpointInt->ui32ValidationCheck == SYNC_CHECKPOINT_PATTERN_IN_USE)
+ {
+ ui32Ret = psSyncCheckpointInt->ui32FWAddr;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s called for psSyncCheckpoint<%p>, but ui32ValidationCheck=0x%x",
+ __FUNCTION__,
+ (void*)psSyncCheckpoint,
+ psSyncCheckpointInt->ui32ValidationCheck));
+ }
+ }
+
+ invalid_chkpt:
+ return ui32Ret;
+}
+
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ IMG_UINT32 ui32Ret = 0;
+
+ PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL), "psSyncCheckpoint invalid", invalid_chkpt);
+
+ if (psSyncCheckpointInt)
+ {
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s returning ID for sync checkpoint<%p>",
+ __FUNCTION__,
+ (void*)psSyncCheckpointInt));
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s (validationCheck=0x%x)",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+ ui32Ret = psSyncCheckpointInt->ui32UID;
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s (ui32UID=0x%x)",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID));
+#endif
+ }
+ return ui32Ret;
+
+ invalid_chkpt:
+ return 0;
+}
+
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ PVRSRV_TIMELINE i32Ret = PVRSRV_NO_TIMELINE;
+
+ PVR_LOGG_IF_FALSE((psSyncCheckpoint != NULL),
+ "psSyncCheckpoint invalid",
+ invalid_chkpt);
+
+ if (psSyncCheckpointInt)
+ {
+ i32Ret = psSyncCheckpointInt->hTimeline;
+ }
+ return i32Ret;
+
+ invalid_chkpt:
+ return PVRSRV_NO_TIMELINE;
+}
+
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ PVR_LOGR_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+ return OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount);
+}
+
+IMG_UINT32
+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ PVR_LOGR_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+ return OSAtomicRead(&psSyncCheckpointInt->hRefCount);
+}
+
+IMG_PID
+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+ PVR_LOGR_IF_FALSE(psSyncCheckpoint != NULL, "psSyncCheckpoint invalid", 0);
+
+ return psSyncCheckpointInt->uiProcess;
+}
+
+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+ IMG_UINT32 ui32FwAddr)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt;
+ PDLLIST_NODE psNode, psNext;
+ IMG_UINT32 ui32State = 0;
+
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+ {
+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+ {
+ ui32State = psSyncCheckpointInt->psSyncCheckpointFwObj->ui32State;
+ break;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+ return ui32State;
+}
+
+void SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+ IMG_UINT32 ui32FwAddr)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt;
+ PDLLIST_NODE psNode, psNext;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called to error UFO with ui32FWAddr=%d",
+ __FUNCTION__,
+ ui32FwAddr));
+#endif
+
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+ {
+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+ {
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s calling SyncCheckpointError for sync checkpoint <%p>",
+ __FUNCTION__,
+ (void*)psSyncCheckpointInt));
+#endif
+ /* Mark as errored */
+ SyncCheckpointError((PSYNC_CHECKPOINT)psSyncCheckpointInt, IMG_TRUE);
+ break;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+}
+
+void SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = NULL;
+ PDLLIST_NODE psNode = NULL, psNext = NULL;
+
+#if (ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called to rollback UFO with ui32FWAddr=0x%x",
+ __FUNCTION__,
+ ui32FwAddr));
+#endif
+#if !defined(NO_HARDWARE)
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+ {
+ psSyncCheckpointInt = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+ if (ui32FwAddr == SyncCheckpointGetFirmwareAddr((PSYNC_CHECKPOINT)psSyncCheckpointInt))
+ {
+#if ((ENABLE_SYNC_CHECKPOINT_UFO_DEBUG == 1) || (ENABLE_SYNC_CHECKPOINT_ENQ_AND_SIGNAL_DEBUG == 1))
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s called for psSyncCheckpointInt<%p> %d->%d",
+ __FUNCTION__,
+ (void*)psSyncCheckpointInt,
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount),
+ OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)-1));
+#endif
+ OSAtomicDecrement(&psSyncCheckpointInt->hEnqueuedCCBCount);
+ break;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+#else
+ PVR_UNREFERENCED_PARAMETER(psNode);
+ PVR_UNREFERENCED_PARAMETER(psNext);
+ PVR_UNREFERENCED_PARAMETER(psSyncCheckpointInt);
+#endif
+}
+
+static void _SyncCheckpointState(PDLLIST_NODE psNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpoint = IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sListNode);
+
+ if (psSyncCheckpoint->psSyncCheckpointFwObj->ui32State == PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED)
+ {
+ PVR_DUMPDEBUG_LOG("\t- ID = %d, FWAddr = 0x%08x, r%d:e%d:f%d: %s",
+ psSyncCheckpoint->ui32UID,
+ psSyncCheckpoint->psSyncCheckpointBlock->ui32FirmwareAddr +
+ _SyncCheckpointGetOffset(psSyncCheckpoint),
+ OSAtomicRead(&psSyncCheckpoint->hRefCount),
+ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+ psSyncCheckpoint->azName);
+ }
+}
+
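+/*
+ * Debug-request handler: at medium verbosity, dump every checkpoint on
+ * the device's list that is still in the active (NOT_SIGNALLED) state.
+ */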
+static void _SyncCheckpointDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+ DLLIST_NODE *psNode, *psNext;
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+ {
+ PVR_DUMPDEBUG_LOG("------[ Active Sync Checkpoints ]------");
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_foreach_node(&psDevNode->sSyncCheckpointSyncsList, psNode, psNext)
+ {
+ _SyncCheckpointState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+ }
+}
+
+PVRSRV_ERROR
+SyncCheckpointInit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!gbSyncCheckpointInit)
+ {
+ eError = OSLockCreate(&psDevNode->hSyncCheckpointListLock, LOCK_TYPE_NONE);
+ if (eError == PVRSRV_OK)
+ {
+ dllist_init(&psDevNode->sSyncCheckpointSyncsList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointNotify,
+ psDevNode,
+ _SyncCheckpointDebugRequest,
+ DEBUG_REQUEST_SYNCCHECKPOINT,
+ (PVRSRV_DBGREQ_HANDLE)psDevNode);
+ if (eError == PVRSRV_OK)
+ {
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ _SyncCheckpointRecordListInit(psDevNode);
+#endif
+ gbSyncCheckpointInit = IMG_TRUE;
+ }
+ else
+ {
+ /* free the created lock */
+ OSLockDestroy(psDevNode->hSyncCheckpointListLock);
+ psDevNode->hSyncCheckpointListLock = NULL;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s FAILED to create psDevNode->hSyncCheckpointListLock",
+ __FUNCTION__));
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s called but already initialised", __FUNCTION__));
+ }
+ return eError;
+}
+
+void SyncCheckpointDeinit(PPVRSRV_DEVICE_NODE psDevNode)
+{
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointNotify);
+ psDevNode->hSyncCheckpointNotify = NULL;
+ OSLockDestroy(psDevNode->hSyncCheckpointListLock);
+ psDevNode->hSyncCheckpointListLock = NULL;
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ _SyncCheckpointRecordListDeinit(psDevNode);
+#endif
+ gbSyncCheckpointInit = IMG_FALSE;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
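+/*
+ * Map a firmware UFO address back to its allocation record and format a
+ * one-line description (state, PID, annotation) into pszSyncInfo for
+ * debug output. The +1 in the address comparison mirrors the LSB tag
+ * applied when a checkpoint's FW address is derived in
+ * SyncCheckpointAlloc().
+ */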
+void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr,
+ IMG_CHAR * pszSyncInfo, size_t len)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_BOOL bFound = IMG_FALSE;
+
+ if (!pszSyncInfo)
+ {
+ return;
+ }
+
+ pszSyncInfo[0] = '\0';
+
+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+ {
+ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+ if ((psSyncCheckpointRec->ui32FwBlockAddr + psSyncCheckpointRec->ui32SyncOffset + 1) == ui32FwAddr)
+ {
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+ {
+ void *pSyncCheckpointAddr = (void*)( ((IMG_BYTE*)
+ psSyncCheckpointBlock->pui32LinAddr) + psSyncCheckpointRec->ui32SyncOffset);
+ OSSNPrintf(pszSyncInfo, len, "%s Checkpoint:%05u (%s)",
+ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+ "SIGNALLED" :
+ ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+ "ERRORED" : "NOT_SIGNALLED"),
+ psSyncCheckpointRec->uiPID,
+ psSyncCheckpointRec->szClassName);
+ }
+ else
+ {
+ OSSNPrintf(pszSyncInfo, len, "Checkpoint:%05u (%s)",
+ psSyncCheckpointRec->uiPID,
+ psSyncCheckpointRec->szClassName);
+ }
+
+ bFound = IMG_TRUE;
+ break;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+ if (!bFound && (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT))
+ {
+ OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+ }
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointRecordAdd(
+ PSYNC_CHECKPOINT_RECORD_HANDLE * phRecord,
+ SYNC_CHECKPOINT_BLOCK *hSyncCheckpointBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_UINT32 ui32UID,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName, PSYNC_CHECKPOINT pSyncCheckpt)
+{
+ struct SYNC_CHECKPOINT_RECORD * psSyncRec;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = hSyncCheckpointBlock->psContext;
+ PVRSRV_DEVICE_NODE *psDevNode = psContext->psDevNode;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!phRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *phRecord = NULL;
+
+ psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+ PVR_LOGG_IF_NOMEM(psSyncRec, "OSAllocMem", eError, fail_alloc); /* Sets OOM error code */
+
+ psSyncRec->psDevNode = psDevNode;
+ psSyncRec->psSyncCheckpointBlock = hSyncCheckpointBlock;
+ psSyncRec->ui32SyncOffset = ui32SyncOffset;
+ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+ psSyncRec->ui64OSTime = OSClockns64();
+ psSyncRec->uiPID = OSGetCurrentProcessID();
+ psSyncRec->ui32UID = ui32UID;
+ psSyncRec->pSyncCheckpt = pSyncCheckpt;
+ if (pszClassName)
+ {
+ if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ {
+ ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN;
+ }
+ /* Copy over the class name annotation */
+ OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+ }
+ else
+ {
+ /* No class name annotation */
+ psSyncRec->szClassName[0] = 0;
+ }
+
+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+ if (psDevNode->ui32SyncCheckpointRecordCount < SYNC_CHECKPOINT_RECORD_LIMIT)
+ {
+ dllist_add_to_head(&psDevNode->sSyncCheckpointRecordList, &psSyncRec->sNode);
+ psDevNode->ui32SyncCheckpointRecordCount++;
+
+ if (psDevNode->ui32SyncCheckpointRecordCount > psDevNode->ui32SyncCheckpointRecordCountHighWatermark)
+ {
+ psDevNode->ui32SyncCheckpointRecordCountHighWatermark = psDevNode->ui32SyncCheckpointRecordCount;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync checkpoint record \"%s\". %u records already exist.",
+ __func__,
+ pszClassName,
+ psDevNode->ui32SyncCheckpointRecordCount));
+ OSFreeMem(psSyncRec);
+ psSyncRec = NULL;
+ eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+ *phRecord = (PSYNC_CHECKPOINT_RECORD_HANDLE)psSyncRec;
+
+ fail_alloc:
+ return eError;
+}
+
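+/*
+ * Remove a record from the live list. Rather than being freed
+ * immediately, the record is parked in a fixed-size ring of recently
+ * freed entries (evicting the oldest), so the debug dump can still
+ * report checkpoints freed shortly before a problem occurred.
+ */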
+static PVRSRV_ERROR
+_SyncCheckpointRecordRemove(PSYNC_CHECKPOINT_RECORD_HANDLE hRecord)
+{
+ struct SYNC_CHECKPOINT_RECORD **ppFreedSync;
+ struct SYNC_CHECKPOINT_RECORD *pSync = (struct SYNC_CHECKPOINT_RECORD*)hRecord;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ if (!hRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevNode = pSync->psDevNode;
+
+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+ dllist_remove_node(&pSync->sNode);
+
+ if (psDevNode->uiSyncCheckpointRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: psDevNode->uiSyncCheckpointRecordFreeIdx out of range",
+ __FUNCTION__));
+ psDevNode->uiSyncCheckpointRecordFreeIdx = 0;
+ }
+ ppFreedSync = &psDevNode->apsSyncCheckpointRecordsFreed[psDevNode->uiSyncCheckpointRecordFreeIdx];
+ psDevNode->uiSyncCheckpointRecordFreeIdx =
+ (psDevNode->uiSyncCheckpointRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+ if (*ppFreedSync)
+ {
+ OSFreeMem(*ppFreedSync);
+ }
+ pSync->psSyncCheckpointBlock = NULL;
+ pSync->ui64OSTime = OSClockns64();
+ *ppFreedSync = pSync;
+
+ psDevNode->ui32SyncCheckpointRecordCount--;
+
+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+ return PVRSRV_OK;
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncCheckpointRecordPrint(struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec,
+ IMG_UINT64 ui64TimeNow,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpoint = (_SYNC_CHECKPOINT *)psSyncCheckpointRec->pSyncCheckpt;
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock = psSyncCheckpointRec->psSyncCheckpointBlock;
+ IMG_UINT64 ui64DeltaS;
+ IMG_UINT32 ui32DeltaF;
+ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncCheckpointRec->ui64OSTime;
+ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+ if (psSyncCheckpointBlock && psSyncCheckpointBlock->pui32LinAddr)
+ {
+ void *pSyncCheckpointAddr;
+ pSyncCheckpointAddr = (void*)( ((IMG_BYTE*) psSyncCheckpointBlock->pui32LinAddr) + psSyncCheckpointRec->ui32SyncOffset);
+
+ PVR_DUMPDEBUG_LOG("\t%05u %05llu.%09u %010u FWAddr=0x%08x (r%d:e%d:f%d) State=%s (%s)",
+ psSyncCheckpointRec->uiPID,
+ ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+ OSAtomicRead(&psSyncCheckpoint->hRefCount),
+ OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount),
+ psSyncCheckpoint->psSyncCheckpointFwObj->ui32FwRefCount,
+ (*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_SIGNALLED) ?
+ "SIGNALLED" :
+ ((*(IMG_UINT32*)pSyncCheckpointAddr == PVRSRV_SYNC_CHECKPOINT_ERRORED) ?
+ "ERRORED" : "NOT_SIGNALLED"),
+ psSyncCheckpointRec->szClassName);
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("\t%05u %05llu.%09u %010u FWAddr=0x%08x State=<null_ptr> (%s)",
+ psSyncCheckpointRec->uiPID,
+ ui64DeltaS, ui32DeltaF, psSyncCheckpointRec->ui32UID,
+ (psSyncCheckpointRec->ui32FwBlockAddr+psSyncCheckpointRec->ui32SyncOffset),
+ psSyncCheckpointRec->szClassName
+ );
+ }
+}
+
+static void _SyncCheckpointRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+ IMG_UINT64 ui64TimeNowS;
+ IMG_UINT32 ui32TimeNowF;
+ IMG_UINT64 ui64TimeNow = OSClockns64();
+ DLLIST_NODE *psNode, *psNext;
+
+ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+ {
+ IMG_UINT32 i;
+
+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+
+ PVR_DUMPDEBUG_LOG("Dumping allocated sync checkpoints. Allocated: %u High watermark: %u (time ref %05llu.%09u)",
+ psDevNode->ui32SyncCheckpointRecordCount,
+ psDevNode->ui32SyncCheckpointRecordCountHighWatermark,
+ ui64TimeNowS,
+ ui32TimeNowF);
+ if (psDevNode->ui32SyncCheckpointRecordCountHighWatermark == SYNC_CHECKPOINT_RECORD_LIMIT)
+ {
+ PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+ SYNC_CHECKPOINT_RECORD_LIMIT);
+ }
+ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+ "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+
+ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+ {
+ struct SYNC_CHECKPOINT_RECORD *psSyncCheckpointRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+ _SyncCheckpointRecordPrint(psSyncCheckpointRec, ui64TimeNow,
+ pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ PVR_DUMPDEBUG_LOG("Dumping all recently freed sync checkpoints @ %05llu.%09u",
+ ui64TimeNowS,
+ ui32TimeNowF);
+ PVR_DUMPDEBUG_LOG("\t%-5s %-15s %-10s %-17s %-14s (%s)",
+ "PID", "Time Delta (s)", "UID", "Address", "State", "Annotation");
+ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncCheckpointRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+ i != psDevNode->uiSyncCheckpointRecordFreeIdx;
+ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+ {
+ if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+ {
+ _SyncCheckpointRecordPrint(psDevNode->apsSyncCheckpointRecordsFreed[i],
+ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ else
+ {
+ break;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+ }
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR _SyncCheckpointRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&psDevNode->hSyncCheckpointRecordLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+ dllist_init(&psDevNode->sSyncCheckpointRecordList);
+
+ psDevNode->ui32SyncCheckpointRecordCount = 0;
+ psDevNode->ui32SyncCheckpointRecordCountHighWatermark = 0;
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncCheckpointRecordNotify,
+ psDevNode,
+ _SyncCheckpointRecordRequest,
+ DEBUG_REQUEST_SYNCCHECKPOINT,
+ (PVRSRV_DBGREQ_HANDLE)psDevNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_dbg_register;
+ }
+
+ return PVRSRV_OK;
+
+ fail_dbg_register:
+ OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+ fail_lock_create:
+ return eError;
+}
+
+static void _SyncCheckpointRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ DLLIST_NODE *psNode, *psNext;
+ int i;
+
+ OSLockAcquire(psDevNode->hSyncCheckpointRecordLock);
+ dllist_foreach_node(&psDevNode->sSyncCheckpointRecordList, psNode, psNext)
+ {
+ struct SYNC_CHECKPOINT_RECORD *pSyncCheckpointRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_CHECKPOINT_RECORD, sNode);
+
+ dllist_remove_node(psNode);
+ OSFreeMem(pSyncCheckpointRec);
+ }
+
+ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+ {
+ if (psDevNode->apsSyncCheckpointRecordsFreed[i])
+ {
+ OSFreeMem(psDevNode->apsSyncCheckpointRecordsFreed[i]);
+ psDevNode->apsSyncCheckpointRecordsFreed[i] = NULL;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncCheckpointRecordLock);
+
+ if (psDevNode->hSyncCheckpointRecordNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncCheckpointRecordNotify);
+ }
+ OSLockDestroy(psDevNode->hSyncCheckpointRecordLock);
+}
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR
+SyncCheckpointPDumpPol(PSYNC_CHECKPOINT psSyncCheckpoint, PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = (_SYNC_CHECKPOINT*)psSyncCheckpoint;
+
+ DevmemPDumpDevmemPol32(psSyncCheckpointInt->psSyncCheckpointBlock->hMemDesc,
+ _SyncCheckpointGetOffset(psSyncCheckpointInt),
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+ 0xFFFFFFFF,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+static PVRSRV_ERROR
+_SyncCheckpointSignalPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ /*
+ We might be asked to PDump sync state outside of the capture range
+ (e.g. texture uploads), so make this continuous.
+ */
+ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+ _SyncCheckpointGetOffset(psSyncCheckpoint),
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_SyncCheckpointErrorPDump(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ /*
+ We might be asked to PDump sync state outside of the capture range
+ (e.g. texture uploads), so make this continuous.
+ */
+ DevmemPDumpLoadMemValue32(psSyncCheckpoint->psSyncCheckpointBlock->hMemDesc,
+ _SyncCheckpointGetOffset(psSyncCheckpoint),
+ PVRSRV_SYNC_CHECKPOINT_ERRORED,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+#endif
+
+static void _CheckDeferredCleanupList(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ DLLIST_NODE *psNode, *psNext;
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE*)psContext->psDevNode;
+
+ /* Check the deferred cleanup list and free any sync checkpoints we can */
+ OSLockAcquire(psContext->psContextCtl->hDeferredCleanupListLock);
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s called", __FUNCTION__));
+#endif
+
+ if (dllist_is_empty(&psContext->psContextCtl->sDeferredCleanupListHead))
+ {
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s: Defer free list is empty", __FUNCTION__));
+#endif
+ }
+
+ dllist_foreach_node(&psContext->psContextCtl->sDeferredCleanupListHead, psNode, psNext)
+ {
+ _SYNC_CHECKPOINT *psSyncCheckpointInt =
+ IMG_CONTAINER_OF(psNode, _SYNC_CHECKPOINT, sDeferredFreeListNode);
+
+ if (psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount ==
+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)))
+ {
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ if (psSyncCheckpointInt->hRecord)
+ {
+ PVRSRV_ERROR eError;
+ /* remove this sync record */
+ eError = _SyncCheckpointRecordRemove(psSyncCheckpointInt->hRecord);
+ PVR_LOG_IF_ERROR(eError, "_SyncCheckpointRecordRemove");
+ }
+#endif
+
+ /* Remove the sync checkpoint from the deferred free list */
+ dllist_remove_node(&psSyncCheckpointInt->sDeferredFreeListNode);
+
+ /* Remove the sync checkpoint from the global list */
+ OSLockAcquire(psDevNode->hSyncCheckpointListLock);
+ dllist_remove_node(&psSyncCheckpointInt->sListNode);
+ OSLockRelease(psDevNode->hSyncCheckpointListLock);
+
+ RGX_HWPERF_HOST_FREE(psDevNode, SYNCCP, psSyncCheckpointInt->ui32FWAddr);
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s attempting to return sync(ID:%d),%p> to pool",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt));
+#endif
+ if (!_PutCheckpointInPool(psSyncCheckpointInt))
+#endif
+ {
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s pool is full, so just free it", __FUNCTION__));
+#endif
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), "
+ "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt,
+ (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+ psSyncCheckpointInt->uiSpanAddr));
+#endif
+ _FreeSyncCheckpoint(psSyncCheckpointInt);
+ }
+ }
+#if (ENABLE_SYNC_CHECKPOINT_DEFERRED_CLEANUP_DEBUG == 1)
+ else
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s psSyncCheckpoint '%s'' (ID:%d)<%p>), still pending (enq=%d,FWRef=%d)",
+ __FUNCTION__,
+ psSyncCheckpointInt->azName,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt,
+ (IMG_UINT32)(OSAtomicRead(&psSyncCheckpointInt->hEnqueuedCCBCount)),
+ psSyncCheckpointInt->psSyncCheckpointFwObj->ui32FwRefCount));
+ }
+#endif
+ }
+ OSLockRelease(psContext->psContextCtl->hDeferredCleanupListLock);
+
+ /* Check if all checkpoints are now back in the pool; if so, clean
+ the pool to avoid holding on to left-over allocations */
+ if ((OSAtomicRead(&psContext->hCheckpointCount)) == psContext->psContextCtl->ui32SyncCheckpointPoolCount)
+ {
+ _CleanCheckpointPool(psContext);
+ }
+}
+
+#if (SYNC_CHECKPOINT_POOL_SIZE > 0)
+static _SYNC_CHECKPOINT *_GetCheckpointFromPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpoint = NULL;
+
+ /* Acquire sync checkpoint pool lock */
+ OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+ /* Check if pool has anything in it */
+ if (psContext->psContextCtl->bSyncCheckpointPoolValid && (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+ (psContext->psContextCtl->ui32SyncCheckpointPoolWp != psContext->psContextCtl->ui32SyncCheckpointPoolRp))
+ {
+ /* Get the next sync checkpoint from the pool */
+ psSyncCheckpoint = psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolRp++];
+ if (psContext->psContextCtl->ui32SyncCheckpointPoolRp == SYNC_CHECKPOINT_POOL_SIZE)
+ {
+ psContext->psContextCtl->ui32SyncCheckpointPoolRp = 0;
+ }
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount--;
+ psContext->psContextCtl->bSyncCheckpointPoolFull = IMG_FALSE;
+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_USE;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s checkpoint(old ID:%d)<-POOL(%d/%d), psContext=<%p>, poolRp=%d, poolWp=%d",
+ __FUNCTION__,
+ psSyncCheckpoint->ui32UID,
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+ SYNC_CHECKPOINT_POOL_SIZE,
+ (void*)psContext, psContext->psContextCtl->ui32SyncCheckpointPoolRp, psContext->psContextCtl->ui32SyncCheckpointPoolWp));
+#endif
+ }
+ /* Release sync checkpoint pool lock */
+ OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+ return psSyncCheckpoint;
+}
+
+static IMG_BOOL _PutCheckpointInPool(_SYNC_CHECKPOINT *psSyncCheckpoint)
+{
+ IMG_BOOL bReturnedToPool = IMG_FALSE;
+ _SYNC_CHECKPOINT_CONTEXT *psContext = psSyncCheckpoint->psSyncCheckpointBlock->psContext;
+
+ /* Acquire sync checkpoint pool lock */
+ OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+ /* Check if pool has space */
+ if (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+ !psContext->psContextCtl->bSyncCheckpointPoolFull)
+ {
+ /* Put the sync checkpoint into the next write slot in the pool */
+ psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolWp++] = psSyncCheckpoint;
+ if (psContext->psContextCtl->ui32SyncCheckpointPoolWp == SYNC_CHECKPOINT_POOL_SIZE)
+ {
+ psContext->psContextCtl->ui32SyncCheckpointPoolWp = 0;
+ }
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount++;
+ psContext->psContextCtl->bSyncCheckpointPoolFull =
+ ((psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+ (psContext->psContextCtl->ui32SyncCheckpointPoolWp == psContext->psContextCtl->ui32SyncCheckpointPoolRp));
+ bReturnedToPool = IMG_TRUE;
+ psSyncCheckpoint->ui32ValidationCheck = SYNC_CHECKPOINT_PATTERN_IN_POOL;
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s checkpoint(ID:%d)->POOL(%d/%d), poolRp=%d, poolWp=%d",
+ __FUNCTION__,
+ psSyncCheckpoint->ui32UID,
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount,
+ SYNC_CHECKPOINT_POOL_SIZE, psContext->psContextCtl->ui32SyncCheckpointPoolRp, psContext->psContextCtl->ui32SyncCheckpointPoolWp));
+#endif
+ }
+ /* Release sync checkpoint pool lock */
+ OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+ return bReturnedToPool;
+}
+
+static IMG_UINT32 _CleanCheckpointPool(_SYNC_CHECKPOINT_CONTEXT *psContext)
+{
+ _SYNC_CHECKPOINT *psSyncCheckpointInt = NULL;
+ IMG_UINT32 ui32ItemsFreed = 0;
+
+ /* Acquire sync checkpoint pool lock */
+ OSLockAcquire(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s psContext=<%p>, bSyncCheckpointPoolValid=%d, uiSyncCheckpointPoolCount=%d",
+ __FUNCTION__,
+ (void*)psContext,
+ psContext->psContextCtl->bSyncCheckpointPoolValid,
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount));
+#endif
+ /* While the pool still contains sync checkpoints, free them */
+ while (psContext->psContextCtl->bSyncCheckpointPoolValid &&
+ (psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0))
+ {
+ /* Get the sync checkpoint from the next read slot in the pool */
+ psSyncCheckpointInt = psContext->psContextCtl->psSyncCheckpointPool[psContext->psContextCtl->ui32SyncCheckpointPoolRp++];
+ if (psContext->psContextCtl->ui32SyncCheckpointPoolRp == SYNC_CHECKPOINT_POOL_SIZE)
+ {
+ psContext->psContextCtl->ui32SyncCheckpointPoolRp = 0;
+ }
+ psContext->psContextCtl->ui32SyncCheckpointPoolCount--;
+ psContext->psContextCtl->bSyncCheckpointPoolFull =
+ ((psContext->psContextCtl->ui32SyncCheckpointPoolCount > 0) &&
+ (psContext->psContextCtl->ui32SyncCheckpointPoolWp == psContext->psContextCtl->ui32SyncCheckpointPoolRp));
+
+ if (psSyncCheckpointInt)
+ {
+ if (psSyncCheckpointInt->ui32ValidationCheck != SYNC_CHECKPOINT_PATTERN_IN_POOL)
+ {
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s pool contains invalid entry (ui32ValidationCheck=0x%x)",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32ValidationCheck));
+#endif
+ }
+
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint(ID:%d)", __FUNCTION__, psSyncCheckpointInt->ui32UID));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->ui32ValidationCheck=0x%x", __FUNCTION__, psSyncCheckpointInt->ui32ValidationCheck));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->uiSpanAddr=0x%llx", __FUNCTION__, psSyncCheckpointInt->uiSpanAddr));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->psSyncCheckpointBlock=<%p>", __FUNCTION__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext=<%p>", __FUNCTION__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext));
+ PVR_DPF((PVR_DBG_WARNING, "%s psSyncCheckpoint->psSyncCheckpointBlock->psContext->psSubAllocRA=<%p>", __FUNCTION__, (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA));
+#endif
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING,
+ "%s CALLING RA_Free(psSyncCheckpoint(ID:%d)<%p>), "
+ "psSubAllocRA=<%p>, ui32SpanAddr=0x%llx",
+ __FUNCTION__,
+ psSyncCheckpointInt->ui32UID,
+ (void*)psSyncCheckpointInt,
+ (void*)psSyncCheckpointInt->psSyncCheckpointBlock->psContext->psSubAllocRA,
+ psSyncCheckpointInt->uiSpanAddr));
+#endif
+ _FreeSyncCheckpoint(psSyncCheckpointInt);
+ ui32ItemsFreed++;
+ }
+ else
+ {
+#if (ENABLE_SYNC_CHECKPOINT_POOL_DEBUG == 1)
+ PVR_DPF((PVR_DBG_WARNING, "%s pool contains NULL entry", __FUNCTION__));
+#endif
+ }
+ }
+ /* Release sync checkpoint pool lock */
+ OSLockRelease(psContext->psContextCtl->hSyncCheckpointPoolLock);
+
+ return ui32ItemsFreed;
+}
+#endif /* (SYNC_CHECKPOINT_POOL_SIZE > 0) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.h b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.h
new file mode 100644
index 00000000000000..11d5ff848fc8a0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint.h
@@ -0,0 +1,596 @@
+/*************************************************************************/ /*!
+@File
+@Title Synchronisation checkpoint interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the client side interface for synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_
+#define _SYNC_CHECKPOINT_
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_sync_km.h"
+#include "pdumpdefs.h"
+#include "pdump.h"
+#include "dllist.h"
+#include "pvr_debug.h"
+#include "device_connection.h"
+#include "opaque_types.h"
+
+#ifndef _CHECKPOINT_TYPES_
+#define _CHECKPOINT_TYPES_
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+#endif
+
+/* Definitions for functions to be implemented by the OS-specific sync code.
+ The OS-specific sync code calls SyncCheckpointRegisterFunctions() when it is
+ initialised, in order to register the functions we can then call. */
+#ifndef _CHECKPOINT_PFNS_
+#define _CHECKPOINT_PFNS_
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN)(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE fence,
+ IMG_UINT32 *nr_checkpoints,
+ PSYNC_CHECKPOINT **checkpoint_handles,
+ IMG_UINT64 *pui64FenceUID);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN)(const IMG_CHAR *fence_name,
+ PVRSRV_TIMELINE timeline,
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE *new_fence,
+ IMG_UINT64 *pui64FenceUID,
+ void **ppvFenceFinaliseData,
+ PSYNC_CHECKPOINT *new_checkpoint_handle,
+ IMG_HANDLE *timeline_update_sync,
+ IMG_UINT32 *timeline_update_value);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN)(PVRSRV_FENCE fence_to_rollback, void *finalise_data);
+typedef PVRSRV_ERROR (*PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN)(PVRSRV_FENCE fence_to_finalise, void *finalise_data);
+typedef void (*PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN)(void *private_data);
+typedef void (*PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN)(void *mem_ptr);
+typedef IMG_UINT32 (*PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN)(IMG_UINT32 num_ufos, IMG_UINT32 *vaddrs);
+#endif
+
+PVRSRV_ERROR SyncCheckpointRegisterFunctions(PFN_SYNC_CHECKPOINT_FENCE_RESOLVE_FN pfnFenceResolve,
+ PFN_SYNC_CHECKPOINT_FENCE_CREATE_FN pfnFenceCreate,
+ PFN_SYNC_CHECKPOINT_FENCE_ROLLBACK_DATA_FN pfnFenceDataRollback,
+ PFN_SYNC_CHECKPOINT_FENCE_FINALISE_FN pfnFenceFinalise,
+ PFN_SYNC_CHECKPOINT_NOHW_UPDATE_TIMELINES_FN pfnNoHWUpdateTimelines,
+ PFN_SYNC_CHECKPOINT_FREE_CHECKPOINT_LIST_MEM_FN pfnFreeCheckpointListMem,
+ PFN_SYNC_CHECKPOINT_DUMP_INFO_ON_STALLED_UFOS_FN pfnDumpInfoOnStalledUFOs);
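+
+/* Illustrative registration sketch (not part of this patch; the callback
+ * names are hypothetical). An OS native sync implementation might register
+ * its callbacks at initialisation time roughly as follows:
+ *
+ *     eError = SyncCheckpointRegisterFunctions(OSNativeFenceResolve,
+ *                                              OSNativeFenceCreate,
+ *                                              OSNativeFenceDataRollback,
+ *                                              OSNativeFenceFinalise,
+ *                                              OSNativeNoHWUpdateTimelines,
+ *                                              OSNativeFreeCheckpointListMem,
+ *                                              OSNativeDumpInfoOnStalledUFOs);
+ */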
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointContextCreate
+
+@Description Create a new synchronisation checkpoint context
+
+@Input psDevNode Device node
+
+@Output ppsSyncCheckpointContext Handle to the created synchronisation
+ checkpoint context
+
+@Return PVRSRV_OK if the synchronisation checkpoint context was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextCreate(PPVRSRV_DEVICE_NODE psDevNode,
+ PSYNC_CHECKPOINT_CONTEXT *ppsSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointContextDestroy
+
+@Description Destroy a synchronisation checkpoint context
+
+@Input psSyncCheckpointContext Handle to the synchronisation
+ checkpoint context to destroy
+
+@Return PVRSRV_OK if the synchronisation checkpoint context was
+ successfully destroyed.
+ PVRSRV_ERROR_UNABLE_TO_DESTROY_CONTEXT if the context still
+ has sync checkpoints defined
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointContextDestroy(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointAlloc
+
+@Description Allocate a new synchronisation checkpoint on the specified
+ synchronisation checkpoint context
+
+@Input psSyncContext Handle to the synchronisation
+ checkpoint context
+
+@Input hTimeline Timeline on which this sync
+ checkpoint is being created
+
+@Input pszCheckpointName Sync checkpoint source annotation
+ (will be truncated to at most
+ PVRSRV_SYNC_NAME_LENGTH chars)
+
+@Output ppsSyncCheckpoint Created synchronisation checkpoint
+
+@Return PVRSRV_OK if the synchronisation checkpoint was
+ successfully created
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointAlloc(PSYNC_CHECKPOINT_CONTEXT psSyncContext,
+ PVRSRV_TIMELINE hTimeline,
+ const IMG_CHAR *pszCheckpointName,
+ PSYNC_CHECKPOINT *ppsSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointFree
+
+@Description Free a synchronisation checkpoint
+ The reference count held for the synchronisation checkpoint
+ is decremented; if it becomes zero, the checkpoint is freed.
+
+@Input psSyncCheckpoint The synchronisation checkpoint to free
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFree(PSYNC_CHECKPOINT psSyncCheckpoint);
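+
+/* Illustrative lifecycle sketch (not part of this patch): a server-side
+ * caller might create a context, allocate and signal a checkpoint, then
+ * tear everything down. Error handling is elided, and the timeline handle
+ * and flags value (0) are assumptions:
+ *
+ *     PSYNC_CHECKPOINT_CONTEXT psCtx;
+ *     PSYNC_CHECKPOINT psCheckpoint;
+ *
+ *     SyncCheckpointContextCreate(psDevNode, &psCtx);
+ *     SyncCheckpointAlloc(psCtx, hTimeline, "example-annotation", &psCheckpoint);
+ *     SyncCheckpointSignal(psCheckpoint, 0);
+ *     SyncCheckpointFree(psCheckpoint);
+ *     SyncCheckpointContextDestroy(psCtx);
+ */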
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointSignal
+
+@Description Signal the synchronisation checkpoint
+
+@Input psSyncCheckpoint The synchronisation checkpoint to signal
+
+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignal(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointSignalNoHW
+
+@Description Signal the synchronisation checkpoint in a NO_HARDWARE build
+
+@Input psSyncCheckpoint The synchronisation checkpoint to signal
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointSignalNoHW(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointError
+
+@Description Error the synchronisation checkpoint
+
+@Input psSyncCheckpoint The synchronisation checkpoint to error
+
+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointError(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointStateFromUFO
+
+@Description Returns the current state of the synchronisation checkpoint
+ which has the given UFO firmware address
+
+@Input psDevNode The device owning the sync
+ checkpoint
+
+@Input ui32FwAddr The firmware address of the sync
+ checkpoint
+
+@Return The current state (32-bit value) of the sync checkpoint
+*/
+/*****************************************************************************/
+IMG_UINT32 SyncCheckpointStateFromUFO(PPVRSRV_DEVICE_NODE psDevNode,
+ IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointErrorFromUFO
+
+@Description Error the synchronisation checkpoint which has the
+ given UFO firmware address
+
+@Input psDevNode The device owning the sync
+ checkpoint to be errored
+
+@Input ui32FwAddr The firmware address of the sync
+ checkpoint to be errored
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointErrorFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointRollbackFromUFO
+
+@Description Drop the enqueued count reference taken on the synchronisation
+ checkpoint on behalf of the firmware.
+ Called in the event of a DM Kick failing.
+
+@Input psDevNode The device owning the sync
+ checkpoint to be rolled back
+
+@Input ui32FwAddr The firmware address of the sync
+ checkpoint to be rolled back
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointRollbackFromUFO(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointIsSignalled
+
+@Description Returns IMG_TRUE if the synchronisation checkpoint is
+ signalled or errored
+
+@Input psSyncCheckpoint The synchronisation checkpoint to test
+
+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior
+
+@Return IMG_TRUE if the synchronisation checkpoint is signalled
+ or errored, IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsSignalled(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointIsErrored
+
+@Description Returns IMG_TRUE if the synchronisation checkpoint is
+ errored
+
+@Input psSyncCheckpoint The synchronisation checkpoint to test
+
+@Input ui32FenceSyncFlags Flags used for controlling HWPerf behavior
+
+@Return IMG_TRUE if the synchronisation checkpoint is errored,
+ IMG_FALSE otherwise
+*/
+/*****************************************************************************/
+IMG_BOOL
+SyncCheckpointIsErrored(PSYNC_CHECKPOINT psSyncCheckpoint, IMG_UINT32 ui32FenceSyncFlags);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointTakeRef
+
+@Description Take a reference on a synchronisation checkpoint
+
+@Input psSyncCheckpoint Synchronisation checkpoint to take a
+ reference on
+
+@Return PVRSRV_OK if a reference was taken on the synchronisation
+ checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointTakeRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointDropRef
+
+@Description Drop a reference on a synchronisation checkpoint
+
+@Input psSyncCheckpoint Synchronisation checkpoint to drop a
+ reference on
+
+@Return PVRSRV_OK if a reference was dropped on the synchronisation
+ checkpoint
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDropRef(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointResolveFence
+
+@Description Resolve a fence, returning a list of the sync checkpoints
+ that fence contains.
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input psSyncCheckpointContext The sync checkpoint context
+ on which checkpoints should be
+ created (in the event of the fence
+ having a native sync pt with no
+ associated sync checkpoint)
+
+@Input hFence The fence to be resolved
+
+@Output pui32NumSyncCheckpoints The number of sync checkpoints the
+ fence contains. Can return 0 if
+ passed a null (-1) fence.
+
+@Output papsSyncCheckpoints List of sync checkpoints the fence
+ contains
+
+@Output puiFenceUID Unique ID of the resolved fence
+
+@Return PVRSRV_OK if a valid fence was provided.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointResolveFence(PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE hFence,
+ IMG_UINT32 *pui32NumSyncCheckpoints,
+ PSYNC_CHECKPOINT **papsSyncCheckpoints,
+ IMG_UINT64 *puiFenceUID);
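+
+/* Illustrative call sketch (not part of this patch; local variable names
+ * are assumptions). Resolve a fence into its checkpoints, use them, then
+ * free the returned list:
+ *
+ *     IMG_UINT32 ui32NumCheckpoints;
+ *     PSYNC_CHECKPOINT *apsCheckpoints;
+ *     IMG_UINT64 ui64FenceUID;
+ *
+ *     eError = SyncCheckpointResolveFence(psCtx, hFence, &ui32NumCheckpoints,
+ *                                         &apsCheckpoints, &ui64FenceUID);
+ *     ...enqueue checks on apsCheckpoints[0..ui32NumCheckpoints-1]...
+ *     SyncCheckpointFreeCheckpointListMem(apsCheckpoints);
+ */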
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointCreateFence
+
+@Description Create a fence containing a single sync checkpoint.
+ Return the fence and a ptr to the sync checkpoint it contains.
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input pszFenceName String to assign to the new fence
+ (for debugging purposes)
+
+@Input hTimeline Timeline on which the new fence is
+ to be created
+
+@Input psSyncCheckpointContext Sync checkpoint context to be used
+ when creating the new fence
+
+@Output phNewFence The newly created fence
+
+@Output pui32FenceUID Unique ID of the created fence
+
+@Output ppvFenceFinaliseData Any data needed to finalise the fence
+ in a later call to the function
+ SyncCheckpointFinaliseFence()
+
+@Output psNewSyncCheckpoint The sync checkpoint contained in
+ the new fence
+
+@Return PVRSRV_OK if a valid fence was provided.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointCreateFence(PPVRSRV_DEVICE_NODE psDeviceNode,
+ const IMG_CHAR *pszFenceName,
+ PVRSRV_TIMELINE hTimeline,
+ PSYNC_CHECKPOINT_CONTEXT psSyncCheckpointContext,
+ PVRSRV_FENCE *phNewFence,
+ IMG_UINT64 *pui64FenceUID,
+ void **ppvFenceFinaliseData,
+ PSYNC_CHECKPOINT *psNewSyncCheckpoint,
+ void **ppvTimelineUpdateSyncPrim,
+ IMG_UINT32 *pui32TimelineUpdateValue);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointRollbackFenceData
+
+@Description 'Rolls back' the specified fence (destroys the fence and
+ takes any other actions required to undo the fence
+ creation, e.g. if the implementation wishes to revert
+ the incrementing of the fence's timeline).
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input hFence Fence to be 'rolled back'
+
+@Input pvFinaliseData Data needed to finalise the
+ fence
+
+@Return PVRSRV_OK if a valid fence was provided.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointRollbackFenceData(PVRSRV_FENCE hFence, void *pvFinaliseData);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointFinaliseFence
+
+@Description 'Finalise' the specified fence (performs any actions the
+ underlying implementation may need to perform just prior
+ to the fence being returned to the client).
+ This function in turn calls a function provided by the
+ OS native sync implementation - if the native sync
+ implementation does not need to perform any actions at
+ this time, this function does not need to be registered.
+
+@Input hFence Fence to be 'finalised'
+
+@Input pvFinaliseData Data needed to finalise the fence
+
+@Return PVRSRV_OK if a valid fence and finalise data were provided.
+ PVRSRV_ERROR_INVALID_PARAMS if an invalid fence or finalise
+ data were provided.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function (permitted).
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointFinaliseFence(PVRSRV_FENCE hFence, void *pvFinaliseData);
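+
+/* Illustrative create/finalise/rollback sketch (not part of this patch;
+ * variable names and the kick-failure condition are assumptions):
+ *
+ *     eError = SyncCheckpointCreateFence(psDevNode, "example-fence",
+ *                                        hTimeline, psCtx, &hNewFence,
+ *                                        &ui64FenceUID, &pvFinaliseData,
+ *                                        &psNewCheckpoint,
+ *                                        &pvTimelineUpdateSync,
+ *                                        &ui32TimelineUpdateValue);
+ *     if (the DM kick using the fence succeeded)
+ *         SyncCheckpointFinaliseFence(hNewFence, pvFinaliseData);
+ *     else
+ *         SyncCheckpointRollbackFenceData(hNewFence, pvFinaliseData);
+ */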
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointFreeCheckpointListMem
+
+@Description Free the memory which was allocated by the sync
+ implementation and used to return the list of sync
+ checkpoints when resolving a fence.
+ This function in turn calls a free function registered by
+ the sync implementation (if a function has been registered).
+
+@Input pvCheckpointListMem Pointer to the memory to be freed
+
+@Return None
+*/
+/*****************************************************************************/
+void
+SyncCheckpointFreeCheckpointListMem(void *pvCheckpointListMem);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointNoHWUpdateTimelines
+
+@Description Called by the DDK in a NO_HARDWARE build only.
+ After syncs have been manually signalled by the DDK, this
+ function is called to allow the OS native sync implementation
+ to update its timelines (as the usual callback notification
+ of signalled checkpoints is not supported for NO_HARDWARE).
+ This function in turn calls a function provided by the
+ OS native sync implementation.
+
+@Input pvPrivateData Any data the OS native sync
+ implementation might require.
+
+@Return PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function, otherwise
+ PVRSRV_OK.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointNoHWUpdateTimelines(void *pvPrivateData);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointDumpInfoOnStalledUFOs
+
+@Description Called by the DDK in the event of the health check watchdog
+ examining the CCBs and determining that one has failed to
+ progress after 10 seconds while the GPU is idle, due to waiting
+ on one or more UFO fences.
+ The DDK will pass a list of UFOs on which the CCB is waiting
+ and the sync implementation will check them to see if any
+ relate to sync points it has created. If so, the
+ implementation should dump debug information on those sync
+ points to the kernel log or other suitable output (which will
+ allow the unsignalled syncs to be identified).
+ The function shall return the number of syncs in the provided
+ array that were syncs which it had created.
+
+@Input ui32NumUFOs The number of UFOs in the array passed
+ in the pui32Vaddrs parameter.
+
+@Input pui32Vaddrs The array of UFOs the CCB is waiting on.
+
+@Output pui32NumSyncOwnedUFOs The number of UFOs in pui32Vaddrs which
+ relate to syncs created by the sync
+ implementation.
+
+@Return PVRSRV_OK if a valid pointer is provided in pui32NumSyncOwnedUFOs.
+ PVRSRV_ERROR_INVALID_PARAMS if a NULL value is provided in
+ pui32NumSyncOwnedUFOs.
+ PVRSRV_ERROR_SYNC_NATIVESYNC_NOT_REGISTERED if the OS native
+ sync has not registered a callback function.
+
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointDumpInfoOnStalledUFOs(IMG_UINT32 ui32NumUFOs,
+ IMG_UINT32 *pui32Vaddrs,
+ IMG_UINT32 *pui32NumSyncOwnedUFOs);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetStateString
+
+@Description Called to get a string representing the current state of a
+ sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get the
+ state for.
+
+@Return The string representing the current state of this checkpoint
+*/
+/*****************************************************************************/
+const IMG_CHAR *
+SyncCheckpointGetStateString(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointPDumpPol
+
+@Description Called to insert a poll into the PDump script on a given
+ sync checkpoint being signalled or errored.
+
+@Input psSyncCheckpoint Synchronisation checkpoint for
+ PDump to poll on
+
+@Input ui32PDumpFlags PDump flags
+
+@Return PVRSRV_OK if a valid sync checkpoint was provided.
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointPDumpPol(PSYNC_CHECKPOINT psSyncCheckpoint, PDUMP_FLAGS_T ui32PDumpFlags);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+/*************************************************************************/ /*!
+@Function SyncCheckpointRecordLookup
+
+@Description Returns a debug string with information about the
+ sync checkpoint.
+
+@Input psDevNode The device owning the sync
+ checkpoint to lookup
+
+@Input ui32FwAddr The firmware address of the sync
+ checkpoint to lookup
+
+@Input pszSyncInfo Character array to write to
+
+@Input len Length of the character array
+
+@Return None
+*/
+/*****************************************************************************/
+void SyncCheckpointRecordLookup(PPVRSRV_DEVICE_NODE psDevNode, IMG_UINT32 ui32FwAddr,
+ IMG_CHAR * pszSyncInfo, size_t len);
+#endif /* defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+#endif /* _SYNC_CHECKPOINT_ */
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_external.h b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_external.h
new file mode 100644
index 00000000000000..c0aaf2f8d8393c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_external.h
@@ -0,0 +1,80 @@
+/*************************************************************************/ /*!
+@File
+@Title Services external synchronisation checkpoint interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines synchronisation checkpoint structures that are visible
+ internally and externally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_EXTERNAL_
+#define _SYNC_CHECKPOINT_EXTERNAL_
+
+#ifndef _CHECKPOINT_TYPES_
+#define _CHECKPOINT_TYPES_
+typedef struct _SYNC_CHECKPOINT_CONTEXT *PSYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT *PSYNC_CHECKPOINT;
+#endif
+
+/* PVRSRV_SYNC_CHECKPOINT states.
+ * The OS native sync implementation should call pfnIsSignalled() to determine
+ * whether a PVRSRV_SYNC_CHECKPOINT has signalled (which will return an
+ * IMG_BOOL), but it may set the state of a PVRSRV_SYNC_CHECKPOINT which is
+ * currently NOT_SIGNALLED where that checkpoint represents a foreign sync.
+ */
+typedef enum
+{
+ PVRSRV_SYNC_CHECKPOINT_NOT_SIGNALLED = 0x000, /*!< checkpoint has not signalled */
+ PVRSRV_SYNC_CHECKPOINT_SIGNALLED = 0x519, /*!< checkpoint has signalled */
+ PVRSRV_SYNC_CHECKPOINT_ERRORED = 0xeff /*!< checkpoint has been errored */
+} PVRSRV_SYNC_CHECKPOINT_STATE;
+
+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(fwaddr) ((fwaddr) & 0x1)
+#define PVRSRV_UFO_IS_SYNC_CHECKPOINT(ufoptr) (PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR((ufoptr)->puiAddrUFO.ui32Addr))
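+
+/* For example, PVRSRV_UFO_IS_SYNC_CHECKPOINT_FWADDR(0x1001) is non-zero
+ * (bit 0 of the firmware address is set), while an even address such as
+ * 0x1000 identifies a UFO which is not a sync checkpoint.
+ */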
+
+/* Maximum number of sync checkpoints the firmware supports in one fence */
+#define MAX_SYNC_CHECKPOINTS_PER_FENCE 32
+
+/*!
+ * Define to be used with SyncCheckpointAlloc() to indicate a checkpoint which
+ * represents a foreign sync point or collection of foreign sync points.
+ */
+#define SYNC_CHECKPOINT_FOREIGN_CHECKPOINT ((PVRSRV_TIMELINE) -2)
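+
+/* Usage sketch (variable names are assumptions), passing the define as the
+ * timeline argument of SyncCheckpointAlloc():
+ *
+ *     SyncCheckpointAlloc(psCtx, SYNC_CHECKPOINT_FOREIGN_CHECKPOINT,
+ *                         "foreign-sync", &psCheckpoint);
+ */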
+
+#endif /* _SYNC_CHECKPOINT_EXTERNAL_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_init.h b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_init.h
new file mode 100644
index 00000000000000..f5aa139c260ede
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_init.h
@@ -0,0 +1,82 @@
+/*************************************************************************/ /*!
+@File
+@Title Services synchronisation checkpoint initialisation interface
+ header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines synchronisation checkpoint structures that are visible
+ internally and externally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_INIT_
+#define _SYNC_CHECKPOINT_INIT_
+
+#include "device.h"
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointInit
+
+@Description Initialise the sync checkpoint driver by giving it the
+ device node (needed to determine the pfnUFOAlloc function
+ to call in order to allocate sync block memory).
+
+@Input psDevNode Device for which sync checkpoints
+ are being initialised
+
+@Return PVRSRV_OK if initialised successfully,
+ PVRSRV_ERROR_<error> otherwise
+*/
+/*****************************************************************************/
+PVRSRV_ERROR
+SyncCheckpointInit(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointDeinit
+
+@Description Deinitialise the sync checkpoint driver.
+ Frees resources allocated during initialisation.
+
+@Input psDevNode Device for which sync checkpoints
+ are being de-initialised
+
+@Return None
+*/
+/*****************************************************************************/
+void SyncCheckpointDeinit(PVRSRV_DEVICE_NODE *psDevNode);
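+
+/* Typical pairing sketch (not part of this patch; error handling elided):
+ *
+ *     eError = SyncCheckpointInit(psDevNode);
+ *     ...device lifetime...
+ *     SyncCheckpointDeinit(psDevNode);
+ */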
+
+#endif /* _SYNC_CHECKPOINT_INIT_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal.h b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal.h
new file mode 100644
index 00000000000000..529089b023c425
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal.h
@@ -0,0 +1,255 @@
+/*************************************************************************/ /*!
+@File
+@Title Services internal synchronisation checkpoint interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the internal server interface for services
+ synchronisation checkpoints.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __SYNC_CHECKPOINT__
+#define __SYNC_CHECKPOINT__
+
+#include "img_types.h"
+#include "opaque_types.h"
+#include "sync_checkpoint_internal_fw.h"
+#include "sync_checkpoint_external.h"
+#include "sync_checkpoint.h"
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+#include "rgx_fwif_shared.h"
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+struct SYNC_CHECKPOINT_RECORD;
+#endif
+
+/*
+ Private structures
+*/
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_CTL_ *_PSYNC_CHECKPOINT_CONTEXT_CTL;
+
+typedef struct _SYNC_CHECKPOINT_CONTEXT_
+{
+ PPVRSRV_DEVICE_NODE psDevNode;
+ IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the RA */
+ RA_ARENA *psSubAllocRA; /*!< RA context */
+ IMG_CHAR azSpanName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the span RA */
+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
+ ATOMIC_T hRefCount; /*!< Ref count for this context */
+ ATOMIC_T hCheckpointCount; /*!< Checkpoint count for this context */
+ POS_LOCK hLock;
+ _PSYNC_CHECKPOINT_CONTEXT_CTL psContextCtl;
+} _SYNC_CHECKPOINT_CONTEXT;
+
+typedef struct _SYNC_CHECKPOINT_BLOCK_
+{
+ ATOMIC_T hRefCount; /*!< Ref count for this sync block */
+ POS_LOCK hLock;
+ _SYNC_CHECKPOINT_CONTEXT *psContext; /*!< Parent sync checkpoint context */
+ PPVRSRV_DEVICE_NODE psDevNode;
+ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync checkpoint block */
+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */
+ DEVMEM_MEMDESC *hMemDesc; /*!< DevMem allocation for block */
+ volatile IMG_UINT32 *pui32LinAddr; /*!< Server-code CPU mapping */
+ IMG_UINT64 uiSpanBase; /*!< Base of this import (FW DevMem) in the span RA */
+ DLLIST_NODE sListNode; /*!< List node for the sync chkpt block list */
+} SYNC_CHECKPOINT_BLOCK;
+
+typedef struct SYNC_CHECKPOINT_RECORD* PSYNC_CHECKPOINT_RECORD_HANDLE;
+
+typedef struct _SYNC_CHECKPOINT_
+{
+ //_SYNC_CHECKPOINT_CONTEXT *psContext; /*!< pointer to the parent context of this checkpoint */
+ /* A sync checkpoint is assigned a unique ID, to avoid any confusion should
+ * the same memory be re-used later for a different checkpoint
+ */
+ IMG_UINT32 ui32UID; /*!< Unique ID assigned to sync checkpoint (to distinguish checkpoints if memory is re-used)*/
+ POS_LOCK hLock;
+ ATOMIC_T hRefCount; /*!< Ref count for this sync */
+ ATOMIC_T hEnqueuedCCBCount; /*!< Num times sync has been put in CCBs */
+ SYNC_CHECKPOINT_BLOCK *psSyncCheckpointBlock; /*!< Synchronisation block this checkpoint is allocated on */
+ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
+ volatile _SYNC_CHECKPOINT_FW_OBJ *psSyncCheckpointFwObj; /*!< CPU view of the data held in the sync block */
+ PRGXFWIF_UFO_ADDR sCheckpointUFOAddr; /*!< PRGXFWIF_UFO_ADDR struct used to pass update address to FW */
+ IMG_CHAR azName[PVRSRV_SYNC_NAME_LENGTH]; /*!< Name of the checkpoint */
+ PVRSRV_TIMELINE hTimeline; /*!< Timeline on which this sync checkpoint was created */
+ IMG_UINT32 ui32ValidationCheck;
+ IMG_PID uiProcess; /*!< The Process ID of the process which created this sync checkpoint */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ PSYNC_CHECKPOINT_RECORD_HANDLE hRecord; /*!< Sync record handle */
+#endif
+ DLLIST_NODE sListNode; /*!< List node for the global sync chkpt list */
+ DLLIST_NODE sDeferredFreeListNode; /*!< List node for the deferred free sync chkpt list */
+ IMG_UINT32 ui32FWAddr; /*!< FWAddr stored at sync checkpoint alloc time */
+} _SYNC_CHECKPOINT;
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetFirmwareAddr
+
+@Description Returns the firmware address of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the firmware address of
+
+@Return The firmware address of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetFirmwareAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointCCBEnqueued
+
+@Description Increment the CCB enqueued reference count for a
+ synchronisation checkpoint. This indicates how many FW
+ operations (checks/update) have been placed into CCBs for the
+ sync checkpoint.
+ When the FW services these operations, it increments its own
+ reference count. When these two values are equal, we know
+ there are no outstanding FW operations for the checkpoint
+ in any CCB.
+
+@Input psSyncCheckpoint Synchronisation checkpoint for which
+ to increment the enqueued reference
+ count
+
+@Return None
+
+*/
+/*****************************************************************************/
+void
+SyncCheckpointCCBEnqueued(PSYNC_CHECKPOINT psSyncCheckpoint);
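+
+/* The deferred cleanup code in sync_checkpoint.c applies this rule: a sync
+ * checkpoint may only be freed (or returned to the pool) once
+ *
+ *     psSyncCheckpointFwObj->ui32FwRefCount ==
+ *         OSAtomicRead(&psSyncCheckpoint->hEnqueuedCCBCount)
+ *
+ * i.e. once the firmware has processed every operation enqueued for it.
+ */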
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetEnqueuedCount
+
+@Description Returns the enqueued count of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the enqueued count of
+
+@Return The enqueued count of the sync checkpoint
+ (i.e. the number of FW operations (checks or updates)
+ currently enqueued in CCBs for the sync checkpoint)
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetEnqueuedCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetReferenceCount
+
+@Description Returns the host reference count of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the reference count of
+
+@Return The host reference count of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetReferenceCount(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetCreator
+
+@Description Returns the process ID of the process which created
+ the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the creating process of
+
+@Return The process id of the process which created this sync checkpoint.
+
+*/
+/*****************************************************************************/
+IMG_PID
+SyncCheckpointGetCreator(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetId
+
+@Description Returns the unique ID of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the unique Id of
+
+@Return The unique Id of the sync checkpoint
+
+*/
+/*****************************************************************************/
+IMG_UINT32
+SyncCheckpointGetId(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetTimeline
+
+@Description Returns the parent timeline of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the parent timeline of
+
+@Return The parent timeline of the sync checkpoint
+
+*/
+/*****************************************************************************/
+PVRSRV_TIMELINE
+SyncCheckpointGetTimeline(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+/*************************************************************************/ /*!
+@Function SyncCheckpointGetRGXFWIFUFOAddr
+
+@Description Returns the PRGXFWIF_UFO_ADDR of the sync checkpoint.
+
+@Input psSyncCheckpoint Synchronisation checkpoint to get
+ the PRGXFWIF_UFO_ADDR of
+
+@Return The PRGXFWIF_UFO_ADDR of the sync checkpoint, used when
+ providing the update in server kick code.
+
+*/
+/*****************************************************************************/
+PRGXFWIF_UFO_ADDR*
+SyncCheckpointGetRGXFWIFUFOAddr(PSYNC_CHECKPOINT psSyncCheckpoint);
+
+#endif /* __SYNC_CHECKPOINT__ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal_fw.h b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal_fw.h
new file mode 100644
index 00000000000000..e8bda02b41c912
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_checkpoint_internal_fw.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title Services internal synchronisation checkpoint FW obj header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the internal FW object structure for services
+ synchronisation checkpoints.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_CHECKPOINT_INTERNAL_FW_
+#define _SYNC_CHECKPOINT_INTERNAL_FW_
+
+#include "img_types.h"
+
+/* Sync_checkpoint firmware object.
+ * This is the FW-addressable structure used to hold the sync checkpoint's
+ * state and other information which needs to be accessed by the firmware.
+ */
+typedef struct _SYNC_CHECKPOINT_FW_OBJ_
+{
+ IMG_UINT32 ui32State; /*!< Holds the current state of the sync checkpoint */
+ IMG_UINT32 ui32FwRefCount; /*!< Holds the FW reference count (num of fences/updates processed) */
+} _SYNC_CHECKPOINT_FW_OBJ;
+
+/* Bit mask Firmware can use to test if a checkpoint has signalled or errored */
+#define SYNC_CHECKPOINT_SIGNALLED_MASK (0x1 << 0)
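+
+/* Illustrative sketch (not part of the original header): per the comment
+ * above, firmware-side code could use this mask to test whether a checkpoint
+ * is complete without distinguishing the signalled and errored states.
+ * _CheckpointIsComplete is a hypothetical helper, e.g.:
+ *
+ *   static IMG_BOOL _CheckpointIsComplete(const _SYNC_CHECKPOINT_FW_OBJ *psObj)
+ *   {
+ *       return (psObj->ui32State & SYNC_CHECKPOINT_SIGNALLED_MASK) ? IMG_TRUE : IMG_FALSE;
+ *   }
+ */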
+
+#endif /* _SYNC_CHECKPOINT_INTERNAL_FW_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_fallback_server.h b/drivers/gpu/drm/img-rogue/1.10/sync_fallback_server.h
new file mode 100644
index 00000000000000..0aaa913d576923
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_fallback_server.h
@@ -0,0 +1,178 @@
+/**************************************************************************/ /*!
+@File
+@Title Fallback sync interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#ifndef _SYNC_FALLBACK_SERVER_H_
+#define _SYNC_FALLBACK_SERVER_H_
+
+#include "img_types.h"
+#include "sync_checkpoint.h"
+#include "device.h"
+#include "connection_server.h"
+
+
+typedef struct _PVRSRV_TIMELINE_SERVER_ PVRSRV_TIMELINE_SERVER;
+typedef struct _PVRSRV_FENCE_SERVER_ PVRSRV_FENCE_SERVER;
+typedef struct _PVRSRV_FENCE_EXPORT_ PVRSRV_FENCE_EXPORT;
+
+typedef struct _PVRSRV_SYNC_PT_ PVRSRV_SYNC_PT;
+
+#define SYNC_FB_TIMELINE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+#define SYNC_FB_FENCE_MAX_LENGTH PVRSRV_SYNC_NAME_LENGTH
+
+/*****************************************************************************/
+/* */
+/* SW SPECIFIC FUNCTIONS */
+/* */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreateSW(IMG_UINT32 uiTimelineNameSize,
+ const IMG_CHAR *pszTimelineName,
+ PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreateSW(PVRSRV_TIMELINE_SERVER *psTimeline,
+ IMG_UINT32 uiFenceNameSize,
+ const IMG_CHAR *pszFenceName,
+ PVRSRV_FENCE_SERVER **ppsOutputFence);
+
+PVRSRV_ERROR SyncFbTimelineAdvanceSW(PVRSRV_TIMELINE_SERVER *psTimeline);
+
+/*****************************************************************************/
+/* */
+/* PVR SPECIFIC FUNCTIONS */
+/* */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineCreatePVR(IMG_UINT32 uiTimelineNameSize,
+ const IMG_CHAR *pszTimelineName,
+ PVRSRV_TIMELINE_SERVER **ppsTimeline);
+
+PVRSRV_ERROR SyncFbFenceCreatePVR(const IMG_CHAR *pszName,
+ PVRSRV_TIMELINE iTl,
+ PSYNC_CHECKPOINT_CONTEXT hSyncCheckpointContext,
+ PVRSRV_FENCE *piOutFence,
+ IMG_UINT64 *puiFenceUID,
+ void **ppvFenceFinaliseData,
+ PSYNC_CHECKPOINT *ppsOutCheckpoint,
+ void **ppvTimelineUpdateSync,
+ IMG_UINT32 *puiTimelineUpdateValue);
+
+PVRSRV_ERROR SyncFbFenceResolvePVR(PSYNC_CHECKPOINT_CONTEXT psContext,
+ PVRSRV_FENCE iFence,
+ IMG_UINT32 *puiNumCheckpoints,
+ PSYNC_CHECKPOINT **papsCheckpoints,
+ IMG_UINT64 *puiFenceUID);
+
+/*****************************************************************************/
+/* */
+/* GENERIC FUNCTIONS */
+/* */
+/*****************************************************************************/
+
+PVRSRV_ERROR SyncFbTimelineRelease(PVRSRV_TIMELINE_SERVER *psTl);
+
+PVRSRV_ERROR SyncFbFenceRelease(PVRSRV_FENCE_SERVER *psFence);
+
+PVRSRV_ERROR SyncFbFenceDup(PVRSRV_FENCE_SERVER *psInFence,
+ PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceMerge(PVRSRV_FENCE_SERVER *psInFence1,
+ PVRSRV_FENCE_SERVER *psInFence2,
+ IMG_UINT32 uiFenceNameSize,
+ const IMG_CHAR *pszFenceName,
+ PVRSRV_FENCE_SERVER **ppsOutFence);
+
+PVRSRV_ERROR SyncFbFenceWait(PVRSRV_FENCE_SERVER *psFence,
+ IMG_UINT32 uiTimeout);
+
+PVRSRV_ERROR SyncFbFenceDump(PVRSRV_FENCE_SERVER *psFence,
+ IMG_UINT32 uiLine,
+ IMG_UINT32 uiFileNameLength,
+ const IMG_CHAR *pszFile);
+
+PVRSRV_ERROR SyncFbFenceDump2(PVRSRV_FENCE_SERVER *psFence,
+ IMG_UINT32 uiLine,
+ IMG_UINT32 uiFileNameLength,
+ const IMG_CHAR *pszFile,
+ IMG_UINT32 uiModuleLength,
+ const IMG_CHAR *pszModule,
+ IMG_UINT32 uiDescLength,
+ const IMG_CHAR *pszDesc);
+
+PVRSRV_ERROR SyncFbRegisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+PVRSRV_ERROR SyncFbDeregisterDevice(PVRSRV_DEVICE_NODE *psDeviceNode);
+
+IMG_UINT32 SyncFbDumpInfoOnStalledUFOs(IMG_UINT32 nr_ufos, IMG_UINT32 *vaddrs);
+
+/*****************************************************************************/
+/* */
+/* IMPORT/EXPORT FUNCTIONS */
+/* */
+/*****************************************************************************/
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR SyncFbFenceExportInsecure(PVRSRV_FENCE_SERVER *psFence,
+ PVRSRV_FENCE_EXPORT **ppExport);
+
+PVRSRV_ERROR SyncFbFenceExportDestroyInsecure(PVRSRV_FENCE_EXPORT *psExport);
+
+PVRSRV_ERROR SyncFbFenceImportInsecure(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevice,
+ PVRSRV_FENCE_EXPORT *psImport,
+ PVRSRV_FENCE_SERVER **psFence);
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+PVRSRV_ERROR SyncFbFenceExportSecure(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ PVRSRV_FENCE_SERVER *psFence,
+ IMG_SECURE_TYPE *phSecure,
+ PVRSRV_FENCE_EXPORT **ppsExport,
+ CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR SyncFbFenceExportDestroySecure(PVRSRV_FENCE_EXPORT *psExport);
+
+PVRSRV_ERROR SyncFbFenceImportSecure(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevice,
+ IMG_SECURE_TYPE hSecure,
+ PVRSRV_FENCE_SERVER **psFence);
+
+#endif /* _SYNC_FALLBACK_SERVER_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_internal.h b/drivers/gpu/drm/img-rogue/1.10/sync_internal.h
new file mode 100644
index 00000000000000..b7f276a0e10384
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_internal.h
@@ -0,0 +1,129 @@
+/*************************************************************************/ /*!
+@File
+@Title Services internal synchronisation interface header
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Defines the internal client side interface for services
+ synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _SYNC_INTERNAL_
+#define _SYNC_INTERNAL_
+
+#include "img_types.h"
+#include <powervr/sync_external.h>
+#include "ra.h"
+#include "dllist.h"
+#include "lock.h"
+#include "devicemem.h"
+
+
+#define LOCAL_SYNC_PRIM_RESET_VALUE 0
+#define LOCAL_SYNC_PRIM_POISON_VALUE 0xa5a5a5a5u
+
+/*
+	Private structures
+*/
+#define SYNC_PRIM_NAME_SIZE 50
+typedef struct SYNC_PRIM_CONTEXT
+{
+ SHARED_DEV_CONNECTION hDevConnection;
+ IMG_CHAR azName[SYNC_PRIM_NAME_SIZE]; /*!< Name of the RA */
+ RA_ARENA *psSubAllocRA; /*!< RA context */
+ IMG_CHAR azSpanName[SYNC_PRIM_NAME_SIZE];/*!< Name of the span RA */
+ RA_ARENA *psSpanRA; /*!< RA used for span management of SubAllocRA */
+ ATOMIC_T hRefCount; /*!< Ref count for this context */
+} SYNC_PRIM_CONTEXT;
+
+typedef struct _SYNC_PRIM_BLOCK_
+{
+ SYNC_PRIM_CONTEXT *psContext; /*!< Our copy of the services connection */
+ IMG_HANDLE hServerSyncPrimBlock; /*!< Server handle for this block */
+ IMG_UINT32 ui32SyncBlockSize; /*!< Size of the sync prim block */
+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address */
+ DEVMEM_MEMDESC *hMemDesc; /*!< Host mapping handle */
+ IMG_UINT32 __iomem *pui32LinAddr; /*!< User CPU mapping */
+ IMG_UINT64 uiSpanBase; /*!< Base of this import in the span RA */
+ DLLIST_NODE sListNode; /*!< List node for the sync block list */
+} SYNC_PRIM_BLOCK;
+
+typedef enum _SYNC_PRIM_TYPE_
+{
+ SYNC_PRIM_TYPE_UNKNOWN = 0,
+ SYNC_PRIM_TYPE_LOCAL,
+ SYNC_PRIM_TYPE_SERVER,
+} SYNC_PRIM_TYPE;
+
+typedef struct _SYNC_PRIM_LOCAL_
+{
+ ATOMIC_T hRefCount; /*!< Ref count for this sync */
+ SYNC_PRIM_BLOCK *psSyncBlock; /*!< Synchronisation block this primitive is allocated on */
+ IMG_UINT64 uiSpanAddr; /*!< Span address of the sync */
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ IMG_HANDLE hRecord; /*!< Sync record handle */
+#endif
+} SYNC_PRIM_LOCAL;
+
+typedef struct _SYNC_PRIM_SERVER_
+{
+ SYNC_BRIDGE_HANDLE hBridge; /*!< Bridge handle */
+ IMG_HANDLE hServerSync; /*!< Handle to the server sync */
+ IMG_UINT32 ui32FirmwareAddr; /*!< Firmware address of the sync */
+} SYNC_PRIM_SERVER;
+
+typedef struct _SYNC_PRIM_
+{
+ PVRSRV_CLIENT_SYNC_PRIM sCommon; /*!< Client visible part of the sync prim */
+ SYNC_PRIM_TYPE eType; /*!< Sync primitive type */
+ union {
+ SYNC_PRIM_LOCAL sLocal; /*!< Local sync primitive data */
+ SYNC_PRIM_SERVER sServer; /*!< Server sync primitive data */
+ } u;
+} SYNC_PRIM;
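+
+/* Illustrative sketch (not part of the original header): eType is the
+ * discriminant for the union above, so a hypothetical helper reading the
+ * firmware address of a server sync would look like:
+ *
+ *   static IMG_UINT32 _ServerSyncFwAddr(const SYNC_PRIM *psSyncInt)
+ *   {
+ *       PVR_ASSERT(psSyncInt->eType == SYNC_PRIM_TYPE_SERVER);
+ *       return psSyncInt->u.sServer.ui32FirmwareAddr;
+ *   }
+ *
+ * (For SYNC_PRIM_TYPE_LOCAL the address must instead be derived from the
+ * owning SYNC_PRIM_BLOCK's ui32FirmwareAddr plus the sync's offset within
+ * the block.)
+ */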
+
+
+/* FIXME this must return a correctly typed pointer */
+IMG_INTERNAL PVRSRV_ERROR
+SyncPrimGetFirmwareAddr(PVRSRV_CLIENT_SYNC_PRIM *psSync, IMG_UINT32 *pui32FwAddr);
+
+IMG_INTERNAL PVRSRV_ERROR SyncPrimLocalGetHandleAndOffset(PVRSRV_CLIENT_SYNC_PRIM *psSync,
+ IMG_HANDLE *phBlock,
+ IMG_UINT32 *pui32Offset);
+
+
+#endif /* _SYNC_INTERNAL_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_server.c b/drivers/gpu/drm/img-rogue/1.10/sync_server.c
new file mode 100644
index 00000000000000..ee23f77bb99b93
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_server.c
@@ -0,0 +1,2620 @@
+/*************************************************************************/ /*!
+@File sync_server.c
+@Title Server side synchronisation functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implements the server side functions for synchronisation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "img_types.h"
+#include "sync_server.h"
+#include "allocmem.h"
+#include "device.h"
+#include "devicemem.h"
+#include "devicemem_pdump.h"
+#include "osfunc.h"
+#include "pdump.h"
+#include "pvr_debug.h"
+#include "pvr_notifier.h"
+#include "pdump_km.h"
+#include "sync.h"
+#include "sync_internal.h"
+#include "connection_server.h"
+#include "htbuffer.h"
+#include "rgxhwperf.h"
+
+#include "sync_checkpoint_internal.h"
+#include "sync_checkpoint.h"
+
+/* Include this to obtain MAX_SYNC_CHECKPOINTS_PER_FENCE */
+#include "sync_checkpoint_external.h"
+
+#if defined(SUPPORT_SECURE_EXPORT)
+#include "ossecure_export.h"
+#endif
+
+#if defined(SUPPORT_EXTRA_METASP_DEBUG)
+#include "rgxdebug.h"
+#endif
+
+/* Set this to enable debug relating to the construction and maintenance of the sync address list */
+#define SYNC_ADDR_LIST_DEBUG 0
+
+/* Set maximum number of FWAddrs that can be accommodated in a SYNC_ADDR_LIST.
+ * This should allow for PVRSRV_MAX_SYNC_PRIMS sync prims plus
+ * MAX_SYNC_CHECKPOINTS_PER_FENCE sync checkpoints plus one further sync prim
+ * to accommodate the additional sync prim update returned by the Native
+ * sync implementation (used for timeline debug).
+ */
+#define PVRSRV_MAX_SYNC_ADDR_LIST_SIZE (PVRSRV_MAX_SYNC_PRIMS+MAX_SYNC_CHECKPOINTS_PER_FENCE+1)
+
+/* Max number of syncs allowed in a sync prim op */
+#define SYNC_PRIM_OP_MAX_SYNCS 1024
+
+struct _SYNC_PRIMITIVE_BLOCK_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ DEVMEM_MEMDESC *psMemDesc;
+ IMG_UINT32 *pui32LinAddr;
+ IMG_UINT32 ui32BlockSize; /*!< Size of the Sync Primitive Block */
+ ATOMIC_T sRefCount;
+ DLLIST_NODE sConnectionNode;
+ SYNC_CONNECTION_DATA *psSyncConnectionData; /*!< Link back to the sync connection data if there is one */
+ PRGXFWIF_UFO_ADDR uiFWAddr; /*!< The firmware address of the sync prim block */
+};
+
+struct _SERVER_SYNC_PRIMITIVE_
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_CLIENT_SYNC_PRIM *psSync;
+ IMG_UINT32 ui32NextOp;
+ ATOMIC_T sRefCount;
+ IMG_UINT32 ui32UID;
+ IMG_UINT32 ui32LastSyncRequesterID;
+ DLLIST_NODE sSyncServerListNode;
+ /* PDump only data */
+ IMG_BOOL bSWOperation;
+ IMG_BOOL bSWOpStartedInCaptRange;
+ IMG_UINT32 ui32LastHWUpdate;
+	IMG_UINT32 ui32LastPdumpedBlock; /* Used in block-mode of PDump - holds the pdump-block number in which this sync primitive was last pdumped */
+	IMG_BOOL bFirstOperationInBlock; /* Is the current operation on this sync the first in the current pdump-block? */
+ IMG_BOOL bPDumped;
+ POS_LOCK hLock; /*!< used to make ServerSyncQueue*Op calls atomic */
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+
+struct _SERVER_SYNC_EXPORT_
+{
+ SERVER_SYNC_PRIMITIVE *psSync;
+};
+
+struct _SERVER_OP_COOKIE_
+{
+ IMG_BOOL bActive;
+ /*
+ Client syncblock(s) info.
+	If this changes, update the calculation of ui32BlockAllocSize
+ */
+ IMG_UINT32 ui32SyncBlockCount;
+ SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock;
+
+ /*
+ Client sync(s) info.
+	If this changes, update the calculation of ui32ClientAllocSize
+ */
+ IMG_UINT32 ui32ClientSyncCount;
+ IMG_UINT32 *paui32SyncBlockIndex;
+ IMG_UINT32 *paui32Index;
+ IMG_UINT32 *paui32Flags;
+ IMG_UINT32 *paui32FenceValue;
+ IMG_UINT32 *paui32UpdateValue;
+
+ /*
+ Server sync(s) info
+	If this changes, update the calculation of ui32ServerAllocSize
+ */
+ IMG_UINT32 ui32ServerSyncCount;
+ SERVER_SYNC_PRIMITIVE **papsServerSync;
+ IMG_UINT32 *paui32ServerFenceValue;
+ IMG_UINT32 *paui32ServerUpdateValue;
+
+};
+
+struct _SYNC_CONNECTION_DATA_
+{
+	DLLIST_NODE sListHead; /*!< list of sync blocks associated with / created against this connection */
+ ATOMIC_T sRefCount; /*!< number of references to this object */
+ POS_LOCK hLock; /*!< lock protecting the list of sync blocks */
+};
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+#define DECREMENT_WITH_WRAP(value, sz) ((value) ? ((value) - 1) : ((sz) - 1))
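+/* e.g. DECREMENT_WITH_WRAP(3, 8) == 2 and DECREMENT_WITH_WRAP(0, 8) == 7,
+ * i.e. a decrement that wraps around a circular buffer of sz entries. */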
+
+/* this is the max number of syncs we will search or dump
+ * at any time.
+ */
+#define SYNC_RECORD_LIMIT 20000
+
+enum SYNC_RECORD_TYPE
+{
+ SYNC_RECORD_TYPE_UNKNOWN = 0,
+ SYNC_RECORD_TYPE_CLIENT,
+ SYNC_RECORD_TYPE_SERVER,
+};
+
+struct SYNC_RECORD
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ SYNC_PRIMITIVE_BLOCK *psServerSyncPrimBlock; /*!< handle to _SYNC_PRIMITIVE_BLOCK_ */
+ IMG_UINT32 ui32SyncOffset; /*!< offset to sync in block */
+ IMG_UINT32 ui32FwBlockAddr;
+ IMG_PID uiPID;
+ IMG_UINT64 ui64OSTime;
+ enum SYNC_RECORD_TYPE eRecordType;
+ DLLIST_NODE sNode;
+ IMG_CHAR szClassName[SYNC_MAX_CLASS_NAME_LEN];
+};
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+static IMG_UINT32 g_ServerSyncUID;
+
+#define SYNC_REQUESTOR_UNKNOWN 0
+static IMG_UINT32 g_ui32NextSyncRequestorID = 1;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+static POS_LOCK ghServerSyncLock;
+#endif
+
+#if defined(SYNC_DEBUG) || defined(REFCOUNT_DEBUG)
+#define SYNC_REFCOUNT_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_REFCOUNT_PRINT(fmt, ...)
+#endif
+
+#if defined(SYNC_DEBUG)
+#define SYNC_UPDATES_PRINT(fmt, ...) PVRSRVDebugPrintf(PVR_DBG_WARNING, __FILE__, __LINE__, fmt, __VA_ARGS__)
+#else
+#define SYNC_UPDATES_PRINT(fmt, ...)
+#endif
+
+/*!
+*****************************************************************************
+ @Function : SyncPrimitiveBlockToFWAddr
+
+ @Description : Given a pointer to a sync primitive block and an offset,
+ returns the firmware address of the sync.
+
+ @Input psSyncPrimBlock : Sync primitive block which contains the sync
+ @Input ui32Offset : Offset of sync within the sync primitive block
+ @Output psAddrOut : Absolute FW address of the sync is written out through
+ this pointer
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+ IMG_UINT32 ui32Offset,
+ PRGXFWIF_UFO_ADDR *psAddrOut)
+{
+ /* check offset is legal */
+ if((ui32Offset >= psSyncPrimBlock->ui32BlockSize) ||
+ (ui32Offset % sizeof(IMG_UINT32)))
+ {
+		PVR_DPF((PVR_DBG_ERROR, "SyncPrimitiveBlockToFWAddr: parameter check failed"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psAddrOut->ui32Addr = psSyncPrimBlock->uiFWAddr.ui32Addr + ui32Offset;
+ return PVRSRV_OK;
+}
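+
+/* Illustrative usage sketch (hypothetical caller, not part of the original
+ * file): the offset is in bytes and must be IMG_UINT32-aligned, so the FW
+ * address of the i-th sync in a block would be obtained with:
+ *
+ *   PRGXFWIF_UFO_ADDR sFWAddr;
+ *   PVRSRV_ERROR eErr = SyncPrimitiveBlockToFWAddr(psSyncPrimBlock,
+ *                                                  i * sizeof(IMG_UINT32),
+ *                                                  &sFWAddr);
+ */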
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListGrow
+
+ @Description : Grow the SYNC_ADDR_LIST so it can accommodate the given
+ number of syncs, up to a maximum of PVRSRV_MAX_SYNC_ADDR_LIST_SIZE.
+
+ @Input psList : The SYNC_ADDR_LIST to grow
+ @Input ui32NumSyncs : The number of sync addresses the list must be able to hold
+ @Return : PVRSRV_OK on success
+*****************************************************************************/
+
+static PVRSRV_ERROR SyncAddrListGrow(SYNC_ADDR_LIST *psList, IMG_UINT32 ui32NumSyncs)
+{
+ if (ui32NumSyncs > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: ui32NumSyncs=%u > PVRSRV_MAX_SYNC_ADDR_LIST_SIZE=%u", __FUNCTION__, ui32NumSyncs, PVRSRV_MAX_SYNC_ADDR_LIST_SIZE));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+ if(ui32NumSyncs > psList->ui32NumSyncs)
+ {
+ if(psList->pasFWAddrs == NULL)
+ {
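+			/* First use of this list: allocate the array at its maximum
+			 * size up front, so later grows only bump ui32NumSyncs and
+			 * never need to reallocate or move existing entries. */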
+ psList->pasFWAddrs = OSAllocMem(sizeof(PRGXFWIF_UFO_ADDR) * PVRSRV_MAX_SYNC_ADDR_LIST_SIZE);
+ if(psList->pasFWAddrs == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+ }
+
+ psList->ui32NumSyncs = ui32NumSyncs;
+ }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+ return PVRSRV_OK;
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListInit
+
+ @Description : Initialise a SYNC_ADDR_LIST structure ready for use
+
+ @Input psList : The SYNC_ADDR_LIST structure to initialise
+ @Return : None
+*****************************************************************************/
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList)
+{
+ psList->ui32NumSyncs = 0;
+ psList->pasFWAddrs = NULL;
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListDeinit
+
+ @Description : Frees any resources associated with the given SYNC_ADDR_LIST
+
+ @Input psList : The SYNC_ADDR_LIST structure to deinitialise
+ @Return : None
+*****************************************************************************/
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList)
+{
+ if(psList->pasFWAddrs != NULL)
+ {
+ OSFreeMem(psList->pasFWAddrs);
+ }
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListPopulate
+
+ @Description : Populate the given SYNC_ADDR_LIST with the FW addresses
+ of the syncs given by the SYNC_PRIMITIVE_BLOCKs and sync offsets
+
+ @Input psList : The SYNC_ADDR_LIST to populate
+ @Input ui32NumSyncs : The number of syncs being passed in
+ @Input apsSyncPrimBlock: Array of pointers to SYNC_PRIMITIVE_BLOCK structures
+ in which the syncs are based
+ @Input paui32SyncOffset: Array of offsets within each of the sync primitive blocks
+ where the syncs are located
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumSyncs,
+ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+ IMG_UINT32 *paui32SyncOffset)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+ if(ui32NumSyncs > psList->ui32NumSyncs)
+ {
+ eError = SyncAddrListGrow(psList, ui32NumSyncs);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+ psList->ui32NumSyncs = ui32NumSyncs;
+
+ for(i = 0; i < ui32NumSyncs; i++)
+ {
+ eError = SyncPrimitiveBlockToFWAddr(apsSyncPrimBlock[i],
+ paui32SyncOffset[i],
+ &psList->pasFWAddrs[i]);
+
+ if(eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+ }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumSyncs=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumSyncs));
+#endif
+ return PVRSRV_OK;
+}
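+
+/* Illustrative lifecycle sketch (hypothetical caller, not part of the
+ * original file) showing how the SYNC_ADDR_LIST helpers above fit together;
+ * ui32Count, apsBlocks and aui32Offsets stand for caller-provided values:
+ *
+ *   SYNC_ADDR_LIST sList;
+ *   SyncAddrListInit(&sList);
+ *   eError = SyncAddrListPopulate(&sList, ui32Count, apsBlocks, aui32Offsets);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       ... hand sList.pasFWAddrs to the kick path ...
+ *   }
+ *   SyncAddrListDeinit(&sList);
+ */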
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32FwAddr = 0;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs));
+#endif
+ /* Ensure there's room in psList for the additional sync prim update */
+ eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + 1);
+ if(eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+	eError = SyncPrimGetFirmwareAddr(psSyncPrim, &ui32FwAddr);
+	if(eError != PVRSRV_OK)
+	{
+		goto e0;
+	}
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+	PVR_DPF((PVR_DBG_ERROR, "%s: Appending sync prim <%p> UFO addr (0x%x) to psList->pasFWAddrs[%d]", __FUNCTION__, (void*)psSyncPrim, ui32FwAddr, psList->ui32NumSyncs-1));
+#endif
+ psList->pasFWAddrs[psList->ui32NumSyncs-1].ui32Addr = ui32FwAddr;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ {
+ IMG_UINT32 iii;
+
+ PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __FUNCTION__, psList->ui32NumSyncs));
+ for (iii=0; iii<psList->ui32NumSyncs; iii++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: psList->pasFWAddrs[%d].ui32Addr=0x%x", __FUNCTION__, iii, psList->pasFWAddrs[iii].ui32Addr));
+ }
+ }
+#endif
+e0:
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d", __FUNCTION__, (void*)psList, psList->ui32NumSyncs));
+#endif
+ return eError;
+}
+
+
+static PVRSRV_ERROR
+_AppendCheckpoints(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint,
+ IMG_BOOL bDeRefCheckpoints)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32SyncCheckpointIndex;
+ IMG_UINT32 ui32RollbackSize = psList->ui32NumSyncs;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Entry psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+ /* Ensure there's room in psList for the sync checkpoints */
+ eError = SyncAddrListGrow(psList, psList->ui32NumSyncs + ui32NumCheckpoints);
+ if(eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: * * * * ERROR * * * * Trying to SyncAddrListGrow(psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+ goto e0;
+ }
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: (ui32NumCheckpoints=%d) (psList->ui32NumSyncs is now %d) array already contains %d FWAddrs:", __FUNCTION__, ui32NumCheckpoints, psList->ui32NumSyncs, ui32RollbackSize));
+ if (ui32RollbackSize > 0)
+ {
+ {
+ IMG_UINT32 kk;
+ for (kk=0; kk<ui32RollbackSize; kk++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __FUNCTION__,
+ (void*)&psList->pasFWAddrs[kk], kk,
+ psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+ }
+ }
+ }
+ PVR_DPF((PVR_DBG_ERROR, "%s: apsSyncCheckpoint=<%p>, apsSyncCheckpoint[0] = <%p>", __FUNCTION__, (void*)apsSyncCheckpoint, (void*)apsSyncCheckpoint[0]));
+#endif
+ for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+ {
+ psList->pasFWAddrs[ui32RollbackSize + ui32SyncCheckpointIndex].ui32Addr = SyncCheckpointGetFirmwareAddr(apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: SyncCheckpointCCBEnqueued(<%p>)", __FUNCTION__, (void*)apsSyncCheckpoint[ui32SyncCheckpointIndex]));
+ PVR_DPF((PVR_DBG_ERROR, "%s: ID:%d", __FUNCTION__, SyncCheckpointGetId((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex])));
+#endif
+ SyncCheckpointCCBEnqueued((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+ if (bDeRefCheckpoints)
+ {
+ /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+ SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+ }
+ }
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ if (psList->ui32NumSyncs > 0)
+ {
+ IMG_UINT32 kk;
+ for (kk=0; kk<psList->ui32NumSyncs; kk++)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: <%p>psList->pasFWAddrs[%d].ui32Addr = %u(0x%x)", __FUNCTION__,
+ (void*)&psList->pasFWAddrs[kk], kk,
+ psList->pasFWAddrs[kk].ui32Addr, psList->pasFWAddrs[kk].ui32Addr));
+ }
+ }
+#endif
+ return eError;
+
+e0:
+ for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+ {
+ if (bDeRefCheckpoints)
+ {
+ /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+ SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+ }
+ }
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: Exit psList=<%p>, psList->ui32NumSyncs=%d, ui32NumCheckpoints=%d)", __FUNCTION__, (void*)psList, psList->ui32NumSyncs, ui32NumCheckpoints));
+#endif
+ return eError;
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListAppendCheckpoints
+
+ @Description : Append the FW addresses of the sync checkpoints given in
+ the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST
+
+ @Input ui32NumCheckpoints : The number of sync checkpoints
+ being passed in
+ @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+ are to be appended to the SYNC_ADDR_LIST
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+ return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_FALSE);
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListAppendAndDeRefCheckpoints
+
+ @Description : Append the FW addresses of the sync checkpoints given in
+ the PSYNC_CHECKPOINTs array to the given SYNC_ADDR_LIST.
+ A reference is dropped for each of the checkpoints.
+
+ @Input ui32NumCheckpoints : The number of sync checkpoints
+ being passed in
+ @Input apsSyncCheckpoint : Array of PSYNC_CHECKPOINTs whose details
+ are to be appended to the SYNC_ADDR_LIST
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+ return _AppendCheckpoints(psList, ui32NumCheckpoints, apsSyncCheckpoint, IMG_TRUE);
+}
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint)
+{
+ IMG_UINT32 ui32SyncCheckpointIndex;
+
+ for (ui32SyncCheckpointIndex=0; ui32SyncCheckpointIndex<ui32NumCheckpoints; ui32SyncCheckpointIndex++)
+ {
+ /* Drop the reference that was taken internally by the OS implementation of resolve_fence() */
+ SyncCheckpointDropRef((PSYNC_CHECKPOINT)apsSyncCheckpoint[ui32SyncCheckpointIndex]);
+ }
+}
+
+/*!
+*****************************************************************************
+ @Function : SyncAddrListRollbackCheckpoints
+
+ @Description : Rollback the enqueued count of each sync checkpoint in
+ the given SYNC_ADDR_LIST. This needs to be done in the
+ event of the kick call failing, so that the reference
+ taken on each sync checkpoint on the firmware's behalf
+ is dropped.
+
+ @Input psList : The SYNC_ADDR_LIST structure containing
+ sync checkpoints to be rolled back
+
+ @Return : PVRSRV_OK on success. PVRSRV_ERROR_INVALID_PARAMS if input
+ parameters are invalid.
+*****************************************************************************/
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ IMG_UINT32 ui32SyncIndex;
+
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: called (psList=<%p>)", __FUNCTION__, (void*)psList));
+#endif
+ if (psList)
+ {
+#if (SYNC_ADDR_LIST_DEBUG == 1)
+ PVR_DPF((PVR_DBG_ERROR, "%s: psList->ui32NumSyncs=%d", __FUNCTION__, psList->ui32NumSyncs));
+#endif
+ for (ui32SyncIndex=0; ui32SyncIndex<psList->ui32NumSyncs; ui32SyncIndex++)
+ {
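+			/* Sync checkpoint UFO addresses appear to be distinguished
+			 * from sync prim addresses by bit 0 being set (an assumption
+			 * inferred from this test and SyncCheckpointRollbackFromUFO),
+			 * so only those entries are rolled back here. */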
+ if (psList->pasFWAddrs[ui32SyncIndex].ui32Addr & 0x1)
+ {
+ SyncCheckpointRollbackFromUFO(psDevNode, psList->pasFWAddrs[ui32SyncIndex].ui32Addr);
+ }
+ }
+ }
+ return eError;
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SYNC_RECORD_HANDLE *phRecord,
+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ struct SYNC_RECORD * psSyncRec;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ RGX_HWPERF_HOST_ALLOC(psDevNode, SYNC,
+ ui32FwBlockAddr + ui32SyncOffset,
+ pszClassName,
+ ui32ClassNameSize);
+
+ if (!phRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ *phRecord = NULL;
+
+ psSyncRec = OSAllocMem(sizeof(*psSyncRec));
+ if (!psSyncRec)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ psSyncRec->psDevNode = psDevNode;
+ psSyncRec->psServerSyncPrimBlock = hServerSyncPrimBlock;
+ psSyncRec->ui32SyncOffset = ui32SyncOffset;
+ psSyncRec->ui32FwBlockAddr = ui32FwBlockAddr;
+ psSyncRec->ui64OSTime = OSClockns64();
+ psSyncRec->uiPID = OSGetCurrentProcessID();
+ psSyncRec->eRecordType = bServerSync? SYNC_RECORD_TYPE_SERVER: SYNC_RECORD_TYPE_CLIENT;
+
+ if(pszClassName)
+ {
+ if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN;
+ /* Copy over the class name annotation */
+ OSStringLCopy(psSyncRec->szClassName, pszClassName, ui32ClassNameSize);
+ }
+ else
+ {
+ /* No class name annotation */
+ psSyncRec->szClassName[0] = 0;
+ }
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+ if(psDevNode->ui32SyncServerRecordCount < SYNC_RECORD_LIMIT)
+ {
+ dllist_add_to_head(&psDevNode->sSyncServerRecordList, &psSyncRec->sNode);
+ psDevNode->ui32SyncServerRecordCount++;
+
+ if(psDevNode->ui32SyncServerRecordCount > psDevNode->ui32SyncServerRecordCountHighWatermark)
+ {
+ psDevNode->ui32SyncServerRecordCountHighWatermark = psDevNode->ui32SyncServerRecordCount;
+ }
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to add sync record \"%s\". %u records already exist.",
+ __func__,
+ pszClassName,
+ psDevNode->ui32SyncServerRecordCount));
+ OSFreeMem(psSyncRec);
+ psSyncRec = NULL;
+ eError = PVRSRV_ERROR_TOOMANYBUFFERS;
+ }
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+ *phRecord = (SYNC_RECORD_HANDLE)psSyncRec;
+
+fail_alloc:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+ SYNC_RECORD_HANDLE hRecord)
+{
+ struct SYNC_RECORD **ppFreedSync;
+ struct SYNC_RECORD *pSync = (struct SYNC_RECORD*)hRecord;
+ PVRSRV_DEVICE_NODE *psDevNode;
+
+ if (!hRecord)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ psDevNode = pSync->psDevNode;
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+ RGX_HWPERF_HOST_FREE(psDevNode, SYNC, pSync->ui32FwBlockAddr + pSync->ui32SyncOffset);
+
+ dllist_remove_node(&pSync->sNode);
+
+ if (psDevNode->uiSyncServerRecordFreeIdx >= PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: freed sync record index out of range",
+ __func__));
+ psDevNode->uiSyncServerRecordFreeIdx = 0;
+ }
+ ppFreedSync = &psDevNode->apsSyncServerRecordsFreed[psDevNode->uiSyncServerRecordFreeIdx];
+ psDevNode->uiSyncServerRecordFreeIdx =
+ (psDevNode->uiSyncServerRecordFreeIdx + 1) % PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN;
+
+ if (*ppFreedSync)
+ {
+ OSFreeMem(*ppFreedSync);
+ }
+ pSync->psServerSyncPrimBlock = NULL;
+ pSync->ui64OSTime = OSClockns64();
+ *ppFreedSync = pSync;
+
+ psDevNode->ui32SyncServerRecordCount--;
+
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+ return PVRSRV_OK;
+}
+#else
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SYNC_RECORD_HANDLE *phRecord,
+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ *phRecord = NULL;
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ PVR_UNREFERENCED_PARAMETER(psDevNode);
+ PVR_UNREFERENCED_PARAMETER(hServerSyncPrimBlock);
+ PVR_UNREFERENCED_PARAMETER(ui32FwBlockAddr);
+ PVR_UNREFERENCED_PARAMETER(ui32SyncOffset);
+ PVR_UNREFERENCED_PARAMETER(bServerSync);
+ PVR_UNREFERENCED_PARAMETER(ui32ClassNameSize);
+ PVR_UNREFERENCED_PARAMETER(pszClassName);
+ return PVRSRV_OK;
+}
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+ SYNC_RECORD_HANDLE hRecord)
+{
+ PVR_UNREFERENCED_PARAMETER(hRecord);
+ return PVRSRV_OK;
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ RGX_HWPERF_HOST_ALLOC(psDevNode, SYNC, ui32FWAddr, pszClassName, ui32ClassNameSize);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32FWAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+ RGX_HWPERF_HOST_FREE(psDevNode, SYNC, ui32FWAddr);
+
+ return PVRSRV_OK;
+}
+
+static
+void _SyncConnectionRef(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ IMG_INT iRefCount = OSAtomicIncrement(&psSyncConnectionData->sRefCount);
+
+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+ __FUNCTION__, psSyncConnectionData, iRefCount);
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static
+void _SyncConnectionUnref(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ IMG_INT iRefCount = OSAtomicDecrement(&psSyncConnectionData->sRefCount);
+ if (iRefCount == 0)
+ {
+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+ __FUNCTION__, psSyncConnectionData, iRefCount);
+
+ PVR_ASSERT(dllist_is_empty(&psSyncConnectionData->sListHead));
+ OSLockDestroy(psSyncConnectionData->hLock);
+ OSFreeMem(psSyncConnectionData);
+ }
+ else
+ {
+ SYNC_REFCOUNT_PRINT("%s: Sync connection %p, refcount = %d",
+ __FUNCTION__, psSyncConnectionData, iRefCount);
+ PVR_ASSERT(iRefCount > 0);
+ }
+}
+
+static
+void _SyncConnectionAddBlock(CONNECTION_DATA *psConnection, SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+ if (psConnection)
+ {
+ SYNC_CONNECTION_DATA *psSyncConnectionData = psConnection->psSyncConnectionData;
+
+ /*
+ Make sure the connection doesn't go away. It doesn't matter that we will release
+		the lock in between, as the refcount and list don't have to be atomic w.r.t. each other
+ */
+ _SyncConnectionRef(psSyncConnectionData);
+
+ OSLockAcquire(psSyncConnectionData->hLock);
+		/* psConnection is already known to be non-NULL here */
+		dllist_add_to_head(&psSyncConnectionData->sListHead, &psBlock->sConnectionNode);
+ OSLockRelease(psSyncConnectionData->hLock);
+ psBlock->psSyncConnectionData = psSyncConnectionData;
+ }
+ else
+ {
+ psBlock->psSyncConnectionData = NULL;
+ }
+}
+
+static
+void _SyncConnectionRemoveBlock(SYNC_PRIMITIVE_BLOCK *psBlock)
+{
+ SYNC_CONNECTION_DATA *psSyncConnectionData = psBlock->psSyncConnectionData;
+
+ if (psBlock->psSyncConnectionData)
+ {
+ OSLockAcquire(psSyncConnectionData->hLock);
+ dllist_remove_node(&psBlock->sConnectionNode);
+ OSLockRelease(psSyncConnectionData->hLock);
+
+ _SyncConnectionUnref(psBlock->psSyncConnectionData);
+ }
+}
+
+static
+void _SyncPrimitiveBlockRef(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+ IMG_INT iRefCount = OSAtomicIncrement(&psSyncBlk->sRefCount);
+
+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+ __FUNCTION__, psSyncBlk, iRefCount);
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static
+void _SyncPrimitiveBlockUnref(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+ IMG_INT iRefCount = OSAtomicDecrement(&psSyncBlk->sRefCount);
+ if (iRefCount == 0)
+ {
+ PVRSRV_DEVICE_NODE *psDevNode = psSyncBlk->psDevNode;
+
+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d (remove)",
+ __FUNCTION__, psSyncBlk, iRefCount);
+
+ _SyncConnectionRemoveBlock(psSyncBlk);
+ DevmemReleaseCpuVirtAddr(psSyncBlk->psMemDesc);
+ psDevNode->pfnFreeUFOBlock(psDevNode, psSyncBlk->psMemDesc);
+ OSFreeMem(psSyncBlk);
+ }
+ else
+ {
+ SYNC_REFCOUNT_PRINT("%s: Sync block %p, refcount = %d",
+ __FUNCTION__, psSyncBlk, iRefCount);
+ PVR_ASSERT(iRefCount > 0);
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+ IMG_UINT32 *puiSyncPrimVAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize,
+ PMR **ppsSyncPMR)
+{
+ SYNC_PRIMITIVE_BLOCK *psNewSyncBlk;
+ PVRSRV_ERROR eError;
+
+ psNewSyncBlk = OSAllocMem(sizeof(SYNC_PRIMITIVE_BLOCK));
+ if (psNewSyncBlk == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+ psNewSyncBlk->psDevNode = psDevNode;
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS, "Allocate UFO block");
+
+ eError = psDevNode->pfnAllocUFOBlock(psDevNode,
+ &psNewSyncBlk->psMemDesc,
+ &psNewSyncBlk->uiFWAddr.ui32Addr,
+ &psNewSyncBlk->ui32BlockSize);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ *puiSyncPrimVAddr = psNewSyncBlk->uiFWAddr.ui32Addr;
+
+ eError = DevmemAcquireCpuVirtAddr(psNewSyncBlk->psMemDesc,
+ (void **) &psNewSyncBlk->pui32LinAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ eError = DevmemLocalGetImportHandle(psNewSyncBlk->psMemDesc, (void **) ppsSyncPMR);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e3;
+ }
+
+ OSAtomicWrite(&psNewSyncBlk->sRefCount, 1);
+
+	/* If there is a connection pointer then add the new block onto its list */
+ _SyncConnectionAddBlock(psConnection, psNewSyncBlk);
+
+ *ppsSyncBlk = psNewSyncBlk;
+ *puiSyncPrimBlockSize = psNewSyncBlk->ui32BlockSize;
+
+ PDUMPCOMMENTWITHFLAGS(PDUMP_FLAGS_CONTINUOUS,
+ "Allocated UFO block (FirmwareVAddr = 0x%08x)",
+ *puiSyncPrimVAddr);
+
+ return PVRSRV_OK;
+
+e3:
+ DevmemReleaseCpuVirtAddr(psNewSyncBlk->psMemDesc);
+e2:
+ psDevNode->pfnFreeUFOBlock(psDevNode, psNewSyncBlk->psMemDesc);
+e1:
+ OSFreeMem(psNewSyncBlk);
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk)
+{
+ _SyncPrimitiveBlockUnref(psSyncBlk);
+
+ return PVRSRV_OK;
+}
+
+static INLINE IMG_BOOL _CheckSyncIndex(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+ IMG_UINT32 ui32Index)
+{
+ return ((ui32Index * sizeof(IMG_UINT32)) < psSyncBlk->ui32BlockSize);
+}
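+/* e.g. for a 4096-byte block (size chosen purely for illustration), indices
+ * 0..1023 are valid since each sync occupies sizeof(IMG_UINT32) == 4 bytes. */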
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value)
+{
+ if(_CheckSyncIndex(psSyncBlk, ui32Index))
+ {
+ psSyncBlk->pui32LinAddr[ui32Index] = ui32Value;
+ return PVRSRV_OK;
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "PVRSRVSyncPrimSetKM: Index %u out of range for "
+ "0x%08X byte sync block (value 0x%08X)",
+ ui32Index,
+ psSyncBlk->ui32BlockSize,
+ ui32Value));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value)
+{
+	OSWriteDeviceMem32(psServerSync->psSync->pui32LinAddr, ui32Value);
+
+ return PVRSRV_OK;
+}
+
+static void
+_ServerSyncRef(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ IMG_INT iRefCount = OSAtomicIncrement(&psSync->sRefCount);
+
+ SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+ __FUNCTION__, psSync, iRefCount);
+ PVR_UNREFERENCED_PARAMETER(iRefCount);
+}
+
+static void
+_ServerSyncUnref(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = psSync->psDevNode;
+
+	IMG_INT iRefCount = OSAtomicDecrement(&psSync->sRefCount);
+ if (iRefCount == 0)
+ {
+ IMG_UINT32 ui32SyncAddr;
+
+ (void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+ SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+ __FUNCTION__, psSync, iRefCount);
+ HTBLOGK(HTB_SF_SYNC_SERVER_UNREF, ui32SyncAddr);
+
+ /* Remove the sync from the global list */
+ OSLockAcquire(psDevNode->hSyncServerListLock);
+ dllist_remove_node(&psSync->sSyncServerListNode);
+ OSLockRelease(psDevNode->hSyncServerListLock);
+
+ OSLockDestroy(psSync->hLock);
+ /* safe to ignore return value as an error indicates
+ * the sync is either already freed or not a sync
+ */
+ (void)SyncPrimFree(psSync->psSync);
+ OSFreeMem(psSync);
+ }
+ else
+ {
+ SYNC_REFCOUNT_PRINT("%s: Server sync %p, refcount = %d",
+ __FUNCTION__, psSync, iRefCount);
+ PVR_ASSERT(iRefCount > 0);
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName)
+{
+ SERVER_SYNC_PRIMITIVE *psNewSync;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ psNewSync = OSAllocMem(sizeof(SERVER_SYNC_PRIMITIVE));
+ if (psNewSync == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+	/* szClassName must be set up now and used for the SyncPrimAlloc call because
+	 * pszClassName, which is allocated in the bridge code, is not NULL terminated
+ */
+ if(pszClassName)
+ {
+ if (ui32ClassNameSize >= SYNC_MAX_CLASS_NAME_LEN)
+ ui32ClassNameSize = SYNC_MAX_CLASS_NAME_LEN;
+ /* Copy over the class name annotation */
+ OSStringLCopy(psNewSync->szClassName, pszClassName, ui32ClassNameSize);
+ }
+ else
+ {
+ /* No class name annotation */
+ psNewSync->szClassName[0] = 0;
+ }
+
+ eError = SyncPrimAllocForServerSync(psDevNode->hSyncPrimContext,
+ &psNewSync->psSync,
+ psNewSync->szClassName);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_sync_alloc;
+ }
+
+ eError = OSLockCreate(&psNewSync->hLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+
+ eError = SyncPrimSet(psNewSync->psSync, 0);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_sync_op;
+ }
+
+ psNewSync->psDevNode = psDevNode;
+ psNewSync->ui32NextOp = 0;
+ psNewSync->ui32UID = g_ServerSyncUID++;
+ psNewSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+ psNewSync->bSWOperation = IMG_FALSE;
+ psNewSync->ui32LastHWUpdate = 0x0bad592c;
+ psNewSync->ui32LastPdumpedBlock = PDUMP_BLOCKNUM_INVALID;
+ psNewSync->bFirstOperationInBlock = IMG_FALSE;
+ psNewSync->bPDumped = IMG_FALSE;
+ OSAtomicWrite(&psNewSync->sRefCount, 1);
+
+ eError = SyncPrimGetFirmwareAddr(psNewSync->psSync, pui32SyncPrimVAddr);
+ if (PVRSRV_OK != eError)
+ {
+ goto fail_sync_op;
+ }
+
+ /* Add the sync to the global list */
+ OSLockAcquire(psDevNode->hSyncServerListLock);
+ dllist_add_to_head(&psDevNode->sSyncServerSyncsList, &psNewSync->sSyncServerListNode);
+ OSLockRelease(psDevNode->hSyncServerListLock);
+
+ HTBLOGK(HTB_SF_SYNC_SERVER_ALLOC, *pui32SyncPrimVAddr);
+ SYNC_UPDATES_PRINT("%s: sync: %p, fwaddr: %8.8X", __FUNCTION__, psNewSync, *pui32SyncPrimVAddr);
+ *ppsSync = psNewSync;
+ return PVRSRV_OK;
+
+fail_sync_op:
+ OSLockDestroy(psNewSync->hLock);
+
+fail_lock_create:
+ SyncPrimFree(psNewSync->psSync);
+
+fail_sync_alloc:
+ OSFreeMem(psNewSync);
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ _ServerSyncUnref(psSync);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+ SERVER_SYNC_PRIMITIVE **papsSyncs,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp)
+{
+ IMG_UINT32 i, ui32SyncAddr;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eReturn = PVRSRV_OK;
+
+ for (i=0;i<ui32SyncCount;i++)
+ {
+ PVRSRV_CLIENT_SYNC_PRIM *psClientSync = papsSyncs[i]->psSync;
+
+ eError = SyncPrimGetFirmwareAddr(psClientSync, &ui32SyncAddr);
+ if (PVRSRV_OK != eError)
+ {
+ pui32FWAddr[i] = 0;
+ pui32CurrentOp[i] = 0;
+ eReturn = eError;
+ }
+ else
+ {
+ pui32FWAddr[i] = ui32SyncAddr;
+ pui32CurrentOp[i] = OSReadDeviceMem32(psClientSync->pui32LinAddr);
+ }
+ pui32NextOp[i] = papsSyncs[i]->ui32NextOp;
+ pui32UID[i] = papsSyncs[i]->ui32UID;
+ }
+ return eReturn;
+}
+
+#if defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT)
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+ SERVER_SYNC_EXPORT **ppsExport)
+{
+ SERVER_SYNC_EXPORT *psNewExport;
+ PVRSRV_ERROR eError;
+
+ psNewExport = OSAllocMem(sizeof(SERVER_SYNC_EXPORT));
+ if (!psNewExport)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ _ServerSyncRef(psSync);
+
+ psNewExport->psSync = psSync;
+ *ppsExport = psNewExport;
+
+ return PVRSRV_OK;
+e0:
+ return eError;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+ _ServerSyncUnref(psExport->psSync);
+
+ OSFreeMem(psExport);
+
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+_PVRSRVSyncPrimServerImportKM(PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_EXPORT *psExport,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr)
+{
+ SERVER_SYNC_PRIMITIVE *psSync = psExport->psSync;
+ PVRSRV_ERROR eError;
+
+ if (psSync->psDevNode != psDevNode)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: server sync invalid for this device\n",
+ __func__));
+ return PVRSRV_ERROR_PMR_NOT_PERMITTED;
+ }
+
+ _ServerSyncRef(psSync);
+
+ *ppsSync = psSync;
+ eError = SyncPrimGetFirmwareAddr(psSync->psSync,
+ pui32SyncPrimVAddr);
+ return eError;
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) || defined(SUPPORT_SECURE_EXPORT) */
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+ SERVER_SYNC_EXPORT **ppsExport)
+{
+ return _PVRSRVSyncPrimServerExportKM(psSync, ppsExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+ return _PVRSRVSyncPrimServerUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_EXPORT *psExport,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ return _PVRSRVSyncPrimServerImportKM(psDevNode, psExport, ppsSync,
+ pui32SyncPrimVAddr);
+}
+#endif /* defined(SUPPORT_INSECURE_EXPORT) */
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport)
+{
+ _PVRSRVSyncPrimServerUnexportKM(psExport);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR _ReleaseSecureSync(void *psExport)
+{
+ return PVRSRVSyncPrimServerSecureUnexportKM(psExport);
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_SECURE_TYPE *phSecure,
+ SERVER_SYNC_EXPORT **ppsExport,
+ CONNECTION_DATA **ppsSecureConnection)
+{
+ SERVER_SYNC_EXPORT *psNewExport;
+ PVRSRV_ERROR eError;
+
+ PVR_UNREFERENCED_PARAMETER(ppsSecureConnection);
+
+ /* Create an export server sync */
+ eError = _PVRSRVSyncPrimServerExportKM(psSync,
+ &psNewExport);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Transform it into a secure export */
+ eError = OSSecureExport("secure_sync",
+ _ReleaseSecureSync,
+ (void *) psNewExport,
+ phSecure);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ *ppsExport = psNewExport;
+ return PVRSRV_OK;
+e1:
+ _PVRSRVSyncPrimServerUnexportKM(psNewExport);
+e0:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_SECURE_TYPE hSecure,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr)
+{
+ PVRSRV_ERROR eError;
+ SERVER_SYNC_EXPORT *psImport;
+
+ PVR_UNREFERENCED_PARAMETER(psConnection);
+
+ /* Retrieve the data from the secure import */
+ eError = OSSecureImport(hSecure, (void **) &psImport);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eError = _PVRSRVSyncPrimServerImportKM(psDevNode, psImport, ppsSync,
+ pui32SyncPrimVAddr);
+e0:
+ return eError;
+}
+#endif /* defined(SUPPORT_SECURE_EXPORT) */
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID)
+{
+ *pui32SyncRequesterID = g_ui32NextSyncRequestorID++;
+
+ return PVRSRV_OK;
+}
+
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32SyncRequesterID);
+}
+
+static void
+_ServerSyncTakeOperation(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+ IMG_BOOL bInCaptureRange;
+#if defined(PDUMP)
+ IMG_UINT32 ui32CurrentBlock;
+#endif
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVR_ASSERT(OSLockIsLocked(ghServerSyncLock));
+#endif
+
+ /* Only advance the pending if an update is required */
+ if (bUpdate)
+ {
+ *pui32FenceValue = psSync->ui32NextOp++;
+ }
+ else
+ {
+ *pui32FenceValue = psSync->ui32NextOp;
+ }
+
+ *pui32UpdateValue = psSync->ui32NextOp;
+
+ PDumpIsCaptureFrameKM(&bInCaptureRange);
+
+#if defined(PDUMP)
+ PDumpGetCurrentBlockKM(&ui32CurrentBlock);
+
+	/* Is this the first operation taken on _this_ sync in a new pdump-block? */
+ psSync->bFirstOperationInBlock = (psSync->ui32LastPdumpedBlock != ui32CurrentBlock) && (ui32CurrentBlock != PDUMP_BLOCKNUM_INVALID);
+#endif
+	/*
+		If this is the first operation on this sync in the current capture
+		range then PDump this sync.
+
+		In PDump block-mode, also PDump the sync if this is the first
+		operation taken on _this_ particular sync in the new pdump-block,
+		i.e. the first operation since the live-FW thread and the driver
+		thread were synchronised at the start of the block. The sync must
+		be re-dumped so that its latest value can be loaded _after_ the
+		sim-FW thread and the script thread are synchronised at the start
+		of playback of the next pdump-block.
+	*/
+ if ((!psSync->bPDumped && bInCaptureRange) || psSync->bFirstOperationInBlock)
+ {
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32SyncAddr;
+ (void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+ PDumpCommentWithFlags(0,
+ "Dump initial sync state (0x%p, FW VAddr = 0x%08x) = 0x%08x\n",
+ psSync,
+ ui32SyncAddr,
+ OSReadDeviceMem32(psSync->psSync->pui32LinAddr));
+ }
+ psSync->ui32LastPdumpedBlock = ui32CurrentBlock; /* Update last pdumped block number */
+#endif
+
+ SyncPrimPDump(psSync->psSync);
+ psSync->bPDumped = IMG_TRUE;
+ }
+
+ /*
+ When exiting capture range clear down bPDumped as we might re-enter
+ capture range and thus need to PDump this sync again
+ */
+ if (!bInCaptureRange)
+ {
+ psSync->bPDumped = IMG_FALSE;
+ }
+}
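+
+/* Worked example (hypothetical numbers) of the pending-value scheme above:
+ * with ui32NextOp == 5, a fence-only caller (bUpdate == IMG_FALSE) gets
+ * fence = 5 and update = 5 and leaves the sync untouched, while an updating
+ * caller (bUpdate == IMG_TRUE) gets fence = 5 and update = 6, advancing
+ * ui32NextOp so the next requester queues behind it.
+ */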
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32SyncRequesterID,
+ IMG_BOOL bUpdate,
+ IMG_BOOL *pbFenceRequired)
+{
+ PVRSRV_ERROR eError;
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVLockServerSync();
+#endif
+
+ eError = PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(psSync,
+ pui32FenceValue,
+ pui32UpdateValue,
+ ui32SyncRequesterID,
+ bUpdate,
+ pbFenceRequired);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVUnlockServerSync();
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32SyncRequesterID,
+ IMG_BOOL bUpdate,
+ IMG_BOOL *pbFenceRequired)
+{
+
+ _ServerSyncRef(psSync);
+
+	/*
+		We need to acquire the lock here to ensure the state that we're
+		modifying below will be consistent with itself. It doesn't matter
+		if another thread acquires the lock between taking the reference
+		and this point, as we've ensured the sync won't go away.
+	*/
+ OSLockAcquire(psSync->hLock);
+ _ServerSyncTakeOperation(psSync,
+ bUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+
+	/*
+		The caller wants to know whether a fence command is required, i.e.
+		whether the last operation on this sync was done by the same sync
+		requester
+	*/
+ if (pbFenceRequired)
+ {
+ if (ui32SyncRequesterID == psSync->ui32LastSyncRequesterID)
+ {
+ *pbFenceRequired = IMG_FALSE;
+ }
+ else
+ {
+ *pbFenceRequired = IMG_TRUE;
+ }
+ }
+	/*
+		If we're transitioning from a HW operation to a SW operation we
+		need to save the last update the HW will do so that, when we
+		PDump, we can issue a POL for it before the next HW operation and
+		then LDB in the last SW fence update
+	*/
+ if (psSync->bSWOperation == IMG_FALSE)
+ {
+ psSync->bSWOperation = IMG_TRUE;
+ psSync->ui32LastHWUpdate = *pui32FenceValue;
+ PDumpIsCaptureFrameKM(&psSync->bSWOpStartedInCaptRange);
+ }
+
+ if (pbFenceRequired)
+ {
+ if (*pbFenceRequired)
+ {
+ SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+ }
+ }
+
+	/* Only update the last requester ID if we are making changes to this
+	 * sync object. */
+ if (bUpdate)
+ psSync->ui32LastSyncRequesterID = ui32SyncRequesterID;
+
+ OSLockRelease(psSync->hLock);
+
+ return PVRSRV_OK;
+}
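+
+/* Hedged usage sketch (caller context and names hypothetical): queue a SW
+ * operation, do the CPU-side work, then complete the operation so waiters
+ * can make progress.
+ *
+ *   IMG_UINT32 ui32Fence, ui32Update;
+ *   IMG_BOOL bFenceRequired;
+ *
+ *   PVRSRVServerSyncQueueSWOpKM(psSync, &ui32Fence, &ui32Update,
+ *                               ui32RequesterID, IMG_TRUE, &bFenceRequired);
+ *   // ... perform the software operation ...
+ *   ServerSyncCompleteOp(psSync, IMG_TRUE, ui32Update);
+ */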
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+ PVRSRV_ERROR eError;
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVLockServerSync();
+#endif
+
+ eError = PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(psSync,
+ bUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVUnlockServerSync();
+#endif
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue)
+{
+	/*
+		For HW operations the client is required to ensure the
+		operation has completed before freeing the sync, as we have
+		no way of dropping the refcount if we were to acquire it
+		here.
+
+		Take the lock to ensure the state that we're modifying below
+		will be consistent with itself.
+	*/
+ OSLockAcquire(psSync->hLock);
+ _ServerSyncTakeOperation(psSync,
+ bUpdate,
+ pui32FenceValue,
+ pui32UpdateValue);
+
+	/*
+		Note:
+
+		We might want to consider optimising the fences that we write for
+		HW operations, but for now just reset the requester to unknown
+	*/
+ psSync->ui32LastSyncRequesterID = SYNC_REQUESTOR_UNKNOWN;
+
+ if (psSync->bSWOperation)
+ {
+#if defined(PDUMP)
+ {
+ IMG_UINT32 ui32SyncAddr;
+ (void)SyncPrimGetFirmwareAddr(psSync->psSync, &ui32SyncAddr);
+ PDumpCommentWithFlags(0,
+ "Wait for HW ops and dummy update for SW ops (0x%p, FW VAddr = 0x%08x, value = 0x%08x)\n",
+ psSync,
+ ui32SyncAddr,
+ *pui32FenceValue);
+ }
+#endif
+
+		/* In PDump block-mode, if this is NOT the first operation on _this_
+		 * sync in the current pdump-block and the SW operation started in
+		 * capture range (which is always the case in block-mode), dump a POL
+		 * for the previous HW operation.
+		 *
+		 * In other words, if this is not the first operation on _this_ sync
+		 * in the current pdump-block, the script thread and the sim-FW
+		 * thread must be synchronised on _this_ sync before further commands
+		 * from the current pdump-block are processed.
+		 */
+ if (psSync->bSWOpStartedInCaptRange && !psSync->bFirstOperationInBlock)
+ {
+ /* Dump a POL for the previous HW operation */
+ SyncPrimPDumpPol(psSync->psSync,
+ psSync->ui32LastHWUpdate,
+ 0xffffffff,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ 0);
+ }
+
+ /* Dump the expected value (i.e. the value after all the SW operations) */
+ SyncPrimPDumpValue(psSync->psSync, *pui32FenceValue);
+
+ /* Reset the state as we've just done a HW operation */
+ psSync->bSWOperation = IMG_FALSE;
+ }
+ OSLockRelease(psSync->hLock);
+
+ SYNC_UPDATES_PRINT("%s: sync: %p, fence: %d, value: %d", __FUNCTION__, psSync, *pui32FenceValue, *pui32UpdateValue);
+
+ return PVRSRV_OK;
+}
+
+IMG_BOOL ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 ui32FenceValue)
+{
+ SYNC_UPDATES_PRINT("%s: sync: %p, value(%d) == fence(%d)?", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32FenceValue);
+ return (OSReadDeviceMem32(psSync->psSync->pui32LinAddr) == ui32FenceValue);
+}
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bDoUpdate,
+ IMG_UINT32 ui32UpdateValue)
+{
+ if (bDoUpdate)
+ {
+ SYNC_UPDATES_PRINT("%s: sync: %p (%d) = %d", __FUNCTION__, psSync, *psSync->psSync->pui32LinAddr, ui32UpdateValue);
+
+ OSWriteDeviceMem32(psSync->psSync->pui32LinAddr, ui32UpdateValue);
+ }
+
+ _ServerSyncUnref(psSync);
+}
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return psSync->ui32UID;
+}
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr)
+{
+ return SyncPrimGetFirmwareAddr(psSync->psSync, pui32SyncAddr);
+}
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return OSReadDeviceMem32(psSync->psSync->pui32LinAddr);
+}
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return psSync->ui32NextOp;
+}
+
+PVRSRV_DEVICE_NODE* ServerSyncGetDeviceNode(SERVER_SYNC_PRIMITIVE *psSync)
+{
+ return psSync->psDevNode;
+}
+
+static void _ServerSyncState(PDLLIST_NODE psNode,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ SERVER_SYNC_PRIMITIVE *psSync = IMG_CONTAINER_OF(psNode, SERVER_SYNC_PRIMITIVE, sSyncServerListNode);
+
+ if (OSReadDeviceMem32(psSync->psSync->pui32LinAddr) != psSync->ui32NextOp)
+ {
+ IMG_UINT32 ui32SyncAddr, ui32Val = 0;
+
+ (void)ServerSyncGetFWAddr(psSync, &ui32SyncAddr);
+#if !defined(SUPPORT_EXTRA_METASP_DEBUG)
+ PVR_UNREFERENCED_PARAMETER(ui32Val);
+ PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Current = 0x%08x, NextOp = 0x%08x (%s)",
+ psSync->ui32UID,
+ ui32SyncAddr,
+ ServerSyncGetValue(psSync),
+ psSync->ui32NextOp,
+ psSync->szClassName);
+#else
+ RGXReadWithSP(psSync->psDevNode->pvDevice, ui32SyncAddr, &ui32Val);
+ PVR_DUMPDEBUG_LOG("\tPending server sync (ID = %d, FWAddr = 0x%08x): Value (Host) = 0x%08x, Value (FW) = 0x%08x, NextOp = 0x%08x (%s)",
+ psSync->ui32UID,
+ ui32SyncAddr,
+ ServerSyncGetValue(psSync),
+ ui32Val,
+ psSync->ui32NextOp,
+ psSync->szClassName);
+#endif
+ }
+}
+
+static void _ServerSyncDebugRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+ DLLIST_NODE *psNode, *psNext;
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+ {
+ PVR_DUMPDEBUG_LOG("------[ Pending Server Syncs ]------");
+ OSLockAcquire(psDevNode->hSyncServerListLock);
+ dllist_foreach_node(&psDevNode->sSyncServerSyncsList, psNode, psNext)
+ {
+ _ServerSyncState(psNode, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ OSLockRelease(psDevNode->hSyncServerListLock);
+ }
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+ SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32SyncBlockIndex,
+ IMG_UINT32 *paui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ SERVER_SYNC_PRIMITIVE **papsServerSync,
+ SERVER_OP_COOKIE **ppsServerCookie)
+{
+ SERVER_OP_COOKIE *psNewCookie;
+ IMG_UINT32 ui32BlockAllocSize;
+ IMG_UINT32 ui32ServerAllocSize;
+ IMG_UINT32 ui32ClientAllocSize;
+ IMG_UINT32 ui32TotalAllocSize;
+ IMG_UINT32 i;
+ IMG_CHAR *pcPtr;
+ PVRSRV_ERROR eError;
+
+	if ((ui32ClientSyncCount + ui32ServerSyncCount) > SYNC_PRIM_OP_MAX_SYNCS)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Too many syncs specified", __func__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+	/* Allocate space for the sync block pointer list */
+	ui32BlockAllocSize = ui32SyncBlockCount * (sizeof(SYNC_PRIMITIVE_BLOCK *));
+
+	/* Allocate space for the per-client-sync elements (block index, sync
+	 * index, flags, fence value and update value) */
+	ui32ClientAllocSize = ui32ClientSyncCount * (5 * sizeof(IMG_UINT32));
+
+	/* Allocate space for the per-server-sync elements (sync pointer, fence
+	 * value and update value) */
+	ui32ServerAllocSize = ui32ServerSyncCount * (sizeof(SERVER_SYNC_PRIMITIVE *)
+								+ (2 * sizeof(IMG_UINT32)));
+
+ ui32TotalAllocSize = sizeof(SERVER_OP_COOKIE) +
+ ui32BlockAllocSize +
+ ui32ServerAllocSize +
+ ui32ClientAllocSize;
+
+ psNewCookie = OSAllocZMem(ui32TotalAllocSize);
+ pcPtr = (IMG_CHAR *) psNewCookie;
+
+ if (!psNewCookie)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ /* Setup the pointers */
+ pcPtr += sizeof(SERVER_OP_COOKIE);
+ psNewCookie->papsSyncPrimBlock = (SYNC_PRIMITIVE_BLOCK **) pcPtr;
+
+ pcPtr += sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount;
+ psNewCookie->paui32SyncBlockIndex = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Index = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32Flags = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32FenceValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+ psNewCookie->paui32UpdateValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ClientSyncCount;
+	psNewCookie->papsServerSync = (SERVER_SYNC_PRIMITIVE **) pcPtr;
+
+ pcPtr += sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount;
+ psNewCookie->paui32ServerFenceValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+ psNewCookie->paui32ServerUpdateValue = (IMG_UINT32 *) pcPtr;
+
+ pcPtr += sizeof(IMG_UINT32) * ui32ServerSyncCount;
+
+ /* Check the pointer setup went ok */
+ PVR_ASSERT(pcPtr == (((IMG_CHAR *) psNewCookie) + ui32TotalAllocSize));
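+	/* Resulting layout of the single allocation, for reference:
+	 *
+	 *   [SERVER_OP_COOKIE]
+	 *   [SYNC_PRIMITIVE_BLOCK *          x ui32SyncBlockCount ]
+	 *   [sync block index, IMG_UINT32    x ui32ClientSyncCount]
+	 *   [sync index,       IMG_UINT32    x ui32ClientSyncCount]
+	 *   [flags,            IMG_UINT32    x ui32ClientSyncCount]
+	 *   [fence value,      IMG_UINT32    x ui32ClientSyncCount]
+	 *   [update value,     IMG_UINT32    x ui32ClientSyncCount]
+	 *   [SERVER_SYNC_PRIMITIVE *         x ui32ServerSyncCount]
+	 *   [server fence value,  IMG_UINT32 x ui32ServerSyncCount]
+	 *   [server update value, IMG_UINT32 x ui32ServerSyncCount]
+	 */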
+
+	psNewCookie->ui32SyncBlockCount = ui32SyncBlockCount;
+ psNewCookie->ui32ServerSyncCount = ui32ServerSyncCount;
+ psNewCookie->ui32ClientSyncCount = ui32ClientSyncCount;
+ psNewCookie->bActive = IMG_FALSE;
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_CREATE, psNewCookie, ui32SyncBlockCount,
+ ui32ServerSyncCount, ui32ClientSyncCount);
+
+ /* Copy all the data into our server cookie */
+ OSCachedMemCopy(psNewCookie->papsSyncPrimBlock,
+ papsSyncPrimBlock,
+ sizeof(SYNC_PRIMITIVE_BLOCK *) * ui32SyncBlockCount);
+
+ /* Copy the sync block and sync indices.
+ *
+ * Each index must be verified:
+ * Each Sync Block index must be within the range of the number of sync block
+ * pointers received. All those pointers are valid, as verified by the bridge.
+ * And each Sync index must be valid for the Sync Block it relates to.
+ */
+	for (i = 0; i < ui32ClientSyncCount; i++)
+ {
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock;
+
+ /* first copy the sync block index and ensure it is in range */
+
+		if (paui32SyncBlockIndex[i] >= ui32SyncBlockCount)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Sync block index %u is out of range",
+ __func__,
+ paui32SyncBlockIndex[i]));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_range;
+ }
+
+ psNewCookie->paui32SyncBlockIndex[i] = paui32SyncBlockIndex[i];
+
+		/* now copy the sync index and ensure it is a valid index within
+		 * the corresponding sync block (note: the sync block index was
+		 * verified above)
+		 */
+
+ psSyncBlock = psNewCookie->papsSyncPrimBlock[paui32SyncBlockIndex[i]];
+
+		if (_CheckSyncIndex(psSyncBlock, paui32Index[i]) == IMG_FALSE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Sync index %u is out of range",
+ __func__,
+ paui32Index[i]));
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ goto err_range;
+ }
+
+ psNewCookie->paui32Index[i] = paui32Index[i];
+ }
+
+ OSCachedMemCopy(psNewCookie->papsServerSync,
+ papsServerSync,
+					sizeof(SERVER_SYNC_PRIMITIVE *) * ui32ServerSyncCount);
+
+ /*
+ Take a reference on all the sync blocks and server syncs so they can't
+ be freed while we're using them
+ */
+	for (i = 0; i < ui32SyncBlockCount; i++)
+ {
+ _SyncPrimitiveBlockRef(psNewCookie->papsSyncPrimBlock[i]);
+ }
+
+	for (i = 0; i < ui32ServerSyncCount; i++)
+ {
+ _ServerSyncRef(psNewCookie->papsServerSync[i]);
+ }
+
+ *ppsServerCookie = psNewCookie;
+ return PVRSRV_OK;
+
+err_range:
+ OSFreeMem(psNewCookie);
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32Flags,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerFlags)
+{
+ IMG_UINT32 i;
+
+ if ((ui32ClientSyncCount != psServerCookie->ui32ClientSyncCount) ||
+ (ui32ServerSyncCount != psServerCookie->ui32ServerSyncCount))
+ {
+		/* The bridge layer should have stopped us getting here, but check just in case */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Invalid sync counts", __FUNCTION__));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+	for (i = 0; i < ui32ServerSyncCount; i++)
+ {
+ /* Server syncs must fence */
+ if ((paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK) == 0)
+ {
+ return PVRSRV_ERROR_INVALID_SYNC_PRIM_OP;
+ }
+ }
+
+ /*
+ For client syncs all we need to do is save the values
+ that we've been passed
+ */
+ OSCachedMemCopy(psServerCookie->paui32Flags,
+ paui32Flags,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+ OSCachedMemCopy(psServerCookie->paui32FenceValue,
+ paui32FenceValue,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+ OSCachedMemCopy(psServerCookie->paui32UpdateValue,
+ paui32UpdateValue,
+ sizeof(IMG_UINT32) * ui32ClientSyncCount);
+
+ /*
+ For server syncs we just take an operation
+ */
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVLockServerSync();
+#endif
+	for (i = 0; i < ui32ServerSyncCount; i++)
+ {
+		/*
+			A take operation can only take one operation at a time, so we
+			can't optimise away fences; just report the requester as unknown
+		*/
+ PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(psServerCookie->papsServerSync[i],
+ &psServerCookie->paui32ServerFenceValue[i],
+ &psServerCookie->paui32ServerUpdateValue[i],
+ SYNC_REQUESTOR_UNKNOWN,
+ (paui32ServerFlags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE) ? IMG_TRUE:IMG_FALSE,
+ NULL);
+ }
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ PVRSRVUnlockServerSync();
+#endif
+
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_TAKE, psServerCookie,
+ ui32ServerSyncCount, ui32ClientSyncCount);
+ psServerCookie->bActive = IMG_TRUE;
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_BOOL *pbReady)
+{
+ IMG_UINT32 i;
+ IMG_BOOL bReady = IMG_TRUE;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!psServerCookie->bActive)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+ bReady = IMG_FALSE;
+ eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+ goto e0;
+ }
+
+ /* Check the client syncs */
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+ {
+ if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+ {
+ IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+ IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+ if (psSyncBlock->pui32LinAddr[ui32Index] !=
+ psServerCookie->paui32FenceValue[i])
+ {
+ bReady = IMG_FALSE;
+ goto e0;
+ }
+ }
+ }
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+ {
+ bReady = ServerSyncFenceIsMet(psServerCookie->papsServerSync[i],
+ psServerCookie->paui32ServerFenceValue[i]);
+ if (!bReady)
+ {
+ break;
+ }
+ }
+
+e0:
+ *pbReady = bReady;
+ return eError;
+}
+
+static
+IMG_BOOL _SyncPrimOpComplete(SERVER_OP_COOKIE *psServerCookie)
+{
+ RGX_HWPERF_UFO_DATA_ELEMENT sUFOData;
+ IMG_BOOL bDidUpdates = IMG_FALSE;
+ IMG_UINT32 i;
+
+	for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+ {
+ if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_UPDATE)
+ {
+ IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+ IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+ sUFOData.sUpdate.ui32FWAddr = psSyncBlock->uiFWAddr.ui32Addr + ui32Index * sizeof(IMG_UINT32);
+ sUFOData.sUpdate.ui32OldValue = psSyncBlock->pui32LinAddr[ui32Index];
+ sUFOData.sUpdate.ui32NewValue = psServerCookie->paui32UpdateValue[i];
+
+ psSyncBlock->pui32LinAddr[ui32Index] = psServerCookie->paui32UpdateValue[i];
+ RGX_HWPERF_HOST_UFO(psSyncBlock->psDevNode->pvDevice,
+ RGX_HWPERF_UFO_EV_UPDATE, &sUFOData, IMG_TRUE);
+ bDidUpdates = IMG_TRUE;
+ }
+ }
+
+	for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+ {
+ IMG_BOOL bUpdate = psServerCookie->paui32ServerFenceValue[i] != psServerCookie->paui32ServerUpdateValue[i];
+
+ if (bUpdate)
+ {
+ IMG_UINT32 ui32SyncAddr;
+
+ (void)ServerSyncGetFWAddr(psServerCookie->papsServerSync[i], &ui32SyncAddr);
+ sUFOData.sUpdate.ui32FWAddr = ui32SyncAddr;
+ sUFOData.sUpdate.ui32OldValue = ServerSyncGetValue(psServerCookie->papsServerSync[i]);
+ sUFOData.sUpdate.ui32NewValue = psServerCookie->paui32ServerUpdateValue[i];
+ RGX_HWPERF_HOST_UFO(psServerCookie->papsServerSync[i]->psDevNode->pvDevice,
+ RGX_HWPERF_UFO_EV_UPDATE, &sUFOData, IMG_TRUE);
+ bDidUpdates = IMG_TRUE;
+ }
+
+ ServerSyncCompleteOp(psServerCookie->papsServerSync[i],
+ bUpdate,
+ psServerCookie->paui32ServerUpdateValue[i]);
+ }
+
+ psServerCookie->bActive = IMG_FALSE;
+
+ return bDidUpdates;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie)
+{
+ IMG_BOOL bReady;
+
+ PVRSRVSyncPrimOpReadyKM(psServerCookie, &bReady);
+
+ /* Check the client is playing ball */
+ if (!bReady)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: sync op still not ready", __FUNCTION__));
+
+ return PVRSRV_ERROR_BAD_SYNC_STATE;
+ }
+
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_COMPLETE, psServerCookie);
+
+ if (_SyncPrimOpComplete(psServerCookie))
+ {
+ PVRSRVCheckStatus(NULL);
+ }
+
+ return PVRSRV_OK;
+}
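+
+/* Hedged lifecycle sketch for a server op cookie, assembled from the
+ * functions above (arguments elided, error handling omitted):
+ *
+ *   SERVER_OP_COOKIE *psCookie;
+ *
+ *   PVRSRVSyncPrimOpCreateKM(..., &psCookie);  // allocate cookie, ref syncs
+ *   PVRSRVSyncPrimOpTakeKM(psCookie, ...);     // snapshot fence/update values
+ *   // ... wait until PVRSRVSyncPrimOpReadyKM reports ready ...
+ *   PVRSRVSyncPrimOpCompleteKM(psCookie);      // apply the updates
+ *   PVRSRVSyncPrimOpDestroyKM(psCookie);       // drop refs and free
+ */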
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie)
+{
+ IMG_UINT32 i;
+
+ /* If the operation is still active then check if it's finished yet */
+ if (psServerCookie->bActive)
+ {
+ if (PVRSRVSyncPrimOpCompleteKM(psServerCookie) == PVRSRV_ERROR_BAD_SYNC_STATE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Not ready, ask for retry", __FUNCTION__));
+ return PVRSRV_ERROR_RETRY;
+ }
+ }
+
+	/* Drop our references on the sync blocks and server syncs */
+ for (i = 0; i < psServerCookie->ui32SyncBlockCount; i++)
+ {
+ _SyncPrimitiveBlockUnref(psServerCookie->papsSyncPrimBlock[i]);
+ }
+
+ for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+ {
+ _ServerSyncUnref(psServerCookie->papsServerSync[i]);
+ }
+
+ HTBLOGK(HTB_SF_SYNC_PRIM_OP_DESTROY, psServerCookie);
+ OSFreeMem(psServerCookie);
+ return PVRSRV_OK;
+}
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset, IMG_UINT32 ui32Value)
+{
+	/*
+		We might be asked to PDump sync state outside of the capture range
+		(e.g. texture uploads), so make this continuous.
+	*/
+ DevmemPDumpLoadMemValue32(psSyncBlk->psMemDesc,
+ ui32Offset,
+ ui32Value,
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+	/*
+		We might be asked to PDump sync state outside of the capture range
+		(e.g. texture uploads), so make this continuous.
+	*/
+ DevmemPDumpLoadMem(psSyncBlk->psMemDesc,
+ ui32Offset,
+ sizeof(IMG_UINT32),
+ PDUMP_FLAGS_CONTINUOUS);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ DevmemPDumpDevmemPol32(psSyncBlk->psMemDesc,
+ ui32Offset,
+ ui32Value,
+ ui32Mask,
+ eOperator,
+ ui32PDumpFlags);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags)
+{
+ IMG_UINT32 i;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ if (!psServerCookie->bActive)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Operation cookie not active (no take operation performed)", __FUNCTION__));
+
+ eError = PVRSRV_ERROR_BAD_SYNC_STATE;
+ goto e0;
+ }
+
+ /* PDump POL on the client syncs */
+ for (i = 0; i < psServerCookie->ui32ClientSyncCount; i++)
+ {
+ if (psServerCookie->paui32Flags[i] & PVRSRV_CLIENT_SYNC_PRIM_OP_CHECK)
+ {
+ IMG_UINT32 ui32BlockIndex = psServerCookie->paui32SyncBlockIndex[i];
+ IMG_UINT32 ui32Index = psServerCookie->paui32Index[i];
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psServerCookie->papsSyncPrimBlock[ui32BlockIndex];
+
+ PVRSRVSyncPrimPDumpPolKM(psSyncBlock,
+ ui32Index*sizeof(IMG_UINT32),
+ psServerCookie->paui32FenceValue[i],
+ 0xFFFFFFFFU,
+ eOperator,
+ ui32PDumpFlags);
+ }
+ }
+
+ /* PDump POL on the server syncs */
+ for (i = 0; i < psServerCookie->ui32ServerSyncCount; i++)
+ {
+ SERVER_SYNC_PRIMITIVE *psServerSync = psServerCookie->papsServerSync[i];
+ IMG_UINT32 ui32FenceValue = psServerCookie->paui32ServerFenceValue[i];
+
+ SyncPrimPDumpPol(psServerSync->psSync,
+ ui32FenceValue,
+ 0xFFFFFFFFU,
+ PDUMP_POLL_OPERATOR_EQUAL,
+ ui32PDumpFlags);
+ }
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ DevmemPDumpCBP(psSyncBlk->psMemDesc,
+ ui32Offset,
+ uiWriteOffset,
+ uiPacketSize,
+ uiBufferSize);
+ return PVRSRV_OK;
+}
+#endif
+
+/* SyncRegisterConnection */
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData)
+{
+ SYNC_CONNECTION_DATA *psSyncConnectionData;
+ PVRSRV_ERROR eError;
+
+ psSyncConnectionData = OSAllocMem(sizeof(SYNC_CONNECTION_DATA));
+ if (psSyncConnectionData == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto fail_alloc;
+ }
+
+ eError = OSLockCreate(&psSyncConnectionData->hLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lockcreate;
+ }
+ dllist_init(&psSyncConnectionData->sListHead);
+ OSAtomicWrite(&psSyncConnectionData->sRefCount, 1);
+
+ *ppsSyncConnectionData = psSyncConnectionData;
+ return PVRSRV_OK;
+
+fail_lockcreate:
+ OSFreeMem(psSyncConnectionData);
+fail_alloc:
+ PVR_ASSERT(eError != PVRSRV_OK);
+ return eError;
+}
+
+/* SyncUnregisterConnection */
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ _SyncConnectionUnref(psSyncConnectionData);
+}
+
+void SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *psSyncConnectionData)
+{
+ DLLIST_NODE *psNode, *psNext;
+
+ OSLockAcquire(psSyncConnectionData->hLock);
+
+ PDUMPCOMMENT("Dump client Sync Prim state");
+ dllist_foreach_node(&psSyncConnectionData->sListHead, psNode, psNext)
+ {
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock =
+ IMG_CONTAINER_OF(psNode, SYNC_PRIMITIVE_BLOCK, sConnectionNode);
+
+ DevmemPDumpLoadMem(psSyncBlock->psMemDesc,
+ 0,
+ psSyncBlock->ui32BlockSize,
+ PDUMP_FLAGS_CONTINUOUS);
+ }
+
+ OSLockRelease(psSyncConnectionData->hLock);
+}
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+ IMG_CHAR * pszSyncInfo, size_t len)
+{
+ DLLIST_NODE *psNode, *psNext;
+ IMG_INT iEnd;
+ IMG_BOOL bFound = IMG_FALSE;
+
+ if (!pszSyncInfo)
+ {
+ return;
+ }
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+ pszSyncInfo[0] = '\0';
+
+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+ {
+ struct SYNC_RECORD *psSyncRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+ if ((psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset) == ui32FwAddr
+ && SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType
+ && psSyncRec->psServerSyncPrimBlock
+ && psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+ )
+ {
+ IMG_UINT32 *pui32SyncAddr;
+ pui32SyncAddr = psSyncRec->psServerSyncPrimBlock->pui32LinAddr
+ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+ iEnd = OSSNPrintf(pszSyncInfo, len, "Cur=0x%08x %s:%05u (%s)",
+ *pui32SyncAddr,
+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+ psSyncRec->uiPID,
+ psSyncRec->szClassName
+ );
+ if (iEnd >= 0 && iEnd < len)
+ {
+ pszSyncInfo[iEnd] = '\0';
+ }
+ bFound = IMG_TRUE;
+ break;
+ }
+ }
+
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+	if (!bFound && (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT))
+ {
+ OSSNPrintf(pszSyncInfo, len, "(Record may be lost)");
+ }
+}
+
+#define NS_IN_S (1000000000UL)
+static void _SyncRecordPrint(struct SYNC_RECORD *psSyncRec,
+ IMG_UINT64 ui64TimeNow,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ SYNC_PRIMITIVE_BLOCK *psSyncBlock = psSyncRec->psServerSyncPrimBlock;
+
+ if (SYNC_RECORD_TYPE_UNKNOWN != psSyncRec->eRecordType)
+ {
+ IMG_UINT64 ui64DeltaS;
+ IMG_UINT32 ui32DeltaF;
+ IMG_UINT64 ui64Delta = ui64TimeNow - psSyncRec->ui64OSTime;
+ ui64DeltaS = OSDivide64(ui64Delta, NS_IN_S, &ui32DeltaF);
+
+ if (psSyncBlock && psSyncBlock->pui32LinAddr)
+ {
+ IMG_UINT32 *pui32SyncAddr;
+ pui32SyncAddr = psSyncBlock->pui32LinAddr
+ + (psSyncRec->ui32SyncOffset/sizeof(IMG_UINT32));
+
+ PVR_DUMPDEBUG_LOG("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=0x%08x (%s)",
+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+ psSyncRec->uiPID,
+ ui64DeltaS, ui32DeltaF,
+ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+ *pui32SyncAddr,
+ psSyncRec->szClassName
+ );
+ }
+ else
+ {
+ PVR_DUMPDEBUG_LOG("\t%s %05u %05llu.%09u FWAddr=0x%08x Val=<null_ptr> (%s)",
+ ((SYNC_RECORD_TYPE_SERVER==psSyncRec->eRecordType)?"Server":"Client"),
+ psSyncRec->uiPID,
+ ui64DeltaS, ui32DeltaF,
+ (psSyncRec->ui32FwBlockAddr+psSyncRec->ui32SyncOffset),
+ psSyncRec->szClassName
+ );
+ }
+ }
+}
+
+static void _SyncRecordRequest(PVRSRV_DBGREQ_HANDLE hDebugRequestHandle,
+ IMG_UINT32 ui32VerbLevel,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile)
+{
+ PVRSRV_DEVICE_NODE *psDevNode = (PVRSRV_DEVICE_NODE *)hDebugRequestHandle;
+ IMG_UINT64 ui64TimeNowS;
+ IMG_UINT32 ui32TimeNowF;
+ IMG_UINT64 ui64TimeNow = OSClockns64();
+ DLLIST_NODE *psNode, *psNext;
+
+ ui64TimeNowS = OSDivide64(ui64TimeNow, NS_IN_S, &ui32TimeNowF);
+
+ if (ui32VerbLevel == DEBUG_REQUEST_VERBOSITY_MEDIUM)
+ {
+ IMG_UINT32 i;
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+
+ PVR_DUMPDEBUG_LOG("Dumping all allocated syncs. Allocated: %u High watermark: %u @ %05llu.%09u",
+ psDevNode->ui32SyncServerRecordCount,
+ psDevNode->ui32SyncServerRecordCountHighWatermark,
+ ui64TimeNowS,
+ ui32TimeNowF);
+		if (psDevNode->ui32SyncServerRecordCountHighWatermark == SYNC_RECORD_LIMIT)
+ {
+ PVR_DUMPDEBUG_LOG("Warning: Record limit (%u) was reached. Some sync checkpoints may not have been recorded in the debug information.",
+ SYNC_RECORD_LIMIT);
+ }
+
+ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
+
+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+ {
+ struct SYNC_RECORD *psSyncRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+ _SyncRecordPrint(psSyncRec, ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+
+ PVR_DUMPDEBUG_LOG("Dumping all recently freed syncs @ %05llu.%09u", ui64TimeNowS, ui32TimeNowF);
+ PVR_DUMPDEBUG_LOG("\t%-6s %-5s %-15s %-17s %-14s (%s)",
+ "Type", "PID", "Time Delta (s)", "Address", "Value", "Annotation");
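+		/* Walk the freed-record ring from most recently freed to oldest:
+		 * uiSyncServerRecordFreeIdx points at the next slot to be written,
+		 * so start one behind it and wrap until we come back round or hit
+		 * an empty slot. */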
+ for (i = DECREMENT_WITH_WRAP(psDevNode->uiSyncServerRecordFreeIdx, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN);
+ i != psDevNode->uiSyncServerRecordFreeIdx;
+ i = DECREMENT_WITH_WRAP(i, PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN))
+ {
+ if (psDevNode->apsSyncServerRecordsFreed[i])
+ {
+ _SyncRecordPrint(psDevNode->apsSyncServerRecordsFreed[i],
+ ui64TimeNow, pfnDumpDebugPrintf, pvDumpDebugFile);
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+ }
+}
+#undef NS_IN_S
+
+static PVRSRV_ERROR SyncRecordListInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ psDevNode->ui32SyncServerRecordCount = 0;
+ psDevNode->ui32SyncServerRecordCountHighWatermark = 0;
+
+ eError = OSLockCreate(&psDevNode->hSyncServerRecordLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+ dllist_init(&psDevNode->sSyncServerRecordList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerRecordNotify,
+ psDevNode,
+ _SyncRecordRequest,
+ DEBUG_REQUEST_SERVERSYNC,
+ psDevNode);
+
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_dbg_register;
+ }
+
+ return PVRSRV_OK;
+
+fail_dbg_register:
+ OSLockDestroy(psDevNode->hSyncServerRecordLock);
+fail_lock_create:
+ return eError;
+}
+
+static void SyncRecordListDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ DLLIST_NODE *psNode, *psNext;
+ int i;
+
+ OSLockAcquire(psDevNode->hSyncServerRecordLock);
+ dllist_foreach_node(&psDevNode->sSyncServerRecordList, psNode, psNext)
+ {
+ struct SYNC_RECORD *pSyncRec =
+ IMG_CONTAINER_OF(psNode, struct SYNC_RECORD, sNode);
+
+ dllist_remove_node(psNode);
+ OSFreeMem(pSyncRec);
+ }
+
+ for (i = 0; i < PVRSRV_FULL_SYNC_TRACKING_HISTORY_LEN; i++)
+ {
+ if (psDevNode->apsSyncServerRecordsFreed[i])
+ {
+ OSFreeMem(psDevNode->apsSyncServerRecordsFreed[i]);
+ psDevNode->apsSyncServerRecordsFreed[i] = NULL;
+ }
+ }
+ OSLockRelease(psDevNode->hSyncServerRecordLock);
+
+ if (psDevNode->hSyncServerRecordNotify)
+ {
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerRecordNotify);
+ }
+ OSLockDestroy(psDevNode->hSyncServerRecordLock);
+}
+#endif /* #if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING) */
+
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRV_ERROR eError;
+
+ eError = OSLockCreate(&psDevNode->hSyncServerListLock, LOCK_TYPE_NONE);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_lock_create;
+ }
+ dllist_init(&psDevNode->sSyncServerSyncsList);
+
+ eError = PVRSRVRegisterDbgRequestNotify(&psDevNode->hSyncServerNotify,
+ psDevNode,
+ _ServerSyncDebugRequest,
+ DEBUG_REQUEST_SERVERSYNC,
+ psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_dbg_register;
+ }
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ eError = SyncRecordListInit(psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+ goto fail_record_list;
+ }
+#endif
+
+ return PVRSRV_OK;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+fail_record_list:
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+#endif
+fail_dbg_register:
+ OSLockDestroy(psDevNode->hSyncServerListLock);
+fail_lock_create:
+ return eError;
+}
+
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode)
+{
+ PVRSRVUnregisterDbgRequestNotify(psDevNode->hSyncServerNotify);
+ psDevNode->hSyncServerNotify = NULL;
+
+ OSLockDestroy(psDevNode->hSyncServerListLock);
+ psDevNode->hSyncServerListLock = NULL;
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+ SyncRecordListDeinit(psDevNode);
+#endif
+}
+
+PVRSRV_ERROR ServerSyncInitOnce(PVRSRV_DATA *psPVRSRVData)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ eError = OSLockCreate(&ghServerSyncLock, LOCK_TYPE_NONE);
+
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "%s: Failed to create server sync lock", __func__));
+ goto err;
+ }
+err:
+#endif
+ return eError;
+}
+
+void ServerSyncDeinitOnce(PVRSRV_DATA *psPVRSRVData)
+{
+ PVR_UNREFERENCED_PARAMETER(psPVRSRVData);
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSLockDestroy(ghServerSyncLock);
+#endif
+}
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+void PVRSRVLockServerSync(void)
+{
+ OSLockAcquire(ghServerSyncLock);
+}
+
+void PVRSRVUnlockServerSync(void)
+{
+ OSLockRelease(ghServerSyncLock);
+}
+#endif
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/sync_server.h b/drivers/gpu/drm/img-rogue/1.10/sync_server.h
new file mode 100644
index 00000000000000..5750b80e69d3c2
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/sync_server.h
@@ -0,0 +1,437 @@
+/**************************************************************************/ /*!
+@File
+@Title Server side synchronisation interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Describes the server side synchronisation functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#include "img_types.h"
+#include "pvrsrv.h"
+#include "device.h"
+#include "devicemem.h"
+#include "pdump.h"
+#include "pvrsrv_error.h"
+#include "connection_server.h"
+
+#ifndef _SYNC_SERVER_H_
+#define _SYNC_SERVER_H_
+
+typedef struct _SERVER_OP_COOKIE_ SERVER_OP_COOKIE;
+typedef struct _SERVER_SYNC_PRIMITIVE_ SERVER_SYNC_PRIMITIVE;
+typedef struct _SYNC_PRIMITIVE_BLOCK_ SYNC_PRIMITIVE_BLOCK;
+typedef struct _SERVER_SYNC_EXPORT_ SERVER_SYNC_EXPORT;
+typedef struct _SYNC_CONNECTION_DATA_ SYNC_CONNECTION_DATA;
+typedef struct SYNC_RECORD* SYNC_RECORD_HANDLE;
+
+typedef struct _SYNC_ADDR_LIST_
+{
+ IMG_UINT32 ui32NumSyncs;
+ PRGXFWIF_UFO_ADDR *pasFWAddrs;
+ IMG_UINT32 ui32State;
+} SYNC_ADDR_LIST;
+
+PVRSRV_ERROR
+SyncPrimitiveBlockToFWAddr(SYNC_PRIMITIVE_BLOCK *psSyncPrimBlock,
+ IMG_UINT32 ui32Offset,
+ PRGXFWIF_UFO_ADDR *psAddrOut);
+
+void
+SyncAddrListInit(SYNC_ADDR_LIST *psList);
+
+void
+SyncAddrListDeinit(SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+SyncAddrListPopulate(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumSyncs,
+ SYNC_PRIMITIVE_BLOCK **apsSyncPrimBlock,
+ IMG_UINT32 *paui32SyncOffset);
+
+PVRSRV_ERROR
+SyncAddrListAppendSyncPrim(SYNC_ADDR_LIST *psList,
+ PVRSRV_CLIENT_SYNC_PRIM *psSyncPrim);
+PVRSRV_ERROR
+SyncAddrListAppendCheckpoints(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListAppendAndDeRefCheckpoints(SYNC_ADDR_LIST *psList,
+ IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+void
+SyncAddrListDeRefCheckpoints(IMG_UINT32 ui32NumCheckpoints,
+ PSYNC_CHECKPOINT *apsSyncCheckpoint);
+
+PVRSRV_ERROR
+SyncAddrListRollbackCheckpoints(PVRSRV_DEVICE_NODE *psDevNode, SYNC_ADDR_LIST *psList);
+
+PVRSRV_ERROR
+PVRSRVAllocSyncPrimitiveBlockKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SYNC_PRIMITIVE_BLOCK **ppsSyncBlk,
+ IMG_UINT32 *puiSyncPrimVAddr,
+ IMG_UINT32 *puiSyncPrimBlockSize,
+ PMR **ppsSyncPMR);
+
+PVRSRV_ERROR
+PVRSRVExportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk,
+ DEVMEM_EXPORTCOOKIE **psExportCookie);
+
+PVRSRV_ERROR
+PVRSRVUnexportSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVFreeSyncPrimitiveBlockKM(SYNC_PRIMITIVE_BLOCK *ppsSyncBlk);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimSetKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Index,
+ IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVServerSyncPrimSetKM(SERVER_SYNC_PRIMITIVE *psServerSync, IMG_UINT32 ui32Value);
+
+#if defined(SUPPORT_INSECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerExportKM(SERVER_SYNC_PRIMITIVE *psSync,
+ SERVER_SYNC_EXPORT **ppsExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_EXPORT *psExport,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+#if defined(SUPPORT_SECURE_EXPORT)
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureExportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE * psDevNode,
+ SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_SECURE_TYPE *phSecure,
+ SERVER_SYNC_EXPORT **ppsExport,
+ CONNECTION_DATA **ppsSecureConnection);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureUnexportKM(SERVER_SYNC_EXPORT *psExport);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimServerSecureImportKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_SECURE_TYPE hSecure,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr);
+#endif
+
+IMG_UINT32 PVRSRVServerSyncRequesterRegisterKM(IMG_UINT32 *pui32SyncRequesterID);
+void PVRSRVServerSyncRequesterUnregisterKM(IMG_UINT32 ui32SyncRequesterID);
+
+PVRSRV_ERROR
+PVRSRVSyncAllocEventKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32FWAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncFreeEventKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_UINT32 ui32FWAddr);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordAddKM(CONNECTION_DATA *psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SYNC_RECORD_HANDLE *phRecord,
+ SYNC_PRIMITIVE_BLOCK *hServerSyncPrimBlock,
+ IMG_UINT32 ui32FwBlockAddr,
+ IMG_UINT32 ui32SyncOffset,
+ IMG_BOOL bServerSync,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *pszClassName);
+
+PVRSRV_ERROR
+PVRSRVSyncRecordRemoveByHandleKM(
+ SYNC_RECORD_HANDLE hRecord);
+
+PVRSRV_ERROR
+PVRSRVServerSyncAllocKM(CONNECTION_DATA * psConnection,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ SERVER_SYNC_PRIMITIVE **ppsSync,
+ IMG_UINT32 *pui32SyncPrimVAddr,
+ IMG_UINT32 ui32ClassNameSize,
+ const IMG_CHAR *szClassName);
+PVRSRV_ERROR
+PVRSRVServerSyncFreeKM(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+PVRSRVServerSyncGetStatusKM(IMG_UINT32 ui32SyncCount,
+ SERVER_SYNC_PRIMITIVE **papsSyncs,
+ IMG_UINT32 *pui32UID,
+ IMG_UINT32 *pui32FWAddr,
+ IMG_UINT32 *pui32CurrentOp,
+ IMG_UINT32 *pui32NextOp);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32SyncRequesterID,
+ IMG_BOOL bUpdate,
+ IMG_BOOL *pbFenceRequired);
+PVRSRV_ERROR
+PVRSRVServerSyncQueueSWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue,
+ IMG_UINT32 ui32SyncRequesterID,
+ IMG_BOOL bUpdate,
+ IMG_BOOL *pbFenceRequired);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue);
+
+PVRSRV_ERROR
+PVRSRVServerSyncQueueHWOpKM_NoGlobalLock(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bUpdate,
+ IMG_UINT32 *pui32FenceValue,
+ IMG_UINT32 *pui32UpdateValue);
+
+IMG_BOOL
+ServerSyncFenceIsMet(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_UINT32 ui32FenceValue);
+
+void
+ServerSyncCompleteOp(SERVER_SYNC_PRIMITIVE *psSync,
+ IMG_BOOL bDoUpdate,
+ IMG_UINT32 ui32UpdateValue);
+
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCreateKM(IMG_UINT32 ui32SyncBlockCount,
+ SYNC_PRIMITIVE_BLOCK **papsSyncPrimBlock,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32SyncBlockIndex,
+ IMG_UINT32 *paui32Index,
+ IMG_UINT32 ui32ServerSyncCount,
+ SERVER_SYNC_PRIMITIVE **papsServerSync,
+ SERVER_OP_COOKIE **ppsServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpTakeKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_UINT32 ui32ClientSyncCount,
+ IMG_UINT32 *paui32Flags,
+ IMG_UINT32 *paui32FenceValue,
+ IMG_UINT32 *paui32UpdateValue,
+ IMG_UINT32 ui32ServerSyncCount,
+ IMG_UINT32 *paui32ServerFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpReadyKM(SERVER_OP_COOKIE *psServerCookie,
+ IMG_BOOL *pbReady);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpCompleteKM(SERVER_OP_COOKIE *psServerCookie);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpDestroyKM(SERVER_OP_COOKIE *psServerCookie);
+
+IMG_UINT32 ServerSyncGetId(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_ERROR
+ServerSyncGetFWAddr(SERVER_SYNC_PRIMITIVE *psSync, IMG_UINT32 *pui32SyncAddr);
+
+IMG_UINT32 ServerSyncGetValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+IMG_UINT32 ServerSyncGetNextValue(SERVER_SYNC_PRIMITIVE *psSync);
+
+PVRSRV_DEVICE_NODE* ServerSyncGetDeviceNode(SERVER_SYNC_PRIMITIVE *psSync);
+
+#if defined(PVRSRV_ENABLE_FULL_SYNC_TRACKING)
+void SyncRecordLookup(PVRSRV_DEVICE_NODE *psDevNode, IMG_UINT32 ui32FwAddr,
+ IMG_CHAR * pszSyncInfo, size_t len);
+#endif
+
+void ServerSyncDumpPending(void);
+
+PVRSRV_ERROR SyncRegisterConnection(SYNC_CONNECTION_DATA **ppsSyncConnectionData);
+void SyncUnregisterConnection(SYNC_CONNECTION_DATA *ppsSyncConnectionData);
+void SyncConnectionPDumpSyncBlocks(SYNC_CONNECTION_DATA *ppsSyncConnectionData);
+
+/*!
+******************************************************************************
+@Function ServerSyncInit
+
+@Description Per-device initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR ServerSyncInit(PVRSRV_DEVICE_NODE *psDevNode);
+void ServerSyncDeinit(PVRSRV_DEVICE_NODE *psDevNode);
+
+/*!
+******************************************************************************
+@Function ServerSyncInitOnce
+
+@Description One-time initialisation for the ServerSync module
+******************************************************************************/
+PVRSRV_ERROR ServerSyncInitOnce(PVRSRV_DATA *psPVRSRVData);
+void ServerSyncDeinitOnce(PVRSRV_DATA *psPVRSRVData);
+
+#if !defined(PVRSRV_USE_BRIDGE_LOCK)
+/*!
+******************************************************************************
+@Function PVRSRVLockServerSync
+
+@Description Acquire a global lock to maintain server sync consistency
+******************************************************************************/
+void PVRSRVLockServerSync(void);
+/*!
+******************************************************************************
+@Function PVRSRVUnlockServerSync
+
+@Description Release the global server sync lock
+******************************************************************************/
+void PVRSRVUnlockServerSync(void);
+#endif
+
+#if defined(PDUMP)
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T ui32PDumpFlags);
+
+PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize);
+
+#else /* PDUMP */
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpValueKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpValueKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpPolKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT32 ui32Offset,
+ IMG_UINT32 ui32Value, IMG_UINT32 ui32Mask,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(ui32Value);
+ PVR_UNREFERENCED_PARAMETER(ui32Mask);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimOpPDumpPolKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimOpPDumpPolKM(SERVER_OP_COOKIE *psServerCookie,
+ PDUMP_POLL_OPERATOR eOperator,
+ PDUMP_FLAGS_T uiDumpFlags)
+{
+ PVR_UNREFERENCED_PARAMETER(psServerCookie);
+ PVR_UNREFERENCED_PARAMETER(eOperator);
+ PVR_UNREFERENCED_PARAMETER(uiDumpFlags);
+ return PVRSRV_OK;
+}
+
+#ifdef INLINE_IS_PRAGMA
+#pragma inline(PVRSRVSyncPrimPDumpCBPKM)
+#endif
+static INLINE PVRSRV_ERROR
+PVRSRVSyncPrimPDumpCBPKM(SYNC_PRIMITIVE_BLOCK *psSyncBlk, IMG_UINT64 ui32Offset,
+ IMG_UINT64 uiWriteOffset, IMG_UINT64 uiPacketSize,
+ IMG_UINT64 uiBufferSize)
+{
+ PVR_UNREFERENCED_PARAMETER(psSyncBlk);
+ PVR_UNREFERENCED_PARAMETER(ui32Offset);
+ PVR_UNREFERENCED_PARAMETER(uiWriteOffset);
+ PVR_UNREFERENCED_PARAMETER(uiPacketSize);
+ PVR_UNREFERENCED_PARAMETER(uiBufferSize);
+ return PVRSRV_OK;
+}
+#endif /* PDUMP */
+#endif /*_SYNC_SERVER_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/dma_support.c b/drivers/gpu/drm/img-rogue/1.10/system/dma_support.c
new file mode 100644
index 00000000000000..7b2a6a5afa08fb
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/dma_support.c
@@ -0,0 +1,538 @@
+/*************************************************************************/ /*!
+@File dma_support.c
+@Title System DMA support
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This provides a contiguous memory allocator (i.e. DMA allocator);
+ these APIs are used for allocation/ioremapping (DMA/PA <-> CPU/VA)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#if defined(LINUX)
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-mapping.h>
+#include <asm-generic/getorder.h>
+#endif
+
+#include "allocmem.h"
+#include "dma_support.h"
+#include "kernel_compatibility.h"
+
+#define DMA_MAX_IOREMAP_ENTRIES 2
+static IMG_BOOL gbEnableDmaIoRemapping = IMG_FALSE;
+static DMA_ALLOC gsDmaIoRemapArray[DMA_MAX_IOREMAP_ENTRIES] = {{0}};
+
+#if defined(LINUX)
+static void*
+SysDmaAcquireKernelAddress(struct page *page, IMG_UINT64 ui64Size, void *pvOSDevice)
+{
+ IMG_UINT32 uiIdx;
+ PVRSRV_ERROR eError;
+ void *pvVirtAddr = NULL;
+ IMG_UINT32 ui32PgCount = (IMG_UINT32)(ui64Size >> OSGetPageShift());
+ PVRSRV_DEVICE_NODE *psDevNode = OSAllocZMemNoStats(sizeof(*psDevNode));
+ PVRSRV_DEVICE_CONFIG *psDevConfig = OSAllocZMemNoStats(sizeof(*psDevConfig));
+ struct page **pagearray = OSAllocZMemNoStats(ui32PgCount * sizeof(struct page *));
+#if defined(CONFIG_ARM64)
+ pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
+#else
+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+
+ /* Validate all required dynamic tmp buffer allocations */
+ if (psDevNode == NULL || psDevConfig == NULL || pagearray == NULL)
+ {
+ if (psDevNode)
+ {
+ OSFreeMem(psDevNode);
+ }
+
+ if (psDevConfig)
+ {
+ OSFreeMem(psDevConfig);
+ }
+
+ if (pagearray)
+ {
+ OSFreeMem(pagearray);
+ }
+
+ goto e0;
+ }
+
+ /* Fake psDevNode->psDevConfig->pvOSDevice */
+ psDevConfig->pvOSDevice = pvOSDevice;
+ psDevNode->psDevConfig = psDevConfig;
+
+ /* Evict any page data contents from d-cache */
+ eError = OSCPUOperation(PVRSRV_CACHE_OP_FLUSH);
+ for (uiIdx = 0; uiIdx < ui32PgCount; uiIdx++)
+ {
+ /* Prepare array required for vmap */
+ pagearray[uiIdx] = &page[uiIdx];
+
+ if (eError != PVRSRV_OK)
+ {
+#if defined(CONFIG_64BIT)
+ void *pvVirtStart = kmap(&page[uiIdx]);
+ void *pvVirtEnd = pvVirtStart + ui64Size;
+ IMG_CPU_PHYADDR sCPUPhysStart = {page_to_phys(&page[uiIdx])};
+ IMG_CPU_PHYADDR sCPUPhysEnd = {sCPUPhysStart.uiAddr + ui64Size};
+ /* all pages have a kernel linear address, flush entire range */
+#else
+ void *pvVirtStart = kmap(&page[uiIdx]);
+ void *pvVirtEnd = pvVirtStart + PAGE_SIZE;
+ IMG_CPU_PHYADDR sCPUPhysStart = {page_to_phys(&page[uiIdx])};
+ IMG_CPU_PHYADDR sCPUPhysEnd = {sCPUPhysStart.uiAddr + PAGE_SIZE};
+ /* pages might be from HIGHMEM, need to kmap/flush per page */
+#endif
+
+			/* Fall back to a range-based d-cache flush */
+ OSCPUCacheInvalidateRangeKM(psDevNode,
+ pvVirtStart, pvVirtEnd,
+ sCPUPhysStart, sCPUPhysEnd);
+
+#if defined(CONFIG_64BIT)
+ eError = PVRSRV_OK;
+#else
+ kunmap(&page[uiIdx]);
+#endif
+ }
+ }
+
+ /* Remap pages into VMALLOC space */
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ pvVirtAddr = vmap(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot);
+#else
+ pvVirtAddr = vm_map_ram(pagearray, ui32PgCount, VM_READ | VM_WRITE, prot);
+#endif
+
+	/* Clean up tmp buffers */
+ OSFreeMem(psDevConfig);
+ OSFreeMem(psDevNode);
+ OSFreeMem(pagearray);
+
+e0:
+ return pvVirtAddr;
+}
+
+static void SysDmaReleaseKernelAddress(void *pvVirtAddr, IMG_UINT64 ui64Size)
+{
+#if !defined(CONFIG_64BIT) || defined(PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS)
+ vunmap(pvVirtAddr);
+#else
+ vm_unmap_ram(pvVirtAddr, ui64Size >> OSGetPageShift());
+#endif
+}
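+
+/* Design note (rationale as we understand it): vm_map_ram() is the faster
+ * mapping path on 64-bit builds since it uses per-CPU allocation and lazy
+ * TLB flushing, while vmap()/vunmap() remains the fallback for 32-bit builds
+ * or when PVRSRV_FORCE_SLOWER_VMAP_ON_64BIT_BUILDS is defined; note that
+ * vm_unmap_ram() must be passed the same page count that was mapped.
+ */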
+#endif
+
+/*!
+******************************************************************************
+ @Function SysDmaAllocMem
+
+ @Description Allocates physically contiguous memory
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_
+ error code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ struct page *page;
+
+ if (psDmaAlloc == NULL ||
+ psDmaAlloc->hHandle ||
+ psDmaAlloc->pvVirtAddr ||
+ psDmaAlloc->ui64Size == 0 ||
+ psDmaAlloc->sBusAddr.uiAddr ||
+ psDmaAlloc->pvOSDevice == NULL)
+ {
+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+#if defined(LINUX)
+ psDmaAlloc->hHandle =
+#if defined(CONFIG_L4) || defined(PVR_LINUX_PHYSMEM_SUPPRESS_DMA_AC)
+ NULL;
+#else
+ dma_alloc_coherent((struct device *)psDmaAlloc->pvOSDevice,
+ (size_t) psDmaAlloc->ui64Size,
+ (dma_addr_t *)&psDmaAlloc->sBusAddr.uiAddr,
+ GFP_KERNEL);
+#endif
+ if (psDmaAlloc->hHandle)
+ {
+#if !defined(CONFIG_ARM)
+#if defined(CONFIG_L4)
+ page = pfn_to_page((unsigned long)l4x_phys_to_virt(psDmaAlloc->sBusAddr.uiAddr) >> PAGE_SHIFT);
+#else
+ page = pfn_to_page(psDmaAlloc->sBusAddr.uiAddr >> PAGE_SHIFT);
+#endif
+#else /* !defined(CONFIG_ARM) */
+ page = pfn_to_page(dma_to_pfn((struct device *)psDmaAlloc->pvOSDevice, psDmaAlloc->sBusAddr.uiAddr));
+#endif
+
+ psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(page,
+ psDmaAlloc->ui64Size,
+ psDmaAlloc->pvOSDevice);
+ if (! psDmaAlloc->pvVirtAddr)
+ {
+ /* Not always safe due to possibility of DMA_ATTR_NO_KERNEL_MAPPING */
+ PVR_LOG(("Cannot remap DMA buffer, using cookie VA as buffer VA"));
+ psDmaAlloc->pvVirtAddr = psDmaAlloc->hHandle;
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "Allocated DMA buffer V:0x%llx P:0x%llx S:%llx",
+ (IMG_UINT64)(uintptr_t)psDmaAlloc->pvVirtAddr,
+ psDmaAlloc->sBusAddr.uiAddr,
+ psDmaAlloc->ui64Size));
+ }
+ else if ((page = alloc_pages(GFP_KERNEL, __get_order(psDmaAlloc->ui64Size))))
+ {
+ psDmaAlloc->pvVirtAddr = SysDmaAcquireKernelAddress(page,
+ psDmaAlloc->ui64Size,
+ psDmaAlloc->pvOSDevice);
+ if (! psDmaAlloc->pvVirtAddr)
+ {
+ __free_pages(page, __get_order(psDmaAlloc->ui64Size));
+ goto e0;
+ }
+
+ psDmaAlloc->sBusAddr.uiAddr = page_to_phys(page);
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "Allocated contiguous buffer V:0x%llx P:0x%llx S:%llx",
+ (IMG_UINT64)(uintptr_t)psDmaAlloc->pvVirtAddr,
+ psDmaAlloc->sBusAddr.uiAddr,
+ psDmaAlloc->ui64Size));
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES;
+ }
+#else
+ #error "Provide OS implementation of DMA allocation"
+#endif
+
+e0:
+ PVR_LOGR_IF_FALSE((psDmaAlloc->pvVirtAddr), "DMA/CMA allocation failed", PVRSRV_ERROR_FAILED_TO_ALLOC_PAGES);
+ return eError;
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaFreeMem
+
+ @Description Frees physically contiguous memory
+
+ @Return void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc)
+{
+ if (psDmaAlloc == NULL ||
+ psDmaAlloc->ui64Size == 0 ||
+ psDmaAlloc->pvOSDevice == NULL ||
+ psDmaAlloc->pvVirtAddr == NULL ||
+ psDmaAlloc->sBusAddr.uiAddr == 0)
+ {
+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+ return;
+ }
+
+#if defined(LINUX)
+ if (psDmaAlloc->pvVirtAddr != psDmaAlloc->hHandle)
+ {
+ SysDmaReleaseKernelAddress(psDmaAlloc->pvVirtAddr, psDmaAlloc->ui64Size);
+ }
+
+ if (! psDmaAlloc->hHandle)
+ {
+#if !defined(CONFIG_ARM)
+ struct page *page = pfn_to_page(psDmaAlloc->sBusAddr.uiAddr >> PAGE_SHIFT);
+#else
+ struct page *page = pfn_to_page(dma_to_pfn((struct device *)psDmaAlloc->pvOSDevice, psDmaAlloc->sBusAddr.uiAddr));
+#endif
+
+ __free_pages(page, __get_order(psDmaAlloc->ui64Size));
+ return;
+ }
+
+ dma_free_coherent((struct device *)psDmaAlloc->pvOSDevice,
+ (size_t) psDmaAlloc->ui64Size,
+ psDmaAlloc->hHandle,
+ (dma_addr_t )psDmaAlloc->sBusAddr.uiAddr);
+#else
+ #error "Provide OS implementation of DMA deallocation"
+#endif
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaRegisterForIoRemapping
+
+ @Description Registers DMA_ALLOC for manual I/O remapping
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+ IMG_UINT32 ui32Idx;
+ IMG_BOOL bTabEntryFound = IMG_TRUE;
+ PVRSRV_ERROR eError = PVRSRV_ERROR_TOO_FEW_BUFFERS;
+
+ if (psDmaAlloc == NULL ||
+ psDmaAlloc->ui64Size == 0 ||
+ psDmaAlloc->pvOSDevice == NULL ||
+ psDmaAlloc->pvVirtAddr == NULL ||
+ psDmaAlloc->sBusAddr.uiAddr == 0)
+ {
+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+ {
+ /* Find a free I/O remap table entry */
+ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == NULL)
+ {
+ PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == 0);
+ PVR_ASSERT(gsDmaIoRemapArray[ui32Idx].ui64Size == 0);
+ break;
+ }
+ }
+
+ if (ui32Idx >= DMA_MAX_IOREMAP_ENTRIES)
+ {
+ bTabEntryFound = IMG_FALSE;
+ }
+
+ if (bTabEntryFound)
+ {
+ IMG_BOOL bSameVAddr, bSamePAddr, bSameSize;
+
+ bSamePAddr = gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr == psDmaAlloc->sBusAddr.uiAddr;
+ bSameVAddr = gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr;
+ bSameSize = gsDmaIoRemapArray[ui32Idx].ui64Size == psDmaAlloc->ui64Size;
+
+ if (bSameVAddr)
+ {
+ if (bSamePAddr && bSameSize)
+ {
+ eError = PVRSRV_OK;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_ALREADY_EXISTS;
+ }
+ }
+ else
+ {
+ PVR_ASSERT(bSamePAddr == IMG_FALSE);
+
+ gsDmaIoRemapArray[ui32Idx].ui64Size = psDmaAlloc->ui64Size;
+ gsDmaIoRemapArray[ui32Idx].sBusAddr = psDmaAlloc->sBusAddr;
+ gsDmaIoRemapArray[ui32Idx].pvVirtAddr = psDmaAlloc->pvVirtAddr;
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "DMA: register I/O remap: "\
+ "VA: 0x%p, PA: 0x%llx, Size: 0x%llx",
+ psDmaAlloc->pvVirtAddr,
+ psDmaAlloc->sBusAddr.uiAddr,
+ psDmaAlloc->ui64Size));
+
+ gbEnableDmaIoRemapping = IMG_TRUE;
+ eError = PVRSRV_OK;
+ }
+ }
+
+ return eError;
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaDeregisterForIoRemapping
+
+ @Description Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psDmaAlloc)
+{
+ IMG_UINT32 ui32Idx;
+
+ if (psDmaAlloc == NULL ||
+ psDmaAlloc->ui64Size == 0 ||
+ psDmaAlloc->pvOSDevice == NULL ||
+ psDmaAlloc->pvVirtAddr == NULL ||
+ psDmaAlloc->sBusAddr.uiAddr == 0)
+ {
+ PVR_LOG_IF_FALSE((IMG_FALSE), "Invalid parameter");
+ return;
+ }
+
+ /* Remove the specified entry from the list of I/O remap entries */
+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+ {
+ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr == psDmaAlloc->pvVirtAddr)
+ {
+ gsDmaIoRemapArray[ui32Idx].sBusAddr.uiAddr = 0;
+ gsDmaIoRemapArray[ui32Idx].pvVirtAddr = NULL;
+ gsDmaIoRemapArray[ui32Idx].ui64Size = 0;
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "DMA: deregister I/O remap: "\
+ "VA: 0x%p, PA: 0x%llx, Size: 0x%llx",
+ psDmaAlloc->pvVirtAddr,
+ psDmaAlloc->sBusAddr.uiAddr,
+ psDmaAlloc->ui64Size));
+
+ break;
+ }
+ }
+
+ /* Check whether any other I/O remap entries are still registered */
+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+ {
+ if (gsDmaIoRemapArray[ui32Idx].pvVirtAddr != NULL)
+ {
+ break;
+ }
+ }
+
+ if (ui32Idx == DMA_MAX_IOREMAP_ENTRIES)
+ {
+ /* No entries found so disable remapping */
+ gbEnableDmaIoRemapping = IMG_FALSE;
+ }
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaDevPAddrToCpuVAddr
+
+ @Description Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return IMG_CPU_VIRTADDR on success. Otherwise, NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size)
+{
+ IMG_CPU_VIRTADDR pvDMAVirtAddr = NULL;
+ DMA_ALLOC *psHeapDmaAlloc;
+ IMG_UINT32 ui32Idx;
+
+ if (gbEnableDmaIoRemapping == IMG_FALSE)
+ {
+ return pvDMAVirtAddr;
+ }
+
+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+ {
+ psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+ if (psHeapDmaAlloc->sBusAddr.uiAddr && uiAddr >= psHeapDmaAlloc->sBusAddr.uiAddr)
+ {
+ IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+ IMG_UINT64 uiOffset = uiAddr - psHeapDmaAlloc->sBusAddr.uiAddr;
+
+ if (uiOffset < uiSpan)
+ {
+ PVR_ASSERT((uiOffset+ui64Size-1) < uiSpan);
+ pvDMAVirtAddr = psHeapDmaAlloc->pvVirtAddr + uiOffset;
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "DMA: remap: PA: 0x%llx => VA: 0x%p",
+ uiAddr, pvDMAVirtAddr));
+
+ break;
+ }
+ }
+ }
+
+ return pvDMAVirtAddr;
+}
+
+/*!
+******************************************************************************
+ @Function SysDmaCpuVAddrToDevPAddr
+
+ @Description Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return Non-zero value on success. Otherwise, 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr)
+{
+ IMG_UINT64 uiAddr = 0;
+ DMA_ALLOC *psHeapDmaAlloc;
+ IMG_UINT32 ui32Idx;
+
+ if (gbEnableDmaIoRemapping == IMG_FALSE)
+ {
+ return uiAddr;
+ }
+
+ for (ui32Idx = 0; ui32Idx < DMA_MAX_IOREMAP_ENTRIES; ++ui32Idx)
+ {
+ psHeapDmaAlloc = &gsDmaIoRemapArray[ui32Idx];
+ if (psHeapDmaAlloc->pvVirtAddr && pvDMAVirtAddr >= psHeapDmaAlloc->pvVirtAddr)
+ {
+ IMG_UINT64 uiSpan = psHeapDmaAlloc->ui64Size;
+ IMG_UINT64 uiOffset = pvDMAVirtAddr - psHeapDmaAlloc->pvVirtAddr;
+
+ if (uiOffset < uiSpan)
+ {
+ uiAddr = psHeapDmaAlloc->sBusAddr.uiAddr + uiOffset;
+
+ PVR_DPF((PVR_DBG_MESSAGE,
+ "DMA: remap: VA: 0x%p => PA: 0x%llx",
+ pvDMAVirtAddr, uiAddr));
+
+ break;
+ }
+ }
+ }
+
+ return uiAddr;
+}
+
+/******************************************************************************
+ End of file (dma_support.c)
+******************************************************************************/
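To illustrate the table-lookup arithmetic implemented by the two translation routines above, a hypothetical round trip; the window base and size are assumed values, registered earlier via SysDmaRegisterForIoRemapping():

/* Hypothetical: assumes a registered window at PA 0x80000000, size 0x10000 */
static IMG_BOOL ExampleDmaRoundTrip(void)
{
	IMG_CPU_VIRTADDR pvVA = SysDmaDevPAddrToCpuVAddr(0x80001000ULL, 16);

	if (pvVA == NULL)
	{
		return IMG_FALSE; /* address falls outside all registered windows */
	}

	/* The inverse lookup recovers the original physical address */
	return (SysDmaCpuVAddrToDevPAddr(pvVA) == 0x80001000ULL) ? IMG_TRUE : IMG_FALSE;
}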
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/dma_support.h b/drivers/gpu/drm/img-rogue/1.10/system/dma_support.h
new file mode 100644
index 00000000000000..7eb5aa7a5158ba
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/dma_support.h
@@ -0,0 +1,126 @@
+/*************************************************************************/ /*!
+@File dma_support.h
+@Title Device contiguous memory allocator and I/O re-mapper
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides a contiguous memory allocator API; mainly
+ used for allocating / ioremapping (DMA/PA <-> CPU/VA)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _DMA_SUPPORT_H_
+#define _DMA_SUPPORT_H_
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+typedef struct _DMA_ALLOC_
+{
+ IMG_UINT64 ui64Size;
+ IMG_CPU_VIRTADDR pvVirtAddr;
+ IMG_DEV_PHYADDR sBusAddr;
+ IMG_HANDLE hHandle;
+ void *pvOSDevice;
+} DMA_ALLOC;
+
+/*!
+******************************************************************************
+ @Function SysDmaAllocMem
+
+ @Description Allocates physically contiguous memory
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaAllocMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function SysDmaFreeMem
+
+ @Description Frees physically contiguous memory
+
+ @Return void
+ ******************************************************************************/
+void SysDmaFreeMem(DMA_ALLOC *psDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function SysDmaRegisterForIoRemapping
+
+ @Description Registers DMA_ALLOC for manual I/O remapping
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysDmaRegisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function SysDmaDeregisterForIoRemapping
+
+ @Description Deregisters DMA_ALLOC from manual I/O remapping
+
+ @Return void
+ ******************************************************************************/
+void SysDmaDeregisterForIoRemapping(DMA_ALLOC *psPhysHeapDmaAlloc);
+
+/*!
+******************************************************************************
+ @Function SysDmaDevPAddrToCpuVAddr
+
+ @Description Maps a DMA_ALLOC physical address to CPU virtual address
+
+ @Return IMG_CPU_VIRTADDR on success. Otherwise, NULL
+ ******************************************************************************/
+IMG_CPU_VIRTADDR SysDmaDevPAddrToCpuVAddr(IMG_UINT64 uiAddr, IMG_UINT64 ui64Size);
+
+/*!
+******************************************************************************
+ @Function SysDmaCpuVAddrToDevPAddr
+
+ @Description Maps a DMA_ALLOC CPU virtual address to physical address
+
+ @Return Non-zero value on success. Otherwise, 0
+ ******************************************************************************/
+IMG_UINT64 SysDmaCpuVAddrToDevPAddr(IMG_CPU_VIRTADDR pvDMAVirtAddr);
+
+#endif /* _DMA_SUPPORT_H_ */
+
+/*****************************************************************************
+ End of file (dma_support.h)
+*****************************************************************************/
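A hypothetical end-to-end use of this API (the device pointer and buffer size are assumed values): allocate, register for PA<->VA lookup, then tear down in reverse order.

/* Hypothetical usage sketch; pvOSDevice is an assumed struct device
 * pointer supplied by the platform code. */
static PVRSRV_ERROR ExampleDmaLifecycle(void *pvOSDevice)
{
	DMA_ALLOC sAlloc = {0};
	PVRSRV_ERROR eError;

	sAlloc.ui64Size = 0x2000;      /* assumed: two 4K pages */
	sAlloc.pvOSDevice = pvOSDevice;

	eError = SysDmaAllocMem(&sAlloc);
	if (eError != PVRSRV_OK)
	{
		return eError;
	}

	/* Enables SysDmaDevPAddrToCpuVAddr()/SysDmaCpuVAddrToDevPAddr() */
	eError = SysDmaRegisterForIoRemapping(&sAlloc);
	if (eError == PVRSRV_OK)
	{
		SysDmaDeregisterForIoRemapping(&sAlloc);
	}

	SysDmaFreeMem(&sAlloc);
	return eError;
}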
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/syscommon.h b/drivers/gpu/drm/img-rogue/1.10/system/syscommon.h
new file mode 100644
index 00000000000000..428073146a56fd
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/syscommon.h
@@ -0,0 +1,124 @@
+/**************************************************************************/ /*!
+@File
+@Title Common System APIs and structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides common system-specific declarations and
+ macros that are supported by all systems
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /***************************************************************************/
+
+#if !defined(__SYSCOMMON_H__)
+#define __SYSCOMMON_H__
+
+#include "img_types.h"
+#include "pvr_notifier.h"
+#include "pvrsrv_device.h"
+#include "pvrsrv_error.h"
+
+typedef IMG_BOOL (*PFN_LISR)(void *pvData);
+
+/**************************************************************************/ /*!
+@Function SysDevInit
+@Description System specific device initialisation function.
+@Input pvOSDevice pointer to the OS device reference
+@Input ppsDevConfig returned device configuration info
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDevInit(void *pvOSDevice, PVRSRV_DEVICE_CONFIG **ppsDevConfig);
+
+/**************************************************************************/ /*!
+@Function SysDevDeInit
+@Description System specific device deinitialisation function.
+@Input psDevConfig device configuration info of the device to be
+ deinitialised
+@Return None.
+*/ /***************************************************************************/
+void SysDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/**************************************************************************/ /*!
+@Function SysDebugInfo
+@Description Dump system specific device debug information.
+@Input psDevConfig pointer to device configuration info
+@Input pfnDumpDebugPrintf the 'printf' function to be called to
+ display the debug info
+@Input pvDumpDebugFile optional file identifier to be passed to
+ the 'printf' function if required
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysDebugInfo(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ DUMPDEBUG_PRINTF_FUNC *pfnDumpDebugPrintf,
+ void *pvDumpDebugFile);
+
+/**************************************************************************/ /*!
+@Function SysInstallDeviceLISR
+@Description Installs the system Low-level Interrupt Service Routine (LISR)
+ which handles low-level processing of interrupts from the device
+ (GPU).
+ The LISR will be invoked when the device raises an interrupt. An
+ LISR may not be descheduled, so code which needs to do so should
+ be placed in an MISR.
+ The installed LISR will schedule any MISRs once it has completed
+ its interrupt processing, by calling OSScheduleMISR().
+@Input hSysData pointer to the system data of the device
+@Input ui32IRQ the IRQ on which the LISR is to be installed
+@Input pszName name of the module installing the LISR
+@Input pfnLISR pointer to the function to be installed as the
+ LISR
+@Input pvData private data provided to the LISR
+@Output phLISRData handle to the installed LISR (to be used for a
+ subsequent uninstall)
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysInstallDeviceLISR(IMG_HANDLE hSysData,
+ IMG_UINT32 ui32IRQ,
+ const IMG_CHAR *pszName,
+ PFN_LISR pfnLISR,
+ void *pvData,
+ IMG_HANDLE *phLISRData);
+
+/**************************************************************************/ /*!
+@Function SysUninstallDeviceLISR
+@Description Uninstalls the system Low-level Interrupt Service Routine (LISR)
+ which handles low-level processing of interrupts from the device
+ (GPU).
+@Input hLISRData handle of the LISR to be uninstalled
+@Return PVRSRV_OK on success, a failure code otherwise.
+*/ /***************************************************************************/
+PVRSRV_ERROR SysUninstallDeviceLISR(IMG_HANDLE hLISRData);
+
+#endif /* !defined(__SYSCOMMON_H__) */
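To make the LISR/MISR split concrete, a hypothetical LISR matching the PFN_LISR signature above; passing the MISR handle through pvData is an assumed convention of this sketch, not something the header mandates:

/* Hypothetical LISR: do only atomic work here and defer the rest to an
 * MISR, as described above. pvData is assumed to carry the MISR handle
 * obtained when the MISR was installed. */
static IMG_BOOL ExampleDeviceLISR(void *pvData)
{
	IMG_HANDLE hMISRData = (IMG_HANDLE)pvData;

	/* Device-specific: read/clear the interrupt status register here */

	/* Hand off non-atomic processing */
	OSScheduleMISR(hMISRData);

	return IMG_TRUE; /* the interrupt was raised by our device */
}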
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/sysvalidation.h b/drivers/gpu/drm/img-rogue/1.10/system/sysvalidation.h
new file mode 100644
index 00000000000000..ae46ee5ce33b06
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/sysvalidation.h
@@ -0,0 +1,63 @@
+/*************************************************************************/ /*!
+@File
+@Title Validation System APIs and structures
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides system-specific declarations and macros
+ needed for hardware validation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(__SYSVALIDATION_H__)
+#define __SYSVALIDATION_H__
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION)
+#include "img_types.h"
+#include "rgxdefs_km.h"
+#include "virt_validation_defs.h"
+
+void SysSetOSidRegisters(IMG_UINT32 aui32OSidMin[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS],
+ IMG_UINT32 aui32OSidMax[GPUVIRT_VALIDATION_NUM_REGIONS][GPUVIRT_VALIDATION_NUM_OS]);
+void SysPrintAndResetFaultStatusRegister(void);
+
+#if defined(SUPPORT_GPUVIRT_VALIDATION) && defined(EMULATOR)
+void SysSetAxiProtOSid(IMG_UINT32 ui32OSid, IMG_BOOL bState);
+void SysSetTrustedDeviceAceEnabled(void);
+#endif
+#endif /* defined(SUPPORT_GPUVIRT_VALIDATION) */
+
+#endif /* !defined(__SYSVALIDATION_H__) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_impl.h b/drivers/gpu/drm/img-rogue/1.10/system/vmm_impl.h
new file mode 100644
index 00000000000000..aafbdc3186bd87
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_impl.h
@@ -0,0 +1,283 @@
+/*************************************************************************/ /*!
+@File vmm_impl.h
+@Title Common VM manager API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides common VM manager definitions that need to
+ be shared by system virtualization layer itself and modules that
+ implement the actual VM manager types.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_IMPL_H_
+#define _VMM_IMPL_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+/*
+ Virtual machine manager para-virtualization (PVZ) connection:
+ - Type is implemented by host and guest drivers
+ - Assumes synchronous function call semantics
+ - Unidirectional semantics
+ - For Host (vmm -> host)
+ - For Guest (guest -> vmm)
+ - Parameters can be IN/OUT/INOUT
+
+ - Host pvz entries are pre-implemented by IMG
+ - For host implementation, see vmm_pvz_server.c
+ - Called by host side hypercall handler or VMM
+
+ - Guest pvz entries are supplied by 3rd-party
+ - These are specific to hypervisor (VMM) type
+ - These implement the actual hypercalls mechanism
+
+ Para-virtualization call runtime sequence:
+ 1 - Guest driver in guest VM calls PVZ function
+ 1.1 - Guest PVZ connection calls
+ 1.2 - Guest VM Manager type which
+ 1.2.1 - Performs any pre-processing like parameter packing, etc.
+ 1.2.2 - Issues hypercall (blocking synchronous call)
+
+ 2 - VM Manager (hypervisor) receives hypercall
+ 2.1 - Hypercall handler:
+ 2.1.1 - Performs any pre-processing
+ 2.1.2 - If call terminates in VM Manager: perform action and return from hypercall
+ 2.1.3 - Otherwise forward to host driver (implementation specific call)
+
+ 3 - Host driver receives call from VM Manager
+ 3.1 - Host VM manager type:
+ 3.1.1 - Performs any pre-processing like parameter unpacking, etc.
+ 3.1.2 - Acquires host driver PVZ handler and calls the appropriate entry
+ 3.2 - Host PVZ connection calls corresponding host system virtualisation layer
+ 3.3 - Host driver system virtualisation layer:
+ 3.3.1 - Perform action requested by guest driver
+ 3.3.2 - Return to host VM Manager type
+ 3.4 - Host VM Manager type:
+ 3.4.1 - Prepare to return from hypercall
+ 3.4.2 - Perform any post-processing like result packing, etc.
+ 3.4.3 - Issue return from hypercall
+
+ 4 - VM Manager (hypervisor)
+ 4.1 - Perform any post-processing
+ 4.2 - Return control to guest driver
+
+ 5 - Guest driver in guest VM
+ 5.1 - Perform any post-processing like parameter unpacking, etc.
+ 5.2 - Continue execution in guest VM
+ */
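As a sketch of steps 1.2.1-1.2.2 above: a guest-side wrapper for sHostFuncTab.pfnMapDevPhysHeap packs its parameters and issues the blocking hypercall. example_hypercall() is a hypothetical stand-in for the hypervisor-specific mechanism, not a real API:

/* Hypothetical transport; a real VM manager integration supplies this */
PVRSRV_ERROR example_hypercall(const IMG_UINT64 aui64Args[4]);

static PVRSRV_ERROR ExamplePvzMapDevPhysHeap(IMG_UINT32 ui32FuncID,
                                             IMG_UINT32 ui32DevID,
                                             IMG_UINT64 ui64Size,
                                             IMG_UINT64 ui64PAddr)
{
	IMG_UINT64 aui64Args[4];

	/* Step 1.2.1: pack parameters for transport */
	aui64Args[0] = ui32FuncID;
	aui64Args[1] = ui32DevID;
	aui64Args[2] = ui64Size;
	aui64Args[3] = ui64PAddr;

	/* Step 1.2.2: blocking synchronous hypercall into the VM manager */
	return example_hypercall(aui64Args);
}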
+typedef struct _VMM_PVZ_CONNECTION_
+{
+ struct {
+ /*
+ This pair must be implemented if the device configuration is
+ not provided during guest build or if the device interrupt
+ is dynamically mapped into the VM virtual interrupt line.
+ If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+ */
+ PVRSRV_ERROR (*pfnCreateDevConfig)(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+ IMG_UINT64 *pui64RegsPBase);
+
+ PVRSRV_ERROR (*pfnDestroyDevConfig)(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+ /*
+ This pair must be implemented if the host is responsible for
+ allocating the physical heaps on behalf of the guest; these
+ physical heaps' Addr/Size are allocated in the host domain
+ and communicated to the guest, so they must be re-expressed
+ relative to the guest VM IPA space. The guest assumes said
+ memory is not managed by the underlying GuestOS kernel.
+ If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+ */
+ PVRSRV_ERROR (*pfnCreateDevPhysHeaps)(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *peType,
+ IMG_UINT64 *pui64FwSize,
+ IMG_UINT64 *pui64FwPAddr,
+ IMG_UINT64 *pui64GpuSize,
+ IMG_UINT64 *pui64GpuPAddr);
+
+ PVRSRV_ERROR (*pfnDestroyDevPhysHeaps)(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+ /*
+ This pair must be implemented if the guest is responsible
+ for allocating the physical heap that backs its firmware
+ allocations; this is the default configuration. The physical
+ heap is allocated within the guest VM IPA space, and this
+ IPA Addr/Size must be re-expressed as PA space Addr/Size
+ by the VM manager before forwarding the request to the host.
+ If not implemented, return PVRSRV_ERROR_NOT_IMPLEMENTED.
+ */
+ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64PAddr);
+
+ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+ } sHostFuncTab;
+
+ struct {
+ /*
+ Corresponding server side entries to handle guest PVZ calls
+ NOTE:
+ - Pvz function ui32OSID parameter:
+ - OSID determination is the responsibility of the VM manager
+ - The actual OSID value must be supplied by the VM manager
+ - This can be done on the client, VMM, or host side
+ - It must be done before the host pvz function(s) are called
+ - The host pvz functions assume a valid OSID
+ */
+ PVRSRV_ERROR (*pfnCreateDevConfig)(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+ IMG_UINT64 *pui64RegsPBase);
+
+ PVRSRV_ERROR (*pfnDestroyDevConfig)(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+ PVRSRV_ERROR (*pfnCreateDevPhysHeaps)(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *peType,
+ IMG_UINT64 *pui64FwSize,
+ IMG_UINT64 *pui64FwPAddr,
+ IMG_UINT64 *pui64GpuSize,
+ IMG_UINT64 *pui64GpuPAddr);
+
+ PVRSRV_ERROR (*pfnDestroyDevPhysHeaps)(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+ PVRSRV_ERROR (*pfnMapDevPhysHeap)(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64PAddr);
+
+ PVRSRV_ERROR (*pfnUnmapDevPhysHeap)(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+ } sGuestFuncTab;
+
+ struct {
+ /*
+ This configuration interface specifies which driver (host or guest)
+ is responsible for allocating the physical memory backing the guest
+ driver(s)' physical heap. Both the host and guest(s) must agree to
+ use the same policy. It must be implemented and should return
+ PVRSRV_OK.
+ */
+ PVRSRV_ERROR (*pfnGetDevPhysHeapOrigin)(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin);
+
+ /*
+ If the host is responsible for allocating the backing memory for
+ the physical heap, the function should return heap Addr/Size value
+ pairs obtained in sHostFuncTab->pfnCreateDevPhysHeaps().
+
+ If the guest is responsible for allocating the backing memory for
+ the physical heap, the function should return the proper values to
+ direct the guest driver on which allocation method to use. This is
+ communicated by using the returned pui64Addr/pui64Size value pairs
+ as shown below:
+
+ For UMA platforms:
+ - For GPU physical heap
+ - 0/0 => UMA
+ - 0/0x[hex-value] => DMA
+ - 0x[hex-value]/0x[hex-value] => UMA/carve-out
+
+ - For FW physical heap
+ - 0/0x[hex-value] => DMA
+ - 0x[hex-value]/0x[hex-value] => UMA/carve-out
+
+ For LMA platforms:
+ - For GPU physical heap
+ - 0x[hex-value]/0x[hex-value] => LMA
+
+ - For FW physical heap
+ - 0x[hex-value]/0x[hex-value] => LMA
+ */
+ PVRSRV_ERROR (*pfnGetDevPhysHeapAddrSize)(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ IMG_UINT64 *pui64Size,
+ IMG_UINT64 *pui64Addr);
+ } sConfigFuncTab;
+
+ struct {
+ /*
+ This is used by the VM manager to report pertinent runtime guest VM
+ information to the host; these events may in turn be forwarded to
+ the firmware
+ */
+ PVRSRV_ERROR (*pfnOnVmOnline)(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+ PVRSRV_ERROR (*pfnOnVmOffline)(IMG_UINT32 ui32OSID);
+
+ PVRSRV_ERROR (*pfnVMMConfigure)(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+ } sVmmFuncTab;
+} VMM_PVZ_CONNECTION;
+
+/*!
+******************************************************************************
+ @Function VMMCreatePvzConnection() and VMMDestroyPvzConnection()
+
+ @Description Both the guest and the VM manager call this to obtain
+ a PVZ connection to the VM manager and host respectively;
+ that is, the guest calls it to connect to the VM manager,
+ and the VM manager calls it to connect to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection);
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection);
+
+#endif /* _VMM_IMPL_H_ */
+
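A hypothetical guest-side pfnGetDevPhysHeapAddrSize implementing the UMA encoding described in the structure comment above; the FW heap size is an assumed value:

/* Illustrative guest callback: report a plain UMA GPU heap (0/0) and a
 * DMA-backed FW heap (0/size), per the Addr/Size encoding above. */
static PVRSRV_ERROR StubGetDevPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
                                               PVRSRV_DEVICE_PHYS_HEAP eHeap,
                                               IMG_UINT64 *pui64Size,
                                               IMG_UINT64 *pui64Addr)
{
	if (eHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
	{
		*pui64Addr = 0;           /* 0/0x[size] => DMA */
		*pui64Size = 0x400000;    /* assumed 4 MiB */
	}
	else
	{
		*pui64Addr = 0;           /* 0/0 => UMA */
		*pui64Size = 0;
	}

	return PVRSRV_OK;
}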
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.c b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.c
new file mode 100644
index 00000000000000..89826b69c95466
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.c
@@ -0,0 +1,335 @@
+/*************************************************************************/ /*!
+@File vmm_pvz_client.c
+@Title VM manager client para-virtualization
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This module implements the VMM client para-virtualization APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+
+
+static inline void
+PvzClientLockAcquire(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzClientLockRelease(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
+
+/*
+ * ===========================================================
+ * The following client para-virtualization (pvz) functions
+ * are exclusively called by guests to initiate a pvz call
+ * to the host via hypervisor (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR
+PvzClientCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID)
+{
+ IMG_UINT32 ui32IRQ;
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32RegsSize;
+ IMG_UINT64 ui64RegsCpuPBase;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ IMG_UINT32 uiFuncID = PVZ_BRIDGE_CREATEDEVICECONFIG;
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PvzClientLockAcquire();
+
+ PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnCreateDevConfig);
+
+ eError = psVmmPvz->sHostFuncTab.pfnCreateDevConfig(uiFuncID,
+ ui32DevID,
+ &ui32IRQ,
+ &ui32RegsSize,
+ &ui64RegsCpuPBase);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Device VM system configuration MMIO/IRQ values */
+ if (ui64RegsCpuPBase)
+ {
+ psDevConfig->sRegsCpuPBase.uiAddr = ui64RegsCpuPBase;
+ }
+
+ if (ui32RegsSize)
+ {
+ psDevConfig->ui32RegsSize = ui32RegsSize;
+ }
+
+ if (ui32IRQ)
+ {
+ psDevConfig->ui32IRQ = ui32IRQ;
+ }
+
+ PVR_ASSERT(psDevConfig->sRegsCpuPBase.uiAddr);
+ PVR_ASSERT(psDevConfig->ui32RegsSize);
+ PVR_ASSERT(psDevConfig->ui32IRQ);
+
+e0:
+ PvzClientLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzClientDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ IMG_UINT32 uiFuncID = PVZ_BRIDGE_DESTROYDEVICECONFIG;
+
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PvzClientLockAcquire();
+
+ PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnDestroyDevConfig);
+
+ eError = psVmmPvz->sHostFuncTab.pfnDestroyDevConfig(uiFuncID,
+ ui32DevID);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+e0:
+ PvzClientLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzClientCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT32 ui32HeapType;
+ PHYS_HEAP_TYPE eHeapType;
+ IMG_UINT64 ui64FwPhysHeapSize;
+ IMG_UINT64 ui64FwPhysHeapAddr;
+ IMG_UINT64 ui64GpuPhysHeapSize;
+ IMG_UINT64 ui64GpuPhysHeapAddr;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeap;
+ IMG_UINT32 uiFuncID = PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS;
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PvzClientLockAcquire();
+
+ PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnCreateDevPhysHeaps);
+
+ eError = psVmmPvz->sHostFuncTab.pfnCreateDevPhysHeaps(uiFuncID,
+ ui32DevID,
+ &ui32HeapType,
+ &ui64FwPhysHeapSize,
+ &ui64FwPhysHeapAddr,
+ &ui64GpuPhysHeapSize,
+ &ui64GpuPhysHeapAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ eHeapType = (PHYS_HEAP_TYPE) ui32HeapType;
+ for (ePhysHeap = 0; ePhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeap++)
+ {
+ IMG_UINT64 ui64PhysHeapSize;
+ IMG_DEV_PHYADDR sPhysHeapAddr;
+
+ switch (ePhysHeap)
+ {
+ case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+ sPhysHeapAddr.uiAddr = ui64GpuPhysHeapAddr;
+ ui64PhysHeapSize = ui64GpuPhysHeapSize;
+ break;
+
+ case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+ sPhysHeapAddr.uiAddr = ui64FwPhysHeapAddr;
+ ui64PhysHeapSize = ui64FwPhysHeapSize;
+ break;
+
+ default:
+ ui64PhysHeapSize = (IMG_UINT64)0;
+ break;
+ }
+
+ if (ui64PhysHeapSize)
+ {
+ eError = SysVzSetPhysHeapAddrSize(psDevConfig,
+ ePhysHeap,
+ eHeapType,
+ sPhysHeapAddr,
+ ui64PhysHeapSize);
+ PVR_ASSERT(eError == PVRSRV_OK);
+
+ eError = SysVzRegisterPhysHeap(psDevConfig, ePhysHeap);
+ PVR_ASSERT(eError == PVRSRV_OK);
+ }
+ }
+
+e0:
+ PvzClientLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzClientDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ IMG_UINT32 uiFuncID = PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS;
+
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PvzClientLockAcquire();
+
+ PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnDestroyDevPhysHeaps);
+
+ eError = psVmmPvz->sHostFuncTab.pfnDestroyDevPhysHeaps(uiFuncID,
+ ui32DevID);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+e0:
+ PvzClientLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT64 ui64DevPSize)
+{
+ PVRSRV_ERROR eError;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ IMG_UINT32 uiFuncID = PVZ_BRIDGE_MAPDEVICEPHYSHEAP;
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PvzClientLockAcquire();
+
+ PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap);
+
+ eError = psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap(uiFuncID,
+ ui32DevID,
+ ui64DevPSize,
+ sDevPAddr.uiAddr);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+e0:
+ PvzClientLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ IMG_UINT32 uiFuncID = PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP;
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PvzClientLockAcquire();
+
+ PVR_ASSERT(psVmmPvz->sHostFuncTab.pfnUnmapDevPhysHeap);
+
+ eError = psVmmPvz->sHostFuncTab.pfnUnmapDevPhysHeap(uiFuncID,
+ ui32DevID);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+e0:
+ PvzClientLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_client.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.h b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.h
new file mode 100644
index 00000000000000..374dafc069d689
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_client.h
@@ -0,0 +1,143 @@
+/*************************************************************************/ /*!
+@File vmm_pvz_client.h
+@Title Guest VM manager client para-virtualization routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header provides guest VMM client para-virtualization APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_CLIENT_H_
+#define _VMM_PVZ_CLIENT_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+
+/*!
+******************************************************************************
+ @Function PvzClientCreateDevConfig
+
+ @Description The guest front-end to initiate a pfnCreateDevConfig PVZ
+ call to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzClientDestroyDevConfig
+
+ @Description The guest front-end to initiate a pfnDestroyDevConfig PVZ
+ call to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzClientCreateDevPhysHeaps
+
+ @Description The guest front-end to initiate a pfnCreateDevPhysHeaps PVZ
+ call to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzClientDestroyDevPhysHeaps
+
+ @Description The guest front-end to initiate a pfnDestroyDevPhysHeaps PVZ
+ call to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzClientMapDevPhysHeap
+
+ @Description The guest front-end to initiate a pfnMapDevPhysHeap PVZ
+ call to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientMapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID,
+ IMG_DEV_PHYADDR sDevPAddr,
+ IMG_UINT64 ui64DevPSize);
+
+/*!
+******************************************************************************
+ @Function PvzClientUnmapDevPhysHeap
+
+ @Description The guest front-end to initiate a pfnUnmapDevPhysHeap PVZ
+ call to the host.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a
+ PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzClientUnmapDevPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ IMG_UINT32 ui32DevID);
+
+#endif /* _VMM_PVZ_CLIENT_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_client.h)
+*****************************************************************************/
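A hypothetical guest init fragment using this API; the device ID, heap base, and size are assumed values:

/* Illustrative only: forward the guest FW heap to the host once its
 * IPA base/size are known. */
static PVRSRV_ERROR ExampleMapGuestFwHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
{
	IMG_DEV_PHYADDR sFwBase;

	sFwBase.uiAddr = 0x40000000ULL;              /* assumed guest IPA */

	return PvzClientMapDevPhysHeap(psDevConfig,
	                               0,            /* assumed device ID */
	                               sFwBase,
	                               0x400000ULL); /* assumed heap size */
}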
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_common.h b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_common.h
new file mode 100644
index 00000000000000..b8d5a934aac90e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_common.h
@@ -0,0 +1,60 @@
+/*************************************************************************/ /*!
+@File vmm_pvz_common.h
+@Title Common VM manager function IDs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Header provides VM manager para-virtualization function IDs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_COMMON_H_
+#define _VMM_PVZ_COMMON_H_
+
+#define PVZ_BRIDGE_DEFAULT 0UL
+#define PVZ_BRIDGE_CREATEDEVICECONFIG (PVZ_BRIDGE_DEFAULT + 1)
+#define PVZ_BRIDGE_DESTROYDEVICECONFIG (PVZ_BRIDGE_CREATEDEVICECONFIG + 1)
+#define PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS (PVZ_BRIDGE_DESTROYDEVICECONFIG + 1)
+#define PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS (PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS + 1)
+#define PVZ_BRIDGE_MAPDEVICEPHYSHEAP (PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS + 1)
+#define PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP (PVZ_BRIDGE_MAPDEVICEPHYSHEAP + 1)
+#define PVZ_BRIDGE_LAST (PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP + 1)
+
+#endif /* _VMM_PVZ_COMMON_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_common.h)
+*****************************************************************************/
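Because the chained definitions above resolve to consecutive IDs (PVZ_BRIDGE_CREATEDEVICECONFIG = 1 through PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP = 6), a VM manager can sanity-check an incoming hypercall ID with a simple range test before dispatching it; a minimal sketch:

/* Illustrative helper (not in this patch) */
static IMG_BOOL PvzBridgeFuncIDIsValid(IMG_UINT32 ui32FuncID)
{
	return (ui32FuncID > PVZ_BRIDGE_DEFAULT &&
	        ui32FuncID < PVZ_BRIDGE_LAST) ? IMG_TRUE : IMG_FALSE;
}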
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.c b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.c
new file mode 100644
index 00000000000000..a0a3481937659e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.c
@@ -0,0 +1,324 @@
+/*************************************************************************/ /*!
+@File vmm_pvz_server.c
+@Title VM manager server para-virtualization handlers
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This module implements the VMM server para-virtualization handler APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+
+#include "vz_vm.h"
+#include "vmm_impl.h"
+#include "vz_vmm_pvz.h"
+#include "vz_support.h"
+#include "vmm_pvz_server.h"
+#include "vz_physheap.h"
+
+
+static inline void
+PvzServerLockAcquire(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ OSLockAcquire(psPVRSRVData->hPvzConnectionLock);
+}
+
+static inline void
+PvzServerLockRelease(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ OSLockRelease(psPVRSRVData->hPvzConnectionLock);
+}
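+
+/* Both helpers above take the single driver-wide pvz connection lock; every
+   PvzServerXxx entry point below bridges through them, so pvz calls made on
+   behalf of all guest OSIDs are serialized against the host driver state. */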
+
+
+/*
+ * ===========================================================
+ * The following server para-virtualization (pvz) functions
+ * are exclusively called by the VM manager (hypervisor) on
+ * behalf of guests to complete guest pvz calls
+ * (guest -> vm manager -> host)
+ * ===========================================================
+ */
+
+PVRSRV_ERROR
+PvzServerCreateDevConfig(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+ IMG_UINT64 *pui64RegsCpuPBase)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_CREATEDEVICECONFIG);
+
+ eError = SysVzIsVmOnline(ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzCreateDevConfig(ui32OSID,
+ ui32DevID,
+ pui32IRQ,
+ pui32RegsSize,
+ pui64RegsCpuPBase);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerDestroyDevConfig(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_DESTROYDEVICECONFIG);
+
+ eError = SysVzIsVmOnline(ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzDestroyDevConfig(ui32OSID, ui32DevID);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *peHeapType,
+ IMG_UINT64 *pui64FwSize,
+ IMG_UINT64 *pui64FwAddr,
+ IMG_UINT64 *pui64GpuSize,
+ IMG_UINT64 *pui64GpuAddr)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_CREATEDEVICEPHYSHEAPS);
+
+ eError = SysVzIsVmOnline(ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzCreateDevPhysHeaps(ui32OSID,
+ ui32DevID,
+ peHeapType,
+ pui64FwSize,
+ pui64FwAddr,
+ pui64GpuSize,
+ pui64GpuAddr);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerDestroyDevPhysHeaps(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_DESTROYDEVICEPHYSHEAPS);
+
+ eError = SysVzIsVmOnline(ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzDestroyDevPhysHeaps(ui32OSID, ui32DevID);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64PAddr)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ VMM_PVZ_CONNECTION *psVmmPvz = SysVzPvzConnectionAcquire();
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST;
+
+ PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_MAPDEVICEPHYSHEAP);
+
+	eError = SysVzIsVmOnline(ui32OSID);
+	if (eError != PVRSRV_OK)
+	{
+		/* Release the pvz connection acquired above before bailing out */
+		SysVzPvzConnectionRelease(psVmmPvz);
+		return eError;
+	}
+
+ PvzServerLockAcquire();
+
+ eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psPVRSRVData->psDeviceNodeList->psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eOrigin);
+
+ if (eOrigin != PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+		/* Reject the hypercall if the host is configured with an
+		   incompatible PVZ physheap origin; here the guest is configured
+		   with guest-origin but the host is not, and both must use the
+		   same origin */
+		PVR_DPF((PVR_DBG_ERROR,
+				"%s: Host PVZ config does not match guest PVZ config\n"
+				"=>: pfnGetDevPhysHeapOrigin() differs between host and guest\n"
+				"=>: host and guest(s) must use the same FW physheap origin",
+				__func__));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e0;
+ }
+
+ eError = SysVzPvzRegisterFwPhysHeap(ui32OSID,
+ ui32DevID,
+ ui64Size,
+ ui64PAddr);
+
+e0:
+ PvzServerLockRelease();
+ SysVzPvzConnectionRelease(psVmmPvz);
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_ASSERT(ui32FuncID == PVZ_BRIDGE_UNMAPDEVICEPHYSHEAP);
+
+ eError = SysVzIsVmOnline(ui32OSID);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzUnregisterFwPhysHeap(ui32OSID, ui32DevID);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+
+/*
+ * ============================================================
+ * The following server para-virtualization (pvz) functions
+ * are exclusively called by the VM manager (hypervisor) to
+ * pass side band information to the host (vm manager -> host)
+ * ============================================================
+ */
+
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError;
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzOnVmOnline(ui32OSID, ui32Priority);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID)
+{
+ PVRSRV_ERROR eError;
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzOnVmOffline(ui32OSID);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+ PVRSRV_ERROR eError;
+
+ PvzServerLockAcquire();
+
+ eError = SysVzPvzVMMConfigure(eVMMParamType, ui32ParamValue);
+
+ PvzServerLockRelease();
+
+ return eError;
+}
+
+/******************************************************************************
+ End of file (vmm_pvz_server.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.h b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.h
new file mode 100644
index 00000000000000..397ca5d108bfcf
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_pvz_server.h
@@ -0,0 +1,205 @@
+/*************************************************************************/ /*!
+@File vmm_pvz_server.h
+@Title VM manager para-virtualization interface helper routines
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    Header provides API(s) available to the VM manager; these must
+                be called to close the loop during guest para-virtualization
+                calls.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VMM_PVZ_SERVER_H_
+#define _VMM_PVZ_SERVER_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "vmm_pvz_common.h"
+
+
+/*!
+******************************************************************************
+ @Function PvzServerCreateDevConfig
+
+ @Description The VM manager calls this in response to guest PVZ interface
+ call pfnCreateDevConfig.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerCreateDevConfig(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+                         IMG_UINT64 *pui64RegsCpuPBase);
+
+/*!
+******************************************************************************
+ @Function PvzServerDestroyDevConfig
+
+ @Description The VM manager calls this in response to guest PVZ interface
+ call pfnDestroyDevConfig.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerDestroyDevConfig(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzServerCreateDevPhysHeaps
+
+ @Description The VM manager calls this in response to guest PVZ interface
+ call pfnCreateDevPhysHeaps.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pePHeapType,
+ IMG_UINT64 *pui64FwSize,
+ IMG_UINT64 *pui64FwAddr,
+ IMG_UINT64 *pui64GpuSize,
+ IMG_UINT64 *pui64GpuAddr);
+
+/*!
+******************************************************************************
+ @Function PvzServerDestroyDevPhysHeaps
+
+ @Description The VM manager calls this in response to guest PVZ interface
+ call pfnDestroyDevPhysHeaps.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerDestroyDevPhysHeaps(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzServerMapDevPhysHeap
+
+ @Description The VM manager calls this in response to guest PVZ interface
+ call pfnMapDevPhysHeap.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerMapDevPhysHeap(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64PAddr);
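+
+/* Illustrative sketch only: a hypothetical VM manager shim closing the loop
+ * on a guest pfnMapDevPhysHeap call. The message type and its field names
+ * are assumptions for illustration, not part of this interface:
+ *
+ *     static PVRSRV_ERROR MyVmmHandleMapHeap(IMG_UINT32 ui32OSID,
+ *                                            const my_vmm_msg_t *psMsg)
+ *     {
+ *         return PvzServerMapDevPhysHeap(ui32OSID,
+ *                                        PVZ_BRIDGE_MAPDEVICEPHYSHEAP,
+ *                                        psMsg->ui32DevID,
+ *                                        psMsg->ui64Size,
+ *                                        psMsg->ui64PAddr);
+ *     }
+ */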
+
+/*!
+******************************************************************************
+ @Function PvzServerUnmapDevPhysHeap
+
+ @Description The VM manager calls this in response to guest PVZ interface
+ call pfnUnmapDevPhysHeap.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerUnmapDevPhysHeap(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function PvzServerOnVmOnline
+
+ @Description    The VM manager calls this when a guest VM comes online. The
+                 host driver may initialise the firmware if it has not already
+                 done so.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerOnVmOnline(IMG_UINT32 ui32OSID, IMG_UINT32 ui32Priority);
+
+/*!
+******************************************************************************
+ @Function PvzServerOnVmOffline
+
+ @Description    The VM manager calls this when a guest VM is about to go
+                 offline. The VM manager may already have unmapped the GPU
+                 kick register for that VM, but must not unmap its GPU memory
+                 until this call returns. Once the function returns, the FW
+                 holds no references to the VM, none of its workloads are
+                 running on the GPU, and it is safe to remove the VM's memory.
+
+ @Return         PVRSRV_OK on success; PVRSRV_ERROR_TIMEOUT if the FW takes
+                 too long to clean up the resources of the OSID; otherwise a
+                 PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerOnVmOffline(IMG_UINT32 ui32OSID);
+
+/*!
+******************************************************************************
+ @Function PvzServerVMMConfigure
+
+ @Description    The VM manager calls this to configure parameters such as
+                 HCS or isolation.
+
+ @Return        PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR
+PvzServerVMMConfigure(VMM_CONF_PARAM eVMMParamType,
+ IMG_UINT32 ui32ParamValue);
+
+#endif /* _VMM_PVZ_SERVER_H_ */
+
+/*****************************************************************************
+ End of file (vmm_pvz_server.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vmm_type_stub.c b/drivers/gpu/drm/img-rogue/1.10/system/vmm_type_stub.c
new file mode 100644
index 00000000000000..8598a36eac959c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vmm_type_stub.c
@@ -0,0 +1,226 @@
+/*************************************************************************/ /*!
+@File vmm_type_stub.c
+@Title Stub VM manager type
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Sample stub (no-operation) VM manager implementation
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "rgxheapconfig.h"
+
+#include "vmm_impl.h"
+#include "vmm_pvz_server.h"
+
+static PVRSRV_ERROR
+StubVMMCreateDevConfig(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+ IMG_UINT64 *pui64RegsCpuPBase)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ PVR_UNREFERENCED_PARAMETER(pui32IRQ);
+ PVR_UNREFERENCED_PARAMETER(pui32RegsSize);
+ PVR_UNREFERENCED_PARAMETER(pui64RegsCpuPBase);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMDestroyDevConfig(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMCreateDevPhysHeaps(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *peType,
+ IMG_UINT64 *pui64FwPhysHeapSize,
+ IMG_UINT64 *pui64FwPhysHeapAddr,
+ IMG_UINT64 *pui64GpuPhysHeapSize,
+ IMG_UINT64 *pui64GpuPhysHeapAddr)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ PVR_UNREFERENCED_PARAMETER(peType);
+ PVR_UNREFERENCED_PARAMETER(pui64FwPhysHeapSize);
+ PVR_UNREFERENCED_PARAMETER(pui64FwPhysHeapAddr);
+ PVR_UNREFERENCED_PARAMETER(pui64GpuPhysHeapSize);
+ PVR_UNREFERENCED_PARAMETER(pui64GpuPhysHeapAddr);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMDestroyDevPhysHeaps(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMMapDevPhysHeap(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64Addr)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ PVR_UNREFERENCED_PARAMETER(ui64Size);
+ PVR_UNREFERENCED_PARAMETER(ui64Addr);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMUnmapDevPhysHeap(IMG_UINT32 ui32FuncID,
+ IMG_UINT32 ui32DevID)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32FuncID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+static PVRSRV_ERROR
+StubVMMGetDevPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType,
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+{
+ *peOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST;
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(eHeapType);
+ return PVRSRV_OK;
+}
+
+static PVRSRV_ERROR
+StubVMMGetDevPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType,
+ IMG_UINT64 *pui64Size,
+ IMG_UINT64 *pui64Addr)
+{
+ *pui64Size = 0;
+ *pui64Addr = 0;
+ PVR_UNREFERENCED_PARAMETER(psDevConfig);
+ PVR_UNREFERENCED_PARAMETER(eHeapType);
+ return PVRSRV_OK;
+}
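+
+/* Note: with this stub, GUEST origin plus zero-sized heaps should make the
+   driver fall through to the kernel-managed UMA physheap path (see
+   SysVzCreatePhysHeap in vz_physheap_generic.c), i.e. no carve-out or DMA
+   heap is ever created. */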
+
+static VMM_PVZ_CONNECTION gsStubVmmPvz =
+{
+ .sHostFuncTab = {
+ /* pfnCreateDevConfig */
+ &StubVMMCreateDevConfig,
+
+ /* pfnDestroyDevConfig */
+ &StubVMMDestroyDevConfig,
+
+ /* pfnCreateDevPhysHeaps */
+ &StubVMMCreateDevPhysHeaps,
+
+ /* pfnDestroyDevPhysHeaps */
+ &StubVMMDestroyDevPhysHeaps,
+
+ /* pfnMapDevPhysHeap */
+ &StubVMMMapDevPhysHeap,
+
+ /* pfnUnmapDevPhysHeap */
+ &StubVMMUnmapDevPhysHeap
+ },
+
+ .sGuestFuncTab = {
+ /* pfnCreateDevConfig */
+ &PvzServerCreateDevConfig,
+
+ /* pfnDestroyDevConfig */
+ &PvzServerDestroyDevConfig,
+
+ /* pfnCreateDevPhysHeaps */
+ &PvzServerCreateDevPhysHeaps,
+
+ /* pfnDestroyDevPhysHeaps */
+ &PvzServerDestroyDevPhysHeaps,
+
+ /* pfnMapDevPhysHeap */
+ &PvzServerMapDevPhysHeap,
+
+ /* pfnUnmapDevPhysHeap */
+ &PvzServerUnmapDevPhysHeap
+ },
+
+ .sConfigFuncTab = {
+ /* pfnGetDevPhysHeapOrigin */
+ &StubVMMGetDevPhysHeapOrigin,
+
+ /* pfnGetDevPhysHeapAddrSize */
+ &StubVMMGetDevPhysHeapAddrSize
+ },
+
+ .sVmmFuncTab = {
+ /* pfnOnVmOnline */
+ &PvzServerOnVmOnline,
+
+ /* pfnOnVmOffline */
+ &PvzServerOnVmOffline,
+
+ /* pfnVMMConfigure */
+ &PvzServerVMMConfigure
+ }
+};
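+
+/* Wiring summary: sHostFuncTab holds the outgoing guest->hypervisor calls
+   (all stubbed to NOT_IMPLEMENTED here), while sGuestFuncTab and sVmmFuncTab
+   point straight back at the PvzServer* handlers. A real VM manager type
+   would typically only need to replace the host-side and config entries with
+   transport into its hypervisor. */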
+
+PVRSRV_ERROR VMMCreatePvzConnection(VMM_PVZ_CONNECTION **psPvzConnection)
+{
+ PVR_LOGR_IF_FALSE((NULL != psPvzConnection), "VMMCreatePvzConnection", PVRSRV_ERROR_INVALID_PARAMS);
+ *psPvzConnection = &gsStubVmmPvz;
+	PVR_DPF((PVR_DBG_ERROR, "Using a stub VM manager type; no runtime VZ support"));
+ return PVRSRV_ERROR_NOT_IMPLEMENTED;
+}
+
+void VMMDestroyPvzConnection(VMM_PVZ_CONNECTION *psPvzConnection)
+{
+ PVR_LOG_IF_FALSE((NULL != psPvzConnection), "VMMDestroyPvzConnection");
+}
+
+/******************************************************************************
+ End of file (vmm_type_stub.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap.h b/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap.h
new file mode 100644
index 00000000000000..a3ac9fc85ba393
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap.h
@@ -0,0 +1,267 @@
+/*************************************************************************/ /*!
+@File vz_physheap.h
+@Title System virtualization physheap support APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description    This header provides virtualization-specific physheap APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_PHYSHEAP_H_
+#define _VZ_PHYSHEAP_H_
+
+#include "pvrsrv.h"
+
+typedef enum _PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_
+{
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST = 0,
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST = 1,
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST
+} PVRSRV_DEVICE_PHYS_HEAP_ORIGIN;
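+
+/* The origin selects which driver provides the physical backing for a heap:
+   with HOST origin the host driver allocates memory for all OSIDs up front
+   (see the RGXFW_NUM_OS scaling and per-OSID re-basing in the vz physheap
+   sources); with GUEST origin each guest provides its own FW physheap and
+   pushes it to the host via pfnMapDevPhysHeap. */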
+
+/*!
+******************************************************************************
+ @Function SysVzGetPhysHeapAddrSize
+
+ @Description Get the address and size value of the specified device heap
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzGetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ PHYS_HEAP_TYPE eType,
+ IMG_DEV_PHYADDR *psAddr,
+ IMG_UINT64 *pui64Size);
+
+/*!
+******************************************************************************
+ @Function SysVzSetPhysHeapAddrSize
+
+ @Description Set physical heap configuration attributes
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzSetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ PHYS_HEAP_TYPE eType,
+ IMG_DEV_PHYADDR sAddr,
+ IMG_UINT64 ui64Size);
+
+/*!
+******************************************************************************
+ @Function SysVzRegisterPhysHeap
+
+ @Description Registers heap with virtualization services
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzRegisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+/*!
+******************************************************************************
+ @Function SysVzDeregisterPhysHeap
+
+ @Description Deregister heap from virtualization services
+
+ @Return void
+ ******************************************************************************/
+void SysVzDeregisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+
+/*!
+******************************************************************************
+ @Function SysVzGetPhysHeapConfig
+
+ @Description    Looks up the device physical heap configuration
+
+ @Return PHYS_HEAP_CONFIG * on success. Otherwise, NULL
+ ******************************************************************************/
+PHYS_HEAP_CONFIG *SysVzGetPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap);
+
+/*!
+******************************************************************************
+ @Function SysVzGetPhysHeapOrigin
+
+ @Description    Identifies which driver is responsible for allocating the
+                 device physical heap backing memory
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzGetPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin);
+
+/*!
+******************************************************************************
+ @Function SysVzGetMemoryConfigPhysHeapType
+
+ @Description Get the platform memory configuration physical heap type
+
+ @Return PHYS_HEAP_TYPE
+ ******************************************************************************/
+PHYS_HEAP_TYPE SysVzGetMemoryConfigPhysHeapType(void);
+
+/*!
+******************************************************************************
+ @Function SysVzInitDevPhysHeaps
+
+ @Description    Initialises the device physical heaps
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzDeInitDevPhysHeaps
+
+ @Description    De-initialises the device physical heaps
+
+ @Return void
+ ******************************************************************************/
+void SysVzDeInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzCreateDevPhysHeaps
+
+ @Description Create device physical heaps
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzDestroyDevPhysHeaps
+
+ @Description Destroy device physical heaps
+
+ @Return void
+ ******************************************************************************/
+void SysVzDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzRegisterFwPhysHeap
+
+ @Description Maps VM relative physically contiguous memory into the
+ firmware kernel memory context
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzRegisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzUnregisterFwPhysHeap
+
+ @Description Unmaps VM relative physically contiguous memory from the
+ firmware kernel memory context
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzUnregisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function       SysVzPvzCreateDevPhysHeaps
+
+ @Description Create guest device physical heaps
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *peType,
+ IMG_UINT64 *pui64FwSize,
+ IMG_UINT64 *pui64FwAddr,
+ IMG_UINT64 *pui64GpuSize,
+                                        IMG_UINT64 *pui64GpuAddr);
+
+/*!
+******************************************************************************
+ @Function       SysVzPvzDestroyDevPhysHeaps
+
+ @Description Destroy guest device physical heaps
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzDestroyDevPhysHeaps(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+/*!
+******************************************************************************
+ @Function       SysVzPvzRegisterFwPhysHeap
+
+ @Description Maps guest VM relative physically contiguous memory into
+ the firmware kernel memory context
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzRegisterFwPhysHeap(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64Addr);
+
+/*!
+******************************************************************************
+ @Function       SysVzPvzUnregisterFwPhysHeap
+
+ @Description Unmaps guest VM relative physically contiguous memory from
+ the firmware kernel memory context
+
+ @Return         PVRSRV_ERROR PVRSRV_OK on success; otherwise a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzUnregisterFwPhysHeap(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+#endif /* _VZ_PHYSHEAP_H_ */
+
+/*****************************************************************************
+ End of file (vz_physheap.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_common.c b/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_common.c
new file mode 100644
index 00000000000000..3a01e6df538e52
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_common.c
@@ -0,0 +1,563 @@
+/*************************************************************************/ /*!
+@File vz_physheap_common.c
+@Title System virtualization common physheap configuration API(s)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System virtualization common physical heap configuration API(s)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#include "rgxfwutils.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+#include "vmm_impl.h"
+
+PVRSRV_ERROR SysVzCreateDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+ eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeap, &eHeapOrigin);
+ PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ eError = PvzClientCreateDevPhysHeaps(psDevConfig, 0);
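+		/* A stub VM manager reports NOT_IMPLEMENTED; treat it as success
+		   so setups without runtime VZ support can still initialise */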
+ eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+ PVR_LOGG_IF_ERROR(eError, "PvzClientCreateDevPhysHeaps", e0);
+ }
+
+e0:
+ return eError;
+}
+
+void SysVzDestroyDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+ eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeapType, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ eError = PvzClientDestroyDevPhysHeaps(psDevConfig, 0);
+ eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+ PVR_LOGG_IF_ERROR(eError, "PvzClientDestroyDevPhysHeaps", e0);
+ }
+
+e0:
+ return;
+}
+
+PVRSRV_ERROR SysVzRegisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+ eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeap, &eHeapOrigin);
+ PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+ IMG_DEV_PHYADDR sDevPAddr;
+ IMG_UINT64 ui64DevPSize;
+
+ psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeap);
+ PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+ sDevPAddr.uiAddr = psPhysHeapConfig->pasRegions[0].sStartAddr.uiAddr;
+ PVR_LOGR_IF_FALSE((0 != sDevPAddr.uiAddr), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+ ui64DevPSize = psPhysHeapConfig->pasRegions[0].uiSize;
+ PVR_LOGR_IF_FALSE((0 != ui64DevPSize), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+ eError = PvzClientMapDevPhysHeap(psDevConfig, 0, sDevPAddr, ui64DevPSize);
+ PVR_LOGG_IF_ERROR(eError, "PvzClientMapDevPhysHeap", e0);
+ }
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR SysVzUnregisterFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+
+ eError = SysVzGetPhysHeapOrigin(psDevConfig, eHeapType, &eHeapOrigin);
+	PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+ eError = PvzClientUnmapDevPhysHeap(psDevConfig, 0);
+		PVR_LOGG_IF_ERROR(eError, "PvzClientUnmapDevPhysHeap", e0);
+ }
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR SysVzRegisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+ PVR_LOGR_IF_FALSE((eHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST), "Invalid Heap", PVRSRV_ERROR_INVALID_PARAMS);
+ PVR_LOGR_IF_FALSE((eHeap != PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL), "Skipping CPU local heap registration", PVRSRV_OK);
+
+ /* Currently we only support GPU/FW DMA physheap registration */
+ psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeap);
+ PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "SysVzGetPhysHeapConfig", PVRSRV_ERROR_INVALID_PARAMS);
+
+ if (psPhysHeapConfig &&
+ psPhysHeapConfig->pasRegions &&
+ psPhysHeapConfig->pasRegions[0].hPrivData)
+ {
+ DMA_ALLOC *psDmaAlloc;
+
+ if (psPhysHeapConfig->eType == PHYS_HEAP_TYPE_DMA)
+ {
+ /* DMA physheaps have quirks on some OS environments */
+ psDmaAlloc = psPhysHeapConfig->pasRegions[0].hPrivData;
+ eError = SysDmaRegisterForIoRemapping(psDmaAlloc);
+ PVR_LOG_IF_ERROR(eError, "SysDmaRegisterForIoRemapping");
+ }
+ }
+
+ return eError;
+}
+
+void SysVzDeregisterPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType)
+{
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+ if (eHeapType == PVRSRV_DEVICE_PHYS_HEAP_CPU_LOCAL ||
+ eHeapType >= PVRSRV_DEVICE_PHYS_HEAP_LAST)
+ {
+ return;
+ }
+
+ /* Currently we only support GPU/FW physheap deregistration */
+ psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, eHeapType);
+ PVR_LOG_IF_FALSE((psPhysHeapConfig!=NULL), "SysVzGetPhysHeapConfig");
+
+ if (psPhysHeapConfig &&
+ psPhysHeapConfig->pasRegions &&
+ psPhysHeapConfig->pasRegions[0].hPrivData)
+ {
+ DMA_ALLOC *psDmaAlloc;
+
+ if (psPhysHeapConfig->eType == PHYS_HEAP_TYPE_DMA)
+ {
+ psDmaAlloc = psPhysHeapConfig->pasRegions[0].hPrivData;
+ SysDmaDeregisterForIoRemapping(psDmaAlloc);
+ }
+ }
+
+}
+
+PHYS_HEAP_CONFIG *SysVzGetPhysHeapConfig(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType)
+{
+	IMG_UINT uiIdx;
+	IMG_UINT32 ui32PhysHeapID;
+ IMG_UINT32 ui32PhysHeapCount;
+ PHYS_HEAP_CONFIG *psPhysHeap;
+ PHYS_HEAP_CONFIG *ps1stPhysHeap = &psDevConfig->pasPhysHeaps[0];
+
+ if (eHeapType == PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL)
+ {
+ return ps1stPhysHeap;
+ }
+
+ /* Initialise here to catch lookup failures */
+ ui32PhysHeapCount = psDevConfig->ui32PhysHeapCount;
+ psPhysHeap = NULL;
+
+ if (eHeapType < PVRSRV_DEVICE_PHYS_HEAP_LAST)
+ {
+		/* Look up the ID of the physheap and get a pointer to its config */
+		ui32PhysHeapID = psDevConfig->aui32PhysHeapID[eHeapType];
+ for (uiIdx = 1; uiIdx < ui32PhysHeapCount; uiIdx++)
+ {
+			if (ps1stPhysHeap[uiIdx].ui32PhysHeapID == ui32PhysHeapID)
+ {
+ psPhysHeap = &ps1stPhysHeap[uiIdx];
+ break;
+ }
+ }
+ }
+	PVR_LOG_IF_FALSE((psPhysHeap != NULL), "SysVzGetPhysHeapConfig: physheap lookup failed");
+
+ return psPhysHeap;
+}
+
+PVRSRV_ERROR SysVzSetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeap,
+ PHYS_HEAP_TYPE eHeapType,
+ IMG_DEV_PHYADDR sPhysHeapAddr,
+ IMG_UINT64 ui64PhysHeapSize)
+{
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+ psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+ PVR_LOGR_IF_FALSE((psPhysHeapConfig != NULL), "Invalid PhysHeapConfig", eError);
+ PVR_LOGR_IF_FALSE((ui64PhysHeapSize != 0), "Invalid PhysHeapSize", eError);
+
+ if (eHeapType == PHYS_HEAP_TYPE_UMA || eHeapType == PHYS_HEAP_TYPE_LMA)
+ {
+ /* At this junction, we _may_ initialise new state */
+ PVR_ASSERT(sPhysHeapAddr.uiAddr && ui64PhysHeapSize);
+
+ if (psPhysHeapConfig->pasRegions == NULL)
+ {
+ psPhysHeapConfig->pasRegions = OSAllocZMem(sizeof(PHYS_HEAP_REGION));
+ if (psPhysHeapConfig->pasRegions == NULL)
+ {
+ return PVRSRV_ERROR_OUT_OF_MEMORY;
+ }
+
+ PVR_ASSERT(! psPhysHeapConfig->bDynAlloc);
+ psPhysHeapConfig->bDynAlloc = IMG_TRUE;
+ psPhysHeapConfig->ui32NumOfRegions++;
+ }
+
+ if (eHeapType == PHYS_HEAP_TYPE_UMA)
+ {
+ psPhysHeapConfig->pasRegions[0].sCardBase = sPhysHeapAddr;
+ }
+
+ psPhysHeapConfig->pasRegions[0].sStartAddr.uiAddr = sPhysHeapAddr.uiAddr;
+ psPhysHeapConfig->pasRegions[0].uiSize = ui64PhysHeapSize;
+ psPhysHeapConfig->eType = eHeapType;
+
+ eError = PVRSRV_OK;
+ }
+
+ PVR_LOG_IF_ERROR(eError, "SysVzSetPhysHeapAddrSize");
+ return eError;
+}
+
+PVRSRV_ERROR SysVzGetPhysHeapAddrSize(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeap,
+ PHYS_HEAP_TYPE eHeapType,
+ IMG_DEV_PHYADDR *psAddr,
+ IMG_UINT64 *pui64Size)
+{
+ IMG_UINT64 uiAddr;
+ PVRSRV_ERROR eError;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+
+ PVR_UNREFERENCED_PARAMETER(eHeapType);
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PVR_ASSERT(psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize);
+
+ eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+ ePhysHeap,
+ pui64Size,
+ &uiAddr);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: VMM/PVZ pfnGetDevPhysHeapAddrSize() must be implemented (%s)",
+					__func__,
+ PVRSRVGetErrorStringKM(eError)));
+ }
+
+ goto e0;
+ }
+
+ psAddr->uiAddr = uiAddr;
+e0:
+ SysVzPvzConnectionRelease(psVmmPvz);
+ return eError;
+}
+
+PVRSRV_ERROR SysVzGetPhysHeapOrigin(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP eHeap,
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN *peOrigin)
+{
+ PVRSRV_ERROR eError;
+ VMM_PVZ_CONNECTION *psVmmPvz;
+
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ PVR_ASSERT(psVmmPvz);
+
+ PVR_ASSERT(psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin);
+
+ eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+ eHeap,
+ peOrigin);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_NOT_IMPLEMENTED)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: VMM/PVZ pfnGetDevPhysHeapOrigin() must be implemented (%s)",
+					__func__,
+ PVRSRVGetErrorStringKM(eError)));
+ }
+
+ goto e0;
+ }
+
+e0:
+ SysVzPvzConnectionRelease(psVmmPvz);
+ return eError;
+}
+
+PVRSRV_ERROR SysVzPvzCreateDevPhysHeaps(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pePhysHeapType,
+ IMG_UINT64 *pui64FwPhysHeapSize,
+ IMG_UINT64 *pui64FwPhysHeapAddr,
+ IMG_UINT64 *pui64GpuPhysHeapSize,
+ IMG_UINT64 *pui64GpuPhysHeapAddr)
+{
+ IMG_UINT64 uiHeapSize;
+ IMG_DEV_PHYADDR sCardBase;
+ IMG_CPU_PHYADDR sStartAddr;
+ PHYS_HEAP_CONFIG *psPhysHeap;
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeap;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+ PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+ PVR_LOGR_IF_FALSE((ui32OSID > 0 && ui32OSID < RGXFW_NUM_OS), "Invalid OSID", eError);
+
+ /* For now, limit support to single device setups */
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ psDevConfig = psDeviceNode->psDevConfig;
+
+	/* Default is a kernel-managed UMA physheap memory configuration */
+ *pui64FwPhysHeapSize = (IMG_UINT64)0;
+ *pui64FwPhysHeapAddr = (IMG_UINT64)0;
+ *pui64GpuPhysHeapSize = (IMG_UINT64)0;
+ *pui64GpuPhysHeapAddr = (IMG_UINT64)0;
+
+ *pePhysHeapType = (IMG_UINT32) SysVzGetMemoryConfigPhysHeapType();
+ for (ePhysHeap = 0; ePhysHeap < PVRSRV_DEVICE_PHYS_HEAP_LAST; ePhysHeap++)
+ {
+ switch (ePhysHeap)
+ {
+ /* Only interested in these physheaps */
+ case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+ case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+ {
+ eError = SysVzGetPhysHeapOrigin(psDevConfig,
+ ePhysHeap,
+ &eHeapOrigin);
+ PVR_LOGR_IF_ERROR(eError, "SysVzGetPhysHeapOrigin");
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+ continue;
+ }
+ }
+ break;
+
+ default:
+ continue;
+ }
+
+ /* Determine what type of physheap backs this phyconfig */
+ psPhysHeap = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+ if (psPhysHeap && psPhysHeap->pasRegions)
+ {
+ /* Services managed physheap (LMA/UMA-carve-out/DMA) */
+ sStartAddr = psPhysHeap->pasRegions[0].sStartAddr;
+ sCardBase = psPhysHeap->pasRegions[0].sCardBase;
+ uiHeapSize = psPhysHeap->pasRegions[0].uiSize;
+
+ if (! uiHeapSize)
+ {
+ /* UMA (i.e. non carve-out), don't re-base so skip */
+ PVR_ASSERT(!sStartAddr.uiAddr && !sCardBase.uiAddr);
+ continue;
+ }
+
+ /* Rebase this guest OSID physical heap */
+ sStartAddr.uiAddr += ui32OSID * uiHeapSize;
+ sCardBase.uiAddr += ui32OSID * uiHeapSize;
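+
+			/* Worked example (sizes illustrative): with a 64 MB carve-out
+			   and ui32OSID == 2, this guest's heap is re-based to span
+			   [base + 128 MB, base + 192 MB), i.e. one uiHeapSize-sized
+			   window per OSID starting from the host's window at OSID 0. */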
+
+ switch (ePhysHeap)
+ {
+ case PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL:
+ *pui64GpuPhysHeapSize = uiHeapSize;
+ *pui64GpuPhysHeapAddr = sStartAddr.uiAddr;
+ break;
+
+ case PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL:
+ *pui64FwPhysHeapSize = uiHeapSize;
+ *pui64FwPhysHeapAddr = sStartAddr.uiAddr;
+ break;
+
+ default:
+ PVR_ASSERT(0);
+ break;
+ }
+ }
+ else
+ {
+#if defined(DEBUG)
+ eError = SysVzGetPhysHeapOrigin(psDevConfig,
+ ePhysHeap,
+ &eHeapOrigin);
+ PVR_LOGR_IF_ERROR(eError, "SysVzGetPhysHeapOrigin");
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ PVR_ASSERT(ePhysHeap != PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL);
+ }
+#endif
+ }
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzPvzDestroyDevPhysHeaps(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+ PVR_UNREFERENCED_PARAMETER(ui32OSID);
+ PVR_UNREFERENCED_PARAMETER(ui32DevID);
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzPvzRegisterFwPhysHeap(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT64 ui64Size,
+ IMG_UINT64 ui64PAddr)
+{
+ PVRSRV_DEVICE_NODE* psDeviceNode;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVRSRV_DEVICE_PHYS_HEAP eHeapType = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+ PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ psDevConfig = psDeviceNode->psDevConfig;
+
+ eError = SysVzGetPhysHeapOrigin(psDevConfig,
+ eHeapType,
+ &eHeapOrigin);
+ PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+#if defined(SUPPORT_RGX)
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+ IMG_DEV_PHYADDR sDevPAddr = {ui64PAddr};
+ eError = RGXVzRegisterFirmwarePhysHeap(psDeviceNode,
+ ui32OSID,
+ sDevPAddr,
+ ui64Size);
+ PVR_LOGG_IF_ERROR(eError, "RGXVzRegisterFirmwarePhysHeap", e0);
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32OSID);
+ PVR_UNREFERENCED_PARAMETER(ui64Size);
+ PVR_UNREFERENCED_PARAMETER(ui64PAddr);
+#endif
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR SysVzPvzUnregisterFwPhysHeap(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_ERROR eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ PVR_LOGR_IF_FALSE((ui32DevID == 0), "Invalid Device ID", eError);
+ PVR_LOGR_IF_FALSE((psPVRSRVData != NULL), "Invalid PVRSRVData", eError);
+
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ psDevConfig = psDeviceNode->psDevConfig;
+
+ eError = SysVzGetPhysHeapOrigin(psDevConfig,
+ eHeap,
+ &eHeapOrigin);
+ PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
+#if defined(SUPPORT_RGX)
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST)
+ {
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ eError = RGXVzUnregisterFirmwarePhysHeap(psDeviceNode, ui32OSID);
+ PVR_LOG_IF_ERROR(eError, "RGXVzUnregisterFirmwarePhysHeap");
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32OSID);
+#endif
+
+e0:
+ return eError;
+}
+
+/******************************************************************************
+ End of file (vz_physheap_common.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_generic.c b/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_generic.c
new file mode 100644
index 00000000000000..d65397d6acdb6b
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_physheap_generic.c
@@ -0,0 +1,410 @@
+/*************************************************************************/ /*!
+@File vz_physheap_generic.c
+@Title System virtualization physheap configuration
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System virtualization physical heap configuration
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv_device.h"
+#include "rgxfwutils.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+
+#if defined(CONFIG_L4)
+static IMG_HANDLE gahPhysHeapIoRemap[PVRSRV_DEVICE_PHYS_HEAP_LAST];
+#endif
+
+static PVRSRV_ERROR
+SysVzCreateDmaPhysHeap(PHYS_HEAP_CONFIG *psPhysHeapConfig)
+{
+ PVRSRV_ERROR eError;
+ DMA_ALLOC *psDmaAlloc;
+ PHYS_HEAP_REGION *psPhysHeapRegion;
+
+ psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+	PVR_LOGR_IF_FALSE((NULL != psPhysHeapRegion->hPrivData), "Missing DMA physheap private data", PVRSRV_ERROR_INVALID_PARAMS);
+
+ psDmaAlloc = (DMA_ALLOC*)psPhysHeapRegion->hPrivData;
+ psDmaAlloc->ui64Size = psPhysHeapRegion->uiSize;
+
+ eError = SysDmaAllocMem(psDmaAlloc);
+ if (eError != PVRSRV_OK)
+ {
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+ }
+ else
+ {
+ psPhysHeapRegion->sStartAddr.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+ psPhysHeapRegion->sCardBase.uiAddr = psDmaAlloc->sBusAddr.uiAddr;
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_DMA;
+ }
+
+ return eError;
+}
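+
+/* Note: on DMA allocation failure the heap type above is reset to UMA while
+   the error is still returned, so the caller's error path runs; the UMA type
+   is only left in place as a consistent fallback state. */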
+
+static void
+SysVzDestroyDmaPhysHeap(PHYS_HEAP_CONFIG *psPhysHeapConfig)
+{
+ DMA_ALLOC *psDmaAlloc;
+ PHYS_HEAP_REGION *psPhysHeapRegion;
+
+ psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+ psDmaAlloc = (DMA_ALLOC*)psPhysHeapRegion->hPrivData;
+
+ if (psDmaAlloc != NULL)
+ {
+ PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address");
+ PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address");
+ PVR_LOG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size");
+
+ SysDmaFreeMem(psDmaAlloc);
+
+ psPhysHeapRegion->sCardBase.uiAddr = 0;
+ psPhysHeapRegion->sStartAddr.uiAddr = 0;
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+ }
+}
+
+static PVRSRV_ERROR
+SysVzCreatePhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeap)
+{
+ IMG_DEV_PHYADDR sHeapAddr;
+ IMG_UINT64 ui64HeapSize = 0;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PHYS_HEAP_REGION *psPhysHeapRegion;
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eHeapOrigin;
+
+ /* Lookup GPU/FW physical heap config, allocate primary region */
+ psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+ PVR_LOGR_IF_FALSE((NULL != psPhysHeapConfig), "Invalid physheap config", PVRSRV_ERROR_INVALID_PARAMS);
+
+ if (psPhysHeapConfig->pasRegions == NULL)
+ {
+ psPhysHeapConfig->pasRegions = OSAllocZMem(sizeof(PHYS_HEAP_REGION));
+ PVR_LOGG_IF_NOMEM(psPhysHeapConfig->pasRegions, "OSAllocZMem", eError, e0);
+
+ PVR_ASSERT(! psPhysHeapConfig->bDynAlloc);
+ psPhysHeapConfig->bDynAlloc = IMG_TRUE;
+ psPhysHeapConfig->ui32NumOfRegions++;
+ }
+
+ if (psPhysHeapConfig->pasRegions[0].hPrivData == NULL)
+ {
+ DMA_ALLOC *psDmaAlloc = OSAllocZMem(sizeof(DMA_ALLOC));
+ PVR_LOGG_IF_NOMEM(psDmaAlloc, "OSAllocZMem", eError, e0);
+
+ psDmaAlloc->pvOSDevice = psDevConfig->pvOSDevice;
+ psPhysHeapConfig->pasRegions[0].hPrivData = psDmaAlloc;
+ }
+
+ /* Lookup physheap addr/size from VM manager type */
+ eError = SysVzGetPhysHeapAddrSize(psDevConfig,
+ ePhysHeap,
+ PHYS_HEAP_TYPE_UMA,
+ &sHeapAddr,
+ &ui64HeapSize);
+ PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapAddrSize", e0);
+
+ /* Initialise physical heap and region state */
+ psPhysHeapRegion = &psPhysHeapConfig->pasRegions[0];
+ psPhysHeapRegion->sStartAddr.uiAddr = sHeapAddr.uiAddr;
+ psPhysHeapRegion->sCardBase.uiAddr = sHeapAddr.uiAddr;
+ psPhysHeapRegion->uiSize = ui64HeapSize;
+
+ if (ePhysHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
+ {
+ /* Firmware physheaps require additional init */
+ psPhysHeapConfig->pszPDumpMemspaceName = "SYSMEM";
+ psPhysHeapConfig->psMemFuncs =
+ psDevConfig->pasPhysHeaps[0].psMemFuncs;
+ }
+
+ /* Which driver is responsible for allocating the
+ physical memory backing the device physheap */
+ eError = SysVzGetPhysHeapOrigin(psDevConfig,
+ ePhysHeap,
+ &eHeapOrigin);
+ PVR_LOGG_IF_ERROR(eError, "SysVzGetPhysHeapOrigin", e0);
+
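+ /* No start address: the physheap is backed either by a services-managed
+    DMA allocation (when a size is specified) or by kernel-managed UMA;
+    a non-zero start address denotes a UMA carve-out region. */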
+ if (psPhysHeapRegion->sStartAddr.uiAddr == 0)
+ {
+ if (psPhysHeapRegion->uiSize)
+ {
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ /* Scale DMA size by the number of OSIDs */
+ psPhysHeapRegion->uiSize *= RGXFW_NUM_OS;
+ }
+
+ eError = SysVzCreateDmaPhysHeap(psPhysHeapConfig);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "SysVzCreateDmaPhysHeap", e0);
+ }
+
+ /* Verify the validity of DMA physheap region */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid DMA physheap start address", e0);
+ PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid DMA physheap card address", e0);
+ PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid DMA physheap size", e0);
+ eError = PVRSRV_OK;
+
+ /* Services managed DMA physheap setup complete */
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_DMA;
+
+ /* Only the PHYS_HEAP_TYPE_DMA should be registered */
+ eError = SysVzRegisterPhysHeap(psDevConfig, ePhysHeap);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_LOGG_IF_ERROR(eError, "SysVzRegisterPhysHeap", e0);
+ }
+
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ /* Restore original physheap size */
+ psPhysHeapRegion->uiSize /= RGXFW_NUM_OS;
+ }
+ }
+ else
+ {
+ if (psPhysHeapConfig->pasRegions[0].hPrivData)
+ {
+ OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+ psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+ }
+
+ if (psPhysHeapConfig->bDynAlloc)
+ {
+ OSFreeMem(psPhysHeapConfig->pasRegions);
+ psPhysHeapConfig->pasRegions = NULL;
+ psPhysHeapConfig->ui32NumOfRegions--;
+ psPhysHeapConfig->bDynAlloc = IMG_FALSE;
+ PVR_LOGG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount", e0);
+ }
+
+ if (ePhysHeap == PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL)
+ {
+ /* Using UMA physheaps for FW has pre-conditions, verify */
+ if (eHeapOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: Invalid firmware physheap config\n"
+ "=>: HOST origin (i.e. static) VZ setups require non-UMA FW physheaps spec.",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_HOST) ? "Host" : "Guest"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ }
+ }
+
+ /* Kernel managed UMA physheap setup complete */
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+ }
+ }
+ else
+ {
+ /* Verify the validity of the UMA carve-out physheap region */
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sStartAddr.uiAddr), "Invalid UMA carve-out physheap start address", e0);
+ PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->sCardBase.uiAddr), "Invalid UMA carve-out physheap card address", e0);
+ PVR_LOGG_IF_FALSE((0 != psPhysHeapRegion->uiSize), "Invalid UMA carve-out physheap size", e0);
+ eError = PVRSRV_OK;
+
+ if (psPhysHeapConfig->pasRegions[0].hPrivData)
+ {
+ /* Need regions but don't require the DMA priv. data */
+ OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+ psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+ }
+
+#if defined(CONFIG_L4)
+ {
+ IMG_UINT64 ui64Offset;
+ IMG_UINT64 ui64BaseAddr;
+ IMG_CPU_VIRTADDR pvCpuVAddr;
+
+ /* On Fiasco.OC/l4linux, ioremap physheap now */
+ gahPhysHeapIoRemap[ePhysHeap] =
+ OSMapPhysToLin(psPhysHeapRegion->sStartAddr,
+ psPhysHeapRegion->uiSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ PVR_LOGG_IF_FALSE((NULL != gahPhysHeapIoRemap[ePhysHeap]), "OSMapPhysToLin", e0);
+ }
+#endif
+
+ /* Services managed UMA carve-out physheap setup complete */
+ psPhysHeapConfig->eType = PHYS_HEAP_TYPE_UMA;
+ }
+
+ return eError;
+
+e0:
+ if (psPhysHeapConfig->pasRegions)
+ {
+ SysVzDeregisterPhysHeap(psDevConfig, ePhysHeap);
+
+ if (psPhysHeapConfig->pasRegions[0].hPrivData)
+ {
+ OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+ psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+ }
+
+ if (psPhysHeapConfig->bDynAlloc)
+ {
+ OSFreeMem(psPhysHeapConfig->pasRegions);
+ psPhysHeapConfig->pasRegions = NULL;
+ psPhysHeapConfig->ui32NumOfRegions--;
+ psPhysHeapConfig->bDynAlloc = IMG_FALSE;
+ PVR_LOG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount");
+ }
+ }
+
+ return eError;
+}
+
+static void
+SysVzDestroyPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP ePhysHeap)
+{
+ PHYS_HEAP_CONFIG *psPhysHeapConfig;
+
+ SysVzDeregisterPhysHeap(psDevConfig, ePhysHeap);
+
+ psPhysHeapConfig = SysVzGetPhysHeapConfig(psDevConfig, ePhysHeap);
+ if (psPhysHeapConfig == NULL ||
+ psPhysHeapConfig->pasRegions == NULL)
+ {
+ return;
+ }
+
+#if defined(CONFIG_L4)
+ if (gahPhysHeapIoRemap[ePhysHeap] != NULL)
+ {
+ OSUnMapPhysToLin(gahPhysHeapIoRemap[ePhysHeap],
+ psPhysHeapConfig->pasRegions[0].uiSize,
+ PVRSRV_MEMALLOCFLAG_CPU_UNCACHED);
+ }
+
+ gahPhysHeapIoRemap[ePhysHeap] = NULL;
+#endif
+
+ if (psPhysHeapConfig->pasRegions[0].hPrivData)
+ {
+ SysVzDestroyDmaPhysHeap(psPhysHeapConfig);
+ OSFreeMem(psPhysHeapConfig->pasRegions[0].hPrivData);
+ psPhysHeapConfig->pasRegions[0].hPrivData = NULL;
+ }
+
+ if (psPhysHeapConfig->bDynAlloc)
+ {
+ OSFreeMem(psPhysHeapConfig->pasRegions);
+ psPhysHeapConfig->pasRegions = NULL;
+ psPhysHeapConfig->ui32NumOfRegions--;
+ psPhysHeapConfig->bDynAlloc = IMG_FALSE;
+ PVR_LOG_IF_FALSE((psPhysHeapConfig->ui32NumOfRegions == 0), "Invalid refcount");
+ }
+}
+
+static PVRSRV_ERROR
+SysVzCreateGpuPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+ return SysVzCreatePhysHeap(psDevConfig, eHeap);
+}
+
+static void
+SysVzDestroyGpuPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL;
+ SysVzDestroyPhysHeap(psDevConfig, eHeap);
+}
+
+static PVRSRV_ERROR
+SysVzCreateFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ return SysVzCreatePhysHeap(psDevConfig, eHeap);
+}
+
+static void
+SysVzDestroyFwPhysHeap(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_DEVICE_PHYS_HEAP eHeap = PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL;
+ SysVzDestroyPhysHeap(psDevConfig, eHeap);
+}
+
+PHYS_HEAP_TYPE SysVzGetMemoryConfigPhysHeapType(void)
+{
+ return PHYS_HEAP_TYPE_UMA;
+}
+
+PVRSRV_ERROR SysVzInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+
+ eError = SysVzCreateFwPhysHeap(psDevConfig);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ eError = SysVzCreateGpuPhysHeap(psDevConfig);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ return eError;
+}
+
+void SysVzDeInitDevPhysHeaps(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ SysVzDestroyGpuPhysHeap(psDevConfig);
+ SysVzDestroyFwPhysHeap(psDevConfig);
+}
+
+/******************************************************************************
+ End of file (vz_physheap_generic.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_support.c b/drivers/gpu/drm/img-rogue/1.10/system/vz_support.c
new file mode 100644
index 00000000000000..08e2e80bb08476
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_support.c
@@ -0,0 +1,342 @@
+/*************************************************************************/ /*!
+@File vz_support.c
+@Title System virtualization configuration setup
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System virtualization configuration support API(s)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "allocmem.h"
+#include "physheap.h"
+#include "rgxdevice.h"
+#include "pvrsrv.h"
+#include "pvrsrv_device.h"
+
+#include "dma_support.h"
+#include "vz_support.h"
+#include "vz_vmm_pvz.h"
+#include "vz_physheap.h"
+#include "vmm_pvz_client.h"
+#include "vmm_pvz_server.h"
+
+static PVRSRV_ERROR
+SysVzPvzConnectionValidate(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ VMM_PVZ_CONNECTION *psVmmPvz;
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DEVICE_PHYS_HEAP_ORIGIN eOrigin = PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST;
+ IMG_UINT64 ui64Size = 0, ui64Addr = 0;
+
+ /*
+ * Acquire the underlying VM manager PVZ connection & validate it.
+ */
+ psVmmPvz = SysVzPvzConnectionAcquire();
+ if (psVmmPvz == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: Unable to acquire PVZ connection",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e0;
+ }
+ else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: pfnGetDevPhysHeapOrigin cannot be NULL",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: pfnGetDevPhysHeapAddrSize cannot be NULL",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_GPU_LOCAL,
+ &ui64Size,
+ &ui64Addr) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: pfnGetDevPhysHeapAddrSize(GPU) must return PVRSRV_OK",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapAddrSize(psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &ui64Size,
+ &ui64Addr) != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: pfnGetDevPhysHeapAddrSize(FW) must return PVRSRV_OK",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (PVRSRV_OK !=
+ psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+ PVRSRV_DEVICE_PHYS_HEAP_FW_LOCAL,
+ &eOrigin))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: Invalid config. function table setup\n"
+ "=>: pfnGetDevPhysHeapOrigin() must return PVRSRV_OK",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_LAST)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: Invalid config. function table setup\n"
+ "=>: pfnGetDevPhysHeapOrigin() returned an invalid physheap origin",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) &&
+ eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_GUEST &&
+ psVmmPvz->sHostFuncTab.pfnMapDevPhysHeap == NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Guest PVZ config: Invalid config. function table setup\n"
+ "=>: implement pfnMapDevPhysHeap() when using GUEST physheap origin",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (!PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) &&
+ (psVmmPvz->sGuestFuncTab.pfnCreateDevConfig == NULL ||
+ psVmmPvz->sGuestFuncTab.pfnDestroyDevConfig == NULL ||
+ psVmmPvz->sGuestFuncTab.pfnCreateDevPhysHeaps == NULL ||
+ psVmmPvz->sGuestFuncTab.pfnDestroyDevPhysHeaps == NULL ||
+ psVmmPvz->sGuestFuncTab.pfnMapDevPhysHeap == NULL ||
+ psVmmPvz->sGuestFuncTab.pfnUnmapDevPhysHeap == NULL))
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Host PVZ config: Invalid guest function table setup\n",
+ __FUNCTION__));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+ else if (eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST &&
+ ui64Size == 0 &&
+ ui64Addr == 0)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: %s PVZ config: Invalid pfnGetDevPhysHeapAddrSize(FW) physheap config.\n"
+ "=>: HEAP_ORIGIN_HOST is not compatible with FW UMA allocator",
+ __FUNCTION__,
+ PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST) ? "Guest" : "Host"));
+ eError = PVRSRV_ERROR_INVALID_PVZ_CONFIG;
+ goto e1;
+ }
+
+ /* Log which PVZ setup type is being used by the driver */
+ if (eOrigin == PVRSRV_DEVICE_PHYS_HEAP_ORIGIN_HOST)
+ {
+ /*
+ * Static PVZ bootstrap setup
+ *
+ * This setup uses host-origin, has no hypercall mechanism & does not support any
+ * out-of-order initialisation of host/guest VMs/drivers. The host driver has all
+ * the information needed to initialize all OSIDs firmware state when it's loaded
+ * and its PVZ layer must mark all guest OSIDs as being online as part of its PVZ
+ * initialisation. Having no out-of-order initialisation support, the guest driver
+ * can only submit a workload to the device after the host driver has completely
+ * initialized the firmware, the VZ hypervisor/VM setup must guarantee this.
+ */
+ PVR_LOG(("Using static PVZ bootstrap setup"));
+ }
+ else
+ {
+ /*
+ * Dynamic PVZ bootstrap setup
+ *
+ * This setup uses guest-origin, has PVZ hypercall mechanism & supports out-of-order
+ * initialisation of host/guest VMs/drivers. The host driver initializes only its
+ * own OSID-0 firmware state when it's loaded; each guest driver then uses its
+ * PVZ interface to hypercall into the host driver, both to synchronise its
+ * initialisation (so it does not submit any workload to the firmware before
+ * the host driver has had a chance to initialize it) and to initialize its
+ * own OSID-x firmware state.
+ */
+ PVR_LOG(("Using dynamic PVZ bootstrap setup"));
+ }
+
+e1:
+ SysVzPvzConnectionRelease(psVmmPvz);
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR SysVzDevInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+ RGX_DATA* psDevData = psDevConfig->hDevData;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+ /* Initialise pvz connection */
+ eError = SysVzPvzConnectionInit();
+ PVR_LOGR_IF_ERROR(eError, "SysVzPvzConnectionInit");
+
+ /* Ensure pvz connection is configured correctly */
+ eError = SysVzPvzConnectionValidate(psDevConfig);
+ PVR_LOGR_IF_ERROR(eError, "SysVzPvzConnectionValidate");
+
+ psPVRSRVData->abVmOnline[0] = IMG_TRUE;
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ /* Undo any functionality not supported in guest drivers */
+ psDevData->psRGXTimingInfo->bEnableRDPowIsland = IMG_FALSE;
+ psDevData->psRGXTimingInfo->bEnableActivePM = IMG_FALSE;
+ psDevConfig->pfnPrePowerState = NULL;
+ psDevConfig->pfnPostPowerState = NULL;
+
+ /* Perform additional guest-specific device
+ configuration initialisation */
+ eError = SysVzCreateDevConfig(psDevConfig);
+ PVR_LOGR_IF_ERROR(eError, "SysVzCreateDevConfig");
+
+ eError = SysVzCreateDevPhysHeaps(psDevConfig);
+ PVR_LOGR_IF_ERROR(eError, "SysVzCreateDevPhysHeaps");
+ }
+
+ /* Perform general device physheap initialisation */
+ eError = SysVzInitDevPhysHeaps(psDevConfig);
+ PVR_LOGR_IF_ERROR(eError, "SysVzInitDevPhysHeaps");
+
+ return eError;
+}
+
+PVRSRV_ERROR SysVzDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_VZ_RET_IF_MODE(DRIVER_MODE_NATIVE, PVRSRV_OK);
+
+ SysVzDeInitDevPhysHeaps(psDevConfig);
+ if (PVRSRV_VZ_MODE_IS(DRIVER_MODE_GUEST))
+ {
+ SysVzDestroyDevPhysHeaps(psDevConfig);
+ SysVzDestroyDevConfig(psDevConfig);
+ }
+
+ SysVzPvzConnectionDeInit();
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR SysVzCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+
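+ /* PVRSRV_ERROR_NOT_IMPLEMENTED is treated as success below: the
+    underlying VM manager is not required to implement this pvz call */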
+ eError = PvzClientCreateDevConfig(psDevConfig, 0);
+ eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+ PVR_LOG_IF_ERROR(eError, "PvzClientCreateDevConfig");
+
+ return eError;
+}
+
+PVRSRV_ERROR SysVzDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig)
+{
+ PVRSRV_ERROR eError;
+
+ eError = PvzClientDestroyDevConfig(psDevConfig, 0);
+ eError = (eError == PVRSRV_ERROR_NOT_IMPLEMENTED) ? PVRSRV_OK : eError;
+ PVR_LOG_IF_ERROR(eError, "SysVzDestroyDevConfig");
+
+ return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzCreateDevConfig(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+ IMG_UINT64 *pui64RegsCpuPBase)
+{
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_DEVICE_CONFIG *psDevConfig;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (ui32OSID == 0 ||
+ ui32DevID != 0 ||
+ psPVRSRVData == NULL ||
+ ui32OSID >= RGXFW_NUM_OS)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ /* For now, limit support to single device setups */
+ psDevNode = psPVRSRVData->psDeviceNodeList;
+ psDevConfig = psDevNode->psDevConfig;
+
+ /* Copy across guest VM device config information, here
+ we assume this is the same across VMs and host */
+ *pui64RegsCpuPBase = psDevConfig->sRegsCpuPBase.uiAddr;
+ *pui32RegsSize = psDevConfig->ui32RegsSize;
+ *pui32IRQ = psDevConfig->ui32IRQ;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SysVzPvzDestroyDevConfig(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID)
+{
+ if (ui32OSID == 0 ||
+ ui32DevID != 0 ||
+ ui32OSID >= RGXFW_NUM_OS)
+ {
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return PVRSRV_OK;
+}
+
+/******************************************************************************
+ End of file (vz_support.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_support.h b/drivers/gpu/drm/img-rogue/1.10/system/vz_support.h
new file mode 100644
index 00000000000000..d0d526b2b55dea
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_support.h
@@ -0,0 +1,126 @@
+/*************************************************************************/ /*!
+@File vz_support.h
+@Title System virtualization support API(s)
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides the system virtualization API(s)
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_SUPPORT_H_
+#define _VZ_SUPPORT_H_
+
+#include "osfunc.h"
+#include "pvrsrv.h"
+
+/*!
+******************************************************************************
+ @Function SysVzDevInit
+
+ @Description Entry into system virtualization per device configuration
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDevInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzDevDeInit
+
+ @Description Exit from system virtualization per device configuration
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDevDeInit(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzCreateDevConfig
+
+ @Description Guest para-virtualization initialization per device
+ configuration.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzCreateDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzDestroyDevConfig
+
+ @Description Guest para-virtualization deinitialization per device
+ configuration.
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzDestroyDevConfig(PVRSRV_DEVICE_CONFIG *psDevConfig);
+
+/*!
+******************************************************************************
+ @Function SysVzPvzCreateDevConfig
+
+ @Description Server para-virtz handler for client SysVzCreateDevConfig
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzCreateDevConfig(IMG_UINT32 ui32OSID,
+ IMG_UINT32 ui32DevID,
+ IMG_UINT32 *pui32IRQ,
+ IMG_UINT32 *pui32RegsSize,
+ IMG_UINT64 *pui64RegsCpuPBase);
+
+/*!
+******************************************************************************
+ @Function SysVzPvzDestroyDevConfig
+
+ @Description Server para-virtz handler for client SysVzDestroyDevConfig
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzDestroyDevConfig(IMG_UINT32 ui32OSID, IMG_UINT32 ui32DevID);
+
+#endif /* _VZ_SUPPORT_H_ */
+
+/*****************************************************************************
+ End of file (vz_support.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_vm.h b/drivers/gpu/drm/img-rogue/1.10/system/vz_vm.h
new file mode 100644
index 00000000000000..f74eb6aac4fd2e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_vm.h
@@ -0,0 +1,61 @@
+/*************************************************************************/ /*!
+@File vz_vm.h
+@Title System virtualization VM support APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides VM management support APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_SUPPORT_VM_H_
+#define _VZ_SUPPORT_VM_H_
+
+#include "pvrsrv.h"
+
+PVRSRV_ERROR SysVzIsVmOnline(IMG_UINT32 ui32OSID);
+
+PVRSRV_ERROR SysVzPvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority);
+
+PVRSRV_ERROR SysVzPvzOnVmOffline(IMG_UINT32 ui32OSid);
+
+PVRSRV_ERROR SysVzPvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue);
+
+#endif /* _VZ_SUPPORT_VM_H_ */
+
+/*****************************************************************************
+ End of file (vz_vm.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.c b/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.c
new file mode 100644
index 00000000000000..f3ed39c507b7c0
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.c
@@ -0,0 +1,114 @@
+/*************************************************************************/ /*!
+@File vz_vmm_pvz.c
+@Title VM manager para-virtualization APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description VM manager para-virtualization management
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "allocmem.h"
+#include "pvrsrv.h"
+#include "vz_vmm_pvz.h"
+
+PVRSRV_ERROR SysVzPvzConnectionInit(void)
+{
+ PVRSRV_ERROR eError;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ /* Create para-virtualization connection lock */
+ eError = OSLockCreate(&psPVRSRVData->hPvzConnectionLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OSLockCreate failed (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto e0;
+ }
+
+ /* Create VM manager para-virtualization connection */
+ eError = VMMCreatePvzConnection((VMM_PVZ_CONNECTION **)&psPVRSRVData->hPvzConnection);
+ if (eError != PVRSRV_OK)
+ {
+ OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+ psPVRSRVData->hPvzConnectionLock = NULL;
+
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: Unable to create PVZ connection (%s)",
+ __FUNCTION__,
+ PVRSRVGetErrorStringKM(eError)));
+
+ goto e0;
+ }
+
+e0:
+ return eError;
+}
+
+void SysVzPvzConnectionDeInit(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ VMMDestroyPvzConnection(psPVRSRVData->hPvzConnection);
+ psPVRSRVData->hPvzConnection = NULL;
+
+ OSLockDestroy(psPVRSRVData->hPvzConnectionLock);
+ psPVRSRVData->hPvzConnectionLock = NULL;
+}
+
+VMM_PVZ_CONNECTION* SysVzPvzConnectionAcquire(void)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVR_ASSERT(psPVRSRVData->hPvzConnection != NULL);
+ return psPVRSRVData->hPvzConnection;
+}
+
+void SysVzPvzConnectionRelease(VMM_PVZ_CONNECTION *psParaVz)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ /* Nothing to do, sanity check the pointer passed back */
+ PVR_ASSERT(psParaVz == psPVRSRVData->hPvzConnection);
+}
+
+/******************************************************************************
+ End of file (vz_vmm_pvz.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.h b/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.h
new file mode 100644
index 00000000000000..99d2a143def166
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_pvz.h
@@ -0,0 +1,85 @@
+/*************************************************************************/ /*!
+@File vz_vmm_pvz.h
+@Title System virtualization VM manager management APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description This header provides VM manager para-virtz management APIs
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef _VZ_VMM_PVZ_H_
+#define _VZ_VMM_PVZ_H_
+
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "vmm_impl.h"
+
+/*!
+******************************************************************************
+ @Function SysVzPvzConnectionInit() and SysVzPvzConnectionDeInit()
+
+ @Description SysVzPvzConnectionInit initializes the VM manager para-virtz
+ which is used subsequently for communication between guest and
+ host; depending on the underlying VM setup, this could be
+ either a hyper-call or a cross-VM call
+
+ @Return PVRSRV_ERROR PVRSRV_OK on success. Otherwise, a PVRSRV_ERROR code
+ ******************************************************************************/
+PVRSRV_ERROR SysVzPvzConnectionInit(void);
+void SysVzPvzConnectionDeInit(void);
+
+/*!
+******************************************************************************
+ @Function SysVzPvzConnectionAcquire() and SysVzPvzConnectionRelease()
+
+ @Description These acquire/release a handle to the VM manager para-virtz
+ connection used to make a pvz call; on the client, use it to
+ issue the actual pvz call and, on the server handler / VM
+ manager, use it to complete the processing for the pvz call
+ or to make a VM manager to host pvz bridge call
+
+ @Return VMM_PVZ_CONNECTION* on success. Otherwise, NULL
+ ******************************************************************************/
+VMM_PVZ_CONNECTION* SysVzPvzConnectionAcquire(void);
+void SysVzPvzConnectionRelease(VMM_PVZ_CONNECTION *psPvzConnection);
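+
+/* Minimal usage sketch (error handling elided; eError, eOrigin, psDevConfig
+ * and ePhysHeap are assumed to be in scope, as in the callers in
+ * vz_support.c):
+ *
+ *   VMM_PVZ_CONNECTION *psVmmPvz = SysVzPvzConnectionAcquire();
+ *   if (psVmmPvz != NULL)
+ *   {
+ *       eError = psVmmPvz->sConfigFuncTab.pfnGetDevPhysHeapOrigin(psDevConfig,
+ *                                                                 ePhysHeap,
+ *                                                                 &eOrigin);
+ *       SysVzPvzConnectionRelease(psVmmPvz);
+ *   }
+ */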
+
+#endif /* _VZ_VMM_PVZ_H_ */
+
+/*****************************************************************************
+ End of file (vz_vmm_pvz.h)
+*****************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_vm.c b/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_vm.c
new file mode 100644
index 00000000000000..ca52df4f8cdf31
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/system/vz_vmm_vm.c
@@ -0,0 +1,243 @@
+/*************************************************************************/ /*!
+@File vz_vmm_vm.c
+@Title System virtualization VM support APIs
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description System virtualization VM support functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#include "osfunc.h"
+#include "pvrsrv.h"
+#include "img_types.h"
+#include "pvrsrv.h"
+#include "pvrsrv_error.h"
+#include "vz_vm.h"
+#include "rgxfwutils.h"
+
+PVRSRV_ERROR
+SysVzIsVmOnline(IMG_UINT32 ui32OSID)
+{
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+
+ if (ui32OSID == 0 || ui32OSID >= RGXFW_NUM_OS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: invalid OSID (%d)",
+ __FUNCTION__, ui32OSID));
+
+ return PVRSRV_ERROR_INVALID_PVZ_OSID;
+ }
+
+ if (!psPVRSRVData->abVmOnline[ui32OSID])
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OSID %d is already disabled.",
+ __FUNCTION__, ui32OSID));
+
+ return PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE;
+ }
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+SysVzPvzOnVmOnline(IMG_UINT32 ui32OSid, IMG_UINT32 ui32Priority)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ if (ui32OSid == 0 || ui32OSid >= RGXFW_NUM_OS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: invalid OSID (%d)",
+ __FUNCTION__, ui32OSid));
+
+ return PVRSRV_ERROR_INVALID_PVZ_OSID;
+ }
+
+ if (psPVRSRVData->abVmOnline[ui32OSid])
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OSID %d is already enabled.",
+ __FUNCTION__, ui32OSid));
+ return PVRSRV_ERROR_PVZ_OSID_IS_ONLINE;
+ }
+
+ /* For now, limit support to single device setups */
+ psDevNode = psPVRSRVData->psDeviceNodeList;
+ psDevInfo = psDevNode->pvDevice;
+
+ if (psDevNode->eDevState == PVRSRV_DEVICE_STATE_INIT)
+ {
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSAcquireBridgeLock();
+#endif
+
+ /* Firmware not initialized yet, do it here */
+ eError = PVRSRVDeviceInitialise(psDevNode);
+ if (eError != PVRSRV_OK)
+ {
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ /* Release the bridge lock on this failure path too */
+ OSReleaseBridgeLock();
+#endif
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: failed to initialize firmware (%s)",
+ __FUNCTION__, PVRSRVGetErrorStringKM(eError)));
+ goto e0;
+ }
+#if defined(PVRSRV_USE_BRIDGE_LOCK)
+ OSReleaseBridgeLock();
+#endif
+ }
+
+ /* request new priority and enable OS */
+
+ eError = RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_ONLINE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ psPVRSRVData->abVmOnline[ui32OSid] = IMG_TRUE;
+
+ eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Priority);
+
+e0:
+ return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzOnVmOffline(IMG_UINT32 ui32OSid)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDevNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ if (ui32OSid == 0 || ui32OSid >= RGXFW_NUM_OS)
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: invalid OSID (%d)",
+ __FUNCTION__, ui32OSid));
+
+ return PVRSRV_ERROR_INVALID_PVZ_OSID;
+ }
+
+ if (!psPVRSRVData->abVmOnline[ui32OSid])
+ {
+ PVR_DPF((PVR_DBG_ERROR,
+ "%s: OSID %d is already disabled.",
+ __FUNCTION__, ui32OSid));
+ return PVRSRV_ERROR_PVZ_OSID_IS_OFFLINE;
+ }
+
+ /* For now, limit support to single device setups */
+ psDevNode = psPVRSRVData->psDeviceNodeList;
+ psDevInfo = psDevNode->pvDevice;
+
+ eError = RGXFWSetVMOnlineState(psDevInfo, ui32OSid, RGXFWIF_OS_OFFLINE);
+ if (eError == PVRSRV_OK)
+ {
+ psPVRSRVData->abVmOnline[ui32OSid] = IMG_FALSE;
+ }
+
+ return eError;
+}
+
+PVRSRV_ERROR
+SysVzPvzVMMConfigure(VMM_CONF_PARAM eVMMParamType, IMG_UINT32 ui32ParamValue)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+
+ PVRSRV_DATA *psPVRSRVData = PVRSRVGetPVRSRVData();
+ PVRSRV_DEVICE_NODE *psDeviceNode;
+ PVRSRV_RGXDEV_INFO *psDevInfo;
+
+ psDeviceNode = psPVRSRVData->psDeviceNodeList;
+ psDevInfo = psDeviceNode->pvDevice;
+
+ switch(eVMMParamType)
+ {
+#if defined(SUPPORT_RGX)
+ case VMM_CONF_PRIO_OSID0:
+ case VMM_CONF_PRIO_OSID1:
+ case VMM_CONF_PRIO_OSID2:
+ case VMM_CONF_PRIO_OSID3:
+ case VMM_CONF_PRIO_OSID4:
+ case VMM_CONF_PRIO_OSID5:
+ case VMM_CONF_PRIO_OSID6:
+ case VMM_CONF_PRIO_OSID7:
+ {
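+ /* Assumes the VMM_CONF_PRIO_OSID0..7 enum values start at zero so the
+    parameter type maps directly onto an OSID */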
+ IMG_UINT32 ui32OSid = eVMMParamType;
+ IMG_UINT32 ui32Prio = ui32ParamValue;
+
+ if (ui32OSid < RGXFW_NUM_OS)
+ {
+ eError = RGXFWChangeOSidPriority(psDevInfo, ui32OSid, ui32Prio);
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_INVALID_PVZ_OSID;
+ }
+ break;
+ }
+ case VMM_CONF_ISOL_THRES:
+ {
+ IMG_UINT32 ui32Threshold = ui32ParamValue;
+ eError = RGXFWSetOSIsolationThreshold(psDevInfo, ui32Threshold);
+ break;
+ }
+ case VMM_CONF_HCS_DEADLINE:
+ {
+ IMG_UINT32 ui32HCSDeadline = ui32ParamValue;
+ eError = RGXFWSetHCSDeadline(psDevInfo, ui32HCSDeadline);
+ break;
+ }
+#else
+ PVR_UNREFERENCED_PARAMETER(ui32ParamValue);
+#endif
+ default:
+ {
+ eError = PVRSRV_ERROR_INVALID_PARAMS;
+ }
+ }
+
+ return eError;
+}
+
+/******************************************************************************
+ End of file (vz_vmm_vm.c)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlclient.c b/drivers/gpu/drm/img-rogue/1.10/tlclient.c
new file mode 100644
index 00000000000000..90d35cc235f53c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlclient.c
@@ -0,0 +1,506 @@
+/*************************************************************************/ /*!
+@File tlclient.c
+@Title Services Transport Layer shared API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common API used in both clients and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+/* DESIGN NOTE
+ * This transport layer consumer-role API was created as a shared API when a
+ * client wanted to read the data of a TL stream from within the KM server
+ * driver. This was in addition to the existing clients supported externally
+ * by the UM client library component via PVR API layer.
+ * This shared API is thus used by the PVR TL API in the client library and
+ * by clients internal to the server driver module. It depends on
+ * client entry points of the TL and DEVMEM bridge modules. These entry points
+ * encapsulate from the TL shared API whether a direct bridge or an indirect
+ * (ioctl) bridge is used.
+ * One reason for needing this layer centres around the fact that some of the
+ * API functions make multiple bridge calls and the logic that glues these
+ * together is common regardless of client location. Further this layer has
+ * allowed the defensive coding that checks parameters to move into the PVR
+ * API layer where untrusted clients enter giving a more efficient KM code path.
+ */
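+
+/* A minimal usage sketch of the read path using the entry points defined in
+ * this file; the stream name "hwperf" and the ProcessPackets() helper are
+ * hypothetical placeholders and error handling is elided:
+ *
+ *   IMG_HANDLE hSD;
+ *   IMG_PBYTE  pBuf;
+ *   IMG_UINT32 ui32Len;
+ *
+ *   eError = TLClientOpenStream(hSrvHandle, "hwperf", 0, &hSD);
+ *   eError = TLClientAcquireData(hSrvHandle, hSD, &pBuf, &ui32Len);
+ *   if (ui32Len != 0)
+ *   {
+ *       ProcessPackets(pBuf, ui32Len);
+ *       eError = TLClientReleaseData(hSrvHandle, hSD);
+ *   }
+ *   eError = TLClientCloseStream(hSrvHandle, hSD);
+ */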
+
+#include "img_defs.h"
+#include "pvrsrv_error.h"
+#include "pvr_debug.h"
+#include "osfunc.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlclient.h"
+#include "pvrsrv_tlcommon.h"
+#include "client_pvrtl_bridge.h"
+
+/* Defines/Constants
+ */
+
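+/* Sentinel value for uiReadOffset/uiReadLen meaning no acquire outstanding */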
+#define NO_ACQUIRE 0xffffffffU
+
+/* User-side stream descriptor structure.
+ */
+typedef struct _TL_STREAM_DESC_
+{
+ /* Handle on kernel-side stream descriptor*/
+ IMG_HANDLE hServerSD;
+
+ /* Stream data buffer variables */
+ DEVMEM_MEMDESC* psUMmemDesc;
+ IMG_PBYTE pBaseAddr;
+
+ /* Offset in bytes into the circular buffer and valid only after
+ * an Acquire call and undefined after a release. */
+ IMG_UINT32 uiReadOffset;
+
+ /* Always a positive integer when the Acquire call returns and a release
+ * is outstanding. Undefined at all other times. */
+ IMG_UINT32 uiReadLen;
+
+ /* Flag indicating if the RESERVE_TOO_BIG error was already printed.
+ * It's used to reduce number of errors in kernel log. */
+ IMG_BOOL bPrinted;
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE* phSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC *psSD = NULL;
+ IMG_HANDLE hTLPMR;
+ IMG_HANDLE hTLImportHandle;
+ IMG_DEVMEM_SIZE_T uiImportSize;
+ IMG_UINT32 ui32MemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(pszName);
+ PVR_ASSERT(phSD);
+ *phSD = NULL;
+
+ /* Allocate memory for the stream descriptor object, initialise with
+ * "no data read" yet. */
+ psSD = OSAllocZMem(sizeof(TL_STREAM_DESC));
+ if (psSD == NULL)
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLOpenStream: KM returned %d", eError));
+ goto e0;
+ }
+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+ /* Send open stream request to kernel server to get stream handle and
+ * buffer cookie so we can get access to the buffer in this process. */
+ eError = BridgeTLOpenStream(hSrvHandle, pszName, ui32Mode,
+ &psSD->hServerSD, &hTLPMR);
+ if (eError != PVRSRV_OK)
+ {
+ if ((ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT) &&
+ (eError == PVRSRV_ERROR_TIMEOUT))
+ {
+ goto e1;
+ }
+ PVR_LOGG_IF_ERROR(eError, "BridgeTLOpenStream", e1);
+ }
+
+ /* Convert server export cookie into a cookie for use by this client */
+ eError = DevmemMakeLocalImportHandle(hSrvHandle,
+ hTLPMR, &hTLImportHandle);
+ PVR_LOGG_IF_ERROR(eError, "DevmemMakeLocalImportHandle", e2);
+
+ ui32MemFlags |= ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE : 0;
+ /* Now convert client cookie into a client handle on the buffer's
+ * physical memory region */
+ eError = DevmemLocalImport(hSrvHandle,
+ hTLImportHandle,
+ ui32MemFlags,
+ &psSD->psUMmemDesc,
+ &uiImportSize,
+ "TLBuffer");
+ PVR_LOGG_IF_ERROR(eError, "DevmemImport", e3);
+
+ /* Now map the memory into the virtual address space of this process. */
+ eError = DevmemAcquireCpuVirtAddr(psSD->psUMmemDesc, (void **)
+ &psSD->pBaseAddr);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e4);
+
+ /* Ignore error, not much that can be done */
+ (void) DevmemUnmakeLocalImportHandle(hSrvHandle,
+ hTLImportHandle);
+
+ /* Return client descriptor handle to caller */
+ *phSD = psSD;
+ return PVRSRV_OK;
+
+/* Clean up post buffer setup */
+e4:
+ DevmemFree(psSD->psUMmemDesc);
+e3:
+ (void) DevmemUnmakeLocalImportHandle(hSrvHandle,
+ hTLImportHandle);
+/* Clean up post stream open */
+e2:
+ BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+
+/* Cleanup post allocation of the descriptor object */
+e1:
+ OSFreeMem(psSD);
+
+e0:
+ return eError;
+}
+
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+
+ /* Check the caller provided connection is valid */
+ if (!psSD->hServerSD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientCloseStream: descriptor already closed/not open"));
+ return PVRSRV_ERROR_HANDLE_NOT_FOUND;
+ }
+
+ /* Check if acquire is outstanding, perform release if it is, ignore result
+ * as there is not much we can do if it is an error other than close */
+ if (psSD->uiReadLen != NO_ACQUIRE)
+ {
+ (void) BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+ psSD->uiReadOffset, psSD->uiReadLen);
+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+ }
+
+ /* Clean up DevMem resources used for this stream in this client */
+ DevmemReleaseCpuVirtAddr(psSD->psUMmemDesc);
+
+ DevmemFree(psSD->psUMmemDesc);
+
+ /* Send close to server to clean up kernel mode resources for this
+ * handle and release the memory. */
+ eError = BridgeTLCloseStream(hSrvHandle, psSD->hServerSD);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLCloseStream: KM returned %d", eError));
+ /* Not much we can do with error, fall through to clean up
+ * return eError; */
+ }
+
+ OSCachedMemSet(psSD, 0x00, sizeof(TL_STREAM_DESC));
+ OSFreeMem (psSD);
+
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR *pszNamePattern,
+ IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+ IMG_UINT32 *pui32NumFound)
+{
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(pszNamePattern);
+ PVR_ASSERT(pui32NumFound);
+
+ return BridgeTLDiscoverStreams(hSrvHandle,
+ pszNamePattern,
+ /* treat the 2D array of stream names as
+ * one-dimensional for the bridge call */
+ *pui32NumFound * PRVSRVTL_MAX_STREAM_NAME_SIZE,
+ (IMG_CHAR *) aszStreams,
+ pui32NumFound);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+ IMG_UINT32 ui32BufferOffset, ui32Dummy;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ppui8Data);
+ PVR_ASSERT(ui32Size);
+
+ eError = BridgeTLReserveStream(hSrvHandle, psSD->hServerSD,
+ &ui32BufferOffset, ui32Size, ui32Size,
+ &ui32Dummy);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+ IMG_UINT32 ui32BufferOffset;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ppui8Data);
+ PVR_ASSERT(ui32Size);
+
+ eError = BridgeTLReserveStream(hSrvHandle, psSD->hServerSD,
+ &ui32BufferOffset, ui32Size, ui32SizeMin,
+ pui32Available);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ *ppui8Data = psSD->pBaseAddr + ui32BufferOffset;
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ui32Size);
+
+ eError = BridgeTLCommitStream(hSrvHandle, psSD->hServerSD, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ return eError;
+ }
+
+ return PVRSRV_OK;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_PBYTE* ppPacketBuf,
+ IMG_UINT32* pui32BufLen)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ppPacketBuf);
+ PVR_ASSERT(pui32BufLen);
+
+ /* Check Acquire has not been called twice in a row without a release */
+ if (psSD->uiReadOffset != NO_ACQUIRE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientAcquireData: acquire already outstanding"));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ *pui32BufLen = 0;
+ /* Ask the kernel server for the next chunk of data to read */
+ eError = BridgeTLAcquireData(hSrvHandle, psSD->hServerSD,
+ &psSD->uiReadOffset, &psSD->uiReadLen);
+ if (eError != PVRSRV_OK)
+ {
+ if ((eError != PVRSRV_ERROR_RESOURCE_UNAVAILABLE) &&
+ (eError != PVRSRV_ERROR_TIMEOUT))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLAcquireData: KM returned %d", eError));
+ }
+ psSD->uiReadOffset = psSD->uiReadLen = NO_ACQUIRE;
+ return eError;
+ }
+
+ /* Return the data offset and length to the caller if bytes are available
+ * to be read. Could be zero for non-blocking mode. */
+ if (psSD->uiReadLen)
+ {
+ *ppPacketBuf = psSD->pBaseAddr + psSD->uiReadOffset;
+ *pui32BufLen = psSD->uiReadLen;
+ }
+ else
+ {
+ /* On a non-blocking open, zero-length data could be returned from the
+ * server, which is basically a no-acquire operation */
+ *ppPacketBuf = NULL;
+ *pui32BufLen = 0;
+ }
+
+ return eError;
+}
+
+static PVRSRV_ERROR _TLClientReleaseDataLen(
+ IMG_HANDLE hSrvHandle,
+ TL_STREAM_DESC* psSD,
+ IMG_UINT32 uiReadLen)
+{
+ PVRSRV_ERROR eError;
+
+	/* The previous acquire did not return any data, so this is a no-op */
+ if (psSD->uiReadLen == 0)
+ {
+ return PVRSRV_OK;
+ }
+
+ /* Check release has not been called twice in a row without an acquire */
+ if (psSD->uiReadOffset == NO_ACQUIRE)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData_: no acquire to release"));
+ return PVRSRV_ERROR_RETRY;
+ }
+
+ /* Inform the kernel to release the data from the buffer */
+ eError = BridgeTLReleaseData(hSrvHandle, psSD->hServerSD,
+ psSD->uiReadOffset, uiReadLen);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "BridgeTLReleaseData: KM returned %d", eError));
+ /* Need to continue to keep client data consistent, fall through
+ * return eError */
+ }
+
+ /* Reset state to indicate no outstanding acquire */
+ psSD->uiReadLen = psSD->uiReadOffset = NO_ACQUIRE;
+
+ return eError;
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD)
+{
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+
+ return _TLClientReleaseDataLen(hSrvHandle, psSD, psSD->uiReadLen);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseDataLess(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen)
+{
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+
+ /* Check the specified size is within the size returned by Acquire */
+ if (uiActualReadLen > psSD->uiReadLen)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientReleaseData_: no acquire to release"));
+ return PVRSRV_ERROR_INVALID_PARAMS;
+ }
+
+ return _TLClientReleaseDataLen(hSrvHandle, psSD, uiActualReadLen);
+}
+
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *pui8Data)
+{
+ PVRSRV_ERROR eError;
+ TL_STREAM_DESC* psSD = (TL_STREAM_DESC*) hSD;
+
+ PVR_ASSERT(hSrvHandle);
+ PVR_ASSERT(hSD);
+ PVR_ASSERT(ui32Size);
+ PVR_ASSERT(pui8Data);
+
+ eError = BridgeTLWriteData(hSrvHandle, psSD->hServerSD, ui32Size, pui8Data);
+ if (eError != PVRSRV_OK)
+ {
+ if (eError == PVRSRV_ERROR_STREAM_FULL)
+ {
+ if (!psSD->bPrinted)
+ {
+ psSD->bPrinted = IMG_TRUE;
+ PVR_DPF((PVR_DBG_ERROR, "Not enough space. Failed to write"
+ " data to the stream (%d).", eError));
+ }
+ }
+ else if (eError == PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TL packet size limit exceeded. "
+ "Failed to write data to the stream (%d).", eError));
+ }
+ else
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLClientWriteData: KM returned %d",
+ eError));
+ }
+ }
+
+ return eError;
+}
+
+/******************************************************************************
+ End of file (tlclient.c)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlclient.h b/drivers/gpu/drm/img-rogue/1.10/tlclient.h
new file mode 100644
index 00000000000000..1704082ab0031e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlclient.h
@@ -0,0 +1,256 @@
+/*************************************************************************/ /*!
+@File tlclient.h
+@Title Services Transport Layer shared API
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport layer common API used in both clients and server
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef TLCLIENT_H_
+#define TLCLIENT_H_
+
+
+#include "img_defs.h"
+#include "pvrsrv_tlcommon.h"
+#include "pvrsrv_error.h"
+
+
+/* This value is used for the hSrvHandle argument in the client API when
+ * called directly from the kernel which will lead to a direct bridge access.
+ */
+#define DIRECT_BRIDGE_HANDLE ((IMG_HANDLE)0xDEADBEEFU)
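+
+/* Illustrative sketch (not part of the original source): a kernel-side
+ * caller that has no client connection can pass DIRECT_BRIDGE_HANDLE as
+ * hSrvHandle so the calls below route directly to the bridge. The stream
+ * name "ttest" is an arbitrary example:
+ *
+ *     IMG_HANDLE hSD;
+ *     PVRSRV_ERROR eErr = TLClientOpenStream(DIRECT_BRIDGE_HANDLE,
+ *                                            "ttest", 0, &hSD);
+ */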
+
+
+/**************************************************************************/ /*!
+ @Function TLClientOpenStream
+ @Description Open a descriptor onto an existing kernel transport stream.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input pszName Address of the stream name string, no longer
+ than PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Input ui32Mode Unused
+ @Output         phSD            Address of a pointer to a stream object
+ @Return PVRSRV_ERROR_NOT_FOUND:        when named stream not found
+ @Return PVRSRV_ERROR_ALREADY_OPEN:     stream already opened by another client
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_TIMEOUT: block timed out, stream not found
+ @Return PVRSRV_ERROR: for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientOpenStream(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ IMG_HANDLE* phSD);
+
+
+/**************************************************************************/ /*!
+ @Function TLClientCloseStream
+ @Description Close and release the stream connection to Services kernel
+ server transport layer. Any outstanding Acquire will be
+ released.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to close
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle is not known
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR: for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCloseStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD);
+
+/**************************************************************************/ /*!
+ @Function TLClientDiscoverStreams
+ @Description    Finds all streams whose names begin with pszNamePattern.
+ @Input          hSrvHandle      Address of a pointer to a connection object
+ @Input          pszNamePattern  Name pattern that the beginning of a stream
+                                 name must match.
+ @Output         aszStreams      Array filled with the names of the
+                                 discovered streams.
+ @InOut          pui32NumFound   On input, the maximum number of names that
+                                 fit into aszStreams; on output, the number
+                                 of discovered streams.
+ @Return PVRSRV_ERROR for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientDiscoverStreams(IMG_HANDLE hSrvHandle,
+ const IMG_CHAR *pszNamePattern,
+ IMG_CHAR aszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+ IMG_UINT32 *pui32NumFound);
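+
+/* Illustrative usage sketch (an assumption, not part of this file): the
+ * caller passes the array capacity in through *pui32NumFound and reads the
+ * number of discovered streams back from the same variable. The pattern
+ * "hwperf" is only an example:
+ *
+ *     IMG_CHAR aszNames[8][PRVSRVTL_MAX_STREAM_NAME_SIZE];
+ *     IMG_UINT32 ui32Found = 8;   // capacity on input
+ *
+ *     eErr = TLClientDiscoverStreams(hSrv, "hwperf", aszNames, &ui32Found);
+ *     // on success, ui32Found holds the number of names written
+ */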
+
+/**************************************************************************/ /*!
+ @Function TLClientReserveStream
+ @Description    Reserves a region of the given size in the stream. If the
+                 stream is already reserved the function will return an error.
+ @Input          hSrvHandle      Address of a pointer to a connection object
+ @Input          hSD             Handle of the stream object to reserve space in
+ @Output         ppui8Data       Pointer to the reserved buffer region
+ @Input          ui32Size        Size of the data, in bytes
+ @Return         PVRSRV_OK on success or a PVRSRV_ERROR code on failure
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size);
+
+/**************************************************************************/ /*!
+ @Function       TLClientReserveStream2
+ @Description    Reserves a region of the given size in the stream. If the
+                 stream is already reserved the function will return an error.
+ @Input          hSrvHandle      Address of a pointer to a connection object
+ @Input          hSD             Handle of the stream object to reserve space in
+ @Output         ppui8Data       Pointer to the reserved buffer region
+ @Input          ui32Size        Size of the data, in bytes
+ @Input          ui32SizeMin     Minimum acceptable size, in bytes
+ @Output         pui32Available  Available space in the buffer when less
+                                 than ui32Size could be reserved
+ @Return         PVRSRV_OK on success or a PVRSRV_ERROR code on failure
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReserveStream2(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32 *pui32Available);
+
+/**************************************************************************/ /*!
+ @Function       TLClientCommitStream
+ @Description    Commits a previously reserved region in the stream and
+                 therefore allows subsequent reserves.
+                 This call has to be preceded by a call to
+                 TLClientReserveStream or TLClientReserveStream2.
+ @Input          hSrvHandle      Address of a pointer to a connection object
+ @Input          hSD             Handle of the stream object to commit to
+ @Input          ui32Size        Size of the data
+ @Return         PVRSRV_OK on success or a PVRSRV_ERROR code on failure
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientCommitStream(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size);
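+
+/* Illustrative usage sketch (an assumption, not part of this file): a writer
+ * reserves space, fills it in place and then commits it; pvSrc and ui32Len
+ * are hypothetical caller variables:
+ *
+ *     IMG_UINT8 *pui8Dest;
+ *
+ *     eErr = TLClientReserveStream(hSrv, hSD, &pui8Dest, ui32Len);
+ *     if (eErr == PVRSRV_OK)
+ *     {
+ *         memcpy(pui8Dest, pvSrc, ui32Len);
+ *         eErr = TLClientCommitStream(hSrv, hSD, ui32Len);
+ *     }
+ */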
+
+/**************************************************************************/ /*!
+ @Function TLClientAcquireData
+ @Description When there is data available in the stream buffer this call
+ returns with the address and length of the data buffer the
+ client can safely read. This buffer may contain one or more
+ packets of data.
+ If no data is available then this call blocks until it becomes
+ available. However if the stream has been destroyed while
+ waiting then a resource unavailable error will be returned
+ to the caller. Clients must pair this call with a
+ ReleaseData call.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to read
+ @Output         ppPacketBuf     Address of a pointer to a byte buffer. On exit
+ pointer contains address of buffer to read from
+ @Output puiBufLen Pointer to an integer. On exit it is the size
+ of the data to read from the packet buffer
+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle not known
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_RETRY: release not called beforehand
+ @Return PVRSRV_ERROR_TIMEOUT: block timed out, no data
+ @Return PVRSRV_ERROR: for other system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientAcquireData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_PBYTE* ppPacketBuf,
+ IMG_UINT32* puiBufLen);
+
+
+/**************************************************************************/ /*!
+ @Function TLClientReleaseData
+ @Description Called after client has read the stream data out of the buffer
+ The data is subsequently flushed from the stream buffer to make
+ room for more data packets from the stream source.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to read
+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle not known to TL
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_RETRY: acquire not called beforehand
+ @Return PVRSRV_ERROR: for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD);
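+
+/* Illustrative usage sketch (an assumption, not part of this file): a reader
+ * pairs every successful Acquire with a Release once the packets have been
+ * consumed; ProcessPackets is a hypothetical consumer:
+ *
+ *     IMG_PBYTE pBuf;
+ *     IMG_UINT32 ui32Len;
+ *
+ *     eErr = TLClientAcquireData(hSrv, hSD, &pBuf, &ui32Len);
+ *     if (eErr == PVRSRV_OK)
+ *     {
+ *         if (ui32Len > 0)
+ *             ProcessPackets(pBuf, ui32Len);
+ *         eErr = TLClientReleaseData(hSrv, hSD);
+ *     }
+ */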
+
+/**************************************************************************/ /*!
+ @Function TLClientReleaseDataLess
+ @Description    Called after the client has read only some data out of the
+                 buffer and wishes to complete the read early, i.e. does not
+                 want to read the full data that the acquire call returned,
+                 e.g. reads just one packet from the stream.
+ The data is subsequently flushed from the stream buffer to make
+ room for more data packets from the stream source.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input hSD Handle of the stream object to read
+ @Input uiActualReadLen Size of data read, in bytes. Must be on a TL
+ packet boundary.
+ @Return PVRSRV_ERROR_INVALID_PARAMS: when read length too big
+ @Return PVRSRV_ERROR_RESOURCE_UNAVAILABLE: when stream no longer exists
+ @Return PVRSRV_ERROR_HANDLE_NOT_FOUND: when SD handle not known to TL
+ @Return PVRSRV_ERROR_STREAM_ERROR: internal driver state error
+ @Return PVRSRV_ERROR_RETRY: acquire not called beforehand
+ @Return PVRSRV_ERROR: for system codes
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientReleaseDataLess(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD, IMG_UINT32 uiActualReadLen);
+
+/**************************************************************************/ /*!
+ @Function TLClientWriteData
+ @Description Writes data to the stream.
+ @Input hSrvHandle Address of a pointer to a connection object
+ @Input          hSD             Handle of the stream object to write to
+ @Input          ui32Size        Size of the data
+ @Input          pui8Data        Pointer to data
+ @Return         PVRSRV_OK on success or a PVRSRV_ERROR code on failure
+*/ /***************************************************************************/
+IMG_INTERNAL
+PVRSRV_ERROR TLClientWriteData(IMG_HANDLE hSrvHandle,
+ IMG_HANDLE hSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *pui8Data);
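+
+/* Illustrative usage sketch (an assumption, not part of this file):
+ * TLClientWriteData copies the caller's buffer in a single call, so no
+ * Reserve/Commit pairing is needed:
+ *
+ *     eErr = TLClientWriteData(hSrv, hSD, ui32Len, pui8Payload);
+ *     if (eErr == PVRSRV_ERROR_STREAM_FULL)
+ *     {
+ *         // the stream had no room for ui32Len bytes; retry later
+ *     }
+ */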
+
+
+#endif /* TLCLIENT_H_ */
+
+/******************************************************************************
+ End of file (tlclient.h)
+******************************************************************************/
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlintern.c b/drivers/gpu/drm/img-rogue/1.10/tlintern.c
new file mode 100644
index 00000000000000..603c0e73cdabb3
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlintern.c
@@ -0,0 +1,436 @@
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer kernel side API implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport Layer functions available to driver components in
+ the driver.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "devicemem.h"
+
+#include "pvrsrv_tlcommon.h"
+#include "tlintern.h"
+
+/*
+ * Make functions
+ */
+PTL_STREAM_DESC
+TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3)
+{
+ PTL_STREAM_DESC ps = OSAllocZMem(sizeof(TL_STREAM_DESC));
+ if (ps == NULL)
+ {
+ return NULL;
+ }
+ ps->psNode = f1;
+ ps->ui32Flags = f2;
+ ps->hReadEvent = f3;
+ ps->uiRefCount = 1;
+ return ps;
+}
+
+PTL_SNODE
+TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4)
+{
+ PTL_SNODE ps = OSAllocZMem(sizeof(TL_SNODE));
+ if (ps == NULL)
+ {
+ return NULL;
+ }
+ ps->hReadEventObj = f2;
+ ps->psStream = f3;
+ ps->psRDesc = f4;
+ f3->psNode = ps;
+ return ps;
+}
+
+/*
+ * Transport Layer Global top variables and functions
+ */
+static TL_GLOBAL_DATA sTLGlobalData;
+
+TL_GLOBAL_DATA *TLGGD(void) // TLGetGlobalData()
+{
+ return &sTLGlobalData;
+}
+
+/* TLInit must only be called once at driver initialisation.
+ * An assert is provided to check this condition on debug builds.
+ */
+PVRSRV_ERROR
+TLInit(void)
+{
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT (sTLGlobalData.hTLGDLock == NULL && sTLGlobalData.hTLEventObj == NULL);
+
+	/* Allocate a lock for TL global data, to be used while updating the TL data.
+	 * This is for making TL global data multi-thread safe */
+ eError = OSLockCreate (&sTLGlobalData.hTLGDLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e0;
+ }
+
+ /* Allocate the event object used to signal global TL events such as
+ * new stream created */
+ eError = OSEventObjectCreate("TLGlobalEventObj", &sTLGlobalData.hTLEventObj);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+
+ PVR_DPF_RETURN_OK;
+
+/* Don't allow the driver to start up on error */
+e1:
+ OSLockDestroy (sTLGlobalData.hTLGDLock);
+ sTLGlobalData.hTLGDLock = NULL;
+e0:
+ PVR_DPF_RETURN_RC (eError);
+}
+
+static void RemoveAndFreeStreamNode(PTL_SNODE psRemove)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE* last;
+ PTL_SNODE psn;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ /* Unlink the stream node from the master list */
+ PVR_ASSERT(psGD->psHead);
+ last = &psGD->psHead;
+ for (psn = psGD->psHead; psn; psn=psn->psNext)
+ {
+ if (psn == psRemove)
+ {
+ /* Other calling code may have freed and zeroed the pointers */
+ if (psn->psRDesc)
+ {
+ OSFreeMem(psn->psRDesc);
+ psn->psRDesc = NULL;
+ }
+ if (psn->psStream)
+ {
+ OSFreeMem(psn->psStream);
+ psn->psStream = NULL;
+ }
+ *last = psn->psNext;
+ break;
+ }
+ last = &psn->psNext;
+ }
+
+ /* Release the event list object owned by the stream node */
+ if (psRemove->hReadEventObj)
+ {
+ eError = OSEventObjectDestroy(psRemove->hReadEventObj);
+ PVR_LOG_IF_ERROR(eError, "OSEventObjectDestroy");
+
+ psRemove->hReadEventObj = NULL;
+ }
+
+ /* Release the memory of the stream node */
+ OSFreeMem(psRemove);
+
+ PVR_DPF_RETURN;
+}
+
+void
+TLDeInit(void)
+{
+ PVR_DPF_ENTERED;
+
+ if (sTLGlobalData.uiClientCnt)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "TLDeInit transport layer but %d client streams are still connected", sTLGlobalData.uiClientCnt));
+ sTLGlobalData.uiClientCnt = 0;
+ }
+
+ /* Clean up the SNODE list */
+ if (sTLGlobalData.psHead)
+ {
+ while (sTLGlobalData.psHead)
+ {
+ RemoveAndFreeStreamNode(sTLGlobalData.psHead);
+ }
+ /* Leave psHead NULL on loop exit */
+ }
+
+ /* Clean up the TL global event object */
+ if (sTLGlobalData.hTLEventObj)
+ {
+ OSEventObjectDestroy(sTLGlobalData.hTLEventObj);
+ sTLGlobalData.hTLEventObj = NULL;
+ }
+
+ /* Destroy the TL global data lock */
+ if (sTLGlobalData.hTLGDLock)
+ {
+ OSLockDestroy (sTLGlobalData.hTLGDLock);
+ sTLGlobalData.hTLGDLock = NULL;
+ }
+
+ PVR_DPF_RETURN;
+}
+
+void TLAddStreamNode(PTL_SNODE psAdd)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psAdd);
+ psAdd->psNext = TLGGD()->psHead;
+ TLGGD()->psHead = psAdd;
+
+ PVR_DPF_RETURN;
+}
+
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psn;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(pszName);
+
+ for (psn = psGD->psHead; psn; psn=psn->psNext)
+ {
+ if (psn->psStream && OSStringCompare(psn->psStream->szName, pszName)==0)
+ {
+ PVR_DPF_RETURN_VAL(psn);
+ }
+ }
+
+ PVR_DPF_RETURN_VAL(NULL);
+}
+
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psn;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psDesc);
+
+ for (psn = psGD->psHead; psn; psn=psn->psNext)
+ {
+ if (psn->psRDesc == psDesc || psn->psWDesc == psDesc)
+ {
+ PVR_DPF_RETURN_VAL(psn);
+ }
+ }
+ PVR_DPF_RETURN_VAL(NULL);
+}
+
+static inline IMG_BOOL IsDigit(IMG_CHAR c)
+{
+ return c >= '0' && c <= '9';
+}
+
+static inline IMG_BOOL ReadNumber(const IMG_CHAR *pszBuffer,
+ IMG_UINT32 *pui32Number)
+{
+ IMG_CHAR acTmp[11] = {0}; // max 10 digits
+ IMG_UINT32 ui32Result;
+ IMG_UINT i;
+
+ for (i = 0; i < sizeof(acTmp) - 1; i++)
+ {
+ if (!IsDigit(*pszBuffer))
+ break;
+ acTmp[i] = *pszBuffer++;
+ }
+
+ /* if there are no digits or there is something after the number */
+ if (i == 0 || *pszBuffer != '\0')
+ return IMG_FALSE;
+
+ if (OSStringToUINT32(acTmp, 10, &ui32Result) != PVRSRV_OK)
+ return IMG_FALSE;
+
+ *pui32Number = ui32Result;
+
+ return IMG_TRUE;
+}
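+
+/* Illustrative examples (an assumption, not part of this file) of how
+ * ReadNumber behaves:
+ *
+ *     IMG_UINT32 n;
+ *     ReadNumber("42", &n);     // IMG_TRUE,  n == 42
+ *     ReadNumber("42x", &n);    // IMG_FALSE, trailing character
+ *     ReadNumber("", &n);       // IMG_FALSE, no digits
+ */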
+
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+ IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+ IMG_UINT32 ui32Max)
+{
+ TL_GLOBAL_DATA *psGD = TLGGD();
+ PTL_SNODE psn;
+ IMG_UINT32 ui32Count = 0;
+ size_t uiLen;
+
+ PVR_ASSERT(pszNamePattern);
+
+ if ((uiLen = OSStringLength(pszNamePattern)) == 0)
+ return 0;
+
+ for (psn = psGD->psHead; psn; psn = psn->psNext)
+ {
+ if (OSStringNCompare(pszNamePattern, psn->psStream->szName, uiLen) != 0)
+ continue;
+
+		/* If aaszStreams is NULL we only count how many streams match
+		 * the given pattern. If it's a valid pointer we also return
+		 * the names. */
+ if (aaszStreams != NULL)
+ {
+ if (ui32Count >= ui32Max)
+ break;
+
+			/* all names are shorter than the maximum length and NUL-terminated */
+ OSStringNCopy(aaszStreams[ui32Count], psn->psStream->szName,
+ PRVSRVTL_MAX_STREAM_NAME_SIZE);
+ }
+
+ ui32Count++;
+ }
+
+ return ui32Count;
+}
+
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc)
+{
+ PTL_SNODE psn;
+
+ PVR_DPF_ENTERED;
+
+ psn = TLFindStreamNodeByDesc(psDesc);
+ if (psn == NULL)
+ PVR_DPF_RETURN_VAL(NULL);
+
+ PVR_ASSERT(psDesc == psn->psWDesc);
+
+ psn->uiWRefCount++;
+ psDesc->uiRefCount++;
+
+ PVR_DPF_RETURN_VAL(psn);
+}
+
+void TLReturnStreamNode(PTL_SNODE psNode)
+{
+ psNode->uiWRefCount--;
+ psNode->psWDesc->uiRefCount--;
+
+ PVR_ASSERT(psNode->uiWRefCount > 0);
+ PVR_ASSERT(psNode->psWDesc->uiRefCount > 0);
+}
+
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psRemove);
+
+ /* If there is a client connected to this stream, defer stream's deletion */
+ if (psRemove->psRDesc != NULL || psRemove->psWDesc != NULL)
+ {
+ PVR_DPF_RETURN_VAL (IMG_FALSE);
+ }
+
+ /* Remove stream from TL_GLOBAL_DATA's list and free stream node */
+ psRemove->psStream = NULL;
+ RemoveAndFreeStreamNode(psRemove);
+
+ PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
+
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psNodeToRemove,
+ PTL_STREAM_DESC psSD)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psNodeToRemove);
+ PVR_ASSERT(psSD);
+
+	/* Decrement the reference count. For a descriptor obtained by the reader
+	 * it must reach 0 (only a single reader is allowed), and for descriptors
+	 * obtained by writers it must reach a value greater than or equal to 0
+	 * (multiple-writer model). */
+ psSD->uiRefCount--;
+
+ if (psSD == psNodeToRemove->psRDesc)
+ {
+ PVR_ASSERT(0 == psSD->uiRefCount);
+ /* Remove stream descriptor (i.e. stream reader context) */
+ psNodeToRemove->psRDesc = NULL;
+ }
+ else if (psSD == psNodeToRemove->psWDesc)
+ {
+ PVR_ASSERT(0 <= psSD->uiRefCount);
+
+ psNodeToRemove->uiWRefCount--;
+
+ /* Remove stream descriptor if reference == 0 */
+ if (0 == psSD->uiRefCount)
+ {
+ psNodeToRemove->psWDesc = NULL;
+ }
+ }
+
+ /* Do not Free Stream Node if there is a write reference (a producer
+ * context) to the stream */
+ if (NULL != psNodeToRemove->psRDesc || NULL != psNodeToRemove->psWDesc ||
+ 0 != psNodeToRemove->uiWRefCount)
+ {
+ PVR_DPF_RETURN_VAL (IMG_FALSE);
+ }
+
+	/* Make the stream pointer NULL to prevent it from being destroyed in
+	 * RemoveAndFreeStreamNode; cleanup of the stream should be done by the
+	 * calling context */
+ psNodeToRemove->psStream = NULL;
+ RemoveAndFreeStreamNode(psNodeToRemove);
+
+ PVR_DPF_RETURN_VAL (IMG_TRUE);
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlintern.h b/drivers/gpu/drm/img-rogue/1.10/tlintern.h
new file mode 100644
index 00000000000000..de465fe40e4e4d
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlintern.h
@@ -0,0 +1,320 @@
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer internals
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport Layer header used by TL internally
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLINTERN_H__
+#define __TLINTERN_H__
+
+
+#include "devicemem_typedefs.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+#include "lock.h"
+#include "tlstream.h"
+
+/* Forward declarations */
+typedef struct _TL_SNODE_* PTL_SNODE;
+
+/* To debug buffer utilisation enable this macro here and
+ * define PVRSRV_NEED_PVR_TRACE in the server pvr_debug.c and in tutils.c
+ * before the inclusion of pvr_debug.h. Issue pvrtutils 6 on target to see
+ * stream buffer utilisation. */
+//#define TL_BUFFER_STATS 1
+
+/*! TL stream structure container.
+ *    pbyBuffer   holds the circular buffer.
+ *    ui32Read    points to the beginning of the buffer, i.e. to where the
+ *                data to read begins.
+ *    ui32Write   points to the end of the data that has been committed, i.e.
+ *                this is where new data will be written.
+ *    ui32Pending is the number of bytes reserved in the last reserve call
+ *                which have not yet been submitted, so this data is not yet
+ *                ready to be transported.
+ *    hStreamWLock - provides atomic protection for the ui32Pending & ui32Write
+ *                   members of the structure for when they are checked and/or
+ *                   updated in the context of a stream writer (producer)
+ *                   calling DoTLStreamReserve() & TLStreamCommit().
+ *                 - The reader context is not multi-threaded; only one client
+ *                   per stream is allowed. Also note the read context may be
+ *                   in an ISR, which prevents a design where locks can be held
+ *                   in the AcquireData/ReleaseData() calls. Thus this lock
+ *                   only protects the stream members from simultaneous writers.
+ *
+ *      ui32Read < ui32Write <= ui32Pending
+ *    where the < and <= operators are overloaded to make sense in a circular way.
+ */
+typedef struct _TL_STREAM_
+{
+ IMG_CHAR szName[PRVSRVTL_MAX_STREAM_NAME_SIZE]; /*!< String name identifier */
+ PVRSRV_DEVICE_NODE *psDevNode; /*!< Underlying device on which the stream's buffer is allocated */
+ TL_OPMODE eOpMode; /*!< Mode of Operation of TL Buffer */
+
+ IMG_BOOL bWaitForEmptyOnDestroy; /*!< Flag: On destroying a non-empty stream block until
+ * stream is drained. */
+ IMG_BOOL bNoSignalOnCommit; /*!< Flag: Used to avoid the TL signalling waiting consumers
+ * that new data is available on every commit. Producers
+ * using this flag will need to manually signal when
+ * appropriate using the TLStreamSync() API */
+
+ void (*pfOnReaderOpenCallback)(void *); /*!< Optional on reader connect callback */
+ void *pvOnReaderOpenUserData; /*!< On reader connect user data */
+ void (*pfProducerCallback)(void); /*!< Optional producer callback of type TL_STREAM_SOURCECB */
+ void *pvProducerUserData; /*!< Producer callback user data */
+
+ struct _TL_STREAM_ *psNotifStream; /*!< Pointer to the stream to which notification will be sent */
+
+ volatile IMG_UINT32 ui32Read; /*!< Pointer to the beginning of available data */
+ volatile IMG_UINT32 ui32Write; /*!< Pointer to already committed data which are ready to be
+ * copied to user space */
+ IMG_UINT32 ui32Pending; /*!< Count pending bytes reserved in buffer */
+ IMG_UINT32 ui32Size; /*!< Buffer size */
+	IMG_UINT32 ui32MaxPacketSize; /*!< Max TL packet size */
+ IMG_BYTE *pbyBuffer; /*!< Actual data buffer */
+
+ PTL_SNODE psNode; /*!< Ptr to parent stream node */
+ DEVMEM_MEMDESC *psStreamMemDesc; /*!< MemDescriptor used to allocate buffer space through PMR */
+
+ IMG_HANDLE hProducerEvent; /*!< Handle to wait on if there is not enough space */
+ IMG_HANDLE hProducerEventObj; /*!< Handle to signal blocked reserve calls */
+
+ POS_LOCK hStreamWLock; /*!< Writers Lock for ui32Pending & ui32Write*/
+ POS_LOCK hReadLock; /*!< Readers Lock for bReadPending & ui32Read*/
+ IMG_BOOL bReadPending; /*!< Tracks if a read operation is pending or not*/
+
+#if defined(TL_BUFFER_STATS)
+ IMG_UINT32 ui32CntReadFails; /*!< Tracks how many times reader failed to acquire read lock */
+ IMG_UINT32 ui32CntReadSuccesses; /*!< Tracks how many times reader acquires read lock successfully */
+ IMG_UINT32 ui32CntWriteSuccesses; /*!< Tracks how many times writer acquires read lock successfully */
+ IMG_UINT32 ui32CntWriteWaits; /*!< Tracks how many times writer had to wait to acquire read lock */
+ IMG_UINT32 ui32CntNumWriteSuccess; /*!< Tracks how many write operations were successful*/
+ IMG_UINT32 ui32BufferUt; /*!< Buffer utilisation high watermark, see TL_BUFFER_STATS above */
+#endif
+} TL_STREAM, *PTL_STREAM;
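+
+/* Illustrative sketch (an assumption, not driver code) of how the circular
+ * ordering above can be read: the space between the write and read positions
+ * wraps around the buffer end, e.g.
+ *
+ *     static IMG_UINT32 CircularSpace(const TL_STREAM *psS)
+ *     {
+ *         return (psS->ui32Write < psS->ui32Read) ?
+ *                 psS->ui32Read - psS->ui32Write :
+ *                 psS->ui32Size - psS->ui32Write + psS->ui32Read;
+ *     }
+ *
+ * The real reserve/commit arithmetic lives in tlstream.c; this is only to
+ * show what "overloaded in a circular way" means.
+ */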
+
+/* There needs to be enough space reserved in the buffer for 2 minimal packets,
+ * and it needs to be aligned the same way the buffer is, or there will be a
+ * compile error. */
+#define BUFFER_RESERVED_SPACE (2*PVRSRVTL_PACKET_ALIGNMENT)
+
+/* ensure the space reserved follows the buffer's alignment */
+static_assert(!(BUFFER_RESERVED_SPACE&(PVRSRVTL_PACKET_ALIGNMENT-1)),
+ "BUFFER_RESERVED_SPACE must be a multiple of PVRSRVTL_PACKET_ALIGNMENT");
+
+/* Define the largest value that a 32-bit unsigned integer, the type used
+ * for stream offsets, can hold */
+#define MAX_UINT 0xffffFFFF
+
+/*! Defines the value used for TL_STREAM.ui32Pending when no reserve is
+ * outstanding on the stream. */
+#define NOTHING_PENDING IMG_UINT32_MAX
+
+
+/*
+ * Transport Layer Stream Descriptor types/defs
+ */
+typedef struct _TL_STREAM_DESC_
+{
+ PTL_SNODE psNode; /*!< Ptr to parent stream node */
+ IMG_UINT32 ui32Flags;
+ IMG_HANDLE hReadEvent; /*!< For wait call (only used/set in reader descriptors) */
+ IMG_INT uiRefCount; /*!< Reference count to the SD */
+
+#if defined(TL_BUFFER_STATS)
+	/* Behaviour counters, no multi-threading protection needed as they are
+	 * incremented in a single thread due to only one reader being supported
+	 * at present */
+	IMG_UINT32 ui32AcquireCount; /*!< Counter used for analysing stream performance, see ++ loc */
+	IMG_UINT32 ui32NoData; /*!< Counter used for analysing stream performance, see ++ loc */
+	IMG_UINT32 ui32NoDataSleep; /*!< Counter used for analysing stream performance, see ++ loc */
+	IMG_UINT32 ui32Signalled; /*!< Counter used for analysing stream performance, see ++ loc */
+	IMG_UINT32 ui32TimeoutEmpty; /*!< Counter used for analysing stream performance, see ++ loc */
+	IMG_UINT32 ui32TimeoutData; /*!< Counter used for analysing stream performance, see ++ loc */
+#endif
+ IMG_UINT32 ui32ReadLen; /*!< Size of data returned by initial Acquire */
+} TL_STREAM_DESC, *PTL_STREAM_DESC;
+
+PTL_STREAM_DESC TLMakeStreamDesc(PTL_SNODE f1, IMG_UINT32 f2, IMG_HANDLE f3);
+
+#define TL_STREAM_KM_FLAG_MASK 0xFFFF0000
+#define TL_STREAM_FLAG_TEST 0x10000000
+#define TL_STREAM_FLAG_WRAPREAD 0x00010000
+
+#define TL_STREAM_UM_FLAG_MASK 0x0000FFFF
+
+#if defined(TL_BUFFER_STATS)
+# define TL_COUNTER_INC(a) ((a)++)
+#else
+# define TL_COUNTER_INC(a)
+#endif
+/*
+ * Transport Layer stream list node
+ */
+typedef struct _TL_SNODE_
+{
+ struct _TL_SNODE_* psNext; /*!< Linked list next element */
+ IMG_HANDLE hReadEventObj; /*!< Readers 'wait for data' event */
+ PTL_STREAM psStream; /*!< TL Stream object */
+ IMG_INT uiWRefCount; /*!< Stream writer reference count */
+ PTL_STREAM_DESC psRDesc; /*!< Stream reader 0 or ptr only */
+ PTL_STREAM_DESC psWDesc; /*!< Stream writer 0 or ptr only */
+} TL_SNODE;
+
+PTL_SNODE TLMakeSNode(IMG_HANDLE f2, TL_STREAM *f3, TL_STREAM_DESC *f4);
+
+/*
+ * Transport Layer global top types and variables
+ * Use access function to obtain pointer.
+ *
+ * hTLGDLock - provides atomicity over read/check/write operations and
+ * sequence of operations on uiClientCnt, psHead list of SNODEs and
+ * the immediate members in a list element SNODE structure.
+ * - This larger scope of responsibility for this lock helps avoid
+ * the need for a lock in the SNODE structure.
+ * - Lock held in the client (reader) context when streams are
+ * opened/closed and in the server (writer) context when streams
+ * are created/open/closed.
+ */
+typedef struct _TL_GDATA_
+{
+ IMG_HANDLE hTLEventObj; /* Global TL signal object, new streams, etc */
+
+ IMG_UINT uiClientCnt; /* Counter to track the number of client stream connections. */
+ PTL_SNODE psHead; /* List of TL streams and associated client handle */
+
+ POS_LOCK hTLGDLock; /* Lock for structure AND psHead SNODE list */
+} TL_GLOBAL_DATA, *PTL_GLOBAL_DATA;
+
+/*
+ * Transport Layer Internal Kernel-Mode Server API
+ */
+TL_GLOBAL_DATA* TLGGD(void); /* TLGetGlobalData() */
+
+PVRSRV_ERROR TLInit(void);
+void TLDeInit(void);
+
+void TLAddStreamNode(PTL_SNODE psAdd);
+PTL_SNODE TLFindStreamNodeByName(const IMG_CHAR *pszName);
+PTL_SNODE TLFindStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+IMG_UINT32 TLDiscoverStreamNodes(const IMG_CHAR *pszNamePattern,
+ IMG_CHAR aaszStreams[][PRVSRVTL_MAX_STREAM_NAME_SIZE],
+ IMG_UINT32 ui32Max);
+PTL_SNODE TLFindAndGetStreamNodeByDesc(PTL_STREAM_DESC psDesc);
+void TLReturnStreamNode(PTL_SNODE psNode);
+
+/****************************************************************************************
+ Function Name : TLTryRemoveStreamAndFreeStreamNode
+
+ Inputs : PTL_SNODE Pointer to the TL_SNODE whose stream is requested
+ to be removed from TL_GLOBAL_DATA's list
+
+ Return Value : IMG_TRUE - If the stream was made NULL and this
+ TL_SNODE was removed from the
+ TL_GLOBAL_DATA's list
+
+ IMG_FALSE - If the stream wasn't made NULL as there
+ is a client connected to this stream
+
+ Description     : If there is no client currently connected to this stream,
+                   this function removes this TL_SNODE from
+                   TL_GLOBAL_DATA's list. The caller is responsible for the
+                   cleanup of the TL_STREAM whose TL_SNODE may be removed.
+
+                   Otherwise, this function does nothing.
+*****************************************************************************************/
+IMG_BOOL TLTryRemoveStreamAndFreeStreamNode(PTL_SNODE psRemove);
+
+/*****************************************************************************************
+ Function Name : TLUnrefDescAndTryFreeStreamNode
+
+ Inputs : PTL_SNODE Pointer to the TL_SNODE whose descriptor is
+ requested to be removed
+ : PTL_STREAM_DESC Pointer to the STREAM_DESC
+
+ Return Value : IMG_TRUE - If this TL_SNODE was removed from the
+ TL_GLOBAL_DATA's list
+
+ IMG_FALSE - Otherwise
+
+ Description     : This function removes the stream descriptor from this
+                   TL_SNODE and, if there is no writer (producer context)
+                   currently bound to this stream, removes this TL_SNODE from
+                   TL_GLOBAL_DATA's list. The caller is responsible for the
+                   cleanup of the TL_STREAM whose TL_SNODE may be removed.
+******************************************************************************************/
+IMG_BOOL TLUnrefDescAndTryFreeStreamNode(PTL_SNODE psRemove, PTL_STREAM_DESC psSD);
+
+/*
+ * Transport Layer stream interface to server part declared here to avoid
+ * circular dependency.
+ */
+IMG_UINT32 TLStreamAcquireReadPos(PTL_STREAM psStream,
+ IMG_BOOL bDisableCallback,
+ IMG_UINT32* puiReadOffset);
+PVRSRV_ERROR TLStreamAdvanceReadPos(PTL_STREAM psStream,
+ IMG_UINT32 uiReadLen,
+ IMG_UINT32 uiOrigReadLen);
+
+DEVMEM_MEMDESC* TLStreamGetBufferPointer(PTL_STREAM psStream);
+IMG_BOOL TLStreamEOS(PTL_STREAM psStream);
+
+/****************************************************************************************
+ Function Name : TLStreamDestroy
+
+ Inputs : PTL_STREAM Pointer to the TL_STREAM to be destroyed
+
+ Description : This function performs all the clean-up operations required for
+ destruction of this stream
+*****************************************************************************************/
+void TLStreamDestroy (PTL_STREAM);
+
+/*
+ * Test related functions
+ */
+PVRSRV_ERROR TUtilsInit (PVRSRV_DEVICE_NODE *psDeviceNode);
+PVRSRV_ERROR TUtilsDeinit (PVRSRV_DEVICE_NODE *psDeviceNode);
+
+
+#endif /* __TLINTERN_H__ */
+/******************************************************************************
+ End of file (tlintern.h)
+******************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlserver.c b/drivers/gpu/drm/img-rogue/1.10/tlserver.c
new file mode 100644
index 00000000000000..bc4b830b9d824e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlserver.c
@@ -0,0 +1,714 @@
+/*************************************************************************/ /*!
+@File
+@Title KM server Transport Layer implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main bridge APIs for Transport Layer client functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include "img_defs.h"
+
+/*#define PVR_DPF_FUNCTION_TRACE_ON 1*/
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "connection_server.h"
+#include "allocmem.h"
+#include "devicemem.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+#include "tlserver.h"
+
+#include "pvrsrv_tlstreams.h"
+
+#define NO_STREAM_WAIT_PERIOD 2000000ULL
+#define NO_DATA_WAIT_PERIOD 1000000ULL
+#define NO_ACQUIRE 0xffffffffU
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+PVRSRV_ERROR
+TLServerOpenStreamKM(const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ PTL_STREAM_DESC* ppsSD,
+ PMR** ppsTLPMR)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PVRSRV_ERROR eErrorEO = PVRSRV_OK;
+ PTL_SNODE psNode;
+ PTL_STREAM psStream;
+ TL_STREAM_DESC *psNewSD = NULL;
+ IMG_HANDLE hEvent;
+ IMG_BOOL bIsWriteOnly = ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ?
+ IMG_TRUE : IMG_FALSE;
+ IMG_BOOL bResetOnOpen = ui32Mode & PVRSRV_STREAM_FLAG_RESET_ON_OPEN ?
+ IMG_TRUE : IMG_FALSE;
+ IMG_BOOL bNoOpenCB = ui32Mode & PVRSRV_STREAM_FLAG_IGNORE_OPEN_CALLBACK ?
+ IMG_TRUE : IMG_FALSE;
+ PTL_GLOBAL_DATA psGD = TLGGD();
+
+#if defined(PVR_DPF_FUNCTION_TRACE_ON)
+ PVR_DPF((PVR_DBG_CALLTRACE, "--> %s:%d entered (%s, %x)", __func__, __LINE__, pszName, ui32Mode));
+#endif
+
+ PVR_ASSERT(pszName);
+
+	/* Acquire the TL_GLOBAL_DATA lock here: if the following TLFindStreamNodeByName
+	 * returns a non-NULL PTL_SNODE, we update the global data client count and the
+	 * PTL_SNODE's psRDesc, and we want to make sure the TL_SNODE is valid (e.g. has
+	 * not been deleted) while we are updating it.
+	 */
+ OSLockAcquire (psGD->hTLGDLock);
+
+ psNode = TLFindStreamNodeByName(pszName);
+ if ((psNode == NULL) && (ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WAIT))
+ { /* Blocking code to wait for stream to be created if it does not exist */
+ eError = OSEventObjectOpen(psGD->hTLEventObj, &hEvent);
+ PVR_LOGG_IF_ERROR (eError, "OSEventObjectOpen", e0);
+
+ do
+ {
+ if ((psNode = TLFindStreamNodeByName(pszName)) == NULL)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Stream %s does not exist, waiting...", pszName));
+
+ /* Release TL_GLOBAL_DATA lock before sleeping */
+ OSLockRelease (psGD->hTLGDLock);
+
+ /* Will exit OK or with timeout, both cases safe to ignore */
+ eErrorEO = OSEventObjectWaitTimeout(hEvent, NO_STREAM_WAIT_PERIOD);
+
+ /* Acquire lock after waking up */
+ OSLockAcquire (psGD->hTLGDLock);
+ }
+ }
+ while ((psNode == NULL) && (eErrorEO == PVRSRV_OK));
+
+ eError = OSEventObjectClose(hEvent);
+ PVR_LOGG_IF_ERROR (eError, "OSEventObjectClose", e0);
+ }
+
+ /* Make sure we have found a stream node after wait/search */
+ if (psNode == NULL)
+ {
+		/* If we exited the wait with a timeout, inform the caller */
+ if (eErrorEO == PVRSRV_ERROR_TIMEOUT)
+ {
+ eError = eErrorEO;
+ }
+ else
+ {
+ eError = PVRSRV_ERROR_NOT_FOUND;
+ PVR_DPF((PVR_DBG_ERROR, "Stream \"%s\" does not exist", pszName));
+ }
+ goto e0;
+ }
+
+ psStream = psNode->psStream;
+
+	/* Allocate memory for the stream if needed; the memory is only allocated
+	 * on the first open call. */
+ eError = TLAllocSharedMemIfNull(psStream);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to allocate memory for stream"
+ " \"%s\"", pszName));
+ goto e0;
+ }
+
+ if (bIsWriteOnly)
+ {
+
+		/* If psWDesc == NULL this is the first attempt to open the stream
+		 * for write; if so, create the descriptor, otherwise increment its
+		 * reference count. */
+ if (psNode->psWDesc == NULL)
+ {
+ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, NULL);
+ psNode->psWDesc = psNewSD;
+ }
+ else
+ {
+ psNewSD = psNode->psWDesc;
+ psNode->psWDesc->uiRefCount++;
+ }
+
+ if (!psNewSD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream"
+ " writer descriptor"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ psNode->uiWRefCount++;
+ }
+ else
+ {
+ /* Only one reader per stream supported */
+ if (psNode->psRDesc != NULL)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Cannot open \"%s\" stream, stream already"
+ " opened", pszName));
+ eError = PVRSRV_ERROR_ALREADY_OPEN;
+ goto e0;
+ }
+
+ /* Create an event handle for this client to wait on when no data in
+ * stream buffer. */
+ eError = OSEventObjectOpen(psNode->hReadEventObj, &hEvent);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not possible to open node's event object"));
+ eError = PVRSRV_ERROR_UNABLE_TO_CREATE_EVENT;
+ goto e0;
+ }
+
+ psNewSD = TLMakeStreamDesc(psNode, ui32Mode, hEvent);
+ psNode->psRDesc = psNewSD;
+
+ if (!psNewSD)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Not possible to make a new stream descriptor"));
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e1;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "TLServerOpenStreamKM evList=%p, evObj=%p",
+ psNode->hReadEventObj,
+ psNode->psRDesc->hReadEvent));
+ }
+
+ /* Copy the import handle back to the user mode API to enable access to
+ * the stream buffer from user-mode process. */
+ eError = DevmemLocalGetImportHandle(TLStreamGetBufferPointer(psStream),
+ (void**) ppsTLPMR);
+ PVR_LOGG_IF_ERROR(eError, "DevmemLocalGetImportHandle", e2);
+
+ psGD->uiClientCnt++;
+
+ /* Global data updated. Now release global lock */
+ OSLockRelease (psGD->hTLGDLock);
+
+ *ppsSD = psNewSD;
+
+ if (bResetOnOpen)
+ {
+ TLStreamReset(psStream);
+ }
+
+ /* This callback is executed only on reader open. There are some actions
+ * executed on reader open that don't make much sense for writers e.g.
+ * injection on time synchronisation packet into the stream. */
+ if (!bIsWriteOnly && psStream->pfOnReaderOpenCallback != NULL && !bNoOpenCB)
+ {
+ psStream->pfOnReaderOpenCallback(psStream->pvOnReaderOpenUserData);
+ }
+
+ /* psNode->uiWRefCount is set to '1' on stream create so the first open
+ * is '2'. */
+ if (bIsWriteOnly && psStream->psNotifStream != NULL &&
+ psNode->uiWRefCount == 2)
+ {
+ TLStreamMarkStreamOpen(psStream);
+ }
+
+ PVR_DPF((PVR_DBG_MESSAGE, "%s: Stream %s opened for %s", __func__, pszName,
+ ui32Mode & PVRSRV_STREAM_FLAG_OPEN_WO ? "write" : "read"));
+
+ PVR_DPF_RETURN_OK;
+
+e2:
+ OSFreeMem(psNewSD);
+e1:
+ if (!bIsWriteOnly)
+ OSEventObjectClose(hEvent);
+e0:
+ OSLockRelease (psGD->hTLGDLock);
+ PVR_DPF_RETURN_RC (eError);
+}
+
+PVRSRV_ERROR
+TLServerCloseStreamKM(PTL_STREAM_DESC psSD)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PTL_GLOBAL_DATA psGD = TLGGD();
+ PTL_SNODE psNode;
+ PTL_STREAM psStream;
+ IMG_BOOL bDestroyStream;
+ IMG_BOOL bIsWriteOnly = psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO ?
+ IMG_TRUE : IMG_FALSE;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ /* Sanity check, quick exit if there are no streams */
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Check stream still valid */
+ psNode = TLFindStreamNodeByDesc(psSD);
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since the descriptor is valid, the stream should not have been made NULL */
+ PVR_ASSERT (psNode->psStream);
+
+	/* Save the stream's reference in case its destruction is required after this
+	 * client is removed */
+ psStream = psNode->psStream;
+
+	/* Acquire the TL_GLOBAL_DATA lock as the following TLUnrefDescAndTryFreeStreamNode
+	 * call will update the TL_SNODE's descriptor value */
+ OSLockAcquire (psGD->hTLGDLock);
+
+ /* Close event handle because event object list might be destroyed in
+ * TLUnrefDescAndTryFreeStreamNode(). */
+ if (!bIsWriteOnly)
+ {
+ /* Close and free the event handle resource used by this descriptor */
+ eError = OSEventObjectClose(psSD->hReadEvent);
+ if (eError != PVRSRV_OK)
+ {
+ /* Log error but continue as it seems best */
+ PVR_DPF((PVR_DBG_ERROR, "OSEventObjectClose() failed error %d",
+ eError));
+ eError = PVRSRV_ERROR_UNABLE_TO_DESTROY_EVENT;
+ }
+ }
+ else if (psNode->uiWRefCount == 2 && psStream->psNotifStream != NULL)
+ {
+ /* psNode->uiWRefCount is set to '1' on stream create so the last close
+ * before destruction is '2'. */
+ TLStreamMarkStreamClose(psStream);
+ }
+
+ /* Remove descriptor from stream object/list */
+ bDestroyStream = TLUnrefDescAndTryFreeStreamNode (psNode, psSD);
+
+ /* Assert the counter is sane after input data validated. */
+ PVR_ASSERT(psGD->uiClientCnt > 0);
+ psGD->uiClientCnt--;
+
+ OSLockRelease (psGD->hTLGDLock);
+
+ /* Destroy the stream if its TL_SNODE was removed from TL_GLOBAL_DATA */
+ if (bDestroyStream)
+ {
+ TLStreamDestroy (psStream);
+ psStream = NULL;
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE, "%s: Stream closed", __func__));
+
+ /* Free the descriptor if ref count reaches 0. */
+ if (psSD->uiRefCount == 0)
+ {
+ /* Free the stream descriptor object */
+ OSFreeMem(psSD);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* ui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode;
+ IMG_UINT8* pui8Buffer = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Sanity check, quick exit if there are no streams */
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ /* Acquire the global lock. We have to be sure that no one modifies
+ * the list while we are looking for our stream. */
+ OSLockAcquire(psGD->hTLGDLock);
+ /* Check stream still valid */
+ psNode = TLFindAndGetStreamNodeByDesc(psSD);
+ OSLockRelease(psGD->hTLGDLock);
+
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ eError = TLStreamReserve2(psNode->psStream, &pui8Buffer, ui32Size,
+ ui32SizeMin, pui32Available);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Failed to reserve %u (%u, %u) bytes in the stream, error %s.", \
+ ui32Size, ui32SizeMin, *pui32Available, PVRSRVGETERRORSTRING(eError)));
+ }
+ else if (pui8Buffer == NULL)
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Not enough space in the stream."));
+ eError = PVRSRV_ERROR_STREAM_FULL;
+ }
+ else
+ {
+ *ui32BufferOffset = pui8Buffer - psNode->psStream->pbyBuffer;
+ PVR_ASSERT(*ui32BufferOffset < psNode->psStream->ui32Size);
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ TLReturnStreamNode(psNode);
+ OSLockRelease(psGD->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Sanity check, quick exit if there are no streams */
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ /* Acquire the global lock. We have to be sure that no one modifies
+ * the list while we are looking for our stream. */
+ OSLockAcquire(psGD->hTLGDLock);
+ /* Check stream still valid */
+ psNode = TLFindAndGetStreamNodeByDesc(psSD);
+ OSLockRelease(psGD->hTLGDLock);
+
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ eError = TLStreamCommit(psNode->psStream, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to commit data into stream."));
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ TLReturnStreamNode(psNode);
+ OSLockRelease(psGD->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 ui32Size,
+ IMG_CHAR *pszStreams,
+ IMG_UINT32 *pui32NumFound)
+{
+ PTL_SNODE psNode = NULL;
+ IMG_CHAR (*paszStreams)[PRVSRVTL_MAX_STREAM_NAME_SIZE] =
+ (IMG_CHAR (*)[PRVSRVTL_MAX_STREAM_NAME_SIZE]) pszStreams;
+
+ if (*pszNamePattern == '\0')
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ if (ui32Size % PRVSRVTL_MAX_STREAM_NAME_SIZE != 0)
+ return PVRSRV_ERROR_INVALID_PARAMS;
+
+ /* Sanity check, quick exit if there are no streams */
+ if (TLGGD()->psHead == NULL)
+ {
+ *pui32NumFound = 0;
+ return PVRSRV_OK;
+ }
+
+ OSLockAcquire(TLGGD()->hTLGDLock);
+
+ *pui32NumFound = TLDiscoverStreamNodes(pszNamePattern, paszStreams,
+ ui32Size / PRVSRVTL_MAX_STREAM_NAME_SIZE);
+
+ /* Find "tlctrl" stream and reset it */
+ psNode = TLFindStreamNodeByName(PVRSRV_TL_CTLR_STREAM);
+ if (psNode != NULL)
+ TLStreamReset(psNode->psStream);
+
+ OSLockRelease(TLGGD()->hTLGDLock);
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* puiReadOffset,
+ IMG_UINT32* puiReadLen)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ IMG_UINT32 uiTmpOffset;
+ IMG_UINT32 uiTmpLen = 0;
+ PTL_SNODE psNode;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ TL_COUNTER_INC(psSD->ui32AcquireCount);
+
+ /* Sanity check, quick exit if there are no streams */
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ /* Check stream still valid */
+ psNode = TLFindStreamNodeByDesc(psSD);
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+	/* If we are here, the stream will never be made NULL until this context itself
+	 * calls TLUnrefDescAndTryFreeStreamNode(). This is because the producer will
+	 * fail to make the stream NULL (by calling TLTryRemoveStreamAndFreeStreamNode)
+	 * when a valid stream descriptor is present (i.e. a client is connected).
+	 * Hence, no checks for the stream being non-NULL are required after this. */
+ PVR_ASSERT (psNode->psStream);
+
+ psSD->ui32ReadLen = 0; /* Handle NULL read returns */
+
+ do
+ {
+ uiTmpLen = TLStreamAcquireReadPos(psNode->psStream, psSD->ui32Flags & PVRSRV_STREAM_FLAG_DISABLE_PRODUCER_CALLBACK, &uiTmpOffset);
+
+ if (uiTmpLen > 0)
+ { /* Data found */
+
+ *puiReadOffset = uiTmpOffset;
+ *puiReadLen = uiTmpLen;
+ psSD->ui32ReadLen = uiTmpLen; /* Save the original data length in the stream desc */
+ PVR_DPF_RETURN_OK;
+ }
+ else if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING))
+		{ /* No data found; blocking mode */
+
+ TL_COUNTER_INC(psSD->ui32NoDataSleep);
+
+ eError = OSEventObjectWaitTimeout(psSD->hReadEvent, NO_DATA_WAIT_PERIOD);
+ if (eError == PVRSRV_OK)
+ { /* Data present */
+
+ TL_COUNTER_INC(psSD->ui32Signalled);
+
+ continue; /* Acquire read position again */
+ }
+ else if (eError == PVRSRV_ERROR_TIMEOUT)
+		{ /* Timeout back to client if still no data; this optimisation helps reduce bridge calls */
+
+ if (TLStreamOutOfData(psNode->psStream))
+ {
+ /* Return on timeout if stream empty, else let while exit and return data */
+ TL_COUNTER_INC(psSD->ui32TimeoutEmpty);
+ PVR_DPF_RETURN_RC(eError);
+ }
+ else
+ {
+ /* Data available, loop and repeat read procedure, to honour read limit/error path */
+ TL_COUNTER_INC(psSD->ui32TimeoutData);
+
+ continue; /* Acquire read position again */
+ }
+ }
+ else
+ { /* Some other system error with event objects */
+ PVR_DPF_RETURN_RC(eError);
+ }
+ }
+ else
+		{ /* No data; non-blocking mode */
+ TL_COUNTER_INC(psSD->ui32NoData);
+
+			/* When there is no data in non-blocking mode, uiReadOffset should be
+			 * set to NO_ACQUIRE, signifying there is no need for a Release call */
+ *puiReadOffset = NO_ACQUIRE;
+ *puiReadLen = 0;
+ PVR_DPF_RETURN_OK;
+ }
+ }
+ while (1);
+}
+
+PVRSRV_ERROR
+TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 uiReadOffset,
+ IMG_UINT32 uiReadLen)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode;
+
+ PVR_DPF_ENTERED;
+
+ /* Unreferenced in release builds */
+ PVR_UNREFERENCED_PARAMETER(uiReadOffset);
+
+ PVR_ASSERT(psSD);
+
+ /* Sanity check, quick exit if there are no streams */
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ if ((uiReadLen % PVRSRVTL_PACKET_ALIGNMENT != 0))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Check stream still valid */
+ psNode = TLFindStreamNodeByDesc(psSD);
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ PVR_DPF((PVR_DBG_VERBOSE, "TLReleaseDataKM uiReadOffset=%d, uiReadLen=%d", uiReadOffset, uiReadLen));
+
+ /* Move read position on to free up space in stream buffer */
+ PVR_DPF_RETURN_RC(TLStreamAdvanceReadPos(psNode->psStream, uiReadLen, psSD->ui32ReadLen));
+}
+
+PVRSRV_ERROR
+TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE* pui8Data)
+{
+ TL_GLOBAL_DATA* psGD = TLGGD();
+ PTL_SNODE psNode;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psSD);
+
+ if (!(psSD->ui32Flags & PVRSRV_STREAM_FLAG_OPEN_WO))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Sanity check, quick exit if there are no streams */
+ if (psGD->psHead == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ /* Check stream still valid */
+ psNode = TLFindAndGetStreamNodeByDesc(psSD);
+ OSLockRelease(psGD->hTLGDLock);
+
+ if ((psNode == NULL) || (psNode != psSD->psNode))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_HANDLE_NOT_FOUND);
+ }
+
+ /* Since we have a valid stream descriptor, the stream should not have been
+ * made NULL by any producer context. */
+ PVR_ASSERT (psNode->psStream);
+
+ eError = TLStreamWrite(psNode->psStream, pui8Data, ui32Size);
+ if (eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Failed to write data to the stream (%d).",
+ eError));
+ }
+
+ OSLockAcquire(psGD->hTLGDLock);
+ TLReturnStreamNode(psNode);
+ OSLockRelease(psGD->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+/*****************************************************************************
+ End of file (tlserver.c)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlserver.h b/drivers/gpu/drm/img-rogue/1.10/tlserver.h
new file mode 100644
index 00000000000000..8d2bd3b7974309
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlserver.h
@@ -0,0 +1,98 @@
+/*************************************************************************/ /*!
+@File
+@Title KM server Transport Layer implementation
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Main bridge APIs for Transport Layer client functions
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef __TLSERVER_H_
+#define __TLSERVER_H_
+
+#include "img_defs.h"
+#include "pvr_debug.h"
+#include "connection_server.h"
+
+#include "tlintern.h"
+
+/*
+ * Transport Layer Client API Kernel-Mode bridge implementation
+ */
+
+PVRSRV_ERROR TLServerConnectKM(CONNECTION_DATA *psConnection);
+PVRSRV_ERROR TLServerDisconnectKM(CONNECTION_DATA *psConnection);
+
+PVRSRV_ERROR TLServerOpenStreamKM(const IMG_CHAR* pszName,
+ IMG_UINT32 ui32Mode,
+ PTL_STREAM_DESC* ppsSD,
+ PMR** ppsTLPMR);
+
+PVRSRV_ERROR TLServerCloseStreamKM(PTL_STREAM_DESC psSD);
+
+PVRSRV_ERROR TLServerDiscoverStreamsKM(const IMG_CHAR *pszNamePattern,
+ IMG_UINT32 ui32Max,
+ IMG_CHAR *pszStreams,
+ IMG_UINT32 *pui32NumFound);
+
+PVRSRV_ERROR TLServerReserveStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* ui32BufferOffset,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available);
+
+PVRSRV_ERROR TLServerCommitStreamKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size);
+
+PVRSRV_ERROR TLServerAcquireDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32* puiReadOffset,
+ IMG_UINT32* puiReadLen);
+
+PVRSRV_ERROR TLServerReleaseDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 uiReadOffset,
+ IMG_UINT32 uiReadLen);
+
+PVRSRV_ERROR TLServerWriteDataKM(PTL_STREAM_DESC psSD,
+ IMG_UINT32 ui32Size,
+ IMG_BYTE *pui8Data);
+
+#endif /* __TLSERVER_H_ */
+
+/*****************************************************************************
+ End of file (tlserver.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlstream.c b/drivers/gpu/drm/img-rogue/1.10/tlstream.c
new file mode 100644
index 00000000000000..3fb4f63da8a21e
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlstream.c
@@ -0,0 +1,1338 @@
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer kernel side API implementation.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Transport Layer API implementation.
+ These functions are provided to driver components.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+//#define PVR_DPF_FUNCTION_TRACE_ON 1
+#undef PVR_DPF_FUNCTION_TRACE_ON
+#include "pvr_debug.h"
+
+#include "allocmem.h"
+#include "devicemem.h"
+#include "pvrsrv_error.h"
+#include "osfunc.h"
+#include "log2.h"
+
+#include "tlintern.h"
+#include "tlstream.h"
+
+#include "pvrsrv.h"
+
+#define EVENT_OBJECT_TIMEOUT_US 1000000ULL
+#define READ_PENDING_TIMEOUT_US 100000ULL
+
+/*! Compute maximum TL packet size for this stream. Max packet size will be
+ * minimum of PVRSRVTL_MAX_PACKET_SIZE and (BufferSize / 2.5). This computation
+ * is required to avoid a corner case, observed when the TL buffer size is
+ * smaller than twice the TL max packet size and the read and write indices
+ * are positioned in such a way that the TL packet (write packet + padding
+ * packet) may be bigger than the buffer itself.
+ */
+#define GET_TL_MAX_PACKET_SIZE( bufSize ) PVRSRVTL_ALIGN( MIN( PVRSRVTL_MAX_PACKET_SIZE, ( 2 * bufSize ) / 5 ) )
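+
+/* Illustrative example (values are not from the driver): for a 4 KiB buffer,
+ * (2 * 4096) / 5 = 1638 bytes, which PVRSRVTL_ALIGN then rounds up to the
+ * packet alignment; for larger buffers the result is instead capped at
+ * PVRSRVTL_MAX_PACKET_SIZE. */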
+
+/* Given the state of the buffer it returns a number of bytes that the client
+ * can use for a successful allocation. */
+static INLINE IMG_UINT32 suggestAllocSize(IMG_UINT32 ui32LRead,
+ IMG_UINT32 ui32LWrite,
+ IMG_UINT32 ui32CBSize,
+ IMG_UINT32 ui32ReqSizeMin,
+ IMG_UINT32 ui32MaxPacketSize)
+{
+ IMG_UINT32 ui32AvSpace = 0;
+
+ /* This could be written in fewer lines using the ? operator but it
+ would not be kind to potential readers of this source at all. */
+ if ( ui32LRead > ui32LWrite ) /* Buffer WRAPPED */
+ {
+ if ( (ui32LRead - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+ {
+ ui32AvSpace = ui32LRead - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ }
+ else /* Normal, no wrap */
+ {
+ if ( (ui32CBSize - ui32LWrite) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+ {
+ ui32AvSpace = ui32CBSize - ui32LWrite - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ else if ( (ui32LRead - 0) > (sizeof(PVRSRVTL_PACKETHDR) + ui32ReqSizeMin + (IMG_INT) BUFFER_RESERVED_SPACE) )
+ {
+ ui32AvSpace = ui32LRead - sizeof(PVRSRVTL_PACKETHDR) - (IMG_INT) BUFFER_RESERVED_SPACE;
+ }
+ }
+	/* The max size of a TL packet is currently a UINT16; adjust accordingly */
+ return MIN(ui32AvSpace, ui32MaxPacketSize);
+}
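+
+/* Worked example (illustrative values, assuming an 8-byte packet header and
+ * the 8-byte reserved space described at cbSpaceLeft below): with
+ * ui32CBSize = 1024, ui32LRead = 512, ui32LWrite = 128 and
+ * ui32ReqSizeMin = 16 the buffer has wrapped, (512 - 128) > (8 + 16 + 8),
+ * so the suggested allocation is 512 - 128 - 8 - 8 = 368 bytes, finally
+ * clamped to ui32MaxPacketSize. */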
+
+/* Returns the number of bytes left in the buffer; negative if there are none.
+ * Two 4-byte aligned values are reserved: one for the "write failed" buffer
+ * flag and one to distinguish the buffer-full state from the buffer-empty
+ * state.
+ * Always returns free space minus 8, even when the "write failed" packet may
+ * already be in the stream ahead of this write. */
+static INLINE IMG_INT
+cbSpaceLeft(IMG_UINT32 ui32Read, IMG_UINT32 ui32Write, IMG_UINT32 ui32size)
+{
+	/* We need to reserve 4b (one packet) in the buffer to be able to tell
+	 * empty buffers from full ones, and one more for the "write failed" packet */
+ if ( ui32Read > ui32Write )
+ {
+ return (IMG_INT)ui32Read - (IMG_INT)ui32Write - (IMG_INT)BUFFER_RESERVED_SPACE;
+ }
+ else
+ {
+ return (IMG_INT)ui32size - ((IMG_INT)ui32Write - (IMG_INT)ui32Read) - (IMG_INT)BUFFER_RESERVED_SPACE;
+ }
+}
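+
+/* Worked example (illustrative values): with ui32size = 1024 and the 8 bytes
+ * of reserved space noted above, ui32Read = 300 and ui32Write = 100 (wrapped)
+ * gives 300 - 100 - 8 = 192 bytes free, while ui32Read = 100 and
+ * ui32Write = 300 (not wrapped) gives 1024 - (300 - 100) - 8 = 816 bytes. */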
+
+PVRSRV_ERROR TLAllocSharedMemIfNull(IMG_HANDLE hStream)
+{
+ PTL_STREAM psStream = (PTL_STREAM) hStream;
+ PVRSRV_ERROR eError;
+
+ /* CPU Local memory used as these buffers are not accessed by the device.
+ * CPU Uncached write combine memory used to improve write performance,
+ * memory barrier added in TLStreamCommit to ensure data written to memory
+ * before CB write point is updated before consumption by the reader.
+ */
+ IMG_CHAR pszBufferLabel[PRVSRVTL_MAX_STREAM_NAME_SIZE + 20];
+ DEVMEM_FLAGS_T uiMemFlags = PVRSRV_MEMALLOCFLAG_CPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITEABLE |
+ PVRSRV_MEMALLOCFLAG_GPU_READABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_WRITE_COMBINE |
+ PVRSRV_MEMALLOCFLAG_KERNEL_CPU_MAPPABLE |
+ PVRSRV_MEMALLOCFLAG_CPU_LOCAL; // TL for now is only used by host driver, so cpulocal mem suffices
+
+ /* Exit if memory has already been allocated. */
+ if (psStream->pbyBuffer != NULL)
+ return PVRSRV_OK;
+
+ OSSNPrintf(pszBufferLabel, sizeof(pszBufferLabel), "TLStreamBuf-%s",
+ psStream->szName);
+
+
+ /* Use HostMemDeviceNode instead of psStream->psDevNode to benefit from faster
+ * accesses to CPU local memory. When the framework to access CPU_LOCAL device
+ * memory from GPU is fixed, we'll switch back to use psStream->psDevNode for
+ * TL buffers */
+ eError = DevmemAllocateExportable((IMG_HANDLE)PVRSRVGetPVRSRVData()->psHostMemDeviceNode,
+ (IMG_DEVMEM_SIZE_T) psStream->ui32Size,
+ (IMG_DEVMEM_ALIGN_T) OSGetPageSize(),
+ ExactLog2(OSGetPageSize()),
+ uiMemFlags,
+ pszBufferLabel,
+ &psStream->psStreamMemDesc);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAllocateExportable", e0);
+
+ eError = DevmemAcquireCpuVirtAddr(psStream->psStreamMemDesc,
+ (void**) &psStream->pbyBuffer);
+ PVR_LOGG_IF_ERROR(eError, "DevmemAcquireCpuVirtAddr", e1);
+
+ return PVRSRV_OK;
+
+e1:
+ DevmemFree(psStream->psStreamMemDesc);
+e0:
+ return eError;
+}
+
+void TLFreeSharedMem(IMG_HANDLE hStream)
+{
+ PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+ if (psStream->pbyBuffer != NULL)
+ {
+ DevmemReleaseCpuVirtAddr(psStream->psStreamMemDesc);
+ psStream->pbyBuffer = NULL;
+ }
+ if (psStream->psStreamMemDesc != NULL)
+ {
+ DevmemFree(psStream->psStreamMemDesc);
+ psStream->psStreamMemDesc = NULL;
+ }
+}
+
+/*******************************************************************************
+ * TL Server public API implementation.
+ ******************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_CHAR *szStreamName,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32StreamFlags,
+ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+               void *pvOnReaderOpenUD,
+ TL_STREAM_SOURCECB pfProducerCB,
+ void *pvProducerUD)
+{
+ PTL_STREAM psTmp;
+ PVRSRV_ERROR eError;
+ IMG_HANDLE hEventList;
+ PTL_SNODE psn;
+ TL_OPMODE eOpMode;
+
+ PVR_DPF_ENTERED;
+ /* Sanity checks: */
+ /* non NULL handler required */
+ if ( NULL == phStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ if (szStreamName == NULL || *szStreamName == '\0' ||
+ OSStringLength(szStreamName) >= PRVSRVTL_MAX_STREAM_NAME_SIZE)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ if ( NULL == psDevNode )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+ if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ /* Acquire TL_GLOBAL_DATA lock here because, if the following TLFindStreamNodeByName()
+ * returns NULL, a new TL_SNODE will be added to TL_GLOBAL_DATA's TL_SNODE list */
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Check if there already exists a stream with this name. */
+ psn = TLFindStreamNodeByName( szStreamName );
+ if ( NULL != psn )
+ {
+ eError = PVRSRV_ERROR_ALREADY_EXISTS;
+ goto e0;
+ }
+
+ /* Allocate stream structure container (stream struct) for the new stream */
+ psTmp = OSAllocZMem(sizeof(TL_STREAM));
+ if ( NULL == psTmp )
+ {
+ eError = PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e0;
+ }
+
+ OSStringCopy(psTmp->szName, szStreamName);
+
+ if ( ui32StreamFlags & TL_FLAG_FORCE_FLUSH )
+ {
+ psTmp->bWaitForEmptyOnDestroy = IMG_TRUE;
+ }
+
+ psTmp->bNoSignalOnCommit = (ui32StreamFlags&TL_FLAG_NO_SIGNAL_ON_COMMIT) ? IMG_TRUE : IMG_FALSE;
+
+ psTmp->eOpMode = eOpMode;
+
+ eError = OSEventObjectCreate(NULL, &psTmp->hProducerEventObj);
+ if (eError != PVRSRV_OK)
+ {
+ goto e1;
+ }
+ /* Create an event handle for this kind of stream */
+ eError = OSEventObjectOpen(psTmp->hProducerEventObj, &psTmp->hProducerEvent);
+ if (eError != PVRSRV_OK)
+ {
+ goto e2;
+ }
+
+ psTmp->pfOnReaderOpenCallback = pfOnReaderOpenCB;
+	psTmp->pvOnReaderOpenUserData = pvOnReaderOpenUD;
+ /* Remember producer supplied CB and data for later */
+ psTmp->pfProducerCallback = (void(*)(void))pfProducerCB;
+ psTmp->pvProducerUserData = pvProducerUD;
+
+ psTmp->psNotifStream = NULL;
+
+ /* Round the requested bytes to a multiple of array elements' size, eg round 3 to 4 */
+ psTmp->ui32Size = PVRSRVTL_ALIGN(ui32Size);
+ psTmp->ui32MaxPacketSize = GET_TL_MAX_PACKET_SIZE(psTmp->ui32Size);
+ psTmp->ui32Read = 0;
+ psTmp->ui32Write = 0;
+ psTmp->ui32Pending = NOTHING_PENDING;
+ psTmp->psDevNode = psDevNode;
+ psTmp->bReadPending = IMG_FALSE;
+ /* Memory will be allocated on first connect to the stream */
+ if (!(ui32StreamFlags & TL_FLAG_ALLOCATE_ON_FIRST_OPEN))
+ {
+ /* Allocate memory for the circular buffer and export it to user space. */
+ eError = TLAllocSharedMemIfNull(psTmp);
+ PVR_LOGG_IF_ERROR(eError, "TLAllocSharedMem", e3);
+ }
+
+ /* Synchronisation object to synchronise with user side data transfers. */
+ eError = OSEventObjectCreate(psTmp->szName, &hEventList);
+ if (eError != PVRSRV_OK)
+ {
+ goto e4;
+ }
+
+ eError = OSLockCreate (&psTmp->hStreamWLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e5;
+ }
+
+ eError = OSLockCreate (&psTmp->hReadLock, LOCK_TYPE_PASSIVE);
+ if (eError != PVRSRV_OK)
+ {
+ goto e6;
+ }
+
+ /* Now remember the stream in the global TL structures */
+ psn = TLMakeSNode(hEventList, (TL_STREAM *)psTmp, NULL);
+ if (psn == NULL)
+ {
+ eError=PVRSRV_ERROR_OUT_OF_MEMORY;
+ goto e7;
+ }
+
+ /* Stream node created, now reset the write reference count to 1
+ * (i.e. this context's reference) */
+ psn->uiWRefCount = 1;
+
+ TLAddStreamNode(psn);
+
+ /* Release TL_GLOBAL_DATA lock as the new TL_SNODE is now added to the list */
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+	/* Best-effort signal; if this fails, the client wait timeout will
+	 * ultimately let it find the new stream. Acceptable, as clean-up would
+	 * be tricky at this point */
+ (void) OSEventObjectSignal(TLGGD()->hTLEventObj);
+
+ /* Pass the newly created stream handle back to caller */
+ *phStream = (IMG_HANDLE)psTmp;
+ PVR_DPF_RETURN_OK;
+
+e7:
+ OSLockDestroy(psTmp->hReadLock);
+e6:
+ OSLockDestroy(psTmp->hStreamWLock);
+e5:
+ OSEventObjectDestroy(hEventList);
+e4:
+ TLFreeSharedMem(psTmp);
+e3:
+ OSEventObjectClose(psTmp->hProducerEvent);
+e2:
+ OSEventObjectDestroy(psTmp->hProducerEventObj);
+e1:
+ OSFreeMem(psTmp);
+e0:
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+void TLStreamReset(IMG_HANDLE hStream)
+{
+ PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+ PVR_ASSERT(psStream != NULL);
+
+ OSLockAcquire(psStream->hStreamWLock);
+
+ while (psStream->ui32Pending != NOTHING_PENDING)
+ {
+ PVRSRV_ERROR eError;
+
+ /* We're in the middle of a write so we cannot reset the stream.
+ * We are going to wait until the data is committed. Release lock while
+ * we're here. */
+ OSLockRelease(psStream->hStreamWLock);
+
+		/* Even when psStream->bNoSignalOnCommit is set we can still use
+		 * the timeout capability of the event object API (time in us). */
+ eError = OSEventObjectWaitTimeout(psStream->psNode->hReadEventObj, 100);
+ if (eError != PVRSRV_ERROR_TIMEOUT && eError != PVRSRV_OK)
+ {
+ PVR_LOGRN_IF_ERROR(eError, "OSEventObjectWaitTimeout");
+ }
+
+ OSLockAcquire(psStream->hStreamWLock);
+
+		/* Either a timeout occurred or the stream has been signalled.
+		 * If the former, we have to check whether the data was committed;
+		 * if the latter, whether the stream hasn't been re-reserved. Either
+		 * way we have to go back to the condition.
+ * If the stream has been released we'll exit with the lock held so
+ * we can finally go and reset the stream. */
+ }
+
+ psStream->ui32Read = 0;
+ psStream->ui32Write = 0;
+ /* we know that ui32Pending already has correct value (no need to set) */
+
+ OSLockRelease(psStream->hStreamWLock);
+}
+
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream)
+{
+ PTL_STREAM psStream = (PTL_STREAM) hStream;
+
+ if (hStream == NULL || hNotifStream == NULL)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ psStream->psNotifStream = (PTL_STREAM) hNotifStream;
+
+ return PVRSRV_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReconfigure(
+ IMG_HANDLE hStream,
+ IMG_UINT32 ui32StreamFlags)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PTL_STREAM psTmp;
+ TL_OPMODE eOpMode;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ eOpMode = ui32StreamFlags & TL_OPMODE_MASK;
+ if (( eOpMode <= TL_OPMODE_UNDEF ) || ( eOpMode >= TL_OPMODE_LAST ))
+ {
+ PVR_DPF((PVR_DBG_ERROR, "OpMode for TL stream is invalid"));
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Prevent the TL Stream buffer from being written to
+ * while its mode is being reconfigured
+ */
+ OSLockAcquire (psTmp->hStreamWLock);
+ if ( NOTHING_PENDING != psTmp->ui32Pending )
+ {
+ OSLockRelease (psTmp->hStreamWLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+ }
+ psTmp->ui32Pending = 0;
+ OSLockRelease (psTmp->hStreamWLock);
+
+ psTmp->eOpMode = eOpMode;
+
+ OSLockAcquire (psTmp->hStreamWLock);
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamWLock);
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+ IMG_CHAR *szStreamName)
+{
+ PTL_SNODE psTmpSNode;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == phStream || NULL == szStreamName )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+	/* Acquire the TL_GLOBAL_DATA lock first to ensure that the TL_STREAM,
+	 * while being returned and modified, is not deleted by some other
+	 * context */
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Search for a stream node with a matching stream name */
+ psTmpSNode = TLFindStreamNodeByName(szStreamName);
+
+ if ( NULL == psTmpSNode )
+ {
+ OSLockRelease (TLGGD()->hTLGDLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_FOUND);
+ }
+
+ if (psTmpSNode->psStream->psNotifStream != NULL &&
+ psTmpSNode->uiWRefCount == 1)
+ {
+ TLStreamMarkStreamOpen(psTmpSNode->psStream);
+ }
+
+ /* The TL_SNODE->uiWRefCount governs the presence of this node in the
+ * TL_GLOBAL_DATA list i.e. when uiWRefCount falls to zero we try removing
+	 * this node from the TL_GLOBAL_DATA list. Hence, it is protected by the
+	 * TL_GLOBAL_DATA lock and not the TL_STREAM lock */
+ psTmpSNode->uiWRefCount++;
+
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ /* Return the stream handle to the caller */
+ *phStream = (IMG_HANDLE)psTmpSNode->psStream;
+
+ PVR_DPF_RETURN_VAL(PVRSRV_OK);
+}
+
+void
+TLStreamClose(IMG_HANDLE hStream)
+{
+ PTL_STREAM psTmp;
+ IMG_BOOL bDestroyStream;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "TLStreamClose failed as NULL stream handler passed, nothing done."));
+ PVR_DPF_RETURN;
+ }
+
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Acquire TL_GLOBAL_DATA lock for updating the reference count as this will be required
+ * in-case this TL_STREAM node is to be deleted */
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Decrement write reference counter of the stream */
+ psTmp->psNode->uiWRefCount--;
+
+ if ( 0 != psTmp->psNode->uiWRefCount )
+ {
+ /* The stream is still being used in other context(s) do not destroy
+ * anything */
+
+		/* uiWRefCount == 1 means the stream has been closed for write. The
+		 * next close pairs with TLStreamCreate(). Send a notification to
+		 * indicate that no writers are connected to the stream any more. */
+ if (psTmp->psNotifStream != NULL && psTmp->psNode->uiWRefCount == 1)
+ {
+ TLStreamMarkStreamClose(psTmp);
+ }
+
+ OSLockRelease (TLGGD()->hTLGDLock);
+ PVR_DPF_RETURN;
+ }
+ else
+ {
+ /* Now we try removing this TL_STREAM from TL_GLOBAL_DATA */
+
+ if ( psTmp->bWaitForEmptyOnDestroy == IMG_TRUE )
+ {
+ /* We won't require the TL_STREAM lock to be acquired here for accessing its read
+ * and write offsets. REASON: We are here because there is no producer context
+ * referencing this TL_STREAM, hence its ui32Write offset won't be changed now.
+ * Also, the update of ui32Read offset is not protected by locks */
+ while (psTmp->ui32Read != psTmp->ui32Write)
+ {
+ /* Release lock before sleeping */
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ OSEventObjectWaitTimeout(psTmp->hProducerEvent, EVENT_OBJECT_TIMEOUT_US);
+
+ OSLockAcquire (TLGGD()->hTLGDLock);
+
+ /* Ensure destruction of stream is still required */
+ if (0 != psTmp->psNode->uiWRefCount)
+ {
+ OSLockRelease (TLGGD()->hTLGDLock);
+ PVR_DPF_RETURN;
+ }
+ }
+ }
+
+ /* Try removing the stream from TL_GLOBAL_DATA */
+ bDestroyStream = TLTryRemoveStreamAndFreeStreamNode (psTmp->psNode);
+
+ OSLockRelease (TLGGD()->hTLGDLock);
+
+ if (bDestroyStream)
+ {
+ /* Destroy the stream if it was removed from TL_GLOBAL_DATA */
+ TLStreamDestroy (psTmp);
+ psTmp = NULL;
+ }
+ PVR_DPF_RETURN;
+ }
+}
+
+/*
+ * DoTLSetPacketHeader
+ *
+ * Ensure that whenever we update a Header we always add the RESERVED field
+ */
+static inline void DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR, IMG_UINT32);
+static inline void
+DoTLSetPacketHeader(PVRSRVTL_PPACKETHDR pHdr,
+ IMG_UINT32 ui32Val)
+{
+ PVR_ASSERT(((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) == 0);
+
+ /* Check that this is a correctly aligned packet header. */
+ if (((size_t)pHdr & (size_t)(PVRSRVTL_PACKET_ALIGNMENT - 1)) != 0)
+ {
+		/* Ideally this would return an error as the header is misaligned,
+		 * but the function is void; log the problem and set the value anyway */
+ PVR_DPF((PVR_DBG_ERROR, "%s: Misaligned header @ %p", __func__, pHdr));
+ pHdr->uiTypeSize = ui32Val;
+ }
+ else
+ {
+ pHdr->uiTypeSize = ui32Val;
+ pHdr->uiReserved = PVRSRVTL_PACKETHDR_RESERVED;
+ }
+}
+
+static PVRSRV_ERROR
+DoTLStreamReserve(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32ReqSize,
+ IMG_UINT32 ui32ReqSizeMin,
+ PVRSRVTL_PACKETTYPE ePacketType,
+ IMG_UINT32* pui32AvSpace)
+{
+ PTL_STREAM psTmp;
+ IMG_UINT32 *pui32Buf, ui32LRead, ui32LWrite, ui32LPending, lReqSizeAligned, lReqSizeActual, ui32CreateFreeSpace;
+ IMG_INT pad, iFreeSpace;
+ IMG_UINT8 *pui8IncrRead = NULL;
+ PVRSRV_ERROR eError;
+ PVRSRVTL_PPACKETHDR pHdr;
+
+ PVR_DPF_ENTERED;
+ if (pui32AvSpace) *pui32AvSpace = 0;
+
+ if (( NULL == hStream ))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Assert used as the packet type parameter is currently only provided
+ * by the TL APIs, not the calling client */
+ PVR_ASSERT((PVRSRVTL_PACKETTYPE_UNDEF < ePacketType) && (PVRSRVTL_PACKETTYPE_LAST >= ePacketType));
+
+ /* The buffer is only used in "rounded" (aligned) chunks */
+ lReqSizeAligned = PVRSRVTL_ALIGN(ui32ReqSize);
+
+	/* Lock the stream before reading its pending value, because if pending is set
+ * to NOTHING_PENDING, we update the pending value such that subsequent calls to
+ * this function from other context(s) fail with PVRSRV_ERROR_NOT_READY */
+ OSLockAcquire (psTmp->hStreamWLock);
+
+ /* Get a local copy of the stream buffer parameters */
+ ui32LRead = psTmp->ui32Read;
+ ui32LWrite = psTmp->ui32Write;
+ ui32LPending = psTmp->ui32Pending;
+
+ /* Multiple pending reserves are not supported. */
+ if ( NOTHING_PENDING != ui32LPending )
+ {
+ OSLockRelease (psTmp->hStreamWLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_NOT_READY);
+ }
+
+ if ( psTmp->ui32MaxPacketSize < lReqSizeAligned )
+ {
+ PVR_DPF((PVR_DBG_ERROR, "Requested Size: %u > TL Max Packet size: %u", lReqSizeAligned, psTmp->ui32MaxPacketSize));
+ psTmp->ui32Pending = NOTHING_PENDING;
+ if (pui32AvSpace)
+ {
+ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+ if (*pui32AvSpace == 0 && psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+ {
+ *pui32AvSpace = psTmp->ui32MaxPacketSize;
+ PVR_DPF((PVR_DBG_MESSAGE, "Opmode is Drop_Oldest, so Available Space changed to: %u\n", *pui32AvSpace));
+ }
+ }
+ OSLockRelease (psTmp->hStreamWLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED);
+ }
+
+	/* Prevent other threads from entering this region before we are done
+	 * updating the pending value and write offset (in case of padding). This
+	 * is not exactly a lock but a signal to other contexts that there is a
+	 * TLStreamCommit operation pending on this stream */
+ psTmp->ui32Pending = 0;
+
+ OSLockRelease (psTmp->hStreamWLock);
+
+ /* If there is enough contiguous space following the current Write
+ * position then no padding is required */
+ if ( psTmp->ui32Size
+ < ui32LWrite + lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) )
+ {
+ pad = psTmp->ui32Size - ui32LWrite;
+ }
+ else
+ {
+ pad = 0;
+ }
+
+ lReqSizeActual = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR) + pad;
+ iFreeSpace = cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size);
+
+ if (iFreeSpace < (IMG_INT) lReqSizeActual)
+ {
+ /* If this is a blocking reserve and there is not enough space then wait. */
+ if (psTmp->eOpMode == TL_OPMODE_BLOCK)
+ {
+			while ( ( cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+			          < (IMG_INT) lReqSizeActual ) )
+ {
+ /* The TL bridge is lockless now, so changing to OSEventObjectWait() */
+ OSEventObjectWait(psTmp->hProducerEvent);
+ // update local copies.
+ ui32LRead = psTmp->ui32Read;
+ ui32LWrite = psTmp->ui32Write;
+ }
+ }
+ /* Data overwriting, also insert PACKETS_DROPPED flag into existing packet */
+ else if (psTmp->eOpMode == TL_OPMODE_DROP_OLDEST)
+ {
+ OSLockAcquire(psTmp->hReadLock);
+
+ while(psTmp->bReadPending)
+ {
+ PVR_DPF((PVR_DBG_MESSAGE, "Waiting for the pending read operation to complete."));
+ OSLockRelease(psTmp->hReadLock);
+#if defined(TL_BUFFER_STATS)
+ psTmp->ui32CntWriteWaits++;
+#endif
+ eError = OSEventObjectWaitTimeout(psTmp->hProducerEvent, READ_PENDING_TIMEOUT_US);
+ OSLockAcquire(psTmp->hReadLock);
+ }
+
+#if defined(TL_BUFFER_STATS)
+ psTmp->ui32CntWriteSuccesses++;
+#endif
+ ui32LRead = psTmp->ui32Read;
+
+ if (cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size)
+ < (IMG_INT) lReqSizeActual)
+ {
+ ui32CreateFreeSpace = 5 * (psTmp->ui32Size / 100);
+ if (ui32CreateFreeSpace < lReqSizeActual)
+ {
+ ui32CreateFreeSpace = lReqSizeActual;
+ }
+
+ while(ui32CreateFreeSpace > (IMG_UINT32)cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size))
+ {
+ pui8IncrRead = &psTmp->pbyBuffer[ui32LRead];
+ ui32LRead += (sizeof(PVRSRVTL_PACKETHDR) + PVRSRVTL_ALIGN( GET_PACKET_DATA_LEN(pui8IncrRead) ));
+
+ /* Check if buffer needs to wrap */
+ if (ui32LRead >= psTmp->ui32Size)
+ {
+ ui32LRead = 0;
+ }
+ }
+ psTmp->ui32Read = ui32LRead;
+ pui8IncrRead = &psTmp->pbyBuffer[psTmp->ui32Read];
+
+ pHdr = GET_PACKET_HDR(pui8IncrRead);
+ DoTLSetPacketHeader(pHdr, SET_PACKETS_DROPPED(pHdr));
+ }
+ /* else fall through as there is enough space now to write the data */
+
+ OSLockRelease(psTmp->hReadLock);
+ }
+ /* No data overwriting, insert write_failed flag and return */
+ else if (psTmp->eOpMode == TL_OPMODE_DROP_NEWER)
+ {
+			/* The caller should not try to use ppui8Data; NULLify it to give
+			 * the user a chance of avoiding memory corruption */
+ *ppui8Data = NULL;
+
+ /* This flag should not be inserted two consecutive times, so
+		 * check the last ui32 in case it was a write-failed packet. */
+ pui32Buf = ui32LWrite
+ ?
+ (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite - sizeof(PVRSRVTL_PACKETHDR)]
+ : // Previous four bytes are not guaranteed to be a packet header...
+ (IMG_UINT32*)&psTmp->pbyBuffer[psTmp->ui32Size - PVRSRVTL_PACKET_ALIGNMENT];
+
+ pHdr = GET_PACKET_HDR(pui32Buf);
+ if ( PVRSRVTL_PACKETTYPE_MOST_RECENT_WRITE_FAILED
+ !=
+ GET_PACKET_TYPE( pHdr ) )
+ {
+ /* Insert size-stamped packet header */
+ pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+ pHdr = GET_PACKET_HDR(pui32Buf);
+ DoTLSetPacketHeader(pHdr, PVRSRVTL_SET_PACKET_WRITE_FAILED);
+ ui32LWrite += sizeof(PVRSRVTL_PACKETHDR);
+ ui32LWrite %= psTmp->ui32Size;
+ iFreeSpace -= sizeof(PVRSRVTL_PACKETHDR);
+ }
+
+ OSLockAcquire (psTmp->hStreamWLock);
+ psTmp->ui32Write = ui32LWrite;
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamWLock);
+
+ if (pui32AvSpace)
+ {
+ *pui32AvSpace = suggestAllocSize(ui32LRead, ui32LWrite, psTmp->ui32Size, ui32ReqSizeMin, psTmp->ui32MaxPacketSize);
+ }
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_FULL);
+ }
+ }
+
+ /* The easy case: buffer has enough space to hold the requested packet (data + header) */
+ if ( (cbSpaceLeft(ui32LRead, ui32LWrite, psTmp->ui32Size))
+ >= (IMG_INT) lReqSizeActual )
+ {
+ if ( pad )
+ {
+ /* Inserting padding packet. */
+ pui32Buf = (IMG_UINT32*)&psTmp->pbyBuffer[ui32LWrite];
+ pHdr = GET_PACKET_HDR(pui32Buf);
+ DoTLSetPacketHeader(pHdr,
+ PVRSRVTL_SET_PACKET_PADDING(pad-sizeof(PVRSRVTL_PACKETHDR)));
+
+ /* CAUTION: the used pad value should always result in a properly
+ * aligned ui32LWrite pointer, which in this case is 0 */
+ ui32LWrite = (ui32LWrite + pad) % psTmp->ui32Size;
+ /* Detect unaligned pad value */
+ PVR_ASSERT( ui32LWrite == 0);
+ }
+ /* Insert size-stamped packet header */
+ pui32Buf = (IMG_UINT32*) &psTmp->pbyBuffer[ui32LWrite];
+
+ pHdr = GET_PACKET_HDR(pui32Buf);
+ DoTLSetPacketHeader(pHdr,
+ PVRSRVTL_SET_PACKET_HDR(ui32ReqSize, ePacketType));
+
+ /* return the next position in the buffer to the user */
+ *ppui8Data = &psTmp->pbyBuffer[ ui32LWrite+sizeof(PVRSRVTL_PACKETHDR) ];
+
+ /* update pending offset: size stamp + data */
+ ui32LPending = lReqSizeAligned + sizeof(PVRSRVTL_PACKETHDR);
+ }
+ else
+ {
+ OSLockAcquire (psTmp->hStreamWLock);
+ psTmp->ui32Pending = NOTHING_PENDING;
+ OSLockRelease (psTmp->hStreamWLock);
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_ERROR);
+ }
+
+ /* Acquire stream lock for updating stream parameters */
+ OSLockAcquire (psTmp->hStreamWLock);
+ psTmp->ui32Write = ui32LWrite;
+ psTmp->ui32Pending = ui32LPending;
+ OSLockRelease (psTmp->hStreamWLock);
+
+#if defined(TL_BUFFER_STATS)
+ psTmp->ui32CntNumWriteSuccess++;
+#endif
+
+ PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size)
+{
+ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32Size, PVRSRVTL_PACKETTYPE_DATA, NULL);
+}
+
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available)
+{
+ return DoTLStreamReserve(hStream, ppui8Data, ui32Size, ui32SizeMin, PVRSRVTL_PACKETTYPE_DATA, pui32Available);
+}
+
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream, IMG_UINT32 ui32ReqSize)
+{
+ PTL_STREAM psTmp;
+ IMG_UINT32 ui32LRead, ui32OldWrite, ui32LWrite, ui32LPending;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ psTmp = (PTL_STREAM)hStream;
+
+ /* Get a local copy of the stream buffer parameters */
+ ui32LRead = psTmp->ui32Read;
+ ui32LWrite = psTmp->ui32Write;
+ ui32LPending = psTmp->ui32Pending;
+
+ ui32OldWrite = ui32LWrite;
+
+ // Space in buffer is aligned
+ ui32ReqSize = PVRSRVTL_ALIGN(ui32ReqSize) + sizeof(PVRSRVTL_PACKETHDR);
+
+	/* Check that a reserve is pending and that ReqSize + packet header size
+	 * does not exceed it. */
+ if ((ui32LPending == NOTHING_PENDING) || (ui32ReqSize > ui32LPending))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_STREAM_MISUSE);
+ }
+
+ /* Update pointer to written data. */
+ ui32LWrite = (ui32LWrite + ui32ReqSize) % psTmp->ui32Size;
+
+	/* and reset LPending to NOTHING_PENDING since the data is now submitted */
+ ui32LPending = NOTHING_PENDING;
+
+ /* Calculate high water mark for debug purposes */
+#if defined(TL_BUFFER_STATS)
+ {
+ IMG_UINT32 tmp = 0;
+ if (ui32LWrite > ui32LRead)
+ {
+ tmp = (ui32LWrite-ui32LRead);
+ }
+ else if (ui32LWrite < ui32LRead)
+ {
+ tmp = (psTmp->ui32Size-ui32LRead+ui32LWrite);
+ } /* else equal, ignore */
+
+ if (tmp > psTmp->ui32BufferUt)
+ {
+ psTmp->ui32BufferUt = tmp;
+ }
+ }
+#endif
+
+ /* Memory barrier required to ensure prior data written by writer is
+ * flushed from WC buffer to main memory. */
+ OSWriteMemoryBarrier();
+
+ /* Acquire stream lock to ensure other context(s) (if any)
+ * wait on the lock (in DoTLStreamReserve) for consistent values
+ * of write offset and pending value */
+ OSLockAcquire (psTmp->hStreamWLock);
+
+ /* Update stream buffer parameters to match local copies */
+ psTmp->ui32Write = ui32LWrite;
+ psTmp->ui32Pending = ui32LPending;
+
+ OSLockRelease (psTmp->hStreamWLock);
+
+ /* If we have transitioned from an empty buffer to a non-empty buffer,
+ * signal any consumers that may be waiting */
+ if (ui32OldWrite == ui32LRead && !psTmp->bNoSignalOnCommit)
+ {
+ /* Signal consumers that may be waiting */
+ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+ if ( eError != PVRSRV_OK)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+ }
+ PVR_DPF_RETURN_OK;
+}
+
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream, IMG_UINT8 *pui8Src, IMG_UINT32 ui32Size)
+{
+ IMG_BYTE *pbyDest = NULL;
+ PVRSRV_ERROR eError;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == hStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ eError = TLStreamReserve(hStream, &pbyDest, ui32Size);
+ if ( PVRSRV_OK != eError )
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+ else
+ {
+ OSDeviceMemCopy((void*)pbyDest, (void*)pui8Src, ui32Size);
+ eError = TLStreamCommit(hStream, ui32Size);
+ if ( PVRSRV_OK != eError )
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+ }
+
+ PVR_DPF_RETURN_OK;
+}
+
+void TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo)
+{
+ IMG_DEVMEM_SIZE_T actual_req_size;
+ IMG_DEVMEM_ALIGN_T align = 4; /* Low dummy value so the real value can be obtained */
+
+ actual_req_size = 2;
+ DevmemExportalignAdjustSizeAndAlign(OSGetPageShift(), &actual_req_size, &align);
+
+ psInfo->headerSize = sizeof(PVRSRVTL_PACKETHDR);
+ psInfo->minReservationSize = sizeof(IMG_UINT32);
+ psInfo->pageSize = (IMG_UINT32)(actual_req_size);
+ psInfo->pageAlign = (IMG_UINT32)(align);
+ psInfo->maxTLpacketSize = ((PTL_STREAM)hStream)->ui32MaxPacketSize;
+}
+
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE psStream)
+{
+ PVRSRV_ERROR eError;
+ IMG_UINT8* pData;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == psStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ eError = DoTLStreamReserve(psStream, &pData, 0, 0, PVRSRVTL_PACKETTYPE_MARKER_EOS, NULL);
+ if ( PVRSRV_OK != eError )
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ PVR_DPF_RETURN_RC(TLStreamCommit(psStream, 0));
+}
+
+
+static PVRSRV_ERROR
+_TLStreamMarkOC(IMG_HANDLE hStream, PVRSRVTL_PACKETTYPE ePacketType)
+{
+ PVRSRV_ERROR eError;
+ PTL_STREAM psStream = hStream;
+ IMG_UINT32 ui32Size;
+ IMG_UINT8 *pData;
+
+ PVR_DPF_ENTERED;
+
+ if (NULL == psStream)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+
+ if (NULL == psStream->psNotifStream)
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_NOTIF_STREAM);
+ }
+
+ ui32Size = OSStringLength(psStream->szName) + 1;
+
+ eError = DoTLStreamReserve(psStream->psNotifStream, &pData, ui32Size,
+ ui32Size, ePacketType, NULL);
+ if ( PVRSRV_OK != eError)
+ {
+ PVR_DPF_RETURN_RC(eError);
+ }
+
+ OSDeviceMemCopy(pData, psStream->szName, ui32Size);
+
+ PVR_DPF_RETURN_RC(TLStreamCommit(psStream->psNotifStream, ui32Size));
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE psStream)
+{
+ return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_OPEN_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE psStream)
+{
+ return _TLStreamMarkOC(psStream, PVRSRVTL_PACKETTYPE_STREAM_CLOSE_FOR_WRITE);
+}
+
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE psStream)
+{
+ PVRSRV_ERROR eError = PVRSRV_OK;
+ PTL_STREAM psTmp;
+
+ PVR_DPF_ENTERED;
+
+ if ( NULL == psStream )
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_PARAMS);
+ }
+ psTmp = (PTL_STREAM)psStream;
+
+ /* If read client exists and has opened stream in blocking mode,
+ * signal when data is available to read. */
+ if (psTmp->psNode->psRDesc &&
+ (!(psTmp->psNode->psRDesc->ui32Flags & PVRSRV_STREAM_FLAG_ACQUIRE_NONBLOCKING)) &&
+ psTmp->ui32Read != psTmp->ui32Write)
+ {
+ eError = OSEventObjectSignal(psTmp->psNode->hReadEventObj);
+ }
+
+ PVR_DPF_RETURN_RC(eError);
+}
+
+IMG_BOOL
+TLStreamOutOfData(IMG_HANDLE hStream)
+{
+ PTL_STREAM psTmp;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(hStream);
+ psTmp = (PTL_STREAM)hStream;
+
+ /* If both pointers are equal then the buffer is empty */
+ PVR_DPF_RETURN_VAL( psTmp->ui32Read == psTmp->ui32Write );
+}
+
+/*
+ * Internal stream APIs to server part of Transport Layer, declared in
+ * header tlintern.h. Direct pointers to stream objects are used here as
+ * these functions are internal.
+ */
+IMG_UINT32
+TLStreamAcquireReadPos(PTL_STREAM psStream,
+ IMG_BOOL bDisableCallback,
+ IMG_UINT32* puiReadOffset)
+{
+ IMG_UINT32 uiReadLen = 0;
+ IMG_UINT32 ui32LRead, ui32LWrite;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+ PVR_ASSERT(puiReadOffset);
+
+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+ {
+ if (!OSTryLockAcquire(psStream->hReadLock))
+ {
+ PVR_DPF((PVR_DBG_WARNING, "Read lock on the stream is acquired by some writer, "
+ "hence reader failed to acquire read lock."));
+#if defined(TL_BUFFER_STATS)
+ psStream->ui32CntReadFails++;
+#endif
+ PVR_DPF_RETURN_VAL(0);
+ }
+ }
+
+#if defined(TL_BUFFER_STATS)
+ psStream->ui32CntReadSuccesses++;
+#endif
+
+ /* Grab a local copy */
+ ui32LRead = psStream->ui32Read;
+ ui32LWrite = psStream->ui32Write;
+
+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+ {
+ psStream->bReadPending = IMG_TRUE;
+ OSLockRelease(psStream->hReadLock);
+ }
+
+ /* No data available and CB defined - try and get data */
+ if ((ui32LRead == ui32LWrite) && psStream->pfProducerCallback && !bDisableCallback)
+ {
+ PVRSRV_ERROR eRc;
+ IMG_UINT32 ui32Resp = 0;
+
+ eRc = ((TL_STREAM_SOURCECB)psStream->pfProducerCallback)(psStream, TL_SOURCECB_OP_CLIENT_EOS,
+ &ui32Resp, psStream->pvProducerUserData);
+ PVR_LOG_IF_ERROR(eRc, "TLStream->pfProducerCallback");
+
+ ui32LWrite = psStream->ui32Write;
+ }
+
+ /* No data available... */
+ if (ui32LRead == ui32LWrite)
+ {
+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+ {
+ psStream->bReadPending = IMG_FALSE;
+ }
+ PVR_DPF_RETURN_VAL(0);
+ }
+
+ /* Data is available to read... */
+ *puiReadOffset = ui32LRead;
+
+ /*PVR_DPF((PVR_DBG_VERBOSE,
+ * "TLStreamAcquireReadPos Start before: Write:%d, Read:%d, size:%d",
+ * ui32LWrite, ui32LRead, psStream->ui32Size));
+ */
+
+ if ( ui32LRead > ui32LWrite )
+ { /* CB has wrapped around.
+ * Return the first contiguous piece of memory, ie [ReadLen,EndOfBuffer]
+ * and let a subsequent AcquireReadPos read the rest of the Buffer */
+ /*PVR_DPF((PVR_DBG_VERBOSE, "TLStreamAcquireReadPos buffer has wrapped"));*/
+ uiReadLen = psStream->ui32Size - ui32LRead;
+ }
+ else
+ { // CB has not wrapped
+ uiReadLen = ui32LWrite - ui32LRead;
+ }
+
+ PVR_DPF_RETURN_VAL(uiReadLen);
+}
+
+PVRSRV_ERROR
+TLStreamAdvanceReadPos(PTL_STREAM psStream,
+ IMG_UINT32 uiReadLen,
+ IMG_UINT32 uiOrigReadLen)
+{
+ IMG_UINT32 uiNewReadPos;
+
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+
+ /*
+ * This API does not use Read lock as 'bReadPending' is sufficient
+ * to keep Read index safe by preventing a write from updating the
+ * index and 'bReadPending' itself is safe as it can only be modified
+ * by readers and there can be only one reader in action at a time.
+ */
+
+ /* Update the read offset by the length provided in a circular manner.
+ * Assuming the update to be atomic hence, avoiding use of locks
+ */
+ uiNewReadPos = (psStream->ui32Read + uiReadLen) % psStream->ui32Size;
+
+ /* Must validate length is on a packet boundary, for
+ * TLReleaseDataLess calls.
+ */
+ if (uiReadLen != uiOrigReadLen) /* buffer not empty */
+ {
+ PVRSRVTL_PPACKETHDR psHdr = GET_PACKET_HDR(psStream->pbyBuffer+uiNewReadPos);
+ PVRSRVTL_PACKETTYPE eType = GET_PACKET_TYPE(psHdr);
+
+ if ((psHdr->uiReserved != PVRSRVTL_PACKETHDR_RESERVED) ||
+ (eType == PVRSRVTL_PACKETTYPE_UNDEF) ||
+ (eType >= PVRSRVTL_PACKETTYPE_LAST))
+ {
+ PVR_DPF_RETURN_RC(PVRSRV_ERROR_INVALID_ALIGNMENT);
+ }
+ /* else OK, on a packet boundary */
+ }
+ /* else no check needed */
+
+ psStream->ui32Read = uiNewReadPos;
+
+ if (psStream->eOpMode == TL_OPMODE_DROP_OLDEST)
+ {
+ psStream->bReadPending = IMG_FALSE;
+ }
+
+ /* notify reserves that may be pending */
+ /* The producer event object is used to signal the StreamReserve if the TL
+ * Buffer is in blocking mode and is full.
+ * Previously this event was only signalled if the buffer was created in
+ * blocking mode. Since the buffer mode can now change dynamically the event
+ * is signalled every time to avoid any potential race where the signal is
+ * required, but not produced.
+ */
+ {
+ PVRSRV_ERROR eError;
+ eError = OSEventObjectSignal(psStream->hProducerEventObj);
+ if ( eError != PVRSRV_OK)
+ {
+ PVR_DPF((PVR_DBG_WARNING,
+ "Error in TLStreamAdvanceReadPos: OSEventObjectSignal returned:%u",
+ eError));
+ /* We've failed to notify the producer event. This means there may
+ * be a delay in generating more data to be consumed until the next
+ * Write() generating action occurs.
+ */
+ }
+ }
+
+ PVR_DPF((PVR_DBG_VERBOSE,
+ "TLStreamAdvanceReadPos Read now at: %d",
+ psStream->ui32Read));
+ PVR_DPF_RETURN_OK;
+}
+
+void
+TLStreamDestroy (PTL_STREAM psStream)
+{
+ PVR_ASSERT (psStream);
+
+ OSLockDestroy (psStream->hStreamWLock);
+ OSLockDestroy (psStream->hReadLock);
+
+ OSEventObjectClose(psStream->hProducerEvent);
+ OSEventObjectDestroy(psStream->hProducerEventObj);
+
+ TLFreeSharedMem(psStream);
+ OSFreeMem(psStream);
+}
+
+DEVMEM_MEMDESC*
+TLStreamGetBufferPointer(PTL_STREAM psStream)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+
+ PVR_DPF_RETURN_VAL(psStream->psStreamMemDesc);
+}
+
+IMG_BOOL
+TLStreamEOS(PTL_STREAM psStream)
+{
+ PVR_DPF_ENTERED;
+
+ PVR_ASSERT(psStream);
+
+ /* If both pointers are equal then the buffer is empty */
+ PVR_DPF_RETURN_VAL( psStream->ui32Read == psStream->ui32Write );
+}
diff --git a/drivers/gpu/drm/img-rogue/1.10/tlstream.h b/drivers/gpu/drm/img-rogue/1.10/tlstream.h
new file mode 100644
index 00000000000000..b398bf6fac2f5c
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/tlstream.h
@@ -0,0 +1,500 @@
+/*************************************************************************/ /*!
+@File
+@Title Transport Layer kernel side API.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description TL provides driver components with a way to copy data from kernel
+ space to user space (e.g. screen/file).
+
+ Data can be passed to the Transport Layer through the
+ TL Stream (kernel space) API interface.
+
+ The buffer provided to every stream is a modified version of a
+ circular buffer. Which CB version is created is specified by
+                relevant flags when creating a stream. Currently three types
+                of buffer are available:
+ - TL_OPMODE_DROP_NEWER:
+ When the buffer is full, incoming data are dropped
+ (instead of overwriting older data) and a marker is set
+ to let the user know that data have been lost.
+ - TL_OPMODE_BLOCK:
+ When the circular buffer is full, reserve/write calls block
+ until enough space is freed.
+ - TL_OPMODE_DROP_OLDEST:
+ When the circular buffer is full, the oldest packets in the
+                  buffer are dropped and a flag is set in the header of the next packet
+ to let the user know that data have been lost.
+
+ All size/space requests are in bytes. However, the actual
+ implementation uses native word sizes (i.e. 4 byte aligned).
+
+ The user does not need to provide space for the stream buffer
+ as the TL handles memory allocations and usage.
+
+ Inserting data to a stream's buffer can be done either:
+ - by using TLReserve/TLCommit: User is provided with a buffer
+ to write data to.
+ - or by using TLWrite: User provides a buffer with
+ data to be committed. The TL
+ copies the data from the
+ buffer into the stream buffer
+ and returns.
+ Users should be aware that there are implementation overheads
+ associated with every stream buffer. If you find that less
+ data are captured than expected then try increasing the
+ stream buffer size or use TLInfo to obtain buffer parameters
+ and calculate optimum required values at run time.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+#ifndef __TLSTREAM_H__
+#define __TLSTREAM_H__
+
+
+#include "img_types.h"
+#include "pvrsrv_error.h"
+#include "pvrsrv_tlcommon.h"
+#include "device.h"
+
+/*! Extract TL stream opmode from the given stream create flags.
+ * The last 3 bits of the stream flags are used to store the opmode, hence
+ * the opmode mask is set as follows. */
+#define TL_OPMODE_MASK 0x7
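+
+/* Illustrative sketch (not part of the driver): the opmode occupies the low
+ * bits and the behaviour flags defined further below occupy higher bits, so
+ * they can be OR-ed together and later separated with the mask:
+ *
+ *   IMG_UINT32 ui32Flags = TL_OPMODE_DROP_OLDEST | TL_FLAG_FORCE_FLUSH;
+ *   TL_OPMODE eOpMode = ui32Flags & TL_OPMODE_MASK; // TL_OPMODE_DROP_OLDEST
+ */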
+
+/*
+ * NOTE: This enum is used to directly access the HTB_OPMODE_xxx values
+ * within htbserver.c.
+ * As such we *MUST* keep the values matching in order of declaration.
+ */
+/*! Opmode specifying circular buffer behaviour */
+typedef enum
+{
+ /*! Undefined operation mode */
+ TL_OPMODE_UNDEF = 0,
+
+ /*! Reject new data if the buffer is full, producer may then decide to
+ * drop the data or retry after some time. */
+ TL_OPMODE_DROP_NEWER,
+
+ /*! When buffer is full, advance the tail/read position to accept the new
+ * reserve call (size permitting), effectively overwriting the oldest
+ * data in the circular buffer. */
+ TL_OPMODE_DROP_OLDEST,
+
+ /*! Block Reserve (subsequently Write) calls if there is not enough space
+ * until some space is freed via a client read operation. */
+ TL_OPMODE_BLOCK,
+
+	/*! For error checking */
+ TL_OPMODE_LAST
+
+} TL_OPMODE;
+
+static_assert(TL_OPMODE_LAST <= TL_OPMODE_MASK,
+ "TL_OPMODE_LAST must not exceed TL_OPMODE_MASK");
+
+/*! Flags specifying stream behaviour */
+/*! Do not destroy the stream if there are still data that have not been
+ * copied to user space. Block until the stream is emptied. */
+#define TL_FLAG_FORCE_FLUSH (1U<<8)
+/*! Do not signal consumers on commit automatically when the stream buffer
+ * transitions from empty to non-empty. The producer is responsible for
+ * signalling when it chooses. */
+#define TL_FLAG_NO_SIGNAL_ON_COMMIT (1U<<9)
+
+/*! Defer allocation of stream's shared memory until first open. */
+#define TL_FLAG_ALLOCATE_ON_FIRST_OPEN (1U<<10)
+
+/*! Structure used to pass internal TL stream sizes information to users.*/
+typedef struct _TL_STREAM_INFO_
+{
+ IMG_UINT32 headerSize; /*!< Packet header size in bytes */
+ IMG_UINT32 minReservationSize; /*!< Minimum data size reserved in bytes */
+ IMG_UINT32 pageSize; /*!< Page size in bytes */
+ IMG_UINT32 pageAlign; /*!< Page alignment in bytes */
+	IMG_UINT32 maxTLpacketSize;    /*!< Max allowed TL packet size */
+} TL_STREAM_INFO, *PTL_STREAM_INFO;
+
+/*! Callback operations or notifications that a stream producer may handle
+ * when requested by the Transport Layer.
+ */
+#define TL_SOURCECB_OP_CLIENT_EOS 0x01 /*!< Client has reached end of stream,
+                                        * can any more data be supplied?
+                                        * ui32Resp is ignored in this operation */
+
+/*! Function pointer type for the callback handler into the "producer" code
+ * that writes data to the TL stream. The producer should handle the
+ * notification or operation supplied in ui32ReqOp on stream hStream. The
+ * operations and notifications are defined above in TL_SOURCECB_OP */
+typedef PVRSRV_ERROR (*TL_STREAM_SOURCECB)(IMG_HANDLE hStream,
+ IMG_UINT32 ui32ReqOp, IMG_UINT32* ui32Resp, void* pvUser);
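+
+/* A minimal producer callback sketch (hypothetical names, for illustration
+ * only): on TL_SOURCECB_OP_CLIENT_EOS a producer might flush any locally
+ * buffered data into the stream before the reader gives up:
+ *
+ *   static PVRSRV_ERROR ExampleSourceCB(IMG_HANDLE hStream,
+ *                                       IMG_UINT32 ui32ReqOp,
+ *                                       IMG_UINT32 *pui32Resp,
+ *                                       void *pvUser)
+ *   {
+ *       if (ui32ReqOp == TL_SOURCECB_OP_CLIENT_EOS)
+ *       {
+ *           // e.g. TLStreamWrite(hStream, pui8Pending, ui32PendingLen);
+ *       }
+ *       return PVRSRV_OK;
+ *   }
+ */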
+
+typedef void (*TL_STREAM_ONREADEROPENCB)(void *pvArg);
+
+/*************************************************************************/ /*!
+ @Function TLAllocSharedMem
+ @Description Allocates shared memory for the stream.
+ @Input hStream Stream handle.
+ @Return eError Internal services call returned eError error
+ number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLAllocSharedMemIfNull(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLFreeSharedMem
+ @Description Frees the stream's shared memory.
+ @Input hStream Stream handle.
+*/ /**************************************************************************/
+void
+TLFreeSharedMem(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamCreate
+ @Description Request the creation of a new stream and open a handle.
+ If creating a stream which should continue to exist after the
+ current context is finished, then TLStreamCreate must be
+ followed by a TLStreamOpen call. In any case, the number of
+ create/open calls must be balanced by the number of close calls
+ made. This ensures the resources of a stream are released when
+ it is no longer required.
+ @Output phStream Pointer to handle to store the new stream.
+ @Input psDevNode Pointer to the Device Node to be used for
+ stream allocation.
+ @Input szStreamName Name of stream, maximum length:
+ PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ If a longer string is provided, creation fails.
+ @Input ui32Size Desired buffer size in bytes.
+ @Input ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Input pfOnReaderOpenCB Optional callback called when a client opens
+ this stream, may be null.
+ @Input pvOnReaderOpenUD Optional user data for pfOnReaderOpenCB, may
+ be null.
+ @Input pfProducerCB Optional callback, may be null.
+ @Input pvProducerUD Optional user data for callback, may be null.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or the stream name
+ exceeds PRVSRVTL_MAX_STREAM_NAME_SIZE.
+ @Return PVRSRV_ERROR_OUT_OF_MEMORY Failed to allocate space for stream
+ handle.
+ @Return PVRSRV_ERROR_DUPLICATE_VALUE There already exists a stream with
+ the same stream name string.
+ @Return eError Internal services call returned
+ eError error number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCreate(IMG_HANDLE *phStream,
+ PVRSRV_DEVICE_NODE *psDevNode,
+ IMG_CHAR *szStreamName,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32StreamFlags,
+ TL_STREAM_ONREADEROPENCB pfOnReaderOpenCB,
+ void *pvOnReaderOpenUD,
+ TL_STREAM_SOURCECB pfProducerCB,
+ void *pvProducerUD);
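+
+/* Illustrative usage sketch (not part of the driver; psDevNode is assumed to
+ * be a valid device node and "example" a suitably short stream name):
+ *
+ *   IMG_HANDLE hStream;
+ *   PVRSRV_ERROR eError = TLStreamCreate(&hStream, psDevNode, "example",
+ *                                        4096, TL_OPMODE_DROP_OLDEST,
+ *                                        NULL, NULL, NULL, NULL);
+ *   if (eError == PVRSRV_OK)
+ *   {
+ *       // ... produce data ...
+ *       TLStreamClose(hStream); // balances the create call
+ *   }
+ */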
+
+/*************************************************************************/ /*!
+ @Function TLStreamOpen
+ @Description Attach to existing stream that has already been created by a
+ TLStreamCreate call. A handle is returned to the stream.
+ @Output phStream Pointer to handle to store the stream.
+ @Input szStreamName Name of stream, should match an already
+ existing stream name
+ @Return PVRSRV_ERROR_NOT_FOUND None of the streams matched the
+ requested stream name.
+ @Return PVRSRV_ERROR_INVALID_PARAMS A non-NULL pointer to the stream
+ handle is required.
+ @Return PVRSRV_OK Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamOpen(IMG_HANDLE *phStream,
+ IMG_CHAR *szStreamName);
+
+
+/*************************************************************************/ /*!
+ @Function TLStreamReset
+ @Description Resets read and write pointers and pending flag.
+ @Input hStream Stream handle.
+*/ /**************************************************************************/
+void TLStreamReset(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamSetNotifStream
+ @Description Registers a "notification stream" which will be used to publish
+ information about state change of the "hStream" stream.
+ Notification can inform about events such as stream open/close,
+ etc.
+ @Input hStream Handle to stream to update.
+ @Input hNotifStream Handle to the stream which will be used for
+ publishing notifications.
+ @Return PVRSRV_ERROR_INVALID_PARAMS if either of the parameters is
+ NULL
+ @Return PVRSRV_OK Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSetNotifStream(IMG_HANDLE hStream, IMG_HANDLE hNotifStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamReconfigure
+ @Description Request the stream flags controlling buffer behaviour to
+ be updated.
+ In the case where TL_OPMODE_BLOCK is to be used,
+ TLStreamCreate should be called without that flag, and this
+ function used to change the stream mode once a consumer process
+ has been started. This avoids a deadlock scenario where the
+ TLStreamWrite/TLStreamReserve call would hold the Bridge Lock
+ while blocking if the TL buffer is full.
+ TL_OPMODE_BLOCK must never drop the Bridge Lock,
+ as this leads to another deadlock scenario where the caller of
+ TLStreamWrite/TLStreamReserve has already acquired another lock
+ (e.g. gHandleLock) which is not dropped. That thread would then
+ be acquiring locks out of order.
+ @Input hStream Handle to stream to update.
+ @Input ui32StreamFlags Flags that configure buffer behaviour. See above.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle or inconsistent
+ stream flags.
+ @Return PVRSRV_ERROR_NOT_READY Stream is currently being written to;
+ try again later.
+ @Return eError Internal services call returned
+ eError error number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReconfigure(
+ IMG_HANDLE hStream,
+ IMG_UINT32 ui32StreamFlags);
+
+/*************************************************************************/ /*!
+ @Function TLStreamClose
+ @Description Detach from the stream associated with the given handle. If
+ the current handle is the last one accessing the stream
+ (i.e. the number of TLStreamCreate+TLStreamOpen calls matches
+ the number of TLStreamClose calls) then the stream is also
+ deleted.
+ On return the handle is no longer valid.
+ @Input hStream Handle to stream that will be closed.
+ @Return None.
+*/ /**************************************************************************/
+void
+TLStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamReserve
+ @Description Reserve space in stream buffer. When successful every
+ TLStreamReserve call must be followed by a matching
+ TLStreamCommit call. While a TLStreamCommit call is pending
+ for a stream, subsequent TLStreamReserve calls for this
+ stream will fail.
+ @Input hStream Stream handle.
+ @Output ppui8Data Pointer to a pointer to a location in the
+ buffer. The caller can then use this address
+ when writing data into the stream.
+ @Input ui32Size Number of bytes to reserve in buffer.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return PVRSRV_ERROR_NOT_READY Previously reserved data is still
+ pending to be committed.
+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to
+ reserve more space than the
+ buffer size.
+ @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested
+ is larger than the free
+ space.
+ @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size
+ requested is larger
+ than max TL packet size
+ @Return PVRSRV_OK Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function TLStreamReserve2
+ @Description Reserve space in stream buffer. When successful every
+ TLStreamReserve call must be followed by a matching
+ TLStreamCommit call. While a TLStreamCommit call is pending
+ for a stream, subsequent TLStreamReserve calls for this
+ stream will fail.
+ @Input hStream Stream handle.
+ @Output ppui8Data Pointer to a pointer to a location in the
+ buffer. The caller can then use this address
+ when writing data into the stream.
+ @Input ui32Size Ideal number of bytes to reserve in buffer.
+ @Input ui32SizeMin Minimum number of bytes to reserve in buffer.
+ @Input pui32Available Optional; when present and
+ PVRSRV_ERROR_STREAM_FULL is returned, a size
+ suggestion is returned in this argument, which
+ the caller can attempt to reserve again for a
+ successful allocation.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return PVRSRV_ERROR_NOT_READY Previously reserved data is still
+ pending to be committed.
+ @Return PVRSRV_ERROR_STREAM_MISUSE Misusing the stream by trying to
+ reserve more space than the
+ buffer size.
+ @Return PVRSRV_ERROR_STREAM_FULL The reserve size requested
+ is larger than the free
+ space.
+ Check the pui32Available
+ value for the correct
+ reserve size to use.
+ @Return PVRSRV_ERROR_TLPACKET_SIZE_LIMIT_EXCEEDED The reserve size
+ requested is larger
+ than max TL packet size
+ @Return PVRSRV_OK Success, output arguments valid.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamReserve2(IMG_HANDLE hStream,
+ IMG_UINT8 **ppui8Data,
+ IMG_UINT32 ui32Size,
+ IMG_UINT32 ui32SizeMin,
+ IMG_UINT32* pui32Available);
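+
+/* Illustrative retry sketch (not part of the driver): if the ideal size does
+ * not fit, pui32Available suggests a size that can still be reserved:
+ *
+ *   IMG_UINT8 *pui8Dest;
+ *   IMG_UINT32 ui32Avail;
+ *   PVRSRV_ERROR eError = TLStreamReserve2(hStream, &pui8Dest, ui32Ideal,
+ *                                          ui32Min, &ui32Avail);
+ *   if (eError == PVRSRV_ERROR_STREAM_FULL)
+ *       eError = TLStreamReserve2(hStream, &pui8Dest, ui32Avail,
+ *                                 ui32Min, NULL);
+ */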
+
+/*************************************************************************/ /*!
+ @Function TLStreamCommit
+ @Description Notify TL that data has been written into the stream buffer.
+ Must always follow and match a TLStreamReserve call.
+ @Input hStream Stream handle.
+ @Input ui32Size Number of bytes that have been added to the
+ stream.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return PVRSRV_ERROR_STREAM_MISUSE The commit would result in more data
+ committed than the buffer size;
+ the stream has been misused.
+ @Return eError Commit was successful but
+ internal services call returned
+ eError error number.
+ @Return PVRSRV_OK
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamCommit(IMG_HANDLE hStream,
+ IMG_UINT32 ui32Size);
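+
+/* Illustrative Reserve/Commit pattern (not part of the driver): every
+ * successful Reserve must be balanced by a Commit of the written size. The
+ * copy below stands in for whatever the real producer writes:
+ *
+ *   IMG_UINT8 *pui8Dest;
+ *   if (TLStreamReserve(hStream, &pui8Dest, ui32Len) == PVRSRV_OK)
+ *   {
+ *       memcpy(pui8Dest, pvSrc, ui32Len); // or the OS memcpy abstraction
+ *       TLStreamCommit(hStream, ui32Len);
+ *   }
+ */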
+
+/*************************************************************************/ /*!
+ @Function TLStreamWrite
+ @Description Combined Reserve/Commit call. This function reserves space in
+ the specified stream buffer, copies ui32Size bytes of data
+ from the array pui8Src points to, and commits, in an "atomic"
+ style operation.
+ @Input hStream Stream handle.
+ @Input pui8Src Source to read data from.
+ @Input ui32Size Number of bytes to copy and commit.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return eError Error codes returned by either
+ Reserve or Commit.
+ @Return PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamWrite(IMG_HANDLE hStream,
+ IMG_UINT8 *pui8Src,
+ IMG_UINT32 ui32Size);
+
+/*************************************************************************/ /*!
+ @Function TLStreamSync
+ @Description Signal the consumer to start acquiring data from the stream
+ buffer. Called by producers that use the TL_FLAG_NO_SIGNAL_ON_COMMIT
+ flag to control manually when consumers start reading the
+ stream. Useful when multiple small writes need to be batched.
+ @Input hStream Stream handle.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return eError Error codes returned by either
+ Reserve or Commit.
+ @Return PVRSRV_OK
+ */ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamSync(IMG_HANDLE hStream);
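+
+/* Illustrative batching sketch (not part of the driver; apui8Record and
+ * aui32Size are hypothetical): with TL_FLAG_NO_SIGNAL_ON_COMMIT set, several
+ * small writes are committed without waking the consumer, then a single
+ * TLStreamSync() signals it once:
+ *
+ *   for (i = 0; i < uiNumRecords; i++)
+ *       TLStreamWrite(hStream, apui8Record[i], aui32Size[i]);
+ *   TLStreamSync(hStream);
+ */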
+
+
+/*************************************************************************/ /*!
+ @Function TLStreamMarkEOS
+ @Description Insert an EOS marker packet into the given stream.
+ @Input hStream Stream handle.
+ @Return PVRSRV_ERROR_INVALID_PARAMS NULL stream handle.
+ @Return eError Error codes returned by either
+ Reserve or Commit.
+ @Return PVRSRV_OK Success.
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkEOS(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+@Function TLStreamMarkStreamOpen
+@Description Puts an *open* stream packet into hStream's notification
+ stream if one is set; returns an error otherwise.
+@Input hStream Stream handle.
+@Return PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamOpen(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+@Function TLStreamMarkStreamClose
+@Description Puts a *close* stream packet into hStream's notification
+ stream if one is set; returns an error otherwise.
+@Input hStream Stream handle.
+@Return PVRSRV_OK on success and error code on failure
+*/ /**************************************************************************/
+PVRSRV_ERROR
+TLStreamMarkStreamClose(IMG_HANDLE hStream);
+
+/*************************************************************************/ /*!
+ @Function TLStreamInfo
+ @Description Reports run-time information about the stream's elemental
+ buffer sizes by setting the psInfo members accordingly. Users
+ can use those values to calculate the parameters they pass to
+ TLStreamCreate and TLStreamReserve.
+ @Input hStream Stream handle.
+ @Output psInfo Pointer to stream info structure.
+ @Return None.
+*/ /**************************************************************************/
+void
+TLStreamInfo(IMG_HANDLE hStream, PTL_STREAM_INFO psInfo);
+
+/*************************************************************************/ /*!
+ @Function TLStreamOutOfData
+ @Description Query if the stream is empty (no data waiting to be read).
+ @Input hStream Stream handle.
+ @Return IMG_BOOL IMG_TRUE if read == write (no data
+ waiting), IMG_FALSE otherwise.
+*/ /**************************************************************************/
+IMG_BOOL TLStreamOutOfData(IMG_HANDLE hStream);
+
+#endif /* __TLSTREAM_H__ */
+/*****************************************************************************
+ End of file (tlstream.h)
+*****************************************************************************/
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/trace_events.c b/drivers/gpu/drm/img-rogue/1.10/trace_events.c
new file mode 100644
index 00000000000000..f3371b9e6e0c04
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/trace_events.c
@@ -0,0 +1,231 @@
+/*************************************************************************/ /*!
+@Title Linux trace event helper functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#include <linux/version.h>
+#include <linux/sched.h>
+
+#include "img_types.h"
+#include "trace_events.h"
+#if !defined(SUPPORT_GPUTRACE_EVENTS)
+#define CREATE_TRACE_POINTS
+#endif
+#include "rogue_trace_events.h"
+#include "sync_checkpoint_external.h"
+
+static bool fence_update_event_enabled, fence_check_event_enabled;
+
+bool trace_rogue_are_fence_updates_traced(void)
+{
+ return fence_update_event_enabled;
+}
+
+bool trace_rogue_are_fence_checks_traced(void)
+{
+ return fence_check_event_enabled;
+}
+
+/*
+ * Callbacks referenced from rogue_trace_events.h. Note that these are not
+ * thread-safe; however, since running trace code when tracing is not enabled
+ * is simply a no-op, there is no harm in it.
+ */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_update_enabled_callback(void)
+#else
+void trace_fence_update_enabled_callback(void)
+#endif
+{
+ fence_update_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ return 0;
+#endif
+}
+
+void trace_fence_update_disabled_callback(void)
+{
+ fence_update_event_enabled = false;
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+int trace_fence_check_enabled_callback(void)
+#else
+void trace_fence_check_enabled_callback(void)
+#endif
+{
+ fence_check_event_enabled = true;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+ return 0;
+#endif
+}
+
+void trace_fence_check_disabled_callback(void)
+{
+ fence_check_event_enabled = false;
+}
+
+/* This is a helper that calls trace_rogue_fence_update for each fence in an
+ * array.
+ */
+void trace_rogue_fence_updates(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+ IMG_UINT i;
+ for (i = 0; i < uCount; i++)
+ {
+ trace_rogue_fence_update(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+ pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+ }
+}
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm, IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+ IMG_UINT i;
+ for (i = 0; i < uCount; i++)
+ {
+ trace_rogue_fence_check(current->comm, cmd, dm, ui32FWContext, ui32Offset,
+ pauiAddresses[i].ui32Addr, PVRSRV_SYNC_CHECKPOINT_SIGNALLED);
+ }
+}
+
+#if defined(SUPPORT_GPUTRACE_EVENTS)
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ IMG_UINT i;
+ for (i = 0; i < ui32UFOCount; i++)
+ {
+ trace_rogue_ufo_update(ui64OSTimestamp, ui32FWCtx,
+ ui32JobId,
+ puData->sUpdate.ui32FWAddr,
+ puData->sUpdate.ui32OldValue,
+ puData->sUpdate.ui32NewValue);
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+ + sizeof(puData->sUpdate));
+ }
+}
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ IMG_UINT i;
+ for (i = 0; i < ui32UFOCount; i++)
+ {
+ if (bPrEvent)
+ {
+ trace_rogue_ufo_pr_check_success(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckSuccess.ui32FWAddr,
+ puData->sCheckSuccess.ui32Value);
+ }
+ else
+ {
+ trace_rogue_ufo_check_success(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckSuccess.ui32FWAddr,
+ puData->sCheckSuccess.ui32Value);
+ }
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+ + sizeof(puData->sCheckSuccess));
+ }
+}
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+ IMG_UINT i;
+ for (i = 0; i < ui32UFOCount; i++)
+ {
+ if (bPrEvent)
+ {
+ trace_rogue_ufo_pr_check_fail(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckFail.ui32FWAddr,
+ puData->sCheckFail.ui32Value,
+ puData->sCheckFail.ui32Required);
+ }
+ else
+ {
+ trace_rogue_ufo_check_fail(ui64OSTimestamp, ui32FWCtx, ui32JobId,
+ puData->sCheckFail.ui32FWAddr,
+ puData->sCheckFail.ui32Value,
+ puData->sCheckFail.ui32Required);
+ }
+ puData = (RGX_HWPERF_UFO_DATA_ELEMENT *) (((IMG_BYTE *) puData)
+ + sizeof(puData->sCheckFail));
+ }
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
+
+int PVRGpuTraceEnableUfoCallbackWrapper(void)
+{
+ PVRGpuTraceEnableUfoCallback();
+
+ return 0;
+}
+
+int PVRGpuTraceEnableFirmwareActivityCallbackWrapper(void)
+{
+ PVRGpuTraceEnableFirmwareActivityCallback();
+
+ return 0;
+}
+#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) */
+#endif /* defined(SUPPORT_GPUTRACE_EVENTS) */
diff --git a/drivers/gpu/drm/img-rogue/1.10/trace_events.h b/drivers/gpu/drm/img-rogue/1.10/trace_events.h
new file mode 100644
index 00000000000000..840e93b6d187f9
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/trace_events.h
@@ -0,0 +1,154 @@
+/*************************************************************************/ /*!
+@Title Linux trace events and event helper functions
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#if !defined(TRACE_EVENTS_H)
+#define TRACE_EVENTS_H
+
+#include "rgx_fwif_km.h"
+#include "rgx_hwperf.h"
+
+/* We need to make these functions do nothing if CONFIG_EVENT_TRACING isn't
+ * enabled, just like the actual trace event functions that the kernel
+ * defines for us.
+ */
+#ifdef CONFIG_EVENT_TRACING
+bool trace_rogue_are_fence_checks_traced(void);
+
+bool trace_rogue_are_fence_updates_traced(void);
+
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values);
+
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values);
+
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData);
+
+#else /* !CONFIG_EVENT_TRACING */
+static inline
+bool trace_rogue_are_fence_checks_traced(void)
+{
+ return false;
+}
+
+static inline
+bool trace_rogue_are_fence_updates_traced(void)
+{
+ return false;
+}
+
+static inline
+void trace_rogue_fence_updates(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_fence_checks(const char *cmd, const char *dm,
+ IMG_UINT32 ui32FWContext,
+ IMG_UINT32 ui32Offset,
+ IMG_UINT uCount,
+ PRGXFWIF_UFO_ADDR *pauiAddresses,
+ IMG_UINT32 *paui32Values)
+{
+}
+
+static inline
+void trace_rogue_ufo_updates(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_success(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+
+static inline
+void trace_rogue_ufo_checks_fail(IMG_UINT64 ui64OSTimestamp,
+ IMG_UINT32 ui32FWCtx,
+ IMG_UINT32 ui32JobId,
+ IMG_BOOL bPrEvent,
+ IMG_UINT32 ui32UFOCount,
+ const RGX_HWPERF_UFO_DATA_ELEMENT *puData)
+{
+}
+#endif /* CONFIG_EVENT_TRACING */
+
+#endif /* TRACE_EVENTS_H */
diff --git a/drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.c b/drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.c
new file mode 100644
index 00000000000000..b0c695e30f4400
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.c
@@ -0,0 +1,244 @@
+/*************************************************************************/ /*!
+@File
+@Title Provides splay-trees.
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Implementation of splay-trees.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */ /**************************************************************************/
+
+#include "allocmem.h" /* for OSMemAlloc / OSMemFree */
+#include "osfunc.h" /* for OSMemFree */
+#include "pvr_debug.h"
+#include "uniq_key_splay_tree.h"
+
+/**
+ * This function performs a simple top-down splay.
+ *
+ * @param ui32Flags the key that must be splayed to the root (if possible).
+ * @param psTree the tree to splay.
+ * @return the resulting tree after the splay operation.
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+ IMG_SPLAY_TREE sTmp1;
+ IMG_PSPLAY_TREE psLeft;
+ IMG_PSPLAY_TREE psRight;
+ IMG_PSPLAY_TREE psTmp2;
+
+ if (psTree == NULL)
+ {
+ return NULL;
+ }
+
+ sTmp1.psLeft = NULL;
+ sTmp1.psRight = NULL;
+
+ psLeft = &sTmp1;
+ psRight = &sTmp1;
+
+ for (;;)
+ {
+ if (ui32Flags < psTree->ui32Flags)
+ {
+ if (psTree->psLeft == NULL)
+ {
+ break;
+ }
+
+ if (ui32Flags < psTree->psLeft->ui32Flags)
+ {
+ /* if we get to this point, we need to rotate right the tree */
+ psTmp2 = psTree->psLeft;
+ psTree->psLeft = psTmp2->psRight;
+ psTmp2->psRight = psTree;
+ psTree = psTmp2;
+ if (psTree->psLeft == NULL)
+ {
+ break;
+ }
+ }
+
+ /* if we get to this point, we need to link right */
+ psRight->psLeft = psTree;
+ psRight = psTree;
+ psTree = psTree->psLeft;
+ }
+ else
+ {
+ if (ui32Flags > psTree->ui32Flags)
+ {
+ if (psTree->psRight == NULL)
+ {
+ break;
+ }
+
+ if (ui32Flags > psTree->psRight->ui32Flags)
+ {
+ /* if we get to this point, we need to rotate left the tree */
+ psTmp2 = psTree->psRight;
+ psTree->psRight = psTmp2->psLeft;
+ psTmp2->psLeft = psTree;
+ psTree = psTmp2;
+ if (psTree->psRight == NULL)
+ {
+ break;
+ }
+ }
+
+ /* if we get to this point, we need to link left */
+ psLeft->psRight = psTree;
+ psLeft = psTree;
+ psTree = psTree->psRight;
+ }
+ else
+ {
+ break;
+ }
+ }
+ }
+
+ /* at this point re-assemble the tree */
+ psLeft->psRight = psTree->psLeft;
+ psRight->psLeft = psTree->psRight;
+ psTree->psLeft = sTmp1.psRight;
+ psTree->psRight = sTmp1.psLeft;
+ return psTree;
+}
+
+
+/**
+ * This function inserts a node into the tree (unless it is already present,
+ * in which case it is equivalent to performing only a splay operation).
+ *
+ * @param ui32Flags the key of the new node
+ * @param psTree the tree into which one wants to add a new node
+ * @return the resulting tree with the node in it
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+ IMG_PSPLAY_TREE psNew;
+
+ if (psTree != NULL)
+ {
+ psTree = PVRSRVSplay(ui32Flags, psTree);
+ if (psTree->ui32Flags == ui32Flags)
+ {
+ return psTree;
+ }
+ }
+
+ psNew = (IMG_PSPLAY_TREE) OSAllocMem(sizeof(IMG_SPLAY_TREE));
+ if (psNew == NULL)
+ {
+ PVR_DPF ((PVR_DBG_ERROR, "Error: failed to allocate memory to add a node to the splay tree."));
+ return NULL;
+ }
+
+ psNew->ui32Flags = ui32Flags;
+ OSCachedMemSet(&(psNew->buckets[0]), 0, sizeof(psNew->buckets));
+
+#if defined(PVR_CTZLL)
+ psNew->bHasEltsMapping = ~(((IMG_ELTS_MAPPINGS) 1 << (sizeof(psNew->buckets) / (sizeof(psNew->buckets[0])))) - 1);
+#endif
+
+ if (psTree == NULL)
+ {
+ psNew->psLeft = NULL;
+ psNew->psRight = NULL;
+ return psNew;
+ }
+
+ if (ui32Flags < psTree->ui32Flags)
+ {
+ psNew->psLeft = psTree->psLeft;
+ psNew->psRight = psTree;
+ psTree->psLeft = NULL;
+ }
+ else
+ {
+ psNew->psRight = psTree->psRight;
+ psNew->psLeft = psTree;
+ psTree->psRight = NULL;
+ }
+
+ return psNew;
+}
+
+
+/**
+ * Deletes a node from the tree (unless it is not there, in which case it is
+ * equivalent to a splay operation).
+ *
+ * @param ui32Flags the key of the node to remove
+ * @param psTree the tree from which the node must be removed
+ * @return the resulting tree
+ */
+IMG_INTERNAL
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree)
+{
+ IMG_PSPLAY_TREE psTmp;
+ if (psTree == NULL)
+ {
+ return NULL;
+ }
+
+ psTree = PVRSRVSplay(ui32Flags, psTree);
+ if (ui32Flags == psTree->ui32Flags)
+ {
+ /* The value was present in the tree */
+ if (psTree->psLeft == NULL)
+ {
+ psTmp = psTree->psRight;
+ }
+ else
+ {
+ psTmp = PVRSRVSplay(ui32Flags, psTree->psLeft);
+ psTmp->psRight = psTree->psRight;
+ }
+ OSFreeMem(psTree);
+ return psTmp;
+ }
+
+ /* the value was not present in the tree, so just return it as is (after the
+ * splay) */
+ return psTree;
+}
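+
+/* Illustrative usage sketch (not part of the driver): callers must keep the
+ * returned root, since every operation splays the tree and may change it:
+ *
+ *   IMG_PSPLAY_TREE psRoot = NULL;
+ *   psRoot = PVRSRVInsert(ui32Flags, psRoot); // add (or splay existing) key
+ *   psRoot = PVRSRVSplay(ui32Flags, psRoot);  // bring key to the root
+ *   psRoot = PVRSRVDelete(ui32Flags, psRoot); // remove key if present
+ */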
+
+
diff --git a/drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.h b/drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.h
new file mode 100644
index 00000000000000..945d93c382ec62
--- /dev/null
+++ b/drivers/gpu/drm/img-rogue/1.10/uniq_key_splay_tree.h
@@ -0,0 +1,86 @@
+/*************************************************************************/ /*!
+@File
+@Title Splay trees interface
+@Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
+@Description Provides the splay-tree interface.
+@License Dual MIT/GPLv2
+
+The contents of this file are subject to the MIT license as set out below.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+Alternatively, the contents of this file may be used under the terms of
+the GNU General Public License Version 2 ("GPL") in which case the provisions
+of GPL are applicable instead of those above.
+
+If you wish to allow use of your version of this file only under the terms of
+GPL, and not to allow others to use your version of this file under the terms
+of the MIT license, indicate your decision by deleting the provisions above
+and replace them with the notice and other provisions required by GPL as set
+out in the file called "GPL-COPYING" included in this distribution. If you do
+not delete the provisions above, a recipient may use your version of this file
+under the terms of either the MIT license or GPL.
+
+This License is also included in this distribution in the file called
+"MIT-COPYING".
+
+EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
+PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
+BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/ /**************************************************************************/
+
+#ifndef UNIQ_KEY_SPLAY_TREE_H_
+#define UNIQ_KEY_SPLAY_TREE_H_
+
+#include "img_types.h"
+#include "pvr_intrinsics.h"
+
+#if defined(PVR_CTZLL)
+ /* map the is_bucket_n_free booleans to an int.
+ * This way, the driver can find the first non-empty bucket without a loop.
+ */
+ typedef IMG_UINT64 IMG_ELTS_MAPPINGS;
+#endif
+
+/* heads of the lists of free boundary tags, indexed by pvr_log2 of the
+ boundary tag size */
+#define FREE_TABLE_LIMIT 40
+
+struct _BT_;
+
+typedef struct img_splay_tree
+{
+ /* left child/subtree */
+ struct img_splay_tree * psLeft;
+
+ /* right child/subtree */
+ struct img_splay_tree * psRight;
+
+ /* Flags to match on this span, used as the key. */
+ IMG_UINT32 ui32Flags;
+#if defined(PVR_CTZLL)
+ /* each bit of this integer is a boolean telling whether the
+ corresponding bucket is empty or not */
+ IMG_ELTS_MAPPINGS bHasEltsMapping;
+#endif
+ struct _BT_ * buckets[FREE_TABLE_LIMIT];
+} IMG_SPLAY_TREE, *IMG_PSPLAY_TREE;
+
+IMG_PSPLAY_TREE PVRSRVSplay (IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVInsert(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+IMG_PSPLAY_TREE PVRSRVDelete(IMG_UINT32 ui32Flags, IMG_PSPLAY_TREE psTree);
+
+
+#endif /* !UNIQ_KEY_SPLAY_TREE_H_ */
diff --git a/drivers/gpu/drm/img-rogue/1.9/module_common.c b/drivers/gpu/drm/img-rogue/1.9/module_common.c
index cf26fa10a2a564..926bfd1bc468ee 100644
--- a/drivers/gpu/drm/img-rogue/1.9/module_common.c
+++ b/drivers/gpu/drm/img-rogue/1.9/module_common.c
@@ -108,14 +108,6 @@ EXPORT_SYMBOL(PVRSRVGetErrorStringKM);
EXPORT_SYMBOL(RGXInitSLC);
#endif
-EXPORT_SYMBOL(RGXHWPerfConnect);
-EXPORT_SYMBOL(RGXHWPerfDisconnect);
-EXPORT_SYMBOL(RGXHWPerfControl);
-EXPORT_SYMBOL(RGXHWPerfConfigureAndEnableCounters);
-EXPORT_SYMBOL(RGXHWPerfDisableCounters);
-EXPORT_SYMBOL(RGXHWPerfAcquireEvents);
-EXPORT_SYMBOL(RGXHWPerfReleaseEvents);
-EXPORT_SYMBOL(RGXHWPerfConvertCRTimeStamp);
#if defined(SUPPORT_KERNEL_HWPERF_TEST)
EXPORT_SYMBOL(OSAddTimer);
EXPORT_SYMBOL(OSEnableTimer);
diff --git a/drivers/gpu/drm/img-rogue/1.9/pvr_dvfs_device.c b/drivers/gpu/drm/img-rogue/1.9/pvr_dvfs_device.c
index 37153db72a7e19..e85683834b011f 100644
--- a/drivers/gpu/drm/img-rogue/1.9/pvr_dvfs_device.c
+++ b/drivers/gpu/drm/img-rogue/1.9/pvr_dvfs_device.c
@@ -431,8 +431,13 @@ PVRSRV_ERROR InitDVFS(PPVRSRV_DEVICE_NODE psDeviceNode)
goto err_exit;
}
+#if defined(CHROMIUMOS_KERNEL) && (LINUX_VERSION_CODE > KERNEL_VERSION(4, 4, 0))
+ psDVFSDevice->psDevFreq->policy.user.min_freq = min_freq;
+ psDVFSDevice->psDevFreq->policy.user.max_freq = max_freq;
+#else
psDVFSDevice->psDevFreq->min_freq = min_freq;
psDVFSDevice->psDevFreq->max_freq = max_freq;
+#endif
err = devfreq_register_opp_notifier(psDev, psDVFSDevice->psDevFreq);
if (err) {
diff --git a/drivers/gpu/drm/img-rogue/Kconfig b/drivers/gpu/drm/img-rogue/Kconfig
index a58d929daad7ac..c7d4623f41c206 100644
--- a/drivers/gpu/drm/img-rogue/Kconfig
+++ b/drivers/gpu/drm/img-rogue/Kconfig
@@ -1,3 +1,21 @@
+config DRM_POWERVR_ROGUE_1_10
+ tristate "PowerVR Rogue"
+ depends on ARM64 || METAG
+ depends on HAS_IOMEM
+ depends on DRM
+ depends on SYNC_FILE
+ select DRM_KMS_HELPER
+ select GPU_TRACEPOINTS
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_OPP
+ select DEVFREQ_THERMAL
+ help
+ Driver for PowerVR Rogue graphics hardware 1.10.
+
+ Say Y here if your SoC contains a PowerVR Rogue GPU. For more
+ information, see <https://www.imgtec.com/graphics-processors/>.
+
config DRM_POWERVR_ROGUE_1_9
tristate "PowerVR Rogue"
depends on ARM64 || X86
@@ -13,18 +31,18 @@ config DRM_POWERVR_ROGUE_1_9
Driver for PowerVR Rogue graphics hardware 1.9.
Say Y here if your SoC contains a PowerVR Rogue GPU. For more
- information, see <http://www.imgtec.com/powervr/>.
+ information, see <https://www.imgtec.com/graphics-processors/>.
config DRM_POWERVR_ROGUE_DEBUG
bool "Enable PowerVR Rogue debug features"
- depends on DRM_POWERVR_ROGUE_1_9
+ depends on DRM_POWERVR_ROGUE_1_9 || DRM_POWERVR_ROGUE_1_10
default n
help
Add additional debug features to the PowerVR Rogue driver.
config DRM_POWERVR_ROGUE_PDUMP
bool "Enable PowerVR Rogue PDUMP tracing."
- depends on DRM_POWERVR_ROGUE_1_9
+ depends on DRM_POWERVR_ROGUE_1_9 || DRM_POWERVR_ROGUE_1_10
default n
help
Enable PDUMP.
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 945cb80ae0ba32..7942df26190b81 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -519,6 +519,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;
if (of_id)
@@ -559,14 +562,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
+ if (!of_device_is_available(child))
+ continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
- if (!of_device_is_available(child))
- continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 05108b505fbfa1..d9df8d32fc35bc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -274,6 +274,9 @@ int mgag200_mm_init(struct mga_device *mdev)
return ret;
}
+ arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+
mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
@@ -282,10 +285,14 @@ int mgag200_mm_init(struct mga_device *mdev)
void mgag200_mm_fini(struct mga_device *mdev)
{
+ struct drm_device *dev = mdev->dev;
+
ttm_bo_device_release(&mdev->ttm.bdev);
mgag200_ttm_global_release(mdev);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
arch_phys_wc_del(mdev->fb_mtrr);
mdev->fb_mtrr = 0;
}
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index d5cf937d687896..9bd3b7eb3eb3c8 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -107,7 +107,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (old_state->legacy_cursor_update)
continue;
+ if (drm_crtc_vblank_get(crtc))
+ continue;
+
kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+
+ drm_crtc_vblank_put(crtc);
}
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9a78c48817c6a4..909a52b21ebe78 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -103,7 +103,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
char *fptr = &fifo->buf[fifo->head];
int n;
- wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+ if (!rd->open)
+ return;
n = min(sz, circ_space_to_end(&rd->fifo));
memcpy(fptr, ptr, n);
@@ -192,7 +194,10 @@ out:
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
+
rd->open = false;
+ wake_up_all(&rd->fifo_event);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 5ab13e7939db7d..2922a82cba8e4d 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,13 +3,7 @@ config DRM_NOUVEAU
depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_TTM
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB
- select FRAMEBUFFER_CONSOLE if !EXPERT
select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
select X86_PLATFORM_DEVICES if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 62adf21a75948b..9553d6949f3521 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -253,12 +253,16 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- /* Outputs are only polled while runtime active, so acquiring a
- * runtime PM ref here is unnecessary (and would deadlock upon
- * runtime suspend because it waits for polling to finish).
+ /* Outputs are only polled while runtime active, so resuming the
+ * device here is unnecessary (and would deadlock upon runtime suspend
+ * because it waits for polling to finish). We do however, want to
+ * prevent the autosuspend timer from elapsing during this operation
+ * if possible.
*/
- if (!drm_kms_helper_is_poll_worker()) {
- ret = pm_runtime_get_sync(connector->dev->dev);
+ if (drm_kms_helper_is_poll_worker()) {
+ pm_runtime_get_noresume(dev->dev);
+ } else {
+ ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
@@ -329,10 +333,8 @@ detect_analog:
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
- }
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3dd600253d1d91..dab811138506d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -235,7 +235,7 @@ void
nouveau_fbcon_accel_save_disable(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
+ if (drm->fbcon && drm->fbcon->helper.fbdev) {
drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags;
drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}
@@ -245,7 +245,7 @@ void
nouveau_fbcon_accel_restore(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
+ if (drm->fbcon && drm->fbcon->helper.fbdev) {
drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags;
}
}
@@ -257,7 +257,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev)
struct nouveau_fbdev *fbcon = drm->fbcon;
if (fbcon && drm->channel) {
console_lock();
- fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
+ if (fbcon->helper.fbdev)
+ fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
console_unlock();
nouveau_channel_idle(drm->channel);
nvif_object_fini(&fbcon->twod);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index b477f6386dd0a6..e036a729d47ef7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -601,7 +601,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct nouveau_bo *nvbo;
uint32_t data;
- if (unlikely(r->bo_index > req->nr_buffers)) {
+ if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
@@ -611,7 +611,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (b->presumed.valid)
continue;
- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index d2e7d209f65174..9835327a32146f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -397,6 +397,9 @@ nouveau_ttm_init(struct nouveau_drm *drm)
/* VRAM init */
drm->gem.vram_available = drm->device.info.ram_user;
+ arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
drm->gem.vram_available >> PAGE_SHIFT);
if (ret) {
@@ -429,6 +432,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
+ struct nvkm_device *device = nvxx_device(&drm->device);
+
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
@@ -438,4 +443,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
+ arch_io_free_memtype_wc(device->func->resource_addr(device, 1),
+ device->func->resource_size(device, 1));
+
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index e7e581d6a8ff24..1bfc4807ce5b27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -23,6 +23,10 @@
#ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER
#include "priv.h"
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+#include <asm/dma-iommu.h>
+#endif
+
static int
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
@@ -85,6 +89,15 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
unsigned long pgsize_bitmap;
int ret;
+#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
+ if (dev->archdata.mapping) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
+#endif
+
if (!tdev->func->iommu_bit)
return;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
index 2b9c3f11b7a87b..ba42ed86148a1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
@@ -161,7 +161,8 @@ gm204_devinit_post(struct nvkm_devinit *base, bool post)
}
/* load and execute some other ucode image (bios therm?) */
- return pmu_load(init, 0x01, post, NULL, NULL);
+ pmu_load(init, 0x01, post, NULL, NULL);
+ return 0;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 083db3f5181fd6..8282ae0c4fc3e3 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -262,6 +262,17 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
}
txn->last_pat->next_pa = 0;
+ /* ensure that the written descriptors are visible to DMM */
+ wmb();
+
+ /*
+ * NOTE: the wmb() above should be enough, but there seems to be a bug
+ * in OMAP's memory barrier implementation, which in some rare cases may
+ * cause the writes not to be observable after wmb().
+ */
+
+ /* read back to ensure the data is in RAM */
+ readl(&txn->last_pat->next_pa);
/* write to PAT_DESCR to clear out any pending transaction */
writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index a188a3959f1ad3..6ad827b93ae19a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -823,7 +823,7 @@ static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx)
int ret, i;
ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id));
- if (ret < ARRAY_SIZE(id) || id[0] == 0x00) {
+ if (ret < 0 || ret < ARRAY_SIZE(id) || id[0] == 0x00) {
dev_err(ctx->dev, "read id failed\n");
ctx->error = -EIO;
return;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 3677c272305d33..0ae21731a5169b 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -165,6 +165,8 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
schedule_work(&qdev->fb_work);
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+
static void qxl_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
@@ -193,6 +195,7 @@ static struct fb_deferred_io qxl_defio = {
.delay = QXL_DIRTY_DELAY,
.deferred_io = qxl_deferred_io,
};
+#endif
static void qxl_fb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
@@ -418,8 +421,10 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev,
goto out_destroy_fbi;
}
+#ifdef CONFIG_DRM_FBDEV_EMULATION
info->fbdefio = &qxl_defio;
fb_deferred_io_init(info);
+#endif
qdev->fbdev_info = info;
qdev->fbdev_qfb = &qfbdev->qfb;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 46f87d4aaf31fe..782fee330b4c39 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -1299,6 +1299,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
return -EINVAL;
}
ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff);
+ break;
case CB_TARGET_MASK:
track->cb_target_mask = radeon_get_ib_value(p, idx);
track->cb_dirty = true;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1a2a7365d0b5f7..c6bf378534f834 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -844,7 +844,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector)
return ret;
}
-static int radeon_lvds_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
@@ -993,7 +993,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
return ret;
}
-static int radeon_vga_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -1136,7 +1136,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
return 1;
}
-static int radeon_tv_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
if ((mode->hdisplay > 1024) || (mode->vdisplay > 768))
@@ -1477,7 +1477,7 @@ static void radeon_dvi_force(struct drm_connector *connector)
radeon_connector->use_digital = true;
}
-static int radeon_dvi_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
@@ -1778,7 +1778,7 @@ out:
return ret;
}
-static int radeon_dp_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index fff7022dcf56f0..1d1d430bb589be 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -446,9 +446,11 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
DRM_DEBUG_KMS("dig encoder is %d %d %d\n", dig_enc->dig_encoder,
dig_enc->linkb, radeon_crtc->crtc_id);
+ slots = drm_dp_find_vcpi_slots(&radeon_connector->mst_port->mst_mgr,
+ mst_enc->pbn);
ret = drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
radeon_connector->port,
- mst_enc->pbn, &slots);
+ mst_enc->pbn, slots);
ret = drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
radeon_dp_mst_set_be_cntl(primary, mst_enc,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0caa6f226ea6ee..9b2b0871c0fac2 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -291,7 +291,8 @@ out_unref:
void radeon_fb_output_poll_changed(struct radeon_device *rdev)
{
- drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper);
}
static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev)
@@ -324,6 +325,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
int bpp_sel = 32;
int ret;
+ /* don't enable fbdev if no connectors */
+ if (list_empty(&rdev->ddev->mode_config.connector_list))
+ return 0;
+
/* select 8 bpp console on RN50 or 16MB cards */
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
bpp_sel = 8;
@@ -376,11 +381,15 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state)
{
- fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_set_suspend(&rdev->mode_info.rfbdev->helper, state);
}
bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
{
+ if (!rdev->mode_info.rfbdev)
+ return false;
+
if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj))
return true;
return false;
@@ -388,12 +397,14 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
{
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
+ if (rdev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
}
void radeon_fbdev_restore_mode(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 83aee9e814ba05..18ec38d0d3f5ed 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -447,6 +447,10 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
int radeon_bo_init(struct radeon_device *rdev)
{
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(rdev->mc.aper_base,
+ rdev->mc.aper_size);
+
/* Add an MTRR for the VRAM */
if (!rdev->fastfb_working) {
rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
@@ -464,6 +468,7 @@ void radeon_bo_fini(struct radeon_device *rdev)
{
radeon_ttm_fini(rdev);
arch_phys_wc_del(rdev->mc.vram_mtrr);
+ arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}
/* Returns how many bytes TTM can move per IB.
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d684e2b79d2bf9..0c380fe7738269 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -557,7 +557,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
struct page **pages = ttm->pages + pinned;
r = get_user_pages(current, current->mm, userptr, num_pages,
- write, 0, pages, NULL);
+ write ? FOLL_WRITE : 0, pages, NULL);
if (r < 0)
goto release_pages;
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index cd6a6e24293543..73fbff31255211 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -8,7 +8,8 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
rockchipdrm-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
-rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o
+rockchipdrm-$(CONFIG_ROCKCHIP_CDN_DP) += cdn-dp-core.o cdn-dp-reg.o \
+ cdn-dp-link-training.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi_rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 1973617ccbe075..2a07916a3d7f73 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -193,8 +193,8 @@ static int cdn_dp_get_sink_count(struct cdn_dp_device *dp, u8 *sink_count)
u8 value;
*sink_count = 0;
- ret = cdn_dp_dpcd_read(dp, DP_SINK_COUNT, &value, 1);
- if (ret)
+ ret = drm_dp_dpcd_read(&dp->aux, DP_SINK_COUNT, &value, 1);
+ if (ret < 0)
return ret;
*sink_count = DP_GET_SINK_COUNT(value);
@@ -516,9 +516,9 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp)
if (!cdn_dp_check_sink_connection(dp))
return -ENODEV;
- ret = cdn_dp_dpcd_read(dp, DP_DPCD_REV, dp->dpcd,
- DP_RECEIVER_CAP_SIZE);
- if (ret) {
+ ret = drm_dp_dpcd_read(&dp->aux, DP_DPCD_REV, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
return ret;
}
@@ -733,8 +733,8 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp)
if (!port || !dp->link.rate || !dp->link.num_lanes)
return false;
- if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status,
- DP_LINK_STATUS_SIZE)) {
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) !=
+ DP_LINK_STATUS_SIZE) {
DRM_ERROR("Failed to get link status\n");
return false;
}
@@ -778,11 +778,13 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
goto out;
}
}
-
- ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
- if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to idle video %d\n", ret);
- goto out;
+ if (dp->use_fw_training) {
+ ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_IDLE);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to idle video %d\n", ret);
+ goto out;
+ }
}
ret = cdn_dp_config_video(dp);
@@ -791,11 +793,15 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder)
goto out;
}
- ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
- if (ret) {
- DRM_DEV_ERROR(dp->dev, "Failed to valid video %d\n", ret);
- goto out;
+ if (dp->use_fw_training) {
+ ret = cdn_dp_set_video_status(dp, CONTROL_VIDEO_VALID);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to valid video %d\n", ret);
+ goto out;
+ }
}
+
out:
mutex_unlock(&dp->lock);
if (!ret)
@@ -1357,6 +1363,40 @@ static void cdn_dp_hdcp_prop_work(struct work_struct *work)
}
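+/*
+ * drm_dp_aux .transfer hook: route native and I2C AUX requests through
+ * the firmware mailbox DPCD helpers, then translate the controller's
+ * last AUX status into the matching DRM native reply code.
+ */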
+static ssize_t cdn_dp_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct cdn_dp_device *dp = container_of(aux, struct cdn_dp_device, aux);
+ int ret;
+ u8 status;
+
+ switch (msg->request & ~DP_AUX_I2C_MOT) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_WRITE_STATUS_UPDATE:
+ ret = cdn_dp_dpcd_write(dp, msg->address, msg->buffer,
+ msg->size);
+ break;
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ ret = cdn_dp_dpcd_read(dp, msg->address, msg->buffer,
+ msg->size);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ status = cdn_dp_get_aux_status(dp);
+ if (status == AUX_STATUS_ACK)
+ msg->reply = DP_AUX_NATIVE_REPLY_ACK;
+ else if (status == AUX_STATUS_NACK)
+ msg->reply = DP_AUX_NATIVE_REPLY_NACK;
+ else if (status == AUX_STATUS_DEFER)
+ msg->reply = DP_AUX_NATIVE_REPLY_DEFER;
+
+ return ret;
+}
+
static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
@@ -1375,6 +1415,13 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
dp->active = false;
dp->active_port = -1;
dp->fw_loaded = false;
+ dp->aux.name = "DP-AUX";
+ dp->aux.transfer = cdn_dp_aux_transfer;
+ dp->aux.dev = dev;
+
+ ret = drm_dp_aux_register(&dp->aux);
+ if (ret)
+ return ret;
INIT_WORK(&dp->event_work, cdn_dp_pd_event_work);
INIT_DELAYED_WORK(&dp->hdcp_event_work, cdn_dp_hdcp_event_work);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h
index 062599536c044b..b63de57b3150fe 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h
@@ -98,11 +98,13 @@ struct cdn_dp_device {
struct platform_device *audio_pdev;
struct work_struct event_work;
struct edid *edid;
+ struct drm_dp_aux aux;
struct mutex lock;
bool connected;
bool active;
bool suspended;
+ bool use_fw_training;
const struct firmware *fw; /* cdn dp firmware */
unsigned int fw_version; /* cdn fw version */
@@ -125,6 +127,7 @@ struct cdn_dp_device {
u8 ports;
u8 lanes;
int active_port;
+ u8 train_set[4];
u8 dpcd[DP_RECEIVER_CAP_SIZE];
bool sink_has_audio;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-link-training.c b/drivers/gpu/drm/rockchip/cdn-dp-link-training.c
new file mode 100644
index 00000000000000..73c329008c27ee
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/cdn-dp-link-training.c
@@ -0,0 +1,420 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Chris Zhong <zyw@rock-chips.com>
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <soc/rockchip/rockchip_phy_typec.h>
+
+#include "cdn-dp-core.h"
+#include "cdn-dp-reg.h"
+
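+/*
+ * Program the Type-C PHY for the current link rate and lane count, using
+ * the voltage swing and pre-emphasis requested in the lane-0 entry of
+ * dp->train_set.
+ */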
+static void cdn_dp_set_signal_levels(struct cdn_dp_device *dp)
+{
+ struct cdn_dp_port *port = dp->port[dp->active_port];
+ struct rockchip_typec_phy *tcphy = phy_get_drvdata(port->phy);
+
+ int rate = drm_dp_bw_code_to_link_rate(dp->link.rate);
+ u8 swing = (dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ u8 pre_emphasis = (dp->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+ >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ tcphy->typec_phy_config(port->phy, rate, dp->link.num_lanes,
+ swing, pre_emphasis);
+}
+
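+/*
+ * Configure the DP framer and TX PHY for the requested training pattern.
+ * For any pattern other than DP_TRAINING_PATTERN_DISABLE the video path
+ * is idled and the scrambler bypassed; the active lanes and enhanced
+ * framing are (re)programmed in either case.
+ */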
+static int cdn_dp_set_pattern(struct cdn_dp_device *dp, uint8_t dp_train_pat)
+{
+ u32 phy_config, global_config;
+ int ret;
+ uint8_t pattern = dp_train_pat & DP_TRAINING_PATTERN_MASK;
+
+ global_config = NUM_LANES(dp->link.num_lanes - 1) | SST_MODE |
+ GLOBAL_EN | RG_EN | ENC_RST_DIS | WR_VHSYNC_FALL;
+
+ phy_config = DP_TX_PHY_ENCODER_BYPASS(0) |
+ DP_TX_PHY_SKEW_BYPASS(0) |
+ DP_TX_PHY_DISPARITY_RST(0) |
+ DP_TX_PHY_LANE0_SKEW(0) |
+ DP_TX_PHY_LANE1_SKEW(1) |
+ DP_TX_PHY_LANE2_SKEW(2) |
+ DP_TX_PHY_LANE3_SKEW(3) |
+ DP_TX_PHY_10BIT_ENABLE(0);
+
+ if (pattern != DP_TRAINING_PATTERN_DISABLE) {
+ global_config |= NO_VIDEO;
+ phy_config |= DP_TX_PHY_TRAINING_ENABLE(1) |
+ DP_TX_PHY_SCRAMBLER_BYPASS(1) |
+ DP_TX_PHY_TRAINING_PATTERN(pattern);
+ }
+
+ ret = cdn_dp_reg_write(dp, DP_FRAMER_GLOBAL_CONFIG, global_config);
+ if (ret) {
+		DRM_ERROR("failed to set DP_FRAMER_GLOBAL_CONFIG, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = cdn_dp_reg_write(dp, DP_TX_PHY_CONFIG_REG, phy_config);
+ if (ret) {
+		DRM_ERROR("failed to set DP_TX_PHY_CONFIG_REG, error: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = cdn_dp_reg_write(dp, DPTX_LANE_EN, BIT(dp->link.num_lanes) - 1);
+ if (ret) {
+		DRM_ERROR("failed to set DPTX_LANE_EN, error: %d\n", ret);
+ return ret;
+ }
+
+ if (drm_dp_enhanced_frame_cap(dp->dpcd))
+ ret = cdn_dp_reg_write(dp, DPTX_ENHNCD, 1);
+ else
+ ret = cdn_dp_reg_write(dp, DPTX_ENHNCD, 0);
+ if (ret)
+		DRM_ERROR("failed to set DPTX_ENHNCD, error: %d\n", ret);
+
+ return ret;
+}
+
+static u8 cdn_dp_pre_emphasis_max(u8 voltage_swing)
+{
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
+ return DP_TRAIN_PRE_EMPH_LEVEL_3;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
+ return DP_TRAIN_PRE_EMPH_LEVEL_2;
+ case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
+ return DP_TRAIN_PRE_EMPH_LEVEL_1;
+ default:
+ return DP_TRAIN_PRE_EMPH_LEVEL_0;
+ }
+}
+
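+/*
+ * Derive the next drive settings from the sink's adjustment requests:
+ * take the highest voltage swing and pre-emphasis requested on any lane,
+ * clamp them to the source maximums, flag when a maximum is reached, and
+ * apply the same settings to every lane.
+ */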
+static void cdn_dp_get_adjust_train(struct cdn_dp_device *dp,
+ uint8_t link_status[DP_LINK_STATUS_SIZE])
+{
+ int i;
+ uint8_t v = 0, p = 0;
+ uint8_t preemph_max;
+
+ for (i = 0; i < dp->link.num_lanes; i++) {
+ v = max(v, drm_dp_get_adjust_request_voltage(link_status, i));
+ p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status,
+ i));
+ }
+
+ if (v >= VOLTAGE_LEVEL_2)
+ v = VOLTAGE_LEVEL_2 | DP_TRAIN_MAX_SWING_REACHED;
+
+ preemph_max = cdn_dp_pre_emphasis_max(v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+ for (i = 0; i < dp->link.num_lanes; i++)
+ dp->train_set[i] = v | p;
+}
+
+/*
+ * Pick the training pattern for channel equalization: Training Pattern 3
+ * for HBR2 or DP 1.2 devices that support it, Training Pattern 2 otherwise.
+ */
+static u32 cdn_dp_select_chaneq_pattern(struct cdn_dp_device *dp)
+{
+ u32 training_pattern = DP_TRAINING_PATTERN_2;
+
+ /*
+	 * The cdn dp controller supports HBR2 and therefore also TPS3. TPS3
+	 * support is mandatory for downstream devices that support HBR2, but
+	 * not all sinks follow the spec.
+ */
+ if (drm_dp_tps3_supported(dp->dpcd))
+ training_pattern = DP_TRAINING_PATTERN_3;
+ else
+ DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+
+ return training_pattern;
+}
+
+
+static bool cdn_dp_link_max_vswing_reached(struct cdn_dp_device *dp)
+{
+ int lane;
+
+ for (lane = 0; lane < dp->link.num_lanes; lane++)
+ if ((dp->train_set[lane] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ return false;
+
+ return true;
+}
+
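+/*
+ * Apply the current train_set to the local PHY and mirror it to the
+ * sink's DP_TRAINING_LANEx_SET registers.
+ */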
+static int cdn_dp_update_link_train(struct cdn_dp_device *dp)
+{
+ int ret;
+
+ cdn_dp_set_signal_levels(dp);
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET,
+ dp->train_set, dp->link.num_lanes);
+ if (ret != dp->link.num_lanes)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cdn_dp_set_link_train(struct cdn_dp_device *dp,
+ uint8_t dp_train_pat)
+{
+ uint8_t buf[sizeof(dp->train_set) + 1];
+ int ret, len;
+
+ buf[0] = dp_train_pat;
+ if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
+ DP_TRAINING_PATTERN_DISABLE) {
+ /* don't write DP_TRAINING_LANEx_SET on disable */
+ len = 1;
+ } else {
+ /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
+ memcpy(buf + 1, dp->train_set, dp->link.num_lanes);
+ len = dp->link.num_lanes + 1;
+ }
+
+ ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_PATTERN_SET,
+ buf, len);
+ if (ret != len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cdn_dp_reset_link_train(struct cdn_dp_device *dp,
+ uint8_t dp_train_pat)
+{
+ int ret;
+
+ memset(dp->train_set, 0, sizeof(dp->train_set));
+
+ cdn_dp_set_signal_levels(dp);
+
+ ret = cdn_dp_set_pattern(dp, dp_train_pat);
+ if (ret)
+ return ret;
+
+ return cdn_dp_set_link_train(dp, dp_train_pat);
+}
+
+/* Enable corresponding port and start training pattern 1 */
+static int cdn_dp_link_training_clock_recovery(struct cdn_dp_device *dp)
+{
+ u8 voltage;
+ u8 link_status[DP_LINK_STATUS_SIZE];
+ u32 voltage_tries, max_vswing_tries;
+ int ret;
+
+ /* clock recovery */
+ ret = cdn_dp_reset_link_train(dp, DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE);
+ if (ret) {
+ DRM_ERROR("failed to start link train\n");
+ return ret;
+ }
+
+ voltage_tries = 1;
+ max_vswing_tries = 0;
+ for (;;) {
+ drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) !=
+ DP_LINK_STATUS_SIZE) {
+ DRM_ERROR("failed to get link status\n");
+ return -EINVAL;
+ }
+
+ if (drm_dp_clock_recovery_ok(link_status, dp->link.num_lanes)) {
+ DRM_DEBUG_KMS("clock recovery OK\n");
+ return 0;
+ }
+
+ if (voltage_tries >= 5) {
+ DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ return -EINVAL;
+ }
+
+ if (max_vswing_tries >= 1) {
+ DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ return -EINVAL;
+ }
+
+ voltage = dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+ /* Update training set as requested by target */
+ cdn_dp_get_adjust_train(dp, link_status);
+ if (cdn_dp_update_link_train(dp)) {
+ DRM_ERROR("failed to update link training\n");
+ return -EINVAL;
+ }
+
+ if ((dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
+ voltage)
+ ++voltage_tries;
+ else
+ voltage_tries = 1;
+
+ if (cdn_dp_link_max_vswing_reached(dp))
+ ++max_vswing_tries;
+ }
+}
+
+static int cdn_dp_link_training_channel_equalization(struct cdn_dp_device *dp)
+{
+ int tries, ret;
+ u32 training_pattern;
+ uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+ training_pattern = cdn_dp_select_chaneq_pattern(dp);
+ training_pattern |= DP_LINK_SCRAMBLING_DISABLE;
+
+ ret = cdn_dp_set_pattern(dp, training_pattern);
+ if (ret)
+ return ret;
+
+ ret = cdn_dp_set_link_train(dp, training_pattern);
+ if (ret) {
+ DRM_ERROR("failed to start channel equalization\n");
+ return ret;
+ }
+
+ for (tries = 0; tries < 5; tries++) {
+ drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) !=
+ DP_LINK_STATUS_SIZE) {
+ DRM_ERROR("failed to get link status\n");
+ break;
+ }
+
+ /* Make sure clock is still ok */
+ if (!drm_dp_clock_recovery_ok(link_status,
+ dp->link.num_lanes)) {
+ DRM_DEBUG_KMS("Clock recovery check failed\n");
+ break;
+ }
+
+ if (drm_dp_channel_eq_ok(link_status, dp->link.num_lanes)) {
+ DRM_DEBUG_KMS("Channel EQ done\n");
+ return 0;
+ }
+
+ /* Update training set as requested by target */
+ cdn_dp_get_adjust_train(dp, link_status);
+ if (cdn_dp_update_link_train(dp)) {
+ DRM_ERROR("failed to update link training\n");
+ break;
+ }
+ }
+
+ /* Try 5 times, else fail and try at lower BW */
+ if (tries == 5)
+ DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
+
+ return -EINVAL;
+}
+
+static int cdn_dp_stop_link_train(struct cdn_dp_device *dp)
+{
+ int ret = cdn_dp_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE);
+
+ if (ret)
+ return ret;
+
+ return cdn_dp_set_link_train(dp, DP_TRAINING_PATTERN_DISABLE);
+}
+
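+/*
+ * Fall back to the next lower standard link rate (5.4 -> 2.7 -> 1.62
+ * Gbps). Returns -EINVAL at RBR so the caller knows no lower rate is
+ * available; unknown codes are reset to HBR2.
+ */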
+static int cdn_dp_get_lower_link_rate(struct cdn_dp_device *dp)
+{
+ switch (dp->link.rate) {
+ case DP_LINK_BW_1_62:
+ return -EINVAL;
+ case DP_LINK_BW_2_7:
+ dp->link.rate = DP_LINK_BW_1_62;
+ break;
+ case DP_LINK_BW_5_4:
+ dp->link.rate = DP_LINK_BW_2_7;
+ break;
+ default:
+ dp->link.rate = DP_LINK_BW_5_4;
+ break;
+ }
+
+ return 0;
+}
+
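+/*
+ * Full software link-training sequence: read the sink's DPCD receiver
+ * caps, negotiate lane count and link rate against the source limits,
+ * then run clock recovery and channel equalization, stepping down the
+ * link rate and retrying until training succeeds or the lowest rate
+ * fails as well.
+ */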
+int cdn_dp_software_train_link(struct cdn_dp_device *dp)
+{
+ int ret, stop_err;
+ u8 link_config[2];
+ u32 rate, sink_max, source_max;
+
+ ret = drm_dp_dpcd_read(&dp->aux, DP_DPCD_REV, dp->dpcd,
+ sizeof(dp->dpcd));
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "Failed to get caps %d\n", ret);
+ return ret;
+ }
+
+ source_max = dp->lanes;
+ sink_max = drm_dp_max_lane_count(dp->dpcd);
+ dp->link.num_lanes = min(source_max, sink_max);
+
+ source_max = drm_dp_bw_code_to_link_rate(CDN_DP_MAX_LINK_RATE);
+ sink_max = drm_dp_max_link_rate(dp->dpcd);
+ rate = min(source_max, sink_max);
+ dp->link.rate = drm_dp_link_rate_to_bw_code(rate);
+
+ link_config[0] = 0;
+ link_config[1] = 0;
+ if (dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & 0x01)
+ link_config[1] = DP_SET_ANSI_8B10B;
+ drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+
+ while (true) {
+
+ /* Write the link configuration data */
+ link_config[0] = dp->link.rate;
+ link_config[1] = dp->link.num_lanes;
+ if (drm_dp_enhanced_frame_cap(dp->dpcd))
+ link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+ drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, link_config, 2);
+
+ ret = cdn_dp_link_training_clock_recovery(dp);
+ if (ret) {
+ if (!cdn_dp_get_lower_link_rate(dp))
+ continue;
+
+ DRM_ERROR("training clock recovery failed: %d\n", ret);
+ break;
+ }
+
+ ret = cdn_dp_link_training_channel_equalization(dp);
+ if (ret) {
+ if (!cdn_dp_get_lower_link_rate(dp))
+ continue;
+
+ DRM_ERROR("training channel eq failed: %d\n", ret);
+ break;
+ }
+
+ break;
+ }
+
+ stop_err = cdn_dp_stop_link_train(dp);
+ if (stop_err) {
+		DRM_ERROR("failed to stop link training, error: %d\n", stop_err);
+ return stop_err;
+ }
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 38974d6fd17107..6016d0ee10f1e5 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -18,7 +18,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <linux/reset.h>
+#include <soc/rockchip/rockchip_phy_typec.h>
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
@@ -190,7 +192,7 @@ static int cdn_dp_mailbox_send(struct cdn_dp_device *dp, u8 module_id,
return 0;
}
-static int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
+int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val)
{
u8 msg[6];
@@ -222,7 +224,12 @@ static int cdn_dp_reg_write_bit(struct cdn_dp_device *dp, u16 addr,
sizeof(field), field);
}
-int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
+/*
+ * Returns the number of bytes transferred on success, or a negative
+ * error code on failure. -ETIMEDOUT is returned if the mailbox message
+ * was not sent successfully.
+ */
+ssize_t cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
{
u8 msg[5], reg[5];
int ret;
@@ -248,24 +255,41 @@ int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
goto err_dpcd_read;
ret = cdn_dp_mailbox_read_receive(dp, data, len);
+ if (!ret)
+ return len;
err_dpcd_read:
+ DRM_DEV_ERROR(dp->dev, "dpcd read failed: %d\n", ret);
return ret;
}
-int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
+#define CDN_AUX_HEADER_SIZE 5
+#define CDN_AUX_MSG_SIZE 20
+/*
+ * Returns the number of bytes transferred on success, or a negative error
+ * code on failure. -ETIMEDOUT is returned if the mailbox message was not
+ * sent successfully; -EINVAL is returned if the device reports back the
+ * wrong data size after the message is sent.
+ */
+ssize_t cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len)
{
- u8 msg[6], reg[5];
+ u8 msg[CDN_AUX_MSG_SIZE + CDN_AUX_HEADER_SIZE];
+ u8 reg[CDN_AUX_HEADER_SIZE];
int ret;
- msg[0] = 0;
- msg[1] = 1;
+ if (WARN_ON(len > CDN_AUX_MSG_SIZE) || WARN_ON(len <= 0))
+ return -EINVAL;
+
+ msg[0] = (len >> 8) & 0xff;
+ msg[1] = len & 0xff;
msg[2] = (addr >> 16) & 0xff;
msg[3] = (addr >> 8) & 0xff;
msg[4] = addr & 0xff;
- msg[5] = value;
+
+ memcpy(msg + CDN_AUX_HEADER_SIZE, data, len);
+
ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX, DPTX_WRITE_DPCD,
- sizeof(msg), msg);
+ CDN_AUX_HEADER_SIZE + len, msg);
if (ret)
goto err_dpcd_write;
@@ -278,8 +302,12 @@ int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value)
if (ret)
goto err_dpcd_write;
- if (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))
+ if ((len != (reg[0] << 8 | reg[1])) ||
+ (addr != (reg[2] << 16 | reg[3] << 8 | reg[4]))) {
ret = -EINVAL;
+ } else {
+ return len;
+ }
err_dpcd_write:
if (ret)
@@ -287,6 +315,33 @@ err_dpcd_write:
return ret;
}
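+/*
+ * Query the result of the last AUX transaction from the firmware.
+ * Returns one of the AUX_STATUS_* codes on success, or a negative error
+ * code if the mailbox exchange itself fails.
+ */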
+int cdn_dp_get_aux_status(struct cdn_dp_device *dp)
+{
+ u8 status;
+ int ret;
+
+ ret = cdn_dp_mailbox_send(dp, MB_MODULE_ID_DP_TX,
+ DPTX_GET_LAST_AUX_STAUS, 0, NULL);
+ if (ret)
+ goto err_get_hpd;
+
+ ret = cdn_dp_mailbox_validate_receive(dp, MB_MODULE_ID_DP_TX,
+ DPTX_GET_LAST_AUX_STAUS,
+ sizeof(status));
+ if (ret)
+ goto err_get_hpd;
+
+ ret = cdn_dp_mailbox_read_receive(dp, &status, sizeof(status));
+ if (ret)
+ goto err_get_hpd;
+
+ return status;
+
+err_get_hpd:
+ DRM_DEV_ERROR(dp->dev, "get aux status failed: %d\n", ret);
+ return ret;
+}
+
int cdn_dp_load_firmware(struct cdn_dp_device *dp, const u32 *i_mem,
u32 i_size, const u32 *d_mem, u32 d_size)
{
@@ -557,6 +612,31 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
{
int ret;
+ /*
+ * DP firmware uses fixed phy config values to do training, but some
+	 * boards need to adjust these values to fit their unique hardware
+	 * design. So if the phy is using custom config values, do software
+	 * link training instead of relying on firmware, and keep firmware
+	 * training as a fallback in case software training fails.
+ */
+ ret = cdn_dp_software_train_link(dp);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to do software training %d\n", ret);
+ goto do_fw_training;
+ }
+ ret = cdn_dp_reg_write(dp, SOURCE_HDTX_CAR, 0xf);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to write SOURCE_HDTX_CAR register %d\n", ret);
+ goto do_fw_training;
+ }
+ dp->use_fw_training = false;
+ return 0;
+
+do_fw_training:
+ dp->use_fw_training = true;
+ DRM_DEV_DEBUG_KMS(dp->dev, "use fw training\n");
ret = cdn_dp_training_start(dp);
if (ret) {
DRM_DEV_ERROR(dp->dev, "Failed to start training %d\n", ret);
@@ -571,7 +651,7 @@ int cdn_dp_train_link(struct cdn_dp_device *dp)
DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate,
dp->link.num_lanes);
- return ret;
+ return 0;
}
int cdn_dp_set_video_status(struct cdn_dp_device *dp, int active)
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.h b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
index b2f17d16c0175c..30d2f587e8cfb0 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.h
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.h
@@ -137,7 +137,7 @@
#define HPD_EVENT_MASK 0x211c
#define HPD_EVENT_DET 0x2120
-/* dpyx framer addr */
+/* dptx framer addr */
#define DP_FRAMER_GLOBAL_CONFIG 0x2200
#define DP_SW_RESET 0x2204
#define DP_FRAMER_TU 0x2208
@@ -328,6 +328,13 @@
#define GENERAL_BUS_SETTINGS 0x03
#define GENERAL_TEST_ACCESS 0x04
+/* AUX status */
+#define AUX_STATUS_ACK 0
+#define AUX_STATUS_NACK 1
+#define AUX_STATUS_DEFER 2
+#define AUX_STATUS_SINK_ERROR 3
+#define AUX_STATUS_BUS_ERROR 4
+
/* hdcp opcode */
#define HDCP_TX_CONFIGURATION 0x00
#define HDCP2_TX_SET_PUBLIC_KEY_PARAMS 0x01
@@ -453,6 +460,40 @@
/* Reference cycles when using lane clock as reference */
#define LANE_REF_CYC 0x8000
+/* register CM_VID_CTRL */
+#define LANE_VID_REF_CYC(x) (((x) & (BIT(24) - 1)) << 0)
+#define NMVID_MEAS_TOLERANCE(x) (((x) & 0xf) << 24)
+
+/* register DP_TX_PHY_CONFIG_REG */
+#define DP_TX_PHY_TRAINING_ENABLE(x) ((x) & 1)
+#define DP_TX_PHY_TRAINING_TYPE_PRBS7 (0 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_TPS1 (1 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_TPS2 (2 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_TPS3 (3 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_TPS4 (4 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_PLTPAT (5 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_D10_2 (6 << 1)
+#define DP_TX_PHY_TRAINING_TYPE_HBR2CPAT (8 << 1)
+#define DP_TX_PHY_TRAINING_PATTERN(x) ((x) << 1)
+#define DP_TX_PHY_SCRAMBLER_BYPASS(x) (((x) & 1) << 5)
+#define DP_TX_PHY_ENCODER_BYPASS(x) (((x) & 1) << 6)
+#define DP_TX_PHY_SKEW_BYPASS(x) (((x) & 1) << 7)
+#define DP_TX_PHY_DISPARITY_RST(x) (((x) & 1) << 8)
+#define DP_TX_PHY_LANE0_SKEW(x) (((x) & 7) << 9)
+#define DP_TX_PHY_LANE1_SKEW(x) (((x) & 7) << 12)
+#define DP_TX_PHY_LANE2_SKEW(x) (((x) & 7) << 15)
+#define DP_TX_PHY_LANE3_SKEW(x) (((x) & 7) << 18)
+#define DP_TX_PHY_10BIT_ENABLE(x) (((x) & 1) << 21)
+
+/* register DP_FRAMER_GLOBAL_CONFIG */
+#define NUM_LANES(x) ((x) & 3)
+#define SST_MODE (0 << 2)
+#define RG_EN (0 << 4)
+#define GLOBAL_EN BIT(3)
+#define NO_VIDEO BIT(5)
+#define ENC_RST_DIS BIT(6)
+#define WR_VHSYNC_FALL BIT(7)
+
enum voltage_swing_level {
VOLTAGE_LEVEL_0,
VOLTAGE_LEVEL_1,
@@ -501,8 +542,12 @@ int cdn_dp_set_host_cap(struct cdn_dp_device *dp, u8 lanes, bool flip);
int cdn_dp_event_config(struct cdn_dp_device *dp);
u32 cdn_dp_get_event(struct cdn_dp_device *dp);
int cdn_dp_get_hpd_status(struct cdn_dp_device *dp);
-int cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr, u8 value);
-int cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr, u8 *data, u16 len);
+int cdn_dp_reg_write(struct cdn_dp_device *dp, u16 addr, u32 val);
+ssize_t cdn_dp_dpcd_write(struct cdn_dp_device *dp, u32 addr,
+ u8 *data, u16 len);
+ssize_t cdn_dp_dpcd_read(struct cdn_dp_device *dp, u32 addr,
+ u8 *data, u16 len);
+int cdn_dp_get_aux_status(struct cdn_dp_device *dp);
int cdn_dp_get_edid_block(void *dp, u8 *edid,
unsigned int block, size_t length);
int cdn_dp_train_link(struct cdn_dp_device *dp);
@@ -516,4 +561,5 @@ int cdn_dp_hdcp_tx_configuration(struct cdn_dp_device *dp, int tx_mode,
int cdn_dp_hdcp_tx_status_req(struct cdn_dp_device *dp, uint16_t *tx_status);
int cdn_dp_hdcp_tx_is_receiver_id_valid_req(struct cdn_dp_device *dp);
int cdn_dp_hdcp_tx_respond_id_valid(struct cdn_dp_device *dp, bool valid);
+int cdn_dp_software_train_link(struct cdn_dp_device *dp);
#endif /* _CDN_DP_REG_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 9f7a36a84948bc..c6070b49a6b003 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -106,8 +106,6 @@ void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev);
int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
-void rockchip_drm_enable_dmc(struct rockchip_drm_private *priv);
-void rockchip_drm_disable_dmc(struct rockchip_drm_private *priv);
void rockchip_drm_set_win_enabled(struct drm_crtc *ctrc, bool enabled);
#ifdef CONFIG_ROCKCHIP_CDN_DP
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 613ab0622d6e4a..1616ec4f4d84d5 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,12 +4,7 @@ config DRM_UDL
depends on USB_SUPPORT
depends on USB_ARCH_HAS_HCD
select USB
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_DEFERRED_IO
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
help
This is a KMS driver for the USB displaylink video adapters.
Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 57811a1481fedf..f36905c536fd13 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -70,7 +70,7 @@ static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
memcpy(buff_ptr, block_buff, EDID_LENGTH);
kfree(block_buff);
buff_ptr += EDID_LENGTH;
- for (i = 1; i < extensions; ++i) {
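+	/*
+	 * Block 0 was copied above; the extension blocks are numbered
+	 * 1..extensions inclusive, hence the <= bound.
+	 */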
+ for (i = 1; i <= extensions; ++i) {
if (udl_get_edid_block(udl, i, buff_ptr)) {
buff_ptr += EDID_LENGTH;
} else {
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 60dcee78b95222..404d4e834b16b7 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -78,6 +78,7 @@ static uint16_t rgb16(uint32_t col)
}
#endif
+#ifdef CONFIG_DRM_FBDEV_EMULATION
/*
* NOTE: fb_defio.c is holding info->fbdefio.mutex
* Touching ANY framebuffer memory that triggers a page fault
@@ -139,6 +140,7 @@ error:
>> 10)), /* Kcycles */
&udl->cpu_kcycles_used);
}
+#endif
int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height)
@@ -341,12 +343,13 @@ static int udl_fb_open(struct fb_info *info, int user)
ufbdev->fb_count++;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
@@ -356,6 +359,7 @@ static int udl_fb_open(struct fb_info *info, int user)
info->fbdefio = fbdefio;
fb_deferred_io_init(info);
}
+#endif
pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
@@ -373,12 +377,14 @@ static int udl_fb_release(struct fb_info *info, int user)
ufbdev->fb_count--;
+#ifdef CONFIG_DRM_FBDEV_EMULATION
if ((ufbdev->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
info->fbdefio = NULL;
info->fbops->fb_mmap = udl_fb_mmap;
}
+#endif
pr_warn("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 5fdb5276af5138..1e98c5a173c262 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -143,18 +143,13 @@ static void udl_free_urb_list(struct drm_device *dev)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
-
- /* Getting interrupted means a leak, but ok at shutdown*/
- ret = down_interruptible(&udl->urbs.limit_sem);
- if (ret)
- break;
+ down(&udl->urbs.limit_sem);
spin_lock_irqsave(&udl->urbs.lock, flags);
@@ -178,17 +173,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&udl->urbs.lock);
+retry:
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
- while (i < count) {
+ sema_init(&udl->urbs.limit_sem, 0);
+ udl->urbs.count = 0;
+ udl->urbs.available = 0;
+
+ while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
@@ -204,11 +204,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
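+			/*
+			 * Coherent allocation failed: halve the buffer
+			 * size, free what was built so far and retry the
+			 * whole list.
+			 */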
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ udl_free_urb_list(dev);
+ goto retry;
+ }
break;
}
@@ -219,16 +224,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
list_add_tail(&unode->entry, &udl->urbs.list);
- i++;
+ up(&udl->urbs.limit_sem);
+ udl->urbs.count++;
+ udl->urbs.available++;
}
- sema_init(&udl->urbs.limit_sem, i);
- udl->urbs.count = i;
- udl->urbs.available = i;
-
- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
- return i;
+ return udl->urbs.count;
}
struct urb *udl_get_urb(struct drm_device *dev)
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index d0cbd5ecd7f0fa..4459cb32d1fec0 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -242,8 +242,8 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
ret = get_user_pages(current, current->mm,
(unsigned long)xfer->mem_addr,
vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE),
- 0, vsg->pages, NULL);
+ (vsg->direction == DMA_FROM_DEVICE) ? FOLL_WRITE : 0,
+ vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 9983eadb81b6a1..81d1807ac22816 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,14 +1,10 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
- select DRM_TTM
+ select DRM_KMS_HELPER
+ select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
- QEMU based VMMs (like KVM or Xen).
+ QEMU based VMMs (like KVM or Xen).
If unsure say M.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b3d29bff5bd392..f41b1e86bbc694 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -594,13 +594,16 @@ out_fixup:
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ int ret = 0;
- if (intel_iommu_enabled &&
+ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+ if (dev_priv->map_mode != vmw_dma_phys &&
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
- return 0;
+
+ return ret;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index fda8e85dd5a261..ad0dd566aded02 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3663,7 +3663,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
*p_fence = NULL;
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b8e640061166ad..f1f3a06eb94bb4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -533,11 +533,9 @@ static int vmw_fb_set_par(struct fb_info *info)
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
};
- struct drm_display_mode *old_mode;
struct drm_display_mode *mode;
int ret;
- old_mode = par->set_mode;
mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
if (!mode) {
DRM_ERROR("Could not create new fb mode.\n");
@@ -548,11 +546,7 @@ static int vmw_fb_set_par(struct fb_info *info)
mode->vdisplay = var->yres;
vmw_guess_mode_timing(mode);
- if (old_mode && drm_mode_equal(old_mode, mode)) {
- drm_mode_destroy(vmw_priv->dev, mode);
- mode = old_mode;
- old_mode = NULL;
- } else if (!vmw_kms_validate_mode_vram(vmw_priv,
+ if (!vmw_kms_validate_mode_vram(vmw_priv,
mode->hdisplay *
DIV_ROUND_UP(var->bits_per_pixel, 8),
mode->vdisplay)) {
@@ -615,8 +609,8 @@ static int vmw_fb_set_par(struct fb_info *info)
schedule_delayed_work(&par->local_work, 0);
out_unlock:
- if (old_mode)
- drm_mode_destroy(vmw_priv->dev, old_mode);
+ if (par->set_mode)
+ drm_mode_destroy(vmw_priv->dev, par->set_mode);
par->set_mode = mode;
drm_modeset_unlock_all(vmw_priv->dev);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 53d3d1d45b48ec..ce1b10a2ae857a 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -116,8 +116,8 @@ static int host1x_probe(struct platform_device *pdev)
syncpt_irq = platform_get_irq(pdev, 0);
if (syncpt_irq < 0) {
- dev_err(&pdev->dev, "failed to get IRQ\n");
- return -ENXIO;
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", syncpt_irq);
+ return syncpt_irq;
}
host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 5030cba4a58182..df295a0ce87d36 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -746,8 +746,8 @@ static struct ipu_devtype ipu_type_imx51 = {
.cpmem_ofs = 0x1f000000,
.srm_ofs = 0x1f040000,
.tpm_ofs = 0x1f060000,
- .csi0_ofs = 0x1f030000,
- .csi1_ofs = 0x1f038000,
+ .csi0_ofs = 0x1e030000,
+ .csi1_ofs = 0x1e038000,
.ic_ofs = 0x1e020000,
.disp0_ofs = 0x1e040000,
.disp1_ofs = 0x1e048000,
@@ -762,8 +762,8 @@ static struct ipu_devtype ipu_type_imx53 = {
.cpmem_ofs = 0x07000000,
.srm_ofs = 0x07040000,
.tpm_ofs = 0x07060000,
- .csi0_ofs = 0x07030000,
- .csi1_ofs = 0x07038000,
+ .csi0_ofs = 0x06030000,
+ .csi1_ofs = 0x06038000,
.ic_ofs = 0x06020000,
.disp0_ofs = 0x06040000,
.disp1_ofs = 0x06048000,
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index ef4d62b16aac75..b87e31409a6d48 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -300,6 +300,12 @@ config HID_GOOGLE_HAMMER
---help---
Say Y here if you have a Google Hammer device.
+config HID_GOOGLE_WHISKERS
+ tristate "Google Whiskers Keyboard"
+ depends on HID_GOOGLE_HAMMER && MFD_CROS_EC
+ ---help---
+ Say Y here if you have a Google Whiskers device.
+
config HID_GT683R
tristate "MSI GT68xR LED support"
depends on LEDS_CLASS && USB_HID
@@ -941,4 +947,6 @@ source "drivers/hid/usbhid/Kconfig"
source "drivers/hid/i2c-hid/Kconfig"
+source "drivers/hid/intel-ish-hid/Kconfig"
+
endmenu
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index a0468a0f7eb62f..5dc9e4ab2d99a8 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
obj-$(CONFIG_HID_GFRM) += hid-gfrm.o
obj-$(CONFIG_HID_GOOGLE_HAMMER) += hid-google-hammer.o
+obj-$(CONFIG_HID_GOOGLE_WHISKERS) += hid-google-whiskers.o
obj-$(CONFIG_HID_GT683R) += hid-gt683r.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtek-kbd.o
@@ -112,3 +113,5 @@ obj-$(CONFIG_USB_MOUSE) += usbhid/
obj-$(CONFIG_USB_KBD) += usbhid/
obj-$(CONFIG_I2C_HID) += i2c-hid/
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-hid/
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 884d82f9190e21..65a0c79f212e17 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -333,7 +333,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+ if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+ usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -474,6 +475,14 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 27b557aa97c1a2..88d260c7cb2bce 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -129,12 +129,23 @@ static int open_collection(struct hid_parser *parser, unsigned type)
{
struct hid_collection *collection;
unsigned usage;
+ int collection_index;
usage = parser->local.usage[0];
- if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
- hid_err(parser->device, "collection stack overflow\n");
- return -EINVAL;
+ if (parser->collection_stack_ptr == parser->collection_stack_size) {
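+		/*
+		 * The stack is full: grow it by another
+		 * HID_COLLECTION_STACK_SIZE entries instead of rejecting
+		 * deeply nested report descriptors.
+		 */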
+ unsigned int *collection_stack;
+ unsigned int new_size = parser->collection_stack_size +
+ HID_COLLECTION_STACK_SIZE;
+
+ collection_stack = krealloc(parser->collection_stack,
+ new_size * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!collection_stack)
+ return -ENOMEM;
+
+ parser->collection_stack = collection_stack;
+ parser->collection_stack_size = new_size;
}
if (parser->device->maxcollection == parser->device->collection_size) {
@@ -158,11 +169,13 @@ static int open_collection(struct hid_parser *parser, unsigned type)
parser->collection_stack[parser->collection_stack_ptr++] =
parser->device->maxcollection;
- collection = parser->device->collection +
- parser->device->maxcollection++;
+ collection_index = parser->device->maxcollection++;
+ collection = parser->device->collection + collection_index;
collection->type = type;
collection->usage = usage;
collection->level = parser->collection_stack_ptr - 1;
+ collection->parent_idx = parser->active_collection_idx;
+ parser->active_collection_idx = collection_index;
if (type == HID_COLLECTION_APPLICATION)
parser->device->maxapplication++;
@@ -181,6 +194,13 @@ static int close_collection(struct hid_parser *parser)
return -EINVAL;
}
parser->collection_stack_ptr--;
+ if (parser->active_collection_idx != -1) {
+ struct hid_device *device = parser->device;
+ struct hid_collection *c;
+
+ c = &device->collection[parser->active_collection_idx];
+ parser->active_collection_idx = c->parent_idx;
+ }
return 0;
}
@@ -277,6 +297,7 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
field->usage[i].collection_index =
parser->local.collection_index[j];
field->usage[i].usage_index = i;
+ field->usage[i].resolution_multiplier = 1;
}
field->maxusage = usages;
@@ -805,6 +826,7 @@ static int hid_scan_report(struct hid_device *hid)
return -ENOMEM;
parser->device = hid;
+ parser->active_collection_idx = -1;
hid->group = HID_GROUP_GENERIC;
/*
@@ -841,6 +863,7 @@ static int hid_scan_report(struct hid_device *hid)
break;
}
+ kfree(parser->collection_stack);
vfree(parser);
return 0;
}
@@ -933,6 +956,169 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
}
EXPORT_SYMBOL_GPL(hid_validate_values);
+static int hid_calculate_multiplier(struct hid_device *hid,
+ struct hid_field *multiplier)
+{
+ int m;
+ __s32 v = *multiplier->value;
+ __s32 lmin = multiplier->logical_minimum;
+ __s32 lmax = multiplier->logical_maximum;
+ __s32 pmin = multiplier->physical_minimum;
+ __s32 pmax = multiplier->physical_maximum;
+
+ /*
+ * "Because OS implementations will generally divide the control's
+ * reported count by the Effective Resolution Multiplier, designers
+ * should take care not to establish a potential Effective
+ * Resolution Multiplier of zero."
+ * HID Usage Table, v1.12, Section 4.3.1, p31
+ */
+ if (lmax - lmin == 0)
+ return 1;
+ /*
+ * Handling the unit exponent is left as an exercise to whoever
+ * finds a device where that exponent is not 0.
+ */
+ m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
+ if (unlikely(multiplier->unit_exponent != 0)) {
+ hid_warn(hid,
+ "unsupported Resolution Multiplier unit exponent %d\n",
+ multiplier->unit_exponent);
+ }
+
+ /* There are no devices with an effective multiplier > 255 */
+ if (unlikely(m == 0 || m > 255 || m < -255)) {
+ hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
+ m = 1;
+ }
+
+ return m;
+}
+
+static void hid_apply_multiplier_to_field(struct hid_device *hid,
+ struct hid_field *field,
+ struct hid_collection *multiplier_collection,
+ int effective_multiplier)
+{
+ struct hid_collection *collection;
+ struct hid_usage *usage;
+ int i;
+
+ /*
+ * If multiplier_collection is NULL, the multiplier applies
+ * to all fields in the report.
+ * Otherwise, it is the Logical Collection the multiplier applies to
+ * but our field may be in a subcollection of that collection.
+ */
+ for (i = 0; i < field->maxusage; i++) {
+ usage = &field->usage[i];
+
+ collection = &hid->collection[usage->collection_index];
+ while (collection->parent_idx != -1 &&
+ collection != multiplier_collection)
+ collection = &hid->collection[collection->parent_idx];
+
+ if (collection->parent_idx != -1 ||
+ multiplier_collection == NULL)
+ usage->resolution_multiplier = effective_multiplier;
+
+ }
+}
+
+static void hid_apply_multiplier(struct hid_device *hid,
+ struct hid_field *multiplier)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ struct hid_field *field;
+ struct hid_collection *multiplier_collection;
+ int effective_multiplier;
+ int i;
+
+ /*
+ * "The Resolution Multiplier control must be contained in the same
+ * Logical Collection as the control(s) to which it is to be applied.
+ * If no Resolution Multiplier is defined, then the Resolution
+ * Multiplier defaults to 1. If more than one control exists in a
+ * Logical Collection, the Resolution Multiplier is associated with
+ * all controls in the collection. If no Logical Collection is
+ * defined, the Resolution Multiplier is associated with all
+ * controls in the report."
+ * HID Usage Table, v1.12, Section 4.3.1, p30
+ *
+ * Thus, search from the current collection upwards until we find a
+ * logical collection. Then search all fields for that same parent
+ * collection. Those are the fields the multiplier applies to.
+ *
+ * If we have more than one multiplier, it will overwrite the
+ * applicable fields later.
+ */
+ multiplier_collection = &hid->collection[multiplier->usage->collection_index];
+ while (multiplier_collection->parent_idx != -1 &&
+ multiplier_collection->type != HID_COLLECTION_LOGICAL)
+ multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
+
+ effective_multiplier = hid_calculate_multiplier(hid, multiplier);
+
+ rep_enum = &hid->report_enum[HID_INPUT_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ for (i = 0; i < rep->maxfield; i++) {
+ field = rep->field[i];
+ hid_apply_multiplier_to_field(hid, field,
+ multiplier_collection,
+ effective_multiplier);
+ }
+ }
+}
+
+/*
+ * hid_setup_resolution_multiplier - set up all resolution multipliers
+ *
+ * @device: hid device
+ *
+ * Search for all Resolution Multiplier Feature Reports and apply their
+ * value to all matching Input items. This only updates the internal struct
+ * fields.
+ *
+ * The Resolution Multiplier is applied by the hardware. If the multiplier
+ * is anything other than 1, the hardware will send pre-multiplied events
+ * so that the same physical interaction generates an accumulated
+ *	accumulated_value = value * multiplier
+ * This may be achieved by sending
+ * - "value * multiplier" for each event, or
+ * - "value" but "multiplier" times as frequently, or
+ * - a combination of the above
+ * The only guarantee is that the same physical interaction always generates
+ * an accumulated 'value * multiplier'.
+ *
+ * This function must be called before any event processing and after
+ * any SetRequest to the Resolution Multiplier.
+ */
+void hid_setup_resolution_multiplier(struct hid_device *hid)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ struct hid_usage *usage;
+ int i, j;
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ for (i = 0; i < rep->maxfield; i++) {
+ /* Ignore if report count is out of bounds. */
+ if (rep->field[i]->report_count < 1)
+ continue;
+
+ for (j = 0; j < rep->field[i]->maxusage; j++) {
+ usage = &rep->field[i]->usage[j];
+ if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
+ hid_apply_multiplier(hid,
+ rep->field[i]);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
+
/**
* hid_open_report - open a driver-specific device report
*
@@ -990,10 +1176,11 @@ int hid_open_report(struct hid_device *device)
parser = vzalloc(sizeof(struct hid_parser));
if (!parser) {
ret = -ENOMEM;
- goto err;
+ goto alloc_err;
}
parser->device = device;
+ parser->active_collection_idx = -1;
end = start + size;
@@ -1029,14 +1216,25 @@ int hid_open_report(struct hid_device *device)
hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
+
+ /*
+ * fetch initial values in case the device's
+ * default multiplier isn't the recommended 1
+ */
+ hid_setup_resolution_multiplier(device);
+
+ kfree(parser->collection_stack);
vfree(parser);
device->status |= HID_STAT_PARSED;
+
return 0;
}
}
hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
+ kfree(parser->collection_stack);
+alloc_err:
vfree(parser);
hid_close_report(device);
return ret;
@@ -1790,6 +1988,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@@ -1866,6 +2066,10 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1910,6 +2114,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
.driver_data = QUIRK_NO_HID_GENERIC },
{ HID_USB_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND),
.driver_data = QUIRK_NO_HID_GENERIC },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS),
+ .driver_data = QUIRK_NO_HID_GENERIC },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
@@ -2068,6 +2274,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 2886b645ced738..d7179dd3c9ef48 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -455,7 +456,7 @@ static char *resolv_usage_page(unsigned page, struct seq_file *f) {
char *buf = NULL;
if (!f) {
- buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_ATOMIC);
+ buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_ATOMIC);
if (!buf)
return ERR_PTR(-ENOMEM);
}
@@ -659,17 +660,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
/* enqueue string to 'events' ring buffer */
void hid_debug_event(struct hid_device *hdev, char *buf)
{
- int i;
struct hid_debug_list *list;
unsigned long flags;
spin_lock_irqsave(&hdev->debug_list_lock, flags);
- list_for_each_entry(list, &hdev->debug_list, node) {
- for (i = 0; i < strlen(buf); i++)
- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
- buf[i];
- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
- }
+ list_for_each_entry(list, &hdev->debug_list, node)
+ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
wake_up_interruptible(&hdev->debug_wait);
@@ -720,8 +716,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
hid_debug_event(hdev, buf);
kfree(buf);
- wake_up_interruptible(&hdev->debug_wait);
-
+ wake_up_interruptible(&hdev->debug_wait);
}
EXPORT_SYMBOL_GPL(hid_dump_input);
@@ -1086,8 +1081,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
- if (!(list->hid_debug_buf = kzalloc(sizeof(char) * HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
- err = -ENOMEM;
+ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
+ if (err) {
kfree(list);
goto out;
}
@@ -1107,71 +1102,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct hid_debug_list *list = file->private_data;
- int ret = 0, len;
+ int ret = 0, copied;
DECLARE_WAITQUEUE(wait, current);
mutex_lock(&list->read_mutex);
- while (ret == 0) {
- if (list->head == list->tail) {
- add_wait_queue(&list->hdev->debug_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
-
- while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (!list->hdev || !list->hdev->debug) {
- ret = -EIO;
- set_current_state(TASK_RUNNING);
- goto out;
- }
-
- /* allow O_NONBLOCK from other threads */
- mutex_unlock(&list->read_mutex);
- schedule();
- mutex_lock(&list->read_mutex);
- set_current_state(TASK_INTERRUPTIBLE);
+ if (kfifo_is_empty(&list->hid_debug_fifo)) {
+ add_wait_queue(&list->hdev->debug_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ while (kfifo_is_empty(&list->hid_debug_fifo)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&list->hdev->debug_wait, &wait);
- }
-
- if (ret)
- goto out;
-
- /* pass the ringbuffer contents to userspace */
-copy_rest:
- if (list->tail == list->head)
- goto out;
- if (list->tail > list->head) {
- len = list->tail - list->head;
-
- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
- ret = -EFAULT;
- goto out;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
}
- ret += len;
- list->head += len;
- } else {
- len = HID_DEBUG_BUFSIZE - list->head;
- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
- ret = -EFAULT;
+ /* if list->hdev is NULL we cannot remove_wait_queue().
+ * if list->hdev->debug is 0 then hid_debug_unregister()
+ * was already called and list->hdev is being destroyed.
+ * if we add remove_wait_queue() here we can hit a race.
+ */
+ if (!list->hdev || !list->hdev->debug) {
+ ret = -EIO;
+ set_current_state(TASK_RUNNING);
goto out;
}
- list->head = 0;
- ret += len;
- goto copy_rest;
+
+ /* allow O_NONBLOCK from other threads */
+ mutex_unlock(&list->read_mutex);
+ schedule();
+ mutex_lock(&list->read_mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&list->hdev->debug_wait, &wait);
+
+ if (ret)
+ goto out;
}
+
+ /* pass the fifo content to userspace, locking is not needed with only
+ * one concurrent reader and one concurrent writer
+ */
+ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
+ if (ret)
+ goto out;
+ ret = copied;
out:
mutex_unlock(&list->read_mutex);
return ret;
@@ -1182,7 +1163,7 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait)
struct hid_debug_list *list = file->private_data;
poll_wait(file, &list->hdev->debug_wait, wait);
- if (list->head != list->tail)
+ if (!kfifo_is_empty(&list->hid_debug_fifo))
return POLLIN | POLLRDNORM;
if (!list->hdev->debug)
return POLLERR | POLLHUP;
@@ -1197,7 +1178,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
- kfree(list->hid_debug_buf);
+ kfifo_free(&list->hid_debug_fifo);
kfree(list);
return 0;
@@ -1248,4 +1229,3 @@ void hid_debug_exit(void)
{
debugfs_remove_recursive(hid_debug_root);
}
-
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index f8a4dc3ea594d4..e2f596d8b95971 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -18,6 +18,7 @@
#include <linux/usb.h>
#include "hid-ids.h"
+#include "hid-google-hammer.h"
#define MAX_BRIGHTNESS 100
@@ -88,8 +89,7 @@ static int hammer_register_leds(struct hid_device *hdev)
return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
}
-static int hammer_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
+int hammer_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
@@ -107,6 +107,7 @@ static int hammer_input_configured(struct hid_device *hdev,
return 0;
}
+EXPORT_SYMBOL_GPL(hammer_input_configured);
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC_OVERRIDE,
diff --git a/drivers/hid/hid-google-hammer.h b/drivers/hid/hid-google-hammer.h
new file mode 100644
index 00000000000000..3e5f61161708f2
--- /dev/null
+++ b/drivers/hid/hid-google-hammer.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * HID driver for Google Hammer device.
+ *
+ * Copyright (c) 2018 Google Inc.
+ */
+#ifndef _HID_GOOGLE_HAMMER_H
+#define _HID_GOOGLE_HAMMER_H
+
+int hammer_input_configured(struct hid_device *hdev, struct hid_input *hi);
+
+#endif
diff --git a/drivers/hid/hid-google-whiskers.c b/drivers/hid/hid-google-whiskers.c
new file mode 100644
index 00000000000000..56fe28c99823dc
--- /dev/null
+++ b/drivers/hid/hid-google-whiskers.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// HID driver for Google Whiskers device.
+//
+// Copyright (c) 2018 Google Inc.
+
+#include <linux/acpi.h>
+#include <linux/hid.h>
+#include <linux/leds.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <linux/usb.h>
+#include <asm/unaligned.h>
+
+#include "hid-ids.h"
+#include "hid-google-hammer.h"
+
+#define HID_UP_GOOGLEVENDOR 0xffd10000
+#define HID_VD_KBD_FOLDED 0x00000019
+#define WHISKERS_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
+
+struct whiskers_ec {
+ struct device *dev; /* The platform device (EC) */
+ struct input_dev *input;
+ bool base_present;
+ bool base_folded; /* false: not folded or unknown */
+ struct notifier_block notifier;
+};
+
+static struct whiskers_ec whiskers_ec;
+static DEFINE_SPINLOCK(whiskers_ec_lock);
+static DEFINE_MUTEX(whiskers_ec_reglock);
+
+static bool whiskers_parse_base_state(const void *data)
+{
+ u32 switches = get_unaligned_le32(data);
+
+ return !!(switches & BIT(EC_MKBP_BASE_ATTACHED));
+}
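
For illustration, a standalone decode of the switches word as whiskers_parse_base_state() does it. The bit position used for EC_MKBP_BASE_ATTACHED below is a placeholder; the real value comes from the EC headers.

#include <stdint.h>
#include <stdio.h>

#define EC_MKBP_BASE_ATTACHED	0	/* placeholder bit position */

/* Equivalent of get_unaligned_le32(): byte-wise little-endian assembly. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(void)
{
	const uint8_t raw[4] = { 0x01, 0x00, 0x00, 0x00 };
	uint32_t switches = get_le32(raw);

	printf("base attached: %d\n",
	       !!(switches & (1u << EC_MKBP_BASE_ATTACHED)));
	return 0;
}
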
+
+static int whiskers_ec_query_base(struct cros_ec_device *ec_dev, bool get_state,
+ bool *state)
+{
+ struct ec_params_mkbp_info *params;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(u32), sizeof(*params)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MKBP_INFO;
+ msg->version = 1;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(u32);
+ params = (struct ec_params_mkbp_info *)msg->data;
+ params->info_type = get_state ?
+ EC_MKBP_INFO_CURRENT : EC_MKBP_INFO_SUPPORTED;
+ params->event_type = EC_MKBP_EVENT_SWITCH;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret >= 0) {
+ if (ret != sizeof(u32)) {
+ dev_warn(ec_dev->dev, "wrong result size: %d != %zu\n",
+ ret, sizeof(u32));
+ ret = -EPROTO;
+ } else {
+ *state = whiskers_parse_base_state(msg->data);
+ ret = 0;
+ }
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+
+static int whiskers_ec_notify(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec = _notify;
+ unsigned long flags;
+ bool base_present;
+
+ if (ec->event_data.event_type == EC_MKBP_EVENT_SWITCH) {
+ base_present = whiskers_parse_base_state(
+ &ec->event_data.data.switches);
+ dev_dbg(whiskers_ec.dev,
+ "%s: base: %d\n", __func__, base_present);
+
+ if (device_may_wakeup(whiskers_ec.dev) ||
+ !queued_during_suspend) {
+
+ pm_wakeup_event(whiskers_ec.dev, 0);
+
+ spin_lock_irqsave(&whiskers_ec_lock, flags);
+
+ /*
+ * While the input layer dedupes the events, we do not
+ * want to disrupt the state reported by the base by
+ * overriding it with the state reported by the lid. Only
+ * report changes, as we assume that on attach the base
+ * is not folded.
+ */
+ if (base_present != whiskers_ec.base_present) {
+ input_report_switch(whiskers_ec.input,
+ SW_TABLET_MODE,
+ !base_present);
+ input_sync(whiskers_ec.input);
+ whiskers_ec.base_present = base_present;
+ }
+
+ spin_unlock_irqrestore(&whiskers_ec_lock, flags);
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static __maybe_unused int whiskers_ec_resume(struct device *dev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(dev->parent);
+ bool base_present;
+ int error;
+
+ error = whiskers_ec_query_base(ec, true, &base_present);
+ if (error) {
+ dev_warn(dev, "failed to fetch base state on resume: %d\n",
+ error);
+ } else {
+ spin_lock_irq(&whiskers_ec_lock);
+
+ whiskers_ec.base_present = base_present;
+
+ /*
+ * Only report if the base is disconnected. If the base is
+ * connected, it will resend its state on resume, and we'll
+ * update it in whiskers_event().
+ */
+ if (!whiskers_ec.base_present) {
+ input_report_switch(whiskers_ec.input,
+ SW_TABLET_MODE, 1);
+ input_sync(whiskers_ec.input);
+ }
+
+ spin_unlock_irq(&whiskers_ec_lock);
+ }
+
+ return 0;
+}
+
+static const SIMPLE_DEV_PM_OPS(whiskers_ec_pm_ops, NULL, whiskers_ec_resume);
+
+static void whiskers_ec_set_input(struct input_dev *input)
+{
+ /* Take the lock so whiskers_event does not race with us here */
+ spin_lock_irq(&whiskers_ec_lock);
+ whiskers_ec.input = input;
+ spin_unlock_irq(&whiskers_ec_lock);
+}
+
+static int __whiskers_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *input;
+ bool base_supported;
+ int error;
+
+ error = whiskers_ec_query_base(ec, false, &base_supported);
+ if (error)
+ return error;
+
+ if (!base_supported)
+ return -ENXIO;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Whiskers Tablet Mode Switch";
+
+ input->id.bustype = BUS_HOST;
+ input->id.version = 1;
+ input->id.product = 0;
+
+ input_set_capability(input, EV_SW, SW_TABLET_MODE);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /* Seed the state */
+ error = whiskers_ec_query_base(ec, true, &whiskers_ec.base_present);
+ if (error) {
+ dev_err(&pdev->dev, "cannot query base state: %d\n", error);
+ return error;
+ }
+
+ if (!whiskers_ec.base_present)
+ whiskers_ec.base_folded = false;
+
+ dev_dbg(&pdev->dev, "%s: base: %d, folded: %d\n", __func__,
+ whiskers_ec.base_present, whiskers_ec.base_folded);
+
+ input_report_switch(input, SW_TABLET_MODE,
+ !whiskers_ec.base_present ||
+ whiskers_ec.base_folded);
+
+ whiskers_ec_set_input(input);
+
+ whiskers_ec.dev = &pdev->dev;
+ whiskers_ec.notifier.notifier_call = whiskers_ec_notify;
+ error = blocking_notifier_chain_register(&ec->event_notifier,
+ &whiskers_ec.notifier);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register notifier: %d\n", error);
+ whiskers_ec_set_input(NULL);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ return 0;
+}
+
+static int whiskers_ec_probe(struct platform_device *pdev)
+{
+ int retval;
+
+ mutex_lock(&whiskers_ec_reglock);
+
+ if (whiskers_ec.input) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ retval = __whiskers_ec_probe(pdev);
+
+out:
+ mutex_unlock(&whiskers_ec_reglock);
+ return retval;
+}
+
+static int whiskers_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ mutex_lock(&whiskers_ec_reglock);
+
+ blocking_notifier_chain_unregister(&ec->event_notifier,
+ &whiskers_ec.notifier);
+ whiskers_ec_set_input(NULL);
+
+ mutex_unlock(&whiskers_ec_reglock);
+ return 0;
+}
+
+static const struct acpi_device_id whiskers_ec_acpi_ids[] = {
+ { "GOOG000B", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, whiskers_ec_acpi_ids);
+
+static struct platform_driver whiskers_ec_driver = {
+ .probe = whiskers_ec_probe,
+ .remove = whiskers_ec_remove,
+ .driver = {
+ .name = "whiskers_ec",
+ .acpi_match_table = ACPI_PTR(whiskers_ec_acpi_ids),
+ .pm = &whiskers_ec_pm_ops,
+ },
+};
+
+static int whiskers_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->hid == WHISKERS_KBD_FOLDED) {
+ /*
+ * We do not want this usage mapped, as it would get mixed
+ * in with the "base attached" signal and is delivered over
+ * a separate input device for the tablet mode switch.
+ */
+
+ /*
+ * Also, override open/close and inhibit/uninhibit from
+ * hid-input.c: we can't power down the interface, since we
+ * need to get a signal when the base is unfolded. So when
+ * the device is inhibited we simply drop events in the
+ * input core. This code would be better placed in the
+ * input_configured() method, but since we are reusing
+ * hammer's implementation we do it here instead.
+ */
+ hi->input->open = NULL;
+ hi->input->close = NULL;
+ hi->input->inhibit = NULL;
+ hi->input->uninhibit = NULL;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int whiskers_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ unsigned long flags;
+
+ if (usage->hid == WHISKERS_KBD_FOLDED) {
+ spin_lock_irqsave(&whiskers_ec_lock, flags);
+
+ /*
+ * If we are getting events from Whiskers, that means it
+ * is attached to the lid.
+ */
+ whiskers_ec.base_present = true;
+ whiskers_ec.base_folded = value;
+ hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
+ whiskers_ec.base_present, whiskers_ec.base_folded);
+
+ if (whiskers_ec.input) {
+ input_report_switch(whiskers_ec.input,
+ SW_TABLET_MODE, value);
+ input_sync(whiskers_ec.input);
+ }
+
+ spin_unlock_irqrestore(&whiskers_ec_lock, flags);
+ return 1; /* We handled this event */
+ }
+
+ return 0;
+}
+
+static int whiskers_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ int error;
+
+ error = hid_parse(hdev);
+ if (error)
+ return error;
+
+ error = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (error)
+ return error;
+
+ /*
+ * We always want to poll for, and handle, tablet mode events, even
+ * when nobody has opened the input device. This also prevents the
+ * hid core from dropping early tablet mode events from the device.
+ */
+ if (intf->cur_altsetting->desc.bInterfaceProtocol ==
+ USB_INTERFACE_PROTOCOL_KEYBOARD) {
+ hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
+ error = hid_hw_open(hdev);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
+static void whiskers_remove(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ unsigned long flags;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol ==
+ USB_INTERFACE_PROTOCOL_KEYBOARD) {
+ hid_hw_close(hdev);
+
+ /*
+ * If we are disconnecting, then most likely Whiskers is
+ * being removed. Even if it is not removed, without a
+ * proper keyboard we should not stay in clamshell mode.
+ *
+ * The reason for doing it here, rather than waiting for a
+ * signal from the EC, is that on some devices there is high
+ * leakage on the Whiskers pins and we do not detect
+ * disconnect reliably, resulting in devices being stuck in
+ * clamshell mode.
+ */
+ spin_lock_irqsave(&whiskers_ec_lock, flags);
+ if (whiskers_ec.input && whiskers_ec.base_present) {
+ input_report_switch(whiskers_ec.input,
+ SW_TABLET_MODE, 1);
+ input_sync(whiskers_ec.input);
+ }
+ whiskers_ec.base_present = false;
+ spin_unlock_irqrestore(&whiskers_ec_lock, flags);
+ }
+
+ hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id whiskers_hid_devices[] = {
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC_OVERRIDE,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, whiskers_hid_devices);
+
+static struct hid_driver whiskers_hid_driver = {
+ .name = "whiskers",
+ .id_table = whiskers_hid_devices,
+ .probe = whiskers_probe,
+ .remove = whiskers_remove,
+ .input_configured = hammer_input_configured,
+ .input_mapping = whiskers_input_mapping,
+ .event = whiskers_event,
+};
+
+static int __init whiskers_init(void)
+{
+ int error;
+
+ error = platform_driver_register(&whiskers_ec_driver);
+ if (error)
+ return error;
+
+ error = hid_register_driver(&whiskers_hid_driver);
+ if (error) {
+ platform_driver_unregister(&whiskers_ec_driver);
+ return error;
+ }
+
+ return 0;
+}
+module_init(whiskers_init);
+
+static void __exit whiskers_exit(void)
+{
+ hid_unregister_driver(&whiskers_hid_driver);
+ platform_driver_unregister(&whiskers_ec_driver);
+}
+module_exit(whiskers_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5d465c6fb63bfe..aafc28d3b31fd5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -71,9 +71,11 @@
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
#define USB_VENDOR_ID_APPLE 0x05ac
+#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
+#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
@@ -139,6 +141,8 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -388,6 +392,7 @@
#define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE 0x5028
#define USB_DEVICE_ID_GOOGLE_STAFF 0x502b
#define USB_DEVICE_ID_GOOGLE_WAND 0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
@@ -519,6 +524,9 @@
#define USB_VENDOR_ID_IRTOUCHSYSTEMS 0x6615
#define USB_DEVICE_ID_IRTOUCH_INFRARED_USB 0x0070
+#define USB_VENDOR_ID_INNOMEDIA 0x1292
+#define USB_DEVICE_ID_INNEX_GENESIS_ATARI 0x4745
+
#define USB_VENDOR_ID_ITE 0x048d
#define USB_DEVICE_ID_ITE_LENOVO_YOGA 0x8386
#define USB_DEVICE_ID_ITE_LENOVO_YOGA2 0x8350
@@ -886,6 +894,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
@@ -916,6 +926,7 @@
#define USB_VENDOR_ID_SYMBOL 0x05e0
#define USB_DEVICE_ID_SYMBOL_SCANNER_1 0x0800
#define USB_DEVICE_ID_SYMBOL_SCANNER_2 0x1300
+#define USB_DEVICE_ID_SYMBOL_SCANNER_3 0x1200
#define USB_VENDOR_ID_SYNAPTICS 0x06cb
#define USB_DEVICE_ID_SYNAPTICS_TP 0x0001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f076d810cac9ae..4da5b7e4977f9a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -324,6 +324,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM,
USB_DEVICE_ID_ELECOM_BM084),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SYMBOL,
+ USB_DEVICE_ID_SYMBOL_SCANNER_3),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -689,7 +692,15 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
map_abs_clear(usage->hid & 0xf);
break;
- case HID_GD_SLIDER: case HID_GD_DIAL: case HID_GD_WHEEL:
+ case HID_GD_WHEEL:
+ if (field->flags & HID_MAIN_ITEM_RELATIVE) {
+ set_bit(REL_WHEEL, input->relbit);
+ map_rel(REL_WHEEL_HI_RES);
+ } else {
+ map_abs(usage->hid & 0xf);
+ }
+ break;
+ case HID_GD_SLIDER: case HID_GD_DIAL:
if (field->flags & HID_MAIN_ITEM_RELATIVE)
map_rel(usage->hid & 0xf);
else
@@ -941,6 +952,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1b8: map_key_clear(KEY_VIDEO); break;
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
+ case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;
@@ -966,7 +978,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x22f: map_key_clear(KEY_ZOOMRESET); break;
case 0x233: map_key_clear(KEY_SCROLLUP); break;
case 0x234: map_key_clear(KEY_SCROLLDOWN); break;
- case 0x238: map_rel(REL_HWHEEL); break;
+ case 0x238: /* AC Pan */
+ set_bit(REL_HWHEEL, input->relbit);
+ map_rel(REL_HWHEEL_HI_RES);
+ break;
case 0x23d: map_key_clear(KEY_EDIT); break;
case 0x25f: map_key_clear(KEY_CANCEL); break;
case 0x269: map_key_clear(KEY_INSERT); break;
@@ -1149,6 +1164,38 @@ ignore:
}
+static void hidinput_handle_scroll(struct hid_usage *usage,
+ struct input_dev *input,
+ __s32 value)
+{
+ int code;
+ int hi_res, lo_res;
+
+ if (value == 0)
+ return;
+
+ if (usage->code == REL_WHEEL_HI_RES)
+ code = REL_WHEEL;
+ else
+ code = REL_HWHEEL;
+
+ /*
+ * Windows reports one wheel click as value 120. Where a high-res
+ * scroll wheel is present, a fraction of 120 is reported instead.
+ * Our REL_WHEEL_HI_RES axis does the same because all HW must
+ * adhere to the 120 expectation.
+ */
+ hi_res = value * 120 / usage->resolution_multiplier;
+
+ usage->wheel_accumulated += hi_res;
+ lo_res = usage->wheel_accumulated / 120;
+ if (lo_res)
+ usage->wheel_accumulated -= lo_res * 120;
+
+ input_event(input, EV_REL, code, lo_res);
+ input_event(input, EV_REL, usage->code, hi_res);
+}
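
The 120-unit accumulation above is plain integer arithmetic and can be tried standalone; this sketch mirrors the function's body with a file-scope accumulator (the multiplier of 8 is illustrative).

#include <stdio.h>

static int wheel_accumulated;

/* Accumulate high-res units; emit one low-res click per full 120 units. */
static void handle_scroll(int value, int resolution_multiplier)
{
	int hi_res = value * 120 / resolution_multiplier;
	int lo_res;

	wheel_accumulated += hi_res;
	lo_res = wheel_accumulated / 120;
	if (lo_res)
		wheel_accumulated -= lo_res * 120;

	printf("hi_res=%d lo_res=%d remainder=%d\n",
	       hi_res, lo_res, wheel_accumulated);
}

int main(void)
{
	int i;

	/* A multiplier of 8 means eight hardware events per wheel detent:
	 * the low-res click fires on the eighth event. */
	for (i = 0; i < 8; i++)
		handle_scroll(1, 8);
	return 0;
}
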
+
void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value)
{
struct input_dev *input;
@@ -1211,6 +1258,12 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */
return;
+ if ((usage->type == EV_REL) && (usage->code == REL_WHEEL_HI_RES ||
+ usage->code == REL_HWHEEL_HI_RES)) {
+ hidinput_handle_scroll(usage, input, value);
+ return;
+ }
+
if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) &&
(usage->code == ABS_VOLUME)) {
int count = abs(value);
@@ -1456,6 +1509,58 @@ static int hidinput_uninhibit(struct input_dev *dev)
return dev->users ? hid_hw_open(hid) : 0;
}
+static void hidinput_change_resolution_multipliers(struct hid_device *hid)
+{
+ struct hid_report_enum *rep_enum;
+ struct hid_report *rep;
+ struct hid_usage *usage;
+ int i, j;
+
+ rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(rep, &rep_enum->report_list, list) {
+ bool update_needed = false;
+
+ if (rep->maxfield == 0)
+ continue;
+
+ /*
+ * If we have more than one feature within this report we
+ * need to fill in the bits from the others before we can
+ * overwrite the ones for the Resolution Multiplier.
+ */
+ if (rep->maxfield > 1) {
+ hid_hw_request(hid, rep, HID_REQ_GET_REPORT);
+ hid_hw_wait(hid);
+ }
+
+ for (i = 0; i < rep->maxfield; i++) {
+ __s32 logical_max = rep->field[i]->logical_maximum;
+
+ /* There is no good reason for a Resolution
+ * Multiplier to have a count other than 1.
+ * Ignore that case.
+ */
+ if (rep->field[i]->report_count != 1)
+ continue;
+
+ for (j = 0; j < rep->field[i]->maxusage; j++) {
+ usage = &rep->field[i]->usage[j];
+
+ if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER)
+ continue;
+
+ *rep->field[i]->value = logical_max;
+ update_needed = true;
+ }
+ }
+ if (update_needed)
+ hid_hw_request(hid, rep, HID_REQ_SET_REPORT);
+ }
+
+ /* refresh our structs */
+ hid_setup_resolution_multiplier(hid);
+}
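
As a worked example of why the driver writes logical_maximum: per the HID Usage Tables, a Resolution Multiplier's logical value maps linearly onto its physical range, so the maximum logical value selects the largest multiplier. The ranges below are illustrative.

#include <stdio.h>

/* Linear mapping of the control's logical value onto its physical range. */
static int effective_multiplier(int value, int logical_min, int logical_max,
				int physical_min, int physical_max)
{
	return physical_min + (value - logical_min) *
	       (physical_max - physical_min) / (logical_max - logical_min);
}

int main(void)
{
	/* Writing logical_maximum (1 here) selects the full multiplier. */
	printf("%d\n", effective_multiplier(1, 0, 1, 1, 8));	/* 8 */
	printf("%d\n", effective_multiplier(0, 0, 1, 1, 8));	/* 1 */
	return 0;
}
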
+
static void report_features(struct hid_device *hid)
{
struct hid_driver *drv = hid->driver;
@@ -1655,6 +1760,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
}
}
+ hidinput_change_resolution_multipliers(hid);
+
if (hidinput && (hid->quirks & HID_QUIRK_NO_EMPTY_INPUT) &&
!hidinput_has_been_populated(hidinput)) {
/* no need to register an input device not populated */
@@ -1712,4 +1819,3 @@ void hidinput_disconnect(struct hid_device *hid)
cancel_work_sync(&hid->led_work);
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
-
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 8979f1fd5208f9..24a4a23bdc90da 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -703,7 +703,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
data_pointer->led_mute.dev = dev;
- led_classdev_register(dev, &data_pointer->led_mute);
+ ret = led_classdev_register(dev, &data_pointer->led_mute);
+ if (ret < 0)
+ goto err;
data_pointer->led_micmute.name = name_micmute;
data_pointer->led_micmute.brightness_get =
@@ -711,7 +713,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
data_pointer->led_micmute.brightness_set =
lenovo_led_brightness_set_tpkbd;
data_pointer->led_micmute.dev = dev;
- led_classdev_register(dev, &data_pointer->led_micmute);
+ ret = led_classdev_register(dev, &data_pointer->led_micmute);
+ if (ret < 0) {
+ led_classdev_unregister(&data_pointer->led_mute);
+ goto err;
+ }
lenovo_features_set_tpkbd(hdev);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 5fd97860aec4d8..60f0239478f885 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -49,12 +49,19 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_CLASS_K400 BIT(2)
/* bits 2..20 are reserved for classes */
-#define HIDPP_QUIRK_CONNECT_EVENTS BIT(21)
+/* #define HIDPP_QUIRK_CONNECT_EVENTS BIT(21) disabled */
#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
+#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
-#define HIDPP_QUIRK_DELAYED_INIT (HIDPP_QUIRK_NO_HIDINPUT | \
- HIDPP_QUIRK_CONNECT_EVENTS)
+/* Convenience constant to check for any high-res support. */
+#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2121)
+
+#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
/*
* There are two hidpp protocols in use, the first version hidpp10 is known
@@ -100,6 +107,25 @@ struct hidpp_report {
};
} __packed;
+/**
+ * struct hidpp_scroll_counter - Utility class for processing high-resolution
+ * scroll events.
+ * @dev: the input device for which events should be reported.
+ * @wheel_multiplier: the scalar multiplier to be applied to each wheel event
+ * @remainder: counts the number of high-resolution units moved since the last
+ * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
+ * only be used by class methods.
+ * @direction: direction of last movement (1 or -1)
+ * @last_time: last event time, used to reset remainder after inactivity
+ */
+struct hidpp_scroll_counter {
+ struct input_dev *dev;
+ int wheel_multiplier;
+ int remainder;
+ int direction;
+ unsigned long long last_time;
+};
+
struct hidpp_device {
struct hid_device *hid_dev;
struct mutex send_mutex;
@@ -118,6 +144,7 @@ struct hidpp_device {
struct input_dev *delayed_input;
unsigned long quirks;
+ struct hidpp_scroll_counter vertical_wheel_counter;
};
@@ -336,6 +363,67 @@ static void hidpp_prefix_name(char **name, int name_length)
*name = new_name;
}
+/**
+ * hidpp_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
+ * events given a high-resolution wheel
+ * movement.
+ * @counter: a hidpp_scroll_counter struct describing the wheel.
+ * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
+ * units.
+ *
+ * Given a high-resolution movement, this function converts the movement into
+ * fractions of 120 and emits high-resolution scroll events for the input
+ * device. It also uses the multiplier from &struct hidpp_scroll_counter to
+ * emit low-resolution scroll events when appropriate for
+ * backwards-compatibility with userspace input libraries.
+ */
+static void hidpp_scroll_counter_handle_scroll(struct hidpp_scroll_counter *counter,
+ int hi_res_value)
+{
+ int low_res_value, remainder, direction;
+ unsigned long long now, previous;
+
+ hi_res_value = hi_res_value * 120 / counter->wheel_multiplier;
+ input_report_rel(counter->dev, REL_WHEEL_HI_RES, hi_res_value);
+
+ remainder = counter->remainder;
+ direction = hi_res_value > 0 ? 1 : -1;
+
+ now = sched_clock();
+ previous = counter->last_time;
+ counter->last_time = now;
+ /*
+ * Reset the remainder after a period of inactivity or when the
+ * direction changes. This prevents the REL_WHEEL emulation point
+ * from sliding for devices that don't always provide the same
+ * number of movements per detent.
+ */
+ if (now - previous > 1000000000 || direction != counter->direction)
+ remainder = 0;
+
+ counter->direction = direction;
+ remainder += hi_res_value;
+
+ /* Some wheels will rest 7/8ths of a detent from the previous detent
+ * after slow movement, so we want the threshold for low-res events to
+ * be in the middle between two detents (e.g. after 4/8ths) as
+ * opposed to on the detents themselves (8/8ths).
+ */
+ if (abs(remainder) >= 60) {
+ /* Add (or subtract) 1 because we want to trigger when the wheel
+ * is half-way to the next detent (i.e. scroll 1 detent after a
+ * 1/2 detent movement, 2 detents after a 1 1/2 detent movement,
+ * etc.).
+ */
+ low_res_value = remainder / 120;
+ if (low_res_value == 0)
+ low_res_value = (hi_res_value > 0 ? 1 : -1);
+ input_report_rel(counter->dev, REL_WHEEL, low_res_value);
+ remainder -= low_res_value * 120;
+ }
+ counter->remainder = remainder;
+}
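
The half-detent trigger and direction reset above can be exercised in isolation; this sketch reproduces the arithmetic (the inactivity timeout is omitted, and the 15-unit step assumes an illustrative multiplier of 8).

#include <stdio.h>
#include <stdlib.h>

static int remainder_units;
static int direction = 1;

/* Emit a low-res click once the wheel is half-way (60/120) to a detent. */
static int counter_step(int hi_res_value)
{
	int low_res = 0;
	int dir = hi_res_value > 0 ? 1 : -1;

	if (dir != direction)
		remainder_units = 0;
	direction = dir;
	remainder_units += hi_res_value;

	if (abs(remainder_units) >= 60) {
		low_res = remainder_units / 120;
		if (low_res == 0)
			low_res = dir;
		remainder_units -= low_res * 120;
	}
	return low_res;
}

int main(void)
{
	int i;

	/* With 15 units per event, the first click fires on the 4th event
	 * (half-way to the detent), then once every 8 events. */
	for (i = 1; i <= 12; i++)
		printf("event %2d -> low-res %d\n", i, counter_step(15));
	return 0;
}
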
+
/* -------------------------------------------------------------------------- */
/* HIDP++ 1.0 commands */
/* -------------------------------------------------------------------------- */
@@ -345,6 +433,48 @@ static void hidpp_prefix_name(char **name, int name_length)
#define HIDPP_SET_LONG_REGISTER 0x82
#define HIDPP_GET_LONG_REGISTER 0x83
+/**
+ * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * @hidpp_dev: the device to set the register on.
+ * @register_address: the address of the register to modify.
+ * @byte: the byte of the register to modify. Should be less than 3.
+ * @bit: the bit within that byte to set. Should be less than 8.
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 bit)
+{
+ struct hidpp_report response;
+ int ret;
+ u8 params[3] = { 0 };
+
+ ret = hidpp_send_rap_command_sync(hidpp_dev,
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ register_address,
+ NULL, 0, &response);
+ if (ret)
+ return ret;
+
+ memcpy(params, response.rap.params, 3);
+
+ params[byte] |= BIT(bit);
+
+ return hidpp_send_rap_command_sync(hidpp_dev,
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_SET_REGISTER,
+ register_address,
+ params, 3, &response);
+}
+
+
+#define HIDPP_REG_FEATURES 0x01
+
+/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
+static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
+}
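
The helper above is a read-modify-write on a 3-byte register image; a trivial standalone sketch of the modify step (byte 0, bit 6 mirrors the scrolling-acceleration enable just defined).

#include <stdint.h>
#include <stdio.h>

/* Set one bit in a 3-byte HID++ 1.0 register image. */
static void set_register_bit(uint8_t params[3], uint8_t byte, uint8_t bit)
{
	params[byte] |= 1u << bit;
}

int main(void)
{
	uint8_t regs[3] = { 0x00, 0x00, 0x00 };

	/* "Scrolling acceleration": byte 0, bit 6 of register 0x01. */
	set_register_bit(regs, 0, 6);
	printf("byte0 = 0x%02x\n", regs[0]);	/* 0x40 */
	return 0;
}
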
+
#define HIDPP_REG_PAIRING_INFORMATION 0xB5
#define DEVICE_NAME 0x40
@@ -563,6 +693,99 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* 0x2120: Hi-resolution scrolling */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
+
+#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
+
+static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
+ bool enabled, u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = enabled ? BIT(0) : 0;
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
+ params, sizeof(params), &response);
+ if (ret)
+ return ret;
+ *multiplier = response.fap.params[1];
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x2121: HiRes Wheel */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HIRES_WHEEL 0x2121
+
+#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
+#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
+
+static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
+ u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ goto return_default;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
+ NULL, 0, &response);
+ if (ret)
+ goto return_default;
+
+ *multiplier = response.fap.params[0];
+ return 0;
+return_default:
+ hid_warn(hidpp->hid_dev,
+ "Couldn't get wheel multiplier (error %d)\n", ret);
+ return ret;
+}
+
+static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
+ bool high_resolution, bool use_hidpp)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = (invert ? BIT(2) : 0) |
+ (high_resolution ? BIT(1) : 0) |
+ (use_hidpp ? BIT(0) : 0);
+
+ return hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_SET_WHEEL_MODE,
+ params, sizeof(params), &response);
+}
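
The mode byte sent to feature 0x2121 is a three-flag bit-pack; a sketch of the same packing (the driver requests native high-resolution reports, i.e. only bit 1 set).

#include <stdint.h>
#include <stdio.h>

/* Pack the 0x2121 wheel-mode flags: bit 2 invert, bit 1 hi-res, bit 0 HID++. */
static uint8_t pack_wheel_mode(int invert, int high_resolution, int use_hidpp)
{
	return (invert ? 1u << 2 : 0) |
	       (high_resolution ? 1u << 1 : 0) |
	       (use_hidpp ? 1u << 0 : 0);
}

int main(void)
{
	printf("params[0] = 0x%02x\n", pack_wheel_mode(0, 1, 0));	/* 0x02 */
	return 0;
}
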
+
+/* -------------------------------------------------------------------------- */
/* 0x6010: Touchpad FW items */
/* -------------------------------------------------------------------------- */
@@ -972,9 +1195,6 @@ static int wtp_connect(struct hid_device *hdev, bool connected)
struct wtp_data *wd = hidpp->private_data;
int ret;
- if (!connected)
- return 0;
-
if (!wd->x_size) {
ret = wtp_get_config(hidpp);
if (ret) {
@@ -1042,9 +1262,6 @@ static int m560_send_config_command(struct hid_device *hdev, bool connected)
hidpp_dev = hid_get_drvdata(hdev);
- if (!connected)
- return -ENODEV;
-
return hidpp_send_rap_command_sync(
hidpp_dev,
REPORT_ID_HIDPP_SHORT,
@@ -1140,10 +1357,15 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_key(mydata->input, BTN_RIGHT,
!!(data[1] & M560_MOUSE_BTN_RIGHT));
- if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT)
+ if (data[1] & M560_MOUSE_BTN_WHEEL_LEFT) {
input_report_rel(mydata->input, REL_HWHEEL, -1);
- else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT)
+ input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ -120);
+ } else if (data[1] & M560_MOUSE_BTN_WHEEL_RIGHT) {
input_report_rel(mydata->input, REL_HWHEEL, 1);
+ input_report_rel(mydata->input, REL_HWHEEL_HI_RES,
+ 120);
+ }
v = hid_snto32(hid_field_extract(hdev, data+3, 0, 12), 12);
input_report_rel(mydata->input, REL_X, v);
@@ -1152,7 +1374,8 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- input_report_rel(mydata->input, REL_WHEEL, v);
+ hidpp_scroll_counter_handle_scroll(
+ &hidpp->vertical_wheel_counter, v);
input_sync(mydata->input);
}
@@ -1179,6 +1402,8 @@ static void m560_populate_input(struct hidpp_device *hidpp,
__set_bit(REL_Y, mydata->input->relbit);
__set_bit(REL_WHEEL, mydata->input->relbit);
__set_bit(REL_HWHEEL, mydata->input->relbit);
+ __set_bit(REL_WHEEL_HI_RES, mydata->input->relbit);
+ __set_bit(REL_HWHEEL_HI_RES, mydata->input->relbit);
}
static int m560_input_mapping(struct hid_device *hdev, struct hid_input *hi,
@@ -1248,9 +1473,6 @@ static int k400_connect(struct hid_device *hdev, bool connected)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
- if (!connected)
- return 0;
-
if (!disable_tap_to_click)
return 0;
@@ -1258,6 +1480,37 @@ static int k400_connect(struct hid_device *hdev, bool connected)
}
/* -------------------------------------------------------------------------- */
+/* High-resolution scroll wheels */
+/* -------------------------------------------------------------------------- */
+
+static int hi_res_scroll_enable(struct hidpp_device *hidpp)
+{
+ int ret;
+ u8 multiplier = 1;
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
+ if (ret == 0)
+ ret = hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
+ } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
+ &multiplier);
+ } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */ {
+ ret = hidpp10_enable_scrolling_acceleration(hidpp);
+ multiplier = 8;
+ }
+ if (ret)
+ return ret;
+
+ if (multiplier == 0)
+ multiplier = 1;
+
+ hidpp->vertical_wheel_counter.wheel_multiplier = multiplier;
+ hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -1283,6 +1536,9 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
wtp_populate_input(hidpp, input, origin_is_hid_core);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
m560_populate_input(hidpp, input, origin_is_hid_core);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ hidpp->vertical_wheel_counter.dev = input;
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -1330,8 +1586,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
if (unlikely(hidpp_report_is_connect_event(report))) {
atomic_set(&hidpp->connected,
!(report->rap.params[0] & (1 << 6)));
- if ((hidpp->quirks & HIDPP_QUIRK_CONNECT_EVENTS) &&
- (schedule_work(&hidpp->work) == 0))
+ if (schedule_work(&hidpp->work) == 0)
dbg_hid("%s: connect event already queued\n", __func__);
return 1;
}
@@ -1378,6 +1633,27 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
return 0;
}
+static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ /* This function is only called for scroll events, due to the
+ * restriction imposed in hidpp_usages.
+ */
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct hidpp_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ /* A scroll event may occur before the multiplier has been retrieved or
+ * the input device set, or high-res scroll enabling may fail. In such
+ * cases we must return early (falling back to default behaviour) to
+ * avoid a crash in hidpp_scroll_counter_handle_scroll.
+ */
+ if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
+ || counter->dev == NULL || counter->wheel_multiplier == 0)
+ return 0;
+
+ hidpp_scroll_counter_handle_scroll(counter, value);
+ return 1;
+}
+
static void hidpp_overwrite_name(struct hid_device *hdev, bool use_unifying)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);
@@ -1447,6 +1723,9 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
struct input_dev *input;
char *name, *devm_name;
+ if (!connected)
+ return;
+
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
ret = wtp_connect(hdev, connected);
if (ret)
@@ -1461,9 +1740,6 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
return;
}
- if (!connected || hidpp->delayed_input)
- return;
-
/* the device is already connected; we can ask for its name and
* protocol */
if (!hidpp->protocol_major) {
@@ -1476,8 +1752,11 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
hidpp->protocol_major, hidpp->protocol_minor);
}
- if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT))
- /* if HID created the input nodes for us, we can stop now */
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ hi_res_scroll_enable(hidpp);
+
+ if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
+ /* if the input nodes are already created, we can stop now */
return;
if (!hidpp->name || hidpp->name == hdev->name) {
@@ -1531,7 +1810,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (disable_raw_mode) {
hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
- hidpp->quirks &= ~HIDPP_QUIRK_CONNECT_EVENTS;
hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
}
@@ -1596,12 +1874,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto hid_hw_start_fail;
}
- if (hidpp->quirks & HIDPP_QUIRK_CONNECT_EVENTS) {
- /* Allow incoming packets */
- hid_device_io_start(hdev);
+ /* Allow incoming packets */
+ hid_device_io_start(hdev);
- hidpp_connect_event(hidpp);
- }
+ hidpp_connect_event(hidpp);
return ret;
@@ -1623,42 +1899,78 @@ static void hidpp_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+#define LDJ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4011),
+ LDJ_DEVICE(0x4011),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
{ /* wireless touchpad T650 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4101),
+ LDJ_DEVICE(0x4101),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
{ /* wireless touchpad T651 */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
+ { /* Mouse Logitech Anywhere MX */
+ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech Cube */
+ LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M335 */
+ LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M515 */
+ LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
+ LDJ_DEVICE(0x402d),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
+ | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M705 (firmware RQM17) */
+ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech M705 (firmware RQM67) */
+ LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M720 */
+ LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2 */
+ LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2S */
+ LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master */
+ LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 2S */
+ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech Performance MX */
+ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4024),
- .driver_data = HIDPP_QUIRK_CONNECT_EVENTS | HIDPP_QUIRK_CLASS_K400 },
+ LDJ_DEVICE(0x4024),
+ .driver_data = HIDPP_QUIRK_CLASS_K400 },
- { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+ { LDJ_DEVICE(HID_ANY_ID) },
{}
};
MODULE_DEVICE_TABLE(hid, hidpp_devices);
+static const struct hid_usage_id hidpp_usages[] = {
+ { HID_GD_WHEEL, EV_REL, REL_WHEEL_HI_RES },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
+ .usage_table = hidpp_usages,
+ .event = hidpp_event,
.input_configured = hidpp_input_configured,
.input_mapping = hidpp_input_mapping,
};
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 9e2061081879a5..e15ef0db7c993d 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -53,6 +53,8 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD_REPORT_ID 0x28
+#define TRACKPAD2_USB_REPORT_ID 0x02
+#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
#define DOUBLE_REPORT_ID 0xf7
/* These definitions are not precise, but they're close enough. (Bits
@@ -90,6 +92,17 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD_RES_Y \
((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+#define TRACKPAD2_DIMENSION_X (float)16000
+#define TRACKPAD2_MIN_X -3678
+#define TRACKPAD2_MAX_X 3934
+#define TRACKPAD2_RES_X \
+ ((TRACKPAD2_MAX_X - TRACKPAD2_MIN_X) / (TRACKPAD2_DIMENSION_X / 100))
+#define TRACKPAD2_DIMENSION_Y (float)11490
+#define TRACKPAD2_MIN_Y -2478
+#define TRACKPAD2_MAX_Y 2587
+#define TRACKPAD2_RES_Y \
+ ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100))
+
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -182,6 +195,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
{
struct input_dev *input = msc->input;
int id, x, y, size, orientation, touch_major, touch_minor, state, down;
+ int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
@@ -193,6 +207,17 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
touch_minor = tdata[4];
state = tdata[7] & TOUCH_STATE_MASK;
down = state != TOUCH_STATE_NONE;
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ id = tdata[8] & 0xf;
+ x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
+ y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
+ size = tdata[6];
+ orientation = (tdata[8] >> 5) - 4;
+ touch_major = tdata[4];
+ touch_minor = tdata[5];
+ pressure = tdata[7];
+ state = tdata[3] & 0xC0;
+ down = state == 0x80;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf;
x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
@@ -214,7 +239,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* If requested, emulate a scroll wheel by detecting small
* vertical touch motions.
*/
- if (emulate_scroll_wheel) {
+ if (emulate_scroll_wheel && (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
@@ -268,10 +294,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
- else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
}
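
The Trackpad 2 X coordinate added above is a 13-bit signed field packed across tdata[0..1]; the shift-up/arithmetic-shift-down idiom sign-extends it. A standalone sketch (sample bytes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Sign-extend the 13-bit X field spanning tdata[0] and tdata[1]. */
static int decode_x(const uint8_t *tdata)
{
	/* Shift the field's top bit up to bit 31, then arithmetic-shift
	 * back down, as the driver does with plain int. */
	int32_t v = (int32_t)((uint32_t)tdata[1] << 27 |
			      (uint32_t)tdata[0] << 19);
	return v >> 19;
}

int main(void)
{
	const uint8_t tdata[2] = { 0xff, 0x1f };	/* 0x1fff: all 13 bits set */

	printf("x = %d\n", decode_x(tdata));	/* -1 after sign extension */
	return 0;
}
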
@@ -286,6 +316,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
switch (data[0]) {
case TRACKPAD_REPORT_ID:
+ case TRACKPAD2_BT_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
@@ -307,6 +338,22 @@ static int magicmouse_raw_event(struct hid_device *hdev,
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
+ case TRACKPAD2_USB_REPORT_ID:
+ /* Expect twelve bytes of prefix and N*9 bytes of touch data. */
+ if (size < 12 || ((size - 12) % 9) != 0)
+ return 0;
+ npoints = (size - 12) / 9;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD2_USB_REPORT_ID\n",
+ size);
+ return 0;
+ }
+ msc->ntouches = 0;
+ for (ii = 0; ii < npoints; ii++)
+ magicmouse_emit_touch(msc, ii, data + ii * 9 + 12);
+
+ clicks = data[1];
+ break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
@@ -352,6 +399,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
input_mt_report_pointer_emulation(input, true);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_mt_sync_frame(input);
+ input_report_key(input, BTN_MOUSE, clicks & 1);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
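
The size check in the TRACKPAD2_USB_REPORT_ID case above is header-plus-records arithmetic; a sketch of the same validation (sizes as in the hunk).

#include <stdio.h>

/* A USB Trackpad 2 report is a 12-byte header plus nine bytes per touch. */
static int trackpad2_usb_npoints(int size)
{
	if (size < 12 || (size - 12) % 9 != 0)
		return -1;	/* malformed report */
	return (size - 12) / 9;
}

int main(void)
{
	printf("%d\n", trackpad2_usb_npoints(12 + 3 * 9));	/* 3 touches */
	printf("%d\n", trackpad2_usb_npoints(20));		/* -1, malformed */
	return 0;
}
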
@@ -364,6 +414,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
int error;
+ int mt_flags = 0;
__set_bit(EV_KEY, input->evbit);
@@ -380,6 +431,22 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_WHEEL, input->relbit);
__set_bit(REL_HWHEEL, input->relbit);
}
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ /* Set the device name to ensure the same driver settings
+ * get loaded, whether connected through Bluetooth or USB.
+ */
+ input->name = "Apple Inc. Magic Trackpad 2";
+
+ __clear_bit(EV_MSC, input->evbit);
+ __clear_bit(BTN_0, input->keybit);
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
+ __set_bit(BTN_MOUSE, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
/* input->keybit is initialized with incorrect button info
* for Magic Trackpad. There really is only one physical
@@ -402,14 +469,13 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_ABS, input->evbit);
- error = input_mt_init_slots(input, 16, 0);
+ error = input_mt_init_slots(input, 16, mt_flags);
if (error)
return error;
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
4, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
/* Note: Touch Y position from the device is inverted relative
* to how pointer motion is reported (and relative to how USB
@@ -418,6 +484,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y,
@@ -427,7 +494,25 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
MOUSE_RES_X);
input_abs_set_res(input, ABS_MT_POSITION_Y,
MOUSE_RES_Y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
+ input_set_abs_params(input, ABS_X, TRACKPAD2_MIN_X,
+ TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD2_MIN_Y,
+ TRACKPAD2_MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD2_MIN_X, TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD2_MIN_Y, TRACKPAD2_MAX_Y, 0, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD2_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD2_RES_Y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
TRACKPAD_MAX_X, 4, 0);
input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
@@ -447,7 +532,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
input_set_events_per_packet(input, 60);
- if (report_undeciphered) {
+ if (report_undeciphered &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
@@ -465,7 +551,8 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
msc->input = hi->input;
/* Magic Trackpad does not give relative data after switching to MT */
- if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
+ if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
+ hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
field->flags & HID_MAIN_ITEM_RELATIVE)
return -1;
@@ -494,10 +581,20 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- __u8 feature[] = { 0xd7, 0x01 };
+ const u8 *feature;
+ const u8 feature_mt[] = { 0xD7, 0x01 };
+ const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
+ const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
+ u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
+ int feature_size;
+
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->type != HID_TYPE_USBMOUSE)
+ return 0;
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
@@ -531,7 +628,14 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID);
- else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE)
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_BT_REPORT_ID);
+ else /* USB_VENDOR_ID_APPLE */
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_USB_REPORT_ID);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -545,6 +649,25 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
+ if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_bt);
+ feature = feature_mt_trackpad2_bt;
+ } else { /* USB_VENDOR_ID_APPLE */
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
+ }
+ } else {
+ feature_size = sizeof(feature_mt);
+ feature = feature_mt;
+ }
+
+ buf = kmemdup(feature, feature_size, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
/*
* Some devices respond with 'invalid report id' when the feature
* report switching them into multitouch mode is sent to them.
@@ -553,9 +676,10 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
- if (ret != -EIO && ret != sizeof(feature)) {
+ kfree(buf);
+ if (ret != -EIO && ret != feature_size) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
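
The kmemdup() introduced above exists because hid_hw_raw_request() should be handed a writable heap buffer rather than const or stack data; a userspace analogue of the duplicate-then-send pattern (memdup() here is a hypothetical helper, not a libc function).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): copy a const template to the heap. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	static const unsigned char feature_mt[] = { 0xD7, 0x01 };
	unsigned char *buf = memdup(feature_mt, sizeof(feature_mt));

	if (!buf)
		return 1;
	printf("report id 0x%02x, %zu bytes\n", buf[0], sizeof(feature_mt));
	free(buf);
	return 0;
}
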
@@ -571,6 +695,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index dcc50f930e4e39..095d5a1735012b 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -166,6 +166,7 @@ static void mt_post_parse(struct mt_device *td);
#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109
#define MT_CLS_VTL 0x0110
#define MT_CLS_GOOGLE 0x0111
+#define MT_CLS_WALLABY 0x0112
#define MT_DEFAULT_MAXCONTACT 10
#define MT_MAX_MAXCONTACT 250
@@ -289,6 +290,12 @@ static struct mt_class mt_classes[] = {
MT_QUIRK_SLOT_IS_CONTACTID |
MT_QUIRK_HOVERING
},
+ { .name = MT_CLS_WALLABY,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_IGNORE_DUPLICATES |
+ MT_QUIRK_HOVERING |
+ MT_QUIRK_CONTACT_CNT_ACCURATE,
+ .export_all_inputs = true },
{ }
};
@@ -347,7 +354,8 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
*/
if (!(hdev->quirks & HID_QUIRK_NO_INIT_REPORTS))
return;
- if (td->mtclass.name != MT_CLS_WIN_8)
+ if (td->mtclass.name != MT_CLS_WIN_8 &&
+ td->mtclass.name != MT_CLS_WALLABY)
return;
buf = hid_alloc_report_buf(report, GFP_KERNEL);
@@ -527,7 +535,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
mt_store_field(usage, td, hi);
return 1;
case HID_DG_CONFIDENCE:
- if (cls->name == MT_CLS_WIN_8 &&
+ if ((cls->name == MT_CLS_WIN_8 ||
+ cls->name == MT_CLS_WALLABY) &&
(field->application == HID_DG_TOUCHPAD ||
field->application == HID_DG_TOUCHSCREEN)) {
cls->quirks |= MT_QUIRK_CONFIDENCE;
@@ -1624,6 +1633,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_VTL,
USB_DEVICE_ID_VTL_MULTITOUCH_FF3F) },
+ /* Wallaby devices */
+ { .driver_data = MT_CLS_WALLABY,
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
+ 0x03f6, 0xa001) },
+
/* Wistron panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 756d1ef9bd991d..6124fd6e04d1a7 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
+ if (ret)
+ hid_err(hdev, "cannot create sysfs group\n");
return 0;
err_free:
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
index febb21ee190e99..584b10d3fc3d84 100644
--- a/drivers/hid/hid-plantronics.c
+++ b/drivers/hid/hid-plantronics.c
@@ -2,7 +2,7 @@
* Plantronics USB HID Driver
*
* Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
- * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com>
+ * Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com>
*/
/*
@@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev,
unsigned short mapped_key;
unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
+ /* special case for PTT products */
+ if (field->application == HID_GD_JOYSTICK)
+ goto defaulted;
+
/* handle volume up/down mapping */
/* non-standard types or multi-HID interfaces - plt_type is PID */
if (!(plt_type & HID_USAGE_PAGE)) {
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index f81c03b9ee2705..71c4cf5e430665 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -1185,6 +1185,20 @@ static int rmi_populate(struct hid_device *hdev)
return 0;
}
+static int rmi_open(struct input_dev *input)
+{
+ struct hid_device *hdev = input_get_drvdata(input);
+
+ return hid_hw_open(hdev);
+}
+
+static void rmi_close(struct input_dev *input)
+{
+ struct hid_device *hdev = input_get_drvdata(input);
+
+ hid_hw_close(hdev);
+}
+
static int rmi_inhibit(struct input_dev *input)
{
struct hid_device *hdev = input_get_drvdata(input);
@@ -1267,6 +1281,9 @@ static int rmi_input_configured(struct hid_device *hdev, struct hid_input *hi)
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
}
+
+ input->open = rmi_open;
+ input->close = rmi_close;
input->inhibit = rmi_inhibit;
input->uninhibit = rmi_uninhibit;
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 21febbb0d84e6e..6f3d47185bf00d 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2460,6 +2460,12 @@ static const struct hid_device_id sony_devices[] = {
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
{ }
};
MODULE_DEVICE_TABLE(hid, sony_devices);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 77246d2baef99a..2483ae81d1fb20 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -473,7 +473,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if ((ret_size > size) || (ret_size <= 2)) {
+ if ((ret_size > size) || (ret_size < 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
@@ -1089,6 +1089,14 @@ static int i2c_hid_probe(struct i2c_client *client,
pm_runtime_enable(&client->dev);
device_enable_async_suspend(&client->dev);
+ /* Make sure there is something at this address */
+ ret = i2c_smbus_read_byte(client);
+ if (ret < 0) {
+ dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
+ ret = -ENXIO;
+ goto err_pm;
+ }
+
ret = i2c_hid_fetch_hid_descriptor(ihid);
if (ret < 0)
goto err_pm;
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
new file mode 100644
index 00000000000000..519e4c8b53c4fe
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -0,0 +1,17 @@
+menu "Intel ISH HID support"
+ depends on (X86_64 || COMPILE_TEST) && PCI
+
+config INTEL_ISH_HID
+ tristate "Intel Integrated Sensor Hub"
+ default n
+ select HID
+ help
+ The Integrated Sensor Hub (ISH) enables offloading sensor polling
+ and algorithm processing to a dedicated low power processor in the
+ chipset. This allows the core processor to go into low power modes
+ more often, resulting in increased battery life.
+ The current processors that support ISH are: Cherrytrail, Skylake,
+ Broxton and Kaby Lake.
+
+ Say Y here if you want to support Intel ISH. If unsure, say N.
+endmenu
diff --git a/drivers/hid/intel-ish-hid/Makefile b/drivers/hid/intel-ish-hid/Makefile
new file mode 100644
index 00000000000000..8c08b0b358b1a2
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile - Intel ISH HID drivers
+# Copyright (c) 2014-2016, Intel Corporation.
+#
+#
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp.o
+intel-ishtp-objs := ishtp/init.o
+intel-ishtp-objs += ishtp/hbm.o
+intel-ishtp-objs += ishtp/client.o
+intel-ishtp-objs += ishtp/bus.o
+intel-ishtp-objs += ishtp/dma-if.o
+intel-ishtp-objs += ishtp/client-buffers.o
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ish-ipc.o
+intel-ish-ipc-objs := ipc/ipc.o
+intel-ish-ipc-objs += ipc/pci-ish.o
+
+obj-$(CONFIG_INTEL_ISH_HID) += intel-ishtp-hid.o
+intel-ishtp-hid-objs := ishtp-hid.o
+intel-ishtp-hid-objs += ishtp-hid-client.o
+
+ccflags-y += -Idrivers/hid/intel-ish-hid/ishtp
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
new file mode 100644
index 00000000000000..a5897b9c0956a1
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish-regs.h
@@ -0,0 +1,228 @@
+/*
+ * ISH registers definitions
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_ISH_REGS_H_
+#define _ISHTP_ISH_REGS_H_
+
+
+/*** IPC PCI Offsets and sizes ***/
+/* ISH IPC Base Address */
+#define IPC_REG_BASE 0x0000
+/* Peripheral Interrupt Status Register */
+#define IPC_REG_PISR_CHV_AB (IPC_REG_BASE + 0x00)
+/* Peripheral Interrupt Mask Register */
+#define IPC_REG_PIMR_CHV_AB (IPC_REG_BASE + 0x04)
+/*BXT, CHV_K0*/
+/*Peripheral Interrupt Status Register */
+#define IPC_REG_PISR_BXT (IPC_REG_BASE + 0x0C)
+/*Peripheral Interrupt Mask Register */
+#define IPC_REG_PIMR_BXT (IPC_REG_BASE + 0x08)
+/***********************************/
+/* ISH Host Firmware status Register */
+#define IPC_REG_ISH_HOST_FWSTS (IPC_REG_BASE + 0x34)
+/* Host Communication Register */
+#define IPC_REG_HOST_COMM (IPC_REG_BASE + 0x38)
+/* Reset register */
+#define IPC_REG_ISH_RST (IPC_REG_BASE + 0x44)
+
+/* Inbound doorbell register Host to ISH */
+#define IPC_REG_HOST2ISH_DRBL (IPC_REG_BASE + 0x48)
+/* Outbound doorbell register ISH to Host */
+#define IPC_REG_ISH2HOST_DRBL (IPC_REG_BASE + 0x54)
+/* ISH to HOST message registers */
+#define IPC_REG_ISH2HOST_MSG (IPC_REG_BASE + 0x60)
+/* HOST to ISH message registers */
+#define IPC_REG_HOST2ISH_MSG (IPC_REG_BASE + 0xE0)
+/* REMAP2 to enable DMA (D3 RCR) */
+#define IPC_REG_ISH_RMP2 (IPC_REG_BASE + 0x368)
+
+#define IPC_REG_MAX (IPC_REG_BASE + 0x400)
+
+/*** register bits - HISR ***/
+/* bit corresponds HOST2ISH interrupt in PISR and PIMR registers */
+#define IPC_INT_HOST2ISH_BIT (1<<0)
+/***********************************/
+/*CHV_A0, CHV_B0*/
+/* bit corresponds ISH2HOST interrupt in PISR and PIMR registers */
+#define IPC_INT_ISH2HOST_BIT_CHV_AB (1<<3)
+/*BXT, CHV_K0*/
+/* bit corresponds ISH2HOST interrupt in PISR and PIMR registers */
+#define IPC_INT_ISH2HOST_BIT_BXT (1<<0)
+/***********************************/
+
+/* bit corresponds ISH2HOST busy clear interrupt in PIMR register */
+#define IPC_INT_ISH2HOST_CLR_MASK_BIT (1<<11)
+
+/* offset of ISH2HOST busy clear interrupt in IPC_BUSY_CLR register */
+#define IPC_INT_ISH2HOST_CLR_OFFS (0)
+
+/* bit corresponds ISH2HOST busy clear interrupt in IPC_BUSY_CLR register */
+#define IPC_INT_ISH2HOST_CLR_BIT (1<<IPC_INT_ISH2HOST_CLR_OFFS)
+
+/* bit corresponds busy bit in doorbell registers */
+#define IPC_DRBL_BUSY_OFFS (31)
+#define IPC_DRBL_BUSY_BIT (1<<IPC_DRBL_BUSY_OFFS)
+
+#define IPC_HOST_OWNS_MSG_OFFS (30)
+
+/*
+ * A0: bit means that host owns MSGnn registers and is reading them.
+ * ISH FW may not write to them
+ */
+#define IPC_HOST_OWNS_MSG_BIT (1<<IPC_HOST_OWNS_MSG_OFFS)
+
+/*
+ * Host status bits (HOSTCOMM)
+ */
+/* bit corresponds host ready bit in Host Status Register (HOST_COMM) */
+#define IPC_HOSTCOMM_READY_OFFS (7)
+#define IPC_HOSTCOMM_READY_BIT (1<<IPC_HOSTCOMM_READY_OFFS)
+
+/***********************************/
+/*CHV_A0, CHV_B0*/
+#define IPC_HOSTCOMM_INT_EN_OFFS_CHV_AB (31)
+#define IPC_HOSTCOMM_INT_EN_BIT_CHV_AB \
+ (1<<IPC_HOSTCOMM_INT_EN_OFFS_CHV_AB)
+/*BXT, CHV_K0*/
+#define IPC_PIMR_INT_EN_OFFS_BXT (0)
+#define IPC_PIMR_INT_EN_BIT_BXT (1<<IPC_PIMR_INT_EN_OFFS_BXT)
+
+#define IPC_HOST2ISH_BUSYCLEAR_MASK_OFFS_BXT (8)
+#define IPC_HOST2ISH_BUSYCLEAR_MASK_BIT \
+ (1<<IPC_HOST2ISH_BUSYCLEAR_MASK_OFFS_BXT)
+/***********************************/
+/*
+ * both Host and ISH have ILUP at bit 0
+ * bit corresponds host ready bit in both status registers
+ */
+#define IPC_ILUP_OFFS (0)
+#define IPC_ILUP_BIT (1<<IPC_ILUP_OFFS)
+
+/*
+ * ISH FW status bits in ISH FW Status Register
+ */
+#define IPC_ISH_FWSTS_SHIFT 12
+#define IPC_ISH_FWSTS_MASK GENMASK(15, 12)
+#define IPC_GET_ISH_FWSTS(status) \
+ (((status) & IPC_ISH_FWSTS_MASK) >> IPC_ISH_FWSTS_SHIFT)
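+
+/*
+ * Worked example (hypothetical register value, not part of the original
+ * sources): if the FW status register reads 0x0000F008, then
+ * IPC_GET_ISH_FWSTS(0x0000F008) == (0x0000F008 & 0xF000) >> 12 == 0xF,
+ * which corresponds to FWSTS_SENSOR_APP_RUNNING (15) in hw-ish.h.
+ */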
+
+/*
+ * FW status bits (relevant)
+ */
+#define IPC_FWSTS_ILUP 0x1
+#define IPC_FWSTS_ISHTP_UP (1<<1)
+#define IPC_FWSTS_DMA0 (1<<16)
+#define IPC_FWSTS_DMA1 (1<<17)
+#define IPC_FWSTS_DMA2 (1<<18)
+#define IPC_FWSTS_DMA3 (1<<19)
+
+#define IPC_ISH_IN_DMA \
+ (IPC_FWSTS_DMA0 | IPC_FWSTS_DMA1 | IPC_FWSTS_DMA2 | IPC_FWSTS_DMA3)
+
+/* bit corresponds host ready bit in ISH FW Status Register */
+#define IPC_ISH_ISHTP_READY_OFFS (1)
+#define IPC_ISH_ISHTP_READY_BIT (1<<IPC_ISH_ISHTP_READY_OFFS)
+
+#define IPC_RMP2_DMA_ENABLED 0x1 /* Value to enable DMA, per D3 RCR */
+
+#define IPC_MSG_MAX_SIZE 0x80
+
+
+#define IPC_HEADER_LENGTH_MASK 0x03FF
+#define IPC_HEADER_PROTOCOL_MASK 0x0F
+#define IPC_HEADER_MNG_CMD_MASK 0x0F
+
+#define IPC_HEADER_LENGTH_OFFSET 0
+#define IPC_HEADER_PROTOCOL_OFFSET 10
+#define IPC_HEADER_MNG_CMD_OFFSET 16
+
+#define IPC_HEADER_GET_LENGTH(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_LENGTH_OFFSET)&IPC_HEADER_LENGTH_MASK)
+#define IPC_HEADER_GET_PROTOCOL(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_PROTOCOL_OFFSET)&IPC_HEADER_PROTOCOL_MASK)
+#define IPC_HEADER_GET_MNG_CMD(drbl_reg) \
+ (((drbl_reg) >> IPC_HEADER_MNG_CMD_OFFSET)&IPC_HEADER_MNG_CMD_MASK)
+
+#define IPC_IS_BUSY(drbl_reg) \
+ (((drbl_reg)&IPC_DRBL_BUSY_BIT) == ((uint32_t)IPC_DRBL_BUSY_BIT))
+
+/***********************************/
+/*CHV_A0, CHV_B0*/
+#define IPC_INT_FROM_ISH_TO_HOST_CHV_AB(drbl_reg) \
+ (((drbl_reg)&IPC_INT_ISH2HOST_BIT_CHV_AB) == \
+ ((u32)IPC_INT_ISH2HOST_BIT_CHV_AB))
+/*BXT, CHV_K0*/
+#define IPC_INT_FROM_ISH_TO_HOST_BXT(drbl_reg) \
+ (((drbl_reg)&IPC_INT_ISH2HOST_BIT_BXT) == \
+ ((u32)IPC_INT_ISH2HOST_BIT_BXT))
+/***********************************/
+
+#define IPC_BUILD_HEADER(length, protocol, busy) \
+ (((busy)<<IPC_DRBL_BUSY_OFFS) | \
+ ((protocol) << IPC_HEADER_PROTOCOL_OFFSET) | \
+ ((length)<<IPC_HEADER_LENGTH_OFFSET))
+
+#define IPC_BUILD_MNG_MSG(cmd, length) \
+ (((1)<<IPC_DRBL_BUSY_OFFS)| \
+ ((IPC_PROTOCOL_MNG)<<IPC_HEADER_PROTOCOL_OFFSET)| \
+ ((cmd)<<IPC_HEADER_MNG_CMD_OFFSET)| \
+ ((length)<<IPC_HEADER_LENGTH_OFFSET))
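+
+/*
+ * Worked example (illustrative values, not part of the original sources):
+ * packing and unpacking a doorbell word with the macros above.
+ *
+ *   IPC_BUILD_HEADER(16, IPC_PROTOCOL_ISHTP, 1)
+ *     == (1 << 31) | (1 << 10) | (16 << 0) == 0x80000410
+ *
+ *   IPC_HEADER_GET_LENGTH(0x80000410)   == 16
+ *   IPC_HEADER_GET_PROTOCOL(0x80000410) == IPC_PROTOCOL_ISHTP
+ *   IPC_IS_BUSY(0x80000410)             evaluates to true
+ */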
+
+
+#define IPC_SET_HOST_READY(host_status) \
+ ((host_status) |= (IPC_HOSTCOMM_READY_BIT))
+
+#define IPC_SET_HOST_ILUP(host_status) \
+ ((host_status) |= (IPC_ILUP_BIT))
+
+#define IPC_CLEAR_HOST_READY(host_status) \
+ ((host_status) ^= (IPC_HOSTCOMM_READY_BIT))
+
+#define IPC_CLEAR_HOST_ILUP(host_status) \
+ ((host_status) ^= (IPC_ILUP_BIT))
+
+/* todo - temp until PIMR HW ready */
+#define IPC_HOST_BUSY_READING_OFFS 6
+
+/* bit corresponds host ready bit in Host Status Register (HOST_COMM) */
+#define IPC_HOST_BUSY_READING_BIT (1<<IPC_HOST_BUSY_READING_OFFS)
+
+#define IPC_SET_HOST_BUSY_READING(host_status) \
+ ((host_status) |= (IPC_HOST_BUSY_READING_BIT))
+
+#define IPC_CLEAR_HOST_BUSY_READING(host_status)\
+ ((host_status) ^= (IPC_HOST_BUSY_READING_BIT))
+
+
+#define IPC_IS_ISH_ISHTP_READY(ish_status) \
+ (((ish_status) & IPC_ISH_ISHTP_READY_BIT) == \
+ ((uint32_t)IPC_ISH_ISHTP_READY_BIT))
+
+#define IPC_IS_ISH_ILUP(ish_status) \
+ (((ish_status) & IPC_ILUP_BIT) == ((uint32_t)IPC_ILUP_BIT))
+
+
+#define IPC_PROTOCOL_ISHTP 1
+#define IPC_PROTOCOL_MNG 3
+
+#define MNG_RX_CMPL_ENABLE 0
+#define MNG_RX_CMPL_DISABLE 1
+#define MNG_RX_CMPL_INDICATION 2
+#define MNG_RESET_NOTIFY 3
+#define MNG_RESET_NOTIFY_ACK 4
+#define MNG_SYNC_FW_CLOCK 5
+#define MNG_ILLEGAL_CMD 0xFF
+
+#endif /* _ISHTP_ISH_REGS_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
new file mode 100644
index 00000000000000..08a8327dfd2248
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -0,0 +1,88 @@
+/*
+ * H/W layer of ISHTP provider device (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_HW_ISH_H_
+#define _ISHTP_HW_ISH_H_
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "hw-ish-regs.h"
+#include "ishtp-dev.h"
+
+#define CHV_DEVICE_ID 0x22D8
+#define BXT_Ax_DEVICE_ID 0x0AA2
+#define BXT_Bx_DEVICE_ID 0x1AA2
+#define APL_Ax_DEVICE_ID 0x5AA2
+#define SPT_Ax_DEVICE_ID 0x9D35
+#define CNL_Ax_DEVICE_ID 0x9DFC
+#define GLK_Ax_DEVICE_ID 0x31A2
+#define CNL_H_DEVICE_ID 0xA37C
+#define ICL_MOBILE_DEVICE_ID 0x34FC
+#define SPT_H_DEVICE_ID 0xA135
+
+#define REVISION_ID_CHT_A0 0x6
+#define REVISION_ID_CHT_Ax_SI 0x0
+#define REVISION_ID_CHT_Bx_SI 0x10
+#define REVISION_ID_CHT_Kx_SI 0x20
+#define REVISION_ID_CHT_Dx_SI 0x30
+#define REVISION_ID_CHT_B0 0xB0
+#define REVISION_ID_SI_MASK 0x70
+
+struct ipc_rst_payload_type {
+ uint16_t reset_id;
+ uint16_t reserved;
+};
+
+struct time_sync_format {
+ uint8_t ts1_source;
+ uint8_t ts2_source;
+ uint16_t reserved;
+} __packed;
+
+struct ipc_time_update_msg {
+ uint64_t primary_host_time;
+ struct time_sync_format sync_info;
+ uint64_t secondary_host_time;
+} __packed;
+
+enum {
+ HOST_UTC_TIME_USEC = 0,
+ HOST_SYSTEM_TIME_USEC = 1
+};
+
+struct ish_hw {
+ void __iomem *mem_addr;
+};
+
+/*
+ * ISH FW status type
+ */
+enum {
+ FWSTS_AFTER_RESET = 0,
+ FWSTS_WAIT_FOR_HOST = 4,
+ FWSTS_START_KERNEL_DMA = 5,
+ FWSTS_FW_IS_RUNNING = 7,
+ FWSTS_SENSOR_APP_LOADED = 8,
+ FWSTS_SENSOR_APP_RUNNING = 15
+};
+
+#define to_ish_hw(dev) (struct ish_hw *)((dev)->hw)
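+
+/*
+ * Note (explanatory, based on ish_dev_init() in ipc/ipc.c): the ishtp
+ * device and its ish_hw area are carved from one devm_kzalloc() block,
+ * and (dev)->hw is presumably pointed at the trailing ish_hw part by the
+ * generic init code, so to_ish_hw() is simply a cast of that pointer.
+ */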
+
+irqreturn_t ish_irq_handler(int irq, void *dev_id);
+struct ishtp_device *ish_dev_init(struct pci_dev *pdev);
+int ish_hw_start(struct ishtp_device *dev);
+void ish_device_disable(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HW_ISH_H_ */
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
new file mode 100644
index 00000000000000..bfbca7ec54ce4a
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -0,0 +1,976 @@
+/*
+ * H/W layer of ISHTP provider device (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include "client.h"
+#include "hw-ish.h"
+#include "hbm.h"
+
+/* For FW reset flow */
+static struct work_struct fw_reset_work;
+static struct ishtp_device *ishtp_dev;
+
+/**
+ * ish_reg_read() - Read register
+ * @dev: ISHTP device pointer
+ * @offset: Register offset
+ *
+ * Read 32 bit register at a given offset
+ *
+ * Return: Read register value
+ */
+static inline uint32_t ish_reg_read(const struct ishtp_device *dev,
+ unsigned long offset)
+{
+ struct ish_hw *hw = to_ish_hw(dev);
+
+ return readl(hw->mem_addr + offset);
+}
+
+/**
+ * ish_reg_write() - Write register
+ * @dev: ISHTP device pointer
+ * @offset: Register offset
+ * @value: Value to write
+ *
+ * Writes a 32 bit register at a given offset
+ */
+static inline void ish_reg_write(struct ishtp_device *dev,
+ unsigned long offset,
+ uint32_t value)
+{
+ struct ish_hw *hw = to_ish_hw(dev);
+
+ writel(value, hw->mem_addr + offset);
+}
+
+/**
+ * _ish_read_fw_sts_reg() - Read FW status register
+ * @dev: ISHTP device pointer
+ *
+ * Read FW status register
+ *
+ * Return: Read register value
+ */
+static inline uint32_t _ish_read_fw_sts_reg(struct ishtp_device *dev)
+{
+ return ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
+ * check_generated_interrupt() - Check if ISH interrupt
+ * @dev: ISHTP device pointer
+ *
+ * Check if an interrupt was generated for ISH
+ *
+ * Return: true or false
+ */
+static bool check_generated_interrupt(struct ishtp_device *dev)
+{
+ bool interrupt_generated = true;
+ uint32_t pisr_val = 0;
+
+ if (dev->pdev->device == CHV_DEVICE_ID) {
+ pisr_val = ish_reg_read(dev, IPC_REG_PISR_CHV_AB);
+ interrupt_generated =
+ IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
+ } else {
+ pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
+ interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
+ }
+
+ return interrupt_generated;
+}
+
+/**
+ * ish_is_input_ready() - Check if FW ready for RX
+ * @dev: ISHTP device pointer
+ *
+ * Check if ISH FW is ready for receiving data
+ *
+ * Return: true or false
+ */
+static bool ish_is_input_ready(struct ishtp_device *dev)
+{
+ uint32_t doorbell_val;
+
+ doorbell_val = ish_reg_read(dev, IPC_REG_HOST2ISH_DRBL);
+ return !IPC_IS_BUSY(doorbell_val);
+}
+
+/**
+ * set_host_ready() - Indicate host ready
+ * @dev: ISHTP device pointer
+ *
+ * Set host ready indication to FW
+ */
+static void set_host_ready(struct ishtp_device *dev)
+{
+ if (dev->pdev->device == CHV_DEVICE_ID) {
+ if (dev->pdev->revision == REVISION_ID_CHT_A0 ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Ax_SI)
+ ish_reg_write(dev, IPC_REG_HOST_COMM, 0x81);
+ else if (dev->pdev->revision == REVISION_ID_CHT_B0 ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Bx_SI ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Kx_SI ||
+ (dev->pdev->revision & REVISION_ID_SI_MASK) ==
+ REVISION_ID_CHT_Dx_SI) {
+ uint32_t host_comm_val;
+
+ host_comm_val = ish_reg_read(dev, IPC_REG_HOST_COMM);
+ host_comm_val |= IPC_HOSTCOMM_INT_EN_BIT_CHV_AB | 0x81;
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_comm_val);
+ }
+ } else {
+ uint32_t host_pimr_val;
+
+ host_pimr_val = ish_reg_read(dev, IPC_REG_PIMR_BXT);
+ host_pimr_val |= IPC_PIMR_INT_EN_BIT_BXT;
+ /*
+ * disable interrupt generated instead of
+ * RX_complete_msg
+ */
+ host_pimr_val &= ~IPC_HOST2ISH_BUSYCLEAR_MASK_BIT;
+
+ ish_reg_write(dev, IPC_REG_PIMR_BXT, host_pimr_val);
+ }
+}
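+
+/*
+ * Summary of the per-platform programming done in set_host_ready() above
+ * (explanatory, derived from the code):
+ *
+ *   CHV A0/Ax silicon:        HOST_COMM = 0x81
+ *   CHV B0/Bx/Kx/Dx silicon:  HOST_COMM |= IPC_HOSTCOMM_INT_EN_BIT_CHV_AB | 0x81
+ *   BXT and the rest:         PIMR |= IPC_PIMR_INT_EN_BIT_BXT and
+ *                             PIMR &= ~IPC_HOST2ISH_BUSYCLEAR_MASK_BIT
+ */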
+
+/**
+ * ishtp_fw_is_ready() - Check if FW ready
+ * @dev: ISHTP device pointer
+ *
+ * Check if ISH FW is ready
+ *
+ * Return: true or false
+ */
+static bool ishtp_fw_is_ready(struct ishtp_device *dev)
+{
+ uint32_t ish_status = _ish_read_fw_sts_reg(dev);
+
+ return IPC_IS_ISH_ILUP(ish_status) &&
+ IPC_IS_ISH_ISHTP_READY(ish_status);
+}
+
+/**
+ * ish_set_host_rdy() - Indicate host ready
+ * @dev: ISHTP device pointer
+ *
+ * Set host ready indication to FW
+ */
+static void ish_set_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ IPC_SET_HOST_READY(host_status);
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
+}
+
+/**
+ * ish_clr_host_rdy() - Indicate host not ready
+ * @dev: ISHTP device pointer
+ *
+ * Send host not ready indication to FW
+ */
+static void ish_clr_host_rdy(struct ishtp_device *dev)
+{
+ uint32_t host_status = ish_reg_read(dev, IPC_REG_HOST_COMM);
+
+ IPC_CLEAR_HOST_READY(host_status);
+ ish_reg_write(dev, IPC_REG_HOST_COMM, host_status);
+}
+
+/**
+ * _ishtp_read_hdr() - Read message header
+ * @dev: ISHTP device pointer
+ *
+ * Read header of 32bit length
+ *
+ * Return: Read register value
+ */
+static uint32_t _ishtp_read_hdr(const struct ishtp_device *dev)
+{
+ return ish_reg_read(dev, IPC_REG_ISH2HOST_MSG);
+}
+
+/**
+ * _ishtp_read - Read message
+ * @dev: ISHTP device pointer
+ * @buffer: message buffer
+ * @buffer_length: length of message buffer
+ *
+ * Read message from FW
+ *
+ * Return: Always 0
+ */
+static int _ishtp_read(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length)
+{
+ uint32_t i;
+ uint32_t *r_buf = (uint32_t *)buffer;
+ uint32_t msg_offs;
+
+ msg_offs = IPC_REG_ISH2HOST_MSG + sizeof(struct ishtp_msg_hdr);
+ for (i = 0; i < buffer_length; i += sizeof(uint32_t))
+ *r_buf++ = ish_reg_read(dev, msg_offs + i);
+
+ return 0;
+}
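+
+/*
+ * Note (explanatory): the copy loop above works in whole 32-bit words, so
+ * for a buffer_length that is not a multiple of 4 it stores up to 3 bytes
+ * past buffer_length; callers are expected to supply a dword-sized buffer.
+ */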
+
+/**
+ * write_ipc_from_queue() - try to write ipc msg from Tx queue to device
+ * @dev: ishtp device pointer
+ *
+ * Check if DRBL is cleared. If it is, write the first IPC msg, then call
+ * the callback function (unless it's NULL).
+ *
+ * Return: 0 for success else failure code
+ */
+static int write_ipc_from_queue(struct ishtp_device *dev)
+{
+ struct wr_msg_ctl_info *ipc_link;
+ unsigned long length;
+ unsigned long rem;
+ unsigned long flags;
+ uint32_t doorbell_val;
+ uint32_t *r_buf;
+ uint32_t reg_addr;
+ int i;
+ void (*ipc_send_compl)(void *);
+ void *ipc_send_compl_prm;
+ static int out_ipc_locked;
+ unsigned long out_ipc_flags;
+
+ if (dev->dev_state == ISHTP_DEV_DISABLED)
+ return -EINVAL;
+
+ spin_lock_irqsave(&dev->out_ipc_spinlock, out_ipc_flags);
+ if (out_ipc_locked) {
+ spin_unlock_irqrestore(&dev->out_ipc_spinlock, out_ipc_flags);
+ return -EBUSY;
+ }
+ out_ipc_locked = 1;
+ if (!ish_is_input_ready(dev)) {
+ out_ipc_locked = 0;
+ spin_unlock_irqrestore(&dev->out_ipc_spinlock, out_ipc_flags);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&dev->out_ipc_spinlock, out_ipc_flags);
+
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ /*
+ * if tx send list is empty - return 0;
+ * may happen, as RX_COMPLETE handler doesn't check list emptiness.
+ */
+ if (list_empty(&dev->wr_processing_list_head.link)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ out_ipc_locked = 0;
+ return 0;
+ }
+
+ ipc_link = list_entry(dev->wr_processing_list_head.link.next,
+ struct wr_msg_ctl_info, link);
+ /* first 4 bytes of the data is the doorbell value (IPC header) */
+ length = ipc_link->length - sizeof(uint32_t);
+ doorbell_val = *(uint32_t *)ipc_link->inline_data;
+ r_buf = (uint32_t *)(ipc_link->inline_data + sizeof(uint32_t));
+
+ /* If sending MNG_SYNC_FW_CLOCK, update clock again */
+ if (IPC_HEADER_GET_PROTOCOL(doorbell_val) == IPC_PROTOCOL_MNG &&
+ IPC_HEADER_GET_MNG_CMD(doorbell_val) == MNG_SYNC_FW_CLOCK) {
+ uint64_t usec_system, usec_utc;
+ struct ipc_time_update_msg time_update;
+ struct time_sync_format ts_format;
+
+ usec_system = ktime_to_us(ktime_get_boottime());
+ usec_utc = ktime_to_us(ktime_get_real());
+ ts_format.ts1_source = HOST_SYSTEM_TIME_USEC;
+ ts_format.ts2_source = HOST_UTC_TIME_USEC;
+ ts_format.reserved = 0;
+
+ time_update.primary_host_time = usec_system;
+ time_update.secondary_host_time = usec_utc;
+ time_update.sync_info = ts_format;
+
+ memcpy(r_buf, &time_update,
+ sizeof(struct ipc_time_update_msg));
+ }
+
+ for (i = 0, reg_addr = IPC_REG_HOST2ISH_MSG; i < length >> 2; i++,
+ reg_addr += 4)
+ ish_reg_write(dev, reg_addr, r_buf[i]);
+
+ rem = length & 0x3;
+ if (rem > 0) {
+ uint32_t reg = 0;
+
+ memcpy(&reg, &r_buf[length >> 2], rem);
+ ish_reg_write(dev, reg_addr, reg);
+ }
+ /* Flush writes to msg registers and doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ /* Update IPC counters */
+ ++dev->ipc_tx_cnt;
+ dev->ipc_tx_bytes_cnt += IPC_HEADER_GET_LENGTH(doorbell_val);
+
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, doorbell_val);
+ out_ipc_locked = 0;
+
+ ipc_send_compl = ipc_link->ipc_send_compl;
+ ipc_send_compl_prm = ipc_link->ipc_send_compl_prm;
+ list_del_init(&ipc_link->link);
+ list_add_tail(&ipc_link->link, &dev->wr_free_list_head.link);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ /*
+ * callback will be called out of spinlock,
+ * after ipc_link returned to free list
+ */
+ if (ipc_send_compl)
+ ipc_send_compl(ipc_send_compl_prm);
+
+ return 0;
+}
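+
+/*
+ * Layout of wr_msg_ctl_info::inline_data as consumed above (explanatory,
+ * derived from the code):
+ *
+ *   bytes 0..3   doorbell value (IPC header), written to HOST2ISH_DRBL last
+ *   bytes 4..    payload, copied a dword at a time into the HOST2ISH_MSG
+ *                registers, with a partial trailing dword zero-padded
+ */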
+
+/**
+ * write_ipc_to_queue() - write ipc msg to Tx queue
+ * @dev: ishtp device instance
+ * @ipc_send_compl: Send complete callback
+ * @ipc_send_compl_prm: Parameter to send in complete callback
+ * @msg: Pointer to message
+ * @length: Length of message
+ *
+ * Receives a msg with IPC (and upper protocol) header, adds it to the
+ * device Tx-to-write list and then tries to send the first waiting IPC
+ * msg (if DRBL is cleared).
+ * This function returns a negative value on failure (free list is empty
+ * or the msg is too long) and 0 on success.
+ *
+ * Return: 0 for success else failure code
+ */
+static int write_ipc_to_queue(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length)
+{
+ struct wr_msg_ctl_info *ipc_link;
+ unsigned long flags;
+
+ if (length > IPC_FULL_MSG_SIZE)
+ return -EMSGSIZE;
+
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ if (list_empty(&dev->wr_free_list_head.link)) {
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+ return -ENOMEM;
+ }
+ ipc_link = list_entry(dev->wr_free_list_head.link.next,
+ struct wr_msg_ctl_info, link);
+ list_del_init(&ipc_link->link);
+
+ ipc_link->ipc_send_compl = ipc_send_compl;
+ ipc_link->ipc_send_compl_prm = ipc_send_compl_prm;
+ ipc_link->length = length;
+ memcpy(ipc_link->inline_data, msg, length);
+
+ list_add_tail(&ipc_link->link, &dev->wr_processing_list_head.link);
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ write_ipc_from_queue(dev);
+
+ return 0;
+}
+
+/**
+ * ipc_send_mng_msg() - Send management message
+ * @dev: ishtp device instance
+ * @msg_code: Message code
+ * @msg: Pointer to message
+ * @size: Length of message
+ *
+ * Send management message to FW
+ *
+ * Return: 0 for success else failure code
+ */
+static int ipc_send_mng_msg(struct ishtp_device *dev, uint32_t msg_code,
+ void *msg, size_t size)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val = IPC_BUILD_MNG_MSG(msg_code, size);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), msg, size);
+ return write_ipc_to_queue(dev, NULL, NULL, ipc_msg,
+ sizeof(uint32_t) + size);
+}
+
+#define WAIT_FOR_FW_RDY 0x1
+#define WAIT_FOR_INPUT_RDY 0x2
+
+/**
+ * timed_wait_for_timeout() - wait for an event with timeout
+ * @dev: ISHTP device pointer
+ * @condition: indicate the condition for waiting
+ * @timeinc: time slice for every wait cycle, in ms
+ * @timeout: time in ms for timeout
+ *
+ * This function polls for the given condition in a loop, with the loop
+ * period specified in timeinc. Hitting the timeout counts as failure.
+ *
+ * Return: 0 for success else failure code
+ */
+static int timed_wait_for_timeout(struct ishtp_device *dev, int condition,
+ unsigned int timeinc, unsigned int timeout)
+{
+ bool complete = false;
+ int ret;
+
+ do {
+ if (condition == WAIT_FOR_FW_RDY) {
+ complete = ishtp_fw_is_ready(dev);
+ } else if (condition == WAIT_FOR_INPUT_RDY) {
+ complete = ish_is_input_ready(dev);
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!complete) {
+ unsigned long left_time;
+
+ left_time = msleep_interruptible(timeinc);
+ timeout -= (timeinc - left_time);
+ }
+ } while (!complete && timeout > 0);
+
+ if (complete)
+ ret = 0;
+ else
+ ret = -EBUSY;
+
+out:
+ return ret;
+}
+
+#define TIME_SLICE_FOR_FW_RDY_MS 100
+#define TIME_SLICE_FOR_INPUT_RDY_MS 100
+#define TIMEOUT_FOR_FW_RDY_MS 2000
+#define TIMEOUT_FOR_INPUT_RDY_MS 2000
+
+/**
+ * ish_fw_reset_handler() - FW reset handler
+ * @dev: ishtp device pointer
+ *
+ * Handle FW reset
+ *
+ * Return: 0 for success else failure code
+ */
+static int ish_fw_reset_handler(struct ishtp_device *dev)
+{
+ uint32_t reset_id;
+ unsigned long flags;
+ struct wr_msg_ctl_info *processing, *next;
+
+ /* Read reset ID */
+ reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
+
+ /* Clear IPC output queue */
+ spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
+ list_for_each_entry_safe(processing, next,
+ &dev->wr_processing_list_head.link, link) {
+ list_move_tail(&processing->link, &dev->wr_free_list_head.link);
+ }
+ spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
+
+ /* ISHTP notification in IPC_RESET */
+ ishtp_reset_handler(dev);
+
+ if (!ish_is_input_ready(dev))
+ timed_wait_for_timeout(dev, WAIT_FOR_INPUT_RDY,
+ TIME_SLICE_FOR_INPUT_RDY_MS, TIMEOUT_FOR_INPUT_RDY_MS);
+
+ /* ISH FW is dead */
+ if (!ish_is_input_ready(dev))
+ return -EPIPE;
+ /*
+ * Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
+ * RESET_NOTIFY_ACK - FW will be checking for it
+ */
+ ish_set_host_rdy(dev);
+ /* Send RESET_NOTIFY_ACK (with reset_id) */
+ ipc_send_mng_msg(dev, MNG_RESET_NOTIFY_ACK, &reset_id,
+ sizeof(uint32_t));
+
+ /* Wait for ISH FW's ILUP and ISHTP_READY */
+ timed_wait_for_timeout(dev, WAIT_FOR_FW_RDY,
+ TIME_SLICE_FOR_FW_RDY_MS, TIMEOUT_FOR_FW_RDY_MS);
+ if (!ishtp_fw_is_ready(dev)) {
+ /* ISH FW is dead */
+ uint32_t ish_status;
+
+ ish_status = _ish_read_fw_sts_reg(dev);
+ dev_err(dev->devc,
+ "[ishtp-ish]: completed reset, ISH is dead (FWSTS = %08X)\n",
+ ish_status);
+ return -ENODEV;
+ }
+ return 0;
+}
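+
+/*
+ * Host-side view of the FW-initiated reset handshake implemented above
+ * (explanatory, derived from the code):
+ *
+ *   FW  -> host  MNG_RESET_NOTIFY (reset_id in ISH2HOST_MSG)
+ *   host         flush Tx queue, notify ISHTP layer, wait for input-ready
+ *   host -> FW   set HOST2ISH.ILUP, then MNG_RESET_NOTIFY_ACK(reset_id)
+ *   host         wait for FW ILUP + ISHTP_READY, else declare the FW dead
+ */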
+
+#define TIMEOUT_FOR_HW_RDY_MS 300
+
+/**
+ * fw_reset_work_fn() - FW reset worker function
+ * @unused: not used
+ *
+ * Call ish_fw_reset_handler to complete FW reset
+ */
+static void fw_reset_work_fn(struct work_struct *unused)
+{
+ int rv;
+
+ rv = ish_fw_reset_handler(ishtp_dev);
+ if (!rv) {
+ /* ISH is ILUP & ISHTP-ready. Restart ISHTP */
+ msleep_interruptible(TIMEOUT_FOR_HW_RDY_MS);
+ ishtp_dev->recvd_hw_ready = 1;
+ wake_up_interruptible(&ishtp_dev->wait_hw_ready);
+
+ /* ISHTP notification in IPC_RESET sequence completion */
+ ishtp_reset_compl_handler(ishtp_dev);
+ } else
+ dev_err(ishtp_dev->devc, "[ishtp-ish]: FW reset failed (%d)\n",
+ rv);
+}
+
+/**
+ * _ish_sync_fw_clock() - Sync FW clock with the OS clock
+ * @dev: ishtp device pointer
+ *
+ * Sync FW and OS time
+ */
+static void _ish_sync_fw_clock(struct ishtp_device *dev)
+{
+ static unsigned long prev_sync;
+ uint64_t usec;
+
+ if (prev_sync && jiffies - prev_sync < 20 * HZ)
+ return;
+
+ prev_sync = jiffies;
+ usec = ktime_to_us(ktime_get_boottime());
+ ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
+}
+
+/**
+ * recv_ipc() - Receive and process IPC management messages
+ * @dev: ishtp device instance
+ * @doorbell_val: doorbell value
+ *
+ * This function runs in ISR context.
+ * NOTE: Any other mng command than reset_notify and reset_notify_ack
+ * won't wake BH handler
+ */
+static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
+{
+ uint32_t mng_cmd;
+
+ mng_cmd = IPC_HEADER_GET_MNG_CMD(doorbell_val);
+
+ switch (mng_cmd) {
+ default:
+ break;
+
+ case MNG_RX_CMPL_INDICATION:
+ if (dev->suspend_flag) {
+ dev->suspend_flag = 0;
+ wake_up_interruptible(&dev->suspend_wait);
+ }
+ if (dev->resume_flag) {
+ dev->resume_flag = 0;
+ wake_up_interruptible(&dev->resume_wait);
+ }
+
+ write_ipc_from_queue(dev);
+ break;
+
+ case MNG_RESET_NOTIFY:
+ if (!ishtp_dev) {
+ ishtp_dev = dev;
+ INIT_WORK(&fw_reset_work, fw_reset_work_fn);
+ }
+ schedule_work(&fw_reset_work);
+ break;
+
+ case MNG_RESET_NOTIFY_ACK:
+ dev->recvd_hw_ready = 1;
+ wake_up_interruptible(&dev->wait_hw_ready);
+ break;
+ }
+}
+
+/**
+ * ish_irq_handler() - ISH IRQ handler
+ * @irq: irq number
+ * @dev_id: ishtp device pointer
+ *
+ * ISH IRQ handler. If the interrupt was generated for ISH, it processes
+ * the interrupt.
+ */
+irqreturn_t ish_irq_handler(int irq, void *dev_id)
+{
+ struct ishtp_device *dev = dev_id;
+ uint32_t doorbell_val;
+ bool interrupt_generated;
+
+ /* Check that it's interrupt from ISH (may be shared) */
+ interrupt_generated = check_generated_interrupt(dev);
+
+ if (!interrupt_generated)
+ return IRQ_NONE;
+
+ doorbell_val = ish_reg_read(dev, IPC_REG_ISH2HOST_DRBL);
+ if (!IPC_IS_BUSY(doorbell_val))
+ return IRQ_HANDLED;
+
+ if (dev->dev_state == ISHTP_DEV_DISABLED)
+ return IRQ_HANDLED;
+
+ /* Sanity check: IPC dgram length in header */
+ if (IPC_HEADER_GET_LENGTH(doorbell_val) > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "IPC hdr - bad length: %u; dropped\n",
+ (unsigned int)IPC_HEADER_GET_LENGTH(doorbell_val));
+ goto eoi;
+ }
+
+ switch (IPC_HEADER_GET_PROTOCOL(doorbell_val)) {
+ default:
+ break;
+ case IPC_PROTOCOL_MNG:
+ recv_ipc(dev, doorbell_val);
+ break;
+ case IPC_PROTOCOL_ISHTP:
+ ishtp_recv(dev);
+ break;
+ }
+
+eoi:
+ /* Update IPC counters */
+ ++dev->ipc_rx_cnt;
+ dev->ipc_rx_bytes_cnt += IPC_HEADER_GET_LENGTH(doorbell_val);
+
+ ish_reg_write(dev, IPC_REG_ISH2HOST_DRBL, 0);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ish_disable_dma() - disable dma communication between host and ISHFW
+ * @dev: ishtp device pointer
+ *
+ * Clear the dma enable bit and wait for dma inactive.
+ *
+ * Return: 0 for success else error code.
+ */
+static int ish_disable_dma(struct ishtp_device *dev)
+{
+ unsigned int dma_delay;
+
+ /* Clear the dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, 0);
+
+ /* wait for dma inactive */
+ for (dma_delay = 0; dma_delay < MAX_DMA_DELAY &&
+ _ish_read_fw_sts_reg(dev) & (IPC_ISH_IN_DMA);
+ dma_delay += 5)
+ mdelay(5);
+
+ if (dma_delay >= MAX_DMA_DELAY) {
+ dev_err(dev->devc,
+ "Wait for DMA inactive timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_wakeup() - wakeup ishfw from waiting-for-host state
+ * @dev: ishtp device pointer
+ *
+ * Set the dma enable bit and send a void message to FW,
+ * which will wake up the FW from the waiting-for-host state.
+ */
+static void ish_wakeup(struct ishtp_device *dev)
+{
+ /* Set dma enable bit */
+ ish_reg_write(dev, IPC_REG_ISH_RMP2, IPC_RMP2_DMA_ENABLED);
+
+ /*
+ * Send 0 IPC message so that ISH FW wakes up if it was already
+ * asleep.
+ */
+ ish_reg_write(dev, IPC_REG_HOST2ISH_DRBL, IPC_DRBL_BUSY_BIT);
+
+ /* Flush writes to doorbell and REMAP2 */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+}
+
+/**
+ * _ish_hw_reset() - HW reset
+ * @dev: ishtp device pointer
+ *
+ * Reset ISH HW to recover from any error
+ *
+ * Return: 0 for success else error fault code
+ */
+static int _ish_hw_reset(struct ishtp_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+ int rv;
+ uint16_t csr;
+
+ if (!pdev)
+ return -ENODEV;
+
+ rv = pci_reset_function(pdev);
+ if (!rv)
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ if (!pdev->pm_cap) {
+ dev_err(&pdev->dev, "Can't reset - no PM caps\n");
+ return -EINVAL;
+ }
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return -EBUSY;
+ }
+
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &csr);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D3hot;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
+
+ mdelay(pdev->d3_delay);
+
+ csr &= ~PCI_PM_CTRL_STATE_MASK;
+ csr |= PCI_D0;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, csr);
+
+ /* Now we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
+
+ return 0;
+}
+
+/**
+ * _ish_ipc_reset() - IPC reset
+ * @dev: ishtp device pointer
+ *
+ * Resets host and fw IPC and upper layers
+ *
+ * Return: 0 for success else error fault code
+ */
+static int _ish_ipc_reset(struct ishtp_device *dev)
+{
+ struct ipc_rst_payload_type ipc_mng_msg;
+ int rv = 0;
+
+ ipc_mng_msg.reset_id = 1;
+ ipc_mng_msg.reserved = 0;
+
+ set_host_ready(dev);
+
+ /* Clear the incoming doorbell */
+ ish_reg_write(dev, IPC_REG_ISH2HOST_DRBL, 0);
+ /* Flush write to doorbell */
+ ish_reg_read(dev, IPC_REG_ISH_HOST_FWSTS);
+
+ dev->recvd_hw_ready = 0;
+
+ /* send message */
+ rv = ipc_send_mng_msg(dev, MNG_RESET_NOTIFY, &ipc_mng_msg,
+ sizeof(struct ipc_rst_payload_type));
+ if (rv) {
+ dev_err(dev->devc, "Failed to send IPC MNG_RESET_NOTIFY\n");
+ return rv;
+ }
+
+ wait_event_interruptible_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready, 2 * HZ);
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->devc, "Timed out waiting for HW ready\n");
+ rv = -ENODEV;
+ }
+
+ return rv;
+}
+
+/**
+ * ish_hw_start() - Start ISH HW
+ * @dev: ishtp device pointer
+ *
+ * Set host to ready state and wait for FW reset
+ *
+ * Return: 0 for success else error fault code
+ */
+int ish_hw_start(struct ishtp_device *dev)
+{
+ ish_set_host_rdy(dev);
+
+ /* After that we can enable ISH DMA operation and wakeup ISHFW */
+ ish_wakeup(dev);
+
+ set_host_ready(dev);
+
+ /* wait for FW-initiated reset flow */
+ if (!dev->recvd_hw_ready)
+ wait_event_interruptible_timeout(dev->wait_hw_ready,
+ dev->recvd_hw_ready,
+ 10 * HZ);
+
+ if (!dev->recvd_hw_ready) {
+ dev_err(dev->devc,
+ "[ishtp-ish]: Timed out waiting for FW-initiated reset\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ish_ipc_get_header() - Get doorbell value
+ * @dev: ishtp device pointer
+ * @length: length of message
+ * @busy: busy status
+ *
+ * Get doorbell value from message header
+ *
+ * Return: door bell value
+ */
+static uint32_t ish_ipc_get_header(struct ishtp_device *dev, int length,
+ int busy)
+{
+ uint32_t drbl_val;
+
+ drbl_val = IPC_BUILD_HEADER(length, IPC_PROTOCOL_ISHTP, busy);
+
+ return drbl_val;
+}
+
+static const struct ishtp_hw_ops ish_hw_ops = {
+ .hw_reset = _ish_hw_reset,
+ .ipc_reset = _ish_ipc_reset,
+ .ipc_get_header = ish_ipc_get_header,
+ .ishtp_read = _ishtp_read,
+ .write = write_ipc_to_queue,
+ .get_fw_status = _ish_read_fw_sts_reg,
+ .sync_fw_clock = _ish_sync_fw_clock,
+ .ishtp_read_hdr = _ishtp_read_hdr
+};
+
+/**
+ * ish_dev_init() - Initialize ISH device
+ * @pdev: PCI device
+ *
+ * Allocate ISHTP device and initialize IPC processing
+ *
+ * Return: ISHTP device instance on success else NULL
+ */
+struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
+{
+ struct ishtp_device *dev;
+ int i;
+
+ dev = devm_kzalloc(&pdev->dev,
+ sizeof(struct ishtp_device) + sizeof(struct ish_hw),
+ GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ ishtp_device_init(dev);
+
+ init_waitqueue_head(&dev->wait_hw_ready);
+
+ spin_lock_init(&dev->wr_processing_spinlock);
+ spin_lock_init(&dev->out_ipc_spinlock);
+
+ /* Init IPC processing and free lists */
+ INIT_LIST_HEAD(&dev->wr_processing_list_head.link);
+ INIT_LIST_HEAD(&dev->wr_free_list_head.link);
+ for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
+ struct wr_msg_ctl_info *tx_buf;
+
+ tx_buf = devm_kzalloc(&pdev->dev,
+ sizeof(struct wr_msg_ctl_info),
+ GFP_KERNEL);
+ if (!tx_buf) {
+ /*
+ * IPC buffers may be limited or not available
+ * at all - although this shouldn't happen
+ */
+ dev_err(dev->devc,
+ "[ishtp-ish]: failure in Tx FIFO allocations (%d)\n",
+ i);
+ break;
+ }
+ list_add_tail(&tx_buf->link, &dev->wr_free_list_head.link);
+ }
+
+ dev->ops = &ish_hw_ops;
+ dev->devc = &pdev->dev;
+ dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
+ return dev;
+}
+
+/**
+ * ish_device_disable() - Disable ISH device
+ * @dev: ISHTP device pointer
+ *
+ * Disable ISH by clearing host ready to inform firmware.
+ */
+void ish_device_disable(struct ishtp_device *dev)
+{
+ struct pci_dev *pdev = dev->pdev;
+
+ if (!pdev)
+ return;
+
+ /* Disable dma communication between FW and host */
+ if (ish_disable_dma(dev)) {
+ dev_err(&pdev->dev,
+ "Can't reset - stuck with DMA in-progress\n");
+ return;
+ }
+
+ /* Put ISH to D3hot state for power saving */
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ ish_clr_host_rdy(dev);
+}
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
new file mode 100644
index 00000000000000..256b3016116cec
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -0,0 +1,342 @@
+/*
+ * PCI glue for ISHTP provider device (ISH) driver
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/intel_ish.h>
+#include "ishtp-dev.h"
+#include "hw-ish.h"
+
+static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CHV_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, BXT_Bx_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, APL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+
+/**
+ * ish_event_tracer() - Callback function to dump trace messages
+ * @dev: ishtp device
+ * @format: printf style format
+ *
+ * Callback to direct log messages to Linux trace buffers
+ */
+static __printf(2, 3)
+void ish_event_tracer(struct ishtp_device *dev, const char *format, ...)
+{
+ if (trace_ishtp_dump_enabled()) {
+ va_list args;
+ char tmp_buf[100];
+
+ va_start(args, format);
+ vsnprintf(tmp_buf, sizeof(tmp_buf), format, args);
+ va_end(args);
+
+ trace_ishtp_dump(tmp_buf);
+ }
+}
+
+/**
+ * ish_init() - Init function
+ * @dev: ishtp device
+ *
+ * This function initializes wait queues for suspend/resume and calls
+ * the hardware initialization function. This will initiate the
+ * startup sequence.
+ *
+ * Return: 0 for success or error code for failure
+ */
+static int ish_init(struct ishtp_device *dev)
+{
+ int ret;
+
+ /* Set the state of ISH HW to start */
+ ret = ish_hw_start(dev);
+ if (ret) {
+ dev_err(dev->devc, "ISH: hw start failed.\n");
+ return ret;
+ }
+
+ /* Start the inter process communication to ISH processor */
+ ret = ishtp_start(dev);
+ if (ret) {
+ dev_err(dev->devc, "ISHTP: Protocol init failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct pci_device_id ish_invalid_pci_ids[] = {
+ /* Mehlow platform special pci ids */
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA309)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xA30A)},
+ {}
+};
+
+/**
+ * ish_probe() - PCI driver probe callback
+ * @pdev: pci device
+ * @ent: pci device id
+ *
+ * Initialize PCI function, setup interrupt and call for ISH initialization
+ *
+ * Return: 0 for success or error code for failure
+ */
+static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct ishtp_device *dev;
+ struct ish_hw *hw;
+ int ret;
+
+ /* Check for invalid platforms for ISH support */
+ if (pci_dev_present(ish_invalid_pci_ids))
+ return -ENODEV;
+
+ /* enable pci dev */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "ISH: Failed to enable PCI device\n");
+ return ret;
+ }
+
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+
+ /* pci request regions for ISH driver */
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ dev_err(&pdev->dev, "ISH: Failed to get PCI regions\n");
+ goto disable_device;
+ }
+
+ /* allocates and initializes the ISH dev structure */
+ dev = ish_dev_init(pdev);
+ if (!dev) {
+ ret = -ENOMEM;
+ goto release_regions;
+ }
+ hw = to_ish_hw(dev);
+ dev->print_log = ish_event_tracer;
+
+ /* mapping IO device memory */
+ hw->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!hw->mem_addr) {
+ dev_err(&pdev->dev, "ISH: mapping I/O range failure\n");
+ ret = -ENOMEM;
+ goto free_device;
+ }
+
+ dev->pdev = pdev;
+
+ pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
+
+ /* request and enable interrupt */
+ ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME, dev);
+ if (ret) {
+ dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
+ pdev->irq);
+ goto free_device;
+ }
+
+ dev_set_drvdata(dev->devc, dev);
+
+ init_waitqueue_head(&dev->suspend_wait);
+ init_waitqueue_head(&dev->resume_wait);
+
+ ret = ish_init(dev);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ free_irq(pdev->irq, dev);
+free_device:
+ pci_iounmap(pdev, hw->mem_addr);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "ISH: PCI driver initialization failed.\n");
+
+ return ret;
+}
+
+/**
+ * ish_remove() - PCI driver remove callback
+ * @pdev: pci device
+ *
+ * This function performs ISH cleanup in the pci remove callback
+ */
+static void ish_remove(struct pci_dev *pdev)
+{
+ struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
+ struct ish_hw *hw = to_ish_hw(ishtp_dev);
+
+ ishtp_bus_remove_all_clients(ishtp_dev, false);
+ ish_device_disable(ishtp_dev);
+
+ free_irq(pdev->irq, ishtp_dev);
+ pci_iounmap(pdev, hw->mem_addr);
+ pci_release_regions(pdev);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct device __maybe_unused *ish_resume_device;
+
+/* 50ms to get resume response */
+#define WAIT_FOR_RESUME_ACK_MS 50
+
+/**
+ * ish_resume_handler() - Work function to complete resume
+ * @work: work struct
+ *
+ * The resume work function completes the resume asynchronously.
+ * There are two resume paths: if ISH was not powered off, a simple
+ * resume message is enough; otherwise a full reset sequence is needed.
+ */
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
+{
+ struct pci_dev *pdev = to_pci_dev(ish_resume_device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+ uint32_t fwsts;
+ int ret;
+
+ /* Get ISH FW status */
+ fwsts = IPC_GET_ISH_FWSTS(dev->ops->get_fw_status(dev));
+
+ /*
+ * If the sensor app in ISH FW is loaded, or beyond that state, ISH
+ * isn't powered off; in this case, send a resume message.
+ */
+ if (fwsts >= FWSTS_SENSOR_APP_LOADED) {
+ ishtp_send_resume(dev);
+
+ /* Waiting to get resume response */
+ if (dev->resume_flag)
+ ret = wait_event_interruptible_timeout(dev->resume_wait,
+ !dev->resume_flag,
+ msecs_to_jiffies(WAIT_FOR_RESUME_ACK_MS));
+ }
+
+ /*
+ * If the sensor app isn't loaded yet in ISH FW, or there was no resume
+ * response, this platform is not S0ix compatible or something is wrong
+ * with ISH FW. In that case a full reboot of the ISH processor happens
+ * on resume, so we need to go through the init sequence again.
+ */
+ if (dev->resume_flag)
+ ish_init(dev);
+}
+
+/**
+ * ish_suspend() - ISH suspend callback
+ * @device: device pointer
+ *
+ * ISH suspend callback
+ *
+ * Return: 0 to the pm core
+ */
+static int __maybe_unused ish_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+
+ enable_irq_wake(pdev->irq);
+ /*
+ * If the previous suspend hasn't been answered then ISH is likely dead,
+ * don't attempt nested notification
+ */
+ if (dev->suspend_flag)
+ return 0;
+
+ dev->resume_flag = 0;
+ dev->suspend_flag = 1;
+ ishtp_send_suspend(dev);
+
+ /* 25 ms should be enough for live ISH to flush all IPC buf */
+ if (dev->suspend_flag)
+ wait_event_interruptible_timeout(dev->suspend_wait,
+ !dev->suspend_flag,
+ msecs_to_jiffies(25));
+
+ return 0;
+}
+
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
+/**
+ * ish_resume() - ISH resume callback
+ * @device: device pointer
+ *
+ * ISH resume callback
+ *
+ * Return: 0 to the pm core
+ */
+static int __maybe_unused ish_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct ishtp_device *dev = pci_get_drvdata(pdev);
+
+ ish_resume_device = device;
+ dev->resume_flag = 1;
+
+ disable_irq_wake(pdev->irq);
+ schedule_work(&resume_work);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
+
+static struct pci_driver ish_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ish_pci_tbl,
+ .probe = ish_probe,
+ .remove = ish_remove,
+ .driver.pm = &ish_pm_ops,
+};
+
+module_pci_driver(ish_driver);
+
+/* Original author */
+MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
+/* Adaptation for the upstream Linux kernel */
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+
+MODULE_DESCRIPTION("Intel(R) Integrated Sensor Hub PCI Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
new file mode 100644
index 00000000000000..9def6938e298dc
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -0,0 +1,988 @@
+/*
+ * ISHTP client driver for HID (ISH)
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/hid.h>
+#include <linux/sched.h>
+#include "ishtp/ishtp-dev.h"
+#include "ishtp/client.h"
+#include "ishtp-hid.h"
+
+/* Rx ring buffer pool size */
+#define HID_CL_RX_RING_SIZE 32
+#define HID_CL_TX_RING_SIZE 16
+
+/**
+ * report_bad_packet() - Report a bad packet
+ * @hid_ishtp_cl: Client instance to get stats
+ * @recv_buf: Raw received host interface message
+ * @cur_pos: Current position index in payload
+ * @payload_len: Length of payload expected
+ *
+ * Dumps an error when a bad packet is received
+ */
+static void report_bad_packet(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
+ size_t cur_pos, size_t payload_len)
+{
+ struct hostif_msg *recv_msg = recv_buf;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ dev_err(&client_data->cl_device->dev, "[hid-ish]: BAD packet %02X\n"
+ "total_bad=%u cur_pos=%u\n"
+ "[%02X %02X %02X %02X]\n"
+ "payload_len=%u\n"
+ "multi_packet_cnt=%u\n"
+ "is_response=%02X\n",
+ recv_msg->hdr.command, client_data->bad_recv_cnt,
+ (unsigned int)cur_pos,
+ ((unsigned char *)recv_msg)[0], ((unsigned char *)recv_msg)[1],
+ ((unsigned char *)recv_msg)[2], ((unsigned char *)recv_msg)[3],
+ (unsigned int)payload_len, client_data->multi_packet_cnt,
+ recv_msg->hdr.command & ~CMD_MASK);
+}
+
+/**
+ * process_recv() - Receive and parse an incoming packet
+ * @hid_ishtp_cl: Client instance to get stats
+ * @recv_buf: Raw received host interface message
+ * @data_len: length of the message
+ *
+ * Parse the incoming packet. If it is a response packet then it will
+ * update per-instance flags and wake up the caller waiting for the
+ * response.
+ */
+static void process_recv(struct ishtp_cl *hid_ishtp_cl, void *recv_buf,
+ size_t data_len)
+{
+ struct hostif_msg *recv_msg;
+ unsigned char *payload;
+ struct device_info *dev_info;
+ int i, j;
+ size_t payload_len, total_len, cur_pos, raw_len;
+ int report_type;
+ struct report_list *reports_list;
+ char *reports;
+ size_t report_len;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int curr_hid_dev = client_data->cur_hid_dev;
+ struct ishtp_hid_data *hid_data = NULL;
+ struct hid_device *hid = NULL;
+
+ payload = recv_buf + sizeof(struct hostif_msg_hdr);
+ total_len = data_len;
+ cur_pos = 0;
+
+ do {
+ if (cur_pos + sizeof(struct hostif_msg) > total_len) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: error, received %u which is less than data header %u\n",
+ (unsigned int)data_len,
+ (unsigned int)sizeof(struct hostif_msg_hdr));
+ ++client_data->bad_recv_cnt;
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+
+ recv_msg = (struct hostif_msg *)(recv_buf + cur_pos);
+ payload_len = recv_msg->hdr.size;
+
+ /* Sanity checks */
+ if (cur_pos + payload_len + sizeof(struct hostif_msg) >
+ total_len) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+
+ hid_ishtp_trace(client_data, "%s %d\n",
+ __func__, recv_msg->hdr.command & CMD_MASK);
+
+ switch (recv_msg->hdr.command & CMD_MASK) {
+ case HOSTIF_DM_ENUM_DEVICES:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+ client_data->hid_dev_count = (unsigned int)*payload;
+ if (!client_data->hid_devices)
+ client_data->hid_devices = devm_kzalloc(
+ &client_data->cl_device->dev,
+ client_data->hid_dev_count *
+ sizeof(struct device_info),
+ GFP_KERNEL);
+ if (!client_data->hid_devices) {
+ dev_err(&client_data->cl_device->dev,
+ "Mem alloc failed for hid device info\n");
+ wake_up_interruptible(&client_data->init_wait);
+ break;
+ }
+ for (i = 0; i < client_data->hid_dev_count; ++i) {
+ if (1 + sizeof(struct device_info) * i >=
+ payload_len) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: [ENUM_DEVICES]: content size %zu is bigger than payload_len %zu\n",
+ 1 + sizeof(struct device_info)
+ * i, payload_len);
+ }
+
+ if (1 + sizeof(struct device_info) * i >=
+ data_len)
+ break;
+
+ dev_info = (struct device_info *)(payload + 1 +
+ sizeof(struct device_info) * i);
+ if (client_data->hid_devices)
+ memcpy(client_data->hid_devices + i,
+ dev_info,
+ sizeof(struct device_info));
+ }
+
+ client_data->enum_devices_done = true;
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_HID_DESCRIPTOR:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+ if (!client_data->hid_descr[curr_hid_dev])
+ client_data->hid_descr[curr_hid_dev] =
+ devm_kmalloc(&client_data->cl_device->dev,
+ payload_len, GFP_KERNEL);
+ if (client_data->hid_descr[curr_hid_dev]) {
+ memcpy(client_data->hid_descr[curr_hid_dev],
+ payload, payload_len);
+ client_data->hid_descr_size[curr_hid_dev] =
+ payload_len;
+ client_data->hid_descr_done = true;
+ }
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_REPORT_DESCRIPTOR:
+ if ((!(recv_msg->hdr.command & ~CMD_MASK) ||
+ client_data->init_done)) {
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg,
+ cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+ }
+ if (!client_data->report_descr[curr_hid_dev])
+ client_data->report_descr[curr_hid_dev] =
+ devm_kmalloc(&client_data->cl_device->dev,
+ payload_len, GFP_KERNEL);
+ if (client_data->report_descr[curr_hid_dev]) {
+ memcpy(client_data->report_descr[curr_hid_dev],
+ payload,
+ payload_len);
+ client_data->report_descr_size[curr_hid_dev] =
+ payload_len;
+ client_data->report_descr_done = true;
+ }
+ wake_up_interruptible(&client_data->init_wait);
+
+ break;
+
+ case HOSTIF_GET_FEATURE_REPORT:
+ report_type = HID_FEATURE_REPORT;
+ goto do_get_report;
+
+ case HOSTIF_GET_INPUT_REPORT:
+ report_type = HID_INPUT_REPORT;
+do_get_report:
+ /* Get index of device that matches this id */
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id) {
+ hid = client_data->hid_sensor_hubs[i];
+ if (!hid)
+ break;
+
+ hid_data = hid->driver_data;
+ if (hid_data->raw_get_req) {
+ raw_len =
+ (hid_data->raw_buf_size <
+ payload_len) ?
+ hid_data->raw_buf_size :
+ payload_len;
+
+ memcpy(hid_data->raw_buf,
+ payload, raw_len);
+ } else {
+ hid_input_report
+ (hid, report_type,
+ payload, payload_len,
+ 0);
+ }
+
+ ishtp_hid_wakeup(hid);
+ break;
+ }
+ }
+ break;
+
+ case HOSTIF_SET_FEATURE_REPORT:
+ /* Get index of device that matches this id */
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i]) {
+ ishtp_hid_wakeup(
+ client_data->hid_sensor_hubs[
+ i]);
+ break;
+ }
+ }
+ break;
+
+ case HOSTIF_PUBLISH_INPUT_REPORT:
+ report_type = HID_INPUT_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i)
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id)
+ if (client_data->hid_sensor_hubs[i])
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type, payload,
+ payload_len, 0);
+ break;
+
+ case HOSTIF_PUBLISH_INPUT_REPORT_LIST:
+ report_type = HID_INPUT_REPORT;
+ reports_list = (struct report_list *)payload;
+ reports = (char *)reports_list->reports;
+
+ for (j = 0; j < reports_list->num_of_reports; j++) {
+ recv_msg = (struct hostif_msg *)(reports +
+ sizeof(uint16_t));
+ report_len = *(uint16_t *)reports;
+ payload = reports + sizeof(uint16_t) +
+ sizeof(struct hostif_msg_hdr);
+ payload_len = report_len -
+ sizeof(struct hostif_msg_hdr);
+
+ for (i = 0; i < client_data->num_hid_devices;
+ ++i)
+ if (recv_msg->hdr.device_id ==
+ client_data->hid_devices[i].dev_id &&
+ client_data->hid_sensor_hubs[i]) {
+ hid_input_report(
+ client_data->hid_sensor_hubs[
+ i],
+ report_type,
+ payload, payload_len,
+ 0);
+ }
+
+ reports += sizeof(uint16_t) + report_len;
+ }
+ break;
+ default:
+ ++client_data->bad_recv_cnt;
+ report_bad_packet(hid_ishtp_cl, recv_msg, cur_pos,
+ payload_len);
+ ish_hw_reset(hid_ishtp_cl->dev);
+ break;
+
+ }
+
+ if (!cur_pos && cur_pos + payload_len +
+ sizeof(struct hostif_msg) < total_len)
+ ++client_data->multi_packet_cnt;
+
+ cur_pos += payload_len + sizeof(struct hostif_msg);
+ payload += payload_len + sizeof(struct hostif_msg);
+
+ } while (cur_pos < total_len);
+}
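+
+/*
+ * Wire framing handled by process_recv() above (reconstructed from the
+ * cursor arithmetic at the end of its loop): one ISHTP buffer may carry
+ * several messages, each a struct hostif_msg header followed by its
+ * payload, laid out back to back:
+ *
+ *	| hostif_msg | payload | hostif_msg | payload | ...
+ *
+ * cur_pos advances by sizeof(struct hostif_msg) + payload_len per message,
+ * and multi_packet_cnt counts buffers that carry more than one message.
+ */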
+
+/**
+ * ish_cl_event_cb() - bus driver callback for incoming message/packet
+ * @device: Pointer to the ishtp client device for which this message
+ * is targeted
+ *
+ * Remove the packet from the list and process the message by calling
+ * process_recv
+ */
+static void ish_cl_event_cb(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl *hid_ishtp_cl = device->driver_data;
+ struct ishtp_cl_rb *rb_in_proc;
+ size_t r_length;
+ unsigned long flags;
+
+ if (!hid_ishtp_cl)
+ return;
+
+ spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
+ while (!list_empty(&hid_ishtp_cl->in_process_list.list)) {
+ rb_in_proc = list_entry(
+ hid_ishtp_cl->in_process_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&rb_in_proc->list);
+ spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock,
+ flags);
+
+ if (!rb_in_proc->buffer.data)
+ return;
+
+ r_length = rb_in_proc->buf_idx;
+
+ /* decide what to do with received data */
+ process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length);
+
+ ishtp_cl_io_rb_recycle(rb_in_proc);
+ spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
+ }
+ spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, flags);
+}
+
+/**
+ * hid_ishtp_set_feature() - send request to ISH FW to set a feature request
+ * @hid: hid device instance for this request
+ * @buf: feature buffer
+ * @len: Length of feature buffer
+ * @report_id: Report id for the feature set request
+ *
+ * This is called from hid core .request() callback. This function doesn't wait
+ * for response.
+ */
+void hid_ishtp_set_feature(struct hid_device *hid, char *buf, unsigned int len,
+ int report_id)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ struct hostif_msg *msg = (struct hostif_msg *)buf;
+ int rv;
+ int i;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+
+ rv = ishtp_hid_link_ready_wait(client_data);
+ if (rv) {
+ hid_ishtp_trace(client_data, "%s hid %p link not ready\n",
+ __func__, hid);
+ return;
+ }
+
+ memset(msg, 0, sizeof(struct hostif_msg));
+ msg->hdr.command = HOSTIF_SET_FEATURE_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (hid == client_data->hid_sensor_hubs[i]) {
+ msg->hdr.device_id =
+ client_data->hid_devices[i].dev_id;
+ break;
+ }
+ }
+
+ if (i == client_data->num_hid_devices)
+ return;
+
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, buf, len);
+ if (rv)
+ hid_ishtp_trace(client_data, "%s hid %p send failed\n",
+ __func__, hid);
+}
+
+/**
+ * hid_ishtp_get_report() - request to get feature/input report
+ * @hid: hid device instance for this request
+ * @report_id: Report id for the get request
+ * @report_type: Report type for this request
+ *
+ * This is called from hid core .request() callback. This function will send
+ * request to FW and return without waiting for response.
+ */
+void hid_ishtp_get_report(struct hid_device *hid, int report_id,
+ int report_type)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ struct hostif_msg_to_sensor msg = {};
+ int rv;
+ int i;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+ rv = ishtp_hid_link_ready_wait(client_data);
+ if (rv) {
+ hid_ishtp_trace(client_data, "%s hid %p link not ready\n",
+ __func__, hid);
+ return;
+ }
+
+ msg.hdr.command = (report_type == HID_FEATURE_REPORT) ?
+ HOSTIF_GET_FEATURE_REPORT : HOSTIF_GET_INPUT_REPORT;
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (hid == client_data->hid_sensor_hubs[i]) {
+ msg.hdr.device_id =
+ client_data->hid_devices[i].dev_id;
+ break;
+ }
+ }
+
+ if (i == client_data->num_hid_devices)
+ return;
+
+ msg.report_id = report_id;
+ rv = ishtp_cl_send(client_data->hid_ishtp_cl, (uint8_t *)&msg,
+ sizeof(msg));
+ if (rv)
+ hid_ishtp_trace(client_data, "%s hid %p send failed\n",
+ __func__, hid);
+}
+
+/**
+ * ishtp_hid_link_ready_wait() - Wait for link ready
+ * @client_data: client data instance
+ *
+ * If the transport link has started the suspend process, wait until it is
+ * resumed or the wait times out
+ *
+ * Return: 0 on success, non zero on error
+ */
+int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data)
+{
+ int rc;
+
+ if (client_data->suspended) {
+ hid_ishtp_trace(client_data, "wait for link ready\n");
+ rc = wait_event_interruptible_timeout(
+ client_data->ishtp_resume_wait,
+ !client_data->suspended,
+ 5 * HZ);
+
+ if (rc == 0) {
+ hid_ishtp_trace(client_data, "link not ready\n");
+ return -EIO;
+ }
+ hid_ishtp_trace(client_data, "link ready\n");
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_enum_enum_devices() - Enumerate hid devices
+ * @hid_ishtp_cl: client instance
+ *
+ * Helper function to send request to firmware to enumerate HID devices
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_enum_enum_devices(struct ishtp_cl *hid_ishtp_cl)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int retry_count;
+ int rv;
+
+ /* Send HOSTIF_DM_ENUM_DEVICES */
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_DM_ENUM_DEVICES;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *)&msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ retry_count = 0;
+ while (!client_data->enum_devices_done &&
+ retry_count < 10) {
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->enum_devices_done,
+ 3 * HZ);
+ ++retry_count;
+ if (!client_data->enum_devices_done)
+ /* Send HOSTIF_DM_ENUM_DEVICES */
+ rv = ishtp_cl_send(hid_ishtp_cl,
+ (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ }
+ if (!client_data->enum_devices_done) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: timed out waiting for enum_devices\n");
+ return -ETIMEDOUT;
+ }
+ if (!client_data->hid_devices) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: failed to allocate HID dev structures\n");
+ return -ENOMEM;
+ }
+
+ client_data->num_hid_devices = client_data->hid_dev_count;
+ dev_info(&hid_ishtp_cl->device->dev,
+ "[hid-ish]: enum_devices_done OK, num_hid_devices=%d\n",
+ client_data->num_hid_devices);
+
+ return 0;
+}
+
+/**
+ * ishtp_get_hid_descriptor() - Get hid descriptor
+ * @hid_ishtp_cl: client instance
+ * @index: Index into the hid_descr array
+ *
+ * Helper function to send a request to the firmware to get the HID
+ * descriptor of a device
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_get_hid_descriptor(struct ishtp_cl *hid_ishtp_cl, int index)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int rv;
+
+ /* Get HID descriptor */
+ client_data->hid_descr_done = false;
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_GET_HID_DESCRIPTOR;
+ msg.hdr.device_id = client_data->hid_devices[index].dev_id;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ if (!client_data->hid_descr_done) {
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->hid_descr_done,
+ 3 * HZ);
+ if (!client_data->hid_descr_done) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: timed out for hid_descr_done\n");
+ return -EIO;
+ }
+
+ if (!client_data->hid_descr[index]) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: allocation HID desc fail\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ishtp_get_report_descriptor() - Get report descriptor
+ * @hid_ishtp_cl: client instance
+ * @index: Index into the hid_descr array
+ *
+ * Helper function to send a request to the firmware to get the HID report
+ * descriptor of a device
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
+ int index)
+{
+ struct hostif_msg msg;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int rv;
+
+ /* Get report descriptor */
+ client_data->report_descr_done = false;
+ memset(&msg, 0, sizeof(struct hostif_msg));
+ msg.hdr.command = HOSTIF_GET_REPORT_DESCRIPTOR;
+ msg.hdr.device_id = client_data->hid_devices[index].dev_id;
+ rv = ishtp_cl_send(hid_ishtp_cl, (unsigned char *) &msg,
+ sizeof(struct hostif_msg));
+ if (rv)
+ return rv;
+
+ if (!client_data->report_descr_done)
+ wait_event_interruptible_timeout(client_data->init_wait,
+ client_data->report_descr_done,
+ 3 * HZ);
+ if (!client_data->report_descr_done) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: timed out for report descr\n");
+ return -EIO;
+ }
+ if (!client_data->report_descr[index]) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: failed to alloc report descr\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_init() - Init function for ISHTP client
+ * @hid_ishtp_cl: ISHTP client instance
+ * @reset: true if called for init after reset
+ *
+ * This function completes the initialization of the client. Summary of
+ * processing:
+ * - Send request to enumerate the hid clients
+ * - Get the HID descriptor for each enumerated device
+ * - Get the report descriptor of each device
+ * - Register each device with hid core by calling ishtp_hid_probe
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
+{
+ struct ishtp_device *dev;
+ unsigned long flags;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ int i;
+ int rv;
+
+ dev_dbg(&client_data->cl_device->dev, "%s\n", __func__);
+ hid_ishtp_trace(client_data, "%s reset flag: %d\n", __func__, reset);
+
+ rv = ishtp_cl_link(hid_ishtp_cl, ISHTP_HOST_CLIENT_ID_ANY);
+ if (rv) {
+ dev_err(&client_data->cl_device->dev,
+ "ishtp_cl_link failed\n");
+ return -ENOMEM;
+ }
+
+ client_data->init_done = 0;
+
+ dev = hid_ishtp_cl->dev;
+
+ /* Connect to FW client */
+ hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
+ hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, &hid_ishtp_guid);
+ if (i < 0) {
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ dev_err(&client_data->cl_device->dev,
+ "ish client uuid not found\n");
+ return i;
+ }
+ hid_ishtp_cl->fw_client_id = dev->fw_clients[i].client_id;
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
+
+ rv = ishtp_cl_connect(hid_ishtp_cl);
+ if (rv) {
+ dev_err(&client_data->cl_device->dev,
+ "client connect fail\n");
+ goto err_cl_unlink;
+ }
+
+ hid_ishtp_trace(client_data, "%s client connected\n", __func__);
+
+ /* Register read callback */
+ ishtp_register_event_cb(hid_ishtp_cl->device, ish_cl_event_cb);
+
+ rv = ishtp_enum_enum_devices(hid_ishtp_cl);
+ if (rv)
+ goto err_cl_disconnect;
+
+ hid_ishtp_trace(client_data, "%s enumerated device count %d\n",
+ __func__, client_data->num_hid_devices);
+
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ client_data->cur_hid_dev = i;
+
+ rv = ishtp_get_hid_descriptor(hid_ishtp_cl, i);
+ if (rv)
+ goto err_cl_disconnect;
+
+ rv = ishtp_get_report_descriptor(hid_ishtp_cl, i);
+ if (rv)
+ goto err_cl_disconnect;
+
+ if (!reset) {
+ rv = ishtp_hid_probe(i, client_data);
+ if (rv) {
+ dev_err(&client_data->cl_device->dev,
+ "[hid-ish]: HID probe for #%u failed: %d\n",
+ i, rv);
+ goto err_cl_disconnect;
+ }
+ }
+ } /* for() on all hid devices */
+
+ client_data->init_done = 1;
+ client_data->suspended = false;
+ wake_up_interruptible(&client_data->ishtp_resume_wait);
+ hid_ishtp_trace(client_data, "%s successful init\n", __func__);
+ return 0;
+
+err_cl_disconnect:
+ hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_cl_disconnect(hid_ishtp_cl);
+err_cl_unlink:
+ ishtp_cl_unlink(hid_ishtp_cl);
+ return rv;
+}
+
+/**
+ * hid_ishtp_cl_deinit() - Deinit function for ISHTP client
+ * @hid_ishtp_cl: ISHTP client instance
+ *
+ * Unlink and free hid client
+ */
+static void hid_ishtp_cl_deinit(struct ishtp_cl *hid_ishtp_cl)
+{
+ ishtp_cl_unlink(hid_ishtp_cl);
+ ishtp_cl_flush_queues(hid_ishtp_cl);
+
+ /* disband and free all Tx and Rx client-level rings */
+ ishtp_cl_free(hid_ishtp_cl);
+}
+
+static void hid_ishtp_cl_reset_handler(struct work_struct *work)
+{
+ struct ishtp_cl_data *client_data;
+ struct ishtp_cl *hid_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+ int retry;
+ int rv;
+
+ client_data = container_of(work, struct ishtp_cl_data, work);
+
+ hid_ishtp_cl = client_data->hid_ishtp_cl;
+ cl_device = client_data->cl_device;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ dev_dbg(&cl_device->dev, "%s\n", __func__);
+
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
+
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ if (!hid_ishtp_cl)
+ return;
+
+ cl_device->driver_data = hid_ishtp_cl;
+ hid_ishtp_cl->client_data = client_data;
+ client_data->hid_ishtp_cl = hid_ishtp_cl;
+
+ client_data->num_hid_devices = 0;
+
+ for (retry = 0; retry < 3; ++retry) {
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, 1);
+ if (!rv)
+ break;
+ dev_err(&client_data->cl_device->dev, "Retry reset init\n");
+ }
+ if (rv) {
+ dev_err(&client_data->cl_device->dev, "Reset Failed\n");
+ hid_ishtp_trace(client_data, "%s Failed hid_ishtp_cl %p\n",
+ __func__, hid_ishtp_cl);
+ }
+}
+
+/**
+ * hid_ishtp_cl_probe() - ISHTP client driver probe
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device create on ISHTP bus
+ *
+ * Return: 0 on success, non zero on error
+ */
+static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl;
+ struct ishtp_cl_data *client_data;
+ int rv;
+
+ if (!cl_device)
+ return -ENODEV;
+
+ if (uuid_le_cmp(hid_ishtp_guid,
+ cl_device->fw_client->props.protocol_name) != 0)
+ return -ENODEV;
+
+ client_data = devm_kzalloc(&cl_device->dev, sizeof(*client_data),
+ GFP_KERNEL);
+ if (!client_data)
+ return -ENOMEM;
+
+ hid_ishtp_cl = ishtp_cl_allocate(cl_device->ishtp_dev);
+ if (!hid_ishtp_cl)
+ return -ENOMEM;
+
+ cl_device->driver_data = hid_ishtp_cl;
+ hid_ishtp_cl->client_data = client_data;
+ client_data->hid_ishtp_cl = hid_ishtp_cl;
+ client_data->cl_device = cl_device;
+
+ init_waitqueue_head(&client_data->init_wait);
+ init_waitqueue_head(&client_data->ishtp_resume_wait);
+
+ INIT_WORK(&client_data->work, hid_ishtp_cl_reset_handler);
+
+ rv = hid_ishtp_cl_init(hid_ishtp_cl, 0);
+ if (rv) {
+ ishtp_cl_free(hid_ishtp_cl);
+ return rv;
+ }
+ ishtp_get_device(cl_device);
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_remove() - ISHTP client driver remove
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device remove on ISHTP bus
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+
+ dev_dbg(&cl_device->dev, "%s\n", __func__);
+ hid_ishtp_cl->state = ISHTP_CL_DISCONNECTING;
+ ishtp_cl_disconnect(hid_ishtp_cl);
+ ishtp_put_device(cl_device);
+ ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
+
+ hid_ishtp_cl = NULL;
+
+ client_data->num_hid_devices = 0;
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_reset() - ISHTP client driver reset
+ * @cl_device: ISHTP client device instance
+ *
+ * This function gets called on device reset on ISHTP bus
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+
+ schedule_work(&client_data->work);
+
+ return 0;
+}
+
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+
+/**
+ * hid_ishtp_cl_suspend() - ISHTP client driver suspend
+ * @device: device instance
+ *
+ * This function gets called on system suspend
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ client_data->suspended = true;
+
+ return 0;
+}
+
+/**
+ * hid_ishtp_cl_resume() - ISHTP client driver resume
+ * @device: device instance
+ *
+ * This function gets called on system resume
+ *
+ * Return: 0
+ */
+static int hid_ishtp_cl_resume(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
+ struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+
+ hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
+ hid_ishtp_cl);
+ client_data->suspended = false;
+ return 0;
+}
+
+static const struct dev_pm_ops hid_ishtp_pm_ops = {
+ .suspend = hid_ishtp_cl_suspend,
+ .resume = hid_ishtp_cl_resume,
+};
+
+static struct ishtp_cl_driver hid_ishtp_cl_driver = {
+ .name = "ish-hid",
+ .probe = hid_ishtp_cl_probe,
+ .remove = hid_ishtp_cl_remove,
+ .reset = hid_ishtp_cl_reset,
+ .driver.pm = &hid_ishtp_pm_ops,
+};
+
+static int __init ish_hid_init(void)
+{
+ int rv;
+
+ /* Register ISHTP client device driver with ISHTP Bus */
+ rv = ishtp_cl_driver_register(&hid_ishtp_cl_driver);
+
+ return rv;
+
+}
+
+static void __exit ish_hid_exit(void)
+{
+ ishtp_cl_driver_unregister(&hid_ishtp_cl_driver);
+}
+
+late_initcall(ish_hid_init);
+module_exit(ish_hid_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP HID client driver");
+/* Primary author */
+MODULE_AUTHOR("Daniel Drubin <daniel.drubin@intel.com>");
+/*
+ * Several modification for multi instance support
+ * suspend/resume and clean up
+ */
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
new file mode 100644
index 00000000000000..822a9251ae8bcc
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -0,0 +1,283 @@
+/*
+ * ISHTP-HID glue driver.
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/hid.h>
+#include <uapi/linux/input.h>
+#include "ishtp/client.h"
+#include "ishtp-hid.h"
+
+/**
+ * ishtp_hid_parse() - hid-core .parse() callback
+ * @hid: hid device instance
+ *
+ * This function gets called during call to hid_add_device
+ *
+ * Return: 0 on success and non zero on error
+ */
+static int ishtp_hid_parse(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ int rv;
+
+ rv = hid_parse_report(hid, client_data->report_descr[hid_data->index],
+ client_data->report_descr_size[hid_data->index]);
+ if (rv)
+ return rv;
+
+ return 0;
+}
+
+/* Empty callbacks with success return code */
+static int ishtp_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void ishtp_hid_stop(struct hid_device *hid)
+{
+}
+
+static int ishtp_hid_open(struct hid_device *hid)
+{
+ return 0;
+}
+
+static void ishtp_hid_close(struct hid_device *hid)
+{
+}
+
+static int ishtp_raw_request(struct hid_device *hid, unsigned char reportnum,
+ __u8 *buf, size_t len, unsigned char rtype,
+ int reqtype)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ char *ishtp_buf = NULL;
+ size_t ishtp_buf_len;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ if (rtype == HID_OUTPUT_REPORT)
+ return -EINVAL;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_buf = buf;
+ hid_data->raw_buf_size = len;
+ hid_data->raw_get_req = true;
+
+ hid_ishtp_get_report(hid, reportnum, rtype);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ ishtp_buf_len = len + header_size;
+ ishtp_buf = kzalloc(ishtp_buf_len + 7, GFP_KERNEL);
+ if (!ishtp_buf)
+ return -ENOMEM;
+
+ memcpy(ishtp_buf + header_size, buf, len);
+ hid_ishtp_set_feature(hid, ishtp_buf, ishtp_buf_len, reportnum);
+ kfree(ishtp_buf);
+ break;
+ }
+
+ hid_hw_wait(hid);
+
+ return len;
+}
+
+/**
+ * ishtp_hid_request() - hid-core .request() callback
+ * @hid: hid device instance
+ * @rep: pointer to hid_report
+ * @reqtype: type of req. [GET|SET]_REPORT
+ *
+ * This function is used to set/get a feature/input report.
+ */
+static void ishtp_hid_request(struct hid_device *hid, struct hid_report *rep,
+ int reqtype)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ /* the specific report length, just HID part of it */
+ unsigned int len = ((rep->size - 1) >> 3) + 1 + (rep->id > 0);
+ char *buf;
+ unsigned int header_size = sizeof(struct hostif_msg);
+
+ len += header_size;
+
+ hid_data->request_done = false;
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ hid_data->raw_get_req = false;
+ hid_ishtp_get_report(hid, rep->id, rep->type);
+ break;
+ case HID_REQ_SET_REPORT:
+ /*
+ * Spare 7 bytes for 64b accesses through
+ * get/put_unaligned_le64()
+ */
+ buf = kzalloc(len + 7, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hid_output_report(rep, buf + header_size);
+ hid_ishtp_set_feature(hid, buf, len, rep->id);
+ kfree(buf);
+ break;
+ }
+}
+
+/**
+ * ishtp_wait_for_response() - hid-core .wait() callback
+ * @hid: hid device instance
+ *
+ * This function is used to wait after a get feature/input report request.
+ *
+ * Return: 0 on success and non zero on error
+ */
+static int ishtp_wait_for_response(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+ struct ishtp_cl_data *client_data = hid_data->client_data;
+ int rv;
+
+ hid_ishtp_trace(client_data, "%s hid %p\n", __func__, hid);
+
+ rv = ishtp_hid_link_ready_wait(hid_data->client_data);
+ if (rv)
+ return rv;
+
+ if (!hid_data->request_done)
+ wait_event_interruptible_timeout(hid_data->hid_wait,
+ hid_data->request_done, 3 * HZ);
+
+ if (!hid_data->request_done) {
+ hid_err(hid,
+ "timeout waiting for response from ISHTP device\n");
+ return -ETIMEDOUT;
+ }
+ hid_ishtp_trace(client_data, "%s hid %p done\n", __func__, hid);
+
+ hid_data->request_done = false;
+
+ return 0;
+}
+
+/**
+ * ishtp_hid_wakeup() - Wakeup caller
+ * @hid: hid device instance
+ *
+ * This function will wakeup caller waiting for Get/Set feature report
+ */
+void ishtp_hid_wakeup(struct hid_device *hid)
+{
+ struct ishtp_hid_data *hid_data = hid->driver_data;
+
+ hid_data->request_done = true;
+ wake_up_interruptible(&hid_data->hid_wait);
+}
+
+static struct hid_ll_driver ishtp_hid_ll_driver = {
+ .parse = ishtp_hid_parse,
+ .start = ishtp_hid_start,
+ .stop = ishtp_hid_stop,
+ .open = ishtp_hid_open,
+ .close = ishtp_hid_close,
+ .request = ishtp_hid_request,
+ .wait = ishtp_wait_for_response,
+ .raw_request = ishtp_raw_request
+};
+
+/**
+ * ishtp_hid_probe() - Allocate and register a hid device
+ * @cur_hid_dev: Index of the hid device to register
+ * @client_data: Client data pointer
+ *
+ * This function is used to allocate and add a HID device.
+ *
+ * Return: 0 on success, non zero on error
+ */
+int ishtp_hid_probe(unsigned int cur_hid_dev,
+ struct ishtp_cl_data *client_data)
+{
+ int rv;
+ struct hid_device *hid;
+ struct ishtp_hid_data *hid_data;
+
+ hid = hid_allocate_device();
+ if (IS_ERR(hid)) {
+ rv = PTR_ERR(hid);
+ return rv;
+ }
+
+ hid_data = kzalloc(sizeof(*hid_data), GFP_KERNEL);
+ if (!hid_data) {
+ rv = -ENOMEM;
+ goto err_hid_data;
+ }
+
+ hid_data->index = cur_hid_dev;
+ hid_data->client_data = client_data;
+ init_waitqueue_head(&hid_data->hid_wait);
+
+ hid->driver_data = hid_data;
+
+ client_data->hid_sensor_hubs[cur_hid_dev] = hid;
+
+ hid->ll_driver = &ishtp_hid_ll_driver;
+ hid->bus = BUS_INTEL_ISHTP;
+ hid->dev.parent = &client_data->cl_device->dev;
+ hid->version = le16_to_cpu(ISH_HID_VERSION);
+ hid->vendor = le16_to_cpu(client_data->hid_devices[cur_hid_dev].vid);
+ hid->product = le16_to_cpu(client_data->hid_devices[cur_hid_dev].pid);
+ snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "hid-ishtp",
+ hid->vendor, hid->product);
+
+ rv = hid_add_device(hid);
+ if (rv)
+ goto err_hid_device;
+
+ hid_ishtp_trace(client_data, "%s allocated hid %p\n", __func__, hid);
+
+ return 0;
+
+err_hid_device:
+ kfree(hid_data);
+err_hid_data:
+ kfree(hid);
+ return rv;
+}
+
+/**
+ * ishtp_hid_remove() - Remove registered hid devices
+ * @client_data: client data pointer
+ *
+ * This function is used to destroy the allocated HID devices.
+ */
+void ishtp_hid_remove(struct ishtp_cl_data *client_data)
+{
+ int i;
+
+ for (i = 0; i < client_data->num_hid_devices; ++i) {
+ if (client_data->hid_sensor_hubs[i]) {
+ kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ client_data->hid_sensor_hubs[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
new file mode 100644
index 00000000000000..9ef31bfb7f6444
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -0,0 +1,190 @@
+/*
+ * ISHTP-HID glue driver's definitions.
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef ISHTP_HID__H
+#define ISHTP_HID__H
+
+/* The fixed ISH product and vendor id */
+#define ISH_HID_VENDOR 0x8086
+#define ISH_HID_PRODUCT 0x22D8
+#define ISH_HID_VERSION 0x0200
+
+#define CMD_MASK 0x7F
+#define IS_RESPONSE 0x80
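+
+/*
+ * Illustrative note (an assumption inferred from how process_recv() uses
+ * these masks): a firmware response echoes the request command in bits 0-6
+ * and sets bit 7, so a receive path can split the header byte as:
+ *
+ *	cmd     = hdr.command & CMD_MASK;      bits 0-6, command code
+ *	is_resp = hdr.command & IS_RESPONSE;   bit 7, set on FW responses
+ */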
+
+/* Used to dump to Linux trace buffer, if enabled */
+#define hid_ishtp_trace(client, ...) \
+ client->cl_device->ishtp_dev->print_log(\
+ client->cl_device->ishtp_dev, __VA_ARGS__)
+
+/* ISH Transport protocol (ISHTP in short) GUID */
+static const uuid_le hid_ishtp_guid = UUID_LE(0x33AECD58, 0xB679, 0x4E54,
+ 0x9B, 0xD9, 0xA0, 0x4D, 0x34,
+ 0xF0, 0xC2, 0x26);
+
+/* ISH HID message structure */
+struct hostif_msg_hdr {
+ uint8_t command; /* Bit 7: is_response */
+ uint8_t device_id;
+ uint8_t status;
+ uint8_t flags;
+ uint16_t size;
+} __packed;
+
+struct hostif_msg {
+ struct hostif_msg_hdr hdr;
+} __packed;
+
+struct hostif_msg_to_sensor {
+ struct hostif_msg_hdr hdr;
+ uint8_t report_id;
+} __packed;
+
+struct device_info {
+ uint32_t dev_id;
+ uint8_t dev_class;
+ uint16_t pid;
+ uint16_t vid;
+} __packed;
+
+struct ishtp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint8_t hotfix;
+ uint16_t build;
+} __packed;
+
+/* struct for ISHTP aggregated input data */
+struct report_list {
+ uint16_t total_size;
+ uint8_t num_of_reports;
+ uint8_t flags;
+ struct {
+ uint16_t size_of_report;
+ uint8_t report[1];
+ } __packed reports[1];
+} __packed;
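+
+/*
+ * A hedged sketch (mirroring the HOSTIF_PUBLISH_INPUT_REPORT_LIST handling
+ * in process_recv()) of walking the aggregated list above: entries are
+ * packed back to back, each a 16-bit size followed by that many bytes.
+ *
+ *	char *p = (char *)list->reports;
+ *	for (i = 0; i < list->num_of_reports; i++) {
+ *		uint16_t rep_len = *(uint16_t *)p;
+ *		handle rep_len bytes at p + sizeof(uint16_t);
+ *		p += sizeof(uint16_t) + rep_len;
+ *	}
+ */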
+
+/* HOSTIF commands */
+#define HOSTIF_HID_COMMAND_BASE 0
+#define HOSTIF_GET_HID_DESCRIPTOR 0
+#define HOSTIF_GET_REPORT_DESCRIPTOR 1
+#define HOSTIF_GET_FEATURE_REPORT 2
+#define HOSTIF_SET_FEATURE_REPORT 3
+#define HOSTIF_GET_INPUT_REPORT 4
+#define HOSTIF_PUBLISH_INPUT_REPORT 5
+#define HOSTIF_PUBLISH_INPUT_REPORT_LIST 6
+#define HOSTIF_DM_COMMAND_BASE 32
+#define HOSTIF_DM_ENUM_DEVICES 33
+#define HOSTIF_DM_ADD_DEVICE 34
+
+#define MAX_HID_DEVICES 32
+
+/**
+ * struct ishtp_cl_data - Encapsulate per ISH TP HID Client
+ * @enum_devices_done: Enum devices response complete flag
+ * @hid_descr_done: HID descriptor complete flag
+ * @report_descr_done: Get report descriptor complete flag
+ * @init_done: Init process completed successfully
+ * @suspended: System is under suspend state or in progress
+ * @num_hid_devices: Number of HID devices enumerated in this client
+ * @cur_hid_dev: This keeps track of the device index for which
+ * initialization and registration with HID core
+ * is in progress.
+ * @hid_devices: Store vid/pid/devid for each enumerated HID device
+ * @report_descr: Stores the raw report descriptors for each HID device
+ * @report_descr_size: Size of each report descriptor in report_descr[]
+ * @hid_sensor_hubs: Pointer to hid_device for all HID device, so that
+ * when clients are removed, they can be freed
+ * @hid_descr: Pointer to hid descriptor for each enumerated hid
+ * device
+ * @hid_descr_size: Size of each hid descriptor above
+ * @init_wait: Wait queue to wait during initialization, where the
+ * client send message to ISH FW and wait for response
+ * @ishtp_resume_wait: Wait queue to wait for resume when the transport
+ * link is suspended
+ * @bad_recv_cnt: Running count of packets received with error
+ * @multi_packet_cnt: Count of fragmented packet count
+ *
+ * This structure is used to store completion flags and per client data like
+ * report descriptors, number of HID devices, etc.
+ */
+struct ishtp_cl_data {
+ /* completion flags */
+ bool enum_devices_done;
+ bool hid_descr_done;
+ bool report_descr_done;
+ bool init_done;
+ bool suspended;
+
+ unsigned int num_hid_devices;
+ unsigned int cur_hid_dev;
+ unsigned int hid_dev_count;
+
+ struct device_info *hid_devices;
+ unsigned char *report_descr[MAX_HID_DEVICES];
+ int report_descr_size[MAX_HID_DEVICES];
+ struct hid_device *hid_sensor_hubs[MAX_HID_DEVICES];
+ unsigned char *hid_descr[MAX_HID_DEVICES];
+ int hid_descr_size[MAX_HID_DEVICES];
+
+ wait_queue_head_t init_wait;
+ wait_queue_head_t ishtp_resume_wait;
+ struct ishtp_cl *hid_ishtp_cl;
+
+ /* Statistics */
+ unsigned int bad_recv_cnt;
+ int multi_packet_cnt;
+
+ struct work_struct work;
+ struct ishtp_cl_device *cl_device;
+};
+
+/**
+ * struct ishtp_hid_data - Per instance HID data
+ * @index: Device index in the order of enumeration
+ * @request_done: Get Feature/Input report complete flag
+ * used during get/set request from hid core
+ * @client_data: Link to the client instance
+ * @hid_wait: Completion waitq
+ *
+ * @raw_get_req: Flag indicating raw get request ongoing
+ * @raw_buf: raw request buffer filled on receiving get report
+ * @raw_buf_size: raw request buffer size
+ * Used to tie hid->driver_data to the driver client instance
+ */
+struct ishtp_hid_data {
+ int index;
+ bool request_done;
+ struct ishtp_cl_data *client_data;
+ wait_queue_head_t hid_wait;
+
+ /* raw request */
+ bool raw_get_req;
+ u8 *raw_buf;
+ size_t raw_buf_size;
+};
+
+/* Interface functions between HID LL driver and ISH TP client */
+void hid_ishtp_set_feature(struct hid_device *hid, char *buf, unsigned int len,
+ int report_id);
+void hid_ishtp_get_report(struct hid_device *hid, int report_id,
+ int report_type);
+int ishtp_hid_probe(unsigned int cur_hid_dev,
+ struct ishtp_cl_data *client_data);
+void ishtp_hid_remove(struct ishtp_cl_data *client_data);
+int ishtp_hid_link_ready_wait(struct ishtp_cl_data *client_data);
+void ishtp_hid_wakeup(struct hid_device *hid);
+
+#endif /* ISHTP_HID__H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
new file mode 100644
index 00000000000000..2623a567ffba5a
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -0,0 +1,783 @@
+/*
+ * ISHTP bus driver
+ *
+ * Copyright (c) 2012-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include "bus.h"
+#include "ishtp-dev.h"
+#include "client.h"
+#include "hbm.h"
+
+static int ishtp_use_dma;
+module_param_named(ishtp_use_dma, ishtp_use_dma, int, 0600);
+MODULE_PARM_DESC(ishtp_use_dma, "Use DMA to send messages");
+
+#define to_ishtp_cl_driver(d) container_of(d, struct ishtp_cl_driver, driver)
+#define to_ishtp_cl_device(d) container_of(d, struct ishtp_cl_device, dev)
+static bool ishtp_device_ready;
+
+/**
+ * ishtp_recv() - process ishtp message
+ * @dev: ishtp device
+ *
+ * If a message with a valid header and size is received, this
+ * function calls the appropriate handler. If both the host and
+ * firmware addresses are zero, it is a host bus management message;
+ * if only the host address is zero, it is a fixed-client message;
+ * otherwise it is a client message.
+ */
+void ishtp_recv(struct ishtp_device *dev)
+{
+ uint32_t msg_hdr;
+ struct ishtp_msg_hdr *ishtp_hdr;
+
+ /* Read ISHTP header dword */
+ msg_hdr = dev->ops->ishtp_read_hdr(dev);
+ if (!msg_hdr)
+ return;
+
+ dev->ops->sync_fw_clock(dev);
+
+ ishtp_hdr = (struct ishtp_msg_hdr *)&msg_hdr;
+ dev->ishtp_msg_hdr = msg_hdr;
+
+ /* Sanity check: ISHTP frag. length in header */
+ if (ishtp_hdr->length > dev->mtu) {
+ dev_err(dev->devc,
+ "ISHTP hdr - bad length: %u; dropped [%08X]\n",
+ (unsigned int)ishtp_hdr->length, msg_hdr);
+ return;
+ }
+
+ /* ISHTP bus message */
+ if (!ishtp_hdr->host_addr && !ishtp_hdr->fw_addr)
+ recv_hbm(dev, ishtp_hdr);
+ /* ISHTP fixed-client message */
+ else if (!ishtp_hdr->host_addr)
+ recv_fixed_cl_msg(dev, ishtp_hdr);
+ else
+ /* ISHTP client message */
+ recv_ishtp_cl_msg(dev, ishtp_hdr);
+}
+EXPORT_SYMBOL(ishtp_recv);
+
+/**
+ * ishtp_send_msg() - Send ishtp message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @msg: Message contents
+ * @ipc_send_compl: completion callback
+ * @ipc_send_compl_prm: completion callback parameter
+ *
+ * Send a multi-fragment message via IPC. After sending the first fragment,
+ * the completion callback is called to schedule transmit of the next fragment.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_send_msg(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ void *msg, void(*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm)
+{
+ unsigned char ipc_msg[IPC_FULL_MSG_SIZE];
+ uint32_t drbl_val;
+
+ drbl_val = dev->ops->ipc_get_header(dev, hdr->length +
+ sizeof(struct ishtp_msg_hdr),
+ 1);
+
+ memcpy(ipc_msg, &drbl_val, sizeof(uint32_t));
+ memcpy(ipc_msg + sizeof(uint32_t), hdr, sizeof(uint32_t));
+ memcpy(ipc_msg + 2 * sizeof(uint32_t), msg, hdr->length);
+ return dev->ops->write(dev, ipc_send_compl, ipc_send_compl_prm,
+ ipc_msg, 2 * sizeof(uint32_t) + hdr->length);
+}
+
+/**
+ * ishtp_write_message() - Send ishtp single fragment message
+ * @dev: ishtp device
+ * @hdr: Message header
+ * @buf: message data
+ *
+ * Send a single fragment message via IPC.
+ *
+ * Return: This returns IPC send message status.
+ */
+int ishtp_write_message(struct ishtp_device *dev, struct ishtp_msg_hdr *hdr,
+ unsigned char *buf)
+{
+ return ishtp_send_msg(dev, hdr, buf, NULL, NULL);
+}
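+
+/*
+ * Usage sketch, with assumed ishtp_cl field names (host_client_id is not
+ * shown in this patch; the header layout lives in ishtp-dev.h):
+ *
+ *	struct ishtp_msg_hdr hdr = { 0 };
+ *
+ *	hdr.host_addr = cl->host_client_id;
+ *	hdr.fw_addr = cl->fw_client_id;
+ *	hdr.length = len;
+ *	rv = ishtp_write_message(dev, &hdr, buf);
+ */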
+
+/**
+ * ishtp_fw_cl_by_uuid() - locate index of fw client
+ * @dev: ishtp device
+ * @uuid: uuid of the client to search
+ *
+ * Search firmware client using UUID.
+ *
+ * Return: fw client index or -ENOENT if not found
+ */
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
+{
+ int i, res = -ENOENT;
+
+ for (i = 0; i < dev->fw_clients_num; ++i) {
+ if (uuid_le_cmp(*uuid, dev->fw_clients[i].props.protocol_name)
+ == 0) {
+ res = i;
+ break;
+ }
+ }
+ return res;
+}
+EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
+
+/**
+ * ishtp_fw_cl_by_id() - return index to fw_clients for client_id
+ * @dev: the ishtp device structure
+ * @client_id: fw client id to search
+ *
+ * Search firmware client using client id.
+ *
+ * Return: index on success, -ENOENT on failure.
+ */
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id)
+{
+ int i, res = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ for (i = 0; i < dev->fw_clients_num; i++) {
+ if (dev->fw_clients[i].client_id == client_id) {
+ res = i;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ return res;
+}
+
+/**
+ * ishtp_cl_device_probe() - Bus probe() callback
+ * @dev: the device structure
+ *
+ * This is a bus probe callback and calls the driver probe function.
+ *
+ * Return: Return value from driver probe() call.
+ */
+static int ishtp_cl_device_probe(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver || !driver->probe)
+ return -ENODEV;
+
+ return driver->probe(device);
+}
+
+/**
+ * ishtp_cl_device_remove() - Bus remove() callback
+ * @dev: the device structure
+ *
+ * This is a bus remove callback and calls the driver remove function.
+ * Since the ISH driver model supports only built-in drivers, this is
+ * primarily called during a pci driver init failure.
+ *
+ * Return: Return value from driver remove() call.
+ */
+static int ishtp_cl_device_remove(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+
+ if (!device || !dev->driver)
+ return 0;
+
+ if (device->event_cb) {
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+ }
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (!driver->remove) {
+ dev->driver = NULL;
+
+ return 0;
+ }
+
+ return driver->remove(device);
+}
+
+/**
+ * ishtp_cl_device_suspend() - Bus suspend callback
+ * @dev: device
+ *
+ * Called during device suspend process.
+ *
+ * Return: Return value from driver suspend() call.
+ */
+static int ishtp_cl_device_suspend(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->suspend)
+ ret = driver->driver.pm->suspend(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_resume() - Bus resume callback
+ * @dev: device
+ *
+ * Called during device resume process.
+ *
+ * Return: Return value from driver resume() call.
+ */
+static int ishtp_cl_device_resume(struct device *dev)
+{
+ struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ if (!device)
+ return 0;
+
+ /*
+ * When ISH needs a hard reset, it is done asynchronously, hence bus
+ * resume will be called before full ISH resume
+ */
+ if (device->ishtp_dev->resume_flag)
+ return 0;
+
+ driver = to_ishtp_cl_driver(dev->driver);
+ if (driver && driver->driver.pm) {
+ if (driver->driver.pm->resume)
+ ret = driver->driver.pm->resume(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * ishtp_cl_device_reset() - Reset callback
+ * @device: ishtp client device instance
+ *
+ * This callback is invoked when HW reset is done and the device needs
+ * to be reinitialized.
+ *
+ * Return: Return value from driver reset() call.
+ */
+static int ishtp_cl_device_reset(struct ishtp_cl_device *device)
+{
+ struct ishtp_cl_driver *driver;
+ int ret = 0;
+
+ device->event_cb = NULL;
+ cancel_work_sync(&device->event_work);
+
+ driver = to_ishtp_cl_driver(device->dev.driver);
+ if (driver && driver->reset)
+ ret = driver->reset(device);
+
+ return ret;
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
+ char *buf)
+{
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "ishtp:%s\n", dev_name(dev));
+ return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *ishtp_cl_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ishtp_cl_dev);
+
+static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ if (add_uevent_var(env, "MODALIAS=ishtp:%s", dev_name(dev)))
+ return -ENOMEM;
+ return 0;
+}
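+
+/*
+ * For example (derived from the "{%pUL}" device naming in
+ * ishtp_bus_new_client() below), the HID client enumerated with
+ * hid_ishtp_guid would emit roughly:
+ *
+ *	MODALIAS=ishtp:{33AECD58-B679-4E54-9BD9-A04D34F0C226}
+ *
+ * which the ish-hid module's MODULE_ALIAS("ishtp:*") pattern matches.
+ */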
+
+static const struct dev_pm_ops ishtp_cl_bus_dev_pm_ops = {
+ /* Suspend callbacks */
+ .suspend = ishtp_cl_device_suspend,
+ .resume = ishtp_cl_device_resume,
+ /* Hibernate callbacks */
+ .freeze = ishtp_cl_device_suspend,
+ .thaw = ishtp_cl_device_resume,
+ .restore = ishtp_cl_device_resume,
+};
+
+static struct bus_type ishtp_cl_bus_type = {
+ .name = "ishtp",
+ .dev_groups = ishtp_cl_dev_groups,
+ .probe = ishtp_cl_device_probe,
+ .remove = ishtp_cl_device_remove,
+ .pm = &ishtp_cl_bus_dev_pm_ops,
+ .uevent = ishtp_cl_uevent,
+};
+
+static void ishtp_cl_dev_release(struct device *dev)
+{
+ kfree(to_ishtp_cl_device(dev));
+}
+
+static const struct device_type ishtp_cl_device_type = {
+ .release = ishtp_cl_dev_release,
+};
+
+/**
+ * ishtp_bus_add_device() - Function to create device on bus
+ * @dev: ishtp device
+ * @uuid: uuid of the client
+ * @name: Name of the client
+ *
+ * Allocate ISHTP bus client device, attach it to uuid
+ * and register with ISHTP bus.
+ *
+ * Return: ishtp_cl_device pointer or NULL on failure
+ */
+static struct ishtp_cl_device *ishtp_bus_add_device(struct ishtp_device *dev,
+ uuid_le uuid, char *name)
+{
+ struct ishtp_cl_device *device;
+ int status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_for_each_entry(device, &dev->device_list, device_link) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ device->fw_client = &dev->fw_clients[
+ dev->fw_client_presentation_num - 1];
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ ishtp_cl_device_reset(device);
+ return device;
+ }
+ }
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ device = kzalloc(sizeof(struct ishtp_cl_device), GFP_KERNEL);
+ if (!device)
+ return NULL;
+
+ device->dev.parent = dev->devc;
+ device->dev.bus = &ishtp_cl_bus_type;
+ device->dev.type = &ishtp_cl_device_type;
+ device->ishtp_dev = dev;
+
+ device->fw_client =
+ &dev->fw_clients[dev->fw_client_presentation_num - 1];
+
+ dev_set_name(&device->dev, "%s", name);
+
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_add_tail(&device->device_link, &dev->device_list);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+
+ status = device_register(&device->dev);
+ if (status) {
+ spin_lock_irqsave(&dev->device_list_lock, flags);
+ list_del(&device->device_link);
+ spin_unlock_irqrestore(&dev->device_list_lock, flags);
+ dev_err(dev->devc, "Failed to register ISHTP client device\n");
+ put_device(&device->dev);
+ return NULL;
+ }
+
+ ishtp_device_ready = true;
+
+ return device;
+}
+
+/**
+ * ishtp_bus_remove_device() - Function to release device on bus
+ * @device: client device instance
+ *
+ * This is the counterpart of ishtp_bus_add_device.
+ * The device is unregistered and its structure is freed by the
+ * 'ishtp_cl_dev_release' function.
+ * Called only on error in the pci driver init path.
+ */
+static void ishtp_bus_remove_device(struct ishtp_cl_device *device)
+{
+ device_unregister(&device->dev);
+}
+
+/**
+ * __ishtp_cl_driver_register() - Client driver register
+ * @driver: the client driver instance
+ * @owner: Owner of this driver module
+ *
+ * Once a client driver is probed, it creates a client
+ * instance and registers it with the bus.
+ *
+ * Return: Return value of driver_register or -ENODEV if not ready
+ */
+int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner)
+{
+ int err;
+
+ if (!ishtp_device_ready)
+ return -ENODEV;
+
+ driver->driver.name = driver->name;
+ driver->driver.owner = owner;
+ driver->driver.bus = &ishtp_cl_bus_type;
+
+ err = driver_register(&driver->driver);
+ if (err)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(__ishtp_cl_driver_register);
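+
+/*
+ * Minimal registration sketch (hypothetical "sample" client; the ish-hid
+ * driver earlier in this patch is the real in-tree user):
+ *
+ *	static struct ishtp_cl_driver sample_cl_driver = {
+ *		.name   = "ish-sample",
+ *		.probe  = sample_probe,
+ *		.remove = sample_remove,
+ *	};
+ *
+ *	rv = ishtp_cl_driver_register(&sample_cl_driver);
+ */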
+
+/**
+ * ishtp_cl_driver_unregister() - Client driver unregister
+ * @driver: the client driver instance
+ *
+ * Unregister client during device removal process.
+ */
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver)
+{
+ driver_unregister(&driver->driver);
+}
+EXPORT_SYMBOL(ishtp_cl_driver_unregister);
+
+/**
+ * ishtp_bus_event_work() - event work function
+ * @work: work struct pointer
+ *
+ * Once an event is received for a client this work
+ * function is called. If the device has registered a
+ * callback then the callback is called.
+ */
+static void ishtp_bus_event_work(struct work_struct *work)
+{
+ struct ishtp_cl_device *device;
+
+ device = container_of(work, struct ishtp_cl_device, event_work);
+
+ if (device->event_cb)
+ device->event_cb(device);
+}
+
+/**
+ * ishtp_cl_bus_rx_event() - schedule event work
+ * @device: client device instance
+ *
+ * Once an event is received for a client this schedules
+ * a work function to process it.
+ */
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device)
+{
+ if (!device || !device->event_cb)
+ return;
+
+ schedule_work(&device->event_work);
+}
+
+/**
+ * ishtp_register_event_cb() - Register callback
+ * @device: client device instance
+ * @event_cb: Event processor for a client
+ *
+ * Register a callback for events, called from client driver
+ *
+ * Return: Return 0 or -EALREADY if already registered
+ */
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*event_cb)(struct ishtp_cl_device *))
+{
+ if (device->event_cb)
+ return -EALREADY;
+
+ device->event_cb = event_cb;
+ INIT_WORK(&device->event_work, ishtp_bus_event_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_register_event_cb);
+
+/**
+ * ishtp_get_device() - update usage count for the device
+ * @cl_device: client device instance
+ *
+ * Increment the usage count. The device can't be deleted while in use.
+ */
+void ishtp_get_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count++;
+}
+EXPORT_SYMBOL(ishtp_get_device);
+
+/**
+ * ishtp_put_device() - decrement usage count for the device
+ * @cl_device: client device instance
+ *
+ * Decrement the usage count. The device can be deleted once the count is 0
+ */
+void ishtp_put_device(struct ishtp_cl_device *cl_device)
+{
+ cl_device->reference_count--;
+}
+EXPORT_SYMBOL(ishtp_put_device);
+
+/**
+ * ishtp_bus_new_client() - Create a new client
+ * @dev: ISHTP device instance
+ *
+ * Once bus protocol enumerates a client, this is called
+ * to add a device for the client.
+ *
+ * Return: 0 on success or error code on failure
+ */
+int ishtp_bus_new_client(struct ishtp_device *dev)
+{
+ int i;
+ char *dev_name;
+ struct ishtp_cl_device *cl_device;
+ uuid_le device_uuid;
+
+ /*
+ * For all reported clients, create an unconnected client and add its
+ * device to ISHTP bus.
+ * If the appropriate driver has been loaded, this will trigger its probe().
+ * Otherwise, probe() will be called when the driver is loaded
+ */
+ i = dev->fw_client_presentation_num - 1;
+ device_uuid = dev->fw_clients[i].props.protocol_name;
+ dev_name = kasprintf(GFP_KERNEL, "{%pUL}", device_uuid.b);
+ if (!dev_name)
+ return -ENOMEM;
+
+ cl_device = ishtp_bus_add_device(dev, device_uuid, dev_name);
+ if (!cl_device) {
+ kfree(dev_name);
+ return -ENOENT;
+ }
+
+ kfree(dev_name);
+
+ return 0;
+}
+
+/**
+ * ishtp_cl_device_bind() - bind a device
+ * @cl: ishtp client device
+ *
+ * Binds connected ishtp_cl to ISHTP bus device
+ *
+ * Return: 0 on success or fault code
+ */
+int ishtp_cl_device_bind(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_device *cl_device;
+ unsigned long flags;
+ int rv;
+
+ if (!cl->fw_client_id || cl->state != ISHTP_CL_CONNECTED)
+ return -EFAULT;
+
+ rv = -ENOENT;
+ spin_lock_irqsave(&cl->dev->device_list_lock, flags);
+ list_for_each_entry(cl_device, &cl->dev->device_list,
+ device_link) {
+ if (cl_device->fw_client->client_id == cl->fw_client_id) {
+ cl->device = cl_device;
+ rv = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cl->dev->device_list_lock, flags);
+ return rv;
+}
+
+/**
+ * ishtp_bus_remove_all_clients() - Remove all clients
+ * @ishtp_dev: ishtp device
+ * @warm_reset: Reset due to a FW reset caused by errors or S3 suspend
+ *
+ * This is part of the reset/remove flow. Its main processing targets
+ * error handling: removing connected clients when the FW has forced a
+ * reset or hit an error. During a warm reset the client devices are
+ * not removed.
+ */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset)
+{
+ struct ishtp_cl_device *cl_device, *n;
+ struct ishtp_cl *cl;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ishtp_dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &ishtp_dev->cl_list, link) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /*
+ * Wake any pending process. The waiter will check dev->state,
+ * determine that the device is no longer enabled, and
+ * return an error to its caller
+ */
+ wake_up_interruptible(&cl->wait_ctrl_res);
+
+ /* Disband any pending read/write requests and free rb */
+ ishtp_cl_flush_queues(cl);
+
+ /* Remove all free and in_process rings, both Rx and Tx */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+
+ /*
+ * Free client and ISHTP bus client device structures;
+ * don't free the host client because it is part of the OS fd
+ * structure
+ */
+ }
+ spin_unlock_irqrestore(&ishtp_dev->cl_list_lock, flags);
+
+ /* Release DMA buffers for client messages */
+ ishtp_cl_free_dma_buf(ishtp_dev);
+
+ /* remove bus clients */
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
+ device_link) {
+ if (warm_reset && cl_device->reference_count)
+ continue;
+
+ list_del(&cl_device->device_link);
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+ ishtp_bus_remove_device(cl_device);
+ spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&ishtp_dev->device_list_lock, flags);
+
+ /* Free all client structures */
+ spin_lock_irqsave(&ishtp_dev->fw_clients_lock, flags);
+ kfree(ishtp_dev->fw_clients);
+ ishtp_dev->fw_clients = NULL;
+ ishtp_dev->fw_clients_num = 0;
+ ishtp_dev->fw_client_presentation_num = 0;
+ ishtp_dev->fw_client_index = 0;
+ bitmap_zero(ishtp_dev->fw_clients_map, ISHTP_CLIENTS_MAX);
+ spin_unlock_irqrestore(&ishtp_dev->fw_clients_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_bus_remove_all_clients);
+
+/**
+ * ishtp_reset_handler() - IPC reset handler
+ * @dev: ishtp device
+ *
+ * ISHTP Handler for IPC_RESET notification
+ */
+void ishtp_reset_handler(struct ishtp_device *dev)
+{
+ unsigned long flags;
+
+ /* Handle FW-initiated reset */
+ dev->dev_state = ISHTP_DEV_RESETTING;
+
+ /* Clear BH processing queue - no further HBMs */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ dev->rd_msg_fifo_head = dev->rd_msg_fifo_tail = 0;
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+
+ /* Handle ISH FW reset against upper layers */
+ ishtp_bus_remove_all_clients(dev, true);
+}
+EXPORT_SYMBOL(ishtp_reset_handler);
+
+/**
+ * ishtp_reset_compl_handler() - Reset completion handler
+ * @dev: ishtp device
+ *
+ * ISHTP handler for IPC_RESET sequence completion to start
+ * host message bus start protocol sequence.
+ */
+void ishtp_reset_compl_handler(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INIT_CLIENTS;
+ dev->hbm_state = ISHTP_HBM_START;
+ ishtp_hbm_start_req(dev);
+}
+EXPORT_SYMBOL(ishtp_reset_compl_handler);
+
+/**
+ * ishtp_use_dma_transfer() - Function to use DMA
+ *
+ * This interface is used to enable usage of DMA
+ *
+ * Return: non zero if DMA can be enabled
+ */
+int ishtp_use_dma_transfer(void)
+{
+ return ishtp_use_dma;
+}
+
+/**
+ * ishtp_bus_register() - Function to register bus
+ *
+ * This registers the ishtp bus
+ *
+ * Return: Return output of bus_register
+ */
+static int __init ishtp_bus_register(void)
+{
+ return bus_register(&ishtp_cl_bus_type);
+}
+
+/**
+ * ishtp_bus_unregister() - Function to unregister bus
+ *
+ * This unregisters the ishtp bus
+ */
+static void __exit ishtp_bus_unregister(void)
+{
+ bus_unregister(&ishtp_cl_bus_type);
+}
+
+module_init(ishtp_bus_register);
+module_exit(ishtp_bus_unregister);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
new file mode 100644
index 00000000000000..a1ffae7f26ad2c
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -0,0 +1,114 @@
+/*
+ * ISHTP bus definitions
+ *
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#ifndef _LINUX_ISHTP_CL_BUS_H
+#define _LINUX_ISHTP_CL_BUS_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct ishtp_cl;
+struct ishtp_cl_device;
+struct ishtp_device;
+struct ishtp_msg_hdr;
+
+/**
+ * struct ishtp_cl_device - ISHTP device handle
+ * @dev: device pointer
+ * @ishtp_dev: pointer to ishtp device structure to primarily to access
+ * hw device operation callbacks and properties
+ * @fw_client: fw_client pointer to get fw information like protocol name
+ * max message length etc.
+ * @device_link: Link to next client in the list on a bus
+ * @event_work: Used to schedule rx event for client
+ * @driver_data: Storage driver private data
+ * @reference_count: Used for get/put device
+ * @event_cb: Callback to driver to send events
+ *
+ * An ishtp_cl_device pointer is returned from ishtp_add_device()
+ * and links ISHTP bus clients to their actual host client pointer.
+ * Drivers for ISHTP devices will get an ishtp_cl_device pointer
+ * when being probed and shall use it for doing bus I/O.
+ */
+struct ishtp_cl_device {
+ struct device dev;
+ struct ishtp_device *ishtp_dev;
+ struct ishtp_fw_client *fw_client;
+ struct list_head device_link;
+ struct work_struct event_work;
+ void *driver_data;
+ int reference_count;
+ void (*event_cb)(struct ishtp_cl_device *device);
+};
+
+/**
+ * struct ishtp_cl_driver - ISHTP client driver
+ * @driver: driver instance on a bus
+ * @name: Name of the driver used for matching at probe
+ * @probe: driver callback for device probe
+ * @remove: driver callback on device removal
+ * @reset: driver callback on device reset
+ * @pm: power management callbacks
+ *
+ * Client drivers define these to get probed/removed for ISHTP client devices.
+ */
+struct ishtp_cl_driver {
+ struct device_driver driver;
+ const char *name;
+ int (*probe)(struct ishtp_cl_device *dev);
+ int (*remove)(struct ishtp_cl_device *dev);
+ int (*reset)(struct ishtp_cl_device *dev);
+ const struct dev_pm_ops *pm;
+};
+
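+/*
+ * Illustrative usage sketch (names such as my_probe/my_remove are
+ * hypothetical placeholders, not part of this interface): a minimal
+ * client driver fills in ishtp_cl_driver and registers it on the ISHTP
+ * bus using ishtp_cl_driver_register()/ishtp_cl_driver_unregister()
+ * declared below.
+ *
+ *	static int my_probe(struct ishtp_cl_device *cl_device);
+ *	static int my_remove(struct ishtp_cl_device *cl_device);
+ *
+ *	static struct ishtp_cl_driver my_driver = {
+ *		.name = "my-ishtp-client",
+ *		.probe = my_probe,
+ *		.remove = my_remove,
+ *	};
+ *
+ *	// in module init/exit:
+ *	ishtp_cl_driver_register(&my_driver);
+ *	ishtp_cl_driver_unregister(&my_driver);
+ */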
+
+int ishtp_bus_new_client(struct ishtp_device *dev);
+void ishtp_remove_all_clients(struct ishtp_device *dev);
+int ishtp_cl_device_bind(struct ishtp_cl *cl);
+void ishtp_cl_bus_rx_event(struct ishtp_cl_device *device);
+
+/* Write a multi-fragment message */
+int ishtp_send_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr, void *msg,
+ void (*ipc_send_compl)(void *),
+ void *ipc_send_compl_prm);
+
+/* Write a single-fragment message */
+int ishtp_write_message(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *hdr,
+ unsigned char *buf);
+
+/* Use DMA to send/receive messages */
+int ishtp_use_dma_transfer(void);
+
+/* Exported functions */
+void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
+ bool warm_reset);
+
+void ishtp_recv(struct ishtp_device *dev);
+void ishtp_reset_handler(struct ishtp_device *dev);
+void ishtp_reset_compl_handler(struct ishtp_device *dev);
+
+void ishtp_put_device(struct ishtp_cl_device *);
+void ishtp_get_device(struct ishtp_cl_device *);
+
+int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
+ struct module *owner);
+#define ishtp_cl_driver_register(driver) \
+ __ishtp_cl_driver_register(driver, THIS_MODULE)
+void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
+
+int ishtp_register_event_cb(struct ishtp_cl_device *device,
+ void (*read_cb)(struct ishtp_cl_device *));
+int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+
+#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
new file mode 100644
index 00000000000000..b9b917d2d50db3
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -0,0 +1,257 @@
+/*
+ * ISHTP Ring Buffers
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_rx_ring() - Allocate RX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize RX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ struct ishtp_cl_rb *rb;
+ int ret = 0;
+ unsigned long flags;
+
+ for (j = 0; j < cl->rx_ring_size; ++j) {
+ rb = ishtp_io_rb_init(cl);
+ if (!rb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = ishtp_io_rb_alloc_buf(rb, len);
+ if (ret)
+ goto out;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+
+ return 0;
+
+out:
+ dev_err(&cl->device->dev, "error in allocating Rx buffers\n");
+ ishtp_cl_free_rx_ring(cl);
+ return ret;
+}
+
+/**
+ * ishtp_cl_alloc_tx_ring() - Allocate TX ring buffers
+ * @cl: client device instance
+ *
+ * Allocate and initialize TX ring buffers
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
+{
+ size_t len = cl->device->fw_client->props.max_msg_length;
+ int j;
+ unsigned long flags;
+
+ /* Allocate pool to free Tx bufs */
+ for (j = 0; j < cl->tx_ring_size; ++j) {
+ struct ishtp_cl_tx_ring *tx_buf;
+
+ tx_buf = kzalloc(sizeof(struct ishtp_cl_tx_ring), GFP_KERNEL);
+ if (!tx_buf)
+ goto out;
+
+ tx_buf->send_buf.data = kmalloc(len, GFP_KERNEL);
+ if (!tx_buf->send_buf.data) {
+ kfree(tx_buf);
+ goto out;
+ }
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+ }
+ return 0;
+out:
+ dev_err(&cl->device->dev, "error in allocating Tx pool\n");
+ ishtp_cl_free_tx_ring(cl);
+ return -ENOMEM;
+}
+
+/**
+ * ishtp_cl_free_rx_ring() - Free RX ring buffers
+ * @cl: client device instance
+ *
+ * Free RX ring buffers
+ */
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ unsigned long flags;
+
+ /* release allocated memory - pass over free_rb_list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ while (!list_empty(&cl->free_rb_list.list)) {
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,
+ list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ /* release allocated memory - pass over in_process_list */
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ while (!list_empty(&cl->in_process_list.list)) {
+ rb = list_entry(cl->in_process_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del(&rb->list);
+ kfree(rb->buffer.data);
+ kfree(rb);
+ }
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_free_tx_ring() - Free TX ring buffers
+ * @cl: client device instance
+ *
+ * Free TX ring buffers
+ */
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_tx_ring *tx_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
+ /* release allocated memory - pass over tx_free_list */
+ while (!list_empty(&cl->tx_free_list.list)) {
+ tx_buf = list_entry(cl->tx_free_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, flags);
+ /* release allocated memory - pass over tx_list */
+ while (!list_empty(&cl->tx_list.list)) {
+ tx_buf = list_entry(cl->tx_list.list.next,
+ struct ishtp_cl_tx_ring, list);
+ list_del(&tx_buf->list);
+ kfree(tx_buf->send_buf.data);
+ kfree(tx_buf);
+ }
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
+}
+
+/**
+ * ishtp_io_rb_free() - Free IO request block
+ * @rb: IO request block
+ *
+ * Free io request block memory
+ */
+void ishtp_io_rb_free(struct ishtp_cl_rb *rb)
+{
+ if (rb == NULL)
+ return;
+
+ kfree(rb->buffer.data);
+ kfree(rb);
+}
+
+/**
+ * ishtp_io_rb_init() - Allocate and init IO request block
+ * @cl: client device instance
+ *
+ * Allocate and initialize request block
+ *
+ * Return: Allocated IO request block pointer
+ */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+
+ rb = kzalloc(sizeof(struct ishtp_cl_rb), GFP_KERNEL);
+ if (!rb)
+ return NULL;
+
+ INIT_LIST_HEAD(&rb->list);
+ rb->cl = cl;
+ rb->buf_idx = 0;
+ return rb;
+}
+
+/**
+ * ishtp_io_rb_alloc_buf() - Allocate and init response buffer
+ * @rb: IO request block
+ * @length: length of response buffer
+ *
+ * Allocate response buffer
+ *
+ * Return: 0 on success else -ENOMEM
+ */
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length)
+{
+ if (!rb)
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ rb->buffer.data = kmalloc(length, GFP_KERNEL);
+ if (!rb->buffer.data)
+ return -ENOMEM;
+
+ rb->buffer.size = length;
+ return 0;
+}
+
+/**
+ * ishtp_cl_io_rb_recycle() - Recycle IO request blocks
+ * @rb: IO request block
+ *
+ * Re-append rb to its client's free list and send flow control if needed
+ *
+ * Return: 0 on success else -EFAULT
+ */
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
+{
+ struct ishtp_cl *cl;
+ int rets = 0;
+ unsigned long flags;
+
+ if (!rb || !rb->cl)
+ return -EFAULT;
+
+ cl = rb->cl;
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ /*
+ * If we returned the first buffer to empty 'free' list,
+ * send flow control
+ */
+ if (!cl->out_flow_ctrl_creds)
+ rets = ishtp_cl_read_start(cl);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
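+
+/*
+ * Illustrative usage sketch: once a received message has been consumed
+ * from rb->buffer.data (rb->buf_idx bytes are valid), the client driver
+ * recycles the request block so the next flow-control credit can be
+ * granted. process_payload() is a hypothetical helper.
+ *
+ *	process_payload(rb->buffer.data, rb->buf_idx);
+ *	ret = ishtp_cl_io_rb_recycle(rb);
+ */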
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
new file mode 100644
index 00000000000000..007443ef5fca44
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -0,0 +1,1047 @@
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_read_list_flush() - Flush read queue
+ * @cl: ishtp client instance
+ *
+ * Used to remove all entries from read queue for a client
+ */
+static void ishtp_read_list_flush(struct ishtp_cl *cl)
+{
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
+ list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
+ if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ }
+ spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
+}
+
+/**
+ * ishtp_cl_flush_queues() - Flush all queues for a client
+ * @cl: ishtp client instance
+ *
+ * Used to remove all queues for a client. This is called when a client
+ * device needs a reset due to an error, on S3 resume, or during module
+ * removal
+ *
+ * Return: 0 on success, else -EINVAL if the client or device is NULL
+ */
+int ishtp_cl_flush_queues(struct ishtp_cl *cl)
+{
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ ishtp_read_list_flush(cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_flush_queues);
+
+/**
+ * ishtp_cl_init() - Initialize all fields of a client device
+ * @cl: ishtp client instance
+ * @dev: ishtp device
+ *
+ * Initializes a client device fields: Init spinlocks, init queues etc.
+ * This function is called during new client creation
+ */
+static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
+{
+ memset(cl, 0, sizeof(struct ishtp_cl));
+ init_waitqueue_head(&cl->wait_ctrl_res);
+ spin_lock_init(&cl->free_list_spinlock);
+ spin_lock_init(&cl->in_process_spinlock);
+ spin_lock_init(&cl->tx_list_spinlock);
+ spin_lock_init(&cl->tx_free_list_spinlock);
+ spin_lock_init(&cl->fc_spinlock);
+ INIT_LIST_HEAD(&cl->link);
+ cl->dev = dev;
+
+ INIT_LIST_HEAD(&cl->free_rb_list.list);
+ INIT_LIST_HEAD(&cl->tx_list.list);
+ INIT_LIST_HEAD(&cl->tx_free_list.list);
+ INIT_LIST_HEAD(&cl->in_process_list.list);
+
+ cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
+ cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+
+ /* dma */
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->last_dma_acked = 1;
+ cl->last_dma_addr = NULL;
+ cl->last_ipc_acked = 1;
+}
+
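+/*
+ * Ring-sizing note: rx_ring_size and tx_ring_size may be tuned after
+ * ishtp_cl_allocate() and before ishtp_cl_connect(), which is what
+ * actually allocates the rings; the CL_MAX_*_RING_SIZE constants in
+ * client.h suggest the intended upper bounds.
+ */
+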
+/**
+ * ishtp_cl_allocate() - allocates client structure and sets it up.
+ * @dev: ishtp device
+ *
+ * Allocate memory for a new client device and initialize each field.
+ *
+ * Return: The allocated client instance or NULL on failure
+ */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
+{
+ struct ishtp_cl *cl;
+
+ cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
+ if (!cl)
+ return NULL;
+
+ ishtp_cl_init(cl, dev);
+ return cl;
+}
+EXPORT_SYMBOL(ishtp_cl_allocate);
+
+/**
+ * ishtp_cl_free() - Frees a client device
+ * @cl: client device instance
+ *
+ * Frees a client device
+ */
+void ishtp_cl_free(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ unsigned long flags;
+
+ if (!cl)
+ return;
+
+ dev = cl->dev;
+ if (!dev)
+ return;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_free_tx_ring(cl);
+ kfree(cl);
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_free);
+
+/**
+ * ishtp_cl_link() - Reserve a host id and link the client instance
+ * @cl: client device instance
+ * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
+ * available id can be used
+ *
+ * This allocates a single bit in the hostmap, which bounds the number of
+ * client sessions that can be open at the same time. Once allocated, the
+ * client device instance is added to the ishtp device's current client
+ * list
+ *
+ * Return: 0 or error code on failure
+ */
+int ishtp_cl_link(struct ishtp_cl *cl, int id)
+{
+ struct ishtp_device *dev;
+ unsigned long flags, flags_cl;
+ int ret = 0;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -EINVAL;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+
+ if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
+ ret = -EMFILE;
+ goto unlock_dev;
+ }
+
+ /* If id is not assigned, get one */
+ if (id == ISHTP_HOST_CLIENT_ID_ANY)
+ id = find_first_zero_bit(dev->host_clients_map,
+ ISHTP_CLIENTS_MAX);
+
+ if (id >= ISHTP_CLIENTS_MAX) {
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
+ return -ENOENT;
+ }
+
+ dev->open_handle_count++;
+ cl->host_client_id = id;
+ spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ret = -ENODEV;
+ goto unlock_cl;
+ }
+ list_add_tail(&cl->link, &dev->cl_list);
+ set_bit(id, dev->host_clients_map);
+ cl->state = ISHTP_CL_INITIALIZING;
+
+unlock_cl:
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
+unlock_dev:
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(ishtp_cl_link);
+
+/**
+ * ishtp_cl_unlink() - remove fw_cl from the client device list
+ * @cl: client device instance
+ *
+ * Remove a device previously linked to an ishtp device
+ */
+void ishtp_cl_unlink(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ /* don't shout on error exit path */
+ if (!cl || !cl->dev)
+ return;
+
+ dev = cl->dev;
+
+ spin_lock_irqsave(&dev->device_lock, flags);
+ if (dev->open_handle_count > 0) {
+ clear_bit(cl->host_client_id, dev->host_clients_map);
+ dev->open_handle_count--;
+ }
+ spin_unlock_irqrestore(&dev->device_lock, flags);
+
+ /*
+ * This checks that 'cl' is actually linked into device's structure,
+ * before attempting 'list_del'
+ */
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link)
+ if (cl->host_client_id == pos->host_client_id) {
+ list_del_init(&pos->link);
+ break;
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+EXPORT_SYMBOL(ishtp_cl_unlink);
+
+/**
+ * ishtp_cl_disconnect() - Send disconnect request to firmware
+ * @cl: client device instance
+ *
+ * Send a disconnect request for a client to firmware.
+ *
+ * Return: 0 if successful disconnect response from the firmware or error
+ * code on failure
+ */
+int ishtp_cl_disconnect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int err;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
+
+ if (cl->state != ISHTP_CL_DISCONNECTING) {
+ dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
+ return 0;
+ }
+
+ if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
+ dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
+ dev_err(&cl->device->dev, "failed to disconnect.\n");
+ return -ENODEV;
+ }
+
+ err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state != ISHTP_DEV_ENABLED ||
+ cl->state == ISHTP_CL_DISCONNECTED),
+ ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
+
+ /*
+ * If an FW reset arrived, this will happen. Don't dereference 'cl',
+ * as it may have been freed already
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ if (cl->state == ISHTP_CL_DISCONNECTED) {
+ dev->print_log(dev, "%s() successful\n", __func__);
+ return 0;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_cl_disconnect);
+
+/**
+ * ishtp_cl_is_other_connecting() - Check if another client is connecting
+ * @cl: client device instance
+ *
+ * Checks if another client with the same fw client id is connecting
+ *
+ * Return: true if another such client is connecting, else false
+ */
+static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl *pos;
+ unsigned long flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return false;
+
+ dev = cl->dev;
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(pos, &dev->cl_list, link) {
+ if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
+ cl->fw_client_id == pos->fw_client_id) {
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+
+ return false;
+}
+
+/**
+ * ishtp_cl_connect() - Send connect request to firmware
+ * @cl: client device instance
+ *
+ * Send a connect request for a client to firmware. If successful, it will
+ * allocate RX and TX ring buffers
+ *
+ * Return: 0 if successful connect response from the firmware and able
+ * to bind and allocate ring buffers or error code on failure
+ */
+int ishtp_cl_connect(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ int rets;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
+
+ if (ishtp_cl_is_other_connecting(cl)) {
+ dev->print_log(dev, "%s() Busy\n", __func__);
+ return -EBUSY;
+ }
+
+ if (ishtp_hbm_cl_connect_req(dev, cl)) {
+ dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
+ return -ENODEV;
+ }
+
+ rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
+ (dev->dev_state == ISHTP_DEV_ENABLED &&
+ (cl->state == ISHTP_CL_CONNECTED ||
+ cl->state == ISHTP_CL_DISCONNECTED)),
+ ishtp_secs_to_jiffies(
+ ISHTP_CL_CONNECT_TIMEOUT));
+ /*
+ * If an FW reset arrived, this will happen. Don't dereference 'cl',
+ * as it may have been freed already
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
+ __func__);
+ return -EFAULT;
+ }
+
+ rets = cl->status;
+ if (rets) {
+ dev->print_log(dev, "%s() Invalid status\n", __func__);
+ return rets;
+ }
+
+ rets = ishtp_cl_device_bind(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Bind error\n", __func__);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_rx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ rets = ishtp_cl_alloc_tx_ring(cl);
+ if (rets) {
+ dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
+ /* if failed allocation, disconnect */
+ ishtp_cl_free_rx_ring(cl);
+ ishtp_cl_disconnect(cl);
+ return rets;
+ }
+
+ /* Upon successful connection and allocation, emit flow-control */
+ rets = ishtp_cl_read_start(cl);
+
+ dev->print_log(dev, "%s() successful\n", __func__);
+
+ return rets;
+}
+EXPORT_SYMBOL(ishtp_cl_connect);
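+
+/*
+ * Illustrative connect sequence (a sketch; error handling is omitted,
+ * and 'i' is assumed to be the fw client index obtained via
+ * ishtp_fw_cl_by_uuid() or ishtp_fw_cl_by_id()):
+ *
+ *	cl = ishtp_cl_allocate(dev);
+ *	rv = ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY);
+ *	cl->fw_client_id = dev->fw_clients[i].client_id;
+ *	cl->state = ISHTP_CL_CONNECTING;
+ *	rv = ishtp_cl_connect(cl);
+ */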
+
+/**
+ * ishtp_cl_read_start() - Prepare to read client message
+ * @cl: client device instance
+ *
+ * Get a free buffer from the pool of free read buffers and add it to the
+ * device read list, ready to receive contents. Send a flow control request
+ * to the firmware so that it may send the next message.
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_read_start(struct ishtp_cl *cl)
+{
+ struct ishtp_device *dev;
+ struct ishtp_cl_rb *rb;
+ int rets;
+ int i;
+ unsigned long flags;
+ unsigned long dev_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return -ENODEV;
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return -ENODEV;
+
+ i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (i < 0) {
+ dev_err(&cl->device->dev, "no such fw client %d\n",
+ cl->fw_client_id);
+ return -ENODEV;
+ }
+
+ /* The current rb is the head of the free rb list */
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ if (list_empty(&cl->free_rb_list.list)) {
+ dev_warn(&cl->device->dev,
+ "[ishtp-ish] Rx buffers pool is empty\n");
+ rets = -ENOMEM;
+ rb = NULL;
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ goto out;
+ }
+ rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+
+ rb->cl = cl;
+ rb->buf_idx = 0;
+
+ INIT_LIST_HEAD(&rb->list);
+ rets = 0;
+
+ /*
+ * This must be BEFORE sending flow control -
+ * response in ISR may come too fast...
+ */
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_add_tail(&rb->list, &dev->read_list.list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+ if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
+ rets = -ENODEV;
+ goto out;
+ }
+out:
+ /* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
+ if (rets && rb) {
+ spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
+ list_del(&rb->list);
+ spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
+
+ spin_lock_irqsave(&cl->free_list_spinlock, flags);
+ list_add_tail(&rb->list, &cl->free_rb_list.list);
+ spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
+ }
+ return rets;
+}
+
+/**
+ * ishtp_cl_send() - Send a message to firmware
+ * @cl: client device instance
+ * @buf: message buffer
+ * @length: length of message
+ *
+ * If the client is in the correct state to send a message, this function
+ * gets a buffer from the tx ring, copies the message data into it and
+ * sends the message using ishtp_cl_send_msg()
+ *
+ * Return: 0 if successful or error code on failure
+ */
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
+{
+ struct ishtp_device *dev;
+ int id;
+ struct ishtp_cl_tx_ring *cl_msg;
+ int have_msg_to_send = 0;
+ unsigned long tx_flags, tx_free_flags;
+
+ if (WARN_ON(!cl || !cl->dev))
+ return -ENODEV;
+
+ dev = cl->dev;
+
+ if (cl->state != ISHTP_CL_CONNECTED) {
+ ++cl->err_send_msg;
+ return -EPIPE;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED) {
+ ++cl->err_send_msg;
+ return -ENODEV;
+ }
+
+ /* Check if we have fw client device */
+ id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
+ if (id < 0) {
+ ++cl->err_send_msg;
+ return -ENOENT;
+ }
+
+ if (length > dev->fw_clients[id].props.max_msg_length) {
+ ++cl->err_send_msg;
+ return -EMSGSIZE;
+ }
+
+ /* No free bufs */
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ if (list_empty(&cl->tx_free_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ ++cl->err_send_msg;
+ return -ENOMEM;
+ }
+
+ cl_msg = list_first_entry(&cl->tx_free_list.list,
+ struct ishtp_cl_tx_ring, list);
+ if (!cl_msg->send_buf.data) {
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ /* Should not happen, as free list is pre-allocated */
+ return -EIO;
+ }
+ /*
+ * This is safe, as 'length' is already checked for not exceeding
+ * max ISHTP message size per client
+ */
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ memcpy(cl_msg->send_buf.data, buf, length);
+ cl_msg->send_buf.size = length;
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ have_msg_to_send = !list_empty(&cl->tx_list.list);
+ list_add_tail(&cl_msg->list, &cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
+ ishtp_cl_send_msg(dev, cl);
+
+ return 0;
+}
+EXPORT_SYMBOL(ishtp_cl_send);
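+
+/*
+ * Illustrative send sketch: the payload is copied into a pre-allocated
+ * TX ring buffer, so the caller's buffer may live on the stack; 'req'
+ * is a hypothetical request structure.
+ *
+ *	rv = ishtp_cl_send(cl, (uint8_t *)&req, sizeof(req));
+ *	if (rv)
+ *		...  wrong state, message too large, or no free TX buffer
+ */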
+
+/**
+ * ishtp_cl_read_complete() - read complete
+ * @rb: Pointer to client request block
+ *
+ * If the message is completely received, call ishtp_cl_bus_rx_event()
+ * to process it
+ */
+static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
+{
+ unsigned long flags;
+ int schedule_work_flag = 0;
+ struct ishtp_cl *cl = rb->cl;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, flags);
+ /*
+ * if in-process list is empty, then need to schedule
+ * the processing thread
+ */
+ schedule_work_flag = list_empty(&cl->in_process_list.list);
+ list_add_tail(&rb->list, &cl->in_process_list.list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
+
+ if (schedule_work_flag)
+ ishtp_cl_bus_rx_event(cl->device);
+}
+
+/**
+ * ipc_tx_callback() - IPC tx callback function
+ * @prm: Pointer to client device instance
+ *
+ * Send a message over IPC, either for the first time or from the completion
+ * callback of the previous fragment
+ */
+static void ipc_tx_callback(void *prm)
+{
+ struct ishtp_cl *cl = prm;
+ struct ishtp_cl_tx_ring *cl_msg;
+ size_t rem;
+ struct ishtp_device *dev = (cl ? cl->dev : NULL);
+ struct ishtp_msg_hdr ishtp_hdr;
+ unsigned long tx_flags, tx_free_flags;
+ unsigned char *pmsg;
+
+ if (!dev)
+ return;
+
+ /*
+ * Bail out if some critical error has
+ * occurred before this callback was called
+ */
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ return;
+
+ if (cl->state != ISHTP_CL_CONNECTED)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ if (!cl->sending) {
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_ipc_acked = 0;
+ cl->last_tx_path = CL_TX_PATH_IPC;
+ cl->sending = 1;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ cl->sending = 0;
+ list_del_init(&cl_msg->list); /* Must be before write */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
+ } else {
+ /* Send IPC fragment */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ cl->tx_offs += dev->mtu;
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ }
+}
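+
+/*
+ * Fragmentation sketch: a message larger than dev->mtu is sent in
+ * ceil(size / mtu) IPC fragments, and only the last fragment carries
+ * msg_complete = 1. For illustration only: with size = 300 and
+ * mtu = 128 the callback fires three times, sending 128 + 128 + 44
+ * bytes.
+ */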
+
+/**
+ * ishtp_cl_send_msg_ipc() - Send message using IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message over IPC not using DMA
+ */
+static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ /* If last DMA message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
+ return;
+
+ cl->tx_offs = 0;
+ ipc_tx_callback(cl);
+ ++cl->send_msg_cnt_ipc;
+}
+
+/**
+ * ishtp_cl_send_msg_dma() - Send message using DMA
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA
+ */
+static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ struct dma_xfer_hbm dma_xfer;
+ unsigned char *msg_addr;
+ int off;
+ struct ishtp_cl_tx_ring *cl_msg;
+ unsigned long tx_flags, tx_free_flags;
+
+ /* If last IPC message wasn't acked yet, leave this one in Tx queue */
+ if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
+ return;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ if (list_empty(&cl->tx_list.list)) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ return;
+ }
+
+ cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
+ list);
+
+ msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
+ if (!msg_addr) {
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+ if (dev->transfer_path == CL_TX_PATH_DEFAULT)
+ ishtp_cl_send_msg_ipc(dev, cl);
+ return;
+ }
+
+ list_del_init(&cl_msg->list); /* Must be before write */
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ --cl->ishtp_flow_ctrl_creds;
+ cl->last_dma_acked = 0;
+ cl->last_dma_addr = msg_addr;
+ cl->last_tx_path = CL_TX_PATH_DMA;
+
+ /* write msg to dma buf */
+ memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
+
+ /* send dma_xfer hbm msg */
+ off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
+ dma_xfer.hbm = DMA_XFER;
+ dma_xfer.fw_client_id = cl->fw_client_id;
+ dma_xfer.host_client_id = cl->host_client_id;
+ dma_xfer.reserved = 0;
+ dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
+ dma_xfer.msg_length = cl_msg->send_buf.size;
+ dma_xfer.reserved2 = 0;
+ ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+ ++cl->send_msg_cnt_dma;
+}
+
+/**
+ * ishtp_cl_send_msg() - Send message using DMA or IPC
+ * @dev: ISHTP device instance
+ * @cl: Pointer to client device instance
+ *
+ * Send message using DMA or IPC based on transfer_path
+ */
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ if (dev->transfer_path == CL_TX_PATH_DMA)
+ ishtp_cl_send_msg_dma(dev, cl);
+ else
+ ishtp_cl_send_msg_ipc(dev, cl);
+}
+
+/**
+ * recv_ishtp_cl_msg() - Receive client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: Pointer to message header
+ *
+ * Receive and dispatch ISHTP client messages. This function executes in ISR
+ * or work queue context
+ */
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long flags;
+ int rb_count;
+
+ if (ishtp_hdr->reserved) {
+ dev_err(dev->devc, "corrupted message header.\n");
+ goto eoi;
+ }
+
+ if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
+ dev_err(dev->devc,
+ "ISHTP message length in hdr exceeds IPC MTU\n");
+ goto eoi;
+ }
+
+ spin_lock_irqsave(&dev->read_list_spinlock, flags);
+ rb_count = -1;
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ ++rb_count;
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
+ cl->fw_client_id == ishtp_hdr->fw_addr) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /* If no Rx buffer is allocated, disband the rb */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "Rx buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+ * If the message buffer is overflown (exceeds max. client msg
+ * size), drop the message and return the buffer to the free pool.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, ishtp_hdr->length,
+ rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data + rb->buf_idx;
+ dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
+
+ rb->buf_idx += ishtp_hdr->length;
+ if (ishtp_hdr->msg_complete) {
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock(&cl->free_list_spinlock);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock(&cl->free_list_spinlock);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock(&cl->free_list_spinlock);
+ }
+ }
+ /* One more fragment in message (even if this was last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev_err(dev->devc, "Dropped Rx msg - no request\n");
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ cl = complete_rb->cl;
+ cl->ts_rx = ktime_get();
+ ++cl->recv_msg_cnt_ipc;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
+
+/**
+ * recv_ishtp_cl_msg_dma() - Receive client message
+ * @dev: ISHTP device instance
+ * @msg: message pointer
+ * @hbm: hbm buffer
+ *
+ * Receive and dispatch ISHTP client messages using DMA. This function executes
+ * in ISR or work queue context
+ */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm)
+{
+ struct ishtp_cl *cl;
+ struct ishtp_cl_rb *rb;
+ struct ishtp_cl_rb *new_rb;
+ unsigned char *buffer = NULL;
+ struct ishtp_cl_rb *complete_rb = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->read_list_spinlock, flags);
+
+ list_for_each_entry(rb, &dev->read_list.list, list) {
+ cl = rb->cl;
+ if (!cl || !(cl->host_client_id == hbm->host_client_id &&
+ cl->fw_client_id == hbm->fw_client_id) ||
+ !(cl->state == ISHTP_CL_CONNECTED))
+ continue;
+
+ /*
+ * If no Rx buffer is allocated, disband the rb
+ */
+ if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "response buffer is not allocated.\n");
+ list_del(&rb->list);
+ ishtp_io_rb_free(rb);
+ cl->status = -ENOMEM;
+ goto eoi;
+ }
+
+ /*
+ * If the message buffer is overflown (exceeds max. client msg
+ * size), drop the message and return the buffer to the free pool.
+ * Do we need to disconnect such a client? (We don't send
+ * back FC, so communication will be stuck anyway)
+ */
+ if (rb->buffer.size < hbm->msg_length) {
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ dev_err(&cl->device->dev,
+ "message overflow. size %d len %d idx %ld\n",
+ rb->buffer.size, hbm->msg_length, rb->buf_idx);
+ list_del(&rb->list);
+ ishtp_cl_io_rb_recycle(rb);
+ cl->status = -EIO;
+ goto eoi;
+ }
+
+ buffer = rb->buffer.data;
+ memcpy(buffer, msg, hbm->msg_length);
+ rb->buf_idx = hbm->msg_length;
+
+ /* Last fragment in message - it's complete */
+ cl->status = 0;
+ list_del(&rb->list);
+ complete_rb = rb;
+
+ --cl->out_flow_ctrl_creds;
+ /*
+ * the whole msg arrived, send a new FC, and add a new
+ * rb buffer for the next coming msg
+ */
+ spin_lock(&cl->free_list_spinlock);
+
+ if (!list_empty(&cl->free_rb_list.list)) {
+ new_rb = list_entry(cl->free_rb_list.list.next,
+ struct ishtp_cl_rb, list);
+ list_del_init(&new_rb->list);
+ spin_unlock(&cl->free_list_spinlock);
+ new_rb->cl = cl;
+ new_rb->buf_idx = 0;
+ INIT_LIST_HEAD(&new_rb->list);
+ list_add_tail(&new_rb->list,
+ &dev->read_list.list);
+
+ ishtp_hbm_cl_flow_control_req(dev, cl);
+ } else {
+ spin_unlock(&cl->free_list_spinlock);
+ }
+
+ /* One more fragment in message (this is always last) */
+ ++cl->recv_msg_num_frags;
+
+ /*
+ * We can safely break here (and in BH too),
+ * a single input message can go only to a single request!
+ */
+ break;
+ }
+
+ spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
+ /* If it's nobody's message, just read and discard it */
+ if (!buffer) {
+ dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
+ goto eoi;
+ }
+
+ if (complete_rb) {
+ cl = complete_rb->cl;
+ cl->ts_rx = ktime_get();
+ ++cl->recv_msg_cnt_dma;
+ ishtp_cl_read_complete(complete_rb);
+ }
+eoi:
+ return;
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
new file mode 100644
index 00000000000000..79eade547f5db5
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -0,0 +1,182 @@
+/*
+ * ISHTP client logic
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_CLIENT_H_
+#define _ISHTP_CLIENT_H_
+
+#include <linux/types.h>
+#include "ishtp-dev.h"
+
+/* Client state */
+enum cl_state {
+ ISHTP_CL_INITIALIZING = 0,
+ ISHTP_CL_CONNECTING,
+ ISHTP_CL_CONNECTED,
+ ISHTP_CL_DISCONNECTING,
+ ISHTP_CL_DISCONNECTED
+};
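+
+/*
+ * Typical state flow, as driven by client.c: ISHTP_CL_INITIALIZING
+ * (after ishtp_cl_link) -> ISHTP_CL_CONNECTING (set by the caller
+ * before ishtp_cl_connect) -> ISHTP_CL_CONNECTED ->
+ * ISHTP_CL_DISCONNECTING (set before ishtp_cl_disconnect) ->
+ * ISHTP_CL_DISCONNECTED.
+ */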
+
+/* Tx and Rx ring size */
+#define CL_DEF_RX_RING_SIZE 2
+#define CL_DEF_TX_RING_SIZE 2
+#define CL_MAX_RX_RING_SIZE 32
+#define CL_MAX_TX_RING_SIZE 32
+
+#define DMA_SLOT_SIZE 4096
+/* Number of IPC fragments after which it's worth sending via DMA */
+#define DMA_WORTH_THRESHOLD 3
+
+/* DMA/IPC Tx paths. Anything other than the default means enforcement */
+#define CL_TX_PATH_DEFAULT 0
+#define CL_TX_PATH_IPC 1
+#define CL_TX_PATH_DMA 2
+
+/* Client Tx buffer list entry */
+struct ishtp_cl_tx_ring {
+ struct list_head list;
+ struct ishtp_msg_data send_buf;
+};
+
+/* ISHTP client instance */
+struct ishtp_cl {
+ struct list_head link;
+ struct ishtp_device *dev;
+ enum cl_state state;
+ int status;
+
+ /* Link to ISHTP bus device */
+ struct ishtp_cl_device *device;
+
+ /* ID of client connected */
+ uint8_t host_client_id;
+ uint8_t fw_client_id;
+ uint8_t ishtp_flow_ctrl_creds;
+ uint8_t out_flow_ctrl_creds;
+
+ /* dma */
+ int last_tx_path;
+ /* 0: ack wasn't received, 1: ack was received */
+ int last_dma_acked;
+ unsigned char *last_dma_addr;
+ /* 0: ack wasn't received, 1: ack was received */
+ int last_ipc_acked;
+
+ /* Rx ring buffer pool */
+ unsigned int rx_ring_size;
+ struct ishtp_cl_rb free_rb_list;
+ spinlock_t free_list_spinlock;
+ /* Rx in-process list */
+ struct ishtp_cl_rb in_process_list;
+ spinlock_t in_process_spinlock;
+
+ /* Client Tx buffers list */
+ unsigned int tx_ring_size;
+ struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ spinlock_t tx_list_spinlock;
+ spinlock_t tx_free_list_spinlock;
+ size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
+
+ /*
+ * If we get an FC and the list is not empty, we must know whether we
+ * are in the middle of sending: if so, we need to increase the FC
+ * counter; otherwise, we need to start sending the first msg in the
+ * list.
+ * (!) This is for the counting-FC implementation only. With single-FC
+ * the other party may NOT send an FC until it receives the complete
+ * message
+ */
+ int sending;
+
+ /* Send FC spinlock */
+ spinlock_t fc_spinlock;
+
+ /* wait queue for connect and disconnect response from FW */
+ wait_queue_head_t wait_ctrl_res;
+
+ /* Error stats */
+ unsigned int err_send_msg;
+ unsigned int err_send_fc;
+
+ /* Send/recv stats */
+ unsigned int send_msg_cnt_ipc;
+ unsigned int send_msg_cnt_dma;
+ unsigned int recv_msg_cnt_ipc;
+ unsigned int recv_msg_cnt_dma;
+ unsigned int recv_msg_num_frags;
+ unsigned int ishtp_flow_ctrl_cnt;
+ unsigned int out_flow_ctrl_cnt;
+
+ /* Rx msg ... out FC timing */
+ ktime_t ts_rx;
+ ktime_t ts_out_fc;
+ ktime_t ts_max_fc_delay;
+ void *client_data;
+};
+
+/* Client connection management internal functions */
+int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, uuid_le *uuid);
+int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
+void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
+void recv_ishtp_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+int ishtp_cl_read_start(struct ishtp_cl *cl);
+
+/* Ring Buffer I/F */
+int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
+int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
+void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+
+/* DMA I/F functions */
+void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
+ struct dma_xfer_hbm *hbm);
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev);
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev);
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size);
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size);
+
+/* Request blocks alloc/free I/F */
+struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl);
+void ishtp_io_rb_free(struct ishtp_cl_rb *priv_rb);
+int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);
+
+/**
+ * ishtp_cl_cmp_id - tell whether two client instances have the same id
+ *
+ * Return: true if the ids are the same and not NULL
+ */
+static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
+ const struct ishtp_cl *cl2)
+{
+ return cl1 && cl2 &&
+ (cl1->host_client_id == cl2->host_client_id) &&
+ (cl1->fw_client_id == cl2->fw_client_id);
+}
+
+/* exported functions from ISHTP under client management scope */
+struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev);
+void ishtp_cl_free(struct ishtp_cl *cl);
+int ishtp_cl_link(struct ishtp_cl *cl, int id);
+void ishtp_cl_unlink(struct ishtp_cl *cl);
+int ishtp_cl_disconnect(struct ishtp_cl *cl);
+int ishtp_cl_connect(struct ishtp_cl *cl);
+int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
+int ishtp_cl_flush_queues(struct ishtp_cl *cl);
+
+/* exported functions from ISHTP client buffer management scope */
+int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+
+#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/dma-if.c b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
new file mode 100644
index 00000000000000..2783f366611496
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/dma-if.c
@@ -0,0 +1,175 @@
+/*
+ * ISHTP DMA I/F functions
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include "ishtp-dev.h"
+#include "client.h"
+
+/**
+ * ishtp_cl_alloc_dma_buf() - Allocate DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Allocate the RX and TX DMA buffers once during bus setup.
+ * It allocates a 1 MB buffer each for RX and TX, which are
+ * divided into slots.
+ */
+void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf)
+ return;
+
+ dev->ishtp_host_dma_tx_buf_size = 1024*1024;
+ dev->ishtp_host_dma_rx_buf_size = 1024*1024;
+
+ /* Allocate Tx buffer and init usage bitmap */
+ dev->ishtp_host_dma_tx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_tx_buf_size,
+ &h, GFP_KERNEL);
+ if (dev->ishtp_host_dma_tx_buf)
+ dev->ishtp_host_dma_tx_buf_phys = h;
+
+ dev->ishtp_dma_num_slots = dev->ishtp_host_dma_tx_buf_size /
+ DMA_SLOT_SIZE;
+
+ dev->ishtp_dma_tx_map = kcalloc(dev->ishtp_dma_num_slots,
+ sizeof(uint8_t),
+ GFP_KERNEL);
+ spin_lock_init(&dev->ishtp_dma_tx_lock);
+
+ /* Allocate Rx buffer */
+ dev->ishtp_host_dma_rx_buf = dma_alloc_coherent(dev->devc,
+ dev->ishtp_host_dma_rx_buf_size,
+ &h, GFP_KERNEL);
+
+ if (dev->ishtp_host_dma_rx_buf)
+ dev->ishtp_host_dma_rx_buf_phys = h;
+}
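+
+/*
+ * Sizing note: with a 1 MB TX buffer and DMA_SLOT_SIZE of 4096 bytes,
+ * ishtp_dma_num_slots comes out to 256 slots, each tracked by one byte
+ * in ishtp_dma_tx_map.
+ */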
+
+/**
+ * ishtp_cl_free_dma_buf() - Free DMA RX and TX buffer
+ * @dev: ishtp device
+ *
+ * Free the DMA buffers when all clients are released. This
+ * only happens during the error path in the ISH built-in
+ * driver model
+ */
+void ishtp_cl_free_dma_buf(struct ishtp_device *dev)
+{
+ dma_addr_t h;
+
+ if (dev->ishtp_host_dma_tx_buf) {
+ h = dev->ishtp_host_dma_tx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_tx_buf_size,
+ dev->ishtp_host_dma_tx_buf, h);
+ }
+
+ if (dev->ishtp_host_dma_rx_buf) {
+ h = dev->ishtp_host_dma_rx_buf_phys;
+ dma_free_coherent(dev->devc, dev->ishtp_host_dma_rx_buf_size,
+ dev->ishtp_host_dma_rx_buf, h);
+ }
+
+ kfree(dev->ishtp_dma_tx_map);
+ dev->ishtp_host_dma_tx_buf = NULL;
+ dev->ishtp_host_dma_rx_buf = NULL;
+ dev->ishtp_dma_tx_map = NULL;
+}
+
+/**
+ * ishtp_cl_get_dma_send_buf() - Get a DMA memory slot
+ * @dev: ishtp device
+ * @size: Size of memory to get
+ *
+ * Find and return a free address of "size" bytes in the dma tx buffer.
+ * The function will mark this address as "in use".
+ *
+ * Return: NULL when there is no free buffer, else the address to copy to
+ */
+void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
+ uint32_t size)
+{
+ unsigned long flags;
+ int i, j, free;
+ /* an additional slot is needed if there is a remainder */
+ int required_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
+ free = 1;
+ for (j = 0; j < required_slots; j++)
+ if (dev->ishtp_dma_tx_map[i+j]) {
+ free = 0;
+ i += j;
+ break;
+ }
+ if (free) {
+ /* mark memory as "caught" */
+ for (j = 0; j < required_slots; j++)
+ dev->ishtp_dma_tx_map[i+j] = 1;
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ return (i * DMA_SLOT_SIZE) +
+ (unsigned char *)dev->ishtp_host_dma_tx_buf;
+ }
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "No free DMA buffer to send msg\n");
+ return NULL;
+}
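+
+/*
+ * Slot-accounting sketch: the required_slots computation above rounds
+ * up, equivalent to DIV_ROUND_UP(size, DMA_SLOT_SIZE). E.g. a
+ * 10000-byte message with 4096-byte slots needs 10000/4096 + 1 = 3
+ * slots, the last one only partially used.
+ */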
+
+/**
+ * ishtp_cl_release_dma_acked_mem() - Release DMA memory slot
+ * @dev: ishtp device
+ * @msg_addr: message address of slot
+ * @size: Size of memory to release
+ *
+ * Returns the acked memory (size bytes, starting at msg_addr) to the
+ * free list.
+ */
+void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
+ void *msg_addr,
+ uint8_t size)
+{
+ unsigned long flags;
+ int acked_slots = (size / DMA_SLOT_SIZE)
+ + 1 * (size % DMA_SLOT_SIZE != 0);
+ int i, j;
+
+ if ((msg_addr - dev->ishtp_host_dma_tx_buf) % DMA_SLOT_SIZE) {
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+
+ i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
+ spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
+ for (j = 0; j < acked_slots; j++) {
+ if ((i + j) >= dev->ishtp_dma_num_slots ||
+ !dev->ishtp_dma_tx_map[i+j]) {
+ /* no such slot, or memory is already free */
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+ dev_err(dev->devc, "Bad DMA Tx ack address\n");
+ return;
+ }
+ dev->ishtp_dma_tx_map[i+j] = 0;
+ }
+ spin_unlock_irqrestore(&dev->ishtp_dma_tx_lock, flags);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.c b/drivers/hid/intel-ish-hid/ishtp/hbm.c
new file mode 100644
index 00000000000000..9fa351f7d4158d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.c
@@ -0,0 +1,1024 @@
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_hbm_fw_cl_allocate() - Allocate FW clients
+ * @dev: ISHTP device instance
+ *
+ * Allocates storage for fw clients
+ */
+static void ishtp_hbm_fw_cl_allocate(struct ishtp_device *dev)
+{
+ struct ishtp_fw_client *clients;
+ int b;
+
+ /* count how many ISH clients we have */
+ for_each_set_bit(b, dev->fw_clients_map, ISHTP_CLIENTS_MAX)
+ dev->fw_clients_num++;
+
+ if (dev->fw_clients_num <= 0)
+ return;
+
+ /* allocate storage for fw clients representation */
+ clients = kcalloc(dev->fw_clients_num, sizeof(struct ishtp_fw_client),
+ GFP_KERNEL);
+ if (!clients) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ ish_hw_reset(dev);
+ return;
+ }
+ dev->fw_clients = clients;
+}
+
+/**
+ * ishtp_hbm_cl_hdr() - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ *
+ * Initialize HBM buffer
+ */
+static inline void ishtp_hbm_cl_hdr(struct ishtp_cl *cl, uint8_t hbm_cmd,
+ void *buf, size_t len)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ memset(cmd, 0, len);
+
+ cmd->hbm_cmd = hbm_cmd;
+ cmd->host_addr = cl->host_client_id;
+ cmd->fw_addr = cl->fw_client_id;
+}
+
+/**
+ * ishtp_hbm_cl_addr_equal() - Compare client address
+ * @cl: client
+ * @buf: Client command buffer
+ *
+ * Compare client address with the address in command buffer
+ *
+ * Return: True if they have the same address
+ */
+static inline bool ishtp_hbm_cl_addr_equal(struct ishtp_cl *cl, void *buf)
+{
+ struct ishtp_hbm_cl_cmd *cmd = buf;
+
+ return cl->host_client_id == cmd->host_addr &&
+ cl->fw_client_id == cmd->fw_addr;
+}
+
+/**
+ * ishtp_hbm_start_wait() - Wait for HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Wait for HBM start message from firmware
+ *
+ * Return: 0 if HBM start is/was received else timeout error
+ */
+int ishtp_hbm_start_wait(struct ishtp_device *dev)
+{
+ int ret;
+
+ if (dev->hbm_state > ISHTP_HBM_START)
+ return 0;
+
+ dev_dbg(dev->devc, "Going to wait for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+ ret = wait_event_interruptible_timeout(dev->wait_hbm_recvd_msg,
+ dev->hbm_state >= ISHTP_HBM_STARTED,
+ (ISHTP_INTEROP_TIMEOUT * HZ));
+
+ dev_dbg(dev->devc,
+ "Woke up from waiting for ishtp start. hbm_state=%08X\n",
+ dev->hbm_state);
+
+ if (ret <= 0 && (dev->hbm_state <= ISHTP_HBM_START)) {
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ dev_err(dev->devc,
+ "waiting for ishtp start failed. ret=%d hbm_state=%08X\n",
+ ret, dev->hbm_state);
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/**
+ * ishtp_hbm_start_req() - Send HBM start message
+ * @dev: ISHTP device instance
+ *
+ * Send HBM start message to firmware
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_start_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_host_version_request *start_req;
+ const size_t len = sizeof(struct hbm_host_version_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+
+ /* host start message */
+ start_req = (struct hbm_host_version_request *)data;
+ memset(start_req, 0, len);
+ start_req->hbm_cmd = HOST_START_REQ_CMD;
+ start_req->host_version.major_version = HBM_MAJOR_VERSION;
+ start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+ /*
+ * (!) Response to HBM start may be so quick that this thread would get
+ * preempted BEFORE managing to set hbm_state = ISHTP_HBM_START.
+ * So set it at first, change back to ISHTP_HBM_IDLE upon failure
+ */
+ dev->hbm_state = ISHTP_HBM_START;
+ if (ishtp_write_message(dev, ishtp_hdr, data)) {
+ dev_err(dev->devc, "version message send failed\n");
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev->hbm_state = ISHTP_HBM_IDLE;
+ ish_hw_reset(dev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
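+
+/*
+ * For orientation, the handshake this kicks off proceeds (per the code
+ * in this file) as: HOST_START_REQ_CMD -> start response, then
+ * HOST_ENUM_REQ_CMD -> enum response, then one
+ * HOST_CLIENT_PROPERTIES_REQ_CMD per enumerated client; once all
+ * properties are in, hbm_state becomes ISHTP_HBM_WORKING and dev_state
+ * becomes ISHTP_DEV_ENABLED.
+ */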
+
+/**
+ * ishtp_hbm_enum_clients_req() - Send client enum req
+ * @dev: ISHTP device instance
+ *
+ * Send client enumeration request message
+ */
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_host_enum_request *enum_req;
+ const size_t len = sizeof(struct hbm_host_enum_request);
+
+ /* enumerate clients */
+ ishtp_hbm_hdr(ishtp_hdr, len);
+
+ enum_req = (struct hbm_host_enum_request *)data;
+ memset(enum_req, 0, len);
+ enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+ if (ishtp_write_message(dev, ishtp_hdr, data)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "enumeration request send failed\n");
+ ish_hw_reset(dev);
+ }
+ dev->hbm_state = ISHTP_HBM_ENUM_CLIENTS;
+}
+
+/**
+ * ishtp_hbm_prop_req() - Request client properties
+ * @dev: ISHTP device instance
+ *
+ * Request properties for a single client
+ *
+ * Return: 0 if success else error code
+ */
+static int ishtp_hbm_prop_req(struct ishtp_device *dev)
+{
+
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_props_request *prop_req;
+ const size_t len = sizeof(struct hbm_props_request);
+ unsigned long next_client_index;
+ uint8_t client_num;
+
+ client_num = dev->fw_client_presentation_num;
+
+ next_client_index = find_next_bit(dev->fw_clients_map,
+ ISHTP_CLIENTS_MAX, dev->fw_client_index);
+
+ /* We got all client properties */
+ if (next_client_index == ISHTP_CLIENTS_MAX) {
+ dev->hbm_state = ISHTP_HBM_WORKING;
+ dev->dev_state = ISHTP_DEV_ENABLED;
+
+ for (dev->fw_client_presentation_num = 1;
+ dev->fw_client_presentation_num < client_num + 1;
+ ++dev->fw_client_presentation_num)
+ /* Add new client device */
+ ishtp_bus_new_client(dev);
+ return 0;
+ }
+
+ dev->fw_clients[client_num].client_id = next_client_index;
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ prop_req = (struct hbm_props_request *)data;
+
+ memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+ prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+ prop_req->address = next_client_index;
+
+ if (ishtp_write_message(dev, ishtp_hdr, data)) {
+ dev->dev_state = ISHTP_DEV_RESETTING;
+ dev_err(dev->devc, "properties request send failed\n");
+ ish_hw_reset(dev);
+ return -EIO;
+ }
+
+ dev->fw_client_index = next_client_index;
+
+ return 0;
+}
+
+/**
+ * ishtp_hbm_stop_req() - Send HBM stop
+ * @dev: ISHTP device instance
+ *
+ * Send stop request message
+ */
+static void ishtp_hbm_stop_req(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ struct hbm_host_stop_request *req;
+ const size_t len = sizeof(struct hbm_host_stop_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ req = (struct hbm_host_stop_request *)data;
+
+ memset(req, 0, sizeof(struct hbm_host_stop_request));
+ req->hbm_cmd = HOST_STOP_REQ_CMD;
+ req->reason = DRIVER_STOP_REQUEST;
+
+ ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/**
+ * ishtp_hbm_cl_flow_control_req() - Send flow control request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send flow control request
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ const size_t len = sizeof(struct hbm_flow_control);
+ int rv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cl->fc_spinlock, flags);
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ ishtp_hbm_cl_hdr(cl, ISHTP_FLOW_CONTROL_CMD, data, len);
+
+ /*
+ * Sync possible race when RB recycle and packet receive paths
+ * both try to send an out FC
+ */
+ if (cl->out_flow_ctrl_creds) {
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return 0;
+ }
+
+ cl->recv_msg_num_frags = 0;
+
+ rv = ishtp_write_message(dev, ishtp_hdr, data);
+ if (!rv) {
+ ++cl->out_flow_ctrl_creds;
+ ++cl->out_flow_ctrl_cnt;
+ cl->ts_out_fc = ktime_get();
+ if (cl->ts_rx.tv64) {
+ ktime_t ts_diff = ktime_sub(cl->ts_out_fc, cl->ts_rx);
+ if (ktime_after(ts_diff, cl->ts_max_fc_delay))
+ cl->ts_max_fc_delay = ts_diff;
+ }
+ } else {
+ ++cl->err_send_fc;
+ }
+
+ spin_unlock_irqrestore(&cl->fc_spinlock, flags);
+ return rv;
+}
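
The credit test above makes outbound flow control effectively
single-credit: once a credit has been granted, neither the RB-recycle
path nor the packet-receive path can grant a second one, and the credit
is consumed again in the receive path (not shown here). A hedged
standalone sketch of that rule:

	#include <stdio.h>

	static int out_flow_ctrl_creds;	/* 0 or 1, as in struct ishtp_cl */

	/* grant a receive credit to the peer unless one is outstanding */
	static void send_out_fc(void)
	{
		if (out_flow_ctrl_creds)
			return;			/* duplicate: suppressed */
		out_flow_ctrl_creds = 1;	/* FC message "sent" */
		printf("FC sent\n");
	}

	/* the peer consumed the credit by sending us one message */
	static void msg_received(void)
	{
		out_flow_ctrl_creds = 0;
	}

	int main(void)
	{
		send_out_fc();	/* grants */
		send_out_fc();	/* racing second caller: no-op */
		msg_received();
		send_out_fc();	/* grants again */
		return 0;
	}
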
+
+/**
+ * ishtp_hbm_cl_disconnect_req() - Send disconnect request
+ * @dev: ISHTP device instance
+ * @cl: ISHTP client instance
+ *
+ * Send disconnect message to fw
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, data, len);
+
+ return ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/**
+ * ishtp_hbm_cl_disconnect_res() - Get disconnect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received disconnect response from fw
+ */
+static void ishtp_hbm_cl_disconnect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (!rs->status && ishtp_hbm_cl_addr_equal(cl, rs)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_cl_connect_req() - Send connect request
+ * @dev: ISHTP device instance
+ * @cl: client device instance
+ *
+ * Send connection request to specific fw client
+ *
+ * Return: 0 if success else error code
+ */
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl)
+{
+ struct ishtp_msg_hdr hdr;
+ unsigned char data[128];
+ struct ishtp_msg_hdr *ishtp_hdr = &hdr;
+ const size_t len = sizeof(struct hbm_client_connect_request);
+
+ ishtp_hbm_hdr(ishtp_hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, data, len);
+
+ return ishtp_write_message(dev, ishtp_hdr, data);
+}
+
+/**
+ * ishtp_hbm_cl_connect_res() - Get connect response
+ * @dev: ISHTP device instance
+ * @rs: Response message
+ *
+ * Received connect response from fw
+ */
+static void ishtp_hbm_cl_connect_res(struct ishtp_device *dev,
+ struct hbm_client_connect_response *rs)
+{
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, rs)) {
+ if (!rs->status) {
+ cl->state = ISHTP_CL_CONNECTED;
+ cl->status = 0;
+ } else {
+ cl->state = ISHTP_CL_DISCONNECTED;
+ cl->status = -ENODEV;
+ }
+ wake_up_interruptible(&cl->wait_ctrl_res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_fw_disconnect_req() - Receive disconnect request
+ * @dev: ISHTP device instance
+ * @disconnect_req: disconnect request structure
+ *
+ * Disconnect request bus message from the fw. Send disconnect response.
+ */
+static void ishtp_hbm_fw_disconnect_req(struct ishtp_device *dev,
+ struct hbm_client_connect_request *disconnect_req)
+{
+ struct ishtp_cl *cl;
+ const size_t len = sizeof(struct hbm_client_connect_response);
+ unsigned long flags;
+ struct ishtp_msg_hdr hdr;
+	unsigned char data[4]; /* disconnect response is 4 bytes */
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (ishtp_hbm_cl_addr_equal(cl, disconnect_req)) {
+ cl->state = ISHTP_CL_DISCONNECTED;
+
+ /* send disconnect response */
+ ishtp_hbm_hdr(&hdr, len);
+ ishtp_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD, data,
+ len);
+ ishtp_write_message(dev, &hdr, data);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+}
+
+/**
+ * ishtp_hbm_dma_xfer_ack() - Receive transfer ACK
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ack for ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer_ack(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *)&dev->ishtp_msg_hdr;
+ unsigned int msg_offs;
+ struct ishtp_cl *cl;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_tx_buf_phys;
+ if (offs > dev->ishtp_host_dma_tx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Tx ack message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_tx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Tx ack message size\n");
+ return;
+ }
+
+ /* logical address of the acked mem */
+ msg = (unsigned char *)dev->ishtp_host_dma_tx_buf + offs;
+ ishtp_cl_release_dma_acked_mem(dev, msg, dma_xfer->msg_length);
+
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->fw_client_id == dma_xfer->fw_client_id &&
+ cl->host_client_id == dma_xfer->host_client_id)
+ /*
+			 * A single ack may be sent over several DMA
+			 * transfers, and the last msg addr may lie
+			 * inside the acked memory rather than at its
+			 * start
+ */
+ if (cl->last_dma_addr >=
+ (unsigned char *)msg &&
+ cl->last_dma_addr <
+ (unsigned char *)msg +
+ dma_xfer->msg_length) {
+ cl->last_dma_acked = 1;
+
+ if (!list_empty(&cl->tx_list.list) &&
+ cl->ishtp_flow_ctrl_creds) {
+ /*
+ * start sending the first msg
+ */
+ ishtp_cl_send_msg(dev, cl);
+ }
+ }
+ }
+ ++dma_xfer;
+ }
+}
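
Both DMA handlers validate each entry the same way: translate the
firmware-supplied physical address into an offset inside the host
buffer, then reject entries whose offset or length escapes it. A
standalone sketch of that check (the explicit underflow test is added
here for clarity; in the driver an address below the buffer base wraps
to a huge unsigned offset and fails the first test anyway):

	#include <stdint.h>
	#include <stdbool.h>

	/*
	 * True when [msg_addr, msg_addr + msg_len) lies entirely inside
	 * the host DMA buffer of buf_size bytes starting at buf_phys.
	 */
	static bool dma_entry_valid(uint64_t buf_phys, uint64_t buf_size,
				    uint64_t msg_addr, uint32_t msg_len)
	{
		uint64_t offs = msg_addr - buf_phys;

		if (msg_addr < buf_phys || offs > buf_size)
			return false;		/* bad address */
		if (msg_len > buf_size - offs)
			return false;		/* bad size */
		return true;
	}
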
+
+/**
+ * ishtp_hbm_dma_xfer() - Receive DMA transfer message
+ * @dev: ISHTP device instance
+ * @dma_xfer: HBM transfer message
+ *
+ * Receive ISHTP-over-DMA client message
+ */
+static void ishtp_hbm_dma_xfer(struct ishtp_device *dev,
+ struct dma_xfer_hbm *dma_xfer)
+{
+ void *msg;
+ uint64_t offs;
+ struct ishtp_msg_hdr hdr;
+ struct ishtp_msg_hdr *ishtp_hdr =
+ (struct ishtp_msg_hdr *) &dev->ishtp_msg_hdr;
+ struct dma_xfer_hbm *prm = dma_xfer;
+ unsigned int msg_offs;
+
+ for (msg_offs = 0; msg_offs < ishtp_hdr->length;
+ msg_offs += sizeof(struct dma_xfer_hbm)) {
+
+ offs = dma_xfer->msg_addr - dev->ishtp_host_dma_rx_buf_phys;
+ if (offs > dev->ishtp_host_dma_rx_buf_size) {
+ dev_err(dev->devc, "Bad DMA Rx message address\n");
+ return;
+ }
+ if (dma_xfer->msg_length >
+ dev->ishtp_host_dma_rx_buf_size - offs) {
+ dev_err(dev->devc, "Bad DMA Rx message size\n");
+ return;
+ }
+ msg = dev->ishtp_host_dma_rx_buf + offs;
+ recv_ishtp_cl_msg_dma(dev, msg, dma_xfer);
+ dma_xfer->hbm = DMA_XFER_ACK; /* Prepare for response */
+ ++dma_xfer;
+ }
+
+ /* Send DMA_XFER_ACK [...] */
+ ishtp_hbm_hdr(&hdr, ishtp_hdr->length);
+ ishtp_write_message(dev, &hdr, (unsigned char *)prm);
+}
+
+/**
+ * ishtp_hbm_dispatch() - HBM dispatch function
+ * @dev: ISHTP device instance
+ * @hdr: bus message
+ *
+ * Bottom half read routine, called after the ISR, that handles
+ * processing of received bus message commands
+ */
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr)
+{
+ struct ishtp_bus_message *ishtp_msg;
+ struct ishtp_fw_client *fw_client;
+ struct hbm_host_version_response *version_res;
+ struct hbm_client_connect_response *connect_res;
+ struct hbm_client_connect_response *disconnect_res;
+ struct hbm_client_connect_request *disconnect_req;
+ struct hbm_props_response *props_res;
+ struct hbm_host_enum_response *enum_res;
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct dma_alloc_notify dma_alloc_notify;
+ struct dma_xfer_hbm *dma_xfer;
+
+ ishtp_msg = hdr;
+
+ switch (ishtp_msg->hbm_cmd) {
+ case HOST_START_RES_CMD:
+ version_res = (struct hbm_host_version_response *)ishtp_msg;
+ if (!version_res->host_version_supported) {
+ dev->version = version_res->fw_max_version;
+
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ ishtp_hbm_stop_req(dev);
+ return;
+ }
+
+ dev->version.major_version = HBM_MAJOR_VERSION;
+ dev->version.minor_version = HBM_MINOR_VERSION;
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_START) {
+ dev->hbm_state = ISHTP_HBM_STARTED;
+ ishtp_hbm_enum_clients_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: wrong host start response\n");
+ /* BUG: why do we arrive here? */
+ ish_hw_reset(dev);
+ return;
+ }
+
+ wake_up_interruptible(&dev->wait_hbm_recvd_msg);
+ break;
+
+ case CLIENT_CONNECT_RES_CMD:
+ connect_res = (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_connect_res(dev, connect_res);
+ break;
+
+ case CLIENT_DISCONNECT_RES_CMD:
+ disconnect_res =
+ (struct hbm_client_connect_response *)ishtp_msg;
+ ishtp_hbm_cl_disconnect_res(dev, disconnect_res);
+ break;
+
+ case HOST_CLIENT_PROPERTIES_RES_CMD:
+ props_res = (struct hbm_props_response *)ishtp_msg;
+ fw_client = &dev->fw_clients[dev->fw_client_presentation_num];
+
+ if (props_res->status || !dev->fw_clients) {
+ dev_err(dev->devc,
+ "reset: properties response hbm wrong status\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (fw_client->client_id != props_res->address) {
+ dev_err(dev->devc,
+ "reset: host properties response address mismatch [%02X %02X]\n",
+ fw_client->client_id, props_res->address);
+ ish_hw_reset(dev);
+ return;
+ }
+
+ if (dev->dev_state != ISHTP_DEV_INIT_CLIENTS ||
+ dev->hbm_state != ISHTP_HBM_CLIENT_PROPERTIES) {
+ dev_err(dev->devc,
+ "reset: unexpected properties response\n");
+ ish_hw_reset(dev);
+ return;
+ }
+
+ fw_client->props = props_res->client_properties;
+ dev->fw_client_index++;
+ dev->fw_client_presentation_num++;
+
+ /* request property for the next client */
+ ishtp_hbm_prop_req(dev);
+
+ if (dev->dev_state != ISHTP_DEV_ENABLED)
+ break;
+
+ if (!ishtp_use_dma_transfer())
+ break;
+
+ dev_dbg(dev->devc, "Requesting to use DMA\n");
+ ishtp_cl_alloc_dma_buf(dev);
+ if (dev->ishtp_host_dma_rx_buf) {
+ const size_t len = sizeof(dma_alloc_notify);
+
+ memset(&dma_alloc_notify, 0, sizeof(dma_alloc_notify));
+ dma_alloc_notify.hbm = DMA_BUFFER_ALLOC_NOTIFY;
+ dma_alloc_notify.buf_size =
+ dev->ishtp_host_dma_rx_buf_size;
+ dma_alloc_notify.buf_address =
+ dev->ishtp_host_dma_rx_buf_phys;
+ ishtp_hbm_hdr(&ishtp_hdr, len);
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&dma_alloc_notify);
+ }
+
+ break;
+
+ case HOST_ENUM_RES_CMD:
+ enum_res = (struct hbm_host_enum_response *) ishtp_msg;
+ memcpy(dev->fw_clients_map, enum_res->valid_addresses, 32);
+ if (dev->dev_state == ISHTP_DEV_INIT_CLIENTS &&
+ dev->hbm_state == ISHTP_HBM_ENUM_CLIENTS) {
+ dev->fw_client_presentation_num = 0;
+ dev->fw_client_index = 0;
+
+ ishtp_hbm_fw_cl_allocate(dev);
+ dev->hbm_state = ISHTP_HBM_CLIENT_PROPERTIES;
+
+ /* first property request */
+ ishtp_hbm_prop_req(dev);
+ } else {
+ dev_err(dev->devc,
+ "reset: unexpected enumeration response hbm\n");
+ ish_hw_reset(dev);
+ return;
+ }
+ break;
+
+ case HOST_STOP_RES_CMD:
+ if (dev->hbm_state != ISHTP_HBM_STOPPED)
+ dev_err(dev->devc, "unexpected stop response\n");
+
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ dev_info(dev->devc, "reset: FW stop response\n");
+ ish_hw_reset(dev);
+ break;
+
+ case CLIENT_DISCONNECT_REQ_CMD:
+ /* search for client */
+ disconnect_req =
+ (struct hbm_client_connect_request *)ishtp_msg;
+ ishtp_hbm_fw_disconnect_req(dev, disconnect_req);
+ break;
+
+ case FW_STOP_REQ_CMD:
+ dev->hbm_state = ISHTP_HBM_STOPPED;
+ break;
+
+ case DMA_BUFFER_ALLOC_RESPONSE:
+ dev->ishtp_host_dma_enabled = 1;
+ break;
+
+ case DMA_XFER:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled) {
+ dev_err(dev->devc,
+ "DMA XFER requested but DMA is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer(dev, dma_xfer);
+ break;
+
+ case DMA_XFER_ACK:
+ dma_xfer = (struct dma_xfer_hbm *)ishtp_msg;
+ if (!dev->ishtp_host_dma_enabled ||
+ !dev->ishtp_host_dma_tx_buf) {
+ dev_err(dev->devc,
+ "DMA XFER acked but DMA Tx is not enabled\n");
+ break;
+ }
+ ishtp_hbm_dma_xfer_ack(dev, dma_xfer);
+ break;
+
+ default:
+ dev_err(dev->devc, "unknown HBM: %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+
+ break;
+ }
+}
+
+/**
+ * bh_hbm_work_fn() - HBM work function
+ * @work: work struct
+ *
+ * Bottom half work function (used instead of a threaded IRQ handler)
+ * for processing HBM messages
+ */
+void bh_hbm_work_fn(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ishtp_device *dev;
+ unsigned char hbm[IPC_PAYLOAD_SIZE];
+
+ dev = container_of(work, struct ishtp_device, bh_hbm_work);
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if (dev->rd_msg_fifo_head != dev->rd_msg_fifo_tail) {
+ memcpy(hbm, dev->rd_msg_fifo + dev->rd_msg_fifo_head,
+ IPC_PAYLOAD_SIZE);
+ dev->rd_msg_fifo_head =
+ (dev->rd_msg_fifo_head + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ ishtp_hbm_dispatch(dev, (struct ishtp_bus_message *)hbm);
+ } else {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ }
+}
+
+/**
+ * recv_hbm() - Receive HBM message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP bus messages in ISR context; messages that
+ * cannot be handled in place are queued to a work function
+ */
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+ struct ishtp_bus_message *ishtp_msg =
+ (struct ishtp_bus_message *)rd_msg_buf;
+ unsigned long flags;
+
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+
+ /* Flow control - handle in place */
+ if (ishtp_msg->hbm_cmd == ISHTP_FLOW_CONTROL_CMD) {
+ struct hbm_flow_control *flow_control =
+ (struct hbm_flow_control *)ishtp_msg;
+ struct ishtp_cl *cl = NULL;
+ unsigned long flags, tx_flags;
+
+ spin_lock_irqsave(&dev->cl_list_lock, flags);
+ list_for_each_entry(cl, &dev->cl_list, link) {
+ if (cl->host_client_id == flow_control->host_addr &&
+ cl->fw_client_id ==
+ flow_control->fw_addr) {
+ /*
+				 * NOTE: Receiving an FC in the middle of
+				 * sending is valid only for a counting
+				 * flow-control implementation, which is
+				 * not currently supported
+ */
+ if (cl->ishtp_flow_ctrl_creds)
+ dev_err(dev->devc,
+ "recv extra FC from FW client %u (host client %u) (FC count was %d)\n",
+ (unsigned int)cl->fw_client_id,
+ (unsigned int)cl->host_client_id,
+ cl->ishtp_flow_ctrl_creds);
+ else {
+ ++cl->ishtp_flow_ctrl_creds;
+ ++cl->ishtp_flow_ctrl_cnt;
+ cl->last_ipc_acked = 1;
+ spin_lock_irqsave(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ if (!list_empty(&cl->tx_list.list)) {
+ /*
+ * start sending the first msg
+ * = the callback function
+ */
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ ishtp_cl_send_msg(dev, cl);
+ } else {
+ spin_unlock_irqrestore(
+ &cl->tx_list_spinlock,
+ tx_flags);
+ }
+ }
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->cl_list_lock, flags);
+ goto eoi;
+ }
+
+ /*
+	 * Messages that are safe for ISR processing, and important to
+	 * handle quickly and in order, are dispatched here in place
+ */
+ if (ishtp_msg->hbm_cmd == CLIENT_CONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_RES_CMD ||
+ ishtp_msg->hbm_cmd == CLIENT_DISCONNECT_REQ_CMD ||
+ ishtp_msg->hbm_cmd == DMA_XFER) {
+ ishtp_hbm_dispatch(dev, ishtp_msg);
+ goto eoi;
+ }
+
+ /*
+	 * All other HBMs go here. They are scheduled for serial processing
+	 * on the system workqueue; several HBMs may be queued at once.
+ */
+ spin_lock_irqsave(&dev->rd_msg_spinlock, flags);
+ if ((dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE) ==
+ dev->rd_msg_fifo_head) {
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ dev_err(dev->devc, "BH buffer overflow, dropping HBM %u\n",
+ (unsigned int)ishtp_msg->hbm_cmd);
+ goto eoi;
+ }
+ memcpy(dev->rd_msg_fifo + dev->rd_msg_fifo_tail, ishtp_msg,
+ ishtp_hdr->length);
+ dev->rd_msg_fifo_tail = (dev->rd_msg_fifo_tail + IPC_PAYLOAD_SIZE) %
+ (RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE);
+ spin_unlock_irqrestore(&dev->rd_msg_spinlock, flags);
+ schedule_work(&dev->bh_hbm_work);
+eoi:
+ return;
+}
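
recv_hbm() is the producer and bh_hbm_work_fn() the consumer of a ring
of fixed-size slots: head == tail means empty, and the FIFO counts as
full when advancing the tail would collide with the head, so one slot is
always sacrificed to tell the two states apart. A standalone sketch of
the same index arithmetic with a shrunken FIFO:

	#include <stdio.h>

	#define SLOT	128			/* IPC_PAYLOAD_SIZE */
	#define NSLOTS	4			/* RD_INT_FIFO_SIZE, shrunk */
	#define FIFOSZ	(NSLOTS * SLOT)

	static unsigned head, tail;		/* byte offsets, slot-aligned */

	static int fifo_put(void)
	{
		if ((tail + SLOT) % FIFOSZ == head)
			return -1;	/* full: drop, as recv_hbm() does */
		tail = (tail + SLOT) % FIFOSZ;
		return 0;
	}

	static int fifo_get(void)
	{
		if (head == tail)
			return -1;	/* empty */
		head = (head + SLOT) % FIFOSZ;
		return 0;
	}

	int main(void)
	{
		int stored = 0;

		while (fifo_put() == 0)
			stored++;
		printf("capacity: %d of %d slots\n", stored, NSLOTS); /* 3 of 4 */
		return 0;
	}
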
+
+/**
+ * recv_fixed_cl_msg() - Receive fixed client message
+ * @dev: ISHTP device instance
+ * @ishtp_hdr: received bus message
+ *
+ * Receive and process ISHTP fixed client messages (address == 0)
+ * in ISR context
+ */
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr)
+{
+ uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
+
+ dev->print_log(dev,
+ "%s() got fixed client msg from client #%d\n",
+ __func__, ishtp_hdr->fw_addr);
+ dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
+ if (ishtp_hdr->fw_addr == ISHTP_SYSTEM_STATE_CLIENT_ADDR) {
+ struct ish_system_states_header *msg_hdr =
+ (struct ish_system_states_header *)rd_msg_buf;
+ if (msg_hdr->cmd == SYSTEM_STATE_SUBSCRIBE)
+ ishtp_send_resume(dev);
+ /* if FW request arrived here, the system is not suspended */
+ else
+ dev_err(dev->devc, "unknown fixed client msg [%02X]\n",
+ msg_hdr->cmd);
+ }
+}
+
+/**
+ * fix_cl_hdr() - Initialize fixed client header
+ * @hdr: message header
+ * @length: length of message
+ * @cl_addr: Client address
+ *
+ * Initialize message header for fixed client
+ */
+static inline void fix_cl_hdr(struct ishtp_msg_hdr *hdr, size_t length,
+ uint8_t cl_addr)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = cl_addr;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+/*** Suspend and resume notification ***/
+
+static uint32_t current_state;
+static uint32_t supported_states = 0 | SUSPEND_STATE_BIT;
+
+/**
+ * ishtp_send_suspend() - Send suspend message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send suspend message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_suspend(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state |= SUSPEND_STATE_BIT;
+ dev->print_log(dev, "%s() sends SUSPEND notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_suspend);
+
+/**
+ * ishtp_send_resume() - Send resume message to FW
+ * @dev: ISHTP device instance
+ *
+ * Send resume message to FW. This is useful for the system freeze (non-S3) case
+ */
+void ishtp_send_resume(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_status state_status_msg;
+ const size_t len = sizeof(struct ish_system_states_status);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&state_status_msg, 0, len);
+ state_status_msg.hdr.cmd = SYSTEM_STATE_STATUS;
+ state_status_msg.supported_states = supported_states;
+ current_state &= ~SUSPEND_STATE_BIT;
+ dev->print_log(dev, "%s() sends RESUME notification\n", __func__);
+ state_status_msg.states_status = current_state;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&state_status_msg);
+}
+EXPORT_SYMBOL(ishtp_send_resume);
+
+/**
+ * ishtp_query_subscribers() - Send query subscribers message
+ * @dev: ISHTP device instance
+ *
+ * Send message to query subscribers
+ */
+void ishtp_query_subscribers(struct ishtp_device *dev)
+{
+ struct ishtp_msg_hdr ishtp_hdr;
+ struct ish_system_states_query_subscribers query_subscribers_msg;
+ const size_t len = sizeof(struct ish_system_states_query_subscribers);
+
+ fix_cl_hdr(&ishtp_hdr, len, ISHTP_SYSTEM_STATE_CLIENT_ADDR);
+
+ memset(&query_subscribers_msg, 0, len);
+ query_subscribers_msg.hdr.cmd = SYSTEM_STATE_QUERY_SUBSCRIBERS;
+
+ ishtp_write_message(dev, &ishtp_hdr,
+ (unsigned char *)&query_subscribers_msg);
+}
diff --git a/drivers/hid/intel-ish-hid/ishtp/hbm.h b/drivers/hid/intel-ish-hid/ishtp/hbm.h
new file mode 100644
index 00000000000000..d96111cef7f84d
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/hbm.h
@@ -0,0 +1,321 @@
+/*
+ * ISHTP bus layer messages handling
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_HBM_H_
+#define _ISHTP_HBM_H_
+
+#include <linux/uuid.h>
+
+struct ishtp_device;
+struct ishtp_msg_hdr;
+struct ishtp_cl;
+
+/*
+ * Timeouts in Seconds
+ */
+#define ISHTP_INTEROP_TIMEOUT 7 /* Timeout on ready message */
+
+#define ISHTP_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+
+/*
+ * ISHTP Version
+ */
+#define HBM_MINOR_VERSION 0
+#define HBM_MAJOR_VERSION 1
+
+/* Host bus message command opcode */
+#define ISHTP_HBM_CMD_OP_MSK 0x7f
+/* Host bus message command RESPONSE */
+#define ISHTP_HBM_CMD_RES_MSK 0x80
+
+/*
+ * ISHTP Bus Message Command IDs
+ */
+#define HOST_START_REQ_CMD 0x01
+#define HOST_START_RES_CMD 0x81
+
+#define HOST_STOP_REQ_CMD 0x02
+#define HOST_STOP_RES_CMD 0x82
+
+#define FW_STOP_REQ_CMD 0x03
+
+#define HOST_ENUM_REQ_CMD 0x04
+#define HOST_ENUM_RES_CMD 0x84
+
+#define HOST_CLIENT_PROPERTIES_REQ_CMD 0x05
+#define HOST_CLIENT_PROPERTIES_RES_CMD 0x85
+
+#define CLIENT_CONNECT_REQ_CMD 0x06
+#define CLIENT_CONNECT_RES_CMD 0x86
+
+#define CLIENT_DISCONNECT_REQ_CMD 0x07
+#define CLIENT_DISCONNECT_RES_CMD 0x87
+
+#define ISHTP_FLOW_CONTROL_CMD 0x08
+
+#define DMA_BUFFER_ALLOC_NOTIFY 0x11
+#define DMA_BUFFER_ALLOC_RESPONSE 0x91
+
+#define DMA_XFER 0x12
+#define DMA_XFER_ACK 0x92
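
Note the encoding: a response opcode is its request opcode with bit 7
set, which is exactly what ISHTP_HBM_CMD_OP_MSK and
ISHTP_HBM_CMD_RES_MSK above express. A quick standalone check:

	#include <assert.h>

	#define OP_MSK	0x7f	/* ISHTP_HBM_CMD_OP_MSK */
	#define RES_MSK	0x80	/* ISHTP_HBM_CMD_RES_MSK */

	int main(void)
	{
		/* a response is its request with the RES bit set */
		assert((0x01 | RES_MSK) == 0x81);	/* HOST_START */
		assert((0x12 | RES_MSK) == 0x92);	/* DMA_XFER */
		/* masking the RES bit off recovers the request opcode */
		assert((0x84 & OP_MSK) == 0x04);	/* HOST_ENUM */
		return 0;
	}
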
+
+/*
+ * ISHTP Stop Reason
+ * used by hbm_host_stop_request.reason
+ */
+#define DRIVER_STOP_REQUEST 0x00
+
+/*
+ * ISHTP BUS Interface Section
+ */
+struct ishtp_msg_hdr {
+ uint32_t fw_addr:8;
+ uint32_t host_addr:8;
+ uint32_t length:9;
+ uint32_t reserved:6;
+ uint32_t msg_complete:1;
+} __packed;
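
On a little-endian build the bitfields above pack into one 32-bit
doorbell word: fw_addr in bits 0-7, host_addr in 8-15, the 9-bit length
in 16-24, reserved in 25-30 and msg_complete in bit 31. Bitfield layout
is formally compiler-defined, so a hedged sketch with explicit shifts
makes the intended word visible:

	#include <stdint.h>

	/* build the header word the packed struct is expected to
	 * produce on a little-endian GCC build */
	static uint32_t ishtp_hdr_word(uint8_t fw_addr, uint8_t host_addr,
				       uint16_t length, int complete)
	{
		return (uint32_t)fw_addr |
		       (uint32_t)host_addr << 8 |
		       ((uint32_t)length & 0x1ff) << 16 |  /* 9-bit length */
		       (uint32_t)(complete & 1) << 31;     /* msg_complete */
	}
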
+
+struct ishtp_bus_message {
+ uint8_t hbm_cmd;
+ uint8_t data[0];
+} __packed;
+
+/**
+ * struct ishtp_hbm_cl_cmd - client specific host bus command
+ *	CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd: bus message command header
+ * @fw_addr: address of the fw client
+ * @host_addr: address of the client in the driver
+ * @data: generic message data
+ */
+struct ishtp_hbm_cl_cmd {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t data;
+};
+
+struct hbm_version {
+ uint8_t minor_version;
+ uint8_t major_version;
+} __packed;
+
+struct hbm_host_version_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved;
+ struct hbm_version host_version;
+} __packed;
+
+struct hbm_host_version_response {
+ uint8_t hbm_cmd;
+ uint8_t host_version_supported;
+ struct hbm_version fw_max_version;
+} __packed;
+
+struct hbm_host_stop_request {
+ uint8_t hbm_cmd;
+ uint8_t reason;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_host_stop_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_request {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+} __packed;
+
+struct hbm_host_enum_response {
+ uint8_t hbm_cmd;
+ uint8_t reserved[3];
+ uint8_t valid_addresses[32];
+} __packed;
+
+struct ishtp_client_properties {
+ uuid_le protocol_name;
+ uint8_t protocol_version;
+ uint8_t max_number_of_connections;
+ uint8_t fixed_address;
+ uint8_t single_recv_buf;
+ uint32_t max_msg_length;
+ uint8_t dma_hdr_len;
+#define ISHTP_CLIENT_DMA_ENABLED 0x80
+ uint8_t reserved4;
+ uint8_t reserved5;
+ uint8_t reserved6;
+} __packed;
+
+struct hbm_props_request {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t reserved[2];
+} __packed;
+
+struct hbm_props_response {
+ uint8_t hbm_cmd;
+ uint8_t address;
+ uint8_t status;
+ uint8_t reserved[1];
+ struct ishtp_client_properties client_properties;
+} __packed;
+
+/**
+ * struct hbm_client_connect_request - connect/disconnect request
+ *
+ * @hbm_cmd: bus message command header
+ * @fw_addr: address of the fw client
+ * @host_addr: address of the client in the driver
+ * @reserved: reserved
+ */
+struct hbm_client_connect_request {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved;
+} __packed;
+
+/**
+ * struct hbm_client_connect_response - connect/disconnect response
+ *
+ * @hbm_cmd: bus message command header
+ * @fw_addr: address of the fw client
+ * @host_addr: address of the client in the driver
+ * @status: status of the request
+ */
+struct hbm_client_connect_response {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t status;
+} __packed;
+
+
+#define ISHTP_FC_MESSAGE_RESERVED_LENGTH 5
+
+struct hbm_flow_control {
+ uint8_t hbm_cmd;
+ uint8_t fw_addr;
+ uint8_t host_addr;
+ uint8_t reserved[ISHTP_FC_MESSAGE_RESERVED_LENGTH];
+} __packed;
+
+struct dma_alloc_notify {
+ uint8_t hbm;
+ uint8_t status;
+ uint8_t reserved[2];
+ uint32_t buf_size;
+ uint64_t buf_address;
+	/* [...] more size/address pairs may follow */
+} __packed;
+
+struct dma_xfer_hbm {
+ uint8_t hbm;
+ uint8_t fw_client_id;
+ uint8_t host_client_id;
+ uint8_t reserved;
+ uint64_t msg_addr;
+ uint32_t msg_length;
+ uint32_t reserved2;
+} __packed;
+
+/* System state */
+#define ISHTP_SYSTEM_STATE_CLIENT_ADDR 13
+
+#define SYSTEM_STATE_SUBSCRIBE 0x1
+#define SYSTEM_STATE_STATUS 0x2
+#define SYSTEM_STATE_QUERY_SUBSCRIBERS 0x3
+#define SYSTEM_STATE_STATE_CHANGE_REQ 0x4
+/* indicates suspend and resume states */
+#define SUSPEND_STATE_BIT (1<<1)
+
+struct ish_system_states_header {
+ uint32_t cmd;
+	uint32_t cmd_status; /* responses will have this set */
+} __packed;
+
+struct ish_system_states_subscribe {
+ struct ish_system_states_header hdr;
+ uint32_t states;
+} __packed;
+
+struct ish_system_states_status {
+ struct ish_system_states_header hdr;
+ uint32_t supported_states;
+ uint32_t states_status;
+} __packed;
+
+struct ish_system_states_query_subscribers {
+ struct ish_system_states_header hdr;
+} __packed;
+
+struct ish_system_states_state_change_req {
+ struct ish_system_states_header hdr;
+ uint32_t requested_states;
+ uint32_t states_status;
+} __packed;
+
+/**
+ * enum ishtp_hbm_state - host bus message protocol state
+ *
+ * @ISHTP_HBM_IDLE : protocol not started
+ * @ISHTP_HBM_START : start request message was sent
+ * @ISHTP_HBM_STARTED : start response message was received
+ * @ISHTP_HBM_ENUM_CLIENTS : enumeration request was sent
+ * @ISHTP_HBM_CLIENT_PROPERTIES : acquiring clients properties
+ * @ISHTP_HBM_WORKING : enumeration and properties exchange finished
+ * @ISHTP_HBM_STOPPED : stop request message was sent
+ */
+enum ishtp_hbm_state {
+ ISHTP_HBM_IDLE = 0,
+ ISHTP_HBM_START,
+ ISHTP_HBM_STARTED,
+ ISHTP_HBM_ENUM_CLIENTS,
+ ISHTP_HBM_CLIENT_PROPERTIES,
+ ISHTP_HBM_WORKING,
+ ISHTP_HBM_STOPPED,
+};
+
+static inline void ishtp_hbm_hdr(struct ishtp_msg_hdr *hdr, size_t length)
+{
+ hdr->host_addr = 0;
+ hdr->fw_addr = 0;
+ hdr->length = length;
+ hdr->msg_complete = 1;
+ hdr->reserved = 0;
+}
+
+int ishtp_hbm_start_req(struct ishtp_device *dev);
+int ishtp_hbm_start_wait(struct ishtp_device *dev);
+int ishtp_hbm_cl_flow_control_req(struct ishtp_device *dev,
+ struct ishtp_cl *cl);
+int ishtp_hbm_cl_disconnect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+int ishtp_hbm_cl_connect_req(struct ishtp_device *dev, struct ishtp_cl *cl);
+void ishtp_hbm_enum_clients_req(struct ishtp_device *dev);
+void bh_hbm_work_fn(struct work_struct *work);
+void recv_hbm(struct ishtp_device *dev, struct ishtp_msg_hdr *ishtp_hdr);
+void recv_fixed_cl_msg(struct ishtp_device *dev,
+ struct ishtp_msg_hdr *ishtp_hdr);
+void ishtp_hbm_dispatch(struct ishtp_device *dev,
+ struct ishtp_bus_message *hdr);
+
+void ishtp_query_subscribers(struct ishtp_device *dev);
+
+/* Exported I/F */
+void ishtp_send_suspend(struct ishtp_device *dev);
+void ishtp_send_resume(struct ishtp_device *dev);
+
+#endif /* _ISHTP_HBM_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/init.c b/drivers/hid/intel-ish-hid/ishtp/init.c
new file mode 100644
index 00000000000000..d27e03526acd8b
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/init.c
@@ -0,0 +1,114 @@
+/*
+ * Initialization protocol for ISHTP driver
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "ishtp-dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * ishtp_dev_state_str() - Convert to string format
+ * @state: state to convert
+ *
+ * Convert state to string for prints
+ *
+ * Return: character pointer to converted string
+ */
+const char *ishtp_dev_state_str(int state)
+{
+ switch (state) {
+ case ISHTP_DEV_INITIALIZING:
+ return "INITIALIZING";
+ case ISHTP_DEV_INIT_CLIENTS:
+ return "INIT_CLIENTS";
+ case ISHTP_DEV_ENABLED:
+ return "ENABLED";
+ case ISHTP_DEV_RESETTING:
+ return "RESETTING";
+ case ISHTP_DEV_DISABLED:
+ return "DISABLED";
+ case ISHTP_DEV_POWER_DOWN:
+ return "POWER_DOWN";
+ case ISHTP_DEV_POWER_UP:
+ return "POWER_UP";
+ default:
+ return "unknown";
+ }
+}
+
+/**
+ * ishtp_device_init() - ishtp device init
+ * @dev: ISHTP device instance
+ *
+ * After the ISHTP device is allocated, this function initializes
+ * each field, including spin locks, work structs and lists
+ */
+void ishtp_device_init(struct ishtp_device *dev)
+{
+ dev->dev_state = ISHTP_DEV_INITIALIZING;
+ INIT_LIST_HEAD(&dev->cl_list);
+ INIT_LIST_HEAD(&dev->device_list);
+ dev->rd_msg_fifo_head = 0;
+ dev->rd_msg_fifo_tail = 0;
+ spin_lock_init(&dev->rd_msg_spinlock);
+
+ init_waitqueue_head(&dev->wait_hbm_recvd_msg);
+ spin_lock_init(&dev->read_list_spinlock);
+ spin_lock_init(&dev->device_lock);
+ spin_lock_init(&dev->device_list_lock);
+ spin_lock_init(&dev->cl_list_lock);
+ spin_lock_init(&dev->fw_clients_lock);
+ INIT_WORK(&dev->bh_hbm_work, bh_hbm_work_fn);
+
+ bitmap_zero(dev->host_clients_map, ISHTP_CLIENTS_MAX);
+ dev->open_handle_count = 0;
+
+ /*
+ * Reserving client ID 0 for ISHTP Bus Message communications
+ */
+ bitmap_set(dev->host_clients_map, 0, 1);
+
+ INIT_LIST_HEAD(&dev->read_list.list);
+
+}
+EXPORT_SYMBOL(ishtp_device_init);
+
+/**
+ * ishtp_start() - Start ISH processing
+ * @dev: ISHTP device instance
+ *
+ * Start ISHTP processing by sending query subscriber message
+ *
+ * Return: 0 on success else -ENODEV
+ */
+int ishtp_start(struct ishtp_device *dev)
+{
+ if (ishtp_hbm_start_wait(dev)) {
+		dev_err(dev->devc, "HBM hasn't started\n");
+ goto err;
+ }
+
+ /* suspend & resume notification - send QUERY_SUBSCRIBERS msg */
+ ishtp_query_subscribers(dev);
+
+ return 0;
+err:
+ dev_err(dev->devc, "link layer initialization failed.\n");
+ dev->dev_state = ISHTP_DEV_DISABLED;
+ return -ENODEV;
+}
+EXPORT_SYMBOL(ishtp_start);
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
new file mode 100644
index 00000000000000..6a6d927b78b0e2
--- /dev/null
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -0,0 +1,278 @@
+/*
+ * Most ISHTP provider device and ISHTP logic declarations
+ *
+ * Copyright (c) 2003-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef _ISHTP_DEV_H_
+#define _ISHTP_DEV_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include "bus.h"
+#include "hbm.h"
+
+#define IPC_PAYLOAD_SIZE 128
+#define ISHTP_RD_MSG_BUF_SIZE IPC_PAYLOAD_SIZE
+#define IPC_FULL_MSG_SIZE 132
+
+/* Number of messages to be held in ISR->BH FIFO */
+#define RD_INT_FIFO_SIZE 64
+
+/*
+ * Number of IPC messages to be held in Tx FIFO, to be sent by ISR -
+ * Tx complete interrupt or RX_COMPLETE handler
+ */
+#define IPC_TX_FIFO_SIZE 512
+
+/*
+ * Number of Maximum ISHTP Clients
+ */
+#define ISHTP_CLIENTS_MAX 256
+
+/*
+ * Number of File descriptors/handles
+ * that can be opened to the driver.
+ *
+ * Limit to 255: 256 Total Clients
+ * minus internal client for ISHTP Bus Messages
+ */
+#define ISHTP_MAX_OPEN_HANDLE_COUNT (ISHTP_CLIENTS_MAX - 1)
+
+/* Internal Clients Number */
+#define ISHTP_HOST_CLIENT_ID_ANY (-1)
+#define ISHTP_HBM_HOST_CLIENT_ID 0
+
+#define MAX_DMA_DELAY 20
+
+/* ISHTP device states */
+enum ishtp_dev_state {
+ ISHTP_DEV_INITIALIZING = 0,
+ ISHTP_DEV_INIT_CLIENTS,
+ ISHTP_DEV_ENABLED,
+ ISHTP_DEV_RESETTING,
+ ISHTP_DEV_DISABLED,
+ ISHTP_DEV_POWER_DOWN,
+ ISHTP_DEV_POWER_UP
+};
+const char *ishtp_dev_state_str(int state);
+
+struct ishtp_cl;
+
+/**
+ * struct ishtp_fw_client - representation of fw client
+ *
+ * @props - client properties
+ * @client_id - fw client id
+ */
+struct ishtp_fw_client {
+ struct ishtp_client_properties props;
+ uint8_t client_id;
+};
+
+/**
+ * struct ishtp_msg_data - ISHTP message data struct
+ * @size: Size of data in the *data
+ * @data: Pointer to data
+ */
+struct ishtp_msg_data {
+ uint32_t size;
+ unsigned char *data;
+};
+
+/*
+ * struct ishtp_cl_rb - request block structure
+ * @list: Link to list members
+ * @cl: ISHTP client instance
+ * @buffer: message buffer
+ * @buf_idx: Index into buffer
+ * @read_time: unused at this time
+ */
+struct ishtp_cl_rb {
+ struct list_head list;
+ struct ishtp_cl *cl;
+ struct ishtp_msg_data buffer;
+ unsigned long buf_idx;
+ unsigned long read_time;
+};
+
+/*
+ * Control info for the ISHTP/IPC sending FIFO of IPC messages -
+ * a list with an inline data buffer.
+ * This structure is filled with parameters submitted by the caller
+ * glue layer.
+ * 'buf' may point to an external buffer or to 'inline_data';
+ * 'offset' is initialized to 0 on submission.
+ *
+ * 'ipc_send_compl' is intended for use by clients that send fragmented
+ * messages: it is called whenever a fragment has been sent down to the
+ * IPC msg regs. If more fragments remain, it sends the next one; with
+ * the last fragment it sends the appropriate ISHTP "message-complete"
+ * flag and removes the outstanding message (marks the outstanding
+ * buffer as available). If counting flow control is in effect and more
+ * flow-control credits are available, it can queue the next client
+ * message from the cl structure for IPC processing.
+ */
+struct wr_msg_ctl_info {
+ /* Will be called with 'ipc_send_compl_prm' as parameter */
+ void (*ipc_send_compl)(void *);
+
+ void *ipc_send_compl_prm;
+ size_t length;
+ struct list_head link;
+ unsigned char inline_data[IPC_FULL_MSG_SIZE];
+};
+
+/*
+ * The ISHTP layer talks to hardware IPC message using the following
+ * callbacks
+ */
+struct ishtp_hw_ops {
+ int (*hw_reset)(struct ishtp_device *dev);
+ int (*ipc_reset)(struct ishtp_device *dev);
+ uint32_t (*ipc_get_header)(struct ishtp_device *dev, int length,
+ int busy);
+ int (*write)(struct ishtp_device *dev,
+ void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
+ unsigned char *msg, int length);
+ uint32_t (*ishtp_read_hdr)(const struct ishtp_device *dev);
+ int (*ishtp_read)(struct ishtp_device *dev, unsigned char *buffer,
+ unsigned long buffer_length);
+ uint32_t (*get_fw_status)(struct ishtp_device *dev);
+ void (*sync_fw_clock)(struct ishtp_device *dev);
+};
+
+/**
+ * struct ishtp_device - ISHTP private device struct
+ */
+struct ishtp_device {
+ struct device *devc; /* pointer to lowest device */
+ struct pci_dev *pdev; /* PCI device to get device ids */
+
+ /* waitq for waiting for suspend response */
+ wait_queue_head_t suspend_wait;
+ bool suspend_flag; /* Suspend is active */
+
+ /* waitq for waiting for resume response */
+ wait_queue_head_t resume_wait;
+	bool resume_flag; /* Resume is active */
+
+ /*
+ * lock for the device, for everything that doesn't have
+ * a dedicated spinlock
+ */
+ spinlock_t device_lock;
+
+ bool recvd_hw_ready;
+ struct hbm_version version;
+ int transfer_path; /* Choice of transfer path: IPC or DMA */
+
+ /* ishtp device states */
+ enum ishtp_dev_state dev_state;
+ enum ishtp_hbm_state hbm_state;
+
+ /* driver read queue */
+ struct ishtp_cl_rb read_list;
+ spinlock_t read_list_spinlock;
+
+ /* list of ishtp_cl's */
+ struct list_head cl_list;
+ spinlock_t cl_list_lock;
+ long open_handle_count;
+
+ /* List of bus devices */
+ struct list_head device_list;
+ spinlock_t device_list_lock;
+
+ /* waiting queues for receive message from FW */
+ wait_queue_head_t wait_hw_ready;
+ wait_queue_head_t wait_hbm_recvd_msg;
+
+ /* FIFO for input messages for BH processing */
+ unsigned char rd_msg_fifo[RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE];
+ unsigned int rd_msg_fifo_head, rd_msg_fifo_tail;
+ spinlock_t rd_msg_spinlock;
+ struct work_struct bh_hbm_work;
+
+ /* IPC write queue */
+ struct wr_msg_ctl_info wr_processing_list_head, wr_free_list_head;
+ /* For both processing list and free list */
+ spinlock_t wr_processing_spinlock;
+
+ spinlock_t out_ipc_spinlock;
+
+	struct ishtp_fw_client *fw_clients; /* Note: memory has to be allocated */
+ DECLARE_BITMAP(fw_clients_map, ISHTP_CLIENTS_MAX);
+ DECLARE_BITMAP(host_clients_map, ISHTP_CLIENTS_MAX);
+ uint8_t fw_clients_num;
+ uint8_t fw_client_presentation_num;
+ uint8_t fw_client_index;
+ spinlock_t fw_clients_lock;
+
+ /* TX DMA buffers and slots */
+ int ishtp_host_dma_enabled;
+ void *ishtp_host_dma_tx_buf;
+ unsigned int ishtp_host_dma_tx_buf_size;
+ uint64_t ishtp_host_dma_tx_buf_phys;
+ int ishtp_dma_num_slots;
+
+ /* map of 4k blocks in Tx dma buf: 0-free, 1-used */
+ uint8_t *ishtp_dma_tx_map;
+ spinlock_t ishtp_dma_tx_lock;
+
+ /* RX DMA buffers and slots */
+ void *ishtp_host_dma_rx_buf;
+ unsigned int ishtp_host_dma_rx_buf_size;
+ uint64_t ishtp_host_dma_rx_buf_phys;
+
+	/* Dump to trace buffers if enabled */
+ __printf(2, 3) void (*print_log)(struct ishtp_device *dev,
+ const char *format, ...);
+
+ /* Debug stats */
+ unsigned int ipc_rx_cnt;
+ unsigned long long ipc_rx_bytes_cnt;
+ unsigned int ipc_tx_cnt;
+ unsigned long long ipc_tx_bytes_cnt;
+
+ const struct ishtp_hw_ops *ops;
+ size_t mtu;
+ uint32_t ishtp_msg_hdr;
+ char hw[0] __aligned(sizeof(void *));
+};
+
+static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
+{
+ return msecs_to_jiffies(sec * MSEC_PER_SEC);
+}
+
+/*
+ * Register Access Function
+ */
+static inline int ish_ipc_reset(struct ishtp_device *dev)
+{
+ return dev->ops->ipc_reset(dev);
+}
+
+static inline int ish_hw_reset(struct ishtp_device *dev)
+{
+ return dev->ops->hw_reset(dev);
+}
+
+/* Exported function */
+void ishtp_device_init(struct ishtp_device *dev);
+int ishtp_start(struct ishtp_device *dev);
+
+#endif /*_ISHTP_DEV_H_*/
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 632de7c06ed6eb..fa11b3138cff32 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -12,6 +12,7 @@
#include <linux/atomic.h>
#include <linux/compat.h>
+#include <linux/cred.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
@@ -24,6 +25,7 @@
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>
+#include <linux/uaccess.h>
#define UHID_NAME "uhid"
#define UHID_BUFSIZE 32
@@ -721,6 +723,17 @@ static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
switch (uhid->input_buf.type) {
case UHID_CREATE:
+ /*
+ * 'struct uhid_create_req' contains a __user pointer which is
+ * copied from, so it's unsafe to allow this with elevated
+ * privileges (e.g. from a setuid binary) or via kernel_write().
+ */
+ if (file->f_cred != current_cred() || uaccess_kernel()) {
+ pr_err_once("UHID_CREATE from different security context by process %d (%s), this is not allowed.\n",
+ task_tgid_vnr(current), current->comm);
+ ret = -EACCES;
+ goto unlock;
+ }
ret = uhid_dev_create(uhid, &uhid->input_buf);
break;
case UHID_CREATE2:
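
A legitimate caller is unaffected by this check, since a process that
opens /dev/uhid itself and write()s the request has file->f_cred equal
to current_cred(). A minimal (hedged) userspace sketch using the newer
UHID_CREATE2, which carries its report descriptor inline instead of a
__user pointer; the one-byte descriptor is a placeholder only to show
the call shape:

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <linux/input.h>
	#include <linux/uhid.h>

	int main(void)
	{
		static const unsigned char rdesc[] = { 0xc0 }; /* placeholder */
		struct uhid_event ev;
		int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);

		if (fd < 0)
			return 1;

		memset(&ev, 0, sizeof(ev));
		ev.type = UHID_CREATE2;
		strcpy((char *)ev.u.create2.name, "example-uhid-device");
		ev.u.create2.rd_size = sizeof(rdesc);
		ev.u.create2.bus = BUS_USB;
		memcpy(ev.u.create2.rd_data, rdesc, sizeof(rdesc));

		/* same security context as the opener: the check passes */
		if (write(fd, &ev, sizeof(ev)) < 0)
			return 1;

		close(fd);
		return 0;
	}
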
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 07a9f083eec968..8cdf9cb6eb1154 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -153,6 +153,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_MULTIPLE_1781, USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI, HID_QUIRK_MULTI_INPUT },
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 667171829f65c3..096a9632bf6718 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -35,6 +35,7 @@
#include <linux/hiddev.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#include "usbhid.h"
#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -479,10 +480,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index = array_index_nospec(uref->usage_index,
+ field->maxusage);
uref->usage_code = field->usage[uref->usage_index].hid;
@@ -509,20 +514,32 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
if (cmd == HIDIOCGCOLLECTIONINDEX) {
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->maxusage);
} else if (uref->usage_index >= field->report_count)
goto inval;
}
- if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
+ if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
+ if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values >
+ field->report_count)
+ goto inval;
+
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->report_count -
+ uref_multi->num_values);
+ }
switch (cmd) {
case HIDIOCGUSAGE:
@@ -763,6 +780,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (finfo.field_index >= report->maxfield)
break;
+ finfo.field_index = array_index_nospec(finfo.field_index,
+ report->maxfield);
field = report->field[finfo.field_index];
memset(&finfo, 0, sizeof(finfo));
@@ -807,6 +826,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cinfo.index >= hid->maxcollection)
break;
+ cinfo.index = array_index_nospec(cinfo.index,
+ hid->maxcollection);
cinfo.type = hid->collection[cinfo.index].type;
cinfo.usage = hid->collection[cinfo.index].usage;
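
Every hunk above follows the standard Spectre-v1 pattern: a bounds check
followed by array_index_nospec(), which clamps the index so a
mispredicted branch cannot speculatively read out of bounds. The kernel
helper is arch-specific; a portable approximation of its contract, for
illustration only:

	#include <stddef.h>

	/*
	 * Returns index when index < size and 0 otherwise, computed with
	 * a mask rather than a data-dependent branch. The real
	 * array_index_nospec() uses arch-specific masking with stronger
	 * speculation guarantees.
	 */
	static size_t index_nospec(size_t index, size_t size)
	{
		size_t mask = (size_t)0 - (size_t)(index < size);

		return index & mask;
	}

It is used as "idx = index_nospec(idx, len);" immediately after the
"if (idx >= len)" check, mirroring the hunks above.
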
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b62c50d1b1e4b8..b184956bd4306e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2487,8 +2487,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
features->device_type |= WACOM_DEVICETYPE_PAD;
- features->x_max = 4096;
- features->y_max = 4096;
+ if (features->type == INTUOSHT2) {
+ features->x_max = features->x_max / 10;
+ features->y_max = features->y_max / 10;
+	} else {
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
}
else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->device_type |= WACOM_DEVICETYPE_PAD;
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 12dcbd8226f21b..2cce48d9e903a3 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -256,7 +256,6 @@ void hv_fcopy_onchannelcallback(void *context)
*/
fcopy_transaction.recv_len = recvlen;
- fcopy_transaction.recv_channel = channel;
fcopy_transaction.recv_req_id = requestid;
fcopy_transaction.fcopy_msg = fcopy_msg;
@@ -323,6 +322,7 @@ static void fcopy_on_reset(void)
int hv_fcopy_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ fcopy_transaction.recv_channel = srv->channel;
init_completion(&release_event);
/*
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ce4d3a93549162..1771a968c3f245 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -78,9 +78,11 @@ static void kvp_send_key(struct work_struct *dummy);
static void kvp_respond_to_host(struct hv_kvp_msg *msg, int error);
static void kvp_timeout_func(struct work_struct *dummy);
+static void kvp_host_handshake_func(struct work_struct *dummy);
static void kvp_register(int);
static DECLARE_DELAYED_WORK(kvp_timeout_work, kvp_timeout_func);
+static DECLARE_DELAYED_WORK(kvp_host_handshake_work, kvp_host_handshake_func);
static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
@@ -131,6 +133,11 @@ static void kvp_timeout_func(struct work_struct *dummy)
hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
}
+static void kvp_host_handshake_func(struct work_struct *dummy)
+{
+ hv_poll_channel(kvp_transaction.recv_channel, hv_kvp_onchannelcallback);
+}
+
static int kvp_handle_handshake(struct hv_kvp_msg *msg)
{
switch (msg->kvp_hdr.operation) {
@@ -155,7 +162,13 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
pr_debug("KVP: userspace daemon ver. %d registered\n",
KVP_OP_REGISTER);
kvp_register(dm_reg_value);
- kvp_transaction.state = HVUTIL_READY;
+
+ /*
+	 * If we're still negotiating with the host, cancel the timeout
+	 * work so we don't poll the channel twice.
+ */
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
+ hv_poll_channel(kvp_transaction.recv_channel, kvp_poll_wrapper);
return 0;
}
@@ -595,10 +608,26 @@ void hv_kvp_onchannelcallback(void *context)
struct icmsg_negotiate *negop = NULL;
int util_fw_version;
int kvp_srv_version;
+ static enum {NEGO_NOT_STARTED,
+ NEGO_IN_PROGRESS,
+		NEGO_FINISHED} host_negotiated = NEGO_NOT_STARTED;
+ if (kvp_transaction.state < HVUTIL_READY) {
+ /*
+		 * If the userspace daemon is not connected and the host is
+		 * asking us to negotiate, we need to delay so as not to
+		 * lose messages. This is important for the Failover IP
+		 * setting.
+		 */
+		if (host_negotiated == NEGO_NOT_STARTED) {
+			host_negotiated = NEGO_IN_PROGRESS;
+ schedule_delayed_work(&kvp_host_handshake_work,
+ HV_UTIL_NEGO_TIMEOUT * HZ);
+ }
+ return;
+ }
if (kvp_transaction.state > HVUTIL_READY)
return;
-
+recheck:
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
@@ -640,7 +669,6 @@ void hv_kvp_onchannelcallback(void *context)
*/
kvp_transaction.recv_len = recvlen;
- kvp_transaction.recv_channel = channel;
kvp_transaction.recv_req_id = requestid;
kvp_transaction.kvp_msg = kvp_msg;
@@ -674,6 +702,10 @@ void hv_kvp_onchannelcallback(void *context)
vmbus_sendpacket(channel, recv_buffer,
recvlen, requestid,
VM_PKT_DATA_INBAND, 0);
+
+		host_negotiated = NEGO_FINISHED;
+
+ goto recheck;
}
}
@@ -690,6 +722,7 @@ int
hv_kvp_init(struct hv_util_service *srv)
{
recv_buffer = srv->recv_buffer;
+ kvp_transaction.recv_channel = srv->channel;
init_completion(&release_event);
/*
@@ -711,6 +744,7 @@ hv_kvp_init(struct hv_util_service *srv)
void hv_kvp_deinit(void)
{
kvp_transaction.state = HVUTIL_DEVICE_DYING;
+ cancel_delayed_work_sync(&kvp_host_handshake_work);
cancel_delayed_work_sync(&kvp_timeout_work);
cancel_work_sync(&kvp_sendkey_work);
hvutil_transport_destroy(hvt);
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index faad79ae318a63..b0feddb17170fe 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -114,7 +114,7 @@ static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
default:
return -EINVAL;
}
- vss_transaction.state = HVUTIL_READY;
+ hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
pr_debug("VSS: userspace daemon ver. %d registered\n", dm_reg_value);
return 0;
}
@@ -264,7 +264,6 @@ void hv_vss_onchannelcallback(void *context)
*/
vss_transaction.recv_len = recvlen;
- vss_transaction.recv_channel = channel;
vss_transaction.recv_req_id = requestid;
vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
@@ -340,6 +339,7 @@ hv_vss_init(struct hv_util_service *srv)
return -ENOTSUPP;
}
recv_buffer = srv->recv_buffer;
+ vss_transaction.recv_channel = srv->channel;
/*
* When this driver loads, the user level daemon that
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 41f5896224bd47..9dc63725363d2a 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -326,6 +326,7 @@ static int util_probe(struct hv_device *dev,
srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
if (!srv->recv_buffer)
return -ENOMEM;
+ srv->channel = dev->channel;
if (srv->util_init) {
ret = srv->util_init(srv);
if (ret) {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 75e383e6d03d32..15e06493c53aad 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -36,6 +36,11 @@
#define HV_UTIL_TIMEOUT 30
/*
+ * Timeout for guest-host handshake for services.
+ */
+#define HV_UTIL_NEGO_TIMEOUT 60
+
+/*
* The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
* is set by CPUID(HVCPUID_VERSION_FEATURES).
*/
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 802dcb40903080..b877cce0409b8d 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -316,6 +316,8 @@ static ssize_t out_intr_mask_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_interrupt_mask);
}
@@ -329,6 +331,8 @@ static ssize_t out_read_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_read_index);
}
@@ -343,6 +347,8 @@ static ssize_t out_write_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.current_write_index);
}
@@ -357,6 +363,8 @@ static ssize_t out_read_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.bytes_avail_toread);
}
@@ -371,6 +379,8 @@ static ssize_t out_write_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->outbound, &outbound);
return sprintf(buf, "%d\n", outbound.bytes_avail_towrite);
}
@@ -384,6 +394,8 @@ static ssize_t in_intr_mask_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
}
@@ -397,6 +409,8 @@ static ssize_t in_read_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_read_index);
}
@@ -410,6 +424,8 @@ static ssize_t in_write_index_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.current_write_index);
}
@@ -424,6 +440,8 @@ static ssize_t in_read_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
}
@@ -438,6 +456,8 @@ static ssize_t in_write_bytes_avail_show(struct device *dev,
if (!hv_dev->channel)
return -ENODEV;
+ if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
}
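
Each of these _show() callbacks now repeats the same two guards. If the
pattern keeps growing, a small helper would keep it in one place; a
hedged kernel-context sketch (channel_opened() is illustrative, not an
existing vmbus API):

	/* hypothetical helper consolidating the repeated sysfs guard */
	static int channel_opened(const struct hv_device *hv_dev)
	{
		if (!hv_dev->channel)
			return -ENODEV;
		if (hv_dev->channel->state != CHANNEL_OPENED_STATE)
			return -EINVAL;
		return 0;
	}

Each callback would then start with "ret = channel_opened(hv_dev); if
(ret) return ret;".
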
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 3cefd1aeb24f4a..9c262d95533150 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -274,14 +274,18 @@ static inline u16 volt2reg(int channel, long volt, u8 bypass_attn)
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
{
- u16 val;
+ int val1, val2;
- val = i2c_smbus_read_byte_data(client, reg);
- val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+ val1 = i2c_smbus_read_byte_data(client, reg);
+ if (val1 < 0)
+ return val1;
+ val2 = i2c_smbus_read_byte_data(client, reg + 1);
+ if (val2 < 0)
+ return val2;
- return val;
+ return val1 | (val2 << 8);
}
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index 55b5a8ff1cfe22..ca3aa28977bc9a 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -114,7 +114,7 @@ static ssize_t show_label(struct device *dev, struct device_attribute *devattr,
return sprintf(buf, "%s\n", sdata->label);
}
-static int __init get_logical_cpu(int hwcpu)
+static int get_logical_cpu(int hwcpu)
{
int cpu;
@@ -125,9 +125,8 @@ static int __init get_logical_cpu(int hwcpu)
return -ENOENT;
}
-static void __init make_sensor_label(struct device_node *np,
- struct sensor_data *sdata,
- const char *label)
+static void make_sensor_label(struct device_node *np,
+ struct sensor_data *sdata, const char *label)
{
u32 id;
size_t n;
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index ac63e562071fea..1f291b344178a4 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify
@@ -273,7 +273,7 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg,
break;
case INA2XX_CURRENT:
/* signed register, result in mA */
- val = regval * data->current_lsb_uA;
+ val = (s16)regval * data->current_lsb_uA;
val = DIV_ROUND_CLOSEST(val, 1000);
break;
case INA2XX_CALIBRATION:
@@ -328,6 +328,15 @@ static int ina2xx_set_shunt(struct ina2xx_data *data, long val)
return 0;
}
+static ssize_t ina2xx_show_shunt(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
@@ -402,7 +411,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL,
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_store_shunt,
+ ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 4bcd9b882948c1..be60bd5bab783b 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -360,9 +360,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = data->client;
unsigned long min, val;
u8 reg;
- int err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
+ int rv;
+
+ rv = kstrtoul(buf, 10, &val);
+ if (rv < 0)
+ return rv;
/* Save fan_min */
mutex_lock(&data->update_lock);
@@ -390,8 +392,13 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- reg = (lm80_read_value(client, LM80_REG_FANDIV) &
- ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1)));
+ rv = lm80_read_value(client, LM80_REG_FANDIV);
+ if (rv < 0) {
+ mutex_unlock(&data->update_lock);
+ return rv;
+ }
+ reg = (rv & ~(3 << (2 * (nr + 1))))
+ | (data->fan_div[nr] << (2 * (nr + 1)));
lm80_write_value(client, LM80_REG_FANDIV, reg);
/* Restore fan_min */
@@ -623,6 +630,7 @@ static int lm80_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
+ int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -635,8 +643,14 @@ static int lm80_probe(struct i2c_client *client,
lm80_init_client(client);
/* A few vars need to be filled upon startup */
- data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
- data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+ rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+ if (rv < 0)
+ return rv;
+ data->fan[f_min][0] = rv;
+ rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+ if (rv < 0)
+ return rv;
+ data->fan[f_min][1] = rv;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);
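The lm80 changes convert unchecked register reads into the usual error-checked read-modify-write under a mutex; the detail worth noting is that every early return must drop the lock first. A condensed sketch of the pattern (mask and bits are placeholders):

	mutex_lock(&data->update_lock);
	rv = lm80_read_value(client, LM80_REG_FANDIV);
	if (rv < 0) {
		mutex_unlock(&data->update_lock);	/* never return locked */
		return rv;
	}
	reg = (rv & ~mask) | bits;
	lm80_write_value(client, LM80_REG_FANDIV, reg);
	mutex_unlock(&data->update_lock);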
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 0a74991a60f0df..1b2b79f6ea3a91 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -117,6 +117,8 @@ static int pmbus_identify(struct i2c_client *client,
} else {
info->pages = 1;
}
+
+ pmbus_clear_faults(client);
}
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) {
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index d013acf3f83a3a..c00bad02761aee 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1759,7 +1759,10 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK))
client->flags |= I2C_CLIENT_PEC;
- pmbus_clear_faults(client);
+ if (data->info->pages)
+ pmbus_clear_faults(client);
+ else
+ pmbus_clear_fault_page(client, -1);
if (info->identify) {
ret = (*info->identify)(client, info);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 49276bbdac3ddf..1bb80f992aa860 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -1691,7 +1691,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
* somewhere else in the code
*/
#define SENSOR_ATTR_TEMP(index) { \
- SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 4 ? S_IWUSR : 0), \
+ SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \
show_temp_mode, store_temp_mode, NOT_USED, index - 1), \
SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \
NULL, TEMP_READ, index - 1), \
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 77d0f9c1118dfd..92969dae739d83 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -489,15 +489,6 @@ err_misc_register:
return ret;
}
-static int etb_remove(struct amba_device *adev)
-{
- struct etb_drvdata *drvdata = amba_get_drvdata(adev);
-
- misc_deregister(&drvdata->miscdev);
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int etb_runtime_suspend(struct device *dev)
{
@@ -537,10 +528,10 @@ static struct amba_driver etb_driver = {
.name = "coresight-etb10",
.owner = THIS_MODULE,
.pm = &etb_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = etb_probe,
- .remove = etb_remove,
.id_table = etb_ids,
};
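This is the first of several coresight drivers in this series that drop their .remove callback and set suppress_bind_attrs instead, which removes the sysfs bind/unbind files so the driver can no longer be detached at runtime. A minimal sketch of the resulting driver shape (the names are hypothetical):

	static struct amba_driver example_driver = {
		.drv = {
			.name = "coresight-example",
			/* no unbind via sysfs, hence no .remove needed */
			.suppress_bind_attrs = true,
		},
		.probe = example_probe,
		.id_table = example_ids,
	};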
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d630b7ece73521..5981fcc699601a 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -1877,17 +1877,6 @@ err_arch_supported:
return ret;
}
-static int etm_remove(struct amba_device *adev)
-{
- struct etm_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- if (--etm_count == 0)
- unregister_hotcpu_notifier(&etm_cpu_notifier);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int etm_runtime_suspend(struct device *dev)
{
@@ -1948,9 +1937,9 @@ static struct amba_driver etm_driver = {
.name = "coresight-etm3x",
.owner = THIS_MODULE,
.pm = &etm_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = etm_probe,
- .remove = etm_remove,
.id_table = etm_ids,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index a6707642bb238a..0edc10b4400440 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -2219,7 +2219,7 @@ static ssize_t name##_show(struct device *_dev, \
return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
readl_relaxed(drvdata->base + offset)); \
} \
-DEVICE_ATTR_RO(name)
+static DEVICE_ATTR_RO(name)
coresight_simple_func(trcoslsr, TRCOSLSR);
coresight_simple_func(trcpdcr, TRCPDCR);
@@ -2684,17 +2684,6 @@ err_coresight_register:
return ret;
}
-static int etm4_remove(struct amba_device *adev)
-{
- struct etmv4_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- if (--etm4_count == 0)
- unregister_hotcpu_notifier(&etm4_cpu_notifier);
-
- return 0;
-}
-
static struct amba_id etm4_ids[] = {
{ /* ETM 4.0 - Qualcomm */
.id = 0x0003b95d,
@@ -2712,9 +2701,9 @@ static struct amba_id etm4_ids[] = {
static struct amba_driver etm4x_driver = {
.drv = {
.name = "coresight-etm4x",
+ .suppress_bind_attrs = true,
},
.probe = etm4_probe,
- .remove = etm4_remove,
.id_table = etm4_ids,
};
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 2e36bde7fcb41b..25e8ea140a099f 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -226,14 +226,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int funnel_remove(struct amba_device *adev)
-{
- struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int funnel_runtime_suspend(struct device *dev)
{
@@ -273,9 +265,9 @@ static struct amba_driver funnel_driver = {
.name = "coresight-funnel",
.owner = THIS_MODULE,
.pm = &funnel_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = funnel_probe,
- .remove = funnel_remove,
.id_table = funnel_ids,
};
diff --git a/drivers/hwtracing/coresight/coresight-replicator-qcom.c b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
index 584059e9e8660f..4448151794605d 100644
--- a/drivers/hwtracing/coresight/coresight-replicator-qcom.c
+++ b/drivers/hwtracing/coresight/coresight-replicator-qcom.c
@@ -156,15 +156,6 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int replicator_remove(struct amba_device *adev)
-{
- struct replicator_state *drvdata = amba_get_drvdata(adev);
-
- pm_runtime_disable(&adev->dev);
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -206,9 +197,9 @@ static struct amba_driver replicator_driver = {
.drv = {
.name = "coresight-replicator-qcom",
.pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = replicator_probe,
- .remove = replicator_remove,
.id_table = replicator_ids,
};
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 963ac197c2535c..b77d700a3f0e04 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -127,20 +127,6 @@ out_disable_pm:
return ret;
}
-static int replicator_remove(struct platform_device *pdev)
-{
- struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
-
- coresight_unregister(drvdata->csdev);
- pm_runtime_get_sync(&pdev->dev);
- if (!IS_ERR(drvdata->atclk))
- clk_disable_unprepare(drvdata->atclk);
- pm_runtime_put_noidle(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int replicator_runtime_suspend(struct device *dev)
{
@@ -175,11 +161,11 @@ static const struct of_device_id replicator_match[] = {
static struct platform_driver replicator_driver = {
.probe = replicator_probe,
- .remove = replicator_remove,
.driver = {
.name = "coresight-replicator",
.of_match_table = replicator_match,
.pm = &replicator_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index a57c7ec1661f91..c4fa70ed14ceda 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -124,7 +124,7 @@ struct tmc_drvdata {
bool reading;
char *buf;
dma_addr_t paddr;
- void __iomem *vaddr;
+ void *vaddr;
u32 size;
bool enable;
enum tmc_config_type config_type;
@@ -766,23 +766,10 @@ err_misc_register:
err_devm_kzalloc:
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
dma_free_coherent(dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
+ drvdata->vaddr, drvdata->paddr);
return ret;
}
-static int tmc_remove(struct amba_device *adev)
-{
- struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
-
- misc_deregister(&drvdata->miscdev);
- coresight_unregister(drvdata->csdev);
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(drvdata->dev, drvdata->size,
- &drvdata->paddr, GFP_KERNEL);
-
- return 0;
-}
-
static struct amba_id tmc_ids[] = {
{
.id = 0x0003b961,
@@ -795,9 +782,9 @@ static struct amba_driver tmc_driver = {
.drv = {
.name = "coresight-tmc",
.owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
},
.probe = tmc_probe,
- .remove = tmc_remove,
.id_table = tmc_ids,
};
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 22e10b7d505db2..105c192eb2c104 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -46,8 +46,9 @@
/** register definition **/
/* FFSR - 0x300 */
-#define FFSR_FT_STOPPED BIT(1)
+#define FFSR_FT_STOPPED_BIT 1
/* FFCR - 0x304 */
+#define FFCR_FON_MAN_BIT 6
#define FFCR_FON_MAN BIT(6)
#define FFCR_STOP_FI BIT(12)
@@ -93,9 +94,9 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
/* Generate manual flush */
writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
/* Wait for flush to complete */
- coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0);
+ coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN_BIT, 0);
/* Wait for formatter to stop */
- coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1);
+ coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED_BIT, 1);
CS_LOCK(drvdata->base);
}
@@ -179,14 +180,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
return 0;
}
-static int tpiu_remove(struct amba_device *adev)
-{
- struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
-
- coresight_unregister(drvdata->csdev);
- return 0;
-}
-
#ifdef CONFIG_PM
static int tpiu_runtime_suspend(struct device *dev)
{
@@ -230,9 +223,9 @@ static struct amba_driver tpiu_driver = {
.name = "coresight-tpiu",
.owner = THIS_MODULE,
.pm = &tpiu_dev_pm_ops,
+ .suppress_bind_attrs = true,
},
.probe = tpiu_probe,
- .remove = tpiu_remove,
.id_table = tpiu_ids,
};
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 93738dfbf6313e..c6aea4795d0b2d 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -86,7 +86,7 @@ static int coresight_find_link_inport(struct coresight_device *csdev)
dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
dev_name(&parent->dev), dev_name(&csdev->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_find_link_outport(struct coresight_device *csdev)
@@ -107,7 +107,7 @@ static int coresight_find_link_outport(struct coresight_device *csdev)
dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
dev_name(&csdev->dev), dev_name(&child->dev));
- return 0;
+ return -ENODEV;
}
static int coresight_enable_sink(struct coresight_device *csdev)
@@ -155,6 +155,9 @@ static int coresight_enable_link(struct coresight_device *csdev)
else
refport = 0;
+ if (refport < 0)
+ return refport;
+
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
@@ -481,6 +484,8 @@ static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
+ kfree(csdev->conns);
+ kfree(csdev->refcnt);
kfree(csdev);
}
@@ -568,6 +573,8 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
if (dev) {
conn->child_dev = to_coresight_device(dev);
+ /* and put reference from 'bus_find_device()' */
+ put_device(dev);
} else {
csdev->orphan = true;
conn->child_dev = NULL;
@@ -575,6 +582,50 @@ static void coresight_fixup_device_conns(struct coresight_device *csdev)
}
}
+static int coresight_remove_match(struct device *dev, void *data)
+{
+ int i;
+ struct coresight_device *csdev, *iterator;
+ struct coresight_connection *conn;
+
+ csdev = data;
+ iterator = to_coresight_device(dev);
+
+ /* No need to check oneself */
+ if (csdev == iterator)
+ return 0;
+
+ /*
+	 * Cycle through all the connections of that component. If we find
+	 * a connection whose name matches @csdev, remove it.
+ */
+ for (i = 0; i < iterator->nr_outport; i++) {
+ conn = &iterator->conns[i];
+
+ if (conn->child_dev == NULL)
+ continue;
+
+ if (!strcmp(dev_name(&csdev->dev), conn->child_name)) {
+ iterator->orphan = true;
+ conn->child_dev = NULL;
+ /* No need to continue */
+ break;
+ }
+ }
+
+ /*
+	 * Returning '0' ensures that all known components on the
+ * bus will be checked.
+ */
+ return 0;
+}
+
+static void coresight_remove_conns(struct coresight_device *csdev)
+{
+ bus_for_each_dev(&coresight_bustype, NULL,
+ csdev, coresight_remove_match);
+}
+
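coresight_remove_conns() leans on the generic bus_for_each_dev() iterator: the callback runs once per device on the bus, and the walk continues for as long as it returns 0. A small sketch of that iterator contract (count_match is illustrative):

	static int count_match(struct device *dev, void *data)
	{
		int *count = data;

		(*count)++;
		return 0;	/* returning non-zero would stop the walk */
	}

	/* usage: int n = 0;
	 * bus_for_each_dev(&coresight_bustype, NULL, &n, count_match); */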
/**
* coresight_timeout - loop until a bit has changed to a specific state.
* @addr: base address of the area of interest.
@@ -713,12 +764,9 @@ EXPORT_SYMBOL_GPL(coresight_register);
void coresight_unregister(struct coresight_device *csdev)
{
- mutex_lock(&coresight_mutex);
-
- kfree(csdev->conns);
+ /* Remove references of that device in the topology */
+ coresight_remove_conns(csdev);
device_unregister(&csdev->dev);
-
- mutex_unlock(&coresight_mutex);
}
EXPORT_SYMBOL_GPL(coresight_unregister);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 7d2bb154960839..fb7597b1c66fe1 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -86,7 +86,7 @@ static int of_coresight_alloc_memory(struct device *dev,
return -ENOMEM;
/* Children connected to this component via @outports */
- pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+ pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
sizeof(*pdata->child_names),
GFP_KERNEL);
if (!pdata->child_names)
diff --git a/drivers/hwtracing/intel_th/gth.c b/drivers/hwtracing/intel_th/gth.c
index 2dc5378ccd3aa3..eb43943cdf075b 100644
--- a/drivers/hwtracing/intel_th/gth.c
+++ b/drivers/hwtracing/intel_th/gth.c
@@ -591,11 +591,15 @@ static void intel_th_gth_unassign(struct intel_th_device *thdev,
{
struct gth_device *gth = dev_get_drvdata(&thdev->dev);
int port = othdev->output.port;
+ int master;
spin_lock(&gth->gth_lock);
othdev->output.port = -1;
othdev->output.active = false;
gth->output[port].output = NULL;
+ for (master = 0; master < TH_CONFIGURABLE_MASTERS; master++)
+ if (gth->master[master] == port)
+ gth->master[master] = -1;
spin_unlock(&gth->gth_lock);
}
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 70ca27e4560214..9d9e47eb0842cf 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1418,7 +1418,8 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
if (!end)
break;
- len -= end - p;
+ /* consume the number and the following comma, hence +1 */
+ len -= end - p + 1;
p = end + 1;
} while (len);
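The one-byte bookkeeping error fixed above is easy to reproduce: when splitting a "num,num,..." string, the consumed span is the digits plus the delimiter. A sketch of the corrected accounting, assuming p points into a buffer with len bytes remaining:

	char *end;

	do {
		end = memchr(p, ',', len);
		if (!end)
			break;			/* last number, no comma  */
		len -= end - p + 1;		/* digits plus the ',' itself */
		p = end + 1;
	} while (len);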
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index e7a348807f0cb2..e0ac7539552693 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -9,6 +9,8 @@ config STM
Say Y here to enable System Trace Module device support.
+if STM
+
config STM_DUMMY
tristate "Dummy STM driver"
help
@@ -25,3 +27,5 @@ config STM_SOURCE_CONSOLE
If you want to send kernel console messages over STM devices,
say Y.
+
+endif
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index d2dff159a471af..b6cc841de79dda 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -26,6 +26,7 @@
#include <linux/stm.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
#include "stm.h"
#include <uapi/linux/stm.h>
@@ -113,6 +114,7 @@ struct stm_device *stm_find_device(const char *buf)
stm = to_stm_device(dev);
if (!try_module_get(stm->owner)) {
+ /* matches class_find_device() above */
put_device(dev);
return NULL;
}
@@ -125,7 +127,7 @@ struct stm_device *stm_find_device(const char *buf)
* @stm: stm device, previously acquired by stm_find_device()
*
* This drops the module reference and device reference taken by
- * stm_find_device().
+ * stm_find_device() or stm_char_open().
*/
void stm_put_device(struct stm_device *stm)
{
@@ -185,6 +187,9 @@ static void stm_output_claim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
if (WARN_ON_ONCE(master->nr_free < output->nr_chans))
return;
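The lockdep_assert_held() annotations added here turn the locking contract into something checkable: with CONFIG_LOCKDEP enabled the kernel warns if the function runs without the lock held, and the check compiles away otherwise. A minimal sketch:

	static void touch_protected_state(struct stm_device *stm)
	{
		lockdep_assert_held(&stm->mc_lock);	/* caller must hold it */
		/* ... safely modify state guarded by mc_lock ... */
	}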
@@ -199,6 +204,9 @@ stm_output_disclaim(struct stm_device *stm, struct stm_output *output)
{
struct stp_master *master = stm_master(stm, output->master);
+ lockdep_assert_held(&stm->mc_lock);
+ lockdep_assert_held(&output->lock);
+
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
@@ -228,6 +236,9 @@ static int find_free_channels(unsigned long *bitmap, unsigned int start,
;
if (i == width)
return pos;
+
+ /* step over [pos..pos+i) to continue search */
+ pos += i;
}
return -1;
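The added pos += i is the classic skip in a consecutive-zero-bit search: if the window starting at pos is broken by a set bit after i clear bits, no window overlapping that set bit can succeed, so the scan may jump past it rather than re-test overlapping windows one bit at a time. A sketch of the loop shape, using the standard find_next_zero_bit() and test_bit() bitmap helpers:

	for (pos = start; pos < end; pos++) {
		pos = find_next_zero_bit(bitmap, end, pos);
		if (pos + width > end)
			break;			/* not enough room left */
		for (i = 0; i < width; i++)
			if (test_bit(pos + i, bitmap))
				break;
		if (i == width)
			return pos;		/* 'width' consecutive zeros */
		pos += i;	/* a set bit sits at pos + i; skip past it */
	}
	return -1;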
@@ -288,6 +299,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
}
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
/* output is already assigned -- shouldn't happen */
if (WARN_ON_ONCE(output->nr_chans))
goto unlock;
@@ -304,6 +316,7 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
ret = 0;
unlock:
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
return ret;
@@ -312,11 +325,18 @@ unlock:
static void stm_output_free(struct stm_device *stm, struct stm_output *output)
{
spin_lock(&stm->mc_lock);
+ spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
+ spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
+static void stm_output_init(struct stm_output *output)
+{
+ spin_lock_init(&output->lock);
+}
+
static int major_match(struct device *dev, const void *data)
{
unsigned int major = *(unsigned int *)data;
@@ -339,6 +359,7 @@ static int stm_char_open(struct inode *inode, struct file *file)
if (!stmf)
return -ENOMEM;
+ stm_output_init(&stmf->output);
stmf->stm = to_stm_device(dev);
if (!try_module_get(stmf->stm->owner))
@@ -349,6 +370,8 @@ static int stm_char_open(struct inode *inode, struct file *file)
return nonseekable_open(inode, file);
err_free:
+ /* matches class_find_device() above */
+ put_device(dev);
kfree(stmf);
return err;
@@ -359,6 +382,11 @@ static int stm_char_release(struct inode *inode, struct file *file)
struct stm_file *stmf = file->private_data;
stm_output_free(stmf->stm, &stmf->output);
+
+ /*
+ * matches the stm_char_open()'s
+ * class_find_device() + try_module_get()
+ */
stm_put_device(stmf->stm);
kfree(stmf);
@@ -406,6 +434,9 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
char *kbuf;
int err;
+ if (count + 1 > PAGE_SIZE)
+ count = PAGE_SIZE - 1;
+
/*
* if no m/c have been assigned to this writer up to this
* point, use "default" policy entry
@@ -473,7 +504,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
- int ret = -EINVAL;
+ int ret = -EINVAL, wlimit = 1;
u32 size;
if (stmf->output.nr_chans)
@@ -501,8 +532,10 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
if (id->__reserved_0 || id->__reserved_1)
goto err_free;
- if (id->width < 1 ||
- id->width > PAGE_SIZE / stm->data->sw_mmiosz)
+ if (stm->data->sw_mmiosz)
+ wlimit = PAGE_SIZE / stm->data->sw_mmiosz;
+
+ if (id->width < 1 || id->width > wlimit)
goto err_free;
ret = stm_file_assign(stmf, id->id, id->width);
@@ -515,10 +548,8 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
ret = stm->data->link(stm->data, stmf->output.master,
stmf->output.channel);
- if (ret) {
+ if (ret)
stm_output_free(stmf->stm, &stmf->output);
- stm_put_device(stmf->stm);
- }
err_free:
kfree(id);
@@ -602,7 +633,7 @@ static void stm_device_release(struct device *dev)
{
struct stm_device *stm = to_stm_device(dev);
- kfree(stm);
+ vfree(stm);
}
int stm_register_device(struct device *parent, struct stm_data *stm_data,
@@ -619,7 +650,7 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
return -EINVAL;
nmasters = stm_data->sw_end - stm_data->sw_start;
- stm = kzalloc(sizeof(*stm) + nmasters * sizeof(void *), GFP_KERNEL);
+ stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
if (!stm)
return -ENOMEM;
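The switch from kzalloc() to vzalloc() is about the trailing array: nmasters is sw_end - sw_start and can be large enough that a physically contiguous allocation becomes unreliable. Virtually contiguous memory is fine for this structure, but the release path must then use vfree(), which is exactly what the matching stm_device_release() hunk does. The required pairing, in short:

	stm = vzalloc(sizeof(*stm) + nmasters * sizeof(void *));
	if (!stm)
		return -ENOMEM;
	/* ... */
	vfree(stm);	/* kfree() on vmalloc'd memory would corrupt state */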
@@ -633,17 +664,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
stm->dev.parent = parent;
stm->dev.release = stm_device_release;
- err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
- if (err)
- goto err_device;
-
- err = device_add(&stm->dev);
- if (err)
- goto err_device;
-
+ mutex_init(&stm->link_mutex);
spin_lock_init(&stm->link_lock);
INIT_LIST_HEAD(&stm->link_list);
+ /* initialize the object before it is accessible via sysfs */
spin_lock_init(&stm->mc_lock);
mutex_init(&stm->policy_mutex);
stm->sw_nmasters = nmasters;
@@ -651,31 +676,50 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
stm->data = stm_data;
stm_data->stm = stm;
+ err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
+ if (err)
+ goto err_device;
+
+ err = device_add(&stm->dev);
+ if (err)
+ goto err_device;
+
return 0;
err_device:
+ unregister_chrdev(stm->major, stm_data->name);
+
+ /* matches device_initialize() above */
put_device(&stm->dev);
err_free:
- kfree(stm);
+ vfree(stm);
return err;
}
EXPORT_SYMBOL_GPL(stm_register_device);
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm);
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm);
void stm_unregister_device(struct stm_data *stm_data)
{
struct stm_device *stm = stm_data->stm;
struct stm_source_device *src, *iter;
- int i;
+ int i, ret;
- spin_lock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
list_for_each_entry_safe(src, iter, &stm->link_list, link_entry) {
- __stm_source_link_drop(src, stm);
+ ret = __stm_source_link_drop(src, stm);
+ /*
+		 * The src <-> stm link must not change while the same
+		 * stm::link_mutex is held, so complain loudly if it has;
+		 * in that situation ret != 0 means this src is not
+		 * connected to this stm, so it should still be safe to
+		 * proceed with the tear-down of the stm.
+ */
+ WARN_ON_ONCE(ret);
}
- spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
synchronize_srcu(&stm_source_srcu);
@@ -694,6 +738,17 @@ void stm_unregister_device(struct stm_data *stm_data)
}
EXPORT_SYMBOL_GPL(stm_unregister_device);
+/*
+ * stm::link_list access serialization uses a spinlock and a mutex; holding
+ * either of them guarantees that the list is stable; modification requires
+ * holding both of them.
+ *
+ * Lock ordering is as follows:
+ * stm::link_mutex
+ * stm::link_lock
+ * src::link_lock
+ */
+
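Given the ordering documented above, every path that modifies the link list nests the three locks the same way, as stm_source_link_add() below does; taking them in any other order risks deadlock. A condensed sketch:

	mutex_lock(&stm->link_mutex);
	spin_lock(&stm->link_lock);
	spin_lock(&src->link_lock);
	/* ... modify stm::link_list and src::link ... */
	spin_unlock(&src->link_lock);
	spin_unlock(&stm->link_lock);
	mutex_unlock(&stm->link_mutex);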
/**
* stm_source_link_add() - connect an stm_source device to an stm device
* @src: stm_source device
@@ -710,6 +765,7 @@ static int stm_source_link_add(struct stm_source_device *src,
char *id;
int err;
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -719,6 +775,7 @@ static int stm_source_link_add(struct stm_source_device *src,
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
id = kstrdup(src->data->name, GFP_KERNEL);
if (id) {
@@ -753,9 +810,9 @@ static int stm_source_link_add(struct stm_source_device *src,
fail_free_output:
stm_output_free(stm, &src->output);
- stm_put_device(stm);
fail_detach:
+ mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
@@ -764,6 +821,7 @@ fail_detach:
spin_unlock(&src->link_lock);
spin_unlock(&stm->link_lock);
+ mutex_unlock(&stm->link_mutex);
return err;
}
@@ -776,28 +834,45 @@ fail_detach:
* If @stm is @src::link, disconnect them from one another and put the
* reference on the @stm device.
*
- * Caller must hold stm::link_lock.
+ * Caller must hold stm::link_mutex.
*/
-static void __stm_source_link_drop(struct stm_source_device *src,
- struct stm_device *stm)
+static int __stm_source_link_drop(struct stm_source_device *src,
+ struct stm_device *stm)
{
struct stm_device *link;
+ int ret = 0;
+
+ lockdep_assert_held(&stm->link_mutex);
+ /* for stm::link_list modification, we hold both mutex and spinlock */
+ spin_lock(&stm->link_lock);
spin_lock(&src->link_lock);
link = srcu_dereference_check(src->link, &stm_source_srcu, 1);
- if (WARN_ON_ONCE(link != stm)) {
- spin_unlock(&src->link_lock);
- return;
+
+ /*
+ * The linked device may have changed since we last looked, because
+ * we weren't holding the src::link_lock back then; if this is the
+ * case, tell the caller to retry.
+ */
+ if (link != stm) {
+ ret = -EAGAIN;
+ goto unlock;
}
stm_output_free(link, &src->output);
- /* caller must hold stm::link_lock */
list_del_init(&src->link_entry);
/* matches stm_find_device() from stm_source_link_store() */
stm_put_device(link);
rcu_assign_pointer(src->link, NULL);
+unlock:
spin_unlock(&src->link_lock);
+ spin_unlock(&stm->link_lock);
+
+ if (!ret && src->data->unlink)
+ src->data->unlink(src->data);
+
+ return ret;
}
/**
@@ -813,21 +888,29 @@ static void __stm_source_link_drop(struct stm_source_device *src,
static void stm_source_link_drop(struct stm_source_device *src)
{
struct stm_device *stm;
- int idx;
+ int idx, ret;
+retry:
idx = srcu_read_lock(&stm_source_srcu);
+ /*
+ * The stm device will be valid for the duration of this
+ * read section, but the link may change before we grab
+ * the src::link_lock in __stm_source_link_drop().
+ */
stm = srcu_dereference(src->link, &stm_source_srcu);
+ ret = 0;
if (stm) {
- if (src->data->unlink)
- src->data->unlink(src->data);
-
- spin_lock(&stm->link_lock);
- __stm_source_link_drop(src, stm);
- spin_unlock(&stm->link_lock);
+ mutex_lock(&stm->link_mutex);
+ ret = __stm_source_link_drop(src, stm);
+ mutex_unlock(&stm->link_mutex);
}
srcu_read_unlock(&stm_source_srcu, idx);
+
+ /* if it did change, retry */
+ if (ret == -EAGAIN)
+ goto retry;
}
static ssize_t stm_source_link_show(struct device *dev,
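The goto-based retry in stm_source_link_drop() is equivalent to a loop: the SRCU read section keeps the stm device valid while the mutex-protected drop either succeeds or reports -EAGAIN because the link changed before the lock was taken. The same logic written as a do/while, as a sketch rather than the patch's literal code:

	do {
		idx = srcu_read_lock(&stm_source_srcu);
		stm = srcu_dereference(src->link, &stm_source_srcu);
		ret = 0;
		if (stm) {
			mutex_lock(&stm->link_mutex);
			ret = __stm_source_link_drop(src, stm);
			mutex_unlock(&stm->link_mutex);
		}
		srcu_read_unlock(&stm_source_srcu, idx);
	} while (ret == -EAGAIN);	/* link changed under us: retry */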
@@ -862,8 +945,10 @@ static ssize_t stm_source_link_store(struct device *dev,
return -EINVAL;
err = stm_source_link_add(src, link);
- if (err)
+ if (err) {
+ /* matches the stm_find_device() above */
stm_put_device(link);
+ }
return err ? : count;
}
@@ -925,6 +1010,7 @@ int stm_source_register_device(struct device *parent,
if (err)
goto err;
+ stm_output_init(&src->output);
spin_lock_init(&src->link_lock);
INIT_LIST_HEAD(&src->link_entry);
src->data = data;
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 11ab6d01adf63d..1db189657b2b01 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -272,13 +272,17 @@ void stp_policy_unbind(struct stp_policy *policy)
{
struct stm_device *stm = policy->stm;
+ /*
+	 * stp_policy_release() will not end up here if the policy is already
+	 * unbound; other callers should not either, as no link exists between
+	 * this policy and anything else in that case.
+ */
if (WARN_ON_ONCE(!policy->stm))
return;
- mutex_lock(&stm->policy_mutex);
- stm->policy = NULL;
- mutex_unlock(&stm->policy_mutex);
+ lockdep_assert_held(&stm->policy_mutex);
+ stm->policy = NULL;
policy->stm = NULL;
stm_put_device(stm);
@@ -287,8 +291,16 @@ void stp_policy_unbind(struct stp_policy *policy)
static void stp_policy_release(struct config_item *item)
{
struct stp_policy *policy = to_stp_policy(item);
+ struct stm_device *stm = policy->stm;
+ /* a policy *can* be unbound and still exist in configfs tree */
+ if (!stm)
+ return;
+
+ mutex_lock(&stm->policy_mutex);
stp_policy_unbind(policy);
+ mutex_unlock(&stm->policy_mutex);
+
kfree(policy);
}
@@ -320,10 +332,11 @@ stp_policies_make(struct config_group *group, const char *name)
/*
* node must look like <device_name>.<policy_name>, where
- * <device_name> is the name of an existing stm device and
- * <policy_name> is an arbitrary string
+ * <device_name> is the name of an existing stm device; may
+ * contain dots;
+ * <policy_name> is an arbitrary string; may not contain dots
*/
- p = strchr(devname, '.');
+ p = strrchr(devname, '.');
if (!p) {
kfree(devname);
return ERR_PTR(-EINVAL);
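The strchr() to strrchr() switch matters once device names themselves contain dots; only the last dot can safely separate the device part from the policy part. A worked example with a hypothetical name:

	/* devname = "0000:00:02.0.my_policy" (hypothetical)           */
	/* strchr(devname, '.')  -> splits after "0000:00:02"  (wrong)  */
	/* strrchr(devname, '.') -> splits after "0000:00:02.0" (right),
	 * leaving "my_policy", which may not contain dots             */
	p = strrchr(devname, '.');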
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 95ece0292c991c..4e8c6926260f3e 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -45,6 +45,7 @@ struct stm_device {
int major;
unsigned int sw_nmasters;
struct stm_data *data;
+ struct mutex link_mutex;
spinlock_t link_lock;
struct list_head link_list;
/* master allocation */
@@ -56,6 +57,7 @@ struct stm_device {
container_of((_d), struct stm_device, dev)
struct stm_output {
+ spinlock_t lock;
unsigned int master;
unsigned int channel;
unsigned int nr_chans;
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index c335cc7852f94e..1c68b05c86490a 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -74,8 +74,7 @@
MST_STATUS_ND)
#define MST_STATUS_ERR (MST_STATUS_NAK | \
MST_STATUS_AL | \
- MST_STATUS_IP | \
- MST_STATUS_TSS)
+ MST_STATUS_IP)
#define MST_TX_BYTES_XFRD 0x50
#define MST_RX_BYTES_XFRD 0x54
#define SCL_HIGH_PERIOD 0x80
@@ -241,7 +240,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
*/
if (c <= 0 || c > I2C_SMBUS_BLOCK_MAX) {
idev->msg_err = -EPROTO;
- i2c_int_disable(idev, ~0);
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
complete(&idev->msg_complete);
break;
}
@@ -297,17 +296,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
i2c_int_disable(idev, MST_STATUS_TFL);
}
- if (status & MST_STATUS_SCC) {
- /* Stop completed */
- i2c_int_disable(idev, ~0);
- complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SNS) {
- /* Transfer done */
- i2c_int_disable(idev, ~0);
- if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
- axxia_i2c_empty_rx_fifo(idev);
- complete(&idev->msg_complete);
- } else if (unlikely(status & MST_STATUS_ERR)) {
+ if (unlikely(status & MST_STATUS_ERR)) {
/* Transfer error */
i2c_int_disable(idev, ~0);
if (status & MST_STATUS_AL)
@@ -324,6 +313,21 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
readl(idev->base + MST_TX_BYTES_XFRD),
readl(idev->base + MST_TX_XFER));
complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SCC) {
+ /* Stop completed */
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SNS) {
+ /* Transfer done */
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+ axxia_i2c_empty_rx_fifo(idev);
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_TSS) {
+ /* Transfer timeout */
+ idev->msg_err = -ETIMEDOUT;
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
}
out:
@@ -339,10 +343,10 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
u32 rx_xfer, tx_xfer;
u32 addr_1, addr_2;
unsigned long time_left;
+ unsigned int wt_value;
idev->msg = msg;
idev->msg_xfrd = 0;
- idev->msg_err = 0;
reinit_completion(&idev->msg_complete);
if (i2c_m_ten(msg)) {
@@ -382,9 +386,18 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
else if (axxia_i2c_fill_tx_fifo(idev) != 0)
int_mask |= MST_STATUS_TFL;
+ wt_value = WT_VALUE(readl(idev->base + WAIT_TIMER_CONTROL));
+	/* Disable the wait timer temporarily */
+ writel(wt_value, idev->base + WAIT_TIMER_CONTROL);
+ /* Check if timeout error happened */
+ if (idev->msg_err)
+ goto out;
+
/* Start manual mode */
writel(CMD_MANUAL, idev->base + MST_COMMAND);
+ writel(WT_EN | wt_value, idev->base + WAIT_TIMER_CONTROL);
+
i2c_int_enable(idev, int_mask);
time_left = wait_for_completion_timeout(&idev->msg_complete,
@@ -395,13 +408,15 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
if (readl(idev->base + MST_COMMAND) & CMD_BUSY)
dev_warn(idev->dev, "busy after xfer\n");
- if (time_left == 0)
+ if (time_left == 0) {
idev->msg_err = -ETIMEDOUT;
-
- if (idev->msg_err == -ETIMEDOUT)
i2c_recover_bus(&idev->adapter);
+ axxia_i2c_init(idev);
+ }
- if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO)
+out:
+ if (unlikely(idev->msg_err) && idev->msg_err != -ENXIO &&
+ idev->msg_err != -ETIMEDOUT)
axxia_i2c_init(idev);
return idev->msg_err;
@@ -409,7 +424,7 @@ static int axxia_i2c_xfer_msg(struct axxia_i2c_dev *idev, struct i2c_msg *msg)
static int axxia_i2c_stop(struct axxia_i2c_dev *idev)
{
- u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC;
+ u32 int_mask = MST_STATUS_ERR | MST_STATUS_SCC | MST_STATUS_TSS;
unsigned long time_left;
reinit_completion(&idev->msg_complete);
@@ -436,6 +451,9 @@ axxia_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
int i;
int ret = 0;
+ idev->msg_err = 0;
+ i2c_int_enable(idev, MST_STATUS_TSS);
+
for (i = 0; ret == 0 && i < num; ++i)
ret = axxia_i2c_xfer_msg(idev, &msgs[i]);
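The recurring ~MST_STATUS_TSS masks are the heart of this axxia change: every completion path now disables "everything except TSS", so the wait-timer timeout interrupt stays armed across the whole transaction, and it is enabled exactly once per transfer. Condensed:

	/* once per transfer, before the message loop */
	idev->msg_err = 0;
	i2c_int_enable(idev, MST_STATUS_TSS);

	/* in each completion path inside the ISR */
	i2c_int_disable(idev, ~MST_STATUS_TSS);	/* keep only TSS live */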
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 84deed6571bdf4..6d32e6da3110d1 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -378,8 +378,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ else
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -436,8 +438,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if (id->send_count > CDNS_I2C_FIFO_DEPTH)
+ if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
ctrl_reg |= CDNS_I2C_CR_HOLD;
+ else
+ ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
+
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-cros-ec-tunnel.c b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
index dab57bb1240c33..390dc2b2238952 100644
--- a/drivers/i2c/busses/i2c-cros-ec-tunnel.c
+++ b/drivers/i2c/busses/i2c-cros-ec-tunnel.c
@@ -16,6 +16,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#define DRV_NAME "cros-ec-i2c-tunnel"
+
#define I2C_MAX_RETRIES 3
/**
@@ -356,7 +358,7 @@ static int ec_i2c_probe(struct platform_device *pdev)
bus->dev = dev;
bus->adap.owner = THIS_MODULE;
- strlcpy(bus->adap.name, "cros-ec-i2c-tunnel", sizeof(bus->adap.name));
+ strlcpy(bus->adap.name, DRV_NAME, sizeof(bus->adap.name));
bus->adap.algo = &ec_i2c_algorithm;
bus->adap.algo_data = bus;
bus->adap.dev.parent = &pdev->dev;
@@ -387,7 +389,7 @@ static int ec_i2c_remove(struct platform_device *dev)
#ifdef CONFIG_OF
static const struct of_device_id cros_ec_i2c_of_match[] = {
- { .compatible = "google,cros-ec-i2c-tunnel" },
+ { .compatible = "google," DRV_NAME },
{},
};
MODULE_DEVICE_TABLE(of, cros_ec_i2c_of_match);
@@ -397,7 +399,7 @@ static struct platform_driver ec_i2c_tunnel_driver = {
.probe = ec_i2c_probe,
.remove = ec_i2c_remove,
.driver = {
- .name = "cros-ec-i2c-tunnel",
+ .name = DRV_NAME,
.of_match_table = of_match_ptr(cros_ec_i2c_of_match),
},
};
@@ -416,4 +418,4 @@ module_exit(ec_i2c_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("EC I2C tunnel driver");
-MODULE_ALIAS("platform:cros-ec-i2c-tunnel");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a8bdcb5292f5b3..57f6eb1427b4ee 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -234,12 +234,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
+ *
+ * Note:
+	 * CLKH is not allowed to be 0; in that case the I2C clock is not
+	 * generated at all.
*/
- if (clk >= clkl + d) {
+ if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
- clkh = 0;
+ clkh = 1;
clkl = clk - (d << 1);
}
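The off-by-one in the comparison is easiest to see at the boundary. With hypothetical divider values clk = 19, clkl = 12, d = 7 (so clk == clkl + d):

	/* old: if (clk >= clkl + d) -> clkh = 19 - 12 - 7 = 0 -> no SCL   */
	/* new: if (clk >  clkl + d) -> else branch: clkh = 1,
	 *      clkl = 19 - (7 << 1) = 5, so the bus keeps clocking        */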
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 7d7ae97476e2c4..a45ebe7492c9ed 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -11,7 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
-#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/acpi.h>
@@ -156,7 +155,3 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
return 0;
}
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index dbf1ca23ca0299..51d3c971368a2e 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -42,6 +42,8 @@
#define DW_IC_SS_SCL_LCNT 0x18
#define DW_IC_FS_SCL_HCNT 0x1c
#define DW_IC_FS_SCL_LCNT 0x20
+#define DW_IC_HS_SCL_HCNT 0x24
+#define DW_IC_HS_SCL_LCNT 0x28
#define DW_IC_INTR_STAT 0x2c
#define DW_IC_INTR_MASK 0x30
#define DW_IC_RAW_INTR_STAT 0x34
@@ -89,12 +91,20 @@
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_STOP_DET)
-#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_TFE BIT(2)
+#define DW_IC_STATUS_MST_ACTIVITY BIT(5)
+
+#define DW_IC_SDA_HOLD_RX_SHIFT 16
+#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT)
#define DW_IC_ERR_TX_ABRT 0x1
#define DW_IC_TAR_10BITADDR_MASTER BIT(12)
+#define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3))
+#define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2)
+
/*
* status codes
*/
@@ -169,13 +179,13 @@ static u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
u32 value;
- if (dev->accessor_flags & ACCESS_16BIT)
+ if (dev->flags & ACCESS_16BIT)
value = readw_relaxed(dev->base + offset) |
(readw_relaxed(dev->base + offset + 2) << 16);
else
value = readl_relaxed(dev->base + offset);
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->flags & ACCESS_SWAP)
return swab32(value);
else
return value;
@@ -183,10 +193,10 @@ static u32 dw_readl(struct dw_i2c_dev *dev, int offset)
static void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->flags & ACCESS_SWAP)
b = swab32(b);
- if (dev->accessor_flags & ACCESS_16BIT) {
+ if (dev->flags & ACCESS_16BIT) {
writew_relaxed((u16)b, dev->base + offset);
writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
} else {
@@ -252,10 +262,15 @@ static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
{
+ dw_writel(dev, enable, DW_IC_ENABLE);
+}
+
+static void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable)
+{
int timeout = 100;
do {
- dw_writel(dev, enable, DW_IC_ENABLE);
+ __i2c_dw_enable(dev, enable);
if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable)
return;
@@ -271,6 +286,39 @@ static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
enable ? "en" : "dis");
}
+static unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
+{
+ /*
+ * Clock is not necessary if we got LCNT/HCNT values directly from
+ * the platform code.
+ */
+ if (WARN_ON_ONCE(!dev->get_clk_rate_khz))
+ return 0;
+ return dev->get_clk_rate_khz(dev);
+}
+
+static int i2c_dw_acquire_lock(struct dw_i2c_dev *dev)
+{
+ int ret;
+
+ if (!dev->acquire_lock)
+ return 0;
+
+ ret = dev->acquire_lock(dev);
+ if (!ret)
+ return 0;
+
+ dev_err(dev->dev, "couldn't acquire bus ownership\n");
+
+ return ret;
+}
+
+static void i2c_dw_release_lock(struct dw_i2c_dev *dev)
+{
+ if (dev->release_lock)
+ dev->release_lock(dev);
+}
+
/**
* i2c_dw_init() - initialize the designware i2c master hardware
* @dev: device private data
@@ -281,39 +329,33 @@ static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable)
*/
int i2c_dw_init(struct dw_i2c_dev *dev)
{
- u32 input_clock_khz;
u32 hcnt, lcnt;
- u32 reg;
+ u32 reg, comp_param1;
u32 sda_falling_time, scl_falling_time;
int ret;
- if (dev->acquire_lock) {
- ret = dev->acquire_lock(dev);
- if (ret) {
- dev_err(dev->dev, "couldn't acquire bus ownership\n");
- return ret;
- }
- }
-
- input_clock_khz = dev->get_clk_rate_khz(dev);
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
/* Configure register endianess access */
- dev->accessor_flags |= ACCESS_SWAP;
+ dev->flags |= ACCESS_SWAP;
} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
/* Configure register access mode 16bit */
- dev->accessor_flags |= ACCESS_16BIT;
+ dev->flags |= ACCESS_16BIT;
} else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
return -ENODEV;
}
+ comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1);
+
/* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ __i2c_dw_enable_and_wait(dev, false);
/* set standard and fast speed deviders for high/low periods */
@@ -325,12 +367,12 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
hcnt = dev->ss_hcnt;
lcnt = dev->ss_lcnt;
} else {
- hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+ hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev),
4000, /* tHD;STA = tHIGH = 4.0 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+ lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev),
4700, /* tLOW = 4.7 us */
scl_falling_time,
0); /* No offset */
@@ -339,17 +381,20 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
- /* Set SCL timing parameters for fast-mode */
- if (dev->fs_hcnt && dev->fs_lcnt) {
+ /* Set SCL timing parameters for fast-mode or fast-mode plus */
+ if ((dev->clk_freq == 1000000) && dev->fp_hcnt && dev->fp_lcnt) {
+ hcnt = dev->fp_hcnt;
+ lcnt = dev->fp_lcnt;
+ } else if (dev->fs_hcnt && dev->fs_lcnt) {
hcnt = dev->fs_hcnt;
lcnt = dev->fs_lcnt;
} else {
- hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+ hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev),
600, /* tHD;STA = tHIGH = 0.6 us */
sda_falling_time,
0, /* 0: DW default, 1: Ideal */
0); /* No offset */
- lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+ lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev),
1300, /* tLOW = 1.3 us */
scl_falling_time,
0); /* No offset */
@@ -358,14 +403,43 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
+ if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
+ DW_IC_CON_SPEED_HIGH) {
+ if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
+ != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
+ dev_err(dev->dev, "High Speed not supported!\n");
+ dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
+ dev->master_cfg |= DW_IC_CON_SPEED_FAST;
+ } else if (dev->hs_hcnt && dev->hs_lcnt) {
+ hcnt = dev->hs_hcnt;
+ lcnt = dev->hs_lcnt;
+ dw_writel(dev, hcnt, DW_IC_HS_SCL_HCNT);
+ dw_writel(dev, lcnt, DW_IC_HS_SCL_LCNT);
+ dev_dbg(dev->dev, "HighSpeed-mode HCNT:LCNT = %d:%d\n",
+ hcnt, lcnt);
+ }
+ }
+
/* Configure SDA Hold Time if required */
- if (dev->sda_hold_time) {
- reg = dw_readl(dev, DW_IC_COMP_VERSION);
- if (reg >= DW_IC_SDA_HOLD_MIN_VERS)
- dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
- else
- dev_warn(dev->dev,
- "Hardware too old to adjust SDA hold time.");
+ reg = dw_readl(dev, DW_IC_COMP_VERSION);
+ if (reg >= DW_IC_SDA_HOLD_MIN_VERS) {
+ if (!dev->sda_hold_time) {
+ /* Keep previous hold time setting if no one set it */
+ dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD);
+ }
+ /*
+	 * Workaround to avoid TX arbitration loss in case an I2C
+	 * slave pulls SDA down "too quickly" after the falling edge of
+	 * SCL by enabling a non-zero SDA RX hold. The specification says it
+	 * extends the incoming SDA low-to-high transition while SCL is
+	 * high, but it appears to help with the above issue as well.
+ */
+ if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK))
+ dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT;
+ dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD);
+ } else {
+ dev_warn(dev->dev,
+ "Hardware too old to adjust SDA hold time.\n");
}
/* Configure Tx/Rx FIFO threshold levels */
@@ -375,8 +449,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
/* configure the i2c master */
dw_writel(dev, dev->master_cfg , DW_IC_CON);
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -403,27 +477,45 @@ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
- u32 ic_con, ic_tar = 0;
+ u32 ic_tar = 0;
+ bool enabled;
- /* Disable the adapter */
- __i2c_dw_enable(dev, false);
+ enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1;
+
+ if (enabled) {
+ u32 ic_status;
+
+ /*
+ * Only disable adapter if ic_tar and ic_con can't be
+ * dynamically updated
+ */
+ ic_status = dw_readl(dev, DW_IC_STATUS);
+ if (!dev->dynamic_tar_update_enabled ||
+ (ic_status & DW_IC_STATUS_MST_ACTIVITY) ||
+ !(ic_status & DW_IC_STATUS_TFE)) {
+ __i2c_dw_enable_and_wait(dev, false);
+ enabled = false;
+ }
+ }
/* if the slave address is ten bit address, enable 10BITADDR */
- ic_con = dw_readl(dev, DW_IC_CON);
- if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
- ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ if (dev->dynamic_tar_update_enabled) {
/*
* If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
- * mode has to be enabled via bit 12 of IC_TAR register.
- * We set it always as I2C_DYNAMIC_TAR_UPDATE can't be
- * detected from registers.
+ * mode has to be enabled via bit 12 of IC_TAR register,
+ * otherwise bit 4 of IC_CON is used.
*/
- ic_tar = DW_IC_TAR_10BITADDR_MASTER;
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_tar = DW_IC_TAR_10BITADDR_MASTER;
} else {
- ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
- }
+ u32 ic_con = dw_readl(dev, DW_IC_CON);
- dw_writel(dev, ic_con, DW_IC_CON);
+ if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
+ ic_con |= DW_IC_CON_10BITADDR_MASTER;
+ else
+ ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
+ dw_writel(dev, ic_con, DW_IC_CON);
+ }
/*
* Set the slave (target) address and enable 10-bit addressing mode
@@ -434,8 +526,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
/* enforce disabled interrupts (due to HW issues) */
i2c_dw_disable_int(dev);
- /* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ if (!enabled)
+ /* Enable the adapter */
+ __i2c_dw_enable_and_wait(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -462,6 +555,8 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
intr_mask = DW_IC_INTR_DEFAULT_MASK;
for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
+ u32 flags = msgs[dev->msg_write_idx].flags;
+
/*
* if target address has changed, we need to
* reprogram the target address in the i2c
@@ -507,8 +602,15 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
* detected from the registers so we set it always
* when writing/reading the last byte.
*/
+
+ /*
+ * i2c-core.c always sets the buffer length of
+ * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
+ * be adjusted when receiving the first byte.
+ * Thus we can't stop the transaction here.
+ */
if (dev->msg_write_idx == dev->msgs_num - 1 &&
- buf_len == 1)
+ buf_len == 1 && !(flags & I2C_M_RECV_LEN))
cmd |= BIT(9);
if (need_restart) {
@@ -533,7 +635,12 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
dev->tx_buf = buf;
dev->tx_buf_len = buf_len;
- if (buf_len > 0) {
+ /*
+ * Because we don't know the buffer length in the
+ * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
+ * the transaction here.
+ */
+ if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
/* more bytes to be written */
dev->status |= STATUS_WRITE_IN_PROGRESS;
break;
@@ -554,6 +661,24 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
dw_writel(dev, intr_mask, DW_IC_INTR_MASK);
}
+static u8
+i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
+{
+ struct i2c_msg *msgs = dev->msgs;
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ /*
+ * Adjust the buffer length and mask the flag
+ * after receiving the first byte.
+ */
+ len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
+ dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
+ msgs[dev->msg_read_idx].len = len;
+ msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
+
+ return len;
+}
+
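i2c_dw_recv_len() handles the SMBus block-read quirk: the message is submitted with len == 1 because the real length arrives in the first received byte. Once that count byte is read, the length becomes the count plus the count byte itself, plus one PEC byte when packet error checking is enabled, and I2C_M_RECV_LEN is cleared so the fixup runs only once. In short:

	/* first received byte = count; then: */
	len = count + ((flags & I2C_CLIENT_PEC) ? 2 : 1);
	msgs[idx].len = len;
	msgs[idx].flags &= ~I2C_M_RECV_LEN;	/* adjust exactly once */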
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
@@ -578,7 +703,15 @@ i2c_dw_read(struct dw_i2c_dev *dev)
rx_valid = dw_readl(dev, DW_IC_RXFLR);
for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
- *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ *buf = dw_readl(dev, DW_IC_DATA_CMD);
+ /* Ensure length byte is a valid value */
+ if (flags & I2C_M_RECV_LEN &&
+ *buf <= I2C_SMBUS_BLOCK_MAX && *buf > 0) {
+ len = i2c_dw_recv_len(dev, *buf);
+ }
+ buf++;
dev->rx_outstanding--;
}
@@ -616,7 +749,8 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
}
/*
- * Prepare controller for a transaction and call i2c_dw_xfer_msg
+ * Prepare controller for a transaction and start transfer by calling
+ * i2c_dw_xfer_init()
*/
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -626,7 +760,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);
- mutex_lock(&dev->lock);
pm_runtime_get_sync(dev->dev);
reinit_completion(&dev->cmd_complete);
@@ -640,13 +773,9 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
dev->abort_source = 0;
dev->rx_outstanding = 0;
- if (dev->acquire_lock) {
- ret = dev->acquire_lock(dev);
- if (ret) {
- dev_err(dev->dev, "couldn't acquire bus ownership\n");
- goto done_nolock;
- }
- }
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ goto done_nolock;
ret = i2c_dw_wait_bus_not_busy(dev);
if (ret < 0)
@@ -664,15 +793,6 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
goto done;
}
- /*
- * We must disable the adapter before unlocking the &dev->lock mutex
- * below. Otherwise the hardware might continue generating interrupts
- * which in turn causes a race condition with the following transfer.
- * Needs some more investigation if the additional interrupts are
- * a hardware bug or this driver doesn't handle them correctly yet.
- */
- __i2c_dw_enable(dev, false);
-
if (dev->msg_err) {
ret = dev->msg_err;
goto done;
@@ -697,13 +817,11 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
ret = -EIO;
done:
- if (dev->release_lock)
- dev->release_lock(dev);
+ i2c_dw_release_lock(dev);
done_nolock:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
- mutex_unlock(&dev->lock);
return ret;
}
@@ -816,9 +934,19 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET))
+ || dev->msg_err) {
+ /*
+		 * We must disable interrupts before returning and signaling
+ * the end of the current transfer. Otherwise the hardware
+ * might continue generating interrupts for non-existent
+ * transfers.
+ */
+ i2c_dw_disable_int(dev);
+ dw_readl(dev, DW_IC_CLR_INTR);
+
complete(&dev->cmd_complete);
- else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ } else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
@@ -831,7 +959,7 @@ tx_aborted:
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
/* Disable controller */
- __i2c_dw_enable(dev, false);
+ __i2c_dw_enable_and_wait(dev, false);
/* Disable all interrupts */
dw_writel(dev, 0, DW_IC_INTR_MASK);
@@ -855,22 +983,44 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
int r;
+ u32 reg;
init_completion(&dev->cmd_complete);
- mutex_init(&dev->lock);
r = i2c_dw_init(dev);
if (r)
return r;
+ r = i2c_dw_acquire_lock(dev);
+ if (r)
+ return r;
+
+ /*
+	 * Test if dynamic TAR update is enabled in this controller by writing
+	 * to the IC_10BITADDR_MASTER field in IC_CON: when it is enabled, that
+	 * field is read-only, so the write should not take effect
+ */
+ reg = dw_readl(dev, DW_IC_CON);
+ dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
+
+ if ((dw_readl(dev, DW_IC_CON) & DW_IC_CON_10BITADDR_MASTER) ==
+ (reg & DW_IC_CON_10BITADDR_MASTER)) {
+ dev->dynamic_tar_update_enabled = true;
+ dev_dbg(dev->dev, "Dynamic TAR update enabled");
+ }
+
+ i2c_dw_release_lock(dev);
+
snprintf(adap->name, sizeof(adap->name),
"Synopsys DesignWare I2C adapter");
+ adap->retries = 3;
adap->algo = &i2c_dw_algo;
adap->dev.parent = dev->dev;
i2c_set_adapdata(adap, dev);
i2c_dw_disable_int(dev);
- r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
+ r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
+ IRQF_SHARED | IRQF_COND_SUSPEND,
dev_name(dev->dev), dev);
if (r) {
dev_err(dev->dev, "failure requesting irq %i: %d\n",
@@ -878,9 +1028,17 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
return r;
}
+ /*
+ * Increment PM usage count during adapter registration in order to
+ * avoid possible spurious runtime suspend when adapter device is
+ * registered to the device core and immediate resume in case bus has
+ * registered I2C slaves that do I2C transfers in their probe.
+ */
+ pm_runtime_get_noresume(dev->dev);
r = i2c_add_numbered_adapter(adap);
if (r)
dev_err(dev->dev, "failure adding adapter: %d\n", r);
+ pm_runtime_put_noidle(dev->dev);
return r;
}
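The capability test above is a write-and-read-back probe: toggle IC_10BITADDR_MASTER in IC_CON and check whether the bit actually changed; if it did not, the field is read-only, which on this IP means dynamic TAR update is compiled in. The idiom in isolation (this sketch also restores the register on the writable path, which the driver itself does not need to do because it reprograms IC_CON before every transfer):

	reg = dw_readl(dev, DW_IC_CON);
	dw_writel(dev, reg ^ DW_IC_CON_10BITADDR_MASTER, DW_IC_CON);
	if (!((dw_readl(dev, DW_IC_CON) ^ reg) & DW_IC_CON_10BITADDR_MASTER))
		dev->dynamic_tar_update_enabled = true;	/* bit is read-only */
	else
		dw_writel(dev, reg, DW_IC_CON);		/* writable: restore */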
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 4e287b04a4b34d..c642b723719e35 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -22,10 +22,20 @@
*
*/
+#include <linux/i2c.h>
+
+#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
+ I2C_FUNC_SMBUS_BYTE | \
+ I2C_FUNC_SMBUS_BYTE_DATA | \
+ I2C_FUNC_SMBUS_WORD_DATA | \
+ I2C_FUNC_SMBUS_BLOCK_DATA | \
+ I2C_FUNC_SMBUS_I2C_BLOCK)
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_SPEED_HIGH 0x6
+#define DW_IC_CON_SPEED_MASK 0x6
#define DW_IC_CON_10BITADDR_MASTER 0x10
#define DW_IC_CON_RESTART_EN 0x20
#define DW_IC_CON_SLAVE_DISABLE 0x40
@@ -36,7 +46,6 @@
* @dev: driver model device node
* @base: IO registers pointer
* @cmd_complete: tx completion indicator
- * @lock: protect this struct and IO registers
* @clk: input reference clock
* @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transfered
@@ -57,10 +66,15 @@
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
* @rx_outstanding: current master-rx elements in tx fifo
+ * @clk_freq: bus clock frequency
* @ss_hcnt: standard speed HCNT value
* @ss_lcnt: standard speed LCNT value
* @fs_hcnt: fast speed HCNT value
* @fs_lcnt: fast speed LCNT value
+ * @fp_hcnt: fast plus HCNT value
+ * @fp_lcnt: fast plus LCNT value
+ * @hs_hcnt: high speed HCNT value
+ * @hs_lcnt: high speed LCNT value
* @acquire_lock: function to acquire a hardware lock on the bus
* @release_lock: function to release a hardware lock on the bus
* @pm_runtime_disabled: true if pm runtime is disabled
@@ -73,7 +87,6 @@ struct dw_i2c_dev {
struct device *dev;
void __iomem *base;
struct completion cmd_complete;
- struct mutex lock;
struct clk *clk;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
struct dw_pci_controller *controller;
@@ -90,13 +103,14 @@ struct dw_i2c_dev {
unsigned int status;
u32 abort_source;
int irq;
- u32 accessor_flags;
+ u32 flags;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
unsigned int tx_fifo_depth;
unsigned int rx_fifo_depth;
int rx_outstanding;
+ u32 clk_freq;
u32 sda_hold_time;
u32 sda_falling_time;
u32 scl_falling_time;
@@ -104,11 +118,16 @@ struct dw_i2c_dev {
u16 ss_lcnt;
u16 fs_hcnt;
u16 fs_lcnt;
+ u16 fp_hcnt;
+ u16 fp_lcnt;
+ u16 hs_hcnt;
+ u16 hs_lcnt;
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
bool suspended;
bool skip_resume;
+ bool dynamic_tar_update_enabled;
};
#define ACCESS_SWAP 0x00000001
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 1543d35d228dfa..bb517fbeb623b8 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -6,7 +6,7 @@
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011, 2015 Intel Corporation.
+ * Copyright (C) 2011, 2015, 2016 Intel Corporation.
*
* ----------------------------------------------------------------------------
*
@@ -23,31 +23,26 @@
*
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
+#include <linux/acpi.h>
#include <linux/delay.h>
-#include <linux/i2c.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#include <linux/acpi.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
#include "i2c-designware-core.h"
#define DRIVER_NAME "i2c-designware-pci"
enum dw_pci_ctl_id_t {
- medfield_0,
- medfield_1,
- medfield_2,
- medfield_3,
- medfield_4,
- medfield_5,
-
+ medfield,
baytrail,
haswell,
};
@@ -68,17 +63,13 @@ struct dw_pci_controller {
u32 clk_khz;
u32 functionality;
struct dw_scl_sda_cfg *scl_sda_cfg;
+ int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c);
};
#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
DW_IC_CON_SLAVE_DISABLE | \
DW_IC_CON_RESTART_EN)
-#define DW_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
- I2C_FUNC_SMBUS_BYTE | \
- I2C_FUNC_SMBUS_BYTE_DATA | \
- I2C_FUNC_SMBUS_WORD_DATA | \
- I2C_FUNC_SMBUS_I2C_BLOCK)
/* BayTrail HCNT/LCNT/SDA hold time */
static struct dw_scl_sda_cfg byt_config = {
@@ -98,48 +89,33 @@ static struct dw_scl_sda_cfg hsw_config = {
.sda_hold = 0x9,
};
+static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c)
+{
+ switch (pdev->device) {
+ case 0x0817:
+ c->bus_cfg &= ~DW_IC_CON_SPEED_MASK;
+ c->bus_cfg |= DW_IC_CON_SPEED_STD;
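+ /* fall through: devices 0x0817..0x0819 share the bus numbering below */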
+ case 0x0818:
+ case 0x0819:
+ c->bus_num = pdev->device - 0x817 + 3;
+ return 0;
+ case 0x082C:
+ case 0x082D:
+ case 0x082E:
+ c->bus_num = pdev->device - 0x82C + 0;
+ return 0;
+ }
+ return -ENODEV;
+}
+
static struct dw_pci_controller dw_pci_controllers[] = {
- [medfield_0] = {
- .bus_num = 0,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_1] = {
- .bus_num = 1,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_2] = {
- .bus_num = 2,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_3] = {
- .bus_num = 3,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_4] = {
- .bus_num = 4,
- .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
- .tx_fifo_depth = 32,
- .rx_fifo_depth = 32,
- .clk_khz = 25000,
- },
- [medfield_5] = {
- .bus_num = 5,
+ [medfield] = {
+ .bus_num = -1,
.bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
.tx_fifo_depth = 32,
.rx_fifo_depth = 32,
.clk_khz = 25000,
+ .setup = mfld_setup,
},
[baytrail] = {
.bus_num = -1,
@@ -162,7 +138,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
#ifdef CONFIG_PM
static int i2c_dw_pci_suspend(struct device *dev)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
i2c_dw_disable(pci_get_drvdata(pdev));
return 0;
@@ -170,7 +146,7 @@ static int i2c_dw_pci_suspend(struct device *dev)
static int i2c_dw_pci_resume(struct device *dev)
{
- struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct pci_dev *pdev = to_pci_dev(dev);
return i2c_dw_init(pci_get_drvdata(pdev));
}
@@ -190,7 +166,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
int r;
- struct dw_pci_controller *controller;
+ struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
@@ -224,8 +200,15 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
dev->irq = pdev->irq;
+
+ if (controller->setup) {
+ r = controller->setup(pdev, controller);
+ if (r)
+ return r;
+ }
+
dev->functionality = controller->functionality |
- DW_DEFAULT_FUNCTIONALITY;
+ DW_IC_DEFAULT_FUNCTIONALITY;
dev->master_cfg = controller->bus_cfg;
if (controller->scl_sda_cfg) {
@@ -276,12 +259,12 @@ MODULE_ALIAS("i2c_designware-pci");
static const struct pci_device_id i2_designware_pci_ids[] = {
/* Medfield */
- { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
- { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
- { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
- { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
- { PCI_VDEVICE(INTEL, 0x082D), medfield_1 },
- { PCI_VDEVICE(INTEL, 0x082E), medfield_2 },
+ { PCI_VDEVICE(INTEL, 0x0817), medfield },
+ { PCI_VDEVICE(INTEL, 0x0818), medfield },
+ { PCI_VDEVICE(INTEL, 0x0819), medfield },
+ { PCI_VDEVICE(INTEL, 0x082C), medfield },
+ { PCI_VDEVICE(INTEL, 0x082D), medfield },
+ { PCI_VDEVICE(INTEL, 0x082E), medfield },
/* Baytrail */
{ PCI_VDEVICE(INTEL, 0x0F41), baytrail },
{ PCI_VDEVICE(INTEL, 0x0F42), baytrail },
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 5dc4e93448465b..01b49ea7a31b52 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -36,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/acpi.h>
@@ -83,8 +84,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
*hcnt = (u16)objs[0].integer.value;
*lcnt = (u16)objs[1].integer.value;
- if (sda_hold)
- *sda_hold = (u32)objs[2].integer.value;
+ *sda_hold = (u32)objs[2].integer.value;
}
kfree(buf.pointer);
@@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
static int dw_i2c_acpi_configure(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
const struct acpi_device_id *id;
dev->adapter.nr = -1;
@@ -100,16 +101,33 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dev->rx_fifo_depth = 32;
/*
- * Try to get SDA hold time and *CNT values from an ACPI method if
- * it exists for both supported speed modes.
+ * Try to get SDA hold time and *CNT values from an ACPI method for
+ * selected speed modes.
*/
- dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, NULL);
- dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
- &dev->sda_hold_time);
+ dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt, &ss_ht);
+ dw_i2c_acpi_params(pdev, "FPCN", &dev->fp_hcnt, &dev->fp_lcnt, &fp_ht);
+ dw_i2c_acpi_params(pdev, "HSCN", &dev->hs_hcnt, &dev->hs_lcnt, &hs_ht);
+ dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt, &fs_ht);
+
+ switch (dev->clk_freq) {
+ case 100000:
+ dev->sda_hold_time = ss_ht;
+ break;
+ case 1000000:
+ dev->sda_hold_time = fp_ht;
+ break;
+ case 3400000:
+ dev->sda_hold_time = hs_ht;
+ break;
+ case 400000:
+ default:
+ dev->sda_hold_time = fs_ht;
+ break;
+ }
id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
if (id && id->driver_data)
- dev->accessor_flags |= (u32)id->driver_data;
+ dev->flags |= (u32)id->driver_data;
return 0;
}
@@ -132,14 +150,49 @@ static inline int dw_i2c_acpi_configure(struct platform_device *pdev)
}
#endif
+static int i2c_dw_plat_prepare_clk(struct dw_i2c_dev *i_dev, bool prepare)
+{
+ if (IS_ERR(i_dev->clk))
+ return PTR_ERR(i_dev->clk);
+
+ if (prepare)
+ return clk_prepare_enable(i_dev->clk);
+
+ clk_disable_unprepare(i_dev->clk);
+ return 0;
+}
+
+static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
+{
+ u32 param, tx_fifo_depth, rx_fifo_depth;
+
+ /*
+ * Try to detect the FIFO depth if not set by the interface driver;
+ * per the HW spec the depth can range from 2 to 256.
+ */
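+ /* IC_COMP_PARAM_1 encodes TX depth - 1 in bits 23:16 and RX depth - 1 in bits 15:8. */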
+ param = i2c_dw_read_comp_param(dev);
+ tx_fifo_depth = ((param >> 16) & 0xff) + 1;
+ rx_fifo_depth = ((param >> 8) & 0xff) + 1;
+ if (!dev->tx_fifo_depth) {
+ dev->tx_fifo_depth = tx_fifo_depth;
+ dev->rx_fifo_depth = rx_fifo_depth;
+ dev->adapter.nr = id;
+ } else if (tx_fifo_depth >= 2) {
+ dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
+ tx_fifo_depth);
+ dev->rx_fifo_depth = min_t(u32, dev->rx_fifo_depth,
+ rx_fifo_depth);
+ }
+}
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
+ struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct dw_i2c_dev *dev;
struct i2c_adapter *adap;
struct resource *mem;
- struct dw_i2c_platform_data *pdata;
int irq, r;
- u32 clk_freq, ht = 0;
+ u32 acpi_speed, ht = 0;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -158,77 +211,77 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->irq = irq;
platform_set_drvdata(pdev, dev);
- /* fast mode by default because of legacy reasons */
- clk_freq = 400000;
+ if (pdata) {
+ dev->clk_freq = pdata->i2c_scl_freq;
+ } else {
+ device_property_read_u32(&pdev->dev, "i2c-sda-hold-time-ns",
+ &ht);
+ device_property_read_u32(&pdev->dev, "i2c-sda-falling-time-ns",
+ &dev->sda_falling_time);
+ device_property_read_u32(&pdev->dev, "i2c-scl-falling-time-ns",
+ &dev->scl_falling_time);
+ device_property_read_u32(&pdev->dev, "clock-frequency",
+ &dev->clk_freq);
+ }
+
+ acpi_speed = i2c_acpi_find_bus_speed(&pdev->dev);
+ /*
+ * Find the bus speed from the "clock-frequency" device property or
+ * from ACPI, falling back to fast mode if neither is set.
+ */
+ if (acpi_speed && dev->clk_freq)
+ dev->clk_freq = min(dev->clk_freq, acpi_speed);
+ else if (acpi_speed || dev->clk_freq)
+ dev->clk_freq = max(dev->clk_freq, acpi_speed);
+ else
+ dev->clk_freq = 400000;
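+ /*
+ * Illustrative example: a DT "clock-frequency" of 1000000 combined
+ * with an ACPI-reported slave speed of 400000 resolves to 400 kHz,
+ * since the slower of the two wins; a lone source is used unchanged.
+ */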
- if (has_acpi_companion(&pdev->dev)) {
+ if (has_acpi_companion(&pdev->dev))
dw_i2c_acpi_configure(pdev);
- } else if (pdev->dev.of_node) {
- of_property_read_u32(pdev->dev.of_node,
- "i2c-sda-hold-time-ns", &ht);
-
- of_property_read_u32(pdev->dev.of_node,
- "i2c-sda-falling-time-ns",
- &dev->sda_falling_time);
- of_property_read_u32(pdev->dev.of_node,
- "i2c-scl-falling-time-ns",
- &dev->scl_falling_time);
-
- of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &clk_freq);
-
- /* Only standard mode at 100kHz and fast mode at 400kHz
- * are supported.
- */
- if (clk_freq != 100000 && clk_freq != 400000) {
- dev_err(&pdev->dev, "Only 100kHz and 400kHz supported");
- return -EINVAL;
- }
- } else {
- pdata = dev_get_platdata(&pdev->dev);
- if (pdata)
- clk_freq = pdata->i2c_scl_freq;
+
+ /*
+ * Only standard mode at 100kHz, fast mode at 400kHz,
+ * fast mode plus at 1MHz and high speed mode at 3.4MHz are supported.
+ */
+ if (dev->clk_freq != 100000 && dev->clk_freq != 400000
+ && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
+ dev_err(&pdev->dev,
+ "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
+ return -EINVAL;
}
r = i2c_dw_eval_lock_support(dev);
if (r)
return r;
- dev->functionality =
- I2C_FUNC_I2C |
- I2C_FUNC_10BIT_ADDR |
- I2C_FUNC_SMBUS_BYTE |
- I2C_FUNC_SMBUS_BYTE_DATA |
- I2C_FUNC_SMBUS_WORD_DATA |
- I2C_FUNC_SMBUS_I2C_BLOCK;
- if (clk_freq == 100000)
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_STD;
- else
- dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
- DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
-
- dev->clk = devm_clk_get(&pdev->dev, NULL);
- dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
- if (IS_ERR(dev->clk))
- return PTR_ERR(dev->clk);
- clk_prepare_enable(dev->clk);
+ dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
- if (!dev->sda_hold_time && ht) {
- u32 ic_clk = dev->get_clk_rate_khz(dev);
+ dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
+ DW_IC_CON_RESTART_EN;
- dev->sda_hold_time = div_u64((u64)ic_clk * ht + 500000,
- 1000000);
+ switch (dev->clk_freq) {
+ case 100000:
+ dev->master_cfg |= DW_IC_CON_SPEED_STD;
+ break;
+ case 3400000:
+ dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
+ break;
+ default:
+ dev->master_cfg |= DW_IC_CON_SPEED_FAST;
}
- if (!dev->tx_fifo_depth) {
- u32 param1 = i2c_dw_read_comp_param(dev);
+ dev->clk = devm_clk_get(&pdev->dev, NULL);
+ if (!i2c_dw_plat_prepare_clk(dev, true)) {
+ dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz;
- dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
- dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
- dev->adapter.nr = pdev->id;
+ if (!dev->sda_hold_time && ht)
+ dev->sda_hold_time = div_u64(
+ (u64)dev->get_clk_rate_khz(dev) * ht + 500000,
+ 1000000);
}
+ dw_i2c_set_fifo_size(dev, pdev->id);
+
adap = &dev->adapter;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
@@ -305,7 +358,7 @@ static int dw_i2c_plat_suspend(struct device *dev)
}
i2c_dw_disable(i_dev);
- clk_disable_unprepare(i_dev->clk);
+ i2c_dw_plat_prepare_clk(i_dev, false);
i_dev->suspended = true;
@@ -325,7 +378,7 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0;
}
- clk_prepare_enable(i_dev->clk);
+ i2c_dw_plat_prepare_clk(i_dev, true);
if (!i_dev->pm_runtime_disabled)
i2c_dw_init(i_dev);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 85f39cc3e2765f..f78069cd8d5381 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -128,6 +128,7 @@
#define SBREG_BAR 0x10
#define SBREG_SMBCTRL 0xc6000c
+#define SBREG_SMBCTRL_DNV 0xcf000c
/* Host status bits for SMBPCISTS */
#define SMBPCISTS_INTS 0x08
@@ -1251,7 +1252,11 @@ static void i801_add_tco(struct i801_priv *priv)
spin_unlock(&p2sb_spinlock);
res = &tco_res[ICH_RES_MEM_OFF];
- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
+ else
+ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
+
res->end = res->start + 3;
res->flags = IORESOURCE_MEM;
@@ -1267,6 +1272,13 @@ static void i801_add_tco(struct i801_priv *priv)
}
#ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+ acpi_physical_address address)
+{
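+ /* Only I/O accesses inside the SMBus register window concern this handler. */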
+ return address >= priv->smba &&
+ address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)
@@ -1282,7 +1294,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
*/
mutex_lock(&priv->acpi_lock);
- if (!priv->acpi_reserved) {
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d4d853680ae478..cf1b57a054d09e 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}
+ reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -677,9 +677,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
struct imx_i2c_dma *dma = i2c_imx->dma;
struct device *dev = &i2c_imx->adapter.dev;
- temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
- temp |= I2CR_DMAEN;
- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
dma->chan_using = dma->chan_rx;
dma->dma_transfer_dir = DMA_DEV_TO_MEM;
@@ -690,7 +687,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -793,6 +789,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
int i, result;
unsigned int temp;
int block_data = msgs->flags & I2C_M_RECV_LEN;
+ int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
dev_dbg(&i2c_imx->adapter.dev,
"<%s> write slave address: addr=0x%x\n",
@@ -819,12 +816,14 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
*/
if ((msgs->len - 1) || block_data)
temp &= ~I2CR_TXAK;
+ if (use_dma)
+ temp |= I2CR_DMAEN;
imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
- if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
+ if (use_dma)
return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
/* read data */
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 1111cb966a4431..fa2b58142cded1 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -587,7 +587,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
/* unmap the data buffer */
if (dma_size != 0)
- dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction);
+ dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) {
dev_err(dev, "completion wait timed out\n");
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 599c0d7bd906d1..dfe1a53ce4ad35 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -33,7 +33,6 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
-#include <linux/spinlock.h>
/* register offsets */
#define ICSCR 0x00 /* slave ctrl */
@@ -84,6 +83,7 @@
#define RCAR_BUS_PHASE_START (MDBS | MIE | ESG)
#define RCAR_BUS_PHASE_DATA (MDBS | MIE)
+#define RCAR_BUS_MASK_DATA (~(ESG | FSB) & 0xFF)
#define RCAR_BUS_PHASE_STOP (MDBS | MIE | FSB)
#define RCAR_IRQ_SEND (MNR | MAL | MST | MAT | MDE)
@@ -94,7 +94,6 @@
#define RCAR_IRQ_ACK_RECV (~(MAT | MDR) & 0xFF)
#define ID_LAST_MSG (1 << 0)
-#define ID_IOERROR (1 << 1)
#define ID_DONE (1 << 2)
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
@@ -108,10 +107,10 @@ enum rcar_i2c_type {
struct rcar_i2c_priv {
void __iomem *io;
struct i2c_adapter adap;
- struct i2c_msg *msg;
+ struct i2c_msg *msg;
+ int msgs_left;
struct clk *clk;
- spinlock_t lock;
wait_queue_head_t wait;
int pos;
@@ -144,9 +143,10 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
{
/* reset master mode */
rcar_i2c_write(priv, ICMIER, 0);
- rcar_i2c_write(priv, ICMCR, 0);
+ rcar_i2c_write(priv, ICMCR, MDBS);
rcar_i2c_write(priv, ICMSR, 0);
- rcar_i2c_write(priv, ICMAR, 0);
+ /* start clock */
+ rcar_i2c_write(priv, ICCCR, priv->icccr);
}
static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
@@ -257,16 +257,28 @@ static void rcar_i2c_prepare_msg(struct rcar_i2c_priv *priv)
{
int read = !!rcar_i2c_is_recv(priv);
+ priv->pos = 0;
+ priv->flags = 0;
+ if (priv->msgs_left == 1)
+ rcar_i2c_flags_set(priv, ID_LAST_MSG);
+
rcar_i2c_write(priv, ICMAR, (priv->msg->addr << 1) | read);
rcar_i2c_write(priv, ICMSR, 0);
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_START);
rcar_i2c_write(priv, ICMIER, read ? RCAR_IRQ_RECV : RCAR_IRQ_SEND);
}
+static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
+{
+ priv->msg++;
+ priv->msgs_left--;
+ rcar_i2c_prepare_msg(priv);
+}
+
/*
* interrupt functions
*/
-static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
+static void rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
{
struct i2c_msg *msg = priv->msg;
@@ -276,14 +288,7 @@ static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
* Do nothing
*/
if (!(msr & MDE))
- return 0;
-
- /*
- * If address transfer phase finished,
- * goto data phase.
- */
- if (msr & MAT)
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
+ return;
if (priv->pos < msg->len) {
/*
@@ -305,29 +310,23 @@ static int rcar_i2c_irq_send(struct rcar_i2c_priv *priv, u32 msr)
* [ICRXTX] -> [SHIFT] -> [I2C bus]
*/
- if (priv->flags & ID_LAST_MSG)
+ if (priv->flags & ID_LAST_MSG) {
/*
* If current msg is the _LAST_ msg,
* prepare stop condition here.
* ID_DONE will be set on STOP irq.
*/
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
- else
- /*
- * If current msg is _NOT_ last msg,
- * it doesn't call stop phase.
- * thus, there is no STOP irq.
- * return ID_DONE here.
- */
- return ID_DONE;
+ } else {
+ rcar_i2c_next_msg(priv);
+ return;
+ }
}
rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_SEND);
-
- return 0;
}
-static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
+static void rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
{
struct i2c_msg *msg = priv->msg;
@@ -337,14 +336,10 @@ static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
* Do nothing
*/
if (!(msr & MDR))
- return 0;
+ return;
if (msr & MAT) {
- /*
- * Address transfer phase finished,
- * but, there is no data at this point.
- * Do nothing.
- */
+ /* Address transfer phase finished, but no data at this point. */
} else if (priv->pos < msg->len) {
/*
* get received data
@@ -360,12 +355,11 @@ static int rcar_i2c_irq_recv(struct rcar_i2c_priv *priv, u32 msr)
*/
if (priv->pos + 1 >= msg->len)
rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
- else
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
-
- rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV);
- return 0;
+ if (priv->pos == msg->len && !(priv->flags & ID_LAST_MSG))
+ rcar_i2c_next_msg(priv);
+ else
+ rcar_i2c_write(priv, ICMSR, RCAR_IRQ_ACK_RECV);
}
static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
@@ -426,22 +420,21 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
{
struct rcar_i2c_priv *priv = ptr;
- irqreturn_t result = IRQ_HANDLED;
- u32 msr;
+ u32 msr, val;
- /*-------------- spin lock -----------------*/
- spin_lock(&priv->lock);
-
- if (rcar_i2c_slave_irq(priv))
- goto exit;
+ /* Clear START or STOP as soon as we can */
+ val = rcar_i2c_read(priv, ICMCR);
+ rcar_i2c_write(priv, ICMCR, val & RCAR_BUS_MASK_DATA);
msr = rcar_i2c_read(priv, ICMSR);
/* Only handle interrupts that are currently enabled */
msr &= rcar_i2c_read(priv, ICMIER);
if (!msr) {
- result = IRQ_NONE;
- goto exit;
+ if (rcar_i2c_slave_irq(priv))
+ return IRQ_HANDLED;
+
+ return IRQ_NONE;
}
/* Arbitration lost */
@@ -452,8 +445,7 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
/* Nack */
if (msr & MNR) {
- /* go to stop phase */
- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_STOP);
+ /* HW automatically sends STOP after received NACK */
rcar_i2c_write(priv, ICMIER, RCAR_IRQ_STOP);
rcar_i2c_flags_set(priv, ID_NACK);
goto out;
@@ -461,14 +453,15 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
/* Stop */
if (msr & MST) {
+ priv->msgs_left--; /* The last message also made it */
rcar_i2c_flags_set(priv, ID_DONE);
goto out;
}
if (rcar_i2c_is_recv(priv))
- rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr));
+ rcar_i2c_irq_recv(priv, msr);
else
- rcar_i2c_flags_set(priv, rcar_i2c_irq_send(priv, msr));
+ rcar_i2c_irq_send(priv, msr);
out:
if (rcar_i2c_flags_has(priv, ID_DONE)) {
@@ -477,11 +470,7 @@ out:
wake_up(&priv->wait);
}
-exit:
- spin_unlock(&priv->lock);
- /*-------------- spin unlock -----------------*/
-
- return result;
+ return IRQ_HANDLED;
}
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
@@ -490,21 +479,12 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
{
struct rcar_i2c_priv *priv = i2c_get_adapdata(adap);
struct device *dev = rcar_i2c_priv_to_dev(priv);
- unsigned long flags;
int i, ret;
- long timeout;
+ long time_left;
pm_runtime_get_sync(dev);
- /*-------------- spin lock -----------------*/
- spin_lock_irqsave(&priv->lock, flags);
-
rcar_i2c_init(priv);
- /* start clock */
- rcar_i2c_write(priv, ICCCR, priv->icccr);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- /*-------------- spin unlock -----------------*/
ret = rcar_i2c_bus_barrier(priv);
if (ret < 0)
@@ -514,48 +494,28 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
/* This HW can't send STOP after address phase */
if (msgs[i].len == 0) {
ret = -EOPNOTSUPP;
- break;
- }
-
- /*-------------- spin lock -----------------*/
- spin_lock_irqsave(&priv->lock, flags);
-
- /* init each data */
- priv->msg = &msgs[i];
- priv->pos = 0;
- priv->flags = 0;
- if (i == num - 1)
- rcar_i2c_flags_set(priv, ID_LAST_MSG);
-
- rcar_i2c_prepare_msg(priv);
-
- spin_unlock_irqrestore(&priv->lock, flags);
- /*-------------- spin unlock -----------------*/
-
- timeout = wait_event_timeout(priv->wait,
- rcar_i2c_flags_has(priv, ID_DONE),
- adap->timeout);
- if (!timeout) {
- ret = -ETIMEDOUT;
- break;
- }
-
- if (rcar_i2c_flags_has(priv, ID_NACK)) {
- ret = -ENXIO;
- break;
- }
-
- if (rcar_i2c_flags_has(priv, ID_ARBLOST)) {
- ret = -EAGAIN;
- break;
- }
-
- if (rcar_i2c_flags_has(priv, ID_IOERROR)) {
- ret = -EIO;
- break;
+ goto out;
}
+ }
- ret = i + 1; /* The number of transfer */
+ /* init data */
+ priv->msg = msgs;
+ priv->msgs_left = num;
+
+ rcar_i2c_prepare_msg(priv);
+
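+ /*
+ * The irq routine now advances through all messages itself, so wait
+ * once with the timeout scaled by the message count.
+ */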
+ time_left = wait_event_timeout(priv->wait,
+ rcar_i2c_flags_has(priv, ID_DONE),
+ num * adap->timeout);
+ if (!time_left) {
+ rcar_i2c_init(priv);
+ ret = -ETIMEDOUT;
+ } else if (rcar_i2c_flags_has(priv, ID_NACK)) {
+ ret = -ENXIO;
+ } else if (rcar_i2c_flags_has(priv, ID_ARBLOST)) {
+ ret = -EAGAIN;
+ } else {
+ ret = num - priv->msgs_left; /* The number of transfers */
}
out:
pm_runtime_put(dev);
@@ -650,23 +610,26 @@ static int rcar_i2c_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->io = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->io))
+ return PTR_ERR(priv->io);
+
bus_speed = 100000; /* default 100 kHz */
of_property_read_u32(dev->of_node, "clock-frequency", &bus_speed);
priv->devtype = (enum rcar_i2c_type)of_match_device(rcar_i2c_dt_ids, dev)->data;
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
if (ret < 0)
- return ret;
+ goto out_pm_put;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->io = devm_ioremap_resource(dev, res);
- if (IS_ERR(priv->io))
- return PTR_ERR(priv->io);
+ pm_runtime_put(dev);
irq = platform_get_irq(pdev, 0);
init_waitqueue_head(&priv->wait);
- spin_lock_init(&priv->lock);
adap = &priv->adap;
adap->nr = pdev->id;
@@ -682,22 +645,26 @@ static int rcar_i2c_probe(struct platform_device *pdev)
dev_name(dev), priv);
if (ret < 0) {
dev_err(dev, "cannot get irq %d\n", irq);
- return ret;
+ goto out_pm_disable;
}
- pm_runtime_enable(dev);
platform_set_drvdata(pdev, priv);
ret = i2c_add_numbered_adapter(adap);
if (ret < 0) {
dev_err(dev, "reg adap failed: %d\n", ret);
- pm_runtime_disable(dev);
- return ret;
+ goto out_pm_disable;
}
dev_info(dev, "probed\n");
return 0;
+
+ out_pm_put:
+ pm_runtime_put(dev);
+ out_pm_disable:
+ pm_runtime_disable(dev);
+ return ret;
}
static int rcar_i2c_remove(struct platform_device *pdev)
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 7aa7b9cb6203f0..d2178f701b4188 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.length = len;
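+ /* ACPI needs the buffer length set explicitly; it is not derived from the pointer. */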
mt_params[4].buffer.pointer = data->block + 1;
}
break;
@@ -363,6 +364,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
const struct acpi_device_id *id;
+ int ret;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
if (!smbus_cmi)
@@ -384,8 +386,10 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
- if (smbus_cmi->cap_info == 0)
+ if (smbus_cmi->cap_info == 0) {
+ ret = -ENODEV;
goto err;
+ }
snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
"SMBus CMI adapter %s",
@@ -396,7 +400,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
smbus_cmi->adapter.dev.parent = &device->dev;
- if (i2c_add_adapter(&smbus_cmi->adapter)) {
+ ret = i2c_add_adapter(&smbus_cmi->adapter);
+ if (ret) {
dev_err(&device->dev, "Couldn't register adapter!\n");
goto err;
}
@@ -406,7 +411,7 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
err:
kfree(smbus_cmi);
device->driver_data = NULL;
- return -EIO;
+ return ret;
}
static int acpi_smbus_cmi_remove(struct acpi_device *device)
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index a0522fcc4ff875..1004422dbb10f2 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -696,7 +696,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
/* payload size is only 12 bit */
static struct i2c_adapter_quirks tegra_i2c_quirks = {
.max_read_len = 4096,
- .max_write_len = 4096,
+ .max_write_len = 4096 - 12,
};
static const struct tegra_i2c_hw_feature tegra20_i2c_hw = {
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index e8d03bcfe3e0a4..3f6b43fe4d5d9b 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -394,11 +394,8 @@ static int uniphier_fi2c_master_xfer(struct i2c_adapter *adap,
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index e3c3861c332510..ad5eb8bacc6d16 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -247,11 +247,8 @@ static int uniphier_i2c_master_xfer(struct i2c_adapter *adap,
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
if (ret)
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 0b20449e48cfe8..da9acec1a0295d 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -533,6 +533,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
{
u8 rx_watermark;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
+ unsigned long flags;
/* Clear and enable Rx full interrupt. */
xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
@@ -548,6 +549,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
rx_watermark = IIC_RX_FIFO_DEPTH;
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
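+ /*
+ * Do not let an interrupt split the address and byte-count writes;
+ * the ISR could otherwise act on a half-programmed transfer.
+ */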
+ local_irq_save(flags);
if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
@@ -558,6 +560,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+ local_irq_restore(flags);
+
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 92c4a0d3a40db0..753c9659ccb341 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -85,7 +85,7 @@ void i2c_transfer_trace_unreg(void)
}
#if defined(CONFIG_ACPI)
-struct acpi_i2c_handler_data {
+struct i2c_acpi_handler_data {
struct acpi_connection_info info;
struct i2c_adapter *adapter;
};
@@ -100,18 +100,20 @@ struct gsb_buffer {
};
} __packed;
-struct acpi_i2c_lookup {
+struct i2c_acpi_lookup {
struct i2c_board_info *info;
acpi_handle adapter_handle;
acpi_handle device_handle;
+ acpi_handle search_handle;
+ u32 speed;
+ u32 min_speed;
};
-static int acpi_i2c_find_address(struct acpi_resource *ares, void *data)
+static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data)
{
- struct acpi_i2c_lookup *lookup = data;
+ struct i2c_acpi_lookup *lookup = data;
struct i2c_board_info *info = lookup->info;
struct acpi_resource_i2c_serialbus *sb;
- acpi_handle adapter_handle;
acpi_status status;
if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
@@ -121,106 +123,145 @@ static int acpi_i2c_find_address(struct acpi_resource *ares, void *data)
if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C)
return 1;
- /*
- * Extract the ResourceSource and make sure that the handle matches
- * with the I2C adapter handle.
- */
status = acpi_get_handle(lookup->device_handle,
sb->resource_source.string_ptr,
- &adapter_handle);
- if (ACPI_SUCCESS(status) && adapter_handle == lookup->adapter_handle) {
- info->addr = sb->slave_address;
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- info->flags |= I2C_CLIENT_TEN;
- }
+ &lookup->adapter_handle);
+ if (!ACPI_SUCCESS(status))
+ return 1;
+
+ info->addr = sb->slave_address;
+ lookup->speed = sb->connection_speed;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
return 1;
}
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
+static int i2c_acpi_do_lookup(struct acpi_device *adev,
+ struct i2c_acpi_lookup *lookup)
{
- struct i2c_adapter *adapter = data;
+ struct i2c_board_info *info = lookup->info;
struct list_head resource_list;
- struct acpi_i2c_lookup lookup;
- struct resource_entry *entry;
- struct i2c_board_info info;
- struct i2c_client *client;
- struct acpi_device *adev;
int ret;
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
- return AE_OK;
-
- memset(&info, 0, sizeof(info));
- info.fwnode = acpi_fwnode_handle(adev);
+ if (acpi_bus_get_status(adev) || !adev->status.present ||
+ acpi_device_enumerated(adev))
+ return -EINVAL;
- memset(&lookup, 0, sizeof(lookup));
- lookup.adapter_handle = ACPI_HANDLE(&adapter->dev);
- lookup.device_handle = handle;
- lookup.info = &info;
+ memset(info, 0, sizeof(*info));
+ lookup->device_handle = acpi_device_handle(adev);
- /*
- * Look up for I2cSerialBus resource with ResourceSource that
- * matches with this adapter.
- */
+ /* Look up for I2cSerialBus resource */
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_find_address, &lookup);
+ i2c_acpi_fill_info, lookup);
acpi_dev_free_resource_list(&resource_list);
- if (ret < 0 || !info.addr)
- return AE_OK;
+ if (ret < 0 || !info->addr)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int i2c_acpi_get_info(struct acpi_device *adev,
+ struct i2c_board_info *info,
+ acpi_handle *adapter_handle)
+{
+ struct list_head resource_list;
+ struct resource_entry *entry;
+ struct i2c_acpi_lookup lookup;
+ int ret;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.info = info;
+
+ ret = i2c_acpi_do_lookup(adev, &lookup);
+ if (ret)
+ return ret;
+
+ info->fwnode = acpi_fwnode_handle(adev);
+ *adapter_handle = lookup.adapter_handle;
/* Then fill IRQ number if any */
+ INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (ret < 0)
- return AE_OK;
+ return -EINVAL;
resource_list_for_each_entry(entry, &resource_list) {
if (resource_type(entry->res) == IORESOURCE_IRQ) {
- info.irq = entry->res->start;
+ info->irq = entry->res->start;
break;
}
}
acpi_dev_free_resource_list(&resource_list);
+ strlcpy(info->type, dev_name(&adev->dev), sizeof(info->type));
+
+ return 0;
+}
+
+static void i2c_acpi_register_device(struct i2c_adapter *adapter,
+ struct acpi_device *adev,
+ struct i2c_board_info *info)
+{
+ struct i2c_client *client;
+
adev->power.flags.ignore_parent = true;
- strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
/* Allow device property to enable probing before init */
if (!acpi_dev_get_property(adev, "linux,probed", ACPI_TYPE_ANY, NULL)) {
- unsigned short addrs[] = { info.addr, I2C_CLIENT_END };
+ unsigned short addrs[] = { info->addr, I2C_CLIENT_END };
- client = i2c_new_probed_device(adapter, &info, addrs, NULL);
+ client = i2c_new_probed_device(adapter, info, addrs, NULL);
} else {
- client = i2c_new_device(adapter, &info);
+ client = i2c_new_device(adapter, info);
}
+ acpi_device_set_enumerated(adev);
+
if (!client) {
adev->power.flags.ignore_parent = false;
dev_err(&adapter->dev,
"failed to add I2C device %s from ACPI\n",
dev_name(&adev->dev));
}
+}
+
+static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct acpi_device *adev;
+ acpi_handle adapter_handle;
+ struct i2c_board_info info;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (i2c_acpi_get_info(adev, &info, &adapter_handle))
+ return AE_OK;
+
+ if (adapter_handle != ACPI_HANDLE(&adapter->dev))
+ return AE_OK;
+
+ i2c_acpi_register_device(adapter, adev, &info);
return AE_OK;
}
-#define ACPI_I2C_MAX_SCAN_DEPTH 32
+#define I2C_ACPI_MAX_SCAN_DEPTH 32
/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter
* @adap: pointer to adapter
*
* Enumerate all I2C slave devices behind this adapter by walking the ACPI
* namespace. When a device is found it will be added to the Linux device
* model and bound to the corresponding ACPI handle.
*/
-static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+static void i2c_acpi_register_devices(struct i2c_adapter *adap)
{
acpi_status status;
@@ -228,15 +269,145 @@ static void acpi_i2c_register_devices(struct i2c_adapter *adap)
return;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_I2C_MAX_SCAN_DEPTH,
- acpi_i2c_add_device, NULL,
+ I2C_ACPI_MAX_SCAN_DEPTH,
+ i2c_acpi_add_device, NULL,
adap, NULL);
if (ACPI_FAILURE(status))
dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
}
+static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_acpi_lookup *lookup = data;
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+
+ if (i2c_acpi_do_lookup(adev, lookup))
+ return AE_OK;
+
+ if (lookup->search_handle != lookup->adapter_handle)
+ return AE_OK;
+
+ if (lookup->speed <= lookup->min_speed)
+ lookup->min_speed = lookup->speed;
+
+ return AE_OK;
+}
+
+/**
+ * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI
+ * @dev: The device owning the bus
+ *
+ * Find the I2C bus speed by walking the ACPI namespace for all I2C slave
+ * devices connected to this bus and using the speed of the slowest device.
+ *
+ * Returns the speed in Hz, or zero if none was found.
+ */
+u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+ struct i2c_acpi_lookup lookup;
+ struct i2c_board_info dummy;
+ acpi_status status;
+
+ if (!has_acpi_companion(dev))
+ return 0;
+
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.search_handle = ACPI_HANDLE(dev);
+ lookup.min_speed = UINT_MAX;
+ lookup.info = &dummy;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ I2C_ACPI_MAX_SCAN_DEPTH,
+ i2c_acpi_lookup_speed, NULL,
+ &lookup, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "unable to find I2C bus speed from ACPI\n");
+ return 0;
+ }
+
+ return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+}
+EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
+
+static int i2c_acpi_match_adapter(struct device *dev, void *data)
+{
+ struct i2c_adapter *adapter = i2c_verify_adapter(dev);
+
+ if (!adapter)
+ return 0;
+
+ return ACPI_HANDLE(dev) == (acpi_handle)data;
+}
+
+static int i2c_acpi_match_device(struct device *dev, void *data)
+{
+ return ACPI_COMPANION(dev) == data;
+}
+
+static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, handle,
+ i2c_acpi_match_adapter);
+ return dev ? i2c_verify_adapter(dev) : NULL;
+}
+
+static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+ return dev ? i2c_verify_client(dev) : NULL;
+}
+
+static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
+ void *arg)
+{
+ struct acpi_device *adev = arg;
+ struct i2c_board_info info;
+ acpi_handle adapter_handle;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ switch (value) {
+ case ACPI_RECONFIG_DEVICE_ADD:
+ if (i2c_acpi_get_info(adev, &info, &adapter_handle))
+ break;
+
+ adapter = i2c_acpi_find_adapter_by_handle(adapter_handle);
+ if (!adapter)
+ break;
+
+ i2c_acpi_register_device(adapter, adev, &info);
+ break;
+ case ACPI_RECONFIG_DEVICE_REMOVE:
+ if (!acpi_device_enumerated(adev))
+ break;
+
+ client = i2c_acpi_find_client_by_adev(adev);
+ if (!client)
+ break;
+
+ i2c_unregister_device(client);
+ put_device(&client->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block i2c_acpi_notifier = {
+ .notifier_call = i2c_acpi_notify,
+};
#else /* CONFIG_ACPI */
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) { }
+static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+extern struct notifier_block i2c_acpi_notifier;
#endif /* CONFIG_ACPI */
#ifdef CONFIG_ACPI_I2C_OPREGION
@@ -301,12 +472,12 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
}
static acpi_status
-acpi_i2c_space_handler(u32 function, acpi_physical_address command,
+i2c_acpi_space_handler(u32 function, acpi_physical_address command,
u32 bits, u64 *value64,
void *handler_context, void *region_context)
{
struct gsb_buffer *gsb = (struct gsb_buffer *)value64;
- struct acpi_i2c_handler_data *data = handler_context;
+ struct i2c_acpi_handler_data *data = handler_context;
struct acpi_connection_info *info = &data->info;
struct acpi_resource_i2c_serialbus *sb;
struct i2c_adapter *adapter = data->adapter;
@@ -424,10 +595,10 @@ acpi_i2c_space_handler(u32 function, acpi_physical_address command,
}
-static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+static int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
{
acpi_handle handle;
- struct acpi_i2c_handler_data *data;
+ struct i2c_acpi_handler_data *data;
acpi_status status;
if (!adapter->dev.parent)
@@ -438,7 +609,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
if (!handle)
return -ENODEV;
- data = kzalloc(sizeof(struct acpi_i2c_handler_data),
+ data = kzalloc(sizeof(struct i2c_acpi_handler_data),
GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -452,7 +623,7 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
status = acpi_install_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler,
+ &i2c_acpi_space_handler,
NULL,
data);
if (ACPI_FAILURE(status)) {
@@ -466,10 +637,10 @@ static int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
return 0;
}
-static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+static void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter)
{
acpi_handle handle;
- struct acpi_i2c_handler_data *data;
+ struct i2c_acpi_handler_data *data;
acpi_status status;
if (!adapter->dev.parent)
@@ -482,7 +653,7 @@ static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_remove_address_space_handler(handle,
ACPI_ADR_SPACE_GSBUS,
- &acpi_i2c_space_handler);
+ &i2c_acpi_space_handler);
status = acpi_bus_get_private_data(handle, (void **)&data);
if (ACPI_SUCCESS(status))
@@ -491,10 +662,10 @@ static void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
acpi_bus_detach_private_data(handle);
}
#else /* CONFIG_ACPI_I2C_OPREGION */
-static inline void acpi_i2c_remove_space_handler(struct i2c_adapter *adapter)
+static inline void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter)
{ }
-static inline int acpi_i2c_install_space_handler(struct i2c_adapter *adapter)
+static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter)
{ return 0; }
#endif /* CONFIG_ACPI_I2C_OPREGION */
@@ -1131,6 +1302,8 @@ void i2c_unregister_device(struct i2c_client *client)
{
if (client->dev.of_node)
of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+ if (ACPI_COMPANION(&client->dev))
+ acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev));
device_unregister(&client->dev);
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -1698,8 +1871,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
exit_recovery:
/* create pre-declared device nodes */
of_i2c_register_devices(adap);
- acpi_i2c_register_devices(adap);
- acpi_i2c_install_space_handler(adap);
+ i2c_acpi_register_devices(adap);
+ i2c_acpi_install_space_handler(adap);
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -1873,7 +2046,7 @@ void i2c_del_adapter(struct i2c_adapter *adap)
return;
}
- acpi_i2c_remove_space_handler(adap);
+ i2c_acpi_remove_space_handler(adap);
/* Tell drivers about this removal */
mutex_lock(&core_lock);
bus_for_each_drv(&i2c_bus_type, NULL, adap,
@@ -2152,6 +2325,8 @@ static int __init i2c_init(void)
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier));
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_register(&i2c_acpi_notifier));
return 0;
@@ -2166,6 +2341,8 @@ bus_err:
static void __exit i2c_exit(void)
{
+ if (IS_ENABLED(CONFIG_ACPI))
+ WARN_ON(acpi_reconfig_notifier_unregister(&i2c_acpi_notifier));
if (IS_ENABLED(CONFIG_OF_DYNAMIC))
WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
i2c_del_driver(&dummy_driver);
@@ -3064,6 +3241,8 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
status = i2c_transfer(adapter, msg, num);
if (status < 0)
return status;
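+ /* Fewer messages than requested means the transfer was cut short. */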
+ if (status != num)
+ return -EIO;
/* Check PEC if last message is a read */
if (i && (msg[num-1].flags & I2C_M_RD)) {
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 94c837046786ad..57e3790c87b1b5 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -459,9 +459,15 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return i2cdev_ioctl_smbus(client, arg);
case I2C_RETRIES:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
client->adapter->retries = arg;
break;
case I2C_TIMEOUT:
+ if (arg > INT_MAX)
+ return -EINVAL;
+
/* For historical reasons, user-space sets the timeout
* value in units of 10 ms.
*/
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 96a345248224b1..0add5bb3cee853 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -920,6 +920,7 @@ static u8 pmac_ide_cable_detect(ide_hwif_t *hwif)
struct device_node *root = of_find_node_by_path("/");
const char *model = of_get_property(root, "model", NULL);
+ of_node_put(root);
/* Get cable type from device-tree. */
if (cable && !strncmp(cable, "80-", 3)) {
/* Some drives fail to detect 80c cable in PowerBook */
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 93986f0590efa6..d83e5b75a37ba6 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -245,12 +245,14 @@ static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *idev = pf->indio_dev;
struct at91_adc_state *st = iio_priv(idev);
+ struct iio_chan_spec const *chan;
int i, j = 0;
for (i = 0; i < idev->masklength; i++) {
if (!test_bit(i, idev->active_scan_mask))
continue;
- st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, i));
+ chan = idev->channels + i;
+ st->buffer[j] = at91_adc_readl(st, AT91_ADC_CHAN(st, chan->channel));
j++;
}
@@ -276,6 +278,8 @@ static void handle_adc_eoc_trigger(int irq, struct iio_dev *idev)
iio_trigger_poll(idev->trig);
} else {
st->last_value = at91_adc_readl(st, AT91_ADC_CHAN(st, st->chnb));
+ /* Needed to ACK the DRDY interrupt */
+ at91_adc_readl(st, AT91_ADC_LCDR);
st->done = true;
wake_up_interruptible(&st->wq_data_avail);
}
diff --git a/drivers/iio/buffer/kfifo_buf.c b/drivers/iio/buffer/kfifo_buf.c
index c5b999f0c51943..e44181f9eb367c 100644
--- a/drivers/iio/buffer/kfifo_buf.c
+++ b/drivers/iio/buffer/kfifo_buf.c
@@ -19,11 +19,18 @@ struct iio_kfifo {
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
static inline int __iio_allocate_kfifo(struct iio_kfifo *buf,
- int bytes_per_datum, int length)
+ size_t bytes_per_datum, unsigned int length)
{
if ((length == 0) || (bytes_per_datum == 0))
return -EINVAL;
+ /*
+ * Make sure we don't overflow an unsigned int after kfifo rounds up to
+ * the next power of 2.
+ */
+ if (roundup_pow_of_two(length) > UINT_MAX / bytes_per_datum)
+ return -EINVAL;
+
return __kfifo_alloc((struct __kfifo *)&buf->kf, length,
bytes_per_datum, GFP_KERNEL);
}
@@ -64,7 +71,7 @@ static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
return 0;
}
-static int iio_set_length_kfifo(struct iio_buffer *r, int length)
+static int iio_set_length_kfifo(struct iio_buffer *r, unsigned int length)
{
/* Avoid an invalid state */
if (length < 2)
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index f88698e831bdf5..3060af1d47352f 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -103,7 +103,7 @@ static int cros_ec_sensors_read(struct iio_dev *indio_dev,
* Do not use IIO_DEGREE_TO_RAD to avoid precision
* loss. Round to the nearest integer.
*/
- *val = div_s64(val64 * 314159 + 9000000ULL, 1000);
+ *val = div_s64(val64 * 314159 + 500ULL, 1000);
*val2 = 18000 << (CROS_EC_SENSOR_BITS - 1);
ret = IIO_VAL_FRACTIONAL;
break;
@@ -178,6 +178,17 @@ static int cros_ec_sensors_write(struct iio_dev *indio_dev,
}
mutex_unlock(&st->core.cmd_lock);
+ if ((ret == 0) &&
+ ((mask == IIO_CHAN_INFO_FREQUENCY) ||
+ (mask == IIO_CHAN_INFO_SAMP_FREQ))) {
+ /*
+ * Add a delay to let the EC flush out stale samples.
+ * Assuming a 1 MB/s link to the EC and 20 bytes per event, a
+ * 200-element FIFO takes 4 ms to drain. Add time for interrupt
+ * handling and for waking up the requester.
+ */
+ usleep_range(10000, 15000);
+ }
return ret;
}
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_ring.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_ring.c
index af0b6011c230b0..40814400d5b405 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_ring.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_ring.c
@@ -26,10 +26,12 @@
#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/sysfs.h>
#include <linux/kernel.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
+#include <linux/pm_wakeup.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
@@ -81,9 +83,21 @@ struct cros_ec_sensors_ec_overflow_state {
s64 last;
};
+/* Precision of fixed point for the m values from the filter */
#define M_PRECISION (1 << 23)
+
+/* Length of the filter, i.e. how long entries are remembered */
#define TS_HISTORY_SIZE 64
+/* Only activate the filter once we have at least this many elements. */
+#define TS_HISTORY_THRESHOLD 8
+
+/*
+ * If we don't have any history entries for this long, empty the filter to
+ * make sure there are no big discontinuities.
+ */
+#define TS_HISTORY_BORED_US 500000
+
struct cros_ec_sensors_ts_filter_state {
s64 x_offset, y_offset;
s64 x_history[TS_HISTORY_SIZE]; /* stored relative to x_offset */
@@ -113,17 +127,66 @@ struct cros_ec_sensors_ring_state {
int fifo_size;
/* Used for timestamp spreading calculations when a batch shows up */
+ s64 penultimate_batch_timestamp[CROS_EC_SENSOR_MAX];
+ int penultimate_batch_len[CROS_EC_SENSOR_MAX];
s64 last_batch_timestamp[CROS_EC_SENSOR_MAX];
- s64 last_batch_len[CROS_EC_SENSOR_MAX];
+ int last_batch_len[CROS_EC_SENSOR_MAX];
+ s64 newest_sensor_event[CROS_EC_SENSOR_MAX];
struct cros_ec_sensors_ec_overflow_state overflow_a;
struct cros_ec_sensors_ec_overflow_state overflow_b;
struct cros_ec_sensors_ts_filter_state filter;
+
+ /*
+ * The timestamps reported from the EC have low jitter.
+ * Timestamps also come before every sample.
+ * Set either by feature bits coming from the EC or userspace.
+ */
+ bool tight_timestamps;
+};
+
+static ssize_t cros_ec_ring_attr_tight_timestamps_show(
+ struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct cros_ec_sensors_ring_state *state =
+ iio_priv(dev_to_iio_dev(dev));
+
+ return sprintf(buf, "%d\n", state->tight_timestamps);
+}
+
+static ssize_t cros_ec_ring_attr_tight_timestamps_store(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct cros_ec_sensors_ring_state *state =
+ iio_priv(dev_to_iio_dev(dev));
+ int ret;
+
+ ret = strtobool(buf, &state->tight_timestamps);
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(tight_timestamps, 0644,
+ cros_ec_ring_attr_tight_timestamps_show,
+ cros_ec_ring_attr_tight_timestamps_store,
+ 0);
+
+static struct attribute *cros_ec_ring_attributes[] = {
+ &iio_dev_attr_tight_timestamps.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group cros_ec_ring_attribute_group = {
+ .attrs = cros_ec_ring_attributes,
};
static const struct iio_info ec_sensors_info = {
.driver_module = THIS_MODULE,
+ .attrs = &cros_ec_ring_attribute_group,
};
static int cros_ec_ring_fifo_toggle(struct cros_ec_sensors_ring_state *state,
@@ -141,9 +204,17 @@ static int cros_ec_ring_fifo_toggle(struct cros_ec_sensors_ring_state *state,
return ret;
}
-static int cros_ec_ring_median_cmp(const void *ts1, const void *ts2)
+static int cros_ec_ring_median_cmp(const void *pv1, const void *pv2)
{
- return *(s64 *)ts1 - *(s64 *)ts2;
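+ /* The s64 difference truncated to int can flip sign; compare explicitly. */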
+ s64 v1 = *(s64 *)pv1;
+ s64 v2 = *(s64 *)pv2;
+
+ if (v1 > v2)
+ return 1;
+ else if (v1 < v2)
+ return -1;
+ else
+ return 0;
}
/*
@@ -158,7 +229,7 @@ static int cros_ec_ring_median_cmp(const void *ts1, const void *ts2)
*/
static s64 cros_ec_ring_median(s64 *array, size_t length)
{
- sort(array, length, sizeof(s64), cros_ec_ring_median_cmp, 0);
+ sort(array, length, sizeof(s64), cros_ec_ring_median_cmp, NULL);
return array[length / 2];
}
@@ -217,7 +288,7 @@ static s64 cros_ec_ring_median(s64 *array, size_t length)
* @b IRQ timestamp, EC timebase (us)
* @c IRQ timestamp, AP timebase (ns)
*/
-void cros_ec_ring_ts_filter_update(
+static void cros_ec_ring_ts_filter_update(
struct cros_ec_sensors_ts_filter_state *state,
s64 b, s64 c)
{
@@ -237,7 +308,11 @@ void cros_ec_ring_ts_filter_update(
if (dx == 0)
return; /* we already have this irq in the history */
dy = (state->y_history[0] + state->y_offset) - y;
- m = (dy * M_PRECISION) / dx;
+ m = div64_s64(dy * M_PRECISION, dx);
+
+ /* Empty filter if we haven't seen any action in a while. */
+ if (-dx > TS_HISTORY_BORED_US)
+ state->history_len = 0;
/* Move everything over, also update offset to all absolute coords. */
for (i = state->history_len - 1; i >= 1; i--) {
@@ -265,18 +340,25 @@ void cros_ec_ring_ts_filter_update(
state->history_len++;
/* Precalculate things for the filter. */
- state->median_m =
- cros_ec_ring_median(m_history_copy, state->history_len);
+ if (state->history_len > TS_HISTORY_THRESHOLD) {
+ state->median_m =
+ cros_ec_ring_median(m_history_copy, state->history_len - 1);
- /*
- * Calculate y-intercepts as if m_median is the slope and points in
- * the history are on the line. median_error will still be in the
- * offset coordinate system.
- */
- for (i = 0; i < state->history_len; i++)
- error[i] = state->y_history[i] -
- state->median_m * state->x_history[i] / M_PRECISION;
- state->median_error = cros_ec_ring_median(error, state->history_len);
+ /*
+ * Calculate y-intercepts as if m_median is the slope and
+ * points in the history are on the line. median_error will
+ * still be in the offset coordinate system.
+ */
+ for (i = 0; i < state->history_len; i++)
+ error[i] = state->y_history[i] -
+ div_s64(state->median_m * state->x_history[i],
+ M_PRECISION);
+ state->median_error =
+ cros_ec_ring_median(error, state->history_len);
+ } else {
+ state->median_m = 0;
+ state->median_error = 0;
+ }
}
/*
@@ -290,6 +372,10 @@ void cros_ec_ring_ts_filter_update(
*
* @returns timestamp in AP timebase (ns)
*
+ * Note: The filter will only activate once state->history_len goes
+ * over TS_HISTORY_THRESHOLD. Otherwise it will just do the naive
+ * c - b * 1000 + a * 1000 transform.
+ *
* How to derive the formula, starting from:
* f(x) = median_m * x + median_error
* That's the calculated AP - EC offset (at the x point in time)
@@ -298,10 +384,10 @@ void cros_ec_ring_ts_filter_update(
* Remember to undo the "y = c - b * 1000" modification:
* f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000
*/
-s64 cros_ec_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state,
- s64 x)
+static s64 cros_ec_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state,
+ s64 x)
{
- return state->median_m * (x - state->x_offset) / M_PRECISION
+ return div_s64(state->median_m * (x - state->x_offset), M_PRECISION)
+ state->median_error + state->y_offset + x * 1000;
}
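
For illustration, not part of the patch: a standalone sketch of the transform above with invented constants. M_PRECISION is defined elsewhere in this file; the value 1000000 below is an assumption for the example only:

#include <stdint.h>
#include <stdio.h>

#define M_PRECISION 1000000LL	/* assumed value, for illustration */

int main(void)
{
	int64_t median_m = 500000;	/* slope: ~0.5 ns of drift per us */
	int64_t median_error = -2000;	/* ns, offset coordinate system */
	int64_t x_offset = 1000;	/* us */
	int64_t y_offset = 7000000;	/* ns */
	int64_t x = 1500;		/* EC timestamp to convert (us) */

	int64_t ap_ns = median_m * (x - x_offset) / M_PRECISION
			+ median_error + y_offset + x * 1000;

	printf("%lld\n", (long long)ap_ns);	/* 8498250 */
	return 0;
}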
@@ -325,6 +411,20 @@ static void cros_ec_ring_fix_overflow(s64 *ts,
state->last = *ts;
}
+static void cros_ec_ring_check_for_past_timestamp(
+ struct cros_ec_sensors_ring_state *state,
+ struct cros_ec_sensors_ring_sample *sample)
+{
+ const u8 sensor_id = sample->sensor_id;
+
+ /* If this event is earlier than one we saw before... */
+ if (state->newest_sensor_event[sensor_id] > sample->timestamp)
+ /* ...mark it for spreading. */
+ sample->timestamp = state->last_batch_timestamp[sensor_id];
+ else
+ state->newest_sensor_event[sensor_id] = sample->timestamp;
+}
+
/*
* cros_ec_ring_process_event: process one EC FIFO event
*
@@ -346,8 +446,12 @@ static bool cros_ec_ring_process_event(
struct ec_response_motion_sensor_data *in,
struct cros_ec_sensors_ring_sample *out)
{
+ struct iio_dev *indio_dev = state->core.indio_dev;
int axis;
- s64 new_timestamp;
+
+ if (device_may_wakeup(&indio_dev->dev) &&
+ in->flags & MOTIONSENSE_SENSOR_FLAG_WAKEUP)
+ pm_wakeup_event(&indio_dev->dev, 0);
if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP) {
s64 a = in->timestamp;
@@ -357,14 +461,25 @@ static bool cros_ec_ring_process_event(
cros_ec_ring_fix_overflow(&a, 1LL << 32, &state->overflow_a);
cros_ec_ring_fix_overflow(&b, 1LL << 32, &state->overflow_b);
- cros_ec_ring_ts_filter_update(&state->filter, b, c);
- new_timestamp = cros_ec_ring_ts_filter(&state->filter, a);
- /*
- * The timestamp can be stale if we had to use the fifo
- * info timestamp.
- */
- if (new_timestamp - *current_timestamp > 0)
- *current_timestamp = new_timestamp;
+ if (state->tight_timestamps) {
+ cros_ec_ring_ts_filter_update(&state->filter, b, c);
+ *current_timestamp =
+ cros_ec_ring_ts_filter(&state->filter, a);
+
+ } else {
+ s64 new_timestamp;
+ /*
+ * Disable filtering since we might add more jitter
+ * if b is at a random point in time.
+ */
+ new_timestamp = c - b * 1000 + a * 1000;
+ /*
+ * The timestamp can be stale if we had to use the fifo
+ * info timestamp.
+ */
+ if (new_timestamp - *current_timestamp > 0)
+ *current_timestamp = new_timestamp;
+ }
}
if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
@@ -388,10 +503,285 @@ static bool cros_ec_ring_process_event(
out->flag = in->flags;
for (axis = X; axis < MAX_AXIS; axis++)
out->vector[axis] = in->data[axis];
+ if (state->tight_timestamps)
+ cros_ec_ring_check_for_past_timestamp(state, out);
return true;
}
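
For illustration, not part of the patch: the non-tight branch above reduces to a fixed-offset timebase conversion. A standalone sketch with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t b = 5000;	/* IRQ timestamp, EC timebase (us) */
	int64_t c = 12000000;	/* same IRQ, AP timebase (ns) */
	int64_t a = 4900;	/* sample timestamp, EC timebase (us) */

	/* Sample landed 100 us before the IRQ, so 100000 ns earlier. */
	int64_t new_timestamp = c - b * 1000 + a * 1000;

	printf("%lld\n", (long long)new_timestamp);	/* 11900000 */
	return 0;
}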
/*
+ * cros_ec_ring_spread_add: Calculate proper timestamps then add to ringbuffer.
+ *
+ * Note: This is the new spreading code; it assumes every sample's
+ * timestamp precedes the sample. Run if tight_timestamps == true.
+ *
+ * Sometimes the EC receives only one interrupt (hence timestamp) for
+ * a batch of samples. Only the first sample will have the correct
+ * timestamp. So we must interpolate the other samples.
+ * We use the previous batch timestamp and our current batch timestamp
+ * as a way to calculate period, then spread the samples evenly.
+ *
+ * s0 int, 0ms
+ * s1 int, 10ms
+ * s2 int, 20ms
+ * 30ms point goes by, no interrupt, previous one is still asserted
+ * downloading s2 and s3
+ * s3 sample, 20ms (incorrect timestamp)
+ * s4 int, 40ms
+ *
+ * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch
+ * has 2 samples in it, we adjust the timestamp of s3.
+ * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 30ms. Had s1 been
+ * part of a bigger batch, things would have gotten a little more
+ * complicated.
+ *
+ * Note: we also assume another sensor sample doesn't break a batch
+ * into 2 or more partitions. For example, there can't ever be a sync
+ * sensor in between s2 and s3. This simplifies the following code.
+ */
+static void cros_ec_ring_spread_add(
+ struct cros_ec_sensors_ring_state *state,
+ unsigned long sensor_mask,
+ struct cros_ec_sensors_ring_sample *last_out)
+{
+ struct iio_dev *indio_dev = state->core.indio_dev;
+ struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start;
+ int id;
+
+ for_each_set_bit(id, &sensor_mask, BITS_PER_LONG) {
+ for (batch_start = state->ring; batch_start < last_out;
+ batch_start = next_batch_start) {
+ /*
+ * For each batch (where all samples have the same
+ * timestamp).
+ */
+ int batch_len, sample_idx;
+ struct cros_ec_sensors_ring_sample *batch_end =
+ batch_start;
+ struct cros_ec_sensors_ring_sample *s;
+ s64 batch_timestamp = batch_start->timestamp;
+ s64 sample_period;
+
+ /*
+ * Skip over batches that start with the sensor types
+ * we're not looking at right now.
+ */
+ if (batch_start->sensor_id != id) {
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ /*
+ * Send out flush packets, but do not start a batch
+ * from a flush, as it happens asynchronously to the
+ * regular flow of events.
+ */
+ if (batch_start->flag &
+ MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ iio_push_to_buffers(indio_dev,
+ (u8 *)batch_start);
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ if (batch_start->timestamp <=
+ state->last_batch_timestamp[id]) {
+
+ batch_timestamp =
+ state->last_batch_timestamp[id];
+ batch_len = state->last_batch_len[id];
+
+ sample_idx = batch_len;
+
+ state->last_batch_timestamp[id] =
+ state->penultimate_batch_timestamp[id];
+ state->last_batch_len[id] =
+ state->penultimate_batch_len[id];
+ } else {
+ /*
+ * Push the first sample in the batch to the
+ * kfifo; it's guaranteed to be correct, the
+ * rest will follow later on.
+ */
+ sample_idx = batch_len = 1;
+ iio_push_to_buffers(indio_dev,
+ (u8 *)batch_start);
+ batch_start++;
+ }
+
+ /* Find all samples that have the same timestamp. */
+ for (s = batch_start; s < last_out; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't count them.
+ */
+ continue;
+ if (s->timestamp != batch_timestamp)
+ /* we discovered the next batch */
+ break;
+ if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ /* break on flush packets */
+ break;
+ batch_end = s;
+ batch_len++;
+ }
+
+ if (batch_len == 1)
+ goto done_with_this_batch;
+
+ /* Can we calculate period? */
+ if (state->last_batch_len[id] == 0) {
+ dev_warn(&indio_dev->dev, "Sensor %d: lost %d samples when spreading\n",
+ id, batch_len - 1);
+ goto done_with_this_batch;
+ /*
+ * Note: we're dropping the rest of the samples
+ * in this batch since we have no idea where
+ * they're supposed to go without a period
+ * calculation.
+ */
+ }
+
+ sample_period = div_s64(batch_timestamp -
+ state->last_batch_timestamp[id],
+ state->last_batch_len[id]);
+ dev_dbg(&indio_dev->dev,
+ "Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n",
+ batch_len, id,
+ state->last_batch_timestamp[id],
+ state->last_batch_len[id],
+ batch_timestamp,
+ sample_period);
+
+ /*
+ * Adjust timestamps of the samples then push them to
+ * kfifo.
+ */
+ for (s = batch_start; s <= batch_end; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't change them.
+ */
+ continue;
+
+ s->timestamp = batch_timestamp +
+ sample_period * sample_idx;
+ sample_idx++;
+
+ iio_push_to_buffers(indio_dev, (u8 *)s);
+ }
+
+done_with_this_batch:
+ state->penultimate_batch_timestamp[id] =
+ state->last_batch_timestamp[id];
+ state->penultimate_batch_len[id] =
+ state->last_batch_len[id];
+
+ state->last_batch_timestamp[id] = batch_timestamp;
+ state->last_batch_len[id] = batch_len;
+
+ next_batch_start = batch_end + 1;
+ }
+ }
+}
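
For illustration, not part of the patch: a toy version of the period arithmetic above, with invented values. The previous batch's timestamp and length yield the period, and samples after the first are spread forward from the batch timestamp:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t last_batch_ts = 1000000;	/* ns */
	int64_t last_batch_len = 2;
	int64_t batch_ts = 1020000;		/* ns */
	int64_t period = (batch_ts - last_batch_ts) / last_batch_len;
	int idx;

	/* idx 0 is the first sample, pushed as-is at batch_ts. */
	for (idx = 0; idx < 3; idx++)
		printf("sample %d -> %lld ns\n", idx,
		       (long long)(batch_ts + period * idx));
	return 0;
}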
+
+/*
+ * cros_ec_ring_spread_add_legacy: Calculate proper timestamps then
+ * add to ringbuffer (legacy).
+ *
+ * Note: This assumes we're running old firmware, where every sample's timestamp
+ * is after the sample. Run if tight_timestamps == false.
+ *
+ * If there is a sample with a proper timestamp
+ * timestamp | count
+ * older_unprocess_out --> TS1 | 1
+ * TS1 | 2
+ * out --> TS1 | 3
+ * next_out --> TS2 |
+ * We spread time for the samples [older_unprocess_out .. out]
+ * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2].
+ *
+ * If we reach the end of the samples, we compare with the
+ * current timestamp:
+ *
+ * older_unprocess_out --> TS1 | 1
+ * TS1 | 2
+ * out --> TS1 | 3
+ * We now have [TS1+1/3, TS1+2/3, current timestamp].
+ */
+static void cros_ec_ring_spread_add_legacy(
+ struct cros_ec_sensors_ring_state *state,
+ unsigned long sensor_mask,
+ s64 current_timestamp,
+ struct cros_ec_sensors_ring_sample *last_out)
+{
+ struct cros_ec_sensors_ring_sample *out;
+ struct iio_dev *indio_dev = state->core.indio_dev;
+ int i;
+
+ for_each_set_bit(i, &sensor_mask, BITS_PER_LONG) {
+ s64 older_timestamp;
+ s64 timestamp;
+ struct cros_ec_sensors_ring_sample *older_unprocess_out =
+ state->ring;
+ struct cros_ec_sensors_ring_sample *next_out;
+ int count = 1;
+
+ for (out = state->ring; out < last_out; out = next_out) {
+ s64 time_period;
+
+ next_out = out + 1;
+ if (out->sensor_id != i)
+ continue;
+
+ /* Timestamp to start with */
+ older_timestamp = out->timestamp;
+
+ /* find next sample */
+ while (next_out < last_out && next_out->sensor_id != i)
+ next_out++;
+
+ if (next_out >= last_out) {
+ timestamp = current_timestamp;
+ } else {
+ timestamp = next_out->timestamp;
+ if (timestamp == older_timestamp) {
+ count++;
+ continue;
+ }
+ }
+
+ /*
+ * The next sample has a new timestamp,
+ * spread the unprocessed samples.
+ */
+ if (next_out < last_out)
+ count++;
+ time_period = div_s64(timestamp - older_timestamp,
+ count);
+
+ for (; older_unprocess_out <= out;
+ older_unprocess_out++) {
+ if (older_unprocess_out->sensor_id != i)
+ continue;
+ older_timestamp += time_period;
+ older_unprocess_out->timestamp =
+ older_timestamp;
+ }
+ count = 1;
+ /* The next_out sample has a valid timestamp, skip. */
+ next_out++;
+ older_unprocess_out = next_out;
+ }
+ }
+
+ /* push the event into the kfifo */
+ for (out = state->ring; out < last_out; out++)
+ iio_push_to_buffers(indio_dev, (u8 *)out);
+}
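
For illustration, not part of the patch: a toy version of the legacy spreading described in the comment above, with invented values. Three samples stamped TS1 followed by one at TS2 are respread across the interval:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t ts1 = 100000, ts2 = 140000;	/* ns */
	int count = 4;		/* 3 stale samples plus the next one */
	int64_t period = (ts2 - ts1) / count;
	int64_t t = ts1;
	int i;

	/* Yields TS1 + 1/4, TS1 + 2/4, TS1 + 3/4 and finally TS2. */
	for (i = 0; i < count; i++) {
		t += period;
		printf("sample %d -> %lld ns\n", i, (long long)t);
	}
	return 0;
}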
+
+/*
* cros_ec_ring_handler - the trigger handler function
*
* @state: device information.
@@ -407,8 +797,6 @@ static void cros_ec_ring_handler(struct cros_ec_sensors_ring_state *state)
unsigned long sensor_mask = 0;
struct ec_response_motion_sensor_data *in;
struct cros_ec_sensors_ring_sample *out, *last_out;
- struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start;
-
mutex_lock(&state->core.cmd_lock);
/* Get FIFO information */
@@ -488,7 +876,8 @@ static void cros_ec_ring_handler(struct cros_ec_sensors_ring_state *state)
* the AP is slow to respond to the IRQ, the EC may have added new
* samples. Use the FIFO info timestamp as last timestamp then.
*/
- if ((last_out-1)->timestamp == current_timestamp)
+ if (!state->tight_timestamps &&
+ (last_out-1)->timestamp == current_timestamp)
current_timestamp = fifo_timestamp;
/* Check if buffer is set properly. */
@@ -512,97 +901,13 @@ static void cros_ec_ring_handler(struct cros_ec_sensors_ring_state *state)
}
/*
- * Calculate proper timestamps.
- *
- * Sometimes the EC receives only one interrupt (hence timestamp) for
- * a batch of samples. Only the first sample will have the correct
- * timestamp. So we must interpolate the other samples.
- * We use the previous batch timestamp and our current batch timestamp
- * as a way to calculate period, then spread the samples evenly.
- *
- * s0 int, 0ms
- * s1 int, 10ms
- * s2 int, 20ms
- * 30ms point goes by, no interrupt, previous one is still asserted
- * downloading s2 and s3
- * s3 sample, 20ms (incorrect timestamp)
- * s4 int, 40ms
- *
- * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch
- * has 2 samples in them, we adjust the timestamp of s3.
- * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 20ms. If s1 would have
- * been part of a bigger batch things would have gotten a little
- * more complicated.
- *
- * Note: we also assume another sensor sample doesn't break up a batch
- * in 2 or more partitions. Example, there can't ever be a sync sensor
- * in between S2 and S3. This simplifies the following code.
+ * Spread samples in case of batching, then add them to the ringbuffer.
*/
- for (batch_start = state->ring; batch_start < last_out;
- batch_start = next_batch_start) {
- /* for each batch (where all samples have the same timestamp) */
- int batch_len, sample_idx = 1;
- const int id = batch_start->sensor_id;
- struct cros_ec_sensors_ring_sample *batch_end = batch_start;
- struct cros_ec_sensors_ring_sample *s;
- const s64 batch_timestamp = batch_start->timestamp;
- s64 sample_period;
-
- /*
- * Push first sample in the batch to the kfifo,
- * it's guaranteed to be correct, rest come later.
- */
- iio_push_to_buffers(indio_dev, (u8 *)batch_start);
-
- /* Find all samples have the same timestamp. */
- for (s = batch_start + 1; s < last_out; s++) {
- if (s->timestamp != batch_timestamp)
- break; /* we discovered the next batch */
- if (s->sensor_id != id)
- break; /* another sensor, surely next batch */
- batch_end = s;
- }
- batch_len = batch_end - batch_start + 1;
-
- if (batch_len == 1)
- goto done_with_this_batch;
-
- dev_dbg(&indio_dev->dev,
- "Adjusting samples, sensor %d last_batch @%lld (%lld samples) batch_timestamp=%lld => period=%lld\n",
- id, state->last_batch_timestamp[id],
- state->last_batch_len[id], batch_timestamp,
- sample_period);
-
- /* Can we calculate period? */
- if (state->last_batch_len[id] == 0) {
- dev_warn(&indio_dev->dev, "Sensor %d: lost %d samples when spreading\n",
- id, batch_len - 1);
- goto done_with_this_batch;
- /*
- * Note: we're dropping the rest of the samples in
- * this batch since we have no idea where they're
- * supposed to go without a period calculation.
- */
- }
-
- sample_period = div_s64(batch_timestamp -
- state->last_batch_timestamp[id],
- state->last_batch_len[id]);
-
- /* Adjust timestamps of the samples then push them to kfifo. */
- for (s = batch_start + 1; s <= batch_end; s++) {
- s->timestamp = batch_timestamp +
- sample_period * sample_idx;
- sample_idx++;
-
- iio_push_to_buffers(indio_dev, (u8 *)s);
- }
-
-done_with_this_batch:
- state->last_batch_timestamp[id] = batch_timestamp;
- state->last_batch_len[id] = batch_len;
- next_batch_start = batch_end + 1;
- }
+ if (state->tight_timestamps)
+ cros_ec_ring_spread_add(state, sensor_mask, last_out);
+ else
+ cros_ec_ring_spread_add_legacy(state, sensor_mask,
+ current_timestamp, last_out);
ring_handler_end:
state->fifo_timestamp[LAST_TS] = current_timestamp;
@@ -779,6 +1084,12 @@ static int cros_ec_ring_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ state->tight_timestamps = !!cros_ec_check_features(ec_dev,
+ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
+
+ /* cros_ec defers sensor wakeup decisions to this device. */
+ device_init_wakeup(&indio_dev->dev, true);
+
/* register the notifier that will act as a top half interrupt. */
state->notifier.notifier_call = cros_ec_ring_event;
ret = blocking_notifier_chain_register(&ec_device->event_notifier,
@@ -805,12 +1116,12 @@ static int cros_ec_ring_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-const struct dev_pm_ops cros_ec_ring_pm_ops = {
+static const struct dev_pm_ops cros_ec_ring_pm_ops = {
.prepare = cros_ec_ring_prepare,
.complete = cros_ec_ring_complete
};
#else
-const struct dev_pm_ops cros_ec_ring_pm_ops = { };
+static const struct dev_pm_ops cros_ec_ring_pm_ops = { };
#endif
static struct platform_driver cros_ec_ring_platform_driver = {
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 44a30f286de102..57b1812a5a185a 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -507,7 +507,7 @@ static ssize_t ad9523_store(struct device *dev,
return ret;
if (!state)
- return 0;
+ return len;
mutex_lock(&indio_dev->mlock);
switch ((u32)this_attr->address) {
@@ -641,7 +641,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
AD9523_CLK_DIST_DIV_REV(ret);
*val = code / 1000000;
- *val2 = (code % 1000000) * 10;
+ *val2 = code % 1000000;
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
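
For illustration, not part of the patch: with IIO_VAL_INT_PLUS_MICRO, val carries whole units and val2 carries millionths, so the plain remainder above is the right shape (the removed "* 10" pushed val2 out of the micro range). A standalone sketch with an invented value:

#include <stdio.h>

int main(void)
{
	unsigned int code = 1570796;	/* phase in micro-units */
	int val = code / 1000000;	/* integer part: 1 */
	int val2 = code % 1000000;	/* micro part: 570796 */

	printf("%d.%06d\n", val, val2);	/* prints 1.570796 */
	return 0;
}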
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index ef4c73db5b53fb..51c4b7f2083b25 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -32,6 +32,34 @@ config LIDAR_LITE_V2
To compile this driver as a module, choose M here: the
module will be called pulsedlight-lite-v2
+config SX9310
+ tristate "SX9310 Semtech proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ depends on ACPI
+ help
+ Say Y here to build a driver for Semtech's SX9310 capacitive
+ proximity/button sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx9310.
+
+config SX932X
+ tristate "sx932x Semtech proximity sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select REGMAP_I2C
+ depends on I2C
+ depends on ACPI
+ help
+ Say Y here to build a driver for Semtech's sx932x capacitive
+ proximity/button sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sx932x.
+
config SX9500
tristate "SX9500 Semtech proximity sensor"
select IIO_BUFFER
diff --git a/drivers/iio/proximity/Makefile b/drivers/iio/proximity/Makefile
index 9aadd9a8ee9985..80f8d7ad680a37 100644
--- a/drivers/iio/proximity/Makefile
+++ b/drivers/iio/proximity/Makefile
@@ -5,4 +5,6 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AS3935) += as3935.o
obj-$(CONFIG_LIDAR_LITE_V2) += pulsedlight-lidar-lite-v2.o
+obj-$(CONFIG_SX9310) += sx9310.o
+obj-$(CONFIG_SX932X) += sx932x.o
obj-$(CONFIG_SX9500) += sx9500.o
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
new file mode 100644
index 00000000000000..696c3af1e89143
--- /dev/null
+++ b/drivers/iio/proximity/sx9310.c
@@ -0,0 +1,1163 @@
+/*
+ * Copyright 2018 Google LLC.
+ *
+ * Driver for Semtech's SX9310 capacitive proximity/button solution.
+ * Datasheet available at
+ * <http://www.semtech.com/images/datasheet/sx9310.pdf>.
+ * Based on SX9500 driver and Semtech driver using the input framework
+ * <https://my.syncplicity.com/share/teouwsim8niiaud/
+ * linux-driver-SX9310_NoSmartHSensing>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define SX9310_DRIVER_NAME "sx9310"
+#define SX9310_ACPI_NAME "STH9310"
+#define SX9310_IRQ_NAME "sx9310_event"
+
+#define SX9310_GPIO_INT "interrupt"
+
+/* Register definitions. */
+#define SX9310_REG_IRQ_SRC 0x00
+#define SX9310_REG_STAT0 0x01
+#define SX9310_REG_STAT1 0x02
+#define SX9310_REG_IRQ_MSK 0x03
+#define SX9310_REG_IRQ_FUNC 0x04
+
+#define SX9310_REG_PROX_CTRL0 0x10
+#define SX9310_REG_PROX_CTRL1 0x11
+#define SX9310_REG_PROX_CTRL2 0x12
+#define SX9310_REG_PROX_CTRL3 0x13
+#define SX9310_REG_PROX_CTRL4 0x14
+#define SX9310_REG_PROX_CTRL5 0x15
+#define SX9310_REG_PROX_CTRL6 0x16
+#define SX9310_REG_PROX_CTRL7 0x17
+#define SX9310_REG_PROX_CTRL8 0x18
+#define SX9310_REG_PROX_CTRL9 0x19
+#define SX9310_REG_PROX_CTRL10 0x1A
+#define SX9310_REG_PROX_CTRL11 0x1B
+#define SX9310_REG_PROX_CTRL12 0x1C
+#define SX9310_REG_PROX_CTRL13 0x1D
+#define SX9310_REG_PROX_CTRL14 0x1E
+#define SX9310_REG_PROX_CTRL15 0x1F
+#define SX9310_REG_PROX_CTRL16 0x20
+#define SX9310_REG_PROX_CTRL17 0x21
+#define SX9310_REG_PROX_CTRL18 0x22
+#define SX9310_REG_PROX_CTRL19 0x23
+#define SX9310_REG_SAR_CTRL0 0x2A
+#define SX9310_REG_SAR_CTRL1 0x2B
+#define SX9310_REG_SAR_CTRL2 0x2C
+
+#define SX9310_REG_SENSOR_SEL 0x30
+
+#define SX9310_REG_USE_MSB 0x31
+#define SX9310_REG_USE_LSB 0x32
+
+#define SX9310_REG_AVG_MSB 0x33
+#define SX9310_REG_AVG_LSB 0x34
+
+#define SX9310_REG_DIFF_MSB 0x35
+#define SX9310_REG_DIFF_LSB 0x36
+
+#define SX9310_REG_OFFSET_MSB 0x37
+#define SX9310_REG_OFFSET_LSB 0x38
+
+#define SX9310_REG_SAR_MSB 0x39
+#define SX9310_REG_SAR_LSB 0x3A
+
+#define SX9310_REG_I2CADDR 0x40
+#define SX9310_REG_PAUSE 0x41
+#define SX9310_REG_WHOAMI 0x42
+/* Expected content of the WHOAMI register. */
+#define SX9310_WHOAMI_VALUE 0x01
+
+#define SX9310_REG_RESET 0x7f
+/* Write this to REG_RESET to do a soft reset. */
+#define SX9310_SOFT_RESET 0xde
+
+
+/* Sensor Readback */
+
+/*
+ * These serve to identify the IRQ source in the IRQ_SRC register, and
+ * also to mask the IRQs in the IRQ_MSK register.
+ */
+#define SX9310_RESET_IRQ BIT(7)
+#define SX9310_CLOSE_IRQ BIT(6)
+#define SX9310_FAR_IRQ BIT(5)
+#define SX9310_COMPDONE_IRQ BIT(4)
+#define SX9310_CONVDONE_IRQ BIT(3)
+
+#define SX9310_SCAN_PERIOD_MASK GENMASK(7, 4)
+#define SX9310_SCAN_PERIOD_SHIFT 4
+
+#define SX9310_COMPSTAT_MASK GENMASK(3, 0)
+
+/* 4 channels, as defined in STAT0: COMB, CS2, CS1 and CS0. */
+#define SX9310_NUM_CHANNELS 4
+#define SX9310_CHAN_MASK GENMASK(2, 0)
+
+struct sx9310_data {
+ struct mutex mutex;
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ /*
+ * Last reading of the proximity status for each channel.
+ * We only send an event to user space when this changes.
+ */
+ bool prox_stat[SX9310_NUM_CHANNELS];
+ bool event_enabled[SX9310_NUM_CHANNELS];
+ bool trigger_enabled;
+ u16 *buffer;
+ /* Remember enabled channels and sample rate during suspend. */
+ unsigned int suspend_ctrl0;
+ struct completion completion;
+ int data_rdy_users, close_far_users;
+ int channel_users[SX9310_NUM_CHANNELS];
+ unsigned int num_irqs;
+};
+
+static const struct iio_event_spec sx9310_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define SX9310_CHANNEL(idx, name, addr) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .address = addr, \
+ .event_spec = sx9310_events, \
+ .num_event_specs = ARRAY_SIZE(sx9310_events), \
+ .extend_name = name, \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0, \
+ }, \
+ }
+
+static const struct iio_chan_spec sx9310_channels[] = {
+ SX9310_CHANNEL(0, "USE_CS0", SX9310_REG_USE_MSB),
+ SX9310_CHANNEL(1, "USE_CS1", SX9310_REG_USE_MSB),
+ SX9310_CHANNEL(2, "USE_CS2", SX9310_REG_USE_MSB),
+ SX9310_CHANNEL(3, "USE_COMB", SX9310_REG_USE_MSB),
+
+ SX9310_CHANNEL(4, "DIFF_CS0", SX9310_REG_DIFF_MSB),
+ SX9310_CHANNEL(5, "DIFF_CS1", SX9310_REG_DIFF_MSB),
+ SX9310_CHANNEL(6, "DIFF_CS2", SX9310_REG_DIFF_MSB),
+ SX9310_CHANNEL(7, "DIFF_COMB", SX9310_REG_DIFF_MSB),
+
+ IIO_CHAN_SOFT_TIMESTAMP(8),
+};
+
+/*
+ * Each entry contains the integer part (val) and the fractional part
+ * (val2), in millionths, of the sampling frequency in Hz. It conforms
+ * to the IIO output IIO_VAL_INT_PLUS_MICRO.
+ */
+static const struct {
+ int val;
+ int val2;
+} sx9310_samp_freq_table[] = {
+ {500, 0}, /* 0000: Min (no idle time) */
+ {66, 666666}, /* 0001: 15 ms */
+ {33, 333333}, /* 0010: 30 ms (Typ.) */
+ {22, 222222}, /* 0011: 45 ms */
+ {16, 666666}, /* 0100: 60 ms */
+ {11, 111111}, /* 0101: 90 ms */
+ {8, 333333}, /* 0110: 120 ms */
+ {5, 0}, /* 0111: 200 ms */
+ {2, 500000}, /* 1000: 400 ms */
+ {1, 666666}, /* 1001: 600 ms */
+ {1, 250000}, /* 1010: 800 ms */
+ {1, 0}, /* 1011: 1 s */
+ {0, 500000}, /* 1100: 2 s */
+ {0, 333333}, /* 1101: 3 s */
+ {0, 250000}, /* 1110: 4 s */
+ {0, 200000}, /* 1111: 5 s */
+};
+static const unsigned int sx9310_scan_period_table[] = {
+ 2, 15, 30, 45, 60, 90, 120, 200, 400, 600, 800, 1000, 2000, 3000, 4000, 5000,
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "500.0 66.666666 33.333333 22.222222 16.666666 "
+ "11.111111 8.333333 5.0 2.500000 1.666666 1.250000 "
+ "1.0 0.500000 8.333333 0.250000 0.200000");
+
+
+static const struct regmap_range sx9310_writable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_MSK, SX9310_REG_IRQ_FUNC),
+ regmap_reg_range(SX9310_REG_PROX_CTRL0, SX9310_REG_PROX_CTRL19),
+ regmap_reg_range(SX9310_REG_SAR_CTRL0, SX9310_REG_SAR_CTRL2),
+ regmap_reg_range(SX9310_REG_SENSOR_SEL, SX9310_REG_SENSOR_SEL),
+ regmap_reg_range(SX9310_REG_OFFSET_MSB, SX9310_REG_OFFSET_LSB),
+ regmap_reg_range(SX9310_REG_PAUSE, SX9310_REG_PAUSE),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_writeable_regs = {
+ .yes_ranges = sx9310_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_writable_reg_ranges),
+};
+
+/*
+ * All allocated registers are readable, so we just list unallocated
+ * ones.
+ */
+static const struct regmap_range sx9310_non_readable_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_FUNC + 1, SX9310_REG_PROX_CTRL0 - 1),
+ regmap_reg_range(SX9310_REG_SAR_CTRL2 + 1, SX9310_REG_SENSOR_SEL - 1),
+ regmap_reg_range(SX9310_REG_SAR_LSB + 1, SX9310_REG_I2CADDR - 1),
+ regmap_reg_range(SX9310_REG_WHOAMI + 1, SX9310_REG_RESET - 1),
+};
+
+static const struct regmap_access_table sx9310_readable_regs = {
+ .no_ranges = sx9310_non_readable_reg_ranges,
+ .n_no_ranges = ARRAY_SIZE(sx9310_non_readable_reg_ranges),
+};
+
+static const struct regmap_range sx9310_volatile_reg_ranges[] = {
+ regmap_reg_range(SX9310_REG_IRQ_SRC, SX9310_REG_STAT1),
+ regmap_reg_range(SX9310_REG_USE_MSB, SX9310_REG_DIFF_LSB),
+ regmap_reg_range(SX9310_REG_SAR_MSB, SX9310_REG_SAR_LSB),
+ regmap_reg_range(SX9310_REG_RESET, SX9310_REG_RESET),
+};
+
+static const struct regmap_access_table sx9310_volatile_regs = {
+ .yes_ranges = sx9310_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx9310_volatile_reg_ranges),
+};
+
+static const struct regmap_config sx9310_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SX9310_REG_RESET,
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &sx9310_writeable_regs,
+ .rd_table = &sx9310_readable_regs,
+ .volatile_table = &sx9310_volatile_regs,
+};
+
+static int sx9310_inc_users(struct sx9310_data *data, int *counter,
+ unsigned int reg, unsigned int bitmask)
+{
+ ++(*counter);
+ if (*counter != 1)
+ /* Bit is already active, nothing to do. */
+ return 0;
+
+ return regmap_update_bits(data->regmap, reg, bitmask, bitmask);
+}
+
+static int sx9310_dec_users(struct sx9310_data *data, int *counter,
+ unsigned int reg, unsigned int bitmask)
+{
+ --(*counter);
+ if (*counter != 0)
+ /* There are more users, do not deactivate. */
+ return 0;
+
+ return regmap_update_bits(data->regmap, reg, bitmask, 0);
+}
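
For illustration, not part of the patch: a standalone model of the refcounting pattern the two helpers above implement; the hardware bit is only touched on the 0 -> 1 and 1 -> 0 user transitions, so nested users share a single register write:

#include <stdio.h>

static int users;
static int hw_bit;	/* stands in for the bit set via regmap */

static void claim(void)
{
	if (++users == 1)
		hw_bit = 1;	/* first user enables the feature */
}

static void release(void)
{
	if (--users == 0)
		hw_bit = 0;	/* last user disables it */
}

int main(void)
{
	claim();
	claim();
	release();	/* one user left: bit stays set */
	printf("users=%d hw_bit=%d\n", users, hw_bit);
	release();
	printf("users=%d hw_bit=%d\n", users, hw_bit);
	return 0;
}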
+
+static int sx9310_inc_chan_users(struct sx9310_data *data, int chan)
+{
+ return sx9310_inc_users(data, &data->channel_users[chan],
+ SX9310_REG_PROX_CTRL0, BIT(chan));
+}
+
+static int sx9310_dec_chan_users(struct sx9310_data *data, int chan)
+{
+ return sx9310_dec_users(data, &data->channel_users[chan],
+ SX9310_REG_PROX_CTRL0, BIT(chan));
+}
+
+static int sx9310_inc_data_rdy_users(struct sx9310_data *data)
+{
+ return sx9310_inc_users(data, &data->data_rdy_users,
+ SX9310_REG_IRQ_MSK, SX9310_CONVDONE_IRQ);
+}
+
+static int sx9310_dec_data_rdy_users(struct sx9310_data *data)
+{
+ return sx9310_dec_users(data, &data->data_rdy_users,
+ SX9310_REG_IRQ_MSK, SX9310_CONVDONE_IRQ);
+}
+
+static int sx9310_inc_close_far_users(struct sx9310_data *data)
+{
+ return sx9310_inc_users(data, &data->close_far_users,
+ SX9310_REG_IRQ_MSK,
+ SX9310_CLOSE_IRQ | SX9310_FAR_IRQ);
+}
+
+static int sx9310_dec_close_far_users(struct sx9310_data *data)
+{
+ return sx9310_dec_users(data, &data->close_far_users,
+ SX9310_REG_IRQ_MSK,
+ SX9310_CLOSE_IRQ | SX9310_FAR_IRQ);
+}
+
+static int sx9310_read_prox_data(struct sx9310_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+ __be16 regval;
+
+ ret = regmap_write(data->regmap, SX9310_REG_SENSOR_SEL, chan->channel);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, chan->address, &regval, 2);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(be16_to_cpu(regval),
+ (chan->address == SX9310_REG_DIFF_MSB ? 11 : 15));
+
+ return 0;
+}
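
For illustration, not part of the patch: what the sign extension above does for the 12-bit DIFF readings, where bit 11 is the sign bit. A standalone sketch with a local stand-in for the kernel's sign_extend32():

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's sign_extend32(). */
static int32_t sign_extend32(uint32_t value, int index)
{
	int shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	printf("%d\n", sign_extend32(0xFFF, 11));	/* -1, not 4095 */
	printf("%d\n", sign_extend32(0x7FF, 11));	/* 2047 */
	return 0;
}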
+
+/*
+ * If we have no interrupt support, we have to wait for a scan period
+ * after enabling a channel to get a result.
+ */
+static int sx9310_wait_for_sample(struct sx9310_data *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &val);
+ if (ret < 0)
+ return ret;
+
+ val = (val & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+
+ msleep(sx9310_scan_period_table[val]);
+
+ return 0;
+}
+
+static int sx9310_read_proximity(struct sx9310_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ ret = sx9310_inc_chan_users(data, chan->channel & SX9310_CHAN_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = sx9310_inc_data_rdy_users(data);
+ if (ret < 0)
+ goto out_dec_chan;
+
+ mutex_unlock(&data->mutex);
+
+ if (data->client->irq > 0)
+ ret = wait_for_completion_interruptible(&data->completion);
+ else
+ ret = sx9310_wait_for_sample(data);
+
+ mutex_lock(&data->mutex);
+
+ if (ret < 0)
+ goto out_dec_data_rdy;
+
+ ret = sx9310_read_prox_data(data, chan, val);
+ if (ret < 0)
+ goto out_dec_data_rdy;
+
+ ret = sx9310_dec_data_rdy_users(data);
+ if (ret < 0)
+ goto out_dec_chan;
+
+ ret = sx9310_dec_chan_users(data, chan->channel & SX9310_CHAN_MASK);
+ if (ret < 0)
+ goto out;
+
+ ret = IIO_VAL_INT;
+
+ goto out;
+
+out_dec_data_rdy:
+ sx9310_dec_data_rdy_users(data);
+out_dec_chan:
+ sx9310_dec_chan_users(data, chan->channel & SX9310_CHAN_MASK);
+out:
+ mutex_unlock(&data->mutex);
+ reinit_completion(&data->completion);
+
+ return ret;
+}
+
+static int sx9310_read_samp_freq(struct sx9310_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ unsigned int regval;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &regval);
+ mutex_unlock(&data->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & SX9310_SCAN_PERIOD_MASK) >> SX9310_SCAN_PERIOD_SHIFT;
+ *val = sx9310_samp_freq_table[regval].val;
+ *val2 = sx9310_samp_freq_table[regval].val2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int sx9310_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ ret = sx9310_read_proximity(data, chan, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9310_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sx9310_set_samp_freq(struct sx9310_data *data,
+ int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
+ if (val == sx9310_samp_freq_table[i].val &&
+ val2 == sx9310_samp_freq_table[i].val2)
+ break;
+
+ if (i == ARRAY_SIZE(sx9310_samp_freq_table))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL0,
+ SX9310_SCAN_PERIOD_MASK,
+ i << SX9310_SCAN_PERIOD_SHIFT);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx9310_set_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t sx9310_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ /*
+ * Even if no event is enabled, we need to wake the thread to
+ * clear the interrupt state by reading SX9310_REG_IRQ_SRC. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static void sx9310_push_events(struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int val, chan;
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, SX9310_REG_STAT0, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ return;
+ }
+
+ for (chan = 0; chan < SX9310_NUM_CHANNELS; chan++) {
+ int dir;
+ u64 ev;
+ bool new_prox = val & BIT(chan);
+
+ if (!data->event_enabled[chan])
+ continue;
+ if (new_prox == data->prox_stat[chan])
+ /* No change on this channel. */
+ continue;
+
+ dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir);
+ iio_push_event(indio_dev, ev, iio_get_time_ns());
+ data->prox_stat[chan] = new_prox;
+ }
+}
+
+static irqreturn_t sx9310_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
+ if (val & (SX9310_CLOSE_IRQ | SX9310_FAR_IRQ))
+ sx9310_push_events(indio_dev);
+
+ if (val & SX9310_CONVDONE_IRQ)
+ complete(&data->completion);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY || type != IIO_EV_TYPE_THRESH ||
+ dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ return data->event_enabled[chan->channel & SX9310_CHAN_MASK];
+}
+
+static int sx9310_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret, sx_channel = chan->channel & SX9310_CHAN_MASK;
+
+ if (chan->type != IIO_PROXIMITY || type != IIO_EV_TYPE_THRESH ||
+ dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ if (state == 1) {
+ ret = sx9310_inc_chan_users(data, sx_channel);
+ if (ret < 0)
+ goto out_unlock;
+ ret = sx9310_inc_close_far_users(data);
+ if (ret < 0)
+ goto out_undo_chan;
+ } else {
+ ret = sx9310_dec_chan_users(data, sx_channel);
+ if (ret < 0)
+ goto out_unlock;
+ ret = sx9310_dec_close_far_users(data);
+ if (ret < 0)
+ goto out_undo_chan;
+ }
+
+ data->event_enabled[sx_channel] = state;
+ goto out_unlock;
+
+out_undo_chan:
+ if (state == 1)
+ sx9310_dec_chan_users(data, sx_channel);
+ else
+ sx9310_inc_chan_users(data, sx_channel);
+out_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int sx9310_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ kfree(data->buffer);
+ data->buffer = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ mutex_unlock(&data->mutex);
+
+ if (data->buffer == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static ssize_t sx9310_uid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
+ if (adev == NULL)
+ return sprintf(buf, "%d\n", indio_dev->id);
+ else
+ return sprintf(buf, "%s\n", acpi_device_uid(adev));
+}
+
+static IIO_DEVICE_ATTR(uid, 0444, sx9310_uid_show, NULL, 0);
+
+static struct attribute *sx9310_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_uid.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sx9310_attribute_group = {
+ .attrs = sx9310_attributes,
+};
+
+static const struct iio_info sx9310_info = {
+ .attrs = &sx9310_attribute_group,
+ .read_raw = &sx9310_read_raw,
+ .write_raw = &sx9310_write_raw,
+ .read_event_config = &sx9310_read_event_config,
+ .write_event_config = &sx9310_write_event_config,
+ .update_scan_mode = &sx9310_update_scan_mode,
+};
+
+static int sx9310_set_trigger_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ if (state)
+ ret = sx9310_inc_data_rdy_users(data);
+ else
+ ret = sx9310_dec_data_rdy_users(data);
+ if (ret < 0)
+ goto out;
+
+ data->trigger_enabled = state;
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops sx9310_trigger_ops = {
+ .set_trigger_state = sx9310_set_trigger_state,
+};
+
+static irqreturn_t sx9310_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int val, bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = sx9310_read_prox_data(data, &indio_dev->channels[bit],
+ &val);
+ if (ret < 0)
+ goto out;
+
+ data->buffer[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+out:
+ mutex_unlock(&data->mutex);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int sx9310_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret = 0, i;
+
+ mutex_lock(&data->mutex);
+
+ for (i = 0; i < SX9310_NUM_CHANNELS; i++)
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ ret = sx9310_inc_chan_users(data, i);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ for (i = i - 1; i >= 0; i--)
+ if (test_bit(i, indio_dev->active_scan_mask))
+ sx9310_dec_chan_users(data, i);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx9310_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret = 0, i;
+
+ iio_triggered_buffer_predisable(indio_dev);
+
+ mutex_lock(&data->mutex);
+
+ for (i = 0; i < SX9310_NUM_CHANNELS; i++)
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ ret = sx9310_dec_chan_users(data, i);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ for (i = i - 1; i >= 0; i--)
+ if (test_bit(i, indio_dev->active_scan_mask))
+ sx9310_inc_chan_users(data, i);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sx9310_buffer_setup_ops = {
+ .preenable = sx9310_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = sx9310_buffer_predisable,
+};
+
+struct sx9310_reg_config {
+ const char *register_name;
+ u8 reg;
+ u8 def;
+};
+
+#define SX9310_REG_CONFIG(_name, _reg, _def) \
+{ \
+ .register_name = SX9310_ACPI_NAME ",reg_" _name, \
+ .reg = SX9310_REG_##_reg, \
+ .def = _def \
+}
+
+static const struct sx9310_reg_config sx9310_default_regs[] = {
+ {
+ .register_name = NULL,
+ .reg = SX9310_REG_IRQ_MSK,
+ .def = 0x60,
+ },
+ {
+ .register_name = NULL,
+ .reg = SX9310_REG_IRQ_FUNC,
+ .def = 0x00,
+ },
+ /*
+ * The lower 4 bits should not be set, as they enable sensor
+ * measurements. Turning detection on before the configuration
+ * registers hold good values can cause the device to return
+ * erroneous readings.
+ */
+ SX9310_REG_CONFIG("prox_ctrl0", PROX_CTRL0, 0x10),
+ SX9310_REG_CONFIG("prox_ctrl1", PROX_CTRL1, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl2", PROX_CTRL2, 0x84),
+ SX9310_REG_CONFIG("prox_ctrl3", PROX_CTRL3, 0x0F),
+ SX9310_REG_CONFIG("prox_ctrl4", PROX_CTRL4, 0x07),
+ SX9310_REG_CONFIG("prox_ctrl5", PROX_CTRL5, 0xC2),
+ SX9310_REG_CONFIG("prox_ctrl6", PROX_CTRL6, 0x20),
+ SX9310_REG_CONFIG("prox_ctrl7", PROX_CTRL7, 0x0D),
+ SX9310_REG_CONFIG("prox_ctrl8", PROX_CTRL8, 0x8D),
+ SX9310_REG_CONFIG("prox_ctrl9", PROX_CTRL9, 0x43),
+ SX9310_REG_CONFIG("prox_ctrl10", PROX_CTRL10, 0x11),
+ SX9310_REG_CONFIG("prox_ctrl11", PROX_CTRL11, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl12", PROX_CTRL12, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl13", PROX_CTRL13, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl14", PROX_CTRL14, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl15", PROX_CTRL15, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl16", PROX_CTRL16, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl17", PROX_CTRL17, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl18", PROX_CTRL18, 0x00),
+ SX9310_REG_CONFIG("prox_ctrl19", PROX_CTRL19, 0x00),
+ SX9310_REG_CONFIG("sar_ctrl0", SAR_CTRL0, 0x50),
+ SX9310_REG_CONFIG("sar_ctrl1", SAR_CTRL1, 0x8A),
+ SX9310_REG_CONFIG("sar_ctrl2", SAR_CTRL2, 0x3C),
+};
+
+static int sx9310_read_register_property(struct acpi_device *adev,
+ const struct sx9310_reg_config *cfg,
+ u8 *value)
+{
+ /* FIXME: only ACPI supported. */
+ const union acpi_object *acpi_value = NULL;
+ int ret;
+
+ if ((adev == NULL) || (cfg->register_name == NULL)) {
+ *value = cfg->def;
+ return 0;
+ }
+
+ ret = acpi_dev_get_property(adev, cfg->register_name,
+ ACPI_TYPE_INTEGER, &acpi_value);
+ switch (ret) {
+ case -EPROTO:
+ dev_err(&adev->dev, "ACPI property %s typed incorrectly\n",
+ cfg->register_name);
+ break;
+ case -EINVAL:
+ dev_dbg(&adev->dev, "property %s missing from ACPI\n",
+ cfg->register_name);
+ break;
+ }
+
+ *value = acpi_value ? (u8)acpi_value->integer.value : cfg->def;
+ return 0;
+}
+
+static int sx9310_load_config(struct device *dev, struct regmap *regmap)
+{
+ u8 val;
+ int i, ret;
+ const struct sx9310_reg_config *cfg;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev == NULL)
+ dev_warn(dev, "ACPI configuration missing\n");
+
+ for (i = 0; i < ARRAY_SIZE(sx9310_default_regs); ++i) {
+ cfg = &sx9310_default_regs[i];
+ ret = sx9310_read_register_property(adev, cfg, &val);
+ if (ret < 0)
+ return ret;
+ ret = regmap_write(regmap, cfg->reg, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Activate all channels and perform an initial compensation. */
+static int sx9310_init_compensation(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int i, ret;
+ unsigned int val;
+ unsigned int ctrl0;
+
+ ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0, &ctrl0);
+ if (ret < 0)
+ return ret;
+
+ /* run the compensation phase on all channels */
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0 | 0xF);
+ if (ret < 0)
+ return ret;
+
+ for (i = 100; i >= 0; i--) {
+ usleep_range(10000, 20000);
+ ret = regmap_read(data->regmap, SX9310_REG_STAT1, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & SX9310_COMPSTAT_MASK))
+ break;
+ }
+
+ if (i < 0) {
+ dev_err(&data->client->dev,
+ "initial compensation timed out: 0x%02x", val);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
+ return ret;
+}
+
+static int sx9310_init_device(struct iio_dev *indio_dev)
+{
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_write(data->regmap, SX9310_REG_IRQ_MSK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, SX9310_REG_RESET,
+ SX9310_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 2000); /* power-up time is ~1ms. */
+
+ ret = regmap_write(data->regmap, SX9310_REG_RESET, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, SX9310_REG_IRQ_SRC, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = sx9310_load_config(&indio_dev->dev, data->regmap);
+ if (ret < 0)
+ return ret;
+
+ return sx9310_init_compensation(indio_dev);
+}
+
+static int sx9310_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct sx9310_data *data;
+ unsigned int whoami;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+ init_completion(&data->completion);
+ data->trigger_enabled = false;
+
+ data->regmap = devm_regmap_init_i2c(client, &sx9310_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ ret = regmap_read(data->regmap, SX9310_REG_WHOAMI, &whoami);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "error in reading WHOAMI register: %d", ret);
+ return -ENODEV;
+ }
+ if (whoami != SX9310_WHOAMI_VALUE) {
+ dev_err(&client->dev, "unexpected WHOAMI response: %u", whoami);
+ return -ENODEV;
+ }
+
+ ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(&client->dev));
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = SX9310_DRIVER_NAME;
+ indio_dev->channels = sx9310_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sx9310_channels);
+ indio_dev->info = &sx9310_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = sx9310_init_device(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ if (client->irq <= 0)
+ dev_warn(&client->dev, "no valid irq found\n");
+ else {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ sx9310_irq_handler, sx9310_irq_thread_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ SX9310_IRQ_NAME, indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d", indio_dev->name, indio_dev->id);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->dev.parent = &client->dev;
+ data->trig->ops = &sx9310_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = iio_trigger_register(data->trig);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL, sx9310_trigger_handler,
+ &sx9310_buffer_setup_ops);
+ if (ret < 0)
+ goto out_trigger_unregister;
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret < 0)
+ goto out_trigger_unregister;
+
+ return 0;
+
+out_trigger_unregister:
+ if (client->irq > 0)
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int sx9310_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct sx9310_data *data = iio_priv(indio_dev);
+
+ if (client->irq > 0)
+ iio_trigger_unregister(data->trig);
+ kfree(data->buffer);
+ return 0;
+}
+
+static int __maybe_unused sx9310_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ disable_irq_nosync(data->client->irq);
+
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused sx9310_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx9310_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
+ mutex_unlock(&data->mutex);
+
+ enable_irq(data->client->irq);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sx9310_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sx9310_suspend, sx9310_resume)
+};
+
+static const struct acpi_device_id sx9310_acpi_match[] = {
+ {SX9310_ACPI_NAME, 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sx9310_acpi_match);
+
+static const struct of_device_id sx9310_of_match[] = {
+ { .compatible = "semtech," SX9310_DRIVER_NAME, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sx9310_of_match);
+
+static const struct i2c_device_id sx9310_id[] = {
+ {SX9310_DRIVER_NAME, 0},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sx9310_id);
+
+static struct i2c_driver sx9310_driver = {
+ .driver = {
+ .name = SX9310_DRIVER_NAME,
+ .acpi_match_table = ACPI_PTR(sx9310_acpi_match),
+ .of_match_table = of_match_ptr(sx9310_of_match),
+ .pm = &sx9310_pm_ops,
+ },
+ .probe = sx9310_probe,
+ .remove = sx9310_remove,
+ .id_table = sx9310_id,
+};
+module_i2c_driver(sx9310_driver);
+
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_DESCRIPTION("Driver for Semtech SX9310 proximity sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/proximity/sx932x.c b/drivers/iio/proximity/sx932x.c
new file mode 100644
index 00000000000000..094167fa481543
--- /dev/null
+++ b/drivers/iio/proximity/sx932x.c
@@ -0,0 +1,1470 @@
+/*
+ * Copyright 2018 Google LLC.
+ *
+ * Driver for Semtech's SX9320 capacitive proximity/button solution.
+ * Datasheet available at
+ * <http://www.semtech.com/images/datasheet/sx9320.pdf>.
+ * Based on SX9500 driver and Semtech driver using the input framework
+ * <https://my.syncplicity.com/share/teouwsim8niiaud/
+ * linux-driver-SX9320_NoSmartHSensing>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regmap.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+
+#define SX932X_DRIVER_NAME "sx932x"
+#define SX9320_ACPI_NAME "STH9320"
+#define SX9321_ACPI_NAME "STH9321"
+#define SX932X_DEV_NB 2
+
+#define SX932X_IRQ_NAME "sx932x_event"
+
+#define SX932X_GPIO_INT "interrupt"
+
+/* Register definitions. */
+#define SX932X_REG_IRQ_SRC 0x00
+#define SX932X_REG_STAT0 0x01
+#define SX932X_REG_STAT1 0x02
+#define SX932X_REG_STAT2 0x03
+#define SX932X_REG_STAT3 0x04
+#define SX932X_REG_IRQ_MSK 0x05
+#define SX932X_REG_IRQ_CFG0 0x06
+#define SX932X_REG_IRQ_CFG2 0x07
+
+#define SX932X_REG_GNRL_CTRL0 0x10
+#define SX932X_REG_GNRL_CTRL1 0x11
+
+#define SX932X_REG_I2C_ADDR 0x14
+#define SX932X_REG_CLK_SPRD 0x15
+
+#define SX932X_REG_AFE_CTRL0 0x20
+#define SX932X_REG_AFE_CTRL1 0x21
+#define SX932X_REG_AFE_CTRL2 0x22
+#define SX932X_REG_AFE_CTRL3 0x23
+#define SX932X_REG_AFE_CTRL4 0x24
+#define SX932X_REG_AFE_CTRL5 0x25
+#define SX932X_REG_AFE_CTRL6 0x26
+#define SX932X_REG_AFE_CTRL7 0x27
+#define SX932X_REG_AFE_PH0 0x28
+#define SX932X_REG_AFE_PH1 0x29
+#define SX932X_REG_AFE_PH2 0x2a
+#define SX932X_REG_AFE_PH3 0x2b
+#define SX932X_REG_AFE_CTRL8 0x2c
+#define SX932X_REG_AFE_CTRL9 0x2d
+
+#define SX932X_REG_PROX_CTRL0 0x30
+#define SX932X_REG_PROX_CTRL1 0x31
+#define SX932X_REG_PROX_CTRL2 0x32
+#define SX932X_REG_PROX_CTRL3 0x33
+#define SX932X_REG_PROX_CTRL4 0x34
+#define SX932X_REG_PROX_CTRL5 0x35
+#define SX932X_REG_PROX_CTRL6 0x36
+#define SX932X_REG_PROX_CTRL7 0x37
+
+#define SX932X_REG_ADV_CTRL0 0x40
+#define SX932X_REG_ADV_CTRL1 0x41
+#define SX932X_REG_ADV_CTRL2 0x42
+#define SX932X_REG_ADV_CTRL3 0x43
+#define SX932X_REG_ADV_CTRL4 0x44
+#define SX932X_REG_ADV_CTRL5 0x45
+#define SX932X_REG_ADV_CTRL6 0x46
+#define SX932X_REG_ADV_CTRL7 0x47
+#define SX932X_REG_ADV_CTRL8 0x48
+#define SX932X_REG_ADV_CTRL9 0x49
+#define SX932X_REG_ADV_CTRL10 0x4a
+#define SX932X_REG_ADV_CTRL11 0x4b
+#define SX932X_REG_ADV_CTRL12 0x4c
+#define SX932X_REG_ADV_CTRL13 0x4d
+#define SX932X_REG_ADV_CTRL14 0x4e
+#define SX932X_REG_ADV_CTRL15 0x4f
+#define SX932X_REG_ADV_CTRL16 0x50
+#define SX932X_REG_ADV_CTRL17 0x51
+#define SX932X_REG_ADV_CTRL18 0x52
+#define SX932X_REG_ADV_CTRL19 0x53
+#define SX932X_REG_ADV_CTRL20 0x54
+
+#define SX932X_REG_PHASE_SEL 0x60
+
+#define SX932X_REG_USE_MSB 0x61
+#define SX932X_REG_USE_LSB 0x62
+
+#define SX932X_REG_AVG_MSB 0x63
+#define SX932X_REG_AVG_LSB 0x64
+
+#define SX932X_REG_DIFF_MSB 0x65
+#define SX932X_REG_DIFF_LSB 0x66
+
+#define SX932X_REG_OFFSET_MSB 0x67
+#define SX932X_REG_OFFSET_LSB 0x68
+
+#define SX932X_REG_SAR_MSB 0x69
+#define SX932X_REG_SAR_LSB 0x6a
+
+#define SX932X_REG_RESET 0x9f
+/* Write this to REG_RESET to do a soft reset. */
+#define SX932X_SOFT_RESET 0xde
+
+#define SX932X_REG_WHOAMI 0xfa
+#define SX9320_WHOAMI 0x20
+#define SX9321_WHOAMI 0x21
+#define SX932X_REG_REVISION 0xfe
+
+
+/* Sensor Readback */
+
+/*
+ * These serve to identify the IRQ source in the IRQ_SRC register, and
+ * also to mask the IRQs in the IRQ_MSK register.
+ */
+#define SX932X_RESET_IRQ BIT(7)
+#define SX932X_CLOSE_IRQ BIT(6)
+#define SX932X_FAR_IRQ BIT(5)
+#define SX932X_COMPDONE_IRQ BIT(4)
+#define SX932X_CONVDONE_IRQ BIT(3)
+
+#define SX932X_SCAN_PERIOD_MASK GENMASK(7, 4)
+#define SX932X_SCAN_PERIOD_SHIFT 4
+
+#define SX932X_COMPSTAT_MASK GENMASK(3, 0)
+
+/* 4 channels, as defined in STAT0: PH0, PH1, PH2 and PH3. */
+#define SX932X_NUM_CHANNELS 4
+#define SX932X_CHAN_MASK GENMASK(1, 0)
+
+struct sx932x_reg_config {
+ const char *register_name;
+ u8 reg;
+ u8 def;
+};
+
+struct sx932x_data {
+ struct mutex mutex;
+ struct i2c_client *client;
+ const struct sx932x_reg_config *reg_config;
+ struct iio_trigger *trig;
+ struct regmap *regmap;
+ /*
+ * Last reading of the proximity status for each channel.
+ * We only send an event to user space when this changes.
+ */
+ bool prox_stat[SX932X_NUM_CHANNELS];
+ bool event_enabled[SX932X_NUM_CHANNELS];
+ bool trigger_enabled;
+ u16 *buffer;
+ /* Remember which phases are enabled during a suspend. */
+ unsigned int suspend_reg_gnrl_ctrl1;
+ struct completion completion;
+ int data_rdy_users, close_far_users;
+ int channel_users[SX932X_NUM_CHANNELS];
+};
+
+static const struct iio_event_spec sx932x_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define SX932X_RAW_CHANNEL(idx, name, addr) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .address = addr, \
+ .event_spec = sx932x_events, \
+ .num_event_specs = ARRAY_SIZE(sx932x_events), \
+ .extend_name = name, \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0, \
+ }, \
+ }
+
+#define SX932X_PROCESSED_CHANNEL(idx, name, addr) \
+ { \
+ .type = IIO_PROXIMITY, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .channel = idx, \
+ .address = addr, \
+ .event_spec = sx932x_events, \
+ .num_event_specs = ARRAY_SIZE(sx932x_events), \
+ .extend_name = name, \
+ .scan_index = idx, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .shift = 0, \
+ }, \
+ }
+
+static const struct iio_chan_spec sx932x_channels[] = {
+ SX932X_RAW_CHANNEL(0, "UC0", SX932X_REG_USE_MSB),
+ SX932X_RAW_CHANNEL(1, "UC1", SX932X_REG_USE_MSB),
+ SX932X_RAW_CHANNEL(2, "UC2", SX932X_REG_USE_MSB),
+ SX932X_RAW_CHANNEL(3, "UC3", SX932X_REG_USE_MSB),
+
+ SX932X_RAW_CHANNEL(4, "AC0", SX932X_REG_AVG_MSB),
+ SX932X_RAW_CHANNEL(5, "AC1", SX932X_REG_AVG_MSB),
+ SX932X_RAW_CHANNEL(6, "AC2", SX932X_REG_AVG_MSB),
+ SX932X_RAW_CHANNEL(7, "AC3", SX932X_REG_AVG_MSB),
+
+ SX932X_RAW_CHANNEL(8, "DC0", SX932X_REG_DIFF_MSB),
+ SX932X_RAW_CHANNEL(9, "DC1", SX932X_REG_DIFF_MSB),
+ SX932X_RAW_CHANNEL(10, "DC2", SX932X_REG_DIFF_MSB),
+ SX932X_RAW_CHANNEL(11, "DC3", SX932X_REG_DIFF_MSB),
+
+ SX932X_RAW_CHANNEL(12, "CO0", SX932X_REG_OFFSET_MSB),
+ SX932X_RAW_CHANNEL(13, "CO1", SX932X_REG_OFFSET_MSB),
+ SX932X_RAW_CHANNEL(14, "CO2", SX932X_REG_OFFSET_MSB),
+ SX932X_RAW_CHANNEL(15, "CO3", SX932X_REG_OFFSET_MSB),
+
+ IIO_CHAN_SOFT_TIMESTAMP(16),
+};
+
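+/*
+ * Available sampling frequencies, as an integer plus a microhertz part
+ * (IIO_VAL_INT_PLUS_MICRO). The table index matches the 4-bit
+ * scan-period field in PROX_CTRL0.
+ */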
+static const struct {
+ int val;
+ int val2;
+} sx932x_samp_freq_table[] = {
+ {500, 0}, /* 0000: Min (no idle time) */
+ {66, 666666}, /* 0001: 15 ms */
+ {33, 333333}, /* 0010: 30 ms (Typ.) */
+ {22, 222222}, /* 0011: 45 ms */
+ {16, 666666}, /* 0100: 60 ms */
+ {11, 111111}, /* 0101: 90 ms */
+ {8, 333333}, /* 0110: 120 ms */
+ {5, 0}, /* 0111: 200 ms */
+ {2, 500000}, /* 1000: 400 ms */
+ {1, 666666}, /* 1001: 600 ms */
+ {1, 250000}, /* 1010: 800 ms */
+ {1, 0}, /* 1011: 1 s */
+ {0, 500000}, /* 1100: 2 s */
+ {0, 333333}, /* 1101: 3 s */
+ {0, 250000}, /* 1110: 4 s */
+ {0, 200000}, /* 1111: 5 s */
+};
+static const unsigned int sx932x_scan_period_table[] = {
+ 2, 15, 30, 45, 60, 90, 120, 200, 400, 600, 800, 1000, 2000, 3000, 4000, 5000,
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(
+ "500.0 66.666666 33.333333 22.222222 16.666666 "
+ "11.111111 8.333333 5.0 2.500000 1.666666 1.250000 "
+ "1.0 0.500000 8.333333 0.250000 0.200000");
+
+static const struct regmap_range sx932x_writable_reg_ranges[] = {
+ regmap_reg_range(SX932X_REG_IRQ_MSK, SX932X_REG_IRQ_CFG2),
+ regmap_reg_range(SX932X_REG_GNRL_CTRL0, SX932X_REG_GNRL_CTRL1),
+ /* Leave the I2C and clock-spreading registers unavailable. */
+ regmap_reg_range(SX932X_REG_AFE_CTRL0, SX932X_REG_AFE_CTRL9),
+ regmap_reg_range(SX932X_REG_PROX_CTRL0, SX932X_REG_PROX_CTRL7),
+ regmap_reg_range(SX932X_REG_ADV_CTRL0, SX932X_REG_ADV_CTRL20),
+ regmap_reg_range(SX932X_REG_PHASE_SEL, SX932X_REG_PHASE_SEL),
+ regmap_reg_range(SX932X_REG_OFFSET_MSB, SX932X_REG_OFFSET_LSB),
+ regmap_reg_range(SX932X_REG_RESET, SX932X_REG_RESET),
+};
+
+static const struct regmap_access_table sx932x_writeable_regs = {
+ .yes_ranges = sx932x_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx932x_writable_reg_ranges),
+};
+
+/*
+ * All allocated registers are readable, so we just list unallocated
+ * ones.
+ */
+static const struct regmap_range sx932x_non_readable_reg_ranges[] = {
+ regmap_reg_range(SX932X_REG_IRQ_CFG2 + 1, SX932X_REG_GNRL_CTRL0 - 1),
+ regmap_reg_range(SX932X_REG_GNRL_CTRL1 + 1, SX932X_REG_AFE_CTRL0 - 1),
+ regmap_reg_range(SX932X_REG_AFE_CTRL9 + 1, SX932X_REG_PROX_CTRL0 - 1),
+ regmap_reg_range(SX932X_REG_PROX_CTRL7 + 1, SX932X_REG_ADV_CTRL0 - 1),
+ regmap_reg_range(SX932X_REG_ADV_CTRL20 + 1, SX932X_REG_PHASE_SEL - 1),
+ regmap_reg_range(SX932X_REG_SAR_LSB + 1, SX932X_REG_RESET - 1),
+ regmap_reg_range(SX932X_REG_RESET + 1, SX932X_REG_WHOAMI - 1),
+ regmap_reg_range(SX932X_REG_WHOAMI + 1, SX932X_REG_REVISION - 1),
+};
+
+static const struct regmap_access_table sx932x_readable_regs = {
+ .no_ranges = sx932x_non_readable_reg_ranges,
+ .n_no_ranges = ARRAY_SIZE(sx932x_non_readable_reg_ranges),
+};
+
+static const struct regmap_range sx932x_volatile_reg_ranges[] = {
+ regmap_reg_range(SX932X_REG_IRQ_SRC, SX932X_REG_STAT3),
+ regmap_reg_range(SX932X_REG_USE_MSB, SX932X_REG_DIFF_LSB),
+ regmap_reg_range(SX932X_REG_SAR_MSB, SX932X_REG_SAR_LSB),
+ regmap_reg_range(SX932X_REG_WHOAMI, SX932X_REG_WHOAMI),
+ regmap_reg_range(SX932X_REG_REVISION, SX932X_REG_REVISION),
+};
+
+static const struct regmap_access_table sx932x_volatile_regs = {
+ .yes_ranges = sx932x_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(sx932x_volatile_reg_ranges),
+};
+
+static const struct regmap_config sx932x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = SX932X_REG_REVISION,
+ .cache_type = REGCACHE_RBTREE,
+
+ .wr_table = &sx932x_writeable_regs,
+ .rd_table = &sx932x_readable_regs,
+ .volatile_table = &sx932x_volatile_regs,
+};
+
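+/*
+ * Reference-counted helpers for register bits shared between raw
+ * reads, events and the triggered buffer: a bit is set for the first
+ * user and cleared again once the last user is gone.
+ */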
+static int sx932x_inc_users(struct sx932x_data *data, int *counter,
+ unsigned int reg, unsigned int bitmask)
+{
+ ++(*counter);
+ if (*counter != 1)
+ /* Bit is already active, nothing to do. */
+ return 0;
+
+ return regmap_update_bits(data->regmap, reg, bitmask, bitmask);
+}
+
+static int sx932x_dec_users(struct sx932x_data *data, int *counter,
+ unsigned int reg, unsigned int bitmask)
+{
+ --(*counter);
+ if (*counter != 0)
+ /* There are more users, do not deactivate. */
+ return 0;
+
+ return regmap_update_bits(data->regmap, reg, bitmask, 0);
+}
+
+static int sx932x_inc_chan_users(struct sx932x_data *data, int chan)
+{
+ return sx932x_inc_users(data, &data->channel_users[chan],
+ SX932X_REG_PROX_CTRL0, BIT(chan));
+}
+
+static int sx932x_dec_chan_users(struct sx932x_data *data, int chan)
+{
+ return sx932x_dec_users(data, &data->channel_users[chan],
+ SX932X_REG_PROX_CTRL0, BIT(chan));
+}
+
+static int sx932x_inc_data_rdy_users(struct sx932x_data *data)
+{
+ return sx932x_inc_users(data, &data->data_rdy_users,
+ SX932X_REG_IRQ_MSK, SX932X_CONVDONE_IRQ);
+}
+
+static int sx932x_dec_data_rdy_users(struct sx932x_data *data)
+{
+ return sx932x_dec_users(data, &data->data_rdy_users,
+ SX932X_REG_IRQ_MSK, SX932X_CONVDONE_IRQ);
+}
+
+static int sx932x_inc_close_far_users(struct sx932x_data *data)
+{
+ return sx932x_inc_users(data, &data->close_far_users,
+ SX932X_REG_IRQ_MSK,
+ SX932X_CLOSE_IRQ | SX932X_FAR_IRQ);
+}
+
+static int sx932x_dec_close_far_users(struct sx932x_data *data)
+{
+ return sx932x_dec_users(data, &data->close_far_users,
+ SX932X_REG_IRQ_MSK,
+ SX932X_CLOSE_IRQ | SX932X_FAR_IRQ);
+}
+
+static int sx932x_read_prox_data(struct sx932x_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+ __be16 regval;
+ const int regid = chan->channel & 3;
+
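+ /* All phases share the readback registers; select the phase first. */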
+ ret = regmap_write(data->regmap, SX932X_REG_PHASE_SEL, regid);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, chan->address, &regval, 2);
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(be16_to_cpu(regval), 15);
+
+ return 0;
+}
+
+static int sx932x_read_stat_data(struct sx932x_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+
+ ret = regmap_read(data->regmap, chan->address, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * If we have no interrupt support, we have to wait for a scan period
+ * after enabling a channel to get a result.
+ */
+static int sx932x_wait_for_sample(struct sx932x_data *data)
+{
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, SX932X_REG_PROX_CTRL0, &val);
+ if (ret < 0)
+ return ret;
+
+ val = (val & SX932X_SCAN_PERIOD_MASK) >> SX932X_SCAN_PERIOD_SHIFT;
+
+ msleep(sx932x_scan_period_table[val]);
+
+ return 0;
+}
+
+static int sx932x_read_stat(struct sx932x_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ ret = sx932x_inc_chan_users(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = sx932x_inc_data_rdy_users(data);
+ if (ret < 0)
+ goto out_dec_chan;
+
+ mutex_unlock(&data->mutex);
+
+ if (data->client->irq > 0)
+ ret = wait_for_completion_interruptible(&data->completion);
+ else
+ ret = sx932x_wait_for_sample(data);
+
+ mutex_lock(&data->mutex);
+
+ if (ret < 0)
+ goto out_dec_data_rdy;
+
+ ret = sx932x_read_stat_data(data, chan, val);
+ if (ret < 0)
+ goto out_dec_data_rdy;
+
+ ret = sx932x_dec_data_rdy_users(data);
+ if (ret < 0)
+ goto out_dec_chan;
+
+ ret = sx932x_dec_chan_users(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = IIO_VAL_INT;
+
+ goto out;
+
+out_dec_data_rdy:
+ sx932x_dec_data_rdy_users(data);
+out_dec_chan:
+ sx932x_dec_chan_users(data, chan->channel);
+out:
+ mutex_unlock(&data->mutex);
+ reinit_completion(&data->completion);
+
+ return ret;
+}
+
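+/*
+ * One-shot reading: take a reference on the channel and the
+ * conversion-done IRQ, wait for a sample, read it back and drop the
+ * references again.
+ */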
+static int sx932x_read_proximity(struct sx932x_data *data,
+ const struct iio_chan_spec *chan,
+ int *val)
+{
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ ret = sx932x_inc_chan_users(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = sx932x_inc_data_rdy_users(data);
+ if (ret < 0)
+ goto out_dec_chan;
+
+ mutex_unlock(&data->mutex);
+
+ if (data->client->irq > 0)
+ ret = wait_for_completion_interruptible(&data->completion);
+ else
+ ret = sx932x_wait_for_sample(data);
+
+ mutex_lock(&data->mutex);
+
+ if (ret < 0)
+ goto out_dec_data_rdy;
+
+ ret = sx932x_read_prox_data(data, chan, val);
+ if (ret < 0)
+ goto out_dec_data_rdy;
+
+ ret = sx932x_dec_data_rdy_users(data);
+ if (ret < 0)
+ goto out_dec_chan;
+
+ ret = sx932x_dec_chan_users(data, chan->channel);
+ if (ret < 0)
+ goto out;
+
+ ret = IIO_VAL_INT;
+
+ goto out;
+
+out_dec_data_rdy:
+ sx932x_dec_data_rdy_users(data);
+out_dec_chan:
+ sx932x_dec_chan_users(data, chan->channel);
+out:
+ mutex_unlock(&data->mutex);
+ reinit_completion(&data->completion);
+
+ return ret;
+}
+
+static int sx932x_read_samp_freq(struct sx932x_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ unsigned int regval;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX932X_REG_PROX_CTRL0, &regval);
+ mutex_unlock(&data->mutex);
+
+ if (ret < 0)
+ return ret;
+
+ regval = (regval & SX932X_SCAN_PERIOD_MASK) >> SX932X_SCAN_PERIOD_SHIFT;
+ *val = sx932x_samp_freq_table[regval].val;
+ *val2 = sx932x_samp_freq_table[regval].val2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int sx932x_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ ret = sx932x_read_proximity(data, chan, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_PROCESSED:
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ ret = sx932x_read_stat(data, chan, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx932x_read_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int sx932x_set_samp_freq(struct sx932x_data *data,
+ int val, int val2)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(sx932x_samp_freq_table); i++)
+ if (val == sx932x_samp_freq_table[i].val &&
+ val2 == sx932x_samp_freq_table[i].val2)
+ break;
+
+ if (i == ARRAY_SIZE(sx932x_samp_freq_table))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_update_bits(data->regmap, SX932X_REG_PROX_CTRL0,
+ SX932X_SCAN_PERIOD_MASK,
+ i << SX932X_SCAN_PERIOD_SHIFT);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx932x_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long mask)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return sx932x_set_samp_freq(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t sx932x_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx932x_data *data = iio_priv(indio_dev);
+
+ if (data->trigger_enabled)
+ iio_trigger_poll(data->trig);
+
+ /*
+ * Even if no event is enabled, we need to wake the thread to
+ * clear the interrupt state by reading SX932X_REG_IRQ_SRC. It
+ * is not possible to do that here because regmap_read takes a
+ * mutex.
+ */
+ return IRQ_WAKE_THREAD;
+}
+
+static void sx932x_push_events(struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int val, chan;
+ struct sx932x_data *data = iio_priv(indio_dev);
+
+ ret = regmap_read(data->regmap, SX932X_REG_STAT0, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ return;
+ }
+
+ for (chan = 0; chan < SX932X_NUM_CHANNELS; chan++) {
+ int dir;
+ u64 ev;
+ bool new_prox = val & BIT(chan);
+
+ if (!data->event_enabled[chan])
+ continue;
+ if (new_prox == data->prox_stat[chan])
+ /* No change on this channel. */
+ continue;
+
+ dir = new_prox ? IIO_EV_DIR_FALLING : IIO_EV_DIR_RISING;
+ ev = IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, chan,
+ IIO_EV_TYPE_THRESH, dir);
+ iio_push_event(indio_dev, ev, iio_get_time_ns());
+ data->prox_stat[chan] = new_prox;
+ }
+}
+
+static irqreturn_t sx932x_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ mutex_lock(&data->mutex);
+
+ ret = regmap_read(data->regmap, SX932X_REG_IRQ_SRC, &val);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "i2c transfer error in irq\n");
+ goto out;
+ }
+
+ if (val & (SX932X_CLOSE_IRQ | SX932X_FAR_IRQ))
+ sx932x_push_events(indio_dev);
+
+ if (val & SX932X_CONVDONE_IRQ)
+ complete(&data->completion);
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return IRQ_HANDLED;
+}
+
+static int sx932x_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+
+ if (chan->type != IIO_PROXIMITY || type != IIO_EV_TYPE_THRESH ||
+ dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ return data->event_enabled[chan->channel & SX932X_CHAN_MASK];
+}
+
+static int sx932x_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_PROXIMITY || type != IIO_EV_TYPE_THRESH ||
+ dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+
+ if (state == 1) {
+ ret = sx932x_inc_chan_users(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ ret = sx932x_inc_close_far_users(data);
+ if (ret < 0)
+ goto out_undo_chan;
+ } else {
+ ret = sx932x_dec_chan_users(data, chan->channel);
+ if (ret < 0)
+ goto out_unlock;
+ ret = sx932x_dec_close_far_users(data);
+ if (ret < 0)
+ goto out_undo_chan;
+ }
+
+ data->event_enabled[chan->channel & SX932X_CHAN_MASK] = state;
+ goto out_unlock;
+
+out_undo_chan:
+ if (state == 1)
+ sx932x_dec_chan_users(data, chan->channel);
+ else
+ sx932x_inc_chan_users(data, chan->channel);
+out_unlock:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int sx932x_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ kfree(data->buffer);
+ data->buffer = kzalloc(indio_dev->scan_bytes, GFP_KERNEL);
+ mutex_unlock(&data->mutex);
+
+ if (data->buffer == NULL)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static ssize_t sx932x_uid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+
+ if (adev == NULL)
+ return sprintf(buf, "%d\n", indio_dev->id);
+ else
+ return sprintf(buf, "%s\n", acpi_device_uid(adev));
+}
+
+static IIO_DEVICE_ATTR(uid, 0444, sx932x_uid_show, NULL, 0);
+
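+/* Raw sysfs access to the ADV_CTRL registers, for tuning from user space. */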
+struct sx932x_ctrl_attribute {
+ struct device_attribute dev_attr;
+ u8 ctrl;
+};
+
+static inline struct sx932x_ctrl_attribute *sx932x_to_ctrl_attr(
+ struct device_attribute *attr)
+{
+ return container_of(attr, struct sx932x_ctrl_attribute, dev_attr);
+}
+
+static ssize_t sx932x_show_ctrl_attr(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sx932x_ctrl_attribute *attr = sx932x_to_ctrl_attr(dev_attr);
+ struct sx932x_data *data = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, attr->ctrl, &val);
+ mutex_unlock(&data->mutex);
+ if (ret)
+ return ret;
+
+ return scnprintf(buf, PAGE_SIZE, "%02x\n", val);
+}
+
+static ssize_t sx932x_store_ctrl_attr(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct sx932x_ctrl_attribute *attr = sx932x_to_ctrl_attr(dev_attr);
+ struct sx932x_data *data = iio_priv(indio_dev);
+ u8 val;
+ int ret;
+
+ if (kstrtou8(buf, 0, &val))
+ return -EINVAL;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, attr->ctrl, val);
+ mutex_unlock(&data->mutex);
+ if (ret)
+ return ret;
+
+ return len;
+}
+
+#define SX932X_CTRL_ATTR_FILL(_name, _mode, _show, _store, _ctrl, _offset) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .ctrl = _ctrl + _offset }
+
+#define SX932X_CTRL_ATTR(_name, _mode, _show, _store, _ctrl, _offset) \
+ struct sx932x_ctrl_attribute sx932x_ctrl_attr_##_name = \
+ SX932X_CTRL_ATTR_FILL( \
+ _name, _mode, _show, _store, _ctrl, _offset)
+
+#define SX932X_ADV_CTRL_RW(_ctrl) \
+ SX932X_CTRL_ATTR(adv_ctrl##_ctrl##_raw, 0644, \
+ sx932x_show_ctrl_attr, sx932x_store_ctrl_attr, \
+ _ctrl, SX932X_REG_ADV_CTRL0)
+
+static SX932X_ADV_CTRL_RW(0);
+static SX932X_ADV_CTRL_RW(1);
+static SX932X_ADV_CTRL_RW(2);
+static SX932X_ADV_CTRL_RW(3);
+static SX932X_ADV_CTRL_RW(4);
+static SX932X_ADV_CTRL_RW(5);
+static SX932X_ADV_CTRL_RW(6);
+static SX932X_ADV_CTRL_RW(7);
+static SX932X_ADV_CTRL_RW(8);
+static SX932X_ADV_CTRL_RW(9);
+static SX932X_ADV_CTRL_RW(10);
+static SX932X_ADV_CTRL_RW(11);
+static SX932X_ADV_CTRL_RW(12);
+static SX932X_ADV_CTRL_RW(13);
+static SX932X_ADV_CTRL_RW(14);
+static SX932X_ADV_CTRL_RW(15);
+static SX932X_ADV_CTRL_RW(16);
+static SX932X_ADV_CTRL_RW(17);
+static SX932X_ADV_CTRL_RW(18);
+static SX932X_ADV_CTRL_RW(19);
+static SX932X_ADV_CTRL_RW(20);
+
+static struct attribute *sx932x_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ &iio_dev_attr_uid.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl0_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl1_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl2_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl3_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl4_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl5_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl6_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl7_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl8_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl9_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl10_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl11_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl12_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl13_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl14_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl15_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl16_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl17_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl18_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl19_raw.dev_attr.attr,
+ &sx932x_ctrl_attr_adv_ctrl20_raw.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group sx932x_attribute_group = {
+ .attrs = sx932x_attributes,
+};
+
+static const struct iio_info sx932x_info = {
+ .attrs = &sx932x_attribute_group,
+ .read_raw = &sx932x_read_raw,
+ .write_raw = &sx932x_write_raw,
+ .read_event_config = &sx932x_read_event_config,
+ .write_event_config = &sx932x_write_event_config,
+ .update_scan_mode = &sx932x_update_scan_mode,
+};
+
+static int sx932x_set_trigger_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+
+ if (state)
+ ret = sx932x_inc_data_rdy_users(data);
+ else
+ ret = sx932x_dec_data_rdy_users(data);
+ if (ret < 0)
+ goto out;
+
+ data->trigger_enabled = state;
+
+out:
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_trigger_ops sx932x_trigger_ops = {
+ .set_trigger_state = sx932x_set_trigger_state,
+};
+
+static irqreturn_t sx932x_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int val, bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = sx932x_read_prox_data(data, &indio_dev->channels[bit],
+ &val);
+ if (ret < 0)
+ goto out;
+
+ data->buffer[i++] = val;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ iio_get_time_ns());
+
+out:
+ mutex_unlock(&data->mutex);
+
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int sx932x_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret = 0, i;
+
+ mutex_lock(&data->mutex);
+
+ for (i = 0; i < SX932X_NUM_CHANNELS; i++)
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ ret = sx932x_inc_chan_users(data, i);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ for (i = i - 1; i >= 0; i--)
+ if (test_bit(i, indio_dev->active_scan_mask))
+ sx932x_dec_chan_users(data, i);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int sx932x_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret = 0, i;
+
+ iio_triggered_buffer_predisable(indio_dev);
+
+ mutex_lock(&data->mutex);
+
+ for (i = 0; i < SX932X_NUM_CHANNELS; i++)
+ if (test_bit(i, indio_dev->active_scan_mask)) {
+ ret = sx932x_dec_chan_users(data, i);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ for (i = i - 1; i >= 0; i--)
+ if (test_bit(i, indio_dev->active_scan_mask))
+ sx932x_inc_chan_users(data, i);
+
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_buffer_setup_ops sx932x_buffer_setup_ops = {
+ .preenable = sx932x_buffer_preenable,
+ .postenable = iio_triggered_buffer_postenable,
+ .predisable = sx932x_buffer_predisable,
+};
+
+#define SX9320_REG_CONFIG(_name, _reg, _def) \
+{ \
+ .register_name = SX9320_ACPI_NAME ",reg_" _name, \
+ .reg = SX932X_REG_##_reg, \
+ .def = _def \
+}
+
+#define SX9321_REG_CONFIG(_name, _reg, _def) \
+{ \
+ .register_name = SX9321_ACPI_NAME ",reg_" _name, \
+ .reg = SX932X_REG_##_reg, \
+ .def = _def \
+}
+
+static const struct sx932x_reg_config sx9320_reg_config[] = {
+ {
+ .register_name = NULL,
+ .reg = SX932X_REG_ADV_CTRL11,
+ .def = (5 << 4) | 2,
+ },
+ {
+ .register_name = NULL,
+ .reg = SX932X_REG_ADV_CTRL12,
+ .def = (11 << 4) | 5,
+ },
+ SX9320_REG_CONFIG("adv_ctrl10", ADV_CTRL10, (5 << 4) | 12),
+ SX9320_REG_CONFIG("afe_ctrl4", AFE_CTRL4, 7),
+ SX9320_REG_CONFIG("afe_ctrl7", AFE_CTRL7, 7),
+ SX9320_REG_CONFIG("afe_ctrl9", AFE_CTRL9, 15),
+ SX9320_REG_CONFIG("prox_ctrl0", PROX_CTRL0, (2 << 3) | 2),
+ SX9320_REG_CONFIG("prox_ctrl1", PROX_CTRL1, (2 << 3) | 2),
+ SX9320_REG_CONFIG("prox_ctrl2", PROX_CTRL2, 0x80 | 16),
+ SX9320_REG_CONFIG("prox_ctrl4", PROX_CTRL4, (1 << 3) | 4),
+ SX9320_REG_CONFIG("prox_ctrl5", PROX_CTRL5, (1 << 4) | 2),
+ SX9320_REG_CONFIG("prox_ctrl6", PROX_CTRL6, 60),
+ SX9320_REG_CONFIG("prox_ctrl7", PROX_CTRL7, 88),
+ SX9320_REG_CONFIG("adv_ctrl16", ADV_CTRL16, (3 << 4) | (2 << 2)),
+ SX9320_REG_CONFIG("adv_ctrl17", ADV_CTRL17, (5 << 4) | 6),
+ SX9320_REG_CONFIG("adv_ctrl18", ADV_CTRL18, (3 << 4) | 3),
+ SX9320_REG_CONFIG("gnrl_ctrl1", GNRL_CTRL1, 0x2f),
+ {
+ .register_name = NULL,
+ .reg = 0,
+ .def = 0,
+ },
+};
+
+static const struct sx932x_reg_config sx9321_reg_config[] = {
+ {
+ .register_name = NULL,
+ .reg = SX932X_REG_ADV_CTRL11,
+ .def = (5 << 4) | 2,
+ },
+ {
+ .register_name = NULL,
+ .reg = SX932X_REG_ADV_CTRL12,
+ .def = (11 << 4) | 5,
+ },
+ SX9321_REG_CONFIG("adv_ctrl10", ADV_CTRL10, (5 << 4) | 12),
+ SX9321_REG_CONFIG("afe_ctrl4", AFE_CTRL4, 7),
+ SX9321_REG_CONFIG("afe_ctrl7", AFE_CTRL7, 7),
+ SX9321_REG_CONFIG("afe_ctrl9", AFE_CTRL9, 15),
+ SX9321_REG_CONFIG("prox_ctrl0", PROX_CTRL0, (2 << 3) | 2),
+ SX9321_REG_CONFIG("prox_ctrl1", PROX_CTRL1, (2 << 3) | 2),
+ SX9321_REG_CONFIG("prox_ctrl2", PROX_CTRL2, 0x80 | 16),
+ SX9321_REG_CONFIG("prox_ctrl4", PROX_CTRL4, (1 << 3) | 4),
+ SX9321_REG_CONFIG("prox_ctrl5", PROX_CTRL5, (1 << 4) | 2),
+ SX9321_REG_CONFIG("prox_ctrl6", PROX_CTRL6, 60),
+ SX9321_REG_CONFIG("prox_ctrl7", PROX_CTRL7, 88),
+ SX9321_REG_CONFIG("adv_ctrl16", ADV_CTRL16, (3 << 4) | (2 << 2)),
+ SX9321_REG_CONFIG("adv_ctrl17", ADV_CTRL17, (5 << 4) | 6),
+ SX9321_REG_CONFIG("adv_ctrl18", ADV_CTRL18, (3 << 4) | 3),
+ SX9321_REG_CONFIG("gnrl_ctrl1", GNRL_CTRL1, 0x2f),
+ {
+ .register_name = NULL,
+ .reg = 0,
+ .def = 0,
+ },
+};
+
+static const struct sx932x_reg_config *sx932x_reg_configs[] = {
+ sx9320_reg_config,
+ sx9321_reg_config
+};
+
+static int sx932x_read_register_property(struct acpi_device *adev,
+ const struct sx932x_reg_config *cfg,
+ u8 *value)
+{
+ /* FIXME: only ACPI supported. */
+ const union acpi_object *acpi_value = NULL;
+ int ret;
+
+ if ((adev == NULL) || (cfg->register_name == NULL)) {
+ *value = cfg->def;
+ return 0;
+ }
+
+ ret = acpi_dev_get_property(adev, cfg->register_name,
+ ACPI_TYPE_INTEGER, &acpi_value);
+ switch (ret) {
+ case -EPROTO:
+ dev_err(&adev->dev, "ACPI property %s typed incorrectly\n",
+ cfg->register_name);
+ break;
+ case -EINVAL:
+ dev_dbg(&adev->dev, "property %s missing from ACPI\n",
+ cfg->register_name);
+ break;
+ }
+
+ *value = acpi_value ? (u8)acpi_value->integer.value : cfg->def;
+ return 0;
+}
+
+static int sx932x_load_config(struct device *dev, struct sx932x_data *data)
+{
+ u8 val;
+ int i, ret;
+ const struct sx932x_reg_config *cfg;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ for (i = 0; data->reg_config[i].reg != 0; i++) {
+ cfg = &data->reg_config[i];
+ ret = sx932x_read_register_property(adev, cfg, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, cfg->reg, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Activate all channels and perform an initial compensation. */
+static int sx932x_init_compensation(struct iio_dev *indio_dev)
+{
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int i, ret;
+ bool success = false;
+ unsigned int val;
+
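+ /* Poll COMPSTAT for up to ~1-2 seconds; it clears when compensation is done. */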
+ for (i = 100; i >= 0; i--) {
+ usleep_range(10000, 20000);
+ ret = regmap_read(data->regmap, SX932X_REG_STAT2, &val);
+ if (ret < 0)
+ goto out;
+ if (!(val & SX932X_COMPSTAT_MASK)) {
+ success = true;
+ break;
+ }
+ }
+
+ if (success) {
+ dev_info(&data->client->dev,
+ "initial compensation success");
+ } else {
+ dev_err(&data->client->dev,
+ "initial compensation timed out: 0x%02x", val);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ return ret;
+}
+
+static int sx932x_init_device(struct iio_dev *indio_dev)
+{
+ struct device *dev = &indio_dev->dev;
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+ unsigned int val;
+
+ ret = regmap_read(data->regmap, SX932X_REG_WHOAMI, &val);
+ if (ret < 0 || (val != SX9320_WHOAMI && val != SX9321_WHOAMI)) {
+ dev_err(&data->client->dev,
+ "Unable to identify the chip: %d - 0x%02x", ret, val);
+ return -ENODEV;
+ }
+
+ ret = regmap_write(data->regmap, SX932X_REG_IRQ_MSK, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, SX932X_REG_RESET,
+ SX932X_SOFT_RESET);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the reset to complete. */
+ usleep_range(10000, 20000);
+
+ ret = regmap_write(data->regmap, SX932X_REG_RESET, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(data->regmap, SX932X_REG_IRQ_SRC, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = sx932x_load_config(dev, data);
+ if (ret < 0)
+ return ret;
+
+ return sx932x_init_compensation(indio_dev);
+}
+
+static int sx932x_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ const struct acpi_device_id *acpi_id;
+ struct sx932x_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
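+ /* Pick the per-chip register defaults from the I2C or ACPI match table. */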
+ if (i2c_id) {
+ data->reg_config = sx932x_reg_configs[i2c_id->driver_data];
+ } else if (ACPI_HANDLE(&client->dev)) {
+ acpi_id = acpi_match_device(
+ client->dev.driver->acpi_match_table,
+ &client->dev);
+ if (!acpi_id) {
+ dev_err(&client->dev, "No driver data\n");
+ return -EINVAL;
+ }
+ data->reg_config = sx932x_reg_configs[acpi_id->driver_data];
+ } else {
+ return -ENODEV;
+ }
+ mutex_init(&data->mutex);
+ init_completion(&data->completion);
+ data->trigger_enabled = false;
+
+ data->regmap = devm_regmap_init_i2c(client, &sx932x_regmap_config);
+ if (IS_ERR(data->regmap))
+ return PTR_ERR(data->regmap);
+
+ ACPI_COMPANION_SET(&indio_dev->dev, ACPI_COMPANION(&client->dev));
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = SX932X_DRIVER_NAME;
+ indio_dev->channels = sx932x_channels;
+ indio_dev->num_channels = ARRAY_SIZE(sx932x_channels);
+ indio_dev->info = &sx932x_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ i2c_set_clientdata(client, indio_dev);
+
+ ret = sx932x_init_device(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "tried to init sx9320 - error %d\n",
+ ret);
+ return ret;
+ }
+
+ if (client->irq <= 0)
+ dev_warn(&client->dev, "no valid irq found\n");
+ else {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ sx932x_irq_handler, sx932x_irq_thread_handler,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ SX932X_IRQ_NAME, indio_dev);
+ if (ret < 0)
+ return ret;
+
+ data->trig = devm_iio_trigger_alloc(&client->dev,
+ "%s-dev%d", indio_dev->name, indio_dev->id);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->dev.parent = &client->dev;
+ data->trig->ops = &sx932x_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, indio_dev);
+
+ ret = iio_trigger_register(data->trig);
+ if (ret)
+ return ret;
+ }
+
+ ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
+ NULL, sx932x_trigger_handler,
+ &sx932x_buffer_setup_ops);
+ if (ret < 0)
+ goto out_trigger_unregister;
+
+ ret = devm_iio_device_register(&client->dev, indio_dev);
+ if (ret < 0)
+ goto out_trigger_unregister;
+
+ return 0;
+
+out_trigger_unregister:
+ if (client->irq > 0)
+ iio_trigger_unregister(data->trig);
+
+ return ret;
+}
+
+static int sx932x_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct sx932x_data *data = iio_priv(indio_dev);
+
+ if (client->irq > 0)
+ iio_trigger_unregister(data->trig);
+ kfree(data->buffer);
+ return 0;
+}
+
+static int __maybe_unused sx932x_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = regmap_read(data->regmap, SX932X_REG_GNRL_CTRL1,
+ &data->suspend_reg_gnrl_ctrl1);
+
+ if (ret < 0)
+ goto out;
+
+ /* Disable all phases and send the device to sleep. */
+ ret = regmap_write(data->regmap, SX932X_REG_GNRL_CTRL1, 0);
+
+out:
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int __maybe_unused sx932x_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct sx932x_data *data = iio_priv(indio_dev);
+ int ret;
+
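+ /* Restore the phases that were enabled when the device was suspended. */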
+ mutex_lock(&data->mutex);
+ ret = regmap_write(data->regmap, SX932X_REG_GNRL_CTRL1,
+ data->suspend_reg_gnrl_ctrl1);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct dev_pm_ops sx932x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sx932x_suspend, sx932x_resume)
+};
+
+static const struct acpi_device_id sx932x_acpi_match[] = {
+ {SX9320_ACPI_NAME, 0},
+ {SX9321_ACPI_NAME, 1},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sx932x_acpi_match);
+
+static const struct of_device_id sx932x_of_match[] = {
+ { .compatible = "semtech,sx9320", },
+ { .compatible = "semtech,sx9321", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sx932x_of_match);
+
+static const struct i2c_device_id sx932x_id[] = {
+ {"sx9320", 0},
+ {"sx9321", 1},
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, sx932x_id);
+
+static struct i2c_driver sx932x_driver = {
+ .driver = {
+ .name = SX932X_DRIVER_NAME,
+ .acpi_match_table = ACPI_PTR(sx932x_acpi_match),
+ .of_match_table = of_match_ptr(sx932x_of_match),
+ .pm = &sx932x_pm_ops,
+ },
+ .probe = sx932x_probe,
+ .remove = sx932x_remove,
+ .id_table = sx932x_id,
+};
+module_i2c_driver(sx932x_driver);
+
+MODULE_AUTHOR("Gwendal Grignou <gwendal@chromium.org>");
+MODULE_DESCRIPTION("Driver for Semtech SX9320/SX9321 proximity sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index aa26f3c3416bbb..c151bb625179cf 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -33,6 +33,18 @@ config INFINIBAND_USER_ACCESS
libibverbs, libibcm and a hardware driver library from
<http://www.openfabrics.org/git/>.
+config INFINIBAND_USER_ACCESS_UCM
+ bool "Userspace CM (UCM, DEPRECATED)"
+ depends on BROKEN
+ depends on INFINIBAND_USER_ACCESS
+ help
+ The UCM module has known security flaws, which no one is
+ interested in fixing. The user-space part of this code was
+ dropped from upstream a long time ago.
+
+ This option is DEPRECATED and planned to be removed.
+
+
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile
index d43a8994ac5c12..737612a442be12 100644
--- a/drivers/infiniband/core/Makefile
+++ b/drivers/infiniband/core/Makefile
@@ -5,8 +5,8 @@ obj-$(CONFIG_INFINIBAND) += ib_core.o ib_mad.o ib_sa.o \
ib_cm.o iw_cm.o ib_addr.o \
$(infiniband-y)
obj-$(CONFIG_INFINIBAND_USER_MAD) += ib_umad.o
-obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
- $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o $(user_access-y)
+obj-$(CONFIG_INFINIBAND_USER_ACCESS_UCM) += ib_ucm.o $(user_access-y)
ib_core-y := packer.o ud_header.o verbs.o sysfs.o \
device.o fmr_pool.o cache.o netlink.o \
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d57a78ec74251d..1454290078def0 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -544,6 +544,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
dgid = (union ib_gid *) &addr->sib_addr;
pkey = ntohs(addr->sib_pkey);
+ mutex_lock(&lock);
list_for_each_entry(cur_dev, &dev_list, list) {
for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
if (!rdma_cap_af_ib(cur_dev->device, p))
@@ -567,18 +568,19 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
cma_dev = cur_dev;
sgid = gid;
id_priv->id.port_num = p;
+ goto found;
}
}
}
}
-
- if (!cma_dev)
- return -ENODEV;
+ mutex_unlock(&lock);
+ return -ENODEV;
found:
cma_attach_to_dev(id_priv, cma_dev);
- addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
- memcpy(&addr->sib_addr, &sgid, sizeof sgid);
+ mutex_unlock(&lock);
+ addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
+ memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
return 0;
}
@@ -1280,9 +1282,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
(addr->src_addr.ss_family == AF_IB ||
cma_protocol_roce_dev_port(id->device, port_num));
- return !addr->dev_addr.bound_dev_if ||
- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
- addr->dev_addr.bound_dev_if == net_dev->ifindex);
+ /*
+ * Net namespaces must match, and if the listener is listening
+ * on a specific netdevice then the netdevice must match as well.
+ */
+ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
+ (!!addr->dev_addr.bound_dev_if ==
+ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
+ return true;
+ else
+ return false;
}
static struct rdma_id_private *cma_find_listener(
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8d84c563ba7535..616173b7a5e8a9 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1548,7 +1548,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
mad_reg_req->oui, 3)) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(!*method);
+ if (!*method)
+ goto error3;
goto check_in_use;
}
}
@@ -1558,10 +1559,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
vclass]->oui[i])) {
method = &(*vendor_table)->vendor_class[
vclass]->method_table[i];
- BUG_ON(*method);
/* Allocate method table for this OUI */
- if ((ret = allocate_method_table(method)))
- goto error3;
+ if (!*method) {
+ ret = allocate_method_table(method);
+ if (ret)
+ goto error3;
+ }
memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
mad_reg_req->oui, 3);
goto check_in_use;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 564adf3116e875..4b3a00855f520e 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -46,6 +46,8 @@
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
+
#include <asm/uaccess.h>
#include <rdma/ib.h>
@@ -1115,6 +1117,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 795938edce3fce..3e4d3d5560bf10 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -44,6 +44,8 @@
#include <linux/module.h>
#include <linux/nsproxy.h>
+#include <linux/nospec.h>
+
#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
@@ -123,6 +125,8 @@ static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
+static const struct file_operations ucma_fops;
+
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
@@ -217,7 +221,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
return NULL;
mutex_lock(&mut);
- mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
+ mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL);
mutex_unlock(&mut);
if (mc->id < 0)
goto error;
@@ -1375,6 +1379,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
goto err3;
}
+ mutex_lock(&mut);
+ idr_replace(&multicast_idr, mc, mc->id);
+ mutex_unlock(&mut);
+
mutex_unlock(&file->mut);
ucma_put_ctx(ctx);
return 0;
@@ -1531,6 +1539,10 @@ static ssize_t ucma_migrate_id(struct ucma_file *new_file,
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
+ if (f.file->f_op != &ucma_fops) {
+ ret = -EINVAL;
+ goto file_put;
+ }
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1617,6 +1629,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
return -EINVAL;
+ hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));
if (hdr.in + sizeof(hdr) > len)
return -EINVAL;
@@ -1699,6 +1712,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
mutex_lock(&mut);
if (!ctx->closing) {
mutex_unlock(&mut);
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
/* rdma_destroy_id ensures that no event handlers are
* inflight for that id before releasing it.
*/
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6790ebb366ddaa..8762eac47570c2 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -95,6 +95,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
DEFINE_DMA_ATTRS(attrs);
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
+ unsigned int gup_flags = FOLL_WRITE;
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -122,16 +123,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->address = addr;
umem->page_size = PAGE_SIZE;
umem->pid = get_task_pid(current, PIDTYPE_PID);
- /*
- * We ask for writable memory if any of the following
- * access flags are set. "Local write" and "remote write"
- * obviously require write access. "Remote atomic" can do
- * things like fetch and add, which will modify memory, and
- * "MW bind" can change permissions by binding a window.
- */
- umem->writable = !!(access &
- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+ umem->writable = ib_access_writable(access);
if (access & IB_ACCESS_ON_DEMAND) {
put_pid(umem->pid);
@@ -186,6 +178,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (ret)
goto out;
+ if (!umem->writable)
+ gup_flags |= FOLL_FORCE;
+
need_release = 1;
sg_list_start = umem->sg_head.sgl;
@@ -193,7 +188,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- 1, !umem->writable, page_list, vma_list);
+ gup_flags, page_list, vma_list);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 40becdb3196e07..738ccfee7caefe 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -527,6 +527,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
u64 off;
int j, k, ret = 0, start_idx, npages = 0;
u64 base_virt_addr;
+ unsigned int flags = 0;
if (access_mask == 0)
return -EINVAL;
@@ -556,6 +557,9 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
goto out_put_task;
}
+ if (access_mask & ODP_WRITE_ALLOWED_BIT)
+ flags |= FOLL_WRITE;
+
start_idx = (user_virt - ib_umem_start(umem)) >> PAGE_SHIFT;
k = start_idx;
@@ -574,8 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
*/
npages = get_user_pages(owning_process, owning_mm, user_virt,
gup_num_pages,
- access_mask & ODP_WRITE_ALLOWED_BIT, 0,
- local_page_list, NULL);
+ flags, local_page_list, NULL);
up_read(&owning_mm->mmap_sem);
if (npages < 0)
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index e1629ab58db787..8218d714fa0144 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -926,7 +926,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
return -ENOMEM;
mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 53aa7515f542a5..04206c600098f4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1183,6 +1183,12 @@ static void flush_qp(struct c4iw_qp *qhp)
t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
+
+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+ if (qhp->wq.flushed)
+ return;
+
+ qhp->wq.flushed = 1;
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index d862b9b7910e48..199a9cdd0d12a3 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1780,7 +1780,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
"buf:%lld\n", wc.wr_id);
break;
default:
- BUG_ON(1);
break;
}
} else {
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ce87e9cc7effa8..bf52e35dd506a6 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -130,6 +130,40 @@ out:
return err;
}
+static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+ u64 length, u64 virt_addr,
+ int access_flags)
+{
+ /*
+ * Force registering the memory as writable if the underlying pages
+ * are writable. This is so rereg can change the access permissions
+ * from readable to writable without having to run through ib_umem_get
+ * again
+ */
+ if (!ib_access_writable(access_flags)) {
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ /*
+ * FIXME: Ideally this would iterate over all the vmas that
+ * cover the memory, but for now it requires a single vma to
+ * entirely cover the MR to support RO mappings.
+ */
+ vma = find_vma(current->mm, start);
+ if (vma && vma->vm_end >= start + length &&
+ vma->vm_start <= start) {
+ if (vma->vm_flags & VM_WRITE)
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ } else {
+ access_flags |= IB_ACCESS_LOCAL_WRITE;
+ }
+
+ up_read(&current->mm->mmap_sem);
+ }
+
+ return ib_umem_get(context, start, length, access_flags, 0);
+}
+
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata)
@@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (!mr)
return ERR_PTR(-ENOMEM);
- /* Force registering the memory as writable. */
- /* Used for memory re-registeration. HCA protects the access */
- mr->umem = ib_umem_get(pd->uobject->context, start, length,
- access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+ mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+ virt_addr, access_flags);
if (IS_ERR(mr->umem)) {
err = PTR_ERR(mr->umem);
goto err_free;
@@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
+ if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+ return -EPERM;
+
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
@@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
ib_umem_release(mmr->umem);
- mmr->umem = ib_umem_get(mr->uobject->context, start, length,
- mr_access_flags |
- IB_ACCESS_LOCAL_WRITE,
- 0);
+ mmr->umem =
+ mlx4_get_umem_mr(mr->uobject->context, start, length,
+ virt_addr, mr_access_flags);
if (IS_ERR(mmr->umem)) {
err = PTR_ERR(mmr->umem);
/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 7d2e42dd692657..8676685dbf3d46 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -472,8 +472,8 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
goto out;
}
- ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
- pages, NULL);
+ ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1,
+ FOLL_WRITE, pages, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 748b63b86cbc60..40242ead096f42 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
struct ocrdma_stats *pstats = filp->private_data;
struct ocrdma_dev *dev = pstats->dev;
- if (count > 32)
+ if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
goto err;
if (copy_from_user(tmp_str, buffer, count))
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 7df16f74bb4585..c6c75b99cf2c46 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1451,8 +1451,7 @@ u64 qib_sps_ints(void);
/*
* dma_addr wrappers - all 0's invalid for hw
*/
-dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
- size_t, int);
+int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
const char *qib_get_unit_name(int unit);
/*
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 24f4a782e0f431..5908fd3af00d73 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
goto done;
}
for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
+ dma_addr_t daddr;
+
for (; ntids--; tid++) {
if (tid == tidcnt)
tid = 0;
@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
ret = -ENOMEM;
break;
}
+ ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
+ if (ret)
+ break;
+
tidlist[i] = tid + tidoff;
/* we "know" system pages and TID pages are same size */
dd->pageshadow[ctxttid + tid] = pagep[i];
- dd->physshadow[ctxttid + tid] =
- qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dd->physshadow[ctxttid + tid] = daddr;
/*
* don't need atomic or it's overhead
*/
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index 59193f67ea7878..56bd59bc08b545 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -515,7 +515,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
index 74f90b2619f6f5..75c3f0dffe63b5 100644
--- a/drivers/infiniband/hw/qib/qib_user_pages.c
+++ b/drivers/infiniband/hw/qib/qib_user_pages.c
@@ -68,7 +68,8 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got,
+ FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
@@ -98,23 +99,27 @@ bail:
*
* I'm sure we won't be so lucky with other iommu's, so FIXME.
*/
-dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
- unsigned long offset, size_t size, int direction)
+int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
{
dma_addr_t phys;
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, phys))
+ return -ENOMEM;
- if (phys == 0) {
- pci_unmap_page(hwdev, phys, size, direction);
- phys = pci_map_page(hwdev, page, offset, size, direction);
+ if (!phys) {
+ pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(hwdev, phys))
+ return -ENOMEM;
/*
* FIXME: If we get 0 again, we should keep this page,
* map another, then free the 0 page.
*/
}
-
- return phys;
+ *daddr = phys;
+ return 0;
}
/**
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 645a5f6e6c88f0..7f0d75e2944159 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -113,6 +113,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int flags;
dma_addr_t pa;
DEFINE_DMA_ATTRS(attrs);
+ unsigned int gup_flags;
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
@@ -140,6 +141,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
flags = IOMMU_READ | IOMMU_CACHE;
flags |= (writable) ? IOMMU_WRITE : 0;
+ gup_flags = FOLL_WRITE;
+ gup_flags |= (writable) ? 0 : FOLL_FORCE;
cur_base = addr & PAGE_MASK;
ret = 0;
@@ -147,7 +150,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
ret = get_user_pages(current, current->mm, cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof(struct page *)),
- 1, !writable, page_list, NULL);
+ gup_flags, page_list, NULL);
if (ret < 0)
goto out;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index f74b115426036e..a338e60836ee26 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -992,12 +992,14 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
skb_queue_head_init(&skqueue);
+ netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
+ netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 0cbc7ceb9a556a..1610accfed0b71 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -1289,7 +1289,9 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
pr_err("ib_check_mr_status failed, ret %d\n", ret);
- goto err;
+ /* Not a lot we can do, return ambiguous guard error */
+ *sector = 0;
+ return 0x1;
}
if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
@@ -1317,7 +1319,4 @@ u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
}
return 0;
-err:
- /* Not alot we can do here, return ambiguous guard error */
- return 0x1;
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4fd2892613dd12..3dbc3ed263c214 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2594,7 +2594,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2606,15 +2605,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- for (i = 0; i < target->req_ring_size; ++i) {
- struct srp_request *req = &ch->req_ring[i];
-
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
- }
- }
-
return SUCCESS;
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 3258baf3282e26..26476a64e66396 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -76,18 +76,23 @@
*/
#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/usb/input.h>
+#include <linux/usb/quirks.h>
#define DRIVER_AUTHOR "Marko Friedemann <mfr@bmx-chemnitz.de>"
#define DRIVER_DESC "X-Box pad driver"
-#define XPAD_PKT_LEN 32
+#define XPAD_PKT_LEN 64
-/* xbox d-pads should map to buttons, as is required for DDR pads
- but we map them to axes when possible to simplify things */
+/*
+ * xbox d-pads should map to buttons, as is required for DDR pads
+ * but we map them to axes when possible to simplify things
+ */
#define MAP_DPAD_TO_BUTTONS (1 << 0)
#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
#define MAP_STICKS_TO_NULL (1 << 2)
@@ -112,6 +117,10 @@ static bool sticks_to_null;
module_param(sticks_to_null, bool, S_IRUGO);
MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
+static bool auto_poweroff = true;
+module_param(auto_poweroff, bool, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(auto_poweroff, "Power off wireless controllers on suspend");
+
static const struct xpad_device {
u16 idVendor;
u16 idProduct;
@@ -119,43 +128,77 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
+ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
{ 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
+ { 0x045e, 0x0288, "Microsoft Xbox Controller S v2", 0, XTYPE_XBOX },
{ 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
{ 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
- { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
- { 0x045e, 0x02dd, "Microsoft X-Box One pad (Covert Forces)", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 },
{ 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
+ { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE },
+ { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
- { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
- { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 },
{ 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
{ 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
{ 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
+ { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX },
+ { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 },
+ { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 },
{ 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
{ 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
+ { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX },
+ { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX },
+ { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX },
+ { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX },
+ { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX },
+ { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX },
+ { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX },
{ 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
+ { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
{ 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX },
{ 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
+ { 0x0738, 0x4530, "Mad Catz Universal MC2 Racing Wheel and Pedals", 0, XTYPE_XBOX },
{ 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX },
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
+ { 0x0738, 0x4586, "Mad Catz MicroCon Wireless Controller", 0, XTYPE_XBOX },
+ { 0x0738, 0x4588, "Mad Catz Blaster", 0, XTYPE_XBOX },
+ { 0x0738, 0x45ff, "Mad Catz Beat Pad (w/ Handle)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0738, 0x4736, "Mad Catz MicroCon Gamepad", 0, XTYPE_XBOX360 },
{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4743, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0x4758, "Mad Catz Arcade Game Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 },
{ 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 },
{ 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 },
+ { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 },
+ { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
{ 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX },
{ 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
@@ -163,32 +206,68 @@ static const struct xpad_device {
{ 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
{ 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX },
{ 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x3510, "Radica Gamester", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0008, "After Glow Pro Controller", 0, XTYPE_XBOX },
{ 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x011f, "Rock Candy Gamepad Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0131, "PDP EA Sports Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0133, "Xbox 360 Wired Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x013a, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0164, "PDP Battlefield One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x0165, "PDP Titanfall 2", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
{ 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE },
+ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE },
{ 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0413, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0x0501, "PDP Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e6f, 0xf900, "PDP Afterglow AX.1", 0, XTYPE_XBOX360 },
{ 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
{ 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX },
{ 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 },
+ { 0x0f0d, 0x000c, "Hori PadEX Turbo", 0, XTYPE_XBOX360 },
{ 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x001b, "Hori Real Arcade Pro VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE },
+ { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
+ { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX },
{ 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
{ 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
{ 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
+ { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
+ { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
{ 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
+ { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
+ { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
@@ -196,24 +275,67 @@ static const struct xpad_device {
{ 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 },
- { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf018, "Mad Catz Street Fighter IV SE Fighting Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf019, "Mad Catz Brawlstick for Xbox 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf021, "Mad Cats Ghost Recon FS GamePad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf025, "Mad Catz Call Of Duty", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf027, "Mad Catz FPS Pro", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf02e, "Mad Catz Fightpad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf030, "Mad Catz Xbox 360 MC2 MicroCon Racing Wheel", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf036, "Mad Catz MicroCon GamePad Pro", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf039, "Mad Catz MvC2 TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03a, "Mad Catz SFxT Fightstick Pro", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03d, "Street Fighter IV Arcade Stick TE - Chun Li", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03e, "Mad Catz MLG FightStick TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf03f, "Mad Catz FightStick SoulCaliber", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf042, "Mad Catz FightStick TES+", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf080, "Mad Catz FightStick TE2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf501, "HoriPad EX2 Turbo", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf502, "Hori Real Arcade Pro.VX SA", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf503, "Hori Fighting Stick VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf504, "Hori Real Arcade Pro. EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf505, "Hori Fighting Stick EX2B", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xf506, "Hori Real Arcade Pro.EX Premium VLX", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf904, "PDP Versus Fighting Pad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xf906, "MortalKombat FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x530a, "Xbox 360 Pro EX Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x531a, "PowerA Pro Ex", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5397, "FUS1ON Tournament Controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE },
{ 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 },
{ 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX },
{ 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
{ 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
@@ -274,15 +396,15 @@ static const signed short xpad_abs_triggers[] = {
* match against vendor id as well. Wired Xbox 360 devices have protocol 1,
* wireless controllers have protocol 129.
*/
-#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \
+#define XPAD_XBOX360_VENDOR_PROTOCOL(vend, pr) \
.match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \
.idVendor = (vend), \
.bInterfaceClass = USB_CLASS_VENDOR_SPEC, \
.bInterfaceSubClass = 93, \
.bInterfaceProtocol = (pr)
#define XPAD_XBOX360_VENDOR(vend) \
- { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \
- { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) }
+ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 1) }, \
+ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 129) }
/* The Xbox One controller uses subclass 71 and protocol 208. */
#define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \
@@ -292,33 +414,135 @@ static const signed short xpad_abs_triggers[] = {
.bInterfaceSubClass = 71, \
.bInterfaceProtocol = (pr)
#define XPAD_XBOXONE_VENDOR(vend) \
- { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) }
+ { XPAD_XBOXONE_VENDOR_PROTOCOL((vend), 208) }
-static struct usb_device_id xpad_table[] = {
+static const struct usb_device_id xpad_table[] = {
{ USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */
+ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */
XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */
XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */
XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */
+ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */
+ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */
XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */
{ USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */
+ XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */
+ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */
XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */
+ XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
+ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
+ XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
+ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */
- XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */
- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
- XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
- XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
+ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
+	XPAD_XBOX360_VENDOR(0x1bad),		/* Harmonix Rock Band Guitar and Drums */
+ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
+ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */
{ }
};
MODULE_DEVICE_TABLE(usb, xpad_table);
+struct xboxone_init_packet {
+ u16 idVendor;
+ u16 idProduct;
+ const u8 *data;
+ u8 len;
+};
+
+#define XBOXONE_INIT_PKT(_vid, _pid, _data) \
+ { \
+ .idVendor = (_vid), \
+ .idProduct = (_pid), \
+ .data = (_data), \
+ .len = ARRAY_SIZE(_data), \
+ }
+
+/*
+ * This packet is required for all Xbox One pads with 2015
+ * or later firmware installed (or present from the factory).
+ */
+static const u8 xboxone_fw2015_init[] = {
+ 0x05, 0x20, 0x00, 0x01, 0x00
+};
+
+/*
+ * This packet is required for the Titanfall 2 Xbox One pads
+ * (0x0e6f:0x0165) to finish initialization and for Hori pads
+ * (0x0f0d:0x0067) to make the analog sticks work.
+ */
+static const u8 xboxone_hori_init[] = {
+ 0x01, 0x20, 0x00, 0x09, 0x00, 0x04, 0x20, 0x3a,
+ 0x00, 0x00, 0x00, 0x80, 0x00
+};
+
+/*
+ * This packet is required for most (all?) of the PDP pads to start
+ * sending input reports. These pads include: (0x0e6f:0x02ab),
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
+ */
+static const u8 xboxone_pdp_init1[] = {
+ 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14
+};
+
+/*
+ * This packet is required for most (all?) of the PDP pads to start
+ * sending input reports. These pads include: (0x0e6f:0x02ab),
+ * (0x0e6f:0x02a4), (0x0e6f:0x02a6).
+ */
+static const u8 xboxone_pdp_init2[] = {
+ 0x06, 0x20, 0x00, 0x02, 0x01, 0x00
+};
+
+/*
+ * A specific rumble packet is required for some PowerA pads to start
+ * sending input reports. One of those pads is (0x24c6:0x543a).
+ */
+static const u8 xboxone_rumblebegin_init[] = {
+ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+ 0x1D, 0x1D, 0xFF, 0x00, 0x00
+};
+
+/*
+ * A rumble packet with zero FF intensity will immediately
+ * terminate the rumbling required to init PowerA pads.
+ * This should happen fast enough that the motors don't
+ * spin up to enough speed to actually vibrate the gamepad.
+ */
+static const u8 xboxone_rumbleend_init[] = {
+ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+/*
+ * This specifies the selection of init packets that a gamepad
+ * will be sent on init *and* the order in which they will be
+ * sent. The correct sequence number will be added when the
+ * packet is going to be sent.
+ */
+static const struct xboxone_init_packet xboxone_init_packets[] = {
+ XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init),
+ XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init),
+ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init1),
+ XBOXONE_INIT_PKT(0x0e6f, 0x0000, xboxone_pdp_init2),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init),
+ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init),
+};
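
An idVendor/idProduct of zero in this table acts as a wildcard, and entries are tried strictly in order. For example, a PDP pad (0x0e6f:0x02ab) is sent xboxone_fw2015_init, then xboxone_pdp_init1 and xboxone_pdp_init2; a PowerA 0x24c6:0x543a pad is sent xboxone_fw2015_init, then the rumble-begin and rumble-end packets. The matching walk itself lives in xpad_prepare_next_init_packet() further down in this diff.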
+
struct xpad_output_packet {
u8 data[XPAD_PKT_LEN];
u8 len;
@@ -334,23 +558,28 @@ struct xpad_output_packet {
struct usb_xpad {
struct input_dev *dev; /* input device interface */
+ struct input_dev __rcu *x360w_dev;
struct usb_device *udev; /* usb device */
struct usb_interface *intf; /* usb interface */
- int pad_present;
+ bool pad_present;
+ bool input_created;
struct urb *irq_in; /* urb for interrupt in report */
unsigned char *idata; /* input data */
dma_addr_t idata_dma;
struct urb *irq_out; /* urb for interrupt out report */
+ struct usb_anchor irq_out_anchor;
bool irq_out_active; /* we must not use an active URB */
+ u8 odata_serial; /* serial number for xbox one protocol */
unsigned char *odata; /* output data */
dma_addr_t odata_dma;
spinlock_t odata_lock;
struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS];
int last_out_packet;
+ int init_seq;
#if defined(CONFIG_JOYSTICK_XPAD_LEDS)
struct xpad_led *led;
@@ -362,8 +591,13 @@ struct usb_xpad {
int xtype; /* type of xbox device */
int pad_nr; /* the order x360 pads were attached */
const char *name; /* name of the device */
+ struct work_struct work; /* init/remove device from callback */
};
+static int xpad_init_input(struct usb_xpad *xpad);
+static void xpad_deinit_input(struct usb_xpad *xpad);
+static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num);
+
/*
* xpad_process_packet
*
@@ -443,10 +677,12 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
* http://www.free60.org/wiki/Gamepad
*/
-static void xpad360_process_packet(struct usb_xpad *xpad,
+static void xpad360_process_packet(struct usb_xpad *xpad, struct input_dev *dev,
u16 cmd, unsigned char *data)
{
- struct input_dev *dev = xpad->dev;
+ /* valid pad data */
+ if (data[0] != 0x00)
+ return;
/* digital pad */
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
@@ -514,7 +750,30 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
input_sync(dev);
}
-static void xpad_identify_controller(struct usb_xpad *xpad);
+static void xpad_presence_work(struct work_struct *work)
+{
+ struct usb_xpad *xpad = container_of(work, struct usb_xpad, work);
+ int error;
+
+ if (xpad->pad_present) {
+ error = xpad_init_input(xpad);
+ if (error) {
+ /* complain only, not much else we can do here */
+ dev_err(&xpad->dev->dev,
+ "unable to init device: %d\n", error);
+ } else {
+ rcu_assign_pointer(xpad->x360w_dev, xpad->dev);
+ }
+ } else {
+ RCU_INIT_POINTER(xpad->x360w_dev, NULL);
+ synchronize_rcu();
+ /*
+ * Now that we are sure xpad360w_process_packet is not
+ * using input device we can get rid of it.
+ */
+ xpad_deinit_input(xpad);
+ }
+}
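
This worker is the update side of a standard RCU publish/retire pattern: readers in URB completion context dereference x360w_dev under rcu_read_lock() (see xpad360w_process_packet() below), so the input device may only be unregistered after synchronize_rcu() guarantees no reader still holds the old pointer. A stripped-down sketch of the pattern, with hypothetical names:

	#include <linux/input.h>
	#include <linux/rcupdate.h>

	struct pad_state {
		struct input_dev __rcu *dev;	/* published input device */
	};

	/* reader: may run in interrupt context */
	static void pad_report(struct pad_state *s)
	{
		struct input_dev *dev;

		rcu_read_lock();
		dev = rcu_dereference(s->dev);
		if (dev)
			input_sync(dev);	/* dev cannot be freed while the read lock is held */
		rcu_read_unlock();
	}

	/* updater: process context only */
	static void pad_set_present(struct pad_state *s, struct input_dev *dev)
	{
		if (dev) {
			rcu_assign_pointer(s->dev, dev);	/* publish */
		} else {
			RCU_INIT_POINTER(s->dev, NULL);		/* unpublish */
			synchronize_rcu();			/* wait for readers to drain */
			/* now safe to unregister and free the old device */
		}
	}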
/*
* xpad360w_process_packet
@@ -532,35 +791,61 @@ static void xpad_identify_controller(struct usb_xpad *xpad);
*/
static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
{
+ struct input_dev *dev;
+ bool present;
+
/* Presence change */
if (data[0] & 0x08) {
- if (data[1] & 0x80) {
- xpad->pad_present = 1;
- /*
- * Light up the segment corresponding to
- * controller number.
- */
- xpad_identify_controller(xpad);
- } else
- xpad->pad_present = 0;
+ present = (data[1] & 0x80) != 0;
+
+ if (xpad->pad_present != present) {
+ xpad->pad_present = present;
+ schedule_work(&xpad->work);
+ }
}
/* Valid pad data */
- if (!(data[1] & 0x1))
+ if (data[1] != 0x1)
return;
- xpad360_process_packet(xpad, cmd, &data[4]);
+ rcu_read_lock();
+ dev = rcu_dereference(xpad->x360w_dev);
+ if (dev)
+ xpad360_process_packet(xpad, dev, cmd, &data[4]);
+ rcu_read_unlock();
}
/*
- * xpadone_process_buttons
+ * xpadone_process_packet
*
- * Process a button update packet from an Xbox one controller.
+ * Completes a request by converting the data into events for the
+ * input subsystem. This version is for the Xbox One controller.
+ *
+ * The report format was gleaned from
+ * https://github.com/kylelemons/xbox/blob/master/xbox.go
*/
-static void xpadone_process_buttons(struct usb_xpad *xpad,
- struct input_dev *dev,
- unsigned char *data)
+static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data)
{
+ struct input_dev *dev = xpad->dev;
+
+ /* the xbox button has its own special report */
+	if (data[0] == 0x07) {
+ /*
+ * The Xbox One S controller requires these reports to be
+ * acked otherwise it continues sending them forever and
+ * won't report further mode button events.
+ */
+ if (data[1] == 0x30)
+ xpadone_ack_mode_report(xpad, data[2]);
+
+ input_report_key(dev, BTN_MODE, data[4] & 0x01);
+ input_sync(dev);
+ return;
+ }
+ /* check invalid packet */
+	else if (data[0] != 0x20)
+ return;
+
/* menu/view buttons */
input_report_key(dev, BTN_START, data[4] & 0x04);
input_report_key(dev, BTN_SELECT, data[4] & 0x08);
@@ -623,34 +908,6 @@ static void xpadone_process_buttons(struct usb_xpad *xpad,
input_sync(dev);
}
-/*
- * xpadone_process_packet
- *
- * Completes a request by converting the data into events for the
- * input subsystem. This version is for the Xbox One controller.
- *
- * The report format was gleaned from
- * https://github.com/kylelemons/xbox/blob/master/xbox.go
- */
-
-static void xpadone_process_packet(struct usb_xpad *xpad,
- u16 cmd, unsigned char *data)
-{
- struct input_dev *dev = xpad->dev;
-
- switch (data[0]) {
- case 0x20:
- xpadone_process_buttons(xpad, dev, data);
- break;
-
- case 0x07:
- /* the xbox button has its own special report */
- input_report_key(dev, BTN_MODE, data[4] & 0x01);
- input_sync(dev);
- break;
- }
-}
-
static void xpad_irq_in(struct urb *urb)
{
struct usb_xpad *xpad = urb->context;
@@ -678,7 +935,7 @@ static void xpad_irq_in(struct urb *urb)
switch (xpad->xtype) {
case XTYPE_XBOX360:
- xpad360_process_packet(xpad, 0, xpad->idata);
+ xpad360_process_packet(xpad, xpad->dev, 0, xpad->idata);
break;
case XTYPE_XBOX360W:
xpad360w_process_packet(xpad, 0, xpad->idata);
@@ -698,11 +955,47 @@ exit:
}
/* Callers must hold xpad->odata_lock spinlock */
+static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad)
+{
+ const struct xboxone_init_packet *init_packet;
+
+ if (xpad->xtype != XTYPE_XBOXONE)
+ return false;
+
+ /* Perform initialization sequence for Xbox One pads that require it */
+ while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) {
+ init_packet = &xboxone_init_packets[xpad->init_seq++];
+
+ if (init_packet->idVendor != 0 &&
+ init_packet->idVendor != xpad->dev->id.vendor)
+ continue;
+
+ if (init_packet->idProduct != 0 &&
+ init_packet->idProduct != xpad->dev->id.product)
+ continue;
+
+ /* This packet applies to our device, so prepare to send it */
+ memcpy(xpad->odata, init_packet->data, init_packet->len);
+ xpad->irq_out->transfer_buffer_length = init_packet->len;
+
+ /* Update packet with current sequence number */
+ xpad->odata[2] = xpad->odata_serial++;
+ return true;
+ }
+
+ return false;
+}
+
+/* Callers must hold xpad->odata_lock spinlock */
static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad)
{
struct xpad_output_packet *pkt, *packet = NULL;
int i;
+ /* We may have init packets to send before we can send user commands */
+ if (xpad_prepare_next_init_packet(xpad))
+ return true;
+
for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) {
if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS)
xpad->last_out_packet = 0;
@@ -733,11 +1026,13 @@ static int xpad_try_sending_next_out_packet(struct usb_xpad *xpad)
int error;
if (!xpad->irq_out_active && xpad_prepare_next_out_packet(xpad)) {
+ usb_anchor_urb(xpad->irq_out, &xpad->irq_out_anchor);
error = usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
if (error) {
dev_err(&xpad->intf->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, error);
+ usb_unanchor_urb(xpad->irq_out);
return -EIO;
}
@@ -779,11 +1074,13 @@ static void xpad_irq_out(struct urb *urb)
}
if (xpad->irq_out_active) {
+ usb_anchor_urb(urb, &xpad->irq_out_anchor);
error = usb_submit_urb(urb, GFP_ATOMIC);
if (error) {
dev_err(dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, error);
+ usb_unanchor_urb(urb);
xpad->irq_out_active = false;
}
}
@@ -791,34 +1088,29 @@ static void xpad_irq_out(struct urb *urb)
spin_unlock_irqrestore(&xpad->odata_lock, flags);
}
-static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
+static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad,
+ struct usb_endpoint_descriptor *ep_irq_out)
{
- struct usb_endpoint_descriptor *ep_irq_out;
- int ep_irq_out_idx;
int error;
if (xpad->xtype == XTYPE_UNKNOWN)
return 0;
+ init_usb_anchor(&xpad->irq_out_anchor);
+
xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN,
GFP_KERNEL, &xpad->odata_dma);
- if (!xpad->odata) {
- error = -ENOMEM;
- goto fail1;
- }
+ if (!xpad->odata)
+ return -ENOMEM;
spin_lock_init(&xpad->odata_lock);
xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL);
if (!xpad->irq_out) {
error = -ENOMEM;
- goto fail2;
+ goto err_free_coherent;
}
- /* Xbox One controller has in/out endpoints swapped. */
- ep_irq_out_idx = xpad->xtype == XTYPE_XBOXONE ? 0 : 1;
- ep_irq_out = &intf->cur_altsetting->endpoint[ep_irq_out_idx].desc;
-
usb_fill_int_urb(xpad->irq_out, xpad->udev,
usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress),
xpad->odata, XPAD_PKT_LEN,
@@ -828,14 +1120,21 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
return 0;
- fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
- fail1: return error;
+err_free_coherent:
+ usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma);
+ return error;
}
static void xpad_stop_output(struct usb_xpad *xpad)
{
- if (xpad->xtype != XTYPE_UNKNOWN)
- usb_kill_urb(xpad->irq_out);
+ if (xpad->xtype != XTYPE_UNKNOWN) {
+ if (!usb_wait_anchor_empty_timeout(&xpad->irq_out_anchor,
+ 5000)) {
+ dev_warn(&xpad->intf->dev,
+ "timed out waiting for output URB to complete, killing\n");
+ usb_kill_anchored_urbs(&xpad->irq_out_anchor);
+ }
+ }
}
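
The teardown above works because every output submission is paired with usb_anchor_urb() beforehand and usb_unanchor_urb() on submission failure (see xpad_try_sending_next_out_packet() and xpad_irq_out() earlier in this diff): a failed submission never completes, so unanchoring it keeps the anchor count honest, and usb_wait_anchor_empty_timeout() then only waits for URBs that are genuinely in flight, with usb_kill_anchored_urbs() as the fallback if the hardware never responds.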
static void xpad_deinit_output(struct usb_xpad *xpad)
@@ -882,21 +1181,17 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad)
static int xpad_start_xbox_one(struct usb_xpad *xpad)
{
- struct xpad_output_packet *packet =
- &xpad->out_packets[XPAD_OUT_CMD_IDX];
unsigned long flags;
int retval;
spin_lock_irqsave(&xpad->odata_lock, flags);
- /* Xbox one controller needs to be initialized. */
- packet->data[0] = 0x05;
- packet->data[1] = 0x20;
- packet->len = 2;
- packet->pending = true;
-
- /* Reset the sequence so we send out start packet first */
- xpad->last_out_packet = -1;
+ /*
+ * Begin the init sequence by attempting to send a packet.
+ * We will cycle through the init packet sequence before
+ * sending any packets from the output ring.
+ */
+ xpad->init_seq = 0;
retval = xpad_try_sending_next_out_packet(xpad);
spin_unlock_irqrestore(&xpad->odata_lock, flags);
@@ -904,6 +1199,30 @@ static int xpad_start_xbox_one(struct usb_xpad *xpad)
return retval;
}
+static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num)
+{
+ unsigned long flags;
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+ static const u8 mode_report_ack[] = {
+ 0x01, 0x20, 0x00, 0x09, 0x00, 0x07, 0x20, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ packet->len = sizeof(mode_report_ack);
+ memcpy(packet->data, mode_report_ack, packet->len);
+ packet->data[2] = seq_num;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out the ack now */
+ xpad->last_out_packet = -1;
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+}
+
#ifdef CONFIG_JOYSTICK_XPAD_FF
static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect)
{
@@ -966,18 +1285,19 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect
case XTYPE_XBOXONE:
packet->data[0] = 0x09; /* activate rumble */
- packet->data[1] = 0x08;
- packet->data[2] = 0x00;
- packet->data[3] = 0x08; /* continuous effect */
- packet->data[4] = 0x00; /* simple rumble mode */
- packet->data[5] = 0x03; /* L and R actuator only */
- packet->data[6] = 0x00; /* TODO: LT actuator */
- packet->data[7] = 0x00; /* TODO: RT actuator */
- packet->data[8] = strong / 256; /* left actuator */
- packet->data[9] = weak / 256; /* right actuator */
- packet->data[10] = 0x80; /* length of pulse */
- packet->data[11] = 0x00; /* stop period of pulse */
- packet->len = 12;
+ packet->data[1] = 0x00;
+ packet->data[2] = xpad->odata_serial++;
+ packet->data[3] = 0x09;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x0F;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = strong / 512; /* left actuator */
+ packet->data[9] = weak / 512; /* right actuator */
+ packet->data[10] = 0xFF; /* on period */
+ packet->data[11] = 0x00; /* off period */
+ packet->data[12] = 0xFF; /* repeat count */
+ packet->len = 13;
packet->pending = true;
break;
@@ -1090,7 +1410,7 @@ static void xpad_send_led_command(struct usb_xpad *xpad, int command)
*/
static void xpad_identify_controller(struct usb_xpad *xpad)
{
- xpad_send_led_command(xpad, (xpad->pad_nr % 4) + 2);
+ led_set_brightness(&xpad->led->led_cdev, (xpad->pad_nr % 4) + 2);
}
static void xpad_led_set(struct led_classdev *led_cdev,
@@ -1127,19 +1447,13 @@ static int xpad_led_probe(struct usb_xpad *xpad)
led_cdev = &led->led_cdev;
led_cdev->name = led->name;
led_cdev->brightness_set = xpad_led_set;
+ led_cdev->flags = LED_CORE_SUSPENDRESUME;
error = led_classdev_register(&xpad->udev->dev, led_cdev);
if (error)
goto err_free_id;
- if (xpad->xtype == XTYPE_XBOX360) {
- /*
- * Light up the segment corresponding to controller
- * number on wired devices. On wireless we'll do that
- * when they respond to "presence" packet.
- */
- xpad_identify_controller(xpad);
- }
+ xpad_identify_controller(xpad);
return 0;
@@ -1164,41 +1478,110 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
#else
static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
static void xpad_led_disconnect(struct usb_xpad *xpad) { }
-static void xpad_identify_controller(struct usb_xpad *xpad) { }
#endif
-static int xpad_open(struct input_dev *dev)
+static int xpad_start_input(struct usb_xpad *xpad)
{
- struct usb_xpad *xpad = input_get_drvdata(dev);
-
- /* URB was submitted in probe */
- if (xpad->xtype == XTYPE_XBOX360W)
- return 0;
+ int error;
- xpad->irq_in->dev = xpad->udev;
if (usb_submit_urb(xpad->irq_in, GFP_KERNEL))
return -EIO;
- if (xpad->xtype == XTYPE_XBOXONE)
- return xpad_start_xbox_one(xpad);
+ if (xpad->xtype == XTYPE_XBOXONE) {
+ error = xpad_start_xbox_one(xpad);
+ if (error) {
+ usb_kill_urb(xpad->irq_in);
+ return error;
+ }
+ }
return 0;
}
-static void xpad_close(struct input_dev *dev)
+static void xpad_stop_input(struct usb_xpad *xpad)
{
- struct usb_xpad *xpad = input_get_drvdata(dev);
+ usb_kill_urb(xpad->irq_in);
+}
- if (xpad->xtype != XTYPE_XBOX360W)
+static void xpad360w_poweroff_controller(struct usb_xpad *xpad)
+{
+ unsigned long flags;
+ struct xpad_output_packet *packet =
+ &xpad->out_packets[XPAD_OUT_CMD_IDX];
+
+ spin_lock_irqsave(&xpad->odata_lock, flags);
+
+ packet->data[0] = 0x00;
+ packet->data[1] = 0x00;
+ packet->data[2] = 0x08;
+ packet->data[3] = 0xC0;
+ packet->data[4] = 0x00;
+ packet->data[5] = 0x00;
+ packet->data[6] = 0x00;
+ packet->data[7] = 0x00;
+ packet->data[8] = 0x00;
+ packet->data[9] = 0x00;
+ packet->data[10] = 0x00;
+ packet->data[11] = 0x00;
+ packet->len = 12;
+ packet->pending = true;
+
+ /* Reset the sequence so we send out poweroff now */
+ xpad->last_out_packet = -1;
+ xpad_try_sending_next_out_packet(xpad);
+
+ spin_unlock_irqrestore(&xpad->odata_lock, flags);
+}
+
+static int xpad360w_start_input(struct usb_xpad *xpad)
+{
+ int error;
+
+ error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
+ if (error)
+ return -EIO;
+
+ /*
+ * Send presence packet.
+ * This will force the controller to resend connection packets.
+ * This is useful in the case we activate the module after the
+ * adapter has been plugged in, as it won't automatically
+ * send us info about the controllers.
+ */
+ error = xpad_inquiry_pad_presence(xpad);
+ if (error) {
usb_kill_urb(xpad->irq_in);
+ return error;
+ }
- xpad_stop_output(xpad);
+ return 0;
+}
+
+static void xpad360w_stop_input(struct usb_xpad *xpad)
+{
+ usb_kill_urb(xpad->irq_in);
+
+ /* Make sure we are done with presence work if it was scheduled */
+ flush_work(&xpad->work);
+}
+
+static int xpad_open(struct input_dev *dev)
+{
+ struct usb_xpad *xpad = input_get_drvdata(dev);
+
+ return xpad_start_input(xpad);
+}
+
+static void xpad_close(struct input_dev *dev)
+{
+ struct usb_xpad *xpad = input_get_drvdata(dev);
+
+ xpad_stop_input(xpad);
}
static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
{
struct usb_xpad *xpad = input_get_drvdata(input_dev);
- set_bit(abs, input_dev->absbit);
switch (abs) {
case ABS_X:
@@ -1218,13 +1601,19 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */
input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
break;
+ default:
+ input_set_abs_params(input_dev, abs, 0, 0, 0, 0);
+ break;
}
}
static void xpad_deinit_input(struct usb_xpad *xpad)
{
- xpad_led_disconnect(xpad);
- input_unregister_device(xpad->dev);
+ if (xpad->input_created) {
+ xpad->input_created = false;
+ xpad_led_disconnect(xpad);
+ input_unregister_device(xpad->dev);
+ }
}
static int xpad_init_input(struct usb_xpad *xpad)
@@ -1250,13 +1639,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
input_set_drvdata(input_dev, xpad);
- input_dev->open = xpad_open;
- input_dev->close = xpad_close;
-
- __set_bit(EV_KEY, input_dev->evbit);
+ if (xpad->xtype != XTYPE_XBOX360W) {
+ input_dev->open = xpad_open;
+ input_dev->close = xpad_close;
+ }
if (!(xpad->mapping & MAP_STICKS_TO_NULL)) {
- __set_bit(EV_ABS, input_dev->evbit);
/* set up axes */
for (i = 0; xpad_abs[i] >= 0; i++)
xpad_set_up_abs(input_dev, xpad_abs[i]);
@@ -1264,21 +1652,22 @@ static int xpad_init_input(struct usb_xpad *xpad)
/* set up standard buttons */
for (i = 0; xpad_common_btn[i] >= 0; i++)
- __set_bit(xpad_common_btn[i], input_dev->keybit);
+ input_set_capability(input_dev, EV_KEY, xpad_common_btn[i]);
/* set up model-specific ones */
if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W ||
xpad->xtype == XTYPE_XBOXONE) {
for (i = 0; xpad360_btn[i] >= 0; i++)
- __set_bit(xpad360_btn[i], input_dev->keybit);
+ input_set_capability(input_dev, EV_KEY, xpad360_btn[i]);
} else {
for (i = 0; xpad_btn[i] >= 0; i++)
- __set_bit(xpad_btn[i], input_dev->keybit);
+ input_set_capability(input_dev, EV_KEY, xpad_btn[i]);
}
if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
for (i = 0; xpad_btn_pad[i] >= 0; i++)
- __set_bit(xpad_btn_pad[i], input_dev->keybit);
+ input_set_capability(input_dev, EV_KEY,
+ xpad_btn_pad[i]);
}
/*
@@ -1295,7 +1684,8 @@ static int xpad_init_input(struct usb_xpad *xpad)
if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
for (i = 0; xpad_btn_triggers[i] >= 0; i++)
- __set_bit(xpad_btn_triggers[i], input_dev->keybit);
+ input_set_capability(input_dev, EV_KEY,
+ xpad_btn_triggers[i]);
} else {
for (i = 0; xpad_abs_triggers[i] >= 0; i++)
xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
@@ -1313,6 +1703,7 @@ static int xpad_init_input(struct usb_xpad *xpad)
if (error)
goto err_disconnect_led;
+ xpad->input_created = true;
return 0;
err_disconnect_led:
@@ -1328,8 +1719,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
{
struct usb_device *udev = interface_to_usbdev(intf);
struct usb_xpad *xpad;
- struct usb_endpoint_descriptor *ep_irq_in;
- int ep_irq_in_idx;
+ struct usb_endpoint_descriptor *ep_irq_in, *ep_irq_out;
int i, error;
if (intf->cur_altsetting->desc.bNumEndpoints != 2)
@@ -1366,6 +1756,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
xpad->name = xpad_device[i].name;
+ INIT_WORK(&xpad->work, xpad_presence_work);
if (xpad->xtype == XTYPE_UNKNOWN) {
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
@@ -1398,13 +1789,28 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
goto err_free_in_urb;
}
- error = xpad_init_output(intf, xpad);
- if (error)
+ ep_irq_in = ep_irq_out = NULL;
+
+ for (i = 0; i < 2; i++) {
+ struct usb_endpoint_descriptor *ep =
+ &intf->cur_altsetting->endpoint[i].desc;
+
+ if (usb_endpoint_xfer_int(ep)) {
+ if (usb_endpoint_dir_in(ep))
+ ep_irq_in = ep;
+ else
+ ep_irq_out = ep;
+ }
+ }
+
+ if (!ep_irq_in || !ep_irq_out) {
+ error = -ENODEV;
goto err_free_in_urb;
+ }
- /* Xbox One controller has in/out endpoints swapped. */
- ep_irq_in_idx = xpad->xtype == XTYPE_XBOXONE ? 1 : 0;
- ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc;
+ error = xpad_init_output(intf, xpad, ep_irq_out);
+ if (error)
+ goto err_free_in_urb;
usb_fill_int_urb(xpad->irq_in, udev,
usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress),
@@ -1415,10 +1821,6 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
usb_set_intfdata(intf, xpad);
- error = xpad_init_input(xpad);
- if (error)
- goto err_deinit_output;
-
if (xpad->xtype == XTYPE_XBOX360W) {
/*
* Submit the int URB immediately rather than waiting for open
@@ -1427,28 +1829,24 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
* exactly the message that a controller has arrived that
* we're waiting for.
*/
- xpad->irq_in->dev = xpad->udev;
- error = usb_submit_urb(xpad->irq_in, GFP_KERNEL);
+ error = xpad360w_start_input(xpad);
if (error)
- goto err_deinit_input;
-
+ goto err_deinit_output;
/*
- * Send presence packet.
- * This will force the controller to resend connection packets.
- * This is useful in the case we activate the module after the
- * adapter has been plugged in, as it won't automatically
- * send us info about the controllers.
+ * Wireless controllers require RESET_RESUME to work properly
+ * after suspend. Ideally this quirk should be in usb core
+ * quirk list, but we have too many vendors producing these
+ * controllers and we'd need to maintain 2 identical lists
+ * here in this driver and in usb core.
*/
- error = xpad_inquiry_pad_presence(xpad);
+ udev->quirks |= USB_QUIRK_RESET_RESUME;
+ } else {
+ error = xpad_init_input(xpad);
if (error)
- goto err_kill_in_urb;
+ goto err_deinit_output;
}
return 0;
-err_kill_in_urb:
- usb_kill_urb(xpad->irq_in);
-err_deinit_input:
- xpad_deinit_input(xpad);
err_deinit_output:
xpad_deinit_output(xpad);
err_free_in_urb:
@@ -1458,19 +1856,24 @@ err_free_idata:
err_free_mem:
kfree(xpad);
return error;
-
}
static void xpad_disconnect(struct usb_interface *intf)
{
- struct usb_xpad *xpad = usb_get_intfdata (intf);
+ struct usb_xpad *xpad = usb_get_intfdata(intf);
+
+ if (xpad->xtype == XTYPE_XBOX360W)
+ xpad360w_stop_input(xpad);
xpad_deinit_input(xpad);
- xpad_deinit_output(xpad);
- if (xpad->xtype == XTYPE_XBOX360W) {
- usb_kill_urb(xpad->irq_in);
- }
+ /*
+ * Now that both input device and LED device are gone we can
+ * stop output URB.
+ */
+ xpad_stop_output(xpad);
+
+ xpad_deinit_output(xpad);
usb_free_urb(xpad->irq_in);
usb_free_coherent(xpad->udev, XPAD_PKT_LEN,
@@ -1481,10 +1884,72 @@ static void xpad_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
}
+static int xpad_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct usb_xpad *xpad = usb_get_intfdata(intf);
+ struct input_dev *input = xpad->dev;
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ /*
+ * Wireless controllers always listen to input so
+ * they are notified when controller shows up
+ * or goes away.
+ */
+ xpad360w_stop_input(xpad);
+
+ /*
+ * The wireless adapter is going off now, so the
+ * gamepads are going to become disconnected.
+ * Unless explicitly disabled, power them down
+ * so they don't just sit there flashing.
+ */
+ if (auto_poweroff && xpad->pad_present)
+ xpad360w_poweroff_controller(xpad);
+ } else {
+ mutex_lock(&input->mutex);
+ if (input->users)
+ xpad_stop_input(xpad);
+ mutex_unlock(&input->mutex);
+ }
+
+ xpad_stop_output(xpad);
+
+ return 0;
+}
+
+static int xpad_resume(struct usb_interface *intf)
+{
+ struct usb_xpad *xpad = usb_get_intfdata(intf);
+ struct input_dev *input = xpad->dev;
+ int retval = 0;
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ retval = xpad360w_start_input(xpad);
+ } else {
+ mutex_lock(&input->mutex);
+ if (input->users) {
+ retval = xpad_start_input(xpad);
+ } else if (xpad->xtype == XTYPE_XBOXONE) {
+ /*
+ * Even if there are no users, we'll send Xbox One pads
+ * the startup sequence so they don't sit there and
+ * blink until somebody opens the input device again.
+ */
+ retval = xpad_start_xbox_one(xpad);
+ }
+ mutex_unlock(&input->mutex);
+ }
+
+ return retval;
+}
+
static struct usb_driver xpad_driver = {
.name = "xpad",
.probe = xpad_probe,
.disconnect = xpad_disconnect,
+ .suspend = xpad_suspend,
+ .resume = xpad_resume,
+ .reset_resume = xpad_resume,
.id_table = xpad_table,
};
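
Pointing .reset_resume at xpad_resume() works together with the USB_QUIRK_RESET_RESUME flag set in probe: for the wireless adapters the USB core resets the port on wakeup and then calls reset_resume() instead of resume(), and reusing xpad_resume() there restarts input and re-sends the presence inquiry so reconnected pads are rediscovered.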
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index f1235831283d74..fdeda0b0fbd61c 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -79,8 +79,7 @@ MODULE_LICENSE("GPL");
*/
-static unsigned char atakbd_keycode[0x72] = { /* American layout */
- [0] = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = { /* American layout */
[1] = KEY_ESC,
[2] = KEY_1,
[3] = KEY_2,
@@ -121,9 +120,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_BACKSLASH, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_GRAVE, /* FIXME: '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -149,45 +148,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
[66] = KEY_F8,
[67] = KEY_F9,
[68] = KEY_F10,
- [69] = KEY_ESC,
- [70] = KEY_DELETE,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
+ [71] = KEY_HOME,
+ [72] = KEY_UP,
[74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
+ [75] = KEY_LEFT,
+ [77] = KEY_RIGHT,
[78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
+ [80] = KEY_DOWN,
+ [82] = KEY_INSERT,
+ [83] = KEY_DELETE,
[96] = KEY_102ND,
- [97] = KEY_KPASTERISK, /* FIXME */
- [98] = KEY_KPSLASH,
+ [97] = KEY_UNDO,
+ [98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK,
- [103] = KEY_UP,
- [104] = KEY_KPASTERISK, /* FIXME */
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_KPASTERISK, /* FIXME */
- [108] = KEY_DOWN,
- [109] = KEY_KPASTERISK, /* FIXME */
- [110] = KEY_KPASTERISK, /* FIXME */
- [111] = KEY_KPASTERISK, /* FIXME */
- [112] = KEY_KPASTERISK, /* FIXME */
- [113] = KEY_KPASTERISK /* FIXME */
+ [103] = KEY_KP7,
+ [104] = KEY_KP8,
+ [105] = KEY_KP9,
+ [106] = KEY_KP4,
+ [107] = KEY_KP5,
+ [108] = KEY_KP6,
+ [109] = KEY_KP1,
+ [110] = KEY_KP2,
+ [111] = KEY_KP3,
+ [112] = KEY_KP0,
+ [113] = KEY_KPDOT,
+ [114] = KEY_KPENTER,
};
static struct input_dev *atakbd_dev;
@@ -195,21 +183,15 @@ static struct input_dev *atakbd_dev;
static void atakbd_interrupt(unsigned char scancode, char down)
{
- if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
+ if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
// report raw events here?
scancode = atakbd_keycode[scancode];
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(atakbd_dev, scancode, 1);
- input_report_key(atakbd_dev, scancode, 0);
- input_sync(atakbd_dev);
- } else {
- input_report_key(atakbd_dev, scancode, down);
- input_sync(atakbd_dev);
- }
- } else /* scancodes >= 0xf2 are mouse data, most likely */
+ input_report_key(atakbd_dev, scancode, down);
+ input_sync(atakbd_dev);
+ } else /* scancodes >= 0xf3 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return;
diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
index 0d1cfc0e48b9e3..39c026bf6796d6 100644
--- a/drivers/input/keyboard/cros_ec_keyb.c
+++ b/drivers/input/keyboard/cros_ec_keyb.c
@@ -274,24 +274,35 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
switch (ckdev->ec->event_data.event_type) {
case EC_MKBP_EVENT_KEY_MATRIX:
- /*
- * If EC is not the wake source, discard key state changes
- * during suspend.
- */
- if (queued_during_suspend)
- return NOTIFY_OK;
+ if (device_may_wakeup(ckdev->dev)) {
+ pm_wakeup_event(ckdev->dev, 0);
+ } else {
+ /*
+ * If keyboard is not wake enabled, discard key state
+ * changes during suspend. Switches will be re-checked
+ * in cros_ec_keyb_resume() to be sure nothing is lost.
+ */
+ if (queued_during_suspend)
+ return NOTIFY_OK;
+ }
if (ckdev->ec->event_size != ckdev->cols) {
dev_err(ckdev->dev,
"Discarded incomplete key matrix event.\n");
return NOTIFY_OK;
}
+
cros_ec_keyb_process(ckdev,
ckdev->ec->event_data.data.key_matrix,
ckdev->ec->event_size);
break;
case EC_MKBP_EVENT_SYSRQ:
+ if (device_may_wakeup(ckdev->dev))
+ pm_wakeup_event(ckdev->dev, 0);
+ else if (queued_during_suspend)
+ return NOTIFY_OK;
+
val = get_unaligned_le32(&ckdev->ec->event_data.data.sysrq);
dev_dbg(ckdev->dev, "sysrq code from EC : %#x\n", val);
handle_sysrq(val);
@@ -299,12 +310,9 @@ static int cros_ec_keyb_work(struct notifier_block *nb,
case EC_MKBP_EVENT_BUTTON:
case EC_MKBP_EVENT_SWITCH:
- /*
- * If EC is not the wake source, discard key state
- * changes during suspend. Switches will be re-checked in
- * cros_ec_keyb_resume() to be sure nothing is lost.
- */
- if (queued_during_suspend)
+ if (device_may_wakeup(ckdev->dev))
+ pm_wakeup_event(ckdev->dev, 0);
+ else if (queued_during_suspend)
return NOTIFY_OK;
if (ckdev->ec->event_data.event_type == EC_MKBP_EVENT_BUTTON) {
@@ -668,6 +676,7 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
return err;
}
+ device_init_wakeup(ckdev->dev, true);
return 0;
}
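
The net effect of this change: device_init_wakeup(ckdev->dev, true) registers the keyboard as wakeup-capable, and for wake-enabled devices each MKBP event now calls pm_wakeup_event(dev, 0), which notifies the PM core of wakeup activity (aborting a suspend in progress if needed) instead of silently discarding events queued during suspend; devices that are not wake-enabled keep the old discard-and-recheck behavior.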
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index 795fa353de7c01..2e12e31f45c562 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -220,7 +220,7 @@ static void matrix_keypad_stop(struct input_dev *dev)
keypad->stopped = true;
spin_unlock_irq(&keypad->lock);
- flush_work(&keypad->work.work);
+ flush_delayed_work(&keypad->work);
/*
* matrix_keypad_scan() will leave IRQs enabled;
* we should disable them now.
@@ -405,7 +405,7 @@ matrix_keypad_parse_dt(struct device *dev)
struct matrix_keypad_platform_data *pdata;
struct device_node *np = dev->of_node;
unsigned int *gpios;
- int i, nrow, ncol;
+ int ret, i, nrow, ncol;
if (!np) {
dev_err(dev, "device lacks DT data\n");
@@ -447,12 +447,19 @@ matrix_keypad_parse_dt(struct device *dev)
return ERR_PTR(-ENOMEM);
}
- for (i = 0; i < pdata->num_row_gpios; i++)
- gpios[i] = of_get_named_gpio(np, "row-gpios", i);
+ for (i = 0; i < nrow; i++) {
+ ret = of_get_named_gpio(np, "row-gpios", i);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ gpios[i] = ret;
+ }
- for (i = 0; i < pdata->num_col_gpios; i++)
- gpios[pdata->num_row_gpios + i] =
- of_get_named_gpio(np, "col-gpios", i);
+ for (i = 0; i < ncol; i++) {
+ ret = of_get_named_gpio(np, "col-gpios", i);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ gpios[nrow + i] = ret;
+ }
pdata->row_gpios = gpios;
pdata->col_gpios = &gpios[pdata->num_row_gpios];
@@ -479,10 +486,8 @@ static int matrix_keypad_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
pdata = matrix_keypad_parse_dt(&pdev->dev);
- if (IS_ERR(pdata)) {
- dev_err(&pdev->dev, "no platform data defined\n");
+ if (IS_ERR(pdata))
return PTR_ERR(pdata);
- }
} else if (!pdata->keymap_data) {
dev_err(&pdev->dev, "no keymap data defined\n");
return -EINVAL;
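
Propagating the raw return value matters here because of_get_named_gpio() can return -EPROBE_DEFER when the GPIO controller has not been probed yet; passing that back through ERR_PTR() lets the driver core retry matrix_keypad_probe() later, instead of registering a keypad built from invalid GPIO numbers.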
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index 6639b2b8528aa6..3d2c60c8de8303 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -60,8 +60,18 @@
/* OMAP4 values */
#define OMAP4_VAL_IRQDISABLE 0x0
-#define OMAP4_VAL_DEBOUNCINGTIME 0x7
-#define OMAP4_VAL_PVT 0x7
+
+/*
+ * Errata i689: If a key is released for a time shorter than debounce time,
+ * the keyboard will idle and never detect the key release. The workaround
+ * is to use at least a 12ms debounce time. See omap5432 TRM chapter
+ * "26.4.6.2 Keyboard Controller Timer" for more information.
+ */
+#define OMAP4_KEYPAD_PTV_DIV_128 0x6
+#define OMAP4_KEYPAD_DEBOUNCINGTIME_MS(dbms, ptv) \
+ ((((dbms) * 1000) / ((1 << ((ptv) + 1)) * (1000000 / 32768))) - 1)
+#define OMAP4_VAL_DEBOUNCINGTIME_16MS \
+ OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, OMAP4_KEYPAD_PTV_DIV_128)
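
Working the macro through for the values used here (all integer arithmetic): with ptv = OMAP4_KEYPAD_PTV_DIV_128 = 6, the divider is 1 << 7 = 128 and 1000000 / 32768 = 30, so OMAP4_KEYPAD_DEBOUNCINGTIME_MS(16, 6) = (16 * 1000) / (128 * 30) - 1 = 16000 / 3840 - 1 = 3. A register value of 3 gives (3 + 1) * 128 / 32768 s ≈ 15.6 ms of debounce, comfortably above the 12 ms minimum the i689 workaround calls for.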
enum {
KBD_REVISION_OMAP4 = 0,
@@ -116,12 +126,8 @@ static irqreturn_t omap4_keypad_irq_handler(int irq, void *dev_id)
{
struct omap4_keypad *keypad_data = dev_id;
- if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS)) {
- /* Disable interrupts */
- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_VAL_IRQDISABLE);
+ if (kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS))
return IRQ_WAKE_THREAD;
- }
return IRQ_NONE;
}
@@ -163,11 +169,6 @@ static irqreturn_t omap4_keypad_irq_thread_fn(int irq, void *dev_id)
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
- /* enable interrupts */
- kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
- OMAP4_DEF_IRQENABLE_EVENTEN |
- OMAP4_DEF_IRQENABLE_LONGKEY);
-
return IRQ_HANDLED;
}
@@ -181,9 +182,9 @@ static int omap4_keypad_open(struct input_dev *input)
kbd_writel(keypad_data, OMAP4_KBD_CTRL,
OMAP4_DEF_CTRL_NOSOFTMODE |
- (OMAP4_VAL_PVT << OMAP4_DEF_CTRL_PTV_SHIFT));
+ (OMAP4_KEYPAD_PTV_DIV_128 << OMAP4_DEF_CTRL_PTV_SHIFT));
kbd_writel(keypad_data, OMAP4_KBD_DEBOUNCINGTIME,
- OMAP4_VAL_DEBOUNCINGTIME);
+ OMAP4_VAL_DEBOUNCINGTIME_16MS);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
@@ -204,9 +205,10 @@ static void omap4_keypad_close(struct input_dev *input)
disable_irq(keypad_data->irq);
- /* Disable interrupts */
+ /* Disable interrupts and wake-up events */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
OMAP4_VAL_IRQDISABLE);
+ kbd_writel(keypad_data, OMAP4_KBD_WAKEUPENABLE, 0);
/* clear pending interrupts */
kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
@@ -354,7 +356,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
}
error = request_threaded_irq(keypad_data->irq, omap4_keypad_irq_handler,
- omap4_keypad_irq_thread_fn, 0,
+ omap4_keypad_irq_thread_fn, IRQF_ONESHOT,
"omap4-keypad", keypad_data);
if (error) {
dev_err(&pdev->dev, "failed to register interrupt\n");
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c
index de7be4f03d9193..ebf9f643d910cf 100644
--- a/drivers/input/keyboard/st-keyscan.c
+++ b/drivers/input/keyboard/st-keyscan.c
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev)
input_dev->id.bustype = BUS_HOST;
+ keypad_data->input_dev = input_dev;
+
error = keypad_matrix_key_parse_dt(keypad_data);
if (error)
return error;
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev)
input_set_drvdata(input_dev, keypad_data);
- keypad_data->input_dev = input_dev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
keypad_data->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(keypad_data->base))
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1d0e61d7c13188..b6c1d1d482c197 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -482,13 +482,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
idev->close = bma150_irq_close;
input_set_drvdata(idev, bma150);
+ bma150->input = idev;
+
error = input_register_device(idev);
if (error) {
input_free_device(idev);
return error;
}
- bma150->input = idev;
return 0;
}
@@ -511,15 +512,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
bma150_init_input_device(bma150, ipoll_dev->input);
+ bma150->input_polled = ipoll_dev;
+ bma150->input = ipoll_dev->input;
+
error = input_register_polled_device(ipoll_dev);
if (error) {
input_free_polled_device(ipoll_dev);
return error;
}
- bma150->input_polled = ipoll_dev;
- bma150->input = ipoll_dev->input;
-
return 0;
}
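The st-keyscan and bma150 hunks above fix the same ordering bug: input_register_device() and input_register_polled_device() may invoke the device's open() callback before they return, so every pointer that callback dereferences has to be initialized first. A minimal sketch of the rule, with hypothetical names (my_drv, my_register):

/* Sketch only: the names are made up, the ordering is the point. */
struct my_drv {
	struct input_dev *input;
};

static int my_register(struct my_drv *drv, struct input_dev *idev)
{
	int error;

	/* Publish everything open() may touch *before* registering;
	 * input_register_device() can call open() synchronously. */
	drv->input = idev;

	error = input_register_device(idev);
	if (error) {
		input_free_device(idev);
		return error;
	}

	return 0;	/* assigning drv->input here would be too late */
}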
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 61c20243625085..9d7fae74389810 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -27,6 +27,8 @@
#define ETP_DISABLE_POWER 0x0001
#define ETP_PRESSURE_OFFSET 25
+#define ETP_CALIBRATE_MAX_LEN 3
+
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a54d9e733ae7e5..72b2ebedf79af5 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -683,7 +683,7 @@ static ssize_t calibrate_store(struct device *dev,
int tries = 20;
int retval;
int error;
- u8 val[3];
+ u8 val[ETP_CALIBRATE_MAX_LEN];
retval = mutex_lock_interruptible(&data->sysfs_mutex);
if (retval)
@@ -1335,6 +1335,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0601", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },
@@ -1343,6 +1344,15 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060B", 0 },
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
+ { "ELAN0612", 0 },
+ { "ELAN0617", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN061C", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN061E", 0 },
+ { "ELAN0620", 0 },
+ { "ELAN0621", 0 },
+ { "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index 7c82691d0abc07..489d8721b651db 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -56,7 +56,7 @@
static int elan_smbus_initialize(struct i2c_client *client)
{
u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
- u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+ u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
int len, error;
/* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
{
int error;
+ u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
error = i2c_smbus_read_block_data(client,
- ETP_SMBUS_CALIBRATE_QUERY, val);
+ ETP_SMBUS_CALIBRATE_QUERY, buf);
if (error < 0)
return error;
+ memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
return 0;
}
@@ -130,7 +134,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client,
bool max_baseline, u8 *value)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
max_baseline ?
@@ -149,7 +153,7 @@ static int elan_smbus_get_version(struct i2c_client *client,
bool iap, u8 *version)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
iap ? ETP_SMBUS_IAP_VERSION_CMD :
@@ -169,7 +173,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
u16 *ic_type, u8 *version)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_SM_VERSION_CMD, val);
@@ -186,7 +190,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_UNIQUEID_CMD, val);
@@ -203,7 +207,7 @@ static int elan_smbus_get_checksum(struct i2c_client *client,
bool iap, u16 *csum)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
iap ? ETP_SMBUS_FW_CHECKSUM_CMD :
@@ -223,7 +227,7 @@ static int elan_smbus_get_max(struct i2c_client *client,
unsigned int *max_x, unsigned int *max_y)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client, ETP_SMBUS_RANGE_CMD, val);
if (error) {
@@ -241,7 +245,7 @@ static int elan_smbus_get_resolution(struct i2c_client *client,
u8 *hw_res_x, u8 *hw_res_y)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_RESOLUTION_CMD, val);
@@ -261,7 +265,7 @@ static int elan_smbus_get_num_traces(struct i2c_client *client,
unsigned int *y_traces)
{
int error;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client,
ETP_SMBUS_XY_TRACENUM_CMD, val);
@@ -288,7 +292,7 @@ static int elan_smbus_iap_get_mode(struct i2c_client *client,
{
int error;
u16 constant;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
error = i2c_smbus_read_block_data(client, ETP_SMBUS_IAP_CTRL_CMD, val);
if (error < 0) {
@@ -339,7 +343,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client)
int len;
int error;
enum tp_mode mode;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
u8 cmd[4] = {0x0F, 0x78, 0x00, 0x06};
u16 password;
@@ -413,7 +417,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client,
struct device *dev = &client->dev;
int error;
u16 result;
- u8 val[3];
+ u8 val[I2C_SMBUS_BLOCK_MAX] = {0};
/*
* Due to the limitation of smbus protocol limiting
@@ -466,6 +470,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
{
int len;
+ BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_PACKET_QUERY,
&report[ETP_SMBUS_REPORT_OFFSET]);
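Every conversion in this file follows one rule: i2c_smbus_read_block_data() may store up to I2C_SMBUS_BLOCK_MAX (32) bytes into the buffer it is handed, however few bytes the caller cares about, so the old 3-byte stack buffers could be overflowed by a misbehaving device. A hedged sketch of the safe pattern (MY_QUERY_CMD, MY_RESULT_LEN and my_read_result are hypothetical, not part of the driver):

#include <linux/bug.h>
#include <linux/i2c.h>
#include <linux/string.h>

#define MY_QUERY_CMD	0xc5	/* hypothetical register */
#define MY_RESULT_LEN	3	/* bytes the caller actually consumes */

static int my_read_result(struct i2c_client *client, u8 *out)
{
	/* Big enough for anything the device can legally send back. */
	u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
	int len;

	BUILD_BUG_ON(MY_RESULT_LEN > sizeof(buf));

	len = i2c_smbus_read_block_data(client, MY_QUERY_CMD, buf);
	if (len < 0)
		return len;

	memcpy(out, buf, MY_RESULT_LEN);	/* hand out only our bytes */
	return 0;
}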
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 06ea28e5d7b4c8..4c1e527f14a5f0 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -804,7 +804,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
else if (ic_version == 7 && etd->samples[1] == 0x2A)
sanity_check = ((packet[3] & 0x1c) == 0x10);
else
- sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+ sanity_check = ((packet[0] & 0x08) == 0x00 &&
(packet[3] & 0x1c) == 0x10);
if (!sanity_check)
@@ -1121,6 +1121,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
+ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1173,10 +1175,25 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
+ {
+ /* Fujitsu H780 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+ },
+ },
#endif
{ }
};
+static const char * const middle_button_pnp_ids[] = {
+ "LEN2131", /* ThinkPad P52 w/ NFC */
+ "LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
+ NULL
+};
+
/*
* Set the appropriate event bits for the input subsystem
*/
@@ -1196,7 +1213,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
- if (dmi_check_system(elantech_dmi_has_middle_button))
+ if (dmi_check_system(elantech_dmi_has_middle_button) ||
+ psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
__set_bit(BTN_MIDDLE, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ced7c03d0fc8ff..564100c61125a8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
+ {
+ /* Lenovo LaVie Z */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ },
+ },
{ }
};
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 20ab802461e774..1d46b763aae695 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -187,6 +187,7 @@ enum {
MODEL_DIGITIZER_II = 0x5544, /* UD */
MODEL_GRAPHIRE = 0x4554, /* ET */
MODEL_PENPARTNER = 0x4354, /* CT */
+ MODEL_ARTPAD_II = 0x4B54, /* KT */
};
static void wacom_handle_model_response(struct wacom *wacom)
@@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
break;
+ case MODEL_ARTPAD_II:
case MODEL_DIGITIZER_II:
wacom->dev->name = "Wacom Digitizer II";
wacom->dev->id.version = MODEL_DIGITIZER_II;
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index b89c2c8e2c05e4..51af13d57ce7f3 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1878,8 +1878,10 @@ static int mxt_get_object_table(struct mxt_data *data)
data->T6_reportid = min_id;
break;
case MXT_TOUCH_MULTI_T9:
+ /* Only handle messages from the first T9 instance */
data->T9_reportid_min = min_id;
- data->T9_reportid_max = max_id;
+ data->T9_reportid_max = min_id +
+ object->num_report_ids - 1;
data->num_touchids = object->num_report_ids;
data->has_T9 = true;
break;
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 4d113c9e4b7710..7bf2597ce44c63 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -425,6 +425,7 @@ MODULE_DEVICE_TABLE(i2c, goodix_ts_id);
#ifdef CONFIG_ACPI
static const struct acpi_device_id goodix_acpi_match[] = {
{ "GDIX1001", 0 },
+ { "GDIX1002", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 52c36394dba500..0ad8b7c78a438c 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1982,6 +1982,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
static void do_detach(struct iommu_dev_data *dev_data)
{
+ struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
u16 alias;
@@ -1997,10 +1998,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
- /* decrease reference counters */
- dev_data->domain->dev_iommu[iommu->index] -= 1;
- dev_data->domain->dev_cnt -= 1;
-
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
/* Flush the DTE entry */
device_flush_dte(dev_data);
+
+ /* Flush IOTLB */
+ domain_flush_tlb_pde(domain);
+
+ /* Wait for the flushes to finish */
+ domain_flush_complete(domain);
+
+ /* decrease reference counters - needs to happen after the flushes */
+ domain->dev_iommu[iommu->index] -= 1;
+ domain->dev_cnt -= 1;
}
/*
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 74ba2a851e7260..82b9e3505f2612 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -683,7 +683,13 @@ static void queue_inc_cons(struct arm_smmu_queue *q)
u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
- writel(q->cons, q->cons_reg);
+
+ /*
+ * Ensure that all CPU accesses (reads and writes) to the queue
+ * are complete before we update the cons pointer.
+ */
+ mb();
+ writel_relaxed(q->cons, q->cons_reg);
}
static int queue_sync_prod(struct arm_smmu_queue *q)
@@ -1219,6 +1225,7 @@ static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
/* Sync our overflow flag, as we believe we're up to speed */
q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
+ writel(q->cons, q->cons_reg);
return IRQ_HANDLED;
}
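The barrier requirement above is the generic single-producer/single-consumer rule: all reads of a queue slot must complete before the new consumer index becomes visible, or the producer may recycle a slot that is still being read. A userspace analog using C11 atomics, where the release store plays the role of the mb() + writel_relaxed() pair (a sketch of the ordering idea, not the SMMU code):

#include <stdatomic.h>
#include <stdint.h>

struct ring {
	uint64_t slots[256];
	_Atomic uint32_t cons;	/* stands in for the hardware CONS register */
};

static uint64_t consume(struct ring *q)
{
	uint32_t c = atomic_load_explicit(&q->cons, memory_order_relaxed);
	uint64_t val = q->slots[c % 256];	/* read the slot first... */

	/*
	 * ...then publish the new index. The release store keeps the read
	 * above from being reordered past it, the same job mb() +
	 * writel_relaxed() does against the device in the hunk above.
	 */
	atomic_store_explicit(&q->cons, c + 1, memory_order_release);
	return val;
}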
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index e913a930ac8093..5a63e32a4a6b7f 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1315,8 +1315,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
qi_submit_sync(&desc, iommu);
}
-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask)
+void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask)
{
struct qi_desc desc;
@@ -1331,7 +1331,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
qdep = 0;
desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
- QI_DIOTLB_TYPE;
+ QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
qi_submit_sync(&desc, iommu);
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4efec2db4ee2c7..8b4a4d95669aad 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -419,6 +419,7 @@ struct device_domain_info {
struct list_head global; /* link to global list */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
u8 pasid_supported:3;
u8 pasid_enabled:1;
u8 pri_supported:1;
@@ -1479,6 +1480,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
return;
pdev = to_pci_dev(info->dev);
+ /* For IOMMUs that support device IOTLB throttling (DIT), we assign a
+ * PFSID to a VF's invalidation descriptor so that the IOMMU hardware
+ * can gauge queue depth at the PF level. If DIT is not set, PFSID is
+ * treated as reserved and should be set to 0.
+ */
+ if (!ecap_dit(info->iommu->ecap))
+ info->pfsid = 0;
+ else {
+ struct pci_dev *pf_pdev;
+
+ /* pci_physfn() returns pdev itself if the device is not a VF */
+ pf_pdev = pci_physfn(pdev);
+ info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+ }
#ifdef CONFIG_INTEL_IOMMU_SVM
/* The PCIe spec, in its wisdom, declares that the behaviour of
@@ -1537,7 +1552,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
sid = info->bus << 8 | info->devfn;
qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
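The helper logic added in iommu_enable_dev_iotlb() boils down to one derivation: the source ID the hardware should meter on is the PF's, and pci_physfn() already collapses the VF/non-VF distinction. Reduced to a hedged sketch (my_pfsid is a made-up helper name):

#include <linux/pci.h>

/*
 * Sketch: for a VF, pci_physfn() returns the parent PF; for any other
 * device it returns the device itself, so the "PF source ID" simply
 * degenerates to the device's own bus/devfn.
 */
static u16 my_pfsid(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);

	return PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
}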
@@ -2025,7 +2041,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* than default. Unnecessary for PT mode.
*/
if (translation != CONTEXT_TT_PASS_THROUGH) {
- for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+ for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
ret = -ENOMEM;
pgd = phys_to_virt(dma_pte_addr(pgd));
if (!dma_pte_present(pgd))
@@ -2039,7 +2055,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
translation = CONTEXT_TT_MULTI_LEVEL;
context_set_address_root(context, virt_to_phys(pgd));
- context_set_address_width(context, iommu->agaw);
+ context_set_address_width(context, agaw);
} else {
/*
* In pass through mode, AW must be programmed to
@@ -2961,7 +2977,7 @@ static int copy_context_table(struct intel_iommu *iommu,
}
if (old_ce)
- iounmap(old_ce);
+ memunmap(old_ce);
ret = 0;
if (devfn < 0x80)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 10068a481e2237..cbde03e509c1a9 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -558,7 +558,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
pr_err("%s: Page request without PASID: %08llx %08llx\n",
iommu->name, ((unsigned long long *)req)[0],
((unsigned long long *)req)[1]);
- goto bad_req;
+ goto no_pasid;
}
if (!svm || svm->pasid != req->pasid) {
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index e9b241b1c9dd84..ac596928f6b40a 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -753,7 +753,7 @@ static inline void set_irq_posting_cap(void)
* should have X86_FEATURE_CX16 support, this has been confirmed
* with Intel hardware guys.
*/
- if ( cpu_has_cx16 )
+ if (boot_cpu_has(X86_FEATURE_CX16))
intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
for_each_iommu(iommu, drhd)
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 2fdbac67a77f4b..6dd7bb9b6a44ea 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -44,7 +44,7 @@ struct ipmmu_vmsa_domain {
struct io_pgtable_ops *iop;
unsigned int context_id;
- spinlock_t lock; /* Protects mappings */
+ struct mutex mutex; /* Protects mappings */
};
struct ipmmu_vmsa_archdata {
@@ -372,6 +372,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
+ if (!domain->mmu)
+ return;
+
/*
* Disable the context. Flush the TLB as required when modifying the
* context registers.
@@ -464,7 +467,7 @@ static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
if (!domain)
return NULL;
- spin_lock_init(&domain->lock);
+ mutex_init(&domain->mutex);
return &domain->io_domain;
}
@@ -488,7 +491,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
struct ipmmu_vmsa_device *mmu = archdata->mmu;
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- unsigned long flags;
unsigned int i;
int ret = 0;
@@ -497,7 +499,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return -ENXIO;
}
- spin_lock_irqsave(&domain->lock, flags);
+ mutex_lock(&domain->mutex);
if (!domain->mmu) {
/* The domain hasn't been used yet, initialize it. */
@@ -513,7 +515,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
ret = -EINVAL;
}
- spin_unlock_irqrestore(&domain->lock, flags);
+ mutex_unlock(&domain->mutex);
if (ret < 0)
return ret;
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index d7af88534971ec..6fb34bf0f3527d 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -216,6 +216,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
+#ifdef CONFIG_SMP
static void bcm7038_l1_cpu_offline(struct irq_data *d)
{
struct cpumask *mask = irq_data_get_affinity_mask(d);
@@ -240,6 +241,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
}
irq_set_affinity_locked(d, &new_affinity, false);
}
+#endif
static int __init bcm7038_l1_init_one(struct device_node *dn,
unsigned int idx,
@@ -292,7 +294,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
.irq_set_affinity = bcm7038_l1_set_affinity,
+#ifdef CONFIG_SMP
.irq_cpu_offline = bcm7038_l1_cpu_offline,
+#endif
};
static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index dbc151734a73dd..c3b043ce48373b 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -69,7 +69,10 @@ struct its_node {
unsigned long phys_base;
struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write;
- void *tables[GITS_BASER_NR_REGS];
+ struct {
+ void *base;
+ u32 order;
+ } tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
u64 cbaser_save;
u64 baser_save[GITS_BASER_NR_REGS];
@@ -82,6 +85,9 @@ struct its_node {
#define ITS_ITT_ALIGN SZ_256
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
struct event_lpi_map {
unsigned long *lpi_map;
u16 *col_map;
@@ -821,9 +827,10 @@ static void its_free_tables(struct its_node *its)
int i;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- if (its->tables[i]) {
- free_page((unsigned long)its->tables[i]);
- its->tables[i] = NULL;
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
}
}
}
@@ -856,7 +863,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
u64 type = GITS_BASER_TYPE(val);
u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
int order = get_order(psz);
- int alloc_size;
int alloc_pages;
u64 tmp;
void *base;
@@ -888,8 +894,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
}
}
- alloc_size = (1 << order) * PAGE_SIZE;
- alloc_pages = (alloc_size / psz);
+retry_alloc_baser:
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
alloc_pages = GITS_BASER_PAGES_MAX;
order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -903,7 +909,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
goto out_free;
}
- its->tables[i] = base;
+ its->tables[i].base = base;
+ its->tables[i].order = order;
retry_baser:
val = (virt_to_phys(base) |
@@ -941,7 +948,7 @@ retry_baser:
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, alloc_size);
+ __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -952,13 +959,16 @@ retry_baser:
* size and retry. If we reach 4K, then
* something is horribly wrong...
*/
+ free_pages((unsigned long)base, order);
+ its->tables[i].base = NULL;
+
switch (psz) {
case SZ_16K:
psz = SZ_4K;
- goto retry_baser;
+ goto retry_alloc_baser;
case SZ_64K:
psz = SZ_16K;
- goto retry_baser;
+ goto retry_alloc_baser;
}
}
@@ -971,7 +981,7 @@ retry_baser:
}
pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(alloc_size / entry_size),
+ (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
@@ -1226,13 +1236,14 @@ static void its_free_device(struct its_device *its_dev)
kfree(its_dev);
}
-static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
+static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
{
int idx;
- idx = find_first_zero_bit(dev->event_map.lpi_map,
- dev->event_map.nr_lpis);
- if (idx == dev->event_map.nr_lpis)
+ idx = bitmap_find_free_region(dev->event_map.lpi_map,
+ dev->event_map.nr_lpis,
+ get_count_order(nvecs));
+ if (idx < 0)
return -ENOSPC;
*hwirq = dev->event_map.lpi_base + idx;
@@ -1313,20 +1324,20 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int err;
int i;
- for (i = 0; i < nr_irqs; i++) {
- err = its_alloc_device_irq(its_dev, &hwirq);
- if (err)
- return err;
+ err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
+ if (err)
+ return err;
- err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
+ for (i = 0; i < nr_irqs; i++) {
+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
if (err)
return err;
irq_domain_set_hwirq_and_chip(domain, virq + i,
- hwirq, &its_irq_chip, its_dev);
+ hwirq + i, &its_irq_chip, its_dev);
pr_debug("ID:%d pID:%d vID:%d\n",
- (int)(hwirq - its_dev->event_map.lpi_base),
- (int) hwirq, virq + i);
+ (int)(hwirq + i - its_dev->event_map.lpi_base),
+ (int)(hwirq + i), virq + i);
}
return 0;
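The allocator change works because bitmap_find_free_region() claims a naturally aligned run of 2^order bits in one call, so nr_irqs consecutive event IDs come back as base..base+nr_irqs-1 (get_count_order() rounds nr_irqs up to the next power of two). A runnable toy model of the search it performs (a naive scan for illustration, not the kernel helper):

#include <stdio.h>

/* Toy model: find a 2^order-aligned run of 2^order free slots. */
static int find_free_region(unsigned char *used, int nbits, int order)
{
	int step = 1 << order, start, i;

	for (start = 0; start + step <= nbits; start += step) { /* aligned */
		for (i = 0; i < step && !used[start + i]; i++)
			;
		if (i == step) {		/* whole run is free */
			for (i = 0; i < step; i++)
				used[start + i] = 1;
			return start;
		}
	}
	return -1;	/* -ENOSPC in the kernel version */
}

int main(void)
{
	unsigned char lpi_map[32] = { [0] = 1, [5] = 1 }; /* a few taken */
	/* 4 MSIs -> order 2: expect base 8 (0..3 blocked by bit 0,
	 * 4..7 blocked by bit 5). */
	printf("base event = %d\n", find_free_region(lpi_map, 32, 2));
	return 0;
}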
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7851e786af99b7..721dc8a3be624f 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -336,7 +336,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
- if (likely(irqnr > 15 && irqnr < 1021)) {
+ if (likely(irqnr > 15 && irqnr < 1020)) {
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
handle_domain_irq(gic->domain, irqnr, regs);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 013fc9659a842b..2fe2bcb63a711c 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
+
struct icu_chip_data {
int nr_irqs;
unsigned int virq_base;
@@ -190,7 +193,8 @@ static struct mmp_intc_conf mmp_conf = {
static struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20,
.conf_disable = 0x0,
- .conf_mask = 0x7f,
+ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index dd7e38ac29bdd8..d15347de415a65 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -851,7 +851,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
u16 ret;
if (contr == 0) {
- strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+ strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
return CAPI_NOERROR;
}
@@ -859,7 +859,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
- strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+ strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 74bf1a17ae7ca0..b90776ef56ec8d 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
static void gigaset_device_release(struct device *dev)
{
- struct cardstate *cs = dev_get_drvdata(dev);
-
- if (!cs)
- return;
- dev_set_drvdata(dev, NULL);
- kfree(cs->hw.ser);
- cs->hw.ser = NULL;
+ kfree(container_of(dev, struct ser_cardstate, dev.dev));
}
/*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
cs->hw.ser = NULL;
return rc;
}
- dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
tasklet_init(&cs->write_tasklet,
gigaset_modem_fill, (unsigned long) cs);
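The release fix drops the drvdata round-trip in favor of the canonical embedded-device pattern: since the struct device lives inside the driver state, container_of() recovers the enclosing object from the device pointer alone, and one kfree() releases everything. A runnable plain-C illustration (the struct names are stand-ins, not the gigaset types):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };		/* stand-in for struct device */
struct cardstate { char name[16]; struct device dev; };

static void release(struct device *dev)
{
	/* Recover the enclosing object and free it in one step. */
	free(container_of(dev, struct cardstate, dev));
}

int main(void)
{
	struct cardstate *cs = calloc(1, sizeof(*cs));

	release(&cs->dev);		/* frees the whole cardstate */
	return 0;
}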
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4d9b195547c5cc..df2a10157720a8 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "\0\0" + 1;
+ cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
index d91dd580e9781b..37aaea88a6adc0 100644
--- a/drivers/isdn/hardware/eicon/diva.c
+++ b/drivers/isdn/hardware/eicon/diva.c
@@ -387,10 +387,10 @@ void divasa_xdi_driver_unload(void)
** Receive and process command from user mode utility
*/
void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
- int length,
+ int length, void *mptr,
divas_xdi_copy_from_user_fn_t cp_fn)
{
- diva_xdi_um_cfg_cmd_t msg;
+ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
diva_os_xdi_adapter_t *a = NULL;
diva_os_spin_lock_magic_t old_irql;
struct list_head *tmp;
@@ -400,21 +400,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
length, sizeof(diva_xdi_um_cfg_cmd_t)))
return NULL;
}
- if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
+ if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
DBG_ERR(("A: A(?) open, write error"))
return NULL;
}
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
list_for_each(tmp, &adapter_queue) {
a = list_entry(tmp, diva_os_xdi_adapter_t, link);
- if (a->controller == (int)msg.adapter)
+ if (a->controller == (int)msg->adapter)
break;
a = NULL;
}
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
if (!a) {
- DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
+ DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
}
return (a);
@@ -436,8 +436,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
int
diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
- int length, divas_xdi_copy_from_user_fn_t cp_fn)
+ int length, void *mptr,
+ divas_xdi_copy_from_user_fn_t cp_fn)
{
+ diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
void *data;
@@ -458,7 +460,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
return (-2);
}
- length = (*cp_fn) (os_handle, data, src, length);
+ if (msg) {
+ *(diva_xdi_um_cfg_cmd_t *)data = *msg;
+ length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
+ src + sizeof(*msg), length - sizeof(*msg));
+ } else {
+ length = (*cp_fn) (os_handle, data, src, length);
+ }
if (length > 0) {
if ((*(a->interface.cmd_proc))
(a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
index e979085d1b891f..a0a607c0c32e21 100644
--- a/drivers/isdn/hardware/eicon/diva.h
+++ b/drivers/isdn/hardware/eicon/diva.h
@@ -19,10 +19,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
- int length, divas_xdi_copy_from_user_fn_t cp_fn);
+ int length, void *msg,
+ divas_xdi_copy_from_user_fn_t cp_fn);
void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
- int length,
+ int length, void *msg,
divas_xdi_copy_from_user_fn_t cp_fn);
void diva_xdi_close_adapter(void *adapter, void *os_handle);
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index a2e0ed6c9a4d36..91bd2ba0bdd889 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
static ssize_t divas_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ diva_xdi_um_cfg_cmd_t msg;
int ret = -EINVAL;
if (!file->private_data) {
file->private_data = diva_xdi_open_adapter(file, buf,
- count,
+ count, &msg,
xdi_copy_from_user);
- }
- if (!file->private_data) {
- return (-ENODEV);
+ if (!file->private_data)
+ return (-ENODEV);
+ ret = diva_xdi_write(file->private_data, file,
+ buf, count, &msg, xdi_copy_from_user);
+ } else {
+ ret = diva_xdi_write(file->private_data, file,
+ buf, count, NULL, xdi_copy_from_user);
}
- ret = diva_xdi_write(file->private_data, file,
- buf, count, xdi_copy_from_user);
switch (ret) {
case -1: /* Message should be removed from rx mailbox first */
ret = -EBUSY;
@@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
static ssize_t divas_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ diva_xdi_um_cfg_cmd_t msg;
int ret = -EINVAL;
if (!file->private_data) {
file->private_data = diva_xdi_open_adapter(file, buf,
- count,
+ count, &msg,
xdi_copy_from_user);
}
if (!file->private_data) {
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 28543d7951886e..9a27809bdaf2bc 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4370,7 +4370,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
if (m->clock2)
test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
- if (ent->device == 0xB410) {
+ if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+ ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 90449e1e91e5a2..1b1453d62fedf1 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1169,11 +1169,13 @@ HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
if (cs->debug & L1_DEB_LAPD)
debugl1(cs, "-> PH_REQUEST_PULL");
#endif
+ spin_lock_irqsave(&cs->lock, flags);
if (!cs->tx_skb) {
test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
} else
test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
break;
case (HW_RESET | REQUEST):
spin_lock_irqsave(&cs->lock, flags);
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index e4c43a17b333f1..8088c34336aa82 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1655,13 +1655,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
} else
return -EINVAL;
case IIOCDBGVAR:
- if (arg) {
- if (copy_to_user(argp, &dev, sizeof(ulong)))
- return -EFAULT;
- return 0;
- } else
- return -EINVAL;
- break;
+ return -EINVAL;
default:
if ((cmd & IIOCDRVCTL) == IIOCDRVCTL)
cmd = ((cmd >> _IOC_NRSHIFT) & _IOC_NRMASK) & ISDN_DRVIOCTL_MASK;
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 2175225af74214..2da3f5cd07294a 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -786,7 +786,7 @@ isdn_tty_suspend(char *id, modem_info *info, atemu *m)
cmd.parm.cmsg.para[3] = 4; /* 16 bit 0x0004 Suspend */
cmd.parm.cmsg.para[4] = 0;
cmd.parm.cmsg.para[5] = l;
- strncpy(&cmd.parm.cmsg.para[6], id, l);
+ strscpy(&cmd.parm.cmsg.para[6], id, l);
cmd.command = CAPI_PUT_MESSAGE;
cmd.driver = info->isdn_driver;
cmd.arg = info->isdn_channel;
@@ -1459,15 +1459,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
modem_info *info = (modem_info *) tty->driver_data;
+ mutex_lock(&modem_info_mutex);
if (!old_termios)
isdn_tty_change_speed(info);
else {
if (tty->termios.c_cflag == old_termios->c_cflag &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
- tty->termios.c_ospeed == old_termios->c_ospeed)
+ tty->termios.c_ospeed == old_termios->c_ospeed) {
+ mutex_unlock(&modem_info_mutex);
return;
+ }
isdn_tty_change_speed(info);
}
+ mutex_unlock(&modem_info_mutex);
}
/*
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 9438d7ec33080a..8b29e97cf66818 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -168,8 +168,8 @@ dev_expire_timer(unsigned long data)
spin_lock_irqsave(&timer->dev->lock, flags);
if (timer->id >= 0)
list_move_tail(&timer->list, &timer->dev->expired);
- spin_unlock_irqrestore(&timer->dev->lock, flags);
wake_up_interruptible(&timer->dev->wait);
+ spin_unlock_irqrestore(&timer->dev->lock, flags);
}
static int
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 5db4515a4fd7aa..df186b38da78ff 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -118,8 +118,8 @@ static int create_gpio_led(const struct gpio_led *template,
return ret;
led_dat->gpiod = gpio_to_desc(template->gpio);
- if (IS_ERR(led_dat->gpiod))
- return PTR_ERR(led_dat->gpiod);
+ if (!led_dat->gpiod)
+ return -EINVAL;
}
led_dat->cdev.name = template->name;
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 1d0187f42941aa..d12370352ae343 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
/* Let the programs run for a couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ if (ret)
+ return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 27f2c5108e8acf..187843a79a60e0 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -141,6 +141,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
ret = led_classdev_register(dev, &led_data->cdev);
if (ret == 0) {
priv->num_leds++;
+ led_pwm_set(&led_data->cdev, led_data->cdev.brightness);
} else {
dev_err(dev, "failed to register PWM led for %s: %d\n",
led->name, ret);
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index f9512bfa6c3c7d..0a41132ffba797 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -530,8 +530,9 @@ init_pmu(void)
int timeout;
struct adb_request req;
- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
+ /* Negate TREQ. Set TACK to input and TREQ to output. */
+ out_8(&via[B], in_8(&via[B]) | TREQ);
+ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
timeout = 100000;
@@ -1453,8 +1454,8 @@ pmu_sr_intr(void)
struct adb_request *req;
int bite = 0;
- if (via[B] & TREQ) {
- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
+ if (in_8(&via[B]) & TREQ) {
+ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
out_8(&via[IFR], SR_INT);
return NULL;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 4ed621ad27e403..05aa3ac1381ba9 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -2372,7 +2372,7 @@ static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
struct keybuf *buf = refill->buf;
int ret = MAP_CONTINUE;
- if (bkey_cmp(k, refill->end) >= 0) {
+ if (bkey_cmp(k, refill->end) > 0) {
ret = MAP_DONE;
goto out;
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index f2c0000de61375..95a6ae053714cb 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -462,8 +462,10 @@ static int bch_writeback_thread(void *arg)
* data on cache. BCACHE_DEV_DETACHING flag is set in
* bch_cached_dev_detach().
*/
- if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
+ if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
+ up_write(&dc->writeback_lock);
break;
+ }
}
up_write(&dc->writeback_lock);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 4d3bf4f6aa3456..b1d5fa0bc8f7bd 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -830,8 +830,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
*/
while (1) {
if (dm_bufio_cache_size_latch != 1) {
- b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY |
- __GFP_NOMEMALLOC | __GFP_NOWARN);
+ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
if (b)
return b;
}
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index d3c55d7754afac..905badc6cb1799 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -337,7 +337,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
disk_super->version = cpu_to_le32(MAX_CACHE_VERSION);
memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
- disk_super->policy_hint_size = 0;
+ disk_super->policy_hint_size = cpu_to_le32(0);
__copy_sm_root(cmd, disk_super);
@@ -652,6 +652,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
+ disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 0da5efaad85c30..71256022203b35 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3388,8 +3388,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
- if (from_cblock(new_size) > from_cblock(cache->cache_size))
- return true;
+ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+ if (cache->sized) {
+ DMERR("%s: unable to extend cache due to missing cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+ }
/*
* We can't drop a dirty block when shrinking the cache.
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 065d7cee0d219b..f43bc95d90b541 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1685,8 +1685,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
}
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
- int ioctl_flags,
- struct dm_ioctl **param, int *param_flags)
+ int ioctl_flags, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl *dmi;
int secure_data;
@@ -1734,18 +1733,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
return -ENOMEM;
}
- if (copy_from_user(dmi, user, param_kernel->data_size))
- goto bad;
+ /* Copy from param_kernel (which was already copied from user) */
+ memcpy(dmi, param_kernel, minimum_data_size);
-data_copied:
- /*
- * Abort if something changed the ioctl data while it was being copied.
- */
- if (dmi->data_size != param_kernel->data_size) {
- DMERR("rejecting ioctl: data size modified while processing parameters");
+ if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
+ param_kernel->data_size - minimum_data_size))
goto bad;
- }
-
+data_copied:
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
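The rewritten copy path closes a double-fetch (TOCTOU) window: the header is validated on a kernel copy and must never be re-read from user memory, because a racing thread could change data_size between the check and a second copy. A sketch of the single-fetch rule, with a hypothetical struct (struct my_hdr and fetch_params are made up; the real headers are dm_ioctl here and diva_xdi_um_cfg_cmd_t in the ISDN hunks above):

#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct my_hdr {
	u32 data_size;	/* validated total size, header included */
	char data[];
};

static int fetch_params(struct my_hdr __user *user,
			const struct my_hdr *checked, struct my_hdr *dst)
{
	/* 1) The header comes from the copy that was already validated. */
	memcpy(dst, checked, sizeof(*dst));

	/* 2) Only the payload tail is read from (racing) user memory. */
	if (copy_from_user(dst->data, (char __user *)user + sizeof(*dst),
			   checked->data_size - sizeof(*dst)))
		return -EFAULT;

	return 0;
}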
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1452ed9aacb422..04248394843e81 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -55,15 +55,17 @@ struct dm_kcopyd_client {
struct dm_kcopyd_throttle *throttle;
/*
- * We maintain three lists of jobs:
+ * We maintain four lists of jobs:
*
* i) jobs waiting for pages
* ii) jobs that have pages, and are waiting for the io to be issued.
- * iii) jobs that have completed.
+ * iii) jobs that don't need to do any IO and just run a callback
+ * iv) jobs that have completed.
*
- * All three of these are protected by job_lock.
+ * All four of these are protected by job_lock.
*/
spinlock_t job_lock;
+ struct list_head callback_jobs;
struct list_head complete_jobs;
struct list_head io_jobs;
struct list_head pages_jobs;
@@ -454,6 +456,8 @@ static int run_complete_job(struct kcopyd_job *job)
if (atomic_dec_and_test(&kc->nr_jobs))
wake_up(&kc->destroyq);
+ cond_resched();
+
return 0;
}
@@ -581,6 +585,7 @@ static void do_work(struct work_struct *work)
struct dm_kcopyd_client *kc = container_of(work,
struct dm_kcopyd_client, kcopyd_work);
struct blk_plug plug;
+ unsigned long flags;
/*
* The order that these are called is *very* important.
@@ -589,6 +594,10 @@ static void do_work(struct work_struct *work)
* list. io jobs call wake when they complete and it all
* starts again.
*/
+ spin_lock_irqsave(&kc->job_lock, flags);
+ list_splice_tail_init(&kc->callback_jobs, &kc->complete_jobs);
+ spin_unlock_irqrestore(&kc->job_lock, flags);
+
blk_start_plug(&plug);
process_jobs(&kc->complete_jobs, kc, run_complete_job);
process_jobs(&kc->pages_jobs, kc, run_pages_job);
@@ -606,7 +615,7 @@ static void dispatch_job(struct kcopyd_job *job)
struct dm_kcopyd_client *kc = job->kc;
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
- push(&kc->complete_jobs, job);
+ push(&kc->callback_jobs, job);
else if (job->pages == &zero_page_list)
push(&kc->io_jobs, job);
else
@@ -793,7 +802,7 @@ void dm_kcopyd_do_callback(void *j, int read_err, unsigned long write_err)
job->read_err = read_err;
job->write_err = write_err;
- push(&kc->complete_jobs, job);
+ push(&kc->callback_jobs, job);
wake(kc);
}
EXPORT_SYMBOL(dm_kcopyd_do_callback);
@@ -823,6 +832,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *thro
return ERR_PTR(-ENOMEM);
spin_lock_init(&kc->job_lock);
+ INIT_LIST_HEAD(&kc->callback_jobs);
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
@@ -872,6 +882,7 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
/* Wait for completion of all jobs submitted by this client. */
wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));
+ BUG_ON(!list_empty(&kc->callback_jobs));
BUG_ON(!list_empty(&kc->complete_jobs));
BUG_ON(!list_empty(&kc->io_jobs));
BUG_ON(!list_empty(&kc->pages_jobs));
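The new callback_jobs list exists so a job's completion callback can never run before dispatch_job() has finished with the job: zero-I/O jobs park there, and the worker drains them into complete_jobs in O(1) under job_lock at the top of do_work(). The drain primitive, list_splice_tail_init(), in a runnable toy form (singly linked here for brevity; the kernel list is doubly linked and needs no tail walk):

#include <stdio.h>

struct node { int v; struct node *next; };

/* Move every node from src onto dst's tail and leave src empty. */
static void splice_tail_init(struct node **dst, struct node **src)
{
	struct node **tail = dst;

	while (*tail)
		tail = &(*tail)->next;
	*tail = *src;
	*src = NULL;
}

int main(void)
{
	struct node c = {3, NULL}, b = {2, &c}, a = {1, &b};
	struct node *callback_jobs = &a, *complete_jobs = NULL, *n;

	splice_tail_init(&complete_jobs, &callback_jobs);
	for (n = complete_jobs; n; n = n->next)
		printf("%d ", n->v);	/* prints: 1 2 3 */
	printf("\n");
	return 0;
}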
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index e4d1bafe78c1ab..2a855e5429ab7c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -19,6 +19,7 @@
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
+#include <linux/semaphore.h>
#include "dm.h"
@@ -105,6 +106,9 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
+ /* Maximum number of in-flight COW jobs. */
+ struct semaphore cow_count;
+
struct dm_kcopyd_client *kcopyd_client;
/* Wait for events based on state_bits */
@@ -145,6 +149,19 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+/*
+ * Maximum number of chunks being copied on write.
+ *
+ * The value was decided experimentally as a trade-off between memory
+ * consumption, stalling the kernel's workqueues, and maintaining a high enough
+ * throughput.
+ */
+#define DEFAULT_COW_THRESHOLD 2048
+
+static int cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
+
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -1189,6 +1206,8 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
+ sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+
s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
@@ -1560,6 +1579,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
}
list_add(&pe->out_of_order_entry, lh);
}
+ up(&s->cow_count);
}
/*
@@ -1583,6 +1603,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
dest.count = src.count;
/* Hand over to kcopyd */
+ down(&s->cow_count);
dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}
@@ -1602,6 +1623,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
pe->full_bio = bio;
pe->full_bio_end_io = bio->bi_end_io;
+ down(&s->cow_count);
callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
copy_callback, pe);
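The snapshot change is a plain counting-semaphore throttle: start_copy() and start_full_bio() pay one down() per chunk handed to kcopyd, and copy_callback() returns the slot with up(), so at most snapshot_cow_threshold COW jobs are ever in flight. A userspace analog with POSIX semaphores (a sketch of the shape, not the dm code):

#include <semaphore.h>
#include <stdio.h>

#define COW_THRESHOLD 2048	/* mirrors DEFAULT_COW_THRESHOLD above */

static sem_t cow_count;

static void start_copy(void)
{
	sem_wait(&cow_count);	/* blocks once 2048 copies are in flight */
	/* ... hand the chunk to the copy engine ... */
}

static void copy_callback(void)
{
	/* ... copy finished ... */
	sem_post(&cow_count);	/* release the slot */
}

int main(void)
{
	sem_init(&cow_count, 0, COW_THRESHOLD);
	start_copy();
	copy_callback();
	sem_destroy(&cow_count);
	return 0;
}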
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 6fde563a644bff..b3d78bba3a79a4 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1480,30 +1480,6 @@ static bool dm_table_supports_discards(struct dm_table *t)
return false;
}
-static int device_nonrot(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
-{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return q && blk_queue_nonrot(q);
-}
-
-static bool dm_table_all_nonrot(struct dm_table *t)
-{
- unsigned i = 0;
-
- /* Ensure that all underlying device are non rotational. */
- while (i < dm_table_get_num_targets(t)) {
- struct dm_target *ti = dm_table_get_target(t, i++);
-
- if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, device_nonrot, NULL))
- return false;
- }
-
- return true;
-}
-
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@@ -1518,10 +1494,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
else
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
- if (!dm_table_all_nonrot(t))
- queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
- else
- queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
if (dm_table_supports_flush(t, REQ_FLUSH)) {
flush |= REQ_FLUSH;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index e339f4288e8f6a..2711aa965445c0 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -190,6 +190,12 @@ struct dm_pool_metadata {
sector_t data_block_size;
/*
+ * We reserve a section of the metadata for commit overhead.
+ * All reported space does *not* include this.
+ */
+ dm_block_t metadata_reserve;
+
+ /*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata
* operation possible in this state is the closing of the device.
@@ -827,6 +833,20 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
return dm_tm_commit(pmd->tm, sblock);
}
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+ int r;
+ dm_block_t total;
+ dm_block_t max_blocks = 4096; /* 16M */
+
+ r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+ if (r) {
+ DMERR("could not get size of metadata device");
+ pmd->metadata_reserve = max_blocks;
+ } else
+ pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
@@ -860,6 +880,8 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
return ERR_PTR(r);
}
+ __set_metadata_reserve(pmd);
+
return pmd;
}
@@ -1763,6 +1785,13 @@ int dm_pool_get_free_metadata_block_count(struct dm_pool_metadata *pmd,
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+ if (!r) {
+ if (*result < pmd->metadata_reserve)
+ *result = 0;
+ else
+ *result -= pmd->metadata_reserve;
+ }
up_read(&pmd->root_lock);
return r;
@@ -1875,8 +1904,11 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_cou
int r = -EINVAL;
down_write(&pmd->root_lock);
- if (!pmd->fail_io)
+ if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
+ if (!r)
+ __set_metadata_reserve(pmd);
+ }
up_write(&pmd->root_lock);
return r;
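__set_metadata_reserve() withholds min(4096 blocks, total/10) from every free-block report, so userspace sees "out of space" while the pool still has headroom to commit; with 4 KiB metadata blocks the cap is the 16M noted in the comment. A quick stand-alone check of the arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint64_t metadata_reserve(uint64_t total_blocks)
{
	uint64_t max_blocks = 4096;	/* 16 MiB of 4 KiB blocks */
	uint64_t tenth = total_blocks / 10;

	return tenth < max_blocks ? tenth : max_blocks;
}

int main(void)
{
	/* small device: the 10% term wins; big device: capped at 4096 */
	printf("%llu\n", (unsigned long long)metadata_reserve(10000));	 /* 1000 */
	printf("%llu\n", (unsigned long long)metadata_reserve(1 << 20)); /* 4096 */
	return 0;
}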
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index a1cc797fe88f49..07eaa9f9071271 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@ struct dm_thin_new_mapping;
enum pool_mode {
PM_WRITE, /* metadata may be changed */
PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
+
+ /*
+ * Like READ_ONLY, except may switch back to WRITE on metadata
+ * resize. Reported as READ_ONLY.
+ */
+ PM_OUT_OF_METADATA_SPACE,
PM_READ_ONLY, /* metadata may not be changed */
+
PM_FAIL, /* all I/O fails */
};
@@ -250,6 +256,7 @@ struct pool {
spinlock_t lock;
struct bio_list deferred_flush_bios;
+ struct bio_list deferred_flush_completions;
struct list_head prepared_mappings;
struct list_head prepared_discards;
struct list_head active_thins;
@@ -914,6 +921,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
mempool_free(m, m->tc->pool->mapping_pool);
}
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ /*
+ * If the bio has the REQ_FUA flag set we must commit the metadata
+ * before signaling its completion.
+ */
+ if (!bio_triggers_commit(tc, bio)) {
+ bio_endio(bio);
+ return;
+ }
+
+ /*
+ * Complete bio with an error if earlier I/O caused changes to the
+ * metadata that can't be committed, e.g., due to I/O errors on the
+ * metadata device.
+ */
+ if (dm_thin_aborted_changes(tc->td)) {
+ bio_io_error(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in process_deferred_bios().
+ */
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->deferred_flush_completions, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
@@ -946,7 +986,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
if (bio) {
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
- bio_endio(bio);
+ complete_overwrite_bio(tc, bio);
} else {
inc_all_io_entry(tc->pool, m->cell->holder);
remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -1299,7 +1339,37 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-static void check_for_space(struct pool *pool)
+static void requeue_bios(struct pool *pool);
+
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+ return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+ return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+ int r;
+ const char *ooms_reason = NULL;
+ dm_block_t nr_free;
+
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+ if (r)
+ ooms_reason = "Could not get free metadata blocks";
+ else if (!nr_free)
+ ooms_reason = "No free metadata blocks";
+
+ if (ooms_reason && !is_read_only(pool)) {
+ DMERR("%s", ooms_reason);
+ set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+ }
+}
+
+static void check_for_data_space(struct pool *pool)
{
int r;
dm_block_t nr_free;
@@ -1311,8 +1381,10 @@ static void check_for_space(struct pool *pool)
if (r)
return;
- if (nr_free)
+ if (nr_free) {
set_pool_mode(pool, PM_WRITE);
+ requeue_bios(pool);
+ }
}
/*
@@ -1323,14 +1395,16 @@ static int commit(struct pool *pool)
{
int r;
- if (get_pool_mode(pool) >= PM_READ_ONLY)
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
- else
- check_for_space(pool);
+ else {
+ check_for_metadata_space(pool);
+ check_for_data_space(pool);
+ }
return r;
}
@@ -1389,10 +1463,26 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
- metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ if (r == -ENOSPC)
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+ else
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ return r;
+ }
+
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
return r;
}
+ if (!free_blocks) {
+ /* Let's commit before we use up the metadata reserve. */
+ r = commit(pool);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -1424,6 +1514,7 @@ static int should_error_unserviceable_bio(struct pool *pool)
case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? -ENOSPC : 0;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
case PM_FAIL:
return -EIO;
@@ -2171,7 +2262,7 @@ static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
struct bio *bio;
- struct bio_list bios;
+ struct bio_list bios, bio_completions;
struct thin_c *tc;
tc = get_first_thin(pool);
@@ -2182,26 +2273,36 @@ static void process_deferred_bios(struct pool *pool)
}
/*
- * If there are any deferred flush bios, we must commit
- * the metadata before issuing them.
+ * If there are any deferred flush bios, we must commit the metadata
+ * before issuing them or signaling their completion.
*/
bio_list_init(&bios);
+ bio_list_init(&bio_completions);
+
spin_lock_irqsave(&pool->lock, flags);
bio_list_merge(&bios, &pool->deferred_flush_bios);
bio_list_init(&pool->deferred_flush_bios);
+
+ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+ bio_list_init(&pool->deferred_flush_completions);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) &&
+ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
+ bio_list_merge(&bios, &bio_completions);
+
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
}
pool->last_commit_jiffies = jiffies;
+ while ((bio = bio_list_pop(&bio_completions)))
+ bio_endio(bio);
+
while ((bio = bio_list_pop(&bios)))
generic_make_request(bio);
}
@@ -2394,8 +2495,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
error_retry_list(pool);
break;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
- if (old_mode != new_mode)
+ if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
@@ -2827,6 +2929,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
spin_lock_init(&pool->lock);
bio_list_init(&pool->deferred_flush_bios);
+ bio_list_init(&pool->deferred_flush_completions);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
INIT_LIST_HEAD(&pool->active_thins);
@@ -3326,6 +3429,10 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
+
+ if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+ set_pool_mode(pool, PM_WRITE);
+
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3629,7 +3736,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
return -EOPNOTSUPP;
@@ -3703,6 +3810,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
dm_block_t nr_blocks_data;
dm_block_t nr_blocks_metadata;
dm_block_t held_root;
+ enum pool_mode mode;
char buf[BDEVNAME_SIZE];
char buf2[BDEVNAME_SIZE];
struct pool_c *pt = ti->private;
@@ -3773,9 +3881,10 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");
- if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ mode = get_pool_mode(pool);
+ if (mode == PM_OUT_OF_DATA_SPACE)
DMEMIT("out_of_data_space ");
- else if (pool->pf.mode == PM_READ_ONLY)
+ else if (is_read_only_pool_mode(mode))
DMEMIT("ro ");
else
DMEMIT("rw ");
diff --git a/drivers/md/dm-verity-chromeos.c b/drivers/md/dm-verity-chromeos.c
index 18f9842dcc0663..ac8c0bee510c2b 100644
--- a/drivers/md/dm-verity-chromeos.c
+++ b/drivers/md/dm-verity-chromeos.c
@@ -39,8 +39,11 @@
static void chromeos_invalidate_kernel_endio(struct bio *bio)
{
- if (bio->bi_error)
+ if (bio->bi_error) {
+ DMERR("%s: bio operation failed (error=0x%x)", __func__,
+ bio->bi_error);
chromeos_set_need_recovery();
+ }
complete(bio->bi_private);
}
@@ -128,7 +131,7 @@ static int chromeos_invalidate_kernel_bio(struct block_device *root_bdev)
/* Ensure we do synchronous unblocked I/O. We may also need
* sync_bdev() on completion, but it really shouldn't be needed.
*/
- int rw = REQ_SYNC | REQ_SOFTBARRIER | REQ_NOIDLE;
+ int rw;
devt = get_boot_dev();
if (!devt) {
@@ -160,6 +163,12 @@ static int chromeos_invalidate_kernel_bio(struct block_device *root_bdev)
goto failed_to_alloc_page;
}
+ /*
+ * Request the read with the REQ_FLUSH flag to ensure that the cache
+ * of the non-volatile storage device has been flushed before the read
+ * is started.
+ */
+ rw = REQ_SYNC | REQ_NOIDLE | REQ_FLUSH;
if (chromeos_invalidate_kernel_submit(bio, bdev, rw, page)) {
ret = -1;
goto failed_to_submit_read;
@@ -183,7 +192,7 @@ static int chromeos_invalidate_kernel_bio(struct block_device *root_bdev)
bdev = blkdev_get_by_dev(devt, dev_mode,
chromeos_invalidate_kernel_bio);
if (IS_ERR(bdev)) {
- DMERR("invalidate_kernel: could not open device for reading");
+ DMERR("invalidate_kernel: could not open device for writing");
dev_mode = 0;
ret = -1;
goto failed_to_write;
@@ -194,7 +203,12 @@ static int chromeos_invalidate_kernel_bio(struct block_device *root_bdev)
*/
bio_reset(bio);
- rw |= REQ_WRITE;
+ /*
+ * Request the write with the REQ_FUA flag to ensure that I/O
+ * completion for the write is signaled only after the data has been
+ * committed to non-volatile storage.
+ */
+ rw = REQ_WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA;
if (chromeos_invalidate_kernel_submit(bio, bdev, rw, page)) {
ret = -1;
goto failed_to_submit_write;
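
The two flag sets bracket a read-modify-write of the kernel partition
header: REQ_FLUSH before the read drains the device's volatile cache so
stale data cannot be read back, and REQ_FUA on the write makes the I/O
completion mean "on stable media". A minimal sketch of the same pairing
on the 4.4-era submit_bio() interface (function name invented; the
driver resets and refills the bio between the two submissions):

static void read_then_rewrite(struct bio *bio)
{
	/* Flush the write cache, then read the current contents. */
	submit_bio(REQ_SYNC | REQ_NOIDLE | REQ_FLUSH, bio);

	/* ... modify the page ..., then write it durably. */
	submit_bio(REQ_WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA, bio);
}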
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index a7a561af05c9c4..617a0aefc1c455 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -239,15 +239,6 @@ static void recover_bitmaps(struct md_thread *thread)
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
- /* Clear suspend_area associated with the bitmap */
- spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
- spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
@@ -266,6 +257,16 @@ static void recover_bitmaps(struct md_thread *thread)
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto dlm_unlock;
}
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
if (hi > 0) {
/* TODO:Wait for current resync to get over */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 62c3328e2a1dd4..07f307402351b7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2690,7 +2690,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
}
} else if (cmd_match(buf, "re-add")) {
- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
+ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
+ rdev->saved_raid_disk >= 0) {
/* clear_bit is performed _after_ all the devices
* have their local Faulty bit cleared. If any writes
* happen in the meantime in the local node, they
@@ -6144,6 +6145,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
struct md_rdev *rdev;
int ret = -1;
+ if (!mddev->pers)
+ return -ENODEV;
+
rdev = find_rdev(mddev, dev);
if (!rdev)
return -ENXIO;
@@ -8153,6 +8157,7 @@ static int remove_and_add_spares(struct mddev *mddev,
if (mddev->pers->hot_remove_disk(
mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
+ rdev->saved_raid_disk = rdev->raid_disk;
rdev->raid_disk = -1;
removed++;
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 89dcbf2fa84606..82e284d2b202bb 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1605,6 +1605,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
first = last = rdev->saved_raid_disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7b6acedc89c177..69e9abf00c744f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1737,6 +1737,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
first = last = rdev->raid_disk;
if (rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->geo.raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
@@ -3691,6 +3692,13 @@ static int run(struct mddev *mddev)
disk->rdev->saved_raid_disk < 0)
conf->fullsync = 1;
}
+
+ if (disk->replacement &&
+ !test_bit(In_sync, &disk->replacement->flags) &&
+ disk->replacement->saved_raid_disk < 0) {
+ conf->fullsync = 1;
+ }
+
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
@@ -3747,6 +3755,8 @@ static int run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
+ if (!mddev->sync_thread)
+ goto out_free_conf;
}
return 0;
@@ -4329,11 +4339,12 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
allow_barrier(conf);
}
+ raise_barrier(conf, 0);
read_more:
/* Now schedule reads for blocks from sector_nr to last */
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
r10_bio->state = 0;
- raise_barrier(conf, sectors_done != 0);
+ raise_barrier(conf, 1);
atomic_set(&r10_bio->remaining, 0);
r10_bio->mddev = mddev;
r10_bio->sector = sector_nr;
@@ -4433,11 +4444,12 @@ bio_full:
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
- sector_nr += nr_sectors;
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
+ lower_barrier(conf);
+
/* Now that we have done the whole section we can
* update reshape_progress
*/
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index d59b861764a10b..5e65dc6def7eec 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4190,6 +4190,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
s->failed++;
if (rdev && !test_bit(Faulty, &rdev->flags))
do_recovery = 1;
+ else if (!rdev) {
+ rdev = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
+ }
}
}
if (test_bit(STRIPE_SYNCING, &sh->state)) {
@@ -6967,6 +6973,8 @@ static int run(struct mddev *mddev)
set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
mddev->sync_thread = md_register_thread(md_do_sync, mddev,
"reshape");
+ if (!mddev->sync_thread)
+ goto abort;
}
/* Ok, everything is just fine now */
diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
index 08b619d0ea1ef7..351451fd254d8b 100644
--- a/drivers/media/cec/cec-notifier.c
+++ b/drivers/media/cec/cec-notifier.c
@@ -33,6 +33,7 @@ struct cec_notifier {
struct list_head head;
struct kref kref;
struct device *dev;
+ const char *conn;
struct cec_adapter *cec_adap;
void (*callback)(struct cec_adapter *adap, u16 pa);
@@ -42,13 +43,14 @@ struct cec_notifier {
static LIST_HEAD(cec_notifiers);
static DEFINE_MUTEX(cec_notifiers_lock);
-struct cec_notifier *cec_notifier_get(struct device *dev)
+struct cec_notifier *cec_notifier_get_conn(struct device *dev, const char *conn)
{
struct cec_notifier *n;
mutex_lock(&cec_notifiers_lock);
list_for_each_entry(n, &cec_notifiers, head) {
- if (n->dev == dev) {
+ if (n->dev == dev &&
+ (!conn || !strcmp(n->conn, conn))) {
kref_get(&n->kref);
mutex_unlock(&cec_notifiers_lock);
return n;
@@ -58,6 +60,8 @@ struct cec_notifier *cec_notifier_get(struct device *dev)
if (!n)
goto unlock;
n->dev = dev;
+ if (conn)
+ n->conn = kstrdup(conn, GFP_KERNEL);
n->phys_addr = CEC_PHYS_ADDR_INVALID;
mutex_init(&n->lock);
kref_init(&n->kref);
@@ -66,7 +70,7 @@ unlock:
mutex_unlock(&cec_notifiers_lock);
return n;
}
-EXPORT_SYMBOL_GPL(cec_notifier_get);
+EXPORT_SYMBOL_GPL(cec_notifier_get_conn);
static void cec_notifier_release(struct kref *kref)
{
@@ -74,6 +78,7 @@ static void cec_notifier_release(struct kref *kref)
container_of(kref, struct cec_notifier, kref);
list_del(&n->head);
+ kfree(n->conn);
kfree(n);
}
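
With the connector name folded into the lookup key, one HDMI device with
several connectors can own several notifiers. A usage sketch from a
hypothetical HDMI driver (device pointer and connector name invented):

struct cec_notifier *n;

n = cec_notifier_get_conn(hdmi_dev, "HDMI-A-1");
if (!n)
	return -ENOMEM;
cec_notifier_set_phys_addr(n, pa);

Callers that pass conn == NULL keep the old one-notifier-per-device
behaviour, since a NULL conn matches the first entry found for the
device.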
diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c
index bfe831c10b1c46..b95a631f23f9ab 100644
--- a/drivers/media/common/siano/smsendian.c
+++ b/drivers/media/common/siano/smsendian.c
@@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer)
switch (msg->x_msg_header.msg_type) {
case MSG_SMS_DATA_DOWNLOAD_REQ:
{
- msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]);
+ msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0]));
break;
}
@@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer)
sizeof(struct sms_msg_hdr))/4;
for (i = 0; i < msg_words; i++)
- msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]);
+ msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
break;
}
@@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer)
{
struct sms_version_res *ver =
(struct sms_version_res *) msg;
- ver->chip_model = le16_to_cpu(ver->chip_model);
+ ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model);
break;
}
@@ -81,7 +81,7 @@ void smsendian_handle_rx_message(void *buffer)
sizeof(struct sms_msg_hdr))/4;
for (i = 0; i < msg_words; i++)
- msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]);
+ msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]);
break;
}
@@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg)
#ifdef __BIG_ENDIAN
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg;
- phdr->msg_type = le16_to_cpu(phdr->msg_type);
- phdr->msg_length = le16_to_cpu(phdr->msg_length);
- phdr->msg_flags = le16_to_cpu(phdr->msg_flags);
+ phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type);
+ phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length);
+ phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags);
#endif /* __BIG_ENDIAN */
}
EXPORT_SYMBOL_GPL(smsendian_handle_message_header);
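
The casts silence sparse's endianness checker rather than change any
behaviour: the message buffers are declared with native-endian types
(u16/u32) but carry little-endian wire data, so an in-place byte swap
needs a __force cast to the corresponding __le type. The shape of the
idiom:

u32 wire;	/* declared native, actually little-endian on the wire */
u32 host = le32_to_cpu((__force __le32)wire);	/* no sparse warning */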
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index e2a3833170e322..2c835e69c4df9f 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -230,8 +230,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
wake_up_interruptible (&events->wait_queue);
}
+static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
+ struct dvb_fe_events *events)
+{
+ int ret;
+
+ up(&fepriv->sem);
+ ret = events->eventw != events->eventr;
+ down(&fepriv->sem);
+
+ return ret;
+}
+
static int dvb_frontend_get_event(struct dvb_frontend *fe,
- struct dvb_frontend_event *event, int flags)
+ struct dvb_frontend_event *event, int flags)
{
struct dvb_frontend_private *fepriv = fe->frontend_priv;
struct dvb_fe_events *events = &fepriv->events;
@@ -249,13 +261,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
if (flags & O_NONBLOCK)
return -EWOULDBLOCK;
- up(&fepriv->sem);
-
- ret = wait_event_interruptible (events->wait_queue,
- events->eventw != events->eventr);
-
- if (down_interruptible (&fepriv->sem))
- return -ERESTARTSYS;
+ ret = wait_event_interruptible(events->wait_queue,
+ dvb_frontend_test_event(fepriv, events));
if (ret < 0)
return ret;
diff --git a/drivers/media/dvb-frontends/ascot2e.c b/drivers/media/dvb-frontends/ascot2e.c
index f770f6a2c987b0..3ea9edc8cdbed4 100644
--- a/drivers/media/dvb-frontends/ascot2e.c
+++ b/drivers/media/dvb-frontends/ascot2e.c
@@ -155,7 +155,9 @@ static int ascot2e_write_regs(struct ascot2e_priv *priv,
static int ascot2e_write_reg(struct ascot2e_priv *priv, u8 reg, u8 val)
{
- return ascot2e_write_regs(priv, reg, &val, 1);
+ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return ascot2e_write_regs(priv, reg, &tmp, 1);
}
static int ascot2e_read_regs(struct ascot2e_priv *priv,
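
This temporary-copy pattern repeats across the dvb-frontends below. A
hedged reading of the referenced gcc bug: with KASAN enabled, gcc places
a sanitizer redzone around every address-taken object and fails to reuse
those stack slots, so taking &val of the by-value parameter in each of
these small write helpers inflated the stack frames of their callers;
copying into a block-scoped local sidesteps the problem. The generic
shape, with invented chip_* names:

static int chip_write_reg(struct chip *chip, u8 reg, u8 val)
{
	u8 tmp = val;	/* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */

	return chip_write_regs(chip, reg, &tmp, 1);
}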
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index 107853b0fdddc8..bde77671a37c88 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -241,7 +241,9 @@ static int cxd2841er_write_regs(struct cxd2841er_priv *priv,
static int cxd2841er_write_reg(struct cxd2841er_priv *priv,
u8 addr, u8 reg, u8 val)
{
- return cxd2841er_write_regs(priv, addr, reg, &val, 1);
+ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return cxd2841er_write_regs(priv, addr, reg, &tmp, 1);
}
static int cxd2841er_read_regs(struct cxd2841er_priv *priv,
diff --git a/drivers/media/dvb-frontends/horus3a.c b/drivers/media/dvb-frontends/horus3a.c
index 000606af70f746..f770ab72a8e333 100644
--- a/drivers/media/dvb-frontends/horus3a.c
+++ b/drivers/media/dvb-frontends/horus3a.c
@@ -89,7 +89,9 @@ static int horus3a_write_regs(struct horus3a_priv *priv,
static int horus3a_write_reg(struct horus3a_priv *priv, u8 reg, u8 val)
{
- return horus3a_write_regs(priv, reg, &val, 1);
+ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return horus3a_write_regs(priv, reg, &tmp, 1);
}
static int horus3a_enter_power_save(struct horus3a_priv *priv)
diff --git a/drivers/media/dvb-frontends/itd1000.c b/drivers/media/dvb-frontends/itd1000.c
index cadcae4cff8912..ac9d2591bb6fca 100644
--- a/drivers/media/dvb-frontends/itd1000.c
+++ b/drivers/media/dvb-frontends/itd1000.c
@@ -99,8 +99,9 @@ static int itd1000_read_reg(struct itd1000_state *state, u8 reg)
static inline int itd1000_write_reg(struct itd1000_state *state, u8 r, u8 v)
{
- int ret = itd1000_write_regs(state, r, &v, 1);
- state->shadow[r] = v;
+ u8 tmp = v; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+ int ret = itd1000_write_regs(state, r, &tmp, 1);
+ state->shadow[r] = tmp;
return ret;
}
diff --git a/drivers/media/dvb-frontends/mt312.c b/drivers/media/dvb-frontends/mt312.c
index c36e6764eeadc8..c4418827102806 100644
--- a/drivers/media/dvb-frontends/mt312.c
+++ b/drivers/media/dvb-frontends/mt312.c
@@ -142,7 +142,10 @@ static inline int mt312_readreg(struct mt312_state *state,
static inline int mt312_writereg(struct mt312_state *state,
const enum mt312_reg_addr reg, const u8 val)
{
- return mt312_write(state, reg, &val, 1);
+ u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return mt312_write(state, reg, &tmp, 1);
}
static inline u32 mt312_div(u32 a, u32 b)
diff --git a/drivers/media/dvb-frontends/stb0899_drv.c b/drivers/media/dvb-frontends/stb0899_drv.c
index 756650f154abe4..ad9b7d4f8d959d 100644
--- a/drivers/media/dvb-frontends/stb0899_drv.c
+++ b/drivers/media/dvb-frontends/stb0899_drv.c
@@ -552,7 +552,8 @@ int stb0899_write_regs(struct stb0899_state *state, unsigned int reg, u8 *data,
int stb0899_write_reg(struct stb0899_state *state, unsigned int reg, u8 data)
{
- return stb0899_write_regs(state, reg, &data, 1);
+ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return stb0899_write_regs(state, reg, &tmp, 1);
}
/*
diff --git a/drivers/media/dvb-frontends/stb6100.c b/drivers/media/dvb-frontends/stb6100.c
index 4ef8a5c7003e90..44fac257003485 100644
--- a/drivers/media/dvb-frontends/stb6100.c
+++ b/drivers/media/dvb-frontends/stb6100.c
@@ -226,12 +226,14 @@ static int stb6100_write_reg_range(struct stb6100_state *state, u8 buf[], int st
static int stb6100_write_reg(struct stb6100_state *state, u8 reg, u8 data)
{
+ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
if (unlikely(reg >= STB6100_NUMREGS)) {
dprintk(verbose, FE_ERROR, 1, "Invalid register offset 0x%x", reg);
return -EREMOTEIO;
}
- data = (data & stb6100_template[reg].mask) | stb6100_template[reg].set;
- return stb6100_write_reg_range(state, &data, reg, 1);
+ tmp = (tmp & stb6100_template[reg].mask) | stb6100_template[reg].set;
+ return stb6100_write_reg_range(state, &tmp, reg, 1);
}
diff --git a/drivers/media/dvb-frontends/stv0367.c b/drivers/media/dvb-frontends/stv0367.c
index 44cb73f68af658..ddd0d778ad6e9a 100644
--- a/drivers/media/dvb-frontends/stv0367.c
+++ b/drivers/media/dvb-frontends/stv0367.c
@@ -804,7 +804,9 @@ int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
{
- return stv0367_writeregs(state, reg, &data, 1);
+ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return stv0367_writeregs(state, reg, &tmp, 1);
}
static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
diff --git a/drivers/media/dvb-frontends/stv090x.c b/drivers/media/dvb-frontends/stv090x.c
index 25bdf6e0f9632d..f0377e2b341b79 100644
--- a/drivers/media/dvb-frontends/stv090x.c
+++ b/drivers/media/dvb-frontends/stv090x.c
@@ -761,7 +761,9 @@ static int stv090x_write_regs(struct stv090x_state *state, unsigned int reg, u8
static int stv090x_write_reg(struct stv090x_state *state, unsigned int reg, u8 data)
{
- return stv090x_write_regs(state, reg, &data, 1);
+ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return stv090x_write_regs(state, reg, &tmp, 1);
}
static int stv090x_i2c_gate_ctrl(struct stv090x_state *state, int enable)
diff --git a/drivers/media/dvb-frontends/stv6110x.c b/drivers/media/dvb-frontends/stv6110x.c
index e66154e5c1d771..45d14869e7b872 100644
--- a/drivers/media/dvb-frontends/stv6110x.c
+++ b/drivers/media/dvb-frontends/stv6110x.c
@@ -97,7 +97,9 @@ static int stv6110x_write_regs(struct stv6110x_state *stv6110x, int start, u8 da
static int stv6110x_write_reg(struct stv6110x_state *stv6110x, u8 reg, u8 data)
{
- return stv6110x_write_regs(stv6110x, reg, &data, 1);
+ u8 tmp = data; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return stv6110x_write_regs(stv6110x, reg, &tmp, 1);
}
static int stv6110x_init(struct dvb_frontend *fe)
diff --git a/drivers/media/dvb-frontends/zl10039.c b/drivers/media/dvb-frontends/zl10039.c
index ee09ec26c553ef..b273e4fd80244e 100644
--- a/drivers/media/dvb-frontends/zl10039.c
+++ b/drivers/media/dvb-frontends/zl10039.c
@@ -138,7 +138,9 @@ static inline int zl10039_writereg(struct zl10039_state *state,
const enum zl10039_reg_addr reg,
const u8 val)
{
- return zl10039_write(state, reg, &val, 1);
+ const u8 tmp = val; /* see gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+
+ return zl10039_write(state, reg, &tmp, 1);
}
static int zl10039_init(struct dvb_frontend *fe)
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 251a556112a995..280b5ffea5922f 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -968,7 +968,8 @@ static int get_ca_object_length(struct avc_response_frame *r)
return r->operand[7];
}
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
@@ -1009,7 +1010,8 @@ out:
return ret;
}
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len)
{
struct avc_command_frame *c = (void *)fdtv->avc_data;
struct avc_response_frame *r = (void *)fdtv->avc_data;
diff --git a/drivers/media/firewire/firedtv.h b/drivers/media/firewire/firedtv.h
index 345d1eda8c057b..5b18a08c628576 100644
--- a/drivers/media/firewire/firedtv.h
+++ b/drivers/media/firewire/firedtv.h
@@ -124,8 +124,10 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
struct dvb_diseqc_master_cmd *diseqcmd);
void avc_remote_ctrl_work(struct work_struct *work);
int avc_register_remote_control(struct firedtv *fdtv);
-int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
-int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len);
+int avc_ca_app_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len);
+int avc_ca_info(struct firedtv *fdtv, unsigned char *app_info,
+ unsigned int *len);
int avc_ca_reset(struct firedtv *fdtv);
int avc_ca_pmt(struct firedtv *fdtv, char *app_info, int length);
int avc_ca_get_time_date(struct firedtv *fdtv, int *interval);
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index f2b8df78a0ee1c..6aaabf45eb278b 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -270,6 +270,16 @@ config VIDEO_ML86V7667
To compile this driver as a module, choose M here: the
module will be called ml86v7667.
+config VIDEO_AK7375
+ tristate "AK7375 lens voice coil support"
+ depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
+ depends on VIDEO_V4L2_SUBDEV_API
+ ---help---
+ This is a driver for the AK7375 camera lens voice coil.
+ AK7375 is a 12-bit DAC with 120mA output current sink
+ capability, designed for linear control of voice coil
+ motors and controlled via an I2C serial interface.
+
config VIDEO_DW9714
tristate "DW9714 lens voice coil support"
depends on I2C && VIDEO_V4L2 && MEDIA_CONTROLLER
@@ -586,6 +596,17 @@ config VIDEO_OV13858
This is a Video4Linux2 sensor-level driver for the OmniVision
OV13858 camera.
+config VIDEO_IMX208
+ tristate "Sony IMX208 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX208 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx208.
+
config VIDEO_IMX258
tristate "Sony IMX258 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -594,8 +615,30 @@ config VIDEO_IMX258
This is a Video4Linux2 sensor-level driver for the Sony
IMX258 camera.
- To compile this driver as a module, choose M here: the
- module will be called imx258.
+ To compile this driver as a module, choose M here: the
+ module will be called imx258.
+
+config VIDEO_IMX319
+ tristate "Sony IMX319 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ help
+ This is a Video4Linux2 sensor driver for the Sony
+ IMX319 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx319.
+
+config VIDEO_IMX355
+ tristate "Sony IMX355 sensor support"
+ depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on MEDIA_CAMERA_SUPPORT
+ ---help---
+ This is a Video4Linux2 sensor-level driver for the Sony
+ IMX355 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx355.
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index de5346c1d62d24..0f453e8c822fd8 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -61,7 +61,10 @@ obj-$(CONFIG_VIDEO_OV7640) += ov7640.o
obj-$(CONFIG_VIDEO_OV7670) += ov7670.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
-obj-$(CONFIG_VIDEO_IMX258) += imx258.o
+obj-$(CONFIG_VIDEO_IMX208) += imx208.o
+obj-$(CONFIG_VIDEO_IMX258) += imx258.o
+obj-$(CONFIG_VIDEO_IMX319) += imx319.o
+obj-$(CONFIG_VIDEO_IMX355) += imx355.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
obj-$(CONFIG_VIDEO_MT9T001) += mt9t001.o
@@ -84,5 +87,6 @@ obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
+obj-$(CONFIG_VIDEO_AK7375) += ak7375.o
obj-$(CONFIG_VIDEO_DW9714) += dw9714.o
obj-$(CONFIG_VIDEO_DW9807) += dw9807.o
diff --git a/drivers/media/i2c/ak7375.c b/drivers/media/i2c/ak7375.c
new file mode 100644
index 00000000000000..731c21d90e4be5
--- /dev/null
+++ b/drivers/media/i2c/ak7375.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+
+#define AK7375_MAX_FOCUS_POS 4095
+/*
+ * This sets the minimum granularity for the focus positions.
+ * A value of 1 gives maximum accuracy for a desired focus position.
+ */
+#define AK7375_FOCUS_STEPS 1
+/*
+ * This acts as the minimum granularity of lens movement.
+ * Keep this value a power of 2, so the control steps can be
+ * uniformly adjusted for gradual lens movement, with desired
+ * number of control steps.
+ */
+#define AK7375_CTRL_STEPS 64
+#define AK7375_CTRL_DELAY_US 1000
+
+#define AK7375_REG_POSITION 0x0
+#define AK7375_REG_CONT 0x2
+#define AK7375_MODE_ACTIVE 0x0
+#define AK7375_MODE_STANDBY 0x40
+
+/* ak7375 device structure */
+struct ak7375_device {
+ struct v4l2_ctrl_handler ctrls_vcm;
+ struct v4l2_subdev sd;
+ struct v4l2_ctrl *focus;
+ /* active or standby mode */
+ bool active;
+};
+
+static inline struct ak7375_device *to_ak7375_vcm(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct ak7375_device, ctrls_vcm);
+}
+
+static inline struct ak7375_device *sd_to_ak7375_vcm(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct ak7375_device, sd);
+}
+
+static int ak7375_i2c_write(struct ak7375_device *ak7375,
+ u8 addr, u16 data, u8 size)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ak7375->sd);
+ u8 buf[3];
+ int ret;
+
+ if (size != 1 && size != 2)
+ return -EINVAL;
+ buf[0] = addr;
+ buf[size] = data & 0xff;
+ if (size == 2)
+ buf[1] = (data >> 8) & 0xff;
+ ret = i2c_master_send(client, (const char *)buf, size + 1);
+ if (ret < 0)
+ return ret;
+ if (ret != size + 1)
+ return -EIO;
+
+ return 0;
+}
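+
+/*
+ * Buffer layout produced above: buf[0] carries the register address,
+ * followed by the payload most-significant byte first (for a 16-bit
+ * write, buf[1] is the high byte and buf[2] the low byte), so a
+ * single i2c_master_send() transfers address plus data in one go.
+ */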
+
+static int ak7375_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ak7375_device *dev_vcm = to_ak7375_vcm(ctrl);
+
+ if (ctrl->id == V4L2_CID_FOCUS_ABSOLUTE)
+ return ak7375_i2c_write(dev_vcm, AK7375_REG_POSITION,
+ ctrl->val << 4, 2);
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops ak7375_vcm_ctrl_ops = {
+ .s_ctrl = ak7375_set_ctrl,
+};
+
+static int ak7375_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ int ret;
+
+ ret = pm_runtime_get_sync(sd->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(sd->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ak7375_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ pm_runtime_put(sd->dev);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops ak7375_int_ops = {
+ .open = ak7375_open,
+ .close = ak7375_close,
+};
+
+static const struct v4l2_subdev_ops ak7375_ops = { };
+
+static void ak7375_subdev_cleanup(struct ak7375_device *ak7375_dev)
+{
+ v4l2_async_unregister_subdev(&ak7375_dev->sd);
+ v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm);
+ media_entity_cleanup(&ak7375_dev->sd.entity);
+}
+
+static int ak7375_init_controls(struct ak7375_device *dev_vcm)
+{
+ struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
+ const struct v4l2_ctrl_ops *ops = &ak7375_vcm_ctrl_ops;
+
+ v4l2_ctrl_handler_init(hdl, 1);
+
+ dev_vcm->focus = v4l2_ctrl_new_std(hdl, ops, V4L2_CID_FOCUS_ABSOLUTE,
+ 0, AK7375_MAX_FOCUS_POS, AK7375_FOCUS_STEPS, 0);
+
+ if (hdl->error)
+ dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n",
+ __func__, hdl->error);
+ dev_vcm->sd.ctrl_handler = hdl;
+
+ return hdl->error;
+}
+
+static int ak7375_probe(struct i2c_client *client)
+{
+ struct ak7375_device *ak7375_dev;
+ int ret;
+
+ ak7375_dev = devm_kzalloc(&client->dev, sizeof(*ak7375_dev),
+ GFP_KERNEL);
+ if (!ak7375_dev)
+ return -ENOMEM;
+
+ v4l2_i2c_subdev_init(&ak7375_dev->sd, client, &ak7375_ops);
+ ak7375_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ak7375_dev->sd.internal_ops = &ak7375_int_ops;
+ ak7375_dev->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_LENS;
+
+ ret = ak7375_init_controls(ak7375_dev);
+ if (ret)
+ goto err_cleanup;
+
+ ret = media_entity_init(&ak7375_dev->sd.entity, 0, NULL, 0);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = v4l2_async_register_subdev(&ak7375_dev->sd);
+ if (ret < 0)
+ goto err_cleanup;
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_cleanup:
+ v4l2_ctrl_handler_free(&ak7375_dev->ctrls_vcm);
+ media_entity_cleanup(&ak7375_dev->sd.entity);
+
+ return ret;
+}
+
+static int ak7375_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+
+ ak7375_subdev_cleanup(ak7375_dev);
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ return 0;
+}
+
+/*
+ * This function parks the vcm at position 0 and puts it in standby, so
+ * it consumes the least current. The lens position is gradually moved
+ * in units of AK7375_CTRL_STEPS to make the movement smooth.
+ */
+static int __maybe_unused ak7375_vcm_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ int ret, val;
+
+ if (!ak7375_dev->active)
+ return 0;
+
+ for (val = ak7375_dev->focus->val & ~(AK7375_CTRL_STEPS - 1);
+ val >= 0; val -= AK7375_CTRL_STEPS) {
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
+ val << 4, 2);
+ if (ret)
+ dev_err_once(dev, "%s I2C failure: %d\n",
+ __func__, ret);
+ usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ }
+
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
+ AK7375_MODE_STANDBY, 1);
+ if (ret)
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+
+ ak7375_dev->active = false;
+
+ return 0;
+}
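+
+/*
+ * Stepping detail for the loop above: the start value is the current
+ * focus position rounded down to a multiple of AK7375_CTRL_STEPS
+ * (val & ~(AK7375_CTRL_STEPS - 1) is exact because the step count is a
+ * power of two), and each AK7375_CTRL_DELAY_US pause lets the lens
+ * settle before the next move.
+ */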
+
+/*
+ * This function restores the vcm position to the value last set by the
+ * user through the v4l2_ctrl_ops s_ctrl handler. The lens position is
+ * gradually moved in units of AK7375_CTRL_STEPS to make the movement
+ * smooth.
+ */
+static int __maybe_unused ak7375_vcm_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ak7375_device *ak7375_dev = sd_to_ak7375_vcm(sd);
+ int ret, val;
+
+ if (ak7375_dev->active)
+ return 0;
+
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_CONT,
+ AK7375_MODE_ACTIVE, 1);
+ if (ret) {
+ dev_err(dev, "%s I2C failure: %d\n", __func__, ret);
+ return ret;
+ }
+
+ for (val = ak7375_dev->focus->val % AK7375_CTRL_STEPS;
+ val <= ak7375_dev->focus->val;
+ val += AK7375_CTRL_STEPS) {
+ ret = ak7375_i2c_write(ak7375_dev, AK7375_REG_POSITION,
+ val << 4, 2);
+ if (ret)
+ dev_err_ratelimited(dev, "%s I2C failure: %d\n",
+ __func__, ret);
+ usleep_range(AK7375_CTRL_DELAY_US, AK7375_CTRL_DELAY_US + 10);
+ }
+
+ ak7375_dev->active = true;
+
+ return 0;
+}
+
+static const struct of_device_id ak7375_of_table[] = {
+ { .compatible = "asahi-kasei,ak7375" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ak7375_of_table);
+
+static const struct dev_pm_ops ak7375_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume)
+ SET_RUNTIME_PM_OPS(ak7375_vcm_suspend, ak7375_vcm_resume, NULL)
+};
+
+static struct i2c_driver ak7375_i2c_driver = {
+ .driver = {
+ .name = "ak7375",
+ .pm = &ak7375_pm_ops,
+ .of_match_table = ak7375_of_table,
+ },
+ .probe_new = ak7375_probe,
+ .remove = ak7375_remove,
+};
+module_i2c_driver(ak7375_i2c_driver);
+
+MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_DESCRIPTION("AK7375 VCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index a47ab1947cc453..17d217c3585a5d 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -467,8 +467,13 @@ static void cx23885_initialize(struct i2c_client *client)
{
DEFINE_WAIT(wait);
struct cx25840_state *state = to_state(i2c_get_clientdata(client));
+ u32 clk_freq = 0;
struct workqueue_struct *q;
+ /* cx23885 sets hostdata to clk_freq pointer */
+ if (v4l2_get_subdev_hostdata(&state->sd))
+ clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd));
+
/*
* Come out of digital power down
* The CX23888, at least, needs this, otherwise registers aside from
@@ -504,8 +509,13 @@ static void cx23885_initialize(struct i2c_client *client)
* 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz
* 572.73 MHz before post divide
*/
- /* HVR1850 or 50MHz xtal */
- cx25840_write(client, 0x2, 0x71);
+ if (clk_freq == 25000000) {
+ /* 888/ImpactVCBe or 25MHz xtal */
+ ; /* nothing to do */
+ } else {
+ /* HVR1850 or 50MHz xtal */
+ cx25840_write(client, 0x2, 0x71);
+ }
cx25840_write4(client, 0x11c, 0x01d1744c);
cx25840_write4(client, 0x118, 0x00000416);
cx25840_write4(client, 0x404, 0x0010253e);
@@ -548,9 +558,15 @@ static void cx23885_initialize(struct i2c_client *client)
/* HVR1850 */
switch (state->id) {
case CX23888_AV:
- /* 888/HVR1250 specific */
- cx25840_write4(client, 0x10c, 0x13333333);
- cx25840_write4(client, 0x108, 0x00000515);
+ if (clk_freq == 25000000) {
+ /* 888/ImpactVCBe or 25MHz xtal */
+ cx25840_write4(client, 0x10c, 0x01b6db7b);
+ cx25840_write4(client, 0x108, 0x00000512);
+ } else {
+ /* 888/HVR1250 or 50MHz xtal */
+ cx25840_write4(client, 0x10c, 0x13333333);
+ cx25840_write4(client, 0x108, 0x00000515);
+ }
break;
default:
cx25840_write4(client, 0x10c, 0x002be2c9);
@@ -577,7 +593,7 @@ static void cx23885_initialize(struct i2c_client *client)
* 368.64 MHz before post divide
* 122.88 MHz / 0xa = 12.288 MHz
*/
- /* HVR1850 or 50MHz xtal */
+ /* HVR1850 or 50MHz xtal or 25MHz xtal */
cx25840_write4(client, 0x114, 0x017dbf48);
cx25840_write4(client, 0x110, 0x000a030e);
break;
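
The other half of this contract lives in the bridge driver: before this
initialization runs, it is expected to publish its crystal frequency
through the subdev hostdata pointer. A sketch of that side (assuming a
cx23885 bridge with the ImpactVCBe's 25 MHz crystal):

u32 clk_freq = 25000000;	/* 25 MHz xtal */

v4l2_set_subdev_hostdata(sd, &clk_freq);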
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index 5ac8600c7590a8..1fa771b24e638a 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -1,17 +1,6 @@
-/*
- * Copyright (c) 2015--2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2015--2017 Intel Corporation.
-#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -43,7 +32,6 @@
/* dw9714 device structure */
struct dw9714_device {
- struct i2c_client *client;
struct v4l2_ctrl_handler ctrls_vcm;
struct v4l2_subdev sd;
u16 current_val;
@@ -74,7 +62,7 @@ static int dw9714_i2c_write(struct i2c_client *client, u16 data)
static int dw9714_t_focus_vcm(struct dw9714_device *dw9714_dev, u16 val)
{
- struct i2c_client *client = dw9714_dev->client;
+ struct i2c_client *client = v4l2_get_subdevdata(&dw9714_dev->sd);
dw9714_dev->current_val = val;
@@ -97,13 +85,11 @@ static const struct v4l2_ctrl_ops dw9714_vcm_ctrl_ops = {
static int dw9714_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
- struct device *dev = &dw9714_dev->client->dev;
int rval;
- rval = pm_runtime_get_sync(dev);
+ rval = pm_runtime_get_sync(sd->dev);
if (rval < 0) {
- pm_runtime_put_noidle(dev);
+ pm_runtime_put_noidle(sd->dev);
return rval;
}
@@ -112,10 +98,7 @@ static int dw9714_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
static int dw9714_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct dw9714_device *dw9714_dev = sd_to_dw9714_vcm(sd);
- struct device *dev = &dw9714_dev->client->dev;
-
- pm_runtime_put(dev);
+ pm_runtime_put(sd->dev);
return 0;
}
@@ -138,7 +121,6 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
{
struct v4l2_ctrl_handler *hdl = &dev_vcm->ctrls_vcm;
const struct v4l2_ctrl_ops *ops = &dw9714_vcm_ctrl_ops;
- struct i2c_client *client = dev_vcm->client;
v4l2_ctrl_handler_init(hdl, 1);
@@ -146,14 +128,13 @@ static int dw9714_init_controls(struct dw9714_device *dev_vcm)
0, DW9714_MAX_FOCUS_POS, DW9714_FOCUS_STEPS, 0);
if (hdl->error)
- dev_err(&client->dev, "%s fail error: 0x%x\n",
+ dev_err(dev_vcm->sd.dev, "%s fail error: 0x%x\n",
__func__, hdl->error);
dev_vcm->sd.ctrl_handler = hdl;
return hdl->error;
}
-static int dw9714_probe(struct i2c_client *client,
- const struct i2c_device_id *devid)
+static int dw9714_probe(struct i2c_client *client)
{
struct dw9714_device *dw9714_dev;
int rval;
@@ -163,8 +144,6 @@ static int dw9714_probe(struct i2c_client *client,
if (dw9714_dev == NULL)
return -ENOMEM;
- dw9714_dev->client = client;
-
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
dw9714_dev->sd.internal_ops = &dw9714_int_ops;
@@ -185,11 +164,13 @@ static int dw9714_probe(struct i2c_client *client,
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
return 0;
err_cleanup:
- dw9714_subdev_cleanup(dw9714_dev);
+ v4l2_ctrl_handler_free(&dw9714_dev->ctrls_vcm);
+ media_entity_cleanup(&dw9714_dev->sd.entity);
dev_err(&client->dev, "Probe failed: %d\n", rval);
return rval;
}
@@ -255,19 +236,10 @@ static int __maybe_unused dw9714_vcm_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id dw9714_acpi_match[] = {
- {"DWDWD000", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, dw9714_acpi_match);
-#endif
-
static const struct i2c_device_id dw9714_id_table[] = {
- {DW9714_NAME, 0},
- {}
+ { DW9714_NAME, 0 },
+ { { 0 } }
};
-
MODULE_DEVICE_TABLE(i2c, dw9714_id_table);
static const struct of_device_id dw9714_of_table[] = {
@@ -285,10 +257,9 @@ static struct i2c_driver dw9714_i2c_driver = {
.driver = {
.name = DW9714_NAME,
.pm = &dw9714_pm_ops,
- .acpi_match_table = ACPI_PTR(dw9714_acpi_match),
.of_match_table = dw9714_of_table,
},
- .probe = dw9714_probe,
+ .probe_new = dw9714_probe,
.remove = dw9714_remove,
.id_table = dw9714_id_table,
};
diff --git a/drivers/media/i2c/imx208.c b/drivers/media/i2c/imx208.c
new file mode 100644
index 00000000000000..db513a563582a5
--- /dev/null
+++ b/drivers/media/i2c/imx208.c
@@ -0,0 +1,1098 @@
+/*
+ * Copyright (c) 2018 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <asm/unaligned.h>
+
+#define IMX208_REG_MODE_SELECT 0x0100
+#define IMX208_MODE_STANDBY 0x00
+#define IMX208_MODE_STREAMING 0x01
+
+/* Chip ID */
+#define IMX208_REG_CHIP_ID 0x0000
+#define IMX208_CHIP_ID 0x0208
+
+/* V_TIMING internal */
+#define IMX208_REG_VTS 0x0340
+#define IMX208_VTS_60FPS 0x0472
+#define IMX208_VTS_BINNING 0x0239
+#define IMX208_VTS_60FPS_MIN 0x0458
+#define IMX208_VTS_BINNING_MIN 0x0230
+#define IMX208_VTS_MAX 0xffff
+
+/* HBLANK control - read only */
+#define IMX208_PPL_384MHZ 2248
+#define IMX208_PPL_96MHZ 2248
+
+/* Exposure control */
+#define IMX208_REG_EXPOSURE 0x0202
+#define IMX208_EXPOSURE_MIN 4
+#define IMX208_EXPOSURE_STEP 1
+#define IMX208_EXPOSURE_DEFAULT 0x190
+#define IMX208_EXPOSURE_MAX 65535
+
+/* Analog gain control */
+#define IMX208_REG_ANALOG_GAIN 0x0204
+#define IMX208_ANA_GAIN_MIN 0
+#define IMX208_ANA_GAIN_MAX 0x00e0
+#define IMX208_ANA_GAIN_STEP 1
+#define IMX208_ANA_GAIN_DEFAULT 0x0
+
+/* Digital gain control */
+#define IMX208_REG_GR_DIGITAL_GAIN 0x020e
+#define IMX208_REG_R_DIGITAL_GAIN 0x0210
+#define IMX208_REG_B_DIGITAL_GAIN 0x0212
+#define IMX208_REG_GB_DIGITAL_GAIN 0x0214
+#define IMX208_DIGITAL_GAIN_SHIFT 8
+
+/* Orientation */
+#define IMX208_REG_ORIENTATION_CONTROL 0x0101
+
+/* Test Pattern Control */
+#define IMX208_REG_TEST_PATTERN_MODE 0x0600
+#define IMX208_TEST_PATTERN_DISABLE 0x0
+#define IMX208_TEST_PATTERN_SOLID_COLOR 0x1
+#define IMX208_TEST_PATTERN_COLOR_BARS 0x2
+#define IMX208_TEST_PATTERN_GREY_COLOR 0x3
+#define IMX208_TEST_PATTERN_PN9 0x4
+#define IMX208_TEST_PATTERN_FIX_1 0x100
+#define IMX208_TEST_PATTERN_FIX_2 0x101
+#define IMX208_TEST_PATTERN_FIX_3 0x102
+#define IMX208_TEST_PATTERN_FIX_4 0x103
+#define IMX208_TEST_PATTERN_FIX_5 0x104
+#define IMX208_TEST_PATTERN_FIX_6 0x105
+
+/* OTP Access */
+#define IMX208_OTP_BASE 0x3500
+#define IMX208_OTP_SIZE 40
+
+struct imx208_reg {
+ u16 address;
+ u8 val;
+};
+
+struct imx208_reg_list {
+ u32 num_of_regs;
+ const struct imx208_reg *regs;
+};
+
+/* Link frequency config */
+struct imx208_link_freq_config {
+ u32 pixels_per_line;
+
+ /* PLL registers for this link frequency */
+ struct imx208_reg_list reg_list;
+};
+
+/* Mode: resolution and related config & values */
+struct imx208_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 vts_def;
+ u32 vts_min;
+
+ /* Index of Link frequency config to be used */
+ u32 link_freq_index;
+ /* Default register values */
+ struct imx208_reg_list reg_list;
+};
+
+static const struct imx208_reg pll_ctrl_reg[] = {
+ {0x0305, 0x02},
+ {0x0307, 0x50},
+ {0x303C, 0x3C},
+};
+
+static const struct imx208_reg mode_1936x1096_60fps_regs[] = {
+ {0x0340, 0x04},
+ {0x0341, 0x72},
+ {0x0342, 0x04},
+ {0x0343, 0x64},
+ {0x034C, 0x07},
+ {0x034D, 0x90},
+ {0x034E, 0x04},
+ {0x034F, 0x48},
+ {0x0381, 0x01},
+ {0x0383, 0x01},
+ {0x0385, 0x01},
+ {0x0387, 0x01},
+ {0x3048, 0x00},
+ {0x3050, 0x01},
+ {0x30D5, 0x00},
+ {0x3301, 0x00},
+ {0x3318, 0x62},
+ {0x0202, 0x01},
+ {0x0203, 0x90},
+ {0x0205, 0x00},
+};
+
+static const struct imx208_reg mode_968_548_60fps_regs[] = {
+ {0x0340, 0x02},
+ {0x0341, 0x39},
+ {0x0342, 0x08},
+ {0x0343, 0xC8},
+ {0x034C, 0x03},
+ {0x034D, 0xC8},
+ {0x034E, 0x02},
+ {0x034F, 0x24},
+ {0x0381, 0x01},
+ {0x0383, 0x03},
+ {0x0385, 0x01},
+ {0x0387, 0x03},
+ {0x3048, 0x01},
+ {0x3050, 0x02},
+ {0x30D5, 0x03},
+ {0x3301, 0x10},
+ {0x3318, 0x75},
+ {0x0202, 0x01},
+ {0x0203, 0x90},
+ {0x0205, 0x00},
+};
+
+static const s64 imx208_discrete_digital_gain[] = {
+ 1, 2, 4, 8, 16,
+};
+
+static const char * const imx208_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Color",
+ "100% Color Bar",
+ "Fade to Grey Color Bar",
+ "PN9",
+ "Fixed Pattern1",
+ "Fixed Pattern2",
+ "Fixed Pattern3",
+ "Fixed Pattern4",
+ "Fixed Pattern5",
+ "Fixed Pattern6"
+};
+
+static const int imx208_test_pattern_val[] = {
+ IMX208_TEST_PATTERN_DISABLE,
+ IMX208_TEST_PATTERN_SOLID_COLOR,
+ IMX208_TEST_PATTERN_COLOR_BARS,
+ IMX208_TEST_PATTERN_GREY_COLOR,
+ IMX208_TEST_PATTERN_PN9,
+ IMX208_TEST_PATTERN_FIX_1,
+ IMX208_TEST_PATTERN_FIX_2,
+ IMX208_TEST_PATTERN_FIX_3,
+ IMX208_TEST_PATTERN_FIX_4,
+ IMX208_TEST_PATTERN_FIX_5,
+ IMX208_TEST_PATTERN_FIX_6,
+};
+
+/* Configurations for supported link frequencies */
+#define IMX208_MHZ (1000*1000ULL)
+#define IMX208_LINK_FREQ_384MHZ (384ULL * IMX208_MHZ)
+#define IMX208_LINK_FREQ_96MHZ (96ULL * IMX208_MHZ)
+
+#define IMX208_DATA_RATE_DOUBLE 2
+#define IMX208_NUM_OF_LANES 2
+#define IMX208_PIXEL_BITS 10
+
+enum {
+ IMX208_LINK_FREQ_384MHZ_INDEX,
+ IMX208_LINK_FREQ_96MHZ_INDEX,
+};
+
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 2; bits per pixel => 10
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= IMX208_DATA_RATE_DOUBLE * IMX208_NUM_OF_LANES;
+ do_div(f, IMX208_PIXEL_BITS);
+
+ return f;
+}
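+
+/*
+ * Worked example: the 384 MHz config gives
+ * 384000000 * 2 * 2 / 10 = 153600000, i.e. 153.6 Mpixel/s, and the
+ * 96 MHz config likewise gives 38.4 Mpixel/s.
+ */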
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[] = {
+ [IMX208_LINK_FREQ_384MHZ_INDEX] = IMX208_LINK_FREQ_384MHZ,
+ [IMX208_LINK_FREQ_96MHZ_INDEX] = IMX208_LINK_FREQ_96MHZ,
+};
+
+/* Link frequency configs */
+static const struct imx208_link_freq_config link_freq_configs[] = {
+ [IMX208_LINK_FREQ_384MHZ_INDEX] = {
+ .pixels_per_line = IMX208_PPL_384MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(pll_ctrl_reg),
+ .regs = pll_ctrl_reg,
+ }
+ },
+ [IMX208_LINK_FREQ_96MHZ_INDEX] = {
+ .pixels_per_line = IMX208_PPL_96MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(pll_ctrl_reg),
+ .regs = pll_ctrl_reg,
+ }
+ },
+};
+
+/* Mode configs */
+static const struct imx208_mode supported_modes[] = {
+ {
+ .width = 1936,
+ .height = 1096,
+ .vts_def = IMX208_VTS_60FPS,
+ .vts_min = IMX208_VTS_60FPS_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1936x1096_60fps_regs),
+ .regs = mode_1936x1096_60fps_regs,
+ },
+ .link_freq_index = IMX208_LINK_FREQ_384MHZ_INDEX,
+ },
+ {
+ .width = 968,
+ .height = 548,
+ .vts_def = IMX208_VTS_BINNING,
+ .vts_min = IMX208_VTS_BINNING_MIN,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_968_548_60fps_regs),
+ .regs = mode_968_548_60fps_regs,
+ },
+ .link_freq_index = IMX208_LINK_FREQ_96MHZ_INDEX,
+ },
+};
+
+struct imx208 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ /* Current mode */
+ const struct imx208_mode *cur_mode;
+
+ /*
+ * Mutex for serialized access:
+ * Protect sensor set pad format and start/stop streaming safely.
+ * Protect access to sensor v4l2 controls.
+ */
+ struct mutex imx208_mx;
+
+ /* Streaming on/off */
+ bool streaming;
+
+ /* OTP data */
+ bool otp_read;
+ char otp_data[IMX208_OTP_SIZE];
+};
+
+static inline struct imx208 *to_imx208(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx208, sd);
+}
+
+/* Get bayer order based on flip setting. */
+static u32 imx208_get_format_code(struct imx208 *imx208)
+{
+ /*
+ * Only one bayer order is supported.
+ * It depends on the flip settings.
+ */
+ static const u32 codes[2][2] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, },
+ };
+
+ return codes[imx208->vflip->val][imx208->hflip->val];
+}
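+
+/*
+ * Reading the table above: index [vflip][hflip] tracks which corner of
+ * the bayer pattern sampling starts from, so the order walks
+ * RGGB -> GRBG (hflip) -> GBRG (vflip) -> BGGR (both).
+ */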
+
+/* Read registers up to 4 at a time */
+static int imx208_read_reg(struct imx208 *imx208, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { reg >> 8, reg & 0xff };
+ u8 data_buf[4] = { 0, };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
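+
+/*
+ * Note on the read path above: a sub-4-byte read is placed in the tail
+ * of the zero-initialized data_buf, so get_unaligned_be32() on the
+ * whole buffer yields the big-endian register value without any
+ * per-length special casing.
+ */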
+
+/* Write registers up to 4 at a time */
+static int imx208_write_reg(struct imx208 *imx208, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
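+
+/*
+ * The mirror image of the read path: shifting val left by
+ * 8 * (4 - len) bits moves the significant bytes to the front of the
+ * 32-bit field, so sending len + 2 bytes transmits exactly the 2-byte
+ * address followed by the len data bytes.
+ */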
+
+/* Write a list of registers */
+static int imx208_write_regs(struct imx208 *imx208,
+ const struct imx208_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < len; i++) {
+ ret = imx208_write_reg(imx208, regs[i].address, 1,
+ regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(
+ &client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Open sub-device */
+static int imx208_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ /* Initialize try_fmt */
+ try_fmt->width = supported_modes[0].width;
+ try_fmt->height = supported_modes[0].height;
+ try_fmt->code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static int imx208_update_digital_gain(struct imx208 *imx208, u32 len, u32 val)
+{
+ int ret;
+
+ val = imx208_discrete_digital_gain[val] << IMX208_DIGITAL_GAIN_SHIFT;
+
+ ret = imx208_write_reg(imx208, IMX208_REG_GR_DIGITAL_GAIN, 2, val);
+ if (ret)
+ return ret;
+
+ ret = imx208_write_reg(imx208, IMX208_REG_GB_DIGITAL_GAIN, 2, val);
+ if (ret)
+ return ret;
+
+ ret = imx208_write_reg(imx208, IMX208_REG_R_DIGITAL_GAIN, 2, val);
+ if (ret)
+ return ret;
+
+ return imx208_write_reg(imx208, IMX208_REG_B_DIGITAL_GAIN, 2, val);
+}
+
+static int imx208_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx208 *imx208 =
+ container_of(ctrl->handler, struct imx208, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+
+ /*
+ * Applying V4L2 control values only happens
+ * when power is up for streaming.
+ */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = imx208_write_reg(imx208, IMX208_REG_ANALOG_GAIN,
+ 2, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = imx208_write_reg(imx208, IMX208_REG_EXPOSURE,
+ 2, ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx208_update_digital_gain(imx208, 2, ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ /* Update VTS that meets expected vertical blanking */
+ ret = imx208_write_reg(imx208, IMX208_REG_VTS, 2,
+ imx208->cur_mode->height + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx208_write_reg(imx208, IMX208_REG_TEST_PATTERN_MODE,
+ 2, imx208_test_pattern_val[ctrl->val]);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
+ ret = imx208_write_reg(imx208, IMX208_REG_ORIENTATION_CONTROL,
+ 1,
+ imx208->hflip->val |
+ imx208->vflip->val << 1);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_err(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx208_ctrl_ops = {
+ .s_ctrl = imx208_set_ctrl,
+};
+
+static const struct v4l2_ctrl_config imx208_digital_gain_control = {
+ .ops = &imx208_ctrl_ops,
+ .id = V4L2_CID_DIGITAL_GAIN,
+ .name = "Digital Gain",
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .min = 0,
+ .max = ARRAY_SIZE(imx208_discrete_digital_gain) - 1,
+ .step = 0,
+ .def = 0,
+ .menu_skip_mask = 0,
+ .qmenu_int = imx208_discrete_digital_gain,
+};
+
+static int imx208_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = imx208_get_format_code(imx208);
+
+ return 0;
+}
+
+static int imx208_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != imx208_get_format_code(imx208))
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void imx208_mode_to_pad_format(struct imx208 *imx208,
+ const struct imx208_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = imx208_get_format_code(imx208);
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int __imx208_get_pad_format(struct imx208 *imx208,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt->format = *v4l2_subdev_get_try_format(&imx208->sd, cfg,
+ fmt->pad);
+ else
+ imx208_mode_to_pad_format(imx208, imx208->cur_mode, fmt);
+
+ return 0;
+}
+
+static int imx208_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+ int ret;
+
+ mutex_lock(&imx208->imx208_mx);
+ ret = __imx208_get_pad_format(imx208, cfg, fmt);
+ mutex_unlock(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static int imx208_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+ const struct imx208_mode *mode;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
+
+ mutex_lock(&imx208->imx208_mx);
+
+ fmt->format.code = imx208_get_format_code(imx208);
+ mode = v4l2_find_nearest_size(
+ supported_modes, ARRAY_SIZE(supported_modes), width, height,
+ fmt->format.width, fmt->format.height);
+ imx208_mode_to_pad_format(imx208, mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
+ } else {
+ imx208->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(imx208->link_freq, mode->link_freq_index);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(imx208->pixel_rate, pixel_rate);
+ /* Update limits and set FPS to default */
+ vblank_def = imx208->cur_mode->vts_def -
+ imx208->cur_mode->height;
+ vblank_min = imx208->cur_mode->vts_min -
+ imx208->cur_mode->height;
+ __v4l2_ctrl_modify_range(
+ imx208->vblank, vblank_min,
+ IMX208_VTS_MAX - imx208->cur_mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(imx208->vblank, vblank_def);
+ h_blank =
+ link_freq_configs[mode->link_freq_index].pixels_per_line
+ - imx208->cur_mode->width;
+ __v4l2_ctrl_modify_range(imx208->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&imx208->imx208_mx);
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx208_start_streaming(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ const struct imx208_reg_list *reg_list;
+ int ret, link_freq_index;
+
+ /* Setup PLL */
+ link_freq_index = imx208->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &imx208->cur_mode->reg_list;
+ ret = imx208_write_regs(imx208, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(imx208->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /* set stream on register */
+ return imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
+ 1, IMX208_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int imx208_stop_streaming(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+
+ /* set stream off register */
+ ret = imx208_write_reg(imx208, IMX208_REG_MODE_SELECT,
+ 1, IMX208_MODE_STANDBY);
+ if (ret)
+ dev_err(&client->dev, "%s failed to set stream\n", __func__);
+
+ /*
+ * Return success even if it was an error, as there is nothing the
+ * caller can do about it.
+ */
+ return 0;
+}
+
+static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx208 *imx208 = to_imx208(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&imx208->imx208_mx);
+ if (imx208->streaming == enable) {
+ mutex_unlock(&imx208->imx208_mx);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
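+ /*
+ * pm_runtime_get_sync() bumps the usage count even on failure,
+ * so the error path must drop it again.
+ */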
+ if (ret < 0)
+ goto err_rpm_put;
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = imx208_start_streaming(imx208);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ imx208_stop_streaming(imx208);
+ pm_runtime_put(&client->dev);
+ }
+
+ imx208->streaming = enable;
+ mutex_unlock(&imx208->imx208_mx);
+
+ /* vflip and hflip cannot change during streaming */
+ v4l2_ctrl_grab(imx208->vflip, enable);
+ v4l2_ctrl_grab(imx208->hflip, enable);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+ mutex_unlock(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static int __maybe_unused imx208_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+
+ if (imx208->streaming)
+ imx208_stop_streaming(imx208);
+
+ return 0;
+}
+
+static int __maybe_unused imx208_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+ int ret;
+
+ if (imx208->streaming) {
+ ret = imx208_start_streaming(imx208);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ imx208_stop_streaming(imx208);
+ imx208->streaming = 0;
+
+ return ret;
+}
+
+/* Verify chip ID */
+static int imx208_identify_module(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ int ret;
+ u32 val;
+
+ ret = imx208_read_reg(imx208, IMX208_REG_CHIP_ID,
+ 2, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read chip id %x\n",
+ IMX208_CHIP_ID);
+ return ret;
+ }
+
+ if (val != IMX208_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ IMX208_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops imx208_video_ops = {
+ .s_stream = imx208_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx208_pad_ops = {
+ .enum_mbus_code = imx208_enum_mbus_code,
+ .get_fmt = imx208_get_pad_format,
+ .set_fmt = imx208_set_pad_format,
+ .enum_frame_size = imx208_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops imx208_subdev_ops = {
+ .video = &imx208_video_ops,
+ .pad = &imx208_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops imx208_internal_ops = {
+ .open = imx208_open,
+};
+
+static int imx208_read_otp(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2] = { IMX208_OTP_BASE >> 8, IMX208_OTP_BASE & 0xff };
+ int ret = 0;
+
+ mutex_lock(&imx208->imx208_mx);
+
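+ /* The OTP contents are immutable, so read them once and cache them. */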
+ if (imx208->otp_read)
+ goto out_unlock;
+
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto out_unlock;
+ }
+
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from registers */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = sizeof(imx208->otp_data);
+ msgs[1].buf = imx208->otp_data;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret == ARRAY_SIZE(msgs)) {
+ imx208->otp_read = true;
+ ret = 0;
+ } else if (ret >= 0) {
+ /* Partial transfer: report it as an I/O error */
+ ret = -EIO;
+ }
+
+ pm_runtime_put(&client->dev);
+
+out_unlock:
+ mutex_unlock(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static ssize_t otp_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(kobj_to_dev(kobj));
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+ int ret;
+
+ ret = imx208_read_otp(imx208);
+ if (ret)
+ return ret;
+
+ memcpy(buf, &imx208->otp_data[off], count);
+ return count;
+}
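+
+/*
+ * Expose the cached OTP blob as a read-only "otp" attribute under the
+ * i2c device's sysfs directory; e.g. (bus/address hypothetical):
+ *   hexdump -C /sys/bus/i2c/devices/2-0010/otp
+ */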
+static const BIN_ATTR_RO(otp, IMX208_OTP_SIZE);
+
+/* Initialize control handlers */
+static int imx208_init_controls(struct imx208 *imx208)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx208->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr = &imx208->ctrl_handler;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 8);
+ if (ret)
+ return ret;
+
+ mutex_init(&imx208->imx208_mx);
+ ctrl_hdlr->lock = &imx208->imx208_mx;
+ imx208->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &imx208_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(link_freq_menu_items) - 1,
+ 0, link_freq_menu_items);
+
+ if (imx208->link_freq)
+ imx208->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = link_freq_to_pixel_rate(
+ link_freq_menu_items[ARRAY_SIZE(
+ link_freq_menu_items) - 1]);
+ /* By default, PIXEL_RATE is read only */
+ imx208->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
+
+ vblank_def = imx208->cur_mode->vts_def - imx208->cur_mode->height;
+ vblank_min = imx208->cur_mode->vts_min - imx208->cur_mode->height;
+ imx208->vblank = v4l2_ctrl_new_std(
+ ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_VBLANK,
+ vblank_min,
+ IMX208_VTS_MAX - imx208->cur_mode->height, 1,
+ vblank_def);
+
+ imx208->hblank = v4l2_ctrl_new_std(
+ ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_HBLANK,
+ IMX208_PPL_384MHZ - imx208->cur_mode->width,
+ IMX208_PPL_384MHZ - imx208->cur_mode->width,
+ 1,
+ IMX208_PPL_384MHZ - imx208->cur_mode->width);
+
+ if (imx208->hblank)
+ imx208->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = imx208->cur_mode->vts_def - 8;
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_EXPOSURE,
+ IMX208_EXPOSURE_MIN, IMX208_EXPOSURE_MAX,
+ IMX208_EXPOSURE_STEP, IMX208_EXPOSURE_DEFAULT);
+
+ imx208->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ imx208->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx208_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX208_ANA_GAIN_MIN, IMX208_ANA_GAIN_MAX,
+ IMX208_ANA_GAIN_STEP, IMX208_ANA_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_custom(ctrl_hdlr, &imx208_digital_gain_control, NULL);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx208_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx208_test_pattern_menu) - 1,
+ 0, 0, imx208_test_pattern_menu);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ imx208->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static void imx208_free_controls(struct imx208 *imx208)
+{
+ v4l2_ctrl_handler_free(imx208->sd.ctrl_handler);
+}
+
+static int imx208_probe(struct i2c_client *client)
+{
+ struct imx208 *imx208;
+ int ret;
+ u32 val = 0;
+
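+ /*
+ * val keeps its 0 default when the property is absent, so a missing
+ * clock-frequency also fails the check below; only a 19.2 MHz
+ * external clock is supported.
+ */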
+ device_property_read_u32(&client->dev, "clock-frequency", &val);
+ if (val != 19200000) {
+ dev_err(&client->dev,
+ "Unsupported clock-frequency %u. Expected 19200000.\n",
+ val);
+ return -EINVAL;
+ }
+
+ imx208 = devm_kzalloc(&client->dev, sizeof(*imx208), GFP_KERNEL);
+ if (!imx208)
+ return -ENOMEM;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&imx208->sd, client, &imx208_subdev_ops);
+
+ /* Check module identity */
+ ret = imx208_identify_module(imx208);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ return ret;
+ }
+
+ /* Set default mode to max resolution */
+ imx208->cur_mode = &supported_modes[0];
+
+ ret = imx208_init_controls(imx208);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ return ret;
+ }
+
+ /* Initialize subdev */
+ imx208->sd.internal_ops = &imx208_internal_ops;
+ imx208->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ imx208->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ /* Initialize source pad */
+ imx208->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&imx208->sd.entity, 1, &imx208->pad, 0);
+ if (ret) {
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&imx208->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ ret = device_create_bin_file(&client->dev, &bin_attr_otp);
+ if (ret) {
+ dev_err(&client->dev, "sysfs otp creation failed\n");
+ goto error_async_subdev;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_async_subdev:
+ v4l2_async_unregister_subdev(&imx208->sd);
+
+error_media_entity:
+ media_entity_cleanup(&imx208->sd.entity);
+
+error_handler_free:
+ imx208_free_controls(imx208);
+ mutex_destroy(&imx208->imx208_mx);
+
+ return ret;
+}
+
+static int imx208_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx208 *imx208 = to_imx208(sd);
+
+ device_remove_bin_file(&client->dev, &bin_attr_otp);
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ imx208_free_controls(imx208);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&imx208->imx208_mx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx208_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx208_suspend, imx208_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id imx208_acpi_ids[] = {
+ { "INT3478" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, imx208_acpi_ids);
+#endif
+
+static struct i2c_driver imx208_i2c_driver = {
+ .driver = {
+ .name = "imx208",
+ .pm = &imx208_pm_ops,
+ .acpi_match_table = ACPI_PTR(imx208_acpi_ids),
+ },
+ .probe_new = imx208_probe,
+ .remove = imx208_remove,
+};
+
+module_i2c_driver(imx208_i2c_driver);
+
+MODULE_AUTHOR("Yeh, Andy <andy.yeh@intel.com>");
+MODULE_AUTHOR("Chen, Ping-chung <ping-chung.chen@intel.com>");
+MODULE_DESCRIPTION("Sony IMX208 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index d4cfa36498525f..0c782700ee3f92 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1,15 +1,5 @@
-/*
- * Copyright (c) 2018 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -20,10 +10,6 @@
#include <media/v4l2-device.h>
#include <asm/unaligned.h>
-#ifndef V4L2_CID_DIGITAL_GAIN
-#define V4L2_CID_DIGITAL_GAIN V4L2_CID_GAIN
-#endif
-
#define IMX258_REG_VALUE_08BIT 1
#define IMX258_REG_VALUE_16BIT 2
@@ -36,7 +22,6 @@
#define IMX258_CHIP_ID 0x0258
/* V_TIMING internal */
-#define IMX258_REG_VTS 0x0340
#define IMX258_VTS_30FPS 0x0c98
#define IMX258_VTS_30FPS_2K 0x0638
#define IMX258_VTS_30FPS_VGA 0x034c
@@ -71,13 +56,17 @@
#define IMX258_REG_B_DIGITAL_GAIN 0x0212
#define IMX258_REG_GB_DIGITAL_GAIN 0x0214
#define IMX258_DGTL_GAIN_MIN 0
-#define IMX258_DGTL_GAIN_MAX 4096 /* Max = 0xFFF */
+#define IMX258_DGTL_GAIN_MAX 4096 /* Max = 0xFFF */
#define IMX258_DGTL_GAIN_DEFAULT 1024
-#define IMX258_DGTL_GAIN_STEP 1
+#define IMX258_DGTL_GAIN_STEP 1
+
+/* Test Pattern Control */
+#define IMX258_REG_TEST_PATTERN 0x0600
/* Orientation */
-#define REG_MIRROR_FLIP_CONTROL 0x0101
+#define REG_MIRROR_FLIP_CONTROL 0x0101
#define REG_CONFIG_MIRROR_FLIP 0x03
+#define REG_CONFIG_FLIP_TEST_PATTERN 0x02
struct imx258_reg {
u16 address;
@@ -510,10 +499,10 @@ static const struct imx258_reg mode_1048_780_regs[] = {
static const char * const imx258_test_pattern_menu[] = {
"Disabled",
- "Vertical Color Bar Type 1",
- "Vertical Color Bar Type 2",
- "Vertical Color Bar Type 3",
- "Vertical Color Bar Type 4"
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
};
/* Configurations for supported link frequencies */
@@ -680,7 +669,7 @@ static int imx258_write_reg(struct imx258 *imx258, u16 reg, u32 len, u32 val)
/* Write a list of registers */
static int imx258_write_regs(struct imx258 *imx258,
- const struct imx258_reg *regs, u32 len)
+ const struct imx258_reg *regs, u32 len)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
unsigned int i;
@@ -773,16 +762,20 @@ static int imx258_set_ctrl(struct v4l2_ctrl *ctrl)
ret = imx258_update_digital_gain(imx258, IMX258_REG_VALUE_16BIT,
ctrl->val);
break;
- case V4L2_CID_VBLANK:
- /*
- * Auto Frame Length Line Control is enabled by default.
- * Not need control Vblank Register.
- */
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx258_write_reg(imx258, IMX258_REG_TEST_PATTERN,
+ IMX258_REG_VALUE_16BIT,
+ ctrl->val);
+ if (ret)
+ break;
+ ret = imx258_write_reg(imx258, REG_MIRROR_FLIP_CONTROL,
+ IMX258_REG_VALUE_08BIT,
+ !ctrl->val ? REG_CONFIG_MIRROR_FLIP :
+ REG_CONFIG_FLIP_TEST_PATTERN);
break;
default:
dev_info(&client->dev,
"ctrl(id:0x%x,val:0x%x) is not handled\n",
ctrl->id, ctrl->val);
+ ret = -EINVAL;
break;
}
@@ -809,8 +802,8 @@ static int imx258_enum_mbus_code(struct v4l2_subdev *sd,
}
static int imx258_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_frame_size_enum *fse)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
{
if (fse->index >= ARRAY_SIZE(supported_modes))
return -EINVAL;
@@ -827,7 +820,7 @@ static int imx258_enum_frame_size(struct v4l2_subdev *sd,
}
static void imx258_update_pad_format(const struct imx258_mode *mode,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_format *fmt)
{
fmt->format.width = mode->width;
fmt->format.height = mode->height;
@@ -836,8 +829,8 @@ static void imx258_update_pad_format(const struct imx258_mode *mode,
}
static int __imx258_get_pad_format(struct imx258 *imx258,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
fmt->format = *v4l2_subdev_get_try_format(&imx258->sd, cfg,
@@ -849,8 +842,8 @@ static int __imx258_get_pad_format(struct imx258 *imx258,
}
static int imx258_get_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
struct imx258 *imx258 = to_imx258(sd);
int ret;
@@ -863,8 +856,8 @@ static int imx258_get_pad_format(struct v4l2_subdev *sd,
}
static int imx258_set_pad_format(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
{
struct imx258 *imx258 = to_imx258(sd);
const struct imx258_mode *mode;
@@ -880,8 +873,8 @@ static int imx258_set_pad_format(struct v4l2_subdev *sd,
/* Only one raw bayer(GBRG) order is supported */
fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
- mode = v4l2_find_nearest_size(
- supported_modes, ARRAY_SIZE(supported_modes), width, height,
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes), width, height,
fmt->format.width, fmt->format.height);
imx258_update_pad_format(mode, fmt);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
@@ -942,7 +935,7 @@ static int imx258_start_streaming(struct imx258 *imx258)
/* Set Orientation be 180 degree */
ret = imx258_write_reg(imx258, REG_MIRROR_FLIP_CONTROL,
- IMX258_REG_VALUE_08BIT, REG_CONFIG_MIRROR_FLIP);
+ IMX258_REG_VALUE_08BIT, REG_CONFIG_MIRROR_FLIP);
if (ret) {
dev_err(&client->dev, "%s failed to set orientation\n",
__func__);
@@ -1064,7 +1057,7 @@ static int imx258_identify_module(struct imx258 *imx258)
u32 val;
ret = imx258_read_reg(imx258, IMX258_REG_CHIP_ID,
- IMX258_REG_VALUE_16BIT, &val);
+ IMX258_REG_VALUE_16BIT, &val);
if (ret) {
dev_err(&client->dev, "failed to read chip id %x\n",
IMX258_CHIP_ID);
@@ -1105,7 +1098,6 @@ static int imx258_init_controls(struct imx258 *imx258)
{
struct i2c_client *client = v4l2_get_subdevdata(&imx258->sd);
struct v4l2_ctrl_handler *ctrl_hdlr;
- s64 exposure_max;
s64 vblank_def;
s64 vblank_min;
s64 pixel_rate_min;
@@ -1146,6 +1138,9 @@ static int imx258_init_controls(struct imx258 *imx258)
IMX258_VTS_MAX - imx258->cur_mode->height, 1,
vblank_def);
+ if (imx258->vblank)
+ imx258->vblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
imx258->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &imx258_ctrl_ops, V4L2_CID_HBLANK,
IMX258_PPL_DEFAULT - imx258->cur_mode->width,
@@ -1156,7 +1151,6 @@ static int imx258_init_controls(struct imx258 *imx258)
if (imx258->hblank)
imx258->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
- exposure_max = imx258->cur_mode->vts_def - 8;
imx258->exposure = v4l2_ctrl_new_std(
ctrl_hdlr, &imx258_ctrl_ops,
V4L2_CID_EXPOSURE, IMX258_EXPOSURE_MIN,
@@ -1237,10 +1231,10 @@ static int imx258_probe(struct i2c_client *client)
/* Initialize source pad */
imx258->pad.flags = MEDIA_PAD_FL_SOURCE;
+
ret = media_entity_init(&imx258->sd.entity, 1, &imx258->pad, 0);
- if (ret) {
+ if (ret)
goto error_handler_free;
- }
ret = v4l2_async_register_subdev_sensor_common(&imx258->sd);
if (ret < 0)
diff --git a/drivers/media/i2c/imx319.c b/drivers/media/i2c/imx319.c
new file mode 100644
index 00000000000000..53f1d210905054
--- /dev/null
+++ b/drivers/media/i2c/imx319.c
@@ -0,0 +1,2557 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Intel Corporation
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#define IMX319_REG_MODE_SELECT 0x0100
+#define IMX319_MODE_STANDBY 0x00
+#define IMX319_MODE_STREAMING 0x01
+
+/* Chip ID */
+#define IMX319_REG_CHIP_ID 0x0016
+#define IMX319_CHIP_ID 0x0319
+
+/* V_TIMING internal */
+#define IMX319_REG_FLL 0x0340
+#define IMX319_FLL_MAX 0xffff
+
+/* Exposure control */
+#define IMX319_REG_EXPOSURE 0x0202
+#define IMX319_EXPOSURE_MIN 1
+#define IMX319_EXPOSURE_STEP 1
+#define IMX319_EXPOSURE_DEFAULT 0x04f6
+
+/*
+ * The digital gain control register pair for each colour channel
+ * looks like:
+ * +-----------------+------------------+
+ * | [15:8] | [7:0] |
+ * +-----------------+------------------+
+ * | 0x020e | 0x020f |
+ * --------------------------------------
+ * It holds the digital gain as a times value (integral + fractional);
+ * the [15:8] bits are the integral part and the [7:0] bits the
+ * fractional part, i.e.:
+ * gain value (unit: times) = REG[15:8] + REG[7:0]/0x100
+ * Only values in the 0x0100 ~ 0x0FFF range are allowed.
+ * Analog gain uses 10 bits in the registers; the allowed range is 0 ~ 960.
+ */
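+/*
+ * For illustration: a register value of 0x0180 is 0x01 + 0x80/0x100 =
+ * 1.5x digital gain, and the 0x0100 default is exactly 1.0x.
+ */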
+/* Analog gain control */
+#define IMX319_REG_ANALOG_GAIN 0x0204
+#define IMX319_ANA_GAIN_MIN 0
+#define IMX319_ANA_GAIN_MAX 960
+#define IMX319_ANA_GAIN_STEP 1
+#define IMX319_ANA_GAIN_DEFAULT 0
+
+/* Digital gain control */
+#define IMX319_REG_DPGA_USE_GLOBAL_GAIN 0x3ff9
+#define IMX319_REG_DIG_GAIN_GLOBAL 0x020e
+#define IMX319_DGTL_GAIN_MIN 256
+#define IMX319_DGTL_GAIN_MAX 4095
+#define IMX319_DGTL_GAIN_STEP 1
+#define IMX319_DGTL_GAIN_DEFAULT 256
+
+/* Test Pattern Control */
+#define IMX319_REG_TEST_PATTERN 0x0600
+#define IMX319_TEST_PATTERN_DISABLED 0
+#define IMX319_TEST_PATTERN_SOLID_COLOR 1
+#define IMX319_TEST_PATTERN_COLOR_BARS 2
+#define IMX319_TEST_PATTERN_GRAY_COLOR_BARS 3
+#define IMX319_TEST_PATTERN_PN9 4
+
+/* Flip Control */
+#define IMX319_REG_ORIENTATION 0x0101
+
+/* default link frequency and external clock */
+#define IMX319_LINK_FREQ_DEFAULT 482400000
+#define IMX319_EXT_CLK 19200000
+#define IMX319_LINK_FREQ_INDEX 0
+
+struct imx319_reg {
+ u16 address;
+ u8 val;
+};
+
+struct imx319_reg_list {
+ u32 num_of_regs;
+ const struct imx319_reg *regs;
+};
+
+/* Mode: resolution and related configuration values */
+struct imx319_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 fll_def;
+ u32 fll_min;
+
+ /* H-timing */
+ u32 llp;
+
+ /* index of link frequency */
+ u32 link_freq_index;
+
+ /* Default register values */
+ struct imx319_reg_list reg_list;
+};
+
+struct imx319_hwcfg {
+ u32 ext_clk; /* sensor external clk */
+ s64 *link_freqs; /* CSI-2 link frequencies */
+ unsigned int nr_of_link_freqs;
+};
+
+struct imx319 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ /* Current mode */
+ const struct imx319_mode *cur_mode;
+
+ struct imx319_hwcfg *hwcfg;
+ s64 link_def_freq; /* CSI-2 link default frequency */
+
+ /*
+ * Mutex for serialized access:
+ * protects the set_pad_format and start/stop streaming paths as
+ * well as access to the sensor's V4L2 controls.
+ */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static const struct imx319_reg imx319_global_regs[] = {
+ { 0x0136, 0x13 },
+ { 0x0137, 0x33 },
+ { 0x3c7e, 0x05 },
+ { 0x3c7f, 0x07 },
+ { 0x4d39, 0x0b },
+ { 0x4d41, 0x33 },
+ { 0x4d43, 0x0c },
+ { 0x4d49, 0x89 },
+ { 0x4e05, 0x0b },
+ { 0x4e0d, 0x33 },
+ { 0x4e0f, 0x0c },
+ { 0x4e15, 0x89 },
+ { 0x4e49, 0x2a },
+ { 0x4e51, 0x33 },
+ { 0x4e53, 0x0c },
+ { 0x4e59, 0x89 },
+ { 0x5601, 0x4f },
+ { 0x560b, 0x45 },
+ { 0x562f, 0x0a },
+ { 0x5643, 0x0a },
+ { 0x5645, 0x0c },
+ { 0x56ef, 0x51 },
+ { 0x586f, 0x33 },
+ { 0x5873, 0x89 },
+ { 0x5905, 0x33 },
+ { 0x5907, 0x89 },
+ { 0x590d, 0x33 },
+ { 0x590f, 0x89 },
+ { 0x5915, 0x33 },
+ { 0x5917, 0x89 },
+ { 0x5969, 0x1c },
+ { 0x596b, 0x72 },
+ { 0x5971, 0x33 },
+ { 0x5973, 0x89 },
+ { 0x5975, 0x33 },
+ { 0x5977, 0x89 },
+ { 0x5979, 0x1c },
+ { 0x597b, 0x72 },
+ { 0x5985, 0x33 },
+ { 0x5987, 0x89 },
+ { 0x5999, 0x1c },
+ { 0x599b, 0x72 },
+ { 0x59a5, 0x33 },
+ { 0x59a7, 0x89 },
+ { 0x7485, 0x08 },
+ { 0x7487, 0x0c },
+ { 0x7489, 0xc7 },
+ { 0x748b, 0x8b },
+ { 0x9004, 0x09 },
+ { 0x9200, 0x6a },
+ { 0x9201, 0x22 },
+ { 0x9202, 0x6a },
+ { 0x9203, 0x23 },
+ { 0x9204, 0x5f },
+ { 0x9205, 0x23 },
+ { 0x9206, 0x5f },
+ { 0x9207, 0x24 },
+ { 0x9208, 0x5f },
+ { 0x9209, 0x26 },
+ { 0x920a, 0x5f },
+ { 0x920b, 0x27 },
+ { 0x920c, 0x5f },
+ { 0x920d, 0x29 },
+ { 0x920e, 0x5f },
+ { 0x920f, 0x2a },
+ { 0x9210, 0x5f },
+ { 0x9211, 0x2c },
+ { 0xbc22, 0x1a },
+ { 0xf01f, 0x04 },
+ { 0xf021, 0x03 },
+ { 0xf023, 0x02 },
+ { 0xf03d, 0x05 },
+ { 0xf03f, 0x03 },
+ { 0xf041, 0x02 },
+ { 0xf0af, 0x04 },
+ { 0xf0b1, 0x03 },
+ { 0xf0b3, 0x02 },
+ { 0xf0cd, 0x05 },
+ { 0xf0cf, 0x03 },
+ { 0xf0d1, 0x02 },
+ { 0xf13f, 0x04 },
+ { 0xf141, 0x03 },
+ { 0xf143, 0x02 },
+ { 0xf15d, 0x05 },
+ { 0xf15f, 0x03 },
+ { 0xf161, 0x02 },
+ { 0xf1cf, 0x04 },
+ { 0xf1d1, 0x03 },
+ { 0xf1d3, 0x02 },
+ { 0xf1ed, 0x05 },
+ { 0xf1ef, 0x03 },
+ { 0xf1f1, 0x02 },
+ { 0xf287, 0x04 },
+ { 0xf289, 0x03 },
+ { 0xf28b, 0x02 },
+ { 0xf2a5, 0x05 },
+ { 0xf2a7, 0x03 },
+ { 0xf2a9, 0x02 },
+ { 0xf2b7, 0x04 },
+ { 0xf2b9, 0x03 },
+ { 0xf2bb, 0x02 },
+ { 0xf2d5, 0x05 },
+ { 0xf2d7, 0x03 },
+ { 0xf2d9, 0x02 },
+};
+
+static const struct imx319_reg_list imx319_global_setting = {
+ .num_of_regs = ARRAY_SIZE(imx319_global_regs),
+ .regs = imx319_global_regs,
+};
+
+static const struct imx319_reg mode_3264x2448_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0f },
+ { 0x0343, 0x80 },
+ { 0x0340, 0x0c },
+ { 0x0341, 0xaa },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x01 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x00 },
+ { 0x0409, 0x08 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x08 },
+ { 0x040c, 0x0c },
+ { 0x040d, 0xc0 },
+ { 0x040e, 0x09 },
+ { 0x040f, 0x90 },
+ { 0x034c, 0x0c },
+ { 0x034d, 0xc0 },
+ { 0x034e, 0x09 },
+ { 0x034f, 0x90 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0x48 },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x01 },
+ { 0x3f79, 0x18 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x00 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x0a },
+ { 0x0203, 0x7a },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_3280x2464_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0f },
+ { 0x0343, 0x80 },
+ { 0x0340, 0x0c },
+ { 0x0341, 0xaa },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x01 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x00 },
+ { 0x0409, 0x00 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x00 },
+ { 0x040c, 0x0c },
+ { 0x040d, 0xd0 },
+ { 0x040e, 0x09 },
+ { 0x040f, 0xa0 },
+ { 0x034c, 0x0c },
+ { 0x034d, 0xd0 },
+ { 0x034e, 0x09 },
+ { 0x034f, 0xa0 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0x48 },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x01 },
+ { 0x3f79, 0x18 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x00 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x0a },
+ { 0x0203, 0x7a },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_1936x1096_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0f },
+ { 0x0343, 0x80 },
+ { 0x0340, 0x0c },
+ { 0x0341, 0xaa },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0xac },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x06 },
+ { 0x034b, 0xf3 },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x01 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x02 },
+ { 0x0409, 0xa0 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x00 },
+ { 0x040c, 0x07 },
+ { 0x040d, 0x90 },
+ { 0x040e, 0x04 },
+ { 0x040f, 0x48 },
+ { 0x034c, 0x07 },
+ { 0x034d, 0x90 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0x48 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0x48 },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x01 },
+ { 0x3f79, 0x18 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x00 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x05 },
+ { 0x0203, 0x34 },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_1920x1080_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0f },
+ { 0x0343, 0x80 },
+ { 0x0340, 0x0c },
+ { 0x0341, 0xaa },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0xb4 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x06 },
+ { 0x034b, 0xeb },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x01 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x02 },
+ { 0x0409, 0xa8 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x00 },
+ { 0x040c, 0x07 },
+ { 0x040d, 0x80 },
+ { 0x040e, 0x04 },
+ { 0x040f, 0x38 },
+ { 0x034c, 0x07 },
+ { 0x034d, 0x80 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0x38 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0x48 },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x01 },
+ { 0x3f79, 0x18 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x00 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x05 },
+ { 0x0203, 0x34 },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_1640x1232_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x08 },
+ { 0x0343, 0x20 },
+ { 0x0340, 0x18 },
+ { 0x0341, 0x2a },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x02 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x00 },
+ { 0x0409, 0x00 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x00 },
+ { 0x040c, 0x06 },
+ { 0x040d, 0x68 },
+ { 0x040e, 0x04 },
+ { 0x040f, 0xd0 },
+ { 0x034c, 0x06 },
+ { 0x034d, 0x68 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0xd0 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0xba },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x00 },
+ { 0x3f79, 0x34 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x04 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x04 },
+ { 0x0203, 0xf6 },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_1640x922_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x08 },
+ { 0x0343, 0x20 },
+ { 0x0340, 0x18 },
+ { 0x0341, 0x2a },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x01 },
+ { 0x0347, 0x30 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x08 },
+ { 0x034b, 0x6f },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x02 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x00 },
+ { 0x0409, 0x00 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x02 },
+ { 0x040c, 0x06 },
+ { 0x040d, 0x68 },
+ { 0x040e, 0x03 },
+ { 0x040f, 0x9a },
+ { 0x034c, 0x06 },
+ { 0x034d, 0x68 },
+ { 0x034e, 0x03 },
+ { 0x034f, 0x9a },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0xba },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x00 },
+ { 0x3f79, 0x34 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x04 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x04 },
+ { 0x0203, 0xf6 },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_1296x736_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x08 },
+ { 0x0343, 0x20 },
+ { 0x0340, 0x18 },
+ { 0x0341, 0x2a },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x01 },
+ { 0x0347, 0xf0 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x07 },
+ { 0x034b, 0xaf },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x02 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x00 },
+ { 0x0409, 0xac },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x00 },
+ { 0x040c, 0x05 },
+ { 0x040d, 0x10 },
+ { 0x040e, 0x02 },
+ { 0x040f, 0xe0 },
+ { 0x034c, 0x05 },
+ { 0x034d, 0x10 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0xe0 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0xba },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x00 },
+ { 0x3f79, 0x34 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x04 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x04 },
+ { 0x0203, 0xf6 },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const struct imx319_reg mode_1280x720_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x08 },
+ { 0x0343, 0x20 },
+ { 0x0340, 0x18 },
+ { 0x0341, 0x2a },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x07 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0221, 0x11 },
+ { 0x0381, 0x01 },
+ { 0x0383, 0x01 },
+ { 0x0385, 0x01 },
+ { 0x0387, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x0a },
+ { 0x3140, 0x02 },
+ { 0x3141, 0x00 },
+ { 0x3f0d, 0x0a },
+ { 0x3f14, 0x01 },
+ { 0x3f3c, 0x02 },
+ { 0x3f4d, 0x01 },
+ { 0x3f4c, 0x01 },
+ { 0x4254, 0x7f },
+ { 0x0401, 0x00 },
+ { 0x0404, 0x00 },
+ { 0x0405, 0x10 },
+ { 0x0408, 0x00 },
+ { 0x0409, 0xb4 },
+ { 0x040a, 0x00 },
+ { 0x040b, 0x00 },
+ { 0x040c, 0x05 },
+ { 0x040d, 0x00 },
+ { 0x040e, 0x02 },
+ { 0x040f, 0xd0 },
+ { 0x034c, 0x05 },
+ { 0x034d, 0x00 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0xd0 },
+ { 0x3261, 0x00 },
+ { 0x3264, 0x00 },
+ { 0x3265, 0x10 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x04 },
+ { 0x0305, 0x04 },
+ { 0x0306, 0x01 },
+ { 0x0307, 0x92 },
+ { 0x0309, 0x0a },
+ { 0x030b, 0x02 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0xfa },
+ { 0x0310, 0x00 },
+ { 0x0820, 0x0f },
+ { 0x0821, 0x13 },
+ { 0x0822, 0x33 },
+ { 0x0823, 0x33 },
+ { 0x3e20, 0x01 },
+ { 0x3e37, 0x00 },
+ { 0x3e3b, 0x01 },
+ { 0x38a3, 0x01 },
+ { 0x38a8, 0x00 },
+ { 0x38a9, 0x00 },
+ { 0x38aa, 0x00 },
+ { 0x38ab, 0x00 },
+ { 0x3234, 0x00 },
+ { 0x3fc1, 0x00 },
+ { 0x3235, 0x00 },
+ { 0x3802, 0x00 },
+ { 0x3143, 0x04 },
+ { 0x360a, 0x00 },
+ { 0x0b00, 0x00 },
+ { 0x0106, 0x00 },
+ { 0x0b05, 0x01 },
+ { 0x0b06, 0x01 },
+ { 0x3230, 0x00 },
+ { 0x3602, 0x01 },
+ { 0x3607, 0x01 },
+ { 0x3c00, 0x00 },
+ { 0x3c01, 0xba },
+ { 0x3c02, 0xc8 },
+ { 0x3c03, 0xaa },
+ { 0x3c04, 0x91 },
+ { 0x3c05, 0x54 },
+ { 0x3c06, 0x26 },
+ { 0x3c07, 0x20 },
+ { 0x3c08, 0x51 },
+ { 0x3d80, 0x00 },
+ { 0x3f50, 0x00 },
+ { 0x3f56, 0x00 },
+ { 0x3f57, 0x30 },
+ { 0x3f78, 0x00 },
+ { 0x3f79, 0x34 },
+ { 0x3f7c, 0x00 },
+ { 0x3f7d, 0x00 },
+ { 0x3fba, 0x00 },
+ { 0x3fbb, 0x00 },
+ { 0xa081, 0x04 },
+ { 0xe014, 0x00 },
+ { 0x0202, 0x04 },
+ { 0x0203, 0xf6 },
+ { 0x0224, 0x01 },
+ { 0x0225, 0xf4 },
+ { 0x0204, 0x00 },
+ { 0x0205, 0x00 },
+ { 0x0216, 0x00 },
+ { 0x0217, 0x00 },
+ { 0x020e, 0x01 },
+ { 0x020f, 0x00 },
+ { 0x0210, 0x01 },
+ { 0x0211, 0x00 },
+ { 0x0212, 0x01 },
+ { 0x0213, 0x00 },
+ { 0x0214, 0x01 },
+ { 0x0215, 0x00 },
+ { 0x0218, 0x01 },
+ { 0x0219, 0x00 },
+ { 0x3614, 0x00 },
+ { 0x3616, 0x0d },
+ { 0x3617, 0x56 },
+ { 0xb612, 0x20 },
+ { 0xb613, 0x20 },
+ { 0xb614, 0x20 },
+ { 0xb615, 0x20 },
+ { 0xb616, 0x0a },
+ { 0xb617, 0x0a },
+ { 0xb618, 0x20 },
+ { 0xb619, 0x20 },
+ { 0xb61a, 0x20 },
+ { 0xb61b, 0x20 },
+ { 0xb61c, 0x0a },
+ { 0xb61d, 0x0a },
+ { 0xb666, 0x30 },
+ { 0xb667, 0x30 },
+ { 0xb668, 0x30 },
+ { 0xb669, 0x30 },
+ { 0xb66a, 0x14 },
+ { 0xb66b, 0x14 },
+ { 0xb66c, 0x20 },
+ { 0xb66d, 0x20 },
+ { 0xb66e, 0x20 },
+ { 0xb66f, 0x20 },
+ { 0xb670, 0x10 },
+ { 0xb671, 0x10 },
+ { 0x3237, 0x00 },
+ { 0x3900, 0x00 },
+ { 0x3901, 0x00 },
+ { 0x3902, 0x00 },
+ { 0x3904, 0x00 },
+ { 0x3905, 0x00 },
+ { 0x3906, 0x00 },
+ { 0x3907, 0x00 },
+ { 0x3908, 0x00 },
+ { 0x3909, 0x00 },
+ { 0x3912, 0x00 },
+ { 0x3930, 0x00 },
+ { 0x3931, 0x00 },
+ { 0x3933, 0x00 },
+ { 0x3934, 0x00 },
+ { 0x3935, 0x00 },
+ { 0x3936, 0x00 },
+ { 0x3937, 0x00 },
+ { 0x30ac, 0x00 },
+};
+
+static const char * const imx319_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
+};
+
+/* supported link frequencies */
+static const s64 link_freq_menu_items[] = {
+ IMX319_LINK_FREQ_DEFAULT,
+};
+
+/* Mode configs */
+static const struct imx319_mode supported_modes[] = {
+ {
+ .width = 3280,
+ .height = 2464,
+ .fll_def = 3242,
+ .fll_min = 3242,
+ .llp = 3968,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
+ .regs = mode_3280x2464_regs,
+ },
+ },
+ {
+ .width = 3264,
+ .height = 2448,
+ .fll_def = 3242,
+ .fll_min = 3242,
+ .llp = 3968,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs),
+ .regs = mode_3264x2448_regs,
+ },
+ },
+ {
+ .width = 1936,
+ .height = 1096,
+ .fll_def = 3242,
+ .fll_min = 3242,
+ .llp = 3968,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1936x1096_regs),
+ .regs = mode_1936x1096_regs,
+ },
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fll_def = 3242,
+ .fll_min = 3242,
+ .llp = 3968,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1920x1080_regs),
+ .regs = mode_1920x1080_regs,
+ },
+ },
+ {
+ .width = 1640,
+ .height = 1232,
+ .fll_def = 5146,
+ .fll_min = 5146,
+ .llp = 2500,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs),
+ .regs = mode_1640x1232_regs,
+ },
+ },
+ {
+ .width = 1640,
+ .height = 922,
+ .fll_def = 5146,
+ .fll_min = 5146,
+ .llp = 2500,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1640x922_regs),
+ .regs = mode_1640x922_regs,
+ },
+ },
+ {
+ .width = 1296,
+ .height = 736,
+ .fll_def = 5146,
+ .fll_min = 5146,
+ .llp = 2500,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x736_regs),
+ .regs = mode_1296x736_regs,
+ },
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fll_def = 5146,
+ .fll_min = 5146,
+ .llp = 2500,
+ .link_freq_index = IMX319_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_regs),
+ .regs = mode_1280x720_regs,
+ },
+ },
+};
+
+static inline struct imx319 *to_imx319(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx319, sd);
+}
+
+/* Get the Bayer order based on the flip settings. */
+static u32 imx319_get_format_code(struct imx319 *imx319)
+{
+ /*
+ * Only one Bayer order is output at a time;
+ * which one depends on the current flip settings.
+ */
+ u32 code;
+ static const u32 codes[2][2] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, },
+ };
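+ /* e.g. vflip = 1, hflip = 0 selects codes[1][0] = MEDIA_BUS_FMT_SGBRG10_1X10 */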
+
+ lockdep_assert_held(&imx319->mutex);
+ code = codes[imx319->vflip->val][imx319->hflip->val];
+
+ return code;
+}
+
+/* Read registers up to 4 at a time */
+static int imx319_read_reg(struct imx319 *imx319, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = { 0 };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
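+ /*
+ * data_buf was zero-initialized and filled from the tail, so for a
+ * short read (e.g. len == 2 landing in data_buf[2..3]) the big-endian
+ * load below zero-extends the value.
+ */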
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+/* Write registers up to 4 at a time */
+static int imx319_write_reg(struct imx319 *imx319, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
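+ /*
+ * Left-align val within the 32-bit buffer so its len significant
+ * bytes immediately follow the two address bytes (e.g. len == 2
+ * shifts val left by 16); the zero padding is never transmitted
+ * since only len + 2 bytes are sent.
+ */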
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int imx319_write_regs(struct imx319 *imx319,
+ const struct imx319_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ int ret;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ ret = imx319_write_reg(imx319, regs[i].address, 1, regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x return err %d",
+ regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Open sub-device */
+static int imx319_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct imx319 *imx319 = to_imx319(sd);
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ mutex_lock(&imx319->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = imx319->cur_mode->width;
+ try_fmt->height = imx319->cur_mode->height;
+ try_fmt->code = imx319_get_format_code(imx319);
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ mutex_unlock(&imx319->mutex);
+
+ return 0;
+}
+
+static int imx319_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx319 *imx319 = container_of(ctrl->handler,
+ struct imx319, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ s64 max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
+ max = imx319->cur_mode->height + ctrl->val - 18;
+ __v4l2_ctrl_modify_range(imx319->exposure,
+ imx319->exposure->minimum,
+ max, imx319->exposure->step, max);
+ break;
+ }
+
+	/*
+	 * V4L2 control values are only applied when the
+	 * sensor is powered up for streaming.
+	 */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+		/* Analog gain factor is 1024 / (1024 - ctrl->val) */
+ ret = imx319_write_reg(imx319, IMX319_REG_ANALOG_GAIN, 2,
+ ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx319_write_reg(imx319, IMX319_REG_DIG_GAIN_GLOBAL, 2,
+ ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = imx319_write_reg(imx319, IMX319_REG_EXPOSURE, 2,
+ ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ ret = imx319_write_reg(imx319, IMX319_REG_FLL, 2,
+ imx319->cur_mode->height + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx319_write_reg(imx319, IMX319_REG_TEST_PATTERN,
+ 2, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
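+		/* the orientation register packs hflip in bit 0, vflip in bit 1 */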
+ ret = imx319_write_reg(imx319, IMX319_REG_ORIENTATION, 1,
+ imx319->hflip->val |
+ imx319->vflip->val << 1);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx319_ctrl_ops = {
+ .s_ctrl = imx319_set_ctrl,
+};
+
+static int imx319_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx319 *imx319 = to_imx319(sd);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ mutex_lock(&imx319->mutex);
+ code->code = imx319_get_format_code(imx319);
+ mutex_unlock(&imx319->mutex);
+
+ return 0;
+}
+
+static int imx319_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct imx319 *imx319 = to_imx319(sd);
+
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ mutex_lock(&imx319->mutex);
+ if (fse->code != imx319_get_format_code(imx319)) {
+ mutex_unlock(&imx319->mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&imx319->mutex);
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void imx319_update_pad_format(struct imx319 *imx319,
+ const struct imx319_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = imx319_get_format_code(imx319);
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int imx319_do_get_pad_format(struct imx319 *imx319,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt;
+ struct v4l2_subdev *sd = &imx319->sd;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ imx319_update_pad_format(imx319, imx319->cur_mode, fmt);
+ }
+
+ return 0;
+}
+
+static int imx319_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx319 *imx319 = to_imx319(sd);
+ int ret;
+
+ mutex_lock(&imx319->mutex);
+ ret = imx319_do_get_pad_format(imx319, cfg, fmt);
+ mutex_unlock(&imx319->mutex);
+
+ return ret;
+}
+
+static int
+imx319_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx319 *imx319 = to_imx319(sd);
+ const struct imx319_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ u64 pixel_rate;
+ u32 height;
+
+ mutex_lock(&imx319->mutex);
+
+	/*
+	 * Only one Bayer order is supported at a time;
+	 * which one depends on the flip settings.
+	 */
+ fmt->format.code = imx319_get_format_code(imx319);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+ imx319_update_pad_format(imx319, mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ imx319->cur_mode = mode;
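+		/* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */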
+ pixel_rate = imx319->link_def_freq * 2 * 4;
+ do_div(pixel_rate, 10);
+ __v4l2_ctrl_s_ctrl_int64(imx319->pixel_rate, pixel_rate);
+ /* Update limits and set FPS to default */
+ height = imx319->cur_mode->height;
+ vblank_def = imx319->cur_mode->fll_def - height;
+ vblank_min = imx319->cur_mode->fll_min - height;
+ height = IMX319_FLL_MAX - height;
+ __v4l2_ctrl_modify_range(imx319->vblank, vblank_min, height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(imx319->vblank, vblank_def);
+ h_blank = mode->llp - imx319->cur_mode->width;
+ /*
+ * Currently hblank is not changeable.
+ * So FPS control is done only by vblank.
+ */
+ __v4l2_ctrl_modify_range(imx319->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&imx319->mutex);
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx319_start_streaming(struct imx319 *imx319)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ const struct imx319_reg_list *reg_list;
+ int ret;
+
+ /* Global Setting */
+ reg_list = &imx319_global_setting;
+ ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "failed to set global settings");
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &imx319->cur_mode->reg_list;
+ ret = imx319_write_regs(imx319, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+	/* Apply the global digital gain to all color channels */
+ ret = imx319_write_reg(imx319, IMX319_REG_DPGA_USE_GLOBAL_GAIN, 1, 1);
+ if (ret)
+ return ret;
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(imx319->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ return imx319_write_reg(imx319, IMX319_REG_MODE_SELECT,
+ 1, IMX319_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int imx319_stop_streaming(struct imx319 *imx319)
+{
+ return imx319_write_reg(imx319, IMX319_REG_MODE_SELECT,
+ 1, IMX319_MODE_STANDBY);
+}
+
+static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx319 *imx319 = to_imx319(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&imx319->mutex);
+ if (imx319->streaming == enable) {
+ mutex_unlock(&imx319->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
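+			/* get_sync() bumps the usage count even on failure */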
+ pm_runtime_put_noidle(&client->dev);
+ goto err_unlock;
+ }
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = imx319_start_streaming(imx319);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ imx319_stop_streaming(imx319);
+ pm_runtime_put(&client->dev);
+ }
+
+ imx319->streaming = enable;
+
+ /* vflip and hflip cannot change during streaming */
+ __v4l2_ctrl_grab(imx319->vflip, enable);
+ __v4l2_ctrl_grab(imx319->hflip, enable);
+
+ mutex_unlock(&imx319->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&imx319->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused imx319_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx319 *imx319 = to_imx319(sd);
+
+ if (imx319->streaming)
+ imx319_stop_streaming(imx319);
+
+ return 0;
+}
+
+static int __maybe_unused imx319_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx319 *imx319 = to_imx319(sd);
+ int ret;
+
+ if (imx319->streaming) {
+ ret = imx319_start_streaming(imx319);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ imx319_stop_streaming(imx319);
+	imx319->streaming = false;
+ return ret;
+}
+
+/* Verify chip ID */
+static int imx319_identify_module(struct imx319 *imx319)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ int ret;
+ u32 val;
+
+ ret = imx319_read_reg(imx319, IMX319_REG_CHIP_ID, 2, &val);
+ if (ret)
+ return ret;
+
+ if (val != IMX319_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ IMX319_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_core_ops imx319_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops imx319_video_ops = {
+ .s_stream = imx319_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx319_pad_ops = {
+ .enum_mbus_code = imx319_enum_mbus_code,
+ .get_fmt = imx319_get_pad_format,
+ .set_fmt = imx319_set_pad_format,
+ .enum_frame_size = imx319_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops imx319_subdev_ops = {
+ .core = &imx319_subdev_core_ops,
+ .video = &imx319_video_ops,
+ .pad = &imx319_pad_ops,
+};
+
+static const struct media_entity_operations imx319_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops imx319_internal_ops = {
+ .open = imx319_open,
+};
+
+/* Initialize control handlers */
+static int imx319_init_controls(struct imx319 *imx319)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx319->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 hblank;
+ u64 pixel_rate;
+ const struct imx319_mode *mode;
+ u32 max;
+ int ret;
+
+ ctrl_hdlr = &imx319->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &imx319->mutex;
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ imx319->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_LINK_FREQ, max, 0,
+ link_freq_menu_items);
+ if (imx319->link_freq)
+ imx319->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
+ pixel_rate = imx319->link_def_freq * 2 * 4;
+ do_div(pixel_rate, 10);
+ /* By default, PIXEL_RATE is read only */
+ imx319->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, pixel_rate,
+ pixel_rate, 1, pixel_rate);
+
+	/* Initialize vblank/hblank/exposure parameters based on current mode */
+ mode = imx319->cur_mode;
+ vblank_def = mode->fll_def - mode->height;
+ vblank_min = mode->fll_min - mode->height;
+ imx319->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ IMX319_FLL_MAX - mode->height,
+ 1, vblank_def);
+
+ hblank = mode->llp - mode->width;
+ imx319->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank,
+ 1, hblank);
+ if (imx319->hblank)
+ imx319->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* fll >= exposure time + adjust parameter (default value is 18) */
+ exposure_max = mode->fll_def - 18;
+ imx319->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ IMX319_EXPOSURE_MIN, exposure_max,
+ IMX319_EXPOSURE_STEP,
+ IMX319_EXPOSURE_DEFAULT);
+
+ imx319->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ imx319->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX319_ANA_GAIN_MIN, IMX319_ANA_GAIN_MAX,
+ IMX319_ANA_GAIN_STEP, IMX319_ANA_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx319_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ IMX319_DGTL_GAIN_MIN, IMX319_DGTL_GAIN_MAX,
+ IMX319_DGTL_GAIN_STEP, IMX319_DGTL_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx319_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx319_test_pattern_menu) - 1,
+ 0, 0, imx319_test_pattern_menu);
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "control init failed: %d", ret);
+ goto error;
+ }
+
+ imx319->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static struct imx319_hwcfg *imx319_get_hwcfg(struct device *dev)
+{
+ struct imx319_hwcfg *cfg;
+ struct v4l2_fwnode_endpoint *bus_cfg;
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned int i;
+ int ret;
+
+ if (!fwnode)
+ return NULL;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return NULL;
+
+ bus_cfg = v4l2_fwnode_endpoint_alloc_parse(ep);
+ if (IS_ERR(bus_cfg))
+ goto out_err;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ goto out_err;
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &cfg->ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ goto out_err;
+ }
+
+ dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
+ if (cfg->ext_clk != IMX319_EXT_CLK) {
+ dev_err(dev, "external clock %d is not supported",
+ cfg->ext_clk);
+ goto out_err;
+ }
+
+ dev_dbg(dev, "num of link freqs: %d", bus_cfg->nr_of_link_frequencies);
+ if (!bus_cfg->nr_of_link_frequencies) {
+ dev_warn(dev, "no link frequencies defined");
+ goto out_err;
+ }
+
+ cfg->nr_of_link_freqs = bus_cfg->nr_of_link_frequencies;
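+	/* one extra entry is allocated and left zeroed, presumably as a terminator */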
+ cfg->link_freqs = devm_kcalloc(dev,
+ bus_cfg->nr_of_link_frequencies + 1,
+ sizeof(*cfg->link_freqs), GFP_KERNEL);
+ if (!cfg->link_freqs)
+ goto out_err;
+
+ for (i = 0; i < bus_cfg->nr_of_link_frequencies; i++) {
+ cfg->link_freqs[i] = bus_cfg->link_frequencies[i];
+ dev_dbg(dev, "link_freq[%d] = %lld", i, cfg->link_freqs[i]);
+ }
+
+ v4l2_fwnode_endpoint_free(bus_cfg);
+ fwnode_handle_put(ep);
+ return cfg;
+
+out_err:
+ v4l2_fwnode_endpoint_free(bus_cfg);
+ fwnode_handle_put(ep);
+ return NULL;
+}
+
+static int imx319_probe(struct i2c_client *client)
+{
+ struct imx319 *imx319;
+ int ret;
+ u32 i;
+
+ imx319 = devm_kzalloc(&client->dev, sizeof(*imx319), GFP_KERNEL);
+ if (!imx319)
+ return -ENOMEM;
+
+ mutex_init(&imx319->mutex);
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&imx319->sd, client, &imx319_subdev_ops);
+
+ /* Check module identity */
+ ret = imx319_identify_module(imx319);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto error_probe;
+ }
+
+ imx319->hwcfg = imx319_get_hwcfg(&client->dev);
+ if (!imx319->hwcfg) {
+ dev_err(&client->dev, "failed to get hwcfg");
+ ret = -ENODEV;
+ goto error_probe;
+ }
+
+ imx319->link_def_freq = link_freq_menu_items[IMX319_LINK_FREQ_INDEX];
+ for (i = 0; i < imx319->hwcfg->nr_of_link_freqs; i++) {
+ if (imx319->hwcfg->link_freqs[i] == imx319->link_def_freq) {
+ dev_dbg(&client->dev, "link freq index %d matched", i);
+ break;
+ }
+ }
+
+	if (i == imx319->hwcfg->nr_of_link_freqs)
+		dev_warn(&client->dev,
+			 "no link frequency supported, defaulting to %lld",
+			 imx319->link_def_freq);
+
+ /* Set default mode to max resolution */
+ imx319->cur_mode = &supported_modes[0];
+
+ ret = imx319_init_controls(imx319);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto error_probe;
+ }
+
+ /* Initialize subdev */
+ imx319->sd.internal_ops = &imx319_internal_ops;
+ imx319->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ imx319->sd.entity.ops = &imx319_subdev_entity_ops;
+ imx319->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ /* Initialize source pad */
+ imx319->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&imx319->sd.entity, 1, &imx319->pad, 0);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&imx319->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&imx319->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(imx319->sd.ctrl_handler);
+
+error_probe:
+ mutex_destroy(&imx319->mutex);
+
+ return ret;
+}
+
+static int imx319_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx319 *imx319 = to_imx319(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&imx319->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx319_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx319_suspend, imx319_resume)
+};
+
+static const struct acpi_device_id imx319_acpi_ids[] = {
+ { "SONY319A" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, imx319_acpi_ids);
+
+static struct i2c_driver imx319_i2c_driver = {
+ .driver = {
+ .name = "imx319",
+ .pm = &imx319_pm_ops,
+ .acpi_match_table = ACPI_PTR(imx319_acpi_ids),
+ },
+ .probe_new = imx319_probe,
+ .remove = imx319_remove,
+};
+module_i2c_driver(imx319_i2c_driver);
+
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_DESCRIPTION("Sony imx319 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx355.c b/drivers/media/i2c/imx355.c
new file mode 100644
index 00000000000000..02c008917f0656
--- /dev/null
+++ b/drivers/media/i2c/imx355.c
@@ -0,0 +1,1859 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2017 - 2018 Intel Corporation
+
+#include <asm/unaligned.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fwnode.h>
+
+#define IMX355_REG_MODE_SELECT 0x0100
+#define IMX355_MODE_STANDBY 0x00
+#define IMX355_MODE_STREAMING 0x01
+
+/* Chip ID */
+#define IMX355_REG_CHIP_ID 0x0016
+#define IMX355_CHIP_ID 0x0355
+
+/* V_TIMING internal */
+#define IMX355_REG_FLL 0x0340
+#define IMX355_FLL_MAX 0xffff
+
+/* Exposure control */
+#define IMX355_REG_EXPOSURE 0x0202
+#define IMX355_EXPOSURE_MIN 1
+#define IMX355_EXPOSURE_STEP 1
+#define IMX355_EXPOSURE_DEFAULT 0x0282
+
+/* Analog gain control */
+#define IMX355_REG_ANALOG_GAIN 0x0204
+#define IMX355_ANA_GAIN_MIN 0
+#define IMX355_ANA_GAIN_MAX 960
+#define IMX355_ANA_GAIN_STEP 1
+#define IMX355_ANA_GAIN_DEFAULT 0
+
+/* Digital gain control */
+#define IMX355_REG_DPGA_USE_GLOBAL_GAIN 0x3070
+#define IMX355_REG_DIG_GAIN_GLOBAL 0x020e
+#define IMX355_DGTL_GAIN_MIN 256
+#define IMX355_DGTL_GAIN_MAX 4095
+#define IMX355_DGTL_GAIN_STEP 1
+#define IMX355_DGTL_GAIN_DEFAULT 256
+
+/* Test Pattern Control */
+#define IMX355_REG_TEST_PATTERN 0x0600
+#define IMX355_TEST_PATTERN_DISABLED 0
+#define IMX355_TEST_PATTERN_SOLID_COLOR 1
+#define IMX355_TEST_PATTERN_COLOR_BARS 2
+#define IMX355_TEST_PATTERN_GRAY_COLOR_BARS 3
+#define IMX355_TEST_PATTERN_PN9 4
+
+/* Flip Control */
+#define IMX355_REG_ORIENTATION 0x0101
+
+/* default link frequency and external clock */
+#define IMX355_LINK_FREQ_DEFAULT 360000000
+#define IMX355_EXT_CLK 19200000
+#define IMX355_LINK_FREQ_INDEX 0
+
+struct imx355_reg {
+ u16 address;
+ u8 val;
+};
+
+struct imx355_reg_list {
+ u32 num_of_regs;
+ const struct imx355_reg *regs;
+};
+
+/* Mode : resolution and related config&values */
+struct imx355_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 fll_def;
+ u32 fll_min;
+
+ /* H-timing */
+ u32 llp;
+
+ /* index of link frequency */
+ u32 link_freq_index;
+
+ /* Default register values */
+ struct imx355_reg_list reg_list;
+};
+
+struct imx355_hwcfg {
+ u32 ext_clk; /* sensor external clk */
+ s64 *link_freqs; /* CSI-2 link frequencies */
+ unsigned int nr_of_link_freqs;
+};
+
+struct imx355 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *hflip;
+
+ /* Current mode */
+ const struct imx355_mode *cur_mode;
+
+ struct imx355_hwcfg *hwcfg;
+ s64 link_def_freq; /* CSI-2 link default frequency */
+
+ /*
+ * Mutex for serialized access:
+ * Protect sensor set pad format and start/stop streaming safely.
+ * Protect access to sensor v4l2 controls.
+ */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+static const struct imx355_reg imx355_global_regs[] = {
+ { 0x0136, 0x13 },
+ { 0x0137, 0x33 },
+ { 0x304e, 0x03 },
+ { 0x4348, 0x16 },
+ { 0x4350, 0x19 },
+ { 0x4408, 0x0a },
+ { 0x440c, 0x0b },
+ { 0x4411, 0x5f },
+ { 0x4412, 0x2c },
+ { 0x4623, 0x00 },
+ { 0x462c, 0x0f },
+ { 0x462d, 0x00 },
+ { 0x462e, 0x00 },
+ { 0x4684, 0x54 },
+ { 0x480a, 0x07 },
+ { 0x4908, 0x07 },
+ { 0x4909, 0x07 },
+ { 0x490d, 0x0a },
+ { 0x491e, 0x0f },
+ { 0x4921, 0x06 },
+ { 0x4923, 0x28 },
+ { 0x4924, 0x28 },
+ { 0x4925, 0x29 },
+ { 0x4926, 0x29 },
+ { 0x4927, 0x1f },
+ { 0x4928, 0x20 },
+ { 0x4929, 0x20 },
+ { 0x492a, 0x20 },
+ { 0x492c, 0x05 },
+ { 0x492d, 0x06 },
+ { 0x492e, 0x06 },
+ { 0x492f, 0x06 },
+ { 0x4930, 0x03 },
+ { 0x4931, 0x04 },
+ { 0x4932, 0x04 },
+ { 0x4933, 0x05 },
+ { 0x595e, 0x01 },
+ { 0x5963, 0x01 },
+ { 0x3030, 0x01 },
+ { 0x3031, 0x01 },
+ { 0x3045, 0x01 },
+ { 0x4010, 0x00 },
+ { 0x4011, 0x00 },
+ { 0x4012, 0x00 },
+ { 0x4013, 0x01 },
+ { 0x68a8, 0xfe },
+ { 0x68a9, 0xff },
+ { 0x6888, 0x00 },
+ { 0x6889, 0x00 },
+ { 0x68b0, 0x00 },
+ { 0x3058, 0x00 },
+ { 0x305a, 0x00 },
+};
+
+static const struct imx355_reg_list imx355_global_setting = {
+ .num_of_regs = ARRAY_SIZE(imx355_global_regs),
+ .regs = imx355_global_regs,
+};
+
+static const struct imx355_reg mode_3268x2448_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x0a },
+ { 0x0341, 0x36 },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x08 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x08 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcb },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x97 },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x0c },
+ { 0x034d, 0xc4 },
+ { 0x034e, 0x09 },
+ { 0x034f, 0x90 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_3264x2448_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x0a },
+ { 0x0341, 0x36 },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x08 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x08 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xc7 },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x97 },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x0c },
+ { 0x034d, 0xc0 },
+ { 0x034e, 0x09 },
+ { 0x034f, 0x90 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_3280x2464_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x0a },
+ { 0x0341, 0x36 },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x0c },
+ { 0x034d, 0xd0 },
+ { 0x034e, 0x09 },
+ { 0x034f, 0xa0 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1940x1096_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x02 },
+ { 0x0345, 0xa0 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0xac },
+ { 0x0348, 0x0a },
+ { 0x0349, 0x33 },
+ { 0x034a, 0x06 },
+ { 0x034b, 0xf3 },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x07 },
+ { 0x034d, 0x94 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0x48 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1936x1096_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x02 },
+ { 0x0345, 0xa0 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0xac },
+ { 0x0348, 0x0a },
+ { 0x0349, 0x2f },
+ { 0x034a, 0x06 },
+ { 0x034b, 0xf3 },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x07 },
+ { 0x034d, 0x90 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0x48 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1924x1080_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x02 },
+ { 0x0345, 0xa8 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0xb4 },
+ { 0x0348, 0x0a },
+ { 0x0349, 0x2b },
+ { 0x034a, 0x06 },
+ { 0x034b, 0xeb },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x07 },
+ { 0x034d, 0x84 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0x38 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1920x1080_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x02 },
+ { 0x0345, 0xa8 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0xb4 },
+ { 0x0348, 0x0a },
+ { 0x0349, 0x27 },
+ { 0x034a, 0x06 },
+ { 0x034b, 0xeb },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x00 },
+ { 0x0901, 0x11 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x07 },
+ { 0x034d, 0x80 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0x38 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1640x1232_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x07 },
+ { 0x0343, 0x2c },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x06 },
+ { 0x034d, 0x68 },
+ { 0x034e, 0x04 },
+ { 0x034f, 0xd0 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1640x922_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x07 },
+ { 0x0343, 0x2c },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x01 },
+ { 0x0347, 0x30 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x08 },
+ { 0x034b, 0x63 },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x06 },
+ { 0x034d, 0x68 },
+ { 0x034e, 0x03 },
+ { 0x034f, 0x9a },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1300x736_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x07 },
+ { 0x0343, 0x2c },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x01 },
+ { 0x0345, 0x58 },
+ { 0x0346, 0x01 },
+ { 0x0347, 0xf0 },
+ { 0x0348, 0x0b },
+ { 0x0349, 0x7f },
+ { 0x034a, 0x07 },
+ { 0x034b, 0xaf },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x05 },
+ { 0x034d, 0x14 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0xe0 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1296x736_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x07 },
+ { 0x0343, 0x2c },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x01 },
+ { 0x0345, 0x58 },
+ { 0x0346, 0x01 },
+ { 0x0347, 0xf0 },
+ { 0x0348, 0x0b },
+ { 0x0349, 0x77 },
+ { 0x034a, 0x07 },
+ { 0x034b, 0xaf },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x05 },
+ { 0x034d, 0x10 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0xe0 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1284x720_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x07 },
+ { 0x0343, 0x2c },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x01 },
+ { 0x0345, 0x68 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0b },
+ { 0x0349, 0x6f },
+ { 0x034a, 0x07 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x05 },
+ { 0x034d, 0x04 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0xd0 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_1280x720_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x07 },
+ { 0x0343, 0x2c },
+ { 0x0340, 0x05 },
+ { 0x0341, 0x1a },
+ { 0x0344, 0x01 },
+ { 0x0345, 0x68 },
+ { 0x0346, 0x02 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0b },
+ { 0x0349, 0x67 },
+ { 0x034a, 0x07 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x22 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x05 },
+ { 0x034d, 0x00 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0xd0 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x00 },
+ { 0x0701, 0x10 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const struct imx355_reg mode_820x616_regs[] = {
+ { 0x0112, 0x0a },
+ { 0x0113, 0x0a },
+ { 0x0114, 0x03 },
+ { 0x0342, 0x0e },
+ { 0x0343, 0x58 },
+ { 0x0340, 0x02 },
+ { 0x0341, 0x8c },
+ { 0x0344, 0x00 },
+ { 0x0345, 0x00 },
+ { 0x0346, 0x00 },
+ { 0x0347, 0x00 },
+ { 0x0348, 0x0c },
+ { 0x0349, 0xcf },
+ { 0x034a, 0x09 },
+ { 0x034b, 0x9f },
+ { 0x0220, 0x00 },
+ { 0x0222, 0x01 },
+ { 0x0900, 0x01 },
+ { 0x0901, 0x44 },
+ { 0x0902, 0x00 },
+ { 0x034c, 0x03 },
+ { 0x034d, 0x34 },
+ { 0x034e, 0x02 },
+ { 0x034f, 0x68 },
+ { 0x0301, 0x05 },
+ { 0x0303, 0x01 },
+ { 0x0305, 0x02 },
+ { 0x0306, 0x00 },
+ { 0x0307, 0x78 },
+ { 0x030b, 0x01 },
+ { 0x030d, 0x02 },
+ { 0x030e, 0x00 },
+ { 0x030f, 0x4b },
+ { 0x0310, 0x00 },
+ { 0x0700, 0x02 },
+ { 0x0701, 0x78 },
+ { 0x0820, 0x0b },
+ { 0x0821, 0x40 },
+ { 0x3088, 0x04 },
+ { 0x6813, 0x02 },
+ { 0x6835, 0x07 },
+ { 0x6836, 0x01 },
+ { 0x6837, 0x04 },
+ { 0x684d, 0x07 },
+ { 0x684e, 0x01 },
+ { 0x684f, 0x04 },
+};
+
+static const char * const imx355_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "Eight Vertical Colour Bars",
+ "Colour Bars With Fade to Grey",
+ "Pseudorandom Sequence (PN9)",
+};
+
+/* supported link frequencies */
+static const s64 link_freq_menu_items[] = {
+ IMX355_LINK_FREQ_DEFAULT,
+};
+
+/* Mode configs */
+static const struct imx355_mode supported_modes[] = {
+ {
+ .width = 3280,
+ .height = 2464,
+ .fll_def = 0xa36,
+ .fll_min = 0xa36,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
+ .regs = mode_3280x2464_regs,
+ },
+ },
+ {
+ .width = 3268,
+ .height = 2448,
+ .fll_def = 0xa36,
+ .fll_min = 0xa36,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3268x2448_regs),
+ .regs = mode_3268x2448_regs,
+ },
+ },
+ {
+ .width = 3264,
+ .height = 2448,
+ .fll_def = 0xa36,
+ .fll_min = 0xa36,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_3264x2448_regs),
+ .regs = mode_3264x2448_regs,
+ },
+ },
+ {
+ .width = 1940,
+ .height = 1096,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1940x1096_regs),
+ .regs = mode_1940x1096_regs,
+ },
+ },
+ {
+ .width = 1936,
+ .height = 1096,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1936x1096_regs),
+ .regs = mode_1936x1096_regs,
+ },
+ },
+ {
+ .width = 1924,
+ .height = 1080,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1924x1080_regs),
+ .regs = mode_1924x1080_regs,
+ },
+ },
+ {
+ .width = 1920,
+ .height = 1080,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1920x1080_regs),
+ .regs = mode_1920x1080_regs,
+ },
+ },
+ {
+ .width = 1640,
+ .height = 1232,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0x72c,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1640x1232_regs),
+ .regs = mode_1640x1232_regs,
+ },
+ },
+ {
+ .width = 1640,
+ .height = 922,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0x72c,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1640x922_regs),
+ .regs = mode_1640x922_regs,
+ },
+ },
+ {
+ .width = 1300,
+ .height = 736,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0x72c,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1300x736_regs),
+ .regs = mode_1300x736_regs,
+ },
+ },
+ {
+ .width = 1296,
+ .height = 736,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0x72c,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1296x736_regs),
+ .regs = mode_1296x736_regs,
+ },
+ },
+ {
+ .width = 1284,
+ .height = 720,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0x72c,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1284x720_regs),
+ .regs = mode_1284x720_regs,
+ },
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .fll_def = 0x51a,
+ .fll_min = 0x51a,
+ .llp = 0x72c,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_regs),
+ .regs = mode_1280x720_regs,
+ },
+ },
+ {
+ .width = 820,
+ .height = 616,
+ .fll_def = 0x28c,
+ .fll_min = 0x28c,
+ .llp = 0xe58,
+ .link_freq_index = IMX355_LINK_FREQ_INDEX,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_820x616_regs),
+ .regs = mode_820x616_regs,
+ },
+ },
+};
+
+static inline struct imx355 *to_imx355(struct v4l2_subdev *_sd)
+{
+ return container_of(_sd, struct imx355, sd);
+}
+
+/* Get Bayer order based on the flip settings. */
+static u32 imx355_get_format_code(struct imx355 *imx355)
+{
+	/*
+	 * Only one Bayer order is supported at a time;
+	 * which one depends on the flip settings.
+	 */
+ u32 code;
+ static const u32 codes[2][2] = {
+ { MEDIA_BUS_FMT_SRGGB10_1X10, MEDIA_BUS_FMT_SGRBG10_1X10, },
+ { MEDIA_BUS_FMT_SGBRG10_1X10, MEDIA_BUS_FMT_SBGGR10_1X10, },
+ };
+
+ lockdep_assert_held(&imx355->mutex);
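+	/* rows of codes[][] are indexed by vflip, columns by hflip */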
+ code = codes[imx355->vflip->val][imx355->hflip->val];
+
+ return code;
+}
+
+/* Read registers up to 4 at a time */
+static int imx355_read_reg(struct imx355 *imx355, u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[4] = { 0 };
+ int ret;
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, addr_buf);
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = ARRAY_SIZE(addr_buf);
+ msgs[0].buf = addr_buf;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
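+	/* right-justify the data so the BE32 read below yields the value */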
+ msgs[1].buf = &data_buf[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = get_unaligned_be32(data_buf);
+
+ return 0;
+}
+
+/* Write registers up to 4 at a time */
+static int imx355_write_reg(struct imx355 *imx355, u16 reg, u32 len, u32 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ u8 buf[6];
+
+ if (len > 4)
+ return -EINVAL;
+
+ put_unaligned_be16(reg, buf);
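+	/* shift the value into the top 'len' bytes of the BE32 word */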
+ put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
+
+/* Write a list of registers */
+static int imx355_write_regs(struct imx355 *imx355,
+ const struct imx355_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ int ret;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ ret = imx355_write_reg(imx355, regs[i].address, 1, regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "write reg 0x%4.4x return err %d",
+ regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Open sub-device */
+static int imx355_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ struct imx355 *imx355 = to_imx355(sd);
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, 0);
+
+ mutex_lock(&imx355->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = imx355->cur_mode->width;
+ try_fmt->height = imx355->cur_mode->height;
+ try_fmt->code = imx355_get_format_code(imx355);
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ mutex_unlock(&imx355->mutex);
+
+ return 0;
+}
+
+static int imx355_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imx355 *imx355 = container_of(ctrl->handler,
+ struct imx355, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ s64 max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
+ max = imx355->cur_mode->height + ctrl->val - 10;
+ __v4l2_ctrl_modify_range(imx355->exposure,
+ imx355->exposure->minimum,
+ max, imx355->exposure->step, max);
+ break;
+ }
+
+	/*
+	 * V4L2 control values are only applied when the
+	 * sensor is powered up for streaming.
+	 */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+		/* Analog gain factor is 1024 / (1024 - ctrl->val) */
+ ret = imx355_write_reg(imx355, IMX355_REG_ANALOG_GAIN, 2,
+ ctrl->val);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = imx355_write_reg(imx355, IMX355_REG_DIG_GAIN_GLOBAL, 2,
+ ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = imx355_write_reg(imx355, IMX355_REG_EXPOSURE, 2,
+ ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ ret = imx355_write_reg(imx355, IMX355_REG_FLL, 2,
+ imx355->cur_mode->height + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = imx355_write_reg(imx355, IMX355_REG_TEST_PATTERN,
+ 2, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ case V4L2_CID_VFLIP:
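+		/* the orientation register packs hflip in bit 0, vflip in bit 1 */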
+ ret = imx355_write_reg(imx355, IMX355_REG_ORIENTATION, 1,
+ imx355->hflip->val |
+ imx355->vflip->val << 1);
+ break;
+ default:
+ ret = -EINVAL;
+ dev_info(&client->dev, "ctrl(id:0x%x,val:0x%x) is not handled",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops imx355_ctrl_ops = {
+ .s_ctrl = imx355_set_ctrl,
+};
+
+static int imx355_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct imx355 *imx355 = to_imx355(sd);
+
+ if (code->index > 0)
+ return -EINVAL;
+
+ mutex_lock(&imx355->mutex);
+ code->code = imx355_get_format_code(imx355);
+ mutex_unlock(&imx355->mutex);
+
+ return 0;
+}
+
+static int imx355_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct imx355 *imx355 = to_imx355(sd);
+
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ mutex_lock(&imx355->mutex);
+ if (fse->code != imx355_get_format_code(imx355)) {
+ mutex_unlock(&imx355->mutex);
+ return -EINVAL;
+ }
+ mutex_unlock(&imx355->mutex);
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void imx355_update_pad_format(struct imx355 *imx355,
+ const struct imx355_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = imx355_get_format_code(imx355);
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int imx355_do_get_pad_format(struct imx355 *imx355,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt;
+ struct v4l2_subdev *sd = &imx355->sd;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ imx355_update_pad_format(imx355, imx355->cur_mode, fmt);
+ }
+
+ return 0;
+}
+
+static int imx355_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx355 *imx355 = to_imx355(sd);
+ int ret;
+
+ mutex_lock(&imx355->mutex);
+ ret = imx355_do_get_pad_format(imx355, cfg, fmt);
+ mutex_unlock(&imx355->mutex);
+
+ return ret;
+}
+
+static int
+imx355_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct imx355 *imx355 = to_imx355(sd);
+ const struct imx355_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ u64 pixel_rate;
+ u32 height;
+
+ mutex_lock(&imx355->mutex);
+
+	/*
+	 * Only one Bayer order is supported at a time;
+	 * which one depends on the flip settings.
+	 */
+ fmt->format.code = imx355_get_format_code(imx355);
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+ imx355_update_pad_format(imx355, mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ imx355->cur_mode = mode;
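+		/* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */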
+ pixel_rate = imx355->link_def_freq * 2 * 4;
+ do_div(pixel_rate, 10);
+ __v4l2_ctrl_s_ctrl_int64(imx355->pixel_rate, pixel_rate);
+ /* Update limits and set FPS to default */
+ height = imx355->cur_mode->height;
+ vblank_def = imx355->cur_mode->fll_def - height;
+ vblank_min = imx355->cur_mode->fll_min - height;
+ height = IMX355_FLL_MAX - height;
+ __v4l2_ctrl_modify_range(imx355->vblank, vblank_min, height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(imx355->vblank, vblank_def);
+ h_blank = mode->llp - imx355->cur_mode->width;
+ /*
+ * Currently hblank is not changeable.
+ * So FPS control is done only by vblank.
+ */
+ __v4l2_ctrl_modify_range(imx355->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&imx355->mutex);
+
+ return 0;
+}
+
+/* Start streaming */
+static int imx355_start_streaming(struct imx355 *imx355)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ const struct imx355_reg_list *reg_list;
+ int ret;
+
+ /* Global Setting */
+ reg_list = &imx355_global_setting;
+ ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "failed to set global settings");
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &imx355->cur_mode->reg_list;
+ ret = imx355_write_regs(imx355, reg_list->regs, reg_list->num_of_regs);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode");
+ return ret;
+ }
+
+	/* Apply the global digital gain to all color channels */
+ ret = imx355_write_reg(imx355, IMX355_REG_DPGA_USE_GLOBAL_GAIN, 1, 1);
+ if (ret)
+ return ret;
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(imx355->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ return imx355_write_reg(imx355, IMX355_REG_MODE_SELECT,
+ 1, IMX355_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int imx355_stop_streaming(struct imx355 *imx355)
+{
+ return imx355_write_reg(imx355, IMX355_REG_MODE_SELECT,
+ 1, IMX355_MODE_STANDBY);
+}
+
+static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct imx355 *imx355 = to_imx355(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&imx355->mutex);
+ if (imx355->streaming == enable) {
+ mutex_unlock(&imx355->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
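+			/* get_sync() bumps the usage count even on failure */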
+ pm_runtime_put_noidle(&client->dev);
+ goto err_unlock;
+ }
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = imx355_start_streaming(imx355);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ imx355_stop_streaming(imx355);
+ pm_runtime_put(&client->dev);
+ }
+
+ imx355->streaming = enable;
+
+ /* vflip and hflip cannot change during streaming */
+ __v4l2_ctrl_grab(imx355->vflip, enable);
+ __v4l2_ctrl_grab(imx355->hflip, enable);
+
+ mutex_unlock(&imx355->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&imx355->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused imx355_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx355 *imx355 = to_imx355(sd);
+
+ if (imx355->streaming)
+ imx355_stop_streaming(imx355);
+
+ return 0;
+}
+
+static int __maybe_unused imx355_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx355 *imx355 = to_imx355(sd);
+ int ret;
+
+ if (imx355->streaming) {
+ ret = imx355_start_streaming(imx355);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ imx355_stop_streaming(imx355);
+	imx355->streaming = false;
+ return ret;
+}
+
+/* Verify chip ID */
+static int imx355_identify_module(struct imx355 *imx355)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ int ret;
+ u32 val;
+
+ ret = imx355_read_reg(imx355, IMX355_REG_CHIP_ID, 2, &val);
+ if (ret)
+ return ret;
+
+ if (val != IMX355_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ IMX355_CHIP_ID, val);
+ return -EIO;
+	}
+
+	return 0;
+}
+
+static const struct v4l2_subdev_core_ops imx355_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_video_ops imx355_video_ops = {
+ .s_stream = imx355_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops imx355_pad_ops = {
+ .enum_mbus_code = imx355_enum_mbus_code,
+ .get_fmt = imx355_get_pad_format,
+ .set_fmt = imx355_set_pad_format,
+ .enum_frame_size = imx355_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops imx355_subdev_ops = {
+ .core = &imx355_subdev_core_ops,
+ .video = &imx355_video_ops,
+ .pad = &imx355_pad_ops,
+};
+
+static const struct media_entity_operations imx355_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops imx355_internal_ops = {
+ .open = imx355_open,
+};
+
+/* Initialize control handlers */
+static int imx355_init_controls(struct imx355 *imx355)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&imx355->sd);
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 hblank;
+ u64 pixel_rate;
+ const struct imx355_mode *mode;
+ u32 max;
+ int ret;
+
+ ctrl_hdlr = &imx355->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &imx355->mutex;
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ imx355->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_LINK_FREQ, max, 0,
+ link_freq_menu_items);
+ if (imx355->link_freq)
+ imx355->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* pixel_rate = link_freq * 2 * nr_of_lanes / bits_per_sample */
+ pixel_rate = imx355->link_def_freq * 2 * 4;
+ do_div(pixel_rate, 10);
+ /* By default, PIXEL_RATE is read only */
+ imx355->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, pixel_rate,
+ pixel_rate, 1, pixel_rate);
+
+ /* Initialize vblank/hblank/exposure parameters based on current mode */
+ mode = imx355->cur_mode;
+ vblank_def = mode->fll_def - mode->height;
+ vblank_min = mode->fll_min - mode->height;
+ imx355->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_VBLANK, vblank_min,
+ IMX355_FLL_MAX - mode->height,
+ 1, vblank_def);
+
+ hblank = mode->llp - mode->width;
+ imx355->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_HBLANK, hblank, hblank,
+ 1, hblank);
+ if (imx355->hblank)
+ imx355->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ /* fll >= exposure time + adjust parameter (default value is 10) */
+ exposure_max = mode->fll_def - 10;
+ imx355->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ IMX355_EXPOSURE_MIN, exposure_max,
+ IMX355_EXPOSURE_STEP,
+ IMX355_EXPOSURE_DEFAULT);
+
+ imx355->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ imx355->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ IMX355_ANA_GAIN_MIN, IMX355_ANA_GAIN_MAX,
+ IMX355_ANA_GAIN_STEP, IMX355_ANA_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &imx355_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ IMX355_DGTL_GAIN_MIN, IMX355_DGTL_GAIN_MAX,
+ IMX355_DGTL_GAIN_STEP, IMX355_DGTL_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx355_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(imx355_test_pattern_menu) - 1,
+ 0, 0, imx355_test_pattern_menu);
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "control init failed: %d", ret);
+ goto error;
+ }
+
+ imx355->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+
+ return ret;
+}
+
+static struct imx355_hwcfg *imx355_get_hwcfg(struct device *dev)
+{
+ struct imx355_hwcfg *cfg;
+ struct v4l2_fwnode_endpoint *bus_cfg;
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned int i;
+ int ret;
+
+ if (!fwnode)
+ return NULL;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return NULL;
+
+ bus_cfg = v4l2_fwnode_endpoint_alloc_parse(ep);
+ if (IS_ERR(bus_cfg))
+ goto out_err;
+
+ cfg = devm_kzalloc(dev, sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ goto out_err;
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &cfg->ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ goto out_err;
+ }
+
+ dev_dbg(dev, "ext clk: %d", cfg->ext_clk);
+ if (cfg->ext_clk != IMX355_EXT_CLK) {
+ dev_err(dev, "external clock %d is not supported",
+ cfg->ext_clk);
+ goto out_err;
+ }
+
+ dev_dbg(dev, "num of link freqs: %d", bus_cfg->nr_of_link_frequencies);
+ if (!bus_cfg->nr_of_link_frequencies) {
+ dev_warn(dev, "no link frequencies defined");
+ goto out_err;
+ }
+
+ cfg->nr_of_link_freqs = bus_cfg->nr_of_link_frequencies;
+ cfg->link_freqs = devm_kcalloc(
+ dev, bus_cfg->nr_of_link_frequencies + 1,
+ sizeof(*cfg->link_freqs), GFP_KERNEL);
+ if (!cfg->link_freqs)
+ goto out_err;
+
+ for (i = 0; i < bus_cfg->nr_of_link_frequencies; i++) {
+ cfg->link_freqs[i] = bus_cfg->link_frequencies[i];
+ dev_dbg(dev, "link_freq[%d] = %lld", i, cfg->link_freqs[i]);
+ }
+
+ v4l2_fwnode_endpoint_free(bus_cfg);
+ fwnode_handle_put(ep);
+ return cfg;
+
+out_err:
+ v4l2_fwnode_endpoint_free(bus_cfg);
+ fwnode_handle_put(ep);
+ return NULL;
+}
+
+static int imx355_probe(struct i2c_client *client)
+{
+ struct imx355 *imx355;
+ int ret;
+ u32 i;
+
+ imx355 = devm_kzalloc(&client->dev, sizeof(*imx355), GFP_KERNEL);
+ if (!imx355)
+ return -ENOMEM;
+
+ mutex_init(&imx355->mutex);
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&imx355->sd, client, &imx355_subdev_ops);
+
+ /* Check module identity */
+ ret = imx355_identify_module(imx355);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto error_probe;
+ }
+
+ imx355->hwcfg = imx355_get_hwcfg(&client->dev);
+ if (!imx355->hwcfg) {
+ dev_err(&client->dev, "failed to get hwcfg");
+ ret = -ENODEV;
+ goto error_probe;
+ }
+
+ imx355->link_def_freq = link_freq_menu_items[IMX355_LINK_FREQ_INDEX];
+ for (i = 0; i < imx355->hwcfg->nr_of_link_freqs; i++) {
+ if (imx355->hwcfg->link_freqs[i] == imx355->link_def_freq) {
+ dev_dbg(&client->dev, "link freq index %d matched", i);
+ break;
+ }
+ }
+
+ if (i == imx355->hwcfg->nr_of_link_freqs)
+ dev_warn(&client->dev,
+ "no link frequency supported, use default %lld",
+ imx355->link_def_freq);
+
+ /* Set default mode to max resolution */
+ imx355->cur_mode = &supported_modes[0];
+
+ ret = imx355_init_controls(imx355);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto error_probe;
+ }
+
+ /* Initialize subdev */
+ imx355->sd.internal_ops = &imx355_internal_ops;
+ imx355->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ imx355->sd.entity.ops = &imx355_subdev_entity_ops;
+ imx355->sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+ /* Initialize source pad */
+ imx355->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_init(&imx355->sd.entity, 1, &imx355->pad, 0);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor_common(&imx355->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&imx355->sd.entity);
+
+error_handler_free:
+ v4l2_ctrl_handler_free(imx355->sd.ctrl_handler);
+
+error_probe:
+ mutex_destroy(&imx355->mutex);
+
+ return ret;
+}
+
+static int imx355_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct imx355 *imx355 = to_imx355(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&imx355->mutex);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx355_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx355_suspend, imx355_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id imx355_acpi_ids[] = {
+ { "SONY355A" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(acpi, imx355_acpi_ids);
+#endif
+
+static struct i2c_driver imx355_i2c_driver = {
+ .driver = {
+ .name = "imx355",
+ .pm = &imx355_pm_ops,
+ .acpi_match_table = ACPI_PTR(imx355_acpi_ids),
+ },
+ .probe_new = imx355_probe,
+ .remove = imx355_remove,
+};
+module_i2c_driver(imx355_i2c_driver);
+
+MODULE_AUTHOR("Qiu, Tianshu <tian.shu.qiu@intel.com>");
+MODULE_AUTHOR("Rapolu, Chiranjeevi <chiranjeevi.rapolu@intel.com>");
+MODULE_AUTHOR("Bingbu Cao <bingbu.cao@intel.com>");
+MODULE_AUTHOR("Yang, Hyungwoo <hyungwoo.yang@intel.com>");
+MODULE_DESCRIPTION("Sony imx355 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index de1cb74918d886..e9803f59fe9600 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -1,16 +1,5 @@
-/*
- * Copyright (c) 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Intel Corporation.
#include <linux/acpi.h>
#include <linux/delay.h>
@@ -1679,7 +1668,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
OV13858_NUM_OF_LINK_FREQS - 1,
0,
link_freq_menu_items);
- ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if (ov13858->link_freq)
+ ov13858->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
pixel_rate_min = link_freq_to_pixel_rate(link_freq_menu_items[1]);
@@ -1702,7 +1692,8 @@ static int ov13858_init_controls(struct ov13858 *ov13858)
ov13858->hblank = v4l2_ctrl_new_std(
ctrl_hdlr, &ov13858_ctrl_ops, V4L2_CID_HBLANK,
hblank, hblank, 1, hblank);
- ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ if (ov13858->hblank)
+ ov13858->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
exposure_max = mode->vts_def - 8;
ov13858->exposure = v4l2_ctrl_new_std(
diff --git a/drivers/media/i2c/ov2685.c b/drivers/media/i2c/ov2685.c
index c6123c7fbe502a..16e28d4a7bf0b5 100644
--- a/drivers/media/i2c/ov2685.c
+++ b/drivers/media/i2c/ov2685.c
@@ -828,6 +828,12 @@ static const struct of_device_id ov2685_of_match[] = {
MODULE_DEVICE_TABLE(of, ov2685_of_match);
#endif
+static const struct i2c_device_id ov2685_id[] = {
+ { "ov2685", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ov2685_id);
+
static struct i2c_driver ov2685_i2c_driver = {
.driver = {
.name = "ov2685",
@@ -837,6 +843,7 @@ static struct i2c_driver ov2685_i2c_driver = {
},
.probe = &ov2685_probe,
.remove = &ov2685_remove,
+ .id_table = ov2685_id,
};
module_i2c_driver(ov2685_i2c_driver);
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 65406aef86f3e1..f2ffb9f20aa6d8 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -1,16 +1,5 @@
-/*
- * Copyright (c) 2017 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017 Intel Corporation.
#include <linux/acpi.h>
#include <linux/delay.h>
diff --git a/drivers/media/i2c/ov5695.c b/drivers/media/i2c/ov5695.c
index 75d6ece7ea59b9..212455a315dba8 100644
--- a/drivers/media/i2c/ov5695.c
+++ b/drivers/media/i2c/ov5695.c
@@ -1382,6 +1382,12 @@ static const struct of_device_id ov5695_of_match[] = {
MODULE_DEVICE_TABLE(of, ov5695_of_match);
#endif
+static const struct i2c_device_id ov5695_id[] = {
+ { "ov5695", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, ov5695_id);
+
static struct i2c_driver ov5695_i2c_driver = {
.driver = {
.name = "ov5695",
@@ -1391,6 +1397,7 @@ static struct i2c_driver ov5695_i2c_driver = {
},
.probe = &ov5695_probe,
.remove = &ov5695_remove,
+ .id_table = ov5695_id,
};
module_i2c_driver(ov5695_i2c_driver);
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 58cebb8fc06794..8481a4ca83951a 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -982,7 +982,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor,
if (rval)
goto out;
- for (i = 0; i < 1000; i++) {
+ for (i = 1000; i > 0; i--) {
rval = smiapp_read(
sensor,
SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s);
@@ -993,11 +993,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor,
if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY)
break;
- if (--i == 0) {
- rval = -ETIMEDOUT;
- goto out;
- }
-
+ }
+ if (!i) {
+ rval = -ETIMEDOUT;
+ goto out;
}
for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) {
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index f150a8bd94dc8d..70f1a80d4e395e 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -834,7 +834,7 @@ static int ov772x_set_params(struct ov772x_priv *priv,
* set COM8
*/
if (priv->band_filter) {
- ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+ ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 2650405e2c1b53..8a22edd1daf875 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -872,9 +872,6 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
/* tvp5150 has some special limits */
rect.left = clamp(rect.left, 0, TVP5150_MAX_CROP_LEFT);
- rect.width = clamp_t(unsigned int, rect.width,
- TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
- TVP5150_H_MAX - rect.left);
rect.top = clamp(rect.top, 0, TVP5150_MAX_CROP_TOP);
/* Calculate height based on current standard */
@@ -888,9 +885,16 @@ static int tvp5150_s_crop(struct v4l2_subdev *sd, const struct v4l2_crop *a)
else
hmax = TVP5150_V_MAX_OTHERS;
- rect.height = clamp_t(unsigned int, rect.height,
+ /*
+ * alignments:
+ * - width = 2 due to UYVY colorspace
+ * - height, image = no special alignment
+ */
+ v4l_bound_align_image(&rect.width,
+ TVP5150_H_MAX - TVP5150_MAX_CROP_LEFT - rect.left,
+ TVP5150_H_MAX - rect.left, 1, &rect.height,
hmax - TVP5150_MAX_CROP_TOP - rect.top,
- hmax - rect.top);
+ hmax - rect.top, 0, 0);
tvp5150_write(sd, TVP5150_VERT_BLANKING_START, rect.top);
tvp5150_write(sd, TVP5150_VERT_BLANKING_STOP,
diff --git a/drivers/media/pci/cx23885/altera-ci.c b/drivers/media/pci/cx23885/altera-ci.c
index aaf4e46ff3e96c..a0c1ff97f9053a 100644
--- a/drivers/media/pci/cx23885/altera-ci.c
+++ b/drivers/media/pci/cx23885/altera-ci.c
@@ -660,6 +660,10 @@ static int altera_hw_filt_init(struct altera_ci_config *config, int hw_filt_nr)
}
temp_int = append_internal(inter);
+ if (!temp_int) {
+ ret = -ENOMEM;
+ goto err;
+ }
inter->filts_used = 1;
inter->dev = config->dev;
inter->fpga_rw = config->fpga_rw;
@@ -694,6 +698,7 @@ err:
__func__, ret);
kfree(pid_filt);
+ kfree(inter);
return ret;
}
@@ -728,6 +733,10 @@ int altera_ci_init(struct altera_ci_config *config, int ci_nr)
}
temp_int = append_internal(inter);
+ if (!temp_int) {
+ ret = -ENOMEM;
+ goto err;
+ }
inter->cis_used = 1;
inter->dev = config->dev;
inter->fpga_rw = config->fpga_rw;
@@ -796,6 +805,7 @@ err:
ci_dbg_print("%s: Cannot initialize CI: Error %d.\n", __func__, ret);
kfree(state);
+ kfree(inter);
return ret;
}
diff --git a/drivers/media/pci/intel/ipu3/Kconfig b/drivers/media/pci/intel/ipu3/Kconfig
index fe9c2dc0a496b0..16954783c88776 100644
--- a/drivers/media/pci/intel/ipu3/Kconfig
+++ b/drivers/media/pci/intel/ipu3/Kconfig
@@ -22,13 +22,13 @@ config VIDEO_IPU3_IMGU
tristate "Intel ipu3-imgu driver"
depends on PCI && VIDEO_V4L2
depends on MEDIA_CONTROLLER && VIDEO_V4L2_SUBDEV_API
- depends on X86_64
+ depends on X86
select IOMMU_IOVA
select VIDEOBUF2_DMA_SG
---help---
- This is the video4linux2 driver for Intel IPU3 image processing unit,
- found in Intel Skylake and Kaby Lake SoCs and used for processing
- images and video.
+ This is the Video4Linux2 driver for Intel IPU3 image processing unit,
+ found in Intel Skylake and Kaby Lake SoCs and used for processing
+ images and video.
- Say Y or M here if you have a Skylake/Kaby Lake SoC with a MIPI
- camera. The module will be called ipu3-imgu.
+ Say Y or M here if you have a Skylake/Kaby Lake SoC with a MIPI
+ camera. The module will be called ipu3-imgu.
diff --git a/drivers/media/pci/intel/ipu3/ipu3-abi.h b/drivers/media/pci/intel/ipu3/ipu3-abi.h
index 24102647a89e51..8df8e54a5b4f25 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-abi.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-abi.h
@@ -8,7 +8,7 @@
/******************* IMGU Hardware information *******************/
-typedef __u32 imgu_addr_t;
+typedef u32 imgu_addr_t;
#define IMGU_ISP_VMEM_ALIGN 128
#define IMGU_DVS_BLOCK_W 64
@@ -60,7 +60,7 @@ typedef __u32 imgu_addr_t;
#define IMGU_SYSTEM_REQ_FREQ_DIVIDER 25
#define IMGU_REG_INT_STATUS 0x30
#define IMGU_REG_INT_ENABLE 0x34
-#define IMGU_REG_INT_CSS_IRQ (1 << 31)
+#define IMGU_REG_INT_CSS_IRQ BIT(31)
/* STATE_0_5_0_IMGHMMADR */
#define IMGU_REG_STATE 0x130
#define IMGU_STATE_HALT_STS BIT(0)
@@ -220,8 +220,6 @@ typedef __u32 imgu_addr_t;
#define IMGU_SCALER_TAPS_UV (IMGU_SCALER_FILTER_TAPS / 2)
#define IMGU_SCALER_FIR_PHASES \
(IMGU_SCALER_PHASES << IMGU_SCALER_PHASE_COUNTER_PREC_REF)
-#define IMGU_OSYS_BLOCK_WIDTH (2 * IPU3_UAPI_ISP_VEC_ELEMS)
-#define IMGU_OSYS_BLOCK_HEIGHT 32
/******************* imgu_abi_acc_param *******************/
@@ -237,6 +235,9 @@ typedef __u32 imgu_addr_t;
#define IMGU_ABI_DVS_STAT_MAX_PROCESS_LINES 52
#define IMGU_ABI_DVS_STAT_MAX_TRANSFERS 52
+#define IMGU_ABI_BDS_SAMPLE_PATTERN_ARRAY_SIZE 8
+#define IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE 32
+
#define IMGU_ABI_AWB_FR_MAX_TRANSFERS 30
#define IMGU_ABI_AWB_FR_MAX_PROCESS_LINES 30
#define IMGU_ABI_AWB_FR_MAX_OPERATIONS \
@@ -256,6 +257,236 @@ typedef __u32 imgu_addr_t;
#define IMGU_ABI_OSYS_PIN_OUT 1
#define IMGU_ABI_OSYS_PINS 2
+#define IMGU_ABI_DVS_STAT_LEVELS 3
+#define IMGU_ABI_YUVP2_YTM_LUT_ENTRIES 256
+#define IMGU_ABI_GDC_FRAC_BITS 8
+#define IMGU_ABI_BINARY_MAX_OUTPUT_PORTS 2
+#define IMGU_ABI_MAX_BINARY_NAME 64
+#define IMGU_ABI_ISP_DDR_WORD_BITS 256
+#define IMGU_ABI_ISP_DDR_WORD_BYTES (IMGU_ABI_ISP_DDR_WORD_BITS / 8)
+#define IMGU_ABI_MAX_STAGES 3
+#define IMGU_ABI_MAX_IF_CONFIGS 3
+#define IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP BIT(31)
+#define IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST BIT(0)
+#define IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST BIT(4)
+#define IMGU_ABI_MAX_SP_THREADS 4
+#define IMGU_ABI_FRAMES_REF 3
+#define IMGU_ABI_FRAMES_TNR 4
+#define IMGU_ABI_BUF_SETS_TNR 1
+
+#define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue) \
+ (0 << 24 | (thread) << 16 | (queue) << 8)
+#define IMGU_ABI_EVENT_BUFFER_DEQUEUED(queue) (1 << 24 | (queue) << 8)
+#define IMGU_ABI_EVENT_EVENT_DEQUEUED (2 << 24)
+#define IMGU_ABI_EVENT_START_STREAM (3 << 24)
+#define IMGU_ABI_EVENT_STOP_STREAM (4 << 24)
+#define IMGU_ABI_EVENT_MIPI_BUFFERS_READY (5 << 24)
+#define IMGU_ABI_EVENT_UNLOCK_RAW_BUFFER (6 << 24)
+#define IMGU_ABI_EVENT_STAGE_ENABLE_DISABLE (7 << 24)
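+/*
+ * e.g. IMGU_ABI_EVENT_BUFFER_ENQUEUED(1, IMGU_ABI_QUEUE_C_ID)
+ * = 0 << 24 | 1 << 16 | 2 << 8 = 0x010200
+ */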
+
+#define IMGU_ABI_HOST2SP_BUFQ_SIZE 3
+#define IMGU_ABI_SP2HOST_BUFQ_SIZE (2 * IMGU_ABI_MAX_SP_THREADS)
+#define IMGU_ABI_HOST2SP_EVTQ_SIZE (IMGU_ABI_QUEUE_NUM * \
+ IMGU_ABI_MAX_SP_THREADS * 2 + IMGU_ABI_MAX_SP_THREADS * 4)
+#define IMGU_ABI_SP2HOST_EVTQ_SIZE (6 * IMGU_ABI_MAX_SP_THREADS)
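+/* with 8 queues and 4 SP threads: 8 * 4 * 2 + 4 * 4 = 80 and 6 * 4 = 24 entries */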
+
+#define IMGU_ABI_EVTTYPE_EVENT_SHIFT 0
+#define IMGU_ABI_EVTTYPE_EVENT_MASK (0xff << IMGU_ABI_EVTTYPE_EVENT_SHIFT)
+#define IMGU_ABI_EVTTYPE_PIPE_SHIFT 8
+#define IMGU_ABI_EVTTYPE_PIPE_MASK (0xff << IMGU_ABI_EVTTYPE_PIPE_SHIFT)
+#define IMGU_ABI_EVTTYPE_PIPEID_SHIFT 16
+#define IMGU_ABI_EVTTYPE_PIPEID_MASK (0xff << IMGU_ABI_EVTTYPE_PIPEID_SHIFT)
+#define IMGU_ABI_EVTTYPE_MODULEID_SHIFT 8
+#define IMGU_ABI_EVTTYPE_MODULEID_MASK (0xff << IMGU_ABI_EVTTYPE_MODULEID_SHIFT)
+#define IMGU_ABI_EVTTYPE_LINENO_SHIFT 16
+#define IMGU_ABI_EVTTYPE_LINENO_MASK (0xffff << IMGU_ABI_EVTTYPE_LINENO_SHIFT)
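+/*
+ * e.g. an event word of 0x00020104 decodes as pipe-id 2, pipe 1,
+ * event type 4 (3A statistics ready)
+ */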
+
+/* Output frame ready */
+#define IMGU_ABI_EVTTYPE_OUT_FRAME_DONE 0
+/* Second output frame ready */
+#define IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE 1
+/* Viewfinder Output frame ready */
+#define IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE 2
+/* Second viewfinder Output frame ready */
+#define IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE 3
+/* Indication that 3A statistics are available */
+#define IMGU_ABI_EVTTYPE_3A_STATS_DONE 4
+/* Indication that DIS statistics are available */
+#define IMGU_ABI_EVTTYPE_DIS_STATS_DONE 5
+/* Pipeline Done event, sent after last pipeline stage */
+#define IMGU_ABI_EVTTYPE_PIPELINE_DONE 6
+/* Frame tagged */
+#define IMGU_ABI_EVTTYPE_FRAME_TAGGED 7
+/* Input frame ready */
+#define IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE 8
+/* Metadata ready */
+#define IMGU_ABI_EVTTYPE_METADATA_DONE 9
+/* Indication that LACE statistics are available */
+#define IMGU_ABI_EVTTYPE_LACE_STATS_DONE 10
+/* Extension stage executed */
+#define IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE 11
+/* Timing measurement data */
+#define IMGU_ABI_EVTTYPE_TIMER 12
+/* End Of Frame event, sent when in buffered sensor mode */
+#define IMGU_ABI_EVTTYPE_PORT_EOF 13
+/* Performance warning encountered by FW */
+#define IMGU_ABI_EVTTYPE_FW_WARNING 14
+/* Assertion hit by FW */
+#define IMGU_ABI_EVTTYPE_FW_ASSERT 15
+
+#define IMGU_ABI_NUM_CONTINUOUS_FRAMES 10
+#define IMGU_ABI_SP_COMM_COMMAND 0x00
+
+/*
+ * The host2sp_cmd_ready command is the only command written by the SP.
+ * It acknowledges that its previous command has been received
+ * (this does not mean that the command has been executed).
+ * It also indicates that a new command can be sent (it is a queue
+ * with depth 1).
+ */
+#define IMGU_ABI_SP_COMM_COMMAND_READY 1
+/* Command written by the Host */
+#define IMGU_ABI_SP_COMM_COMMAND_DUMMY 2 /* No action */
+#define IMGU_ABI_SP_COMM_COMMAND_START_FLASH 3 /* Start the flash */
+#define IMGU_ABI_SP_COMM_COMMAND_TERMINATE 4 /* Terminate */
+
+/* n = 0..IPU3_CSS_PIPE_ID_NUM-1 */
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(n) ((n) * 4 + 0x60)
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT 0
+#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_AND_SHIFT 16
+
+#define IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM 1 /* sp_pmem */
+
+/***** For parameter computation *****/
+
+#define IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET 0xC
+#define IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET 0x8
+#define IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS 32
+
+#define IMGU_SCALER_ELEMS_PER_VEC 0x10
+#define IMGU_SCALER_FILTER_TAPS_Y 0x4
+#define IMGU_SCALER_OUT_BPP 0x8
+
+#define IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR 0x400
+#define IMGU_SCALER_TO_OF_ACK_FA_ADDR \
+ (0xC00 + IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET)
+#define IMGU_OF_TO_ACK_FA_ADDR (0xC00 + IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET)
+#define IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR 0
+#define IMGU_SCALER_INTR_BPP 10
+
+#define IMGU_PS_SNR_PRESERVE_BITS 3
+#define IMGU_CNTX_BPP 11
+#define IMGU_SCALER_FILTER_TAPS_UV (IMGU_SCALER_FILTER_TAPS_Y / 2)
+
+#define IMGU_VMEM2_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
+#define IMGU_STRIDE_Y (IMGU_SCALER_FILTER_TAPS_Y + 1)
+#define IMGU_MAX_FRAME_WIDTH 3840
+#define IMGU_VMEM3_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
+
+#define IMGU_VER_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_OUT_BPP + \
+ IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 1 */
+#define IMGU_MAX_INPUT_BLOCK_HEIGHT 64
+#define IMGU_HOR_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_INTR_BPP + \
+ IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 2 */
+#define IMGU_MAX_OUTPUT_BLOCK_WIDTH 128
+#define IMGU_CNTX_STRIDE_UV (IMGU_SCALER_FILTER_TAPS_UV + 1)
+
+#define IMGU_OSYS_DMA_CROP_W_LIMIT 64
+#define IMGU_OSYS_DMA_CROP_H_LIMIT 4
+#define IMGU_OSYS_BLOCK_WIDTH (2 * IPU3_UAPI_ISP_VEC_ELEMS)
+#define IMGU_OSYS_BLOCK_HEIGHT 32
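+/* i.e. a 128x32 pixel block when IPU3_UAPI_ISP_VEC_ELEMS is 64 */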
+#define IMGU_OSYS_PHASES 0x20
+#define IMGU_OSYS_FILTER_TAPS 0x4
+#define IMGU_OSYS_PHASE_COUNTER_PREC_REF 6
+#define IMGU_OSYS_NUM_INPUT_BUFFERS 2
+#define IMGU_OSYS_FIR_PHASES \
+ (IMGU_OSYS_PHASES << IMGU_OSYS_PHASE_COUNTER_PREC_REF)
+#define IMGU_OSYS_TAPS_UV (IMGU_OSYS_FILTER_TAPS / 2)
+#define IMGU_OSYS_TAPS_Y (IMGU_OSYS_FILTER_TAPS)
+#define IMGU_OSYS_NUM_INTERM_BUFFERS 2
+
+#define IMGU_VMEM1_Y_SIZE \
+ (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
+#define IMGU_VMEM1_UV_SIZE (IMGU_VMEM1_Y_SIZE / 4)
+#define IMGU_VMEM1_OUT_BUF_ADDR (IMGU_VMEM1_INP_BUF_ADDR + \
+ (IMGU_OSYS_NUM_INPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+#define IMGU_OSYS_NUM_OUTPUT_BUFFERS 2
+
+/* transpose of input height */
+#define IMGU_VMEM2_VECS_PER_LINE \
+ (DIV_ROUND_UP(IMGU_OSYS_BLOCK_HEIGHT, IMGU_VMEM2_ELEMS_PER_VEC))
+/* size in words (vectors) */
+#define IMGU_VMEM2_BUF_SIZE \
+ (IMGU_VMEM2_VECS_PER_LINE * IMGU_VMEM2_LINES_PER_BLOCK)
+#define IMGU_VMEM3_VER_Y_SIZE \
+ ((IMGU_STRIDE_Y * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
+#define IMGU_VMEM3_HOR_Y_SIZE \
+ ((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS)
+#define IMGU_VMEM3_VER_Y_EXTRA \
+ ((IMGU_STRIDE_Y * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
+#define IMGU_VMEM3_VER_U_SIZE \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_HOR_U_SIZE \
+ (((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_VER_U_EXTRA \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+#define IMGU_VMEM3_VER_V_SIZE \
+ (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
+ / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
+
+#define IMGU_ISP_VEC_NELEMS 64
+#define IMGU_LUMA_TO_CHROMA_RATIO 2
+#define IMGU_INPUT_BLOCK_WIDTH (128)
+#define IMGU_FIFO_ADDR_SCALER_TO_FMT \
+ (IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR >> 2)
+#define IMGU_FIFO_ADDR_SCALER_TO_SP (IMGU_SCALER_TO_OF_ACK_FA_ADDR >> 2)
+#define IMGU_VMEM1_INP_BUF_ADDR 0
+#define IMGU_VMEM1_Y_STRIDE \
+ (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
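+/* e.g. a 128-pixel block over 32 elems/vec gives a 4-vector Y stride */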
+#define IMGU_VMEM1_BUF_SIZE (IMGU_VMEM1_V_OFFSET + IMGU_VMEM1_UV_SIZE)
+
+#define IMGU_VMEM1_U_OFFSET (IMGU_VMEM1_Y_SIZE)
+#define IMGU_VMEM1_V_OFFSET (IMGU_VMEM1_U_OFFSET + IMGU_VMEM1_UV_SIZE)
+#define IMGU_VMEM1_UV_STRIDE (IMGU_VMEM1_Y_STRIDE / 2)
+#define IMGU_VMEM1_INT_BUF_ADDR (IMGU_VMEM1_OUT_BUF_ADDR + \
+ (IMGU_OSYS_NUM_OUTPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+
+#define IMGU_VMEM1_ELEMS_PER_VEC (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
+#define IMGU_VMEM2_BUF_Y_ADDR 0
+#define IMGU_VMEM2_BUF_Y_STRIDE (IMGU_VMEM2_VECS_PER_LINE)
+#define IMGU_VMEM2_BUF_U_ADDR \
+ (IMGU_VMEM2_BUF_Y_ADDR + IMGU_VMEM2_BUF_SIZE)
+#define IMGU_VMEM2_BUF_V_ADDR \
+ (IMGU_VMEM2_BUF_U_ADDR + IMGU_VMEM2_BUF_SIZE / 4)
+#define IMGU_VMEM2_BUF_UV_STRIDE (IMGU_VMEM2_VECS_PER_LINE / 2)
+/* 1.5 x depth of intermediate buffer */
+#define IMGU_VMEM2_LINES_PER_BLOCK 192
+#define IMGU_VMEM3_HOR_Y_ADDR \
+ (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE)
+#define IMGU_VMEM3_HOR_U_ADDR \
+ (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE)
+#define IMGU_VMEM3_HOR_V_ADDR \
+ (IMGU_VMEM3_VER_V_ADDR + IMGU_VMEM3_VER_V_SIZE)
+#define IMGU_VMEM3_VER_Y_ADDR 0
+#define IMGU_VMEM3_VER_U_ADDR \
+ (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE + \
+ max(IMGU_VMEM3_HOR_Y_SIZE, IMGU_VMEM3_VER_Y_EXTRA))
+#define IMGU_VMEM3_VER_V_ADDR \
+ (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE + \
+ max(IMGU_VMEM3_HOR_U_SIZE, IMGU_VMEM3_VER_U_EXTRA))
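+/*
+ * VMEM3 layout per the address macros above: VER_Y,
+ * max(HOR_Y, VER_Y extra), VER_U, max(HOR_U, VER_U extra), VER_V
+ */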
+#define IMGU_FIFO_ADDR_FMT_TO_SP (IMGU_OF_TO_ACK_FA_ADDR >> 2)
+#define IMGU_FIFO_ADDR_FMT_TO_SCALER (IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR >> 2)
+#define IMGU_VMEM1_HST_BUF_ADDR (IMGU_VMEM1_INT_BUF_ADDR + \
+ (IMGU_OSYS_NUM_INTERM_BUFFERS * IMGU_VMEM1_BUF_SIZE))
+#define IMGU_VMEM1_HST_BUF_STRIDE 120
+#define IMGU_VMEM1_HST_BUF_NLINES 3
+
enum imgu_abi_frame_format {
IMGU_ABI_FRAME_FORMAT_NV11, /* 12 bit YUV 411, Y, UV plane */
IMGU_ABI_FRAME_FORMAT_NV12, /* 12 bit YUV 420, Y, UV plane */
@@ -304,7 +535,7 @@ enum imgu_abi_frame_format {
* line; UYVY interleaved
* even line
*/
- IMGU_ABI_FRAME_FORMAT_YCgCo444_16, /* Internal format for ISP2.7,
+ IMGU_ABI_FRAME_FORMAT_YCGCO444_16, /* Internal format for ISP2.7,
* 16 bits per plane YUV 444,
* Y, U, V plane
*/
@@ -336,29 +567,120 @@ enum imgu_abi_osys_tiling {
IMGU_ABI_OSYS_TILING_YF,
};
+enum imgu_abi_osys_procmode {
+ IMGU_ABI_OSYS_PROCMODE_BYPASS,
+ IMGU_ABI_OSYS_PROCMODE_UPSCALE,
+ IMGU_ABI_OSYS_PROCMODE_DOWNSCALE,
+};
+
+enum imgu_abi_queue_id {
+ IMGU_ABI_QUEUE_EVENT_ID = -1,
+ IMGU_ABI_QUEUE_A_ID = 0,
+ IMGU_ABI_QUEUE_B_ID,
+ IMGU_ABI_QUEUE_C_ID,
+ IMGU_ABI_QUEUE_D_ID,
+ IMGU_ABI_QUEUE_E_ID,
+ IMGU_ABI_QUEUE_F_ID,
+ IMGU_ABI_QUEUE_G_ID,
+ IMGU_ABI_QUEUE_H_ID, /* input frame queue for skycam */
+ IMGU_ABI_QUEUE_NUM
+};
+
+enum imgu_abi_buffer_type {
+ IMGU_ABI_BUFFER_TYPE_INVALID = -1,
+ IMGU_ABI_BUFFER_TYPE_3A_STATISTICS = 0,
+ IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS,
+ IMGU_ABI_BUFFER_TYPE_LACE_STATISTICS,
+ IMGU_ABI_BUFFER_TYPE_INPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_SEC_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_RAW_OUTPUT_FRAME,
+ IMGU_ABI_BUFFER_TYPE_CUSTOM_INPUT,
+ IMGU_ABI_BUFFER_TYPE_CUSTOM_OUTPUT,
+ IMGU_ABI_BUFFER_TYPE_METADATA,
+ IMGU_ABI_BUFFER_TYPE_PARAMETER_SET,
+ IMGU_ABI_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
+ IMGU_ABI_NUM_DYNAMIC_BUFFER_TYPE,
+ IMGU_ABI_NUM_BUFFER_TYPE
+};
+
+enum imgu_abi_raw_type {
+ IMGU_ABI_RAW_TYPE_BAYER,
+ IMGU_ABI_RAW_TYPE_IR_ON_GR,
+ IMGU_ABI_RAW_TYPE_IR_ON_GB
+};
+
+enum imgu_abi_memories {
+ IMGU_ABI_MEM_ISP_PMEM0 = 0,
+ IMGU_ABI_MEM_ISP_DMEM0,
+ IMGU_ABI_MEM_ISP_VMEM0,
+ IMGU_ABI_MEM_ISP_VAMEM0,
+ IMGU_ABI_MEM_ISP_VAMEM1,
+ IMGU_ABI_MEM_ISP_VAMEM2,
+ IMGU_ABI_MEM_ISP_HMEM0,
+ IMGU_ABI_MEM_SP0_DMEM0,
+ IMGU_ABI_MEM_SP1_DMEM0,
+ IMGU_ABI_MEM_DDR,
+ IMGU_ABI_NUM_MEMORIES
+};
+
+enum imgu_abi_param_class {
+ IMGU_ABI_PARAM_CLASS_PARAM, /* Late binding parameters, like 3A */
+ IMGU_ABI_PARAM_CLASS_CONFIG, /* Pipe config time parameters */
+ IMGU_ABI_PARAM_CLASS_STATE, /* State parameters, eg. buffer index */
+ IMGU_ABI_PARAM_CLASS_NUM
+};
+
+enum imgu_abi_bin_input_src {
+ IMGU_ABI_BINARY_INPUT_SOURCE_SENSOR,
+ IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY,
+ IMGU_ABI_BINARY_INPUT_SOURCE_VARIABLE,
+};
+
+enum imgu_abi_sp_swstate {
+ IMGU_ABI_SP_SWSTATE_TERMINATED,
+ IMGU_ABI_SP_SWSTATE_INITIALIZED,
+ IMGU_ABI_SP_SWSTATE_CONNECTED,
+ IMGU_ABI_SP_SWSTATE_RUNNING,
+};
+
+enum imgu_abi_bl_swstate {
+ IMGU_ABI_BL_SWSTATE_OK = 0x100,
+ IMGU_ABI_BL_SWSTATE_BUSY,
+ IMGU_ABI_BL_SWSTATE_ERR,
+};
+
+/* The type of pipe stage */
+enum imgu_abi_stage_type {
+ IMGU_ABI_STAGE_TYPE_SP,
+ IMGU_ABI_STAGE_TYPE_ISP,
+};
+
struct imgu_abi_acc_operation {
/*
* zero means on init,
* others mean upon receiving an ack signal from the BC acc.
*/
- __u8 op_indicator;
- __u8 op_type;
+ u8 op_indicator;
+ u8 op_type;
} __packed;
struct imgu_abi_acc_process_lines_cmd_data {
- __u16 lines;
- __u8 cfg_set;
- __u8 __reserved; /* Align to 4 bytes */
+ u16 lines;
+ u8 cfg_set;
+ u8 reserved; /* Align to 4 bytes */
} __packed;
/* Bayer shading definitions */
struct imgu_abi_shd_transfer_luts_set_data {
- __u8 set_number;
- __u8 padding[3];
+ u8 set_number;
+ u8 padding[3];
imgu_addr_t rg_lut_ddr_addr;
imgu_addr_t bg_lut_ddr_addr;
- __u32 align_dummy;
+ u32 align_dummy;
} __packed;
struct imgu_abi_shd_grid_config {
@@ -366,15 +688,15 @@ struct imgu_abi_shd_grid_config {
u32 grid_width:8;
u32 grid_height:8;
u32 block_width:3;
- u32 __reserved0:1;
+ u32 reserved0:1;
u32 block_height:3;
- u32 __reserved1:1;
+ u32 reserved1:1;
u32 grid_height_per_slice:8;
/* reg 1 */
s32 x_start:13;
- s32 __reserved2:3;
+ s32 reserved2:3;
s32 y_start:13;
- s32 __reserved3:3;
+ s32 reserved3:3;
} __packed;
struct imgu_abi_shd_general_config {
@@ -382,55 +704,55 @@ struct imgu_abi_shd_general_config {
u32 shd_enable:1;
/* aka 'gf' */
u32 gain_factor:2;
- u32 __reserved:21;
+ u32 reserved:21;
} __packed;
struct imgu_abi_shd_black_level_config {
/* reg 0 */
s32 bl_r:12;
- s32 __reserved0:4;
+ s32 reserved0:4;
s32 bl_gr:12;
- u32 __reserved1:1;
+ u32 reserved1:1;
/* aka 'nf' */
u32 normalization_shift:3;
/* reg 1 */
s32 bl_gb:12;
- s32 __reserved2:4;
+ s32 reserved2:4;
s32 bl_b:12;
- s32 __reserved3:4;
+ s32 reserved3:4;
} __packed;
struct imgu_abi_shd_intra_frame_operations_data {
struct imgu_abi_acc_operation
- operation_list[IMGU_ABI_SHD_MAX_OPERATIONS] IPU3_ALIGN;
+ operation_list[IMGU_ABI_SHD_MAX_OPERATIONS] __aligned(32);
struct imgu_abi_acc_process_lines_cmd_data
- process_lines_data[IMGU_ABI_SHD_MAX_PROCESS_LINES] IPU3_ALIGN;
+ process_lines_data[IMGU_ABI_SHD_MAX_PROCESS_LINES] __aligned(32);
struct imgu_abi_shd_transfer_luts_set_data
- transfer_data[IMGU_ABI_SHD_MAX_TRANSFERS] IPU3_ALIGN;
+ transfer_data[IMGU_ABI_SHD_MAX_TRANSFERS] __aligned(32);
} __packed;
struct imgu_abi_shd_config {
- struct ipu3_uapi_shd_config_static shd IMGU_ABI_PAD;
- struct imgu_abi_shd_intra_frame_operations_data shd_ops IMGU_ABI_PAD;
- struct ipu3_uapi_shd_lut shd_lut IMGU_ABI_PAD;
+ struct ipu3_uapi_shd_config_static shd __aligned(32);
+ struct imgu_abi_shd_intra_frame_operations_data shd_ops __aligned(32);
+ struct ipu3_uapi_shd_lut shd_lut __aligned(32);
} __packed;
struct imgu_abi_stripe_input_frame_resolution {
- __u16 width;
- __u16 height;
- __u32 bayer_order; /* enum ipu3_uapi_bayer_order */
- __u32 raw_bit_depth;
+ u16 width;
+ u16 height;
+ u32 bayer_order; /* enum ipu3_uapi_bayer_order */
+ u32 raw_bit_depth;
} __packed;
/* Stripe-based processing */
struct imgu_abi_stripes {
/* offset from start of frame - measured in pixels */
- __u16 offset;
+ u16 offset;
/* stripe width - measured in pixels */
- __u16 width;
+ u16 width;
/* stripe width - measured in pixels */
- __u16 height;
+ u16 height;
} __packed;
struct imgu_abi_stripe_data {
@@ -438,9 +760,9 @@ struct imgu_abi_stripe_data {
* number of stripes for current processing source
* - VLIW binary parameter we currently support 1 or 2 stripes
*/
- __u16 num_of_stripes;
+ u16 num_of_stripes;
- __u8 padding[2];
+ u8 padding[2];
/*
* the following data is derived from resolution-related
@@ -474,9 +796,9 @@ struct imgu_abi_stripe_data {
* input resolution for output system (equal to bds_out - envelope)
* output-system input frame width as configured by user
*/
- __u16 output_system_in_frame_width;
+ u16 output_system_in_frame_width;
/* output-system input frame height as configured by user */
- __u16 output_system_in_frame_height;
+ u16 output_system_in_frame_height;
/*
	 * 'output-stripes' - accounts for stitching on the output (no overlap)
@@ -491,41 +813,41 @@ struct imgu_abi_stripe_data {
*/
struct imgu_abi_stripes block_stripes[IPU3_UAPI_MAX_STRIPES];
- __u16 effective_frame_width; /* Needed for vertical cropping */
- __u16 bds_frame_width;
- __u16 out_frame_width; /* Output frame width as configured by user */
- __u16 out_frame_height; /* Output frame height as configured by user */
+ u16 effective_frame_width; /* Needed for vertical cropping */
+ u16 bds_frame_width;
+ u16 out_frame_width; /* Output frame width as configured by user */
+ u16 out_frame_height; /* Output frame height as configured by user */
	/* GDC in buffer (a.k.a. delay frame, ref buffer) info */
- __u16 gdc_in_buffer_width; /* GDC in buffer width */
- __u16 gdc_in_buffer_height; /* GDC in buffer height */
+ u16 gdc_in_buffer_width; /* GDC in buffer width */
+ u16 gdc_in_buffer_height; /* GDC in buffer height */
/* GDC in buffer first valid pixel x offset */
- __u16 gdc_in_buffer_offset_x;
+ u16 gdc_in_buffer_offset_x;
/* GDC in buffer first valid pixel y offset */
- __u16 gdc_in_buffer_offset_y;
+ u16 gdc_in_buffer_offset_y;
/* Display frame width as configured by user */
- __u16 display_frame_width;
+ u16 display_frame_width;
/* Display frame height as configured by user */
- __u16 display_frame_height;
- __u16 bds_aligned_frame_width;
+ u16 display_frame_height;
+ u16 bds_aligned_frame_width;
/* Number of vectors to left-crop when writing stripes (not stripe 0) */
- __u16 half_overlap_vectors;
+ u16 half_overlap_vectors;
/* Decimate ISP and fixed func resolutions after BDS (ir_extraction) */
- __u16 ir_ext_decimation;
- __u8 padding1[2];
+ u16 ir_ext_decimation;
+ u8 padding1[2];
} __packed;
/* Input feeder related structs */
struct imgu_abi_input_feeder_data {
- __u32 row_stride; /* row stride */
- __u32 start_row_address; /* start row address */
- __u32 start_pixel; /* start pixel */
+ u32 row_stride; /* row stride */
+ u32 start_row_address; /* start row address */
+ u32 start_pixel; /* start pixel */
} __packed;
struct imgu_abi_input_feeder_data_aligned {
- struct imgu_abi_input_feeder_data data IPU3_ALIGN;
+ struct imgu_abi_input_feeder_data data __aligned(32);
} __packed;
struct imgu_abi_input_feeder_data_per_stripe {
@@ -536,194 +858,196 @@ struct imgu_abi_input_feeder_data_per_stripe {
struct imgu_abi_input_feeder_config {
struct imgu_abi_input_feeder_data data;
struct imgu_abi_input_feeder_data_per_stripe data_per_stripe
- IPU3_ALIGN;
+ __aligned(32);
} __packed;
/* DVS related definitions */
-#define IMGU_ABI_DVS_STAT_LEVELS 3
-
struct imgu_abi_dvs_stat_grd_config {
- __u8 grid_width; /* 5 bits */
- __u8 grid_height;
- __u8 block_width; /* 8 bits */
- __u8 block_height;
- __u16 x_start; /* 12 bits */
- __u16 y_start;
- __u16 enable;
- __u16 x_end; /* 12 bits */
- __u16 y_end;
+ u8 grid_width;
+ u8 grid_height;
+ u8 block_width;
+ u8 block_height;
+ u16 x_start;
+ u16 y_start;
+ u16 enable;
+ u16 x_end;
+ u16 y_end;
} __packed;
struct imgu_abi_dvs_stat_cfg {
- __u8 __reserved0[4];
+ u8 reserved0[4];
struct imgu_abi_dvs_stat_grd_config
grd_config[IMGU_ABI_DVS_STAT_LEVELS];
- __u8 __reserved1[18];
+ u8 reserved1[18];
} __packed;
struct imgu_abi_dvs_stat_transfer_op_data {
- __u8 set_number;
+ u8 set_number;
} __packed;
struct imgu_abi_dvs_stat_intra_frame_operations_data {
struct imgu_abi_acc_operation
- ops[IMGU_ABI_DVS_STAT_MAX_OPERATIONS] IPU3_ALIGN;
+ ops[IMGU_ABI_DVS_STAT_MAX_OPERATIONS] __aligned(32);
struct imgu_abi_acc_process_lines_cmd_data
process_lines_data[IMGU_ABI_DVS_STAT_MAX_PROCESS_LINES]
- IPU3_ALIGN;
+ __aligned(32);
struct imgu_abi_dvs_stat_transfer_op_data
- transfer_data[IMGU_ABI_DVS_STAT_MAX_TRANSFERS] IPU3_ALIGN;
+ transfer_data[IMGU_ABI_DVS_STAT_MAX_TRANSFERS] __aligned(32);
} __packed;
struct imgu_abi_dvs_stat_config {
- struct imgu_abi_dvs_stat_cfg cfg IPU3_ALIGN;
- __u8 __reserved0[128];
+ struct imgu_abi_dvs_stat_cfg cfg __aligned(32);
+ u8 reserved0[128];
struct imgu_abi_dvs_stat_intra_frame_operations_data operations_data;
- __u8 __reserved1[64];
+ u8 reserved1[64];
+} __packed;
+
+/* Y-tone Mapping */
+
+struct imgu_abi_yuvp2_y_tm_lut_static_config {
+ u16 entries[IMGU_ABI_YUVP2_YTM_LUT_ENTRIES];
+ u32 enable;
} __packed;
/* Output formatter related structs */
struct imgu_abi_osys_formatter_params {
- __u32 format;
- __u32 flip;
- __u32 mirror;
- __u32 tiling;
- __u32 reduce_range;
- __u32 alpha_blending;
- __u32 release_inp_addr;
- __u32 release_inp_en;
- __u32 process_out_buf_addr;
- __u32 image_width_vecs;
- __u32 image_height_lines;
- __u32 inp_buff_y_st_addr;
- __u32 inp_buff_y_line_stride;
- __u32 inp_buff_y_buffer_stride;
- __u32 int_buff_u_st_addr;
- __u32 int_buff_v_st_addr;
- __u32 inp_buff_uv_line_stride;
- __u32 inp_buff_uv_buffer_stride;
- __u32 out_buff_level;
- __u32 out_buff_nr_y_lines;
- __u32 out_buff_u_st_offset;
- __u32 out_buff_v_st_offset;
- __u32 out_buff_y_line_stride;
- __u32 out_buff_uv_line_stride;
- __u32 hist_buff_st_addr;
- __u32 hist_buff_line_stride;
- __u32 hist_buff_nr_lines;
+ u32 format;
+ u32 flip;
+ u32 mirror;
+ u32 tiling;
+ u32 reduce_range;
+ u32 alpha_blending;
+ u32 release_inp_addr;
+ u32 release_inp_en;
+ u32 process_out_buf_addr;
+ u32 image_width_vecs;
+ u32 image_height_lines;
+ u32 inp_buff_y_st_addr;
+ u32 inp_buff_y_line_stride;
+ u32 inp_buff_y_buffer_stride;
+ u32 int_buff_u_st_addr;
+ u32 int_buff_v_st_addr;
+ u32 inp_buff_uv_line_stride;
+ u32 inp_buff_uv_buffer_stride;
+ u32 out_buff_level;
+ u32 out_buff_nr_y_lines;
+ u32 out_buff_u_st_offset;
+ u32 out_buff_v_st_offset;
+ u32 out_buff_y_line_stride;
+ u32 out_buff_uv_line_stride;
+ u32 hist_buff_st_addr;
+ u32 hist_buff_line_stride;
+ u32 hist_buff_nr_lines;
} __packed;
struct imgu_abi_osys_formatter {
- struct imgu_abi_osys_formatter_params param IPU3_ALIGN;
+ struct imgu_abi_osys_formatter_params param __aligned(32);
} __packed;
struct imgu_abi_osys_scaler_params {
- __u32 inp_buf_y_st_addr;
- __u32 inp_buf_y_line_stride;
- __u32 inp_buf_y_buffer_stride;
- __u32 inp_buf_u_st_addr;
- __u32 inp_buf_v_st_addr;
- __u32 inp_buf_uv_line_stride;
- __u32 inp_buf_uv_buffer_stride;
- __u32 inp_buf_chunk_width;
- __u32 inp_buf_nr_buffers;
+ u32 inp_buf_y_st_addr;
+ u32 inp_buf_y_line_stride;
+ u32 inp_buf_y_buffer_stride;
+ u32 inp_buf_u_st_addr;
+ u32 inp_buf_v_st_addr;
+ u32 inp_buf_uv_line_stride;
+ u32 inp_buf_uv_buffer_stride;
+ u32 inp_buf_chunk_width;
+ u32 inp_buf_nr_buffers;
/* Output buffers */
- __u32 out_buf_y_st_addr;
- __u32 out_buf_y_line_stride;
- __u32 out_buf_y_buffer_stride;
- __u32 out_buf_u_st_addr;
- __u32 out_buf_v_st_addr;
- __u32 out_buf_uv_line_stride;
- __u32 out_buf_uv_buffer_stride;
- __u32 out_buf_nr_buffers;
+ u32 out_buf_y_st_addr;
+ u32 out_buf_y_line_stride;
+ u32 out_buf_y_buffer_stride;
+ u32 out_buf_u_st_addr;
+ u32 out_buf_v_st_addr;
+ u32 out_buf_uv_line_stride;
+ u32 out_buf_uv_buffer_stride;
+ u32 out_buf_nr_buffers;
/* Intermediate buffers */
- __u32 int_buf_y_st_addr;
- __u32 int_buf_y_line_stride;
- __u32 int_buf_u_st_addr;
- __u32 int_buf_v_st_addr;
- __u32 int_buf_uv_line_stride;
- __u32 int_buf_height;
- __u32 int_buf_chunk_width;
- __u32 int_buf_chunk_height;
+ u32 int_buf_y_st_addr;
+ u32 int_buf_y_line_stride;
+ u32 int_buf_u_st_addr;
+ u32 int_buf_v_st_addr;
+ u32 int_buf_uv_line_stride;
+ u32 int_buf_height;
+ u32 int_buf_chunk_width;
+ u32 int_buf_chunk_height;
/* Context buffers */
- __u32 ctx_buf_hor_y_st_addr;
- __u32 ctx_buf_hor_u_st_addr;
- __u32 ctx_buf_hor_v_st_addr;
- __u32 ctx_buf_ver_y_st_addr;
- __u32 ctx_buf_ver_u_st_addr;
- __u32 ctx_buf_ver_v_st_addr;
+ u32 ctx_buf_hor_y_st_addr;
+ u32 ctx_buf_hor_u_st_addr;
+ u32 ctx_buf_hor_v_st_addr;
+ u32 ctx_buf_ver_y_st_addr;
+ u32 ctx_buf_ver_u_st_addr;
+ u32 ctx_buf_ver_v_st_addr;
/* Addresses for release-input and process-output tokens */
- __u32 release_inp_buf_addr;
- __u32 release_inp_buf_en;
- __u32 release_out_buf_en;
- __u32 process_out_buf_addr;
+ u32 release_inp_buf_addr;
+ u32 release_inp_buf_en;
+ u32 release_out_buf_en;
+ u32 process_out_buf_addr;
/* Settings dimensions, padding, cropping */
- __u32 input_image_y_width;
- __u32 input_image_y_height;
- __u32 input_image_y_start_column;
- __u32 input_image_uv_start_column;
- __u32 input_image_y_left_pad;
- __u32 input_image_uv_left_pad;
- __u32 input_image_y_right_pad;
- __u32 input_image_uv_right_pad;
- __u32 input_image_y_top_pad;
- __u32 input_image_uv_top_pad;
- __u32 input_image_y_bottom_pad;
- __u32 input_image_uv_bottom_pad;
- __u32 processing_mode;
-#define IMGU_ABI_OSYS_PROCMODE_BYPASS 0
-#define IMGU_ABI_OSYS_PROCMODE_UPSCALE 1
-#define IMGU_ABI_OSYS_PROCMODE_DOWNSCALE 2
- __u32 scaling_ratio;
- __u32 y_left_phase_init;
- __u32 uv_left_phase_init;
- __u32 y_top_phase_init;
- __u32 uv_top_phase_init;
- __u32 coeffs_exp_shift;
- __u32 out_y_left_crop;
- __u32 out_uv_left_crop;
- __u32 out_y_top_crop;
- __u32 out_uv_top_crop;
+ u32 input_image_y_width;
+ u32 input_image_y_height;
+ u32 input_image_y_start_column;
+ u32 input_image_uv_start_column;
+ u32 input_image_y_left_pad;
+ u32 input_image_uv_left_pad;
+ u32 input_image_y_right_pad;
+ u32 input_image_uv_right_pad;
+ u32 input_image_y_top_pad;
+ u32 input_image_uv_top_pad;
+ u32 input_image_y_bottom_pad;
+ u32 input_image_uv_bottom_pad;
+ u32 processing_mode; /* enum imgu_abi_osys_procmode */
+ u32 scaling_ratio;
+ u32 y_left_phase_init;
+ u32 uv_left_phase_init;
+ u32 y_top_phase_init;
+ u32 uv_top_phase_init;
+ u32 coeffs_exp_shift;
+ u32 out_y_left_crop;
+ u32 out_uv_left_crop;
+ u32 out_y_top_crop;
+ u32 out_uv_top_crop;
} __packed;
struct imgu_abi_osys_scaler {
- struct imgu_abi_osys_scaler_params param IPU3_ALIGN;
+ struct imgu_abi_osys_scaler_params param __aligned(32);
} __packed;
struct imgu_abi_osys_frame_params {
/* Output pins */
- __u32 enable;
- __u32 format; /* enum imgu_abi_osys_format */
- __u32 flip;
- __u32 mirror;
- __u32 tiling; /* enum imgu_abi_osys_tiling */
- __u32 width;
- __u32 height;
- __u32 stride;
- __u32 scaled;
+ u32 enable;
+ u32 format; /* enum imgu_abi_osys_format */
+ u32 flip;
+ u32 mirror;
+ u32 tiling; /* enum imgu_abi_osys_tiling */
+ u32 width;
+ u32 height;
+ u32 stride;
+ u32 scaled;
} __packed;
struct imgu_abi_osys_frame {
- struct imgu_abi_osys_frame_params param IPU3_ALIGN;
+ struct imgu_abi_osys_frame_params param __aligned(32);
} __packed;
struct imgu_abi_osys_stripe {
/* Input resolution */
- __u32 input_width;
- __u32 input_height;
+ u32 input_width;
+ u32 input_height;
/* Output Stripe */
- __u32 output_width[IMGU_ABI_OSYS_PINS];
- __u32 output_height[IMGU_ABI_OSYS_PINS];
- __u32 output_offset[IMGU_ABI_OSYS_PINS];
- __u32 buf_stride[IMGU_ABI_OSYS_PINS];
+ u32 output_width[IMGU_ABI_OSYS_PINS];
+ u32 output_height[IMGU_ABI_OSYS_PINS];
+ u32 output_offset[IMGU_ABI_OSYS_PINS];
+ u32 buf_stride[IMGU_ABI_OSYS_PINS];
/* Scaler params */
- __u32 block_width;
- __u32 block_height;
+ u32 block_width;
+ u32 block_height;
/* Output Crop factor */
- __u32 crop_top[IMGU_ABI_OSYS_PINS];
- __u32 crop_left[IMGU_ABI_OSYS_PINS];
+ u32 crop_top[IMGU_ABI_OSYS_PINS];
+ u32 crop_left[IMGU_ABI_OSYS_PINS];
} __packed;
struct imgu_abi_osys_config {
@@ -733,26 +1057,97 @@ struct imgu_abi_osys_config {
struct imgu_abi_osys_frame frame[IMGU_ABI_OSYS_PINS];
struct imgu_abi_osys_stripe stripe[IPU3_UAPI_MAX_STRIPES];
/* 32 packed coefficients for luma and chroma */
- __s8 scaler_coeffs_chroma[128];
- __s8 scaler_coeffs_luma[128];
+ s8 scaler_coeffs_chroma[128];
+ s8 scaler_coeffs_luma[128];
} __packed;
-/* Defect pixel correction */
+/* BDS */
-struct imgu_abi_dpc_config {
- __u8 __reserved[240832];
+struct imgu_abi_bds_hor_ctrl0 {
+ u32 sample_patrn_length:9;
+ u32 reserved0:3;
+ u32 hor_ds_en:1;
+ u32 min_clip_val:1;
+ u32 max_clip_val:2;
+ u32 out_frame_width:13;
+ u32 reserved1:3;
} __packed;
-/* BDS */
+struct imgu_abi_bds_ptrn_arr {
+ u32 elems[IMGU_ABI_BDS_SAMPLE_PATTERN_ARRAY_SIZE];
+} __packed;
+
+struct imgu_abi_bds_phase_entry {
+ s8 coeff_min2;
+ s8 coeff_min1;
+ s8 coeff_0;
+ s8 nf;
+ s8 coeff_pls1;
+ s8 coeff_pls2;
+ s8 coeff_pls3;
+ u8 reserved;
+} __packed;
+
+struct imgu_abi_bds_phase_arr {
+ struct imgu_abi_bds_phase_entry
+ even[IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE];
+ struct imgu_abi_bds_phase_entry
+ odd[IMGU_ABI_BDS_PHASE_COEFFS_ARRAY_SIZE];
+} __packed;
+
+struct imgu_abi_bds_hor_ctrl1 {
+ u32 hor_crop_start:13;
+ u32 reserved0:3;
+ u32 hor_crop_end:13;
+ u32 reserved1:1;
+ u32 hor_crop_en:1;
+ u32 reserved2:1;
+} __packed;
+
+struct imgu_abi_bds_hor_ctrl2 {
+ u32 input_frame_height:13;
+ u32 reserved0:19;
+} __packed;
+
+struct imgu_abi_bds_hor {
+ struct imgu_abi_bds_hor_ctrl0 hor_ctrl0;
+ struct imgu_abi_bds_ptrn_arr hor_ptrn_arr;
+ struct imgu_abi_bds_phase_arr hor_phase_arr;
+ struct imgu_abi_bds_hor_ctrl1 hor_ctrl1;
+ struct imgu_abi_bds_hor_ctrl2 hor_ctrl2;
+} __packed;
+
+struct imgu_abi_bds_ver_ctrl0 {
+ u32 sample_patrn_length:9;
+ u32 reserved0:3;
+ u32 ver_ds_en:1;
+ u32 min_clip_val:1;
+ u32 max_clip_val:2;
+ u32 reserved1:16;
+} __packed;
+
+struct imgu_abi_bds_ver_ctrl1 {
+ u32 out_frame_width:13;
+ u32 reserved0:3;
+ u32 out_frame_height:13;
+ u32 reserved1:3;
+} __packed;
+
+struct imgu_abi_bds_ver {
+ struct imgu_abi_bds_ver_ctrl0 ver_ctrl0;
+ struct imgu_abi_bds_ptrn_arr ver_ptrn_arr;
+ struct imgu_abi_bds_phase_arr ver_phase_arr;
+ struct imgu_abi_bds_ver_ctrl1 ver_ctrl1;
+} __packed;
struct imgu_abi_bds_per_stripe_data {
- struct ipu3_uapi_bds_hor_ctrl0 hor_ctrl0;
- struct ipu3_uapi_bds_ver_ctrl1 ver_ctrl1;
- struct ipu3_uapi_bds_hor_ctrl1 crop;
+ struct imgu_abi_bds_hor_ctrl0 hor_ctrl0;
+ struct imgu_abi_bds_ver_ctrl1 ver_ctrl1;
+ struct imgu_abi_bds_hor_ctrl1 crop;
} __packed;
struct imgu_abi_bds_per_stripe_data_aligned {
- struct imgu_abi_bds_per_stripe_data data IPU3_ALIGN;
+ struct imgu_abi_bds_per_stripe_data data __aligned(32);
} __packed;
struct imgu_abi_bds_per_stripe {
@@ -761,35 +1156,64 @@ struct imgu_abi_bds_per_stripe {
} __packed;
struct imgu_abi_bds_config {
- struct ipu3_uapi_bds_hor hor IPU3_ALIGN;
- struct ipu3_uapi_bds_ver ver IPU3_ALIGN;
- struct imgu_abi_bds_per_stripe per_stripe IPU3_ALIGN;
- __u32 enabled;
+ struct imgu_abi_bds_hor hor __aligned(32);
+ struct imgu_abi_bds_ver ver __aligned(32);
+ struct imgu_abi_bds_per_stripe per_stripe __aligned(32);
+ u32 enabled;
+} __packed;
+
+/* ANR */
+
+struct imgu_abi_anr_search_config {
+ u32 enable;
+ u16 frame_width;
+ u16 frame_height;
+} __packed;
+
+struct imgu_abi_anr_stitch_config {
+ u32 anr_stitch_en;
+ u16 frame_width;
+ u16 frame_height;
+ u8 reserved[40];
+ struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
+} __packed;
+
+struct imgu_abi_anr_tile2strm_config {
+ u32 enable;
+ u16 frame_width;
+ u16 frame_height;
+} __packed;
+
+struct imgu_abi_anr_config {
+ struct imgu_abi_anr_search_config search __aligned(32);
+ struct ipu3_uapi_anr_transform_config transform __aligned(32);
+ struct imgu_abi_anr_stitch_config stitch __aligned(32);
+ struct imgu_abi_anr_tile2strm_config tile2strm __aligned(32);
} __packed;
/* AF */
struct imgu_abi_af_frame_size {
- __u16 width;
- __u16 height;
+ u16 width;
+ u16 height;
} __packed;
struct imgu_abi_af_config_s {
- struct ipu3_uapi_af_filter_config filter_config IPU3_ALIGN;
+ struct ipu3_uapi_af_filter_config filter_config __aligned(32);
struct imgu_abi_af_frame_size frame_size;
- struct ipu3_uapi_grid_config grid_cfg IPU3_ALIGN;
+ struct ipu3_uapi_grid_config grid_cfg __aligned(32);
} __packed;
struct imgu_abi_af_intra_frame_operations_data {
struct imgu_abi_acc_operation ops[IMGU_ABI_AF_MAX_OPERATIONS]
- IPU3_ALIGN;
+ __aligned(32);
struct imgu_abi_acc_process_lines_cmd_data
- process_lines_data[IMGU_ABI_AF_MAX_PROCESS_LINES] IPU3_ALIGN;
+ process_lines_data[IMGU_ABI_AF_MAX_PROCESS_LINES] __aligned(32);
} __packed;
struct imgu_abi_af_stripe_config {
- struct imgu_abi_af_frame_size frame_size IPU3_ALIGN;
- struct ipu3_uapi_grid_config grid_cfg IPU3_ALIGN;
+ struct imgu_abi_af_frame_size frame_size __aligned(32);
+ struct ipu3_uapi_grid_config grid_cfg __aligned(32);
} __packed;
struct imgu_abi_af_config {
@@ -801,12 +1225,12 @@ struct imgu_abi_af_config {
/* AE */
struct imgu_abi_ae_config {
- struct ipu3_uapi_ae_grid_config grid_cfg IPU3_ALIGN;
+ struct ipu3_uapi_ae_grid_config grid_cfg __aligned(32);
struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
- IPU3_ALIGN;
- struct ipu3_uapi_ae_ccm ae_ccm IPU3_ALIGN;
+ __aligned(32);
+ struct ipu3_uapi_ae_ccm ae_ccm __aligned(32);
struct {
- struct ipu3_uapi_ae_grid_config grid IPU3_ALIGN;
+ struct ipu3_uapi_ae_grid_config grid __aligned(32);
} stripes[IPU3_UAPI_MAX_STRIPES];
} __packed;
@@ -814,9 +1238,9 @@ struct imgu_abi_ae_config {
struct imgu_abi_awb_fr_intra_frame_operations_data {
struct imgu_abi_acc_operation ops[IMGU_ABI_AWB_FR_MAX_OPERATIONS]
- IPU3_ALIGN;
+ __aligned(32);
struct imgu_abi_acc_process_lines_cmd_data
- process_lines_data[IMGU_ABI_AWB_FR_MAX_PROCESS_LINES] IPU3_ALIGN;
+ process_lines_data[IMGU_ABI_AWB_FR_MAX_PROCESS_LINES] __aligned(32);
} __packed;
struct imgu_abi_awb_fr_config {
@@ -826,51 +1250,52 @@ struct imgu_abi_awb_fr_config {
} __packed;
struct imgu_abi_acc_transfer_op_data {
- __u8 set_number;
+ u8 set_number;
} __packed;
-struct IPU3_ALIGN imgu_abi_awb_intra_frame_operations_data {
+struct imgu_abi_awb_intra_frame_operations_data {
struct imgu_abi_acc_operation ops[IMGU_ABI_AWB_MAX_OPERATIONS]
- IPU3_ALIGN;
+ __aligned(32);
struct imgu_abi_acc_process_lines_cmd_data
- process_lines_data[IMGU_ABI_AWB_MAX_PROCESS_LINES] IPU3_ALIGN;
+ process_lines_data[IMGU_ABI_AWB_MAX_PROCESS_LINES] __aligned(32);
struct imgu_abi_acc_transfer_op_data
- transfer_data[IMGU_ABI_AWB_MAX_TRANSFERS] IPU3_ALIGN;
-} __packed;
+ transfer_data[IMGU_ABI_AWB_MAX_TRANSFERS] __aligned(32);
+} __aligned(32) __packed;
struct imgu_abi_awb_config {
- struct ipu3_uapi_awb_config_s config IPU3_ALIGN;
+ struct ipu3_uapi_awb_config_s config __aligned(32);
struct imgu_abi_awb_intra_frame_operations_data operations_data;
struct ipu3_uapi_awb_config_s stripes[IPU3_UAPI_MAX_STRIPES];
} __packed;
struct imgu_abi_acc_param {
struct imgu_abi_stripe_data stripe;
- __u8 padding[8];
+ u8 padding[8];
struct imgu_abi_input_feeder_config input_feeder;
struct ipu3_uapi_bnr_static_config bnr;
struct ipu3_uapi_bnr_static_config_green_disparity green_disparity
- IPU3_ALIGN;
- struct ipu3_uapi_dm_config dm IPU3_ALIGN;
- struct ipu3_uapi_ccm_mat_config ccm IPU3_ALIGN;
- struct ipu3_uapi_gamma_config gamma IPU3_ALIGN;
- struct ipu3_uapi_csc_mat_config csc IPU3_ALIGN;
- struct ipu3_uapi_cds_params cds IPU3_ALIGN;
- struct imgu_abi_shd_config shd IPU3_ALIGN;
+ __aligned(32);
+ struct ipu3_uapi_dm_config dm __aligned(32);
+ struct ipu3_uapi_ccm_mat_config ccm __aligned(32);
+ struct ipu3_uapi_gamma_config gamma __aligned(32);
+ struct ipu3_uapi_csc_mat_config csc __aligned(32);
+ struct ipu3_uapi_cds_params cds __aligned(32);
+ struct imgu_abi_shd_config shd __aligned(32);
struct imgu_abi_dvs_stat_config dvs_stat;
- __u8 padding1[224]; /* reserved for lace_stat */
- struct ipu3_uapi_yuvp1_iefd_config iefd IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_yds_config yds_c0 IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_chnr_config chnr_c0 IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_yds_config yds IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_chnr_config chnr IPU3_ALIGN;
- struct ipu3_uapi_yuvp2_y_tm_lut_static_config ytm IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_yds_config yds2 IPU3_ALIGN;
- struct ipu3_uapi_yuvp2_tcc_static_config tcc IPU3_ALIGN;
- struct imgu_abi_dpc_config dpc IPU3_ALIGN;
+ u8 padding1[224]; /* reserved for lace_stat */
+ struct ipu3_uapi_yuvp1_iefd_config iefd __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds_c0 __aligned(32);
+ struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __aligned(32);
+ struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds __aligned(32);
+ struct ipu3_uapi_yuvp1_chnr_config chnr __aligned(32);
+ struct imgu_abi_yuvp2_y_tm_lut_static_config ytm __aligned(32);
+ struct ipu3_uapi_yuvp1_yds_config yds2 __aligned(32);
+ struct ipu3_uapi_yuvp2_tcc_static_config tcc __aligned(32);
+ /* reserved for defect pixel correction */
+ u8 dpc[240832] __aligned(32);
struct imgu_abi_bds_config bds;
- struct ipu3_uapi_anr_config anr;
+ struct imgu_abi_anr_config anr;
struct imgu_abi_awb_fr_config awb_fr;
struct imgu_abi_ae_config ae;
struct imgu_abi_af_config af;
@@ -880,66 +1305,29 @@ struct imgu_abi_acc_param {
/***** Morphing table entry *****/
-#define IMGU_ABI_GDC_FRAC_BITS 8
-
struct imgu_abi_gdc_warp_param {
- __u32 origin_x;
- __u32 origin_y;
- __u32 in_addr_offset;
- __u32 in_block_width;
- __u32 in_block_height;
- __u32 p0_x;
- __u32 p0_y;
- __u32 p1_x;
- __u32 p1_y;
- __u32 p2_x;
- __u32 p2_y;
- __u32 p3_x;
- __u32 p3_y;
- __u32 in_block_width_a;
- __u32 in_block_width_b;
- __u32 padding; /* struct size multiple of DDR word */
+ u32 origin_x;
+ u32 origin_y;
+ u32 in_addr_offset;
+ u32 in_block_width;
+ u32 in_block_height;
+ u32 p0_x;
+ u32 p0_y;
+ u32 p1_x;
+ u32 p1_y;
+ u32 p2_x;
+ u32 p2_y;
+ u32 p3_x;
+ u32 p3_y;
+ u32 in_block_width_a;
+ u32 in_block_width_b;
+ u32 padding; /* struct size multiple of DDR word */
} __packed;
/******************* Firmware ABI definitions *******************/
/***** struct imgu_abi_sp_stage *****/
-#define IMGU_ABI_BINARY_MAX_OUTPUT_PORTS 2
-
-enum imgu_abi_queue_id {
- IMGU_ABI_QUEUE_EVENT_ID = -1,
- IMGU_ABI_QUEUE_A_ID = 0,
- IMGU_ABI_QUEUE_B_ID,
- IMGU_ABI_QUEUE_C_ID,
- IMGU_ABI_QUEUE_D_ID,
- IMGU_ABI_QUEUE_E_ID,
- IMGU_ABI_QUEUE_F_ID,
- IMGU_ABI_QUEUE_G_ID,
- IMGU_ABI_QUEUE_H_ID, /* input frame queue for skycam */
- IMGU_ABI_QUEUE_NUM
-};
-
-enum imgu_abi_buffer_type {
- IMGU_ABI_BUFFER_TYPE_INVALID = -1,
- IMGU_ABI_BUFFER_TYPE_3A_STATISTICS = 0,
- IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS,
- IMGU_ABI_BUFFER_TYPE_LACE_STATISTICS,
- IMGU_ABI_BUFFER_TYPE_INPUT_FRAME,
- IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME,
- IMGU_ABI_BUFFER_TYPE_SEC_OUTPUT_FRAME,
- IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME,
- IMGU_ABI_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME,
- IMGU_ABI_BUFFER_TYPE_RAW_OUTPUT_FRAME,
- IMGU_ABI_BUFFER_TYPE_CUSTOM_INPUT,
- IMGU_ABI_BUFFER_TYPE_CUSTOM_OUTPUT,
- IMGU_ABI_BUFFER_TYPE_METADATA,
- IMGU_ABI_BUFFER_TYPE_PARAMETER_SET,
- IMGU_ABI_BUFFER_TYPE_PER_FRAME_PARAMETER_SET,
- IMGU_ABI_NUM_DYNAMIC_BUFFER_TYPE,
- IMGU_ABI_NUM_BUFFER_TYPE
-};
-
struct imgu_abi_crop_pos {
u16 x;
u16 y;
@@ -972,9 +1360,6 @@ struct imgu_abi_frame_sp_info {
* IronGr case - IMGU_ABI_RAW_TYPE_IR_ON_GR
* IronGb case - IMGU_ABI_RAW_TYPE_IR_ON_GB
*/
-#define IMGU_ABI_RAW_TYPE_BAYER 0
-#define IMGU_ABI_RAW_TYPE_IR_ON_GR 1
-#define IMGU_ABI_RAW_TYPE_IR_ON_GB 2
u8 padding[2]; /* Extend to 32 bit multiple */
} __packed;
@@ -1063,7 +1448,7 @@ struct imgu_abi_uds_info {
/* Information for a single pipeline stage */
struct imgu_abi_sp_stage {
/* Multiple boolean flags can be stored in an integer */
- u8 num; /* Stage number */
+ u8 num; /* Stage number */
u8 isp_online;
u8 isp_copy_vf;
u8 isp_copy_output;
@@ -1078,13 +1463,7 @@ struct imgu_abi_sp_stage {
*/
u8 program_input_circuit;
u8 func;
-#define IMGU_ABI_STAGE_FUNC_RAW_COPY 0
-#define IMGU_ABI_STAGE_FUNC_BIN_COPY 1
-#define IMGU_ABI_STAGE_FUNC_ISYS_COPY 2
-#define IMGU_ABI_STAGE_FUNC_NO_FUNC 3
- u8 stage_type; /* The type of the pipe-stage */
-#define IMGU_ABI_STAGE_TYPE_SP 0
-#define IMGU_ABI_STAGE_TYPE_ISP 1
+ u8 stage_type; /* enum imgu_abi_stage_type */
u8 num_stripes;
u8 isp_pipe_version;
struct {
@@ -1113,29 +1492,6 @@ struct imgu_abi_sp_stage {
/***** struct imgu_abi_isp_stage *****/
-#define IMGU_ABI_MAX_BINARY_NAME 64
-
-enum imgu_abi_memories {
- IMGU_ABI_MEM_ISP_PMEM0 = 0,
- IMGU_ABI_MEM_ISP_DMEM0,
- IMGU_ABI_MEM_ISP_VMEM0,
- IMGU_ABI_MEM_ISP_VAMEM0,
- IMGU_ABI_MEM_ISP_VAMEM1,
- IMGU_ABI_MEM_ISP_VAMEM2,
- IMGU_ABI_MEM_ISP_HMEM0,
- IMGU_ABI_MEM_SP0_DMEM0,
- IMGU_ABI_MEM_SP1_DMEM0,
- IMGU_ABI_MEM_DDR,
- IMGU_ABI_NUM_MEMORIES
-};
-
-enum imgu_abi_param_class {
- IMGU_ABI_PARAM_CLASS_PARAM, /* Late binding parameters, like 3A */
- IMGU_ABI_PARAM_CLASS_CONFIG, /* Pipe config time parameters */
- IMGU_ABI_PARAM_CLASS_STATE, /* State parameters, eg. buffer index */
- IMGU_ABI_PARAM_CLASS_NUM
-};
-
struct imgu_abi_isp_param_memory_offsets {
u32 offsets[IMGU_ABI_PARAM_CLASS_NUM]; /* offset wrt hdr in bytes */
} __packed;
@@ -1171,9 +1527,9 @@ struct imgu_abi_blob_info {
u32 bss_size; /* Size of bss section
* Dynamic data filled by loader
*/
- const void *code __aligned(8); /* Code section absolute pointer */
+ u64 code __attribute__((aligned(8))); /* Code section absolute pointer */
/* within fw, code = icache + text */
- const void *data __aligned(8); /* Data section absolute pointer */
+ u64 data __attribute__((aligned(8))); /* Data section absolute pointer */
/* within fw, data = data + bss */
} __packed;
@@ -1192,10 +1548,7 @@ struct imgu_abi_binary_input_info {
u32 min_height;
u32 max_width;
u32 max_height;
- u32 source; /* memory, sensor, variable */
-#define IMGU_ABI_BINARY_INPUT_SOURCE_SENSOR 0
-#define IMGU_ABI_BINARY_INPUT_SOURCE_MEMORY 1
-#define IMGU_ABI_BINARY_INPUT_SOURCE_VARIABLE 2
+ u32 source; /* enum imgu_abi_bin_input_src */
} __packed;
struct imgu_abi_binary_output_info {
@@ -1283,7 +1636,7 @@ struct imgu_abi_isp_param_segments {
} __packed;
struct imgu_abi_binary_info {
- u32 id __aligned(8); /* IMGU_ABI_BINARY_ID_* */
+ u32 id __attribute__((aligned(8))); /* IMGU_ABI_BINARY_ID_* */
struct imgu_abi_binary_pipeline_info pipeline;
struct imgu_abi_binary_input_info input;
struct imgu_abi_binary_output_info output;
@@ -1328,11 +1681,11 @@ struct imgu_abi_binary_info {
u8 rgb2yuv;
u8 high_quality;
u8 kerneltest;
- u8 routing_shd_to_bnr; /* connect SHD with BNR ACCs*/
- u8 routing_bnr_to_anr; /* connect BNR with ANR ACCs*/
+ u8 routing_shd_to_bnr; /* connect SHD with BNR ACCs */
+ u8 routing_bnr_to_anr; /* connect BNR with ANR ACCs */
u8 routing_anr_to_de; /* connect ANR with DE ACCs */
- u8 routing_rgb_to_yuvp1; /* connect RGB with YUVP1 ACCs*/
- u8 routing_yuvp1_to_yuvp2; /* connect YUVP1 with YUVP2 ACCs*/
+ u8 routing_rgb_to_yuvp1; /* connect RGB with YUVP1 */
+ u8 routing_yuvp1_to_yuvp2; /* connect YUVP1 with YUVP2 */
u8 luma_only;
u8 input_yuv;
u8 input_raw;
@@ -1367,8 +1720,6 @@ struct imgu_abi_binary_info {
u8 rgbir;
} enable;
struct {
- /* DMA channel ID: [0,...,IMGU_NUM_DMA_CHANNELS> */
-#define IMGU_NUM_DMA_CHANNELS 19
u8 ref_y_channel;
u8 ref_c_channel;
u8 tnr_channel;
@@ -1393,10 +1744,6 @@ struct imgu_abi_isp_stage {
/***** struct imgu_abi_ddr_address_map and parameter set *****/
-#define IMGU_ABI_ISP_DDR_WORD_BITS 256
-#define IMGU_ABI_ISP_DDR_WORD_BYTES (IMGU_ABI_ISP_DDR_WORD_BITS / 8)
-#define IMGU_ABI_MAX_STAGES 3
-
/* xmem address map allocation */
struct imgu_abi_ddr_address_map {
imgu_addr_t isp_mem_param[IMGU_ABI_MAX_STAGES][IMGU_ABI_NUM_MEMORIES];
@@ -1416,8 +1763,6 @@ struct imgu_abi_parameter_set_info {
/***** struct imgu_abi_sp_group *****/
-#define IMGU_ABI_MAX_IF_CONFIGS 3
-
/* SP configuration information */
struct imgu_abi_sp_config {
u8 no_isp_sync; /* Signal host immediately after start */
@@ -1434,18 +1779,11 @@ struct imgu_abi_sp_pipeline {
u32 pipe_num; /* the dynamic pipe number */
u32 thread_id; /* the sp thread ID */
u32 pipe_config; /* the pipe config */
-#define IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP (1 << 31)
u32 pipe_qos_config; /* Bitmap of multiple QOS extension fw
* state, 0xffffffff indicates non
* QOS pipe.
*/
u32 inout_port_config;
-#define IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST (1 << 0)
-#define IMGU_ABI_PORT_CONFIG_TYPE_INPUT_COPYSINK (1 << 1)
-#define IMGU_ABI_PORT_CONFIG_TYPE_INPUT_TAGGERSINK (1 << 2)
-#define IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST (1 << 4)
-#define IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_COPYSINK (1 << 5)
-#define IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_TAGGERSINK (1 << 6)
u32 required_bds_factor;
u32 dvs_frame_delay;
u32 num_stages; /* the pipe config */
@@ -1493,8 +1831,6 @@ struct imgu_abi_sp_debug_command {
u32 dma_sw_reg;
} __packed;
-#define IMGU_ABI_MAX_SP_THREADS 4
-
/*
* Group all host initialized SP variables into this struct.
* This is initialized every stage through dma.
@@ -1508,10 +1844,6 @@ struct imgu_abi_sp_group {
/***** parameter and state class binary configurations *****/
-#define IMGU_ABI_FRAMES_REF 3
-#define IMGU_ABI_FRAMES_TNR 4
-#define IMGU_ABI_BUF_SETS_TNR 1
-
struct imgu_abi_isp_iterator_config {
struct imgu_abi_frame_sp_info input_info;
struct imgu_abi_frame_sp_info internal_info;
@@ -1562,66 +1894,6 @@ struct imgu_abi_isp_tnr3_dmem_state {
/***** Queues *****/
-#define IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread, queue) \
- (0 << 24 | (thread) << 16 | (queue) << 8)
-#define IMGU_ABI_EVENT_BUFFER_DEQUEUED(queue) (1 << 24 | (queue) << 8)
-#define IMGU_ABI_EVENT_EVENT_DEQUEUED (2 << 24)
-#define IMGU_ABI_EVENT_START_STREAM (3 << 24)
-#define IMGU_ABI_EVENT_STOP_STREAM (4 << 24)
-#define IMGU_ABI_EVENT_MIPI_BUFFERS_READY (5 << 24)
-#define IMGU_ABI_EVENT_UNLOCK_RAW_BUFFER (6 << 24)
-#define IMGU_ABI_EVENT_STAGE_ENABLE_DISABLE (7 << 24)
-
-#define IMGU_ABI_HOST2SP_BUFQ_SIZE 3
-#define IMGU_ABI_SP2HOST_BUFQ_SIZE (2 * IMGU_ABI_MAX_SP_THREADS)
-#define IMGU_ABI_HOST2SP_EVTQ_SIZE (IMGU_ABI_QUEUE_NUM * \
- IMGU_ABI_MAX_SP_THREADS * 2 + IMGU_ABI_MAX_SP_THREADS * 4)
-#define IMGU_ABI_SP2HOST_EVTQ_SIZE (6 * IMGU_ABI_MAX_SP_THREADS)
-
-#define IMGU_ABI_EVTTYPE_EVENT_SHIFT 0
-#define IMGU_ABI_EVTTYPE_EVENT_MASK (0xff << IMGU_ABI_EVTTYPE_EVENT_SHIFT)
-#define IMGU_ABI_EVTTYPE_PIPE_SHIFT 8
-#define IMGU_ABI_EVTTYPE_PIPE_MASK (0xff << IMGU_ABI_EVTTYPE_PIPE_SHIFT)
-#define IMGU_ABI_EVTTYPE_PIPEID_SHIFT 16
-#define IMGU_ABI_EVTTYPE_PIPEID_MASK (0xff << IMGU_ABI_EVTTYPE_PIPEID_SHIFT)
-#define IMGU_ABI_EVTTYPE_MODULEID_SHIFT 8
-#define IMGU_ABI_EVTTYPE_MODULEID_MASK (0xff << IMGU_ABI_EVTTYPE_MODULEID_SHIFT)
-#define IMGU_ABI_EVTTYPE_LINENO_SHIFT 16
-#define IMGU_ABI_EVTTYPE_LINENO_MASK (0xffff << IMGU_ABI_EVTTYPE_LINENO_SHIFT)
-
-/* Output frame ready */
-#define IMGU_ABI_EVTTYPE_OUT_FRAME_DONE 0
-/* Second output frame ready */
-#define IMGU_ABI_EVTTYPE_2ND_OUT_FRAME_DONE 1
-/* Viewfinder Output frame ready */
-#define IMGU_ABI_EVTTYPE_VF_OUT_FRAME_DONE 2
-/* Second viewfinder Output frame ready */
-#define IMGU_ABI_EVTTYPE_2ND_VF_OUT_FRAME_DONE 3
-/* Indication that 3A statistics are available */
-#define IMGU_ABI_EVTTYPE_3A_STATS_DONE 4
-/* Indication that DIS statistics are available */
-#define IMGU_ABI_EVTTYPE_DIS_STATS_DONE 5
-/* Pipeline Done event, sent after last pipeline stage */
-#define IMGU_ABI_EVTTYPE_PIPELINE_DONE 6
-/* Frame tagged */
-#define IMGU_ABI_EVTTYPE_FRAME_TAGGED 7
-/* Input frame ready */
-#define IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE 8
-/* Metadata ready */
-#define IMGU_ABI_EVTTYPE_METADATA_DONE 9
-/* Indication that LACE statistics are available */
-#define IMGU_ABI_EVTTYPE_LACE_STATS_DONE 10
-/* Extension stage executed */
-#define IMGU_ABI_EVTTYPE_ACC_STAGE_COMPLETE 11
-/* Timing measurement data */
-#define IMGU_ABI_EVTTYPE_TIMER 12
-/* End Of Frame event, sent when in buffered sensor mode */
-#define IMGU_ABI_EVTTYPE_PORT_EOF 13
-/* Performance warning encountered by FW */
-#define IMGU_ABI_EVTTYPE_FW_WARNING 14
-/* Assertion hit by FW */
-#define IMGU_ABI_EVTTYPE_FW_ASSERT 15
-
struct imgu_abi_queue_info {
u8 size; /* the maximum number of elements */
u8 step; /* number of bytes per element */
@@ -1695,7 +1967,7 @@ struct imgu_abi_time_meas {
struct imgu_abi_buffer {
union {
struct imgu_abi_isp_3a_statistics s3a;
- u8 __reserved[28];
+ u8 reserved[28];
imgu_addr_t skc_dvs_statistics;
imgu_addr_t lace_stat;
struct imgu_abi_metadata metadata;
@@ -1714,37 +1986,16 @@ struct imgu_abi_buffer {
* uint64_t does not exist on SP/ISP.
* Size of the struct is checked by sp.hive.c.
*/
- u64 cookie_ptr __aligned(8);
+ u64 cookie_ptr __attribute__((aligned(8)));
u64 kernel_ptr;
struct imgu_abi_time_meas timing_data;
u32 isys_eof_clock_tick;
} __packed;
-#define IMGU_ABI_NUM_CONTINUOUS_FRAMES 10
-#define IMGU_ABI_SP_COMM_COMMAND 0x00
-
-/*
- * The host2sp_cmd_ready command is the only command written by the SP
- * It acknowledges that is previous command has been received.
- * (this does not mean that the command has been executed)
- * It also indicates that a new command can be send (it is a queue
- * with depth 1).
- */
-#define IMGU_ABI_SP_COMM_COMMAND_READY 1
-/* Command written by the Host */
-#define IMGU_ABI_SP_COMM_COMMAND_DUMMY 2 /* No action */
-#define IMGU_ABI_SP_COMM_COMMAND_START_FLASH 3 /* Start the flash */
-#define IMGU_ABI_SP_COMM_COMMAND_TERMINATE 4 /* Terminate */
-
-/* n = 0..IPU3_CSS_PIPE_ID_NUM-1 */
-#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK(n) ((n) * 4 + 0x60)
-#define IMGU_ABI_SP_COMM_EVENT_IRQ_MASK_OR_SHIFT 0
-
struct imgu_abi_bl_dma_cmd_entry {
u32 src_addr; /* virtual DDR address */
u32 size; /* number of bytes to be transferred */
u32 dst_type;
-#define IMGU_ABI_BL_DMACMD_TYPE_SP_PMEM 1 /* sp_pmem */
u32 dst_addr; /* hmm address of xMEM or MMIO */
} __packed;
@@ -1757,132 +2008,4 @@ struct imgu_abi_sp_init_dmem_cfg {
u32 sp_id; /* sp id */
} __packed;
-/***** For parameter computation *****/
-
-#define IMGU_SCALER_ELEMS_PER_VEC 0x10
-#define IMGU_SCALER_FILTER_TAPS_Y 0x4
-#define IMGU_SCALER_OUT_BPP 0x8
-
-#define IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET 0xC
-#define IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET 0x8
-
-#define IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR 0x400
-#define IMGU_SCALER_TO_OF_ACK_FA_ADDR \
- (0xC00 + IMGU_HIVE_OF_SYS_SCALER_TO_FA_OFFSET)
-#define IMGU_OF_TO_ACK_FA_ADDR (0xC00 + IMGU_HIVE_OF_SYS_OF_TO_FA_OFFSET)
-#define IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR 0
-#define IMGU_OSYS_PHASES 0x20
-#define IMGU_OSYS_FILTER_TAPS 0x4
-#define IMGU_SCALER_INTR_BPP 10
-
-#define IMGU_PS_SNR_PRESERVE_BITS 3
-#define IMGU_CNTX_BPP 11
-#define IMGU_SCALER_FILTER_TAPS_UV (IMGU_SCALER_FILTER_TAPS_Y / 2)
-
-#define IMGU_VMEM2_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
-#define IMGU_STRIDE_Y (IMGU_SCALER_FILTER_TAPS_Y + 1)
-#define IMGU_MAX_FRAME_WIDTH 3840
-#define IMGU_VMEM3_ELEMS_PER_VEC (IMGU_SCALER_ELEMS_PER_VEC)
-
-#define IMGU_VER_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_OUT_BPP + \
- IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 1 */
-#define IMGU_MAX_INPUT_BLOCK_HEIGHT 64
-#define IMGU_HOR_CNTX_WORDS DIV_ROUND_UP((IMGU_SCALER_INTR_BPP + \
- IMGU_PS_SNR_PRESERVE_BITS), IMGU_CNTX_BPP) /* 2 */
-#define IMGU_MAX_OUTPUT_BLOCK_WIDTH 128
-#define IMGU_CNTX_STRIDE_UV (IMGU_SCALER_FILTER_TAPS_UV + 1)
-
-#define IMGU_OSYS_PHASE_COUNTER_PREC_REF 6
-#define IMGU_VMEM1_Y_SIZE \
- (IMGU_OSYS_BLOCK_HEIGHT * IMGU_VMEM1_Y_STRIDE)
-#define IMGU_VMEM1_UV_SIZE (IMGU_VMEM1_Y_SIZE / 4)
-#define IMGU_VMEM1_OUT_BUF_ADDR (IMGU_VMEM1_INP_BUF_ADDR + \
- (IMGU_OSYS_NUM_INPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
-#define IMGU_OSYS_NUM_OUTPUT_BUFFERS 2
-
-/* transpose of input height */
-#define IMGU_VMEM2_VECS_PER_LINE \
- (DIV_ROUND_UP(IMGU_OSYS_BLOCK_HEIGHT, IMGU_VMEM2_ELEMS_PER_VEC))
-/* size in words (vectors) */
-#define IMGU_VMEM2_BUF_SIZE \
- (IMGU_VMEM2_VECS_PER_LINE * IMGU_VMEM2_LINES_PER_BLOCK)
-#define IMGU_VMEM3_VER_Y_SIZE \
- ((IMGU_STRIDE_Y * IMGU_MAX_FRAME_WIDTH \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
-#define IMGU_VMEM3_HOR_Y_SIZE \
- ((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS)
-#define IMGU_VMEM3_VER_Y_EXTRA \
- ((IMGU_STRIDE_Y * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS)
-#define IMGU_VMEM3_VER_U_SIZE \
- (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
-#define IMGU_VMEM3_HOR_U_SIZE \
- (((IMGU_STRIDE_Y * IMGU_MAX_INPUT_BLOCK_HEIGHT \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_HOR_CNTX_WORDS) / 2)
-#define IMGU_VMEM3_VER_U_EXTRA \
- (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_OUTPUT_BLOCK_WIDTH \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
-#define IMGU_VMEM3_VER_V_SIZE \
- (((IMGU_CNTX_STRIDE_UV * IMGU_MAX_FRAME_WIDTH \
- / IMGU_VMEM3_ELEMS_PER_VEC) * IMGU_VER_CNTX_WORDS) / 2)
-
-#define IMGU_OSYS_DMA_CROP_W_LIMIT 64
-#define IMGU_OSYS_DMA_CROP_H_LIMIT 4
-
-#define IMGU_ISP_VEC_NELEMS 64
-#define IMGU_LUMA_TO_CHROMA_RATIO 2
-#define IMGU_OSYS_FIR_PHASES \
- (IMGU_OSYS_PHASES << IMGU_OSYS_PHASE_COUNTER_PREC_REF)
-#define IMGU_OSYS_TAPS_UV (IMGU_OSYS_FILTER_TAPS / 2)
-#define IMGU_INPUT_BLOCK_WIDTH (128)
-#define IMGU_OSYS_TAPS_Y (IMGU_OSYS_FILTER_TAPS)
-#define IMGU_FIFO_ADDR_SCALER_TO_FMT \
- (IMGU_SCALER_MS_TO_OUTFORMACC_SL_ADDR >> 2)
-#define IMGU_FIFO_ADDR_SCALER_TO_SP (IMGU_SCALER_TO_OF_ACK_FA_ADDR >> 2)
-#define IMGU_VMEM1_INP_BUF_ADDR 0
-#define IMGU_VMEM1_Y_STRIDE \
- (IMGU_OSYS_BLOCK_WIDTH / IMGU_VMEM1_ELEMS_PER_VEC)
-#define IMGU_VMEM1_BUF_SIZE (IMGU_VMEM1_V_OFFSET + IMGU_VMEM1_UV_SIZE)
-
-#define IMGU_VMEM1_U_OFFSET (IMGU_VMEM1_Y_SIZE)
-#define IMGU_VMEM1_V_OFFSET (IMGU_VMEM1_U_OFFSET + IMGU_VMEM1_UV_SIZE)
-#define IMGU_VMEM1_UV_STRIDE (IMGU_VMEM1_Y_STRIDE / 2)
-#define IMGU_OSYS_NUM_INPUT_BUFFERS 2
-#define IMGU_VMEM1_INT_BUF_ADDR (IMGU_VMEM1_OUT_BUF_ADDR + \
- (IMGU_OSYS_NUM_OUTPUT_BUFFERS * IMGU_VMEM1_BUF_SIZE))
-
-#define IMGU_VMEM1_ELEMS_PER_VEC (IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS)
-#define IMGU_OSYS_NUM_INTERM_BUFFERS 2
-#define IMGU_VMEM2_BUF_Y_ADDR 0
-#define IMGU_VMEM2_BUF_Y_STRIDE (IMGU_VMEM2_VECS_PER_LINE)
-#define IMGU_VMEM2_BUF_U_ADDR \
- (IMGU_VMEM2_BUF_Y_ADDR + IMGU_VMEM2_BUF_SIZE)
-#define IMGU_VMEM2_BUF_V_ADDR \
- (IMGU_VMEM2_BUF_U_ADDR + IMGU_VMEM2_BUF_SIZE / 4)
-#define IMGU_VMEM2_BUF_UV_STRIDE (IMGU_VMEM2_VECS_PER_LINE / 2)
-/* 1.5 x depth of intermediate buffer */
-#define IMGU_VMEM2_LINES_PER_BLOCK 192
-#define IMGU_VMEM3_HOR_Y_ADDR \
- (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE)
-#define IMGU_VMEM3_HOR_U_ADDR \
- (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE)
-#define IMGU_VMEM3_HOR_V_ADDR \
- (IMGU_VMEM3_VER_V_ADDR + IMGU_VMEM3_VER_V_SIZE)
-#define IMGU_VMEM3_VER_Y_ADDR 0
-#define IMGU_VMEM3_VER_U_ADDR \
- (IMGU_VMEM3_VER_Y_ADDR + IMGU_VMEM3_VER_Y_SIZE + \
- max(IMGU_VMEM3_HOR_Y_SIZE, IMGU_VMEM3_VER_Y_EXTRA))
-#define IMGU_VMEM3_VER_V_ADDR \
- (IMGU_VMEM3_VER_U_ADDR + IMGU_VMEM3_VER_U_SIZE + \
- max(IMGU_VMEM3_HOR_U_SIZE, IMGU_VMEM3_VER_U_EXTRA))
-#define IMGU_HIVE_OF_SYS_OF_SYSTEM_NWAYS 32
-#define IMGU_FIFO_ADDR_FMT_TO_SP (IMGU_OF_TO_ACK_FA_ADDR >> 2)
-#define IMGU_FIFO_ADDR_FMT_TO_SCALER (IMGU_OUTFORMACC_MS_TO_SCALER_SL_ADDR >> 2)
-#define IMGU_VMEM1_HST_BUF_ADDR (IMGU_VMEM1_INT_BUF_ADDR + \
- (IMGU_OSYS_NUM_INTERM_BUFFERS * IMGU_VMEM1_BUF_SIZE))
-#define IMGU_VMEM1_HST_BUF_STRIDE 120
-#define IMGU_VMEM1_HST_BUF_NLINES 3
-
#endif
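
The alignment hunks in this header swap the kernel-internal __aligned(8) shorthand for the spelled-out __attribute__((aligned(8))) and turn host pointers into fixed-width u64 fields. A minimal sketch of the resulting idiom, with a hypothetical struct name (the 32/64-bit layout-stability rationale is an assumption; the patch itself does not state one):

	/* Hypothetical ABI struct: an explicit 64-bit, 8-byte-aligned address
	 * field keeps the layout identical whatever the host pointer size,
	 * which matters for structures shared with the ISP firmware. */
	struct example_abi_blob {
		u32 size;                             /* payload size in bytes */
		u64 code __attribute__((aligned(8))); /* device address, not a pointer */
	} __packed;
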
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
index 895ca3391cec13..7c1414a1a8af00 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
@@ -515,7 +516,9 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
void __iomem *base = cio2->base;
- unsigned int i, maxloops = 1000;
+ unsigned int i;
+ u32 val, mask;
+ int ret;
/* Disable CSI receiver and MIPI backend devices */
writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
@@ -523,24 +526,42 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
- /* Halt DMA */
- writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
- do {
- if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
- CIO2_CDMAC0_DMA_HALTED)
- break;
- usleep_range(1000, 2000);
- } while (--maxloops);
- if (!maxloops)
- dev_err(&cio2->pci_dev->dev,
- "DMA %i can not be halted\n", CIO2_DMA_CHAN);
-
+ /*
+ * FrameOpen bits should be 0 after a normal abort.
+ * Otherwise, try to abort frame processing by force.
+ */
+ mask = 0;
for (i = 0; i < CIO2_NUM_PORTS; i++) {
writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
+ mask |= CIO2_PBM_FOPN_FRAMEOPEN(i);
}
+
+ ret = readl_poll_timeout(base + CIO2_REG_PBM_FOPN_ABORT, val,
+ !(val & mask), 20, 2000);
+ if (ret) {
+ dev_warn(&cio2->pci_dev->dev,
+ "normal frame abort timed out, trying forced abort");
+ for (i = 0; i < CIO2_NUM_PORTS; i++) {
+ writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
+ CIO2_PBM_FOPN_FORCE_ABORT(i),
+ base + CIO2_REG_PBM_FOPN_ABORT);
+ }
+ }
+
+ /* Halt DMA */
+ val = readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
+ writel(val & ~CIO2_CDMAC0_DMA_EN,
+ base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
+ ret = readl_poll_timeout(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN), val,
+ (val & CIO2_CDMAC0_DMA_HALTED), 20, 2000);
+ if (ret)
+ dev_err(&cio2->pci_dev->dev,
+ "DMA %i can not be halted\n", CIO2_DMA_CHAN);
+
+ synchronize_irq(cio2->pci_dev->irq);
}
static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
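
The rewritten cio2_hw_exit() above replaces the open-coded usleep_range() loop with readl_poll_timeout() from <linux/iopoll.h>, which re-reads a register until a condition holds or the timeout expires and returns -ETIMEDOUT on failure. A minimal sketch of the pattern, where REG_STATUS and STATUS_HALTED are hypothetical names standing in for the CDMAC0/FOPN registers polled above:

	#include <linux/iopoll.h>

	static int wait_for_halt(void __iomem *base)
	{
		u32 val;

		/* re-read every 20 us, give up after 2000 us, as above */
		return readl_poll_timeout(base + REG_STATUS, val,
					  val & STATUS_HALTED, 20, 2000);
	}
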
@@ -1035,12 +1056,12 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ cio2_hw_exit(cio2, q);
+
if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
dev_err(&cio2->pci_dev->dev,
"failed to stop sensor streaming\n");
- cio2_hw_exit(cio2, q);
- synchronize_irq(cio2->pci_dev->irq);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
media_entity_pipeline_stop(&q->vdev.entity);
pm_runtime_put(&cio2->pci_dev->dev);
@@ -1987,7 +2008,6 @@ static int __maybe_unused cio2_suspend(struct device *dev)
/* Stop stream */
cio2_hw_exit(cio2, q);
- synchronize_irq(pci_dev->irq);
pm_runtime_force_suspend(dev);
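
Both call sites above can drop their explicit synchronize_irq() because cio2_hw_exit() now performs it itself after halting DMA. The resulting teardown order in stop_streaming, hardware first and sensor second, is sketched below; the motivation (no interrupt handler racing the shutdown) is an assumption, as the patch does not spell it out:

	cio2_hw_exit(cio2, q);                               /* abort frames, halt DMA, sync IRQ */
	v4l2_subdev_call(q->sensor, video, s_stream, 0);     /* then stop the sensor */
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR); /* flush pending buffers */
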
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 8c821ba1637ae9..4ff96c5e198366 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -10,8 +10,6 @@
#define CIO2_PCI_ID 0x9d32
#define CIO2_PCI_BAR 0
#define CIO2_DMA_MASK DMA_BIT_MASK(39)
-#define CIO2_IMAGE_MAX_WIDTH 4224
-#define CIO2_IMAGE_MAX_LENGTH 3136
#define CIO2_IMAGE_MAX_WIDTH 4224
#define CIO2_IMAGE_MAX_LENGTH 3136
@@ -179,7 +177,7 @@
/* below n = 0..3 */
#define CIO2_PBM_FOPN_ABORT(n) (0x1 << 8 * (n))
#define CIO2_PBM_FOPN_FORCE_ABORT(n) (0x2 << 8 * (n))
-#define CIO2_PBM_FOPN_FRAMEOPEN(n) (0x8 << 8 * (n))
+#define CIO2_PBM_FOPN_FRAMEOPEN(n) (0x10 << 8 * (n))
#define CIO2_REG_LTRCTRL 0x1480
#define CIO2_LTRCTRL_LTRDYNEN BIT(16)
#define CIO2_LTRCTRL_LTRSTABLETIME_SHIFT 8
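
The FRAMEOPEN correction above moves the flag from bit 3 (0x8) to bit 4 (0x10) inside each port's byte of the FOPN register. A sketch of the per-port layout the macros encode, using a hypothetical helper name:

	/* each CSI-2 port owns one byte of the register;
	 * flag bits are offsets within that byte */
	static inline u32 fopn_frameopen_mask(unsigned int port)
	{
		return 0x10u << (8 * port);	/* bit 4 of the port's byte */
	}
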
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css-fw.c b/drivers/media/pci/intel/ipu3/ipu3-css-fw.c
index 449bc2794d4a7c..55861aa8fb037a 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css-fw.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-css-fw.c
@@ -69,13 +69,14 @@ unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi)
return obgrid_size;
}
-void *ipu3_css_fw_pipeline_params(struct ipu3_css *css,
+void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
enum imgu_abi_param_class cls,
enum imgu_abi_memories mem,
struct imgu_fw_isp_parameter *par,
size_t par_size, void *binary_params)
{
- struct imgu_fw_info *bi = &css->fwp->binary_header[css->current_binary];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css->pipes[pipe].bindex];
if (par->offset + par->size >
bi->info.isp.sp.mem_initializers.params[cls][mem].size)
@@ -92,11 +93,13 @@ void *ipu3_css_fw_pipeline_params(struct ipu3_css *css,
void ipu3_css_fw_cleanup(struct ipu3_css *css)
{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+
if (css->binary) {
unsigned int i;
for (i = 0; i < css->fwp->file_header.binary_nr; i++)
- ipu3_dmamap_free(css->dev, &css->binary[i]);
+ ipu3_dmamap_free(imgu, &css->binary[i]);
kfree(css->binary);
}
if (css->fw)
@@ -109,6 +112,7 @@ void ipu3_css_fw_cleanup(struct ipu3_css *css)
int ipu3_css_fw_init(struct ipu3_css *css)
{
static const u32 BLOCK_MAX = 65536;
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
struct device *dev = css->dev;
unsigned int i, j, binary_nr;
int r;
@@ -242,7 +246,7 @@ int ipu3_css_fw_init(struct ipu3_css *css)
void *blob = (void *)css->fwp + bi->blob.offset;
size_t size = bi->blob.size;
- if (!ipu3_dmamap_alloc(css->dev, &css->binary[i], size)) {
+ if (!ipu3_dmamap_alloc(imgu, &css->binary[i], size)) {
r = -ENOMEM;
goto error_out;
}
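
Binary lookups in this file now go through the per-pipe bindex rather than a single css->current_binary, and ipu3_css_fw_pipeline_params() keeps its bounds check on the requested window before handing out a pointer into the parameter blob. A condensed sketch of that validate-then-offset pattern, with limit standing in for the mem_initializers size:

	if (par->offset + par->size > limit)
		return NULL;			/* corrupted firmware description */
	return binary_params + par->offset;	/* window is within bounds */
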
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css-fw.h b/drivers/media/pci/intel/ipu3/ipu3-css-fw.h
index 700ab2abed2d92..d1ffe5170e74af 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css-fw.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-css-fw.h
@@ -98,12 +98,6 @@ struct imgu_fw_binary_xinfo {
struct imgu_fw_binary_xinfo *next __aligned(8);
};
-/* Scalar processor sw state */
-#define IMGU_ABI_SP_SWSTATE_TERMINATED 0
-#define IMGU_ABI_SP_SWSTATE_INITIALIZED 1
-#define IMGU_ABI_SP_SWSTATE_CONNECTED 2
-#define IMGU_ABI_SP_SWSTATE_RUNNING 3
-
struct imgu_fw_sp_info {
u32 init_dmem_data; /* data sect config, stored to dmem */
u32 per_frame_data; /* Per frame data, stored to dmem */
@@ -112,7 +106,7 @@ struct imgu_fw_sp_info {
u32 host_sp_queue; /* Host <-> SP queues */
u32 host_sp_com; /* Host <-> SP commands */
u32 isp_started; /* P'ed from sensor thread, csim only */
- u32 sw_state; /* Polled from css */
+ u32 sw_state; /* Polled from css, enum imgu_abi_sp_swstate */
u32 host_sp_queues_initialized; /* Polled from the SP */
u32 sleep_mode; /* different mode to halt SP */
u32 invalidate_tlb; /* inform SP to invalidate mmu TLB */
@@ -131,15 +125,11 @@ struct imgu_fw_sp_info {
u32 tagger_frames_addr; /* Base address of tagger state */
};
-/* Boot loader sw state */
-#define IMGU_ABI_BL_SWSTATE_OK 0x100
-#define IMGU_ABI_BL_SWSTATE_BUSY (IMGU_ABI_BL_SWSTATE_OK + 1)
-#define IMGU_ABI_BL_SWSTATE_ERR (IMGU_ABI_BL_SWSTATE_OK + 2)
-
struct imgu_fw_bl_info {
u32 num_dma_cmds; /* Number of cmds sent by CSS */
u32 dma_cmd_list; /* Dma command list sent by CSS */
- u32 sw_state; /* Polled from css */
+ u32 sw_state; /* Polled from css, enum imgu_abi_bl_swstate */
+ /* Entry functions */
u32 bl_entry; /* The SP entry function */
};
@@ -189,7 +179,7 @@ int ipu3_css_fw_init(struct ipu3_css *css);
void ipu3_css_fw_cleanup(struct ipu3_css *css);
unsigned int ipu3_css_fw_obgrid_size(const struct imgu_fw_info *bi);
-void *ipu3_css_fw_pipeline_params(struct ipu3_css *css,
+void *ipu3_css_fw_pipeline_params(struct ipu3_css *css, unsigned int pipe,
enum imgu_abi_param_class cls,
enum imgu_abi_memories mem,
struct imgu_fw_isp_parameter *par,
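
The deleted #define blocks for the scalar-processor and boot-loader software states are replaced by the enum names now cited in the sw_state comments. Reconstructed from the removed values, the SP variant would presumably read as follows (a sketch; the actual enum lives in the ABI header, outside this hunk):

	enum imgu_abi_sp_swstate {
		IMGU_ABI_SP_SWSTATE_TERMINATED = 0,
		IMGU_ABI_SP_SWSTATE_INITIALIZED,
		IMGU_ABI_SP_SWSTATE_CONNECTED,
		IMGU_ABI_SP_SWSTATE_RUNNING,
	};
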
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css-params.c b/drivers/media/pci/intel/ipu3/ipu3-css-params.c
index 0492a87868ec33..f36c63eeb0fc5f 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css-params.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-css-params.c
@@ -7,10 +7,12 @@
#include "ipu3-css-fw.h"
#include "ipu3-tables.h"
-#define sqr(x) ((x) * (x))
#define DIV_ROUND_CLOSEST_DOWN(a, b) (((a) + ((b) / 2) - 1) / (b))
#define roundclosest_down(a, b) (DIV_ROUND_CLOSEST_DOWN(a, b) * (b))
+#define IPU3_UAPI_ANR_MAX_RESET ((1 << 12) - 1)
+#define IPU3_UAPI_ANR_MIN_RESET (((-1) << 12) + 1)
+
struct ipu3_css_scaler_info {
unsigned int phase_step; /* Same for luma/chroma */
int exp_shift;
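
For reference, DIV_ROUND_CLOSEST_DOWN() above divides rounding to the closest integer with ties rounded down, and roundclosest_down() scales the result back to a multiple of b. A short worked example with b = 4:

	/*
	 * DIV_ROUND_CLOSEST_DOWN(9, 4)  = (9 + 1) / 4 = 2  ->  roundclosest_down(9, 4)  = 8
	 * DIV_ROUND_CLOSEST_DOWN(10, 4) = (10 + 1) / 4 = 2  ->  roundclosest_down(10, 4) = 8  (tie rounds down)
	 * DIV_ROUND_CLOSEST_DOWN(11, 4) = (11 + 1) / 4 = 3  ->  roundclosest_down(11, 4) = 12
	 */
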
@@ -362,55 +364,59 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
struct ipu3_css_scaler_info *scaler_luma,
struct ipu3_css_scaler_info *scaler_chroma,
struct ipu3_css_frame_params frame_params[],
- struct ipu3_css_stripe_params stripe_params[])
+ struct ipu3_css_stripe_params stripe_params[],
+ unsigned int pipe)
{
- u32 input_width = css->rect[IPU3_CSS_RECT_GDC].width;
- u32 input_height = css->rect[IPU3_CSS_RECT_GDC].height;
- u32 target_width = css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
- u32 target_height = css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
- unsigned int procmode = 0;
struct ipu3_css_reso reso;
unsigned int output_width, pin, s;
+ u32 input_width, input_height, target_width, target_height;
+ unsigned int procmode = 0;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
+ input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
+ target_width = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ target_height = css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
/* Frame parameters */
/* Input width for Output System is output width of DVS (with GDC) */
- reso.input_width = css->rect[IPU3_CSS_RECT_GDC].width;
+ reso.input_width = css_pipe->rect[IPU3_CSS_RECT_GDC].width;
/* Input height for Output System is output height of DVS (with GDC) */
- reso.input_height = css->rect[IPU3_CSS_RECT_GDC].height;
+ reso.input_height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
reso.input_format =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
reso.pin_width[IMGU_ABI_OSYS_PIN_OUT] =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
reso.pin_height[IMGU_ABI_OSYS_PIN_OUT] =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
reso.pin_stride[IMGU_ABI_OSYS_PIN_OUT] =
- css->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
reso.pin_format[IMGU_ABI_OSYS_PIN_OUT] =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
reso.pin_width[IMGU_ABI_OSYS_PIN_VF] =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
reso.pin_height[IMGU_ABI_OSYS_PIN_VF] =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
reso.pin_stride[IMGU_ABI_OSYS_PIN_VF] =
- css->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
reso.pin_format[IMGU_ABI_OSYS_PIN_VF] =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
/* Configure the frame parameters for all output pins */
frame_params[IMGU_ABI_OSYS_PIN_OUT].width =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
frame_params[IMGU_ABI_OSYS_PIN_OUT].height =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
frame_params[IMGU_ABI_OSYS_PIN_VF].width =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
frame_params[IMGU_ABI_OSYS_PIN_VF].height =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
frame_params[IMGU_ABI_OSYS_PIN_VF].crop_top = 0;
frame_params[IMGU_ABI_OSYS_PIN_VF].crop_left = 0;
@@ -840,7 +846,8 @@ static int ipu3_css_osys_calc_frame_and_stripe_params(
* This function configures the Output Formatter System, given the number of
* stripes, scaler luma and chrome parameters
*/
-static void ipu3_css_osys_calc(struct ipu3_css *css, unsigned int stripes,
+static int ipu3_css_osys_calc(struct ipu3_css *css, unsigned int pipe,
+ unsigned int stripes,
struct imgu_abi_osys_config *osys,
struct ipu3_css_scaler_info *scaler_luma,
struct ipu3_css_scaler_info *scaler_chroma,
@@ -850,13 +857,18 @@ static void ipu3_css_osys_calc(struct ipu3_css *css, unsigned int stripes,
struct ipu3_css_stripe_params stripe_params[IPU3_UAPI_MAX_STRIPES];
struct imgu_abi_osys_formatter_params *param;
unsigned int pin, s;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
memset(osys, 0, sizeof(*osys));
/* Compute the frame and stripe params */
- ipu3_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
- scaler_luma, scaler_chroma,
- frame_params, stripe_params);
+ if (ipu3_css_osys_calc_frame_and_stripe_params(css, stripes, osys,
+ scaler_luma,
+ scaler_chroma,
+ frame_params,
+ stripe_params,
+ pipe))
+ return -EINVAL;
/* Output formatter system parameters */
@@ -1178,21 +1190,24 @@ static void ipu3_css_osys_calc(struct ipu3_css *css, unsigned int stripes,
block_stripes[0].height = stripe_params[0].input_height;
} else {
struct imgu_fw_info *bi =
- &css->fwp->binary_header[css->current_binary];
- unsigned int sp_block_width = IPU3_UAPI_ISP_VEC_ELEMS *
- bi->info.isp.sp.block.block_width;
+ &css->fwp->binary_header[css_pipe->bindex];
+ unsigned int sp_block_width =
+ bi->info.isp.sp.block.block_width *
+ IPU3_UAPI_ISP_VEC_ELEMS;
block_stripes[0].width = roundup(stripe_params[0].input_width,
sp_block_width);
block_stripes[1].offset =
- rounddown(css->rect[IPU3_CSS_RECT_GDC].width -
+ rounddown(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
stripe_params[1].input_width, sp_block_width);
block_stripes[1].width =
- roundup(css->rect[IPU3_CSS_RECT_GDC].width -
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width -
block_stripes[1].offset, sp_block_width);
- block_stripes[0].height = css->rect[IPU3_CSS_RECT_GDC].height;
+ block_stripes[0].height = css_pipe->rect[IPU3_CSS_RECT_GDC].height;
block_stripes[1].height = block_stripes[0].height;
}
+
+ return 0;
}
/*********************** Mostly 3A operations ******************************/
@@ -1618,15 +1633,17 @@ ipu3_css_acc_process_lines(const struct process_lines *pl,
return 0;
}
-static int ipu3_css_af_ops_calc(struct ipu3_css *css,
+static int ipu3_css_af_ops_calc(struct ipu3_css *css, unsigned int pipe,
struct imgu_abi_af_config *af_config)
{
struct imgu_abi_af_intra_frame_operations_data *to =
&af_config->operations_data;
- struct imgu_fw_info *bi = &css->fwp->binary_header[css->current_binary];
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
- .image_height = css->rect[IPU3_CSS_RECT_BDS].height,
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
.grid_height = af_config->config.grid_cfg.height,
.block_height =
1 << af_config->config.grid_cfg.block_height_log2,
@@ -1644,14 +1661,16 @@ static int ipu3_css_af_ops_calc(struct ipu3_css *css,
}
static int
-ipu3_css_awb_fr_ops_calc(struct ipu3_css *css,
+ipu3_css_awb_fr_ops_calc(struct ipu3_css *css, unsigned int pipe,
struct imgu_abi_awb_fr_config *awb_fr_config)
{
struct imgu_abi_awb_fr_intra_frame_operations_data *to =
&awb_fr_config->operations_data;
- struct imgu_fw_info *bi = &css->fwp->binary_header[css->current_binary];
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
- .image_height = css->rect[IPU3_CSS_RECT_BDS].height,
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
.grid_height = awb_fr_config->config.grid_cfg.height,
.block_height =
1 << awb_fr_config->config.grid_cfg.block_height_log2,
@@ -1668,15 +1687,17 @@ ipu3_css_awb_fr_ops_calc(struct ipu3_css *css,
NULL);
}
-static int ipu3_css_awb_ops_calc(struct ipu3_css *css,
+static int ipu3_css_awb_ops_calc(struct ipu3_css *css, unsigned int pipe,
struct imgu_abi_awb_config *awb_config)
{
struct imgu_abi_awb_intra_frame_operations_data *to =
&awb_config->operations_data;
- struct imgu_fw_info *bi = &css->fwp->binary_header[css->current_binary];
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi =
+ &css->fwp->binary_header[css_pipe->bindex];
struct process_lines pl = {
- .image_height = css->rect[IPU3_CSS_RECT_BDS].height,
+ .image_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height,
.grid_height = awb_config->config.grid.height,
.block_height =
1 << awb_config->config.grid.block_height_log2,
@@ -1708,22 +1729,24 @@ static void ipu3_css_grid_end_calc(struct ipu3_uapi_grid_config *grid_cfg)
/****************** config computation *****************************/
-static void ipu3_css_cfg_acc_stripe(struct ipu3_css *css,
+static int ipu3_css_cfg_acc_stripe(struct ipu3_css *css, unsigned int pipe,
struct imgu_abi_acc_param *acc)
{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
- &css->fwp->binary_header[css->current_binary];
- const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
- const unsigned int F = IPU3_UAPI_ISP_VEC_ELEMS * 2;
+ &css->fwp->binary_header[css_pipe->bindex];
struct ipu3_css_scaler_info scaler_luma, scaler_chroma;
+ const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
+ const unsigned int f = IPU3_UAPI_ISP_VEC_ELEMS * 2;
unsigned int bds_ds, i;
memset(acc, 0, sizeof(*acc));
/* acc_param: osys_config */
- ipu3_css_osys_calc(css, stripes, &acc->osys, &scaler_luma,
- &scaler_chroma, acc->stripe.block_stripes);
+ if (ipu3_css_osys_calc(css, pipe, stripes, &acc->osys, &scaler_luma,
+ &scaler_chroma, acc->stripe.block_stripes))
+ return -EINVAL;
/* acc_param: stripe data */
@@ -1738,77 +1761,78 @@ static void ipu3_css_cfg_acc_stripe(struct ipu3_css *css,
acc->stripe.num_of_stripes = stripes;
acc->stripe.input_frame.width =
- css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
acc->stripe.input_frame.height =
- css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
acc->stripe.input_frame.bayer_order =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
for (i = 0; i < stripes; i++)
acc->stripe.bds_out_stripes[i].height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.bds_out_stripes[0].offset = 0;
if (stripes <= 1) {
acc->stripe.bds_out_stripes[0].width =
- ALIGN(css->rect[IPU3_CSS_RECT_BDS].width, F);
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
} else {
/* Image processing is divided into two stripes */
acc->stripe.bds_out_stripes[0].width =
acc->stripe.bds_out_stripes[1].width =
- (css->rect[IPU3_CSS_RECT_BDS].width / 2 & ~(F - 1)) + F;
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width / 2 & ~(f - 1)) + f;
/*
* Sum of width of the two stripes should not be smaller
* than output width and must be even times of overlapping
* unit f.
*/
- if ((css->rect[IPU3_CSS_RECT_BDS].width / F & 1) !=
- !!(css->rect[IPU3_CSS_RECT_BDS].width & (F - 1)))
- acc->stripe.bds_out_stripes[0].width += F;
- if ((css->rect[IPU3_CSS_RECT_BDS].width / F & 1) &&
- (css->rect[IPU3_CSS_RECT_BDS].width & (F - 1))) {
- acc->stripe.bds_out_stripes[0].width += F;
- acc->stripe.bds_out_stripes[1].width += F;
+ if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) !=
+ !!(css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1)))
+ acc->stripe.bds_out_stripes[0].width += f;
+ if ((css_pipe->rect[IPU3_CSS_RECT_BDS].width / f & 1) &&
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width & (f - 1))) {
+ acc->stripe.bds_out_stripes[0].width += f;
+ acc->stripe.bds_out_stripes[1].width += f;
}
/* Overlap between stripes is IPU3_UAPI_ISP_VEC_ELEMS * 4 */
acc->stripe.bds_out_stripes[1].offset =
- acc->stripe.bds_out_stripes[0].width - 2 * F;
+ acc->stripe.bds_out_stripes[0].width - 2 * f;
}
acc->stripe.effective_stripes[0].height =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
acc->stripe.effective_stripes[0].offset = 0;
acc->stripe.bds_out_stripes_no_overlap[0].height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.bds_out_stripes_no_overlap[0].offset = 0;
acc->stripe.output_stripes[0].height =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
acc->stripe.output_stripes[0].offset = 0;
if (stripes <= 1) {
acc->stripe.down_scaled_stripes[0].width =
- css->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->stripe.down_scaled_stripes[0].height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.down_scaled_stripes[0].offset = 0;
acc->stripe.effective_stripes[0].width =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
acc->stripe.bds_out_stripes_no_overlap[0].width =
- ALIGN(css->rect[IPU3_CSS_RECT_BDS].width, F);
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f);
acc->stripe.output_stripes[0].width =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
} else { /* Two stripes */
- bds_ds = css->rect[IPU3_CSS_RECT_EFFECTIVE].width *
+ bds_ds = css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width *
IMGU_BDS_GRANULARITY /
- css->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->stripe.down_scaled_stripes[0] =
acc->stripe.bds_out_stripes[0];
acc->stripe.down_scaled_stripes[1] =
acc->stripe.bds_out_stripes[1];
- if (!IS_ALIGNED(css->rect[IPU3_CSS_RECT_BDS].width, F))
- acc->stripe.down_scaled_stripes[1].width += -F +
- (css->rect[IPU3_CSS_RECT_BDS].width & (F - 1));
+ if (!IS_ALIGNED(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f))
+ acc->stripe.down_scaled_stripes[1].width +=
+ (css_pipe->rect[IPU3_CSS_RECT_BDS].width
+ & (f - 1)) - f;
acc->stripe.effective_stripes[0].width = bds_ds *
acc->stripe.down_scaled_stripes[0].width /
@@ -1817,55 +1841,55 @@ static void ipu3_css_cfg_acc_stripe(struct ipu3_css *css,
acc->stripe.down_scaled_stripes[1].width /
IMGU_BDS_GRANULARITY;
acc->stripe.effective_stripes[1].height =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
acc->stripe.effective_stripes[1].offset = bds_ds *
acc->stripe.down_scaled_stripes[1].offset /
IMGU_BDS_GRANULARITY;
acc->stripe.bds_out_stripes_no_overlap[0].width =
acc->stripe.bds_out_stripes_no_overlap[1].offset =
- ALIGN(css->rect[IPU3_CSS_RECT_BDS].width, 2 * F) / 2;
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, 2 * f) / 2;
acc->stripe.bds_out_stripes_no_overlap[1].width =
- DIV_ROUND_UP(css->rect[IPU3_CSS_RECT_BDS].width, F) /
- 2 * F;
+ DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_BDS].width, f)
+ / 2 * f;
acc->stripe.bds_out_stripes_no_overlap[1].height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->stripe.output_stripes[0].width =
- acc->stripe.down_scaled_stripes[0].width - F;
+ acc->stripe.down_scaled_stripes[0].width - f;
acc->stripe.output_stripes[1].width =
- acc->stripe.down_scaled_stripes[1].width - F;
+ acc->stripe.down_scaled_stripes[1].width - f;
acc->stripe.output_stripes[1].height =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
acc->stripe.output_stripes[1].offset =
acc->stripe.output_stripes[0].width;
}
acc->stripe.output_system_in_frame_width =
- css->rect[IPU3_CSS_RECT_GDC].width;
+ css_pipe->rect[IPU3_CSS_RECT_GDC].width;
acc->stripe.output_system_in_frame_height =
- css->rect[IPU3_CSS_RECT_GDC].height;
+ css_pipe->rect[IPU3_CSS_RECT_GDC].height;
acc->stripe.effective_frame_width =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].width;
- acc->stripe.bds_frame_width = css->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ acc->stripe.bds_frame_width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->stripe.out_frame_width =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
acc->stripe.out_frame_height =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
acc->stripe.gdc_in_buffer_width =
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline /
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline /
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel;
acc->stripe.gdc_in_buffer_height =
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
acc->stripe.gdc_in_buffer_offset_x = IMGU_GDC_BUF_X;
acc->stripe.gdc_in_buffer_offset_y = IMGU_GDC_BUF_Y;
acc->stripe.display_frame_width =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
acc->stripe.display_frame_height =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
acc->stripe.bds_aligned_frame_width =
- roundup(css->rect[IPU3_CSS_RECT_BDS].width,
+ roundup(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS);
if (stripes > 1)
@@ -1873,16 +1897,20 @@ static void ipu3_css_cfg_acc_stripe(struct ipu3_css *css,
IMGU_STRIPE_FIXED_HALF_OVERLAP;
else
acc->stripe.half_overlap_vectors = 0;
+
+ return 0;
}
static void ipu3_css_cfg_acc_dvs(struct ipu3_css *css,
- struct imgu_abi_acc_param *acc)
+ struct imgu_abi_acc_param *acc,
+ unsigned int pipe)
{
unsigned int i;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
/* Disable DVS statistics */
acc->dvs_stat.operations_data.process_lines_data[0].lines =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->dvs_stat.operations_data.process_lines_data[0].cfg_set = 0;
acc->dvs_stat.operations_data.ops[0].op_type =
IMGU_ABI_ACC_OPTYPE_PROCESS_LINES;
@@ -1894,8 +1922,10 @@ static void ipu3_css_cfg_acc_dvs(struct ipu3_css *css,
static void acc_bds_per_stripe_data(struct ipu3_css *css,
struct imgu_abi_acc_param *acc,
- const int i)
+ const int i, unsigned int pipe)
{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_en = 0;
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_start = 0;
acc->bds.per_stripe.aligned_data[i].data.crop.hor_crop_end = 0;
@@ -1906,7 +1936,7 @@ static void acc_bds_per_stripe_data(struct ipu3_css *css,
acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_width =
acc->stripe.down_scaled_stripes[i].width;
acc->bds.per_stripe.aligned_data[i].data.ver_ctrl1.out_frame_height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
}
/*
@@ -1915,42 +1945,45 @@ static void acc_bds_per_stripe_data(struct ipu3_css *css,
* telling which fields to take from the old values (or generate if it is NULL)
* and which to take from the new user values.
*/
-int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
+int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
struct imgu_abi_acc_param *acc,
struct imgu_abi_acc_param *acc_old,
struct ipu3_uapi_acc_param *acc_user)
{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
- &css->fwp->binary_header[css->current_binary];
+ &css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
const unsigned int tnr_frame_width =
acc->stripe.bds_aligned_frame_width;
const unsigned int min_overlap = 10;
const struct v4l2_pix_format_mplane *pixm =
- &css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
+ &css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
const struct ipu3_css_bds_config *cfg_bds;
struct imgu_abi_input_feeder_data *feeder_data;
- unsigned int bds_ds, ofs_x, ofs_y, i, width;
+ unsigned int bds_ds, ofs_x, ofs_y, i, width, height;
u8 b_w_log2; /* Block width log2 */
/* Update stripe using chroma and luma */
- ipu3_css_cfg_acc_stripe(css, acc);
+ if (ipu3_css_cfg_acc_stripe(css, pipe, acc))
+ return -EINVAL;
/* acc_param: input_feeder_config */
ofs_x = ((pixm->width -
- css->rect[IPU3_CSS_RECT_EFFECTIVE].width) >> 1) & ~1;
- ofs_x += css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width) >> 1) & ~1;
+ ofs_x += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_RGGB ||
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
ofs_y = ((pixm->height -
- css->rect[IPU3_CSS_RECT_EFFECTIVE].height) >> 1) & ~1;
- ofs_y += css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height) >> 1) & ~1;
+ ofs_y += css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_BGGR ||
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order ==
IMGU_ABI_BAYER_ORDER_GBRG ? 1 : 0;
acc->input_feeder.data.row_stride = pixm->plane_fmt[0].bytesperline;
acc->input_feeder.data.start_row_address =
@@ -1993,8 +2026,6 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
}
acc->bnr.column_size = tnr_frame_width;
- acc->bnr.opt_center_sqr.x_sqr_reset = sqr(acc->bnr.opt_center.x_reset);
- acc->bnr.opt_center_sqr.y_sqr_reset = sqr(acc->bnr.opt_center.y_reset);
/* acc_param: bnr_static_config_green_disparity */
@@ -2108,11 +2139,11 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->shd.shd.grid.grid_height_per_slice;
if (ipu3_css_shd_ops_calc(&acc->shd.shd_ops, &acc->shd.shd.grid,
- css->rect[IPU3_CSS_RECT_BDS].height))
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height))
return -EINVAL;
/* acc_param: dvs_stat_config */
- ipu3_css_cfg_acc_dvs(css, acc);
+ ipu3_css_cfg_acc_dvs(css, acc, pipe);
/* acc_param: yuvp1_iefd_config */
@@ -2194,7 +2225,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
/* acc_param: yuvp2_y_tm_lut_static_config */
- for (i = 0; i < IPU3_UAPI_YUVP2_YTM_LUT_ENTRIES; i++)
+ for (i = 0; i < IMGU_ABI_YUVP2_YTM_LUT_ENTRIES; i++)
acc->ytm.entries[i] = i * 32;
acc->ytm.enable = 0; /* Always disabled on IPU3 */
@@ -2254,8 +2285,8 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
/* acc_param: bds_config */
- bds_ds = (css->rect[IPU3_CSS_RECT_EFFECTIVE].height *
- IMGU_BDS_GRANULARITY) / css->rect[IPU3_CSS_RECT_BDS].height;
+ bds_ds = (css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height *
+ IMGU_BDS_GRANULARITY) / css_pipe->rect[IPU3_CSS_RECT_BDS].height;
if (bds_ds < IMGU_BDS_MIN_SF_INV ||
bds_ds - IMGU_BDS_MIN_SF_INV >= ARRAY_SIZE(ipu3_css_bds_configs))
return -EINVAL;
@@ -2270,11 +2301,11 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->bds.hor.hor_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
acc->bds.hor.hor_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
acc->bds.hor.hor_ctrl0.out_frame_width =
- css->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->bds.hor.hor_ptrn_arr = cfg_bds->ptrn_arr;
acc->bds.hor.hor_phase_arr = cfg_bds->hor_phase_arr;
acc->bds.hor.hor_ctrl2.input_frame_height =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
acc->bds.ver.ver_ctrl0.min_clip_val = IMGU_BDS_MIN_CLIP_VAL;
acc->bds.ver.ver_ctrl0.max_clip_val = IMGU_BDS_MAX_CLIP_VAL;
acc->bds.ver.ver_ctrl0.sample_patrn_length =
@@ -2283,11 +2314,11 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->bds.ver.ver_ptrn_arr = cfg_bds->ptrn_arr;
acc->bds.ver.ver_phase_arr = cfg_bds->ver_phase_arr;
acc->bds.ver.ver_ctrl1.out_frame_width =
- css->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
acc->bds.ver.ver_ctrl1.out_frame_height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
for (i = 0; i < stripes; i++)
- acc_bds_per_stripe_data(css, acc, i);
+ acc_bds_per_stripe_data(css, acc, i, pipe);
acc->bds.enabled = cfg_bds->hor_ds_en || cfg_bds->ver_ds_en;
@@ -2295,10 +2326,18 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
if (use && use->acc_anr) {
/* Take values from user */
- acc->anr = acc_user->anr;
+ acc->anr.transform = acc_user->anr.transform;
+ acc->anr.stitch.anr_stitch_en =
+ acc_user->anr.stitch.anr_stitch_en;
+ memcpy(acc->anr.stitch.pyramid, acc_user->anr.stitch.pyramid,
+ sizeof(acc->anr.stitch.pyramid));
} else if (acc_old) {
/* Use old value */
- acc->anr = acc_old->anr;
+ acc->anr.transform = acc_old->anr.transform;
+ acc->anr.stitch.anr_stitch_en =
+ acc_old->anr.stitch.anr_stitch_en;
+ memcpy(acc->anr.stitch.pyramid, acc_old->anr.stitch.pyramid,
+ sizeof(acc->anr.stitch.pyramid));
} else {
/* Calculate from scratch */
acc->anr = ipu3_css_anr_defaults;
@@ -2309,16 +2348,25 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->anr.transform.enable = 1;
acc->anr.tile2strm.enable = 1;
acc->anr.tile2strm.frame_width =
- ALIGN(css->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
acc->anr.search.frame_width = acc->anr.tile2strm.frame_width;
acc->anr.stitch.frame_width = acc->anr.tile2strm.frame_width;
- acc->anr.tile2strm.frame_height = css->rect[IPU3_CSS_RECT_BDS].height;
+ acc->anr.tile2strm.frame_height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->anr.search.frame_height = acc->anr.tile2strm.frame_height;
acc->anr.stitch.frame_height = acc->anr.tile2strm.frame_height;
- width = ALIGN(css->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
- if (acc->anr.transform.xreset > IPU3_UAPI_ANR_MAX_XRESET - width)
- acc->anr.transform.xreset = IPU3_UAPI_ANR_MAX_XRESET - width;
+ width = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ height = css_pipe->rect[IPU3_CSS_RECT_BDS].height;
+
+ if (acc->anr.transform.xreset + width > IPU3_UAPI_ANR_MAX_RESET)
+ acc->anr.transform.xreset = IPU3_UAPI_ANR_MAX_RESET - width;
+ if (acc->anr.transform.xreset < IPU3_UAPI_ANR_MIN_RESET)
+ acc->anr.transform.xreset = IPU3_UAPI_ANR_MIN_RESET;
+
+ if (acc->anr.transform.yreset + height > IPU3_UAPI_ANR_MAX_RESET)
+ acc->anr.transform.yreset = IPU3_UAPI_ANR_MAX_RESET - height;
+ if (acc->anr.transform.yreset < IPU3_UAPI_ANR_MIN_RESET)
+ acc->anr.transform.yreset = IPU3_UAPI_ANR_MIN_RESET;
/* acc_param: awb_fr_config */
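
The ANR bounds checks added above keep xreset and yreset inside the signed 12-bit window once the frame dimension is added. An equivalent one-liner with clamp_t() from <linux/kernel.h>, assuming width never exceeds the full window, so the lower bound takes precedence exactly as in the two-step checks:

	acc->anr.transform.xreset = clamp_t(int, acc->anr.transform.xreset,
					    IPU3_UAPI_ANR_MIN_RESET,
					    IPU3_UAPI_ANR_MAX_RESET - (int)width);
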
@@ -2392,7 +2440,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
}
- if (ipu3_css_awb_fr_ops_calc(css, &acc->awb_fr))
+ if (ipu3_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
return -EINVAL;
/* acc_param: ae_config */
@@ -2494,9 +2542,9 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->af.config.grid_cfg.height_per_slice =
IMGU_ABI_AF_MAX_CELLS_PER_SET / acc->af.config.grid_cfg.width;
acc->af.config.frame_size.width =
- ALIGN(css->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width, IMGU_ISP_VMEM_ALIGN);
acc->af.config.frame_size.height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
if (acc->stripe.bds_out_stripes[0].width <= min_overlap)
return -EINVAL;
@@ -2504,7 +2552,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
for (i = 0; i < stripes; i++) {
acc->af.stripes[i].grid_cfg = acc->af.config.grid_cfg;
acc->af.stripes[i].frame_size.height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
acc->af.stripes[i].frame_size.width =
acc->stripe.bds_out_stripes[i].width;
}
@@ -2555,7 +2603,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->af.stripes[i].grid_cfg.height_per_slice = 1;
}
- if (ipu3_css_af_ops_calc(css, &acc->af))
+ if (ipu3_css_af_ops_calc(css, pipe, &acc->af))
return -EINVAL;
/* acc_param: awb_config */
@@ -2624,7 +2672,7 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
acc->awb.stripes[i].grid.height_per_slice = 1;
}
- if (ipu3_css_awb_ops_calc(css, &acc->awb))
+ if (ipu3_css_awb_ops_calc(css, pipe, &acc->awb))
return -EINVAL;
return 0;
@@ -2639,7 +2687,8 @@ int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
* to the structure inside `new_binary_params'. In that case the caller
* should calculate and fill the structure from scratch.
*/
-static void *ipu3_css_cfg_copy(struct ipu3_css *css, bool use_user,
+static void *ipu3_css_cfg_copy(struct ipu3_css *css,
+ unsigned int pipe, bool use_user,
void *user_setting, void *old_binary_params,
void *new_binary_params,
enum imgu_abi_memories m,
@@ -2649,8 +2698,8 @@ static void *ipu3_css_cfg_copy(struct ipu3_css *css, bool use_user,
const enum imgu_abi_param_class c = IMGU_ABI_PARAM_CLASS_PARAM;
void *new_setting, *old_setting;
- new_setting = ipu3_css_fw_pipeline_params(css, c, m, par, par_size,
- new_binary_params);
+ new_setting = ipu3_css_fw_pipeline_params(css, pipe, c, m, par,
+ par_size, new_binary_params);
if (!new_setting)
return ERR_PTR(-EPROTO); /* Corrupted firmware */
@@ -2659,7 +2708,7 @@ static void *ipu3_css_cfg_copy(struct ipu3_css *css, bool use_user,
memcpy(new_setting, user_setting, par_size);
} else if (old_binary_params) {
/* Take previous value */
- old_setting = ipu3_css_fw_pipeline_params(css, c, m, par,
+ old_setting = ipu3_css_fw_pipeline_params(css, pipe, c, m, par,
par_size,
old_binary_params);
if (!old_setting)
@@ -2675,12 +2724,13 @@ static void *ipu3_css_cfg_copy(struct ipu3_css *css, bool use_user,
/*
* Configure VMEM0 parameters (late binding parameters).
*/
-int ipu3_css_cfg_vmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
+int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
void *vmem0, void *vmem0_old,
struct ipu3_uapi_params *user)
{
const struct imgu_fw_info *bi =
- &css->fwp->binary_header[css->current_binary];
+ &css->fwp->binary_header[css->pipes[pipe].bindex];
struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
struct ipu3_uapi_isp_lin_vmem_params *lin_vmem = NULL;
@@ -2696,7 +2746,7 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
/* Configure Linearization VMEM0 parameters */
- lin_vmem = ipu3_css_cfg_copy(css, use && use->lin_vmem_params,
+ lin_vmem = ipu3_css_cfg_copy(css, pipe, use && use->lin_vmem_params,
&user->lin_vmem_params, vmem0_old, vmem0,
m, &pofs->vmem.lin, sizeof(*lin_vmem));
if (!IS_ERR_OR_NULL(lin_vmem)) {
@@ -2715,8 +2765,9 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
}
/* Configure TNR3 VMEM parameters */
- if (css->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
- tnr_vmem = ipu3_css_cfg_copy(css, use && use->tnr3_vmem_params,
+ if (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ tnr_vmem = ipu3_css_cfg_copy(css, pipe,
+ use && use->tnr3_vmem_params,
&user->tnr3_vmem_params,
vmem0_old, vmem0, m,
&pofs->vmem.tnr3,
@@ -2731,7 +2782,7 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
/* Configure XNR3 VMEM parameters */
- xnr_vmem = ipu3_css_cfg_copy(css, use && use->xnr3_vmem_params,
+ xnr_vmem = ipu3_css_cfg_copy(css, pipe, use && use->xnr3_vmem_params,
&user->xnr3_vmem_params, vmem0_old, vmem0,
m, &pofs->vmem.xnr3, sizeof(*xnr_vmem));
if (!IS_ERR_OR_NULL(xnr_vmem)) {
@@ -2752,12 +2803,14 @@ int ipu3_css_cfg_vmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
/*
* Configure DMEM0 parameters (late binding parameters).
*/
-int ipu3_css_cfg_dmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
+int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
void *dmem0, void *dmem0_old,
struct ipu3_uapi_params *user)
{
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
- &css->fwp->binary_header[css->current_binary];
+ &css->fwp->binary_header[css_pipe->bindex];
struct imgu_fw_param_memory_offsets *pofs = (void *)css->fwp +
bi->blob.memory_offsets.offsets[IMGU_ABI_PARAM_CLASS_PARAM];
@@ -2772,10 +2825,12 @@ int ipu3_css_cfg_dmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
memset(dmem0, 0, bi->info.isp.sp.mem_initializers.params[c][m].size);
/* Configure TNR3 DMEM0 parameters */
- if (css->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
- tnr_dmem = ipu3_css_cfg_copy(css, use && use->tnr3_dmem_params,
- &user->tnr3_dmem_params, dmem0_old,
- dmem0, m, &pofs->dmem.tnr3,
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ tnr_dmem = ipu3_css_cfg_copy(css, pipe,
+ use && use->tnr3_dmem_params,
+ &user->tnr3_dmem_params,
+ dmem0_old, dmem0, m,
+ &pofs->dmem.tnr3,
sizeof(*tnr_dmem));
if (!IS_ERR_OR_NULL(tnr_dmem)) {
/* Generate parameter from scratch */
@@ -2786,7 +2841,7 @@ int ipu3_css_cfg_dmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
/* Configure XNR3 DMEM0 parameters */
- xnr_dmem = ipu3_css_cfg_copy(css, use && use->xnr3_dmem_params,
+ xnr_dmem = ipu3_css_cfg_copy(css, pipe, use && use->xnr3_dmem_params,
&user->xnr3_dmem_params, dmem0_old, dmem0,
m, &pofs->dmem.xnr3, sizeof(*xnr_dmem));
if (!IS_ERR_OR_NULL(xnr_dmem)) {
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css-params.h b/drivers/media/pci/intel/ipu3/ipu3-css-params.h
index f93ed027f04d0d..f3a0a47117a497 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css-params.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-css-params.h
@@ -4,16 +4,19 @@
#ifndef __IPU3_PARAMS_H
#define __IPU3_PARAMS_H
-int ipu3_css_cfg_acc(struct ipu3_css *css, struct ipu3_uapi_flags *use,
+int ipu3_css_cfg_acc(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
struct imgu_abi_acc_param *acc,
struct imgu_abi_acc_param *acc_old,
struct ipu3_uapi_acc_param *acc_user);
-int ipu3_css_cfg_vmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
+int ipu3_css_cfg_vmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
void *vmem0, void *vmem0_old,
struct ipu3_uapi_params *user);
-int ipu3_css_cfg_dmem0(struct ipu3_css *css, struct ipu3_uapi_flags *use,
+int ipu3_css_cfg_dmem0(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_uapi_flags *use,
void *dmem0, void *dmem0_old,
struct ipu3_uapi_params *user);
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css-pool.c b/drivers/media/pci/intel/ipu3/ipu3-css-pool.c
index 84f6a7801a01cc..6f271f81669b7c 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css-pool.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-css-pool.c
@@ -3,50 +3,46 @@
#include <linux/device.h>
+#include "ipu3.h"
#include "ipu3-css-pool.h"
#include "ipu3-dmamap.h"
-int ipu3_css_dma_buffer_resize(struct device *dev, struct ipu3_css_map *map,
- size_t size)
+int ipu3_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct ipu3_css_map *map, size_t size)
{
if (map->size < size && map->vaddr) {
- dev_warn(dev, "dma buffer is resized from %zu to %zu",
+ dev_warn(&imgu->pci_dev->dev, "dma buf resized from %zu to %zu",
map->size, size);
- ipu3_dmamap_free(dev, map);
- if (!ipu3_dmamap_alloc(dev, map, size))
+ ipu3_dmamap_free(imgu, map);
+ if (!ipu3_dmamap_alloc(imgu, map, size))
return -ENOMEM;
}
return 0;
}
-void ipu3_css_pool_cleanup(struct device *dev, struct ipu3_css_pool *pool)
+void ipu3_css_pool_cleanup(struct imgu_device *imgu, struct ipu3_css_pool *pool)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++)
- ipu3_dmamap_free(dev, &pool->entry[i].param);
+ ipu3_dmamap_free(imgu, &pool->entry[i].param);
}
-int ipu3_css_pool_init(struct device *dev, struct ipu3_css_pool *pool,
+int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
size_t size)
{
unsigned int i;
for (i = 0; i < IPU3_CSS_POOL_SIZE; i++) {
- /*
- * entry[i].framenum is initialized to INT_MIN so that
- * ipu3_css_pool_check() can treat it as usesable slot.
- */
- pool->entry[i].framenum = INT_MIN;
-
+ pool->entry[i].valid = false;
if (size == 0) {
pool->entry[i].param.vaddr = NULL;
continue;
}
- if (!ipu3_dmamap_alloc(dev, &pool->entry[i].param, size))
+ if (!ipu3_dmamap_alloc(imgu, &pool->entry[i].param, size))
goto fail;
}
@@ -55,52 +51,20 @@ int ipu3_css_pool_init(struct device *dev, struct ipu3_css_pool *pool,
return 0;
fail:
- ipu3_css_pool_cleanup(dev, pool);
+ ipu3_css_pool_cleanup(imgu, pool);
return -ENOMEM;
}
/*
- * Check that the following call to pool_get succeeds.
- * Return negative on error.
+ * Allocate a new parameter by recycling the oldest entry in the pool.
*/
-static int ipu3_css_pool_check(struct ipu3_css_pool *pool, long framenum)
+void ipu3_css_pool_get(struct ipu3_css_pool *pool)
{
/* Get the oldest entry */
- int n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
- long diff = framenum - pool->entry[n].framenum;
-
- /* if framenum wraps around and becomes smaller than entry n */
- if (diff < 0)
- diff += LONG_MAX;
-
- /*
- * pool->entry[n].framenum stores the frame number where that
- * entry was allocated. If that was allocated more than POOL_SIZE
- * frames back, it is old enough that we know it is no more in
- * use by firmware.
- */
- if (diff > IPU3_CSS_POOL_SIZE)
- return n;
-
- return -ENOSPC;
-}
+ u32 n = (pool->last + 1) % IPU3_CSS_POOL_SIZE;
-/*
- * Allocate a new parameter from pool at frame number `framenum'.
- * Release the oldest entry in the pool to make space for the new entry.
- * Return negative on error.
- */
-int ipu3_css_pool_get(struct ipu3_css_pool *pool, long framenum)
-{
- int n = ipu3_css_pool_check(pool, framenum);
-
- if (n < 0)
- return n;
-
- pool->entry[n].framenum = framenum;
+ pool->entry[n].valid = true;
pool->last = n;
-
- return n;
}
/*
@@ -108,13 +72,18 @@ int ipu3_css_pool_get(struct ipu3_css_pool *pool, long framenum)
*/
void ipu3_css_pool_put(struct ipu3_css_pool *pool)
{
- pool->entry[pool->last].framenum = INT_MIN;
+ pool->entry[pool->last].valid = false;
pool->last = (pool->last + IPU3_CSS_POOL_SIZE - 1) % IPU3_CSS_POOL_SIZE;
}
-/*
- * Return the nth entry from last, if that entry has no frame stored,
- * return a null map instead to indicate frame not available for the entry.
+/**
+ * ipu3_css_pool_last - Retrieve the nth pool entry from last
+ *
+ * @pool: a pointer to &struct ipu3_css_pool.
+ * @n: the distance from the last index.
+ *
+ * Returns:
+ * The nth entry from the last, or the null map if no frame is stored there.
*/
const struct ipu3_css_map *
ipu3_css_pool_last(struct ipu3_css_pool *pool, unsigned int n)
@@ -124,7 +93,7 @@ ipu3_css_pool_last(struct ipu3_css_pool *pool, unsigned int n)
WARN_ON(n >= IPU3_CSS_POOL_SIZE);
- if (pool->entry[i].framenum < 0)
+ if (!pool->entry[i].valid)
return &null_map;
return &pool->entry[i].param;
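A minimal standalone model of the ring bookkeeping introduced above, assuming only what these hunks show: get() recycles the oldest slot, put() rolls back the newest, and last() walks backwards from the write pointer. The real pool stores DMA maps rather than indices and initializes the write pointer differently; POOL_SIZE and the types here are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

struct pool {
	bool valid[POOL_SIZE];
	unsigned int last;	/* write pointer */
};

/* Claim the oldest slot for new data */
static void pool_get(struct pool *p)
{
	unsigned int n = (p->last + 1) % POOL_SIZE;

	p->valid[n] = true;
	p->last = n;
}

/* Roll back the most recent claim */
static void pool_put(struct pool *p)
{
	p->valid[p->last] = false;
	p->last = (p->last + POOL_SIZE - 1) % POOL_SIZE;
}

/* Return the nth slot counting back from the newest, -1 if invalid */
static int pool_last(const struct pool *p, unsigned int n)
{
	unsigned int i = (p->last + POOL_SIZE - n) % POOL_SIZE;

	return p->valid[i] ? (int)i : -1;	/* -1 models the null map */
}

int main(void)
{
	struct pool p = { .last = POOL_SIZE - 1 };

	pool_get(&p);	/* claims slot 0 */
	pool_get(&p);	/* claims slot 1 */
	printf("newest=%d prev=%d\n", pool_last(&p, 0), pool_last(&p, 1));
	pool_put(&p);	/* releases slot 1 */
	printf("newest=%d\n", pool_last(&p, 0));
	return 0;
}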
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css-pool.h b/drivers/media/pci/intel/ipu3/ipu3-css-pool.h
index 4b22e0856232ea..9c895efd2bfaca 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css-pool.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-css-pool.h
@@ -5,9 +5,19 @@
#define __IPU3_UTIL_H
struct device;
+struct imgu_device;
#define IPU3_CSS_POOL_SIZE 4
+/**
+ * ipu3_css_map - store DMA mapping info for a buffer
+ *
+ * @size: size of the buffer in bytes.
+ * @vaddr: kernel virtual address.
+ * @daddr: IOVA DMA address used to access the IPU3.
+ * @vma: private, a pointer to &struct vm_struct,
+ * used for ipu3_dmamap_free.
+ */
struct ipu3_css_map {
size_t size;
void *vaddr;
@@ -15,22 +25,31 @@ struct ipu3_css_map {
struct vm_struct *vma;
};
+/**
+ * ipu3_css_pool - circular buffer pool definition
+ *
+ * @entry: array with IPU3_CSS_POOL_SIZE elements.
+ * @entry.param: a &struct ipu3_css_map for storing the mem mapping.
+ * @entry.valid: used to mark if the entry has valid data.
+ * @last: write pointer, initialized to IPU3_CSS_POOL_SIZE.
+ */
struct ipu3_css_pool {
struct {
struct ipu3_css_map param;
- long framenum;
+ bool valid;
} entry[IPU3_CSS_POOL_SIZE];
- unsigned int last; /* Latest entry */
+ u32 last;
};
-int ipu3_css_dma_buffer_resize(struct device *dev, struct ipu3_css_map *map,
- size_t size);
-void ipu3_css_pool_cleanup(struct device *dev, struct ipu3_css_pool *pool);
-int ipu3_css_pool_init(struct device *dev, struct ipu3_css_pool *pool,
+int ipu3_css_dma_buffer_resize(struct imgu_device *imgu,
+ struct ipu3_css_map *map, size_t size);
+void ipu3_css_pool_cleanup(struct imgu_device *imgu,
+ struct ipu3_css_pool *pool);
+int ipu3_css_pool_init(struct imgu_device *imgu, struct ipu3_css_pool *pool,
size_t size);
-int ipu3_css_pool_get(struct ipu3_css_pool *pool, long framenum);
+void ipu3_css_pool_get(struct ipu3_css_pool *pool);
void ipu3_css_pool_put(struct ipu3_css_pool *pool);
const struct ipu3_css_map *ipu3_css_pool_last(struct ipu3_css_pool *pool,
- unsigned int last);
+ u32 last);
#endif
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css.c b/drivers/media/pci/intel/ipu3/ipu3-css.c
index 5c9da3884e9db9..8864206fd7e3f3 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-css.c
@@ -23,9 +23,8 @@
#define IPU3_CSS_MAX_H 3136
#define IPU3_CSS_MAX_W 4224
-/* filter size from graph settings is fixed as 4 */
-#define FILTER_SIZE 4
-#define MIN_ENVELOPE 8
+/* minimal envelope size (GDC in - out) should be 4 */
+#define MIN_ENVELOPE 4
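A standalone sketch of the envelope clamp this constant feeds (the computation itself appears in the ipu3_css_fmt_try() hunks later in this diff): the DVS envelope is half the BDS-to-GDC size difference, floored at MIN_ENVELOPE. The resolutions below are made-up values.

#include <stdio.h>

#define MIN_ENVELOPE 4

/* Envelope is half the BDS-to-GDC size difference, at least MIN_ENVELOPE */
static int envelope(int bds, int gdc)
{
	int s = (bds - gdc) / 2;

	return s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
}

int main(void)
{
	printf("%d\n", envelope(1952, 1920));	/* 16 */
	printf("%d\n", envelope(1920, 1920));	/* 0, clamped to 4 */
	return 0;
}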
/*
* pre-allocated buffer size for CSS ABI, auxiliary frames
@@ -659,25 +658,28 @@ static void ipu3_css_hw_cleanup(struct ipu3_css *css)
usleep_range(200, 300);
}
-static void ipu3_css_pipeline_cleanup(struct ipu3_css *css)
+static void ipu3_css_pipeline_cleanup(struct ipu3_css *css, unsigned int pipe)
{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i;
- ipu3_css_pool_cleanup(css->dev, &css->pool.parameter_set_info);
- ipu3_css_pool_cleanup(css->dev, &css->pool.acc);
- ipu3_css_pool_cleanup(css->dev, &css->pool.gdc);
- ipu3_css_pool_cleanup(css->dev, &css->pool.obgrid);
+ ipu3_css_pool_cleanup(imgu,
+ &css->pipes[pipe].pool.parameter_set_info);
+ ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.acc);
+ ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.gdc);
+ ipu3_css_pool_cleanup(imgu, &css->pipes[pipe].pool.obgrid);
+
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- ipu3_css_pool_cleanup(css->dev, &css->pool.binary_params_p[i]);
+ ipu3_css_pool_cleanup(imgu,
+ &css->pipes[pipe].pool.binary_params_p[i]);
}
/*
* This function initializes various stages of the
* IPU3 CSS ISP pipeline
*/
-static int ipu3_css_pipeline_init(struct ipu3_css *css)
+static int ipu3_css_pipeline_init(struct ipu3_css *css, unsigned int pipe)
{
- static const unsigned int PIPE_ID = IPU3_CSS_PIPE_ID_VIDEO;
static const int BYPC = 2; /* Bytes per component */
static const struct imgu_abi_buffer_sp buffer_sp_init = {
.buf_src = {.queue_id = IMGU_ABI_QUEUE_EVENT_ID},
@@ -691,11 +693,12 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
struct imgu_abi_isp_ref_dmem_state *cfg_ref_state;
struct imgu_abi_isp_tnr3_dmem_state *cfg_tnr_state;
- const int pipe = 0, stage = 0, thread = 0;
+ const int stage = 0;
unsigned int i, j;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
const struct imgu_fw_info *bi =
- &css->fwp->binary_header[css->current_binary];
+ &css->fwp->binary_header[css_pipe->bindex];
const unsigned int stripes = bi->info.isp.sp.iterator.num_stripes;
struct imgu_fw_config_memory_offsets *cofs = (void *)css->fwp +
@@ -708,101 +711,107 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
struct imgu_abi_sp_group *sp_group;
const unsigned int bds_width_pad =
- ALIGN(css->rect[IPU3_CSS_RECT_BDS].width,
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS);
const enum imgu_abi_memories m0 = IMGU_ABI_MEM_ISP_DMEM0;
enum imgu_abi_param_class cfg = IMGU_ABI_PARAM_CLASS_CONFIG;
- void *vaddr = css->binary_params_cs[cfg - 1][m0].vaddr;
+ void *vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
+
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+
+ dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
/* Configure iterator */
- cfg_iter = ipu3_css_fw_pipeline_params(css, cfg, m0,
+ cfg_iter = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.iterator,
sizeof(*cfg_iter), vaddr);
if (!cfg_iter)
goto bad_firmware;
cfg_iter->input_info.res.width =
- css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
cfg_iter->input_info.res.height =
- css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
cfg_iter->input_info.padded_width =
- css->queue[IPU3_CSS_QUEUE_IN].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
cfg_iter->input_info.format =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
cfg_iter->input_info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
cfg_iter->input_info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
cfg_iter->input_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
- cfg_iter->internal_info.res.width = css->rect[IPU3_CSS_RECT_BDS].width;
+ cfg_iter->internal_info.res.width = css_pipe->rect[IPU3_CSS_RECT_BDS].width;
cfg_iter->internal_info.res.height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
cfg_iter->internal_info.padded_width = bds_width_pad;
cfg_iter->internal_info.format =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
cfg_iter->internal_info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
cfg_iter->internal_info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
cfg_iter->internal_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
cfg_iter->output_info.res.width =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
cfg_iter->output_info.res.height =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
cfg_iter->output_info.padded_width =
- css->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
cfg_iter->output_info.format =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
cfg_iter->output_info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
cfg_iter->output_info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
cfg_iter->output_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
cfg_iter->vf_info.res.width =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
cfg_iter->vf_info.res.height =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
cfg_iter->vf_info.padded_width =
- css->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
cfg_iter->vf_info.format =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
cfg_iter->vf_info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
cfg_iter->vf_info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
cfg_iter->vf_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
- cfg_iter->dvs_envelope.width = css->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ cfg_iter->dvs_envelope.width = css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
cfg_iter->dvs_envelope.height =
- css->rect[IPU3_CSS_RECT_ENVELOPE].height;
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
/* Configure reference (delay) frames */
- cfg_ref = ipu3_css_fw_pipeline_params(css, cfg, m0, &cofs->dmem.ref,
+ cfg_ref = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
+ &cofs->dmem.ref,
sizeof(*cfg_ref), vaddr);
if (!cfg_ref)
goto bad_firmware;
cfg_ref->port_b.crop = 0;
cfg_ref->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES / BYPC;
- cfg_ref->port_b.width = css->aux_frames[IPU3_CSS_AUX_FRAME_REF].width;
+ cfg_ref->port_b.width =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width;
cfg_ref->port_b.stride =
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline;
cfg_ref->width_a_over_b =
IPU3_UAPI_ISP_VEC_ELEMS / cfg_ref->port_b.elems;
cfg_ref->dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++) {
cfg_ref->ref_frame_addr_y[i] =
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr;
cfg_ref->ref_frame_addr_c[i] =
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr +
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline *
- css->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i].daddr +
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline *
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
}
for (; i < IMGU_ABI_FRAMES_REF; i++) {
cfg_ref->ref_frame_addr_y[i] = 0;
@@ -811,23 +820,23 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
/* Configure DVS (digital video stabilization) */
- cfg_dvs = ipu3_css_fw_pipeline_params(css, cfg, m0,
+ cfg_dvs = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.dvs, sizeof(*cfg_dvs),
vaddr);
if (!cfg_dvs)
goto bad_firmware;
cfg_dvs->num_horizontal_blocks =
- ALIGN(DIV_ROUND_UP(css->rect[IPU3_CSS_RECT_GDC].width,
+ ALIGN(DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
IMGU_DVS_BLOCK_W), 2);
cfg_dvs->num_vertical_blocks =
- DIV_ROUND_UP(css->rect[IPU3_CSS_RECT_GDC].height,
+ DIV_ROUND_UP(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
IMGU_DVS_BLOCK_H);
/* Configure TNR (temporal noise reduction) */
- if (css->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
- cfg_tnr = ipu3_css_fw_pipeline_params(css, cfg, m0,
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ cfg_tnr = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
&cofs->dmem.tnr3,
sizeof(*cfg_tnr),
vaddr);
@@ -837,17 +846,17 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
cfg_tnr->port_b.crop = 0;
cfg_tnr->port_b.elems = IMGU_ABI_ISP_DDR_WORD_BYTES;
cfg_tnr->port_b.width =
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
cfg_tnr->port_b.stride =
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline;
cfg_tnr->width_a_over_b =
- IPU3_UAPI_ISP_VEC_ELEMS / cfg_tnr->port_b.elems;
+ IPU3_UAPI_ISP_VEC_ELEMS / cfg_tnr->port_b.elems;
cfg_tnr->frame_height =
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
cfg_tnr->delay_frame = IPU3_CSS_AUX_FRAMES - 1;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
cfg_tnr->frame_addr[i] =
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR]
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR]
.mem[i].daddr;
for (; i < IMGU_ABI_FRAMES_TNR; i++)
cfg_tnr->frame_addr[i] = 0;
@@ -856,9 +865,9 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
/* Configure ref dmem state parameters */
cfg = IMGU_ABI_PARAM_CLASS_STATE;
- vaddr = css->binary_params_cs[cfg - 1][m0].vaddr;
+ vaddr = css_pipe->binary_params_cs[cfg - 1][m0].vaddr;
- cfg_ref_state = ipu3_css_fw_pipeline_params(css, cfg, m0,
+ cfg_ref_state = ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.ref,
sizeof(*cfg_ref_state),
vaddr);
@@ -869,9 +878,9 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
cfg_ref_state->ref_out_buf_idx = 1;
/* Configure tnr dmem state parameters */
- if (css->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
+ if (css_pipe->pipe_id == IPU3_CSS_PIPE_ID_VIDEO) {
cfg_tnr_state =
- ipu3_css_fw_pipeline_params(css, cfg, m0,
+ ipu3_css_fw_pipeline_params(css, pipe, cfg, m0,
&sofs->dmem.tnr3,
sizeof(*cfg_tnr_state),
vaddr);
@@ -888,21 +897,22 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
/* Configure ISP stage */
- isp_stage = css->xmem_isp_stage_ptrs[pipe][stage].vaddr;
+ isp_stage = css_pipe->xmem_isp_stage_ptrs[pipe][stage].vaddr;
memset(isp_stage, 0, sizeof(*isp_stage));
isp_stage->blob_info = bi->blob;
isp_stage->binary_info = bi->info.isp.sp;
- strcpy(isp_stage->binary_name,
- (char *)css->fwp + bi->blob.prog_name_offset);
+ strscpy(isp_stage->binary_name,
+ (char *)css->fwp + bi->blob.prog_name_offset,
+ sizeof(isp_stage->binary_name));
isp_stage->mem_initializers = bi->info.isp.sp.mem_initializers;
for (i = IMGU_ABI_PARAM_CLASS_CONFIG; i < IMGU_ABI_PARAM_CLASS_NUM; i++)
for (j = 0; j < IMGU_ABI_NUM_MEMORIES; j++)
isp_stage->mem_initializers.params[i][j].address =
- css->binary_params_cs[i - 1][j].daddr;
+ css_pipe->binary_params_cs[i - 1][j].daddr;
/* Configure SP stage */
- sp_stage = css->xmem_sp_stage_ptrs[pipe][stage].vaddr;
+ sp_stage = css_pipe->xmem_sp_stage_ptrs[pipe][stage].vaddr;
memset(sp_stage, 0, sizeof(*sp_stage));
sp_stage->frames.in.buf_attr = buffer_sp_init;
@@ -918,48 +928,45 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
sp_stage->isp_copy_vf = 0;
sp_stage->isp_copy_output = 0;
- /* Enable VF output only when VF or PV queue requested by user */
-
- sp_stage->enable.vf_output =
- (css->vf_output_en != IPU3_NODE_VF_DISABLED);
+ sp_stage->enable.vf_output = css_pipe->vf_output_en;
sp_stage->frames.effective_in_res.width =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].width;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].width;
sp_stage->frames.effective_in_res.height =
- css->rect[IPU3_CSS_RECT_EFFECTIVE].height;
+ css_pipe->rect[IPU3_CSS_RECT_EFFECTIVE].height;
sp_stage->frames.in.info.res.width =
- css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.width;
sp_stage->frames.in.info.res.height =
- css->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].fmt.mpix.height;
sp_stage->frames.in.info.padded_width =
- css->queue[IPU3_CSS_QUEUE_IN].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].width_pad;
sp_stage->frames.in.info.format =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->frame_format;
sp_stage->frames.in.info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bit_depth;
sp_stage->frames.in.info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_IN].css_fmt->bayer_order;
sp_stage->frames.in.info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
sp_stage->frames.in.buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_C_ID;
sp_stage->frames.in.buf_attr.buf_type =
IMGU_ABI_BUFFER_TYPE_INPUT_FRAME;
sp_stage->frames.out[0].info.res.width =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.width;
sp_stage->frames.out[0].info.res.height =
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
sp_stage->frames.out[0].info.padded_width =
- css->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
sp_stage->frames.out[0].info.format =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
sp_stage->frames.out[0].info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
sp_stage->frames.out[0].info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
sp_stage->frames.out[0].info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
sp_stage->frames.out[0].planes.nv.uv.offset =
- css->queue[IPU3_CSS_QUEUE_OUT].width_pad *
- css->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad *
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].fmt.mpix.height;
sp_stage->frames.out[0].buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_D_ID;
sp_stage->frames.out[0].buf_attr.buf_type =
IMGU_ABI_BUFFER_TYPE_OUTPUT_FRAME;
@@ -968,38 +975,38 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
IMGU_ABI_QUEUE_EVENT_ID;
sp_stage->frames.internal_frame_info.res.width =
- css->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
sp_stage->frames.internal_frame_info.res.height =
- css->rect[IPU3_CSS_RECT_BDS].height;
+ css_pipe->rect[IPU3_CSS_RECT_BDS].height;
sp_stage->frames.internal_frame_info.padded_width = bds_width_pad;
sp_stage->frames.internal_frame_info.format =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->frame_format;
sp_stage->frames.internal_frame_info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bit_depth;
sp_stage->frames.internal_frame_info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].css_fmt->bayer_order;
sp_stage->frames.internal_frame_info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
sp_stage->frames.out_vf.info.res.width =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.width;
sp_stage->frames.out_vf.info.res.height =
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
sp_stage->frames.out_vf.info.padded_width =
- css->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
sp_stage->frames.out_vf.info.format =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->frame_format;
sp_stage->frames.out_vf.info.raw_bit_depth =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bit_depth;
sp_stage->frames.out_vf.info.raw_bayer_order =
- css->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].css_fmt->bayer_order;
sp_stage->frames.out_vf.info.raw_type = IMGU_ABI_RAW_TYPE_BAYER;
sp_stage->frames.out_vf.planes.yuv.u.offset =
- css->queue[IPU3_CSS_QUEUE_VF].width_pad *
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height;
sp_stage->frames.out_vf.planes.yuv.v.offset =
- css->queue[IPU3_CSS_QUEUE_VF].width_pad *
- css->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height * 5 / 4;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad *
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].fmt.mpix.height * 5 / 4;
sp_stage->frames.out_vf.buf_attr.buf_src.queue_id = IMGU_ABI_QUEUE_E_ID;
sp_stage->frames.out_vf.buf_attr.buf_type =
IMGU_ABI_BUFFER_TYPE_VF_OUTPUT_FRAME;
@@ -1010,16 +1017,16 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
sp_stage->frames.dvs_buf.buf_src.queue_id = IMGU_ABI_QUEUE_G_ID;
sp_stage->frames.dvs_buf.buf_type = IMGU_ABI_BUFFER_TYPE_DIS_STATISTICS;
- sp_stage->dvs_envelope.width = css->rect[IPU3_CSS_RECT_ENVELOPE].width;
+ sp_stage->dvs_envelope.width = css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].width;
sp_stage->dvs_envelope.height =
- css->rect[IPU3_CSS_RECT_ENVELOPE].height;
+ css_pipe->rect[IPU3_CSS_RECT_ENVELOPE].height;
sp_stage->isp_pipe_version =
bi->info.isp.sp.pipeline.isp_pipe_version;
sp_stage->isp_deci_log_factor =
- clamp(max(fls(css->rect[IPU3_CSS_RECT_BDS].width /
+ clamp(max(fls(css_pipe->rect[IPU3_CSS_RECT_BDS].width /
IMGU_MAX_BQ_GRID_WIDTH),
- fls(css->rect[IPU3_CSS_RECT_BDS].height /
+ fls(css_pipe->rect[IPU3_CSS_RECT_BDS].height /
IMGU_MAX_BQ_GRID_HEIGHT)) - 1, 3, 5);
sp_stage->isp_vf_downscale_bits = 0;
sp_stage->if_config_index = 255;
@@ -1028,52 +1035,54 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
sp_stage->enable.s3a = 1;
sp_stage->enable.dvs_stats = 0;
- sp_stage->xmem_bin_addr = css->binary[css->current_binary].daddr;
- sp_stage->xmem_map_addr = css->sp_ddr_ptrs.daddr;
- sp_stage->isp_stage_addr = css->xmem_isp_stage_ptrs[pipe][stage].daddr;
+ sp_stage->xmem_bin_addr = css->binary[css_pipe->bindex].daddr;
+ sp_stage->xmem_map_addr = css_pipe->sp_ddr_ptrs.daddr;
+ sp_stage->isp_stage_addr =
+ css_pipe->xmem_isp_stage_ptrs[pipe][stage].daddr;
/* Configure SP group */
sp_group = css->xmem_sp_group_ptrs.vaddr;
- memset(sp_group, 0, sizeof(*sp_group));
-
- sp_group->pipe[thread].num_stages = 1;
- sp_group->pipe[thread].pipe_id = PIPE_ID;
- sp_group->pipe[thread].thread_id = thread;
- sp_group->pipe[thread].pipe_num = pipe;
- sp_group->pipe[thread].num_execs = -1;
- sp_group->pipe[thread].pipe_qos_config = -1;
- sp_group->pipe[thread].required_bds_factor = 0;
- sp_group->pipe[thread].dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
- sp_group->pipe[thread].inout_port_config =
+ memset(&sp_group->pipe[pipe], 0, sizeof(struct imgu_abi_sp_pipeline));
+
+ sp_group->pipe[pipe].num_stages = 1;
+ sp_group->pipe[pipe].pipe_id = css_pipe->pipe_id;
+ sp_group->pipe[pipe].thread_id = pipe;
+ sp_group->pipe[pipe].pipe_num = pipe;
+ sp_group->pipe[pipe].num_execs = -1;
+ sp_group->pipe[pipe].pipe_qos_config = -1;
+ sp_group->pipe[pipe].required_bds_factor = 0;
+ sp_group->pipe[pipe].dvs_frame_delay = IPU3_CSS_AUX_FRAMES - 1;
+ sp_group->pipe[pipe].inout_port_config =
IMGU_ABI_PORT_CONFIG_TYPE_INPUT_HOST |
IMGU_ABI_PORT_CONFIG_TYPE_OUTPUT_HOST;
- sp_group->pipe[thread].scaler_pp_lut = 0;
- sp_group->pipe[thread].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
- sp_group->pipe[thread].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
- sp_group->pipe[thread].sp_stage_addr[stage] =
- css->xmem_sp_stage_ptrs[pipe][stage].daddr;
- sp_group->pipe[thread].pipe_config =
- bi->info.isp.sp.enable.params ? (1 << thread) : 0;
- sp_group->pipe[thread].pipe_config |= IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP;
+ sp_group->pipe[pipe].scaler_pp_lut = 0;
+ sp_group->pipe[pipe].shading.internal_frame_origin_x_bqs_on_sctbl = 0;
+ sp_group->pipe[pipe].shading.internal_frame_origin_y_bqs_on_sctbl = 0;
+ sp_group->pipe[pipe].sp_stage_addr[stage] =
+ css_pipe->xmem_sp_stage_ptrs[pipe][stage].daddr;
+ sp_group->pipe[pipe].pipe_config =
+ bi->info.isp.sp.enable.params ? (1 << pipe) : 0;
+ sp_group->pipe[pipe].pipe_config |= IMGU_ABI_PIPE_CONFIG_ACQUIRE_ISP;
/* Initialize parameter pools */
- if (ipu3_css_pool_init(css->dev, &css->pool.parameter_set_info,
+ if (ipu3_css_pool_init(imgu, &css_pipe->pool.parameter_set_info,
sizeof(struct imgu_abi_parameter_set_info)) ||
- ipu3_css_pool_init(css->dev, &css->pool.acc,
+ ipu3_css_pool_init(imgu, &css_pipe->pool.acc,
sizeof(struct imgu_abi_acc_param)) ||
- ipu3_css_pool_init(css->dev, &css->pool.gdc,
+ ipu3_css_pool_init(imgu, &css_pipe->pool.gdc,
sizeof(struct imgu_abi_gdc_warp_param) *
3 * cfg_dvs->num_horizontal_blocks / 2 *
cfg_dvs->num_vertical_blocks) ||
- ipu3_css_pool_init(css->dev, &css->pool.obgrid,
+ ipu3_css_pool_init(imgu, &css_pipe->pool.obgrid,
ipu3_css_fw_obgrid_size(
- &css->fwp->binary_header[css->current_binary])))
+ &css->fwp->binary_header[css_pipe->bindex])))
goto out_of_memory;
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- if (ipu3_css_pool_init(css->dev, &css->pool.binary_params_p[i],
+ if (ipu3_css_pool_init(imgu,
+ &css_pipe->pool.binary_params_p[i],
bi->info.isp.sp.mem_initializers.params
[IMGU_ABI_PARAM_CLASS_PARAM][i].size))
goto out_of_memory;
@@ -1081,11 +1090,11 @@ static int ipu3_css_pipeline_init(struct ipu3_css *css)
return 0;
bad_firmware:
- ipu3_css_pipeline_cleanup(css);
+ ipu3_css_pipeline_cleanup(css, pipe);
return -EPROTO;
out_of_memory:
- ipu3_css_pipeline_cleanup(css);
+ ipu3_css_pipeline_cleanup(css, pipe);
return -ENOMEM;
}
@@ -1188,131 +1197,147 @@ static int ipu3_css_dequeue_data(struct ipu3_css *css, int queue, u32 *data)
}
/* Free binary-specific resources */
-static void ipu3_css_binary_cleanup(struct ipu3_css *css)
+static void ipu3_css_binary_cleanup(struct ipu3_css *css, unsigned int pipe)
{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
for (j = 0; j < IMGU_ABI_PARAM_CLASS_NUM - 1; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
- ipu3_dmamap_free(css->dev,
- &css->binary_params_cs[j][i]);
+ ipu3_dmamap_free(imgu,
+ &css_pipe->binary_params_cs[j][i]);
j = IPU3_CSS_AUX_FRAME_REF;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- ipu3_dmamap_free(css->dev, &css->aux_frames[j].mem[i]);
+ ipu3_dmamap_free(imgu,
+ &css_pipe->aux_frames[j].mem[i]);
j = IPU3_CSS_AUX_FRAME_TNR;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- ipu3_dmamap_free(css->dev, &css->aux_frames[j].mem[i]);
+ ipu3_dmamap_free(imgu,
+ &css_pipe->aux_frames[j].mem[i]);
}
-static int ipu3_css_binary_preallocate(struct ipu3_css *css)
+static int ipu3_css_binary_preallocate(struct ipu3_css *css, unsigned int pipe)
{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
unsigned int i, j;
- for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
- for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
- if (!ipu3_dmamap_alloc(css->dev,
- &css->binary_params_cs[j - 1][i],
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ for (j = IMGU_ABI_PARAM_CLASS_CONFIG;
+ j < IMGU_ABI_PARAM_CLASS_NUM; j++)
+ for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++)
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->binary_params_cs[j - 1][i],
CSS_ABI_SIZE))
goto out_of_memory;
- }
- j = IPU3_CSS_AUX_FRAME_REF;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (!ipu3_dmamap_alloc(css->dev, &css->aux_frames[j].mem[i],
- CSS_BDS_SIZE))
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].
+ mem[i], CSS_BDS_SIZE))
goto out_of_memory;
- j = IPU3_CSS_AUX_FRAME_TNR;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (!ipu3_dmamap_alloc(css->dev, &css->aux_frames[j].mem[i],
- CSS_GDC_SIZE))
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].
+ mem[i], CSS_GDC_SIZE))
goto out_of_memory;
return 0;
out_of_memory:
- ipu3_css_binary_cleanup(css);
+ ipu3_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
/* allocate binary-specific resources */
-static int ipu3_css_binary_setup(struct ipu3_css *css)
+static int ipu3_css_binary_setup(struct ipu3_css *css, unsigned int pipe)
{
- const struct imgu_abi_binary_info *sp =
- &css->fwp->binary_header[css->current_binary].info.isp.sp;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ struct imgu_fw_info *bi = &css->fwp->binary_header[css_pipe->bindex];
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ int i, j, size;
static const int BYPC = 2; /* Bytes per component */
- unsigned int w, h, size, i, j;
+ unsigned int w, h;
/* Allocate parameter memory blocks for this binary */
for (j = IMGU_ABI_PARAM_CLASS_CONFIG; j < IMGU_ABI_PARAM_CLASS_NUM; j++)
for (i = 0; i < IMGU_ABI_NUM_MEMORIES; i++) {
if (ipu3_css_dma_buffer_resize(
- css->dev, &css->binary_params_cs[j - 1][i],
- sp->mem_initializers.params[j][i].size))
+ imgu,
+ &css_pipe->binary_params_cs[j - 1][i],
+ bi->info.isp.sp.mem_initializers.params[j][i].size))
goto out_of_memory;
}
/* Allocate internal frame buffers */
/* Reference frames for DVS, FRAME_FORMAT_YUV420_16 */
- j = IPU3_CSS_AUX_FRAME_REF;
- css->aux_frames[j].bytesperpixel = BYPC;
- css->aux_frames[j].width = css->rect[IPU3_CSS_RECT_BDS].width;
- css->aux_frames[j].height = ALIGN(css->rect[IPU3_CSS_RECT_BDS].height,
- IMGU_DVS_BLOCK_H) +
- 2 * IMGU_GDC_BUF_Y;
- h = css->aux_frames[j].height;
- w = ALIGN(css->rect[IPU3_CSS_RECT_BDS].width,
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel = BYPC;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].width =
+ css_pipe->rect[IPU3_CSS_RECT_BDS].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height =
+ ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].height,
+ IMGU_DVS_BLOCK_H) + 2 * IMGU_GDC_BUF_Y;
+ h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].height;
+ w = ALIGN(css_pipe->rect[IPU3_CSS_RECT_BDS].width,
2 * IPU3_UAPI_ISP_VEC_ELEMS) + 2 * IMGU_GDC_BUF_X;
- css->aux_frames[j].bytesperline = css->aux_frames[j].bytesperpixel * w;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperline =
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].bytesperpixel * w;
size = w * h * BYPC + (w / 2) * (h / 2) * BYPC * 2;
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
- if (ipu3_css_dma_buffer_resize(css->dev,
- &css->aux_frames[j].mem[i],
- size))
+ if (ipu3_css_dma_buffer_resize(
+ imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_REF].mem[i],
+ size))
goto out_of_memory;
/* TNR frames for temporal noise reduction, FRAME_FORMAT_YUV_LINE */
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =
- roundup(css->rect[IPU3_CSS_RECT_GDC].width,
- sp->block.block_width *
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperpixel = 1;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].width,
+ bi->info.isp.sp.block.block_width *
IPU3_UAPI_ISP_VEC_ELEMS);
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =
- roundup(css->rect[IPU3_CSS_RECT_GDC].height,
- sp->block.output_block_height);
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height =
+ roundup(css_pipe->rect[IPU3_CSS_RECT_GDC].height,
+ bi->info.isp.sp.block.output_block_height);
- w = css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
- css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;
- h = css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
+ w = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].width;
+ css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].bytesperline = w;
+ h = css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].height;
size = w * ALIGN(h * 3 / 2 + 3, 2); /* +3 for vf_pp prefetch */
for (i = 0; i < IPU3_CSS_AUX_FRAMES; i++)
if (ipu3_css_dma_buffer_resize(
- css->dev,
- &css->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i], size))
+ imgu,
+ &css_pipe->aux_frames[IPU3_CSS_AUX_FRAME_TNR].mem[i],
+ size))
goto out_of_memory;
return 0;
out_of_memory:
- ipu3_css_binary_cleanup(css);
+ ipu3_css_binary_cleanup(css, pipe);
return -ENOMEM;
}
int ipu3_css_start_streaming(struct ipu3_css *css)
{
u32 data;
- int r;
+ int r, pipe;
if (css->streaming)
return -EPROTO;
- r = ipu3_css_binary_setup(css);
- if (r < 0)
- return r;
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_binary_setup(css, pipe);
+ if (r < 0)
+ return r;
+ }
r = ipu3_css_hw_init(css);
if (r < 0)
@@ -1322,19 +1347,22 @@ int ipu3_css_start_streaming(struct ipu3_css *css)
if (r < 0)
goto fail;
- r = ipu3_css_pipeline_init(css);
- if (r < 0)
- goto fail;
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_pipeline_init(css, pipe);
+ if (r < 0)
+ goto fail;
+ }
css->streaming = true;
- css->frame = 0;
ipu3_css_hw_enable_irq(css);
/* Initialize parameters to default */
- r = ipu3_css_set_parameters(css, NULL);
- if (r < 0)
- goto fail;
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_set_parameters(css, pipe, NULL);
+ if (r < 0)
+ goto fail;
+ }
while (!(r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_A_ID, &data)))
;
@@ -1346,18 +1374,23 @@ int ipu3_css_start_streaming(struct ipu3_css *css)
if (r != -EBUSY)
goto fail;
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, 0,
- IMGU_ABI_EVENT_START_STREAM);
- if (r < 0)
- goto fail;
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_START_STREAM |
+ pipe << 16);
+ if (r < 0)
+ goto fail;
+ }
return 0;
fail:
css->streaming = false;
ipu3_css_hw_cleanup(css);
- ipu3_css_pipeline_cleanup(css);
- ipu3_css_binary_cleanup(css);
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ ipu3_css_pipeline_cleanup(css, pipe);
+ ipu3_css_binary_cleanup(css, pipe);
+ }
return r;
}
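A userspace model of the per-pipe setup/unwind flow above: the driver walks css->enabled_pipes with for_each_set_bit() and, on failure, tears down every enabled pipe. The open-coded bit test, MAX_PIPE_NUM and the injected error below are simplifications for illustration, not the driver's code.

#include <stdio.h>

#define MAX_PIPE_NUM 2

static int pipeline_init(unsigned int pipe)
{
	return pipe == 1 ? -12 : 0;	/* pretend pipe 1 hits -ENOMEM */
}

static void pipeline_cleanup(unsigned int pipe)
{
	printf("cleanup pipe %u\n", pipe);
}

int main(void)
{
	unsigned long enabled = 0x3;	/* pipes 0 and 1 enabled */
	unsigned int pipe;
	int r;

	for (pipe = 0; pipe < MAX_PIPE_NUM; pipe++) {
		if (!(enabled & (1UL << pipe)))	/* stands in for for_each_set_bit() */
			continue;
		r = pipeline_init(pipe);
		if (r < 0)
			goto fail;
	}
	return 0;

fail:
	/* Mirror the fail path above: unwind every enabled pipe */
	for (pipe = 0; pipe < MAX_PIPE_NUM; pipe++)
		if (enabled & (1UL << pipe))
			pipeline_cleanup(pipe);
	return 1;
}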
@@ -1365,13 +1398,14 @@ fail:
void ipu3_css_stop_streaming(struct ipu3_css *css)
{
struct ipu3_css_buffer *b, *b0;
- int q, r;
+ int q, r, pipe;
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, 0,
- IMGU_ABI_EVENT_STOP_STREAM);
-
- if (r < 0)
- dev_warn(css->dev, "failed on stop stream event\n");
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_STOP_STREAM);
+ if (r < 0)
+ dev_warn(css->dev, "failed on stop stream event\n");
+ }
if (!css->streaming)
return;
@@ -1380,126 +1414,173 @@ void ipu3_css_stop_streaming(struct ipu3_css *css)
ipu3_css_hw_cleanup(css);
- ipu3_css_pipeline_cleanup(css);
+ for_each_set_bit(pipe, css->enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
- spin_lock(&css->qlock);
- for (q = 0; q < IPU3_CSS_QUEUES; q++)
- list_for_each_entry_safe(b, b0, &css->queue[q].bufs, list) {
- b->state = IPU3_CSS_BUFFER_FAILED;
- list_del(&b->list);
- }
- spin_unlock(&css->qlock);
+ ipu3_css_pipeline_cleanup(css, pipe);
+
+ spin_lock(&css_pipe->qlock);
+ for (q = 0; q < IPU3_CSS_QUEUES; q++)
+ list_for_each_entry_safe(b, b0,
+ &css_pipe->queue[q].bufs,
+ list) {
+ b->state = IPU3_CSS_BUFFER_FAILED;
+ list_del(&b->list);
+ }
+ spin_unlock(&css_pipe->qlock);
+ }
css->streaming = false;
}
-bool ipu3_css_queue_empty(struct ipu3_css *css)
+bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe)
{
int q;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
- spin_lock(&css->qlock);
+ spin_lock(&css_pipe->qlock);
for (q = 0; q < IPU3_CSS_QUEUES; q++)
- if (!list_empty(&css->queue[q].bufs))
+ if (!list_empty(&css_pipe->queue[q].bufs))
break;
- spin_unlock(&css->qlock);
-
+ spin_unlock(&css_pipe->qlock);
return (q == IPU3_CSS_QUEUES);
}
+bool ipu3_css_queue_empty(struct ipu3_css *css)
+{
+ unsigned int pipe;
+ bool ret = true;
+
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
+ ret &= ipu3_css_pipe_queue_empty(css, pipe);
+
+ return ret;
+}
+
bool ipu3_css_is_streaming(struct ipu3_css *css)
{
return css->streaming;
}
-void ipu3_css_cleanup(struct ipu3_css *css)
+static int ipu3_css_map_init(struct ipu3_css *css, unsigned int pipe)
{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
unsigned int p, q, i;
- ipu3_css_stop_streaming(css);
- ipu3_css_binary_cleanup(css);
+ /* Allocate and map common structures with imgu hardware */
+ for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
+ for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->
+ xmem_sp_stage_ptrs[p][i],
+ sizeof(struct imgu_abi_sp_stage)))
+ return -ENOMEM;
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->
+ xmem_isp_stage_ptrs[p][i],
+ sizeof(struct imgu_abi_isp_stage)))
+ return -ENOMEM;
+ }
- for (q = 0; q < IPU3_CSS_QUEUES; q++)
- for (i = 0; i < ARRAY_SIZE(css->abi_buffers[q]); i++)
- ipu3_dmamap_free(css->dev, &css->abi_buffers[q][i]);
+ if (!ipu3_dmamap_alloc(imgu, &css_pipe->sp_ddr_ptrs,
+ ALIGN(sizeof(struct imgu_abi_ddr_address_map),
+ IMGU_ABI_ISP_DDR_WORD_BYTES)))
+ return -ENOMEM;
+
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
+
+ for (i = 0; i < abi_buf_num; i++)
+ if (!ipu3_dmamap_alloc(imgu,
+ &css_pipe->abi_buffers[q][i],
+ sizeof(struct imgu_abi_buffer)))
+ return -ENOMEM;
+ }
+
+ if (ipu3_css_binary_preallocate(css, pipe)) {
+ ipu3_css_binary_cleanup(css, pipe);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ipu3_css_pipe_cleanup(struct ipu3_css *css, unsigned int pipe)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ unsigned int p, q, i, abi_buf_num;
+
+ ipu3_css_binary_cleanup(css, pipe);
+
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]);
+ for (i = 0; i < abi_buf_num; i++)
+ ipu3_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]);
+ }
for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
- ipu3_dmamap_free(css->dev,
- &css->xmem_sp_stage_ptrs[p][i]);
- ipu3_dmamap_free(css->dev,
- &css->xmem_isp_stage_ptrs[p][i]);
+ ipu3_dmamap_free(imgu,
+ &css_pipe->xmem_sp_stage_ptrs[p][i]);
+ ipu3_dmamap_free(imgu,
+ &css_pipe->xmem_isp_stage_ptrs[p][i]);
}
- ipu3_dmamap_free(css->dev, &css->sp_ddr_ptrs);
- ipu3_dmamap_free(css->dev, &css->xmem_sp_group_ptrs);
+ ipu3_dmamap_free(imgu, &css_pipe->sp_ddr_ptrs);
+}
+
+void ipu3_css_cleanup(struct ipu3_css *css)
+{
+ struct imgu_device *imgu = dev_get_drvdata(css->dev);
+ unsigned int pipe;
+ ipu3_css_stop_streaming(css);
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++)
+ ipu3_css_pipe_cleanup(css, pipe);
+ ipu3_dmamap_free(imgu, &css->xmem_sp_group_ptrs);
ipu3_css_fw_cleanup(css);
}
int ipu3_css_init(struct device *dev, struct ipu3_css *css,
void __iomem *base, int length)
{
- int r, p, q, i;
+ struct imgu_device *imgu = dev_get_drvdata(dev);
+ int r, q, pipe;
/* Initialize main data structure */
css->dev = dev;
css->base = base;
css->iomem_length = length;
- css->current_binary = IPU3_CSS_DEFAULT_BINARY;
- css->pipe_id = IPU3_CSS_PIPE_ID_NUM;
- css->vf_output_en = IPU3_NODE_VF_DISABLED;
- spin_lock_init(&css->qlock);
- for (q = 0; q < IPU3_CSS_QUEUES; q++) {
- r = ipu3_css_queue_init(&css->queue[q], NULL, 0);
- if (r)
+ for (pipe = 0; pipe < IMGU_MAX_PIPE_NUM; pipe++) {
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+
+ css_pipe->vf_output_en = false;
+ spin_lock_init(&css_pipe->qlock);
+ css_pipe->bindex = IPU3_CSS_DEFAULT_BINARY;
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ for (q = 0; q < IPU3_CSS_QUEUES; q++) {
+ r = ipu3_css_queue_init(&css_pipe->queue[q], NULL, 0);
+ if (r)
+ return r;
+ }
+ r = ipu3_css_map_init(css, pipe);
+ if (r) {
+ ipu3_css_cleanup(css);
return r;
+ }
}
+ if (!ipu3_dmamap_alloc(imgu, &css->xmem_sp_group_ptrs,
+ sizeof(struct imgu_abi_sp_group)))
+ return -ENOMEM;
r = ipu3_css_fw_init(css);
if (r)
return r;
- /* Allocate and map common structures with imgu hardware */
-
- for (p = 0; p < IPU3_CSS_PIPE_ID_NUM; p++)
- for (i = 0; i < IMGU_ABI_MAX_STAGES; i++) {
- if (!ipu3_dmamap_alloc(dev,
- &css->xmem_sp_stage_ptrs[p][i],
- sizeof(struct imgu_abi_sp_stage)))
- goto error_no_memory;
- if (!ipu3_dmamap_alloc(dev,
- &css->xmem_isp_stage_ptrs[p][i],
- sizeof(struct imgu_abi_isp_stage)))
- goto error_no_memory;
- }
-
- if (!ipu3_dmamap_alloc(dev, &css->sp_ddr_ptrs,
- ALIGN(sizeof(struct imgu_abi_ddr_address_map),
- IMGU_ABI_ISP_DDR_WORD_BYTES)))
- goto error_no_memory;
-
- if (!ipu3_dmamap_alloc(dev, &css->xmem_sp_group_ptrs,
- sizeof(struct imgu_abi_sp_group)))
- goto error_no_memory;
-
- for (q = 0; q < IPU3_CSS_QUEUES; q++)
- for (i = 0; i < ARRAY_SIZE(css->abi_buffers[q]); i++)
- if (!ipu3_dmamap_alloc(dev, &css->abi_buffers[q][i],
- sizeof(struct imgu_abi_buffer)))
- goto error_no_memory;
-
- if (ipu3_css_binary_preallocate(css))
- goto error_binary_setup;
-
return 0;
-
-error_binary_setup:
- ipu3_css_binary_cleanup(css);
-error_no_memory:
- ipu3_css_cleanup(css);
-
- return -ENOMEM;
}
static u32 ipu3_css_adjust(u32 res, u32 align)
@@ -1511,11 +1592,13 @@ static u32 ipu3_css_adjust(u32 res, u32 align)
/* Select a binary matching the required resolutions and formats */
static int ipu3_css_find_binary(struct ipu3_css *css,
+ unsigned int pipe,
struct ipu3_css_queue queue[IPU3_CSS_QUEUES],
struct v4l2_rect rects[IPU3_CSS_RECTS])
{
const int binary_nr = css->fwp->file_header.binary_nr;
- unsigned int binary_mode = (css->pipe_id == IPU3_CSS_PIPE_ID_CAPTURE) ?
+ unsigned int binary_mode =
+ (css->pipes[pipe].pipe_id == IPU3_CSS_PIPE_ID_CAPTURE) ?
IA_CSS_BINARY_MODE_PRIMARY : IA_CSS_BINARY_MODE_VIDEO;
const struct v4l2_pix_format_mplane *in =
&queue[IPU3_CSS_QUEUE_IN].fmt.mpix;
@@ -1616,7 +1699,8 @@ static int ipu3_css_find_binary(struct ipu3_css *css,
}
/* All checks passed, select the binary */
- dev_dbg(css->dev, "using binary %s\n", name);
+ dev_dbg(css->dev, "using binary %s id = %u\n", name,
+ bi->info.isp.sp.id);
return i;
}
@@ -1633,7 +1717,8 @@ static int ipu3_css_find_binary(struct ipu3_css *css,
*/
int ipu3_css_fmt_try(struct ipu3_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
- struct v4l2_rect *rects[IPU3_CSS_RECTS])
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe)
{
static const u32 EFF_ALIGN_W = 2;
static const u32 BDS_ALIGN_W = 4;
@@ -1665,13 +1750,7 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
&q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
struct v4l2_pix_format_mplane *const vf =
&q[IPU3_CSS_QUEUE_VF].fmt.mpix;
- int binary, i, s;
-
- /* Decide which pipe to use */
- if (css->vf_output_en == IPU3_NODE_PV_ENABLED)
- css->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
- else if (css->vf_output_en == IPU3_NODE_VF_ENABLED)
- css->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ int i, s;
/* Adjust all formats, get statistics buffer sizes and formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
@@ -1706,9 +1785,8 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
/* Always require one input and vf only if out is also enabled */
if (!ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
- (ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_VF]) &&
- !ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT]))) {
- dev_dbg(css->dev, "required queues are disabled\n");
+ !ipu3_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
+ dev_warn(css->dev, "required queues are disabled\n");
return -EINVAL;
}
@@ -1742,17 +1820,21 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
vf->width = ipu3_css_adjust(vf->width, VF_ALIGN_W);
vf->height = ipu3_css_adjust(vf->height, 1);
- s = (bds->width - gdc->width) / 2 - FILTER_SIZE;
+ s = (bds->width - gdc->width) / 2;
env->width = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
- s = (bds->height - gdc->height) / 2 - FILTER_SIZE;
+ s = (bds->height - gdc->height) / 2;
env->height = s < MIN_ENVELOPE ? MIN_ENVELOPE : s;
- binary = ipu3_css_find_binary(css, q, r);
- if (binary < 0) {
+ css->pipes[pipe].bindex =
+ ipu3_css_find_binary(css, pipe, q, r);
+ if (css->pipes[pipe].bindex < 0) {
dev_err(css->dev, "failed to find suitable binary\n");
return -EINVAL;
}
+ dev_dbg(css->dev, "Binary index %d for pipe %d found.",
+ css->pipes[pipe].bindex, pipe);
+
/* Final adjustment and set back the queried formats */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
if (fmts[i]) {
@@ -1776,16 +1858,18 @@ int ipu3_css_fmt_try(struct ipu3_css *css,
bds->width, bds->height, gdc->width, gdc->height,
out->width, out->height, vf->width, vf->height);
- return binary;
+ return 0;
}
int ipu3_css_fmt_set(struct ipu3_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
- struct v4l2_rect *rects[IPU3_CSS_RECTS])
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe)
{
struct v4l2_rect rect_data[IPU3_CSS_RECTS];
struct v4l2_rect *all_rects[IPU3_CSS_RECTS];
int i, r;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
for (i = 0; i < IPU3_CSS_RECTS; i++) {
if (rects[i])
@@ -1794,17 +1878,16 @@ int ipu3_css_fmt_set(struct ipu3_css *css,
memset(&rect_data[i], 0, sizeof(rect_data[i]));
all_rects[i] = &rect_data[i];
}
- r = ipu3_css_fmt_try(css, fmts, all_rects);
+ r = ipu3_css_fmt_try(css, fmts, all_rects, pipe);
if (r < 0)
return r;
- css->current_binary = (unsigned int)r;
for (i = 0; i < IPU3_CSS_QUEUES; i++)
- if (ipu3_css_queue_init(&css->queue[i], fmts[i],
+ if (ipu3_css_queue_init(&css_pipe->queue[i], fmts[i],
IPU3_CSS_QUEUE_TO_FLAGS(i)))
return -EINVAL;
for (i = 0; i < IPU3_CSS_RECTS; i++) {
- css->rect[i] = rect_data[i];
+ css_pipe->rect[i] = rect_data[i];
if (rects[i])
*rects[i] = rect_data[i];
}
@@ -1834,13 +1917,14 @@ int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt)
* Returns 0 on success, -EBUSY if the buffer queue is full, or some other
* code on error conditions.
*/
-int ipu3_css_buf_queue(struct ipu3_css *css, struct ipu3_css_buffer *b)
+int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_css_buffer *b)
{
- static const int thread;
struct imgu_abi_buffer *abi_buf;
struct imgu_addr_t *buf_addr;
u32 data;
int r;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
if (!css->streaming)
return -EPROTO; /* CSS or buffer in wrong state */
@@ -1849,11 +1933,11 @@ int ipu3_css_buf_queue(struct ipu3_css *css, struct ipu3_css_buffer *b)
return -EINVAL;
b->queue_pos = ipu3_css_queue_pos(css, ipu3_css_queues[b->queue].qid,
- thread);
+ pipe);
- if (b->queue_pos >= ARRAY_SIZE(css->abi_buffers[b->queue]))
+ if (b->queue_pos >= ARRAY_SIZE(css->pipes[pipe].abi_buffers[b->queue]))
return -EIO;
- abi_buf = css->abi_buffers[b->queue][b->queue_pos].vaddr;
+ abi_buf = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].vaddr;
/* Fill struct abi_buffer for firmware */
memset(abi_buf, 0, sizeof(*abi_buf));
@@ -1866,30 +1950,31 @@ int ipu3_css_buf_queue(struct ipu3_css *css, struct ipu3_css_buffer *b)
if (b->queue == IPU3_CSS_QUEUE_OUT)
abi_buf->payload.frame.padded_width =
- css->queue[IPU3_CSS_QUEUE_OUT].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_OUT].width_pad;
if (b->queue == IPU3_CSS_QUEUE_VF)
abi_buf->payload.frame.padded_width =
- css->queue[IPU3_CSS_QUEUE_VF].width_pad;
+ css_pipe->queue[IPU3_CSS_QUEUE_VF].width_pad;
- spin_lock(&css->qlock);
- list_add_tail(&b->list, &css->queue[b->queue].bufs);
- spin_unlock(&css->qlock);
+ spin_lock(&css_pipe->qlock);
+ list_add_tail(&b->list, &css_pipe->queue[b->queue].bufs);
+ spin_unlock(&css_pipe->qlock);
b->state = IPU3_CSS_BUFFER_QUEUED;
- data = css->abi_buffers[b->queue][b->queue_pos].daddr;
+ data = css->pipes[pipe].abi_buffers[b->queue][b->queue_pos].daddr;
r = ipu3_css_queue_data(css, ipu3_css_queues[b->queue].qid,
- thread, data);
+ pipe, data);
if (r < 0)
goto queueing_failed;
- data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread,
+ data = IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
ipu3_css_queues[b->queue].qid);
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, 0, data);
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe, data);
if (r < 0)
goto queueing_failed;
- dev_dbg(css->dev, "queued buffer %p to css queue %i\n", b, b->queue);
+ dev_dbg(css->dev, "queued buffer %p to css queue %i in pipe %d\n",
+ b, b->queue, pipe);
return 0;
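A sketch of the event-word packing used above and in the dequeue path below: the pipe index rides in the upper bits of the event word (pipe << 16 on the enqueue side, recovered with IMGU_ABI_EVTTYPE_PIPE_MASK/_SHIFT on dequeue). The concrete mask width and event code here are assumptions for illustration, not the ABI values.

#include <stdio.h>

#define EVTTYPE_PIPE_SHIFT	16
#define EVTTYPE_PIPE_MASK	(0xfU << EVTTYPE_PIPE_SHIFT)	/* assumed width */

int main(void)
{
	unsigned int start_stream = 0x80;	/* made-up event code */
	unsigned int pipe = 1;
	unsigned int event = start_stream | pipe << EVTTYPE_PIPE_SHIFT;

	/* The dequeue side recovers the pipe with the mask/shift pair */
	printf("pipe=%u\n", (event & EVTTYPE_PIPE_MASK) >> EVTTYPE_PIPE_SHIFT);
	return 0;
}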
@@ -1908,7 +1993,6 @@ queueing_failed:
*/
struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
{
- static const int thread;
static const unsigned char evtype_to_queue[] = {
[IMGU_ABI_EVTTYPE_INPUT_FRAME_DONE] = IPU3_CSS_QUEUE_IN,
[IMGU_ABI_EVTTYPE_OUT_FRAME_DONE] = IPU3_CSS_QUEUE_OUT,
@@ -1918,6 +2002,7 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
struct ipu3_css_buffer *b = ERR_PTR(-EAGAIN);
u32 event, daddr;
int evtype, pipe, pipeid, queue, qid, r;
+ struct ipu3_css_pipe *css_pipe;
if (!css->streaming)
return ERR_PTR(-EPROTO);
@@ -1941,11 +2026,16 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
queue = evtype_to_queue[evtype];
qid = ipu3_css_queues[queue].qid;
+ if (pipe >= IMGU_MAX_PIPE_NUM) {
+ dev_err(css->dev, "Invalid pipe: %i\n", pipe);
+ return ERR_PTR(-EIO);
+ }
+
if (qid >= IMGU_ABI_QUEUE_NUM) {
dev_err(css->dev, "Invalid qid: %i\n", qid);
return ERR_PTR(-EIO);
}
-
+ css_pipe = &css->pipes[pipe];
dev_dbg(css->dev,
"event: buffer done 0x%x queue %i pipe %i pipeid %i\n",
event, queue, pipe, pipeid);
@@ -1957,39 +2047,46 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
return ERR_PTR(-EIO);
}
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, thread,
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED(qid));
if (r < 0) {
dev_err(css->dev, "failed to queue event\n");
return ERR_PTR(-EIO);
}
- spin_lock(&css->qlock);
- if (list_empty(&css->queue[queue].bufs)) {
- spin_unlock(&css->qlock);
+ spin_lock(&css_pipe->qlock);
+ if (list_empty(&css_pipe->queue[queue].bufs)) {
+ spin_unlock(&css_pipe->qlock);
dev_err(css->dev, "event on empty queue\n");
return ERR_PTR(-EIO);
}
- b = list_first_entry(&css->queue[queue].bufs,
+ b = list_first_entry(&css_pipe->queue[queue].bufs,
struct ipu3_css_buffer, list);
if (queue != b->queue ||
- daddr != css->abi_buffers[b->queue][b->queue_pos].daddr) {
- spin_unlock(&css->qlock);
+ daddr != css_pipe->abi_buffers
+ [b->queue][b->queue_pos].daddr) {
+ spin_unlock(&css_pipe->qlock);
dev_err(css->dev, "dequeued bad buffer 0x%x\n", daddr);
return ERR_PTR(-EIO);
}
+
+ dev_dbg(css->dev, "buffer 0x%8x done from pipe %d\n", daddr, pipe);
+ b->pipe = pipe;
b->state = IPU3_CSS_BUFFER_DONE;
list_del(&b->list);
- spin_unlock(&css->qlock);
+ spin_unlock(&css_pipe->qlock);
break;
case IMGU_ABI_EVTTYPE_PIPELINE_DONE:
- dev_dbg(css->dev, "event: pipeline done 0x%x for frame %ld\n",
- event, css->frame);
+ pipe = (event & IMGU_ABI_EVTTYPE_PIPE_MASK) >>
+ IMGU_ABI_EVTTYPE_PIPE_SHIFT;
+ if (pipe >= IMGU_MAX_PIPE_NUM) {
+ dev_err(css->dev, "Invalid pipe: %i\n", pipe);
+ return ERR_PTR(-EIO);
+ }
- if (css->frame == LONG_MAX)
- css->frame = 0;
- else
- css->frame++;
+ css_pipe = &css->pipes[pipe];
+ dev_dbg(css->dev, "event: pipeline done 0x%8x for pipe %d\n",
+ event, pipe);
break;
case IMGU_ABI_EVTTYPE_TIMER:
r = ipu3_css_dequeue_data(css, IMGU_ABI_QUEUE_EVENT_ID, &event);
@@ -2030,11 +2127,12 @@ struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css)
* Return index to css->parameter_set_info which has the newly created
* parameters or negative value on error.
*/
-int ipu3_css_set_parameters(struct ipu3_css *css,
+int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
struct ipu3_uapi_params *set_params)
{
static const unsigned int queue_id = IMGU_ABI_QUEUE_A_ID;
- const int stage = 0, thread = 0;
+ struct ipu3_css_pipe *css_pipe = &css->pipes[pipe];
+ const int stage = 0;
const struct imgu_fw_info *bi;
int obgrid_size;
unsigned int stripes, i;
@@ -2055,58 +2153,57 @@ int ipu3_css_set_parameters(struct ipu3_css *css,
if (!css->streaming)
return -EPROTO;
- bi = &css->fwp->binary_header[css->current_binary];
+ dev_dbg(css->dev, "%s for pipe %d", __func__, pipe);
+
+ bi = &css->fwp->binary_header[css_pipe->bindex];
obgrid_size = ipu3_css_fw_obgrid_size(bi);
stripes = bi->info.isp.sp.iterator.num_stripes ? : 1;
/*
- * Check that we can get a new parameter_set_info from the pool.
- * If this succeeds, then all of the other pool_get() calls below
- * should also succeed.
+ * TODO(b/118782861): If userspace queues more than 4 buffers, the
+ * parameters from previous buffers will be overwritten. Fix the driver
+ * not to allow this.
*/
- if (ipu3_css_pool_get(&css->pool.parameter_set_info, css->frame) < 0)
- goto fail_no_put;
- param_set = ipu3_css_pool_last(&css->pool.parameter_set_info, 0)->vaddr;
+ ipu3_css_pool_get(&css_pipe->pool.parameter_set_info);
+ param_set = ipu3_css_pool_last(&css_pipe->pool.parameter_set_info,
+ 0)->vaddr;
- map = ipu3_css_pool_last(&css->pool.acc, 0);
/* Get a new acc only if new parameters given, or none yet */
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
if (set_params || !map->vaddr) {
- if (ipu3_css_pool_get(&css->pool.acc, css->frame) < 0)
- goto fail;
- map = ipu3_css_pool_last(&css->pool.acc, 0);
+ ipu3_css_pool_get(&css_pipe->pool.acc);
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
acc = map->vaddr;
}
/* Get new VMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_VMEM0;
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params ||
set_params->use.tnr3_vmem_params ||
set_params->use.xnr3_vmem_params))) {
- if (ipu3_css_pool_get(&css->pool.binary_params_p[m],
- css->frame) < 0)
- goto fail;
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 0);
+ ipu3_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
vmem0 = map->vaddr;
}
/* Get new DMEM0 only if needed, or none yet */
m = IMGU_ABI_MEM_ISP_DMEM0;
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params ||
set_params->use.xnr3_dmem_params))) {
- if (ipu3_css_pool_get(&css->pool.binary_params_p[m],
- css->frame) < 0)
- goto fail;
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 0);
+ ipu3_css_pool_get(&css_pipe->pool.binary_params_p[m]);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
dmem0 = map->vaddr;
}
/* Configure acc parameter cluster */
if (acc) {
- map = ipu3_css_pool_last(&css->pool.acc, 1);
- r = ipu3_css_cfg_acc(css, use, acc, map->vaddr, set_params ?
- &set_params->acc_param : NULL);
+ /* get acc_old */
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 1);
+ /* user acc */
+ r = ipu3_css_cfg_acc(css, pipe, use, acc, map->vaddr,
+ set_params ? &set_params->acc_param : NULL);
if (r < 0)
goto fail;
}
@@ -2114,16 +2211,18 @@ int ipu3_css_set_parameters(struct ipu3_css *css,
/* Configure late binding parameters */
if (vmem0) {
m = IMGU_ABI_MEM_ISP_VMEM0;
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 1);
- r = ipu3_css_cfg_vmem0(css, use, vmem0, map->vaddr, set_params);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = ipu3_css_cfg_vmem0(css, pipe, use, vmem0,
+ map->vaddr, set_params);
if (r < 0)
goto fail;
}
if (dmem0) {
m = IMGU_ABI_MEM_ISP_DMEM0;
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 1);
- r = ipu3_css_cfg_dmem0(css, use, dmem0, map->vaddr, set_params);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 1);
+ r = ipu3_css_cfg_dmem0(css, pipe, use, dmem0,
+ map->vaddr, set_params);
if (r < 0)
goto fail;
}
@@ -2134,30 +2233,27 @@ int ipu3_css_set_parameters(struct ipu3_css *css,
unsigned int g = IPU3_CSS_RECT_GDC;
unsigned int e = IPU3_CSS_RECT_ENVELOPE;
- map = ipu3_css_pool_last(&css->pool.gdc, 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
if (!map->vaddr) {
- if (ipu3_css_pool_get(&css->pool.gdc, css->frame) < 0)
- goto fail;
- map = ipu3_css_pool_last(&css->pool.gdc, 0);
+ ipu3_css_pool_get(&css_pipe->pool.gdc);
+ map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
gdc = map->vaddr;
- ipu3_css_cfg_gdc_table(gdc,
- css->aux_frames[a].bytesperline /
- css->aux_frames[a].bytesperpixel,
- css->aux_frames[a].height,
- css->rect[g].width,
- css->rect[g].height,
- css->rect[e].width + FILTER_SIZE,
- css->rect[e].height +
- FILTER_SIZE);
+ ipu3_css_cfg_gdc_table(map->vaddr,
+ css_pipe->aux_frames[a].bytesperline /
+ css_pipe->aux_frames[a].bytesperpixel,
+ css_pipe->aux_frames[a].height,
+ css_pipe->rect[g].width,
+ css_pipe->rect[g].height,
+ css_pipe->rect[e].width,
+ css_pipe->rect[e].height);
}
}
/* Get a new obgrid only if a new obgrid is given, or none yet */
- map = ipu3_css_pool_last(&css->pool.obgrid, 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
if (!map->vaddr || (set_params && set_params->use.obgrid_param)) {
- if (ipu3_css_pool_get(&css->pool.obgrid, css->frame) < 0)
- goto fail;
- map = ipu3_css_pool_last(&css->pool.obgrid, 0);
+ ipu3_css_pool_get(&css_pipe->pool.obgrid);
+ map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
obgrid = map->vaddr;
/* Configure optical black level grid (obgrid) */
@@ -2171,29 +2267,31 @@ int ipu3_css_set_parameters(struct ipu3_css *css,
/* Configure parameter set info, queued to `queue_id' */
memset(param_set, 0, sizeof(*param_set));
- map = ipu3_css_pool_last(&css->pool.acc, 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.acc, 0);
param_set->mem_map.acc_cluster_params_for_sp = map->daddr;
- map = ipu3_css_pool_last(&css->pool.gdc, 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.gdc, 0);
param_set->mem_map.dvs_6axis_params_y = map->daddr;
- map = ipu3_css_pool_last(&css->pool.obgrid, 0);
- for (i = 0; i < stripes; i++)
+ for (i = 0; i < stripes; i++) {
+ map = ipu3_css_pool_last(&css_pipe->pool.obgrid, 0);
param_set->mem_map.obgrid_tbl[i] =
- map->daddr + (obgrid_size / stripes) * i;
+ map->daddr + (obgrid_size / stripes) * i;
+ }
for (m = 0; m < IMGU_ABI_NUM_MEMORIES; m++) {
- map = ipu3_css_pool_last(&css->pool.binary_params_p[m], 0);
+ map = ipu3_css_pool_last(&css_pipe->pool.binary_params_p[m], 0);
param_set->mem_map.isp_mem_param[stage][m] = map->daddr;
}
+
/* Then queue the new parameter buffer */
- map = ipu3_css_pool_last(&css->pool.parameter_set_info, 0);
- r = ipu3_css_queue_data(css, queue_id, thread, map->daddr);
+ map = ipu3_css_pool_last(&css_pipe->pool.parameter_set_info, 0);
+ r = ipu3_css_queue_data(css, queue_id, pipe, map->daddr);
if (r < 0)
goto fail;
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, 0,
- IMGU_ABI_EVENT_BUFFER_ENQUEUED(thread,
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
+ IMGU_ABI_EVENT_BUFFER_ENQUEUED(pipe,
queue_id));
if (r < 0)
goto fail_no_put;
@@ -2208,7 +2306,7 @@ int ipu3_css_set_parameters(struct ipu3_css *css,
break;
if (r)
goto fail_no_put;
- r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, thread,
+ r = ipu3_css_queue_data(css, IMGU_ABI_QUEUE_EVENT_ID, pipe,
IMGU_ABI_EVENT_BUFFER_DEQUEUED
(queue_id));
if (r < 0) {
@@ -2226,19 +2324,21 @@ fail:
* parameters again later.
*/
- ipu3_css_pool_put(&css->pool.parameter_set_info);
+ ipu3_css_pool_put(&css_pipe->pool.parameter_set_info);
if (acc)
- ipu3_css_pool_put(&css->pool.acc);
+ ipu3_css_pool_put(&css_pipe->pool.acc);
if (gdc)
- ipu3_css_pool_put(&css->pool.gdc);
+ ipu3_css_pool_put(&css_pipe->pool.gdc);
if (obgrid)
- ipu3_css_pool_put(&css->pool.obgrid);
+ ipu3_css_pool_put(&css_pipe->pool.obgrid);
if (vmem0)
ipu3_css_pool_put(
- &css->pool.binary_params_p[IMGU_ABI_MEM_ISP_VMEM0]);
+ &css_pipe->pool.binary_params_p
+ [IMGU_ABI_MEM_ISP_VMEM0]);
if (dmem0)
ipu3_css_pool_put(
- &css->pool.binary_params_p[IMGU_ABI_MEM_ISP_DMEM0]);
+ &css_pipe->pool.binary_params_p
+ [IMGU_ABI_MEM_ISP_DMEM0]);
fail_no_put:
return r;
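The parameter-pool calls in the hunks above follow a small ring discipline: ipu3_css_pool_get() claims the next slot, ipu3_css_pool_last(pool, 0) returns the slot just claimed, and pool_last(pool, 1) the one claimed before it (the "acc_old" reference). A minimal sketch of that rotation, with hypothetical names, since the pool internals are outside this hunk; the driver's real pool also tracks DMA mappings:

    #define POOL_ENTRY_NUM 4

    struct pool_map { void *vaddr; };

    struct param_pool {
    	struct pool_map entry[POOL_ENTRY_NUM];
    	unsigned int last;		/* most recently claimed slot */
    };

    static void pool_get(struct param_pool *pool)
    {
    	pool->last = (pool->last + 1) % POOL_ENTRY_NUM;
    }

    /* n = 0 returns the newest slot, n = 1 the one claimed before it */
    static struct pool_map *pool_last(struct param_pool *pool, unsigned int n)
    {
    	unsigned int i = (pool->last + POOL_ENTRY_NUM - n) % POOL_ENTRY_NUM;

    	return &pool->entry[i];
    }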
diff --git a/drivers/media/pci/intel/ipu3/ipu3-css.h b/drivers/media/pci/intel/ipu3/ipu3-css.h
index 44081c09b02b08..8580b9dde23594 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-css.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-css.h
@@ -13,6 +13,7 @@
/* 2 stages for split isp pipeline, 1 for scaling */
#define IMGU_NUM_SP 2
#define IMGU_MAX_PIPELINE_NUM 20
+#define IMGU_MAX_PIPE_NUM 2
/* For DVS etc., format FRAME_FMT_YUV420_16 */
#define IPU3_CSS_AUX_FRAME_REF 0
@@ -57,12 +58,6 @@ struct ipu3_css_resolution {
u32 h;
};
-enum ipu3_css_vf_status {
- IPU3_NODE_VF_ENABLED,
- IPU3_NODE_PV_ENABLED,
- IPU3_NODE_VF_DISABLED
-};
-
enum ipu3_css_buffer_state {
IPU3_CSS_BUFFER_NEW, /* Not yet queued */
IPU3_CSS_BUFFER_QUEUED, /* Queued, waiting to be filled */
@@ -77,6 +72,7 @@ struct ipu3_css_buffer {
enum ipu3_css_buffer_state state;
struct list_head list;
u8 queue_pos;
+ unsigned int pipe;
};
struct ipu3_css_format {
@@ -104,27 +100,23 @@ struct ipu3_css_queue {
struct list_head bufs;
};
-/* IPU3 Camera Sub System structure */
-struct ipu3_css {
- struct device *dev;
- void __iomem *base;
- const struct firmware *fw;
- struct imgu_fw_header *fwp;
- int iomem_length;
- int fw_bl, fw_sp[IMGU_NUM_SP]; /* Indices of bl and SP binaries */
- struct ipu3_css_map *binary; /* fw binaries mapped to device */
- unsigned int current_binary; /* Currently selected binary */
- bool streaming; /* true when streaming is enabled */
- long frame; /* Latest frame not yet processed */
- enum ipu3_css_pipe_id pipe_id; /* CSS pipe ID. */
+struct ipu3_css_pipe {
+ enum ipu3_css_pipe_id pipe_id;
+ unsigned int bindex;
+
+ struct ipu3_css_queue queue[IPU3_CSS_QUEUES];
+ struct v4l2_rect rect[IPU3_CSS_RECTS];
+
+ bool vf_output_en;
+
+ spinlock_t qlock;
/* Data structures shared with IMGU and driver, always allocated */
+ struct ipu3_css_map sp_ddr_ptrs;
struct ipu3_css_map xmem_sp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
[IMGU_ABI_MAX_STAGES];
struct ipu3_css_map xmem_isp_stage_ptrs[IPU3_CSS_PIPE_ID_NUM]
[IMGU_ABI_MAX_STAGES];
- struct ipu3_css_map sp_ddr_ptrs;
- struct ipu3_css_map xmem_sp_group_ptrs;
/*
* Data structures shared with IMGU and driver, binary specific.
@@ -141,11 +133,6 @@ struct ipu3_css {
unsigned int bytesperpixel;
} aux_frames[IPU3_CSS_AUX_FRAME_TYPES];
- struct ipu3_css_queue queue[IPU3_CSS_QUEUES];
- struct v4l2_rect rect[IPU3_CSS_RECTS];
- struct ipu3_css_map abi_buffers[IPU3_CSS_QUEUES]
- [IMGU_ABI_HOST2SP_BUFQ_SIZE];
-
struct {
struct ipu3_css_pool parameter_set_info;
struct ipu3_css_pool acc;
@@ -155,9 +142,26 @@ struct ipu3_css {
struct ipu3_css_pool binary_params_p[IMGU_ABI_NUM_MEMORIES];
} pool;
- enum ipu3_css_vf_status vf_output_en;
- /* Protect access to css->queue[] */
- spinlock_t qlock;
+ struct ipu3_css_map abi_buffers[IPU3_CSS_QUEUES]
+ [IMGU_ABI_HOST2SP_BUFQ_SIZE];
+};
+
+/* IPU3 Camera Sub System structure */
+struct ipu3_css {
+ struct device *dev;
+ void __iomem *base;
+ const struct firmware *fw;
+ struct imgu_fw_header *fwp;
+ int iomem_length;
+ int fw_bl, fw_sp[IMGU_NUM_SP]; /* Indices of bl and SP binaries */
+ struct ipu3_css_map *binary; /* fw binaries mapped to device */
+ bool streaming; /* true when streaming is enabled */
+
+ struct ipu3_css_pipe pipes[IMGU_MAX_PIPE_NUM];
+ struct ipu3_css_map xmem_sp_group_ptrs;
+
+ /* enabled pipe(s) */
+ DECLARE_BITMAP(enabled_pipes, IMGU_MAX_PIPE_NUM);
};
/******************* css v4l *******************/
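The enabled_pipes bitmap added to struct ipu3_css above is flipped from ipu3_link_setup() whenever a pipe's input link changes, and walked with for_each_set_bit() when streaming starts. A self-contained sketch of that bookkeeping, with hypothetical helper names:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    #define IMGU_MAX_PIPE_NUM 2	/* mirrors the definition above */

    static DECLARE_BITMAP(enabled_pipes, IMGU_MAX_PIPE_NUM);

    static void pipe_link_changed(unsigned int pipe, bool enabled)
    {
    	if (enabled)
    		__set_bit(pipe, enabled_pipes);
    	else
    		__clear_bit(pipe, enabled_pipes);
    }

    static void start_enabled_pipes(void)
    {
    	unsigned int pipe;

    	for_each_set_bit(pipe, enabled_pipes, IMGU_MAX_PIPE_NUM) {
    		/* call s_stream on this pipe's subdev, as ipu3-v4l2.c does */
    	}
    }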
@@ -166,17 +170,21 @@ int ipu3_css_init(struct device *dev, struct ipu3_css *css,
void ipu3_css_cleanup(struct ipu3_css *css);
int ipu3_css_fmt_try(struct ipu3_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
- struct v4l2_rect *rects[IPU3_CSS_RECTS]);
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe);
int ipu3_css_fmt_set(struct ipu3_css *css,
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES],
- struct v4l2_rect *rects[IPU3_CSS_RECTS]);
+ struct v4l2_rect *rects[IPU3_CSS_RECTS],
+ unsigned int pipe);
int ipu3_css_meta_fmt_set(struct v4l2_meta_format *fmt);
-int ipu3_css_buf_queue(struct ipu3_css *css, struct ipu3_css_buffer *b);
+int ipu3_css_buf_queue(struct ipu3_css *css, unsigned int pipe,
+ struct ipu3_css_buffer *b);
struct ipu3_css_buffer *ipu3_css_buf_dequeue(struct ipu3_css *css);
int ipu3_css_start_streaming(struct ipu3_css *css);
void ipu3_css_stop_streaming(struct ipu3_css *css);
bool ipu3_css_queue_empty(struct ipu3_css *css);
bool ipu3_css_is_streaming(struct ipu3_css *css);
+bool ipu3_css_pipe_queue_empty(struct ipu3_css *css, unsigned int pipe);
/******************* css hw *******************/
int ipu3_css_set_powerup(struct device *dev, void __iomem *base);
@@ -184,7 +192,7 @@ void ipu3_css_set_powerdown(struct device *dev, void __iomem *base);
int ipu3_css_irq_ack(struct ipu3_css *css);
/******************* set parameters ************/
-int ipu3_css_set_parameters(struct ipu3_css *css,
+int ipu3_css_set_parameters(struct ipu3_css *css, unsigned int pipe,
struct ipu3_uapi_params *set_params);
/******************* auxiliary helpers *******************/
diff --git a/drivers/media/pci/intel/ipu3/ipu3-dmamap.c b/drivers/media/pci/intel/ipu3/ipu3-dmamap.c
index cd8f37a221e8ce..103b30b24b1171 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-dmamap.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-dmamap.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018 Intel Corporation
- * Copyright (C) 2018 Google, Inc.
+ * Copyright 2018 Google LLC.
*
* Author: Tomasz Figa <tfiga@chromium.org>
* Author: Yong Zhi <yong.zhi@intel.com>
@@ -39,7 +39,7 @@ static struct page **ipu3_dmamap_alloc_buffer(size_t size,
const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
/* Allocate mem for array of page ptrs */
- pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
if (!pages)
return NULL;
@@ -86,27 +86,24 @@ static struct page **ipu3_dmamap_alloc_buffer(size_t size,
/**
* ipu3_dmamap_alloc - allocate and map a buffer into KVA
- * @dev: struct device pointer
+ * @imgu: struct imgu_device pointer
* @map: struct to store mapping variables
* @len: size required
*
* Return KVA on success or NULL on failure
*/
-void *ipu3_dmamap_alloc(struct device *dev, struct ipu3_css_map *map,
+void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
size_t len)
{
- struct imgu_device *imgu = dev_get_drvdata(dev);
unsigned long shift = iova_shift(&imgu->iova_domain);
unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
+ struct device *dev = &imgu->pci_dev->dev;
size_t size = PAGE_ALIGN(len);
struct page **pages;
dma_addr_t iovaddr;
struct iova *iova;
int i, rval;
- if (WARN_ON(!dev))
- return NULL;
-
dev_dbg(dev, "%s: allocating %zu\n", __func__, size);
iova = alloc_iova(&imgu->iova_domain, size >> shift,
@@ -164,9 +161,8 @@ out_free_iova:
return NULL;
}
-void ipu3_dmamap_unmap(struct device *dev, struct ipu3_css_map *map)
+void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map)
{
- struct imgu_device *imgu = dev_get_drvdata(dev);
struct iova *iova;
iova = find_iova(&imgu->iova_domain,
@@ -183,17 +179,17 @@ void ipu3_dmamap_unmap(struct device *dev, struct ipu3_css_map *map)
/*
* Counterpart of ipu3_dmamap_alloc
*/
-void ipu3_dmamap_free(struct device *dev, struct ipu3_css_map *map)
+void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map)
{
struct vm_struct *area = map->vma;
- dev_dbg(dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
+ dev_dbg(&imgu->pci_dev->dev, "%s: freeing %zu @ IOVA %pad @ VA %p\n",
__func__, map->size, &map->daddr, map->vaddr);
if (!map->vaddr)
return;
- ipu3_dmamap_unmap(dev, map);
+ ipu3_dmamap_unmap(imgu, map);
if (WARN_ON(!area) || WARN_ON(!area->pages))
return;
@@ -203,10 +199,9 @@ void ipu3_dmamap_free(struct device *dev, struct ipu3_css_map *map)
map->vaddr = NULL;
}
-int ipu3_dmamap_map_sg(struct device *dev, struct scatterlist *sglist,
+int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
int nents, struct ipu3_css_map *map)
{
- struct imgu_device *imgu = dev_get_drvdata(dev);
unsigned long shift = iova_shift(&imgu->iova_domain);
struct scatterlist *sg;
struct iova *iova;
@@ -224,7 +219,7 @@ int ipu3_dmamap_map_sg(struct device *dev, struct scatterlist *sglist,
}
size = iova_align(&imgu->iova_domain, size);
- dev_dbg(dev, "dmamap: mapping sg %d entries, %zu pages\n",
+ dev_dbg(&imgu->pci_dev->dev, "dmamap: mapping sg %d entries, %zu pages\n",
nents, size >> shift);
iova = alloc_iova(&imgu->iova_domain, size >> shift,
@@ -232,7 +227,7 @@ int ipu3_dmamap_map_sg(struct device *dev, struct scatterlist *sglist,
if (!iova)
return -ENOMEM;
- dev_dbg(dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
+ dev_dbg(&imgu->pci_dev->dev, "dmamap: iova low pfn %lu, high pfn %lu\n",
iova->pfn_lo, iova->pfn_hi);
if (ipu3_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
@@ -251,30 +246,24 @@ out_fail:
return -EFAULT;
}
-int ipu3_dmamap_init(struct device *dev)
+int ipu3_dmamap_init(struct imgu_device *imgu)
{
- struct imgu_device *imgu = dev_get_drvdata(dev);
- unsigned long order, base_pfn;
- int ret;
+ unsigned long order, base_pfn, end_pfn;
+ int ret = iova_cache_get();
- ret = iova_cache_get();
if (ret)
return ret;
order = __ffs(imgu->mmu->pgsize_bitmap);
- base_pfn = max_t(unsigned long, 1,
- imgu->mmu->aperture_start >> order);
-
- init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn,
- 1UL << (32 - order));
+ base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
+ end_pfn = imgu->mmu->aperture_end >> order;
+ init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn, end_pfn);
return 0;
}
-void ipu3_dmamap_exit(struct device *dev)
+void ipu3_dmamap_exit(struct imgu_device *imgu)
{
- struct imgu_device *imgu = dev_get_drvdata(dev);
-
put_iova_domain(&imgu->iova_domain);
iova_cache_put();
}
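The ipu3_dmamap_init() change above bounds the IOVA domain by the MMU's real aperture instead of the old fixed 1UL << (32 - order) ceiling. A worked sketch of the arithmetic, assuming a 4 KiB minimum page (order 12) and a 4 GiB aperture end, so end_pfn = 0xffffffff >> 12 = 0xfffff:

    #include <linux/iova.h>

    static void example_iova_setup(struct iova_domain *iovad,
    			       unsigned long pgsize_bitmap,
    			       dma_addr_t aperture_start,
    			       dma_addr_t aperture_end)
    {
    	unsigned long order = __ffs(pgsize_bitmap);	/* e.g. 12 */
    	unsigned long base_pfn = max_t(unsigned long, 1,
    				       aperture_start >> order);
    	unsigned long end_pfn = aperture_end >> order;	/* e.g. 0xfffff */

    	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
    }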
diff --git a/drivers/media/pci/intel/ipu3/ipu3-dmamap.h b/drivers/media/pci/intel/ipu3/ipu3-dmamap.h
index 2172802293807b..b9d224a3327334 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-dmamap.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-dmamap.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Intel Corporation */
-/* Copyright (C) 2018 Google, Inc. */
+/* Copyright 2018 Google LLC. */
#ifndef __IPU3_DMAMAP_H
#define __IPU3_DMAMAP_H
@@ -8,15 +8,15 @@
struct imgu_device;
struct scatterlist;
-void *ipu3_dmamap_alloc(struct device *dev, struct ipu3_css_map *map,
+void *ipu3_dmamap_alloc(struct imgu_device *imgu, struct ipu3_css_map *map,
size_t len);
-void ipu3_dmamap_free(struct device *dev, struct ipu3_css_map *map);
+void ipu3_dmamap_free(struct imgu_device *imgu, struct ipu3_css_map *map);
-int ipu3_dmamap_map_sg(struct device *dev, struct scatterlist *sglist,
+int ipu3_dmamap_map_sg(struct imgu_device *imgu, struct scatterlist *sglist,
int nents, struct ipu3_css_map *map);
-void ipu3_dmamap_unmap(struct device *dev, struct ipu3_css_map *map);
+void ipu3_dmamap_unmap(struct imgu_device *imgu, struct ipu3_css_map *map);
-int ipu3_dmamap_init(struct device *dev);
-void ipu3_dmamap_exit(struct device *dev);
+int ipu3_dmamap_init(struct imgu_device *imgu);
+void ipu3_dmamap_exit(struct imgu_device *imgu);
#endif
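After this header change, every dmamap entry point takes the struct imgu_device directly, dropping the dev_get_drvdata() lookup each call used to do. A hypothetical caller under the new contract; the struct device needed for logging is reached via imgu->pci_dev->dev:

    static int example_alloc(struct imgu_device *imgu, struct ipu3_css_map *map)
    {
    	void *va = ipu3_dmamap_alloc(imgu, map, PAGE_SIZE);

    	if (!va)
    		return -ENOMEM;

    	dev_dbg(&imgu->pci_dev->dev, "mapped %zu bytes\n", map->size);
    	return 0;
    }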
diff --git a/drivers/media/pci/intel/ipu3/ipu3-mmu.c b/drivers/media/pci/intel/ipu3/ipu3-mmu.c
index 6a50e887fc1df6..dc42d858ab3d21 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-mmu.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-mmu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2018 Intel Corporation.
- * Copyright (C) 2018 Google, Inc.
+ * Copyright (C) 2018 Intel Corporation.
+ * Copyright 2018 Google LLC.
*
* Author: Tuukka Toivonen <tuukka.toivonen@intel.com>
* Author: Sakari Ailus <sakari.ailus@linux.intel.com>
@@ -86,9 +86,10 @@ static void ipu3_mmu_tlb_invalidate(struct ipu3_mmu *mmu)
static void call_if_ipu3_is_powered(struct ipu3_mmu *mmu,
void (*func)(struct ipu3_mmu *mmu))
{
- pm_runtime_get_noresume(mmu->dev);
- if (pm_runtime_active(mmu->dev))
- func(mmu);
+ if (!pm_runtime_get_if_in_use(mmu->dev))
+ return;
+
+ func(mmu);
pm_runtime_put(mmu->dev);
}
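pm_runtime_get_if_in_use() takes a reference only when the device is already runtime-active, closing the window the old get_noresume-then-check pair left open. Generic form of the pattern (a sketch, not driver code); note that a negative return (runtime PM disabled) falls through and touches the hardware, matching the hunk above:

    #include <linux/pm_runtime.h>

    static void maybe_poke_hw(struct device *dev)
    {
    	if (!pm_runtime_get_if_in_use(dev))
    		return;	/* 0: device suspended, no reference taken */

    	/* ... MMIO access is safe here, device is powered ... */

    	pm_runtime_put(dev);
    }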
diff --git a/drivers/media/pci/intel/ipu3/ipu3-mmu.h b/drivers/media/pci/intel/ipu3/ipu3-mmu.h
index 4976187c18f616..8fe63b4c6e1c46 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-mmu.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-mmu.h
@@ -1,14 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Intel Corporation */
-/* Copyright (C) 2018 Google, Inc. */
+/* Copyright 2018 Google LLC. */
#ifndef __IPU3_MMU_H
#define __IPU3_MMU_H
+/**
+ * struct ipu3_mmu_info - Describes mmu geometry
+ *
+ * @aperture_start: First address that can be mapped
+ * @aperture_end: Last address that can be mapped
+ * @pgsize_bitmap: Bitmap of page sizes in use
+ */
struct ipu3_mmu_info {
- dma_addr_t aperture_start; /* First address that can be mapped */
- dma_addr_t aperture_end; /* Last address that can be mapped */
- unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
+ dma_addr_t aperture_start;
+ dma_addr_t aperture_end;
+ unsigned long pgsize_bitmap;
};
struct device;
diff --git a/drivers/media/pci/intel/ipu3/ipu3-tables.c b/drivers/media/pci/intel/ipu3/ipu3-tables.c
index a9c4ae616cee7b..334517987eba14 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-tables.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-tables.c
@@ -9488,7 +9488,7 @@ const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
956, 1024, 1088, 1144, 1200, 1256, 1304, 1356, 1404, 1448
} };
-const struct ipu3_uapi_anr_config ipu3_css_anr_defaults = {
+const struct imgu_abi_anr_config ipu3_css_anr_defaults = {
.transform = {
.adaptive_treshhold_en = 1,
.alpha = { { 13, 13, 13, 13, 0, 0, 0, 0},
diff --git a/drivers/media/pci/intel/ipu3/ipu3-tables.h b/drivers/media/pci/intel/ipu3/ipu3-tables.h
index 86d79332776248..6563782cbd22d8 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-tables.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-tables.h
@@ -4,7 +4,7 @@
#ifndef __IPU3_TABLES_H
#define __IPU3_TABLES_H
-#include <uapi/linux/intel-ipu3.h>
+#include "ipu3-abi.h"
#define IMGU_BDS_GRANULARITY 32 /* Downscaling granularity */
#define IMGU_BDS_MIN_SF_INV IMGU_BDS_GRANULARITY
@@ -20,9 +20,9 @@
#define IMGU_GDC_LUT_LEN 256
struct ipu3_css_bds_config {
- struct ipu3_uapi_bds_phase_arr hor_phase_arr;
- struct ipu3_uapi_bds_phase_arr ver_phase_arr;
- struct ipu3_uapi_bds_ptrn_arr ptrn_arr;
+ struct imgu_abi_bds_phase_arr hor_phase_arr;
+ struct imgu_abi_bds_phase_arr ver_phase_arr;
+ struct imgu_abi_bds_ptrn_arr ptrn_arr;
u16 sample_patrn_length;
u8 hor_ds_en;
u8 ver_ds_en;
@@ -56,7 +56,7 @@ extern const struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config
ipu3_css_tcc_gain_pcwl_lut;
extern const struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config
ipu3_css_tcc_r_sqr_lut;
-extern const struct ipu3_uapi_anr_config ipu3_css_anr_defaults;
+extern const struct imgu_abi_anr_config ipu3_css_anr_defaults;
extern const struct ipu3_uapi_awb_fr_config_s ipu3_css_awb_fr_defaults;
extern const struct ipu3_uapi_ae_grid_config ipu3_css_ae_grid_defaults;
extern const struct ipu3_uapi_ae_ccm ipu3_css_ae_ccm_defaults;
diff --git a/drivers/media/pci/intel/ipu3/ipu3-v4l2.c b/drivers/media/pci/intel/ipu3/ipu3-v4l2.c
index 6c37d70647f6e9..9e57f1951675ab 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-v4l2.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-v4l2.c
@@ -4,6 +4,7 @@
#include <linux/module.h>
#include <linux/pm_runtime.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include "ipu3.h"
@@ -13,47 +14,117 @@
static int ipu3_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
- struct imgu_device *imgu = container_of(sd, struct imgu_device, subdev);
struct v4l2_rect try_crop = {
.top = 0,
.left = 0,
- .height = imgu->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.height,
- .width = imgu->nodes[IMGU_NODE_IN].vdev_fmt.fmt.pix_mp.width,
+ .width = 1920,
+ .height = 1080,
};
unsigned int i;
/* Initialize try_fmt */
- for (i = 0; i < IMGU_NODE_NUM; i++)
- *v4l2_subdev_get_try_format(sd, fh->pad, i) =
- imgu->nodes[i].pad_fmt;
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ struct v4l2_mbus_framefmt *try_fmt =
+ v4l2_subdev_get_try_format(sd, fh->pad, i);
+
+ try_fmt->width = try_crop.width;
+ try_fmt->height = try_crop.height;
+ try_fmt->code = MEDIA_BUS_FMT_FIXED;
+ try_fmt->colorspace = V4L2_COLORSPACE_RAW;
+ try_fmt->field = V4L2_FIELD_NONE;
+ }
*v4l2_subdev_get_try_crop(sd, fh->pad, IMGU_NODE_IN) = try_crop;
+ *v4l2_subdev_get_try_compose(sd, fh->pad, IMGU_NODE_IN) = try_crop;
return 0;
}
static int ipu3_subdev_s_stream(struct v4l2_subdev *sd, int enable)
{
- struct imgu_device *imgu = container_of(sd, struct imgu_device, subdev);
+ int i;
+ unsigned int node;
int r = 0;
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+ struct device *dev = &imgu->pci_dev->dev;
+ struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
+ struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
+ struct ipu3_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
- r = imgu_s_stream(imgu, enable);
- if (!r)
- imgu->streaming = enable;
+ dev_dbg(dev, "%s %d for pipe %d", __func__, enable, pipe);
+ /* Grab the ctrl on stream on; ungrab it again on stream off */
+ v4l2_ctrl_grab(imgu_sd->ctrl, enable);
- return r;
+ if (!enable) {
+ imgu_sd->active = false;
+ return 0;
+ }
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ imgu_pipe->queue_enabled[i] = imgu_pipe->nodes[i].enabled;
+
+ /* This is handled specially */
+ imgu_pipe->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;
+
+ /* Initialize CSS formats */
+ for (i = 0; i < IPU3_CSS_QUEUES; i++) {
+ node = imgu_map_node(imgu, i);
+ /* No need to reconfig meta nodes */
+ if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
+ continue;
+ fmts[i] = imgu_pipe->queue_enabled[node] ?
+ &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
+ }
+
+ /* Enable VF output only when VF queue requested by user */
+ css_pipe->vf_output_en = false;
+ if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
+ css_pipe->vf_output_en = true;
+
+ if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ else
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
+
+ dev_dbg(dev, "IPU3 pipe %d pipe_id %d", pipe, css_pipe->pipe_id);
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
+ rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
+
+ r = ipu3_css_fmt_set(&imgu->css, fmts, rects, pipe);
+ if (r) {
+ dev_err(dev, "failed to set initial formats pipe %d with (%d)",
+ pipe, r);
+ return r;
+ }
+
+ imgu_sd->active = true;
+
+ return 0;
}
static int ipu3_subdev_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
- struct imgu_device *imgu = container_of(sd, struct imgu_device, subdev);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *mf;
+ struct imgu_media_pipe *imgu_pipe;
u32 pad = fmt->pad;
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
+ imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- fmt->format = imgu->nodes[pad].pad_fmt;
+ fmt->format = imgu_pipe->nodes[pad].pad_fmt;
} else {
mf = v4l2_subdev_get_try_format(sd, cfg, pad);
fmt->format = *mf;
@@ -66,18 +137,28 @@ static int ipu3_subdev_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
- struct imgu_device *imgu = container_of(sd, struct imgu_device, subdev);
+ struct imgu_media_pipe *imgu_pipe;
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+
struct v4l2_mbus_framefmt *mf;
u32 pad = fmt->pad;
+ unsigned int pipe = imgu_sd->pipe;
+ dev_dbg(&imgu->pci_dev->dev, "set subdev %d pad %d fmt to [%dx%d]",
+ pipe, pad, fmt->format.width, fmt->format.height);
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
mf = v4l2_subdev_get_try_format(sd, cfg, pad);
else
- mf = &imgu->nodes[pad].pad_fmt;
+ mf = &imgu_pipe->nodes[pad].pad_fmt;
fmt->format.code = mf->code;
/* Clamp the w and h based on the hardware capabilities */
- if (imgu->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
+ if (imgu_sd->subdev_pads[pad].flags & MEDIA_PAD_FL_SOURCE) {
fmt->format.width = clamp(fmt->format.width,
IPU3_OUTPUT_MIN_WIDTH,
IPU3_OUTPUT_MAX_WIDTH);
@@ -102,8 +183,10 @@ static int ipu3_subdev_get_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
- struct imgu_device *imgu = container_of(sd, struct imgu_device, subdev);
struct v4l2_rect *try_sel, *r;
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
if (sel->pad != IMGU_NODE_IN)
return -EINVAL;
@@ -111,11 +194,11 @@ static int ipu3_subdev_get_selection(struct v4l2_subdev *sd,
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
- r = &imgu->rect.eff;
+ r = &imgu_sd->rect.eff;
break;
case V4L2_SEL_TGT_COMPOSE:
try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
- r = &imgu->rect.bds;
+ r = &imgu_sd->rect.bds;
break;
default:
return -EINVAL;
@@ -133,20 +216,28 @@ static int ipu3_subdev_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
- struct imgu_device *imgu = container_of(sd, struct imgu_device, subdev);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
struct v4l2_rect *rect, *try_sel;
+ dev_dbg(&imgu->pci_dev->dev,
+ "set subdev %d sel which %d target 0x%4x rect [%dx%d]",
+ imgu_sd->pipe, sel->which, sel->target,
+ sel->r.width, sel->r.height);
+
if (sel->pad != IMGU_NODE_IN)
return -EINVAL;
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
- rect = &imgu->rect.eff;
+ rect = &imgu_sd->rect.eff;
break;
case V4L2_SEL_TGT_COMPOSE:
try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
- rect = &imgu->rect.bds;
+ rect = &imgu_sd->rect.bds;
break;
default:
return -EINVAL;
@@ -166,13 +257,36 @@ static int ipu3_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
{
- struct imgu_device *imgu = container_of(entity, struct imgu_device,
- subdev.entity);
+ struct imgu_media_pipe *imgu_pipe;
+ struct v4l2_subdev *sd = container_of(entity, struct v4l2_subdev,
+ entity);
+ struct imgu_device *imgu = v4l2_get_subdevdata(sd);
+ struct imgu_v4l2_subdev *imgu_sd = container_of(sd,
+ struct imgu_v4l2_subdev,
+ subdev);
+ unsigned int pipe = imgu_sd->pipe;
u32 pad = local->index;
+ WARN_ON(entity->type != MEDIA_ENT_T_V4L2_SUBDEV);
WARN_ON(pad >= IMGU_NODE_NUM);
- imgu->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
+ dev_dbg(&imgu->pci_dev->dev, "pipe %d pad %d is %s", pipe, pad,
+ flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
+
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ imgu_pipe->nodes[pad].enabled = flags & MEDIA_LNK_FL_ENABLED;
+
+ /* enable input node to enable the pipe */
+ if (pad != IMGU_NODE_IN)
+ return 0;
+
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ __set_bit(pipe, imgu->css.enabled_pipes);
+ else
+ __clear_bit(pipe, imgu->css.enabled_pipes);
+
+ dev_dbg(&imgu->pci_dev->dev, "pipe %d is %s", pipe,
+ flags & MEDIA_LNK_FL_ENABLED ? "enabled" : "disabled");
return 0;
}
@@ -187,13 +301,12 @@ static int ipu3_vb2_buf_init(struct vb2_buffer *vb)
struct imgu_buffer, vid_buf.vbb.vb2_buf);
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
- unsigned int queue = imgu_node_to_queue(node - imgu->nodes);
+ unsigned int queue = imgu_node_to_queue(node->id);
if (queue == IPU3_CSS_QUEUE_PARAMS)
return 0;
- return ipu3_dmamap_map_sg(&imgu->pci_dev->dev, sg->sgl, sg->nents,
- &buf->map);
+ return ipu3_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map);
}
/* Called when each buffer is freed */
@@ -204,12 +317,12 @@ static void ipu3_vb2_buf_cleanup(struct vb2_buffer *vb)
struct imgu_buffer, vid_buf.vbb.vb2_buf);
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
- unsigned int queue = imgu_node_to_queue(node - imgu->nodes);
+ unsigned int queue = imgu_node_to_queue(node->id);
if (queue == IPU3_CSS_QUEUE_PARAMS)
return;
- ipu3_dmamap_unmap(&imgu->pci_dev->dev, &buf->map);
+ ipu3_dmamap_unmap(imgu, &buf->map);
}
/* Transfer buffer ownership to me */
@@ -218,8 +331,9 @@ static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
struct imgu_device *imgu = vb2_get_drv_priv(vb->vb2_queue);
struct imgu_video_device *node =
container_of(vb->vb2_queue, struct imgu_video_device, vbq);
- unsigned int queue = imgu_node_to_queue(node - imgu->nodes);
+ unsigned int queue = imgu_node_to_queue(node->id);
unsigned long need_bytes;
+ unsigned int pipe = node->pipe;
if (vb->vb2_queue->type == V4L2_BUF_TYPE_META_CAPTURE ||
vb->vb2_queue->type == V4L2_BUF_TYPE_META_OUTPUT)
@@ -238,7 +352,7 @@ static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
vb2_set_plane_payload(vb, 0, payload);
}
if (payload >= need_bytes)
- r = ipu3_css_set_parameters(&imgu->css,
+ r = ipu3_css_set_parameters(&imgu->css, pipe,
vb2_plane_vaddr(vb, 0));
buf->flags = V4L2_BUF_FLAG_DONE;
vb2_buffer_done(vb, r == 0 ? VB2_BUF_STATE_DONE
@@ -251,14 +365,18 @@ static void ipu3_vb2_buf_queue(struct vb2_buffer *vb)
mutex_lock(&imgu->lock);
ipu3_css_buf_init(&buf->css_buf, queue, buf->map.daddr);
list_add_tail(&buf->vid_buf.list,
- &imgu->nodes[node - imgu->nodes].buffers);
+ &node->buffers);
mutex_unlock(&imgu->lock);
vb2_set_plane_payload(&buf->vid_buf.vbb.vb2_buf, 0, need_bytes);
if (imgu->streaming)
- imgu_queue_buffers(imgu, false);
+ imgu_queue_buffers(imgu, false, pipe);
}
+
+ dev_dbg(&imgu->pci_dev->dev, "%s for pipe %d node %d", __func__,
+ node->pipe, node->id);
}
static int ipu3_vb2_queue_setup(struct vb2_queue *vq, const void *parg,
@@ -271,16 +389,25 @@ static int ipu3_vb2_queue_setup(struct vb2_queue *vq, const void *parg,
struct imgu_video_device *node =
container_of(vq, struct imgu_video_device, vbq);
const struct v4l2_format *fmt = &node->vdev_fmt;
+ unsigned int size;
- *num_planes = 1;
*num_buffers = clamp_val(*num_buffers, 1, VB2_MAX_FRAME);
alloc_ctxs[0] = imgu->vb2_alloc_ctx;
if (vq->type == V4L2_BUF_TYPE_META_CAPTURE ||
vq->type == V4L2_BUF_TYPE_META_OUTPUT)
- sizes[0] = fmt->fmt.meta.buffersize;
+ size = fmt->fmt.meta.buffersize;
else
- sizes[0] = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+ size = fmt->fmt.pix_mp.plane_fmt[0].sizeimage;
+
+ if (*num_planes) {
+ if (sizes[0] < size)
+ return -EINVAL;
+ size = sizes[0];
+ }
+
+ *num_planes = 1;
+ sizes[0] = size;
/* Initialize buffer queue */
INIT_LIST_HEAD(&node->buffers);
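The hunk above implements the vb2 queue_setup contract for VIDIOC_CREATE_BUFS: when *num_planes is non-zero the call carries user-requested sizes in sizes[], which may only be accepted if they are at least as large as the driver's minimum; when zero, the driver fills in its own size. A minimal sketch against this tree's vb2 signature, with a stand-in size value:

    static int example_queue_setup(struct vb2_queue *vq, const void *parg,
    			       unsigned int *num_buffers,
    			       unsigned int *num_planes,
    			       unsigned int sizes[], void *alloc_ctxs[])
    {
    	unsigned int size = 4096;	/* stand-in for the format's sizeimage */

    	if (*num_planes)	/* CREATE_BUFS: validate, don't overwrite */
    		return sizes[0] < size ? -EINVAL : 0;

    	*num_planes = 1;	/* REQBUFS: driver picks the plane size */
    	sizes[0] = size;
    	return 0;
    }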
@@ -292,15 +419,27 @@ static int ipu3_vb2_queue_setup(struct vb2_queue *vq, const void *parg,
static bool ipu3_all_nodes_streaming(struct imgu_device *imgu,
struct imgu_video_device *except)
{
- unsigned int i;
-
- for (i = 0; i < IMGU_NODE_NUM; i++) {
- struct imgu_video_device *node = &imgu->nodes[i];
+ unsigned int i, pipe, p;
+ struct imgu_video_device *node;
+ struct device *dev = &imgu->pci_dev->dev;
+
+ pipe = except->pipe;
+ if (!test_bit(pipe, imgu->css.enabled_pipes)) {
+ dev_warn(dev, "pipe %d link is not ready yet", pipe);
+ return false;
+ }
- if (node == except)
- continue;
- if (node->enabled && !vb2_start_streaming_called(&node->vbq))
- return false;
+ for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ node = &imgu->imgu_pipe[p].nodes[i];
+ dev_dbg(dev, "%s pipe %u queue %u name %s enabled = %u",
+ __func__, p, i, node->name, node->enabled);
+ if (node == except)
+ continue;
+ if (node->enabled && !vb2_start_streaming_called(&node->vbq))
+ return false;
+ }
}
return true;
@@ -323,10 +462,16 @@ static void ipu3_return_all_buffers(struct imgu_device *imgu,
static int ipu3_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
+ struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node =
container_of(vq, struct imgu_video_device, vbq);
int r;
+ unsigned int pipe;
+
+ dev_dbg(dev, "%s node name %s pipe %d id %u", __func__,
+ node->name, node->pipe, node->id);
if (imgu->streaming) {
r = -EBUSY;
@@ -334,21 +479,34 @@ static int ipu3_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
}
if (!node->enabled) {
+ dev_err(dev, "IMGU node is not enabled");
r = -EINVAL;
goto fail_return_bufs;
}
- r = media_entity_pipeline_start(&node->vdev.entity, &imgu->pipeline);
+
+ pipe = node->pipe;
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ r = media_entity_pipeline_start(&node->vdev.entity,
+ &imgu_pipe->pipeline);
if (r < 0)
goto fail_return_bufs;
+
if (!ipu3_all_nodes_streaming(imgu, node))
return 0;
- /* Start streaming of the whole pipeline now */
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = v4l2_subdev_call(&imgu->imgu_pipe[pipe].imgu_sd.subdev,
+ video, s_stream, 1);
+ if (r < 0)
+ goto fail_stop_pipeline;
+ }
- r = v4l2_subdev_call(&imgu->subdev, video, s_stream, 1);
- if (r < 0)
- goto fail_stop_pipeline;
+ /* Start streaming of the whole pipeline now */
+ dev_dbg(dev, "IMGU streaming is ready to start");
+ r = imgu_s_stream(imgu, true);
+ if (!r)
+ imgu->streaming = true;
return 0;
@@ -362,20 +520,31 @@ fail_return_bufs:
static void ipu3_vb2_stop_streaming(struct vb2_queue *vq)
{
+ struct imgu_media_pipe *imgu_pipe;
struct imgu_device *imgu = vb2_get_drv_priv(vq);
+ struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node =
container_of(vq, struct imgu_video_device, vbq);
int r;
+ unsigned int pipe;
WARN_ON(!node->enabled);
+ pipe = node->pipe;
+ dev_dbg(dev, "Try to stream off node [%d][%d]", pipe, node->id);
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ r = v4l2_subdev_call(&imgu_pipe->imgu_sd.subdev, video, s_stream, 0);
+ if (r)
+ dev_err(dev, "failed to stop subdev streaming\n");
+
/* Was this the first node with streaming disabled? */
- if (ipu3_all_nodes_streaming(imgu, node)) {
+ if (imgu->streaming && ipu3_all_nodes_streaming(imgu, node)) {
/* Yes, really stop streaming now */
- r = v4l2_subdev_call(&imgu->subdev, video, s_stream, 0);
- if (r)
- dev_err(&imgu->pci_dev->dev,
- "failed to stop streaming\n");
+ dev_dbg(dev, "IMGU streaming is ready to stop");
+ r = imgu_s_stream(imgu, false);
+ if (!r)
+ imgu->streaming = false;
}
ipu3_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR);
@@ -423,8 +592,8 @@ static int ipu3_vidioc_querycap(struct file *file, void *fh,
{
struct imgu_video_device *node = file_to_intel_ipu3_node(file);
- strlcpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
- strlcpy(cap->card, IMGU_NAME, sizeof(cap->card));
+ strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
+ strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", node->name);
return 0;
@@ -483,29 +652,35 @@ static int ipu3_vidioc_g_fmt(struct file *file, void *fh,
* Set input/output format. Unless it is just a try, this also resets
* selections (ie. effective and BDS resolutions) to defaults.
*/
-static int imgu_fmt(struct imgu_device *imgu, int node,
+static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
struct v4l2_format *f, bool try)
{
+ struct device *dev = &imgu->pci_dev->dev;
struct v4l2_pix_format_mplane try_fmts[IPU3_CSS_QUEUES];
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
struct v4l2_mbus_framefmt pad_fmt;
unsigned int i, css_q;
int r;
+ struct ipu3_css_pipe *css_pipe = &imgu->css.pipes[pipe];
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
- if (imgu->nodes[IMGU_NODE_PV].enabled &&
- imgu->nodes[IMGU_NODE_VF].enabled) {
- dev_err(&imgu->pci_dev->dev,
- "Postview and vf are not supported simultaneously\n");
- return -EINVAL;
- }
- /*
- * Tell css that the vf q is used for PV
- */
- if (imgu->nodes[IMGU_NODE_PV].enabled)
- imgu->css.vf_output_en = IPU3_NODE_PV_ENABLED;
- else if (imgu->nodes[IMGU_NODE_VF].enabled)
- imgu->css.vf_output_en = IPU3_NODE_VF_ENABLED;
+ dev_dbg(dev, "set fmt node [%u][%u](try = %d)", pipe, node, try);
+
+ for (i = 0; i < IMGU_NODE_NUM; i++)
+ dev_dbg(dev, "IMGU pipe %d node %d enabled = %d",
+ pipe, i, imgu_pipe->nodes[i].enabled);
+
+ if (imgu_pipe->nodes[IMGU_NODE_VF].enabled)
+ css_pipe->vf_output_en = true;
+
+ if (atomic_read(&imgu_sd->running_mode) == IPU3_RUNNING_MODE_VIDEO)
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_VIDEO;
+ else
+ css_pipe->pipe_id = IPU3_CSS_PIPE_ID_CAPTURE;
+
+ dev_dbg(dev, "IPU3 pipe %d pipe_id = %d", pipe, css_pipe->pipe_id);
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
unsigned int inode = imgu_map_node(imgu, i);
@@ -514,32 +689,30 @@ static int imgu_fmt(struct imgu_device *imgu, int node,
if (inode == IMGU_NODE_STAT_3A || inode == IMGU_NODE_PARAMS)
continue;
- /* imgu_map_node defauls to PV if VF not enabled */
- if (inode == IMGU_NODE_PV && node == IMGU_NODE_VF)
- if (imgu->css.vf_output_en == IPU3_NODE_VF_DISABLED)
- inode = node;
-
if (try) {
- try_fmts[i] = imgu->nodes[inode].vdev_fmt.fmt.pix_mp;
+ try_fmts[i] =
+ imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
fmts[i] = &try_fmts[i];
} else {
- fmts[i] = &imgu->nodes[inode].vdev_fmt.fmt.pix_mp;
+ fmts[i] = &imgu_pipe->nodes[inode].vdev_fmt.fmt.pix_mp;
}
/* CSS expects some format on OUT queue */
if (i != IPU3_CSS_QUEUE_OUT &&
- !imgu->nodes[inode].enabled && inode != node)
+ !imgu_pipe->nodes[inode].enabled)
fmts[i] = NULL;
}
if (!try) {
/* eff and bds res got by imgu_s_sel */
- rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu->rect.eff;
- rects[IPU3_CSS_RECT_BDS] = &imgu->rect.bds;
- rects[IPU3_CSS_RECT_GDC] = &imgu->rect.gdc;
+ struct imgu_v4l2_subdev *imgu_sd = &imgu_pipe->imgu_sd;
+
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_sd->rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_sd->rect.bds;
+ rects[IPU3_CSS_RECT_GDC] = &imgu_sd->rect.gdc;
/* suppose that pad fmt was set by subdev s_fmt before */
- pad_fmt = imgu->nodes[IMGU_NODE_IN].pad_fmt;
+ pad_fmt = imgu_pipe->nodes[IMGU_NODE_IN].pad_fmt;
rects[IPU3_CSS_RECT_GDC]->width = pad_fmt.width;
rects[IPU3_CSS_RECT_GDC]->height = pad_fmt.height;
}
@@ -555,9 +728,9 @@ static int imgu_fmt(struct imgu_device *imgu, int node,
return -EINVAL;
if (try)
- r = ipu3_css_fmt_try(&imgu->css, fmts, rects);
+ r = ipu3_css_fmt_try(&imgu->css, fmts, rects, pipe);
else
- r = ipu3_css_fmt_set(&imgu->css, fmts, rects);
+ r = ipu3_css_fmt_set(&imgu->css, fmts, rects, pipe);
/* r is the binary number in the firmware blob */
if (r < 0)
@@ -566,7 +739,7 @@ static int imgu_fmt(struct imgu_device *imgu, int node,
if (try)
f->fmt.pix_mp = *fmts[css_q];
else
- f->fmt = imgu->nodes[node].vdev_fmt.fmt;
+ f->fmt = imgu_pipe->nodes[node].vdev_fmt.fmt;
return 0;
}
@@ -595,27 +768,37 @@ static int ipu3_vidioc_try_fmt(struct file *file, void *fh,
struct v4l2_format *f)
{
struct imgu_device *imgu = video_drvdata(file);
+ struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
+ dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
+ pix_mp->width, pix_mp->height, node->id);
+
r = ipu3_try_fmt(file, fh, f);
if (r)
return r;
- return imgu_fmt(imgu, node - imgu->nodes, f, true);
+ return imgu_fmt(imgu, node->pipe, node->id, f, true);
}
static int ipu3_vidioc_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
struct imgu_device *imgu = video_drvdata(file);
+ struct device *dev = &imgu->pci_dev->dev;
struct imgu_video_device *node = file_to_intel_ipu3_node(file);
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
int r;
+ dev_dbg(dev, "%s [%ux%u] for node %d\n", __func__,
+ pix_mp->width, pix_mp->height, node->id);
+
r = ipu3_try_fmt(file, fh, f);
if (r)
return r;
- return imgu_fmt(imgu, node - imgu->nodes, f, false);
+ return imgu_fmt(imgu, node->pipe, node->id, f, false);
}
static int ipu3_meta_enum_format(struct file *file, void *fh,
@@ -650,7 +833,7 @@ static int ipu3_vidioc_enum_input(struct file *file, void *fh,
{
if (input->index > 0)
return -EINVAL;
- strlcpy(input->name, "camera", sizeof(input->name));
+ strscpy(input->name, "camera", sizeof(input->name));
input->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
@@ -673,7 +856,7 @@ static int ipu3_vidioc_enum_output(struct file *file, void *fh,
{
if (output->index > 0)
return -EINVAL;
- strlcpy(output->name, "camera", sizeof(output->name));
+ strscpy(output->name, "camera", sizeof(output->name));
output->type = V4L2_INPUT_TYPE_CAMERA;
return 0;
@@ -699,6 +882,11 @@ static struct v4l2_subdev_internal_ops ipu3_subdev_internal_ops = {
.open = ipu3_subdev_open,
};
+static const struct v4l2_subdev_core_ops ipu3_subdev_core_ops = {
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ipu3_subdev_video_ops = {
.s_stream = ipu3_subdev_s_stream,
};
@@ -712,6 +900,7 @@ static const struct v4l2_subdev_pad_ops ipu3_subdev_pad_ops = {
};
static const struct v4l2_subdev_ops ipu3_subdev_ops = {
+ .core = &ipu3_subdev_core_ops,
.video = &ipu3_subdev_video_ops,
.pad = &ipu3_subdev_pad_ops,
};
@@ -805,6 +994,40 @@ static const struct v4l2_ioctl_ops ipu3_v4l2_meta_ioctl_ops = {
.vidioc_expbuf = vb2_ioctl_expbuf,
};
+static int ipu3_sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct imgu_v4l2_subdev *imgu_sd =
+ container_of(ctrl->handler, struct imgu_v4l2_subdev, ctrl_handler);
+ struct imgu_device *imgu = v4l2_get_subdevdata(&imgu_sd->subdev);
+ struct device *dev = &imgu->pci_dev->dev;
+
+ dev_dbg(dev, "set val %d to ctrl 0x%8x for subdev %d",
+ ctrl->val, ctrl->id, imgu_sd->pipe);
+
+ switch (ctrl->id) {
+ case V4L2_CID_INTEL_IPU3_MODE:
+ atomic_set(&imgu_sd->running_mode, ctrl->val);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct v4l2_ctrl_ops ipu3_subdev_ctrl_ops = {
+ .s_ctrl = ipu3_sd_s_ctrl,
+};
+
+static const struct v4l2_ctrl_config ipu3_subdev_ctrl_mode = {
+ .ops = &ipu3_subdev_ctrl_ops,
+ .id = V4L2_CID_INTEL_IPU3_MODE,
+ .name = "IPU3 Pipe Mode",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = IPU3_RUNNING_MODE_VIDEO,
+ .max = IPU3_RUNNING_MODE_STILL,
+ .step = 1,
+ .def = IPU3_RUNNING_MODE_VIDEO,
+};
+
/******************** Framework registration ********************/
/* helper function to config node's video properties */
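The custom control defined above is registered per-subdev in ipu3_v4l2_subdev_register() and lets userspace switch a pipe between video and still mode before streaming. A hypothetical userspace snippet, assuming the CID and mode constants are exported by the uapi header this series references:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>
    #include <linux/intel-ipu3.h>	/* V4L2_CID_INTEL_IPU3_MODE, mode values */

    int ipu3_set_still_mode(const char *subdev_path)
    {
    	struct v4l2_control ctrl = {
    		.id = V4L2_CID_INTEL_IPU3_MODE,
    		.value = IPU3_RUNNING_MODE_STILL,
    	};
    	int fd = open(subdev_path, O_RDWR);
    	int ret;

    	if (fd < 0)
    		return -1;

    	ret = ioctl(fd, VIDIOC_S_CTRL, &ctrl);
    	close(fd);
    	return ret;
    }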
@@ -845,84 +1068,81 @@ static void ipu3_node_to_v4l2(u32 node, struct video_device *vdev,
vdev->device_caps = V4L2_CAP_STREAMING | cap;
}
-int ipu3_v4l2_register(struct imgu_device *imgu)
+static int ipu3_v4l2_subdev_register(struct imgu_device *imgu,
+ struct imgu_v4l2_subdev *imgu_sd,
+ unsigned int pipe)
{
- struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
- struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
-
int i, r;
-
- /* Initialize miscellaneous variables */
- imgu->streaming = false;
-
- /* Set up media device */
- imgu->media_dev.dev = &imgu->pci_dev->dev;
- strlcpy(imgu->media_dev.model, IMGU_NAME,
- sizeof(imgu->media_dev.model));
- snprintf(imgu->media_dev.bus_info, sizeof(imgu->media_dev.bus_info),
- "%s", dev_name(&imgu->pci_dev->dev));
- imgu->media_dev.hw_revision = 0;
- r = media_device_register(&imgu->media_dev);
- if (r) {
- dev_err(&imgu->pci_dev->dev,
- "failed to register media device (%d)\n", r);
- return r;
- }
-
- /* Set up v4l2 device */
- imgu->v4l2_dev.mdev = &imgu->media_dev;
- imgu->v4l2_dev.ctrl_handler = imgu->ctrl_handler;
- r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev);
- if (r) {
- dev_err(&imgu->pci_dev->dev,
- "failed to register V4L2 device (%d)\n", r);
- goto fail_v4l2_dev;
- }
+ struct v4l2_ctrl_handler *hdl = &imgu_sd->ctrl_handler;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* Initialize subdev media entity */
- imgu->subdev_pads = kzalloc(sizeof(*imgu->subdev_pads) *
- IMGU_NODE_NUM, GFP_KERNEL);
- if (!imgu->subdev_pads) {
- r = -ENOMEM;
- goto fail_subdev_pads;
- }
- r = media_entity_init(&imgu->subdev.entity, IMGU_NODE_NUM,
- imgu->subdev_pads, 0);
+ r = media_entity_init(&imgu_sd->subdev.entity, IMGU_NODE_NUM,
+ imgu_sd->subdev_pads, 0);
if (r) {
dev_err(&imgu->pci_dev->dev,
"failed initialize subdev media entity (%d)\n", r);
- goto fail_media_entity;
+ return r;
}
- imgu->subdev.entity.ops = &ipu3_media_ops;
+ imgu_sd->subdev.entity.ops = &ipu3_media_ops;
for (i = 0; i < IMGU_NODE_NUM; i++) {
- imgu->subdev_pads[i].flags = imgu->nodes[i].output ?
+ imgu_sd->subdev_pads[i].flags = imgu_pipe->nodes[i].output ?
MEDIA_PAD_FL_SINK : MEDIA_PAD_FL_SOURCE;
}
/* Initialize subdev */
- v4l2_subdev_init(&imgu->subdev, &ipu3_subdev_ops);
- imgu->subdev.internal_ops = &ipu3_subdev_internal_ops;
- imgu->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
- strlcpy(imgu->subdev.name, IMGU_NAME, sizeof(imgu->subdev.name));
- v4l2_set_subdevdata(&imgu->subdev, imgu);
- imgu->subdev.ctrl_handler = imgu->ctrl_handler;
- r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu->subdev);
- if (r) {
+ v4l2_subdev_init(&imgu_sd->subdev, &ipu3_subdev_ops);
+ imgu_sd->subdev.internal_ops = &ipu3_subdev_internal_ops;
+ imgu_sd->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
+ snprintf(imgu_sd->subdev.name, sizeof(imgu_sd->subdev.name),
+ "%s %d", IMGU_NAME, pipe);
+ v4l2_set_subdevdata(&imgu_sd->subdev, imgu);
+ atomic_set(&imgu_sd->running_mode, IPU3_RUNNING_MODE_VIDEO);
+ v4l2_ctrl_handler_init(hdl, 1);
+ imgu_sd->subdev.ctrl_handler = hdl;
+ imgu_sd->ctrl = v4l2_ctrl_new_custom(hdl, &ipu3_subdev_ctrl_mode, NULL);
+ if (hdl->error) {
+ r = hdl->error;
dev_err(&imgu->pci_dev->dev,
- "failed initialize subdev (%d)\n", r);
+ "failed to create subdev v4l2 ctrl with err %d", r);
goto fail_subdev;
}
- r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev);
+ r = v4l2_device_register_subdev(&imgu->v4l2_dev, &imgu_sd->subdev);
if (r) {
dev_err(&imgu->pci_dev->dev,
- "failed to register subdevs (%d)\n", r);
- goto fail_subdevs;
+ "failed initialize subdev (%d)\n", r);
+ goto fail_subdev;
}
+ imgu_sd->pipe = pipe;
+ return 0;
+
+fail_subdev:
+ v4l2_ctrl_handler_free(imgu_sd->subdev.ctrl_handler);
+ media_entity_cleanup(&imgu_sd->subdev.entity);
+
+ return r;
+}
+
+static int ipu3_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
+ int node_num)
+{
+ int r;
+ u32 flags;
+ struct v4l2_mbus_framefmt def_bus_fmt = { 0 };
+ struct v4l2_pix_format_mplane def_pix_fmt = { 0 };
+ struct device *dev = &imgu->pci_dev->dev;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+ struct v4l2_subdev *sd = &imgu_pipe->imgu_sd.subdev;
+ struct imgu_video_device *node = &imgu_pipe->nodes[node_num];
+ struct video_device *vdev = &node->vdev;
+ struct vb2_queue *vbq = &node->vbq;
+
/* Initialize formats to default values */
def_bus_fmt.width = 1920;
def_bus_fmt.height = 1080;
- def_bus_fmt.code = MEDIA_BUS_FMT_UYVY8_2X8;
+ def_bus_fmt.code = MEDIA_BUS_FMT_FIXED;
def_bus_fmt.field = V4L2_FIELD_NONE;
def_bus_fmt.colorspace = V4L2_COLORSPACE_RAW;
def_bus_fmt.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
@@ -942,110 +1162,221 @@ int ipu3_v4l2_register(struct imgu_device *imgu)
def_pix_fmt.quantization = def_bus_fmt.quantization;
def_pix_fmt.xfer_func = def_bus_fmt.xfer_func;
- /* Create video nodes and links */
+ /* Initialize miscellaneous variables */
+ mutex_init(&node->lock);
+ INIT_LIST_HEAD(&node->buffers);
+
+ /* Initialize formats to default values */
+ node->pad_fmt = def_bus_fmt;
+ node->id = node_num;
+ node->pipe = pipe;
+ ipu3_node_to_v4l2(node_num, vdev, &node->vdev_fmt);
+ if (node->vdev_fmt.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+ node->vdev_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ def_pix_fmt.pixelformat = node->output ?
+ V4L2_PIX_FMT_IPU3_SGRBG10 :
+ V4L2_PIX_FMT_NV12;
+ node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
+ }
+
+ /* Initialize media entities */
+ r = media_entity_init(&vdev->entity, 1, &node->vdev_pad, 0);
+ if (r) {
+ dev_err(dev, "failed initialize media entity (%d)\n", r);
+ mutex_destroy(&node->lock);
+ return r;
+ }
+ node->vdev_pad.flags = node->output ?
+ MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
+ vdev->entity.ops = NULL;
+
+ /* Initialize vbq */
+ vbq->type = node->vdev_fmt.type;
+ vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
+ vbq->ops = &ipu3_vb2_ops;
+ vbq->mem_ops = &vb2_dma_sg_memops;
+ if (imgu->buf_struct_size <= 0)
+ imgu->buf_struct_size = sizeof(struct ipu3_vb2_buffer);
+ vbq->buf_struct_size = imgu->buf_struct_size;
+ vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ /* can streamon w/o buffers */
+ vbq->min_buffers_needed = 0;
+ vbq->drv_priv = imgu;
+ vbq->lock = &node->lock;
+ r = vb2_queue_init(vbq);
+ if (r) {
+ dev_err(dev, "failed to initialize video queue (%d)", r);
+ media_entity_cleanup(&vdev->entity);
+ return r;
+ }
+
+ /* Initialize vdev */
+ snprintf(vdev->name, sizeof(vdev->name), "%s %d %s",
+ IMGU_NAME, pipe, node->name);
+ vdev->release = video_device_release_empty;
+ vdev->fops = &ipu3_v4l2_fops;
+ vdev->lock = &node->lock;
+ vdev->v4l2_dev = &imgu->v4l2_dev;
+ vdev->queue = &node->vbq;
+ vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
+ video_set_drvdata(vdev, imgu);
+ r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (r) {
+ dev_err(dev, "failed to register video device (%d)", r);
+ media_entity_cleanup(&vdev->entity);
+ return r;
+ }
+
+ /* Create link between video node and the subdev pad */
+ flags = 0;
+ if (node->enabled)
+ flags |= MEDIA_LNK_FL_ENABLED;
+ if (node->output) {
+ r = media_entity_create_link(&vdev->entity, 0, &sd->entity,
+ node_num, flags);
+ } else {
+ r = media_entity_create_link(&sd->entity, node_num, &vdev->entity,
+ 0, flags);
+ }
+ if (r) {
+ dev_err(dev, "failed to create pad link (%d)", r);
+ video_unregister_device(vdev);
+ return r;
+ }
+
+ return 0;
+}
+
+static void ipu3_v4l2_nodes_cleanup_pipe(struct imgu_device *imgu,
+ unsigned int pipe, int node)
+{
+ int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
+
+ for (i = 0; i < node; i++) {
+ video_unregister_device(&imgu_pipe->nodes[i].vdev);
+ media_entity_cleanup(&imgu_pipe->nodes[i].vdev.entity);
+ mutex_destroy(&imgu_pipe->nodes[i].lock);
+ }
+}
+
+static int ipu3_v4l2_nodes_setup_pipe(struct imgu_device *imgu, int pipe)
+{
+ int i, r;
+
for (i = 0; i < IMGU_NODE_NUM; i++) {
- struct imgu_video_device *node = &imgu->nodes[i];
- struct video_device *vdev = &node->vdev;
- struct vb2_queue *vbq = &node->vbq;
- u32 flags;
-
- /* Initialize miscellaneous variables */
- mutex_init(&node->lock);
- INIT_LIST_HEAD(&node->buffers);
-
- /* Initialize formats to default values */
- node->pad_fmt = def_bus_fmt;
- ipu3_node_to_v4l2(i, vdev, &node->vdev_fmt);
- if (node->vdev_fmt.type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
- node->vdev_fmt.type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- def_pix_fmt.pixelformat = node->output ?
- V4L2_PIX_FMT_IPU3_SGRBG10 :
- V4L2_PIX_FMT_NV12;
- node->vdev_fmt.fmt.pix_mp = def_pix_fmt;
- }
- /* Initialize media entities */
- r = media_entity_init(&vdev->entity, 1, &node->vdev_pad, 0);
+ r = ipu3_v4l2_node_setup(imgu, pipe, i);
+ if (r)
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ ipu3_v4l2_nodes_cleanup_pipe(imgu, pipe, i);
+ return r;
+}
+
+static void ipu3_v4l2_subdev_cleanup(struct imgu_device *imgu, unsigned int i)
+{
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[i];
+
+ v4l2_device_unregister_subdev(&imgu_pipe->imgu_sd.subdev);
+ v4l2_ctrl_handler_free(imgu_pipe->imgu_sd.subdev.ctrl_handler);
+ media_entity_cleanup(&imgu_pipe->imgu_sd.subdev.entity);
+}
+
+static void ipu3_v4l2_cleanup_pipes(struct imgu_device *imgu, unsigned int pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe; i++) {
+ ipu3_v4l2_nodes_cleanup_pipe(imgu, i, IMGU_NODE_NUM);
+ ipu3_v4l2_subdev_cleanup(imgu, i);
+ }
+}
+
+static int ipu3_v4l2_register_pipes(struct imgu_device *imgu)
+{
+ struct imgu_media_pipe *imgu_pipe;
+ int i, r;
+
+ for (i = 0; i < IMGU_MAX_PIPE_NUM; i++) {
+ imgu_pipe = &imgu->imgu_pipe[i];
+ r = ipu3_v4l2_subdev_register(imgu, &imgu_pipe->imgu_sd, i);
if (r) {
dev_err(&imgu->pci_dev->dev,
- "failed initialize media entity (%d)\n", r);
- goto fail_vdev_media_entity;
+ "failed to register subdev%d ret (%d)\n", i, r);
+ goto pipes_cleanup;
}
- node->vdev_pad.flags = node->output ?
- MEDIA_PAD_FL_SOURCE : MEDIA_PAD_FL_SINK;
- vdev->entity.ops = NULL;
-
- /* Initialize vbq */
- vbq->type = node->vdev_fmt.type;
- vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
- vbq->ops = &ipu3_vb2_ops;
- vbq->mem_ops = &vb2_dma_sg_memops;
- if (imgu->buf_struct_size <= 0)
- imgu->buf_struct_size = sizeof(struct ipu3_vb2_buffer);
- vbq->buf_struct_size = imgu->buf_struct_size;
- vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- vbq->min_buffers_needed = 0; /* Can streamon w/o buffers */
- vbq->drv_priv = imgu;
- vbq->lock = &node->lock;
- r = vb2_queue_init(vbq);
+ r = ipu3_v4l2_nodes_setup_pipe(imgu, i);
if (r) {
- dev_err(&imgu->pci_dev->dev,
- "failed to initialize video queue (%d)\n", r);
- goto fail_vdev;
+ ipu3_v4l2_subdev_cleanup(imgu, i);
+ goto pipes_cleanup;
}
+ }
- /* Initialize vdev */
- snprintf(vdev->name, sizeof(vdev->name), "%s %s",
- IMGU_NAME, node->name);
- vdev->release = video_device_release_empty;
- vdev->fops = &ipu3_v4l2_fops;
- vdev->lock = &node->lock;
- vdev->v4l2_dev = &imgu->v4l2_dev;
- vdev->queue = &node->vbq;
- vdev->vfl_dir = node->output ? VFL_DIR_TX : VFL_DIR_RX;
- video_set_drvdata(vdev, imgu);
- r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
- if (r) {
- dev_err(&imgu->pci_dev->dev,
- "failed to register video device (%d)\n", r);
- goto fail_vdev;
- }
+ return 0;
- /* Create link between video node and the subdev pad */
- flags = 0;
- if (node->enabled)
- flags |= MEDIA_LNK_FL_ENABLED;
- if (node->immutable)
- flags |= MEDIA_LNK_FL_IMMUTABLE;
- if (node->output) {
- r = media_entity_create_link(&vdev->entity, 0,
- &imgu->subdev.entity,
- i, flags);
- } else {
- r = media_entity_create_link(&imgu->subdev.entity,
- i, &vdev->entity, 0,
- flags);
- }
- if (r)
- goto fail_link;
+pipes_cleanup:
+ ipu3_v4l2_cleanup_pipes(imgu, i);
+ return r;
+}
+
+int ipu3_v4l2_register(struct imgu_device *imgu)
+{
+ int r;
+
+ /* Initialize miscellaneous variables */
+ imgu->streaming = false;
+
+ /* Set up media device */
+ imgu->media_dev.dev = &imgu->pci_dev->dev;
+ strscpy(imgu->media_dev.model, IMGU_NAME,
+ sizeof(imgu->media_dev.model));
+ snprintf(imgu->media_dev.bus_info, sizeof(imgu->media_dev.bus_info),
+ "%s", dev_name(&imgu->pci_dev->dev));
+ imgu->media_dev.driver_version = LINUX_VERSION_CODE;
+ imgu->media_dev.hw_revision = 0;
+ r = media_device_register(&imgu->media_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register media device (%d)\n", r);
+ return r;
}
- return 0;
+ /* Set up v4l2 device */
+ imgu->v4l2_dev.mdev = &imgu->media_dev;
+ imgu->v4l2_dev.ctrl_handler = NULL;
+ r = v4l2_device_register(&imgu->pci_dev->dev, &imgu->v4l2_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register V4L2 device (%d)\n", r);
+ goto fail_v4l2_dev;
+ }
+
+ r = ipu3_v4l2_register_pipes(imgu);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register pipes (%d)\n", r);
+ goto fail_v4l2_pipes;
+ }
- for (; i >= 0; i--) {
-fail_link:
- video_unregister_device(&imgu->nodes[i].vdev);
-fail_vdev:
- media_entity_cleanup(&imgu->nodes[i].vdev.entity);
-fail_vdev_media_entity:
- mutex_destroy(&imgu->nodes[i].lock);
+ r = v4l2_device_register_subdev_nodes(&imgu->v4l2_dev);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to register subdevs (%d)\n", r);
+ goto fail_subdevs;
}
+
+ return 0;
+
fail_subdevs:
- v4l2_device_unregister_subdev(&imgu->subdev);
-fail_subdev:
- media_entity_cleanup(&imgu->subdev.entity);
-fail_media_entity:
- kfree(imgu->subdev_pads);
-fail_subdev_pads:
+ ipu3_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+fail_v4l2_pipes:
v4l2_device_unregister(&imgu->v4l2_dev);
fail_v4l2_dev:
media_device_unregister(&imgu->media_dev);
@@ -1056,19 +1387,9 @@ EXPORT_SYMBOL_GPL(ipu3_v4l2_register);
int ipu3_v4l2_unregister(struct imgu_device *imgu)
{
- unsigned int i;
-
- for (i = 0; i < IMGU_NODE_NUM; i++) {
- video_unregister_device(&imgu->nodes[i].vdev);
- media_entity_cleanup(&imgu->nodes[i].vdev.entity);
- mutex_destroy(&imgu->nodes[i].lock);
- }
-
- v4l2_device_unregister_subdev(&imgu->subdev);
- media_entity_cleanup(&imgu->subdev.entity);
- kfree(imgu->subdev_pads);
- v4l2_device_unregister(&imgu->v4l2_dev);
media_device_unregister(&imgu->media_dev);
+ ipu3_v4l2_cleanup_pipes(imgu, IMGU_MAX_PIPE_NUM);
+ v4l2_device_unregister(&imgu->v4l2_dev);
return 0;
}
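
The reworked registration path above acquires resources strictly in order (media device, V4L2 device, per-pipe entities, subdev nodes), and each failure label unwinds only the steps that already succeeded, in reverse. A minimal sketch of that goto-unwind idiom, with hypothetical example_* names standing in for the real steps:

    static int example_register(struct example_dev *dev)
    {
        int r;

        r = register_step_a(dev);
        if (r)
            return r;
        r = register_step_b(dev);
        if (r)
            goto fail_a;
        r = register_step_c(dev);
        if (r)
            goto fail_b;
        return 0;

    fail_b:
        unregister_step_b(dev);
    fail_a:
        unregister_step_a(dev);
        return r;
    }

Note that ipu3_v4l2_unregister() above is not a strict reverse: the media device is unregistered first, ahead of the per-pipe cleanup and the V4L2 device.
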
diff --git a/drivers/media/pci/intel/ipu3/ipu3.c b/drivers/media/pci/intel/ipu3/ipu3.c
index 1cf3336c415578..994eb8dc6bca61 100644
--- a/drivers/media/pci/intel/ipu3/ipu3.c
+++ b/drivers/media/pci/intel/ipu3/ipu3.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2017 Intel Corporation
- * Copyright (C) 2017 Google, Inc.
+ * Copyright 2017 Google LLC.
*
* Based on Intel IPU4 driver.
*
@@ -30,7 +30,7 @@
#define CSS_QUEUE_PARAMS_BUF_SIZE 0
#define CSS_QUEUE_OUT_BUF_SIZE (4160 * 3120 * 12 / 8)
#define CSS_QUEUE_VF_BUF_SIZE (1920 * 1080 * 12 / 8)
-#define CSS_QUEUE_STAT_3A_BUF_SIZE 125664
+#define CSS_QUEUE_STAT_3A_BUF_SIZE sizeof(struct ipu3_uapi_stats_3a)
static const size_t css_queue_buf_size_map[IPU3_CSS_QUEUES] = {
[IPU3_CSS_QUEUE_IN] = CSS_QUEUE_IN_BUF_SIZE,
@@ -45,7 +45,6 @@ static const struct imgu_node_mapping imgu_node_map[IMGU_NODE_NUM] = {
[IMGU_NODE_PARAMS] = {IPU3_CSS_QUEUE_PARAMS, "parameters"},
[IMGU_NODE_OUT] = {IPU3_CSS_QUEUE_OUT, "output"},
[IMGU_NODE_VF] = {IPU3_CSS_QUEUE_VF, "viewfinder"},
- [IMGU_NODE_PV] = {IPU3_CSS_QUEUE_VF, "postview"},
[IMGU_NODE_STAT_3A] = {IPU3_CSS_QUEUE_STAT_3A, "3a stat"},
};
@@ -58,10 +57,6 @@ unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
{
unsigned int i;
- if (css_queue == IPU3_CSS_QUEUE_VF)
- return imgu->nodes[IMGU_NODE_VF].enabled ?
- IMGU_NODE_VF : IMGU_NODE_PV;
-
for (i = 0; i < IMGU_NODE_NUM; i++)
if (imgu_node_map[i].css_queue == css_queue)
break;
@@ -71,18 +66,22 @@ unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue)
/**************** Dummy buffers ****************/
-static void imgu_dummybufs_cleanup(struct imgu_device *imgu)
+static void imgu_dummybufs_cleanup(struct imgu_device *imgu, unsigned int pipe)
{
unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
for (i = 0; i < IPU3_CSS_QUEUES; i++)
- ipu3_dmamap_free(&imgu->pci_dev->dev, &imgu->queues[i].dmap);
+ ipu3_dmamap_free(imgu,
+ &imgu_pipe->queues[i].dmap);
}
-static int imgu_dummybufs_preallocate(struct imgu_device *imgu)
+static int imgu_dummybufs_preallocate(struct imgu_device *imgu,
+ unsigned int pipe)
{
unsigned int i;
size_t size;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
size = css_queue_buf_size_map[i];
@@ -94,9 +93,9 @@ static int imgu_dummybufs_preallocate(struct imgu_device *imgu)
if (i == IMGU_QUEUE_MASTER || size == 0)
continue;
- if (!ipu3_dmamap_alloc(&imgu->pci_dev->dev,
- &imgu->queues[i].dmap, size)) {
- imgu_dummybufs_cleanup(imgu);
+ if (!ipu3_dmamap_alloc(imgu,
+ &imgu_pipe->queues[i].dmap, size)) {
+ imgu_dummybufs_cleanup(imgu, pipe);
return -ENOMEM;
}
}
@@ -104,45 +103,46 @@ static int imgu_dummybufs_preallocate(struct imgu_device *imgu)
return 0;
}
-static int imgu_dummybufs_init(struct imgu_device *imgu)
+static int imgu_dummybufs_init(struct imgu_device *imgu, unsigned int pipe)
{
const struct v4l2_pix_format_mplane *mpix;
const struct v4l2_meta_format *meta;
- unsigned int i, j, node;
+ unsigned int i, k, node;
size_t size;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* Allocate a dummy buffer for each queue where buffer is optional */
for (i = 0; i < IPU3_CSS_QUEUES; i++) {
node = imgu_map_node(imgu, i);
- if (!imgu->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
+ if (!imgu_pipe->queue_enabled[node] || i == IMGU_QUEUE_MASTER)
continue;
- if (!imgu->nodes[IMGU_NODE_VF].enabled &&
- !imgu->nodes[IMGU_NODE_PV].enabled &&
+ if (!imgu_pipe->nodes[IMGU_NODE_VF].enabled &&
i == IPU3_CSS_QUEUE_VF)
/*
- * Do not enable dummy buffers for VF/PV if it is not
+ * Do not enable dummy buffers for VF if it is not
* requested by the user.
*/
continue;
- meta = &imgu->nodes[node].vdev_fmt.fmt.meta;
- mpix = &imgu->nodes[node].vdev_fmt.fmt.pix_mp;
+ meta = &imgu_pipe->nodes[node].vdev_fmt.fmt.meta;
+ mpix = &imgu_pipe->nodes[node].vdev_fmt.fmt.pix_mp;
if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
size = meta->buffersize;
else
size = mpix->plane_fmt[0].sizeimage;
- if (ipu3_css_dma_buffer_resize(&imgu->pci_dev->dev,
- &imgu->queues[i].dmap, size)) {
- imgu_dummybufs_cleanup(imgu);
+ if (ipu3_css_dma_buffer_resize(imgu,
+ &imgu_pipe->queues[i].dmap,
+ size)) {
+ imgu_dummybufs_cleanup(imgu, pipe);
return -ENOMEM;
}
- for (j = 0; j < IMGU_MAX_QUEUE_DEPTH; j++)
- ipu3_css_buf_init(&imgu->queues[i].dummybufs[j], i,
- imgu->queues[i].dmap.daddr);
+ for (k = 0; k < IMGU_MAX_QUEUE_DEPTH; k++)
+ ipu3_css_buf_init(&imgu_pipe->queues[i].dummybufs[k], i,
+ imgu_pipe->queues[i].dmap.daddr);
}
return 0;
@@ -150,40 +150,43 @@ static int imgu_dummybufs_init(struct imgu_device *imgu)
/* May be called from atomic context */
static struct ipu3_css_buffer *imgu_dummybufs_get(struct imgu_device *imgu,
- int queue)
+ int queue, unsigned int pipe)
{
unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
/* dummybufs are not allocated for master q */
if (queue == IPU3_CSS_QUEUE_IN)
return NULL;
- if (WARN_ON(!imgu->queues[queue].dmap.vaddr))
+ if (WARN_ON(!imgu_pipe->queues[queue].dmap.vaddr))
/* Buffer should not be allocated here */
return NULL;
for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
- if (ipu3_css_buf_state(&imgu->queues[queue].dummybufs[i]) !=
+ if (ipu3_css_buf_state(&imgu_pipe->queues[queue].dummybufs[i]) !=
IPU3_CSS_BUFFER_QUEUED)
break;
if (i == IMGU_MAX_QUEUE_DEPTH)
return NULL;
- ipu3_css_buf_init(&imgu->queues[queue].dummybufs[i], queue,
- imgu->queues[queue].dmap.daddr);
+ ipu3_css_buf_init(&imgu_pipe->queues[queue].dummybufs[i], queue,
+ imgu_pipe->queues[queue].dmap.daddr);
- return &imgu->queues[queue].dummybufs[i];
+ return &imgu_pipe->queues[queue].dummybufs[i];
}
/* Check if given buffer is a dummy buffer */
static bool imgu_dummybufs_check(struct imgu_device *imgu,
- struct ipu3_css_buffer *buf)
+ struct ipu3_css_buffer *buf,
+ unsigned int pipe)
{
unsigned int i;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
for (i = 0; i < IMGU_MAX_QUEUE_DEPTH; i++)
- if (buf == &imgu->queues[buf->queue].dummybufs[i])
+ if (buf == &imgu_pipe->queues[buf->queue].dummybufs[i])
break;
return i < IMGU_MAX_QUEUE_DEPTH;
@@ -198,65 +201,63 @@ static void imgu_buffer_done(struct imgu_device *imgu, struct vb2_buffer *vb,
}
static struct ipu3_css_buffer *imgu_queue_getbuf(struct imgu_device *imgu,
- unsigned int node)
+ unsigned int node,
+ unsigned int pipe)
{
struct imgu_buffer *buf;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
if (WARN_ON(node >= IMGU_NODE_NUM))
return NULL;
/* Find first free buffer from the node */
- list_for_each_entry(buf, &imgu->nodes[node].buffers, vid_buf.list) {
+ list_for_each_entry(buf, &imgu_pipe->nodes[node].buffers, vid_buf.list) {
if (ipu3_css_buf_state(&buf->css_buf) == IPU3_CSS_BUFFER_NEW)
return &buf->css_buf;
}
/* There were no free buffers, try to return a dummy buffer */
-
- return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue);
+ return imgu_dummybufs_get(imgu, imgu_node_map[node].css_queue, pipe);
}
/*
* Queue as many buffers to CSS as possible. If all buffers don't fit into
* CSS buffer queues, they remain unqueued and will be queued later.
*/
-int imgu_queue_buffers(struct imgu_device *imgu, bool initial)
+int imgu_queue_buffers(struct imgu_device *imgu, bool initial, unsigned int pipe)
{
unsigned int node;
int r = 0;
struct imgu_buffer *ibuf;
+ struct imgu_media_pipe *imgu_pipe = &imgu->imgu_pipe[pipe];
if (!ipu3_css_is_streaming(&imgu->css))
return 0;
+ dev_dbg(&imgu->pci_dev->dev, "Queue buffers to pipe %d", pipe);
mutex_lock(&imgu->lock);
/* Buffer set is queued to FW only when input buffer is ready */
- if (!imgu_queue_getbuf(imgu, IMGU_NODE_IN)) {
- mutex_unlock(&imgu->lock);
- return 0;
- }
- for (node = IMGU_NODE_IN + 1; 1; node = (node + 1) % IMGU_NODE_NUM) {
+ for (node = IMGU_NODE_NUM - 1;
+ imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe);
+ node = node ? node - 1 : IMGU_NODE_NUM - 1) {
if (node == IMGU_NODE_VF &&
- (imgu->css.pipe_id == IPU3_CSS_PIPE_ID_CAPTURE ||
- !imgu->nodes[IMGU_NODE_VF].enabled)) {
- continue;
- } else if (node == IMGU_NODE_PV &&
- (imgu->css.pipe_id == IPU3_CSS_PIPE_ID_VIDEO ||
- !imgu->nodes[IMGU_NODE_PV].enabled)) {
+ !imgu_pipe->nodes[IMGU_NODE_VF].enabled) {
+ dev_warn(&imgu->pci_dev->dev,
+ "Vf not enabled, ignore queue");
continue;
- } else if (imgu->queue_enabled[node]) {
+ } else if (imgu_pipe->queue_enabled[node]) {
struct ipu3_css_buffer *buf =
- imgu_queue_getbuf(imgu, node);
+ imgu_queue_getbuf(imgu, node, pipe);
int dummy;
if (!buf)
break;
- r = ipu3_css_buf_queue(&imgu->css, buf);
+ r = ipu3_css_buf_queue(&imgu->css, pipe, buf);
if (r)
break;
- dummy = imgu_dummybufs_check(imgu, buf);
+ dummy = imgu_dummybufs_check(imgu, buf, pipe);
if (!dummy)
ibuf = container_of(buf, struct imgu_buffer,
css_buf);
@@ -267,9 +268,6 @@ int imgu_queue_buffers(struct imgu_device *imgu, bool initial)
dummy ? 0 : ibuf->vid_buf.vbb.vb2_buf.index,
(u32)buf->daddr);
}
- if (node == IMGU_NODE_IN &&
- !imgu_queue_getbuf(imgu, IMGU_NODE_IN))
- break;
}
mutex_unlock(&imgu->lock);
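
The rewritten queueing loop above hinges entirely on the master queue: the loop condition calls imgu_queue_getbuf(imgu, IMGU_NODE_IN, pipe) on every iteration, so buffers are pushed toward CSS only while an input buffer is available to anchor a frame set, and the node index walks a descending round-robin that wraps at zero. Reduced to a sketch (both predicates are hypothetical placeholders, not driver functions):

    static void example_feed_pipe(void)
    {
        unsigned int node;

        for (node = IMGU_NODE_NUM - 1;
             input_buffer_available();                  /* hypothetical */
             node = node ? node - 1 : IMGU_NODE_NUM - 1) {
            if (!queue_one_buffer(node))                /* hypothetical */
                break;  /* CSS queues full; retried on the next IRQ */
        }
    }
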
@@ -294,14 +292,15 @@ failed:
for (node = 0; node < IMGU_NODE_NUM; node++) {
struct imgu_buffer *buf, *buf0;
- if (!imgu->queue_enabled[node])
+ if (!imgu_pipe->queue_enabled[node])
continue; /* Skip disabled queues */
mutex_lock(&imgu->lock);
- list_for_each_entry_safe(buf, buf0, &imgu->nodes[node].buffers,
+ list_for_each_entry_safe(buf, buf0,
+ &imgu_pipe->nodes[node].buffers,
vid_buf.list) {
if (ipu3_css_buf_state(&buf->css_buf) ==
- IPU3_CSS_BUFFER_QUEUED)
+ IPU3_CSS_BUFFER_QUEUED)
continue; /* Was already queued, skip */
ipu3_v4l2_buffer_done(&buf->vid_buf.vbb.vb2_buf,
@@ -334,10 +333,7 @@ static void imgu_powerdown(struct imgu_device *imgu)
int imgu_s_stream(struct imgu_device *imgu, int enable)
{
struct device *dev = &imgu->pci_dev->dev;
- struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
- struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
- unsigned int i, node;
- int r;
+ int r, pipe;
if (!enable) {
/* Stop streaming */
@@ -353,54 +349,6 @@ int imgu_s_stream(struct imgu_device *imgu, int enable)
return 0;
}
- /* Start streaming */
-
- dev_dbg(dev, "stream on\n");
- for (i = 0; i < IMGU_NODE_NUM; i++)
- imgu->queue_enabled[i] = imgu->nodes[i].enabled;
-
- /*
- * CSS library expects that the following queues are
- * always enabled; if buffers are not provided to some of the
- * queues, it stalls due to lack of buffers.
- * Force the queues to be enabled and if the user really hasn't
- * enabled them, use dummy buffers.
- */
- imgu->queue_enabled[IMGU_NODE_OUT] = true;
- imgu->queue_enabled[IMGU_NODE_VF] = true;
- imgu->queue_enabled[IMGU_NODE_PV] = true;
- imgu->queue_enabled[IMGU_NODE_STAT_3A] = true;
-
- /* This is handled specially */
- imgu->queue_enabled[IPU3_CSS_QUEUE_PARAMS] = false;
-
- /* Initialize CSS formats */
- for (i = 0; i < IPU3_CSS_QUEUES; i++) {
- node = imgu_map_node(imgu, i);
- /* No need to reconfig meta nodes */
- if (node == IMGU_NODE_STAT_3A || node == IMGU_NODE_PARAMS)
- continue;
- fmts[i] = imgu->queue_enabled[node] ?
- &imgu->nodes[node].vdev_fmt.fmt.pix_mp : NULL;
- }
-
- /* Enable VF output only when VF or PV queue requested by user */
- imgu->css.vf_output_en = IPU3_NODE_VF_DISABLED;
- if (imgu->nodes[IMGU_NODE_VF].enabled)
- imgu->css.vf_output_en = IPU3_NODE_VF_ENABLED;
- else if (imgu->nodes[IMGU_NODE_PV].enabled)
- imgu->css.vf_output_en = IPU3_NODE_PV_ENABLED;
-
- rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu->rect.eff;
- rects[IPU3_CSS_RECT_BDS] = &imgu->rect.bds;
- rects[IPU3_CSS_RECT_GDC] = &imgu->rect.gdc;
-
- r = ipu3_css_fmt_set(&imgu->css, fmts, rects);
- if (r) {
- dev_err(dev, "failed to set initial formats (%d)", r);
- return r;
- }
-
/* Set Power */
r = pm_runtime_get_sync(dev);
if (r < 0) {
@@ -423,24 +371,26 @@ int imgu_s_stream(struct imgu_device *imgu, int enable)
goto fail_start_streaming;
}
- /* Initialize dummy buffers */
- r = imgu_dummybufs_init(imgu);
- if (r) {
- dev_err(dev, "failed to initialize dummy buffers (%d)", r);
- goto fail_dummybufs;
- }
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ /* Initialize dummy buffers */
+ r = imgu_dummybufs_init(imgu, pipe);
+ if (r) {
+ dev_err(dev, "failed to initialize dummy buffers (%d)", r);
+ goto fail_dummybufs;
+ }
- /* Queue as many buffers from queue as possible */
- r = imgu_queue_buffers(imgu, true);
- if (r) {
- dev_err(dev, "failed to queue initial buffers (%d)", r);
- goto fail_queueing;
+ /* Queue as many buffers from queue as possible */
+ r = imgu_queue_buffers(imgu, true, pipe);
+ if (r) {
+ dev_err(dev, "failed to queue initial buffers (%d)", r);
+ goto fail_queueing;
+ }
}
return 0;
-
fail_queueing:
- imgu_dummybufs_cleanup(imgu);
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
+ imgu_dummybufs_cleanup(imgu, pipe);
fail_dummybufs:
ipu3_css_stop_streaming(&imgu->css);
fail_start_streaming:
@@ -453,60 +403,73 @@ static int imgu_video_nodes_init(struct imgu_device *imgu)
{
struct v4l2_pix_format_mplane *fmts[IPU3_CSS_QUEUES] = { NULL };
struct v4l2_rect *rects[IPU3_CSS_RECTS] = { NULL };
- unsigned int i;
+ struct imgu_media_pipe *imgu_pipe;
+ unsigned int i, j;
int r;
imgu->buf_struct_size = sizeof(struct imgu_buffer);
- for (i = 0; i < IMGU_NODE_NUM; i++) {
- imgu->nodes[i].name = imgu_node_map[i].name;
- imgu->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
- imgu->nodes[i].immutable = false;
- imgu->nodes[i].enabled = false;
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
+ imgu_pipe = &imgu->imgu_pipe[j];
- if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
- fmts[imgu_node_map[i].css_queue] =
- &imgu->nodes[i].vdev_fmt.fmt.pix_mp;
- atomic_set(&imgu->nodes[i].sequence, 0);
- }
+ for (i = 0; i < IMGU_NODE_NUM; i++) {
+ imgu_pipe->nodes[i].name = imgu_node_map[i].name;
+ imgu_pipe->nodes[i].output = i < IMGU_QUEUE_FIRST_INPUT;
+ imgu_pipe->nodes[i].enabled = false;
- /* Master queue is always enabled */
- imgu->nodes[IMGU_QUEUE_MASTER].immutable = true;
- imgu->nodes[IMGU_QUEUE_MASTER].enabled = true;
+ if (i != IMGU_NODE_PARAMS && i != IMGU_NODE_STAT_3A)
+ fmts[imgu_node_map[i].css_queue] =
+ &imgu_pipe->nodes[i].vdev_fmt.fmt.pix_mp;
+ atomic_set(&imgu_pipe->nodes[i].sequence, 0);
+ }
+ }
r = ipu3_v4l2_register(imgu);
if (r)
return r;
/* Set initial formats and initialize formats of video nodes */
- rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu->rect.eff;
- rects[IPU3_CSS_RECT_BDS] = &imgu->rect.bds;
- ipu3_css_fmt_set(&imgu->css, fmts, rects);
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++) {
+ imgu_pipe = &imgu->imgu_pipe[j];
- /* Pre-allocate dummy buffers */
- r = imgu_dummybufs_preallocate(imgu);
- if (r) {
- dev_err(&imgu->pci_dev->dev,
- "failed to pre-allocate dummy buffers (%d)", r);
- imgu_dummybufs_cleanup(imgu);
- return r;
+ rects[IPU3_CSS_RECT_EFFECTIVE] = &imgu_pipe->imgu_sd.rect.eff;
+ rects[IPU3_CSS_RECT_BDS] = &imgu_pipe->imgu_sd.rect.bds;
+ ipu3_css_fmt_set(&imgu->css, fmts, rects, j);
+
+ /* Pre-allocate dummy buffers */
+ r = imgu_dummybufs_preallocate(imgu, j);
+ if (r) {
+ dev_err(&imgu->pci_dev->dev,
+ "failed to pre-allocate dummy buffers (%d)", r);
+ goto out_cleanup;
+ }
}
imgu->vb2_alloc_ctx = vb2_dma_sg_init_ctx(&imgu->pci_dev->dev);
if (IS_ERR(imgu->vb2_alloc_ctx)) {
- imgu_dummybufs_cleanup(imgu);
- ipu3_v4l2_unregister(imgu);
- return PTR_ERR(imgu->vb2_alloc_ctx);
+ r = PTR_ERR(imgu->vb2_alloc_ctx);
+ goto out_cleanup;
}
return 0;
+
+out_cleanup:
+ for (j = 0; j < IMGU_MAX_PIPE_NUM; j++)
+ imgu_dummybufs_cleanup(imgu, j);
+
+ ipu3_v4l2_unregister(imgu);
+
+ return r;
}
static void imgu_video_nodes_exit(struct imgu_device *imgu)
{
+ int i;
+
vb2_dma_sg_cleanup_ctx(imgu->vb2_alloc_ctx);
imgu->vb2_alloc_ctx = NULL;
- imgu_dummybufs_cleanup(imgu);
+ for (i = 0; i < IMGU_MAX_PIPE_NUM; i++)
+ imgu_dummybufs_cleanup(imgu, i);
ipu3_v4l2_unregister(imgu);
}
@@ -515,13 +478,15 @@ static void imgu_video_nodes_exit(struct imgu_device *imgu)
static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
{
struct imgu_device *imgu = imgu_ptr;
+ struct imgu_media_pipe *imgu_pipe;
+ int p;
/* Dequeue / queue buffers */
do {
u64 ns = ktime_get_ns();
struct ipu3_css_buffer *b;
struct imgu_buffer *buf;
- unsigned int node;
+ unsigned int node, pipe;
bool dummy;
do {
@@ -540,31 +505,41 @@ static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
}
node = imgu_map_node(imgu, b->queue);
- dummy = imgu_dummybufs_check(imgu, b);
+ pipe = b->pipe;
+ dummy = imgu_dummybufs_check(imgu, b, pipe);
if (!dummy)
buf = container_of(b, struct imgu_buffer, css_buf);
dev_dbg(&imgu->pci_dev->dev,
- "dequeue %s %s buffer %d from css\n",
+ "dequeue %s %s buffer %d daddr 0x%x from css\n",
dummy ? "dummy" : "user",
imgu_node_map[node].name,
- dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index);
+ dummy ? 0 : buf->vid_buf.vbb.vb2_buf.index,
+ (u32)b->daddr);
if (dummy)
/* It was a dummy buffer, skip it */
continue;
/* Fill vb2 buffer entries and tell it's ready */
- if (!imgu->nodes[node].output) {
+ imgu_pipe = &imgu->imgu_pipe[pipe];
+ if (!imgu_pipe->nodes[node].output) {
buf->vid_buf.vbb.timestamp = ns_to_timeval(ns);
buf->vid_buf.vbb.field = V4L2_FIELD_NONE;
buf->vid_buf.vbb.sequence =
- atomic_inc_return(&imgu->nodes[node].sequence);
+ atomic_inc_return(
+ &imgu_pipe->nodes[node].sequence);
+ dev_dbg(&imgu->pci_dev->dev, "vb2 buffer sequence %d",
+ buf->vid_buf.vbb.sequence);
}
imgu_buffer_done(imgu, &buf->vid_buf.vbb.vb2_buf,
ipu3_css_buf_state(&buf->css_buf) ==
IPU3_CSS_BUFFER_DONE ?
VB2_BUF_STATE_DONE :
VB2_BUF_STATE_ERROR);
+ mutex_lock(&imgu->lock);
+ if (ipu3_css_queue_empty(&imgu->css))
+ wake_up_all(&imgu->buf_drain_wq);
+ mutex_unlock(&imgu->lock);
} while (1);
/*
@@ -573,7 +548,8 @@ static irqreturn_t imgu_isr_threaded(int irq, void *imgu_ptr)
* to be queued to CSS.
*/
if (!atomic_read(&imgu->qbuf_barrier))
- imgu_queue_buffers(imgu, false);
+ for_each_set_bit(p, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
+ imgu_queue_buffers(imgu, false, p);
return IRQ_HANDLED;
}
@@ -666,6 +642,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
mutex_init(&imgu->lock);
atomic_set(&imgu->qbuf_barrier, 0);
+ init_waitqueue_head(&imgu->buf_drain_wq);
r = ipu3_css_set_powerup(&pci_dev->dev, imgu->base);
if (r) {
@@ -681,7 +658,7 @@ static int imgu_pci_probe(struct pci_dev *pci_dev,
goto out_css_powerdown;
}
- r = ipu3_dmamap_init(&pci_dev->dev);
+ r = ipu3_dmamap_init(imgu);
if (r) {
dev_err(&pci_dev->dev,
"failed to initialize DMA mapping (%d)\n", r);
@@ -721,7 +698,7 @@ out_video_exit:
out_css_cleanup:
ipu3_css_cleanup(&imgu->css);
out_dmamap_exit:
- ipu3_dmamap_exit(&pci_dev->dev);
+ ipu3_dmamap_exit(imgu);
out_mmu_exit:
ipu3_mmu_exit(imgu->mmu);
out_css_powerdown:
@@ -742,7 +719,7 @@ static void imgu_pci_remove(struct pci_dev *pci_dev)
imgu_video_nodes_exit(imgu);
ipu3_css_cleanup(&imgu->css);
ipu3_css_set_powerdown(&pci_dev->dev, imgu->base);
- ipu3_dmamap_exit(&pci_dev->dev);
+ ipu3_dmamap_exit(imgu);
ipu3_mmu_exit(imgu->mmu);
mutex_destroy(&imgu->lock);
}
@@ -751,7 +728,6 @@ static int __maybe_unused imgu_suspend(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct imgu_device *imgu = pci_get_drvdata(pci_dev);
- unsigned long expire;
dev_dbg(dev, "enter %s\n", __func__);
imgu->suspend_in_stream = ipu3_css_is_streaming(&imgu->css);
@@ -765,13 +741,10 @@ static int __maybe_unused imgu_suspend(struct device *dev)
*/
synchronize_irq(pci_dev->irq);
/* Wait until all buffers in CSS are done. */
- expire = jiffies + msecs_to_jiffies(1000);
- while (!ipu3_css_queue_empty(&imgu->css)) {
- if (time_is_before_jiffies(expire)) {
- dev_err(dev, "wait buffer drain timeout.\n");
- break;
- }
- }
+ if (!wait_event_timeout(imgu->buf_drain_wq,
+ ipu3_css_queue_empty(&imgu->css), msecs_to_jiffies(1000)))
+ dev_err(dev, "wait buffer drain timeout.\n");
+
ipu3_css_stop_streaming(&imgu->css);
atomic_set(&imgu->qbuf_barrier, 0);
imgu_powerdown(imgu);
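
The replaced suspend loop above spun on ipu3_css_queue_empty() against a jiffies deadline without ever sleeping; the rework pairs the new buf_drain_wq wait queue (initialized in imgu_pci_probe(), woken from the ISR thread once the CSS queue drains) with wait_event_timeout(), so the task sleeps while the same 1000 ms bound applies. The pattern in isolation, with queue_is_empty() as a stand-in predicate:

    #include <linux/device.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(drain_wq);

    /* completion side, e.g. after a buffer is done: */
    static void example_buffer_done(void)
    {
        if (queue_is_empty())           /* stand-in predicate */
            wake_up_all(&drain_wq);
    }

    /* suspend side: */
    static void example_wait_drain(struct device *dev)
    {
        if (!wait_event_timeout(drain_wq, queue_is_empty(),
                                msecs_to_jiffies(1000)))
            dev_err(dev, "buffer drain wait timed out\n");
    }
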
@@ -786,6 +759,7 @@ static int __maybe_unused imgu_resume(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct imgu_device *imgu = pci_get_drvdata(pci_dev);
int r = 0;
+ unsigned int pipe;
dev_dbg(dev, "enter %s\n", __func__);
@@ -807,9 +781,13 @@ static int __maybe_unused imgu_resume(struct device *dev)
goto out;
}
- r = imgu_queue_buffers(imgu, true);
- if (r)
- dev_err(dev, "failed to queue buffers (%d)", r);
+ for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM) {
+ r = imgu_queue_buffers(imgu, true, pipe);
+ if (r)
+ dev_err(dev, "failed to queue buffers to pipe %d (%d)",
+ pipe, r);
+ }
+
out:
dev_dbg(dev, "leave %s\n", __func__);
diff --git a/drivers/media/pci/intel/ipu3/ipu3.h b/drivers/media/pci/intel/ipu3/ipu3.h
index 9e1374df06dd9e..c577b0ed9db218 100644
--- a/drivers/media/pci/intel/ipu3/ipu3.h
+++ b/drivers/media/pci/intel/ipu3/ipu3.h
@@ -7,6 +7,7 @@
#include <linux/iova.h>
#include <linux/pci.h>
+#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/videobuf2-dma-sg.h>
@@ -28,9 +29,8 @@
#define IMGU_NODE_PARAMS 1 /* Input parameters */
#define IMGU_NODE_OUT 2 /* Main output for still or video */
#define IMGU_NODE_VF 3 /* Preview */
-#define IMGU_NODE_PV 4 /* Postview for still capture */
-#define IMGU_NODE_STAT_3A 5 /* 3A statistics */
-#define IMGU_NODE_NUM 6
+#define IMGU_NODE_STAT_3A 4 /* 3A statistics */
+#define IMGU_NODE_NUM 5
#define file_to_intel_ipu3_node(__file) \
container_of(video_devdata(__file), struct imgu_video_device, vdev)
@@ -70,10 +70,8 @@ struct imgu_node_mapping {
*/
struct imgu_video_device {
const char *name;
- bool output; /* Frames to the driver? */
- bool immutable; /* Can not be enabled/disabled */
+ bool output;
bool enabled;
- int queued; /* Buffers already queued */
struct v4l2_format vdev_fmt; /* Currently set format */
/* Private fields */
@@ -85,14 +83,27 @@ struct imgu_video_device {
 /* Protect vb2_queue and vdev structs */
struct mutex lock;
atomic_t sequence;
+ unsigned int id;
+ unsigned int pipe;
};
-/*
- * imgu_device -- ImgU (Imaging Unit) driver
- */
-struct imgu_device {
- struct pci_dev *pci_dev;
- void __iomem *base;
+struct imgu_v4l2_subdev {
+ unsigned int pipe;
+ struct v4l2_subdev subdev;
+ struct media_pad subdev_pads[IMGU_NODE_NUM];
+ struct {
+ struct v4l2_rect eff; /* effective resolution */
+ struct v4l2_rect bds; /* bayer-domain scaled resolution */
+ struct v4l2_rect gdc; /* gdc output resolution */
+ } rect;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl;
+ atomic_t running_mode;
+ bool active;
+};
+
+struct imgu_media_pipe {
+ unsigned int pipe;
/* Internally enabled queues */
struct {
@@ -101,18 +112,26 @@ struct imgu_device {
} queues[IPU3_CSS_QUEUES];
struct imgu_video_device nodes[IMGU_NODE_NUM];
bool queue_enabled[IMGU_NODE_NUM];
+ struct media_pipeline pipeline;
+ struct imgu_v4l2_subdev imgu_sd;
+};
+
+/*
+ * imgu_device -- ImgU (Imaging Unit) driver
+ */
+struct imgu_device {
+ struct pci_dev *pci_dev;
+ void __iomem *base;
/* Public fields, fill before registering */
unsigned int buf_struct_size;
bool streaming; /* Public read only */
- struct v4l2_ctrl_handler *ctrl_handler;
+
+ struct imgu_media_pipe imgu_pipe[IMGU_MAX_PIPE_NUM];
/* Private fields */
struct v4l2_device v4l2_dev;
struct media_device media_dev;
- struct media_pipeline pipeline;
- struct v4l2_subdev subdev;
- struct media_pad *subdev_pads;
struct v4l2_file_operations v4l2_file_ops;
void *vb2_alloc_ctx;
@@ -130,18 +149,16 @@ struct imgu_device {
struct mutex lock;
 /* Forbid streaming and buffer queuing during system suspend. */
atomic_t qbuf_barrier;
- struct {
- struct v4l2_rect eff; /* effective resolution */
- struct v4l2_rect bds; /* bayer-domain scaled resolution*/
- struct v4l2_rect gdc; /* gdc output resolution */
- } rect;
 /* Indicate if system suspend takes place while imgu is streaming. */
bool suspend_in_stream;
+ /* Used to wait for FW buffer queue drain. */
+ wait_queue_head_t buf_drain_wq;
};
unsigned int imgu_node_to_queue(unsigned int node);
unsigned int imgu_map_node(struct imgu_device *imgu, unsigned int css_queue);
-int imgu_queue_buffers(struct imgu_device *imgu, bool initial);
+int imgu_queue_buffers(struct imgu_device *imgu, bool initial,
+ unsigned int pipe);
int ipu3_v4l2_register(struct imgu_device *dev);
int ipu3_v4l2_unregister(struct imgu_device *dev);
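
The header changes above are the heart of the multi-pipe rework: nodes, queues, the media pipeline, the subdev, and the resolution rectangles all move from imgu_device into struct imgu_media_pipe, and imgu_device carries an array of IMGU_MAX_PIPE_NUM of them. Every path that used to touch imgu->nodes[...] now needs an explicit pipe index; a minimal access sketch (the helper name is illustrative):

    static struct imgu_video_device *
    example_node(struct imgu_device *imgu, unsigned int pipe,
                 unsigned int node)
    {
        return &imgu->imgu_pipe[pipe].nodes[node];
    }

Streaming-time paths then iterate only the pipes CSS has enabled, as seen repeatedly in ipu3.c:

    for_each_set_bit(pipe, imgu->css.enabled_pipes, IMGU_MAX_PIPE_NUM)
        imgu_queue_buffers(imgu, false, pipe);
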
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 24152accc66c75..8729fdebef8f39 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -125,7 +125,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
/* Get user pages for DMA Xfer */
err = get_user_pages_unlocked(current, current->mm,
- user_dma.uaddr, user_dma.page_count, 0, 1, dma->map);
+ user_dma.uaddr, user_dma.page_count, dma->map,
+ FOLL_FORCE);
if (user_dma.page_count != err) {
IVTV_DEBUG_WARN("failed to map user pages, returned %d instead of %d\n",
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 2b8e7b2f2b865f..9cd995f418e0fb 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -76,13 +76,13 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
/* Get user pages for DMA Xfer */
y_pages = get_user_pages_unlocked(current, current->mm,
- y_dma.uaddr, y_dma.page_count, 0, 1,
- &dma->map[0]);
+ y_dma.uaddr, y_dma.page_count,
+ &dma->map[0], FOLL_FORCE);
uv_pages = 0; /* silence gcc. value is set and consumed only if: */
if (y_pages == y_dma.page_count) {
uv_pages = get_user_pages_unlocked(current, current->mm,
- uv_dma.uaddr, uv_dma.page_count, 0, 1,
- &dma->map[y_pages]);
+ uv_dma.uaddr, uv_dma.page_count,
+ &dma->map[y_pages], FOLL_FORCE);
}
if (y_pages != y_dma.page_count || uv_pages != uv_dma.page_count) {
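
Both ivtv call sites above (and the omap_vout get_vaddr_frames() call further down) are converted from the old boolean write/force arguments to FOLL_* gup flags, with the pages pointer moving in front of the flags; write=0, force=1 therefore becomes a bare FOLL_FORCE. An equivalence sketch against this tree's signature, which still takes the task and mm:

    #include <linux/mm.h>

    static long example_pin(unsigned long uaddr, int nr, struct page **pages)
    {
        /* old: get_user_pages_unlocked(current, current->mm,
         *                              uaddr, nr, 0, 1, pages);  */
        return get_user_pages_unlocked(current, current->mm,
                                       uaddr, nr, pages, FOLL_FORCE);
    }
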
diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c
index 269e0782c7b64a..93d53195e8cabd 100644
--- a/drivers/media/pci/saa7164/saa7164-fw.c
+++ b/drivers/media/pci/saa7164/saa7164-fw.c
@@ -430,7 +430,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev)
__func__, fw->size);
if (fw->size != fwlength) {
- printk(KERN_ERR "xc5000: firmware incorrect size\n");
+ printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n",
+ fw->size, fwlength);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 8023404339b57e..4ca87411987b26 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -378,3 +378,22 @@ menuconfig DVB_PLATFORM_DRIVERS
if DVB_PLATFORM_DRIVERS
source "drivers/media/platform/sti/c8sectpfe/Kconfig"
endif #DVB_PLATFORM_DRIVERS
+
+menuconfig CEC_PLATFORM_DRIVERS
+ bool "CEC platform devices"
+ depends on MEDIA_CEC_SUPPORT
+
+if CEC_PLATFORM_DRIVERS
+
+config VIDEO_CROS_EC_CEC
+ tristate "Chrome OS EC CEC driver"
+ depends on MFD_CROS_EC || COMPILE_TEST
+ select CEC_CORE
+ select CEC_NOTIFIER
+ ---help---
+ If you say yes here you will get support for the
+ Chrome OS Embedded Controller's CEC.
+ The CEC bus is present in the HDMI connector and enables communication
+ between compatible devices.
+
+endif #CEC_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 5bb3caec33f3e6..7e0574b22a25d4 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -66,3 +66,5 @@ obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec/
obj-$(CONFIG_VIDEO_MEDIATEK_MDP) += mtk-mdp/
obj-$(CONFIG_VIDEO_MEDIATEK_JPEG) += mtk-jpeg/
+
+obj-y += cros-ec-cec/
diff --git a/drivers/media/platform/cros-ec-cec/Makefile b/drivers/media/platform/cros-ec-cec/Makefile
new file mode 100644
index 00000000000000..9ce97f93febe9c
--- /dev/null
+++ b/drivers/media/platform/cros-ec-cec/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_CROS_EC_CEC) += cros-ec-cec.o
diff --git a/drivers/media/platform/cros-ec-cec/cros-ec-cec.c b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
new file mode 100644
index 00000000000000..7bc4d8a9af287a
--- /dev/null
+++ b/drivers/media/platform/cros-ec-cec/cros-ec-cec.c
@@ -0,0 +1,347 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * CEC driver for ChromeOS Embedded Controller
+ *
+ * Copyright (c) 2018 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/cec.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
+
+#define DRV_NAME "cros-ec-cec"
+
+/**
+ * struct cros_ec_cec - Driver data for EC CEC
+ *
+ * @cros_ec: Pointer to EC device
+ * @notifier: Notifier info for responding to EC events
+ * @adap: CEC adapter
+ * @notify: CEC notifier pointer
+ * @rx_msg: storage for a received message
+ */
+struct cros_ec_cec {
+ struct cros_ec_device *cros_ec;
+ struct notifier_block notifier;
+ struct cec_adapter *adap;
+ struct cec_notifier *notify;
+ struct cec_msg rx_msg;
+};
+
+static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ uint8_t *cec_message = cros_ec->event_data.data.cec_message;
+ unsigned int len = cros_ec->event_size;
+
+ cros_ec_cec->rx_msg.len = len;
+ memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+
+ cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg);
+}
+
+static void handle_cec_event(struct cros_ec_cec *cros_ec_cec)
+{
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ uint32_t events = cros_ec->event_data.data.cec_events;
+
+ if (events & EC_MKBP_CEC_SEND_OK)
+ cec_transmit_attempt_done(cros_ec_cec->adap,
+ CEC_TX_STATUS_OK);
+
+ /* FW takes care of all retries, tell core to avoid more retries */
+ if (events & EC_MKBP_CEC_SEND_FAILED)
+ cec_transmit_attempt_done(cros_ec_cec->adap,
+ CEC_TX_STATUS_MAX_RETRIES |
+ CEC_TX_STATUS_NACK);
+}
+
+static int cros_ec_cec_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_cec *cros_ec_cec;
+ struct cros_ec_device *cros_ec;
+
+ cros_ec_cec = container_of(nb, struct cros_ec_cec, notifier);
+ cros_ec = cros_ec_cec->cros_ec;
+
+ if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_EVENT) {
+ handle_cec_event(cros_ec_cec);
+ return NOTIFY_OK;
+ }
+
+ if (cros_ec->event_data.event_type == EC_MKBP_EVENT_CEC_MESSAGE) {
+ handle_cec_message(cros_ec_cec);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_set data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_SET;
+ msg.msg.outsize = sizeof(msg.data);
+ msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS;
+ msg.data.val = logical_addr;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error setting CEC logical address on EC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *cec_msg)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_write data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_WRITE_MSG;
+ msg.msg.outsize = cec_msg->len;
+ memcpy(msg.data.msg, cec_msg->msg, cec_msg->len);
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error writing CEC msg on EC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct cros_ec_cec *cros_ec_cec = adap->priv;
+ struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+ struct {
+ struct cros_ec_command msg;
+ struct ec_params_cec_set data;
+ } __packed msg = {};
+ int ret;
+
+ msg.msg.command = EC_CMD_CEC_SET;
+ msg.msg.outsize = sizeof(msg.data);
+ msg.data.cmd = CEC_CMD_ENABLE;
+ msg.data.val = enable;
+
+ ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+ if (ret < 0) {
+ dev_err(cros_ec->dev,
+ "error %sabling CEC on EC: %d\n",
+ (enable ? "en" : "dis"), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct cec_adap_ops cros_ec_cec_ops = {
+ .adap_enable = cros_ec_cec_adap_enable,
+ .adap_log_addr = cros_ec_cec_set_log_addr,
+ .adap_transmit = cros_ec_cec_transmit,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int cros_ec_cec_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(cros_ec_cec->cros_ec->irq);
+
+ return 0;
+}
+
+static int cros_ec_cec_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_cec *cros_ec_cec = dev_get_drvdata(&pdev->dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(cros_ec_cec->cros_ec->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
+ cros_ec_cec_suspend, cros_ec_cec_resume);
+
+#if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
+
+/*
+ * The firmware only handles a single CEC interface, tied to a single HDMI
+ * connector, which we specify along with the name of the DRM device that
+ * drives the HDMI output.
+ */
+
+struct cec_dmi_match {
+ char *sys_vendor;
+ char *product_name;
+ char *devname;
+ char *conn;
+};
+
+static const struct cec_dmi_match cec_dmi_match_table[] = {
+ /* Google Fizz */
+ { "Google", "Fizz", "0000:00:02.0", "Port B" },
+};
+
+static int cros_ec_cec_get_notifier(struct device *dev,
+ struct cec_notifier **notify)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cec_dmi_match_table); ++i) {
+ const struct cec_dmi_match *m = &cec_dmi_match_table[i];
+
+ if (dmi_match(DMI_SYS_VENDOR, m->sys_vendor) &&
+ dmi_match(DMI_PRODUCT_NAME, m->product_name)) {
+ struct device *d;
+
+ /* Find the device, bail out if not yet registered */
+ d = bus_find_device_by_name(&pci_bus_type, NULL,
+ m->devname);
+ if (!d)
+ return -EPROBE_DEFER;
+
+ *notify = cec_notifier_get_conn(d, m->conn);
+ return 0;
+ }
+ }
+
+ /* Hardware support must be added in the cec_dmi_match_table */
+ dev_warn(dev, "CEC notifier not configured for this hardware\n");
+
+ return -ENODEV;
+}
+
+#else
+
+static int cros_ec_cec_get_notifier(struct device *dev,
+ struct cec_notifier **notify)
+{
+ return -ENODEV;
+}
+
+#endif
+
+static int cros_ec_cec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
+ struct cros_ec_device *cros_ec = ec_dev->ec_dev;
+ struct cros_ec_cec *cros_ec_cec;
+ int ret;
+
+ cros_ec_cec = devm_kzalloc(&pdev->dev, sizeof(*cros_ec_cec),
+ GFP_KERNEL);
+ if (!cros_ec_cec)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cros_ec_cec);
+ cros_ec_cec->cros_ec = cros_ec;
+
+ ret = cros_ec_cec_get_notifier(&pdev->dev, &cros_ec_cec->notify);
+ if (ret)
+ return ret;
+
+ ret = device_init_wakeup(&pdev->dev, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize wakeup\n");
+ return ret;
+ }
+
+ cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
+ DRV_NAME, CEC_CAP_DEFAULTS, 1);
+ if (IS_ERR(cros_ec_cec->adap))
+ return PTR_ERR(cros_ec_cec->adap);
+
+ /* Get CEC events from the EC. */
+ cros_ec_cec->notifier.notifier_call = cros_ec_cec_event;
+ ret = blocking_notifier_chain_register(&cros_ec->event_notifier,
+ &cros_ec_cec->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier\n");
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
+ }
+
+ ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
+ if (ret < 0) {
+ cec_delete_adapter(cros_ec_cec->adap);
+ return ret;
+ }
+
+ cec_register_cec_notifier(cros_ec_cec->adap, cros_ec_cec->notify);
+
+ return 0;
+}
+
+static int cros_ec_cec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(
+ &cros_ec_cec->cros_ec->event_notifier,
+ &cros_ec_cec->notifier);
+
+ if (ret) {
+ dev_err(dev, "failed to unregister notifier\n");
+ return ret;
+ }
+
+ cec_unregister_adapter(cros_ec_cec->adap);
+
+ if (cros_ec_cec->notify)
+ cec_notifier_put(cros_ec_cec->notify);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_cec_driver = {
+ .probe = cros_ec_cec_probe,
+ .remove = cros_ec_cec_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &cros_ec_cec_pm_ops,
+ },
+};
+
+module_platform_driver(cros_ec_cec_driver);
+
+MODULE_DESCRIPTION("CEC driver for ChromeOS ECs");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
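
Every EC-side operation in the new driver uses the same host-command framing: a struct cros_ec_command header and the command-specific parameter block laid out back to back in a single __packed struct, sent with cros_ec_cmd_xfer_status(). Reduced to a sketch of the enable command already shown above (the wrapper itself is not part of the driver):

    static int example_cec_enable(struct cros_ec_device *cros_ec, bool enable)
    {
        struct {
            struct cros_ec_command msg;     /* header */
            struct ec_params_cec_set data;  /* payload */
        } __packed msg = {};

        msg.msg.command = EC_CMD_CEC_SET;
        msg.msg.outsize = sizeof(msg.data); /* payload length */
        msg.data.cmd = CEC_CMD_ENABLE;
        msg.data.val = enable;

        return cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
    }
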
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 9a6c2cc38acb3d..abce9c4a1a8ec9 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -753,7 +753,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
def_output);
- return ret;
+ goto fail_kfree_amp;
}
printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
@@ -761,12 +761,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
def_mode);
- return ret;
+ goto fail_kfree_amp;
}
vpbe_dev->initialized = 1;
/* TBD handling of bootargs for default output and mode */
return 0;
+fail_kfree_amp:
+ mutex_lock(&vpbe_dev->lock);
+ kfree(vpbe_dev->amp);
fail_kfree_encoders:
kfree(vpbe_dev->encoders);
fail_dev_unregister:
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 6e6648446f0013..667d3720154a6f 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -391,12 +391,17 @@ static void __isp_video_try_fmt(struct fimc_isp *isp,
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
- *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+ const struct fimc_fmt *__fmt;
+
+ __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+ if (fmt)
+ *fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
- pixm->num_planes = (*fmt)->memplanes;
- pixm->pixelformat = (*fmt)->fourcc;
+ pixm->num_planes = __fmt->memplanes;
+ pixm->pixelformat = __fmt->fourcc;
/*
 * TODO: double check with the documentation that these width/height
* constraints are correct.
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ae8c6b35a35711..7f0ed5a26da9cc 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1417,7 +1417,7 @@ static int viu_of_probe(struct platform_device *op)
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
- goto err;
+ goto err_irq;
}
/* remap registers */
@@ -1425,7 +1425,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
/* Prepare our private structure */
@@ -1433,7 +1433,7 @@ static int viu_of_probe(struct platform_device *op)
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@ static int viu_of_probe(struct platform_device *op)
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
- goto err;
+ goto err_irq;
}
ad = i2c_get_adapter(0);
+ if (!ad) {
+ ret = -EFAULT;
+ dev_err(&op->dev, "couldn't get i2c adapter\n");
+ goto err_v4l2;
+ }
v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
- goto err_vdev;
+ goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
@@ -1476,7 +1481,7 @@ static int viu_of_probe(struct platform_device *op)
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
- goto err_vdev;
+ goto err_hdl;
}
*vdev = viu_template;
@@ -1497,7 +1502,7 @@ static int viu_of_probe(struct platform_device *op)
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
- goto err_vdev;
+ goto err_unlock;
}
/* enable VIU clock */
@@ -1505,12 +1510,12 @@ static int viu_of_probe(struct platform_device *op)
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
- goto err_clk;
+ goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
- goto err_clk;
+ goto err_vdev;
}
viu_dev->clk = clk;
@@ -1521,7 +1526,7 @@ static int viu_of_probe(struct platform_device *op)
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
- goto err_irq;
+ goto err_clk;
}
mutex_unlock(&viu_dev->lock);
@@ -1529,16 +1534,19 @@ static int viu_of_probe(struct platform_device *op)
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;
-err_irq:
- clk_disable_unprepare(viu_dev->clk);
err_clk:
- video_unregister_device(viu_dev->vdev);
+ clk_disable_unprepare(viu_dev->clk);
err_vdev:
- v4l2_ctrl_handler_free(&viu_dev->hdl);
+ video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(&viu_dev->lock);
+err_hdl:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 70c28d19ea04c8..5963595761096c 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -214,7 +214,7 @@ static int omap_vout_get_userptr(struct videobuf_buffer *vb, u32 virtp,
if (!vec)
return -ENOMEM;
- ret = get_vaddr_frames(virtp, 1, true, false, vec);
+ ret = get_vaddr_frames(virtp, 1, FOLL_WRITE, vec);
if (ret != 1) {
frame_vector_destroy(vec);
return -EINVAL;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 9db03b54cfc3f3..4863f829796ee7 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -304,7 +304,7 @@ static struct clk *isp_xclk_src_get(struct of_phandle_args *clkspec, void *data)
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
- struct clk_init_data init;
+ struct clk_init_data init = { 0 };
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
@@ -2078,6 +2078,7 @@ error_csiphy:
static void isp_detach_iommu(struct isp_device *isp)
{
+ arm_iommu_detach_device(isp->dev);
arm_iommu_release_mapping(isp->mapping);
isp->mapping = NULL;
iommu_group_remove_device(isp->dev);
@@ -2111,8 +2112,7 @@ static int isp_attach_iommu(struct isp_device *isp)
mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G);
if (IS_ERR(mapping)) {
dev_err(isp->dev, "failed to create ARM IOMMU mapping\n");
- ret = PTR_ERR(mapping);
- goto error;
+ return PTR_ERR(mapping);
}
isp->mapping = mapping;
@@ -2127,7 +2127,8 @@ static int isp_attach_iommu(struct isp_device *isp)
return 0;
error:
- isp_detach_iommu(isp);
+ arm_iommu_release_mapping(isp->mapping);
+ isp->mapping = NULL;
return ret;
}
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index f8e3e83c52a263..20de5e9fc21799 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1278,7 +1278,7 @@ static int jpu_open(struct file *file)
/* ...issue software reset */
ret = jpu_reset(jpu);
if (ret)
- goto device_prepare_rollback;
+ goto jpu_reset_rollback;
}
jpu->ref_count++;
@@ -1286,6 +1286,8 @@ static int jpu_open(struct file *file)
mutex_unlock(&jpu->mutex);
return 0;
+jpu_reset_rollback:
+ clk_disable_unprepare(jpu->clk);
device_prepare_rollback:
mutex_unlock(&jpu->mutex);
v4l_prepare_rollback:
diff --git a/drivers/media/platform/rockchip-vpu/Makefile b/drivers/media/platform/rockchip-vpu/Makefile
index 130a2bb2e01324..c52ffcae3837d7 100644
--- a/drivers/media/platform/rockchip-vpu/Makefile
+++ b/drivers/media/platform/rockchip-vpu/Makefile
@@ -13,6 +13,7 @@ rockchip-vpu-y += rockchip_vpu.o \
rk3399_vpu_hw.o \
rk3399_vpu_hw_vp8d.o \
rk3399_vpu_hw_h264e.o \
+ rk3399_vpu_hw_jpege.o \
rk3399_vpu_hw_vp8e.o \
rk3399_vdec_hw.o \
rk3399_vdec_hw_h264d.o \
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_h264d.c b/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_h264d.c
index 2b71980b38cfff..534256d5a7e062 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_h264d.c
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_h264d.c
@@ -616,6 +616,9 @@ void rk3399_vdec_h264d_run(struct rockchip_vpu_ctx *ctx)
schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ vdpu_write(vpu, 1, RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
+ vdpu_write(vpu, 1, RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
+
/* Start decoding! */
vdpu_write(vpu, RKVDEC_INTERRUPT_DEC_E
| RKVDEC_CONFIG_DEC_CLK_GATE_E,
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_vp9d.c b/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_vp9d.c
index 824b885a5948b5..50a369c0c08392 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_vp9d.c
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vdec_hw_vp9d.c
@@ -617,9 +617,9 @@ static void rk3399_vdec_vp9d_config_registers(struct rockchip_vpu_ctx *ctx)
last_info->mv_base_addr = ctx->hw.vp9d.mv_base_addr;
if (!(frmhdr->sgmnt_params.flags &
- V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED) &&
- !(frmhdr->sgmnt_params.flags &
- V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP))
+ V4L2_VP9_SGMNT_PARAM_FLAG_ENABLED &&
+ !(frmhdr->sgmnt_params.flags &
+ V4L2_VP9_SGMNT_PARAM_FLAG_UPDATE_MAP)))
last_info->last_segid_flag =
!last_info->last_segid_flag;
}
@@ -773,17 +773,17 @@ static void rk3399_vdec_vp9d_config_registers(struct rockchip_vpu_ctx *ctx)
if (last_info->last_segid_flag) {
hw_base = ctx->hw.vp9d.priv_tbl.dma +
- offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap);
+ offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap_last);
vdpu_write_relaxed(vpu, hw_base, RKVDEC_REG_VP9_SEGIDCUR_BASE);
hw_base = ctx->hw.vp9d.priv_tbl.dma +
- offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap_last);
+ offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap);
vdpu_write_relaxed(vpu, hw_base, RKVDEC_REG_VP9_SEGIDLAST_BASE);
} else {
hw_base = ctx->hw.vp9d.priv_tbl.dma +
- offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap_last);
+ offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap);
vdpu_write_relaxed(vpu, hw_base, RKVDEC_REG_VP9_SEGIDCUR_BASE);
hw_base = ctx->hw.vp9d.priv_tbl.dma +
- offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap);
+ offsetof(struct rk3399_vdec_vp9d_priv_tbl, segmap_last);
vdpu_write_relaxed(vpu, hw_base, RKVDEC_REG_VP9_SEGIDLAST_BASE);
}
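
The swapped offsetof() targets above select which of the two segment-map slots in the private table is presented to the hardware as current and which as last, toggled by last_segid_flag, i.e. a ping-pong scheme. The selection in isolation (the names are illustrative stand-ins for the priv_tbl offsets):

    bool flip = last_info->last_segid_flag;
    dma_addr_t cur  = priv_dma + (flip ? off_segmap_last : off_segmap);
    dma_addr_t last = priv_dma + (flip ? off_segmap : off_segmap_last);
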
@@ -813,6 +813,9 @@ void rk3399_vdec_vp9d_run(struct rockchip_vpu_ctx *ctx)
schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+ vdpu_write(vpu, 1, RKVDEC_REG_PREF_LUMA_CACHE_COMMAND);
+ vdpu_write(vpu, 1, RKVDEC_REG_PREF_CHR_CACHE_COMMAND);
+
/* Start decoding! */
vdpu_write(vpu,
RKVDEC_INTERRUPT_DEC_E |
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vdec_regs.h b/drivers/media/platform/rockchip-vpu/rk3399_vdec_regs.h
index ae6c608e1652df..022ea224613507 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vdec_regs.h
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vdec_regs.h
@@ -194,6 +194,8 @@
#define RKVDEC_ERR_PKT_NUM(x) (((x) & 0x3fff) << 16)
#define RKVDEC_REG_H264_ERR_E 0x134
#define RKVDEC_H264_ERR_EN_HIGHBITS(x) ((x) & 0x3fffffff)
+#define RKVDEC_REG_PREF_LUMA_CACHE_COMMAND 0x410
+#define RKVDEC_REG_PREF_CHR_CACHE_COMMAND 0x450
#define SEQ_PARAMETER_SET_ID_OFF FIELD(0, 0)
#define SEQ_PARAMETER_SET_ID_LEN 4
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw.c b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw.c
index 783cb4cc055fab..37715dab8f2449 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw.c
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw.c
@@ -87,6 +87,20 @@ static const struct rockchip_vpu_fmt rk3399_vpu_enc_fmts[] = {
.step_height = MB_DIM,
},
},
+ {
+ .name = "JPEG Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_JPEG_RAW,
+ .codec_mode = RK_VPU_CODEC_JPEGE,
+ .num_planes = 1,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = MB_DIM,
+ },
+ },
};
static const struct rockchip_vpu_fmt rk3399_vpu_dec_fmts[] = {
@@ -281,6 +295,11 @@ static const struct rockchip_vpu_codec_ops rk3399_vpu_mode_ops[] = {
.done = rk3399_vpu_h264e_done,
.reset = rk3399_vpu_enc_reset,
},
+ [RK_VPU_CODEC_JPEGE] = {
+ .run = rk3399_vpu_jpege_run,
+ .done = rk3399_vpu_jpege_done,
+ .reset = rk3399_vpu_enc_reset,
+ },
};
/*
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_h264e.c b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_h264e.c
index a057b0c900dc84..4e2ba710a241b5 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_h264e.c
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_h264e.c
@@ -826,11 +826,16 @@ void rk3399_vpu_h264e_exit(struct rockchip_vpu_ctx *ctx)
static void rk3399_vpu_h264e_set_buffers(struct rockchip_vpu_dev *vpu,
struct rockchip_vpu_ctx *ctx)
{
+ const u32 src_addr_regs[] = { VEPU_REG_ADDR_IN_LUMA,
+ VEPU_REG_ADDR_IN_CB,
+ VEPU_REG_ADDR_IN_CR };
const struct rk3288_h264e_reg_params *params =
(struct rk3288_h264e_reg_params *)ctx->run.h264e.reg_params;
+ struct v4l2_pix_format_mplane *src_fmt = &ctx->src_fmt;
dma_addr_t ref_buf_dma, rec_buf_dma;
size_t rounded_size;
dma_addr_t dst_dma;
+ int i;
dma_addr_t cabac_dma =
ctx->hw.h264e.cabac_tbl[params->cabac_init_idc].dma;
@@ -859,16 +864,17 @@ static void rk3399_vpu_h264e_set_buffers(struct rockchip_vpu_dev *vpu,
vepu_write_relaxed(vpu, rec_buf_dma + rounded_size,
VEPU_REG_ADDR_REC_CHROMA);
- vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
- &ctx->run.src->b.vb2_buf, PLANE_Y),
- VEPU_REG_ADDR_IN_LUMA);
- vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
- &ctx->run.src->b.vb2_buf, PLANE_CB),
- VEPU_REG_ADDR_IN_CB);
- vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
- &ctx->run.src->b.vb2_buf, PLANE_CR),
- VEPU_REG_ADDR_IN_CR);
-
+ /*
+ * TODO(crbug.com/901264): The way to pass an offset within a DMA-buf
+ * is not defined in V4L2 specification, so we abuse data_offset
+ * for now. Fix it when we have the right interface, including
+ * any necessary validation and potential alignment issues.
+ */
+ for (i = 0; i < src_fmt->num_planes; ++i)
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
+ &ctx->run.src->b.vb2_buf, i) +
+ ctx->run.src->b.vb2_buf.planes[i].data_offset,
+ src_addr_regs[i]);
}
static s32 exp_golomb_signed(s32 val)
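
The TODO above (crbug.com/901264) is about addressing a plane that sits inside a larger DMA-buf: V4L2 defines no sanctioned way to pass that position for output buffers, so the driver adds each plane's data_offset to the plane's base DMA address. Purely as an illustration of the arithmetic, with made-up figures not computed by this driver:

    /* Illustration only: plane DMA addresses for NV12 in one dma-buf. */
    static void example_nv12_addrs(dma_addr_t buf_base)
    {
        unsigned int width = 1280, height = 720;       /* example frame */
        dma_addr_t luma   = buf_base;                  /* data_offset 0 */
        dma_addr_t chroma = buf_base + width * height; /* 921600 bytes in */

        (void)luma;
        (void)chroma;
    }
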
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_jpege.c b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_jpege.c
new file mode 100644
index 00000000000000..eedefe47d47d7c
--- /dev/null
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_jpege.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Rockchip rk3399 VPU codec driver
+ */
+
+#include <asm/unaligned.h>
+
+#include "rk3399_vpu_regs.h"
+#include "rockchip_vpu_common.h"
+#include "rockchip_vpu_hw.h"
+
+static void set_src_img_ctrl(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ struct v4l2_rect *crop = &ctx->src_crop;
+ u32 overfill_r, overfill_b;
+ u32 reg;
+
+ vpu_debug_enter();
+
+ /* The pix fmt width/height are already rounded up to macroblock (MB)
+ * alignment by the .vidioc_s_fmt_vid_cap_mplane() callback
+ */
+ overfill_r = pix_fmt->width - crop->width;
+ overfill_b = pix_fmt->height - crop->height;
+
+ reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(pix_fmt->width);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_INPUT_LUMA_INFO);
+
+ reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4) |
+ VEPU_REG_IN_IMG_CTRL_OVRFLB(overfill_b);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_OVER_FILL_STRM_OFFSET);
+
+ reg = VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
+
+ vpu_debug_leave();
+}
+
+static void rk3399_vpu_jpege_set_buffers(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ struct vb2_buffer *buf;
+ dma_addr_t dst_dma, src_dma[3];
+ u32 dst_size;
+ int i;
+
+ vpu_debug_enter();
+
+ buf = &ctx->run.dst->b.vb2_buf;
+ dst_dma = vb2_dma_contig_plane_dma_addr(buf, 0);
+ dst_size = vb2_plane_size(buf, 0);
+
+ vepu_write_relaxed(vpu, dst_dma, VEPU_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, dst_size, VEPU_REG_STR_BUF_LIMIT);
+
+ buf = &ctx->run.src->b.vb2_buf;
+ for (i = 0; i < ARRAY_SIZE(src_dma); i++) {
+ if (i < pix_fmt->num_planes)
+ src_dma[i] = vb2_dma_contig_plane_dma_addr(buf, i) +
+ ctx->run.src->b.vb2_buf.planes[i].data_offset;
+ else
+ src_dma[i] = src_dma[i - 1];
+ }
+ vepu_write_relaxed(vpu, src_dma[0], VEPU_REG_ADDR_IN_LUMA);
+ vepu_write_relaxed(vpu, src_dma[1], VEPU_REG_ADDR_IN_CB);
+ vepu_write_relaxed(vpu, src_dma[2], VEPU_REG_ADDR_IN_CR);
+
+ vpu_debug_leave();
+}
+
+static void rk3399_vpu_jpege_set_params(struct rockchip_vpu_dev *vpu,
+ struct rockchip_vpu_ctx *ctx)
+{
+ u32 reg, i;
+ u8 *qtable;
+
+ vpu_debug_enter();
+
+ set_src_img_ctrl(vpu, ctx);
+
+ for (i = 0; i < 16; i++) {
+ qtable = ctx->run.jpege.lumin_quant_tbl;
+ reg = get_unaligned_be32(&qtable[i * 4]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
+
+ qtable = ctx->run.jpege.chroma_quant_tbl;
+ reg = get_unaligned_be32(&qtable[i * 4]);
+
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
+ }
+
+ vpu_debug_leave();
+}
+
+void rk3399_vpu_jpege_run(struct rockchip_vpu_ctx *ctx)
+{
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+ u32 reg;
+
+ vpu_debug_enter();
+
+ rockchip_vpu_power_on(vpu);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ reg = VEPU_REG_ENCODE_FORMAT(2);
+ vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
+
+ rk3399_vpu_jpege_set_params(vpu, ctx);
+ rk3399_vpu_jpege_set_buffers(vpu, ctx);
+
+ /* Make sure that all registers are written at this point. */
+ wmb();
+
+ /* Start the hardware. */
+ reg = VEPU_REG_OUTPUT_SWAP32
+ | VEPU_REG_OUTPUT_SWAP16
+ | VEPU_REG_OUTPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP16
+ | VEPU_REG_INPUT_SWAP32;
+ vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);
+
+ reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
+ vepu_write(vpu, reg, VEPU_REG_AXI_CTRL);
+
+ reg = VEPU_REG_CLK_GATING_EN;
+ vepu_write(vpu, reg, VEPU_REG_INTERRUPT);
+
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
+ | VEPU_REG_PIC_TYPE(1)
+ | VEPU_REG_ENCODE_FORMAT(2)
+ | VEPU_REG_ENCODE_ENABLE;
+
+ /* Set the watchdog. */
+ schedule_delayed_work(&vpu->watchdog_work, msecs_to_jiffies(2000));
+
+ vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
+}
+
+void rk3399_vpu_jpege_done(struct rockchip_vpu_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ struct rockchip_vpu_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst = &ctx->run.dst->b;
+ size_t encoded_size;
+
+ vpu_debug_enter();
+
+ encoded_size = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ vb2_set_plane_payload(&vb2_dst->vb2_buf, 0, encoded_size);
+ rockchip_vpu_run_done(ctx, result);
+
+ vpu_debug_leave();
+}
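
The loop in rk3399_vpu_jpege_set_params() above packs each 64-byte quantization table into sixteen 32-bit register words, four table bytes per word in big-endian order. A standalone sketch of that packing; the table values below are placeholders, not a real JPEG quantization table:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | (uint32_t)p[3];
    }

    int main(void)
    {
        uint8_t qtable[64];
        int i;

        /* Placeholder values; a real table comes from userspace. */
        for (i = 0; i < 64; i++)
            qtable[i] = i + 1;

        /* 64 bytes -> 16 register words, mirroring the driver loop. */
        for (i = 0; i < 16; i++)
            printf("QUAT(%2d) = 0x%08x\n", i,
                   (unsigned int)pack_be32(&qtable[i * 4]));
        return 0;
    }
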
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_vp8e.c b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_vp8e.c
index bf5f7c0422bb4b..414867657bc792 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_vp8e.c
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vpu_hw_vp8e.c
@@ -310,13 +310,18 @@ static inline u32 enc_in_img_ctrl(struct rockchip_vpu_ctx *ctx)
static void rk3399_vpu_vp8e_set_buffers(struct rockchip_vpu_dev *vpu,
struct rockchip_vpu_ctx *ctx)
{
+ const u32 src_addr_regs[] = { VEPU_REG_ADDR_IN_LUMA,
+ VEPU_REG_ADDR_IN_CB,
+ VEPU_REG_ADDR_IN_CR };
const struct rk3399_vp8e_reg_params *params =
(struct rk3399_vp8e_reg_params *)ctx->run.vp8e.reg_params;
+ struct v4l2_pix_format_mplane *src_fmt = &ctx->src_fmt;
dma_addr_t ref_buf_dma, rec_buf_dma;
dma_addr_t stream_dma;
size_t rounded_size;
dma_addr_t dst_dma;
size_t dst_size;
+ int i;
vpu_debug_enter();
@@ -397,24 +402,17 @@ static void rk3399_vpu_vp8e_set_buffers(struct rockchip_vpu_dev *vpu,
VEPU_REG_ADDR_REC_CHROMA);
/* Source buffer. */
- if (rockchip_vpu_ctx_is_dummy_encode(ctx)) {
- vepu_write_relaxed(vpu, vpu->dummy_encode_src[PLANE_Y].dma,
- VEPU_REG_ADDR_IN_LUMA);
- vepu_write_relaxed(vpu, vpu->dummy_encode_src[PLANE_CB].dma,
- VEPU_REG_ADDR_IN_CB);
- vepu_write_relaxed(vpu, vpu->dummy_encode_src[PLANE_CR].dma,
- VEPU_REG_ADDR_IN_CR);
- } else {
- vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
- &ctx->run.src->b.vb2_buf, PLANE_Y),
- VEPU_REG_ADDR_IN_LUMA);
- vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
- &ctx->run.src->b.vb2_buf, PLANE_CB),
- VEPU_REG_ADDR_IN_CB);
+ /*
+ * TODO(crbug.com/901264): The way to pass an offset within a DMA-buf
+ * is not defined in V4L2 specification, so we abuse data_offset
+ * for now. Fix it when we have the right interface, including
+ * any necessary validation and potential alignment issues.
+ */
+ for (i = 0; i < src_fmt->num_planes; ++i)
vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(
- &ctx->run.src->b.vb2_buf, PLANE_CR),
- VEPU_REG_ADDR_IN_CR);
- }
+ &ctx->run.src->b.vb2_buf, i) +
+ ctx->run.src->b.vb2_buf.planes[i].data_offset,
+ src_addr_regs[i]);
/* Source parameters. */
vepu_write_relaxed(vpu, enc_in_img_ctrl(ctx),
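
Both this VP8 path and the JPEG path above now derive each plane's bus address as the vb2 plane base plus its data_offset, and the JPEG path falls back to the previous plane's address when the format carries fewer planes than the hardware expects. A standalone model with made-up addresses; in the driver the bases come from vb2_dma_contig_plane_dma_addr():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Made-up plane bases/offsets standing in for vb2 plane data. */
        uint64_t plane_base[3] = { 0x10000000, 0x10100000, 0x10140000 };
        uint32_t data_offset[3] = { 0, 0, 0 };
        int num_planes = 2;     /* e.g. NV12: luma + interleaved chroma */
        uint64_t addr[3];
        int i;

        for (i = 0; i < 3; i++) {
            if (i < num_planes)
                addr[i] = plane_base[i] + data_offset[i];
            else
                addr[i] = addr[i - 1];  /* reuse the previous plane */
            printf("IN_%s = 0x%llx\n",
                   i == 0 ? "LUMA" : (i == 1 ? "CB" : "CR"),
                   (unsigned long long)addr[i]);
        }
        return 0;
    }
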
diff --git a/drivers/media/platform/rockchip-vpu/rk3399_vpu_regs.h b/drivers/media/platform/rockchip-vpu/rk3399_vpu_regs.h
index f5225994dd37b5..c40e8730f7b3de 100644
--- a/drivers/media/platform/rockchip-vpu/rk3399_vpu_regs.h
+++ b/drivers/media/platform/rockchip-vpu/rk3399_vpu_regs.h
@@ -74,6 +74,8 @@
#define VEPU_REG_VP8_LF_MODE_DELTA_SPLITMV(x) (((x) & 0x7f) << 16)
#define VEPU_REG_VP8_LF_MODE_DELTA_ZEROMV(x) (((x) & 0x7f) << 8)
#define VEPU_REG_VP8_LF_MODE_DELTA_NEWMV(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x000 + ((i) * 0x4))
+#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x040 + ((i) * 0x4))
#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0b0 + ((i) * 0x4))
#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0b0 + ((i) * 0x4))
#define VEPU_REG_INTRA_AREA_CTRL 0x0b8
diff --git a/drivers/media/platform/rockchip-vpu/rockchip_vpu.c b/drivers/media/platform/rockchip-vpu/rockchip_vpu.c
index 5d7335f565849d..24679e49b27a0f 100644
--- a/drivers/media/platform/rockchip-vpu/rockchip_vpu.c
+++ b/drivers/media/platform/rockchip-vpu/rockchip_vpu.c
@@ -301,6 +301,8 @@ void write_header(u32 value, u32 *buffer, u32 offset, u32 len)
#define IS_VPU_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_MPEG) && \
V4L2_CTRL_DRIVER_PRIV(x))
+#define IS_USER_PRIV(x) ((V4L2_CTRL_ID2CLASS(x) == V4L2_CTRL_CLASS_USER) && \
+ V4L2_CTRL_DRIVER_PRIV(x))
int rockchip_vpu_ctrls_setup(struct rockchip_vpu_ctx *ctx,
const struct v4l2_ctrl_ops *ctrl_ops,
@@ -324,8 +326,8 @@ int rockchip_vpu_ctrls_setup(struct rockchip_vpu_ctx *ctx,
for (i = 0; i < num_ctrls; i++) {
if (IS_VPU_PRIV(controls[i].id)
- || controls[i].id >= V4L2_CID_CUSTOM_BASE
- || controls[i].type == V4L2_CTRL_TYPE_PRIVATE) {
+ || IS_USER_PRIV(controls[i].id)
+ || controls[i].type >= V4L2_CTRL_COMPOUND_TYPES) {
memset(&cfg, 0, sizeof(struct v4l2_ctrl_config));
cfg.ops = ctrl_ops;
@@ -356,7 +358,7 @@ int rockchip_vpu_ctrls_setup(struct rockchip_vpu_ctx *ctx,
ctrl_ops,
controls[i].id,
controls[i].maximum,
- 0,
+ controls[i].menu_skip_mask,
controls[i].
default_value);
} else {
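
The last hunk above stops discarding menu_skip_mask when creating standard menu controls. v4l2_ctrl_new_std_menu() interprets the mask as a bitmap of menu indices to hide: bit n set means item n is not selectable. A minimal illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned long skip = (1UL << 1) | (1UL << 2); /* hide items 1, 2 */
        int i;

        for (i = 0; i <= 3; i++)
            printf("item %d: %s\n", i,
                   (skip & (1UL << i)) ? "skipped" : "available");
        return 0;
    }
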
diff --git a/drivers/media/platform/rockchip-vpu/rockchip_vpu_common.h b/drivers/media/platform/rockchip-vpu/rockchip_vpu_common.h
index c17b7f0df933d1..6274b150c90790 100644
--- a/drivers/media/platform/rockchip-vpu/rockchip_vpu_common.h
+++ b/drivers/media/platform/rockchip-vpu/rockchip_vpu_common.h
@@ -92,6 +92,7 @@ struct rockchip_vpu_variant {
* @RK_VPU_CODEC_VP9D: VP9 decoder.
* @RK_VPU_CODEC_H264E: H264 encoder.
* @RK_VPU_CODEC_VP8E: VP8 encoder.
+ * @RK_VPU_CODEC_JPEGE: JPEG encoder.
*/
enum rockchip_vpu_codec_mode {
RK_VPU_CODEC_NONE = -1,
@@ -99,7 +100,8 @@ enum rockchip_vpu_codec_mode {
RK_VPU_CODEC_VP8D,
RK_VPU_CODEC_VP9D,
RK_VPU_CODEC_H264E,
- RK_VPU_CODEC_VP8E
+ RK_VPU_CODEC_VP8E,
+ RK_VPU_CODEC_JPEGE
};
/**
@@ -297,6 +299,11 @@ struct rockchip_vpu_vp9d_run {
struct v4l2_ctrl_vp9_entropy *entropy;
};
+struct rockchip_vpu_jpege_run {
+ u8 lumin_quant_tbl[ROCKCHIP_JPEG_QUANT_ELE_SIZE];
+ u8 chroma_quant_tbl[ROCKCHIP_JPEG_QUANT_ELE_SIZE];
+};
+
#define FIELD(word, bit) (32 * (word) + (bit))
#define WRITE_HEADER(value, buffer, field) \
@@ -325,6 +332,7 @@ struct rockchip_vpu_run {
struct rockchip_vpu_h264d_run h264d;
struct rockchip_vpu_h264e_run h264e;
struct rockchip_vpu_vp9d_run vp9d;
+ struct rockchip_vpu_jpege_run jpege;
/* Other modes will need different data. */
};
};
diff --git a/drivers/media/platform/rockchip-vpu/rockchip_vpu_dec.c b/drivers/media/platform/rockchip-vpu/rockchip_vpu_dec.c
index a941a4ab5aaba6..bcaa88417099a9 100644
--- a/drivers/media/platform/rockchip-vpu/rockchip_vpu_dec.c
+++ b/drivers/media/platform/rockchip-vpu/rockchip_vpu_dec.c
@@ -158,6 +158,14 @@ static struct rockchip_vpu_control controls[] = {
.elem_size = sizeof(struct v4l2_ctrl_vp9_entropy),
.can_store = true,
},
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .maximum = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .default_value = V4L2_MPEG_VIDEO_VP9_PROFILE_0,
+ .menu_skip_mask = 0,
+ },
};
static inline void *get_ctrl_ptr(struct rockchip_vpu_ctx *ctx, unsigned id)
@@ -331,6 +339,29 @@ static void calculate_plane_sizes(const struct rockchip_vpu_fmt *src_fmt,
}
}
+static void adjust_dst_sizes(struct rockchip_vpu_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_fmt_mp)
+{
+ /* Limit to hardware min/max. */
+ pix_fmt_mp->width = clamp(pix_fmt_mp->width,
+ ctx->vpu_src_fmt->frmsize.min_width,
+ ctx->vpu_src_fmt->frmsize.max_width);
+ pix_fmt_mp->height = clamp(pix_fmt_mp->height,
+ ctx->vpu_src_fmt->frmsize.min_height,
+ ctx->vpu_src_fmt->frmsize.max_height);
+
+ /* Round up to macroblocks. */
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME) {
+ pix_fmt_mp->width = round_up(pix_fmt_mp->width, SB_DIM);
+ pix_fmt_mp->height =
+ round_up(pix_fmt_mp->height, SB_DIM);
+ } else {
+ pix_fmt_mp->width = round_up(pix_fmt_mp->width, MB_DIM);
+ pix_fmt_mp->height =
+ round_up(pix_fmt_mp->height, MB_DIM);
+ }
+}
+
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct rockchip_vpu_dev *dev = video_drvdata(file);
@@ -373,25 +404,7 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
return -EINVAL;
}
- /* Limit to hardware min/max. */
- pix_fmt_mp->width = clamp(pix_fmt_mp->width,
- ctx->vpu_src_fmt->frmsize.min_width,
- ctx->vpu_src_fmt->frmsize.max_width);
- pix_fmt_mp->height = clamp(pix_fmt_mp->height,
- ctx->vpu_src_fmt->frmsize.min_height,
- ctx->vpu_src_fmt->frmsize.max_height);
-
- /* Round up to macroblocks. */
- if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME) {
- pix_fmt_mp->width = round_up(pix_fmt_mp->width, SB_DIM);
- pix_fmt_mp->height =
- round_up(pix_fmt_mp->height, SB_DIM);
- } else {
- pix_fmt_mp->width = round_up(pix_fmt_mp->width, MB_DIM);
- pix_fmt_mp->height =
- round_up(pix_fmt_mp->height, MB_DIM);
- }
-
+ adjust_dst_sizes(ctx, pix_fmt_mp);
/* Fill in remaining fields. */
calculate_plane_sizes(ctx->vpu_src_fmt, ctx->vpu_dst_fmt,
pix_fmt_mp);
@@ -417,11 +430,12 @@ static void reset_dst_fmt(struct rockchip_vpu_ctx *ctx)
memset(dst_fmt, 0, sizeof(*dst_fmt));
- dst_fmt->width = vpu_src_fmt->frmsize.min_width;
- dst_fmt->height = vpu_src_fmt->frmsize.min_height;
+ dst_fmt->width = ctx->src_fmt.width;
+ dst_fmt->height = ctx->src_fmt.height;
dst_fmt->pixelformat = ctx->vpu_dst_fmt->fourcc;
dst_fmt->num_planes = ctx->vpu_dst_fmt->num_planes;
+ adjust_dst_sizes(ctx, dst_fmt);
calculate_plane_sizes(vpu_src_fmt, ctx->vpu_dst_fmt, dst_fmt);
}
@@ -840,6 +854,7 @@ static int rockchip_vpu_dec_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM:
case V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR:
case V4L2_CID_MPEG_VIDEO_VP9_ENTROPY:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
/* These controls are used directly. */
break;
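
adjust_dst_sizes() clamps the requested resolution to the hardware limits and then rounds it up to a whole number of coding blocks. A sketch of the rounding, assuming MB_DIM is 16 and SB_DIM is 64 (their definitions sit outside this hunk):

    #include <stdio.h>

    #define MB_DIM  16      /* assumed macroblock dimension */
    #define SB_DIM  64      /* assumed VP9 superblock dimension */

    static unsigned int round_up_to(unsigned int v, unsigned int m)
    {
        return ((v + m - 1) / m) * m;
    }

    int main(void)
    {
        unsigned int w = 1280, h = 725;

        printf("MB-aligned: %ux%u\n",
               round_up_to(w, MB_DIM), round_up_to(h, MB_DIM));
        printf("SB-aligned: %ux%u\n",
               round_up_to(w, SB_DIM), round_up_to(h, SB_DIM));
        return 0;
    }
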
diff --git a/drivers/media/platform/rockchip-vpu/rockchip_vpu_enc.c b/drivers/media/platform/rockchip-vpu/rockchip_vpu_enc.c
index 66ce8549e35865..06b037df1a76d4 100644
--- a/drivers/media/platform/rockchip-vpu/rockchip_vpu_enc.c
+++ b/drivers/media/platform/rockchip-vpu/rockchip_vpu_enc.c
@@ -91,6 +91,8 @@ enum {
ROCKCHIP_VPU_ENC_CTRL_REG_PARAMS,
ROCKCHIP_VPU_ENC_CTRL_HW_PARAMS,
ROCKCHIP_VPU_ENC_CTRL_RET_PARAMS,
+ ROCKCHIP_VPU_ENC_CTRL_Y_QUANT_TBL,
+ ROCKCHIP_VPU_ENC_CTRL_C_QUANT_TBL,
};
static struct rockchip_vpu_control controls[] = {
@@ -128,6 +130,22 @@ static struct rockchip_vpu_control controls[] = {
.max_stores = VIDEO_MAX_FRAME,
.elem_size = ROCKCHIP_RET_PARAMS_SIZE,
},
+ [ROCKCHIP_VPU_ENC_CTRL_Y_QUANT_TBL] = {
+ .id = V4L2_CID_JPEG_LUMA_QUANTIZATION,
+ .type = V4L2_CTRL_TYPE_U8,
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .dims = { 8, 8 }
+ },
+ [ROCKCHIP_VPU_ENC_CTRL_C_QUANT_TBL] = {
+ .id = V4L2_CID_JPEG_CHROMA_QUANTIZATION,
+ .type = V4L2_CTRL_TYPE_U8,
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .dims = { 8, 8 }
+ },
/* Generic controls. (currently ignored) */
{
.id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
@@ -908,6 +926,8 @@ static int rockchip_vpu_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_PRIVATE_ROCKCHIP_HEADER:
case V4L2_CID_PRIVATE_ROCKCHIP_REG_PARAMS:
case V4L2_CID_PRIVATE_ROCKCHIP_HW_PARAMS:
+ case V4L2_CID_JPEG_LUMA_QUANTIZATION:
+ case V4L2_CID_JPEG_CHROMA_QUANTIZATION:
/* Nothing to do here. The control is used directly. */
break;
@@ -1284,7 +1304,6 @@ static void rockchip_vpu_buf_finish(struct vb2_buffer *vb)
*/
rk3288_vpu_vp8e_assemble_bitstream(ctx, buf);
}
-
vpu_debug_leave();
}
@@ -1449,6 +1468,13 @@ static void rockchip_vpu_enc_prepare_run(struct rockchip_vpu_ctx *ctx)
} else if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_H264) {
ctx->run.h264e.reg_params = get_ctrl_ptr(ctx,
ROCKCHIP_VPU_ENC_CTRL_REG_PARAMS);
+ } else if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_JPEG_RAW) {
+ memcpy(ctx->run.jpege.lumin_quant_tbl,
+ get_ctrl_ptr(ctx, ROCKCHIP_VPU_ENC_CTRL_Y_QUANT_TBL),
+ ROCKCHIP_JPEG_QUANT_ELE_SIZE);
+ memcpy(ctx->run.jpege.chroma_quant_tbl,
+ get_ctrl_ptr(ctx, ROCKCHIP_VPU_ENC_CTRL_C_QUANT_TBL),
+ ROCKCHIP_JPEG_QUANT_ELE_SIZE);
}
}
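
With the two U8 array controls above, userspace supplies the 8x8 quantization matrices before queueing a frame. A hedged userspace sketch using VIDIOC_S_EXT_CTRLS; it assumes a videodev2.h that carries the U8 payload support used by this patch, the V4L2_CID_JPEG_LUMA_QUANTIZATION ID this series adds, an open encoder node 'fd', and a caller-provided 64-byte table:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    static int set_luma_qtable(int fd, unsigned char luma_qt[64])
    {
        struct v4l2_ext_control ctrl;
        struct v4l2_ext_controls ctrls;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_JPEG_LUMA_QUANTIZATION;
        ctrl.size = 64;                 /* 8x8 bytes, matching .dims */
        ctrl.p_u8 = luma_qt;

        memset(&ctrls, 0, sizeof(ctrls));
        ctrls.ctrl_class = V4L2_CTRL_CLASS_JPEG;
        ctrls.count = 1;
        ctrls.controls = &ctrl;

        return ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);
    }
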
diff --git a/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.c b/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.c
index 8d851fd11ad196..112c3917dddf85 100644
--- a/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.c
+++ b/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.c
@@ -103,10 +103,11 @@ int rockchip_vpu_init(struct rockchip_vpu_ctx *ctx)
ctx->hw.codec_ops = &ctx->dev->variant->mode_ops[codec_mode];
- return ctx->hw.codec_ops->init(ctx);
+ return ctx->hw.codec_ops->init ? ctx->hw.codec_ops->init(ctx) : 0;
}
void rockchip_vpu_deinit(struct rockchip_vpu_ctx *ctx)
{
- ctx->hw.codec_ops->exit(ctx);
+ if (ctx->hw.codec_ops->exit)
+ ctx->hw.codec_ops->exit(ctx);
}
diff --git a/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.h b/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.h
index fe2ec12c770826..b69b810cb0728e 100644
--- a/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.h
+++ b/drivers/media/platform/rockchip-vpu/rockchip_vpu_hw.h
@@ -24,6 +24,7 @@
#define ROCKCHIP_HEADER_SIZE 1280
#define ROCKCHIP_HW_PARAMS_SIZE 5487
#define ROCKCHIP_RET_PARAMS_SIZE 488
+#define ROCKCHIP_JPEG_QUANT_ELE_SIZE 64
#define ROCKCHIP_VPU_CABAC_TABLE_SIZE (52 * 2 * 464)
@@ -399,4 +400,9 @@ void rk3399_vdec_vp9d_run(struct rockchip_vpu_ctx *ctx);
void rk3399_vdec_vp9d_done(struct rockchip_vpu_ctx *ctx,
enum vb2_buffer_state result);
+/* Run ops for rk3399 vpu JPEG encoder */
+void rk3399_vpu_jpege_run(struct rockchip_vpu_ctx *ctx);
+void rk3399_vpu_jpege_done(struct rockchip_vpu_ctx *ctx,
+ enum vb2_buffer_state result);
+
#endif /* ROCKCHIP_VPU_HW_H_ */
diff --git a/drivers/media/platform/rockchip/isp1/capture.c b/drivers/media/platform/rockchip/isp1/capture.c
index e6cfef96e03c4c..b778ef94ba42e3 100644
--- a/drivers/media/platform/rockchip/isp1/capture.c
+++ b/drivers/media/platform/rockchip/isp1/capture.c
@@ -970,16 +970,19 @@ static void mi_buffers_next_to_curr_locked(struct rkisp1_device *isp_dev)
* that don't have buffers with matching sequence IDs in the queue, use dummy
* buffers.
*/
-static int mi_buffers_get_next_locked(struct rkisp1_device *isp_dev)
+static void mi_buffers_get_next_locked(struct rkisp1_device *isp_dev)
{
- bool have_matching_buffers = false, have_buffers = false;
+ struct rkisp1_buffer *buffers[ARRAY_SIZE(isp_dev->stream)];
+ unsigned long have_buffers = 0;
u64 min_delta = -1ULL;
- u64 next_sequence_id = isp_dev->buf_sequence_id;
int s;
- for_each_set_bit(s,
- &isp_dev->mi_streaming,
- ARRAY_SIZE(isp_dev->stream)) {
+ /*
+ * Find buffers with a sequence ID closest to
+ * isp_dev->buf_sequence_id. We might not have an exact match,
+ * since stream stop could have removed some buffers from the queue.
+ */
+ for (s = 0; s < ARRAY_SIZE(isp_dev->stream); ++s) {
struct rkisp1_stream *stream = &isp_dev->stream[s];
struct rkisp1_buffer *buf;
u64 delta;
@@ -987,53 +990,47 @@ static int mi_buffers_get_next_locked(struct rkisp1_device *isp_dev)
if (list_empty(&stream->bufs_ready))
continue;
- have_buffers = true;
-
buf = list_first_entry(&stream->bufs_ready,
struct rkisp1_buffer, queue);
delta = buf->sequence_id - isp_dev->buf_sequence_id;
if (delta < min_delta) {
/*
- * Find a sequence ID existing in any queue nearest
- * to the global sequence counter. See below for how
- * it is used.
+ * This buffer is closer than previous buffers.
+ * Remember the new minimum and forget about the
+ * buffers seen in previous iterations.
*/
min_delta = delta;
- next_sequence_id = buf->sequence_id;
+ have_buffers = 0;
}
- if (delta)
- /* Not the next sequence ID, use dummy for this MI. */
- continue;
-
- /* Found matching buffer, use it. */
- stream->next_buf = buf;
- list_del(&stream->next_buf->queue);
- have_matching_buffers = true;
- }
-
- if (have_buffers) {
- if (!have_matching_buffers) {
- /*
- * This is a tricky case that might show up if we stop
- * streaming, while buffers are still in the queue.
- * Since VB2 .stop_streaming() removes buffers from the
- * queue, we could end up with a gap in sequence ID
- * space. Skip to the nearest sequence ID, which we
- * found when checking stream queues for buffers.
- */
- v4l2_dbg(1, rkisp1_debug, &isp_dev->v4l2_dev,
- "%s: Skipping buffer sequence IDs from %llu to %llu\n",
- __func__, isp_dev->buf_sequence_id,
- next_sequence_id - 1);
- isp_dev->buf_sequence_id = next_sequence_id;
- return -EAGAIN;
+ if (delta == min_delta) {
+ /* Matches currently found minimum, so keep it. */
+ have_buffers |= BIT(s);
+ buffers[s] = buf;
}
- /* Increment sequence ID counter only if we had any buffers. */
- ++isp_dev->buf_sequence_id;
}
- return 0;
+ /* No user buffers. Use dummy buffers. */
+ if (!have_buffers)
+ return;
+
+ /* Increment sequence ID counter only if we had any buffers. */
+ isp_dev->buf_sequence_id += min_delta + 1;
+
+ /*
+ * Some of the streams have not started yet. Since we need to stay
+ * in sync, use dummy buffers until they start.
+ */
+ if ((isp_dev->mi_streaming & have_buffers) != have_buffers)
+ return;
+
+ for_each_set_bit(s, &have_buffers,
+ ARRAY_SIZE(isp_dev->stream)) {
+ struct rkisp1_stream *stream = &isp_dev->stream[s];
+
+ stream->next_buf = buffers[s];
+ list_del(&stream->next_buf->queue);
+ }
}
/*
@@ -1061,17 +1058,13 @@ static void mi_buffers_set(struct rkisp1_device *isp_dev)
static void mi_frame_end(struct rkisp1_device *isp_dev)
{
unsigned long flags;
- int ret;
mi_buffers_done(isp_dev);
spin_lock_irqsave(&isp_dev->vbq_lock, flags);
mi_buffers_next_to_curr_locked(isp_dev);
-
- ret = mi_buffers_get_next_locked(isp_dev);
- if (ret == -EAGAIN)
- WARN_ON(mi_buffers_get_next_locked(isp_dev) < 0);
+ mi_buffers_get_next_locked(isp_dev);
spin_unlock_irqrestore(&isp_dev->vbq_lock, flags);
@@ -1273,6 +1266,7 @@ static void rkisp1_stop_streaming(struct vb2_queue *queue)
struct rkisp1_stream *stream = queue->drv_priv;
struct rkisp1_vdev_node *node = &stream->vnode;
struct rkisp1_device *dev = stream->ispdev;
+ struct rkisp1_stream *other = &dev->stream[stream->id ^ 1];
struct v4l2_device *v4l2_dev = &dev->v4l2_dev;
struct rkisp1_buffer *buf;
LIST_HEAD(buffers);
@@ -1299,6 +1293,11 @@ static void rkisp1_stop_streaming(struct vb2_queue *queue)
}
list_splice_tail_init(&stream->bufs_ready, &buffers);
list_splice_tail_init(&stream->bufs_pending, &buffers);
+ /*
+ * Flush pending buffers of the other queue, since they have nothing
+ * to wait for anymore.
+ */
+ list_splice_tail_init(&other->bufs_pending, &other->bufs_ready);
spin_unlock_irqrestore(&dev->vbq_lock, lock_flags);
while (!list_empty(&buffers)) {
@@ -1890,8 +1889,6 @@ void rkisp1_mi_isr(u32 mis_val, struct rkisp1_device *dev)
* Update all MIs atomically to maintain synchronization between
* streams.
*/
- if ((dev->mi_ready & dev->mi_streaming) == dev->mi_streaming) {
+ if ((dev->mi_ready & dev->mi_streaming) == dev->mi_streaming)
mi_frame_end(dev);
- dev->mi_ready = 0;
- }
}
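
The rewritten mi_buffers_get_next_locked() leans on unsigned wrap-around: seeding min_delta with -1ULL makes any real delta smaller, and sequence IDs behind the counter wrap to huge values, so the nearest ID at or after the counter always wins. A standalone model of the skip-past-a-gap case:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Ready-buffer sequence IDs per stream; ID 42 was dropped. */
        uint64_t ready[2] = { 43, 44 };
        uint64_t counter = 42;           /* next expected sequence ID */
        uint64_t min_delta = UINT64_MAX; /* the driver's -1ULL seed */
        int s;

        for (s = 0; s < 2; s++) {
            /* Unsigned subtraction: IDs behind the counter wrap huge. */
            uint64_t delta = ready[s] - counter;

            if (delta < min_delta)
                min_delta = delta;
        }

        /* Skip the counter past the gap, as the driver does. */
        counter += min_delta + 1;
        printf("new counter = %llu\n", (unsigned long long)counter); /* 44 */
        return 0;
    }
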
diff --git a/drivers/media/platform/rockchip/isp1/dev.c b/drivers/media/platform/rockchip/isp1/dev.c
index b2c8a61ea6fdd2..4edeb602ba3a91 100644
--- a/drivers/media/platform/rockchip/isp1/dev.c
+++ b/drivers/media/platform/rockchip/isp1/dev.c
@@ -457,6 +457,7 @@ static const struct of_device_id rkisp1_plat_of_match[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, rkisp1_plat_of_match);
static irqreturn_t rkisp1_irq_handler(int irq, void *ctx)
{
@@ -648,6 +649,8 @@ static int rkisp1_plat_remove(struct platform_device *pdev)
{
struct rkisp1_device *isp_dev = platform_get_drvdata(pdev);
+ v4l2_async_notifier_unregister(&isp_dev->notifier);
+ v4l2_async_notifier_cleanup(&isp_dev->notifier);
pm_runtime_disable(&pdev->dev);
media_device_unregister(&isp_dev->media_dev);
v4l2_device_unregister(&isp_dev->v4l2_dev);
diff --git a/drivers/media/platform/rockchip/isp1/isp_params.c b/drivers/media/platform/rockchip/isp1/isp_params.c
index 3ae57b697e5c1e..ac178aa88672a0 100644
--- a/drivers/media/platform/rockchip/isp1/isp_params.c
+++ b/drivers/media/platform/rockchip/isp1/isp_params.c
@@ -97,8 +97,13 @@ static void dpcc_config(struct rkisp1_isp_params_vdev *params_vdev,
const struct cifisp_dpcc_config *arg)
{
unsigned int i;
+ u32 mode;
- rkisp1_iowrite32(params_vdev, arg->mode, CIF_ISP_DPCC_MODE);
+ /* avoid overriding the old enable value */
+ mode = rkisp1_ioread32(params_vdev, CIF_ISP_DPCC_MODE);
+ mode &= CIF_ISP_DPCC_ENA;
+ mode |= arg->mode & ~CIF_ISP_DPCC_ENA;
+ rkisp1_iowrite32(params_vdev, mode, CIF_ISP_DPCC_MODE);
rkisp1_iowrite32(params_vdev, arg->output_mode,
CIF_ISP_DPCC_OUTPUT_MODE);
rkisp1_iowrite32(params_vdev, arg->set_use, CIF_ISP_DPCC_SET_USE);
@@ -130,8 +135,11 @@ static void dpcc_config(struct rkisp1_isp_params_vdev *params_vdev,
static void bls_config(struct rkisp1_isp_params_vdev *params_vdev,
const struct cifisp_bls_config *arg)
{
- u32 new_control = 0;
+ /* avoid overriding the old enable value */
+ u32 new_control;
+ new_control = rkisp1_ioread32(params_vdev, CIF_ISP_BLS_CTRL);
+ new_control &= CIF_ISP_BLS_ENA;
/* fixed subtraction values */
if (!arg->enable_auto) {
const struct cifisp_bls_fixed_val *pval = &arg->fixed_val;
@@ -181,7 +189,6 @@ static void bls_config(struct rkisp1_isp_params_vdev *params_vdev,
break;
}
- new_control = CIF_ISP_BLS_MODE_FIXED;
} else {
if (arg->en_windows & BIT(1)) {
rkisp1_iowrite32(params_vdev, arg->bls_window2.h_offs,
@@ -335,7 +342,7 @@ static void lsc_config(struct rkisp1_isp_params_vdev *params_vdev,
CIF_ISP_LSC_YGRAD_01 + i * 4);
}
- /* restore the bls ctrl status */
+ /* restore the lsc ctrl status */
if (lsc_ctrl & CIF_ISP_LSC_CTRL_ENA) {
isp_param_set_bits(params_vdev,
CIF_ISP_LSC_CTRL,
@@ -351,6 +358,8 @@ static void lsc_config(struct rkisp1_isp_params_vdev *params_vdev,
static void flt_config(struct rkisp1_isp_params_vdev *params_vdev,
const struct cifisp_flt_config *arg)
{
+ u32 filt_mode;
+
rkisp1_iowrite32(params_vdev, arg->thresh_bl0, CIF_ISP_FILT_THRESH_BL0);
rkisp1_iowrite32(params_vdev, arg->thresh_bl1, CIF_ISP_FILT_THRESH_BL1);
rkisp1_iowrite32(params_vdev, arg->thresh_sh0, CIF_ISP_FILT_THRESH_SH0);
@@ -367,14 +376,30 @@ static void flt_config(struct rkisp1_isp_params_vdev *params_vdev,
CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1),
CIF_ISP_FILT_MODE);
+
+ /* avoid overriding the old enable value */
+ filt_mode = rkisp1_ioread32(params_vdev, CIF_ISP_FILT_MODE);
+ filt_mode &= CIF_ISP_FLT_ENA;
+ if (arg->mode)
+ filt_mode |= CIF_ISP_FLT_MODE_DNR;
+ filt_mode |= CIF_ISP_FLT_CHROMA_V_MODE(arg->chr_v_mode) |
+ CIF_ISP_FLT_CHROMA_H_MODE(arg->chr_h_mode) |
+ CIF_ISP_FLT_GREEN_STAGE1(arg->grn_stage1);
+ rkisp1_iowrite32(params_vdev, filt_mode, CIF_ISP_FILT_MODE);
}
/* ISP demosaic interface function */
static int bdm_config(struct rkisp1_isp_params_vdev *params_vdev,
const struct cifisp_bdm_config *arg)
{
+ u32 bdm_th;
+
+ /* avoid overriding the old enable value */
+ bdm_th = rkisp1_ioread32(params_vdev, CIF_ISP_DEMOSAIC);
+ bdm_th &= CIF_ISP_DEMOSAIC_BYPASS;
+ bdm_th |= arg->demosaic_th & ~CIF_ISP_DEMOSAIC_BYPASS;
/* set demosaic threshold */
- rkisp1_iowrite32(params_vdev, arg->demosaic_th, CIF_ISP_DEMOSAIC);
+ rkisp1_iowrite32(params_vdev, bdm_th, CIF_ISP_DEMOSAIC);
return 0;
}
@@ -537,11 +562,16 @@ static void aec_config(struct rkisp1_isp_params_vdev *params_vdev,
const struct cifisp_aec_config *arg)
{
unsigned int block_hsize, block_vsize;
+ u32 exp_ctrl;
- rkisp1_iowrite32(params_vdev,
- ((arg->autostop) ? CIF_ISP_EXP_CTRL_AUTOSTOP : 0) |
- ((arg->mode == CIFISP_EXP_MEASURING_MODE_1) ?
- CIF_ISP_EXP_CTRL_MEASMODE_1 : 0), CIF_ISP_EXP_CTRL);
+ /* avoid overriding the old enable value */
+ exp_ctrl = rkisp1_ioread32(params_vdev, CIF_ISP_EXP_CTRL);
+ exp_ctrl &= CIF_ISP_EXP_ENA;
+ if (arg->autostop)
+ exp_ctrl |= CIF_ISP_EXP_CTRL_AUTOSTOP;
+ if (arg->mode == CIFISP_EXP_MEASURING_MODE_1)
+ exp_ctrl |= CIF_ISP_EXP_CTRL_MEASMODE_1;
+ rkisp1_iowrite32(params_vdev, exp_ctrl, CIF_ISP_EXP_CTRL);
rkisp1_iowrite32(params_vdev,
arg->meas_window.h_offs, CIF_ISP_EXP_H_OFFSET);
@@ -596,10 +626,13 @@ static void hst_config(struct rkisp1_isp_params_vdev *params_vdev,
};
int i;
const u8 *weight;
+ u32 hist_prop;
- rkisp1_iowrite32(params_vdev,
- CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider),
- CIF_ISP_HIST_PROP);
+ /* avoid overriding the old enable value */
+ hist_prop = rkisp1_ioread32(params_vdev, CIF_ISP_HIST_PROP);
+ hist_prop &= CIF_ISP_HIST_PROP_MODE_MASK;
+ hist_prop |= CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider);
+ rkisp1_iowrite32(params_vdev, hist_prop, CIF_ISP_HIST_PROP);
rkisp1_iowrite32(params_vdev,
arg->meas_window.h_offs,
CIF_ISP_HIST_H_OFFS);
@@ -641,8 +674,9 @@ static void afm_config(struct rkisp1_isp_params_vdev *params_vdev,
int i;
size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
arg->num_afm_win);
+ u32 afm_ctrl = rkisp1_ioread32(params_vdev, CIF_ISP_AFM_CTRL);
- /* Switch off to configure. Enabled during normal flow in frame isr. */
+ /* Switch off to configure. */
isp_param_clear_bits(params_vdev, CIF_ISP_AFM_CTRL, CIF_ISP_AFM_ENA);
for (i = 0; i < num_of_win; i++) {
@@ -659,6 +693,8 @@ static void afm_config(struct rkisp1_isp_params_vdev *params_vdev,
}
rkisp1_iowrite32(params_vdev, arg->thres, CIF_ISP_AFM_THRES);
rkisp1_iowrite32(params_vdev, arg->var_shift, CIF_ISP_AFM_VAR_SHIFT);
+ /* restore afm status */
+ rkisp1_iowrite32(params_vdev, afm_ctrl, CIF_ISP_AFM_CTRL);
}
static void ie_config(struct rkisp1_isp_params_vdev *params_vdev,
@@ -729,7 +765,6 @@ static void ie_enable(struct rkisp1_isp_params_vdev *params_vdev, bool en)
isp_param_set_bits(params_vdev, CIF_IMG_EFF_CTRL,
CIF_IMG_EFF_CTRL_CFG_UPD);
} else {
- /* Disable measurement */
isp_param_clear_bits(params_vdev, CIF_IMG_EFF_CTRL,
CIF_IMG_EFF_CTRL_ENABLE);
isp_param_clear_bits(params_vdev, CIF_ICCL, CIF_ICCL_IE_CLK);
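
Every config path touched above now follows the same read-modify-write shape: read the register, keep only the block's enable bit, merge in the new configuration with the enable bit masked out, and write it back. A generic sketch of the pattern against a stand-in register file; the enable-bit position is an assumption:

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_ENA   (1u << 0)       /* assumed enable-bit position */

    static uint32_t regs[1];            /* stand-in register file */

    static uint32_t reg_read(int off)            { return regs[off]; }
    static void reg_write(int off, uint32_t v)   { regs[off] = v; }

    /* Preserve the enable bit while applying a new configuration. */
    static void config_block(int off, uint32_t new_cfg)
    {
        uint32_t v = reg_read(off) & BLOCK_ENA; /* keep old enable state */

        v |= new_cfg & ~BLOCK_ENA;              /* never toggle enable here */
        reg_write(off, v);
    }

    int main(void)
    {
        regs[0] = BLOCK_ENA;            /* block currently enabled */
        config_block(0, 0xf0);          /* caller writes config only */
        printf("reg = 0x%02x\n", regs[0]);      /* -> 0xf1: enable kept */
        return 0;
    }
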
diff --git a/drivers/media/platform/rockchip/isp1/rkisp1.c b/drivers/media/platform/rockchip/isp1/rkisp1.c
index ee8110935165a2..2db6e4fed3cb51 100644
--- a/drivers/media/platform/rockchip/isp1/rkisp1.c
+++ b/drivers/media/platform/rockchip/isp1/rkisp1.c
@@ -1177,6 +1177,12 @@ void rkisp1_isp_isr(unsigned int isp_mis, struct rkisp1_device *dev)
if (isp_mis_tmp & CIF_ISP_FRAME_IN)
v4l2_err(&dev->v4l2_dev, "isp icr frame_in err: 0x%x\n",
isp_mis_tmp);
+
+ /*
+ * The ISP has started outputting the frame to MI, so the
+ * next MI interrupts will be for this frame.
+ */
+ dev->mi_ready = 0;
}
/* frame was completely put out */
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index fa6af4a7dae1fa..f97f4bc22ced73 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -117,6 +117,8 @@ static int sensor_set_power(struct camif_dev *camif, int on)
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (err == -ENOIOCTLCMD)
+ err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 5c678ec9c9f26a..8586379b822d38 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -681,12 +681,12 @@ static struct mfc_control controls[] = {
.default_value = 10,
},
{
- .id = V4L2_CID_MPEG_VIDEO_VPX_PROFILE,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = 0,
- .maximum = 3,
- .step = 1,
- .default_value = 0,
+ .id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .maximum = V4L2_MPEG_VIDEO_VP8_PROFILE_3,
+ .default_value = V4L2_MPEG_VIDEO_VP8_PROFILE_0,
+ .menu_skip_mask = 0,
},
{
.id = V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -1638,7 +1638,7 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP:
p->codec.vp8.rc_p_frame_qp = ctrl->val;
break;
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
p->codec.vp8.profile = ctrl->val;
break;
default:
@@ -2148,4 +2148,3 @@ void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx)
f.fmt.pix_mp.pixelformat = DEF_DST_FMT_ENC;
ctx->dst_fmt = find_format(&f, MFC_FMT_ENC);
}
-
diff --git a/drivers/media/platform/vivid/vivid-kthread-cap.c b/drivers/media/platform/vivid/vivid-kthread-cap.c
index 83cc6d3b478412..81ba454a6d95b9 100644
--- a/drivers/media/platform/vivid/vivid-kthread-cap.c
+++ b/drivers/media/platform/vivid/vivid-kthread-cap.c
@@ -863,8 +863,11 @@ int vivid_start_generating_vid_cap(struct vivid_dev *dev, bool *pstreaming)
"%s-vid-cap", dev->v4l2_dev.name);
if (IS_ERR(dev->kthread_vid_cap)) {
+ int err = PTR_ERR(dev->kthread_vid_cap);
+
+ dev->kthread_vid_cap = NULL;
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dev->kthread_vid_cap);
+ return err;
}
*pstreaming = true;
vivid_grab_controls(dev, true);
diff --git a/drivers/media/platform/vivid/vivid-kthread-out.c b/drivers/media/platform/vivid/vivid-kthread-out.c
index c2c46dcdbe95ac..2c5dbdcb576a76 100644
--- a/drivers/media/platform/vivid/vivid-kthread-out.c
+++ b/drivers/media/platform/vivid/vivid-kthread-out.c
@@ -248,8 +248,11 @@ int vivid_start_generating_vid_out(struct vivid_dev *dev, bool *pstreaming)
"%s-vid-out", dev->v4l2_dev.name);
if (IS_ERR(dev->kthread_vid_out)) {
+ int err = PTR_ERR(dev->kthread_vid_out);
+
+ dev->kthread_vid_out = NULL;
v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
- return PTR_ERR(dev->kthread_vid_out);
+ return err;
}
*pstreaming = true;
vivid_grab_controls(dev, true);
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index ef5412311b2fa0..a84954f1be343e 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -461,6 +461,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
break;
}
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
vivid_update_quality(dev);
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
index 1678b730dba244..2e82f520a86929 100644
--- a/drivers/media/platform/vivid/vivid-vid-common.c
+++ b/drivers/media/platform/vivid/vivid-vid-common.c
@@ -33,7 +33,7 @@ const struct v4l2_dv_timings_cap vivid_dv_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, MAX_WIDTH, 0, MAX_HEIGHT, 14000000, 775000000,
+ V4L2_INIT_BT_TIMINGS(16, MAX_WIDTH, 16, MAX_HEIGHT, 14000000, 775000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_INTERLACED)
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index 471d6a8ae8a446..9326439bc49ca5 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -96,7 +96,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
*/
int si470x_get_register(struct si470x_device *radio, int regnr)
{
- u16 buf[READ_REG_NUM];
+ __be16 buf[READ_REG_NUM];
struct i2c_msg msgs[1] = {
{
.addr = radio->client->addr,
@@ -121,7 +121,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr)
int si470x_set_register(struct si470x_device *radio, int regnr)
{
int i;
- u16 buf[WRITE_REG_NUM];
+ __be16 buf[WRITE_REG_NUM];
struct i2c_msg msgs[1] = {
{
.addr = radio->client->addr,
@@ -151,7 +151,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr)
static int si470x_get_all_registers(struct si470x_device *radio)
{
int i;
- u16 buf[READ_REG_NUM];
+ __be16 buf[READ_REG_NUM];
struct i2c_msg msgs[1] = {
{
.addr = radio->client->addr,
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 04ae2127844038..77f54e4198d3ad 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -864,6 +864,9 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_CNXT_RDE_250},
{USB_DEVICE(0x0572, 0x58A0),
.driver_info = CX231XX_BOARD_CNXT_RDU_250},
+ /* AverMedia DVD EZMaker 7 */
+ {USB_DEVICE(0x07ca, 0xc039),
+ .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
{USB_DEVICE(0x2040, 0xb110),
.driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
{USB_DEVICE(0x2040, 0xb111),
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 6e02a15d39ce90..abddb621d9e68c 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -389,8 +389,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
- msg[0].len - 3);
+ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+ &msg[0].buf[3],
+ msg[0].len - 3)
+ : -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index 394004607059bf..7c7dfaed9d156b 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2021,13 +2021,13 @@ struct em28xx_board em28xx_boards[] = {
.input = { {
.type = EM28XX_VMUX_COMPOSITE1,
.vmux = TVP5150_COMPOSITE1,
- .amux = EM28XX_AUDIO_SRC_LINE,
+ .amux = EM28XX_AMUX_LINE_IN,
.gpio = terratec_av350_unmute_gpio,
}, {
.type = EM28XX_VMUX_SVIDEO,
.vmux = TVP5150_SVIDEO,
- .amux = EM28XX_AUDIO_SRC_LINE,
+ .amux = EM28XX_AMUX_LINE_IN,
.gpio = terratec_av350_unmute_gpio,
} },
},
diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
index 357be76c7a5523..5502a0fb94fda3 100644
--- a/drivers/media/usb/em28xx/em28xx-dvb.c
+++ b/drivers/media/usb/em28xx/em28xx-dvb.c
@@ -1806,6 +1806,8 @@ static int em28xx_dvb_fini(struct em28xx *dev)
}
}
+ em28xx_unregister_dvb(dvb);
+
/* remove I2C SEC */
client = dvb->i2c_client_sec;
if (client) {
@@ -1827,7 +1829,6 @@ static int em28xx_dvb_fini(struct em28xx *dev)
i2c_unregister_device(client);
}
- em28xx_unregister_dvb(dvb);
kfree(dvb);
dev->dvb = NULL;
kref_put(&dev->ref, em28xx_free_device);
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 6a3cf342e08741..873948e429e8f0 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -930,6 +930,8 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
em28xx_videodbg("%s\n", __func__);
+ dev->v4l2->field_count = 0;
+
/* Make sure streaming is not already in progress for this type
of filehandle (e.g. video, vbi) */
rc = res_get(dev, vq->type);
@@ -1288,9 +1290,9 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (!fmt) {
- em28xx_videodbg("Fourcc format (%08x) invalid.\n",
- f->fmt.pix.pixelformat);
- return -EINVAL;
+ fmt = &format[0];
+ em28xx_videodbg("Fourcc format (%08x) invalid. Using default (%08x).\n",
+ f->fmt.pix.pixelformat, fmt->fourcc);
}
if (dev->board.is_em2800) {
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 095f5db1a790f9..4f317e2686e9e5 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -275,6 +275,11 @@ static int register_dvb(struct tm6000_core *dev)
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
+ if (ret < 0) {
+ pr_err("tm6000: couldn't register the adapter!\n");
+ goto err;
+ }
+
dvb->adapter.priv = dev;
if (dvb->frontend) {
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index cafc34938a7923..91d709efef7a4f 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1461,13 +1461,6 @@ static int usbvision_probe(struct usb_interface *intf,
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
- /*
- * this is a security check.
- * an exploit using an incorrect bInterfaceNumber is known
- */
- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
- return -ENODEV;
-
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index 618e4e2b42077f..fea09a33c6c8f4 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -1202,7 +1202,7 @@ static void uvc_ctrl_fill_event(struct uvc_video_chain *chain,
__uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl);
- memset(ev->reserved, 0, sizeof(ev->reserved));
+ memset(ev, 0, sizeof(*ev));
ev->type = V4L2_EVENT_CTRL;
ev->id = v4l2_ctrl.id;
ev->u.ctrl.value = value;
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index d4c1fde44516d7..96dc73dd93d302 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1019,11 +1019,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
return -EINVAL;
}
- /* Make sure the terminal type MSB is not null, otherwise it
- * could be confused with a unit.
+ /*
+ * Reject invalid terminal types that would cause issues:
+ *
+ * - The high byte must be non-zero, otherwise it would be
+ * confused with a unit.
+ *
+ * - Bit 15 must be 0, as we use it internally as a terminal
+ * direction flag.
+ *
+ * Other unknown types are accepted.
*/
type = get_unaligned_le16(&buffer[4]);
- if ((type & 0xff00) == 0) {
+ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d INPUT_TERMINAL %d has invalid "
"type 0x%04x, skipping\n", udev->devnum,
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index db521c67a93643..97125751a45240 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
}
}
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+ /*
+ * Return the size of the video probe and commit controls, which depends
+ * on the protocol version.
+ */
+ if (stream->dev->uvc_version < 0x0110)
+ return 26;
+ else if (stream->dev->uvc_version < 0x0150)
+ return 34;
+ else
+ return 48;
+}
+
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe, __u8 query)
{
+ __u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
- __u16 size;
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
query == UVC_GET_DEF)
return -EIO;
@@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
- if (size == 34) {
+ if (size >= 34) {
ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
ctrl->bmFramingInfo = data[30];
ctrl->bPreferedVersion = data[31];
@@ -254,11 +267,10 @@ out:
static int uvc_set_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe)
{
+ __u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
- __u16 size;
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
data = kzalloc(size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
- if (size == 34) {
+ if (size >= 34) {
put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
data[30] = ctrl->bmFramingInfo;
data[31] = ctrl->bPreferedVersion;
@@ -626,6 +638,14 @@ void uvc_video_clock_update(struct uvc_streaming *stream,
if (!uvc_hw_timestamps_param)
return;
+ /*
+ * We will get called from __vb2_queue_cancel() if there are buffers
+ * done but not dequeued by the user, but the sample array has already
+ * been released at that time. Just bail out in that case.
+ */
+ if (!clock->samples)
+ return;
+
spin_lock_irqsave(&clock->lock, flags);
if (clock->count < clock->size)
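
uvc_video_ctrl_size() encodes the size growth of the probe/commit control across UVC protocol revisions: 26 bytes before 1.1, 34 bytes up to but excluding 1.5, and 48 bytes from 1.5 on. A minimal mirror of that mapping:

    #include <stdio.h>

    /* Mirror of uvc_video_ctrl_size(): BCD UVC version -> control size. */
    static unsigned int ctrl_size(unsigned int uvc_version)
    {
        if (uvc_version < 0x0110)
            return 26;      /* UVC 1.0 */
        else if (uvc_version < 0x0150)
            return 34;      /* UVC 1.1 */
        return 48;          /* UVC 1.5 */
    }

    int main(void)
    {
        unsigned int versions[] = { 0x0100, 0x0110, 0x0150 };
        unsigned int i;

        for (i = 0; i < 3; i++)
            printf("bcdUVC %04x -> %u bytes\n",
                   versions[i], ctrl_size(versions[i]));
        return 0;
    }
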
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index c8e9c501bfa72e..36326ae8db68b9 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -309,7 +309,6 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Subdevice driver will reprobe and put the subdev back onto the list */
list_del_init(&sd->async_list);
sd->asd = NULL;
- sd->dev = NULL;
}
/* See if an fwnode can be found in a notifier's lists. */
@@ -477,10 +476,10 @@ static void v4l2_async_notifier_unbind_all_subdevs(
if (subdev_notifier)
v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
- v4l2_async_cleanup(sd);
-
v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
list_move(&sd->async_list, &subdev_list);
}
@@ -600,10 +599,10 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_cleanup(sd);
-
v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_cleanup(sd);
+
mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(v4l2_async_unregister_subdev);
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 67ab1d404f431a..dcc4df3acb8dce 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -876,7 +876,7 @@ static int put_v4l2_ext_controls32(struct file *file,
get_user(kcontrols, &kp->controls))
return -EFAULT;
- if (!count)
+ if (!count || count > (U32_MAX / sizeof(*ucontrols)))
return 0;
if (get_user(p, &up->controls))
return -EFAULT;
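
The added bound in put_v4l2_ext_controls32() rejects a count whose multiplication by the element size would wrap a 32-bit quantity before the array is walked. A standalone sketch of the guard; the element size here is a placeholder, not the real sizeof of the compat structure:

    #include <stdint.h>
    #include <stdio.h>

    /* Reject counts whose byte size would overflow 32 bits, as above. */
    static int count_is_sane(uint32_t count, size_t elem_size)
    {
        return count != 0 && count <= UINT32_MAX / elem_size;
    }

    int main(void)
    {
        size_t elem = 20;   /* placeholder element size */

        printf("%d\n", count_is_sane(100, elem));        /* 1 */
        printf("%d\n", count_is_sane(UINT32_MAX, elem)); /* 0 */
        return 0;
    }
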
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 6cc8b9de41e8fb..e864c2a85dba29 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -431,6 +431,20 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Use Previous Specific Frame",
NULL,
};
+ static const char * const vp8_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
+ static const char * const vp9_profile[] = {
+ "0",
+ "1",
+ "2",
+ "3",
+ NULL,
+ };
static const char * const flash_led_mode[] = {
"Off",
@@ -555,6 +569,10 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return mpeg4_profile;
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
return vpx_golden_frame_sel;
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ return vp8_profile;
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
+ return vp9_profile;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
@@ -769,7 +787,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VPX_MAX_QP: return "VPX Maximum QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP: return "VPX I-Frame QP Value";
case V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP: return "VPX P-Frame QP Value";
- case V4L2_CID_MPEG_VIDEO_VPX_PROFILE: return "VPX Profile";
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE: return "VP8 Profile";
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE: return "VP9 Profile";
case V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR: return "VP8 Frame Header";
case V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR: return "VP9 Frame Header";
@@ -868,6 +887,8 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_JPEG_RESTART_INTERVAL: return "Restart Interval";
case V4L2_CID_JPEG_COMPRESSION_QUALITY: return "Compression Quality";
case V4L2_CID_JPEG_ACTIVE_MARKER: return "Active Markers";
+ case V4L2_CID_JPEG_LUMA_QUANTIZATION: return "Luminance Quantization Matrix";
+ case V4L2_CID_JPEG_CHROMA_QUANTIZATION: return "Chrominance Quantization Matrix";
/* Image source controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -886,6 +907,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_LINK_FREQ: return "Link Frequency";
case V4L2_CID_PIXEL_RATE: return "Pixel Rate";
case V4L2_CID_TEST_PATTERN: return "Test Pattern";
+ case V4L2_CID_DIGITAL_GAIN: return "Digital Gain";
/* DV controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1058,6 +1080,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_TEST_PATTERN:
case V4L2_CID_TUNE_DEEMPHASIS:
case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ case V4L2_CID_MPEG_VIDEO_VP8_PROFILE:
+ case V4L2_CID_MPEG_VIDEO_VP9_PROFILE:
case V4L2_CID_DETECT_MD_MODE:
*type = V4L2_CTRL_TYPE_MENU;
break;
@@ -1134,6 +1158,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
*flags |= V4L2_CTRL_FLAG_READ_ONLY;
break;
case V4L2_CID_DETECT_MD_REGION_GRID:
+ case V4L2_CID_JPEG_LUMA_QUANTIZATION:
+ case V4L2_CID_JPEG_CHROMA_QUANTIZATION:
*type = V4L2_CTRL_TYPE_U8;
break;
case V4L2_CID_DETECT_MD_THRESHOLD_GRID:
@@ -1255,7 +1281,7 @@ static u32 user_flags(const struct v4l2_ctrl *ctrl)
static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
{
- memset(ev->reserved, 0, sizeof(ev->reserved));
+ memset(ev, 0, sizeof(*ev));
ev->type = V4L2_EVENT_CTRL;
ev->id = ctrl->id;
ev->u.ctrl.changes = changes;
@@ -2471,20 +2497,15 @@ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active)
}
EXPORT_SYMBOL(v4l2_ctrl_activate);
-/* Grab/ungrab a control.
- Typically used when streaming starts and you want to grab controls,
- preventing the user from changing them.
-
- Just call this and the framework will block any attempts to change
- these controls. */
-void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
{
bool old;
if (ctrl == NULL)
return;
- v4l2_ctrl_lock(ctrl);
+ lockdep_assert_held(ctrl->handler->lock);
+
if (grabbed)
/* set V4L2_CTRL_FLAG_GRABBED */
old = test_and_set_bit(1, &ctrl->flags);
@@ -2493,9 +2514,8 @@ void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
old = test_and_clear_bit(1, &ctrl->flags);
if (old != grabbed)
send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_FLAGS);
- v4l2_ctrl_unlock(ctrl);
}
-EXPORT_SYMBOL(v4l2_ctrl_grab);
+EXPORT_SYMBOL(__v4l2_ctrl_grab);
/* Log the control name and value */
static void log_ctrl(const struct v4l2_ctrl *ctrl,
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 623ad2c3deba7e..6ac9ad4f8008d7 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -120,14 +120,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -206,6 +198,22 @@ int v4l2_event_pending(struct v4l2_fh *fh)
}
EXPORT_SYMBOL_GPL(v4l2_event_pending);
+static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev)
+{
+ struct v4l2_fh *fh = sev->fh;
+ unsigned int i;
+
+ lockdep_assert_held(&fh->subscribe_lock);
+ assert_spin_locked(&fh->vdev->fh_lock);
+
+ /* Remove any pending events for this subscription */
+ for (i = 0; i < sev->in_use; i++) {
+ list_del(&sev->events[sev_pos(sev, i)].list);
+ fh->navailable--;
+ }
+ list_del(&sev->list);
+}
+
int v4l2_event_subscribe(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub, unsigned elems,
const struct v4l2_subscribed_event_ops *ops)
@@ -213,6 +221,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -231,6 +240,9 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -239,23 +251,21 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
+ /* Already listening */
kvfree(sev);
- return 0; /* Already listening */
- }
-
- if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ } else if (sev->ops && sev->ops->add) {
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ __v4l2_event_unsubscribe(sev);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
+ kvfree(sev);
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ mutex_unlock(&fh->subscribe_lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -287,30 +297,27 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
{
struct v4l2_subscribed_event *sev;
unsigned long flags;
- int i;
if (sub->type == V4L2_EVENT_ALL) {
v4l2_event_unsubscribe_all(fh);
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (sev != NULL) {
- /* Remove any pending events for this subscription */
- for (i = 0; i < sev->in_use; i++) {
- list_del(&sev->events[sev_pos(sev, i)].list);
- fh->navailable--;
- }
- list_del(&sev->list);
- }
+ if (sev != NULL)
+ __v4l2_event_unsubscribe(sev);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
+ mutex_unlock(&fh->subscribe_lock);
+
kvfree(sev);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c97067a25bd29a..1d076deb05a90a 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -49,6 +49,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -93,6 +94,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
if (fh->vdev == NULL)
return;
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 49cc5631bf991b..3786d4d4ff76d6 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1275,6 +1275,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
/* Max description length mask: descr = "0123456789012345678901234567890" */
case V4L2_PIX_FMT_MJPEG: descr = "Motion-JPEG"; break;
case V4L2_PIX_FMT_JPEG: descr = "JFIF JPEG"; break;
+ case V4L2_PIX_FMT_JPEG_RAW: descr = "Raw JPEG"; break;
case V4L2_PIX_FMT_DV: descr = "1394"; break;
case V4L2_PIX_FMT_MPEG: descr = "MPEG-1/2/4"; break;
case V4L2_PIX_FMT_H264: descr = "H.264"; break;
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index f669cedca8bd16..f74a74d91b9eb9 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -156,6 +156,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
{
unsigned long first, last;
int err, rw = 0;
+ unsigned int flags = FOLL_FORCE;
dma->direction = direction;
switch (dma->direction) {
@@ -178,13 +179,15 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
if (NULL == dma->pages)
return -ENOMEM;
+ if (rw == READ)
+ flags |= FOLL_WRITE;
+
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
err = get_user_pages(current, current->mm,
data & PAGE_MASK, dma->nr_pages,
- rw == READ, 1, /* force */
- dma->pages, NULL);
+ flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index fb36d8ebccb19d..7532544c1fd522 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1308,6 +1308,11 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
struct vb2_buffer *vb;
int ret;
+ if (q->error) {
+ dprintk(1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
vb = q->bufs[index];
switch (vb->state) {
@@ -1922,9 +1927,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
return -EINVAL;
}
}
+
+ mutex_lock(&q->mmap_lock);
+
if (vb2_fileio_is_active(q)) {
dprintk(1, "mmap: file io in progress\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto unlock;
}
/*
@@ -1932,7 +1941,7 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
*/
ret = __find_plane_by_offset(q, off, &buffer, &plane);
if (ret)
- return ret;
+ goto unlock;
vb = q->bufs[buffer];
@@ -1945,11 +1954,13 @@ int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
if (length < (vma->vm_end - vma->vm_start)) {
dprintk(1,
"MMAP invalid, as it would overflow buffer length\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
- mutex_lock(&q->mmap_lock);
ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
mutex_unlock(&q->mmap_lock);
if (ret)
return ret;
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index 3c3b517f1d1cac..1cd322e939c705 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -42,6 +42,10 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
unsigned long first, last;
unsigned long nr;
struct frame_vector *vec;
+ unsigned int flags = FOLL_FORCE;
+
+ if (write)
+ flags |= FOLL_WRITE;
first = start >> PAGE_SHIFT;
last = (start + length - 1) >> PAGE_SHIFT;
@@ -49,7 +53,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, flags, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a1ae0cc2b86d50..6ab481ee8ece3c 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -20,14 +20,6 @@
#include "mc.h"
#define MC_INTSTATUS 0x000
-#define MC_INT_DECERR_MTS (1 << 16)
-#define MC_INT_SECERR_SEC (1 << 13)
-#define MC_INT_DECERR_VPR (1 << 12)
-#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
-#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
-#define MC_INT_ARBITRATION_EMEM (1 << 9)
-#define MC_INT_SECURITY_VIOLATION (1 << 8)
-#define MC_INT_DECERR_EMEM (1 << 6)
#define MC_INTMASK 0x004
@@ -248,12 +240,13 @@ static const char *const error_names[8] = {
static irqreturn_t tegra_mc_irq(int irq, void *data)
{
struct tegra_mc *mc = data;
- unsigned long status, mask;
+ unsigned long status;
unsigned int bit;
/* mask all interrupts to avoid flooding */
- status = mc_readl(mc, MC_INTSTATUS);
- mask = mc_readl(mc, MC_INTMASK);
+ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
+ if (!status)
+ return IRQ_NONE;
for_each_set_bit(bit, &status, 32) {
const char *error = status_names[bit] ?: "unknown";
@@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct resource *res;
struct tegra_mc *mc;
- u32 value;
int err;
match = of_match_node(tegra_mc_of_match, pdev->dev.of_node);
@@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device *pdev)
WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n");
- value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
- MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
- MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM;
-
- mc_writel(mc, value, MC_INTMASK);
+ mc_writel(mc, mc->soc->intmask, MC_INTMASK);
return 0;
}
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index ddb16676c3af4d..24e020b4609be7 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -14,6 +14,15 @@
#include <soc/tegra/mc.h>
+#define MC_INT_DECERR_MTS (1 << 16)
+#define MC_INT_SECERR_SEC (1 << 13)
+#define MC_INT_DECERR_VPR (1 << 12)
+#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11)
+#define MC_INT_INVALID_SMMU_PAGE (1 << 10)
+#define MC_INT_ARBITRATION_EMEM (1 << 9)
+#define MC_INT_SECURITY_VIOLATION (1 << 8)
+#define MC_INT_DECERR_EMEM (1 << 6)
+
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
return readl(mc->regs + offset);
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index ba8fff3d66a655..6d2a5a849d928b 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = {
.atom_size = 32,
.client_id_mask = 0x7f,
.smmu = &tegra114_smmu_soc,
+ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+ MC_INT_DECERR_EMEM,
};
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 21e7255e3d96af..234e74f97a4bc1 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1019,6 +1019,9 @@ const struct tegra_mc_soc tegra124_mc_soc = {
.smmu = &tegra124_smmu_soc,
.emem_regs = tegra124_mc_emem_regs,
.num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs),
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
};
#endif /* CONFIG_ARCH_TEGRA_124_SOC */
@@ -1041,5 +1044,8 @@ const struct tegra_mc_soc tegra132_mc_soc = {
.atom_size = 32,
.client_id_mask = 0x7f,
.smmu = &tegra132_smmu_soc,
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
};
#endif /* CONFIG_ARCH_TEGRA_132_SOC */
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 5e144abe4c181e..47c78a6d8f0092 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = {
.atom_size = 64,
.client_id_mask = 0xff,
.smmu = &tegra210_smmu_soc,
+ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR |
+ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE |
+ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM,
};
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index b44737840e70c1..d0689428ea1a5b 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.atom_size = 16,
.client_id_mask = 0x7f,
.smmu = &tegra30_smmu_soc,
+ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION |
+ MC_INT_DECERR_EMEM,
};
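Taken together, the memory/tegra hunks move the interrupt mask out of mc.c into a per-SoC .intmask field: tegra30/tegra114 omit the MTS/SECERR/VPR/ASID bits their hardware lacks, while tegra124/132/210 keep the full set. A condensed sketch of the resulting flow (names as defined in the hunks above):

    /* probe: enable only the bits this SoC can actually raise */
    mc_writel(mc, mc->soc->intmask, MC_INTMASK);

    /* IRQ handler: ignore status bits outside the SoC's mask */
    status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask;
    if (!status)
        return IRQ_NONE;    /* not ours; lets a shared line be handled elsewhere */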
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547dbf980645..4d673a626db4ed 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#define DRIVER_NAME "memstick"
@@ -436,6 +437,7 @@ static void memstick_check(struct work_struct *work)
struct memstick_dev *card;
dev_dbg(&host->dev, "memstick_check started\n");
+ pm_runtime_get_noresume(host->dev.parent);
mutex_lock(&host->lock);
if (!host->card) {
if (memstick_power_on(host))
@@ -479,6 +481,7 @@ out_power_off:
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
mutex_unlock(&host->lock);
+ pm_runtime_put(host->dev.parent);
dev_dbg(&host->dev, "memstick_check finished\n");
}
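The get_noresume/put pair above brackets the card scan: pm_runtime_get_noresume() only raises the parent's usage count (it does not wake the device), which is enough to keep the host controller from runtime-suspending mid-scan. The pattern in isolation, as a sketch:

    pm_runtime_get_noresume(host->dev.parent);  /* block runtime suspend */
    /* ... power on, detect and register the card under host->lock ... */
    pm_runtime_put(host->dev.parent);           /* balance the usage count */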
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index fefbe4cfa61ddb..1263cfd8b4d2f4 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -259,7 +259,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
- return ret;
+ return (ret < 0) ? ret : 0;
}
static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 0236cd7cdce4f3..636f541e9e7431 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1262,7 +1262,6 @@ static struct ab8500_prcmu_ranges ab8540_debug_ranges[AB8500_NUM_BANKS] = {
},
};
-
static irqreturn_t ab8500_debug_handler(int irq, void *data)
{
char buf[16];
diff --git a/drivers/mfd/as3722.c b/drivers/mfd/as3722.c
index 924ea90494ae5b..e1f597f97f869e 100644
--- a/drivers/mfd/as3722.c
+++ b/drivers/mfd/as3722.c
@@ -405,6 +405,8 @@ static int as3722_i2c_probe(struct i2c_client *i2c,
goto scrub;
}
+ device_init_wakeup(as3722->dev, true);
+
dev_dbg(as3722->dev, "AS3722 core driver initialized successfully\n");
return 0;
@@ -422,6 +424,29 @@ static int as3722_i2c_remove(struct i2c_client *i2c)
return 0;
}
+static int __maybe_unused as3722_i2c_suspend(struct device *dev)
+{
+ struct as3722 *as3722 = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(as3722->chip_irq);
+ disable_irq(as3722->chip_irq);
+
+ return 0;
+}
+
+static int __maybe_unused as3722_i2c_resume(struct device *dev)
+{
+ struct as3722 *as3722 = dev_get_drvdata(dev);
+
+ enable_irq(as3722->chip_irq);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(as3722->chip_irq);
+
+ return 0;
+}
+
static const struct of_device_id as3722_of_match[] = {
{ .compatible = "ams,as3722", },
{},
@@ -434,10 +459,15 @@ static const struct i2c_device_id as3722_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, as3722_i2c_id);
+static const struct dev_pm_ops as3722_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(as3722_i2c_suspend, as3722_i2c_resume)
+};
+
static struct i2c_driver as3722_i2c_driver = {
.driver = {
.name = "as3722",
.of_match_table = as3722_of_match,
+ .pm = &as3722_pm_ops,
},
.probe = as3722_i2c_probe,
.remove = as3722_i2c_remove,
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 91a6502a1ecede..690ec669e3112e 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -124,14 +124,13 @@ static int get_next_event_xfer(struct cros_ec_device *ec_dev,
int cros_ec_get_next_event(struct cros_ec_device *ec_dev)
{
- static int cmd_version = 1;
struct {
struct cros_ec_command msg;
struct ec_response_get_next_event_v1 event;
} __packed buf;
struct cros_ec_command *msg = &buf.msg;
struct ec_response_get_next_event_v1 *event = &buf.event;
- int ret;
+ const int cmd_version = ec_dev->mkbp_event_supported - 1;
BUILD_BUG_ON(sizeof(union ec_response_get_next_data_v1) != 16);
@@ -142,20 +141,12 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev)
return -EHOSTDOWN;
}
- if (cmd_version == 1) {
- ret = get_next_event_xfer(ec_dev, msg, event, 1,
- sizeof(struct ec_response_get_next_event_v1));
- if (ret < 0 || msg->result != EC_RES_INVALID_VERSION)
- return ret;
-
- /* Fallback to version 0 for future send attempts */
- cmd_version = 0;
- }
-
- ret = get_next_event_xfer(ec_dev, msg, event, 0,
+ if (cmd_version == 0)
+ return get_next_event_xfer(ec_dev, msg, event, 0,
sizeof(struct ec_response_get_next_event));
- return ret;
+ return get_next_event_xfer(ec_dev, msg, event, cmd_version,
+ sizeof(struct ec_response_get_next_event_v1));
}
EXPORT_SYMBOL(cros_ec_get_next_event);
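The rewrite above leans on the encoding of ec_dev->mkbp_event_supported assumed by this code: 0 means MKBP events are unsupported, and a value of n means command versions 0..n-1 of EC_CMD_GET_NEXT_EVENT are available, so the newest usable version is n - 1:

    /* 0 -> no MKBP events; n -> versions 0 .. n-1 supported */
    const int cmd_version = ec_dev->mkbp_event_supported - 1;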
@@ -216,21 +207,43 @@ static irqreturn_t ec_irq_handler(int irq, void *data) {
return IRQ_WAKE_THREAD;
}
-static irqreturn_t ec_irq_thread(int irq, void *data)
+static bool ec_handle_event(struct cros_ec_device *ec_dev)
{
- struct cros_ec_device *ec_dev = data;
int wake_event = 1;
+ u8 event_type;
u32 host_event;
int ret;
+ bool ec_has_more_events = false;
if (ec_dev->mkbp_event_supported) {
ret = cros_ec_get_next_event(ec_dev);
- /* Don't signal wake event for non-wake host events */
- host_event = cros_ec_get_host_event(ec_dev);
- if (ret > 0 && host_event &&
- !(host_event & ec_dev->host_event_wake_mask))
- wake_event = 0;
+ if (ret > 0) {
+ event_type = ec_dev->event_data.event_type &
+ EC_MKBP_EVENT_TYPE_MASK;
+ ec_has_more_events =
+ ec_dev->event_data.event_type &
+ EC_MKBP_HAS_MORE_EVENTS;
+ host_event = cros_ec_get_host_event(ec_dev);
+
+ /*
+ * Sensor events need to be parsed by the sensor
+ * sub-device. Defer them, and don't report the
+ * wakeup here.
+ */
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
+ wake_event = 0;
+ /*
+ * Masked host-events should not count as
+ * wake events.
+ */
+ else if (host_event &&
+ !(host_event & ec_dev->host_event_wake_mask))
+ wake_event = 0;
+ /* Consider all other events as wake events. */
+ else
+ wake_event = 1;
+ }
} else {
ret = cros_ec_get_keyboard_state_event(ec_dev);
}
@@ -240,7 +253,21 @@ static irqreturn_t ec_irq_thread(int irq, void *data)
if (ret > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
- 0, ec_dev);
+ 0, ec_dev);
+
+ return ec_has_more_events;
+}
+
+static irqreturn_t ec_irq_thread(int irq, void *data)
+{
+ struct cros_ec_device *ec_dev = data;
+ bool ec_has_more_events;
+
+ do {
+ ec_has_more_events = ec_handle_event(ec_dev);
+ } while (ec_has_more_events);
+
return IRQ_HANDLED;
}
@@ -306,7 +333,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
return err;
}
- if (ec_dev->irq) {
+ if (ec_dev->irq > 0) {
err = devm_request_threaded_irq(dev, ec_dev->irq,
ec_irq_handler, ec_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
@@ -427,7 +454,7 @@ int cros_ec_suspend(struct cros_ec_device *ec_dev)
}
EXPORT_SYMBOL(cros_ec_suspend);
-static void cros_ec_drain_events(struct cros_ec_device *ec_dev)
+static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
while (cros_ec_get_next_event(ec_dev) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
@@ -453,20 +480,16 @@ int cros_ec_resume(struct cros_ec_device *ec_dev)
dev_dbg(ec_dev->dev, "Error %d sending resume event to ec",
ret);
- /*
- * In some case, we need to distinguish events that occur during
- * suspend if the EC is not a wake source. For example, keypresses
- * during suspend should be discarded if it does not wake the system.
- *
- * If the EC is not a wake source, drain the event queue and mark them
- * as "queued during suspend".
- */
if (ec_dev->wake_enabled) {
disable_irq_wake(ec_dev->irq);
ec_dev->wake_enabled = 0;
- } else {
- cros_ec_drain_events(ec_dev);
}
+ /*
+ * Let the mfd devices know about events that occur during
+ * suspend. This way the clients know what to do with them.
+ */
+ cros_ec_report_events_during_suspend(ec_dev);
+
return 0;
}
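Two behavioral changes land in cros_ec.c: the IRQ thread keeps fetching events while the EC sets EC_MKBP_HAS_MORE_EVENTS, and resume now forwards events queued during suspend to the notifier chain unconditionally, instead of only draining them when the EC was not a wake source. The per-event wake decision introduced in ec_handle_event() reduces to this sketch:

    if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
        wake_event = 0;    /* deferred to the sensor sub-device */
    else if (host_event && !(host_event & ec_dev->host_event_wake_mask))
        wake_event = 0;    /* host event masked as non-wake */
    else
        wake_event = 1;    /* everything else wakes the system */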
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 5be1d09be6699f..4d2122d9d51d70 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -685,7 +685,8 @@ static int cros_ec_spi_probe(struct spi_device *spi)
err = cros_ec_register(ec_dev);
if (err) {
- dev_err(dev, "cannot register EC\n");
+ dev_err(dev, "cannot register EC, fallback to spidev\n");
+ strncpy(spi->modalias, "spidev", SPI_NAME_SIZE);
return err;
}
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e6e4bacb09ee5d..e71b9f23379d0e 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
return 0;
}
+EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
static int config_hot_period(u16 val)
{
@@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
return config_hot_period(cycles32k);
}
+EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
int db8500_prcmu_stop_temp_sense(void)
{
return config_hot_period(0xFFFF);
}
+EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{
@@ -2607,7 +2610,7 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-static __init char *fw_project_name(u32 project)
+static char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
@@ -2755,7 +2758,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
-static void __init init_prcm_registers(void)
+static void init_prcm_registers(void)
{
u32 val;
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
index 7923d267b05d4e..d8379a49a7de4d 100644
--- a/drivers/mfd/intel-lpss.c
+++ b/drivers/mfd/intel-lpss.c
@@ -269,11 +269,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
intel_lpss_deassert_reset(lpss);
+ intel_lpss_set_remap_addr(lpss);
+
if (!intel_lpss_has_idma(lpss))
return;
- intel_lpss_set_remap_addr(lpss);
-
/* Make sure that SPI multiblock DMA transfers are re-enabled */
if (lpss->type == LPSS_DEV_SPI)
writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 3f9f4c874d2aa4..8d74806b83c12b 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ if (ret)
+ goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 1d924d1533c027..b9dc2fcd8f26e7 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -548,8 +548,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
}
static const struct of_device_id usbhs_child_match_table[] = {
- { .compatible = "ti,omap-ehci", },
- { .compatible = "ti,omap-ohci", },
+ { .compatible = "ti,ehci-omap", },
+ { .compatible = "ti,ohci-omap3", },
{ }
};
@@ -875,6 +875,7 @@ static struct platform_driver usbhs_omap_driver = {
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
+ .probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
@@ -884,9 +885,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
{
- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+ return platform_driver_register(&usbhs_omap_driver);
}
/*
@@ -898,7 +899,7 @@ static int __init omap_usbhs_drvinit(void)
*/
fs_initcall_sync(omap_usbhs_drvinit);
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index a867cc91657ef3..27486f278201ef 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -570,6 +570,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
return -EFAULT;
}
+ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
+ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
+ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
+
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
fw_version[1],
fw_version[2]);
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index c646784c5a7d0a..fbec711c41956c 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
smdev->pdev.name = name;
smdev->pdev.id = sm->pdev_id;
smdev->pdev.dev.parent = sm->dev;
+ smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
if (res_count) {
smdev->pdev.resource = (struct resource *)(smdev+1);
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index e4e4b22eebc91c..faf8ce5be576f0 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -224,14 +224,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
* The TSC_ADC_SS controller design assumes the OCP clock is
* at least 6x faster than the ADC clock.
*/
- clk = clk_get(&pdev->dev, "adc_tsc_fck");
+ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "failed to get TSC fck\n");
err = PTR_ERR(clk);
goto err_disable_clk;
}
clock_rate = clk_get_rate(clk);
- clk_put(clk);
tscadc->clk_div = clock_rate / ADC_CLK;
/* TSCADC_CLKDIV needs to be configured to the value minus 1 */
@@ -280,8 +279,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
cell->pdata_size = sizeof(tscadc);
}
- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
- tscadc->used_cells, NULL, 0, NULL);
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ tscadc->cells, tscadc->used_cells, NULL,
+ 0, NULL);
if (err < 0)
goto err_disable_clk;
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 5628a6b5b19bec..c5c320efc7b499 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -594,6 +594,29 @@ static int tps6586x_i2c_remove(struct i2c_client *client)
return 0;
}
+static int __maybe_unused tps6586x_i2c_suspend(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ disable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static int __maybe_unused tps6586x_i2c_resume(struct device *dev)
+{
+ struct tps6586x *tps6586x = dev_get_drvdata(dev);
+
+ if (tps6586x->client->irq)
+ enable_irq(tps6586x->client->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tps6586x_pm_ops, tps6586x_i2c_suspend,
+ tps6586x_i2c_resume);
+
static const struct i2c_device_id tps6586x_id_table[] = {
{ "tps6586x", 0 },
{ },
@@ -604,6 +627,7 @@ static struct i2c_driver tps6586x_driver = {
.driver = {
.name = "tps6586x",
.of_match_table = of_match_ptr(tps6586x_of_match),
+ .pm = &tps6586x_pm_ops,
},
.probe = tps6586x_i2c_probe,
.remove = tps6586x_i2c_remove,
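SIMPLE_DEV_PM_OPS, used for tps6586x above, is shorthand for a dev_pm_ops whose system-sleep callbacks all route to the two handlers (via SET_SYSTEM_SLEEP_PM_OPS); roughly, it expands to:

    static const struct dev_pm_ops tps6586x_pm_ops = {
        .suspend = tps6586x_i2c_suspend,
        .resume  = tps6586x_i2c_resume,
        /* .freeze/.thaw/.poweroff/.restore map to the same pair */
    };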
diff --git a/drivers/mfd/tps68470.c b/drivers/mfd/tps68470.c
index 1b8badd58fd70c..6f6ac2c4742bb6 100644
--- a/drivers/mfd/tps68470.c
+++ b/drivers/mfd/tps68470.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* TPS68470 chip Parent driver
*
@@ -8,15 +9,6 @@
* Tianshu Qiu <tian.shu.qiu@intel.com>
* Jian Xu Zheng <jian.xu.zheng@intel.com>
* Yuning Pu <yuning.pu@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 831696ee2472b4..90732a655d5736 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -982,7 +982,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* letting it generate the right frequencies for USB, MADC, and
* other purposes.
*/
-static inline int __init protect_pm_master(void)
+static inline int protect_pm_master(void)
{
int e = 0;
@@ -991,7 +991,7 @@ static inline int __init protect_pm_master(void)
return e;
}
-static inline int __init unprotect_pm_master(void)
+static inline int unprotect_pm_master(void)
{
int e = 0;
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 2bb2d0467a92d3..c47efe6dcb01be 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1622,6 +1622,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2877,6 +2878,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_STATUS:
case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index dba7a6c1ad89c3..ef23334303b9f5 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -550,4 +550,5 @@ source "drivers/misc/mic/Kconfig"
source "drivers/misc/genwqe/Kconfig"
source "drivers/misc/echo/Kconfig"
source "drivers/misc/cxl/Kconfig"
+source "drivers/misc/throttler/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 300f9b12729e91..f52d5b3604889a 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -58,3 +58,4 @@ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o
obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
obj-$(CONFIG_FPC1020) += fpc1020.o
+obj-$(CONFIG_THROTTLER) += throttler/
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index e11a0bd6c66e3f..e2474af7386a19 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -129,7 +129,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = {
MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
#endif
-static inline const struct atmel_ssc_platform_data * __init
+static inline const struct atmel_ssc_platform_data *
atmel_ssc_get_driver_data(struct platform_device *pdev)
{
if (pdev->dev.of_node) {
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index f7e9a1780c2d54..ae53dfdec3e7f6 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -489,6 +489,24 @@ static void at24_get_pdata(struct device *dev, struct at24_platform_data *chip)
if (device_property_present(dev, "read-only"))
chip->flags |= AT24_FLAG_READONLY;
+ err = device_property_read_u32(dev, "address-width", &val);
+ if (!err) {
+ switch (val) {
+ case 8:
+ if (chip->flags & AT24_FLAG_ADDR16)
+ dev_warn(dev, "Override address width to be 8,"
+ "while default is 16\n");
+ chip->flags &= ~AT24_FLAG_ADDR16;
+ break;
+ case 16:
+ chip->flags |= AT24_FLAG_ADDR16;
+ break;
+ default:
+ dev_warn(dev, "Bad \"address-width\" property: %u\n",
+ val);
+ }
+ }
+
err = device_property_read_u32(dev, "size", &val);
if (!err)
chip->byte_len = val;
diff --git a/drivers/misc/genwqe/card_base.h b/drivers/misc/genwqe/card_base.h
index cb851c14ca4b17..159f35b2bd118e 100644
--- a/drivers/misc/genwqe/card_base.h
+++ b/drivers/misc/genwqe/card_base.h
@@ -404,7 +404,7 @@ struct genwqe_file {
struct file *filp;
struct fasync_struct *async_queue;
- struct task_struct *owner;
+ struct pid *opener;
struct list_head list; /* entry in list of open files */
spinlock_t map_lock; /* lock for dma_mappings */
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 7f1b282d7d963c..c0012ca4229e1d 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -52,7 +52,7 @@ static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
unsigned long flags;
- cfile->owner = current;
+ cfile->opener = get_pid(task_tgid(current));
spin_lock_irqsave(&cd->file_lock, flags);
list_add(&cfile->list, &cd->file_list);
spin_unlock_irqrestore(&cd->file_lock, flags);
@@ -65,6 +65,7 @@ static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
spin_lock_irqsave(&cd->file_lock, flags);
list_del(&cfile->list);
spin_unlock_irqrestore(&cd->file_lock, flags);
+ put_pid(cfile->opener);
return 0;
}
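The switch from a task_struct pointer to a struct pid reference (here and in the hunks below) is the usual fix for signalling a possibly-exited opener: the pid reference pins only the pid, not the whole task, remains valid after the opener exits, and kill_pid() can never hit an unrelated process that recycled the numeric PID. The lifecycle, condensed from the hunks:

    cfile->opener = get_pid(task_tgid(current));  /* open(): take a reference */
    kill_pid(cfile->opener, SIGKILL, 1);          /* error path: signal the opener */
    put_pid(cfile->opener);                       /* release(): drop the reference */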
@@ -275,7 +276,7 @@ static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
return files;
}
-static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
+static int genwqe_terminate(struct genwqe_dev *cd)
{
unsigned int files = 0;
unsigned long flags;
@@ -283,7 +284,7 @@ static int genwqe_force_sig(struct genwqe_dev *cd, int sig)
spin_lock_irqsave(&cd->file_lock, flags);
list_for_each_entry(cfile, &cd->file_list, list) {
- force_sig(sig, cfile->owner);
+ kill_pid(cfile->opener, SIGKILL, 1);
files++;
}
spin_unlock_irqrestore(&cd->file_lock, flags);
@@ -1356,7 +1357,7 @@ static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
dev_warn(&pci_dev->dev,
"[%s] send SIGKILL and wait ...\n", __func__);
- rc = genwqe_force_sig(cd, SIGKILL); /* force terminate */
+ rc = genwqe_terminate(cd);
if (rc) {
/* Give kill_timout more seconds to end processes */
for (i = 0; (i < genwqe_kill_timeout) &&
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 524660510599cb..0c15ba21fa5443 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -217,7 +217,7 @@ u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
dma_addr_t *dma_handle)
{
- if (get_order(size) > MAX_ORDER)
+ if (get_order(size) >= MAX_ORDER)
return NULL;
return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
diff --git a/drivers/misc/hmc6352.c b/drivers/misc/hmc6352.c
index 90520d76633f4c..9cde4c5bfba409 100644
--- a/drivers/misc/hmc6352.c
+++ b/drivers/misc/hmc6352.c
@@ -27,6 +27,7 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
+#include <linux/nospec.h>
static DEFINE_MUTEX(compass_mutex);
@@ -50,6 +51,7 @@ static int compass_store(struct device *dev, const char *buf, size_t count,
return ret;
if (val >= strlen(map))
return -EINVAL;
+ val = array_index_nospec(val, strlen(map));
mutex_lock(&compass_mutex);
ret = compass_command(c, map[val]);
mutex_unlock(&compass_mutex);
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e8b933111e0df7..92109cadc3fc01 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
void __iomem *address = (void __iomem *)file->private_data;
- unsigned char *page;
- int retval;
int len = 0;
unsigned int value;
-
- if (*offset < 0)
- return -EINVAL;
- if (count == 0 || count > 1024)
- return 0;
- if (*offset != 0)
- return 0;
-
- page = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ char lbuf[20];
value = readl(address);
- len = sprintf(page, "%d\n", value);
-
- if (copy_to_user(buf, page, len)) {
- retval = -EFAULT;
- goto exit;
- }
- *offset += len;
- retval = len;
+ len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
-exit:
- free_page((unsigned long)page);
- return retval;
+ return simple_read_from_buffer(buf, count, offset, lbuf, len);
}
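simple_read_from_buffer(), which replaces the hand-rolled page allocation and copy_to_user() bookkeeping above, already implements the read() contract: it honors *offset and count, copies out of the kernel buffer, advances *offset, and returns the number of bytes copied (0 at EOF, -EFAULT on fault). Its signature, for reference:

    ssize_t simple_read_from_buffer(void __user *to, size_t count,
                                    loff_t *ppos, const void *from,
                                    size_t available);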
static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 98fd043ef0172b..cb51bb7c210f3d 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -103,6 +103,7 @@ enum ctype {
CT_EXEC_USERSPACE,
CT_ACCESS_USERSPACE,
CT_WRITE_RO,
+ CT_WRITE_RO_AFTER_INIT,
CT_WRITE_KERN,
};
@@ -140,6 +141,7 @@ static char* cp_type[] = {
"EXEC_USERSPACE",
"ACCESS_USERSPACE",
"WRITE_RO",
+ "WRITE_RO_AFTER_INIT",
"WRITE_KERN",
};
@@ -162,6 +164,7 @@ static DEFINE_SPINLOCK(lock_me_up);
static u8 data_area[EXEC_SIZE];
static const unsigned long rodata = 0xAA55AA55;
+static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
@@ -327,12 +330,19 @@ static void do_overwritten(void)
return;
}
+static noinline void __lkdtm_corrupt_stack(void *stack)
+{
+ memset(stack, 0, 64);
+}
+
static noinline void corrupt_stack(void)
{
/* Use default char array length that triggers stack protection. */
- char data[8];
+ char data[8] __aligned(sizeof(void *));
- memset((void *)data, 0, 64);
+ __lkdtm_corrupt_stack((void *)&data);
+
+ pr_info("Corrupted stack containing char array ...\n");
}
static void execute_location(void *dst)
@@ -504,11 +514,28 @@ static void lkdtm_do_action(enum ctype which)
break;
}
case CT_WRITE_RO: {
- unsigned long *ptr;
+ /* Explicitly cast away "const" for the test. */
+ unsigned long *ptr = (unsigned long *)&rodata;
+
+ pr_info("attempting bad rodata write at %p\n", ptr);
+ *ptr ^= 0xabcd1234;
- ptr = (unsigned long *)&rodata;
+ break;
+ }
+ case CT_WRITE_RO_AFTER_INIT: {
+ unsigned long *ptr = &ro_after_init;
+
+ /*
+ * Verify we were written to during init. Since an Oops
+ * is considered a "success", a failure is to just skip the
+ * real test.
+ */
+ if ((*ptr & 0xAA) != 0xAA) {
+ pr_info("%p was NOT written during init!?\n", ptr);
+ break;
+ }
- pr_info("attempting bad write at %p\n", ptr);
+ pr_info("attempting bad ro_after_init write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
@@ -818,6 +845,9 @@ static int __init lkdtm_module_init(void)
int n_debugfs_entries = 1; /* Assume only the direct entry */
int i;
+ /* Make sure we can write to __ro_after_init values during __init */
+ ro_after_init |= 0xAA;
+
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
if (!lkdtm_debugfs_root) {
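The new WRITE_RO_AFTER_INIT test depends on the __ro_after_init contract: the section is writable while __init code runs and is remapped read-only once init memory is released, which is why the module-init hook above seeds the 0xAA marker first. A minimal standalone illustration (a sketch, names hypothetical):

    static unsigned long guard __ro_after_init;

    static int __init seed_guard(void)
    {
        guard = 0xAA;    /* legal: init has not finished yet */
        return 0;
    }
    late_initcall(seed_guard);
    /* after boot completes, any write to 'guard' must fault */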
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index bdc7fcd80eca64..9dcdc6f41ceba4 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -151,7 +151,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = 0;
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length);
- if (bytes_recv < if_version_length) {
+ if (bytes_recv < 0 || bytes_recv < if_version_length) {
dev_err(bus->dev, "Could not read IF version\n");
ret = -EIO;
goto err;
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index baf93f553a2350..82768ac93737a9 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -231,8 +231,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pci_dev_run_wake(pdev))
mei_me_set_pm_domain(dev);
- if (mei_pg_is_enabled(dev))
+ if (mei_pg_is_enabled(dev)) {
pm_runtime_put_noidle(&pdev->dev);
+ if (hw->d0i3_supported)
+ pm_runtime_allow(&pdev->dev);
+ }
dev_dbg(&pdev->dev, "initialization successful.\n");
diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
index ddc9e4b08b5cd8..56efa9d18a9afe 100644
--- a/drivers/misc/mic/scif/scif_api.c
+++ b/drivers/misc/mic/scif/scif_api.c
@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
goto scif_bind_exit;
}
} else {
- pn = scif_get_new_port();
- if (!pn) {
- ret = -ENOSPC;
+ ret = scif_get_new_port();
+ if (ret < 0)
goto scif_bind_exit;
- }
+ pn = ret;
}
ep->state = SCIFEP_BOUND;
@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
err = -EISCONN;
break;
case SCIFEP_UNBOUND:
- ep->port.port = scif_get_new_port();
- if (!ep->port.port) {
- err = -ENOSPC;
- } else {
- ep->port.node = scif_info.nodeid;
- ep->conn_async_state = ASYNC_CONN_IDLE;
- }
+ err = scif_get_new_port();
+ if (err < 0)
+ break;
+ ep->port.port = err;
+ ep->port.node = scif_info.nodeid;
+ ep->conn_async_state = ASYNC_CONN_IDLE;
/* Fall through */
case SCIFEP_BOUND:
/*
diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
index 6a451bd65bf3f8..71c69e1c4ac05d 100644
--- a/drivers/misc/mic/scif/scif_rma.c
+++ b/drivers/misc/mic/scif/scif_rma.c
@@ -414,7 +414,7 @@ static int scif_create_remote_lookup(struct scif_dev *remote_dev,
if (err)
goto error_window;
err = scif_map_page(&window->num_pages_lookup.lookup[j],
- vmalloc_dma_phys ?
+ vmalloc_num_pages ?
vmalloc_to_page(&window->num_pages[i]) :
virt_to_page(&window->num_pages[i]),
remote_dev);
@@ -1398,8 +1398,7 @@ retry:
mm,
(u64)addr,
nr_pages,
- !!(prot & SCIF_PROT_WRITE),
- 0,
+ (prot & SCIF_PROT_WRITE) ? FOLL_WRITE : 0,
pinned_pages->pages,
NULL);
up_write(&mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index f74fc0ca2ef9b8..e6b723c6a2afca 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -199,7 +199,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma,
*pageshift = PAGE_SHIFT;
#endif
if (get_user_pages
- (current, current->mm, vaddr, 1, write, 0, &page, NULL) <= 0)
+ (current, current->mm, vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0)
return -EFAULT;
*paddr = page_to_phys(page);
put_page(page);
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 313da315026268..1540a7785e1474 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -27,6 +27,9 @@
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
+
+#include <linux/nospec.h>
+
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
@@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg)
/* Currently, only dump by gid is implemented */
if (req.gid >= gru_max_gids)
return -EINVAL;
+ req.gid = array_index_nospec(req.gid, gru_max_gids);
gru = GID_TO_GRU(req.gid);
ubuf = req.buf;
diff --git a/drivers/misc/throttler/Kconfig b/drivers/misc/throttler/Kconfig
new file mode 100644
index 00000000000000..77f9b2feefeab1
--- /dev/null
+++ b/drivers/misc/throttler/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig THROTTLER
+ bool "Throttler support"
+ depends on OF
+ depends on PM_DEVFREQ
+ help
+ This option enables core support for non-thermal throttling of CPUs
+ and devfreq devices.
+
+ Note that you also need an event monitor module, usually called
+ *_throttler.
+
+if THROTTLER
+
+config THROTTLER_DEBUG
+ bool "Enable throttler debugging"
+ help
+ This option enables throttler debugging features like additional
+ logging and a debugfs attribute for setting the logging level.
+
+ Choose N unless you want to debug throttler drivers.
+
+config CROS_EC_THROTTLER
+ tristate "Throttler event monitor for the Chrome OS Embedded Controller"
+ depends on MFD_CROS_EC
+ help
+ This driver adds support to throttle the system in reaction to
+ Chrome OS EC events.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_throttler.
+
+endif # THROTTLER
diff --git a/drivers/misc/throttler/Makefile b/drivers/misc/throttler/Makefile
new file mode 100644
index 00000000000000..d9b2a77dabc9e3
--- /dev/null
+++ b/drivers/misc/throttler/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_THROTTLER) += core.o
+obj-$(CONFIG_CROS_EC_THROTTLER) += cros_ec_throttler.o
diff --git a/drivers/misc/throttler/core.c b/drivers/misc/throttler/core.c
new file mode 100644
index 00000000000000..73961bc3712f33
--- /dev/null
+++ b/drivers/misc/throttler/core.c
@@ -0,0 +1,697 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Core code for non-thermal throttling
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/devfreq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+#include <linux/throttler.h>
+
+/*
+ * Non-thermal throttling: throttling of system components in response to
+ * external events (e.g. high battery discharge current).
+ *
+ * The throttler supports throttling through cpufreq and devfreq. Multiple
+ * levels of throttling can be configured. At level 0 no throttling is
+ * active on behalf of the throttler, for values > 0 throttling is typically
+ * configured to be increasingly aggressive with each level.
+ * The number of throttling levels is not limited by the throttler (though
+ * it is likely limited by the throttling devices). It is not necessary to
+ * configure the same number of levels for all throttling devices. If the
+ * requested throttling level for a device is higher than the maximum level
+ * of the device the throttler will select the maximum throttling level of
+ * the device.
+ *
+ * Non-thermal throttling is split in two parts:
+ *
+ * - throttler core
+ * - parses the thermal policy
+ * - applies throttling settings for a requested level of throttling
+ *
+ * - event monitor driver
+ * - monitors events that trigger throttling
+ * - determines the throttling level (often limited to on/off)
+ * - asks throttler core to apply throttling settings
+ *
+ * It is possible for a system to have more than one throttler and the
+ * throttlers may make use of the same throttling devices, in case of
+ * conflicting settings for a device the more aggressive values will be
+ * applied.
+ */
+
+#define ci_to_throttler(ci) \
+ container_of(ci, struct throttler, devfreq.class_iface)
+
+struct thr_freq_table {
+ uint32_t *freqs;
+ int n_entries;
+};
+
+struct cpufreq_thrdev {
+ uint32_t cpu;
+ struct thr_freq_table freq_table;
+ uint32_t clamp_freq;
+ struct list_head node;
+};
+
+struct devfreq_thrdev {
+ struct devfreq *devfreq;
+ struct thr_freq_table freq_table;
+ uint32_t clamp_freq;
+ struct throttler *thr;
+ struct notifier_block nb;
+ struct list_head node;
+};
+
+struct __thr_cpufreq {
+ struct list_head list;
+ cpumask_t cm_initialized;
+ cpumask_t cm_ignore;
+ struct notifier_block nb;
+};
+
+struct __thr_devfreq {
+ struct list_head list;
+ struct class_interface class_iface;
+};
+
+struct __thr_debugfs {
+ struct dentry *dir;
+ struct dentry *attr_level;
+};
+
+struct throttler {
+ struct device *dev;
+ unsigned int level;
+ struct __thr_cpufreq cpufreq;
+ struct __thr_devfreq devfreq;
+ struct mutex lock;
+ bool shutting_down;
+#ifdef CONFIG_THROTTLER_DEBUG
+ struct __thr_debugfs debugfs;
+#endif
+};
+
+static inline int cmp_freqs(const void *a, const void *b)
+{
+ const uint32_t *pa = a, *pb = b;
+
+ if (*pa < *pb)
+ return 1;
+ else if (*pa > *pb)
+ return -1;
+
+ return 0;
+}
+
+static int thr_handle_devfreq_event(struct notifier_block *nb,
+ unsigned long event, void *data);
+
+static unsigned long thr_get_throttling_freq(struct thr_freq_table *ft,
+ unsigned int level)
+{
+ if (level == 0)
+ return ULONG_MAX;
+
+ if (level <= ft->n_entries)
+ return ft->freqs[level - 1];
+ else
+ return ft->freqs[ft->n_entries - 1];
+}
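
/*
 * Illustrative mapping (a sketch, not part of the patch): with a
 * descending table { f1, f2 } (n_entries = 2) the function returns
 *   level 0  -> ULONG_MAX (unthrottled)
 *   level 1  -> f1
 *   level 2  -> f2
 *   level 3+ -> f2 (levels beyond the table clamp to its last entry)
 */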
+
+static int thr_init_freq_table(struct throttler *thr, struct device *opp_dev,
+ struct thr_freq_table *ft)
+{
+ struct device_node *np_opp_desc;
+ int n_opps;
+ int n_thr_opps;
+ int i;
+ uint32_t *freqs;
+ int n_freqs = 0;
+ int err = 0;
+
+ np_opp_desc = dev_pm_opp_of_get_opp_desc_node(opp_dev);
+ if (!np_opp_desc)
+ return -EINVAL;
+
+ n_opps = of_get_child_count(np_opp_desc);
+ if (!n_opps) {
+ err = -EINVAL;
+ goto out_node_put;
+ }
+
+ freqs = kzalloc(n_opps * sizeof(uint32_t), GFP_KERNEL);
+ if (!freqs) {
+ err = -ENOMEM;
+ goto out_node_put;
+ }
+
+ n_thr_opps = of_property_count_u32_elems(thr->dev->of_node,
+ "throttler-opps");
+ if (n_thr_opps <= 0) {
+ thr_err(thr, "No OPPs configured for throttling\n");
+ err = -EINVAL;
+ goto out_free;
+ }
+
+ for (i = 0; i < n_thr_opps; i++) {
+ struct device_node *np_opp;
+ u64 rate;
+
+ np_opp = of_parse_phandle(thr->dev->of_node, "throttler-opps",
+ i);
+ if (!np_opp) {
+ thr_err(thr,
+ "failed to parse 'throttler-opps' phandle %d\n",
+ i);
+ continue;
+ }
+
+ if (of_get_parent(np_opp) != np_opp_desc) {
+ of_node_put(np_opp);
+ continue;
+ }
+
+ err = of_property_read_u64(np_opp, "opp-hz",
+ &rate);
+ if (!err) {
+ freqs[n_freqs] = rate;
+ n_freqs++;
+
+ thr_dbg(thr,
+ "OPP %s (%llu MHz) is used for throttling\n",
+ np_opp->full_name,
+ div_u64(rate, 1000000));
+ } else {
+ thr_err(thr, "opp-hz not found: %s\n",
+ np_opp->full_name);
+ }
+
+ of_node_put(np_opp);
+ }
+
+ if (n_freqs > 0) {
+ /* sort frequencies in descending order */
+ sort(freqs, n_freqs, sizeof(*freqs), cmp_freqs, NULL);
+
+ ft->n_entries = n_freqs;
+ ft->freqs = devm_kzalloc(thr->dev,
+ n_freqs * sizeof(*freqs), GFP_KERNEL);
+ if (!ft->freqs) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ memcpy(ft->freqs, freqs, n_freqs * sizeof(*freqs));
+ } else {
+ err = -ENODEV;
+ }
+
+out_free:
+ kfree(freqs);
+
+out_node_put:
+ of_node_put(np_opp_desc);
+
+ return err;
+}
+
+static void thr_cpufreq_init(struct throttler *thr, int cpu)
+{
+ struct device *cpu_dev;
+ struct thr_freq_table ft;
+ struct cpufreq_thrdev *cpufreq_dev;
+ int err;
+
+ WARN_ON(!mutex_is_locked(&thr->lock));
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ dev_err_ratelimited(thr->dev, "failed to get CPU %d\n", cpu);
+ return;
+ }
+
+ err = thr_init_freq_table(thr, cpu_dev, &ft);
+ if (err) {
+ /* CPU is not throttled or initialization failed */
+ if (err != -ENODEV)
+ thr_err(thr, "failed to initialize CPU %d: %d", cpu,
+ err);
+
+ cpumask_set_cpu(cpu, &thr->cpufreq.cm_ignore);
+ return;
+ }
+
+ cpufreq_dev = devm_kzalloc(thr->dev, sizeof(*cpufreq_dev), GFP_KERNEL);
+ if (!cpufreq_dev)
+ return;
+
+ cpufreq_dev->cpu = cpu;
+ memcpy(&cpufreq_dev->freq_table, &ft, sizeof(ft));
+ list_add_tail(&cpufreq_dev->node, &thr->cpufreq.list);
+
+ cpumask_set_cpu(cpu, &thr->cpufreq.cm_initialized);
+}
+
+static void thr_devfreq_init(struct device *dev, void *data)
+{
+ struct throttler *thr = data;
+ struct thr_freq_table ft;
+ struct devfreq_thrdev *dftd;
+ int err;
+
+ WARN_ON(!mutex_is_locked(&thr->lock));
+
+ err = thr_init_freq_table(thr, dev->parent, &ft);
+ if (err) {
+ if (err == -ENODEV)
+ return;
+
+ thr_err(thr, "failed to init frequency table of device %s: %d",
+ dev_name(dev), err);
+ return;
+ }
+
+ dftd = devm_kzalloc(thr->dev, sizeof(*dftd), GFP_KERNEL);
+ if (!dftd)
+ return;
+
+ dftd->thr = thr;
+ dftd->devfreq = container_of(dev, struct devfreq, dev);
+ memcpy(&dftd->freq_table, &ft, sizeof(ft));
+
+ dftd->nb.notifier_call = thr_handle_devfreq_event;
+ err = devm_devfreq_register_notifier(thr->dev, dftd->devfreq,
+ &dftd->nb, DEVFREQ_POLICY_NOTIFIER);
+ if (err < 0) {
+ thr_err(thr, "failed to register devfreq notifier\n");
+ devm_kfree(thr->dev, dftd);
+ return;
+ }
+
+ list_add_tail(&dftd->node, &thr->devfreq.list);
+
+ thr_dbg(thr, "device '%s' is used for throttling\n",
+ dev_name(dev));
+}
+
+static int thr_handle_cpufreq_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct throttler *thr =
+ container_of(nb, struct throttler, cpufreq.nb);
+ struct cpufreq_policy *policy = data;
+ struct cpufreq_thrdev *cftd;
+
+ if ((event != CPUFREQ_ADJUST) || thr->shutting_down)
+ return 0;
+
+ mutex_lock(&thr->lock);
+
+ if (cpumask_test_cpu(policy->cpu, &thr->cpufreq.cm_ignore))
+ goto out;
+
+ if (!cpumask_test_cpu(policy->cpu, &thr->cpufreq.cm_initialized)) {
+ thr_cpufreq_init(thr, policy->cpu);
+
+ if (cpumask_test_cpu(policy->cpu, &thr->cpufreq.cm_ignore))
+ goto out;
+
+ thr_dbg(thr, "CPU%d is used for throttling\n", policy->cpu);
+ }
+
+ list_for_each_entry(cftd, &thr->cpufreq.list, node) {
+ unsigned long clamp_freq;
+
+ if (cftd->cpu != policy->cpu)
+ continue;
+
+ if (thr->level == 0) {
+ if (cftd->clamp_freq != 0) {
+ thr_dbg(thr, "unthrottling CPU%d\n", cftd->cpu);
+ cftd->clamp_freq = 0;
+ }
+
+ continue;
+ }
+
+ clamp_freq = thr_get_throttling_freq(&cftd->freq_table,
+ thr->level) / 1000;
+ if (cftd->clamp_freq != clamp_freq) {
+ thr_dbg(thr, "throttling CPU%d to %lu MHz\n", cftd->cpu,
+ clamp_freq / 1000);
+ cftd->clamp_freq = clamp_freq;
+ }
+
+ if (clamp_freq < policy->max)
+ cpufreq_verify_within_limits(policy, 0, clamp_freq);
+ }
+
+out:
+ mutex_unlock(&thr->lock);
+
+ return NOTIFY_DONE;
+}
+
+/*
+ * Notifier called by devfreq. Can't acquire thr->lock since it might
+ * already be held by throttler_set_level(). It isn't necessary to
+ * acquire the lock for the following reasons:
+ *
+ * Only the devfreq_thrdev and thr->level are accessed in this function.
+ * The devfreq device won't go away (or change) during the execution of
+ * this function, since we are called from the devfreq core. Theoretically
+ * thr->level could change and we'd apply an outdated setting, however in
+ * this case the function would run again shortly after and apply the
+ * correct value.
+ */
+static int thr_handle_devfreq_event(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct devfreq_thrdev *dftd =
+ container_of(nb, struct devfreq_thrdev, nb);
+ struct throttler *thr = dftd->thr;
+ struct devfreq_policy *policy = data;
+ int level = READ_ONCE(thr->level);
+ unsigned long clamp_freq;
+
+ if ((event != DEVFREQ_ADJUST) || thr->shutting_down)
+ return NOTIFY_DONE;
+
+ if (level == 0) {
+ if (dftd->clamp_freq != 0) {
+ thr_dbg(thr, "unthrottling '%s'\n",
+ dev_name(&dftd->devfreq->dev));
+ dftd->clamp_freq = 0;
+ }
+
+ return NOTIFY_DONE;
+ }
+
+ clamp_freq = thr_get_throttling_freq(&dftd->freq_table, level);
+ if (clamp_freq != dftd->clamp_freq) {
+ thr_dbg(thr, "throttling '%s' to %lu MHz\n",
+ dev_name(&dftd->devfreq->dev), clamp_freq / 1000000);
+ dftd->clamp_freq = clamp_freq;
+ }
+
+ if (clamp_freq < policy->max)
+ devfreq_verify_within_limits(policy, 0, clamp_freq);
+
+ return NOTIFY_DONE;
+}
+
+static void thr_cpufreq_update_policy(struct throttler *thr)
+{
+ struct cpufreq_thrdev *cftd;
+
+ WARN_ON(!mutex_is_locked(&thr->lock));
+
+ list_for_each_entry(cftd, &thr->cpufreq.list, node) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cftd->cpu);
+
+ if (!policy) {
+ thr_warn(thr, "CPU%d has no cpufreq policy!\n",
+ cftd->cpu);
+ continue;
+ }
+
+ /*
+ * The lock isn't really needed in this function: the list
+ * of cpufreq devices can be extended, but no items are
+ * deleted during the lifetime of the throttler. Releasing
+ * the lock is necessary since cpufreq_update_policy() ends
+ * up calling thr_handle_cpufreq_event(), which needs to
+ * acquire the lock.
+ */
+ mutex_unlock(&thr->lock);
+ cpufreq_update_policy(cftd->cpu);
+ mutex_lock(&thr->lock);
+
+ cpufreq_cpu_put(policy);
+ }
+}
+
+static void thr_update_devfreq(struct throttler *thr)
+{
+ struct devfreq_thrdev *dftd;
+
+ WARN_ON(!mutex_is_locked(&thr->lock));
+
+ list_for_each_entry(dftd, &thr->devfreq.list, node) {
+ mutex_lock(&dftd->devfreq->lock);
+ update_devfreq(dftd->devfreq);
+ mutex_unlock(&dftd->devfreq->lock);
+ }
+}
+
+static int thr_handle_devfreq_added(struct device *dev,
+ struct class_interface *ci)
+{
+ struct throttler *thr = ci_to_throttler(ci);
+
+ mutex_lock(&thr->lock);
+ thr_devfreq_init(dev, thr);
+ mutex_unlock(&thr->lock);
+
+ return 0;
+}
+
+static void thr_handle_devfreq_removed(struct device *dev,
+ struct class_interface *ci)
+{
+ struct devfreq_thrdev *dftd;
+ struct throttler *thr = ci_to_throttler(ci);
+
+ mutex_lock(&thr->lock);
+
+ list_for_each_entry(dftd, &thr->devfreq.list, node) {
+ if (dev == &dftd->devfreq->dev) {
+ list_del(&dftd->node);
+ devm_kfree(thr->dev, dftd->freq_table.freqs);
+ devm_kfree(thr->dev, dftd);
+ break;
+ }
+ }
+
+ mutex_unlock(&thr->lock);
+}
+
+void throttler_set_level(struct throttler *thr, unsigned int level)
+{
+ mutex_lock(&thr->lock);
+
+ if ((level == thr->level) || thr->shutting_down) {
+ mutex_unlock(&thr->lock);
+ return;
+ }
+
+ thr_dbg(thr, "throttling level: %u\n", level);
+ thr->level = level;
+
+ if (!list_empty(&thr->cpufreq.list))
+ thr_cpufreq_update_policy(thr);
+
+ thr_update_devfreq(thr);
+
+ mutex_unlock(&thr->lock);
+}
+EXPORT_SYMBOL_GPL(throttler_set_level);
+
+#ifdef CONFIG_THROTTLER_DEBUG
+
+static ssize_t thr_level_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct throttler *thr = file->f_inode->i_private;
+ char buf[5];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", thr->level);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t thr_level_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int rc;
+ unsigned int level;
+ struct throttler *thr = file->f_inode->i_private;
+
+ rc = kstrtouint_from_user(user_buf, count, 10, &level);
+ if (rc)
+ return rc;
+
+ throttler_set_level(thr, level);
+
+ return count;
+}
+
+static const struct file_operations level_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = thr_level_read,
+ .write = thr_level_write,
+};
+#endif
+
+struct throttler *throttler_setup(struct device *dev)
+{
+ struct throttler *thr;
+ struct device_node *np = dev->of_node;
+ struct class_interface *ci;
+ int cpu;
+ int err;
+
+ if (!np)
+ /* should never happen */
+ return ERR_PTR(-EINVAL);
+
+ thr = devm_kzalloc(dev, sizeof(*thr), GFP_KERNEL);
+ if (!thr)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&thr->lock);
+ thr->dev = dev;
+
+ cpumask_clear(&thr->cpufreq.cm_ignore);
+ cpumask_clear(&thr->cpufreq.cm_initialized);
+
+ INIT_LIST_HEAD(&thr->cpufreq.list);
+ INIT_LIST_HEAD(&thr->devfreq.list);
+
+ thr->cpufreq.nb.notifier_call = thr_handle_cpufreq_event;
+ err = cpufreq_register_notifier(&thr->cpufreq.nb,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (err < 0) {
+ thr_err(thr, "failed to register cpufreq notifier\n");
+ return ERR_PTR(err);
+ }
+
+ /*
+ * The CPU throttling configuration is parsed at runtime, when the
+ * cpufreq policy notifier is called for a CPU that hasn't been
+ * initialized yet.
+ *
+ * This is done for two reasons:
+ * - when the throttler is probed the CPU might not yet have a policy
+ * - CPUs that were offline at probe time might be hotplugged
+ *
+ * The notifier is called when the policy is added or set.
+ */
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+ if (!policy)
+ continue;
+
+ cpufreq_update_policy(cpu);
+ cpufreq_cpu_put(policy);
+ }
+
+ /*
+ * devfreq devices can be added and removed at runtime, hence they
+ * must also be handled dynamically. The class_interface notifies us
+ * whenever a device is added or removed. When the interface is
+ * registered ci->add_dev() is called for all existing devfreq
+ * devices.
+ */
+ ci = &thr->devfreq.class_iface;
+ ci->class = devfreq_class;
+ ci->add_dev = thr_handle_devfreq_added;
+ ci->remove_dev = thr_handle_devfreq_removed;
+
+ err = class_interface_register(ci);
+ if (err) {
+ thr_err(thr, "failed to register devfreq class interface: %d\n",
+ err);
+ cpufreq_unregister_notifier(&thr->cpufreq.nb,
+ CPUFREQ_POLICY_NOTIFIER);
+ return ERR_PTR(err);
+ }
+
+#ifdef CONFIG_THROTTLER_DEBUG
+ thr->debugfs.dir = debugfs_create_dir(dev_name(thr->dev), NULL);
+ if (IS_ERR(thr->debugfs.dir)) {
+ thr_warn(thr, "failed to create debugfs directory: %ld\n",
+ PTR_ERR(thr->debugfs.dir));
+ thr->debugfs.dir = NULL;
+ goto skip_debugfs;
+ }
+
+ thr->debugfs.attr_level = debugfs_create_file("level", 0644,
+ thr->debugfs.dir, thr,
+ &level_debugfs_ops);
+ if (IS_ERR(thr->debugfs.attr_level)) {
+ thr_warn(thr, "failed to create debugfs attribute: %ld\n",
+ PTR_ERR(thr->debugfs.attr_level));
+ debugfs_remove(thr->debugfs.dir);
+ thr->debugfs.dir = NULL;
+ }
+
+skip_debugfs:
+#endif
+
+ return thr;
+}
+EXPORT_SYMBOL_GPL(throttler_setup);
+
+void throttler_teardown(struct throttler *thr)
+{
+#ifdef CONFIG_THROTTLER_DEBUG
+ debugfs_remove_recursive(thr->debugfs.dir);
+#endif
+
+ /*
+ * Indicate to notifiers and _set_level() that we are shutting down.
+ * If a notifier starts before the flag is set it may still apply
+ * throttling settings. This is not a problem since we explicitly
+ * trigger the notifiers (again) below to unthrottle CPUs and
+ * devfreq devices.
+ */
+ thr->shutting_down = true;
+
+ /*
+ * Unregister without the lock being held to avoid possible
+ * deadlock with notifier calls.
+ */
+ cpufreq_unregister_notifier(&thr->cpufreq.nb,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ mutex_lock(&thr->lock);
+
+ if (thr->level) {
+ /* Unthrottle CPUs */
+ if (!list_empty(&thr->cpufreq.list))
+ thr_cpufreq_update_policy(thr);
+
+ /* Unthrottle devfreq devices */
+ thr_update_devfreq(thr);
+ }
+
+ mutex_unlock(&thr->lock);
+
+ /*
+ * Unregistering the class interface must be done without holding the
+ * lock, since it results in calling thr_handle_devfreq_removed(),
+ * which acquires the lock.
+ */
+ class_interface_unregister(&thr->devfreq.class_iface);
+}
+EXPORT_SYMBOL_GPL(throttler_teardown);
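The cros_ec_throttler driver that follows is the concrete event monitor for this core; the driver-facing flow, reduced to its calls (a sketch, assuming only the API exported above):

    thr = throttler_setup(dev);    /* parses throttler-opps, hooks cpufreq/devfreq */
    if (IS_ERR(thr))
        return PTR_ERR(thr);

    throttler_set_level(thr, 1);   /* event asserted: throttle */
    throttler_set_level(thr, 0);   /* event cleared: unthrottle */

    throttler_teardown(thr);       /* on driver removal */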
diff --git a/drivers/misc/throttler/cros_ec_throttler.c b/drivers/misc/throttler/cros_ec_throttler.c
new file mode 100644
index 00000000000000..82a25415a26419
--- /dev/null
+++ b/drivers/misc/throttler/cros_ec_throttler.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for throttling triggered by events from the Chrome OS Embedded
+ * Controller.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/throttler.h>
+
+#define nb_to_ce_thr(nb) container_of(nb, struct cros_ec_throttler, nb)
+
+struct cros_ec_throttler {
+ struct cros_ec_device *ec;
+ struct throttler *throttler;
+ struct notifier_block nb;
+};
+
+static int cros_ec_throttler_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend, void *_notify)
+{
+ struct cros_ec_throttler *ce_thr = nb_to_ce_thr(nb);
+ u32 host_event;
+
+ host_event = cros_ec_get_host_event(ce_thr->ec);
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_THROTTLE_START)) {
+ throttler_set_level(ce_thr->throttler, 1);
+
+ return NOTIFY_OK;
+ } else if (host_event &
+ EC_HOST_EVENT_MASK(EC_HOST_EVENT_THROTTLE_STOP)) {
+ throttler_set_level(ce_thr->throttler, 0);
+
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int cros_ec_throttler_probe(struct platform_device *pdev)
+{
+ struct cros_ec_throttler *ce_thr;
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec;
+ int ret;
+
+ ce_thr = devm_kzalloc(dev, sizeof(*ce_thr), GFP_KERNEL);
+ if (!ce_thr)
+ return -ENOMEM;
+
+ ec = dev_get_drvdata(pdev->dev.parent);
+ ce_thr->ec = ec->ec_dev;
+
+ /*
+ * The core code uses the DT node of the throttler to identify its
+ * throttling devices and rates. The CrOS EC throttler is a sub-device
+ * of the CrOS EC MFD device and doesn't have its own device node. Use
+ * the node of the MFD device instead.
+ */
+ dev->of_node = ce_thr->ec->dev->of_node;
+
+ ce_thr->throttler = throttler_setup(dev);
+ if (IS_ERR(ce_thr->throttler))
+ return PTR_ERR(ce_thr->throttler);
+
+ dev_set_drvdata(dev, ce_thr);
+
+ ce_thr->nb.notifier_call = cros_ec_throttler_event;
+ ret = blocking_notifier_chain_register(&ce_thr->ec->event_notifier,
+ &ce_thr->nb);
+ if (ret < 0) {
+ dev_err(dev, "failed to register notifier\n");
+ throttler_teardown(ce_thr->throttler);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_ec_throttler_remove(struct platform_device *pdev)
+{
+ struct cros_ec_throttler *ce_thr = platform_get_drvdata(pdev);
+
+ blocking_notifier_chain_unregister(&ce_thr->ec->event_notifier,
+ &ce_thr->nb);
+
+ throttler_teardown(ce_thr->throttler);
+
+ return 0;
+}
+
+static struct platform_driver cros_ec_throttler_driver = {
+ .driver = {
+ .name = "cros-ec-throttler",
+ },
+ .probe = cros_ec_throttler_probe,
+ .remove = cros_ec_throttler_remove,
+};
+
+module_platform_driver(cros_ec_throttler_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Matthias Kaehlcke <mka@chromium.org>");
+MODULE_DESCRIPTION("Chrome OS EC Throttler");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 71b64550b59184..a1bca836e506ce 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -757,14 +757,14 @@ static int kim_probe(struct platform_device *pdev)
err = gpio_request(kim_gdata->nshutdown, "kim");
if (unlikely(err)) {
pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* Configure nShutdown GPIO as output=0 */
err = gpio_direction_output(kim_gdata->nshutdown, 0);
if (unlikely(err)) {
pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
- return err;
+ goto err_sysfs_group;
}
/* get reference of pdev for request_firmware
*/
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 87a13374fdc0da..eb57610673102f 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@ static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
} else
lux = 0;
else
- return -EAGAIN;
+ return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index c344483fa7d65a..9f257c53e6d4c6 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
int tries;
long timeout;
- if (WARN_ON(index > func->num_templates))
+ if (WARN_ON(index >= func->num_templates))
return -EINVAL;
command = readl(syscfg->base + SYS_CFGCTRL);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 1e688bfec56728..5e9122cd389885 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,6 +45,7 @@
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
+#include <linux/io.h>
#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
@@ -341,7 +342,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
success = false;
}
- if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
+ /*
+ * 2MB pages are only supported with batching. If batching is disabled
+ * for some reason, do not use 2MB pages, since the legacy mechanism
+ * would otherwise be used with 2MB pages and fail.
+ */
+ if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
+ (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
b->supported_page_sizes = 2;
else
b->supported_page_sizes = 1;
@@ -450,7 +457,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return -1;
+ return -EINVAL;
STATS_INC(b->stats.lock[false]);
@@ -460,14 +467,14 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail[false]);
- return 1;
+ return -EIO;
}
static int vmballoon_send_batched_lock(struct vmballoon *b,
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
unsigned long status;
- unsigned long pfn = page_to_pfn(b->page);
+ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
STATS_INC(b->stats.lock[is_2m_pages]);
@@ -515,7 +522,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
unsigned long status;
- unsigned long pfn = page_to_pfn(b->page);
+ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
STATS_INC(b->stats.unlock[is_2m_pages]);
@@ -576,15 +583,9 @@ static void vmballoon_pop(struct vmballoon *b)
}
}
- if (b->batch_page) {
- vunmap(b->batch_page);
- b->batch_page = NULL;
- }
-
- if (b->page) {
- __free_page(b->page);
- b->page = NULL;
- }
+ /* Clearing the batch_page unconditionally has no adverse effect */
+ free_page((unsigned long)b->batch_page);
+ b->batch_page = NULL;
}
/*
@@ -603,11 +604,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
target);
- if (locked > 0) {
+ if (locked) {
STATS_INC(b->stats.refused_alloc[false]);
- if (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
+ if (locked == -EIO &&
+ (hv_status == VMW_BALLOON_ERROR_RESET ||
+ hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
vmballoon_free_page(page, false);
return -EIO;
}
@@ -623,7 +625,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
} else {
vmballoon_free_page(page, false);
}
- return -EIO;
+ return locked;
}
/* track allocated page */
@@ -991,16 +993,13 @@ static const struct vmballoon_ops vmballoon_batched_ops = {
static bool vmballoon_init_batching(struct vmballoon *b)
{
- b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
- if (!b->page)
- return false;
+ struct page *page;
- b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
- if (!b->batch_page) {
- __free_page(b->page);
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
return false;
- }
+ b->batch_page = page_address(page);
return true;
}
@@ -1038,29 +1037,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- int error = 0;
+ unsigned long error, dummy;
- if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
- error = vmci_doorbell_create(&b->vmci_doorbell,
- VMCI_FLAG_DELAYED_CB,
- VMCI_PRIVILEGE_FLAG_RESTRICTED,
- vmballoon_doorbell, b);
-
- if (error == VMCI_SUCCESS) {
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
- b->vmci_doorbell.context,
- b->vmci_doorbell.resource, error);
- STATS_INC(b->stats.doorbell_set);
- }
- }
+ if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
+ return 0;
- if (error != 0) {
- vmballoon_vmci_cleanup(b);
+ error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
+ VMCI_PRIVILEGE_FLAG_RESTRICTED,
+ vmballoon_doorbell, b);
- return -EIO;
- }
+ if (error != VMCI_SUCCESS)
+ goto fail;
+
+ error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, dummy);
+
+ STATS_INC(b->stats.doorbell_set);
+
+ if (error != VMW_BALLOON_SUCCESS)
+ goto fail;
return 0;
+fail:
+ vmballoon_vmci_cleanup(b);
+ return -EIO;
}
/*
@@ -1298,7 +1298,14 @@ static int __init vmballoon_init(void)
return 0;
}
-module_init(vmballoon_init);
+
+/*
+ * Using late_initcall() instead of module_init() allows the balloon to use the
+ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
+ * VMCI is probed only after the balloon is initialized. If the balloon is used
+ * as a module, late_initcall() is equivalent to module_init().
+ */
+late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
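
A minimal sketch of the 2MB gate above, with illustrative flag values (the
real bit assignments live in the balloon protocol header and are not shown
in this diff):

    #define VMW_BALLOON_BATCHED_CMDS    (1 << 2)  /* illustrative only */
    #define VMW_BALLOON_BATCHED_2M_CMDS (1 << 3)  /* illustrative only */

    static unsigned int vmballoon_page_sizes(unsigned long caps)
    {
            /* 2MB requests exist only in the batched protocol; the
             * legacy single-page interface cannot express them. */
            if ((caps & VMW_BALLOON_BATCHED_2M_CMDS) &&
                (caps & VMW_BALLOON_BATCHED_CMDS))
                    return 2;
            return 1;
    }
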
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index cc277f7849b0cc..3877f534fd3f49 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -755,7 +755,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
- if (retval < produce_q->kernel_if->num_pages) {
+ if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -767,7 +767,7 @@ static int qp_host_get_user_memory(u64 produce_uva,
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
- if (retval < consume_q->kernel_if->num_pages) {
+ if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
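
The two (int) casts above matter because get_user_pages_fast() returns a
signed count (negative on error) while num_pages is unsigned: in a mixed
comparison the error value converts to a huge unsigned number and the
failure branch is skipped. A standalone demonstration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
            unsigned int num_pages = 4;
            int retval = -14;       /* e.g. -EFAULT from the page pin */

            if (retval < num_pages)         /* -14 converts to ~4 billion */
                    printf("unsigned compare: error caught\n");
            else
                    printf("unsigned compare: error missed\n");

            if (retval < (int)num_pages)    /* the patched form */
                    printf("signed compare: error caught\n");
            return 0;
    }
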
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 6142ec1b9dfbbc..654c4caab65c67 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -80,3 +80,11 @@ config MMC_SIMULATE_MAX_SPEED
control the write or read maximum KB/second speed behaviors.
If unsure, say N here.
+
+config MMC_FFU
+ bool "FFU SUPPORT"
+ depends on MMC
+ help
+ This is an option to run firmware updates on eMMC 5.0 devices.
+ Field firmware update (FFU) enables firmware enhancements in
+ the field.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c68ec414a5e855..67a2d7bb41d6a8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -46,6 +46,7 @@
#include <linux/mmc/sd.h>
#include <asm/uaccess.h>
+#include <linux/mmc/ffu.h>
#include "queue.h"
@@ -862,6 +863,11 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_done;
}
+ if (idata->ic.opcode == MMC_FFU_INVOKE_OP) {
+ err = mmc_ffu_invoke(card, (struct mmc_ffu_args *)idata->buf);
+ goto cmd_done;
+ }
+
mmc_get_card(card);
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
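
The hook above routes a firmware update through the existing MMC_IOC_CMD
ioctl: userspace submits an mmc_ioc_cmd whose opcode is MMC_FFU_INVOKE_OP
and whose data buffer carries a struct mmc_ffu_args. A hypothetical
userspace sketch; the opcode value and the args layout are assumptions
here (the real ones come from linux/mmc/ffu.h, which is not part of this
hunk):

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/mmc/ioctl.h>

    #define MMC_FFU_INVOKE_OP 302           /* assumed opcode value */

    struct mmc_ffu_args {                   /* illustrative layout only */
            char name[512];
    };

    static int ffu_invoke(const char *dev, const char *fw_name)
    {
            struct mmc_ffu_args args = { 0 };
            struct mmc_ioc_cmd cmd = { 0 };
            int fd, ret;

            fd = open(dev, O_RDWR);
            if (fd < 0)
                    return -1;

            strncpy(args.name, fw_name, sizeof(args.name) - 1);

            cmd.opcode = MMC_FFU_INVOKE_OP;
            cmd.write_flag = 1;
            cmd.blksz = 512;                /* kernel copies blksz * blocks */
            cmd.blocks = sizeof(args) / 512;
            mmc_ioc_cmd_set_data(cmd, &args);

            ret = ioctl(fd, MMC_IOC_CMD, &cmd);
            close(fd);
            return ret;
    }
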
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 9a11aaa6e985f4..0c67c5085b5d84 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -185,84 +185,6 @@ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
}
/*
- * Fill in the mmc_request structure given a set of transfer parameters.
- */
-static void mmc_test_prepare_mrq(struct mmc_test_card *test,
- struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
- unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
-{
- BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
-
- if (blocks > 1) {
- mrq->cmd->opcode = write ?
- MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
- } else {
- mrq->cmd->opcode = write ?
- MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
- }
-
- mrq->cmd->arg = dev_addr;
- if (!mmc_card_blockaddr(test->card))
- mrq->cmd->arg <<= 9;
-
- mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
-
- if (blocks == 1)
- mrq->stop = NULL;
- else {
- mrq->stop->opcode = MMC_STOP_TRANSMISSION;
- mrq->stop->arg = 0;
- mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
- }
-
- mrq->data->blksz = blksz;
- mrq->data->blocks = blocks;
- mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
- mrq->data->sg = sg;
- mrq->data->sg_len = sg_len;
-
- mmc_set_data_timeout(mrq->data, test->card);
-}
-
-static int mmc_test_busy(struct mmc_command *cmd)
-{
- return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
-}
-
-/*
- * Wait for the card to finish the busy state
- */
-static int mmc_test_wait_busy(struct mmc_test_card *test)
-{
- int ret, busy;
- struct mmc_command cmd = {0};
-
- busy = 0;
- do {
- memset(&cmd, 0, sizeof(struct mmc_command));
-
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = test->card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-
- ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
- if (ret)
- break;
-
- if (!busy && mmc_test_busy(&cmd)) {
- busy = 1;
- if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
- pr_info("%s: Warning: Host did not "
- "wait for busy state to end.\n",
- mmc_hostname(test->card->host));
- }
- } while (mmc_test_busy(&cmd));
-
- return ret;
-}
-
-/*
* Transfer a single sector of kernel addressable data
*/
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
@@ -281,7 +203,7 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
sg_init_one(&sg, buffer, blksz);
- mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);
+ mmc_prepare_mrq(test->card, &mrq, &sg, 1, addr, 1, blksz, write);
mmc_wait_for_req(test->card->host, &mrq);
@@ -290,7 +212,7 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
if (data.error)
return data.error;
- return mmc_test_wait_busy(test);
+ return mmc_wait_busy(test->card);
}
static void mmc_test_free_mem(struct mmc_test_mem *mem)
@@ -681,6 +603,17 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
}
}
+/* Convert an error from the mmc stack into a test error code. */
+static int mmc_test_check_err(int ret)
+{
+ if (ret == -EPERM)
+ return RESULT_FAIL;
+ if (ret == -EINVAL)
+ return RESULT_UNSUP_HOST;
+
+ return ret;
+}
+
/*
* Checks that a normal transfer didn't have any errors
*/
@@ -689,24 +622,8 @@ static int mmc_test_check_result(struct mmc_test_card *test,
{
int ret;
- BUG_ON(!mrq || !mrq->cmd || !mrq->data);
-
- ret = 0;
-
- if (!ret && mrq->cmd->error)
- ret = mrq->cmd->error;
- if (!ret && mrq->data->error)
- ret = mrq->data->error;
- if (!ret && mrq->stop && mrq->stop->error)
- ret = mrq->stop->error;
- if (!ret && mrq->data->bytes_xfered !=
- mrq->data->blocks * mrq->data->blksz)
- ret = RESULT_FAIL;
-
- if (ret == -EINVAL)
- ret = RESULT_UNSUP_HOST;
-
- return ret;
+ ret = mmc_check_result(mrq);
+ return mmc_test_check_err(ret);
}
static int mmc_test_check_result_async(struct mmc_card *card,
@@ -715,7 +632,7 @@ static int mmc_test_check_result_async(struct mmc_card *card,
struct mmc_test_async_req *test_async =
container_of(areq, struct mmc_test_async_req, areq);
- mmc_test_wait_busy(test_async->test);
+ mmc_wait_busy(test_async->test->card);
return mmc_test_check_result(test_async->test, areq->mrq);
}
@@ -748,10 +665,7 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
ret = RESULT_FAIL;
}
- if (ret == -EINVAL)
- ret = RESULT_UNSUP_HOST;
-
- return ret;
+ return mmc_test_check_err(ret);
}
/*
@@ -805,7 +719,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
other_areq->err_check = mmc_test_check_result_async;
for (i = 0; i < count; i++) {
- mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+ mmc_prepare_mrq(test->card, cur_areq->mrq, sg, sg_len, dev_addr,
blocks, blksz, write);
done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
@@ -838,23 +752,11 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq = {0};
- struct mmc_command cmd = {0};
- struct mmc_command stop = {0};
- struct mmc_data data = {0};
-
- mrq.cmd = &cmd;
- mrq.data = &data;
- mrq.stop = &stop;
-
- mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
- blocks, blksz, write);
-
- mmc_wait_for_req(test->card->host, &mrq);
-
- mmc_test_wait_busy(test);
+ int ret;
- return mmc_test_check_result(test, &mrq);
+ ret = mmc_simple_transfer(test->card, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ return mmc_test_check_err(ret);
}
/*
@@ -876,12 +778,12 @@ static int mmc_test_broken_transfer(struct mmc_test_card *test,
sg_init_one(&sg, test->buffer, blocks * blksz);
- mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
+ mmc_prepare_mrq(test->card, &mrq, &sg, 1, 0, blocks, blksz, write);
mmc_test_prepare_broken_mrq(test, &mrq, write);
mmc_wait_for_req(test->card->host, &mrq);
- mmc_test_wait_busy(test);
+ mmc_wait_busy(test->card);
return mmc_test_check_broken_result(test, &mrq);
}
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b73d..0dbe76a7ecc163 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -10,3 +10,4 @@ mmc_core-y := core.o bus.o host.o \
quirks.o slot-gpio.o
mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+mmc_core-$(CONFIG_MMC_FFU) += ffu.o
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8e835d74afd776..0872b634828d7d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1052,7 +1052,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
"width %u timing %u\n",
mmc_hostname(host), ios->clock, ios->bus_mode,
ios->power_mode, ios->chip_select, ios->vdd,
- ios->bus_width, ios->timing);
+ 1 << ios->bus_width, ios->timing);
host->ops->set_ios(host, ios);
}
@@ -1242,8 +1242,12 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
- if (!voltage_ranges || !num_ranges) {
- pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+ if (!voltage_ranges) {
+ pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
+ return -EINVAL;
+ }
+ if (!num_ranges) {
+ pr_err("%s: voltage-ranges empty\n", np->full_name);
return -EINVAL;
}
@@ -2898,6 +2902,130 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
+/*
+ * Fill in the mmc_request structure for a read or write command,
+ * using the given scatter-gather list.
+ */
+void mmc_prepare_mrq(struct mmc_card *card,
+ struct mmc_request *mrq,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks, unsigned blksz,
+ int write)
+{
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop);
+
+ if (blocks > 1) {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
+ } else {
+ mrq->cmd->opcode = write ?
+ MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
+ }
+
+ mrq->cmd->arg = dev_addr;
+ if (!mmc_card_blockaddr(card))
+ mrq->cmd->arg <<= 9;
+
+ mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ if (blocks == 1) {
+ mrq->stop = NULL;
+ } else {
+ mrq->stop->opcode = MMC_STOP_TRANSMISSION;
+ mrq->stop->arg = 0;
+ mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
+ }
+
+ mrq->data->blksz = blksz;
+ mrq->data->blocks = blocks;
+ mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
+ mrq->data->sg = sg;
+ mrq->data->sg_len = sg_len;
+
+ mmc_set_data_timeout(mrq->data, card);
+}
+EXPORT_SYMBOL(mmc_prepare_mrq);
+
+static int mmc_busy(u32 status)
+{
+ return !(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+}
+
+/*
+ * Wait for the card to finish the busy state
+ */
+int mmc_wait_busy(struct mmc_card *card)
+{
+ int ret, busy = 0;
+ u32 status;
+
+ do {
+ ret = mmc_send_status(card, &status);
+ if (ret)
+ break;
+
+ if (!busy && mmc_busy(status)) {
+ busy = 1;
+ if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) {
+ pr_warn("%s: Warning: %s\n",
+ mmc_hostname(card->host),
+ "Host did not wait end of busy state.");
+ }
+ }
+
+ } while (mmc_busy(status));
+
+ return ret;
+}
+EXPORT_SYMBOL(mmc_wait_busy);
+
+int mmc_check_result(struct mmc_request *mrq)
+{
+ BUG_ON(!mrq || !mrq->cmd || !mrq->data);
+
+ if (mrq->cmd->error)
+ return mrq->cmd->error;
+ if (mrq->data->error)
+ return mrq->data->error;
+ if (mrq->stop && mrq->stop->error)
+ return mrq->stop->error;
+ if (mrq->data->bytes_xfered !=
+ mrq->data->blocks * mrq->data->blksz)
+ return -EPERM;
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_check_result);
+
+/*
+ * Perform a complete transfer with the given parameters and wait for it
+ * to finish.
+ */
+int mmc_simple_transfer(struct mmc_card *card,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr,
+ unsigned blocks, unsigned blksz, int write)
+{
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+ mmc_prepare_mrq(card, &mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ mmc_wait_busy(card);
+
+ return mmc_check_result(&mrq);
+}
+EXPORT_SYMBOL(mmc_simple_transfer);
+
/**
* mmc_init_context_info() - init synchronization context
* @host: mmc host
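
Together these exports form a three-step primitive (prepare the request,
wait out the busy phase, check the result); mmc_simple_transfer() above is
exactly that composition. A minimal sketch of a caller:

    #include <linux/scatterlist.h>
    #include <linux/mmc/card.h>
    #include <linux/mmc/core.h>

    /* read one 512-byte block at dev_addr into buf (write = 0) */
    static int read_one_block(struct mmc_card *card, void *buf,
                              unsigned int dev_addr)
    {
            struct scatterlist sg;

            sg_init_one(&sg, buf, 512);
            return mmc_simple_transfer(card, &sg, 1, dev_addr, 1, 512, 0);
    }
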
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 59f4cf88436311..066fd42b1f2125 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -221,7 +221,7 @@ static int mmc_clock_opt_set(void *data, u64 val)
struct mmc_host *host = data;
/* We need this check due to input value is u64 */
- if (val > host->f_max)
+ if (val != 0 && (val > host->f_max || val < host->f_min))
return -EINVAL;
mmc_claim_host(host);
diff --git a/drivers/mmc/core/ffu.c b/drivers/mmc/core/ffu.c
new file mode 100644
index 00000000000000..20edb02b8758b7
--- /dev/null
+++ b/drivers/mmc/core/ffu.c
@@ -0,0 +1,465 @@
+/*
+ * ffu.c
+ *
+ * Copyright 2007-2008 Pierre Ossman
+ *
+ * Modified by SanDisk Corp.
+ * Modified by Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/ffu.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/core.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+
+/**
+ * struct mmc_ffu_pages - pages allocated by 'alloc_pages()'.
+ * @page: first page in the allocation
+ * @order: order of the number of pages allocated
+ */
+struct mmc_ffu_pages {
+ struct page *page;
+ unsigned int order;
+};
+
+/**
+ * struct mmc_ffu_mem - allocated memory.
+ * @arr: array of allocations
+ * @cnt: number of allocations
+ */
+struct mmc_ffu_mem {
+ struct mmc_ffu_pages *arr;
+ unsigned int cnt;
+};
+
+struct mmc_ffu_area {
+ unsigned long max_sz;
+ unsigned int max_tfr;
+ unsigned int max_segs;
+ unsigned int max_seg_sz;
+ unsigned int blocks;
+ unsigned int sg_len;
+ struct mmc_ffu_mem mem;
+ struct sg_table sgtable;
+};
+
+/*
+ * Return the hack entry of the given type, or NULL if none was supplied.
+ */
+static const struct mmc_ffu_hack *mmc_get_hack(
+ const struct mmc_ffu_args *args,
+ enum mmc_ffu_hack_type type)
+{
+ int i;
+
+ for (i = 0; i < args->ack_nb; i++) {
+ if (args->hack[i].type == type)
+ return &args->hack[i];
+ }
+ return NULL;
+}
+
+/*
+ * Map memory into a scatterlist.
+ */
+static unsigned int mmc_ffu_map_sg(struct mmc_ffu_mem *mem, int size,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = sglist;
+ unsigned int i;
+ unsigned long sz = size;
+ unsigned int sctr_len = 0;
+ unsigned long len;
+
+ for (i = 0; i < mem->cnt && sz; i++, sz -= len) {
+ len = PAGE_SIZE << mem->arr[i].order;
+
+ if (len > sz) {
+ len = sz;
+ sz = 0;
+ }
+
+ sg_set_page(sg, mem->arr[i].page, len, 0);
+ sg = sg_next(sg);
+ sctr_len++;
+ }
+
+ return sctr_len;
+}
+
+static void mmc_ffu_free_mem(struct mmc_ffu_mem *mem)
+{
+ if (!mem)
+ return;
+
+ while (mem->cnt--)
+ __free_pages(mem->arr[mem->cnt].page, mem->arr[mem->cnt].order);
+
+ kfree(mem->arr);
+}
+
+/*
+ * Cleanup struct mmc_ffu_area.
+ */
+static void mmc_ffu_area_cleanup(struct mmc_ffu_area *area)
+{
+ sg_free_table(&area->sgtable);
+ mmc_ffu_free_mem(&area->mem);
+}
+
+/*
+ * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
+ * there isn't much memory do not exceed 1/16th total low mem* pages.
+ * Also do not exceed a maximum number of segments and try not to make
+ * segments much bigger than maximum segment size.
+ */
+static int mmc_ffu_alloc_mem(struct mmc_ffu_area *area, unsigned long min_sz)
+{
+ unsigned long max_page_cnt = DIV_ROUND_UP(area->max_tfr, PAGE_SIZE);
+ unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
+ unsigned long max_seg_page_cnt = DIV_ROUND_UP(area->max_seg_sz,
+ PAGE_SIZE);
+ unsigned long page_cnt = 0;
+ /*
+ * Divide by 16 to avoid allocating an unnecessarily large
+ * number of pages.
+ */
+ unsigned long limit = nr_free_buffer_pages() >> 4;
+
+ gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | __GFP_NORETRY;
+
+ if (max_page_cnt > limit)
+ max_page_cnt = limit;
+
+ if (min_page_cnt > max_page_cnt)
+ min_page_cnt = max_page_cnt;
+
+ if (area->max_segs * max_seg_page_cnt > max_page_cnt)
+ area->max_segs = DIV_ROUND_UP(max_page_cnt, max_seg_page_cnt);
+
+ area->mem.arr = kcalloc(area->max_segs,
+ sizeof(*area->mem.arr),
+ GFP_KERNEL);
+ if (!area->mem.arr)
+ return -ENOMEM;
+ area->mem.cnt = 0;
+
+ while (max_page_cnt) {
+ struct page *page;
+ unsigned int order;
+
+ order = get_order(max_seg_page_cnt << PAGE_SHIFT);
+
+ do {
+ page = alloc_pages(flags, order);
+ } while (!page && order--);
+
+ if (!page)
+ goto out_free;
+
+ area->mem.arr[area->mem.cnt].page = page;
+ area->mem.arr[area->mem.cnt].order = order;
+ area->mem.cnt++;
+ page_cnt += 1UL << order;
+ if (max_page_cnt <= (1UL << order))
+ break;
+ max_page_cnt -= 1UL << order;
+ }
+
+ if (page_cnt < min_page_cnt)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ mmc_ffu_free_mem(&area->mem);
+ return -ENOMEM;
+}
+
+/*
+ * Initialize an area for data transfers.
+ * Copy the data to the allocated pages.
+ */
+static int mmc_ffu_area_init(struct mmc_ffu_area *area, struct mmc_card *card,
+ const u8 *data, int size)
+{
+ int ret;
+ int i;
+ int length = 0, page_length;
+ int min_size = 0;
+
+ area->max_tfr = size;
+
+ ret = mmc_ffu_alloc_mem(area, 1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < area->mem.cnt; i++) {
+ if (length > size) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ page_length = PAGE_SIZE << area->mem.arr[i].order;
+ min_size = min(size - length, page_length);
+ memcpy(page_address(area->mem.arr[i].page), data + length,
+ min_size);
+ length += page_length;
+ }
+
+ ret = sg_alloc_table(&area->sgtable, area->mem.cnt, GFP_KERNEL);
+ if (ret)
+ goto out_free;
+
+ area->sg_len = mmc_ffu_map_sg(&area->mem, size, area->sgtable.sgl);
+
+ return 0;
+
+out_free:
+ mmc_ffu_free_mem(&area->mem);
+ return ret;
+}
+
+static int mmc_ffu_write(struct mmc_card *card, const u8 *src, u32 arg,
+ int size)
+{
+ int rc;
+ struct mmc_ffu_area area = {0};
+ int max_tfr;
+
+ area.max_segs = card->host->max_segs;
+ area.max_seg_sz = card->host->max_seg_size;
+
+ do {
+ max_tfr = size;
+ if ((max_tfr >> 9) > card->host->max_blk_count)
+ max_tfr = card->host->max_blk_count << 9;
+ if (max_tfr > card->host->max_req_size)
+ max_tfr = card->host->max_req_size;
+ if (DIV_ROUND_UP(max_tfr, area.max_seg_sz) > area.max_segs)
+ max_tfr = area.max_segs * area.max_seg_sz;
+
+ rc = mmc_ffu_area_init(&area, card, src, max_tfr);
+ if (rc != 0)
+ return rc;
+
+ rc = mmc_simple_transfer(card, area.sgtable.sgl, area.sg_len,
+ arg, max_tfr >> 9, 512, 1);
+ mmc_ffu_area_cleanup(&area);
+ if (rc != 0) {
+ dev_err(mmc_dev(card->host),
+ "%s mmc_ffu_simple_transfer %d\n",
+ __func__, rc);
+ return rc;
+ }
+ src += max_tfr;
+ size -= max_tfr;
+
+ } while (size > 0);
+
+ return rc;
+}
+
+static int mmc_ffu_install(struct mmc_card *card)
+{
+ int err;
+
+ /* check mode operation */
+ if (!card->ext_csd.ffu_mode_op) {
+ /*
+ * Switch the host back to normal MMC read/write operation.
+ * For Hynix parts, make sure the command set is 0.
+ */
+ err = mmc_switch(card, 0, EXT_CSD_MODE_CONFIG, 0,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: switch to normal mode error %d:\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+
+ /* restart the eMMC */
+ err = mmc_hw_reset(card->host);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: install error %d:\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+ } else {
+ /* set ext_csd to install mode */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_MODE_OPERATION_CODES, 0x1,
+ card->ext_csd.mode_op_codes_time);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error %d setting install mode\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mmc_ffu_invoke(struct mmc_card *card, const struct mmc_ffu_args *args)
+{
+ u8 *ext_csd = NULL;
+ int err;
+ u32 arg;
+ u32 fw_prog_bytes;
+ const struct firmware *fw;
+ const struct mmc_ffu_hack *hack;
+
+ /* Check if FFU is supported */
+ if (!card->ext_csd.ffu_capable) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error FFU is not supported %d rev %d\n",
+ mmc_hostname(card->host), card->ext_csd.ffu_capable,
+ card->ext_csd.rev);
+ return -EOPNOTSUPP;
+ }
+
+ if (strlen(args->name) > 512) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: name %.20s is too long.\n",
+ mmc_hostname(card->host), args->name);
+ return -EINVAL;
+ }
+
+ /* setup FW data buffer */
+ err = request_firmware(&fw, args->name, &card->dev);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: Firmware request failed %d\n",
+ mmc_hostname(card->host), err);
+ return err;
+ }
+ if (fw->size % 512) {
+ dev_warn(mmc_dev(card->host),
+ "FFU: %s: Warning %zd firmware data size unaligned!\n",
+ mmc_hostname(card->host),
+ fw->size);
+ }
+
+ mmc_get_card(card);
+
+ /* trigger cache flushing */
+ err = mmc_flush_cache(card);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error %d flushing data\n",
+ mmc_hostname(card->host), err);
+ goto exit;
+ }
+
+ /* set CMD ARG */
+ hack = mmc_get_hack(args, MMC_OVERRIDE_FFU_ARG);
+ if (hack == NULL) {
+ arg = card->ext_csd.ffu_arg;
+ } else {
+ arg = cpu_to_le32((u32)hack->value);
+ }
+
+ /* set device to FFU mode */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_MODE_CONFIG, 0x1,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error %d FFU is not supported\n",
+ mmc_hostname(card->host), err);
+ goto exit;
+ }
+
+ err = mmc_ffu_write(card, fw->data, arg, fw->size);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: write error %d\n",
+ mmc_hostname(card->host), err);
+ goto exit;
+ }
+ /* the payload is verified only when mode operations are supported */
+ if (card->ext_csd.ffu_mode_op) {
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error %d sending ext_csd\n",
+ mmc_hostname(card->host), err);
+ goto exit;
+ }
+
+ /* check that the eMMC has received the payload */
+ fw_prog_bytes = ext_csd[EXT_CSD_NUM_OF_FW_SEC_PROG] |
+ ext_csd[EXT_CSD_NUM_OF_FW_SEC_PROG + 1] << 8 |
+ ext_csd[EXT_CSD_NUM_OF_FW_SEC_PROG + 2] << 16 |
+ ext_csd[EXT_CSD_NUM_OF_FW_SEC_PROG + 3] << 24;
+ kfree(ext_csd);
+ ext_csd = NULL;
+
+ fw_prog_bytes *= card->ext_csd.data_sector_size;
+ if (fw_prog_bytes != fw->size) {
+ err = -EINVAL;
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error %d: incorrect programmation\n",
+ __func__, err);
+ dev_err(mmc_dev(card->host),
+ "FFU: sectors written: %d, expected %zd\n",
+ fw_prog_bytes, fw->size);
+ goto exit;
+ }
+ }
+
+ err = mmc_ffu_install(card);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error firmware install %d\n",
+ mmc_hostname(card->host), err);
+ goto exit;
+ }
+
+ /* read ext_csd */
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: error %d sending ext_csd\n",
+ mmc_hostname(card->host), err);
+ goto exit;
+ }
+
+ /* check the FFU status reported by the device */
+ err = ext_csd[EXT_CSD_FFU_STATUS];
+ kfree(ext_csd);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "FFU: %s: FFU status 0x%02x, expected 0\n",
+ mmc_hostname(card->host), err);
+ err = -EINVAL;
+ }
+
+exit:
+ if (err != 0) {
+ /*
+ * Host switch back to work in normal MMC
+ * Read/Write commands.
+ */
+ mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_MODE_CONFIG, 0,
+ card->ext_csd.generic_cmd6_time);
+ }
+ release_firmware(fw);
+ mmc_put_card(card);
+ return err;
+}
+EXPORT_SYMBOL(mmc_ffu_invoke);
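
Both here and in the EXT_CSD parsing below, 32-bit fields are assembled
byte by byte from the little-endian register dump. The open-coded shifts
are equivalent to the kernel's unaligned little-endian helper; a sketch
(not what the patch itself uses):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    static u32 ext_csd_le32(const u8 *ext_csd, unsigned int offset)
    {
            /* same value as ext_csd[off] | ext_csd[off + 1] << 8 |
             * ext_csd[off + 2] << 16 | ext_csd[off + 3] << 24 */
            return get_unaligned_le32(ext_csd + offset);
    }
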
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c32602e2a49aad..fb974e7214f880 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -351,6 +351,9 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
struct device_node *np;
bool broken_hpi = false;
+ /* Reset the partition list; it will be rescanned. */
+ card->nr_parts = 0;
+
/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
@@ -514,7 +517,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
if (!card->ext_csd.man_bkops_en)
- pr_info("%s: MAN_BKOPS_EN bit is not set\n",
+ pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
mmc_hostname(card->host));
}
@@ -598,6 +601,21 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.ffu_capable =
(ext_csd[EXT_CSD_SUPPORTED_MODE] & 0x1) &&
!(ext_csd[EXT_CSD_FW_CONFIG] & 0x1);
+ if (card->ext_csd.ffu_capable) {
+ card->ext_csd.ffu_arg = ext_csd[EXT_CSD_FFU_ARG] |
+ ext_csd[EXT_CSD_FFU_ARG + 1] << 8 |
+ ext_csd[EXT_CSD_FFU_ARG + 2] << 16 |
+ ext_csd[EXT_CSD_FFU_ARG + 3] << 24;
+
+ card->ext_csd.ffu_mode_op =
+ (ext_csd[EXT_CSD_FFU_FEATURES] & 0x1);
+ if (card->ext_csd.ffu_mode_op) {
+ u32 timeout =
+ ext_csd[EXT_CSD_OP_CODES_TIMEOUT];
+ card->ext_csd.mode_op_codes_time =
+ DIV_ROUND_UP(1 << timeout, 10);
+ }
+ }
}
out:
return err;
@@ -958,7 +976,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
break;
} else {
pr_warn("%s: switch to bus width %d failed\n",
- mmc_hostname(host), ext_csd_bits[idx]);
+ mmc_hostname(host), 1 << bus_width);
}
}
@@ -1511,12 +1529,25 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto err;
if (oldcard) {
- if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
+ /*
+ * When comparing the CID, we should exclude the product
+ * revision (Field PRV, offset 55:48), because it can change if
+ * the firmware is upgraded. The new CRC can then be different.
+ * Therefore we skip offset 55:48 and 7:0 (crc) in the
+ * comparison.
+ */
+ if ((cid[0] != oldcard->raw_cid[0]) ||
+ (cid[1] != oldcard->raw_cid[1]) ||
+ ((cid[2] & 0xFF00FFFF) !=
+ (oldcard->raw_cid[2] & 0xFF00FFFF)) ||
+ ((cid[3] & 0xFFFFFF00) !=
+ (oldcard->raw_cid[3] & 0xFFFFFF00))) {
err = -ENOENT;
goto err;
}
card = oldcard;
+ memcpy(card->raw_cid, cid, sizeof(cid));
} else {
/*
* Allocate card structure.
@@ -1550,21 +1581,20 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
- if (!oldcard) {
- /*
- * Fetch CSD from card.
- */
- err = mmc_send_csd(card, card->raw_csd);
- if (err)
- goto free_card;
+ /*
+ * Fetch CSD from card.
+ */
+ err = mmc_send_csd(card, card->raw_csd);
+ if (err)
+ goto free_card;
- err = mmc_decode_csd(card);
- if (err)
- goto free_card;
- err = mmc_decode_cid(card);
- if (err)
- goto free_card;
- }
+ err = mmc_decode_csd(card);
+ if (err)
+ goto free_card;
+
+ err = mmc_decode_cid(card);
+ if (err)
+ goto free_card;
/*
* handling only for cards supporting DSR and hosts requesting
@@ -1582,12 +1612,12 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
}
- if (!oldcard) {
- /* Read extended CSD. */
- err = mmc_read_ext_csd(card);
- if (err)
- goto free_card;
+ /* Read extended CSD. */
+ err = mmc_read_ext_csd(card);
+ if (err)
+ goto free_card;
+ if (!oldcard) {
/* If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
* addressing. See section 8.1 JEDEC Standard JED84-A441;
@@ -1706,9 +1736,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
+ card->ext_csd.hpi_en = 0;
err = 0;
- } else
+ } else {
card->ext_csd.hpi_en = 1;
+ }
}
/*
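
The CID comparison above is a masked equality over the four raw words: PRV
occupies bits 23:16 of word 2 and the CRC bits 7:0 of word 3, and both are
masked out before comparing. A sketch of the same test as a helper (a
restatement of the logic in this hunk, not a function the patch adds):

    #include <linux/types.h>

    static bool mmc_cid_matches(const u32 *a, const u32 *b)
    {
            /* ignore PRV (CID bits 55:48, word 2) and CRC (bits 7:0,
             * word 3), both of which can change after a firmware update */
            return a[0] == b[0] &&
                   a[1] == b[1] &&
                   (a[2] & 0xFF00FFFF) == (b[2] & 0xFF00FFFF) &&
                   (a[3] & 0xFFFFFF00) == (b[3] & 0xFFFFFF00);
    }
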
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index d10538bb5e07ac..96f45caea10930 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -29,15 +29,18 @@ struct mmc_pwrseq_simple {
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
- int i;
struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
- int values[reset_gpios->ndescs];
- for (i = 0; i < reset_gpios->ndescs; i++)
- values[i] = value;
+ if (!IS_ERR(reset_gpios)) {
+ int i;
+ int values[reset_gpios->ndescs];
- gpiod_set_array_value_cansleep(reset_gpios->ndescs, reset_gpios->desc,
- values);
+ for (i = 0; i < reset_gpios->ndescs; i++)
+ values[i] = value;
+
+ gpiod_set_array_value_cansleep(
+ reset_gpios->ndescs, reset_gpios->desc, values);
+ }
}
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
@@ -79,7 +82,8 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
struct mmc_pwrseq_simple, pwrseq);
- gpiod_put_array(pwrseq->reset_gpios);
+ if (!IS_ERR(pwrseq->reset_gpios))
+ gpiod_put_array(pwrseq->reset_gpios);
if (!IS_ERR(pwrseq->ext_clk))
clk_put(pwrseq->ext_clk);
@@ -112,7 +116,9 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
}
pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(pwrseq->reset_gpios)) {
+ if (IS_ERR(pwrseq->reset_gpios) &&
+ PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
+ PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
ret = PTR_ERR(pwrseq->reset_gpios);
goto clk_put;
}
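
The pattern here is an optional resource: -ENOENT (no "reset" GPIOs in the
device description) and -ENOSYS (GPIOLIB disabled) are tolerated at probe,
and every later use re-checks IS_ERR() before touching the array. A sketch
of the same idea in isolation; this variant returns NULL for the tolerated
cases, whereas the patch keeps the ERR_PTR and re-checks it at each use:

    #include <linux/gpio/consumer.h>
    #include <linux/err.h>

    /* probe-time lookup that treats "no GPIOs at all" as non-fatal */
    static struct gpio_descs *get_optional_reset(struct device *dev)
    {
            struct gpio_descs *descs;

            descs = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(descs) &&
                (PTR_ERR(descs) == -ENOENT || PTR_ERR(descs) == -ENOSYS))
                    return NULL;    /* absent: nothing to toggle later */

            return descs;   /* valid array, or a real error to propagate */
    }
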
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index bf62e429f7fcc1..98be9eb3184bb1 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1840,13 +1840,14 @@ static void atmci_tasklet_func(unsigned long priv)
}
atmci_request_end(host, host->mrq);
- state = STATE_IDLE;
+ goto unlock; /* atmci_request_end() sets host->state */
break;
}
} while (state != prev_state);
host->state = state;
+unlock:
spin_unlock(&host->lock);
}
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index aad3243a48fce3..e03ec74f3fb084 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1451,6 +1451,7 @@ static int mmc_spi_probe(struct spi_device *spi)
if (status != 0)
goto fail_add_host;
}
+ mmc_detect_change(mmc, 0);
dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",
dev_name(&mmc->class_dev),
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index b9958a123594a6..5bcf4f45f8b4ec 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -105,6 +105,7 @@ struct mmc_omap_slot {
unsigned int vdd;
u16 saved_con;
u16 bus_mode;
+ u16 power_mode;
unsigned int fclk_freq;
struct tasklet_struct cover_tasklet;
@@ -1156,7 +1157,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct mmc_omap_slot *slot = mmc_priv(mmc);
struct mmc_omap_host *host = slot->host;
int i, dsor;
- int clk_enabled;
+ int clk_enabled, init_stream;
mmc_omap_select_slot(slot, 0);
@@ -1166,6 +1167,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
slot->vdd = ios->vdd;
clk_enabled = 0;
+ init_stream = 0;
switch (ios->power_mode) {
case MMC_POWER_OFF:
mmc_omap_set_power(slot, 0, ios->vdd);
@@ -1173,13 +1175,17 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_UP:
/* Cannot touch dsor yet, just power up MMC */
mmc_omap_set_power(slot, 1, ios->vdd);
+ slot->power_mode = ios->power_mode;
goto exit;
case MMC_POWER_ON:
mmc_omap_fclk_enable(host, 1);
clk_enabled = 1;
dsor |= 1 << 11;
+ if (slot->power_mode != MMC_POWER_ON)
+ init_stream = 1;
break;
}
+ slot->power_mode = ios->power_mode;
if (slot->bus_mode != ios->bus_mode) {
if (slot->pdata->set_bus_mode != NULL)
@@ -1195,7 +1201,7 @@ static void mmc_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
for (i = 0; i < 2; i++)
OMAP_MMC_WRITE(host, CON, dsor);
slot->saved_con = dsor;
- if (ios->power_mode == MMC_POWER_ON) {
+ if (init_stream) {
/* worst case at 400kHz, 80 cycles makes 200 microsecs */
int usecs = 250;
@@ -1233,6 +1239,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)
slot->host = host;
slot->mmc = mmc;
slot->id = id;
+ slot->power_mode = MMC_POWER_UNDEFINED;
slot->pdata = &host->pdata->slots[id];
host->slots[id] = slot;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 6b814d7d656043..af937d3e8c3e81 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2117,7 +2117,6 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
mmc->max_blk_size = 512; /* Block Length at max can be 1024 */
mmc->max_blk_count = 0xFFFF; /* No. of Blocks is 16 bits */
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
- mmc->max_seg_size = mmc->max_req_size;
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_ERASE;
@@ -2174,6 +2173,17 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
+ /*
+ * Limit the maximum segment size to the lower of the request size
+ * and the DMA engine device segment size limits. In reality, with
+ * 32-bit transfers, the DMA engine can do longer segments than this
+ * but there is no way to represent that in the DMA model - if we
+ * increase this figure here, we get warnings from the DMA API debug.
+ */
+ mmc->max_seg_size = min3(mmc->max_req_size,
+ dma_get_max_seg_size(host->rx_chan->device->dev),
+ dma_get_max_seg_size(host->tx_chan->device->dev));
+
/* Request IRQ for MMC operations */
ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,
mmc_hostname(mmc), host);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 72bbb12fb93802..1d57c12b191c08 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -181,7 +181,7 @@ static void pxamci_dma_irq(void *param);
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
struct dma_async_tx_descriptor *tx;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
struct dma_slave_config config;
struct dma_chan *chan;
unsigned int nob = data->blocks;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 8d838779fd1bcd..6f45eabfbbe7e4 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -412,7 +412,7 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)
val = readl(host->ioaddr + ESDHC_MIX_CTRL);
else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING)
/* the std tuning bits is in ACMD12_ERR for imx6sl */
- val = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ val = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
}
if (val & ESDHC_MIX_CTRL_EXE_TUNE)
@@ -474,7 +474,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
writel(new_val , host->ioaddr + ESDHC_MIX_CTRL);
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
- u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR);
+ u32 v = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL);
u32 tuning_ctrl;
if (val & SDHCI_CTRL_TUNED_CLK) {
@@ -496,7 +496,7 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
v &= ~ESDHC_MIX_CTRL_EXE_TUNE;
}
- writel(v, host->ioaddr + SDHCI_ACMD12_ERR);
+ writel(v, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
writel(m, host->ioaddr + ESDHC_MIX_CTRL);
}
return;
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index ffd448149796a2..4a2ae06d0da4da 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -217,7 +217,10 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
iproc_host->data = iproc_data;
- mmc_of_parse(host->mmc);
+ ret = mmc_of_parse(host->mmc);
+ if (ret)
+ goto err;
+
sdhci_get_of_property(pdev);
/* Enable EMMC 1/8V DDR capable */
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index d48f03104b5be3..e417e4274d66a3 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -334,6 +334,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
break;
case PCI_DEVICE_ID_O2_SEABIRD0:
+ if (chip->pdev->revision == 0x01)
+ chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
+ /* fall through */
case PCI_DEVICE_ID_O2_SEABIRD1:
/* UnLock WP */
ret = pci_read_config_byte(chip->pdev,
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index df50a1e9b92bc0..9119c0dd0bab60 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -80,40 +80,40 @@ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
static void sdhci_dumpregs(struct sdhci_host *host)
{
- pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s) ===========\n",
mmc_hostname(host->mmc));
- pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
sdhci_readw(host, SDHCI_HOST_VERSION));
- pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
sdhci_readw(host, SDHCI_BLOCK_SIZE),
sdhci_readw(host, SDHCI_BLOCK_COUNT));
- pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
sdhci_readl(host, SDHCI_ARGUMENT),
sdhci_readw(host, SDHCI_TRANSFER_MODE));
- pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
sdhci_readl(host, SDHCI_PRESENT_STATE),
sdhci_readb(host, SDHCI_HOST_CONTROL));
- pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
sdhci_readb(host, SDHCI_POWER_CONTROL),
sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
sdhci_readl(host, SDHCI_INT_STATUS));
- pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
sdhci_readl(host, SDHCI_INT_ENABLE),
sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
- sdhci_readw(host, SDHCI_ACMD12_ERR),
+ pr_err(DRIVER_NAME ": ACmd stat: 0x%08x | Slot int: 0x%08x\n",
+ sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_CAPABILITIES_1));
- pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ pr_err(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
@@ -687,6 +687,11 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)
else
host->ier = (host->ier & ~dma_irqs) | pio_irqs;
+ if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
+ host->ier |= SDHCI_INT_AUTO_CMD_ERR;
+ else
+ host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
+
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
@@ -889,12 +894,24 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
static void sdhci_finish_data(struct sdhci_host *host)
{
+ struct mmc_command *data_cmd = host->data_cmd;
struct mmc_data *data;
BUG_ON(!host->data);
data = host->data;
host->data = NULL;
+ host->data_cmd = NULL;
+
+ /*
+ * The controller needs a reset of internal state machines upon error
+ * conditions.
+ */
+ if (data->error) {
+ if (!host->cmd || host->cmd == data_cmd)
+ sdhci_do_reset(host, SDHCI_RESET_CMD);
+ sdhci_do_reset(host, SDHCI_RESET_DATA);
+ }
if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
(SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
@@ -919,19 +936,9 @@ static void sdhci_finish_data(struct sdhci_host *host)
*/
if (data->stop &&
(data->error ||
- !host->mrq->sbc)) {
-
- /*
- * The controller needs a reset of internal state machines
- * upon error conditions.
- */
- if (data->error) {
- sdhci_do_reset(host, SDHCI_RESET_CMD);
- sdhci_do_reset(host, SDHCI_RESET_DATA);
- }
-
+ !host->mrq->sbc))
sdhci_send_command(host, data->stop);
- } else
+ else
tasklet_schedule(&host->finish_tasklet);
}
@@ -980,6 +987,10 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
host->cmd = cmd;
host->busy_handle = 0;
+ if (cmd->data || cmd->flags & MMC_RSP_BUSY) {
+ WARN_ON(host->data_cmd);
+ host->data_cmd = cmd;
+ }
sdhci_prepare_data(host, cmd);
@@ -2216,8 +2227,7 @@ static void sdhci_tasklet_finish(unsigned long param)
if (!(host->flags & SDHCI_DEVICE_DEAD) &&
((mrq->cmd && mrq->cmd->error) ||
(mrq->sbc && mrq->sbc->error) ||
- (mrq->data && ((mrq->data->error && !mrq->data->stop) ||
- (mrq->data->stop && mrq->data->stop->error))) ||
+ (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
/* Some controllers need this kick or reset won't work here */
@@ -2234,6 +2244,7 @@ static void sdhci_tasklet_finish(unsigned long param)
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
+ host->data_cmd = NULL;
#ifndef SDHCI_USE_LEDS_CLASS
sdhci_deactivate_led(host);
@@ -2287,6 +2298,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
BUG_ON(intmask == 0);
+ /* Handle auto-CMD12 error */
+ if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
+ struct mmc_request *mrq = host->data_cmd->mrq;
+ u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
+ int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
+ SDHCI_INT_DATA_TIMEOUT :
+ SDHCI_INT_DATA_CRC;
+
+ /* Treat auto-CMD12 error the same as data error */
+ if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
+ *mask |= data_err_bit;
+ return;
+ }
+ }
+
if (!host->cmd) {
pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
@@ -2301,20 +2327,12 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
else
host->cmd->error = -EILSEQ;
- /*
- * If this command initiates a data phase and a response
- * CRC error is signalled, the card can start transferring
- * data - the card may have received the command without
- * error. We must not terminate the mmc_request early.
- *
- * If the card did not receive the command or returned an
- * error which prevented it sending data, the data phase
- * will time out.
- */
+ /* Treat data command CRC error the same as data CRC error */
if (host->cmd->data &&
(intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
SDHCI_INT_CRC) {
host->cmd = NULL;
+ *mask |= SDHCI_INT_DATA_CRC;
return;
}
@@ -2350,6 +2368,21 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
*mask &= ~SDHCI_INT_DATA_END;
}
+ /* Handle auto-CMD23 error */
+ if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
+ struct mmc_request *mrq = host->cmd->mrq;
+ u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
+ int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
+ -ETIMEDOUT :
+ -EILSEQ;
+
+ if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ mrq->sbc->error = err;
+ tasklet_schedule(&host->finish_tasklet);
+ return;
+ }
+ }
+
if (intmask & SDHCI_INT_RESPONSE)
sdhci_finish_command(host);
}
@@ -2404,14 +2437,19 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
if (!host->data) {
+ struct mmc_command *data_cmd = host->data_cmd;
+
+ if (data_cmd)
+ host->data_cmd = NULL;
+
/*
* The "data complete" interrupt is also used to
* indicate that a busy state has ended. See comment
* above in sdhci_cmd_irq().
*/
- if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+ if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
if (intmask & SDHCI_INT_DATA_TIMEOUT) {
- host->cmd->error = -ETIMEDOUT;
+ data_cmd->error = -ETIMEDOUT;
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -2486,7 +2524,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
if (intmask & SDHCI_INT_DATA_END) {
- if (host->cmd) {
+ if (host->cmd == host->data_cmd) {
/*
* Data managed to finish before the
* command completed. Make sure we do
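
Both new auto-CMD handlers read SDHCI_AUTO_CMD_STATUS and reduce it to an
errno: a timeout maps to -ETIMEDOUT, and the CRC, end-bit and index errors
to -EILSEQ. A sketch of that decode using the bit names added to sdhci.h
below:

    #include <linux/types.h>
    #include <linux/errno.h>

    static int sdhci_auto_cmd_errno(u16 auto_cmd_status)
    {
            if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT)
                    return -ETIMEDOUT;
            if (auto_cmd_status & (SDHCI_AUTO_CMD_CRC |
                                   SDHCI_AUTO_CMD_END_BIT |
                                   SDHCI_AUTO_CMD_INDEX))
                    return -EILSEQ;
            return 0;
    }
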
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 48d96808c7063b..59bff6825f3d76 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -138,14 +138,15 @@
#define SDHCI_INT_DATA_CRC 0x00200000
#define SDHCI_INT_DATA_END_BIT 0x00400000
#define SDHCI_INT_BUS_POWER 0x00800000
-#define SDHCI_INT_ACMD12ERR 0x01000000
+#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
- SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+ SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
+ SDHCI_INT_AUTO_CMD_ERR)
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
@@ -153,7 +154,11 @@
SDHCI_INT_BLK_GAP)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
-#define SDHCI_ACMD12_ERR 0x3C
+#define SDHCI_AUTO_CMD_STATUS 0x3C
+#define SDHCI_AUTO_CMD_TIMEOUT 0x00000002
+#define SDHCI_AUTO_CMD_CRC 0x00000004
+#define SDHCI_AUTO_CMD_END_BIT 0x00000008
+#define SDHCI_AUTO_CMD_INDEX 0x00000010
#define SDHCI_HOST_CONTROL2 0x3E
#define SDHCI_CTRL_UHS_MASK 0x0007
@@ -470,6 +475,7 @@ struct sdhci_host {
struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
+ struct mmc_command *data_cmd; /* Current data command */
struct mmc_data *data; /* Current data request */
unsigned int data_early:1; /* Data finished before cmd */
unsigned int busy_handle:1; /* Handling the order of Busy-end */
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index a10fde40b6c3dd..3c7c3a1c8f4fa5 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -716,7 +716,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return IRQ_HANDLED;
+ return IRQ_NONE;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask;
@@ -730,7 +730,7 @@ irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(ireg);
}
EXPORT_SYMBOL(tmio_mmc_sdio_irq);
@@ -747,9 +747,7 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- tmio_mmc_sdio_irq(irq, devid);
-
- return IRQ_HANDLED;
+ return tmio_mmc_sdio_irq(irq, devid);
}
EXPORT_SYMBOL(tmio_mmc_irq);
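
IRQ_RETVAL(x) evaluates to IRQ_HANDLED when x is non-zero and IRQ_NONE
otherwise, so the handler now claims a shared interrupt line only when an
SDIO status bit was actually pending; returning IRQ_HANDLED unconditionally
defeats the kernel's spurious-interrupt detection. A sketch of the shape,
with read_pending_bits() as a hypothetical stand-in for the masked status
read:

    #include <linux/interrupt.h>

    static irqreturn_t example_sdio_irq(int irq, void *devid)
    {
            /* hypothetical helper: masked pending-status bits, 0 if none */
            unsigned int ireg = read_pending_bits(devid);

            return IRQ_RETVAL(ireg);  /* 0 -> IRQ_NONE, else IRQ_HANDLED */
    }
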
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 31448a2b39ae80..fb5a3052f1441d 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -42,7 +42,7 @@
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0
-#define MAX_WORD_RETRIES 3
+#define MAX_RETRIES 3
#define SST49LF004B 0x0060
#define SST49LF040B 0x0050
@@ -1645,7 +1645,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
@@ -1878,7 +1878,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
if (time_after(jiffies, timeo) && !chip_ready(map, adr))
break;
- if (chip_ready(map, adr)) {
+ if (chip_good(map, adr, datum)) {
xip_enable(map, chip, adr);
goto op_done;
}
@@ -2104,7 +2104,7 @@ retry:
map_write(map, CMD(0xF0), chip->start);
/* FIXME - should have reset delay before continuing */
- if (++retry_cnt <= MAX_WORD_RETRIES)
+ if (++retry_cnt <= MAX_RETRIES)
goto retry;
ret = -EIO;
@@ -2239,6 +2239,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
unsigned long int adr;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
+ int retry_cnt = 0;
adr = cfi->addr_unlock1;
@@ -2256,6 +2257,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
ENABLE_VPP(map);
xip_disable(map, chip, adr);
+ retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2292,12 +2294,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->erase_suspended = 0;
}
- if (chip_ready(map, adr))
+ if (chip_good(map, adr, map_word_ff(map)))
break;
if (time_after(jiffies, timeo)) {
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__ );
+ ret = -EIO;
break;
}
@@ -2305,12 +2308,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
- if (!chip_good(map, adr, map_word_ff(map))) {
+ if (ret) {
/* reset on all failures. */
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
- ret = -EIO;
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
}
chip->state = FL_READY;
@@ -2329,6 +2335,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
unsigned long timeo = jiffies + HZ;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
+ int retry_cnt = 0;
adr += chip->start;
@@ -2346,6 +2353,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
ENABLE_VPP(map);
xip_disable(map, chip, adr);
+ retry:
cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
@@ -2382,7 +2390,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->erase_suspended = 0;
}
- if (chip_ready(map, adr)) {
+ if (chip_good(map, adr, map_word_ff(map))) {
xip_enable(map, chip, adr);
break;
}
@@ -2391,6 +2399,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
xip_enable(map, chip, adr);
printk(KERN_WARNING "MTD %s(): software timeout\n",
__func__ );
+ ret = -EIO;
break;
}
@@ -2398,12 +2407,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
UDELAY(map, chip, adr, 1000000/HZ);
}
/* Did we succeed? */
- if (!chip_good(map, adr, map_word_ff(map))) {
+ if (ret) {
/* reset on all failures. */
map_write( map, CMD(0xF0), chip->start );
/* FIXME - should have reset delay before continuing */
- ret = -EIO;
+ if (++retry_cnt <= MAX_RETRIES) {
+ ret = 0;
+ goto retry;
+ }
}
chip->state = FL_READY;
@@ -2533,7 +2545,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct ppb_lock {
struct flchip *chip;
- loff_t offset;
+ unsigned long adr;
int locked;
};
@@ -2551,8 +2563,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
unsigned long timeo;
int ret;
+ adr += chip->start;
mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
@@ -2570,8 +2583,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
chip->state = FL_LOCKING;
- map_write(map, CMD(0xA0), chip->start + adr);
- map_write(map, CMD(0x00), chip->start + adr);
+ map_write(map, CMD(0xA0), adr);
+ map_write(map, CMD(0x00), adr);
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
/*
* Unlocking of one specific sector is not supported, so we
@@ -2609,7 +2622,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
- put_chip(map, chip, adr + chip->start);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
@@ -2666,9 +2679,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
* sectors shall be unlocked, so lets keep their locking
* status at "unlocked" (locked=0) for the final re-locking.
*/
- if ((adr < ofs) || (adr >= (ofs + len))) {
+ if ((offset < ofs) || (offset >= (ofs + len))) {
sect[sectors].chip = &cfi->chips[chipnum];
- sect[sectors].offset = offset;
+ sect[sectors].adr = adr;
sect[sectors].locked = do_ppb_xxlock(
map, &cfi->chips[chipnum], adr, 0,
DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2682,6 +2695,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
i++;
if (adr >> cfi->chipshift) {
+ if (offset >= (ofs + len))
+ break;
adr = 0;
chipnum++;
@@ -2712,7 +2727,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
*/
for (i = 0; i < sectors; i++) {
if (sect[i].locked)
- do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
DO_XXLOCK_ONEBLOCK_LOCK);
}
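
The cfi_cmdset_0002 hunks above turn the erase software timeout into a hard error and then retry the whole unlock/erase command sequence up to MAX_RETRIES times, resetting the chip between attempts. A minimal sketch of that bounded-retry idiom, assuming illustrative issue_erase() and reset_chip() helpers that are not part of the driver:

    /* Bounded retry: reset on failure and re-issue the command sequence,
     * giving up after MAX_RETRIES attempts. issue_erase()/reset_chip()
     * are hypothetical stand-ins for the CFI command sequence above.
     */
    #define MAX_RETRIES 3

    static int erase_with_retries(struct flchip *chip)
    {
            int retry_cnt = 0;
            int ret;

    retry:
            ret = issue_erase(chip);        /* returns -EIO on timeout */
            if (ret) {
                    reset_chip(chip);       /* reset on all failures */
                    if (++retry_cnt <= MAX_RETRIES) {
                            ret = 0;
                            goto retry;
                    }
            }
            return ret;
    }
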
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index f73c41697a00e3..5ab9a46daf069c 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -208,7 +208,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS
+ select BCH_CONST_PARAMS if !MTD_NAND_BCH
select BITREVERSE
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
diff --git a/drivers/mtd/maps/solutionengine.c b/drivers/mtd/maps/solutionengine.c
index bb580bc164459a..c07f21b2046325 100644
--- a/drivers/mtd/maps/solutionengine.c
+++ b/drivers/mtd/maps/solutionengine.c
@@ -59,9 +59,9 @@ static int __init init_soleng_maps(void)
return -ENXIO;
}
}
- printk(KERN_NOTICE "Solution Engine: Flash at 0x%08lx, EPROM at 0x%08lx\n",
- soleng_flash_map.phys & 0x1fffffff,
- soleng_eprom_map.phys & 0x1fffffff);
+ printk(KERN_NOTICE "Solution Engine: Flash at 0x%pap, EPROM at 0x%pap\n",
+ &soleng_flash_map.phys,
+ &soleng_eprom_map.phys);
flash_mtd->owner = THIS_MODULE;
eprom_mtd = do_map_probe("map_rom", &soleng_eprom_map);
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 6d19835b80a952..0d244dac1ccb3c 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -160,8 +160,12 @@ static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
pr_debug("MTD_read\n");
- if (*ppos + count > mtd->size)
- count = mtd->size - *ppos;
+ if (*ppos + count > mtd->size) {
+ if (*ppos < mtd->size)
+ count = mtd->size - *ppos;
+ else
+ count = 0;
+ }
if (!count)
return 0;
@@ -246,7 +250,7 @@ static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t c
pr_debug("MTD_write\n");
- if (*ppos == mtd->size)
+ if (*ppos >= mtd->size)
return -ENOSPC;
if (*ppos + count > mtd->size)
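
Both mtdchar hunks guard against a file position at or beyond the device size: the read path clamps the count (so mtd->size - *ppos can never underflow), and the write path rejects with -ENOSPC. The read clamp as a stand-alone sketch, with simplified types:

    /* Clamp a read request so [pos, pos + count) stays inside the device.
     * Mirrors the mtdchar_read() logic above; a pos at or past the end
     * yields 0, which the caller treats as EOF.
     */
    static size_t clamp_read_count(uint64_t pos, size_t count, uint64_t size)
    {
            if (pos + count > size)
                    count = (pos < size) ? size - pos : 0;
            return count;
    }
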
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 5e3fa586103916..2c0bbaed360920 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -449,9 +449,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
case NAND_CMD_READID:
case NAND_CMD_PARAM: {
+ /*
+ * For READID, read the 8 bytes that are currently used.
+ * For PARAM, read all 3 copies of the 256-byte pages.
+ */
+ int len = 8;
int timing = IFC_FIR_OP_RB;
- if (command == NAND_CMD_PARAM)
+ if (command == NAND_CMD_PARAM) {
timing = IFC_FIR_OP_RBCD;
+ len = 256 * 3;
+ }
ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
(IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
@@ -461,12 +468,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
&ifc->ifc_nand.nand_fcr0);
ifc_out32(column, &ifc->ifc_nand.row3);
- /*
- * although currently it's 8 bytes for READID, we always read
- * the maximum 256 bytes(for PARAM)
- */
- ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
- ifc_nand_ctrl->read_bytes = 256;
+ ifc_out32(len, &ifc->ifc_nand.nand_fbcr);
+ ifc_nand_ctrl->read_bytes = len;
set_addr(mtd, 0, 0, 0);
fsl_ifc_run_command(mtd);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index 43fa16b5f5107c..672c02e32a39e8 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -168,9 +168,10 @@ int gpmi_init(struct gpmi_nand_data *this)
/*
* Reset BCH here, too. We got failures otherwise :(
- * See later BCH reset for explanation of MX23 handling
+ * See later BCH reset for explanation of MX23 and MX28 handling
*/
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ ret = gpmi_reset_block(r->bch_regs,
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
if (ret)
goto err_out;
@@ -274,13 +275,11 @@ int bch_set_geometry(struct gpmi_nand_data *this)
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
- * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
- * On the other hand, the MX28 needs the reset, because one case has been
- * seen where the BCH produced ECC errors constantly after 10000
- * consecutive reboots. The latter case has not been seen on the MX23
- * yet, still we don't know if it could happen there as well.
+ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23
+ * and MX28.
*/
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ ret = gpmi_reset_block(r->bch_regs,
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
if (ret)
goto err_out;
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 136e73a3e07e53..53fe795fd716df 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -49,7 +49,7 @@
#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1034,6 +1034,9 @@ static void preset_v2(struct mtd_info *mtd)
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index fd2154ca19ff4e..1183384a720cfb 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -868,6 +868,11 @@ static const struct flash_info spi_nor_ids[] = {
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
},
{
+ "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+ SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+ },
+ {
"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
@@ -890,6 +895,12 @@ static const struct flash_info spi_nor_ids[] = {
/* ISSI */
{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) },
+ { "is25wp032", INFO(0x9d7016, 0, 64 * 1024, 64,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp064", INFO(0x9d7017, 0, 64 * 1024, 128,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+ { "is25wp128", INFO(0x9d7018, 0, 64 * 1024, 256,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
/* Macronix */
{ "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) },
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index c1aaf0336cf2e3..5cde3ad1665e3f 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -175,6 +175,40 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
}
/**
+ * add_fastmap - add a Fastmap related physical eraseblock.
+ * @ai: attaching information
+ * @pnum: physical eraseblock number the VID header came from
+ * @vid_hdr: the volume identifier header
+ * @ec: erase counter of the physical eraseblock
+ *
+ * This function allocates a 'struct ubi_ainf_peb' object for a Fastmap
+ * physical eraseblock @pnum and adds it to the 'fastmap' list.
+ * Such blocks can be Fastmap super and data blocks, either from the most
+ * recent Fastmap we're attaching from or from old Fastmaps which will
+ * be erased.
+ */
+static int add_fastmap(struct ubi_attach_info *ai, int pnum,
+ struct ubi_vid_hdr *vid_hdr, int ec)
+{
+ struct ubi_ainf_peb *aeb;
+
+ aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
+ if (!aeb)
+ return -ENOMEM;
+
+ aeb->pnum = pnum;
+ aeb->vol_id = be32_to_cpu(vidh->vol_id);
+ aeb->sqnum = be64_to_cpu(vidh->sqnum);
+ aeb->ec = ec;
+ list_add(&aeb->u.list, &ai->fastmap);
+
+ dbg_bld("add to fastmap list: PEB %d, vol_id %d, sqnum: %llu", pnum,
+ aeb->vol_id, aeb->sqnum);
+
+ return 0;
+}
+
+/**
* validate_vid_hdr - check volume identifier header.
* @ubi: UBI device description object
* @vid_hdr: the volume identifier header to check
@@ -803,13 +837,26 @@ out_unlock:
return err;
}
+static bool vol_ignored(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_LAYOUT_VOLUME_ID:
+ return true;
+ }
+
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ return ubi_is_fm_vol(vol_id);
+#else
+ return false;
+#endif
+}
+
/**
* scan_peb - scan and process UBI headers of a PEB.
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
- * @vid: The volume ID of the found volume will be stored in this pointer
- * @sqnum: The sqnum of the found volume will be stored in this pointer
+ * @fast: true if we're scanning for a Fastmap
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
@@ -817,9 +864,9 @@ out_unlock:
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int pnum, int *vid, unsigned long long *sqnum)
+ int pnum, bool fast)
{
- long long uninitialized_var(ec);
+ long long ec;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
@@ -935,6 +982,20 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
*/
ai->maybe_bad_peb_count += 1;
case UBI_IO_BAD_HDR:
+ /*
+ * If we're facing a bad VID header we have to drop *all*
+ * Fastmap data structures we find. The most recent Fastmap
+ * could be bad and therefore there is a chance that we attach
+ * from an old one. On a healthy MTD stack a PEB should not turn
+ * bad all of a sudden, but the reality is different.
+ * So, let's be paranoid and help find the root cause by
+ * falling back to scanning mode instead of attaching with a
+ * bad EBA table and causing data corruption which is hard to
+ * analyze.
+ */
+ if (fast)
+ ai->force_full_scan = 1;
+
if (ec_err)
/*
* Both headers are corrupted. There is a possibility
@@ -991,21 +1052,15 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
vol_id = be32_to_cpu(vidh->vol_id);
- if (vid)
- *vid = vol_id;
- if (sqnum)
- *sqnum = be64_to_cpu(vidh->sqnum);
- if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
+ if (vol_id > UBI_MAX_VOLUMES && !vol_ignored(vol_id)) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
- if (vol_id != UBI_FM_SB_VOLUME_ID
- && vol_id != UBI_FM_DATA_VOLUME_ID) {
- ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
- vol_id, lnum);
- }
+ ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
+ vol_id, lnum);
+
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
@@ -1037,7 +1092,12 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
if (ec_err)
ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
pnum);
- err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
+ if (ubi_is_fm_vol(vol_id))
+ err = add_fastmap(ai, pnum, vidh, ec);
+ else
+ err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
+
if (err)
return err;
@@ -1186,6 +1246,10 @@ static void destroy_ai(struct ubi_attach_info *ai)
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
+ list_for_each_entry_safe(aeb, aeb_tmp, &ai->fastmap, u.list) {
+ list_del(&aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, aeb);
+ }
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
@@ -1245,7 +1309,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, ai, pnum, NULL, NULL);
+ err = scan_peb(ubi, ai, pnum, false);
if (err < 0)
goto out_vidh;
}
@@ -1311,6 +1375,7 @@ static struct ubi_attach_info *alloc_ai(void)
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
+ INIT_LIST_HEAD(&ai->fastmap);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
sizeof(struct ubi_ainf_peb),
@@ -1337,52 +1402,58 @@ static struct ubi_attach_info *alloc_ai(void)
*/
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info **ai)
{
- int err, pnum, fm_anchor = -1;
- unsigned long long max_sqnum = 0;
+ int err, pnum;
+ struct ubi_attach_info *scan_ai;
err = -ENOMEM;
+ scan_ai = alloc_ai();
+ if (!scan_ai)
+ goto out;
+
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
- goto out;
+ goto out_ai;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
- int vol_id = -1;
- unsigned long long sqnum = -1;
cond_resched();
dbg_gen("process PEB %d", pnum);
- err = scan_peb(ubi, *ai, pnum, &vol_id, &sqnum);
+ err = scan_peb(ubi, scan_ai, pnum, true);
if (err < 0)
goto out_vidh;
-
- if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
- max_sqnum = sqnum;
- fm_anchor = pnum;
- }
}
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
- if (fm_anchor < 0)
- return UBI_NO_FASTMAP;
+ if (scan_ai->force_full_scan)
+ err = UBI_NO_FASTMAP;
+ else
+ err = ubi_scan_fastmap(ubi, *ai, scan_ai);
- destroy_ai(*ai);
- *ai = alloc_ai();
- if (!*ai)
- return -ENOMEM;
+ if (err) {
+ /*
+ * We didn't attach via fastmap; do a full scan but reuse what
+ * we've already scanned.
+ */
+ destroy_ai(*ai);
+ *ai = scan_ai;
+ } else
+ destroy_ai(scan_ai);
- return ubi_scan_fastmap(ubi, *ai, fm_anchor);
+ return err;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
+out_ai:
+ destroy_ai(scan_ai);
out:
return err;
}
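
With the rework above, scan_fast() scans the first UBI_FM_MAX_START PEBs into a private attach structure and, when the Fastmap attach fails or a full scan is forced, promotes that partial scan instead of throwing it away. A sketch of the scan-then-promote pattern, assuming hypothetical alloc_ctx()/free_ctx()/try_fast() helpers:

    /* Scan into a temporary context; on fast-attach failure, adopt the
     * temporary scan as the real one rather than rescanning from scratch.
     */
    static int attach_with_fallback(struct ctx **ai)
    {
            struct ctx *scan = alloc_ctx();
            int err;

            if (!scan)
                    return -ENOMEM;

            err = try_fast(*ai, scan);      /* e.g. the fastmap attach */
            if (err) {
                    free_ctx(*ai);          /* drop the empty context */
                    *ai = scan;             /* promote the partial scan */
            } else {
                    free_ctx(scan);
            }
            return err;
    }
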
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a2e6c7848b0acd..c9f5ae424af758 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -1132,6 +1132,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
*/
get_device(&ubi->dev);
+#ifdef CONFIG_MTD_UBI_FASTMAP
+ cancel_work_sync(&ubi->fm_work);
+#endif
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 4dd0391d294282..03cf0553ec1b3f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -350,6 +350,82 @@ out_unlock:
return err;
}
+#ifdef CONFIG_MTD_UBI_FASTMAP
+/**
+ * check_mapping - check and fixup a mapping
+ * @ubi: UBI device description object
+ * @vol: volume description object
+ * @lnum: logical eraseblock number
+ * @pnum: physical eraseblock number
+ *
+ * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
+ * operations; if such an operation is interrupted, the mapping still looks
+ * good, but upon first read an ECC error is reported to the upper layer.
+ * Normally this is fixed during the full scan at attach time; for Fastmap
+ * we have to deal with it while reading.
+ * If the PEB behind a LEB shows this symptom, we change the mapping to
+ * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
+ *
+ * Returns 0 on success, negative error code in case of failure.
+ */
+static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ int *pnum)
+{
+ int err;
+ struct ubi_vid_hdr *vid_hdr;
+
+ if (!ubi->fast_attach)
+ return 0;
+
+ vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+ if (!vid_hdr)
+ return -ENOMEM;
+
+ err = ubi_io_read_vid_hdr(ubi, *pnum, vid_hdr, 0);
+ if (err > 0 && err != UBI_IO_BITFLIPS) {
+ int torture = 0;
+
+ switch (err) {
+ case UBI_IO_FF:
+ case UBI_IO_FF_BITFLIPS:
+ case UBI_IO_BAD_HDR:
+ case UBI_IO_BAD_HDR_EBADMSG:
+ break;
+ default:
+ ubi_assert(0);
+ }
+
+ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
+ torture = 1;
+
+ down_read(&ubi->fm_eba_sem);
+ vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
+ up_read(&ubi->fm_eba_sem);
+ ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
+
+ *pnum = UBI_LEB_UNMAPPED;
+ } else if (err < 0) {
+ ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
+ *pnum, err);
+
+ goto out_free;
+ }
+
+ err = 0;
+
+out_free:
+ ubi_free_vid_hdr(ubi, vid_hdr);
+
+ return err;
+}
+#else
+static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ int *pnum)
+{
+ return 0;
+}
+#endif
+
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
@@ -381,7 +457,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return err;
pnum = vol->eba_tbl[lnum];
- if (pnum < 0) {
+ if (pnum >= 0) {
+ err = check_mapping(ubi, vol, lnum, &pnum);
+ if (err < 0)
+ goto out_unlock;
+ }
+
+ if (pnum == UBI_LEB_UNMAPPED) {
/*
* The logical eraseblock is not mapped, fill the whole buffer
* with 0xFF bytes. The exception is static volumes for which
@@ -697,6 +779,14 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
pnum = vol->eba_tbl[lnum];
if (pnum >= 0) {
+ err = check_mapping(ubi, vol, lnum, &pnum);
+ if (err < 0) {
+ leb_write_unlock(ubi, vol_id, lnum);
+ return err;
+ }
+ }
+
+ if (pnum >= 0) {
dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
len, offset, vol_id, lnum, pnum);
@@ -1088,6 +1178,8 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_volume *vol;
uint32_t crc;
+ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
@@ -1256,9 +1348,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl[lnum] == from);
- down_read(&ubi->fm_eba_sem);
vol->eba_tbl[lnum] = to;
- up_read(&ubi->fm_eba_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
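
After the eba.c changes above, ubi_eba_copy_leb() no longer takes fm_eba_sem itself; the wear-leveling worker holds it across the whole move and the callee merely asserts it. A small sketch of this caller-holds-lock convention (struct my_dev, do_move() and worker() are illustrative, not driver functions):

    struct my_dev {
            struct rw_semaphore sem;
    };

    /* Callee: document and assert the locking contract instead of
     * re-acquiring the lock, which invites lock-order bugs.
     */
    static int do_move(struct my_dev *dev)
    {
            WARN_ON(!rwsem_is_locked(&dev->sem));
            /* ... update mapping tables under the caller's lock ... */
            return 0;
    }

    /* Caller: hold the semaphore for the whole operation. */
    static int worker(struct my_dev *dev)
    {
            int err;

            down_read(&dev->sem);
            err = do_move(dev);
            up_read(&dev->sem);
            return err;
    }
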
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index ed62f1efe6ebb2..69dd21679a30c2 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -262,6 +262,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
+ ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
+
if (pool->used == pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
@@ -303,7 +305,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
- schedule_ubi_work(ubi, wrk);
+ __schedule_ubi_work(ubi, wrk);
return 0;
}
@@ -344,7 +346,7 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
spin_unlock(&ubi->wl_lock);
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
- return schedule_erase(ubi, e, vol_id, lnum, torture);
+ return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}
/**
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index bba7dd1b5ebf19..72e89b352034b3 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -326,6 +326,7 @@ static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
aeb->pnum = new_aeb->pnum;
aeb->copy_flag = new_vh->copy_flag;
aeb->scrub = new_aeb->scrub;
+ aeb->sqnum = new_aeb->sqnum;
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
/* new_aeb is older */
@@ -851,27 +852,57 @@ fail:
}
/**
+ * find_fm_anchor - find the most recent Fastmap superblock (anchor)
+ * @ai: UBI attach info to be filled
+ */
+static int find_fm_anchor(struct ubi_attach_info *ai)
+{
+ int ret = -1;
+ struct ubi_ainf_peb *aeb;
+ unsigned long long max_sqnum = 0;
+
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID && aeb->sqnum > max_sqnum) {
+ max_sqnum = aeb->sqnum;
+ ret = aeb->pnum;
+ }
+ }
+
+ return ret;
+}
+
+/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
* @ai: UBI attach info to be filled
- * @fm_anchor: The fastmap starts at this PEB
+ * @scan_ai: UBI attach info from the first 64 PEBs,
+ * used to find the most recent Fastmap data structure
*
* Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
* UBI_BAD_FASTMAP if one was found but is not usable.
* < 0 indicates an internal error.
*/
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int fm_anchor)
+ struct ubi_attach_info *scan_ai)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
- int i, used_blocks, pnum, ret = 0;
+ struct ubi_ainf_peb *tmp_aeb, *aeb;
+ int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
+ fm_anchor = find_fm_anchor(scan_ai);
+ if (fm_anchor < 0)
+ return UBI_NO_FASTMAP;
+
+ /* Move all (possible) fastmap blocks into our new attach structure. */
+ list_for_each_entry_safe(aeb, tmp_aeb, &scan_ai->fastmap, u.list)
+ list_move_tail(&aeb->u.list, &ai->fastmap);
+
down_write(&ubi->fm_protect);
memset(ubi->fm_buf, 0, ubi->fm_size);
@@ -1484,22 +1515,30 @@ int ubi_update_fastmap(struct ubi_device *ubi)
struct ubi_wl_entry *tmp_e;
down_write(&ubi->fm_protect);
+ down_write(&ubi->work_sem);
+ down_write(&ubi->fm_eba_sem);
ubi_refill_pools(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return 0;
}
ret = ubi_ensure_anchor_pebs(ubi);
if (ret) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return ret;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return -ENOMEM;
}
@@ -1608,16 +1647,14 @@ int ubi_update_fastmap(struct ubi_device *ubi)
new_fm->e[0] = tmp_e;
}
- down_write(&ubi->work_sem);
- down_write(&ubi->fm_eba_sem);
ret = ubi_write_fastmap(ubi, new_fm);
- up_write(&ubi->fm_eba_sem);
- up_write(&ubi->work_sem);
if (ret)
goto err;
out_unlock:
+ up_write(&ubi->fm_eba_sem);
+ up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
return ret;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index de1ea2e4c37d45..05d9ec66437c33 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -699,6 +699,8 @@ struct ubi_ainf_volume {
* @erase: list of physical eraseblocks which have to be erased
* @alien: list of physical eraseblocks which should not be used by UBI (e.g.,
* those belonging to "preserve"-compatible internal volumes)
+ * @fastmap: list of physical eraseblocks which relate to fastmap (e.g.,
+ * eraseblocks of the current and not yet erased old fastmap blocks)
* @corr_peb_count: count of PEBs in the @corr list
* @empty_peb_count: count of PEBs which are presumably empty (contain only
* 0xFF bytes)
@@ -709,6 +711,8 @@ struct ubi_ainf_volume {
* @vols_found: number of volumes found
* @highest_vol_id: highest volume ID
* @is_empty: flag indicating whether the MTD device is empty or not
+ * @force_full_scan: flag indicating whether we need to do a full scan and drop
+ * all existing Fastmap data structures
* @min_ec: lowest erase counter value
* @max_ec: highest erase counter value
* @max_sqnum: highest sequence number value
@@ -727,6 +731,7 @@ struct ubi_attach_info {
struct list_head free;
struct list_head erase;
struct list_head alien;
+ struct list_head fastmap;
int corr_peb_count;
int empty_peb_count;
int alien_peb_count;
@@ -735,6 +740,7 @@ struct ubi_attach_info {
int vols_found;
int highest_vol_id;
int is_empty;
+ int force_full_scan;
int min_ec;
int max_ec;
unsigned long long max_sqnum;
@@ -907,7 +913,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
- int fm_anchor);
+ struct ubi_attach_info *scan_ai);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
#endif
@@ -1101,4 +1107,42 @@ static inline int idx2vol_id(const struct ubi_device *ubi, int idx)
return idx;
}
+/**
+ * ubi_is_fm_vol - check whether a volume ID is a Fastmap volume.
+ * @vol_id: volume ID
+ */
+static inline bool ubi_is_fm_vol(int vol_id)
+{
+ switch (vol_id) {
+ case UBI_FM_SB_VOLUME_ID:
+ case UBI_FM_DATA_VOLUME_ID:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ubi_find_fm_block - check whether a PEB is part of the current Fastmap.
+ * @ubi: UBI device description object
+ * @pnum: physical eraseblock to look for
+ *
+ * This function returns a wear leveling object if @pnum relates to the current
+ * fastmap, %NULL otherwise.
+ */
+static inline struct ubi_wl_entry *ubi_find_fm_block(const struct ubi_device *ubi,
+ int pnum)
+{
+ int i;
+
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++) {
+ if (ubi->fm->e[i]->pnum == pnum)
+ return ubi->fm->e[i];
+ }
+ }
+
+ return NULL;
+}
+
#endif /* !__UBI_UBI_H__ */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 75286588b82341..f4b3ce2b2bc3cf 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -580,7 +580,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
* failure.
*/
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
- int vol_id, int lnum, int torture)
+ int vol_id, int lnum, int torture, bool nested)
{
struct ubi_work *wl_wrk;
@@ -599,7 +599,10 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
- schedule_ubi_work(ubi, wl_wrk);
+ if (nested)
+ __schedule_ubi_work(ubi, wl_wrk);
+ else
+ schedule_ubi_work(ubi, wl_wrk);
return 0;
}
@@ -658,6 +661,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!vid_hdr)
return -ENOMEM;
+ down_read(&ubi->fm_eba_sem);
mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
ubi_assert(!ubi->move_from && !ubi->move_to);
@@ -884,6 +888,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
dbg_wl("done");
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
return 0;
/*
@@ -925,6 +930,7 @@ out_not_moved:
}
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
return 0;
out_error:
@@ -946,6 +952,7 @@ out_error:
out_ro:
ubi_ro_mode(ubi);
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
ubi_assert(err != 0);
return err < 0 ? err : -EIO;
@@ -953,6 +960,7 @@ out_cancel:
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
mutex_unlock(&ubi->move_mutex);
+ up_read(&ubi->fm_eba_sem);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
}
@@ -1075,7 +1083,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
int err1;
/* Re-schedule the LEB for erasure */
- err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
+ err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
if (err1) {
wl_entry_destroy(ubi, e);
err = err1;
@@ -1256,7 +1264,7 @@ retry:
}
spin_unlock(&ubi->wl_lock);
- err = schedule_erase(ubi, e, vol_id, lnum, torture);
+ err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
if (err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->used);
@@ -1479,6 +1487,7 @@ int ubi_thread(void *u)
}
dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
+ ubi->thread_enabled = 0;
return 0;
}
@@ -1488,9 +1497,6 @@ int ubi_thread(void *u)
*/
static void shutdown_work(struct ubi_device *ubi)
{
-#ifdef CONFIG_MTD_UBI_FASTMAP
- flush_work(&ubi->fm_work);
-#endif
while (!list_empty(&ubi->works)) {
struct ubi_work *wrk;
@@ -1503,6 +1509,46 @@ static void shutdown_work(struct ubi_device *ubi)
}
/**
+ * erase_aeb - erase a PEB given in UBI attach info PEB
+ * @ubi: UBI device description object
+ * @aeb: UBI attach info PEB
+ * @sync: If true, erase synchronously. Otherwise schedule for erasure
+ */
+static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
+{
+ struct ubi_wl_entry *e;
+ int err;
+
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->pnum = aeb->pnum;
+ e->ec = aeb->ec;
+ ubi->lookuptbl[e->pnum] = e;
+
+ if (sync) {
+ err = sync_erase(ubi, e, false);
+ if (err)
+ goto out_free;
+
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+ } else {
+ err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
+ if (err)
+ goto out_free;
+ }
+
+ return 0;
+
+out_free:
+ wl_entry_destroy(ubi, e);
+
+ return err;
+}
+
+/**
* ubi_wl_init - initialize the WL sub-system using attaching information.
* @ubi: UBI device description object
* @ai: attaching information
@@ -1539,17 +1585,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
- goto out_free;
-
- e->pnum = aeb->pnum;
- e->ec = aeb->ec;
- ubi->lookuptbl[e->pnum] = e;
- if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
- wl_entry_destroy(ubi, e);
+ err = erase_aeb(ubi, aeb, false);
+ if (err)
goto out_free;
- }
found_pebs++;
}
@@ -1559,8 +1597,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1579,8 +1619,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
cond_resched();
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
- if (!e)
+ if (!e) {
+ err = -ENOMEM;
goto out_free;
+ }
e->pnum = aeb->pnum;
e->ec = aeb->ec;
@@ -1600,19 +1642,49 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
}
}
- dbg_wl("found %i PEBs", found_pebs);
+ list_for_each_entry(aeb, &ai->fastmap, u.list) {
+ cond_resched();
- if (ubi->fm) {
- ubi_assert(ubi->good_peb_count ==
- found_pebs + ubi->fm->used_blocks);
+ e = ubi_find_fm_block(ubi, aeb->pnum);
- for (i = 0; i < ubi->fm->used_blocks; i++) {
- e = ubi->fm->e[i];
+ if (e) {
+ ubi_assert(!ubi->lookuptbl[e->pnum]);
ubi->lookuptbl[e->pnum] = e;
+ } else {
+ bool sync = false;
+
+ /*
+ * Usually old Fastmap PEBs are scheduled for erasure
+ * and we don't have to care about them, but if we face
+ * a power cut before scheduling them we need to
+ * take care of them here.
+ */
+ if (ubi->lookuptbl[aeb->pnum])
+ continue;
+
+ /*
+ * The fastmap update code might not find a free PEB for
+ * writing the fastmap anchor to and then reuses the
+ * current fastmap anchor PEB. When this PEB gets erased
+ * and a power cut happens before it is written again we
+ * must make sure that the fastmap attach code doesn't
+ * find any outdated fastmap anchors, hence we erase the
+ * outdated fastmap anchor PEBs synchronously here.
+ */
+ if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
+ sync = true;
+
+ err = erase_aeb(ubi, aeb, sync);
+ if (err)
+ goto out_free;
}
+
+ found_pebs++;
}
- else
- ubi_assert(ubi->good_peb_count == found_pebs);
+
+ dbg_wl("found %i PEBs", found_pebs);
+
+ ubi_assert(ubi->good_peb_count == found_pebs);
reserved_pebs = WL_RESERVED_PEBS;
ubi_fastmap_init(ubi, &reserved_pebs);
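
The wl.c hunks above erase outdated Fastmap anchor PEBs synchronously during ubi_wl_init(), so a power cut cannot leave a stale anchor for the next attach, while every other PEB keeps going through the background work queue. The dispatch boils down to a pattern like this sketch (erase_now() and queue_erase() are illustrative stand-ins for sync_erase() and schedule_erase()):

    /* Synchronous erase for anchors (correctness), asynchronous for the
     * rest (keeps attach-time latency low).
     */
    static int dispose_peb(struct peb *p, bool is_anchor)
    {
            if (is_anchor)
                    return erase_now(p);    /* gone before attach ends */
            return queue_erase(p);          /* background thread handles it */
    }
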
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 2e4649655181df..4e98e5aff7c594 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -284,8 +284,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
rp = __ipddp_find_route(&rcp);
- if (rp)
- memcpy(&rcp2, rp, sizeof(rcp2));
+ if (rp) {
+ memset(&rcp2, 0, sizeof(rcp2));
+ rcp2.ip = rp->ip;
+ rcp2.at = rp->at;
+ rcp2.flags = rp->flags;
+ }
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
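
The ipddp ioctl fix copies only the needed fields into a zeroed struct before the data is handed to user space; a whole-struct memcpy() can leak padding or uninitialized bytes. The defensive pattern in isolation (struct reply and fill_reply() are illustrative, not from the driver):

    /* Zero first, then copy field by field: compiler-inserted padding in
     * 'struct reply' never reaches user space uninitialized.
     */
    struct reply {
            u32 ip;
            u16 flags;      /* 2 bytes of tail padding follow */
    };

    static void fill_reply(struct reply *out, const struct reply *in)
    {
            memset(out, 0, sizeof(*out));
            out->ip = in->ip;
            out->flags = in->flags;
    }
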
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 940e2ebbdea81e..399c627b15cc0a 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2011,6 +2011,9 @@ void bond_3ad_unbind_slave(struct slave *slave)
aggregator->aggregator_identifier);
/* Tell the partner that this port is not suitable for aggregation */
+ port->actor_oper_port_state &= ~AD_STATE_SYNCHRONIZATION;
+ port->actor_oper_port_state &= ~AD_STATE_COLLECTING;
+ port->actor_oper_port_state &= ~AD_STATE_DISTRIBUTING;
port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
__update_lacpdu_from_port(port);
ad_lacpdu_send(port);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 339118f3c7182b..a32dcb6718ca23 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -216,6 +216,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
@@ -1106,11 +1107,11 @@ static void bond_compute_features(struct bonding *bond)
gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
}
+ bond_dev->hard_header_len = max_hard_header_len;
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
- bond_dev->hard_header_len = max_hard_header_len;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -1237,6 +1238,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
return NULL;
}
}
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
return slave;
}
@@ -1244,6 +1247,7 @@ static void bond_free_slave(struct slave *slave)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
+ cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
@@ -1265,39 +1269,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
info->link_failure_count = slave->link_failure_count;
}
-static void bond_netdev_notify(struct net_device *dev,
- struct netdev_bonding_info *info)
-{
- rtnl_lock();
- netdev_bonding_info_change(dev, info);
- rtnl_unlock();
-}
-
static void bond_netdev_notify_work(struct work_struct *_work)
{
- struct netdev_notify_work *w =
- container_of(_work, struct netdev_notify_work, work.work);
+ struct slave *slave = container_of(_work, struct slave,
+ notify_work.work);
- bond_netdev_notify(w->dev, &w->bonding_info);
- dev_put(w->dev);
- kfree(w);
+ if (rtnl_trylock()) {
+ struct netdev_bonding_info binfo;
+
+ bond_fill_ifslave(slave, &binfo.slave);
+ bond_fill_ifbond(slave->bond, &binfo.master);
+ netdev_bonding_info_change(slave->dev, &binfo);
+ rtnl_unlock();
+ } else {
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+ }
}
void bond_queue_slave_event(struct slave *slave)
{
- struct bonding *bond = slave->bond;
- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
- if (!nnw)
- return;
-
- dev_hold(slave->dev);
- nnw->dev = slave->dev;
- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
- bond_fill_ifbond(bond, &nnw->bonding_info.master);
- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
/* enslave device <slave> to bond device <master> */
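
bond_netdev_notify_work() above uses rtnl_trylock() and, if the lock is contended, requeues itself one tick later rather than blocking a shared workqueue on RTNL. The idiom in isolation (my_notify() is a hypothetical stand-in for the bonding-info notification):

    /* Never block a workqueue on RTNL: try the lock, requeue on failure. */
    static void my_notify_work(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);

            if (!rtnl_trylock()) {
                    schedule_delayed_work(dwork, 1);    /* retry next tick */
                    return;
            }
            my_notify();            /* RTNL-protected notification */
            rtnl_unlock();
    }
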
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 55e93b6b6d2150..66560a8fcfa2ff 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1115,6 +1115,7 @@ static int bond_option_primary_set(struct bonding *bond,
slave->dev->name);
rcu_assign_pointer(bond->primary_slave, slave);
strcpy(bond->params.primary, slave->dev->name);
+ bond->force_primary = true;
bond_select_active_slave(bond);
goto out;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index eab132778e6724..8b7c6425b681d2 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -423,6 +423,33 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
+{
+ struct can_priv *priv = netdev_priv(dev);
+
+ if (idx >= priv->echo_skb_max) {
+ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
+ __func__, idx, priv->echo_skb_max);
+ return NULL;
+ }
+
+ if (priv->echo_skb[idx]) {
+ /* Using "struct canfd_frame::len" for the frame
+ * length is supported on both CAN and CANFD frames.
+ */
+ struct sk_buff *skb = priv->echo_skb[idx];
+ struct canfd_frame *cf = (struct canfd_frame *)skb->data;
+ u8 len = cf->len;
+
+ *len_ptr = len;
+ priv->echo_skb[idx] = NULL;
+
+ return skb;
+ }
+
+ return NULL;
+}
+
/*
* Get the skb from the stack and loop it back locally
*
@@ -432,22 +459,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
*/
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
{
- struct can_priv *priv = netdev_priv(dev);
-
- BUG_ON(idx >= priv->echo_skb_max);
-
- if (priv->echo_skb[idx]) {
- struct sk_buff *skb = priv->echo_skb[idx];
- struct can_frame *cf = (struct can_frame *)skb->data;
- u8 dlc = cf->can_dlc;
+ struct sk_buff *skb;
+ u8 len;
- netif_rx(priv->echo_skb[idx]);
- priv->echo_skb[idx] = NULL;
+ skb = __can_get_echo_skb(dev, idx, &len);
+ if (!skb)
+ return 0;
- return dlc;
- }
+ netif_rx(skb);
- return 0;
+ return len;
}
EXPORT_SYMBOL_GPL(can_get_echo_skb);
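
With the split above, __can_get_echo_skb() does the bounds-checked fetch and can_get_echo_skb() keeps the loopback behaviour. A driver that wants to account the frame itself might use the new helper roughly like this sketch (my_tx_done() is illustrative):

    /* TX-completion path: fetch the echoed skb, account it, loop it back. */
    static void my_tx_done(struct net_device *dev, unsigned int idx)
    {
            struct net_device_stats *stats = &dev->stats;
            struct sk_buff *skb;
            u8 len;

            skb = __can_get_echo_skb(dev, idx, &len);
            if (!skb)
                    return;         /* nothing echoed at this index */

            stats->tx_packets++;
            stats->tx_bytes += len;
            netif_rx(skb);
    }
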
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4bff..2949a381a94dce 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
+ if (!cdm) {
+ of_node_put(np_cdm);
+ dev_err(&ofdev->dev, "can't map clock node!\n");
+ return 0;
+ }
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index bc46be39549d2a..9d93492ddfcc4c 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -24,6 +24,9 @@
#define RCAR_CAN_DRV_NAME "rcar_can"
+#define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \
+ BIT(CLKR_CLKEXT))
+
/* Mailbox configuration:
* mailbox 60 - 63 - Rx FIFO mailboxes
* mailbox 56 - 59 - Tx FIFO mailboxes
@@ -789,7 +792,7 @@ static int rcar_can_probe(struct platform_device *pdev)
goto fail_clk;
}
- if (clock_select >= ARRAY_SIZE(clock_names)) {
+ if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) {
err = -EINVAL;
dev_err(&pdev->dev, "invalid CAN clock selected\n");
goto fail_clk;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 357c9e89fdf952..047348033e2763 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -1078,6 +1078,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
usb_free_urb(dev->intr_urb);
kfree(dev->intr_in_buffer);
+ kfree(dev->tx_msg_buffer);
}
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 51670b322409b6..700b98d9c25007 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@@ -100,7 +103,7 @@ enum xcan_reg {
#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -117,6 +120,7 @@ enum xcan_reg {
/**
* struct xcan_priv - This definition define CAN driver instance
* @can: CAN private data structure.
+ * @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sent on the queue
* @tx_max: Maximum number of packets the driver can send
@@ -131,6 +135,7 @@ enum xcan_reg {
*/
struct xcan_priv {
struct can_priv can;
+ spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -158,6 +163,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+#define XCAN_CAP_WATERMARK 0x0001
+struct xcan_devtype_data {
+ unsigned int caps;
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -237,6 +247,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}
+ /* reset clears FIFOs */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
return 0;
}
@@ -391,6 +405,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
+ unsigned long flags;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
@@ -438,6 +453,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
priv->tx_head++;
/* Write the Frame to Xilinx CAN TX FIFO */
@@ -453,10 +471,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stats->tx_bytes += cf->can_dlc;
}
+ /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+ if (priv->tx_max > 1)
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
return NETDEV_TX_OK;
}
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
}
/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev: Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+ return CAN_STATE_ERROR_PASSIVE;
+ else if (status & XCAN_SR_ERRWRN_MASK)
+ return CAN_STATE_ERROR_WARNING;
+ else
+ return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev: Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf: Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+ enum can_state new_state,
+ struct can_frame *cf)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+ u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+ priv->can.state = new_state;
+
+ if (cf) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (cf)
+ cf->data[1] = (rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (cf)
+ cf->data[1] |= (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ if (cf)
+ cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+ break;
+ default:
+ /* non-ERROR states are handled elsewhere */
+ WARN_ON(1);
+ break;
+ }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev: Pointer to net_device structure
+ *
+ * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ enum can_state old_state = priv->can.state;
+ enum can_state new_state;
+
+ /* changing error state due to successful frame RX/TX can only
+ * occur from these states
+ */
+ if (old_state != CAN_STATE_ERROR_WARNING &&
+ old_state != CAN_STATE_ERROR_PASSIVE)
+ return;
+
+ new_state = xcan_current_error_state(ndev);
+
+ if (new_state != old_state) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+ if (skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+ }
+}
+
+/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
* @isr: interrupt status register value
@@ -543,16 +684,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- u32 err_status, status, txerr = 0, rxerr = 0;
+ u32 err_status;
skb = alloc_can_err_skb(ndev, &cf);
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@@ -562,28 +699,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (rxerr > 127) ?
- CAN_ERR_CRTL_RX_PASSIVE :
- CAN_ERR_CRTL_TX_PASSIVE;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ } else {
+ enum can_state new_state = xcan_current_error_state(ndev);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@@ -599,7 +718,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -708,26 +826,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
- if (isr & XCAN_IXR_RXOK_MASK) {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXOK_MASK);
- work_done += xcan_rx(ndev);
- } else {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXNEMP_MASK);
- break;
- }
+ work_done += xcan_rx(ndev);
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- if (work_done)
+ if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
+ xcan_update_error_state_after_rxtx(ndev);
+ }
if (work_done < quota) {
napi_complete(napi);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ ier |= XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@@ -742,18 +854,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ unsigned int frames_in_fifo;
+ int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+ unsigned long flags;
+ int retries = 0;
+
+ /* Synchronize with xmit as we need to know the exact number
+ * of frames in the FIFO to stay in sync due to the TXFEMP
+ * handling.
+ * This also prevents a race between netif_wake_queue() and
+ * netif_stop_queue().
+ */
+ spin_lock_irqsave(&priv->tx_lock, flags);
- while ((priv->tx_head - priv->tx_tail > 0) &&
- (isr & XCAN_IXR_TXOK_MASK)) {
+ frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+ if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+ /* clear TXOK anyway to avoid getting back here */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return;
+ }
+
+ /* Check if 2 frames were sent (TXOK only means that at least 1
+ * frame was sent).
+ */
+ if (frames_in_fifo > 1) {
+ WARN_ON(frames_in_fifo > priv->tx_max);
+
+ /* Synchronize TXOK and isr so that after the loop:
+ * (1) isr variable is up-to-date at least up to TXOK clear
+ * time. This avoids us clearing a TXOK of a second frame
+ * but not noticing that the FIFO is now empty and thus
+ * marking only a single frame as sent.
+ * (2) No TXOK is left. A leftover TXOK would become stray if we
+ * process its associated frame via the TXFEMP handling,
+ * since we read TXFEMP *after* clearing TXOK to satisfy (1).
+ */
+ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
+
+ if (isr & XCAN_IXR_TXFEMP_MASK) {
+ /* nothing in FIFO anymore */
+ frames_sent = frames_in_fifo;
+ }
+ } else {
+ /* single frame in fifo, just clear TXOK */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ }
+
+ while (frames_sent--) {
can_get_echo_skb(ndev, priv->tx_tail %
priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- can_led_event(ndev, CAN_LED_EVENT_TX);
+
netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ xcan_update_error_state_after_rxtx(ndev);
}
/**
@@ -772,6 +937,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
+ u32 isr_errors;
/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -790,18 +956,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
xcan_tx_interrupt(ndev, isr);
/* Check for the type of error interrupt and Processing it */
- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
- XCAN_IXR_ARBLST_MASK));
+ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+ if (isr_errors) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}
/* Check for the type of receive interrupt and Processing it */
- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ if (isr & XCAN_IXR_RXNEMP_MASK) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ ier &= ~XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@@ -1030,6 +1195,18 @@ static int __maybe_unused xcan_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume);
+static const struct xcan_devtype_data xcan_zynq_data = {
+ .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+ { .compatible = "xlnx,axi-can-1.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
/**
* xcan_probe - Platform registration call
* @pdev: Handle to the platform device structure
@@ -1044,8 +1221,10 @@ static int xcan_probe(struct platform_device *pdev)
struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
+ const struct of_device_id *of_id;
+ int caps = 0;
void __iomem *addr;
- int ret, rx_max, tx_max;
+ int ret, rx_max, tx_max, tx_fifo_depth;
/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1055,7 +1234,8 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+ &tx_fifo_depth);
if (ret < 0)
goto err;
@@ -1063,6 +1243,30 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0)
goto err;
+ of_id = of_match_device(xcan_of_match, &pdev->dev);
+ if (of_id) {
+ const struct xcan_devtype_data *devtype_data = of_id->data;
+
+ if (devtype_data)
+ caps = devtype_data->caps;
+ }
+
+ /* There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If watermark programming
+ * is supported, we can have 2 frames in the FIFO and use TXFEMP
+ * to determine if 1 or 2 frames have been sent.
+ * Theoretically we should be able to use TXFWMEMP to determine up
+ * to 3 frames, but it seems that after putting a second frame in the
+ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+ * sent), which is not a sensible state - possibly TXFWMEMP is not
+ * completely synchronized with the rest of the bits?
+ */
+ if (caps & XCAN_CAP_WATERMARK)
+ tx_max = min(tx_fifo_depth, 2);
+ else
+ tx_max = 1;
+
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
if (!ndev)
@@ -1077,6 +1281,7 @@ static int xcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
+ spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
ndev->irq = platform_get_irq(pdev, 0);
@@ -1144,9 +1349,9 @@ static int xcan_probe(struct platform_device *pdev)
devm_can_led_init(ndev);
clk_disable_unprepare(priv->bus_clk);
clk_disable_unprepare(priv->can_clk);
- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
priv->reg_base, ndev->irq, priv->can.clock.freq,
- priv->tx_max);
+ tx_fifo_depth, priv->tx_max);
return 0;
@@ -1182,14 +1387,6 @@ static int xcan_remove(struct platform_device *pdev)
return 0;
}
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
- { .compatible = "xlnx,zynq-can-1.0", },
- { .compatible = "xlnx,axi-can-1.00.a", },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,
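
xcan_probe() above attaches a per-compatible xcan_devtype_data to the OF match table and reads it back with of_match_device(), so hardware quirks become data instead of compatible-string comparisons scattered through the driver. The lookup pattern, reduced to a sketch (the my_* names are illustrative):

    struct my_devtype_data {
            unsigned int caps;
    };

    static const struct my_devtype_data my_chip_a_data = {
            .caps = 0x0001,         /* e.g. a watermark capability bit */
    };

    static const struct of_device_id my_of_match[] = {
            { .compatible = "vendor,chip-a", .data = &my_chip_a_data },
            { .compatible = "vendor,chip-b" },      /* no extra caps */
            { /* sentinel */ },
    };

    static int my_probe(struct platform_device *pdev)
    {
            const struct of_device_id *of_id =
                    of_match_device(my_of_match, &pdev->dev);
            unsigned int caps = 0;

            if (of_id && of_id->data) {
                    const struct my_devtype_data *d = of_id->data;
                    caps = d->caps;
            }
            /* ... configure the device according to caps ... */
            return 0;
    }
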
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0527f485c3dc7c..973fcd442aea25 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -98,8 +98,7 @@ static int mv88e6060_switch_reset(struct dsa_switch *ds)
/* Reset the switch. */
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
GLOBAL_ATU_CONTROL_SWRESET |
- GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
- GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+ GLOBAL_ATU_CONTROL_LEARNDIS);
/* Wait up to one second for reset to complete. */
timeout = jiffies + 1 * HZ;
@@ -124,13 +123,10 @@ static int mv88e6060_setup_global(struct dsa_switch *ds)
*/
REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
- /* Enable automatic address learning, set the address
- * database size to 1024 entries, and set the default aging
- * time to 5 minutes.
+ /* Disable automatic address learning.
*/
REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
- GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
+ GLOBAL_ATU_CONTROL_LEARNDIS);
return 0;
}
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 2dea39b5cb0b6e..e2414f2d7ba9b3 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -712,7 +712,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
if (s->sizeof_stat == 8)
_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
}
- value = (((u64)high) << 16) | low;
+ value = (((u64)high) << 32) | low;
return value;
}
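
The one-line mv88e6xxx fix shifts the high counter half by 32 bits, not 16: with the old shift the two 32-bit halves overlapped and every 64-bit statistic was corrupted. The correct combination as a helper:

    /* Join two 32-bit counter halves into a 64-bit value. */
    static inline u64 combine_stat64(u32 high, u32 low)
    {
            return ((u64)high << 32) | low;
    }
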
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf02095..5c3ef9fc8207e3 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !PPC32
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index b9283901136e97..0fdc9ad32a2ef8 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -156,8 +156,6 @@ static void dayna_block_output(struct net_device *dev, int count,
#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
@@ -237,19 +235,26 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
{
- unsigned long outdata = 0xA5A0B5B0;
- unsigned long indata = 0x00000000;
+ u32 outdata = 0xA5A0B5B0;
+ u32 indata = 0;
+
/* Try writing 32 bits */
- memcpy_toio(membase, &outdata, 4);
- /* Now compare them */
- if (memcmp_withio(&outdata, membase, 4) == 0)
+ nubus_writel(outdata, membase);
+ /* Now read it back */
+ indata = nubus_readl(membase);
+ if (outdata == indata)
return ACCESS_32;
+
+ outdata = 0xC5C0D5D0;
+ indata = 0;
+
/* Write 16 bit output */
word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
+
return ACCESS_UNKNOWN;
}
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986ba32905a..0ae723f7534171 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
& 0xffff;
if (inuse) { /* Tx FIFO is not empty */
- ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+ ready = max_t(int,
+ priv->tx_prod - priv->tx_cons - inuse - 1, 0);
} else {
/* Check for buffered last packet */
status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index fe644823ceaf95..bb51f124d8c7df 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -716,8 +716,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
priv->phy_iface);
- if (IS_ERR(phydev))
+ if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
+ phydev = NULL;
+ }
} else {
int ret;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 0038709fd317d8..ec59425fdbff2a 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 7847638bdd224c..8914170fccfffd 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
prop = of_get_property(nd, "tpe-link-test?", NULL);
if (!prop)
- goto no_link_test;
+ goto node_put;
if (strcmp(prop, "true")) {
printk(KERN_NOTICE "SunLance: warning: overriding option "
@@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op,
"to ecd@skynet.be\n");
auxio_set_lte(AUXIO_LTE_ON);
}
+node_put:
+ of_node_put(nd);
no_link_test:
lp->auto_select = 1;
lp->tpe = 0;
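
The fix follows the usual OF refcount rule: a node reference taken during a lookup must be dropped on every exit path. A hedged sketch of the pattern (the node path here is hypothetical):

struct device_node *np;
const char *prop;

np = of_find_node_by_path("/some-node");	/* takes a reference */
if (!np)
	return -ENODEV;

prop = of_get_property(np, "tpe-link-test?", NULL);
/* ... use prop while np is held ... */

of_node_put(np);	/* drop the reference on every exit path */
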
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 4460580818665d..7a0ab4c44ee405 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -872,14 +872,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
if (pdata->tx_pause != pdata->phy.tx_pause) {
new_state = 1;
- pdata->hw_if.config_tx_flow_control(pdata);
pdata->tx_pause = pdata->phy.tx_pause;
+ pdata->hw_if.config_tx_flow_control(pdata);
}
if (pdata->rx_pause != pdata->phy.rx_pause) {
new_state = 1;
- pdata->hw_if.config_rx_flow_control(pdata);
pdata->rx_pause = pdata->phy.rx_pause;
+ pdata->hw_if.config_rx_flow_control(pdata);
}
/* Speed support */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 8b5988e210d55b..c08d34f618b970 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1683,6 +1683,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 2ff465848b6553..097a0bf592abc2 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1338,13 +1338,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct atl2_adapter *adapter;
- static int cards_found;
+ static int cards_found = 0;
unsigned long mmio_start;
int mmio_len;
int err;
- cards_found = 0;
-
err = pci_enable_device(pdev);
if (err)
return err;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 8b1929e9f698c4..ec5834087e4b48 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1063,7 +1063,8 @@ static int bcm_enet_open(struct net_device *dev)
val = enet_readl(priv, ENET_CTL_REG);
val |= ENET_CTL_ENABLE_MASK;
enet_writel(priv, val, ENET_CTL_REG);
- enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+ if (priv->dma_has_sram)
+ enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
enet_dmac_writel(priv, priv->dma_chan_en_mask,
ENETDMAC_CHANCFG, priv->rx_chan);
@@ -1787,7 +1788,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->mac_clk);
goto out;
}
- clk_prepare_enable(priv->mac_clk);
+ ret = clk_prepare_enable(priv->mac_clk);
+ if (ret)
+ goto out_put_clk_mac;
/* initialize default and fetch platform data */
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1819,9 +1822,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (IS_ERR(priv->phy_clk)) {
ret = PTR_ERR(priv->phy_clk);
priv->phy_clk = NULL;
- goto out_put_clk_mac;
+ goto out_disable_clk_mac;
}
- clk_prepare_enable(priv->phy_clk);
+ ret = clk_prepare_enable(priv->phy_clk);
+ if (ret)
+ goto out_put_clk_phy;
}
/* do minimal hardware init to be able to probe mii bus */
@@ -1921,13 +1926,16 @@ out_free_mdio:
out_uninit_hw:
/* turn off mdc clock */
enet_writel(priv, 0, ENET_MIISC_REG);
- if (priv->phy_clk) {
+ if (priv->phy_clk)
clk_disable_unprepare(priv->phy_clk);
+
+out_put_clk_phy:
+ if (priv->phy_clk)
clk_put(priv->phy_clk);
- }
-out_put_clk_mac:
+out_disable_clk_mac:
clk_disable_unprepare(priv->mac_clk);
+out_put_clk_mac:
clk_put(priv->mac_clk);
out:
free_netdev(dev);
@@ -2772,7 +2780,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
ret = PTR_ERR(priv->mac_clk);
goto out_unmap;
}
- clk_enable(priv->mac_clk);
+ ret = clk_prepare_enable(priv->mac_clk);
+ if (ret)
+ goto out_put_clk;
priv->rx_chan = 0;
priv->tx_chan = 1;
@@ -2793,7 +2803,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
ret = register_netdev(dev);
if (ret)
- goto out_put_clk;
+ goto out_disable_clk;
netif_carrier_off(dev);
platform_set_drvdata(pdev, dev);
@@ -2802,6 +2812,9 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
return 0;
+out_disable_clk:
+ clk_disable_unprepare(priv->mac_clk);
+
out_put_clk:
clk_put(priv->mac_clk);
@@ -2833,6 +2846,9 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
+ clk_disable_unprepare(priv->mac_clk);
+ clk_put(priv->mac_clk);
+
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 06ef0f62554a91..57a77e4c8f9e0f 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -126,6 +126,10 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
reg = rxchk_readl(priv, RXCHK_CONTROL);
+ /* Clear L2 header checks, which would prevent BPDUs
+ * from being received.
+ */
+ reg &= ~RXCHK_L2_HDR_DIS;
if (priv->rx_chk_en)
reg |= RXCHK_EN;
else
@@ -400,7 +404,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- u32 reg;
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
wol->wolopts = priv->wolopts;
@@ -408,11 +411,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
if (!(priv->wolopts & WAKE_MAGICSECURE))
return;
- /* Return the programmed SecureOn password */
- reg = umac_readl(priv, UMAC_PSW_MS);
- put_unaligned_be16(reg, &wol->sopass[0]);
- reg = umac_readl(priv, UMAC_PSW_LS);
- put_unaligned_be32(reg, &wol->sopass[2]);
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
static int bcm_sysport_set_wol(struct net_device *dev,
@@ -428,13 +427,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
if (wol->wolopts & ~supported)
return -EINVAL;
- /* Program the SecureOn password */
- if (wol->wolopts & WAKE_MAGICSECURE) {
- umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
- UMAC_PSW_MS);
- umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
- UMAC_PSW_LS);
- }
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -850,14 +844,22 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD);
-
/* Clear the MagicPacket detection logic */
reg = umac_readl(priv, UMAC_MPD_CTRL);
reg &= ~MPD_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -890,11 +892,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
- }
-
return IRQ_HANDLED;
}
@@ -1886,12 +1883,17 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
unsigned int timeout = 1000;
u32 reg;
- /* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
reg |= MPD_EN;
reg &= ~PSW_EN;
- if (priv->wolopts & WAKE_MAGICSECURE)
+ if (priv->wolopts & WAKE_MAGICSECURE) {
+ /* Program the SecureOn password */
+ umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+ UMAC_PSW_MS);
+ umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+ UMAC_PSW_LS);
reg |= PSW_EN;
+ }
umac_writel(priv, reg, UMAC_MPD_CTRL);
/* Make sure RBUF entered WoL mode as result */
@@ -1915,9 +1917,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 8ace6ecb5f79f1..e668b1ce582806 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -11,6 +11,7 @@
#ifndef __BCM_SYSPORT_H
#define __BCM_SYSPORT_H
+#include <linux/ethtool.h>
#include <linux/if_vlan.h>
/* Receive/transmit descriptor format */
@@ -682,6 +683,7 @@ struct bcm_sysport_priv {
unsigned int crc_fwd:1;
u16 rev;
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;
/* MIB related fields */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index b5e64b02200cd5..2491cdc2535c6a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1634,6 +1634,7 @@ struct bnx2x {
struct link_vars link_vars;
u32 link_cnt;
struct bnx2x_link_report_data last_reported_link;
+ bool force_link_down;
struct mdio_if_info mdio;
@@ -2290,6 +2291,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
#define PMF_DMAE_C(bp) (BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
E1HVN_MAX)
+/* The following is the DMAE channel number allocation for the clients.
+ * MFW: OCBB/OCSD implementations use DMAE channels 14/15 respectively.
+ * Driver: 0-3 and 8-11 (for PF DMAE operations)
+ *         4 and 12 (for stats requests)
+ */
+#define BNX2X_FW_DMAE_C 13 /* Channel for FW DMAE operations */
+
/* PCIE link and speed */
#define PCICFG_LINK_WIDTH 0x1f00000
#define PCICFG_LINK_WIDTH_SHIFT 20
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 949a82458a290f..ebc4518d598adc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1277,6 +1277,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
{
struct bnx2x_link_report_data cur_data;
+ if (bp->force_link_down) {
+ bp->link_vars.link_up = 0;
+ return;
+ }
+
/* reread mf_cfg */
if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
@@ -2840,6 +2845,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->pending_max = 0;
}
+ bp->force_link_down = false;
if (bp->port.pmf) {
rc = bnx2x_initial_phy_init(bp, load_mode);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index d84efcd34fac3d..c56b61dce2d152 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3360,14 +3360,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
}
return 0;
@@ -3481,7 +3485,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, false);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_config_rss_eth(bp, false);
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726f9..87534c6efd66ba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -594,7 +594,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
* slots for the highest priority.
*/
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
- NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+ NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
/* Mapping between the CREDIT_WEIGHT registers and actual client
* numbers
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 8ddb68a3fdb6dc..403fa8d98aa3a6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10222,6 +10222,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
+ /* Immediately indicate link as down */
+ bp->link_vars.link_up = 0;
+ bp->force_link_down = true;
+ netif_carrier_off(bp->dev);
+ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
bnx2x_nic_load(bp, LOAD_NORMAL);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index ff702a707a91a2..343e3366d751f9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -5931,6 +5931,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
+ rdata->dmae_cmd_id = BNX2X_FW_DMAE_C;
rdata->vxlan_dst_port = cpu_to_le16(start_params->vxlan_dst_port);
rdata->geneve_dst_port = cpu_to_le16(start_params->geneve_dst_port);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 9904d768a20a74..00bd7be8567940 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -330,6 +330,12 @@ normal_tx:
}
length >>= 9;
+ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+ dev_warn_ratelimited(&pdev->dev, "Dropped oversize TX packet (%d bytes)\n",
+ skb->len);
+ i = 0;
+ goto tx_dma_error;
+ }
flags |= bnxt_lhint_arr[length];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
@@ -1343,8 +1349,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
if (likely(rc >= 0))
@@ -1362,7 +1371,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget)
break;
}
@@ -1404,8 +1413,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
napi_complete(napi);
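
Both hunks above lean on the NAPI budget contract: a poll invoked with budget == 0 (netpoll context) must only reap TX completions and must not consume RX budget. A hedged sketch of the loop shape, using hypothetical ring helpers:

/* Sketch (hypothetical helpers) of the budget rule both hunks rely on. */
static int poll_work(struct ring *r, int budget)
{
	int rx_pkts = 0;

	while (has_completion(r)) {
		if (is_tx(r))
			reap_tx(r);		/* allowed even at budget 0 */
		else
			rx_pkts += do_rx(r);	/* counts against budget */

		/* budget == 0 never trips this, so a netpoll-style
		 * zero-budget poll is not treated as exhausted. */
		if (rx_pkts && rx_pkts == budget)
			break;
	}
	return rx_pkts;
}
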
@@ -4591,7 +4604,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_request_irq(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
}
@@ -4629,6 +4642,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
open_err:
bnxt_disable_napi(bp);
+
+open_err_irq:
bnxt_del_napi(bp);
open_err_free_mem:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index cef53f2d9854f4..ce20bc939b3853 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -185,6 +185,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MAC1 0x010
#define UMAC_MAX_FRAME_LEN 0x014
+#define UMAC_MODE 0x44
+#define MODE_LINK_STATUS (1 << 5)
+
#define UMAC_EEE_CTRL 0x064
#define EN_LPI_RX_PAUSE (1 << 0)
#define EN_LPI_TX_PFC (1 << 1)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index e96d1f95bb47cc..0565efad6e6ea6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -167,8 +167,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
struct fixed_phy_status *status)
{
- if (dev && dev->phydev && status)
- status->link = dev->phydev->link;
+ struct bcmgenet_priv *priv;
+ u32 reg;
+
+ if (dev && dev->phydev && status) {
+ priv = netdev_priv(dev);
+ reg = bcmgenet_umac_readl(priv, UMAC_MODE);
+ status->link = !!(reg & MODE_LINK_STATUS);
+ }
return 0;
}
@@ -485,7 +491,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
if (!compat)
return -ENOMEM;
- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ priv->mdio_dn = of_get_compatible_child(dn, compat);
kfree(compat);
if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1325825d52252c..58102e96ac5cde 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -9278,6 +9278,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_restore_clk(tp);
+ /* Increase the core clock speed to fix tx timeout issue for 5762
+ * with 100Mbps link speed.
+ */
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ }
+
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
@@ -12370,6 +12379,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
{
struct tg3 *tp = netdev_priv(dev);
int i, irq_sync = 0, err = 0;
+ bool reset_phy = false;
if ((ering->rx_pending > tp->rx_std_ring_mask) ||
(ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
@@ -12401,7 +12411,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, false);
+ /* Reset PHY to avoid PHY lock up */
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ reset_phy = true;
+
+ err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
@@ -12435,6 +12451,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
{
struct tg3 *tp = netdev_priv(dev);
int err = 0;
+ bool reset_phy = false;
if (tp->link_config.autoneg == AUTONEG_ENABLE)
tg3_warn_mgmt_link_flap(tp);
@@ -12525,7 +12542,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
if (netif_running(dev)) {
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- err = tg3_restart_hw(tp, false);
+ /* Reset PHY to avoid PHY lock up */
+ if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
+ tg3_asic_rev(tp) == ASIC_REV_5719 ||
+ tg3_asic_rev(tp) == ASIC_REV_5720)
+ reset_phy = true;
+
+ err = tg3_restart_hw(tp, reset_phy);
if (!err)
tg3_netif_start(tp);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 0e4fdc3dd72975..18672ad773fbf6 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -556,8 +556,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
ETH_GSTRING_LEN));
- memcpy(string, bnad_net_stats_strings[i],
- ETH_GSTRING_LEN);
+ strncpy(string, bnad_net_stats_strings[i],
+ ETH_GSTRING_LEN);
string += ETH_GSTRING_LEN;
}
bmap = bna_tx_rid_mask(&bnad->bna);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 8d54e7b41bbf20..085f77f273bacd 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -523,7 +523,7 @@ static int macb_halt_tx(struct macb *bp)
if (!(status & MACB_BIT(TGO)))
return 0;
- usleep_range(10, 250);
+ udelay(250);
} while (time_before(halt_time, timeout));
return -ETIMEDOUT;
@@ -2743,6 +2743,13 @@ static const struct macb_config at91sam9260_config = {
.init = macb_init,
};
+static const struct macb_config sama5d3macb_config = {
+ .caps = MACB_CAPS_SG_DISABLED
+ | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
@@ -2801,6 +2808,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+ { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 16baaafed26c31..cbdeb54eab51a1 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1090,6 +1090,9 @@ static void nic_remove(struct pci_dev *pdev)
{
struct nicpf *nic = pci_get_drvdata(pdev);
+ if (!nic)
+ return;
+
if (nic->flags & NIC_SRIOV_ENABLED)
pci_disable_sriov(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 8f7aa53a4c4bec..3dd4c39640dc47 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -50,6 +50,7 @@
#include <linux/stringify.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -2146,6 +2147,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+ return -EINVAL;
if (t.qset_idx >= SGE_QSETS)
return -EINVAL;
if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2245,6 +2248,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+ return -EINVAL;
+
/* Display qsets for all ports when offload enabled */
if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
q1 = 0;
@@ -2256,6 +2262,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (t.qset_idx >= nqsets)
return -EINVAL;
+ t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
q = &adapter->params.sge.qset[q1 + t.qset_idx];
t.rspq_size = q->rspq_size;
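
The added array_index_nospec() call is the standard Spectre-v1 hardening from <linux/nospec.h>: after the bounds check, the index is clamped so it cannot be used speculatively out of range before the dependent load. A minimal sketch of the pattern (the function itself is hypothetical):

#include <linux/errno.h>
#include <linux/nospec.h>
#include <linux/types.h>

static int get_entry(const u32 *table, unsigned int nr,
		     unsigned int idx, u32 *out)
{
	if (idx >= nr)
		return -EINVAL;
	/* Clamp idx under speculation before the dependent load. */
	idx = array_index_nospec(idx, nr);
	*out = table[idx];
	return 0;
}
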
@@ -2289,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&edata, useraddr, sizeof(edata)))
return -EFAULT;
+ if (edata.cmd != CHELSIO_SET_QSET_NUM)
+ return -EINVAL;
if (edata.val < 1 ||
(edata.val > 1 && !(adapter->flags & USING_MSIX)))
return -EINVAL;
@@ -2329,6 +2338,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EPERM;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_LOAD_FW)
+ return -EINVAL;
/* Check t.len sanity ? */
fw_data = memdup_user(useraddr + sizeof(t), t.len);
if (IS_ERR(fw_data))
@@ -2352,6 +2363,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SETMTUTAB)
+ return -EINVAL;
if (m.nmtus != NMTUS)
return -EINVAL;
if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2393,6 +2406,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EBUSY;
if (copy_from_user(&m, useraddr, sizeof(m)))
return -EFAULT;
+ if (m.cmd != CHELSIO_SET_PM)
+ return -EINVAL;
if (!is_power_of_2(m.rx_pg_sz) ||
!is_power_of_2(m.tx_pg_sz))
return -EINVAL; /* not power of 2 */
@@ -2426,6 +2441,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EIO; /* need the memory controllers */
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_GET_MEM)
+ return -EINVAL;
if ((t.addr & 7) || (t.len & 7))
return -EINVAL;
if (t.mem_id == MEM_CM)
@@ -2478,6 +2495,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
return -EAGAIN;
if (copy_from_user(&t, useraddr, sizeof(t)))
return -EFAULT;
+ if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+ return -EINVAL;
tp = (const struct trace_params *)&t.sip;
if (t.config_tx)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 090e0065060191..a3e1498ca67cef 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -338,7 +338,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
enable ? "set" : "unset", pi->port_id, i, -err);
else
- txq->dcb_prio = value;
+ txq->dcb_prio = enable ? value : 0;
}
}
#endif /* CONFIG_CHELSIO_T4_DCB */
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff2e..ec0b545197e2df 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
depends on ISA || EISA || ARM
+ depends on !PPC32
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 3c677ed3c29e72..4d9014d5b36d93 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -78,7 +78,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
enic->rfs_h.max = enic->config.num_arfs;
enic->rfs_h.free = enic->rfs_h.max;
enic->rfs_h.toclean = 0;
- enic_rfs_timer_start(enic);
}
void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -87,7 +86,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
enic_rfs_timer_stop(enic);
spin_lock_bh(&enic->rfs_h.lock);
- enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
@@ -98,6 +96,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
enic_delfltr(enic, n->fltr_id);
hlist_del(&n->node);
kfree(n);
+ enic->rfs_h.free++;
}
}
spin_unlock_bh(&enic->rfs_h.lock);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 0e3b2ebf87f18d..9ef4caa4b84d58 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1180,7 +1180,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
* CHECSUM_UNNECESSARY.
*/
if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
- ipv4_csum_ok)
+ (ipv4_csum_ok || ipv6))
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (vlan_stripped)
@@ -1760,7 +1760,7 @@ static int enic_open(struct net_device *netdev)
vnic_intr_unmask(&enic->intr[i]);
enic_notify_timer_start(enic);
- enic_rfs_flw_tbl_init(enic);
+ enic_rfs_timer_start(enic);
return 0;
@@ -1842,10 +1842,32 @@ static int enic_stop(struct net_device *netdev)
return 0;
}
+static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ bool running = netif_running(netdev);
+ int err = 0;
+
+ ASSERT_RTNL();
+ if (running) {
+ err = enic_stop(netdev);
+ if (err)
+ return err;
+ }
+
+ netdev->mtu = new_mtu;
+
+ if (running) {
+ err = enic_open(netdev);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int enic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct enic *enic = netdev_priv(netdev);
- int running = netif_running(netdev);
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
return -EINVAL;
@@ -1853,20 +1875,12 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
- if (running)
- enic_stop(netdev);
-
- netdev->mtu = new_mtu;
-
if (netdev->mtu > enic->port_mtu)
netdev_warn(netdev,
- "interface MTU (%d) set higher than port MTU (%d)\n",
- netdev->mtu, enic->port_mtu);
+ "interface MTU (%d) set higher than port MTU (%d)\n",
+ netdev->mtu, enic->port_mtu);
- if (running)
- enic_open(netdev);
-
- return 0;
+ return _enic_change_mtu(netdev, new_mtu);
}
static void enic_change_mtu_work(struct work_struct *work)
@@ -1874,47 +1888,9 @@ static void enic_change_mtu_work(struct work_struct *work)
struct enic *enic = container_of(work, struct enic, change_mtu_work);
struct net_device *netdev = enic->netdev;
int new_mtu = vnic_dev_mtu(enic->vdev);
- int err;
- unsigned int i;
-
- new_mtu = max_t(int, ENIC_MIN_MTU, min_t(int, ENIC_MAX_MTU, new_mtu));
rtnl_lock();
-
- /* Stop RQ */
- del_timer_sync(&enic->notify_timer);
-
- for (i = 0; i < enic->rq_count; i++)
- napi_disable(&enic->napi[i]);
-
- vnic_intr_mask(&enic->intr[0]);
- enic_synchronize_irqs(enic);
- err = vnic_rq_disable(&enic->rq[0]);
- if (err) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to disable RQ.\n");
- return;
- }
- vnic_rq_clean(&enic->rq[0], enic_free_rq_buf);
- vnic_cq_clean(&enic->cq[0]);
- vnic_intr_clean(&enic->intr[0]);
-
- /* Fill RQ with new_mtu-sized buffers */
- netdev->mtu = new_mtu;
- vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);
- /* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(&enic->rq[0]) == 0) {
- rtnl_unlock();
- netdev_err(netdev, "Unable to alloc receive buffers.\n");
- return;
- }
-
- /* Start RQ */
- vnic_rq_enable(&enic->rq[0]);
- napi_enable(&enic->napi[0]);
- vnic_intr_unmask(&enic->intr[0]);
- enic_notify_timer_start(enic);
-
+ (void)_enic_change_mtu(netdev, new_mtu);
rtnl_unlock();
netdev_info(netdev, "interface MTU set as %d\n", netdev->mtu);
@@ -2543,11 +2519,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 64-bit first, and
+ * limitation for the device. Try 47-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -2561,10 +2537,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 64);
+ "for consistent allocations, aborting\n", 47);
goto err_out_release_regions;
}
using_dac = 1;
@@ -2694,6 +2670,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic->notify_timer.function = enic_notify_timer;
enic->notify_timer.data = (unsigned long)enic;
+ enic_rfs_flw_tbl_init(enic);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
@@ -2706,7 +2683,6 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
enic->port_mtu = enic->config.mtu;
- (void)enic_change_mtu(netdev, enic->port_mtu);
err = enic_set_mac_addr(netdev, enic->mac_addr);
if (err) {
@@ -2755,6 +2731,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->mtu = enic->port_mtu;
err = register_netdev(netdev);
if (err) {
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index dce5f7b7f77292..05e1f923f49e8b 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -865,11 +865,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id)
struct net_device *netdev = dev_id;
struct ftmac100 *priv = netdev_priv(netdev);
- if (likely(netif_running(netdev))) {
- /* Disable interrupts for polling */
- ftmac100_disable_all_int(priv);
+ /* Disable interrupts for polling */
+ ftmac100_disable_all_int(priv);
+ if (likely(netif_running(netdev)))
napi_schedule(&priv->napi);
- }
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 650f7888e32be9..55ac000559774d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1888,6 +1888,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
u16 i, j;
u8 __iomem *bd;
+ netdev_reset_queue(ugeth->ndev);
+
ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 253f8ed0537a05..60c727b0b7ab26 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -919,10 +919,8 @@ static int hip04_mac_probe(struct platform_device *pdev)
}
ret = register_netdev(ndev);
- if (ret) {
- free_netdev(ndev);
+ if (ret)
goto alloc_fail;
- }
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index cec95ac8687df4..fe37fc7ec76ebe 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -171,10 +171,10 @@ struct hnae_desc_cb {
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
- u16 page_offset;
- u16 reuse_flag;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
- u16 length; /* length of the buffer */
+ u16 reuse_flag;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 1a16c0307b475b..bd36fbe81ad2a0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -188,12 +188,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
int i;
- vf_cb->mac_cb = NULL;
-
- kfree(vf_cb);
-
for (i = 0; i < handle->q_num; i++)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ kfree(vf_cb);
}
static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 37491c85bc422a..6ff13c559e5278 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -319,7 +319,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
}
hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ MDIO_C45_READ, phy_id, devad);
}
/* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3daf2d4a7ca057..884aa809baac51 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2636,7 +2636,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
/* Wait for link to drop */
time = jiffies + (HZ / 10);
do {
- if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
if (!in_interrupt())
schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 2f9b12cf9ee5b7..70b3253e7ed5ef 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1163,11 +1163,15 @@ out:
map_failed_frags:
last = i+1;
- for (i = 0; i < last; i++)
+ for (i = 1; i < last; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
map_failed:
if (!firmware_has_feature(FW_FEATURE_CMO))
netdev_err(netdev, "tx: unable to map xmit buffer\n");
@@ -1234,7 +1238,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
struct iphdr *iph;
u16 mss = 0;
-restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1332,7 +1335,6 @@ restart_poll:
napi_reschedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- goto restart_poll;
}
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c7f27968..d70b2e5d52228d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -645,14 +645,14 @@ static int e1000_set_ringparam(struct net_device *netdev,
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
+ kfree(tx_old);
+ kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
@@ -665,7 +665,8 @@ err_setup_rx:
err_alloc_rx:
kfree(txdr);
err_alloc_tx:
- e1000_up(adapter);
+ if (netif_running(adapter->netdev))
+ e1000_up(adapter);
err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
@@ -1825,11 +1826,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
- char *p = NULL;
const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter);
- for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+ for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
+ char *p;
+
switch (stat->type) {
case NETDEV_STATS:
p = (char *)netdev + stat->stat_offset;
@@ -1840,15 +1842,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
default:
WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
stat->type, i);
- break;
+ continue;
}
if (stat->sizeof_stat == sizeof(u64))
data[i] = *(u64 *)p;
else
data[i] = *(u32 *)p;
-
- stat++;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}
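
The corrected loop above walks a table of (type, offset, width) descriptors and reads each stat back through a byte pointer; the fix advances the descriptor in the for-statement and skips unknown types instead of reusing a stale pointer. A self-contained sketch of the same offset-based readout, with a hypothetical stat layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_bytes; uint32_t tx_errors; };
struct desc { size_t offset; size_t size; };

int main(void)
{
	struct stats s = { .rx_bytes = 123456789ULL, .tx_errors = 7 };
	const struct desc d[] = {
		{ offsetof(struct stats, rx_bytes), sizeof(uint64_t) },
		{ offsetof(struct stats, tx_errors), sizeof(uint32_t) },
	};

	for (size_t i = 0; i < sizeof(d) / sizeof(d[0]); i++) {
		const char *p = (const char *)&s + d[i].offset;
		uint64_t v = (d[i].size == sizeof(uint64_t)) ?
			     *(const uint64_t *)p : *(const uint32_t *)p;

		printf("stat[%zu] = %llu\n", i, (unsigned long long)v);
	}
	return 0;
}
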
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 2a1d4a9d3c19ae..1f84f2fa459fb1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -521,8 +521,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u32 rctl, tctl;
- netif_carrier_off(netdev);
-
/* disable receives in the hardware */
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
@@ -538,6 +536,15 @@ void e1000_down(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH();
msleep(10);
+ /* Set the carrier off after transmits have been disabled in the
+ * hardware, to avoid race conditions with e1000_watchdog() (which
+ * may be running concurrently to us, checking for the carrier
+ * bit to decide whether it should enable transmits again). Such
+ * a race condition would result into transmission being disabled
+ * in the hardware until the next IFF_DOWN+IFF_UP cycle.
+ */
+ netif_carrier_off(netdev);
+
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 25a0ad5102d633..855cf8c15c8a58 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -111,10 +111,14 @@ static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u64 ns;
+ u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
- ns = timecounter_read(&adapter->tc);
+
+ /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */
+ cycles = adapter->cc.read(&adapter->cc);
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
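
timecounter_cyc2time() converts a captured cycle value against the current time base without advancing it, unlike timecounter_read(), which the overflow worker below now calls to move the base forward periodically. Ignoring wrap masking and the fractional remainder, the conversion is ns = base_ns + ((cycles - cycle_last) * mult >> shift); a self-contained sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycle_last = 1000000, base_ns = 500000000;
	uint32_t mult = 8, shift = 3;	/* 1 cycle == 1 ns here */
	uint64_t cycles = 1000100;	/* current counter reading */

	uint64_t ns = base_ns + (((cycles - cycle_last) * mult) >> shift);

	printf("%llu\n", (unsigned long long)ns);	/* 500000100 */
	return 0;
}
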
@@ -170,9 +174,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
+ u64 ns;
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index f3f3b95d551247..97bf0c3d5c69e0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -223,17 +223,6 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
- /* Make sure the PHY is in a good state. Several people have reported
- * firmware leaving the PHY's page select register set to something
- * other than the default of zero, which causes the PHY ID read to
- * access something other than the intended register.
- */
- ret_val = hw->phy.ops.reset(hw);
- if (ret_val) {
- hw_dbg("Error resetting the PHY.\n");
- goto out;
- }
-
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 02b23f6277fbee..c1796aa2dde511 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7339,9 +7339,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
rtnl_unlock();
#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
+ if (!runtime) {
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+ }
#endif
status = rd32(E1000_STATUS);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1d2174526a4c90..18e4e4a69262fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -312,7 +312,7 @@ enum ixgbe_ring_f_enum {
};
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_RSS_INDICES_X550 64
+#define IXGBE_MAX_RSS_INDICES_X550 63
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES 8
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 105dd00ddc1a3b..cd2afe92f1daf2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1814,7 +1814,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
if (enable_addr != 0)
rar_high |= IXGBE_RAH_AV;
+ /* Record the lower 32 bits of the MAC address, then make
+ * sure that the write is flushed to hardware before writing
+ * the upper 16 bits and setting the valid bit.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return 0;
@@ -1846,8 +1851,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ /* Clear the address valid bit and upper 16 bits of the address
+ * before clearing the lower bits. This way we aren't updating
+ * a live filter.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
/* clear VMDq pool/queue selection for this RAR */
hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 995f03107eacd0..04bc4df82fa7fc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3508,7 +3508,7 @@ struct ixgbe_info {
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV1 BIT(6)
+#define IXGBE_FUSES0_REV_MASK (3 << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index a75f2e3ce86fa4..dcd718ce13d58b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1429,7 +1429,9 @@ static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
*autoneg = false;
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return 0;
}
@@ -1873,10 +1875,6 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
u32 save_autoneg;
bool link_up;
- /* SW LPLU not required on later HW revisions. */
- if (IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))
- return 0;
-
/* If blocked by MNG FW, then don't restart AN */
if (ixgbe_check_reset_blocked(hw))
return 0;
@@ -2030,8 +2028,9 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
}
/* setup SW LPLU only for first revision */
- if (!(IXGBE_FUSES0_REV1 & IXGBE_READ_REG(hw,
- IXGBE_FUSES0_GROUP(0))))
+ if (hw->mac.type == ixgbe_mac_X550EM_x &&
+ !(IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)) &
+ IXGBE_FUSES0_REV_MASK))
phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em;
phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 50bbad37d640dc..723bda33472a7a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1014,6 +1014,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
ixgbevf_for_each_ring(ring, q_vector->tx)
clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ if (budget <= 0)
+ return budget;
#ifdef CONFIG_NET_RX_BUSY_POLL
if (!ixgbevf_qv_lock_napi(q_vector))
return budget;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 4182290fdbcf10..82f080a5ed5c85 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2884,7 +2884,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
- return ret;
+ goto err_put_clk;
pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
@@ -2892,6 +2892,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
infer_hw_params(msp);
return 0;
+
+err_put_clk:
+ if (!IS_ERR(msp->clk))
+ clk_disable_unprepare(msp->clk);
+ return ret;
}
static int mv643xx_eth_shared_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ea693bbf56d84f..1c300259d70ae4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2569,7 +2569,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
}
mvneta_start_dev(pp);
- mvneta_port_up(pp);
netdev_update_features(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ac92685dd4e559..42305f3234ff29 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
+#include <linux/if_vlan.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -4268,7 +4269,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -5032,14 +5033,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -5051,7 +5053,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 7173836fe36196..c9f4b5412844d3 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
memset(p, 0, regs->len);
memcpy_fromio(p, io, B3_RAM_ADDR);
- memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
- regs->len - B3_RI_WTO_R1);
+ if (regs->len > B3_RI_WTO_R1) {
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
+ }
}
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 6e5065f0907b1a..5cc05df69a869c 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -46,6 +46,7 @@
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/dmi.h>
#include <asm/irq.h>
@@ -93,7 +94,7 @@ static int copybreak __read_mostly = 128;
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
-static int disable_msi = 0;
+static int disable_msi = -1;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
@@ -4923,6 +4924,24 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
return buf;
}
+static const struct dmi_system_id msi_blacklist[] = {
+ {
+ .ident = "Dell Inspiron 1545",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+ },
+ },
+ {
+ .ident = "Gateway P-79",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+ },
+ },
+ {}
+};
+
static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev, *dev1;
@@ -5034,6 +5053,9 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_pci;
}
+ if (disable_msi == -1)
+ disable_msi = !!dmi_check_system(msi_blacklist);
+
if (!disable_msi && pci_enable_msi(pdev) == 0) {
err = sky2_test_msi(hw);
if (err) {
@@ -5079,7 +5101,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 200;
+ pdev->d3_delay = 300;
return 0;
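Two idioms in the sky2 hunks are worth noting: the module parameter's default moves from 0 to -1 so "user said nothing" is distinguishable from "user said no", and dmi_check_system() (which returns the number of matching table entries) decides only in the auto case. A hedged sketch of just the decision logic:

    /* tri-state parameter: -1 = auto, 0 = allow MSI, 1 = disable MSI;
     * the DMI quirk table only decides when the user left it at auto */
    static int disable_msi = -1;

    static int msi_disabled(int dmi_matched)
    {
            if (disable_msi == -1)
                    return !!dmi_matched;   /* quirk table decides */
            return disable_msi;             /* explicit choice wins */
    }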
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 0c51c69f802faf..a4912b11e54f59 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -339,7 +339,7 @@ void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc)
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
int align, u32 skip_mask, u32 *puid)
{
- u32 uid;
+ u32 uid = 0;
u32 res;
struct mlx4_zone_allocator *zone_alloc = zone->allocator;
struct mlx4_zone_entry *curr_node;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index fc222df47aa997..9e104dcfa9dd8f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2636,6 +2636,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev)
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
+ priv->cmd.context = NULL;
up(&priv->cmd.poll_sem);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 4dccf7287f0f04..52e4ed2f639d8b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -251,8 +251,11 @@ static u32 freq_to_shift(u16 freq)
{
u32 freq_khz = freq * 1000;
u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 tmp_rounded =
+ roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
+ roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
- max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ max_val_cycles : tmp_rounded;
/* calculate max possible multiplier in order to fit in 64bit */
u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
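The en_clock change defends against roundup_pow_of_two() overflowing: for a 64-bit input above 2^63 the round-up wraps, so blindly subtracting 1 from it would yield garbage. The fix uses "round-up minus one" only when the round-up is genuinely larger than the input and clamps to UINT_MAX otherwise. A self-contained sketch of the same guard (the loop stands in for the kernel helper, including its wrap-to-zero failure mode):

    #include <stdint.h>
    #include <limits.h>

    static uint64_t roundup_pow_of_two64(uint64_t x)
    {
            uint64_t p = 1;

            while (p && p < x)
                    p <<= 1;        /* wraps to 0 on 64-bit overflow */
            return p;
    }

    static uint64_t rounded_cycles(uint64_t max_val_cycles)
    {
            uint64_t r = roundup_pow_of_two64(max_val_cycles);

            return r > max_val_cycles ? r - 1 : UINT_MAX;
    }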
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 82bf1b539d872f..ac7c64bae2a5bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -725,13 +725,27 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
return 0;
}
#endif
+
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
__wsum hw_checksum = 0;
+ void *hdr;
+
+ /* CQE csum doesn't cover padding octets in short ethernet
+ * frames. And the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+ * Detecting these padded frames requires verifying and parsing
+ * IP headers, so we simply force all those small frames to skip
+ * checksum complete.
+ */
+ if (short_frame(skb->len))
+ return -EINVAL;
- void *hdr = (u8 *)va + sizeof(struct ethhdr);
-
+ hdr = (u8 *)va + sizeof(struct ethhdr);
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -851,6 +865,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
if (likely(dev->features & NETIF_F_RXCSUM)) {
+ /* TODO: For IP non TCP/UDP packets when csum complete is
+ * not an option (not supported or any other reason) we can
+ * actually check cqe IPOK status bit and report
+ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
+ */
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
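The en_rx hunk opts short frames out of CHECKSUM_COMPLETE because, as the added comment explains, the hardware checksum does not cover the pad octets appended to reach the Ethernet minimum, so the reported sum would disagree with what the stack computes. The threshold test in portable form:

    #include <stdint.h>

    #define ETH_ZLEN    60  /* minimum frame length, without FCS */
    #define ETH_FCS_LEN  4

    /* frames at or below the minimum may carry pad bytes that the
     * hardware checksum did not include -- skip checksum-complete */
    static int short_frame(uint32_t len)
    {
            return len <= ETH_ZLEN + ETH_FCS_LEN;
    }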
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index ff77b8b608bd0e..7417605c3cf6cd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -228,7 +228,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ if (!cpumask_available(eq->affinity_mask) ||
+ cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 90db94e83fdeef..033f99d2f15c7f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1906,9 +1906,11 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
{
struct mlx4_cmd_mailbox *mailbox;
__be32 *outbox;
+ u64 qword_field;
u32 dword_field;
- int err;
+ u16 word_field;
u8 byte_field;
+ int err;
static const u8 a0_dmfs_query_hw_steering[] = {
[0] = MLX4_STEERING_DMFS_A0_DEFAULT,
[1] = MLX4_STEERING_DMFS_A0_DYNAMIC,
@@ -1936,19 +1938,32 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* QPC/EEC/CQC/EQC/RDMARC attributes */
- MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
- MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
- MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
- MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
- MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
- MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
- MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
- MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
- MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
- MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
- MLX4_GET(param->num_sys_eqs, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
- MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
- MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+ MLX4_GET(qword_field, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ param->qpc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_QP_OFFSET);
+ param->log_num_qps = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ param->srqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ param->log_num_srqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ param->cqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ param->log_num_cqs = byte_field & 0x1f;
+ MLX4_GET(qword_field, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ param->altc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ param->auxc_base = qword_field;
+ MLX4_GET(qword_field, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ param->eqc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ param->log_num_eqs = byte_field & 0x1f;
+ MLX4_GET(word_field, outbox, INIT_HCA_NUM_SYS_EQS_OFFSET);
+ param->num_sys_eqs = word_field & 0xfff;
+ MLX4_GET(qword_field, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ param->rdmarc_base = qword_field & ~((u64)0x1f);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_RD_OFFSET);
+ param->log_rd_per_qp = byte_field & 0x7;
MLX4_GET(dword_field, outbox, INIT_HCA_FLAGS_OFFSET);
if (dword_field & (1 << INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN)) {
@@ -1967,22 +1982,21 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* steering attributes */
if (param->steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
- MLX4_GET(byte_field, outbox,
- INIT_HCA_FS_A0_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_LOG_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_FS_A0_OFFSET);
param->dmfs_high_steer_mode =
a0_dmfs_query_hw_steering[(byte_field >> 6) & 3];
} else {
MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
- MLX4_GET(param->log_mc_entry_sz, outbox,
- INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
- MLX4_GET(param->log_mc_hash_sz, outbox,
- INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- MLX4_GET(param->log_mc_table_sz, outbox,
- INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ param->log_mc_entry_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ param->log_mc_hash_sz = byte_field & 0x1f;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+ param->log_mc_table_sz = byte_field & 0x1f;
}
/* CX3 is capable of extending CQEs/EQEs from 32 to 64 bytes */
@@ -2006,15 +2020,18 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev,
/* TPT attributes */
MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
- MLX4_GET(param->mw_enabled, outbox, INIT_HCA_TPT_MW_OFFSET);
- MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_TPT_MW_OFFSET);
+ param->mw_enabled = byte_field >> 7;
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ param->log_mpt_sz = byte_field & 0x3f;
MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
/* UAR attributes */
MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
- MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ MLX4_GET(byte_field, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+ param->log_uar_sz = byte_field & 0xf;
/* phv_check enable */
MLX4_GET(byte_field, outbox, INIT_HCA_CACHELINE_SZ_OFFSET);
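The QUERY_HCA rework stops MLX4_GET() from writing a full byte or word straight into a narrow parameter: each raw value is staged in a scratch variable and masked down to its documented width, so reserved bits adjacent to the field can no longer leak in. The pattern in isolation, with field widths taken from the hunk:

    #include <stdint.h>

    struct hca_params {
            uint8_t log_num_qps;    /* 5-bit field */
            uint8_t log_rd_per_qp;  /* 3-bit field */
            uint8_t mw_enabled;     /* single high bit */
    };

    /* stage each raw byte, then keep only the documented bits */
    static void parse_fields(struct hca_params *p, uint8_t qp_byte,
                             uint8_t rd_byte, uint8_t mw_byte)
    {
            p->log_num_qps   = qp_byte & 0x1f;
            p->log_rd_per_qp = rd_byte & 0x07;
            p->mw_enabled    = mw_byte >> 7;
    }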
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index f5fdbd53d05232..db40387ffaf6ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -537,8 +537,8 @@ struct slave_list {
struct resource_allocator {
spinlock_t alloc_lock; /* protect quotas */
union {
- int res_reserved;
- int res_port_rsvd[MLX4_MAX_PORTS];
+ unsigned int res_reserved;
+ unsigned int res_port_rsvd[MLX4_MAX_PORTS];
};
union {
int res_free;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 93195191f45bf0..53833c06696fbf 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -366,6 +366,7 @@ int mlx4_mr_hw_write_mpt(struct mlx4_dev *dev, struct mlx4_mr *mmr,
container_of((void *)mpt_entry, struct mlx4_cmd_mailbox,
buf);
+ (*mpt_entry)->lkey = 0;
err = mlx4_SW2HW_MPT(dev, mailbox, key);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 62f1a3433a621e..d6d87dd8a28fa1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -386,11 +386,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
struct mlx4_qp *qp;
- spin_lock(&qp_table->lock);
+ spin_lock_irq(&qp_table->lock);
qp = __mlx4_qp_lookup(dev, qpn);
- spin_unlock(&qp_table->lock);
+ spin_unlock_irq(&qp_table->lock);
return qp;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index e3080fbd9d0057..37dfdb1329f4bc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2652,13 +2652,13 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+ int tot;
sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
- total_pages =
- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
- page_shift);
+ tot = (total_mem + (page_offset << 6)) >> page_shift;
+ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
@@ -2891,7 +2891,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ int local_qpn = vhcr->in_modifier & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
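In qp_get_mtt_size() a possibly-zero value used to be fed into roundup_pow_of_two(), whose result is undefined for 0; the fix computes the raw page count first and substitutes 1 for zero. A portable sketch with the same shape (the loop mimics the kernel helper):

    /* roundup_pow_of_two(0) is undefined, so clamp an empty MTT to
     * one page before rounding */
    static unsigned int mtt_pages(unsigned int tot)
    {
            unsigned int p = 1;

            if (!tot)
                    return 1;
            while (p < tot)
                    p <<= 1;
            return p;
    }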
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 16bd585365a852..9ac14df0ca3b29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -643,6 +643,7 @@ static void cmd_work_handler(struct work_struct *work)
struct semaphore *sem;
unsigned long flags;
int alloc_ret;
+ int cmd_mode;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -688,6 +689,7 @@ static void cmd_work_handler(struct work_struct *work)
set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ cmd_mode = cmd->mode;
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
@@ -695,7 +697,7 @@ static void cmd_work_handler(struct work_struct *work)
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
mmiowb();
/* if not in polling don't use ent after this point */
- if (cmd->mode == CMD_MODE_POLLING) {
+ if (cmd_mode == CMD_MODE_POLLING) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
@@ -1126,7 +1128,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- char outlen_str[8];
+ char outlen_str[8] = {0};
int outlen;
void *ptr;
int err;
@@ -1141,8 +1143,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
if (copy_from_user(outlen_str, buf, count))
return -EFAULT;
- outlen_str[7] = 0;
-
err = sscanf(outlen_str, "%d", &outlen);
if (err < 0)
return err;
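In cmd_work_handler() the command mode is copied into a local before the doorbell write because, in event mode, the completion handler may free the entry (and the mode may be switched) the moment the hardware sees the descriptor; reading cmd->mode afterwards is a use-after-free window. A self-contained sketch of the snapshot-before-publish pattern (the stubs are placeholders, not the mlx5 API):

    enum { CMD_MODE_POLLING, CMD_MODE_EVENTS };

    struct cmd     { int mode; };
    struct cmd_ent { int idx; };

    static void ring_doorbell(struct cmd_ent *ent)   { (void)ent; }
    static void poll_completion(struct cmd_ent *ent) { (void)ent; }

    static void submit(struct cmd *cmd, struct cmd_ent *ent)
    {
            int mode = cmd->mode;   /* snapshot before publishing */

            ring_doorbell(ent);     /* from here on, the event path
                                     * may free ent concurrently */
            if (mode == CMD_MODE_POLLING)
                    poll_completion(ent);   /* polling still owns ent */
    }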
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 22e72bf1ae4894..7a716733d9ca38 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -586,6 +586,8 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+ int num_channels);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
struct mlx5e_tx_wqe *wqe, int bf_sz)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 7cc9df717323ed..7ee30131081706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev,
mlx5e_close_locked(dev);
priv->params.num_channels = count;
+ mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, count);
if (was_opened)
err = mlx5e_open_locked(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 90e876ecc720b2..26d25ecdca7e9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1186,7 +1186,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
- ix = ix % priv->params.num_channels;
MLX5_SET(rqtc, rqtc, rq_num[i],
test_bit(MLX5E_STATE_OPENED, &priv->state) ?
priv->channel[ix]->rq.rqn :
@@ -1304,7 +1303,7 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
lro_timer_supported_periods[2]));
}
-static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
+static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1312,6 +1311,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
void *tirc;
int inlen;
int err;
+ int tt;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1323,7 +1323,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv);
- err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+ err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ if (err)
+ break;
+ }
kvfree(in);
@@ -1870,8 +1874,10 @@ static int mlx5e_set_features(struct net_device *netdev,
mlx5e_close_locked(priv->netdev);
priv->params.lro_en = !!(features & NETIF_F_LRO);
- mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP);
- mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP);
+ err = mlx5e_modify_tirs_lro(priv);
+ if (err)
+ mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
+ err);
if (was_opened)
err = mlx5e_open_locked(priv->netdev);
@@ -1976,12 +1982,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+ int num_channels)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ indirection_rqt[i] = i % num_channels;
+}
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int i;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2005,8 +2019,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
- for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++)
- priv->params.indirection_rqt[i] = i % num_channels;
+ mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, num_channels);
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
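The new mlx5e_build_default_indir_rqt() helper lets set_channels and initial probe fill the RSS indirection table identically, striping entries round-robin across the active channels; that is also why the per-packet `ix % num_channels` in mlx5e_fill_indir_rqt_rqns() can be dropped, since every table entry is already a valid channel. A tiny standalone demo of the striping:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t tbl[8];
            int channels = 3;

            /* entry i maps to channel i % channels, always in range */
            for (int i = 0; i < 8; i++)
                    tbl[i] = i % channels;
            for (int i = 0; i < 8; i++)
                    printf("rqt[%d] -> ch %u\n", i, tbl[i]);
            return 0;
    }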
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 1bd419dbda6dc3..0798b4adb0394e 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -71,7 +71,7 @@ static int sonic_open(struct net_device *dev)
for (i = 0; i < SONIC_NUM_RRS; i++) {
dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
while(i > 0) { /* free any that were mapped successfully */
i--;
dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
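The sonic fix replaces `if (!laddr)` with dma_mapping_error(): 0 can be a perfectly valid bus address on some platforms, and a failed mapping is not guaranteed to encode as 0. A kernel-style sketch of the correct check (error handling abbreviated):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* never test the dma_addr_t against 0/NULL; only
     * dma_mapping_error() knows the platform's failure encoding */
    static int map_rx(struct device *dev, void *buf, size_t len,
                      dma_addr_t *out)
    {
            dma_addr_t addr = dma_map_single(dev, buf, len,
                                             DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, addr))
                    return -ENOMEM;
            *out = addr;
            return 0;
    }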
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 7ccdb46c67645e..21e0af2620ee1a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -43,7 +43,7 @@
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
/* ILT entry structure */
-#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
+#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 9cc9d62c1fec64..8b15a018d625cc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -177,6 +177,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
*/
do {
index = p_sb_attn->sb_index;
+ /* finish reading index before the loop condition */
+ dma_rmb();
attn_bits = le32_to_cpu(p_sb_attn->atten_bits);
attn_acks = le32_to_cpu(p_sb_attn->atten_ack);
} while (index != p_sb_attn->sb_index);
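qed_int_attentions() reads a device-written status block; the added dma_rmb() keeps the payload loads from being reordered ahead of the index load, and the loop retries whenever the device advanced the index mid-snapshot -- effectively a seqcount against DMA. A hedged kernel-style sketch with an illustrative layout:

    #include <linux/types.h>
    #include <asm/barrier.h>
    #include <asm/byteorder.h>

    struct attn_block {             /* illustrative layout */
            u16    sb_index;
            __le32 atten_bits;
            __le32 atten_ack;
    };

    static void snapshot_attn(struct attn_block *sb, u32 *bits,
                              u32 *acks)
    {
            u16 index;

            do {
                    index = sb->sb_index;
                    dma_rmb();      /* index read before payload reads */
                    *bits = le32_to_cpu(sb->atten_bits);
                    *acks = le32_to_cpu(sb->atten_ack);
            } while (index != sb->sb_index);
    }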
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 174f7341c5c32c..b8ae6ed5c7baf2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -22,6 +22,7 @@
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
+#include <linux/crash_dump.h>
#include "qed.h"
#include "qed_sp.h"
@@ -460,8 +461,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
/* Fastpath interrupts */
for (j = 0; j < 64; j++) {
if ((0x2ULL << j) & status) {
- hwfn->simd_proto_handler[j].func(
- hwfn->simd_proto_handler[j].token);
+ struct qed_simd_fp_handler *p_handler =
+ &hwfn->simd_proto_handler[j];
+
+ if (p_handler->func)
+ p_handler->func(p_handler->token);
+ else
+ DP_NOTICE(hwfn,
+ "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+ j, status);
+
status &= ~(0x2ULL << j);
rc = IRQ_HANDLED;
}
@@ -634,6 +643,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+ if (is_kdump_kernel()) {
+ DP_INFO(cdev,
+ "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+ cdev->int_params.in.min_msix_cnt);
+ cdev->int_params.in.num_vectors =
+ cdev->int_params.in.min_msix_cnt;
+ }
+
rc = qed_set_int_mode(cdev, false);
if (rc) {
DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
@@ -1107,9 +1124,9 @@ static int qed_drain(struct qed_dev *cdev)
return -EBUSY;
}
rc = qed_mcp_drain(hwfn, ptt);
+ qed_ptt_release(hwfn, ptt);
if (rc)
return rc;
- qed_ptt_release(hwfn, ptt);
}
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 20d048cdcb8821..c898006abb3201 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -420,6 +420,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
default:
p_link->speed = 0;
+ p_link->link_up = 0;
}
/* Correct speed according to bandwidth allocation */
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index b09a6b80d10719..355c5fb802cd71 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev)
qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
- ql_write_nvram_reg(qdev, spir,
- ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
}
/*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1e6bbcc7..12cd8aef1881b6 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1802,7 +1802,8 @@ struct qlcnic_hardware_ops {
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2044,9 +2045,10 @@ static inline int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter,
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b4f3cb55605e30..7f7aea9758e7ab 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2132,7 +2132,8 @@ out:
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 331ae2c20f4039..c8e012b3f7e740 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af8e2b3a9..56a3bd9e37dcd7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4625f3..c29d0dbf25f11d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -269,13 +269,12 @@ static void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter,
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -302,7 +301,8 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -336,7 +336,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@@ -351,7 +351,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -767,7 +767,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index ccbb04503b2766..b53a18e365c24e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
switch (data) {
case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b28e73ea2c2588..f39ad0e6663777 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2388,26 +2388,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
return status;
}
-static netdev_features_t qlge_fix_features(struct net_device *ndev,
- netdev_features_t features)
-{
- int err;
-
- /* Update the behavior of vlan accel in the adapter */
- err = qlge_update_hw_vlan_features(ndev, features);
- if (err)
- return err;
-
- return features;
-}
-
static int qlge_set_features(struct net_device *ndev,
netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
+ int err;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
- if (changed & NETIF_F_HW_VLAN_CTAG_RX)
qlge_vlan_mode(ndev, features);
+ }
return 0;
}
@@ -4720,7 +4714,6 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_set_mac_address = qlge_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_tx_timeout = qlge_tx_timeout,
- .ndo_fix_features = qlge_fix_features,
.ndo_set_features = qlge_set_features,
.ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index c90ae4d4be7d9b..7886a8a5b55b35 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -635,7 +635,7 @@ qcaspi_netdev_open(struct net_device *dev)
return ret;
}
- netif_start_queue(qca->net_dev);
+ /* SPI thread takes care of TX queue */
return 0;
}
@@ -739,6 +739,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
+
+ if (qca->spi_thread)
+ wake_up_process(qca->spi_thread);
}
static int
@@ -865,22 +868,22 @@ qca_spi_probe(struct spi_device *spi)
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+ qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi->dev, "Invalid burst len: %d\n",
- qcaspi_burst_len);
+ dev_err(&spi->dev, "Invalid burst len: %d\n",
+ qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi->dev, "Invalid pluggable: %d\n",
- qcaspi_pluggable);
+ dev_err(&spi->dev, "Invalid pluggable: %d\n",
+ qcaspi_pluggable);
return -EINVAL;
}
@@ -941,8 +944,8 @@ qca_spi_probe(struct spi_device *spi)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi->dev, "Unable to register net device %s\n",
- qcaspi_devs->name);
+ dev_err(&spi->dev, "Unable to register net device %s\n",
+ qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index deae10d7426df0..9b588251f2a707 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -578,6 +578,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
struct cp_private *cp;
int handled = 0;
u16 status;
+ u16 mask;
if (unlikely(dev == NULL))
return IRQ_NONE;
@@ -585,6 +586,10 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
spin_lock(&cp->lock);
+ mask = cpr16(IntrMask);
+ if (!mask)
+ goto out_unlock;
+
status = cpr16(IntrStatus);
if (!status || (status == 0xFFFF))
goto out_unlock;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index d8a4394bbb7b1a..36326378087e67 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -324,6 +324,8 @@ enum cfg_version {
};
static const struct pci_device_id rtl8169_pci_tbl[] = {
+ { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 },
+ { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
@@ -759,7 +761,7 @@ struct rtl8169_tc_offsets {
};
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_TASK_PHY_PENDING,
@@ -7545,17 +7547,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
struct net_device *dev = tp->dev;
u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
- int work_done= 0;
+ int work_done;
u16 status;
status = rtl_get_events(tp);
rtl_ack_events(tp, status & ~tp->event_slow);
- if (status & RTL_EVENT_NAPI_RX)
- work_done = rtl_rx(dev, tp, (u32) budget);
+ work_done = rtl_rx(dev, tp, (u32) budget);
- if (status & RTL_EVENT_NAPI_TX)
- rtl_tx(dev, tp);
+ rtl_tx(dev, tp);
if (status & tp->event_slow) {
enable_mask &= ~tp->event_slow;
@@ -7623,7 +7623,8 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_update_counters(dev);
rtl_lock_work(tp);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl8169_down(dev);
rtl_unlock_work(tp);
@@ -7801,7 +7802,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
rtl_lock_work(tp);
napi_disable(&tp->napi);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
rtl_unlock_work(tp);
rtl_pll_power_down(tp);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index f735dfcb64ae3c..29d31eb995d7f4 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -453,7 +453,7 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
/* Set FIFO size */
- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
/* Timestamp enable */
ravb_write(ndev, TCCR_TFEN, TCCR);
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 4f42e58f830095..b6c1e46512b690 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -821,37 +821,49 @@ static int rocker_tlv_put(struct rocker_desc_info *desc_info,
static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
int attrtype, u8 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+ u8 tmp = value; /* work around GCC PR81715 */
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
}
static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
int attrtype, u16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+ u16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
}
static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
int attrtype, __be16 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+ __be16 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
}
static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
int attrtype, u32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+ u32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
}
static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
int attrtype, __be32 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+ __be32 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
}
static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
int attrtype, u64 value)
{
- return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+ u64 tmp = value;
+
+ return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
}
static struct rocker_tlv *
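Each rocker helper now copies its by-value parameter into a local and passes the local's address down, which the first hunk's comment attributes to a workaround for GCC PR81715, where passing the parameter's address directly can miscompile. The shape in isolation:

    #include <stdint.h>
    #include <string.h>

    /* pass &tmp, not &value: the local copy sidesteps the compiler
     * issue with taking a parameter's address (see GCC PR81715) */
    static int put_u16(uint8_t *dst, uint16_t value)
    {
            uint16_t tmp = value;

            memcpy(dst, &tmp, sizeof(tmp));
            return 0;
    }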
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0cd3ecff768b3d..398b08e07149b8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -535,8 +535,10 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv)
int ret;
ret = phy_power_on(bsp_priv, true);
- if (ret)
+ if (ret) {
+ gmac_clk_enable(bsp_priv, false);
return ret;
+ }
ret = gmac_clk_enable(bsp_priv, true);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 2e51b816a7e819..fbf701e5f1e9f4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -614,25 +614,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
- priv->eee_enabled = edata->eee_enabled;
-
- if (!priv->eee_enabled)
+ if (!edata->eee_enabled) {
stmmac_disable_eee_mode(priv);
- else {
+ } else {
/* We are asking for enabling the EEE but it is safe
* to verify all by invoking the eee_init function.
* In case of failure it will return an error.
*/
- priv->eee_enabled = stmmac_eee_init(priv);
- if (!priv->eee_enabled)
+ edata->eee_enabled = stmmac_eee_init(priv);
+ if (!edata->eee_enabled)
return -EOPNOTSUPP;
-
- /* Do not change tx_lpi_timer in case of failure */
- priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(priv->phydev, edata);
+ ret = phy_ethtool_set_eee(dev->phydev, edata);
+ if (ret)
+ return ret;
+
+ priv->eee_enabled = edata->eee_enabled;
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ return 0;
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
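The set_eee rework commits priv->eee_enabled and priv->tx_lpi_timer only after the PHY call succeeds, so a rejected ethtool request can no longer leave cached driver state out of sync with the hardware. A minimal sketch of the apply-then-commit shape (hw_apply is a hypothetical stand-in for phy_ethtool_set_eee()):

    struct cfg { int enabled; int timer; };

    static int hw_apply(const struct cfg *c) { (void)c; return 0; }

    static int set_cfg(struct cfg *cached, const struct cfg *req)
    {
            int ret = hw_apply(req);

            if (ret)
                    return ret;     /* cached state untouched on error */
            *cached = *req;         /* commit only after success */
            return 0;
    }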
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5adaf537513ba6..059113dce6e0ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -54,7 +54,7 @@
#include <linux/reset.h>
#include <linux/of_mdio.h>
-#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
/* Module parameters */
#define TX_TIMEO 5000
@@ -2529,6 +2529,20 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return ret;
}
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ return ret;
+
+ priv->hw->mac->set_umac_addr(priv->hw, ndev->dev_addr, 0);
+
+ return ret;
+}
+
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
@@ -2730,7 +2744,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = stmmac_set_mac_address,
};
/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index bba670c42e3749..90d95b3654f5bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -130,7 +130,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
int stmmac_mdio_reset(struct mii_bus *bus)
{
-#if defined(CONFIG_STMMAC_PLATFORM)
+#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index d02691ba3d7feb..20aa34f45f07bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,7 +71,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -81,8 +81,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
int x = ucast_entries;
switch (x) {
- case 1:
- case 32:
+ case 1 ... 32:
case 64:
case 128:
break;
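The unicast-entries fix uses the GCC/Clang case-range extension (`case 1 ... 32:`), which the kernel relies on throughout, to accept the whole 1-32 span instead of only the endpoints. As a standalone function:

    /* case ranges are a GCC/Clang extension, not standard C */
    static int valid_ucast_entries(int n)
    {
            switch (n) {
            case 1 ... 32:
            case 64:
            case 128:
                    return n;
            default:
                    return 1;       /* unsupported: fall back to one */
            }
    }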
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ccebf89aa1e43e..85f3a2c0d4ddea 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8121,6 +8121,8 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
start += 3;
prop_len = niu_pci_eeprom_read(np, start + 4);
+ if (prop_len < 0)
+ return prop_len;
err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
if (err < 0)
return err;
@@ -8165,8 +8167,12 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
netif_printk(np, probe, KERN_DEBUG, np->dev,
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
- for (i = 0; i < prop_len; i++)
- *prop_buf++ = niu_pci_eeprom_read(np, off + i);
+ for (i = 0; i < prop_len; i++) {
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err >= 0)
+ *prop_buf = err;
+ ++prop_buf;
+ }
}
start += len;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index e23a642357e7c0..eb4d8df4939972 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -60,8 +60,7 @@
#include <linux/sungem_phy.h>
#include "sungem.h"
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
#define DEFAULT_MSG (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
- __sum16 csum;
if (netif_msg_rx_status(gp))
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
skb = copy_skb;
}
- csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
- skb->csum = csum_unfold(csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
+ __sum16 csum;
+
+ csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+ skb->csum = csum_unfold(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
skb->protocol = eth_type_trans(skb, gp->dev);
napi_gro_receive(&gp->napi, skb);
@@ -1755,7 +1757,7 @@ static void gem_init_dma(struct gem *gp)
writel(0, gp->regs + TXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2973,8 +2975,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/* We can do scatter/gather and HW checksum */
- dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
- dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->features = dev->hw_features;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 29b47e906d3803..746b2bda5b43f6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -371,7 +371,6 @@ struct cpsw_priv {
spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
- struct device_node *phy_node;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
struct device *dev;
@@ -1165,25 +1164,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
- if (priv->phy_node)
- slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+ if (slave->data->phy_node) {
+ slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
&cpsw_adjust_link, 0, slave->data->phy_if);
- else
+ if (!slave->phy) {
+ dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+ slave->data->phy_node->full_name,
+ slave->slave_num);
+ return;
+ }
+ } else {
slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
&cpsw_adjust_link, slave->data->phy_if);
- if (IS_ERR(slave->phy)) {
- dev_err(priv->dev, "phy %s not found on slave %d\n",
- slave->data->phy_id, slave->slave_num);
- slave->phy = NULL;
- } else {
- dev_info(priv->dev, "phy found : id is : 0x%x\n",
- slave->phy->phy_id);
- phy_start(slave->phy);
-
- /* Configure GMII_SEL register */
- cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
- slave->slave_num);
+ if (IS_ERR(slave->phy)) {
+ dev_err(priv->dev,
+ "phy \"%s\" not found on slave %d, err %ld\n",
+ slave->data->phy_id, slave->slave_num,
+ PTR_ERR(slave->phy));
+ slave->phy = NULL;
+ return;
+ }
}
+
+ dev_info(priv->dev, "phy found : id is : 0x%x\n", slave->phy->phy_id);
+
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
}
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1957,12 +1965,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
slave->port_vlan = data->dual_emac_res_vlan;
}
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct device_node *slave_node;
- struct cpsw_platform_data *data = &priv->data;
int i = 0, ret;
u32 prop;
@@ -2050,7 +2057,8 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
if (strcmp(slave_node->name, "slave"))
continue;
- priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+ slave_data->phy_node = of_parse_phandle(slave_node,
+ "phy-handle", 0);
parp = of_get_property(slave_node, "phy_id", &lenp);
if (of_phy_is_fixed_link(slave_node)) {
struct device_node *phy_node;
@@ -2087,6 +2095,7 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
+ put_device(&mdio->dev);
} else {
dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
goto no_phy_slave;
@@ -2291,7 +2300,7 @@ static int cpsw_probe(struct platform_device *pdev)
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(priv, pdev)) {
+ if (cpsw_probe_dt(&priv->data, pdev)) {
dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index 442a7038e660c8..e50afd1b2eda09 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -18,6 +18,7 @@
#include <linux/phy.h>
struct cpsw_slave_data {
+ struct device_node *phy_node;
char phy_id[MII_BUS_ID_SIZE];
int phy_if;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 33bd3b902304f3..8ecb24186b7fe0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1517,6 +1517,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
static int match_first_device(struct device *dev, void *data)
{
+ if (dev->parent && dev->parent->of_node)
+ return of_device_is_compatible(dev->parent->of_node,
+ "ti,davinci_mdio");
+
return !strncmp(dev_name(dev), "davinci_mdio", 12);
}
@@ -2104,6 +2108,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 507bbb0355c231..f6108413adbab7 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
ret = of_mdiobus_register(bus, np1);
if (ret) {
mdiobus_free(bus);
+ lp->mii_bus = NULL;
return ret;
}
return 0;
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d95a50ae996dd4..8748e8c9ce96bc 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -89,10 +89,6 @@
static const char banner[] __initconst = KERN_INFO \
"AX.25: bpqether driver version 004\n";
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
@@ -515,8 +511,8 @@ static int bpq_new_device(struct net_device *edev)
bpq->ethdev = edev;
bpq->axdev = ndev;
- memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
- memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+ eth_broadcast_addr(bpq->dest_addr);
+ eth_broadcast_addr(bpq->acpt_addr);
err = register_netdevice(ndev);
if (err)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 0fbbba7a0cae38..f72c2967ae824a 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -932,7 +932,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
static int
at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
}
@@ -1108,8 +1108,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for saddr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -1117,8 +1116,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for pan id\n");
+ dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
__at86rf230_write(lp, RG_PAN_ID_0, pan);
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
@@ -1127,15 +1125,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u8 i, addr[8];
memcpy(addr, &filt->ieee_addr, 8);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for panc change\n");
+ dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
if (filt->pan_coord)
at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
else
@@ -1239,7 +1235,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-
static int
at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 43617ded377384..91de25c532746d 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@ struct fakelb_phy {
static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index a9268db4e349fc..ae02ce17c505d4 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -389,7 +389,12 @@ static int ipvlan_nl_changelink(struct net_device *dev,
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
- if (data && data[IFLA_IPVLAN_MODE]) {
+ if (!data)
+ return 0;
+ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
ipvlan_set_port_mode(port, nmode);
@@ -454,6 +459,8 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
struct ipvl_dev *tmp = netdev_priv(phy_dev);
phy_dev = tmp->phy_dev;
+ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
} else if (!netif_is_ipvlan_port(phy_dev)) {
err = ipvlan_port_create(phy_dev);
if (err < 0)
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index 4e3d2e7c697c76..e8c3a8c32534b4 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -518,7 +518,9 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
mtt = irda_get_mtt(skb);
pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);
- if (mtt)
+ if (mtt > 1000)
+ mdelay(mtt/1000);
+ else if (mtt)
udelay(mtt);
/* Enable DMA interrupt */
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index 49bbc682688356..9a7dca2bb61885 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
return rc;
/* make rcal=100, since rdb default is 000 */
- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
+ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
if (rc < 0)
return rc;
/* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
+ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
if (rc < 0)
return rc;
/* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
- rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
+ rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
return 0;
}
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index b2091c88b44dbb..ce16b26d49ff31 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -14,11 +14,18 @@
#ifndef _LINUX_BCM_PHY_LIB_H
#define _LINUX_BCM_PHY_LIB_H
+#include <linux/brcmphy.h>
#include <linux/phy.h>
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
+static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
+ u16 reg, u16 val)
+{
+ return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
+}
+
int bcm_phy_write_misc(struct phy_device *phydev,
u16 reg, u16 chl, u16 value);
int bcm_phy_read_misc(struct phy_device *phydev,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 03d4809a91268f..bffa70e462020a 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -48,10 +48,10 @@
static void r_rc_cal_reset(struct phy_device *phydev)
{
/* Reset R_CAL/RC_CAL Engine */
- bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
/* Disable Reset R_AL/RC_CAL Engine */
- bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
}
static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index dc934347ae286b..e6f564d50663b1 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -890,14 +890,14 @@ static void decode_txts(struct dp83640_private *dp83640,
struct phy_txts *phy_txts)
{
struct skb_shared_hwtstamps shhwtstamps;
+ struct dp83640_skb_info *skb_info;
struct sk_buff *skb;
- u64 ns;
u8 overflow;
+ u64 ns;
/* We must already have the skb that triggered this. */
-
+again:
skb = skb_dequeue(&dp83640->tx_queue);
-
if (!skb) {
pr_debug("have timestamp but tx_queue empty\n");
return;
@@ -912,6 +912,11 @@ static void decode_txts(struct dp83640_private *dp83640,
}
return;
}
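+	/* Drop queued skbs whose hardware timestamp never arrived;
+	 * without the timeout a single lost timestamp would leave a
+	 * stale skb at the head of tx_queue forever.
+	 */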
+ skb_info = (struct dp83640_skb_info *)skb->cb;
+ if (time_after(jiffies, skb_info->tmo)) {
+ kfree_skb(skb);
+ goto again;
+ }
ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1461,6 +1466,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
static void dp83640_txtstamp(struct phy_device *phydev,
struct sk_buff *skb, int type)
{
+ struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct dp83640_private *dp83640 = phydev->priv;
switch (dp83640->hwts_tx_en) {
@@ -1473,6 +1479,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
/* fall through */
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->tx_queue, skb);
break;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 88cb4592b6fbbc..ccefba7af96051 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -267,7 +267,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
- put_device(&bus->dev);
return -EINVAL;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 920391165f1829..ba84fc3637b12c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -28,6 +28,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <uapi/linux/mdio.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -287,6 +288,17 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+ int ret;
+
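+	/* Presumably the KSZ8061 errata workaround picked up from
+	 * upstream: rewriting the PMA/PMD device-ID register restores
+	 * reliable link-up after power-up or reset.
+	 */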
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -771,7 +783,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00fffff0,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz8061_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8179727d3423e4..70f26b30729cbc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1265,23 +1265,17 @@ static int gen10g_resume(struct phy_device *phydev)
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- /* The default values for phydev->supported are provided by the PHY
- * driver "features" member, we want to reset to sane defaults first
- * before supporting higher speeds.
- */
- phydev->supported &= PHY_DEFAULT_FEATURES;
-
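+	/* Rather than resetting supported to PHY_DEFAULT_FEATURES and
+	 * re-adding bits (which discarded driver-provided features),
+	 * only mask off the feature bits above the requested maximum
+	 * speed.
+	 */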
switch (max_speed) {
- default:
- return -ENOTSUPP;
- case SPEED_1000:
- phydev->supported |= PHY_1000BT_FEATURES;
+ case SPEED_10:
+ phydev->supported &= ~PHY_100BT_FEATURES;
/* fall through */
case SPEED_100:
- phydev->supported |= PHY_100BT_FEATURES;
- /* fall through */
- case SPEED_10:
- phydev->supported |= PHY_10BT_FEATURES;
+ phydev->supported &= ~PHY_1000BT_FEATURES;
+ break;
+ case SPEED_1000:
+ break;
+ default:
+ return -ENOTSUPP;
}
return 0;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 583d50f80b2468..02327e6c4819d4 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -442,6 +442,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
if (pskb_trim_rcsum(skb, len))
goto drop;
+ ph = pppoe_hdr(skb);
pn = pppoe_pernet(dev_net(dev));
/* Note that get_item does a sock_hold(), so sk_pppox(po)
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 12a627fcc02c5b..53c1f2bd0f24a5 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -577,6 +577,7 @@ static void pptp_sock_destruct(struct sock *sk)
pppox_unbind_sock(sk);
}
skb_queue_purge(&sk->sk_receive_queue);
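+	/* Drop the route cached on the socket; without this the dst
+	 * (and its device reference) leaks when a pptp socket dies.
+	 */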
+ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}
static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index e7034c55e7968d..6ef9188384cea9 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -215,9 +215,9 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* it just reports sending a packet to the target
* (without actual packet transfer).
*/
- dev_kfree_skb_any(skb);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
+ dev_kfree_skb_any(skb);
}
}
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e74709e4b5dd9c..267a9042315451 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -247,17 +247,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
-static bool __team_option_inst_tmp_find(const struct list_head *opts,
- const struct team_option_inst *needle)
-{
- struct team_option_inst *opt_inst;
-
- list_for_each_entry(opt_inst, opts, tmp_list)
- if (opt_inst == needle)
- return true;
- return false;
-}
-
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -983,7 +972,8 @@ static void team_port_disable(struct team *team,
static void ___team_compute_features(struct team *team)
{
struct team_port *port;
- u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
+ netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
+ NETIF_F_ALL_FOR_ALL;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
IFF_XMIT_DST_RELEASE_PERM;
@@ -1141,6 +1131,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return -EBUSY;
}
+ if (dev == port_dev) {
+ netdev_err(dev, "Cannot enslave team device to itself\n");
+ return -EINVAL;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
@@ -2441,7 +2436,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
int err = 0;
int i;
struct nlattr *nl_option;
- LIST_HEAD(opt_inst_list);
team = team_nl_team_get(info);
if (!team)
@@ -2457,6 +2451,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
struct nlattr *attr;
struct nlattr *attr_data;
+ LIST_HEAD(opt_inst_list);
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
u32 opt_array_index = 0;
@@ -2560,23 +2555,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
-
- /* dumb/evil user-space can send us duplicate opt,
- * keep only the last one
- */
- if (__team_option_inst_tmp_find(&opt_inst_list,
- opt_inst))
- continue;
-
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
goto team_put;
}
- }
- err = team_nl_send_event_options_get(team, &opt_inst_list);
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
+ if (err)
+ break;
+ }
team_put:
team_nl_team_put(team);
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index a1536d0d83a970..a00335b3786e04 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -305,6 +305,20 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
return 0;
}
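+/* Undo lb_bpf_func_set() when the load-balance mode exits: detach and
+ * destroy the attached BPF program so it is no longer leaked on team
+ * device removal.
+ */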
+static void lb_bpf_func_free(struct team *team)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct bpf_prog *fp;
+
+ if (!lb_priv->ex->orig_fprog)
+ return;
+
+ __fprog_destroy(lb_priv->ex->orig_fprog);
+ fp = rcu_dereference_protected(lb_priv->fp,
+ lockdep_is_held(&team->lock));
+ bpf_prog_destroy(fp);
+}
+
static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
@@ -619,6 +633,7 @@ static void lb_exit(struct team *team)
team_options_unregister(team, lb_options,
ARRAY_SIZE(lb_options));
+ lb_bpf_func_free(team);
cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
free_percpu(lb_priv->pcpu_stats);
kfree(lb_priv->ex);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 61c000d272e25e..8397d0565add12 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1481,7 +1481,9 @@ static void tun_setup(struct net_device *dev)
*/
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
- return -EINVAL;
+	/* Upstream reports "tun/tap creation via rtnetlink is not
+	 * supported." via NL_SET_ERR_MSG(); extack does not exist on
+	 * this kernel, so only the error code is returned. */
+ return -EOPNOTSUPP;
}
static struct rtnl_link_ops tun_link_ops __read_mostly = {
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index e02d9790bb6570..35d0306d071d10 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -575,6 +575,9 @@ int asix_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
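+	/* Refuse wake-on-LAN flags this adapter cannot honor instead of
+	 * silently ignoring them; the same guard is applied to the
+	 * other USB NIC drivers in this series.
+	 */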
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 4aa2aad75b203f..8842846fae1a47 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -566,6 +566,9 @@ ax88179_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= AX_MONITOR_MODE_RWLC;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 96a5028621c8b3..8edbccf06b7bd2 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -593,7 +593,7 @@ static const struct driver_info cdc_mbim_info_zlp = {
*/
static const struct driver_info cdc_mbim_info_ndp_to_end = {
.description = "CDC MBIM",
- .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
.bind = cdc_mbim_bind,
.unbind = cdc_mbim_unbind,
.manage_power = cdc_mbim_manage_power,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b8f1510ce89bfc..924618bb14cd35 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1075,7 +1075,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
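+	/* Reserve a worst-case, modulus-aligned NDP size up front so
+	 * the tail alignment performed when the NDP is finally placed
+	 * cannot push it past tx_max.
+	 */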
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ctx->max_ndp_size;
+ delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
else
delayed_ndp_size = 0;
@@ -1208,7 +1208,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
/* If requested, put NDP at end of frame. */
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data;
- cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max);
+ cdc_ncm_align_tail(skb_out, ctx->tx_ndp_modulus, 0, ctx->tx_max - ctx->max_ndp_size);
nth16->wNdpIndex = cpu_to_le16(skb_out->len);
memcpy(skb_put(skb_out, ctx->max_ndp_size), ctx->delayed_ndp16, ctx->max_ndp_size);
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 5e151e6a3e0930..3c7715ea40c163 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -255,14 +255,9 @@ static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
tx_overhead = 0x40;
len = skb->len;
- if (skb_headroom(skb) < tx_overhead) {
- struct sk_buff *skb2;
-
- skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+ if (skb_cow_head(skb, tx_overhead)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
__skb_push(skb, tx_overhead);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 111d907e0c117e..79cede19e0c497 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2825,6 +2825,12 @@ static int hso_get_config_data(struct usb_interface *interface)
return -EIO;
}
+ /* check if we have a valid interface */
+ if (if_num > 16) {
+ kfree(config_data);
+ return -EINVAL;
+ }
+
switch (config_data[if_num]) {
case 0x0:
result = 0;
@@ -2895,10 +2901,18 @@ static int hso_probe(struct usb_interface *interface,
/* Get the interface/port specification from either driver_info or from
* the device itself */
- if (id->driver_info)
+ if (id->driver_info) {
+		/* if_num is controlled by the device; driver_info is a
+		 * 0-terminated array. Make sure the access is in bounds! */
+ for (i = 0; i <= if_num; ++i)
+ if (((u32 *)(id->driver_info))[i] == 0)
+ goto exit;
port_spec = ((u32 *)(id->driver_info))[if_num];
- else
+ } else {
port_spec = hso_get_config_data(interface);
+ if (port_spec < 0)
+ goto exit;
+ }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 76465b117b72aa..f1f8227e73428d 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -140,7 +140,6 @@ struct ipheth_device {
struct usb_device *udev;
struct usb_interface *intf;
struct net_device *net;
- struct sk_buff *tx_skb;
struct urb *tx_urb;
struct urb *rx_urb;
unsigned char *tx_buf;
@@ -229,6 +228,7 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
+ case -EPROTO:
return;
case 0:
break;
@@ -280,7 +280,6 @@ static void ipheth_sndbulk_callback(struct urb *urb)
dev_err(&dev->intf->dev, "%s: urb status: %d\n",
__func__, status);
- dev_kfree_skb_irq(dev->tx_skb);
netif_wake_queue(dev->net);
}
@@ -410,7 +409,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
if (skb->len > IPHETH_BUF_SIZE) {
WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
dev->net->stats.tx_dropped++;
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -430,12 +429,11 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
dev_err(&dev->intf->dev, "%s: usb_submit_urb: %d\n",
__func__, retval);
dev->net->stats.tx_errors++;
- dev_kfree_skb_irq(skb);
+ dev_kfree_skb_any(skb);
} else {
- dev->tx_skb = skb;
-
dev->net->stats.tx_packets++;
dev->net->stats.tx_bytes += skb->len;
+ dev_consume_skb_any(skb);
netif_stop_queue(net);
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index cd93220c9b45f0..a628db738b8ab8 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -812,18 +812,12 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
}
/* We now decide whether we can put our special header into the sk_buff */
- if (skb_cloned(skb) || skb_headroom(skb) < 2) {
- /* no such luck - we make our own */
- struct sk_buff *copied_skb;
- copied_skb = skb_copy_expand(skb, 2, 0, GFP_ATOMIC);
- dev_kfree_skb_irq(skb);
- skb = copied_skb;
- if (!copied_skb) {
- kaweth->stats.tx_errors++;
- netif_start_queue(net);
- spin_unlock_irq(&kaweth->device_lock);
- return NETDEV_TX_OK;
- }
+ if (skb_cow_head(skb, 2)) {
+ kaweth->stats.tx_errors++;
+ netif_start_queue(net);
+ spin_unlock_irq(&kaweth->device_lock);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
private_header = (__le16 *)__skb_push(skb, 2);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index a6d429950cb053..45a6a7cae4bfc3 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -902,6 +902,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
netif_carrier_on(dev->net);
+
+ tasklet_schedule(&dev->bh);
}
return ret;
@@ -1049,19 +1051,10 @@ static int lan78xx_set_wol(struct net_device *netdev,
if (ret < 0)
return ret;
- pdata->wol = 0;
- if (wol->wolopts & WAKE_UCAST)
- pdata->wol |= WAKE_UCAST;
- if (wol->wolopts & WAKE_MCAST)
- pdata->wol |= WAKE_MCAST;
- if (wol->wolopts & WAKE_BCAST)
- pdata->wol |= WAKE_BCAST;
- if (wol->wolopts & WAKE_MAGIC)
- pdata->wol |= WAKE_MAGIC;
- if (wol->wolopts & WAKE_PHY)
- pdata->wol |= WAKE_PHY;
- if (wol->wolopts & WAKE_ARP)
- pdata->wol |= WAKE_ARP;
+ if (wol->wolopts & ~WAKE_ALL)
+ return -EINVAL;
+
+ pdata->wol = wol->wolopts;
device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d72205f06a1de2..3b67140eed73d2 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -635,6 +635,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+ {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
{QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
{QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index c748f2d3dccde2..19f200f38f03b7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3895,7 +3895,8 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
- napi_disable(&tp->napi);
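+	/* On surprise removal napi has already been disabled by the
+	 * disconnect path; a second napi_disable() here would block
+	 * forever (assumed from the matching upstream fix).
+	 */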
+ if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
@@ -4432,6 +4433,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (!rtl_can_wakeup(tp))
return -EOPNOTSUPP;
+ if (wol->wolopts & ~WAKE_ANY)
+ return -EINVAL;
+
ret = usb_autopm_get_interface(tp->intf);
if (ret < 0)
goto out_set_wol;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 39672984dde1de..58b1e18fdd64d7 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
(netdev->flags & IFF_ALLMULTI)) {
rx_creg &= 0xfffe;
rx_creg |= 0x0002;
- dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+ dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
} else {
/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 7337e6c0e12633..234febc6e1d9c8 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -81,6 +81,9 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
u32 *data, int in_pm)
{
@@ -725,6 +728,9 @@ static int smsc75xx_ethtool_set_wol(struct net_device *net,
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -840,6 +846,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
return -EIO;
}
+ /* phy workaround for gig link */
+ smsc75xx_phy_gig_workaround(dev);
+
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
@@ -978,6 +987,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
return -EIO;
}
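+/* Gigabit link bring-up workaround: force the PHY into gig loopback
+ * until the link reports up, then pulse PMT_CTL_PHY_RST and poll for
+ * the reset bit to clear.
+ */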
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret = 0, timeout = 0;
+ u32 buf, link_up = 0;
+
+ /* Set the phy in Gig loopback */
+ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+ /* Wait for the link up */
+ do {
+ link_up = smsc75xx_link_ok_nopm(dev);
+ usleep_range(10000, 20000);
+ timeout++;
+ } while ((!link_up) && (timeout < 1000));
+
+ if (timeout >= 1000) {
+ netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+ return -EIO;
+ }
+
+ /* phy reset */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ buf |= PMT_CTL_PHY_RST;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ timeout = 0;
+ do {
+ usleep_range(10000, 20000);
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+ ret);
+ return ret;
+ }
+ timeout++;
+ } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+ if (timeout >= 100) {
+		netdev_warn(dev->net, "Timeout waiting for PHY reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int smsc75xx_reset(struct usbnet *dev)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -1444,6 +1509,7 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 66b3ab9f614eb0..b6b8aec73b2802 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -727,6 +727,9 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
int ret;
+ if (wolinfo->wolopts & ~SUPPORTED_WAKE)
+ return -EINVAL;
+
pdata->wolopts = wolinfo->wolopts & SUPPORTED_WAKE;
ret = device_set_wakeup_enable(&dev->udev->dev, pdata->wolopts);
@@ -1835,13 +1838,13 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
/* We do not advertise SG, so skbs should be already linearized */
BUG_ON(skb_shinfo(skb)->nr_frags);
- if (skb_headroom(skb) < overhead) {
- struct sk_buff *skb2 = skb_copy_expand(skb,
- overhead, 0, flags);
+ /* Make writable and expand header space by overhead if required */
+ if (skb_cow_head(skb, overhead)) {
+ /* Must deallocate here as returning NULL to indicate error
+ * means the skb won't be deallocated in the caller.
+ */
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
if (csum) {
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index a50df0d8fb9abb..004c955c1fd1b8 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -421,6 +421,9 @@ sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
struct usbnet *dev = netdev_priv(net);
u8 opt = 0;
+ if (wolinfo->wolopts & ~(WAKE_PHY | WAKE_MAGIC))
+ return -EINVAL;
+
if (wolinfo->wolopts & WAKE_PHY)
opt |= SR_MONITOR_LINK;
if (wolinfo->wolopts & WAKE_MAGIC)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index c41378214ede89..835129152fc481 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1229,6 +1229,14 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
}
}
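+	/* The netdevice can be brought down (and freed) concurrently;
+	 * hold rcu_read_lock() and re-check IFF_UP before touching the
+	 * per-cpu stats or handing the skb to the GRO cells.
+	 */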
+ rcu_read_lock();
+
+ if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
+ rcu_read_unlock();
+ atomic_long_inc(&vxlan->dev->rx_dropped);
+ goto drop;
+ }
+
stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
@@ -1237,6 +1245,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
gro_cells_receive(&vxlan->gro_cells, skb);
+ rcu_read_unlock();
+
return;
drop:
if (tun_dst)
@@ -1881,7 +1891,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
- struct net_device *dev = skb->dev;
+ struct net_device *dev;
int len = skb->len;
tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -1901,8 +1911,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
#endif
}
+ rcu_read_lock();
+ dev = skb->dev;
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ goto drop;
+ }
+
if (dst_vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
+ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -1915,8 +1932,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
} else {
+drop:
dev->stats.rx_dropped++;
}
+ rcu_read_unlock();
}
static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
@@ -2303,6 +2322,8 @@ static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ gro_cells_destroy(&vxlan->gro_cells);
+
vxlan_fdb_delete_default(vxlan);
free_percpu(dev->tstats);
@@ -3047,7 +3068,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
@@ -3256,10 +3276,8 @@ static void __net_exit vxlan_exit_net(struct net *net)
/* If vxlan->dev is in the same netns, it has already been added
* to the list by the previous loop.
*/
- if (!net_eq(dev_net(vxlan->dev), net)) {
- gro_cells_destroy(&vxlan->gro_cells);
+ if (!net_eq(dev_net(vxlan->dev), net))
unregister_netdevice_queue(vxlan->dev, &list);
- }
}
unregister_netdevice_many(&list);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 317bc79cc8b9b2..c178e12183474b 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1385,7 +1385,7 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
case 0x001:
printk(KERN_WARNING "%s: Master Abort (naughty)\n", dev->name);
break;
- case 0x010:
+ case 0x002:
printk(KERN_WARNING "%s: Target Abort (not so naughty)\n", dev->name);
break;
default:
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/Makefile b/drivers/net/wireless/ar10k/ath/ath10k/Makefile
index dace36a8172c14..0a7bdb41d9c272 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ar10k/ath/ath10k/Makefile
@@ -20,6 +20,7 @@ ath10k_core-$(CPTCFG_ATH10K_TRACING) += trace.o
ath10k_core-$(CPTCFG_THERMAL) += thermal.o
ath10k_core-$(CPTCFG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CPTCFG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CPTCFG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/ahb.c b/drivers/net/wireless/ar10k/ath/ath10k/ahb.c
index bd62bc19e758ca..30285c23962b68 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/ahb.c
@@ -34,6 +34,9 @@ static const struct of_device_id ath10k_ahb_of_match[] = {
MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+#define QCA4019_SRAM_ADDR 0x000C0000
+#define QCA4019_SRAM_LEN 0x00040000 /* 256 KiB */
+
static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
@@ -756,6 +759,25 @@ out:
return ret;
}
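+/* Convert a target CPU address to the diag copy-engine address space:
+ * QCA4019 SRAM is accessible 1:1, anything else is routed through the
+ * 0x100000 window selected via PCIE_BAR_REG_ADDRESS.
+ */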
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+ if (region >= QCA4019_SRAM_ADDR && region <=
+ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+ /* SRAM contents for QCA4019 can be directly accessed and
+ * no conversions are required
+ */
+ val |= region;
+ } else {
+ val |= 0x100000 | region;
+ }
+
+ return val;
+}
+
static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
@@ -823,6 +845,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->bus_ops = &ath10k_ahb_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ret = ath10k_pci_setup_resource(ar);
if (ret) {
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/bmi.c b/drivers/net/wireless/ar10k/ath/ath10k/bmi.c
index 135d3b49119d25..5e4e4c63c91d16 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/bmi.c
@@ -388,3 +388,26 @@ int ath10k_bmi_fast_download(struct ath10k *ar,
return ret;
}
+
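+/* BMI_SET_APP_START records the firmware entry point on the target;
+ * like other BMI commands it is only valid before the BMI done message
+ * has been sent.
+ */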
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.set_app_start);
+ int ret;
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi set start command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_SET_APP_START);
+ cmd.set_app_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+		ath10k_warn(ar, "unable to set start address on the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/bmi.h b/drivers/net/wireless/ar10k/ath/ath10k/bmi.h
index a65f26267fe349..c07d39d36b3c8b 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/bmi.h
@@ -187,6 +187,35 @@ struct bmi_target_info {
u32 type;
};
+struct bmi_segmented_file_header {
+ __le32 magic_num;
+ __le32 file_flags;
+ u8 data[];
+};
+
+struct bmi_segmented_metadata {
+ __le32 addr;
+ __le32 length;
+ u8 data[];
+};
+
+#define BMI_SGMTFILE_MAGIC_NUM 0x544d4753 /* "SGMT" */
+#define BMI_SGMTFILE_FLAG_COMPRESS 1
+
+/* Special values for bmi_segmented_metadata.length (all have high bit set) */
+
+/* end of segmented data */
+#define BMI_SGMTFILE_DONE 0xffffffff
+
+/* Board Data segment */
+#define BMI_SGMTFILE_BDDATA 0xfffffffe
+
+/* set beginning address */
+#define BMI_SGMTFILE_BEGINADDR 0xfffffffd
+
+/* immediate function execution */
+#define BMI_SGMTFILE_EXEC 0xfffffffc
+
/* in msec */
#define BMI_COMMUNICATION_TIMEOUT_HZ (2 * HZ)
@@ -234,4 +263,6 @@ int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
const void *buffer, u32 length);
int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
+int ath10k_bmi_set_start(struct ath10k *ar, u32 address);
+
#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/ce.c b/drivers/net/wireless/ar10k/ath/ath10k/ce.c
index 52ac97bf8a4d4a..55b105137436a2 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/ce.c
@@ -444,14 +444,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
*/
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
- struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,14 +472,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
desc->nbytes = 0;
/* Return data from completed destination descriptor */
- *bufferp = __le32_to_cpu(sdesc.addr);
*nbytesp = nbytes;
- *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
- if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
- *flagsp = CE_RECV_FLAG_SWAPPED;
- else
- *flagsp = 0;
if (per_transfer_contextp)
*per_transfer_contextp =
@@ -501,10 +490,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +499,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->ce_lock);
ret = ath10k_ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
- bufferp, nbytesp,
- transfer_idp, flagsp);
+ nbytesp);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -1121,3 +1106,42 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
ce_state->src_ring = NULL;
ce_state->dest_ring = NULL;
}
+
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_crash_data ce;
+ u32 addr, id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_err(ar, "Copy Engine register dump:\n");
+
+ spin_lock_bh(&ar_pci->ce_lock);
+ for (id = 0; id < CE_COUNT; id++) {
+ addr = ath10k_ce_base_address(ar, id);
+ ce.base_addr = cpu_to_le32(addr);
+
+ ce.src_wr_idx =
+ cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
+ ce.src_r_idx =
+ cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
+ ce.dst_wr_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
+ ce.dst_r_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
+
+ if (crash_data)
+ crash_data->ce_crash_data[id] = ce;
+
+ ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
+ le32_to_cpu(ce.base_addr),
+ le32_to_cpu(ce.src_wr_idx),
+ le32_to_cpu(ce.src_r_idx),
+ le32_to_cpu(ce.dst_wr_idx),
+ le32_to_cpu(ce.dst_r_idx));
+ }
+
+ spin_unlock_bh(&ar_pci->ce_lock);
+}
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/ce.h b/drivers/net/wireless/ar10k/ath/ath10k/ce.h
index 0bd351baff53e1..c9773233aafc17 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/ce.h
@@ -20,8 +20,6 @@
#include "hif.h"
-/* Maximum number of Copy Engine's supported */
-#define CE_COUNT_MAX 12
#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
/* Descriptor rings must be aligned to this boundary */
@@ -177,10 +175,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
*/
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
@@ -212,10 +207,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Support clean shutdown by allowing the caller to cancel
@@ -233,6 +225,8 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_disable_interrupts(struct ath10k *ar);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data);
/* ce_attr.flags values */
/* Use NonSnooping PCIe accesses? */
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/core.c b/drivers/net/wireless/ar10k/ath/ath10k/core.c
index dec19be471ff63..f51dfa5586f694 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/core.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/core.c
@@ -18,6 +18,8 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/of.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
#include "core.h"
#include "mac.h"
@@ -29,6 +31,7 @@
#include "htt.h"
#include "testmode.h"
#include "wmi-ops.h"
+#include "coredump.h"
unsigned int ath10k_debug_mask;
static unsigned int ath10k_cryptmode_param;
@@ -36,17 +39,25 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
+/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
+ * by default.
+ */
+unsigned long ath10k_coredump_mask = 0x3;
+
+/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@@ -69,6 +80,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .fw_diag_ce_download = false,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -88,6 +100,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .fw_diag_ce_download = false,
+ .tx_sk_pacing_shift = SK_PACING_SHIFT_6174,
},
{
.id = QCA6174_HW_2_1_VERSION,
@@ -108,6 +122,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .fw_diag_ce_download = false,
+ .tx_sk_pacing_shift = SK_PACING_SHIFT_6174,
},
{
.id = QCA6174_HW_3_0_VERSION,
@@ -128,6 +144,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .fw_diag_ce_download = false,
+ .tx_sk_pacing_shift = SK_PACING_SHIFT_6174,
},
{
.id = QCA6174_HW_3_2_VERSION,
@@ -151,6 +169,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &qca6174_ops,
.hw_clk = qca6174_clk,
.target_cpu_freq = 176000000,
+ .fw_diag_ce_download = true,
+ .tx_sk_pacing_shift = SK_PACING_SHIFT_6174,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -176,6 +196,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
+ .fw_diag_ce_download = false,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@@ -195,6 +216,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .fw_diag_ce_download = false,
+ .tx_sk_pacing_shift = SK_PACING_SHIFT_9377,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@@ -214,6 +237,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
.hw_ops = &qca988x_ops,
+ .fw_diag_ce_download = true,
+ .tx_sk_pacing_shift = SK_PACING_SHIFT_9377,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@@ -239,6 +264,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
+ .fw_diag_ce_download = false,
},
};
@@ -607,6 +633,94 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
return 0;
}
+static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath10k *ar = data;
+ const char *bdf_ext;
+ const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC;
+ u8 bdf_enabled;
+ int i;
+
+ if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET);
+ if (!bdf_enabled) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ bdf_ext = (char *)hdr + hdr->length;
+
+ if (memcmp(bdf_ext, magic, strlen(magic)) != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ for (i = 0; i < strlen(bdf_ext); i++) {
+ if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic suffix */
+ if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic),
+ sizeof(ar->id.bdf_ext)) < 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ bdf_ext);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext);
+}
+
+static int ath10k_core_check_smbios(struct ath10k *ar)
+{
+ ar->id.bdf_ext[0] = '\0';
+ dmi_walk(ath10k_core_check_bdfext, ar);
+
+ if (ar->id.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
+
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ const char *variant = NULL;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
+
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
@@ -690,14 +804,24 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
"boot uploading firmware image %p len %d mode %s\n",
data, data_len, mode_name);
- ret = ath10k_bmi_fast_download(ar, address, data, data_len);
- if (ret) {
- ath10k_err(ar, "failed to download %s firmware: %d\n",
- mode_name, ret);
- return ret;
+	/* Check whether the device supports downloading firmware via
+	 * the diag copy engine, which greatly reduces download time.
+	 */
+ if (ar->hw_params.fw_diag_ce_download) {
+ ret = ath10k_hw_diag_fast_download(ar, address,
+ data, data_len);
+ if (ret == 0)
+ /* firmware upload via diag ce was successful */
+ return 0;
+
+ ath10k_warn(ar,
+ "failed to upload firmware via diag ce, trying BMI: %d",
+ ret);
}
- return ret;
+ return ath10k_bmi_fast_download(ar, address,
+ data, data_len);
}
static void ath10k_core_free_board_files(struct ath10k *ar)
@@ -914,6 +1038,23 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
case ATH10K_BD_IE_BOARD:
ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
boardname);
+ if (ret == -ENOENT && ar->id.bdf_ext[0] != '\0') {
+ /* try default bdf if variant was not found */
+ char *s, *v = ",variant=";
+ char boardname2[100];
+
+ strlcpy(boardname2, boardname,
+ sizeof(boardname2));
+
+ s = strstr(boardname2, v);
+ if (s)
+ *s = '\0'; /* strip ",variant=%s" */
+
+ ret = ath10k_core_parse_bd_ie_board(ar, data,
+ ie_len,
+ boardname2);
+ }
+
if (ret == -ENOENT)
/* no match found, continue */
break;
@@ -951,21 +1092,27 @@ err:
static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
size_t name_len)
{
+ /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
+ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+
+ if (ar->id.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ar->id.bdf_ext);
+
if (ar->id.bmi_ids_valid) {
scnprintf(name, name_len,
- "bus=%s,bmi-chip-id=%d,bmi-board-id=%d",
+ "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
ath10k_bus_str(ar->hif.bus),
ar->id.bmi_chip_id,
- ar->id.bmi_board_id);
+ ar->id.bmi_board_id, variant);
goto out;
}
scnprintf(name, name_len,
- "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x",
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
ath10k_bus_str(ar->hif.bus),
ar->id.vendor, ar->id.device,
- ar->id.subsystem_vendor, ar->id.subsystem_device);
-
+ ar->id.subsystem_vendor, ar->id.subsystem_device, variant);
out:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
@@ -1385,6 +1532,7 @@ static int ath10k_init_hw_params(struct ath10k *ar)
static void ath10k_core_restart(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ int ret;
set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
@@ -1435,6 +1583,11 @@ static void ath10k_core_restart(struct work_struct *work)
}
mutex_unlock(&ar->conf_mutex);
+
+ ret = ath10k_coredump_submit(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
+ ret);
}
static int ath10k_core_init_firmware_features(struct ath10k *ar)
@@ -1860,6 +2013,14 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
+ ret = ath10k_core_check_smbios(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+
ret = ath10k_core_fetch_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
@@ -1927,10 +2088,16 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_release_fw;
}
+ status = ath10k_coredump_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to register coredump\n");
+ goto err_unregister_mac;
+ }
+
status = ath10k_debug_register(ar);
if (status) {
ath10k_err(ar, "unable to initialize debugfs\n");
- goto err_unregister_mac;
+ goto err_unregister_coredump;
}
status = ath10k_spectral_create(ar);
@@ -1953,6 +2120,8 @@ err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
ath10k_debug_destroy(ar);
+err_unregister_coredump:
+ ath10k_coredump_unregister(ar);
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
@@ -2083,12 +2252,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->register_work, ath10k_core_register_work);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
- ret = ath10k_debug_create(ar);
+ ret = ath10k_coredump_create(ar);
if (ret)
goto err_free_aux_wq;
+ ret = ath10k_debug_create(ar);
+ if (ret)
+ goto err_free_coredump;
+
return ar;
+err_free_coredump:
+ ath10k_coredump_destroy(ar);
+
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
@@ -2110,6 +2286,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
+ ath10k_coredump_destroy(ar);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar);
}
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/core.h b/drivers/net/wireless/ar10k/ath/ath10k/core.h
index 006188631bc7dd..1707bf1b01b482 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/core.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/core.h
@@ -65,6 +65,23 @@
#define ATH10K_KEEPALIVE_MAX_IDLE 3895
#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* Offset pointing to Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8
+
+/* Board Data File Name Extension string length.
+ * String format: BDF_<Customer ID>_<Extension>\0
+ */
+#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20
+
+/* The magic used by QCA spec */
+#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
struct ath10k;
enum ath10k_bus {
@@ -393,13 +410,32 @@ struct ath10k_vif_iter {
struct ath10k_vif *arvif;
};
+/* Copy Engine register dump, protected by ce-lock */
+struct ath10k_ce_crash_data {
+ __le32 base_addr;
+ __le32 src_wr_idx;
+ __le32 src_r_idx;
+ __le32 dst_wr_idx;
+ __le32 dst_r_idx;
+};
+
+struct ath10k_ce_crash_hdr {
+ __le32 ce_count;
+ __le32 reserved[3]; /* for future use */
+ struct ath10k_ce_crash_data entries[];
+};
+
+#define MAX_MEM_DUMP_TYPE 5
+
/* used for crash-dump storage, protected by data-lock */
struct ath10k_fw_crash_data {
- bool crashed_since_read;
-
uuid_le uuid;
struct timespec timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
+ struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+ u8 *ramdump_buf;
+ size_t ramdump_buf_len;
};
struct ath10k_debug {
@@ -701,6 +737,12 @@ struct ath10k {
/* hw specific clock control parameters */
const struct ath10k_hw_clk_params *hw_clk;
int target_cpu_freq;
+
+	/* target supports firmware download via the diag copy engine */
+ bool fw_diag_ce_download;
+
+	/* sk_pacing_shift value overriding the ieee80211_hw default */
+ u8 tx_sk_pacing_shift;
} hw_params;
const struct firmware *board;
@@ -732,6 +774,8 @@ struct ath10k {
bool bmi_ids_valid;
u8 bmi_board_id;
u8 bmi_chip_id;
+
+ char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
} id;
int fw_api;
@@ -856,6 +900,12 @@ struct ath10k {
struct ath10k_spec_scan config;
} spectral;
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct ath10k_fw_crash_data *fw_crash_data;
+ } coredump;
+#endif
+
struct {
/* protected by conf_mutex */
const struct firmware *utf;
@@ -883,6 +933,8 @@ struct ath10k {
u8 drv_priv[0] __aligned(sizeof(void *));
};
+extern unsigned long ath10k_coredump_mask;
+
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/coredump.c b/drivers/net/wireless/ar10k/ath/ath10k/coredump.c
new file mode 100644
index 00000000000000..91cd22822ecac5
--- /dev/null
+++ b/drivers/net/wireless/ar10k/ath/ath10k/coredump.c
@@ -0,0 +1,954 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A064},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* RTC_SOC_BASE_ADDRESS */
+ .start = 0x0,
+
+ /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+ .len = 0x800 - 0x0,
+
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* STEREO_BASE_ADDRESS */
+ .start = 0x27000,
+
+ /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+ .len = 0x60000 - 0x27000,
+
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw21_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw21_register_sections),
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0xa8000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_register_sections),
+ },
+ },
+
+ /* IRAM dump must be put last */
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+ {
+ .hw_id = QCA6174_HW_1_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_3_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_2_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw21_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9377_HW_1_1_DEV_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+ const struct ath10k_hw_mem_layout *hw;
+ const struct ath10k_mem_region *mem_region;
+ size_t size = 0;
+ int i;
+
+ hw = ath10k_coredump_get_mem_layout(ar);
+
+ if (!hw)
+ return 0;
+
+ mem_region = &hw->region_table.regions[0];
+
+ for (i = 0; i < hw->region_table.size; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+	/* make sure it is 16-byte aligned for the debug message printout */
+ size = ALIGN(size, 16);
+
+ return size;
+}
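
For a concrete feel of the arithmetic above, here is a standalone sketch (not part of the patch) that reproduces the computation for the qca6174_hw30_mem_regions table; it assumes struct ath10k_dump_ram_data_hdr packs to 12 bytes (three __le32 fields, the flexible array member adding nothing):

#include <stdio.h>
#include <stdint.h>

#define ALIGN16(x) (((x) + 15u) & ~15u)

int main(void)
{
	/* region lengths copied from qca6174_hw30_mem_regions */
	const uint32_t lens[] = {
		0xa8000,		/* DRAM */
		0x18000,		/* AXI */
		0x80020 - 0x800,	/* REG_TOTAL */
		0x00080000,		/* IRAM1 */
		0x00040000,		/* IRAM2 */
	};
	uint32_t size = 0;
	size_t i, n = sizeof(lens) / sizeof(lens[0]);

	for (i = 0; i < n; i++)
		size += lens[i];

	/* one 12-byte ath10k_dump_ram_data_hdr per region */
	size += n * 12;

	printf("ramdump buffer: 0x%x bytes\n", ALIGN16(size)); /* 0x1ff860 */
	return 0;
}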
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ int i;
+
+ if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ return NULL;
+
+ if (WARN_ON(ar->target_version == 0))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+ if (ar->target_version == hw_mem_layouts[i].hw_id)
+ return &hw_mem_layouts[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return NULL;
+
+ uuid_le_gen(&crash_data->uuid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+ struct ath10k_ce_crash_hdr *ce_hdr;
+ struct ath10k_dump_file_data *dump_data;
+ struct ath10k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*dump_data);
+ size_t len, sofar = 0;
+ unsigned char *buf;
+
+ len = hdr_len;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* This is going to get big when we start dumping FW RAM and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+
+ dump_data = (struct ath10k_dump_file_data *)(buf);
+ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ sizeof(dump_data->df_magic));
+ dump_data->len = cpu_to_le32(len);
+
+ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+ memcpy(&dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
+ dump_data->chip_id = cpu_to_le32(ar->chip_id);
+ dump_data->bus_type = cpu_to_le32(0);
+ dump_data->target_version = cpu_to_le32(ar->target_version);
+ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ sizeof(dump_data->fw_ver));
+
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
+ sizeof(dump_data->kernel_ver));
+
+ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+ memcpy(dump_tlv->tlv_data, &crash_data->registers,
+ sizeof(crash_data->registers));
+ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+ }
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+ ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+ memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+ memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+ }
+
+ /* Gather ram dump */
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return dump_data;
+}
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+ struct ath10k_dump_file_data *dump;
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ dump = ath10k_coredump_build(ar);
+ if (!dump) {
+ ath10k_warn(ar, "no crash dump data found for devcoredump");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+ return 0;
+}
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+ if (!ar->coredump.fw_crash_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+ crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+ if (!crash_data->ramdump_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+ if (ar->coredump.fw_crash_data->ramdump_buf) {
+ vfree(ar->coredump.fw_crash_data->ramdump_buf);
+ ar->coredump.fw_crash_data->ramdump_buf = NULL;
+ ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+ }
+
+ vfree(ar->coredump.fw_crash_data);
+ ar->coredump.fw_crash_data = NULL;
+}
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/coredump.h b/drivers/net/wireless/ar10k/ath/ath10k/coredump.h
new file mode 100644
index 00000000000000..59027637fb5e0d
--- /dev/null
+++ b/drivers/net/wireless/ar10k/ath/ath10k/coredump.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ */
+enum ath10k_fw_crash_dump_type {
+ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+ ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+ /* contains multiple struct ath10k_dump_ram_data_hdr */
+ ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+ ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+ /* see ath10k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+ /* dump file information */
+
+ /* "ATH10K-FW-DUMP" */
+ char df_magic[16];
+
+ __le32 len;
+
+ /* file dump version */
+ __le32 version;
+
+ /* some info we can get from ath10k struct that might help */
+
+ uuid_le uuid;
+
+ __le32 chip_id;
+
+ /* 0 for now, in place for later hardware */
+ __le32 bus_type;
+
+ __le32 target_version;
+ __le32 fw_version_major;
+ __le32 fw_version_minor;
+ __le32 fw_version_release;
+ __le32 fw_version_build;
+ __le32 phy_capability;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 num_rf_chains;
+
+ /* firmware version string */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ /* Kernel related information */
+
+ /* time-of-day stamp */
+ __le64 tv_sec;
+
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+
+ /* LINUX_VERSION_CODE */
+ __le32 kernel_ver_code;
+
+ /* VERMAGIC_STRING */
+ char kernel_ver[64];
+
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+
+ /* struct ath10k_tlv_dump_data + more */
+ u8 data[0];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+ /* enum ath10k_mem_region_type */
+ __le32 region_type;
+
+ __le32 start;
+
+ /* length of payload data, not including this header */
+ __le32 length;
+
+ u8 data[0];
+};
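
To illustrate how the layout above is consumed, here is a minimal user-space sketch that walks the TLVs in a dump blob (for example one read from the devcoredump data file). It assumes a little-endian host, so the __le32 fields are read directly, and it mirrors the packed structs above rather than including kernel headers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {			/* mirrors struct ath10k_tlv_dump_data */
	uint32_t type;
	uint32_t tlv_len;
	uint8_t  data[];
} __attribute__((packed));

/* hdr_len is the size of struct ath10k_dump_file_data */
static void walk_tlvs(const uint8_t *blob, size_t blob_len, size_t hdr_len)
{
	size_t off = hdr_len;

	/* df_magic is NUL-terminated by strlcpy, so compare 15 bytes */
	if (blob_len < 16 || memcmp(blob, "ATH10K-FW-DUMP", 15) != 0) {
		fprintf(stderr, "not an ath10k dump\n");
		return;
	}

	while (off + sizeof(struct tlv) <= blob_len) {
		const struct tlv *t = (const void *)(blob + off);

		printf("tlv type %u, %u bytes\n", t->type, t->tlv_len);
		off += sizeof(*t) + t->tlv_len;
	}
}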
+
+/* magic number to fill the holes not copied due to sections in regions */
+#define ATH10K_MAGIC_NOT_COPIED 0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+ ATH10K_MEM_REGION_TYPE_REG = 1,
+ ATH10K_MEM_REGION_TYPE_DRAM = 2,
+ ATH10K_MEM_REGION_TYPE_AXI = 3,
+ ATH10K_MEM_REGION_TYPE_IRAM1 = 4,
+ ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
+};
+
+/* Define a section of the region which should be copied. Not every part
+ * of the memory can be copied safely; some registers, for example, must
+ * not be read. Sections are therefore used to define what is safe to copy.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
+ * also obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
+ * we may encounter errors in the dump processing.
+ */
+struct ath10k_mem_section {
+ u32 start;
+ u32 end;
+};
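
The ordering constraint stated above can be checked mechanically. A hypothetical helper (not part of the patch) illustrating the rule:

/* Returns true when a section table obeys
 * start0 < stop0 < start1 < stop1 < ..., as required above.
 */
static bool ath10k_mem_sections_sorted(const struct ath10k_mem_section *s,
				       u32 n)
{
	u32 i;

	for (i = 0; i < n; i++) {
		if (s[i].start >= s[i].end)
			return false;
		if (i > 0 && s[i - 1].end >= s[i].start)
			return false;
	}

	return true;
}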
+
+/* One region of a memory layout. If the sections field is NULL, the
+ * entire region is copied. If sections is non-NULL, only the areas
+ * specified in sections are copied and the rest of the areas are filled
+ * with ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+ enum ath10k_mem_region_type type;
+ u32 start;
+ u32 len;
+
+ const char *name;
+
+ struct {
+ const struct ath10k_mem_section *sections;
+ u32 size;
+ } section_table;
+};
+
+/* Contains the memory layout of a hardware version identified with the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+ u32 hw_id;
+
+ struct {
+ const struct ath10k_mem_region *regions;
+ int size;
+ } region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/debug.c b/drivers/net/wireless/ar10k/ath/ath10k/debug.c
index c916da6b78ced9..56e05a8b361f5c 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/debug.c
@@ -18,7 +18,6 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
-#include <linux/utsname.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
@@ -30,85 +29,6 @@
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
-#define ATH10K_FW_CRASH_DUMP_VERSION 1
-
-/**
- * enum ath10k_fw_crash_dump_type - types of data in the dump file
- * @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
- */
-enum ath10k_fw_crash_dump_type {
- ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
-
- ATH10K_FW_CRASH_DUMP_MAX,
-};
-
-struct ath10k_tlv_dump_data {
- /* see ath10k_fw_crash_dump_type above */
- __le32 type;
-
- /* in bytes */
- __le32 tlv_len;
-
- /* pad to 32-bit boundaries as needed */
- u8 tlv_data[];
-} __packed;
-
-struct ath10k_dump_file_data {
- /* dump file information */
-
- /* "ATH10K-FW-DUMP" */
- char df_magic[16];
-
- __le32 len;
-
- /* file dump version */
- __le32 version;
-
- /* some info we can get from ath10k struct that might help */
-
- u8 uuid[16];
-
- __le32 chip_id;
-
- /* 0 for now, in place for later hardware */
- __le32 bus_type;
-
- __le32 target_version;
- __le32 fw_version_major;
- __le32 fw_version_minor;
- __le32 fw_version_release;
- __le32 fw_version_build;
- __le32 phy_capability;
- __le32 hw_min_tx_power;
- __le32 hw_max_tx_power;
- __le32 ht_cap_info;
- __le32 vht_cap_info;
- __le32 num_rf_chains;
-
- /* firmware version string */
- char fw_ver[ETHTOOL_FWVERS_LEN];
-
- /* Kernel related information */
-
- /* time-of-day stamp */
- __le64 tv_sec;
-
- /* time-of-day stamp, nano-seconds */
- __le64 tv_nsec;
-
- /* LINUX_VERSION_CODE */
- __le32 kernel_ver_code;
-
- /* VERMAGIC_STRING */
- char kernel_ver[64];
-
- /* room for growth w/out changing binary format */
- u8 unused[128];
-
- /* struct ath10k_tlv_dump_data + more */
- u8 data[0];
-} __packed;
-
void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
@@ -601,15 +521,23 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
- char buf[32];
+ char buf[32] = {0};
+ ssize_t rc;
int ret;
- mutex_lock(&ar->conf_mutex);
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
- simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
- /* make sure that buf is null terminated */
- buf[sizeof(buf) - 1] = 0;
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_RESTARTED) {
@@ -617,12 +545,6 @@ static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
goto exit;
}
- /* drop the possible '\n' from the end */
- if (buf[count - 1] == '\n') {
- buf[count - 1] = 0;
- count--;
- }
-
if (!strcmp(buf, "soft")) {
ath10k_info(ar, "simulating soft firmware crash\n");
ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
@@ -685,138 +607,6 @@ static const struct file_operations fops_chip_id = {
.llseek = default_llseek,
};
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
- struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
-
- lockdep_assert_held(&ar->data_lock);
-
- crash_data->crashed_since_read = true;
- uuid_le_gen(&crash_data->uuid);
- getnstimeofday(&crash_data->timestamp);
-
- return crash_data;
-}
-EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
-
-static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar)
-{
- struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
- struct ath10k_dump_file_data *dump_data;
- struct ath10k_tlv_dump_data *dump_tlv;
- int hdr_len = sizeof(*dump_data);
- unsigned int len, sofar = 0;
- unsigned char *buf;
-
- len = hdr_len;
- len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
- sofar += hdr_len;
-
- /* This is going to get big when we start dumping FW RAM and such,
- * so go ahead and use vmalloc.
- */
- buf = vzalloc(len);
- if (!buf)
- return NULL;
-
- spin_lock_bh(&ar->data_lock);
-
- if (!crash_data->crashed_since_read) {
- spin_unlock_bh(&ar->data_lock);
- vfree(buf);
- return NULL;
- }
-
- dump_data = (struct ath10k_dump_file_data *)(buf);
- strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
- sizeof(dump_data->df_magic));
- dump_data->len = cpu_to_le32(len);
-
- dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
-
- memcpy(dump_data->uuid, &crash_data->uuid, sizeof(dump_data->uuid));
- dump_data->chip_id = cpu_to_le32(ar->chip_id);
- dump_data->bus_type = cpu_to_le32(0);
- dump_data->target_version = cpu_to_le32(ar->target_version);
- dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
- dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
- dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
- dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
- dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
- dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
- dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
- dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
- dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
- dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
-
- strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
- sizeof(dump_data->fw_ver));
-
- dump_data->kernel_ver_code = 0;
- strlcpy(dump_data->kernel_ver, init_utsname()->release,
- sizeof(dump_data->kernel_ver));
-
- dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
- dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
-
- /* Gather crash-dump */
- dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
- dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
- dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
- memcpy(dump_tlv->tlv_data, &crash_data->registers,
- sizeof(crash_data->registers));
- sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
-
- ar->debug.fw_crash_data->crashed_since_read = false;
-
- spin_unlock_bh(&ar->data_lock);
-
- return dump_data;
-}
-
-static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
-{
- struct ath10k *ar = inode->i_private;
- struct ath10k_dump_file_data *dump;
-
- dump = ath10k_build_dump_file(ar);
- if (!dump)
- return -ENODATA;
-
- file->private_data = dump;
-
- return 0;
-}
-
-static ssize_t ath10k_fw_crash_dump_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath10k_dump_file_data *dump_file = file->private_data;
-
- return simple_read_from_buffer(user_buf, count, ppos,
- dump_file,
- le32_to_cpu(dump_file->len));
-}
-
-static int ath10k_fw_crash_dump_release(struct inode *inode,
- struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static const struct file_operations fops_fw_crash_dump = {
- .open = ath10k_fw_crash_dump_open,
- .read = ath10k_fw_crash_dump_read,
- .release = ath10k_fw_crash_dump_release,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
static ssize_t ath10k_reg_addr_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2226,10 +2016,6 @@ static const struct file_operations fops_fw_checksums = {
int ath10k_debug_create(struct ath10k *ar)
{
- ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
- if (!ar->debug.fw_crash_data)
- return -ENOMEM;
-
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
@@ -2239,9 +2025,6 @@ int ath10k_debug_create(struct ath10k *ar)
void ath10k_debug_destroy(struct ath10k *ar)
{
- vfree(ar->debug.fw_crash_data);
- ar->debug.fw_crash_data = NULL;
-
ath10k_debug_fw_stats_reset(ar);
kfree(ar->debug.tpc_stats);
@@ -2276,9 +2059,6 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_simulate_fw_crash);
- debugfs_create_file("fw_crash_dump", S_IRUSR, ar->debug.debugfs_phy,
- ar, &fops_fw_crash_dump);
-
debugfs_create_file("reg_addr", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_reg_addr);
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/debug.h b/drivers/net/wireless/ar10k/ath/ath10k/debug.h
index 16ff25f51deae9..85eac23e5538b6 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/debug.h
@@ -80,10 +80,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
void ath10k_debug_tpc_stats_process(struct ath10k *ar,
struct ath10k_tpc_stats *tpc_stats);
-struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
-
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -138,12 +136,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
{
}
-static inline struct ath10k_fw_crash_data *
-ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
-{
- return NULL;
-}
-
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/hw.c b/drivers/net/wireless/ar10k/ath/ath10k/hw.c
index e4dd18739a2cb2..ddf50e830a8449 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/hw.c
@@ -15,9 +15,11 @@
*/
#include <linux/types.h>
+#include <linux/bitfield.h>
#include "core.h"
#include "hw.h"
#include "bmi.h"
+#include "hif.h"
const struct ath10k_hw_regs qca988x_regs = {
.rtc_state_cold_reset_mask = 0x00000400,
@@ -266,6 +268,196 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
survey->time_busy = CCNT_TO_MSEC(ar, rcc);
}
+/* Program CPU_ADDR_MSB to allow access to a different
+ * memory region.
+ */
+static void ath10k_hw_map_target_mem(struct ath10k *ar, u32 msb)
+{
+ u32 address = SOC_CORE_BASE_ADDRESS + FW_RAM_CONFIG_ADDRESS;
+
+ ath10k_hif_write32(ar, address, msb);
+}
+
+/* 1. Write to a memory region of the target, such as IRAM and DRAM.
+ * 2. Target addresses (0 ~ 0x00100000 and 0x00400000 ~ 0x00500000)
+ *    can be written directly. See ath10k_pci_targ_cpu_to_ce_addr() too.
+ * 3. In order to access a region other than the above,
+ *    the value of the CPU_ADDR_MSB register has to be set first.
+ * 4. Target memory access space is limited to 1M size. If the size is
+ *    larger than 1M, the write has to be split and CPU_ADDR_MSB
+ *    programmed accordingly for each part.
+ */
+static int ath10k_hw_diag_segment_msb_download(struct ath10k *ar,
+ const void *buffer,
+ u32 address,
+ u32 length)
+{
+ u32 addr = address & REGION_ACCESS_SIZE_MASK;
+ int ret, remain_size, size;
+ const u8 *buf;
+
+ ath10k_hw_map_target_mem(ar, CPU_ADDR_MSB_REGION_VAL(address));
+
+ if (addr + length > REGION_ACCESS_SIZE_LIMIT) {
+ size = REGION_ACCESS_SIZE_LIMIT - addr;
+ remain_size = length - size;
+
+ ret = ath10k_hif_diag_write(ar, address, buffer, size);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the first %d bytes segment to address:0x%x: %d\n",
+ size, address, ret);
+ goto done;
+ }
+
+		/* Change msb to the next memory region */
+ ath10k_hw_map_target_mem(ar,
+ CPU_ADDR_MSB_REGION_VAL(address) + 1);
+ buf = buffer + size;
+ ret = ath10k_hif_diag_write(ar,
+ address & ~REGION_ACCESS_SIZE_MASK,
+ buf, remain_size);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the second %d bytes segment to address:0x%x: %d\n",
+ remain_size,
+ address & ~REGION_ACCESS_SIZE_MASK,
+ ret);
+ goto done;
+ }
+ } else {
+ ret = ath10k_hif_diag_write(ar, address, buffer, length);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to download the only %d bytes segment to address:0x%x: %d\n",
+ length, address, ret);
+ goto done;
+ }
+ }
+
+done:
+ /* Change msb to DRAM */
+ ath10k_hw_map_target_mem(ar,
+ CPU_ADDR_MSB_REGION_VAL(DRAM_BASE_ADDRESS));
+ return ret;
+}
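
A worked example of the split performed above, with illustrative values:

/* A 0x3000-byte download to target address 0x7ff000 crosses the 1M
 * boundary at 0x800000:
 *
 *   addr        = 0x7ff000 & REGION_ACCESS_SIZE_MASK = 0xff000
 *   size        = REGION_ACCESS_SIZE_LIMIT - addr    = 0x1000
 *   remain_size = 0x3000 - 0x1000                    = 0x2000
 *
 * The first 0x1000 bytes are written with CPU_ADDR_MSB set to region
 * 0x7; the remaining 0x2000 bytes go to
 * 0x7ff000 & ~REGION_ACCESS_SIZE_MASK (= 0x700000) with CPU_ADDR_MSB
 * bumped to 0x8, i.e. an effective target address of 0x800000.
 */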
+
+static int ath10k_hw_diag_segment_download(struct ath10k *ar,
+ const void *buffer,
+ u32 address,
+ u32 length)
+{
+ if (address >= DRAM_BASE_ADDRESS + REGION_ACCESS_SIZE_LIMIT)
+ /* Needs to change MSB for memory write */
+ return ath10k_hw_diag_segment_msb_download(ar, buffer,
+ address, length);
+ else
+ return ath10k_hif_diag_write(ar, address, buffer, length);
+}
+
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+ u32 address,
+ const void *buffer,
+ u32 length)
+{
+ const u8 *buf = buffer;
+ bool sgmt_end = false;
+ u32 base_addr = 0;
+ u32 base_len = 0;
+ u32 left = 0;
+ struct bmi_segmented_file_header *hdr;
+ struct bmi_segmented_metadata *metadata;
+ int ret = 0;
+
+ if (length < sizeof(*hdr))
+ return -EINVAL;
+
+	/* Check the firmware header. If the magic number is wrong or the
+	 * image is compressed, return an error.
+	 */
+ hdr = (struct bmi_segmented_file_header *)buf;
+ if (__le32_to_cpu(hdr->magic_num) != BMI_SGMTFILE_MAGIC_NUM) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Not a supported firmware, magic_num:0x%x\n",
+ hdr->magic_num);
+ return -EINVAL;
+ }
+
+ if (hdr->file_flags != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Not a supported firmware, file_flags:0x%x\n",
+ hdr->file_flags);
+ return -EINVAL;
+ }
+
+ metadata = (struct bmi_segmented_metadata *)hdr->data;
+ left = length - sizeof(*hdr);
+
+ while (left > 0) {
+ if (left < sizeof(*metadata)) {
+ ath10k_warn(ar, "firmware segment is truncated: %d\n",
+ left);
+ ret = -EINVAL;
+ break;
+ }
+ base_addr = __le32_to_cpu(metadata->addr);
+ base_len = __le32_to_cpu(metadata->length);
+ buf = metadata->data;
+ left -= sizeof(*metadata);
+
+ switch (base_len) {
+ case BMI_SGMTFILE_BEGINADDR:
+ /* base_addr is the start address to run */
+ ret = ath10k_bmi_set_start(ar, base_addr);
+ base_len = 0;
+ break;
+ case BMI_SGMTFILE_DONE:
+ /* no more segment */
+ base_len = 0;
+ sgmt_end = true;
+ ret = 0;
+ break;
+ case BMI_SGMTFILE_BDDATA:
+ case BMI_SGMTFILE_EXEC:
+ ath10k_warn(ar,
+ "firmware has unsupported segment:%d\n",
+ base_len);
+ ret = -EINVAL;
+ break;
+ default:
+ if (base_len > left) {
+ /* sanity check */
+ ath10k_warn(ar,
+ "firmware has invalid segment length, %d > %d\n",
+ base_len, left);
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = ath10k_hw_diag_segment_download(ar,
+ buf,
+ base_addr,
+ base_len);
+
+ if (ret)
+ ath10k_warn(ar,
+ "failed to download firmware via diag interface:%d\n",
+ ret);
+ break;
+ }
+
+ if (ret || sgmt_end)
+ break;
+
+ metadata = (struct bmi_segmented_metadata *)(buf + base_len);
+ left -= base_len;
+ }
+
+ if (ret == 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+			   "boot firmware fast diag download succeeded\n");
+ return ret;
+}
+
const struct ath10k_hw_ops qca988x_ops = {
};
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/hw.h b/drivers/net/wireless/ar10k/ath/ath10k/hw.h
index a828e7519be654..68b582e3b00433 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/hw.h
@@ -137,6 +137,9 @@ enum qca9377_chip_id_rev {
#define REG_DUMP_COUNT_QCA988X 60
+#define SK_PACING_SHIFT_6174 6
+#define SK_PACING_SHIFT_9377 6
+
#define QCA988X_CAL_DATA_LEN 2116
struct ath10k_fw_ie {
@@ -266,6 +269,11 @@ extern const struct ath10k_hw_values qca4019_values;
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
+int ath10k_hw_diag_fast_download(struct ath10k *ar,
+ u32 address,
+ const void *buffer,
+ u32 length);
+
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
@@ -504,6 +512,10 @@ struct ath10k_hw_clk_params {
#define TARGET_10_4_IPHDR_PAD_CONFIG 1
#define TARGET_10_4_QWRAP_CONFIG 0
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 12
+
struct htt_rx_desc;
/* Defines needed for Rx descriptor abstraction */
@@ -649,6 +661,7 @@ extern const struct ath10k_hw_clk_params qca6174_clk[];
#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
+#define FW_RAM_CONFIG_ADDRESS 0x0018
#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
@@ -769,4 +782,15 @@ extern const struct ath10k_hw_clk_params qca6174_clk[];
#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
/* qca6174 PLL offset/mask end */
+/* CPU_ADDR_MSB is a register; bits [3:0] specify which memory
+ * region is accessed. Each memory region is 1M in size.
+ * If the host wants to access 0xX12345 on the target, then
+ * CPU_ADDR_MSB[3:0] is 0xX.
+ * The following macros extract the 0xX value and define the size limit.
+ */
+#define CPU_ADDR_MSB_REGION_MASK GENMASK(23, 20)
+#define CPU_ADDR_MSB_REGION_VAL(X) FIELD_GET(CPU_ADDR_MSB_REGION_MASK, X)
+#define REGION_ACCESS_SIZE_LIMIT 0x100000
+#define REGION_ACCESS_SIZE_MASK (REGION_ACCESS_SIZE_LIMIT - 1)
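
A quick example of the macros, matching the comment above:

/* For a target address of 0x00512345:
 *   CPU_ADDR_MSB_REGION_VAL(0x00512345)  == 0x5     (bits [23:20])
 *   0x00512345 & REGION_ACCESS_SIZE_MASK == 0x12345 (offset in the 1M window)
 */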
+
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/mac.c b/drivers/net/wireless/ar10k/ath/ath10k/mac.c
index 1eb43fe84e2dd1..266241735e3482 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/mac.c
@@ -5905,22 +5905,16 @@ static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
+void ath10k_mac_wait_tx_complete(struct ath10k *ar)
{
- struct ath10k *ar = hw->priv;
bool skip;
long time_left;
/* mac80211 doesn't care if we really xmit queued frames or not
* we'll collect those frames either way if we stop/delete vdevs */
- if (drop)
- return;
-
- mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_WEDGED)
- goto skip;
+ return;
time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
@@ -5939,8 +5933,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (time_left == 0 || skip)
ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
skip, ar->state, time_left);
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ u32 bitmap;
+
+ if (drop) {
+ if (vif && vif->type == NL80211_IFTYPE_STATION) {
+ bitmap = ~(1 << WMI_MGMT_TID);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath10k_wmi_peer_flush(ar, arvif->vdev_id,
+ arvif->bssid, bitmap);
+ }
+ }
+ return;
+ }
-skip:
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_wait_tx_complete(ar);
mutex_unlock(&ar->conf_mutex);
}
@@ -7371,6 +7386,9 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+ if (ar->hw_params.tx_sk_pacing_shift != 0)
+ ar->hw->tx_sk_pacing_shift = ar->hw_params.tx_sk_pacing_shift;
+
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
@@ -7500,13 +7518,6 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->cipher_suites = cipher_suites;
ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
- /* ChromiumOS wpa_supplicant sends the background scan req. with
- * NL80211_SCAN_FLAG_LOW_PRIORITY enabled. ath10k scans using hw_scan
- * (firmware scan implementation. Advertise the firmware can handle the
- * scan requests and perform the scan w/o influencing the latencies.
- */
- ar->hw->wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN;
-
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/mac.h b/drivers/net/wireless/ar10k/ath/ath10k/mac.h
index 53091588090d0c..7ba5e99d8ee6cd 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/mac.h
@@ -76,6 +76,8 @@ void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_wait_tx_complete(struct ath10k *ar);
+
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
return (struct ath10k_vif *)vif->drv_priv;
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/pci.c b/drivers/net/wireless/ar10k/ath/ath10k/pci.c
index 60d10152b8099a..9cae82f697e634 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/pci.c
@@ -23,6 +23,7 @@
#include "core.h"
#include "debug.h"
+#include "coredump.h"
#include "targaddrs.h"
#include "bmi.h"
@@ -57,6 +58,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
+
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@@ -838,28 +844,50 @@ void ath10k_pci_rx_replenish_retry(unsigned long ptr)
ath10k_pci_rx_post(ar);
}
-static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
- u32 val = 0;
+ u32 val = 0, region = addr & 0xfffff;
- switch (ar->hw_rev) {
- case ATH10K_HW_QCA988X:
- case ATH10K_HW_QCA6174:
- case ATH10K_HW_QCA9377:
- val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
- CORE_CTRL_ADDRESS) &
- 0x7ff) << 21;
- break;
- case ATH10K_HW_QCA99X0:
- case ATH10K_HW_QCA4019:
- val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
- break;
- }
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= 0x100000 | region;
+ return val;
+}
+
+/* Refactored from ath10k_pci_qca988x_targ_cpu_to_ce_addr.
+ * Supports access to target space below 1M for qca6174 and qca9377.
+ * If the target space is below 1M, bit[20] of the converted CE address
+ * is 0; otherwise bit[20] is 1.
+ */
+static u32 ath10k_pci_qca6174_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
- val |= 0x100000 | (addr & 0xfffff);
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= ((addr >= 0x100000) ? 0x100000 : 0) | region;
return val;
}
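
Two illustrative conversions, assuming CORE_CTRL contributes val_hi in bits [31:21]:

/* Target address 0x00400 (below 1M): bit[20] stays clear,
 *   val = val_hi | 0x00400
 * Target address 0x401234 (at or above 1M): bit[20] is set,
 *   val = val_hi | 0x100000 | 0x01234
 */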
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ val |= 0x100000 | region;
+ return val;
+}
+
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+ return -ENOTSUPP;
+
+ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
+}
+
/*
* Diagnostic read/write access is provided for startup/config/debug usage.
* Caller must guarantee proper alignment, when applicable, and single user
@@ -870,10 +898,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -891,9 +917,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
@@ -901,29 +928,28 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
ret = -ENOMEM;
goto done;
}
- memset(data_buf, 0, orig_nbytes);
+ memset(data_buf, 0, alloc_nbytes);
- remaining_bytes = orig_nbytes;
+ /* The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+ remaining_bytes = nbytes;
ce_data = ce_data_base;
while (remaining_bytes) {
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
/* Request CE to send from Target(!) address to Host buffer */
- /*
- * The address supplied by the caller is in the
- * Target CPU virtual address space.
- *
- * In order to use this address with the diagnostic CE,
- * convert it from Target CPU virtual address space
- * to CE address space
- */
- address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
-
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
if (ret)
@@ -932,20 +958,24 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
- mdelay(1);
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
- mdelay(1);
-
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@@ -956,25 +986,28 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
- if (buf != ce_data) {
+ if (*buf != ce_data) {
ret = -EIO;
goto done;
}
remaining_bytes -= nbytes;
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
+ address, ret);
+ break;
+ }
+ memcpy(data, data_buf, nbytes);
+
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
- if (ret == 0)
- memcpy(data, data_buf, orig_nbytes);
- else
- ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
- address, ret);
if (data_buf)
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
spin_unlock_bh(&ar_pci->ce_lock);
@@ -1026,13 +1059,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
- unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
- u32 ce_data; /* Host buffer address in CE space */
dma_addr_t ce_data_base = 0;
int i;
@@ -1046,9 +1076,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* 1) 4-byte alignment
* 2) Buffer in DMA-able space
*/
- orig_nbytes = nbytes;
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
- orig_nbytes,
+ alloc_nbytes,
&ce_data_base,
GFP_ATOMIC);
if (!data_buf) {
@@ -1056,9 +1087,6 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- /* Copy caller's data to allocated DMA buf */
- memcpy(data_buf, data, orig_nbytes);
-
/*
* The address supplied by the caller is in the
* Target CPU virtual address space.
@@ -1071,14 +1099,16 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
*/
address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
- remaining_bytes = orig_nbytes;
- ce_data = ce_data_base;
+ remaining_bytes = nbytes;
while (remaining_bytes) {
/* FIXME: check cast */
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, nbytes);
+
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1086,7 +1116,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
- ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
nbytes, 0, 0);
if (ret != 0)
goto done;
@@ -1094,21 +1124,24 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
- mdelay(1);
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
- mdelay(1);
-
- if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
+ udelay(DIAG_ACCESS_CE_WAIT_US);
+ i += DIAG_ACCESS_CE_WAIT_US;
+
+ if (i > DIAG_ACCESS_CE_TIMEOUT_US) {
ret = -EBUSY;
goto done;
}
@@ -1119,19 +1152,19 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- if (buf != address) {
+ if (*buf != address) {
ret = -EIO;
goto done;
}
remaining_bytes -= nbytes;
address += nbytes;
- ce_data += nbytes;
+ data += nbytes;
}
done:
if (data_buf) {
- dma_free_coherent(ar->dev, orig_nbytes, data_buf,
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
ce_data_base);
}
@@ -1181,15 +1214,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
struct sk_buff *skb;
struct sk_buff_head list;
void *transfer_context;
- u32 ce_data;
unsigned int nbytes, max_nbytes;
- unsigned int transfer_id;
- unsigned int flags;
__skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
- &ce_data, &nbytes, &transfer_id,
- &flags) == 0) {
+ &nbytes) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1395,6 +1424,218 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
crash_data->registers[i] = reg_dump_values[i];
}
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ if (mem_region->section_table.size < 0)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+		ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the first register section and register
+ * start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section != NULL; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS, config);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS);
+ if (val != config) {
+ ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+ val, config);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count, shift;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+			ath10k_warn(ar, "memory region %s size %d is larger than the remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* To get IRAM dump, the host driver needs to switch target
+ * ram config from DRAM to IRAM.
+ */
+ if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+ current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+ shift = current_region->start >> 20;
+
+ ret = ath10k_pci_set_ram_config(ar, shift);
+ if (ret) {
+ ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ if (current_region->section_table.size > 0) {
+ /* Copy each section individually. */
+ count = ath10k_pci_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+ } else {
+	/* No individual memory sections defined, so we can
+ * copy the entire memory region.
+ */
+ ret = ath10k_pci_diag_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+
+ count = current_region->len;
+ }
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
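
Each region in the resulting dump is a header (type, start, length) immediately followed by its payload, so a consumer can walk the buffer region by region. A rough parser sketch, assuming a packed little-endian header with the same three fields the code fills in above:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Assumed on-buffer header mirroring ath10k_dump_ram_data_hdr's fields. */
struct ram_hdr {
	uint32_t region_type;
	uint32_t start;
	uint32_t length;
} __attribute__((packed));

static void walk_dump(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct ram_hdr) <= len) {
		struct ram_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		off += sizeof(hdr);
		if (hdr.length > len - off)
			break;  /* truncated dump */
		printf("region type %u: start 0x%x, %u bytes\n",
		       (unsigned)hdr.region_type, (unsigned)hdr.start,
		       (unsigned)hdr.length);
		off += hdr.length;  /* skip the payload to the next header */
	}
}

int main(void)
{
	struct ram_hdr h = { 1, 0x400000, 4 };
	uint8_t buf[sizeof(h) + 4];

	memcpy(buf, &h, sizeof(h));
	memset(buf + sizeof(h), 0, 4);
	walk_dump(buf, sizeof(buf));
	return 0;
}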
+
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data;
@@ -1404,7 +1645,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ar->stats.fw_crash_counter++;
- crash_data = ath10k_debug_get_new_fw_crash_data(ar);
+ crash_data = ath10k_coredump_new(ar);
if (crash_data)
scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
@@ -1414,6 +1655,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
ath10k_print_driver_info(ar);
ath10k_pci_dump_registers(ar, crash_data);
+ ath10k_ce_dump_registers(ar, crash_data);
+ ath10k_pci_dump_memory(ar, crash_data);
spin_unlock_bh(&ar->data_lock);
@@ -1835,13 +2078,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
- u32 ce_data;
unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
- if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
- &nbytes, &transfer_id, &flags))
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
return;
if (WARN_ON_ONCE(!xfer))
@@ -1936,7 +2176,6 @@ int ath10k_pci_init_config(struct ath10k *ar)
u32 pcie_state_targ_addr = 0;
u32 pipe_cfg_targ_addr = 0;
u32 svc_to_pipe_map = 0;
- u32 pcie_config_flags = 0;
u32 ealloc_value;
u32 ealloc_targ_addr;
u32 flag2_value;
@@ -2009,26 +2248,6 @@ int ath10k_pci_init_config(struct ath10k *ar)
return ret;
}
- ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
- offsetof(struct pcie_state,
- config_flags)),
- &pcie_config_flags);
- if (ret != 0) {
- ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
- return ret;
- }
-
- pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
-
- ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
- offsetof(struct pcie_state,
- config_flags)),
- pcie_config_flags);
- if (ret != 0) {
- ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
- return ret;
- }
-
/* configure early allocation */
ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
@@ -3070,24 +3289,29 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
enum ath10k_hw_rev hw_rev;
u32 chip_id;
bool pci_ps;
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA988X;
pci_ps = false;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
break;
case QCA6164_2_1_DEVICE_ID:
case QCA6174_2_1_DEVICE_ID:
hw_rev = ATH10K_HW_QCA6174;
pci_ps = true;
+ targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
case QCA99X0_2_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
+ targ_cpu_to_ce_addr = ath10k_pci_qca6174_targ_cpu_to_ce_addr;
break;
default:
WARN_ON(1);
@@ -3112,6 +3336,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar->dev_id = pci_dev->device;
ar_pci->pci_ps = pci_ps;
ar_pci->bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
ar->id.vendor = pdev->vendor;
ar->id.device = pdev->device;
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/pci.h b/drivers/net/wireless/ar10k/ath/ath10k/pci.h
index 249c73a6980088..d446d7aea1e2b6 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ar10k/ath/ath10k/pci.h
@@ -235,6 +235,11 @@ struct ath10k_pci {
const struct ath10k_bus_ops *bus_ops;
+	/* chip-specific method for converting the target CPU virtual address
+	 * space to the CE address space
+ */
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
	/* Keep this entry last; memory for struct ath10k_ahb is
	 * allocated (when AHB support is enabled) in the continuation of
	 * this struct.
@@ -258,7 +263,8 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define CDC_WAR_DATA_CE 4
/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
-#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+#define DIAG_ACCESS_CE_TIMEOUT_US 10000 /* 10 ms */
+#define DIAG_ACCESS_CE_WAIT_US 50
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ar10k/ath/ath10k/wmi-tlv.c
index 483450c6e906ea..ad2c5ba0980841 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/wmi-tlv.c
@@ -1322,8 +1322,8 @@ ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
cmd->regd = __cpu_to_le32(rd);
cmd->regd_2ghz = __cpu_to_le32(rd2g);
cmd->regd_5ghz = __cpu_to_le32(rd5g);
- cmd->conform_limit_2ghz = __cpu_to_le32(rd2g);
- cmd->conform_limit_5ghz = __cpu_to_le32(rd5g);
+ cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+ cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
return skb;
diff --git a/drivers/net/wireless/ar10k/ath/ath10k/wow.c b/drivers/net/wireless/ar10k/ath/ath10k/wow.c
index bff2d159b3096b..502904b58cba37 100644
--- a/drivers/net/wireless/ar10k/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ar10k/ath/ath10k/wow.c
@@ -374,6 +374,8 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
goto cleanup;
}
+ ath10k_mac_wait_tx_complete(ar);
+
ret = ath10k_wow_enable(ar);
if (ret) {
ath10k_warn(ar, "failed to start wow: %d\n", ret);
diff --git a/drivers/net/wireless/ar10k/hdrs/mac80211-exp.h b/drivers/net/wireless/ar10k/hdrs/mac80211-exp.h
index 9ed1f3d0900fbc..5f47b5417f8784 100644
--- a/drivers/net/wireless/ar10k/hdrs/mac80211-exp.h
+++ b/drivers/net/wireless/ar10k/hdrs/mac80211-exp.h
@@ -137,4 +137,5 @@
#define ieee80211_tdls_oper_request __ar10k_ieee80211_tdls_oper_request
#define ieee80211_iterate_interfaces __ar10k_ieee80211_iterate_interfaces
#define ieee80211_reserve_tid __ar10k_ieee80211_reserve_tid
+#define ieee80211_update_mu_groups __ar10k_ieee80211_update_mu_groups
#endif
diff --git a/drivers/net/wireless/ar10k/hdrs/mac80211.h b/drivers/net/wireless/ar10k/hdrs/mac80211.h
index 6ea3d215b2cff5..71ee967ef05722 100644
--- a/drivers/net/wireless/ar10k/hdrs/mac80211.h
+++ b/drivers/net/wireless/ar10k/hdrs/mac80211.h
@@ -2111,6 +2111,10 @@ enum ieee80211_hw_flags {
*
* @txq_ac_max_pending: maximum number of frames per AC pending in all txq
* entries for a vif.
+ *
+ * @tx_sk_pacing_shift: Pacing shift to set on TCP sockets when frames from
+ * them are encountered. The default should typically not be changed,
+ *	unless the driver has good reasons for needing more buffering.
*/
struct ieee80211_hw {
struct ieee80211_conf conf;
@@ -2141,6 +2145,7 @@ struct ieee80211_hw {
u8 n_cipher_schemes;
const struct ieee80211_cipher_scheme *cipher_schemes;
int txq_ac_max_pending;
+ u8 tx_sk_pacing_shift;
};
static inline bool _ieee80211_hw_check(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ar10k/mac80211/main.c b/drivers/net/wireless/ar10k/mac80211/main.c
index 9c15b71174ee64..4620b1f85ab056 100644
--- a/drivers/net/wireless/ar10k/mac80211/main.c
+++ b/drivers/net/wireless/ar10k/mac80211/main.c
@@ -572,6 +572,18 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len,
local->ops = ops;
local->use_chanctx = use_chanctx;
+ /*
+ * We need a bit of data queued to build aggregates properly, so
+ * instruct the TCP stack to allow more than a single ms of data
+ * to be queued in the stack. The value is a bit-shift of 1
+ * second, so 8 is ~4ms of queued data. Only affects local TCP
+ * sockets.
+ * This is the default, anyhow - drivers may need to override it
+ * for local reasons (longer buffers, longer completion time, or
+ * similar).
+ */
+ local->hw.tx_sk_pacing_shift = 8;
+
/* set up some defaults */
local->hw.queues = 1;
local->hw.max_rates = 1;
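
The arithmetic behind the comment: the pacing interval is one second shifted right by the configured amount, so a shift of 8 yields 1000000 us / 256 ≈ 3906 us, roughly 4 ms of queued data. A one-line check:

#include <stdio.h>

int main(void)
{
	unsigned int shift = 8;
	unsigned int us = 1000000u >> shift;  /* 1 s in microseconds / 2^shift */

	printf("pacing interval: %u us (~%u ms)\n", us, (us + 500) / 1000);
	return 0;
}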
diff --git a/drivers/net/wireless/ar10k/mac80211/tx.c b/drivers/net/wireless/ar10k/mac80211/tx.c
index 7c5978c84998db..3c6b817abecd7a 100644
--- a/drivers/net/wireless/ar10k/mac80211/tx.c
+++ b/drivers/net/wireless/ar10k/mac80211/tx.c
@@ -2950,13 +2950,7 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
if (!IS_ERR_OR_NULL(sta)) {
struct ieee80211_fast_tx *fast_tx;
- /* We need a bit of data queued to build aggregates properly, so
- * instruct the TCP stack to allow more than a single ms of data
- * to be queued in the stack. The value is a bit-shift of 1
- * second, so 8 is ~4ms of queued data. Only affects local TCP
- * sockets.
- */
- sk_pacing_shift_update(skb->sk, 8);
+ sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
fast_tx = rcu_dereference(sta->fast_tx);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index d26cb37b1fbd12..a65b5d7f59f442 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -212,11 +212,12 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
- spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
return ret;
}
@@ -230,7 +231,9 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
@@ -1166,6 +1169,7 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
rxd = (void *)msdu->data - sizeof(*rxd);
hdr = (void *)rxd->rx_hdr_status;
@@ -1182,8 +1186,8 @@ static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- rfc1042 += round_up(hdr_len, 4) +
- round_up(crypto_len, 4);
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
}
if (is_amsdu)
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index fa49d2cb40bea4..9a0dd027a30139 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2901,6 +2901,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
passive = channel->flags & IEEE80211_CHAN_NO_IR;
ch->passive = passive;
+		/* the firmware ignores the channel's "radar" flag and
+		 * actively scans with Probe Requests on "Radar
+		 * detection"/DFS channels that are not marked as
+		 * "available"
+ */
+ ch->passive |= ch->chan_radar;
+
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
@@ -4463,7 +4470,9 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
}
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
/* It makes no sense to have firmware do keepalives. mac80211 already
* takes care of this with idle connection polling.
@@ -4596,7 +4605,9 @@ err_peer_delete:
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
err:
if (arvif->beacon_buf) {
@@ -4640,7 +4651,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 71bdb368813de3..0194bebbdbf7e1 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
- int ret),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
- TP_ARGS(ar, id, buf, buf_len, ret),
+ TP_ARGS(ar, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
@@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
- __field(int, ret)
),
TP_fast_assign(
@@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
- __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "%s %s id %d len %zu ret %d",
+ "%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
- __entry->buf_len,
- __entry->ret
+ __entry->buf_len
)
);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 02eea3c3b5d30d..c27fff39ddae7d 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1424,6 +1424,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
cfg->keep_alive_pattern_size = __cpu_to_le32(0);
cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(0);
ath10k_wmi_put_host_mem_chunks(ar, chunks);
@@ -1454,10 +1459,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index ad655c44afdb60..f5031f3965c5fa 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1209,6 +1209,11 @@ struct wmi_tlv_resource_config {
__le32 keep_alive_pattern_size;
__le32 max_tdls_concurrent_sleep_sta;
__le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
} __packed;
struct wmi_tlv_init_cmd {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7569db0f69b5ee..b867875aa6e66c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1642,8 +1642,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
memset(skb_cb, 0, sizeof(*skb_cb));
+ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
if (ret)
goto err_pull;
@@ -1749,6 +1749,12 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (ret)
dev_kfree_skb_any(skb);
+ if (ret == -EAGAIN) {
+ ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
+ cmd_id);
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
return ret;
}
@@ -4059,7 +4065,7 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
rate_code[i],
type);
snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
- strncat(tpc_value, buff, strlen(buff));
+ strlcat(tpc_value, buff, sizeof(tpc_value));
}
tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
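
The strncat -> strlcat switch fixes a classic misuse: strncat's bound limits how many bytes are read from the source, not how many fit in the destination, so strncat(dst, src, strlen(src)) offers no overflow protection at all. strlcat bounds by the destination's total size and always NUL-terminates. A hedged illustration; strlcat is not universally available in libc, so a minimal local version is supplied:

#include <stdio.h>
#include <string.h>

/* Minimal strlcat for platforms whose libc lacks it. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst), slen = strlen(src);
	size_t n;

	if (dlen >= size)
		return size + slen;
	n = size - dlen - 1;          /* room left, minus the terminator */
	if (n > slen)
		n = slen;
	memcpy(dst + dlen, src, n);
	dst[dlen + n] = '\0';
	return dlen + slen;
}

int main(void)
{
	char buf[8] = "abc";

	/* strncat(buf, "0123456789", strlen("0123456789")) would overflow buf */
	my_strlcat(buf, "0123456789", sizeof(buf));
	printf("%s\n", buf);  /* "abc0123" -- safely truncated */
	return 0;
}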
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 37f53bd8fcb13a..184b6810cde990 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -68,12 +68,14 @@ enum CountryCode {
CTRY_AUSTRALIA = 36,
CTRY_AUSTRIA = 40,
CTRY_AZERBAIJAN = 31,
+ CTRY_BAHAMAS = 44,
CTRY_BAHRAIN = 48,
CTRY_BANGLADESH = 50,
CTRY_BARBADOS = 52,
CTRY_BELARUS = 112,
CTRY_BELGIUM = 56,
CTRY_BELIZE = 84,
+ CTRY_BERMUDA = 60,
CTRY_BOLIVIA = 68,
CTRY_BOSNIA_HERZ = 70,
CTRY_BRAZIL = 76,
@@ -159,6 +161,7 @@ enum CountryCode {
CTRY_ROMANIA = 642,
CTRY_RUSSIA = 643,
CTRY_SAUDI_ARABIA = 682,
+ CTRY_SERBIA = 688,
CTRY_SERBIA_MONTENEGRO = 891,
CTRY_SINGAPORE = 702,
CTRY_SLOVAKIA = 703,
@@ -170,11 +173,13 @@ enum CountryCode {
CTRY_SWITZERLAND = 756,
CTRY_SYRIA = 760,
CTRY_TAIWAN = 158,
+ CTRY_TANZANIA = 834,
CTRY_THAILAND = 764,
CTRY_TRINIDAD_Y_TOBAGO = 780,
CTRY_TUNISIA = 788,
CTRY_TURKEY = 792,
CTRY_UAE = 784,
+ CTRY_UGANDA = 800,
CTRY_UKRAINE = 804,
CTRY_UNITED_KINGDOM = 826,
CTRY_UNITED_STATES = 840,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index bdd2b4d61f2f0f..15bbd1e0d912f5 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -35,6 +35,7 @@ enum EnumRd {
FRANCE_RES = 0x31,
FCC3_FCCA = 0x3A,
FCC3_WORLD = 0x3B,
+ FCC3_ETSIC = 0x3F,
ETSI1_WORLD = 0x37,
ETSI3_ETSIA = 0x32,
@@ -44,6 +45,7 @@ enum EnumRd {
ETSI4_ETSIC = 0x38,
ETSI5_WORLD = 0x39,
ETSI6_WORLD = 0x34,
+ ETSI8_WORLD = 0x3D,
ETSI_RESERVED = 0x33,
MKK1_MKKA = 0x40,
@@ -59,6 +61,7 @@ enum EnumRd {
MKK1_MKKA1 = 0x4A,
MKK1_MKKA2 = 0x4B,
MKK1_MKKC = 0x4C,
+ APL2_FCCA = 0x4D,
APL3_FCCA = 0x50,
APL1_WORLD = 0x52,
@@ -67,6 +70,7 @@ enum EnumRd {
APL1_ETSIC = 0x55,
APL2_ETSIC = 0x56,
APL5_WORLD = 0x58,
+ APL13_WORLD = 0x5A,
APL6_WORLD = 0x5B,
APL7_FCCA = 0x5C,
APL8_WORLD = 0x5D,
@@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{FCC2_ETSIC, CTL_FCC, CTL_ETSI},
{FCC3_FCCA, CTL_FCC, CTL_FCC},
{FCC3_WORLD, CTL_FCC, CTL_ETSI},
+ {FCC3_ETSIC, CTL_FCC, CTL_ETSI},
{FCC4_FCCA, CTL_FCC, CTL_FCC},
{FCC5_FCCA, CTL_FCC, CTL_FCC},
{FCC6_FCCA, CTL_FCC, CTL_FCC},
@@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{ETSI4_WORLD, CTL_ETSI, CTL_ETSI},
{ETSI5_WORLD, CTL_ETSI, CTL_ETSI},
{ETSI6_WORLD, CTL_ETSI, CTL_ETSI},
+ {ETSI8_WORLD, CTL_ETSI, CTL_ETSI},
/* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */
{ETSI3_ETSIA, CTL_ETSI, CTL_ETSI},
@@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = {
{FCC1_FCCA, CTL_FCC, CTL_FCC},
{APL1_WORLD, CTL_FCC, CTL_ETSI},
{APL2_WORLD, CTL_FCC, CTL_ETSI},
+ {APL2_FCCA, CTL_FCC, CTL_FCC},
{APL3_WORLD, CTL_FCC, CTL_ETSI},
{APL4_WORLD, CTL_FCC, CTL_ETSI},
{APL5_WORLD, CTL_FCC, CTL_ETSI},
+ {APL13_WORLD, CTL_ETSI, CTL_ETSI},
{APL6_WORLD, CTL_ETSI, CTL_ETSI},
{APL8_WORLD, CTL_ETSI, CTL_ETSI},
{APL9_WORLD, CTL_ETSI, CTL_ETSI},
@@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
{CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
{CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
+ {CTRY_BAHAMAS, FCC3_WORLD, "BS"},
{CTRY_BAHRAIN, APL6_WORLD, "BH"},
{CTRY_BANGLADESH, NULL1_WORLD, "BD"},
{CTRY_BARBADOS, FCC2_WORLD, "BB"},
@@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_BELGIUM, ETSI1_WORLD, "BE"},
{CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
{CTRY_BELIZE, APL1_ETSIC, "BZ"},
+ {CTRY_BERMUDA, FCC3_FCCA, "BM"},
{CTRY_BOLIVIA, APL1_ETSIC, "BO"},
{CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"},
{CTRY_BRAZIL, FCC3_WORLD, "BR"},
@@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_ROMANIA, NULL1_WORLD, "RO"},
{CTRY_RUSSIA, NULL1_WORLD, "RU"},
{CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
+ {CTRY_SERBIA, ETSI1_WORLD, "RS"},
{CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"},
{CTRY_SINGAPORE, APL6_WORLD, "SG"},
{CTRY_SLOVAKIA, ETSI1_WORLD, "SK"},
@@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = {
{CTRY_SWITZERLAND, ETSI1_WORLD, "CH"},
{CTRY_SYRIA, NULL1_WORLD, "SY"},
{CTRY_TAIWAN, APL3_FCCA, "TW"},
+ {CTRY_TANZANIA, APL1_WORLD, "TZ"},
{CTRY_THAILAND, FCC3_WORLD, "TH"},
{CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
{CTRY_TUNISIA, ETSI3_WORLD, "TN"},
{CTRY_TURKEY, ETSI3_WORLD, "TR"},
+ {CTRY_UGANDA, FCC3_WORLD, "UG"},
{CTRY_UKRAINE, NULL1_WORLD, "UA"},
{CTRY_UAE, NULL1_WORLD, "AE"},
{CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"},
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 3691f41505d5f7..14d5f779f61d72 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1295,8 +1295,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ struct wmi_set_appie_cmd *cmd;
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index ec2b9c577b909b..3644c9edaf81c8 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -616,7 +616,7 @@ struct b43_c32 b43_cordic(int theta)
u8 i;
s32 tmp;
s8 signx = 1;
- u32 angle = 0;
+ s32 angle = 0;
struct b43_c32 ret = { .i = 39797, .q = 0, };
while (theta > (180 << 16))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 59cef6c69fe86a..72e1796c816704 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -705,7 +705,7 @@ done:
int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq, uint totlen)
{
- struct sk_buff *glom_skb;
+ struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
int err = 0;
@@ -726,10 +726,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
return -ENOMEM;
err = brcmf_sdiod_buffrw(sdiodev, SDIO_FUNC_2, false, addr,
glom_skb);
- if (err) {
- brcmu_pkt_buf_free_skb(glom_skb);
+ if (err)
goto done;
- }
skb_queue_walk(pktq, skb) {
memcpy(skb->data, glom_skb->data, skb->len);
@@ -740,6 +738,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
pktq);
done:
+ brcmu_pkt_buf_free_skb(glom_skb);
return err;
}
@@ -1109,6 +1108,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 09d043f2d5a8f7..3c02f6fa7638c0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4288,6 +4288,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
if (bus) {
+ /* Stop watchdog task */
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 984cd2f05c4ad9..7b2a7d848a56f5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -322,8 +322,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
goto out;
}
- if (changed)
- *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE);
+ if (changed) {
+ u32 status = le32_to_cpu(resp->status);
+
+ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+ status == MCC_RESP_ILLEGAL);
+ }
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
@@ -4050,10 +4054,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
- return;
-
/* if beacon filtering isn't on mac80211 does it anyway */
if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
return;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 2ee0f6fe56a1f5..5509c502435285 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -667,9 +667,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
n_channels = __le32_to_cpu(mcc_resp->n_channels);
IWL_DEBUG_LAR(mvm,
- "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n",
- status, mcc, mcc >> 8, mcc & 0xff,
- !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels);
+ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
+ status, mcc, mcc >> 8, mcc & 0xff, n_channels);
resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32);
resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index e06591f625c4a9..d6f9858ff2de45 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -713,6 +713,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
WQ_HIGHPRI | WQ_UNBOUND, 1);
INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
+ cancel_work_sync(&rba->rx_alloc);
+
spin_lock(&rba->lock);
atomic_set(&rba->req_pending, 0);
atomic_set(&rba->req_ready, 0);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index e8b770a95f7ae8..0f582117b0e3d9 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2453,9 +2453,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
sband->vht_cap.vht_mcs.rx_mcs_map =
@@ -2518,6 +2515,10 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
if (param->no_vif)
ieee80211_hw_set(hw, NO_AUTO_VIF);
+ tasklet_hrtimer_init(&data->beacon_timer,
+ mac80211_hwsim_beacon,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+
err = ieee80211_register_hw(hw);
if (err < 0) {
printk(KERN_DEBUG "mac80211_hwsim: ieee80211_register_hw failed (%d)\n",
@@ -2542,16 +2543,11 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
data->debugfs,
data, &hwsim_simulate_radar);
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);
spin_unlock_bh(&hwsim_radio_lock);
- if (idx > 0)
- hwsim_mcast_new_radio(idx, info, param);
+ hwsim_mcast_new_radio(idx, info, param);
return idx;
@@ -3006,7 +3002,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
goto out_err;
}
- genlmsg_reply(skb, info);
+ res = genlmsg_reply(skb, info);
break;
}
@@ -3199,16 +3195,16 @@ static int __init init_mac80211_hwsim(void)
if (err)
return err;
+ err = hwsim_init_netlink();
+ if (err)
+ goto out_unregister_driver;
+
hwsim_class = class_create(THIS_MODULE, "mac80211_hwsim");
if (IS_ERR(hwsim_class)) {
err = PTR_ERR(hwsim_class);
- goto out_unregister_driver;
+ goto out_exit_netlink;
}
- err = hwsim_init_netlink();
- if (err < 0)
- goto out_unregister_driver;
-
for (i = 0; i < radios; i++) {
struct hwsim_new_radio_params param = { 0 };
@@ -3314,6 +3310,8 @@ out_free_mon:
free_netdev(hwsim_mon);
out_free_radios:
mac80211_hwsim_free();
+out_exit_netlink:
+ hwsim_exit_netlink();
out_unregister_driver:
platform_driver_unregister(&mac80211_hwsim_driver);
return err;
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index d0c881dd584677..2229fb4481895d 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -1108,15 +1108,15 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
* Ethtool related
*/
-static const char * const mesh_stat_strings[] = {
- "drop_duplicate_bcast",
- "drop_ttl_zero",
- "drop_no_fwd_route",
- "drop_no_buffers",
- "fwded_unicast_cnt",
- "fwded_bcast_cnt",
- "drop_blind_table",
- "tx_failed_cnt"
+static const char mesh_stat_strings[MESH_STATS_NUM][ETH_GSTRING_LEN] = {
+ "drop_duplicate_bcast",
+ "drop_ttl_zero",
+ "drop_no_fwd_route",
+ "drop_no_buffers",
+ "fwded_unicast_cnt",
+ "fwded_bcast_cnt",
+ "drop_blind_table",
+ "tx_failed_cnt"
};
void lbs_mesh_ethtool_get_stats(struct net_device *dev,
@@ -1170,17 +1170,11 @@ int lbs_mesh_ethtool_get_sset_count(struct net_device *dev, int sset)
void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s)
{
- int i;
-
lbs_deb_enter(LBS_DEB_ETHTOOL);
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < MESH_STATS_NUM; i++) {
- memcpy(s + i * ETH_GSTRING_LEN,
- mesh_stat_strings[i],
- ETH_GSTRING_LEN);
- }
+ memcpy(s, mesh_stat_strings, sizeof(mesh_stat_strings));
break;
}
lbs_deb_enter(LBS_DEB_ETHTOOL);
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index ec8fedb11978e0..4b0e835c3de584 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1247,27 +1247,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
}
static void
-mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
+mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 rateinfo, u8 htinfo,
struct rate_info *rate)
{
struct mwifiex_adapter *adapter = priv->adapter;
if (adapter->is_hw_11ac_capable) {
/* bit[1-0]: 00=LG 01=HT 10=VHT */
- if (tx_htinfo & BIT(0)) {
+ if (htinfo & BIT(0)) {
/* HT */
- rate->mcs = priv->tx_rate;
+ rate->mcs = rateinfo;
rate->flags |= RATE_INFO_FLAGS_MCS;
}
- if (tx_htinfo & BIT(1)) {
+ if (htinfo & BIT(1)) {
/* VHT */
- rate->mcs = priv->tx_rate & 0x0F;
+ rate->mcs = rateinfo & 0x0F;
rate->flags |= RATE_INFO_FLAGS_VHT_MCS;
}
- if (tx_htinfo & (BIT(1) | BIT(0))) {
+ if (htinfo & (BIT(1) | BIT(0))) {
/* HT or VHT */
- switch (tx_htinfo & (BIT(3) | BIT(2))) {
+ switch (htinfo & (BIT(3) | BIT(2))) {
case 0:
rate->bw = RATE_INFO_BW_20;
break;
@@ -1282,29 +1282,51 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo,
break;
}
- if (tx_htinfo & BIT(4))
+ if (htinfo & BIT(4))
rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
- if ((priv->tx_rate >> 4) == 1)
+ if ((rateinfo >> 4) == 1)
rate->nss = 2;
else
rate->nss = 1;
}
} else {
/*
- * Bit 0 in tx_htinfo indicates that current Tx rate
- * is 11n rate. Valid MCS index values for us are 0 to 15.
+		 * Bit 0 in htinfo indicates that the current rate is an 11n
+		 * rate. Valid MCS index values for us are 0 to 15.
*/
- if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
- rate->mcs = priv->tx_rate;
+ if ((htinfo & BIT(0)) && (rateinfo < 16)) {
+ rate->mcs = rateinfo;
rate->flags |= RATE_INFO_FLAGS_MCS;
rate->bw = RATE_INFO_BW_20;
- if (tx_htinfo & BIT(1))
+ if (htinfo & BIT(1))
rate->bw = RATE_INFO_BW_40;
- if (tx_htinfo & BIT(2))
+ if (htinfo & BIT(2))
rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
}
}
+
+ /* Decode legacy rates for non-HT. */
+ if (!(htinfo & (BIT(0) | BIT(1)))) {
+ /* Bitrates in multiples of 100kb/s. */
+ static const int legacy_rates[] = {
+ [0] = 10,
+ [1] = 20,
+ [2] = 55,
+ [3] = 110,
+ [4] = 60, /* MWIFIEX_RATE_INDEX_OFDM0 */
+ [5] = 60,
+ [6] = 90,
+ [7] = 120,
+ [8] = 180,
+ [9] = 240,
+ [10] = 360,
+ [11] = 480,
+ [12] = 540,
+ };
+ if (rateinfo < ARRAY_SIZE(legacy_rates))
+ rate->legacy = legacy_rates[rateinfo];
+ }
}
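
rate_info.legacy is expressed in units of 100 kb/s, which is why the table stores 10 for 1 Mb/s and 540 for 54 Mb/s. A quick conversion check against the same (illustrative) table:

#include <stdio.h>

/* Bitrates in multiples of 100 kb/s, indexed by the firmware rate index. */
static const int legacy_rates[] = {
	10, 20, 55, 110,                          /* 1, 2, 5.5, 11 Mb/s (CCK) */
	60, 60, 90, 120, 180, 240, 360, 480, 540, /* 6..54 Mb/s (OFDM) */
};

int main(void)
{
	unsigned int idx = 12;

	printf("index %u -> %d.%d Mb/s\n", idx,
	       legacy_rates[idx] / 10, legacy_rates[idx] % 10);
	return 0;
}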
/*
@@ -1347,7 +1369,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
sinfo->tx_packets = node->stats.tx_packets;
sinfo->tx_failed = node->stats.tx_failed;
- mwifiex_parse_htinfo(priv, node->stats.last_tx_htinfo,
+ mwifiex_parse_htinfo(priv, priv->tx_rate,
+ node->stats.last_tx_htinfo,
&sinfo->txrate);
sinfo->txrate.legacy = node->stats.last_tx_rate * 5;
@@ -1373,7 +1396,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
HostCmd_ACT_GEN_GET, DTIM_PERIOD_I,
&priv->dtim_period, true);
- mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate);
+ mwifiex_parse_htinfo(priv, priv->tx_rate, priv->tx_htinfo,
+ &sinfo->txrate);
sinfo->signal_avg = priv->bcn_rssi_avg;
sinfo->rx_bytes = priv->stats.rx_bytes;
@@ -1384,6 +1408,10 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
/* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
sinfo->txrate.legacy = rate * 5;
+ sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE);
+ mwifiex_parse_htinfo(priv, priv->rxpd_rate, priv->rxpd_htinfo,
+ &sinfo->rxrate);
+
if (priv->bss_mode == NL80211_IFTYPE_STATION) {
sinfo->filled |= BIT(NL80211_STA_INFO_BSS_PARAM);
sinfo->bss_param.flags = 0;
@@ -4332,11 +4360,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
wiphy->max_remain_on_channel_duration = 5000;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
+ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+
mwifiex_register_cfg80211_vendor_command(wiphy);
wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
@@ -4395,11 +4425,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
- wiphy->features |= NL80211_FEATURE_HT_IBSS |
- NL80211_FEATURE_INACTIVITY_TIMER |
+ wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER |
NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_NEED_OBSS_SCAN;
+ if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
+ wiphy->features |= NL80211_FEATURE_HT_IBSS;
+
if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info))
wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c
index 91148652426445..69576fea1501de 100644
--- a/drivers/net/wireless/marvell/mwifiex/debugfs.c
+++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c
@@ -296,15 +296,13 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
"total samples = %d\n",
atomic_read(&phist_data->num_samples));
- p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M");
- p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n");
- p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M");
- p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
+ p += sprintf(p,
+ "rx rates (in Mbps): 0=1M 1=2M 2=5.5M 3=11M 4=6M 5=9M 6=12M\n"
+ "7=18M 8=24M 9=36M 10=48M 11=54M 12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n");
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) {
- p += sprintf(p, "44-53=MCS0-9(VHT:BW20)");
- p += sprintf(p, "54-63=MCS0-9(VHT:BW40)");
- p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n");
+ p += sprintf(p,
+ "44-53=MCS0-9(VHT:BW20) 54-63=MCS0-9(VHT:BW40) 64-73=MCS0-9(VHT:BW80)\n\n");
} else {
p += sprintf(p, "\n");
}
@@ -333,7 +331,7 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf,
for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) {
value = atomic_read(&phist_data->noise_flr[i]);
if (value)
- p += sprintf(p, "noise_flr[-%02ddBm] = %d\n",
+ p += sprintf(p, "noise_flr[%02ddBm] = %d\n",
(int)(i-128), value);
}
for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index 05a19565e6cfad..1ebf5c9b5f8f8a 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -811,8 +811,8 @@ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode)
* is checked to determine WPA version. If buffer length is zero, the existing
* WPA IE is reset.
*/
-static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv,
- u8 *ie_data_ptr, u16 ie_len)
+static int mwifiex_set_wpa_ie(struct mwifiex_private *priv,
+ u8 *ie_data_ptr, u16 ie_len)
{
if (ie_len) {
if (ie_len > sizeof(priv->wpa_ie)) {
@@ -1351,101 +1351,96 @@ static int
mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
u16 ie_len)
{
- int ret = 0;
struct ieee_types_vendor_header *pvendor_ie;
const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 };
const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 };
- u16 unparsed_len = ie_len;
- int find_wpa_ie = 0;
+ u16 unparsed_len = ie_len, cur_ie_len;
/* If the passed length is zero, reset the buffer */
if (!ie_len) {
priv->gen_ie_buf_len = 0;
priv->wps.session_enable = false;
-
return 0;
- } else if (!ie_data_ptr) {
+ } else if (!ie_data_ptr ||
+ ie_len <= sizeof(struct ieee_types_header)) {
return -1;
}
pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr;
while (pvendor_ie) {
+ cur_ie_len = pvendor_ie->len + sizeof(struct ieee_types_header);
+
+ if (pvendor_ie->element_id == WLAN_EID_RSN) {
+ /* IE is a WPA/WPA2 IE so call set_wpa function */
+ mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie, cur_ie_len);
+ priv->wps.session_enable = false;
+ goto next_ie;
+ }
+
+ if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) {
+ /* IE is a WAPI IE so call set_wapi function */
+ mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie,
+ cur_ie_len);
+ goto next_ie;
+ }
+
if (pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) {
- /* Test to see if it is a WPA IE, if not, then it is a
- * gen IE
+			/* Test to see if it is a WPA IE; if not, then
+			 * it is a gen IE
*/
if (!memcmp(pvendor_ie->oui, wpa_oui,
sizeof(wpa_oui))) {
- find_wpa_ie = 1;
- break;
+ /* IE is a WPA/WPA2 IE so call set_wpa function
+ */
+ mwifiex_set_wpa_ie(priv, (u8 *)pvendor_ie,
+ cur_ie_len);
+ priv->wps.session_enable = false;
+ goto next_ie;
}
- /* Test to see if it is a WPS IE, if so, enable
- * wps session flag
- */
if (!memcmp(pvendor_ie->oui, wps_oui,
sizeof(wps_oui))) {
+				/* Test to see if it is a WPS IE;
+				 * if so, enable the WPS session flag
+ */
priv->wps.session_enable = true;
mwifiex_dbg(priv->adapter, MSG,
- "info: WPS Session Enabled.\n");
- ret = mwifiex_set_wps_ie(priv,
- (u8 *)pvendor_ie,
- unparsed_len);
+ "WPS Session Enabled.\n");
+ mwifiex_set_wps_ie(priv, (u8 *)pvendor_ie,
+ cur_ie_len);
+ goto next_ie;
}
}
- if (pvendor_ie->element_id == WLAN_EID_RSN) {
- find_wpa_ie = 1;
- break;
- }
+		/* Saved in gen_ie, such as the P2P IE, etc. */
- if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) {
- /* IE is a WAPI IE so call set_wapi function */
- ret = mwifiex_set_wapi_ie(priv, (u8 *)pvendor_ie,
- unparsed_len);
- return ret;
+		/* Verify that this IE's length does not exceed the
+		 * space remaining in the buffer
+		 */
+ if (cur_ie_len <
+ (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
+			/* Append this IE to the end of
+			 * the gen IE buffer
+			 */
+ memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len,
+ (u8 *)pvendor_ie, cur_ie_len);
+			/* Increment the stored buffer length by
+			 * this IE's size
+			 */
+ priv->gen_ie_buf_len += cur_ie_len;
}
- unparsed_len -= (pvendor_ie->len +
- sizeof(struct ieee_types_header));
+next_ie:
+ unparsed_len -= cur_ie_len;
if (unparsed_len <= sizeof(struct ieee_types_header))
pvendor_ie = NULL;
else
pvendor_ie = (struct ieee_types_vendor_header *)
- (((u8 *)pvendor_ie) + pvendor_ie->len +
- sizeof(struct ieee_types_header));
- }
-
- if (find_wpa_ie) {
- /* IE is a WPA/WPA2 IE so call set_wpa function */
- ret = mwifiex_set_wpa_ie_helper(priv, (u8 *)pvendor_ie,
- unparsed_len);
- priv->wps.session_enable = false;
- return ret;
+ (((u8 *)pvendor_ie) + cur_ie_len);
}
- /*
- * Verify that the passed length is not larger than the
- * available space remaining in the buffer
- */
- if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) {
-
- /* Append the passed data to the end of the
- genIeBuffer */
- memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr,
- ie_len);
- /* Increment the stored buffer length by the
- size passed */
- priv->gen_ie_buf_len += ie_len;
- } else {
- /* Passed data does not fit in the remaining
- buffer space */
- ret = -1;
- }
-
- /* Return 0, or -1 for error case */
- return ret;
+ return 0;
}
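
The rewritten helper advances through the buffer one IE at a time: each step moves forward by the IE's payload length plus the two-byte header, stopping once less than a header's worth of bytes remains. A self-contained sketch of the same walk over generic 802.11 IEs (toy data, not the driver's structures):

#include <stdint.h>
#include <stdio.h>

/* Generic 802.11 IE header: element id, then payload length. */
struct ie_hdr {
	uint8_t id;
	uint8_t len;
};

static void walk_ies(const uint8_t *buf, size_t buf_len)
{
	size_t off = 0;

	while (buf_len - off > sizeof(struct ie_hdr)) {
		const struct ie_hdr *ie = (const void *)(buf + off);
		size_t ie_len = sizeof(*ie) + ie->len;

		if (off + ie_len > buf_len)
			break;  /* malformed: IE runs past the buffer */
		printf("IE id %u, %u payload bytes\n",
		       (unsigned)ie->id, (unsigned)ie->len);
		off += ie_len;  /* advance to the next IE */
	}
}

int main(void)
{
	/* Two toy IEs: id 0 (SSID) len 3, id 48 (RSN) len 2. */
	const uint8_t buf[] = { 0, 3, 'a', 'b', 'c', 48, 2, 1, 0 };

	walk_ies(buf, sizeof(buf));
	return 0;
}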
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 00fcbda09349e7..fb28a5c7f4416b 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -152,14 +152,17 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
}
- priv->rxpd_rate = local_rx_pd->rx_rate;
-
- priv->rxpd_htinfo = local_rx_pd->ht_info;
+ /* Only stash RX bitrate for unicast packets. */
+ if (likely(!is_multicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest))) {
+ priv->rxpd_rate = local_rx_pd->rx_rate;
+ priv->rxpd_htinfo = local_rx_pd->ht_info;
+ }
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
- adj_rx_rate = mwifiex_adjust_data_rate(priv, priv->rxpd_rate,
- priv->rxpd_htinfo);
+ adj_rx_rate = mwifiex_adjust_data_rate(priv,
+ local_rx_pd->rx_rate,
+ local_rx_pd->ht_info);
mwifiex_hist_data_add(priv, adj_rx_rate, local_rx_pd->snr,
local_rx_pd->nf);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 9cf3334adf4d56..50e6c1d80fc801 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -625,6 +625,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
MWIFIEX_FUNC_SHUTDOWN);
}
+ if (adapter->workqueue)
+ flush_workqueue(adapter->workqueue);
+
mwifiex_usb_free(card);
mwifiex_dbg(adapter, FATAL,
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index 0cd68ffc2c74dc..51ccf10f44132e 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr,
s8 nflr)
{
struct mwifiex_histogram_data *phist_data = priv->hist_data;
+ s8 nf = -nflr;
+ s8 rssi = snr - nflr;
atomic_inc(&phist_data->num_samples);
atomic_inc(&phist_data->rx_rate[rx_rate]);
- atomic_inc(&phist_data->snr[snr]);
- atomic_inc(&phist_data->noise_flr[128 + nflr]);
- atomic_inc(&phist_data->sig_str[nflr - snr]);
+ atomic_inc(&phist_data->snr[snr + 128]);
+ atomic_inc(&phist_data->noise_flr[nf + 128]);
+ atomic_inc(&phist_data->sig_str[rssi + 128]);
}
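
SNR, noise floor, and signal strength are signed 8-bit quantities, so adding 128 maps the whole s8 range [-128, 127] onto valid array indices [0, 255]; the previous expressions could go negative for legitimate inputs. A quick demonstration with hypothetical sample values:

#include <stdio.h>

int main(void)
{
	signed char snr = 25, nflr = -96;  /* hypothetical sample, dB / dBm */
	signed char nf = -nflr;            /* noise floor magnitude */
	signed char rssi = snr - nflr;     /* 25 - (-96) = 121 */

	/* +128 shifts the s8 range [-128, 127] onto indices [0, 255] */
	printf("snr idx %d, nf idx %d, rssi idx %d\n",
	       snr + 128, nf + 128, rssi + 128);
	return 0;
}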
/* function to reset histogram data during init/reset */
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 8b537a5a4b010a..8006f0972ad1d8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -135,7 +135,6 @@ found_alt:
firmware->size);
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
- rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index d60151a1a21ce8..c5f23d59655a0a 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2921,6 +2921,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+ if (buflen < le32_to_cpu(auth_req->length))
+ return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8428858204a676..fc895b466ebb18 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -155,7 +155,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
int err;
struct mmc_card *card = pfunction->card;
struct mmc_host *host = card->host;
- s32 bit = (fls(host->ocr_avail) - 1);
u8 cmd52_resp;
u32 clock, resp, i;
u16 rca;
@@ -175,7 +174,6 @@ static void rsi_reset_card(struct sdio_func *pfunction)
msleep(20);
/* Initialize the SDIO card */
- host->ios.vdd = bit;
host->ios.chip_select = MMC_CS_DONTCARE;
host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
host->ios.power_mode = MMC_POWER_UP;
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index bff81b8d4164db..9f1037e7e55c91 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
if (!frame.skb)
@@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->ie_len)
memcpy(skb_put(frame.skb, req->ie_len), req->ie, req->ie_len);
- /* will be unlocked in cw1200_scan_work() */
- down(&priv->scan.lock);
- mutex_lock(&priv->conf_mutex);
-
ret = wsm_set_template_frame(priv, &frame);
if (!ret) {
/* Host want to be the probe responder. */
ret = wsm_set_probe_responder(priv, true);
}
if (ret) {
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
up(&priv->scan.lock);
- dev_kfree_skb(frame.skb);
return ret;
}
@@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- mutex_unlock(&priv->conf_mutex);
-
if (frame.skb)
dev_kfree_skb(frame.skb);
+ mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
}
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 3dd46c78c1cc84..5a595f9f47ff45 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -1805,16 +1805,18 @@ static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size)
{
size_t pos = buf->data - buf->begin;
size_t size = pos + extra_size;
+ u8 *tmp;
size = round_up(size, FWLOAD_BLOCK_SIZE);
- buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
- if (buf->begin) {
- buf->data = &buf->begin[pos];
- buf->end = &buf->begin[size];
- return 0;
- } else {
- buf->end = buf->data = buf->begin;
+ tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA);
+ if (!tmp) {
+ wsm_buf_deinit(buf);
return -ENOMEM;
}
+
+ buf->begin = tmp;
+ buf->data = &buf->begin[pos];
+ buf->end = &buf->begin[size];
+ return 0;
}
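
The wsm_buf_reserve() change is the standard realloc idiom: never assign the result straight back to the only pointer, or a failed allocation both leaks the old block and leaves the internal pointers dangling. The same pattern in plain C:

#include <stdlib.h>
#include <stdio.h>

struct buf {
	char *begin, *data, *end;
};

/* Grow 'b' so at least 'extra' bytes follow b->data. */
static int buf_reserve(struct buf *b, size_t extra)
{
	size_t pos = b->data - b->begin;
	size_t size = pos + extra;
	char *tmp;

	tmp = realloc(b->begin, size);  /* old pointer survives on failure */
	if (!tmp)
		return -1;

	b->begin = tmp;
	b->data = b->begin + pos;       /* re-derive: realloc may have moved it */
	b->end = b->begin + size;
	return 0;
}

int main(void)
{
	struct buf b;

	b.begin = b.data = b.end = malloc(4);
	printf("reserve: %d\n", buf_reserve(&b, 64));
	free(b.begin);
	return 0;
}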
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index c172da56b550b8..e4a8280cea8324 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -388,6 +388,11 @@ static int wl1271_suspend(struct device *dev)
mmc_pm_flag_t sdio_flags;
int ret = 0;
+ if (!wl) {
+ dev_err(dev, "no wilink module was probed\n");
+ goto out;
+ }
+
dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
wl->wow_enabled);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 02db20b2674949..d324ac308e6d62 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1538,11 +1538,6 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
skb_frag_size_set(&frags[i], len);
}
- /* Copied all the bits from the frag list -- free it. */
- skb_frag_list_init(skb);
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
-
/* Release all the original (foreign) frags. */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb_frag_unref(skb, f);
@@ -1611,6 +1606,8 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+ xenvif_skb_zerocopy_prepare(queue, nskb);
if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
netdev_err(queue->vif->dev,
@@ -1619,6 +1616,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
kfree_skb(skb);
continue;
}
+ /* Copied all the bits from the frag list -- free it. */
+ skb_frag_list_init(skb);
+ kfree_skb(nskb);
}
skb->dev = queue->vif->dev;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index a0de2453fa09e5..6f55ab4f795942 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -86,7 +86,7 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
-static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+static DECLARE_WAIT_QUEUE_HEAD(module_wq);
struct netfront_stats {
u64 packets;
@@ -238,7 +238,7 @@ static void rx_refill_timeout(unsigned long data)
static int netfront_tx_slot_available(struct netfront_queue *queue)
{
return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
- (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
+ (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
}
static void xennet_maybe_wake_tx(struct netfront_queue *queue)
@@ -775,7 +775,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
grant_ref_t ref = xennet_get_rx_ref(queue, cons);
- int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
+ int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
int slots = 1;
int err = 0;
unsigned long ret;
@@ -878,7 +878,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@@ -887,15 +886,20 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
- if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+ if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- BUG_ON(pull_to <= skb_headlen(skb));
+ BUG_ON(pull_to < skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons;
+ kfree_skb(nskb);
+ return ~0U;
+ }
- skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ skb_frag_page(nfrag),
rx->offset, rx->status, PAGE_SIZE);
skb_shinfo(nskb)->nr_frags = 0;
@@ -1029,6 +1033,8 @@ err:
skb->len += rx->status;
i = xennet_fill_frags(queue, skb, &tmpq);
+ if (unlikely(i == ~0U))
+ goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1335,6 +1341,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
+ wait_event(module_wq,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown);
return netdev;
exit:
@@ -1602,6 +1613,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
{
unsigned short i;
int err = 0;
+ char *devid;
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
@@ -1609,8 +1621,9 @@ static int xennet_init_queue(struct netfront_queue *queue)
setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
(unsigned long)queue);
- snprintf(queue->name, sizeof(queue->name), "%s-q%u",
- queue->info->netdev->name, queue->id);
+ devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
+ snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
+ devid, queue->id);
/* Initialise tx_skbs as a free chain containing every entry. */
queue->tx_skb_freelist = 0;
@@ -2017,15 +2030,14 @@ static void netback_changed(struct xenbus_device *dev,
dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
+ wake_up_all(&module_wq);
+
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- break;
-
case XenbusStateUnknown:
- wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@@ -2041,12 +2053,10 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateClosed:
- wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
- wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
@@ -2154,14 +2164,14 @@ static int xennet_remove(struct xenbus_device *dev)
if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
xenbus_switch_state(dev, XenbusStateClosing);
- wait_event(module_unload_q,
+ wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
- wait_event(module_unload_q,
+ wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 6c0c301611c482..1b11ded79c4f8f 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
struct device_node *matched_node;
int ret;
- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
+ matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
if (!matched_node) {
- matched_node = of_find_compatible_node(node, NULL,
- "mrvl,nfc-uart");
+ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
if (!matched_node)
return -ENODEV;
}
diff --git a/drivers/nfc/nxp-nci/firmware.c b/drivers/nfc/nxp-nci/firmware.c
index 5291797324bae0..553011f583395f 100644
--- a/drivers/nfc/nxp-nci/firmware.c
+++ b/drivers/nfc/nxp-nci/firmware.c
@@ -24,7 +24,7 @@
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/nfc.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include "nxp-nci.h"
diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c
index df4333c7ee0f81..0b1122cb5d0c8a 100644
--- a/drivers/nfc/nxp-nci/i2c.c
+++ b/drivers/nfc/nxp-nci/i2c.c
@@ -36,7 +36,7 @@
#include <linux/of_gpio.h>
#include <linux/of_irq.h>
#include <linux/platform_data/nxp-nci.h>
-#include <linux/unaligned/access_ok.h>
+#include <asm/unaligned.h>
#include <net/nfc/nfc.h>
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 254b0ee3703952..273c7ecf48790d 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -158,6 +158,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
put_device(dev);
}
put_device(dev);
+ if (dev->parent)
+ put_device(dev->parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
@@ -175,6 +177,8 @@ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
void __nd_device_register(struct device *dev)
{
dev->bus = &nvdimm_bus_type;
+ if (dev->parent)
+ get_device(dev->parent);
get_device(dev);
async_schedule_domain(nd_async_device_register, dev,
&nd_async_domain);
@@ -237,14 +241,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
{
struct device *dev = disk->driverfs_dev;
struct nd_region *nd_region = to_nd_region(dev->parent);
- const char *pol = nd_region->ro ? "only" : "write";
+ int disk_ro = get_disk_ro(disk);
- if (nd_region->ro == get_disk_ro(disk))
+ /*
+	 * Upgrade to read-only if the region is read-only; preserve as
+	 * read-only if the disk is already read-only.
+ */
+ if (disk_ro || nd_region->ro == disk_ro)
return 0;
- dev_info(dev, "%s read-%s, marking %s read-%s\n",
- dev_name(&nd_region->dev), pol, disk->disk_name, pol);
- set_disk_ro(disk, nd_region->ro);
+ dev_info(dev, "%s read-only, marking %s read-only\n",
+ dev_name(&nd_region->dev), disk->disk_name);
+ set_disk_ro(disk, 1);
return 0;
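
The bus.c hunks above pin the parent device across the asynchronous registration: a reference on dev->parent is taken before scheduling and dropped in the callback, so the parent cannot be freed while registration runs. A minimal sketch of the idiom, with hypothetical names and error handling mostly omitted:

#include <linux/device.h>
#include <linux/async.h>

static ASYNC_DOMAIN_EXCLUSIVE(my_async_domain);

static void my_async_register(void *d, async_cookie_t cookie)
{
	struct device *dev = d;
	struct device *parent = dev->parent;

	if (device_add(dev))		/* parent must stay alive here */
		dev_err(dev, "registration failed\n");
	put_device(dev);		/* drops the ref taken below */
	if (parent)
		put_device(parent);	/* ...and the matching parent ref */
}

static void my_device_register(struct device *dev)
{
	if (dev->parent)
		get_device(dev->parent);
	get_device(dev);
	async_schedule_domain(my_async_register, dev, &my_async_domain);
}
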
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a43a7b854eb14d..cdf0facf044d28 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -82,15 +82,31 @@ struct nvme_dev {
u8 vwc;
u8 npss;
u8 apsta;
+ /* shadow doorbell buffer support: */
u32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
u32 *dbbuf_eis;
dma_addr_t dbbuf_eis_dma_addr;
+ /* host memory buffer support: */
+ u64 host_mem_size;
+ u32 nr_host_mem_descs;
+ dma_addr_t host_mem_descs_dma;
+ struct nvme_host_mem_buf_desc *host_mem_descs;
+
+ void **host_mem_desc_bufs;
+ struct dma_attrs host_mem_dma_attrs;
+
struct nvme_id_power_state psd[32];
/* Power saving configuration */
u64 ps_max_latency_us;
+
+ /* PCIe only: */
+ u32 hmpre;
+ u32 hmmin;
+ u32 hmminds;
+ u16 hmmaxd;
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8bf4b05752a537..320db0d3669b7c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -89,6 +89,12 @@ MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
+
+static unsigned int max_host_mem_size_mb = 128;
+module_param(max_host_mem_size_mb, uint, 0444);
+MODULE_PARM_DESC(max_host_mem_size_mb,
+ "Maximum Host Memory Buffer (HMB) size per controller (in MiB)");
+
static struct workqueue_struct *nvme_workq;
static wait_queue_head_t nvme_kthread_wait;
@@ -1715,11 +1721,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
+ nvme_init_queue(nvmeq, qid);
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result < 0)
goto release_sq;
- nvme_init_queue(nvmeq, qid);
return result;
release_sq:
@@ -2058,6 +2064,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
goto free_nvmeq;
nvmeq->cq_vector = 0;
+ nvme_init_queue(nvmeq, 0);
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result) {
nvmeq->cq_vector = -1;
@@ -2701,6 +2708,186 @@ static inline void nvme_release_cmb(struct nvme_dev *dev)
}
}
+static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
+{
+ u64 dma_addr = dev->host_mem_descs_dma;
+ struct nvme_command c;
+ int ret;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_set_features;
+ c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF);
+ c.features.dword11 = cpu_to_le32(bits);
+ c.features.dword12 = cpu_to_le32(dev->host_mem_size >>
+ ilog2(dev->page_size));
+ c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr));
+ c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr));
+ c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs);
+
+ ret = nvme_submit_sync_cmd(dev->admin_q, &c, NULL, 0);
+ if (ret) {
+ dev_warn(dev->device,
+ "failed to set host mem (err %d, flags %#x).\n",
+ ret, bits);
+ }
+ return ret;
+}
+
+static void nvme_free_host_mem(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->nr_host_mem_descs; i++) {
+ struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
+ size_t size = le32_to_cpu(desc->size) * dev->page_size;
+
+ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+ le64_to_cpu(desc->addr),
+ &dev->host_mem_dma_attrs);
+ }
+
+ kfree(dev->host_mem_desc_bufs);
+ dev->host_mem_desc_bufs = NULL;
+ dma_free_coherent(dev->dev,
+ dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
+ dev->host_mem_descs, dev->host_mem_descs_dma);
+ dev->host_mem_descs = NULL;
+}
+
+static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
+ u32 chunk_size)
+{
+ struct nvme_host_mem_buf_desc *descs;
+ u32 max_entries, len;
+ dma_addr_t descs_dma;
+ int i = 0;
+ void **bufs;
+ u64 size = 0, tmp;
+
+ tmp = (preferred + chunk_size - 1);
+ do_div(tmp, chunk_size);
+ max_entries = tmp;
+
+ if (dev->hmmaxd && dev->hmmaxd < max_entries)
+ max_entries = dev->hmmaxd;
+
+ descs = dma_zalloc_coherent(dev->dev, max_entries * sizeof(*descs),
+ &descs_dma, GFP_KERNEL);
+ if (!descs)
+ goto out;
+
+ bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL);
+ if (!bufs)
+ goto out_free_descs;
+
+ init_dma_attrs(&dev->host_mem_dma_attrs);
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &dev->host_mem_dma_attrs);
+
+ for (size = 0; size < preferred && i < max_entries; size += len) {
+ dma_addr_t dma_addr;
+
+ len = min_t(u64, chunk_size, preferred - size);
+ bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL,
+ &dev->host_mem_dma_attrs);
+ if (!bufs[i])
+ break;
+
+ descs[i].addr = cpu_to_le64(dma_addr);
+ descs[i].size = cpu_to_le32(len / dev->page_size);
+ i++;
+ }
+
+ if (!size)
+ goto out_free_bufs;
+
+ dev->nr_host_mem_descs = i;
+ dev->host_mem_size = size;
+ dev->host_mem_descs = descs;
+ dev->host_mem_descs_dma = descs_dma;
+ dev->host_mem_desc_bufs = bufs;
+ return 0;
+
+out_free_bufs:
+ while (--i >= 0) {
+ size_t size = le32_to_cpu(descs[i].size) * dev->page_size;
+
+ dma_free_attrs(dev->dev, size, bufs[i],
+ le64_to_cpu(descs[i].addr),
+ &dev->host_mem_dma_attrs);
+ }
+
+ kfree(bufs);
+out_free_descs:
+ dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ descs_dma);
+out:
+ dev->host_mem_descs = NULL;
+ return -ENOMEM;
+}
+
+static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
+{
+ u32 chunk_size;
+
+ /* start big and work our way down */
+ for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+ chunk_size >= max_t(u32, dev->hmminds * 4096, PAGE_SIZE * 2);
+ chunk_size /= 2) {
+ if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
+ if (!min || dev->host_mem_size >= min)
+ return 0;
+ nvme_free_host_mem(dev);
+ }
+ }
+
+ return -ENOMEM;
+}
+
+static int nvme_setup_host_mem(struct nvme_dev *dev)
+{
+ u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+ u64 preferred = (u64)dev->hmpre * 4096;
+ u64 min = (u64)dev->hmmin * 4096;
+ u32 enable_bits = NVME_HOST_MEM_ENABLE;
+ int ret = 0;
+
+ preferred = min(preferred, max);
+ if (min > max) {
+ dev_warn(dev->device,
+ "min host memory (%lld MiB) above limit (%d MiB).\n",
+ min >> ilog2(SZ_1M), max_host_mem_size_mb);
+ nvme_free_host_mem(dev);
+ return 0;
+ }
+
+ /*
+	 * If we already have a buffer allocated, check if we can reuse it.
+ */
+ if (dev->host_mem_descs) {
+ if (dev->host_mem_size >= min)
+ enable_bits |= NVME_HOST_MEM_RETURN;
+ else
+ nvme_free_host_mem(dev);
+ }
+
+ if (!dev->host_mem_descs) {
+ if (nvme_alloc_host_mem(dev, min, preferred)) {
+ dev_warn(dev->device,
+ "failed to allocate host memory buffer.\n");
+ return 0; /* controller must work without HMB */
+ }
+
+ dev_info(dev->device,
+ "allocated %lld MiB host memory buffer.\n",
+ dev->host_mem_size >> ilog2(SZ_1M));
+ }
+
+ ret = nvme_set_host_mem(dev, enable_bits);
+ if (ret)
+ nvme_free_host_mem(dev);
+ return ret;
+}
+
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
@@ -2944,6 +3131,10 @@ static int nvme_dev_add(struct nvme_dev *dev)
prev_apsta = dev->apsta;
dev->apsta = ctrl->apsta;
memcpy(dev->psd, ctrl->psd, sizeof(ctrl->psd));
+ dev->hmpre = le32_to_cpu(ctrl->hmpre);
+ dev->hmmin = le32_to_cpu(ctrl->hmmin);
+ dev->hmminds = le32_to_cpu(ctrl->hmminds);
+ dev->hmmaxd = le16_to_cpu(ctrl->hmmaxd);
kfree(ctrl);
@@ -2954,6 +3145,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
nvme_configure_apst(dev);
+ if (dev->hmpre) {
+ res = nvme_setup_host_mem(dev);
+ if (res < 0)
+ return res;
+ }
+
if (!dev->tagset.tags) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -3261,6 +3458,16 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
struct nvme_queue *nvmeq = dev->queues[i];
nvme_suspend_queue(nvmeq);
}
+
+ /*
+	 * If the controller is still alive, tell it to stop using the
+	 * host memory buffer. In theory the shutdown / reset should
+	 * make sure that it doesn't access the host memory anymore,
+	 * but I'd rather be safe than sorry.
+ */
+ if (dev->host_mem_descs)
+ nvme_set_host_mem(dev, 0);
+
} else {
nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
@@ -3480,7 +3687,6 @@ static void nvme_probe_work(struct work_struct *work)
goto disable;
}
- nvme_init_queue(dev->queues[0], 0);
result = nvme_alloc_admin_tags(dev);
if (result)
goto disable;
@@ -3826,6 +4032,7 @@ static void nvme_remove(struct pci_dev *pdev)
flush_work(&dev->scan_work);
nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
+ nvme_free_host_mem(dev);
nvme_dev_remove_admin(dev);
device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
nvme_free_queues(dev, 0);
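
nvme_alloc_host_mem() above starts with the largest feasible chunk size and halves it until an allocation satisfies the controller's minimum (HMMIN). A simplified, self-contained sketch of that descending strategy — try_alloc() is a hypothetical stand-in for __nvme_alloc_host_mem(), and a real implementation frees a partial allocation before retrying:

#include <stdbool.h>
#include <stdint.h>

static uint64_t allocated;	/* bytes obtained by the last attempt */

/* hypothetical stand-in: pretend only chunks <= 4 MiB succeed */
static bool try_alloc(uint64_t preferred, uint64_t chunk)
{
	if (chunk > (4ULL << 20))
		return false;
	allocated = preferred;
	return true;
}

static int alloc_host_mem(uint64_t min, uint64_t preferred)
{
	uint64_t chunk;

	/* start big and halve until the minimum can be satisfied */
	for (chunk = preferred; chunk >= 2 * 4096; chunk /= 2) {
		if (try_alloc(preferred, chunk) && allocated >= min)
			return 0;
		/* a real implementation frees a partial allocation here */
	}
	return -1;
}
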
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 17dc8c9f2d51e6..c17721b8ba300d 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -713,6 +713,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_compatible_child - Find compatible child node
+ * @parent: parent node
+ * @compatible: compatible string
+ *
+ * Lookup child node whose compatible property contains the given compatible
+ * string.
+ *
+ * Returns a node pointer with refcount incremented, use of_node_put() on it
+ * when done; or NULL if not found.
+ */
+struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(parent, child) {
+ if (of_device_is_compatible(child, compatible))
+ break;
+ }
+
+ return child;
+}
+EXPORT_SYMBOL(of_get_compatible_child);
+
+/**
* of_get_child_by_name - Find the child node by name for a given parent
* @node: parent node
* @name: child name to look for.
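
A hedged usage sketch for the new of_get_compatible_child() helper — some_setup() is a hypothetical caller; the point is that the returned node carries a reference that must be dropped with of_node_put():

#include <linux/errno.h>
#include <linux/of.h>

static int some_setup(struct device_node *parent)
{
	struct device_node *child;

	child = of_get_compatible_child(parent, "marvell,nfc-uart");
	if (!child)
		return -ENODEV;

	/* ... use the child node ... */

	of_node_put(child);	/* drop the reference the helper took */
	return 0;
}
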
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index e16ea5717b7f76..2eac3df7dd2906 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -156,20 +156,20 @@ static void __init of_unittest_dynamic(void)
/* Add a new property - should pass*/
prop->name = "new-property";
prop->value = "new-property-data";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
/* Try to add an existing property - should fail */
prop++;
prop->name = "new-property";
prop->value = "new-property-data-should-fail";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_add_property(np, prop) != 0,
"Adding an existing property should have failed\n");
/* Try to modify an existing property - should pass */
prop->value = "modify-property-data-should-pass";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating an existing property should have passed\n");
@@ -177,7 +177,7 @@ static void __init of_unittest_dynamic(void)
prop++;
prop->name = "modify-property";
prop->value = "modify-missing-property-data-should-pass";
- prop->length = strlen(prop->value);
+ prop->length = strlen(prop->value) + 1;
unittest(of_update_property(np, prop) == 0,
"Updating a missing property should have passed\n");
@@ -553,6 +553,9 @@ static void __init of_unittest_parse_interrupts(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
@@ -627,6 +630,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
@@ -778,15 +784,19 @@ static void __init of_unittest_platform_populate(void)
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
-
- /* Test that a parsing failure does not return -EPROBE_DEFER */
- np = of_find_node_by_path("/testcase-data/testcase-device2");
- pdev = of_find_device_by_node(np);
- unittest(pdev, "device 2 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq == -EPROBE_DEFER,
+ "device deferred probe failed - %d\n", irq);
+
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ unittest(pdev, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq < 0 && irq != -EPROBE_DEFER,
+ "device parsing error failed - %d\n", irq);
+ }
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
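
The strlen() + 1 changes above reflect a devicetree convention: a string property's length must include the NUL terminator, otherwise consumers that treat the value as a C string read past the end. A small illustrative helper (names hypothetical):

#include <linux/of.h>
#include <linux/string.h>

static void fill_string_prop(struct property *prop, const char *name,
			     const char *value)
{
	prop->name = (char *)name;
	prop->value = (void *)value;
	prop->length = strlen(value) + 1;	/* count the '\0' too */
}
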
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index bdce0679674cc0..02e6485c1ed5c5 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -1377,7 +1377,7 @@ static struct superio_struct *find_superio(struct parport *p)
{
int i;
for (i = 0; i < NR_SUPERIOS; i++)
- if (superios[i].io != p->base)
+ if (superios[i].io == p->base)
return &superios[i];
return NULL;
}
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 01cf1c1a841a4d..8de329546b8220 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -286,12 +286,16 @@ static int bpp_probe(struct platform_device *op)
ops = kmemdup(&parport_sunbpp_ops, sizeof(struct parport_operations),
GFP_KERNEL);
- if (!ops)
+ if (!ops) {
+ err = -ENOMEM;
goto out_unmap;
+ }
dprintk(("register_port\n"));
- if (!(p = parport_register_port((unsigned long)base, irq, dma, ops)))
+ if (!(p = parport_register_port((unsigned long)base, irq, dma, ops))) {
+ err = -ENOMEM;
goto out_free_ops;
+ }
p->size = size;
p->dev = &op->dev;
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 379d08f7614636..d0a4652bb9acb8 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -1235,7 +1235,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
pcie->realio.start = PCIBIOS_MIN_IO;
pcie->realio.end = min_t(resource_size_t,
IO_SPACE_LIMIT,
- resource_size(&pcie->io));
+ resource_size(&pcie->io) - 1);
} else
pcie->realio = pcie->io;
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c
index 99da549d5d06a0..0118287a8a1087 100644
--- a/drivers/pci/host/pcie-altera.c
+++ b/drivers/pci/host/pcie-altera.c
@@ -40,8 +40,10 @@
#define P2A_INT_ENABLE 0x3070
#define P2A_INT_ENA_ALL 0xf
#define RP_LTSSM 0x3c64
+#define RP_LTSSM_MASK 0x1f
#define LTSSM_L0 0xf
+#define PCIE_CAP_OFFSET 0x80
/* TLP configuration type 0 and 1 */
#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */
#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */
@@ -60,6 +62,9 @@
#define TLP_LOOP 500
#define RP_DEVFN 0
+#define LINK_UP_TIMEOUT HZ
+#define LINK_RETRAIN_TIMEOUT HZ
+
#define INTX_NUM 4
#define DWORD_MASK 3
@@ -80,25 +85,21 @@ struct tlp_rp_regpair_t {
u32 reg1;
};
-static void altera_pcie_retrain(struct pci_dev *dev)
+static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
+ const u32 reg)
{
- u16 linkcap, linkstat;
-
- /*
- * Set the retrain bit if the PCIe rootport support > 2.5GB/s, but
- * current speed is 2.5 GB/s.
- */
- pcie_capability_read_word(dev, PCI_EXP_LNKCAP, &linkcap);
+ writel_relaxed(value, pcie->cra_base + reg);
+}
- if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
- return;
+static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
+{
+ return readl_relaxed(pcie->cra_base + reg);
+}
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &linkstat);
- if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB)
- pcie_capability_set_word(dev, PCI_EXP_LNKCTL,
- PCI_EXP_LNKCTL_RL);
+static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
+{
+ return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0);
}
-DECLARE_PCI_FIXUP_EARLY(0x1172, PCI_ANY_ID, altera_pcie_retrain);
/*
* Altera PCIe port uses BAR0 of RC's configuration space as the translation
@@ -119,17 +120,6 @@ static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn,
return false;
}
-static inline void cra_writel(struct altera_pcie *pcie, const u32 value,
- const u32 reg)
-{
- writel_relaxed(value, pcie->cra_base + reg);
-}
-
-static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg)
-{
- return readl_relaxed(pcie->cra_base + reg);
-}
-
static void tlp_write_tx(struct altera_pcie *pcie,
struct tlp_rp_regpair_t *tlp_rp_regdata)
{
@@ -138,11 +128,6 @@ static void tlp_write_tx(struct altera_pcie *pcie,
cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL);
}
-static bool altera_pcie_link_is_up(struct altera_pcie *pcie)
-{
- return !!(cra_readl(pcie, RP_LTSSM) & LTSSM_L0);
-}
-
static bool altera_pcie_valid_config(struct altera_pcie *pcie,
struct pci_bus *bus, int dev)
{
@@ -286,22 +271,14 @@ static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 *value)
+static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 *value)
{
- struct altera_pcie *pcie = bus->sysdata;
int ret;
u32 data;
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
- *value = 0xffffffff;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
switch (size) {
case 1:
byte_en = 1 << (where & 3);
@@ -314,7 +291,7 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
break;
}
- ret = tlp_cfg_dword_read(pcie, bus->number, devfn,
+ ret = tlp_cfg_dword_read(pcie, busno, devfn,
(where & ~DWORD_MASK), byte_en, &data);
if (ret != PCIBIOS_SUCCESSFUL)
return ret;
@@ -334,20 +311,14 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_SUCCESSFUL;
}
-static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
- int where, int size, u32 value)
+static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int where, int size,
+ u32 value)
{
- struct altera_pcie *pcie = bus->sysdata;
u32 data32;
u32 shift = 8 * (where & 3);
u8 byte_en;
- if (altera_pcie_hide_rc_bar(bus, devfn, where))
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
switch (size) {
case 1:
data32 = (value & 0xff) << shift;
@@ -363,8 +334,40 @@ static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
break;
}
- return tlp_cfg_dword_write(pcie, bus->number, devfn,
- (where & ~DWORD_MASK), byte_en, data32);
+ return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK),
+ byte_en, data32);
+}
+
+static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 *value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn))) {
+ *value = 0xffffffff;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
+ value);
+}
+
+static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn,
+ int where, int size, u32 value)
+{
+ struct altera_pcie *pcie = bus->sysdata;
+
+ if (altera_pcie_hide_rc_bar(bus, devfn, where))
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ if (!altera_pcie_valid_config(pcie, bus, PCI_SLOT(devfn)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size,
+ value);
}
static struct pci_ops altera_pcie_ops = {
@@ -372,6 +375,90 @@ static struct pci_ops altera_pcie_ops = {
.write = altera_pcie_cfg_write,
};
+static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 *value)
+{
+ u32 data;
+ int ret;
+
+ ret = _altera_pcie_cfg_read(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(*value),
+ &data);
+ *value = data;
+ return ret;
+}
+
+static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno,
+ unsigned int devfn, int offset, u16 value)
+{
+ return _altera_pcie_cfg_write(pcie, busno, devfn,
+ PCIE_CAP_OFFSET + offset, sizeof(value),
+ value);
+}
+
+static void altera_wait_link_retrain(struct altera_pcie *pcie)
+{
+ u16 reg16;
+ unsigned long start_jiffies;
+
+	/* Wait for link training to end. */
+ start_jiffies = jiffies;
+ for (;;) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKSTA, &reg16);
+ if (!(reg16 & PCI_EXP_LNKSTA_LT))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) {
+ dev_err(&pcie->pdev->dev, "link retrain timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+
+	/* Wait for the link to come up */
+ start_jiffies = jiffies;
+ for (;;) {
+ if (altera_pcie_link_is_up(pcie))
+ break;
+
+ if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) {
+ dev_err(&pcie->pdev->dev, "link up timeout\n");
+ break;
+ }
+ udelay(100);
+ }
+}
+
+static void altera_pcie_retrain(struct altera_pcie *pcie)
+{
+ u16 linkcap, linkstat, linkctl;
+
+ if (!altera_pcie_link_is_up(pcie))
+ return;
+
+ /*
+	 * Set the retrain bit if the PCIe root port supports > 2.5 GB/s,
+	 * but the current speed is 2.5 GB/s.
+ */
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP,
+ &linkcap);
+ if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB)
+ return;
+
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA,
+ &linkstat);
+ if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) {
+ altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, &linkctl);
+ linkctl |= PCI_EXP_LNKCTL_RL;
+ altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN,
+ PCI_EXP_LNKCTL, linkctl);
+
+ altera_wait_link_retrain(pcie);
+ }
+}
+
static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -506,6 +593,11 @@ static int altera_pcie_parse_dt(struct altera_pcie *pcie)
return 0;
}
+static void altera_pcie_host_init(struct altera_pcie *pcie)
+{
+ altera_pcie_retrain(pcie);
+}
+
static int altera_pcie_probe(struct platform_device *pdev)
{
struct altera_pcie *pcie;
@@ -543,6 +635,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS);
/* enable all interrupts */
cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE);
+ altera_pcie_host_init(pcie);
bus = pci_scan_root_bus(&pdev->dev, pcie->root_bus_nr, &altera_pcie_ops,
pcie, &pcie->resources);
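
altera_wait_link_retrain() above uses a bounded polling loop: spin on a status read with udelay() and give up once a jiffies-based deadline passes. A generic sketch of that pattern — poll_done() is a hypothetical stand-in for the hardware status read:

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

static bool poll_done(void);	/* hypothetical hardware status read */

static void wait_with_timeout(unsigned long timeout_jiffies)
{
	unsigned long start = jiffies;

	for (;;) {
		if (poll_done())
			break;
		if (time_after(jiffies, start + timeout_jiffies))
			break;		/* deadline passed: give up */
		udelay(100);
	}
}
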
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index d1fab97d6b01ff..6ce2a73fe0e417 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -457,8 +457,17 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
list_add(&slot->slot_list, &pci_hotplug_slot_list);
result = fs_add_slot(pci_slot);
+ if (result)
+ goto err_list_del;
+
kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
dbg("Added slot %s to the list\n", name);
+ goto out;
+
+err_list_del:
+ list_del(&slot->slot_list);
+ pci_slot->hotplug = NULL;
+ pci_destroy_slot(pci_slot);
out:
mutex_unlock(&pci_hp_mutex);
return result;
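
The __pci_hp_register() hunk adds the usual kernel unwind pattern: when a late step fails, earlier steps are rolled back in reverse order through goto labels. A generic sketch with hypothetical step1()/step2()/undo1():

static int step1(void);		/* hypothetical setup steps */
static int step2(void);
static void undo1(void);	/* rollback for step1 */

static int do_register(void)
{
	int ret;

	ret = step1();
	if (ret)
		goto out;

	ret = step2();
	if (ret)
		goto err_undo1;	/* unwind in reverse order */
	goto out;

err_undo1:
	undo1();
out:
	return ret;
}
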
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 62d6fe6c37145c..6b0f7e0d7dbd70 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -132,9 +132,10 @@ int pciehp_unconfigure_device(struct slot *p_slot);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
+void pcie_shutdown_notification(struct controller *ctrl);
int pciehp_enable_slot(struct slot *p_slot);
int pciehp_disable_slot(struct slot *p_slot);
-void pcie_enable_notification(struct controller *ctrl);
+void pcie_reenable_notification(struct controller *ctrl);
int pciehp_power_on_slot(struct slot *slot);
void pciehp_power_off_slot(struct slot *slot);
void pciehp_get_power_status(struct slot *slot, u8 *status);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 612b21a14df593..47cc3568514e0f 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -77,6 +77,12 @@ static int reset_slot (struct hotplug_slot *slot, int probe);
*/
static void release_slot(struct hotplug_slot *hotplug_slot)
{
+ struct slot *slot = hotplug_slot->private;
+
+ /* queued work needs hotplug_slot name */
+ cancel_delayed_work(&slot->work);
+ drain_workqueue(slot->wq);
+
kfree(hotplug_slot->ops);
kfree(hotplug_slot->info);
kfree(hotplug_slot);
@@ -276,6 +282,7 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
@@ -295,7 +302,7 @@ static int pciehp_resume(struct pcie_device *dev)
ctrl = get_service_data(dev);
/* reinitialize the chipset's event detection logic */
- pcie_enable_notification(ctrl);
+ pcie_reenable_notification(ctrl);
slot = ctrl->slot;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7fc9d571f3cbcb..8412cef03f2ffb 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -635,7 +635,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-void pcie_enable_notification(struct controller *ctrl)
+static void pcie_enable_notification(struct controller *ctrl)
{
u16 cmd, mask;
@@ -673,6 +673,17 @@ void pcie_enable_notification(struct controller *ctrl)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
}
+void pcie_reenable_notification(struct controller *ctrl)
+{
+ /*
+ * Clear both Presence and Data Link Layer Changed to make sure
+ * those events still fire after we have re-enabled them.
+ */
+ pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
+ pcie_enable_notification(ctrl);
+}
+
static void pcie_disable_notification(struct controller *ctrl)
{
u16 mask;
@@ -737,7 +748,7 @@ int pcie_init_notification(struct controller *ctrl)
return 0;
}
-static void pcie_shutdown_notification(struct controller *ctrl)
+void pcie_shutdown_notification(struct controller *ctrl)
{
if (ctrl->notification_enabled) {
pcie_disable_notification(ctrl);
@@ -772,7 +783,7 @@ abort:
static void pcie_cleanup_slot(struct controller *ctrl)
{
struct slot *slot = ctrl->slot;
- cancel_delayed_work(&slot->work);
+
destroy_workqueue(slot->wq);
kfree(slot);
}
@@ -849,7 +860,6 @@ abort:
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_shutdown_notification(ctrl);
pcie_cleanup_slot(ctrl);
kfree(ctrl);
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c8503244689ffe..0e118c1081db81 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -565,7 +565,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
union acpi_object *obj;
struct pci_host_bridge *bridge;
- if (acpi_pci_disabled || !bus->bridge)
+ if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
return;
acpi_pci_slot_enumerate(bus);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index acb56ed86f08b4..9fa2eb80c55829 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -180,13 +180,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (!val) {
- if (pci_is_enabled(pdev))
- pci_disable_device(pdev);
- else
- result = -EIO;
- } else
+ device_lock(dev);
+ if (dev->driver)
+ result = -EBUSY;
+ else if (val)
result = pci_enable_device(pdev);
+ else if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
+ else
+ result = -EIO;
+ device_unlock(dev);
return result < 0 ? result : count;
}
@@ -1595,6 +1598,9 @@ static const struct attribute_group *pci_dev_attr_groups[] = {
#ifdef CONFIG_PCI_IOV
&sriov_dev_attr_group,
#endif
+#ifdef CONFIG_PCIEAER
+ &aer_stats_attr_group,
+#endif
NULL,
};
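
The enable_store() rework serializes the sysfs toggle against driver binding by holding the device lock and refusing to touch a device that already has a driver. A minimal sketch of that pattern (an illustrative wrapper, not the actual sysfs handler):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int sysfs_toggle_enable(struct pci_dev *pdev, bool enable)
{
	struct device *dev = &pdev->dev;
	int ret = 0;

	device_lock(dev);		/* excludes concurrent probe/remove */
	if (dev->driver)
		ret = -EBUSY;		/* a driver owns the device */
	else if (enable)
		ret = pci_enable_device(pdev);
	else if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	else
		ret = -EIO;
	device_unlock(dev);

	return ret;
}
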
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 506c9777ed75dd..d0e04985b2be58 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1112,12 +1112,12 @@ int pci_save_state(struct pci_dev *dev)
EXPORT_SYMBOL(pci_save_state);
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
- u32 saved_val, int retry)
+ u32 saved_val, int retry, bool force)
{
u32 val;
pci_read_config_dword(pdev, offset, &val);
- if (val == saved_val)
+ if (!force && val == saved_val)
return;
for (;;) {
@@ -1136,25 +1136,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
}
static void pci_restore_config_space_range(struct pci_dev *pdev,
- int start, int end, int retry)
+ int start, int end, int retry,
+ bool force)
{
int index;
for (index = end; index >= start; index--)
pci_restore_config_dword(pdev, 4 * index,
pdev->saved_config_space[index],
- retry);
+ retry, force);
}
static void pci_restore_config_space(struct pci_dev *pdev)
{
if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
- pci_restore_config_space_range(pdev, 10, 15, 0);
+ pci_restore_config_space_range(pdev, 10, 15, 0, false);
/* Restore BARs before the command register. */
- pci_restore_config_space_range(pdev, 4, 9, 10);
- pci_restore_config_space_range(pdev, 0, 3, 0);
+ pci_restore_config_space_range(pdev, 4, 9, 10, false);
+ pci_restore_config_space_range(pdev, 0, 3, 0, false);
+ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+ /*
+ * Force rewriting of prefetch registers to avoid S3 resume
+ * issues on Intel PCI bridges that occur when these
+ * registers are not explicitly written.
+ */
+ pci_restore_config_space_range(pdev, 9, 11, 0, true);
+ pci_restore_config_space_range(pdev, 0, 8, 0, false);
} else {
- pci_restore_config_space_range(pdev, 0, 15, 0);
+ pci_restore_config_space_range(pdev, 0, 15, 0, false);
}
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index f45243040f5a7f..c7fbd6b0ad175b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -200,7 +200,14 @@ extern const struct attribute_group *pci_dev_groups[];
extern const struct attribute_group *pcibus_groups[];
extern struct device_type pci_dev_type;
extern const struct attribute_group *pci_bus_groups[];
-
+#ifdef CONFIG_PCIEAER
+void pci_aer_init(struct pci_dev *pdev);
+void pci_aer_exit(struct pci_dev *pdev);
+extern const struct attribute_group aer_stats_attr_group;
+#else
+static inline void pci_aer_init(struct pci_dev *dev) { }
+static inline void pci_aer_exit(struct pci_dev *pdev) { }
+#endif
/**
* pci_match_one_device - Tell if a PCI device structure has a matching
diff --git a/drivers/pci/pcie/aer/Makefile b/drivers/pci/pcie/aer/Makefile
index 2cba67510dc862..e4d4433eb6aa14 100644
--- a/drivers/pci/pcie/aer/Makefile
+++ b/drivers/pci/pcie/aer/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_PCIEAER) += aerdriver.o
obj-$(CONFIG_PCIE_ECRC) += ecrc.o
-aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o
+aerdriver-objs := aerdrv_errprint.o aerdrv_core.o aerdrv.o aerdrv_stats.o
aerdriver-$(CONFIG_ACPI) += aerdrv_acpi.o
obj-$(CONFIG_PCIEAER_INJECT) += aer_inject.o
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 945c939a86c5c2..69a371bf3893a8 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -32,6 +32,10 @@
PCI_ERR_UNC_MALF_TLP)
#define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
+
+#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
+#define AER_MAX_TYPEOF_UNCOR_ERRS	26	/* as per PCI_ERR_UNCOR_STATUS */
+
struct aer_err_info {
struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
int error_dev_num;
@@ -110,6 +114,12 @@ void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info);
irqreturn_t aer_irq(int irq, void *context);
+extern void pci_dev_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_info *info);
+extern void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_source *e_src);
+extern const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS];
+extern const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS];
#ifdef CONFIG_ACPI_APEI
int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index ca5dbf03e3884b..809d0dcf2c60a6 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -724,6 +724,8 @@ static void aer_isr_one_error(struct pcie_device *p_device,
{
struct aer_err_info *e_info;
+ pci_rootport_aer_stats_incr(p_device->port, e_src);
+
/* struct aer_err_info might be big, so we allocate it with slab */
e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
if (!e_info) {
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 167fe411ce2e30..8184fcca8fa0d6 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -74,7 +74,7 @@ static const char *aer_error_layer[] = {
"Transaction Layer"
};
-static const char *aer_correctable_error_string[] = {
+const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = {
"Receiver Error", /* Bit Position 0 */
NULL,
NULL,
@@ -93,7 +93,7 @@ static const char *aer_correctable_error_string[] = {
"Header Log Overflow", /* Bit Position 15 */
};
-static const char *aer_uncorrectable_error_string[] = {
+const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = {
"Undefined", /* Bit Position 0 */
NULL,
NULL,
@@ -161,6 +161,7 @@ static void __aer_print_error(struct pci_dev *dev,
dev_err(&dev->dev, " [%2d] Unknown Error Bit%s\n",
i, info->first_error == i ? " (First)" : "");
}
+ pci_dev_aer_stats_incr(dev, info);
}
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
diff --git a/drivers/pci/pcie/aer/aerdrv_stats.c b/drivers/pci/pcie/aer/aerdrv_stats.c
new file mode 100644
index 00000000000000..e32447a605b7d9
--- /dev/null
+++ b/drivers/pci/pcie/aer/aerdrv_stats.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Google Inc, All Rights Reserved.
+ *
+ * AER Statistics - exposed to userspace via sysfs attributes.
+ */
+
+#include <linux/pci.h>
+#include "aerdrv.h"
+
+/* AER stats for the device */
+struct aer_stats {
+
+ /*
+ * Fields for all AER capable devices. They indicate the errors
+ * "as seen by this device". Note that this may mean that if an
+ * end point is causing problems, the AER counters may increment
+ * at its link partner (e.g. root port) because the errors will be
+	 * "seen" by the link partner and not the problematic end point
+ * itself (which may report all counters as 0 as it never saw any
+ * problems).
+ */
+ /* Counters for different type of correctable errors */
+ u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
+ /* Counters for different type of fatal uncorrectable errors */
+ u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
+ /* Counters for different type of nonfatal uncorrectable errors */
+ u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
+ /* Total number of ERR_COR sent by this device */
+ u64 dev_total_cor_errs;
+ /* Total number of ERR_FATAL sent by this device */
+ u64 dev_total_fatal_errs;
+ /* Total number of ERR_NONFATAL sent by this device */
+ u64 dev_total_nonfatal_errs;
+
+ /*
+	 * Fields for root ports & root complex event collectors only; these
+	 * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
+	 * messages received by the root port / event collector, INCLUDING the
+	 * ones that are generated internally (by the root port itself).
+ */
+ u64 rootport_total_cor_errs;
+ u64 rootport_total_fatal_errs;
+ u64 rootport_total_nonfatal_errs;
+};
+
+
+void pci_aer_init(struct pci_dev *dev)
+{
+ dev->aer_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+
+ if (dev->aer_cap)
+ dev->aer_stats = kzalloc(sizeof(struct aer_stats), GFP_KERNEL);
+
+ pci_cleanup_aer_error_status_regs(dev);
+}
+
+void pci_aer_exit(struct pci_dev *dev)
+{
+ kfree(dev->aer_stats);
+ dev->aer_stats = NULL;
+}
+
+#define aer_stats_dev_attr(name, stats_array, strings_array, \
+ total_string, total_field) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ unsigned int i; \
+ char *str = buf; \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ u64 *stats = pdev->aer_stats->stats_array; \
+ \
+ for (i = 0; i < ARRAY_SIZE(strings_array); i++) { \
+ if (strings_array[i]) \
+ str += sprintf(str, "%s %llu\n", \
+ strings_array[i], stats[i]); \
+ else if (stats[i]) \
+ str += sprintf(str, #stats_array "_bit[%d] %llu\n",\
+ i, stats[i]); \
+ } \
+ str += sprintf(str, "TOTAL_%s %llu\n", total_string, \
+ pdev->aer_stats->total_field); \
+ return str-buf; \
+} \
+static DEVICE_ATTR_RO(name)
+
+aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
+ aer_correctable_error_string, "ERR_COR",
+ dev_total_cor_errs);
+aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
+ aer_uncorrectable_error_string, "ERR_FATAL",
+ dev_total_fatal_errs);
+aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
+ aer_uncorrectable_error_string, "ERR_NONFATAL",
+ dev_total_nonfatal_errs);
+
+#define aer_stats_rootport_attr(name, field) \
+ static ssize_t \
+ name##_show(struct device *dev, struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pdev = to_pci_dev(dev); \
+ return sprintf(buf, "%llu\n", pdev->aer_stats->field); \
+} \
+static DEVICE_ATTR_RO(name)
+
+aer_stats_rootport_attr(aer_rootport_total_err_cor,
+ rootport_total_cor_errs);
+aer_stats_rootport_attr(aer_rootport_total_err_fatal,
+ rootport_total_fatal_errs);
+aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
+ rootport_total_nonfatal_errs);
+
+static struct attribute *aer_stats_attrs[] __ro_after_init = {
+ &dev_attr_aer_dev_correctable.attr,
+ &dev_attr_aer_dev_fatal.attr,
+ &dev_attr_aer_dev_nonfatal.attr,
+ &dev_attr_aer_rootport_total_err_cor.attr,
+ &dev_attr_aer_rootport_total_err_fatal.attr,
+ &dev_attr_aer_rootport_total_err_nonfatal.attr,
+ NULL
+};
+
+static umode_t aer_stats_attrs_are_visible(struct kobject *kobj,
+ struct attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ if (!pdev->aer_stats)
+ return 0;
+
+ if ((a == &dev_attr_aer_rootport_total_err_cor.attr ||
+ a == &dev_attr_aer_rootport_total_err_fatal.attr ||
+ a == &dev_attr_aer_rootport_total_err_nonfatal.attr) &&
+ pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT)
+ return 0;
+
+ return a->mode;
+}
+
+const struct attribute_group aer_stats_attr_group = {
+ .attrs = aer_stats_attrs,
+ .is_visible = aer_stats_attrs_are_visible,
+};
+
+void pci_dev_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_info *info)
+{
+ int status, i, max = -1;
+ u64 *counter = NULL;
+ struct aer_stats *aer_stats = pdev->aer_stats;
+
+ if (!aer_stats)
+ return;
+
+ switch (info->severity) {
+ case AER_CORRECTABLE:
+ aer_stats->dev_total_cor_errs++;
+ counter = &aer_stats->dev_cor_errs[0];
+ max = AER_MAX_TYPEOF_COR_ERRS;
+ break;
+ case AER_NONFATAL:
+ aer_stats->dev_total_nonfatal_errs++;
+ counter = &aer_stats->dev_nonfatal_errs[0];
+ max = AER_MAX_TYPEOF_UNCOR_ERRS;
+ break;
+ case AER_FATAL:
+ aer_stats->dev_total_fatal_errs++;
+ counter = &aer_stats->dev_fatal_errs[0];
+ max = AER_MAX_TYPEOF_UNCOR_ERRS;
+ break;
+ }
+
+ status = (info->status & ~info->mask);
+ for (i = 0; i < max; i++)
+ if (status & (1 << i))
+ counter[i]++;
+}
+
+void pci_rootport_aer_stats_incr(struct pci_dev *pdev,
+ struct aer_err_source *e_src)
+{
+ struct aer_stats *aer_stats = pdev->aer_stats;
+
+ if (!aer_stats)
+ return;
+
+ if (e_src->status & PCI_ERR_ROOT_COR_RCV)
+ aer_stats->rootport_total_cor_errs++;
+
+ if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
+ if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
+ aer_stats->rootport_total_fatal_errs++;
+ else
+ aer_stats->rootport_total_nonfatal_errs++;
+ }
+}
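
The new attributes surface one "<error name> <count>" line per error type plus a TOTAL_* line. A small userspace sketch that dumps them — the device address below is a placeholder; substitute any AER-capable device under /sys/bus/pci/devices:

#include <stdio.h>

int main(void)
{
	/* placeholder device address; adjust to a real AER-capable device */
	const char *attr =
		"/sys/bus/pci/devices/0000:00:1c.0/aer_dev_correctable";
	char line[256];
	FILE *f = fopen(attr, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "<error name> <count>" per line */
	fclose(f);
	return 0;
}
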
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index ff1413ec059b3d..554cfcdc88031d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1347,6 +1347,10 @@ static void pci_configure_mps(struct pci_dev *dev)
if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
return;
+	/* MPS and MRRS fields are of type 'RsvdP' for VFs; short-circuit out */
+ if (dev->is_virtfn)
+ return;
+
mps = pcie_get_mps(dev);
p_mps = pcie_get_mps(bridge);
@@ -1549,6 +1553,7 @@ static void pci_configure_device(struct pci_dev *dev)
static void pci_release_capabilities(struct pci_dev *dev)
{
+ pci_aer_exit(dev);
pci_vpd_release(dev);
pci_iov_release(dev);
pci_free_cap_save_buffers(dev);
@@ -1690,7 +1695,8 @@ static void pci_init_capabilities(struct pci_dev *dev)
/* Enable ACS P2P upstream forwarding */
pci_enable_acs(dev);
- pci_cleanup_aer_error_status_regs(dev);
+ /* Advanced Error Reporting */
+ pci_aer_init(dev);
}
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b3c49bf86c7072..7e6ef2e8528782 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3061,7 +3061,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
pci_iounmap(dev, regs);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
diff --git a/drivers/pcmcia/ricoh.h b/drivers/pcmcia/ricoh.h
index 01098c841f877c..8ac7b138c09486 100644
--- a/drivers/pcmcia/ricoh.h
+++ b/drivers/pcmcia/ricoh.h
@@ -119,6 +119,10 @@
#define RL5C4XX_MISC_CONTROL 0x2F /* 8 bit */
#define RL5C4XX_ZV_ENABLE 0x08
+/* Misc Control 3 Register */
+#define RL5C4XX_MISC3 0x00A2 /* 16 bit */
+#define RL5C47X_MISC3_CB_CLKRUN_DIS BIT(1)
+
#ifdef __YENTA_H
#define rl_misc(socket) ((socket)->private[0])
@@ -156,6 +160,35 @@ static void ricoh_set_zv(struct yenta_socket *socket)
}
}
+static void ricoh_set_clkrun(struct yenta_socket *socket, bool quiet)
+{
+ u16 misc3;
+
+ /*
+	 * The RL5C475II likely has this setting too; however, no datasheet
+	 * is publicly available for this chip.
+ */
+ if (socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C476 &&
+ socket->dev->device != PCI_DEVICE_ID_RICOH_RL5C478)
+ return;
+
+ if (socket->dev->revision < 0x80)
+ return;
+
+ misc3 = config_readw(socket, RL5C4XX_MISC3);
+ if (misc3 & RL5C47X_MISC3_CB_CLKRUN_DIS) {
+ if (!quiet)
+ dev_dbg(&socket->dev->dev,
+ "CLKRUN feature already disabled\n");
+ } else if (disable_clkrun) {
+ if (!quiet)
+ dev_info(&socket->dev->dev,
+ "Disabling CLKRUN feature\n");
+ misc3 |= RL5C47X_MISC3_CB_CLKRUN_DIS;
+ config_writew(socket, RL5C4XX_MISC3, misc3);
+ }
+}
+
static void ricoh_save_state(struct yenta_socket *socket)
{
rl_misc(socket) = config_readw(socket, RL5C4XX_MISC);
@@ -172,6 +205,7 @@ static void ricoh_restore_state(struct yenta_socket *socket)
config_writew(socket, RL5C4XX_16BIT_IO_0, rl_io(socket));
config_writew(socket, RL5C4XX_16BIT_MEM_0, rl_mem(socket));
config_writew(socket, RL5C4XX_CONFIG, rl_config(socket));
+ ricoh_set_clkrun(socket, true);
}
@@ -197,6 +231,7 @@ static int ricoh_override(struct yenta_socket *socket)
config_writew(socket, RL5C4XX_CONFIG, config);
ricoh_set_zv(socket);
+ ricoh_set_clkrun(socket, false);
return 0;
}
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 5d6d9b1549bc4f..5034422a1d969b 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -26,7 +26,8 @@
static bool disable_clkrun;
module_param(disable_clkrun, bool, 0444);
-MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
+MODULE_PARM_DESC(disable_clkrun,
+ "If PC card doesn't function properly, please try this option (TI and Ricoh bridges only)");
static bool isa_probe = 1;
module_param(isa_probe, bool, 0444);
diff --git a/drivers/phy/phy-rockchip-typec.c b/drivers/phy/phy-rockchip-typec.c
index 10dd06775393cd..da4701af41b514 100644
--- a/drivers/phy/phy-rockchip-typec.c
+++ b/drivers/phy/phy-rockchip-typec.c
@@ -63,6 +63,7 @@
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
+#include <soc/rockchip/rockchip_phy_typec.h>
#define CMN_SSM_BANDGAP (0x21 << 2)
#define CMN_SSM_BIAS (0x22 << 2)
@@ -324,21 +325,29 @@
* clock 0: PLL 0 div 1
* clock 1: PLL 1 div 2
*/
-#define CLK_PLL_CONFIG 0X30
+#define CLK_PLL1_DIV1 0x20
+#define CLK_PLL1_DIV2 0x30
#define CLK_PLL_MASK 0x33
#define CMN_READY BIT(0)
+#define DP_PLL_CLOCK_ENABLE_ACK BIT(3)
#define DP_PLL_CLOCK_ENABLE BIT(2)
+#define DP_PLL_ENABLE_ACK BIT(1)
#define DP_PLL_ENABLE BIT(0)
#define DP_PLL_DATA_RATE_RBR ((2 << 12) | (4 << 8))
#define DP_PLL_DATA_RATE_HBR ((2 << 12) | (4 << 8))
#define DP_PLL_DATA_RATE_HBR2 ((1 << 12) | (2 << 8))
+#define DP_PLL_DATA_RATE_MASK 0xff00
-#define DP_MODE_A0 BIT(4)
-#define DP_MODE_A2 BIT(6)
-#define DP_MODE_ENTER_A0 0xc101
-#define DP_MODE_ENTER_A2 0xc104
+#define DP_MODE_MASK 0xf
+#define DP_MODE_ENTER_A0 BIT(0)
+#define DP_MODE_ENTER_A2 BIT(2)
+#define DP_MODE_ENTER_A3 BIT(3)
+#define DP_MODE_A0_ACK BIT(4)
+#define DP_MODE_A2_ACK BIT(6)
+#define DP_MODE_A3_ACK BIT(7)
+#define DP_LINK_RESET_DEASSERTED BIT(8)
#define PHY_MODE_SET_TIMEOUT 100000
@@ -350,51 +359,7 @@
#define MODE_DFP_USB BIT(1)
#define MODE_DFP_DP BIT(2)
-struct usb3phy_reg {
- u32 offset;
- u32 enable_bit;
- u32 write_enable;
-};
-
-/**
- * struct rockchip_usb3phy_port_cfg: usb3-phy port configuration.
- * @reg: the base address for usb3-phy config.
- * @typec_conn_dir: the register of type-c connector direction.
- * @usb3tousb2_en: the register of type-c force usb2 to usb2 enable.
- * @external_psm: the register of type-c phy external psm clock.
- * @pipe_status: the register of type-c phy pipe status.
- * @usb3_host_disable: the register of type-c usb3 host disable.
- * @usb3_host_port: the register of type-c usb3 host port.
- * @uphy_dp_sel: the register of type-c phy DP select control.
- */
-struct rockchip_usb3phy_port_cfg {
- unsigned int reg;
- struct usb3phy_reg typec_conn_dir;
- struct usb3phy_reg usb3tousb2_en;
- struct usb3phy_reg external_psm;
- struct usb3phy_reg pipe_status;
- struct usb3phy_reg usb3_host_disable;
- struct usb3phy_reg usb3_host_port;
- struct usb3phy_reg uphy_dp_sel;
-};
-
-struct rockchip_typec_phy {
- struct device *dev;
- void __iomem *base;
- struct extcon_dev *extcon;
- struct regmap *grf_regs;
- struct clk *clk_core;
- struct clk *clk_ref;
- struct reset_control *uphy_rst;
- struct reset_control *pipe_rst;
- struct reset_control *tcphy_rst;
- const struct rockchip_usb3phy_port_cfg *port_cfgs;
- /* mutex to protect access to individual PHYs */
- struct mutex lock;
-
- bool flip;
- u8 mode;
-};
+#define DP_DEFAULT_RATE 162000
struct phy_reg {
u16 value;
@@ -418,15 +383,15 @@ struct phy_reg usb3_pll_cfg[] = {
{ 0x8, CMN_DIAG_PLL0_LF_PROG },
};
-struct phy_reg dp_pll_cfg[] = {
+struct phy_reg dp_pll_rbr_cfg[] = {
{ 0xf0, CMN_PLL1_VCOCAL_INIT },
{ 0x18, CMN_PLL1_VCOCAL_ITER },
{ 0x30b9, CMN_PLL1_VCOCAL_START },
- { 0x21c, CMN_PLL1_INTDIV },
+ { 0x87, CMN_PLL1_INTDIV },
{ 0, CMN_PLL1_FRACDIV },
- { 0x5, CMN_PLL1_HIGH_THR },
- { 0x35, CMN_PLL1_SS_CTRL1 },
- { 0x7f1e, CMN_PLL1_SS_CTRL2 },
+ { 0x22, CMN_PLL1_HIGH_THR },
+ { 0x8000, CMN_PLL1_SS_CTRL1 },
+ { 0, CMN_PLL1_SS_CTRL2 },
{ 0x20, CMN_PLL1_DSM_DIAG },
{ 0, CMN_PLLSM1_USER_DEF_CTRL },
{ 0, CMN_DIAG_PLL1_OVRD },
@@ -437,9 +402,52 @@ struct phy_reg dp_pll_cfg[] = {
{ 0x8, CMN_DIAG_PLL1_LF_PROG },
{ 0x100, CMN_DIAG_PLL1_PTATIS_TUNE1 },
{ 0x7, CMN_DIAG_PLL1_PTATIS_TUNE2 },
- { 0x4, CMN_DIAG_PLL1_INCLK_CTRL },
+ { 0x1, CMN_DIAG_PLL1_INCLK_CTRL },
};
+struct phy_reg dp_pll_hbr_cfg[] = {
+ { 0xf0, CMN_PLL1_VCOCAL_INIT },
+ { 0x18, CMN_PLL1_VCOCAL_ITER },
+ { 0x30b4, CMN_PLL1_VCOCAL_START },
+ { 0xe1, CMN_PLL1_INTDIV },
+ { 0, CMN_PLL1_FRACDIV },
+ { 0x5, CMN_PLL1_HIGH_THR },
+ { 0x8000, CMN_PLL1_SS_CTRL1 },
+ { 0, CMN_PLL1_SS_CTRL2 },
+ { 0x20, CMN_PLL1_DSM_DIAG },
+ { 0x1000, CMN_PLLSM1_USER_DEF_CTRL },
+ { 0, CMN_DIAG_PLL1_OVRD },
+ { 0, CMN_DIAG_PLL1_FBH_OVRD },
+ { 0, CMN_DIAG_PLL1_FBL_OVRD },
+ { 0x7, CMN_DIAG_PLL1_V2I_TUNE },
+ { 0x45, CMN_DIAG_PLL1_CP_TUNE },
+ { 0x8, CMN_DIAG_PLL1_LF_PROG },
+ { 0x1, CMN_DIAG_PLL1_PTATIS_TUNE1 },
+ { 0x1, CMN_DIAG_PLL1_PTATIS_TUNE2 },
+ { 0x1, CMN_DIAG_PLL1_INCLK_CTRL },
+};
+
+struct phy_reg dp_pll_hbr2_cfg[] = {
+ { 0xf0, CMN_PLL1_VCOCAL_INIT },
+ { 0x18, CMN_PLL1_VCOCAL_ITER },
+ { 0x30b4, CMN_PLL1_VCOCAL_START },
+ { 0xe1, CMN_PLL1_INTDIV },
+ { 0, CMN_PLL1_FRACDIV },
+ { 0x5, CMN_PLL1_HIGH_THR },
+ { 0x8000, CMN_PLL1_SS_CTRL1 },
+ { 0, CMN_PLL1_SS_CTRL2 },
+ { 0x20, CMN_PLL1_DSM_DIAG },
+ { 0x1000, CMN_PLLSM1_USER_DEF_CTRL },
+ { 0, CMN_DIAG_PLL1_OVRD },
+ { 0, CMN_DIAG_PLL1_FBH_OVRD },
+ { 0, CMN_DIAG_PLL1_FBL_OVRD },
+ { 0x7, CMN_DIAG_PLL1_V2I_TUNE },
+ { 0x45, CMN_DIAG_PLL1_CP_TUNE },
+ { 0x8, CMN_DIAG_PLL1_LF_PROG },
+ { 0x1, CMN_DIAG_PLL1_PTATIS_TUNE1 },
+ { 0x1, CMN_DIAG_PLL1_PTATIS_TUNE2 },
+ { 0x1, CMN_DIAG_PLL1_INCLK_CTRL },
+};
static const struct rockchip_usb3phy_port_cfg rk3399_usb3phy_port_cfgs[] = {
{
.reg = 0xff7c0000,
@@ -464,6 +472,24 @@ static const struct rockchip_usb3phy_port_cfg rk3399_usb3phy_port_cfgs[] = {
{ /* sentinel */ }
};
+/* default phy config */
+static const struct phy_config tcphy_default_config[3][4] = {
+ {{ .swing = 0x2a, .pe = 0x00 },
+ { .swing = 0x1f, .pe = 0x15 },
+ { .swing = 0x14, .pe = 0x22 },
+ { .swing = 0x02, .pe = 0x2b } },
+
+ {{ .swing = 0x21, .pe = 0x00 },
+ { .swing = 0x12, .pe = 0x15 },
+ { .swing = 0x02, .pe = 0x22 },
+ { .swing = 0, .pe = 0 } },
+
+ {{ .swing = 0x15, .pe = 0x00 },
+ { .swing = 0x00, .pe = 0x15 },
+ { .swing = 0, .pe = 0 },
+ { .swing = 0, .pe = 0 } },
+};
+
static void tcphy_cfg_24m(struct rockchip_typec_phy *tcphy)
{
u32 i, rdata;
@@ -485,7 +511,7 @@ static void tcphy_cfg_24m(struct rockchip_typec_phy *tcphy)
rdata = readl(tcphy->base + CMN_DIAG_HSCLK_SEL);
rdata &= ~CLK_PLL_MASK;
- rdata |= CLK_PLL_CONFIG;
+ rdata |= CLK_PLL1_DIV2;
writel(rdata, tcphy->base + CMN_DIAG_HSCLK_SEL);
}
@@ -499,17 +525,45 @@ static void tcphy_cfg_usb3_pll(struct rockchip_typec_phy *tcphy)
tcphy->base + usb3_pll_cfg[i].addr);
}
-static void tcphy_cfg_dp_pll(struct rockchip_typec_phy *tcphy)
+static void tcphy_cfg_dp_pll(struct rockchip_typec_phy *tcphy, int link_rate)
{
- u32 i;
+ struct phy_reg *phy_cfg;
+ u32 clk_ctrl;
+ u32 i, cfg_size, hsclk_sel;
+
+ hsclk_sel = readl(tcphy->base + CMN_DIAG_HSCLK_SEL);
+ hsclk_sel &= ~CLK_PLL_MASK;
+
+ switch (link_rate) {
+ case 540000:
+ clk_ctrl = DP_PLL_DATA_RATE_HBR2;
+ hsclk_sel |= CLK_PLL1_DIV1;
+ phy_cfg = dp_pll_hbr2_cfg;
+ cfg_size = ARRAY_SIZE(dp_pll_hbr2_cfg);
+ break;
+ case 270000:
+ clk_ctrl = DP_PLL_DATA_RATE_HBR;
+ hsclk_sel |= CLK_PLL1_DIV2;
+ phy_cfg = dp_pll_hbr_cfg;
+ cfg_size = ARRAY_SIZE(dp_pll_hbr_cfg);
+ break;
+ case 162000:
+ default:
+ clk_ctrl = DP_PLL_DATA_RATE_RBR;
+ hsclk_sel |= CLK_PLL1_DIV2;
+ phy_cfg = dp_pll_rbr_cfg;
+ cfg_size = ARRAY_SIZE(dp_pll_rbr_cfg);
+ break;
+ }
+
+ clk_ctrl |= DP_PLL_CLOCK_ENABLE | DP_PLL_ENABLE;
+ writel(clk_ctrl, tcphy->base + DP_CLK_CTL);
- /* set the default mode to RBR */
- writel(DP_PLL_CLOCK_ENABLE | DP_PLL_ENABLE | DP_PLL_DATA_RATE_RBR,
- tcphy->base + DP_CLK_CTL);
+ writel(hsclk_sel, tcphy->base + CMN_DIAG_HSCLK_SEL);
/* load the configuration of PLL1 */
- for (i = 0; i < ARRAY_SIZE(dp_pll_cfg); i++)
- writel(dp_pll_cfg[i].value, tcphy->base + dp_pll_cfg[i].addr);
+ for (i = 0; i < cfg_size; i++)
+ writel(phy_cfg[i].value, tcphy->base + phy_cfg[i].addr);
}
static void tcphy_tx_usb3_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
@@ -544,9 +598,10 @@ static void tcphy_rx_usb3_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
}
}
-static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
+static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, int link_rate,
+ u8 swing, u8 pre_emp, u32 lane)
{
- u16 rdata;
+ u16 val;
writel(0xbefc, tcphy->base + XCVR_PSM_RCTRL(lane));
writel(0x6799, tcphy->base + TX_PSC_A0(lane));
@@ -554,25 +609,32 @@ static void tcphy_dp_cfg_lane(struct rockchip_typec_phy *tcphy, u32 lane)
writel(0x98, tcphy->base + TX_PSC_A2(lane));
writel(0x98, tcphy->base + TX_PSC_A3(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_000(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_001(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_010(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_011(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_100(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_101(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_110(lane));
- writel(0, tcphy->base + TX_TXCC_MGNFS_MULT_111(lane));
- writel(0, tcphy->base + TX_TXCC_CPOST_MULT_10(lane));
- writel(0, tcphy->base + TX_TXCC_CPOST_MULT_01(lane));
- writel(0, tcphy->base + TX_TXCC_CPOST_MULT_00(lane));
- writel(0, tcphy->base + TX_TXCC_CPOST_MULT_11(lane));
-
- writel(0x128, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
- writel(0x400, tcphy->base + TX_DIAG_TX_DRV(lane));
-
- rdata = readl(tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
- rdata = (rdata & 0x8fff) | 0x6000;
- writel(rdata, tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
+ writel(tcphy->config[swing][pre_emp].swing,
+ tcphy->base + TX_TXCC_MGNFS_MULT_000(lane));
+ writel(tcphy->config[swing][pre_emp].pe,
+ tcphy->base + TX_TXCC_CPOST_MULT_00(lane));
+
+ if (swing == 2 && pre_emp == 0 && link_rate != 540000) {
+ writel(0x700, tcphy->base + TX_DIAG_TX_DRV(lane));
+ writel(0x13c, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
+ } else {
+ writel(0x128, tcphy->base + TX_TXCC_CAL_SCLR_MULT(lane));
+ writel(0x0400, tcphy->base + TX_DIAG_TX_DRV(lane));
+ }
+
+ val = readl(tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
+ val = val & 0x8fff;
+ switch (link_rate) {
+ case 540000:
+ val |= (4 << 12);
+ break;
+ case 162000:
+ case 270000:
+ default:
+ val |= (6 << 12);
+ break;
+ }
+ writel(val, tcphy->base + XCVR_DIAG_PLLDRC_CTRL(lane));
}
static inline int property_enable(struct rockchip_typec_phy *tcphy,
@@ -763,30 +825,33 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode)
tcphy_cfg_24m(tcphy);
if (mode == MODE_DFP_DP) {
- tcphy_cfg_dp_pll(tcphy);
+ tcphy_cfg_dp_pll(tcphy, DP_DEFAULT_RATE);
for (i = 0; i < 4; i++)
- tcphy_dp_cfg_lane(tcphy, i);
+ tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, i);
writel(PIN_ASSIGN_C_E, tcphy->base + PMA_LANE_CFG);
} else {
tcphy_cfg_usb3_pll(tcphy);
- tcphy_cfg_dp_pll(tcphy);
+ tcphy_cfg_dp_pll(tcphy, DP_DEFAULT_RATE);
if (tcphy->flip) {
tcphy_tx_usb3_cfg_lane(tcphy, 3);
tcphy_rx_usb3_cfg_lane(tcphy, 2);
- tcphy_dp_cfg_lane(tcphy, 0);
- tcphy_dp_cfg_lane(tcphy, 1);
+ tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 0);
+ tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 1);
} else {
tcphy_tx_usb3_cfg_lane(tcphy, 0);
tcphy_rx_usb3_cfg_lane(tcphy, 1);
- tcphy_dp_cfg_lane(tcphy, 2);
- tcphy_dp_cfg_lane(tcphy, 3);
+ tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 2);
+ tcphy_dp_cfg_lane(tcphy, DP_DEFAULT_RATE, 0, 0, 3);
}
writel(PIN_ASSIGN_D_F, tcphy->base + PMA_LANE_CFG);
}
- writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
+ val = readl(tcphy->base + DP_MODE_CTL);
+ val &= ~DP_MODE_MASK;
+ val |= DP_MODE_ENTER_A2 | DP_LINK_RESET_DEASSERTED;
+ writel(val, tcphy->base + DP_MODE_CTL);
reset_control_deassert(tcphy->uphy_rst);
@@ -999,7 +1064,7 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
property_enable(tcphy, &cfg->uphy_dp_sel, 1);
ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
- val, val & DP_MODE_A2, 1000,
+ val, val & DP_MODE_A2_ACK, 1000,
PHY_MODE_SET_TIMEOUT);
if (ret < 0) {
dev_err(tcphy->dev, "failed to wait TCPHY enter A2\n");
@@ -1008,13 +1073,19 @@ static int rockchip_dp_phy_power_on(struct phy *phy)
tcphy_dp_aux_calibration(tcphy);
- writel(DP_MODE_ENTER_A0, tcphy->base + DP_MODE_CTL);
+ /* enter A0 mode */
+ val = readl(tcphy->base + DP_MODE_CTL);
+ val &= ~DP_MODE_MASK;
+ val |= DP_MODE_ENTER_A0;
+ writel(val, tcphy->base + DP_MODE_CTL);
ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL,
- val, val & DP_MODE_A0, 1000,
+ val, val & DP_MODE_A0_ACK, 1000,
PHY_MODE_SET_TIMEOUT);
if (ret < 0) {
- writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
+ val &= ~DP_MODE_MASK;
+ val |= DP_MODE_ENTER_A2;
+ writel(val, tcphy->base + DP_MODE_CTL);
dev_err(tcphy->dev, "failed to wait TCPHY enter A0\n");
goto power_on_finish;
}
@@ -1032,6 +1103,7 @@ unlock_ret:
static int rockchip_dp_phy_power_off(struct phy *phy)
{
struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
+ u32 val;
mutex_lock(&tcphy->lock);
@@ -1040,7 +1112,10 @@ static int rockchip_dp_phy_power_off(struct phy *phy)
tcphy->mode &= ~MODE_DFP_DP;
- writel(DP_MODE_ENTER_A2, tcphy->base + DP_MODE_CTL);
+ val = readl(tcphy->base + DP_MODE_CTL);
+ val &= ~DP_MODE_MASK;
+ val |= DP_MODE_ENTER_A2;
+ writel(val, tcphy->base + DP_MODE_CTL);
if (tcphy->mode == MODE_DISCONNECT)
tcphy_phy_deinit(tcphy);
@@ -1056,9 +1131,35 @@ static const struct phy_ops rockchip_dp_phy_ops = {
.owner = THIS_MODULE,
};
+static int typec_dp_phy_config(struct phy *phy, int link_rate,
+ int lanes, u8 swing, u8 pre_emp)
+{
+ struct rockchip_typec_phy *tcphy = phy_get_drvdata(phy);
+ u8 i;
+
+ tcphy_cfg_dp_pll(tcphy, link_rate);
+
+ if (tcphy->mode == MODE_DFP_DP) {
+ for (i = 0; i < 4; i++)
+ tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, i);
+ } else {
+ if (tcphy->flip) {
+ tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 0);
+ tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 1);
+ } else {
+ tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 2);
+ tcphy_dp_cfg_lane(tcphy, link_rate, swing, pre_emp, 3);
+ }
+ }
+
+ return 0;
+}
+
static int tcphy_parse_dt(struct rockchip_typec_phy *tcphy,
struct device *dev)
{
+ int ret;
+
tcphy->grf_regs = syscon_regmap_lookup_by_phandle(dev->of_node,
"rockchip,grf");
if (IS_ERR(tcphy->grf_regs)) {
@@ -1096,6 +1197,16 @@ static int tcphy_parse_dt(struct rockchip_typec_phy *tcphy,
return PTR_ERR(tcphy->tcphy_rst);
}
+ /*
+ * Check whether a phy config was passed in from the DT;
+ * if not, fall back to the default phy config values.
+ */
+ ret = of_property_read_u32_array(dev->of_node, "rockchip,phy-config",
+ (u32 *)tcphy->config, sizeof(tcphy->config) / sizeof(u32));
+ if (ret)
+ memcpy(tcphy->config, tcphy_default_config,
+ sizeof(tcphy->config));
+
return 0;
}
@@ -1180,6 +1291,7 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
}
}
+ tcphy->typec_phy_config = typec_dp_phy_config;
pm_runtime_enable(dev);
for_each_available_child_of_node(np, child_np) {
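The parse_dt hunk above reads an optional "rockchip,phy-config" u32 table from the DT and falls back to a built-in table when the property is absent. A minimal sketch of that pattern, assuming a 3x4 swing/pre-emphasis table; the struct fields and sizes here are illustrative, not the driver's exact layout:

    #include <linux/of.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Assumed shape: 3 voltage-swing levels x 4 pre-emphasis levels */
    struct phy_cfg {
        u32 swing;
        u32 pe;
    };

    static const struct phy_cfg defaults[3][4]; /* silicon defaults */

    static void parse_phy_config(struct device_node *np,
                                 struct phy_cfg cfg[3][4])
    {
        /* of_property_read_u32_array() fails if the property is absent
         * or shorter than requested, so a DT without the property
         * cleanly selects the built-in defaults.
         */
        if (of_property_read_u32_array(np, "rockchip,phy-config",
                                       (u32 *)cfg,
                                       sizeof(defaults) / sizeof(u32)))
            memcpy(cfg, defaults, sizeof(defaults));
    }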
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index acaf84cadca3fc..6c9420ee9e03c3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -434,7 +434,7 @@ static void imx1_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
const char *name;
int i, ret;
- if (group > info->ngroups)
+ if (group >= info->ngroups)
return;
seq_puts(s, "\n");
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index b505b87661f86f..07c4153e6f3d42 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -656,7 +656,7 @@ static const char * const sd_a_groups[] = {
static const char * const sdxc_a_groups[] = {
"sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a",
- "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a"
+ "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a"
};
static const char * const pcm_a_groups[] = {
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index ad3aa9967c0ebc..aa4b5acee61369 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -568,8 +568,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
for_each_child_of_node(np_config, np) {
ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(np);
break;
+ }
}
}
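The at91-pio4 fix above addresses a reference leak: for_each_child_of_node() takes a reference on each child and drops it when it advances, so breaking out of the loop early leaves one reference held unless the caller puts it. The general pattern, with an assumed per-child helper:

    #include <linux/of.h>

    static int handle_child(struct device_node *child); /* assumed helper */

    static int walk_children(struct device_node *parent)
    {
        struct device_node *child;
        int ret = 0;

        for_each_child_of_node(parent, child) {
            ret = handle_child(child);
            if (ret < 0) {
                /* the iterator still holds a ref on 'child';
                 * drop it before leaving the loop early
                 */
                of_node_put(child);
                break;
            }
        }
        return ret;
    }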
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 65d395f7910115..7983fd9c15336e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -806,11 +806,24 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
return ret;
}
- ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev), 0, 0, chip->ngpio);
- if (ret) {
- dev_err(pctrl->dev, "Failed to add pin range\n");
- gpiochip_remove(&pctrl->chip);
- return ret;
+ /*
+ * For DeviceTree-supported systems, the gpio core checks the
+ * pinctrl's device node for the "gpio-ranges" property.
+ * If it is present, it takes care of adding the pin ranges
+ * for the driver. In this case the driver can skip ahead.
+ *
+ * In order to remain compatible with older DeviceTree files which
+ * don't set the "gpio-ranges" property, or with systems that use
+ * ACPI, the driver has to call gpiochip_add_pin_range() itself.
+ */
+ if (!of_property_read_bool(pctrl->dev->of_node, "gpio-ranges")) {
+ ret = gpiochip_add_pin_range(&pctrl->chip,
+ dev_name(pctrl->dev), 0, 0, chip->ngpio);
+ if (ret) {
+ dev_err(pctrl->dev, "Failed to add pin range\n");
+ gpiochip_remove(&pctrl->chip);
+ return ret;
+ }
}
ret = gpiochip_irqchip_add(chip,
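With the msm change above, the driver only adds pin ranges by hand when the DT lacks "gpio-ranges"; otherwise gpiolib adds them itself from the property. A hedged sketch of the decision (function name illustrative):

    #include <linux/device.h>
    #include <linux/gpio/driver.h>
    #include <linux/of.h>

    static int add_ranges(struct device *dev, struct gpio_chip *chip)
    {
        int ret;

        if (of_property_read_bool(dev->of_node, "gpio-ranges"))
            return 0;   /* gpiolib adds the ranges from the DT */

        /* older DTs and ACPI systems: add the 1:1 range manually */
        ret = gpiochip_add_pin_range(chip, dev_name(dev), 0, 0,
                                     chip->ngpio);
        if (ret)
            dev_err(dev, "Failed to add pin range\n");
        return ret;
    }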
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 3e5ccc76d59ca7..e6ac3910602e5d 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -291,31 +291,47 @@ static int pmic_gpio_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_DRIVE_PUSH_PULL:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_CMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_CMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_NMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_SOURCE:
- arg = pad->buffer_type == PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS;
+ if (pad->buffer_type != PMIC_GPIO_OUT_BUF_OPEN_DRAIN_PMOS)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = pad->pullup == PMIC_GPIO_PULL_DOWN;
+ if (pad->pullup != PMIC_GPIO_PULL_DOWN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_DISABLE:
- arg = pad->pullup = PMIC_GPIO_PULL_DISABLE;
+ if (pad->pullup != PMIC_GPIO_PULL_DISABLE)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- arg = pad->pullup == PMIC_GPIO_PULL_UP_30;
+ if (pad->pullup != PMIC_GPIO_PULL_UP_30)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = !pad->is_enabled;
+ if (pad->is_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_POWER_SOURCE:
arg = pad->power_source;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pad->input_enabled;
+ if (!pad->input_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
arg = pad->out_value;
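The spmi-gpio hunks above (and the spmi-mpp and ssbi-gpio ones below) convert boolean pinconf parameters to the generic convention: config_get returns -EINVAL when the option is not active, which the pinconf core treats as "unset" (e.g. when dumping pins), and returns arg = 1 when it is. A sketch of one such case, with an assumed pad structure:

    #include <linux/errno.h>
    #include <linux/pinctrl/pinconf-generic.h>

    struct demo_pad {
        bool open_drain;    /* assumed field, for illustration */
    };

    static int demo_config_get(struct demo_pad *pad, unsigned int param,
                               u32 *out)
    {
        u32 arg;

        switch (param) {
        case PIN_CONFIG_DRIVE_OPEN_DRAIN:
            if (!pad->open_drain)
                return -EINVAL; /* inactive: core reads this as "unset" */
            arg = 1;            /* active, with no argument payload */
            break;
        default:
            return -ENOTSUPP;
        }

        *out = arg;
        return 0;
    }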
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 69c14ba177d0b5..926cac7e4d9b4c 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -321,6 +321,8 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
pad->function = function;
ret = pmic_mpp_write_mode_ctl(state, pad);
+ if (ret < 0)
+ return ret;
val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
@@ -345,13 +347,12 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- arg = pad->pullup == PMIC_MPP_PULL_UP_OPEN;
+ if (pad->pullup != PMIC_MPP_PULL_UP_OPEN)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
switch (pad->pullup) {
- case PMIC_MPP_PULL_UP_OPEN:
- arg = 0;
- break;
case PMIC_MPP_PULL_UP_0P6KOHM:
arg = 600;
break;
@@ -366,13 +367,17 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
}
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = !pad->is_enabled;
+ if (pad->is_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_POWER_SOURCE:
arg = pad->power_source;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pad->input_enabled;
+ if (!pad->input_enabled)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
arg = pad->out_value;
@@ -384,7 +389,9 @@ static int pmic_mpp_config_get(struct pinctrl_dev *pctldev,
arg = pad->amux_input;
break;
case PMIC_MPP_CONF_PAIRED:
- arg = pad->paired;
+ if (!pad->paired)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
arg = pad->drive_strength;
@@ -457,7 +464,7 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
pad->dtest = arg;
break;
case PIN_CONFIG_DRIVE_STRENGTH:
- arg = pad->drive_strength;
+ pad->drive_strength = arg;
break;
case PMIC_MPP_CONF_AMUX_ROUTE:
if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
@@ -501,6 +508,10 @@ static int pmic_mpp_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (ret < 0)
return ret;
+ ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, pad->drive_strength);
+ if (ret < 0)
+ return ret;
+
val = pad->is_enabled << PMIC_MPP_REG_MASTER_EN_SHIFT;
return pmic_mpp_write(state, pad, PMIC_MPP_REG_EN_CTL, val);
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 7bea0df06fb146..5ca3e0048ba863 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -259,22 +259,32 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- arg = pin->bias == PM8XXX_GPIO_BIAS_NP;
+ if (pin->bias != PM8XXX_GPIO_BIAS_NP)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- arg = pin->bias == PM8XXX_GPIO_BIAS_PD;
+ if (pin->bias != PM8XXX_GPIO_BIAS_PD)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_BIAS_PULL_UP:
- arg = pin->bias <= PM8XXX_GPIO_BIAS_PU_1P5_30;
+ if (pin->bias > PM8XXX_GPIO_BIAS_PU_1P5_30)
+ return -EINVAL;
+ arg = 1;
break;
case PM8XXX_QCOM_PULL_UP_STRENGTH:
arg = pin->pull_up_strength;
break;
case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
- arg = pin->disable;
+ if (!pin->disable)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_INPUT_ENABLE:
- arg = pin->mode == PM8XXX_GPIO_MODE_INPUT;
+ if (pin->mode != PM8XXX_GPIO_MODE_INPUT)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_OUTPUT:
if (pin->mode & PM8XXX_GPIO_MODE_OUTPUT)
@@ -289,10 +299,14 @@ static int pm8xxx_pin_config_get(struct pinctrl_dev *pctldev,
arg = pin->output_strength;
break;
case PIN_CONFIG_DRIVE_PUSH_PULL:
- arg = !pin->open_drain;
+ if (pin->open_drain)
+ return -EINVAL;
+ arg = 1;
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
- arg = pin->open_drain;
+ if (!pin->open_drain)
+ return -EINVAL;
+ arg = 1;
break;
default:
return -EINVAL;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index a7c81e988656b7..383977ea3a3caf 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -568,7 +568,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = {
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 11),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 1)), /* PH_EINT11 */
+ SUNXI_FUNCTION_IRQ_BANK(0x6, 2, 11)), /* PH_EINT11 */
};
static const struct sunxi_pinctrl_desc sun8i_a83t_pinctrl_data = {
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 987513fc2bc954..7dcc40a6da190f 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -6,6 +6,8 @@ endif
if MIPS
source "drivers/platform/mips/Kconfig"
endif
+if GOLDFISH
source "drivers/platform/goldfish/Kconfig"
+endif
source "drivers/platform/chrome/Kconfig"
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 39b935d56956fc..1024f6eee13a3f 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -101,14 +101,6 @@ config CROS_EC_PROTO
---help---
ChromeOS EC communication protocol helpers.
-config CROS_EC_SYSFS_USB
- bool "Chrome OS Embedded Controller sysfs USB attribute group"
- depends on MFD_CROS_EC
- default n
- ---help---
- Say Y here to expose a sysfs USB attribute group for the Chrome OS
- embedded controller.
-
config CROS_KBD_LED_BACKLIGHT
tristate "Backlight LED support for Chrome OS keyboards"
depends on LEDS_CLASS && ACPI
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index 5572d5712fa63a..0a2b68100be1cc 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -13,5 +13,4 @@ cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o
cros_ec_lpcs-objs := $(cros_ec_lpcs-y)
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o
-obj-$(CONFIG_CROS_EC_SYSFS_USB) += cros_ec_sysfs_usb.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c
index ebd2f75d30dd76..4e835110022560 100644
--- a/drivers/platform/chrome/cros_ec_dev.c
+++ b/drivers/platform/chrome/cros_ec_dev.c
@@ -43,9 +43,6 @@ static const struct attribute_group *cros_ec_groups[] = {
#if IS_ENABLED(CONFIG_MFD_CROS_EC_PD_UPDATE)
&cros_ec_pd_attr_group,
#endif
-#if IS_ENABLED(CONFIG_CROS_EC_SYSFS_USB)
- &cros_ec_usb_attr_group,
-#endif
#if IS_ENABLED(CONFIG_CHARGER_CROS_USB_PD)
&cros_usb_pd_charger_attr_group,
#endif
@@ -395,7 +392,7 @@ static void __remove(struct device *dev)
kfree(ec);
}
-static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
{
if (ec->features[0] == -1U && ec->features[1] == -1U) {
/* features bitmap not read yet */
@@ -424,6 +421,7 @@ static int cros_ec_check_features(struct cros_ec_dev *ec, int feature)
return ec->features[feature / 32] & EC_FEATURE_MASK_0(feature);
}
+EXPORT_SYMBOL_GPL(cros_ec_check_features);
static const struct mfd_cell cros_usb_pd_charger_devs[] = {
{
@@ -636,6 +634,40 @@ static void cros_ec_rtc_register(struct cros_ec_dev *ec)
dev_err(ec->dev, "failed to add cros-ec-rtc device: %d\n", ret);
}
+static const struct mfd_cell cros_ec_cec_devs[] = {
+ {
+ .name = "cros-ec-cec",
+ .id = -1,
+ },
+};
+
+static void cros_ec_cec_register(struct cros_ec_dev *ec)
+{
+ int ret;
+
+ ret = mfd_add_devices(ec->dev, 0, cros_ec_cec_devs,
+ ARRAY_SIZE(cros_ec_cec_devs),
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(ec->dev, "failed to add cros-ec-cec device: %d\n", ret);
+}
+
+static const struct mfd_cell ec_throttler_cells[] = {
+ { .name = "cros-ec-throttler" }
+};
+
+static void cros_ec_throttler_register(struct cros_ec_dev *ec)
+{
+ int ret;
+
+ ret = mfd_add_devices(ec->dev, 0, ec_throttler_cells,
+ ARRAY_SIZE(ec_throttler_cells),
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(ec->dev,
+ "failed to add cros-ec-throttler device: %d\n", ret);
+}
+
static int ec_device_probe(struct platform_device *pdev)
{
int retval = -ENOMEM;
@@ -656,6 +688,15 @@ static int ec_device_probe(struct platform_device *pdev)
device_initialize(&ec->class_dev);
cdev_init(&ec->cdev, &fops);
+ /*
+ * ACPI attaches the firmware node of the parent to the platform
+ * devices. If the parent firmware node has a valid wakeup flag,
+ * ACPI marks this platform device as wake capable too. But this
+ * platform device cannot wake the system up by itself, so mark
+ * its wake capability false.
+ */
+ device_set_wakeup_capable(dev, false);
+
/* check whether this is actually a Fingerprint MCU rather than an EC */
if (cros_ec_check_features(ec, EC_FEATURE_FINGERPRINT)) {
dev_info(dev, "Fingerprint MCU detected.\n");
@@ -720,6 +761,13 @@ static int ec_device_probe(struct platform_device *pdev)
if (cros_ec_check_features(ec, EC_FEATURE_RTC))
cros_ec_rtc_register(ec);
+ /* check whether this EC instance has CEC command support */
+ if (cros_ec_check_features(ec, EC_FEATURE_CEC))
+ cros_ec_cec_register(ec);
+
+ if (IS_ENABLED(CONFIG_CROS_EC_THROTTLER))
+ cros_ec_throttler_register(ec);
+
/* Take control of the lightbar from the EC. */
lb_manual_suspend_ctrl(ec, 1);
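Since cros_ec_check_features() is now exported, sub-drivers outside cros_ec_dev.c can gate their probe on EC feature bits the same way the CEC and RTC cells are gated above. A hypothetical consumer (my_cec_probe/my_cec_setup are illustrative names, not part of the patch):

    #include <linux/errno.h>
    #include <linux/mfd/cros_ec.h>

    static int my_cec_setup(struct cros_ec_dev *ec); /* assumed helper */

    static int my_cec_probe(struct cros_ec_dev *ec)
    {
        if (!cros_ec_check_features(ec, EC_FEATURE_CEC))
            return -ENODEV; /* this EC does not advertise CEC */

        return my_cec_setup(ec);
    }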
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 85a07493274a52..4226e07cea032d 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -25,6 +25,7 @@
#include <linux/dmi.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/interrupt.h>
#include <linux/mfd/cros_ec.h>
#include <linux/mfd/cros_ec_commands.h>
#include <linux/mfd/cros_ec_lpc_reg.h>
@@ -252,7 +253,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
acpi_status status;
struct cros_ec_device *ec_dev;
u8 buf[2];
- int ret;
+ int irq, ret;
if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE,
dev_name(dev))) {
@@ -291,6 +292,18 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
sizeof(struct ec_response_get_protocol_info);
ec_dev->dout_size = sizeof(struct ec_host_request);
+ /*
+ * Some boards do not have an IRQ allotted for cros_ec_lpc,
+ * which makes ENXIO an expected (and safe) scenario.
+ */
+ irq = platform_get_irq(pdev, 0);
+ if (irq > 0)
+ ec_dev->irq = irq;
+ else if (irq != -ENXIO) {
+ dev_err(dev, "couldn't retrieve IRQ number (%d)\n", irq);
+ return irq;
+ }
+
ret = cros_ec_register(ec_dev);
if (ret) {
dev_err(dev, "couldn't register ec_dev (%d)\n", ret);
@@ -298,8 +311,13 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
}
/*
- * If we have a companion ACPI device connect a notify handler to
- * process MKBP messages.
+ * If we have a companion ACPI device, connect a notify handler
+ * to process MKBP messages coming via SCI. Specific board
+ * configurations may send MKBP events solely via the registered IRQ,
+ * in which case this handler has no effect. Detecting
+ * whether the current system is one such board, however, is a tricky
+ * and error-prone endeavor, so register the handler anyway, and trust
+ * the EC to send the proper MKBP event signals.
*/
if (adev) {
status = acpi_install_notify_handler(adev->handle,
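platform_get_irq() returns -ENXIO when no IRQ is described for the device, which the probe above treats as a valid board configuration rather than a failure; any other negative value is a real error and is propagated. The idiom, sketched with an assumed handler:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t my_irq_handler(int irq, void *ctx); /* assumed */

    static int wire_optional_irq(struct platform_device *pdev, void *ctx)
    {
        int irq = platform_get_irq(pdev, 0);

        if (irq == -ENXIO)
            return 0;    /* no IRQ wired up: fall back to SCI/polling */
        if (irq < 0)
            return irq;  /* genuine lookup failure */

        return devm_request_irq(&pdev->dev, irq, my_irq_handler, 0,
                                dev_name(&pdev->dev), ctx);
    }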
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index b207c1fdd70515..dc82fa3261095b 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -406,10 +406,14 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev)
ret = cros_ec_get_host_command_version_mask(ec_dev, proto_msg,
EC_CMD_GET_NEXT_EVENT,
&ver_mask);
- if (ret < 0 || ver_mask == 0)
+ if (ret < 0 || ver_mask == 0) {
ec_dev->mkbp_event_supported = 0;
- else
- ec_dev->mkbp_event_supported = 1;
+ dev_info(ec_dev->dev, "MKBP not supported\n");
+ } else {
+ ec_dev->mkbp_event_supported = fls(ver_mask);
+ dev_info(ec_dev->dev, "MKBP support version %u\n",
+ ec_dev->mkbp_event_supported - 1);
+ }
/*
* Get host event wake mask, assume all events are wake events
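The version mask decode above relies on fls(), which returns the 1-based position of the most significant set bit (and 0 for an empty mask). Storing fls(ver_mask) therefore makes mkbp_event_supported double as a boolean (non-zero means supported) and as "highest command version + 1". A worked example:

    #include <linux/bitops.h>
    #include <linux/printk.h>

    static void mkbp_version_demo(void)
    {
        u32 ver_mask = BIT(0) | BIT(1) | BIT(2); /* versions 0..2 */
        int supported = fls(ver_mask);           /* 3 = highest + 1 */

        /* supported == 0 would mean no MKBP support at all */
        pr_info("MKBP support version %u\n", supported - 1); /* "2" */
    }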
diff --git a/drivers/platform/chrome/cros_ec_sysfs_usb.c b/drivers/platform/chrome/cros_ec_sysfs_usb.c
deleted file mode 100644
index 265c51aee5e0e5..00000000000000
--- a/drivers/platform/chrome/cros_ec_sysfs_usb.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright 2018 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/mfd/cros_ec.h>
-#include <linux/mfd/cros_ec_commands.h>
-#include <linux/slab.h>
-
-#define USB_CHARGE_MODE_DISABLED 0
-#define USB_CHARGE_MODE_ENABLED 4
-
-static int cmd_usb_charge_set_mode_xfer(struct cros_ec_dev *ec, u8 port,
- u8 mode)
-{
- int ret;
- struct cros_ec_command *msg;
- struct ec_params_usb_charge_set_mode *param;
-
- msg = kzalloc(sizeof(*msg) + sizeof(*param), GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
- msg->command = EC_CMD_USB_CHARGE_SET_MODE | ec->cmd_offset;
- msg->outsize = sizeof(*param);
- param = (struct ec_params_usb_charge_set_mode *)msg->data;
- param->usb_port_id = port;
- param->mode = mode;
- ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
- kfree(msg);
- return ret;
-}
-
-static ssize_t vbus_disable_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int ret;
- u8 usb_port;
- struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev,
- class_dev);
-
- ret = kstrtou8(buf, 0, &usb_port);
- if (ret)
- return ret;
- ret = cmd_usb_charge_set_mode_xfer(ec, usb_port,
- USB_CHARGE_MODE_DISABLED);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t vbus_enable_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- int ret;
- u8 usb_port;
- struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev,
- class_dev);
-
- ret = kstrtou8(buf, 0, &usb_port);
- if (ret)
- return ret;
- ret = cmd_usb_charge_set_mode_xfer(ec, usb_port,
- USB_CHARGE_MODE_ENABLED);
- if (ret)
- return ret;
-
- return count;
-}
-
-static DEVICE_ATTR_WO(vbus_disable);
-static DEVICE_ATTR_WO(vbus_enable);
-
-static struct attribute *usb_attrs[] = {
- &dev_attr_vbus_disable.attr,
- &dev_attr_vbus_enable.attr,
- NULL,
-};
-
-struct attribute_group cros_ec_usb_attr_group = {
- .name = "usb",
- .attrs = usb_attrs,
-};
-EXPORT_SYMBOL(cros_ec_usb_attr_group);
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 50331e3e54f31f..635ef25cc722a0 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,23 +1,5 @@
-menuconfig GOLDFISH
- bool "Platform support for Goldfish virtual devices"
- depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
- ---help---
- Say Y here to get to see options for the Goldfish virtual platform.
- This option alone does not add any kernel code.
-
- Unless you are building for the Android Goldfish emulator say N here.
-
-if GOLDFISH
-
-config GOLDFISH_BUS
- bool "Goldfish platform bus"
- ---help---
- This is a virtual bus to host Goldfish Android Virtual Devices.
-
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
---help---
This is a virtual device to drive the QEMU pipe interface used by
the Goldfish Android Virtual Device.
-
-endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index d3487125838cdc..a0022395eee93f 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -1,5 +1,5 @@
#
# Makefile for Goldfish platform specific drivers
#
-obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o
+obj-$(CONFIG_GOLDFISH) += pdev_bus.o
obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 3215a33cf4fe51..e7a29e2750c6ae 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -2,7 +2,6 @@
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
- * Copyright (C) 2014 Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -58,8 +57,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/goldfish.h>
-#include <linux/mm.h>
-#include <linux/acpi.h>
/*
* IMPORTANT: The following constants must match the ones used and defined
@@ -78,7 +75,6 @@
#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
-#define PIPE_REG_VERSION 0x24 /* read: device version */
/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN 1 /* open new channel */
@@ -94,6 +90,12 @@
#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
is possible */
+
+/* The following commands are related to read operations; they must be
+ * listed in the same order as the corresponding write ones, since we
+ * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
+ * in goldfish_pipe_read_write() below.
+ */
#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
* is possible */
@@ -128,7 +130,6 @@ struct goldfish_pipe_dev {
unsigned char __iomem *base;
struct access_params *aps;
int irq;
- u32 version;
};
static struct goldfish_pipe_dev pipe_dev[1];
@@ -262,14 +263,19 @@ static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
return 0;
}
+/* This function is used for both reading from and writing to a given
+ * pipe.
+ */
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
size_t bufflen, int is_write)
{
unsigned long irq_flags;
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
+ const int cmd_offset = is_write ? 0
+ : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
unsigned long address, address_end;
- int count = 0, ret = -EINVAL;
+ int ret = 0;
/* If the emulator already closed the pipe, no need to go further */
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -298,100 +304,73 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
unsigned long avail = next - address;
int status, wakeBit;
- struct page *page;
-
- /* Either vaddr or paddr depending on the device version */
- unsigned long xaddr;
-
- /*
- * We grab the pages on a page-by-page basis in case user
- * space gives us a potentially huge buffer but the read only
- * returns a small amount, then there's no need to pin that
- * much memory to the process.
- */
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, address, 1,
- !is_write, 0, &page, NULL);
- up_read(&current->mm->mmap_sem);
- if (ret < 0)
- return ret;
-
- if (dev->version) {
- /* Device version 1 or newer (qemu-android) expects the
- * physical address. */
- xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
+ /* Ensure that the corresponding page is properly mapped */
+ /* FIXME: this isn't safe or sufficient - use get_user_pages */
+ if (is_write) {
+ char c;
+ /* Ensure that the page is mapped and readable */
+ if (__get_user(c, (char __user *)address)) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
} else {
- /* Device version 0 (classic emulator) expects the
- * virtual address. */
- xaddr = address;
+ /* Ensure that the page is mapped and writable */
+ if (__put_user(0, (char __user *)address)) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
}
/* Now, try to transfer the bytes in the current page */
spin_lock_irqsave(&dev->lock, irq_flags);
- if (access_with_param(dev,
- is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
- xaddr, avail, pipe, &status)) {
+ if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
+ address, avail, pipe, &status)) {
gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
dev->base + PIPE_REG_CHANNEL_HIGH);
writel(avail, dev->base + PIPE_REG_SIZE);
- gf_write_ptr((void *)xaddr,
+ gf_write_ptr((void *)address,
dev->base + PIPE_REG_ADDRESS,
dev->base + PIPE_REG_ADDRESS_HIGH);
- writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
- dev->base + PIPE_REG_COMMAND);
+ writel(CMD_WRITE_BUFFER + cmd_offset,
+ dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
- if (status > 0 && !is_write)
- set_page_dirty(page);
- put_page(page);
-
if (status > 0) { /* Correct transfer */
- count += status;
+ ret += status;
address += status;
continue;
- } else if (status == 0) { /* EOF */
- ret = 0;
+ }
+
+ if (status == 0) /* EOF */
break;
- } else if (status < 0 && count > 0) {
- /*
- * An error occured and we already transfered
- * something on one of the previous pages.
- * Just return what we already copied and log this
- * err.
- *
- * Note: This seems like an incorrect approach but
- * cannot change it until we check if any user space
- * ABI relies on this behavior.
- */
- if (status != PIPE_ERROR_AGAIN)
- pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
- status, is_write ? "write" : "read");
- ret = 0;
+
+ /* An error occurred. If we already transferred stuff, just
+ * return with its count. We expect the next call to return
+ * an error code. */
+ if (ret > 0)
break;
- }
- /*
- * If the error is not PIPE_ERROR_AGAIN, or if we are not in
- * non-blocking mode, just return the error code.
- */
+ /* If the error is not PIPE_ERROR_AGAIN, or if we are in
+ * non-blocking mode, just return the error code.
+ */
if (status != PIPE_ERROR_AGAIN ||
(filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
- /*
- * The backend blocked the read/write, wait until the backend
- * tells us it's ready to process more data.
- */
+ /* We will have to wait until more data/space is available.
+ * First, mark the pipe as waiting for a specific wake signal.
+ */
wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- goldfish_cmd(pipe,
- is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
+ goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
/* Unlock the pipe, then wait for the wake signal */
mutex_unlock(&pipe->lock);
@@ -399,29 +378,22 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
while (test_bit(wakeBit, &pipe->flags)) {
if (wait_event_interruptible(
pipe->wake_queue,
- !test_bit(wakeBit, &pipe->flags))) {
- ret = -ERESTARTSYS;
- break;
- }
+ !test_bit(wakeBit, &pipe->flags)))
+ return -ERESTARTSYS;
- if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) {
- ret = -EIO;
- break;
- }
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
+ return -EIO;
}
/* Try to re-acquire the lock */
- if (mutex_lock_interruptible(&pipe->lock)) {
- ret = -ERESTARTSYS;
- break;
- }
+ if (mutex_lock_interruptible(&pipe->lock))
+ return -ERESTARTSYS;
+
+ /* Try the transfer again */
+ continue;
}
mutex_unlock(&pipe->lock);
-
- if (ret < 0)
- return ret;
- else
- return count;
+ return ret;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
@@ -474,11 +446,10 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
unsigned long irq_flags;
int count = 0;
- /*
- * We're going to read from the emulator a list of (channel,flags)
- * pairs corresponding to the wake events that occured on each
- * blocked pipe (i.e. channel).
- */
+ /* We're going to read from the emulator a list of (channel,flags)
+ * pairs corresponding to the wake events that occurred on each
+ * blocked pipe (i.e. channel).
+ */
spin_lock_irqsave(&dev->lock, irq_flags);
for (;;) {
/* First read the channel, 0 means the end of the list */
@@ -629,11 +600,6 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
goto error;
}
setup_access_params_addr(pdev, dev);
-
- /* Although the pipe device in the classic Android emulator does not
- * recognize the 'version' register, it won't treat this as an error
- * either and will simply return 0, which is fine. */
- dev->version = readl(dev->base + PIPE_REG_VERSION);
return 0;
error:
@@ -649,25 +615,11 @@ static int goldfish_pipe_remove(struct platform_device *pdev)
return 0;
}
-static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
- { "GFSH0003", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
-
-static const struct of_device_id goldfish_pipe_of_match[] = {
- { .compatible = "generic,android-pipe", },
- {},
-};
-MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
-
static struct platform_driver goldfish_pipe = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
- .name = "goldfish_pipe",
- .of_match_table = goldfish_pipe_of_match,
- .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
+ .name = "goldfish_pipe"
}
};
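The cmd_offset trick in goldfish_pipe_read_write() silently depends on the read commands sitting at the same fixed distance from their write counterparts (CMD_READ_BUFFER = 6 vs CMD_WRITE_BUFFER = 4, CMD_WAKE_ON_READ = 7 vs CMD_WAKE_ON_WRITE = 5). A compile-time assertion would make that layout requirement explicit; the check below is an illustration, not part of the patch:

    #include <linux/bug.h>

    static void check_cmd_layout(void)
    {
        /* both read commands must be write command + the same offset */
        BUILD_BUG_ON(CMD_READ_BUFFER - CMD_WRITE_BUFFER !=
                     CMD_WAKE_ON_READ - CMD_WAKE_ON_WRITE);
    }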
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c59e5c13bdbcb0..e9ef1a81bd0b5a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -915,6 +915,7 @@ config INTEL_OAKTRAIL
config SAMSUNG_Q10
tristate "Samsung Q10 Extras"
depends on ACPI
+ depends on BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 2acdb0d6ea8983..a0533e4e52d7be 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -233,6 +233,7 @@ static const struct bios_settings bios_tbl[] = {
{"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00}, 0},
{"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00}, 0},
{"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00}, 0},
+ {"Gateway", "LT31", "v1.3307", 0x55, 0x58, {0x9e, 0x00}, 0},
/* Packard Bell */
{"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00}, 0},
{"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00}, 0},
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 1e1e594238892a..3df47c1b04ec7f 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -463,6 +463,7 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
if (obj && obj->type == ACPI_TYPE_INTEGER)
*out_data = (u32) obj->integer.value;
}
+ kfree(output.pointer);
return status;
}
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 0e0403e024c5e6..a284a2b42bcd5a 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -339,8 +339,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
- { KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
- { KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
+ { KE_KEY, 0x35, { KEY_SCREENLOCK } },
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
@@ -392,6 +391,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
+ { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
{ KE_END, 0},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index f96f7b86526792..7c1defaef3f588 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2084,7 +2084,8 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
- }
+ } else
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
status = wmi_install_notify_handler(asus->driver->event_guid,
asus_wmi_notify, asus);
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index ae3e7846cde132..8cfc51c86c2204 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -173,9 +173,9 @@ static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int
writel(val, pmcdev->regbase + reg_offset);
}
-static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
+static inline u64 pmc_core_adjust_slp_s0_step(u32 value)
{
- return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+ return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
}
int intel_pkgc10_counter_read(u64 *data)
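The pmc_core fix above works because the cast happens before the multiply: a u32 * u32 product is computed in 32 bits and silently wraps even when the result is assigned to a u64. Widening one operand first forces a 64-bit multiply. A small demonstration (the step value is illustrative, not the SPT macro's actual value):

    #include <linux/types.h>

    static u64 widen_before_multiply(u32 count, u32 step)
    {
        u64 wrong = count * step;      /* 32-bit product, wraps */
        u64 right = (u64)count * step; /* 64-bit product, exact */

        /* e.g. count = 0x04000000, step = 100:
         * wrong = 0x90000000, right = 0x190000000
         */
        (void)wrong;
        return right;
    }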
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index f774cb576ffa06..1ff95b5a429db0 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -34,6 +34,7 @@
#define TOSHIBA_ACPI_VERSION "0.23"
#define PROC_INTERFACE_VERSION 1
+#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -1472,7 +1473,7 @@ static const struct file_operations keys_proc_fops = {
.write = keys_proc_write,
};
-static int version_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused version_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "driver: %s\n", TOSHIBA_ACPI_VERSION);
seq_printf(m, "proc_interface: %d\n", PROC_INTERFACE_VERSION);
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 5edee645d890e6..262285e48a0948 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -21,7 +21,7 @@
#include <linux/isapnp.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
extern struct pnp_protocol isapnp_protocol;
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 9e29b1321648d1..15783869e1a008 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -427,14 +427,14 @@ static int olpc_bat_get_property(struct power_supply *psy,
if (ret)
return ret;
- val->intval = (s16)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (s16)be16_to_cpu(ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_TEMP_AMBIENT:
ret = olpc_ec_cmd(EC_AMB_TEMP, NULL, 0, (void *)&ec_word, 2);
if (ret)
return ret;
- val->intval = (int)be16_to_cpu(ec_word) * 100 / 256;
+ val->intval = (int)be16_to_cpu(ec_word) * 10 / 256;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
ret = olpc_ec_cmd(EC_BAT_ACR, NULL, 0, (void *)&ec_word, 2);
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 6a9bf708937360..ccb619632e4612 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -35,6 +35,7 @@ static void vexpress_reset_do(struct device *dev, const char *what)
}
static struct device *vexpress_power_off_device;
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
static void vexpress_power_off(void)
{
@@ -99,10 +100,13 @@ static int _vexpress_register_restart_handler(struct device *dev)
int err;
vexpress_restart_device = dev;
- err = register_restart_handler(&vexpress_restart_nb);
- if (err) {
- dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return err;
+ if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+ err = register_restart_handler(&vexpress_restart_nb);
+ if (err) {
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ atomic_dec(&vexpress_restart_nb_refcnt);
+ return err;
+ }
}
device_create_file(dev, &dev_attr_active);
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index da7bae99155231..4861cfddcdd349 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -23,6 +23,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
+
#include "ptp_private.h"
static int ptp_disable_pinfunc(struct ptp_clock_info *ops,
@@ -88,6 +90,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
case PTP_PF_PHYSYNC:
if (chan != 0)
return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -201,7 +204,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
- ptp->info->gettime64(ptp->info, &ts);
+ err = ptp->info->gettime64(ptp->info, &ts);
+ if (err)
+ goto out;
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
@@ -223,6 +228,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
pd = ops->pin_config[pin_index];
@@ -241,6 +247,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
err = -EINVAL;
break;
}
+ pin_index = array_index_nospec(pin_index, ops->n_pins);
if (mutex_lock_interruptible(&ptp->pincfg_mux))
return -ERESTARTSYS;
err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
@@ -252,6 +259,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
}
+out:
kfree(sysoff);
return err;
}
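array_index_nospec() complements the ordinary bounds check above: after the architectural check passes, it clamps the index so a mispredicted branch cannot use an out-of-range value during speculative execution (Spectre v1). The pattern is always check, then clamp, then use:

    #include <linux/errno.h>
    #include <linux/nospec.h>
    #include <linux/ptp_clock_kernel.h>

    static int demo_get_pin(struct ptp_clock_info *ops,
                            unsigned int pin_index,
                            struct ptp_pin_desc *out)
    {
        if (pin_index >= ops->n_pins)  /* architectural bounds check */
            return -EINVAL;
        /* clamp so speculation cannot index past n_pins */
        pin_index = array_index_nospec(pin_index, ops->n_pins);
        *out = ops->pin_config[pin_index];
        return 0;
    }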
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 6a41e66015b67f..062dff1c902df5 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -384,6 +384,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
aqcsfrc_mask = AQCSFRC_CSFA_MASK;
}
+ /* Update shadow register first before modifying active register */
+ ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
/*
* Changes to immediate action on Action Qualifier. This puts
* Action Qualifier control on PWM output from next TBCLK
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 74e6938ed9dd17..6e156b5b8d3474 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3986,14 +3986,14 @@ regulator_register(const struct regulator_desc *regulator_desc,
if (ret < 0)
goto wash;
+ dev_set_drvdata(&rdev->dev, rdev);
+
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
goto wash;
}
- dev_set_drvdata(&rdev->dev, rdev);
-
if (init_data && init_data->supply_regulator)
rdev->supply_name = init_data->supply_regulator;
else if (regulator_desc->supply_name)
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 2a44e5dd9c2a15..c68556bf6f3994 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -152,6 +152,7 @@ static struct regulator_ops pfuze100_sw_regulator_ops = {
static struct regulator_ops pfuze100_swb_regulator_ops = {
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
.list_voltage = regulator_list_voltage_table,
.map_voltage = regulator_map_voltage_ascend,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 92f88753bfed27..2daf751c26c7f7 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -303,13 +303,13 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(2, STEP_50_MV),
regulator_desc_ldo(3, STEP_50_MV),
regulator_desc_ldo(4, STEP_50_MV),
- regulator_desc_ldo(5, STEP_50_MV),
+ regulator_desc_ldo(5, STEP_25_MV),
regulator_desc_ldo(6, STEP_25_MV),
regulator_desc_ldo(7, STEP_50_MV),
regulator_desc_ldo(8, STEP_50_MV),
regulator_desc_ldo(9, STEP_50_MV),
regulator_desc_ldo(10, STEP_50_MV),
- regulator_desc_ldo(11, STEP_25_MV),
+ regulator_desc_ldo(11, STEP_50_MV),
regulator_desc_ldo(12, STEP_50_MV),
regulator_desc_ldo(13, STEP_50_MV),
regulator_desc_ldo(14, STEP_50_MV),
@@ -320,11 +320,11 @@ static const struct regulator_desc regulators[] = {
regulator_desc_ldo(19, STEP_50_MV),
regulator_desc_ldo(20, STEP_50_MV),
regulator_desc_ldo(21, STEP_50_MV),
- regulator_desc_ldo(22, STEP_25_MV),
- regulator_desc_ldo(23, STEP_25_MV),
+ regulator_desc_ldo(22, STEP_50_MV),
+ regulator_desc_ldo(23, STEP_50_MV),
regulator_desc_ldo(24, STEP_50_MV),
regulator_desc_ldo(25, STEP_50_MV),
- regulator_desc_ldo(26, STEP_50_MV),
+ regulator_desc_ldo(26, STEP_25_MV),
regulator_desc_buck1_4(1),
regulator_desc_buck1_4(2),
regulator_desc_buck1_4(3),
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index b6d831b84e1d0d..47694dd515ab60 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -372,7 +372,7 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_ldo(32, STEP_50_MV),
regulator_desc_s2mps11_ldo(33, STEP_50_MV),
regulator_desc_s2mps11_ldo(34, STEP_50_MV),
- regulator_desc_s2mps11_ldo(35, STEP_50_MV),
+ regulator_desc_s2mps11_ldo(35, STEP_25_MV),
regulator_desc_s2mps11_ldo(36, STEP_50_MV),
regulator_desc_s2mps11_ldo(37, STEP_50_MV),
regulator_desc_s2mps11_ldo(38, STEP_50_MV),
@@ -382,8 +382,8 @@ static const struct regulator_desc s2mps11_regulators[] = {
regulator_desc_s2mps11_buck1_4(4),
regulator_desc_s2mps11_buck5,
regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_12_5_MV),
+ regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_12_5_MV),
regulator_desc_s2mps11_buck9,
regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
};
diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c
index e79f2a181ad242..b9ec4a16db1f6b 100644
--- a/drivers/rtc/hctosys.c
+++ b/drivers/rtc/hctosys.c
@@ -50,8 +50,10 @@ static int __init rtc_hctosys(void)
tv64.tv_sec = rtc_tm_to_time64(&tm);
#if BITS_PER_LONG == 32
- if (tv64.tv_sec > INT_MAX)
+ if (tv64.tv_sec > INT_MAX) {
+ err = -ERANGE;
goto err_read;
+ }
#endif
err = do_settimeofday64(&tv64);
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 495a6d08b7b535..80f96e09e7c116 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -349,6 +349,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
int err;
+ if (!rtc->ops)
+ return -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ return -EINVAL;
+
err = rtc_valid_tm(&alarm->time);
if (err != 0)
return err;
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index bd170cb3361ce1..5747a54cbd423e 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -164,6 +164,10 @@ static int bq4802_probe(struct platform_device *pdev)
} else if (p->r->flags & IORESOURCE_MEM) {
p->regs = devm_ioremap(&pdev->dev, p->r->start,
resource_size(p->r));
+ if (!p->regs) {
+ err = -ENOMEM;
+ goto out;
+ }
p->read = bq4802_read_mem;
p->write = bq4802_write_mem;
} else {
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c60cf2d08ce5aa..26b939f3be8741 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -43,11 +43,24 @@
#include <linux/of_platform.h>
#ifdef CONFIG_X86
#include <asm/i8259.h>
+#include <asm/processor.h>
+#include <linux/dmi.h>
#endif
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
+/*
+ * Use ACPI SCI to replace HPET interrupt for RTC Alarm event
+ *
+ * If cleared, ACPI SCI is only used to wake up the system from suspend
+ *
+ * If set, ACPI SCI is used to handle UIE/AIE and system wakeup
+ */
+
+static bool use_acpi_alarm;
+module_param(use_acpi_alarm, bool, 0444);
+
struct cmos_rtc {
struct rtc_device *rtc;
struct device *dev;
@@ -65,6 +78,8 @@ struct cmos_rtc {
u8 day_alrm;
u8 mon_alrm;
u8 century;
+
+ struct rtc_wkalrm saved_wkalrm;
};
/* both platform and pnp busses use negative numbers for invalid irqs */
@@ -151,6 +166,12 @@ static inline int hpet_unregister_irq_handler(irq_handler_t handler)
#endif
+/* Don't use HPET for RTC Alarm event if ACPI Fixed event is used */
+static int use_hpet_alarm(void)
+{
+ return is_hpet_enabled() && !use_acpi_alarm;
+}
+
/*----------------------------------------------------------------*/
#ifdef RTC_PORT
@@ -292,7 +313,7 @@ static void cmos_checkintr(struct cmos_rtc *cmos, unsigned char rtc_control)
*/
rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
- if (is_hpet_enabled())
+ if (use_hpet_alarm())
return;
rtc_intr &= (rtc_control & RTC_IRQMASK) | RTC_IRQF;
@@ -312,7 +333,13 @@ static void cmos_irq_enable(struct cmos_rtc *cmos, unsigned char mask)
rtc_control |= mask;
CMOS_WRITE(rtc_control, RTC_CONTROL);
- hpet_set_rtc_irq_bit(mask);
+ if (use_hpet_alarm())
+ hpet_set_rtc_irq_bit(mask);
+
+ if ((mask & RTC_AIE) && use_acpi_alarm) {
+ if (cmos->wake_on)
+ cmos->wake_on(cmos->dev);
+ }
cmos_checkintr(cmos, rtc_control);
}
@@ -324,19 +351,97 @@ static void cmos_irq_disable(struct cmos_rtc *cmos, unsigned char mask)
rtc_control = CMOS_READ(RTC_CONTROL);
rtc_control &= ~mask;
CMOS_WRITE(rtc_control, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
+ if (use_hpet_alarm())
+ hpet_mask_rtc_irq_bit(mask);
+
+ if ((mask & RTC_AIE) && use_acpi_alarm) {
+ if (cmos->wake_off)
+ cmos->wake_off(cmos->dev);
+ }
cmos_checkintr(cmos, rtc_control);
}
+static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_time now;
+
+ cmos_read_time(dev, &now);
+
+ if (!cmos->day_alrm) {
+ time64_t t_max_date;
+ time64_t t_alrm;
+
+ t_max_date = rtc_tm_to_time64(&now);
+ t_max_date += 24 * 60 * 60 - 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one day in the future\n");
+ return -EINVAL;
+ }
+ } else if (!cmos->mon_alrm) {
+ struct rtc_time max_date = now;
+ time64_t t_max_date;
+ time64_t t_alrm;
+ int max_mday;
+
+ if (max_date.tm_mon == 11) {
+ max_date.tm_mon = 0;
+ max_date.tm_year += 1;
+ } else {
+ max_date.tm_mon += 1;
+ }
+ max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year);
+ if (max_date.tm_mday > max_mday)
+ max_date.tm_mday = max_mday;
+
+ t_max_date = rtc_tm_to_time64(&max_date);
+ t_max_date -= 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one month in the future\n");
+ return -EINVAL;
+ }
+ } else {
+ struct rtc_time max_date = now;
+ time64_t t_max_date;
+ time64_t t_alrm;
+ int max_mday;
+
+ max_date.tm_year += 1;
+ max_mday = rtc_month_days(max_date.tm_mon, max_date.tm_year);
+ if (max_date.tm_mday > max_mday)
+ max_date.tm_mday = max_mday;
+
+ t_max_date = rtc_tm_to_time64(&max_date);
+ t_max_date -= 1;
+ t_alrm = rtc_tm_to_time64(&t->time);
+ if (t_alrm > t_max_date) {
+ dev_err(dev,
+ "Alarms can be up to one year in the future\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char mon, mday, hrs, min, sec, rtc_control;
+ int ret;
if (!is_valid_irq(cmos->irq))
return -EIO;
+ ret = cmos_validate_alarm(dev, t);
+ if (ret < 0)
+ return ret;
+
mon = t->time.tm_mon + 1;
mday = t->time.tm_mday;
hrs = t->time.tm_hour;
@@ -370,10 +475,14 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
CMOS_WRITE(mon, cmos->mon_alrm);
}
- /* FIXME the HPET alarm glue currently ignores day_alrm
- * and mon_alrm ...
- */
- hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min, t->time.tm_sec);
+ if (use_hpet_alarm()) {
+ /*
+ * FIXME the HPET alarm glue currently ignores day_alrm
+ * and mon_alrm ...
+ */
+ hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
+ t->time.tm_sec);
+ }
if (t->enabled)
cmos_irq_enable(cmos, RTC_AIE);
@@ -430,7 +539,7 @@ static int cmos_procfs(struct device *dev, struct seq_file *seq)
"batt_status\t: %s\n",
(rtc_control & RTC_PIE) ? "yes" : "no",
(rtc_control & RTC_UIE) ? "yes" : "no",
- is_hpet_enabled() ? "yes" : "no",
+ use_hpet_alarm() ? "yes" : "no",
// (rtc_control & RTC_SQWE) ? "yes" : "no",
(rtc_control & RTC_DM_BINARY) ? "no" : "yes",
(rtc_control & RTC_DST_EN) ? "yes" : "no",
@@ -551,7 +660,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
*/
irqstat = CMOS_READ(RTC_INTR_FLAGS);
rtc_control = CMOS_READ(RTC_CONTROL);
- if (is_hpet_enabled())
+ if (use_hpet_alarm())
irqstat = (unsigned long)irq & 0xF0;
/* If we were suspended, RTC_CONTROL may not be accurate since the
@@ -570,7 +679,8 @@ static irqreturn_t cmos_interrupt(int irq, void *p)
cmos_rtc.suspend_ctrl &= ~RTC_AIE;
rtc_control &= ~RTC_AIE;
CMOS_WRITE(rtc_control, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(RTC_AIE);
+ if (use_hpet_alarm())
+ hpet_mask_rtc_irq_bit(RTC_AIE);
CMOS_READ(RTC_INTR_FLAGS);
}
spin_unlock(&rtc_lock);
@@ -692,7 +802,8 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
* need to do something about other clock frequencies.
*/
cmos_rtc.rtc->irq_freq = 1024;
- hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
+ if (use_hpet_alarm())
+ hpet_set_periodic_freq(cmos_rtc.rtc->irq_freq);
CMOS_WRITE(RTC_REF_CLCK_32KHZ | 0x06, RTC_FREQ_SELECT);
}
@@ -713,10 +824,13 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
+ if (use_hpet_alarm())
+ hpet_rtc_timer_init();
+
if (is_valid_irq(rtc_irq)) {
irq_handler_t rtc_cmos_int_handler;
- if (is_hpet_enabled()) {
+ if (use_hpet_alarm()) {
rtc_cmos_int_handler = hpet_rtc_interrupt;
retval = hpet_register_irq_handler(cmos_interrupt);
if (retval) {
@@ -735,7 +849,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
goto cleanup1;
}
}
- hpet_rtc_timer_init();
+
+ if (use_hpet_alarm())
+ hpet_rtc_timer_init();
/* export at least the first block of NVRAM */
nvram.size = address_space - NVRAM_OFFSET;
@@ -752,7 +868,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
"alarms up to one day",
cmos_rtc.century ? ", y3k" : "",
nvram.size,
- is_hpet_enabled() ? ", hpet irqs" : "");
+ use_hpet_alarm() ? ", hpet irqs" : "");
return 0;
@@ -789,7 +905,8 @@ static void __exit cmos_do_remove(struct device *dev)
if (is_valid_irq(cmos->irq)) {
free_irq(cmos->irq, cmos->rtc);
- hpet_unregister_irq_handler(cmos_interrupt);
+ if (use_hpet_alarm())
+ hpet_unregister_irq_handler(cmos_interrupt);
}
rtc_device_unregister(cmos->rtc);
@@ -869,13 +986,13 @@ static int cmos_suspend(struct device *dev)
mask = RTC_IRQMASK;
tmp &= ~mask;
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_mask_rtc_irq_bit(mask);
-
+ if (use_hpet_alarm())
+ hpet_mask_rtc_irq_bit(mask);
cmos_checkintr(cmos, tmp);
}
spin_unlock_irq(&rtc_lock);
- if (tmp & RTC_AIE) {
+ if ((tmp & RTC_AIE) && !use_acpi_alarm) {
cmos->enabled_wake = 1;
if (cmos->wake_on)
cmos->wake_on(dev);
@@ -883,6 +1000,8 @@ static int cmos_suspend(struct device *dev)
enable_irq_wake(cmos->irq);
}
+ cmos_read_alarm(dev, &cmos->saved_wkalrm);
+
dev_dbg(dev, "suspend%s, ctrl %02x\n",
(tmp & RTC_AIE) ? ", alarm may wake" : "",
tmp);
@@ -903,6 +1022,40 @@ static inline int cmos_poweroff(struct device *dev)
#ifdef CONFIG_PM_SLEEP
+static void cmos_check_wkalrm(struct device *dev)
+{
+ struct cmos_rtc *cmos = dev_get_drvdata(dev);
+ struct rtc_wkalrm current_alarm;
+ time64_t t_now;
+ time64_t t_current_expires;
+ time64_t t_saved_expires;
+ struct rtc_time now;
+
+ /* Check if we have RTC Alarm armed */
+ if (!(cmos->suspend_ctrl & RTC_AIE))
+ return;
+
+ cmos_read_time(dev, &now);
+ t_now = rtc_tm_to_time64(&now);
+
+ /*
+ * The ACPI RTC wake event is cleared after resume from STR,
+ * so ACK the rtc irq here.
+ */
+ if (t_now >= cmos->alarm_expires && use_acpi_alarm) {
+ cmos_interrupt(0, (void *)cmos->rtc);
+ return;
+ }
+
+ cmos_read_alarm(dev, &current_alarm);
+ t_current_expires = rtc_tm_to_time64(&current_alarm.time);
+ t_saved_expires = rtc_tm_to_time64(&cmos->saved_wkalrm.time);
+ if (t_current_expires != t_saved_expires ||
+ cmos->saved_wkalrm.enabled != current_alarm.enabled) {
+ cmos_set_alarm(dev, &cmos->saved_wkalrm);
+ }
+}
+
static void cmos_check_acpi_rtc_status(struct device *dev,
unsigned char *rtc_control);
@@ -911,7 +1064,7 @@ static int cmos_resume(struct device *dev)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char tmp;
- if (cmos->enabled_wake) {
+ if (cmos->enabled_wake && !use_acpi_alarm) {
if (cmos->wake_off)
cmos->wake_off(dev);
else
@@ -919,6 +1072,9 @@ static int cmos_resume(struct device *dev)
cmos->enabled_wake = 0;
}
+ /* The BIOS might have changed the alarm, restore it */
+ cmos_check_wkalrm(dev);
+
spin_lock_irq(&rtc_lock);
tmp = cmos->suspend_ctrl;
cmos->suspend_ctrl = 0;
@@ -926,16 +1082,17 @@ static int cmos_resume(struct device *dev)
if (tmp & RTC_IRQMASK) {
unsigned char mask;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(dev) && use_hpet_alarm())
hpet_rtc_timer_init();
do {
CMOS_WRITE(tmp, RTC_CONTROL);
- hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
+ if (use_hpet_alarm())
+ hpet_set_rtc_irq_bit(tmp & RTC_IRQMASK);
mask = CMOS_READ(RTC_INTR_FLAGS);
mask &= (tmp & RTC_IRQMASK) | RTC_IRQF;
- if (!is_hpet_enabled() || !is_intr(mask))
+ if (!use_hpet_alarm() || !is_intr(mask))
break;
/* force one-shot behavior if HPET blocked
@@ -988,17 +1145,29 @@ static u32 rtc_handler(void *context)
struct cmos_rtc *cmos = dev_get_drvdata(dev);
unsigned char rtc_control = 0;
unsigned char rtc_intr;
+ unsigned long flags;
- spin_lock_irq(&rtc_lock);
- if (cmos_rtc.suspend_ctrl)
- rtc_control = CMOS_READ(RTC_CONTROL);
- if (rtc_control & RTC_AIE) {
- cmos_rtc.suspend_ctrl &= ~RTC_AIE;
- CMOS_WRITE(rtc_control, RTC_CONTROL);
- rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
- rtc_update_irq(cmos->rtc, 1, rtc_intr);
+
+ /*
+	 * Always update the RTC IRQ when ACPI is used as the RTC alarm.
+	 * Otherwise, the ACPI SCI is enabled only during suspend/resume,
+	 * so only update the RTC IRQ in that case.
+ */
+ if (use_acpi_alarm)
+ cmos_interrupt(0, (void *)cmos->rtc);
+ else {
+ /* Fix me: can we use cmos_interrupt() here as well? */
+ spin_lock_irqsave(&rtc_lock, flags);
+ if (cmos_rtc.suspend_ctrl)
+ rtc_control = CMOS_READ(RTC_CONTROL);
+ if (rtc_control & RTC_AIE) {
+ cmos_rtc.suspend_ctrl &= ~RTC_AIE;
+ CMOS_WRITE(rtc_control, RTC_CONTROL);
+ rtc_intr = CMOS_READ(RTC_INTR_FLAGS);
+ rtc_update_irq(cmos->rtc, 1, rtc_intr);
+ }
+ spin_unlock_irqrestore(&rtc_lock, flags);
}
- spin_unlock_irq(&rtc_lock);
pm_wakeup_event(dev, 0);
acpi_clear_event(ACPI_EVENT_RTC);
@@ -1028,6 +1197,28 @@ static void rtc_wake_off(struct device *dev)
acpi_disable_event(ACPI_EVENT_RTC, 0);
}
+#ifdef CONFIG_X86
+/* Enable use_acpi_alarm mode for Intel platforms with a BIOS date of 2015 or later */
+static void use_acpi_alarm_quirks(void)
+{
+ int year;
+
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+ return;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return;
+
+ if (!is_hpet_enabled())
+ return;
+
+ if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2015)
+ use_acpi_alarm = true;
+}
+#else
+static inline void use_acpi_alarm_quirks(void) { }
+#endif
+
/* Every ACPI platform has a mc146818 compatible "cmos rtc". Here we find
* its device node and pass extra config data. This helps its driver use
* capabilities that the now-obsolete mc146818 didn't have, and informs it
@@ -1040,6 +1231,8 @@ static void cmos_wake_setup(struct device *dev)
if (acpi_disabled)
return;
+ use_acpi_alarm_quirks();
+
rtc_wake_setup(dev);
acpi_rtc_info.wake_on = rtc_wake_on;
acpi_rtc_info.wake_off = rtc_wake_off;
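
The suspend/resume hunks above add cmos_check_wkalrm(), which boils down to one comparison: the alarm saved at suspend versus what the RTC reports after resume, re-arming it if firmware changed it. A minimal user-space sketch of that check, with a hypothetical plain-C stand-in for struct rtc_wkalrm:

    #include <stdbool.h>
    #include <stdint.h>

    struct wkalrm { int64_t expires; bool enabled; };  /* stand-in for rtc_wkalrm */

    /* True if firmware changed the armed alarm behind the driver's back. */
    static bool alarm_needs_restore(const struct wkalrm *saved,
                                    const struct wkalrm *current_alarm)
    {
        return saved->expires != current_alarm->expires ||
               saved->enabled != current_alarm->enabled;
    }

    int main(void)
    {
        struct wkalrm saved = { 1000, true }, now = { 2000, true };

        return alarm_needs_restore(&saved, &now) ? 0 : 1;  /* 0: must restore */
    }
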
diff --git a/drivers/rtc/rtc-lib.c b/drivers/rtc/rtc-lib.c
index e6bfb9c42a10b0..5b136bdc03d4ab 100644
--- a/drivers/rtc/rtc-lib.c
+++ b/drivers/rtc/rtc-lib.c
@@ -52,13 +52,11 @@ EXPORT_SYMBOL(rtc_year_days);
*/
void rtc_time64_to_tm(time64_t time, struct rtc_time *tm)
{
- unsigned int month, year;
- unsigned long secs;
+ unsigned int month, year, secs;
int days;
/* time must be positive */
- days = div_s64(time, 86400);
- secs = time - (unsigned int) days * 86400;
+ days = div_s64_rem(time, 86400, &secs);
/* day of the week, 1970-01-01 was a Thursday */
tm->tm_wday = (days + 4) % 7;
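
The rtc_time64_to_tm() change replaces a divide followed by a multiply-and-subtract with a single divide-with-remainder. A user-space sketch of the same computation; div_s64_rem_sketch is a hypothetical stand-in for the kernel helper:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's div_s64_rem(). */
    static int64_t div_s64_rem_sketch(int64_t dividend, int32_t divisor,
                                      uint32_t *rem)
    {
        int64_t quot = dividend / divisor;

        *rem = (uint32_t)(dividend - quot * divisor);
        return quot;
    }

    int main(void)
    {
        uint32_t secs;
        int days = (int)div_s64_rem_sketch(1556093766, 86400, &secs);

        printf("days=%d secs=%u\n", days, secs);  /* days=18010 secs=29766 */
        return 0;
    }
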
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index a161fbf6f172c5..63ad5b543f149f 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -47,49 +47,83 @@ struct snvs_rtc_data {
struct clk *clk;
};
+/* Read the 64-bit timer register, which could be in an inconsistent state */
+static u64 rtc_read_lpsrt(struct snvs_rtc_data *data)
+{
+ u32 msb, lsb;
+
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &msb);
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &lsb);
+ return (u64)msb << 32 | lsb;
+}
+
+/* Read the secure real time counter, taking care to deal with the case of
+ * the counter updating while being read.
+ */
static u32 rtc_read_lp_counter(struct snvs_rtc_data *data)
{
u64 read1, read2;
- u32 val;
+ unsigned int timeout = 100;
+	/* The registers might update between the read of the LSB reg and the
+	 * MSB reg. It's also possible that one register might be in a
+	 * partially modified state.
+ */
+ read1 = rtc_read_lpsrt(data);
do {
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
- read1 = val;
- read1 <<= 32;
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
- read1 |= val;
-
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCMR, &val);
- read2 = val;
- read2 <<= 32;
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &val);
- read2 |= val;
- } while (read1 != read2);
+ read2 = read1;
+ read1 = rtc_read_lpsrt(data);
+ } while (read1 != read2 && --timeout);
+ if (!timeout)
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
/* Convert 47-bit counter to 32-bit raw second count */
return (u32) (read1 >> CNTR_TO_SECS_SH);
}
-static void rtc_write_sync_lp(struct snvs_rtc_data *data)
+/* Read just the LSB of the counter, dealing with inconsistent state */
+static int rtc_read_lp_counter_lsb(struct snvs_rtc_data *data, u32 *lsb)
{
- u32 count1, count2, count3;
- int i;
-
- /* Wait for 3 CKIL cycles */
- for (i = 0; i < 3; i++) {
- do {
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
- } while (count1 != count2);
-
- /* Now wait until counter value changes */
- do {
- do {
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count2);
- regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count3);
- } while (count2 != count3);
- } while (count3 == count1);
+ u32 count1, count2;
+ unsigned int timeout = 100;
+
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ do {
+ count2 = count1;
+ regmap_read(data->regmap, data->offset + SNVS_LPSRTCLR, &count1);
+ } while (count1 != count2 && --timeout);
+ if (!timeout) {
+ dev_err(&data->rtc->dev, "Timeout trying to get valid LPSRT Counter read\n");
+ return -ETIMEDOUT;
}
+
+ *lsb = count1;
+ return 0;
+}
+
+static int rtc_write_sync_lp(struct snvs_rtc_data *data)
+{
+ u32 count1, count2;
+ u32 elapsed;
+ unsigned int timeout = 1000;
+ int ret;
+
+ ret = rtc_read_lp_counter_lsb(data, &count1);
+ if (ret)
+ return ret;
+
+ /* Wait for 3 CKIL cycles, about 61.0-91.5 µs */
+ do {
+ ret = rtc_read_lp_counter_lsb(data, &count2);
+ if (ret)
+ return ret;
+ elapsed = count2 - count1; /* wrap around _is_ handled! */
+ } while (elapsed < 3 && --timeout);
+ if (!timeout) {
+ dev_err(&data->rtc->dev, "Timeout waiting for LPSRT Counter to change\n");
+ return -ETIMEDOUT;
+ }
+ return 0;
}
static int snvs_rtc_enable(struct snvs_rtc_data *data, bool enable)
@@ -173,9 +207,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
enable ? (SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN) : 0);
- rtc_write_sync_lp(data);
-
- return 0;
+ return rtc_write_sync_lp(data);
}
static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
@@ -183,10 +215,14 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
struct rtc_time *alrm_tm = &alrm->time;
unsigned long time;
+ int ret;
rtc_tm_to_time(alrm_tm, &time);
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
+ ret = rtc_write_sync_lp(data);
+ if (ret)
+ return ret;
regmap_write(data->regmap, data->offset + SNVS_LPTAR, time);
/* Clear alarm interrupt status bit */
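
The reworked rtc_read_lp_counter() is an instance of a common pattern: sample a multi-word counter until two consecutive samples agree, with a bounded retry count rather than a potentially unbounded loop. A compilable user-space sketch of the pattern; the volatile globals are hypothetical stand-ins for the SNVS_LPSRTCMR/SNVS_LPSRTCLR registers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the MSB/LSB halves of the counter. */
    static volatile uint32_t reg_msb, reg_lsb;

    static uint64_t read_counter_once(void)
    {
        uint32_t msb = reg_msb;   /* SNVS_LPSRTCMR in the driver */
        uint32_t lsb = reg_lsb;   /* SNVS_LPSRTCLR in the driver */

        return (uint64_t)msb << 32 | lsb;
    }

    /* Retry until two consecutive samples agree, but never loop forever. */
    static int read_counter_stable(uint64_t *out)
    {
        unsigned int timeout = 100;
        uint64_t read1 = read_counter_once(), read2;

        do {
            read2 = read1;
            read1 = read_counter_once();
        } while (read1 != read2 && --timeout);

        if (!timeout)
            return -1;   /* the driver logs dev_err() and returns -ETIMEDOUT */
        *out = read1;
        return 0;
    }

    int main(void)
    {
        uint64_t v;

        return read_counter_stable(&v) ? 1 : 0;
    }
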
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 21d174e9ebdb50..80a43074c2f9a6 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2101,8 +2101,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
static int dasd_eckd_online_to_ready(struct dasd_device *device)
{
- cancel_work_sync(&device->reload_device);
- cancel_work_sync(&device->kick_validate);
+ if (cancel_work_sync(&device->reload_device))
+ dasd_put_device(device);
+ if (cancel_work_sync(&device->kick_validate))
+ dasd_put_device(device);
+
return 0;
};
@@ -4020,6 +4023,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
usrparm.psf_data &= 0x7fffffffULL;
usrparm.rssd_result &= 0x7fffffffULL;
}
+ /* at least 2 bytes are accessed and should be allocated */
+ if (usrparm.psf_data_len < 2) {
+ DBF_DEV_EVENT(DBF_WARNING, device,
+ "Symmetrix ioctl invalid data length %d",
+ usrparm.psf_data_len);
+ rc = -EINVAL;
+ goto out;
+ }
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
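
The dasd_eckd_online_to_ready() fix relies on cancel_work_sync() returning true exactly when it cancelled work that was still pending; the reference taken when that work was queued must then be dropped by the canceller, or it leaks. A reference-counting sketch of that rule; all names here are hypothetical:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct devref { atomic_int refcnt; };

    static void devref_get(struct devref *d) { atomic_fetch_add(&d->refcnt, 1); }
    static void devref_put(struct devref *d) { atomic_fetch_sub(&d->refcnt, 1); }

    /* Stand-in: true means the work item was still queued and never ran. */
    static bool cancel_work_sync_sketch(bool was_pending) { return was_pending; }

    static void online_to_ready(struct devref *d, bool pending)
    {
        /* The queued work would have dropped its reference when it ran;
         * after cancelling it, that put becomes our responsibility. */
        if (cancel_work_sync_sketch(pending))
            devref_put(d);
    }

    int main(void)
    {
        struct devref d = { 1 };

        devref_get(&d);              /* reference taken at queue time */
        online_to_ready(&d, true);   /* cancelled: drop that reference */
        return atomic_load(&d.refcnt) == 1 ? 0 : 1;
    }
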
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 94415620747744..dcb949dcfa66c1 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -43,7 +43,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
static void __ref sclp_cpu_change_notify(struct work_struct *work)
{
+ lock_device_hotplug();
smp_rescan_cpus();
+ unlock_device_hotplug();
}
static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 742ca57ece8c8c..d64b401f3d0584 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -640,21 +640,20 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
unsigned long phys_aob = 0;
if (!q->use_cq)
- goto out;
+ return 0;
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
}
if (q->aobs[bufnr]) {
- q->sbal_state[bufnr].flags = QDIO_OUTBUF_STATE_FLAG_NONE;
q->sbal_state[bufnr].aob = q->aobs[bufnr];
q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user;
phys_aob = virt_to_phys(q->aobs[bufnr]);
WARN_ON_ONCE(phys_aob & 0xFF);
}
-out:
+ q->sbal_state[bufnr].flags = 0;
return phys_aob;
}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 95c631125a2041..b40604d0126f82 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2452,11 +2452,12 @@ out:
return rc;
}
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;
+ qeth_clear_outq_buffers(q, 1);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
@@ -2529,10 +2530,8 @@ out_freeoutqbufs:
card->qdio.out_qs[i]->bufs[j] = NULL;
}
out_freeoutq:
- while (i > 0) {
- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- }
+ while (i > 0)
+ qeth_free_output_queue(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
out_freepool:
@@ -2565,10 +2564,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
if (card->qdio.out_qs) {
- for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
- }
+ for (i = 0; i < card->qdio.no_out_queues; i++)
+ qeth_free_output_queue(card->qdio.out_qs[i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
}
@@ -3505,13 +3502,14 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
if (atomic_read(&queue->set_pci_flags_count))
qdio_flags |= QDIO_FLAG_PCI_OUT;
+ atomic_add(count, &queue->used_buffers);
+
rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
queue->queue_no, index, count);
if (queue->card->options.performance_stats)
queue->card->perf_stats.outbound_do_qdio_time +=
qeth_get_micros() -
queue->card->perf_stats.outbound_do_qdio_start_time;
- atomic_add(count, &queue->used_buffers);
if (rc) {
queue->card->stats.tx_errors += count;
/* ignore temporary SIGA errors without busy condition */
@@ -4518,8 +4516,8 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
{
struct qeth_ipa_cmd *cmd;
struct qeth_arp_query_info *qinfo;
- struct qeth_snmp_cmd *snmp;
unsigned char *data;
+ void *snmp_data;
__u16 data_len;
QETH_CARD_TEXT(card, 3, "snpcmdcb");
@@ -4527,7 +4525,6 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
cmd = (struct qeth_ipa_cmd *) sdata;
data = (unsigned char *)((char *)cmd - reply->offset);
qinfo = (struct qeth_arp_query_info *) reply->param;
- snmp = &cmd->data.setadapterparms.data.snmp;
if (cmd->hdr.return_code) {
QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
@@ -4540,10 +4537,15 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
return 0;
}
data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data));
- if (cmd->data.setadapterparms.hdr.seq_no == 1)
- data_len -= (__u16)((char *)&snmp->data - (char *)cmd);
- else
- data_len -= (__u16)((char *)&snmp->request - (char *)cmd);
+ if (cmd->data.setadapterparms.hdr.seq_no == 1) {
+ snmp_data = &cmd->data.setadapterparms.data.snmp;
+ data_len -= offsetof(struct qeth_ipa_cmd,
+ data.setadapterparms.data.snmp);
+ } else {
+ snmp_data = &cmd->data.setadapterparms.data.snmp.request;
+ data_len -= offsetof(struct qeth_ipa_cmd,
+ data.setadapterparms.data.snmp.request);
+ }
/* check if there is enough room in userspace */
if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
@@ -4556,16 +4558,9 @@ static int qeth_snmp_command_cb(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "sseqn%i",
cmd->data.setadapterparms.hdr.seq_no);
/*copy entries to user buffer*/
- if (cmd->data.setadapterparms.hdr.seq_no == 1) {
- memcpy(qinfo->udata + qinfo->udata_offset,
- (char *)snmp,
- data_len + offsetof(struct qeth_snmp_cmd, data));
- qinfo->udata_offset += offsetof(struct qeth_snmp_cmd, data);
- } else {
- memcpy(qinfo->udata + qinfo->udata_offset,
- (char *)&snmp->request, data_len);
- }
+ memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
qinfo->udata_offset += data_len;
+
/* check if all replies received ... */
QETH_CARD_TEXT_(card, 4, "srtot%i",
cmd->data.setadapterparms.hdr.used_total);
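
The qeth_snmp_command_cb() rework swaps pointer subtraction for offsetof(), which states the intent directly: how many bytes of the IPA command precede the chosen SNMP payload. A sketch with hypothetical struct layouts, not the real qeth definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical layouts, not the real qeth structures. */
    struct snmp_cmd { unsigned int request; char data[16]; };
    struct ipa_cmd  { unsigned int hdr; struct snmp_cmd snmp; };

    int main(void)
    {
        /* Bytes of the command that precede each payload variant. */
        printf("to snmp:    %zu\n", offsetof(struct ipa_cmd, snmp));
        printf("to request: %zu\n", offsetof(struct ipa_cmd, snmp.request));
        return 0;
    }
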
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index fa844b0ff84754..7bcf0dae3a6559 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -419,6 +419,7 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
if (card->discipline) {
card->discipline->remove(card->gdev);
qeth_core_free_discipline(card);
+ card->options.layer2 = -1;
}
rc = qeth_core_load_discipline(card, newdis);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index acdb5ccb0ab95a..34d3b7aff5132e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -523,7 +523,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index bbdb3b6c54bbee..2cc9bc1ef1e383 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1902,7 +1902,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 38c8e308d4c85e..a96c98e3fc73c7 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -275,16 +275,16 @@ static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
*/
int zfcp_status_read_refill(struct zfcp_adapter *adapter)
{
- while (atomic_read(&adapter->stat_miss) > 0)
+ while (atomic_add_unless(&adapter->stat_miss, -1, 0))
if (zfcp_fsf_status_read(adapter->qdio)) {
+ atomic_inc(&adapter->stat_miss); /* undo add -1 */
if (atomic_read(&adapter->stat_miss) >=
adapter->stat_read_buf_num) {
zfcp_erp_adapter_reopen(adapter, 0, "axsref1");
return 1;
}
break;
- } else
- atomic_dec(&adapter->stat_miss);
+ }
return 0;
}
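
The zfcp_status_read_refill() fix turns a racy read-then-decrement into an atomic decrement-only-if-positive via atomic_add_unless(&stat_miss, -1, 0). The same primitive can be sketched in portable C11 with a compare-and-swap loop; this is a sketch of the pattern, not the kernel implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Decrement *v unless it is already 0; true means one unit was consumed. */
    static bool dec_unless_zero(atomic_int *v)
    {
        int cur = atomic_load(v);

        while (cur != 0) {
            if (atomic_compare_exchange_weak(v, &cur, cur - 1))
                return true;
            /* cur was reloaded by the failed CAS; retry. */
        }
        return false;
    }

    int main(void)
    {
        atomic_int stat_miss = 2;

        while (dec_unless_zero(&stat_miss))
            ;                            /* refill one buffer per unit */
        return atomic_load(&stat_miss);  /* 0 */
    }
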
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 4534a7ce77b823..b6caad0fee24cd 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -625,6 +625,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
spin_unlock_irqrestore(&dbf->scsi_lock, flags);
}
+/**
+ * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
+ * @tag: Identifier for event.
+ * @adapter: Pointer to zfcp adapter as context for this event.
+ * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
+ * @ret: Return value of calling function.
+ *
+ * This SCSI trace variant does not depend on any of:
+ * scsi_cmnd, zfcp_fsf_req, scsi_device.
+ */
+void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+ unsigned int scsi_id, int ret)
+{
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
+ unsigned long flags;
+ static int const level = 1;
+
+ if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
+ return;
+
+ spin_lock_irqsave(&dbf->scsi_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_SCSI_CMND;
+ rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
+ rec->scsi_retries = ~0;
+ rec->scsi_allowed = ~0;
+ rec->fcp_rsp_info = ~0;
+ rec->scsi_id = scsi_id;
+ rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
+ rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
+ rec->host_scribble = ~0;
+ memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
+
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+}
+
static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
{
struct debug_info *d;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 3b23d675459829..abe460eac71260 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -34,11 +34,28 @@ enum zfcp_erp_steps {
ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
};
+/**
+ * enum zfcp_erp_act_type - Type of ERP action object.
+ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
+ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
+ * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
+ *			  any of the first four enum values.
+ *			  Used to indicate that an ERP action could not be
+ *			  set up despite a detected need for some recovery.
+ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
+ *			  any of the first four enum values.
+ *			  Used to indicate that ERP is not needed because
+ *			  the object has ZFCP_STATUS_COMMON_ERP_FAILED.
+ */
enum zfcp_erp_act_type {
ZFCP_ERP_ACTION_REOPEN_LUN = 1,
ZFCP_ERP_ACTION_REOPEN_PORT = 2,
ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
+ ZFCP_ERP_ACTION_NONE = 0xc0,
+ ZFCP_ERP_ACTION_FAILED = 0xe0,
};
enum zfcp_erp_act_state {
@@ -125,6 +142,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
}
}
+static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev)
+{
+ int need = want;
+ struct zfcp_scsi_dev *zsdev;
+
+ switch (want) {
+ case ZFCP_ERP_ACTION_REOPEN_LUN:
+ zsdev = sdev_to_zfcp(sdev);
+ if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ need = 0;
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
+ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+ need = 0;
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_PORT:
+ if (atomic_read(&port->status) &
+ ZFCP_STATUS_COMMON_ERP_FAILED) {
+ need = 0;
+ /* ensure propagation of failed status to new devices */
+ zfcp_erp_set_port_status(
+ port, ZFCP_STATUS_COMMON_ERP_FAILED);
+ }
+ break;
+ case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_ERP_FAILED) {
+ need = 0;
+ /* ensure propagation of failed status to new devices */
+ zfcp_erp_set_adapter_status(
+ adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
+ }
+ break;
+ default:
+ need = 0;
+ break;
+ }
+
+ return need;
+}
+
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
struct zfcp_port *port,
struct scsi_device *sdev)
@@ -248,16 +308,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
int retval = 1, need;
struct zfcp_erp_action *act;
- if (!adapter->erp_thread)
- return -EIO;
+ need = zfcp_erp_handle_failed(want, adapter, port, sdev);
+ if (!need) {
+ need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
+ goto out;
+ }
+
+ if (!adapter->erp_thread) {
+ need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
+ retval = -EIO;
+ goto out;
+ }
need = zfcp_erp_required_act(want, adapter, port, sdev);
if (!need)
goto out;
act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
- if (!act)
+ if (!act) {
+ need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
goto out;
+ }
atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
++adapter->erp_total_count;
list_add_tail(&act->list, &adapter->erp_ready_head);
@@ -268,18 +339,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
return retval;
}
+void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
+ u64 port_name, u32 port_id)
+{
+ unsigned long flags;
+ static /* don't waste stack */ struct zfcp_port tmpport;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ /* Stand-in zfcp port with fields just good enough for
+ * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
+ * Under lock because tmpport is static.
+ */
+ atomic_set(&tmpport.status, -1); /* unknown */
+ tmpport.wwpn = port_name;
+ tmpport.d_id = port_id;
+ zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+ ZFCP_ERP_ACTION_NONE);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
int clear_mask, char *id)
{
zfcp_erp_adapter_block(adapter, clear_mask);
zfcp_scsi_schedule_rports_block(adapter);
- /* ensure propagation of failed status to new devices */
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- zfcp_erp_set_adapter_status(adapter,
- ZFCP_STATUS_COMMON_ERP_FAILED);
- return -EIO;
- }
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
adapter, NULL, NULL, id, 0);
}
@@ -298,12 +383,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
zfcp_scsi_schedule_rports_block(adapter);
write_lock_irqsave(&adapter->erp_lock, flags);
- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- zfcp_erp_set_adapter_status(adapter,
- ZFCP_STATUS_COMMON_ERP_FAILED);
- else
- zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
- NULL, NULL, id, 0);
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+ NULL, NULL, id, 0);
write_unlock_irqrestore(&adapter->erp_lock, flags);
}
@@ -344,9 +425,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- return;
-
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
port->adapter, port, NULL, id, 0);
}
@@ -372,12 +450,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
zfcp_erp_port_block(port, clear);
zfcp_scsi_schedule_rport_block(port);
- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
- /* ensure propagation of failed status to new devices */
- zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
- return -EIO;
- }
-
return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
port->adapter, port, NULL, id, 0);
}
@@ -417,9 +489,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
zfcp_erp_lun_block(sdev, clear);
- if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
- return;
-
zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
zfcp_sdev->port, sdev, id, act_status);
}
@@ -583,6 +652,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
add_timer(&erp_action->timer);
}
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag)
+{
+ unsigned long flags;
+ struct zfcp_port *port;
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+ read_lock(&adapter->port_list_lock);
+ list_for_each_entry(port, &adapter->port_list, list)
+ _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+ read_unlock(&adapter->port_list_lock);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
int clear, char *id)
{
@@ -1237,6 +1320,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
int lun_status;
+ if (sdev->sdev_state == SDEV_DEL ||
+ sdev->sdev_state == SDEV_CANCEL)
+ continue;
if (zsdev->port != port)
continue;
/* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 7a7984a50683e0..a39a74500e2387 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
struct zfcp_fsf_req *);
+extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
+ unsigned int scsi_id, int ret);
/* zfcp_erp.c */
extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_port_forced_no_port_dbf(char *id,
+ struct zfcp_adapter *adapter,
+ u64 port_name, u32 port_id);
extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
@@ -63,6 +68,8 @@ extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
extern int zfcp_erp_port_reopen(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+ int clear, char *dbftag);
extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index bb99db2948ab23..bdb257eaa2e507 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -180,6 +180,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
if (abrt_req)
break;
+ zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
@@ -276,6 +277,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
if (fsf_req)
break;
+ zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
zfcp_erp_wait(adapter);
ret = fc_block_scsi_eh(scpnt);
if (ret) {
@@ -322,15 +324,20 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
- int ret;
+ int ret = SUCCESS, fc_ret;
+ if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+ zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+ zfcp_erp_wait(adapter);
+ }
zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
zfcp_erp_wait(adapter);
- ret = fc_block_scsi_eh(scpnt);
- if (ret)
- return ret;
+ fc_ret = fc_block_scsi_eh(scpnt);
+ if (fc_ret)
+ ret = fc_ret;
- return SUCCESS;
+ zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
+ return ret;
}
struct scsi_transport_template *zfcp_scsi_transport_template;
@@ -600,6 +607,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
if (port) {
zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
put_device(&port->dev);
+ } else {
+ zfcp_erp_port_forced_no_port_dbf(
+ "sctrpin", adapter,
+ rport->port_name /* zfcp_scsi_rport_register */,
+ rport->port_id /* zfcp_scsi_rport_register */);
}
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index bf2d1300a95788..5abd37ce4f6f8d 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -59,6 +59,7 @@ struct virtio_ccw_device {
unsigned int revision; /* Transport revision */
wait_queue_head_t wait_q;
spinlock_t lock;
+ struct mutex io_lock; /* Serializes I/O requests */
struct list_head virtqueues;
unsigned long indicators;
unsigned long indicators2;
@@ -282,6 +283,8 @@ static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
struct virtio_ccw_vq_info *info;
+ if (!vcdev->airq_info)
+ return;
list_for_each_entry(info, &vcdev->virtqueues, node)
drop_airq_indicator(info->vq, vcdev->airq_info);
}
@@ -307,6 +310,7 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
unsigned long flags;
int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+ mutex_lock(&vcdev->io_lock);
do {
spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
@@ -319,7 +323,9 @@ static int ccw_io_helper(struct virtio_ccw_device *vcdev,
cpu_relax();
} while (ret == -EBUSY);
wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
- return ret ? ret : vcdev->err;
+ ret = ret ? ret : vcdev->err;
+ mutex_unlock(&vcdev->io_lock);
+ return ret;
}
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
@@ -419,7 +425,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
- return vcdev->config_block->num;
+ return vcdev->config_block->num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
@@ -833,6 +839,7 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
int ret;
struct ccw1 *ccw;
void *config_area;
+ unsigned long flags;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
@@ -851,11 +858,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
if (ret)
goto out_free;
+ spin_lock_irqsave(&vcdev->lock, flags);
memcpy(vcdev->config, config_area, offset + len);
- if (buf)
- memcpy(buf, &vcdev->config[offset], len);
if (vcdev->config_ready < offset + len)
vcdev->config_ready = offset + len;
+ spin_unlock_irqrestore(&vcdev->lock, flags);
+ if (buf)
+ memcpy(buf, config_area + offset, len);
out_free:
kfree(config_area);
@@ -869,6 +878,7 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
void *config_area;
+ unsigned long flags;
ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
if (!ccw)
@@ -881,9 +891,11 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
/* Make sure we don't overwrite fields. */
if (vcdev->config_ready < offset)
virtio_ccw_get_config(vdev, 0, NULL, offset);
+ spin_lock_irqsave(&vcdev->lock, flags);
memcpy(&vcdev->config[offset], buf, len);
/* Write the config area to the host. */
memcpy(config_area, vcdev->config, sizeof(vcdev->config));
+ spin_unlock_irqrestore(&vcdev->lock, flags);
ccw->cmd_code = CCW_CMD_WRITE_CONF;
ccw->flags = 0;
ccw->count = offset + len;
@@ -1230,6 +1242,7 @@ static int virtio_ccw_online(struct ccw_device *cdev)
init_waitqueue_head(&vcdev->wait_q);
INIT_LIST_HEAD(&vcdev->virtqueues);
spin_lock_init(&vcdev->lock);
+ mutex_init(&vcdev->io_lock);
spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
dev_set_drvdata(&cdev->dev, vcdev);
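
The virtio_ccw config hunks serialize channel I/O with a new mutex and put the cached config area under the existing spinlock; note that the caller's buffer is filled from the freshly read snapshot, so no lock is held across that copy. A user-space sketch of the get-side locking, with hypothetical types:

    #include <pthread.h>
    #include <string.h>

    struct cfgdev {
        pthread_mutex_t lock;        /* plays the role of vcdev->lock */
        unsigned char config[128];   /* cached config area */
        size_t config_ready;
    };

    static void get_config(struct cfgdev *d, size_t offset, void *buf,
                           size_t len, const unsigned char *config_area)
    {
        /* Update the cache under the lock... */
        pthread_mutex_lock(&d->lock);
        memcpy(d->config, config_area, offset + len);
        if (d->config_ready < offset + len)
            d->config_ready = offset + len;
        pthread_mutex_unlock(&d->lock);

        /* ...but fill the caller's buffer from the local snapshot, so the
         * lock is never held across the outbound copy. */
        if (buf)
            memcpy(buf, config_area + offset, len);
    }

    int main(void)
    {
        struct cfgdev d = { PTHREAD_MUTEX_INITIALIZER, { 0 }, 0 };
        unsigned char area[128] = { [4] = 42 };
        unsigned char out[1];

        get_config(&d, 4, out, 1, area);
        return out[0] == 42 ? 0 : 1;
    }
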
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 33fbe8249fd5c1..044cffbc45e839 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -221,6 +221,7 @@ static int d7s_probe(struct platform_device *op)
dev_set_drvdata(&op->dev, p);
d7s_device = p;
err = 0;
+ of_node_put(opts);
out:
return err;
diff --git a/drivers/sbus/char/envctrl.c b/drivers/sbus/char/envctrl.c
index 5609b602c54d31..baa9b322520b07 100644
--- a/drivers/sbus/char/envctrl.c
+++ b/drivers/sbus/char/envctrl.c
@@ -910,8 +910,10 @@ static void envctrl_init_i2c_child(struct device_node *dp,
for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) {
pchild->mon_type[len] = ENVCTRL_NOMON;
}
+ of_node_put(root_node);
return;
}
+ of_node_put(root_node);
}
/* Get the monitor channels. */
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index a56a7b243e91fa..b78a2f3745f2f5 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -889,6 +889,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file)
unsigned int minor_number;
int retval = TW_IOCTL_ERROR_OS_ENODEV;
+ if (!capable(CAP_SYS_ADMIN)) {
+ retval = -EACCES;
+ goto out;
+ }
+
minor_number = iminor(inode);
if (minor_number >= twa_device_extension_count)
goto out;
@@ -2040,6 +2045,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twa_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2062,6 +2068,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = ioremap(mem_addr, mem_len);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -2069,8 +2076,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
TW_DISABLE_INTERRUPTS(tw_dev);
/* Initialize the card */
- if (twa_reset_sequence(tw_dev, 0))
+ if (twa_reset_sequence(tw_dev, 0)) {
+ retval = -ENOMEM;
goto out_iounmap;
+ }
/* Set host specific parameters */
if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index f8374850f714dd..f0a5536a9ff57a 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1600,6 +1600,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (twl_initialize_device_extension(tw_dev)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -1614,6 +1615,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_iomap(pdev, 1, 0);
if (!tw_dev->base_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
@@ -1623,6 +1625,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
+ retval = -ENOMEM;
goto out_iounmap;
}
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2940bd769936cd..308a4206b6364a 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1034,6 +1034,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n");
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
minor_number = iminor(inode);
if (minor_number >= tw_device_extension_count)
return -ENODEV;
@@ -2275,6 +2278,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
if (tw_initialize_device_extension(tw_dev)) {
printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
+ retval = -ENOMEM;
goto out_free_device_extension;
}
@@ -2289,6 +2293,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
tw_dev->base_addr = pci_resource_start(pdev, 0);
if (!tw_dev->base_addr) {
printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
+ retval = -ENOMEM;
goto out_release_mem_region;
}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 8da8b46da72219..1c447405ebbfae 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1416,8 +1416,8 @@ static int aac_acquire_resources(struct aac_dev *dev)
/* After EEH recovery or suspend resume, max_msix count
	 * may change, therefore updating in init as well.
*/
- aac_adapter_start(dev);
dev->init->Sa_MSIXVectors = cpu_to_le32(dev->max_msix);
+ aac_adapter_start(dev);
}
return 0;
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index e415e1c58eb59f..cf3ac0654a3ac1 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -444,7 +444,7 @@ err_out:
return -1;
err_blink:
- return (status > 16) & 0xFF;
+ return (status >> 16) & 0xFF;
}
/**
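
The one-character aacraid fix replaces a comparison with a shift: (status > 16) & 0xFF evaluates to 0 or 1, while (status >> 16) & 0xFF extracts the intended byte. A two-line demonstration; 0xAB is an arbitrary example value:

    #include <assert.h>

    int main(void)
    {
        unsigned int status = 0x00AB0000;

        assert(((status >  16) & 0xFF) == 1);    /* old code: just a boolean */
        assert(((status >> 16) & 0xFF) == 0xAB); /* fixed: the intended byte */
        return 0;
    }
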
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 662b2321d1b0f4..913ebb6d0d2912 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -1031,8 +1031,10 @@ static int __init aic94xx_init(void)
aic94xx_transport_template =
sas_domain_attach_transport(&aic94xx_transport_functions);
- if (!aic94xx_transport_template)
+ if (!aic94xx_transport_template) {
+ err = -ENOMEM;
goto out_destroy_caches;
+ }
err = pci_register_driver(&aic94xx_pci_driver);
if (err)
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index dce787f6cca2e4..daade591409a69 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -1249,8 +1249,8 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
rspnid->dap = s_id;
- rspnid->spn_len = (u8) strlen((char *)name);
- strncpy((char *)rspnid->spn, (char *)name, rspnid->spn_len);
+ strlcpy(rspnid->spn, name, sizeof(rspnid->spn));
+ rspnid->spn_len = (u8) strlen(rspnid->spn);
return sizeof(struct fcgs_rspnid_req_s) + sizeof(struct ct_hdr_s);
}
@@ -1270,8 +1270,8 @@ fc_rsnn_nn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
memset(rsnn_nn, 0, sizeof(struct fcgs_rsnn_nn_req_s));
rsnn_nn->node_name = node_name;
- rsnn_nn->snn_len = (u8) strlen((char *)name);
- strncpy((char *)rsnn_nn->snn, (char *)name, rsnn_nn->snn_len);
+ strlcpy(rsnn_nn->snn, name, sizeof(rsnn_nn->snn));
+ rsnn_nn->snn_len = (u8) strlen(rsnn_nn->snn);
return sizeof(struct fcgs_rsnn_nn_req_s) + sizeof(struct ct_hdr_s);
}
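
The bfa conversions from strncpy()/strncat() to strlcpy()/strlcat() here and in the following files all address the same defect: strncpy() does not NUL-terminate when the source fills the buffer, and the old strncat() calls bounded the wrong quantity. A user-space sketch of strlcpy() semantics; strlcpy() is a BSD/kernel function, so the sketch carries its own implementation:

    #include <stdio.h>
    #include <string.h>

    /* Sketch of strlcpy(): always terminates (for size > 0) and returns the
     * length it tried to create, so callers can detect truncation. */
    static size_t strlcpy_sketch(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len >= size ? size - 1 : len;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char buf[8];

        if (strlcpy_sketch(buf, "0123456789", sizeof(buf)) >= sizeof(buf))
            puts("truncated");        /* buf == "0123456", always terminated */
        printf("%s\n", buf);
        return 0;
    }
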
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 0f19455951ec37..475470efb7c6e5 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -831,23 +831,23 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcpy(port_cfg->sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname, driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ BFA_SYMNAME_MAXLEN);
/*
* Host OS Info :
@@ -855,24 +855,24 @@ bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
* OS name string and instead copy the entire OS info string (64 bytes).
*/
if (driver_info->host_os_patch[0] == '\0') {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_OS_STR_LEN);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
} else {
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_name,
- BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
- strncat((char *)&port_cfg->sym_name,
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Append host OS Patch Info */
- strncat((char *)&port_cfg->sym_name,
- (char *)driver_info->host_os_patch,
- BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+ strlcat(port_cfg->sym_name.symname,
+ driver_info->host_os_patch,
+ BFA_SYMNAME_MAXLEN);
}
/* null terminate */
@@ -892,26 +892,26 @@ bfa_fcs_fabric_nsymb_init(struct bfa_fcs_fabric_s *fabric)
bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
/* Model name/number */
- strncpy((char *)&port_cfg->node_sym_name, model,
- BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcpy(port_cfg->node_sym_name.symname, model,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Driver Version */
- strncat((char *)&port_cfg->node_sym_name, (char *)driver_info->version,
- BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname, (char *)driver_info->version,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* Host machine name */
- strncat((char *)&port_cfg->node_sym_name,
- (char *)driver_info->host_machine_name,
- BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
- strncat((char *)&port_cfg->node_sym_name,
+ strlcat(port_cfg->node_sym_name.symname,
+ driver_info->host_machine_name,
+ BFA_SYMNAME_MAXLEN);
+ strlcat(port_cfg->node_sym_name.symname,
BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+ BFA_SYMNAME_MAXLEN);
/* null terminate */
port_cfg->node_sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index ff75ef8917551b..aa96f31ebc439e 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -2630,10 +2630,10 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc,
hba_attr->fw_version);
- strncpy(hba_attr->driver_version, (char *)driver_info->version,
+ strlcpy(hba_attr->driver_version, (char *)driver_info->version,
sizeof(hba_attr->driver_version));
- strncpy(hba_attr->os_name, driver_info->host_os_name,
+ strlcpy(hba_attr->os_name, driver_info->host_os_name,
sizeof(hba_attr->os_name));
/*
@@ -2641,23 +2641,23 @@ bfa_fcs_fdmi_get_hbaattr(struct bfa_fcs_lport_fdmi_s *fdmi,
* to the os name along with a separator
*/
if (driver_info->host_os_patch[0] != '\0') {
- strncat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
- sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
- strncat(hba_attr->os_name, driver_info->host_os_patch,
- sizeof(driver_info->host_os_patch));
+ strlcat(hba_attr->os_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+ sizeof(hba_attr->os_name));
+ strlcat(hba_attr->os_name, driver_info->host_os_patch,
+ sizeof(hba_attr->os_name));
}
/* Retrieve the max frame size from the port attr */
bfa_fcs_fdmi_get_portattr(fdmi, &fcs_port_attr);
hba_attr->max_ct_pyld = fcs_port_attr.max_frm_size;
- strncpy(hba_attr->node_sym_name.symname,
+ strlcpy(hba_attr->node_sym_name.symname,
port->port_cfg.node_sym_name.symname, BFA_SYMNAME_MAXLEN);
strcpy(hba_attr->vendor_info, "BROCADE");
hba_attr->num_ports =
cpu_to_be32(bfa_ioc_get_nports(&port->fcs->bfa->ioc));
hba_attr->fabric_name = port->fabric->lps->pr_nwwn;
- strncpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
+ strlcpy(hba_attr->bios_ver, hba_attr->option_rom_ver, BFA_VERSION_LEN);
}
@@ -2724,20 +2724,20 @@ bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
/*
* OS device Name
*/
- strncpy(port_attr->os_device_name, (char *)driver_info->os_device_name,
+ strlcpy(port_attr->os_device_name, driver_info->os_device_name,
sizeof(port_attr->os_device_name));
/*
* Host name
*/
- strncpy(port_attr->host_name, (char *)driver_info->host_machine_name,
+ strlcpy(port_attr->host_name, driver_info->host_machine_name,
sizeof(port_attr->host_name));
port_attr->node_name = bfa_fcs_lport_get_nwwn(port);
port_attr->port_name = bfa_fcs_lport_get_pwwn(port);
- strncpy(port_attr->port_sym_name.symname,
- (char *)&bfa_fcs_lport_get_psym_name(port), BFA_SYMNAME_MAXLEN);
+ strlcpy(port_attr->port_sym_name.symname,
+ bfa_fcs_lport_get_psym_name(port).symname, BFA_SYMNAME_MAXLEN);
bfa_fcs_lport_get_attr(port, &lport_attr);
port_attr->port_type = cpu_to_be32(lport_attr.port_type);
port_attr->scos = pport_attr.cos_supported;
@@ -3217,7 +3217,7 @@ bfa_fcs_lport_ms_gmal_response(void *fcsarg, struct bfa_fcxp_s *fcxp,
rsp_str[gmal_entry->len-1] = 0;
/* copy IP Address to fabric */
- strncpy(bfa_fcs_lport_get_fabric_ipaddr(port),
+ strlcpy(bfa_fcs_lport_get_fabric_ipaddr(port),
gmal_entry->ip_addr,
BFA_FCS_FABRIC_IPADDR_SZ);
break;
@@ -4655,21 +4655,13 @@ bfa_fcs_lport_ns_send_rspn_id(void *ns_cbarg, struct bfa_fcxp_s *fcxp_alloced)
* to that of the base port.
*/
- strncpy((char *)psymbl,
- (char *) &
- (bfa_fcs_lport_get_psym_name
+ strlcpy(symbl,
+ (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *) &
- bfa_fcs_lport_get_psym_name(bfa_fcs_get_base_port
- (port->fcs)))] = 0;
- strncat((char *)psymbl,
- (char *) &(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *) &bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
+
+ strlcat(symbl, (char *)&(bfa_fcs_lport_get_psym_name(port)),
+ sizeof(symbl));
} else {
psymbl = (u8 *) &(bfa_fcs_lport_get_psym_name(port));
}
@@ -5161,7 +5153,6 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
u8 symbl[256];
- u8 *psymbl = &symbl[0];
int len;
/* Avoid sending RSPN in the following states. */
@@ -5191,22 +5182,17 @@ bfa_fcs_lport_ns_util_send_rspn_id(void *cbarg, struct bfa_fcxp_s *fcxp_alloced)
* For Vports, we append the vport's port symbolic name
* to that of the base port.
*/
- strncpy((char *)psymbl, (char *)&(bfa_fcs_lport_get_psym_name
+ strlcpy(symbl, (char *)&(bfa_fcs_lport_get_psym_name
(bfa_fcs_get_base_port(port->fcs))),
- strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs))));
-
- /* Ensure we have a null terminating string. */
- ((char *)psymbl)[strlen((char *)&bfa_fcs_lport_get_psym_name(
- bfa_fcs_get_base_port(port->fcs)))] = 0;
+ sizeof(symbl));
- strncat((char *)psymbl,
+ strlcat(symbl,
(char *)&(bfa_fcs_lport_get_psym_name(port)),
- strlen((char *)&bfa_fcs_lport_get_psym_name(port)));
+ sizeof(symbl));
}
len = fc_rspnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
- bfa_fcs_lport_get_fcid(port), 0, psymbl);
+ bfa_fcs_lport_get_fcid(port), 0, symbl);
bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 98f7e8cca52df2..e533474748a5a2 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -2802,7 +2802,7 @@ void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
{
memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strlcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
void
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index 625225f31081ad..15d02eb0947607 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -365,8 +365,8 @@ bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
lp.eid = event;
lp.log_type = BFA_PL_LOG_TYPE_STRING;
lp.misc = misc;
- strncpy(lp.log_entry.string_log, log_str,
- BFA_PL_STRING_LOG_SZ - 1);
+ strlcpy(lp.log_entry.string_log, log_str,
+ BFA_PL_STRING_LOG_SZ);
lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
bfa_plog_add(plog, &lp);
}
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index cc3b9d3d6d4095..bfefa2bfde0e64 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -987,20 +987,20 @@ bfad_start_ops(struct bfad_s *bfad) {
/* Fill the driver_info info to fcs*/
memset(&driver_info, 0, sizeof(driver_info));
- strncpy(driver_info.version, BFAD_DRIVER_VERSION,
- sizeof(driver_info.version) - 1);
+ strlcpy(driver_info.version, BFAD_DRIVER_VERSION,
+ sizeof(driver_info.version));
if (host_name)
- strncpy(driver_info.host_machine_name, host_name,
- sizeof(driver_info.host_machine_name) - 1);
+ strlcpy(driver_info.host_machine_name, host_name,
+ sizeof(driver_info.host_machine_name));
if (os_name)
- strncpy(driver_info.host_os_name, os_name,
- sizeof(driver_info.host_os_name) - 1);
+ strlcpy(driver_info.host_os_name, os_name,
+ sizeof(driver_info.host_os_name));
if (os_patch)
- strncpy(driver_info.host_os_patch, os_patch,
- sizeof(driver_info.host_os_patch) - 1);
+ strlcpy(driver_info.host_os_patch, os_patch,
+ sizeof(driver_info.host_os_patch));
- strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name) - 1);
+ strlcpy(driver_info.os_device_name, bfad->pci_name,
+ sizeof(driver_info.os_device_name));
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 40be670a1cbc86..6d21bc6a7713c6 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -842,7 +842,7 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
char symname[BFA_SYMNAME_MAXLEN];
bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
- strncpy(symname, port_attr.port_cfg.sym_name.symname,
+ strlcpy(symname, port_attr.port_cfg.sym_name.symname,
BFA_SYMNAME_MAXLEN);
return snprintf(buf, PAGE_SIZE, "%s\n", symname);
}
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 023b9d42ad9a22..be2de04179d05a 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -126,7 +126,7 @@ bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
/* fill in driver attr info */
strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
- strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ strlcpy(iocmd->ioc_attr.driver_attr.driver_ver,
BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
iocmd->ioc_attr.adapter_attr.fw_ver);
@@ -314,9 +314,9 @@ bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
iocmd->attr.port_type = port_attr.port_type;
iocmd->attr.loopback = port_attr.loopback;
iocmd->attr.authfail = port_attr.authfail;
- strncpy(iocmd->attr.port_symname.symname,
+ strlcpy(iocmd->attr.port_symname.symname,
port_attr.port_cfg.sym_name.symname,
- sizeof(port_attr.port_cfg.sym_name.symname));
+ sizeof(iocmd->attr.port_symname.symname));
iocmd->status = BFA_STATUS_OK;
return 0;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index d0b227ffbd5fd6..573aeec7a02b62 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -2279,7 +2279,7 @@ static int _bnx2fc_create(struct net_device *netdev,
if (!interface) {
printk(KERN_ERR PFX "bnx2fc_interface_create failed\n");
rc = -ENOMEM;
- goto ifput_err;
+ goto netdev_err;
}
if (netdev->priv_flags & IFF_802_1Q_VLAN) {
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index fb072cc5e9fdb5..dada9ce4e70221 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2742,6 +2742,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
goto arm_cq;
}
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 2d1c4ebd40f91d..6587f20cff1a14 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -582,12 +582,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
}
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ ln->fc_vport = fc_vport;
if (csio_fcoe_alloc_vnp(hw, ln))
goto error;
*(struct csio_lnode **)fc_vport->dd_data = ln;
- ln->fc_vport = fc_vport;
if (!fc_vport->node_name)
fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
if (!fc_vport->port_name)
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index c00b2ff72b551e..be5ee2d3781551 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len)
}
static inline void
-csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
{
+ uint16_t len;
struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+
+ if (WARN_ON(val_len > U16_MAX))
+ return;
+
+ len = val_len;
+
ae->type = htons(type);
len += 4; /* includes attribute type and length */
len = (len + 3) & ~3; /* should be multiple of 4 bytes */
ae->len = htons(len);
- memcpy(ae->value, val, len);
+ memcpy(ae->value, val, val_len);
+ if (len > val_len)
+ memset(ae->value + val_len, 0, len - val_len);
*ptr += len;
}
@@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
numattrs++;
val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
- (uint8_t *)&val,
+ &val,
FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
numattrs++;
@@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
else
val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
- (uint8_t *)&val,
- FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ &val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
numattrs++;
mfs = ln->ln_sparm.csp.sp_bb_data;
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
- (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ &mfs, sizeof(mfs));
numattrs++;
strcpy(buf, "csiostor");
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
- (uint16_t)strlen(buf));
+ strlen(buf));
numattrs++;
if (!csio_hostname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
- buf, (uint16_t)strlen(buf));
+ buf, strlen(buf));
numattrs++;
}
attrib_blk->numattrs = htonl(numattrs);
@@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
strcpy(buf, "Chelsio Communications");
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
- (uint16_t)strlen(buf));
+ strlen(buf));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
- hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ hw->vpd.sn, sizeof(hw->vpd.sn));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
- (uint16_t)sizeof(hw->vpd.id));
+ sizeof(hw->vpd.id));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
- hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ hw->model_desc, strlen(hw->model_desc));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
- hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ hw->hw_ver, sizeof(hw->hw_ver));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
- hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ hw->fwrev_str, strlen(hw->fwrev_str));
numattrs++;
if (!csio_osname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
- buf, (uint16_t)strlen(buf));
+ buf, strlen(buf));
numattrs++;
}
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
- (uint8_t *)&maxpayload,
- FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ &maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
len = (uint32_t)(pld - (uint8_t *)cmd);
numattrs++;
attrib_blk->numattrs = htonl(numattrs);
@@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
int rv;
+ BUG_ON(pld_len > pld->len);
+
io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
io_req->fw_handle = (uintptr_t) (io_req);
io_req->eq_idx = mgmtm->eq_idx;
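
The reworked csio_append_attrib() separates the payload length from the padded TLV length: only val_len bytes are copied from the caller, the entry length is rounded up to a 4-byte multiple, and the padding is zeroed instead of being filled by over-reading val. A self-contained sketch of that layout logic; byte-wise big-endian stores stand in for htons():

    #include <stdint.h>
    #include <string.h>

    static void append_attrib(uint8_t **ptr, uint16_t type,
                              const void *val, size_t val_len)
    {
        uint8_t *p = *ptr;
        uint16_t len = (uint16_t)val_len + 4;   /* + type and length fields */

        len = (len + 3) & ~3;                   /* round up to 4-byte multiple */
        p[0] = type >> 8; p[1] = type & 0xff;   /* big-endian type (htons) */
        p[2] = len  >> 8; p[3] = len  & 0xff;   /* big-endian length (htons) */
        memcpy(p + 4, val, val_len);            /* copy only the real payload */
        memset(p + 4 + val_len, 0, len - 4 - val_len); /* zero the padding */
        *ptr += len;
    }

    int main(void)
    {
        uint8_t buf[64], *p = buf;

        append_attrib(&p, 0x000a, "csiostor", 8);   /* 8 + 4 = 12, no padding */
        return p - buf == 12 ? 0 : 1;
    }
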
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 71cb05b1c3ebdd..60be0742e2c8ba 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -1349,6 +1349,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent = esp->data_dma_len;
bytes_sent -= ecount;
+ bytes_sent -= esp->send_cmd_residual;
/*
* The am53c974 has a DMA 'peculiarity'. The doc states:
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 84dcbe4a6268bf..55be43fe7667a4 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -540,6 +540,8 @@ struct esp {
void *dma;
int dmarev;
+
+ u32 send_cmd_residual;
};
/* A front-end driver for the ESP chip should do the following in
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 34a1b1f333b4c1..d5184aa1ace4e1 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -752,9 +752,9 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport,
case ELS_LOGO:
if (fip->mode == FIP_MODE_VN2VN) {
if (fip->state != FIP_ST_VNMP_UP)
- return -EINVAL;
+ goto drop;
if (ntoh24(fh->fh_d_id) == FC_FID_FLOGI)
- return -EINVAL;
+ goto drop;
} else {
if (fip->state != FIP_ST_ENABLED)
return 0;
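The fcoe_ctlr fix above replaces the early "return -EINVAL" with "goto drop" so the frame the function owns is freed on rejection rather than leaked. A generic sketch of that ownership rule, with illustrative types (struct frame and send_frame are not the libfc API):

	#include <stdlib.h>
	#include <errno.h>

	struct frame { unsigned char *data; size_t len; };

	/* Takes ownership of fr: every exit path must free it or hand it on. */
	static int send_frame(struct frame *fr, int link_up)
	{
		int rc = 0;

		if (!link_up) {
			rc = -EINVAL;
			goto drop;	/* reject, but do not leak the frame */
		}
		/* ... hand the frame to the transport here ... */
		return 0;

	drop:
		free(fr->data);
		free(fr);
		return rc;
	}

	int main(void)
	{
		struct frame *fr = malloc(sizeof(*fr));

		fr->data = malloc(64);
		fr->len = 64;
		return send_frame(fr, 0) == -EINVAL ? 0 : 1;
	}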
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index adfef9db6f1e2c..e26747a1b35a16 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -261,7 +261,7 @@ static void gather_partition_info(void)
ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
if (ppartition_name)
- strncpy(partition_name, ppartition_name,
+ strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
if (p_number_ptr)
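strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, which is what the ibmvscsi switch to strlcpy() fixes. A userspace demonstration with a local strlcpy() work-alike (glibc does not provide one):

	#include <stdio.h>
	#include <string.h>

	/* Minimal strlcpy() work-alike: always NUL-terminates, returns strlen(src). */
	static size_t my_strlcpy(char *dst, const char *src, size_t size)
	{
		size_t len = strlen(src);

		if (size) {
			size_t n = len >= size ? size - 1 : len;

			memcpy(dst, src, n);
			dst[n] = '\0';
		}
		return len;
	}

	int main(void)
	{
		char a[8], b[8];
		const char *src = "PARTITION-LONG";	/* longer than 8 */

		strncpy(a, src, sizeof(a));	/* a is NOT NUL-terminated */
		my_strlcpy(b, src, sizeof(b));	/* b is "PARTITI" plus NUL */
		printf("strlcpy result: %s\n", b);
		(void)a;
		return 0;
	}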
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 77128d680e3bc6..6f38fa1f468a71 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -595,6 +595,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
shost->max_lun = ~0;
shost->max_cmd_len = MAX_COMMAND_SIZE;
+ /* turn on DIF support */
+ scsi_host_set_prot(shost,
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
err = scsi_add_host(shost, &pdev->dev);
if (err)
goto err_shost;
@@ -682,13 +689,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_host_alloc;
}
pci_info->hosts[i] = h;
-
- /* turn on DIF support */
- scsi_host_set_prot(to_shost(h),
- SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e01a29863c384c..867fc036d6ef56 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1739,14 +1739,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_frame_payload_op(fp) != ELS_LS_ACC) {
FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (!flp) {
FC_LPORT_DBG(lport, "FLOGI bad response\n");
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1756,7 +1756,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
"lport->mfs:%hu\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 9f0b00c38658e6..0fdc8c41703588 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -283,11 +283,11 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (opcode != ISCSI_OP_SCSI_DATA_OUT) {
iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x/%x itt "
+ "task [op %x itt "
"0x%x/0x%x] "
"rejected.\n",
- task->hdr->opcode, opcode,
- task->itt, task->hdr_itt);
+ opcode, task->itt,
+ task->hdr_itt);
return -EACCES;
}
/*
@@ -296,10 +296,10 @@ static int iscsi_check_tmf_restrictions(struct iscsi_task *task, int opcode)
*/
if (conn->session->fast_abort) {
iscsi_conn_printk(KERN_INFO, conn,
- "task [op %x/%x itt "
+ "task [op %x itt "
"0x%x/0x%x] fast abort.\n",
- task->hdr->opcode, opcode,
- task->itt, task->hdr_itt);
+ opcode, task->itt,
+ task->hdr_itt);
return -EACCES;
}
break;
@@ -1448,7 +1448,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
return -ENODATA;
+ spin_lock_bh(&conn->session->back_lock);
+ if (conn->task == NULL) {
+ spin_unlock_bh(&conn->session->back_lock);
+ return -ENODATA;
+ }
__iscsi_get_task(task);
+ spin_unlock_bh(&conn->session->back_lock);
spin_unlock_bh(&conn->session->frwd_lock);
rc = conn->session->tt->xmit_task(task);
spin_lock_bh(&conn->session->frwd_lock);
@@ -2416,8 +2422,8 @@ int iscsi_eh_session_reset(struct scsi_cmnd *sc)
failed:
ISCSI_DBG_EH(session,
"failing session reset: Could not log back into "
- "%s, %s [age %d]\n", session->targetname,
- conn->persistent_address, session->age);
+ "%s [age %d]\n", session->targetname,
+ session->age);
spin_unlock_bh(&session->frwd_lock);
mutex_unlock(&session->eh_mutex);
return FAILED;
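The iscsi_xmit_task() change takes back_lock and re-checks conn->task before grabbing a reference, so a completion running concurrently cannot free the task between the check and the get. A pthread sketch of that check-then-get-under-one-lock pattern; the struct names are illustrative:

	#include <pthread.h>

	struct task { int refcount; };	/* illustrative stand-ins */
	struct conn { pthread_mutex_t back_lock; struct task *task; };

	/* Returns the task with an extra reference, or NULL if it raced away. */
	static struct task *get_current_task(struct conn *c)
	{
		struct task *t;

		pthread_mutex_lock(&c->back_lock);
		t = c->task;
		if (t)
			t->refcount++;	/* take the reference under the same lock */
		pthread_mutex_unlock(&c->back_lock);
		return t;
	}

	int main(void)
	{
		struct task t = { .refcount = 1 };
		struct conn c = { .back_lock = PTHREAD_MUTEX_INITIALIZER, .task = &t };

		return get_current_task(&c) == &t ? 0 : 1;
	}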
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 12886f96b2860c..7be581f7c35d17 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -818,6 +818,7 @@ static struct domain_device *sas_ex_discover_end_dev(
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
goto out_free;
+ rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
@@ -845,6 +846,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index fd8fe1202dbe96..398c9a0a5adeb8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5105,6 +5105,9 @@ error:
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+ stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 3406586b92012c..ad4f16ab7f7a2f 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -3485,6 +3485,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
struct hbq_dmabuf *dmabuf;
struct lpfc_cq_event *cq_event;
unsigned long iflag;
+ int count = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3506,16 +3507,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
+ count++;
break;
case CQE_CODE_RECEIVE:
case CQE_CODE_RECEIVE_V1:
dmabuf = container_of(cq_event, struct hbq_dmabuf,
cq_event);
lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ count++;
break;
default:
break;
}
+
+ /* Limit the number of events to 64 to avoid soft lockups */
+ if (count == 64)
+ break;
}
}
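The lpfc hunk above caps the slow-ring loop at 64 events per invocation so an event flood cannot monopolize the CPU; whatever remains is picked up on the next pass. A generic sketch of bounded batch processing (BATCH_LIMIT and process_queue() are illustrative):

	#include <stdio.h>

	#define BATCH_LIMIT 64

	/* Drain at most BATCH_LIMIT items; return how many are left for later. */
	static int process_queue(int *pending)
	{
		int count = 0;

		while (*pending > 0 && count < BATCH_LIMIT) {
			(*pending)--;	/* handle one event */
			count++;
		}
		return *pending;	/* non-zero: caller reschedules itself */
	}

	int main(void)
	{
		int pending = 150;

		while (process_queue(&pending))
			;		/* each pass handles at most 64 events */
		printf("drained\n");
		return 0;
	}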
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 26c67c42985c8e..1002124bd8bf4c 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -426,6 +426,8 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
scsi_esp_cmd(esp, ESP_CMD_TI);
}
}
+
+ esp->send_cmd_residual = esp_count;
}
static int mac_esp_irq_pending(struct esp *esp)
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 9d05302a3bcd50..19bffe0b2cc0a3 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4197,6 +4197,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
int irq, i, j;
int error = -ENODEV;
+ if (hba_count >= MAX_CONTROLLERS)
+ goto out;
+
if (pci_enable_device(pdev))
goto out;
pci_set_master(pdev);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6835bae33ec461..ac7acd257c9929 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6510,6 +6510,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
+ if (local_sense_off != user_sense_off)
+ return -EINVAL;
+
if (local_sense_len) {
void __user **sense_ioc_ptr =
(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 741509b3561776..14f32c114c5561 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -1273,7 +1273,7 @@ void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
ld = MR_TargetIdToLdGet(ldCount, drv_map);
- if (ld >= MAX_LOGICAL_DRIVES_EXT) {
+ if (ld >= MAX_LOGICAL_DRIVES_EXT - 1) {
lbInfo[ldCount].loadBalanceFlag = 0;
continue;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 96007633ad39b2..3d3bfa81409332 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1758,7 +1758,7 @@ static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
device_id < instance->fw_supported_vd_count)) {
ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
- if (ld >= instance->fw_supported_vd_count)
+ if (ld >= instance->fw_supported_vd_count - 1)
fp_possible = 0;
raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1886,6 +1886,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
} else {
+ if (os_timeout_value)
+ os_timeout_value++;
+
/* system pd Fast Path */
io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
timeout_limit = (scmd->device->type == TYPE_DISK) ?
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aa18c729d23a7a..41a646696babb4 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
wait_for_completion(&tm_iocb->u.tmf.comp);
- rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
- QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ rval = tm_iocb->u.tmf.data;
- if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
- ql_dbg(ql_dbg_taskm, vha, 0x8030,
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x8030,
"TM IOCB failed (%x).\n", rval);
}
@@ -3261,7 +3260,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
return;
if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
- fcport->fp_speed > ha->link_data_rate)
+ fcport->fp_speed > ha->link_data_rate ||
+ !ha->flags.gpsc_supported)
return;
rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index cb11e04be5685f..87059a6786f429 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3315,10 +3315,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_CNA_CAPABLE(vha->hw))
- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
- else
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_1|MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5cbf20ab94aaf1..ff5df33fc7405b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -685,6 +685,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
srb_t *sp;
int rval;
+ if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
if (ha->flags.eeh_busy) {
if (ha->flags.pci_channel_io_perm_failure) {
ql_dbg(ql_dbg_aer, vha, 0x9010,
@@ -4938,8 +4943,9 @@ qla2x00_do_dpc(void *data)
}
}
- if (test_and_clear_bit(ISP_ABORT_NEEDED,
- &base_vha->dpc_flags)) {
+ if (test_and_clear_bit
+ (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+ !test_bit(UNLOADING, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
"ISP abort scheduled.\n");
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index d8c03431d0aa89..f9f899ec94270c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -7245,6 +7245,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
fw_ddb_entry);
+ if (rc)
+ goto free_sess;
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
__func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 6b61b09b322609..75f2179860a79c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -33,7 +33,6 @@ struct scsi_dev_info_list_table {
};
-static const char spaces[] = " "; /* 16 of them */
static unsigned scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
@@ -291,20 +290,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length,
size_t from_length;
from_length = strlen(from);
- strncpy(to, from, min(to_length, from_length));
- if (from_length < to_length) {
- if (compatible) {
- /*
- * NUL terminate the string if it is short.
- */
- to[from_length] = '\0';
- } else {
- /*
- * space pad the string if it is short.
- */
- strncpy(&to[from_length], spaces,
- to_length - from_length);
- }
+ /* this zero-pads the destination */
+ strncpy(to, from, to_length);
+ if (from_length < to_length && !compatible) {
+ /*
+ * space pad the string if it is short.
+ */
+ memset(&to[from_length], ' ', to_length - from_length);
}
if (from_length > to_length)
printk(KERN_WARNING "%s: %s string '%s' is too long\n",
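The scsi_strcpy_devinfo() rewrite leans on the fact that strncpy() zero-fills the destination beyond the end of the source, so only the non-compatible space-padding case needs an explicit memset(). A small demonstration (copy_devinfo() is an illustrative stand-in):

	#include <stdio.h>
	#include <string.h>

	static void copy_devinfo(char *to, size_t to_len, const char *from,
				 int compatible)
	{
		size_t from_len = strlen(from);

		/* strncpy() zero-pads 'to' when 'from' is shorter than to_len */
		strncpy(to, from, to_len);
		if (from_len < to_len && !compatible)
			memset(&to[from_len], ' ', to_len - from_len);
	}

	int main(void)
	{
		char vendor[8];	/* fixed-width field, deliberately unterminated */

		copy_devinfo(vendor, sizeof(vendor), "SGI", 0);
		printf("[%.*s]\n", (int)sizeof(vendor), vendor); /* "[SGI     ]" */
		return 0;
	}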
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5711d58f9e8139..a8ebaeace154a3 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"IBM", "3526", "rdac", },
{"IBM", "3542", "rdac", },
{"IBM", "3552", "rdac", },
- {"SGI", "TP9", "rdac", },
+ {"SGI", "TP9300", "rdac", },
+ {"SGI", "TP9400", "rdac", },
+ {"SGI", "TP9500", "rdac", },
+ {"SGI", "TP9700", "rdac", },
{"SGI", "IS", "rdac", },
{"STK", "OPENstorage", "rdac", },
{"STK", "FLEXLINE 380", "rdac", },
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 692445bcca6fec..850ddc5fac0499 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -381,11 +381,12 @@ static void scsi_target_reap_ref_release(struct kref *kref)
= container_of(kref, struct scsi_target, reap_ref);
/*
- * if we get here and the target is still in the CREATED state that
+ * if we get here and the target is still in a CREATED state that
* means it was allocated but never made visible (because a scan
* turned up no LUNs), so don't call device_del() on it.
*/
- if (starget->state != STARGET_CREATED) {
+ if ((starget->state != STARGET_CREATED) &&
+ (starget->state != STARGET_CREATED_REMOVE)) {
transport_remove_device(&starget->dev);
device_del(&starget->dev);
}
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4477e999ec707c..085e470d1c49a3 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -678,8 +678,24 @@ static ssize_t
sdev_store_delete(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (device_remove_file_self(dev, attr))
- scsi_remove_device(to_scsi_device(dev));
+ struct kernfs_node *kn;
+
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+ /*
+ * Concurrent writes into the "delete" sysfs attribute may trigger
+ * concurrent calls to device_remove_file() and scsi_remove_device().
+ * device_remove_file() handles concurrent removal calls by
+ * serializing these and by ignoring the second and later removal
+ * attempts. Concurrent calls of scsi_remove_device() are
+ * serialized. The second and later calls of scsi_remove_device() are
+ * ignored because the first call of that function changes the device
+ * state into SDEV_DEL.
+ */
+ device_remove_file(dev, attr);
+ scsi_remove_device(to_scsi_device(dev));
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
return count;
};
static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
@@ -1196,11 +1212,15 @@ restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
if (starget->state == STARGET_DEL ||
- starget->state == STARGET_REMOVE)
+ starget->state == STARGET_REMOVE ||
+ starget->state == STARGET_CREATED_REMOVE)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
- starget->state = STARGET_REMOVE;
+ if (starget->state == STARGET_CREATED)
+ starget->state = STARGET_CREATED_REMOVE;
+ else
+ starget->state = STARGET_REMOVE;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index e3cd3ece44121c..c3d1891d2d3f32 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -52,6 +52,8 @@ struct srp_internal {
struct transport_container rport_attr_cont;
};
+static int scsi_is_srp_rport(const struct device *dev);
+
#define to_srp_internal(tmpl) container_of(tmpl, struct srp_internal, t)
#define dev_to_rport(d) container_of(d, struct srp_rport, dev)
@@ -61,9 +63,24 @@ static inline struct Scsi_Host *rport_to_shost(struct srp_rport *r)
return dev_to_shost(r->dev.parent);
}
+static int find_child_rport(struct device *dev, void *data)
+{
+ struct device **child = data;
+
+ if (scsi_is_srp_rport(dev)) {
+ WARN_ON_ONCE(*child);
+ *child = dev;
+ }
+ return 0;
+}
+
static inline struct srp_rport *shost_to_rport(struct Scsi_Host *shost)
{
- return transport_class_to_srp_rport(&shost->shost_gendev);
+ struct device *child = NULL;
+
+ WARN_ON_ONCE(device_for_each_child(&shost->shost_gendev, &child,
+ find_child_rport) < 0);
+ return child ? dev_to_rport(child) : NULL;
}
/**
@@ -637,7 +654,8 @@ static enum blk_eh_timer_return srp_timed_out(struct scsi_cmnd *scmd)
struct srp_rport *rport = shost_to_rport(shost);
pr_debug("timeout for sdev %s\n", dev_name(&sdev->sdev_gendev));
- return rport->fast_io_fail_tmo < 0 && rport->dev_loss_tmo < 0 &&
+ return rport && rport->fast_io_fail_tmo < 0 &&
+ rport->dev_loss_tmo < 0 &&
i->f->reset_timer_if_blocked && scsi_device_blocked(sdev) ?
BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 4913147d7a706c..18a49ba7e41e44 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -209,6 +209,12 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
sp = buffer_data[0] & 0x80 ? 1 : 0;
buffer_data[0] &= ~0x80;
+ /*
+ * Ensure WP, DPOFUA, and RESERVED fields are cleared in
+ * received mode parameter buffer before doing MODE SELECT.
+ */
+ data.device_specific = 0;
+
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
SD_MAX_RETRIES, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
@@ -1272,11 +1278,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
}
- /*
- * XXX and what if there are packets in flight and this close()
- * XXX is followed by a "rmmod sd_mod"?
- */
-
scsi_disk_put(sdkp);
}
@@ -3227,11 +3228,23 @@ static void scsi_disk_release(struct device *dev)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
-
+ struct request_queue *q = disk->queue;
+
spin_lock(&sd_index_lock);
ida_remove(&sd_index_ida, sdkp->index);
spin_unlock(&sd_index_lock);
+ /*
+ * Wait until all requests that are in progress have completed.
+	 * This is necessary to prevent, e.g., scsi_end_request() from
+	 * crashing once the disk->private_data pointer has been cleared.
+	 * Wait from inside scsi_disk_release() instead of from sd_release()
+	 * so that freezing and unfreezing the request queue does not affect
+	 * user space I/O when multiple processes open a /dev/sd... node
+	 * concurrently.
+ */
+ blk_mq_freeze_queue(q);
+ blk_mq_unfreeze_queue(q);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
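blk_mq_freeze_queue() immediately followed by blk_mq_unfreeze_queue() simply waits for every in-flight request to drain before disk->private_data is cleared. A userspace analogue, assuming a hypothetical in-flight counter, spins until the count reaches zero before tearing shared state down:

	#include <stdatomic.h>
	#include <sched.h>
	#include <stdio.h>

	static atomic_int inflight;

	static void request_start(void) { atomic_fetch_add(&inflight, 1); }
	static void request_end(void)   { atomic_fetch_sub(&inflight, 1); }

	/* Analogue of the freeze/unfreeze pair: wait until nothing is in
	 * flight before tearing shared state down. */
	static void drain_before_release(void)
	{
		while (atomic_load(&inflight) > 0)
			sched_yield();
	}

	int main(void)
	{
		request_start();
		request_end();
		drain_before_release();	/* safe to clear private data now */
		printf("released\n");
		return 0;
	}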
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ca93fe8606fad5..61d0ce2aa3c1b0 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
#include "scsi.h"
#include <scsi/scsi_dbg.h>
@@ -221,6 +222,33 @@ static void sg_device_destroy(struct kref *kref);
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+ if (filp->f_cred != current_real_cred()) {
+ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ caller, task_tgid_vnr(current), current->comm);
+ return -EPERM;
+ }
+ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
+ pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+ caller, task_tgid_vnr(current), current->comm);
+ return -EACCES;
+ }
+ return 0;
+}
+
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
@@ -405,6 +433,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
struct sg_header *old_hdr = NULL;
int retval = 0;
+ /*
+ * This could cause a response to be stranded. Close the associated
+ * file descriptor to free up any resources being held.
+ */
+ retval = sg_check_file_access(filp, __func__);
+ if (retval)
+ return retval;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -592,9 +628,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
+ int retval;
- if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
- return -EINVAL;
+ retval = sg_check_file_access(filp, __func__);
+ if (retval)
+ return retval;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
@@ -2159,6 +2197,7 @@ sg_add_sfp(Sg_device * sdp)
write_lock_irqsave(&sdp->sfd_lock, iflags);
if (atomic_read(&sdp->detaching)) {
write_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ kfree(sfp);
return ERR_PTR(-ENODEV);
}
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
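sg_check_file_access() rejects I/O issued under credentials different from the ones the file descriptor was opened with (it compares struct cred pointers, which has no direct userspace equivalent). A rough analogue that snapshots the opener's effective UID and compares it on each access; struct handle is illustrative:

	#include <unistd.h>
	#include <errno.h>

	struct handle { uid_t opener_euid; };	/* illustrative stand-in */

	static void handle_open(struct handle *h)
	{
		h->opener_euid = geteuid();	/* snapshot credentials at open() */
	}

	static int handle_io(const struct handle *h)
	{
		if (geteuid() != h->opener_euid)	/* context changed since open */
			return -EPERM;
		return 0;			/* ... perform the transfer ... */
	}

	int main(void)
	{
		struct handle h;

		handle_open(&h);
		return handle_io(&h) ? 1 : 0;
	}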
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8f9b8fa797ea4e..fe00510a9c142c 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -533,18 +533,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
static int sr_block_open(struct block_device *bdev, fmode_t mode)
{
struct scsi_cd *cd;
+ struct scsi_device *sdev;
int ret = -ENXIO;
+ cd = scsi_cd_get(bdev->bd_disk);
+ if (!cd)
+ goto out;
+
+ sdev = cd->device;
+ scsi_autopm_get_device(sdev);
check_disk_change(bdev);
mutex_lock(&sr_mutex);
- cd = scsi_cd_get(bdev->bd_disk);
- if (cd) {
- ret = cdrom_open(&cd->cdi, bdev, mode);
- if (ret)
- scsi_cd_put(cd);
- }
+ ret = cdrom_open(&cd->cdi, bdev, mode);
mutex_unlock(&sr_mutex);
+
+ scsi_autopm_put_device(sdev);
+ if (ret)
+ scsi_cd_put(cd);
+
+out:
return ret;
}
@@ -572,6 +580,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret)
goto out;
+ scsi_autopm_get_device(sdev);
+
/*
* Send SCSI addressing ioctls directly to mid level, send other
* ioctls to cdrom/block level.
@@ -580,15 +590,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
case SCSI_IOCTL_GET_IDLUN:
case SCSI_IOCTL_GET_BUS_NUMBER:
ret = scsi_ioctl(sdev, cmd, argp);
- goto out;
+ goto put;
}
ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
if (ret != -ENOSYS)
- goto out;
+ goto put;
ret = scsi_ioctl(sdev, cmd, argp);
+put:
+ scsi_autopm_put_device(sdev);
+
out:
mutex_unlock(&sr_mutex);
return ret;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 03054c0e7689bd..3c3e8115f73d6b 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -187,30 +187,25 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
struct scsi_device *SDev;
struct scsi_sense_hdr sshdr;
int result, err = 0, retries = 0;
- struct request_sense *sense = cgc->sense;
+ unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
SDev = cd->device;
- if (!sense) {
- sense = kmalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (!sense) {
- err = -ENOMEM;
- goto out;
- }
- }
-
retry:
if (!scsi_block_when_processing_errors(SDev)) {
err = -ENODEV;
goto out;
}
- memset(sense, 0, sizeof(*sense));
+ memset(sense_buffer, 0, sizeof(sense_buffer));
result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen, (char *)sense,
+ cgc->buffer, cgc->buflen, sense_buffer,
cgc->timeout, IOCTL_RETRIES, 0, NULL);
- scsi_normalize_sense((char *)sense, sizeof(*sense), &sshdr);
+ scsi_normalize_sense(sense_buffer, sizeof(sense_buffer), &sshdr);
+
+ if (cgc->sense)
+ memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense));
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (driver_byte(result) != 0) {
@@ -261,8 +256,6 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
/* Wake up a process waiting for device */
out:
- if (!cgc->sense)
- kfree(sense);
cgc->stat = err;
return err;
}
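The sr_do_ioctl() rework keeps the full sense data in a fixed on-stack buffer and copies out only as much as the caller's smaller structure holds, which removes the kmalloc() failure path entirely. A sketch under those assumptions (SENSE_BUF_SIZE and do_command() are illustrative):

	#include <stdio.h>
	#include <string.h>

	#define SENSE_BUF_SIZE 96	/* analogue of SCSI_SENSE_BUFFERSIZE */

	struct request_sense { unsigned char data[18]; }; /* caller's smaller view */

	static void do_command(struct request_sense *caller_sense)
	{
		unsigned char sense[SENSE_BUF_SIZE];

		memset(sense, 0, sizeof(sense));
		/* ... execute the command; the device fills 'sense' ... */
		sense[0] = 0x70;	/* pretend fixed-format sense came back */

		if (caller_sense)	/* copy out only what the caller holds */
			memcpy(caller_sense->data, sense,
			       sizeof(caller_sense->data));
	}

	int main(void)
	{
		struct request_sense rs;

		do_command(&rs);
		do_command(NULL);	/* no allocation needed either way */
		printf("%#x\n", (unsigned)rs.data[0]);
		return 0;
	}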
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2e522951b61974..088a68ab4246b1 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4821,9 +4821,8 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
current->mm,
uaddr,
nr_pages,
- rw == READ,
- 0, /* don't force */
- pages);
+ pages,
+ rw == READ ? FOLL_WRITE : 0); /* don't force */
/* Errors and no page mapped should return here */
if (res < nr_pages)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 42c459a9d3fec7..ce5234555cc99a 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -45,6 +45,7 @@
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -383,7 +384,7 @@ struct utp_cmd_rsp {
__be32 residual_transfer_count;
__be32 reserved[4];
__be16 sense_data_len;
- u8 sense_data[18];
+ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
};
/**
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa466c59ff..52b546fb509b7a 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
+ ufshcd_dealloc_host(hba);
}
/**
@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
+ ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 9714f2a8b32977..f58abfcdfe813a 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -161,7 +161,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
if (ret) {
dev_err(dev, "%s: unable to find %s err %d\n",
__func__, prop_name, ret);
- goto out_free;
+ goto out;
}
vreg->min_uA = 0;
@@ -183,9 +183,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
goto out;
-out_free:
- devm_kfree(dev, vreg);
- vreg = NULL;
out:
if (!ret)
*out_vreg = vreg;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 18f26cf1e24d01..c94d465de941e9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -585,6 +585,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
+ /*
+ * Wait for the ungate work to complete if in progress.
+ * Though the clocks may be in ON state, the link could
+ * still be in hibern8 state if hibern8 is allowed
+ * during clock gating.
+ * Make sure we exit hibern8 state also in addition to
+ * clocks being ON.
+ */
+ if (ufshcd_can_hibern8_during_gating(hba) &&
+ ufshcd_is_link_hibern8(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->clk_gating.ungate_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
@@ -813,10 +828,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
int len;
if (lrbp->sense_buffer &&
ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+ int len_to_copy;
+
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
}
}
@@ -3447,6 +3466,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
+ scsi_block_requests(hba->host);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -3462,6 +3482,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
__func__, err);
}
out:
+ scsi_unblock_requests(hba->host);
pm_runtime_put_sync(hba->dev);
return;
}
@@ -5249,7 +5270,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered || pm_runtime_suspended(hba->dev))
/*
* Let the runtime resume take care of resuming
* if runtime suspended.
@@ -5270,7 +5294,10 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
return ufshcd_suspend(hba, UFS_RUNTIME_PM);
@@ -5300,10 +5327,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
- if (!hba || !hba->is_powered)
+ if (!hba)
+ return -EINVAL;
+
+ if (!hba->is_powered)
return 0;
- else
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+
+ return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -5355,8 +5385,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
- scsi_host_put(hba->host);
-
ufshcd_exit_clk_gating(hba);
if (ufshcd_is_clkscaling_enabled(hba))
devfreq_remove_device(hba->devfreq);
@@ -5481,15 +5509,47 @@ static int ufshcd_devfreq_target(struct device *dev,
{
int err = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
+ bool release_clk_hold = false;
+ unsigned long irq_flags;
if (!ufshcd_is_clkscaling_enabled(hba))
return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+
+ if (ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON)) {
+ if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /* hold the vote until the scaling work is completed */
+ hba->clk_gating.active_reqs++;
+ release_clk_hold = true;
+ hba->clk_gating.state = CLKS_ON;
+ } else {
+ /*
+ * Clock gating work seems to be running in parallel
+ * hence skip scaling work to avoid deadlock between
+ * current scaling work and gating work.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
if (*freq == UINT_MAX)
err = ufshcd_scale_clks(hba, true);
else if (*freq == 0)
err = ufshcd_scale_clks(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (release_clk_hold)
+ __ufshcd_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
return err;
}
@@ -5671,7 +5731,6 @@ exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
- scsi_host_put(host);
ufshcd_hba_exit(hba);
out_error:
return err;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 8ef905cbfc9c22..9237427728cede 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -692,7 +692,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = cpu_to_virtio32(vscsi->vdev,
@@ -751,7 +750,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
return FAILED;
memset(cmd, 0, sizeof(*cmd));
- cmd->sc = sc;
cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
.type = VIRTIO_SCSI_T_TMF,
.subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0f133c1817de34..23081ed8f1e3d3 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -545,9 +545,14 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
(btstat == BTSTAT_SUCCESS ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
- cmd->result = (DID_OK << 16) | sdstat;
- if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
- cmd->result |= (DRIVER_SENSE << 24);
+ if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
+ cmd->result = (DID_RESET << 16);
+ } else {
+ cmd->result = (DID_OK << 16) | sdstat;
+ if (sdstat == SAM_STAT_CHECK_CONDITION &&
+ cmd->sense_buffer)
+ cmd->result |= (DRIVER_SENSE << 24);
+ }
} else
switch (btstat) {
case BTSTAT_SUCCESS:
@@ -1194,8 +1199,6 @@ static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
- pvscsi_shutdown_intr(adapter);
-
if (adapter->workqueue)
destroy_workqueue(adapter->workqueue);
@@ -1524,6 +1527,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
out_reset_adapter:
ll_adapter_reset(adapter);
out_release_resources:
+ pvscsi_shutdown_intr(adapter);
pvscsi_release_resources(adapter);
scsi_host_put(host);
out_disable_device:
@@ -1532,6 +1536,7 @@ out_disable_device:
return error;
out_release_resources_and_disable:
+ pvscsi_shutdown_intr(adapter);
pvscsi_release_resources(adapter);
goto out_disable_device;
}
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 9dc8687bf0480e..e1b32ed0aa205f 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -676,10 +676,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
static int scsifront_sdev_configure(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
+ if (err) {
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ return err;
+ }
+ }
return 0;
}
@@ -687,10 +694,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
}
static struct scsi_host_template scsifront_sht = {
@@ -1025,9 +1037,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
if (scsi_add_device(info->host, chn, tgt, lun)) {
dev_err(&dev->dev, "scsi_add_device\n");
- xenbus_printf(XBT_NIL, dev->nodename,
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
}
break;
case VSCSIFRONT_OP_DEL_LUN:
@@ -1041,10 +1056,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
}
break;
case VSCSIFRONT_OP_READD_LUN:
- if (device_state == XenbusStateConnected)
- xenbus_printf(XBT_NIL, dev->nodename,
+ if (device_state == XenbusStateConnected) {
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateConnected);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
break;
default:
break;
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index cd8f41351addfd..7bfb154d6fa5eb 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = {
bool soc_is_tegra(void)
{
+ const struct of_device_id *match;
struct device_node *root;
root = of_find_node_by_path("/");
if (!root)
return false;
- return of_match_node(tegra_machine_match, root) != NULL;
+ match = of_match_node(tegra_machine_match, root);
+ of_node_put(root);
+
+ return match != NULL;
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index bc34cf7482fb55..a4753644f4cf56 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -738,7 +738,7 @@ void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
if (!pmc->soc->has_tsense_reset)
return;
- np = of_find_node_by_name(pmc->dev->of_node, "i2c-thermtrip");
+ np = of_get_child_by_name(pmc->dev->of_node, "i2c-thermtrip");
if (!np) {
dev_warn(dev, "i2c-thermtrip node not found, %s.\n", disabled);
return;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index cf04960cc3e6ca..1a1368f5863cd0 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -88,7 +88,7 @@ struct bcm2835_spi {
u8 *rx_buf;
int tx_len;
int rx_len;
- bool dma_pending;
+ unsigned int dma_pending;
};
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
/* Write as many bytes as possible to FIFO */
bcm2835_wr_fifo(bs);
- /* based on flags decide if we can finish the transfer */
- if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+ if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(master);
/* wake up the framework */
@@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
* is called the tx-dma must have finished - can't get to this
* situation otherwise...
*/
- dmaengine_terminate_all(master->dma_tx);
-
- /* mark as no longer pending */
- bs->dma_pending = 0;
+ if (cmpxchg(&bs->dma_pending, true, false)) {
+ dmaengine_terminate_all(master->dma_tx);
+ }
/* and mark as completed */
complete(&master->xfer_completion);
@@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
if (ret) {
/* need to reset on errors */
dmaengine_terminate_all(master->dma_tx);
+ bs->dma_pending = false;
bcm2835_spi_reset_hw(master);
return ret;
}
@@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* if an error occurred and we have an active dma, then terminate */
- if (bs->dma_pending) {
+ if (cmpxchg(&bs->dma_pending, true, false)) {
dmaengine_terminate_all(master->dma_tx);
dmaengine_terminate_all(master->dma_rx);
- bs->dma_pending = 0;
}
/* and reset */
bcm2835_spi_reset_hw(master);
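Making dma_pending a plain word and claiming it with cmpxchg() turns the flag into a single-claim token: of the racing completion callback and error handler, exactly one wins the true-to-false transition and calls dmaengine_terminate_all(). A C11-atomics analogue:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool dma_pending = true;

	/* Whichever path flips true->false first does the teardown; the loser
	 * sees false and skips it, so terminate runs exactly once. */
	static void finish_dma(const char *who)
	{
		bool expected = true;

		if (atomic_compare_exchange_strong(&dma_pending, &expected, false))
			printf("%s: terminating DMA\n", who);
		else
			printf("%s: already terminated\n", who);
	}

	int main(void)
	{
		finish_dma("completion callback");
		finish_dma("error handler");
		return 0;
	}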
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 55789f7cda9280..645f428ad0a234 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -336,8 +336,8 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no irq\n");
- return -ENXIO;
+ dev_err(dev, "no irq: %d\n", irq);
+ return irq;
}
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index bf9a610e5b8981..f14500910bc2b2 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -496,8 +496,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no irq\n");
- return -ENXIO;
+ dev_err(dev, "no irq: %d\n", irq);
+ return irq;
}
clk = devm_clk_get(dev, "spi");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index c872a2e54c4ba2..2603bee2ce0786 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -220,7 +220,7 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
- if (spicfg->wdelay)
+ if (spicfg && spicfg->wdelay)
spidat1 |= SPIDAT1_WDEL;
/*
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 818843336932c9..9882d93e7566d7 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -587,11 +587,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
+ if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
- else if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
- ret = -ETIMEDOUT;
+ } else {
+ if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
if (rx)
@@ -1303,12 +1305,36 @@ static const struct platform_device_id spi_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 3de39bd794b64d..03b566848da63d 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -374,7 +374,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+ sh_msiof_write(p, STR,
+ sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1275,12 +1276,37 @@ static const struct platform_device_id spi_driver_ids[] = {
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+ sh_msiof_spi_resume);
+#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 85c91f58b42f3f..af2880d0c1126a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
goto exit_free_master;
}
+	/* A disabled clock may cause an interrupt storm when the IRQ is requested */
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ ret = PTR_ERR(tspi->clk);
+ dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_prepare(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+ goto exit_free_master;
+ }
+
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
- goto exit_free_master;
- }
-
- tspi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tspi->clk)) {
- dev_err(&pdev->dev, "can not get clock\n");
- ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_clk_disable;
}
tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
free_irq(spi_irq, tspi);
+exit_clk_disable:
+ clk_disable(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
+ clk_disable(tspi->clk);
+
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
diff --git a/drivers/spi/spi-xlp.c b/drivers/spi/spi-xlp.c
index 8f04feca6ee3b6..0ddb0adaa8aab0 100644
--- a/drivers/spi/spi-xlp.c
+++ b/drivers/spi/spi-xlp.c
@@ -392,8 +392,8 @@ static int xlp_spi_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -EINVAL;
+ dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
+ return irq;
}
err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
pdev->name, xspi);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4efd91bc851075..6a3b7b7b8a996a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -712,8 +712,14 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
for (i = 0; i < sgs; i++) {
if (vmalloced_buf) {
- min = min_t(size_t,
- len, desc_len - offset_in_page(buf));
+ /*
+ * Next scatterlist entry size is the minimum between
+ * the desc_len and the remaining buffer length that
+ * fits in a page.
+ */
+ min = min_t(size_t, desc_len,
+ min_t(size_t, len,
+ PAGE_SIZE - offset_in_page(buf)));
vm_page = vmalloc_to_page(buf);
if (!vm_page) {
sg_free_table(sgt);
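For a vmalloc'ed buffer the pages are not physically contiguous, so each scatterlist chunk must stop at the next page boundary as well as respecting the descriptor limit and the remaining length; that is the min-of-three the new comment describes. A standalone illustration (PAGE_SIZE and offset_in_page are redefined locally):

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL
	#define offset_in_page(p) ((size_t)(p) & (PAGE_SIZE - 1))

	static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

	/* A vmalloc'ed chunk may not cross a page: each page maps separately. */
	static size_t next_chunk(size_t desc_len, size_t len, size_t buf_addr)
	{
		return min_sz(desc_len,
			      min_sz(len, PAGE_SIZE - offset_in_page(buf_addr)));
	}

	int main(void)
	{
		/* 100 bytes left before the page boundary; desc allows 65535 */
		printf("%zu\n", next_chunk(65535, 8192, 4096 - 100)); /* 100 */
		return 0;
	}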
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5d3b86a3385704..67d8467e8eabd0 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -110,4 +110,6 @@ source "drivers/staging/wilc1000/Kconfig"
source "drivers/staging/most/Kconfig"
+source "drivers/staging/gasket/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 30918edef5e3c2..0fa4d86b252e86 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
obj-$(CONFIG_WILC1000) += wilc1000/
obj-$(CONFIG_MOST) += most/
+obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index cba6b4e17fee48..208e07fbee5dad 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -370,6 +370,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
goto out;
}
+ /* requested mapping size larger than object size */
+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* requested protection bits must match our allowed protection mask */
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
calc_vm_prot_bits(PROT_MASK))) {
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index fb73052af64889..a0b7877d084b4a 100755
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -389,7 +389,8 @@ static void ion_handle_get(struct ion_handle *handle)
}
/* Must hold the client lock */
-static struct ion_handle* ion_handle_get_check_overflow(struct ion_handle *handle)
+static struct ion_handle *ion_handle_get_check_overflow(
+ struct ion_handle *handle)
{
if (atomic_read(&handle->ref.refcount) + 1 == 0)
return ERR_PTR(-EOVERFLOW);
@@ -448,18 +449,6 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
return ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
-{
- struct ion_handle *handle;
-
- mutex_lock(&client->lock);
- handle = ion_handle_get_by_id_nolock(client, id);
- mutex_unlock(&client->lock);
-
- return handle;
-}
-
static bool ion_handle_validate(struct ion_client *client,
struct ion_handle *handle)
{
@@ -1137,24 +1126,28 @@ static struct dma_buf_ops dma_buf_ops = {
.kunmap = ion_dma_buf_kunmap,
};
-struct dma_buf *ion_share_dma_buf(struct ion_client *client,
- struct ion_handle *handle)
+static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle,
+ bool lock_client)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct ion_buffer *buffer;
struct dma_buf *dmabuf;
bool valid_handle;
- mutex_lock(&client->lock);
+ if (lock_client)
+ mutex_lock(&client->lock);
valid_handle = ion_handle_validate(client, handle);
if (!valid_handle) {
WARN(1, "%s: invalid handle passed to share.\n", __func__);
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
return ERR_PTR(-EINVAL);
}
buffer = handle->buffer;
ion_buffer_get(buffer);
- mutex_unlock(&client->lock);
+ if (lock_client)
+ mutex_unlock(&client->lock);
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
@@ -1169,14 +1162,21 @@ struct dma_buf *ion_share_dma_buf(struct ion_client *client,
return dmabuf;
}
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return __ion_share_dma_buf(client, handle, true);
+}
EXPORT_SYMBOL(ion_share_dma_buf);
-int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+static int __ion_share_dma_buf_fd(struct ion_client *client,
+ struct ion_handle *handle, bool lock_client)
{
struct dma_buf *dmabuf;
int fd;
- dmabuf = ion_share_dma_buf(client, handle);
+ dmabuf = __ion_share_dma_buf(client, handle, lock_client);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
@@ -1186,8 +1186,19 @@ int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
return fd;
}
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+ return __ion_share_dma_buf_fd(client, handle, true);
+}
EXPORT_SYMBOL(ion_share_dma_buf_fd);
+static int ion_share_dma_buf_fd_nolock(struct ion_client *client,
+ struct ion_handle *handle)
+{
+ return __ion_share_dma_buf_fd(client, handle, false);
+}
+
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
struct dma_buf *dmabuf;
@@ -1334,11 +1345,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct ion_handle *handle;
- handle = ion_handle_get_by_id(client, data.handle.handle);
- if (IS_ERR(handle))
+ mutex_lock(&client->lock);
+ handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
return PTR_ERR(handle);
- data.fd.fd = ion_share_dma_buf_fd(client, handle);
- ion_handle_put(handle);
+ }
+ data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
+ ion_handle_put_nolock(handle);
+ mutex_unlock(&client->lock);
if (data.fd.fd < 0)
ret = data.fd.fd;
break;
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index ca15a87f6fd3d7..13a9b4c42b267f 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 8f181caffca321..619c989c5f3704 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -5275,11 +5275,11 @@ static int ni_E_init(struct comedi_device *dev,
/* Digital I/O (PFI) subdevice */
s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
s->maxdata = 1;
if (devpriv->is_m_series) {
s->n_chan = 16;
s->insn_bits = ni_pfi_insn_bits;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
ni_writew(dev, s->state, NI_M_PFI_DO_REG);
for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
@@ -5288,6 +5288,7 @@ static int ni_E_init(struct comedi_device *dev,
}
} else {
s->n_chan = 10;
+ s->subdev_flags = SDF_INTERNAL;
}
s->insn_config = ni_pfi_insn_config;
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index e9e43139157d93..769a940151176c 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
/* Make sure D/A update mode is direct update */
outb(0, dev->iobase + DAQP_AUX_REG);
- for (i = 0; i > insn->n; i++) {
+ for (i = 0; i < insn->n; i++) {
unsigned val = data[i];
int ret;
diff --git a/drivers/staging/gasket/Kconfig b/drivers/staging/gasket/Kconfig
new file mode 100644
index 00000000000000..970e299046c378
--- /dev/null
+++ b/drivers/staging/gasket/Kconfig
@@ -0,0 +1,23 @@
+menu "Gasket devices"
+
+config STAGING_GASKET_FRAMEWORK
+ tristate "Gasket framework"
+ depends on PCI && (X86_64 || ARM64)
+ help
+ This framework supports Gasket-compatible devices, such as Apex.
+ It is required for any of the following module(s).
+
+ To compile this driver as a module, choose M here. The module
+ will be called "gasket".
+
+config STAGING_APEX_DRIVER
+ tristate "Apex Driver"
+ depends on STAGING_GASKET_FRAMEWORK
+ help
+ This driver supports the Apex device. Say Y if you want to
+ include this driver in the kernel.
+
+ To compile this driver as a module, choose M here. The module
+ will be called "apex".
+
+endmenu
diff --git a/drivers/staging/gasket/Makefile b/drivers/staging/gasket/Makefile
new file mode 100644
index 00000000000000..cec813ece6785f
--- /dev/null
+++ b/drivers/staging/gasket/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for Gasket framework and dependent drivers.
+#
+
+obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket.o
+obj-$(CONFIG_STAGING_APEX_DRIVER) += apex.o
+
+gasket-objs := gasket_core.o gasket_ioctl.o gasket_interrupt.o gasket_page_table.o gasket_sysfs.o
+apex-objs := apex_driver.o
diff --git a/drivers/staging/gasket/TODO b/drivers/staging/gasket/TODO
new file mode 100644
index 00000000000000..6ff8e01b04cc8f
--- /dev/null
+++ b/drivers/staging/gasket/TODO
@@ -0,0 +1,9 @@
+This is a list of things that need to be done to get this driver out of the
+staging directory.
+- Document sysfs files with Documentation/ABI/ entries.
+- Use misc interface instead of major number for driver version description.
+- Add descriptions of the module parameters.
+- apex_get_status() should actually check status.
+- "drivers" should never be dealing with "raw" sysfs calls or mess around with
+ kobjects at all. The driver core should handle all of this for you
+ automatically. There should not be a need for raw attribute macros.
diff --git a/drivers/staging/gasket/apex.h b/drivers/staging/gasket/apex.h
new file mode 100644
index 00000000000000..3bbceffff5e41b
--- /dev/null
+++ b/drivers/staging/gasket/apex.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Apex kernel-userspace interface definitions.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __APEX_H__
+#define __APEX_H__
+
+#include <linux/ioctl.h>
+
+/* Clock Gating ioctl. */
+struct apex_gate_clock_ioctl {
+ /* Enter or leave clock gated state. */
+ u64 enable;
+
+ /* If set, enter the clock-gated state regardless of the custom
+ * block's internal idle state.
+ */
+ u64 force_idle;
+};
+
+/* Base number for all Apex-common IOCTLs */
+#define APEX_IOCTL_BASE 0x7F
+
+/* Enable/Disable clock gating. */
+#define APEX_IOCTL_GATE_CLOCK \
+ _IOW(APEX_IOCTL_BASE, 0, struct apex_gate_clock_ioctl)
+
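+/*
+ * Illustrative userspace sketch (not part of this interface; the device
+ * node name is an assumption):
+ *
+ * struct apex_gate_clock_ioctl req = { .enable = 1, .force_idle = 0 };
+ * int fd = open("/dev/apex_0", O_RDWR);
+ *
+ * if (fd >= 0 && ioctl(fd, APEX_IOCTL_GATE_CLOCK, &req) != 0)
+ * perror("APEX_IOCTL_GATE_CLOCK");
+ */
+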
+#endif /* __APEX_H__ */
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
new file mode 100644
index 00000000000000..42cef68eb4c195
--- /dev/null
+++ b/drivers/staging/gasket/apex_driver.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for the Apex chip.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include "apex.h"
+
+#include "gasket_core.h"
+#include "gasket_interrupt.h"
+#include "gasket_page_table.h"
+#include "gasket_sysfs.h"
+
+/* Constants */
+#define APEX_DEVICE_NAME "Apex"
+#define APEX_DRIVER_VERSION "1.0"
+
+/* CSRs are in BAR 2. */
+#define APEX_BAR_INDEX 2
+
+#define APEX_PCI_VENDOR_ID 0x1ac1
+#define APEX_PCI_DEVICE_ID 0x089a
+
+/* Bar Offsets. */
+#define APEX_BAR_OFFSET 0
+#define APEX_CM_OFFSET 0x1000000
+
+/* Sizes of the Apex BAR 2 region and the coherent memory area. */
+#define APEX_BAR_BYTES 0x100000
+#define APEX_CH_MEM_BYTES (PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
+
+/* The number of user-mappable memory ranges in BAR2 of an Apex chip. */
+#define NUM_REGIONS 3
+
+/* The number of nodes in an Apex chip. */
+#define NUM_NODES 1
+
+/*
+ * The total number of entries in the page table. Should match the value read
+ * from the register APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE.
+ */
+#define APEX_PAGE_TABLE_TOTAL_ENTRIES 8192
+
+#define APEX_EXTENDED_SHIFT 63 /* Extended address bit position. */
+
+/* Check reset 120 times */
+#define APEX_RESET_RETRY 120
+/* Wait 100 ms between checks. Total 12 sec wait maximum. */
+#define APEX_RESET_DELAY 100
+
+/* Enumeration of the supported sysfs entries. */
+enum sysfs_attribute_type {
+ ATTR_KERNEL_HIB_PAGE_TABLE_SIZE,
+ ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE,
+ ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES,
+};
+
+/*
+ * Register offsets into BAR2 memory.
+ * Only values necessary for driver implementation are defined.
+ */
+enum apex_bar2_regs {
+ APEX_BAR2_REG_SCU_BASE = 0x1A300,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE = 0x46000,
+ APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE = 0x46008,
+ APEX_BAR2_REG_KERNEL_HIB_TRANSLATION_ENABLE = 0x46010,
+ APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL = 0x46018,
+ APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL = 0x46020,
+ APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL = 0x46028,
+ APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL = 0x46030,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL = 0x46038,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL = 0x46040,
+ APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL = 0x46048,
+ APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE = 0x46050,
+ APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE_MASK = 0x46058,
+ APEX_BAR2_REG_KERNEL_HIB_STATUS_BLOCK_DELAY = 0x46060,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY0 = 0x46068,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY1 = 0x46070,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT = 0x46078,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT = 0x46080,
+ APEX_BAR2_REG_KERNEL_WIRE_INT_PENDING_BIT_ARRAY = 0x48778,
+ APEX_BAR2_REG_KERNEL_WIRE_INT_MASK_ARRAY = 0x48780,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSE = 0x486D8,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSED = 0x486E0,
+ APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER = 0x4A000,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE = 0x50000,
+
+ /* Error registers - Used mostly for debug */
+ APEX_BAR2_REG_USER_HIB_ERROR_STATUS = 0x86f0,
+ APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS = 0x41a0,
+};
+
+/* Addresses for packed registers. */
+#define APEX_BAR2_REG_AXI_QUIESCE (APEX_BAR2_REG_SCU_BASE + 0x2C)
+#define APEX_BAR2_REG_GCB_CLOCK_GATE (APEX_BAR2_REG_SCU_BASE + 0x14)
+#define APEX_BAR2_REG_SCU_0 (APEX_BAR2_REG_SCU_BASE + 0xc)
+#define APEX_BAR2_REG_SCU_1 (APEX_BAR2_REG_SCU_BASE + 0x10)
+#define APEX_BAR2_REG_SCU_2 (APEX_BAR2_REG_SCU_BASE + 0x14)
+#define APEX_BAR2_REG_SCU_3 (APEX_BAR2_REG_SCU_BASE + 0x18)
+#define APEX_BAR2_REG_SCU_4 (APEX_BAR2_REG_SCU_BASE + 0x1c)
+#define APEX_BAR2_REG_SCU_5 (APEX_BAR2_REG_SCU_BASE + 0x20)
+
+#define SCU3_RG_PWR_STATE_OVR_BIT_OFFSET 26
+#define SCU3_RG_PWR_STATE_OVR_MASK_WIDTH 2
+#define SCU3_CUR_RST_GCB_BIT_MASK 0x10
+#define SCU2_RG_RST_GCB_BIT_MASK 0xc
+
+/* Configuration for page table. */
+static struct gasket_page_table_config apex_page_table_configs[NUM_NODES] = {
+ {
+ .id = 0,
+ .mode = GASKET_PAGE_TABLE_MODE_NORMAL,
+ .total_entries = APEX_PAGE_TABLE_TOTAL_ENTRIES,
+ .base_reg = APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE,
+ .extended_reg = APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE,
+ .extended_bit = APEX_EXTENDED_SHIFT,
+ },
+};
+
+/* The regions in the BAR2 space that can be mapped into user space. */
+static const struct gasket_mappable_region mappable_regions[NUM_REGIONS] = {
+ { 0x40000, 0x1000 },
+ { 0x44000, 0x1000 },
+ { 0x48000, 0x1000 },
+};
+
+static const struct gasket_mappable_region cm_mappable_regions[1] = {
+ { 0x0, APEX_CH_MEM_BYTES },
+};
+
+/* Gasket device interrupt enums must be dense (i.e., no empty slots). */
+enum apex_interrupt {
+ APEX_INTERRUPT_INSTR_QUEUE = 0,
+ APEX_INTERRUPT_INPUT_ACTV_QUEUE = 1,
+ APEX_INTERRUPT_PARAM_QUEUE = 2,
+ APEX_INTERRUPT_OUTPUT_ACTV_QUEUE = 3,
+ APEX_INTERRUPT_SC_HOST_0 = 4,
+ APEX_INTERRUPT_SC_HOST_1 = 5,
+ APEX_INTERRUPT_SC_HOST_2 = 6,
+ APEX_INTERRUPT_SC_HOST_3 = 7,
+ APEX_INTERRUPT_TOP_LEVEL_0 = 8,
+ APEX_INTERRUPT_TOP_LEVEL_1 = 9,
+ APEX_INTERRUPT_TOP_LEVEL_2 = 10,
+ APEX_INTERRUPT_TOP_LEVEL_3 = 11,
+ APEX_INTERRUPT_FATAL_ERR = 12,
+ APEX_INTERRUPT_COUNT = 13,
+};
+
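+/*
+ * Several interrupt lines share one INTVECCTL register: PACK_0..PACK_3
+ * select which field within the shared register a line occupies, with the
+ * field width given by interrupt_pack_width (7 bits in apex_desc below),
+ * while UNPACKED lines own their register. This reading is inferred from
+ * the descriptor table and apex_desc, not stated elsewhere in this file.
+ */
+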
+/* Interrupt descriptors for Apex */
+static struct gasket_interrupt_desc apex_interrupts[] = {
+ {
+ APEX_INTERRUPT_INSTR_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL,
+ UNPACKED,
+ },
+ {
+ APEX_INTERRUPT_INPUT_ACTV_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL,
+ UNPACKED
+ },
+ {
+ APEX_INTERRUPT_PARAM_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL,
+ UNPACKED
+ },
+ {
+ APEX_INTERRUPT_OUTPUT_ACTV_QUEUE,
+ APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL,
+ UNPACKED
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_0,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_0
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_1,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_1
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_2,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_2
+ },
+ {
+ APEX_INTERRUPT_SC_HOST_3,
+ APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
+ PACK_3
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_0,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_0
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_1,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_1
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_2,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_2
+ },
+ {
+ APEX_INTERRUPT_TOP_LEVEL_3,
+ APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
+ PACK_3
+ },
+ {
+ APEX_INTERRUPT_FATAL_ERR,
+ APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL,
+ UNPACKED
+ },
+};
+
+/* Allows device to enter power save upon driver close(). */
+static int allow_power_save = 1;
+
+/* Allows SW-based clock gating. */
+static int allow_sw_clock_gating;
+
+/* Allows HW-based clock gating. Note: this is not mutually exclusive
+ * with SW clock gating.
+ */
+static int allow_hw_clock_gating = 1;
+
+/* Act as if only GCB is instantiated. */
+static int bypass_top_level;
+
+module_param(allow_power_save, int, 0644);
+module_param(allow_sw_clock_gating, int, 0644);
+module_param(allow_hw_clock_gating, int, 0644);
+module_param(bypass_top_level, int, 0644);
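+
+/*
+ * Parameter descriptions: a sketch added per the driver TODO; the wording
+ * paraphrases the comments on each variable above.
+ */
+MODULE_PARM_DESC(allow_power_save, "Allow device to enter power save upon driver close");
+MODULE_PARM_DESC(allow_sw_clock_gating, "Allow SW-based clock gating");
+MODULE_PARM_DESC(allow_hw_clock_gating, "Allow HW-based clock gating");
+MODULE_PARM_DESC(bypass_top_level, "Act as if only GCB is instantiated");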
+
+/* Check the device status registers and return device status ALIVE or DEAD. */
+static int apex_get_status(struct gasket_dev *gasket_dev)
+{
+ /* TODO: Check device status. */
+ return GASKET_STATUS_ALIVE;
+}
+
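+/*
+ * Note on the gasket_read_modify_write_{32,64}() calls below: as used here,
+ * the trailing arguments are (value, mask_width, mask_shift), i.e. write
+ * 'value' into a 'mask_width'-bit field starting at bit 'mask_shift'.
+ */
+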
+/* Enter GCB reset state. */
+static int apex_enter_reset(struct gasket_dev *gasket_dev)
+{
+ if (bypass_top_level)
+ return 0;
+
+ /*
+ * Software reset:
+ * Enable sleep mode
+ * - Software force GCB idle
+ * - Enable GCB idle
+ */
+ gasket_read_modify_write_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER,
+ 0x0, 1, 32);
+
+ /* - Initiate DMA pause */
+ gasket_dev_write_64(gasket_dev, 1, APEX_BAR_INDEX,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSE);
+
+ /* - Wait for DMA pause complete. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_USER_HIB_DMA_PAUSED, 1, 1,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "DMAs did not quiesce within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ /* - Enable GCB reset (0x1 to rg_rst_gcb) */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x1, 2, 2);
+
+ /* - Enable GCB clock Gate (0x1 to rg_gated_gcb) */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x1, 2, 18);
+
+ /* - Enable GCB memory shut down (0x3 to rg_force_ram_sd) */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 0x3, 2, 14);
+
+ /* - Wait for RAM shutdown. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 1 << 6, 1 << 6,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "RAM did not shut down within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* Quit GCB reset state. */
+static int apex_quit_reset(struct gasket_dev *gasket_dev)
+{
+ u32 val0, val1;
+
+ if (bypass_top_level)
+ return 0;
+
+ /*
+ * Disable sleep mode:
+ * - Disable GCB memory shut down:
+ * - b00: Not forced (HW controlled)
+ * - b1x: Force disable
+ */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 0x0, 2, 14);
+
+ /*
+ * - Disable software clock gate:
+ * - b00: Not forced (HW controlled)
+ * - b1x: Force disable
+ */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x0, 2, 18);
+
+ /*
+ * - Disable GCB reset (rg_rst_gcb):
+ * - b00: Not forced (HW controlled)
+ * - b1x: Force disable = Force not Reset
+ */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_2, 0x2, 2, 2);
+
+ /* - Wait for RAM enable. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 1 << 6, 0,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "RAM did not enable within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ /* - Wait for Reset complete. */
+ if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3,
+ SCU3_CUR_RST_GCB_BIT_MASK, 0,
+ APEX_RESET_DELAY, APEX_RESET_RETRY)) {
+ dev_err(gasket_dev->dev,
+ "GCB did not leave reset within timeout (%d ms)\n",
+ APEX_RESET_RETRY * APEX_RESET_DELAY);
+ return -ETIMEDOUT;
+ }
+
+ if (!allow_hw_clock_gating) {
+ val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ /* Inactive and Sleep mode are disabled. */
+ gasket_read_modify_write_32(gasket_dev,
+ APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 0x3,
+ SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
+ SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
+ val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ dev_dbg(gasket_dev->dev,
+ "Disallow HW clock gating 0x%x -> 0x%x\n", val0, val1);
+ } else {
+ val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ /* Inactive mode enabled - Sleep mode disabled. */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3, 2,
+ SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
+ SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
+ val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+ dev_dbg(gasket_dev->dev, "Allow HW clock gating 0x%x -> 0x%x\n",
+ val0, val1);
+ }
+
+ return 0;
+}
+
+/* Reset the Apex hardware. Called on final close via device_close_cb. */
+static int apex_device_cleanup(struct gasket_dev *gasket_dev)
+{
+ u64 scalar_error;
+ u64 hib_error;
+ int ret = 0;
+
+ hib_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_USER_HIB_ERROR_STATUS);
+ scalar_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS);
+
+ dev_dbg(gasket_dev->dev,
+ "%s 0x%p hib_error 0x%llx scalar_error 0x%llx\n",
+ __func__, gasket_dev, hib_error, scalar_error);
+
+ if (allow_power_save)
+ ret = apex_enter_reset(gasket_dev);
+
+ return ret;
+}
+
+/* Determine if GCB is in reset state. */
+static bool is_gcb_in_reset(struct gasket_dev *gasket_dev)
+{
+ u32 val = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_SCU_3);
+
+ /* Masks rg_rst_gcb bit of SCU_CTRL_2 */
+ return (val & SCU3_CUR_RST_GCB_BIT_MASK);
+}
+
+/* Reset the hardware, then quit reset. Called on device open. */
+static int apex_reset(struct gasket_dev *gasket_dev)
+{
+ int ret;
+
+ if (bypass_top_level)
+ return 0;
+
+ if (!is_gcb_in_reset(gasket_dev)) {
+ /* We are not in reset - toggle the reset bit so as to force
+ * re-init of custom block
+ */
+ dev_dbg(gasket_dev->dev, "%s: toggle reset\n", __func__);
+
+ ret = apex_enter_reset(gasket_dev);
+ if (ret)
+ return ret;
+ }
+ ret = apex_quit_reset(gasket_dev);
+
+ return ret;
+}
+
+static int apex_add_dev_cb(struct gasket_dev *gasket_dev)
+{
+ ulong page_table_ready, msix_table_ready;
+ int retries = 0;
+
+ apex_reset(gasket_dev);
+
+ while (retries < APEX_RESET_RETRY) {
+ page_table_ready =
+ gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT);
+ msix_table_ready =
+ gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT);
+ if (page_table_ready && msix_table_ready)
+ break;
+ /* schedule_timeout() without setting the task state would not sleep. */
+ msleep(APEX_RESET_DELAY);
+ retries++;
+ }
+
+ if (retries == APEX_RESET_RETRY) {
+ if (!page_table_ready)
+ dev_err(gasket_dev->dev, "Page table init timed out\n");
+ if (!msix_table_ready)
+ dev_err(gasket_dev->dev, "MSI-X table init timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Check permissions for Apex ioctls.
+ * Returns true if the current user may execute this ioctl, and false otherwise.
+ */
+static bool apex_ioctl_check_permissions(struct file *filp, uint cmd)
+{
+ return !!(filp->f_mode & FMODE_WRITE);
+}
+
+/* Gates or un-gates Apex clock. */
+static long apex_clock_gating(struct gasket_dev *gasket_dev,
+ struct apex_gate_clock_ioctl __user *argp)
+{
+ struct apex_gate_clock_ioctl ibuf;
+
+ if (bypass_top_level || !allow_sw_clock_gating)
+ return 0;
+
+ if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
+ return -EFAULT;
+
+ dev_dbg(gasket_dev->dev, "%s %llu\n", __func__, ibuf.enable);
+
+ if (ibuf.enable) {
+ /* Quiesce AXI, gate GCB clock. */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_AXI_QUIESCE, 0x1, 1,
+ 16);
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_GCB_CLOCK_GATE, 0x1,
+ 2, 18);
+ } else {
+ /* Un-gate GCB clock, un-quiesce AXI. */
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_GCB_CLOCK_GATE, 0x0,
+ 2, 18);
+ gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
+ APEX_BAR2_REG_AXI_QUIESCE, 0x0, 1,
+ 16);
+ }
+ return 0;
+}
+
+/* Apex-specific ioctl handler. */
+static long apex_ioctl(struct file *filp, uint cmd, void __user *argp)
+{
+ struct gasket_dev *gasket_dev = filp->private_data;
+
+ if (!apex_ioctl_check_permissions(filp, cmd))
+ return -EPERM;
+
+ switch (cmd) {
+ case APEX_IOCTL_GATE_CLOCK:
+ return apex_clock_gating(gasket_dev, argp);
+ default:
+ return -ENOTTY; /* unknown command */
+ }
+}
+
+/* Display driver sysfs entries. */
+static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+ enum sysfs_attribute_type type;
+
+ gasket_dev = gasket_sysfs_get_device_data(device);
+ if (!gasket_dev) {
+ dev_err(device, "No Apex device sysfs mapping found\n");
+ return -ENODEV;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ dev_err(device, "No Apex device sysfs attr data found\n");
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return -ENODEV;
+ }
+
+ type = (enum sysfs_attribute_type)gasket_attr->data.attr_type;
+ switch (type) {
+ case ATTR_KERNEL_HIB_PAGE_TABLE_SIZE:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ gasket_page_table_num_entries(
+ gasket_dev->page_table[0]));
+ break;
+ case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ gasket_page_table_num_simple_entries(
+ gasket_dev->page_table[0]));
+ break;
+ case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+ gasket_page_table_num_active_pages(
+ gasket_dev->page_table[0]));
+ break;
+ default:
+ dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
+ attr->attr.name);
+ ret = 0;
+ break;
+ }
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return ret;
+}
+
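+/*
+ * The entries below appear in the device's sysfs directory created by the
+ * framework (the exact path depends on the class device); e.g. reading
+ * node_0_page_table_entries reports the total page-table entry count.
+ */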
+static struct gasket_sysfs_attribute apex_sysfs_attrs[] = {
+ GASKET_SYSFS_RO(node_0_page_table_entries, sysfs_show,
+ ATTR_KERNEL_HIB_PAGE_TABLE_SIZE),
+ GASKET_SYSFS_RO(node_0_simple_page_table_entries, sysfs_show,
+ ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE),
+ GASKET_SYSFS_RO(node_0_num_mapped_pages, sysfs_show,
+ ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES),
+ GASKET_END_OF_ATTR_ARRAY
+};
+
+static int apex_sysfs_setup_cb(struct gasket_dev *gasket_dev)
+{
+ return gasket_sysfs_create_entries(gasket_dev->dev_info.device,
+ apex_sysfs_attrs);
+}
+
+/* On device open, perform a core reinit reset. */
+static int apex_device_open_cb(struct gasket_dev *gasket_dev)
+{
+ return gasket_reset_nolock(gasket_dev);
+}
+
+static const struct pci_device_id apex_pci_ids[] = {
+ { PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
+};
+
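+/*
+ * The device enumerates with PCI class "not defined"; OR in
+ * PCI_CLASS_SYSTEM_OTHER above the programming-interface byte so the PCI
+ * core treats it as a system peripheral, preserving the low byte reported
+ * by the hardware.
+ */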
+static void apex_pci_fixup_class(struct pci_dev *pdev)
+{
+ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
+ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
+
+static struct gasket_driver_desc apex_desc = {
+ .name = "apex",
+ .driver_version = APEX_DRIVER_VERSION,
+ .major = 120,
+ .minor = 0,
+ .module = THIS_MODULE,
+ .pci_id_table = apex_pci_ids,
+
+ .num_page_tables = NUM_NODES,
+ .page_table_bar_index = APEX_BAR_INDEX,
+ .page_table_configs = apex_page_table_configs,
+ .page_table_extended_bit = APEX_EXTENDED_SHIFT,
+
+ .bar_descriptions = {
+ GASKET_UNUSED_BAR,
+ GASKET_UNUSED_BAR,
+ { APEX_BAR_BYTES, (VM_WRITE | VM_READ), APEX_BAR_OFFSET,
+ NUM_REGIONS, mappable_regions, PCI_BAR },
+ GASKET_UNUSED_BAR,
+ GASKET_UNUSED_BAR,
+ GASKET_UNUSED_BAR,
+ },
+ .coherent_buffer_description = {
+ APEX_CH_MEM_BYTES,
+ (VM_WRITE | VM_READ),
+ APEX_CM_OFFSET,
+ },
+ .interrupt_type = PCI_MSIX,
+ .interrupt_bar_index = APEX_BAR_INDEX,
+ .num_interrupts = APEX_INTERRUPT_COUNT,
+ .interrupts = apex_interrupts,
+ .interrupt_pack_width = 7,
+
+ .add_dev_cb = apex_add_dev_cb,
+ .remove_dev_cb = NULL,
+
+ .enable_dev_cb = NULL,
+ .disable_dev_cb = NULL,
+
+ .sysfs_setup_cb = apex_sysfs_setup_cb,
+ .sysfs_cleanup_cb = NULL,
+
+ .device_open_cb = apex_device_open_cb,
+ .device_close_cb = apex_device_cleanup,
+
+ .ioctl_handler_cb = apex_ioctl,
+ .device_status_cb = apex_get_status,
+ .hardware_revision_cb = NULL,
+ .device_reset_cb = apex_reset,
+};
+
+static int __init apex_init(void)
+{
+ return gasket_register_device(&apex_desc);
+}
+
+static void apex_exit(void)
+{
+ gasket_unregister_device(&apex_desc);
+}
+MODULE_DESCRIPTION("Google Apex driver");
+MODULE_VERSION(APEX_DRIVER_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("John Joseph <jnjoseph@google.com>");
+MODULE_DEVICE_TABLE(pci, apex_pci_ids);
+module_init(apex_init);
+module_exit(apex_exit);
diff --git a/drivers/staging/gasket/gasket.h b/drivers/staging/gasket/gasket.h
new file mode 100644
index 00000000000000..a0f065c517a526
--- /dev/null
+++ b/drivers/staging/gasket/gasket.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common Gasket device kernel and user space declarations.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __GASKET_H__
+#define __GASKET_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* ioctl structure declarations */
+
+/*
+ * Ioctl structures are padded to a multiple of 64 bits and laid out so
+ * that 64-bit values fall on 64-bit boundaries. Unsigned 64-bit integers
+ * are used to hold pointers, which keeps the layout the same for 32-bit
+ * and 64-bit userspace.
+ */
+
+/*
+ * Common structure for ioctls associating an eventfd with a device interrupt,
+ * when using the Gasket interrupt module.
+ */
+struct gasket_interrupt_eventfd {
+ u64 interrupt;
+ u64 event_fd;
+};
+
+/*
+ * Common structure for ioctls mapping and unmapping buffers when using the
+ * Gasket page_table module.
+ */
+struct gasket_page_table_ioctl {
+ u64 page_table_index;
+ u64 size;
+ u64 host_address;
+ u64 device_address;
+};
+
+/*
+ * Common structure for ioctls enabling, disabling, and configuring the
+ * coherent allocator.
+ * dma_address: phys addr start of coherent memory, allocated by kernel
+ */
+struct gasket_coherent_alloc_config_ioctl {
+ u64 page_table_index;
+ u64 enable;
+ u64 size;
+ u64 dma_address;
+};
+
+/* Base number for all Gasket-common IOCTLs */
+#define GASKET_IOCTL_BASE 0xDC
+
+/* Reset the device. */
+#define GASKET_IOCTL_RESET _IO(GASKET_IOCTL_BASE, 0)
+
+/* Associate the specified [event]fd with the specified interrupt. */
+#define GASKET_IOCTL_SET_EVENTFD \
+ _IOW(GASKET_IOCTL_BASE, 1, struct gasket_interrupt_eventfd)
+
+/*
+ * Clears any eventfd associated with the specified interrupt. The (ulong)
+ * argument is the interrupt number to clear.
+ */
+#define GASKET_IOCTL_CLEAR_EVENTFD _IOW(GASKET_IOCTL_BASE, 2, unsigned long)
+
+/*
+ * [Loopbacks only] Requests that the loopback device send the specified
+ * interrupt to the host. The (ulong) argument is the number of the interrupt to
+ * send.
+ */
+#define GASKET_IOCTL_LOOPBACK_INTERRUPT \
+ _IOW(GASKET_IOCTL_BASE, 3, unsigned long)
+
+/* Queries the kernel for the number of page tables supported by the device. */
+#define GASKET_IOCTL_NUMBER_PAGE_TABLES _IOR(GASKET_IOCTL_BASE, 4, u64)
+
+/*
+ * Queries the kernel for the maximum size of the page table. Only the size and
+ * page_table_index fields are used from the struct gasket_page_table_ioctl.
+ */
+#define GASKET_IOCTL_PAGE_TABLE_SIZE \
+ _IOWR(GASKET_IOCTL_BASE, 5, struct gasket_page_table_ioctl)
+
+/*
+ * Queries the kernel for the current simple page table size. Only the size and
+ * page_table_index fields are used from the struct gasket_page_table_ioctl.
+ */
+#define GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE \
+ _IOWR(GASKET_IOCTL_BASE, 6, struct gasket_page_table_ioctl)
+
+/*
+ * Tells the kernel to change the split between the number of simple and
+ * extended entries in the given page table. Only the size and page_table_index
+ * fields are used from the struct gasket_page_table_ioctl.
+ */
+#define GASKET_IOCTL_PARTITION_PAGE_TABLE \
+ _IOW(GASKET_IOCTL_BASE, 7, struct gasket_page_table_ioctl)
+
+/*
+ * Tells the kernel to map size bytes at host_address to device_address in
+ * page_table_index page table.
+ */
+#define GASKET_IOCTL_MAP_BUFFER \
+ _IOW(GASKET_IOCTL_BASE, 8, struct gasket_page_table_ioctl)
+
+/*
+ * Tells the kernel to unmap size bytes at host_address from device_address in
+ * page_table_index page table.
+ */
+#define GASKET_IOCTL_UNMAP_BUFFER \
+ _IOW(GASKET_IOCTL_BASE, 9, struct gasket_page_table_ioctl)
+
+/* Clear the interrupt counts stored for this device. */
+#define GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS _IO(GASKET_IOCTL_BASE, 10)
+
+/* Enable/Disable and configure the coherent allocator. */
+#define GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR \
+ _IOWR(GASKET_IOCTL_BASE, 11, struct gasket_coherent_alloc_config_ioctl)
+
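+/*
+ * Illustrative userspace sketch for the map/unmap pair (fd, buf, and the
+ * device address are assumptions, not defined here; buf should be a
+ * page-aligned user buffer):
+ *
+ * struct gasket_page_table_ioctl pt = {
+ * .page_table_index = 0,
+ * .size = 0x1000,
+ * .host_address = (u64)buf,
+ * .device_address = 0,
+ * };
+ *
+ * ioctl(fd, GASKET_IOCTL_MAP_BUFFER, &pt);
+ * ...
+ * ioctl(fd, GASKET_IOCTL_UNMAP_BUFFER, &pt);
+ */
+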
+#endif /* __GASKET_H__ */
diff --git a/drivers/staging/gasket/gasket_constants.h b/drivers/staging/gasket/gasket_constants.h
new file mode 100644
index 00000000000000..50d87c7b178c25
--- /dev/null
+++ b/drivers/staging/gasket/gasket_constants.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Google, Inc. */
+#ifndef __GASKET_CONSTANTS_H__
+#define __GASKET_CONSTANTS_H__
+
+#define GASKET_FRAMEWORK_VERSION "1.1.2"
+
+/*
+ * The maximum number of simultaneous device types supported by the framework.
+ */
+#define GASKET_FRAMEWORK_DESC_MAX 2
+
+/* The maximum devices per each type. */
+#define GASKET_DEV_MAX 256
+
+/* The number of supported (and possible) PCI BARs. */
+#define GASKET_NUM_BARS 6
+
+/* The number of supported Gasket page tables per device. */
+#define GASKET_MAX_NUM_PAGE_TABLES 1
+
+/* Maximum length of device names (driver name + minor number suffix + NULL). */
+#define GASKET_NAME_MAX 32
+
+/* Device status enumeration. */
+enum gasket_status {
+ /*
+ * A device is DEAD if it has not been initialized or has had an error.
+ */
+ GASKET_STATUS_DEAD = 0,
+ /*
+ * A device is LAMED if the hardware is healthy but the kernel was
+ * unable to enable some functionality (e.g. interrupts).
+ */
+ GASKET_STATUS_LAMED,
+
+ /* A device is ALIVE if it is ready for operation. */
+ GASKET_STATUS_ALIVE,
+
+ /*
+ * This status is set when the driver is exiting and waiting for all
+ * handles to be closed.
+ */
+ GASKET_STATUS_DRIVER_EXIT,
+};
+
+#endif
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
new file mode 100644
index 00000000000000..2b75f100da4d3a
--- /dev/null
+++ b/drivers/staging/gasket/gasket_core.c
@@ -0,0 +1,1895 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Gasket generic driver framework. This file contains the implementation
+ * for the Gasket generic driver framework - the functionality that is common
+ * across Gasket devices.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#include "gasket_core.h"
+
+#include "gasket_interrupt.h"
+#include "gasket_ioctl.h"
+#include "gasket_page_table.h"
+#include "gasket_sysfs.h"
+
+#include <linux/capability.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/pid_namespace.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+
+#ifdef GASKET_KERNEL_TRACE_SUPPORT
+#define CREATE_TRACE_POINTS
+#include <trace/events/gasket_mmap.h>
+#else
+#define trace_gasket_mmap_exit(x)
+#define trace_gasket_mmap_entry(x, ...)
+#endif
+
+/*
+ * "Private" members of gasket_driver_desc.
+ *
+ * Contains internal per-device type tracking data, i.e., data not appropriate
+ * as part of the public interface for the generic framework.
+ */
+struct gasket_internal_desc {
+ /* Device-specific-driver-provided configuration information. */
+ const struct gasket_driver_desc *driver_desc;
+
+ /* Protects access to per-driver data (i.e. this structure). */
+ struct mutex mutex;
+
+ /* Kernel-internal device class. */
+ struct class *class;
+
+ /* PCI subsystem metadata associated with this driver. */
+ struct pci_driver pci;
+
+ /* Instantiated / present devices of this type. */
+ struct gasket_dev *devs[GASKET_DEV_MAX];
+};
+
+/* do_map_region() needs to be able to return more than just true/false. */
+enum do_map_region_status {
+ /* The region was successfully mapped. */
+ DO_MAP_REGION_SUCCESS,
+
+ /* Attempted to map region and failed. */
+ DO_MAP_REGION_FAILURE,
+
+ /* The requested region to map was not part of a mappable region. */
+ DO_MAP_REGION_INVALID,
+};
+
+/* Global data definitions. */
+/*
+ * Mutex - only for framework-wide data. Other data should be protected by
+ * finer-grained locks.
+ */
+static DEFINE_MUTEX(g_mutex);
+
+/* List of all registered device descriptions & their supporting data. */
+static struct gasket_internal_desc g_descs[GASKET_FRAMEWORK_DESC_MAX];
+
+/* Mapping of statuses to human-readable strings. Must end with {0,NULL}. */
+static const struct gasket_num_name gasket_status_name_table[] = {
+ { GASKET_STATUS_DEAD, "DEAD" },
+ { GASKET_STATUS_ALIVE, "ALIVE" },
+ { GASKET_STATUS_LAMED, "LAMED" },
+ { GASKET_STATUS_DRIVER_EXIT, "DRIVER_EXITING" },
+ { 0, NULL },
+};
+
+/* Enumeration of the automatic Gasket framework sysfs nodes. */
+enum gasket_sysfs_attribute_type {
+ ATTR_BAR_OFFSETS,
+ ATTR_BAR_SIZES,
+ ATTR_DRIVER_VERSION,
+ ATTR_FRAMEWORK_VERSION,
+ ATTR_DEVICE_TYPE,
+ ATTR_HARDWARE_REVISION,
+ ATTR_PCI_ADDRESS,
+ ATTR_STATUS,
+ ATTR_IS_DEVICE_OWNED,
+ ATTR_DEVICE_OWNER,
+ ATTR_WRITE_OPEN_COUNT,
+ ATTR_RESET_COUNT,
+ ATTR_USER_MEM_RANGES
+};
+
+/* Perform a standard Gasket callback. */
+static inline int
+check_and_invoke_callback(struct gasket_dev *gasket_dev,
+ int (*cb_function)(struct gasket_dev *))
+{
+ int ret = 0;
+
+ dev_dbg(gasket_dev->dev, "check_and_invoke_callback %p\n",
+ cb_function);
+ if (cb_function) {
+ mutex_lock(&gasket_dev->mutex);
+ ret = cb_function(gasket_dev);
+ mutex_unlock(&gasket_dev->mutex);
+ }
+ return ret;
+}
+
+/* Perform a standard Gasket callback without grabbing gasket_dev->mutex. */
+static inline int
+gasket_check_and_invoke_callback_nolock(struct gasket_dev *gasket_dev,
+ int (*cb_function)(struct gasket_dev *))
+{
+ int ret = 0;
+
+ if (cb_function) {
+ dev_dbg(gasket_dev->dev,
+ "Invoking device-specific callback.\n");
+ ret = cb_function(gasket_dev);
+ }
+ return ret;
+}
+
+/*
+ * Return nonzero if the gasket_cdev_info is owned by the current thread group
+ * ID.
+ */
+static int gasket_owned_by_current_tgid(struct gasket_cdev_info *info)
+{
+ return (info->ownership.is_owned &&
+ (info->ownership.owner == current->tgid));
+}
+
+/*
+ * Find the next free gasket_internal_dev slot.
+ *
+ * Returns the located slot number on success or a negative number on failure.
+ */
+static int gasket_find_dev_slot(struct gasket_internal_desc *internal_desc,
+ const char *kobj_name)
+{
+ int i;
+
+ mutex_lock(&internal_desc->mutex);
+
+ /* Search for a previous instance of this device. */
+ for (i = 0; i < GASKET_DEV_MAX; i++) {
+ if (internal_desc->devs[i] &&
+ strcmp(internal_desc->devs[i]->kobj_name, kobj_name) == 0) {
+ pr_err("Duplicate device %s\n", kobj_name);
+ mutex_unlock(&internal_desc->mutex);
+ return -EBUSY;
+ }
+ }
+
+ /* Find a free device slot. */
+ for (i = 0; i < GASKET_DEV_MAX; i++) {
+ if (!internal_desc->devs[i])
+ break;
+ }
+
+ if (i == GASKET_DEV_MAX) {
+ pr_err("Too many registered devices; max %d\n", GASKET_DEV_MAX);
+ mutex_unlock(&internal_desc->mutex);
+ return -EBUSY;
+ }
+
+ mutex_unlock(&internal_desc->mutex);
+ return i;
+}
+
+/*
+ * Allocate and initialize a Gasket device structure, add the device to the
+ * device list.
+ *
+ * Returns 0 if successful, a negative error code otherwise.
+ */
+static int gasket_alloc_dev(struct gasket_internal_desc *internal_desc,
+ struct device *parent, struct gasket_dev **pdev,
+ const char *kobj_name)
+{
+ int dev_idx;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+ struct gasket_dev *gasket_dev;
+ struct gasket_cdev_info *dev_info;
+
+ pr_debug("Allocating a Gasket device %s.\n", kobj_name);
+
+ *pdev = NULL;
+
+ dev_idx = gasket_find_dev_slot(internal_desc, kobj_name);
+ if (dev_idx < 0)
+ return dev_idx;
+
+ gasket_dev = *pdev = kzalloc(sizeof(*gasket_dev), GFP_KERNEL);
+ if (!gasket_dev) {
+ pr_err("no memory for device\n");
+ return -ENOMEM;
+ }
+ internal_desc->devs[dev_idx] = gasket_dev;
+
+ mutex_init(&gasket_dev->mutex);
+
+ gasket_dev->internal_desc = internal_desc;
+ gasket_dev->dev_idx = dev_idx;
+ snprintf(gasket_dev->kobj_name, GASKET_NAME_MAX, "%s", kobj_name);
+ gasket_dev->dev = get_device(parent);
+ /* gasket_bar_data is uninitialized. */
+ gasket_dev->num_page_tables = driver_desc->num_page_tables;
+ /* max_page_table_size and *page table are uninit'ed */
+ /* interrupt_data is not initialized. */
+ /* status is 0, or GASKET_STATUS_DEAD */
+
+ dev_info = &gasket_dev->dev_info;
+ snprintf(dev_info->name, GASKET_NAME_MAX, "%s_%u", driver_desc->name,
+ gasket_dev->dev_idx);
+ dev_info->devt =
+ MKDEV(driver_desc->major, driver_desc->minor +
+ gasket_dev->dev_idx);
+ dev_info->device = device_create(internal_desc->class, parent,
+ dev_info->devt, gasket_dev, dev_info->name);
+
+ dev_dbg(dev_info->device, "Gasket device allocated.\n");
+
+ /* cdev has not yet been added; cdev_added is 0 */
+ dev_info->gasket_dev_ptr = gasket_dev;
+ /* ownership is all 0, indicating no owner or opens. */
+
+ return 0;
+}
+
+/* Free a Gasket device. */
+static void gasket_free_dev(struct gasket_dev *gasket_dev)
+{
+ struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
+
+ mutex_lock(&internal_desc->mutex);
+ internal_desc->devs[gasket_dev->dev_idx] = NULL;
+ mutex_unlock(&internal_desc->mutex);
+ put_device(gasket_dev->dev);
+ pci_dev_put(gasket_dev->pci_dev);
+ kfree(gasket_dev);
+}
+
+/*
+ * Maps the specified bar into kernel space.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ * A zero-sized BAR will not be mapped, but is not an error.
+ */
+static int gasket_map_pci_bar(struct gasket_dev *gasket_dev, int bar_num)
+{
+ struct gasket_internal_desc *internal_desc = gasket_dev->internal_desc;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+ ulong desc_bytes = driver_desc->bar_descriptions[bar_num].size;
+ int ret;
+
+ if (desc_bytes == 0)
+ return 0;
+
+ if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR) {
+ /* not PCI: skip this entry */
+ return 0;
+ }
+ /*
+ * pci_resource_start and pci_resource_len return a "resource_size_t",
+ * which is safely castable to ulong (which itself is the arg to
+ * request_mem_region).
+ */
+ gasket_dev->bar_data[bar_num].phys_base =
+ (ulong)pci_resource_start(gasket_dev->pci_dev, bar_num);
+ if (!gasket_dev->bar_data[bar_num].phys_base) {
+ dev_err(gasket_dev->dev, "Cannot get BAR%u base address\n",
+ bar_num);
+ return -EINVAL;
+ }
+
+ gasket_dev->bar_data[bar_num].length_bytes =
+ (ulong)pci_resource_len(gasket_dev->pci_dev, bar_num);
+ if (gasket_dev->bar_data[bar_num].length_bytes < desc_bytes) {
+ dev_err(gasket_dev->dev,
+ "PCI BAR %u space is too small: %lu; expected >= %lu\n",
+ bar_num, gasket_dev->bar_data[bar_num].length_bytes,
+ desc_bytes);
+ return -ENOMEM;
+ }
+
+ if (!request_mem_region(gasket_dev->bar_data[bar_num].phys_base,
+ gasket_dev->bar_data[bar_num].length_bytes,
+ gasket_dev->dev_info.name)) {
+ dev_err(gasket_dev->dev,
+ "Cannot get BAR %d memory region %p\n",
+ bar_num, &gasket_dev->pci_dev->resource[bar_num]);
+ return -EINVAL;
+ }
+
+ gasket_dev->bar_data[bar_num].virt_base =
+ ioremap_nocache(gasket_dev->bar_data[bar_num].phys_base,
+ gasket_dev->bar_data[bar_num].length_bytes);
+ if (!gasket_dev->bar_data[bar_num].virt_base) {
+ dev_err(gasket_dev->dev,
+ "Cannot remap BAR %d memory region %p\n",
+ bar_num, &gasket_dev->pci_dev->resource[bar_num]);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ dma_set_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&gasket_dev->pci_dev->dev, DMA_BIT_MASK(64));
+
+ return 0;
+
+fail:
+ iounmap(gasket_dev->bar_data[bar_num].virt_base);
+ release_mem_region(gasket_dev->bar_data[bar_num].phys_base,
+ gasket_dev->bar_data[bar_num].length_bytes);
+ return ret;
+}
+
+/*
+ * Releases PCI BAR mapping.
+ *
+ * A zero-sized or not-mapped BAR will not be unmapped, but is not an error.
+ */
+static void gasket_unmap_pci_bar(struct gasket_dev *dev, int bar_num)
+{
+ ulong base, bytes;
+ struct gasket_internal_desc *internal_desc = dev->internal_desc;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+
+ if (driver_desc->bar_descriptions[bar_num].size == 0 ||
+ !dev->bar_data[bar_num].virt_base)
+ return;
+
+ if (driver_desc->bar_descriptions[bar_num].type != PCI_BAR)
+ return;
+
+ iounmap(dev->bar_data[bar_num].virt_base);
+ dev->bar_data[bar_num].virt_base = NULL;
+
+ base = pci_resource_start(dev->pci_dev, bar_num);
+ if (!base) {
+ dev_err(dev->dev, "cannot get PCI BAR%u base address\n",
+ bar_num);
+ return;
+ }
+
+ bytes = pci_resource_len(dev->pci_dev, bar_num);
+ release_mem_region(base, bytes);
+}
+
+/*
+ * Set up PCI and memory mapping for the specified device.
+ *
+ * Enables the PCI device, reads the BAR registers and sets up pointers to the
+ * device's memory mapped IO space.
+ *
+ * Returns 0 on success and a negative value otherwise.
+ */
+static int gasket_setup_pci(struct pci_dev *pci_dev,
+ struct gasket_dev *gasket_dev)
+{
+ int i, mapped_bars, ret;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret) {
+ dev_err(gasket_dev->dev, "cannot enable PCI device\n");
+ return ret;
+ }
+
+ pci_set_master(pci_dev);
+
+ for (i = 0; i < GASKET_NUM_BARS; i++) {
+ ret = gasket_map_pci_bar(gasket_dev, i);
+ if (ret) {
+ mapped_bars = i;
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ for (i = 0; i < mapped_bars; i++)
+ gasket_unmap_pci_bar(gasket_dev, i);
+
+ pci_disable_device(pci_dev);
+ return -ENOMEM;
+}
+
+/* Unmaps memory and cleans up PCI for the specified device. */
+static void gasket_cleanup_pci(struct gasket_dev *gasket_dev)
+{
+ int i;
+
+ for (i = 0; i < GASKET_NUM_BARS; i++)
+ gasket_unmap_pci_bar(gasket_dev, i);
+
+ pci_disable_device(gasket_dev->pci_dev);
+}
+
+/* Determine the health of the Gasket device. */
+static int gasket_get_hw_status(struct gasket_dev *gasket_dev)
+{
+ int status;
+ int i;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ status = gasket_check_and_invoke_callback_nolock(gasket_dev,
+ driver_desc->device_status_cb);
+ if (status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev, "Hardware reported status %d.\n",
+ status);
+ return status;
+ }
+
+ status = gasket_interrupt_system_status(gasket_dev);
+ if (status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev,
+ "Interrupt system reported status %d.\n", status);
+ return status;
+ }
+
+ for (i = 0; i < driver_desc->num_page_tables; ++i) {
+ status = gasket_page_table_system_status(gasket_dev->page_table[i]);
+ if (status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev,
+ "Page table %d reported status %d.\n",
+ i, status);
+ return status;
+ }
+ }
+
+ return GASKET_STATUS_ALIVE;
+}
+
+static ssize_t
+gasket_write_mappable_regions(char *buf,
+ const struct gasket_driver_desc *driver_desc,
+ int bar_index)
+{
+ int i;
+ ssize_t written;
+ ssize_t total_written = 0;
+ ulong min_addr, max_addr;
+ struct gasket_bar_desc bar_desc =
+ driver_desc->bar_descriptions[bar_index];
+
+ if (bar_desc.permissions == GASKET_NOMAP)
+ return 0;
+ for (i = 0;
+ i < bar_desc.num_mappable_regions && total_written < PAGE_SIZE;
+ i++) {
+ min_addr = bar_desc.mappable_regions[i].start -
+ driver_desc->legacy_mmap_address_offset;
+ max_addr = bar_desc.mappable_regions[i].start -
+ driver_desc->legacy_mmap_address_offset +
+ bar_desc.mappable_regions[i].length_bytes;
+ written = scnprintf(buf, PAGE_SIZE - total_written,
+ "0x%08lx-0x%08lx\n", min_addr, max_addr);
+ total_written += written;
+ buf += written;
+ }
+ return total_written;
+}
+
+static ssize_t gasket_sysfs_data_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret = 0;
+ ssize_t current_written = 0;
+ const struct gasket_driver_desc *driver_desc;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+ const struct gasket_bar_desc *bar_desc;
+ enum gasket_sysfs_attribute_type sysfs_type;
+
+ gasket_dev = gasket_sysfs_get_device_data(device);
+ if (!gasket_dev) {
+ dev_err(device, "No sysfs mapping found for device\n");
+ return 0;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ dev_err(device, "No sysfs attr found for device\n");
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return 0;
+ }
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+
+ sysfs_type =
+ (enum gasket_sysfs_attribute_type)gasket_attr->data.attr_type;
+ switch (sysfs_type) {
+ case ATTR_BAR_OFFSETS:
+ for (i = 0; i < GASKET_NUM_BARS; i++) {
+ bar_desc = &driver_desc->bar_descriptions[i];
+ if (bar_desc->size == 0)
+ continue;
+ current_written =
+ snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
+ (ulong)bar_desc->base);
+ buf += current_written;
+ ret += current_written;
+ }
+ break;
+ case ATTR_BAR_SIZES:
+ for (i = 0; i < GASKET_NUM_BARS; i++) {
+ bar_desc = &driver_desc->bar_descriptions[i];
+ if (bar_desc->size == 0)
+ continue;
+ current_written =
+ snprintf(buf, PAGE_SIZE - ret, "%d: 0x%lx\n", i,
+ (ulong)bar_desc->size);
+ buf += current_written;
+ ret += current_written;
+ }
+ break;
+ case ATTR_DRIVER_VERSION:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ gasket_dev->internal_desc->driver_desc->driver_version);
+ break;
+ case ATTR_FRAMEWORK_VERSION:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ GASKET_FRAMEWORK_VERSION);
+ break;
+ case ATTR_DEVICE_TYPE:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ gasket_dev->internal_desc->driver_desc->name);
+ break;
+ case ATTR_HARDWARE_REVISION:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->hardware_revision);
+ break;
+ case ATTR_PCI_ADDRESS:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", gasket_dev->kobj_name);
+ break;
+ case ATTR_STATUS:
+ ret = snprintf(buf, PAGE_SIZE, "%s\n",
+ gasket_num_name_lookup(gasket_dev->status,
+ gasket_status_name_table));
+ break;
+ case ATTR_IS_DEVICE_OWNED:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->dev_info.ownership.is_owned);
+ break;
+ case ATTR_DEVICE_OWNER:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->dev_info.ownership.owner);
+ break;
+ case ATTR_WRITE_OPEN_COUNT:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n",
+ gasket_dev->dev_info.ownership.write_open_count);
+ break;
+ case ATTR_RESET_COUNT:
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", gasket_dev->reset_count);
+ break;
+ case ATTR_USER_MEM_RANGES:
+ for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ current_written =
+ gasket_write_mappable_regions(buf, driver_desc,
+ i);
+ buf += current_written;
+ ret += current_written;
+ }
+ break;
+ default:
+ dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
+ attr->attr.name);
+ ret = 0;
+ break;
+ }
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return ret;
+}
+
+/* These attributes apply to all Gasket driver instances. */
+static const struct gasket_sysfs_attribute gasket_sysfs_generic_attrs[] = {
+ GASKET_SYSFS_RO(bar_offsets, gasket_sysfs_data_show, ATTR_BAR_OFFSETS),
+ GASKET_SYSFS_RO(bar_sizes, gasket_sysfs_data_show, ATTR_BAR_SIZES),
+ GASKET_SYSFS_RO(driver_version, gasket_sysfs_data_show,
+ ATTR_DRIVER_VERSION),
+ GASKET_SYSFS_RO(framework_version, gasket_sysfs_data_show,
+ ATTR_FRAMEWORK_VERSION),
+ GASKET_SYSFS_RO(device_type, gasket_sysfs_data_show, ATTR_DEVICE_TYPE),
+ GASKET_SYSFS_RO(revision, gasket_sysfs_data_show,
+ ATTR_HARDWARE_REVISION),
+ GASKET_SYSFS_RO(pci_address, gasket_sysfs_data_show, ATTR_PCI_ADDRESS),
+ GASKET_SYSFS_RO(status, gasket_sysfs_data_show, ATTR_STATUS),
+ GASKET_SYSFS_RO(is_device_owned, gasket_sysfs_data_show,
+ ATTR_IS_DEVICE_OWNED),
+ GASKET_SYSFS_RO(device_owner, gasket_sysfs_data_show,
+ ATTR_DEVICE_OWNER),
+ GASKET_SYSFS_RO(write_open_count, gasket_sysfs_data_show,
+ ATTR_WRITE_OPEN_COUNT),
+ GASKET_SYSFS_RO(reset_count, gasket_sysfs_data_show, ATTR_RESET_COUNT),
+ GASKET_SYSFS_RO(user_mem_ranges, gasket_sysfs_data_show,
+ ATTR_USER_MEM_RANGES),
+ GASKET_END_OF_ATTR_ARRAY
+};
+
+/* Add a char device and related info. */
+static int gasket_add_cdev(struct gasket_cdev_info *dev_info,
+ const struct file_operations *file_ops,
+ struct module *owner)
+{
+ int ret;
+
+ cdev_init(&dev_info->cdev, file_ops);
+ dev_info->cdev.owner = owner;
+ ret = cdev_add(&dev_info->cdev, dev_info->devt, 1);
+ if (ret) {
+ dev_err(dev_info->gasket_dev_ptr->dev,
+ "cannot add char device [ret=%d]\n", ret);
+ return ret;
+ }
+ dev_info->cdev_added = 1;
+
+ return 0;
+}
+
+/* Disable device operations. */
+static void gasket_disable_dev(struct gasket_dev *gasket_dev)
+{
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+ int i;
+
+ /* Only delete the device if it has been successfully added. */
+ if (gasket_dev->dev_info.cdev_added)
+ cdev_del(&gasket_dev->dev_info.cdev);
+
+ gasket_dev->status = GASKET_STATUS_DEAD;
+
+ gasket_interrupt_cleanup(gasket_dev);
+
+ for (i = 0; i < driver_desc->num_page_tables; ++i) {
+ if (gasket_dev->page_table[i]) {
+ gasket_page_table_reset(gasket_dev->page_table[i]);
+ gasket_page_table_cleanup(gasket_dev->page_table[i]);
+ }
+ }
+
+ check_and_invoke_callback(gasket_dev, driver_desc->disable_dev_cb);
+}
+
+/*
+ * Registered descriptor lookup.
+ *
+ * Precondition: Called with g_mutex held (to avoid a race on return).
+ * Returns NULL if no matching device was found.
+ */
+static struct gasket_internal_desc *
+lookup_internal_desc(struct pci_dev *pci_dev)
+{
+ int i;
+
+ __must_hold(&g_mutex);
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (g_descs[i].driver_desc &&
+ g_descs[i].driver_desc->pci_id_table &&
+ pci_match_id(g_descs[i].driver_desc->pci_id_table, pci_dev))
+ return &g_descs[i];
+ }
+
+ return NULL;
+}
+
+/*
+ * Verifies that the user has permissions to perform the requested mapping and
+ * that the provided descriptor/range is of adequate size to hold the range to
+ * be mapped.
+ */
+static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma,
+ int bar_permissions)
+{
+ int requested_permissions;
+ /* Always allow sysadmin to access. */
+ if (capable(CAP_SYS_ADMIN))
+ return true;
+
+ /* Never allow non-sysadmins to access a dead device. */
+ if (gasket_dev->status != GASKET_STATUS_ALIVE) {
+ dev_dbg(gasket_dev->dev, "Device is dead.\n");
+ return false;
+ }
+
+ /* Make sure that no wrong flags are set. */
+ requested_permissions =
+ (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
+ if (requested_permissions & ~(bar_permissions)) {
+ dev_dbg(gasket_dev->dev,
+ "Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
+ requested_permissions, bar_permissions);
+ return false;
+ }
+
+ /* Do not allow a non-owner to write. */
+ if ((vma->vm_flags & VM_WRITE) &&
+ !gasket_owned_by_current_tgid(&gasket_dev->dev_info)) {
+ dev_dbg(gasket_dev->dev,
+ "Attempting to mmap a region for write without owning device.\n");
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Verifies that the input address is within the region allocated to coherent
+ * buffer.
+ */
+static bool
+gasket_is_coherent_region(const struct gasket_driver_desc *driver_desc,
+ ulong address)
+{
+ struct gasket_coherent_buffer_desc coh_buff_desc =
+ driver_desc->coherent_buffer_description;
+
+ if (coh_buff_desc.permissions != GASKET_NOMAP) {
+ if ((address >= coh_buff_desc.base) &&
+ (address < coh_buff_desc.base + coh_buff_desc.size)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static int gasket_get_bar_index(const struct gasket_dev *gasket_dev,
+ ulong phys_addr)
+{
+ int i;
+ const struct gasket_driver_desc *driver_desc;
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ for (i = 0; i < GASKET_NUM_BARS; ++i) {
+ struct gasket_bar_desc bar_desc =
+ driver_desc->bar_descriptions[i];
+
+ if (bar_desc.permissions != GASKET_NOMAP) {
+ if (phys_addr >= bar_desc.base &&
+ phys_addr < (bar_desc.base + bar_desc.size)) {
+ return i;
+ }
+ }
+ }
+ /* If we haven't found the address by now, it is invalid. */
+ return -EINVAL;
+}
+
+/*
+ * Sets the actual bounds to map, given the device's mappable region.
+ *
+ * Given the device's mappable region, along with the user-requested mapping
+ * start offset and length of the user region, determine how much of this
+ * mappable region can be mapped into the user's region (start/end offsets),
+ * and the physical offset (phys_offset) into the BAR where the mapping should
+ * begin (either the VMA's or region lower bound).
+ *
+ * In other words, this calculates the overlap between the VMA
+ * (bar_offset, requested_length) and the given gasket_mappable_region.
+ *
+ * Returns true if there's anything to map, and false otherwise.
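+ *
+ * Worked example (illustrative numbers): for a region {start = 0x44000,
+ * length_bytes = 0x1000}, bar_offset = 0x40000 and requested_length =
+ * 0x8000 overlap it, so mappable_region->start = 0x44000, *virt_offset =
+ * 0x4000, and length_bytes = min(0x8000 - 0x4000, 0x1000) = 0x1000.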
+ */
+static bool
+gasket_mm_get_mapping_addrs(const struct gasket_mappable_region *region,
+ ulong bar_offset, ulong requested_length,
+ struct gasket_mappable_region *mappable_region,
+ ulong *virt_offset)
+{
+ ulong range_start = region->start;
+ ulong range_length = region->length_bytes;
+ ulong range_end = range_start + range_length;
+
+ *virt_offset = 0;
+ if (bar_offset + requested_length <= range_start) {
+ /*
+ * If the requested region is completely below the range,
+ * there is nothing to map.
+ */
+ return false;
+ } else if (bar_offset <= range_start) {
+ /* If the bar offset is below this range's start
+ * but the requested length continues into it:
+ * 1) Only map starting from the beginning of this
+ * range's phys. offset, so we don't map unmappable
+ * memory.
+ * 2) The virtual offset to skip is the delta between the bar
+ * offset and the mappable start (range_start - bar_offset,
+ * since range_start is larger).
+ * 3) The map length is the minimum of the mappable
+ * requested length (requested_length - virt_offset)
+ * and the actual mappable length of the range.
+ */
+ mappable_region->start = range_start;
+ *virt_offset = range_start - bar_offset;
+ mappable_region->length_bytes =
+ min(requested_length - *virt_offset, range_length);
+ return true;
+ } else if (bar_offset > range_start &&
+ bar_offset < range_end) {
+ /*
+ * If the bar offset is within this range:
+ * 1) Map starting from the bar offset.
+ * 2) Because there is no forbidden memory between the
+ * bar offset and the range start,
+ * virt_offset is 0.
+ * 3) The map length is the minimum of the requested
+ * length and the remaining length in the buffer
+ * (range_end - bar_offset)
+ */
+ mappable_region->start = bar_offset;
+ *virt_offset = 0;
+ mappable_region->length_bytes =
+ min(requested_length, range_end - bar_offset);
+ return true;
+ }
+
+ /*
+ * If the requested [start] offset is above range_end,
+ * there's nothing to map.
+ */
+ return false;
+}
+
+/*
+ * Calculates the offset where the VMA range begins in its containing BAR.
+ * The offset is written into bar_offset on success.
+ * Returns zero on success, anything else on error.
+ */
+static int gasket_mm_vma_bar_offset(const struct gasket_dev *gasket_dev,
+ const struct vm_area_struct *vma,
+ ulong *bar_offset)
+{
+ ulong raw_offset;
+ int bar_index;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
+ driver_desc->legacy_mmap_address_offset;
+ bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
+ if (bar_index < 0) {
+ dev_err(gasket_dev->dev,
+ "Unable to find matching bar for address 0x%lx\n",
+ raw_offset);
+ trace_gasket_mmap_exit(bar_index);
+ return bar_index;
+ }
+ *bar_offset =
+ raw_offset - driver_desc->bar_descriptions[bar_index].base;
+
+ return 0;
+}
+
+int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma,
+ const struct gasket_mappable_region *map_region)
+{
+ ulong bar_offset;
+ ulong virt_offset;
+ struct gasket_mappable_region mappable_region;
+ int ret;
+
+ if (map_region->length_bytes == 0)
+ return 0;
+
+ ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
+ if (ret)
+ return ret;
+
+ if (!gasket_mm_get_mapping_addrs(map_region, bar_offset,
+ vma->vm_end - vma->vm_start,
+ &mappable_region, &virt_offset))
+ return 1;
+
+ /*
+ * The length passed to zap_vma_ptes MUST BE A MULTIPLE OF
+ * PAGE_SIZE! Trust me. I have the scars.
+ *
+ * Next multiple of y: ceil_div(x, y) * y
+ */
+ zap_vma_ptes(vma, vma->vm_start + virt_offset,
+ DIV_ROUND_UP(mappable_region.length_bytes, PAGE_SIZE) *
+ PAGE_SIZE);
+ return 0;
+}
+EXPORT_SYMBOL(gasket_mm_unmap_region);
+
+/* Maps a virtual address + range to a physical offset of a BAR. */
+static enum do_map_region_status
+do_map_region(const struct gasket_dev *gasket_dev, struct vm_area_struct *vma,
+ struct gasket_mappable_region *mappable_region)
+{
+ /* Maximum size of a single call to io_remap_pfn_range. */
+ /* I pulled this number out of thin air. */
+ const ulong max_chunk_size = 64 * 1024 * 1024;
+ ulong chunk_size, mapped_bytes = 0;
+
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+
+ ulong bar_offset, virt_offset;
+ struct gasket_mappable_region region_to_map;
+ ulong phys_offset, map_length;
+ ulong virt_base, phys_base;
+ int bar_index, ret;
+
+ ret = gasket_mm_vma_bar_offset(gasket_dev, vma, &bar_offset);
+ if (ret)
+ return DO_MAP_REGION_INVALID;
+
+ if (!gasket_mm_get_mapping_addrs(mappable_region, bar_offset,
+ vma->vm_end - vma->vm_start,
+ &region_to_map, &virt_offset))
+ return DO_MAP_REGION_INVALID;
+ phys_offset = region_to_map.start;
+ map_length = region_to_map.length_bytes;
+
+ virt_base = vma->vm_start + virt_offset;
+ bar_index =
+ gasket_get_bar_index(gasket_dev,
+ (vma->vm_pgoff << PAGE_SHIFT) +
+ driver_desc->legacy_mmap_address_offset);
+ phys_base = gasket_dev->bar_data[bar_index].phys_base + phys_offset;
+ while (mapped_bytes < map_length) {
+ /*
+ * io_remap_pfn_range can take a while, so we chunk its
+ * calls and call cond_resched between each.
+ */
+ chunk_size = min(max_chunk_size, map_length - mapped_bytes);
+
+ cond_resched();
+ ret = io_remap_pfn_range(vma, virt_base + mapped_bytes,
+ (phys_base + mapped_bytes) >>
+ PAGE_SHIFT, chunk_size,
+ vma->vm_page_prot);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Error remapping PFN range.\n");
+ goto fail;
+ }
+ mapped_bytes += chunk_size;
+ }
+
+ return DO_MAP_REGION_SUCCESS;
+
+fail:
+ /* Unmap the partial chunk we mapped. */
+ mappable_region->length_bytes = mapped_bytes;
+ if (gasket_mm_unmap_region(gasket_dev, vma, mappable_region))
+ dev_err(gasket_dev->dev,
+ "Error unmapping partial region 0x%lx (0x%lx bytes)\n",
+ (ulong)virt_offset,
+ (ulong)mapped_bytes);
+
+ return DO_MAP_REGION_FAILURE;
+}
+
+/* Map a region of coherent memory. */
+static int gasket_mmap_coherent(struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma)
+{
+ const struct gasket_driver_desc *driver_desc =
+ gasket_dev->internal_desc->driver_desc;
+ const ulong requested_length = vma->vm_end - vma->vm_start;
+ int ret;
+ ulong permissions;
+
+ if (requested_length == 0 || requested_length >
+ gasket_dev->coherent_buffer.length_bytes) {
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ permissions = driver_desc->coherent_buffer_description.permissions;
+ if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
+ dev_err(gasket_dev->dev, "Permission checking failed.\n");
+ trace_gasket_mmap_exit(-EPERM);
+ return -EPERM;
+ }
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ ret = remap_pfn_range(vma, vma->vm_start,
+ (gasket_dev->coherent_buffer.phys_base) >>
+ PAGE_SHIFT, requested_length, vma->vm_page_prot);
+ if (ret) {
+ dev_err(gasket_dev->dev, "Error remapping PFN range err=%d.\n",
+ ret);
+ trace_gasket_mmap_exit(ret);
+ return ret;
+ }
+
+ /* Record the user virtual to dma_address mapping that was
+ * created by the kernel.
+ */
+ gasket_set_user_virt(gasket_dev, requested_length,
+ gasket_dev->coherent_buffer.phys_base,
+ vma->vm_start);
+ return 0;
+}
+
+/* Map a device's BARs into user space. */
+static int gasket_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int i, ret;
+ int bar_index;
+ int has_mapped_anything = 0;
+ ulong permissions;
+ ulong raw_offset, vma_size;
+ bool is_coherent_region;
+ const struct gasket_driver_desc *driver_desc;
+ struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
+ const struct gasket_bar_desc *bar_desc;
+ struct gasket_mappable_region *map_regions = NULL;
+ int num_map_regions = 0;
+ enum do_map_region_status map_status;
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+
+ if (vma->vm_start & ~PAGE_MASK) {
+ dev_err(gasket_dev->dev,
+ "Base address not page-aligned: 0x%lx\n",
+ vma->vm_start);
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Calculate the offset of this range into physical mem. */
+ raw_offset = (vma->vm_pgoff << PAGE_SHIFT) +
+ driver_desc->legacy_mmap_address_offset;
+ vma_size = vma->vm_end - vma->vm_start;
+ trace_gasket_mmap_entry(gasket_dev->dev_info.name, raw_offset,
+ vma_size);
+
+ /*
+ * Check if the raw offset is within a bar region. If not, check if it
+ * is a coherent region.
+ */
+ bar_index = gasket_get_bar_index(gasket_dev, raw_offset);
+ is_coherent_region = gasket_is_coherent_region(driver_desc, raw_offset);
+ if (bar_index < 0 && !is_coherent_region) {
+ dev_err(gasket_dev->dev,
+ "Unable to find matching bar for address 0x%lx\n",
+ raw_offset);
+ trace_gasket_mmap_exit(bar_index);
+ return bar_index;
+ }
+ if (bar_index >= 0 && is_coherent_region) {
+ dev_err(gasket_dev->dev,
+ "address 0x%lx matches both a BAR and the coherent buffer\n",
+ raw_offset);
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ vma->vm_private_data = gasket_dev;
+
+ if (is_coherent_region)
+ return gasket_mmap_coherent(gasket_dev, vma);
+
+ /* Everything in the rest of this function is for normal BAR mapping. */
+
+ /*
+ * Subtract the base of the bar from the raw offset to get the
+ * memory location within the bar to map.
+ */
+ bar_desc = &driver_desc->bar_descriptions[bar_index];
+ permissions = bar_desc->permissions;
+ if (!gasket_mmap_has_permissions(gasket_dev, vma, permissions)) {
+ dev_err(gasket_dev->dev, "Permission checking failed.\n");
+ trace_gasket_mmap_exit(-EPERM);
+ return -EPERM;
+ }
+
+ if (driver_desc->get_mappable_regions_cb) {
+ ret = driver_desc->get_mappable_regions_cb(gasket_dev,
+ bar_index,
+ &map_regions,
+ &num_map_regions);
+ if (ret)
+ return ret;
+ } else {
+ if (!gasket_mmap_has_permissions(gasket_dev, vma,
+ bar_desc->permissions)) {
+ dev_err(gasket_dev->dev,
+ "Permission checking failed.\n");
+ trace_gasket_mmap_exit(-EPERM);
+ return -EPERM;
+ }
+ num_map_regions = bar_desc->num_mappable_regions;
+ map_regions = kcalloc(num_map_regions,
+ sizeof(*bar_desc->mappable_regions),
+ GFP_KERNEL);
+ if (map_regions) {
+ memcpy(map_regions, bar_desc->mappable_regions,
+ num_map_regions *
+ sizeof(*bar_desc->mappable_regions));
+ }
+ }
+
+ if (!map_regions || num_map_regions == 0) {
+ dev_err(gasket_dev->dev, "No mappable regions returned!\n");
+ kfree(map_regions);
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ /* Marks the VMA's pages as uncacheable. */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ for (i = 0; i < num_map_regions; i++) {
+ map_status = do_map_region(gasket_dev, vma, &map_regions[i]);
+ /* Try the next region if this one was not mappable. */
+ if (map_status == DO_MAP_REGION_INVALID)
+ continue;
+ if (map_status == DO_MAP_REGION_FAILURE) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ has_mapped_anything = 1;
+ }
+
+ kfree(map_regions);
+
+ /* If we could not map any memory, the request was invalid. */
+ if (!has_mapped_anything) {
+ dev_err(gasket_dev->dev,
+ "Map request did not contain a valid region.\n");
+ trace_gasket_mmap_exit(-EINVAL);
+ return -EINVAL;
+ }
+
+ trace_gasket_mmap_exit(0);
+ return 0;
+
+fail:
+ /* Need to unmap any mapped ranges. */
+ num_map_regions = i;
+ for (i = 0; i < num_map_regions; i++)
+ if (gasket_mm_unmap_region(gasket_dev, vma,
+ &map_regions[i]))
+ dev_err(gasket_dev->dev, "Error unmapping range %d.\n",
+ i);
+ kfree(map_regions);
+
+ return ret;
+}
+
+/*
+ * Open the char device file.
+ *
+ * If the open is for writing, and the device is not owned, this process becomes
+ * the owner. If the open is for writing and the device is already owned by
+ * some other process, it is an error. If this process is the owner, increment
+ * the open count.
+ *
+ * Returns 0 if successful, a negative error number otherwise.
+ */
+static int gasket_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ struct gasket_dev *gasket_dev;
+ const struct gasket_driver_desc *driver_desc;
+ struct gasket_ownership *ownership;
+ char task_name[TASK_COMM_LEN];
+ struct gasket_cdev_info *dev_info =
+ container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
+ struct pid_namespace *pid_ns = task_active_pid_ns(current);
+ bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
+
+ gasket_dev = dev_info->gasket_dev_ptr;
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ ownership = &dev_info->ownership;
+ get_task_comm(task_name, current);
+ filp->private_data = gasket_dev;
+ inode->i_size = 0;
+
+ dev_dbg(gasket_dev->dev,
+ "Attempting to open with tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
+ current->tgid, task_name, filp->f_mode,
+ (filp->f_mode & FMODE_WRITE) != 0, is_root);
+
+ /* Always allow non-writing accesses. */
+ if (!(filp->f_mode & FMODE_WRITE)) {
+ dev_dbg(gasket_dev->dev, "Allowing read-only opening.\n");
+ return 0;
+ }
+
+ mutex_lock(&gasket_dev->mutex);
+
+ dev_dbg(gasket_dev->dev,
+ "Current owner open count (owning tgid %u): %d.\n",
+ ownership->owner, ownership->write_open_count);
+
+ /* Opening a node owned by another TGID is an error (unless root) */
+ if (ownership->is_owned && ownership->owner != current->tgid &&
+ !is_root) {
+ dev_err(gasket_dev->dev,
+ "Process %u is opening a node held by %u.\n",
+ current->tgid, ownership->owner);
+ mutex_unlock(&gasket_dev->mutex);
+ return -EPERM;
+ }
+
+ /* If the node is not owned, assign it to the current TGID. */
+ if (!ownership->is_owned) {
+ ret = gasket_check_and_invoke_callback_nolock(gasket_dev,
+ driver_desc->device_open_cb);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Error in device open cb: %d\n", ret);
+ mutex_unlock(&gasket_dev->mutex);
+ return ret;
+ }
+ ownership->is_owned = 1;
+ ownership->owner = current->tgid;
+ dev_dbg(gasket_dev->dev, "Device owner is now tgid %u\n",
+ ownership->owner);
+ }
+
+ ownership->write_open_count++;
+
+ dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
+ ownership->owner, ownership->write_open_count);
+
+ mutex_unlock(&gasket_dev->mutex);
+ return 0;
+}
+
+/*
+ * Called on a close of the device file. If this process is the owner,
+ * decrement the open count. On last close by the owner, free up buffers and
+ * eventfd contexts, and release ownership.
+ *
+ * Returns 0 if successful, a negative error number otherwise.
+ */
+static int gasket_release(struct inode *inode, struct file *file)
+{
+ int i;
+ struct gasket_dev *gasket_dev;
+ struct gasket_ownership *ownership;
+ const struct gasket_driver_desc *driver_desc;
+ char task_name[TASK_COMM_LEN];
+ struct gasket_cdev_info *dev_info =
+ container_of(inode->i_cdev, struct gasket_cdev_info, cdev);
+ struct pid_namespace *pid_ns = task_active_pid_ns(current);
+ bool is_root = ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN);
+
+ gasket_dev = dev_info->gasket_dev_ptr;
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ ownership = &dev_info->ownership;
+ get_task_comm(task_name, current);
+ mutex_lock(&gasket_dev->mutex);
+
+ dev_dbg(gasket_dev->dev,
+ "Releasing device node. Call origin: tgid %u (%s) (f_mode: 0%03o, fmode_write: %d, is_root: %u)\n",
+ current->tgid, task_name, file->f_mode,
+ (file->f_mode & FMODE_WRITE) != 0, is_root);
+ dev_dbg(gasket_dev->dev, "Current open count (owning tgid %u): %d\n",
+ ownership->owner, ownership->write_open_count);
+
+ if (file->f_mode & FMODE_WRITE) {
+ ownership->write_open_count--;
+ if (ownership->write_open_count == 0) {
+ dev_dbg(gasket_dev->dev, "Device is now free\n");
+ ownership->is_owned = 0;
+ ownership->owner = 0;
+
+ /* Force a chip reset before we unmap the page tables. */
+ if (driver_desc->device_reset_cb)
+ driver_desc->device_reset_cb(gasket_dev);
+
+ for (i = 0; i < driver_desc->num_page_tables; ++i) {
+ gasket_page_table_unmap_all(gasket_dev->page_table[i]);
+ gasket_page_table_garbage_collect(gasket_dev->page_table[i]);
+ gasket_free_coherent_memory_all(gasket_dev, i);
+ }
+
+ /* Closes device, enters power save. */
+ gasket_check_and_invoke_callback_nolock(gasket_dev,
+ driver_desc->device_close_cb);
+ }
+ }
+
+ dev_dbg(gasket_dev->dev, "New open count (owning tgid %u): %d\n",
+ ownership->owner, ownership->write_open_count);
+ mutex_unlock(&gasket_dev->mutex);
+ return 0;
+}
+
+/*
+ * Gasket ioctl dispatch function.
+ *
+ * Check if the ioctl is a generic ioctl. If not, pass the ioctl to the
+ * ioctl_handler_cb registered in the driver description.
+ * If the ioctl is a generic ioctl, pass it to gasket_ioctl_handler.
+ */
+static long gasket_ioctl(struct file *filp, uint cmd, ulong arg)
+{
+ struct gasket_dev *gasket_dev;
+ const struct gasket_driver_desc *driver_desc;
+ void __user *argp = (void __user *)arg;
+ char path[256];
+
+ gasket_dev = (struct gasket_dev *)filp->private_data;
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ if (!driver_desc) {
+ dev_dbg(gasket_dev->dev,
+ "Unable to find device descriptor for file %s\n",
+ d_path(&filp->f_path, path, 256));
+ return -ENODEV;
+ }
+
+ if (!gasket_is_supported_ioctl(cmd)) {
+ /*
+ * The ioctl handler is not a standard Gasket callback, since
+ * it requires different arguments. This means we can't use
+ * check_and_invoke_callback.
+ */
+ if (driver_desc->ioctl_handler_cb)
+ return driver_desc->ioctl_handler_cb(filp, cmd, argp);
+
+ dev_dbg(gasket_dev->dev, "Received unknown ioctl 0x%x\n", cmd);
+ return -EINVAL;
+ }
+
+ return gasket_handle_ioctl(filp, cmd, argp);
+}
+
+/* File operations for all Gasket devices. */
+static const struct file_operations gasket_file_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .mmap = gasket_mmap,
+ .open = gasket_open,
+ .release = gasket_release,
+ .unlocked_ioctl = gasket_ioctl,
+};
+
+/* Performs final init and marks the device as active. */
+static int gasket_enable_dev(struct gasket_internal_desc *internal_desc,
+ struct gasket_dev *gasket_dev)
+{
+ int tbl_idx;
+ int ret;
+ const struct gasket_driver_desc *driver_desc =
+ internal_desc->driver_desc;
+
+ ret = gasket_interrupt_init(gasket_dev, driver_desc->name,
+ driver_desc->interrupt_type,
+ driver_desc->interrupts,
+ driver_desc->num_interrupts,
+ driver_desc->interrupt_pack_width,
+ driver_desc->interrupt_bar_index,
+ driver_desc->wire_interrupt_offsets);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Critical failure to allocate interrupts: %d\n", ret);
+ gasket_interrupt_cleanup(gasket_dev);
+ return ret;
+ }
+
+ for (tbl_idx = 0; tbl_idx < driver_desc->num_page_tables; tbl_idx++) {
+ dev_dbg(gasket_dev->dev, "Initializing page table %d.\n",
+ tbl_idx);
+ ret = gasket_page_table_init(&gasket_dev->page_table[tbl_idx],
+ &gasket_dev->bar_data[driver_desc->page_table_bar_index],
+ &driver_desc->page_table_configs[tbl_idx],
+ gasket_dev->dev,
+ gasket_dev->pci_dev);
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Couldn't init page table %d: %d\n",
+ tbl_idx, ret);
+ return ret;
+ }
+ /*
+ * Make sure that the page table is clear and set to simple
+ * addresses.
+ */
+ gasket_page_table_reset(gasket_dev->page_table[tbl_idx]);
+ }
+
+ /*
+ * hardware_revision_cb returns a positive integer (the revision) if
+ * successful.
+ */
+ ret = check_and_invoke_callback(gasket_dev,
+ driver_desc->hardware_revision_cb);
+ if (ret < 0) {
+ dev_err(gasket_dev->dev,
+ "Error getting hardware revision: %d\n", ret);
+ return ret;
+ }
+ gasket_dev->hardware_revision = ret;
+
+ ret = check_and_invoke_callback(gasket_dev, driver_desc->enable_dev_cb);
+ if (ret) {
+ dev_err(gasket_dev->dev, "Error in enable device cb: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* device_status_cb returns a device status, not an error code. */
+ gasket_dev->status = gasket_get_hw_status(gasket_dev);
+ if (gasket_dev->status == GASKET_STATUS_DEAD)
+ dev_err(gasket_dev->dev, "Device reported as unhealthy.\n");
+
+ ret = gasket_add_cdev(&gasket_dev->dev_info, &gasket_file_ops,
+ driver_desc->module);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * PCI subsystem probe function.
+ *
+ * Called when a Gasket device is found. Allocates device metadata, maps device
+ * memory, and calls gasket_enable_dev to prepare the device for active use.
+ *
+ * Returns 0 if successful and a negative value otherwise.
+ */
+static int gasket_pci_probe(struct pci_dev *pci_dev,
+ const struct pci_device_id *id)
+{
+ int ret;
+ const char *kobj_name = dev_name(&pci_dev->dev);
+ struct gasket_internal_desc *internal_desc;
+ struct gasket_dev *gasket_dev;
+ const struct gasket_driver_desc *driver_desc;
+ struct device *parent;
+
+ pr_info("Add Gasket device %s\n", kobj_name);
+
+ mutex_lock(&g_mutex);
+ internal_desc = lookup_internal_desc(pci_dev);
+ mutex_unlock(&g_mutex);
+ if (!internal_desc) {
+ pr_err("PCI probe called for unknown driver type\n");
+ return -ENODEV;
+ }
+
+ driver_desc = internal_desc->driver_desc;
+
+ parent = &pci_dev->dev;
+ ret = gasket_alloc_dev(internal_desc, parent, &gasket_dev, kobj_name);
+ if (ret)
+ return ret;
+ gasket_dev->pci_dev = pci_dev_get(pci_dev);
+ if (IS_ERR_OR_NULL(gasket_dev->dev_info.device)) {
+ pr_err("Cannot create %s device %s [ret = %ld]\n",
+ driver_desc->name, gasket_dev->dev_info.name,
+ PTR_ERR(gasket_dev->dev_info.device));
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ ret = gasket_setup_pci(pci_dev, gasket_dev);
+ if (ret)
+ goto fail2;
+
+ ret = check_and_invoke_callback(gasket_dev, driver_desc->add_dev_cb);
+ if (ret) {
+ dev_err(gasket_dev->dev, "Error in add device cb: %d\n", ret);
+ goto fail2;
+ }
+
+ ret = gasket_sysfs_create_mapping(gasket_dev->dev_info.device,
+ gasket_dev);
+ if (ret)
+ goto fail3;
+
+ /*
+ * Once we've created the mapping structures successfully, attempt to
+ * create a symlink to the pci directory of this object.
+ */
+ ret = sysfs_create_link(&gasket_dev->dev_info.device->kobj,
+ &pci_dev->dev.kobj, dev_name(&pci_dev->dev));
+ if (ret) {
+ dev_err(gasket_dev->dev,
+ "Cannot create sysfs pci link: %d\n", ret);
+ goto fail3;
+ }
+ ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
+ gasket_sysfs_generic_attrs);
+ if (ret)
+ goto fail4;
+
+ ret = check_and_invoke_callback(gasket_dev,
+ driver_desc->sysfs_setup_cb);
+ if (ret) {
+ dev_err(gasket_dev->dev, "Error in sysfs setup cb: %d\n", ret);
+ goto fail5;
+ }
+
+ ret = gasket_enable_dev(internal_desc, gasket_dev);
+ if (ret) {
+ pr_err("cannot setup %s device\n", driver_desc->name);
+ gasket_disable_dev(gasket_dev);
+ goto fail5;
+ }
+
+ return 0;
+
+fail5:
+ check_and_invoke_callback(gasket_dev, driver_desc->sysfs_cleanup_cb);
+fail4:
+fail3:
+ gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
+fail2:
+ gasket_cleanup_pci(gasket_dev);
+ check_and_invoke_callback(gasket_dev, driver_desc->remove_dev_cb);
+ device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
+fail1:
+ gasket_free_dev(gasket_dev);
+ return ret;
+}
+
+/*
+ * PCI subsystem remove function.
+ *
+ * Called to remove a Gasket device. Finds the device in the device list and
+ * cleans up metadata.
+ */
+static void gasket_pci_remove(struct pci_dev *pci_dev)
+{
+ int i;
+ struct gasket_internal_desc *internal_desc;
+ struct gasket_dev *gasket_dev = NULL;
+ const struct gasket_driver_desc *driver_desc;
+ /* Find the device desc. */
+ mutex_lock(&g_mutex);
+ internal_desc = lookup_internal_desc(pci_dev);
+ if (!internal_desc) {
+ mutex_unlock(&g_mutex);
+ return;
+ }
+ mutex_unlock(&g_mutex);
+
+ driver_desc = internal_desc->driver_desc;
+
+ /* Now find the specific device */
+ mutex_lock(&internal_desc->mutex);
+ for (i = 0; i < GASKET_DEV_MAX; i++) {
+ if (internal_desc->devs[i] &&
+ internal_desc->devs[i]->pci_dev == pci_dev) {
+ gasket_dev = internal_desc->devs[i];
+ break;
+ }
+ }
+ mutex_unlock(&internal_desc->mutex);
+
+ if (!gasket_dev)
+ return;
+
+ pr_info("remove %s device %s\n", internal_desc->driver_desc->name,
+ gasket_dev->kobj_name);
+
+ gasket_disable_dev(gasket_dev);
+ gasket_cleanup_pci(gasket_dev);
+
+ check_and_invoke_callback(gasket_dev, driver_desc->sysfs_cleanup_cb);
+ gasket_sysfs_remove_mapping(gasket_dev->dev_info.device);
+
+ check_and_invoke_callback(gasket_dev, driver_desc->remove_dev_cb);
+
+ device_destroy(internal_desc->class, gasket_dev->dev_info.devt);
+ gasket_free_dev(gasket_dev);
+}
+
+/**
+ * Look up a name by number in a num_name table.
+ * @num: Number to look up.
+ * @table: Array of num_name structures, the table for the lookup.
+ *
+ * Description: Searches for num in the table. If found, the
+ * corresponding name is returned; otherwise NULL
+ * is returned.
+ *
+ * The table must have a NULL name pointer at the end.
+ */
+const char *gasket_num_name_lookup(uint num,
+ const struct gasket_num_name *table)
+{
+ uint i = 0;
+
+ while (table[i].snn_name) {
+ if (num == table[i].snn_num)
+ break;
+ ++i;
+ }
+
+ return table[i].snn_name;
+}
+EXPORT_SYMBOL(gasket_num_name_lookup);
+
+int gasket_reset(struct gasket_dev *gasket_dev)
+{
+ int ret;
+
+ mutex_lock(&gasket_dev->mutex);
+ ret = gasket_reset_nolock(gasket_dev);
+ mutex_unlock(&gasket_dev->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_reset);
+
+int gasket_reset_nolock(struct gasket_dev *gasket_dev)
+{
+ int ret;
+ int i;
+ const struct gasket_driver_desc *driver_desc;
+
+ driver_desc = gasket_dev->internal_desc->driver_desc;
+ if (!driver_desc->device_reset_cb)
+ return 0;
+
+ ret = driver_desc->device_reset_cb(gasket_dev);
+ if (ret) {
+ dev_dbg(gasket_dev->dev, "Device reset cb returned %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* Reinitialize the page tables and interrupt framework. */
+ for (i = 0; i < driver_desc->num_page_tables; ++i)
+ gasket_page_table_reset(gasket_dev->page_table[i]);
+
+ ret = gasket_interrupt_reinit(gasket_dev);
+ if (ret) {
+ dev_dbg(gasket_dev->dev, "Unable to reinit interrupts: %d.\n",
+ ret);
+ return ret;
+ }
+
+ /* Get current device health. */
+ gasket_dev->status = gasket_get_hw_status(gasket_dev);
+ if (gasket_dev->status == GASKET_STATUS_DEAD) {
+ dev_dbg(gasket_dev->dev, "Device reported as dead.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(gasket_reset_nolock);
+
+gasket_ioctl_permissions_cb_t
+gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev)
+{
+ return gasket_dev->internal_desc->driver_desc->ioctl_permissions_cb;
+}
+EXPORT_SYMBOL(gasket_get_ioctl_permissions_cb);
+
+/* Get the driver structure for a given gasket_dev.
+ * @dev: pointer to gasket_dev, implementing the requested driver.
+ */
+const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev)
+{
+ return dev->internal_desc->driver_desc;
+}
+
+/* Get the device structure for a given gasket_dev.
+ * @dev: pointer to gasket_dev, implementing the requested driver.
+ */
+struct device *gasket_get_device(struct gasket_dev *dev)
+{
+ return dev->dev;
+}
+
+/**
+ * Sleep-waits on a device register.
+ * @gasket_dev: Device struct.
+ * @bar: BAR containing the register.
+ * @offset: Register offset.
+ * @mask: Register mask.
+ * @val: Expected value.
+ * @max_retries: Maximum number of checks before timing out.
+ * @delay_ms: Delay between checks, in milliseconds.
+ *
+ * Description: Polls the register, sleeping between checks, until a
+ * specific combination of bits is set or the retries are exhausted.
+ */
+int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
+ u64 offset, u64 mask, u64 val,
+ uint max_retries, u64 delay_ms)
+{
+ uint retries = 0;
+ u64 tmp;
+
+ while (retries < max_retries) {
+ tmp = gasket_dev_read_64(gasket_dev, bar, offset);
+ if ((tmp & mask) == val)
+ return 0;
+ msleep(delay_ms);
+ retries++;
+ }
+ dev_dbg(gasket_dev->dev, "%s timeout: reg %llx timeout (%llu ms)\n",
+ __func__, offset, max_retries * delay_ms);
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(gasket_wait_with_reschedule);
+
+/* See gasket_core.h for description. */
+int gasket_register_device(const struct gasket_driver_desc *driver_desc)
+{
+ int i, ret;
+ int desc_idx = -1;
+ struct gasket_internal_desc *internal;
+
+ pr_info("Initializing Gasket framework device\n");
+ /* Check for duplicates and find a free slot. */
+ mutex_lock(&g_mutex);
+
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (g_descs[i].driver_desc == driver_desc) {
+ pr_err("%s driver already loaded/registered\n",
+ driver_desc->name);
+ mutex_unlock(&g_mutex);
+ return -EBUSY;
+ }
+ }
+
+ /* This and the above loop could be combined, but this reads easier. */
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (!g_descs[i].driver_desc) {
+ g_descs[i].driver_desc = driver_desc;
+ desc_idx = i;
+ break;
+ }
+ }
+ mutex_unlock(&g_mutex);
+
+ pr_info("Loaded %s driver, framework version %s\n",
+ driver_desc->name, GASKET_FRAMEWORK_VERSION);
+
+ if (desc_idx == -1) {
+ pr_err("Too many Gasket drivers loaded: %d\n",
+ GASKET_FRAMEWORK_DESC_MAX);
+ return -EBUSY;
+ }
+
+ /* Internal structure setup. */
+ pr_debug("Performing initial internal structure setup.\n");
+ internal = &g_descs[desc_idx];
+ mutex_init(&internal->mutex);
+ memset(internal->devs, 0, sizeof(struct gasket_dev *) * GASKET_DEV_MAX);
+ memset(&internal->pci, 0, sizeof(internal->pci));
+ internal->pci.name = driver_desc->name;
+ internal->pci.id_table = driver_desc->pci_id_table;
+ internal->pci.probe = gasket_pci_probe;
+ internal->pci.remove = gasket_pci_remove;
+ internal->class =
+ class_create(driver_desc->module, driver_desc->name);
+
+ if (IS_ERR(internal->class)) {
+ pr_err("Cannot register %s class [ret=%ld]\n",
+ driver_desc->name, PTR_ERR(internal->class));
+ ret = PTR_ERR(internal->class);
+ goto unregister_gasket_driver;
+ }
+
+ /*
+ * Not using pci_register_driver() (without underscores), as it
+ * depends on KBUILD_MODNAME, and this is a shared file.
+ */
+ pr_debug("Registering PCI driver.\n");
+ ret = __pci_register_driver(&internal->pci, driver_desc->module,
+ driver_desc->name);
+ if (ret) {
+ pr_err("cannot register pci driver [ret=%d]\n", ret);
+ goto fail1;
+ }
+
+ pr_debug("Registering char driver.\n");
+ ret = register_chrdev_region(MKDEV(driver_desc->major,
+ driver_desc->minor), GASKET_DEV_MAX,
+ driver_desc->name);
+ if (ret) {
+ pr_err("cannot register char driver [ret=%d]\n", ret);
+ goto fail2;
+ }
+
+ pr_info("Driver registered successfully.\n");
+ return 0;
+
+fail2:
+ pci_unregister_driver(&internal->pci);
+
+fail1:
+ class_destroy(internal->class);
+
+unregister_gasket_driver:
+ mutex_lock(&g_mutex);
+ g_descs[desc_idx].driver_desc = NULL;
+ mutex_unlock(&g_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_register_device);
+
+/* See gasket_core.h for description. */
+void gasket_unregister_device(const struct gasket_driver_desc *driver_desc)
+{
+ int i, desc_idx;
+ struct gasket_internal_desc *internal_desc = NULL;
+
+ mutex_lock(&g_mutex);
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ if (g_descs[i].driver_desc == driver_desc) {
+ internal_desc = &g_descs[i];
+ desc_idx = i;
+ break;
+ }
+ }
+ mutex_unlock(&g_mutex);
+
+ if (!internal_desc) {
+ pr_err("request to unregister unknown desc: %s, %d:%d\n",
+ driver_desc->name, driver_desc->major,
+ driver_desc->minor);
+ return;
+ }
+
+ unregister_chrdev_region(MKDEV(driver_desc->major, driver_desc->minor),
+ GASKET_DEV_MAX);
+
+ pci_unregister_driver(&internal_desc->pci);
+
+ class_destroy(internal_desc->class);
+
+ /* Finally, effectively "remove" the driver. */
+ mutex_lock(&g_mutex);
+ g_descs[desc_idx].driver_desc = NULL;
+ mutex_unlock(&g_mutex);
+
+ pr_info("removed %s driver\n", driver_desc->name);
+}
+EXPORT_SYMBOL(gasket_unregister_device);
+
+static int __init gasket_init(void)
+{
+ int i;
+
+ pr_info("Performing one-time init of the Gasket framework.\n");
+ /* Check for duplicates and find a free slot. */
+ mutex_lock(&g_mutex);
+ for (i = 0; i < GASKET_FRAMEWORK_DESC_MAX; i++) {
+ g_descs[i].driver_desc = NULL;
+ mutex_init(&g_descs[i].mutex);
+ }
+
+ gasket_sysfs_init();
+
+ mutex_unlock(&g_mutex);
+ return 0;
+}
+
+static void __exit gasket_exit(void)
+{
+ /* No deinit/dealloc needed at present. */
+ pr_info("Removing Gasket framework module.\n");
+}
+MODULE_DESCRIPTION("Google Gasket driver framework");
+MODULE_VERSION(GASKET_FRAMEWORK_VERSION);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rob Springer <rspringer@google.com>");
+module_init(gasket_init);
+module_exit(gasket_exit);
diff --git a/drivers/staging/gasket/gasket_core.h b/drivers/staging/gasket/gasket_core.h
new file mode 100644
index 00000000000000..67f5960943a8a0
--- /dev/null
+++ b/drivers/staging/gasket/gasket_core.h
@@ -0,0 +1,707 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Gasket generic driver. Defines the set of data types and functions necessary
+ * to define a driver using the Gasket generic driver framework.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __GASKET_CORE_H__
+#define __GASKET_CORE_H__
+
+#include <linux/cdev.h>
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "gasket_constants.h"
+
+/**
+ * struct gasket_num_name - Map numbers to names.
+ * @snn_num: Number.
+ * @snn_name: Name associated with the number, a char pointer.
+ *
+ * This structure maps numbers to names. It is used to provide printable enum
+ * names, e.g. {0, "DEAD"} or {1, "ALIVE"}.
+ */
+struct gasket_num_name {
+ uint snn_num;
+ const char *snn_name;
+};
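+
+/*
+ * Illustrative sketch (not part of the framework itself): a lookup table
+ * for gasket_num_name_lookup. The table must end with a NULL name entry;
+ * the GASKET_STATUS_* values stand in for a driver's own numbers.
+ *
+ *	static const struct gasket_num_name gasket_status_names[] = {
+ *		{ GASKET_STATUS_DEAD, "DEAD" },
+ *		{ GASKET_STATUS_ALIVE, "ALIVE" },
+ *		{ 0, NULL },
+ *	};
+ */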
+
+/*
+ * Register location for packed interrupts.
+ * Each value indicates the location of an interrupt field (in units of
+ * gasket_driver_desc->interrupt_pack_width) within the containing register.
+ * In other words, this indicates the shift to use when creating a mask to
+ * extract/set bits within a register for a given interrupt.
+ */
+enum gasket_interrupt_packing {
+ PACK_0 = 0,
+ PACK_1 = 1,
+ PACK_2 = 2,
+ PACK_3 = 3,
+ UNPACKED = 4,
+};
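+
+/*
+ * Worked example (a pack_width of 7 is hypothetical): an interrupt with
+ * packing PACK_2 has its index written at a shift of 2 * 7 = 14 bits
+ * within its register, while an UNPACKED interrupt owns the whole
+ * register and needs no shift.
+ */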
+
+/* Type of the interrupt supported by the device. */
+enum gasket_interrupt_type {
+ PCI_MSIX = 0,
+ PCI_MSI = 1,
+ PLATFORM_WIRE = 2,
+};
+
+/*
+ * Used to describe a Gasket interrupt. Contains an interrupt index, a register,
+ * and packing data for that interrupt. The register and packing data
+ * fields are relevant only for PCI_MSIX interrupt type and can be
+ * set to 0 for everything else.
+ */
+struct gasket_interrupt_desc {
+ /* Device-wide interrupt index/number. */
+ int index;
+ /* The register offset controlling this interrupt. */
+ u64 reg;
+ /* The location of this interrupt inside register reg, if packed. */
+ int packing;
+};
+
+/* Offsets to the wire interrupt handling registers */
+struct gasket_wire_interrupt_offsets {
+ u64 pending_bit_array;
+ u64 mask_array;
+};
+
+/*
+ * This enum is used to identify memory regions being part of the physical
+ * memory that belongs to a device.
+ */
+enum mappable_area_type {
+ PCI_BAR = 0, /* Default */
+ BUS_REGION, /* For SYSBUS devices, e.g. AXI */
+ COHERENT_MEMORY
+};
+
+/*
+ * Metadata for each BAR mapping.
+ * Tracks PCI memory, I/O space, AXI, and coherent memory areas: memory
+ * objects that can be referenced in the device's mmap function.
+ */
+struct gasket_bar_data {
+ /* Virtual base address. */
+ u8 __iomem *virt_base;
+
+ /* Physical base address. */
+ ulong phys_base;
+
+ /* Length of the mapping. */
+ ulong length_bytes;
+
+ /* Type of mappable area */
+ enum mappable_area_type type;
+};
+
+/* Maintains device open ownership data. */
+struct gasket_ownership {
+ /* 1 if the device is owned, 0 otherwise. */
+ int is_owned;
+
+ /* TGID of the owner. */
+ pid_t owner;
+
+ /* Count of current device opens in write mode. */
+ int write_open_count;
+};
+
+/* Page table modes of operation. */
+enum gasket_page_table_mode {
+ /* The page table is partitionable as normal, all simple by default. */
+ GASKET_PAGE_TABLE_MODE_NORMAL,
+
+ /* All entries are always simple. */
+ GASKET_PAGE_TABLE_MODE_SIMPLE,
+
+ /* All entries are always extended. No extended bit is used. */
+ GASKET_PAGE_TABLE_MODE_EXTENDED,
+};
+
+/* Page table configuration. One per table. */
+struct gasket_page_table_config {
+ /* The identifier/index of this page table. */
+ int id;
+
+ /* The operation mode of this page table. */
+ enum gasket_page_table_mode mode;
+
+ /* Total (first-level) entries in this page table. */
+ ulong total_entries;
+
+ /* Base register for the page table. */
+ int base_reg;
+
+ /*
+ * Register containing the extended page table. This value is unused in
+ * GASKET_PAGE_TABLE_MODE_SIMPLE and GASKET_PAGE_TABLE_MODE_EXTENDED
+ * modes.
+ */
+ int extended_reg;
+
+ /* The bit index indicating whether a PT entry is extended. */
+ int extended_bit;
+};
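+
+/*
+ * Minimal sketch of a page table config; every value below is
+ * illustrative, not taken from any real device:
+ *
+ *	static const struct gasket_page_table_config example_pt_config = {
+ *		.id = 0,
+ *		.mode = GASKET_PAGE_TABLE_MODE_NORMAL,
+ *		.total_entries = 8192,
+ *		.base_reg = 0x48000,
+ *		.extended_reg = 0x48008,
+ *		.extended_bit = 63,
+ *	};
+ */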
+
+/* Maintains information about a device node. */
+struct gasket_cdev_info {
+ /* The internal name of this device. */
+ char name[GASKET_NAME_MAX];
+
+ /* Device number. */
+ dev_t devt;
+
+ /* Kernel-internal device structure. */
+ struct device *device;
+
+ /* Character device for real. */
+ struct cdev cdev;
+
+ /* Flag indicating if cdev_add has been called for the devices. */
+ int cdev_added;
+
+ /* Pointer to the overall gasket_dev struct for this device. */
+ struct gasket_dev *gasket_dev_ptr;
+
+ /* Ownership data for the device in question. */
+ struct gasket_ownership ownership;
+};
+
+/* Describes the offset and length of mmapable device BAR regions. */
+struct gasket_mappable_region {
+ u64 start;
+ u64 length_bytes;
+};
+
+/* Describe the offset, size, and permissions for a device bar. */
+struct gasket_bar_desc {
+ /*
+ * The size of each PCI BAR range, in bytes. If a value is 0, that BAR
+ * will not be mapped into kernel space at all.
+ * For devices with 64 bit BARs, only elements 0, 2, and 4 should be
+ * populated, and 1, 3, and 5 should be set to 0.
+ * For example, for a device mapping 1M in each of the first two 64-bit
+ * BARs, this field would be set as { 0x100000, 0, 0x100000, 0, 0, 0 }
+ * (one number per bar_desc struct.)
+ */
+ u64 size;
+ /* The permissions for this bar. (Should be VM_WRITE/VM_READ/VM_EXEC,
+ * and can be or'd.) If set to GASKET_NOMAP, the bar will
+ * not be used for mmapping.
+ */
+ ulong permissions;
+ /* The memory address corresponding to the base of this bar, if used. */
+ u64 base;
+ /* The number of mappable regions in this bar. */
+ int num_mappable_regions;
+
+ /* The mappable subregions of this bar. */
+ const struct gasket_mappable_region *mappable_regions;
+
+ /* Type of mappable area */
+ enum mappable_area_type type;
+};
+
+/* Describes the offset, size, and permissions for a coherent buffer. */
+struct gasket_coherent_buffer_desc {
+ /* The size of the coherent buffer. */
+ u64 size;
+
+ /* The permissions for this bar. (Should be VM_WRITE/VM_READ/VM_EXEC,
+ * and can be or'd.) If set to GASKET_NOMAP, the bar will
+ * not be used for mmapping.
+ */
+ ulong permissions;
+
+ /* device side address. */
+ u64 base;
+};
+
+/* Coherent buffer structure. */
+struct gasket_coherent_buffer {
+ /* Virtual base address. */
+ u8 __iomem *virt_base;
+
+ /* Physical base address. */
+ ulong phys_base;
+
+ /* Length of the mapping. */
+ ulong length_bytes;
+};
+
+/* Description of Gasket-specific permissions in the mmap field. */
+enum gasket_mapping_options { GASKET_NOMAP = 0 };
+
+/* This struct represents an undefined bar that should never be mapped. */
+#define GASKET_UNUSED_BAR \
+ { \
+ 0, GASKET_NOMAP, 0, 0, NULL, 0 \
+ }
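+
+/*
+ * Sketch of a bar_descriptions entry; the sizes and offsets are invented
+ * for illustration. One 1MB 64-bit BAR with a single read/write mappable
+ * region, all other BARs unused:
+ *
+ *	static const struct gasket_mappable_region example_regions[] = {
+ *		{ 0x0, 0x100000 },
+ *	};
+ *
+ *	.bar_descriptions = {
+ *		{ 0x100000, VM_READ | VM_WRITE, 0, 1, example_regions,
+ *		  PCI_BAR },
+ *		GASKET_UNUSED_BAR, GASKET_UNUSED_BAR, GASKET_UNUSED_BAR,
+ *		GASKET_UNUSED_BAR, GASKET_UNUSED_BAR,
+ *	},
+ */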
+
+/* Internal data for a Gasket device. See gasket_core.c for more information. */
+struct gasket_internal_desc;
+
+#define MAX_NUM_COHERENT_PAGES 16
+
+/*
+ * Device data for Gasket device instances.
+ *
+ * This structure contains the data required to manage a Gasket device.
+ */
+struct gasket_dev {
+ /* Pointer to the internal driver description for this device. */
+ struct gasket_internal_desc *internal_desc;
+
+ /* Device info */
+ struct device *dev;
+
+ /* PCI subsystem metadata. */
+ struct pci_dev *pci_dev;
+
+ /* This device's index into internal_desc->devs. */
+ int dev_idx;
+
+ /* The name of this device, as reported by the kernel. */
+ char kobj_name[GASKET_NAME_MAX];
+
+ /* Virtual address of mapped BAR memory range. */
+ struct gasket_bar_data bar_data[GASKET_NUM_BARS];
+
+ /* Coherent buffer. */
+ struct gasket_coherent_buffer coherent_buffer;
+
+ /* Number of page tables for this device. */
+ int num_page_tables;
+
+ /* Address translations. Page tables have a private implementation. */
+ struct gasket_page_table *page_table[GASKET_MAX_NUM_PAGE_TABLES];
+
+ /* Interrupt data for this device. */
+ struct gasket_interrupt_data *interrupt_data;
+
+ /* Status for this device - GASKET_STATUS_ALIVE or _DEAD. */
+ uint status;
+
+ /* Number of times this device has been reset. */
+ uint reset_count;
+
+ /* Dev information for the cdev node. */
+ struct gasket_cdev_info dev_info;
+
+ /* Hardware revision value for this device. */
+ int hardware_revision;
+
+ /*
+ * Device-specific data; allocated in gasket_driver_desc.add_dev_cb()
+ * and freed in gasket_driver_desc.remove_dev_cb().
+ */
+ void *cb_data;
+
+ /* Protects access to per-device data (i.e. this structure). */
+ struct mutex mutex;
+
+ /* cdev hash tracking/membership structure, Accel and legacy. */
+ /* Unused until Accel is upstreamed. */
+ struct hlist_node hlist_node;
+ struct hlist_node legacy_hlist_node;
+};
+
+/* Type of the ioctl handler callback. */
+typedef long (*gasket_ioctl_handler_cb_t)(struct file *file, uint cmd,
+ void __user *argp);
+/* Type of the ioctl permissions check callback. See below. */
+typedef int (*gasket_ioctl_permissions_cb_t)(struct file *filp, uint cmd,
+ void __user *argp);
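+
+/*
+ * Illustrative sketch of a device-specific handler matching
+ * gasket_ioctl_handler_cb_t; the ioctl number and helper are
+ * hypothetical:
+ *
+ *	static long example_ioctl_handler(struct file *filp, uint cmd,
+ *					  void __user *argp)
+ *	{
+ *		switch (cmd) {
+ *		case EXAMPLE_IOCTL_DO_THING:
+ *			return example_do_thing(filp, argp);
+ *		default:
+ *			return -EINVAL;
+ *		}
+ *	}
+ */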
+
+/*
+ * Device type descriptor.
+ *
+ * This structure contains device-specific data needed to identify and address a
+ * type of device to be administered via the Gasket generic driver.
+ *
+ * Device IDs are per-driver. In other words, two drivers using the Gasket
+ * framework will each have a distinct device 0 (for example).
+ */
+struct gasket_driver_desc {
+ /* The name of this device type. */
+ const char *name;
+
+ /* The name of this specific device model. */
+ const char *chip_model;
+
+ /* The version of the chip specified in chip_model. */
+ const char *chip_version;
+
+ /* The version of this driver: "1.0.0", "2.1.3", etc. */
+ const char *driver_version;
+
+ /*
+ * Non-zero if we should create "legacy" (device and device-class-
+ * specific) character devices and sysfs nodes.
+ */
+ /* Unused until Accel is upstreamed. */
+ int legacy_support;
+
+ /* Major and minor numbers identifying the device. */
+ int major, minor;
+
+ /* Module structure for this driver. */
+ struct module *module;
+
+ /* PCI ID table. */
+ const struct pci_device_id *pci_id_table;
+
+ /* The number of page tables handled by this driver. */
+ int num_page_tables;
+
+ /* The index of the bar containing the page tables. */
+ int page_table_bar_index;
+
+ /* Registers used to control each page table. */
+ const struct gasket_page_table_config *page_table_configs;
+
+ /* The bit index indicating whether a PT entry is extended. */
+ int page_table_extended_bit;
+
+ /*
+ * Legacy mmap address adjustment for legacy devices only. Should be 0
+ * for any new device.
+ */
+ ulong legacy_mmap_address_offset;
+
+ /* Set of 6 bar descriptions that describe all PCIe bars.
+ * Note that BUS/AXI devices (i.e. non-PCI devices) use these too.
+ */
+ struct gasket_bar_desc bar_descriptions[GASKET_NUM_BARS];
+
+ /*
+ * Coherent buffer description.
+ */
+ struct gasket_coherent_buffer_desc coherent_buffer_description;
+
+ /* Offset of wire interrupt registers. */
+ const struct gasket_wire_interrupt_offsets *wire_interrupt_offsets;
+
+ /* Interrupt type. (One of gasket_interrupt_type). */
+ int interrupt_type;
+
+ /* Index of the bar containing the interrupt registers to program. */
+ int interrupt_bar_index;
+
+ /* Number of interrupts in the gasket_interrupt_desc array */
+ int num_interrupts;
+
+ /* Description of the interrupts for this device. */
+ const struct gasket_interrupt_desc *interrupts;
+
+ /*
+ * If this device packs multiple interrupt->MSI-X mappings into a
+ * single register (i.e., "uses packed interrupts"), only a single bit
+ * width is supported for each interrupt mapping (unpacked/"full-width"
+ * interrupts are always supported). This value specifies that width. If
+ * packed interrupts are not used, this value is ignored.
+ */
+ int interrupt_pack_width;
+
+ /* Driver callback functions - all may be NULL */
+ /*
+ * add_dev_cb: Callback when a device is found.
+ * @dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback should initialize the device-specific cb_data.
+ * Called when a device is found by the driver,
+ * before any BAR ranges have been mapped. If this call fails (returns
+ * nonzero), remove_dev_cb will be called.
+ *
+ */
+ int (*add_dev_cb)(struct gasket_dev *dev);
+
+ /*
+ * remove_dev_cb: Callback for when a device is removed from the system.
+ * @dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback should free data allocated in add_dev_cb.
+ * Called immediately before a device is unregistered by the driver.
+ * All framework-managed resources will have been cleaned up by the time
+ * this callback is invoked (PCI BARs, character devices, ...).
+ */
+ int (*remove_dev_cb)(struct gasket_dev *dev);
+
+ /*
+ * device_open_cb: Callback for when a device node is opened in write
+ * mode.
+ * @dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback should perform device-specific setup that needs to
+ * occur only once when a device is first opened.
+ */
+ int (*device_open_cb)(struct gasket_dev *dev);
+
+ /*
+ * device_release_cb: Callback when a device is closed.
+ * @gasket_dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback is called whenever a device node fd is closed, as
+ * opposed to device_close_cb, which is called when the _last_
+ * descriptor for an open file is closed. This call is intended to
+ * handle any per-user or per-fd cleanup.
+ */
+ int (*device_release_cb)(struct gasket_dev *gasket_dev,
+ struct file *file);
+
+ /*
+ * device_close_cb: Callback for when a device node is closed for the
+ * last time.
+ * @dev: The gasket_dev struct for this driver instance.
+ *
+ * This callback should perform device-specific cleanup that only
+ * needs to occur when the last reference to a device node is closed.
+ *
+ * This call is intended to handle any device-wide cleanup, as opposed
+ * to per-fd cleanup (which should be handled by device_release_cb).
+ */
+ int (*device_close_cb)(struct gasket_dev *dev);
+
+ /*
+ * enable_dev_cb: Callback immediately before enabling the device.
+ * @dev: Pointer to the gasket_dev struct for this driver instance.
+ *
+ * This callback is invoked after the device has been added and all BAR
+ * spaces mapped, immediately before registering and enabling the
+ * [character] device via cdev_add. If this call fails (returns
+ * nonzero), disable_dev_cb will be called.
+ *
+ * Note that the cdev is initialized but not active
+ * (cdev_add has not yet been called) when this callback is invoked.
+ */
+ int (*enable_dev_cb)(struct gasket_dev *dev);
+
+ /*
+ * disable_dev_cb: Callback immediately after disabling the device.
+ * @dev: Pointer to the gasket_dev struct for this driver instance.
+ *
+ * Called during device shutdown, immediately after disabling device
+ * operations via cdev_del.
+ */
+ int (*disable_dev_cb)(struct gasket_dev *dev);
+
+ /*
+ * sysfs_setup_cb: Callback to set up driver-specific sysfs nodes.
+ * @dev: Pointer to the gasket_dev struct for this device.
+ *
+ * Called just before enable_dev_cb.
+ *
+ */
+ int (*sysfs_setup_cb)(struct gasket_dev *dev);
+
+ /*
+ * sysfs_cleanup_cb: Callback to clean up driver-specific sysfs nodes.
+ * @dev: Pointer to the gasket_dev struct for this device.
+ *
+ * Called just before disable_dev_cb.
+ *
+ */
+ int (*sysfs_cleanup_cb)(struct gasket_dev *dev);
+
+ /*
+ * get_mappable_regions_cb: Get descriptors of mappable device memory.
+ * @gasket_dev: Pointer to the struct gasket_dev for this device.
+ * @bar_index: BAR for which to retrieve memory ranges.
+ * @mappable_regions: Out-pointer to the list of mappable regions on the
+ * device/BAR for this process.
+ * @num_mappable_regions: Out-pointer for the size of mappable_regions.
+ *
+ * Called when handling mmap(), this callback is used to determine which
+ * regions of device memory may be mapped by the current process. This
+ * information is then compared to mmap request to determine which
+ * regions to actually map.
+ */
+ int (*get_mappable_regions_cb)(struct gasket_dev *gasket_dev,
+ int bar_index,
+ struct gasket_mappable_region **mappable_regions,
+ int *num_mappable_regions);
+
+ /*
+ * ioctl_permissions_cb: Check permissions for generic ioctls.
+ * @filp: File structure pointer describing this node usage session.
+ * @cmd: ioctl number to handle.
+ * @arg: ioctl-specific data pointer.
+ *
+ * Returns 1 if the ioctl may be executed, 0 otherwise. If this
+ * callback isn't specified, a default routine is used that only
+ * allows the original device opener (i.e., the "owner") to execute
+ * state-affecting ioctls.
+ */
+ gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
+
+ /*
+ * ioctl_handler_cb: Callback to handle device-specific ioctls.
+ * @filp: File structure pointer describing this node usage session.
+ * @cmd: ioctl number to handle.
+ * @arg: ioctl-specific data pointer.
+ *
+ * Invoked whenever an ioctl is called that the generic Gasket
+ * framework doesn't support. If no cb is registered, unknown ioctls
+ * return -EINVAL. Should return an error status (either -EINVAL or
+ * the error result of the ioctl being handled).
+ */
+ gasket_ioctl_handler_cb_t ioctl_handler_cb;
+
+ /*
+ * device_status_cb: Callback to determine device health.
+ * @dev: Pointer to the gasket_dev struct for this device.
+ *
+ * Called to determine if the device is healthy or not. Should return
+ * a member of the gasket_status_type enum.
+ *
+ */
+ int (*device_status_cb)(struct gasket_dev *dev);
+
+ /*
+ * hardware_revision_cb: Get the device's hardware revision.
+ * @dev: Pointer to the gasket_dev struct for this device.
+ *
+ * Called to determine the reported rev of the physical hardware.
+ * Revision should be >0. A negative return value is an error.
+ */
+ int (*hardware_revision_cb)(struct gasket_dev *dev);
+
+ /*
+ * device_reset_cb: Reset the hardware in question.
+ * @dev: Pointer to the gasket_dev structure for this device.
+ *
+ * Called by reset ioctls. This function should not
+ * lock the gasket_dev mutex. It should return 0 on success
+ * and an error on failure.
+ */
+ int (*device_reset_cb)(struct gasket_dev *dev);
+};
+
+/*
+ * Register the specified device type with the framework.
+ * @desc: Populated/initialized device type descriptor.
+ *
+ * This function does _not_ take ownership of desc; the underlying struct must
+ * exist until the matching call to gasket_unregister_device.
+ * This function should be called from your driver's module_init function.
+ */
+int gasket_register_device(const struct gasket_driver_desc *desc);
+
+/*
+ * Remove the specified device type from the framework.
+ * @desc: Descriptor for the device type to unregister; it should have been
+ * passed to gasket_register_device in a previous call.
+ *
+ * This function should be called from your driver's module_exit function.
+ */
+void gasket_unregister_device(const struct gasket_driver_desc *desc);
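+
+/*
+ * Usage sketch (the driver name and descriptor fields are hypothetical):
+ * register from module_init, unregister from module_exit, and keep the
+ * descriptor alive in between.
+ *
+ *	static struct gasket_driver_desc example_desc = {
+ *		.name = "example",
+ *		.module = THIS_MODULE,
+ *		... remaining fields ...
+ *	};
+ *
+ *	static int __init example_driver_init(void)
+ *	{
+ *		return gasket_register_device(&example_desc);
+ *	}
+ *
+ *	static void __exit example_driver_exit(void)
+ *	{
+ *		gasket_unregister_device(&example_desc);
+ *	}
+ *
+ *	module_init(example_driver_init);
+ *	module_exit(example_driver_exit);
+ */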
+
+/*
+ * Reset the Gasket device.
+ * @gasket_dev: Gasket device struct.
+ *
+ * Calls device_reset_cb. Returns 0 on success and an error code otherwise.
+ * gasket_reset_nolock will not lock the mutex; gasket_reset will.
+ */
+int gasket_reset(struct gasket_dev *gasket_dev);
+int gasket_reset_nolock(struct gasket_dev *gasket_dev);
+
+/*
+ * Memory management functions. These will likely be spun off into their own
+ * file in the future.
+ */
+
+/* Unmaps the specified mappable region from a VMA. */
+int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
+ struct vm_area_struct *vma,
+ const struct gasket_mappable_region *map_region);
+
+/*
+ * Get the ioctl permissions callback.
+ * @gasket_dev: Gasket device structure.
+ */
+gasket_ioctl_permissions_cb_t
+gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev);
+
+/**
+ * Look up a name by number in a num_name table.
+ * @num: Number to look up.
+ * @table: Array of num_name structures, the table for the lookup.
+ */
+const char *gasket_num_name_lookup(uint num,
+ const struct gasket_num_name *table);
+
+/* Handy inlines */
+static inline u64 gasket_dev_read_64(struct gasket_dev *gasket_dev, int bar,
+ ulong location)
+{
+ return readq(&gasket_dev->bar_data[bar].virt_base[location]);
+}
+
+static inline void gasket_dev_write_64(struct gasket_dev *dev, u64 value,
+ int bar, ulong location)
+{
+ writeq(value, &dev->bar_data[bar].virt_base[location]);
+}
+
+static inline void gasket_dev_write_32(struct gasket_dev *dev, u32 value,
+ int bar, ulong location)
+{
+ writel(value, &dev->bar_data[bar].virt_base[location]);
+}
+
+static inline u32 gasket_dev_read_32(struct gasket_dev *dev, int bar,
+ ulong location)
+{
+ return readl(&dev->bar_data[bar].virt_base[location]);
+}
+
+static inline void gasket_read_modify_write_64(struct gasket_dev *dev, int bar,
+ ulong location, u64 value,
+ u64 mask_width, u64 mask_shift)
+{
+ u64 mask, tmp;
+
+ tmp = gasket_dev_read_64(dev, bar, location);
+ mask = ((1ULL << mask_width) - 1) << mask_shift;
+ tmp = (tmp & ~mask) | (value << mask_shift);
+ gasket_dev_write_64(dev, tmp, bar, location);
+}
+
+static inline void gasket_read_modify_write_32(struct gasket_dev *dev, int bar,
+ ulong location, u32 value,
+ u32 mask_width, u32 mask_shift)
+{
+ u32 mask, tmp;
+
+ tmp = gasket_dev_read_32(dev, bar, location);
+ mask = ((1 << mask_width) - 1) << mask_shift;
+ tmp = (tmp & ~mask) | (value << mask_shift);
+ gasket_dev_write_32(dev, tmp, bar, location);
+}
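+
+/*
+ * Example (the register offset and field layout are invented): update a
+ * 4-bit field at bits [11:8] of a 32-bit register in BAR2 without
+ * touching its neighbors:
+ *
+ *	gasket_read_modify_write_32(gasket_dev, 2, 0x1000, 0x5, 4, 8);
+ *
+ * This reads BAR2 + 0x1000, clears bits [11:8], ors in 0x5 << 8, and
+ * writes the result back.
+ */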
+
+/* Get the Gasket driver structure for a given device. */
+const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev);
+
+/* Get the device structure for a given device. */
+struct device *gasket_get_device(struct gasket_dev *dev);
+
+/* Helper function: sleep-waits on a given set of register bits. */
+int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
+ u64 offset, u64 mask, u64 val,
+ uint max_retries, u64 delay_ms);
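+
+/*
+ * Example (register and bit values are hypothetical): poll a "ready" bit
+ * in BAR0 at offset 0x20, checking up to 100 times with 10 ms between
+ * checks, i.e. roughly a one-second timeout:
+ *
+ *	ret = gasket_wait_with_reschedule(gasket_dev, 0, 0x20, 0x1, 0x1,
+ *					  100, 10);
+ *	if (ret == -ETIMEDOUT)
+ *		dev_warn(gasket_dev->dev, "device never became ready\n");
+ */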
+
+#endif /* __GASKET_CORE_H__ */
diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
new file mode 100644
index 00000000000000..09c3d0747af615
--- /dev/null
+++ b/drivers/staging/gasket/gasket_interrupt.c
@@ -0,0 +1,550 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Google, Inc. */
+
+#include "gasket_interrupt.h"
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+#include "gasket_sysfs.h"
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/printk.h>
+#include <linux/version.h>
+#ifdef GASKET_KERNEL_TRACE_SUPPORT
+#define CREATE_TRACE_POINTS
+#include <trace/events/gasket_interrupt.h>
+#else
+#define trace_gasket_interrupt_event(x, ...)
+#endif
+/* Retry attempts if the requested number of interrupts aren't available. */
+#define MSIX_RETRY_COUNT 3
+
+/* Instance interrupt management data. */
+struct gasket_interrupt_data {
+ /* The name associated with this interrupt data. */
+ const char *name;
+
+ /* Interrupt type. See gasket_interrupt_type in gasket_core.h */
+ int type;
+
+ /* The PCI device [if any] associated with the owning device. */
+ struct pci_dev *pci_dev;
+
+ /* Set to 1 if MSI-X has successfully been configured, 0 otherwise. */
+ int msix_configured;
+
+ /* The number of interrupts requested by the owning device. */
+ int num_interrupts;
+
+ /* A pointer to the interrupt descriptor struct for this device. */
+ const struct gasket_interrupt_desc *interrupts;
+
+ /* The index of the bar into which interrupts should be mapped. */
+ int interrupt_bar_index;
+
+ /* The width of a single interrupt in a packed interrupt register. */
+ int pack_width;
+
+ /* offset of wire interrupt registers */
+ const struct gasket_wire_interrupt_offsets *wire_interrupt_offsets;
+
+ /*
+ * Design-wise, these elements should be bundled together, but
+ * pci_enable_msix's interface requires that they be managed
+ * individually (requires array of struct msix_entry).
+ */
+
+ /* The number of successfully configured interrupts. */
+ int num_configured;
+
+ /* The MSI-X data for each requested/configured interrupt. */
+ struct msix_entry *msix_entries;
+
+ /* The eventfd "callback" data for each interrupt. */
+ struct eventfd_ctx **eventfd_ctxs;
+
+ /* The number of times each interrupt has been called. */
+ ulong *interrupt_counts;
+
+ /* Linux IRQ number. */
+ int irq;
+};
+
+/* Structures to display interrupt counts in sysfs. */
+enum interrupt_sysfs_attribute_type {
+ ATTR_INTERRUPT_COUNTS,
+};
+
+/* Set up device registers for interrupt handling. */
+static void gasket_interrupt_setup(struct gasket_dev *gasket_dev)
+{
+ int i;
+ int pack_shift;
+ ulong mask;
+ ulong value;
+ struct gasket_interrupt_data *interrupt_data =
+ gasket_dev->interrupt_data;
+
+ if (!interrupt_data) {
+ dev_dbg(gasket_dev->dev, "Interrupt data is not initialized\n");
+ return;
+ }
+
+ dev_dbg(gasket_dev->dev, "Running interrupt setup\n");
+
+ if (interrupt_data->type == PLATFORM_WIRE ||
+ interrupt_data->type == PCI_MSI) {
+ /* Nothing needs to be done for wire or MSI interrupts. */
+ return;
+ }
+
+ if (interrupt_data->type != PCI_MSIX) {
+ dev_dbg(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ interrupt_data->type);
+ return;
+ }
+
+ /* Set up the MSI-X table. */
+
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ /*
+ * If the interrupt is not packed, we can write the index into
+ * the register directly. Otherwise, we need a read-modify-
+ * write and a shift based on the packing index.
+ */
+ dev_dbg(gasket_dev->dev,
+ "Setting up interrupt index %d with register 0x%llx and packing %d\n",
+ interrupt_data->interrupts[i].index,
+ interrupt_data->interrupts[i].reg,
+ interrupt_data->interrupts[i].packing);
+ if (interrupt_data->interrupts[i].packing == UNPACKED) {
+ value = interrupt_data->interrupts[i].index;
+ } else {
+ switch (interrupt_data->interrupts[i].packing) {
+ case PACK_0:
+ pack_shift = 0;
+ break;
+ case PACK_1:
+ pack_shift = interrupt_data->pack_width;
+ break;
+ case PACK_2:
+ pack_shift = 2 * interrupt_data->pack_width;
+ break;
+ case PACK_3:
+ pack_shift = 3 * interrupt_data->pack_width;
+ break;
+ default:
+ dev_dbg(gasket_dev->dev,
+ "Found interrupt description with unknown enum %d\n",
+ interrupt_data->interrupts[i].packing);
+ return;
+ }
+
+ mask = ~(0xFFFF << pack_shift);
+ value = gasket_dev_read_64(gasket_dev,
+ interrupt_data->interrupt_bar_index,
+ interrupt_data->interrupts[i].reg);
+ value &= mask;
+ value |= interrupt_data->interrupts[i].index
+ << pack_shift;
+ }
+ gasket_dev_write_64(gasket_dev, value,
+ interrupt_data->interrupt_bar_index,
+ interrupt_data->interrupts[i].reg);
+ }
+}
+
+static irqreturn_t gasket_msix_interrupt_handler(int irq, void *dev_id)
+{
+ struct eventfd_ctx *ctx;
+ struct gasket_interrupt_data *interrupt_data = dev_id;
+ int interrupt = -1;
+ int i;
+
+ /* If this linear lookup is a problem, we can maintain a map/hash. */
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ if (interrupt_data->msix_entries[i].vector == irq) {
+ interrupt = interrupt_data->msix_entries[i].entry;
+ break;
+ }
+ }
+ if (interrupt == -1) {
+ pr_err("Received unknown irq %d\n", irq);
+ return IRQ_HANDLED;
+ }
+ trace_gasket_interrupt_event(interrupt_data->name, interrupt);
+
+ ctx = interrupt_data->eventfd_ctxs[interrupt];
+ if (ctx)
+ eventfd_signal(ctx, 1);
+
+ ++(interrupt_data->interrupt_counts[interrupt]);
+
+ return IRQ_HANDLED;
+}
+
+static int
+gasket_interrupt_msix_init(struct gasket_interrupt_data *interrupt_data)
+{
+ int ret = 1;
+ int i;
+
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ interrupt_data->msix_entries[i].entry = i;
+ interrupt_data->msix_entries[i].vector = 0;
+ interrupt_data->eventfd_ctxs[i] = NULL;
+ }
+
+ /* Retry MSIX_RETRY_COUNT times if not enough IRQs are available. */
+ for (i = 0; i < MSIX_RETRY_COUNT && ret > 0; i++)
+ ret = pci_enable_msix_exact(interrupt_data->pci_dev,
+ interrupt_data->msix_entries,
+ interrupt_data->num_interrupts);
+
+ if (ret)
+ return ret > 0 ? -EBUSY : ret;
+ interrupt_data->msix_configured = 1;
+
+ for (i = 0; i < interrupt_data->num_interrupts; i++) {
+ ret = request_irq(interrupt_data->msix_entries[i].vector,
+ gasket_msix_interrupt_handler, 0,
+ interrupt_data->name, interrupt_data);
+
+ if (ret) {
+ dev_err(&interrupt_data->pci_dev->dev,
+ "Cannot get IRQ for interrupt %d, vector %d; %d\n",
+ i, interrupt_data->msix_entries[i].vector, ret);
+ return ret;
+ }
+
+ interrupt_data->num_configured++;
+ }
+
+ return 0;
+}
+
+/*
+ * On QCM DragonBoard, we exit gasket_interrupt_msix_init() and kernel interrupt
+ * setup code with MSIX vectors masked. This is wrong because nothing else in
+ * the driver will normally touch the MSIX vectors.
+ *
+ * As a temporary hack, force unmasking here.
+ *
+ * TODO: Figure out why QCM kernel doesn't unmask the MSIX vectors, after
+ * gasket_interrupt_msix_init(), and remove this code.
+ */
+static void force_msix_interrupt_unmasking(struct gasket_dev *gasket_dev)
+{
+ int i;
+#define MSIX_VECTOR_SIZE 16
+#define MSIX_MASK_BIT_OFFSET 12
+#define APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE 0x46800
+ for (i = 0; i < gasket_dev->interrupt_data->num_configured; i++) {
+ /* Check if the MSIX vector is unmasked */
+ ulong location = APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE +
+ MSIX_MASK_BIT_OFFSET + i * MSIX_VECTOR_SIZE;
+ u32 mask =
+ gasket_dev_read_32(gasket_dev,
+ gasket_dev->interrupt_data->interrupt_bar_index,
+ location);
+ if (!(mask & 1))
+ continue;
+ /* Unmask the msix vector (clear 32 bits) */
+ gasket_dev_write_32(gasket_dev, 0,
+ gasket_dev->interrupt_data->interrupt_bar_index,
+ location);
+ }
+#undef MSIX_VECTOR_SIZE
+#undef MSIX_MASK_BIT_OFFSET
+#undef APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE
+}
+
+static ssize_t interrupt_sysfs_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret;
+ ssize_t written = 0, total_written = 0;
+ struct gasket_interrupt_data *interrupt_data;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+ enum interrupt_sysfs_attribute_type sysfs_type;
+
+ gasket_dev = gasket_sysfs_get_device_data(device);
+ if (!gasket_dev) {
+ dev_dbg(device, "No sysfs mapping found for device\n");
+ return 0;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ dev_dbg(device, "No sysfs attr data found for device\n");
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return 0;
+ }
+
+ sysfs_type = (enum interrupt_sysfs_attribute_type)
+ gasket_attr->data.attr_type;
+ interrupt_data = gasket_dev->interrupt_data;
+ switch (sysfs_type) {
+ case ATTR_INTERRUPT_COUNTS:
+ for (i = 0; i < interrupt_data->num_interrupts; ++i) {
+ written =
+ scnprintf(buf, PAGE_SIZE - total_written,
+ "0x%02x: %ld\n", i,
+ interrupt_data->interrupt_counts[i]);
+ total_written += written;
+ buf += written;
+ }
+ ret = total_written;
+ break;
+ default:
+ dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
+ attr->attr.name);
+ ret = 0;
+ break;
+ }
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ gasket_sysfs_put_device_data(device, gasket_dev);
+ return ret;
+}
+
+static struct gasket_sysfs_attribute interrupt_sysfs_attrs[] = {
+ GASKET_SYSFS_RO(interrupt_counts, interrupt_sysfs_show,
+ ATTR_INTERRUPT_COUNTS),
+ GASKET_END_OF_ATTR_ARRAY,
+};
+
+int gasket_interrupt_init(struct gasket_dev *gasket_dev, const char *name,
+ int type,
+ const struct gasket_interrupt_desc *interrupts,
+ int num_interrupts, int pack_width, int bar_index,
+ const struct gasket_wire_interrupt_offsets *wire_int_offsets)
+{
+ int ret;
+ struct gasket_interrupt_data *interrupt_data;
+
+ interrupt_data = kzalloc(sizeof(struct gasket_interrupt_data),
+ GFP_KERNEL);
+ if (!interrupt_data)
+ return -ENOMEM;
+ gasket_dev->interrupt_data = interrupt_data;
+ interrupt_data->name = name;
+ interrupt_data->type = type;
+ interrupt_data->pci_dev = gasket_dev->pci_dev;
+ interrupt_data->num_interrupts = num_interrupts;
+ interrupt_data->interrupts = interrupts;
+ interrupt_data->interrupt_bar_index = bar_index;
+ interrupt_data->pack_width = pack_width;
+ interrupt_data->num_configured = 0;
+ interrupt_data->wire_interrupt_offsets = wire_int_offsets;
+
+ /* Allocate all dynamic structures. */
+ interrupt_data->msix_entries = kcalloc(num_interrupts,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!interrupt_data->msix_entries) {
+ kfree(interrupt_data);
+ return -ENOMEM;
+ }
+
+ interrupt_data->eventfd_ctxs = kcalloc(num_interrupts,
+ sizeof(struct eventfd_ctx *),
+ GFP_KERNEL);
+ if (!interrupt_data->eventfd_ctxs) {
+ kfree(interrupt_data->msix_entries);
+ kfree(interrupt_data);
+ return -ENOMEM;
+ }
+
+ interrupt_data->interrupt_counts = kcalloc(num_interrupts,
+ sizeof(ulong),
+ GFP_KERNEL);
+ if (!interrupt_data->interrupt_counts) {
+ kfree(interrupt_data->eventfd_ctxs);
+ kfree(interrupt_data->msix_entries);
+ kfree(interrupt_data);
+ return -ENOMEM;
+ }
+
+ switch (interrupt_data->type) {
+ case PCI_MSIX:
+ ret = gasket_interrupt_msix_init(interrupt_data);
+ if (ret)
+ break;
+ force_msix_interrupt_unmasking(gasket_dev);
+ break;
+
+ case PCI_MSI:
+ case PLATFORM_WIRE:
+ default:
+ dev_err(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ interrupt_data->type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ /* Failing to set up interrupts will cause the device to report
+ * GASKET_STATUS_LAMED, but it is not fatal.
+ */
+ dev_warn(gasket_dev->dev,
+ "Couldn't initialize interrupts: %d\n", ret);
+ return 0;
+ }
+
+ gasket_interrupt_setup(gasket_dev);
+ gasket_sysfs_create_entries(gasket_dev->dev_info.device,
+ interrupt_sysfs_attrs);
+
+ return 0;
+}
+
+static void
+gasket_interrupt_msix_cleanup(struct gasket_interrupt_data *interrupt_data)
+{
+ int i;
+
+ for (i = 0; i < interrupt_data->num_configured; i++)
+ free_irq(interrupt_data->msix_entries[i].vector,
+ interrupt_data);
+ interrupt_data->num_configured = 0;
+
+ if (interrupt_data->msix_configured)
+ pci_disable_msix(interrupt_data->pci_dev);
+ interrupt_data->msix_configured = 0;
+}
+
+int gasket_interrupt_reinit(struct gasket_dev *gasket_dev)
+{
+ int ret;
+
+ if (!gasket_dev->interrupt_data) {
+ dev_dbg(gasket_dev->dev,
+ "Attempted to reinit uninitialized interrupt data\n");
+ return -EINVAL;
+ }
+
+ switch (gasket_dev->interrupt_data->type) {
+ case PCI_MSIX:
+ gasket_interrupt_msix_cleanup(gasket_dev->interrupt_data);
+ ret = gasket_interrupt_msix_init(gasket_dev->interrupt_data);
+ if (ret)
+ break;
+ force_msix_interrupt_unmasking(gasket_dev);
+ break;
+
+ case PCI_MSI:
+ case PLATFORM_WIRE:
+ default:
+ dev_dbg(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ gasket_dev->interrupt_data->type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ /* Failing to set up MSI-X will cause the device
+ * to report GASKET_STATUS_LAMED, but it is not fatal.
+ */
+ dev_warn(gasket_dev->dev, "Couldn't init MSI-X: %d\n", ret);
+ return 0;
+ }
+
+ gasket_interrupt_setup(gasket_dev);
+
+ return 0;
+}
+
+/* See gasket_interrupt.h for description. */
+int gasket_interrupt_reset_counts(struct gasket_dev *gasket_dev)
+{
+ dev_dbg(gasket_dev->dev, "Clearing interrupt counts\n");
+ memset(gasket_dev->interrupt_data->interrupt_counts, 0,
+ gasket_dev->interrupt_data->num_interrupts *
+ sizeof(*gasket_dev->interrupt_data->interrupt_counts));
+ return 0;
+}
+
+/* See gasket_interrupt.h for description. */
+void gasket_interrupt_cleanup(struct gasket_dev *gasket_dev)
+{
+ struct gasket_interrupt_data *interrupt_data =
+ gasket_dev->interrupt_data;
+ /*
+ * It is possible to get an error code from gasket_interrupt_init
+ * before interrupt_data has been allocated, so check it.
+ */
+ if (!interrupt_data)
+ return;
+
+ switch (interrupt_data->type) {
+ case PCI_MSIX:
+ gasket_interrupt_msix_cleanup(interrupt_data);
+ break;
+
+ case PCI_MSI:
+ case PLATFORM_WIRE:
+ default:
+ dev_dbg(gasket_dev->dev,
+ "Cannot handle unsupported interrupt type %d\n",
+ interrupt_data->type);
+ }
+
+ kfree(interrupt_data->interrupt_counts);
+ kfree(interrupt_data->eventfd_ctxs);
+ kfree(interrupt_data->msix_entries);
+ kfree(interrupt_data);
+ gasket_dev->interrupt_data = NULL;
+}
+
+int gasket_interrupt_system_status(struct gasket_dev *gasket_dev)
+{
+ if (!gasket_dev->interrupt_data) {
+ dev_dbg(gasket_dev->dev, "Interrupt data is null\n");
+ return GASKET_STATUS_DEAD;
+ }
+
+ if (!gasket_dev->interrupt_data->msix_configured) {
+ dev_dbg(gasket_dev->dev, "Interrupt not initialized\n");
+ return GASKET_STATUS_LAMED;
+ }
+
+ if (gasket_dev->interrupt_data->num_configured !=
+ gasket_dev->interrupt_data->num_interrupts) {
+ dev_dbg(gasket_dev->dev,
+ "Not all interrupts were configured\n");
+ return GASKET_STATUS_LAMED;
+ }
+
+ return GASKET_STATUS_ALIVE;
+}
+
+int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt, int event_fd)
+{
+ struct eventfd_ctx *ctx;
+
+ /* Validate the interrupt index before taking a reference on the
+ * eventfd, so the eventfd context is not leaked on the error path.
+ */
+ if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
+ return -EINVAL;
+
+ ctx = eventfd_ctx_fdget(event_fd);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ interrupt_data->eventfd_ctxs[interrupt] = ctx;
+ return 0;
+}
+
+int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt)
+{
+ if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
+ return -EINVAL;
+
+ interrupt_data->eventfd_ctxs[interrupt] = NULL;
+ return 0;
+}
diff --git a/drivers/staging/gasket/gasket_interrupt.h b/drivers/staging/gasket/gasket_interrupt.h
new file mode 100644
index 00000000000000..835af439e96a9c
--- /dev/null
+++ b/drivers/staging/gasket/gasket_interrupt.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Gasket common interrupt module. Defines functions for enabling
+ * eventfd-triggered interrupts between a Gasket device and a host process.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+#ifndef __GASKET_INTERRUPT_H__
+#define __GASKET_INTERRUPT_H__
+
+#include <linux/eventfd.h>
+#include <linux/pci.h>
+
+#include "gasket_core.h"
+
+/* Note that this currently assumes that device interrupts are a dense set,
+ * numbered from 0 to (num_interrupts - 1). Should this change, these
+ * APIs will have to be updated.
+ */
+
+/* Opaque type used to hold interrupt subsystem data. */
+struct gasket_interrupt_data;
+
+/*
+ * Initialize the interrupt module.
+ * @gasket_dev: The Gasket device structure for the device to be initialized.
+ * @name: The name to associate with these interrupts.
+ * @type: The type of the interrupts (see gasket_interrupt_type).
+ * @interrupts: An array of all interrupt descriptions for this device.
+ * @num_interrupts: The length of the @interrupts array.
+ * @pack_width: The width, in bits, of a single field in a packed interrupt reg.
+ * @bar_index: The BAR containing all interrupt registers.
+ * @wire_int_offsets: Register offsets for wire interrupts, if used.
+ *
+ * Allocates and initializes data to track interrupt state for a device.
+ * After this call, no interrupts will be configured/delivered; call
+ * gasket_interrupt_set_vector[_packed] to associate each interrupt with an
+ * __iomem location, then gasket_interrupt_set_eventfd to associate an eventfd
+ * with an interrupt.
+ *
+ * If num_interrupts interrupts are not available, this call returns a
+ * negative error code; in that case, gasket_interrupt_cleanup should still
+ * be called. Returns 0 on success, which includes devices where interrupts
+ * could not be set up but that are otherwise OK; such devices will report
+ * status LAMED.
+ */
+int gasket_interrupt_init(struct gasket_dev *gasket_dev, const char *name,
+ int type,
+ const struct gasket_interrupt_desc *interrupts,
+ int num_interrupts, int pack_width, int bar_index,
+ const struct gasket_wire_interrupt_offsets *wire_int_offsets);
+
+/*
+ * Clean up a device's interrupt structure.
+ * @gasket_dev: The Gasket information structure for this device.
+ *
+ * Cleans up the device's interrupts and deallocates data.
+ */
+void gasket_interrupt_cleanup(struct gasket_dev *gasket_dev);
+
+/*
+ * Clean up and re-initialize the MSI-x subsystem.
+ * @gasket_dev: The Gasket information structure for this device.
+ *
+ * Performs a teardown of the MSI-x subsystem and re-initializes it. Does not
+ * free the underlying data structures. Returns 0 on success and an error code
+ * on error.
+ */
+int gasket_interrupt_reinit(struct gasket_dev *gasket_dev);
+
+/*
+ * Reset the counts stored in the interrupt subsystem.
+ * @gasket_dev: The Gasket information structure for this device.
+ *
+ * Sets the counts of all interrupts in the subsystem to 0.
+ */
+int gasket_interrupt_reset_counts(struct gasket_dev *gasket_dev);
+
+/*
+ * Associates an eventfd with a device interrupt.
+ * @interrupt_data: Pointer to device interrupt data.
+ * @interrupt: The device interrupt to configure.
+ * @event_fd: The eventfd to associate with the interrupt.
+ *
+ * Prepares the host to receive notifications of device interrupts by
+ * associating @event_fd with @interrupt. After successful configuration,
+ * @event_fd is signaled upon receipt of each corresponding device interrupt.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt, int event_fd);
+
+/*
+ * Removes an interrupt-eventfd association.
+ * @interrupt_data: Pointer to device interrupt data.
+ * @interrupt: The device interrupt to de-associate.
+ *
+ * Removes the eventfd associated with the specified interrupt, if any.
+ */
+int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
+ int interrupt);
+
+/*
+ * The below functions exist for backwards compatibility.
+ * No new uses should be written.
+ */
+/*
+ * Get the health of the interrupt subsystem.
+ * @gasket_dev: The Gasket device struct.
+ *
+ * Returns DEAD if not set up, LAMED if initialization failed, and ALIVE
+ * otherwise.
+ */
+int gasket_interrupt_system_status(struct gasket_dev *gasket_dev);
+
+#endif
diff --git a/drivers/staging/gasket/gasket_ioctl.c b/drivers/staging/gasket/gasket_ioctl.c
new file mode 100644
index 00000000000000..0ca48e688818f3
--- /dev/null
+++ b/drivers/staging/gasket/gasket_ioctl.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Google, Inc. */
+#include "gasket.h"
+#include "gasket_ioctl.h"
+#include "gasket_constants.h"
+#include "gasket_core.h"
+#include "gasket_interrupt.h"
+#include "gasket_page_table.h"
+#include <linux/compiler.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#ifdef GASKET_KERNEL_TRACE_SUPPORT
+#define CREATE_TRACE_POINTS
+#include <trace/events/gasket_ioctl.h>
+#else
+#define trace_gasket_ioctl_entry(x, ...)
+#define trace_gasket_ioctl_exit(x)
+#define trace_gasket_ioctl_integer_data(x)
+#define trace_gasket_ioctl_eventfd_data(x, ...)
+#define trace_gasket_ioctl_page_table_data(x, ...)
+#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
+#endif
+
+/* Associate an eventfd with an interrupt. */
+static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
+ struct gasket_interrupt_eventfd __user *argp)
+{
+ struct gasket_interrupt_eventfd die;
+
+ if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
+
+ return gasket_interrupt_set_eventfd(
+ gasket_dev->interrupt_data, die.interrupt, die.event_fd);
+}
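+
+/*
+ * For illustration, a host process typically wires this up as follows
+ * (userspace sketch; headers and error handling omitted):
+ *
+ *	struct gasket_interrupt_eventfd req;
+ *	uint64_t count;
+ *
+ *	req.interrupt = 0;
+ *	req.event_fd = eventfd(0, 0);
+ *	ioctl(dev_fd, GASKET_IOCTL_SET_EVENTFD, &req);
+ *	read(req.event_fd, &count, sizeof(count));  // blocks until interrupt 0
+ */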
+
+/* Read the size of the page table. */
+static int gasket_read_page_table_size(
+ struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ int ret = 0;
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ ibuf.size = gasket_page_table_num_entries(
+ gasket_dev->page_table[ibuf.page_table_index]);
+
+ trace_gasket_ioctl_page_table_data(
+ ibuf.page_table_index, ibuf.size, ibuf.host_address,
+ ibuf.device_address);
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Read the size of the simple page table. */
+static int gasket_read_simple_page_table_size(
+ struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ int ret = 0;
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ ibuf.size =
+ gasket_page_table_num_simple_entries(gasket_dev->page_table[ibuf.page_table_index]);
+
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
+
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Set the boundary between the simple and extended page tables. */
+static int gasket_partition_page_table(
+ struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ int ret;
+ struct gasket_page_table_ioctl ibuf;
+ uint max_page_table_size;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_page_table_data(
+ ibuf.page_table_index, ibuf.size, ibuf.host_address,
+ ibuf.device_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+ max_page_table_size = gasket_page_table_max_size(
+ gasket_dev->page_table[ibuf.page_table_index]);
+
+ if (ibuf.size > max_page_table_size) {
+ dev_dbg(gasket_dev->dev,
+ "Partition request 0x%llx too large, max is 0x%x\n",
+ ibuf.size, max_page_table_size);
+ return -EINVAL;
+ }
+
+ mutex_lock(&gasket_dev->mutex);
+
+ ret = gasket_page_table_partition(
+ gasket_dev->page_table[ibuf.page_table_index], ibuf.size);
+ mutex_unlock(&gasket_dev->mutex);
+
+ return ret;
+}
+
+/* Map a userspace buffer to a device virtual address. */
+static int gasket_map_buffers(struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ if (gasket_page_table_are_addrs_bad(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.host_address,
+ ibuf.device_address, ibuf.size))
+ return -EINVAL;
+
+ return gasket_page_table_map(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.host_address, ibuf.device_address,
+ ibuf.size / PAGE_SIZE);
+}
+
+/* Unmap a userspace buffer from a device virtual address. */
+static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
+ struct gasket_page_table_ioctl __user *argp)
+{
+ struct gasket_page_table_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
+ ibuf.host_address,
+ ibuf.device_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ if (gasket_page_table_is_dev_addr_bad(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.device_address, ibuf.size))
+ return -EINVAL;
+
+ gasket_page_table_unmap(gasket_dev->page_table[ibuf.page_table_index],
+ ibuf.device_address, ibuf.size / PAGE_SIZE);
+
+ return 0;
+}
+
+/*
+ * Reserve structures for coherent allocation, and allocate or free the
+ * corresponding memory.
+ */
+static int gasket_config_coherent_allocator(
+ struct gasket_dev *gasket_dev,
+ struct gasket_coherent_alloc_config_ioctl __user *argp)
+{
+ int ret;
+ struct gasket_coherent_alloc_config_ioctl ibuf;
+
+ if (copy_from_user(&ibuf, argp,
+ sizeof(struct gasket_coherent_alloc_config_ioctl)))
+ return -EFAULT;
+
+ trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
+ ibuf.dma_address);
+
+ if (ibuf.page_table_index >= gasket_dev->num_page_tables)
+ return -EFAULT;
+
+ if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
+ return -ENOMEM;
+
+ if (ibuf.enable == 0) {
+ ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
+ ibuf.dma_address,
+ ibuf.page_table_index);
+ } else {
+ ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
+ &ibuf.dma_address,
+ ibuf.page_table_index);
+ }
+ if (ret)
+ return ret;
+ if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Check permissions for Gasket ioctls. */
+static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
+{
+ bool alive;
+ bool read, write;
+ struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
+
+ alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
+ if (!alive)
+ dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
+ __func__, alive, gasket_dev->status);
+
+ read = !!(filp->f_mode & FMODE_READ);
+ write = !!(filp->f_mode & FMODE_WRITE);
+
+ switch (cmd) {
+ case GASKET_IOCTL_RESET:
+ case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
+ return write;
+
+ case GASKET_IOCTL_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_NUMBER_PAGE_TABLES:
+ return read;
+
+ case GASKET_IOCTL_PARTITION_PAGE_TABLE:
+ case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
+ return alive && write;
+
+ case GASKET_IOCTL_MAP_BUFFER:
+ case GASKET_IOCTL_UNMAP_BUFFER:
+ return alive && write;
+
+ case GASKET_IOCTL_CLEAR_EVENTFD:
+ case GASKET_IOCTL_SET_EVENTFD:
+ return alive && write;
+ }
+
+ return false; /* unknown permissions */
+}
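+
+/*
+ * For example, under the checks above a file opened read-only may query
+ * GASKET_IOCTL_PAGE_TABLE_SIZE but is denied GASKET_IOCTL_MAP_BUFFER, while
+ * a writable file on a device that is not ALIVE may still issue
+ * GASKET_IOCTL_RESET but not GASKET_IOCTL_MAP_BUFFER.
+ */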
+
+/*
+ * Standard ioctl dispatch function.
+ * @filp: File structure pointer describing this node usage session.
+ * @cmd: ioctl number to handle.
+ * @argp: ioctl-specific data pointer.
+ *
+ * Standard ioctl dispatcher; forwards operations to individual handlers.
+ */
+long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
+{
+ struct gasket_dev *gasket_dev;
+ unsigned long arg = (unsigned long)argp;
+ gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
+ int retval;
+
+ gasket_dev = (struct gasket_dev *)filp->private_data;
+ trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);
+
+ ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
+ if (ioctl_permissions_cb) {
+ retval = ioctl_permissions_cb(filp, cmd, argp);
+ if (retval < 0) {
+ trace_gasket_ioctl_exit(retval);
+ return retval;
+ } else if (retval == 0) {
+ trace_gasket_ioctl_exit(-EPERM);
+ return -EPERM;
+ }
+ } else if (!gasket_ioctl_check_permissions(filp, cmd)) {
+ trace_gasket_ioctl_exit(-EPERM);
+ dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
+ return -EPERM;
+ }
+
+ /* Tracing happens in this switch statement for all ioctls with
+ * an integer argument; for ioctls with a struct argument that
+ * needs copying and decoding, tracing is done within the
+ * handler call.
+ */
+ switch (cmd) {
+ case GASKET_IOCTL_RESET:
+ retval = gasket_reset(gasket_dev);
+ break;
+ case GASKET_IOCTL_SET_EVENTFD:
+ retval = gasket_set_event_fd(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_CLEAR_EVENTFD:
+ trace_gasket_ioctl_integer_data(arg);
+ retval =
+ gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
+ (int)arg);
+ break;
+ case GASKET_IOCTL_PARTITION_PAGE_TABLE:
+ trace_gasket_ioctl_integer_data(arg);
+ retval = gasket_partition_page_table(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_NUMBER_PAGE_TABLES:
+ trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
+ if (copy_to_user(argp, &gasket_dev->num_page_tables,
+ sizeof(uint64_t)))
+ retval = -EFAULT;
+ else
+ retval = 0;
+ break;
+ case GASKET_IOCTL_PAGE_TABLE_SIZE:
+ retval = gasket_read_page_table_size(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
+ retval = gasket_read_simple_page_table_size(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_MAP_BUFFER:
+ retval = gasket_map_buffers(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
+ retval = gasket_config_coherent_allocator(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_UNMAP_BUFFER:
+ retval = gasket_unmap_buffers(gasket_dev, argp);
+ break;
+ case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
+ /* Clear interrupt counts doesn't take an arg, so use 0. */
+ trace_gasket_ioctl_integer_data(0);
+ retval = gasket_interrupt_reset_counts(gasket_dev);
+ break;
+ default:
+ /* If we don't understand the ioctl, the best we can do is trace
+ * the arg.
+ */
+ trace_gasket_ioctl_integer_data(arg);
+ dev_dbg(gasket_dev->dev,
+ "Unknown ioctl cmd=0x%x not caught by "
+ "gasket_is_supported_ioctl\n",
+ cmd);
+ retval = -EINVAL;
+ break;
+ }
+
+ trace_gasket_ioctl_exit(retval);
+ return retval;
+}
+
+/*
+ * Determines if an ioctl is part of the standard Gasket framework.
+ * @cmd: The ioctl number to handle.
+ *
+ * Returns 1 if the ioctl is supported and 0 otherwise.
+ */
+long gasket_is_supported_ioctl(uint cmd)
+{
+ switch (cmd) {
+ case GASKET_IOCTL_RESET:
+ case GASKET_IOCTL_SET_EVENTFD:
+ case GASKET_IOCTL_CLEAR_EVENTFD:
+ case GASKET_IOCTL_PARTITION_PAGE_TABLE:
+ case GASKET_IOCTL_NUMBER_PAGE_TABLES:
+ case GASKET_IOCTL_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
+ case GASKET_IOCTL_MAP_BUFFER:
+ case GASKET_IOCTL_UNMAP_BUFFER:
+ case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
+ case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
diff --git a/drivers/staging/gasket/gasket_ioctl.h b/drivers/staging/gasket/gasket_ioctl.h
new file mode 100644
index 00000000000000..51f468c77f0411
--- /dev/null
+++ b/drivers/staging/gasket/gasket_ioctl.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Google, Inc. */
+#ifndef __GASKET_IOCTL_H__
+#define __GASKET_IOCTL_H__
+
+#include "gasket_core.h"
+
+#include <linux/compiler.h>
+
+/*
+ * Handle Gasket common ioctls.
+ * @filp: Pointer to the ioctl's file.
+ * @cmd: Ioctl command.
+ * @argp: Ioctl argument pointer.
+ *
+ * Returns 0 on success and a negative error code on failure.
+ */
+long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp);
+
+/*
+ * Determines if an ioctl is part of the standard Gasket framework.
+ * @cmd: The ioctl number to handle.
+ *
+ * Returns 1 if the ioctl is supported and 0 otherwise.
+ */
+long gasket_is_supported_ioctl(uint cmd);
+
+#endif
diff --git a/drivers/staging/gasket/gasket_page_table.c b/drivers/staging/gasket/gasket_page_table.c
new file mode 100644
index 00000000000000..ed6ab3c5f03819
--- /dev/null
+++ b/drivers/staging/gasket/gasket_page_table.c
@@ -0,0 +1,1381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Implementation of Gasket page table support.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+/*
+ * This file assumes 4kB pages throughout; that assumption can be factored
+ * out when necessary.
+ *
+ * Address format is as follows:
+ * Simple addresses - those whose containing pages are directly placed in the
+ * device's address translation registers - are laid out as:
+ * [ 63 - 40: Unused | 39 - 28: 0 | 27 - 12: page index | 11 - 0: page offset ]
+ * page index: The index of the containing page in the device's address
+ * translation registers.
+ * page offset: The index of the address into the containing page.
+ *
+ * Extended addresses - those whose containing pages are contained in a second-
+ * level page table whose address is present in the device's address translation
+ * registers - are laid out as:
+ * [ 63 - 40: Unused | 39: flag | 38 - 37: 0 | 36 - 21: dev/level 0 index |
+ * 20 - 12: host/level 1 index | 11 - 0: page offset ]
+ * flag: Marker indicating that this is an extended address. Always 1.
+ * dev index: The index of the first-level page in the device's extended
+ * address translation registers.
+ * host index: The index of the containing page in the [host-resident] second-
+ * level page table.
+ * page offset: The index of the address into the containing [second-level]
+ * page.
+ */
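+
+/*
+ * Worked example (with the extended flag at bit 39, as in the examples later
+ * in this file): the extended device address 0x80003E81FF decomposes as
+ *	flag    = bit 39                      = 1 (extended)
+ *	level 0 = bits 36-21: 0x3E81FF >> 21  = 1
+ *	level 1 = (0x3E81FF >> 12) & 511      = 488
+ *	offset  = 0x3E81FF & 0xFFF            = 0x1FF
+ * i.e. sub-page 488 of second-level table 1, at byte offset 0x1FF.
+ */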
+#include "gasket_page_table.h"
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+
+/* Constants & utility macros */
+/* The number of pages that can be mapped into each second-level page table. */
+#define GASKET_PAGES_PER_SUBTABLE 512
+
+/* The starting position of the page index in a simple virtual address. */
+#define GASKET_SIMPLE_PAGE_SHIFT 12
+
+/* Flag indicating that a [device] slot is valid for use. */
+#define GASKET_VALID_SLOT_FLAG 1
+
+/*
+ * The starting position of the level 0 page index (i.e., the entry in the
+ * device's extended address registers) in an extended address.
+ * Also can be thought of as (log2(PAGE_SIZE) + log2(PAGES_PER_SUBTABLE)),
+ * or (12 + 9).
+ */
+#define GASKET_EXTENDED_LVL0_SHIFT 21
+
+/*
+ * The width, in bits, of the level 0 page index in an extended address,
+ * i.e. log2 of the number of first-level pages that Gasket chips support.
+ *
+ * At a maximum this allows a 34-bit address space (16 GB):
+ * GASKET_EXTENDED_LVL0_WIDTH + log2(PAGES_PER_SUBTABLE) + log2(PAGE_SIZE)
+ * = 13 + 9 + 12 = 34.
+ */
+#define GASKET_EXTENDED_LVL0_WIDTH 13
+
+/*
+ * The starting position of the level 1 page index (i.e., the entry in the
+ * host second-level/sub- table) in an extended address.
+ */
+#define GASKET_EXTENDED_LVL1_SHIFT 12
+
+/* Type declarations */
+/* Valid states for a struct gasket_page_table_entry. */
+enum pte_status {
+ PTE_FREE,
+ PTE_INUSE,
+};
+
+/*
+ * Mapping metadata for a single page.
+ *
+ * In this file, host-side page table entries are referred to as that (or PTEs).
+ * Where device vs. host entries are differentiated, device-side or -visible
+ * entries are called "slots". A slot may be either an entry in the device's
+ * address translation table registers or an entry in a second-level page
+ * table ("subtable").
+ *
+ * The full data in this structure is visible on the host [of course]. Only
+ * the address contained in dma_addr is communicated to the device; that points
+ * to the actual page mapped and described by this structure.
+ */
+struct gasket_page_table_entry {
+ /* The status of this entry/slot: free or in use. */
+ enum pte_status status;
+
+ /* Address of the page in DMA space. */
+ dma_addr_t dma_addr;
+
+ /* Linux page descriptor for the page described by this structure. */
+ struct page *page;
+
+ /*
+ * Index for alignment into host vaddrs.
+ * When a user specifies a host address for a mapping, that address may
+ * not be page-aligned. Offset is the index into the containing page of
+ * the host address (i.e., host_vaddr & (PAGE_SIZE - 1)).
+ * This is necessary for translating between user-specified addresses
+ * and page-aligned addresses.
+ */
+ int offset;
+
+ /*
+ * If this is an extended and first-level entry, sublevel points
+ * to the second-level entries underneath this entry.
+ */
+ struct gasket_page_table_entry *sublevel;
+};
+
+/*
+ * Maintains virtual to physical address mapping for a coherent page that is
+ * allocated by this module for a given device.
+ * Note that the Linux kernel does not track the user-virtual mappings of
+ * coherent pages, and coherent pages have no struct page associated with
+ * them, so the kernel cannot perform get_user_pages_xx() on an address
+ * backed by a coherent allocation.
+ * This structure tracks that mapping instead.
+ */
+struct gasket_coherent_page_entry {
+ /* Phys address, dma'able by the owner device */
+ dma_addr_t paddr;
+
+ /* User virtual address that was mapped by the mmap kernel subsystem */
+ u64 user_virt;
+
+ /* Kernel virtual address */
+ u64 kernel_virt;
+
+ /*
+ * Whether this page has been mapped into a user land process virtual
+ * space
+ */
+ u32 in_use;
+};
+
+/*
+ * [Host-side] page table descriptor.
+ *
+ * This structure tracks the metadata necessary to manage both simple and
+ * extended page tables.
+ */
+struct gasket_page_table {
+ /* The config used to create this page table. */
+ struct gasket_page_table_config config;
+
+ /* The number of simple (single-level) entries in the page table. */
+ uint num_simple_entries;
+
+ /* The number of extended (two-level) entries in the page table. */
+ uint num_extended_entries;
+
+ /* Array of [host-side] page table entries. */
+ struct gasket_page_table_entry *entries;
+
+ /* Number of actively mapped kernel pages in this table. */
+ uint num_active_pages;
+
+ /* Device register: base of/first slot in the page table. */
+ u64 __iomem *base_slot;
+
+ /* Device register: holds the offset indicating the start of the
+ * extended address region of the device's address translation table.
+ */
+ u64 __iomem *extended_offset_reg;
+
+ /* Device structure for the underlying device. Only used for logging. */
+ struct device *device;
+
+ /* PCI system descriptor for the underlying device. */
+ struct pci_dev *pci_dev;
+
+ /* Location of the extended address bit for this Gasket device. */
+ u64 extended_flag;
+
+ /* Mutex to protect page table internals. */
+ struct mutex mutex;
+
+ /* Number of coherent pages accessible via this page table */
+ int num_coherent_pages;
+
+ /*
+ * List of coherent memory (physical) allocated for a device.
+ *
+ * This structure also remembers the user virtual mapping. This is
+ * hacky, but necessary because the kernel doesn't keep track of user
+ * mappings of coherent (pfn) pages, i.e. the virt-to-coherent-page
+ * mapping.
+ * TODO: use the find_vma() APIs to convert a host address to a
+ * vm_area and then to a dma_addr_t, instead of storing the user
+ * virtual address in gasket_coherent_page_entry.
+ *
+ * Note that the user virtual mapping is created by the driver, in
+ * gasket_mmap function, so user_virt belongs in the driver anyhow.
+ */
+ struct gasket_coherent_page_entry *coherent_pages;
+};
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
+ const struct gasket_bar_data *bar_data,
+ const struct gasket_page_table_config *page_table_config,
+ struct device *device, struct pci_dev *pci_dev)
+{
+ ulong bytes;
+ struct gasket_page_table *pg_tbl;
+ ulong total_entries = page_table_config->total_entries;
+
+ /*
+ * TODO: Verify config->total_entries against value read from the
+ * hardware register that contains the page table size.
+ */
+ if (total_entries == ULONG_MAX) {
+ dev_dbg(device, "Error reading page table size. "
+ "Initializing page table with size 0\n");
+ total_entries = 0;
+ }
+
+ dev_dbg(device,
+ "Attempting to initialize page table of size 0x%lx\n",
+ total_entries);
+
+ dev_dbg(device,
+ "Table has base reg 0x%x, extended offset reg 0x%x\n",
+ page_table_config->base_reg,
+ page_table_config->extended_reg);
+
+ *ppg_tbl = kzalloc(sizeof(**ppg_tbl), GFP_KERNEL);
+ if (!*ppg_tbl) {
+ dev_dbg(device, "No memory for page table\n");
+ return -ENOMEM;
+ }
+
+ pg_tbl = *ppg_tbl;
+ bytes = total_entries * sizeof(struct gasket_page_table_entry);
+ if (bytes != 0) {
+ pg_tbl->entries = vzalloc(bytes);
+ if (!pg_tbl->entries) {
+ dev_dbg(device,
+ "No memory for address translation metadata\n");
+ kfree(pg_tbl);
+ *ppg_tbl = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ mutex_init(&pg_tbl->mutex);
+ memcpy(&pg_tbl->config, page_table_config, sizeof(*page_table_config));
+ if (pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_NORMAL ||
+ pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_SIMPLE) {
+ pg_tbl->num_simple_entries = total_entries;
+ pg_tbl->num_extended_entries = 0;
+ pg_tbl->extended_flag = 1ull << page_table_config->extended_bit;
+ } else {
+ pg_tbl->num_simple_entries = 0;
+ pg_tbl->num_extended_entries = total_entries;
+ pg_tbl->extended_flag = 0;
+ }
+ pg_tbl->num_active_pages = 0;
+ pg_tbl->base_slot =
+ (u64 __iomem *)&bar_data->virt_base[page_table_config->base_reg];
+ pg_tbl->extended_offset_reg =
+ (u64 __iomem *)&bar_data->virt_base[page_table_config->extended_reg];
+ pg_tbl->device = get_device(device);
+ pg_tbl->pci_dev = pci_dev;
+
+ dev_dbg(device, "Page table initialized successfully\n");
+
+ return 0;
+}
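+
+/*
+ * A minimal initialization sketch (all values hypothetical; real configs are
+ * supplied by the device driver). Only fields used above are shown:
+ *
+ *	struct gasket_page_table *pg_tbl;
+ *	struct gasket_page_table_config cfg = {
+ *		.mode = GASKET_PAGE_TABLE_MODE_NORMAL,
+ *		.total_entries = 4096,
+ *		.base_reg = 0x10000,
+ *		.extended_reg = 0x10008,
+ *		.extended_bit = 39,
+ *	};
+ *
+ *	ret = gasket_page_table_init(&pg_tbl, bar_data, &cfg, dev, pci_dev);
+ */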
+
+/*
+ * Check if a range of PTEs is free.
+ * The page table mutex must be held by the caller.
+ */
+static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,
+ uint num_entries)
+{
+ int i;
+
+ for (i = 0; i < num_entries; i++) {
+ if (ptes[i].status != PTE_FREE)
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Free a second level page [sub]table.
+ * The page table mutex must be held before this call.
+ */
+static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *pte,
+ u64 __iomem *slot)
+{
+ /* Release the page table from the driver */
+ pte->status = PTE_FREE;
+
+ /* Release the page table from the device */
+ writeq(0, slot);
+ /* Force sync around the address release. */
+ mb();
+
+ if (pte->dma_addr)
+ dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ vfree(pte->sublevel);
+
+ if (pte->page)
+ free_page((ulong)page_address(pte->page));
+
+ memset(pte, 0, sizeof(struct gasket_page_table_entry));
+}
+
+/*
+ * Actually perform collection.
+ * The page table mutex must be held by the caller.
+ */
+static void
+gasket_page_table_garbage_collect_nolock(struct gasket_page_table *pg_tbl)
+{
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot;
+
+ /* XXX FIX ME XXX -- it would be more efficient to keep a usage
+ * count than to scan the second level page tables.
+ */
+
+ for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
+ slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
+ pte < pg_tbl->entries + pg_tbl->config.total_entries;
+ pte++, slot++) {
+ if (pte->status == PTE_INUSE) {
+ if (gasket_is_pte_range_free(pte->sublevel,
+ GASKET_PAGES_PER_SUBTABLE))
+ gasket_free_extended_subtable(pg_tbl, pte,
+ slot);
+ }
+ }
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
+{
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_garbage_collect_nolock(pg_tbl);
+ mutex_unlock(&pg_tbl->mutex);
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
+{
+ /* Deallocate free second-level tables. */
+ gasket_page_table_garbage_collect(pg_tbl);
+
+ /* TODO: Check that all PTEs have been freed? */
+
+ vfree(pg_tbl->entries);
+ pg_tbl->entries = NULL;
+
+ put_device(pg_tbl->device);
+ kfree(pg_tbl);
+}
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_partition(struct gasket_page_table *pg_tbl,
+ uint num_simple_entries)
+{
+ int i, start;
+
+ mutex_lock(&pg_tbl->mutex);
+ if (num_simple_entries > pg_tbl->config.total_entries) {
+ mutex_unlock(&pg_tbl->mutex);
+ return -EINVAL;
+ }
+
+ gasket_page_table_garbage_collect_nolock(pg_tbl);
+
+ start = min(pg_tbl->num_simple_entries, num_simple_entries);
+
+ for (i = start; i < pg_tbl->config.total_entries; i++) {
+ if (pg_tbl->entries[i].status != PTE_FREE) {
+ dev_err(pg_tbl->device, "entry %d is not free\n", i);
+ mutex_unlock(&pg_tbl->mutex);
+ return -EBUSY;
+ }
+ }
+
+ pg_tbl->num_simple_entries = num_simple_entries;
+ pg_tbl->num_extended_entries =
+ pg_tbl->config.total_entries - num_simple_entries;
+ writeq(num_simple_entries, pg_tbl->extended_offset_reg);
+
+ mutex_unlock(&pg_tbl->mutex);
+ return 0;
+}
+EXPORT_SYMBOL(gasket_page_table_partition);
+
+/*
+ * Return whether a host buffer was mapped as coherent memory.
+ *
+ * A Gasket page_table currently supports one contiguous DMA range, mapped to one
+ * contiguous virtual memory range. Check if the host_addr is within that range.
+ */
+static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
+{
+ u64 min, max;
+
+ /* whether the host address is within user virt range */
+ if (!pg_tbl->coherent_pages)
+ return 0;
+
+ min = (u64)pg_tbl->coherent_pages[0].user_virt;
+ max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;
+
+ return min <= host_addr && host_addr < max;
+}
+
+/*
+ * Get and map last level page table buffers.
+ *
+ * slots is the location(s) to write device-mapped page address. If this is a
+ * simple mapping, these will be address translation registers. If this is
+ * an extended mapping, these will be within a second-level page table
+ * allocated by the host and so must have their __iomem attribute casted away.
+ */
+static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *ptes,
+ u64 __iomem *slots, ulong host_addr,
+ uint num_pages, int is_simple_mapping)
+{
+ int ret;
+ ulong offset;
+ struct page *page;
+ dma_addr_t dma_addr;
+ ulong page_addr;
+ int i;
+
+ for (i = 0; i < num_pages; i++) {
+ page_addr = host_addr + i * PAGE_SIZE;
+ offset = page_addr & (PAGE_SIZE - 1);
+ dev_dbg(pg_tbl->device, "%s i %d\n", __func__, i);
+ if (is_coherent(pg_tbl, host_addr)) {
+ u64 off =
+ (u64)host_addr -
+ (u64)pg_tbl->coherent_pages[0].user_virt;
+ ptes[i].page = NULL;
+ ptes[i].offset = offset;
+ ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
+ off + i * PAGE_SIZE;
+ } else {
+ ret = get_user_pages_fast(page_addr - offset, 1, 1,
+ &page);
+
+ if (ret <= 0) {
+ dev_err(pg_tbl->device,
+ "get user pages failed for addr=0x%lx, "
+ "offset=0x%lx [ret=%d]\n",
+ page_addr, offset, ret);
+ return ret ? ret : -ENOMEM;
+ }
+ ++pg_tbl->num_active_pages;
+
+ ptes[i].page = page;
+ ptes[i].offset = offset;
+
+ /* Map the page into DMA space. */
+ ptes[i].dma_addr =
+ dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ dev_dbg(pg_tbl->device,
+ "%s i %d pte %p pfn %p -> mapped %llx\n",
+ __func__, i, &ptes[i],
+ (void *)page_to_pfn(page),
+ (unsigned long long)ptes[i].dma_addr);
+
+ if (dma_mapping_error(pg_tbl->device, ptes[i].dma_addr)) {
+ dev_dbg(pg_tbl->device,
+ "%s i %d -> failed to map page %llx [pfn %p phys %p]\n",
+ __func__, i,
+ (unsigned long long)ptes[i].dma_addr,
+ (void *)page_to_pfn(page),
+ (void *)page_to_phys(page));
+ return -EINVAL;
+ }
+ }
+ /* Wait until the page is mapped. */
+ mb();
+ }
+
+ /* Make the DMA-space address available to the device. */
+ dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;
+
+ if (is_simple_mapping) {
+ writeq(dma_addr, &slots[i]);
+ } else {
+ ((u64 __force *)slots)[i] = dma_addr;
+ /* Extended page table vectors are in DRAM,
+ * and so need to be synced each time they are updated.
+ */
+ dma_map_single(pg_tbl->device,
+ (void *)&((u64 __force *)slots)[i],
+ sizeof(u64), DMA_TO_DEVICE);
+ }
+ ptes[i].status = PTE_INUSE;
+ }
+ return 0;
+}
+
+/*
+ * Return the index of the page for the address in the simple table.
+ * Does not perform validity checking.
+ */
+static int gasket_simple_page_idx(struct gasket_page_table *pg_tbl,
+ ulong dev_addr)
+{
+ return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
+ (pg_tbl->config.total_entries - 1);
+}
+
+/*
+ * Return the level 0 page index for the given address.
+ * Does not perform validity checking.
+ */
+static ulong gasket_extended_lvl0_page_idx(struct gasket_page_table *pg_tbl,
+ ulong dev_addr)
+{
+ return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
+ ((1 << GASKET_EXTENDED_LVL0_WIDTH) - 1);
+}
+
+/*
+ * Return the level 1 page index for the given address.
+ * Does not perform validity checking.
+ */
+static ulong gasket_extended_lvl1_page_idx(struct gasket_page_table *pg_tbl,
+ ulong dev_addr)
+{
+ return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
+ (GASKET_PAGES_PER_SUBTABLE - 1);
+}
+
+/*
+ * Allocate page table entries in a simple table.
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_simple_entries(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ if (!gasket_is_pte_range_free(pg_tbl->entries +
+ gasket_simple_page_idx(pg_tbl, dev_addr),
+ num_pages))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Safely return a page to the OS. */
+static bool gasket_release_page(struct page *page)
+{
+ if (!page)
+ return false;
+
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ put_page(page);
+
+ return true;
+}
+
+/*
+ * Unmap and release mapped pages.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *ptes,
+ u64 __iomem *slots, uint num_pages,
+ int is_simple_mapping)
+{
+ int i;
+ /*
+ * For each page table entry and corresponding entry in the device's
+ * address translation table:
+ */
+ for (i = 0; i < num_pages; i++) {
+ /* release the address from the device, */
+ if (is_simple_mapping || ptes[i].status == PTE_INUSE)
+ writeq(0, &slots[i]);
+ else
+ ((u64 __force *)slots)[i] = 0;
+ /* Force sync around the address release. */
+ mb();
+
+ /* release the address from the driver, */
+ if (ptes[i].status == PTE_INUSE) {
+ if (ptes[i].dma_addr) {
+ dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+ if (gasket_release_page(ptes[i].page))
+ --pg_tbl->num_active_pages;
+ }
+ ptes[i].status = PTE_FREE;
+
+ /* and clear the PTE. */
+ memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
+ }
+}
+
+/*
+ * Unmap and release pages mapped to simple addresses.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_unmap_simple_pages(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);
+
+ gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot,
+ pg_tbl->base_slot + slot, num_pages, 1);
+}
+
+/*
+ * Unmap and release buffers to extended addresses.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_unmap_extended_pages(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ uint slot_idx, remain, len;
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot_base;
+
+ remain = num_pages;
+ slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ while (remain > 0) {
+ /* TODO: Add check to ensure pte remains valid? */
+ len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
+
+ if (pte->status == PTE_INUSE) {
+ slot_base = (u64 __iomem *)(page_address(pte->page) +
+ pte->offset);
+ gasket_perform_unmapping(pg_tbl,
+ pte->sublevel + slot_idx,
+ slot_base + slot_idx, len, 0);
+ }
+
+ remain -= len;
+ slot_idx = 0;
+ pte++;
+ }
+}
+
+/* Evaluates to nonzero if the specified virtual address is simple. */
+static inline bool gasket_addr_is_simple(struct gasket_page_table *pg_tbl,
+ ulong addr)
+{
+ return !((addr) & (pg_tbl)->extended_flag);
+}
+
+/*
+ * Convert (simple, page, offset) into a device address.
+ * Examples:
+ * Simple page 0, offset 32:
+ *   Input (1, 0, 32), Output 0x20
+ * Simple page 1000, offset 511:
+ *   Input (1, 1000, 511), Output 0x3E81FF
+ * Extended page 0, offset 32:
+ *   Input (0, 0, 32), Output 0x8000000020
+ * Extended page 1000, offset 511:
+ *   Input (0, 1000, 511), Output 0x80003E81FF
+ */
+static ulong gasket_components_to_dev_address(struct gasket_page_table *pg_tbl,
+ int is_simple, uint page_index,
+ uint offset)
+{
+ ulong lvl0_index, lvl1_index;
+
+ if (is_simple) {
+ /* Return simple addresses directly. */
+ lvl0_index = page_index & (pg_tbl->config.total_entries - 1);
+ return (lvl0_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;
+ }
+
+ /*
+ * This could be compressed into fewer statements, but
+ * A) the compiler should optimize it
+ * B) this is not slow
+ * C) this is an uncommon operation
+ * D) this is actually readable this way.
+ */
+ lvl0_index = page_index / GASKET_PAGES_PER_SUBTABLE;
+ lvl1_index = page_index & (GASKET_PAGES_PER_SUBTABLE - 1);
+ return (pg_tbl)->extended_flag |
+ (lvl0_index << GASKET_EXTENDED_LVL0_SHIFT) |
+ (lvl1_index << GASKET_EXTENDED_LVL1_SHIFT) | offset;
+}
+
+/*
+ * Validity checking for simple addresses.
+ *
+ * Verify that address translation commutes (from address to/from page + offset)
+ * and that the requested page range starts and ends within the set of
+ * currently-partitioned simple pages.
+ */
+static bool gasket_is_simple_dev_addr_bad(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ ulong page_offset = dev_addr & (PAGE_SIZE - 1);
+ ulong page_index =
+ (dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);
+
+ if (gasket_components_to_dev_address(pg_tbl, 1, page_index,
+ page_offset) != dev_addr) {
+ dev_err(pg_tbl->device, "address is invalid, 0x%lX\n",
+ dev_addr);
+ return true;
+ }
+
+ if (page_index >= pg_tbl->num_simple_entries) {
+ dev_err(pg_tbl->device,
+ "starting slot at %lu is too large, max is < %u\n",
+ page_index, pg_tbl->num_simple_entries);
+ return true;
+ }
+
+ if (page_index + num_pages > pg_tbl->num_simple_entries) {
+ dev_err(pg_tbl->device,
+ "ending slot at %lu is too large, max is <= %u\n",
+ page_index + num_pages, pg_tbl->num_simple_entries);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Validity checking for extended addresses.
+ *
+ * Verify that address translation commutes (from address to/from page +
+ * offset) and that the requested page range starts and ends within the set of
+ * currently-partitioned extended pages.
+ */
+static bool gasket_is_extended_dev_addr_bad(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ /* Starting byte index of dev_addr into the first mapped page */
+ ulong page_offset = dev_addr & (PAGE_SIZE - 1);
+ ulong page_global_idx, page_lvl0_idx;
+ ulong num_lvl0_pages;
+ ulong addr;
+
+ /* check if the device address is out of bound */
+ addr = dev_addr & ~((pg_tbl)->extended_flag);
+ if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
+ dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
+ dev_addr);
+ return true;
+ }
+
+ /* Find the starting sub-page index in the space of all sub-pages. */
+ page_global_idx = (dev_addr / PAGE_SIZE) &
+ (pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1);
+
+ /* Find the starting level 0 index. */
+ page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ /* Get the count of affected level 0 pages. */
+ num_lvl0_pages = (num_pages + GASKET_PAGES_PER_SUBTABLE - 1) /
+ GASKET_PAGES_PER_SUBTABLE;
+
+ if (gasket_components_to_dev_address(pg_tbl, 0, page_global_idx,
+ page_offset) != dev_addr) {
+ dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
+ dev_addr);
+ return true;
+ }
+
+ if (page_lvl0_idx >= pg_tbl->num_extended_entries) {
+ dev_err(pg_tbl->device,
+ "starting level 0 slot at %lu is too large, max is < "
+ "%u\n", page_lvl0_idx, pg_tbl->num_extended_entries);
+ return true;
+ }
+
+ if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) {
+ dev_err(pg_tbl->device,
+ "ending level 0 slot at %lu is too large, max is <= %u\n",
+ page_lvl0_idx + num_lvl0_pages,
+ pg_tbl->num_extended_entries);
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Non-locking entry to unmapping routines.
+ * The page table mutex must be held by the caller.
+ */
+static void gasket_page_table_unmap_nolock(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_pages)
+{
+ if (!num_pages)
+ return;
+
+ if (gasket_addr_is_simple(pg_tbl, dev_addr))
+ gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
+ else
+ gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
+}
+
+/*
+ * Allocate and map pages to simple addresses.
+ * If there is an error, no pages are mapped.
+ */
+static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
+ ulong host_addr, ulong dev_addr,
+ uint num_pages)
+{
+ int ret;
+ uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);
+
+ ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
+ if (ret) {
+ dev_err(pg_tbl->device,
+ "page table slots %u (@ 0x%lx) to %u are not available\n",
+ slot_idx, dev_addr, slot_idx + num_pages - 1);
+ return ret;
+ }
+
+ ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx,
+ pg_tbl->base_slot + slot_idx, host_addr,
+ num_pages, 1);
+
+ if (ret) {
+ gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
+ dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
+ }
+ return ret;
+}
+
+/*
+ * Allocate a second level page table.
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
+ struct gasket_page_table_entry *pte,
+ u64 __iomem *slot)
+{
+ ulong page_addr, subtable_bytes;
+ dma_addr_t dma_addr;
+
+ /* XXX FIX ME XXX this is inefficient for non-4K page sizes */
+
+ /* The GFP_DMA flag must be passed on architectures where part
+ * of the memory range is not considered DMA'able.
+ * This seems to be the case for the Juno board with the 4.5.0
+ * Linaro kernel.
+ */
+ page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ if (!page_addr)
+ return -ENOMEM;
+ pte->page = virt_to_page((void *)page_addr);
+ pte->offset = 0;
+
+ subtable_bytes = sizeof(struct gasket_page_table_entry) *
+ GASKET_PAGES_PER_SUBTABLE;
+ pte->sublevel = vzalloc(subtable_bytes);
+ if (!pte->sublevel) {
+ free_page(page_addr);
+ memset(pte, 0, sizeof(struct gasket_page_table_entry));
+ return -ENOMEM;
+ }
+
+ /* Map the page into DMA space. */
+ pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ /* Wait until the page is mapped. */
+ mb();
+
+ /* make the addresses available to the device */
+ dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
+ writeq(dma_addr, slot);
+
+ pte->status = PTE_INUSE;
+
+ return 0;
+}
+
+/*
+ * Allocate slots in an extended page table. Check to see if a range of page
+ * table slots are available. If necessary, memory is allocated for second level
+ * page tables.
+ *
+ * Note that memory for second level page tables is allocated as needed, but
+ * that memory is only freed on the final close of the device file, when the
+ * page tables are repartitioned, or the device is removed. If there is an
+ * error or if the full range of slots is not available, any memory
+ * allocated for second level page tables remains allocated until final close,
+ * repartition, or device removal.
+ *
+ * The page table mutex must be held by the caller.
+ */
+static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
+ ulong dev_addr, uint num_entries)
+{
+ int ret = 0;
+ uint remain, subtable_slot_idx, len;
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot;
+
+ remain = num_entries;
+ subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+ slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ while (remain > 0) {
+ len = min(remain,
+ GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);
+
+ if (pte->status == PTE_FREE) {
+ ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
+ if (ret) {
+ dev_err(pg_tbl->device,
+ "no memory for extended addr subtable\n");
+ return ret;
+ }
+ } else {
+ if (!gasket_is_pte_range_free(pte->sublevel +
+ subtable_slot_idx, len))
+ return -EBUSY;
+ }
+
+ remain -= len;
+ subtable_slot_idx = 0;
+ pte++;
+ slot++;
+ }
+
+ return 0;
+}
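+
+/*
+ * Worked example of the loop above: allocating 600 entries starting at
+ * level 1 index 500 spans three subtables: min(600, 512 - 500) = 12 entries
+ * in the first, then min(588, 512) = 512 in the second, and the remaining 76
+ * in a third.
+ */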
+
+/*
+ * gasket_map_extended_pages - Get and map buffers to extended addresses.
+ * If there is an error, no pages are mapped.
+ */
+static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
+ ulong host_addr, ulong dev_addr,
+ uint num_pages)
+{
+ int ret;
+ ulong dev_addr_end;
+ uint slot_idx, remain, len;
+ struct gasket_page_table_entry *pte;
+ u64 __iomem *slot_base;
+
+ ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
+ if (ret) {
+ dev_addr_end = dev_addr + (num_pages * PAGE_SIZE) - 1;
+ dev_err(pg_tbl->device,
+ "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are not available\n",
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
+ gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
+ dev_addr,
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
+ gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
+ return ret;
+ }
+
+ remain = num_pages;
+ slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries +
+ gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+
+ while (remain > 0) {
+ len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);
+
+ slot_base =
+ (u64 __iomem *)(page_address(pte->page) + pte->offset);
+ ret = gasket_perform_mapping(pg_tbl, pte->sublevel + slot_idx,
+ slot_base + slot_idx, host_addr,
+ len, 0);
+ if (ret) {
+ gasket_page_table_unmap_nolock(pg_tbl, dev_addr,
+ num_pages);
+ return ret;
+ }
+
+ remain -= len;
+ slot_idx = 0;
+ pte++;
+ host_addr += len * PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+/*
+ * See gasket_page_table.h for general description.
+ *
+ * gasket_page_table_map calls either gasket_map_simple_pages() or
+ * gasket_map_extended_pages() to actually perform the mapping.
+ *
+ * The page table mutex is held for the entire operation.
+ */
+int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
+ ulong dev_addr, uint num_pages)
+{
+ int ret;
+
+ if (!num_pages)
+ return 0;
+
+ mutex_lock(&pg_tbl->mutex);
+
+ if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
+ ret = gasket_map_simple_pages(pg_tbl, host_addr, dev_addr,
+ num_pages);
+ } else {
+ ret = gasket_map_extended_pages(pg_tbl, host_addr, dev_addr,
+ num_pages);
+ }
+
+ mutex_unlock(&pg_tbl->mutex);
+
+ dev_dbg(pg_tbl->device,
+ "%s done: ha %llx daddr %llx num %d, ret %d\n",
+ __func__, (unsigned long long)host_addr,
+ (unsigned long long)dev_addr, num_pages, ret);
+ return ret;
+}
+EXPORT_SYMBOL(gasket_page_table_map);
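+
+/*
+ * For illustration (addresses hypothetical): mapping a page-aligned 16 KiB
+ * user buffer (4 pages) to simple device address 0x3000 (page index 3):
+ *
+ *	if (!gasket_page_table_are_addrs_bad(pg_tbl, host_addr, 0x3000, 0x4000))
+ *		ret = gasket_page_table_map(pg_tbl, host_addr, 0x3000, 4);
+ *
+ * with the corresponding teardown:
+ *
+ *	gasket_page_table_unmap(pg_tbl, 0x3000, 4);
+ */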
+
+/*
+ * See gasket_page_table.h for general description.
+ *
+ * gasket_page_table_unmap takes the page table lock and calls either
+ * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to
+ * actually unmap the pages from device space.
+ *
+ * The page table mutex is held for the entire operation.
+ */
+void gasket_page_table_unmap(struct gasket_page_table *pg_tbl, ulong dev_addr,
+ uint num_pages)
+{
+ if (!num_pages)
+ return;
+
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
+ mutex_unlock(&pg_tbl->mutex);
+}
+EXPORT_SYMBOL(gasket_page_table_unmap);
+
+static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
+{
+ gasket_unmap_simple_pages(pg_tbl,
+ gasket_components_to_dev_address(pg_tbl, 1, 0,
+ 0),
+ pg_tbl->num_simple_entries);
+ gasket_unmap_extended_pages(pg_tbl,
+ gasket_components_to_dev_address(pg_tbl, 0,
+ 0, 0),
+ pg_tbl->num_extended_entries *
+ GASKET_PAGES_PER_SUBTABLE);
+}
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
+{
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_unmap_all_nolock(pg_tbl);
+ mutex_unlock(&pg_tbl->mutex);
+}
+EXPORT_SYMBOL(gasket_page_table_unmap_all);
+
+/* See gasket_page_table.h for description. */
+void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
+{
+ mutex_lock(&pg_tbl->mutex);
+ gasket_page_table_unmap_all_nolock(pg_tbl);
+ writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
+ mutex_unlock(&pg_tbl->mutex);
+}
+
+/* See gasket_page_table.h for description. */
+int gasket_page_table_lookup_page(
+ struct gasket_page_table *pg_tbl, ulong dev_addr, struct page **ppage,
+ ulong *poffset)
+{
+ uint page_num;
+ struct gasket_page_table_entry *pte;
+
+ mutex_lock(&pg_tbl->mutex);
+ if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
+ page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
+ if (page_num >= pg_tbl->num_simple_entries)
+ goto fail;
+
+ pte = pg_tbl->entries + page_num;
+ if (pte->status != PTE_INUSE)
+ goto fail;
+ } else {
+ /* Find the level 0 entry, */
+ page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
+ if (page_num >= pg_tbl->num_extended_entries)
+ goto fail;
+
+ pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
+ if (pte->status != PTE_INUSE)
+ goto fail;
+
+ /* and its contained level 1 entry. */
+ page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
+ pte = pte->sublevel + page_num;
+ if (pte->status != PTE_INUSE)
+ goto fail;
+ }
+
+ *ppage = pte->page;
+ *poffset = pte->offset;
+ mutex_unlock(&pg_tbl->mutex);
+ return 0;
+
+fail:
+ *ppage = NULL;
+ *poffset = 0;
+ mutex_unlock(&pg_tbl->mutex);
+ return -1;
+}
+
+/* See gasket_page_table.h for description. */
+bool gasket_page_table_are_addrs_bad(
+ struct gasket_page_table *pg_tbl, ulong host_addr, ulong dev_addr,
+ ulong bytes)
+{
+ if (host_addr & (PAGE_SIZE - 1)) {
+ dev_err(pg_tbl->device,
+ "host mapping address 0x%lx must be page aligned\n",
+ host_addr);
+ return true;
+ }
+
+ return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
+}
+EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);
+
+/* See gasket_page_table.h for description. */
+bool gasket_page_table_is_dev_addr_bad(
+ struct gasket_page_table *pg_tbl, ulong dev_addr, ulong bytes)
+{
+ uint num_pages = bytes / PAGE_SIZE;
+
+ if (bytes & (PAGE_SIZE - 1)) {
+ dev_err(pg_tbl->device,
+ "mapping size 0x%lX must be page aligned\n", bytes);
+ return true;
+ }
+
+ if (num_pages == 0) {
+ dev_err(pg_tbl->device,
+ "requested mapping is less than one page: %lu / %lu\n",
+ bytes, PAGE_SIZE);
+ return true;
+ }
+
+ if (gasket_addr_is_simple(pg_tbl, dev_addr))
+ return gasket_is_simple_dev_addr_bad(pg_tbl, dev_addr,
+ num_pages);
+ return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
+}
+EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_max_size(struct gasket_page_table *page_table)
+{
+ if (!page_table)
+ return 0;
+ return page_table->config.total_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_max_size);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
+{
+ if (!pg_tbl)
+ return 0;
+ return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_num_entries);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
+{
+ if (!pg_tbl)
+ return 0;
+ return pg_tbl->num_simple_entries;
+}
+EXPORT_SYMBOL(gasket_page_table_num_simple_entries);
+
+/* See gasket_page_table.h for description. */
+uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
+{
+ if (!pg_tbl)
+ return 0;
+ return pg_tbl->num_active_pages;
+}
+EXPORT_SYMBOL(gasket_page_table_num_active_pages);
+
+/* See gasket_page_table.h */
+int gasket_page_table_system_status(struct gasket_page_table *page_table)
+{
+ if (!page_table)
+ return GASKET_STATUS_LAMED;
+
+ if (gasket_page_table_num_entries(page_table) == 0) {
+ dev_dbg(page_table->device, "Page table size is 0\n");
+ return GASKET_STATUS_LAMED;
+ }
+
+ return GASKET_STATUS_ALIVE;
+}
+
+/* Record the host_addr to coherent DMA memory mapping. */
+int gasket_set_user_virt(
+ struct gasket_dev *gasket_dev, u64 size, dma_addr_t dma_address,
+ ulong vma)
+{
+ int j;
+ struct gasket_page_table *pg_tbl;
+
+ unsigned int num_pages = size / PAGE_SIZE;
+
+ /*
+ * TODO: For future chipsets, better handle the case where a device
+ * supports multiple page tables.
+ */
+ pg_tbl = gasket_dev->page_table[0];
+ if (!pg_tbl) {
+ dev_dbg(gasket_dev->dev, "%s: invalid page table index\n",
+ __func__);
+ return 0;
+ }
+ for (j = 0; j < num_pages; j++) {
+ pg_tbl->coherent_pages[j].user_virt =
+ (u64)vma + j * PAGE_SIZE;
+ }
+ return 0;
+}
+
+/* Allocate a block of coherent memory. */
+int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t *dma_address, u64 index)
+{
+ dma_addr_t handle;
+ void *mem;
+ int j;
+ unsigned int num_pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ const struct gasket_driver_desc *driver_desc =
+ gasket_get_driver_desc(gasket_dev);
+
+ if (!gasket_dev->page_table[index])
+ return -EFAULT;
+
+ if (num_pages == 0)
+ return -EINVAL;
+
+ mem = dma_alloc_coherent(gasket_get_device(gasket_dev),
+ num_pages * PAGE_SIZE, &handle, 0);
+ if (!mem)
+ goto nomem;
+
+ gasket_dev->page_table[index]->num_coherent_pages = num_pages;
+
+ /* Allocate the array used to track the coherent pages. */
+ gasket_dev->page_table[index]->coherent_pages =
+ kcalloc(num_pages, sizeof(struct gasket_coherent_page_entry),
+ GFP_KERNEL);
+ if (!gasket_dev->page_table[index]->coherent_pages)
+ goto nomem;
+ *dma_address = 0;
+
+ gasket_dev->coherent_buffer.length_bytes =
+ PAGE_SIZE * (num_pages);
+ gasket_dev->coherent_buffer.phys_base = handle;
+ gasket_dev->coherent_buffer.virt_base = mem;
+
+ *dma_address = driver_desc->coherent_buffer_description.base;
+ for (j = 0; j < num_pages; j++) {
+ gasket_dev->page_table[index]->coherent_pages[j].paddr =
+ handle + j * PAGE_SIZE;
+ gasket_dev->page_table[index]->coherent_pages[j].kernel_virt =
+ (u64)mem + j * PAGE_SIZE;
+ }
+
+ if (*dma_address == 0)
+ goto nomem;
+ return 0;
+
+nomem:
+ if (mem) {
+ dma_free_coherent(gasket_get_device(gasket_dev),
+ num_pages * PAGE_SIZE, mem, handle);
+ }
+
+ if (gasket_dev->page_table[index]->coherent_pages) {
+ kfree(gasket_dev->page_table[index]->coherent_pages);
+ gasket_dev->page_table[index]->coherent_pages = NULL;
+ }
+ gasket_dev->page_table[index]->num_coherent_pages = 0;
+ return -ENOMEM;
+}
+
+/* Free a block of coherent memory. */
+int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
+ dma_addr_t dma_address, u64 index)
+{
+ const struct gasket_driver_desc *driver_desc;
+
+ if (!gasket_dev->page_table[index])
+ return -EFAULT;
+
+ driver_desc = gasket_get_driver_desc(gasket_dev);
+
+ if (driver_desc->coherent_buffer_description.base != dma_address)
+ return -EADDRNOTAVAIL;
+
+ if (gasket_dev->coherent_buffer.length_bytes) {
+ dma_free_coherent(gasket_get_device(gasket_dev),
+ gasket_dev->coherent_buffer.length_bytes,
+ gasket_dev->coherent_buffer.virt_base,
+ gasket_dev->coherent_buffer.phys_base);
+ gasket_dev->coherent_buffer.length_bytes = 0;
+ gasket_dev->coherent_buffer.virt_base = NULL;
+ gasket_dev->coherent_buffer.phys_base = 0;
+ }
+ return 0;
+}
+
+/* Release all coherent memory. */
+void gasket_free_coherent_memory_all(
+ struct gasket_dev *gasket_dev, u64 index)
+{
+ if (!gasket_dev->page_table[index])
+ return;
+
+ if (gasket_dev->coherent_buffer.length_bytes) {
+ dma_free_coherent(gasket_get_device(gasket_dev),
+ gasket_dev->coherent_buffer.length_bytes,
+ gasket_dev->coherent_buffer.virt_base,
+ gasket_dev->coherent_buffer.phys_base);
+ gasket_dev->coherent_buffer.length_bytes = 0;
+ gasket_dev->coherent_buffer.virt_base = NULL;
+ gasket_dev->coherent_buffer.phys_base = 0;
+ }
+}
diff --git a/drivers/staging/gasket/gasket_page_table.h b/drivers/staging/gasket/gasket_page_table.h
new file mode 100644
index 00000000000000..7b01b73ea3e703
--- /dev/null
+++ b/drivers/staging/gasket/gasket_page_table.h
@@ -0,0 +1,249 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Gasket Page Table functionality. This file describes the address
+ * translation/paging functionality supported by the Gasket driver framework.
+ * As much as possible, internal details are hidden to simplify use -
+ * all calls are thread-safe (protected by an internal mutex) except where
+ * indicated otherwise.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+#ifndef __GASKET_PAGE_TABLE_H__
+#define __GASKET_PAGE_TABLE_H__
+
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+
+/*
+ * Structure used for managing address translation on a device. All details are
+ * internal to the implementation.
+ */
+struct gasket_page_table;
+
+/*
+ * Allocate and init address translation data.
+ * @ppg_tbl: Pointer to the Gasket page table pointer. Set by this call.
+ * @bar_data: Mapped data for the BAR containing the page table registers.
+ * @page_table_config: Configuration (sizes and register offsets) for the
+ * page table being initialized.
+ * @device: Device structure for the underlying device. Only used for logging.
+ * @pci_dev: PCI system descriptor for the underlying device.
+ *
+ * Description: Allocates and initializes data to track address translation -
+ * simple and extended page table metadata. Initially, the page table is
+ * partitioned such that all addresses are "simple" (single-level lookup).
+ * gasket_page_table_partition() can be called to change this partitioning.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
+ const struct gasket_bar_data *bar_data,
+ const struct gasket_page_table_config *page_table_config,
+ struct device *device, struct pci_dev *pci_dev);
+
+/*
+ * Deallocate and cleanup page table data.
+ * @page_table: Gasket page table pointer.
+ *
+ * Description: The inverse of gasket_page_table_init; frees page_table and
+ * its contained elements.
+ *
+ * Because this call destroys the page table, it cannot be
+ * thread-safe (mutex-protected)!
+ */
+void gasket_page_table_cleanup(struct gasket_page_table *page_table);
+
+/*
+ * Sets the size of the simple page table.
+ * @page_table: Gasket page table pointer.
+ * @num_simple_entries: Desired size of the simple page table (in entries).
+ *
+ * Description: gasket_page_table_partition checks to see if the simple page
+ * size can be changed (i.e., if there are no active extended
+ * mappings in the new simple size range), and, if so,
+ * sets the new simple and extended page table sizes.
+ *
+ * Returns 0 if successful, or non-zero if the page table entries
+ * are not free.
+ */
+int gasket_page_table_partition(struct gasket_page_table *page_table,
+ uint num_simple_entries);
+
+/*
+ * Get and map [host] user space pages into device memory.
+ * @page_table: Gasket page table pointer.
+ * @host_addr: Starting host virtual memory address of the pages.
+ * @dev_addr: Starting device address of the pages.
+ * @num_pages: Number of [4kB] pages to map.
+ *
+ * Description: Maps the "num_pages" pages of host memory pointed to by
+ * host_addr to the address "dev_addr" in device memory.
+ *
+ * The caller is responsible for validating the address ranges.
+ *
+ * Returns 0 if successful or a non-zero error number otherwise.
+ * If there is an error, no pages are mapped.
+ */
+int gasket_page_table_map(struct gasket_page_table *page_table, ulong host_addr,
+ ulong dev_addr, uint num_pages);
+
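+/*
+ * A minimal usage sketch (illustrative, not part of this patch): mapping a
+ * page-aligned user buffer and unmapping it again. The variable names are
+ * hypothetical; the calls are the ones declared in this header, and the
+ * addresses are assumed to have passed gasket_page_table_are_addrs_bad().
+ * On error, no pages remain mapped, so no unwind is needed.
+ *
+ *	uint num_pages = bytes / PAGE_SIZE;
+ *	int ret;
+ *
+ *	ret = gasket_page_table_map(pg_tbl, host_addr, dev_addr, num_pages);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	gasket_page_table_unmap(pg_tbl, dev_addr, num_pages);
+ */
+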
+/*
+ * Un-map host pages from device memory.
+ * @page_table: Gasket page table pointer.
+ * @dev_addr: Starting device address of the pages to unmap.
+ * @num_pages: The number of [4kB] pages to unmap.
+ *
+ * Description: The inverse of gasket_page_table_map; unmaps pages from
+ * the device.
+ */
+void gasket_page_table_unmap(struct gasket_page_table *page_table,
+ ulong dev_addr, uint num_pages);
+
+/*
+ * Unmap ALL host pages from device memory.
+ * @page_table: Gasket page table pointer.
+ */
+void gasket_page_table_unmap_all(struct gasket_page_table *page_table);
+
+/*
+ * Unmap all host pages from device memory and reset the table to fully simple
+ * addressing.
+ * @page_table: Gasket page table pointer.
+ */
+void gasket_page_table_reset(struct gasket_page_table *page_table);
+
+/*
+ * Reclaims unused page table memory.
+ * @page_table: Gasket page table pointer.
+ *
+ * Description: Examines the page table and frees any currently-unused
+ * allocations. Called internally on gasket_cleanup().
+ */
+void gasket_page_table_garbage_collect(struct gasket_page_table *page_table);
+
+/*
+ * Retrieve the backing page for a device address.
+ * @page_table: Gasket page table pointer.
+ * @dev_addr: Gasket device address.
+ * @ppage: Pointer to a page pointer for the returned page.
+ * @poffset: Pointer to an unsigned long for the returned offset.
+ *
+ * Description: Interprets the address and looks up the corresponding page
+ * in the page table and the offset in that page. (We need an
+ * offset because the host page may be larger than the Gasket chip
+ * page it contains.)
+ *
+ * Returns 0 if successful, -1 for an error. The page pointer
+ * and offset are returned through the pointers, if successful.
+ */
+int gasket_page_table_lookup_page(struct gasket_page_table *page_table,
+ ulong dev_addr, struct page **page,
+ ulong *poffset);
+
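+/*
+ * Sketch of a lookup, e.g. from an mmap fault handler (the surrounding
+ * handler is hypothetical; only the lookup call comes from this header).
+ * On failure the returned page is NULL and the offset is 0.
+ *
+ *	struct page *page;
+ *	ulong offset;
+ *
+ *	if (gasket_page_table_lookup_page(pg_tbl, dev_addr, &page, &offset))
+ *		return VM_FAULT_SIGBUS;
+ *	vmf->page = page;
+ */
+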
+/*
+ * Checks the validity of the input addresses and size.
+ * @page_table: Gasket page table pointer.
+ * @host_addr: Host address to check.
+ * @dev_addr: Gasket device address.
+ * @bytes: Size of the range to check (in bytes).
+ *
+ * Description: This call performs a number of checks to verify that the ranges
+ * specified by both addresses and the size are valid for mapping pages into
+ * device memory.
+ *
+ * Returns true if the mapping is bad, false otherwise.
+ */
+bool gasket_page_table_are_addrs_bad(struct gasket_page_table *page_table,
+ ulong host_addr, ulong dev_addr,
+ ulong bytes);
+
+/*
+ * Checks the validity of the input device address and size.
+ * @page_table: Gasket page table pointer.
+ * @dev_addr: Gasket device address.
+ * @bytes: Size of the range to check (in bytes).
+ *
+ * Description: This call performs a number of checks to verify that the range
+ * specified by the device address and the size is valid for mapping pages into
+ * device memory.
+ *
+ * Returns true if the address is bad, false otherwise.
+ */
+bool gasket_page_table_is_dev_addr_bad(struct gasket_page_table *page_table,
+ ulong dev_addr, ulong bytes);
+
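+/*
+ * The two checks above are typically chained in front of a map request; a
+ * sketch, assuming host_addr, dev_addr and bytes arrive from an ioctl:
+ *
+ *	if (gasket_page_table_are_addrs_bad(pg_tbl, host_addr, dev_addr,
+ *					    bytes))
+ *		return -EINVAL;
+ *	return gasket_page_table_map(pg_tbl, host_addr, dev_addr,
+ *				     bytes / PAGE_SIZE);
+ */
+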
+/*
+ * Gets maximum size for the given page table.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_max_size(struct gasket_page_table *page_table);
+
+/*
+ * Gets the total number of entries in the given page table.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_num_entries(struct gasket_page_table *page_table);
+
+/*
+ * Gets the number of simple entries.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_num_simple_entries(struct gasket_page_table *page_table);
+
+/*
+ * Gets the number of actively pinned pages.
+ * @page_table: Gasket page table pointer.
+ */
+uint gasket_page_table_num_active_pages(struct gasket_page_table *page_table);
+
+/*
+ * Gets the status of the given page table.
+ * @page_table: Gasket page table pointer.
+ */
+int gasket_page_table_system_status(struct gasket_page_table *page_table);
+
+/*
+ * Allocate a block of coherent memory.
+ * @gasket_dev: Gasket Device.
+ * @size: Size of the memory block.
+ * @dma_address: DMA address allocated by the kernel.
+ * @index: Index of the gasket_page_table within this Gasket device.
+ *
+ * Description: Allocate a contiguous coherent memory block, DMA'ble
+ * by this device.
+ */
+int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, uint64_t size,
+ dma_addr_t *dma_address, uint64_t index);
+/* Release a block of contiguous coherent memory, in use by a device. */
+int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, uint64_t size,
+ dma_addr_t dma_address, uint64_t index);
+
+/* Release all coherent memory. */
+void gasket_free_coherent_memory_all(struct gasket_dev *gasket_dev,
+ uint64_t index);
+
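+/*
+ * Illustrative lifecycle of the coherent-memory helpers above (a sketch
+ * under the assumption that size and page table index 0 come from the
+ * caller; error handling beyond the allocation is elided):
+ *
+ *	dma_addr_t dma;
+ *	int ret;
+ *
+ *	ret = gasket_alloc_coherent_memory(gasket_dev, size, &dma, 0);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	gasket_free_coherent_memory(gasket_dev, size, dma, 0);
+ */
+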
+/*
+ * Records the host_addr to coherent DMA memory mapping.
+ * @gasket_dev: Gasket Device.
+ * @size: Size of the virtual address range to map.
+ * @dma_address: DMA address within the coherent memory range.
+ * @vma: Virtual address we wish to map to coherent memory.
+ *
+ * Description: For each page in the virtual address range, record the
+ * coherent page mapping.
+ *
+ * Does not perform validity checking.
+ */
+int gasket_set_user_virt(struct gasket_dev *gasket_dev, uint64_t size,
+ dma_addr_t dma_address, ulong vma);
+
+#endif /* __GASKET_PAGE_TABLE_H__ */
diff --git a/drivers/staging/gasket/gasket_sysfs.c b/drivers/staging/gasket/gasket_sysfs.c
new file mode 100644
index 00000000000000..56d62aea511186
--- /dev/null
+++ b/drivers/staging/gasket/gasket_sysfs.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2018 Google, Inc. */
+#include "gasket_sysfs.h"
+
+#include "gasket_core.h"
+
+#include <linux/device.h>
+#include <linux/printk.h>
+
+/*
+ * Pair of kernel device and user-specified pointer. Used in lookups in sysfs
+ * "show" functions to return user data.
+ */
+struct gasket_sysfs_mapping {
+ /*
+ * The device bound to this mapping. If this is NULL, then this mapping
+ * is free.
+ */
+ struct device *device;
+
+ /* The Gasket descriptor for this device. */
+ struct gasket_dev *gasket_dev;
+
+ /* This device's set of sysfs attributes/nodes. */
+ struct gasket_sysfs_attribute *attributes;
+
+ /* The number of live elements in "attributes". */
+ int attribute_count;
+
+ /* Protects structure from simultaneous access. */
+ struct mutex mutex;
+
+ /* Tracks active users of this mapping. */
+ struct kref refcount;
+};
+
+/*
+ * 'Global' (file-scoped) list of mappings between devices and gasket_dev
+ * pointers, so that a gasket_sysfs_data handle is not needed in every file.
+ * Currently has a fixed size; if space is a concern, this can be
+ * dynamically allocated.
+ */
+static struct gasket_sysfs_mapping dev_mappings[GASKET_SYSFS_NUM_MAPPINGS];
+
+/* Callback when a mapping's refcount goes to zero. */
+static void release_entry(struct kref *ref)
+{
+ /* All work is done after the return from kref_put. */
+}
+
+/* Look up mapping information for the given device. */
+static struct gasket_sysfs_mapping *get_mapping(struct device *device)
+{
+ int i;
+
+ for (i = 0; i < GASKET_SYSFS_NUM_MAPPINGS; i++) {
+ mutex_lock(&dev_mappings[i].mutex);
+ if (dev_mappings[i].device == device) {
+ kref_get(&dev_mappings[i].refcount);
+ mutex_unlock(&dev_mappings[i].mutex);
+ return &dev_mappings[i];
+ }
+ mutex_unlock(&dev_mappings[i].mutex);
+ }
+
+ dev_dbg(device, "%s: Mapping to device %s not found\n",
+ __func__, device->kobj.name);
+ return NULL;
+}
+
+/* Put a reference to a mapping. */
+static void put_mapping(struct gasket_sysfs_mapping *mapping)
+{
+ int i;
+ int num_files_to_remove = 0;
+ struct device_attribute *files_to_remove;
+ struct device *device;
+
+ if (!mapping) {
+ pr_debug("%s: Mapping should not be NULL\n", __func__);
+ return;
+ }
+
+ mutex_lock(&mapping->mutex);
+ if (kref_put(&mapping->refcount, release_entry)) {
+ dev_dbg(mapping->device, "Removing Gasket sysfs mapping\n");
+ /*
+ * We can't remove the sysfs nodes in the kref callback, since
+ * device_remove_file() blocks until the node is free.
+ * Readers/writers of sysfs nodes, though, will be blocked on
+ * the mapping mutex, resulting in deadlock. To fix this, the
+ * sysfs nodes are removed outside the lock.
+ */
+ device = mapping->device;
+ num_files_to_remove = mapping->attribute_count;
+ files_to_remove = kcalloc(num_files_to_remove,
+ sizeof(*files_to_remove),
+ GFP_KERNEL);
+ if (!files_to_remove) {
+ mutex_unlock(&mapping->mutex);
+ return;
+ }
+
+ for (i = 0; i < num_files_to_remove; i++)
+ files_to_remove[i] = mapping->attributes[i].attr;
+
+ kfree(mapping->attributes);
+ mapping->attributes = NULL;
+ mapping->attribute_count = 0;
+ put_device(mapping->device);
+ mapping->device = NULL;
+ mapping->gasket_dev = NULL;
+ }
+ mutex_unlock(&mapping->mutex);
+
+ if (num_files_to_remove != 0) {
+ for (i = 0; i < num_files_to_remove; ++i)
+ device_remove_file(device, &files_to_remove[i]);
+ kfree(files_to_remove);
+ }
+}
+
+/*
+ * Put a reference to a mapping N times.
+ *
+ * In higher-level resource acquire/release function pairs, the release
+ * function must drop the mapping refcount twice: once for the reference
+ * taken by its own lookup, and once for the reference taken in the
+ * matching acquire call.
+ */
+static void put_mapping_n(struct gasket_sysfs_mapping *mapping, int times)
+{
+ int i;
+
+ for (i = 0; i < times; i++)
+ put_mapping(mapping);
+}
+
+void gasket_sysfs_init(void)
+{
+ int i;
+
+ for (i = 0; i < GASKET_SYSFS_NUM_MAPPINGS; i++) {
+ dev_mappings[i].device = NULL;
+ mutex_init(&dev_mappings[i].mutex);
+ }
+}
+
+int gasket_sysfs_create_mapping(struct device *device,
+ struct gasket_dev *gasket_dev)
+{
+ struct gasket_sysfs_mapping *mapping;
+ int map_idx = -1;
+
+ /*
+ * We need a function-level mutex to protect against the same device
+ * being added [multiple times] simultaneously.
+ */
+ static DEFINE_MUTEX(function_mutex);
+
+ mutex_lock(&function_mutex);
+ dev_dbg(device, "Creating sysfs entries for device\n");
+
+ /* Check that the device we're adding hasn't already been added. */
+ mapping = get_mapping(device);
+ if (mapping) {
+ dev_err(device,
+ "Attempting to re-initialize sysfs mapping for device\n");
+ put_mapping(mapping);
+ mutex_unlock(&function_mutex);
+ return -EBUSY;
+ }
+
+ /* Find the first empty entry in the array. */
+ for (map_idx = 0; map_idx < GASKET_SYSFS_NUM_MAPPINGS; ++map_idx) {
+ mutex_lock(&dev_mappings[map_idx].mutex);
+ if (!dev_mappings[map_idx].device)
+ /* Break with the mutex held! */
+ break;
+ mutex_unlock(&dev_mappings[map_idx].mutex);
+ }
+
+ if (map_idx == GASKET_SYSFS_NUM_MAPPINGS) {
+ dev_err(device, "All mappings have been exhausted\n");
+ mutex_unlock(&function_mutex);
+ return -ENOMEM;
+ }
+
+ dev_dbg(device, "Creating sysfs mapping for device %s\n",
+ device->kobj.name);
+
+ mapping = &dev_mappings[map_idx];
+ mapping->attributes = kcalloc(GASKET_SYSFS_MAX_NODES,
+ sizeof(*mapping->attributes),
+ GFP_KERNEL);
+ if (!mapping->attributes) {
+ dev_dbg(device, "Unable to allocate sysfs attribute array\n");
+ mutex_unlock(&mapping->mutex);
+ mutex_unlock(&function_mutex);
+ return -ENOMEM;
+ }
+
+ kref_init(&mapping->refcount);
+ mapping->device = get_device(device);
+ mapping->gasket_dev = gasket_dev;
+ mapping->attribute_count = 0;
+ mutex_unlock(&mapping->mutex);
+ mutex_unlock(&function_mutex);
+
+ /* Don't decrement the refcount here! One open count keeps it alive! */
+ return 0;
+}
+
+int gasket_sysfs_create_entries(struct device *device,
+ const struct gasket_sysfs_attribute *attrs)
+{
+ int i;
+ int ret;
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping) {
+ dev_dbg(device,
+ "Creating entries for device without first "
+ "initializing mapping\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&mapping->mutex);
+ for (i = 0; strcmp(attrs[i].attr.attr.name, GASKET_ARRAY_END_MARKER);
+ i++) {
+ if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
+ dev_err(device,
+ "Maximum number of sysfs nodes reached for "
+ "device\n");
+ mutex_unlock(&mapping->mutex);
+ put_mapping(mapping);
+ return -ENOMEM;
+ }
+
+ ret = device_create_file(device, &attrs[i].attr);
+ if (ret) {
+ dev_dbg(device, "Unable to create device entries\n");
+ mutex_unlock(&mapping->mutex);
+ put_mapping(mapping);
+ return ret;
+ }
+
+ mapping->attributes[mapping->attribute_count] = attrs[i];
+ ++mapping->attribute_count;
+ }
+
+ mutex_unlock(&mapping->mutex);
+ put_mapping(mapping);
+ return 0;
+}
+EXPORT_SYMBOL(gasket_sysfs_create_entries);
+
+void gasket_sysfs_remove_mapping(struct device *device)
+{
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping) {
+ dev_err(device,
+ "Attempted to remove non-existent sysfs mapping to "
+ "device\n");
+ return;
+ }
+
+ put_mapping_n(mapping, 2);
+}
+
+struct gasket_dev *gasket_sysfs_get_device_data(struct device *device)
+{
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping) {
+ dev_err(device, "device not registered\n");
+ return NULL;
+ }
+
+ return mapping->gasket_dev;
+}
+EXPORT_SYMBOL(gasket_sysfs_get_device_data);
+
+void gasket_sysfs_put_device_data(struct device *device, struct gasket_dev *dev)
+{
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+
+ if (!mapping)
+ return;
+
+ /* See comment of put_mapping_n() for why the '2' is necessary. */
+ put_mapping_n(mapping, 2);
+}
+EXPORT_SYMBOL(gasket_sysfs_put_device_data);
+
+struct gasket_sysfs_attribute *
+gasket_sysfs_get_attr(struct device *device, struct device_attribute *attr)
+{
+ int i;
+ int num_attrs;
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+ struct gasket_sysfs_attribute *attrs = NULL;
+
+ if (!mapping)
+ return NULL;
+
+ attrs = mapping->attributes;
+ num_attrs = mapping->attribute_count;
+ for (i = 0; i < num_attrs; ++i) {
+ if (!strcmp(attrs[i].attr.attr.name, attr->attr.name))
+ return &attrs[i];
+ }
+
+ dev_err(device, "Unable to find match for device_attribute %s\n",
+ attr->attr.name);
+ put_mapping(mapping);
+ return NULL;
+}
+EXPORT_SYMBOL(gasket_sysfs_get_attr);
+
+void gasket_sysfs_put_attr(struct device *device,
+ struct gasket_sysfs_attribute *attr)
+{
+ int i;
+ int num_attrs;
+ struct gasket_sysfs_mapping *mapping = get_mapping(device);
+ struct gasket_sysfs_attribute *attrs = NULL;
+
+ if (!mapping)
+ return;
+
+ attrs = mapping->attributes;
+ num_attrs = mapping->attribute_count;
+ for (i = 0; i < num_attrs; ++i) {
+ if (&attrs[i] == attr) {
+ put_mapping_n(mapping, 2);
+ return;
+ }
+ }
+
+ dev_err(device, "Unable to put unknown attribute: %s\n",
+ attr->attr.attr.name);
+}
+EXPORT_SYMBOL(gasket_sysfs_put_attr);
+
+ssize_t gasket_sysfs_register_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ ulong parsed_value = 0;
+ struct gasket_sysfs_mapping *mapping;
+ struct gasket_dev *gasket_dev;
+ struct gasket_sysfs_attribute *gasket_attr;
+
+ if (count < 3 || buf[0] != '0' || buf[1] != 'x') {
+ dev_err(device,
+ "sysfs register write format: \"0x<hex value>\"\n");
+ return -EINVAL;
+ }
+
+ if (kstrtoul(buf, 16, &parsed_value) != 0) {
+ dev_err(device,
+ "Unable to parse input as 64-bit hex value: %s\n", buf);
+ return -EINVAL;
+ }
+
+ mapping = get_mapping(device);
+ if (!mapping) {
+ dev_err(device, "Device driver may have been removed\n");
+ return 0;
+ }
+
+ gasket_dev = mapping->gasket_dev;
+ if (!gasket_dev) {
+ dev_err(device, "Device driver may have been removed\n");
+ put_mapping(mapping);
+ return 0;
+ }
+
+ gasket_attr = gasket_sysfs_get_attr(device, attr);
+ if (!gasket_attr) {
+ put_mapping(mapping);
+ return count;
+ }
+
+ gasket_dev_write_64(gasket_dev, parsed_value,
+ gasket_attr->data.bar_address.bar,
+ gasket_attr->data.bar_address.offset);
+
+ if (gasket_attr->write_callback)
+ gasket_attr->write_callback(gasket_dev, gasket_attr,
+ parsed_value);
+
+ gasket_sysfs_put_attr(device, gasket_attr);
+ put_mapping(mapping);
+ return count;
+}
+EXPORT_SYMBOL(gasket_sysfs_register_store);
diff --git a/drivers/staging/gasket/gasket_sysfs.h b/drivers/staging/gasket/gasket_sysfs.h
new file mode 100644
index 00000000000000..f32eaf89e056bf
--- /dev/null
+++ b/drivers/staging/gasket/gasket_sysfs.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Set of common sysfs utilities.
+ *
+ * Copyright (C) 2018 Google, Inc.
+ */
+
+/* The functions described here are a set of utilities to allow each file in
+ * the Gasket driver framework to manage its own set of sysfs entries,
+ * instead of centralizing all that work in one file.
+ *
+ * The goal of these utilities is to allow sysfs entries to be created easily
+ * without causing a proliferation of sysfs "show" functions. This requires
+ * O(N) string lookups during show-function execution, but as reading sysfs
+ * entries is rarely performance-critical, this is likely acceptable.
+ */
+#ifndef __GASKET_SYSFS_H__
+#define __GASKET_SYSFS_H__
+
+#include "gasket_constants.h"
+#include "gasket_core.h"
+#include <linux/device.h>
+#include <linux/stringify.h>
+#include <linux/sysfs.h>
+
+/* The maximum number of mappings/devices a driver needs to support. */
+#define GASKET_SYSFS_NUM_MAPPINGS (GASKET_FRAMEWORK_DESC_MAX * GASKET_DEV_MAX)
+
+/* The maximum number of sysfs nodes in a directory. */
+#define GASKET_SYSFS_MAX_NODES 196
+
+/* End markers for sysfs struct arrays. */
+#define GASKET_ARRAY_END_TOKEN GASKET_RESERVED_ARRAY_END
+#define GASKET_ARRAY_END_MARKER __stringify(GASKET_ARRAY_END_TOKEN)
+
+/*
+ * Terminator struct for a gasket_sysfs_attribute array. Must be at the end
+ * of all gasket_sysfs_attribute arrays.
+ */
+#define GASKET_END_OF_ATTR_ARRAY \
+ { \
+ .attr = __ATTR(GASKET_ARRAY_END_TOKEN, S_IRUGO, NULL, NULL), \
+ .data.attr_type = 0, \
+ }
+
+/*
+ * Pairing of sysfs attribute and user data.
+ * Used in lookups in sysfs "show" functions to return attribute metadata.
+ */
+struct gasket_sysfs_attribute {
+ /* The underlying sysfs device attribute associated with this data. */
+ struct device_attribute attr;
+
+ /* User-specified data to associate with the attribute. */
+ union {
+ struct bar_address_ {
+ ulong bar;
+ ulong offset;
+ } bar_address;
+ uint attr_type;
+ } data;
+
+ /*
+ * Function pointer to a callback to be invoked when this attribute is
+ * written (if so configured). The arguments are the Gasket device
+ * pointer, the enclosing gasket_attr structure, and the value written.
+ * The callback should perform any logging necessary, as errors cannot
+ * be returned from the callback.
+ */
+ void (*write_callback)(struct gasket_dev *dev,
+ struct gasket_sysfs_attribute *attr,
+ ulong value);
+};
+
+#define GASKET_SYSFS_RO(_name, _show_function, _attr_type) \
+ { \
+ .attr = __ATTR(_name, S_IRUGO, _show_function, NULL), \
+ .data.attr_type = _attr_type \
+ }
+
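+/*
+ * Sketch of an attribute table built with the helpers above. The names
+ * my_show and ATTR_FW_VERSION are hypothetical; only GASKET_SYSFS_RO and
+ * GASKET_END_OF_ATTR_ARRAY are provided by this header.
+ *
+ *	static const struct gasket_sysfs_attribute my_attrs[] = {
+ *		GASKET_SYSFS_RO(fw_version, my_show, ATTR_FW_VERSION),
+ *		GASKET_END_OF_ATTR_ARRAY,
+ *	};
+ */
+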
+/* Initializes the Gasket sysfs subsystem.
+ *
+ * Description: Performs one-time initialization. Must be called at [Gasket]
+ * module load time, before any other use of this subsystem.
+ */
+void gasket_sysfs_init(void);
+
+/*
+ * Create an entry in mapping_data between a device and a Gasket device.
+ * @device: Device struct to map to.
+ * @gasket_dev: The dev struct associated with the driver controlling @device.
+ *
+ * Description: This function maps a gasket_dev* to a device*. This mapping can
+ * be used in sysfs_show functions to get a handle to the gasket_dev struct
+ * controlling the device node.
+ *
+ * If this function is not called before gasket_sysfs_create_entries, a warning
+ * will be logged.
+ */
+int gasket_sysfs_create_mapping(struct device *device,
+ struct gasket_dev *gasket_dev);
+
+/*
+ * Creates bulk entries in sysfs.
+ * @device: Kernel device structure.
+ * @attrs: List of attributes/sysfs entries to create.
+ *
+ * Description: Creates each sysfs entry described in "attrs". Can be called
+ * multiple times for a given @device. If the gasket_dev specified in
+ * gasket_sysfs_create_mapping had a legacy device, the entries will be created
+ * for it, as well.
+ */
+int gasket_sysfs_create_entries(struct device *device,
+ const struct gasket_sysfs_attribute *attrs);
+
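+/*
+ * Typical registration flow, sketched (gasket_dev and my_attrs are the
+ * hypothetical device and attribute table from the example above; error
+ * handling is abbreviated):
+ *
+ *	gasket_sysfs_init();	(once, at module load)
+ *
+ *	ret = gasket_sysfs_create_mapping(dev, gasket_dev);
+ *	if (!ret)
+ *		ret = gasket_sysfs_create_entries(dev, my_attrs);
+ */
+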
+/*
+ * Removes a device mapping from the global table.
+ * @device: Device to unmap.
+ *
+ * Description: Removes the device->Gasket device mapping from the internal
+ * table.
+ */
+void gasket_sysfs_remove_mapping(struct device *device);
+
+/*
+ * User data lookup based on kernel device structure.
+ * @device: Kernel device structure.
+ *
+ * Description: Returns the user data associated with "device" in a prior call
+ * to gasket_sysfs_create_entries. Returns NULL if no mapping can be found.
+ * Upon success, this call takes a reference to internal sysfs data that must
+ * be released with gasket_sysfs_put_device_data. While this reference is
+ * held, the underlying device sysfs information/structure will remain
+ * valid/will not be deleted.
+ */
+struct gasket_dev *gasket_sysfs_get_device_data(struct device *device);
+
+/*
+ * Releases a reference to internal data.
+ * @device: Kernel device structure.
+ * @gasket_dev: Gasket device descriptor (returned by
+ * gasket_sysfs_get_device_data).
+ */
+void gasket_sysfs_put_device_data(struct device *device,
+ struct gasket_dev *gasket_dev);
+
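+/*
+ * The get/put pair above is meant to bracket sysfs "show" handlers; a
+ * minimal sketch (the handler itself is hypothetical):
+ *
+ *	static ssize_t my_show(struct device *device,
+ *			       struct device_attribute *attr, char *buf)
+ *	{
+ *		struct gasket_dev *gasket_dev =
+ *			gasket_sysfs_get_device_data(device);
+ *		ssize_t ret;
+ *
+ *		if (!gasket_dev)
+ *			return 0;
+ *		ret = scnprintf(buf, PAGE_SIZE, "%s\n", device->kobj.name);
+ *		gasket_sysfs_put_device_data(device, gasket_dev);
+ *		return ret;
+ *	}
+ */
+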
+/*
+ * Gasket-specific attribute lookup.
+ * @device: Kernel device structure.
+ * @attr: Device attribute to look up.
+ *
+ * Returns the Gasket sysfs attribute associated with the kernel device
+ * attribute and device structure itself. Upon success, this call will take a
+ * reference to internal sysfs data that must be released with a call to
+ * gasket_sysfs_put_attr. While this reference is held, the underlying
+ * device sysfs information/structure will remain valid/will not be deleted.
+ */
+struct gasket_sysfs_attribute *
+gasket_sysfs_get_attr(struct device *device, struct device_attribute *attr);
+
+/*
+ * Releases a reference to internal data.
+ * @device: Kernel device structure.
+ * @attr: Gasket sysfs attribute descriptor (returned by
+ * gasket_sysfs_get_attr).
+ */
+void gasket_sysfs_put_attr(struct device *device,
+ struct gasket_sysfs_attribute *attr);
+
+/*
+ * Write to a register sysfs node.
+ * @buf: NULL-terminated data being written.
+ * @count: number of bytes in the "buf" argument.
+ */
+ssize_t gasket_sysfs_register_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#endif /* __GASKET_SYSFS_H__ */
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index b0927e49d0a81f..6ca288bf405971 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -26,6 +26,7 @@
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
#include <linux/goldfish.h>
MODULE_AUTHOR("Google, Inc.");
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 35acb1a4669b30..db8390022732b6 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -250,7 +250,9 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
if (ret)
return ret;
- __ad7280_read32(st, &tmp);
+ ret = __ad7280_read32(st, &tmp);
+ if (ret)
+ return ret;
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -288,7 +290,9 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
ad7280_delay(st);
- __ad7280_read32(st, &tmp);
+ ret = __ad7280_read32(st, &tmp);
+ if (ret)
+ return ret;
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -321,7 +325,9 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
ad7280_delay(st);
for (i = 0; i < cnt; i++) {
- __ad7280_read32(st, &tmp);
+ ret = __ad7280_read32(st, &tmp);
+ if (ret)
+ return ret;
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -364,7 +370,10 @@ static int ad7280_chain_setup(struct ad7280_state *st)
return ret;
for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
- __ad7280_read32(st, &val);
+ ret = __ad7280_read32(st, &val);
+ if (ret)
+ return ret;
+
if (val == 0)
return n - 1;
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 3abc7789237f72..531338ea5eb482 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -90,12 +90,16 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad7780_state *st = iio_priv(indio_dev);
+ int voltage_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
return ad_sigma_delta_single_conversion(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
- *val = st->int_vref_mv * st->gain;
+ voltage_uv = regulator_get_voltage(st->reg);
+ if (voltage_uv < 0)
+ return voltage_uv;
+ *val = (voltage_uv / 1000) * st->gain;
*val2 = chan->scan_type.realbits - 1;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 5b1c0db33e7f2d..b44253eb62ec34 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -86,7 +86,12 @@ static int ad2s90_probe(struct spi_device *spi)
/* need 600ns between CS and the first falling edge of SCLK */
spi->max_speed_hz = 830000;
spi->mode = SPI_MODE_3;
- spi_setup(spi);
+ ret = spi_setup(spi);
+
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
return 0;
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index ecfe7330235031..46a24b4ead0977 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2621,8 +2621,8 @@ ksocknal_enumerate_interfaces(ksock_net_t *net)
net->ksnn_interfaces[j].ksni_ipaddr = ip;
net->ksnn_interfaces[j].ksni_netmask = mask;
- strncpy(&net->ksnn_interfaces[j].ksni_name[0],
- names[i], IFNAMSIZ);
+ strlcpy(net->ksnn_interfaces[j].ksni_name,
+ names[i], sizeof(net->ksnn_interfaces[j].ksni_name));
j++;
}
@@ -2805,8 +2805,9 @@ ksocknal_startup(lnet_ni_t *ni)
goto fail_1;
}
- strncpy(&net->ksnn_interfaces[i].ksni_name[0],
- ni->ni_interfaces[i], IFNAMSIZ);
+ strlcpy(net->ksnn_interfaces[i].ksni_name,
+ ni->ni_interfaces[i],
+ sizeof(net->ksnn_interfaces[i].ksni_name));
}
net->ksnn_ninterfaces = i;
}
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 1b3bc8386524f9..75f120da0a84c5 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -650,8 +650,8 @@ lnet_parse_route(char *str, int *im_a_router)
INIT_LIST_HEAD(&nets);
/* save a copy of the string for error messages */
- strncpy(cmd, str, sizeof(cmd) - 1);
- cmd[sizeof(cmd) - 1] = 0;
+ strncpy(cmd, str, sizeof(cmd));
+ cmd[sizeof(cmd) - 1] = '\0';
sep = str;
for (;;) {
@@ -972,11 +972,13 @@ lnet_splitnets(char *source, struct list_head *nets)
return 0;
offset += (int)(sep - tb->ltb_text);
- tb2 = lnet_new_text_buf(strlen(sep));
+ len = strlen(sep);
+ tb2 = lnet_new_text_buf(len);
if (tb2 == NULL)
return -ENOMEM;
- strcpy(tb2->ltb_text, sep);
+ strncpy(tb2->ltb_text, sep, len);
+ tb2->ltb_text[len] = '\0';
list_add_tail(&tb2->ltb_list, nets);
tb = tb2;
@@ -1021,8 +1023,8 @@ lnet_match_networks(char **networksp, char *ip2nets, __u32 *ipaddrs, int nip)
tb = list_entry(raw_entries.next, struct lnet_text_buf_t,
ltb_list);
- strncpy(source, tb->ltb_text, sizeof(source)-1);
- source[sizeof(source)-1] = 0;
+ strncpy(source, tb->ltb_text, sizeof(source));
+ source[sizeof(source)-1] = '\0';
/* replace ltb_text with the network(s) add on match */
rc = lnet_match_network_tokens(tb->ltb_text, ipaddrs, nip);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 64a0335934f3d2..1066c70434b11a 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -612,8 +612,8 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
msrq = &(*crpc)->crp_rpc->crpc_reqstmsg.msg_body.mksn_reqst;
msrq->mksn_sid = console_session.ses_id;
msrq->mksn_force = console_session.ses_force;
- strncpy(msrq->mksn_name, console_session.ses_name,
- strlen(console_session.ses_name));
+ strlcpy(msrq->mksn_name, console_session.ses_name,
+ sizeof(msrq->mksn_name));
break;
case LST_TRANS_SESEND:
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index d315dd44ae3b34..ed1bc6ac79dd9c 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1739,7 +1739,8 @@ lstcon_session_new(char *name, int key, unsigned feats,
console_session.ses_feats_updated = 0;
console_session.ses_timeout = (timeout <= 0) ?
LST_CONSOLE_TIMEOUT : timeout;
- strcpy(console_session.ses_name, name);
+ strlcpy(console_session.ses_name, name,
+ sizeof(console_session.ses_name));
rc = lstcon_batch_add(LST_DEFAULT_BATCH);
if (rc != 0)
@@ -1959,7 +1960,8 @@ lstcon_acceptor_handle(srpc_server_rpc_t *rpc)
if (grp->grp_userland == 0)
grp->grp_userland = 1;
- strcpy(jrep->join_session, console_session.ses_name);
+ strlcpy(jrep->join_session, console_session.ses_name,
+ sizeof(jrep->join_session));
jrep->join_timeout = console_session.ses_timeout;
jrep->join_status = 0;
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 5e1ac129a681e2..7c6933ffc9c177 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -68,6 +68,7 @@
everything as string options */
#define LMD_MAGIC 0xbdacbd03
+#define LMD_PARAMS_MAXLEN 4096
/* gleaned from the mount command - no persistent info here */
struct lustre_mount_data {
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 1d1c67164418f4..170775bc7bc055 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -512,9 +512,9 @@ int libcfs_debug_init(unsigned long bufsize)
}
if (libcfs_debug_file_path != NULL) {
- strncpy(libcfs_debug_file_path_arr,
- libcfs_debug_file_path, PATH_MAX-1);
- libcfs_debug_file_path_arr[PATH_MAX - 1] = '\0';
+ strlcpy(libcfs_debug_file_path_arr,
+ libcfs_debug_file_path,
+ sizeof(libcfs_debug_file_path_arr));
}
/* If libcfs_debug_mb is set to an invalid value or uninitialized
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 03087442895212..55fc2190a5bb85 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -1062,8 +1062,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
if (hs == NULL)
return NULL;
- strncpy(hs->hs_name, name, len);
- hs->hs_name[len - 1] = '\0';
+ strlcpy(hs->hs_name, name, len);
hs->hs_flags = flags;
atomic_set(&hs->hs_refcount, 1);
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index e1143a566ac4aa..f6cc434af75662 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -360,8 +360,8 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
if (sched == NULL)
return -ENOMEM;
- strncpy(sched->ws_name, name, CFS_WS_NAME_LEN);
- sched->ws_name[CFS_WS_NAME_LEN - 1] = '\0';
+ strlcpy(sched->ws_name, name, CFS_WS_NAME_LEN);
+
sched->ws_cptab = cptab;
sched->ws_cpt = cpt;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 5c9502b5b3582a..951259a9832335 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -641,7 +641,7 @@ static int ll_send_mgc_param(struct obd_export *mgc, char *string)
if (!msp)
return -ENOMEM;
- strncpy(msp->mgs_param, string, MGS_PARAM_MAXLEN);
+ strlcpy(msp->mgs_param, string, sizeof(msp->mgs_param));
rc = obd_set_info_async(NULL, mgc, sizeof(KEY_SET_INFO), KEY_SET_INFO,
sizeof(struct mgs_send_param), msp, NULL);
if (rc)
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 635a93cc94deee..eed7603db5ec0c 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -700,7 +700,7 @@ repeat_fid2path:
memmove(ptr + strlen(gf->gf_path) + 1, ptr,
strlen(ori_gf->gf_path));
- strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
+ strcpy(ptr, gf->gf_path);
ptr += strlen(gf->gf_path);
*ptr = '/';
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index b03827ef65145b..b43ce6cd64c2aa 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -412,8 +412,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
if (!new_pool)
return -ENOMEM;
- strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
- new_pool->pool_name[LOV_MAXPOOLNAME] = '\0';
+ strlcpy(new_pool->pool_name, poolname, sizeof(new_pool->pool_name));
new_pool->pool_lobd = obd;
/* ref count init to 1 because when created a pool is always used
* up to deletion
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 48003d5325e320..7617c57d16e0c4 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -892,7 +892,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
}
lmd->lmd_magic = LMD_MAGIC;
- lmd->lmd_params = kzalloc(4096, GFP_NOFS);
+ lmd->lmd_params = kzalloc(LMD_PARAMS_MAXLEN, GFP_NOFS);
if (!lmd->lmd_params)
return -ENOMEM;
lmd->lmd_params[0] = '\0';
@@ -978,7 +978,7 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
goto invalid;
clear++;
} else if (strncmp(s1, "param=", 6) == 0) {
- int length;
+ size_t length, params_length;
char *tail = strchr(s1 + 6, ',');
if (tail == NULL)
@@ -986,8 +986,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
else
length = tail - s1;
length -= 6;
+ params_length = strlen(lmd->lmd_params);
+ if (params_length + length + 1 >= LMD_PARAMS_MAXLEN)
+ return -E2BIG;
strncat(lmd->lmd_params, s1 + 6, length);
- strcat(lmd->lmd_params, " ");
+ lmd->lmd_params[params_length + length] = '\0';
+ strlcat(lmd->lmd_params, " ", LMD_PARAMS_MAXLEN);
clear++;
} else if (strncmp(s1, "osd=", 4) == 0) {
rc = lmd_parse_string(&lmd->lmd_osd_type, s1 + 4);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index ce036a1ac4663f..ac87aa12bd7e72 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -422,6 +422,7 @@ static int ptlrpcd(void *arg)
complete(&pc->pc_starting);
/*
+
* This mainloop strongly resembles ptlrpc_set_wait() except that our
* set never completes. ptlrpcd_check() calls ptlrpc_check_set() when
* there are requests in the set. New requests come in on the set's
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index 7ff948fe142472..7a206705865b76 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -83,8 +83,7 @@ int sptlrpc_parse_flavor(const char *str, struct sptlrpc_flavor *flvr)
return 0;
}
- strncpy(buf, str, sizeof(buf));
- buf[sizeof(buf) - 1] = '\0';
+ strlcpy(buf, str, sizeof(buf));
bulk = strchr(buf, '-');
if (bulk)
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index 2a0158bb497420..5a78ef057635e1 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -22,6 +21,8 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
+#include <asm/cacheflush.h>
+
#include "iss_video.h"
#include "iss.h"
diff --git a/drivers/staging/rdma/hfi1/ud.c b/drivers/staging/rdma/hfi1/ud.c
index 5a9c784bec04c5..a88e37444be0e4 100644
--- a/drivers/staging/rdma/hfi1/ud.c
+++ b/drivers/staging/rdma/hfi1/ud.c
@@ -793,7 +793,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
diff --git a/drivers/staging/rdma/hfi1/user_pages.c b/drivers/staging/rdma/hfi1/user_pages.c
index 9071afbd7bf44d..b776b74d3d1465 100644
--- a/drivers/staging/rdma/hfi1/user_pages.c
+++ b/drivers/staging/rdma/hfi1/user_pages.c
@@ -85,7 +85,7 @@ static int __hfi1_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got, FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
diff --git a/drivers/staging/rdma/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
index d29b4daf61f834..f69ec728e0de8f 100644
--- a/drivers/staging/rdma/ipath/ipath_user_pages.c
+++ b/drivers/staging/rdma/ipath/ipath_user_pages.c
@@ -72,7 +72,7 @@ static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
for (got = 0; got < num_pages; got += ret) {
ret = get_user_pages(current, current->mm,
start_page + got * PAGE_SIZE,
- num_pages - got, 1, 1,
+ num_pages - got, FOLL_WRITE | FOLL_FORCE,
p + got, NULL);
if (ret < 0)
goto bail_release;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index c2d2c17550a7c1..951f22265105b7 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 12a3893b98fd4f..ade29c4295b770 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -536,7 +536,7 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
if (sendbytes > 8) {
memcpy(buf, inquiry_buf, 8);
- memcpy(buf + 8, inquiry_string, sendbytes - 8);
+ strncpy(buf + 8, inquiry_string, sendbytes - 8);
if (pro_formatter_flag) {
/* Additional Length */
buf[4] = 0x33;
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index d6c498209b2c6a..dc91cd29f1d2ab 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4165,12 +4165,6 @@ RTY_SEND_CMD:
rtsx_trace(chip);
return STATUS_FAIL;
}
-
- } else if (rsp_type == SD_RSP_TYPE_R0) {
- if ((ptr[3] & 0x1E) != 0x03) {
- rtsx_trace(chip);
- return STATUS_FAIL;
- }
}
}
}
@@ -5031,7 +5025,7 @@ int sd_execute_write_data(struct scsi_cmnd *srb, struct rtsx_chip *chip)
goto SD_Execute_Write_Cmd_Failed;
}
- rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
goto SD_Execute_Write_Cmd_Failed;
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index 10fea7bb8f3088..3db4a2570b1946 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -1252,7 +1252,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
reg = 0;
rtsx_read_register(chip, XD_CTL, &reg);
if (reg & (XD_ECC1_ERROR | XD_ECC2_ERROR)) {
- wait_timeout(100);
+ mdelay(100);
if (detect_card_cd(chip,
XD_CARD) != STATUS_SUCCESS) {
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 06ef26872462e5..52aed7cfeb24bc 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -387,7 +387,7 @@ static ssize_t synth_store(struct kobject *kobj, struct kobj_attribute *attr,
len = strlen(buf);
if (len < 2 || len > 9)
return -EINVAL;
- strncpy(new_synth_name, buf, len);
+ memcpy(new_synth_name, buf, len);
if (new_synth_name[len - 1] == '\n')
len--;
new_synth_name[len] = '\0';
@@ -514,7 +514,7 @@ static ssize_t punc_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
}
- strncpy(punc_buf, buf, x);
+ memcpy(punc_buf, buf, x);
while (x && punc_buf[x - 1] == '\n')
x--;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 8fd8f3a2d1bf58..58b6403458b79d 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -972,8 +972,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
return;
}
- MACvIntDisable(priv->PortOffset);
-
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
@@ -1062,8 +1060,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
}
spin_unlock_irqrestore(&priv->lock, flags);
-
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static void vnt_interrupt_work(struct work_struct *work)
@@ -1073,14 +1069,17 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
+
+ MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
{
struct vnt_private *priv = arg;
- if (priv->vif)
- schedule_work(&priv->interrupt_work);
+ schedule_work(&priv->interrupt_work);
+
+ MACvIntDisable(priv->PortOffset);
return IRQ_HANDLED;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 58fe27705b96c8..cbb4414edd71b4 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4232,9 +4232,9 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
struct se_cmd *se_cmd = &cmd->se_cmd;
if (se_cmd->se_tfo != NULL) {
- spin_lock(&se_cmd->t_state_lock);
+ spin_lock_irq(&se_cmd->t_state_lock);
se_cmd->transport_state |= CMD_T_FABRIC_STOP;
- spin_unlock(&se_cmd->t_state_lock);
+ spin_unlock_irq(&se_cmd->t_state_lock);
}
}
spin_unlock_bh(&conn->cmd_lock);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 47e249dccb5fe7..3184e023a05298 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
- }
-}
-
static void chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -59,7 +38,7 @@ static void chap_gen_challenge(
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ bin2hex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
@@ -241,9 +220,16 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_R.\n");
goto out;
}
+ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
@@ -292,7 +278,7 @@ static int chap_server_compute_md5(
}
crypto_free_hash(tfm);
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -348,9 +334,7 @@ static int chap_server_compute_md5(
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
+ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
@@ -359,6 +343,11 @@ static int chap_server_compute_md5(
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
+ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ pr_err("Malformed CHAP_C\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
@@ -433,7 +422,7 @@ static int chap_server_compute_md5(
/*
* Convert response from binary hex to ascii hext.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bc2cbffec27ec9..63e54beed196ba 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -323,8 +323,7 @@ static int iscsi_login_zero_tsih_s1(
pr_err("idr_alloc() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess);
- return -ENOMEM;
+ goto free_sess;
}
sess->creation_time = get_jiffies_64();
@@ -340,20 +339,28 @@ static int iscsi_login_zero_tsih_s1(
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- kfree(sess);
- return -ENOMEM;
+ goto remove_idr;
}
sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- kfree(sess->sess_ops);
- kfree(sess);
- return -ENOMEM;
+ goto free_ops;
}
return 0;
+
+free_ops:
+ kfree(sess->sess_ops);
+remove_idr:
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+free_sess:
+ kfree(sess);
+ conn->sess = NULL;
+ return -ENOMEM;
}
static int iscsi_login_zero_tsih_s2(
@@ -1142,13 +1149,13 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
ISCSI_LOGIN_STATUS_INIT_ERR);
if (!zero_tsih || !conn->sess)
goto old_sess_out;
- if (conn->sess->se_sess)
- transport_free_session(conn->sess->se_sess);
- if (conn->sess->session_index != 0) {
- spin_lock_bh(&sess_idr_lock);
- idr_remove(&sess_idr, conn->sess->session_index);
- spin_unlock_bh(&sess_idr_lock);
- }
+
+ transport_free_session(conn->sess->se_sess);
+
+ spin_lock_bh(&sess_idr_lock);
+ idr_remove(&sess_idr, conn->sess->session_index);
+ spin_unlock_bh(&sess_idr_lock);
+
kfree(conn->sess->sess_ops);
kfree(conn->sess);
conn->sess = NULL;
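The login path above is reshaped into the kernel's usual single-exit goto ladder: each acquisition gets a label, and every failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A stripped-down sketch of the shape (the session fields and helpers are placeholders):

#include <stdlib.h>

struct session {
	void *ops;
	int id;
};

static int id_alloc(struct session *s)   { s->id = 1; return 0; }
static void id_remove(struct session *s) { s->id = 0; }

static int setup_session(struct session **out)
{
	struct session *s = calloc(1, sizeof(*s));

	if (!s)
		return -1;
	if (id_alloc(s) < 0)
		goto free_sess;
	s->ops = malloc(64);
	if (!s->ops)
		goto remove_id;

	*out = s;
	return 0;

	/* Unwind in the reverse order of acquisition. */
remove_id:
	id_remove(s);
free_sess:
	free(s);
	*out = NULL;		/* like conn->sess = NULL above */
	return -1;
}

Clearing the caller-visible pointer at the end matters: the caller's error path must not try to tear down a session this function already freed.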
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 63e1dcc5914d05..761b065a40bb38 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -637,8 +637,7 @@ int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
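The dropped strncat() calls were bounded by strlen() of the *source*, which is exactly the number of bytes strncat() would copy anyway, so the bound prevented nothing; strlcat() instead takes the total size of the destination and truncates. A sketch of BSD-style strlcat() semantics for environments without it (sketch_strlcat is an assumption, not a libc function):

#include <stdio.h>
#include <string.h>

/* Bounded by the destination size; always NUL-terminates if it fits. */
static size_t sketch_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);
	size_t room;

	if (dlen == size)
		return size + slen;	/* dst was not terminated in size */
	room = size - dlen - 1;
	if (room > slen)
		room = slen;
	memcpy(dst + dlen, src, room);
	dst[dlen + room] = '\0';
	return dlen + slen;		/* total length it tried to create */
}

int main(void)
{
	char buf1[16] = "CHAP";

	sketch_strlcat(buf1, "," "None", sizeof(buf1));
	puts(buf1);	/* "CHAP,None", never written past buf1[15] */
	return 0;
}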
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 9413e1a949e5bf..5af4d6a03d6ef9 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -108,12 +108,17 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[7] = 0x2; /* CmdQue=1 */
- memcpy(&buf[8], "LIO-ORG ", 8);
- memset(&buf[16], 0x20, 16);
+ /*
+ * ASCII data fields described as being left-aligned shall have any
+ * unused bytes at the end of the field (i.e., highest offset) and the
+ * unused bytes shall be filled with ASCII space characters (20h).
+ */
+ memset(&buf[8], 0x20, 8 + 16 + 4);
+ memcpy(&buf[8], "LIO-ORG", sizeof("LIO-ORG") - 1);
memcpy(&buf[16], dev->t10_wwn.model,
- min_t(size_t, strlen(dev->t10_wwn.model), 16));
+ strnlen(dev->t10_wwn.model, 16));
memcpy(&buf[32], dev->t10_wwn.revision,
- min_t(size_t, strlen(dev->t10_wwn.revision), 4));
+ strnlen(dev->t10_wwn.revision, 4));
buf[4] = 31; /* Set additional length to 31 */
return 0;
@@ -251,7 +256,9 @@ check_t10_vend_desc:
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
- memcpy(&buf[off+4], "LIO-ORG", 8);
+ /* left align Vendor ID and pad with spaces */
+ memset(&buf[off+4], 0x20, 8);
+ memcpy(&buf[off+4], "LIO-ORG", sizeof("LIO-ORG") - 1);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
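SPC defines left-aligned ASCII inquiry fields as space-padded (20h), so the safe idiom is to memset() the whole fixed-width field with spaces first, then memcpy() the string without its NUL terminator; that is what both hunks above switch to. The idiom in isolation:

#include <stdio.h>
#include <string.h>

/* Fill an 8-byte T10 vendor field: pad with spaces, copy without NUL. */
static void set_vendor_field(unsigned char *field, const char *vendor)
{
	memset(field, 0x20, 8);				/* ASCII spaces, per SPC */
	memcpy(field, vendor, strnlen(vendor, 8));	/* left-aligned */
}

int main(void)
{
	unsigned char field[8];

	set_vendor_field(field, "LIO-ORG");
	printf("\"%.8s\"\n", (const char *)field);	/* "LIO-ORG " */
	return 0;
}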
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 21f888ac550ea2..7199bac6733350 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -306,6 +306,7 @@ void __transport_register_session(
{
const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
unsigned char buf[PR_REG_ISID_LEN];
+ unsigned long flags;
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr;
@@ -342,7 +343,7 @@ void __transport_register_session(
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
- spin_lock_irq(&se_nacl->nacl_sess_lock);
+ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
/*
* The se_nacl->nacl_sess pointer will be set to the
* last active I_T Nexus for each struct se_node_acl.
@@ -351,7 +352,7 @@ void __transport_register_session(
list_add_tail(&se_sess->sess_acl_list,
&se_nacl->acl_sess_list);
- spin_unlock_irq(&se_nacl->nacl_sess_lock);
+ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 3be9519654e510..cf3fad2cb87140 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -2,7 +2,7 @@
* TURBOchannel bus services.
*
* Copyright (c) Harald Koerfgen, 1998
- * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
+ * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU
@@ -10,6 +10,7 @@
* directory of this archive for more details.
*/
#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
tdev->dev.bus = &tc_bus_type;
tdev->slot = slot;
+ /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
+ tdev->dma_mask = DMA_BIT_MASK(34);
+ tdev->dev.dma_mask = &tdev->dma_mask;
+ tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
+
for (i = 0; i < 8; i++) {
tdev->firmware[i] =
readb(module + offset + TC_FIRM_VER + 4 * i);
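DMA_BIT_MASK(34) evaluates to a mask with the low 34 bits set, i.e. the 16 GiB address window the comment describes. A sketch of the arithmetic, mirroring the kernel macro including its n == 64 guard against an undefined 64-bit shift:

#include <stdio.h>

#define SKETCH_DMA_BIT_MASK(n) \
	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* 34 address bits -> 0x3ffffffff, a 16 GiB space */
	printf("0x%llx\n", SKETCH_DMA_BIT_MASK(34));
	return 0;
}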
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index cf0fc46a92708a..425a6ea6364907 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -195,7 +195,7 @@ config IMX_THERMAL
passive trip is crossed.
config SPEAR_THERMAL
- bool "SPEAr thermal sensor driver"
+ tristate "SPEAr thermal sensor driver"
depends on PLAT_SPEAR || COMPILE_TEST
depends on OF
help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
framework.
config DB8500_THERMAL
- bool "DB8500 thermal management"
- depends on ARCH_U8500
+ tristate "DB8500 thermal management"
+ depends on MFD_DB8500_PRCMU
default y
help
Adds DB8500 thermal management implementation according to the thermal
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index ff3b36f339e34f..06d46e2ff337e9 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -416,7 +416,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
proc_priv->soc_dts = intel_soc_dts_iosf_init(
INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
- if (proc_priv->soc_dts && pdev->irq) {
+ if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
ret = pci_enable_msi(pdev);
if (!ret) {
ret = request_threaded_irq(pdev->irq, NULL,
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4c30c696875099..887d946da8e32a 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -286,10 +286,13 @@ static int of_thermal_set_mode(struct thermal_zone_device *tz,
mutex_lock(&tz->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED) {
tz->polling_delay = data->polling_delay;
- else
+ tz->passive_delay = data->passive_delay;
+ } else {
tz->polling_delay = 0;
+ tz->passive_delay = 0;
+ }
mutex_unlock(&tz->lock);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 52a95d8e20bd1a..fbf6281ea27ac3 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -585,6 +585,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev)
threshold_code = temp_to_code(data, temp);
rising_threshold = readl(data->base + rising_reg_offset);
+ rising_threshold &= ~(0xff << j * 8);
rising_threshold |= (threshold_code << j * 8);
writel(rising_threshold, data->base + rising_reg_offset);
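The one added line is a read-modify-write fix: OR-ing the new threshold code into the register without clearing the old byte first leaves stale bits set whenever the new code has fewer bits than the previous one. The field update as a self-contained sketch:

#include <stdint.h>
#include <stdio.h>

/* Replace byte j of a packed 4 x 8-bit threshold register image. */
static uint32_t set_threshold(uint32_t reg, unsigned int j, uint8_t code)
{
	reg &= ~(0xffu << (j * 8));		/* clear the old field */
	reg |= (uint32_t)code << (j * 8);	/* then set the new one */
	return reg;
}

int main(void)
{
	uint32_t reg = set_threshold(0x00ff0000, 2, 0x10);

	printf("0x%08x\n", reg);	/* 0x00100000, not 0x00ff0000 ORed */
	return 0;
}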
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
index c798fdb2ae4368..f97f76691bd0eb 100644
--- a/drivers/thermal/thermal_hwmon.h
+++ b/drivers/thermal/thermal_hwmon.h
@@ -34,13 +34,13 @@
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
#else
-static int
+static inline int
thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
return 0;
}
-static void
+static inline void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
}
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 47b54c6aefd268..9f660e55d1baa6 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -323,7 +323,6 @@ static void udbg_init_opal_common(void)
udbg_putc = udbg_opal_putc;
udbg_getc = udbg_opal_getc;
udbg_getc_poll = udbg_opal_getc_poll;
- tb_ticks_per_usec = 0x200; /* Make udelay not suck */
}
void __init hvc_opal_init_early(void)
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 6d1e2f746ab482..8d6253903f24f8 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -598,6 +598,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
/* too large for caller's buffer */
ret = -EOVERFLOW;
} else {
+ __set_current_state(TASK_RUNNING);
if (copy_to_user(buf, rbuf->buf, rbuf->count))
ret = -EFAULT;
else
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 190e5dc15738b6..b74de014cef1da 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -128,6 +128,8 @@ struct n_tty_data {
struct mutex output_lock;
};
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
static inline size_t read_cnt(struct n_tty_data *ldata)
{
return ldata->read_head - ldata->read_tail;
@@ -145,6 +147,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
{
+ smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
}
@@ -162,15 +165,29 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
+/* If we are not echoing the data, perhaps this is a secret so erase it */
+static inline void zero_buffer(struct tty_struct *tty, u8 *buffer, int size)
+{
+ bool icanon = !!L_ICANON(tty);
+ bool no_echo = !L_ECHO(tty);
+
+ if (icanon && no_echo)
+ memset(buffer, 0x00, size);
+}
+
static inline int tty_copy_to_user(struct tty_struct *tty,
void __user *to,
- const void *from,
+ void *from,
unsigned long n)
{
struct n_tty_data *ldata = tty->disc_data;
+ int retval;
tty_audit_add_data(tty, from, n, ldata->icanon);
- return copy_to_user(to, from, n);
+ retval = copy_to_user(to, from, n);
+ if (!retval)
+ zero_buffer(tty, from, n);
+ return retval;
}
/**
@@ -322,9 +339,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
- ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
ldata->commit_head = 0;
- ldata->echo_mark = 0;
ldata->line_start = 0;
ldata->erasing = 0;
@@ -645,13 +660,20 @@ static size_t __process_echoes(struct tty_struct *tty)
old_space = space = tty_write_room(tty);
tail = ldata->echo_tail;
- while (ldata->echo_commit != tail) {
+ while (MASK(ldata->echo_commit) != MASK(tail)) {
c = echo_buf(ldata, tail);
if (c == ECHO_OP_START) {
unsigned char op;
int no_space_left = 0;
/*
+ * Since add_echo_byte() is called without holding
+ * output_lock, we might see only a portion of a multi-byte
+ * operation.
+ */
+ if (MASK(ldata->echo_commit) == MASK(tail + 1))
+ goto not_yet_stored;
+ /*
* If the buffer byte is the start of a multi-byte
* operation, get the next byte, which is either the
* op code or a control character value.
@@ -662,6 +684,8 @@ static size_t __process_echoes(struct tty_struct *tty)
unsigned int num_chars, num_bs;
case ECHO_OP_ERASE_TAB:
+ if (MASK(ldata->echo_commit) == MASK(tail + 2))
+ goto not_yet_stored;
num_chars = echo_buf(ldata, tail + 2);
/*
@@ -756,7 +780,8 @@ static size_t __process_echoes(struct tty_struct *tty)
/* If the echo buffer is nearly full (so that the possibility exists
* of echo overrun before the next commit), then discard enough
* data at the tail to prevent a subsequent overrun */
- while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+ while (ldata->echo_commit > tail &&
+ ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
if (echo_buf(ldata, tail) == ECHO_OP_START) {
if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
tail += 3;
@@ -766,6 +791,7 @@ static size_t __process_echoes(struct tty_struct *tty)
tail++;
}
+ not_yet_stored:
ldata->echo_tail = tail;
return old_space - space;
}
@@ -776,6 +802,7 @@ static void commit_echoes(struct tty_struct *tty)
size_t nr, old, echoed;
size_t head;
+ mutex_lock(&ldata->output_lock);
head = ldata->echo_head;
ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
@@ -784,10 +811,12 @@ static void commit_echoes(struct tty_struct *tty)
* is over the threshold (and try again each time another
* block is accumulated) */
nr = head - ldata->echo_tail;
- if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+ if (nr < ECHO_COMMIT_WATERMARK ||
+ (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+ mutex_unlock(&ldata->output_lock);
return;
+ }
- mutex_lock(&ldata->output_lock);
ldata->echo_commit = head;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
@@ -838,7 +867,9 @@ static void flush_echoes(struct tty_struct *tty)
static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
- *echo_buf_addr(ldata, ldata->echo_head++) = c;
+ *echo_buf_addr(ldata, ldata->echo_head) = c;
+ smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+ ldata->echo_head++;
}
/**
@@ -1006,14 +1037,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
}
seen_alnums = 0;
- while (ldata->read_head != ldata->canon_head) {
+ while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
head = ldata->read_head;
/* erase a single possibly multibyte character */
do {
head--;
c = read_buf(ldata, head);
- } while (is_continuation(c, tty) && head != ldata->canon_head);
+ } while (is_continuation(c, tty) &&
+ MASK(head) != MASK(ldata->canon_head));
/* do not partially erase */
if (is_continuation(c, tty))
@@ -1055,7 +1087,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* This info is used to go back the correct
* number of columns.
*/
- while (tail != ldata->canon_head) {
+ while (MASK(tail) != MASK(ldata->canon_head)) {
tail--;
c = read_buf(ldata, tail);
if (c == '\t') {
@@ -1332,7 +1364,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
finish_erasing(ldata);
echo_char(c, tty);
echo_char_raw('\n', ldata);
- while (tail != ldata->read_head) {
+ while (MASK(tail) != MASK(ldata->read_head)) {
echo_char(read_buf(ldata, tail), tty);
tail++;
}
@@ -1917,31 +1949,22 @@ static int n_tty_open(struct tty_struct *tty)
struct n_tty_data *ldata;
/* Currently a malloc failure here can panic */
- ldata = vmalloc(sizeof(*ldata));
+ ldata = vzalloc(sizeof(*ldata));
if (!ldata)
- goto err;
+ return -ENOMEM;
ldata->overrun_time = jiffies;
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
tty->disc_data = ldata;
- reset_buffer_flags(tty->disc_data);
- ldata->column = 0;
- ldata->canon_column = 0;
ldata->minimum_to_wake = 1;
- ldata->num_overrun = 0;
- ldata->no_room = 0;
- ldata->lnext = 0;
tty->closing = 0;
/* indicate buffer work may resume */
clear_bit(TTY_LDISC_HALTED, &tty->flags);
n_tty_set_termios(tty, NULL);
tty_unthrottle(tty);
-
return 0;
-err:
- return -ENOMEM;
}
static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -1996,6 +2019,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty);
tty_audit_add_data(tty, read_buf_addr(ldata, tail), n,
ldata->icanon);
+ zero_buffer(tty, read_buf_addr(ldata, tail), n);
smp_store_release(&ldata->read_tail, ldata->read_tail + n);
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
@@ -2479,7 +2503,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
tail = ldata->read_tail;
nr = head - tail;
/* Skip EOF-chars.. */
- while (head != tail) {
+ while (MASK(head) != MASK(tail)) {
if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
read_buf(ldata, tail) == __DISABLED_CHAR)
nr--;
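All of the n_tty changes above serve one lock-free single-producer/single-consumer ring: head and tail are free-running counters folded into the power-of-two buffer with MASK(), and the smp_wmb() in add_echo_byte() pairs with the smp_rmb() in echo_buf() so a reader that sees the advanced head also sees the byte stored before it. A compacted C11 sketch of that ordering, using release/acquire atomics in place of the kernel barriers (a portability assumption, not the kernel API):

#include <stdatomic.h>
#include <stddef.h>

#define BUF_SIZE 4096			/* power of two, like N_TTY_BUF_SIZE */
#define MASK(x)  ((x) & (BUF_SIZE - 1))

struct ring {
	unsigned char buf[BUF_SIZE];
	_Atomic size_t head;		/* advanced by the producer only */
	size_t tail;			/* owned by the consumer */
};

static void ring_put(struct ring *r, unsigned char c)
{
	size_t head = atomic_load_explicit(&r->head, memory_order_relaxed);

	r->buf[MASK(head)] = c;
	/* Publish data before head; pairs with the consumer's acquire. */
	atomic_store_explicit(&r->head, head + 1, memory_order_release);
}

static int ring_get(struct ring *r, unsigned char *c)
{
	size_t head = atomic_load_explicit(&r->head, memory_order_acquire);

	if (head == r->tail)
		return 0;		/* empty */
	*c = r->buf[MASK(r->tail)];
	r->tail++;
	return 1;
}

The MASK() comparisons in the hunks exist because the counters are no longer unconditionally reset, so head and tail can legitimately be far apart; comparing masked values keeps the loop-termination conditions correct across that.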
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 96aa0ad3249751..d9c54cf61bd722 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -106,16 +106,19 @@ static void pty_unthrottle(struct tty_struct *tty)
static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
{
struct tty_struct *to = tty->link;
+ unsigned long flags;
if (tty->stopped)
return 0;
if (c > 0) {
+ spin_lock_irqsave(&to->port->lock, flags);
/* Stuff the data into the input queue of the other end */
c = tty_insert_flip_string(to->port, buf, c);
/* And shovel */
if (c)
tty_flip_buffer_push(to->port);
+ spin_unlock_irqrestore(&to->port->lock, flags);
}
return c;
}
@@ -813,7 +816,7 @@ out_free_file:
return retval;
}
-static struct file_operations ptmx_fops;
+static struct file_operations ptmx_fops __ro_after_init;
static void __init unix98_pty_init(void)
{
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 802eac7e561b85..2b8f2e0a4224b9 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -1915,7 +1915,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
ByteIO_t UPCIRingInd = 0;
if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
- pci_enable_device(dev))
+ pci_enable_device(dev) || i >= NUM_BOARDS)
return 0;
rcktpt_io_addr[i] = pci_resource_start(dev, 0);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 79339041d8b160..10ecbe7638a3aa 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -249,7 +249,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios,
unsigned int rate;
int ret;
- if (IS_ERR(d->clk) || !old)
+ if (IS_ERR(d->clk))
goto out;
clk_disable_unprepare(d->clk);
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 78883ca64dddeb..035810bbcd02ce 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -231,17 +231,17 @@ static int mtk8250_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
- pm_runtime_enable(&pdev->dev);
- if (!pm_runtime_enabled(&pdev->dev)) {
- err = mtk8250_runtime_resume(&pdev->dev);
- if (err)
- return err;
- }
+ err = mtk8250_runtime_resume(&pdev->dev);
+ if (err)
+ return err;
data->line = serial8250_register_8250_port(&uart);
if (data->line < 0)
return data->line;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
return 0;
}
@@ -252,13 +252,11 @@ static int mtk8250_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
serial8250_unregister_port(data->line);
+ mtk8250_runtime_suspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
- if (!pm_runtime_status_suspended(&pdev->dev))
- mtk8250_runtime_suspend(&pdev->dev);
-
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 746c76b358a008..b032add9272244 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -2326,6 +2326,111 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.setup = pci_default_setup,
.exit = pci_plx9050_exit,
},
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_ACCESIO,
+ .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_pericom_setup,
+ },
/*
* SBS Technologies, Inc., PMC-OCTALPRO 232
*/
@@ -5176,10 +5281,10 @@ static struct pci_device_id serial_pci_tbl[] = {
*/
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -5188,10 +5293,10 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -5200,10 +5305,10 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -5212,13 +5317,13 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7951 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -5227,16 +5332,16 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7954 },
@@ -5245,13 +5350,13 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
+ pbn_pericom_PI7C9X7952 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
@@ -5260,19 +5365,19 @@ static struct pci_device_id serial_pci_tbl[] = {
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_pericom_PI7C9X7958 },
{ PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
+ pbn_pericom_PI7C9X7954 },
/*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 4d180c9423effe..1a14948c86d6eb 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -629,8 +629,10 @@ static int serial_config(struct pcmcia_device * link)
(link->has_func_id) &&
(link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
- (link->func_id == CISTPL_FUNCID_SERIAL)))
- pcmcia_loop_config(link, serial_check_for_multi, info);
+ (link->func_id == CISTPL_FUNCID_SERIAL))) {
+ if (pcmcia_loop_config(link, serial_check_for_multi, info))
+ goto failed;
+ }
/*
* Apply any multi-port quirk.
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index e0277cf0bf5848..f5c4e92b51724d 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1167,6 +1167,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
sg_dma_len(&atmel_port->sg_rx)/2,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(port->dev, "Preparing DMA cyclic failed\n");
+ goto chan_err;
+ }
desc->callback = atmel_complete_rx_dma;
desc->callback_param = port;
atmel_port->desc_rx = desc;
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d3e3d42c0c1290..0040c29f651aa4 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1068,8 +1068,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
/* Get the address of the host memory buffer.
*/
bdp = pinfo->rx_cur;
- while (bdp->cbd_sc & BD_SC_EMPTY)
- ;
+ if (bdp->cbd_sc & BD_SC_EMPTY)
+ return NO_POLL_CHAR;
/* If the buffer address is in the CPM DPRAM, don't
* convert it.
@@ -1104,7 +1104,11 @@ static int cpm_get_poll_char(struct uart_port *port)
poll_chars = 0;
}
if (poll_chars <= 0) {
- poll_chars = poll_wait_key(poll_buf, pinfo);
+ int ret = poll_wait_key(poll_buf, pinfo);
+
+ if (ret == NO_POLL_CHAR)
+ return ret;
+ poll_chars = ret;
pollp = poll_buf;
}
poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 01e2274b23f2e1..1544a7cc76ff82 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1267,6 +1267,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
else
cr1 &= ~UARTCR1_PT;
}
+ } else {
+ cr1 &= ~UARTCR1_PE;
}
/* ask the core to calculate the divisor */
@@ -1402,10 +1404,12 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
else
ctrl &= ~UARTCTRL_PT;
}
+ } else {
+ ctrl &= ~UARTCTRL_PE;
}
/* ask the core to calculate the divisor */
- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
spin_lock_irqsave(&sport->port.lock, flags);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 07ede982b4729b..f5f46c121ee392 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1997,6 +1997,14 @@ static int serial_imx_probe(struct platform_device *pdev)
dev_name(&pdev->dev), sport);
if (ret)
return ret;
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+ return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 7d524603a55d3c..f0bcc55e1c28f3 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -141,19 +141,6 @@ static void kgdboc_unregister_kbd(void)
#define kgdboc_restore_input()
#endif /* ! CONFIG_KDB_KEYBOARD */
-static int kgdboc_option_setup(char *opt)
-{
- if (strlen(opt) >= MAX_CONFIG_LEN) {
- printk(KERN_ERR "kgdboc: config string too long\n");
- return -ENOSPC;
- }
- strcpy(config, opt);
-
- return 0;
-}
-
-__setup("kgdboc=", kgdboc_option_setup);
-
static void cleanup_kgdboc(void)
{
if (kgdb_unregister_nmi_console())
@@ -167,15 +154,15 @@ static int configure_kgdboc(void)
{
struct tty_driver *p;
int tty_line = 0;
- int err;
+ int err = -ENODEV;
char *cptr = config;
struct console *cons;
- err = kgdboc_option_setup(config);
- if (err || !strlen(config) || isspace(config[0]))
+ if (!strlen(config) || isspace(config[0])) {
+ err = 0;
goto noconfig;
+ }
- err = -ENODEV;
kgdboc_io_ops.is_console = 0;
kgdb_tty_driver = NULL;
@@ -257,7 +244,7 @@ static void kgdboc_put_char(u8 chr)
static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
{
- int len = strlen(kmessage);
+ size_t len = strlen(kmessage);
if (len >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdboc: config string too long\n");
@@ -279,7 +266,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
- if (config[len - 1] == '\n')
+ if (len && config[len - 1] == '\n')
config[len - 1] = '\0';
if (configured == 1)
@@ -323,6 +310,25 @@ static struct kgdb_io kgdboc_io_ops = {
};
#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+static int kgdboc_option_setup(char *opt)
+{
+ if (!opt) {
+ pr_err("config string not provided\n");
+ return -EINVAL;
+ }
+
+ if (strlen(opt) >= MAX_CONFIG_LEN) {
+ pr_err("config string too long\n");
+ return -ENOSPC;
+ }
+ strcpy(config, opt);
+
+ return 0;
+}
+
+__setup("kgdboc=", kgdboc_option_setup);
+
+
/* This is only available if kgdboc is a built in for early debugging */
static int __init kgdboc_early_init(char *opt)
{
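Two hardening fixes meet in this file: param_set_kgdboc_var() now guards the trailing-newline chop with len so an empty string cannot index config[-1], and the relocated __setup handler rejects a NULL option before calling strlen() on it. The same guarded parsing as a user-space sketch (the MAX_CONFIG_LEN value here is illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_CONFIG_LEN 32

static char config[MAX_CONFIG_LEN];

static int set_config(const char *opt)
{
	size_t len;

	if (!opt)
		return -1;		/* no option string at all */
	len = strlen(opt);
	if (len >= MAX_CONFIG_LEN)
		return -1;		/* refuse instead of truncating */
	strcpy(config, opt);
	/* Chop a trailing '\n' only if there is a last character. */
	if (len && config[len - 1] == '\n')
		config[len - 1] = '\0';
	return 0;
}

int main(void)
{
	set_config("ttyS0,115200\n");
	puts(config);			/* "ttyS0,115200" */
	return 0;
}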
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 3f98165b479c5e..ab516a90c89fa1 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1306,6 +1306,8 @@ static int max310x_spi_probe(struct spi_device *spi)
if (spi->dev.of_node) {
const struct of_device_id *of_id =
of_match_device(max310x_dt_ids, &spi->dev);
+ if (!of_id)
+ return -ENODEV;
devtype = (struct max310x_devtype *)of_id->data;
} else {
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 7aa584bbf48183..1922d87ea30d4a 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -860,15 +860,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
dma->rx_conf.direction = DMA_DEV_TO_MEM;
dma->rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma->rx_conf.src_addr = p->port.mapbase + S3C2410_URXH;
- dma->rx_conf.src_maxburst = 16;
+ dma->rx_conf.src_maxburst = 1;
dma->tx_conf.direction = DMA_MEM_TO_DEV;
dma->tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
dma->tx_conf.dst_addr = p->port.mapbase + S3C2410_UTXH;
- if (dma_get_cache_alignment() >= 16)
- dma->tx_conf.dst_maxburst = 16;
- else
- dma->tx_conf.dst_maxburst = 1;
+ dma->tx_conf.dst_maxburst = 1;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -1332,11 +1329,14 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
+ port->status &= ~UPSTAT_AUTOCTS;
+
umcon = rd_regl(port, S3C2410_UMCON);
if (termios->c_cflag & CRTSCTS) {
umcon |= S3C2410_UMCOM_AFC;
/* Disable RTS when RX FIFO contains 63 bytes */
umcon &= ~S3C2412_UMCON_AFC_8;
+ port->status = UPSTAT_AUTOCTS;
} else {
umcon &= ~S3C2410_UMCOM_AFC;
}
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index ac51f0d98ba889..350c6b729c43b4 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -648,7 +648,7 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
uart_write_wakeup(port);
}
-static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
+static bool sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
{
struct uart_port *port = &s->p[portno].port;
@@ -657,7 +657,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
iir = sc16is7xx_port_read(port, SC16IS7XX_IIR_REG);
if (iir & SC16IS7XX_IIR_NO_INT_BIT)
- break;
+ return false;
iir &= SC16IS7XX_IIR_ID_MASK;
@@ -685,16 +685,23 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
port->line, iir);
break;
}
- } while (1);
+ } while (0);
+ return true;
}
static void sc16is7xx_ist(struct kthread_work *ws)
{
struct sc16is7xx_port *s = to_sc16is7xx_port(ws, irq_work);
- int i;
- for (i = 0; i < s->devtype->nr_uart; ++i)
- sc16is7xx_port_irq(s, i);
+ while (1) {
+ bool keep_polling = false;
+ int i;
+
+ for (i = 0; i < s->devtype->nr_uart; ++i)
+ keep_polling |= sc16is7xx_port_irq(s, i);
+ if (!keep_polling)
+ break;
+ }
}
static irqreturn_t sc16is7xx_irq(int irq, void *dev_id)
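The sc16is7xx rework turns the per-port handler into a predicate and makes the threaded handler sweep every port until one full pass finds nothing pending, so an event that arrives on port A while port B is being drained is not dropped. The loop shape, reduced to a sketch (port_irq() stands in for the register work):

#include <stdbool.h>

#define NR_UART 2

/* Handle one pending event; true if this port had anything to do. */
static bool port_irq(int portno)
{
	(void)portno;		/* ... read IIR, service one event ... */
	return false;
}

static void irq_thread_fn(void)
{
	while (1) {
		bool keep_polling = false;
		int i;

		for (i = 0; i < NR_UART; ++i)
			keep_polling |= port_irq(i);
		if (!keep_polling)
			break;	/* one clean pass over all ports: done */
	}
}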
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 8dd822feb972ed..669134e27ed96b 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -746,19 +746,9 @@ static void sci_transmit_chars(struct uart_port *port)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit))
sci_stop_tx(port);
- } else {
- ctrl = serial_port_in(port, SCSCR);
-
- if (port->type != PORT_SCI) {
- serial_port_in(port, SCxSR); /* Dummy read */
- sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
- }
- ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
- }
}
/* On SH3, SCIF may read end-of-break as a space->mark char */
@@ -2419,13 +2409,12 @@ static void serial_console_write(struct console *co, const char *s,
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
if (port->sysrq)
locked = 0;
else if (oops_in_progress)
- locked = spin_trylock(&port->lock);
+ locked = spin_trylock_irqsave(&port->lock, flags);
else
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
/* first save the SCSCR then disable the interrupts */
ctrl = serial_port_in(port, SCSCR);
@@ -2442,8 +2431,7 @@ static void serial_console_write(struct console *co, const char *s,
serial_port_out(port, SCSCR, ctrl);
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static int serial_console_setup(struct console *co, char *options)
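serial_console_write() above now follows the canonical console locking pattern: skip the lock for sysrq (this CPU already holds it), downgrade to a trylock when oops_in_progress so a dying CPU can still emit output, and otherwise lock normally, with a single irqsave/irqrestore pair instead of a separate local_irq_save(). A runnable sketch of the skeleton, with trivial stand-ins for the spinlock API:

#include <stdbool.h>
#include <stdio.h>

static bool oops_in_progress;

struct port {
	bool locked;		/* stand-in for spinlock_t + irq flags */
	bool sysrq;
};

static bool try_lock(struct port *p)
{
	if (p->locked)
		return false;
	p->locked = true;
	return true;
}

static void lock(struct port *p)   { p->locked = true; }
static void unlock(struct port *p) { p->locked = false; }

static void console_write(struct port *port, const char *s)
{
	bool locked = true;

	if (port->sysrq)
		locked = false;			/* already held by us */
	else if (oops_in_progress)
		locked = try_lock(port);	/* never deadlock an oops */
	else
		lock(port);

	fputs(s, stdout);			/* emit the characters */

	if (locked)
		unlock(port);
}

int main(void)
{
	struct port p = { 0 };

	console_write(&p, "console up\n");
	return 0;
}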
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 9ae182a5478431..ba9374ee66659e 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -36,7 +36,7 @@
#define SPRD_FIFO_SIZE 128
#define SPRD_DEF_RATE 26000000
#define SPRD_BAUD_IO_LIMIT 3000000
-#define SPRD_TIMEOUT 256
+#define SPRD_TIMEOUT 256000
/* the offset of serial registers and BITs for them */
/* data registers */
@@ -63,6 +63,7 @@
/* interrupt clear register */
#define SPRD_ICLR 0x0014
+#define SPRD_ICLR_TIMEOUT BIT(13)
/* line control register */
#define SPRD_LCR 0x0018
@@ -298,7 +299,8 @@ static irqreturn_t sprd_handle_irq(int irq, void *dev_id)
return IRQ_NONE;
}
- serial_out(port, SPRD_ICLR, ~0);
+ if (ims & SPRD_IMSR_TIMEOUT)
+ serial_out(port, SPRD_ICLR, SPRD_ICLR_TIMEOUT);
if (ims & (SPRD_IMSR_RX_FIFO_FULL |
SPRD_IMSR_BREAK_DETECT | SPRD_IMSR_TIMEOUT))
@@ -729,8 +731,8 @@ static int sprd_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "not provide irq resource\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "not provide irq resource: %d\n", irq);
+ return irq;
}
up->irq = irq;
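The key sprd change is that the interrupt-clear register is write-one-to-clear: writing ~0 acked events the handler had not yet examined, so the fix acks only the bit actually being serviced. In sketch form:

#include <stdint.h>

#define IMSR_TIMEOUT	(1u << 13)
#define ICLR_TIMEOUT	(1u << 13)

static uint32_t iclr;	/* simulated W1C interrupt-clear register */

static void serial_out(uint32_t *reg, uint32_t val)
{
	*reg = val;	/* hardware would clear the written bits */
}

static void handle_irq(uint32_t ims)
{
	/* Ack only what is handled here, never a blanket ~0. */
	if (ims & IMSR_TIMEOUT)
		serial_out(&iclr, ICLR_TIMEOUT);

	/* ... then drain RX for timeout/FIFO-full/break events ... */
}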
diff --git a/drivers/tty/serial/suncore.c b/drivers/tty/serial/suncore.c
index 127472bd6a7cfb..209f314745ab45 100644
--- a/drivers/tty/serial/suncore.c
+++ b/drivers/tty/serial/suncore.c
@@ -111,6 +111,7 @@ void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
mode = of_get_property(dp, mode_prop, NULL);
if (!mode)
mode = "9600,8,n,1,-";
+ of_node_put(dp);
}
cflag = CREAD | HUPCL | CLOCAL;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 8f3566cde3eb0d..355e9cad680d60 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -454,6 +454,8 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
if (count && disc->ops->receive_buf)
disc->ops->receive_buf(tty, p, f, count);
}
+ if (count > 0)
+ memset(p, 0, count);
return count;
}
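Scrubbing the flip buffer right after the line discipline consumes it bounds how long keystrokes, possibly passwords, survive in kernel memory that will be recycled. The pattern in isolation:

#include <string.h>

/* Hand 'count' bytes to the consumer, then scrub them in place. */
static int receive_and_scrub(unsigned char *p, int count,
			     void (*receive)(const unsigned char *, int))
{
	if (count && receive)
		receive(p, count);
	if (count > 0)
		memset(p, 0, count);	/* don't leave secrets behind */
	return count;
}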
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 198451fa9e5d1a..5b86ebc76a8a23 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -357,7 +357,7 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
mutex_lock(&tty_mutex);
/* Search through the tty devices to look for a match */
list_for_each_entry(p, &tty_drivers, tty_drivers) {
- if (strncmp(name, p->name, len) != 0)
+ if (!len || strncmp(name, p->name, len) != 0)
continue;
stp = str;
if (*stp == ',')
@@ -2297,7 +2297,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
return -EFAULT;
tty_audit_tiocsti(tty, ch);
ld = tty_ldisc_ref_wait(tty);
- ld->ops->receive_buf(tty, &ch, &mbz, 1);
+ if (ld->ops->receive_buf)
+ ld->ops->receive_buf(tty, &ch, &mbz, 1);
tty_ldisc_deref(ld);
return 0;
}
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 1445dd39aa6227..bece7e39f51234 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -330,7 +330,7 @@ speed_t tty_termios_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_baud_rate);
@@ -366,7 +366,7 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios)
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
#else
return tty_termios_baud_rate(termios);
#endif
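Both returns above replace an unchecked baud_table[cbaud] read: a crafted c_cflag can push cbaud past the table (the CBAUDEX arithmetic adds 15), so the lookup now yields 0, i.e. B0 / hang up, for any out-of-range index. The sanitized lookup on its own (table truncated for the sketch):

#include <stdio.h>

typedef unsigned int speed_t;

static const speed_t baud_table[] = {
	0, 50, 75, 110, 134, 150, 200, 300, 600, 1200,
	1800, 2400, 4800, 9600, 19200, 38400,
};
static const int n_baud_table = sizeof(baud_table) / sizeof(baud_table[0]);

static speed_t baud_rate(int cbaud)
{
	/* Out-of-range means "no valid rate", reported as B0. */
	return (cbaud < 0 || cbaud >= n_baud_table) ? 0 : baud_table[cbaud];
}

int main(void)
{
	printf("%u %u\n", baud_rate(13), baud_rate(99));	/* 9600 0 */
	return 0;
}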
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index ad7eba5ca380fa..34234c2338511e 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -307,6 +307,16 @@ down_write_failed(struct ld_semaphore *sem, long count, long timeout)
if (!locked)
ldsem_atomic_update(-LDSEM_WAIT_BIAS, sem);
list_del(&waiter.list);
+
+ /*
+ * In case of timeout, wake up every reader who gave the right of way
+ * to the writer. This prevents the readers from being split into two
+ * groups: one that holds the semaphore and another that sleeps
+ * (in the case of no contention with a writer).
+ */
+ if (!locked && list_empty(&sem->write_wait))
+ __ldsem_wake_readers(sem);
+
raw_spin_unlock_irq(&sem->wait_lock);
__set_task_state(tsk, TASK_RUNNING);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ff3286fc22d829..6779f733bb83a0 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -958,6 +958,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (CON_IS_VISIBLE(vc))
update_screen(vc);
vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
+ notify_update(vc);
return err;
}
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 97d5a74558a3a4..a86bc7afb3b289 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -31,6 +31,8 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <linux/nospec.h>
+
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
@@ -703,6 +705,8 @@ int vt_ioctl(struct tty_struct *tty,
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {
+ vsa.console = array_index_nospec(vsa.console,
+ MAX_NR_CONSOLES + 1);
vsa.console--;
console_lock();
ret = vc_allocate(vsa.console);
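array_index_nospec() is applied after the bounds check so that even a mispredicted branch cannot speculatively index past the array (Spectre v1); architecturally the value is unchanged. The bound here is MAX_NR_CONSOLES + 1 because vsa.console is still 1-based at that point. A sketch of the clamp's semantics in plain C (the kernel helper is branchless and arch-tuned; this only models the contract):

#include <stddef.h>

/* Returns index when index < size, else 0, via an all-ones/zero mask. */
static inline size_t index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (index < size);

	return index & mask;
}

/* Usage mirrors the hunk: check first, clamp second, then index. */
static int lookup(const int *table, size_t nmemb, size_t idx)
{
	if (idx >= nmemb)
		return -1;
	idx = index_nospec(idx, nmemb);
	return table[idx];
}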
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index bcc1fc02731133..50fe1f76ea4258 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -249,6 +249,8 @@ static struct class uio_class = {
.dev_groups = uio_groups,
};
+bool uio_class_registered;
+
/*
* device functions
*/
@@ -772,6 +774,9 @@ static int init_uio_class(void)
printk(KERN_ERR "class_register failed for uio\n");
goto err_class_register;
}
+
+ uio_class_registered = true;
+
return 0;
err_class_register:
@@ -782,6 +787,7 @@ exit:
static void release_uio_class(void)
{
+ uio_class_registered = false;
class_unregister(&uio_class);
uio_major_cleanup();
}
@@ -801,6 +807,9 @@ int __uio_register_device(struct module *owner,
struct uio_device *idev;
int ret = 0;
+ if (!uio_class_registered)
+ return -EPROBE_DEFER;
+
if (!parent || !info || !info->name || !info->version)
return -EINVAL;
@@ -846,8 +855,10 @@ int __uio_register_device(struct module *owner,
*/
ret = request_irq(info->irq, uio_interrupt,
info->irq_flags, info->name, idev);
- if (ret)
+ if (ret) {
+ info->uio_dev = NULL;
goto err_request_irq;
+ }
}
return 0;
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 9ecb598e48f041..a5557c70034a3f 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -20,7 +20,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
static inline void ci_otg_queue_work(struct ci_hdrc *ci)
{
disable_irq_nosync(ci->irq);
- queue_work(ci->wq, &ci->work);
+ if (queue_work(ci->wq, &ci->work) == false)
+ enable_irq(ci->irq);
}
#endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index edd8ef4ee502d8..736de1021d8ba1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -332,17 +332,17 @@ static void acm_ctrl_irq(struct urb *urb)
if (difference & ACM_CTRL_DSR)
acm->iocount.dsr++;
- if (difference & ACM_CTRL_BRK)
- acm->iocount.brk++;
- if (difference & ACM_CTRL_RI)
- acm->iocount.rng++;
if (difference & ACM_CTRL_DCD)
acm->iocount.dcd++;
- if (difference & ACM_CTRL_FRAMING)
+ if (newctrl & ACM_CTRL_BRK)
+ acm->iocount.brk++;
+ if (newctrl & ACM_CTRL_RI)
+ acm->iocount.rng++;
+ if (newctrl & ACM_CTRL_FRAMING)
acm->iocount.frame++;
- if (difference & ACM_CTRL_PARITY)
+ if (newctrl & ACM_CTRL_PARITY)
acm->iocount.parity++;
- if (difference & ACM_CTRL_OVERRUN)
+ if (newctrl & ACM_CTRL_OVERRUN)
acm->iocount.overrun++;
spin_unlock(&acm->read_lock);
@@ -507,6 +507,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
if (retval)
goto error_init_termios;
+ /*
+ * Suppress initial echoing for some devices which might send data
+ * immediately after the acm driver has been installed.
+ */
+ if (acm->quirks & DISABLE_ECHO)
+ tty->termios.c_lflag &= ~ECHO;
+
tty->driver_data = acm;
return 0;
@@ -1677,6 +1684,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
+ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
+ },
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1698,6 +1708,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
.driver_info = SINGLE_RX_URB,
},
+ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1713,6 +1726,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
.driver_info = QUIRK_CONTROL_LINE_STATE, },
{ USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
@@ -1768,6 +1784,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
@@ -1866,6 +1885,13 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+ { USB_DEVICE(0x1bc7, 0x0023), /* Telit 3G ACM + ECM composition */
+ .driver_info = SEND_ZERO_PACKET,
+ },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index b30ac5fcde6875..1ad9ff9f493d29 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -134,3 +134,4 @@ struct acm {
#define QUIRK_CONTROL_LINE_STATE BIT(6)
#define CLEAR_HALT_CONDITIONS BIT(7)
#define SEND_ZERO_PACKET BIT(8)
+#define DISABLE_ECHO BIT(9)
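Each quirk is one bit in driver_info, so a device-table entry can OR several together and the runtime paths test them with a bitwise AND; DISABLE_ECHO above simply claims the next free bit. The pattern as a sketch:

#include <stdio.h>

#define BIT(n)			(1UL << (n))

#define NO_UNION_NORMAL		BIT(0)
#define SEND_ZERO_PACKET	BIT(8)
#define DISABLE_ECHO		BIT(9)

struct quirk_entry {
	unsigned short vid, pid;
	unsigned long driver_info;
};

static const struct quirk_entry quirks[] = {
	{ 0x0e8d, 0x2000, DISABLE_ECHO },	/* MediaTek Preloader */
	{ 0x1bc7, 0x0021, SEND_ZERO_PACKET },	/* Telit 3G ACM */
};

int main(void)
{
	if (quirks[0].driver_info & DISABLE_ECHO)
		puts("clear ECHO in termios on install");
	return 0;
}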
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6ff3c4ec5acea1..63741274188aaa 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1325,10 +1325,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
- int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
+ bool is_in;
+ bool allow_short = false;
+ bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
@@ -1391,6 +1394,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
+ if (is_in)
+ allow_short = true;
snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
@@ -1402,6 +1407,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
break;
case USBDEVFS_URB_TYPE_BULK:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
@@ -1422,6 +1431,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1548,16 +1561,21 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
if (uurb->flags & USBDEVFS_URB_NO_FSBR)
u |= URB_NO_FSBR;
- if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
+ if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+ if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e2f891ce6496aa..3affe8cc49c04a 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -510,7 +510,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct device *dev;
struct usb_device *udev;
int retval = 0;
- int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
@@ -531,16 +530,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
iface->condition = USB_INTERFACE_BOUND;
- /* See the comment about disabling LPM in usb_probe_interface(). */
- if (driver->disable_hub_initiated_lpm) {
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
- return -ENOMEM;
- }
- }
-
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
@@ -559,9 +548,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (device_is_registered(dev))
retval = device_bind_driver(dev);
- /* Attempt to re-enable USB3 LPM, if the disable was successful. */
- if (!lpm_disable_error)
- usb_unlocked_enable_lpm(udev);
+ if (retval) {
+ dev->driver = NULL;
+ usb_set_intfdata(iface, NULL);
+ iface->needs_remote_wakeup = 0;
+ iface->condition = USB_INTERFACE_UNBOUND;
+
+ /*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
return retval;
}
@@ -1460,6 +1460,7 @@ static void choose_wakeup(struct usb_device *udev, pm_message_t msg)
int usb_suspend(struct device *dev, pm_message_t msg)
{
struct usb_device *udev = to_usb_device(dev);
+ int r;
unbind_no_pm_drivers_interfaces(udev);
@@ -1468,7 +1469,14 @@ int usb_suspend(struct device *dev, pm_message_t msg)
* so we may still need to unbind and rebind upon resume
*/
choose_wakeup(udev, msg);
- return usb_suspend_both(udev, msg);
+ r = usb_suspend_both(udev, msg);
+ if (r)
+ return r;
+
+ if (udev->quirks & USB_QUIRK_DISCONNECT_SUSPEND)
+ usb_port_disable(udev);
+
+ return 0;
}
/* The device lock is held by the PM core */
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index a5240b4d7ab9a9..a5cc9f19120abb 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -19,6 +19,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/gpio/consumer.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -225,7 +226,7 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
static int generic_resume(struct usb_device *udev, pm_message_t msg)
{
int rc;
-
+ struct gpio_desc *reset_gpio;
/* Normal USB devices resume/reset through their upstream port.
* Root hubs don't have upstream ports to resume or reset,
* so we have to start up their downstream HC-to-USB
@@ -235,6 +236,25 @@ static int generic_resume(struct usb_device *udev, pm_message_t msg)
rc = hcd_bus_resume(udev, msg);
else
rc = usb_port_resume(udev, msg);
+
+ /* During the system suspend-resume flow, it is observed that USB
+ * protocol transaction errors sometimes occur with the BT controller;
+ * for example, a USB GET STATUS transaction fails with error -110. When
+ * error -110 (-ETIMEDOUT) is encountered during resume and the device is an Intel
+ * Bluetooth device(VendorId=0x8087,DeviceClass=0xe0), yank it off the
+ * USB bus by toggling the W_DISABLE#2 pin attached to the platform in
+ * order to recover the device.
+ */
+ if (rc < 0 && udev->descriptor.idVendor == 0x8087 &&
+ udev->descriptor.bDeviceClass == 0xe0) {
+ reset_gpio = (struct gpio_desc *) dev_get_drvdata(&udev->dev);
+ if (!IS_ERR_OR_NULL(reset_gpio)) {
+ dev_dbg(&udev->dev, "Reset Intel Bluetooth device\n");
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ msleep(100);
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ }
+ }
return rc;
}
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 40378487e023c4..a5e3e410db4e6f 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -529,8 +529,6 @@ static int resume_common(struct device *dev, int event)
event == PM_EVENT_RESTORE);
if (retval) {
dev_err(dev, "PCI post-resume error %d!\n", retval);
- if (hcd->shared_hcd)
- usb_hc_died(hcd->shared_hcd);
usb_hc_died(hcd);
}
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index e5f24fdd08c3fd..7de618776605e8 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -386,13 +386,19 @@ static const u8 ss_rh_config_descriptor[] = {
* -1 is authorized for all devices except wireless (old behaviour)
* 0 is unauthorized for all devices
* 1 is authorized for all devices
+ * 2 is authorized for internal devices
*/
-static int authorized_default = -1;
+#define USB_AUTHORIZE_WIRED -1
+#define USB_AUTHORIZE_NONE 0
+#define USB_AUTHORIZE_ALL 1
+#define USB_AUTHORIZE_INTERNAL 2
+
+static int authorized_default = USB_AUTHORIZE_WIRED;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
"Default USB device authorization: 0 is not authorized, 1 is "
- "authorized, -1 is authorized except for wireless USB (default, "
- "old behaviour");
+ "authorized, 2 is authorized for internal devices, -1 is "
+ "authorized except for wireless USB (default, old behaviour");
/*-------------------------------------------------------------------------*/
/**
@@ -893,7 +899,7 @@ static ssize_t authorized_default_show(struct device *dev,
struct usb_hcd *hcd;
hcd = bus_to_hcd(usb_bus);
- return snprintf(buf, PAGE_SIZE, "%u\n", !!HCD_DEV_AUTHORIZED(hcd));
+ return snprintf(buf, PAGE_SIZE, "%u\n", hcd->dev_policy);
}
static ssize_t authorized_default_store(struct device *dev,
@@ -909,11 +915,8 @@ static ssize_t authorized_default_store(struct device *dev,
hcd = bus_to_hcd(usb_bus);
result = sscanf(buf, "%u\n", &val);
if (result == 1) {
- if (val)
- set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- else
- clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
-
+ hcd->dev_policy = val <= USB_DEVICE_AUTHORIZE_INTERNAL ?
+ val : USB_DEVICE_AUTHORIZE_ALL;
result = size;
} else {
result = -EINVAL;
@@ -2786,18 +2789,26 @@ int usb_add_hcd(struct usb_hcd *hcd,
dev_info(hcd->self.controller, "%s\n", hcd->product_desc);
- /* Keep old behaviour if authorized_default is not in [0, 1]. */
- if (authorized_default < 0 || authorized_default > 1) {
- if (hcd->wireless)
- clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- else
- set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- } else {
- if (authorized_default)
- set_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
- else
- clear_bit(HCD_FLAG_DEV_AUTHORIZED, &hcd->flags);
+ switch (authorized_default) {
+ case USB_AUTHORIZE_NONE:
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_NONE;
+ break;
+
+ case USB_AUTHORIZE_ALL:
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_ALL;
+ break;
+
+ case USB_AUTHORIZE_INTERNAL:
+ hcd->dev_policy = USB_DEVICE_AUTHORIZE_INTERNAL;
+ break;
+
+ case USB_AUTHORIZE_WIRED:
+ default:
+ hcd->dev_policy = hcd->wireless ?
+ USB_DEVICE_AUTHORIZE_NONE : USB_DEVICE_AUTHORIZE_ALL;
+ break;
}
+
set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
/* per default all interfaces are authorized */
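The hunk replaces a single authorized bit with a per-HCD policy, mapping the legacy module parameter (-1/0/1 plus the new 2) onto an enum in one switch and keeping -1 as the wireless-aware default. The mapping as a compact sketch (the enum names mirror the patch; the wireless flag stands in for hcd->wireless):

#include <stdbool.h>
#include <stdio.h>

enum usb_dev_authorize_policy {
	USB_DEVICE_AUTHORIZE_NONE,
	USB_DEVICE_AUTHORIZE_ALL,
	USB_DEVICE_AUTHORIZE_INTERNAL,
};

static enum usb_dev_authorize_policy
map_policy(int authorized_default, bool wireless)
{
	switch (authorized_default) {
	case 0:				/* USB_AUTHORIZE_NONE */
		return USB_DEVICE_AUTHORIZE_NONE;
	case 1:				/* USB_AUTHORIZE_ALL */
		return USB_DEVICE_AUTHORIZE_ALL;
	case 2:				/* USB_AUTHORIZE_INTERNAL */
		return USB_DEVICE_AUTHORIZE_INTERNAL;
	case -1:			/* USB_AUTHORIZE_WIRED */
	default:
		/* old behaviour: everything except wireless USB */
		return wireless ? USB_DEVICE_AUTHORIZE_NONE
				: USB_DEVICE_AUTHORIZE_ALL;
	}
}

int main(void)
{
	printf("%d\n", map_policy(-1, false));	/* 1 == AUTHORIZE_ALL */
	return 0;
}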
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 46800a7fe7c68a..d5436ee22b93e6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -27,6 +27,7 @@
#include <linux/random.h>
#include <linux/pm_qos.h>
#include <linux/pm_dark_resume.h>
+#include <linux/kobject.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -1107,6 +1108,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
+ /*
+ * Add debounce if USB3 link is in polling/link training state.
+ * Link will automatically transition to Enabled state after
+ * link training completes.
+ */
+ if (hub_is_superspeed(hdev) &&
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_POLLING))
+ need_debounce_delay = true;
+
/* Clear status-change flags; we'll debounce later */
if (portchange & USB_PORT_STAT_C_CONNECTION) {
need_debounce_delay = true;
@@ -1138,10 +1149,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell hub_wq to disconnect the device or
- * check for a new connection
+ * check for a new connection or an over-current condition.
+ * Based on USB 2.0 spec section 11.12.5,
+ * C_PORT_OVER_CURRENT could be set while
+ * PORT_OVER_CURRENT is not. So check for either of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
- (portstatus & USB_PORT_STAT_OVERCURRENT))
+ (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+ (portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
} else if (portstatus & USB_PORT_STAT_ENABLE) {
@@ -2227,7 +2242,7 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
/* descriptor may appear anywhere in config */
err = __usb_get_extra_descriptor(udev->rawdescriptors[0],
le16_to_cpu(udev->config[0].desc.wTotalLength),
- USB_DT_OTG, (void **) &desc);
+ USB_DT_OTG, (void **) &desc, sizeof(*desc));
if (err || !(desc->bmAttributes & USB_OTG_HNP))
return 0;
@@ -2816,7 +2831,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
USB_PORT_FEAT_C_BH_PORT_RESET);
usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_PORT_LINK_STATE);
- usb_clear_port_feature(hub->hdev, port1,
+
+ if (udev)
+ usb_clear_port_feature(hub->hdev, port1,
USB_PORT_FEAT_C_CONNECTION);
/*
@@ -3388,6 +3405,10 @@ static int wait_for_connected(struct usb_device *udev,
while (delay_ms < 2000) {
if (status || *portstatus & USB_PORT_STAT_CONNECTION)
break;
+ if (!port_is_power_on(hub, *portstatus)) {
+ status = -ENODEV;
+ break;
+ }
msleep(20);
delay_ms += 20;
status = hub_port_status(hub, *port1, portstatus, portchange);
@@ -3654,12 +3675,54 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
return 0;
}
+/* Report wakeup requests from the ports of a resuming root hub */
+static void report_wakeup_requests(struct usb_hub *hub)
+{
+ struct usb_device *hdev = hub->hdev;
+ struct usb_device *udev;
+ struct usb_hcd *hcd;
+ unsigned long resuming_ports;
+ int i;
+
+ if (hdev->parent)
+ return; /* Not a root hub */
+
+ hcd = bus_to_hcd(hdev->bus);
+ if (hcd->driver->get_resuming_ports) {
+
+ /*
+ * The get_resuming_ports() method returns a bitmap (origin 0)
+ * of ports which have started wakeup signaling but have not
+ * yet finished resuming. During system resume we will
+ * resume all the enabled ports, regardless of any wakeup
+ * signals, which means the wakeup requests would be lost.
+ * To prevent this, report them to the PM core here.
+ */
+ resuming_ports = hcd->driver->get_resuming_ports(hcd);
+ for (i = 0; i < hdev->maxchild; ++i) {
+ if (test_bit(i, &resuming_ports)) {
+ udev = hub->ports[i]->child;
+ if (udev)
+ pm_wakeup_event(&udev->dev, 0);
+ }
+ }
+ }
+}
+
static int hub_resume(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s\n", __func__);
hub_activate(hub, HUB_RESUME);
+
+ /*
+ * This should be called only for system resume, not runtime resume.
+ * We can't tell the difference here, so some wakeup requests will be
+ * reported at the wrong time or more than once. This shouldn't
+ * matter much, so long as they do get reported.
+ */
+ report_wakeup_requests(hub);
return 0;
}
@@ -4217,6 +4280,19 @@ static int hub_port_disable(struct usb_hub *hub, int port1, int set_state)
return ret;
}
+/*
+ * usb_port_disable - disable a usb device's upstream port
+ * @udev: device to disable
+ * Context: @udev locked, must be able to sleep.
+ *
+ * Disables a USB device that isn't in active use.
+ */
+int usb_port_disable(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
+
+ return hub_port_disable(hub, udev->portnum, 0);
+}
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
*
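A usage sketch for the new usb_port_disable() helper; the caller below is
hypothetical (usb_port_disable() is usbcore-internal, declared in usb.h),
but the locking rule follows the kerneldoc above (@udev held, sleeping
context):

    /* Hypothetical caller: disable the upstream port of a device that
     * is no longer in active use. */
    static int example_disable_device_port(struct usb_device *udev)
    {
        int ret;

        usb_lock_device(udev);
        ret = usb_port_disable(udev);
        usb_unlock_device(udev);

        return ret;
    }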
@@ -4533,7 +4609,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
* reset. But only on the first attempt,
* lest we get into a time out/reset loop
*/
- if (r == 0 || (r == -ETIMEDOUT && retries == 0))
+ if (r == 0 || (r == -ETIMEDOUT &&
+ retries == 0 &&
+ udev->speed > USB_SPEED_FULL))
break;
}
udev->descriptor.bMaxPacketSize0 =
@@ -5044,6 +5122,43 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
usb_lock_port(port_dev);
}
+/* Handle notifying userspace about hub over-current events */
+static void port_over_current_notify(struct usb_port *port_dev)
+{
+ char *envp[3];
+ struct device *hub_dev;
+ char *port_dev_path;
+
+ sysfs_notify(&port_dev->dev.kobj, NULL, "over_current_count");
+
+ hub_dev = port_dev->dev.parent;
+
+ if (!hub_dev)
+ return;
+
+ port_dev_path = kobject_get_path(&port_dev->dev.kobj, GFP_KERNEL);
+ if (!port_dev_path)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
+ if (!envp[0])
+ goto exit_path;
+
+ envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
+ port_dev->over_current_count);
+ if (!envp[1])
+ goto exit;
+
+ envp[2] = NULL;
+ kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(envp[1]);
+exit:
+ kfree(envp[0]);
+exit_path:
+ kfree(port_dev_path);
+}
+
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
@@ -5085,8 +5200,11 @@ static void port_event(struct usb_hub *hub, int port1)
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
u16 status = 0, unused;
+ port_dev->over_current_count++;
+ port_over_current_notify(port_dev);
- dev_dbg(&port_dev->dev, "over-current change\n");
+ dev_dbg(&port_dev->dev, "over-current change #%u\n",
+ port_dev->over_current_count);
usb_clear_port_feature(hdev, port1,
USB_PORT_FEAT_C_OVER_CURRENT);
msleep(100); /* Cool down */
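port_over_current_notify() above surfaces each event to userspace twice:
sysfs_notify() wakes pollers of over_current_count, and the KOBJ_CHANGE
uevent carries OVER_CURRENT_PORT and OVER_CURRENT_COUNT. A hypothetical
libudev listener sketch (error handling trimmed):

    #include <libudev.h>
    #include <poll.h>
    #include <stdio.h>

    void watch_over_current_events(void)
    {
        struct udev *udev = udev_new();
        struct udev_monitor *mon =
                udev_monitor_new_from_netlink(udev, "kernel");
        struct pollfd pfd;

        udev_monitor_enable_receiving(mon);
        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
            struct udev_device *dev = udev_monitor_receive_device(mon);
            const char *port, *count;

            if (!dev)
                continue;
            port = udev_device_get_property_value(dev, "OVER_CURRENT_PORT");
            count = udev_device_get_property_value(dev, "OVER_CURRENT_COUNT");
            if (port && count)
                printf("over-current on %s (count %s)\n", port, count);
            udev_device_unref(dev);
        }
    }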
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index f025643e53cbc5..fe2c7e6623efc0 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -102,6 +102,7 @@ struct usb_port {
enum usb_port_connect_type connect_type;
usb_port_location_t location;
struct mutex status_lock;
+ u32 over_current_count;
u8 portnum;
u32 quirks;
unsigned int is_superspeed:1;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 29adabdb305ffa..08cba309eb7828 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1282,6 +1282,11 @@ void usb_enable_interface(struct usb_device *dev,
* is submitted that needs that bandwidth. Some other operating systems
* allocate bandwidth early, when a configuration is chosen.
*
+ * xHCI reserves bandwidth and configures the alternate setting in
+ * usb_hcd_alloc_bandwidth(). If it fails the original interface altsetting
+ * may be disabled. Drivers cannot rely on any particular alternate
+ * setting being in effect after a failure.
+ *
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
@@ -1317,6 +1322,12 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
alternate);
return -EINVAL;
}
+ /*
+ * USB3 hosts configure the interface in usb_hcd_alloc_bandwidth(),
+ * including freeing dropped endpoint ring buffers.
+ * Make sure the interface endpoints are flushed before that.
+ */
+ usb_disable_interface(dev, iface, false);
/* Make sure we have enough bandwidth for this alternate interface.
* Remove the current alt setting and add the new alt setting.
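Given the note above that no particular altsetting is guaranteed after a
failure, a defensive caller sketch (the helper and its names are
hypothetical):

    /* Hypothetical driver helper: fall back to altsetting 0 instead of
     * assuming the previous setting is still programmed in the host. */
    static int example_select_alt(struct usb_device *udev, int iface_num,
                                  int alt)
    {
        int ret = usb_set_interface(udev, iface_num, alt);

        if (ret) {
            dev_warn(&udev->dev, "alt %d failed (%d), falling back to 0\n",
                     alt, ret);
            ret = usb_set_interface(udev, iface_num, 0);
        }
        return ret;
    }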
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index b2188fecd05950..6be658c8ad748b 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -50,6 +50,15 @@ static ssize_t connect_type_show(struct device *dev,
}
static DEVICE_ATTR_RO(connect_type);
+static ssize_t over_current_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+
+ return sprintf(buf, "%u\n", port_dev->over_current_count);
+}
+static DEVICE_ATTR_RO(over_current_count);
+
static ssize_t quirks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -74,6 +83,7 @@ static DEVICE_ATTR_RW(quirks);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
+ &dev_attr_over_current_count.attr,
&dev_attr_quirks.attr,
NULL,
};
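Because the hub code calls sysfs_notify() on this attribute, userspace can
also block in poll(POLLPRI) on the file itself; a sketch (the attribute
path argument is illustrative):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* e.g. attr = ".../usb2-port1/over_current_count" */
    int poll_over_current(const char *attr)
    {
        char buf[16];
        struct pollfd pfd = { .events = POLLPRI | POLLERR };
        ssize_t n;

        pfd.fd = open(attr, O_RDONLY);
        if (pfd.fd < 0)
            return -1;
        read(pfd.fd, buf, sizeof(buf));     /* arm: consume current value */
        while (poll(&pfd, 1, -1) > 0) {
            lseek(pfd.fd, 0, SEEK_SET);
            n = read(pfd.fd, buf, sizeof(buf) - 1);
            if (n > 0) {
                buf[n] = '\0';
                printf("over_current_count=%s", buf);
            }
        }
        return close(pfd.fd);
    }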
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 337a1539f89545..4c59490b5fd1f6 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -11,11 +11,155 @@
*
*/
+#include <linux/moduleparam.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
#include <linux/usb/hcd.h>
#include "usb.h"
+struct quirk_entry {
+ u16 vid;
+ u16 pid;
+ u32 flags;
+};
+
+static DEFINE_MUTEX(quirk_mutex);
+
+static struct quirk_entry *quirk_list;
+static unsigned int quirk_count;
+
+static char quirks_param[128];
+
+static int quirks_param_set(const char *val, const struct kernel_param *kp)
+{
+ char *p, *field;
+ u16 vid, pid;
+ u32 flags;
+ size_t i;
+ int err;
+
+ err = param_set_copystring(val, kp);
+ if (err)
+ return err;
+
+ mutex_lock(&quirk_mutex);
+
+ if (!*val) {
+ quirk_count = 0;
+ kfree(quirk_list);
+ quirk_list = NULL;
+ goto unlock;
+ }
+
+ for (quirk_count = 1, i = 0; val[i]; i++)
+ if (val[i] == ',')
+ quirk_count++;
+
+ if (quirk_list) {
+ kfree(quirk_list);
+ quirk_list = NULL;
+ }
+
+ quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
+ GFP_KERNEL);
+ if (!quirk_list) {
+ quirk_count = 0;
+ mutex_unlock(&quirk_mutex);
+ return -ENOMEM;
+ }
+
+ for (i = 0, p = (char *)val; p && *p;) {
+ /* Each entry consists of VID:PID:flags */
+ field = strsep(&p, ":");
+ if (!field)
+ break;
+
+ if (kstrtou16(field, 16, &vid))
+ break;
+
+ field = strsep(&p, ":");
+ if (!field)
+ break;
+
+ if (kstrtou16(field, 16, &pid))
+ break;
+
+ field = strsep(&p, ",");
+ if (!field || !*field)
+ break;
+
+ /* Collect the flags */
+ for (flags = 0; *field; field++) {
+ switch (*field) {
+ case 'a':
+ flags |= USB_QUIRK_STRING_FETCH_255;
+ break;
+ case 'b':
+ flags |= USB_QUIRK_RESET_RESUME;
+ break;
+ case 'c':
+ flags |= USB_QUIRK_NO_SET_INTF;
+ break;
+ case 'd':
+ flags |= USB_QUIRK_CONFIG_INTF_STRINGS;
+ break;
+ case 'e':
+ flags |= USB_QUIRK_RESET;
+ break;
+ case 'f':
+ flags |= USB_QUIRK_HONOR_BNUMINTERFACES;
+ break;
+ case 'g':
+ flags |= USB_QUIRK_DELAY_INIT;
+ break;
+ case 'h':
+ flags |= USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL;
+ break;
+ case 'i':
+ flags |= USB_QUIRK_DEVICE_QUALIFIER;
+ break;
+ case 'j':
+ flags |= USB_QUIRK_IGNORE_REMOTE_WAKEUP;
+ break;
+ case 'k':
+ flags |= USB_QUIRK_NO_LPM;
+ break;
+ case 'l':
+ flags |= USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL;
+ break;
+ case 'm':
+ flags |= USB_QUIRK_DISCONNECT_SUSPEND;
+ break;
+ /* Ignore unrecognized flag characters */
+ }
+ }
+
+ quirk_list[i++] = (struct quirk_entry)
+ { .vid = vid, .pid = pid, .flags = flags };
+ }
+
+ if (i < quirk_count)
+ quirk_count = i;
+
+unlock:
+ mutex_unlock(&quirk_mutex);
+
+ return 0;
+}
+
+static const struct kernel_param_ops quirks_param_ops = {
+ .set = quirks_param_set,
+ .get = param_get_string,
+};
+
+static struct kparam_string quirks_param_string = {
+ .maxlen = sizeof(quirks_param),
+ .string = quirks_param,
+};
+
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
+
/* Lists of quirky USB devices, split in device quirks and interface quirks.
* Device quirks are applied at the very beginning of the enumeration process,
* right after reading the device descriptor. They can thus only match on device
@@ -37,6 +181,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* WORLDE Controller KS49 or Prodipe MIDI 49C USB controller */
+ { USB_DEVICE(0x0218, 0x0201), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* WORLDE easy key (easykey.25) MIDI controller */
{ USB_DEVICE(0x0218, 0x0401), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -60,6 +208,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Cherry Stream G230 2.0 (G85-231) and 3.0 (G85-232) */
+ { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -73,6 +224,14 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech PTZ Pro Camera */
{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Logitech Screen Share */
+ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
+
+ /* Logitech Rally Camera */
+ { USB_DEVICE(0x046d, 0x0881), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x046d, 0x0888), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x046d, 0x0889), .driver_info = USB_QUIRK_NO_LPM },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -181,6 +340,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Midiman M-Audio Keystation 88es */
{ USB_DEVICE(0x0763, 0x0192), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* SanDisk Ultra Fit and Ultra Flair */
+ { USB_DEVICE(0x0781, 0x5583), .driver_info = USB_QUIRK_NO_LPM },
+ { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -213,6 +376,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Huawei 4G LTE module */
+ { USB_DEVICE(0x12d1, 0x15bb), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+ { USB_DEVICE(0x12d1, 0x15c3), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* SKYMEDI USB_DRIVE */
{ USB_DEVICE(0x1516, 0x8628), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -232,12 +401,20 @@ static const struct usb_device_id usb_quirk_list[] = {
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
/* Corsair K70 RGB */
- { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
+ /* Corsair Strafe */
+ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
+ /* Corsair K70 LUX RGB */
+ { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Corsair K70 LUX */
{ USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
@@ -258,6 +435,14 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Raydium Touchscreen */
+ { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM },
+
+ { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
+
+ /* DJI CineSSD */
+ { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -326,8 +511,8 @@ static int usb_amd_resume_quirk(struct usb_device *udev)
return 0;
}
-static u32 __usb_detect_quirks(struct usb_device *udev,
- const struct usb_device_id *id)
+static u32 usb_detect_static_quirks(struct usb_device *udev,
+ const struct usb_device_id *id)
{
u32 quirks = 0;
@@ -345,21 +530,43 @@ static u32 __usb_detect_quirks(struct usb_device *udev,
return quirks;
}
+static u32 usb_detect_dynamic_quirks(struct usb_device *udev)
+{
+ u16 vid = le16_to_cpu(udev->descriptor.idVendor);
+ u16 pid = le16_to_cpu(udev->descriptor.idProduct);
+ int i, flags = 0;
+
+ mutex_lock(&quirk_mutex);
+
+ for (i = 0; i < quirk_count; i++) {
+ if (vid == quirk_list[i].vid && pid == quirk_list[i].pid) {
+ flags = quirk_list[i].flags;
+ break;
+ }
+ }
+
+ mutex_unlock(&quirk_mutex);
+
+ return flags;
+}
+
/*
* Detect any quirks the device has, and do any housekeeping for it if needed.
*/
void usb_detect_quirks(struct usb_device *udev)
{
- udev->quirks = __usb_detect_quirks(udev, usb_quirk_list);
+ udev->quirks = usb_detect_static_quirks(udev, usb_quirk_list);
/*
* Pixart-based mice would trigger remote wakeup issue on AMD
* Yangtze chipset, so set them as RESET_RESUME flag.
*/
if (usb_amd_resume_quirk(udev))
- udev->quirks |= __usb_detect_quirks(udev,
+ udev->quirks |= usb_detect_static_quirks(udev,
usb_amd_resume_quirk_list);
+ udev->quirks ^= usb_detect_dynamic_quirks(udev);
+
if (udev->quirks)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
@@ -378,7 +585,7 @@ void usb_detect_interface_quirks(struct usb_device *udev)
{
u32 quirks;
- quirks = __usb_detect_quirks(udev, usb_interface_quirk_list);
+ quirks = usb_detect_static_quirks(udev, usb_interface_quirk_list);
if (quirks == 0)
return;
@@ -393,3 +600,11 @@ void usb_detect_interface_quirks(struct usb_device *udev)
quirks);
udev->quirks |= quirks;
}
+
+void usb_release_quirk_list(void)
+{
+ mutex_lock(&quirk_mutex);
+ kfree(quirk_list);
+ quirk_list = NULL;
+ mutex_unlock(&quirk_mutex);
+}
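With the parser above, quirks can be added (or, since usb_detect_quirks()
XORs the dynamic flags in, toggled off) without rebuilding. An illustrative
boot parameter using IDs from the static table:

    usbcore.quirks=0781:5583:k,046d:082d:g

This applies USB_QUIRK_NO_LPM ('k') to the SanDisk Ultra Fit and
USB_QUIRK_DELAY_INIT ('g') to the Logitech C920.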
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index ef9cf4a21afe28..23b65e01d10d66 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -143,86 +143,123 @@ static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
return acpi_find_child_device(parent, raw, false);
}
-static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+static struct acpi_device *
+usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
struct usb_device *udev;
struct acpi_device *adev;
acpi_handle *parent_handle;
+ int port1;
+
+ /* Get the struct usb_device pointer of the port's hub */
+ udev = to_usb_device(port_dev->dev.parent->parent);
/*
- * In the ACPI DSDT table, only usb root hub and usb ports are
- * acpi device nodes. The hierarchy like following.
- * Device (EHC1)
- * Device (HUBN)
- * Device (PR01)
- * Device (PR11)
- * Device (PR12)
- * Device (PR13)
- * ...
- * So all binding process is divided into two parts. binding
- * root hub and usb ports.
+ * The root hub ports' parent is the root hub. The non-root-hub
+ * ports' parent is the parent hub port which the hub is
+ * connected to.
*/
- if (is_usb_device(dev)) {
- udev = to_usb_device(dev);
- if (udev->parent)
+ if (!udev->parent) {
+ adev = ACPI_COMPANION(&udev->dev);
+ port1 = usb_hcd_find_raw_port_number(bus_to_hcd(udev->bus),
+ port_dev->portnum);
+ } else {
+ parent_handle = usb_get_hub_port_acpi_handle(udev->parent,
+ udev->portnum);
+ if (!parent_handle)
return NULL;
- /* root hub is only child (_ADR=0) under its parent, the HC */
- adev = ACPI_COMPANION(dev->parent);
- return acpi_find_child_device(adev, 0, false);
- } else if (is_usb_port(dev)) {
- struct usb_port *port_dev = to_usb_port(dev);
- int port1 = port_dev->portnum;
- struct acpi_pld_info *pld;
- acpi_handle *handle;
- acpi_status status;
-
- /* Get the struct usb_device point of port's hub */
- udev = to_usb_device(dev->parent->parent);
-
- /*
- * The root hub ports' parent is the root hub. The non-root-hub
- * ports' parent is the parent hub port which the hub is
- * connected to.
- */
- if (!udev->parent) {
- struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- int raw;
-
- raw = usb_hcd_find_raw_port_number(hcd, port1);
-
- adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
- raw);
-
- if (!adev)
- return NULL;
- } else {
- parent_handle =
- usb_get_hub_port_acpi_handle(udev->parent,
- udev->portnum);
- if (!parent_handle)
- return NULL;
-
- acpi_bus_get_device(parent_handle, &adev);
-
- adev = usb_acpi_find_port(adev, port1);
-
- if (!adev)
- return NULL;
- }
- handle = adev->handle;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_FAILURE(status) || !pld)
- return adev;
+ acpi_bus_get_device(parent_handle, &adev);
+ port1 = port_dev->portnum;
+ }
+ return usb_acpi_find_port(adev, port1);
+}
+
+static struct acpi_device *
+usb_acpi_find_companion_for_port(struct usb_port *port_dev)
+{
+ struct acpi_device *adev;
+ struct acpi_pld_info *pld;
+ acpi_handle *handle;
+ acpi_status status;
+
+ adev = usb_acpi_get_companion_for_port(port_dev);
+ if (!adev)
+ return NULL;
+
+ handle = adev->handle;
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (!ACPI_FAILURE(status) && pld) {
port_dev->location = USB_ACPI_LOCATION_VALID
| pld->group_token << 8 | pld->group_position;
port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
ACPI_FREE(pld);
+ }
- return adev;
+ return adev;
+}
+
+static struct acpi_device *
+usb_acpi_find_companion_for_device(struct usb_device *udev)
+{
+ struct acpi_device *adev;
+ struct usb_port *port_dev;
+ struct usb_hub *hub;
+
+ if (!udev->parent) {
+ /* root hub is only child (_ADR=0) under its parent, the HC */
+ adev = ACPI_COMPANION(udev->dev.parent);
+ return acpi_find_child_device(adev, 0, false);
}
+ hub = usb_hub_to_struct_hub(udev->parent);
+ if (!hub)
+ return NULL;
+
+ /*
+ * This is an embedded USB device connected to a port and such
+ * devices share the port's ACPI companion.
+ */
+ port_dev = hub->ports[udev->portnum - 1];
+ return usb_acpi_get_companion_for_port(port_dev);
+}
+
+static struct acpi_device *usb_acpi_find_companion(struct device *dev)
+{
+ /*
+ * The USB hierarchy looks like the following:
+ *
+ * Device (EHC1)
+ * Device (HUBN)
+ * Device (PR01)
+ * Device (PR11)
+ * Device (PR12)
+ * Device (FN12)
+ * Device (FN13)
+ * Device (PR13)
+ * ...
+ * where HUBN is the root hub, PRNN are USB ports and devices
+ * connected to them, and FNNN are individual functions of
+ * connected composite USB devices. PRNN and FNNN may contain
+ * _CRS and other methods describing sideband resources for
+ * the connected device.
+ *
+ * On the kernel side both root hub and embedded USB devices are
+ * represented as instances of usb_device structure, and ports
+ * are represented as usb_port structures, so the whole process
+ * is split into 2 parts: finding companions for devices and
+ * finding companions for ports.
+ *
+ * Note that we do not handle individual functions of composite
+ * devices yet, for that we would need to assign companions to
+ * devices corresponding to USB interfaces.
+ */
+ if (is_usb_device(dev))
+ return usb_acpi_find_companion_for_device(to_usb_device(dev));
+ else if (is_usb_port(dev))
+ return usb_acpi_find_companion_for_port(to_usb_port(dev));
+
return NULL;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index a9fffd55bc5554..5a8721ac280a19 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -43,8 +43,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
-#include "usb.h"
-
+#include "hub.h"
const char *usbcore_name = "usbcore";
@@ -96,6 +95,8 @@ struct usb_host_interface *usb_find_alt_setting(
struct usb_interface_cache *intf_cache = NULL;
int i;
+ if (!config)
+ return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
@@ -410,6 +411,27 @@ static unsigned usb_bus_is_wusb(struct usb_bus *bus)
return hcd->wireless;
}
+static bool usb_dev_authorized(struct usb_device *dev, struct usb_hcd *hcd)
+{
+ struct usb_hub *hub;
+
+ if (!dev->parent)
+ return true; /* Root hub always ok [and always wired] */
+
+ switch (hcd->dev_policy) {
+ case USB_DEVICE_AUTHORIZE_NONE:
+ default:
+ return false;
+
+ case USB_DEVICE_AUTHORIZE_ALL:
+ return true;
+
+ case USB_DEVICE_AUTHORIZE_INTERNAL:
+ hub = usb_hub_to_struct_hub(dev->parent);
+ return hub->ports[dev->portnum - 1]->connect_type ==
+ USB_PORT_CONNECT_TYPE_HARD_WIRED;
+ }
+}
/**
* usb_alloc_dev - usb device constructor (usbcore-internal)
@@ -526,12 +548,11 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
#endif
- if (root_hub) /* Root hub always ok [and always wired] */
- dev->authorized = 1;
- else {
- dev->authorized = !!HCD_DEV_AUTHORIZED(usb_hcd);
+
+ dev->authorized = usb_dev_authorized(dev, usb_hcd);
+ if (!root_hub)
dev->wusb = usb_bus_is_wusb(bus) ? 1 : 0;
- }
+
return dev;
}
EXPORT_SYMBOL_GPL(usb_alloc_dev);
@@ -695,14 +716,14 @@ EXPORT_SYMBOL_GPL(usb_get_current_frame_number);
*/
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr)
+ unsigned char type, void **ptr, size_t minsize)
{
struct usb_descriptor_header *header;
while (size >= sizeof(struct usb_descriptor_header)) {
header = (struct usb_descriptor_header *)buffer;
- if (header->bLength < 2) {
+ if (header->bLength < 2 || header->bLength > size) {
printk(KERN_ERR
"%s: bogus descriptor, type %d length %d\n",
usbcore_name,
@@ -711,7 +732,7 @@ int __usb_get_extra_descriptor(char *buffer, unsigned size,
return -1;
}
- if (header->bDescriptorType == type) {
+ if (header->bDescriptorType == type && header->bLength >= minsize) {
*ptr = header;
return 0;
}
@@ -1124,6 +1145,7 @@ static void __exit usb_exit(void)
if (usb_disabled())
return;
+ usb_release_quirk_list();
usb_deregister_device_driver(&usb_generic_driver);
usb_major_cleanup();
usb_deregister(&usbfs_driver);
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 53318126ed91b2..bbe59cd6088fc5 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -31,6 +31,7 @@ extern void usb_deauthorize_interface(struct usb_interface *);
extern void usb_authorize_interface(struct usb_interface *);
extern void usb_detect_quirks(struct usb_device *udev);
extern void usb_detect_interface_quirks(struct usb_device *udev);
+extern void usb_release_quirk_list(void);
extern int usb_remove_device(struct usb_device *udev);
extern int usb_get_device_descriptor(struct usb_device *dev,
@@ -68,6 +69,7 @@ extern void usb_hub_cleanup(void);
extern int usb_major_init(void);
extern void usb_major_cleanup(void);
extern int usb_device_supports_lpm(struct usb_device *udev);
+extern int usb_port_disable(struct usb_device *udev);
#ifdef CONFIG_PM
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 98705b83d2dce3..842c1ae7a29110 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3657,9 +3657,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
}
ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret)
+ if (ret) {
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+ hsotg->ctrl_req);
return ret;
-
+ }
dwc2_hsotg_dump(hsotg);
return 0;
@@ -3672,6 +3674,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
usb_del_gadget_udc(&hsotg->gadget);
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
return 0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 85fb6226770c85..98339a850940f9 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -3164,7 +3164,6 @@ error3:
error2:
usb_put_hcd(hcd);
error1:
- kfree(hsotg->core_params);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
kfree(hsotg->last_frame_num_array);
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index bda0b21b850f69..51866f3f20522b 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -931,9 +931,8 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
DWC2_HC_XFER_COMPLETE, NULL);
- if (!len) {
+ if (!len && !qtd->isoc_split_offset) {
qtd->complete_split = 0;
- qtd->isoc_split_offset = 0;
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 55da2c7f727f95..6fb66e6a2234ee 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -469,8 +469,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "missing IRQ resource\n");
- return -EINVAL;
+ dev_err(dev, "missing IRQ resource: %d\n", irq);
+ return irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3ba530644c343c..2694a1816b3db9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1613,6 +1613,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index bf9997bdcef9ea..93d0e396629ffa 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1619,6 +1619,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*/
if (w_value && !f->get_alt)
break;
+
+ spin_lock(&cdev->lock);
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
@@ -1628,6 +1630,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
+ spin_unlock(&cdev->lock);
break;
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -1816,6 +1819,8 @@ unknown:
break;
case USB_RECIP_ENDPOINT:
+ if (!cdev->config)
+ break;
endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
list_for_each_entry(f, &cdev->config->functions, list) {
if (test_bit(endp, f->endpoints))
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 018170a89982fc..3ec8bea6804340 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1738,7 +1738,9 @@ void unregister_gadget_item(struct config_item *item)
{
struct gadget_info *gi = to_gadget_info(item);
+ mutex_lock(&gi->lock);
unregister_gadget(gi);
+ mutex_unlock(&gi->lock);
}
EXPORT_SYMBOL_GPL(unregister_gadget_item);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 4191feb765b138..4800bb22cdd61c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3037,7 +3037,7 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
- return USB_GADGET_DELAYED_STATUS;
+ return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static void ffs_func_suspend(struct usb_function *f)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 4dd3c76722479e..25488c89308a12 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -220,6 +220,8 @@
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
+#include <linux/nospec.h>
+
#include "configfs.h"
@@ -3260,6 +3262,7 @@ static struct config_group *fsg_lun_make(struct config_group *group,
fsg_opts = to_fsg_opts(&group->cg_item);
if (num >= FSG_MAX_LUNS)
return ERR_PTR(-ERANGE);
+ num = array_index_nospec(num, FSG_MAX_LUNS);
mutex_lock(&fsg_opts->lock);
if (fsg_opts->refcnt || fsg_opts->common->luns[num]) {
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 67b24398993898..d7d095781be189 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -849,7 +849,7 @@ static struct usb_function *source_sink_alloc_func(
ss = kzalloc(sizeof(*ss), GFP_KERNEL);
if (!ss)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ss_opts = container_of(fi, struct f_ss_opts, func_inst);
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index b5dab103be3821..e931c3cb084096 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -941,14 +941,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
};
struct cntrl_cur_lay3 {
- __u32 dCUR;
+ __le32 dCUR;
};
struct cntrl_range_lay3 {
- __u16 wNumSubRanges;
- __u32 dMIN;
- __u32 dMAX;
- __u32 dRES;
+ __le16 wNumSubRanges;
+ __le32 dMIN;
+ __le32 dMAX;
+ __le32 dRES;
} __packed;
static inline void
@@ -1296,9 +1296,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
- c.dCUR = p_srate;
+ c.dCUR = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- c.dCUR = c_srate;
+ c.dCUR = cpu_to_le32(c_srate);
value = min_t(unsigned, w_length, sizeof c);
memcpy(req->buf, &c, value);
@@ -1336,15 +1336,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
if (entity_id == USB_IN_CLK_ID)
- r.dMIN = p_srate;
+ r.dMIN = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- r.dMIN = c_srate;
+ r.dMIN = cpu_to_le32(c_srate);
else
return -EOPNOTSUPP;
r.dMAX = r.dMIN;
r.dRES = 0;
- r.wNumSubRanges = 1;
+ r.wNumSubRanges = cpu_to_le16(1);
value = min_t(unsigned, w_length, sizeof r);
memcpy(req->buf, &r, value);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 2ec7171b3f0442..4dba794a6ad502 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -690,9 +690,8 @@ static int rndis_reset_response(struct rndis_params *params,
{
rndis_reset_cmplt_type *resp;
rndis_resp_t *r;
-
- u32 length;
u8 *xbuf;
+ u32 length;
/* drain the response queue */
while ((xbuf = rndis_get_next_response(params, &length)))
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index f7771d86ad6c8d..d736184756640a 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -361,10 +361,15 @@ __acquires(&port->port_lock)
*/
{
struct list_head *pool = &port->write_pool;
- struct usb_ep *in = port->port_usb->in;
+ struct usb_ep *in;
int status = 0;
bool do_tty_wake = false;
+ if (!port->port_usb)
+ return status;
+
+ in = port->port_usb->in;
+
while (!port->write_busy && !list_empty(pool)) {
struct usb_request *req;
int len;
@@ -518,7 +523,7 @@ static void gs_rx_push(unsigned long _port)
}
/* push data to (open) tty */
- if (req->actual) {
+ if (req->actual && tty) {
char *packet = req->buf;
unsigned size = req->actual;
unsigned n;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index eb876ed96861d6..85f1f282c1d546 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -379,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
USB_PORT_STAT_CONNECTION) == 0)
dum_hcd->port_status |=
(USB_PORT_STAT_C_CONNECTION << 16);
- if ((dum_hcd->port_status &
- USB_PORT_STAT_ENABLE) == 1 &&
- (dum_hcd->port_status &
- USB_SS_PORT_LS_U0) == 1 &&
- dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
+ if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
+ (dum_hcd->port_status &
+ USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
+ dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
dum_hcd->active = 1;
}
} else {
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 6ba122cc7490b4..95df2b3bb6a1aa 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1066,12 +1066,15 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
static int fotg210_udc_remove(struct platform_device *pdev)
{
struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fotg210->gadget);
iounmap(fotg210->reg);
free_irq(platform_get_irq(pdev, 0), fotg210);
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
return 0;
@@ -1102,7 +1105,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
/* initialize udc */
fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
if (fotg210 == NULL)
- goto err_alloc;
+ goto err;
for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1114,7 +1117,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
fotg210->reg = ioremap(res->start, resource_size(res));
if (fotg210->reg == NULL) {
pr_err("ioremap error.\n");
- goto err_map;
+ goto err_alloc;
}
spin_lock_init(&fotg210->lock);
@@ -1162,7 +1165,7 @@ static int fotg210_udc_probe(struct platform_device *pdev)
fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
GFP_KERNEL);
if (fotg210->ep0_req == NULL)
- goto err_req;
+ goto err_map;
fotg210_init(fotg210);
@@ -1190,12 +1193,14 @@ err_req:
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
err_map:
- if (fotg210->reg)
- iounmap(fotg210->reg);
+ iounmap(fotg210->reg);
err_alloc:
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
+err:
return ret;
}
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 18f5ebd447b823..3b6e34fc032b93 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2100,7 +2100,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
#if defined(PLX_PCI_RDK2)
/* see if PCI int for us by checking irqstat */
intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
- if (!intcsr & (1 << NET2272_PCI_IRQ)) {
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
spin_unlock(&dev->lock);
return IRQ_NONE;
}
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index a47de8c31ce9f6..8efeadf30b4d7d 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1542,11 +1542,14 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
writel(tmp | BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
} else {
writel(tmp & ~BIT(USB_DETECT_ENABLE), &dev->usb->usbctl);
- stop_activity(dev, dev->driver);
+ stop_activity(dev, NULL);
}
spin_unlock_irqrestore(&dev->lock, flags);
+ if (!is_on && dev->driver)
+ dev->driver->disconnect(&dev->gadget);
+
return 0;
}
@@ -2425,8 +2428,11 @@ static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver)
+ if (driver) {
+ spin_unlock(&dev->lock);
driver->disconnect(&dev->gadget);
+ spin_lock(&dev->lock);
+ }
usb_reinit(dev);
}
@@ -3272,6 +3278,8 @@ next_endpoints:
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
+__releases(dev->lock)
+__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3312,12 +3320,14 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
+ spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
+ spin_lock(&dev->lock);
return;
}
}
@@ -3336,6 +3346,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
if (stat & tmp) {
writel(tmp, &dev->regs->irqstat1);
+ spin_unlock(&dev->lock);
if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
if (dev->driver->suspend)
dev->driver->suspend(&dev->gadget);
@@ -3346,6 +3357,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
dev->driver->resume(&dev->gadget);
/* at high speed, note erratum 0133 */
}
+ spin_lock(&dev->lock);
stat &= ~tmp;
}
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index 9b7d39484ed3af..d1ed92acafa341 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2037,6 +2037,7 @@ static inline int machine_without_vbus_sense(void)
{
return machine_is_omap_innovator()
|| machine_is_omap_osk()
+ || machine_is_omap_palmte()
|| machine_is_sx1()
/* No known omap7xx boards with vbus sense */
|| cpu_is_omap7xx();
@@ -2045,7 +2046,7 @@ static inline int machine_without_vbus_sense(void)
static int omap_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
- int status = -ENODEV;
+ int status;
struct omap_ep *ep;
unsigned long flags;
@@ -2083,6 +2084,7 @@ static int omap_udc_start(struct usb_gadget *g,
goto done;
}
} else {
+ status = 0;
if (can_pullup(udc))
pullup_enable(udc);
else
@@ -2612,9 +2614,22 @@ omap_ep_setup(char *name, u8 addr, u8 type,
static void omap_udc_release(struct device *dev)
{
- complete(udc->done);
+ pullup_disable(udc);
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
+ usb_put_phy(udc->transceiver);
+ udc->transceiver = NULL;
+ }
+ omap_writew(0, UDC_SYSCON1);
+ remove_proc_file();
+ if (udc->dc_clk) {
+ if (udc->clk_requested)
+ omap_udc_enable_clock(0);
+ clk_put(udc->hhc_clk);
+ clk_put(udc->dc_clk);
+ }
+ if (udc->done)
+ complete(udc->done);
kfree(udc);
- udc = NULL;
}
static int
@@ -2886,8 +2901,8 @@ bad_on_1710:
udc->clr_halt = UDC_RESET_EP;
/* USB general purpose IRQ: ep0, state changes, dma, etc */
- status = request_irq(pdev->resource[1].start, omap_udc_irq,
- 0, driver_name, udc);
+ status = devm_request_irq(&pdev->dev, pdev->resource[1].start,
+ omap_udc_irq, 0, driver_name, udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[1].start, status);
@@ -2895,20 +2910,20 @@ bad_on_1710:
}
/* USB "non-iso" IRQ (PIO for all but ep0) */
- status = request_irq(pdev->resource[2].start, omap_udc_pio_irq,
- 0, "omap_udc pio", udc);
+ status = devm_request_irq(&pdev->dev, pdev->resource[2].start,
+ omap_udc_pio_irq, 0, "omap_udc pio", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[2].start, status);
- goto cleanup2;
+ goto cleanup1;
}
#ifdef USE_ISO
- status = request_irq(pdev->resource[3].start, omap_udc_iso_irq,
- 0, "omap_udc iso", udc);
+ status = devm_request_irq(&pdev->dev, pdev->resource[3].start,
+ omap_udc_iso_irq, 0, "omap_udc iso", udc);
if (status != 0) {
ERR("can't get irq %d, err %d\n",
(int) pdev->resource[3].start, status);
- goto cleanup3;
+ goto cleanup1;
}
#endif
if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
@@ -2919,23 +2934,8 @@ bad_on_1710:
}
create_proc_file();
- status = usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
- omap_udc_release);
- if (status)
- goto cleanup4;
-
- return 0;
-
-cleanup4:
- remove_proc_file();
-
-#ifdef USE_ISO
-cleanup3:
- free_irq(pdev->resource[2].start, udc);
-#endif
-
-cleanup2:
- free_irq(pdev->resource[1].start, udc);
+ return usb_add_gadget_udc_release(&pdev->dev, &udc->gadget,
+ omap_udc_release);
cleanup1:
kfree(udc);
@@ -2962,42 +2962,15 @@ static int omap_udc_remove(struct platform_device *pdev)
{
DECLARE_COMPLETION_ONSTACK(done);
- if (!udc)
- return -ENODEV;
-
- usb_del_gadget_udc(&udc->gadget);
- if (udc->driver)
- return -EBUSY;
-
udc->done = &done;
- pullup_disable(udc);
- if (!IS_ERR_OR_NULL(udc->transceiver)) {
- usb_put_phy(udc->transceiver);
- udc->transceiver = NULL;
- }
- omap_writew(0, UDC_SYSCON1);
-
- remove_proc_file();
-
-#ifdef USE_ISO
- free_irq(pdev->resource[3].start, udc);
-#endif
- free_irq(pdev->resource[2].start, udc);
- free_irq(pdev->resource[1].start, udc);
+ usb_del_gadget_udc(&udc->gadget);
- if (udc->dc_clk) {
- if (udc->clk_requested)
- omap_udc_enable_clock(0);
- clk_put(udc->hhc_clk);
- clk_put(udc->dc_clk);
- }
+ wait_for_completion(&done);
release_mem_region(pdev->resource[0].start,
pdev->resource[0].end - pdev->resource[0].start + 1);
- wait_for_completion(&done);
-
return 0;
}
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index baa0609a429d99..e340946476037b 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -835,11 +835,11 @@ static void init_controller(struct r8a66597 *r8a66597)
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
- msleep(3);
+ mdelay(3);
r8a66597_bset(r8a66597, PLLC, SYSCFG0);
- msleep(1);
+ mdelay(1);
r8a66597_bset(r8a66597, SCKE, SYSCFG0);
@@ -1193,7 +1193,7 @@ __acquires(r8a66597->lock)
r8a66597->ep0_req->length = 2;
/* AV: what happens if we get called again before that gets through? */
spin_unlock(&r8a66597->lock);
- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
spin_lock(&r8a66597->lock);
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index f7661d9750fdc9..10442b8d4fd152 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1222,6 +1222,7 @@ static const struct hc_driver ehci_hc_driver = {
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
+ .get_resuming_ports = ehci_get_resuming_ports,
/*
* device support
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 086a7115d263c6..a5fdc1eb0cc47e 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -513,10 +513,18 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
return -ESHUTDOWN;
}
+static unsigned long ehci_get_resuming_ports(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ return ehci->resuming_ports;
+}
+
#else
#define ehci_bus_suspend NULL
#define ehci_bus_resume NULL
+#define ehci_get_resuming_ports NULL
#endif /* CONFIG_PM */
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a24720beb39dfd..cccde8217f288f 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -130,8 +130,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "EHCI irq failed\n");
- return -ENODEV;
+ dev_err(dev, "EHCI irq failed: %d\n", irq);
+ return irq;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 1db0626c8bf415..97750f162f0123 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -654,7 +654,7 @@ static int hwahc_security_create(struct hwahc *hwahc)
top = itr + itr_size;
result = __usb_get_extra_descriptor(usb_dev->rawdescriptors[index],
le16_to_cpu(usb_dev->actconfig->desc.wTotalLength),
- USB_DT_SECURITY, (void **) &secd);
+ USB_DT_SECURITY, (void **) &secd, sizeof(*secd));
if (result == -1) {
dev_warn(dev, "BUG? WUSB host has no security descriptors\n");
return 0;
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index f542045dc2a624..e25d72e0527fab 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1849,8 +1849,10 @@ static int imx21_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENXIO;
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
hcd = usb_create_hcd(&imx21_hc_driver,
&pdev->dev, dev_name(&pdev->dev));
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index a11c2c8bda53f7..a217f71b45c6fa 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1990,6 +1990,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *hep)
+__acquires(r8a66597->lock)
+__releases(r8a66597->lock)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
@@ -2002,13 +2004,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
return;
pipenum = pipe->info.pipenum;
+ spin_lock_irqsave(&r8a66597->lock, flags);
if (pipenum == 0) {
kfree(hep->hcpriv);
hep->hcpriv = NULL;
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
return;
}
- spin_lock_irqsave(&r8a66597->lock, flags);
pipe_stop(r8a66597, pipe);
pipe_irq_disable(r8a66597, pipenum);
disable_irq_empty(r8a66597, pipenum);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 692ccc69345e4a..d5434e7a3b2e70 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -2565,7 +2565,7 @@ static int u132_get_frame(struct usb_hcd *hcd)
} else {
int frame = 0;
dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
- msleep(100);
+ mdelay(100);
return frame;
}
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1c1f99a9124a60..ef920c82cb6e5c 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -357,7 +357,7 @@ int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
slot_id = 0;
for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i])
+ if (!xhci->devs[i] || !xhci->devs[i]->udev)
continue;
speed = xhci->devs[i]->udev->speed;
if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
@@ -753,7 +753,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
status |= USB_PORT_STAT_SUSPEND;
}
if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
- !DEV_SUPERSPEED_ANY(raw_port_status)) {
+ !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) {
if ((raw_port_status & PORT_RESET) ||
!(raw_port_status & PORT_PE))
return 0xffffffff;
@@ -799,7 +799,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
time_left = wait_for_completion_timeout(
&bus_state->rexit_done[wIndex],
msecs_to_jiffies(
- XHCI_MAX_REXIT_TIMEOUT));
+ XHCI_MAX_REXIT_TIMEOUT_MS));
spin_lock_irqsave(&xhci->lock, flags);
if (time_left) {
@@ -813,7 +813,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
} else {
int port_status = readl(port_array[wIndex]);
xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n",
- XHCI_MAX_REXIT_TIMEOUT,
+ XHCI_MAX_REXIT_TIMEOUT_MS,
port_status);
status |= USB_PORT_STAT_SUSPEND;
clear_bit(wIndex, &bus_state->rexit_ports);
@@ -1057,17 +1057,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = readl(port_array[wIndex]);
break;
}
-
- /* Software should not attempt to set
- * port link state above '3' (U3) and the port
- * must be enabled.
- */
- if ((temp & PORT_PE) == 0 ||
- (link_state > USB_SS_PORT_LS_U3)) {
- xhci_warn(xhci, "Cannot set link state.\n");
+ /* Port must be enabled */
+ if (!(temp & PORT_PE)) {
+ retval = -ENODEV;
+ break;
+ }
+ /* Can't set port link state above '3' (U3) */
+ if (link_state > USB_SS_PORT_LS_U3) {
+ xhci_warn(xhci, "Cannot set port %d link state %d\n",
+ wIndex, link_state);
goto error;
}
-
if (link_state == USB_SS_PORT_LS_U3) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
@@ -1307,13 +1307,16 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
__le32 __iomem **port_array;
struct xhci_bus_state *bus_state;
unsigned long flags;
+ u32 portsc_buf[USB_MAXCHILDREN];
+ bool wake_enabled;
max_ports = xhci_get_ports(hcd, &port_array);
bus_state = &xhci->bus_state[hcd_index(hcd)];
+ wake_enabled = hcd->self.root_hub->do_remote_wakeup;
spin_lock_irqsave(&xhci->lock, flags);
- if (hcd->self.root_hub->do_remote_wakeup) {
+ if (wake_enabled) {
if (bus_state->resuming_ports || /* USB2 */
bus_state->port_remote_wakeup) { /* USB3 */
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1321,26 +1324,37 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
return -EBUSY;
}
}
-
- port_index = max_ports;
+ /*
+ * Prepare ports for suspend, but don't write anything before all ports
+ * are checked and we know bus suspend can proceed.
+ */
bus_state->bus_suspended = 0;
+ port_index = max_ports;
while (port_index--) {
- /* suspend the port if the port is not suspended */
u32 t1, t2;
- int slot_id;
t1 = readl(port_array[port_index]);
t2 = xhci_port_state_to_neutral(t1);
+ portsc_buf[port_index] = 0;
- if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) {
- xhci_dbg(xhci, "port %d not suspended\n", port_index);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- port_index + 1);
- if (slot_id) {
+ /* Bail out if a USB3 port has a new device in link training */
+ if ((hcd->speed >= HCD_USB3) &&
+ (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
+ bus_state->bus_suspended = 0;
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
+ return -EBUSY;
+ }
+
+ /* suspend ports in U0, or bail out for new connect changes */
+ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
+ if ((t1 & PORT_CSC) && wake_enabled) {
+ bus_state->bus_suspended = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
- spin_lock_irqsave(&xhci->lock, flags);
+ xhci_dbg(xhci, "Bus suspend bailout, port connect change\n");
+ return -EBUSY;
}
+ xhci_dbg(xhci, "port %d not suspended\n", port_index);
t2 &= ~PORT_PLS_MASK;
t2 |= PORT_LINK_STROBE | XDEV_U3;
set_bit(port_index, &bus_state->bus_suspended);
@@ -1349,7 +1363,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
* including the USB 3.0 roothub, but only if CONFIG_PM
* is enabled, so also enable remote wake here.
*/
- if (hcd->self.root_hub->do_remote_wakeup) {
+ if (wake_enabled) {
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
@@ -1362,7 +1376,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t1 = xhci_port_state_to_neutral(t1);
if (t1 != t2)
- writel(t2, port_array[port_index]);
+ portsc_buf[port_index] = t2;
+ }
+
+ /* write port settings, stopping and suspending ports if needed */
+ port_index = max_ports;
+ while (port_index--) {
+ if (!portsc_buf[port_index])
+ continue;
+ if (test_bit(port_index, &bus_state->bus_suspended)) {
+ int slot_id;
+
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ port_index + 1);
+ if (slot_id) {
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_stop_device(xhci, slot_id, 1);
+ spin_lock_irqsave(&xhci->lock, flags);
+ }
+ }
+ writel(portsc_buf[port_index], port_array[port_index]);
}
hcd->state = HC_STATE_SUSPENDED;
bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
@@ -1521,4 +1554,15 @@ int xhci_bus_resume(struct usb_hcd *hcd)
return 0;
}
+unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_bus_state *bus_state;
+
+ bus_state = &xhci->bus_state[hcd_index(hcd)];
+
+ /* USB3 port wakeups are reported via usb_wakeup_notification() */
+ return bus_state->resuming_ports; /* USB2 ports only */
+}
+
#endif /* CONFIG_PM */
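Other host drivers can feed report_wakeup_requests() the same way; a sketch
of the wiring for a hypothetical driver with its own resuming_ports bitmap
(mirrors the EHCI and xHCI implementations above):

    static unsigned long foo_get_resuming_ports(struct usb_hcd *hcd)
    {
        struct foo_hcd *foo = hcd_to_foo(hcd);  /* hypothetical driver */

        return foo->resuming_ports;             /* bit i => port i + 1 */
    }

    static const struct hc_driver foo_hc_driver = {
        /* ... */
        .bus_resume             = foo_bus_resume,       /* hypothetical */
        .get_resuming_ports     = foo_get_resuming_ports,
    };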
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 66c6642ae0bbf6..da7b20196428fe 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -638,7 +638,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
if (!ep->stream_info)
return NULL;
- if (stream_id > ep->stream_info->num_streams)
+ if (stream_id >= ep->stream_info->num_streams)
return NULL;
return ep->stream_info->stream_rings[stream_id];
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 7adf5a4067bfb4..2fe27121575191 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -174,6 +174,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 23ee1faab314e8..7ecc0bea0f7e7d 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1644,10 +1644,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
}
}
- if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_U0 &&
- DEV_SUPERSPEED_ANY(temp)) {
+ if ((temp & PORT_PLC) &&
+ DEV_SUPERSPEED_ANY(temp) &&
+ ((temp & PORT_PLS_MASK) == XDEV_U0 ||
+ (temp & PORT_PLS_MASK) == XDEV_U1 ||
+ (temp & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- /* We've just brought the device into U0 through either the
+ /* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
* initiated remote wake, don't pass up the link state change,
@@ -1675,7 +1678,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 * RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
- if (!DEV_SUPERSPEED_ANY(temp) &&
+ if (!DEV_SUPERSPEED_ANY(temp) && hcd->speed < HCD_USB3 &&
test_and_clear_bit(faked_port_index,
&bus_state->rexit_ports)) {
complete(&bus_state->rexit_done[faked_port_index]);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 44195352951ac5..5036ca6038a5e5 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -897,6 +897,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
spin_unlock_irqrestore(&xhci->lock, flags);
}
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+ __le32 __iomem **port_array;
+ int port_index;
+ u32 status;
+ u32 portsc;
+
+ status = readl(&xhci->op_regs->status);
+ if (status & STS_EINT)
+ return true;
+ /*
+ * Checking STS_EINT is not enough as there is a lag between a change
+ * bit being set and the Port Status Change Event that it generated
+	 * being written to the Event Ring. See note in xHCI 1.1 section 4.19.2.
+ */
+
+ port_index = xhci->num_usb2_ports;
+ port_array = xhci->usb2_ports;
+ while (port_index--) {
+ portsc = readl(port_array[port_index]);
+ if (portsc & PORT_CHANGE_MASK ||
+ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ return true;
+ }
+ port_index = xhci->num_usb3_ports;
+ port_array = xhci->usb3_ports;
+ while (port_index--) {
+ portsc = readl(port_array[port_index]);
+ if (portsc & PORT_CHANGE_MASK ||
+ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ return true;
+ }
+ return false;
+}
+
/*
* Stop HC (not bus-specific)
*
@@ -993,7 +1028,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
*/
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
- u32 command, temp = 0, status;
+ u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
int retval = 0;
@@ -1027,8 +1062,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
writel(command, &xhci->op_regs->command);
+ /*
+	 * Some controllers take up to 55+ ms to complete the controller
+	 * restore, so set the timeout to 100 ms. The xHCI specification
+	 * doesn't mention any timeout value.
+ */
if (xhci_handshake(&xhci->op_regs->status,
- STS_RESTORE, 0, 10 * 1000)) {
+ STS_RESTORE, 0, 100 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
@@ -1115,8 +1155,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
done:
if (retval == 0) {
/* Resume root hubs only when have pending events. */
- status = readl(&xhci->op_regs->status);
- if (status & STS_EINT) {
+ if (xhci_pending_portevent(xhci)) {
usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
@@ -3657,6 +3696,9 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
}
spin_lock_irqsave(&xhci->lock, flags);
+
+ virt_dev->udev = NULL;
+
/* Don't disable the slot if the host controller is dead. */
state = readl(&xhci->op_regs->status);
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
@@ -4396,6 +4438,14 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ /* Prevent U1 if service interval is shorter than U1 exit latency */
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+ if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
+ return USB3_LPM_DISABLED;
+ }
+ }
+
if (xhci->quirks & XHCI_INTEL_HOST)
timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
else
@@ -4452,6 +4502,14 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ /* Prevent U2 if service interval is shorter than U2 exit latency */
+ if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
+ if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
+ return USB3_LPM_DISABLED;
+ }
+ }
+
if (xhci->quirks & XHCI_INTEL_HOST)
timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
else
@@ -5028,6 +5086,7 @@ static const struct hc_driver xhci_hc_driver = {
.hub_status_data = xhci_hub_status_data,
.bus_suspend = xhci_bus_suspend,
.bus_resume = xhci_bus_resume,
+ .get_resuming_ports = xhci_get_resuming_ports,
/*
* call back when device connected and addressed
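Editor's note: the two U1/U2 hunks above apply the same guard: if a periodic endpoint's service interval is not longer than the link's exit latency (MEL), the device could never wake from the low-power state in time, so LPM is disabled. A minimal userspace sketch of that comparison, assuming the SuperSpeed bInterval encoding used by xhci_service_interval_to_ns() (2^(bInterval-1) microframes of 125 us); the 300 us MEL is an invented example value:

	#include <stdio.h>

	static unsigned long long service_interval_ns(unsigned int bInterval)
	{
		/* SS periodic endpoints: 2^(bInterval-1) microframes -> ns */
		return (1ULL << (bInterval - 1)) * 125000ULL;
	}

	int main(void)
	{
		unsigned long long mel_ns = 300000;	/* hypothetical exit latency */
		unsigned int bInterval;

		for (bInterval = 1; bInterval <= 6; bInterval++)
			printf("bInterval %u -> ESIT %llu ns: U1/U2 %s\n",
			       bInterval, service_interval_ns(bInterval),
			       service_interval_ns(bInterval) <= mel_ns ?
			       "disabled" : "allowed");
		return 0;
	}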
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7a909216d95c18..1c77ee90957044 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -309,6 +309,7 @@ struct xhci_op_regs {
*/
#define PORT_PLS_MASK (0xf << 5)
#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_INACTIVE (0x6 << 5)
@@ -383,6 +384,10 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
/* Cold Attach Status - xHC can set this bit to report device attached during
 * Sx state. Warm port reset should be performed to clear this bit and move port
* to connected state.
@@ -1486,7 +1491,7 @@ struct xhci_bus_state {
* It can take up to 20 ms to transition from RExit to U0 on the
* Intel Lynx Point LP xHCI host.
*/
-#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000)
+#define XHCI_MAX_REXIT_TIMEOUT_MS 20
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
@@ -1941,9 +1946,11 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
int xhci_bus_resume(struct usb_hcd *hcd);
+unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#else
#define xhci_bus_suspend NULL
#define xhci_bus_resume NULL
+#define xhci_get_resuming_ports NULL
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
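Editor's note: PORT_CHANGE_MASK above is what lets xhci_pending_portevent() catch a port change that hasn't yet produced an event on the Event Ring. A minimal sketch of that PORTSC test, with the bit values copied from xhci.h and the sample register values invented:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PORT_CSC	(1 << 17)
	#define PORT_PEC	(1 << 18)
	#define PORT_WRC	(1 << 19)
	#define PORT_OCC	(1 << 20)
	#define PORT_RC		(1 << 21)
	#define PORT_PLC	(1 << 22)
	#define PORT_CEC	(1 << 23)
	#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
				  PORT_RC | PORT_PLC | PORT_CEC)
	#define PORT_PLS_MASK	(0xf << 5)
	#define XDEV_RESUME	(0xf << 5)

	/* same predicate xhci_pending_portevent() applies per port */
	static bool port_event_pending(uint32_t portsc)
	{
		return (portsc & PORT_CHANGE_MASK) ||
		       (portsc & PORT_PLS_MASK) == XDEV_RESUME;
	}

	int main(void)
	{
		printf("%d\n", port_event_pending(PORT_CSC));	/* 1: change bit */
		printf("%d\n", port_event_pending(XDEV_RESUME));/* 1: resume PLS */
		printf("%d\n", port_event_pending(0));		/* 0: idle port */
		return 0;
	}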
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index a0a3827b4aff75..993f4da065c3a6 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -63,6 +63,8 @@ static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
{ APPLEDISPLAY_DEVICE(0x921d) },
+ { APPLEDISPLAY_DEVICE(0x9222) },
+ { APPLEDISPLAY_DEVICE(0x9226) },
{ APPLEDISPLAY_DEVICE(0x9236) },
/* Terminating entry */
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 442b6631162eb6..3d750671b85a6b 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -388,7 +388,7 @@ static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned ch
mask &= 0x0f;
val &= 0x0f;
d = (priv->reg[1] & (~mask)) ^ val;
- if (set_1284_register(pp, 2, d, GFP_KERNEL))
+ if (set_1284_register(pp, 2, d, GFP_ATOMIC))
return 0;
priv->reg[1] = d;
return d & 0xf;
@@ -398,7 +398,7 @@ static unsigned char parport_uss720_read_status(struct parport *pp)
{
unsigned char ret;
- if (get_1284_register(pp, 1, &ret, GFP_KERNEL))
+ if (get_1284_register(pp, 1, &ret, GFP_ATOMIC))
return 0;
return ret & 0xf8;
}
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 343fa6ff9f4bad..5594a4a4a83fd6 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -414,8 +414,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_yurex *dev;
- int retval = 0;
- int bytes_read = 0;
+ int len = 0;
char in_buffer[20];
unsigned long flags;
@@ -423,26 +422,19 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
mutex_lock(&dev->io_mutex);
if (!dev->interface) { /* already disconnected */
- retval = -ENODEV;
- goto exit;
+ mutex_unlock(&dev->io_mutex);
+ return -ENODEV;
}
spin_lock_irqsave(&dev->lock, flags);
- bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+ len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->io_mutex);
- if (*ppos < bytes_read) {
- if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
- retval = -EFAULT;
- else {
- retval = bytes_read - *ppos;
- *ppos += bytes_read;
- }
- }
+ if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+ return -EIO;
-exit:
- mutex_unlock(&dev->io_mutex);
- return retval;
+ return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
@@ -450,13 +442,13 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
{
struct usb_yurex *dev;
int i, set = 0, retval = 0;
- char buffer[16];
+ char buffer[16 + 1];
char *data = buffer;
unsigned long long c, c2 = 0;
signed long timeout = 0;
DEFINE_WAIT(wait);
- count = min(sizeof(buffer), count);
+ count = min(sizeof(buffer) - 1, count);
dev = file->private_data;
/* verify that we actually have some data to write */
@@ -475,6 +467,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
retval = -EFAULT;
goto error;
}
+ buffer[count] = 0;
memset(dev->cntl_buffer, CMD_PADDING, YUREX_BUF_SIZE);
switch (buffer[0]) {
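Editor's note: the yurex read path now delegates the *ppos bookkeeping it used to open-code to simple_read_from_buffer(). A rough userspace model of that helper's semantics (not the kernel implementation, which also does copy_to_user() and can return -EFAULT):

	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>

	/* Copy at most count bytes starting at *ppos, advance *ppos,
	 * return the number of bytes copied (0 at EOF). */
	static ssize_t model_read(char *to, size_t count, long *ppos,
				  const char *from, size_t available)
	{
		size_t pos = *ppos, n;

		if (pos >= available)
			return 0;
		n = available - pos;
		if (n > count)
			n = count;
		memcpy(to, from + pos, n);
		*ppos += n;
		return n;
	}

	int main(void)
	{
		char src[] = "42\n", dst[8];
		long pos = 0;
		ssize_t n;

		while ((n = model_read(dst, 2, &pos, src, strlen(src))) > 0)
			fwrite(dst, 1, n, stdout);
		return 0;
	}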
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 494823f21c28d2..7ec66f1db418d7 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2580,8 +2580,11 @@ static int musb_bus_suspend(struct usb_hcd *hcd)
{
struct musb *musb = hcd_to_musb(hcd);
u8 devctl;
+ int ret;
- musb_port_suspend(musb, true);
+ ret = musb_port_suspend(musb, true);
+ if (ret)
+ return ret;
if (!is_host_active(musb))
return 0;
diff --git a/drivers/usb/musb/musb_host.h b/drivers/usb/musb/musb_host.h
index 7bbf01bf4bb0b5..54d02ed032df01 100644
--- a/drivers/usb/musb/musb_host.h
+++ b/drivers/usb/musb/musb_host.h
@@ -92,7 +92,7 @@ extern void musb_host_rx(struct musb *, u8);
extern void musb_root_disconnect(struct musb *musb);
extern void musb_host_resume_root_hub(struct musb *musb);
extern void musb_host_poke_root_hub(struct musb *musb);
-extern void musb_port_suspend(struct musb *musb, bool do_suspend);
+extern int musb_port_suspend(struct musb *musb, bool do_suspend);
extern void musb_port_reset(struct musb *musb, bool do_reset);
extern void musb_host_finish_resume(struct work_struct *work);
#else
@@ -124,7 +124,10 @@ static inline void musb_root_disconnect(struct musb *musb) {}
static inline void musb_host_resume_root_hub(struct musb *musb) {}
static inline void musb_host_poll_rh_status(struct musb *musb) {}
static inline void musb_host_poke_root_hub(struct musb *musb) {}
-static inline void musb_port_suspend(struct musb *musb, bool do_suspend) {}
+static inline int musb_port_suspend(struct musb *musb, bool do_suspend)
+{
+ return 0;
+}
static inline void musb_port_reset(struct musb *musb, bool do_reset) {}
static inline void musb_host_finish_resume(struct work_struct *work) {}
#endif
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 92d5f718659b7e..ac5458a69de5a6 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -74,14 +74,14 @@ void musb_host_finish_resume(struct work_struct *work)
spin_unlock_irqrestore(&musb->lock, flags);
}
-void musb_port_suspend(struct musb *musb, bool do_suspend)
+int musb_port_suspend(struct musb *musb, bool do_suspend)
{
struct usb_otg *otg = musb->xceiv->otg;
u8 power;
void __iomem *mbase = musb->mregs;
if (!is_host_active(musb))
- return;
+ return 0;
/* NOTE: this doesn't necessarily put PHY into low power mode,
* turning off its clock; that's a function of PHY integration and
@@ -92,16 +92,20 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
if (do_suspend) {
int retries = 10000;
- power &= ~MUSB_POWER_RESUME;
- power |= MUSB_POWER_SUSPENDM;
- musb_writeb(mbase, MUSB_POWER, power);
+ if (power & MUSB_POWER_RESUME)
+ return -EBUSY;
- /* Needed for OPT A tests */
- power = musb_readb(mbase, MUSB_POWER);
- while (power & MUSB_POWER_SUSPENDM) {
+ if (!(power & MUSB_POWER_SUSPENDM)) {
+ power |= MUSB_POWER_SUSPENDM;
+ musb_writeb(mbase, MUSB_POWER, power);
+
+ /* Needed for OPT A tests */
power = musb_readb(mbase, MUSB_POWER);
- if (retries-- < 1)
- break;
+ while (power & MUSB_POWER_SUSPENDM) {
+ power = musb_readb(mbase, MUSB_POWER);
+ if (retries-- < 1)
+ break;
+ }
}
dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
@@ -138,6 +142,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
schedule_delayed_work(&musb->finish_resume_work,
msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
+ return 0;
}
void musb_port_reset(struct musb *musb, bool do_reset)
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 90b67a4ca22164..558f33a75fd96d 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -56,9 +56,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
- if (ret)
- return ret;
am_phy->usb_phy_gen.phy.init = am335x_init;
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
@@ -77,7 +74,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
device_set_wakeup_enable(dev, false);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false);
- return 0;
+ return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
}
static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 94eb2923afed03..85d031ce85c1fc 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -879,6 +879,7 @@ int usb_otg_start(struct platform_device *pdev)
if (pdata->init && pdata->init(pdev) != 0)
return -EINVAL;
+#ifdef CONFIG_PPC32
if (pdata->big_endian_mmio) {
_fsl_readl = _fsl_readl_be;
_fsl_writel = _fsl_writel_be;
@@ -886,6 +887,7 @@ int usb_otg_start(struct platform_device *pdev)
_fsl_readl = _fsl_readl_le;
_fsl_writel = _fsl_writel_le;
}
+#endif
/* request irq */
p_otg->irq = platform_get_irq(pdev, 0);
@@ -976,7 +978,7 @@ int usb_otg_start(struct platform_device *pdev)
/*
* state file in sysfs
*/
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 8647d2c2a8c499..c5553028e616d9 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -641,14 +641,11 @@ static int usbhsg_ep_disable(struct usb_ep *ep)
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhs_pipe *pipe;
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(&uep->lock, flags);
pipe = usbhsg_uep_to_pipe(uep);
- if (!pipe) {
- ret = -EINVAL;
+ if (!pipe)
goto out;
- }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 71133d96f97d9b..f73ea14e81736d 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -118,7 +118,7 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- if (r < bufsize) {
+ if (r < (int)bufsize) {
if (r >= 0) {
dev_err(&dev->dev,
"short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 32cadca198b29f..e3ea0fdd39132d 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -33,7 +33,7 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *);
static void cp210x_close(struct usb_serial_port *);
static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *);
static void cp210x_get_termios_port(struct usb_serial_port *port,
- unsigned int *cflagp, unsigned int *baudp);
+ tcflag_t *cflagp, unsigned int *baudp);
static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
@@ -57,6 +57,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -75,6 +76,7 @@ static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME built-in converter */
{ USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
{ USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+ { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
{ USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
{ USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
{ USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
@@ -91,6 +93,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -108,6 +113,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -120,7 +128,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -130,17 +140,24 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
@@ -515,7 +532,7 @@ static void cp210x_get_termios(struct tty_struct *tty,
&tty->termios.c_cflag, &baud);
tty_encode_baud_rate(tty, baud, baud);
} else {
- unsigned int cflag;
+ tcflag_t cflag;
cflag = 0;
cp210x_get_termios_port(port, &cflag, &baud);
}
@@ -526,10 +543,11 @@ static void cp210x_get_termios(struct tty_struct *tty,
* This is the heart of cp210x_get_termios which always uses a &usb_serial_port.
*/
static void cp210x_get_termios_port(struct usb_serial_port *port,
- unsigned int *cflagp, unsigned int *baudp)
+ tcflag_t *cflagp, unsigned int *baudp)
{
struct device *dev = &port->dev;
- unsigned int cflag, modem_ctl[4];
+ tcflag_t cflag;
+ unsigned int modem_ctl[4];
unsigned int baud;
unsigned int bits;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 3e5b189a79b430..af258bb632dd01 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -604,6 +604,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
@@ -1020,6 +1022,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+ /* EZPrototypes devices */
+ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 76a10b222ff922..15d220eaf6e65a 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -566,7 +566,9 @@
/*
* NovaTech product ids (FTDI_VID)
*/
-#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
/*
* Synapse Wireless product ids (FTDI_VID)
@@ -1308,6 +1310,12 @@
#define IONICS_PLUGCOMPUTER_PID 0x0102
/*
+ * EZPrototypes (PID reseller)
+ */
+#define EZPROTOTYPES_VID 0x1c40
+#define HJELMSLUND_USB485_ISO_PID 0x0477
+
+/*
* Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/io_ti.h b/drivers/usb/serial/io_ti.h
index 1bd67b24f9160a..bc9ff5ebd67c1b 100644
--- a/drivers/usb/serial/io_ti.h
+++ b/drivers/usb/serial/io_ti.h
@@ -178,7 +178,7 @@ struct ump_interrupt {
} __attribute__((packed));
-#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3)
+#define TIUMP_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01)
#define TIUMP_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
#define TIUMP_INTERRUPT_CODE_LSR 0x03
#define TIUMP_INTERRUPT_CODE_MSR 0x04
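Editor's note: the new macro takes the port number from bit 6 of the interrupt code instead of assuming codes start at 0x3x in steps of 0x10; the identical change lands in ti_usb_3410_5052.h below. A quick check, with the interrupt code values made up for illustration:

	#include <stdio.h>

	#define TIUMP_GET_PORT_FROM_CODE(c)	(((c) >> 6) & 0x01)
	#define TIUMP_GET_FUNC_FROM_CODE(c)	((c) & 0x0f)

	int main(void)
	{
		/* hypothetical codes: function 0x3 (LSR) on port 0 and port 1 */
		unsigned char codes[] = { 0x03, 0x43 };
		unsigned int i;

		for (i = 0; i < sizeof(codes); i++)
			printf("code 0x%02x -> port %d, func 0x%x\n", codes[i],
			       TIUMP_GET_PORT_FROM_CODE(codes[i]),
			       TIUMP_GET_FUNC_FROM_CODE(codes[i]));
		return 0;
	}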
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 6b094242891707..8a4047de43dce8 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -373,8 +373,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
3, /* get pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
0, 0, data, 1, 2000);
- if (rc >= 0)
+ if (rc == 1)
*value = *data;
+ else if (rc >= 0)
+ rc = -EIO;
kfree(data);
return rc;
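Editor's note: keyspan_pda_get_modem_info() requests exactly one byte, so a non-negative but short transfer now becomes -EIO instead of silently consuming uninitialized data. The general shape of such a check, sketched standalone:

	#include <errno.h>
	#include <stdio.h>

	/* Normalize a usb_control_msg()-style result: negative values are
	 * errnos, anything shorter than the expected length becomes -EIO. */
	static int check_transfer(int rc, int expected)
	{
		if (rc < 0)
			return rc;
		if (rc != expected)
			return -EIO;
		return rc;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       check_transfer(1, 1),	/* 1: full transfer */
		       check_transfer(0, 1),	/* -EIO: short read */
		       check_transfer(-71, 1));	/* -71: passed through */
		return 0;
	}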
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 813035f51fe73a..7d252678c55aca 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -408,12 +408,20 @@ static int kobil_tiocmget(struct tty_struct *tty)
transfer_buffer_length,
KOBIL_TIMEOUT);
- dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
- __func__, result, transfer_buffer[0]);
+ dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+ result);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ goto out_free;
+ }
+
+ dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
+out_free:
kfree(transfer_buffer);
return result;
}
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 4581fa1dec984e..286b43c79d38a7 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -368,8 +368,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
if (!urbtrack)
return -ENOMEM;
- kref_get(&mos_parport->ref_count);
- urbtrack->mos_parport = mos_parport;
urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urbtrack->urb) {
kfree(urbtrack);
@@ -390,6 +388,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
usb_sndctrlpipe(usbdev, 0),
(unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
+ kref_get(&mos_parport->ref_count);
+ urbtrack->mos_parport = mos_parport;
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
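Editor's note: write_parport_reg_nonblock() used to take the kref before the urb and setup-packet allocations; their failure paths freed urbtrack without a matching put, leaking a reference on mos_parport. Moving the get after the last allocation that can fail means error returns need no undo. The same ordering rule in miniature, with a toy refcount standing in for the kref:

	#include <stdio.h>
	#include <stdlib.h>

	struct parent { int refcount; };

	/* Take the parent reference only once nothing else can fail,
	 * so failure paths need no put. */
	static void *tracker_create(struct parent *p, int fail_alloc)
	{
		void *track = fail_alloc ? NULL : malloc(16);

		if (!track)
			return NULL;	/* no reference taken, nothing to undo */
		p->refcount++;		/* commit point: all allocations done */
		return track;
	}

	int main(void)
	{
		struct parent p = { .refcount = 1 };

		free(tracker_create(&p, 0));
		tracker_create(&p, 1);	/* failure path */
		printf("refcount %d (failure leaked nothing)\n", p.refcount); /* 2 */
		return 0;
	}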
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index ed883a7ad5339c..58ba6904a087f9 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -482,6 +482,9 @@ static void mos7840_control_callback(struct urb *urb)
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+ if (urb->actual_length < 1)
+ goto out;
+
dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
mos7840_port->MsrLsr, mos7840_port->port_num);
data = urb->transfer_buffer;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d982c455e18e59..9f96dd2743707d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -199,6 +199,8 @@ static void option_instat_callback(struct urb *urb);
#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
+#define DELL_PRODUCT_5821E 0x81d7
+
#define KYOCERA_VENDOR_ID 0x0c88
#define KYOCERA_PRODUCT_KPC650 0x17da
#define KYOCERA_PRODUCT_KPC680 0x180a
@@ -1033,6 +1035,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5821E),
+ .driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
{ USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -1062,7 +1066,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1143,6 +1148,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
.driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1159,6 +1166,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
+ { USB_DEVICE(TELIT_VENDOR_ID, 0x1900), /* Telit LN940 (QMI) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1901, 0xff), /* Telit LN940 (MBIM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
@@ -1323,6 +1334,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x0602, 0xff) }, /* GosunCn ZTE WeLink ME3630 (MBIM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
@@ -1526,6 +1538,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1753,6 +1766,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
.driver_info = RSVD(5) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x1e0e, 0x9003, 0xff) }, /* Simcom SIM7500/SIM7600 MBIM mode */
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
@@ -1928,16 +1942,29 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
- { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
+ .driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x10) }, /* HP lt4132 (Huawei ME906s-158) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x14) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
+ { USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
+ .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
+ .driver_info = RSVD(6) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 3da25ad267a263..9706d214c409b5 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -47,6 +47,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
+ { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
@@ -86,9 +87,14 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 123289085ee259..d84c3b3d477b70 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -13,6 +13,7 @@
#define PL2303_VENDOR_ID 0x067b
#define PL2303_PRODUCT_ID 0x2303
+#define PL2303_PRODUCT_ID_TB 0x2304
#define PL2303_PRODUCT_ID_RSAQ2 0x04bb
#define PL2303_PRODUCT_ID_DCU11 0x1234
#define PL2303_PRODUCT_ID_PHAROS 0xaaa0
@@ -25,6 +26,7 @@
#define PL2303_PRODUCT_ID_MOTOROLA 0x0307
#define PL2303_PRODUCT_ID_ZTEK 0xe1f1
+
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
@@ -123,10 +125,15 @@
/* Hewlett-Packard POS Pole Displays */
#define HP_VENDOR_ID 0x03f0
+#define HP_LM920_PRODUCT_ID 0x026b
+#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
+#define HP_LD220TA_PRODUCT_ID 0x4349
+#define HP_LD960TA_PRODUCT_ID 0x4439
+#define HP_LM940_PRODUCT_ID 0x5039
/* Cressi Edy (diving computer) PC interface */
#define CRESSI_VENDOR_ID 0x04b8
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 07d1ecd564f79d..8960a46c83bb54 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -790,9 +790,9 @@ static void sierra_close(struct usb_serial_port *port)
kfree(urb->transfer_buffer);
usb_free_urb(urb);
usb_autopm_put_interface_async(serial->interface);
- spin_lock(&portdata->lock);
+ spin_lock_irq(&portdata->lock);
portdata->outstanding_urbs--;
- spin_unlock(&portdata->lock);
+ spin_unlock_irq(&portdata->lock);
}
sierra_stop_rx_urbs(port);
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 98f35c656c02d8..0cd247f75b8b8c 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -227,7 +227,7 @@ struct ti_interrupt {
} __attribute__((packed));
/* Interrupt codes */
-#define TI_GET_PORT_FROM_CODE(c) (((c) >> 4) - 3)
+#define TI_GET_PORT_FROM_CODE(c) (((c) >> 6) & 0x01)
#define TI_GET_FUNC_FROM_CODE(c) ((c) & 0x0f)
#define TI_CODE_HARDWARE_ERROR 0xFF
#define TI_CODE_DATA_ERROR 0x03
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2674da40d9cd78..51124211140395 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -87,7 +87,9 @@ DEVICE(moto_modem, MOTO_IDS);
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+ { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
+ { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index dba51362d2e26c..b3344a77dcce76 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -223,8 +223,12 @@ static int slave_configure(struct scsi_device *sdev)
if (!(us->fflags & US_FL_NEEDS_CAP16))
sdev->try_rc_10_first = 1;
- /* assume SPC3 or latter devices support sense size > 18 */
- if (sdev->scsi_level > SCSI_SPC_2)
+ /*
+	 * assume SPC3 or later devices support sense size > 18
+ * unless US_FL_BAD_SENSE quirk is specified.
+ */
+ if (sdev->scsi_level > SCSI_SPC_2 &&
+ !(us->fflags & US_FL_BAD_SENSE))
us->fflags |= US_FL_SANE_SENSE;
/* USB-IDE bridges tend to report SK = 0x04 (Non-recoverable
@@ -341,6 +345,15 @@ static int queuecommand_lck(struct scsi_cmnd *srb,
return 0;
}
+ if ((us->fflags & US_FL_NO_ATA_1X) &&
+ (srb->cmnd[0] == ATA_12 || srb->cmnd[0] == ATA_16)) {
+ memcpy(srb->sense_buffer, usb_stor_sense_invalidCDB,
+ sizeof(usb_stor_sense_invalidCDB));
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ done(srb);
+ return 0;
+ }
+
/* enqueue the command and wake up the control thread */
srb->scsi_done = done;
us->srb = srb;
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 02f86dd1a3400b..90a7bffe348432 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -808,12 +808,24 @@ Retry_Sense:
if (result == USB_STOR_TRANSPORT_GOOD) {
srb->result = SAM_STAT_GOOD;
srb->sense_buffer[0] = 0x0;
+ }
+
+ /*
+ * ATA-passthru commands use sense data to report
+ * the command completion status, and often devices
+ * return Check Condition status when nothing is
+ * wrong.
+ */
+ else if (srb->cmnd[0] == ATA_16 ||
+ srb->cmnd[0] == ATA_12) {
+ /* leave the data alone */
+ }
/* If there was a problem, report an unspecified
* hardware error to prevent the higher layers from
* entering an infinite retry loop.
*/
- } else {
+ else {
srb->result = DID_ERROR << 16;
if ((sshdr.response_code & 0x72) == 0x72)
srb->sense_buffer[1] = HARDWARE_ERROR;
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1a34d2a89de66c..d92b974f06355e 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1393,6 +1393,18 @@ UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
US_FL_SANE_SENSE),
/*
+ * Reported by Icenowy Zheng <icenowy@aosc.io>
+ * The SMI SM3350 USB-UFS bridge controller will enter a wrong state
+ * in which it does not process read/write commands if a long sense is
+ * requested, so force it to use 18-byte sense.
+ */
+UNUSUAL_DEV( 0x090c, 0x3350, 0x0000, 0xffff,
+ "SMI",
+ "SM3350 UFS-to-USB-Mass-Storage bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BAD_SENSE ),
+
+/*
* Pete Zaitcev <zaitcev@yahoo.com>, bz#164688.
* The device blatantly ignores LUN and returns 1 in GetMaxLUN.
*/
@@ -2213,6 +2225,13 @@ UNUSUAL_DEV( 0x4146, 0xba01, 0x0100, 0x0100,
"Micro Mini 1GB",
USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE ),
+/* Reported-by: Tim Anderson <tsa@biglakesoftware.com> */
+UNUSUAL_DEV( 0x2ca3, 0x0031, 0x0000, 0x9999,
+ "DJI",
+ "CineSSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/*
* Nick Bowler <nbowler@elliptictech.com>
* SCSI stack spams (otherwise harmless) error messages.
diff --git a/drivers/usb/storage/unusual_realtek.h b/drivers/usb/storage/unusual_realtek.h
index e41f50c95ed409..f5fc3271e19cf7 100644
--- a/drivers/usb/storage/unusual_realtek.h
+++ b/drivers/usb/storage/unusual_realtek.h
@@ -38,4 +38,14 @@ UNUSUAL_DEV(0x0bda, 0x0159, 0x0000, 0x9999,
"USB Card Reader",
USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+UNUSUAL_DEV(0x0bda, 0x0177, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
+UNUSUAL_DEV(0x0bda, 0x0184, 0x0000, 0x9999,
+ "Realtek",
+ "USB Card Reader",
+ USB_SC_DEVICE, USB_PR_DEVICE, init_realtek_cr, 0),
+
#endif /* defined(CONFIG_USB_STORAGE_REALTEK) || ... */
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index b66faaf3e842e8..4019c11f24e2cf 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -230,7 +230,7 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, sizeof(*secd));
- if (result < sizeof(*secd)) {
+ if (result < (int)sizeof(*secd)) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 1212b4b3c5a909..e9ff710a3d12d7 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -875,6 +875,7 @@ error_get_version:
error_rc_add:
usb_put_intf(iface);
usb_put_dev(hwarc->usb_dev);
+ kfree(hwarc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 59cf36175f997a..211b5538258eb4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -955,7 +955,8 @@ err_used:
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
- sockfd_put(sock);
+ if (sock)
+ sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 42ec026e9200c6..009315f006bf0b 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1009,7 +1009,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
}
/*
- * Set prot_iter to data_iter, and advance past any
+ * Set prot_iter to data_iter and truncate it to
+ * prot_bytes, and advance data_iter past any
 * preceding prot_bytes that may be present.
*
* Also fix up the exp_data_len to reflect only the
@@ -1018,6 +1019,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
if (prot_bytes) {
exp_data_len -= prot_bytes;
prot_iter = data_iter;
+ iov_iter_truncate(&prot_iter, prot_bytes);
iov_iter_advance(&data_iter, prot_bytes);
}
tag = vhost64_to_cpu(vq, v_req_pi.tag);
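Editor's note: iov_iter_truncate() caps how much the protection iterator may consume, while iov_iter_advance() skips the data iterator past those same bytes; without the truncate, prot_iter still spanned the whole request. A userspace model of the two operations on a trivial iterator (the real iov_iter is far more general):

	#include <stddef.h>
	#include <stdio.h>

	struct iter { size_t offset, count; };	/* toy stand-in for iov_iter */

	static void truncate_iter(struct iter *i, size_t count)
	{
		if (i->count > count)
			i->count = count;	/* cap what may be consumed */
	}

	static void advance_iter(struct iter *i, size_t bytes)
	{
		i->offset += bytes;		/* skip past consumed bytes */
		i->count -= bytes;
	}

	int main(void)
	{
		size_t prot_bytes = 8;
		struct iter data = { 0, 512 }, prot;

		prot = data;			/* prot_iter = data_iter */
		truncate_iter(&prot, prot_bytes);
		advance_iter(&data, prot_bytes);
		printf("prot: off %zu len %zu; data: off %zu len %zu\n",
		       prot.offset, prot.count, data.offset, data.count);
		return 0;
	}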
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 080447b81a5915..56e823e9ee929f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -27,6 +27,7 @@
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
+#include <linux/nospec.h>
#include "vhost.h"
@@ -748,6 +749,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg
if (idx >= d->nvqs)
return -ENOBUFS;
+ idx = array_index_nospec(idx, d->nvqs);
vq = d->vqs[idx];
mutex_lock(&vq->mutex);
@@ -1548,6 +1550,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
return -EFAULT;
}
if (unlikely(vq->log_used)) {
+ /* Make sure used idx is seen before log. */
+ smp_wmb();
/* Log used index update. */
log_write(vq->log_base,
vq->log_addr + offsetof(struct vring_used, idx),
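Editor's note: two hardening changes here. idx is clamped with array_index_nospec() so a mispredicted bounds check cannot speculatively index past d->vqs, and the added smp_wmb() orders the used-index store before the log write that advertises it. The clamp's generic fallback can be modelled in plain C; this is a hypothetical userspace rendering (real kernels use per-architecture versions, and the shift relies on arithmetic right shift of negative values, as the kernel's own fallback does):

	#include <stdio.h>

	/* All-ones when index < size, else 0, computed without a branch
	 * the CPU could mispredict. */
	static unsigned long index_mask(unsigned long index, unsigned long size)
	{
		return ~(long)(index | (size - 1 - index)) >> (8 * sizeof(long) - 1);
	}

	int main(void)
	{
		unsigned long nvqs = 4, idx;

		for (idx = 0; idx < 6; idx++)
			printf("idx %lu -> clamped %lu\n", idx,
			       idx & index_mask(idx, nvqs));	/* 4, 5 clamp to 0 */
		return 0;
	}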
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index b570e5a5ae3cc8..d9d1e1d3d2e1e6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -15,6 +15,7 @@
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>
+#include <linux/hashtable.h>
#include <net/af_vsock.h>
#include "vhost.h"
@@ -27,14 +28,14 @@ enum {
/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
-static LIST_HEAD(vhost_vsock_list);
+static DEFINE_HASHTABLE(vhost_vsock_hash, 8);
struct vhost_vsock {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
- /* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
- struct list_head list;
+ /* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
+ struct hlist_node hash;
struct vhost_work send_pkt_work;
spinlock_t send_pkt_list_lock;
@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid(void)
return VHOST_VSOCK_DEFAULT_HOST_CID;
}
-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
+/* Callers that dereference the return value must hold vhost_vsock_lock or the
+ * RCU read lock.
+ */
+static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
struct vhost_vsock *vsock;
- list_for_each_entry(vsock, &vhost_vsock_list, list) {
+ hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
u32 other_cid = vsock->guest_cid;
/* Skip instances that have no CID yet */
@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
return NULL;
}
-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
-{
- struct vhost_vsock *vsock;
-
- spin_lock_bh(&vhost_vsock_lock);
- vsock = __vhost_vsock_get(guest_cid);
- spin_unlock_bh(&vhost_vsock_lock);
-
- return vsock;
-}
-
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
@@ -205,9 +198,12 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
struct vhost_vsock *vsock;
int len = pkt->len;
+ rcu_read_lock();
+
/* Find the vhost_vsock according to guest context id */
vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
if (!vsock) {
+ rcu_read_unlock();
virtio_transport_free_pkt(pkt);
return -ENODEV;
}
@@ -220,6 +216,8 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
spin_unlock_bh(&vsock->send_pkt_list_lock);
vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
+
+ rcu_read_unlock();
return len;
}
@@ -473,6 +471,8 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
goto out;
}
+ vsock->guest_cid = 0; /* no CID assigned yet */
+
atomic_set(&vsock->queued_replies, 0);
vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
@@ -486,10 +486,6 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
spin_lock_init(&vsock->send_pkt_list_lock);
INIT_LIST_HEAD(&vsock->send_pkt_list);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
-
- spin_lock_bh(&vhost_vsock_lock);
- list_add_tail(&vsock->list, &vhost_vsock_list);
- spin_unlock_bh(&vhost_vsock_lock);
return 0;
out:
@@ -516,13 +512,21 @@ static void vhost_vsock_reset_orphans(struct sock *sk)
* executing.
*/
- if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown = SHUTDOWN_MASK;
- sk->sk_state = SS_UNCONNECTED;
- sk->sk_err = ECONNRESET;
- sk->sk_error_report(sk);
- }
+ /* If the peer is still valid, no need to reset connection */
+ if (vhost_vsock_get(vsk->remote_addr.svm_cid))
+ return;
+
+ /* If the close timeout is pending, let it expire. This avoids races
+ * with the timeout callback.
+ */
+ if (vsk->close_work_scheduled)
+ return;
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SS_UNCONNECTED;
+ sk->sk_err = ECONNRESET;
+ sk->sk_error_report(sk);
}
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
@@ -530,9 +534,13 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
struct vhost_vsock *vsock = file->private_data;
spin_lock_bh(&vhost_vsock_lock);
- list_del(&vsock->list);
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
spin_unlock_bh(&vhost_vsock_lock);
+ /* Wait for other CPUs to finish using vsock */
+ synchronize_rcu();
+
/* Iterating over all connections for all CIDs to find orphans is
* inefficient. Room for improvement here. */
vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
@@ -573,12 +581,17 @@ static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
/* Refuse if CID is already in use */
spin_lock_bh(&vhost_vsock_lock);
- other = __vhost_vsock_get(guest_cid);
+ other = vhost_vsock_get(guest_cid);
if (other && other != vsock) {
spin_unlock_bh(&vhost_vsock_lock);
return -EADDRINUSE;
}
+
+ if (vsock->guest_cid)
+ hash_del_rcu(&vsock->hash);
+
vsock->guest_cid = guest_cid;
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
spin_unlock_bh(&vhost_vsock_lock);
return 0;
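Editor's note: the list-to-hashtable conversion follows the standard RCU pattern: readers walk the table under rcu_read_lock(), writers mutate it under vhost_vsock_lock, and the release path waits out readers with synchronize_rcu() before the object can go away. Condensed from the hunks above into its bare shape (a sketch, not a buildable unit):

	/* reader side */
	rcu_read_lock();
	vsock = vhost_vsock_get(cid);	/* hash_for_each_possible_rcu() walk */
	if (vsock)
		vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	rcu_read_unlock();

	/* writer side */
	spin_lock_bh(&vhost_vsock_lock);
	hash_del_rcu(&vsock->hash);
	spin_unlock_bh(&vhost_vsock_lock);

	/* release: no reader can still hold the pointer after this returns */
	synchronize_rcu();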
diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
index 734a9158946b1f..e55304d5cf0716 100644
--- a/drivers/video/backlight/as3711_bl.c
+++ b/drivers/video/backlight/as3711_bl.c
@@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
static int as3711_backlight_parse_dt(struct device *dev)
{
struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
- struct device_node *bl =
- of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
+ struct device_node *bl, *fb;
int ret;
+ bl = of_get_child_by_name(dev->parent->of_node, "backlight");
if (!bl) {
dev_dbg(dev, "backlight node not found\n");
return -ENODEV;
@@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
if (pdata->su1_max_uA <= 0)
ret = -EINVAL;
if (ret < 0)
- return ret;
+ goto err_put_bl;
}
fb = of_parse_phandle(bl, "su2-dev", 0);
@@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
if (pdata->su2_max_uA <= 0)
ret = -EINVAL;
if (ret < 0)
- return ret;
+ goto err_put_bl;
if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
pdata->su2_feedback = AS3711_SU2_VOLTAGE;
@@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
count++;
}
- if (count != 1)
- return -EINVAL;
+ if (count != 1) {
+ ret = -EINVAL;
+ goto err_put_bl;
+ }
count = 0;
if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
@@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
pdata->su2_fbprot = AS3711_SU2_GPIO4;
count++;
}
- if (count != 1)
- return -EINVAL;
+ if (count != 1) {
+ ret = -EINVAL;
+ goto err_put_bl;
+ }
count = 0;
if (of_find_property(bl, "su2-auto-curr1", NULL)) {
@@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
* At least one su2-auto-curr* must be specified iff
* AS3711_SU2_CURR_AUTO is used
*/
- if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO))
- return -EINVAL;
+ if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
+ ret = -EINVAL;
+ goto err_put_bl;
+ }
}
+ of_node_put(bl);
+
return 0;
+
+err_put_bl:
+ of_node_put(bl);
+
+ return ret;
}
static int as3711_backlight_probe(struct platform_device *pdev)
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 7b738d60ecc22e..f3aa6088f1d978 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
if (!pdata)
return;
- np = of_find_node_by_name(nproot, "backlight");
+ np = of_get_child_by_name(nproot, "backlight");
if (!np) {
dev_err(&pdev->dev, "failed to find backlight node\n");
return;
@@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
pdata->dual_string = val;
+ of_node_put(np);
+
pdev->dev.platform_data = pdata;
}
diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
index 61d72bffd402f2..dc920e2aa0944d 100644
--- a/drivers/video/backlight/tps65217_bl.c
+++ b/drivers/video/backlight/tps65217_bl.c
@@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
tps65217_bl_parse_dt(struct platform_device *pdev)
{
struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
- struct device_node *node = of_node_get(tps->dev->of_node);
+ struct device_node *node;
struct tps65217_bl_pdata *pdata, *err;
u32 val;
- node = of_find_node_by_name(node, "backlight");
+ node = of_get_child_by_name(tps->dev->of_node, "backlight");
if (!node)
return ERR_PTR(-ENODEV);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 4e3c78d8883222..c03c5b9602bb32 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3032,7 +3032,7 @@ static int fbcon_fb_unbind(int idx)
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
con2fb_map[i] != -1) {
- new_idx = i;
+ new_idx = con2fb_map[i];
break;
}
}
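Editor's note: the fbcon bug was a mapping/index mix-up. con2fb_map[] maps console numbers to framebuffer indices, and the unbind code wants another framebuffer to migrate to, so it must read con2fb_map[i], not take the console number i itself. Isolated, with an invented map:

	#include <stdio.h>

	int main(void)
	{
		/* console -> framebuffer map: consoles 0-2 on fb1, console 3 on fb0 */
		int con2fb_map[] = { 1, 1, 1, 0 };
		int idx = 1;		/* framebuffer being unbound */
		int i, new_idx = -1;

		for (i = 0; i < 4; i++) {
			if (con2fb_map[i] != idx && con2fb_map[i] != -1) {
				new_idx = con2fb_map[i];	/* was: new_idx = i */
				break;
			}
		}
		printf("migrate consoles to fb %d\n", new_idx);	/* fb 0, not console 3 */
		return 0;
	}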
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 63c4842eb2243a..46e0e8b39b76af 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -332,6 +332,8 @@ extern const struct aty_pll_ops aty_pll_ct; /* Integrated */
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+extern const u8 aty_postdividers[8];
+
/*
* Hardware cursor support
@@ -358,7 +360,6 @@ static inline void wait_for_idle(struct atyfb_par *par)
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 7f658fa4d22a75..9755a0ec65913b 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3093,17 +3093,18 @@ static int atyfb_setup_sparc(struct pci_dev *pdev, struct fb_info *info,
/*
* PLL Reference Divider M:
*/
- M = pll_regs[2];
+ M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
- N = pll_regs[7 + (clock_cntl & 3)];
+ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:
diff --git a/drivers/video/fbdev/aty/mach64_accel.c b/drivers/video/fbdev/aty/mach64_accel.c
index 182bd680141f7f..e9dfe0e40b8bd6 100644
--- a/drivers/video/fbdev/aty/mach64_accel.c
+++ b/drivers/video/fbdev/aty/mach64_accel.c
@@ -126,7 +126,7 @@ void aty_init_engine(struct atyfb_par *par, struct fb_info *info)
/* set host attributes */
wait_for_fifo(13, par);
- aty_st_le32(HOST_CNTL, 0, par);
+ aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
/* set pattern attributes */
aty_st_le32(PAT_REG0, 0, par);
@@ -232,7 +232,8 @@ void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area)
rotation = rotation24bpp(dx, direction);
}
- wait_for_fifo(4, par);
+ wait_for_fifo(5, par);
+ aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par);
aty_st_le32(SRC_Y_X, (sx << 16) | sy, par);
aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par);
@@ -268,7 +269,8 @@ void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT);
}
- wait_for_fifo(3, par);
+ wait_for_fifo(4, par);
+ aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par);
aty_st_le32(DP_FRGD_CLR, color, par);
aty_st_le32(DP_SRC,
BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE,
@@ -283,7 +285,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
{
struct atyfb_par *par = (struct atyfb_par *) info->par;
u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width;
- u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix;
+ u32 pix_width, rotation = 0, src, mix;
if (par->asleep)
return;
@@ -295,8 +297,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par);
- host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN;
+ pix_width = par->crtc.dp_pix_width;
switch (image->depth) {
case 1:
@@ -344,7 +345,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
* since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit
 * this hwaccelerated triple has an issue with unaligned data
*/
- if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
+ if (image->depth == 1 && M64_HAS(HW_TRIPLE) && image->width % 8 == 0)
pix_width |= DP_HOST_TRIPLE_EN;
}
@@ -369,19 +370,18 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D;
}
- wait_for_fifo(6, par);
- aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par);
+ wait_for_fifo(5, par);
aty_st_le32(DP_PIX_WIDTH, pix_width, par);
aty_st_le32(DP_MIX, mix, par);
aty_st_le32(DP_SRC, src, par);
- aty_st_le32(HOST_CNTL, host_cntl, par);
+ aty_st_le32(HOST_CNTL, HOST_BYTE_ALIGN, par);
aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par);
draw_rect(dx, dy, width, image->height, par);
src_bytes = (((image->width * image->depth) + 7) / 8) * image->height;
/* manual triple each pixel */
- if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
+ if (image->depth == 1 && info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) {
int inbit, outbit, mult24, byte_id_in_dword, width;
u8 *pbitmapin = (u8*)image->data, *pbitmapout;
u32 hostdword;
@@ -414,7 +414,7 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
}
}
wait_for_fifo(1, par);
- aty_st_le32(HOST_DATA0, hostdword, par);
+ aty_st_le32(HOST_DATA0, le32_to_cpu(hostdword), par);
}
} else {
u32 *pbitmap, dwords = (src_bytes + 3) / 4;
@@ -423,8 +423,4 @@ void atyfb_imageblit(struct fb_info *info, const struct fb_image *image)
aty_st_le32(HOST_DATA0, get_unaligned_le32(pbitmap), par);
}
}
-
- /* restore pix_width */
- wait_for_fifo(1, par);
- aty_st_le32(DP_PIX_WIDTH, pix_width_save, par);
}
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 51f29d627cebce..af54256a20a103 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -114,7 +114,7 @@ static void aty_st_pll_ct(int offset, u8 val, const struct atyfb_par *par)
*/
#define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
@@ -221,7 +221,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -512,7 +512,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
@@ -534,7 +534,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
- pll->ct.xclk_post_div_real = postdividers[xpost_div];
+ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
@@ -583,7 +583,7 @@ static int aty_init_pll_ct(const struct fb_info *info, union aty_pll *pll)
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
- sclk_post_div_real = postdividers[mpost_div];
+ sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index 649b32f78c0862..c55109524fd572 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -287,14 +287,17 @@ static int clps711x_fb_probe(struct platform_device *pdev)
}
ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ of_node_put(disp);
goto out_fb_release;
+ }
of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
ret = of_property_read_u32(disp, "bits-per-pixel",
&info->var.bits_per_pixel);
+ of_node_put(disp);
if (ret)
goto out_fb_release;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 0705d8883edecc..ea2bd6208a2f51 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -433,7 +433,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
image->dx += image->width + 8;
}
} else if (rotate == FB_ROTATE_UD) {
- for (x = 0; x < num; x++) {
+ u32 dx = image->dx;
+
+ for (x = 0; x < num && image->dx <= dx; x++) {
info->fbops->fb_imageblit(info, image);
image->dx -= image->width + 8;
}
@@ -445,7 +447,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
image->dy += image->height + 8;
}
} else if (rotate == FB_ROTATE_CCW) {
- for (x = 0; x < num; x++) {
+ u32 dy = image->dy;
+
+ for (x = 0; x < num && image->dy <= dy; x++) {
info->fbops->fb_imageblit(info, image);
image->dy -= image->height + 8;
}
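
The added guards stop the upside-down and counter-clockwise loops once image->dx (or ->dy) steps past zero; on an unsigned type the subtraction wraps to a huge value instead of going negative, so comparing against the starting coordinate detects the wrap. A small sketch:

#include <stdio.h>

int main(void)
{
	unsigned int dx = 40, width = 80, num = 4;
	unsigned int start = dx;

	/* Each iteration moves left by width + 8; after the wrap,
	 * dx <= start becomes false and the loop ends, instead of
	 * blitting at a bogus huge offset. */
	for (unsigned int x = 0; x < num && dx <= start; x++) {
		printf("blit at dx=%u\n", dx);
		dx -= width + 8;
	}
	return 0;
}
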
@@ -1687,12 +1691,12 @@ static int do_register_framebuffer(struct fb_info *fb_info)
return 0;
}
-static int do_unregister_framebuffer(struct fb_info *fb_info)
+static int unbind_console(struct fb_info *fb_info)
{
struct fb_event event;
- int i, ret = 0;
+ int ret;
+ int i = fb_info->node;
- i = fb_info->node;
if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
return -EINVAL;
@@ -1707,17 +1711,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
unlock_fb_info(fb_info);
console_unlock();
+ return ret;
+}
+
+static int __unlink_framebuffer(struct fb_info *fb_info);
+
+static int do_unregister_framebuffer(struct fb_info *fb_info)
+{
+ struct fb_event event;
+ int ret;
+
+ ret = unbind_console(fb_info);
+
if (ret)
return -EINVAL;
pm_vt_switch_unregister(fb_info->dev);
- unlink_framebuffer(fb_info);
+ __unlink_framebuffer(fb_info);
if (fb_info->pixmap.addr &&
(fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
kfree(fb_info->pixmap.addr);
fb_destroy_modelist(&fb_info->modelist);
- registered_fb[i] = NULL;
+ registered_fb[fb_info->node] = NULL;
num_registered_fb--;
fb_cleanup_device(fb_info);
event.info = fb_info;
@@ -1730,7 +1746,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info)
return 0;
}
-int unlink_framebuffer(struct fb_info *fb_info)
+static int __unlink_framebuffer(struct fb_info *fb_info)
{
int i;
@@ -1742,6 +1758,20 @@ int unlink_framebuffer(struct fb_info *fb_info)
device_destroy(fb_class, MKDEV(FB_MAJOR, i));
fb_info->dev = NULL;
}
+
+ return 0;
+}
+
+int unlink_framebuffer(struct fb_info *fb_info)
+{
+ int ret;
+
+ ret = __unlink_framebuffer(fb_info);
+ if (ret)
+ return ret;
+
+ unbind_console(fb_info);
+
return 0;
}
EXPORT_SYMBOL(unlink_framebuffer);
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 2510fa728d7716..de119f11b78f96 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -644,7 +644,7 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info,
*
* Valid mode specifiers for @mode_option:
*
- * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m] or
+ * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][p][m] or
* <name>[-<bpp>][@<refresh>]
*
* with <xres>, <yres>, <bpp> and <refresh> decimal numbers and
@@ -653,10 +653,10 @@ static int fb_try_mode(struct fb_var_screeninfo *var, struct fb_info *info,
* If 'M' is present after yres (and before refresh/bpp if present),
* the function will compute the timings using VESA(tm) Coordinated
* Video Timings (CVT). If 'R' is present after 'M', will compute with
- * reduced blanking (for flatpanels). If 'i' is present, compute
- * interlaced mode. If 'm' is present, add margins equal to 1.8%
- * of xres rounded down to 8 pixels, and 1.8% of yres. The char
- * 'i' and 'm' must be after 'M' and 'R'. Example:
+ * reduced blanking (for flatpanels). If 'i' or 'p' are present, compute
+ * interlaced or progressive mode. If 'm' is present, add margins equal
+ * to 1.8% of xres rounded down to 8 pixels, and 1.8% of yres. The chars
+ * 'i', 'p' and 'm' must be after 'M' and 'R'. Example:
*
* 1024x768MR-8@60m - Reduced blank with margins at 60Hz.
*
@@ -697,7 +697,8 @@ int fb_find_mode(struct fb_var_screeninfo *var,
unsigned int namelen = strlen(name);
int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
- int yres_specified = 0, cvt = 0, rb = 0, interlace = 0;
+ int yres_specified = 0, cvt = 0, rb = 0;
+ int interlace_specified = 0, interlace = 0;
int margins = 0;
u32 best, diff, tdiff;
@@ -748,9 +749,17 @@ int fb_find_mode(struct fb_var_screeninfo *var,
if (!cvt)
margins = 1;
break;
+ case 'p':
+ if (!cvt) {
+ interlace = 0;
+ interlace_specified = 1;
+ }
+ break;
case 'i':
- if (!cvt)
+ if (!cvt) {
interlace = 1;
+ interlace_specified = 1;
+ }
break;
default:
goto done;
@@ -819,11 +828,21 @@ done:
if ((name_matches(db[i], name, namelen) ||
(res_specified && res_matches(db[i], xres, yres))) &&
!fb_try_mode(var, info, &db[i], bpp)) {
- if (refresh_specified && db[i].refresh == refresh)
- return 1;
+ const int db_interlace = (db[i].vmode &
+ FB_VMODE_INTERLACED ? 1 : 0);
+ int score = abs(db[i].refresh - refresh);
+
+ if (interlace_specified)
+ score += abs(db_interlace - interlace);
+
+ if (!interlace_specified ||
+ db_interlace == interlace)
+ if (refresh_specified &&
+ db[i].refresh == refresh)
+ return 1;
- if (abs(db[i].refresh - refresh) < diff) {
- diff = abs(db[i].refresh - refresh);
+ if (score < diff) {
+ diff = score;
best = i;
}
}
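
The matching loop now folds the refresh distance and an interlace mismatch penalty into one score instead of returning on the first refresh hit (the exact-match early return kept in the patch is omitted here). A toy version with an invented mode list:

#include <stdio.h>
#include <stdlib.h>

struct mode { int refresh; int interlaced; };

int main(void)
{
	struct mode db[] = { {60, 1}, {75, 0}, {59, 0} };
	int refresh = 60, interlace = 0, interlace_specified = 1;
	int best = -1;
	unsigned diff = ~0u;

	for (int i = 0; i < 3; i++) {
		unsigned score = abs(db[i].refresh - refresh);
		if (interlace_specified)
			score += abs(db[i].interlaced - interlace);
		if (score < diff) {
			diff = score;
			best = i;
		}
	}
	/* 60Hz interlaced and 59Hz progressive tie at score 1; the
	 * first minimum wins, mirroring the strict '<' in the patch. */
	printf("best mode #%d (score %u)\n", best, diff);
	return 0;
}
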
diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c
index 7f6c9e6cfc6c99..66d58e93bc3226 100644
--- a/drivers/video/fbdev/goldfishfb.c
+++ b/drivers/video/fbdev/goldfishfb.c
@@ -234,7 +234,7 @@ static int goldfish_fb_probe(struct platform_device *pdev)
fb->fb.var.activate = FB_ACTIVATE_NOW;
fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
- fb->fb.var.pixclock = 10000;
+ fb->fb.var.pixclock = 0;
fb->fb.var.red.offset = 11;
fb->fb.var.red.length = 5;
@@ -301,6 +301,7 @@ static int goldfish_fb_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, framesize, (void *)fb->fb.screen_base,
fb->fb.fix.smem_start);
iounmap(fb->reg_base);
+ kfree(fb);
return 0;
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
index 195ad7cac1bacc..68fa037d8cbc06 100644
--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
@@ -372,7 +372,7 @@ static int Ti3026_init(struct matrox_fb_info *minfo, struct my_timming *m)
DBG(__func__)
- memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg));
+ memcpy(hw->DACreg, MGADACbpp32, sizeof(MGADACbpp32));
switch (minfo->fbcon.var.bits_per_pixel) {
case 4: hw->DACreg[POS3026_XLATCHCTRL] = TVP3026_XLATCHCTRL_16_1; /* or _8_1, they are same */
hw->DACreg[POS3026_XTRUECOLORCTRL] = TVP3026_XTRUECOLORCTRL_PSEUDOCOLOR;
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 393ae1bc07e8f7..a8a6f072fb78ec 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -977,7 +977,7 @@ int omapfb_register_client(struct omapfb_notifier_block *omapfb_nb,
{
int r;
- if ((unsigned)omapfb_nb->plane_idx > OMAPFB_PLANE_NUM)
+ if ((unsigned)omapfb_nb->plane_idx >= OMAPFB_PLANE_NUM)
return -EINVAL;
if (!notifier_inited) {
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b84ced..0c1c34ff40a944 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
return -EFAULT;
+ if (mr->w > 4096 || mr->h > 4096)
+ return -EINVAL;
+
if (mr->w * mr->h * 3 > mr->buffer_size)
return -EINVAL;
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
mr->x, mr->y, mr->w, mr->h);
if (r > 0) {
- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+ if (copy_to_user(mr->buffer, buf, r))
r = -EFAULT;
}
@@ -606,6 +609,8 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
int r = 0;
+ memset(&p, 0, sizeof(p));
+
switch (cmd) {
case OMAPFB_SYNC_GFX:
DBG("ioctl SYNC_GFX\n");
diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c
index 0e24eb9c219c04..750a384bf19150 100644
--- a/drivers/video/fbdev/pvr2fb.c
+++ b/drivers/video/fbdev/pvr2fb.c
@@ -687,7 +687,7 @@ static ssize_t pvr2fb_write(struct fb_info *info, const char *buf,
return -ENOMEM;
ret = get_user_pages_unlocked(current, current->mm, (unsigned long)buf,
- nr_pages, WRITE, 0, pages);
+ nr_pages, pages, FOLL_WRITE);
if (ret < nr_pages) {
nr_pages = ret;
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index efb57c05999764..5190b1749e2a03 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
/*
* enable controller clock
*/
- clk_enable(fbi->clk);
+ clk_prepare_enable(fbi->clk);
pxa168fb_set_par(info);
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
failed_free_cmap:
fb_dealloc_cmap(&info->cmap);
failed_free_clk:
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk);
failed_free_fbmem:
dma_free_coherent(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
dma_free_writecombine(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
info->screen_base, info->fix.smem_start);
- clk_disable(fbi->clk);
+ clk_disable_unprepare(fbi->clk);
framebuffer_release(info);
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 50bce45e7f3d47..933619da1a94b9 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -626,8 +626,8 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
/* request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(dev, "no IRQ defined\n");
- return -ENODEV;
+ dev_err(dev, "no IRQ defined: %d\n", irq);
+ return irq;
}
ret = devm_request_irq(dev, irq, pxa3xx_gcu_handle_irq,
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index 178ae93b7ebd6a..381236ff34d9cc 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1059,7 +1059,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
info->cmap.len || cmap->start < info->cmap.start)
return -EINVAL;
- entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
+ entries = kmalloc_array(cmap->len, sizeof(*entries),
+ GFP_KERNEL);
if (!entries)
return -ENOMEM;
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index badee04ef496ce..71b5dca95bdbdd 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -19,6 +19,7 @@
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -1468,7 +1469,7 @@ static const struct file_operations viafb_vt1636_proc_fops = {
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
-static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
+static int __maybe_unused viafb_sup_odev_proc_show(struct seq_file *m, void *v)
{
via_odev_to_seq(m, supported_odev_map[
viaparinfo->shared->chip_info.gfx_chip_name]);
diff --git a/drivers/virt/fsl_hypervisor.c b/drivers/virt/fsl_hypervisor.c
index 32c8fc5f7a5c37..590a0f51a24917 100644
--- a/drivers/virt/fsl_hypervisor.c
+++ b/drivers/virt/fsl_hypervisor.c
@@ -246,8 +246,8 @@ static long ioctl_memcpy(struct fsl_hv_ioctl_memcpy __user *p)
down_read(&current->mm->mmap_sem);
num_pinned = get_user_pages(current, current->mm,
param.local_vaddr - lb_offset, num_pages,
- (param.source == -1) ? READ : WRITE,
- 0, pages, NULL);
+ (param.source == -1) ? 0 : FOLL_WRITE,
+ pages, NULL);
up_read(&current->mm->mmap_sem);
if (num_pinned != num_pages) {
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index a69f8a720038cb..05217df9d41efd 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -244,7 +244,10 @@ static void update_balloon_stats(struct virtio_balloon *vb)
pages_to_bytes(events[PSWPIN]));
update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
pages_to_bytes(events[PSWPOUT]));
- update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT,
+ events[PGMAJFAULT_S] +
+ events[PGMAJFAULT_A] +
+ events[PGMAJFAULT_F]);
update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
#endif
update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
@@ -479,7 +482,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
tell_host(vb, vb->inflate_vq);
/* balloon's page migration 2nd step -- deflate "page" */
+ spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
balloon_page_delete(page);
+ spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
set_page_pfns(vb, vb->pfns, page);
tell_host(vb, vb->deflate_vq);
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a4621757a47f51..dacb5919970c53 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -113,6 +113,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
if (IS_ERR(mdev->clk))
return PTR_ERR(mdev->clk);
+ err = clk_prepare_enable(mdev->clk);
+ if (err)
+ return err;
+
clkrate = clk_get_rate(mdev->clk);
if (clkrate < 10000000)
dev_warn(&pdev->dev,
@@ -126,12 +130,10 @@ static int mxc_w1_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdev->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(mdev->regs))
- return PTR_ERR(mdev->regs);
-
- err = clk_prepare_enable(mdev->clk);
- if (err)
- return err;
+ if (IS_ERR(mdev->regs)) {
+ err = PTR_ERR(mdev->regs);
+ goto out_disable_clk;
+ }
/* Software reset 1-Wire module */
writeb(MXC_W1_RESET_RST, mdev->regs + MXC_W1_RESET);
@@ -147,8 +149,12 @@ static int mxc_w1_probe(struct platform_device *pdev)
err = w1_add_master_device(&mdev->bus_master);
if (err)
- clk_disable_unprepare(mdev->clk);
+ goto out_disable_clk;
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(mdev->clk);
return err;
}
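
The probe now takes the clock first and unwinds every later failure through a single label. A generic sketch of that goto-cleanup shape; the helper names are placeholders, not the driver's API:

#include <stdio.h>

static int acquire_clock(void)  { puts("clk on");  return 0; }
static void release_clock(void) { puts("clk off"); }
static int map_registers(void)  { puts("map regs"); return -1; /* forced failure */ }

static int probe(void)
{
	int err = acquire_clock();
	if (err)
		return err;

	err = map_registers();
	if (err)
		goto out_disable_clk;  /* every later failure funnels here */

	return 0;

out_disable_clk:
	release_clock();
	return err;
}

int main(void)
{
	printf("probe -> %d\n", probe());
	return 0;
}
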
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 0c427d6a12d144..4c5c6550809d52 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -785,6 +785,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
/* remove module dependency */
pm_runtime_disable(&pdev->dev);
+ w1_remove_master_device(&omap_w1_master);
+
return 0;
}
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 39886edfa22211..88c1b8c0147300 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -741,7 +741,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
/* slave modules need to be loaded in a context with unlocked mutex */
mutex_unlock(&dev->mutex);
- request_module("w1-family-0x%02x", rn->family);
+ request_module("w1-family-0x%02X", rn->family);
mutex_lock(&dev->mutex);
spin_lock(&w1_flock);
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 04da4b66c75e36..ae034bb1e55140 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -192,7 +192,7 @@ static int booke_wdt_set_timeout(struct watchdog_device *wdt_dev,
return 0;
}
-static struct watchdog_info booke_wdt_info = {
+static struct watchdog_info booke_wdt_info __ro_after_init = {
.options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
.identity = "PowerPC Book-E Watchdog",
};
diff --git a/drivers/watchdog/pika_wdt.c b/drivers/watchdog/pika_wdt.c
index 0cdfee266690b7..e35cf5e87907c3 100644
--- a/drivers/watchdog/pika_wdt.c
+++ b/drivers/watchdog/pika_wdt.c
@@ -54,7 +54,7 @@ static struct {
struct timer_list timer; /* The timer that pings the watchdog */
} pikawdt_private;
-static struct watchdog_info ident = {
+static struct watchdog_info ident __ro_after_init = {
.identity = DRV_NAME,
.options = WDIOF_CARDRESET |
WDIOF_SETTIMEOUT |
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 5676aefdf2bca7..f4e59c445964d5 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -18,15 +18,16 @@ static void enable_hotplug_cpu(int cpu)
static void disable_hotplug_cpu(int cpu)
{
- if (cpu_online(cpu)) {
- lock_device_hotplug();
+ if (!cpu_is_hotpluggable(cpu))
+ return;
+ lock_device_hotplug();
+ if (cpu_online(cpu))
device_offline(get_cpu_device(cpu));
- unlock_device_hotplug();
- }
- if (cpu_present(cpu))
+ if (!cpu_online(cpu) && cpu_present(cpu)) {
xen_arch_unregister_cpu(cpu);
-
- set_cpu_present(cpu, false);
+ set_cpu_present(cpu, false);
+ }
+ unlock_device_hotplug();
}
static int vcpu_online(unsigned int cpu)
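
The rework holds the hotplug lock across both steps and only unregisters the CPU once it is actually offline. A stub sketch of the new ordering (the helpers below stand in for the kernel calls):

#include <stdio.h>
#include <stdbool.h>

static bool online = true, present = true;

static void lock(void)   { puts("lock_device_hotplug"); }
static void unlock(void) { puts("unlock_device_hotplug"); }

static void disable_hotplug_cpu(void)
{
	lock();
	if (online) {
		puts("device_offline");
		online = false;  /* assume the offline succeeded */
	}
	/* Unregister only once the CPU really went offline. */
	if (!online && present) {
		puts("xen_arch_unregister_cpu");
		present = false;
	}
	unlock();
}

int main(void)
{
	disable_hotplug_cpu();
	return 0;
}
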
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 468961c59fa555..878a40950a3a17 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -139,7 +139,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
clear_evtchn_to_irq_row(row);
}
- evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+ evtchn_to_irq[row][col] = irq;
return 0;
}
@@ -637,8 +637,6 @@ static void __unbind_from_irq(unsigned int irq)
xen_irq_info_cleanup(info);
}
- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
xen_free_irq(irq);
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 2dd285827169d2..f494126aaecdd9 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
/*
* The Xenstore watch fires directly after registering it and
* after a suspend/resume cycle. So ENOENT is no error but
- * might happen in those cases.
+ * might happen in those cases. ERANGE is observed when we get
+ * an empty value (''); this happens when we acknowledge the
+ * request by writing '\0' below.
*/
- if (err != -ENOENT)
+ if (err != -ENOENT && err != -ERANGE)
pr_err("Error %d reading sysrq code in control/sysrq\n",
err);
xenbus_transaction_end(xbt, 1);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1889e928a0daa9..a8a38838234768 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -310,6 +310,9 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
flags &= ~(__GFP_DMA | __GFP_HIGHMEM);
+ /* Convert the size to the amount actually allocated. */
+ size = 1UL << (order + XEN_PAGE_SHIFT);
+
/* On ARM this function returns an ioremap'ped virtual address for
* which virt_to_phys doesn't return the corresponding physical
* address. In fact on ARM virt_to_phys only works for kernel direct
@@ -359,6 +362,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
* physical address */
phys = xen_bus_to_phys(dev_addr);
+ /* Convert the size to the amount actually allocated. */
+ size = 1UL << (order + XEN_PAGE_SHIFT);
+
if (((dev_addr + size - 1 <= dma_mask)) ||
range_straddles_page_boundary(phys, size))
xen_destroy_contiguous_region(phys, order);
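
Both hunks recompute the size from the allocation order, because the allocator hands back a full power-of-two number of pages regardless of the byte count requested. A sketch of the round-up, assuming the usual XEN_PAGE_SHIFT of 12 (4 KiB pages):

#include <stdio.h>

#define XEN_PAGE_SHIFT 12

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((1UL << (order + XEN_PAGE_SHIFT)) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long requested = 10000;  /* caller's byte count */
	unsigned int order = get_order(requested);
	unsigned long allocated = 1UL << (order + XEN_PAGE_SHIFT);

	/* 10000 bytes rounds up to order 2, i.e. 16384 bytes; the free
	 * path must use the same 16384, not the original 10000. */
	printf("requested %lu, allocated %lu\n", requested, allocated);
	return 0;
}
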
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 5063c5e796b7f1..84a1fab0dd6bef 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -34,6 +34,7 @@
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
+#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index a69260f27555df..103ca5e1267beb 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -243,14 +243,14 @@ void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
if (!v9inode->fscache)
return;
- spin_lock(&v9inode->fscache_lock);
+ mutex_lock(&v9inode->fscache_lock);
if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
v9fs_cache_inode_flush_cookie(inode);
else
v9fs_cache_inode_get_cookie(inode);
- spin_unlock(&v9inode->fscache_lock);
+ mutex_unlock(&v9inode->fscache_lock);
}
void v9fs_cache_inode_reset_cookie(struct inode *inode)
@@ -264,7 +264,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
old = v9inode->fscache;
- spin_lock(&v9inode->fscache_lock);
+ mutex_lock(&v9inode->fscache_lock);
fscache_relinquish_cookie(v9inode->fscache, 1);
v9ses = v9fs_inode2v9ses(inode);
@@ -274,7 +274,7 @@ void v9fs_cache_inode_reset_cookie(struct inode *inode)
p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
inode, old, v9inode->fscache);
- spin_unlock(&v9inode->fscache_lock);
+ mutex_unlock(&v9inode->fscache_lock);
}
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 0923f2cf3c80aa..6877050384a140 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -123,7 +123,7 @@ struct v9fs_session_info {
struct v9fs_inode {
#ifdef CONFIG_9P_FSCACHE
- spinlock_t fscache_lock;
+ struct mutex fscache_lock;
struct fscache_cookie *fscache;
#endif
struct p9_qid qid;
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 5a0db6dec8d1fd..aaee1e6584e652 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -40,6 +40,9 @@
*/
#define P9_LOCK_TIMEOUT (30*HZ)
+/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
+#define V9FS_STAT2INODE_KEEP_ISIZE 1
+
extern struct file_system_type v9fs_fs_type;
extern const struct address_space_operations v9fs_addr_operations;
extern const struct file_operations v9fs_file_operations;
@@ -61,8 +64,10 @@ int v9fs_init_inode(struct v9fs_session_info *v9ses,
struct inode *inode, umode_t mode, dev_t);
void v9fs_evict_inode(struct inode *inode);
ino_t v9fs_qid2ino(struct p9_qid *qid);
-void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
-void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
+void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
+ struct super_block *sb, unsigned int flags);
+void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+ unsigned int flags);
int v9fs_dir_release(struct inode *inode, struct file *filp);
int v9fs_file_open(struct inode *inode, struct file *file);
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
@@ -83,4 +88,18 @@ static inline void v9fs_invalidate_inode_attr(struct inode *inode)
}
int v9fs_open_to_dotl_flags(int flags);
+
+static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+ /*
+ * 32-bit builds need the lock; concurrent updates could break the
+ * sequence count and make i_size_read() loop forever.
+ * 64-bit updates are atomic and can skip the locking.
+ */
+ if (sizeof(i_size) > sizeof(long))
+ spin_lock(&inode->i_lock);
+ i_size_write(inode, i_size);
+ if (sizeof(i_size) > sizeof(long))
+ spin_unlock(&inode->i_lock);
+}
#endif
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 5cc00e56206e33..7d889f56b8e78a 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat)
return rettype;
}
-static void p9stat_init(struct p9_wstat *stbuf)
-{
- stbuf->name = NULL;
- stbuf->uid = NULL;
- stbuf->gid = NULL;
- stbuf->muid = NULL;
- stbuf->extension = NULL;
-}
-
/**
* v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir
* @filp: opened file structure
@@ -145,12 +136,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx)
rdir->tail = n;
}
while (rdir->head < rdir->tail) {
- p9stat_init(&st);
err = p9stat_read(fid->clnt, rdir->buf + rdir->head,
rdir->tail - rdir->head, &st);
if (err) {
p9_debug(P9_DEBUG_VFS, "returned %d\n", err);
- p9stat_free(&st);
return -EIO;
}
reclen = st.size+2;
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 12ceaf52dae60d..62ce8b4a7e5f81 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -204,6 +204,14 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
break;
if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
break;
+ /*
+ * p9_client_lock_dotl overwrites flock.client_id with the
+ * server reply; free it and reuse the client name
+ */
+ if (flock.client_id != fid->clnt->name) {
+ kfree(flock.client_id);
+ flock.client_id = fid->clnt->name;
+ }
}
/* map 9p status to VFS status */
@@ -235,6 +243,8 @@ out_unlock:
locks_lock_file_wait(filp, fl);
fl->fl_type = fl_type;
}
+ if (flock.client_id != fid->clnt->name)
+ kfree(flock.client_id);
out:
return res;
}
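
Both lock paths now follow one ownership rule: the client_id string is freed only when the server reply replaced the shared client name. A minimal sketch of that rule (the strings are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *client_name = "client-a";       /* shared, never freed */
	char *client_id = strdup("server-b"); /* stands in for the reply */

	if (client_id != client_name)
		free(client_id);              /* the server replaced it */
	printf("kept %s\n", client_name);
	return 0;
}
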
@@ -269,7 +279,7 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
res = p9_client_getlock_dotl(fid, &glock);
if (res < 0)
- return res;
+ goto out;
/* map 9p lock type to os lock type */
switch (glock.type) {
case P9_LOCK_TYPE_RDLCK:
@@ -290,7 +300,9 @@ static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
fl->fl_end = glock.start + glock.length - 1;
fl->fl_pid = glock.proc_id;
}
- kfree(glock.client_id);
+out:
+ if (glock.client_id != fid->clnt->name)
+ kfree(glock.client_id);
return res;
}
@@ -430,7 +442,11 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
i_size = i_size_read(inode);
if (iocb->ki_pos > i_size) {
inode_add_bytes(inode, iocb->ki_pos - i_size);
- i_size_write(inode, iocb->ki_pos);
+ /*
+ * Need to serialize against i_size_write() in
+ * v9fs_stat2inode()
+ */
+ v9fs_i_size_write(inode, iocb->ki_pos);
}
return retval;
}
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 73f1d1b3a51c41..2de1505aedfd76 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -244,7 +244,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
return NULL;
#ifdef CONFIG_9P_FSCACHE
v9inode->fscache = NULL;
- spin_lock_init(&v9inode->fscache_lock);
+ mutex_init(&v9inode->fscache_lock);
#endif
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
@@ -538,7 +538,7 @@ static struct inode *v9fs_qid_iget(struct super_block *sb,
if (retval)
goto error;
- v9fs_stat2inode(st, inode, sb);
+ v9fs_stat2inode(st, inode, sb, 0);
v9fs_cache_inode_get_cookie(inode);
unlock_new_inode(inode);
return inode;
@@ -1074,7 +1074,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
if (IS_ERR(st))
return PTR_ERR(st);
- v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb);
+ v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb, 0);
generic_fillattr(d_inode(dentry), stat);
p9stat_free(st);
@@ -1152,12 +1152,13 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
* @stat: Plan 9 metadata (mistat) structure
* @inode: inode to populate
* @sb: superblock of filesystem
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
*
*/
void
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
- struct super_block *sb)
+ struct super_block *sb, unsigned int flags)
{
umode_t mode;
char ext[32];
@@ -1198,10 +1199,11 @@ v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
mode = p9mode2perm(v9ses, stat);
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
- i_size_write(inode, stat->length);
+ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+ v9fs_i_size_write(inode, stat->length);
/* not real number of blocks, but 512 byte ones ... */
- inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+ inode->i_blocks = (stat->length + 512 - 1) >> 9;
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
}
@@ -1389,9 +1391,9 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
{
int umode;
dev_t rdev;
- loff_t i_size;
struct p9_wstat *st;
struct v9fs_session_info *v9ses;
+ unsigned int flags;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_stat(fid);
@@ -1404,16 +1406,13 @@ int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode)
if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
goto out;
- spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
- i_size = inode->i_size;
- v9fs_stat2inode(st, inode, inode->i_sb);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- inode->i_size = i_size;
- spin_unlock(&inode->i_lock);
+ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ V9FS_STAT2INODE_KEEP_ISIZE : 0;
+ v9fs_stat2inode(st, inode, inode->i_sb, flags);
out:
p9stat_free(st);
kfree(st);
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 0b88744c644673..7ae67fcca0319c 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -143,7 +143,7 @@ static struct inode *v9fs_qid_iget_dotl(struct super_block *sb,
if (retval)
goto error;
- v9fs_stat2inode_dotl(st, inode);
+ v9fs_stat2inode_dotl(st, inode, 0);
v9fs_cache_inode_get_cookie(inode);
retval = v9fs_get_acl(inode, fid);
if (retval)
@@ -498,7 +498,7 @@ v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry,
if (IS_ERR(st))
return PTR_ERR(st);
- v9fs_stat2inode_dotl(st, d_inode(dentry));
+ v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
generic_fillattr(d_inode(dentry), stat);
/* Change block size to what the server returned */
stat->blksize = st->st_blksize;
@@ -609,11 +609,13 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr)
* v9fs_stat2inode_dotl - populate an inode structure with stat info
* @stat: stat structure
* @inode: inode to populate
+ * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
*
*/
void
-v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
+v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
+ unsigned int flags)
{
umode_t mode;
struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -633,7 +635,8 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
- i_size_write(inode, stat->st_size);
+ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
+ v9fs_i_size_write(inode, stat->st_size);
inode->i_blocks = stat->st_blocks;
} else {
if (stat->st_result_mask & P9_STATS_ATIME) {
@@ -663,8 +666,9 @@ v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
}
if (stat->st_result_mask & P9_STATS_RDEV)
inode->i_rdev = new_decode_dev(stat->st_rdev);
- if (stat->st_result_mask & P9_STATS_SIZE)
- i_size_write(inode, stat->st_size);
+ if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
+ stat->st_result_mask & P9_STATS_SIZE)
+ v9fs_i_size_write(inode, stat->st_size);
if (stat->st_result_mask & P9_STATS_BLOCKS)
inode->i_blocks = stat->st_blocks;
}
@@ -926,9 +930,9 @@ v9fs_vfs_follow_link_dotl(struct dentry *dentry, void **cookie)
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
{
- loff_t i_size;
struct p9_stat_dotl *st;
struct v9fs_session_info *v9ses;
+ unsigned int flags;
v9ses = v9fs_inode2v9ses(inode);
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
@@ -940,16 +944,13 @@ int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
goto out;
- spin_lock(&inode->i_lock);
/*
* We don't want to refresh inode->i_size,
* because we may have cached data
*/
- i_size = inode->i_size;
- v9fs_stat2inode_dotl(st, inode);
- if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- inode->i_size = i_size;
- spin_unlock(&inode->i_lock);
+ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ V9FS_STAT2INODE_KEEP_ISIZE : 0;
+ v9fs_stat2inode_dotl(st, inode, flags);
out:
kfree(st);
return 0;
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index bf495cedec26a2..ccf935d9e722d0 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -165,7 +165,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
goto release_sb;
}
d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode_dotl(st, d_inode(root));
+ v9fs_stat2inode_dotl(st, d_inode(root), 0);
kfree(st);
} else {
struct p9_wstat *st = NULL;
@@ -176,7 +176,7 @@ static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags,
}
d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
- v9fs_stat2inode(st, d_inode(root), sb);
+ v9fs_stat2inode(st, d_inode(root), sb, 0);
p9stat_free(st);
kfree(st);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index e3d026ac382eb4..f35168ce426b8c 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -107,7 +107,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
{
struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len};
struct iov_iter from;
- int retval;
+ int retval, err;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len);
@@ -128,7 +128,9 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
retval);
else
p9_client_write(fid, 0, &from, &retval);
- p9_client_clunk(fid);
+ err = p9_client_clunk(fid);
+ if (!retval && err)
+ retval = err;
return retval;
}
diff --git a/fs/aio.c b/fs/aio.c
index 4efaf29354a617..7187d03aa0bc8b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>
+#include <linux/nospec.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
@@ -628,9 +629,8 @@ static void free_ioctx_users(struct percpu_ref *ref)
while (!list_empty(&ctx->active_reqs)) {
req = list_first_entry(&ctx->active_reqs,
struct aio_kiocb, ki_list);
-
- list_del_init(&req->ki_list);
kiocb_cancel(req);
+ list_del_init(&req->ki_list);
}
spin_unlock_irq(&ctx->ctx_lock);
@@ -1064,6 +1064,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
if (!table || id >= table->nr)
goto out;
+ id = array_index_nospec(id, table->nr);
ctx = rcu_dereference(table->table[id]);
if (ctx && ctx->user_id == ctx_id) {
if (percpu_ref_tryget_live(&ctx->users))
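
array_index_nospec() clamps the already-bounds-checked index so a mispredicted branch cannot speculatively read past the table. A userspace sketch of the same pattern; index_nospec() below is a stand-in modeled on the kernel's generic mask, not the real API:

#include <stdio.h>
#include <stddef.h>

static size_t index_nospec(size_t index, size_t size)
{
	/* (index - size) is negative iff index < size; the arithmetic
	 * shift turns that into an all-ones/all-zero mask without a
	 * mispredictable branch. */
	size_t mask = (size_t)((long)(index - size) >> (sizeof(long) * 8 - 1));

	return index & mask;
}

int main(void)
{
	int table[4] = { 10, 20, 30, 40 };
	size_t id = 2, nr = 4;

	if (id >= nr)
		return 1;
	id = index_nospec(id, nr);  /* clamp before the dependent load */
	printf("table[%zu] = %d\n", id, table[id]);
	return 0;
}
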
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 502d3892d8a45d..d71e7ad4d382ba 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/list.h>
+#include <linux/magic.h>
/* This is the range of ioctl() numbers we claim as ours */
#define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
@@ -135,7 +136,8 @@ struct autofs_sb_info {
static inline struct autofs_sb_info *autofs4_sbi(struct super_block *sb)
{
- return (struct autofs_sb_info *)(sb->s_fs_info);
+ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
+ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
}
static inline struct autofs_info *autofs4_dentry_ino(struct dentry *dentry)
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 7a5a598a2d9456..0d8b9c4f27f219 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -560,7 +560,6 @@ int autofs4_expire_run(struct super_block *sb,
pkt.len = dentry->d_name.len;
memcpy(pkt.name, dentry->d_name.name, pkt.len);
pkt.name[pkt.len] = '\0';
- dput(dentry);
if ( copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)) )
ret = -EFAULT;
@@ -573,6 +572,8 @@ int autofs4_expire_run(struct super_block *sb,
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
+ dput(dentry);
+
return ret;
}
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index a3ae0b2aeb5a0f..0fd472d6702914 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -18,7 +18,6 @@
#include <linux/pagemap.h>
#include <linux/parser.h>
#include <linux/bitops.h>
-#include <linux/magic.h>
#include "autofs_i.h"
#include <linux/module.h>
@@ -256,8 +255,10 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
}
root_inode = autofs4_get_inode(s, S_IFDIR | 0755);
root = d_make_root(root_inode);
- if (!root)
+ if (!root) {
+ ret = -ENOMEM;
goto fail_ino;
+ }
pipe = NULL;
root->d_fsdata = ino;
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index fdcb4d69f430db..4714c55c1ae52b 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
s->s_magic = BFS_MAGIC;
- if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
+ if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) ||
+ le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) {
printf("Superblock is corrupted\n");
goto out1;
}
@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
sizeof(struct bfs_inode)
+ BFS_ROOT_INO - 1;
imap_len = (info->si_lasti / 8) + 1;
- info->si_imap = kzalloc(imap_len, GFP_KERNEL);
- if (!info->si_imap)
+ info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN);
+ if (!info->si_imap) {
+ printf("Cannot allocate %u bytes\n", imap_len);
goto out1;
+ }
for (i = 0; i < BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);
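
The extra lower bound on s_start rejects crafted images whose data area claims to begin inside the superblock itself. A toy version of the check with invented field values:

#include <stdio.h>
#include <stdint.h>

#define BFS_BSIZE 512

int main(void)
{
	uint32_t s_start = 64, s_end = 4096;  /* hypothetical on-disk values */

	/* Data must start at or after the first block and must not end
	 * before it starts; otherwise later size math is garbage. */
	if (s_start > s_end || s_start < BFS_BSIZE) {
		puts("Superblock is corrupted");
		return 1;
	}
	puts("superblock ok");
	return 0;
}
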
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 8a0243efd35925..1f2b477e57285d 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -604,28 +604,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
* Do the same thing for the memory mapping - between
* elf_bss and last_bss is the bss section.
*/
- k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+ k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
if (k > last_bss)
last_bss = k;
}
}
+ /*
+ * Now fill out the bss section: first pad the last page from
+ * the file up to the page boundary, and zero it from elf_bss
+ * up to the end of the page.
+ */
+ if (padzero(elf_bss)) {
+ error = -EFAULT;
+ goto out;
+ }
+ /*
+ * Next, align both the file and mem bss up to the page size,
+ * since this is where elf_bss was just zeroed up to, and where
+ * last_bss will end after the vm_brk() below.
+ */
+ elf_bss = ELF_PAGEALIGN(elf_bss);
+ last_bss = ELF_PAGEALIGN(last_bss);
+ /* Finally, if there is still more bss to allocate, do it. */
if (last_bss > elf_bss) {
- /*
- * Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zero-mapped pages up to and including the
- * last bss page.
- */
- if (padzero(elf_bss)) {
- error = -EFAULT;
- goto out;
- }
-
- /* What we have mapped so far */
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
-
- /* Map the last of the bss segment */
error = vm_brk(elf_bss, last_bss - elf_bss);
if (BAD_ADDR(error))
goto out;
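
The reworked bss setup always pads the last file-backed page, then page-aligns both ends before mapping the remainder anonymously. A sketch of the arithmetic, with made-up addresses and a 4 KiB page assumed:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long elf_bss = 0x40a123;   /* end of file-backed data */
	unsigned long last_bss = 0x40f456;  /* end of zero-initialized data */
	unsigned long zero_from = elf_bss;

	/* Zero to the end of elf_bss's page, then map whole pages
	 * between the two aligned ends. */
	elf_bss = PAGE_ALIGN(elf_bss);
	last_bss = PAGE_ALIGN(last_bss);

	printf("zero %#lx..%#lx\n", zero_from, elf_bss);
	if (last_bss > elf_bss)
		printf("vm_brk(%#lx, %#lx)\n", elf_bss, last_bss - elf_bss);
	return 0;
}
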
@@ -1212,11 +1214,13 @@ static int load_elf_library(struct file *file)
goto out_free_ph;
}
- len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
- ELF_MIN_ALIGN - 1);
- bss = eppnt->p_memsz + eppnt->p_vaddr;
- if (bss > len)
- vm_brk(len, bss - len);
+ len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
+ bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
+ if (bss > len) {
+ error = vm_brk(len, bss - len);
+ if (BAD_ADDR(error))
+ goto out_free_ph;
+ }
error = 0;
out_free_ph:
@@ -1707,7 +1711,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t,
const struct user_regset *regset = &view->regsets[i];
do_thread_regset_writeback(t->task, regset);
if (regset->core_note_type && regset->get &&
- (!regset->active || regset->active(t->task, regset))) {
+ (!regset->active || regset->active(t->task, regset) > 0)) {
int ret;
size_t size = regset->n * regset->size;
void *data = kmalloc(size, GFP_KERNEL);
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 78f005f3784760..dd784bcf7c968c 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -369,8 +369,13 @@ static Node *create_entry(const char __user *buffer, size_t count)
s = strchr(p, del);
if (!s)
goto einval;
- *s++ = '\0';
- e->offset = simple_strtoul(p, &p, 10);
+ *s = '\0';
+ if (p != s) {
+ int r = kstrtoint(p, 10, &e->offset);
+ if (r != 0 || e->offset < 0)
+ goto einval;
+ }
+ p = s;
if (*p++)
goto einval;
pr_debug("register: offset: %#x\n", e->offset);
@@ -410,7 +415,8 @@ static Node *create_entry(const char __user *buffer, size_t count)
if (e->mask &&
string_unescape_inplace(e->mask, UNESCAPE_HEX) != e->size)
goto einval;
- if (e->size + e->offset > BINPRM_BUF_SIZE)
+ if (e->size > BINPRM_BUF_SIZE ||
+ BINPRM_BUF_SIZE - e->size < e->offset)
goto einval;
pr_debug("register: magic/mask length: %i\n", e->size);
if (USE_DEBUG) {
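
The rewritten check compares the offset against the space left after the magic rather than summing the two fields, so hostile values cannot wrap the comparison. A standalone sketch:

#include <stdio.h>
#include <limits.h>

#define BINPRM_BUF_SIZE 128

static int entry_fits(int size, int offset)
{
	/* "size + offset > BINPRM_BUF_SIZE" can overflow for large
	 * inputs; checking the remaining space never does. */
	return size <= BINPRM_BUF_SIZE && BINPRM_BUF_SIZE - size >= offset;
}

int main(void)
{
	printf("%d\n", entry_fits(16, 64));       /* 1: fits */
	printf("%d\n", entry_fits(16, 120));      /* 0: past the buffer */
	printf("%d\n", entry_fits(16, INT_MAX));  /* 0: naive sum would wrap */
	return 0;
}
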
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 6d1d0b93b1aa18..c792df826e1244 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -9,7 +9,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
- uuid-tree.o props.o hash.o
+ uuid-tree.o props.o hash.o tree-checker.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 38ee086754687a..8f4baa3cb992c5 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1726,20 +1726,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
return err;
}
-/*
- * The leaf data grows from end-to-front in the node.
- * this returns the address of the start of the last item,
- * which is the stop of the leaf data stack
- */
-static inline unsigned int leaf_data_end(struct btrfs_root *root,
- struct extent_buffer *leaf)
-{
- u32 nr = btrfs_header_nritems(leaf);
- if (nr == 0)
- return BTRFS_LEAF_DATA_SIZE(root);
- return btrfs_item_offset_nr(leaf, nr - 1);
-}
-
/*
* search for key in the extent_buffer. The items start at offset p,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index e847573c6db088..4a91d3119e59e1 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -35,6 +35,7 @@
#include <linux/btrfs.h>
#include <linux/workqueue.h>
#include <linux/security.h>
+#include <linux/sizes.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
@@ -897,6 +898,7 @@ struct btrfs_balance_item {
#define BTRFS_FILE_EXTENT_INLINE 0
#define BTRFS_FILE_EXTENT_REG 1
#define BTRFS_FILE_EXTENT_PREALLOC 2
+#define BTRFS_FILE_EXTENT_TYPES 2
struct btrfs_file_extent_item {
/*
@@ -2283,7 +2285,7 @@ do { \
#define BTRFS_INODE_ROOT_ITEM_INIT (1 << 31)
struct btrfs_map_token {
- struct extent_buffer *eb;
+ const struct extent_buffer *eb;
char *kaddr;
unsigned long offset;
};
@@ -2314,18 +2316,19 @@ static inline void btrfs_init_map_token (struct btrfs_map_token *token)
sizeof(((type *)0)->member)))
#define DECLARE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
- unsigned long off, \
- struct btrfs_map_token *token); \
-void btrfs_set_token_##bits(struct extent_buffer *eb, void *ptr, \
+u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off, \
+ struct btrfs_map_token *token); \
+void btrfs_set_token_##bits(struct extent_buffer *eb, const void *ptr, \
unsigned long off, u##bits val, \
struct btrfs_map_token *token); \
-static inline u##bits btrfs_get_##bits(struct extent_buffer *eb, void *ptr, \
+static inline u##bits btrfs_get_##bits(const struct extent_buffer *eb, \
+ const void *ptr, \
unsigned long off) \
{ \
return btrfs_get_token_##bits(eb, ptr, off, NULL); \
} \
-static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr, \
+static inline void btrfs_set_##bits(struct extent_buffer *eb, void *ptr,\
unsigned long off, u##bits val) \
{ \
btrfs_set_token_##bits(eb, ptr, off, val, NULL); \
@@ -2337,7 +2340,8 @@ DECLARE_BTRFS_SETGET_BITS(32)
DECLARE_BTRFS_SETGET_BITS(64)
#define BTRFS_SETGET_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(struct extent_buffer *eb, type *s) \
+static inline u##bits btrfs_##name(const struct extent_buffer *eb, \
+ const type *s) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
return btrfs_get_##bits(eb, s, offsetof(type, member)); \
@@ -2348,7 +2352,8 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, type *s, \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
btrfs_set_##bits(eb, s, offsetof(type, member), val); \
} \
-static inline u##bits btrfs_token_##name(struct extent_buffer *eb, type *s, \
+static inline u##bits btrfs_token_##name(const struct extent_buffer *eb,\
+ const type *s, \
struct btrfs_map_token *token) \
{ \
BUILD_BUG_ON(sizeof(u##bits) != sizeof(((type *)0))->member); \
@@ -2363,9 +2368,9 @@ static inline void btrfs_set_token_##name(struct extent_buffer *eb, \
}
#define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(struct extent_buffer *eb) \
+static inline u##bits btrfs_##name(const struct extent_buffer *eb) \
{ \
- type *p = page_address(eb->pages[0]); \
+ const type *p = page_address(eb->pages[0]); \
u##bits res = le##bits##_to_cpu(p->member); \
return res; \
} \
@@ -2377,7 +2382,7 @@ static inline void btrfs_set_##name(struct extent_buffer *eb, \
}
#define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
-static inline u##bits btrfs_##name(type *s) \
+static inline u##bits btrfs_##name(const type *s) \
{ \
return le##bits##_to_cpu(s->member); \
} \
@@ -2678,7 +2683,7 @@ static inline unsigned long btrfs_node_key_ptr_offset(int nr)
sizeof(struct btrfs_key_ptr) * nr;
}
-void btrfs_node_key(struct extent_buffer *eb,
+void btrfs_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr);
static inline void btrfs_set_node_key(struct extent_buffer *eb,
@@ -2707,28 +2712,28 @@ static inline struct btrfs_item *btrfs_item_nr(int nr)
return (struct btrfs_item *)btrfs_item_nr_offset(nr);
}
-static inline u32 btrfs_item_end(struct extent_buffer *eb,
+static inline u32 btrfs_item_end(const struct extent_buffer *eb,
struct btrfs_item *item)
{
return btrfs_item_offset(eb, item) + btrfs_item_size(eb, item);
}
-static inline u32 btrfs_item_end_nr(struct extent_buffer *eb, int nr)
+static inline u32 btrfs_item_end_nr(const struct extent_buffer *eb, int nr)
{
return btrfs_item_end(eb, btrfs_item_nr(nr));
}
-static inline u32 btrfs_item_offset_nr(struct extent_buffer *eb, int nr)
+static inline u32 btrfs_item_offset_nr(const struct extent_buffer *eb, int nr)
{
return btrfs_item_offset(eb, btrfs_item_nr(nr));
}
-static inline u32 btrfs_item_size_nr(struct extent_buffer *eb, int nr)
+static inline u32 btrfs_item_size_nr(const struct extent_buffer *eb, int nr)
{
return btrfs_item_size(eb, btrfs_item_nr(nr));
}
-static inline void btrfs_item_key(struct extent_buffer *eb,
+static inline void btrfs_item_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
{
struct btrfs_item *item = btrfs_item_nr(nr);
@@ -2764,8 +2769,8 @@ BTRFS_SETGET_STACK_FUNCS(stack_dir_name_len, struct btrfs_dir_item,
BTRFS_SETGET_STACK_FUNCS(stack_dir_transid, struct btrfs_dir_item,
transid, 64);
-static inline void btrfs_dir_item_key(struct extent_buffer *eb,
- struct btrfs_dir_item *item,
+static inline void btrfs_dir_item_key(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
struct btrfs_disk_key *key)
{
read_eb_member(eb, item, struct btrfs_dir_item, location, key);
@@ -2773,7 +2778,7 @@ static inline void btrfs_dir_item_key(struct extent_buffer *eb,
static inline void btrfs_set_dir_item_key(struct extent_buffer *eb,
struct btrfs_dir_item *item,
- struct btrfs_disk_key *key)
+ const struct btrfs_disk_key *key)
{
write_eb_member(eb, item, struct btrfs_dir_item, location, key);
}
@@ -2785,8 +2790,8 @@ BTRFS_SETGET_FUNCS(free_space_bitmaps, struct btrfs_free_space_header,
BTRFS_SETGET_FUNCS(free_space_generation, struct btrfs_free_space_header,
generation, 64);
-static inline void btrfs_free_space_key(struct extent_buffer *eb,
- struct btrfs_free_space_header *h,
+static inline void btrfs_free_space_key(const struct extent_buffer *eb,
+ const struct btrfs_free_space_header *h,
struct btrfs_disk_key *key)
{
read_eb_member(eb, h, struct btrfs_free_space_header, location, key);
@@ -2794,7 +2799,7 @@ static inline void btrfs_free_space_key(struct extent_buffer *eb,
static inline void btrfs_set_free_space_key(struct extent_buffer *eb,
struct btrfs_free_space_header *h,
- struct btrfs_disk_key *key)
+ const struct btrfs_disk_key *key)
{
write_eb_member(eb, h, struct btrfs_free_space_header, location, key);
}
@@ -2821,25 +2826,25 @@ static inline void btrfs_cpu_key_to_disk(struct btrfs_disk_key *disk,
disk->objectid = cpu_to_le64(cpu->objectid);
}
-static inline void btrfs_node_key_to_cpu(struct extent_buffer *eb,
- struct btrfs_key *key, int nr)
+static inline void btrfs_node_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *key, int nr)
{
struct btrfs_disk_key disk_key;
btrfs_node_key(eb, &disk_key, nr);
btrfs_disk_key_to_cpu(key, &disk_key);
}
-static inline void btrfs_item_key_to_cpu(struct extent_buffer *eb,
- struct btrfs_key *key, int nr)
+static inline void btrfs_item_key_to_cpu(const struct extent_buffer *eb,
+ struct btrfs_key *key, int nr)
{
struct btrfs_disk_key disk_key;
btrfs_item_key(eb, &disk_key, nr);
btrfs_disk_key_to_cpu(key, &disk_key);
}
-static inline void btrfs_dir_item_key_to_cpu(struct extent_buffer *eb,
- struct btrfs_dir_item *item,
- struct btrfs_key *key)
+static inline void btrfs_dir_item_key_to_cpu(const struct extent_buffer *eb,
+ const struct btrfs_dir_item *item,
+ struct btrfs_key *key)
{
struct btrfs_disk_key disk_key;
btrfs_dir_item_key(eb, item, &disk_key);
@@ -2872,7 +2877,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_header_nritems, struct btrfs_header,
nritems, 32);
BTRFS_SETGET_STACK_FUNCS(stack_header_bytenr, struct btrfs_header, bytenr, 64);
-static inline int btrfs_header_flag(struct extent_buffer *eb, u64 flag)
+static inline int btrfs_header_flag(const struct extent_buffer *eb, u64 flag)
{
return (btrfs_header_flags(eb) & flag) == flag;
}
@@ -2891,7 +2896,7 @@ static inline int btrfs_clear_header_flag(struct extent_buffer *eb, u64 flag)
return (flags & flag) == flag;
}
-static inline int btrfs_header_backref_rev(struct extent_buffer *eb)
+static inline int btrfs_header_backref_rev(const struct extent_buffer *eb)
{
u64 flags = btrfs_header_flags(eb);
return flags >> BTRFS_BACKREF_REV_SHIFT;
@@ -2911,12 +2916,12 @@ static inline unsigned long btrfs_header_fsid(void)
return offsetof(struct btrfs_header, fsid);
}
-static inline unsigned long btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
+static inline unsigned long btrfs_header_chunk_tree_uuid(const struct extent_buffer *eb)
{
return offsetof(struct btrfs_header, chunk_tree_uuid);
}
-static inline int btrfs_is_leaf(struct extent_buffer *eb)
+static inline int btrfs_is_leaf(const struct extent_buffer *eb)
{
return btrfs_header_level(eb) == 0;
}
@@ -2950,12 +2955,12 @@ BTRFS_SETGET_STACK_FUNCS(root_stransid, struct btrfs_root_item,
BTRFS_SETGET_STACK_FUNCS(root_rtransid, struct btrfs_root_item,
rtransid, 64);
-static inline bool btrfs_root_readonly(struct btrfs_root *root)
+static inline bool btrfs_root_readonly(const struct btrfs_root *root)
{
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_RDONLY)) != 0;
}
-static inline bool btrfs_root_dead(struct btrfs_root *root)
+static inline bool btrfs_root_dead(const struct btrfs_root *root)
{
return (root->root_item.flags & cpu_to_le64(BTRFS_ROOT_SUBVOL_DEAD)) != 0;
}
@@ -3012,51 +3017,51 @@ BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
/* struct btrfs_balance_item */
BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
-static inline void btrfs_balance_data(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
+static inline void btrfs_balance_data(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
struct btrfs_disk_balance_args *ba)
{
read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
}
static inline void btrfs_set_balance_data(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
{
write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
}
-static inline void btrfs_balance_meta(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
+static inline void btrfs_balance_meta(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
struct btrfs_disk_balance_args *ba)
{
read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
}
static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
{
write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
}
-static inline void btrfs_balance_sys(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
+static inline void btrfs_balance_sys(const struct extent_buffer *eb,
+ const struct btrfs_balance_item *bi,
struct btrfs_disk_balance_args *ba)
{
read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
}
static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
- struct btrfs_balance_item *bi,
- struct btrfs_disk_balance_args *ba)
+ struct btrfs_balance_item *bi,
+ const struct btrfs_disk_balance_args *ba)
{
write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
}
static inline void
btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
- struct btrfs_disk_balance_args *disk)
+ const struct btrfs_disk_balance_args *disk)
{
memset(cpu, 0, sizeof(*cpu));
@@ -3076,7 +3081,7 @@ btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
static inline void
btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
- struct btrfs_balance_args *cpu)
+ const struct btrfs_balance_args *cpu)
{
memset(disk, 0, sizeof(*disk));
@@ -3144,7 +3149,7 @@ BTRFS_SETGET_STACK_FUNCS(super_magic, struct btrfs_super_block, magic, 64);
BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
uuid_tree_generation, 64);
-static inline int btrfs_super_csum_size(struct btrfs_super_block *s)
+static inline int btrfs_super_csum_size(const struct btrfs_super_block *s)
{
u16 t = btrfs_super_csum_type(s);
/*
@@ -3158,6 +3163,21 @@ static inline unsigned long btrfs_leaf_data(struct extent_buffer *l)
return offsetof(struct btrfs_leaf, items);
}
+/*
+ * The leaf data grows from end to front in the node.
+ * This returns the address of the start of the last item,
+ * which is the end of the leaf data stack.
+ */
+static inline unsigned int leaf_data_end(const struct btrfs_root *root,
+ const struct extent_buffer *leaf)
+{
+ u32 nr = btrfs_header_nritems(leaf);
+
+ if (nr == 0)
+ return BTRFS_LEAF_DATA_SIZE(root);
+ return btrfs_item_offset_nr(leaf, nr - 1);
+}
+
/* struct btrfs_file_extent_item */
BTRFS_SETGET_FUNCS(file_extent_type, struct btrfs_file_extent_item, type, 8);
BTRFS_SETGET_STACK_FUNCS(stack_file_extent_disk_bytenr,
@@ -3174,7 +3194,7 @@ BTRFS_SETGET_STACK_FUNCS(stack_file_extent_compression,
struct btrfs_file_extent_item, compression, 8);
static inline unsigned long
-btrfs_file_extent_inline_start(struct btrfs_file_extent_item *e)
+btrfs_file_extent_inline_start(const struct btrfs_file_extent_item *e)
{
return (unsigned long)e + BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
@@ -3208,8 +3228,9 @@ BTRFS_SETGET_FUNCS(file_extent_other_encoding, struct btrfs_file_extent_item,
* size of any extent headers. If a file is compressed on disk, this is
* the compressed size
*/
-static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
- struct btrfs_item *e)
+static inline u32 btrfs_file_extent_inline_item_len(
+ const struct extent_buffer *eb,
+ struct btrfs_item *e)
{
return btrfs_item_size(eb, e) - BTRFS_FILE_EXTENT_INLINE_DATA_START;
}
@@ -3217,9 +3238,9 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
/* this returns the number of file bytes represented by the inline item.
* If an item is compressed, this is the uncompressed size
*/
-static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
- int slot,
- struct btrfs_file_extent_item *fi)
+static inline u32 btrfs_file_extent_inline_len(const struct extent_buffer *eb,
+ int slot,
+ const struct btrfs_file_extent_item *fi)
{
struct btrfs_map_token token;
@@ -3241,8 +3262,8 @@ static inline u32 btrfs_file_extent_inline_len(struct extent_buffer *eb,
/* btrfs_dev_stats_item */
-static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
- struct btrfs_dev_stats_item *ptr,
+static inline u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
+ const struct btrfs_dev_stats_item *ptr,
int index)
{
u64 val;
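
The constified accessors above sit alongside the new leaf_data_end() helper. As an illustration of the leaf layout it relies on, here is a minimal standalone C sketch, assuming simplified stand-in types (struct item and LEAF_DATA_SIZE are hypothetical, not the kernel structs): item headers grow from the front of the leaf while item data grows back from the end, so the offset of the last item's data is the end of the data stack.

#include <stdio.h>

#define LEAF_DATA_SIZE 16384u	/* hypothetical leaf payload size */

struct item { unsigned offset; unsigned size; };

static unsigned leaf_data_end(const struct item *items, unsigned nritems)
{
	if (nritems == 0)
		return LEAF_DATA_SIZE;		/* empty leaf: data stack is empty */
	return items[nritems - 1].offset;	/* start of the last item's data */
}

int main(void)
{
	/* two items: data packed back-to-front at the tail of the leaf */
	struct item items[] = {
		{ LEAF_DATA_SIZE - 100, 100 },	/* item 0 data */
		{ LEAF_DATA_SIZE - 150, 50 },	/* item 1 data, in front of item 0's */
	};
	printf("data end: %u\n", leaf_data_end(items, 2)); /* 16234 */
	return 0;
}
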
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 1e668fb7dd4c73..81e5bc62e8e337 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -574,6 +574,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
btrfs_rm_dev_replace_unblocked(fs_info);
/*
+	 * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
+	 * update the on-disk dev stats value during the transaction commit
+ */
+ atomic_inc(&tgt_device->dev_stats_ccnt);
+
+ /*
* this is again a consistent state where no dev_replace procedure
* is running, the target device is part of the filesystem, the
* source device is not part of the filesystem anymore and its 1st
@@ -614,7 +620,7 @@ static void btrfs_dev_replace_update_device_in_mapping_tree(
em = lookup_extent_mapping(em_tree, start, (u64)-1);
if (!em)
break;
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++)
if (srcdev == map->stripes[i].dev)
map->stripes[i].dev = tgtdev;
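
The dev_stats_ccnt bump above follows a dirty-counter pattern: writers increment a change counter, and the commit path persists the stats only when the counter has moved. A minimal userspace sketch of that pattern using C11 atomics, with hypothetical names (struct device, record_stat_change, and run_dev_stats are stand-ins, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>

struct device {
	atomic_uint stats_ccnt;		/* bumped whenever stats must be flushed */
	unsigned on_disk_value;
	unsigned in_memory_value;
};

static void record_stat_change(struct device *dev, unsigned v)
{
	dev->in_memory_value = v;
	atomic_fetch_add(&dev->stats_ccnt, 1);	/* mark stats dirty */
}

static void run_dev_stats(struct device *dev)
{
	/* flush only when something changed since the last commit */
	if (atomic_exchange(&dev->stats_ccnt, 0) != 0)
		dev->on_disk_value = dev->in_memory_value;
}

int main(void)
{
	struct device dev = { .on_disk_value = 0 };
	record_stat_change(&dev, 42);
	run_dev_stats(&dev);
	printf("persisted: %u\n", dev.on_disk_value); /* 42 */
	return 0;
}
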
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 208b3f5ffb3f18..78722aaffecd3c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -49,6 +49,7 @@
#include "raid56.h"
#include "sysfs.h"
#include "qgroup.h"
+#include "tree-checker.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -445,9 +446,9 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
int mirror_num = 0;
int failed_mirror = 0;
- clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
while (1) {
+ clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = read_extent_buffer_pages(io_tree, eb, start,
WAIT_COMPLETE,
btree_get_extent, mirror_num);
@@ -459,14 +460,6 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
ret = -EIO;
}
- /*
- * This buffer's crc is fine, but its contents are corrupted, so
- * there is no reason to read the other copies, they won't be
- * any less wrong.
- */
- if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
- break;
-
num_copies = btrfs_num_copies(root->fs_info,
eb->start, eb->len);
if (num_copies == 1)
@@ -530,72 +523,6 @@ static int check_tree_block_fsid(struct btrfs_fs_info *fs_info,
return ret;
}
-#define CORRUPT(reason, eb, root, slot) \
- btrfs_crit(root->fs_info, "corrupt leaf, %s: block=%llu," \
- "root=%llu, slot=%d", reason, \
- btrfs_header_bytenr(eb), root->objectid, slot)
-
-static noinline int check_leaf(struct btrfs_root *root,
- struct extent_buffer *leaf)
-{
- struct btrfs_key key;
- struct btrfs_key leaf_key;
- u32 nritems = btrfs_header_nritems(leaf);
- int slot;
-
- if (nritems == 0)
- return 0;
-
- /* Check the 0 item */
- if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
- BTRFS_LEAF_DATA_SIZE(root)) {
- CORRUPT("invalid item offset size pair", leaf, root, 0);
- return -EIO;
- }
-
- /*
- * Check to make sure each items keys are in the correct order and their
- * offsets make sense. We only have to loop through nritems-1 because
- * we check the current slot against the next slot, which verifies the
- * next slot's offset+size makes sense and that the current's slot
- * offset is correct.
- */
- for (slot = 0; slot < nritems - 1; slot++) {
- btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
- btrfs_item_key_to_cpu(leaf, &key, slot + 1);
-
- /* Make sure the keys are in the right order */
- if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
- CORRUPT("bad key order", leaf, root, slot);
- return -EIO;
- }
-
- /*
- * Make sure the offset and ends are right, remember that the
- * item data starts at the end of the leaf and grows towards the
- * front.
- */
- if (btrfs_item_offset_nr(leaf, slot) !=
- btrfs_item_end_nr(leaf, slot + 1)) {
- CORRUPT("slot offset bad", leaf, root, slot);
- return -EIO;
- }
-
- /*
- * Check to make sure that we don't point outside of the leaf,
- * just incase all the items are consistent to eachother, but
- * all point outside of the leaf.
- */
- if (btrfs_item_end_nr(leaf, slot) >
- BTRFS_LEAF_DATA_SIZE(root)) {
- CORRUPT("slot end outside of leaf", leaf, root, slot);
- return -EIO;
- }
- }
-
- return 0;
-}
-
static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
u64 phy_offset, struct page *page,
u64 start, u64 end, int mirror)
@@ -662,11 +589,14 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
- if (found_level == 0 && check_leaf(root, eb)) {
+ if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
}
+ if (found_level > 0 && btrfs_check_node(root, eb))
+ ret = -EIO;
+
if (!ret)
set_extent_buffer_uptodate(eb);
err:
@@ -923,7 +853,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
if (bio_flags & EXTENT_BIO_TREE_LOG)
return 0;
#ifdef CONFIG_X86
- if (cpu_has_xmm4_2)
+ if (static_cpu_has(X86_FEATURE_XMM4_2))
return 0;
#endif
return 1;
@@ -1011,8 +941,9 @@ static int btree_writepages(struct address_space *mapping,
fs_info = BTRFS_I(mapping->host)->root->fs_info;
/* this is a bit racy, but that's ok */
- ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ fs_info->dirty_metadata_batch);
if (ret < 0)
return 0;
}
@@ -1607,8 +1538,8 @@ fail:
return ret;
}
-static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
- u64 root_id)
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ u64 root_id)
{
struct btrfs_root *root;
@@ -3965,7 +3896,13 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
buf->len,
root->fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_header_level(buf) == 0 && check_leaf(root, buf)) {
+ /*
+	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
+	 * but the item data not yet updated, so only check item pointers
+	 * here, not item data.
+ */
+ if (btrfs_header_level(buf) == 0 &&
+ btrfs_check_leaf_relaxed(root, buf)) {
btrfs_print_leaf(root, buf);
ASSERT(0);
}
@@ -3987,8 +3924,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
if (flush_delayed)
btrfs_balance_delayed_items(root);
- ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
- BTRFS_DIRTY_METADATA_THRESH);
+ ret = __percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH,
+ root->fs_info->dirty_metadata_batch);
if (ret > 0) {
balance_dirty_pages_ratelimited(
root->fs_info->btree_inode->i_mapping);
@@ -4173,6 +4111,14 @@ static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info)
spin_lock(&fs_info->ordered_root_lock);
}
spin_unlock(&fs_info->ordered_root_lock);
+
+ /*
+ * We need this here because if we've been flipped read-only we won't
+ * get sync() from the umount, so we need to make sure any ordered
+	 * extents whose dirty pages haven't started writeout yet
+ * actually get run and error out properly.
+ */
+ btrfs_wait_ordered_roots(fs_info, -1);
}
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
@@ -4331,6 +4277,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
struct extent_io_tree *pinned_extents)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
struct extent_io_tree *unpin;
u64 start;
u64 end;
@@ -4340,21 +4287,31 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
unpin = pinned_extents;
again:
while (1) {
+ /*
+		 * btrfs_finish_extent_commit() may get the same range as
+		 * ours between find_first_extent_bit and clear_extent_dirty.
+		 * Hence, hold the unused_bg_unpin_mutex to avoid double
+		 * unpinning the same extent range.
+ */
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = find_first_extent_bit(unpin, 0, &start, &end,
EXTENT_DIRTY, NULL);
- if (ret)
+ if (ret) {
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
+ }
clear_extent_dirty(unpin, start, end, GFP_NOFS);
btrfs_error_unpin_extent_range(root, start, end);
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
if (loop) {
- if (unpin == &root->fs_info->freed_extents[0])
- unpin = &root->fs_info->freed_extents[1];
+ if (unpin == &fs_info->freed_extents[0])
+ unpin = &fs_info->freed_extents[1];
else
- unpin = &root->fs_info->freed_extents[0];
+ unpin = &fs_info->freed_extents[0];
loop = false;
goto again;
}
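
The unused_bg_unpin_mutex change above makes the lookup and the clear a single atomic step. A minimal pthread sketch of the same find-then-clear-under-one-lock pattern, with hypothetical names (range_dirty and find_first_dirty are stand-ins for the extent-bit tree):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t unpin_lock = PTHREAD_MUTEX_INITIALIZER;
static bool range_dirty[4] = { true, true, false, true };

/* find the first dirty slot; returns -1 when none remain */
static int find_first_dirty(void)
{
	for (int i = 0; i < 4; i++)
		if (range_dirty[i])
			return i;
	return -1;
}

static void destroy_pinned(void)
{
	while (1) {
		/*
		 * Lookup and clear must happen under one lock, otherwise a
		 * concurrent unpinner could clear the same slot between our
		 * find and our clear (the double unpin the patch avoids).
		 */
		pthread_mutex_lock(&unpin_lock);
		int i = find_first_dirty();
		if (i < 0) {
			pthread_mutex_unlock(&unpin_lock);
			break;
		}
		range_dirty[i] = false;		/* "unpin" the range */
		pthread_mutex_unlock(&unpin_lock);
		printf("unpinned slot %d\n", i);
	}
}

int main(void) { destroy_pinned(); return 0; }
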
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index adeb31830b9cc1..3c9819403487dc 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -68,6 +68,8 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
struct btrfs_root *btrfs_read_fs_root(struct btrfs_root *tree_root,
struct btrfs_key *location);
int btrfs_init_fs_root(struct btrfs_root *root);
+struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
+ u64 root_id);
int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_root *root);
void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 982a9d509817d7..978bbfed5a2ce0 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2342,7 +2342,13 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
}
- BUG_ON(node->ref_mod != 1);
+ if (node->ref_mod != 1) {
+ btrfs_err(root->fs_info,
+ "btree block(%llu) has %d references rather than 1: action %d ref_root %llu parent %llu",
+ node->bytenr, node->ref_mod, node->action, ref_root,
+ parent);
+ return -EIO;
+ }
if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
BUG_ON(!extent_op || !extent_op->update_flags);
ret = alloc_reserved_tree_block(trans, root,
@@ -4128,7 +4134,7 @@ commit_trans:
data_sinfo->flags, bytes, 1);
spin_unlock(&data_sinfo->lock);
- return ret;
+ return 0;
}
/*
@@ -7835,6 +7841,20 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
buf = btrfs_find_create_tree_block(root, bytenr);
if (!buf)
return ERR_PTR(-ENOMEM);
+
+ /*
+	 * Extra safety check in case the extent tree is corrupted and the
+	 * extent allocator chooses to use a tree block which is already
+	 * used and locked.
+ */
+ if (buf->lock_owner == current->pid) {
+ btrfs_err_rl(root->fs_info,
+"tree block %llu owner %llu already locked by pid=%d, extent tree corruption detected",
+ buf->start, btrfs_header_owner(buf), current->pid);
+ free_extent_buffer(buf);
+ return ERR_PTR(-EUCLEAN);
+ }
+
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
btrfs_tree_lock(buf);
@@ -8704,15 +8724,14 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
if (eb == root->node) {
if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = eb->start;
- else
- BUG_ON(root->root_key.objectid !=
- btrfs_header_owner(eb));
+ else if (root->root_key.objectid != btrfs_header_owner(eb))
+ goto owner_mismatch;
} else {
if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
parent = path->nodes[level + 1]->start;
- else
- BUG_ON(root->root_key.objectid !=
- btrfs_header_owner(path->nodes[level + 1]));
+ else if (root->root_key.objectid !=
+ btrfs_header_owner(path->nodes[level + 1]))
+ goto owner_mismatch;
}
btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
@@ -8720,6 +8739,11 @@ out:
wc->refs[level] = 0;
wc->flags[level] = 0;
return 0;
+
+owner_mismatch:
+ btrfs_err_rl(root->fs_info, "unexpected tree owner, have %llu expect %llu",
+ btrfs_header_owner(eb), root->root_key.objectid);
+ return -EUCLEAN;
}
static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
@@ -8773,6 +8797,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
ret = walk_up_proc(trans, root, path, wc);
if (ret > 0)
return 0;
+ if (ret < 0)
+ return ret;
if (path->locks[level]) {
btrfs_tree_unlock_rw(path->nodes[level],
@@ -9461,6 +9487,8 @@ static int find_first_block_group(struct btrfs_root *root,
int ret = 0;
struct btrfs_key found_key;
struct extent_buffer *leaf;
+ struct btrfs_block_group_item bg;
+ u64 flags;
int slot;
ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
@@ -9482,7 +9510,47 @@ static int find_first_block_group(struct btrfs_root *root,
if (found_key.objectid >= key->objectid &&
found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- ret = 0;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
+ em_tree = &root->fs_info->mapping_tree.map_tree;
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, found_key.objectid,
+ found_key.offset);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ btrfs_err(root->fs_info,
+ "logical %llu len %llu found bg but no related chunk",
+ found_key.objectid, found_key.offset);
+ ret = -ENOENT;
+ } else if (em->start != found_key.objectid ||
+ em->len != found_key.offset) {
+ btrfs_err(root->fs_info,
+ "block group %llu len %llu mismatch with chunk %llu len %llu",
+ found_key.objectid, found_key.offset,
+ em->start, em->len);
+ ret = -EUCLEAN;
+ } else {
+ read_extent_buffer(leaf, &bg,
+ btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bg));
+ flags = btrfs_block_group_flags(&bg) &
+ BTRFS_BLOCK_GROUP_TYPE_MASK;
+
+ if (flags != (em->map_lookup->type &
+ BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(root->fs_info,
+"block group %llu len %llu type flags 0x%llx mismatch with chunk type flags 0x%llx",
+ found_key.objectid,
+ found_key.offset, flags,
+ (BTRFS_BLOCK_GROUP_TYPE_MASK &
+ em->map_lookup->type));
+ ret = -EUCLEAN;
+ } else {
+ ret = 0;
+ }
+ }
+ free_extent_map(em);
goto out;
}
path->slots[0]++;
@@ -9501,6 +9569,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
block_group = btrfs_lookup_first_block_group(info, last);
while (block_group) {
+ wait_block_group_cache_done(block_group);
spin_lock(&block_group->lock);
if (block_group->iref)
break;
@@ -9696,6 +9765,62 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
return cache;
}
+
+/*
+ * Iterate over all chunks and verify that each of them has a corresponding
+ * block group
+ */
+static int check_chunk_block_group_mappings(struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+ struct extent_map *em;
+ struct btrfs_block_group_cache *bg;
+ u64 start = 0;
+ int ret = 0;
+
+ while (1) {
+ read_lock(&map_tree->map_tree.lock);
+ /*
+ * lookup_extent_mapping will return the first extent map
+ * intersecting the range, so setting @len to 1 is enough to
+ * get the first chunk.
+ */
+ em = lookup_extent_mapping(&map_tree->map_tree, start, 1);
+ read_unlock(&map_tree->map_tree.lock);
+ if (!em)
+ break;
+
+ bg = btrfs_lookup_block_group(fs_info, em->start);
+ if (!bg) {
+ btrfs_err(fs_info,
+ "chunk start=%llu len=%llu doesn't have corresponding block group",
+ em->start, em->len);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ break;
+ }
+ if (bg->key.objectid != em->start ||
+ bg->key.offset != em->len ||
+ (bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK) !=
+ (em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
+ btrfs_err(fs_info,
+"chunk start=%llu len=%llu flags=0x%llx doesn't match block group start=%llu len=%llu flags=0x%llx",
+ em->start, em->len,
+ em->map_lookup->type & BTRFS_BLOCK_GROUP_TYPE_MASK,
+ bg->key.objectid, bg->key.offset,
+ bg->flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
+ ret = -EUCLEAN;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ break;
+ }
+ start = em->start + em->len;
+ free_extent_map(em);
+ btrfs_put_block_group(bg);
+ }
+ return ret;
+}
+
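
check_chunk_block_group_mappings() above is a straightforward cross-check: every chunk must have a block group with identical start, length, and type flags. A standalone sketch of the same idea over plain arrays (struct mapping and check_mappings are hypothetical stand-ins for the extent-map and block-group lookups):

#include <stdio.h>
#include <stddef.h>

struct mapping { unsigned long long start, len, type; };

/* every chunk must have a block group with identical start/len/type */
static int check_mappings(const struct mapping *chunks, size_t nchunks,
			  const struct mapping *bgs, size_t nbgs)
{
	for (size_t i = 0; i < nchunks; i++) {
		const struct mapping *bg = NULL;

		for (size_t j = 0; j < nbgs; j++)
			if (bgs[j].start == chunks[i].start) {
				bg = &bgs[j];
				break;
			}
		if (!bg || bg->len != chunks[i].len ||
		    bg->type != chunks[i].type) {
			fprintf(stderr,
				"chunk start=%llu has no matching block group\n",
				chunks[i].start);
			return -1;	/* corresponds to -EUCLEAN above */
		}
	}
	return 0;
}

int main(void)
{
	struct mapping chunks[] = { { 0, 8, 1 }, { 8, 16, 2 } };
	struct mapping bgs[]    = { { 0, 8, 1 }, { 8, 16, 2 } };

	printf("result: %d\n", check_mappings(chunks, 2, bgs, 2)); /* 0 */
	return 0;
}
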
int btrfs_read_block_groups(struct btrfs_root *root)
{
struct btrfs_path *path;
@@ -9882,7 +10007,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
}
init_global_block_rsv(info);
- ret = 0;
+ ret = check_chunk_block_group_mappings(info);
error:
btrfs_free_path(path);
return ret;
@@ -9891,7 +10016,7 @@ error:
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
- struct btrfs_block_group_cache *block_group, *tmp;
+ struct btrfs_block_group_cache *block_group;
struct btrfs_root *extent_root = root->fs_info->extent_root;
struct btrfs_block_group_item item;
struct btrfs_key key;
@@ -9899,7 +10024,10 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
trans->can_flush_pending_bgs = false;
- list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+ while (!list_empty(&trans->new_bgs)) {
+ block_group = list_first_entry(&trans->new_bgs,
+ struct btrfs_block_group_cache,
+ bg_list);
if (ret)
goto next;
@@ -10364,7 +10492,7 @@ btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
* more device items and remove one chunk item), but this is done at
* btrfs_remove_chunk() through a call to check_system_chunk().
*/
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
num_items = 3 + map->num_stripes;
free_extent_map(em);
@@ -10410,7 +10538,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
/* Don't want to race with allocators so take the groups_sem */
down_write(&space_info->groups_sem);
spin_lock(&block_group->lock);
- if (block_group->reserved ||
+ if (block_group->reserved || block_group->pinned ||
btrfs_block_group_used(&block_group->item) ||
block_group->ro ||
list_is_singular(&block_group->list)) {
@@ -10609,6 +10737,10 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
*trimmed = 0;
+ /* Discard not supported = nothing to do. */
+ if (!blk_queue_discard(bdev_get_queue(device->bdev)))
+ return 0;
+
/* Not writeable = nothing to do. */
if (!device->writeable)
return 0;
@@ -10680,17 +10812,9 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
u64 start;
u64 end;
u64 trimmed = 0;
- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret = 0;
- /*
- * try to trim all FS space, our block group may start from non-zero.
- */
- if (range->len == total_bytes)
- cache = btrfs_lookup_first_block_group(fs_info, range->start);
- else
- cache = btrfs_lookup_block_group(fs_info, range->start);
-
+ cache = btrfs_lookup_first_block_group(fs_info, range->start);
while (cache) {
if (cache->key.objectid >= (range->start + range->len)) {
btrfs_put_block_group(cache);
@@ -10731,8 +10855,8 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
}
mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
- devices = &root->fs_info->fs_devices->alloc_list;
- list_for_each_entry(device, devices, dev_alloc_list) {
+ devices = &root->fs_info->fs_devices->devices;
+ list_for_each_entry(device, devices, dev_list) {
ret = btrfs_trim_free_extents(device, range->minlen,
&group_trimmed);
if (ret)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 88bee6703cc0d1..a18f558b4477e9 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3106,11 +3106,11 @@ static int __do_readpage(struct extent_io_tree *tree,
*/
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
prev_em_start && *prev_em_start != (u64)-1 &&
- *prev_em_start != em->orig_start)
+ *prev_em_start != em->start)
force_bio_submit = true;
if (prev_em_start)
- *prev_em_start = em->orig_start;
+ *prev_em_start = em->start;
free_extent_map(em);
em = NULL;
@@ -3847,8 +3847,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
struct block_device *bdev = fs_info->fs_devices->latest_bdev;
struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
u64 offset = eb->start;
+ u32 nritems;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
+ unsigned long start, end;
int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
int ret = 0;
@@ -3858,6 +3860,23 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
if (btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID)
bio_flags = EXTENT_BIO_TREE_LOG;
+	/* Set btree block regions beyond nritems to 0 to avoid stale content. */
+ nritems = btrfs_header_nritems(eb);
+ if (btrfs_header_level(eb) > 0) {
+ end = btrfs_node_key_ptr_offset(nritems);
+
+ memset_extent_buffer(eb, 0, end, eb->len - end);
+ } else {
+ /*
+ * leaf:
+ * header 0 1 2 .. N ... data_N .. data_2 data_1 data_0
+ */
+ start = btrfs_item_nr_offset(nritems);
+ end = btrfs_leaf_data(eb) +
+ leaf_data_end(fs_info->tree_root, eb);
+ memset_extent_buffer(eb, 0, start, end - start);
+ }
+
for (i = 0; i < num_pages; i++) {
struct page *p = eb->pages[i];
@@ -5362,9 +5381,8 @@ unlock_exit:
return ret;
}
-void read_extent_buffer(struct extent_buffer *eb, void *dstv,
- unsigned long start,
- unsigned long len)
+void read_extent_buffer(const struct extent_buffer *eb, void *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
@@ -5393,9 +5411,9 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv,
}
}
-int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
- unsigned long start,
- unsigned long len)
+int read_extent_buffer_to_user(const struct extent_buffer *eb,
+ void __user *dstv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
@@ -5430,10 +5448,10 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
return ret;
}
-int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
- unsigned long min_len, char **map,
- unsigned long *map_start,
- unsigned long *map_len)
+int map_private_extent_buffer(const struct extent_buffer *eb,
+ unsigned long start, unsigned long min_len,
+ char **map, unsigned long *map_start,
+ unsigned long *map_len)
{
size_t offset = start & (PAGE_CACHE_SIZE - 1);
char *kaddr;
@@ -5468,9 +5486,8 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
return 0;
}
-int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
- unsigned long start,
- unsigned long len)
+int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
+ unsigned long start, unsigned long len)
{
size_t cur;
size_t offset;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index f4c1ae11855f0b..751435967724ee 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -308,14 +308,13 @@ static inline void extent_buffer_get(struct extent_buffer *eb)
atomic_inc(&eb->refs);
}
-int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
- unsigned long start,
- unsigned long len);
-void read_extent_buffer(struct extent_buffer *eb, void *dst,
+int memcmp_extent_buffer(const struct extent_buffer *eb, const void *ptrv,
+ unsigned long start, unsigned long len);
+void read_extent_buffer(const struct extent_buffer *eb, void *dst,
unsigned long start,
unsigned long len);
-int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dst,
- unsigned long start,
+int read_extent_buffer_to_user(const struct extent_buffer *eb,
+ void __user *dst, unsigned long start,
unsigned long len);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
@@ -334,10 +333,10 @@ int set_extent_buffer_uptodate(struct extent_buffer *eb);
int clear_extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_uptodate(struct extent_buffer *eb);
int extent_buffer_under_io(struct extent_buffer *eb);
-int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
- unsigned long min_len, char **map,
- unsigned long *map_start,
- unsigned long *map_len);
+int map_private_extent_buffer(const struct extent_buffer *eb,
+ unsigned long offset, unsigned long min_len,
+ char **map, unsigned long *map_start,
+ unsigned long *map_len);
int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 6a98bddd8f33a9..84fb56d5c018db 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -76,7 +76,7 @@ void free_extent_map(struct extent_map *em)
WARN_ON(extent_map_in_tree(em));
WARN_ON(!list_empty(&em->list));
if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
- kfree(em->bdev);
+ kfree(em->map_lookup);
kmem_cache_free(extent_map_cache, em);
}
}
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index b2991fd8583efe..eb8b8fae036bc3 100644
--- a/fs/btrfs/extent_map.h
+++ b/fs/btrfs/extent_map.h
@@ -32,7 +32,15 @@ struct extent_map {
u64 block_len;
u64 generation;
unsigned long flags;
- struct block_device *bdev;
+ union {
+ struct block_device *bdev;
+
+ /*
+	 * Used for chunk mappings;
+	 * EXTENT_FLAG_FS_MAPPING must be set in flags.
+ */
+ struct map_lookup *map_lookup;
+ };
atomic_t refs;
unsigned int compress_type;
struct list_head list;
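
The union above is effectively a tagged union, with EXTENT_FLAG_FS_MAPPING as the tag: map_lookup may only be read when the flag is set. A minimal standalone sketch of that discipline (FLAG_FS_MAPPING and the struct fields are simplified stand-ins, not the kernel definitions):

#include <stdio.h>

struct map_lookup { int num_stripes; };

/* flag telling us which union member is live, as in the patch */
#define FLAG_FS_MAPPING 0x1

struct extent_map {
	unsigned long flags;
	union {
		const char *bdev_name;		/* regular file mappings */
		struct map_lookup *map_lookup;	/* chunk mappings only */
	};
};

static void describe(const struct extent_map *em)
{
	/* only read map_lookup when the flag says the union holds one */
	if (em->flags & FLAG_FS_MAPPING)
		printf("chunk mapping, %d stripes\n",
		       em->map_lookup->num_stripes);
	else
		printf("file mapping on %s\n", em->bdev_name);
}

int main(void)
{
	struct map_lookup ml = { .num_stripes = 2 };
	struct extent_map chunk = { .flags = FLAG_FS_MAPPING,
				    .map_lookup = &ml };
	struct extent_map file = { .flags = 0, .bdev_name = "sda" };

	describe(&chunk);
	describe(&file);
	return 0;
}
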
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 45934deacfd7bd..6c0161284a9e83 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1699,6 +1699,8 @@ static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
bitmap_clear(info->bitmap, start, count);
info->bytes -= bytes;
+ if (info->max_extent_size > ctl->unit)
+ info->max_extent_size = 0;
}
static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
@@ -1782,6 +1784,13 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
return -1;
}
+static inline u64 get_max_extent_size(struct btrfs_free_space *entry)
+{
+ if (entry->bitmap)
+ return entry->max_extent_size;
+ return entry->bytes;
+}
+
/* Cache the size of the max extent in bytes */
static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
@@ -1803,8 +1812,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
for (node = &entry->offset_index; node; node = rb_next(node)) {
entry = rb_entry(node, struct btrfs_free_space, offset_index);
if (entry->bytes < *bytes) {
- if (entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
continue;
}
@@ -1822,8 +1831,8 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
}
if (entry->bytes < *bytes + align_off) {
- if (entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
continue;
}
@@ -1835,8 +1844,10 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
*offset = tmp;
*bytes = size;
return entry;
- } else if (size > *max_extent_size) {
- *max_extent_size = size;
+ } else {
+ *max_extent_size =
+ max(get_max_extent_size(entry),
+ *max_extent_size);
}
continue;
}
@@ -2458,6 +2469,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
struct rb_node *n;
int count = 0;
+ spin_lock(&ctl->tree_lock);
for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
info = rb_entry(n, struct btrfs_free_space, offset_index);
if (info->bytes >= bytes && !block_group->ro)
@@ -2467,6 +2479,7 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
info->offset, info->bytes,
(info->bitmap) ? "yes" : "no");
}
+ spin_unlock(&ctl->tree_lock);
btrfs_info(block_group->fs_info, "block group has cluster?: %s",
list_empty(&block_group->cluster_list) ? "no" : "yes");
btrfs_info(block_group->fs_info,
@@ -2694,8 +2707,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
err = search_bitmap(ctl, entry, &search_start, &search_bytes, true);
if (err) {
- if (search_bytes > *max_extent_size)
- *max_extent_size = search_bytes;
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
return 0;
}
@@ -2732,8 +2745,9 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
entry = rb_entry(node, struct btrfs_free_space, offset_index);
while (1) {
- if (entry->bytes < bytes && entry->bytes > *max_extent_size)
- *max_extent_size = entry->bytes;
+ if (entry->bytes < bytes)
+ *max_extent_size = max(get_max_extent_size(entry),
+ *max_extent_size);
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
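
get_max_extent_size() above exists because a bitmap entry can be badly fragmented: its total ->bytes may be much larger than its largest contiguous run, so failure reporting must use the cached max run instead. A standalone sketch of that lookup-failure bookkeeping (struct free_space here is a simplified stand-in for the kernel struct):

#include <stdbool.h>
#include <stdio.h>

struct free_space {
	bool bitmap;
	unsigned long long bytes;		/* total bytes in the entry */
	unsigned long long max_extent_size;	/* largest contiguous run (bitmaps) */
};

/* bitmaps may be fragmented: their usable extent can be much smaller
 * than ->bytes, so report the cached max run instead */
static unsigned long long get_max_extent_size(const struct free_space *e)
{
	return e->bitmap ? e->max_extent_size : e->bytes;
}

int main(void)
{
	struct free_space entries[] = {
		{ .bitmap = false, .bytes = 4096 },
		{ .bitmap = true,  .bytes = 65536, .max_extent_size = 8192 },
	};
	unsigned long long max_extent_size = 0, want = 16384;

	for (int i = 0; i < 2; i++) {
		unsigned long long sz = get_max_extent_size(&entries[i]);

		if (sz < want && sz > max_extent_size)
			max_extent_size = sz;	/* best failure hint so far */
	}
	printf("largest available: %llu\n", max_extent_size); /* 8192 */
	return 0;
}
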
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 36c126d6afb0f5..2631bde6c01976 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -481,6 +481,7 @@ again:
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
if (!pages) {
/* just bail out to the uncompressed code */
+ nr_pages = 0;
goto cont;
}
@@ -1202,6 +1203,8 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
list_del(&sums->list);
kfree(sums);
}
+ if (ret < 0)
+ return ret;
return 1;
}
@@ -1351,10 +1354,23 @@ next_slot:
goto out_check;
if (btrfs_extent_readonly(root, disk_bytenr))
goto out_check;
- if (btrfs_cross_ref_exist(trans, root, ino,
+ ret = btrfs_cross_ref_exist(trans, root, ino,
found_key.offset -
- extent_offset, disk_bytenr))
+ extent_offset, disk_bytenr);
+ if (ret) {
+ /*
+ * ret could be -EIO if the above fails to read
+ * metadata.
+ */
+ if (ret < 0) {
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
+ goto error;
+ }
+
+ WARN_ON_ONCE(nolock);
goto out_check;
+ }
disk_bytenr += extent_offset;
disk_bytenr += cur_offset - found_key.offset;
num_bytes = min(end + 1, extent_end) - cur_offset;
@@ -1372,8 +1388,20 @@ next_slot:
* this ensure that csum for a given extent are
* either valid or do not exist.
*/
- if (csum_exist_in_range(root, disk_bytenr, num_bytes))
+ ret = csum_exist_in_range(root, disk_bytenr, num_bytes);
+ if (ret) {
+ /*
+ * ret could be -EIO if the above fails to read
+ * metadata.
+ */
+ if (ret < 0) {
+ if (cow_start != (u64)-1)
+ cur_offset = cow_start;
+ goto error;
+ }
+ WARN_ON_ONCE(nolock);
goto out_check;
+ }
nocow = 1;
} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
extent_end = found_key.offset +
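
Both hunks above fix the same bug class: a helper that can return a negative errno was used as if it returned a plain boolean, so -EIO looked like "csum exists". A minimal sketch of the corrected tri-state handling (csum_exists and try_nocow are hypothetical stand-ins, not the kernel functions):

#include <stdio.h>
#include <errno.h>

/* hypothetical lookup: <0 on I/O error, 1 if a csum exists, 0 if not */
static int csum_exists(unsigned long long bytenr)
{
	if (bytenr == 0)
		return -EIO;	/* simulate a metadata read failure */
	return bytenr % 2;	/* pretend odd bytenrs have csums */
}

static int try_nocow(unsigned long long bytenr)
{
	int ret = csum_exists(bytenr);

	if (ret) {
		/* a real error must abort, not be treated as "csum found" */
		if (ret < 0)
			return ret;
		return 0;	/* csum present: fall back to COW */
	}
	return 1;		/* no csum: NOCOW is allowed */
}

int main(void)
{
	printf("bytenr 3: %d\n", try_nocow(3)); /* 0: has csum, must COW */
	printf("bytenr 4: %d\n", try_nocow(4)); /* 1: nocow ok */
	printf("bytenr 0: %d\n", try_nocow(0)); /* -EIO propagated */
	return 0;
}
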
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4e3c889c187663..3379490ce54daa 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -378,7 +378,6 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
struct fstrim_range range;
u64 minlen = ULLONG_MAX;
u64 num_devices = 0;
- u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
int ret;
if (!capable(CAP_SYS_ADMIN))
@@ -402,11 +401,15 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return -EOPNOTSUPP;
if (copy_from_user(&range, arg, sizeof(range)))
return -EFAULT;
- if (range.start > total_bytes ||
- range.len < fs_info->sb->s_blocksize)
+
+ /*
+	 * NOTE: Don't truncate the range using super->total_bytes. The bytenr
+	 * of a block group is in the logical address space, which can be any
+	 * sectorsize-aligned bytenr in the range [0, U64_MAX].
+ */
+ if (range.len < fs_info->sb->s_blocksize)
return -EINVAL;
- range.len = min(range.len, total_bytes - range.start);
range.minlen = max(range.minlen, minlen);
ret = btrfs_trim_fs(fs_info->tree_root, &range);
if (ret < 0)
@@ -3923,11 +3926,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (!(src_file.file->f_mode & FMODE_READ))
goto out_fput;
- /* don't make the dst file partly checksummed */
- if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
- (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM))
- goto out_fput;
-
ret = -EISDIR;
if (S_ISDIR(src->i_mode) || S_ISDIR(inode->i_mode))
goto out_fput;
@@ -3942,15 +3940,30 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
mutex_lock(&src->i_mutex);
}
+ /* don't make the dst file partly checksummed */
+ if ((BTRFS_I(src)->flags & BTRFS_INODE_NODATASUM) !=
+ (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/* determine range to clone */
ret = -EINVAL;
if (off + len > src->i_size || off + len < off)
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
- /* if we extend to eof, continue to block boundary */
- if (off + len == src->i_size)
+ /*
+ * If we extend to eof, continue to block boundary if and only if the
+	 * destination end offset reaches the destination file's size, otherwise
+ * we would be corrupting data by placing the eof block into the middle
+ * of a file.
+ */
+ if (off + len == src->i_size) {
+ if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+ goto out_unlock;
len = ALIGN(src->i_size, bs) - off;
+ }
if (len == 0) {
ret = 0;
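
The clone-range rule above can be checked in isolation: extending an unaligned tail to the block boundary is only safe when the destination end lands at or past the destination's EOF. A standalone sketch (eof_clone_ok is a hypothetical helper, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/*
 * Mirrors the rule above: extending the tail of the source to the block
 * boundary is only safe when the destination end lands at or beyond the
 * destination's EOF; otherwise the partial EOF block would be spliced
 * into the middle of the destination file, corrupting it.
 */
static bool eof_clone_ok(unsigned long long len, unsigned long long destoff,
			 unsigned long long dest_size, unsigned bs)
{
	if (IS_ALIGNED(len, bs))
		return true;			/* no partial tail block */
	return destoff + len >= dest_size;	/* tail lands at dest EOF */
}

int main(void)
{
	/* 4096-byte blocks; cloning a 6000-byte tail */
	printf("%d\n", eof_clone_ok(6000, 0, 6000, 4096));	/* 1: at EOF */
	printf("%d\n", eof_clone_ok(6000, 0, 1000000, 4096));	/* 0: mid-file */
	return 0;
}
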
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 88d9b66e2207c7..90e29d40aa82eb 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2186,6 +2186,21 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
}
/*
+ * Check if the leaf is the last leaf, which means all node pointers
+ * are at their last position.
+ */
+static bool is_last_leaf(struct btrfs_path *path)
+{
+ int i;
+
+ for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) {
+ if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1)
+ return false;
+ }
+ return true;
+}
+
+/*
* returns < 0 on error, 0 when more leafs are to be scanned.
* returns 1 when done.
*/
@@ -2198,6 +2213,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
struct ulist *roots = NULL;
struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
u64 num_bytes;
+ bool done;
int slot;
int ret;
@@ -2225,6 +2241,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
mutex_unlock(&fs_info->qgroup_rescan_lock);
return ret;
}
+ done = is_last_leaf(path);
btrfs_item_key_to_cpu(path->nodes[0], &found,
btrfs_header_nritems(path->nodes[0]) - 1);
@@ -2271,6 +2288,8 @@ out:
}
btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+ if (done && !ret)
+ ret = 1;
return ret;
}
@@ -2427,6 +2446,7 @@ qgroup_rescan_zero_tracking(struct btrfs_fs_info *fs_info)
qgroup->rfer_cmpr = 0;
qgroup->excl = 0;
qgroup->excl_cmpr = 0;
+ qgroup_dirty(fs_info, qgroup);
}
spin_unlock(&fs_info->qgroup_lock);
}
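
is_last_leaf() above walks the ancestor slots: the current leaf is the last one iff every level above sits in its final slot. A standalone sketch with simplified stand-in types (struct node and struct path here are not the kernel's btrfs_path):

#include <stdbool.h>
#include <stdio.h>

#define MAX_LEVEL 8

struct node { int nritems; };

struct path {
	struct node *nodes[MAX_LEVEL];	/* [0] is the leaf, NULL-terminated */
	int slots[MAX_LEVEL];		/* slot taken at each level */
};

/* the current leaf is the last one iff every ancestor slot is the
 * final slot at its level, as in is_last_leaf() above */
static bool is_last_leaf(const struct path *p)
{
	for (int i = 1; i < MAX_LEVEL && p->nodes[i]; i++)
		if (p->slots[i] != p->nodes[i]->nritems - 1)
			return false;
	return true;
}

int main(void)
{
	struct node root = { .nritems = 3 }, leaf = { .nritems = 5 };
	struct path p = { .nodes = { &leaf, &root }, .slots = { 0, 2 } };

	printf("last leaf: %d\n", is_last_leaf(&p)); /* 1: slot 2 of 3 */
	p.slots[1] = 1;
	printf("last leaf: %d\n", is_last_leaf(&p)); /* 0 */
	return 0;
}
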
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index b9fa99577bf7a3..2d2a7690678617 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -2420,8 +2420,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
bitmap_clear(rbio->dbitmap, pagenr, 1);
kunmap(p);
- for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+ for (stripe = 0; stripe < nr_data; stripe++)
kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ kunmap(p_page);
}
__free_page(p_page);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 9ebe027cc4b7f4..d6ccfb31aef0f9 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1318,18 +1318,19 @@ static void __del_reloc_root(struct btrfs_root *root)
struct mapping_node *node = NULL;
struct reloc_control *rc = root->fs_info->reloc_ctl;
- spin_lock(&rc->reloc_root_tree.lock);
- rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->node->start);
- if (rb_node) {
- node = rb_entry(rb_node, struct mapping_node, rb_node);
- rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ if (rc && root->node) {
+ spin_lock(&rc->reloc_root_tree.lock);
+ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+ root->node->start);
+ if (rb_node) {
+ node = rb_entry(rb_node, struct mapping_node, rb_node);
+ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+ if (!node)
+ return;
+ BUG_ON((struct btrfs_root *)node->data != root);
}
- spin_unlock(&rc->reloc_root_tree.lock);
-
- if (!node)
- return;
- BUG_ON((struct btrfs_root *)node->data != root);
spin_lock(&root->fs_info->trans_lock);
list_del_init(&root->root_list);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index 2c849b08a91b53..6a6efb26d52fbb 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -272,6 +272,23 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
root_key.objectid = key.offset;
key.offset++;
+ /*
+ * The root might have been inserted already, as before we look
+ * for orphan roots, log replay might have happened, which
+ * triggers a transaction commit and qgroup accounting, which
+ * in turn reads and inserts fs roots while doing backref
+ * walking.
+ */
+ root = btrfs_lookup_fs_root(tree_root->fs_info,
+ root_key.objectid);
+ if (root) {
+ WARN_ON(!test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
+ &root->state));
+ if (btrfs_root_refs(&root->root_item) == 0)
+ btrfs_add_dead_root(root);
+ continue;
+ }
+
root = btrfs_read_fs_root(tree_root, &root_key);
err = PTR_ERR_OR_ZERO(root);
if (err && err != -ENOENT) {
@@ -310,16 +327,8 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state);
err = btrfs_insert_fs_root(root->fs_info, root);
- /*
- * The root might have been inserted already, as before we look
- * for orphan roots, log replay might have happened, which
- * triggers a transaction commit and qgroup accounting, which
- * in turn reads and inserts fs roots while doing backref
- * walking.
- */
- if (err == -EEXIST)
- err = 0;
if (err) {
+ BUG_ON(err == -EEXIST);
btrfs_free_fs_root(root);
break;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index b091d94ceef680..cc9ccc42f46985 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2513,7 +2513,7 @@ static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
have_csum = scrub_find_csum(sctx, logical, csum);
if (have_csum == 0)
++sctx->stat.no_csum;
- if (sctx->is_dev_replace && !have_csum) {
+ if (0 && sctx->is_dev_replace && !have_csum) {
ret = copy_nocow_pages(sctx, logical, l,
mirror_num,
physical_for_dev_replace);
@@ -3460,7 +3460,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
return ret;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (em->start != chunk_offset)
goto out;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 83c73738165e22..40d1ab957fb6c8 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -3232,7 +3232,8 @@ static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
kfree(m);
}
-static void tail_append_pending_moves(struct pending_dir_move *moves,
+static void tail_append_pending_moves(struct send_ctx *sctx,
+ struct pending_dir_move *moves,
struct list_head *stack)
{
if (list_empty(&moves->list)) {
@@ -3243,6 +3244,10 @@ static void tail_append_pending_moves(struct pending_dir_move *moves,
list_add_tail(&moves->list, stack);
list_splice_tail(&list, stack);
}
+ if (!RB_EMPTY_NODE(&moves->node)) {
+ rb_erase(&moves->node, &sctx->pending_dir_moves);
+ RB_CLEAR_NODE(&moves->node);
+ }
}
static int apply_children_dir_moves(struct send_ctx *sctx)
@@ -3257,7 +3262,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
return 0;
INIT_LIST_HEAD(&stack);
- tail_append_pending_moves(pm, &stack);
+ tail_append_pending_moves(sctx, pm, &stack);
while (!list_empty(&stack)) {
pm = list_first_entry(&stack, struct pending_dir_move, list);
@@ -3268,7 +3273,7 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
goto out;
pm = get_pending_dir_moves(sctx, parent_ino);
if (pm)
- tail_append_pending_moves(pm, &stack);
+ tail_append_pending_moves(sctx, pm, &stack);
}
return 0;
diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c
index b976597b07216c..63ffd213b0b740 100644
--- a/fs/btrfs/struct-funcs.c
+++ b/fs/btrfs/struct-funcs.c
@@ -50,8 +50,8 @@ static inline void put_unaligned_le8(u8 val, void *p)
*/
#define DEFINE_BTRFS_SETGET_BITS(bits) \
-u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
- unsigned long off, \
+u##bits btrfs_get_token_##bits(const struct extent_buffer *eb, \
+ const void *ptr, unsigned long off, \
struct btrfs_map_token *token) \
{ \
unsigned long part_offset = (unsigned long)ptr; \
@@ -90,7 +90,8 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \
return res; \
} \
void btrfs_set_token_##bits(struct extent_buffer *eb, \
- void *ptr, unsigned long off, u##bits val, \
+ const void *ptr, unsigned long off, \
+ u##bits val, \
struct btrfs_map_token *token) \
{ \
unsigned long part_offset = (unsigned long)ptr; \
@@ -133,7 +134,7 @@ DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)
-void btrfs_node_key(struct extent_buffer *eb,
+void btrfs_node_key(const struct extent_buffer *eb,
struct btrfs_disk_key *disk_key, int nr)
{
unsigned long ptr = btrfs_node_key_ptr_offset(nr);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3bd2233737ac49..0f99336c37eb45 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2104,6 +2104,7 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
vol = memdup_user((void __user *)arg, sizeof(*vol));
if (IS_ERR(vol))
return PTR_ERR(vol);
+ vol->name[BTRFS_PATH_NAME_MAX] = '\0';
switch (cmd) {
case BTRFS_IOC_SCAN_DEV:
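
The one-liner above is a classic hardening fix: a fixed-size name copied from userspace may lack a NUL terminator, so the kernel forces one before any string handling. A userspace sketch of the pattern (dup_from_user and PATH_NAME_MAX are stand-ins for memdup_user() and BTRFS_PATH_NAME_MAX):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PATH_NAME_MAX 4087	/* hypothetical fixed-size name field */

struct vol_args {
	char name[PATH_NAME_MAX + 1];
};

/* stand-in for memdup_user(): copy an untrusted fixed-size blob */
static struct vol_args *dup_from_user(const void *src)
{
	struct vol_args *vol = malloc(sizeof(*vol));

	if (!vol)
		return NULL;
	memcpy(vol, src, sizeof(*vol));
	/*
	 * Userspace may not NUL-terminate the name; force the final byte
	 * to '\0' so later string functions can never run off the end.
	 */
	vol->name[PATH_NAME_MAX] = '\0';
	return vol;
}

int main(void)
{
	struct vol_args raw;
	struct vol_args *vol;

	memset(raw.name, 'A', sizeof(raw.name)); /* worst case: no NUL */
	vol = dup_from_user(&raw);
	printf("len: %zu\n", strlen(vol->name)); /* bounded at 4087 */
	free(vol);
	return 0;
}
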
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index be8eae80ff6572..098016338f9876 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1821,6 +1821,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
}
+ btrfs_trans_release_metadata(trans, root);
+ trans->block_rsv = NULL;
+
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
*/
@@ -1830,9 +1833,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
return ret;
}
- btrfs_trans_release_metadata(trans, root);
- trans->block_rsv = NULL;
-
cur_trans = trans->transaction;
/*
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
new file mode 100644
index 00000000000000..5b98f3c76ce491
--- /dev/null
+++ b/fs/btrfs/tree-checker.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) Qu Wenruo 2017. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program.
+ */
+
+/*
+ * This module is used to catch unexpected/corrupted tree block data.
+ * Such corruption can be caused either by a fuzzed image or bugs.
+ *
+ * The objective is to do leaf/node validation checks when a tree block is
+ * read from disk, and to check *every* possible member, so other code won't
+ * need to check them again.
+ *
+ * Due to the potential for unwanted damage, every checker needs to be
+ * carefully reviewed so it does not prevent the mount of valid images.
+ */
+
+#include "ctree.h"
+#include "tree-checker.h"
+#include "disk-io.h"
+#include "compression.h"
+#include "hash.h"
+#include "volumes.h"
+
+#define CORRUPT(reason, eb, root, slot) \
+ btrfs_crit(root->fs_info, \
+ "corrupt %s, %s: block=%llu, root=%llu, slot=%d", \
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", \
+ reason, btrfs_header_bytenr(eb), root->objectid, slot)
+
+/*
+ * Error messages should use the following format:
+ * corrupt <type>: <identifier>, <reason>[, <bad_value>]
+ *
+ * @type: leaf or node
+ * @identifier: the necessary info to locate the leaf/node.
+ *		It's recommended to decode key.objectid/offset if it's
+ * meaningful.
+ * @reason: describe the error
+ * @bad_value:	optional, it's recommended to output the bad value and its
+ * expected value (range).
+ *
+ * Since commas are used to separate the components, only spaces are allowed
+ * inside each component.
+ */
+
+/*
+ * Prepend generic "corrupt leaf/node root=%llu block=%llu slot=%d, " to @fmt.
+ * Allows callers to customize the output.
+ */
+__printf(4, 5)
+static void generic_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ root->objectid, btrfs_header_bytenr(eb), slot, &vaf);
+ va_end(args);
+}
+
+static int check_extent_data_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_file_extent_item *fi;
+ u32 sectorsize = root->sectorsize;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ CORRUPT("unaligned key offset for file extent",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, fi) > BTRFS_FILE_EXTENT_TYPES) {
+ CORRUPT("invalid file extent type", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /*
+	 * Support for new compression/encryption must introduce an incompat flag,
+ * and must be caught in open_ctree().
+ */
+ if (btrfs_file_extent_compression(leaf, fi) > BTRFS_COMPRESS_TYPES) {
+ CORRUPT("invalid file extent compression", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_encryption(leaf, fi)) {
+ CORRUPT("invalid file extent encryption", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (btrfs_file_extent_type(leaf, fi) == BTRFS_FILE_EXTENT_INLINE) {
+ /* Inline extent must have 0 as key offset */
+ if (key->offset) {
+ CORRUPT("inline extent has non-zero key offset",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /* Compressed inline extent has no on-disk size, skip it */
+ if (btrfs_file_extent_compression(leaf, fi) !=
+ BTRFS_COMPRESS_NONE)
+ return 0;
+
+ /* Uncompressed inline extent size must match item size */
+ if (item_size != BTRFS_FILE_EXTENT_INLINE_DATA_START +
+ btrfs_file_extent_ram_bytes(leaf, fi)) {
+ CORRUPT("plaintext inline extent has invalid size",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+ return 0;
+ }
+
+ /* Regular or preallocated extent has fixed item size */
+ if (item_size != sizeof(*fi)) {
+ CORRUPT(
+	"regular or preallocated extent data item size is invalid",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(btrfs_file_extent_ram_bytes(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_disk_bytenr(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_disk_num_bytes(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_offset(leaf, fi), sectorsize) ||
+ !IS_ALIGNED(btrfs_file_extent_num_bytes(leaf, fi), sectorsize)) {
+ CORRUPT(
+ "regular or preallocated extent data item has unaligned value",
+ leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ return 0;
+}
+
+static int check_csum_item(struct btrfs_root *root, struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ u32 sectorsize = root->sectorsize;
+ u32 csumsize = btrfs_super_csum_size(root->fs_info->super_copy);
+
+ if (key->objectid != BTRFS_EXTENT_CSUM_OBJECTID) {
+ CORRUPT("invalid objectid for csum item", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(key->offset, sectorsize)) {
+ CORRUPT("unaligned key offset for csum item", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ if (!IS_ALIGNED(btrfs_item_size_nr(leaf, slot), csumsize)) {
+ CORRUPT("unaligned csum item size", leaf, root, slot);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
+/*
+ * Customized report for dir_item; the only important new info is
+ * key->objectid, which represents the inode number
+ */
+__printf(4, 5)
+static void dir_item_err(const struct btrfs_root *root,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(root->fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d ino=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node", root->objectid,
+ btrfs_header_bytenr(eb), slot, key.objectid, &vaf);
+ va_end(args);
+}
+
+static int check_dir_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_dir_item *di;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u32 cur = 0;
+
+ di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
+ while (cur < item_size) {
+ u32 name_len;
+ u32 data_len;
+ u32 max_name_len;
+ u32 total_size;
+ u32 name_hash;
+ u8 dir_type;
+
+ /* header itself should not cross item boundary */
+ if (cur + sizeof(*di) > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item header crosses item boundary, have %zu boundary %u",
+ cur + sizeof(*di), item_size);
+ return -EUCLEAN;
+ }
+
+ /* dir type check */
+ dir_type = btrfs_dir_type(leaf, di);
+ if (dir_type >= BTRFS_FT_MAX) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type, have %u expect [0, %u)",
+ dir_type, BTRFS_FT_MAX);
+ return -EUCLEAN;
+ }
+
+ if (key->type == BTRFS_XATTR_ITEM_KEY &&
+ dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "invalid dir item type for XATTR key, have %u expect %u",
+ dir_type, BTRFS_FT_XATTR);
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR &&
+ key->type != BTRFS_XATTR_ITEM_KEY) {
+ dir_item_err(root, leaf, slot,
+ "xattr dir type found for non-XATTR key");
+ return -EUCLEAN;
+ }
+ if (dir_type == BTRFS_FT_XATTR)
+ max_name_len = XATTR_NAME_MAX;
+ else
+ max_name_len = BTRFS_NAME_LEN;
+
+ /* Name/data length check */
+ name_len = btrfs_dir_name_len(leaf, di);
+ data_len = btrfs_dir_data_len(leaf, di);
+ if (name_len > max_name_len) {
+ dir_item_err(root, leaf, slot,
+ "dir item name len too long, have %u max %u",
+ name_len, max_name_len);
+ return -EUCLEAN;
+ }
+ if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
+ dir_item_err(root, leaf, slot,
+ "dir item name and data len too long, have %u max %zu",
+ name_len + data_len,
+ BTRFS_MAX_XATTR_SIZE(root));
+ return -EUCLEAN;
+ }
+
+ if (data_len && dir_type != BTRFS_FT_XATTR) {
+ dir_item_err(root, leaf, slot,
+ "dir item with invalid data len, have %u expect 0",
+ data_len);
+ return -EUCLEAN;
+ }
+
+ total_size = sizeof(*di) + name_len + data_len;
+
+ /* header and name/data should not cross item boundary */
+ if (cur + total_size > item_size) {
+ dir_item_err(root, leaf, slot,
+ "dir item data crosses item boundary, have %u boundary %u",
+ cur + total_size, item_size);
+ return -EUCLEAN;
+ }
+
+ /*
+		 * Special check for XATTR/DIR_ITEM, as key->offset is the
+		 * name hash; it should match the name
+ */
+ if (key->type == BTRFS_DIR_ITEM_KEY ||
+ key->type == BTRFS_XATTR_ITEM_KEY) {
+ char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+
+ read_extent_buffer(leaf, namebuf,
+ (unsigned long)(di + 1), name_len);
+ name_hash = btrfs_name_hash(namebuf, name_len);
+ if (key->offset != name_hash) {
+ dir_item_err(root, leaf, slot,
+ "name hash mismatch with key, have 0x%016x expect 0x%016llx",
+ name_hash, key->offset);
+ return -EUCLEAN;
+ }
+ }
+ cur += total_size;
+ di = (struct btrfs_dir_item *)((void *)di + total_size);
+ }
+ return 0;
+}
+
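
check_dir_item() above walks back-to-back variable-size records, verifying at each step that neither the header nor its payload crosses the item boundary. A standalone sketch of that walk (struct dir_header is a simplified stand-in for btrfs_dir_item):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dir_header {
	uint16_t name_len;
	uint16_t data_len;
};

/* walk back-to-back [header][name][data] records inside item_size
 * bytes, rejecting anything that crosses the item boundary */
static int check_dir_item(const uint8_t *item, uint32_t item_size)
{
	uint32_t cur = 0;

	while (cur < item_size) {
		struct dir_header hdr;
		uint32_t total;

		if (cur + sizeof(hdr) > item_size)
			return -1;	/* header crosses boundary */
		memcpy(&hdr, item + cur, sizeof(hdr));

		total = sizeof(hdr) + hdr.name_len + hdr.data_len;
		if (cur + total > item_size)
			return -1;	/* payload crosses boundary */
		cur += total;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[32] = { 0 };
	struct dir_header hdr = { .name_len = 4, .data_len = 0 };

	memcpy(buf, &hdr, sizeof(hdr));	/* one 8-byte record */
	printf("ok: %d\n", check_dir_item(buf, 8));	/* 0 */
	printf("bad: %d\n", check_dir_item(buf, 6));	/* -1: payload crosses */
	return 0;
}
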
+__printf(4, 5)
+__cold
+static void block_group_err(const struct btrfs_fs_info *fs_info,
+ const struct extent_buffer *eb, int slot,
+ const char *fmt, ...)
+{
+ struct btrfs_key key;
+ struct va_format vaf;
+ va_list args;
+
+ btrfs_item_key_to_cpu(eb, &key, slot);
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ btrfs_crit(fs_info,
+ "corrupt %s: root=%llu block=%llu slot=%d bg_start=%llu bg_len=%llu, %pV",
+ btrfs_header_level(eb) == 0 ? "leaf" : "node",
+ btrfs_header_owner(eb), btrfs_header_bytenr(eb), slot,
+ key.objectid, key.offset, &vaf);
+ va_end(args);
+}
+
+static int check_block_group_item(struct btrfs_fs_info *fs_info,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ struct btrfs_block_group_item bgi;
+ u32 item_size = btrfs_item_size_nr(leaf, slot);
+ u64 flags;
+ u64 type;
+
+ /*
+	 * Here we don't really care about alignment since the extent
+	 * allocator can handle it. We care more about the size: if one block
+	 * group is larger than the maximum size, it must be some obvious
+	 * corruption.
+ */
+ if (key->offset > BTRFS_MAX_DATA_CHUNK_SIZE || key->offset == 0) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group size, have %llu expect (0, %llu]",
+ key->offset, BTRFS_MAX_DATA_CHUNK_SIZE);
+ return -EUCLEAN;
+ }
+
+ if (item_size != sizeof(bgi)) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid item size, have %u expect %zu",
+ item_size, sizeof(bgi));
+ return -EUCLEAN;
+ }
+
+ read_extent_buffer(leaf, &bgi, btrfs_item_ptr_offset(leaf, slot),
+ sizeof(bgi));
+ if (btrfs_block_group_chunk_objectid(&bgi) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group chunk objectid, have %llu expect %llu",
+ btrfs_block_group_chunk_objectid(&bgi),
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+
+ if (btrfs_block_group_used(&bgi) > key->offset) {
+ block_group_err(fs_info, leaf, slot,
+ "invalid block group used, have %llu expect [0, %llu)",
+ btrfs_block_group_used(&bgi), key->offset);
+ return -EUCLEAN;
+ }
+
+ flags = btrfs_block_group_flags(&bgi);
+ if (hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) > 1) {
+ block_group_err(fs_info, leaf, slot,
+"invalid profile flags, have 0x%llx (%lu bits set) expect no more than 1 bit set",
+ flags & BTRFS_BLOCK_GROUP_PROFILE_MASK,
+ hweight64(flags & BTRFS_BLOCK_GROUP_PROFILE_MASK));
+ return -EUCLEAN;
+ }
+
+ type = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
+ if (type != BTRFS_BLOCK_GROUP_DATA &&
+ type != BTRFS_BLOCK_GROUP_METADATA &&
+ type != BTRFS_BLOCK_GROUP_SYSTEM &&
+ type != (BTRFS_BLOCK_GROUP_METADATA |
+ BTRFS_BLOCK_GROUP_DATA)) {
+ block_group_err(fs_info, leaf, slot,
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
+ type, hweight64(type),
+ BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
+ BTRFS_BLOCK_GROUP_SYSTEM,
+ BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA);
+ return -EUCLEAN;
+ }
+ return 0;
+}
+
+/*
+ * Common dispatch point for the item-specific validation.
+ */
+static int check_leaf_item(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_key *key, int slot)
+{
+ int ret = 0;
+
+ switch (key->type) {
+ case BTRFS_EXTENT_DATA_KEY:
+ ret = check_extent_data_item(root, leaf, key, slot);
+ break;
+ case BTRFS_EXTENT_CSUM_KEY:
+ ret = check_csum_item(root, leaf, key, slot);
+ break;
+ case BTRFS_DIR_ITEM_KEY:
+ case BTRFS_DIR_INDEX_KEY:
+ case BTRFS_XATTR_ITEM_KEY:
+ ret = check_dir_item(root, leaf, key, slot);
+ break;
+ case BTRFS_BLOCK_GROUP_ITEM_KEY:
+ ret = check_block_group_item(root->fs_info, leaf, key, slot);
+ break;
+ }
+ return ret;
+}
+
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+ bool check_item_data)
+{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+	/* No valid key type is 0, so any valid key must be larger than this one */
+ struct btrfs_key prev_key = {0, 0, 0};
+ struct btrfs_key key;
+ u32 nritems = btrfs_header_nritems(leaf);
+ int slot;
+
+ if (btrfs_header_level(leaf) != 0) {
+ generic_err(root, leaf, 0,
+ "invalid level for leaf, have %d expect 0",
+ btrfs_header_level(leaf));
+ return -EUCLEAN;
+ }
+
+ /*
+	 * Extent buffers from a relocation tree have an owner field that
+	 * corresponds to the subvolume tree they are based on. So from an
+	 * extent buffer alone we cannot determine the id of the corresponding
+	 * subvolume tree, and therefore cannot tell whether the buffer is the
+	 * root of the relocation tree or not. Skip this check for relocation
+	 * trees.
+ */
+ if (nritems == 0 && !btrfs_header_flag(leaf, BTRFS_HEADER_FLAG_RELOC)) {
+ u64 owner = btrfs_header_owner(leaf);
+ struct btrfs_root *check_root;
+
+ /* These trees must never be empty */
+ if (owner == BTRFS_ROOT_TREE_OBJECTID ||
+ owner == BTRFS_CHUNK_TREE_OBJECTID ||
+ owner == BTRFS_EXTENT_TREE_OBJECTID ||
+ owner == BTRFS_DEV_TREE_OBJECTID ||
+ owner == BTRFS_FS_TREE_OBJECTID ||
+ owner == BTRFS_DATA_RELOC_TREE_OBJECTID) {
+ generic_err(root, leaf, 0,
+ "invalid root, root %llu must never be empty",
+ owner);
+ return -EUCLEAN;
+ }
+ key.objectid = owner;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ check_root = btrfs_get_fs_root(fs_info, &key, false);
+ /*
+ * The only reason we also check NULL here is that during
+		 * open_ctree() some roots have not yet been set up.
+ */
+ if (!IS_ERR_OR_NULL(check_root)) {
+ struct extent_buffer *eb;
+
+ eb = btrfs_root_node(check_root);
+ /* if leaf is the root, then it's fine */
+ if (leaf != eb) {
+ CORRUPT("non-root leaf's nritems is 0",
+ leaf, check_root, 0);
+ free_extent_buffer(eb);
+ return -EUCLEAN;
+ }
+ free_extent_buffer(eb);
+ }
+ return 0;
+ }
+
+ if (nritems == 0)
+ return 0;
+
+ /*
+ * Check the following things to make sure this is a good leaf, and
+ * leaf users won't need to bother with similar sanity checks:
+ *
+ * 1) key ordering
+ * 2) item offset and size
+ * No overlap, no hole, all inside the leaf.
+ * 3) item content
+ * If possible, do comprehensive sanity check.
+ * NOTE: All checks must only rely on the item data itself.
+ */
+ for (slot = 0; slot < nritems; slot++) {
+ u32 item_end_expected;
+ int ret;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ /* Make sure the keys are in the right order */
+ if (btrfs_comp_cpu_keys(&prev_key, &key) >= 0) {
+ CORRUPT("bad key order", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /*
+		 * Make sure the offsets and ends are right; remember that the
+ * item data starts at the end of the leaf and grows towards the
+ * front.
+ */
+ if (slot == 0)
+ item_end_expected = BTRFS_LEAF_DATA_SIZE(root);
+ else
+ item_end_expected = btrfs_item_offset_nr(leaf,
+ slot - 1);
+ if (btrfs_item_end_nr(leaf, slot) != item_end_expected) {
+ CORRUPT("slot offset bad", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ /*
+ * Check to make sure that we don't point outside of the leaf,
+		 * just in case all the items are consistent with each other, but
+ * all point outside of the leaf.
+ */
+ if (btrfs_item_end_nr(leaf, slot) >
+ BTRFS_LEAF_DATA_SIZE(root)) {
+ CORRUPT("slot end outside of leaf", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+		/* Also check that the item data does not overlap the btrfs_item structs. */
+ if (btrfs_item_nr_offset(slot) + sizeof(struct btrfs_item) >
+ btrfs_item_ptr_offset(leaf, slot)) {
+ CORRUPT("slot overlap with its data", leaf, root, slot);
+ return -EUCLEAN;
+ }
+
+ if (check_item_data) {
+ /*
+ * Check if the item size and content meet other
+ * criteria
+ */
+ ret = check_leaf_item(root, leaf, &key, slot);
+ if (ret < 0)
+ return ret;
+ }
+
+ prev_key.objectid = key.objectid;
+ prev_key.type = key.type;
+ prev_key.offset = key.offset;
+ }
+
+ return 0;
+}
+
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, false);
+}
+
+int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
+{
+ unsigned long nr = btrfs_header_nritems(node);
+ struct btrfs_key key, next_key;
+ int slot;
+ int level = btrfs_header_level(node);
+ u64 bytenr;
+ int ret = 0;
+
+ if (level <= 0 || level >= BTRFS_MAX_LEVEL) {
+ generic_err(root, node, 0,
+ "invalid level for node, have %d expect [1, %d]",
+ level, BTRFS_MAX_LEVEL - 1);
+ return -EUCLEAN;
+ }
+ if (nr == 0 || nr > BTRFS_NODEPTRS_PER_BLOCK(root)) {
+ btrfs_crit(root->fs_info,
+"corrupt node: root=%llu block=%llu, nritems too %s, have %lu expect range [1,%zu]",
+ root->objectid, node->start,
+ nr == 0 ? "small" : "large", nr,
+ BTRFS_NODEPTRS_PER_BLOCK(root));
+ return -EUCLEAN;
+ }
+
+ for (slot = 0; slot < nr - 1; slot++) {
+ bytenr = btrfs_node_blockptr(node, slot);
+ btrfs_node_key_to_cpu(node, &key, slot);
+ btrfs_node_key_to_cpu(node, &next_key, slot + 1);
+
+ if (!bytenr) {
+ generic_err(root, node, slot,
+ "invalid NULL node pointer");
+ ret = -EUCLEAN;
+ goto out;
+ }
+ if (!IS_ALIGNED(bytenr, root->sectorsize)) {
+ generic_err(root, node, slot,
+ "unaligned pointer, have %llu should be aligned to %u",
+ bytenr, root->sectorsize);
+ ret = -EUCLEAN;
+ goto out;
+ }
+
+ if (btrfs_comp_cpu_keys(&key, &next_key) >= 0) {
+ generic_err(root, node, slot,
+ "bad key order, current (%llu %u %llu) next (%llu %u %llu)",
+ key.objectid, key.type, key.offset,
+ next_key.objectid, next_key.type,
+ next_key.offset);
+ ret = -EUCLEAN;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
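
For reference, the name-hash rule enforced in check_dir_item() above: DIR_ITEM and XATTR_ITEM keys carry a hash of the entry name in key->offset, and the checker recomputes it from the stored name. A minimal user-space sketch of the same validation idea, using a stand-in FNV-1a hash since the kernel's btrfs_name_hash() (crc32c-based) is not available here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for btrfs_name_hash(); the kernel helper is crc32c-based,
 * this FNV-1a variant only illustrates the shape of the check. */
static uint64_t name_hash(const char *name, size_t len)
{
        uint64_t h = 14695981039346656037ULL;
        size_t i;

        for (i = 0; i < len; i++) {
                h ^= (unsigned char)name[i];
                h *= 1099511628211ULL;
        }
        return h;
}

/* Mirror of the tree-checker rule: a dir item whose key offset does not
 * match the hash of its stored name is treated as corruption. */
static int check_name_hash(uint64_t key_offset, const char *name, size_t len)
{
        uint64_t expect = name_hash(name, len);

        if (key_offset != expect) {
                fprintf(stderr,
                        "name hash mismatch, have 0x%016llx expect 0x%016llx\n",
                        (unsigned long long)key_offset,
                        (unsigned long long)expect);
                return -1;      /* the kernel code returns -EUCLEAN here */
        }
        return 0;
}

int main(void)
{
        const char *name = "foo";
        uint64_t good = name_hash(name, strlen(name));

        /* A matching offset passes, a corrupted one is rejected. */
        return check_name_hash(good, name, strlen(name)) ||
               !check_name_hash(good + 1, name, strlen(name));
}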
diff --git a/fs/btrfs/tree-checker.h b/fs/btrfs/tree-checker.h
new file mode 100644
index 00000000000000..3d53e8d6fda0ca
--- /dev/null
+++ b/fs/btrfs/tree-checker.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) Qu Wenruo 2017. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program.
+ */
+
+#ifndef __BTRFS_TREE_CHECKER__
+#define __BTRFS_TREE_CHECKER__
+
+#include "ctree.h"
+#include "extent_io.h"
+
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf);
+int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
+
+#endif
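
A plausible calling pattern for the two entry points, sketched under the assumption (not stated in the header itself) that the full check runs before a block is written out and the relaxed check when a block is first read in:

#include <stdbool.h>

/* Opaque stand-ins so the sketch compiles outside the kernel tree. */
struct btrfs_root;
struct extent_buffer;
int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_check_leaf_relaxed(struct btrfs_root *root,
                             struct extent_buffer *leaf);

/* Assumed split: content-level checking before write-out, cheap
 * pointer-only checking on first read. */
static inline int validate_leaf(struct btrfs_root *root,
                                struct extent_buffer *leaf, bool write_path)
{
        return write_path ? btrfs_check_leaf_full(root, leaf)
                          : btrfs_check_leaf_relaxed(root, leaf);
}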
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 738f5d6beb95eb..c7190f32257697 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2961,8 +2961,11 @@ out_wake_log_root:
mutex_unlock(&log_root_tree->log_mutex);
/*
- * The barrier before waitqueue_active is implied by mutex_unlock
+ * The barrier before waitqueue_active is needed so all the updates
+ * above are seen by the woken threads. It might not be necessary, but
+ * proving that seems to be hard.
*/
+ smp_mb();
if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
wake_up(&log_root_tree->log_commit_wait[index2]);
out:
@@ -2973,8 +2976,11 @@ out:
mutex_unlock(&root->log_mutex);
/*
- * The barrier before waitqueue_active is implied by mutex_unlock
+ * The barrier before waitqueue_active is needed so all the updates
+ * above are seen by the woken threads. It might not be necessary, but
+ * proving that seems to be hard.
*/
+ smp_mb();
if (waitqueue_active(&root->log_commit_wait[index1]))
wake_up(&root->log_commit_wait[index1]);
return ret;
@@ -3315,9 +3321,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
}
btrfs_release_path(path);
- /* find the first key from this transaction again */
+ /*
+ * Find the first key from this transaction again. See the note for
+	 * log_new_dir_dentries: if we're logging a directory recursively we
+ * won't be holding its i_mutex, which means we can modify the directory
+ * while we're logging it. If we remove an entry between our first
+ * search and this search we'll not find the key again and can just
+ * bail.
+ */
ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
- if (WARN_ON(ret != 0))
+ if (ret != 0)
goto done;
/*
@@ -5234,9 +5247,33 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
dir_inode = btrfs_iget(root->fs_info->sb, &inode_key,
root, NULL);
- /* If parent inode was deleted, skip it. */
- if (IS_ERR(dir_inode))
- continue;
+ /*
+			 * If the parent inode was deleted, return an error to
+			 * fall back to a transaction commit. This prevents an
+			 * inode that was moved from one parent A to a parent B,
+			 * had its former parent A deleted and was then fsync'ed,
+			 * from existing at both parents after a log replay (with
+			 * the old parent B still existing).
+ * Example:
+ *
+ * mkdir /mnt/A
+ * mkdir /mnt/B
+ * touch /mnt/B/bar
+ * sync
+ * mv /mnt/B/bar /mnt/A/bar
+ * mv -T /mnt/A /mnt/B
+ * fsync /mnt/B/bar
+ * <power fail>
+ *
+ * If we ignore the old parent B which got deleted,
+ * after a log replay we would have file bar linked
+ * at both parents and the old parent B would still
+ * exist.
+ */
+ if (IS_ERR(dir_inode)) {
+ ret = PTR_ERR(dir_inode);
+ goto out;
+ }
ret = btrfs_log_inode(trans, root, dir_inode,
LOG_INODE_ALL, 0, LLONG_MAX, ctx);
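
The smp_mb() additions above close the classic missed-wakeup window between updating state and testing waitqueue_active(). A compile-clean sketch of the pairing using C11 fences, where 'condition' and 'queued' stand in for the log-commit state and waitqueue_active():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool condition;
static atomic_bool queued;

static void waker(void)
{
        atomic_store_explicit(&condition, true, memory_order_relaxed);
        /* Without this fence the waiter's enqueue could be missed: the
         * condition store and the queued load may be reordered. */
        atomic_thread_fence(memory_order_seq_cst);      /* smp_mb() */
        if (atomic_load_explicit(&queued, memory_order_relaxed))
                puts("wake_up()");
}

static void waiter(void)
{
        atomic_store_explicit(&queued, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);      /* paired barrier */
        if (!atomic_load_explicit(&condition, memory_order_relaxed))
                puts("schedule() until woken");
}

int main(void)
{
        /* Single-threaded walk through the protocol, ordering only. */
        waiter();
        waker();
        return 0;
}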
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index b4d63a9842fac6..d1cca19b29d33b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1184,7 +1184,7 @@ again:
struct map_lookup *map;
int i;
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
u64 end;
@@ -2757,7 +2757,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
free_extent_map(em);
return -EINVAL;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
lock_chunks(root->fs_info->chunk_root);
check_system_chunk(trans, extent_root, map->type);
unlock_chunks(root->fs_info->chunk_root);
@@ -4540,7 +4540,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (type & BTRFS_BLOCK_GROUP_DATA) {
max_stripe_size = 1024 * 1024 * 1024;
- max_chunk_size = 10 * max_stripe_size;
+ max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
if (!devs_max)
devs_max = BTRFS_MAX_DEVS(info->chunk_root);
} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
@@ -4731,7 +4731,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
goto error;
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
- em->bdev = (struct block_device *)map;
+ em->map_lookup = map;
em->start = start;
em->len = num_bytes;
em->block_start = 0;
@@ -4826,7 +4826,7 @@ int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
return -EINVAL;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
item_size = btrfs_chunk_item_size(map->num_stripes);
stripe_size = em->orig_block_len;
@@ -4968,7 +4968,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
if (!em)
return 1;
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev->missing) {
miss_ndevs++;
@@ -5048,7 +5048,7 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return 1;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
ret = map->num_stripes;
else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
@@ -5091,7 +5091,7 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical);
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
len = map->stripe_len * nr_data_stripes(map);
free_extent_map(em);
@@ -5112,7 +5112,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
BUG_ON(!em);
BUG_ON(em->start > logical || em->start + em->len < logical);
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
ret = 1;
free_extent_map(em);
@@ -5271,7 +5271,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
return -EINVAL;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
offset = logical - em->start;
stripe_len = map->stripe_len;
@@ -5813,7 +5813,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
free_extent_map(em);
return -EIO;
}
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
length = em->len;
rmap_len = map->stripe_len;
@@ -6208,6 +6208,101 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
return dev;
}
+/* Return -EIO on any error, otherwise return 0. */
+static int btrfs_check_chunk_valid(struct btrfs_root *root,
+ struct extent_buffer *leaf,
+ struct btrfs_chunk *chunk, u64 logical)
+{
+ u64 length;
+ u64 stripe_len;
+ u16 num_stripes;
+ u16 sub_stripes;
+ u64 type;
+ u64 features;
+ bool mixed = false;
+
+ length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+ sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+ type = btrfs_chunk_type(leaf, chunk);
+
+ if (!num_stripes) {
+ btrfs_err(root->fs_info, "invalid chunk num_stripes: %u",
+ num_stripes);
+ return -EIO;
+ }
+ if (!IS_ALIGNED(logical, root->sectorsize)) {
+ btrfs_err(root->fs_info,
+ "invalid chunk logical %llu", logical);
+ return -EIO;
+ }
+ if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) {
+ btrfs_err(root->fs_info, "invalid chunk sectorsize %u",
+ btrfs_chunk_sector_size(leaf, chunk));
+ return -EIO;
+ }
+ if (!length || !IS_ALIGNED(length, root->sectorsize)) {
+ btrfs_err(root->fs_info,
+ "invalid chunk length %llu", length);
+ return -EIO;
+ }
+ if (!is_power_of_2(stripe_len)) {
+ btrfs_err(root->fs_info, "invalid chunk stripe length: %llu",
+ stripe_len);
+ return -EIO;
+ }
+ if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+ type) {
+ btrfs_err(root->fs_info, "unrecognized chunk type: %llu",
+ ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
+ BTRFS_BLOCK_GROUP_PROFILE_MASK) &
+ btrfs_chunk_type(leaf, chunk));
+ return -EIO;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
+ btrfs_err(root->fs_info, "missing chunk type flag: 0x%llx", type);
+ return -EIO;
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
+ (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
+ btrfs_err(root->fs_info,
+ "system chunk with data or metadata type: 0x%llx", type);
+ return -EIO;
+ }
+
+ features = btrfs_super_incompat_flags(root->fs_info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+ mixed = true;
+
+ if (!mixed) {
+ if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
+ (type & BTRFS_BLOCK_GROUP_DATA)) {
+ btrfs_err(root->fs_info,
+ "mixed chunk type in non-mixed mode: 0x%llx", type);
+ return -EIO;
+ }
+ }
+
+ if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes != 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
+ (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
+ (type & BTRFS_BLOCK_GROUP_DUP && num_stripes != 2) ||
+ ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
+ num_stripes != 1)) {
+ btrfs_err(root->fs_info,
+ "invalid num_stripes:sub_stripes %u:%u for profile %llu",
+ num_stripes, sub_stripes,
+ type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
struct extent_buffer *leaf,
struct btrfs_chunk *chunk)
@@ -6217,6 +6312,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
struct extent_map *em;
u64 logical;
u64 length;
+ u64 stripe_len;
u64 devid;
u8 uuid[BTRFS_UUID_SIZE];
int num_stripes;
@@ -6225,6 +6321,12 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
logical = key->offset;
length = btrfs_chunk_length(leaf, chunk);
+ stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
+ num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+
+ ret = btrfs_check_chunk_valid(root, leaf, chunk, logical);
+ if (ret)
+ return ret;
read_lock(&map_tree->map_tree.lock);
em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
@@ -6241,7 +6343,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
em = alloc_extent_map();
if (!em)
return -ENOMEM;
- num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
free_extent_map(em);
@@ -6249,7 +6350,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
}
set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
- em->bdev = (struct block_device *)map;
+ em->map_lookup = map;
em->start = logical;
em->len = length;
em->orig_start = 0;
@@ -6473,6 +6574,7 @@ int btrfs_read_sys_array(struct btrfs_root *root)
u32 array_size;
u32 len = 0;
u32 cur_offset;
+ u64 type;
struct btrfs_key key;
ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
@@ -6539,6 +6641,15 @@ int btrfs_read_sys_array(struct btrfs_root *root)
break;
}
+ type = btrfs_chunk_type(sb, chunk);
+ if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
+ btrfs_err(root->fs_info,
+ "invalid chunk type %llu in sys_array at offset %u",
+ type, cur_offset);
+ ret = -EIO;
+ break;
+ }
+
len = btrfs_chunk_item_size(num_stripes);
if (cur_offset + len > array_size)
goto out_short_read;
@@ -6948,7 +7059,7 @@ void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
/* In order to kick the device replace finish process */
lock_chunks(root);
list_for_each_entry(em, &transaction->pending_chunks, list) {
- map = (struct map_lookup *)em->bdev;
+ map = em->map_lookup;
for (i = 0; i < map->num_stripes; i++) {
dev = map->stripes[i].dev;
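
The profile-flag rule shared by btrfs_check_chunk_valid() and check_block_group_item() is simply "at most one RAID profile bit set". A small stand-alone sketch with illustrative flag values (the real BTRFS_BLOCK_GROUP_* constants live in the kernel headers):

#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values, stand-ins for BTRFS_BLOCK_GROUP_*. */
#define BG_DATA      (1ULL << 0)
#define BG_SYSTEM    (1ULL << 1)
#define BG_METADATA  (1ULL << 2)
#define BG_RAID0     (1ULL << 3)
#define BG_RAID1     (1ULL << 4)
#define PROFILE_MASK (BG_RAID0 | BG_RAID1)

/* Same rule as the checkers above: more than one profile bit set is
 * corruption. hweight64() in the kernel is this popcount. */
static int check_profile(uint64_t flags)
{
        if (__builtin_popcountll(flags & PROFILE_MASK) > 1) {
                fprintf(stderr, "invalid profile flags 0x%llx\n",
                        (unsigned long long)(flags & PROFILE_MASK));
                return -1;
        }
        return 0;
}

int main(void)
{
        return check_profile(BG_DATA | BG_RAID1) ||               /* accepted */
               !check_profile(BG_DATA | BG_RAID0 | BG_RAID1);     /* rejected */
}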
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index d5c84f6b13538a..3c651df420be3b 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -24,6 +24,8 @@
#include <linux/btrfs.h>
#include "async-thread.h"
+#define BTRFS_MAX_DATA_CHUNK_SIZE (10ULL * SZ_1G)
+
extern struct mutex uuid_mutex;
#define BTRFS_STRIPE_LEN (64 * 1024)
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index c4b893453e0eef..a5f59eed828790 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -194,7 +194,6 @@ wait_for_old_object:
pr_err("\n");
pr_err("Error: Unexpected object collision\n");
cachefiles_printk_object(object, xobject);
- BUG();
}
atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock);
@@ -318,7 +317,7 @@ try_again:
trap = lock_rename(cache->graveyard, dir);
/* do some checks before getting the grave dentry */
- if (rep->d_parent != dir) {
+ if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
/* the entry was probably culled when we dropped the parent dir
* lock */
unlock_rename(cache->graveyard, dir);
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index c0f3da3926a0c6..c05ab2ec0fef38 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
struct cachefiles_one_read *monitor =
container_of(wait, struct cachefiles_one_read, monitor);
struct cachefiles_object *object;
+ struct fscache_retrieval *op = monitor->op;
struct wait_bit_key *key = _key;
struct page *page = wait->private;
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
list_del(&wait->task_list);
/* move onto the action list and queue for FS-Cache thread pool */
- ASSERT(monitor->op);
+ ASSERT(op);
- object = container_of(monitor->op->op.object,
- struct cachefiles_object, fscache);
+	/* We need to temporarily bump the usage count as we don't own a ref
+	 * here; otherwise cachefiles_read_copier() may free the op between the
+ * monitor being enqueued on the op->to_do list and the op getting
+ * enqueued on the work queue.
+ */
+ fscache_get_retrieval(op);
+ object = container_of(op->op.object, struct cachefiles_object, fscache);
spin_lock(&object->work_lock);
- list_add_tail(&monitor->op_link, &monitor->op->to_do);
+ list_add_tail(&monitor->op_link, &op->to_do);
spin_unlock(&object->work_lock);
- fscache_enqueue_retrieval(monitor->op);
+ fscache_enqueue_retrieval(op);
+ fscache_put_retrieval(op);
return 0;
}
@@ -956,11 +963,8 @@ error:
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
struct cachefiles_object *object;
- struct cachefiles_cache *cache;
object = container_of(_object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
_enter("%p,{%lu}", object, page->index);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0e3de1bb650044..e7b54514d99ab2 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -3243,7 +3243,6 @@ retry:
tcap->cap_id = t_cap_id;
tcap->seq = t_seq - 1;
tcap->issue_seq = t_seq - 1;
- tcap->mseq = t_mseq;
tcap->issued |= issued;
tcap->implemented |= issued;
if (cap == ci->i_auth_cap)
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4aa7122a8d38c1..a485d0cdc559c0 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -611,7 +611,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ if (list_empty(&ci->i_snap_flush_item))
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 0a3544fb50f920..7bc6d27d47a406 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -123,25 +123,41 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "CIFS Version %s\n", CIFS_VERSION);
seq_printf(m, "Features:");
#ifdef CONFIG_CIFS_DFS_UPCALL
- seq_printf(m, " dfs");
+ seq_printf(m, " DFS");
#endif
#ifdef CONFIG_CIFS_FSCACHE
- seq_printf(m, " fscache");
+ seq_printf(m, ",FSCACHE");
+#endif
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ seq_printf(m, ",SMB_DIRECT");
+#endif
+#ifdef CONFIG_CIFS_STATS2
+ seq_printf(m, ",STATS2");
+#elif defined(CONFIG_CIFS_STATS)
+ seq_printf(m, ",STATS");
+#endif
+#ifdef CONFIG_CIFS_DEBUG2
+ seq_printf(m, ",DEBUG2");
+#elif defined(CONFIG_CIFS_DEBUG)
+ seq_printf(m, ",DEBUG");
+#endif
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+ seq_printf(m, ",ALLOW_INSECURE_LEGACY");
#endif
#ifdef CONFIG_CIFS_WEAK_PW_HASH
- seq_printf(m, " lanman");
+ seq_printf(m, ",WEAK_PW_HASH");
#endif
#ifdef CONFIG_CIFS_POSIX
- seq_printf(m, " posix");
+ seq_printf(m, ",CIFS_POSIX");
#endif
#ifdef CONFIG_CIFS_UPCALL
- seq_printf(m, " spnego");
+ seq_printf(m, ",UPCALL(SPNEGO)");
#endif
#ifdef CONFIG_CIFS_XATTR
- seq_printf(m, " xattr");
+ seq_printf(m, ",XATTR");
#endif
#ifdef CONFIG_CIFS_ACL
- seq_printf(m, " acl");
+ seq_printf(m, ",ACL");
#endif
seq_putc(m, '\n');
seq_printf(m, "Active VFS Requests: %d\n", GlobalTotalActiveXid);
@@ -269,6 +285,13 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
+ atomic_set(&tcpSesReconnectCount, 0);
+ atomic_set(&tconInfoReconnectCount, 0);
+
+ spin_lock(&GlobalMid_Lock);
+ GlobalMaxActiveXid = 0;
+ GlobalCurrentXid = 0;
+ spin_unlock(&GlobalMid_Lock);
spin_lock(&cifs_tcp_ses_lock);
list_for_each(tmp1, &cifs_tcp_ses_list) {
server = list_entry(tmp1, struct TCP_Server_Info,
@@ -281,6 +304,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
struct cifs_tcon,
tcon_list);
atomic_set(&tcon->num_smbs_sent, 0);
+ spin_lock(&tcon->stat_lock);
+ tcon->bytes_read = 0;
+ tcon->bytes_written = 0;
+ spin_unlock(&tcon->stat_lock);
if (server->ops->clear_stats)
server->ops->clear_stats(tcon);
}
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 6908080e9b6d88..e3f2b7370bd8ba 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -143,8 +143,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
sprintf(dp, ";sec=krb5");
else if (server->sec_mskerberos)
sprintf(dp, ";sec=mskrb5");
- else
- goto out;
+ else {
+		cifs_dbg(VFS, "unknown or missing server auth type, using krb5\n");
+ sprintf(dp, ";sec=krb5");
+ }
dp = description + strlen(description);
sprintf(dp, ";uid=0x%x",
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index a0b3e7d1be484f..211ac472cb9dc6 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -101,9 +101,6 @@ convert_sfm_char(const __u16 src_char, char *target)
case SFM_LESSTHAN:
*target = '<';
break;
- case SFM_SLASH:
- *target = '\\';
- break;
case SFM_SPACE:
*target = ' ';
break;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 8632380d2b9411..b9b8f19dce0e1d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -150,8 +150,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* greater than cifs socket timeout which is 7 seconds
*/
while (server->tcpStatus == CifsNeedReconnect) {
- wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+ rc = wait_event_interruptible_timeout(server->response_q,
+ (server->tcpStatus != CifsNeedReconnect),
+ 10 * HZ);
+ if (rc < 0) {
+			cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+				 " received by the process\n", __func__);
+ return -ERESTARTSYS;
+ }
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
@@ -571,10 +577,15 @@ CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
}
count = 0;
+ /*
+ * We know that all the name entries in the protocols array
+ * are short (< 16 bytes anyway) and are NUL terminated.
+ */
for (i = 0; i < CIFS_NUM_PROT; i++) {
- strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
- count += strlen(protocols[i].name) + 1;
- /* null at end of source and target buffers anyway */
+ size_t len = strlen(protocols[i].name) + 1;
+
+ memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+ count += len;
}
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
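
The strncpy-to-memcpy change above packs NUL-terminated dialect names back to back; copying strlen()+1 bytes moves exactly the name plus its terminator, where strncpy with a fixed bound would also zero-pad to the bound. A stand-alone sketch of the same packing (the dialect strings here are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Illustrative dialect names, short and NUL-terminated like the
         * protocols[] entries the comment above describes. */
        const char *protocols[] = { "LANMAN2.1", "NT LM 0.12" };
        char buf[64];
        size_t count = 0;
        size_t i;

        for (i = 0; i < sizeof(protocols) / sizeof(protocols[0]); i++) {
                size_t len = strlen(protocols[i]) + 1;  /* include the NUL */

                /* memcpy copies exactly len bytes; strncpy(dst, src, 16)
                 * would zero-pad to the bound instead. */
                memcpy(buf + count, protocols[i], len);
                count += len;
        }
        printf("packed %zu bytes\n", count);
        return 0;
}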
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9c6041efd30101..a4bcf28f0cf1d1 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -48,6 +48,7 @@
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
+#include "dns_resolve.h"
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
@@ -304,6 +305,53 @@ static int cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
const char *devname);
/*
+ * Resolve the hostname and set the IP address in the TCP session. Useful for
+ * hostnames whose IP addresses may change at some point.
+ *
+ * This should be called with server->srv_mutex held.
+ */
+#ifdef CONFIG_CIFS_DFS_UPCALL
+static int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+ int rc;
+ int len;
+ char *unc, *ipaddr = NULL;
+
+ if (!server->hostname)
+ return -EINVAL;
+
+ len = strlen(server->hostname) + 3;
+
+ unc = kmalloc(len, GFP_KERNEL);
+ if (!unc) {
+ cifs_dbg(FYI, "%s: failed to create UNC path\n", __func__);
+ return -ENOMEM;
+ }
+ snprintf(unc, len, "\\\\%s", server->hostname);
+
+ rc = dns_resolve_server_name_to_ip(unc, &ipaddr);
+ kfree(unc);
+
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: failed to resolve server part of %s to IP: %d\n",
+ __func__, server->hostname, rc);
+ return rc;
+ }
+
+ rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr,
+ strlen(ipaddr));
+ kfree(ipaddr);
+
+ return !rc ? -1 : 0;
+}
+#else
+static inline int reconn_set_ipaddr(struct TCP_Server_Info *server)
+{
+ return 0;
+}
+#endif
+
+/*
* cifs tcp session reconnection
*
* mark tcp session as reconnecting so temporarily locked
@@ -400,6 +448,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
rc = generic_ip_connect(server);
if (rc) {
cifs_dbg(FYI, "reconnect error %d\n", rc);
+ rc = reconn_set_ipaddr(server);
+ if (rc) {
+ cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
+ __func__, rc);
+ }
mutex_unlock(&server->srv_mutex);
msleep(3000);
} else {
@@ -3674,6 +3727,9 @@ try_mount_again:
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
tcon = NULL;
+ if (rc == -EACCES)
+ goto mount_fail_check;
+
goto remote_path_check;
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 76dacd5307b9a9..afd317eb9db9f1 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -163,7 +163,7 @@ cifs_bp_rename_retry:
cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
- full_path[dfsplen] = '\\';
+ full_path[dfsplen] = dirsep;
for (i = 0; i < pplen-1; i++)
if (full_path[dfsplen+1+i] == '/')
full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 0141aba9eca602..0305e386621694 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1073,14 +1073,18 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf) {
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
free_xid(xid);
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1404,12 +1408,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -2745,14 +2753,16 @@ cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
* these pages but not on the region from pos to ppos+len-1.
*/
written = cifs_user_writev(iocb, from);
- if (written > 0 && CIFS_CACHE_READ(cinode)) {
+ if (CIFS_CACHE_READ(cinode)) {
/*
- * Windows 7 server can delay breaking level2 oplock if a write
- * request comes - break it on the client to prevent reading
- * an old data.
+ * We have read level caching and we have just sent a write
+		 * request to the server, making the data in the cache stale.
+ * Zap the cache and set oplock/lease level to NONE to avoid
+ * reading stale data from the cache. All subsequent read
+ * operations will read new data from the server.
*/
cifs_zap_mapping(inode);
- cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
+ cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
inode);
cinode->oplock = 0;
}
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9cdeb029326751..5c3187df9ab9d5 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -756,7 +756,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
} else if (rc == -EREMOTE) {
cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
- } else if (rc == -EACCES && backup_cred(cifs_sb)) {
+ } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
+ (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
+ == 0)) {
+ /*
+		 * For SMB2 and later the backup intent flag is already
+		 * sent if needed on open, and there is no path-based
+		 * FindFirst operation to retry with.
+ */
+
srchinf = kzalloc(sizeof(struct cifs_search_info),
GFP_KERNEL);
if (srchinf == NULL) {
@@ -1063,6 +1071,8 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid,
if (!server->ops->set_file_info)
return -ENOSYS;
+ info_buf.Pad = 0;
+
if (attrs->ia_valid & ATTR_ATIME) {
set_time = true;
info_buf.LastAccessTime =
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index e3548f73bdeaa9..728289c32b32ff 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -419,7 +419,7 @@ smb3_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_II;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct smb2_file_all_info *pfile_info = NULL;
oparms.tcon = tcon;
@@ -481,7 +481,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_io_parms io_parms;
int create_options = CREATE_NOT_DIR;
__le16 *utf16_path;
- __u8 oplock = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+ __u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct kvec iov[2];
if (backup_cred(cifs_sb))
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 0cc699d9b9329c..61a09ab2752e2b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -406,9 +406,17 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
+ size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
if (get_bcc(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
+ if (data_offset >
+ len - sizeof(struct file_notify_information)) {
+ cifs_dbg(FYI, "invalid data_offset %u\n",
+ data_offset);
+ return true;
+ }
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 97d1a15873c55f..43fa471c88d7c0 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -373,8 +373,15 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
pfData->FileNameLength;
- } else
- new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+ } else {
+ u32 next_offset = le32_to_cpu(pDirInfo->NextEntryOffset);
+
+ if (old_entry + next_offset < old_entry) {
+ cifs_dbg(VFS, "invalid offset %u\n", next_offset);
+ return NULL;
+ }
+ new_entry = old_entry + next_offset;
+ }
cifs_dbg(FYI, "new entry %p old entry %p\n", new_entry, old_entry);
/* validate that new_entry is not past end of SMB */
if (new_entry >= end_of_smb) {
@@ -645,7 +652,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
/* scan and find it */
int i;
char *cur_ent;
- char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
+ char *end_of_smb;
+
+ if (cfile->srch_inf.ntwrk_buf_start == NULL) {
+ cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
+ return -EIO;
+ }
+
+ end_of_smb = cfile->srch_inf.ntwrk_buf_start +
server->ops->calc_smb_size(
cfile->srch_inf.ntwrk_buf_start);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index a035d1a958824b..9bc7a29f88d65a 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -398,6 +398,12 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
goto setup_ntlmv2_ret;
}
*pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+ *buflen = 0;
+ goto setup_ntlmv2_ret;
+ }
sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b2aff0c6f22c52..dee5250701deb3 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -123,12 +123,14 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
/*
* Accessing maxBuf is racy with cifs_reconnect - need to store value
- * and check it for zero before using.
+ * and check it before using.
*/
max_buf = tcon->ses->server->maxBuf;
- if (!max_buf)
+ if (max_buf < sizeof(struct smb2_lock_element))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf)
@@ -265,6 +267,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf) {
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1238cd3552f9cc..0267d8cbc9966c 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -267,7 +267,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
- (buf->LastWriteTime == 0) && (buf->ChangeTime) &&
+ (buf->LastWriteTime == 0) && (buf->ChangeTime == 0) &&
(buf->Attributes == 0))
return 0; /* would be a no op, no sense sending this */
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 8257a5a97cc034..98c25b969ab879 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -377,8 +377,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
- {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
- {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
+ {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
+ {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
"STATUS_CTL_FILE_NOT_SUPPORTED"},
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 76ccf20fbfb7bd..0e62bf1ebbd735 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -185,6 +185,13 @@ smb2_check_message(char *buf, unsigned int length)
return 0;
/*
+	 * Some Windows servers (win2016) will also pad the final
+ * PDU in a compound to 8 bytes.
+ */
+ if (((clc_len + 7) & ~7) == len)
+ return 0;
+
+ /*
* MacOS server pads after SMB2.1 write response with 3 bytes
* of junk. Other servers match RFC1001 len to actual
* SMB2/SMB3 frame length (header + smb2 response specific data)
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e6b1795fbf2a36..eae3cdffaf7f0e 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -143,14 +143,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
scredits = server->credits;
/* can deadlock with reopen */
- if (scredits == 1) {
+ if (scredits <= 8) {
*num = SMB2_MAX_BUFFER_SIZE;
*credits = 0;
break;
}
- /* leave one credit for a possible reopen */
- scredits--;
+ /* leave some credits for reopen and other ops */
+ scredits -= 8;
*num = min_t(unsigned int, size,
scredits * SMB2_MAX_BUFFER_SIZE);
@@ -914,7 +914,7 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
}
srch_inf->entries_in_buffer = 0;
- srch_inf->index_of_last_entry = 0;
+ srch_inf->index_of_last_entry = 2;
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 807e989f436aa7..5e21d58c49ef85 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -158,7 +158,7 @@ out:
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
- int rc = 0;
+ int rc;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
@@ -169,10 +169,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
* for those three - in the calling routine.
*/
if (tcon == NULL)
- return rc;
+ return 0;
if (smb2_command == SMB2_TREE_CONNECT)
- return rc;
+ return 0;
if (tcon->tidStatus == CifsExiting) {
/*
@@ -215,8 +215,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
return -EAGAIN;
}
- wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+ rc = wait_event_interruptible_timeout(server->response_q,
+ (server->tcpStatus != CifsNeedReconnect),
+ 10 * HZ);
+ if (rc < 0) {
+		cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+			 " received by the process\n", __func__);
+ return -ERESTARTSYS;
+ }
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
@@ -234,7 +240,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
}
if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
- return rc;
+ return 0;
nls_codepage = load_nls_default();
@@ -309,7 +315,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
if (tcon != NULL) {
-#ifdef CONFIG_CIFS_STATS2
+#ifdef CONFIG_CIFS_STATS
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
@@ -2396,33 +2402,38 @@ num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
int len;
unsigned int entrycount = 0;
unsigned int next_offset = 0;
- FILE_DIRECTORY_INFO *entryptr;
+ char *entryptr;
+ FILE_DIRECTORY_INFO *dir_info;
if (bufstart == NULL)
return 0;
- entryptr = (FILE_DIRECTORY_INFO *)bufstart;
+ entryptr = bufstart;
while (1) {
- entryptr = (FILE_DIRECTORY_INFO *)
- ((char *)entryptr + next_offset);
-
- if ((char *)entryptr + size > end_of_buf) {
+ if (entryptr + next_offset < entryptr ||
+ entryptr + next_offset > end_of_buf ||
+ entryptr + next_offset + size > end_of_buf) {
cifs_dbg(VFS, "malformed search entry would overflow\n");
break;
}
- len = le32_to_cpu(entryptr->FileNameLength);
- if ((char *)entryptr + len + size > end_of_buf) {
+ entryptr = entryptr + next_offset;
+ dir_info = (FILE_DIRECTORY_INFO *)entryptr;
+
+ len = le32_to_cpu(dir_info->FileNameLength);
+ if (entryptr + len < entryptr ||
+ entryptr + len > end_of_buf ||
+ entryptr + len + size > end_of_buf) {
cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
end_of_buf);
break;
}
- *lastentry = (char *)entryptr;
+ *lastentry = entryptr;
entrycount++;
- next_offset = le32_to_cpu(entryptr->NextEntryOffset);
+ next_offset = le32_to_cpu(dir_info->NextEntryOffset);
if (!next_offset)
break;
}
@@ -2512,8 +2523,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
if (rc == -ENODATA && rsp->hdr.Status == STATUS_NO_MORE_FILES) {
srch_inf->endOfSearch = true;
rc = 0;
- }
- cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
+ } else
+ cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
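
The reworked num_entries() treats every pointer advance as untrusted: offsets taken from the wire are checked for wrap-around and against the end of the buffer before the entry is dereferenced. A user-space sketch of the same hardened walk, with a reduced stand-in for FILE_DIRECTORY_INFO (the wrap test mirrors the kernel's pointer-comparison style):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for FILE_DIRECTORY_INFO: just the fields the walk needs. */
struct dir_entry {
        uint32_t next_offset;   /* 0 terminates the list */
        uint32_t name_len;
};

/* Every advance is checked for wrap-around and against end before the
 * entry is touched, as in the patched num_entries() above. */
static unsigned count_entries(char *buf, char *end, size_t hdr_size)
{
        char *p = buf;
        uint32_t next = 0;
        unsigned n = 0;

        for (;;) {
                if (p + next < p || p + next > end ||
                    p + next + hdr_size > end)
                        break;          /* malformed entry would overflow */
                p += next;

                struct dir_entry e;
                memcpy(&e, p, sizeof(e));
                if (p + e.name_len + hdr_size > end)
                        break;          /* name would overflow the frame */
                n++;
                next = e.next_offset;
                if (!next)
                        break;
        }
        return n;
}

int main(void)
{
        char buf[64] = {0};
        struct dir_entry e = { .next_offset = 0, .name_len = 4 };

        memcpy(buf, &e, sizeof(e));
        printf("%u entries\n",
               count_entries(buf, buf + sizeof(buf), sizeof(e)));
        return 0;
}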
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index aacb15bd56fe99..f087158c5555b4 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -82,8 +82,8 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define MAX_SMB2_HDR_SIZE 204
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 54af10204e83c6..1cf0a336ec06b3 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -360,7 +360,7 @@ uncork:
if (rc < 0 && rc != -EINTR)
cifs_dbg(VFS, "Error %d sending data on socket to server\n",
rc);
- else
+ else if (rc > 0)
rc = 0;
return rc;
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index 0525ebc3aea210..66e8c5d58b217b 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -64,7 +64,7 @@ static void fill_item_path(struct config_item * item, char * buffer, int length)
/* back up enough to print this bus id with '/' */
length -= cur;
- strncpy(buffer + length,config_item_name(p),cur);
+ memcpy(buffer + length, config_item_name(p), cur);
*(buffer + --length) = '/';
}
}
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c
index b862bc219cd7c7..ae59ba9ce0ae47 100644
--- a/fs/cramfs/inode.c
+++ b/fs/cramfs/inode.c
@@ -186,7 +186,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i
continue;
blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
blk_offset += offset;
- if (blk_offset + len > BUFFER_SIZE)
+ if (blk_offset > BUFFER_SIZE ||
+ blk_offset + len > BUFFER_SIZE)
continue;
return read_buffers[i] + blk_offset;
}
diff --git a/fs/dcache.c b/fs/dcache.c
index a69e9b42814a38..695c09652b36e6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -278,7 +278,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
spin_unlock(&dentry->d_lock);
name->name = p->name;
} else {
- memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
+ memcpy(name->inline_name, dentry->d_iname,
+ dentry->d_name.len + 1);
spin_unlock(&dentry->d_lock);
name->name = name->inline_name;
}
@@ -1154,15 +1155,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
*/
void shrink_dcache_sb(struct super_block *sb)
{
- long freed;
-
do {
LIST_HEAD(dispose);
- freed = list_lru_walk(&sb->s_dentry_lru,
+ list_lru_walk(&sb->s_dentry_lru,
dentry_lru_isolate_shrink, &dispose, 1024);
-
- this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
cond_resched();
} while (list_lru_count(&sb->s_dentry_lru) > 0);
@@ -1513,7 +1510,7 @@ static void check_and_drop(void *_data)
{
struct detach_data *data = _data;
- if (!data->mountpoint && !data->select.found)
+ if (!data->mountpoint && list_empty(&data->select.dispose))
__d_drop(data->select.start);
}
@@ -1555,17 +1552,15 @@ void d_invalidate(struct dentry *dentry)
d_walk(dentry, &data, detach_and_collect, check_and_drop);
- if (data.select.found)
+ if (!list_empty(&data.select.dispose))
shrink_dentry_list(&data.select.dispose);
+ else if (!data.mountpoint)
+ return;
if (data.mountpoint) {
detach_mounts(data.mountpoint);
dput(data.mountpoint);
}
-
- if (!data.mountpoint && !data.select.found)
- break;
-
cond_resched();
}
}
@@ -1954,10 +1949,12 @@ struct dentry *d_make_root(struct inode *root_inode)
static const struct qstr name = QSTR_INIT("/", 1);
res = __d_alloc(root_inode->i_sb, &name);
- if (res)
+ if (res) {
+ res->d_flags |= DCACHE_RCUACCESS;
d_instantiate(res, root_inode);
- else
+ } else {
iput(root_inode);
+ }
}
return res;
}
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index d4b8462d823008..05b1145cee81a4 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -734,6 +734,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *dentry = NULL, *trap;
struct name_snapshot old_name;
+ if (IS_ERR(old_dir))
+ return old_dir;
+ if (IS_ERR(new_dir))
+ return new_dir;
+ if (IS_ERR_OR_NULL(old_dentry))
+ return old_dentry;
+
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index dcea1e37a1b734..f18619bc2e094c 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -290,6 +290,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
flush_workqueue(ls->ls_callback_wq);
}
+#define MAX_CB_QUEUE 25
+
void dlm_callback_resume(struct dlm_ls *ls)
{
struct dlm_lkb *lkb, *safe;
@@ -300,15 +302,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
if (!ls->ls_callback_wq)
return;
+more:
mutex_lock(&ls->ls_cb_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
list_del_init(&lkb->lkb_cb_list);
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
count++;
+ if (count == MAX_CB_QUEUE)
+ break;
}
mutex_unlock(&ls->ls_cb_mutex);
if (count)
log_rinfo(ls, "dlm_callback_resume %d", count);
+ if (count == MAX_CB_QUEUE) {
+ count = 0;
+ cond_resched();
+ goto more;
+ }
}
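
dlm_callback_resume() now drains the delayed list in batches of MAX_CB_QUEUE, dropping the mutex and rescheduling between batches so a large backlog cannot hog the CPU. A stand-alone sketch of the batching shape, with the lock and cond_resched() reduced to stubs:

#include <stdio.h>

#define MAX_CB_QUEUE 25

static int pending = 60;                /* pretend backlog */

static void cond_resched(void) { }      /* stub for the kernel helper */

/* One locked pass over the list, bounded at MAX_CB_QUEUE items; in the
 * kernel each item is handed to queue_work() under ls_cb_mutex. */
static int requeue_batch(void)
{
        int count = 0;

        /* mutex_lock(&ls->ls_cb_mutex); */
        while (pending > 0 && count < MAX_CB_QUEUE) {
                pending--;
                count++;
        }
        /* mutex_unlock(&ls->ls_cb_mutex); */
        return count;
}

int main(void)
{
        int count;

        /* A full batch means there may be more work: yield, then retry. */
        while ((count = requeue_batch()) == MAX_CB_QUEUE)
                cond_resched();
        printf("drained, last batch %d\n", count);
        return 0;
}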
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 35502d4046f573..3a7f401e943c18 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1210,6 +1210,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
if (rv < 0) {
log_error(ls, "create_lkb idr error %d", rv);
+ dlm_free_lkb(lkb);
return rv;
}
@@ -4177,6 +4178,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
(unsigned long long)lkb->lkb_recover_seq,
ms->m_header.h_nodeid, ms->m_lkid);
error = -ENOENT;
+ dlm_put_lkb(lkb);
goto fail;
}
@@ -4230,6 +4232,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
lkb->lkb_id, lkb->lkb_remid,
ms->m_header.h_nodeid, ms->m_lkid);
error = -ENOENT;
+ dlm_put_lkb(lkb);
goto fail;
}
@@ -5792,20 +5795,20 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua,
goto out;
}
}
-
- /* After ua is attached to lkb it will be freed by dlm_free_lkb().
- When DLM_IFL_USER is set, the dlm knows that this is a userspace
- lock and that lkb_astparam is the dlm_user_args structure. */
-
error = set_lock_args(mode, &ua->lksb, flags, namelen, timeout_cs,
fake_astfn, ua, fake_bastfn, &args);
- lkb->lkb_flags |= DLM_IFL_USER;
-
if (error) {
+ kfree(ua->lksb.sb_lvbptr);
+ ua->lksb.sb_lvbptr = NULL;
+ kfree(ua);
__put_lkb(ls, lkb);
goto out;
}
+ /* After ua is attached to lkb it will be freed by dlm_free_lkb().
+ When DLM_IFL_USER is set, the dlm knows that this is a userspace
+ lock and that lkb_astparam is the dlm_user_args structure. */
+ lkb->lkb_flags |= DLM_IFL_USER;
error = request_lock(ls, lkb, name, namelen, &args);
switch (error) {
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index f3e72787e7f9db..30e4e01db35a3b 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -673,11 +673,11 @@ static int new_lockspace(const char *name, const char *cluster,
kfree(ls->ls_recover_buf);
out_lkbidr:
idr_destroy(&ls->ls_lkbidr);
+ out_rsbtbl:
for (i = 0; i < DLM_REMOVE_NAMES_MAX; i++) {
if (ls->ls_remove_names[i])
kfree(ls->ls_remove_names[i]);
}
- out_rsbtbl:
vfree(ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index d72d52b9043337..280460fef06647 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -20,8 +20,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
+ /*
+		 * We must skip inodes in an unusual state. We may also skip
+ * inodes without pages but we deliberately won't in case
+ * we need to reschedule to avoid softlockups.
+ */
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
- (inode->i_mapping->nrpages == 0)) {
+ (inode->i_mapping->nrpages == 0 && !need_resched())) {
spin_unlock(&inode->i_lock);
continue;
}
@@ -29,6 +34,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
+ cond_resched();
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
diff --git a/fs/esdfs/dentry.c b/fs/esdfs/dentry.c
index 2a88429919aacb..ea3482842f4128 100644
--- a/fs/esdfs/dentry.c
+++ b/fs/esdfs/dentry.c
@@ -90,12 +90,6 @@ out:
return err;
}
-/* 1 = delete, 0 = cache */
-int esdfs_d_delete(const struct dentry *d)
-{
- return 0;
-}
-
/* directly from fs/fat/namei_vfat.c */
static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
{
@@ -166,7 +160,7 @@ static void esdfs_canonical_path(const struct path *path,
const struct dentry_operations esdfs_dops = {
.d_revalidate = esdfs_d_revalidate,
- .d_delete = esdfs_d_delete,
+ .d_delete = always_delete_dentry,
.d_hash = esdfs_d_hash,
.d_compare = esdfs_d_compare,
.d_release = esdfs_d_release,
diff --git a/fs/esdfs/derive.c b/fs/esdfs/derive.c
index ad21e62bc5f966..7173e9c2ca7096 100644
--- a/fs/esdfs/derive.c
+++ b/fs/esdfs/derive.c
@@ -75,12 +75,14 @@ void esdfs_derive_perms(struct dentry *dentry)
bool is_root;
int ret;
kuid_t appid;
+ struct qstr q_Download = QSTR_LITERAL("Download");
struct qstr q_Android = QSTR_LITERAL("Android");
struct qstr q_data = QSTR_LITERAL("data");
struct qstr q_obb = QSTR_LITERAL("obb");
struct qstr q_media = QSTR_LITERAL("media");
struct qstr q_cache = QSTR_LITERAL("cache");
struct qstr q_user = QSTR_LITERAL("user");
+ struct esdfs_inode_info *parent_i = ESDFS_I(dentry->d_parent->d_inode);
spin_lock(&dentry->d_lock);
is_root = IS_ROOT(dentry);
@@ -89,9 +91,10 @@ void esdfs_derive_perms(struct dentry *dentry)
return;
/* Inherit from the parent to start */
- inode_i->tree = ESDFS_I(dentry->d_parent->d_inode)->tree;
- inode_i->userid = ESDFS_I(dentry->d_parent->d_inode)->userid;
- inode_i->appid = ESDFS_I(dentry->d_parent->d_inode)->appid;
+ inode_i->tree = parent_i->tree;
+ inode_i->userid = parent_i->userid;
+ inode_i->appid = parent_i->appid;
+ inode_i->under_obb = parent_i->under_obb;
/*
* ESDFS_TREE_MEDIA* are intentionally dead ends.
@@ -106,20 +109,24 @@ void esdfs_derive_perms(struct dentry *dentry)
case ESDFS_TREE_ROOT:
inode_i->tree = ESDFS_TREE_MEDIA;
- if (qstr_case_eq(&dentry->d_name, &q_Android))
+ if (qstr_case_eq(&dentry->d_name, &q_Download))
+ inode_i->tree = ESDFS_TREE_DOWNLOAD;
+ else if (qstr_case_eq(&dentry->d_name, &q_Android))
inode_i->tree = ESDFS_TREE_ANDROID;
break;
case ESDFS_TREE_ANDROID:
- if (qstr_case_eq(&dentry->d_name, &q_data))
+ if (qstr_case_eq(&dentry->d_name, &q_data)) {
inode_i->tree = ESDFS_TREE_ANDROID_DATA;
- else if (qstr_case_eq(&dentry->d_name, &q_obb))
+ } else if (qstr_case_eq(&dentry->d_name, &q_obb)) {
inode_i->tree = ESDFS_TREE_ANDROID_OBB;
- else if (qstr_case_eq(&dentry->d_name, &q_media))
+ inode_i->under_obb = true;
+ } else if (qstr_case_eq(&dentry->d_name, &q_media)) {
inode_i->tree = ESDFS_TREE_ANDROID_MEDIA;
- else if (ESDFS_RESTRICT_PERMS(ESDFS_SB(dentry->d_sb)) &&
- qstr_case_eq(&dentry->d_name, &q_user))
+ } else if (ESDFS_RESTRICT_PERMS(ESDFS_SB(dentry->d_sb)) &&
+ qstr_case_eq(&dentry->d_name, &q_user)) {
inode_i->tree = ESDFS_TREE_ANDROID_USER;
+ }
break;
case ESDFS_TREE_ANDROID_DATA:
@@ -194,6 +201,7 @@ void esdfs_set_derived_perms(struct inode *inode)
}
break;
+ case ESDFS_TREE_DOWNLOAD:
case ESDFS_TREE_ANDROID:
case ESDFS_TREE_ANDROID_DATA:
case ESDFS_TREE_ANDROID_OBB:
@@ -237,7 +245,7 @@ static int lookup_link_source(struct dentry *dentry, struct dentry *parent)
esdfs_get_lower_path(parent, &lower_parent_path);
- /* Check if the stub user profile obb is there. */
+ /* Check if the stub user profile folder is there. */
err = esdfs_lookup_nocase(&lower_parent_path, &dentry->d_name,
&lower_path);
/* Remember it to handle renames and removal. */
@@ -249,6 +257,27 @@ static int lookup_link_source(struct dentry *dentry, struct dentry *parent)
return err;
}
+int esdfs_is_dl_lookup(struct dentry *dentry, struct dentry *parent)
+{
+ struct esdfs_sb_info *sbi = ESDFS_SB(parent->d_sb);
+ struct esdfs_inode_info *parent_i = ESDFS_I(parent->d_inode);
+ /*
+ * Return 1 if this lookup is for the special Download directory,
+ * i.e. all of the following hold:
+ * 1. The parent is the mount root.
+ * 2. The directory is named 'Download'.
+ * 3. The stub for the directory exists.
+ */
+ if (test_opt(sbi, SPECIAL_DOWNLOAD) &&
+ parent_i->tree == ESDFS_TREE_ROOT &&
+ ESDFS_DENTRY_NEEDS_DL_LINK(dentry) &&
+ lookup_link_source(dentry, parent) == 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
int esdfs_derived_lookup(struct dentry *dentry, struct dentry **parent)
{
struct esdfs_sb_info *sbi = ESDFS_SB((*parent)->d_sb);
@@ -283,6 +312,7 @@ int esdfs_derived_lookup(struct dentry *dentry, struct dentry **parent)
if (ESDFS_INODE_CAN_LINK((*parent)->d_inode))
*parent = dget(sbi->obb_parent);
}
+
return 0;
}
@@ -297,7 +327,10 @@ int esdfs_derived_revalidate(struct dentry *dentry, struct dentry *parent)
ESDFS_DENTRY_NEEDS_LINK(dentry) &&
!ESDFS_DENTRY_IS_LINKED(dentry))
return -ESTALE;
-
+ if (ESDFS_I(parent->d_inode)->tree == ESDFS_TREE_ROOT &&
+ ESDFS_DENTRY_NEEDS_DL_LINK(dentry) &&
+ !ESDFS_DENTRY_IS_LINKED(dentry))
+ return -ESTALE;
return 0;
}
@@ -334,6 +367,7 @@ int esdfs_check_derived_permission(struct inode *inode, int mask)
*/
if ((!test_opt(ESDFS_SB(inode->i_sb), DERIVE_UNIFIED) ||
(ESDFS_I(inode)->tree != ESDFS_TREE_ANDROID &&
+ ESDFS_I(inode)->tree != ESDFS_TREE_DOWNLOAD &&
ESDFS_I(inode)->tree != ESDFS_TREE_ANDROID_DATA &&
ESDFS_I(inode)->tree != ESDFS_TREE_ANDROID_OBB &&
ESDFS_I(inode)->tree != ESDFS_TREE_ANDROID_MEDIA &&
@@ -367,7 +401,14 @@ static kuid_t esdfs_get_derived_lower_uid(struct esdfs_sb_info *sbi,
int perm;
perm = info->tree;
+ if (info->under_obb)
+ perm = ESDFS_TREE_ANDROID_OBB;
+
switch (perm) {
+ case ESDFS_TREE_DOWNLOAD:
+ if (test_opt(sbi, SPECIAL_DOWNLOAD))
+ return make_kuid(sbi->dl_ns,
+ sbi->lower_dl_perms.raw_uid);
case ESDFS_TREE_ROOT:
case ESDFS_TREE_MEDIA:
case ESDFS_TREE_ANDROID:
@@ -396,7 +437,14 @@ static kgid_t esdfs_get_derived_lower_gid(struct esdfs_sb_info *sbi,
upper_uid = esdfs_i_uid_read(&info->vfs_inode);
perm = info->tree;
+ if (info->under_obb)
+ perm = ESDFS_TREE_ANDROID_OBB;
+
switch (perm) {
+ case ESDFS_TREE_DOWNLOAD:
+ if (test_opt(sbi, SPECIAL_DOWNLOAD))
+ return make_kgid(sbi->dl_ns,
+ sbi->lower_dl_perms.raw_gid);
case ESDFS_TREE_ROOT:
case ESDFS_TREE_MEDIA:
case ESDFS_TREE_ANDROID:
@@ -544,7 +592,7 @@ int esdfs_derive_mkdir_contents(struct dentry *dir_dentry)
/* Now create the lower file. */
mode = S_IFREG;
lower_parent_dentry = lock_parent(lower_dentry);
- esdfs_set_lower_mode(ESDFS_SB(dir_dentry->d_sb), &mode);
+ esdfs_set_lower_mode(ESDFS_SB(dir_dentry->d_sb), inode_i, &mode);
err = vfs_create(lower_dir_path.dentry->d_inode, lower_dentry, mode,
true);
unlock_dir(lower_parent_dentry);
diff --git a/fs/esdfs/esdfs.h b/fs/esdfs/esdfs.h
index 92950a0db26482..cec3b0aeb9ecc2 100644
--- a/fs/esdfs/esdfs.h
+++ b/fs/esdfs/esdfs.h
@@ -55,6 +55,7 @@
#define ESDFS_MOUNT_ACCESS_DISABLE 0x00000020
#define ESDFS_MOUNT_GID_DERIVATION 0x00000040
#define ESDFS_MOUNT_DEFAULT_NORMAL 0x00000080
+#define ESDFS_MOUNT_SPECIAL_DOWNLOAD 0x00000100
#define clear_opt(sbi, option) (sbi->options &= ~ESDFS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->options |= ESDFS_MOUNT_##option)
@@ -85,6 +86,7 @@ enum {
ESDFS_TREE_ROOT_LEGACY, /* root for legacy emulated storage */
ESDFS_TREE_ROOT, /* root for a user */
ESDFS_TREE_MEDIA, /* per-user basic permissions */
+ ESDFS_TREE_DOWNLOAD, /* .../Download */
ESDFS_TREE_ANDROID, /* .../Android */
ESDFS_TREE_ANDROID_DATA, /* .../Android/data */
ESDFS_TREE_ANDROID_OBB, /* .../Android/obb */
@@ -99,7 +101,8 @@ enum {
ESDFS_PERMS_LOWER_DEFAULT = 0,
ESDFS_PERMS_UPPER_LEGACY,
ESDFS_PERMS_UPPER_DERIVED,
+ ESDFS_PERMS_LOWER_DOWNLOAD,
ESDFS_PERMS_TABLE_SIZE
};
#define PKG_NAME_MAX 128
@@ -143,13 +147,14 @@ extern int esdfs_init_package_list(void);
extern void esdfs_destroy_package_list(void);
extern void esdfs_derive_perms(struct dentry *dentry);
extern void esdfs_set_derived_perms(struct inode *inode);
+extern int esdfs_is_dl_lookup(struct dentry *dentry, struct dentry *parent);
extern int esdfs_derived_lookup(struct dentry *dentry, struct dentry **parent);
extern int esdfs_derived_revalidate(struct dentry *dentry,
struct dentry *parent);
extern int esdfs_check_derived_permission(struct inode *inode, int mask);
extern int esdfs_derive_mkdir_contents(struct dentry *dentry);
extern int esdfs_lookup_nocase(struct path *lower_parent_path,
- struct qstr *name, struct path *lower_path);
+ const struct qstr *name, struct path *lower_path);
/* file private data */
struct esdfs_file_info {
@@ -174,6 +179,7 @@ struct esdfs_inode_info {
int tree; /* storage tree location */
uint32_t userid; /* Android User ID (not Linux UID) */
uid_t appid; /* Linux UID for this app/user combo */
+ bool under_obb;
};
/* esdfs dentry data in memory */
@@ -188,11 +194,16 @@ struct esdfs_dentry_info {
struct esdfs_sb_info {
struct super_block *lower_sb;
struct super_block *s_sb;
- struct user_namespace base_ns;
+ struct user_namespace *base_ns;
struct list_head s_list;
struct esdfs_perms lower_perms;
- struct esdfs_perms upper_perms; /* root in derived mode */
- struct dentry *obb_parent; /* pinned dentry for obb link parent */
+ struct esdfs_perms upper_perms; /* root in derived mode */
+ struct dentry *obb_parent; /* pinned dentry for obb link parent */
+ struct path dl_path; /* path of lower downloads folder */
+ struct qstr dl_name; /* name of lower downloads folder */
+ const char *dl_loc; /* location of dl folder */
+ struct esdfs_perms lower_dl_perms; /* permissions for lower downloads folder */
+ struct user_namespace *dl_ns; /* lower downloads namespace */
int ns_fd;
unsigned int options;
};
@@ -205,11 +216,19 @@ void esdfs_truncate_share(struct super_block *, struct inode *, loff_t newsize);
void esdfs_derive_lower_ownership(struct dentry *dentry, const char *name);
-static inline bool is_obb(struct qstr *name) {
+static inline bool is_obb(struct qstr *name)
+{
struct qstr q_obb = QSTR_LITERAL("obb");
return qstr_case_eq(name, &q_obb);
}
+static inline bool is_dl(struct qstr *name)
+{
+ struct qstr q_dl = QSTR_LITERAL("Download");
+
+ return qstr_case_eq(name, &q_dl);
+}
+
#define ESDFS_INODE_IS_STALE(i) ((i)->version != esdfs_package_list_version)
#define ESDFS_INODE_CAN_LINK(i) (test_opt(ESDFS_SB((i)->i_sb), \
DERIVE_LEGACY) || \
@@ -217,6 +236,7 @@ static inline bool is_obb(struct qstr *name) {
DERIVE_UNIFIED) && \
ESDFS_I(i)->userid > 0))
#define ESDFS_DENTRY_NEEDS_LINK(d) (is_obb(&(d)->d_name))
+#define ESDFS_DENTRY_NEEDS_DL_LINK(d) (is_dl(&(d)->d_name))
#define ESDFS_DENTRY_IS_LINKED(d) (ESDFS_D(d)->real_parent)
#define ESDFS_DENTRY_HAS_STUB(d) (ESDFS_D(d)->lower_stub_path.dentry)
@@ -401,12 +421,18 @@ static inline void unlock_dir(struct dentry *dir)
}
static inline void esdfs_set_lower_mode(struct esdfs_sb_info *sbi,
- umode_t *mode)
+ struct esdfs_inode_info *inode_i, umode_t *mode)
{
+ struct esdfs_perms *perms = &sbi->lower_perms;
+
+ if (test_opt(sbi, SPECIAL_DOWNLOAD) &&
+ inode_i->tree == ESDFS_TREE_DOWNLOAD)
+ perms = &sbi->lower_dl_perms;
+
if (S_ISDIR(*mode))
- *mode = (*mode & S_IFMT) | sbi->lower_perms.dmask;
+ *mode = (*mode & S_IFMT) | perms->dmask;
else
- *mode = (*mode & S_IFMT) | sbi->lower_perms.fmask;
+ *mode = (*mode & S_IFMT) | perms->fmask;
}
static inline void esdfs_set_perms(struct inode *inode)
@@ -482,32 +508,32 @@ static inline void esdfs_copy_attr(struct inode *dest, const struct inode *src)
static inline uid_t esdfs_from_local_uid(struct esdfs_sb_info *sbi, uid_t uid)
{
- return from_kuid(&sbi->base_ns, make_kuid(current_user_ns(), uid));
+ return from_kuid(sbi->base_ns, make_kuid(current_user_ns(), uid));
}
static inline gid_t esdfs_from_local_gid(struct esdfs_sb_info *sbi, gid_t gid)
{
- return from_kgid(&sbi->base_ns, make_kgid(current_user_ns(), gid));
+ return from_kgid(sbi->base_ns, make_kgid(current_user_ns(), gid));
}
static inline uid_t esdfs_from_kuid(struct esdfs_sb_info *sbi, kuid_t uid)
{
- return from_kuid(&sbi->base_ns, uid);
+ return from_kuid(sbi->base_ns, uid);
}
static inline gid_t esdfs_from_kgid(struct esdfs_sb_info *sbi, kgid_t gid)
{
- return from_kgid(&sbi->base_ns, gid);
+ return from_kgid(sbi->base_ns, gid);
}
static inline kuid_t esdfs_make_kuid(struct esdfs_sb_info *sbi, uid_t uid)
{
- return make_kuid(&sbi->base_ns, uid);
+ return make_kuid(sbi->base_ns, uid);
}
static inline kgid_t esdfs_make_kgid(struct esdfs_sb_info *sbi, gid_t gid)
{
- return make_kgid(&sbi->base_ns, gid);
+ return make_kgid(sbi->base_ns, gid);
}
/* Helper functions to read and write to inode uid/gids without
@@ -544,6 +570,7 @@ static inline const struct cred *esdfs_override_creds(
{
struct cred *creds = prepare_creds();
uid_t uid;
+ gid_t gid = sbi->lower_perms.gid;
if (!creds)
return NULL;
@@ -554,16 +581,24 @@ static inline const struct cred *esdfs_override_creds(
*mask = xchg(&current->fs->umask, *mask & S_IRWXUGO);
}
- if (test_opt(sbi, GID_DERIVATION)) {
- if (info->tree == ESDFS_TREE_ANDROID_OBB)
- uid = AID_MEDIA_OBB;
- else
- uid = derive_uid(info, sbi->lower_perms.uid);
+ if (test_opt(sbi, SPECIAL_DOWNLOAD) &&
+ info->tree == ESDFS_TREE_DOWNLOAD) {
+ creds->fsuid = make_kuid(sbi->dl_ns,
+ sbi->lower_dl_perms.raw_uid);
+ creds->fsgid = make_kgid(sbi->dl_ns,
+ sbi->lower_dl_perms.raw_gid);
} else {
- uid = sbi->lower_perms.uid;
+ if (test_opt(sbi, GID_DERIVATION)) {
+ if (info->under_obb)
+ uid = AID_MEDIA_OBB;
+ else
+ uid = derive_uid(info, sbi->lower_perms.uid);
+ } else {
+ uid = sbi->lower_perms.uid;
+ }
+ creds->fsuid = esdfs_make_kuid(sbi, uid);
+ creds->fsgid = esdfs_make_kgid(sbi, gid);
}
- creds->fsuid = esdfs_make_kuid(sbi, uid);
- creds->fsgid = esdfs_make_kgid(sbi, sbi->lower_perms.gid);
/* this installs the new creds into current, which we must destroy */
return override_creds(creds);
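
The most important change in this header is struct esdfs_sb_info switching from an embedded struct user_namespace to a pointer: the old code memcpy()ed a live namespace, reference count and all, which breaks its lifetime management. A minimal sketch of the take-and-release pattern the pointer form enables, using a hypothetical example_ctx holder:

#include <linux/cred.h>
#include <linux/user_namespace.h>

struct example_ctx {
	struct user_namespace *ns;
};

static void ctx_init(struct example_ctx *ctx)
{
	/* take a counted reference instead of copying the struct */
	ctx->ns = get_user_ns(current_user_ns());
}

static void ctx_destroy(struct example_ctx *ctx)
{
	if (ctx->ns)
		put_user_ns(ctx->ns);	/* drop the reference taken above */
}
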
diff --git a/fs/esdfs/inode.c b/fs/esdfs/inode.c
index 9e0825b89b7b26..172178f63b41dd 100644
--- a/fs/esdfs/inode.c
+++ b/fs/esdfs/inode.c
@@ -44,7 +44,7 @@ static int esdfs_create(struct inode *dir, struct dentry *dentry,
lower_dentry = lower_path.dentry;
lower_parent_dentry = lock_parent(lower_dentry);
- esdfs_set_lower_mode(ESDFS_SB(dir->i_sb), &mode);
+ esdfs_set_lower_mode(ESDFS_SB(dir->i_sb), ESDFS_I(dir), &mode);
lower_inode = esdfs_lower_inode(dir);
err = vfs_create(lower_inode, lower_dentry, mode, want_excl);
@@ -151,8 +151,7 @@ static int esdfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
lower_parent_dentry = lock_parent(lower_dentry);
mode |= S_IFDIR;
- esdfs_set_lower_mode(ESDFS_SB(dir->i_sb), &mode);
-
+ esdfs_set_lower_mode(ESDFS_SB(dir->i_sb), ESDFS_I(dir), &mode);
err = vfs_mkdir(lower_parent_dentry->d_inode, lower_dentry, mode);
if (err)
goto out;
@@ -241,10 +240,19 @@ static int esdfs_rename(struct inode *old_dir, struct dentry *old_dentry,
int mask;
const struct cred *creds;
+ if (test_opt(sbi, SPECIAL_DOWNLOAD)) {
+ if ((ESDFS_I(old_dir)->tree == ESDFS_TREE_DOWNLOAD
+ || ESDFS_I(new_dir)->tree == ESDFS_TREE_DOWNLOAD)
+ && ESDFS_I(old_dir)->tree != ESDFS_I(new_dir)->tree)
+ return -EXDEV;
+ }
+
if (test_opt(sbi, GID_DERIVATION)) {
if (ESDFS_I(old_dir)->userid != ESDFS_I(new_dir)->userid
- || (ESDFS_I(old_dir)->tree == ESDFS_TREE_ANDROID_OBB
- && ESDFS_I(old_dir)->tree != ESDFS_I(new_dir)->tree))
+ || ((ESDFS_I(old_dir)->under_obb
+ || ESDFS_I(new_dir)->under_obb)
+ && ESDFS_I(old_dir)->under_obb
+ != ESDFS_I(new_dir)->under_obb))
return -EXDEV;
}
creds = esdfs_override_creds(sbi, ESDFS_I(new_dir), &mask);
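
Returning -EXDEV for renames that cross the Download boundary reuses the contract userspace already honours for cross-mount renames: rename(2) fails and the caller falls back to copy plus unlink. A hypothetical userspace fallback, with copy_file standing in for whatever copy routine the caller uses:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int copy_file(const char *src, const char *dst);	/* hypothetical helper */

int move_file(const char *src, const char *dst)
{
	if (rename(src, dst) == 0)
		return 0;
	if (errno != EXDEV)
		return -1;
	/* cross-"device" rename: copy the data, then remove the source */
	if (copy_file(src, dst) != 0)
		return -1;
	return unlink(src);
}
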
diff --git a/fs/esdfs/lookup.c b/fs/esdfs/lookup.c
index 6321637531e444..7bb8a1e0b67cbc 100644
--- a/fs/esdfs/lookup.c
+++ b/fs/esdfs/lookup.c
@@ -36,7 +36,7 @@ static int esdfs_name_match(struct dir_context *ctx, const char *name, int namel
}
int esdfs_lookup_nocase(struct path *parent,
- struct qstr *name,
+ const struct qstr *name,
struct path *path) {
int err = 0;
/* Use vfs_path_lookup to check if the dentry exists or not */
@@ -179,6 +179,7 @@ struct inode *esdfs_iget(struct super_block *sb, struct inode *lower_inode,
info->tree = ESDFS_TREE_NONE;
info->userid = 0;
info->appid = 0;
+ info->under_obb = false;
inode->i_ino = lower_inode->i_ino;
esdfs_set_lower_inode(inode, lower_inode);
@@ -295,7 +296,7 @@ int esdfs_interpose(struct dentry *dentry, struct super_block *sb,
static struct dentry *__esdfs_lookup(struct dentry *dentry,
unsigned int flags,
struct path *lower_parent_path,
- uint32_t id)
+ uint32_t id, bool use_dl)
{
int err = 0;
struct vfsmount *lower_dir_mnt;
@@ -312,14 +313,28 @@ static struct dentry *__esdfs_lookup(struct dentry *dentry,
if (IS_ROOT(dentry))
goto out;
- name = dentry->d_name.name;
+ if (use_dl)
+ name = ESDFS_SB(dentry->d_sb)->dl_name.name;
+ else
+ name = dentry->d_name.name;
+
+ dname.name = name;
+ dname.len = strlen(name);
/* now start the actual lookup procedure */
lower_dir_dentry = lower_parent_path->dentry;
lower_dir_mnt = lower_parent_path->mnt;
- err = esdfs_lookup_nocase(lower_parent_path,
- &dentry->d_name, &lower_path);
+ /* If the access is to the Download directory, redirect
+ * to the lower path resolved at mount time.
+ */
+ if (use_dl) {
+ pathcpy(&lower_path, &ESDFS_SB(dentry->d_sb)->dl_path);
+ path_get(&ESDFS_SB(dentry->d_sb)->dl_path);
+ } else {
+ err = esdfs_lookup_nocase(lower_parent_path, &dname,
+ &lower_path);
+ }
/* no error: handle positive dentries */
if (!err) {
@@ -343,9 +358,6 @@ static struct dentry *__esdfs_lookup(struct dentry *dentry,
goto out;
/* instantiate a new negative dentry */
- dname.name = name;
- dname.len = strlen(name);
-
/* See if the low-level filesystem might want
* to use its own hash */
lower_dentry = d_hash_and_lookup(lower_dir_dentry, &dname);
@@ -385,11 +397,9 @@ struct dentry *esdfs_lookup(struct inode *dir, struct dentry *dentry,
int err;
struct dentry *ret, *real_parent, *parent;
struct path lower_parent_path, old_lower_parent_path;
- const struct cred *creds =
- esdfs_override_creds(ESDFS_SB(dir->i_sb),
- ESDFS_I(dir), NULL);
- if (!creds)
- return NULL;
+ const struct cred *creds;
+ struct esdfs_sb_info *sbi = ESDFS_SB(dir->i_sb);
+ int use_dl;
parent = real_parent = dget_parent(dentry);
@@ -400,7 +410,7 @@ struct dentry *esdfs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- if (ESDFS_DERIVE_PERMS(ESDFS_SB(dir->i_sb))) {
+ if (ESDFS_DERIVE_PERMS(sbi)) {
err = esdfs_derived_lookup(dentry, &parent);
if (err) {
ret = ERR_PTR(err);
@@ -410,16 +420,33 @@ struct dentry *esdfs_lookup(struct inode *dir, struct dentry *dentry,
esdfs_get_lower_path(parent, &lower_parent_path);
+ creds = esdfs_override_creds(ESDFS_SB(dir->i_sb),
+ ESDFS_I(d_inode(parent)), NULL);
+ if (!creds) {
+ ret = ERR_PTR(-EINVAL);
+ goto out_put;
+ }
+
+ /* Check if the lookup corresponds to the Download directory */
+ use_dl = esdfs_is_dl_lookup(dentry, parent);
+
ret = __esdfs_lookup(dentry, flags, &lower_parent_path,
- ESDFS_I(dir)->userid);
+ ESDFS_I(dir)->userid,
+ use_dl);
if (IS_ERR(ret))
- goto out_put;
+ goto out_cred;
if (ret)
dentry = ret;
if (dentry->d_inode) {
fsstack_copy_attr_times(dentry->d_inode,
esdfs_lower_inode(dentry->d_inode));
- esdfs_derive_lower_ownership(dentry, dentry->d_name.name);
+ /*
+ * Do not modify the ownership of the lower directory if it
+ * is the Download directory
+ */
+ if (!use_dl)
+ esdfs_derive_lower_ownership(dentry,
+ dentry->d_name.name);
}
/* update parent directory's atime */
fsstack_copy_attr_atime(parent->d_inode,
@@ -435,13 +462,13 @@ struct dentry *esdfs_lookup(struct inode *dir, struct dentry *dentry,
esdfs_put_lower_path(real_parent, &old_lower_parent_path);
esdfs_derive_mkdir_contents(dentry);
}
+out_cred:
+ esdfs_revert_creds(creds, NULL);
out_put:
esdfs_put_lower_path(parent, &lower_parent_path);
out:
dput(parent);
if (parent != real_parent)
dput(real_parent);
-
- esdfs_revert_creds(creds, NULL);
return ret;
}
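
The Download redirect above skips the name-based lookup entirely and duplicates the struct path that was resolved at mount time: pathcpy() is esdfs's internal pointer copy, and path_get() pins the copy. The general pattern, as a small sketch against linux/path.h:

#include <linux/path.h>

/* Duplicate a resolved path; the copy holds its own references. */
static void borrow_path(struct path *dst, const struct path *src)
{
	*dst = *src;	/* copy the vfsmount and dentry pointers */
	path_get(dst);	/* pin both: equivalent to mntget() + dget() */
}

/* The borrower must later release them with path_put(dst). */
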
diff --git a/fs/esdfs/main.c b/fs/esdfs/main.c
index 324f7d016a7bea..9285a2b5e83245 100644
--- a/fs/esdfs/main.c
+++ b/fs/esdfs/main.c
@@ -32,6 +32,9 @@ enum {
Opt_noconfine,
Opt_gid_derivation,
Opt_default_normal,
+ Opt_dl_loc,
+ Opt_dl_uid,
+ Opt_dl_gid,
Opt_ns_fd,
/* From sdcardfs */
@@ -58,6 +61,9 @@ static match_table_t esdfs_tokens = {
{Opt_noconfine, "noconfine"},
{Opt_gid_derivation, "derive_gid"},
{Opt_default_normal, "default_normal"},
+ {Opt_dl_loc, "dl_loc=%s"},
+ {Opt_dl_uid, "dl_uid=%u"},
+ {Opt_dl_gid, "dl_gid=%u"},
{Opt_ns_fd, "ns_fd=%d"},
/* compatibility with sdcardfs options */
{Opt_fsuid, "fsuid=%u"},
@@ -92,6 +98,13 @@ struct esdfs_perms esdfs_perms_table[ESDFS_PERMS_TABLE_SIZE] = {
.gid = AID_SDCARD_R,
.fmask = 0660,
.dmask = 0771 },
+ /* ESDFS_PERMS_LOWER_DOWNLOAD */
+ { .raw_uid = -1,
+ .raw_gid = -1,
+ .uid = -1,
+ .gid = -1,
+ .fmask = 0644,
+ .dmask = 0711 },
};
static int parse_perms(struct esdfs_perms *perms, char *args)
@@ -276,6 +289,22 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_default_normal:
set_opt(sbi, DEFAULT_NORMAL);
break;
+ case Opt_dl_loc:
+ set_opt(sbi, SPECIAL_DOWNLOAD);
+ sbi->dl_loc = match_strdup(args);
+ break;
+ case Opt_dl_uid:
+ set_opt(sbi, SPECIAL_DOWNLOAD);
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ sbi->lower_dl_perms.raw_uid = option;
+ break;
+ case Opt_dl_gid:
+ set_opt(sbi, SPECIAL_DOWNLOAD);
+ if (match_int(&args[0], &option))
+ return -EINVAL;
+ sbi->lower_dl_perms.raw_gid = option;
+ break;
case Opt_ns_fd:
if (match_int(&args[0], &option))
return -EINVAL;
@@ -323,7 +352,10 @@ static int esdfs_read_super(struct super_block *sb, const char *dev_name,
struct path lower_path;
struct esdfs_sb_info *sbi;
struct inode *inode;
+ struct dentry *lower_dl_dentry;
struct user_namespace *user_ns;
+ kuid_t dl_kuid = INVALID_UID;
+ kgid_t dl_kgid = INVALID_GID;
if (!dev_name) {
esdfs_msg(sb, KERN_ERR, "missing dev_name argument\n");
@@ -372,19 +404,26 @@ static int esdfs_read_super(struct super_block *sb, const char *dev_name,
&esdfs_perms_table[ESDFS_PERMS_UPPER_LEGACY],
sizeof(struct esdfs_perms));
+ memcpy(&sbi->lower_dl_perms,
+ &esdfs_perms_table[ESDFS_PERMS_LOWER_DOWNLOAD],
+ sizeof(struct esdfs_perms));
+
err = parse_options(sb, (char *)raw_data);
if (err)
goto out_free;
+ /* Initialize special namespace for lower Downloads directory */
+ sbi->dl_ns = get_user_ns(current_user_ns());
+
if (sbi->ns_fd == -1) {
- memcpy(&sbi->base_ns, current_user_ns(), sizeof(sbi->base_ns));
+ sbi->base_ns = get_user_ns(current_user_ns());
} else {
user_ns = get_ns_from_fd(sbi->ns_fd);
if (IS_ERR(user_ns)) {
err = PTR_ERR(user_ns);
goto out_free;
}
- memcpy(&sbi->base_ns, user_ns, sizeof(sbi->base_ns));
+ sbi->base_ns = get_user_ns(user_ns);
}
/* interpret all parameters in given namespace */
err = interpret_perms(sbi, &sbi->lower_perms);
@@ -397,6 +436,29 @@ static int esdfs_read_super(struct super_block *sb, const char *dev_name,
pr_err("esdfs: Invalid permissions for upper layer\n");
goto out_free;
}
+
+ /* Check if the downloads uid maps into a valid kuid from
+ * the namespace of the mounting process
+ */
+ if (sbi->lower_dl_perms.raw_uid != -1) {
+ dl_kuid = make_kuid(sbi->dl_ns,
+ sbi->lower_dl_perms.raw_uid);
+ if (!uid_valid(dl_kuid)) {
+ pr_err("esdfs: Invalid permissions for dl_uid");
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+ if (sbi->lower_dl_perms.raw_gid != -1) {
+ dl_kgid = make_kgid(sbi->dl_ns,
+ sbi->lower_dl_perms.raw_gid);
+ if (!gid_valid(dl_kgid)) {
+ pr_err("esdfs: Invalid permissions for dl_gid");
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
/* set the lower superblock field of upper superblock */
lower_sb = lower_path.dentry->d_sb;
atomic_inc(&lower_sb->s_active);
@@ -439,6 +501,50 @@ static int esdfs_read_super(struct super_block *sb, const char *dev_name,
if (err)
goto out_freeroot;
+ if (test_opt(sbi, SPECIAL_DOWNLOAD)) {
+ /* parse lower path */
+ err = kern_path(sbi->dl_loc, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+ &sbi->dl_path);
+ if (err) {
+ esdfs_msg(sb, KERN_ERR,
+ "error accessing download directory '%s'\n",
+ sbi->dl_loc);
+ goto out_freeroot;
+ }
+
+ lower_dl_dentry = sbi->dl_path.dentry;
+
+ if (!S_ISDIR(lower_dl_dentry->d_inode->i_mode)) {
+ err = -EINVAL;
+ esdfs_msg(sb, KERN_ERR,
+ "dl_loc must be a directory '%s'\n",
+ sbi->dl_loc);
+ goto out_dlput;
+ }
+
+ if (lower_dl_dentry->d_sb != lower_sb) {
+ esdfs_msg(sb, KERN_ERR,
+ "dl_loc must be in the same filesystem '%s'\n",
+ sbi->dl_loc);
+ goto out_dlput;
+ }
+
+ if (!uid_valid(dl_kuid)) {
+ dl_kuid = esdfs_make_kuid(sbi, sbi->lower_perms.uid);
+ sbi->lower_dl_perms.raw_uid = from_kuid(sbi->dl_ns,
+ dl_kuid);
+ }
+ if (!gid_valid(dl_kgid)) {
+ dl_kgid = esdfs_make_kgid(sbi, sbi->lower_perms.gid);
+ sbi->lower_dl_perms.raw_gid = from_kgid(sbi->dl_ns,
+ dl_kgid);
+ }
+ spin_lock(&lower_dl_dentry->d_lock);
+ sbi->dl_name.name = kstrndup(lower_dl_dentry->d_name.name,
+ lower_dl_dentry->d_name.len, GFP_ATOMIC);
+ sbi->dl_name.len = lower_dl_dentry->d_name.len;
+ spin_unlock(&lower_dl_dentry->d_lock);
+ }
/* if get here: cannot have error */
/* set the lower dentries for s_root */
@@ -503,6 +609,10 @@ static int esdfs_read_super(struct super_block *sb, const char *dev_name,
goto out;
+out_dlput:
+ path_put(&sbi->dl_path);
+ sbi->dl_path.dentry = NULL;
+ sbi->dl_path.mnt = NULL;
out_freeroot:
dput(sb->s_root);
sb->s_root = NULL;
@@ -510,6 +620,10 @@ out_sput:
/* drop refs we took earlier */
atomic_dec(&lower_sb->s_active);
out_free:
+ if (sbi->dl_ns)
+ put_user_ns(sbi->dl_ns);
+ if (sbi->base_ns)
+ put_user_ns(sbi->base_ns);
kfree(ESDFS_SB(sb));
sb->s_fs_info = NULL;
out_pput:
@@ -546,6 +660,12 @@ static void esdfs_kill_sb(struct super_block *sb)
{
if (sb->s_fs_info && ESDFS_SB(sb)->obb_parent)
dput(ESDFS_SB(sb)->obb_parent);
+ if (sb->s_fs_info && ESDFS_SB(sb)->dl_ns)
+ put_user_ns(ESDFS_SB(sb)->dl_ns);
+ if (sb->s_fs_info && ESDFS_SB(sb)->base_ns)
+ put_user_ns(ESDFS_SB(sb)->base_ns);
+ if (sb->s_fs_info)
+ path_put(&ESDFS_SB(sb)->dl_path);
kill_anon_super(sb);
}
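
The new dl_loc/dl_uid/dl_gid options ride on the kernel's match_table_t parser: dl_loc captures a string through match_strdup(), the other two capture integers through match_int(). A minimal, self-contained sketch of that parser pattern, using a hypothetical val= token rather than the esdfs table:

#include <linux/errno.h>
#include <linux/parser.h>

enum { Opt_val, Opt_err };

static const match_table_t tokens = {
	{Opt_val, "val=%u"},
	{Opt_err, NULL}
};

/* Parse a single "val=<n>" option into *out; 0 on success. */
static int parse_one(char *p, unsigned int *out)
{
	substring_t args[MAX_OPT_ARGS];
	int option;

	if (match_token(p, tokens, args) != Opt_val)
		return -EINVAL;
	if (match_int(&args[0], &option))
		return -EINVAL;
	*out = option;
	return 0;
}
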
diff --git a/fs/esdfs/super.c b/fs/esdfs/super.c
index c4f4f43f5839a1..305eac5a3f4d50 100644
--- a/fs/esdfs/super.c
+++ b/fs/esdfs/super.c
@@ -269,6 +269,11 @@ static int esdfs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",derive_gid");
if (test_opt(sbi, DEFAULT_NORMAL))
seq_puts(seq, ",default_normal");
+ if (test_opt(sbi, SPECIAL_DOWNLOAD)) {
+ seq_printf(seq, ",dl_loc=%s", sbi->dl_loc);
+ seq_printf(seq, ",dl_uid=%d", sbi->lower_dl_perms.raw_uid);
+ seq_printf(seq, ",dl_gid=%d", sbi->lower_dl_perms.raw_gid);
+ }
return 0;
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 1b08556776cebb..240d9ceb8d0c65 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1034,7 +1034,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
* semantics). All the events that happen during that period of time are
* chained in ep->ovflist and requeued later on.
*/
- if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
+ if (ep->ovflist != EP_UNACTIVE_PTR) {
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = ep->ovflist;
ep->ovflist = epi;
diff --git a/fs/exec.c b/fs/exec.c
index d285917dbcff85..d99e7670bc2947 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -193,6 +193,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
{
struct page *page;
int ret;
+ unsigned int gup_flags = FOLL_FORCE;
#ifdef CONFIG_STACK_GROWSUP
if (write) {
@@ -201,8 +202,12 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
return NULL;
}
#endif
- ret = get_user_pages(current, bprm->mm, pos,
- 1, write, 1, &page, NULL);
+
+ if (write)
+ gup_flags |= FOLL_WRITE;
+
+ ret = get_user_pages(current, bprm->mm, pos, 1, gup_flags,
+ &page, NULL);
if (ret <= 0)
return NULL;
@@ -1099,15 +1104,14 @@ killed:
return -EAGAIN;
}
-char *get_task_comm(char *buf, struct task_struct *tsk)
+char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
- /* buf must be at least sizeof(tsk->comm) in size */
task_lock(tsk);
- strncpy(buf, tsk->comm, sizeof(tsk->comm));
+ strncpy(buf, tsk->comm, buf_size);
task_unlock(tsk);
return buf;
}
-EXPORT_SYMBOL_GPL(get_task_comm);
+EXPORT_SYMBOL_GPL(__get_task_comm);
/*
* These functions flush out all traces of the currently running executable
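
Passing the buffer size explicitly lets __get_task_comm() work with callers whose buffers are not TASK_COMM_LEN bytes. Upstream pairs the rename with a wrapper macro so existing get_task_comm() callers keep compiling and gain a compile-time size check; roughly, and assuming this tree follows the upstream header change:

/* include/linux/sched.h side of the change, approximately: */
extern char *__get_task_comm(char *buf, size_t size, struct task_struct *tsk);
#define get_task_comm(buf, tsk) ({				\
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);		\
	__get_task_comm(buf, sizeof(buf), tsk);			\
})
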
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index b795c567b5e1df..360ba74e04e69b 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -100,6 +100,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts)
token = match_token(p, tokens, args);
switch (token) {
case Opt_name:
+ kfree(opts->dev_name);
opts->dev_name = match_strdup(&args[0]);
if (unlikely(!opts->dev_name)) {
EXOFS_ERR("Error allocating dev_name");
@@ -868,8 +869,10 @@ static struct dentry *exofs_mount(struct file_system_type *type,
int ret;
ret = parse_options(data, &opts);
- if (ret)
+ if (ret) {
+ kfree(opts.dev_name);
return ERR_PTR(ret);
+ }
if (!opts.dev_name)
opts.dev_name = dev_name;
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index 714cd37a6ba30f..6599c6124552f7 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -76,7 +76,7 @@ static bool dentry_connected(struct dentry *dentry)
struct dentry *parent = dget_parent(dentry);
dput(dentry);
- if (IS_ROOT(dentry)) {
+ if (dentry == parent) {
dput(parent);
return false;
}
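
The one-line change fixes a use-after-free: IS_ROOT() dereferences the dentry whose last local reference was just dropped by dput(), while comparing the stale pointer value against the parent we still hold answers the same question without touching freed memory. A simplified sketch of the corrected walk (the real dentry_connected() additionally loops only while DCACHE_DISCONNECTED is set and treats reaching the root in that state as failure):

#include <linux/dcache.h>

static bool walk_to_root(struct dentry *dentry)
{
	dget(dentry);
	for (;;) {
		struct dentry *parent = dget_parent(dentry);

		dput(dentry);		/* may free the child... */
		if (dentry == parent) {	/* ...so compare pointers only */
			dput(parent);
			return true;	/* reached the root */
		}
		dentry = parent;
	}
}
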
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 748d35afc90266..860024392969be 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -721,7 +721,8 @@ static loff_t ext2_max_size(int bits)
{
loff_t res = EXT2_NDIR_BLOCKS;
int meta_blocks;
- loff_t upper_limit;
+ unsigned int upper_limit;
+ unsigned int ppb = 1 << (bits-2);
/* This is calculated to be the largest file size for a
* dense file such that the total number of
@@ -735,24 +736,34 @@ static loff_t ext2_max_size(int bits)
/* total blocks in file system block size */
upper_limit >>= (bits - 9);
+ /* Compute how many blocks we can address by block tree */
+ res += 1LL << (bits-2);
+ res += 1LL << (2*(bits-2));
+ res += 1LL << (3*(bits-2));
+ /* Does block tree limit file size? */
+ if (res < upper_limit)
+ goto check_lfs;
+ res = upper_limit;
+ /* How many metadata blocks are needed for addressing upper_limit? */
+ upper_limit -= EXT2_NDIR_BLOCKS;
/* indirect blocks */
meta_blocks = 1;
+ upper_limit -= ppb;
/* double indirect blocks */
- meta_blocks += 1 + (1LL << (bits-2));
- /* tripple indirect blocks */
- meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
-
- upper_limit -= meta_blocks;
- upper_limit <<= bits;
-
- res += 1LL << (bits-2);
- res += 1LL << (2*(bits-2));
- res += 1LL << (3*(bits-2));
+ if (upper_limit < ppb * ppb) {
+ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb);
+ res -= meta_blocks;
+ goto check_lfs;
+ }
+ meta_blocks += 1 + ppb;
+ upper_limit -= ppb * ppb;
+ /* triple indirect blocks for the rest */
+ meta_blocks += 1 + DIV_ROUND_UP(upper_limit, ppb) +
+ DIV_ROUND_UP(upper_limit, ppb*ppb);
+ res -= meta_blocks;
+check_lfs:
res <<= bits;
- if (res > upper_limit)
- res = upper_limit;
-
if (res > MAX_LFS_FILESIZE)
res = MAX_LFS_FILESIZE;
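
A quick sanity check of the rewritten bound, assuming the unshown top of the function still derives upper_limit from the i_blocks limit of (2^32 - 1) 512-byte sectors. For 4 KiB blocks (bits = 12):

\[ \mathrm{ppb} = 2^{12-2} = 2^{10}, \qquad \mathrm{res} = 12 + 2^{10} + 2^{20} + 2^{30} \approx 2^{30}\ \text{blocks}, \]
\[ \mathrm{upper\_limit} = (2^{32}-1) \gg (12-9) \approx 2^{29}\ \text{blocks} < \mathrm{res}, \]

so the sector limit binds, only the indirect blocks actually needed to address upper_limit are subtracted, and the cap lands just under 2^29 * 4 KiB = 2 TiB. With 1 KiB blocks the tree capacity (about 2^24 blocks) is below the sector limit (about 2^31 blocks), so the code branches straight to check_lfs with no metadata subtraction and the familiar ~16 GiB ext2 limit falls out.
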
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index fa70848afa8f4c..22d817dc821e9b 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -605,9 +605,9 @@ skip_replace:
}
cleanup:
- brelse(bh);
if (!(bh && header == HDR(bh)))
kfree(header);
+ brelse(bh);
up_write(&EXT2_I(inode)->xattr_sem);
return error;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index c57a94f1c198c0..e0fb7cdcee8977 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -183,7 +183,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
unsigned int bit, bit_max;
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t start, tmp;
- int flex_bg = 0;
struct ext4_group_info *grp;
J_ASSERT_BH(bh, buffer_locked(bh));
@@ -216,22 +215,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
start = ext4_group_first_block_no(sb, block_group);
- if (ext4_has_feature_flex_bg(sb))
- flex_bg = 1;
-
/* Set bits for block and inode bitmaps, and inode table */
tmp = ext4_block_bitmap(sb, gdp);
- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ if (ext4_block_in_group(sb, tmp, block_group))
ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
tmp = ext4_inode_bitmap(sb, gdp);
- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ if (ext4_block_in_group(sb, tmp, block_group))
ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
tmp = ext4_inode_table(sb, gdp);
for (; tmp < ext4_inode_table(sb, gdp) +
sbi->s_itb_per_group; tmp++) {
- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ if (ext4_block_in_group(sb, tmp, block_group))
ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
}
@@ -382,6 +378,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
+ if (buffer_verified(bh))
+ goto verified;
if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
desc, bh))) {
ext4_unlock_group(sb, block_group);
@@ -404,6 +402,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
return -EFSCORRUPTED;
}
set_buffer_verified(bh);
+verified:
ext4_unlock_group(sb, block_group);
return 0;
}
@@ -454,7 +453,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
goto verify;
}
ext4_lock_group(sb, block_group);
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ if (block_group == 0) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ ext4_error(sb, "Block bitmap for bg 0 marked "
+ "uninitialized");
+ err = -EFSCORRUPTED;
+ goto out;
+ }
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 33f5e2a50cf883..e452f9a9f17447 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -74,7 +74,7 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
- error_msg = "directory entry across range";
+ error_msg = "directory entry overrun";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
@@ -83,18 +83,16 @@ int __ext4_check_dir_entry(const char *function, unsigned int line,
if (filp)
ext4_error_file(filp, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
return 1;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d0270d801b6649..02e1a39035fdc9 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1468,11 +1468,6 @@ static inline struct timespec ext4_current_time(struct inode *inode)
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
- ino == EXT4_USR_QUOTA_INO ||
- ino == EXT4_GRP_QUOTA_INO ||
- ino == EXT4_BOOT_LOADER_INO ||
- ino == EXT4_JOURNAL_INO ||
- ino == EXT4_RESIZE_INO ||
(ino >= EXT4_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
@@ -1775,7 +1770,6 @@ EXT4_FEATURE_INCOMPAT_FUNCS(encrypt, ENCRYPT)
EXT4_FEATURE_INCOMPAT_64BIT| \
EXT4_FEATURE_INCOMPAT_FLEX_BG| \
EXT4_FEATURE_INCOMPAT_MMP | \
- EXT4_FEATURE_INCOMPAT_INLINE_DATA | \
EXT4_FEATURE_INCOMPAT_ENCRYPT | \
EXT4_FEATURE_INCOMPAT_CSUM_SEED)
#define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
@@ -3045,9 +3039,6 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed);
extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
extern int ext4_convert_inline_data(struct inode *inode);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 3c938154709478..2d8e737935121b 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -103,6 +103,7 @@ struct ext4_extent_header {
};
#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
(sizeof(struct ext4_extent_header) + \
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index f817ed58f5ad44..b40e75dbf48c3c 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -372,7 +372,7 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle,
{
struct ext4_inode_info *ei = EXT4_I(inode);
- if (ext4_handle_valid(handle)) {
+ if (ext4_handle_valid(handle) && !is_handle_aborted(handle)) {
ei->i_sync_tid = handle->h_transaction->t_tid;
if (datasync)
ei->i_datasync_tid = handle->h_transaction->t_tid;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 4705c21f9d0311..1708597659a14e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -876,6 +876,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
+ if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+ EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+ depth);
+ ret = -EFSCORRUPTED;
+ goto err;
+ }
if (path) {
ext4_ext_drop_refs(path);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0145db31739ee8..685573ad4763c3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -79,7 +79,7 @@ ext4_unaligned_aio(struct inode *inode, struct iov_iter *from, loff_t pos)
struct super_block *sb = inode->i_sb;
int blockmask = sb->s_blocksize - 1;
- if (pos >= i_size_read(inode))
+ if (pos >= ALIGN(i_size_read(inode), sb->s_blocksize))
return 0;
if ((pos | iov_iter_alignment(from)) & blockmask)
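
Comparing against the block-aligned EOF instead of raw i_size makes writes that begin inside the final partial block count as in-EOF, which is what the unaligned-AIO serialization needs. ALIGN() rounds up to the next multiple of a power of two; a tiny sketch of the predicate:

#include <linux/kernel.h>

/* True iff pos lies at or beyond the block-aligned end of file;
 * e.g. i_size = 4097, blocksize = 4096 gives an aligned EOF of 8192. */
static bool beyond_aligned_eof(loff_t pos, loff_t i_size,
			       unsigned int blocksize)
{
	return pos >= ALIGN(i_size, blocksize);
}
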
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9fe55b7d4c2c73..0963213e9cd362 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -88,6 +88,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
+ if (buffer_verified(bh))
+ goto verified;
blk = ext4_inode_bitmap(sb, desc);
if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -105,6 +107,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
return -EFSBADCRC;
}
set_buffer_verified(bh);
+verified:
ext4_unlock_group(sb, block_group);
return 0;
}
@@ -152,7 +155,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
}
ext4_lock_group(sb, block_group);
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+ if (block_group == 0) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ ext4_error(sb, "Inode bitmap for bg 0 marked "
+ "uninitialized");
+ err = -EFSCORRUPTED;
+ goto out;
+ }
memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
@@ -919,7 +931,8 @@ got:
/* recheck and clear flag under lock if we still need to */
ext4_lock_group(sb, group);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
@@ -1295,7 +1308,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
ext4_itable_unused_count(sb, gdp)),
sbi->s_inodes_per_block);
- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+ ext4_itable_unused_count(sb, gdp)) <
+ EXT4_FIRST_INO(sb)))) {
ext4_error(sb, "Something is wrong with group %u: "
"used itable blocks: %d; "
"itable unused count: %u",
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 355ef9c36c878e..8f3e78eb0bbd2f 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -1491,10 +1491,16 @@ end_range:
partial->p + 1,
partial2->p,
(chain+n-1) - partial);
- BUFFER_TRACE(partial->bh, "call brelse");
- brelse(partial->bh);
- BUFFER_TRACE(partial2->bh, "call brelse");
- brelse(partial2->bh);
+ while (partial > chain) {
+ BUFFER_TRACE(partial->bh, "call brelse");
+ brelse(partial->bh);
+ partial--;
+ }
+ while (partial2 > chain2) {
+ BUFFER_TRACE(partial2->bh, "call brelse");
+ brelse(partial2->bh);
+ partial2--;
+ }
return 0;
}
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3006b81c107fd4..0dcd33f626376a 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -434,6 +434,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
memset((void *)ext4_raw_inode(&is.iloc)->i_block,
0, EXT4_MIN_INLINE_DATA_SIZE);
+ memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
if (ext4_has_feature_extents(inode->i_sb)) {
if (S_ISDIR(inode->i_mode) ||
@@ -677,6 +678,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
goto convert;
}
+ ret = ext4_journal_get_write_access(handle, iloc.bh);
+ if (ret)
+ goto out;
+
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -696,8 +701,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (!PageUptodate(page)) {
ret = ext4_read_inline_page(inode, page);
- if (ret < 0)
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
goto out_up_read;
+ }
}
ret = 1;
@@ -705,7 +713,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
out_up_read:
up_read(&EXT4_I(inode)->xattr_sem);
out:
- if (handle)
+ if (handle && (ret != 1))
ext4_journal_stop(handle);
brelse(iloc.bh);
return ret;
@@ -747,6 +755,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
ext4_write_unlock_xattr(inode, &no_expand);
brelse(iloc.bh);
+ mark_inode_dirty(inode);
out:
return copied;
}
@@ -853,7 +862,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
- int retries;
+ int retries = 0;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -882,18 +891,17 @@ retry_journal:
flags |= AOP_FLAG_NOFS;
if (ret == -ENOSPC) {
+ ext4_journal_stop(handle);
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
flags,
fsdata);
- ext4_journal_stop(handle);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
goto out;
}
-
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page) {
ret = -ENOMEM;
@@ -911,6 +919,9 @@ retry_journal:
if (ret < 0)
goto out_release_page;
}
+ ret = ext4_journal_get_write_access(handle, iloc.bh);
+ if (ret)
+ goto out_release_page;
up_read(&EXT4_I(inode)->xattr_sem);
*pagep = page;
@@ -931,7 +942,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
unsigned len, unsigned copied,
struct page *page)
{
- int i_size_changed = 0;
int ret;
ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -949,10 +959,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
* But it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
- if (pos+copied > inode->i_size) {
+ if (pos+copied > inode->i_size)
i_size_write(inode, pos+copied);
- i_size_changed = 1;
- }
unlock_page(page);
page_cache_release(page);
@@ -962,8 +970,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
* ordering of page lock and transaction start for journaling
* filesystems.
*/
- if (i_size_changed)
- mark_inode_dirty(inode);
+ mark_inode_dirty(inode);
return copied;
}
@@ -1752,6 +1759,7 @@ int empty_inline_dir(struct inode *dir, int *has_inline_data)
{
int err, inline_size;
struct ext4_iloc iloc;
+ size_t inline_len;
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
@@ -1779,8 +1787,9 @@ int empty_inline_dir(struct inode *dir, int *has_inline_data)
goto out;
}
+ inline_len = ext4_get_inline_size(dir);
offset = EXT4_INLINE_DOTDOT_SIZE;
- while (offset < dir->i_size) {
+ while (offset < inline_len) {
de = ext4_get_inline_entry(dir, &iloc, offset,
&inline_pos, &inline_size);
if (ext4_check_dir_entry(dir, NULL, de,
@@ -1852,51 +1861,15 @@ int ext4_inline_data_fiemap(struct inode *inode,
physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
physical += offsetof(struct ext4_inode, i_block);
- if (physical)
- error = fiemap_fill_next_extent(fieinfo, start, physical,
- inline_len, flags);
brelse(iloc.bh);
out:
up_read(&EXT4_I(inode)->xattr_sem);
+ if (physical)
+ error = fiemap_fill_next_extent(fieinfo, start, physical,
+ inline_len, flags);
return (error < 0 ? error : 0);
}
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed)
-{
- int error;
- struct ext4_xattr_entry *entry;
- struct ext4_inode *raw_inode;
- struct ext4_iloc iloc;
-
- error = ext4_get_inode_loc(inode, &iloc);
- if (error)
- return error;
-
- raw_inode = ext4_raw_inode(&iloc);
- entry = (struct ext4_xattr_entry *)((void *)raw_inode +
- EXT4_I(inode)->i_inline_off);
- if (EXT4_XATTR_LEN(entry->e_name_len) +
- EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
- error = -ENOSPC;
- goto out;
- }
-
- error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
- brelse(iloc.bh);
- return error;
-}
-
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ecaa983bf24a4b..b280dfd344700e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -380,9 +380,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
- "lblock %lu mapped to illegal pblock "
+ "lblock %lu mapped to illegal pblock %llu "
"(length %d)", (unsigned long) map->m_lblk,
- map->m_len);
+ map->m_pblk, map->m_len);
return -EFSCORRUPTED;
}
return 0;
@@ -1164,9 +1164,10 @@ static int ext4_write_end(struct file *file,
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int i_size_changed = 0;
+ int inline_data = ext4_has_inline_data(inode);
trace_ext4_write_end(inode, pos, len, copied);
- if (ext4_has_inline_data(inode)) {
+ if (inline_data) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0) {
@@ -1194,7 +1195,7 @@ static int ext4_write_end(struct file *file,
* ordering of page lock and transaction start for journaling
* filesystems.
*/
- if (i_size_changed)
+ if (i_size_changed || inline_data)
ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1268,6 +1269,7 @@ static int ext4_journalled_write_end(struct file *file,
int partial = 0;
unsigned from, to;
int size_changed = 0;
+ int inline_data = ext4_has_inline_data(inode);
trace_ext4_journalled_write_end(inode, pos, len, copied);
from = pos & (PAGE_CACHE_SIZE - 1);
@@ -1275,7 +1277,7 @@ static int ext4_journalled_write_end(struct file *file,
BUG_ON(!ext4_handle_valid(handle));
- if (ext4_has_inline_data(inode)) {
+ if (inline_data) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0) {
@@ -1306,7 +1308,7 @@ static int ext4_journalled_write_end(struct file *file,
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
- if (size_changed) {
+ if (size_changed || inline_data) {
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
@@ -1804,11 +1806,7 @@ static int __ext4_journalled_writepage(struct page *page,
}
if (inline_data) {
- BUFFER_TRACE(inode_bh, "get write access");
- ret = ext4_journal_get_write_access(handle, inode_bh);
-
- err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+ ret = ext4_mark_inode_dirty(handle, inode);
} else {
ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
do_journal_get_write_access);
@@ -3788,28 +3786,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
- /* If there are no blocks to remove, return now */
- if (first_block >= stop_block)
- goto out_stop;
+ /* If there are blocks to remove, do it */
+ if (stop_block > first_block) {
- down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
- ret = ext4_es_remove_extent(inode, first_block,
- stop_block - first_block);
- if (ret) {
- up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
- }
+ ret = ext4_es_remove_extent(inode, first_block,
+ stop_block - first_block);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto out_stop;
+ }
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ret = ext4_ext_remove_space(inode, first_block,
- stop_block - 1);
- else
- ret = ext4_ind_remove_space(handle, inode, first_block,
- stop_block);
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ ret = ext4_ext_remove_space(inode, first_block,
+ stop_block - 1);
+ else
+ ret = ext4_ind_remove_space(handle, inode, first_block,
+ stop_block);
- up_write(&EXT4_I(inode)->i_data_sem);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
if (IS_SYNC(inode))
ext4_handle_sync(handle);
@@ -3992,7 +3990,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
int inodes_per_block, inode_offset;
iloc->bh = NULL;
- if (!ext4_valid_inum(sb, inode->i_ino))
+ if (inode->i_ino < EXT4_ROOT_INO ||
+ inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
return -EFSCORRUPTED;
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 481d6021f4578d..27d80e549c9ed2 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -26,6 +26,7 @@
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>
@@ -2144,7 +2145,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
* This should tell if fe_len is exactly power of 2
*/
if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
- ac->ac_2order = i - 1;
+ ac->ac_2order = array_index_nospec(i - 1,
+ sb->s_blocksize_bits + 2);
}
/* if stream allocation is enabled, use global goal */
@@ -2445,7 +2447,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
* initialize bb_free to be able to skip
* empty groups without initialization
*/
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
meta_group_info[i]->bb_free =
ext4_free_clusters_after_init(sb, group, desc);
} else {
@@ -2967,7 +2970,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
#endif
ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
ac->ac_b_ex.fe_len);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb,
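
array_index_nospec() is the Spectre-v1 helper from linux/nospec.h: after the architectural bounds check it clamps the index with branchless arithmetic, so a mispredicted branch cannot replay the array access with an out-of-range value. The general pattern, as a minimal sketch:

#include <linux/errno.h>
#include <linux/nospec.h>

static int read_entry(const int *table, size_t nr, size_t idx)
{
	if (idx >= nr)
		return -EINVAL;
	idx = array_index_nospec(idx, nr);	/* clamp even under speculation */
	return table[idx];
}
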
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 0a512aa81bf750..4c9d799955d136 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -48,7 +48,6 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
*/
sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
- mark_buffer_dirty(bh);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 05048fcfd602cd..6b5e2eddd8d703 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -526,9 +526,13 @@ mext_check_arguments(struct inode *orig_inode,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
- if (orig_eof < orig_start + *len - 1)
+ if (orig_eof <= orig_start)
+ *len = 0;
+ else if (orig_eof < orig_start + *len - 1)
*len = orig_eof - orig_start;
- if (donor_eof < donor_start + *len - 1)
+ if (donor_eof <= donor_start)
+ *len = 0;
+ else if (donor_eof < donor_start + *len - 1)
*len = donor_eof - donor_start;
if (!*len) {
ext4_debug("ext4 move extent: len should not be 0 "
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 8dedb23016c838..d9c53b9a051af3 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -124,6 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
if (!is_dx_block && type == INDEX) {
ext4_error_inode(inode, func, line, block,
"directory leaf block found instead of index block");
+ brelse(bh);
return ERR_PTR(-EFSCORRUPTED);
}
if (!ext4_has_metadata_csum(inode->i_sb) ||
@@ -1401,6 +1402,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
goto cleanup_and_exit;
dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
"falling back\n"));
+ ret = NULL;
}
nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb);
if (!nblocks) {
@@ -2833,7 +2835,9 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
list_del_init(&EXT4_I(inode)->i_orphan);
mutex_unlock(&sbi->s_orphan_lock);
}
- }
+ } else
+ brelse(iloc.bh);
+
jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 1f5f4c0e2fcac6..adc4733f27d10c 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -18,6 +18,7 @@
int ext4_resize_begin(struct super_block *sb)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
int ret = 0;
if (!capable(CAP_SYS_RESOURCE))
@@ -28,7 +29,7 @@ int ext4_resize_begin(struct super_block *sb)
* because the user tools have no way of handling this. Probably a
* bad time to do it anyways.
*/
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
ext4_warning(sb, "won't resize using backup superblock at %llu",
(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -454,16 +455,18 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
- if (err)
+ if (err) {
+ brelse(bh);
return err;
+ }
ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
first_cluster, first_cluster - start, count2);
ext4_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (unlikely(err))
return err;
- brelse(bh);
}
return 0;
@@ -600,7 +603,6 @@ handle_bb:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
overhead = ext4_group_overhead_blocks(sb, group);
@@ -613,9 +615,9 @@ handle_bb:
ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
handle_ib:
if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
@@ -630,18 +632,16 @@ handle_ib:
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
}
- bh = NULL;
/* Mark group tables in block bitmap */
for (j = 0; j < GROUP_TABLE_COUNT; j++) {
@@ -680,7 +680,6 @@ handle_ib:
}
out:
- brelse(bh);
err2 = ext4_journal_stop(handle);
if (err2 && !err)
err = err2;
@@ -868,6 +867,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
+ iloc.bh = NULL;
goto exit_inode;
}
brelse(dind);
@@ -919,6 +919,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
sizeof(struct buffer_head *),
GFP_NOFS);
if (!n_group_desc) {
+ brelse(gdb_bh);
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
gdb_num + 1);
@@ -934,8 +935,6 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
kvfree(o_group_desc);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
- if (unlikely(err))
- brelse(gdb_bh);
return err;
}
@@ -1119,8 +1118,10 @@ static void update_backups(struct super_block *sb, sector_t blk_off, char *data,
backup_block, backup_block -
ext4_group_first_block_no(sb, group));
BUFFER_TRACE(bh, "get_write_access");
- if ((err = ext4_journal_get_write_access(handle, bh)))
+ if ((err = ext4_journal_get_write_access(handle, bh))) {
+ brelse(bh);
break;
+ }
lock_buffer(bh);
memcpy(bh->b_data, data, size);
if (rest)
@@ -1626,7 +1627,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
}
if (reserved_gdb || gdb_off == 0) {
- if (ext4_has_feature_resize_inode(sb) ||
+ if (!ext4_has_feature_resize_inode(sb) ||
!le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb,
"No reserved GDT blocks, can't resize");
@@ -1931,7 +1932,7 @@ retry:
return 0;
n_group = ext4_get_group_number(sb, n_blocks_count - 1);
- if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
+ if (n_group >= (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
ext4_warning(sb, "resize would cause inodes_count overflow");
return -EINVAL;
}
@@ -1955,7 +1956,8 @@ retry:
le16_to_cpu(es->s_reserved_gdt_blocks);
n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
n_blocks_count = (ext4_fsblk_t)n_group *
- EXT4_BLOCKS_PER_GROUP(sb);
+ EXT4_BLOCKS_PER_GROUP(sb) +
+ le32_to_cpu(es->s_first_data_block);
n_group--; /* set to last group number */
}
@@ -1982,6 +1984,26 @@ retry:
}
}
+ /*
+ * Make sure the last group has enough space so that it's
+ * guaranteed to have enough space for all metadata blocks
+ * that it might need to hold. (We might not need to store
+ * the inode table blocks in the last block group, but there
+ * will be cases where this might be needed.)
+ */
+ if ((ext4_group_first_block_no(sb, n_group) +
+ ext4_group_overhead_blocks(sb, n_group) + 2 +
+ sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+ n_blocks_count = ext4_group_first_block_no(sb, n_group);
+ n_group--;
+ n_blocks_count_retry = 0;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
+ goto retry;
+ }
+
/* extend the last group */
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
@@ -1998,7 +2020,7 @@ retry:
err = ext4_alloc_flex_bg_array(sb, n_group + 1);
if (err)
- return err;
+ goto out;
err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
if (err)
@@ -2034,6 +2056,10 @@ retry:
n_blocks_count_retry = 0;
free_flex_gd(flex_gd);
flex_gd = NULL;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
goto retry;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0e0438b5ddbe0c..6a7df72cb3da1c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1049,6 +1049,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
ext4_nfs_get_inode);
}
+static int ext4_nfs_commit_metadata(struct inode *inode)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL
+ };
+
+ trace_ext4_nfs_commit_metadata(inode);
+ return ext4_write_inode(inode, &wbc);
+}
+
/*
* Try to release metadata pages (indirect blocks, directories) which are
* mapped via the block device. Since these pages could have journal heads
@@ -1143,6 +1153,7 @@ static const struct export_operations ext4_export_ops = {
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
+ .commit_metadata = ext4_nfs_commit_metadata,
};
enum {
@@ -2102,6 +2113,7 @@ static int ext4_check_descriptors(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
ext4_fsblk_t last_block;
+ ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
ext4_fsblk_t block_bitmap;
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
@@ -2134,6 +2146,14 @@ static int ext4_check_descriptors(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY))
return 0;
}
+ if (block_bitmap >= sb_block + 1 &&
+ block_bitmap <= last_bg_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Block bitmap for group %u overlaps "
+ "block group descriptors", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
+ }
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u not in group "
@@ -2148,6 +2168,14 @@ static int ext4_check_descriptors(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY))
return 0;
}
+ if (inode_bitmap >= sb_block + 1 &&
+ inode_bitmap <= last_bg_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode bitmap for group %u overlaps "
+ "block group descriptors", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
+ }
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u not in group "
@@ -2162,6 +2190,14 @@ static int ext4_check_descriptors(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY))
return 0;
}
+ if (inode_table >= sb_block + 1 &&
+ inode_table <= last_bg_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode table for group %u overlaps "
+ "block group descriptors", i);
+ if (!(sb->s_flags & MS_RDONLY))
+ return 0;
+ }
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -2842,6 +2878,9 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
struct ext4_group_desc *gdp = NULL;
+ if (!ext4_has_group_desc_csum(sb))
+ return ngroups;
+
for (group = 0; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp)
@@ -3451,6 +3490,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto failed_mount;
+ }
if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
ext4_msg(sb, KERN_ERR,
@@ -3515,6 +3561,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+ if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+ ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+ sbi->s_first_ino);
+ goto failed_mount;
+ }
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
@@ -3591,13 +3642,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
- if (le32_to_cpu(es->s_log_cluster_size) >
- (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
- ext4_msg(sb, KERN_ERR,
- "Invalid log cluster size: %u",
- le32_to_cpu(es->s_log_cluster_size));
- goto failed_mount;
- }
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
@@ -3618,10 +3662,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
} else {
if (clustersize != blocksize) {
- ext4_warning(sb, "fragment/cluster size (%d) != "
- "block size (%d)", clustersize,
- blocksize);
- clustersize = blocksize;
+ ext4_msg(sb, KERN_ERR,
+ "fragment/cluster size (%d) != "
+ "block size (%d)", clustersize, blocksize);
+ goto failed_mount;
}
if (sbi->s_blocks_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
@@ -3675,6 +3719,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_blocks_count(es));
goto failed_mount;
}
+ if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
+ (sbi->s_cluster_ratio == 1)) {
+ ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
+ "block is 0 with a 1k block and cluster size");
+ goto failed_mount;
+ }
+
blocks_count = (ext4_blocks_count(es) -
le32_to_cpu(es->s_first_data_block) +
EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -3691,6 +3742,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+ le32_to_cpu(es->s_inodes_count),
+ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+ ret = -EINVAL;
+ goto failed_mount;
+ }
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
@@ -3723,13 +3782,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
+ sbi->s_gdb_count = db_count;
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
ret = -EFSCORRUPTED;
goto failed_mount2;
}
- sbi->s_gdb_count = db_count;
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
@@ -3976,11 +4035,13 @@ no_journal:
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
GFP_KERNEL);
}
@@ -4388,6 +4449,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
if (!sbh || block_device_ejected(sb))
return error;
+
+ /*
+ * The superblock bh should be mapped, but it might not be if the
+ * device was hot-removed. Not much we can do but fail the I/O.
+ */
+ if (!buffer_mapped(sbh))
+ return error;
+
if (buffer_write_io_error(sbh)) {
/*
* Oh, dear. A previous attempt to write the
@@ -5126,9 +5195,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
- iput(qf_inode);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ iput(qf_inode);
return err;
}
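
The three overlap checks added to ext4_check_descriptors() above share one predicate: group-0 metadata (block bitmap, inode bitmap, inode table) must not land inside the group descriptor blocks that immediately follow the superblock, i.e. inside [sb_block + 1, sb_block + ext4_bg_num_gdb()]. A minimal sketch of that predicate (block numbers are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fsblk_t;

/* Sketch of the overlap test: metadata must not fall inside the
 * descriptor blocks after the superblock. num_gdb stands in for
 * ext4_bg_num_gdb(sb, 0). */
static int overlaps_gdt(fsblk_t blk, fsblk_t sb_block, unsigned num_gdb)
{
    fsblk_t last_bg_block = sb_block + num_gdb;

    return blk >= sb_block + 1 && blk <= last_bg_block;
}

int main(void)
{
    /* A 1k-block filesystem: superblock at block 1, say 3 GDT blocks. */
    fsblk_t sb_block = 1;
    unsigned num_gdb = 3;

    printf("bitmap at block 2 overlaps: %d\n",
           overlaps_gdt(2, sb_block, num_gdb));
    printf("bitmap at block 5 overlaps: %d\n",
           overlaps_gdt(5, sb_block, num_gdb));
    return 0;
}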
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index c2ee23acf35947..ae9929d678d6f6 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -277,8 +277,12 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
case attr_pointer_ui:
if (!ptr)
return 0;
- return snprintf(buf, PAGE_SIZE, "%u\n",
- *((unsigned int *) ptr));
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ le32_to_cpup(ptr));
+ else
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ *((unsigned int *) ptr));
case attr_pointer_atomic:
if (!ptr)
return 0;
@@ -311,7 +315,10 @@ static ssize_t ext4_attr_store(struct kobject *kobj,
ret = kstrtoul(skip_spaces(buf), 0, &t);
if (ret)
return ret;
- *((unsigned int *) ptr) = t;
+ if (a->attr_ptr == ptr_ext4_super_block_offset)
+ *((__le32 *) ptr) = cpu_to_le32(t);
+ else
+ *((unsigned int *) ptr) = t;
return len;
case attr_inode_readahead:
return inode_readahead_blks_store(a, sbi, buf, len);
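
The sysfs change above exists because some attr_pointer_ui attributes point straight into the on-disk superblock, whose fields are little-endian __le32, while others point at native unsigned ints; dereferencing a __le32 through an unsigned int * only happens to work on little-endian hosts. A portable user-space illustration of why the le32_to_cpup() path matters:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable equivalent of le32_to_cpup(): assemble the value from
 * bytes so it is correct on both little- and big-endian hosts. */
static uint32_t le32_to_cpu_sketch(const void *p)
{
    const uint8_t *b = p;

    return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
           (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
    /* 0x12345678 stored little-endian, as in the ext4 superblock. */
    uint8_t disk[4] = { 0x78, 0x56, 0x34, 0x12 };
    uint32_t native;

    memcpy(&native, disk, 4);   /* the old, endian-unsafe read */
    printf("raw dereference: 0x%08x\n", (unsigned)native);
    printf("le32_to_cpu:     0x%08x\n", (unsigned)le32_to_cpu_sketch(disk));
    return 0;
}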
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index c7cad05aed27a3..53679716bacad0 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -197,6 +197,8 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
if ((void *)next >= end)
return -EFSCORRUPTED;
+ if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
+ return -EFSCORRUPTED;
e = next;
}
@@ -218,12 +220,12 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
int error;
- if (buffer_verified(bh))
- return 0;
-
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1))
return -EFSCORRUPTED;
+ if (buffer_verified(bh))
+ return 0;
+
if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
return -EFSBADCRC;
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -636,14 +638,20 @@ static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
}
static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+ struct inode *inode)
{
- struct ext4_xattr_entry *last;
+ struct ext4_xattr_entry *last, *next;
size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
/* Compute min_offs and last. */
last = s->first;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ for (; !IS_LAST_ENTRY(last); last = next) {
+ next = EXT4_XATTR_NEXT(last);
+ if ((void *)next >= s->end) {
+ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+ return -EFSCORRUPTED;
+ }
if (!last->e_value_block && last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < min_offs)
@@ -823,7 +831,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
ce = NULL;
}
ea_bdebug(bs->bh, "modifying in-place");
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
ext4_xattr_rehash(header(s->base),
@@ -873,7 +881,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
@@ -1035,23 +1043,9 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
- if (error) {
- if (error == -ENOSPC &&
- ext4_has_inline_data(inode)) {
- error = ext4_try_to_evict_inline_data(handle, inode,
- EXT4_XATTR_LEN(strlen(i->name) +
- EXT4_XATTR_SIZE(i->value_len)));
- if (error)
- return error;
- error = ext4_xattr_ibody_find(inode, i, is);
- if (error)
- return error;
- error = ext4_xattr_set_entry(i, s);
- }
- if (error)
- return error;
- }
+ error = ext4_xattr_set_entry(i, s, inode);
+ if (error)
+ return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -1073,7 +1067,7 @@ static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error)
return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
@@ -1167,6 +1161,8 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
error = ext4_xattr_block_set(handle, inode, &i, &bs);
} else if (error == -ENOSPC) {
if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+ brelse(bs.bh);
+ bs.bh = NULL;
error = ext4_xattr_block_find(inode, &i, &bs);
if (error)
goto cleanup;
@@ -1384,6 +1380,11 @@ retry:
/* Find the entry best suited to be pushed into EA block */
entry = NULL;
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ /* never move system.data out of the inode */
+ if ((last->e_name_len == 4) &&
+ (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+ !memcmp(last->e_name, "data", 4))
+ continue;
total_size =
EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
EXT4_XATTR_LEN(last->e_name_len);
@@ -1489,6 +1490,8 @@ cleanup:
kfree(buffer);
if (is)
brelse(is->iloc.bh);
+ if (bs)
+ brelse(bs->bh);
kfree(is);
kfree(bs);
brelse(bh);
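
A common thread in the xattr hunks above is bounds-checking the entry walk before dereferencing: EXT4_XATTR_NEXT() is compared against the end of the buffer (in ext4_xattr_check_names() and now also in ext4_xattr_set_entry()), and stored name lengths are cross-checked with strnlen(). A simplified, self-contained sketch of the pattern (the entry layout here is a toy, not the real ext4 format):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Toy xattr entry: a length-prefixed name follows the header. */
struct entry {
    unsigned char name_len;
    char name[];
};

#define NEXT(e)    ((struct entry *)((char *)(e) + \
                    sizeof(struct entry) + (e)->name_len))
#define IS_LAST(e) ((e)->name_len == 0)

/* Walk entries, refusing to step past 'end' (the -EFSCORRUPTED case)
 * and rejecting names whose stored length disagrees with strnlen(),
 * i.e. names with an embedded NUL. */
static int check_entries(struct entry *e, void *end)
{
    while (!IS_LAST(e)) {
        struct entry *next = NEXT(e);

        if ((void *)next >= end)
            return -1;              /* truncated or corrupted */
        if (strnlen(e->name, e->name_len) != e->name_len)
            return -1;              /* NUL inside the name */
        e = next;
    }
    return 0;
}

int main(void)
{
    char buf[32] = { 4, 'u', 's', 'e', 'r', 0 };  /* one entry + terminator */

    printf("valid: %d\n",
           check_entries((struct entry *)buf, buf + sizeof(buf)));
    return 0;
}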
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index 83dcf7bfd7b8fa..f0ea9192534301 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -350,12 +350,14 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
return PTR_ERR(p);
clone = f2fs_acl_clone(p, GFP_NOFS);
- if (!clone)
- goto no_mem;
+ if (!clone) {
+ ret = -ENOMEM;
+ goto release_acl;
+ }
ret = f2fs_acl_create_masq(clone, mode);
if (ret < 0)
- goto no_mem_clone;
+ goto release_clone;
if (ret == 0)
posix_acl_release(clone);
@@ -369,11 +371,11 @@ static int f2fs_acl_create(struct inode *dir, umode_t *mode,
return 0;
-no_mem_clone:
+release_clone:
posix_acl_release(clone);
-no_mem:
+release_acl:
posix_acl_release(p);
- return -ENOMEM;
+ return ret;
}
int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
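
The f2fs_acl_create() hunk above is a standard error-path cleanup: the labels are renamed to describe what they release rather than why they were reached, and the function now propagates the helper's return value instead of collapsing every failure to -ENOMEM. A sketch of the shape (malloc/free stand in for the posix_acl objects):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_work(int fail_step)
{
    void *p, *clone;
    int ret;

    p = malloc(16);
    if (!p)
        return -ENOMEM;

    clone = (fail_step == 1) ? NULL : malloc(16);
    if (!clone) {
        ret = -ENOMEM;
        goto release_p;
    }

    ret = (fail_step == 2) ? -EINVAL : 0;   /* e.g. a masq helper failing */
    if (ret < 0)
        goto release_clone;                 /* propagate ret, not -ENOMEM */

    free(clone);
    free(p);
    return 0;

release_clone:
    free(clone);
release_p:
    free(p);
    return ret;
}

int main(void)
{
    printf("%d %d %d\n", do_work(0), do_work(1), do_work(2));
    return 0;
}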
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f661d80474be7a..4b2f609f376d3e 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -58,6 +58,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
.rw = READ_SYNC | REQ_META | REQ_PRIO,
.blk_addr = index,
.encrypted_page = NULL,
+ .is_meta = is_meta,
};
if (unlikely(!is_meta))
@@ -74,8 +75,10 @@ repeat:
fio.page = page;
if (f2fs_submit_page_bio(&fio)) {
- f2fs_put_page(page, 1);
- goto repeat;
+ memset(page_address(page), 0, PAGE_SIZE);
+ f2fs_stop_checkpoint(sbi);
+ f2fs_bug_on(sbi, 1);
+ return page;
}
lock_page(page);
@@ -106,7 +109,8 @@ struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
return __get_meta_page(sbi, index, false);
}
-bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
{
switch (type) {
case META_NAT:
@@ -126,8 +130,20 @@ bool is_valid_blkaddr(struct f2fs_sb_info *sbi, block_t blkaddr, int type)
return false;
break;
case META_POR:
+ case DATA_GENERIC:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
- blkaddr < MAIN_BLKADDR(sbi)))
+ blkaddr < MAIN_BLKADDR(sbi))) {
+ if (type == DATA_GENERIC) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "access invalid blkaddr:%u", blkaddr);
+ WARN_ON(1);
+ }
+ return false;
+ }
+ break;
+ case META_GENERIC:
+ if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+ blkaddr >= MAIN_BLKADDR(sbi)))
return false;
break;
default:
@@ -151,6 +167,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
.type = META,
.rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
.encrypted_page = NULL,
+ .is_meta = (type != META_POR),
};
if (unlikely(type == META_POR))
@@ -158,7 +175,7 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
for (; nrpages-- > 0; blkno++) {
- if (!is_valid_blkaddr(sbi, blkno, type))
+ if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
goto out;
switch (type) {
@@ -601,54 +618,73 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
}
}
-static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
- block_t cp_addr, unsigned long long *version)
+static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
+ struct f2fs_checkpoint **cp_block, struct page **cp_page,
+ unsigned long long *version)
{
- struct page *cp_page_1, *cp_page_2 = NULL;
unsigned long blk_size = sbi->blocksize;
- struct f2fs_checkpoint *cp_block;
- unsigned long long cur_version = 0, pre_version = 0;
- size_t crc_offset;
+ size_t crc_offset = 0;
__u32 crc = 0;
- /* Read the 1st cp block in this CP pack */
- cp_page_1 = get_meta_page(sbi, cp_addr);
+ *cp_page = get_meta_page(sbi, cp_addr);
+ *cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
- /* get the version number */
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_1);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp1;
-
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
- goto invalid_cp1;
+ crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
+ if (crc_offset >= blk_size) {
+ f2fs_put_page(*cp_page, 1);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid crc_offset: %zu", crc_offset);
+ return -EINVAL;
+ }
- pre_version = cur_cp_version(cp_block);
+ crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block
+ + crc_offset)));
+ if (!f2fs_crc_valid(crc, *cp_block, crc_offset)) {
+ f2fs_put_page(*cp_page, 1);
+ f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
+ return -EINVAL;
+ }
- /* Read the 2nd cp block in this CP pack */
- cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
- cp_page_2 = get_meta_page(sbi, cp_addr);
+ *version = cur_cp_version(*cp_block);
+ return 0;
+}
- cp_block = (struct f2fs_checkpoint *)page_address(cp_page_2);
- crc_offset = le32_to_cpu(cp_block->checksum_offset);
- if (crc_offset >= blk_size)
- goto invalid_cp2;
+static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
+ block_t cp_addr, unsigned long long *version)
+{
+ struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
+ struct f2fs_checkpoint *cp_block = NULL;
+ unsigned long long cur_version = 0, pre_version = 0;
+ int err;
- crc = le32_to_cpu(*((__le32 *)((unsigned char *)cp_block + crc_offset)));
- if (!f2fs_crc_valid(crc, cp_block, crc_offset))
- goto invalid_cp2;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_1, version);
+ if (err)
+ return NULL;
+
+ if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+ sbi->blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid cp_pack_total_block_count:%u",
+ le32_to_cpu(cp_block->cp_pack_total_block_count));
+ goto invalid_cp;
+ }
+ pre_version = *version;
- cur_version = cur_cp_version(cp_block);
+ cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
+ err = get_checkpoint_version(sbi, cp_addr, &cp_block,
+ &cp_page_2, version);
+ if (err)
+ goto invalid_cp;
+ cur_version = *version;
if (cur_version == pre_version) {
*version = cur_version;
f2fs_put_page(cp_page_2, 1);
return cp_page_1;
}
-invalid_cp2:
f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
f2fs_put_page(cp_page_1, 1);
return NULL;
}
@@ -696,6 +732,15 @@ int get_valid_checkpoint(struct f2fs_sb_info *sbi)
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
+ if (cur_page == cp1)
+ sbi->cur_cp_pack = 1;
+ else
+ sbi->cur_cp_pack = 2;
+
+ /* Sanity checking of checkpoint */
+ if (sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
if (cp_blks <= 1)
goto done;
@@ -717,6 +762,9 @@ done:
f2fs_put_page(cp2, 1);
return 0;
+free_fail_no_cp:
+ f2fs_put_page(cp1, 1);
+ f2fs_put_page(cp2, 1);
fail_no_cp:
kfree(sbi->ckpt);
return -EINVAL;
@@ -767,24 +815,6 @@ out:
f2fs_trace_pid(page);
}
-void add_dirty_dir_inode(struct inode *inode)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct inode_entry *new =
- f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
- int ret = 0;
-
- new->inode = inode;
- INIT_LIST_HEAD(&new->list);
-
- spin_lock(&sbi->dir_inode_lock);
- ret = __add_dirty_inode(inode, new);
- spin_unlock(&sbi->dir_inode_lock);
-
- if (ret)
- kmem_cache_free(inode_entry_slab, new);
-}
-
void remove_dirty_dir_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -807,12 +837,6 @@ void remove_dirty_dir_inode(struct inode *inode)
stat_dec_dirty_dir(sbi);
spin_unlock(&sbi->dir_inode_lock);
kmem_cache_free(inode_entry_slab, entry);
-
- /* Only from the recovery routine */
- if (is_inode_flag_set(F2FS_I(inode), FI_DELAY_IPUT)) {
- clear_inode_flag(F2FS_I(inode), FI_DELAY_IPUT);
- iput(inode);
- }
}
void sync_dirty_dir_inodes(struct f2fs_sb_info *sbi)
@@ -922,7 +946,6 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct f2fs_nm_info *nm_i = NM_I(sbi);
unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
nid_t last_nid = nm_i->next_scan_nid;
@@ -931,15 +954,6 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
__u32 crc32 = 0;
int i;
int cp_payload_blks = __cp_payload(sbi);
- block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
- bool invalidate = false;
-
- /*
- * This avoids to conduct wrong roll-forward operations and uses
- * metapages, so should be called prior to sync_meta_pages below.
- */
- if (discard_next_dnode(sbi, discard_blk))
- invalidate = true;
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
@@ -1016,6 +1030,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
set_ckpt_flags(ckpt, CP_FSCK_FLAG);
+ /* set this flag to activate crc|cp_ver for recovery */
+ set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
+
/* update SIT/NAT bitmap */
get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));
@@ -1025,7 +1042,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
le32_to_cpu(ckpt->checksum_offset)))
= cpu_to_le32(crc32);
- start_blk = __start_cp_addr(sbi);
+ start_blk = __start_cp_next_addr(sbi);
/* need to wait for end_io results */
wait_on_all_pages_writeback(sbi);
@@ -1073,14 +1090,6 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback(sbi);
- /*
- * invalidate meta page which is used temporarily for zeroing out
- * block at the end of warm node chain.
- */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi), discard_blk,
- discard_blk);
-
release_dirty_inode(sbi);
if (unlikely(f2fs_cp_error(sbi)))
@@ -1088,6 +1097,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
clear_prefree_segments(sbi, cpc);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
+ __set_cp_next_pack(sbi);
}
/*
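
Several checkpoint.c hunks above introduce cur_cp_pack: f2fs keeps two checkpoint packs in adjacent segments and ping-pongs between them, so do_checkpoint() now writes to __start_cp_next_addr() and flips the pack with __set_cp_next_pack() on success. A sketch of the addressing (cp_blkaddr and the segment size are illustrative constants):

#include <stdio.h>

/* Stand-ins for the superblock fields (values are illustrative). */
#define CP_BLKADDR     512u    /* first block of the checkpoint area */
#define BLOCKS_PER_SEG 512u

struct sbi { int cur_cp_pack; };    /* 1 or 2 */

static unsigned start_cp_addr(const struct sbi *s)
{
    return CP_BLKADDR + (s->cur_cp_pack == 2 ? BLOCKS_PER_SEG : 0);
}

static unsigned start_cp_next_addr(const struct sbi *s)
{
    return CP_BLKADDR + (s->cur_cp_pack == 1 ? BLOCKS_PER_SEG : 0);
}

static void set_cp_next_pack(struct sbi *s)
{
    s->cur_cp_pack = (s->cur_cp_pack == 1) ? 2 : 1;
}

int main(void)
{
    struct sbi s = { .cur_cp_pack = 1 };

    /* Write the next checkpoint into the *other* pack, then flip. */
    printf("current pack at %u, writing to %u\n",
           start_cp_addr(&s), start_cp_next_addr(&s));
    set_cp_next_pack(&s);
    printf("after checkpoint, current pack at %u\n", start_cp_addr(&s));
    return 0;
}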
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f6ccb21f286bf4..2b0b671484bde0 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -147,6 +147,10 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
struct bio *bio;
struct page *page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->blk_addr,
+ __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+ return -EFAULT;
+
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
@@ -172,7 +176,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
- verify_block_addr(sbi, fio->blk_addr);
+ verify_block_addr(fio, fio->blk_addr);
down_write(&io->io_rwsem);
@@ -603,7 +607,13 @@ static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
goto unlock_out;
}
- if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+ if (__is_valid_data_blkaddr(dn.data_blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, DATA_GENERIC)) {
+ err = -EFAULT;
+ goto sync_out;
+ }
+
+ if (!is_valid_data_blkaddr(sbi, dn.data_blkaddr)) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -866,6 +876,40 @@ out:
return ret;
}
+struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_crypto_ctx *ctx = NULL;
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct bio *bio;
+
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ return ERR_PTR(-EFAULT);
+
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ ctx = f2fs_get_crypto_ctx(inode);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ /* wait for the page to be moved by cleaning */
+ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ f2fs_release_crypto_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+
+ return bio;
+}
+
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -884,7 +928,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- struct block_device *bdev = inode->i_sb->s_bdev;
struct f2fs_map_blocks map;
map.m_pblk = 0;
@@ -941,6 +984,10 @@ got_it:
SetPageUptodate(page);
goto confused;
}
+
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+ DATA_GENERIC))
+ goto set_error_page;
} else {
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
SetPageUptodate(page);
@@ -958,31 +1005,9 @@ submit_and_realloc:
bio = NULL;
}
if (bio == NULL) {
- struct f2fs_crypto_ctx *ctx = NULL;
-
- if (f2fs_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
-
- ctx = f2fs_get_crypto_ctx(inode);
- if (IS_ERR(ctx))
- goto set_error_page;
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(
- F2FS_I_SB(inode), block_nr);
- }
-
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- f2fs_release_crypto_ctx(ctx);
+ bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ if (IS_ERR(bio))
goto set_error_page;
- }
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1077,11 +1102,17 @@ int do_write_data_page(struct f2fs_io_info *fio)
set_page_writeback(page);
+ if (__is_valid_data_blkaddr(fio->blk_addr) &&
+ !f2fs_is_valid_blkaddr(fio->sbi, fio->blk_addr,
+ DATA_GENERIC)) {
+ err = -EFAULT;
+ goto out_writepage;
+ }
/*
* If current allocation needs SSR,
* it had better in-place writes for updated data.
*/
- if (unlikely(fio->blk_addr != NEW_ADDR &&
+ if (unlikely(is_valid_data_blkaddr(fio->sbi, fio->blk_addr) &&
!is_cold_data(page) &&
need_inplace_update(inode))) {
rewrite_data_page(fio);
@@ -1482,17 +1513,21 @@ put_next:
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_CACHE_SIZE);
} else {
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .type = DATA,
- .rw = READ_SYNC,
- .blk_addr = dn.data_blkaddr,
- .page = page,
- .encrypted_page = NULL,
- };
- err = f2fs_submit_page_bio(&fio);
- if (err)
+ struct bio *bio;
+
+ bio = f2fs_grab_bio(inode, dn.data_blkaddr, 1);
+ if (IS_ERR(bio)) {
+ err = PTR_ERR(bio);
goto fail;
+ }
+
+ if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
+ bio_put(bio);
+ err = -EFAULT;
+ goto fail;
+ }
+
+ submit_bio(READ_SYNC, bio);
lock_page(page);
if (unlikely(!PageUptodate(page))) {
@@ -1503,13 +1538,6 @@ put_next:
f2fs_put_page(page, 1);
goto repeat;
}
-
- /* avoid symlink page */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- err = f2fs_decrypt_one(inode, page);
- if (err)
- goto fail;
- }
}
out_update:
SetPageUptodate(page);
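
The data.c hunks above funnel every block address through f2fs_is_valid_blkaddr() before I/O is issued; conceptually, META_GENERIC addresses must lie in [SEG0_BLKADDR, MAIN_BLKADDR) and DATA_GENERIC addresses in [MAIN_BLKADDR, MAX_BLKADDR). A sketch with illustrative layout boundaries (not real f2fs values):

#include <stdio.h>

typedef unsigned int block_t;

/* Illustrative on-disk layout boundaries. */
#define SEG0_BLKADDR 512u
#define MAIN_BLKADDR 4096u
#define MAX_BLKADDR  65536u

enum { DATA_GENERIC, META_GENERIC };

static int is_valid_blkaddr(block_t blkaddr, int type)
{
    switch (type) {
    case DATA_GENERIC:
        return blkaddr >= MAIN_BLKADDR && blkaddr < MAX_BLKADDR;
    case META_GENERIC:
        return blkaddr >= SEG0_BLKADDR && blkaddr < MAIN_BLKADDR;
    default:
        return 0;
    }
}

int main(void)
{
    /* A data-block address inside the metadata area is rejected, which
     * is what the new f2fs_submit_page_bio() check turns into -EFAULT
     * instead of issuing the I/O. */
    printf("%d %d\n", is_valid_blkaddr(1000, DATA_GENERIC),
                      is_valid_blkaddr(1000, META_GENERIC));
    return 0;
}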
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 60972a559685b3..92a240616f520b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = {
[F2FS_FT_SYMLINK] = DT_LNK,
};
-#define S_SHIFT 12
static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = {
[S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE,
[S_IFDIR >> S_SHIFT] = F2FS_FT_DIR,
@@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode)
de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT];
}
+unsigned char get_de_type(struct f2fs_dir_entry *de)
+{
+ if (de->file_type < F2FS_FT_MAX)
+ return f2fs_filetype_table[de->file_type];
+ return DT_UNKNOWN;
+}
+
static unsigned long dir_block_index(unsigned int level,
int dir_level, unsigned int idx)
{
@@ -519,11 +525,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d,
test_and_set_bit_le(bit_pos + i, (void *)d->bitmap);
}
-/*
- * Caller should grab and release a rwsem by calling f2fs_lock_op() and
- * f2fs_unlock_op().
- */
-int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name,
struct inode *inode, nid_t ino, umode_t mode)
{
unsigned int bit_pos;
@@ -536,28 +538,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
struct f2fs_dentry_block *dentry_blk = NULL;
struct f2fs_dentry_ptr d;
struct page *page = NULL;
- struct f2fs_filename fname;
- struct qstr new_name;
- int slots, err;
-
- err = f2fs_fname_setup_filename(dir, name, 0, &fname);
- if (err)
- return err;
-
- new_name.name = fname_name(&fname);
- new_name.len = fname_len(&fname);
-
- if (f2fs_has_inline_dentry(dir)) {
- err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
- if (!err || err != -EAGAIN)
- goto out;
- else
- err = 0;
- }
+ int slots, err = 0;
level = 0;
- slots = GET_DENTRY_SLOTS(new_name.len);
- dentry_hash = f2fs_dentry_hash(&new_name, NULL);
+ slots = GET_DENTRY_SLOTS(new_name->len);
+ dentry_hash = f2fs_dentry_hash(new_name, NULL);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
@@ -566,10 +551,8 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name,
}
start:
- if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) {
- err = -ENOSPC;
- goto out;
- }
+ if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
+ return -ENOSPC;
/* Increase the depth, if required */
if (level == current_depth)
@@ -583,10 +566,8 @@ start:
for (block = bidx; block <= (bidx + nblock - 1); block++) {
dentry_page = get_new_data_page(dir, NULL, block, true);
- if (IS_ERR(dentry_page)) {
- err = PTR_ERR(dentry_page);
- goto out;
- }
+ if (IS_ERR(dentry_page))
+ return PTR_ERR(dentry_page);
dentry_blk = kmap(dentry_page);
bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
@@ -606,7 +587,7 @@ add_dentry:
if (inode) {
down_write(&F2FS_I(inode)->i_sem);
- page = init_inode_metadata(inode, dir, &new_name, NULL);
+ page = init_inode_metadata(inode, dir, new_name, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto fail;
@@ -616,7 +597,7 @@ add_dentry:
}
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1);
- f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos);
+ f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos);
set_page_dirty(dentry_page);
@@ -638,7 +619,34 @@ fail:
}
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
-out:
+
+ return err;
+}
+
+/*
+ * Caller should grab and release a rwsem by calling f2fs_lock_op() and
+ * f2fs_unlock_op().
+ */
+int __f2fs_add_link(struct inode *dir, const struct qstr *name,
+ struct inode *inode, nid_t ino, umode_t mode)
+{
+ struct f2fs_filename fname;
+ struct qstr new_name;
+ int err;
+
+ err = f2fs_fname_setup_filename(dir, name, 0, &fname);
+ if (err)
+ return err;
+
+ new_name.name = fname_name(&fname);
+ new_name.len = fname_len(&fname);
+
+ err = -EAGAIN;
+ if (f2fs_has_inline_dentry(dir))
+ err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode);
+ if (err == -EAGAIN)
+ err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode);
+
f2fs_fname_free_filename(&fname);
return err;
}
@@ -792,10 +800,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
break;
de = &d->dentry[bit_pos];
- if (de->file_type < F2FS_FT_MAX)
- d_type = f2fs_filetype_table[de->file_type];
- else
- d_type = DT_UNKNOWN;
+ d_type = get_de_type(de);
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
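
get_de_type() above centralizes the bounds-checked translation of the on-disk file_type byte, so a corrupted byte degrades to DT_UNKNOWN instead of indexing past f2fs_filetype_table. A sketch (table contents abbreviated to an illustrative subset):

#include <stdio.h>

/* d_type values as in <dirent.h>; table indices are the on-disk
 * file_type bytes. */
enum { DT_UNKNOWN = 0, DT_DIR = 4, DT_REG = 8, DT_LNK = 10 };
#define FT_MAX 4

static const unsigned char filetype_table[FT_MAX] = {
    [1] = DT_REG, [2] = DT_DIR, [3] = DT_LNK,   /* [0] stays DT_UNKNOWN */
};

static unsigned char get_de_type(unsigned char file_type)
{
    if (file_type < FT_MAX)
        return filetype_table[file_type];
    return DT_UNKNOWN;          /* corrupted byte: fail safe */
}

int main(void)
{
    printf("%u %u\n", get_de_type(2), get_de_type(200));
    return 0;
}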
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 2871576fbca494..2bfce887dce2a9 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -135,7 +135,7 @@ struct cp_control {
};
/*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
*/
enum {
META_CP,
@@ -143,6 +143,8 @@ enum {
META_SIT,
META_SSA,
META_POR,
+ DATA_GENERIC,
+ META_GENERIC,
};
/* for the list of ino */
@@ -684,6 +686,7 @@ struct f2fs_io_info {
block_t blk_addr; /* block address to be written */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
+ bool is_meta; /* whether this I/O borrows the meta inode mapping */
};
#define is_read_io(rw) (((rw) & 1) == READ)
@@ -731,6 +734,7 @@ struct f2fs_sb_info {
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
+ int cur_cp_pack; /* which checkpoint pack (1 or 2) is current */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
@@ -1140,22 +1144,27 @@ static inline void *__bitmap_ptr(struct f2fs_sb_info *sbi, int flag)
static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
- block_t start_addr;
- struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned long long ckpt_version = cur_cp_version(ckpt);
-
- start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
- /*
- * odd numbered checkpoint should at cp segment 0
- * and even segment must be at cp segment 1
- */
- if (!(ckpt_version & 1))
+ if (sbi->cur_cp_pack == 2)
start_addr += sbi->blocks_per_seg;
+ return start_addr;
+}
+
+static inline block_t __start_cp_next_addr(struct f2fs_sb_info *sbi)
+{
+ block_t start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
+ if (sbi->cur_cp_pack == 1)
+ start_addr += sbi->blocks_per_seg;
return start_addr;
}
+static inline void __set_cp_next_pack(struct f2fs_sb_info *sbi)
+{
+ sbi->cur_cp_pack = (sbi->cur_cp_pack == 1) ? 2 : 1;
+}
+
static inline block_t __start_sum_addr(struct f2fs_sb_info *sbi)
{
return le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
@@ -1402,7 +1411,6 @@ enum {
FI_NO_ALLOC, /* should not allocate any blocks */
FI_FREE_NID, /* free allocated nide */
FI_UPDATE_DIR, /* should update inode block for consistency */
- FI_DELAY_IPUT, /* used for the recovery */
FI_NO_EXTENT, /* not to use the extent cache */
FI_INLINE_XATTR, /* used for inline xattr */
FI_INLINE_DATA, /* used for inline data*/
@@ -1641,6 +1649,39 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
(pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) / \
ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
+ (!is_read_io(fio->rw) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "invalid blkaddr: %u, type: %d, run fsck to fix.",
+ blkaddr, type);
+ f2fs_bug_on(sbi, 1);
+ }
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
+{
+ if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+ return false;
+ return true;
+}
+
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ if (!__is_valid_data_blkaddr(blkaddr))
+ return false;
+ verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+ return true;
+}
+
/*
* file.c
*/
@@ -1677,7 +1718,7 @@ struct dentry *f2fs_get_parent(struct dentry *child);
*/
extern unsigned char f2fs_filetype_table[F2FS_FT_MAX];
void set_de_type(struct f2fs_dir_entry *, umode_t);
-
+unsigned char get_de_type(struct f2fs_dir_entry *);
struct f2fs_dir_entry *find_target_dentry(struct f2fs_filename *,
f2fs_hash_t, int *, struct f2fs_dentry_ptr *);
bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *,
@@ -1698,6 +1739,8 @@ void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *,
const struct qstr *, f2fs_hash_t , unsigned int);
+int f2fs_add_regular_entry(struct inode *, const struct qstr *,
+ struct inode *, nid_t, umode_t);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t,
umode_t);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *,
@@ -1718,6 +1761,7 @@ int f2fs_commit_super(struct f2fs_sb_info *, bool);
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
void f2fs_msg(struct super_block *, const char *, const char *, ...);
+int sanity_check_ckpt(struct f2fs_sb_info *sbi);
/*
* hash.c
@@ -1778,7 +1822,6 @@ bool is_checkpointed_data(struct f2fs_sb_info *, block_t);
void refresh_sit_entry(struct f2fs_sb_info *, block_t, block_t);
void clear_prefree_segments(struct f2fs_sb_info *, struct cp_control *);
void release_discard_addrs(struct f2fs_sb_info *);
-bool discard_next_dnode(struct f2fs_sb_info *, block_t);
int npages_for_summary_flush(struct f2fs_sb_info *, bool);
void allocate_new_segments(struct f2fs_sb_info *);
int f2fs_trim_fs(struct f2fs_sb_info *, struct fstrim_range *);
@@ -1810,7 +1853,8 @@ void destroy_segment_manager_caches(void);
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t);
-bool is_valid_blkaddr(struct f2fs_sb_info *, block_t, int);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
int ra_meta_pages(struct f2fs_sb_info *, block_t, int, int, bool);
void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
@@ -1825,7 +1869,6 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
void update_dirty_page(struct inode *, struct page *);
-void add_dirty_dir_inode(struct inode *);
void remove_dirty_dir_inode(struct inode *);
void sync_dirty_dir_inodes(struct f2fs_sb_info *);
void write_checkpoint(struct f2fs_sb_info *, struct cp_control *);
@@ -1864,7 +1907,7 @@ void build_gc_manager(struct f2fs_sb_info *);
/*
* recovery.c
*/
-int recover_fsync_data(struct f2fs_sb_info *);
+int recover_fsync_data(struct f2fs_sb_info *, bool);
bool space_for_roll_forward(struct f2fs_sb_info *);
/*
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 01eed94b01ea6a..bee3bc7a16ac38 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -200,6 +200,9 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
trace_f2fs_sync_file_enter(inode);
+ if (S_ISDIR(inode->i_mode))
+ goto go_write;
+
/* if fdatasync is triggered, let's do in-place-update */
if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
set_inode_flag(fi, FI_NEED_IPU);
@@ -305,13 +308,13 @@ static pgoff_t __get_first_dirty_index(struct address_space *mapping,
return pgofs;
}
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
- int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+ pgoff_t dirty, pgoff_t pgofs, int whence)
{
switch (whence) {
case SEEK_DATA:
if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
- (blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
+ is_valid_data_blkaddr(sbi, blkaddr))
return true;
break;
case SEEK_HOLE:
@@ -374,7 +377,15 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
block_t blkaddr;
blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
- if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+ blkaddr, DATA_GENERIC)) {
+ f2fs_put_dnode(&dn);
+ goto fail;
+ }
+
+ if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+ pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
}
@@ -466,6 +477,11 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
dn->data_blkaddr = NULL_ADDR;
set_data_blkaddr(dn);
+
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ continue;
+
invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
clear_inode_flag(F2FS_I(dn->inode),
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index ad80f916b64d4a..00685a8b141898 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -127,6 +127,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
+ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(dn);
+ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
+ f2fs_msg(fio.sbi->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dn->inode->i_ino, dn->data_blkaddr);
+ return -EINVAL;
+ }
+
f2fs_wait_on_page_writeback(page, DATA);
if (PageUptodate(page))
@@ -367,7 +377,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
* NOTE: ipage is grabbed by caller, but if any error occurs, we should
* release ipage in this function.
*/
-static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry)
{
struct page *page;
@@ -386,6 +396,17 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
if (err)
goto out;
+ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
+ f2fs_put_dnode(&dn);
+ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
+ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
+ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
+ "run fsck to fix.",
+ __func__, dir->i_ino, dn.data_blkaddr);
+ err = -EINVAL;
+ goto out;
+ }
+
f2fs_wait_on_page_writeback(page, DATA);
zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
@@ -428,6 +449,98 @@ out:
return err;
}
+static int f2fs_add_inline_entries(struct inode *dir,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ struct f2fs_dentry_ptr d;
+ unsigned long bit_pos = 0;
+ int err = 0;
+
+ make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
+
+ while (bit_pos < d.max) {
+ struct f2fs_dir_entry *de;
+ struct qstr new_name;
+ nid_t ino;
+ umode_t fake_mode;
+
+ if (!test_bit_le(bit_pos, d.bitmap)) {
+ bit_pos++;
+ continue;
+ }
+
+ de = &d.dentry[bit_pos];
+ new_name.name = d.filename[bit_pos];
+ new_name.len = de->name_len;
+
+ ino = le32_to_cpu(de->ino);
+ fake_mode = get_de_type(de) << S_SHIFT;
+
+ err = f2fs_add_regular_entry(dir, &new_name, NULL,
+ ino, fake_mode);
+ if (err)
+ goto punch_dentry_pages;
+
+ if (unlikely(!de->name_len))
+ d.max = -1;
+
+ bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
+ }
+ return 0;
+punch_dentry_pages:
+ truncate_inode_pages(&dir->i_data, 0);
+ truncate_blocks(dir, 0, false);
+ remove_dirty_dir_inode(dir);
+ return err;
+}
+
+static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ struct f2fs_inline_dentry *backup_dentry;
+ int err;
+
+ backup_dentry = kmalloc(sizeof(struct f2fs_inline_dentry),
+ GFP_F2FS_ZERO);
+ if (!backup_dentry)
+ return -ENOMEM;
+
+ memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA);
+ truncate_inline_inode(ipage, 0);
+
+ unlock_page(ipage);
+
+ err = f2fs_add_inline_entries(dir, backup_dentry);
+ if (err)
+ goto recover;
+
+ lock_page(ipage);
+
+ stat_dec_inline_dir(dir);
+ clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
+ update_inode(dir, ipage);
+ kfree(backup_dentry);
+ return 0;
+recover:
+ lock_page(ipage);
+ memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
+ i_size_write(dir, MAX_INLINE_DATA);
+ update_inode(dir, ipage);
+ f2fs_put_page(ipage, 1);
+
+ kfree(backup_dentry);
+ return err;
+}
+
+static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
+ struct f2fs_inline_dentry *inline_dentry)
+{
+ if (!F2FS_I(dir)->i_dir_level)
+ return f2fs_move_inline_dirents(dir, ipage, inline_dentry);
+ else
+ return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry);
+}
+
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
struct inode *inode, nid_t ino, umode_t mode)
{
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 5528801a5baf3a..89bf8dd7758c23 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -50,13 +50,16 @@ static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
}
}
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+ struct f2fs_inode *ri)
{
block_t addr = le32_to_cpu(ri->i_addr[0]);
- if (addr != NEW_ADDR && addr != NULL_ADDR)
- return true;
- return false;
+ if (!__is_valid_data_blkaddr(addr))
+ return 1;
+ if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+ return -EFAULT;
+ return 0;
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -94,12 +97,57 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
return;
}
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ unsigned long long iblocks;
+
+ iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+ if (!iblocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, iblocks);
+ return false;
+ }
+
+ if (ino_of_node(node_page) != nid_of_node(node_page)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode footer i_ino=%lx, ino,nid: "
+ "[%u, %u] run fsck to fix.",
+ __func__, inode->i_ino,
+ ino_of_node(node_page), nid_of_node(node_page));
+ return false;
+ }
+
+ if (F2FS_I(inode)->extent_tree) {
+ struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+ if (ei->len &&
+ (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+ !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+ DATA_GENERIC))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) extent info [%u, %u, %u] "
+ "is incorrect, run fsck to fix",
+ __func__, inode->i_ino,
+ ei->blk, ei->fofs, ei->len);
+ return false;
+ }
+ }
+ return true;
+}
+
static int do_read_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct page *node_page;
struct f2fs_inode *ri;
+ int err;
/* Check if ino is within scope */
if (check_nid_range(sbi, inode->i_ino)) {
@@ -142,6 +190,11 @@ static int do_read_inode(struct inode *inode)
get_inline_info(fi, ri);
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ return -EINVAL;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
@@ -149,7 +202,12 @@ static int do_read_inode(struct inode *inode)
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
- if (__written_first_block(ri))
+ err = __written_first_block(sbi, ri);
+ if (err < 0) {
+ f2fs_put_page(node_page, 1);
+ return err;
+ }
+ if (!err)
set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
f2fs_put_page(node_page, 1);
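
__written_first_block() above becomes tri-state: 1 means i_addr[0] holds a NEW/NULL sentinel (nothing written yet), 0 means a real, in-range block (the caller sets FI_FIRST_BLOCK_WRITTEN), and -EFAULT means an out-of-range address that aborts the inode read. A sketch of the decision (range constants are illustrative):

#include <errno.h>
#include <stdio.h>

typedef unsigned int block_t;

#define NULL_ADDR 0u
#define NEW_ADDR  0xFFFFFFFFu
/* Illustrative valid data range, standing in for MAIN..MAX. */
#define MAIN_BLKADDR 4096u
#define MAX_BLKADDR  65536u

/* 0: a real, in-range block; 1: not yet written (sentinel);
 * <0: corruption, the caller must abort the inode read. */
static int written_first_block(block_t addr)
{
    if (addr == NULL_ADDR || addr == NEW_ADDR)
        return 1;
    if (addr < MAIN_BLKADDR || addr >= MAX_BLKADDR)
        return -EFAULT;
    return 0;
}

int main(void)
{
    block_t samples[] = { NULL_ADDR, NEW_ADDR, 5000, 100 };

    for (unsigned i = 0; i < 4; i++)
        printf("addr=%u -> %d\n", samples[i],
               written_first_block(samples[i]));
    return 0;
}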
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 7bcbc6e9c40d4f..5823738493329a 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -261,13 +261,11 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
{
struct nat_entry *e;
- down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (!e) {
e = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&e->ni, ne);
}
- up_write(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
@@ -298,8 +296,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
new_blkaddr == NULL_ADDR);
f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
- nat_get_blkaddr(e) != NULL_ADDR &&
+ f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
new_blkaddr == NEW_ADDR);
/* increment version no as node is removed */
@@ -314,7 +311,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
/* change address */
nat_set_blkaddr(e, new_blkaddr);
- if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
+ if (!is_valid_data_blkaddr(sbi, new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
__set_nat_cache_dirty(nm_i, e);
@@ -379,6 +376,8 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
+ down_write(&nm_i->nat_tree_lock);
+
/* Check current segment summary */
mutex_lock(&curseg->curseg_mutex);
i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
@@ -399,6 +398,7 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
cache:
/* cache nat entry */
cache_nat_entry(NM_I(sbi), nid, &ne);
+ up_write(&nm_i->nat_tree_lock);
}
/*
@@ -590,6 +590,7 @@ static void truncate_node(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
+ pgoff_t index;
get_node_info(sbi, dn->nid, &ni);
if (dn->inode->i_blocks == 0) {
@@ -613,10 +614,11 @@ invalidate:
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
+ index = dn->node_page->index;
f2fs_put_page(dn->node_page, 1);
invalidate_mapping_pages(NODE_MAPPING(sbi),
- dn->node_page->index, dn->node_page->index);
+ index, index);
dn->node_page = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
@@ -1341,6 +1343,12 @@ static int f2fs_write_node_page(struct page *page,
return 0;
}
+ if (__is_valid_data_blkaddr(ni.blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
+ up_read(&sbi->node_write);
+ goto redirty_out;
+ }
+
set_page_writeback(page);
fio.blk_addr = ni.blk_addr;
write_node_page(nid, &fio);
@@ -1427,9 +1435,9 @@ static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- struct free_nid *i;
+ struct free_nid *i, *e;
struct nat_entry *ne;
- bool allocated = false;
+ int err = -EINVAL;
if (!available_free_memory(sbi, FREE_NIDS))
return -1;
@@ -1438,40 +1446,58 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
if (unlikely(nid == 0))
return 0;
- if (build) {
- /* do not add allocated nids */
- down_read(&nm_i->nat_tree_lock);
- ne = __lookup_nat_cache(nm_i, nid);
- if (ne &&
- (!get_nat_flag(ne, IS_CHECKPOINTED) ||
- nat_get_blkaddr(ne) != NULL_ADDR))
- allocated = true;
- up_read(&nm_i->nat_tree_lock);
- if (allocated)
- return 0;
- }
-
i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
i->nid = nid;
i->state = NID_NEW;
- if (radix_tree_preload(GFP_NOFS)) {
- kmem_cache_free(free_nid_slab, i);
- return 0;
- }
+ if (radix_tree_preload(GFP_NOFS))
+ goto err;
spin_lock(&nm_i->free_nid_list_lock);
- if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
- spin_unlock(&nm_i->free_nid_list_lock);
- radix_tree_preload_end();
- kmem_cache_free(free_nid_slab, i);
- return 0;
+
+ if (build) {
+ /*
+ * Thread A Thread B
+ * - f2fs_create
+ * - f2fs_new_inode
+ * - alloc_nid
+ * - __insert_nid_to_list(ALLOC_NID_LIST)
+ * - f2fs_balance_fs_bg
+ * - build_free_nids
+ * - __build_free_nids
+ * - scan_nat_page
+ * - add_free_nid
+ * - __lookup_nat_cache
+ * - f2fs_add_link
+ * - init_inode_metadata
+ * - new_inode_page
+ * - new_node_page
+ * - set_node_addr
+ * - alloc_nid_done
+ * - __remove_nid_from_list(ALLOC_NID_LIST)
+ * - __insert_nid_to_list(FREE_NID_LIST)
+ */
+ ne = __lookup_nat_cache(nm_i, nid);
+ if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
+ nat_get_blkaddr(ne) != NULL_ADDR))
+ goto err_out;
+
+ e = __lookup_free_nid_list(nm_i, nid);
+ if (e)
+ goto err_out;
}
+ if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i))
+ goto err_out;
+ err = 0;
list_add_tail(&i->list, &nm_i->free_nid_list);
nm_i->fcnt++;
+err_out:
spin_unlock(&nm_i->free_nid_list_lock);
radix_tree_preload_end();
- return 1;
+err:
+ if (err)
+ kmem_cache_free(free_nid_slab, i);
+ return !err;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
@@ -1532,6 +1558,8 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
+ down_read(&nm_i->nat_tree_lock);
+
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
@@ -1560,6 +1588,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
remove_free_nid(nm_i, nid);
}
mutex_unlock(&curseg->curseg_mutex);
+ up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
@@ -1842,14 +1871,12 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
raw_ne = nat_in_journal(sum, i);
- down_write(&nm_i->nat_tree_lock);
ne = __lookup_nat_cache(nm_i, nid);
if (!ne) {
ne = grab_nat_entry(nm_i, nid);
node_info_from_raw_nat(&ne->ni, &raw_ne);
}
__set_nat_cache_dirty(nm_i, ne);
- up_write(&nm_i->nat_tree_lock);
}
update_nats_in_cursum(sum, -i);
mutex_unlock(&curseg->curseg_mutex);
@@ -1883,7 +1910,6 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
struct f2fs_nat_block *nat_blk;
struct nat_entry *ne, *cur;
struct page *page = NULL;
- struct f2fs_nm_info *nm_i = NM_I(sbi);
/*
* there are two steps to flush nat entries:
@@ -1920,12 +1946,8 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
raw_ne = &nat_blk->entries[nid - start_nid];
}
raw_nat_from_node_info(raw_ne, &ne->ni);
-
- down_write(&NM_I(sbi)->nat_tree_lock);
nat_reset_flag(ne);
__clear_nat_cache_dirty(NM_I(sbi), ne);
- up_write(&NM_I(sbi)->nat_tree_lock);
-
if (nat_get_blkaddr(ne) == NULL_ADDR)
add_free_nid(sbi, nid, false);
}
@@ -1937,9 +1959,7 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, set->entry_cnt);
- down_write(&nm_i->nat_tree_lock);
radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
- up_write(&nm_i->nat_tree_lock);
kmem_cache_free(nat_entry_set_slab, set);
}
@@ -1959,6 +1979,9 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
if (!nm_i->dirty_nat_cnt)
return;
+
+ down_write(&nm_i->nat_tree_lock);
+
/*
* if there is not enough space in the journal to store dirty nat
* entries, remove all entries from journal and merge them
@@ -1967,7 +1990,6 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
remove_nats_in_journal(sbi);
- down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_set(nm_i,
set_idx, SETVEC_SIZE, setvec))) {
unsigned idx;
@@ -1976,12 +1998,13 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
__adjust_nat_entry_set(setvec[idx], &sets,
MAX_NAT_JENTRIES(sum));
}
- up_write(&nm_i->nat_tree_lock);
/* flush dirty nats in nat entry set */
list_for_each_entry_safe(set, tmp, &sets, set_list)
__flush_nat_entry_set(sbi, set);
+ up_write(&nm_i->nat_tree_lock);
+
f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
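
The node.c hunks above widen nat_tree_lock: rather than taking it briefly inside cache_nat_entry() and friends, get_node_info() and flush_nat_entries() now hold it across the whole lookup-then-insert sequence, closing the window where two threads could both miss the cache. A pthread sketch of the post-patch shape (a one-entry cache stands in for the NAT radix tree):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static int cached = -1;             /* toy one-entry "nat cache" */

static int lookup(int nid)  { return cached == nid; }
static void insert(int nid) { cached = nid; }

/* Lookup and insert happen under one write lock, so a concurrent
 * caller cannot observe the miss/insert window. */
static void cache_entry(int nid)
{
    pthread_rwlock_wrlock(&tree_lock);
    if (!lookup(nid))
        insert(nid);
    pthread_rwlock_unlock(&tree_lock);
}

static void *worker(void *arg)
{
    cache_entry(*(int *)arg);
    return NULL;
}

int main(void)
{
    pthread_t a, b;
    int nid = 42;

    pthread_create(&a, NULL, worker, &nid);
    pthread_create(&b, NULL, worker, &nid);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    printf("cached nid: %d\n", cached);
    return 0;
}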
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index e4fffd2d98c4b9..0d6f0e3dc6556d 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -212,6 +212,37 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
f2fs_change_bit(block_off, nm_i->nat_bitmap);
}
+static inline nid_t ino_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.ino);
+}
+
+static inline nid_t nid_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.nid);
+}
+
+static inline unsigned int ofs_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ unsigned flag = le32_to_cpu(rn->footer.flag);
+ return flag >> OFFSET_BIT_SHIFT;
+}
+
+static inline __u64 cpver_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le64_to_cpu(rn->footer.cp_ver);
+}
+
+static inline block_t next_blkaddr_of_node(struct page *node_page)
+{
+ struct f2fs_node *rn = F2FS_NODE(node_page);
+ return le32_to_cpu(rn->footer.next_blkaddr);
+}
+
static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
@@ -242,40 +273,30 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
struct f2fs_node *rn = F2FS_NODE(page);
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = le64_to_cpu(ckpt->checkpoint_ver);
- rn->footer.cp_ver = ckpt->checkpoint_ver;
+ if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ rn->footer.cp_ver = cpu_to_le64(cp_ver);
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
-static inline nid_t ino_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.ino);
-}
-
-static inline nid_t nid_of_node(struct page *node_page)
+static inline bool is_recoverable_dnode(struct page *page)
{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.nid);
-}
-
-static inline unsigned int ofs_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- unsigned flag = le32_to_cpu(rn->footer.flag);
- return flag >> OFFSET_BIT_SHIFT;
-}
-
-static inline unsigned long long cpver_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le64_to_cpu(rn->footer.cp_ver);
-}
+ struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
+ size_t crc_offset = le32_to_cpu(ckpt->checksum_offset);
+ __u64 cp_ver = cur_cp_version(ckpt);
-static inline block_t next_blkaddr_of_node(struct page *node_page)
-{
- struct f2fs_node *rn = F2FS_NODE(node_page);
- return le32_to_cpu(rn->footer.next_blkaddr);
+ if (is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG)) {
+ __u64 crc = le32_to_cpu(*((__le32 *)
+ ((unsigned char *)ckpt + crc_offset)));
+ cp_ver |= (crc << 32);
+ }
+ return cpu_to_le64(cp_ver) == cpver_of_node(page);
}
/*
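
fill_node_footer_blkaddr() and is_recoverable_dnode() above share one encoding: when CP_CRC_RECOVERY_FLAG is set, the checkpoint CRC is folded into the upper 32 bits of the 64-bit cp_ver stored in the node footer, so recovery can match version and CRC in a single compare. A standalone sketch of the packing; the values are made up for illustration:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t cp_ver = 0x12345678;	/* low half: checkpoint version */
		uint32_t crc = 0x9abcdef0;	/* checkpoint CRC */

		cp_ver |= (uint64_t)crc << 32;	/* the packing done above */

		printf("packed : %#018llx\n", (unsigned long long)cp_ver);
		printf("version: %#010llx\n",
		       (unsigned long long)(cp_ver & 0xffffffffULL));
		printf("crc    : %#010x\n", (uint32_t)(cp_ver >> 32));
		return 0;
	}

A dnode written before a crash is considered recoverable only if both halves match the current checkpoint.
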
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index e32f349f341bf8..2878be3e448f73 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -67,7 +67,30 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
return NULL;
}
-static int recover_dentry(struct inode *inode, struct page *ipage)
+static struct fsync_inode_entry *add_fsync_inode(struct list_head *head,
+ struct inode *inode)
+{
+ struct fsync_inode_entry *entry;
+
+ entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
+ if (!entry)
+ return NULL;
+
+ entry->inode = inode;
+ list_add_tail(&entry->list, head);
+
+ return entry;
+}
+
+static void del_fsync_inode(struct fsync_inode_entry *entry)
+{
+ iput(entry->inode);
+ list_del(&entry->list);
+ kmem_cache_free(fsync_entry_slab, entry);
+}
+
+static int recover_dentry(struct inode *inode, struct page *ipage,
+ struct list_head *dir_list)
{
struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
@@ -75,18 +98,29 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
struct qstr name;
struct page *page;
struct inode *dir, *einode;
+ struct fsync_inode_entry *entry;
int err = 0;
- dir = f2fs_iget(inode->i_sb, pino);
- if (IS_ERR(dir)) {
- err = PTR_ERR(dir);
- goto out;
+ entry = get_fsync_inode(dir_list, pino);
+ if (!entry) {
+ dir = f2fs_iget(inode->i_sb, pino);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ goto out;
+ }
+
+ entry = add_fsync_inode(dir_list, dir);
+ if (!entry) {
+ err = -ENOMEM;
+ iput(dir);
+ goto out;
+ }
}
- if (file_enc_name(inode)) {
- iput(dir);
+ dir = entry->inode;
+
+ if (file_enc_name(inode))
return 0;
- }
name.len = le32_to_cpu(raw_inode->i_namelen);
name.name = raw_inode->i_name;
@@ -94,7 +128,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage)
if (unlikely(name.len > F2FS_NAME_LEN)) {
WARN_ON(1);
err = -ENAMETOOLONG;
- goto out_err;
+ goto out;
}
retry:
de = f2fs_find_entry(dir, &name, &page);
@@ -120,23 +154,12 @@ retry:
goto retry;
}
err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
- if (err)
- goto out_err;
-
- if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) {
- iput(dir);
- } else {
- add_dirty_dir_inode(dir);
- set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
- }
goto out;
out_unmap_put:
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
-out_err:
- iput(dir);
out:
f2fs_msg(inode->i_sb, KERN_NOTICE,
"%s: ino = %x, name = %s, dir = %lx, err = %d",
@@ -170,8 +193,8 @@ static void recover_inode(struct inode *inode, struct page *page)
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
+ struct inode *inode;
struct page *page = NULL;
block_t blkaddr;
int err = 0;
@@ -185,12 +208,12 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
while (1) {
struct fsync_inode_entry *entry;
- if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page))
+ if (!is_recoverable_dnode(page))
break;
if (!is_fsync_dnode(page))
@@ -204,27 +227,27 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
break;
}
- /* add this fsync inode to the list */
- entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
- if (!entry) {
- err = -ENOMEM;
- break;
- }
/*
* CP | dnode(F) | inode(DF)
* For this case, we should not give up now.
*/
- entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
- if (IS_ERR(entry->inode)) {
- err = PTR_ERR(entry->inode);
- kmem_cache_free(fsync_entry_slab, entry);
+ inode = f2fs_iget(sbi->sb, ino_of_node(page));
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
if (err == -ENOENT) {
err = 0;
goto next;
}
break;
}
- list_add_tail(&entry->list, head);
+
+ /* add this fsync inode to the list */
+ entry = add_fsync_inode(head, inode);
+ if (!entry) {
+ err = -ENOMEM;
+ iput(inode);
+ break;
+ }
}
entry->blkaddr = blkaddr;
@@ -248,11 +271,8 @@ static void destroy_fsync_dnodes(struct list_head *head)
{
struct fsync_inode_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, head, list) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ list_for_each_entry_safe(entry, tmp, head, list)
+ del_fsync_inode(entry);
}
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
@@ -423,7 +443,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
/* dest is valid block, try to recover from src to dest */
- if (is_valid_blkaddr(sbi, dest, META_POR)) {
+ if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
err = reserve_new_block(&dn);
@@ -459,35 +479,34 @@ out:
return err;
}
-static int recover_data(struct f2fs_sb_info *sbi,
- struct list_head *head, int type)
+static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
+ struct list_head *dir_list)
{
- unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page = NULL;
int err = 0;
block_t blkaddr;
/* get node pages in the current segment */
- curseg = CURSEG_I(sbi, type);
+ curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
while (1) {
struct fsync_inode_entry *entry;
- if (!is_valid_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
ra_meta_pages_cond(sbi, blkaddr);
page = get_tmp_page(sbi, blkaddr);
- if (cp_ver != cpver_of_node(page)) {
+ if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
break;
}
- entry = get_fsync_inode(head, ino_of_node(page));
+ entry = get_fsync_inode(inode_list, ino_of_node(page));
if (!entry)
goto next;
/*
@@ -498,7 +517,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
if (entry->last_inode == blkaddr)
recover_inode(entry->inode, page);
if (entry->last_dentry == blkaddr) {
- err = recover_dentry(entry->inode, page);
+ err = recover_dentry(entry->inode, page, dir_list);
if (err) {
f2fs_put_page(page, 1);
break;
@@ -510,11 +529,8 @@ static int recover_data(struct f2fs_sb_info *sbi,
break;
}
- if (entry->blkaddr == blkaddr) {
- iput(entry->inode);
- list_del(&entry->list);
- kmem_cache_free(fsync_entry_slab, entry);
- }
+ if (entry->blkaddr == blkaddr)
+ del_fsync_inode(entry);
next:
/* check next segment */
blkaddr = next_blkaddr_of_node(page);
@@ -525,12 +541,14 @@ next:
return err;
}
-int recover_fsync_data(struct f2fs_sb_info *sbi)
+int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
struct list_head inode_list;
+ struct list_head dir_list;
block_t blkaddr;
int err;
+ int ret = 0;
bool need_writecp = false;
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
@@ -539,6 +557,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
return -ENOMEM;
INIT_LIST_HEAD(&inode_list);
+ INIT_LIST_HEAD(&dir_list);
/* prevent checkpoint */
mutex_lock(&sbi->cp_mutex);
@@ -547,21 +566,22 @@ int recover_fsync_data(struct f2fs_sb_info *sbi)
/* step #1: find fsynced inode numbers */
err = find_fsync_dnodes(sbi, &inode_list);
- if (err)
+ if (err || list_empty(&inode_list))
goto out;
- if (list_empty(&inode_list))
+ if (check_only) {
+ ret = 1;
goto out;
+ }
need_writecp = true;
/* step #2: recover data */
- err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
+ err = recover_data(sbi, &inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
out:
destroy_fsync_dnodes(&inode_list);
- kmem_cache_destroy(fsync_entry_slab);
/* truncate meta pages to be used by the recovery */
truncate_inode_pages_range(META_MAPPING(sbi),
@@ -573,31 +593,20 @@ out:
}
clear_sbi_flag(sbi, SBI_POR_DOING);
- if (err) {
- bool invalidate = false;
-
- if (discard_next_dnode(sbi, blkaddr))
- invalidate = true;
-
- /* Flush all the NAT/SIT pages */
- while (get_pages(sbi, F2FS_DIRTY_META))
- sync_meta_pages(sbi, META, LONG_MAX);
+ if (err)
+ set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
+ mutex_unlock(&sbi->cp_mutex);
- /* invalidate temporary meta page */
- if (invalidate)
- invalidate_mapping_pages(META_MAPPING(sbi),
- blkaddr, blkaddr);
+ /* let's drop all the directory inodes for clean checkpoint */
+ destroy_fsync_dnodes(&dir_list);
- set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG);
- mutex_unlock(&sbi->cp_mutex);
- } else if (need_writecp) {
+ if (!err && need_writecp) {
struct cp_control cpc = {
.reason = CP_RECOVERY,
};
- mutex_unlock(&sbi->cp_mutex);
write_checkpoint(sbi, &cpc);
- } else {
- mutex_unlock(&sbi->cp_mutex);
}
- return err;
+
+ kmem_cache_destroy(fsync_entry_slab);
+	return ret ? ret : err;
}
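
recover_dentry() now keeps every parent directory it resolves on a dir_list keyed by inode number, so recovering several dentries under the same directory costs one f2fs_iget() instead of one per dentry, and all the references are dropped together at the end of recovery. A userspace sketch of that lookup-or-insert cache with stand-in types; iget/iput here are fakes, not the kernel helpers:

	#include <stdlib.h>

	struct inode { unsigned long ino; int refcount; };

	struct fsync_inode_entry {
		struct fsync_inode_entry *next;
		struct inode *inode;
	};

	static struct inode *iget(unsigned long ino)	/* fake f2fs_iget() */
	{
		struct inode *i = malloc(sizeof(*i));

		i->ino = ino;
		i->refcount = 1;
		return i;
	}

	static void iput(struct inode *i)
	{
		if (--i->refcount == 0)
			free(i);
	}

	/* Look the parent up in dir_list first; take a new reference only
	 * on a miss, mirroring the reworked recover_dentry(). */
	static struct inode *get_dir(struct fsync_inode_entry **dir_list,
				     unsigned long pino)
	{
		struct fsync_inode_entry *e;

		for (e = *dir_list; e; e = e->next)
			if (e->inode->ino == pino)
				return e->inode;	/* cache hit: no iget() */

		e = malloc(sizeof(*e));
		e->inode = iget(pino);
		e->next = *dir_list;
		*dir_list = e;
		return e->inode;
	}

	int main(void)
	{
		struct fsync_inode_entry *dir_list = NULL;
		struct inode *d1 = get_dir(&dir_list, 42);
		struct inode *d2 = get_dir(&dir_list, 42);	/* same inode back */

		/* drop everything, like destroy_fsync_dnodes(&dir_list) */
		while (dir_list) {
			struct fsync_inode_entry *e = dir_list;

			dir_list = e->next;
			iput(e->inode);
			free(e);
		}
		return d1 != d2;
	}
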
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f77b3258454a6f..6802cd754eda08 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -295,6 +295,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi)
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
+ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
+ return;
+
/* try to shrink extent cache when there is no enough memory */
if (!available_free_memory(sbi, EXTENT_CACHE))
f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);
@@ -395,6 +398,9 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->cmd_control_info = fcc;
+ if (!test_opt(sbi, FLUSH_MERGE))
+ return err;
+
fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(fcc->f2fs_issue_flush)) {
@@ -513,28 +519,6 @@ static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}
-bool discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
-{
- int err = -ENOTSUPP;
-
- if (test_opt(sbi, DISCARD)) {
- struct seg_entry *se = get_seg_entry(sbi,
- GET_SEGNO(sbi, blkaddr));
- unsigned int offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
-
- if (f2fs_test_bit(offset, se->discard_map))
- return false;
-
- err = f2fs_issue_discard(sbi, blkaddr, 1);
- }
-
- if (err) {
- update_meta_page(sbi, NULL, blkaddr);
- return true;
- }
- return false;
-}
-
static void __add_discard_entry(struct f2fs_sb_info *sbi,
struct cp_control *cpc, struct seg_entry *se,
unsigned int start, unsigned int end)
@@ -768,7 +752,7 @@ bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
struct seg_entry *se;
bool is_cp = false;
- if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return true;
mutex_lock(&sit_i->sentry_lock);
@@ -1482,7 +1466,7 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
{
struct page *cpage;
- if (blkaddr == NEW_ADDR)
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return;
f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
@@ -2117,7 +2101,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
return restore_curseg_summaries(sbi);
}
-static void build_sit_entries(struct f2fs_sb_info *sbi)
+static int build_sit_entries(struct f2fs_sb_info *sbi)
{
struct sit_info *sit_i = SIT_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
@@ -2126,6 +2110,7 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
unsigned int i, start, end;
unsigned int readed, start_blk = 0;
int nrpages = MAX_BIO_BLOCKS(sbi);
+ int err = 0;
do {
readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT, true);
@@ -2139,36 +2124,62 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
struct f2fs_sit_entry sit;
struct page *page;
- mutex_lock(&curseg->curseg_mutex);
- for (i = 0; i < sits_in_cursum(sum); i++) {
- if (le32_to_cpu(segno_in_journal(sum, i))
- == start) {
- sit = sit_in_journal(sum, i);
- mutex_unlock(&curseg->curseg_mutex);
- goto got_it;
- }
- }
- mutex_unlock(&curseg->curseg_mutex);
-
page = get_current_sit_page(sbi, start);
sit_blk = (struct f2fs_sit_block *)page_address(page);
sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
f2fs_put_page(page, 1);
-got_it:
- check_block_count(sbi, start, &sit);
+
+ err = check_block_count(sbi, start, &sit);
+ if (err)
+ return err;
seg_info_from_raw_sit(se, &sit);
/* build discard map only one time */
memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
sbi->discard_blks += sbi->blocks_per_seg - se->valid_blocks;
- if (sbi->segs_per_sec > 1) {
- struct sec_entry *e = get_sec_entry(sbi, start);
- e->valid_blocks += se->valid_blocks;
- }
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks;
}
start_blk += readed;
} while (start_blk < sit_blk_cnt);
+
+ mutex_lock(&curseg->curseg_mutex);
+ for (i = 0; i < sits_in_cursum(sum); i++) {
+ struct f2fs_sit_entry sit;
+ struct seg_entry *se;
+ unsigned int old_valid_blocks;
+
+ start = le32_to_cpu(segno_in_journal(sum, i));
+ if (start >= MAIN_SEGS(sbi)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong journal entry on segno %u",
+ start);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ err = -EINVAL;
+ break;
+ }
+
+ se = &sit_i->sentries[start];
+ sit = sit_in_journal(sum, i);
+
+ old_valid_blocks = se->valid_blocks;
+
+ err = check_block_count(sbi, start, &sit);
+ if (err)
+ break;
+ seg_info_from_raw_sit(se, &sit);
+
+ memcpy(se->discard_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
+ sbi->discard_blks += old_valid_blocks - se->valid_blocks;
+
+ if (sbi->segs_per_sec > 1)
+ get_sec_entry(sbi, start)->valid_blocks +=
+ se->valid_blocks - old_valid_blocks;
+ }
+ mutex_unlock(&curseg->curseg_mutex);
+ return err;
}
static void init_free_segmap(struct f2fs_sb_info *sbi)
@@ -2313,7 +2324,7 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sm_info->sit_entry_set);
- if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
+ if (!f2fs_readonly(sbi->sb)) {
err = create_flush_cmd_control(sbi);
if (err)
return err;
@@ -2330,7 +2341,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
return err;
/* reinit free segmap based on SIT */
- build_sit_entries(sbi);
+ err = build_sit_entries(sbi);
+ if (err)
+ return err;
init_free_segmap(sbi);
err = build_dirty_segmap(sbi);
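
build_sit_entries() was restructured into two passes: the raw SIT blocks are read first, then the journalled entries are overlaid on top, each one bounds-checked, with -EINVAL propagated up through build_segment_manager() instead of the old crash paths. A toy version of the disk-then-journal overlay, with one deliberately corrupt journal entry; the segment counts are invented:

	#include <stdio.h>

	#define NSEGS 8

	struct journal_ent { unsigned int segno; int vblocks; };

	int main(void)
	{
		/* Toy SIT: valid-block counts per segment, as on disk. */
		int sit_disk[NSEGS] = { 3, 5, 0, 7, 2, 0, 1, 4 };
		/* Journalled updates override the disk copy; one is bad. */
		struct journal_ent journal[] = { { 1, 6 }, { 4, 0 }, { 99, 9 } };
		int sit[NSEGS];
		unsigned int i, s;

		/* Pass 1: take everything from the on-disk SIT blocks. */
		for (s = 0; s < NSEGS; s++)
			sit[s] = sit_disk[s];

		/* Pass 2: overlay the journal, validating each entry the
		 * way the reworked build_sit_entries() does. */
		for (i = 0; i < sizeof(journal) / sizeof(journal[0]); i++) {
			if (journal[i].segno >= NSEGS) {
				fprintf(stderr,
					"Wrong journal entry on segno %u\n",
					journal[i].segno);
				return 1;	/* -EINVAL in the kernel */
			}
			sit[journal[i].segno] = journal[i].vblocks;
		}

		for (s = 0; s < NSEGS; s++)
			printf("seg %u: %d valid blocks\n", s, sit[s]);
		return 0;
	}
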
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index ee44d346ea4414..08b08ae6ba9dee 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -17,6 +17,8 @@
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
+#define F2FS_MIN_SEGMENTS 9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
+
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno) (segno + free_i->start_segno)
@@ -46,13 +48,19 @@
(secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno / \
sbi->segs_per_sec)) \
-#define MAIN_BLKADDR(sbi) (SM_I(sbi)->main_blkaddr)
-#define SEG0_BLKADDR(sbi) (SM_I(sbi)->seg0_blkaddr)
+#define MAIN_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->main_blkaddr : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->main_blkaddr))
+#define SEG0_BLKADDR(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->seg0_blkaddr : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment0_blkaddr))
#define MAIN_SEGS(sbi) (SM_I(sbi)->main_segments)
#define MAIN_SECS(sbi) (sbi->total_sections)
-#define TOTAL_SEGS(sbi) (SM_I(sbi)->segment_count)
+#define TOTAL_SEGS(sbi) \
+ (SM_I(sbi) ? SM_I(sbi)->segment_count : \
+ le32_to_cpu(F2FS_RAW_SUPER(sbi)->segment_count))
#define TOTAL_BLKS(sbi) (TOTAL_SEGS(sbi) << sbi->log_blocks_per_seg)
#define MAX_BLKADDR(sbi) (SEG0_BLKADDR(sbi) + TOTAL_BLKS(sbi))
@@ -72,7 +80,7 @@
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))
#define GET_SEGNO(sbi, blk_addr) \
- (((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ? \
+ ((!is_valid_data_blkaddr(sbi, blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno) \
@@ -381,6 +389,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
if (test_and_clear_bit(segno, free_i->free_segmap)) {
free_i->free_segments++;
+ if (IS_CURSEC(sbi, secno))
+ goto skip_free;
next = find_next_bit(free_i->free_segmap,
start_segno + sbi->segs_per_sec, start_segno);
if (next >= start_segno + sbi->segs_per_sec) {
@@ -388,6 +398,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
free_i->free_sections++;
}
}
+skip_free:
spin_unlock(&free_i->segmap_lock);
}
@@ -571,16 +582,20 @@ static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
f2fs_bug_on(sbi, segno > TOTAL_SEGS(sbi) - 1);
}
-static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+static inline void verify_block_addr(struct f2fs_io_info *fio, block_t blk_addr)
{
- f2fs_bug_on(sbi, blk_addr < SEG0_BLKADDR(sbi)
- || blk_addr >= MAX_BLKADDR(sbi));
+ struct f2fs_sb_info *sbi = fio->sbi;
+
+ if (__is_meta_io(fio))
+ verify_blkaddr(sbi, blk_addr, META_GENERIC);
+ else
+ verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
}
/*
* Summary block is always treated as an invalid block
*/
-static inline void check_block_count(struct f2fs_sb_info *sbi,
+static inline int check_block_count(struct f2fs_sb_info *sbi,
int segno, struct f2fs_sit_entry *raw_sit)
{
#ifdef CONFIG_F2FS_CHECK_FS
@@ -602,11 +617,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
cur_pos = next_pos;
is_valid = !is_valid;
} while (cur_pos < sbi->blocks_per_seg);
- BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
+
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Mismatch valid blocks %d vs. %d",
+ GET_SIT_VBLOCKS(raw_sit), valid_blocks);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -EINVAL;
+ }
#endif
/* check segment usage, and check boundary of a given segment number */
- f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
- || segno > TOTAL_SEGS(sbi) - 1);
+ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+ || segno > TOTAL_SEGS(sbi) - 1)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong valid blocks %d or segno %u",
+ GET_SIT_VBLOCKS(raw_sit), segno);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ return -EINVAL;
+ }
+ return 0;
}
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
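
check_block_count() changes from void with BUG_ON()s to an int return: a corrupted SIT entry now logs, flags SBI_NEED_FSCK, and unwinds through the mount path rather than panicking the machine. A compact model of the fail-soft check; the limits below are placeholders:

	#include <errno.h>
	#include <stdio.h>

	/* Report, let the caller flag fsck, and return -EINVAL instead
	 * of crashing -- the spirit of the new check_block_count(). */
	static int check_block_count(unsigned int segno, unsigned int vblocks,
				     unsigned int blocks_per_seg,
				     unsigned int total_segs)
	{
		if (vblocks > blocks_per_seg || segno > total_segs - 1) {
			fprintf(stderr, "Wrong valid blocks %u or segno %u\n",
				vblocks, segno);
			return -EINVAL;
		}
		return 0;
	}

	int main(void)
	{
		printf("%d\n", check_block_count(3, 100, 512, 1024));	/* 0 */
		printf("%d\n", check_block_count(2000, 10, 512, 1024));	/* -EINVAL */
		return 0;
	}
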
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 4f666368aa85cd..dbd7adff8b5ad8 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -994,6 +994,8 @@ static inline bool sanity_check_area_boundary(struct super_block *sb,
static int sanity_check_raw_super(struct super_block *sb,
struct f2fs_super_block *raw_super)
{
+ block_t segment_count, segs_per_sec, secs_per_zone;
+ block_t total_sections, blocks_per_seg;
unsigned int blocksize;
if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
@@ -1047,6 +1049,68 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
+ segment_count = le32_to_cpu(raw_super->segment_count);
+ segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
+ secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
+ total_sections = le32_to_cpu(raw_super->section_count);
+
+ /* blocks_per_seg should be 512, given the above check */
+ blocks_per_seg = 1 << le32_to_cpu(raw_super->log_blocks_per_seg);
+
+ if (segment_count > F2FS_MAX_SEGMENT ||
+ segment_count < F2FS_MIN_SEGMENTS) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid segment count (%u)",
+ segment_count);
+ return 1;
+ }
+
+ if (total_sections > segment_count ||
+ total_sections < F2FS_MIN_SEGMENTS ||
+ segs_per_sec > segment_count || !segs_per_sec) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid segment/section count (%u, %u x %u)",
+ segment_count, total_sections, segs_per_sec);
+ return 1;
+ }
+
+ if ((segment_count / segs_per_sec) < total_sections) {
+ f2fs_msg(sb, KERN_INFO,
+ "Small segment_count (%u < %u * %u)",
+ segment_count, segs_per_sec, total_sections);
+ return 1;
+ }
+
+ if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong segment_count / block_count (%u > %llu)",
+ segment_count, le64_to_cpu(raw_super->block_count));
+ return 1;
+ }
+
+ if (secs_per_zone > total_sections || !secs_per_zone) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong secs_per_zone / total_sections (%u, %u)",
+ secs_per_zone, total_sections);
+ return 1;
+ }
+ if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION) {
+ f2fs_msg(sb, KERN_INFO,
+ "Corrupted extension count (%u > %u)",
+ le32_to_cpu(raw_super->extension_count),
+ F2FS_MAX_EXTENSION);
+ return 1;
+ }
+
+ if (le32_to_cpu(raw_super->cp_payload) >
+ (blocks_per_seg - F2FS_CP_PACKS)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Insane cp_payload (%u > %u)",
+ le32_to_cpu(raw_super->cp_payload),
+ blocks_per_seg - F2FS_CP_PACKS);
+ return 1;
+ }
+
/* check reserved ino info */
if (le32_to_cpu(raw_super->node_ino) != 1 ||
le32_to_cpu(raw_super->meta_ino) != 2 ||
@@ -1059,13 +1123,6 @@ static int sanity_check_raw_super(struct super_block *sb,
return 1;
}
- if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
- f2fs_msg(sb, KERN_INFO,
- "Invalid segment count (%u)",
- le32_to_cpu(raw_super->segment_count));
- return 1;
- }
-
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sb, raw_super))
return 1;
@@ -1073,24 +1130,53 @@ static int sanity_check_raw_super(struct super_block *sb,
return 0;
}
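
The new sanity_check_raw_super() block cross-checks the superblock geometry fields against each other: segment_count must cover total_sections sections of segs_per_sec segments each, and, since log_blocks_per_seg has already been validated (512 blocks per segment), segment_count can be at most block_count >> 9. A worked example with invented numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Invented geometry, not from a real image. */
		uint64_t block_count = 4u * 1024 * 1024; /* 16 GiB of 4K blocks */
		uint32_t segment_count = 8000;
		uint32_t segs_per_sec = 4;
		uint32_t sections = 1900;

		/* 2^9 = 512 blocks per segment, hence the >> 9 bound. */
		if (segment_count > (block_count >> 9)) {
			printf("bad: segment_count %u > %llu\n", segment_count,
			       (unsigned long long)(block_count >> 9));
			return 1;
		}

		/* Every section needs segs_per_sec whole segments. */
		if (segment_count / segs_per_sec < sections) {
			printf("bad: %u segments cannot cover %u x %u\n",
			       segment_count, sections, segs_per_sec);
			return 1;
		}

		printf("geometry ok: %u segs, %u sections of %u segs each\n",
		       segment_count, sections, segs_per_sec);
		return 0;
	}
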
-static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
+int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
unsigned int total, fsmeta;
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
+ unsigned int ovp_segments, reserved_segments;
unsigned int main_segs, blocks_per_seg;
+ unsigned int sit_segs, nat_segs;
+ unsigned int sit_bitmap_size, nat_bitmap_size;
+ unsigned int log_blocks_per_seg;
+ unsigned int segment_count_main;
+ unsigned int cp_pack_start_sum, cp_payload;
+ block_t user_block_count;
int i;
total = le32_to_cpu(raw_super->segment_count);
fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
+ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
+ fsmeta += sit_segs;
+ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
+ fsmeta += nat_segs;
fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
if (unlikely(fsmeta >= total))
return 1;
+ ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
+ reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
+
+ if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+ ovp_segments == 0 || reserved_segments == 0)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong layout: check mkfs.f2fs version");
+ return 1;
+ }
+
+ user_block_count = le64_to_cpu(ckpt->user_block_count);
+ segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ if (!user_block_count || user_block_count >=
+ segment_count_main << log_blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong user_block_count: %u", user_block_count);
+ return 1;
+ }
+
main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
@@ -1105,6 +1191,28 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi)
return 1;
}
+ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
+ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
+
+ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
+ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong bitmap size: sit: %u, nat:%u",
+ sit_bitmap_size, nat_bitmap_size);
+ return 1;
+ }
+
+ cp_pack_start_sum = __start_sum_addr(sbi);
+ cp_payload = __cp_payload(sbi);
+ if (cp_pack_start_sum < cp_payload + 1 ||
+ cp_pack_start_sum > blocks_per_seg - 1 -
+ NR_CURSEG_TYPE) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong cp_pack_start_sum: %u",
+ cp_pack_start_sum);
+ return 1;
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
@@ -1341,13 +1449,6 @@ try_onemore:
goto free_meta_inode;
}
- /* sanity checking of checkpoint */
- err = -EINVAL;
- if (sanity_check_ckpt(sbi)) {
- f2fs_msg(sb, KERN_ERR, "Invalid F2FS checkpoint");
- goto free_cp;
- }
-
sbi->total_valid_node_count =
le32_to_cpu(sbi->ckpt->valid_node_count);
sbi->total_valid_inode_count =
@@ -1447,14 +1548,27 @@ try_onemore:
if (need_fsck)
set_sbi_flag(sbi, SBI_NEED_FSCK);
- err = recover_fsync_data(sbi);
- if (err) {
+ if (!retry)
+ goto skip_recovery;
+
+ err = recover_fsync_data(sbi, false);
+ if (err < 0) {
need_fsck = true;
f2fs_msg(sb, KERN_ERR,
"Cannot recover all fsync data errno=%ld", err);
goto free_kobj;
}
+ } else {
+ err = recover_fsync_data(sbi, true);
+
+ if (!f2fs_readonly(sb) && err > 0) {
+ err = -EINVAL;
+ f2fs_msg(sb, KERN_ERR,
+ "Need to recover fsync data");
+ goto free_kobj;
+ }
}
+skip_recovery:
/* recover_fsync_data() cleared this already */
clear_sbi_flag(sbi, SBI_POR_DOING);
@@ -1500,7 +1614,6 @@ free_nm:
destroy_node_manager(sbi);
free_sm:
destroy_segment_manager(sbi);
-free_cp:
kfree(sbi->ckpt);
free_meta_inode:
make_bad_inode(sbi->meta_inode);
@@ -1566,6 +1679,12 @@ static int __init init_f2fs_fs(void)
{
int err;
+ if (PAGE_SIZE != F2FS_BLKSIZE) {
+ printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
+ PAGE_SIZE, F2FS_BLKSIZE);
+ return -EINVAL;
+ }
+
f2fs_build_trace_ios();
err = init_inodecache();
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 93fc62232ec21e..9ae2c4d7e92102 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -224,7 +224,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
{
struct super_block *sb = inode->i_sb;
- const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
+ struct msdos_sb_info *sbi = MSDOS_SB(sb);
+ const int limit = sb->s_maxbytes >> sbi->cluster_bits;
struct fat_entry fatent;
struct fat_cache_id cid;
int nr;
@@ -233,6 +234,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
*fclus = 0;
*dclus = MSDOS_I(inode)->i_start;
+ if (!fat_valid_entry(sbi, *dclus)) {
+ fat_fs_error_ratelimit(sb,
+ "%s: invalid start cluster (i_pos %lld, start %08x)",
+ __func__, MSDOS_I(inode)->i_pos, *dclus);
+ return -EIO;
+ }
if (cluster == 0)
return 0;
@@ -249,9 +256,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
/* prevent the infinite loop of cluster chain */
if (*fclus > limit) {
fat_fs_error_ratelimit(sb,
- "%s: detected the cluster chain loop"
- " (i_pos %lld)", __func__,
- MSDOS_I(inode)->i_pos);
+ "%s: detected the cluster chain loop (i_pos %lld)",
+ __func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
}
@@ -261,9 +267,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
goto out;
else if (nr == FAT_ENT_FREE) {
fat_fs_error_ratelimit(sb,
- "%s: invalid cluster chain (i_pos %lld)",
- __func__,
- MSDOS_I(inode)->i_pos);
+ "%s: invalid cluster chain (i_pos %lld)",
+ __func__, MSDOS_I(inode)->i_pos);
nr = -EIO;
goto out;
} else if (nr == FAT_ENT_EOF) {
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index be5e15323bab45..1849b1adb6b966 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -344,6 +344,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
fatent->fat_inode = NULL;
}
+static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
+{
+ return FAT_START_ENT <= entry && entry < sbi->max_cluster;
+}
+
extern void fat_ent_access_init(struct super_block *sb);
extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
int entry);
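
The new fat_valid_entry() helper centralizes the cluster-range check (clusters 0 and 1 are reserved, so valid entries run from FAT_START_ENT to max_cluster - 1) that was previously open-coded in three places, and fat_get_cluster() starts using it to reject a corrupt i_start before walking the chain. A standalone sketch; max_cluster here is an arbitrary FAT16-ish value, not real volume state:

	#include <stdbool.h>
	#include <stdio.h>

	#define FAT_START_ENT 2	/* clusters 0 and 1 are reserved on FAT */

	/* Stand-in for the helper; in the kernel max_cluster lives in
	 * msdos_sb_info. */
	static bool fat_valid_entry(unsigned int max_cluster, int entry)
	{
		return FAT_START_ENT <= entry && entry < (int)max_cluster;
	}

	int main(void)
	{
		unsigned int max_cluster = 65525;
		int probes[] = { -1, 0, 1, 2, 65524, 65525 };
		unsigned int i;

		for (i = 0; i < sizeof(probes) / sizeof(probes[0]); i++)
			printf("entry %6d -> %s\n", probes[i],
			       fat_valid_entry(max_cluster, probes[i]) ?
			       "ok" : "-EIO");
		return 0;
	}
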
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 8226557130a2fc..e3fc477728b32e 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = entry + (entry >> 1);
- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+ WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
@@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
{
struct msdos_sb_info *sbi = MSDOS_SB(sb);
int bytes = (entry << sbi->fatent_shift);
- WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
+ WARN_ON(!fat_valid_entry(sbi, entry));
*offset = bytes & (sb->s_blocksize - 1);
*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
@@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
int err, offset;
sector_t blocknr;
- if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
+ if (!fat_valid_entry(sbi, entry)) {
fatent_brelse(fatent);
fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
return -EIO;
@@ -681,6 +681,7 @@ int fat_count_free_clusters(struct super_block *sb)
if (ops->ent_get(&fatent) == FAT_ENT_FREE)
free++;
} while (fat_ent_next(sbi, &fatent));
+ cond_resched();
}
sbi->free_clusters = free;
sbi->free_clus_valid = 1;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index cf644d52c0cf28..3f12d99030faa4 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -613,13 +613,21 @@ static void fat_set_state(struct super_block *sb,
brelse(bh);
}
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+ if (opts->iocharset != fat_default_iocharset) {
+ /* Note: opts->iocharset can be NULL here */
+ kfree(opts->iocharset);
+ opts->iocharset = fat_default_iocharset;
+ }
+}
+
static void delayed_free(struct rcu_head *p)
{
struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
unload_nls(sbi->nls_disk);
unload_nls(sbi->nls_io);
- if (sbi->options.iocharset != fat_default_iocharset)
- kfree(sbi->options.iocharset);
+ fat_reset_iocharset(&sbi->options);
kfree(sbi);
}
@@ -1034,7 +1042,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
opts->fs_fmask = opts->fs_dmask = current_umask();
opts->allow_utime = -1;
opts->codepage = fat_default_codepage;
- opts->iocharset = fat_default_iocharset;
+ fat_reset_iocharset(opts);
if (is_vfat) {
opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
opts->rodir = 0;
@@ -1146,7 +1154,12 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
case Opt_time_offset:
if (match_int(&args[0], &option))
return -EINVAL;
- if (option < -12 * 60 || option > 12 * 60)
+ /*
+			 * GMT+-12 zones may have DST corrections, so at least
+			 * a 13-hour difference is needed. Make the limit 24
+			 * just in case someone invents something unusual.
+ */
+ if (option < -24 * 60 || option > 24 * 60)
return -EINVAL;
opts->tz_set = 1;
opts->time_offset = option;
@@ -1184,8 +1197,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
/* vfat specific */
case Opt_charset:
- if (opts->iocharset != fat_default_iocharset)
- kfree(opts->iocharset);
+ fat_reset_iocharset(opts);
iocharset = match_strdup(&args[0]);
if (!iocharset)
return -ENOMEM;
@@ -1776,8 +1788,7 @@ out_fail:
iput(fat_inode);
unload_nls(sbi->nls_io);
unload_nls(sbi->nls_disk);
- if (sbi->options.iocharset != fat_default_iocharset)
- kfree(sbi->options.iocharset);
+ fat_reset_iocharset(&sbi->options);
sb->s_fs_info = NULL;
kfree(sbi);
return error;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 7a182c87f37805..ab1d7f35f6c2b7 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -715,6 +715,9 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
if (awaken)
wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
+ if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+
/* Prevent a race with our last child, which has to signal EV_CLEARED
* before dropping our spinlock.
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index de67745e1cd7d3..77946d6f617d01 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -66,7 +66,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
ASSERT(op->processor != NULL);
ASSERT(fscache_object_is_available(op->object));
ASSERTCMP(atomic_read(&op->usage), >, 0);
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+ op->state, ==, FSCACHE_OP_ST_CANCELLED);
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
@@ -481,7 +482,8 @@ void fscache_put_operation(struct fscache_operation *op)
struct fscache_cache *cache;
_enter("{OBJ%x OP%x,%d}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+ op->object ? op->object->debug_id : 0,
+ op->debug_id, atomic_read(&op->usage));
ASSERTCMP(atomic_read(&op->usage), >, 0);
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index f863ac6647ac35..89a4b231e79cb5 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
if (!dentry)
return NULL;
- fc->ctl_dentry[fc->ctl_ndents++] = dentry;
inode = new_inode(fuse_control_sb);
- if (!inode)
+ if (!inode) {
+ dput(dentry);
return NULL;
+ }
inode->i_ino = get_next_ino();
inode->i_mode = mode;
@@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
set_nlink(inode, nlink);
inode->i_private = fc;
d_add(dentry, inode);
+
+ fc->ctl_dentry[fc->ctl_ndents++] = dentry;
+
return dentry;
}
@@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
for (i = fc->ctl_ndents - 1; i >= 0; i--) {
struct dentry *dentry = fc->ctl_dentry[i];
d_inode(dentry)->i_private = NULL;
- d_drop(dentry);
+ if (!i) {
+ /* Get rid of submounts: */
+ d_invalidate(dentry);
+ }
dput(dentry);
}
drop_nlink(d_inode(fuse_control_sb->s_root));
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index fbfec06b054da9..fb0f6f6997fd45 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -145,6 +145,16 @@ static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
return !fc->initialized || (for_background && fc->blocked);
}
+static void fuse_drop_waiting(struct fuse_conn *fc)
+{
+ if (fc->connected) {
+ atomic_dec(&fc->num_waiting);
+ } else if (atomic_dec_and_test(&fc->num_waiting)) {
+ /* wake up aborters */
+ wake_up_all(&fc->blocked_waitq);
+ }
+}
+
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
bool for_background)
{
@@ -191,7 +201,7 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
return req;
out:
- atomic_dec(&fc->num_waiting);
+ fuse_drop_waiting(fc);
return ERR_PTR(err);
}
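
fuse_drop_waiting() gives num_waiting a second job: while the connection is live it is a plain counter, but once the connection is aborted the last request to drop it must wake whoever sleeps in fuse_wait_aborted(). A pthread sketch of the drop-and-wake handshake, assuming a mutex where the kernel uses atomics plus a waitqueue:

	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t blocked_waitq = PTHREAD_COND_INITIALIZER;
	static int num_waiting;
	static bool connected = true;

	static void fuse_drop_waiting(void)
	{
		pthread_mutex_lock(&lock);
		if (--num_waiting == 0 && !connected)
			pthread_cond_broadcast(&blocked_waitq); /* wake aborters */
		pthread_mutex_unlock(&lock);
	}

	static void fuse_wait_aborted(void)
	{
		pthread_mutex_lock(&lock);
		while (num_waiting != 0)
			pthread_cond_wait(&blocked_waitq, &lock);
		pthread_mutex_unlock(&lock);
	}

	int main(void)
	{
		num_waiting = 1;
		connected = false;	/* abort has happened */
		fuse_drop_waiting();	/* last drop wakes the aborter */
		fuse_wait_aborted();	/* returns at once: count is zero */
		return 0;
	}

The kernel gets the same effect locklessly with atomic_dec_and_test(); the invariant is identical: once the connection is down, the final decrement is responsible for the wakeup.
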
@@ -298,7 +308,7 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
if (test_bit(FR_WAITING, &req->flags)) {
__clear_bit(FR_WAITING, &req->flags);
- atomic_dec(&fc->num_waiting);
+ fuse_drop_waiting(fc);
}
if (req->stolen_file)
@@ -384,7 +394,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_iqueue *fiq = &fc->iq;
if (test_and_set_bit(FR_FINISHED, &req->flags))
- return;
+ goto put_request;
spin_lock(&fiq->waitq.lock);
list_del_init(&req->intr_entry);
@@ -394,12 +404,19 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
if (test_bit(FR_BACKGROUND, &req->flags)) {
spin_lock(&fc->lock);
clear_bit(FR_BACKGROUND, &req->flags);
- if (fc->num_background == fc->max_background)
+ if (fc->num_background == fc->max_background) {
fc->blocked = 0;
-
- /* Wake up next waiter, if any */
- if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
wake_up(&fc->blocked_waitq);
+ } else if (!fc->blocked) {
+ /*
+ * Wake up next waiter, if any. It's okay to use
+ * waitqueue_active(), as we've already synced up
+ * fc->blocked with waiters with the wake_up() call
+ * above.
+ */
+ if (waitqueue_active(&fc->blocked_waitq))
+ wake_up(&fc->blocked_waitq);
+ }
if (fc->num_background == fc->congestion_threshold &&
fc->connected && fc->bdi_initialized) {
@@ -414,6 +431,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
wake_up(&req->waitq);
if (req->end)
req->end(fc, req);
+put_request:
fuse_put_request(fc, req);
}
@@ -1321,12 +1339,14 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
goto out_end;
}
list_move_tail(&req->list, &fpq->processing);
- spin_unlock(&fpq->lock);
+ __fuse_get_request(req);
set_bit(FR_SENT, &req->flags);
+ spin_unlock(&fpq->lock);
/* matches barrier in request_wait_answer() */
smp_mb__after_atomic();
if (test_bit(FR_INTERRUPTED, &req->flags))
queue_interrupt(fiq, req);
+ fuse_put_request(fc, req);
return reqsize;
@@ -1725,7 +1745,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->in.h.nodeid = outarg->nodeid;
req->in.numargs = 2;
req->in.argpages = 1;
- req->page_descs[0].offset = offset;
req->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_CACHE_SHIFT;
@@ -1740,6 +1759,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
req->pages[req->num_pages] = page;
+ req->page_descs[req->num_pages].offset = offset;
req->page_descs[req->num_pages].length = this_num;
req->num_pages++;
@@ -1755,8 +1775,10 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->in.args[1].size = total_len;
err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
- if (err)
+ if (err) {
fuse_retrieve_end(fc, req);
+ fuse_put_request(fc, req);
+ }
return err;
}
@@ -1915,16 +1937,20 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
/* Is it an interrupt reply? */
if (req->intr_unique == oh.unique) {
+ __fuse_get_request(req);
spin_unlock(&fpq->lock);
err = -EINVAL;
- if (nbytes != sizeof(struct fuse_out_header))
+ if (nbytes != sizeof(struct fuse_out_header)) {
+ fuse_put_request(fc, req);
goto err_finish;
+ }
if (oh.error == -ENOSYS)
fc->no_interrupt = 1;
else if (oh.error == -EAGAIN)
queue_interrupt(&fc->iq, req);
+ fuse_put_request(fc, req);
fuse_copy_finish(cs);
return nbytes;
@@ -1999,11 +2025,14 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
if (!fud)
return -EPERM;
+ pipe_lock(pipe);
+
bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
- if (!bufs)
+ if (!bufs) {
+ pipe_unlock(pipe);
return -ENOMEM;
+ }
- pipe_lock(pipe);
nbuf = 0;
rem = 0;
for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
@@ -2053,10 +2082,13 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
+ pipe_lock(pipe);
for (idx = 0; idx < nbuf; idx++) {
struct pipe_buffer *buf = &bufs[idx];
buf->ops->release(pipe, buf);
}
+ pipe_unlock(pipe);
+
out:
kfree(bufs);
return ret;
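
The splice-write fix is about where pipe->buffers is read: the pipe can be resized concurrently (F_SETPIPE_SZ), so the count must be sampled, and the later buffer-release loop run, while pipe_lock is held. A sketch of sampling a resizable field under its lock; struct upipe is a stand-in, not the kernel's pipe_inode_info:

	#include <pthread.h>
	#include <stdlib.h>

	/* buffers may be changed by a resizer under mutex, so it must
	 * also be read under mutex. */
	struct upipe {
		pthread_mutex_t mutex;
		unsigned int buffers;
	};

	static void *splice_write(struct upipe *p, size_t bufsz)
	{
		void *bufs;

		pthread_mutex_lock(&p->mutex);		/* pipe_lock() first... */
		bufs = calloc(p->buffers, bufsz);	/* ...then sample buffers */
		if (!bufs) {
			pthread_mutex_unlock(&p->mutex);
			return NULL;			/* -ENOMEM path */
		}
		/* ... move buffer references out of the pipe ... */
		pthread_mutex_unlock(&p->mutex);

		/* the release loop later reacquires the lock the same way */
		return bufs;
	}

	int main(void)
	{
		struct upipe p = { PTHREAD_MUTEX_INITIALIZER, 16 };

		free(splice_write(&p, 64));
		return 0;
	}
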
@@ -2159,6 +2191,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
set_bit(FR_ABORTED, &req->flags);
if (!test_bit(FR_LOCKED, &req->flags)) {
set_bit(FR_PRIVATE, &req->flags);
+ __fuse_get_request(req);
list_move(&req->list, &to_end1);
}
spin_unlock(&req->waitq.lock);
@@ -2185,7 +2218,6 @@ void fuse_abort_conn(struct fuse_conn *fc)
while (!list_empty(&to_end1)) {
req = list_first_entry(&to_end1, struct fuse_req, list);
- __fuse_get_request(req);
list_del_init(&req->list);
request_end(fc, req);
}
@@ -2196,6 +2228,11 @@ void fuse_abort_conn(struct fuse_conn *fc)
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
+void fuse_wait_aborted(struct fuse_conn *fc)
+{
+ wait_event(fc->blocked_waitq, atomic_read(&fc->num_waiting) == 0);
+}
+
int fuse_dev_release(struct inode *inode, struct file *file)
{
struct fuse_dev *fud = fuse_get_dev(file);
@@ -2203,9 +2240,15 @@ int fuse_dev_release(struct inode *inode, struct file *file)
if (fud) {
struct fuse_conn *fc = fud->fc;
struct fuse_pqueue *fpq = &fud->pq;
+ LIST_HEAD(to_end);
+ spin_lock(&fpq->lock);
WARN_ON(!list_empty(&fpq->io));
- end_requests(fc, &fpq->processing);
+ list_splice_init(&fpq->processing, &to_end);
+ spin_unlock(&fpq->lock);
+
+ end_requests(fc, &to_end);
+
/* Are we the last open device? */
if (atomic_dec_and_test(&fc->dev_count)) {
WARN_ON(fc->iq.fasync != NULL);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bfa274c0666632..62fcec0f6d01b0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -1654,8 +1654,19 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
return err;
if (attr->ia_valid & ATTR_OPEN) {
- if (fc->atomic_o_trunc)
+ /* This is coming from open(..., ... | O_TRUNC); */
+ WARN_ON(!(attr->ia_valid & ATTR_SIZE));
+ WARN_ON(attr->ia_size != 0);
+ if (fc->atomic_o_trunc) {
+ /*
+ * No need to send request to userspace, since actual
+ * truncation has already been done by OPEN. But still
+ * need to truncate page cache.
+ */
+ i_size_write(inode, 0);
+ truncate_pagecache(inode, 0);
return 0;
+ }
file = NULL;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 1a063cbfe50341..d40c2451487cb2 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -625,7 +625,7 @@ static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
struct fuse_io_priv *io = req->io;
ssize_t pos = -1;
- fuse_release_user_pages(req, !io->write);
+ fuse_release_user_pages(req, io->should_dirty);
if (io->write) {
if (req->misc.write.in.size != req->misc.write.out.size)
@@ -879,6 +879,7 @@ static int fuse_readpages_fill(void *_data, struct page *page)
}
if (WARN_ON(req->num_pages >= req->max_pages)) {
+ unlock_page(page);
fuse_put_request(fc, req);
return -EIO;
}
@@ -1332,7 +1333,6 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
loff_t *ppos, int flags)
{
int write = flags & FUSE_DIO_WRITE;
- bool should_dirty = !write && iter_is_iovec(iter);
int cuse = flags & FUSE_DIO_CUSE;
struct file *file = io->file;
struct inode *inode = file->f_mapping->host;
@@ -1361,6 +1361,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
mutex_unlock(&inode->i_mutex);
}
+ io->should_dirty = !write && iter_is_iovec(iter);
while (count) {
size_t nres;
fl_owner_t owner = current->files;
@@ -1377,7 +1378,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
nres = fuse_send_read(req, io, pos, nbytes, owner);
if (!io->async)
- fuse_release_user_pages(req, should_dirty);
+ fuse_release_user_pages(req, io->should_dirty);
if (req->out.h.error) {
if (!res)
res = req->out.h.error;
@@ -1783,7 +1784,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
spin_unlock(&fc->lock);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+ dec_zone_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(fc, new_req);
fuse_request_free(new_req);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 644687ae04bd93..d40fad13cf4357 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -252,6 +252,7 @@ struct fuse_io_priv {
size_t size;
__u64 offset;
bool write;
+ bool should_dirty;
int err;
struct kiocb *iocb;
struct file *file;
@@ -845,6 +846,7 @@ void fuse_request_send_background_locked(struct fuse_conn *fc,
/* Abort all requests */
void fuse_abort_conn(struct fuse_conn *fc);
+void fuse_wait_aborted(struct fuse_conn *fc);
/**
* Invalidate inode attributes
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 0d5e8e59b390e9..4b2eb65be0d400 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -379,9 +379,6 @@ static void fuse_put_super(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
- fuse_send_destroy(fc);
-
- fuse_abort_conn(fc);
mutex_lock(&fuse_mutex);
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
@@ -1158,6 +1155,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
err_put_conn:
fuse_bdi_destroy(fc);
fuse_conn_put(fc);
+ sb->s_fs_info = NULL;
err_fput:
fput(file);
err:
@@ -1171,16 +1169,25 @@ static struct dentry *fuse_mount(struct file_system_type *fs_type,
return mount_nodev(fs_type, flags, raw_data, fuse_fill_super);
}
-static void fuse_kill_sb_anon(struct super_block *sb)
+static void fuse_sb_destroy(struct super_block *sb)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
if (fc) {
+ fuse_send_destroy(fc);
+
+ fuse_abort_conn(fc);
+ fuse_wait_aborted(fc);
+
down_write(&fc->killsb);
fc->sb = NULL;
up_write(&fc->killsb);
}
+}
+static void fuse_kill_sb_anon(struct super_block *sb)
+{
+ fuse_sb_destroy(sb);
kill_anon_super(sb);
}
@@ -1203,14 +1210,7 @@ static struct dentry *fuse_mount_blk(struct file_system_type *fs_type,
static void fuse_kill_sb_blk(struct super_block *sb)
{
- struct fuse_conn *fc = get_fuse_conn_super(sb);
-
- if (fc) {
- down_write(&fc->killsb);
- fc->sb = NULL;
- up_write(&fc->killsb);
- }
-
+ fuse_sb_destroy(sb);
kill_block_super(sb);
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 61296ecbd0e20a..09476bb8f6cd0e 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1476,7 +1476,7 @@ int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
end_of_file = (i_size_read(&ip->i_inode) + sdp->sd_sb.sb_bsize - 1) >> shift;
lblock = offset >> shift;
lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
- if (lblock_stop > end_of_file)
+ if (lblock_stop > end_of_file && ip != GFS2_I(sdp->sd_rindex))
return 1;
size = (lblock_stop - lblock) << shift;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index baab99b69d8ae3..de7143e2b361a6 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -71,13 +71,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
if (!sdp)
return NULL;
- sb->s_fs_info = sdp;
sdp->sd_vfs = sb;
sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats);
if (!sdp->sd_lkstats) {
kfree(sdp);
return NULL;
}
+ sb->s_fs_info = sdp;
set_bit(SDF_NOJOURNALID, &sdp->sd_flags);
gfs2_tune_init(&sdp->sd_tune);
@@ -1353,6 +1353,9 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type,
struct path path;
int error;
+ if (!dev_name || !*dev_name)
+ return ERR_PTR(-EINVAL);
+
error = kern_path(dev_name, LOOKUP_FOLLOW, &path);
if (error) {
pr_warn("path_lookup on %s returned error %d\n",
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index c134c0462ceecf..ef24894edecc1b 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -732,6 +732,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
spin_lock(&gl->gl_lockref.lock);
gl->gl_object = NULL;
spin_unlock(&gl->gl_lockref.lock);
+ gfs2_rgrp_brelse(rgd);
gfs2_glock_add_to_lru(gl);
gfs2_glock_put(gl);
}
@@ -1139,7 +1140,7 @@ static u32 count_unlinked(struct gfs2_rgrpd *rgd)
* @rgd: the struct gfs2_rgrpd describing the RG to read in
*
* Read in all of a Resource Group's header and bitmap blocks.
- * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
+ * Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
*
* Returns: errno
*/
diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 6fc766df04617a..2e713673df42f1 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -74,9 +74,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
if (!fd->bnode) {
if (!tree->root)
hfs_btree_inc_height(tree);
- fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
- if (IS_ERR(fd->bnode))
- return PTR_ERR(fd->bnode);
+ node = hfs_bnode_find(tree, tree->leaf_head);
+ if (IS_ERR(node))
+ return PTR_ERR(node);
+ fd->bnode = node;
fd->record = -1;
}
new_node = NULL;
@@ -423,6 +424,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c
index 1ab19e660e6909..1ff5774a5382a3 100644
--- a/fs/hfs/btree.c
+++ b/fs/hfs/btree.c
@@ -328,13 +328,14 @@ void hfs_bmap_free(struct hfs_bnode *node)
nidx -= len * 8;
i = node->next;
- hfs_bnode_put(node);
if (!i) {
/* panic */;
pr_crit("unable to free bnode %u. bmap not found!\n",
node->this);
+ hfs_bnode_put(node);
return;
}
+ hfs_bnode_put(node);
node = hfs_bnode_find(tree, i);
if (IS_ERR(node))
return;
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index 70788e03820ae3..66485d72e97663 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -163,7 +163,7 @@ static int hfs_readdir(struct file *file, struct dir_context *ctx)
rd->file = file;
list_add(&rd->list, &HFS_I(inode)->open_dir_list);
}
- memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key));
+ memcpy(&rd->key, &fd.key->cat, sizeof(struct hfs_cat_key));
out:
hfs_find_exit(&fd);
return err;
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 754fdf8c635638..1002a0c08319b1 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -427,6 +427,10 @@ skip:
if (new_node) {
__be32 cnid;
+ if (!new_node->parent) {
+ hfs_btree_inc_height(tree);
+ new_node->parent = tree->root;
+ }
fd->bnode = hfs_bnode_find(tree, new_node->parent);
/* create index key and entry */
hfs_bnode_read_key(new_node, fd->search_key, 14);
diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c
index 3345c7553edc13..7adc8a327e03ae 100644
--- a/fs/hfsplus/btree.c
+++ b/fs/hfsplus/btree.c
@@ -453,14 +453,15 @@ void hfs_bmap_free(struct hfs_bnode *node)
nidx -= len * 8;
i = node->next;
- hfs_bnode_put(node);
if (!i) {
/* panic */;
pr_crit("unable to free bnode %u. "
"bmap not found!\n",
node->this);
+ hfs_bnode_put(node);
return;
}
+ hfs_bnode_put(node);
node = hfs_bnode_find(tree, i);
if (IS_ERR(node))
return;
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index d0f39dcbb58e86..2b6e2ad57bf965 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -77,13 +77,13 @@ again:
cpu_to_be32(HFSP_HARDLINK_TYPE) &&
entry.file.user_info.fdCreator ==
cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
+ HFSPLUS_SB(sb)->hidden_dir &&
(entry.file.create_date ==
HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
create_date ||
entry.file.create_date ==
HFSPLUS_I(d_inode(sb->s_root))->
- create_date) &&
- HFSPLUS_SB(sb)->hidden_dir) {
+ create_date)) {
struct qstr str;
char name[32];
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index fa40e756c50137..422e00dc5f3bef 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -521,8 +521,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
goto out_put_root;
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
- if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
+ if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+ err = -EINVAL;
goto out_put_root;
+ }
inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 95080cd5009789..f61ecb436267ce 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -118,6 +118,16 @@ static void huge_pagevec_release(struct pagevec *pvec)
pagevec_reinit(pvec);
}
+/*
+ * Mask used when checking the page offset value passed in via system
+ * calls. This value will be converted to a loff_t which is signed.
+ * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
+ * value. The extra bit (- 1 in the shift value) is to take the sign
+ * bit into account.
+ */
+#define PGOFF_LOFFT_MAX \
+ (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))
+
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file_inode(file);
@@ -136,17 +146,31 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
vma->vm_ops = &hugetlb_vm_ops;
+ /*
+ * page based offset in vm_pgoff could be sufficiently large to
+ * overflow a loff_t when converted to byte offset. This can
+ * only happen on architectures where sizeof(loff_t) ==
+ * sizeof(unsigned long). So, only check in those instances.
+ */
+ if (sizeof(unsigned long) == sizeof(loff_t)) {
+ if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
+ return -EINVAL;
+ }
+
+ /* must be huge page aligned */
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
+ len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+ /* check for overflow */
+ if (len < vma_len)
+ return -EINVAL;
mutex_lock(&inode->i_mutex);
file_accessed(file);
ret = -ENOMEM;
- len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
@@ -155,7 +179,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
ret = 0;
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
- inode->i_size = len;
+ i_size_write(inode, len);
out:
mutex_unlock(&inode->i_mutex);
@@ -846,6 +870,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
+
+ /*
+ * page_private is subpool pointer in hugetlb pages. Transfer to
+ * new page. PagePrivate is not associated with page_private for
+ * hugetlb pages and can not be set here as only page_huge_active
+ * pages can be migrated.
+ */
+ if (page_private(page)) {
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ }
+
migrate_page_copy(newpage, page);
return MIGRATEPAGE_SUCCESS;
diff --git a/fs/inode.c b/fs/inode.c
index b95615f3fc5070..b5c3a6473aaa10 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -154,6 +154,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
inode->i_rdev = 0;
inode->dirtied_when = 0;
+#ifdef CONFIG_CGROUP_WRITEBACK
+ inode->i_wb_frn_winner = 0;
+ inode->i_wb_frn_avg_time = 0;
+ inode->i_wb_frn_history = 0;
+#endif
+
if (security_inode_alloc(inode))
goto out;
spin_lock_init(&inode->i_lock);
@@ -1937,8 +1943,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
inode->i_uid = current_fsuid();
if (dir && dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
+
+ /* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
+ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+ !in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+ mode &= ~S_ISGID;
} else
inode->i_gid = current_fsgid();
inode->i_mode = mode;
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 684996c8a3a4a2..4d5a5a4cc017cd 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -254,8 +254,8 @@ restart:
bh = jh2bh(jh);
if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
@@ -336,8 +336,8 @@ restart2:
jh = transaction->t_checkpoint_io_list;
bh = jh2bh(jh);
if (buffer_locked(bh)) {
- spin_unlock(&journal->j_list_lock);
get_bh(bh);
+ spin_unlock(&journal->j_list_lock);
wait_on_buffer(bh);
/* the journal_head may have gone by now */
BUFFER_TRACE(bh, "brelse");
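
Both checkpoint hunks apply the same rule: take the buffer reference (get_bh) while j_list_lock still pins the list, then drop the lock; done the other way around, the buffer can be freed in the gap before wait_on_buffer(). A pthread sketch of pin-then-unlock; the types are stand-ins, not the jbd2 structures:

	#include <pthread.h>
	#include <stdatomic.h>

	struct buffer_head { atomic_int b_count; };

	static pthread_mutex_t j_list_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Pin the buffer while the list lock still protects it, then
	 * drop the lock -- the order the two hunks above restore. */
	static void pin_then_unlock(struct buffer_head *bh)
	{
		pthread_mutex_lock(&j_list_lock);
		/* ... bh found on the checkpoint list ... */
		atomic_fetch_add(&bh->b_count, 1);	/* get_bh() under lock */
		pthread_mutex_unlock(&j_list_lock);

		/* safe to sleep here (wait_on_buffer): bh cannot vanish */

		atomic_fetch_sub(&bh->b_count, 1);	/* brelse() */
	}

	int main(void)
	{
		struct buffer_head bh = { 1 };

		pin_then_unlock(&bh);
		return 0;
	}
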
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index f3a31f55f37215..c34433432d4711 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1215,11 +1215,12 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
char *committed_data = NULL;
- JBUFFER_TRACE(jh, "entry");
if (jbd2_write_access_granted(handle, bh, true))
return 0;
jh = jbd2_journal_add_journal_head(bh);
+ JBUFFER_TRACE(jh, "entry");
+
/*
* Do this first --- it can drop the journal lock, so we want to
* make sure that obtaining the committed_data is done
@@ -1336,15 +1337,17 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
if (is_handle_aborted(handle))
return -EROFS;
- if (!buffer_jbd(bh)) {
- ret = -EUCLEAN;
- goto out;
- }
+ if (!buffer_jbd(bh))
+ return -EUCLEAN;
+
/*
* We don't grab jh reference here since the buffer must be part
* of the running transaction.
*/
jh = bh2jh(bh);
+ jbd_debug(5, "journal_head %p\n", jh);
+ JBUFFER_TRACE(jh, "entry");
+
/*
* This and the following assertions are unreliable since we may see jh
* in inconsistent state unless we grab bh_state lock. But this is
@@ -1363,6 +1366,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata) {
jbd_lock_bh_state(bh);
+ if (jh->b_transaction == transaction &&
+ jh->b_jlist != BJ_Metadata)
+ pr_err("JBD2: assertion failure: h_type=%u "
+ "h_line_no=%u block_no=%llu jlist=%u\n",
+ handle->h_type, handle->h_line_no,
+ (unsigned long long) bh->b_blocknr,
+ jh->b_jlist);
J_ASSERT_JH(jh, jh->b_transaction != transaction ||
jh->b_jlist == BJ_Metadata);
jbd_unlock_bh_state(bh);
@@ -1371,9 +1381,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
}
journal = transaction->t_journal;
- jbd_debug(5, "journal_head %p\n", jh);
- JBUFFER_TRACE(jh, "entry");
-
jbd_lock_bh_state(bh);
if (jh->b_modified == 0) {
@@ -1382,11 +1389,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the transaction. This needs to be done
* once a transaction -bzzz
*/
- jh->b_modified = 1;
if (handle->h_buffer_credits <= 0) {
ret = -ENOSPC;
goto out_unlock_bh;
}
+ jh->b_modified = 1;
handle->h_buffer_credits--;
}
@@ -1571,14 +1578,21 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
/* However, if the buffer is still owned by a prior
* (committing) transaction, we can't drop it yet... */
JBUFFER_TRACE(jh, "belongs to older transaction");
- /* ... but we CAN drop it from the new transaction if we
- * have also modified it since the original commit. */
+ /* ... but we CAN drop it from the new transaction by marking the
+ * buffer as freed and setting b_next_transaction to the new
+ * transaction, so that the commit code knows it should clear dirty
+ * bits when it is done with the buffer, and the buffer can be
+ * checkpointed only after the new transaction commits. */
- if (jh->b_next_transaction) {
- J_ASSERT(jh->b_next_transaction == transaction);
+ set_buffer_freed(bh);
+
+ if (!jh->b_next_transaction) {
spin_lock(&journal->j_list_lock);
- jh->b_next_transaction = NULL;
+ jh->b_next_transaction = transaction;
spin_unlock(&journal->j_list_lock);
+ } else {
+ J_ASSERT(jh->b_next_transaction == transaction);
/*
* only drop a reference if this transaction modified
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 600da1a4df2977..023e7f32ee1be0 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -101,7 +101,8 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
- cancel_delayed_work_sync(&c->wbuf_dwork);
+ if (jffs2_is_writebuffered(c))
+ cancel_delayed_work_sync(&c->wbuf_dwork);
#endif
mutex_lock(&c->alloc_sem);
@@ -285,10 +286,8 @@ static int jffs2_fill_super(struct super_block *sb, void *data, int silent)
sb->s_fs_info = c;
ret = jffs2_parse_options(c, data);
- if (ret) {
- kfree(c);
+ if (ret)
return -EINVAL;
- }
/* Initialize JFFS2 superblock locks, the further initialization will
* be done later */
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 4c2c03663533dc..8e1427762eeb82 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -1004,12 +1004,14 @@ ssize_t jffs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
rc = xhandle->list(xhandle, dentry, buffer + len,
size - len, xd->xname,
xd->name_len);
+ if (rc > size - len) {
+ rc = -ERANGE;
+ goto out;
+ }
} else {
rc = xhandle->list(xhandle, dentry, NULL, 0,
xd->xname, xd->name_len);
}
- if (rc < 0)
- goto out;
len += rc;
}
rc = len;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 48b15a6e555865..40a26a542341de 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -493,15 +493,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (size > PSIZE) {
/*
* To keep the rest of the code simple, allocate a
- * contiguous buffer to work with
+ * contiguous buffer to work with. Make the buffer large
+ * enough to make use of the whole extent.
*/
- ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+ ea_buf->max_size = (size + sb->s_blocksize - 1) &
+ ~(sb->s_blocksize - 1);
+
+ ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
if (ea_buf->xattr == NULL)
return -ENOMEM;
ea_buf->flag = EA_MALLOC;
- ea_buf->max_size = (size + sb->s_blocksize - 1) &
- ~(sb->s_blocksize - 1);
if (ea_size == 0)
return 0;
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index db272528ab5bb0..b3b293e2c09900 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -88,7 +88,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
int slen = strlen(kn->name);
len -= slen;
- strncpy(s + len, kn->name, slen);
+ memcpy(s + len, kn->name, slen);
if (len)
s[--len] = '/';
diff --git a/fs/lockd/host.c b/fs/lockd/host.c
index d716c9993a261c..c7eb47f2fb6c31 100644
--- a/fs/lockd/host.c
+++ b/fs/lockd/host.c
@@ -340,7 +340,7 @@ struct nlm_host *nlmsvc_lookup_host(const struct svc_rqst *rqstp,
};
struct lockd_net *ln = net_generic(net, lockd_net_id);
- dprintk("lockd: %s(host='%*s', vers=%u, proto=%s)\n", __func__,
+ dprintk("lockd: %s(host='%.*s', vers=%u, proto=%s)\n", __func__,
(int)hostname_len, hostname, rqstp->rq_vers,
(rqstp->rq_prot == IPPROTO_UDP ? "udp" : "tcp"));
diff --git a/fs/namei.c b/fs/namei.c
index 2fa1c797d3a601..13d9c89429f8c6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -869,6 +869,8 @@ static inline void put_link(struct nameidata *nd)
int sysctl_protected_symlinks __read_mostly = 1;
int sysctl_protected_hardlinks __read_mostly = 1;
+int sysctl_protected_fifos __read_mostly;
+int sysctl_protected_regular __read_mostly;
/**
* nameidata_set_temporary - Used by Chromium OS LSM to check
@@ -1035,6 +1037,45 @@ static int may_linkat(struct path *link)
return -EPERM;
}
+/**
+ * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory
+ * should be allowed, or not, on files that already
+ * exist.
+ * @dir: the sticky parent directory
+ * @inode: the inode of the file to open
+ *
+ * Block an O_CREAT open of a FIFO (or a regular file) when:
+ * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled
+ * - the file already exists
+ * - we are in a sticky directory
+ * - we don't own the file
+ * - the owner of the directory doesn't own the file
+ * - the directory is world writable
+ * If sysctl_protected_fifos (or sysctl_protected_regular) is set to 2,
+ * the directory doesn't have to be world-writable: being group-writable
+ * is enough.
+ *
+ * Returns 0 if the open is allowed, -ve on error.
+ */
+static int may_create_in_sticky(struct dentry * const dir,
+ struct inode * const inode)
+{
+ if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) ||
+ (!sysctl_protected_regular && S_ISREG(inode->i_mode)) ||
+ likely(!(dir->d_inode->i_mode & S_ISVTX)) ||
+ uid_eq(inode->i_uid, dir->d_inode->i_uid) ||
+ uid_eq(current_fsuid(), inode->i_uid))
+ return 0;
+
+ if (likely(dir->d_inode->i_mode & 0002) ||
+ (dir->d_inode->i_mode & 0020 &&
+ ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) ||
+ (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) {
+ return -EACCES;
+ }
+ return 0;
+}
+
static __always_inline
const char *get_link(struct nameidata *nd)
{
@@ -3219,9 +3260,15 @@ finish_open:
error = -ELOOP;
goto out;
}
- error = -EISDIR;
- if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
- goto out;
+ if (open_flag & O_CREAT) {
+ error = -EISDIR;
+ if (d_is_dir(nd->path.dentry))
+ goto out;
+ error = may_create_in_sticky(dir,
+ d_backing_inode(nd->path.dentry));
+ if (unlikely(error))
+ goto out;
+ }
error = -ENOTDIR;
if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
goto out;
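
A hypothetical user-space demonstration of the new check (the path, the planted FIFO, and fs.protected_fifos=1 are all assumptions):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	/* /tmp/trap: a FIFO owned by another user, in the sticky,
	 * world-writable /tmp. */
	int fd = open("/tmp/trap", O_CREAT | O_WRONLY, 0600);

	if (fd < 0 && errno == EACCES)
		puts("open blocked by fs.protected_fifos");
	return 0;
}
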
diff --git a/fs/namespace.c b/fs/namespace.c
index 41c3778a93b07f..b4b34c4d3f7e0b 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -603,12 +603,21 @@ int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
return 0;
mnt = real_mount(bastard);
mnt_add_count(mnt, 1);
+ smp_mb(); // see mntput_no_expire()
if (likely(!read_seqretry(&mount_lock, seq)))
return 0;
if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
mnt_add_count(mnt, -1);
return 1;
}
+ lock_mount_hash();
+ if (unlikely(bastard->mnt_flags & MNT_DOOMED)) {
+ mnt_add_count(mnt, -1);
+ unlock_mount_hash();
+ return 1;
+ }
+ unlock_mount_hash();
+ /* caller will mntput() */
return -1;
}
@@ -1124,12 +1133,27 @@ static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
- mnt_add_count(mnt, -1);
- if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
+ if (likely(READ_ONCE(mnt->mnt_ns))) {
+ /*
+ * Since we don't do lock_mount_hash() here,
+ * ->mnt_ns can change under us. However, if it's
+ * non-NULL, then there's a reference that won't
+ * be dropped until after an RCU delay done after
+ * turning ->mnt_ns NULL. So if we observe it
+ * non-NULL under rcu_read_lock(), the reference
+ * we are dropping is not the final one.
+ */
+ mnt_add_count(mnt, -1);
rcu_read_unlock();
return;
}
lock_mount_hash();
+ /*
+ * make sure that if __legitimize_mnt() has not seen us grab
+ * mount_lock, we'll see their refcount increment here.
+ */
+ smp_mb();
+ mnt_add_count(mnt, -1);
if (mnt_get_count(mnt)) {
rcu_read_unlock();
unlock_mount_hash();
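
The two smp_mb() calls pair with each other; a compressed sketch of the ordering argument (labels hypothetical):

/*
 * __legitimize_mnt()              mntput_no_expire()
 * ------------------              ------------------
 * A: mnt_add_count(mnt, 1)        lock_mount_hash()
 *    smp_mb()                     smp_mb()
 * B: read mount_lock seqcount     C: mnt_add_count(mnt, -1)
 *                                 D: mnt_get_count(mnt)
 *
 * Either B observes the writer holding mount_lock (the lookup retries
 * and backs out its count), or D observes the increment from A, so
 * the final reference is never dropped under a racing legitimize.
 */
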
@@ -1560,8 +1584,13 @@ static int do_umount(struct mount *mnt, int flags)
namespace_lock();
lock_mount_hash();
- event++;
+ /* Recheck MNT_LOCKED with the locks held */
+ retval = -EINVAL;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ goto out;
+
+ event++;
if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
@@ -1575,6 +1604,7 @@ static int do_umount(struct mount *mnt, int flags)
retval = 0;
}
}
+out:
unlock_mount_hash();
namespace_unlock();
return retval;
@@ -1657,7 +1687,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
goto dput_and_out;
if (!check_mnt(mnt))
goto dput_and_out;
- if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
goto dput_and_out;
retval = -EPERM;
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
@@ -1735,8 +1765,14 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
for (s = r; s; s = next_mnt(s, r)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) {
- s = skip_mnt_tree(s);
- continue;
+ if (s->mnt.mnt_flags & MNT_LOCKED) {
+ /* Both unbindable and locked. */
+ q = ERR_PTR(-EPERM);
+ goto out;
+ } else {
+ s = skip_mnt_tree(s);
+ continue;
+ }
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(s->mnt.mnt_root)) {
@@ -1789,7 +1825,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ umount_tree(real_mount(mnt), 0);
unlock_mount_hash();
namespace_unlock();
}
diff --git a/fs/ncpfs/ioctl.c b/fs/ncpfs/ioctl.c
index 0a3f9b59460234..37779ed3f79009 100644
--- a/fs/ncpfs/ioctl.c
+++ b/fs/ncpfs/ioctl.c
@@ -233,7 +233,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
len = strlen(server->nls_vol->charset);
if (len > NCP_IOCSNAME_LEN)
len = NCP_IOCSNAME_LEN;
- strncpy(user.codepage, server->nls_vol->charset, len);
+ strscpy(user.codepage, server->nls_vol->charset, NCP_IOCSNAME_LEN);
user.codepage[len] = 0;
}
@@ -243,7 +243,7 @@ ncp_get_charsets(struct ncp_server* server, struct ncp_nls_ioctl __user *arg)
len = strlen(server->nls_io->charset);
if (len > NCP_IOCSNAME_LEN)
len = NCP_IOCSNAME_LEN;
- strncpy(user.iocharset, server->nls_io->charset, len);
+ strscpy(user.iocharset, server->nls_io->charset, NCP_IOCSNAME_LEN);
user.iocharset[len] = 0;
}
mutex_unlock(&server->root_setup_lock);
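
For context on the replacement: strscpy() is the bounded, always-terminating string copy. A minimal sketch (buffer size is an assumption):

char buf[NCP_IOCSNAME_LEN + 1];

/* strscpy() copies at most sizeof(buf) - 1 bytes, always writes a NUL
 * terminator, and returns -E2BIG on truncation; strncpy() guarantees
 * neither, which is why the len-based copies removed above were easy
 * to get wrong. */
ssize_t n = strscpy(buf, server->nls_vol->charset, sizeof(buf));
if (n == -E2BIG)
	pr_debug("charset name truncated\n");
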
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c
index 33b873b259a8dc..11db4c0c5d35bd 100644
--- a/fs/ncpfs/mmap.c
+++ b/fs/ncpfs/mmap.c
@@ -89,8 +89,8 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
* fetches from the network, here the analogue of disk.
* -- nyc
*/
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
+ count_vm_event(PGMAJFAULT_F);
+ mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT_F);
return VM_FAULT_MAJOR;
}
diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c
index a861bbdfe5778e..fa8b484d035df8 100644
--- a/fs/nfs/blocklayout/dev.c
+++ b/fs/nfs/blocklayout/dev.c
@@ -162,7 +162,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset,
chunk = div_u64(offset, dev->chunk_size);
div_u64_rem(chunk, dev->nr_children, &chunk_idx);
- if (chunk_idx > dev->nr_children) {
+ if (chunk_idx >= dev->nr_children) {
dprintk("%s: invalid chunk idx %d (%lld/%lld)\n",
__func__, chunk_idx, offset, dev->chunk_size);
/* error, should not happen */
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index e2e857affbf2a6..0647cb1ede5604 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -911,16 +911,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r
if (hdr_arg.minorversion == 0) {
cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
+ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
+ if (cps.clp)
+ nfs_put_client(cps.clp);
goto out_invalidcred;
+ }
}
cps.minorversion = hdr_arg.minorversion;
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
+ if (cps.clp)
+ nfs_put_client(cps.clp);
return rpc_system_err;
-
+ }
while (status == 0 && nops != hdr_arg.nops) {
status = process_op(nops, rqstp, &xdr_in,
argp, &xdr_out, resp, &cps);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 211440722e24e6..88cb8e0d601493 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -670,6 +670,10 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
req = nfs_list_entry(reqs.next);
nfs_direct_setup_mirroring(dreq, &desc, req);
+ if (desc.pg_error < 0) {
+ list_splice_init(&reqs, &failed);
+ goto out_failed;
+ }
list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
if (!nfs_pageio_add_request(&desc, req)) {
@@ -677,13 +681,17 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
nfs_list_add_request(req, &failed);
spin_lock(cinfo.lock);
dreq->flags = 0;
- dreq->error = -EIO;
+ if (desc.pg_error < 0)
+ dreq->error = desc.pg_error;
+ else
+ dreq->error = -EIO;
spin_unlock(cinfo.lock);
}
nfs_release_request(req);
}
nfs_pageio_complete(&desc);
+out_failed:
while (!list_empty(&failed)) {
req = nfs_list_entry(failed.next);
nfs_list_remove_request(req);
@@ -898,6 +906,11 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
}
nfs_direct_setup_mirroring(dreq, &desc, req);
+ if (desc.pg_error < 0) {
+ nfs_free_request(req);
+ result = desc.pg_error;
+ break;
+ }
nfs_lock_request(req);
req->wb_index = pos >> PAGE_SHIFT;
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index fd8da630fd22ed..8e268965c96d92 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -882,13 +882,19 @@ static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- if (!pgio->pg_lseg)
+ if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_READ,
GFP_KERNEL);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ return;
+ }
+ }
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_read_mds(pgio);
@@ -901,13 +907,20 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_commit_info cinfo;
int status;
- if (!pgio->pg_lseg)
+ if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
GFP_NOFS);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ return;
+ }
+ }
+
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
goto out_mds;
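
These pg_init hunks all install the same idiom: pnfs_update_layout() may now return an ERR_PTR() value, which each caller folds into pgio->pg_error. A reminder of the encoding (a sketch, not the exact call):

/* ERR_PTR()/IS_ERR()/PTR_ERR() pack a small negative errno into an
 * invalid pointer value, so one return slot carries either a valid
 * layout segment or an error code. */
if (IS_ERR(pgio->pg_lseg)) {
	pgio->pg_error = PTR_ERR(pgio->pg_lseg);	/* e.g. -ENOMEM */
	pgio->pg_lseg = NULL;	/* keep the "no lseg, use MDS" tests working */
}
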
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c8e90152b61b27..6506775575aa68 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -786,13 +786,19 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio,
int ds_idx;
/* Use full layout for now */
- if (!pgio->pg_lseg)
+ if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_READ,
GFP_KERNEL);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ return;
+ }
+ }
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
goto out_mds;
@@ -826,13 +832,19 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
int i;
int status;
- if (!pgio->pg_lseg)
+ if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
GFP_NOFS);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ return;
+ }
+ }
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
goto out_mds;
@@ -868,18 +880,25 @@ static unsigned int
ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
- if (!pgio->pg_lseg)
+ if (!pgio->pg_lseg) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
0,
NFS4_MAX_UINT64,
IOMODE_RW,
GFP_NOFS);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ goto out;
+ }
+ }
if (pgio->pg_lseg)
return FF_LAYOUT_MIRROR_COUNT(pgio->pg_lseg);
/* no lseg means that pnfs is not in use, so no mirroring here */
nfs_pageio_reset_write_mds(pgio);
+out:
return 1;
}
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 63498e1a542ae2..ae91d1e450be73 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -879,10 +879,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
/*
* Session has been established, and the client marked ready.
- * Set the mount rsize and wsize with negotiated fore channel
- * attributes which will be bound checked in nfs_server_set_fsinfo.
+ * Limit the mount rsize, wsize and dtsize using negotiated fore
+ * channel attributes.
*/
-static void nfs4_session_set_rwsize(struct nfs_server *server)
+static void nfs4_session_limit_rwsize(struct nfs_server *server)
{
#ifdef CONFIG_NFS_V4_1
struct nfs4_session *sess;
@@ -895,9 +895,11 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
- if (!server->rsize || server->rsize > server_resp_sz)
+ if (server->dtsize > server_resp_sz)
+ server->dtsize = server_resp_sz;
+ if (server->rsize > server_resp_sz)
server->rsize = server_resp_sz;
- if (!server->wsize || server->wsize > server_rqst_sz)
+ if (server->wsize > server_rqst_sz)
server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
@@ -944,12 +946,12 @@ static int nfs4_server_common_setup(struct nfs_server *server,
(unsigned long long) server->fsid.minor);
nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
- nfs4_session_set_rwsize(server);
-
error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto out;
+ nfs4_session_limit_rwsize(server);
+
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
server->namelen = NFS4_MAXNAMLEN;
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
index 1ee62e62ea7667..c99a887100dba1 100644
--- a/fs/nfs/nfs4idmap.c
+++ b/fs/nfs/nfs4idmap.c
@@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
int id_len;
ssize_t ret;
- id_len = snprintf(id_str, sizeof(id_str), "%u", id);
+ id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
if (ret < 0)
return -EINVAL;
@@ -626,7 +626,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
if (strcmp(upcall->im_name, im->im_name) != 0)
break;
/* Note: here we store the NUL terminator too */
- len = sprintf(id_str, "%d", im->im_id) + 1;
+ len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
+ sizeof(id_str));
ret = nfs_idmap_instantiate(key, authkey, id_str, len);
break;
case IDMAP_CONV_IDTONAME:
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 4bdc2fc86280c3..8a2077408ab063 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -872,6 +872,9 @@ static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
+ if (pgio->pg_error < 0)
+ return pgio->pg_error;
+
if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
return -EINVAL;
@@ -980,6 +983,8 @@ static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
} else {
if (desc->pg_ops->pg_init)
desc->pg_ops->pg_init(desc, req);
+ if (desc->pg_error < 0)
+ return 0;
mirror->pg_base = req->wb_pgbase;
}
if (!nfs_can_coalesce_requests(prev, req, desc))
@@ -1102,7 +1107,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
struct nfs_page *req;
req = list_first_entry(&head, struct nfs_page, wb_list);
- nfs_list_remove_request(req);
if (__nfs_pageio_add_request(desc, req))
continue;
if (desc->pg_error < 0) {
@@ -1145,6 +1149,8 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
bytes = req->wb_bytes;
nfs_pageio_setup_mirroring(desc, req);
+ if (desc->pg_error < 0)
+ return 0;
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
if (midx) {
@@ -1196,7 +1202,7 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
desc->pg_mirror_idx = mirror_idx;
for (;;) {
nfs_pageio_doio(desc);
- if (!mirror->pg_recoalesce)
+ if (desc->pg_error < 0 || !mirror->pg_recoalesce)
break;
if (!nfs_do_recoalesce(desc))
break;
@@ -1230,7 +1236,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
nfs_pageio_complete(desc);
if (!list_empty(&failed)) {
list_move(&failed, &hdr->pages);
- return -EIO;
+ return desc->pg_error < 0 ? desc->pg_error : -EIO;
}
return 0;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index c8e75e5e6a67e3..d34fb0feb5c261 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -909,14 +909,15 @@ send_layoutget(struct pnfs_layout_hdr *lo,
if (IS_ERR(lseg)) {
switch (PTR_ERR(lseg)) {
- case -ENOMEM:
case -ERESTARTSYS:
+ case -EIO:
+ case -ENOSPC:
+ case -EROFS:
+ case -E2BIG:
break;
default:
- /* remember that LAYOUTGET failed and suspend trying */
- pnfs_layout_io_set_failed(lo, range->iomode);
+ return NULL;
}
- return NULL;
} else
pnfs_layout_clear_fail_bit(lo,
pnfs_iomode_to_fail_bit(range->iomode));
@@ -1625,7 +1626,7 @@ out:
"(%s, offset: %llu, length: %llu)\n",
__func__, ino->i_sb->s_id,
(unsigned long long)NFS_FILEID(ino),
- lseg == NULL ? "not found" : "found",
+ IS_ERR_OR_NULL(lseg) ? "not found" : "found",
iomode==IOMODE_RW ? "read/write" : "read-only",
(unsigned long long)pos,
(unsigned long long)count);
@@ -1804,6 +1805,11 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
rd_size,
IOMODE_READ,
GFP_KERNEL);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ return;
+ }
}
/* If no lseg, fall back to read through mds */
if (pgio->pg_lseg == NULL)
@@ -1816,13 +1822,19 @@ void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req, u64 wb_size)
{
- if (pgio->pg_lseg == NULL)
+ if (pgio->pg_lseg == NULL) {
pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
req->wb_context,
req_offset(req),
wb_size,
IOMODE_RW,
GFP_NOFS);
+ if (IS_ERR(pgio->pg_lseg)) {
+ pgio->pg_error = PTR_ERR(pgio->pg_lseg);
+ pgio->pg_lseg = NULL;
+ return;
+ }
+ }
/* If no lseg, fall back to write through mds */
if (pgio->pg_lseg == NULL)
nfs_pageio_reset_write_mds(pgio);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 0a5e33f33b5c83..0bb580174cb3d4 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -115,7 +115,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
pgm = &pgio.pg_mirrors[0];
NFS_I(inode)->read_io += pgm->pg_bytes_written;
- return 0;
+ return pgio.pg_error < 0 ? pgio.pg_error : 0;
}
static void nfs_readpage_release(struct nfs_page *req)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 62f358f6776464..9b42139a479b77 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1877,6 +1877,11 @@ static int nfs_parse_devname(const char *dev_name,
size_t len;
char *end;
+ if (unlikely(!dev_name || !*dev_name)) {
+ dfprintk(MOUNT, "NFS: device name not specified\n");
+ return -EINVAL;
+ }
+
/* Is the host name protected with square brackets? */
if (*dev_name == '[') {
end = strchr(++dev_name, ']');
@@ -2376,8 +2381,7 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
goto Ebusy;
if (a->acdirmax != b->acdirmax)
goto Ebusy;
- if (b->auth_info.flavor_len > 0 &&
- clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
+ if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
goto Ebusy;
return 1;
Ebusy:
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 7b755b7f785ceb..91146f0257699e 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -430,8 +430,19 @@ nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
&resp->common, nfs3svc_encode_entry);
memcpy(resp->verf, argp->verf, 8);
resp->count = resp->buffer - argp->buffer;
- if (resp->offset)
- xdr_encode_hyper(resp->offset, argp->cookie);
+ if (resp->offset) {
+ loff_t offset = argp->cookie;
+
+ if (unlikely(resp->offset1)) {
+ /* we ended up with offset on a page boundary */
+ *resp->offset = htonl(offset >> 32);
+ *resp->offset1 = htonl(offset & 0xffffffff);
+ resp->offset1 = NULL;
+ } else {
+ xdr_encode_hyper(resp->offset, offset);
+ }
+ resp->offset = NULL;
+ }
RETURN_STATUS(nfserr);
}
@@ -499,6 +510,7 @@ nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp,
} else {
xdr_encode_hyper(resp->offset, offset);
}
+ resp->offset = NULL;
}
RETURN_STATUS(nfserr);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 7162ab7bc093e9..d4fa7fbc37dce9 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -898,6 +898,7 @@ encode_entry(struct readdir_cd *ccd, const char *name, int namlen,
} else {
xdr_encode_hyper(cd->offset, offset64);
}
+ cd->offset = NULL;
}
/*
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index bfbee8ddf97805..c67064d94096b7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1632,6 +1632,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp,
if (status) {
op = &args->ops[0];
op->status = status;
+ resp->opcnt = 1;
goto encode_op;
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 544672b440dea0..ee0da259a3d3b4 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1538,6 +1538,8 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp,
gdev->gd_maxcount = be32_to_cpup(p++);
num = be32_to_cpup(p++);
if (num) {
+ if (num > 1000)
+ goto xdr_error;
READ_BUF(4 * num);
gdev->gd_notify_types = be32_to_cpup(p++);
for (i = 1; i < num; i++) {
@@ -3595,7 +3597,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
nfserr = nfserr_resource;
goto err_no_verf;
}
- maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
+ maxcount = svc_max_payload(resp->rqstp);
+ maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
/*
* Note the rfc defines rd_maxcount as the size of the
* READDIR4resok structure, which includes the verifier above
@@ -3609,7 +3612,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
/* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
if (!readdir->rd_dircount)
- readdir->rd_dircount = INT_MAX;
+ readdir->rd_dircount = svc_max_payload(resp->rqstp);
readdir->xdr = xdr;
readdir->rd_maxcount = maxcount;
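
The effect of the clamp, with illustrative numbers (the payload size is an assumption):

u32 rd_maxcount = 64U << 20;			/* client asks for 64 MiB */
u32 payload = svc_max_payload(resp->rqstp);	/* e.g. 1 MiB over TCP */
u32 maxcount = min_t(u32, rd_maxcount, payload);
/* maxcount is now 1 MiB: the reply buffer can no longer be sized by
 * an arbitrary client-supplied value of up to INT_MAX. */
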
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 9690cb4dd5887b..0cd57db5c5af5e 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1106,6 +1106,8 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
case 'Y':
case 'y':
case '1':
+ if (!nn->nfsd_serv)
+ return -EBUSY;
nfsd4_end_grace(nn);
break;
default:
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index fe50ded1b4ce76..9ee8bcfbf00f34 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -146,7 +146,6 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
BUG();
}
- clear_buffer_uptodate(bh);
get_bh(bh); /* for end_buffer_read_sync() */
bh->b_end_io = end_buffer_read_sync;
submit_bh(READ, bh);
@@ -300,7 +299,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
continue;
}
- clear_buffer_uptodate(bh);
get_bh(bh); /* for end_buffer_read_sync() */
if (validate)
set_buffer_needs_validate(bh);
@@ -336,6 +334,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
+ clear_buffer_needs_validate(bh);
put_bh(bh);
bhs[i] = NULL;
continue;
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 72afdca3cea70c..3c45a9301a0969 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -40,6 +40,9 @@ char *o2nm_fence_method_desc[O2NM_FENCE_METHODS] = {
"panic", /* O2NM_FENCE_PANIC */
};
+static inline void o2nm_lock_subsystem(void);
+static inline void o2nm_unlock_subsystem(void);
+
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
struct o2nm_node *node = NULL;
@@ -181,7 +184,10 @@ static struct o2nm_cluster *to_o2nm_cluster_from_node(struct o2nm_node *node)
{
/* through the first node_set .parent
* mycluster/nodes/mynode == o2nm_cluster->o2nm_node_group->o2nm_node */
- return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+ if (node->nd_item.ci_parent)
+ return to_o2nm_cluster(node->nd_item.ci_parent->ci_parent);
+ else
+ return NULL;
}
enum {
@@ -194,7 +200,7 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
int ret = 0;
@@ -214,6 +220,13 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ o2nm_unlock_subsystem();
+ return -EINVAL;
+ }
+
write_lock(&cluster->cl_nodes_lock);
if (cluster->cl_nodes[tmp])
ret = -EEXIST;
@@ -226,6 +239,8 @@ static ssize_t o2nm_node_num_store(struct config_item *item, const char *page,
set_bit(tmp, cluster->cl_nodes_bitmap);
}
write_unlock(&cluster->cl_nodes_lock);
+ o2nm_unlock_subsystem();
+
if (ret)
return ret;
@@ -269,7 +284,7 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
int ret, i;
struct rb_node **p, *parent;
unsigned int octets[4];
@@ -286,6 +301,13 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
}
+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ o2nm_unlock_subsystem();
+ return -EINVAL;
+ }
+
ret = 0;
write_lock(&cluster->cl_nodes_lock);
if (o2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
@@ -298,6 +320,8 @@ static ssize_t o2nm_node_ipv4_address_store(struct config_item *item,
rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
}
write_unlock(&cluster->cl_nodes_lock);
+ o2nm_unlock_subsystem();
+
if (ret)
return ret;
@@ -315,7 +339,7 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
size_t count)
{
struct o2nm_node *node = to_o2nm_node(item);
- struct o2nm_cluster *cluster = to_o2nm_cluster_from_node(node);
+ struct o2nm_cluster *cluster;
unsigned long tmp;
char *p = (char *)page;
ssize_t ret;
@@ -333,17 +357,26 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
!test_bit(O2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
return -EINVAL; /* XXX */
+ o2nm_lock_subsystem();
+ cluster = to_o2nm_cluster_from_node(node);
+ if (!cluster) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* the only failure case is trying to set a new local node
* when a different one is already set */
if (tmp && tmp == cluster->cl_has_local &&
- cluster->cl_local_node != node->nd_num)
- return -EBUSY;
+ cluster->cl_local_node != node->nd_num) {
+ ret = -EBUSY;
+ goto out;
+ }
/* bring up the rx thread if we're setting the new local node. */
if (tmp && !cluster->cl_has_local) {
ret = o2net_start_listening(node);
if (ret)
- return ret;
+ goto out;
}
if (!tmp && cluster->cl_has_local &&
@@ -358,7 +391,11 @@ static ssize_t o2nm_node_local_store(struct config_item *item, const char *page,
cluster->cl_local_node = node->nd_num;
}
- return count;
+ ret = count;
+
+out:
+ o2nm_unlock_subsystem();
+ return ret;
}
CONFIGFS_ATTR(o2nm_node_, num);
@@ -750,6 +787,16 @@ static struct o2nm_cluster_group o2nm_cluster_group = {
},
};
+static inline void o2nm_lock_subsystem(void)
+{
+ mutex_lock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
+static inline void o2nm_unlock_subsystem(void)
+{
+ mutex_unlock(&o2nm_cluster_group.cs_subsys.su_mutex);
+}
+
int o2nm_depend_item(struct config_item *item)
{
return configfs_depend_item(&o2nm_cluster_group.cs_subsys, item);
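
The common shape of the three ->store() fixes above: the configfs parent pointer is only stable under the subsystem mutex, so the cluster is re-resolved inside the lock and checked for NULL:

o2nm_lock_subsystem();
cluster = to_o2nm_cluster_from_node(node);
if (!cluster) {
	/* the node raced with removal from its group */
	o2nm_unlock_subsystem();
	return -EINVAL;
}
/* ... update cluster state under the subsystem mutex ... */
o2nm_unlock_subsystem();
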
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index ffecf89c8c1cd2..49af618e410d27 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -1896,8 +1896,7 @@ static int ocfs2_dir_foreach_blk_el(struct inode *inode,
/* On error, skip the f_pos to the
next block. */
ctx->pos = (ctx->pos | (sb->s_blocksize - 1)) + 1;
- brelse(bh);
- continue;
+ break;
}
if (le64_to_cpu(de->inode)) {
unsigned char d_type = DT_UNKNOWN;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 4e2162b355db2b..0cefb036a17ec4 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -589,9 +589,9 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
res->last_used = 0;
- spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->track_lock);
list_add_tail(&res->tracking, &dlm->tracking_list);
- spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm->track_lock);
memset(res->lvb, 0, DLM_LVB_LEN);
memset(res->refmap, 0, sizeof(res->refmap));
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 827fc9809bc271..3494e220b5106d 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -125,10 +125,10 @@ check_err:
check_gen:
if (handle->ih_generation != inode->i_generation) {
- iput(inode);
trace_ocfs2_get_dentry_generation((unsigned long long)blkno,
handle->ih_generation,
inode->i_generation);
+ iput(inode);
result = ERR_PTR(-ESTALE);
goto bail;
}
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index 0a4457fb0711b7..85111d740c9d85 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -345,13 +345,18 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
if (num_used
|| alloc->id1.bitmap1.i_used
|| alloc->id1.bitmap1.i_total
- || la->la_bm_off)
- mlog(ML_ERROR, "Local alloc hasn't been recovered!\n"
+ || la->la_bm_off) {
+ mlog(ML_ERROR, "inconsistent detected, clean journal with"
+ " unrecovered local alloc, please run fsck.ocfs2!\n"
"found = %u, set = %u, taken = %u, off = %u\n",
num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
le32_to_cpu(alloc->id1.bitmap1.i_total),
OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+ status = -EINVAL;
+ goto bail;
+ }
+
osb->local_alloc_bh = alloc_bh;
osb->local_alloc_state = OCFS2_LA_ENABLED;
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index 124471d26a73f4..c1a83c58456ef0 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -156,18 +156,14 @@ out:
}
/*
- * lock allocators, and reserving appropriate number of bits for
- * meta blocks and data clusters.
- *
- * in some cases, we don't need to reserve clusters, just let data_ac
- * be NULL.
+ * lock allocator, and reserve appropriate number of bits for
+ * meta blocks.
*/
-static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+static int ocfs2_lock_meta_allocator_move_extents(struct inode *inode,
struct ocfs2_extent_tree *et,
u32 clusters_to_move,
u32 extents_to_split,
struct ocfs2_alloc_context **meta_ac,
- struct ocfs2_alloc_context **data_ac,
int extra_blocks,
int *credits)
{
@@ -192,13 +188,6 @@ static int ocfs2_lock_allocators_move_extents(struct inode *inode,
goto out;
}
- if (data_ac) {
- ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
- }
*credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el);
@@ -260,10 +249,10 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
}
}
- ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
- &context->meta_ac,
- &context->data_ac,
- extra_blocks, &credits);
+ ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+ *len, 1,
+ &context->meta_ac,
+ extra_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
@@ -286,6 +275,21 @@ static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
}
}
+ /*
+ * Make sure ocfs2_reserve_clusters() is called after
+ * __ocfs2_flush_truncate_log(); reserving clusters before flushing
+ * the truncate log can deadlock on the global bitmap.
+ */
+ ret = ocfs2_reserve_clusters(osb, *len, &context->data_ac);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_unlock_mutex;
+ }
+
handle = ocfs2_start_trans(osb, credits);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
@@ -606,9 +610,10 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
}
}
- ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
- &context->meta_ac,
- NULL, extra_blocks, &credits);
+ ret = ocfs2_lock_meta_allocator_move_extents(inode, &context->et,
+ len, 1,
+ &context->meta_ac,
+ extra_blocks, &credits);
if (ret) {
mlog_errno(ret);
goto out;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 63a0d0ba36de11..64c5386d0c1bfb 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -317,7 +317,6 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
struct dentry *upperdir;
struct dentry *upperdentry;
const struct cred *old_cred;
- struct cred *override_cred;
char *link = NULL;
if (WARN_ON(!workdir))
@@ -336,28 +335,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
return PTR_ERR(link);
}
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_free_link;
-
- override_cred->fsuid = stat->uid;
- override_cred->fsgid = stat->gid;
- /*
- * CAP_SYS_ADMIN for copying up extended attributes
- * CAP_DAC_OVERRIDE for create
- * CAP_FOWNER for chmod, timestamp update
- * CAP_FSETID for chmod
- * CAP_CHOWN for chown
- * CAP_MKNOD for mknod
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- cap_raise(override_cred->cap_effective, CAP_FSETID);
- cap_raise(override_cred->cap_effective, CAP_CHOWN);
- cap_raise(override_cred->cap_effective, CAP_MKNOD);
- old_cred = override_creds(override_cred);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = -EIO;
if (lock_rename(workdir, upperdir) != NULL) {
@@ -380,9 +358,7 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
out_unlock:
unlock_rename(workdir, upperdir);
revert_creds(old_cred);
- put_cred(override_cred);
-out_free_link:
if (link)
free_page((unsigned long) link);
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 327177df03a5ca..f8aa5427212132 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -408,28 +408,13 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
} else {
const struct cred *old_cred;
- struct cred *override_cred;
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_iput;
-
- /*
- * CAP_SYS_ADMIN for setting opaque xattr
- * CAP_DAC_OVERRIDE for create in workdir, rename
- * CAP_FOWNER for removing whiteout from sticky dir
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- old_cred = override_creds(override_cred);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_create_over_whiteout(dentry, inode, &stat, link,
hardlink);
revert_creds(old_cred);
- put_cred(override_cred);
}
if (!err)
@@ -659,32 +644,11 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
if (OVL_TYPE_PURE_UPPER(type)) {
err = ovl_remove_upper(dentry, is_dir);
} else {
- const struct cred *old_cred;
- struct cred *override_cred;
-
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_drop_write;
-
- /*
- * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
- * CAP_DAC_OVERRIDE for create in workdir, rename
- * CAP_FOWNER for removing whiteout from sticky dir
- * CAP_FSETID for chmod of opaque dir
- * CAP_CHOWN for chown of opaque dir
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- cap_raise(override_cred->cap_effective, CAP_FSETID);
- cap_raise(override_cred->cap_effective, CAP_CHOWN);
- old_cred = override_creds(override_cred);
+ const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_remove_and_whiteout(dentry, is_dir);
revert_creds(old_cred);
- put_cred(override_cred);
}
out_drop_write:
ovl_drop_write(dentry);
@@ -723,7 +687,6 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
bool new_is_dir = false;
struct dentry *opaquedir = NULL;
const struct cred *old_cred = NULL;
- struct cred *override_cred = NULL;
err = -EINVAL;
if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
@@ -792,26 +755,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
- if (old_opaque || new_opaque) {
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (!override_cred)
- goto out_drop_write;
-
- /*
- * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
- * CAP_DAC_OVERRIDE for create in workdir
- * CAP_FOWNER for removing whiteout from sticky dir
- * CAP_FSETID for chmod of opaque dir
- * CAP_CHOWN for chown of opaque dir
- */
- cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- cap_raise(override_cred->cap_effective, CAP_FOWNER);
- cap_raise(override_cred->cap_effective, CAP_FSETID);
- cap_raise(override_cred->cap_effective, CAP_CHOWN);
- old_cred = override_creds(override_cred);
- }
+ if (old_opaque || new_opaque)
+ old_cred = ovl_override_creds(old->d_sb);
if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
opaquedir = ovl_check_empty_and_clear(new);
@@ -942,10 +887,8 @@ out_dput_old:
out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
- if (old_opaque || new_opaque) {
+ if (old_opaque || new_opaque)
revert_creds(old_cred);
- put_cred(override_cred);
- }
out_drop_write:
ovl_drop_write(old);
out:
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index c319d5eaabcfaa..27a42975d7cd3e 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -150,6 +150,7 @@ void ovl_drop_write(struct dentry *dentry);
bool ovl_dentry_is_opaque(struct dentry *dentry);
void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
bool ovl_is_whiteout(struct dentry *dentry);
+const struct cred *ovl_override_creds(struct super_block *sb);
void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
@@ -163,6 +164,9 @@ extern const struct file_operations ovl_dir_operations;
int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
void ovl_cache_free(struct list_head *list);
+int ovl_check_d_type_supported(struct path *realpath);
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ struct dentry *dentry, int level);
/* inode.c */
int ovl_setattr(struct dentry *dentry, struct iattr *attr);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 299a6e1d6b779e..da999e73c97a08 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -36,13 +36,15 @@ struct ovl_dir_cache {
struct ovl_readdir_data {
struct dir_context ctx;
- bool is_merge;
+ struct dentry *dentry;
+ bool is_lowest;
struct rb_root root;
struct list_head *list;
struct list_head middle;
struct ovl_cache_entry *first_maybe_whiteout;
int count;
int err;
+ bool d_type_supported;
};
struct ovl_dir_file {
@@ -139,9 +141,9 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
return 0;
}
-static int ovl_fill_lower(struct ovl_readdir_data *rdd,
- const char *name, int namelen,
- loff_t offset, u64 ino, unsigned int d_type)
+static int ovl_fill_lowest(struct ovl_readdir_data *rdd,
+ const char *name, int namelen,
+ loff_t offset, u64 ino, unsigned int d_type)
{
struct ovl_cache_entry *p;
@@ -193,10 +195,10 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name,
container_of(ctx, struct ovl_readdir_data, ctx);
rdd->count++;
- if (!rdd->is_merge)
+ if (!rdd->is_lowest)
return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
else
- return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
+ return ovl_fill_lowest(rdd, name, namelen, offset, ino, d_type);
}
static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
@@ -205,17 +207,8 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
struct ovl_cache_entry *p;
struct dentry *dentry;
const struct cred *old_cred;
- struct cred *override_cred;
-
- override_cred = prepare_creds();
- if (!override_cred)
- return -ENOMEM;
- /*
- * CAP_DAC_OVERRIDE for lookup
- */
- cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
- old_cred = override_creds(override_cred);
+ old_cred = ovl_override_creds(rdd->dentry->d_sb);
err = mutex_lock_killable(&dir->d_inode->i_mutex);
if (!err) {
@@ -231,7 +224,6 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd)
mutex_unlock(&dir->d_inode->i_mutex);
}
revert_creds(old_cred);
- put_cred(override_cred);
return err;
}
@@ -256,7 +248,7 @@ static inline int ovl_dir_read(struct path *realpath,
err = rdd->err;
} while (!err && rdd->count);
- if (!err && rdd->first_maybe_whiteout)
+ if (!err && rdd->first_maybe_whiteout && rdd->dentry)
err = ovl_check_whiteouts(realpath->dentry, rdd);
fput(realfile);
@@ -287,9 +279,10 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
struct path realpath;
struct ovl_readdir_data rdd = {
.ctx.actor = ovl_fill_merge,
+ .dentry = dentry,
.list = list,
.root = RB_ROOT,
- .is_merge = false,
+ .is_lowest = false,
};
int idx, next;
@@ -306,7 +299,7 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
* allows offsets to be reasonably constant
*/
list_add(&rdd.middle, rdd.list);
- rdd.is_merge = true;
+ rdd.is_lowest = true;
err = ovl_dir_read(&realpath, &rdd);
list_del(&rdd.middle);
}
@@ -581,3 +574,100 @@ void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
}
mutex_unlock(&upper->d_inode->i_mutex);
}
+
+static int ovl_check_d_type(struct dir_context *ctx, const char *name,
+ int namelen, loff_t offset, u64 ino,
+ unsigned int d_type)
+{
+ struct ovl_readdir_data *rdd =
+ container_of(ctx, struct ovl_readdir_data, ctx);
+
+ /* Even if d_type is not supported, DT_DIR is returned for . and .. */
+ if (!strncmp(name, ".", namelen) || !strncmp(name, "..", namelen))
+ return 0;
+
+ if (d_type != DT_UNKNOWN)
+ rdd->d_type_supported = true;
+
+ return 0;
+}
+
+/*
+ * Returns 1 if d_type is supported, 0 if not supported/unknown, and a
+ * negative value if an error is encountered.
+ */
+int ovl_check_d_type_supported(struct path *realpath)
+{
+ int err;
+ struct ovl_readdir_data rdd = {
+ .ctx.actor = ovl_check_d_type,
+ .d_type_supported = false,
+ };
+
+ err = ovl_dir_read(realpath, &rdd);
+ if (err)
+ return err;
+
+ return rdd.d_type_supported;
+}
+
+static void ovl_workdir_cleanup_recurse(struct path *path, int level)
+{
+ int err;
+ struct inode *dir = path->dentry->d_inode;
+ LIST_HEAD(list);
+ struct ovl_cache_entry *p;
+ struct ovl_readdir_data rdd = {
+ .ctx.actor = ovl_fill_merge,
+ .dentry = NULL,
+ .list = &list,
+ .root = RB_ROOT,
+ .is_lowest = false,
+ };
+
+ err = ovl_dir_read(path, &rdd);
+ if (err)
+ goto out;
+
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ list_for_each_entry(p, &list, l_node) {
+ struct dentry *dentry;
+
+ if (p->name[0] == '.') {
+ if (p->len == 1)
+ continue;
+ if (p->len == 2 && p->name[1] == '.')
+ continue;
+ }
+ dentry = lookup_one_len(p->name, path->dentry, p->len);
+ if (IS_ERR(dentry))
+ continue;
+ if (dentry->d_inode)
+ ovl_workdir_cleanup(dir, path->mnt, dentry, level);
+ dput(dentry);
+ }
+ inode_unlock(dir);
+out:
+ ovl_cache_free(&list);
+}
+
+void ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
+ struct dentry *dentry, int level)
+{
+ int err;
+
+ if (!d_is_dir(dentry) || level > 1) {
+ ovl_cleanup(dir, dentry);
+ return;
+ }
+
+ err = ovl_do_rmdir(dir, dentry);
+ if (err) {
+ struct path path = { .mnt = mnt, .dentry = dentry };
+
+ inode_unlock(dir);
+ ovl_workdir_cleanup_recurse(&path, level + 1);
+ inode_lock_nested(dir, I_MUTEX_PARENT);
+ ovl_cleanup(dir, dentry);
+ }
+}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index d70208c0de84b8..fa20c95bd45695 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -42,6 +42,8 @@ struct ovl_fs {
long lower_namelen;
/* pathnames of lower and upper dirs, for show_options */
struct ovl_config config;
+ /* creds of process who forced instantiation of super block */
+ const struct cred *creator_cred;
};
struct ovl_dir_cache;
@@ -246,6 +248,13 @@ bool ovl_is_whiteout(struct dentry *dentry)
return inode && IS_WHITEOUT(inode);
}
+const struct cred *ovl_override_creds(struct super_block *sb)
+{
+ struct ovl_fs *ofs = sb->s_fs_info;
+
+ return override_creds(ofs->creator_cred);
+}
+
static bool ovl_is_opaquedir(struct dentry *dentry)
{
int res;
@@ -587,6 +596,7 @@ static void ovl_put_super(struct super_block *sb)
kfree(ufs->config.lowerdir);
kfree(ufs->config.upperdir);
kfree(ufs->config.workdir);
+ put_cred(ufs->creator_cred);
kfree(ufs);
}
@@ -774,7 +784,7 @@ retry:
goto out_dput;
retried = true;
- ovl_cleanup(dir, work);
+ ovl_workdir_cleanup(dir, mnt, work, 0);
dput(work);
goto retry;
}
@@ -1054,6 +1064,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY;
ufs->workdir = NULL;
}
+
+ /*
+ * Upper should support d_type, else whiteouts are visible.
+ * Given that workdir and upper are on the same fs, we can do
+ * iterate_dir() on workdir. This check requires successful
+ * creation of workdir in the previous step.
+ */
+ if (ufs->workdir) {
+ err = ovl_check_d_type_supported(&workpath);
+ if (err < 0)
+ goto out_put_workdir;
+
+ /*
+ * We allowed this configuration and don't want to
+ * break users over kernel upgrade. So warn instead
+ * of erroring out.
+ */
+ if (!err)
+ pr_warn("overlayfs: upper fs needs to support d_type.\n");
+ }
}
err = -ENOMEM;
@@ -1087,10 +1117,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
else
sb->s_d_op = &ovl_dentry_operations;
+ ufs->creator_cred = prepare_creds();
+ if (!ufs->creator_cred)
+ goto out_put_lower_mnt;
+
err = -ENOMEM;
oe = ovl_alloc_entry(numlower);
if (!oe)
- goto out_put_lower_mnt;
+ goto out_put_cred;
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
if (!root_dentry)
@@ -1123,6 +1157,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
out_free_oe:
kfree(oe);
+out_put_cred:
+ put_cred(ufs->creator_cred);
out_put_lower_mnt:
for (i = 0; i < ufs->numlower; i++)
mntput(ufs->lower_mnt[i]);
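
The shape of the credential change: rather than building a raised-capability cred at every call site, the mounter's creds are captured once at mount time and replayed around each privileged internal operation:

/* at mount:   ufs->creator_cred = prepare_creds();  */
/* at unmount: put_cred(ufs->creator_cred);          */
const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
/* ... copy-up, whiteout or workdir cleanup runs with the mounter's
 * credentials ... */
revert_creds(old_cred);
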
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 57f0aec8a95826..f11df9ab4256e4 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -79,6 +79,7 @@
#include <linux/delayacct.h>
#include <linux/seq_file.h>
#include <linux/pid_namespace.h>
+#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/string_helpers.h>
@@ -333,6 +334,31 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
#ifdef CONFIG_SECCOMP
seq_printf(m, "Seccomp:\t%d\n", p->seccomp.mode);
#endif
+ seq_printf(m, "Speculation_Store_Bypass:\t");
+ switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
+ case -EINVAL:
+ seq_printf(m, "unknown");
+ break;
+ case PR_SPEC_NOT_AFFECTED:
+ seq_printf(m, "not vulnerable");
+ break;
+ case PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE:
+ seq_printf(m, "thread force mitigated");
+ break;
+ case PR_SPEC_PRCTL | PR_SPEC_DISABLE:
+ seq_printf(m, "thread mitigated");
+ break;
+ case PR_SPEC_PRCTL | PR_SPEC_ENABLE:
+ seq_printf(m, "thread vulnerable");
+ break;
+ case PR_SPEC_DISABLE:
+ seq_printf(m, "globally mitigated");
+ break;
+ default:
+ seq_printf(m, "vulnerable");
+ break;
+ }
+ seq_putc(m, '\n');
}
static inline void task_context_switch_counts(struct seq_file *m,
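
The new field can be read per task; a hypothetical user-space reader:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/self/status", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "Speculation_Store_Bypass", 24))
			fputs(line, stdout);	/* e.g. "thread vulnerable" */
	fclose(f);
	return 0;
}
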
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fcd1a81609eedc..bd061125de5c93 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -260,7 +260,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
* Inherently racy -- command line shares address space
* with code and data.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
if (rv <= 0)
goto out_free_page;
@@ -278,7 +278,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
int nr_read;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -313,7 +313,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -362,7 +362,7 @@ skip_argv:
bool final;
_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -477,6 +477,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
int err;
int i;
+ /*
+ * The ability to racily run the kernel stack unwinder on a running task
+ * and then observe the unwinder output is scary; while it is useful for
+ * debugging kernel issues, it can also allow an attacker to leak kernel
+ * stack contents.
+ * Doing this in a manner that is at least safe from races would require
+ * some work to ensure that the remote task can not be scheduled; and
+ * even then, this would still expose the unwinder as local attack
+ * surface.
+ * Therefore, this interface is restricted to root.
+ */
+ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EACCES;
+
entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
@@ -860,6 +874,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
unsigned long addr = *ppos;
ssize_t copied;
char *page;
+ unsigned int flags;
if (!mm)
return 0;
@@ -872,6 +887,11 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
+ /* Maybe we should limit FOLL_FORCE to actual ptrace users? */
+ flags = FOLL_FORCE;
+ if (write)
+ flags |= FOLL_WRITE;
+
while (count > 0) {
int this_len = min_t(int, count, PAGE_SIZE);
@@ -880,7 +900,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
break;
}
- this_len = access_remote_vm(mm, addr, page, this_len, write);
+ this_len = access_remote_vm(mm, addr, page, this_len, flags);
if (!this_len) {
if (!copied)
copied = -EIO;
@@ -996,8 +1016,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
- retval = access_remote_vm(mm, (env_start + src),
- page, this_len, 0);
+ retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
if (retval <= 0) {
ret = retval;
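Replacing the bare 0 with FOLL_ANON makes these remote reads fail unless the target pages come from anonymous memory, so a cmdline/environ read can no longer be pointed at a file-backed mapping. A sketch of the kind of VMA check this enables in the get_user_pages() path (vma_allows_anon_access() is a hypothetical name; the real check lives in mm/gup.c):

/* Illustrative only: refuse non-anonymous VMAs when FOLL_ANON is set. */
static bool vma_allows_anon_access(struct vm_area_struct *vma,
				   unsigned int gup_flags)
{
	if ((gup_flags & FOLL_ANON) && !vma_is_anonymous(vma))
		return false;
	return true;
}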
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 92e6726f6e3732..21f198aa096196 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
+ char *buf = file->private_data;
ssize_t acc = 0;
size_t size, tsz;
size_t elf_buflen;
@@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (clear_user(buffer, tsz))
return -EFAULT;
} else if (is_vmalloc_or_module_addr((void *)start)) {
- char * elf_buf;
-
- elf_buf = kzalloc(tsz, GFP_KERNEL);
- if (!elf_buf)
- return -ENOMEM;
- vread(elf_buf, (char *)start, tsz);
+ vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
- if (copy_to_user(buffer, elf_buf, tsz)) {
- kfree(elf_buf);
+ if (copy_to_user(buffer, buf, tsz))
return -EFAULT;
- }
- kfree(elf_buf);
} else {
if (kern_addr_valid(start)) {
unsigned long n;
- n = copy_to_user(buffer, (char *)start, tsz);
+ /*
+ * Using bounce buffer to bypass the
+ * hardened user copy kernel text checks.
+ */
+ memcpy(buf, (char *) start, tsz);
+ n = copy_to_user(buffer, buf, tsz);
/*
* We cannot distinguish between fault on source
* and fault on destination. When this happens
@@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp)
{
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+
+ filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!filp->private_data)
+ return -ENOMEM;
+
if (kcore_need_update)
kcore_update_ram();
if (i_size_read(inode) != proc_root_kcore->size) {
@@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
return 0;
}
+static int release_kcore(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
static const struct file_operations proc_kcore_operations = {
.read = read_kcore,
.open = open_kcore,
+ .release = release_kcore,
.llseek = default_llseek,
};
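The bounce buffer here works around hardened usercopy, which rejects copy_to_user() straight out of kernel text; staging the bytes in a heap page first makes the source an ordinary slab allocation. Keeping that page in file->private_data (allocated in open(), freed in release()) also avoids a per-read allocation. A condensed sketch of the read-side pattern, with hypothetical foo_* names:

static ssize_t foo_read(struct file *filp, char __user *ubuf,
			size_t len, loff_t *ppos)
{
	char *bounce = filp->private_data;	/* PAGE_SIZE, from open() */
	size_t chunk = min_t(size_t, len, PAGE_SIZE);

	/* Stage the data so the user copy sees a plain heap buffer. */
	memcpy(bounce, source_of_data(*ppos), chunk);	/* hypothetical */
	if (copy_to_user(ubuf, bounce, chunk))
		return -EFAULT;
	*ppos += chunk;
	return chunk;
}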
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index e81fbeddc581ad..c1532ac83c4e55 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -1555,7 +1555,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
if (--header->nreg)
return;
- put_links(header);
+ if (parent)
+ put_links(header);
start_unregistering(header);
if (!--header->count)
kfree_rcu(header, rcu);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f8f1f61687d34c..c142894352b3fe 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -141,7 +141,7 @@ static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
struct page *page;
pages_pinned = get_user_pages(current, mm, page_start_vaddr,
- 1, 0, 0, &page, NULL);
+ 1, 0, &page, NULL);
if (pages_pinned < 1) {
seq_puts(m, "<fault>]");
return;
@@ -640,6 +640,7 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
[ilog2(VM_NORESERVE)] = "nr",
[ilog2(VM_HUGETLB)] = "ht",
[ilog2(VM_ARCH_1)] = "ar",
+ [ilog2(VM_WIPEONFORK)] = "wf",
[ilog2(VM_DONTDUMP)] = "dd",
#ifdef CONFIG_MEM_SOFT_DIRTY
[ilog2(VM_SOFTDIRTY)] = "sd",
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index e11672aa457514..11e558efd61e06 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -421,7 +421,12 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
vaddr = vmap(pages, page_count, VM_MAP, prot);
kfree(pages);
- return vaddr;
+ /*
+ * Since vmap() uses page granularity, we must add the offset
+ * into the page here, to get the byte granularity address
+ * into the mapping to represent the actual "start" location.
+ */
+ return vaddr + offset_in_page(start);
}
static void *persistent_ram_iomap(phys_addr_t start, size_t size,
@@ -440,6 +445,11 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
else
va = ioremap_wc(start, size);
+ /*
+ * Since request_mem_region() and ioremap() are byte-granularity
+	 * there is no need to handle anything special, unlike the
+	 * vmap() case in persistent_ram_vmap() above.
+ */
return va;
}
@@ -460,7 +470,7 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
return -ENOMEM;
}
- prz->buffer = prz->vaddr + offset_in_page(start);
+ prz->buffer = prz->vaddr;
prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
return 0;
@@ -478,6 +488,11 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
sig ^= PERSISTENT_RAM_SIG;
if (prz->buffer->sig == sig) {
+ if (buffer_size(prz) == 0) {
+ pr_debug("found existing empty buffer\n");
+ return 0;
+ }
+
if (buffer_size(prz) > prz->buffer_size ||
buffer_start(prz) > buffer_size(prz))
pr_info("found existing invalid buffer, size %zu, start %zu\n",
@@ -507,7 +522,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
if (prz->vaddr) {
if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
- vunmap(prz->vaddr);
+ /* We must vunmap() at page-granularity. */
+ vunmap(prz->vaddr - offset_in_page(prz->paddr));
} else {
iounmap(prz->vaddr);
release_mem_region(prz->paddr, prz->size);
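All three hunks restore the same invariant: vmap()/vunmap() deal in whole pages, while the persistent RAM region may begin at an arbitrary byte inside its first page, so the byte offset must be added after mapping and subtracted again before unmapping. A standalone sketch of the round trip (map_pages_of() is a hypothetical stand-in for the vmap() call above):

/* Map a byte-granular physical range through page-granular vmap(). */
static void *map_bytes(phys_addr_t start, size_t size)
{
	void *page_aligned = map_pages_of(start, size);	/* hypothetical */

	/* Hand callers the exact byte, not the page base. */
	return page_aligned + offset_in_page(start);
}

static void unmap_bytes(void *vaddr, phys_addr_t start)
{
	/* vunmap() must see the page-aligned address we created. */
	vunmap(vaddr - offset_in_page(start));
}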
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 3746367098fda3..bb0d643481c8f4 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -17,6 +17,7 @@
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
+#include <linux/nospec.h>
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
@@ -644,6 +645,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
return -EINVAL;
+ type = array_index_nospec(type, MAXQUOTAS);
/*
* Quota not supported on this fs? Check this before s_quota_types
* since they needn't be set if quota is not supported at all.
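array_index_nospec() is the stock Spectre-v1 mitigation: after the architectural bounds check, it clamps the index so a mispredicted branch cannot speculatively index past the end of the array. The idiom, in a minimal sketch (NR_ENTRIES and table[] are illustrative):

#include <linux/nospec.h>

static int lookup(int idx)
{
	if (idx < 0 || idx >= NR_ENTRIES)
		return -EINVAL;
	/*
	 * Even if the check above is bypassed speculatively, idx is
	 * forced into [0, NR_ENTRIES) before the dependent load.
	 */
	idx = array_index_nospec(idx, NR_ENTRIES);
	return table[idx];
}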
diff --git a/fs/read_write.c b/fs/read_write.c
index bfd1a5dddf6e9e..16e554ba885dd4 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -363,8 +363,10 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos)
iter->type |= WRITE;
ret = file->f_op->write_iter(&kiocb, iter);
BUG_ON(ret == -EIOCBQUEUED);
- if (ret > 0)
+ if (ret > 0) {
*ppos = kiocb.ki_pos;
+ fsnotify_modify(file);
+ }
return ret;
}
EXPORT_SYMBOL(vfs_iter_write);
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 6ca00471afbf40..d920a646b57840 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -270,7 +270,7 @@ struct reiserfs_journal_list {
struct mutex j_commit_mutex;
unsigned int j_trans_id;
- time_t j_timestamp;
+ time64_t j_timestamp; /* write-only but useful for crash dump analysis */
struct reiserfs_list_bitmap *j_list_bitmap;
struct buffer_head *j_commit_bh; /* commit buffer head */
struct reiserfs_journal_cnode *j_realblock;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index a8dbc93e45eb37..d424b3d4bf3b42 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -184,6 +184,7 @@ struct reiserfs_dentry_buf {
struct dir_context ctx;
struct dentry *xadir;
int count;
+ int err;
struct dentry *dentries[8];
};
@@ -206,6 +207,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
dentry = lookup_one_len(name, dbuf->xadir, namelen);
if (IS_ERR(dentry)) {
+ dbuf->err = PTR_ERR(dentry);
return PTR_ERR(dentry);
} else if (d_really_is_negative(dentry)) {
/* A directory entry exists, but no file? */
@@ -214,6 +216,7 @@ fill_with_dentries(struct dir_context *ctx, const char *name, int namelen,
"not found for file %pd.\n",
dentry, dbuf->xadir);
dput(dentry);
+ dbuf->err = -EIO;
return -EIO;
}
@@ -261,6 +264,10 @@ static int reiserfs_for_each_xattr(struct inode *inode,
err = reiserfs_readdir_inode(d_inode(dir), &buf.ctx);
if (err)
break;
+ if (buf.err) {
+ err = buf.err;
+ break;
+ }
if (!buf.count)
break;
for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
@@ -791,8 +798,10 @@ static int listxattr_filler(struct dir_context *ctx, const char *name,
size = handler->list(handler, b->dentry,
b->buf + b->pos, b->size, name,
namelen);
- if (size > b->size)
+ if (b->pos + size > b->size) {
+ b->pos = -ERANGE;
return -ERANGE;
+ }
} else {
size = handler->list(handler, b->dentry,
NULL, 0, name, namelen);
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 270221fcef42cc..fbed548cbca6dd 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -142,6 +142,13 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
err |= __put_user((long) kinfo->si_ptr, &uinfo->ssi_ptr);
err |= __put_user(kinfo->si_int, &uinfo->ssi_int);
break;
+#ifdef __ARCH_SIGSYS
+ case __SI_SYS:
+ err |= __put_user((long) kinfo->si_call_addr, &uinfo->ssi_call_addr);
+ err |= __put_user(kinfo->si_syscall, &uinfo->ssi_syscall);
+ err |= __put_user(kinfo->si_arch, &uinfo->ssi_arch);
+ break;
+#endif
default:
/*
* This case catches also the signals queued by sigqueue().
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 0cea9b9236d07c..82bc942fc43761 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -166,6 +166,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
}
if (compressed) {
+ if (!msblk->stream)
+ goto read_failure;
length = squashfs_decompress(msblk, bh, b, offset, length,
output);
if (length < 0)
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 1cb70a0b216844..91ce49c05b7c3d 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+ if (unlikely(length < 0))
+ return -EIO;
+
while (length) {
entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
if (entry->error) {
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index e5c9689062ba81..1ec7bae2751df6 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
}
for (i = 0; i < blocks; i++) {
- int size = le32_to_cpu(blist[i]);
+ int size = squashfs_block_size(blist[i]);
+ if (size < 0) {
+ err = size;
+ goto failure;
+ }
block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
}
n -= blocks;
@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
sizeof(size));
if (res < 0)
return res;
- return le32_to_cpu(size);
+ return squashfs_block_size(size);
}
/* Copy data into page cache */
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 0ed6edbc5c7170..0681feab4a8499 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
u64 *fragment_block)
{
struct squashfs_sb_info *msblk = sb->s_fs_info;
- int block = SQUASHFS_FRAGMENT_INDEX(fragment);
- int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
- u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
+ int block, offset, size;
struct squashfs_fragment_entry fragment_entry;
- int size;
+ u64 start_block;
+
+ if (fragment >= msblk->fragments)
+ return -EIO;
+ block = SQUASHFS_FRAGMENT_INDEX(fragment);
+ offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
+
+ start_block = le64_to_cpu(msblk->fragment_index[block]);
size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
&offset, sizeof(fragment_entry));
@@ -61,9 +66,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
return size;
*fragment_block = le64_to_cpu(fragment_entry.start_block);
- size = le32_to_cpu(fragment_entry.size);
-
- return size;
+ return squashfs_block_size(fragment_entry.size);
}
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 506f4ba5b98309..e66486366f0256 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -129,6 +129,12 @@
#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+static inline int squashfs_block_size(__le32 raw)
+{
+ u32 size = le32_to_cpu(raw);
+ return (size >> 25) ? -EIO : size;
+}
+
/*
* Inode number ops. Inodes consist of a compressed block number, and an
* uncompressed offset within that block
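The bound in squashfs_block_size() follows from the on-disk format: a data block is at most 1 MiB (20 bits of length) and the only extra bit is the SQUASHFS_COMPRESSED_BIT_BLOCK marker at bit 24, so any set bit at position 25 or above can only come from corruption. Callers simply propagate the negative return, for example:

int size = squashfs_block_size(blist[i]);

if (size < 0)
	return size;	/* corrupted image: fail the read with -EIO */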
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
index 1da565cb50c3d0..ef69c31947bf86 100644
--- a/fs/squashfs/squashfs_fs_sb.h
+++ b/fs/squashfs/squashfs_fs_sb.h
@@ -75,6 +75,7 @@ struct squashfs_sb_info {
unsigned short block_log;
long long bytes_used;
unsigned int inodes;
+ unsigned int fragments;
int xattr_ids;
};
#endif
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index 5056babe00df93..93aa3e23c845b2 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -176,6 +176,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
msblk->inodes = le32_to_cpu(sblk->inodes);
+ msblk->fragments = le32_to_cpu(sblk->fragments);
flags = le16_to_cpu(sblk->flags);
TRACE("Found valid superblock on %s\n", bdevname(sb->s_bdev, b));
@@ -186,7 +187,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
TRACE("Block size %d\n", msblk->block_size);
TRACE("Number of inodes %d\n", msblk->inodes);
- TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
+ TRACE("Number of fragments %d\n", msblk->fragments);
TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
@@ -273,7 +274,7 @@ allocate_id_index_table:
sb->s_export_op = &squashfs_export_ops;
handle_fragments:
- fragments = le32_to_cpu(sblk->fragments);
+ fragments = msblk->fragments;
if (fragments == 0)
goto check_directory_table;
diff --git a/fs/super.c b/fs/super.c
index a57b219a5a0e07..9541995cf3cf73 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -118,13 +118,23 @@ static unsigned long super_cache_count(struct shrinker *shrink,
sb = container_of(shrink, struct super_block, s_shrink);
/*
- * Don't call trylock_super as it is a potential
- * scalability bottleneck. The counts could get updated
- * between super_cache_count and super_cache_scan anyway.
- * Call to super_cache_count with shrinker_rwsem held
- * ensures the safety of call to list_lru_shrink_count() and
- * s_op->nr_cached_objects().
+ * We don't call trylock_super() here as it is a scalability bottleneck,
+ * so we're exposed to partial setup state. The shrinker rwsem does not
+ * protect filesystem operations backing list_lru_shrink_count() or
+ * s_op->nr_cached_objects(). Counts can change between
+ * super_cache_count and super_cache_scan, so we really don't need locks
+ * here.
+ *
+ * However, if we are currently mounting the superblock, the underlying
+ * filesystem might be in a state of partial construction and hence it
+ * is dangerous to access it. trylock_super() uses a MS_BORN check to
+ * avoid this situation, so do the same here. The memory barrier is
+ * matched with the one in mount_fs() as we don't hold locks here.
*/
+ if (!(sb->s_flags & MS_BORN))
+ return 0;
+ smp_rmb();
+
if (sb->s_op && sb->s_op->nr_cached_objects)
total_objects = sb->s_op->nr_cached_objects(sb, sc);
@@ -1133,6 +1143,14 @@ mount_fs(struct file_system_type *type, int flags, const char *name, void *data)
sb = root->d_sb;
BUG_ON(!sb);
WARN_ON(!sb->s_bdi);
+
+ /*
+ * Write barrier is for super_cache_count(). We place it before setting
+ * MS_BORN as the data dependency between the two functions is the
+ * superblock structure contents that we just set up, not the MS_BORN
+ * flag.
+ */
+ smp_wmb();
sb->s_flags |= MS_BORN;
error = security_sb_kern_mount(sb, flags, secdata);
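The two fs/super.c hunks form the classic publish/subscribe barrier pairing: the writer finishes constructing the superblock, issues smp_wmb(), then sets MS_BORN; the reader tests MS_BORN, issues smp_rmb(), and only then dereferences the structure. The general shape of the idiom, sketched with illustrative names:

/* Writer: publish the object, then the flag. */
setup_object(obj);
smp_wmb();			/* order the setup before the flag store */
WRITE_ONCE(obj->ready, true);

/* Reader: check the flag, then consume. */
if (!READ_ONCE(obj->ready))
	return 0;
smp_rmb();			/* order the flag load before the reads */
use_object(obj);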
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 39c75a86c67f1d..666986b95c5d10 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -408,6 +408,50 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
/**
+ * sysfs_break_active_protection - break "active" protection
+ * @kobj: The kernel object @attr is associated with.
+ * @attr: The attribute to break the "active" protection for.
+ *
+ * With sysfs, just like kernfs, deletion of an attribute is postponed until
+ * all active .show() and .store() callbacks have finished unless this function
+ * is called. Hence this function is useful in methods that implement
+ * self-deletion.
+ */
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ struct kernfs_node *kn;
+
+ kobject_get(kobj);
+ kn = kernfs_find_and_get(kobj->sd, attr->name);
+ if (kn)
+ kernfs_break_active_protection(kn);
+ return kn;
+}
+EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
+
+/**
+ * sysfs_unbreak_active_protection - restore "active" protection
+ * @kn: Pointer returned by sysfs_break_active_protection().
+ *
+ * Undo the effects of sysfs_break_active_protection(). This function calls
+ * kernfs_put() on the kernfs node that corresponds to the 'attr' argument
+ * passed to sysfs_break_active_protection(), and that attribute may have been
+ * removed between the sysfs_break_active_protection() and
+ * sysfs_unbreak_active_protection() calls, so it is not safe to access @kn
+ * after this function has returned.
+ */
+void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+ struct kobject *kobj = kn->parent->priv;
+
+ kernfs_unbreak_active_protection(kn);
+ kernfs_put(kn);
+ kobject_put(kobj);
+}
+EXPORT_SYMBOL_GPL(sysfs_unbreak_active_protection);
+
+/**
* sysfs_remove_file_ns - remove an object attribute with a custom ns tag
* @kobj: object we're acting for
* @attr: attribute descriptor
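These helpers exist for attribute methods that delete their own attribute: without first breaking active protection, the removal would wait for the in-flight store() callback to complete and deadlock. A minimal usage sketch, with a hypothetical delete_store() method:

static ssize_t delete_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct kernfs_node *kn;

	/* Drop "active" protection so the removal below cannot deadlock. */
	kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
	device_remove_file(dev, attr);
	if (kn)
		sysfs_unbreak_active_protection(kn);
	return count;
}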
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index ef8bcdb80acabb..afdfe85e635c87 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -276,7 +276,7 @@ static int __sysv_write_inode(struct inode *inode, int wait)
}
}
brelse(bh);
- return 0;
+ return err;
}
int sysv_write_inode(struct inode *inode, struct writeback_control *wbc)
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 0b9da5b6e0f966..539fa934ed93a2 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -661,6 +661,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
spin_lock(&ui->ui_lock);
ui->synced_i_size = ui->ui_size;
spin_unlock(&ui->ui_lock);
+ if (xent) {
+ spin_lock(&host_ui->ui_lock);
+ host_ui->synced_i_size = host_ui->ui_size;
+ spin_unlock(&host_ui->ui_lock);
+ }
mark_inode_clean(c, ui);
mark_inode_clean(c, host_ui);
return 0;
@@ -1186,7 +1191,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
else if (err)
goto out_free;
else {
- if (le32_to_cpu(dn->size) <= dlen)
+ int dn_len = le32_to_cpu(dn->size);
+
+ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) {
+ ubifs_err(c, "bad data node (block %u, inode %lu)",
+ blk, inode->i_ino);
+ ubifs_dump_node(c, dn);
+ goto out_free;
+ }
+
+ if (dn_len <= dlen)
dlen = 0; /* Nothing to do */
else {
int compr_type = le16_to_cpu(dn->compr_type);
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index a0011aa3a77911..f43f162e36f42c 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c,
}
}
- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
- if (!buf)
- return -ENOMEM;
-
/*
* After an unclean unmount, empty and freeable LEBs
* may contain garbage - do not scan them.
@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c,
return LPT_SCAN_CONTINUE;
}
+ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
sleb = ubifs_scan(c, lnum, 0, buf, 0);
if (IS_ERR(sleb)) {
ret = PTR_ERR(sleb);
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 0bb6de3564516f..7968b7a5e7878f 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1918,6 +1918,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
int dev, vol;
char *endptr;
+ if (!name || !*name)
+ return ERR_PTR(-EINVAL);
+
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index c763fda257bf37..637114e8c7fdef 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -150,6 +150,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
sizeof(struct fileIdentDesc));
}
}
+ /* Got last entry outside of dir size - fs is corrupted! */
+ if (*nf_pos > dir->i_size)
+ return NULL;
return fi;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index d33cc1257a8236..e5fae45efedcb1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1364,6 +1364,12 @@ reread:
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ ret = -EIO;
+ goto out;
+ }
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 42b8c57795cbad..c6ce7503a32991 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -260,6 +260,9 @@ void udf_truncate_extents(struct inode *inode)
epos.block = eloc;
epos.bh = udf_tread(sb,
udf_get_lb_pblock(sb, &eloc, 0));
+ /* Error reading indirect block? */
+ if (!epos.bh)
+ return;
if (elen)
indirect_ext_len =
(elen + sb->s_blocksize - 1) >>
diff --git a/fs/xattr.c b/fs/xattr.c
index 76f01bf4b04820..09441c396798d8 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -453,7 +453,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value,
if (error > 0) {
if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) ||
(strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0))
- posix_acl_fix_xattr_to_user(kvalue, size);
+ posix_acl_fix_xattr_to_user(kvalue, error);
if (size && copy_to_user(value, kvalue, error))
error = -EFAULT;
} else if (error == -ERANGE && size >= XATTR_SIZE_MAX) {
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index e1e7fe3b5424c8..b663b756f55291 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -1924,6 +1924,93 @@ xfs_alloc_space_available(
}
/*
+ * Check the agfl fields of the agf for inconsistency or corruption. The purpose
+ * is to detect an agfl header padding mismatch between current and early v5
+ * kernels. This problem manifests as a 1-slot size difference between the
+ * on-disk flcount and the active [first, last] range of a wrapped agfl. This
+ * may also catch variants of agfl count corruption unrelated to padding. Either
+ * way, we'll reset the agfl and warn the user.
+ *
+ * Return true if a reset is required before the agfl can be used, false
+ * otherwise.
+ */
+static bool
+xfs_agfl_needs_reset(
+ struct xfs_mount *mp,
+ struct xfs_agf *agf)
+{
+ uint32_t f = be32_to_cpu(agf->agf_flfirst);
+ uint32_t l = be32_to_cpu(agf->agf_fllast);
+ uint32_t c = be32_to_cpu(agf->agf_flcount);
+ int agfl_size = XFS_AGFL_SIZE(mp);
+ int active;
+
+ /* no agfl header on v4 supers */
+ if (!xfs_sb_version_hascrc(&mp->m_sb))
+ return false;
+
+ /*
+ * The agf read verifier catches severe corruption of these fields.
+ * Repeat some sanity checks to cover a packed -> unpacked mismatch if
+ * the verifier allows it.
+ */
+ if (f >= agfl_size || l >= agfl_size)
+ return true;
+ if (c > agfl_size)
+ return true;
+
+ /*
+ * Check consistency between the on-disk count and the active range. An
+ * agfl padding mismatch manifests as an inconsistent flcount.
+ */
+ if (c && l >= f)
+ active = l - f + 1;
+ else if (c)
+ active = agfl_size - f + l + 1;
+ else
+ active = 0;
+
+ return active != c;
+}
+
+/*
+ * Reset the agfl to an empty state. Ignore/drop any existing blocks since the
+ * agfl content cannot be trusted. Warn the user that a repair is required to
+ * recover leaked blocks.
+ *
+ * The purpose of this mechanism is to handle filesystems affected by the agfl
+ * header padding mismatch problem. A reset keeps the filesystem online with a
+ * relatively minor free space accounting inconsistency rather than suffer the
+ * inevitable crash from use of an invalid agfl block.
+ */
+static void
+xfs_agfl_reset(
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ struct xfs_perag *pag)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+
+ ASSERT(pag->pagf_agflreset);
+ trace_xfs_agfl_reset(mp, agf, 0, _RET_IP_);
+
+ xfs_warn(mp,
+ "WARNING: Reset corrupted AGFL on AG %u. %d blocks leaked. "
+ "Please unmount and run xfs_repair.",
+ pag->pag_agno, pag->pagf_flcount);
+
+ agf->agf_flfirst = 0;
+ agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
+ agf->agf_flcount = 0;
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLLAST |
+ XFS_AGF_FLCOUNT);
+
+ pag->pagf_flcount = 0;
+ pag->pagf_agflreset = false;
+}
+
+/*
* Decide whether to use this allocation group for this allocation.
* If so, fix up the btree freelist's size.
*/
@@ -1983,6 +2070,10 @@ xfs_alloc_fix_freelist(
}
}
+ /* reset a padding mismatched agfl before final free space check */
+ if (pag->pagf_agflreset)
+ xfs_agfl_reset(tp, agbp, pag);
+
/* If there isn't enough total space or single-extent, reject it. */
need = xfs_alloc_min_freelist(mp, pag);
if (!xfs_alloc_space_available(args, need, flags))
@@ -2121,6 +2212,7 @@ xfs_alloc_get_freelist(
agf->agf_flfirst = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
+ ASSERT(!pag->pagf_agflreset);
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
@@ -2226,6 +2318,7 @@ xfs_alloc_put_freelist(
agf->agf_fllast = 0;
pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
+ ASSERT(!pag->pagf_agflreset);
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
@@ -2417,6 +2510,7 @@ xfs_alloc_read_agf(
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
pag->pagf_init = 1;
+ pag->pagf_agflreset = xfs_agfl_needs_reset(mp, agf);
}
#ifdef DEBUG
else if (!XFS_FORCED_SHUTDOWN(mp)) {
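The consistency test in xfs_agfl_needs_reset() is the circular-buffer occupancy formula: for first index f, last index l and ring size n, a non-empty active range holds l - f + 1 slots when it has not wrapped and n - f + l + 1 when it has, and a padding mismatch shows up as this value disagreeing with the recorded flcount. The same check as a standalone sketch:

/*
 * Occupied slots in a ring of size n whose active range is [f, l];
 * an empty list is signalled by count == 0.
 */
static unsigned int ring_active(unsigned int f, unsigned int l,
				unsigned int n, unsigned int count)
{
	if (count == 0)
		return 0;
	return (l >= f) ? l - f + 1 : n - f + l + 1;
}

If ring_active(f, l, n, c) differs from the on-disk c, the freelist cannot be trusted and is reset.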
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fb9636cc927cfe..5d8d12746e6e33 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -528,7 +528,14 @@ xfs_attr_shortform_addname(xfs_da_args_t *args)
if (args->flags & ATTR_CREATE)
return retval;
retval = xfs_attr_shortform_remove(args);
- ASSERT(retval == 0);
+ if (retval)
+ return retval;
+ /*
+ * Since we have removed the old attr, clear ATTR_REPLACE so
+ * that the leaf format add routine won't trip over the attr
+ * not being around.
+ */
+ args->flags &= ~ATTR_REPLACE;
}
if (args->namelen >= XFS_ATTR_SF_ENTSIZE_MAX ||
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index f52c72a1a06f28..73b725f965ebc7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3323,8 +3323,6 @@ maybe_sleep:
*/
if (iclog->ic_state & XLOG_STATE_IOERROR)
return -EIO;
- if (log_flushed)
- *log_flushed = 1;
} else {
no_sleep:
@@ -3432,8 +3430,6 @@ try_again:
xlog_wait(&iclog->ic_prev->ic_write_wait,
&log->l_icloglock);
- if (log_flushed)
- *log_flushed = 1;
already_slept = 1;
goto try_again;
}
@@ -3467,9 +3463,6 @@ try_again:
*/
if (iclog->ic_state & XLOG_STATE_IOERROR)
return -EIO;
-
- if (log_flushed)
- *log_flushed = 1;
} else { /* just return */
spin_unlock(&log->l_icloglock);
}
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b57098481c10a2..ae3e52749f20eb 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -278,6 +278,7 @@ typedef struct xfs_perag {
char pagi_inodeok; /* The agi is ok for inodes */
__uint8_t pagf_levels[XFS_BTNUM_AGF];
/* # of levels in bno & cnt btree */
+ bool pagf_agflreset; /* agfl requires reset before use */
__uint32_t pagf_flcount; /* count of blocks in freelist */
xfs_extlen_t pagf_freeblks; /* total free blocks */
xfs_extlen_t pagf_longest; /* longest free space */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 877079eb0f8f0e..cc6fa64821d2d5 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -1485,7 +1485,7 @@ TRACE_EVENT(xfs_trans_commit_lsn,
__entry->lsn)
);
-TRACE_EVENT(xfs_agf,
+DECLARE_EVENT_CLASS(xfs_agf_class,
TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags,
unsigned long caller_ip),
TP_ARGS(mp, agf, flags, caller_ip),
@@ -1541,6 +1541,13 @@ TRACE_EVENT(xfs_agf,
__entry->longest,
(void *)__entry->caller_ip)
);
+#define DEFINE_AGF_EVENT(name) \
+DEFINE_EVENT(xfs_agf_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_agf *agf, int flags, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agf, flags, caller_ip))
+DEFINE_AGF_EVENT(xfs_agf);
+DEFINE_AGF_EVENT(xfs_agfl_reset);
TRACE_EVENT(xfs_free_extent,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 07fb100bcc688d..4f963774e7b521 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -275,7 +275,7 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
#ifdef CONFIG_CPU_FREQ
void acpi_processor_ppc_init(void);
void acpi_processor_ppc_exit(void);
-int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
+void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
#else
static inline void acpi_processor_ppc_init(void)
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 1cbb8338edf391..827e4d3bbc7a46 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
#endif
/* Return a pointer with offset calculated */
-#define __set_fixmap_offset(idx, phys, flags) \
-({ \
- unsigned long addr; \
- __set_fixmap(idx, phys, flags); \
- addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
- addr; \
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long ________addr; \
+ __set_fixmap(idx, phys, flags); \
+ ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ ________addr; \
})
#define set_fixmap_offset(idx, phys) \
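Renaming the statement-expression local from addr to ________addr is macro hygiene: had a caller invoked set_fixmap_offset(idx, addr) with its own variable named addr, the macro's local would shadow the argument and the expression would read an uninitialized value. A condensed userspace demonstration of the hazard (BROKEN_OFFSET is purely illustrative):

#include <stdio.h>

/* Buggy: the local 'addr' shadows any caller variable named 'addr'. */
#define BROKEN_OFFSET(phys)			\
({						\
	unsigned long addr;			\
	addr = 0x1000 + ((phys) & 0xfff);	\
	addr;					\
})

int main(void)
{
	unsigned long addr = 0x2345;

	/*
	 * The inner 'addr' shadows ours, so the macro computes its
	 * result from its own uninitialized local, not from 0x2345.
	 */
	printf("%#lx\n", BROKEN_OFFSET(addr));
	return 0;
}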
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 3f5a545b7444d5..b1ad73280d962f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -778,8 +778,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
@@ -797,16 +797,28 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !_HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 283a2997187e30..e821bf27fc7066 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -248,6 +248,14 @@
*(.data..init_task)
/*
+ * Allow architectures to handle ro_after_init data on their
+ * own by defining an empty RO_AFTER_INIT_DATA.
+ */
+#ifndef RO_AFTER_INIT_DATA
+#define RO_AFTER_INIT_DATA *(.data..ro_after_init)
+#endif
+
+/*
* Read only Data
*/
#define RO_DATA_SECTION(align) \
@@ -255,6 +263,7 @@
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
+ RO_AFTER_INIT_DATA /* Read only after init */ \
*(__vermagic) /* Kernel version magic */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1cb..00000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
- u64 polytmp[2*VMAC_TAG_LEN/64];
- u64 cached_nonce[2];
- u64 cached_aes[2];
- int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
- struct crypto_cipher *child;
- struct vmac_ctx __vmac_ctx;
- u8 partial[VMAC_NHBYTES]; /* partial block */
- int partial_size; /* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 517c0ec1c4936c..174f884724df70 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -153,6 +153,57 @@ struct __drm_connnectors_state {
struct drm_connector_state *state, *old_state, *new_state;
};
+struct drm_private_obj;
+struct drm_private_state;
+
+/**
+ * struct drm_private_state_funcs - atomic state functions for private objects
+ *
+ * These hooks are used by atomic helpers to create, swap and destroy states of
+ * private objects. The structure itself is used as a vtable to identify the
+ * associated private object type. Each private object type that needs to be
+ * added to the atomic states is expected to have an implementation of these
+ * hooks and pass a pointer to its drm_private_state_funcs struct to
+ * drm_atomic_get_private_obj_state().
+ */
+struct drm_private_state_funcs {
+ /**
+ * @atomic_duplicate_state:
+ *
+ * Duplicate the current state of the private object and return it. It
+ * is an error to call this before obj->state has been initialized.
+ *
+ * RETURNS:
+ *
+ * Duplicated atomic state or NULL when obj->state is not
+ * initialized or allocation failed.
+ */
+ struct drm_private_state *(*atomic_duplicate_state)(struct drm_private_obj *obj);
+
+ /**
+ * @atomic_destroy_state:
+ *
+ * Frees the private object state created with @atomic_duplicate_state.
+ */
+ void (*atomic_destroy_state)(struct drm_private_obj *obj,
+ struct drm_private_state *state);
+};
+
+struct drm_private_obj {
+ struct drm_private_state *state;
+
+ const struct drm_private_state_funcs *funcs;
+};
+
+struct drm_private_state {
+ struct drm_atomic_state *state;
+};
+
+struct __drm_private_objs_state {
+ struct drm_private_obj *ptr;
+ struct drm_private_state *state, *old_state, *new_state;
+};
+
/**
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
@@ -164,6 +215,8 @@ struct __drm_connnectors_state {
* @crtcs: pointer to array of CRTC pointers
* @num_connector: size of the @connectors and @connector_states arrays
* @connectors: pointer to array of structures with per-connector data
+ * @num_private_objs: size of the @private_objs array
+ * @private_objs: pointer to array of private object pointers
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
@@ -177,6 +230,8 @@ struct drm_atomic_state {
struct __drm_crtcs_state *crtcs;
int num_connector;
struct __drm_connnectors_state *connectors;
+ int num_private_objs;
+ struct __drm_private_objs_state *private_objs;
struct drm_modeset_acquire_ctx *acquire_ctx;
@@ -250,6 +305,15 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
struct drm_connector_state *state, struct drm_property *property,
uint64_t val);
+void drm_atomic_private_obj_init(struct drm_private_obj *obj,
+ struct drm_private_state *state,
+ const struct drm_private_state_funcs *funcs);
+void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
+
+struct drm_private_state * __must_check
+drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj);
+
/**
* drm_atomic_get_existing_crtc_state - get crtc state, if it exists
* @state: global atomic state object
@@ -257,6 +321,9 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
*
* This function returns the crtc state for the given crtc, or NULL
* if the crtc is not part of the global atomic state.
+ *
+ * This function is deprecated; @drm_atomic_get_old_crtc_state or
+ * @drm_atomic_get_new_crtc_state should be used instead.
*/
static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
@@ -266,12 +333,44 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
}
/**
+ * drm_atomic_get_old_crtc_state - get old crtc state, if it exists
+ * @state: global atomic state object
+ * @crtc: crtc to grab
+ *
+ * This function returns the old crtc state for the given crtc, or
+ * NULL if the crtc is not part of the global atomic state.
+ */
+static inline struct drm_crtc_state *
+drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ return state->crtcs[drm_crtc_index(crtc)].old_state;
+}
+
+/**
+ * drm_atomic_get_new_crtc_state - get new crtc state, if it exists
+ * @state: global atomic state object
+ * @crtc: crtc to grab
+ *
+ * This function returns the new crtc state for the given crtc, or
+ * NULL if the crtc is not part of the global atomic state.
+ */
+static inline struct drm_crtc_state *
+drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ return state->crtcs[drm_crtc_index(crtc)].new_state;
+}
+
+/**
* drm_atomic_get_existing_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
*
* This function returns the plane state for the given plane, or NULL
* if the plane is not part of the global atomic state.
+ *
+ * This function is deprecated; @drm_atomic_get_old_plane_state or
+ * @drm_atomic_get_new_plane_state should be used instead.
*/
static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
@@ -281,12 +380,45 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
}
/**
+ * drm_atomic_get_old_plane_state - get old plane state, if it exists
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the old plane state for the given plane, or
+ * NULL if the plane is not part of the global atomic state.
+ */
+static inline struct drm_plane_state *
+drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ return state->planes[drm_plane_index(plane)].old_state;
+}
+
+/**
+ * drm_atomic_get_new_plane_state - get new plane state, if it exists
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the new plane state for the given plane, or
+ * NULL if the plane is not part of the global atomic state.
+ */
+static inline struct drm_plane_state *
+drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ return state->planes[drm_plane_index(plane)].new_state;
+}
+
+/**
* drm_atomic_get_existing_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
*
* This function returns the connector state for the given connector,
* or NULL if the connector is not part of the global atomic state.
+ *
+ * This function is deprecated; @drm_atomic_get_old_connector_state or
+ * @drm_atomic_get_new_connector_state should be used instead.
*/
static inline struct drm_connector_state *
drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
@@ -301,6 +433,46 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
}
/**
+ * drm_atomic_get_old_connector_state - get connector state, if it exists
+ * @state: global atomic state object
+ * @connector: connector to grab
+ *
+ * This function returns the old connector state for the given connector,
+ * or NULL if the connector is not part of the global atomic state.
+ */
+static inline struct drm_connector_state *
+drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int index = drm_connector_index(connector);
+
+ if (index >= state->num_connector)
+ return NULL;
+
+ return state->connectors[index].old_state;
+}
+
+/**
+ * drm_atomic_get_new_connector_state - get connector state, if it exists
+ * @state: global atomic state object
+ * @connector: connector to grab
+ *
+ * This function returns the new connector state for the given connector,
+ * or NULL if the connector is not part of the global atomic state.
+ */
+static inline struct drm_connector_state *
+drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int index = drm_connector_index(connector);
+
+ if (index >= state->num_connector)
+ return NULL;
+
+ return state->connectors[index].new_state;
+}
+
+/**
* __drm_atomic_get_current_plane_state - get current plane state
* @state: global atomic state object
* @plane: plane to grab
@@ -480,6 +652,65 @@ int drm_atomic_debugfs_cleanup(struct drm_minor *minor);
for_each_if (plane)
/**
+ * for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @obj: &struct drm_private_obj iteration cursor
+ * @old_obj_state: &struct drm_private_state iteration cursor for the old state
+ * @new_obj_state: &struct drm_private_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all private objects in an atomic update, tracking both
+ * old and new state. This is useful in places where the state delta needs
+ * to be considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_private_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs && \
+ ((obj) = (__state)->private_objs[__i].ptr, \
+ (old_obj_state) = (__state)->private_objs[__i].old_state, \
+ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (obj)
+
+/**
+ * for_each_old_private_obj_in_state - iterate over all private objects in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @obj: &struct drm_private_obj iteration cursor
+ * @old_obj_state: &struct drm_private_state iteration cursor for the old state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all private objects in an atomic update, tracking only
+ * the old state. This is useful in disable functions, where we need the old
+ * state the hardware is still in.
+ */
+#define for_each_old_private_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs && \
+ ((obj) = (__state)->private_objs[__i].ptr, \
+ (old_obj_state) = (__state)->private_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if (obj)
+
+/**
+ * for_each_new_private_obj_in_state - iterate over all private objects in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @obj: &struct drm_private_obj iteration cursor
+ * @new_obj_state: &struct drm_private_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all private objects in an atomic update, tracking only
+ * the new state. This is useful in enable functions, where we need the new state the
+ * hardware should be in when the atomic commit operation has completed.
+ */
+#define for_each_new_private_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_private_objs && \
+ ((obj) = (__state)->private_objs[__i].ptr, \
+ (new_obj_state) = (__state)->private_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (obj)
+
+/**
* drm_atomic_crtc_needs_modeset - compute combined modeset need
* @state: &drm_crtc_state for the CRTC
*
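The explicit old/new accessors above remove the ambiguity of "current" state during an atomic update. A sketch of how a driver's check hook might use them, with hypothetical foo_* names:

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* Act on the delta rather than an ambiguous "current" state. */
	if (!old_state->active && new_state->active)
		return foo_validate_enable(crtc, new_state);	/* hypothetical */

	return 0;
}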
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 5a45a1d9f3837e..03798a3af07e9a 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -33,6 +33,8 @@
#include <drm/drm_modeset_helper.h>
struct drm_atomic_state;
+struct drm_private_obj;
+struct drm_private_state;
int drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_atomic_state *state);
@@ -165,6 +167,8 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
uint32_t size);
+void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
+ struct drm_private_state *state);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 19fba65924da4a..6314e424ad897e 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -90,6 +90,17 @@ enum subpixel_order {
};
/**
+ * enum drm_link_status - connector's link_status property value
+ *
+ * This enum is used as the connector's link status property value.
+ * It is set to the values defined in uapi.
+ */
+enum drm_link_status {
+ DRM_LINK_STATUS_GOOD = DRM_MODE_LINK_STATUS_GOOD,
+ DRM_LINK_STATUS_BAD = DRM_MODE_LINK_STATUS_BAD,
+};
+
+/**
* struct drm_display_info - runtime data about the connected sink
*
* Describes a given display (e.g. CRT or flat panel) and its limitations. For
@@ -202,6 +213,12 @@ struct drm_connector_state {
struct drm_encoder *best_encoder;
+ /**
+ * @link_status: Connector link_status to keep track of whether link is
+ * GOOD or BAD to notify userspace if retraining is necessary.
+ */
+ enum drm_link_status link_status;
+
struct drm_atomic_state *state;
/**
@@ -788,6 +805,8 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
int drm_mode_connector_set_tile_property(struct drm_connector *connector);
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
+void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status);
/**
* drm_for_each_connector - iterate over all connectors
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index fc4c010d87ed7f..b2deea7e777a35 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -64,6 +64,11 @@
/* AUX CH addresses */
/* DPCD */
#define DP_DPCD_REV 0x000
+# define DP_DPCD_REV_10 0x10
+# define DP_DPCD_REV_11 0x11
+# define DP_DPCD_REV_12 0x12
+# define DP_DPCD_REV_13 0x13
+# define DP_DPCD_REV_14 0x14
#define DP_MAX_LINK_RATE 0x001
@@ -118,6 +123,7 @@
# define DP_DPCD_DISPLAY_CONTROL_CAPABLE (1 << 3) /* edp v1.2 or higher */
#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */
+# define DP_TRAINING_AUX_RD_MASK 0x7F /* XXX 1.2? */
#define DP_ADAPTER_CAP 0x00f /* 1.2 */
# define DP_FORCE_LOAD_SENSE_CAP (1 << 0)
@@ -182,6 +188,7 @@
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
@@ -229,6 +236,7 @@
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
+# define DP_LINK_BW_8_1 0x1e /* 1.4 */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
@@ -346,6 +354,7 @@
# define DP_PSR_FRAME_CAPTURE (1 << 3)
# define DP_PSR_SELECTIVE_UPDATE (1 << 4)
# define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5)
+# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */
#define DP_ADAPTER_CTRL 0x1a0
# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0)
@@ -417,6 +426,63 @@
#define DP_TEST_LANE_COUNT 0x220
#define DP_TEST_PATTERN 0x221
+# define DP_NO_TEST_PATTERN 0x0
+# define DP_COLOR_RAMP 0x1
+# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2
+# define DP_COLOR_SQUARE 0x3
+
+#define DP_TEST_H_TOTAL_HI 0x222
+#define DP_TEST_H_TOTAL_LO 0x223
+
+#define DP_TEST_V_TOTAL_HI 0x224
+#define DP_TEST_V_TOTAL_LO 0x225
+
+#define DP_TEST_H_START_HI 0x226
+#define DP_TEST_H_START_LO 0x227
+
+#define DP_TEST_V_START_HI 0x228
+#define DP_TEST_V_START_LO 0x229
+
+#define DP_TEST_HSYNC_HI 0x22A
+# define DP_TEST_HSYNC_POLARITY (1 << 7)
+# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0)
+#define DP_TEST_HSYNC_WIDTH_LO 0x22B
+
+#define DP_TEST_VSYNC_HI 0x22C
+# define DP_TEST_VSYNC_POLARITY (1 << 7)
+# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0)
+#define DP_TEST_VSYNC_WIDTH_LO 0x22D
+
+#define DP_TEST_H_WIDTH_HI 0x22E
+#define DP_TEST_H_WIDTH_LO 0x22F
+
+#define DP_TEST_V_HEIGHT_HI 0x230
+#define DP_TEST_V_HEIGHT_LO 0x231
+
+#define DP_TEST_MISC0 0x232
+# define DP_TEST_SYNC_CLOCK (1 << 0)
+# define DP_TEST_COLOR_FORMAT_MASK (3 << 1)
+# define DP_TEST_COLOR_FORMAT_SHIFT 1
+# define DP_COLOR_FORMAT_RGB (0 << 1)
+# define DP_COLOR_FORMAT_YCbCr422 (1 << 1)
+# define DP_COLOR_FORMAT_YCbCr444 (2 << 1)
+# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3)
+# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4)
+# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4)
+# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4)
+# define DP_TEST_BIT_DEPTH_MASK (7 << 5)
+# define DP_TEST_BIT_DEPTH_SHIFT 5
+# define DP_TEST_BIT_DEPTH_6 (0 << 5)
+# define DP_TEST_BIT_DEPTH_8 (1 << 5)
+# define DP_TEST_BIT_DEPTH_10 (2 << 5)
+# define DP_TEST_BIT_DEPTH_12 (3 << 5)
+# define DP_TEST_BIT_DEPTH_16 (4 << 5)
+
+#define DP_TEST_MISC1 0x233
+# define DP_TEST_REFRESH_DENOMINATOR (1 << 0)
+# define DP_TEST_INTERLACED (1 << 1)
+
+#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234
#define DP_TEST_CRC_R_CR 0x240
#define DP_TEST_CRC_G_Y 0x242
@@ -577,6 +643,15 @@
# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_MASK (0xf << 4)
# define DP_LAST_ACTUAL_SYNCHRONIZATION_LATENCY_SHIFT 4
+#define DP_LAST_RECEIVED_PSR_SDP 0x200a /* eDP 1.2 */
+# define DP_PSR_STATE_BIT (1 << 0) /* eDP 1.2 */
+# define DP_UPDATE_RFB_BIT (1 << 1) /* eDP 1.2 */
+# define DP_CRC_VALID_BIT (1 << 2) /* eDP 1.2 */
+# define DP_SU_VALID (1 << 3) /* eDP 1.4 */
+# define DP_FIRST_SCAN_LINE_SU_REGION (1 << 4) /* eDP 1.4 */
+# define DP_LAST_SCAN_LINE_SU_REGION (1 << 5) /* eDP 1.4 */
+# define DP_Y_COORDINATE_VALID (1 << 6) /* eDP 1.4a */
+
#define DP_RECEIVER_ALPM_STATUS 0x200b /* eDP 1.4 */
# define DP_ALPM_LOCK_TIMEOUT_ERROR (1 << 0)
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 27f3e99f55d135..40e522824b6ffd 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -24,6 +24,7 @@
#include <linux/types.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_atomic.h>
struct drm_dp_mst_branch;
@@ -403,6 +404,17 @@ struct drm_dp_payload {
int vcpi;
};
+#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
+
+struct drm_dp_mst_topology_state {
+ struct drm_private_state base;
+ int avail_slots;
+ struct drm_atomic_state *state;
+ struct drm_dp_mst_topology_mgr *mgr;
+};
+
+#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
+
/**
* struct drm_dp_mst_topology_mgr - DisplayPort MST manager
*
@@ -412,6 +424,11 @@ struct drm_dp_payload {
*/
struct drm_dp_mst_topology_mgr {
/**
+ * @base: Base private object for atomic
+ */
+ struct drm_private_obj base;
+
+ /**
* @dev: device pointer for adding i2c devices etc.
*/
struct drm_device *dev;
@@ -479,18 +496,16 @@ struct drm_dp_mst_topology_mgr {
* @pbn_div: PBN to slots divisor.
*/
int pbn_div;
+
/**
- * @total_slots: Total slots that can be allocated.
- */
- int total_slots;
- /**
- * @avail_slots: Still available slots that can be allocated.
+ * @state: State information for topology manager
*/
- int avail_slots;
+ struct drm_dp_mst_topology_state *state;
+
/**
- * @total_pbn: Total PBN count.
+ * @funcs: Atomic helper callbacks
*/
- int total_pbn;
+ const struct drm_private_state_funcs *funcs;
/**
* @qlock: protects @tx_msg_downq, the tx_slots in struct
@@ -580,7 +595,8 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
int drm_dp_calc_pbn_mode(int clock, int bpp);
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn, int slots);
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
@@ -608,4 +624,15 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
+int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn);
+int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr,
+ int slots);
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, bool power_up);
+
#endif
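With avail_slots folded into a drm_private_state, MST bandwidth accounting joins the normal atomic check/commit flow: a driver requests VCPI slots during its check phase and the core can roll the allocation back if any other part of the update fails. A sketch of the check-phase call, assuming hypothetical foo_* and to_foo_connector() names:

static int foo_encoder_atomic_check(struct drm_encoder *encoder,
				    struct drm_crtc_state *crtc_state,
				    struct drm_connector_state *conn_state)
{
	struct foo_connector *conn = to_foo_connector(conn_state->connector);
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state, &conn->mgr,
					      conn->port, pbn);
	if (slots < 0)
		return slots;	/* not enough MST bandwidth for this mode */

	return 0;
}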
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 9d2a4e7b21ff46..abc8542bc0b96f 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -445,6 +445,11 @@ struct drm_mode_config {
*/
struct drm_property *tile_property;
/**
+ * @link_status_property: Default connector property for link status
+ * of a connector
+ */
+ struct drm_property *link_status_property;
+ /**
* @plane_type_property: Default plane property to differentiate
* CURSOR, PRIMARY and OVERLAY legacy uses of planes.
*/
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index c71dfca8b07576..b196d8aa0246ec 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -326,6 +326,40 @@ struct drm_connector_helper_funcs {
*/
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
struct drm_connector_state *connector_state);
+
+ /**
+ * @atomic_check:
+ *
+	 * This hook is used to validate connector state. It is called from
+	 * &drm_atomic_helper_check_modeset whenever a connector property is
+	 * set or a modeset on the crtc is forced.
+ *
+ * Because &drm_atomic_helper_check_modeset may be called multiple times,
+ * this function should handle being called multiple times as well.
+ *
+ * This function is also allowed to inspect any other object's state and
+ * can add more state objects to the atomic commit if needed. Care must
+ * be taken though to ensure that state check and compute functions for
+ * these added states are all called, and derived state in other objects
+ * all updated. Again the recommendation is to just call check helpers
+ * until a maximal configuration is reached.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the state or the transition can't be
+ * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+ * attempt to obtain another state object ran into a &drm_modeset_lock
+ * deadlock.
+ */
+ int (*atomic_check)(struct drm_connector *connector,
+ struct drm_connector_state *state);
};
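To make the contract documented above concrete, a minimal connector ->atomic_check could look like the sketch below; pulling in the crtc state is only an example of adding another object's state to the commit, which the kerneldoc explicitly permits during the check phase.

static int my_connector_atomic_check(struct drm_connector *connector,
				     struct drm_connector_state *state)
{
	struct drm_crtc_state *crtc_state;

	if (!state->crtc)
		return 0;	/* connector being disabled, nothing to check */

	/* Acquiring another object's state is allowed in the check phase. */
	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);	/* may be -EDEADLK */

	return 0;
}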
/**
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index c56fef40f53efa..5d744ec8f644aa 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
- char data[0]; /* actual data */
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
};
extern struct key_type key_type_user;
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 73683778953d91..17f22c6d6d3c67 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -471,6 +471,24 @@ void acpi_walk_dep_device_list(acpi_handle handle);
struct platform_device *acpi_create_platform_device(struct acpi_device *);
#define ACPI_PTR(_ptr) (_ptr)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+{
+ adev->flags.visited = true;
+}
+
+static inline void acpi_device_clear_enumerated(struct acpi_device *adev)
+{
+ adev->flags.visited = false;
+}
+
+enum acpi_reconfig_event {
+ ACPI_RECONFIG_DEVICE_ADD = 0,
+ ACPI_RECONFIG_DEVICE_REMOVE,
+};
+
+int acpi_reconfig_notifier_register(struct notifier_block *nb);
+int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -627,6 +645,24 @@ static inline void acpi_dma_deconfigure(struct device *dev) { }
#define ACPI_PTR(_ptr) (NULL)
+static inline void acpi_device_set_enumerated(struct acpi_device *adev)
+{
+}
+
+static inline void acpi_device_clear_enumerated(struct acpi_device *adev)
+{
+}
+
+static inline int acpi_reconfig_notifier_register(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+
+static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+ return -EINVAL;
+}
+
#endif /* !CONFIG_ACPI */
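A rough sketch of how a consumer could register for these reconfiguration events. The callback payload is assumed to be the affected struct acpi_device, and the my_* names (including the add/remove handlers) are placeholders, not part of this patch.

static int my_acpi_reconfig_cb(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct acpi_device *adev = data;	/* assumed payload */

	switch (event) {
	case ACPI_RECONFIG_DEVICE_ADD:
		my_add_device(adev);		/* hypothetical handler */
		break;
	case ACPI_RECONFIG_DEVICE_REMOVE:
		my_remove_device(adev);		/* hypothetical handler */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_acpi_nb = {
	.notifier_call = my_acpi_reconfig_cb,
};

/* at init time: acpi_reconfig_notifier_register(&my_acpi_nb); */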
#ifdef CONFIG_ACPI
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index a307c37c2e6ca5..072501a0ac8659 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -225,6 +225,14 @@ static inline void wb_get(struct bdi_writeback *wb)
*/
static inline void wb_put(struct bdi_writeback *wb)
{
+ if (WARN_ON_ONCE(!wb->bdi)) {
+ /*
+ * A driver bug might cause a file to be removed before bdi was
+ * initialized.
+ */
+ return;
+ }
+
if (wb != &wb->bdi->wb)
percpu_ref_put(&wb->refcnt);
}
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
new file mode 100644
index 00000000000000..f6505d83069d4b
--- /dev/null
+++ b/include/linux/bitfield.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Felix Fietkau <nbd@nbd.name>
+ * Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_BITFIELD_H
+#define _LINUX_BITFIELD_H
+
+#include <linux/bug.h>
+
+/*
+ * Bitfield access macros
+ *
+ * FIELD_{GET,PREP} macros take as first parameter shifted mask
+ * from which they extract the base mask and shift amount.
+ * Mask must be a compilation time constant.
+ *
+ * Example:
+ *
+ * #define REG_FIELD_A GENMASK(6, 0)
+ * #define REG_FIELD_B BIT(7)
+ * #define REG_FIELD_C GENMASK(15, 8)
+ * #define REG_FIELD_D GENMASK(31, 16)
+ *
+ * Get:
+ * a = FIELD_GET(REG_FIELD_A, reg);
+ * b = FIELD_GET(REG_FIELD_B, reg);
+ *
+ * Set:
+ * reg = FIELD_PREP(REG_FIELD_A, 1) |
+ * FIELD_PREP(REG_FIELD_B, 0) |
+ * FIELD_PREP(REG_FIELD_C, c) |
+ * FIELD_PREP(REG_FIELD_D, 0x40);
+ *
+ * Modify:
+ * reg &= ~REG_FIELD_C;
+ * reg |= FIELD_PREP(REG_FIELD_C, c);
+ */
+
+#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+
+#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
+ ({ \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
+ _pfx "mask is not constant"); \
+ BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
+ ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
+ _pfx "value too large for the field"); \
+ BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ _pfx "type of reg too small for mask"); \
+ __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
+ (1ULL << __bf_shf(_mask))); \
+ })
+
+/**
+ * FIELD_PREP() - prepare a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_val: value to put in the field
+ *
+ * FIELD_PREP() masks and shifts up the value. The result should
+ * be combined with other fields of the bitfield using logical OR.
+ */
+#define FIELD_PREP(_mask, _val) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \
+ ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \
+ })
+
+/**
+ * FIELD_GET() - extract a bitfield element
+ * @_mask: shifted mask defining the field's length and position
+ * @_reg: value of the entire bitfield
+ *
+ * FIELD_GET() extracts the field specified by @_mask from the
+ * bitfield passed in as @_reg by masking and shifting it down.
+ */
+#define FIELD_GET(_mask, _reg) \
+ ({ \
+ __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \
+ (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
+ })
+
+#endif
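A short usage sketch of the new helpers, reusing the REG_FIELD_* masks from the header comment above; pack_fields() and unpack_c() are illustrative names only.

#include <linux/bitfield.h>
#include <linux/bitops.h>	/* GENMASK() */
#include <linux/types.h>

#define REG_FIELD_A	GENMASK(6, 0)
#define REG_FIELD_C	GENMASK(15, 8)

static u32 pack_fields(u32 a, u32 c)
{
	/* FIELD_PREP() shifts each value into position; OR them together. */
	return FIELD_PREP(REG_FIELD_A, a) | FIELD_PREP(REG_FIELD_C, c);
}

static u32 unpack_c(u32 reg)
{
	/* FIELD_GET() masks the field out and shifts it back down. */
	return FIELD_GET(REG_FIELD_C, reg);
}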
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fe14382f966451..1383e1c03ff2da 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -882,8 +882,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
if (!q->limits.chunk_sectors)
return q->limits.max_sectors;
- return q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1));
+ return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1))));
}
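Worked example of the clamp above: with chunk_sectors = 256 and a request starting at offset 250 within the chunk, the old expression returned 256 - (250 & 255) = 6 sectors; the new min() additionally bounds the result by max_sectors, so a queue advertising max_sectors = 4 now yields 4 rather than 6 (values chosen purely for illustration).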
static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 132585a7fbd841..bae3da5bcda04d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -177,7 +177,6 @@ void bpf_register_map_type(struct bpf_map_type_list *tl);
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
-void bpf_prog_put_rcu(struct bpf_prog *prog);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
@@ -208,10 +207,6 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd)
static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-
-static inline void bpf_prog_put_rcu(struct bpf_prog *prog)
-{
-}
#endif /* CONFIG_BPF_SYSCALL */
/* verifier prototypes for helper functions called from eBPF programs */
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 7f4818673c41f8..edd3d8d3cd9014 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -13,6 +13,7 @@ enum bug_trap_type {
struct pt_regs;
#ifdef __CHECKER__
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void*)0)
@@ -23,6 +24,8 @@ struct pt_regs;
#else /* __CHECKER__ */
/* Force a compilation error if a constant expression is not a power of 2 */
+#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \
+ BUILD_BUG_ON(((n) & ((n) - 1)) != 0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 17e7e82d2aa758..1be04f8c563a0c 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -12,10 +12,24 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif
+/*
+ * __read_mostly is used to keep rarely changing variables out of frequently
+ * updated cachelines. If an architecture doesn't support it, ignore the
+ * hint.
+ */
#ifndef __read_mostly
#define __read_mostly
#endif
+/*
+ * __ro_after_init is used to mark things that are read-only after init (i.e.
+ * after mark_rodata_ro() has been called). These are effectively read-only,
+ * but may get written to during init, so can't live in .rodata (via "const").
+ */
+#ifndef __ro_after_init
+#define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
+#endif
+
#ifndef ____cacheline_aligned
#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
#endif
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5f5270941ba02a..f7178f44825bf3 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -154,6 +154,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
unsigned int idx);
+struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 3e3799cdc6e66d..9b9fe05880085e 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -72,7 +72,13 @@ struct ceph_options {
#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024)
-#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
+
+/*
+ * Handle the largest possible rbd object in one message.
+ * There is no limit on the size of cephfs objects, but it has to obey
+ * rsize and wsize mount options anyway.
+ */
+#define CEPH_MSG_MAX_DATA_LEN (32*1024*1024)
#define CEPH_AUTH_NAME_DEFAULT "guest"
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 38a0a9f0e087ea..a8eaa64b8b98ec 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -27,14 +27,6 @@
#define __SANITIZE_ADDRESS__
#endif
-/*
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well.
- */
-#undef inline
-#define inline inline __attribute__((unused)) notrace
-
/* Clang doesn't have a way to turn it off per-function, yet. */
#ifdef __noretpoline
#undef __noretpoline
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 33d972516c61d4..ab497d735015f3 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -65,21 +65,40 @@
#endif
/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline __attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
+/*
* Force always-inline if the user requests it so via the .config,
- * or if gcc is too old:
+ * or if gcc is too old.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline)) notrace
-#define __inline__ __inline__ __attribute__((always_inline)) notrace
-#define __inline __inline __attribute__((always_inline)) notrace
+#define inline \
+ inline __attribute__((always_inline, unused)) notrace __gnu_inline
#else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline notrace
-#define __inline__ __inline__ notrace
-#define __inline __inline notrace
+#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif
+#define __inline__ inline
+#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5604bb08e5fe50..6778055d98ae0d 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -111,7 +111,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect) ({ \
- int ______r; \
+ long ______r; \
static struct ftrace_branch_data \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_annotated_branch"))) \
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 99f7f5a13d9257..8019ba091a2a99 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -46,6 +46,10 @@ extern ssize_t cpu_show_spectre_v1(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
+ struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 60571292a80201..efc67eb3fe2229 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -158,7 +158,7 @@ void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
+void cpufreq_update_policy(unsigned int cpu);
bool have_governor_per_policy(void);
bool cpufreq_driver_is_slow(void);
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
@@ -176,6 +176,7 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
return 0;
}
static inline void disable_cpufreq(void) { }
+static inline void cpufreq_update_policy(unsigned int cpu) { }
#endif
/*********************************************************************
@@ -204,20 +205,12 @@ __ATTR(_name, _perm, show_##_name, NULL)
static struct freq_attr _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
-
#define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 7ca5688bfbe679..8036ba8fdc146f 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
extern int cpuidle_play_dead(void);
extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
+static inline struct cpuidle_device *cpuidle_get_device(void)
+{return __this_cpu_read(cpuidle_devices); }
#else
static inline void disable_cpuidle(void) { }
static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
@@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
static inline int cpuidle_play_dead(void) {return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
struct cpuidle_device *dev) {return NULL; }
+static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#endif
#if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 1a69cf0f0417e0..f23fdf116244a4 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -26,6 +26,10 @@
#define DEVFREQ_PRECHANGE (0)
#define DEVFREQ_POSTCHANGE (1)
+#define DEVFREQ_POLICY_NOTIFIER 1
+
+#define DEVFREQ_ADJUST 0
+
struct devfreq;
struct devfreq_governor;
@@ -102,6 +106,34 @@ struct devfreq_dev_profile {
};
/**
+ * struct devfreq_freq_limits - Devfreq frequency limits
+ * @min_freq: minimum frequency
+ * @max_freq: maximum frequency
+ */
+struct devfreq_freq_limits {
+ unsigned long min_freq;
+ unsigned long max_freq;
+};
+
+/**
+ * struct devfreq_policy - Devfreq policy
+ * @min: minimum frequency (adjustable by policy notifiers)
+ * @max: maximum frequency (adjustable by policy notifiers)
+ * @user: frequency limits requested by the user
+ * @devinfo: frequency limits of the device (available OPPs)
+ * @governor: method for choosing the frequency based on usage.
+ * @governor_name: devfreq governor name for use with this devfreq
+ */
+struct devfreq_policy {
+ unsigned long min;
+ unsigned long max;
+ struct devfreq_freq_limits user;
+ struct devfreq_freq_limits devinfo;
+ const struct devfreq_governor *governor;
+ char governor_name[DEVFREQ_NAME_LEN];
+};
+
+/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* registered.
@@ -109,8 +141,6 @@ struct devfreq_dev_profile {
* @dev: device registered by devfreq class. dev.parent is the device
* using devfreq.
* @profile: device-specific devfreq profile
- * @governor: method how to choose frequency based on the usage.
- * @governor_name: devfreq governor name for use with this devfreq
* @nb: notifier block used to notify devfreq object that it should
* reevaluate operable frequencies. Devfreq users may use
* devfreq.nb to the corresponding register notifier call chain.
@@ -118,16 +148,14 @@ struct devfreq_dev_profile {
* @previous_freq: previously configured frequency value.
* @data: Private data of the governor. The devfreq framework does not
* touch this.
- * @min_freq: Limit minimum frequency requested by user (0: none)
- * @max_freq: Limit maximum frequency requested by user (0: none)
- * @scaling_min_freq: Limit minimum frequency requested by OPP interface
- * @scaling_max_freq: Limit maximum frequency requested by OPP interface
+ * @policy: Policy for frequency adjustments
* @stop_polling: devfreq polling status of a device.
* @total_trans: Number of devfreq transitions
* @trans_table: Statistics of devfreq transitions
* @time_in_state: Statistics of devfreq states
* @last_stat_updated: The last time stat updated
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @policy_notifier_list: list head of DEVFREQ_POLICY_NOTIFIER notifier
*
 * This structure stores the devfreq information for a given device.
*
@@ -143,8 +171,6 @@ struct devfreq {
struct mutex lock;
struct device dev;
struct devfreq_dev_profile *profile;
- const struct devfreq_governor *governor;
- char governor_name[DEVFREQ_NAME_LEN];
struct notifier_block nb;
struct delayed_work work;
@@ -154,10 +180,7 @@ struct devfreq {
void *data; /* private data for governors */
- unsigned long min_freq;
- unsigned long max_freq;
- unsigned long scaling_min_freq;
- unsigned long scaling_max_freq;
+ struct devfreq_policy policy;
bool stop_polling;
/* information for device frequency transition */
@@ -167,6 +190,7 @@ struct devfreq {
unsigned long last_stat_updated;
struct srcu_notifier_head transition_notifier_list;
+ struct srcu_notifier_head policy_notifier_list;
};
struct devfreq_freqs {
@@ -175,6 +199,8 @@ struct devfreq_freqs {
};
#if defined(CONFIG_PM_DEVFREQ)
+extern struct class *devfreq_class;
+
extern struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq_dev_profile *profile,
const char *governor_name,
@@ -191,6 +217,14 @@ extern void devm_devfreq_remove_device(struct device *dev,
extern int devfreq_suspend_device(struct devfreq *devfreq);
extern int devfreq_resume_device(struct devfreq *devfreq);
+/**
+ * update_devfreq() - Reevaluate the device and configure frequency
+ * @devfreq: the devfreq device
+ *
+ * Note: devfreq->lock must be held
+ */
+extern int update_devfreq(struct devfreq *devfreq);
+
/* Helper functions for devfreq user device driver with OPP. */
extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
unsigned long *freq, u32 flags);
@@ -220,6 +254,45 @@ extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
int index);
/**
+ * devfreq_verify_within_limits() - Adjust a devfreq policy if needed to make
+ * sure its min/max values are within a
+ * specified range.
+ * @policy: the policy
+ * @min: the minimum frequency
+ * @max: the maximum frequency
+ */
+static inline void devfreq_verify_within_limits(struct devfreq_policy *policy,
+ unsigned int min, unsigned int max)
+{
+ if (policy->min < min)
+ policy->min = min;
+ if (policy->max < min)
+ policy->max = min;
+ if (policy->min > max)
+ policy->min = max;
+ if (policy->max > max)
+ policy->max = max;
+ if (policy->min > policy->max)
+ policy->min = policy->max;
+}
+
+/**
+ * devfreq_verify_within_dev_limits() - Adjust a devfreq policy if needed to
+ * make sure its min/max values are within
+ * the frequency range supported by the
+ * device.
+ * @policy: the policy
+ */
+static inline void
+devfreq_verify_within_dev_limits(struct devfreq_policy *policy)
+{
+ devfreq_verify_within_limits(policy, policy->devinfo.min_freq,
+ policy->devinfo.max_freq);
+}
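For illustration, a DEVFREQ_ADJUST notifier could use devfreq_verify_within_limits() to impose a thermal cap roughly as follows; the 200 MHz figure and the assumption that the notifier payload is the policy are examples, not part of this interface contract.

static int my_throttle_cb(struct notifier_block *nb,
			  unsigned long event, void *data)
{
	struct devfreq_policy *policy = data;	/* assumed payload */

	if (event != DEVFREQ_ADJUST)
		return NOTIFY_DONE;

	/* Cap the device at 200 MHz while throttled (example value). */
	devfreq_verify_within_limits(policy, 0, 200000000);
	return NOTIFY_OK;
}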
+
+struct devfreq *dev_to_devfreq(struct device *dev);
+
+/**
* devfreq_update_stats() - update the last_status pointer in struct devfreq
* @df: the devfreq instance whose status needs updating
*
@@ -353,10 +426,26 @@ static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
return ERR_PTR(-ENODEV);
}
+static inline void devfreq_verify_within_limits(struct devfreq_policy *policy,
+ unsigned int min, unsigned int max)
+{
+}
+
+static inline void
+devfreq_verify_within_dev_limits(struct devfreq_policy *policy)
+{
+}
+
static inline int devfreq_update_stats(struct devfreq *df)
{
return -EINVAL;
}
+
+static inline struct devfreq *dev_to_devfreq(struct device *dev)
+{
+ return NULL;
+}
+
#endif /* CONFIG_PM_DEVFREQ */
#endif /* __LINUX_DEVFREQ_H__ */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index f61626b44c1cb6..ea41fe17da311a 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -607,7 +607,7 @@ extern struct ratelimit_state dm_ratelimit_state;
*/
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
-static inline sector_t to_sector(unsigned long n)
+static inline sector_t to_sector(unsigned long long n)
{
return (n >> SECTOR_SHIFT);
}
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index e270bf3d1d1aea..b62257cfa18bd9 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -17,6 +17,7 @@
#define __DMA_IOMMU_H
#ifdef __KERNEL__
+#include <linux/types.h>
#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 3d6e6ce44c5ce8..520fd854e7b3f1 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -99,6 +99,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
#define CP_FSCK_FLAG 0x00000010
#define CP_ERROR_FLAG 0x00000008
@@ -497,4 +498,6 @@ enum {
F2FS_FT_MAX
};
+#define S_SHIFT 12
+
#endif /* _LINUX_F2FS_FS_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 27b6c578685d8f..bbb1062582d10b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -65,6 +65,8 @@ extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
+extern int sysctl_protected_fifos;
+extern int sysctl_protected_regular;
struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
@@ -926,9 +928,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif
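For concreteness: on a 32-bit kernel with 4 KiB pages (PAGE_SHIFT = 12), the new definition evaluates to (loff_t)0xffffffff << 12 = 2^44 - 2^12 bytes, just under 16 TiB, roughly doubling the old 2^43 - 1 (8 TiB) limit; on 64-bit, LLONG_MAX exposes the full positive loff_t range.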
#define FL_POSIX 1
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 84d971ff3fba50..5d06e838e650f7 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
#define __FSL_GUTS_H__
#include <linux/types.h>
+#include <linux/io.h>
/**
* Global Utility Registers.
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index eecd19b3700112..250e9be65e74c7 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -185,6 +185,7 @@ static inline void ct_assert_unique_operations(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -203,6 +204,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -212,7 +214,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
{ \
switch (0) { \
- s_fields \
+ s_fields \
+ case 0: \
; \
} \
}
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f216c563e9..2d6100edf2049d 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
#ifdef CONFIG_DEBUG_FS
+#include <linux/kfifo.h>
+
#define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
void hid_debug_exit(void);
void hid_debug_event(struct hid_device *, char *);
-
struct hid_debug_list {
- char *hid_debug_buf;
- int head;
- int tail;
+ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
struct fasync_struct *fasync;
struct hid_device *hdev;
struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
#endif
#endif
-
diff --git a/include/linux/hid.h b/include/linux/hid.h
index aaad6b646bac9b..a51318dac218e5 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -203,6 +203,7 @@ struct hid_item {
#define HID_GD_VBRZ 0x00010045
#define HID_GD_VNO 0x00010046
#define HID_GD_FEATURE 0x00010047
+#define HID_GD_RESOLUTION_MULTIPLIER 0x00010048
#define HID_GD_SYSTEM_CONTROL 0x00010080
#define HID_GD_UP 0x00010090
#define HID_GD_DOWN 0x00010091
@@ -212,6 +213,7 @@ struct hid_item {
#define HID_DC_BATTERYSTRENGTH 0x00060020
#define HID_CP_CONSUMER_CONTROL 0x000c0001
+#define HID_CP_AC_PAN 0x000c0238
#define HID_DG_DIGITIZER 0x000d0001
#define HID_DG_PEN 0x000d0002
@@ -391,6 +393,7 @@ struct hid_local {
*/
struct hid_collection {
+ int parent_idx; /* device->collection */
unsigned type;
unsigned usage;
unsigned level;
@@ -400,12 +403,16 @@ struct hid_usage {
unsigned hid; /* hid usage code */
unsigned collection_index; /* index into collection array */
unsigned usage_index; /* index into usage array */
+ __s8 resolution_multiplier;/* Effective Resolution Multiplier
+ (HUT v1.12, 4.3.1), default: 1 */
/* hidinput data */
+ __s8 wheel_factor; /* 120/resolution_multiplier */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
__s8 hat_min; /* hat switch fun */
__s8 hat_max; /* ditto */
__s8 hat_dir; /* ditto */
+ __s16 wheel_accumulated; /* hi-res wheel */
};
struct hid_input;
@@ -592,12 +599,14 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
struct hid_parser {
struct hid_global global;
struct hid_global global_stack[HID_GLOBAL_STACK_SIZE];
- unsigned global_stack_ptr;
+ unsigned int global_stack_ptr;
struct hid_local local;
- unsigned collection_stack[HID_COLLECTION_STACK_SIZE];
- unsigned collection_stack_ptr;
+ unsigned int *collection_stack;
+ unsigned int collection_stack_ptr;
+ unsigned int collection_stack_size;
+ int active_collection_idx; /* device->collection */
struct hid_device *device;
- unsigned scan_flags;
+ unsigned int scan_flags;
};
struct hid_class_descriptor {
@@ -817,6 +826,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
unsigned int type, unsigned int id,
unsigned int field_index,
unsigned int report_counts);
+
+void hid_setup_resolution_multiplier(struct hid_device *hid);
int hid_open_report(struct hid_device *device);
int hid_check_keys_pressed(struct hid_device *hid);
int hid_connect(struct hid_device *hid, unsigned int connect_mask);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 685c262e0be848..3957d99e66eaae 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -110,6 +110,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
@@ -132,6 +134,18 @@ static inline unsigned long hugetlb_total_pages(void)
return 0;
}
+static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
+ pte_t *ptep)
+{
+ return 0;
+}
+
+static inline void adjust_range_if_pmd_sharing_possible(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
#define follow_hugetlb_page(m,v,p,vs,a,b,i,w) ({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index ae6a711dcd1ddc..281bb007f7257f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1179,6 +1179,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
struct hv_util_service {
u8 *recv_buffer;
+ void *channel;
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 592061a6d17aa7..33a707b11a429e 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -738,4 +738,13 @@ static inline struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node
}
#endif /* CONFIG_OF */
+#if IS_ENABLED(CONFIG_ACPI)
+u32 i2c_acpi_find_bus_speed(struct device *dev);
+#else
+static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI */
+
#endif /* _LINUX_I2C_H */
diff --git a/include/linux/i8253.h b/include/linux/i8253.h
index e6bb36a97519b0..8336b2f6f83462 100644
--- a/include/linux/i8253.h
+++ b/include/linux/i8253.h
@@ -21,6 +21,7 @@
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 1600c55828e0fa..93a774ce492242 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -49,7 +49,7 @@ struct iio_buffer_access_funcs {
int (*request_update)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
- int (*set_length)(struct iio_buffer *buffer, int length);
+ int (*set_length)(struct iio_buffer *buffer, unsigned int length);
void (*release)(struct iio_buffer *buffer);
@@ -78,8 +78,8 @@ struct iio_buffer_access_funcs {
* @watermark: [INTERN] number of datums to wait for poll/read.
*/
struct iio_buffer {
- int length;
- int bytes_per_datum;
+ unsigned int length;
+ size_t bytes_per_datum;
struct attribute_group *scan_el_attrs;
long *scan_mask;
bool scan_timestamp;
diff --git a/include/linux/init.h b/include/linux/init.h
index df4ce20184a658..3fde9c8d1e4e9e 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -149,6 +149,10 @@ void prepare_namespace(void);
void __init load_default_modules(void);
int __init init_rootfs(void);
+#ifdef CONFIG_DEBUG_RODATA
+void mark_rodata_ro(void);
+#endif
+
extern void (*late_time_init)(void);
extern bool initcall_debug;
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1c1ff7e4faa4bf..021b1e9ff6cd09 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,8 @@
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
+#include <asm/thread_info.h>
+
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
@@ -183,14 +185,21 @@ extern struct task_group root_task_group;
# define INIT_KASAN(tsk)
#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+# define INIT_TASK_TI(tsk) .thread_info = INIT_THREAD_INFO(tsk),
+#else
+# define INIT_TASK_TI(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
+ INIT_TASK_TI(tsk) \
.state = 0, \
- .stack = &init_thread_info, \
+ .stack = init_stack, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.prio = MAX_PRIO-20, \
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 23e129ef67268c..e353f6600b0b25 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -125,6 +125,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
* Extended Capability Register
*/
+#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
@@ -294,6 +295,7 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -318,6 +320,7 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -463,9 +466,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask);
-
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
diff --git a/include/linux/io.h b/include/linux/io.h
index 1df47608c998a2..a453d88d3f01b4 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -177,4 +177,26 @@ enum {
void *memremap(resource_size_t offset, size_t size, unsigned long flags);
void memunmap(void *addr);
+/*
+ * On x86 PAT systems we have memory tracking that keeps track of
+ * the allowed mappings on memory ranges. This tracking works for
+ * all the in-kernel mapping APIs (ioremap*), but where the user
+ * wishes to map a range from a physical device into user memory
+ * the tracking won't be updated. This API is to be used by
+ * drivers which remap physical device pages into userspace,
+ * and want to make sure they are mapped WC and not UC.
+ */
+#ifndef arch_io_reserve_memtype_wc
+static inline int arch_io_reserve_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+ return 0;
+}
+
+static inline void arch_io_free_memtype_wc(resource_size_t base,
+ resource_size_t size)
+{
+}
+#endif
+
#endif /* _LINUX_IO_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index a19bcf9e762e1d..410decacff8fc7 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -177,7 +177,7 @@ extern int kdb_get_kbd_char(void);
static inline
int kdb_process_cpu(const struct task_struct *p)
{
- unsigned int cpu = task_thread_info(p)->cpu;
+ unsigned int cpu = task_cpu(p);
if (cpu > num_possible_cpus())
cpu = 0;
return cpu;
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e6284591599ec5..5957c6a3fd7f93 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -113,6 +113,23 @@ extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(atomic_read(&kobj->kref.refcount) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
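As a usage sketch, a caller that must not remove an object while children exist might do the following; the surrounding locking and the my_* naming are hypothetical.

static int my_remove_kobj(struct kobject *kobj)
{
	/* Caller serializes child addition/removal, per the kerneldoc. */
	if (kobject_has_children(kobj))
		return -EBUSY;

	kobject_put(kobj);
	return 0;
}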
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b20a2752f934f8..6428ac4746dee5 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
diff --git a/include/linux/low-mem-notify.h b/include/linux/low-mem-notify.h
index a2973ba89ca6e1..efe2a2bb2618d4 100644
--- a/include/linux/low-mem-notify.h
+++ b/include/linux/low-mem-notify.h
@@ -2,101 +2,143 @@
#define _LINUX_LOW_MEM_NOTIFY_H
#include <linux/mm.h>
+#include <linux/ratelimit.h>
#include <linux/stddef.h>
#include <linux/swap.h>
-extern unsigned low_mem_margin_percent;
-extern unsigned long low_mem_minfree;
+/* We support up to this many different thresholds. */
+#define LOW_MEM_THRESHOLD_MAX 5
+
+extern unsigned long low_mem_thresholds[];
+extern unsigned int low_mem_threshold_count;
+extern unsigned int low_mem_threshold_last;
void low_mem_notify(void);
extern const struct file_operations low_mem_notify_fops;
extern bool low_mem_margin_enabled;
extern unsigned long low_mem_lowest_seen_anon_mem;
-extern const unsigned long low_mem_anon_mem_delta;
extern unsigned int low_mem_ram_vs_swap_weight;
+extern struct ratelimit_state low_mem_logging_ratelimit;
+extern int extra_free_kbytes;
+
+#ifdef CONFIG_SYSFS
+extern void low_mem_threshold_notify(void);
+#else
+static inline void low_mem_threshold_notify(void)
+{
+}
+#endif
/*
* Compute available memory used by files that can be reclaimed quickly.
*/
-static inline unsigned long get_available_file_mem(int lru_base)
+static inline unsigned long get_available_file_mem(void)
{
unsigned long file_mem =
- global_page_state(lru_base + LRU_ACTIVE_FILE) +
- global_page_state(lru_base + LRU_INACTIVE_FILE);
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_FILE);
unsigned long dirty_mem = global_page_state(NR_FILE_DIRTY);
unsigned long min_file_mem = min_filelist_kbytes >> (PAGE_SHIFT - 10);
unsigned long clean_file_mem = file_mem - dirty_mem;
/* Conservatively estimate the amount of available_file_mem */
unsigned long available_file_mem = (clean_file_mem > min_file_mem) ?
- (clean_file_mem - min_file_mem) : 0;
+ (clean_file_mem - min_file_mem) : 0;
return available_file_mem;
}
/*
+ * Available anonymous memory.
+ */
+static inline unsigned long get_available_anon_mem(void)
+{
+ return global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_ANON);
+}
+
+/*
* Compute "available" memory, that is either free memory or memory that can be
* reclaimed quickly, adjusted for the presence of swap.
*/
-static inline unsigned long get_available_mem_adj(int lru_base)
+static inline unsigned long get_available_mem_adj(void)
{
- /* min_free_kbytes is reserved for emergency allocation like when
- * PF_MEMALLOC is set. In general it's not usable in normal page
- * allocation process.
- */
- unsigned long min_free_pages = min_free_kbytes >> (PAGE_SHIFT - 10);
/* free_mem is completely unallocated; clean file-backed memory
* (file_mem - dirty_mem) is easy to reclaim, except for the last
- * min_filelist_kbytes.
+ * min_filelist_kbytes. totalreserve_pages is the reserve of pages that
+	 * are not available to user space: it is the high watermark plus
+	 * lowmem_reserve, and extra_free_kbytes raises the high watermark.
+ * Nullify the effect of extra_free_kbytes by excluding it from the
+ * reserved pages.
*/
- unsigned long free_mem =
- global_page_state(NR_FREE_PAGES) - min_free_pages;
+ unsigned long raw_free_mem = global_page_state(NR_FREE_PAGES);
+ unsigned long reserve_pages = totalreserve_pages -
+ (READ_ONCE(extra_free_kbytes) >> (PAGE_SHIFT - 10));
+ unsigned long free_mem = (raw_free_mem > reserve_pages) ?
+ raw_free_mem - reserve_pages : 0;
unsigned long available_mem = free_mem +
- get_available_file_mem(lru_base);
- long _nr_swap_pages = get_nr_swap_pages();
+ get_available_file_mem();
+ unsigned long swappable_pages = min_t(unsigned long,
+ get_nr_swap_pages(), get_available_anon_mem());
/*
* The contribution of swap is reduced by a factor of
* low_mem_ram_vs_swap_weight.
*/
- return available_mem + _nr_swap_pages / low_mem_ram_vs_swap_weight;
+ return available_mem + swappable_pages / low_mem_ram_vs_swap_weight;
}
+#ifdef CONFIG_LOW_MEM_NOTIFY
/*
* Returns TRUE if we are in a low memory state.
*/
-static inline bool _is_low_mem_situation(void)
+static inline bool low_mem_check(void)
{
- const int lru_base = NR_LRU_BASE - LRU_BASE;
static bool was_low_mem; /* = false, as per style guide */
/* We declare a low-memory condition when a combination of RAM and swap
* space is low.
*/
- unsigned long available_mem = get_available_mem_adj(lru_base);
- bool is_low_mem = available_mem < low_mem_minfree;
-
- if (unlikely(is_low_mem && !was_low_mem)) {
- unsigned long anon_mem =
- global_page_state(lru_base + LRU_ACTIVE_ANON) +
- global_page_state(lru_base + LRU_INACTIVE_ANON);
- if (unlikely(anon_mem < low_mem_lowest_seen_anon_mem)) {
- printk(KERN_INFO "entering low_mem "
- "(avail RAM = %lu kB, avail swap %lu kB, "
- "avail file %lu kB) "
- "with lowest seen anon mem: %lu kB\n",
- available_mem * PAGE_SIZE / 1024,
- get_nr_swap_pages() * PAGE_SIZE / 1024,
- get_available_file_mem(lru_base) * PAGE_SIZE /
- 1024,
- anon_mem * PAGE_SIZE / 1024);
- low_mem_lowest_seen_anon_mem = anon_mem -
- low_mem_anon_mem_delta;
- }
+ unsigned long available_mem = get_available_mem_adj();
+ /*
+ * For backwards compatibility with the older margin interface, we will
+ * trigger the /dev/chromeos-low_mem device when we are below the
+	 * lowest threshold.
+ */
+ bool is_low_mem = available_mem < low_mem_thresholds[0];
+ unsigned int threshold_lowest = UINT_MAX;
+ int i;
+
+ if (!low_mem_margin_enabled)
+ return false;
+
+ if (unlikely(is_low_mem && !was_low_mem) &&
+ __ratelimit(&low_mem_logging_ratelimit)) {
+ pr_info("entering low_mem (avail RAM = %lu kB, avail swap %lu kB, avail file %lu kB, anon mem: %lu kB)\n",
+ available_mem * PAGE_SIZE / 1024,
+ get_nr_swap_pages() * PAGE_SIZE / 1024,
+ get_available_file_mem() * PAGE_SIZE / 1024,
+ get_available_anon_mem() * PAGE_SIZE / 1024);
}
was_low_mem = is_low_mem;
+ if (is_low_mem)
+ low_mem_notify();
+
+ for (i = 0; i < low_mem_threshold_count; i++)
+ if (available_mem < low_mem_thresholds[i]) {
+ threshold_lowest = i;
+ break;
+ }
+
+ /* we crossed one or more thresholds */
+ if (unlikely(threshold_lowest < low_mem_threshold_last))
+ low_mem_threshold_notify();
+
+ low_mem_threshold_last = threshold_lowest;
+
return is_low_mem;
}
-
-static inline bool is_low_mem_situation(void)
+#else
+static inline bool low_mem_check(void)
{
- return low_mem_margin_enabled ? _is_low_mem_situation() : false;
+ return false;
}
+#endif
#endif
diff --git a/include/linux/lzo.h b/include/linux/lzo.h
index a0848d9377e569..9a8917e234e762 100644
--- a/include/linux/lzo.h
+++ b/include/linux/lzo.h
@@ -17,7 +17,7 @@
#define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short))
#define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS
-#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3)
+#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2)
/* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */
int lzo1x_1_compress(const unsigned char *src, size_t src_len,
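Worked example of the enlarged bound: for a 4096-byte input, lzo1x_worst_compress(4096) = 4096 + 256 + 64 + 3 + 2 = 4421 bytes, two bytes more than before; callers sizing output buffers with this macro pick up the extra safety margin automatically on rebuild.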
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 435fd8426b8acf..d40b4bfa963138 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -63,6 +63,10 @@ enum mem_cgroup_events_index {
MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
+ MEM_CGROUP_EVENTS_PGMAJFAULT_S, /* # of major shmem page-faults */
+ MEM_CGROUP_EVENTS_PGMAJFAULT_A, /* # of major anonymous page-faults */
+ MEM_CGROUP_EVENTS_PGMAJFAULT_F, /* # of major file page-faults */
+
MEM_CGROUP_EVENTS_NSTATS,
/* default hierarchy events */
MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
@@ -491,8 +495,20 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
case PGFAULT:
this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
break;
- case PGMAJFAULT:
+ case PGMAJFAULT_S:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+ this_cpu_inc(memcg->stat->events[
+ MEM_CGROUP_EVENTS_PGMAJFAULT_S]);
+ break;
+ case PGMAJFAULT_A:
+ this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+ this_cpu_inc(memcg->stat->events[
+ MEM_CGROUP_EVENTS_PGMAJFAULT_A]);
+ break;
+ case PGMAJFAULT_F:
this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
+ this_cpu_inc(memcg->stat->events[
+ MEM_CGROUP_EVENTS_PGMAJFAULT_F]);
break;
default:
BUG();
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index b99a89bbe8f7c1..f49695ef586560 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -168,7 +168,8 @@ struct cros_ec_device {
struct cros_ec_command *msg);
struct power_supply *charger;
struct mutex lock;
- bool mkbp_event_supported;
+ /* 0 == not supported, otherwise it supports version x - 1 */
+ u8 mkbp_event_supported;
struct blocking_notifier_head event_notifier;
struct ec_response_get_next_event_v1 event_data;
int event_size;
@@ -297,6 +298,17 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
/**
+ * cros_ec_check_features - Test for the presence of EC features
+ *
+ * Call this function to test whether the ChromeOS EC supports a feature.
+ *
+ * @ec: EC device
+ * @feature: One of ec_feature_code values
+ * @return: 1 if supported, 0 if not
+ */
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+
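A small usage sketch; EC_FEATURE_USB_PD is assumed to be one of the ec_feature_code values, and the probe wrapper is hypothetical.

static int my_probe(struct cros_ec_dev *ec)
{
	/* EC_FEATURE_USB_PD: assumed member of enum ec_feature_code. */
	if (!cros_ec_check_features(ec, EC_FEATURE_USB_PD))
		return -ENODEV;	/* feature absent on this EC */

	return 0;
}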
+/**
* cros_ec_remove - Remove a ChromeOS EC
*
* Call this to deregister a ChromeOS EC, then clean up any private data.
@@ -331,7 +343,6 @@ extern struct attribute_group cros_ec_pd_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
extern struct attribute_group cros_usb_pd_charger_attr_group;
-extern struct attribute_group cros_ec_usb_attr_group;
/**
* cros_ec_get_next_event - Retrieve the EC event.
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index bd6908c99b751b..5fe592ca4ac9dc 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -1,6 +1,11 @@
-/* Copyright (c) 2014 The Chromium OS Authors. All rights reserved.
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Host communication command constants for ChromeOS EC
+ *
+ * Copyright (C) 2012 Google, Inc
+ *
+ * NOTE: This file is copied verbatim from the ChromeOS EC Open Source
+ * project in an attempt to make future updates easy.
*/
/* Host communication command constants for Chrome EC */
@@ -12,6 +17,10 @@
#include <stdint.h>
#endif
+#ifdef __cplusplus
+extern "C" {
+#endif
+
/*
* Include common.h for CONFIG_HOSTCMD_ALIGNED, if it's defined. This
* generates more efficient code for accessing request/response structures on
@@ -21,6 +30,12 @@
#include "common.h"
#endif
+#ifdef __KERNEL__
+#define BUILD_ASSERT(_cond)
+#else
+#include "compile_time_macros.h"
+#endif
+
/*
* Current version of this protocol
*
@@ -324,10 +339,19 @@
/*
* Report device orientation
- * bit 0 device is tablet mode
+ * Bits Definition
+ * 3:1 Device DPTF Profile Number (DDPN)
+ * 0 = Reserved for backward compatibility (indicates no valid
+ * profile number. Host should fall back to using TBMD).
+ * 1..7 = DPTF Profile number to indicate to host which table needs
+ * to be loaded.
+ * 0 Tablet Mode Device Indicator (TBMD)
*/
#define EC_ACPI_MEM_DEVICE_ORIENTATION 0x09
-#define EC_ACPI_MEM_DEVICE_TABLET_MODE 0x01
+#define EC_ACPI_MEM_TBMD_SHIFT 0
+#define EC_ACPI_MEM_TBMD_MASK 0x1
+#define EC_ACPI_MEM_DDPN_SHIFT 1
+#define EC_ACPI_MEM_DDPN_MASK 0x7
/*
* Report device features. Uses the same format as the host command, except:
@@ -353,6 +377,15 @@
#define EC_ACPI_MEM_BATTERY_INDEX 0x12
/*
+ * USB Port Power. Each bit indicates whether the corresponding USB ports' power
+ * is enabled (1) or disabled (0).
+ * bit 0 USB port ID 0
+ * ...
+ * bit 7 USB port ID 7
+ */
+#define EC_ACPI_MEM_USB_PORT_POWER 0x13
+
+/*
* ACPI addresses 0x20 - 0xff map to EC_MEMMAP offset 0x00 - 0xdf. This data
* is read-only from the AP. Added in EC_ACPI_MEM_VERSION 2.
*/
@@ -583,7 +616,7 @@ enum host_event_code {
/* EC desires to change state of host-controlled USB mux */
EC_HOST_EVENT_USB_MUX = 28,
- /* TABLET/LAPTOP mode event*/
+ /* TABLET/LAPTOP mode or detachable base attach/detach event */
EC_HOST_EVENT_MODE_CHANGE = 29,
/* Keyboard recovery combo with hardware reinitialization */
@@ -601,17 +634,20 @@ enum host_event_code {
/* Host event mask */
#define EC_HOST_EVENT_MASK(event_code) (1ULL << ((event_code) - 1))
-/* Arguments at EC_LPC_ADDR_HOST_ARGS */
-struct __ec_align4 ec_lpc_host_args {
+/**
+ * struct ec_lpc_host_args - Arguments at EC_LPC_ADDR_HOST_ARGS
+ * @flags: The host argument flags.
+ * @command_version: Command version.
+ * @data_size: The length of data.
+ * @checksum: Checksum; sum of command + flags + command_version + data_size +
+ * all params/response data bytes.
+ */
+struct ec_lpc_host_args {
uint8_t flags;
uint8_t command_version;
uint8_t data_size;
- /*
- * Checksum; sum of command + flags + command_version + data_size +
- * all params/response data bytes.
- */
uint8_t checksum;
-};
+} __ec_align4;
/* Flags for ec_lpc_host_args.flags */
/*
@@ -763,56 +799,45 @@ struct __ec_align4 ec_lpc_host_args {
#define EC_HOST_REQUEST_VERSION 3
-/* Version 3 request from host */
-struct __ec_align4 ec_host_request {
- /* Structure version (=3)
- *
- * EC will return EC_RES_INVALID_HEADER if it receives a header with a
- * version it doesn't know how to parse.
- */
+/**
+ * struct ec_host_request - Version 3 request from host.
+ * @struct_version: Should be 3. The EC will return EC_RES_INVALID_HEADER if it
+ * receives a header with a version it doesn't know how to
+ * parse.
+ * @checksum: Checksum of request and data; sum of all bytes including checksum
+ * should total to 0.
+ * @command: Command to send (EC_CMD_...)
+ * @command_version: Command version.
+ * @reserved: Unused byte in current protocol version; set to 0.
+ * @data_len: Length of data which follows this header.
+ */
+struct ec_host_request {
uint8_t struct_version;
-
- /*
- * Checksum of request and data; sum of all bytes including checksum
- * should total to 0.
- */
uint8_t checksum;
-
- /* Command code */
uint16_t command;
-
- /* Command version */
uint8_t command_version;
-
- /* Unused byte in current protocol version; set to 0 */
uint8_t reserved;
-
- /* Length of data which follows this header */
uint16_t data_len;
-};
+} __ec_align4;
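To make the checksum rule concrete, here is a host-side sketch that sets @checksum so that header plus payload bytes sum to zero modulo 256; it assumes the payload immediately follows the header in one contiguous buffer.

static void ec_fill_checksum(struct ec_host_request *req)
{
	const uint8_t *bytes = (const uint8_t *)req;
	size_t len = sizeof(*req) + req->data_len;
	uint8_t sum = 0;
	size_t i;

	req->checksum = 0;
	for (i = 0; i < len; i++)
		sum += bytes[i];
	req->checksum = (uint8_t)-sum;	/* makes the total 0 mod 256 */
}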
#define EC_HOST_RESPONSE_VERSION 3
-/* Version 3 response from EC */
-struct __ec_align4 ec_host_response {
- /* Structure version (=3) */
+/**
+ * struct ec_host_response - Version 3 response from EC.
+ * @struct_version: Struct version (=3).
+ * @checksum: Checksum of response and data; sum of all bytes including
+ * checksum should total to 0.
+ * @result: EC's response to the command (separate from communication failure)
+ * @data_len: Length of data which follows this header.
+ * @reserved: Unused bytes in current protocol version; set to 0.
+ */
+struct ec_host_response {
uint8_t struct_version;
-
- /*
- * Checksum of response and data; sum of all bytes including checksum
- * should total to 0.
- */
uint8_t checksum;
-
- /* Result code (EC_RES_*) */
uint16_t result;
-
- /* Length of data which follows this header */
uint16_t data_len;
-
- /* Unused bytes in current protocol version; set to 0 */
uint16_t reserved;
-};
+} __ec_align4;
/*****************************************************************************/
@@ -875,7 +900,7 @@ struct __ec_align4 ec_host_response {
*/
/* Version 4 request from host */
-struct __ec_align4 ec_host_request4 {
+struct ec_host_request4 {
/*
* bits 0-3: struct_version: Structure version (=4)
* bit 4: is_response: Is response (=0)
@@ -902,10 +927,10 @@ struct __ec_align4 ec_host_request4 {
/* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */
uint8_t header_crc;
-};
+} __ec_align4;
/* Version 4 response from EC */
-struct __ec_align4 ec_host_response4 {
+struct ec_host_response4 {
/*
* bits 0-3: struct_version: Structure version (=4)
* bit 4: is_response: Is response (=1)
@@ -931,7 +956,7 @@ struct __ec_align4 ec_host_response4 {
/* CRC-8 of above fields, using x^8 + x^2 + x + 1 polynomial */
uint8_t header_crc;
-};
+} __ec_align4;
/* Fields in fields0 byte */
#define EC_PACKET4_0_STRUCT_VERSION_MASK 0x0f
@@ -968,9 +993,13 @@ struct __ec_align4 ec_host_response4 {
*/
#define EC_CMD_PROTO_VERSION 0x0000
-struct __ec_align4 ec_response_proto_version {
+/**
+ * struct ec_response_proto_version - Response to the proto version command.
+ * @version: The protocol version.
+ */
+struct ec_response_proto_version {
uint32_t version;
-};
+} __ec_align4;
/*
* Hello. This is a simple command to test the EC is responsive to
@@ -978,13 +1007,21 @@ struct __ec_align4 ec_response_proto_version {
*/
#define EC_CMD_HELLO 0x0001
-struct __ec_align4 ec_params_hello {
- uint32_t in_data; /* Pass anything here */
-};
+/**
+ * struct ec_params_hello - Parameters to the hello command.
+ * @in_data: Pass anything here.
+ */
+struct ec_params_hello {
+ uint32_t in_data;
+} __ec_align4;
-struct __ec_align4 ec_response_hello {
- uint32_t out_data; /* Output will be in_data + 0x01020304 */
-};
+/**
+ * struct ec_response_hello - Response to the hello command.
+ * @out_data: Output will be in_data + 0x01020304.
+ */
+struct ec_response_hello {
+ uint32_t out_data;
+} __ec_align4;
/* Get version number */
#define EC_CMD_GET_VERSION 0x0002
@@ -995,25 +1032,40 @@ enum ec_current_image {
EC_IMAGE_RW
};
-struct __ec_align4 ec_response_get_version {
- /* Null-terminated version strings for RO, RW */
+/**
+ * struct ec_response_get_version - Response to the get version command.
+ * @version_string_ro: Null-terminated RO firmware version string.
+ * @version_string_rw: Null-terminated RW firmware version string.
+ * @reserved: Unused bytes; was previously RW-B firmware version string.
+ * @current_image: One of ec_current_image.
+ */
+struct ec_response_get_version {
char version_string_ro[32];
char version_string_rw[32];
- char reserved[32]; /* Was previously RW-B string */
- uint32_t current_image; /* One of ec_current_image */
-};
+ char reserved[32];
+ uint32_t current_image;
+} __ec_align4;
/* Read test */
#define EC_CMD_READ_TEST 0x0003
-struct __ec_align4 ec_params_read_test {
- uint32_t offset; /* Starting value for read buffer */
- uint32_t size; /* Size to read in bytes */
-};
+/**
+ * struct ec_params_read_test - Parameters for the read test command.
+ * @offset: Starting value for read buffer.
+ * @size: Size to read in bytes.
+ */
+struct ec_params_read_test {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
-struct __ec_align4 ec_response_read_test {
+/**
+ * struct ec_response_read_test - Response to the read test command.
+ * @data: Data returned by the read test command.
+ */
+struct ec_response_read_test {
uint32_t data[32];
-};
+} __ec_align4;
/*
* Get build information
@@ -1025,19 +1077,28 @@ struct __ec_align4 ec_response_read_test {
/* Get chip info */
#define EC_CMD_GET_CHIP_INFO 0x0005
-struct __ec_align4 ec_response_get_chip_info {
- /* Null-terminated strings */
+/**
+ * struct ec_response_get_chip_info - Response to the get chip info command.
+ * @vendor: Null-terminated string for chip vendor.
+ * @name: Null-terminated string for chip name.
+ * @revision: Null-terminated string for chip mask version.
+ */
+struct ec_response_get_chip_info {
char vendor[32];
char name[32];
- char revision[32]; /* Mask version */
-};
+ char revision[32];
+} __ec_align4;
/* Get board HW version */
#define EC_CMD_GET_BOARD_VERSION 0x0006
-struct __ec_align2 ec_response_board_version {
- uint16_t board_version; /* A monotonously incrementing number. */
-};
+/**
+ * struct ec_response_board_version - Response to the board version command.
+ * @board_version: A monotonically incrementing number.
+ */
+struct ec_response_board_version {
+ uint16_t board_version;
+} __ec_align2;
/*
* Read memory-mapped data.
@@ -1049,29 +1110,44 @@ struct __ec_align2 ec_response_board_version {
*/
#define EC_CMD_READ_MEMMAP 0x0007
-struct __ec_align1 ec_params_read_memmap {
- uint8_t offset; /* Offset in memmap (EC_MEMMAP_*) */
- uint8_t size; /* Size to read in bytes */
-};
+/**
+ * struct ec_params_read_memmap - Parameters for the read memory map command.
+ * @offset: Offset in memmap (EC_MEMMAP_*).
+ * @size: Size to read in bytes.
+ */
+struct ec_params_read_memmap {
+ uint8_t offset;
+ uint8_t size;
+} __ec_align1;
/* Read versions supported for a command */
#define EC_CMD_GET_CMD_VERSIONS 0x0008
-struct __ec_align1 ec_params_get_cmd_versions {
- uint8_t cmd; /* Command to check */
-};
-
-struct __ec_align2 ec_params_get_cmd_versions_v1 {
- uint16_t cmd; /* Command to check */
-};
+/**
+ * struct ec_params_get_cmd_versions - Parameters for the get command versions.
+ * @cmd: Command to check.
+ */
+struct ec_params_get_cmd_versions {
+ uint8_t cmd;
+} __ec_align1;
-struct __ec_align4 ec_response_get_cmd_versions {
- /*
- * Mask of supported versions; use EC_VER_MASK() to compare with a
- * desired version.
- */
+/**
+ * struct ec_params_get_cmd_versions_v1 - Parameters for the get command
+ * versions (v1).
+ * @cmd: Command to check.
+ */
+struct ec_params_get_cmd_versions_v1 {
+ uint16_t cmd;
+} __ec_align2;
+
+/**
+ * struct ec_response_get_cmd_versions - Response to the get command versions.
+ * @version_mask: Mask of supported versions; use EC_VER_MASK() to compare with
+ * a desired version.
+ */
+struct ec_response_get_cmd_versions {
uint32_t version_mask;
-};
+} __ec_align4;
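
/*
 * Usage sketch, assuming the EC_VER_MASK(v) bit-mask helper defined
 * earlier in this header: test whether the EC implements version 1 of
 * a given command.
 */
static inline int
ec_cmd_supports_v1(const struct ec_response_get_cmd_versions *r)
{
	return !!(r->version_mask & EC_VER_MASK(1));
}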
/*
* Check EC communications status (busy). This is needed on i2c/spi but not
@@ -1087,24 +1163,29 @@ enum ec_comms_status {
EC_COMMS_STATUS_PROCESSING = 1 << 0, /* Processing cmd */
};
-struct __ec_align4 ec_response_get_comms_status {
+/**
+ * struct ec_response_get_comms_status - Response to the get comms status
+ * command.
+ * @flags: Mask of enum ec_comms_status.
+ */
+struct ec_response_get_comms_status {
uint32_t flags; /* Mask of enum ec_comms_status */
-};
+} __ec_align4;
/* Fake a variety of responses, purely for testing purposes. */
#define EC_CMD_TEST_PROTOCOL 0x000A
/* Tell the EC what to send back to us. */
-struct __ec_align4 ec_params_test_protocol {
+struct ec_params_test_protocol {
uint32_t ec_result;
uint32_t ret_len;
uint8_t buf[32];
-};
+} __ec_align4;
/* Here it comes... */
-struct __ec_align4 ec_response_test_protocol {
+struct ec_response_test_protocol {
uint8_t buf[32];
-};
+} __ec_align4;
/* Get protocol information */
#define EC_CMD_GET_PROTOCOL_INFO 0x000B
@@ -1113,21 +1194,21 @@ struct __ec_align4 ec_response_test_protocol {
/* EC_RES_IN_PROGRESS may be returned if a command is slow */
#define EC_PROTOCOL_INFO_IN_PROGRESS_SUPPORTED (1 << 0)
-struct __ec_align4 ec_response_get_protocol_info {
+/**
+ * struct ec_response_get_protocol_info - Response to the get protocol info
+ * command.
+ * @protocol_versions: Bitmask of protocol versions supported (1 << n means
+ * version n).
+ * @max_request_packet_size: Maximum request packet size in bytes.
+ * @max_response_packet_size: Maximum response packet size in bytes.
+ * @flags: Flags; see EC_PROTOCOL_INFO_*
+ */
+struct ec_response_get_protocol_info {
/* Fields which exist if at least protocol version 3 is supported */
-
- /* Bitmask of protocol versions supported (1 << n means version n)*/
uint32_t protocol_versions;
-
- /* Maximum request packet size, in bytes */
uint16_t max_request_packet_size;
-
- /* Maximum response packet size, in bytes */
uint16_t max_response_packet_size;
-
- /* Flags; see EC_PROTOCOL_INFO_* */
uint32_t flags;
-};
+} __ec_align4;
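
/*
 * Sizing sketch: in protocol v3 the usable request payload is the
 * advertised packet size minus the fixed struct ec_host_request header
 * defined above.
 */
static inline uint16_t
ec_max_request_data(const struct ec_response_get_protocol_info *info)
{
	return info->max_request_packet_size -
	       (uint16_t)sizeof(struct ec_host_request);
}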
/*****************************************************************************/
@@ -1136,19 +1217,21 @@ struct __ec_align4 ec_response_get_protocol_info {
/* The upper byte of .flags tells what to do (nothing means "get") */
#define EC_GSV_SET 0x80000000
-/* The lower three bytes of .flags identifies the parameter, if that has
- meaning for an individual command. */
+/*
+ * The lower three bytes of .flags identify the parameter, if that has
+ * meaning for an individual command.
+ */
#define EC_GSV_PARAM_MASK 0x00ffffff
-struct __ec_align4 ec_params_get_set_value {
+struct ec_params_get_set_value {
uint32_t flags;
uint32_t value;
-};
+} __ec_align4;
-struct __ec_align4 ec_response_get_set_value {
+struct ec_response_get_set_value {
uint32_t flags;
uint32_t value;
-};
+} __ec_align4;
/* More than one command can use these structs to get/set parameters. */
#define EC_CMD_GSV_PAUSE_IN_S5 0x000C
@@ -1250,13 +1333,28 @@ enum ec_feature_code {
EC_FEATURE_HOST_EVENT64 = 33,
/* EC runs code in RAM (not in place, a.k.a. XIP) */
EC_FEATURE_EXEC_IN_RAM = 34,
+ /* EC supports CEC commands */
+ EC_FEATURE_CEC = 35,
+ /* EC supports tight sensor timestamping. */
+ EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS = 36,
+ /*
+ * EC supports tablet mode detection aligned to Chrome and allows
+ * setting of threshold by host command using
+ * MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
+ */
+ EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37,
+ /* EC supports audio codec. */
+ EC_FEATURE_AUDIO_CODEC = 38,
+ /* EC supports SCP. */
+ EC_FEATURE_SCP = 39,
};
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
#define EC_FEATURE_MASK_1(event_code) (1UL << (event_code - 32))
-struct __ec_align4 ec_response_get_features {
+
+struct ec_response_get_features {
uint32_t flags[2];
-};
+} __ec_align4;
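
/*
 * Usage sketch for the two-word feature bitmap: features 0-31 live in
 * flags[0] and features 32 and up in flags[1], mirroring the
 * EC_FEATURE_MASK_0/1 macros above.
 */
static inline int ec_has_feature(const struct ec_response_get_features *r,
				 int feature)
{
	if (feature < 32)
		return !!(r->flags[0] & EC_FEATURE_MASK_0(feature));
	return !!(r->flags[1] & EC_FEATURE_MASK_1(feature));
}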
/*****************************************************************************/
/* Get the board's SKU ID from EC */
@@ -1265,9 +1363,9 @@ struct __ec_align4 ec_response_get_features {
/* Set SKU ID from AP */
#define EC_CMD_SET_SKU_ID 0x000F
-struct __ec_align4 ec_sku_id_info {
+struct ec_sku_id_info {
uint32_t sku_id;
-};
+} __ec_align4;
/*****************************************************************************/
/* Flash commands */
@@ -1276,39 +1374,56 @@ struct __ec_align4 ec_sku_id_info {
#define EC_CMD_FLASH_INFO 0x0010
#define EC_VER_FLASH_INFO 2
-/* Version 0 returns these fields */
-struct __ec_align4 ec_response_flash_info {
- /* Usable flash size, in bytes */
+/**
+ * struct ec_response_flash_info - Response to the flash info command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ *
+ * Version 0 returns these fields.
+ */
+struct ec_response_flash_info {
uint32_t flash_size;
- /*
- * Write block size. Write offset and size must be a multiple
- * of this.
- */
uint32_t write_block_size;
- /*
- * Erase block size. Erase offset and size must be a multiple
- * of this.
- */
uint32_t erase_block_size;
- /*
- * Protection block size. Protection offset and size must be a
- * multiple of this.
- */
uint32_t protect_block_size;
-};
+} __ec_align4;
-/* Flags for version 1+ flash info command */
-/* EC flash erases bits to 0 instead of 1 */
+/*
+ * Flags for version 1+ flash info command
+ * EC flash erases bits to 0 instead of 1.
+ */
#define EC_FLASH_INFO_ERASE_TO_0 (1 << 0)
-/* Flash must be selected for read/write/erase operations to succeed. This may
+/*
+ * Flash must be selected for read/write/erase operations to succeed. This may
* be necessary on a chip where write/erase can be corrupted by other board
* activity, or where the chip needs to enable some sort of programming voltage,
* or where the read/write/erase operations require cleanly suspending other
- * chip functionality. */
+ * chip functionality.
+ */
#define EC_FLASH_INFO_SELECT_REQUIRED (1 << 1)
-/*
+/**
+ * struct ec_response_flash_info_1 - Response to the flash info v1 command.
+ * @flash_size: Usable flash size in bytes.
+ * @write_block_size: Write block size. Write offset and size must be a
+ * multiple of this.
+ * @erase_block_size: Erase block size. Erase offset and size must be a
+ * multiple of this.
+ * @protect_block_size: Protection block size. Protection offset and size
+ * must be a multiple of this.
+ * @write_ideal_size: Ideal write size in bytes. Writes will be fastest if
+ * size is exactly this and offset is a multiple of this.
+ * For example, an EC may have a write buffer which can do
+ * half-page operations if data is aligned, and a slower
+ * word-at-a-time write mode.
+ * @flags: Flags; see EC_FLASH_INFO_*
+ *
* Version 1 returns the same initial fields as version 0, with additional
* fields following.
*
@@ -1322,7 +1437,7 @@ struct __ec_align4 ec_response_flash_info {
* The EC returns the number of banks describing the flash memory.
* It adds banks descriptions up to num_banks_desc.
*/
-struct __ec_align4 ec_response_flash_info_1 {
+struct ec_response_flash_info_1 {
/* Version 0 fields; see above for description */
uint32_t flash_size;
uint32_t write_block_size;
@@ -1330,24 +1445,16 @@ struct __ec_align4 ec_response_flash_info_1 {
uint32_t protect_block_size;
/* Version 1 adds these fields: */
- /*
- * Ideal write size in bytes. Writes will be fastest if size is
- * exactly this and offset is a multiple of this. For example, an EC
- * may have a write buffer which can do half-page operations if data is
- * aligned, and a slower word-at-a-time write mode.
- */
uint32_t write_ideal_size;
-
- /* Flags; see EC_FLASH_INFO_* */
uint32_t flags;
-};
+} __ec_align4;
-struct __ec_align4 ec_params_flash_info_2 {
+struct ec_params_flash_info_2 {
/* Number of banks to describe */
uint16_t num_banks_desc;
/* Reserved; set 0; ignore on read */
uint8_t reserved[2];
-};
+} __ec_align4;
struct ec_flash_bank {
/* Number of sectors in this bank. */
@@ -1364,7 +1471,7 @@ struct ec_flash_bank {
uint8_t reserved[2];
};
-struct __ec_align4 ec_response_flash_info_2 {
+struct ec_response_flash_info_2 {
/* Total flash in the EC. */
uint32_t flash_size;
/* Flags; see EC_FLASH_INFO_* */
@@ -1376,7 +1483,7 @@ struct __ec_align4 ec_response_flash_info_2 {
/* Number of banks described in banks array. */
uint16_t num_banks_desc;
struct ec_flash_bank banks[0];
-};
+} __ec_align4;
/*
* Read flash
@@ -1385,10 +1492,15 @@ struct __ec_align4 ec_response_flash_info_2 {
*/
#define EC_CMD_FLASH_READ 0x0011
-struct __ec_align4 ec_params_flash_read {
- uint32_t offset; /* Byte offset to read */
- uint32_t size; /* Size to read in bytes */
-};
+/**
+ * struct ec_params_flash_read - Parameters for the flash read command.
+ * @offset: Byte offset to read.
+ * @size: Size to read in bytes.
+ */
+struct ec_params_flash_read {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
/* Write flash */
#define EC_CMD_FLASH_WRITE 0x0012
@@ -1397,24 +1509,32 @@ struct __ec_align4 ec_params_flash_read {
/* Version 0 of the flash command supported only 64 bytes of data */
#define EC_FLASH_WRITE_VER0_SIZE 64
-struct __ec_align4 ec_params_flash_write {
- uint32_t offset; /* Byte offset to write */
- uint32_t size; /* Size to write in bytes */
+/**
+ * struct ec_params_flash_write - Parameters for the flash write command.
+ * @offset: Byte offset to write.
+ * @size: Size to write in bytes.
+ */
+struct ec_params_flash_write {
+ uint32_t offset;
+ uint32_t size;
/* Followed by data to write */
-};
+} __ec_align4;
/* Erase flash */
#define EC_CMD_FLASH_ERASE 0x0013
-/* v0 */
-struct __ec_align4 ec_params_flash_erase {
- uint32_t offset; /* Byte offset to erase */
- uint32_t size; /* Size to erase in bytes */
-};
-
+/**
+ * struct ec_params_flash_erase - Parameters for the flash erase command, v0.
+ * @offset: Byte offset to erase.
+ * @size: Size to erase in bytes.
+ */
+struct ec_params_flash_erase {
+ uint32_t offset;
+ uint32_t size;
+} __ec_align4;
-#define EC_VER_FLASH_WRITE 1
-/* v1 add async erase:
+/*
+ * v1 adds async erase:
* subcommands can return:
* EC_RES_SUCCESS : erased (see ERASE_SECTOR_ASYNC case below).
* EC_RES_INVALID_PARAM : offset/size are not aligned on an erase boundary.
@@ -1436,16 +1556,19 @@ enum ec_flash_erase_cmd {
FLASH_ERASE_GET_RESULT, /* Ask for last erase result */
};
-struct __ec_align4 ec_params_flash_erase_v1 {
- /* One of ec_flash_erase_cmd. */
+/**
+ * struct ec_params_flash_erase_v1 - Parameters for the flash erase command, v1.
+ * @cmd: One of ec_flash_erase_cmd.
+ * @reserved: Pad byte; currently always contains 0.
+ * @flag: No flags defined yet; set to 0.
+ * @params: Same as v0 parameters.
+ */
+struct ec_params_flash_erase_v1 {
uint8_t cmd;
- /* Pad byte; currently always contains 0 */
uint8_t reserved;
- /* No flags defined yet; set to 0 */
uint16_t flag;
- /* Same as v0 parameters. */
struct ec_params_flash_erase params;
-};
+} __ec_align4;
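
/*
 * Async-erase sketch following the v1 flow described above: kick off
 * the erase with the FLASH_ERASE_SECTOR_ASYNC subcommand from
 * enum ec_flash_erase_cmd, then poll with FLASH_ERASE_GET_RESULT while
 * the EC reports EC_RES_BUSY. ec_cmd(cmd, version, params, params_size,
 * resp, resp_size) is a stand-in for a host-specific transport and is
 * not defined in this header.
 */
static inline int ec_flash_erase_async(uint32_t offset, uint32_t size)
{
	struct ec_params_flash_erase_v1 p = {
		.cmd = FLASH_ERASE_SECTOR_ASYNC,
		.params = { .offset = offset, .size = size },
	};
	int ret;

	ret = ec_cmd(EC_CMD_FLASH_ERASE, 1, &p, sizeof(p), NULL, 0);
	if (ret != EC_RES_SUCCESS)
		return ret;
	p.cmd = FLASH_ERASE_GET_RESULT;
	do {
		ret = ec_cmd(EC_CMD_FLASH_ERASE, 1, &p, sizeof(p), NULL, 0);
	} while (ret == EC_RES_BUSY);
	return ret;
}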
/*
* Get/set flash protection.
@@ -1491,23 +1614,31 @@ struct __ec_align4 ec_params_flash_erase_v1 {
/* Rollback information flash region protected now */
#define EC_FLASH_PROTECT_ROLLBACK_NOW (1 << 10)
-struct __ec_align4 ec_params_flash_protect {
- uint32_t mask; /* Bits in flags to apply */
- uint32_t flags; /* New flags to apply */
-};
-struct __ec_align4 ec_response_flash_protect {
- /* Current value of flash protect flags */
+/**
+ * struct ec_params_flash_protect - Parameters for the flash protect command.
+ * @mask: Bits in flags to apply.
+ * @flags: New flags to apply.
+ */
+struct ec_params_flash_protect {
+ uint32_t mask;
+ uint32_t flags;
+} __ec_align4;
+
+/**
+ * struct ec_response_flash_protect - Response to the flash protect command.
+ * @flags: Current value of flash protect flags.
+ * @valid_flags: Flags which are valid on this platform. This allows the
+ * caller to distinguish between flags which aren't set vs. flags
+ * which can't be set on this platform.
+ * @writable_flags: Flags which can be changed given the current protection
+ * state.
+ */
+struct ec_response_flash_protect {
uint32_t flags;
- /*
- * Flags which are valid on this platform. This allows the caller
- * to distinguish between flags which aren't set vs. flags which can't
- * be set on this platform.
- */
uint32_t valid_flags;
- /* Flags which can be changed given the current protection state */
uint32_t writable_flags;
-};
+} __ec_align4;
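
/*
 * Read-modify-write sketch for the mask/flags pair above: only bits set
 * in @mask are applied from @flags, so unrelated protection bits are
 * left untouched. Callers should check @writable_flags in the response
 * to learn whether the request could take effect.
 */
static inline void
ec_flash_request_ro_at_boot(struct ec_params_flash_protect *p)
{
	p->mask = EC_FLASH_PROTECT_RO_AT_BOOT;	/* which bits to change */
	p->flags = EC_FLASH_PROTECT_RO_AT_BOOT;	/* their new values */
}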
/*
* Note: commands 0x14 - 0x19 version 0 were old commands to get/set flash
@@ -1541,18 +1672,25 @@ enum ec_flash_region {
/* Number of regions */
EC_FLASH_REGION_COUNT,
};
-/* 'RW' is vague if there are multiple RW images; we mean the active one,
- * so the old constant is deprecated */
+/*
+ * 'RW' is vague if there are multiple RW images; we mean the active one,
+ * so the old constant is deprecated.
+ */
#define EC_FLASH_REGION_RW EC_FLASH_REGION_ACTIVE
-struct __ec_align4 ec_params_flash_region_info {
- uint32_t region; /* enum ec_flash_region */
-};
+/**
+ * struct ec_params_flash_region_info - Parameters for the flash region info
+ * command.
+ * @region: Flash region; see EC_FLASH_REGION_*
+ */
+struct ec_params_flash_region_info {
+ uint32_t region;
+} __ec_align4;
-struct __ec_align4 ec_response_flash_region_info {
+struct ec_response_flash_region_info {
uint32_t offset;
uint32_t size;
-};
+} __ec_align4;
/* Read/write VbNvContext */
#define EC_CMD_VBNV_CONTEXT 0x0017
@@ -1564,20 +1702,20 @@ enum ec_vbnvcontext_op {
EC_VBNV_CONTEXT_OP_WRITE,
};
-struct __ec_align4 ec_params_vbnvcontext {
+struct ec_params_vbnvcontext {
uint32_t op;
uint8_t block[EC_VBNV_BLOCK_SIZE];
-};
+} __ec_align4;
-struct __ec_align4 ec_response_vbnvcontext {
+struct ec_response_vbnvcontext {
uint8_t block[EC_VBNV_BLOCK_SIZE];
-};
+} __ec_align4;
/* Get SPI flash information */
#define EC_CMD_FLASH_SPI_INFO 0x0018
-struct __ec_align1 ec_response_flash_spi_info {
+struct ec_response_flash_spi_info {
/* JEDEC info from command 0x9F (manufacturer, memory type, size) */
uint8_t jedec[3];
@@ -1589,16 +1727,19 @@ struct __ec_align1 ec_response_flash_spi_info {
/* Status registers from command 0x05 and 0x35 */
uint8_t sr1, sr2;
-};
+} __ec_align1;
/* Select flash during flash operations */
#define EC_CMD_FLASH_SELECT 0x0019
-struct __ec_align4 ec_params_flash_select {
- /* 1 to select flash, 0 to deselect flash */
+/**
+ * struct ec_params_flash_select - Parameters for the flash select command.
+ * @select: 1 to select flash, 0 to deselect flash
+ */
+struct ec_params_flash_select {
uint8_t select;
-};
+} __ec_align4;
/*****************************************************************************/
@@ -1607,54 +1748,54 @@ struct __ec_align4 ec_params_flash_select {
/* Get fan target RPM */
#define EC_CMD_PWM_GET_FAN_TARGET_RPM 0x0020
-struct __ec_align4 ec_response_pwm_get_fan_rpm {
+struct ec_response_pwm_get_fan_rpm {
uint32_t rpm;
-};
+} __ec_align4;
/* Set target fan RPM */
#define EC_CMD_PWM_SET_FAN_TARGET_RPM 0x0021
/* Version 0 of input params */
-struct __ec_align4 ec_params_pwm_set_fan_target_rpm_v0 {
+struct ec_params_pwm_set_fan_target_rpm_v0 {
uint32_t rpm;
-};
+} __ec_align4;
/* Version 1 of input params */
-struct __ec_align_size1 ec_params_pwm_set_fan_target_rpm_v1 {
+struct ec_params_pwm_set_fan_target_rpm_v1 {
uint32_t rpm;
uint8_t fan_idx;
-};
+} __ec_align_size1;
/* Get keyboard backlight */
/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */
#define EC_CMD_PWM_GET_KEYBOARD_BACKLIGHT 0x0022
-struct __ec_align1 ec_response_pwm_get_keyboard_backlight {
+struct ec_response_pwm_get_keyboard_backlight {
uint8_t percent;
uint8_t enabled;
-};
+} __ec_align1;
/* Set keyboard backlight */
/* OBSOLETE - Use EC_CMD_PWM_SET_DUTY */
#define EC_CMD_PWM_SET_KEYBOARD_BACKLIGHT 0x0023
-struct __ec_align1 ec_params_pwm_set_keyboard_backlight {
+struct ec_params_pwm_set_keyboard_backlight {
uint8_t percent;
-};
+} __ec_align1;
/* Set target fan PWM duty cycle */
#define EC_CMD_PWM_SET_FAN_DUTY 0x0024
/* Version 0 of input params */
-struct __ec_align4 ec_params_pwm_set_fan_duty_v0 {
+struct ec_params_pwm_set_fan_duty_v0 {
uint32_t percent;
-};
+} __ec_align4;
/* Version 1 of input params */
-struct __ec_align_size1 ec_params_pwm_set_fan_duty_v1 {
+struct ec_params_pwm_set_fan_duty_v1 {
uint32_t percent;
uint8_t fan_idx;
-};
+} __ec_align_size1;
#define EC_CMD_PWM_SET_DUTY 0x0025
/* 16 bit duty cycle, 0xffff = 100% */
@@ -1670,22 +1811,22 @@ enum ec_pwm_type {
EC_PWM_TYPE_COUNT,
};
-struct __ec_align4 ec_params_pwm_set_duty {
+struct ec_params_pwm_set_duty {
uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
uint8_t pwm_type; /* ec_pwm_type */
uint8_t index; /* Type-specific index, or 0 if unique */
-};
+} __ec_align4;
#define EC_CMD_PWM_GET_DUTY 0x0026
-struct __ec_align1 ec_params_pwm_get_duty {
+struct ec_params_pwm_get_duty {
uint8_t pwm_type; /* ec_pwm_type */
uint8_t index; /* Type-specific index, or 0 if unique */
-};
+} __ec_align1;
-struct __ec_align2 ec_response_pwm_get_duty {
+struct ec_response_pwm_get_duty {
uint16_t duty; /* Duty cycle, EC_PWM_MAX_DUTY = 100% */
-};
+} __ec_align2;
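
/*
 * Conversion sketch for the 16-bit duty cycle used above, where
 * EC_PWM_MAX_DUTY (0xffff) represents 100%.
 */
static inline uint16_t ec_percent_to_duty(unsigned int percent)
{
	return (uint16_t)((percent * (uint32_t)EC_PWM_MAX_DUTY) / 100);
}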
/*****************************************************************************/
/*
@@ -1696,15 +1837,17 @@ struct __ec_align2 ec_response_pwm_get_duty {
*/
#define EC_CMD_LIGHTBAR_CMD 0x0028
-struct __ec_todo_unpacked rgb_s {
+struct rgb_s {
uint8_t r, g, b;
-};
+} __ec_todo_unpacked;
#define LB_BATTERY_LEVELS 4
-/* List of tweakable parameters. NOTE: It's __packed so it can be sent in a
+
+/*
+ * List of tweakable parameters. NOTE: It's __packed so it can be sent in a
* host command, but the alignment is the same regardless. Keep it that way.
*/
-struct __ec_todo_packed lightbar_params_v0 {
+struct lightbar_params_v0 {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1736,9 +1879,9 @@ struct __ec_todo_packed lightbar_params_v0 {
/* Color palette */
struct rgb_s color[8]; /* 0-3 are Google colors */
-};
+} __ec_todo_packed;
-struct __ec_todo_packed lightbar_params_v1 {
+struct lightbar_params_v1 {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1785,7 +1928,7 @@ struct __ec_todo_packed lightbar_params_v1 {
/* Color palette */
struct rgb_s color[8]; /* 0-3 are Google colors */
-};
+} __ec_todo_packed;
/* Lightbar command params v2
* crbug.com/467716
@@ -1796,7 +1939,7 @@ struct __ec_todo_packed lightbar_params_v1 {
* NOTE: Each of these groups must be less than 120 bytes.
*/
-struct __ec_todo_packed lightbar_params_v2_timing {
+struct lightbar_params_v2_timing {
/* Timing */
int32_t google_ramp_up;
int32_t google_ramp_down;
@@ -1812,9 +1955,9 @@ struct __ec_todo_packed lightbar_params_v2_timing {
int32_t tap_tick_delay;
int32_t tap_gate_delay;
int32_t tap_display_time;
-};
+} __ec_todo_packed;
-struct __ec_todo_packed lightbar_params_v2_tap {
+struct lightbar_params_v2_tap {
/* Tap-for-battery params */
uint8_t tap_pct_red;
uint8_t tap_pct_green;
@@ -1822,28 +1965,28 @@ struct __ec_todo_packed lightbar_params_v2_tap {
uint8_t tap_seg_max_on;
uint8_t tap_seg_osc;
uint8_t tap_idx[3];
-};
+} __ec_todo_packed;
-struct __ec_todo_packed lightbar_params_v2_oscillation {
+struct lightbar_params_v2_oscillation {
/* Oscillation */
uint8_t osc_min[2]; /* AC=0/1 */
uint8_t osc_max[2]; /* AC=0/1 */
uint8_t w_ofs[2]; /* AC=0/1 */
-};
+} __ec_todo_packed;
-struct __ec_todo_packed lightbar_params_v2_brightness {
+struct lightbar_params_v2_brightness {
/* Brightness limits based on the backlight and AC. */
uint8_t bright_bl_off_fixed[2]; /* AC=0/1 */
uint8_t bright_bl_on_min[2]; /* AC=0/1 */
uint8_t bright_bl_on_max[2]; /* AC=0/1 */
-};
+} __ec_todo_packed;
-struct __ec_todo_packed lightbar_params_v2_thresholds {
+struct lightbar_params_v2_thresholds {
/* Battery level thresholds */
uint8_t battery_threshold[LB_BATTERY_LEVELS - 1];
-};
+} __ec_todo_packed;
-struct __ec_todo_packed lightbar_params_v2_colors {
+struct lightbar_params_v2_colors {
/* Map [AC][battery_level] to color index */
uint8_t s0_idx[2][LB_BATTERY_LEVELS]; /* AP is running */
uint8_t s3_idx[2][LB_BATTERY_LEVELS]; /* AP is sleeping */
@@ -1853,16 +1996,16 @@ struct __ec_todo_packed lightbar_params_v2_colors {
/* Color palette */
struct rgb_s color[8]; /* 0-3 are Google colors */
-};
+} __ec_todo_packed;
/* Lightbyte program. */
#define EC_LB_PROG_LEN 192
-struct __ec_todo_unpacked lightbar_program {
+struct lightbar_program {
uint8_t size;
uint8_t data[EC_LB_PROG_LEN];
-};
+} __ec_todo_unpacked;
-struct __ec_todo_packed ec_params_lightbar {
+struct ec_params_lightbar {
uint8_t cmd; /* Command (see enum lightbar_command) */
union {
/*
@@ -1909,9 +2052,9 @@ struct __ec_todo_packed ec_params_lightbar {
struct lightbar_program set_program;
};
-};
+} __ec_todo_packed;
-struct __ec_todo_packed ec_response_lightbar {
+struct ec_response_lightbar {
union {
struct __ec_todo_unpacked {
struct __ec_todo_unpacked {
@@ -1955,7 +2098,7 @@ struct __ec_todo_packed ec_response_lightbar {
* set_v2par_thlds, set_v2par_colors
*/
};
-};
+} __ec_todo_packed;
/* Lightbar commands */
enum lightbar_command {
@@ -2038,14 +2181,14 @@ enum ec_led_colors {
EC_LED_COLOR_COUNT
};
-struct __ec_align1 ec_params_led_control {
+struct ec_params_led_control {
uint8_t led_id; /* Which LED to control */
uint8_t flags; /* Control flags */
uint8_t brightness[EC_LED_COLOR_COUNT];
-};
+} __ec_align1;
-struct __ec_align1 ec_response_led_control {
+struct ec_response_led_control {
/*
* Available brightness value range.
*
@@ -2054,7 +2197,7 @@ struct __ec_align1 ec_response_led_control {
* Other values mean the LED is controlled by PWM.
*/
uint8_t brightness_range[EC_LED_COLOR_COUNT];
-};
+} __ec_align1;
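
/*
 * Interpretation sketch for brightness_range above: 0 means the color
 * channel is absent, 1 means plain on/off control, and any larger value
 * means the channel is PWM-controlled.
 */
static inline int ec_led_channel_is_pwm(
		const struct ec_response_led_control *r,
		enum ec_led_colors color)
{
	return r->brightness_range[color] > 1;
}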
/*****************************************************************************/
/* Verified boot commands */
@@ -2067,7 +2210,7 @@ struct __ec_align1 ec_response_led_control {
/* Verified boot hash command */
#define EC_CMD_VBOOT_HASH 0x002A
-struct __ec_align4 ec_params_vboot_hash {
+struct ec_params_vboot_hash {
uint8_t cmd; /* enum ec_vboot_hash_cmd */
uint8_t hash_type; /* enum ec_vboot_hash_type */
uint8_t nonce_size; /* Nonce size; may be 0 */
@@ -2075,9 +2218,9 @@ struct __ec_align4 ec_params_vboot_hash {
uint32_t offset; /* Offset in flash to hash */
uint32_t size; /* Number of bytes to hash */
uint8_t nonce_data[64]; /* Nonce data; ignored if nonce_size=0 */
-};
+} __ec_align4;
-struct __ec_align4 ec_response_vboot_hash {
+struct ec_response_vboot_hash {
uint8_t status; /* enum ec_vboot_hash_status */
uint8_t hash_type; /* enum ec_vboot_hash_type */
uint8_t digest_size; /* Size of hash digest in bytes */
@@ -2085,7 +2228,7 @@ struct __ec_align4 ec_response_vboot_hash {
uint32_t offset; /* Offset in flash which was hashed */
uint32_t size; /* Number of bytes hashed */
uint8_t hash_digest[64]; /* Hash digest data */
-};
+} __ec_align4;
enum ec_vboot_hash_cmd {
EC_VBOOT_HASH_GET = 0, /* Get current hash status */
@@ -2113,8 +2256,10 @@ enum ec_vboot_hash_status {
#define EC_VBOOT_HASH_OFFSET_ACTIVE 0xfffffffd
#define EC_VBOOT_HASH_OFFSET_UPDATE 0xfffffffc
-/* 'RW' is vague if there are multiple RW images; we mean the active one,
- * so the old constant is deprecated */
+/*
+ * 'RW' is vague if there are multiple RW images; we mean the active one,
+ * so the old constant is deprecated.
+ */
#define EC_VBOOT_HASH_OFFSET_RW EC_VBOOT_HASH_OFFSET_ACTIVE
/*****************************************************************************/
@@ -2237,6 +2382,15 @@ enum motionsense_command {
*/
MOTIONSENSE_CMD_SPOOF = 16,
+ /* Set lid angle for tablet mode detection. */
+ MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE = 17,
+
+ /*
+ * Sensor Scale command is a setter/getter command for the calibration
+ * scale.
+ */
+ MOTIONSENSE_CMD_SENSOR_SCALE = 18,
+
/* Number of motionsense sub-commands. */
MOTIONSENSE_NUM_CMDS
};
@@ -2279,6 +2433,7 @@ enum motionsensor_chip {
MOTIONSENSE_CHIP_GPIO = 12,
MOTIONSENSE_CHIP_LIS2DH = 13,
MOTIONSENSE_CHIP_LSM6DSM = 14,
+ MOTIONSENSE_CHIP_LIS2DE = 15,
MOTIONSENSE_CHIP_MAX,
};
@@ -2291,7 +2446,7 @@ enum motionsensor_orientation {
MOTIONSENSE_ORIENTATION_UNKNOWN = 4,
};
-struct __ec_todo_packed ec_response_motion_sensor_data {
+struct ec_response_motion_sensor_data {
/* Flags for each sensor. */
uint8_t flags;
/* sensor number the data comes from */
@@ -2309,26 +2464,28 @@ struct __ec_todo_packed ec_response_motion_sensor_data {
int16_t add_info[2];
};
};
-};
+} __ec_todo_packed;
/* Note: used in ec_response_get_next_data */
-struct __ec_todo_packed ec_response_motion_sense_fifo_info {
+struct ec_response_motion_sense_fifo_info {
/* Size of the fifo */
uint16_t size;
/* Amount of space used in the fifo */
uint16_t count;
- /* Timestamp recorded in us */
+ /* Timestamp recorded in us.
+ * aka accurate timestamp when host event was triggered.
+ */
uint32_t timestamp;
/* Total number of vectors lost */
uint16_t total_lost;
/* Lost events since the last fifo_info, per sensors */
uint16_t lost[0];
-};
+} __ec_todo_packed;
-struct __ec_todo_packed ec_response_motion_sense_fifo_data {
+struct ec_response_motion_sense_fifo_data {
uint32_t number_data;
struct ec_response_motion_sensor_data data[0];
-};
+} __ec_todo_packed;
/* List supported activity recognition */
enum motionsensor_activity {
@@ -2338,13 +2495,13 @@ enum motionsensor_activity {
MOTIONSENSE_ACTIVITY_ORIENTATION = 3,
};
-struct __ec_todo_unpacked ec_motion_sense_activity {
+struct ec_motion_sense_activity {
uint8_t sensor_num;
uint8_t activity; /* one of enum motionsensor_activity */
uint8_t enable; /* 1: enable, 0: disable */
uint8_t reserved;
uint16_t parameters[3]; /* activity dependent parameters */
-};
+} __ec_todo_unpacked;
/* Module flag masks used for the dump sub-command. */
#define MOTIONSENSE_MODULE_FLAG_ACTIVE (1<<0)
@@ -2372,7 +2529,10 @@ struct __ec_todo_unpacked ec_motion_sense_activity {
/* MOTIONSENSE_CMD_SENSOR_OFFSET subcommand flag */
/* Set Calibration information */
-#define MOTION_SENSE_SET_OFFSET 1
+#define MOTION_SENSE_SET_OFFSET (1 << 0)
+
+/* Default scale value, i.e. a factor of 1. */
+#define MOTION_SENSE_DEFAULT_SCALE (1 << 15)
#define LID_ANGLE_UNRELIABLE 500
@@ -2390,7 +2550,7 @@ enum motionsense_spoof_mode {
MOTIONSENSE_SPOOF_MODE_QUERY,
};
-struct __ec_todo_packed ec_params_motion_sense {
+struct ec_params_motion_sense {
uint8_t cmd;
union {
/* Used for MOTIONSENSE_CMD_DUMP */
@@ -2413,8 +2573,10 @@ struct __ec_todo_packed ec_params_motion_sense {
int16_t data;
} kb_wake_angle;
- /* Used for MOTIONSENSE_CMD_INFO, MOTIONSENSE_CMD_DATA
- * and MOTIONSENSE_CMD_PERFORM_CALIB. */
+ /*
+ * Used for MOTIONSENSE_CMD_INFO, MOTIONSENSE_CMD_DATA
+ * and MOTIONSENSE_CMD_PERFORM_CALIB.
+ */
struct __ec_todo_unpacked {
uint8_t sensor_num;
} info, info_3, data, fifo_flush, perform_calib,
@@ -2465,6 +2627,36 @@ struct __ec_todo_packed ec_params_motion_sense {
int16_t offset[3];
} sensor_offset;
+ /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */
+ struct __ec_todo_packed {
+ uint8_t sensor_num;
+
+ /*
+ * bit 0: If set (MOTION_SENSE_SET_OFFSET), set
+ * the calibration information in the EC.
+ * If unset, just retrieve calibration information.
+ */
+ uint16_t flags;
+
+ /*
+ * Temperature at calibration, in units of 0.01 C
+ * 0x8000: invalid / unknown.
+ * 0x0: 0C
+ * 0x7fff: +327.67C
+ */
+ int16_t temp;
+
+ /*
+ * Scale for calibration:
+ * By default the scale is 1; it is encoded as a 1.15 fixed-point
+ * value in 16 bits (see the conversion sketch after this struct):
+ * 1.0 = 1 << 15
+ * ~2.0 = 0xFFFF
+ * ~0.0 = 0.
+ */
+ uint16_t scale[3];
+ } sensor_scale;
+
/* Used for MOTIONSENSE_CMD_FIFO_INFO */
/* (no params) */
@@ -2504,10 +2696,28 @@ struct __ec_todo_packed ec_params_motion_sense {
/* Individual component values to spoof. */
int16_t components[3];
} spoof;
+
+ /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */
+ struct __ec_todo_unpacked {
+ /*
+ * Lid angle threshold for switching between tablet and
+ * clamshell mode.
+ */
+ int16_t lid_angle;
+
+ /*
+ * Hysteresis degree to prevent fluctuations between
+ * clamshell and tablet mode if lid angle keeps
+ * changing around the threshold. Lid motion driver will
+ * use lid_angle + hys_degree to trigger tablet mode and
+ * lid_angle - hys_degree to trigger clamshell mode.
+ */
+ int16_t hys_degree;
+ } tablet_mode_threshold;
};
-};
+} __ec_todo_packed;
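
/*
 * Conversion sketch for the 1.15 fixed-point sensor scale above,
 * expressed in parts-per-million to avoid floating point: a stored
 * value s represents the multiplier s / 32768, so
 * MOTION_SENSE_DEFAULT_SCALE (1 << 15) is exactly 1.0 and 0xFFFF is
 * just below 2.0.
 */
static inline uint32_t ec_scale_to_ppm(uint16_t scale)
{
	return (uint32_t)(((uint64_t)scale * 1000000u) >> 15);
}

static inline uint16_t ec_ppm_to_scale(uint32_t ppm)
{
	return (uint16_t)(((uint64_t)ppm << 15) / 1000000u);
}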
-struct __ec_todo_packed ec_response_motion_sense {
+struct ec_response_motion_sense {
union {
/* Used for MOTIONSENSE_CMD_DUMP */
struct __ec_todo_unpacked {
@@ -2573,12 +2783,21 @@ struct __ec_todo_packed ec_response_motion_sense {
} ec_rate, sensor_odr, sensor_range, kb_wake_angle,
fifo_int_enable, spoof;
- /* Used for MOTIONSENSE_CMD_SENSOR_OFFSET */
+ /*
+ * Used for MOTIONSENSE_CMD_SENSOR_OFFSET,
+ * PERFORM_CALIB.
+ */
struct __ec_todo_unpacked {
int16_t temp;
int16_t offset[3];
} sensor_offset, perform_calib;
+ /* Used for MOTIONSENSE_CMD_SENSOR_SCALE */
+ struct __ec_todo_unpacked {
+ int16_t temp;
+ uint16_t scale[3];
+ } sensor_scale;
+
struct ec_response_motion_sense_fifo_info fifo_info, fifo_flush;
struct ec_response_motion_sense_fifo_data fifo_read;
@@ -2599,8 +2818,21 @@ struct __ec_todo_packed ec_response_motion_sense {
*/
uint16_t value;
} lid_angle;
+
+ /* Used for MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE. */
+ struct __ec_todo_unpacked {
+ /*
+ * Lid angle threshold for switching between tablet and
+ * clamshell mode.
+ */
+ uint16_t lid_angle;
+
+ /* Hysteresis degree. */
+ uint16_t hys_degree;
+ } tablet_mode_threshold;
+
};
-};
+} __ec_todo_packed;
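
/*
 * Sketch of the hysteresis rule described for tablet_mode_threshold:
 * switch to tablet mode only above lid_angle + hys_degree, back to
 * clamshell only below lid_angle - hys_degree, and otherwise keep the
 * current mode so small oscillations around the threshold do not
 * toggle it.
 */
static inline int ec_tablet_mode_update(int cur_mode, int angle,
					int lid_angle, int hys_degree)
{
	if (angle >= lid_angle + hys_degree)
		return 1;	/* tablet */
	if (angle <= lid_angle - hys_degree)
		return 0;	/* clamshell */
	return cur_mode;	/* inside the hysteresis band */
}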
/*****************************************************************************/
/* Force lid open command */
@@ -2608,9 +2840,9 @@ struct __ec_todo_packed ec_response_motion_sense {
/* Make lid event always open */
#define EC_CMD_FORCE_LID_OPEN 0x002C
-struct __ec_align1 ec_params_force_lid_open {
+struct ec_params_force_lid_open {
uint8_t enabled;
-};
+} __ec_align1;
/*****************************************************************************/
/* Configure the behavior of the power button */
@@ -2621,10 +2853,10 @@ enum ec_config_power_button_flags {
EC_POWER_BUTTON_ENABLE_PULSE = (1 << 0),
};
-struct __ec_align1 ec_params_config_power_button {
+struct ec_params_config_power_button {
/* See enum ec_config_power_button_flags */
uint8_t flags;
-};
+} __ec_align1;
/*****************************************************************************/
/* USB charging control commands */
@@ -2632,10 +2864,10 @@ struct __ec_align1 ec_params_config_power_button {
/* Set USB port charging mode */
#define EC_CMD_USB_CHARGE_SET_MODE 0x0030
-struct __ec_align1 ec_params_usb_charge_set_mode {
+struct ec_params_usb_charge_set_mode {
uint8_t usb_port_id;
uint8_t mode;
-};
+} __ec_align1;
/*****************************************************************************/
/* Persistent storage for host */
@@ -2646,12 +2878,12 @@ struct __ec_align1 ec_params_usb_charge_set_mode {
/* Get persistent storage info */
#define EC_CMD_PSTORE_INFO 0x0040
-struct __ec_align4 ec_response_pstore_info {
+struct ec_response_pstore_info {
/* Persistent storage size, in bytes */
uint32_t pstore_size;
/* Access size; read/write offset and size must be a multiple of this */
uint32_t access_size;
-};
+} __ec_align4;
/*
* Read persistent storage
@@ -2660,31 +2892,31 @@ struct __ec_align4 ec_response_pstore_info {
*/
#define EC_CMD_PSTORE_READ 0x0041
-struct __ec_align4 ec_params_pstore_read {
+struct ec_params_pstore_read {
uint32_t offset; /* Byte offset to read */
uint32_t size; /* Size to read in bytes */
-};
+} __ec_align4;
/* Write persistent storage */
#define EC_CMD_PSTORE_WRITE 0x0042
-struct __ec_align4 ec_params_pstore_write {
+struct ec_params_pstore_write {
uint32_t offset; /* Byte offset to write */
uint32_t size; /* Size to write in bytes */
uint8_t data[EC_PSTORE_SIZE_MAX];
-};
+} __ec_align4;
/*****************************************************************************/
/* Real-time clock */
/* RTC params and response structures */
-struct __ec_align4 ec_params_rtc {
+struct ec_params_rtc {
uint32_t time;
-};
+} __ec_align4;
-struct __ec_align4 ec_response_rtc {
+struct ec_response_rtc {
uint32_t time;
-};
+} __ec_align4;
/* These use ec_response_rtc */
#define EC_CMD_RTC_GET_VALUE 0x0044
@@ -2712,7 +2944,7 @@ enum ec_port80_subcmd {
EC_PORT80_READ_BUFFER,
};
-struct __ec_todo_packed ec_params_port80_read {
+struct ec_params_port80_read {
uint16_t subcmd;
union {
struct __ec_todo_unpacked {
@@ -2720,9 +2952,9 @@ struct __ec_todo_packed ec_params_port80_read {
uint32_t num_entries;
} read_buffer;
};
-};
+} __ec_todo_packed;
-struct __ec_todo_packed ec_response_port80_read {
+struct ec_response_port80_read {
union {
struct __ec_todo_unpacked {
uint32_t writes;
@@ -2733,11 +2965,11 @@ struct __ec_todo_packed ec_response_port80_read {
uint16_t codes[EC_PORT80_SIZE_MAX];
} data;
};
-};
+} __ec_todo_packed;
-struct __ec_align2 ec_response_port80_last_boot {
+struct ec_response_port80_last_boot {
uint16_t code;
-};
+} __ec_align2;
/*****************************************************************************/
/* Temporary secure storage for host verified boot use */
@@ -2750,12 +2982,12 @@ struct __ec_align2 ec_response_port80_last_boot {
/* Get persistent storage info */
#define EC_CMD_VSTORE_INFO 0x0049
-struct __ec_align_size1 ec_response_vstore_info {
+struct ec_response_vstore_info {
/* Indicates which slots are locked */
uint32_t slot_locked;
/* Total number of slots available */
uint8_t slot_count;
-};
+} __ec_align_size1;
/*
* Read temporary secure storage
@@ -2764,23 +2996,23 @@ struct __ec_align_size1 ec_response_vstore_info {
*/
#define EC_CMD_VSTORE_READ 0x004A
-struct __ec_align1 ec_params_vstore_read {
+struct ec_params_vstore_read {
uint8_t slot; /* Slot to read from */
-};
+} __ec_align1;
-struct __ec_align1 ec_response_vstore_read {
+struct ec_response_vstore_read {
uint8_t data[EC_VSTORE_SLOT_SIZE];
-};
+} __ec_align1;
/*
* Write temporary secure storage and lock it.
*/
#define EC_CMD_VSTORE_WRITE 0x004B
-struct __ec_align1 ec_params_vstore_write {
+struct ec_params_vstore_write {
uint8_t slot; /* Slot to write to */
uint8_t data[EC_VSTORE_SLOT_SIZE];
-};
+} __ec_align1;
/*****************************************************************************/
/* Thermal engine commands. Note that there are two implementations. We'll
@@ -2797,21 +3029,21 @@ struct __ec_align1 ec_params_vstore_write {
*/
/* Version 0 - set */
-struct __ec_align2 ec_params_thermal_set_threshold {
+struct ec_params_thermal_set_threshold {
uint8_t sensor_type;
uint8_t threshold_id;
uint16_t value;
-};
+} __ec_align2;
/* Version 0 - get */
-struct __ec_align1 ec_params_thermal_get_threshold {
+struct ec_params_thermal_get_threshold {
uint8_t sensor_type;
uint8_t threshold_id;
-};
+} __ec_align1;
-struct __ec_align2 ec_response_thermal_get_threshold {
+struct ec_response_thermal_get_threshold {
uint16_t value;
-};
+} __ec_align2;
/* The version 1 structs are visible. */
@@ -2845,25 +3077,25 @@ enum ec_temp_thresholds {
* Note that this structure is a sub-structure of
* ec_params_thermal_set_threshold_v1, but maintains its alignment there.
*/
-struct __ec_align4 ec_thermal_config {
+struct ec_thermal_config {
uint32_t temp_host[EC_TEMP_THRESH_COUNT]; /* levels of hotness */
uint32_t temp_host_release[EC_TEMP_THRESH_COUNT]; /* release levels */
uint32_t temp_fan_off; /* no active cooling needed */
uint32_t temp_fan_max; /* max active cooling needed */
-};
+} __ec_align4;
/* Version 1 - get config for one sensor. */
-struct __ec_align4 ec_params_thermal_get_threshold_v1 {
+struct ec_params_thermal_get_threshold_v1 {
uint32_t sensor_num;
-};
+} __ec_align4;
/* This returns a struct ec_thermal_config */
/* Version 1 - set config for one sensor.
* Use read-modify-write for best results! */
-struct __ec_align4 ec_params_thermal_set_threshold_v1 {
+struct ec_params_thermal_set_threshold_v1 {
uint32_t sensor_num;
struct ec_thermal_config cfg;
-};
+} __ec_align4;
/* This returns no data */
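
/*
 * Read-modify-write sketch, as recommended above: fetch the current
 * struct ec_thermal_config, change one field, and write the whole
 * struct back. ec_cmd(cmd, version, params, params_size, resp,
 * resp_size) stands in for a host-specific transport and is not
 * defined in this header; the GET/SET command codes are defined
 * earlier in this file.
 */
static inline int ec_thermal_set_fan_max(uint32_t sensor_num,
					 uint32_t temp_fan_max)
{
	struct ec_params_thermal_get_threshold_v1 gp = {
		.sensor_num = sensor_num,
	};
	struct ec_params_thermal_set_threshold_v1 sp = {
		.sensor_num = sensor_num,
	};
	int ret;

	/* v1 GET returns a struct ec_thermal_config */
	ret = ec_cmd(EC_CMD_THERMAL_GET_THRESHOLD, 1, &gp, sizeof(gp),
		     &sp.cfg, sizeof(sp.cfg));
	if (ret != EC_RES_SUCCESS)
		return ret;
	sp.cfg.temp_fan_max = temp_fan_max;	/* change one field only */
	return ec_cmd(EC_CMD_THERMAL_SET_THRESHOLD, 1, &sp, sizeof(sp),
		      NULL, 0);
}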
/****************************************************************************/
@@ -2872,9 +3104,9 @@ struct __ec_align4 ec_params_thermal_set_threshold_v1 {
#define EC_CMD_THERMAL_AUTO_FAN_CTRL 0x0052
/* Version 1 of input params */
-struct __ec_align1 ec_params_auto_fan_ctrl_v1 {
+struct ec_params_auto_fan_ctrl_v1 {
uint8_t fan_idx;
-};
+} __ec_align1;
/* Get/Set TMP006 calibration data */
#define EC_CMD_TMP006_GET_CALIBRATION 0x0053
@@ -2890,55 +3122,55 @@ struct __ec_align1 ec_params_auto_fan_ctrl_v1 {
*/
/* This is the same struct for both v0 and v1. */
-struct __ec_align1 ec_params_tmp006_get_calibration {
+struct ec_params_tmp006_get_calibration {
uint8_t index;
-};
+} __ec_align1;
/* Version 0 */
-struct __ec_align4 ec_response_tmp006_get_calibration_v0 {
+struct ec_response_tmp006_get_calibration_v0 {
float s0;
float b0;
float b1;
float b2;
-};
+} __ec_align4;
-struct __ec_align4 ec_params_tmp006_set_calibration_v0 {
+struct ec_params_tmp006_set_calibration_v0 {
uint8_t index;
uint8_t reserved[3];
float s0;
float b0;
float b1;
float b2;
-};
+} __ec_align4;
/* Version 1 */
-struct __ec_align4 ec_response_tmp006_get_calibration_v1 {
+struct ec_response_tmp006_get_calibration_v1 {
uint8_t algorithm;
uint8_t num_params;
uint8_t reserved[2];
float val[0];
-};
+} __ec_align4;
-struct __ec_align4 ec_params_tmp006_set_calibration_v1 {
+struct ec_params_tmp006_set_calibration_v1 {
uint8_t index;
uint8_t algorithm;
uint8_t num_params;
uint8_t reserved;
float val[0];
-};
+} __ec_align4;
/* Read raw TMP006 data */
#define EC_CMD_TMP006_GET_RAW 0x0055
-struct __ec_align1 ec_params_tmp006_get_raw {
+struct ec_params_tmp006_get_raw {
uint8_t index;
-};
+} __ec_align1;
-struct __ec_align4 ec_response_tmp006_get_raw {
+struct ec_response_tmp006_get_raw {
int32_t t; /* In 1/100 K */
int32_t v; /* In nV */
-};
+} __ec_align4;
/*****************************************************************************/
/* MKBP - Matrix KeyBoard Protocol */
@@ -2960,17 +3192,17 @@ struct __ec_align4 ec_response_tmp006_get_raw {
*/
#define EC_CMD_MKBP_INFO 0x0061
-struct __ec_align_size1 ec_response_mkbp_info {
+struct ec_response_mkbp_info {
uint32_t rows;
uint32_t cols;
/* Formerly "switches", which was 0. */
uint8_t reserved;
-};
+} __ec_align_size1;
-struct __ec_align1 ec_params_mkbp_info {
+struct ec_params_mkbp_info {
uint8_t info_type;
uint8_t event_type;
-};
+} __ec_align1;
enum ec_mkbp_info_type {
/*
@@ -3014,10 +3246,21 @@ enum ec_mkbp_info_type {
/* Simulate key press */
#define EC_CMD_MKBP_SIMULATE_KEY 0x0062
-struct __ec_align1 ec_params_mkbp_simulate_key {
+struct ec_params_mkbp_simulate_key {
uint8_t col;
uint8_t row;
uint8_t pressed;
+} __ec_align1;
+
+#define EC_CMD_GET_KEYBOARD_ID 0x0063
+
+struct ec_response_keyboard_id {
+ uint32_t keyboard_id;
+} __ec_align4;
+
+enum keyboard_id {
+ KEYBOARD_ID_UNSUPPORTED = 0,
+ KEYBOARD_ID_UNREADABLE = 0xffffffff,
};
/* Configure keyboard scanning */
@@ -3045,7 +3288,7 @@ enum mkbp_config_valid {
* Note that this is used as a sub-structure of
* ec_{params/response}_mkbp_get_config.
*/
-struct __ec_align_size1 ec_mkbp_config {
+struct ec_mkbp_config {
uint32_t valid_mask; /* valid fields */
uint8_t flags; /* some flags (enum mkbp_config_flags) */
uint8_t valid_flags; /* which flags are valid */
@@ -3064,15 +3307,15 @@ struct __ec_align_size1 ec_mkbp_config {
uint16_t debounce_up_us; /* time for debounce on key up */
/* maximum depth to allow for fifo (0 = no keyscan output) */
uint8_t fifo_max_depth;
-};
+} __ec_align_size1;
-struct __ec_align_size1 ec_params_mkbp_set_config {
+struct ec_params_mkbp_set_config {
struct ec_mkbp_config config;
-};
+} __ec_align_size1;
-struct __ec_align_size1 ec_response_mkbp_get_config {
+struct ec_response_mkbp_get_config {
struct ec_mkbp_config config;
-};
+} __ec_align_size1;
/* Run the key scan emulation */
#define EC_CMD_KEYSCAN_SEQ_CTRL 0x0066
@@ -3093,11 +3336,11 @@ enum ec_collect_flags {
EC_KEYSCAN_SEQ_FLAG_DONE = 1 << 0,
};
-struct __ec_align1 ec_collect_item {
+struct ec_collect_item {
uint8_t flags; /* some flags (enum ec_collect_flags) */
-};
+} __ec_align1;
-struct __ec_todo_packed ec_params_keyscan_seq_ctrl {
+struct ec_params_keyscan_seq_ctrl {
uint8_t cmd; /* Command to send (enum ec_keyscan_seq_cmd) */
union {
struct __ec_align1 {
@@ -3119,9 +3362,9 @@ struct __ec_todo_packed ec_params_keyscan_seq_ctrl {
uint8_t num_items; /* Number of items to return */
} collect;
};
-};
+} __ec_todo_packed;
-struct __ec_todo_packed ec_result_keyscan_seq_ctrl {
+struct ec_result_keyscan_seq_ctrl {
union {
struct __ec_todo_unpacked {
uint8_t num_items; /* Number of items */
@@ -3129,7 +3372,7 @@ struct __ec_todo_packed ec_result_keyscan_seq_ctrl {
struct ec_collect_item item[0];
} collect;
};
-};
+} __ec_todo_packed;
/*
* Get the next pending MKBP event.
@@ -3138,6 +3381,17 @@ struct __ec_todo_packed ec_result_keyscan_seq_ctrl {
*/
#define EC_CMD_GET_NEXT_EVENT 0x0067
+#define EC_MKBP_HAS_MORE_EVENTS_SHIFT 7
+
+/*
+ * We use the most significant bit of the event type to indicate to the host
+ * that the EC has more MKBP events available to provide.
+ */
+#define EC_MKBP_HAS_MORE_EVENTS (1 << EC_MKBP_HAS_MORE_EVENTS_SHIFT)
+
+/* The mask to apply to get the raw event type */
+#define EC_MKBP_EVENT_TYPE_MASK ((1 << EC_MKBP_HAS_MORE_EVENTS_SHIFT) - 1)
+
enum ec_mkbp_event {
/* Keyboard matrix changed. The event data is the new matrix state. */
EC_MKBP_EVENT_KEY_MATRIX = 0,
@@ -3169,9 +3423,16 @@ enum ec_mkbp_event {
*/
EC_MKBP_EVENT_HOST_EVENT64 = 7,
+ /* Notify the AP that something happened on CEC */
+ EC_MKBP_EVENT_CEC_EVENT = 8,
+
+ /* Send an incoming CEC message to the AP */
+ EC_MKBP_EVENT_CEC_MESSAGE = 9,
+
/* Number of MKBP events */
EC_MKBP_EVENT_COUNT,
};
+BUILD_ASSERT(EC_MKBP_EVENT_COUNT <= EC_MKBP_EVENT_TYPE_MASK);
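
/*
 * Decode sketch for the event_type byte returned by
 * EC_CMD_GET_NEXT_EVENT: the top bit flags that more events are queued,
 * and the low seven bits carry the enum ec_mkbp_event value.
 */
static inline int ec_event_has_more(uint8_t event_type)
{
	return !!(event_type & EC_MKBP_HAS_MORE_EVENTS);
}

static inline uint8_t ec_event_raw_type(uint8_t event_type)
{
	return event_type & EC_MKBP_EVENT_TYPE_MASK;
}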
union __ec_align_offset1 ec_response_get_next_data {
uint8_t key_matrix[13];
@@ -3193,6 +3454,9 @@ union __ec_align_offset1 ec_response_get_next_data {
uint32_t fp_events;
uint32_t sysrq;
+
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
};
union __ec_align_offset1 ec_response_get_next_data_v1 {
@@ -3216,20 +3480,24 @@ union __ec_align_offset1 ec_response_get_next_data_v1 {
uint32_t sysrq;
+ /* CEC events from enum mkbp_cec_event */
+ uint32_t cec_events;
+
uint8_t cec_message[16];
};
+BUILD_ASSERT(sizeof(union ec_response_get_next_data_v1) == 16);
-struct __ec_align1 ec_response_get_next_event {
+struct ec_response_get_next_event {
uint8_t event_type;
/* Followed by event data if any */
union ec_response_get_next_data data;
-};
+} __ec_align1;
-struct __ec_align1 ec_response_get_next_event_v1 {
+struct ec_response_get_next_event_v1 {
uint8_t event_type;
/* Followed by event data if any */
union ec_response_get_next_data_v1 data;
-};
+} __ec_align1;
/* Bit indices for buttons and switches. */
/* Buttons */
@@ -3241,13 +3509,14 @@ struct __ec_align1 ec_response_get_next_event_v1 {
/* Switches */
#define EC_MKBP_LID_OPEN 0
#define EC_MKBP_TABLET_MODE 1
+#define EC_MKBP_BASE_ATTACHED 2
/* Run keyboard factory test scanning */
#define EC_CMD_KEYBOARD_FACTORY_TEST 0x0068
-struct __ec_align2 ec_response_keyboard_factory_test {
+struct ec_response_keyboard_factory_test {
uint16_t shorted; /* Keyboard pins are shorted */
-};
+} __ec_align2;
/* Fingerprint events in 'fp_events' for EC_MKBP_EVENT_FINGERPRINT */
#define EC_MKBP_FP_RAW_EVENT(fp_events) ((fp_events) & 0x00FFFFFF)
@@ -3255,6 +3524,10 @@ struct __ec_align2 ec_response_keyboard_factory_test {
#define EC_MKBP_FP_ENROLL_PROGRESS_OFFSET 4
#define EC_MKBP_FP_ENROLL_PROGRESS(fpe) (((fpe) & 0x00000FF0) \
>> EC_MKBP_FP_ENROLL_PROGRESS_OFFSET)
+#define EC_MKBP_FP_MATCH_IDX_OFFSET 12
+#define EC_MKBP_FP_MATCH_IDX_MASK 0x0000F000
+#define EC_MKBP_FP_MATCH_IDX(fpe) (((fpe) & EC_MKBP_FP_MATCH_IDX_MASK) \
+ >> EC_MKBP_FP_MATCH_IDX_OFFSET)
#define EC_MKBP_FP_ENROLL (1 << 27)
#define EC_MKBP_FP_MATCH (1 << 28)
#define EC_MKBP_FP_FINGER_DOWN (1 << 29)
@@ -3271,6 +3544,7 @@ struct __ec_align2 ec_response_keyboard_factory_test {
/* code given by EC_MKBP_FP_ERRCODE() when EC_MKBP_FP_MATCH is set */
#define EC_MKBP_FP_ERR_MATCH_NO 0
#define EC_MKBP_FP_ERR_MATCH_NO_INTERNAL 6
+#define EC_MKBP_FP_ERR_MATCH_NO_TEMPLATES 7
#define EC_MKBP_FP_ERR_MATCH_NO_LOW_QUALITY 2
#define EC_MKBP_FP_ERR_MATCH_NO_LOW_COVERAGE 4
#define EC_MKBP_FP_ERR_MATCH_YES 1
@@ -3284,14 +3558,14 @@ struct __ec_align2 ec_response_keyboard_factory_test {
/* Read temperature sensor info */
#define EC_CMD_TEMP_SENSOR_GET_INFO 0x0070
-struct __ec_align1 ec_params_temp_sensor_get_info {
+struct ec_params_temp_sensor_get_info {
uint8_t id;
-};
+} __ec_align1;
-struct __ec_align1 ec_response_temp_sensor_get_info {
+struct ec_response_temp_sensor_get_info {
char sensor_name[32];
uint8_t sensor_type;
-};
+} __ec_align1;
/*****************************************************************************/
@@ -3310,13 +3584,13 @@ struct __ec_align1 ec_response_temp_sensor_get_info {
* Host event mask params and response structures, shared by all of the host
* event commands below.
*/
-struct __ec_align4 ec_params_host_event_mask {
+struct ec_params_host_event_mask {
uint32_t mask;
-};
+} __ec_align4;
-struct __ec_align4 ec_response_host_event_mask {
+struct ec_response_host_event_mask {
uint32_t mask;
-};
+} __ec_align4;
/* These all use ec_response_host_event_mask */
#define EC_CMD_HOST_EVENT_GET_B 0x0087
@@ -3336,7 +3610,7 @@ struct __ec_align4 ec_response_host_event_mask {
* of BIOS/OS to program host events and masks
*/
-struct __ec_align4 ec_params_host_event {
+struct ec_params_host_event {
/* Action requested by host - one of enum ec_host_event_action. */
uint8_t action;
@@ -3352,18 +3626,18 @@ struct __ec_align4 ec_params_host_event {
/* Value to be used in case of set operations. */
uint64_t value;
-};
+} __ec_align4;
/*
* Response structure returned by EC_CMD_HOST_EVENT.
* Update the value on a GET request. Set to 0 on GET/CLEAR
*/
-struct __ec_align4 ec_response_host_event {
+struct ec_response_host_event {
/* Mask value in case of get operation */
uint64_t value;
-};
+} __ec_align4;
enum ec_host_event_action {
/*
@@ -3417,21 +3691,21 @@ enum ec_host_event_mask_type {
/* Enable/disable LCD backlight */
#define EC_CMD_SWITCH_ENABLE_BKLIGHT 0x0090
-struct __ec_align1 ec_params_switch_enable_backlight {
+struct ec_params_switch_enable_backlight {
uint8_t enabled;
-};
+} __ec_align1;
/* Enable/disable WLAN/Bluetooth */
#define EC_CMD_SWITCH_ENABLE_WIRELESS 0x0091
#define EC_VER_SWITCH_ENABLE_WIRELESS 1
/* Version 0 params; no response */
-struct __ec_align1 ec_params_switch_enable_wireless_v0 {
+struct ec_params_switch_enable_wireless_v0 {
uint8_t enabled;
-};
+} __ec_align1;
/* Version 1 params */
-struct __ec_align1 ec_params_switch_enable_wireless_v1 {
+struct ec_params_switch_enable_wireless_v1 {
/* Flags to enable now */
uint8_t now_flags;
@@ -3447,16 +3721,16 @@ struct __ec_align1 ec_params_switch_enable_wireless_v1 {
/* Which flags to copy from suspend_flags */
uint8_t suspend_mask;
-};
+} __ec_align1;
/* Version 1 response */
-struct __ec_align1 ec_response_switch_enable_wireless_v1 {
+struct ec_response_switch_enable_wireless_v1 {
/* Flags to enable now */
uint8_t now_flags;
/* Flags to leave enabled in S3 */
uint8_t suspend_flags;
-};
+} __ec_align1;
/*****************************************************************************/
/* GPIO commands. Only available on EC if write protect has been disabled. */
@@ -3464,25 +3738,25 @@ struct __ec_align1 ec_response_switch_enable_wireless_v1 {
/* Set GPIO output value */
#define EC_CMD_GPIO_SET 0x0092
-struct __ec_align1 ec_params_gpio_set {
+struct ec_params_gpio_set {
char name[32];
uint8_t val;
-};
+} __ec_align1;
/* Get GPIO value */
#define EC_CMD_GPIO_GET 0x0093
/* Version 0 of input params and response */
-struct __ec_align1 ec_params_gpio_get {
+struct ec_params_gpio_get {
char name[32];
-};
+} __ec_align1;
-struct __ec_align1 ec_response_gpio_get {
+struct ec_response_gpio_get {
uint8_t val;
-};
+} __ec_align1;
/* Version 1 of input params and response */
-struct __ec_align1 ec_params_gpio_get_v1 {
+struct ec_params_gpio_get_v1 {
uint8_t subcmd;
union {
struct __ec_align1 {
@@ -3492,9 +3766,9 @@ struct __ec_align1 ec_params_gpio_get_v1 {
uint8_t index;
} get_info;
};
-};
+} __ec_align1;
-struct __ec_todo_packed ec_response_gpio_get_v1 {
+struct ec_response_gpio_get_v1 {
union {
struct __ec_align1 {
uint8_t val;
@@ -3505,7 +3779,7 @@ struct __ec_todo_packed ec_response_gpio_get_v1 {
uint32_t flags;
} get_info;
};
-};
+} __ec_todo_packed;
enum gpio_get_subcmd {
EC_GPIO_GET_BY_NAME = 0,
@@ -3526,27 +3800,27 @@ enum gpio_get_subcmd {
/* Read I2C bus */
#define EC_CMD_I2C_READ 0x0094
-struct __ec_align_size1 ec_params_i2c_read {
+struct ec_params_i2c_read {
uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
uint8_t read_size; /* Either 8 or 16. */
uint8_t port;
uint8_t offset;
-};
+} __ec_align_size1;
-struct __ec_align2 ec_response_i2c_read {
+struct ec_response_i2c_read {
uint16_t data;
-};
+} __ec_align2;
/* Write I2C bus */
#define EC_CMD_I2C_WRITE 0x0095
-struct __ec_align_size1 ec_params_i2c_write {
+struct ec_params_i2c_write {
uint16_t data;
uint16_t addr; /* 8-bit address (7-bit shifted << 1) */
uint8_t write_size; /* Either 8 or 16. */
uint8_t port;
uint8_t offset;
-};
+} __ec_align_size1;
/*****************************************************************************/
/* Charge state commands. Only available when flash write protect unlocked. */
@@ -3563,9 +3837,9 @@ enum ec_charge_control_mode {
CHARGE_CONTROL_DISCHARGE,
};
-struct __ec_align4 ec_params_charge_control {
+struct ec_params_charge_control {
uint32_t mode; /* enum charge_control_mode */
-};
+} __ec_align4;
/*****************************************************************************/
/* Console commands. Only available when flash write protect is unlocked. */
@@ -3592,9 +3866,9 @@ enum ec_console_read_subcmd {
CONSOLE_READ_RECENT
};
-struct __ec_align1 ec_params_console_read_v1 {
+struct ec_params_console_read_v1 {
uint8_t subcmd; /* enum ec_console_read_subcmd */
-};
+} __ec_align1;
/*****************************************************************************/
@@ -3609,9 +3883,9 @@ struct __ec_align1 ec_params_console_read_v1 {
#define EC_BATTERY_CUTOFF_FLAG_AT_SHUTDOWN (1 << 0)
-struct __ec_align1 ec_params_battery_cutoff {
+struct ec_params_battery_cutoff {
uint8_t flags;
-};
+} __ec_align1;
/*****************************************************************************/
/* USB port mux control. */
@@ -3621,9 +3895,9 @@ struct __ec_align1 ec_params_battery_cutoff {
*/
#define EC_CMD_USB_MUX 0x009A
-struct __ec_align1 ec_params_usb_mux {
+struct ec_params_usb_mux {
uint8_t mux;
-};
+} __ec_align1;
/*****************************************************************************/
/* LDOs / FETs control. */
@@ -3638,23 +3912,23 @@ enum ec_ldo_state {
*/
#define EC_CMD_LDO_SET 0x009B
-struct __ec_align1 ec_params_ldo_set {
+struct ec_params_ldo_set {
uint8_t index;
uint8_t state;
-};
+} __ec_align1;
/*
* Get LDO state.
*/
#define EC_CMD_LDO_GET 0x009C
-struct __ec_align1 ec_params_ldo_get {
+struct ec_params_ldo_get {
uint8_t index;
-};
+} __ec_align1;
-struct __ec_align1 ec_response_ldo_get {
+struct ec_response_ldo_get {
uint8_t state;
-};
+} __ec_align1;
/*****************************************************************************/
/* Power info. */
@@ -3664,13 +3938,13 @@ struct __ec_align1 ec_response_ldo_get {
*/
#define EC_CMD_POWER_INFO 0x009D
-struct __ec_align4 ec_response_power_info {
+struct ec_response_power_info {
uint32_t usb_dev_type;
uint16_t voltage_ac;
uint16_t voltage_system;
uint16_t current_system;
uint16_t usb_current_limit;
-};
+} __ec_align4;
/*****************************************************************************/
/* I2C passthru command */
@@ -3689,23 +3963,23 @@ struct __ec_align4 ec_response_power_info {
/* Any error */
#define EC_I2C_STATUS_ERROR (EC_I2C_STATUS_NAK | EC_I2C_STATUS_TIMEOUT)
-struct __ec_align2 ec_params_i2c_passthru_msg {
+struct ec_params_i2c_passthru_msg {
uint16_t addr_flags; /* I2C slave address (7 or 10 bits) and flags */
uint16_t len; /* Number of bytes to read or write */
-};
+} __ec_align2;
-struct __ec_align2 ec_params_i2c_passthru {
+struct ec_params_i2c_passthru {
uint8_t port; /* I2C port number */
uint8_t num_msgs; /* Number of messages */
struct ec_params_i2c_passthru_msg msg[];
/* Data to write for all messages is concatenated here */
-};
+} __ec_align2;
-struct __ec_align1 ec_response_i2c_passthru {
+struct ec_response_i2c_passthru {
uint8_t i2c_status; /* Status flags (EC_I2C_STATUS_...) */
uint8_t num_msgs; /* Number of messages processed */
uint8_t data[]; /* Data read by messages concatenated here */
-};
+} __ec_align1;
/*****************************************************************************/
/* Power button hang detect */
@@ -3751,7 +4025,7 @@ struct __ec_align1 ec_response_i2c_passthru {
*/
#define EC_HANG_STOP_NOW (1 << 31)
-struct __ec_align4 ec_params_hang_detect {
+struct ec_params_hang_detect {
/* Flags; see EC_HANG_* */
uint32_t flags;
@@ -3760,7 +4034,7 @@ struct __ec_align4 ec_params_hang_detect {
/* Timeout in msec before generating warm reboot, if enabled */
uint16_t warm_reboot_timeout_msec;
-};
+} __ec_align4;
/*****************************************************************************/
/* Commands for battery charging */
@@ -3808,12 +4082,14 @@ enum charge_state_params {
CS_PARAM_DEBUG_SEEMS_DEAD,
CS_PARAM_DEBUG_SEEMS_DISCONNECTED,
CS_PARAM_DEBUG_BATT_REMOVED,
+ CS_PARAM_DEBUG_MANUAL_CURRENT,
+ CS_PARAM_DEBUG_MANUAL_VOLTAGE,
CS_PARAM_DEBUG_MAX = 0x2ffff,
/* Other custom param ranges go here... */
};
-struct __ec_todo_packed ec_params_charge_state {
+struct ec_params_charge_state {
uint8_t cmd; /* enum charge_state_command */
union {
/* get_state has no args */
@@ -3827,9 +4103,9 @@ struct __ec_todo_packed ec_params_charge_state {
uint32_t value; /* value to set */
} set_param;
};
-};
+} __ec_todo_packed;
-struct __ec_align4 ec_response_charge_state {
+struct ec_response_charge_state {
union {
struct __ec_align4 {
int ac;
@@ -3845,7 +4121,7 @@ struct __ec_align4 ec_response_charge_state {
/* set_param returns no args */
};
-};
+} __ec_align4;
/*
@@ -3853,9 +4129,9 @@ struct __ec_align4 ec_response_charge_state {
*/
#define EC_CMD_CHARGE_CURRENT_LIMIT 0x00A1
-struct __ec_align4 ec_params_current_limit {
+struct ec_params_current_limit {
uint32_t limit; /* in mA */
-};
+} __ec_align4;
/*
* Set maximum external voltage / current.
@@ -3863,10 +4139,10 @@ struct __ec_align4 ec_params_current_limit {
#define EC_CMD_EXTERNAL_POWER_LIMIT 0x00A2
/* Command v0 is used only on Spring and is obsolete + unsupported */
-struct __ec_align2 ec_params_external_power_limit_v1 {
+struct ec_params_external_power_limit_v1 {
uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */
uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */
-};
+} __ec_align2;
#define EC_POWER_LIMIT_NONE 0xffff
@@ -3875,10 +4151,10 @@ struct __ec_align2 ec_params_external_power_limit_v1 {
*/
#define EC_CMD_OVERRIDE_DEDICATED_CHARGER_LIMIT 0x00A3
-struct __ec_align2 ec_params_dedicated_charger_limit {
+struct ec_params_dedicated_charger_limit {
uint16_t current_lim; /* in mA */
uint16_t voltage_lim; /* in mV */
-};
+} __ec_align2;
/*****************************************************************************/
/* Hibernate/Deep Sleep Commands */
@@ -3886,15 +4162,15 @@ struct __ec_align2 ec_params_dedicated_charger_limit {
/* Set the delay before going into hibernation. */
#define EC_CMD_HIBERNATION_DELAY 0x00A8
-struct __ec_align4 ec_params_hibernation_delay {
+struct ec_params_hibernation_delay {
/*
* Seconds to wait in G3 before hibernate. Pass in 0 to read the
* current settings without changing them.
*/
uint32_t seconds;
-};
+} __ec_align4;
-struct __ec_align4 ec_response_hibernation_delay {
+struct ec_response_hibernation_delay {
/*
* The current time in seconds in which the system has been in the G3
* state. This value is reset if the EC transitions out of G3.
@@ -3912,7 +4188,7 @@ struct __ec_align4 ec_response_hibernation_delay {
* hibernating.
*/
uint32_t hibernate_delay;
-};
+} __ec_align4;
/* Inform the EC when entering a sleep state */
#define EC_CMD_HOST_SLEEP_EVENT 0x00A9
@@ -3926,9 +4202,9 @@ enum host_sleep_event {
HOST_SLEEP_EVENT_S3_WAKEABLE_SUSPEND = 5,
};
-struct __ec_align1 ec_params_host_sleep_event {
+struct ec_params_host_sleep_event {
uint8_t sleep_event;
-};
+} __ec_align1;
/*****************************************************************************/
/* Device events */
@@ -3951,14 +4227,14 @@ enum ec_device_event_param {
#define EC_DEVICE_EVENT_MASK(event_code) (1UL << (event_code % 32))
-struct __ec_align_size1 ec_params_device_event {
+struct ec_params_device_event {
uint32_t event_mask;
uint8_t param;
-};
+} __ec_align_size1;
-struct __ec_align4 ec_response_device_event {
+struct ec_response_device_event {
uint32_t event_mask;
-};
+} __ec_align4;
/*****************************************************************************/
/* Smart battery pass-through */
@@ -3973,27 +4249,27 @@ struct __ec_align4 ec_response_device_event {
#define EC_CMD_SB_READ_BLOCK 0x00B2
#define EC_CMD_SB_WRITE_BLOCK 0x00B3
-struct __ec_align1 ec_params_sb_rd {
+struct ec_params_sb_rd {
uint8_t reg;
-};
+} __ec_align1;
-struct __ec_align2 ec_response_sb_rd_word {
+struct ec_response_sb_rd_word {
uint16_t value;
-};
+} __ec_align2;
-struct __ec_align1 ec_params_sb_wr_word {
+struct ec_params_sb_wr_word {
uint8_t reg;
uint16_t value;
-};
+} __ec_align1;
-struct __ec_align1 ec_response_sb_rd_block {
+struct ec_response_sb_rd_block {
uint8_t data[32];
-};
+} __ec_align1;
-struct __ec_align1 ec_params_sb_wr_block {
+struct ec_params_sb_wr_block {
uint8_t reg;
uint16_t data[32];
-};
+} __ec_align1;
/*****************************************************************************/
/* Battery vendor parameters
@@ -4011,15 +4287,15 @@ enum ec_battery_vendor_param_mode {
BATTERY_VENDOR_PARAM_MODE_SET,
};
-struct __ec_align_size1 ec_params_battery_vendor_param {
+struct ec_params_battery_vendor_param {
uint32_t param;
uint32_t value;
uint8_t mode;
-};
+} __ec_align_size1;
-struct __ec_align4 ec_response_battery_vendor_param {
+struct ec_response_battery_vendor_param {
uint32_t value;
-};
+} __ec_align4;
/*****************************************************************************/
/*
@@ -4042,12 +4318,12 @@ enum ec_sb_fw_update_subcmd {
#define SB_FW_UPDATE_CMD_STATUS_SIZE 2
#define SB_FW_UPDATE_CMD_INFO_SIZE 8
-struct __ec_align4 ec_sb_fw_update_header {
+struct ec_sb_fw_update_header {
uint16_t subcmd; /* enum ec_sb_fw_update_subcmd */
uint16_t fw_id; /* firmware id */
-};
+} __ec_align4;
-struct __ec_align4 ec_params_sb_fw_update {
+struct ec_params_sb_fw_update {
struct ec_sb_fw_update_header hdr;
union {
/* EC_SB_FW_UPDATE_PREPARE = 0x0 */
@@ -4063,9 +4339,9 @@ struct __ec_align4 ec_params_sb_fw_update {
uint8_t data[SB_FW_UPDATE_CMD_WRITE_BLOCK_SIZE];
} write;
};
-};
+} __ec_align4;
-struct __ec_align1 ec_response_sb_fw_update {
+struct ec_response_sb_fw_update {
union {
/* EC_SB_FW_UPDATE_INFO = 0x1 */
struct __ec_align1 {
@@ -4077,7 +4353,7 @@ struct __ec_align1 ec_response_sb_fw_update {
uint8_t data[SB_FW_UPDATE_CMD_STATUS_SIZE];
} status;
};
-};
+} __ec_align1;
/*
* Entering Verified Boot Mode Command
@@ -4086,9 +4362,9 @@ struct __ec_align1 ec_response_sb_fw_update {
*/
#define EC_CMD_ENTERING_MODE 0x00B6
-struct __ec_align4 ec_params_entering_mode {
+struct ec_params_entering_mode {
int vboot_mode;
-};
+} __ec_align4;
#define VBOOT_MODE_NORMAL 0
#define VBOOT_MODE_DEVELOPER 1
@@ -4106,13 +4382,88 @@ enum ec_i2c_passthru_protect_subcmd {
EC_CMD_I2C_PASSTHRU_PROTECT_ENABLE = 0x1,
};
-struct __ec_align1 ec_params_i2c_passthru_protect {
+struct ec_params_i2c_passthru_protect {
uint8_t subcmd;
uint8_t port; /* I2C port number */
-};
+} __ec_align1;
-struct __ec_align1 ec_response_i2c_passthru_protect {
+struct ec_response_i2c_passthru_protect {
uint8_t status; /* Status flags (0: unlocked, 1: locked) */
+} __ec_align1;
+
+
+/*****************************************************************************/
+/*
+ * HDMI CEC commands
+ *
+ * These commands are for sending and receiving messages via HDMI CEC
+ */
+
+#define MAX_CEC_MSG_LEN 16
+
+/* CEC message from the AP to be written on the CEC bus */
+#define EC_CMD_CEC_WRITE_MSG 0x00B8
+
+/**
+ * struct ec_params_cec_write - Message to write to the CEC bus
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write {
+ uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* Set various CEC parameters */
+#define EC_CMD_CEC_SET 0x00BA
+
+/**
+ * struct ec_params_cec_set - CEC parameters set
+ * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
+ * or 1 to enable CEC functionality; in case cmd is
+ * CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
+ * address between 0 and 15, or 0xff to unregister
+ */
+struct ec_params_cec_set {
+ uint8_t cmd; /* enum cec_command */
+ uint8_t val;
+} __ec_align1;
+
+/* Read various CEC parameters */
+#define EC_CMD_CEC_GET 0x00BB
+
+/**
+ * struct ec_params_cec_get - CEC parameters get
+ * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ */
+struct ec_params_cec_get {
+ uint8_t cmd; /* enum cec_command */
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_get - CEC parameters get response
+ * @val: in case cmd was CEC_CMD_ENABLE, this field will be 0 if CEC is
+ * disabled or 1 if CEC functionality is enabled;
+ * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the
+ * configured logical address between 0 and 15, or 0xff if unregistered
+ */
+struct ec_response_cec_get {
+ uint8_t val;
+} __ec_align1;
+
+/* CEC parameters command */
+enum cec_command {
+ /* CEC reading, writing and events enable */
+ CEC_CMD_ENABLE,
+ /* CEC logical address */
+ CEC_CMD_LOGICAL_ADDRESS,
+};
+
+/* Events from CEC to AP */
+enum mkbp_cec_event {
+ /* Outgoing message was acknowledged by a follower */
+ EC_MKBP_CEC_SEND_OK = 1 << 0,
+ /* Outgoing message was not acknowledged */
+ EC_MKBP_CEC_SEND_FAILED = 1 << 1,
};
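+
+/*
+ * Host-side usage sketch for the set sub-commands above; ec_xfer() is a
+ * hypothetical transport helper, as in the earlier passthru sketch.
+ *
+ *	static int cec_enable_and_claim(uint8_t logical_addr)
+ *	{
+ *		struct ec_params_cec_set p = {
+ *			.cmd = CEC_CMD_ENABLE, .val = 1,
+ *		};
+ *		int ret = ec_xfer(EC_CMD_CEC_SET, &p, sizeof(p), NULL, 0);
+ *
+ *		if (ret < 0)
+ *			return ret;
+ *		p.cmd = CEC_CMD_LOGICAL_ADDRESS;
+ *		p.val = logical_addr;	// 0..15, or 0xff to unregister
+ *		return ec_xfer(EC_CMD_CEC_SET, &p, sizeof(p), NULL, 0);
+ *	}
+ */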
/*****************************************************************************/
@@ -4141,10 +4492,10 @@ enum ec_reboot_cmd {
#define EC_REBOOT_FLAG_ON_AP_SHUTDOWN (1 << 1) /* Reboot after AP shutdown */
#define EC_REBOOT_FLAG_SWITCH_RW_SLOT (1 << 2) /* Switch RW slot */
-struct __ec_align1 ec_params_reboot_ec {
+struct ec_params_reboot_ec {
uint8_t cmd; /* enum ec_reboot_cmd */
uint8_t flags; /* See EC_REBOOT_FLAG_* */
-};
+} __ec_align1;
/*
* Get information on last EC panic.
@@ -4215,11 +4566,11 @@ enum pd_charge_state {
/* Status of EC being sent to PD */
#define EC_STATUS_HIBERNATING (1 << 0)
-struct __ec_align1 ec_params_pd_status {
+struct ec_params_pd_status {
uint8_t status; /* EC status */
int8_t batt_soc; /* battery state of charge */
uint8_t charge_state; /* charging state (from enum pd_charge_state) */
-};
+} __ec_align1;
/* Status of PD being sent back to EC */
#define PD_STATUS_HOST_EVENT (1 << 0) /* Forward host event to AP */
@@ -4232,11 +4583,11 @@ struct __ec_align1 ec_params_pd_status {
#define PD_STATUS_EC_INT_ACTIVE (PD_STATUS_TCPC_ALERT_0 | \
PD_STATUS_TCPC_ALERT_1 | \
PD_STATUS_HOST_EVENT)
-struct __ec_align_size1 ec_response_pd_status {
+struct ec_response_pd_status {
uint32_t curr_lim_ma; /* input current limit */
uint16_t status; /* PD MCU status */
int8_t active_charge_port; /* active charging port */
-};
+} __ec_align_size1;
/* AP to PD MCU host event status command, cleared on read */
#define EC_CMD_PD_HOST_EVENT_STATUS 0x0104
@@ -4246,9 +4597,9 @@ struct __ec_align_size1 ec_response_pd_status {
#define PD_EVENT_POWER_CHANGE (1 << 1)
#define PD_EVENT_IDENTITY_RECEIVED (1 << 2)
#define PD_EVENT_DATA_SWAP (1 << 3)
-struct __ec_align4 ec_response_host_event_status {
+struct ec_response_host_event_status {
uint32_t status; /* PD MCU host event status */
-};
+} __ec_align4;
/* Set USB type-C port role and muxes */
#define EC_CMD_USB_PD_CONTROL 0x0101
@@ -4281,12 +4632,12 @@ enum usb_pd_control_swap {
USB_PD_CTRL_SWAP_COUNT
};
-struct __ec_align1 ec_params_usb_pd_control {
+struct ec_params_usb_pd_control {
uint8_t port;
uint8_t role;
uint8_t mux;
uint8_t swap;
-};
+} __ec_align1;
#define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */
#define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */
@@ -4300,35 +4651,35 @@ struct __ec_align1 ec_params_usb_pd_control {
#define PD_CTRL_RESP_ROLE_USB_COMM (1 << 5) /* Partner USB comm capable */
#define PD_CTRL_RESP_ROLE_EXT_POWERED (1 << 6) /* Partner externally powered */
-struct __ec_align1 ec_response_usb_pd_control {
+struct ec_response_usb_pd_control {
uint8_t enabled;
uint8_t role;
uint8_t polarity;
uint8_t state;
-};
+} __ec_align1;
-struct __ec_align1 ec_response_usb_pd_control_v1 {
+struct ec_response_usb_pd_control_v1 {
uint8_t enabled;
uint8_t role;
uint8_t polarity;
char state[32];
-};
+} __ec_align1;
#define EC_CMD_USB_PD_PORTS 0x0102
/* Maximum number of PD ports on a device, num_ports will be <= this */
#define EC_USB_PD_MAX_PORTS 8
-struct __ec_align1 ec_response_usb_pd_ports {
+struct ec_response_usb_pd_ports {
uint8_t num_ports;
-};
+} __ec_align1;
#define EC_CMD_USB_PD_POWER_INFO 0x0103
#define PD_POWER_CHARGING_PORT 0xff
-struct __ec_align1 ec_params_usb_pd_power_info {
+struct ec_params_usb_pd_power_info {
uint8_t port;
-};
+} __ec_align1;
enum usb_chg_type {
USB_CHG_TYPE_NONE,
@@ -4350,21 +4701,32 @@ enum usb_power_roles {
USB_PD_PORT_POWER_SINK_NOT_CHARGING,
};
-struct __ec_align2 usb_chg_measures {
+struct usb_chg_measures {
uint16_t voltage_max;
uint16_t voltage_now;
uint16_t current_max;
uint16_t current_lim;
-};
+} __ec_align2;
-struct __ec_align4 ec_response_usb_pd_power_info {
+struct ec_response_usb_pd_power_info {
uint8_t role;
uint8_t type;
uint8_t dualrole;
uint8_t reserved1;
struct usb_chg_measures meas;
uint32_t max_power;
-};
+} __ec_align4;
+
+
+/*
+ * This command returns the number of USB PD charge ports plus the number
+ * of dedicated ports present.
+ * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports.
+ */
+#define EC_CMD_CHARGE_PORT_COUNT 0x0105
+struct ec_response_charge_port_count {
+ uint8_t port_count;
+} __ec_align1;
/* Write USB-PD device FW */
#define EC_CMD_USB_PD_FW_UPDATE 0x0110
@@ -4376,41 +4738,43 @@ enum usb_pd_fw_update_cmds {
USB_PD_FW_ERASE_SIG,
};
-struct __ec_align4 ec_params_usb_pd_fw_update {
+struct ec_params_usb_pd_fw_update {
uint16_t dev_id;
uint8_t cmd;
uint8_t port;
uint32_t size; /* Size to write in bytes */
/* Followed by data to write */
-};
+} __ec_align4;
/* Write USB-PD Accessory RW_HASH table entry */
#define EC_CMD_USB_PD_RW_HASH_ENTRY 0x0111
/* RW hash is first 20 bytes of SHA-256 of RW section */
#define PD_RW_HASH_SIZE 20
-struct __ec_align1 ec_params_usb_pd_rw_hash_entry {
+struct ec_params_usb_pd_rw_hash_entry {
uint16_t dev_id;
uint8_t dev_rw_hash[PD_RW_HASH_SIZE];
- uint8_t reserved; /* For alignment of current_image
+ uint8_t reserved; /*
+ * For alignment of current_image
* TODO(rspangler) but it's not aligned!
- * Should have been reserved[2]. */
+ * Should have been reserved[2].
+ */
uint32_t current_image; /* One of ec_current_image */
-};
+} __ec_align1;
/* Read USB-PD Accessory info */
#define EC_CMD_USB_PD_DEV_INFO 0x0112
-struct __ec_align1 ec_params_usb_pd_info_request {
+struct ec_params_usb_pd_info_request {
uint8_t port;
-};
+} __ec_align1;
/* Read USB-PD Device discovery info */
#define EC_CMD_USB_PD_DISCOVERY 0x0113
-struct __ec_align_size1 ec_params_usb_pd_discovery_entry {
+struct ec_params_usb_pd_discovery_entry {
uint16_t vid; /* USB-IF VID */
uint16_t pid; /* USB-IF PID */
uint8_t ptype; /* product type (hub,periph,cable,ama) */
-};
+} __ec_align_size1;
/* Override default charge behavior */
#define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114
@@ -4422,9 +4786,9 @@ enum usb_pd_override_ports {
/* [0, CONFIG_USB_PD_PORT_COUNT): Port# */
};
-struct __ec_align2 ec_params_charge_port_override {
+struct ec_params_charge_port_override {
int16_t override_port; /* Override port# */
-};
+} __ec_align2;
/*
* Read (and delete) one entry of PD event log.
@@ -4433,14 +4797,13 @@ struct __ec_align2 ec_params_charge_port_override {
*/
#define EC_CMD_PD_GET_LOG_ENTRY 0x0115
-struct __ec_align4 ec_response_pd_log {
+struct ec_response_pd_log {
uint32_t timestamp; /* relative timestamp in milliseconds */
uint8_t type; /* event type : see PD_EVENT_xx below */
uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */
uint16_t data; /* type-defined data payload */
uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */
-};
-
+} __ec_align4;
/* The timestamp is the microsecond counter shifted to get about a ms. */
#define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */
@@ -4502,18 +4865,18 @@ struct __ec_align4 ec_response_pd_log {
/*
* PD_EVENT_VIDEO_CODEC payload is "struct mcdp_info".
*/
-struct __ec_align4 mcdp_version {
+struct mcdp_version {
uint8_t major;
uint8_t minor;
uint16_t build;
-};
+} __ec_align4;
-struct __ec_align4 mcdp_info {
+struct mcdp_info {
uint8_t family[2];
uint8_t chipid[2];
struct mcdp_version irom;
struct mcdp_version fw;
-};
+} __ec_align4;
/* struct mcdp_info field decoding */
#define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1])
@@ -4521,16 +4884,16 @@ struct __ec_align4 mcdp_info {
/* Get/Set USB-PD Alternate mode info */
#define EC_CMD_USB_PD_GET_AMODE 0x0116
-struct __ec_align_size1 ec_params_usb_pd_get_mode_request {
+struct ec_params_usb_pd_get_mode_request {
uint16_t svid_idx; /* SVID index to get */
uint8_t port; /* port */
-};
+} __ec_align_size1;
-struct __ec_align4 ec_params_usb_pd_get_mode_response {
+struct ec_params_usb_pd_get_mode_response {
uint16_t svid; /* SVID */
uint16_t opos; /* Object Position */
uint32_t vdo[6]; /* Mode VDOs */
-};
+} __ec_align4;
#define EC_CMD_USB_PD_SET_AMODE 0x0117
@@ -4541,20 +4904,20 @@ enum pd_mode_cmd {
PD_MODE_CMD_COUNT,
};
-struct __ec_align4 ec_params_usb_pd_set_mode_request {
+struct ec_params_usb_pd_set_mode_request {
uint32_t cmd; /* enum pd_mode_cmd */
uint16_t svid; /* SVID to set */
uint8_t opos; /* Object Position */
uint8_t port; /* port */
-};
+} __ec_align4;
/* Ask the PD MCU to record a log of a requested type */
#define EC_CMD_PD_WRITE_LOG_ENTRY 0x0118
-struct __ec_align1 ec_params_pd_write_log_entry {
+struct ec_params_pd_write_log_entry {
uint8_t type; /* event type : see PD_EVENT_xx above */
uint8_t port; /* port#, or 0 for events unrelated to a given port */
-};
+} __ec_align1;
/* Control USB-PD chip */
@@ -4568,36 +4931,37 @@ enum ec_pd_control_cmd {
PD_CHIP_ON, /* Power on the PD chip */
};
-struct __ec_align1 ec_params_pd_control {
- uint8_t chip; /* chip id (should be 0) */
+struct ec_params_pd_control {
+ uint8_t chip; /* chip id */
uint8_t subcmd;
-};
+} __ec_align1;
/* Get info about USB-C SS muxes */
#define EC_CMD_USB_PD_MUX_INFO 0x011A
-struct __ec_align1 ec_params_usb_pd_mux_info {
+struct ec_params_usb_pd_mux_info {
uint8_t port; /* USB-C port number */
-};
+} __ec_align1;
/* Flags representing mux state */
-#define USB_PD_MUX_USB_ENABLED (1 << 0)
-#define USB_PD_MUX_DP_ENABLED (1 << 1)
-#define USB_PD_MUX_POLARITY_INVERTED (1 << 2)
-#define USB_PD_MUX_HPD_IRQ (1 << 3)
+#define USB_PD_MUX_USB_ENABLED (1 << 0) /* USB connected */
+#define USB_PD_MUX_DP_ENABLED (1 << 1) /* DP connected */
+#define USB_PD_MUX_POLARITY_INVERTED (1 << 2) /* CC line Polarity inverted */
+#define USB_PD_MUX_HPD_IRQ (1 << 3) /* HPD IRQ is asserted */
+#define USB_PD_MUX_HPD_LVL (1 << 4) /* HPD level is asserted */
-struct __ec_align1 ec_response_usb_pd_mux_info {
+struct ec_response_usb_pd_mux_info {
uint8_t flags; /* USB_PD_MUX_*-encoded USB mux state */
-};
+} __ec_align1;
#define EC_CMD_PD_CHIP_INFO 0x011B
-struct __ec_align1 ec_params_pd_chip_info {
+struct ec_params_pd_chip_info {
uint8_t port; /* USB-C port number */
uint8_t renew; /* Force renewal */
-};
+} __ec_align1;
-struct __ec_align2 ec_response_pd_chip_info {
+struct ec_response_pd_chip_info {
uint16_t vendor_id;
uint16_t product_id;
uint16_t device_id;
@@ -4605,14 +4969,28 @@ struct __ec_align2 ec_response_pd_chip_info {
uint8_t fw_version_string[8];
uint64_t fw_version_number;
};
-};
+} __ec_align2;
+
+struct ec_response_pd_chip_info_v1 {
+ uint16_t vendor_id;
+ uint16_t product_id;
+ uint16_t device_id;
+ union {
+ uint8_t fw_version_string[8];
+ uint64_t fw_version_number;
+ };
+ union {
+ uint8_t min_req_fw_version_string[8];
+ uint64_t min_req_fw_version_number;
+ };
+} __ec_align2;
/* Run RW signature verification and get status */
#define EC_CMD_RWSIG_CHECK_STATUS 0x011C
-struct __ec_align4 ec_response_rwsig_check_status {
+struct ec_response_rwsig_check_status {
uint32_t status;
-};
+} __ec_align4;
/* For controlling RWSIG task */
#define EC_CMD_RWSIG_ACTION 0x011D
@@ -4622,16 +5000,16 @@ enum rwsig_action {
RWSIG_ACTION_CONTINUE = 1, /* Jump to RW immediately */
};
-struct __ec_align4 ec_params_rwsig_action {
+struct ec_params_rwsig_action {
uint32_t action;
-};
+} __ec_align4;
/* Run verification on a slot */
#define EC_CMD_EFS_VERIFY 0x011E
-struct __ec_align1 ec_params_efs_verify {
+struct ec_params_efs_verify {
uint8_t region; /* enum ec_flash_region */
-};
+} __ec_align1;
/*
* Retrieve info from Cros Board Info store. Response is based on the data
@@ -4646,9 +5024,12 @@ struct __ec_align1 ec_params_efs_verify {
#define EC_CMD_SET_CROS_BOARD_INFO 0x0120
enum cbi_data_tag {
- CBI_TAG_BOARD_VERSION = 0, /* uint16_t or uint8_t[] = {minor,major} */
- CBI_TAG_OEM_ID = 1, /* uint8_t */
- CBI_TAG_SKU_ID = 2, /* uint8_t */
+ CBI_TAG_BOARD_VERSION = 0, /* uint32_t or smaller */
+ CBI_TAG_OEM_ID = 1, /* uint32_t or smaller */
+ CBI_TAG_SKU_ID = 2, /* uint32_t or smaller */
+	CBI_TAG_DRAM_PART_NUM = 3, /* variable length ASCII, NUL terminated. */
+	CBI_TAG_OEM_NAME = 4, /* variable length ASCII, NUL terminated. */
+ CBI_TAG_MODEL_ID = 5, /* uint32_t or smaller */
CBI_TAG_COUNT,
};
@@ -4660,10 +5041,10 @@ enum cbi_data_tag {
*/
#define CBI_GET_RELOAD (1 << 0)
-struct __ec_align4 ec_params_get_cbi {
+struct ec_params_get_cbi {
uint32_t tag; /* enum cbi_data_tag */
uint32_t flag; /* CBI_GET_* */
-};
+} __ec_align4;
/*
* Flags to control write behavior.
@@ -4676,13 +5057,116 @@ struct __ec_align4 ec_params_get_cbi {
#define CBI_SET_NO_SYNC (1 << 0)
#define CBI_SET_INIT (1 << 1)
-struct __ec_align1 ec_params_set_cbi {
+struct ec_params_set_cbi {
uint32_t tag; /* enum cbi_data_tag */
uint32_t flag; /* CBI_SET_* */
uint32_t size; /* Data size */
uint8_t data[]; /* For string and raw data */
+} __ec_align1;
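+
+/*
+ * Usage sketch (not from this patch): read one integer tag from the CBI
+ * store. Integer tags are "uint32_t or smaller" per the tag list above;
+ * ec_xfer() is the same hypothetical transport helper as before.
+ *
+ *	static int cbi_get_sku_id(uint32_t *sku)
+ *	{
+ *		struct ec_params_get_cbi p = {
+ *			.tag = CBI_TAG_SKU_ID,
+ *			.flag = 0,	// cached copy OK; no CBI_GET_RELOAD
+ *		};
+ *
+ *		return ec_xfer(EC_CMD_GET_CROS_BOARD_INFO, &p, sizeof(p),
+ *			       sku, sizeof(*sku));
+ *	}
+ */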
+
+/*
+ * Information about resets of the AP by the EC and the EC's own uptime.
+ */
+#define EC_CMD_GET_UPTIME_INFO 0x0121
+
+struct ec_response_uptime_info {
+ /*
+ * Number of milliseconds since the last EC boot. Sysjump resets
+ * typically do not restart the EC's time_since_boot epoch.
+ *
+ * WARNING: The EC's sense of time is much less accurate than the AP's
+ * sense of time, in both phase and frequency. This timebase is similar
+ * to CLOCK_MONOTONIC_RAW, but with 1% or more frequency error.
+ */
+ uint32_t time_since_ec_boot_ms;
+
+ /*
+ * Number of times the AP was reset by the EC since the last EC boot.
+ * Note that the AP may be held in reset by the EC during the initial
+ * boot sequence, such that the very first AP boot may count as more
+ * than one here.
+ */
+ uint32_t ap_resets_since_ec_boot;
+
+ /*
+ * The set of flags which describe the EC's most recent reset. See
+ * include/system.h RESET_FLAG_* for details.
+ */
+ uint32_t ec_reset_flags;
+
+ /* Empty log entries have both the cause and timestamp set to zero. */
+ struct ap_reset_log_entry {
+ /*
+ * See include/chipset.h: enum chipset_{reset,shutdown}_reason
+ * for details.
+ */
+ uint16_t reset_cause;
+
+ /* Reserved for protocol growth. */
+ uint16_t reserved;
+
+ /*
+ * The time of the reset's assertion, in milliseconds since the
+ * last EC boot, in the same epoch as time_since_ec_boot_ms.
+ * Set to zero if the log entry is empty.
+ */
+ uint32_t reset_time_ms;
+ } recent_ap_reset[4];
+} __ec_align4;
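+
+/*
+ * Decoding sketch for the reset log above, skipping entries that are
+ * empty per the all-zero convention documented on the struct:
+ *
+ *	static void dump_ap_resets(const struct ec_response_uptime_info *r)
+ *	{
+ *		int i;
+ *
+ *		for (i = 0; i < 4; i++) {
+ *			const struct ap_reset_log_entry *e =
+ *				&r->recent_ap_reset[i];
+ *
+ *			if (!e->reset_cause && !e->reset_time_ms)
+ *				continue;	// empty slot
+ *			printf("AP reset cause %u at +%u ms\n",
+ *			       e->reset_cause, e->reset_time_ms);
+ *		}
+ *	}
+ */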
+
+/*
+ * Add entropy to the device secret (stored in the rollback region).
+ *
+ * Depending on the chip, the operation may take a long time (e.g. to erase
+ * flash), so the commands are asynchronous.
+ */
+#define EC_CMD_ADD_ENTROPY 0x0122
+
+enum add_entropy_action {
+ /* Add entropy to the current secret. */
+ ADD_ENTROPY_ASYNC = 0,
+ /*
+ * Add entropy, and also make sure that the previous secret is erased.
+ * (this can be implemented by adding entropy multiple times until
+	 * all rollback blocks have been overwritten).
+ */
+ ADD_ENTROPY_RESET_ASYNC = 1,
+ /* Read back result from the previous operation. */
+ ADD_ENTROPY_GET_RESULT = 2,
};
+struct ec_params_rollback_add_entropy {
+ uint8_t action;
+} __ec_align1;
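+
+/*
+ * Because the command is asynchronous, a caller kicks the operation off
+ * and then polls for completion. A sketch, assuming the conventional
+ * EC_RES_BUSY "try again" result code and the hypothetical ec_xfer():
+ *
+ *	static int add_entropy_sync(void)
+ *	{
+ *		struct ec_params_rollback_add_entropy p = {
+ *			.action = ADD_ENTROPY_ASYNC,
+ *		};
+ *		int ret = ec_xfer(EC_CMD_ADD_ENTROPY, &p, sizeof(p),
+ *				  NULL, 0);
+ *
+ *		if (ret < 0)
+ *			return ret;
+ *		p.action = ADD_ENTROPY_GET_RESULT;
+ *		do {
+ *			ret = ec_xfer(EC_CMD_ADD_ENTROPY, &p, sizeof(p),
+ *				      NULL, 0);
+ *		} while (ret == -EC_RES_BUSY);	// assumed busy code
+ *		return ret;
+ *	}
+ */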
+
+/*
+ * Perform a single read of a given ADC channel.
+ */
+#define EC_CMD_ADC_READ 0x0123
+
+struct ec_params_adc_read {
+ uint8_t adc_channel;
+} __ec_align1;
+
+struct ec_response_adc_read {
+ int32_t adc_value;
+} __ec_align4;
+
+/*
+ * Read back rollback info
+ */
+#define EC_CMD_ROLLBACK_INFO 0x0124
+
+struct ec_response_rollback_info {
+ int32_t id; /* Incrementing number to indicate which region to use. */
+ int32_t rollback_min_version;
+ int32_t rw_rollback_version;
+} __ec_align4;
+
+
+/* Issue AP reset */
+#define EC_CMD_AP_RESET 0x0125
+
/*****************************************************************************/
/* The command range 0x200-0x2FF is reserved for Rotor. */
@@ -4701,77 +5185,85 @@ struct __ec_align1 ec_params_set_cbi {
#define EC_FP_FLAG_NOT_COMPLETE 0x1
-struct __ec_align2 ec_params_fp_passthru {
+struct ec_params_fp_passthru {
uint16_t len; /* Number of bytes to write then read */
uint16_t flags; /* EC_FP_FLAG_xxx */
uint8_t data[]; /* Data to send */
-};
-
-/* Fingerprint sensor configuration command: prototyping ONLY */
-#define EC_CMD_FP_SENSOR_CONFIG 0x0401
-
-#define EC_FP_SENSOR_CONFIG_MAX_REGS 16
-
-struct __ec_align2 ec_params_fp_sensor_config {
- uint8_t count; /* Number of setup registers */
- /*
- * the value to send to each of the 'count' setup registers
- * is stored in the 'data' array for 'len' bytes just after
- * the previous one.
- */
- uint8_t len[EC_FP_SENSOR_CONFIG_MAX_REGS];
- uint8_t data[];
-};
+} __ec_align2;
/* Configure the Fingerprint MCU behavior */
#define EC_CMD_FP_MODE 0x0402
/* Put the sensor in its lowest power mode */
-#define FP_MODE_DEEPSLEEP (1<<0)
+#define FP_MODE_DEEPSLEEP (1<<0)
/* Wait to see a finger on the sensor */
-#define FP_MODE_FINGER_DOWN (1<<1)
+#define FP_MODE_FINGER_DOWN (1<<1)
/* Poll until the finger has left the sensor */
-#define FP_MODE_FINGER_UP (1<<2)
+#define FP_MODE_FINGER_UP (1<<2)
/* Capture the current finger image */
-#define FP_MODE_CAPTURE (1<<3)
-/* Capture types defined in bits [30..28] */
-#define FP_MODE_CAPTURE_TYPE_SHIFT 28
-#define FP_MODE_CAPTURE_TYPE_MASK 0x7
-/* Full blown vendor-defined capture (produces 'frame_size' bytes) */
-#define FP_CAPTURE_VENDOR_FORMAT 0
-/* Simple raw image capture (produces width x height x bpp bits) */
-#define FP_CAPTURE_SIMPLE_IMAGE 1
-/* Self test pattern (e.g. checkerboard) */
-#define FP_CAPTURE_PATTERN0 2
-/* Self test pattern (e.g. inverted checkerboard) */
-#define FP_CAPTURE_PATTERN1 3
-/* Capture for Quality test with fixed contrast */
-#define FP_CAPTURE_QUALITY_TEST 4
-/* Extracts the capture type from the sensor 'mode' word */
-#define FP_CAPTURE_TYPE(mode) (((mode) >> FP_MODE_CAPTURE_TYPE_SHIFT) \
- & FP_MODE_CAPTURE_TYPE_MASK)
+#define FP_MODE_CAPTURE (1<<3)
/* Finger enrollment session on-going */
#define FP_MODE_ENROLL_SESSION (1<<4)
/* Enroll the current finger image */
#define FP_MODE_ENROLL_IMAGE (1<<5)
/* Try to match the current finger image */
#define FP_MODE_MATCH (1<<6)
+/* Reset and re-initialize the sensor. */
+#define FP_MODE_RESET_SENSOR (1<<7)
/* special value: don't change anything, just read back the current mode */
#define FP_MODE_DONT_CHANGE (1<<31)
-struct __ec_align4 ec_params_fp_mode {
- uint32_t mode; /* as defined by FP_MODE_ constants */
+#define FP_VALID_MODES (FP_MODE_DEEPSLEEP | \
+ FP_MODE_FINGER_DOWN | \
+ FP_MODE_FINGER_UP | \
+ FP_MODE_CAPTURE | \
+ FP_MODE_ENROLL_SESSION | \
+ FP_MODE_ENROLL_IMAGE | \
+ FP_MODE_MATCH | \
+ FP_MODE_RESET_SENSOR | \
+ FP_MODE_DONT_CHANGE)
+
+/* Capture types defined in bits [30..28] */
+#define FP_MODE_CAPTURE_TYPE_SHIFT 28
+#define FP_MODE_CAPTURE_TYPE_MASK (0x7 << FP_MODE_CAPTURE_TYPE_SHIFT)
+/*
+ * This enum must remain ordered, if you add new values you must ensure that
+ * FP_CAPTURE_TYPE_MAX is still the last one.
+ */
+enum fp_capture_type {
+ /* Full blown vendor-defined capture (produces 'frame_size' bytes) */
+ FP_CAPTURE_VENDOR_FORMAT = 0,
+ /* Simple raw image capture (produces width x height x bpp bits) */
+ FP_CAPTURE_SIMPLE_IMAGE = 1,
+ /* Self test pattern (e.g. checkerboard) */
+ FP_CAPTURE_PATTERN0 = 2,
+ /* Self test pattern (e.g. inverted checkerboard) */
+ FP_CAPTURE_PATTERN1 = 3,
+ /* Capture for Quality test with fixed contrast */
+ FP_CAPTURE_QUALITY_TEST = 4,
+ /* Capture for pixel reset value test */
+ FP_CAPTURE_RESET_TEST = 5,
+ FP_CAPTURE_TYPE_MAX,
};
+/* Extracts the capture type from the sensor 'mode' word */
+#define FP_CAPTURE_TYPE(mode) (((mode) & FP_MODE_CAPTURE_TYPE_MASK) \
+ >> FP_MODE_CAPTURE_TYPE_SHIFT)
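+
+/*
+ * Worked example of the layout above: the FP_MODE_* flags occupy the low
+ * bits while the capture type sits in bits [30:28], so a capture request
+ * combines both and FP_CAPTURE_TYPE() recovers the type.
+ *
+ *	static uint32_t fp_capture_mode(enum fp_capture_type type)
+ *	{
+ *		uint32_t mode = FP_MODE_CAPTURE |
+ *			(((uint32_t)type << FP_MODE_CAPTURE_TYPE_SHIFT) &
+ *			 FP_MODE_CAPTURE_TYPE_MASK);
+ *
+ *		// Round trip: FP_CAPTURE_TYPE(mode) == type
+ *		return mode;
+ *	}
+ */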
-struct __ec_align4 ec_response_fp_mode {
+struct ec_params_fp_mode {
uint32_t mode; /* as defined by FP_MODE_ constants */
-};
+} __ec_align4;
+
+struct ec_response_fp_mode {
+ uint32_t mode; /* as defined by FP_MODE_ constants */
+} __ec_align4;
/* Retrieve Fingerprint sensor information */
#define EC_CMD_FP_INFO 0x0403
/* Number of dead pixels detected on the last maintenance */
#define FP_ERROR_DEAD_PIXELS(errors) ((errors) & 0x3FF)
+/* Unknown number of dead pixels detected on the last maintenance */
+#define FP_ERROR_DEAD_PIXELS_UNKNOWN (0x3FF)
/* No interrupt from the sensor */
#define FP_ERROR_NO_IRQ (1 << 12)
/* SPI communication error */
@@ -4781,7 +5273,7 @@ struct __ec_align4 ec_response_fp_mode {
/* Sensor initialization failed */
#define FP_ERROR_INIT_FAIL (1 << 15)
-struct __ec_align4 ec_response_fp_info_v0 {
+struct ec_response_fp_info_v0 {
/* Sensor identification */
uint32_t vendor_id;
uint32_t product_id;
@@ -4794,9 +5286,9 @@ struct __ec_align4 ec_response_fp_info_v0 {
uint16_t height;
uint16_t bpp;
uint16_t errors; /* see FP_ERROR_ flags above */
-};
+} __ec_align4;
-struct __ec_align4 ec_response_fp_info {
+struct ec_response_fp_info {
/* Sensor identification */
uint32_t vendor_id;
uint32_t product_id;
@@ -4814,18 +5306,48 @@ struct __ec_align4 ec_response_fp_info {
uint16_t template_max; /* maximum number of fingers/templates */
uint16_t template_valid; /* number of valid fingers/templates */
uint32_t template_dirty; /* bitmap of templates with MCU side changes */
-};
+ uint32_t template_version; /* version of the template format */
+} __ec_align4;
/* Get the last captured finger frame or a template content */
#define EC_CMD_FP_FRAME 0x0404
/* constants defining the 'offset' field which also contains the frame index */
#define FP_FRAME_INDEX_SHIFT 28
+/* Frame buffer where the captured image is stored */
#define FP_FRAME_INDEX_RAW_IMAGE 0
-#define FP_FRAME_TEMPLATE_INDEX(offset) ((offset) >> FP_FRAME_INDEX_SHIFT)
+/* First frame buffer holding a template */
+#define FP_FRAME_INDEX_TEMPLATE 1
+#define FP_FRAME_GET_BUFFER_INDEX(offset) ((offset) >> FP_FRAME_INDEX_SHIFT)
#define FP_FRAME_OFFSET_MASK 0x0FFFFFFF
-struct __ec_align4 ec_params_fp_frame {
+/* Version of the format of the encrypted templates. */
+#define FP_TEMPLATE_FORMAT_VERSION 3
+
+/* Constants for encryption parameters */
+#define FP_CONTEXT_NONCE_BYTES 12
+#define FP_CONTEXT_USERID_WORDS (32 / sizeof(uint32_t))
+#define FP_CONTEXT_TAG_BYTES 16
+#define FP_CONTEXT_SALT_BYTES 16
+#define FP_CONTEXT_TPM_BYTES 32
+
+struct ec_fp_template_encryption_metadata {
+ /*
+ * Version of the structure format (N=3).
+ */
+ uint16_t struct_version;
+ /* Reserved bytes, set to 0. */
+ uint16_t reserved;
+ /*
+	 * The salt is *only* ever used for key derivation. The nonce is unique;
+ * a different one is used for every message.
+ */
+ uint8_t nonce[FP_CONTEXT_NONCE_BYTES];
+ uint8_t salt[FP_CONTEXT_SALT_BYTES];
+ uint8_t tag[FP_CONTEXT_TAG_BYTES];
+};
+
+struct ec_params_fp_frame {
/*
* The offset contains the template index or FP_FRAME_INDEX_RAW_IMAGE
* in the high nibble, and the real offset within the frame in
@@ -4833,7 +5355,7 @@ struct __ec_align4 ec_params_fp_frame {
*/
uint32_t offset;
uint32_t size;
-};
+} __ec_align4;
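+
+/*
+ * Sketch of composing the offset word described above: buffer index in
+ * the top nibble, byte offset within that buffer in the low 28 bits.
+ *
+ *	static void fp_frame_request(struct ec_params_fp_frame *p,
+ *				     uint32_t buf_index, uint32_t offset,
+ *				     uint32_t size)
+ *	{
+ *		p->offset = (buf_index << FP_FRAME_INDEX_SHIFT) |
+ *			    (offset & FP_FRAME_OFFSET_MASK);
+ *		p->size = size;
+ *	}
+ *
+ * e.g. fp_frame_request(&p, FP_FRAME_INDEX_TEMPLATE, 0, 4096) reads the
+ * first 4 KiB of the first template buffer.
+ */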
/* Load a template into the MCU */
#define EC_CMD_FP_TEMPLATE 0x0405
@@ -4841,27 +5363,47 @@ struct __ec_align4 ec_params_fp_frame {
/* Flag in the 'size' field indicating that the full template has been sent */
#define FP_TEMPLATE_COMMIT 0x80000000
-struct __ec_align4 ec_params_fp_template {
+struct ec_params_fp_template {
uint32_t offset;
uint32_t size;
uint8_t data[];
-};
+} __ec_align4;
/* Clear the current fingerprint user context and set a new one */
#define EC_CMD_FP_CONTEXT 0x0406
-#define FP_CONTEXT_USERID_WORDS (32 / sizeof(uint32_t))
-#define FP_CONTEXT_NONCE_WORDS (32 / sizeof(uint32_t))
-
-struct __ec_align4 ec_params_fp_context {
+struct ec_params_fp_context {
uint32_t userid[FP_CONTEXT_USERID_WORDS];
- /* TODO(b/73337313) mostly a placeholder, details to be implemented */
- uint32_t nonce[FP_CONTEXT_NONCE_WORDS];
-};
-
-struct __ec_align4 ec_response_fp_context {
- uint32_t nonce[FP_CONTEXT_NONCE_WORDS];
-};
+} __ec_align4;
+
+#define EC_CMD_FP_STATS 0x0407
+
+#define FPSTATS_CAPTURE_INV (1 << 0)
+#define FPSTATS_MATCHING_INV (1 << 1)
+
+struct ec_response_fp_stats {
+ uint32_t capture_time_us;
+ uint32_t matching_time_us;
+ uint32_t overall_time_us;
+ struct {
+ uint32_t lo;
+ uint32_t hi;
+ } overall_t0;
+ uint8_t timestamps_invalid;
+ int8_t template_matched;
+} __ec_align2;
+
+#define EC_CMD_FP_SEED 0x0408
+struct ec_params_fp_seed {
+ /*
+ * Version of the structure format (N=3).
+ */
+ uint16_t struct_version;
+ /* Reserved bytes, set to 0. */
+ uint16_t reserved;
+ /* Seed from the TPM. */
+ uint8_t seed[FP_CONTEXT_TPM_BYTES];
+} __ec_align4;
/*****************************************************************************/
/* Touchpad MCU commands: range 0x0500-0x05FF */
@@ -4872,10 +5414,10 @@ struct __ec_align4 ec_response_fp_context {
/* Get number of frame types, and the size of each type */
#define EC_CMD_TP_FRAME_INFO 0x0501
-struct __ec_align4 ec_response_tp_frame_info {
+struct ec_response_tp_frame_info {
uint32_t n_frames;
uint32_t frame_sizes[0];
-};
+} __ec_align4;
/* Create a snapshot of current frame readings */
#define EC_CMD_TP_FRAME_SNAPSHOT 0x0502
@@ -4883,11 +5425,11 @@ struct __ec_align4 ec_response_tp_frame_info {
/* Read the frame */
#define EC_CMD_TP_FRAME_GET 0x0503
-struct __ec_align4 ec_params_tp_frame_get {
+struct ec_params_tp_frame_get {
uint32_t frame_index;
uint32_t offset;
uint32_t size;
-};
+} __ec_align4;
/*****************************************************************************/
/* EC-EC communication commands: range 0x0600-0x06FF */
@@ -4900,20 +5442,34 @@ struct __ec_align4 ec_params_tp_frame_get {
*/
#define EC_CMD_BATTERY_GET_STATIC 0x0600
-struct __ec_align_size1 ec_params_battery_static_info {
- uint8_t index; /* Battery index. */
-};
-
-struct __ec_align4 ec_response_battery_static_info {
- uint16_t design_capacity; /* Battery Design Capacity (mAh) */
- uint16_t design_voltage; /* Battery Design Voltage (mV) */
- char manufacturer[EC_COMM_TEXT_MAX]; /* Battery Manufacturer String */
- char model[EC_COMM_TEXT_MAX]; /* Battery Model Number String */
- char serial[EC_COMM_TEXT_MAX]; /* Battery Serial Number String */
- char type[EC_COMM_TEXT_MAX]; /* Battery Type String */
+/**
+ * struct ec_params_battery_static_info - Battery static info parameters
+ * @index: Battery index.
+ */
+struct ec_params_battery_static_info {
+ uint8_t index;
+} __ec_align_size1;
+
+/**
+ * struct ec_response_battery_static_info - Battery static info response
+ * @design_capacity: Battery Design Capacity (mAh)
+ * @design_voltage: Battery Design Voltage (mV)
+ * @manufacturer: Battery Manufacturer String
+ * @model: Battery Model Number String
+ * @serial: Battery Serial Number String
+ * @type: Battery Type String
+ * @cycle_count: Battery Cycle Count
+ */
+struct ec_response_battery_static_info {
+ uint16_t design_capacity;
+ uint16_t design_voltage;
+ char manufacturer[EC_COMM_TEXT_MAX];
+ char model[EC_COMM_TEXT_MAX];
+ char serial[EC_COMM_TEXT_MAX];
+ char type[EC_COMM_TEXT_MAX];
/* TODO(crbug.com/795991): Consider moving to dynamic structure. */
- uint32_t cycle_count; /* Battery Cycle Count */
-};
+ uint32_t cycle_count;
+} __ec_align4;
/*
* Get battery dynamic information, i.e. information that is likely to change
@@ -4921,39 +5477,54 @@ struct __ec_align4 ec_response_battery_static_info {
*/
#define EC_CMD_BATTERY_GET_DYNAMIC 0x0601
-struct __ec_align_size1 ec_params_battery_dynamic_info {
- uint8_t index; /* Battery index. */
-};
-
-struct __ec_align2 ec_response_battery_dynamic_info {
- int16_t actual_voltage; /* Battery voltage (mV) */
- int16_t actual_current; /* Battery current (mA); negative=discharging */
- int16_t remaining_capacity; /* Remaining capacity (mAh) */
- int16_t full_capacity; /* Capacity (mAh, might change occasionally) */
- int16_t flags; /* Flags, see EC_BATT_FLAG_* */
- int16_t desired_voltage; /* Charging voltage desired by battery (mV) */
- int16_t desired_current; /* Charging current desired by battery (mA) */
-};
+/**
+ * struct ec_params_battery_dynamic_info - Battery dynamic info parameters
+ * @index: Battery index.
+ */
+struct ec_params_battery_dynamic_info {
+ uint8_t index;
+} __ec_align_size1;
+
+/**
+ * struct ec_response_battery_dynamic_info - Battery dynamic info response
+ * @actual_voltage: Battery voltage (mV)
+ * @actual_current: Battery current (mA); negative=discharging
+ * @remaining_capacity: Remaining capacity (mAh)
+ * @full_capacity: Capacity (mAh, might change occasionally)
+ * @flags: Flags, see EC_BATT_FLAG_*
+ * @desired_voltage: Charging voltage desired by battery (mV)
+ * @desired_current: Charging current desired by battery (mA)
+ */
+struct ec_response_battery_dynamic_info {
+ int16_t actual_voltage;
+ int16_t actual_current;
+ int16_t remaining_capacity;
+ int16_t full_capacity;
+ int16_t flags;
+ int16_t desired_voltage;
+ int16_t desired_current;
+} __ec_align2;
/*
* Control charger chip. Used to control charger chip on the slave.
*/
#define EC_CMD_CHARGER_CONTROL 0x0602
-struct __ec_align_size1 ec_params_charger_control {
- /*
- * Charger current (mA). Positive to allow base to draw up to
- * max_current and (possibly) charge battery, negative to request
- * current from base (OTG).
- */
+/**
+ * struct ec_params_charger_control - Charger control parameters
+ * @max_current: Charger current (mA). Positive to allow base to draw up to
+ * max_current and (possibly) charge battery, negative to request current
+ * from base (OTG).
+ * @otg_voltage: Voltage (mV) to use in OTG mode, ignored if max_current is
+ * >= 0.
+ * @allow_charging: Allow base battery charging (only makes sense if
+ * max_current > 0).
+ */
+struct ec_params_charger_control {
int16_t max_current;
-
- /* Voltage (mV) to use in OTG mode, ignored if max_current is >= 0. */
uint16_t otg_voltage;
-
- /* Allow base battery charging (only makes sense if max_current > 0). */
uint8_t allow_charging;
-};
+} __ec_align_size1;
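+
+/*
+ * For illustration, filling the struct above for the OTG case described
+ * in the field documentation: a negative max_current requests current
+ * from the base, and only then is otg_voltage honoured.
+ *
+ *	static void fill_otg_request(struct ec_params_charger_control *p)
+ *	{
+ *		p->max_current = -500;	// request 500 mA from the base
+ *		p->otg_voltage = 5000;	// at 5 V; ignored if >= 0 above
+ *		p->allow_charging = 0;	// only meaningful if max_current > 0
+ *	}
+ */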
/*****************************************************************************/
/*
@@ -5031,4 +5602,8 @@ struct __ec_align_size1 ec_params_charger_control {
#endif /* !__ACPI__ */
+#ifdef __cplusplus
+}
+#endif
+
#endif /* __CROS_EC_EC_COMMANDS_H */
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
index 44f9d9f647ed1b..ffe81127d91cbc 100644
--- a/include/linux/mfd/tps68470.h
+++ b/include/linux/mfd/tps68470.h
@@ -1,17 +1,6 @@
-/*
- * Copyright (c) 2017 Intel Corporation
- *
- * Functions to access TPS68470 power management chip.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Intel Corporation */
+/* Functions to access TPS68470 power management chip. */
#ifndef __LINUX_MFD_TPS68470_H
#define __LINUX_MFD_TPS68470_H
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 035dc7799388bd..c67d236acaa818 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -165,7 +165,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
-#define VM_ARCH_2 0x02000000
+#define VM_WIPEONFORK 0x02000000 /* Wipe VMA contents in child. */
#define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
#ifdef CONFIG_MEM_SOFT_DIRTY
@@ -179,6 +179,19 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NOHUGEPAGE 0x40000000 /* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
+#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
+#define VM_HIGH_ARCH_BIT_0 32 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_1 33 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
+#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
+#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
+#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
+#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
+#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
@@ -193,9 +206,11 @@ extern unsigned int kobjsize(const void *objp);
# define VM_MAPPED_COPY VM_ARCH_1 /* T if mapped copy of data (nommu mmap) */
#endif
-#if defined(CONFIG_X86)
+#if defined(CONFIG_X86_INTEL_MPX)
/* MPX specific bounds table or bounds directory */
-# define VM_MPX VM_ARCH_2
+# define VM_MPX VM_HIGH_ARCH_BIT_4
+#else
+# define VM_MPX VM_NONE
#endif
#ifndef VM_GROWSUP
@@ -1237,7 +1252,7 @@ static inline int fixup_user_fault(struct task_struct *tsk,
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write);
+ void *buf, int len, unsigned int gup_flags);
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -1245,19 +1260,17 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct **vmas, int *nonblocking);
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- int *locked);
+ unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags);
+ struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages);
+ struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
@@ -1275,7 +1288,7 @@ struct frame_vector {
struct frame_vector *frame_vector_create(unsigned int nr_frames);
void frame_vector_destroy(struct frame_vector *vec);
int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
- bool write, bool force, struct frame_vector *vec);
+ unsigned int gup_flags, struct frame_vector *vec);
void put_vaddr_frames(struct frame_vector *vec);
int frame_vector_to_pages(struct frame_vector *vec);
void frame_vector_to_pfns(struct frame_vector *vec);
@@ -2104,6 +2117,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
return vma;
}
+static inline bool range_in_vma(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ return (vma && vma->vm_start <= start && end <= vma->vm_end);
+}
+
#ifdef CONFIG_MMU
pgprot_t vm_get_page_prot(unsigned long vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
@@ -2129,6 +2148,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2160,6 +2181,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_COW 0x4000 /* internal GUP flag */
+#define FOLL_ANON 0x8000 /* don't do file mappings */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
diff --git a/include/linux/mm_metrics.h b/include/linux/mm_metrics.h
new file mode 100644
index 00000000000000..7338f7152b85ba
--- /dev/null
+++ b/include/linux/mm_metrics.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _LINUX_MM_METRICS_H
+#define _LINUX_MM_METRICS_H
+
+#include <linux/timekeeping.h>
+#include <linux/sched.h>
+#include <linux/swapops.h>
+
+struct histogram {
+ struct rcu_head rcu;
+ unsigned int size;
+ u64 __percpu *buckets;
+ u64 thresholds[0];
+};
+
+enum {
+ MM_SWAP_REFAULT,
+ MM_SWAP_LATENCY,
+ MM_RECLAIM_LATENCY,
+ NR_MM_METRICS,
+};
+
+extern struct histogram __rcu *mm_metrics_files[NR_MM_METRICS];
+
+#ifdef CONFIG_MM_METRICS
+#define mm_metrics_enabled(type) rcu_access_pointer(mm_metrics_files[type])
+#else
+#define mm_metrics_enabled(type) 0
+#endif
+
+extern void mm_metrics_record(unsigned int type, u64 val, u64 count);
+
+static inline void mm_metrics_swapout(swp_entry_t *swap)
+{
+ if (mm_metrics_enabled(MM_SWAP_REFAULT)) {
+ u64 start = ktime_get_seconds();
+
+ VM_BUG_ON(swp_type(*swap) >= MAX_SWAPFILES);
+ VM_BUG_ON(!swp_offset(*swap));
+
+ swap->val &= ~GENMASK_ULL(SWP_TM_OFF_BITS - 1, SWP_OFFSET_BITS);
+ if (start < BIT_ULL(SWP_TIME_BITS))
+ swap->val |= start << SWP_OFFSET_BITS;
+ }
+}
+
+static inline void mm_metrics_swapin(swp_entry_t swap)
+{
+ if (mm_metrics_enabled(MM_SWAP_REFAULT)) {
+ u64 start = _swp_offset(swap) >> SWP_OFFSET_BITS;
+
+ VM_BUG_ON(swp_type(swap) >= MAX_SWAPFILES);
+ VM_BUG_ON(!swp_offset(swap));
+
+ if (start)
+ mm_metrics_record(MM_SWAP_REFAULT,
+ ktime_get_seconds() - start, 1);
+ }
+}
+
+static inline u64 mm_metrics_swapin_start(void)
+{
+ return mm_metrics_enabled(MM_SWAP_LATENCY) ? sched_clock() : 0;
+}
+
+static inline void mm_metrics_swapin_end(u64 start)
+{
+ if (mm_metrics_enabled(MM_SWAP_LATENCY) && start)
+ mm_metrics_record(MM_SWAP_LATENCY, sched_clock() - start, 1);
+}
+
+static inline u64 mm_metrics_reclaim_start(void)
+{
+ return mm_metrics_enabled(MM_RECLAIM_LATENCY) ? sched_clock() : 0;
+}
+
+static inline void mm_metrics_reclaim_end(u64 start)
+{
+ if (mm_metrics_enabled(MM_RECLAIM_LATENCY) && start)
+ mm_metrics_record(MM_RECLAIM_LATENCY, sched_clock() - start, 1);
+}
+
+#endif /* _LINUX_MM_METRICS_H */
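+
+/*
+ * Usage sketch for the paired latency helpers above; read_swap_page() is
+ * a placeholder for the real blocking work. Both calls collapse to
+ * no-ops when the corresponding histogram file is not open.
+ *
+ *	static int timed_swapin(void)
+ *	{
+ *		u64 start = mm_metrics_swapin_start();
+ *		int ret = read_swap_page();	// placeholder
+ *
+ *		mm_metrics_swapin_end(start);	// records sched_clock() delta
+ *		return ret;
+ *	}
+ */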
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 12094a2ca31d69..045208743278f6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -400,7 +400,7 @@ struct kioctx_table;
struct mm_struct {
struct vm_area_struct *mmap; /* list of VMAs */
struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ u64 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 241370d096ca3f..623d2e277e8c79 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -60,8 +60,9 @@ struct mmc_ext_csd {
u8 packed_event_en;
unsigned int part_time; /* Units: ms */
unsigned int sa_timeout; /* Units: 100ns */
- unsigned int generic_cmd6_time; /* Units: 10ms */
+ unsigned int generic_cmd6_time; /* Units: ms */
unsigned int power_off_longtime; /* Units: ms */
+ unsigned int mode_op_codes_time; /* Units: ms */
u8 power_off_notification; /* state */
unsigned int hs_max_dtr;
unsigned int hs200_max_dtr;
@@ -82,6 +83,7 @@ struct mmc_ext_csd {
	bool hpi_en; /* HPI enable bit */
bool hpi; /* HPI support bit */
unsigned int hpi_cmd; /* cmd used as HPI */
+ bool ffu_mode_op; /* FFU mode operation code */
bool bkops; /* background support bit */
bool man_bkops_en; /* manual bkops enable bit */
unsigned int data_sector_size; /* 512 bytes or 4KB */
@@ -89,6 +91,7 @@ struct mmc_ext_csd {
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
bool ffu_capable; /* Firmware upgrade support */
+ u32 ffu_arg; /* Argument for FFU command */
#define MMC_FIRMWARE_LEN 8
u8 fwrev[MMC_FIRMWARE_LEN]; /* FW version */
u8 raw_exception_status; /* 54 */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 37967b6da03cf5..69712bff740d1a 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -196,6 +196,17 @@ extern int mmc_flush_cache(struct mmc_card *);
extern int mmc_detect_card_removed(struct mmc_host *host);
+extern void mmc_prepare_mrq(struct mmc_card *card,
+ struct mmc_request *mrq, struct scatterlist *sg,
+ unsigned sg_len, unsigned dev_addr, unsigned blocks,
+ unsigned blksz, int write);
+extern int mmc_wait_busy(struct mmc_card *card);
+extern int mmc_check_result(struct mmc_request *mrq);
+extern int mmc_simple_transfer(struct mmc_card *card,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks,
+ unsigned blksz, int write);
+
/**
* mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
diff --git a/include/linux/mmc/ffu.h b/include/linux/mmc/ffu.h
new file mode 100644
index 00000000000000..48fdcfef86803f
--- /dev/null
+++ b/include/linux/mmc/ffu.h
@@ -0,0 +1,56 @@
+/*
+ * ffu.h
+ *
+ * Copyright 2015 SanDisk, Corp
+ * Modified by Google Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program was created by SanDisk Corp
+ */
+
+#ifndef _FFU_H_
+#define _FFU_H_
+
+#include <linux/mmc/card.h>
+
+/*
+ * eMMC5.0 Field Firmware Update (FFU) opcodes
+ */
+#define MMC_FFU_INVOKE_OP 302
+
+#define FFU_NAME_LEN 80 /* Firmware file name udev should find */
+
+enum mmc_ffu_hack_type {
+ MMC_OVERRIDE_FFU_ARG = 0,
+ MMC_HACK_LEN,
+};
+
+struct mmc_ffu_hack {
+ enum mmc_ffu_hack_type type;
+ u64 value;
+};
+
+struct mmc_ffu_args {
+ char name[FFU_NAME_LEN];
+ u32 ack_nb;
+ struct mmc_ffu_hack hack[0];
+};
+
+#ifdef CONFIG_MMC_FFU
+int mmc_ffu_invoke(struct mmc_card *card, const struct mmc_ffu_args *args);
+#else
+static inline int mmc_ffu_invoke(struct mmc_card *card,
+ const struct mmc_ffu_args *args)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#endif /* _FFU_H_ */
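+
+/*
+ * Kernel-side usage sketch under stated assumptions: the firmware file
+ * name is hypothetical, and ack_nb is taken to be the number of hack[]
+ * entries appended to the variable-length args.
+ *
+ *	static int ffu_with_arg_override(struct mmc_card *card, u64 arg)
+ *	{
+ *		struct mmc_ffu_args *args;
+ *		int ret;
+ *
+ *		args = kzalloc(sizeof(*args) + sizeof(args->hack[0]),
+ *			       GFP_KERNEL);
+ *		if (!args)
+ *			return -ENOMEM;
+ *		strlcpy(args->name, "emmc_fw.bin", sizeof(args->name));
+ *		args->ack_nb = 1;	// one hack entry follows (assumed)
+ *		args->hack[0].type = MMC_OVERRIDE_FFU_ARG;
+ *		args->hack[0].value = arg;
+ *		ret = mmc_ffu_invoke(card, args);
+ *		kfree(args);
+ *		return ret;
+ *	}
+ */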
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index c376209c70ef44..6f6b42a59c11c6 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -272,6 +272,9 @@ struct _mmc_csd {
* EXT_CSD fields
*/
+#define EXT_CSD_FFU_STATUS 26 /* R */
+#define EXT_CSD_MODE_OPERATION_CODES 29 /* W */
+#define EXT_CSD_MODE_CONFIG 30 /* R/W */
#define EXT_CSD_FLUSH_CACHE 32 /* W */
#define EXT_CSD_CACHE_CTRL 33 /* R/W */
#define EXT_CSD_POWER_OFF_NOTIFICATION 34 /* R/W */
@@ -331,6 +334,10 @@ struct _mmc_csd {
#define EXT_CSD_CACHE_SIZE 249 /* RO, 4 bytes */
#define EXT_CSD_PWR_CL_DDR_200_360 253 /* RO */
#define EXT_CSD_FIRMWARE_VERSION 254 /* RO, 8 bytes */
+#define EXT_CSD_NUM_OF_FW_SEC_PROG 302 /* RO, 4 bytes */
+#define EXT_CSD_FFU_ARG 487 /* RO, 4 bytes */
+#define EXT_CSD_OP_CODES_TIMEOUT 491 /* RO */
+#define EXT_CSD_FFU_FEATURES 492 /* RO */
#define EXT_CSD_SUPPORTED_MODE 493 /* RO */
#define EXT_CSD_TAG_UNIT_SIZE 498 /* RO */
#define EXT_CSD_DATA_TAG_SUPPORT 499 /* RO */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index 83430f2ea757df..e0325706b76da6 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -33,6 +33,7 @@
#define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d
#define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335
#define SDIO_DEVICE_ID_BROADCOM_43362 0xa962
+#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4
#define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6
#define SDIO_DEVICE_ID_BROADCOM_4345 0x4345
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b93b578cfa42b7..30e37183bb1b30 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -65,8 +65,10 @@ enum {
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
+# define is_migrate_cma_page(_page) false
#endif
#define for_each_migratetype_order(order, type) \
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index f0d87347df19ed..0508fcc6748044 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
#define _LINUX_NETDEV_FEATURES_H
#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
typedef u64 netdev_features_t;
@@ -125,8 +127,26 @@ enum {
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
-#define for_each_netdev_feature(mask_addr, bit) \
- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the highest-numbered set feature bit in the range start down to 0;
+ * returns -1 if none of those bits is set.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+ /* like BITMAP_LAST_WORD_MASK() for u64
+	 * this sets the most significant 64 - start bits to 0.
+ */
+ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+ return fls64(feature) - 1;
+}
+
+/* This iterates from the MSB to the LSB through the set feature bits;
+ * mask_addr should be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit) \
+ for ((bit) = find_next_netdev_feature((mask_addr), \
+ NETDEV_FEATURE_COUNT); \
+ (bit) >= 0; \
+ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
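+
+/*
+ * Usage is unchanged from the old bitmap-based iterator; a sketch
+ * (kernel context assumed for pr_info()):
+ *
+ *	static void print_features(netdev_features_t features)
+ *	{
+ *		int bit;
+ *
+ *		// Visits set bits from most- to least-significant.
+ *		for_each_netdev_feature(features, bit)
+ *			pr_info("feature bit %d is set\n", bit);
+ *	}
+ */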
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 9f3f6155650177..36146fb1c7f6e7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2189,6 +2189,13 @@ struct netdev_notifier_info {
struct net_device *dev;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
diff --git a/include/linux/netfilter/ipset/ip_set_comment.h b/include/linux/netfilter/ipset/ip_set_comment.h
index 8d024852595704..9f34204978e4d7 100644
--- a/include/linux/netfilter/ipset/ip_set_comment.h
+++ b/include/linux/netfilter/ipset/ip_set_comment.h
@@ -41,11 +41,11 @@ ip_set_init_comment(struct ip_set_comment *comment,
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h
index 1d6a935c1ac5f4..8793f5a7b820e9 100644
--- a/include/linux/netfilter/ipset/ip_set_timeout.h
+++ b/include/linux/netfilter/ipset/ip_set_timeout.h
@@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
static inline u32
ip_set_timeout_get(unsigned long *timeout)
{
- return *timeout == IPSET_ELEM_PERMANENT ? 0 :
- jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+ u32 t;
+
+ if (*timeout == IPSET_ELEM_PERMANENT)
+ return 0;
+
+ t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
+ /* Zero value in userspace means no timeout */
+ return t == 0 ? 1 : t;
}
#endif /* __KERNEL__ */
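The reworked getter exists because 0 is the userspace encoding for "no
timeout": a live entry whose remaining time truncates to zero seconds must
still be reported as 1. A standalone sketch of the clamp, with plain
milliseconds standing in for jiffies:

#include <stdio.h>

static unsigned int timeout_get(unsigned long remaining_msec, int permanent)
{
	unsigned int t;

	if (permanent)
		return 0;
	t = remaining_msec / 1000;
	return t == 0 ? 1 : t;	/* never report a live entry as 0 */
}

int main(void)
{
	printf("%u\n", timeout_get(0, 1));	/* permanent -> 0 */
	printf("%u\n", timeout_get(500, 0));	/* 0.5 s left -> 1, not 0 */
	printf("%u\n", timeout_get(7500, 0));	/* -> 7 */
	return 0;
}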
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 2ea517c7c6b945..bffd096fae3b51 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -125,4 +125,9 @@ extern unsigned int ebt_do_table(struct sk_buff *skb,
/* True if the target is not a standard target */
#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
+static inline bool ebt_invalid_target(int target)
+{
+ return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
#endif
diff --git a/include/linux/nospec.h b/include/linux/nospec.h
index e791ebc65c9c07..0c5ef54fd41628 100644
--- a/include/linux/nospec.h
+++ b/include/linux/nospec.h
@@ -7,6 +7,8 @@
#define _LINUX_NOSPEC_H
#include <asm/barrier.h>
+struct task_struct;
+
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
@@ -55,4 +57,12 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
\
(typeof(_i)) (_i & _mask); \
})
+
+/* Speculation control prctl */
+int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which);
+int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
+ unsigned long ctrl);
+/* Speculation control for seccomp enforced mitigation */
+void arch_seccomp_spec_mitigate(struct task_struct *task);
+
#endif /* _LINUX_NOSPEC_H */
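The prctl hooks above land in the header whose core primitive is
array_index_mask_nospec(). A userspace sketch of the generic, branch-free
masking idea (architectures may override it; this assumes index and size
below 2^63 and GCC/Clang arithmetic right shift of negative longs):

#include <stdio.h>

static unsigned long array_index_mask_nospec(unsigned long index,
					     unsigned long size)
{
	/* ~0UL when index < size, 0 otherwise; 63 is BITS_PER_LONG - 1 */
	return ~(long)(index | (size - 1UL - index)) >> 63;
}

int main(void)
{
	unsigned long size = 8, ok = 3, oob = 12;

	printf("%lu\n", ok & array_index_mask_nospec(ok, size));	/* 3 */
	printf("%lu\n", oob & array_index_mask_nospec(oob, size));	/* 0 */
	return 0;
}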
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index bdfd7b240bba31..36fe879e771592 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -104,7 +104,7 @@ struct nvme_id_ctrl {
char fr[8];
__u8 rab;
__u8 ieee[3];
- __u8 mic;
+ __u8 cmic;
__u8 mdts;
__le16 cntlid;
__le32 ver;
@@ -120,7 +120,23 @@ struct nvme_id_ctrl {
__u8 apsta;
__le16 wctemp;
__le16 cctemp;
- __u8 rsvd270[242];
+ __le16 mtfa;
+ __le32 hmpre;
+ __le32 hmmin;
+ __u8 tnvmcap[16];
+ __u8 unvmcap[16];
+ __le32 rpmbs;
+ __le16 edstt;
+ __u8 dsto;
+ __u8 fwug;
+ __le16 kas;
+ __le16 hctma;
+ __le16 mntmt;
+ __le16 mxtmt;
+ __le32 sanicap;
+ __le32 hmminds;
+ __le16 hmmaxd;
+ __u8 rsvd338[174];
__u8 sqes;
__u8 cqes;
__u8 rsvd514[2];
@@ -176,7 +192,7 @@ struct nvme_id_ns {
__le16 nabo;
__le16 nabspf;
__u16 rsvd46;
- __le64 nvmcap[2];
+ __u8 nvmcap[16];
__u8 rsvd64[40];
__u8 nguid[16];
__u8 eui64[8];
@@ -377,6 +393,11 @@ struct nvme_feat_auto_pst {
__le64 entries[32];
};
+enum {
+ NVME_HOST_MEM_ENABLE = (1 << 0),
+ NVME_HOST_MEM_RETURN = (1 << 1),
+};
+
/* Admin commands */
enum nvme_admin_opcode {
@@ -390,8 +411,11 @@ enum nvme_admin_opcode {
nvme_admin_set_features = 0x09,
nvme_admin_get_features = 0x0a,
nvme_admin_async_event = 0x0c,
+ nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_ns_attach = 0x15,
+ nvme_admin_keep_alive = 0x18,
nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -417,6 +441,8 @@ enum {
NVME_FEAT_WRITE_ATOMIC = 0x0a,
NVME_FEAT_ASYNC_EVENT = 0x0b,
NVME_FEAT_AUTO_PST = 0x0c,
+ NVME_FEAT_HOST_MEM_BUF = 0x0d,
+ NVME_FEAT_KATO = 0x0f,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -452,7 +478,16 @@ struct nvme_features {
__le64 prp2;
__le32 fid;
__le32 dword11;
- __u32 rsvd12[4];
+ __le32 dword12;
+ __le32 dword13;
+ __le32 dword14;
+ __le32 dword15;
+};
+
+struct nvme_host_mem_buf_desc {
+ __le64 addr;
+ __le32 size;
+ __u32 rsvd;
};
struct nvme_create_cq {
@@ -586,12 +621,27 @@ enum {
NVME_SC_INVALID_VECTOR = 0x108,
NVME_SC_INVALID_LOG_PAGE = 0x109,
NVME_SC_INVALID_FORMAT = 0x10a,
- NVME_SC_FIRMWARE_NEEDS_RESET = 0x10b,
+ NVME_SC_FW_NEEDS_CONV_RESET = 0x10b,
NVME_SC_INVALID_QUEUE = 0x10c,
NVME_SC_FEATURE_NOT_SAVEABLE = 0x10d,
NVME_SC_FEATURE_NOT_CHANGEABLE = 0x10e,
NVME_SC_FEATURE_NOT_PER_NS = 0x10f,
- NVME_SC_FW_NEEDS_RESET_SUBSYS = 0x110,
+ NVME_SC_FW_NEEDS_SUBSYS_RESET = 0x110,
+ NVME_SC_FW_NEEDS_RESET = 0x111,
+ NVME_SC_FW_NEEDS_MAX_TIME = 0x112,
+ NVME_SC_FW_ACIVATE_PROHIBITED = 0x113,
+ NVME_SC_OVERLAPPING_RANGE = 0x114,
+ NVME_SC_NS_INSUFFICENT_CAP = 0x115,
+ NVME_SC_NS_ID_UNAVAILABLE = 0x116,
+ NVME_SC_NS_ALREADY_ATTACHED = 0x118,
+ NVME_SC_NS_IS_PRIVATE = 0x119,
+ NVME_SC_NS_NOT_ATTACHED = 0x11a,
+ NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
+ NVME_SC_CTRL_LIST_INVALID = 0x11c,
+
+ /*
+ * I/O Command Set Specific - NVM commands:
+ */
NVME_SC_BAD_ATTRIBUTES = 0x180,
NVME_SC_INVALID_PI = 0x181,
NVME_SC_READ_ONLY = 0x182,
@@ -602,6 +652,8 @@ enum {
NVME_SC_REFTAG_CHECK = 0x284,
NVME_SC_COMPARE_FAILED = 0x285,
NVME_SC_ACCESS_DENIED = 0x286,
+ NVME_SC_UNWRITTEN_BLOCK = 0x287,
+
NVME_SC_DNR = 0x4000,
};
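The nvmcap change reflects that these Identify fields are 128-bit
little-endian quantities, which C has no native type for, hence the raw
16-byte array. A sketch of reading such a field as two 64-bit halves
(le64_from_bytes() is a hypothetical helper, not a kernel API):

#include <stdio.h>
#include <stdint.h>

static uint64_t le64_from_bytes(const uint8_t *p)
{
	uint64_t v = 0;

	for (int i = 7; i >= 0; i--)	/* p[0] is the least significant */
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t nvmcap[16] = { 0 };

	nvmcap[5] = 0x01;		/* 2^40 bytes = 1 TiB */
	printf("lo=%llu hi=%llu\n",
	       (unsigned long long)le64_from_bytes(nvmcap),
	       (unsigned long long)le64_from_bytes(nvmcap + 8));
	return 0;
}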
diff --git a/include/linux/of.h b/include/linux/of.h
index eb56545072ddc3..197e09b0bc8e2a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -268,6 +268,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node,
extern struct device_node *of_get_next_available_child(
const struct device_node *node, struct device_node *prev);
+extern struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible);
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
@@ -587,6 +589,12 @@ static inline bool of_have_populated_dt(void)
return false;
}
+static inline struct device_node *of_get_compatible_child(const struct device_node *parent,
+ const char *compatible)
+{
+ return NULL;
+}
+
static inline struct device_node *of_get_child_by_name(
const struct device_node *node,
const char *name)
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 4e9b4b207bbaa0..948b7f4dcb749d 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -386,6 +386,10 @@ struct pci_dev {
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
char *driver_override; /* Driver name to force a match */
+#ifdef CONFIG_PCIEAER
+ u16 aer_cap; /* AER capability offset */
+ struct aer_stats *aer_stats; /* AER stats for this device */
+#endif
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca7259b78..9f0aa1b48c7849 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 68e40d9dd1958f..bc1775dc62d55b 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -65,6 +65,7 @@ void dev_pm_opp_put_supported_hw(struct device *dev);
int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
void dev_pm_opp_put_prop_name(struct device *dev);
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev);
#else
static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
{
@@ -199,6 +200,11 @@ static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask
{
return -ENOSYS;
}
+
+static inline struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return NULL;
+}
#endif
#endif /* __LINUX_OPP_H__ */
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 907f3fd191acaa..3e28a1a8d8235b 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -65,8 +65,8 @@ struct k_itimer {
spinlock_t it_lock;
clockid_t it_clock; /* which timer type */
timer_t it_id; /* timer id */
- int it_overrun; /* overrun on pending signal */
- int it_overrun_last; /* overrun on last delivered signal */
+ s64 it_overrun; /* overrun on pending signal */
+ s64 it_overrun_last; /* overrun on last delivered signal */
int it_requeue_pending; /* waiting to requeue this timer */
#define REQUEUE_PENDING 1
int it_sigev_notify; /* notify word of sigevent struct */
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 5d5174b59802a4..cd385fc7bffc04 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -299,7 +299,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
unsigned long nr_to_tag,
unsigned int fromtag, unsigned int totag);
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
static inline void radix_tree_preload_end(void)
{
@@ -382,6 +381,7 @@ static inline __must_check
void **radix_tree_iter_retry(struct radix_tree_iter *iter)
{
iter->next_index = iter->index;
+ iter->tags = 0;
return NULL;
}
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 5ed540986019b9..a579240c64e9a1 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -402,6 +402,42 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
}
/**
+ * hlist_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_add_head_rcu()
+ * or hlist_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_add_tail_rcu(struct hlist_node *n,
+ struct hlist_head *h)
+{
+ struct hlist_node *i, *last = NULL;
+
+ for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_add_head_rcu(n, h);
+ }
+}
+
+/**
* hlist_add_before_rcu
* @n: the new element to add to the hash list.
* @next: the existing element to add the new element before.
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
new file mode 100644
index 00000000000000..0d905d8ec553fa
--- /dev/null
+++ b/include/linux/restart_block.h
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 __user *uaddr2;
+ } futex;
+ /* For nanosleep */
+ struct {
+ clockid_t clockid;
+ struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+ struct compat_timespec __user *compat_rmtp;
+#endif
+ u64 expires;
+ } nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
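The new header is one callback pointer plus a union of per-syscall argument
blocks, so an interrupted syscall can be re-issued from saved state. A
userspace toy of the same shape (fields trimmed, names hypothetical; not
the kernel signal-return flow):

#include <stdio.h>

struct restart_block {
	long (*fn)(struct restart_block *);
	union {
		struct { unsigned long long expires; } nanosleep;
		struct { unsigned long tv_sec, tv_nsec; } poll;
	};
};

static long restart_nanosleep(struct restart_block *rb)
{
	printf("re-arming sleep until %llu\n", rb->nanosleep.expires);
	return 0;
}

int main(void)
{
	struct restart_block rb = {
		.fn = restart_nanosleep,
		.nanosleep.expires = 123456789ULL,
	};

	return (int)rb.fn(&rb);	/* what a restarted syscall would do */
}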
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e50b31d18462c0..e97cdfd6cba95c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -133,23 +133,23 @@ struct rhashtable_params {
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @nelems: Number of elements in table
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- atomic_t nelems;
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
+ atomic_t nelems;
};
/**
@@ -343,7 +343,8 @@ int rhashtable_init(struct rhashtable *ht,
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
const void *key,
struct rhash_head *obj,
- struct bucket_table *old_tbl);
+ struct bucket_table *old_tbl,
+ void **data);
int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl);
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
@@ -514,18 +515,8 @@ static inline int rhashtable_compare(struct rhashtable_compare_arg *arg,
return memcmp(ptr + ht->p.key_offset, arg->key, ht->p.key_len);
}
-/**
- * rhashtable_lookup_fast - search hash table, inlined version
- * @ht: hash table
- * @key: the pointer to the key
- * @params: hash table parameters
- *
- * Computes the hash value for the key and traverses the bucket chain looking
- * for a entry with an identical key. The first matching entry is returned.
- *
- * Returns the first entry on which the compare function returned true.
- */
-static inline void *rhashtable_lookup_fast(
+/* Internal function, do not use. */
+static inline struct rhash_head *__rhashtable_lookup(
struct rhashtable *ht, const void *key,
const struct rhashtable_params params)
{
@@ -537,8 +528,6 @@ static inline void *rhashtable_lookup_fast(
struct rhash_head *he;
unsigned int hash;
- rcu_read_lock();
-
tbl = rht_dereference_rcu(ht->tbl, ht);
restart:
hash = rht_key_hashfn(ht, tbl, key, params);
@@ -547,8 +536,7 @@ restart:
params.obj_cmpfn(&arg, rht_obj(ht, he)) :
rhashtable_compare(&arg, rht_obj(ht, he)))
continue;
- rcu_read_unlock();
- return rht_obj(ht, he);
+ return he;
}
/* Ensure we see any new tables. */
@@ -557,13 +545,64 @@ restart:
tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(tbl))
goto restart;
- rcu_read_unlock();
return NULL;
}
-/* Internal function, please use rhashtable_insert_fast() instead */
-static inline int __rhashtable_insert_fast(
+/**
+ * rhashtable_lookup - search hash table
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This must only be called under the RCU read lock.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ struct rhash_head *he = __rhashtable_lookup(ht, key, params);
+
+ return he ? rht_obj(ht, he) : NULL;
+}
+
+/**
+ * rhashtable_lookup_fast - search hash table, without RCU read lock
+ * @ht: hash table
+ * @key: the pointer to the key
+ * @params: hash table parameters
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * Only use this function when you have other mechanisms guaranteeing
+ * that the object won't go away after the RCU read lock is released.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+static inline void *rhashtable_lookup_fast(
+ struct rhashtable *ht, const void *key,
+ const struct rhashtable_params params)
+{
+ void *obj;
+
+ rcu_read_lock();
+ obj = rhashtable_lookup(ht, key, params);
+ rcu_read_unlock();
+
+ return obj;
+}
+
+/* Internal function, please use rhashtable_insert_fast() instead. This
+ * function returns the existing element already in the hash table if there
+ * is a clash,
+ * otherwise it returns an error via ERR_PTR().
+ */
+static inline void *__rhashtable_insert_fast(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
@@ -576,6 +615,7 @@ static inline int __rhashtable_insert_fast(
spinlock_t *lock;
unsigned int elasticity;
unsigned int hash;
+ void *data = NULL;
int err;
restart:
@@ -600,11 +640,14 @@ restart:
new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
if (unlikely(new_tbl)) {
- tbl = rhashtable_insert_slow(ht, key, obj, new_tbl);
+ tbl = rhashtable_insert_slow(ht, key, obj, new_tbl, &data);
if (!IS_ERR_OR_NULL(tbl))
goto slow_path;
err = PTR_ERR(tbl);
+ if (err == -EEXIST)
+ err = 0;
+
goto out;
}
@@ -618,25 +661,25 @@ slow_path:
err = rhashtable_insert_rehash(ht, tbl);
rcu_read_unlock();
if (err)
- return err;
+ return ERR_PTR(err);
goto restart;
}
- err = -EEXIST;
+ err = 0;
elasticity = ht->elasticity;
rht_for_each(head, tbl, hash) {
if (key &&
unlikely(!(params.obj_cmpfn ?
params.obj_cmpfn(&arg, rht_obj(ht, head)) :
- rhashtable_compare(&arg, rht_obj(ht, head)))))
+ rhashtable_compare(&arg, rht_obj(ht, head))))) {
+ data = rht_obj(ht, head);
goto out;
+ }
if (!--elasticity)
goto slow_path;
}
- err = 0;
-
head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);
RCU_INIT_POINTER(obj->next, head);
@@ -651,7 +694,7 @@ out:
spin_unlock_bh(lock);
rcu_read_unlock();
- return err;
+ return err ? ERR_PTR(err) : data;
}
/**
@@ -674,7 +717,13 @@ static inline int rhashtable_insert_fast(
struct rhashtable *ht, struct rhash_head *obj,
const struct rhashtable_params params)
{
- return __rhashtable_insert_fast(ht, NULL, obj, params);
+ void *ret;
+
+ ret = __rhashtable_insert_fast(ht, NULL, obj, params);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -703,11 +752,15 @@ static inline int rhashtable_lookup_insert_fast(
const struct rhashtable_params params)
{
const char *key = rht_obj(ht, obj);
+ void *ret;
BUG_ON(ht->p.obj_hashfn);
- return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj,
- params);
+ ret = __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
}
/**
@@ -736,6 +789,32 @@ static inline int rhashtable_lookup_insert_key(
struct rhashtable *ht, const void *key, struct rhash_head *obj,
const struct rhashtable_params params)
{
+ void *ret;
+
+ BUG_ON(!ht->p.obj_hashfn || !key);
+
+ ret = __rhashtable_insert_fast(ht, key, obj, params);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ return ret == NULL ? 0 : -EEXIST;
+}
+
+/**
+ * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
+ * @ht: hash table
+ * @key: pointer to the key
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_key(), but this function returns the
+ * object if it exists, NULL if it does not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_key(
+ struct rhashtable *ht, const void *key, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
BUG_ON(!ht->p.obj_hashfn || !key);
return __rhashtable_insert_fast(ht, key, obj, params);
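The insert path now uses a tri-state return: NULL means inserted, a valid
pointer is the clashing element already in the table, and ERR_PTR() encodes
a real failure. A userspace sketch of that convention, with ERR_PTR()/
IS_ERR() re-implemented the way the kernel defines them:

#include <stdio.h>

#define MAX_ERRNO	4095
#define EEXIST		17

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int insert_fast(void *ret)	/* mirrors rhashtable_insert_fast() */
{
	if (IS_ERR(ret))
		return PTR_ERR(ret);		/* real failure */
	return ret == NULL ? 0 : -EEXIST;	/* clash -> -EEXIST */
}

int main(void)
{
	int existing;

	printf("%d\n", insert_fast(NULL));		/* 0 */
	printf("%d\n", insert_fast(&existing));		/* -17 */
	printf("%d\n", insert_fast(ERR_PTR(-12)));	/* -12 */
	return 0;
}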
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 4acc552e92790c..19d0778ec38254 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -162,6 +162,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f55b74840dbc3f..851b226a6daed7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1464,6 +1464,13 @@ struct tlbflush_unmap_batch {
};
struct task_struct {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ /*
+ * For reasons of header soup (see current_thread_info()), this
+ * must be the first element of task_struct.
+ */
+ struct thread_info thread_info;
+#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
atomic_t usage;
@@ -1473,6 +1480,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ unsigned int cpu; /* current CPU */
+#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -1538,7 +1548,7 @@ struct task_struct {
struct mm_struct *mm, *active_mm;
/* per-thread vma caching */
- u32 vmacache_seqnum;
+ u64 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
@@ -1561,6 +1571,9 @@ struct task_struct {
/* unserialized, strictly 'current' */
unsigned in_execve:1; /* bit to tell LSMs we're in execve */
unsigned in_iowait:1;
+#if !defined(TIF_RESTORE_SIGMASK)
+ unsigned restore_sigmask:1;
+#endif
#ifdef CONFIG_MEMCG
unsigned memcg_may_oom:1;
#endif
@@ -2261,6 +2274,8 @@ static inline void memalloc_noio_restore(unsigned int flags)
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+#define PFA_SPEC_SSB_DISABLE 4 /* Speculative Store Bypass disabled */
+#define PFA_SPEC_SSB_FORCE_DISABLE 5 /* Speculative Store Bypass force disabled*/
#define TASK_PFA_TEST(name, func) \
@@ -2284,6 +2299,13 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
+TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
+
+TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
+
/*
* task->jobctl flags
*/
@@ -2516,7 +2538,9 @@ extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
+#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
@@ -2626,6 +2650,66 @@ extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+#ifdef TIF_RESTORE_SIGMASK
+/*
+ * Legacy restore_sigmask accessors. These are inefficient on
+ * SMP architectures because they require atomic operations.
+ */
+
+/**
+ * set_restore_sigmask() - make sure saved_sigmask processing gets done
+ *
+ * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
+ * will run before returning to user mode, to process the flag. For
+ * all callers, TIF_SIGPENDING is already set or it's no harm to set
+ * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
+ * arch code will notice on return to user mode, in case those bits
+ * are scarce. We set TIF_SIGPENDING here to ensure that the arch
+ * signal code always gets run when TIF_RESTORE_SIGMASK is set.
+ */
+static inline void set_restore_sigmask(void)
+{
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_restore_sigmask(void)
+{
+ return test_thread_flag(TIF_RESTORE_SIGMASK);
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+}
+
+#else /* TIF_RESTORE_SIGMASK */
+
+/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
+static inline void set_restore_sigmask(void)
+{
+ current->restore_sigmask = true;
+ WARN_ON(!test_thread_flag(TIF_SIGPENDING));
+}
+static inline void clear_restore_sigmask(void)
+{
+ current->restore_sigmask = false;
+}
+static inline bool test_restore_sigmask(void)
+{
+ return current->restore_sigmask;
+}
+static inline bool test_and_clear_restore_sigmask(void)
+{
+ if (!current->restore_sigmask)
+ return false;
+ current->restore_sigmask = false;
+ return true;
+}
+#endif
+
static inline void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
@@ -2753,7 +2837,12 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}
-extern char *get_task_comm(char *to, struct task_struct *tsk);
+
+extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
+#define get_task_comm(buf, tsk) ({ \
+ BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \
+ __get_task_comm(buf, sizeof(buf), tsk); \
+})
#ifdef CONFIG_SMP
void scheduler_ipi(void);
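The sizeof()-based guard above only compiles when handed a real
char[TASK_COMM_LEN] array; a decayed pointer is rejected at build time. A
userspace sketch of the same idiom (__get_task_comm() body is a stand-in;
uses the kernel's GNU statement-expression style):

#include <stdio.h>
#include <string.h>

#define TASK_COMM_LEN	16
#define BUILD_BUG_ON(cond) _Static_assert(!(cond), #cond)

static char *__get_task_comm(char *to, size_t len, const char *comm)
{
	strncpy(to, comm, len - 1);	/* stand-in for the kernel copy */
	to[len - 1] = '\0';
	return to;
}

#define get_task_comm(buf, comm) ({				\
	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);		\
	__get_task_comm(buf, sizeof(buf), comm);		\
})

int main(void)
{
	char buf[TASK_COMM_LEN];

	puts(get_task_comm(buf, "kworker/0:1"));
	/* char *p = buf; get_task_comm(p, "x"); -- fails to compile */
	return 0;
}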
@@ -2906,10 +2995,34 @@ static inline void threadgroup_change_end(struct task_struct *tsk)
cgroup_threadgroup_change_end(tsk);
}
-#ifndef __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+ return &task->thread_info;
+}
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead. task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#define setup_thread_stack(new,old) do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
#define task_thread_info(task) ((struct thread_info *)(task)->stack)
-#define task_stack_page(task) ((task)->stack)
+#define task_stack_page(task) ((void *)(task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -2936,6 +3049,14 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
}
#endif
+
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+ return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
@@ -2946,7 +3067,7 @@ static inline int object_is_on_stack(void *obj)
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
@@ -3201,7 +3322,11 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
static inline unsigned int task_cpu(const struct task_struct *p)
{
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ return p->cpu;
+#else
return task_thread_info(p)->cpu;
+#endif
}
static inline int task_node(const struct task_struct *p)
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 2296e6b2f69076..5a53d34bba26d0 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -3,7 +3,8 @@
#include <uapi/linux/seccomp.h>
-#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC)
+#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW)
#ifdef CONFIG_SECCOMP
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index f3d45dd42695e1..d9bb2648f35053 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -139,6 +139,20 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);
+#define DEFINE_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
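A hedged usage sketch of the new macro (kernel context, not standalone;
foo_show() and the debugfs call are hypothetical): one show routine plus
one macro invocation yields a complete foo_fops.

static int foo_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "value: %d\n", 42);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* generates foo_open() and foo_fops */

/* e.g. in a probe routine:
 *	debugfs_create_file("foo", 0444, parent, NULL, &foo_fops);
 */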
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a6da214d0584ce..502787c29ce9ff 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -514,6 +514,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
+ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -555,9 +556,14 @@ struct sk_buff {
struct skb_mstamp skb_mstamp;
};
};
- struct rb_node rbnode; /* used in netem & tcp stack */
+ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
};
- struct sock *sk;
+
+ union {
+ struct sock *sk;
+ int ip_defrag_offset;
+ };
+
struct net_device *dev;
/*
@@ -594,8 +600,8 @@ struct sk_buff {
fclone:2,
peeked:1,
head_frag:1,
- xmit_more:1;
- /* one bit hole */
+ xmit_more:1,
+ pfmemalloc:1;
kmemcheck_bitfield_end(flags1);
/* fields enclosed in headers_start/headers_end are copied
@@ -615,19 +621,18 @@ struct sk_buff {
__u8 __pkt_type_offset[0];
__u8 pkt_type:3;
- __u8 pfmemalloc:1;
__u8 ignore_df:1;
__u8 nfctinfo:3;
-
__u8 nf_trace:1;
+
__u8 ip_summed:2;
__u8 ooo_okay:1;
__u8 l4_hash:1;
__u8 sw_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
-
__u8 no_fcs:1;
+
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
@@ -635,11 +640,11 @@ struct sk_buff {
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_bad:1;
-
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
__u8 ipvs_property:1;
+
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
/* 3 or 5 bit hole */
@@ -2273,6 +2278,8 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
+unsigned int skb_rbtree_purge(struct rb_root *root);
+
void *netdev_alloc_frag(unsigned int fragsz);
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int length,
@@ -2789,6 +2796,7 @@ static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
return skb->data;
}
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
@@ -2796,17 +2804,22 @@ static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
*
* This is exactly the same as pskb_trim except that it ensures the
* checksum of received packets are still valid after the operation.
+ * It can change skb pointers.
*/
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
if (likely(len >= skb->len))
return 0;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
- return __pskb_trim(skb, len);
+ return pskb_trim_rcsum_slow(skb, len);
}
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root) rb_to_skb(rb_last(root))
+#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
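The rb_to_skb() family above is the usual container_of() pattern: recover
the enclosing structure from a pointer to its embedded node, with the
_safe variant tolerating NULL. A userspace sketch with a hypothetical pkt
type in place of sk_buff:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next; };
struct pkt { int seq; struct node rbnode; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define node_to_pkt(n) ((n) ? container_of(n, struct pkt, rbnode) : NULL)

int main(void)
{
	struct pkt p = { .seq = 42 };

	printf("%d\n", node_to_pkt(&p.rbnode)->seq);	/* 42 */
	printf("%p\n", (void *)node_to_pkt(NULL));	/* nil */
	return 0;
}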
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 0c772845933dd7..16dc1e4a91f397 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -150,6 +150,18 @@ void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+ unsigned long n,
+ struct page *page)
+{
+ return NULL;
+}
+#endif
+
/*
* Some archs want to perform DMA into kmalloc caches and need a guaranteed
* alignment larger than the alignment of a 64-bit integer.
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c553dad412d2aa..70a195200ea1d3 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -67,7 +67,8 @@ struct kmem_cache {
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
- int cpu_partial; /* Number of per cpu partial objects to keep around */
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
@@ -81,6 +82,7 @@ struct kmem_cache {
int reserved; /* Reserved bytes at the end of slabs */
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
+ int red_left_pad; /* Left redzone padding size */
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
diff --git a/include/linux/string.h b/include/linux/string.h
index ce424df20d5ebb..a1b4d38fdee615 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -27,7 +27,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t);
size_t strlcpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRSCPY
-ssize_t __must_check strscpy(char *, const char *, size_t);
+ssize_t strscpy(char *, const char *, size_t);
#endif
#ifndef __HAVE_ARCH_STRCAT
extern char * strcat(char *, const char *);
@@ -170,4 +170,207 @@ static inline const char *kbasename(const char *path)
return tail ? tail + 1 : path;
}
+#define __FORTIFY_INLINE extern __always_inline __attribute__((gnu_inline))
+#define __RENAME(x) __asm__(#x)
+
+void fortify_panic(const char *name) __noreturn __cold;
+void __read_overflow(void) __compiletime_error("detected read beyond size of object passed as 1st parameter");
+void __read_overflow2(void) __compiletime_error("detected read beyond size of object passed as 2nd parameter");
+void __write_overflow(void) __compiletime_error("detected write beyond size of object passed as 1st parameter");
+
+#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+__FORTIFY_INLINE char *strcpy(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strcpy(p, q);
+ if (strscpy(p, q, p_size < q_size ? p_size : q_size) < 0)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_strncpy(p, q, size);
+}
+
+__FORTIFY_INLINE char *strcat(char *p, const char *q)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (p_size == (size_t)-1)
+ return __builtin_strcat(p, q);
+ if (strlcat(p, q, p_size) >= p_size)
+ fortify_panic(__func__);
+ return p;
+}
+
+__FORTIFY_INLINE __kernel_size_t strlen(const char *p)
+{
+ __kernel_size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+
+ /* Work around gcc excess stack consumption issue */
+ if (p_size == (size_t)-1 ||
+ (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0'))
+ return __builtin_strlen(p);
+ ret = strnlen(p, p_size);
+ if (p_size <= ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+extern __kernel_size_t __real_strnlen(const char *, __kernel_size_t) __RENAME(strnlen);
+__FORTIFY_INLINE __kernel_size_t strnlen(const char *p, __kernel_size_t maxlen)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ __kernel_size_t ret = __real_strnlen(p, maxlen < p_size ? maxlen : p_size);
+ if (p_size <= ret && maxlen != ret)
+ fortify_panic(__func__);
+ return ret;
+}
+
+/* defined after fortified strlen to reuse it */
+extern size_t __real_strlcpy(char *, const char *, size_t) __RENAME(strlcpy);
+__FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size)
+{
+ size_t ret;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __real_strlcpy(p, q, size);
+ ret = strlen(q);
+ if (size) {
+ size_t len = (ret >= size) ? size - 1 : ret;
+ if (__builtin_constant_p(len) && len >= p_size)
+ __write_overflow();
+ if (len >= p_size)
+ fortify_panic(__func__);
+ __builtin_memcpy(p, q, len);
+ p[len] = '\0';
+ }
+ return ret;
+}
+
+/* defined after fortified strlen and strnlen to reuse them */
+__FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count)
+{
+ size_t p_len, copy_len;
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (p_size == (size_t)-1 && q_size == (size_t)-1)
+ return __builtin_strncat(p, q, count);
+ p_len = strlen(p);
+ copy_len = strnlen(q, count);
+ if (p_size < p_len + copy_len + 1)
+ fortify_panic(__func__);
+ __builtin_memcpy(p + p_len, q, copy_len);
+ p[p_len + copy_len] = '\0';
+ return p;
+}
+
+__FORTIFY_INLINE void *memset(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __write_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memset(p, c, size);
+}
+
+__FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcpy(p, q, size);
+}
+
+__FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __write_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memmove(p, q, size);
+}
+
+extern void *__real_memscan(void *, int, __kernel_size_t) __RENAME(memscan);
+__FORTIFY_INLINE void *memscan(void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memscan(p, c, size);
+}
+
+__FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ size_t q_size = __builtin_object_size(q, 0);
+ if (__builtin_constant_p(size)) {
+ if (p_size < size)
+ __read_overflow();
+ if (q_size < size)
+ __read_overflow2();
+ }
+ if (p_size < size || q_size < size)
+ fortify_panic(__func__);
+ return __builtin_memcmp(p, q, size);
+}
+
+__FORTIFY_INLINE void *memchr(const void *p, int c, __kernel_size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __builtin_memchr(p, c, size);
+}
+
+void *__real_memchr_inv(const void *s, int c, size_t n) __RENAME(memchr_inv);
+__FORTIFY_INLINE void *memchr_inv(const void *p, int c, size_t size)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_memchr_inv(p, c, size);
+}
+
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup);
+__FORTIFY_INLINE void *kmemdup(const void *p, size_t size, gfp_t gfp)
+{
+ size_t p_size = __builtin_object_size(p, 0);
+ if (__builtin_constant_p(size) && p_size < size)
+ __read_overflow();
+ if (p_size < size)
+ fortify_panic(__func__);
+ return __real_kmemdup(p, size, gfp);
+}
+#endif
+
#endif /* _LINUX_STRING_H_ */
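All of the fortified helpers above hinge on __builtin_object_size(p, 0):
the compiler reports the allocation size when it can see it, or (size_t)-1
when it cannot (which disables the check). A minimal userspace probe of
that behaviour:

#include <stdio.h>
#include <stddef.h>

__attribute__((noinline))
static size_t probe(const char *p)
{
	/* the allocation is not visible from here: expect (size_t)-1 */
	return __builtin_object_size(p, 0);
}

int main(void)
{
	char buf[8];

	printf("visible: %zu\n", __builtin_object_size(buf, 0)); /* 8 */
	printf("opaque:  %zu\n", probe(buf));	/* SIZE_MAX, i.e. unknown */
	return 0;
}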
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index cc0fc712bb8238..a8ac3f25b4ecdc 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -290,9 +290,12 @@ struct svc_rqst {
struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
spinlock_t rq_lock; /* per-request lock */
+ struct net *rq_bc_net; /* pointer to backchannel's
+ * net namespace
+ */
};
-#define SVC_NET(svc_rqst) (svc_rqst->rq_xprt->xpt_net)
+#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
/*
* Rigorous type checking on sockaddr type conversions
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8ca2eaa3a8bff..18c2ab717b6b18 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -17,6 +17,8 @@ struct notifier_block;
struct bio;
+struct pagevec;
+
#define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */
#define SWAP_FLAG_PRIO_MASK 0x7fff
#define SWAP_FLAG_PRIO_SHIFT 0
@@ -348,7 +350,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
#endif
extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct page **, int nr_pages);
+extern void check_move_unevictable_pages(struct pagevec *pvec);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 388293a91e8c99..e4594de79bc4ce 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -9,5 +9,7 @@ extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 5c3a5f3e7eec66..fe4c67cebe45d4 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -45,7 +45,7 @@ static inline unsigned swp_type(swp_entry_t entry)
* Extract the `offset' field from a swp_entry_t. The swp_entry_t is in
* arch-independent format
*/
-static inline pgoff_t swp_offset(swp_entry_t entry)
+static inline pgoff_t _swp_offset(swp_entry_t entry)
{
return entry.val & SWP_OFFSET_MASK(entry);
}
@@ -80,7 +80,7 @@ static inline pte_t swp_entry_to_pte(swp_entry_t entry)
{
swp_entry_t arch_entry;
- arch_entry = __swp_entry(swp_type(entry), swp_offset(entry));
+ arch_entry = __swp_entry(swp_type(entry), _swp_offset(entry));
return __swp_entry_to_pte(arch_entry);
}
@@ -100,6 +100,63 @@ static inline void *swp_to_radix_entry(swp_entry_t entry)
return (void *)(value | RADIX_TREE_EXCEPTIONAL_ENTRY);
}
+/*
+ * We squeeze swapout timestamp into swp_offset because we don't
+ * want to allocate extra memory for it. Normally we have 50 bits
+ * in swp_offset on x86_64 and arm64. So we use 25 bits for the
+ * timestamp and the rest for offset. The timestamp is uptime in
+ * seconds, and it won't overflow within one year. The max size of
+ * swapfile is 128G, which is more than enough for now. If we have
+ * less than 50 bits in swp_offset due to 32-bit swp_entry_t or
+ * X86_BUG_L1TF, we don't enable the timestamp.
+ */
+#define SWP_TIME_BITS 25
+#define SWP_OFFSET_BITS 25
+#define SWP_TM_OFF_BITS (SWP_TIME_BITS + SWP_OFFSET_BITS)
+
+extern bool swap_refault_enabled __read_mostly;
+
+#ifdef CONFIG_MM_METRICS
+
+static inline pgoff_t swp_offset(swp_entry_t swap)
+{
+ return swap_refault_enabled && swp_type(swap) < MAX_SWAPFILES ?
+ _swp_offset(swap) & GENMASK_ULL(SWP_OFFSET_BITS - 1, 0) :
+ _swp_offset(swap);
+}
+
+static inline bool swp_entry_same(swp_entry_t s1, swp_entry_t s2)
+{
+ return swp_type(s1) == swp_type(s2) && swp_offset(s1) == swp_offset(s2);
+}
+
+static inline bool swp_page_same(swp_entry_t swap, struct page *page)
+{
+ swp_entry_t entry = { .val = page_private(page) };
+
+ VM_BUG_ON(!PageSwapCache(page));
+
+ return swp_entry_same(swap, entry);
+}
+
+static inline bool swp_radix_same(swp_entry_t swap, void *radix)
+{
+ return radix_tree_exceptional_entry(radix) &&
+ swp_entry_same(swap, radix_to_swp_entry(radix));
+}
+
+#else /* CONFIG_MM_METRICS */
+
+#define swp_offset(swap) _swp_offset(swap)
+
+#define swp_entry_same(s1, s2) ((s1).val == (s2).val)
+
+#define swp_page_same(swap, page) ((swap).val == page_private(page))
+
+#define swp_radix_same(swap, radix) (swp_to_radix_entry(swap) == (radix))
+
+#endif /* CONFIG_MM_METRICS */
+
#ifdef CONFIG_MIGRATION
static inline swp_entry_t make_migration_entry(struct page *page, int write)
{
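A standalone sketch of the 25/25-bit split described above: pack a
seconds-granularity timestamp into the high bits of a swap offset, then
mask it back off on read, as swp_offset() does when swap_refault_enabled:

#include <stdio.h>
#include <stdint.h>

#define SWP_TIME_BITS	25
#define SWP_OFFSET_BITS	25
#define OFFSET_MASK	((1ULL << SWP_OFFSET_BITS) - 1)

static uint64_t pack(uint64_t offset, uint64_t uptime_sec)
{
	return (uptime_sec << SWP_OFFSET_BITS) | (offset & OFFSET_MASK);
}

int main(void)
{
	uint64_t raw = pack(12345, 86400);	/* one day of uptime */

	printf("offset:    %llu\n", (unsigned long long)(raw & OFFSET_MASK));
	printf("timestamp: %llu s\n",
	       (unsigned long long)(raw >> SWP_OFFSET_BITS));
	return 0;
}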
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9c452f6db43835..2839d624d5ee0b 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -238,6 +238,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -351,6 +354,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
diff --git a/include/linux/tc.h b/include/linux/tc.h
index f92511e57cdbb6..a60639f3796399 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -84,6 +84,7 @@ struct tc_dev {
device. */
struct device dev; /* Generic device interface. */
struct resource resource; /* Address space of this device. */
+ u64 dma_mask; /* DMA addressable range. */
char vendor[9];
char name[9];
char firmware[9];
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index d3076ad4ae1250..9e876e126f649e 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -282,10 +282,9 @@ struct tcp_sock {
struct sk_buff* lost_skb_hint;
struct sk_buff *retransmit_skb_hint;
- /* OOO segments go in this list. Note that socket lock must be held,
- * as we do not use sk_buff_head lock.
- */
- struct sk_buff_head out_of_order_queue;
+ /* OOO segments go in this rbtree. Socket lock must be held. */
+ struct rb_root out_of_order_queue;
+ struct sk_buff *ooo_last_skb; /* cache rb_last(out_of_order_queue) */
/* SACKs data, these 2 need to be together (see tcp_options_write) */
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
@@ -327,7 +326,7 @@ struct tcp_sock {
/* Receiver queue space */
struct {
- int space;
+ u32 space;
u32 seq;
u32 time;
} rcvq_space;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index ff307b548ed3c9..ff055b90ed0e6a 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,57 +9,24 @@
#include <linux/types.h>
#include <linux/bug.h>
+#include <linux/restart_block.h>
-struct timespec;
-struct compat_timespec;
-
+#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
*/
-struct restart_block {
- long (*fn)(struct restart_block *);
- union {
- /* For futex_wait and futex_wait_requeue_pi */
- struct {
- u32 __user *uaddr;
- u32 val;
- u32 flags;
- u32 bitset;
- u64 time;
- u32 __user *uaddr2;
- } futex;
- /* For nanosleep */
- struct {
- clockid_t clockid;
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
- struct compat_timespec __user *compat_rmtp;
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
#endif
- u64 expires;
- } nanosleep;
- /* For poll */
- struct {
- struct pollfd __user *ufds;
- int nfds;
- int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
- } poll;
- };
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
#include <linux/bitops.h>
#include <asm/thread_info.h>
#ifdef __KERNEL__
-#ifdef CONFIG_DEBUG_STACK_USAGE
-# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
-#else
-# define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK)
-#endif
+#define THREADINFO_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
/*
* flag set/clear/test wrappers
@@ -104,46 +71,29 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
-#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
-/*
- * An arch can define its own version of set_restore_sigmask() to get the
- * job done however works, with or without TIF_RESTORE_SIGMASK.
- */
-#define HAVE_SET_RESTORE_SIGMASK 1
-
-/**
- * set_restore_sigmask() - make sure saved_sigmask processing gets done
- *
- * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
- * will run before returning to user mode, to process the flag. For
- * all callers, TIF_SIGPENDING is already set or it's no harm to set
- * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
- * arch code will notice on return to user mode, in case those bits
- * are scarce. We set TIF_SIGPENDING here to ensure that the arch
- * signal code always gets run when TIF_RESTORE_SIGMASK is set.
- */
-static inline void set_restore_sigmask(void)
-{
- set_thread_flag(TIF_RESTORE_SIGMASK);
- WARN_ON(!test_thread_flag(TIF_SIGPENDING));
-}
-static inline void clear_restore_sigmask(void)
-{
- clear_thread_flag(TIF_RESTORE_SIGMASK);
-}
-static inline bool test_restore_sigmask(void)
+#ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES
+static inline int arch_within_stack_frames(const void * const stack,
+ const void * const stackend,
+ const void *obj, unsigned long len)
{
- return test_thread_flag(TIF_RESTORE_SIGMASK);
+ return 0;
}
-static inline bool test_and_clear_restore_sigmask(void)
+#endif
+
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+ bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
{
- return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
+ __check_object_size(ptr, n, to_user);
}
-#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
-
-#ifndef HAVE_SET_RESTORE_SIGMASK
-#error "no set_restore_sigmask() provided and default one won't work"
-#endif
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+ bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
#endif /* __KERNEL__ */
diff --git a/include/linux/throttler.h b/include/linux/throttler.h
new file mode 100644
index 00000000000000..c43ab1acd96b51
--- /dev/null
+++ b/include/linux/throttler.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_THROTTLER_H__
+#define __LINUX_THROTTLER_H__
+
+struct throttler;
+
+extern struct throttler *throttler_setup(struct device *dev);
+extern void throttler_teardown(struct throttler *thr);
+extern void throttler_set_level(struct throttler *thr, unsigned int level);
+
+#ifdef CONFIG_THROTTLER_DEBUG
+#define thr_dbg(thr, fmt, ...) dev_info(thr->dev, fmt, ##__VA_ARGS__)
+#else
+#define thr_dbg(thr, fmt, ...) dev_dbg(thr->dev, fmt, ##__VA_ARGS__)
+#endif
+
+#define thr_info(thr, fmt, ...) dev_info(thr->dev, fmt, ##__VA_ARGS__)
+#define thr_warn(thr, fmt, ...) dev_warn(thr->dev, fmt, ##__VA_ARGS__)
+#define thr_err(thr, fmt, ...) dev_err(thr->dev, fmt, ##__VA_ARGS__)
+
+#endif /* __LINUX_THROTTLER_H__ */
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 558129af828a7e..9442423979c1c6 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,6 +2,9 @@
#define __LINUX_UACCESS_H__
#include <linux/sched.h>
+
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
+
#include <asm/uaccess.h>
static __always_inline void pagefault_disabled_inc(void)
@@ -111,4 +114,11 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define probe_kernel_address(addr, retval) \
probe_kernel_read(&retval, addr, sizeof(retval))
+#ifndef user_access_begin
+#define user_access_begin() do { } while (0)
+#define user_access_end() do { } while (0)
+#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
+#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#endif
+
#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 07cb73b80fad97..7a4985c68d5c46 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -334,11 +334,11 @@ struct usb_host_bos {
};
int __usb_get_extra_descriptor(char *buffer, unsigned size,
- unsigned char type, void **ptr);
+ unsigned char type, void **ptr, size_t min);
#define usb_get_extra_descriptor(ifpoint, type, ptr) \
__usb_get_extra_descriptor((ifpoint)->extra, \
(ifpoint)->extralen, \
- type, (void **)ptr)
+ type, (void **)ptr, sizeof(**(ptr)))
/* ----------------------------------------------------------------------- */
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index d4e8b05d476f67..ebb50bf5f91fcb 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -188,6 +188,13 @@ struct uac2_iso_endpoint_descriptor {
#define UAC2_CONTROL_DATA_OVERRUN (3 << 2)
#define UAC2_CONTROL_DATA_UNDERRUN (3 << 4)
+/* 5.2.5.4.2 Connector Control Parameter Block */
+struct uac2_connectors_ctl_blk {
+ __u8 bNrChannels;
+ __le32 bmChannelConfig;
+ __u8 iChannelNames;
+} __attribute__((packed));
+
/* 6.1 Interrupt Data Message */
#define UAC2_INTERRUPT_DATA_MSG_VENDOR (1 << 0)
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index a8959aaba0ae3d..a710e28b521506 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -221,6 +221,12 @@ struct uac3_iso_endpoint_descriptor {
__le16 wLockDelay;
} __attribute__((packed));
+/* 5.2.1.6.1 INSERTION CONTROL PARAMETER BLOCK */
+struct uac3_insertion_ctl_blk {
+ __u8 bSize;
+ __u8 bmConInserted;
+} __attribute__ ((packed));
+
/* 6.1 INTERRUPT DATA MESSAGE */
struct uac3_interrupt_data_msg {
__u8 bInfo;
@@ -392,4 +398,38 @@ struct uac3_interrupt_data_msg {
#define UAC3_AC_ACTIVE_INTERFACE_CONTROL 0x01
#define UAC3_AC_POWER_DOMAIN_CONTROL 0x02
+/* A.23.5 TERMINAL CONTROL SELECTORS */
+#define UAC3_TE_UNDEFINED 0x00
+#define UAC3_TE_INSERTION 0x01
+#define UAC3_TE_OVERLOAD 0x02
+#define UAC3_TE_UNDERFLOW 0x03
+#define UAC3_TE_OVERFLOW 0x04
+#define UAC3_TE_LATENCY 0x05
+
+/* BADD predefined Unit/Terminal values */
+#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */
+#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */
+#define UAC3_BADD_OT_ID3 3 /* Output Terminal ID3: bTerminalID = 3 */
+#define UAC3_BADD_IT_ID4 4 /* Input Terminal ID4: bTerminalID = 4 */
+#define UAC3_BADD_FU_ID5 5 /* Feature Unit ID5: bUnitID = 5 */
+#define UAC3_BADD_OT_ID6 6 /* Output Terminal ID6: bTerminalID = 6 */
+#define UAC3_BADD_FU_ID7 7 /* Feature Unit ID7: bUnitID = 7 */
+#define UAC3_BADD_MU_ID8 8 /* Mixer Unit ID8: bUnitID = 8 */
+#define UAC3_BADD_CS_ID9 9 /* Clock Source Entity ID9: bClockID = 9 */
+#define UAC3_BADD_PD_ID10 10 /* Power Domain ID10: bPowerDomainID = 10 */
+#define UAC3_BADD_PD_ID11 11 /* Power Domain ID11: bPowerDomainID = 11 */
+
+/* BADD wMaxPacketSize of AS endpoints */
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_16 0x0060
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_16 0x0062
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_MONO_24 0x0090
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_MONO_24 0x0093
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_16 0x00C0
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_16 0x00C4
+#define UAC3_BADD_EP_MAXPSIZE_SYNC_STEREO_24 0x0120
+#define UAC3_BADD_EP_MAXPSIZE_ASYNC_STEREO_24 0x0126
+
+/* BADD sample rate is always fixed to 48kHz */
+#define UAC3_BADD_SAMPLING_RATE 48000
+
#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 336969c53e3f7e..a9f6c6542be75c 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -70,6 +70,12 @@ struct giveback_urb_bh {
struct usb_host_endpoint *completing_ep;
};
+enum usb_dev_authorize_policy {
+ USB_DEVICE_AUTHORIZE_NONE = 0,
+ USB_DEVICE_AUTHORIZE_ALL = 1,
+ USB_DEVICE_AUTHORIZE_INTERNAL = 2,
+};
+
struct usb_hcd {
/*
@@ -115,7 +121,6 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
-#define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -140,8 +145,7 @@ struct usb_hcd {
* or they require explicit user space authorization; this bit is
* settable through /sys/class/usb_host/X/authorized_default
*/
-#define HCD_DEV_AUTHORIZED(hcd) \
- ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED))
+ enum usb_dev_authorize_policy dev_policy;
/* Flags that get set only during HCD registration or removal. */
unsigned rh_registered:1;/* is root hub registered? */
@@ -313,6 +317,7 @@ struct hc_driver {
int (*bus_suspend)(struct usb_hcd *);
int (*bus_resume)(struct usb_hcd *);
int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
+ unsigned long (*get_resuming_ports)(struct usb_hcd *);
/* force handover of high-speed port to full-speed companion */
void (*relinquish_port)(struct usb_hcd *, int);
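A minimal sketch of how the tristate policy replaces the old one-bit flag (hypothetical helper; the is_internal classification is an assumption):

static bool usb_dev_authorized_by_default(struct usb_hcd *hcd, bool is_internal)
{
        switch (hcd->dev_policy) {
        case USB_DEVICE_AUTHORIZE_NONE:
                return false;
        case USB_DEVICE_AUTHORIZE_ALL:
                return true;
        case USB_DEVICE_AUTHORIZE_INTERNAL:
                return is_internal;     /* only non-removable devices */
        }
        return false;
}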
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index ea4f81c2a6d5e0..b9f9f46dba3c54 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -56,6 +56,12 @@
*/
#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+/*
+ * Device needs to be disconnected before suspend to prevent spurious
+ * wakeup.
+ */
+#define USB_QUIRK_DISCONNECT_SUSPEND BIT(12)
+
/* Device needs a pause after every control message. */
#define USB_QUIRK_DELAY_CTRL_MSG BIT(13)
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 8ef3a61fdc74c3..36e91a592d7383 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -25,6 +25,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
PGFREE, PGACTIVATE, PGDEACTIVATE,
PGFAULT, PGMAJFAULT,
+ PGMAJFAULT_S, PGMAJFAULT_A, PGMAJFAULT_F,
FOR_ALL_ZONES(PGREFILL),
FOR_ALL_ZONES(PGSTEAL_KSWAPD),
FOR_ALL_ZONES(PGSTEAL_DIRECT),
@@ -38,6 +39,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
PAGEOUTRUN, ALLOCSTALL, PGROTATED,
DROP_PAGECACHE, DROP_SLAB,
+ OOM_KILL,
#ifdef CONFIG_NUMA_BALANCING
NUMA_PTE_UPDATES,
NUMA_HUGE_PTE_UPDATES,
@@ -88,7 +90,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
#ifdef CONFIG_DEBUG_VM_VMACACHE
VMACACHE_FIND_CALLS,
VMACACHE_FIND_HITS,
- VMACACHE_FULL_FLUSHES,
#endif
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index c3fa0fd4394995..4f58ff2dacd698 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -15,7 +15,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
memset(tsk->vmacache, 0, sizeof(tsk->vmacache));
}
-extern void vmacache_flush_all(struct mm_struct *mm);
extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
unsigned long addr);
@@ -29,10 +28,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
static inline void vmacache_invalidate(struct mm_struct *mm)
{
mm->vmacache_seqnum++;
-
- /* deal with overflows */
- if (unlikely(mm->vmacache_seqnum == 0))
- vmacache_flush_all(mm);
}
#endif /* __LINUX_VMACACHE_H */
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index 57ec319a7f44ff..2fe6e86a907589 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -32,8 +32,10 @@ struct cec_notifier;
#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
/**
- * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * cec_notifier_get_conn - find or create a new cec_notifier for the given
+ * device and connector tuple.
* @dev: device that sends the events.
+ * @conn: the connector name from which the event occurs
*
* If a notifier for device @dev already exists, then increase the refcount
* and return that notifier.
@@ -43,7 +45,8 @@ struct cec_notifier;
*
* Return NULL if the memory could not be allocated.
*/
-struct cec_notifier *cec_notifier_get(struct device *dev);
+struct cec_notifier *cec_notifier_get_conn(struct device *dev,
+ const char *conn);
/**
* cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
@@ -97,7 +100,8 @@ void cec_register_cec_notifier(struct cec_adapter *adap,
struct cec_notifier *notifier);
#else
-static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
+ const char *conn)
{
/* A non-NULL pointer is expected on success */
return (struct cec_notifier *)0xdeadfeed;
@@ -133,6 +137,23 @@ static inline void cec_register_cec_notifier(struct cec_adapter *adap,
#endif
/**
+ * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * @dev: device that sends the events.
+ *
+ * If a notifier for device @dev already exists, then increase the refcount
+ * and return that notifier.
+ *
+ * If it doesn't exist, then allocate a new notifier struct and return a
+ * pointer to that new struct.
+ *
+ * Return NULL if the memory could not be allocated.
+ */
+static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+{
+ return cec_notifier_get_conn(dev, NULL);
+}
+
+/**
* cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
*
* @n: the CEC notifier
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 0659dae31aa0b1..cafd80b764914b 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -683,6 +683,22 @@ struct v4l2_ctrl *v4l2_ctrl_find(struct v4l2_ctrl_handler *hdl, u32 id);
void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
/**
+ * __v4l2_ctrl_grab() - Unlocked variant of v4l2_ctrl_grab.
+ *
+ * @ctrl: The control to (de)activate.
+ * @grabbed: True if the control should become grabbed.
+ *
+ * This sets or clears the V4L2_CTRL_FLAG_GRABBED flag atomically.
+ * Does nothing if @ctrl == NULL.
+ * The V4L2_EVENT_CTRL event will be generated afterwards.
+ * This will usually be called when starting or stopping streaming in the
+ * driver.
+ *
+ * This function assumes that the control handler is locked by the caller.
+ */
+void __v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
+
+/**
* v4l2_ctrl_grab() - Mark the control as grabbed or not grabbed.
* @ctrl: The control to (de)activate.
* @grabbed: True if the control should become grabbed.
@@ -696,7 +712,15 @@ void v4l2_ctrl_activate(struct v4l2_ctrl *ctrl, bool active);
* This function assumes that the control handler is not locked and will
* take the lock itself.
*/
-void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed);
+static inline void v4l2_ctrl_grab(struct v4l2_ctrl *ctrl, bool grabbed)
+{
+ if (!ctrl)
+ return;
+
+ v4l2_ctrl_lock(ctrl);
+ __v4l2_ctrl_grab(ctrl, grabbed);
+ v4l2_ctrl_unlock(ctrl);
+}
/**
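A short usage sketch of the grab-on-streaming pattern the comments describe (hypothetical driver and field names):

/* hypothetical driver snippet */
static int xxx_start_streaming(struct xxx_priv *priv)
{
        /* lock out userspace control changes while the pipeline runs */
        v4l2_ctrl_grab(priv->exposure, true);
        return 0;
}

static void xxx_stop_streaming(struct xxx_priv *priv)
{
        v4l2_ctrl_grab(priv->exposure, false);
}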
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index 803516775162d1..4fdcd0d807d703 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -43,6 +43,7 @@ struct v4l2_fh {
wait_queue_head_t wait;
struct list_head subscribed; /* Subscribed events */
struct list_head available; /* Dequeueable event */
+ struct mutex subscribe_lock;
unsigned int navailable;
u32 sequence;
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f2758964ce6f89..e6675f79b9254a 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -62,7 +62,8 @@ struct vsock_sock {
struct list_head pending_links;
struct list_head accept_queue;
bool rejected;
- struct delayed_work dwork;
+ struct delayed_work connect_work;
+ struct delayed_work pending_work;
struct delayed_work close_work;
bool close_work_scheduled;
u32 peer_shutdown;
@@ -75,7 +76,6 @@ struct vsock_sock {
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index e602f8177ebfbf..b507ce2b19523e 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -199,6 +199,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
void __ax25_put_route(ax25_route *ax25_rt);
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+ read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+ read_unlock(&ax25_route_lock);
+}
+
static inline void ax25_put_route(ax25_route *ax25_rt)
{
if (atomic_dec_and_test(&ax25_rt->refcount))
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index ef83d078025c3a..d6077593ca55b3 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -183,6 +183,14 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_DIAG,
+
+ /* When this quirk is set, hw_reset() will be run to reset the
+ * hardware after a certain number of commands (currently 5)
+ * time out because the device fails to respond.
+ *
+ * This quirk should be set before hci_register_dev is called.
+ */
+ HCI_QUIRK_HW_RESET_ON_TIMEOUT,
};
/* HCI device flags */
@@ -876,6 +884,7 @@ struct hci_cp_sniff_subrate {
} __packed;
#define HCI_OP_SET_EVENT_MASK 0x0c01
+#define HCI_SET_EVENT_MASK_SIZE 8
#define HCI_OP_RESET 0x0c03
@@ -1980,6 +1989,9 @@ struct hci_ev_si_security {
__u8 incoming;
} __packed;
+/* vendor events */
+#define HCI_EV_VENDOR 0xff
+
/* ---- HCI Packet structures ---- */
#define HCI_COMMAND_HDR_SIZE 3
#define HCI_EVENT_HDR_SIZE 2
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 10ff3115fc1703..3b9a769d754a47 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -234,6 +234,7 @@ struct hci_dev {
__u8 dev_name[HCI_MAX_NAME_LENGTH];
__u8 short_name[HCI_MAX_SHORT_NAME_LENGTH];
__u8 eir[HCI_MAX_EIR_LENGTH];
+ __u8 event_mask[HCI_SET_EVENT_MASK_SIZE];
__u16 appearance;
__u8 dev_class[3];
__u8 major_class;
@@ -320,6 +321,7 @@ struct hci_dev {
unsigned int acl_cnt;
unsigned int sco_cnt;
unsigned int le_cnt;
+ unsigned int timeout_cnt;
unsigned int count_adv_change_in_progress;
unsigned int count_scan_change_in_progress;
@@ -444,6 +446,7 @@ struct hci_dev {
int (*post_init)(struct hci_dev *hdev);
int (*set_diag)(struct hci_dev *hdev, bool enable);
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+ void (*hw_reset)(struct hci_dev *hdev);
};
#define HCI_PHY_HANDLE(handle) (handle & 0xff)
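Taken together with HCI_QUIRK_HW_RESET_ON_TIMEOUT above, a driver would opt in roughly like this (hypothetical probe fragment; the callback name is an assumption):

/* hypothetical vendor probe fragment */
hdev->hw_reset = my_vendor_hw_reset;    /* invoked after repeated command timeouts */
set_bit(HCI_QUIRK_HW_RESET_ON_TIMEOUT, &hdev->quirks);
err = hci_register_dev(hdev);           /* the quirk must be set before this call */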
@@ -922,7 +925,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
u16 conn_timeout);
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role);
+ u8 role, bdaddr_t *direct_rpa);
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
u8 sec_level, u8 auth_type);
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
diff --git a/include/net/bluetooth/hci_le_splitter.h b/include/net/bluetooth/hci_le_splitter.h
new file mode 100644
index 00000000000000..6675d1930b0c01
--- /dev/null
+++ b/include/net/bluetooth/hci_le_splitter.h
@@ -0,0 +1,100 @@
+/*
+ HCI LE splitter - two stacks - one chip
+ Copyright (C) 2017 Google, Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#ifndef __BT_HCI_LE_SPLITTER_H
+#define __BT_HCI_LE_SPLITTER_H
+
+#include <linux/types.h>
+
+#define HCI_LE_SPLIT_MAX_LE_CONNS 64
+#define HCI_LE_SPLITTER_BUFFER_TARGET 3
+
+struct hci_dev;
+struct sk_buff;
+
+enum {
+ SPLITTER_STATE_NOT_SET,
+ SPLITTER_STATE_DISABLED,
+ SPLITTER_STATE_ENABLED,
+};
+
+#ifdef CONFIG_BT_HCI_LE_SPLITTER
+
+int hci_le_splitter_sysfs_init(void);
+void hci_le_splitter_init_start(struct hci_dev *hdev);
+int hci_le_splitter_init_done(struct hci_dev *hdev);
+void hci_le_splitter_init_fail(struct hci_dev *hdev);
+void hci_le_splitter_deinit(struct hci_dev *hdev);
+int hci_le_splitter_get_enabled_state(void);
+
+/* return true to let bluez have it */
+bool hci_le_splitter_should_allow_bluez_rx(struct hci_dev *hdev, struct sk_buff *skb);
+
+/* return true to allow transmission */
+bool hci_le_splitter_should_allow_bluez_tx(struct hci_dev *hdev, struct sk_buff *skb);
+
+
+#else
+
+static inline int hci_le_splitter_init_done(struct hci_dev *hdev)
+{
+ return 0;
+}
+
+static inline void hci_le_splitter_init_fail(struct hci_dev *hdev)
+{
+
+}
+
+static inline void hci_le_splitter_deinit(struct hci_dev *hdev)
+{
+
+}
+
+static inline bool hci_le_splitter_should_allow_bluez_rx(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return true;
+}
+
+static inline bool hci_le_splitter_should_allow_bluez_tx(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ return true;
+}
+
+static inline void hci_le_splitter_init_start(struct hci_dev *hdev)
+{
+
+}
+
+static inline int hci_le_splitter_sysfs_init(void)
+{
+ return 0;
+}
+
+static inline int hci_le_splitter_get_enabled_state(void)
+{
+ return SPLITTER_STATE_DISABLED;
+}
+
+#endif
+
+#endif /* __HCI_LE_SPLITTER_H */
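A rough sketch of how the core receive path is expected to consult these hooks (hypothetical call site; the core-side changes are not shown in this header):

/* hypothetical RX-path call site */
if (!hci_le_splitter_should_allow_bluez_rx(hdev, skb)) {
        /* the LE-only stack owns this event; bluez never sees it */
        return;
}
/* otherwise fall through to normal bluez event processing */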
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 2ca6e3bcccb9fc..64fa07b59ca873 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -612,6 +612,82 @@ struct mgmt_cp_set_advertising_intervals {
} __packed;
#define MGMT_SET_ADVERTISING_INTERVALS_SIZE 4
+#define MGMT_OP_SET_EVENT_MASK 0x0045
+struct mgmt_cp_set_event_mask {
+ /*
+ * The mask variable enables modifying a subset of the 'event mask'.
+ * Bits that are set to 0 in mask[] should be ignored in events[].
+ */
+ uint8_t mask[HCI_SET_EVENT_MASK_SIZE];
+ uint8_t events[HCI_SET_EVENT_MASK_SIZE];
+} __packed;
+#define MGMT_SET_EVENT_MASK_CP_SIZE (2 * HCI_SET_EVENT_MASK_SIZE)
+enum mgmt_set_event_mask_byte_0 {
+ MGMT_EVENT_MASK_INQUIRY_COMPLETE = (1 << 0), // bit 0
+ MGMT_EVENT_MASK_INQUIRY_RESULT = (1 << 1),
+ MGMT_EVENT_MASK_CONNECTION_COMPLETE = (1 << 2),
+ MGMT_EVENT_MASK_CONNECTION_REQUEST = (1 << 3),
+ MGMT_EVENT_MASK_DISCONNECTION_COMPLETE = (1 << 4),
+ MGMT_EVENT_MASK_AUTH_COMPLETE = (1 << 5),
+ MGMT_EVENT_MASK_RMT_NAME_REQ_COMPLETE = (1 << 6),
+ MGMT_EVENT_MASK_ENCRYPTION_CHANGE = (1 << 7)
+};
+enum mgmt_set_event_mask_byte_1 {
+ MGMT_EVENT_MASK_CHNG_CON_LINK_KEY_COMP = (1 << 0), // bit 8
+ MGMT_EVENT_MASK_MASTER_LINK_KEY_COMP = (1 << 1),
+ MGMT_EVENT_MASK_READ_RMT_SUPPORT_FEAT_COMP = (1 << 2),
+ MGMT_EVENT_MASK_READ_RMT_VER_INFO_COMP = (1 << 3),
+ MGMT_EVENT_MASK_QOS_SETUP_COMPLETE = (1 << 4),
+ MGMT_EVENT_MASK_HARDWARE_ERROR = (1 << 7)
+};
+enum mgmt_set_event_mask_byte_2 {
+ MGMT_EVENT_MASK_FLUSH_OCCURRED = (1 << 0), // bit 16
+ MGMT_EVENT_MASK_ROLE_CHANGE = (1 << 1),
+ MGMT_EVENT_MASK_MODE_CHANGE = (1 << 3),
+ MGMT_EVENT_MASK_RETURN_LINK_KEYS = (1 << 4),
+ MGMT_EVENT_MASK_PIN_CODE_REQUEST = (1 << 5),
+ MGMT_EVENT_MASK_LINK_KEY_REQUEST = (1 << 6),
+ MGMT_EVENT_MASK_LINK_KEY_NOTIFICATION = (1 << 7)
+};
+enum mgmt_set_event_mask_byte_3 {
+ MGMT_EVENT_MASK_LOOPBACK_COMMAND = (1 << 0), // bit 24
+ MGMT_EVENT_MASK_DATA_BUFFER_OVERFLOW = (1 << 1),
+ MGMT_EVENT_MASK_MAX_SLOT_CHANGE = (1 << 2),
+ MGMT_EVENT_MASK_READ_CLOCK_OFFSET_COMP = (1 << 3),
+ MGMT_EVENT_MASK_CON_PKT_TYPE_CHANGE = (1 << 4),
+ MGMT_EVENT_MASK_QOS_VIOLATION = (1 << 5),
+ MGMT_EVENT_MASK_PAGE_SCAN_MODE_CHANGE = (1 << 6),
+ MGMT_EVENT_MASK_PAGE_SCAN_REP_MODE_CHANGE = (1 << 7)
+};
+enum mgmt_set_event_mask_byte_4 {
+ MGMT_EVENT_MASK_FLOW_SPEC_COMPLETE = (1 << 0), // bit 32
+ MGMT_EVENT_MASK_IQUIRY_RSULT_WITH_RSSI = (1 << 1),
+ MGMT_EVENT_MASK_READ_RMT_EXTENDED_FEAT = (1 << 2)
+};
+enum mgmt_set_event_mask_byte_5 {
+ MGMT_EVENT_MASK_SYNC_CON_COMPLETE = (1 << 3), // bit 43
+ MGMT_EVENT_MASK_SYNC_CON_CHANGED = (1 << 4),
+ MGMT_EVENT_MASK_SNIFF_SUBRATING = (1 << 5),
+ MGMT_EVENT_MASK_EXTENDED_INQUIRY_RESULT = (1 << 6),
+ MGMT_EVENT_MASK_ENCRYPTION_KEY_REFRESH = (1 << 7)
+};
+enum mgmt_set_event_mask_byte_6 {
+ MGMT_EVENT_MASK_IO_CAPABILITY_REQUEST = (1 << 1), // bit 48
+ MGMT_EVENT_MASK_IO_CAPABILITY_RESPONSE = (1 << 2),
+ MGMT_EVENT_MASK_USER_CONFIRMATION_REQUEST = (1 << 3),
+ MGMT_EVENT_MASK_USER_PASSKEY_REQUEST = (1 << 4),
+ MGMT_EVENT_MASK_REMOTE_OOB_DATA_REQUEST = (1 << 5),
+ MGMT_EVENT_MASK_SIMPLE_PAIRING_COMPLETE = (1 << 6),
+ MGMT_EVENT_MASK_LSTO_CHANGED = (1 << 7)
+};
+enum mgmt_set_event_mask_byte_7 {
+ MGMT_EVENT_MASK_ENHACNED_FLUSH_COMPLETE = (1 << 1), // bit 56
+ MGMT_EVENT_MASK_USER_PASSKEY_NOTIFICATION = (1 << 3),
+ MGMT_EVENT_MASK_KEY_PRESS_NOTIFICATION = (1 << 4),
+ MGMT_EVENT_MASK_RMT_HOST_SUPPORTED_FEAT = (1 << 5),
+ MGMT_EVENT_MASK_LE_META = (1 << 6),
+};
+
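A sketch of the masked update described by the comment above (hypothetical handler fragment; cp points at a received struct mgmt_cp_set_event_mask):

int i;

for (i = 0; i < HCI_SET_EVENT_MASK_SIZE; i++)
        hdev->event_mask[i] = (hdev->event_mask[i] & ~cp->mask[i]) |
                              (cp->events[i] & cp->mask[i]);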
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 93abe5f6188d29..d5abd3a80896d2 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -146,12 +146,6 @@ struct bond_parm_tbl {
int mode;
};
-struct netdev_notify_work {
- struct delayed_work work;
- struct net_device *dev;
- struct netdev_bonding_info bonding_info;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -177,6 +171,7 @@ struct slave {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
index cf6c74550baa53..cd856b7a11f54c 100644
--- a/include/net/gro_cells.h
+++ b/include/net/gro_cells.h
@@ -19,22 +19,30 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
struct gro_cell *cell;
struct net_device *dev = skb->dev;
+ rcu_read_lock();
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto drop;
+
if (!gcells->cells || skb_cloned(skb) || !(dev->features & NETIF_F_GRO)) {
netif_rx(skb);
- return;
+ goto unlock;
}
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
- return;
+ goto unlock;
}
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
+
+unlock:
+ rcu_read_unlock();
}
/* called under BH context */
@@ -84,6 +92,7 @@ static inline void gro_cells_destroy(struct gro_cells *gcells)
for_each_possible_cpu(i) {
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
+ napi_disable(&cell->napi);
netif_napi_del(&cell->napi);
__skb_queue_purge(&cell->napi_skbs);
}
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 970028e1338280..06ceb483475d1d 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -22,6 +22,7 @@
#include <net/inet_sock.h>
#include <net/snmp.h>
+#include <net/ip.h>
struct icmp_err {
int errno;
@@ -39,7 +40,13 @@ struct net_proto_family;
struct sk_buff;
struct net;
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
int icmp_rcv(struct sk_buff *skb);
void icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h
index 0dc0a51da38faa..dce2d586d9cecb 100644
--- a/include/net/inet_ecn.h
+++ b/include/net/inet_ecn.h
@@ -128,7 +128,8 @@ static inline int IP6_ECN_set_ce(struct sk_buff *skb, struct ipv6hdr *iph)
to = from | htonl(INET_ECN_CE << 20);
*(__be32 *)iph = to;
if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->csum = csum_add(csum_sub(skb->csum, from), to);
+ skb->csum = csum_add(csum_sub(skb->csum, (__force __wsum)from),
+ (__force __wsum)to);
return 1;
}
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index c26a6e4dc30625..6260ec1461420d 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,13 +1,19 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
+#include <linux/rhashtable.h>
+
struct netns_frags {
- /* Keep atomic mem on separate cachelines in structs that include it */
- atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
+ long high_thresh;
+ long low_thresh;
int timeout;
- int high_thresh;
- int low_thresh;
+ struct inet_frags *f;
+
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_long_t mem ____cacheline_aligned_in_smp;
};
/**
@@ -23,74 +29,68 @@ enum {
INET_FRAG_COMPLETE = BIT(2),
};
+struct frag_v4_compare_key {
+ __be32 saddr;
+ __be32 daddr;
+ u32 user;
+ u32 vif;
+ __be16 id;
+ u16 protocol;
+};
+
+struct frag_v6_compare_key {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 user;
+ __be32 id;
+ u32 iif;
+};
+
/**
* struct inet_frag_queue - fragment queue
*
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
* @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run"; see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for deferred freeing
*/
struct inet_frag_queue {
- spinlock_t lock;
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
struct timer_list timer;
- struct hlist_node list;
+ spinlock_t lock;
atomic_t refcnt;
- struct sk_buff *fragments;
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
__u8 flags;
u16 max_size;
- struct netns_frags *net;
- struct hlist_node list_evictor;
-};
-
-#define INETFRAGS_HASHSZ 1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- * struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH 128
-
-struct inet_frag_bucket {
- struct hlist_head chain;
- spinlock_t chain_lock;
+ struct netns_frags *net;
+ struct rcu_head rcu;
};
struct inet_frags {
- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
-
- struct work_struct frags_work;
- unsigned int next_bucket;
- unsigned long last_rebuild_jiffies;
- bool rebuild;
-
- /* The first call to hashfn is responsible to initialize
- * rnd. This is best done with net_get_random_once.
- *
- * rnd_seqlock is used to let hash insertion detect
- * when it needs to re-lookup the hash chain to use.
- */
- u32 rnd;
- seqlock_t rnd_seqlock;
int qsize;
- unsigned int (*hashfn)(const struct inet_frag_queue *);
- bool (*match)(const struct inet_frag_queue *q,
- const void *arg);
void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
@@ -98,56 +98,47 @@ struct inet_frags {
void (*frag_expire)(unsigned long data);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
};
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
{
- atomic_set(&nf->mem, 0);
+ atomic_long_set(&nf->mem, 0);
+ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
}
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+void inet_frags_exit_net(struct netns_frags *nf);
-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key, unsigned int hash);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (atomic_dec_and_test(&q->refcnt))
- inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
- return !hlist_unhashed(&q->list_evictor);
+ inet_frag_destroy(q);
}
/* Memory Tracking Functions. */
-static inline int frag_mem_limit(struct netns_frags *nf)
-{
- return atomic_read(&nf->mem);
-}
-
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline long frag_mem_limit(const struct netns_frags *nf)
{
- atomic_sub(i, &nf->mem);
+ return atomic_long_read(&nf->mem);
}
-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
{
- atomic_add(i, &nf->mem);
+ atomic_long_sub(val, &nf->mem);
}
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
{
- return atomic_read(&nf->mem);
+ atomic_long_add(val, &nf->mem);
}
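A hedged sketch of per-namespace setup under the reworked API (hypothetical protocol glue; ip4_frags and the threshold defaults are assumptions):

static int my_frags_init_net(struct net *net)
{
        net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
        net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
        net->ipv4.frags.timeout     = IP_FRAG_TIME;
        net->ipv4.frags.f = &ip4_frags;         /* supplies rhash_params */

        return inet_frags_init_net(&net->ipv4.frags);   /* can now fail */
}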
/* RFC 3168 support :
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 235c7811a86a1d..408d76f47bd29e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -40,6 +40,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ u32 n_redirects;
unsigned long rate_last;
union {
struct list_head gc_list;
diff --git a/include/net/ip.h b/include/net/ip.h
index 10664a684acf3d..2a1b897f1e79b8 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -525,7 +525,6 @@ static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *s
return skb;
}
#endif
-int ip_frag_mem(struct net *net);
/*
* Functions provided by ip_forward.c
@@ -548,6 +547,8 @@ static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
}
void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 3afb7c4c70988c..f6ff83b2ac8704 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -200,7 +200,7 @@ int fib_table_insert(struct fib_table *, struct fib_config *);
int fib_table_delete(struct fib_table *, struct fib_config *);
int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
struct netlink_callback *cb);
-int fib_table_flush(struct fib_table *table);
+int fib_table_flush(struct fib_table *table, bool flush_all);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
@@ -322,6 +322,7 @@ int ip_fib_check_default(__be32 gw, struct net_device *dev);
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net *net, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
extern u32 fib_multipath_secret __read_mostly;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 84f0d06024333a..c07cf9596b6fb0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -320,13 +320,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
idev->cnf.accept_ra;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv6.frags);
-}
-#endif
-
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
@@ -505,17 +498,8 @@ enum ip6_defrag_users {
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
-struct ip6_create_arg {
- __be32 id;
- u32 user;
- const struct in6_addr *src;
- const struct in6_addr *dst;
- int iif;
- u8 ecn;
-};
-
void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
+extern const struct rhashtable_params ip6_rhash_params;
/*
* Equivalent of ipv4 struct ip
@@ -523,19 +507,13 @@ bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
struct frag_queue {
struct inet_frag_queue q;
- __be32 id; /* fragment id */
- u32 user;
- struct in6_addr saddr;
- struct in6_addr daddr;
-
int iif;
unsigned int csum;
__u16 nhoffset;
u8 ecn;
};
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
- struct inet_frags *frags);
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
@@ -762,7 +740,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
* to minimize the possibility that any useful information is
* leaked to an attacker. Only the lower 20 bits are relevant.
*/
- rol32(hash, 16);
+ hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
diff --git a/include/net/llc.h b/include/net/llc.h
index e8e61d4fb45838..82d989995d18a4 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
atomic_inc(&sap->refcnt);
}
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+ return atomic_inc_not_zero(&sap->refcnt);
+}
+
void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 8b683841e5743f..f6017ddc4ded14 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -448,6 +448,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
+ unsigned int hh_alen = 0;
unsigned int seq;
int hh_len;
@@ -455,16 +456,33 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
seq = read_seqbegin(&hh->hh_lock);
hh_len = hh->hh_len;
if (likely(hh_len <= HH_DATA_MOD)) {
- /* this is inlined by gcc */
- memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+ hh_alen = HH_DATA_MOD;
+
+ /* skb_push() would proceed silently if we have room for
+ * the unaligned size but not for the aligned size:
+ * check headroom explicitly.
+ */
+ if (likely(skb_headroom(skb) >= HH_DATA_MOD)) {
+ /* this is inlined by gcc */
+ memcpy(skb->data - HH_DATA_MOD, hh->hh_data,
+ HH_DATA_MOD);
+ }
} else {
- int hh_alen = HH_DATA_ALIGN(hh_len);
+ hh_alen = HH_DATA_ALIGN(hh_len);
- memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ if (likely(skb_headroom(skb) >= hh_alen)) {
+ memcpy(skb->data - hh_alen, hh->hh_data,
+ hh_alen);
+ }
}
} while (read_seqretry(&hh->hh_lock, seq));
- skb_push(skb, hh_len);
+ if (WARN_ON_ONCE(skb_headroom(skb) < hh_alen)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ __skb_push(skb, hh_len);
return dev_queue_xmit(skb);
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 93328c61934aac..6965dfe7e88b85 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -115,6 +115,7 @@ struct net {
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag nf_frag;
+ struct ctl_table_header *nf_frag_frags_hdr;
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c0368db6df54d7..d235722c0d925e 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -86,7 +86,6 @@ struct netns_ipv6 {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag {
- struct netns_sysctl_ipv6 sysctl;
struct netns_frags frags;
};
#endif
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694dafa5bae..008f466d1da7ea 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@ struct nfc_hci_pipe {
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index b669fe6dbc3bad..98f31c7ea23df9 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -63,10 +63,11 @@ struct pnpipehdr {
u8 state_after_reset; /* reset request */
u8 error_code; /* any response */
u8 pep_type; /* status indication */
- u8 data[1];
+ u8 data0; /* anything else */
};
+ u8 data[];
};
-#define other_pep_type data[1]
+#define other_pep_type data[0]
static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
{
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 4a5b9a306c69b4..803fc26ef0babd 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -60,7 +60,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
- struct sctphdr *sh = sctp_hdr(skb);
+ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
__le32 ret, old = sh->checksum;
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
diff --git a/include/net/sock.h b/include/net/sock.h
index 38116fb565bd51..f46471dd4f4abc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -301,6 +301,7 @@ struct cg_proto;
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
+ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING socket options
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_socket: Identd and reporting IO signals
@@ -437,6 +438,9 @@ struct sock {
long sk_sndtimeo;
struct timer_list sk_timer;
ktime_t sk_stamp;
+#if BITS_PER_LONG==32
+ seqlock_t sk_stamp_seq;
+#endif
u16 sk_tsflags;
u32 sk_tskey;
struct socket *sk_socket;
@@ -651,6 +655,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
hlist_add_head_rcu(&sk->sk_node, list);
}
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
@@ -2183,6 +2193,41 @@ sock_skb_set_dropcount(const struct sock *sk, struct sk_buff *skb)
SOCK_SKB_CB(skb)->dropcount = atomic_read(&sk->sk_drops);
}
+static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
+{
+ int segs = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
+
+ atomic_add(segs, &sk->sk_drops);
+}
+
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+ unsigned int seq;
+ ktime_t kt;
+
+ do {
+ seq = read_seqbegin(&sk->sk_stamp_seq);
+ kt = sk->sk_stamp;
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+ return kt;
+#else
+ return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+ write_seqlock(&sk->sk_stamp_seq);
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+#else
+ sk->sk_stamp = kt;
+#endif
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2207,7 +2252,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
- sk->sk_stamp = kt;
+ sock_write_timestamp(sk, kt);
if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
__sock_recv_wifi_status(msg, sk, skb);
@@ -2227,7 +2272,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
else
- sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(sk, skb->tstamp);
}
void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a6bf6003c8e124..11a476c844bb74 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -376,6 +376,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
@@ -559,6 +560,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -647,7 +649,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (skb_queue_empty(&tp->out_of_order_queue) &&
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
!tp->urg_data)
@@ -819,8 +821,6 @@ enum tcp_ca_event {
CA_EVENT_LOSS, /* loss timeout */
CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
- CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
- CA_EVENT_NON_DELAYED_ACK,
};
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
@@ -1463,6 +1463,7 @@ static inline void tcp_write_queue_purge(struct sock *sk)
sk_wmem_free_skb(sk, skb);
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
+ inet_csk(sk)->icsk_backoff = 0;
}
static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 120da1d7f57eb5..10fefb0dc64018 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3007,6 +3007,20 @@ static inline int ib_check_mr_access(int flags)
return 0;
}
+static inline bool ib_access_writable(int access_flags)
+{
+ /*
+ * We have writable memory backing the MR if any of the following
+ * access flags are set. "Local write" and "remote write" obviously
+ * require write access. "Remote atomic" can do things like fetch and
+ * add, which will modify memory, and "MW bind" can change permissions
+ * by binding a window.
+ */
+ return access_flags &
+ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
+}
+
/**
* ib_check_mr_status: lightweight check of MR status.
* This routine may provide status checks on a selected
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 293b9a7f53bcc8..fb53a94a5e8b7f 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -240,6 +240,7 @@ enum scsi_target_state {
STARGET_CREATED = 1,
STARGET_RUNNING,
STARGET_REMOVE,
+ STARGET_CREATED_REMOVE,
STARGET_DEL,
};
diff --git a/include/soc/rockchip/rockchip_phy_typec.h b/include/soc/rockchip/rockchip_phy_typec.h
new file mode 100644
index 00000000000000..8e45c4d65c5c8c
--- /dev/null
+++ b/include/soc/rockchip/rockchip_phy_typec.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Lin Huang <hl@rock-chips.com>
+ */
+
+#ifndef __SOC_ROCKCHIP_PHY_TYPEC_H
+#define __SOC_ROCKCHIP_PHY_TYPEC_H
+
+struct usb3phy_reg {
+ u32 offset;
+ u32 enable_bit;
+ u32 write_enable;
+};
+
+/**
+ * struct rockchip_usb3phy_port_cfg - usb3-phy port configuration
+ * @reg: the base address for usb3-phy config.
+ * @typec_conn_dir: the register of type-c connector direction.
+ * @usb3tousb2_en: the register of type-c force usb3 to usb2 enable.
+ * @external_psm: the register of type-c phy external psm clock.
+ * @pipe_status: the register of type-c phy pipe status.
+ * @usb3_host_disable: the register of type-c usb3 host disable.
+ * @usb3_host_port: the register of type-c usb3 host port.
+ * @uphy_dp_sel: the register of type-c phy DP select control.
+ */
+struct rockchip_usb3phy_port_cfg {
+ unsigned int reg;
+ struct usb3phy_reg typec_conn_dir;
+ struct usb3phy_reg usb3tousb2_en;
+ struct usb3phy_reg external_psm;
+ struct usb3phy_reg pipe_status;
+ struct usb3phy_reg usb3_host_disable;
+ struct usb3phy_reg usb3_host_port;
+ struct usb3phy_reg uphy_dp_sel;
+};
+
+struct phy_config {
+ int swing;
+ int pe;
+};
+
+struct rockchip_typec_phy {
+ struct device *dev;
+ void __iomem *base;
+ struct extcon_dev *extcon;
+ struct regmap *grf_regs;
+ struct clk *clk_core;
+ struct clk *clk_ref;
+ struct reset_control *uphy_rst;
+ struct reset_control *pipe_rst;
+ struct reset_control *tcphy_rst;
+ const struct rockchip_usb3phy_port_cfg *port_cfgs;
+ /* mutex to protect access to individual PHYs */
+ struct mutex lock;
+ bool flip;
+ u8 mode;
+ struct phy_config config[3][4];
+ int (*typec_phy_config)(struct phy *phy, int link_rate,
+ int lanes, u8 swing, u8 pre_emp);
+};
+
+#endif
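A sketch of how a usb3phy_reg tuple is typically consumed; this mirrors the PHY driver's helper as far as can be told from the header, so treat the field semantics as assumptions:

static void property_enable(struct rockchip_typec_phy *tcphy,
                            const struct usb3phy_reg *reg, bool en)
{
        u32 mask = 1 << reg->write_enable;      /* GRF write-enable bit */
        u32 val = en << reg->enable_bit;        /* the value bit it gates */

        regmap_write(tcphy->grf_regs, reg->offset, val | mask);
}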
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 44202ff897fd93..f759e0918037ba 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -99,6 +99,8 @@ struct tegra_mc_soc {
u8 client_id_mask;
const struct tegra_smmu_soc *smmu;
+
+ u32 intmask;
};
struct tegra_mc {
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index c0abcdc1147083..33414ea260a99b 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -183,7 +183,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ if (stream->direction == SND_COMPRESS_PLAYBACK)
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ else
+ stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+
wake_up(&stream->runtime->sleep);
}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index b0be09279943fc..ffc161906d3634 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -100,7 +100,7 @@ struct snd_pcm_ops {
#endif
#define SNDRV_PCM_IOCTL1_RESET 0
-#define SNDRV_PCM_IOCTL1_INFO 1
+/* Slot 1 is absent. */
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
#define SNDRV_PCM_IOCTL1_GSTATE 3
#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index c704357775fc4e..2af7bb3ee57d00 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -247,11 +247,13 @@ static inline int snd_interval_empty(const struct snd_interval *i)
static inline int snd_interval_single(const struct snd_interval *i)
{
return (i->min == i->max ||
- (i->min + 1 == i->max && i->openmax));
+ (i->min + 1 == i->max && (i->openmin || i->openmax)));
}
static inline int snd_interval_value(const struct snd_interval *i)
{
+ if (i->openmin && !i->openmax)
+ return i->max;
return i->min;
}
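A worked example of the two fixes above (values chosen for illustration): the open-min interval (43, 44] contains exactly one integer, so it is now reported as single-valued and the value is taken from max rather than min.

/* illustration only */
struct snd_interval i = {
        .min = 43, .max = 44,
        .openmin = 1, .openmax = 0,
};
/* snd_interval_single(&i) -> 1, snd_interval_value(&i) -> 44 */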
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 594b4b29a2241b..7ef11b97cb2a49 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -223,6 +223,26 @@ TRACE_EVENT(ext4_drop_inode,
(unsigned long) __entry->ino, __entry->drop)
);
+TRACE_EVENT(ext4_nfs_commit_metadata,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
TRACE_EVENT(ext4_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
diff --git a/include/trace/events/intel_ish.h b/include/trace/events/intel_ish.h
new file mode 100644
index 00000000000000..92f7d5b2317755
--- /dev/null
+++ b/include/trace/events/intel_ish.h
@@ -0,0 +1,30 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM intel_ish
+
+#if !defined(_TRACE_INTEL_ISH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INTEL_ISH_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(ishtp_dump,
+
+ TP_PROTO(const char *message),
+
+ TP_ARGS(message),
+
+ TP_STRUCT__entry(
+ __string(message, message)
+ ),
+
+ TP_fast_assign(
+ __assign_str(message, message);
+ ),
+
+ TP_printk("%s", __get_str(message))
+);
+
+
+#endif /* _TRACE_INTEL_ISH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
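For reference, the TRACE_EVENT above generates a trace_ishtp_dump() helper; a call site would look roughly like this (hypothetical message text):

/* hypothetical call site in the ISH driver */
trace_ishtp_dump("ishtp: firmware reset sequence started");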
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index a74dd84bbb6d04..cba3feaa2ce6be 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -57,6 +57,9 @@
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
/* compatibility flags */
#define MAP_FILE 0
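A minimal userspace sketch of the new advice values (hypothetical buffer; error handling omitted):

#include <sys/mman.h>

madvise(secret, len, MADV_WIPEONFORK);  /* child sees zero-filled pages after fork() */
madvise(secret, len, MADV_KEEPONFORK);  /* undo: child inherits the contents again */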
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 7f989949dd2412..1fb0f21ddc94e5 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -112,6 +112,9 @@
#define DRM_MODE_CONTENT_PROTECTION_UNDESIRED 0
#define DRM_MODE_CONTENT_PROTECTION_DESIRED 1
#define DRM_MODE_CONTENT_PROTECTION_ENABLED 2
+/* Link Status options */
+#define DRM_MODE_LINK_STATUS_GOOD 0
+#define DRM_MODE_LINK_STATUS_BAD 1
struct drm_mode_modeinfo {
__u32 clock;
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index cd1629170103ef..08f47e0e9f8da3 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -819,13 +819,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}
static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 064d2026ab3889..373afec2ed3478 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -136,11 +136,18 @@
* This is an Ethernet frame header.
*/
+/* Allow libcs like musl to deactivate this; glibc does not implement it. */
+#ifndef __UAPI_DEF_ETHHDR
+#define __UAPI_DEF_ETHHDR 1
+#endif
+
+#if __UAPI_DEF_ETHHDR
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
__be16 h_proto; /* packet type ID field */
} __attribute__((packed));
+#endif
#endif /* _UAPI_LINUX_IF_ETHER_H */
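A sketch of the escape hatch this adds for non-glibc libcs (hypothetical translation unit):

/* hypothetical musl-side usage: the libc provides its own struct ethhdr */
#define __UAPI_DEF_ETHHDR 0
#include <linux/if_ether.h>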
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 2a5b87d333151f..0e0e57aeb3a15b 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -673,6 +673,8 @@
#define REL_DIAL 0x07
#define REL_WHEEL 0x08
#define REL_MISC 0x09
+#define REL_WHEEL_HI_RES 0x0b
+#define REL_HWHEEL_HI_RES 0x0c
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
@@ -709,6 +711,15 @@
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6, and userspace needs to detect whether
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define ABS_RESERVED here so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 41e8dff588e160..0dbb20716fd115 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -247,6 +247,7 @@ struct input_mask {
#define BUS_ATARI 0x1B
#define BUS_SPI 0x1C
#define BUS_CEC 0x1E
+#define BUS_INTEL_ISHTP 0x1F
/*
* MT_TOOL types
diff --git a/include/uapi/linux/intel-ipu3.h b/include/uapi/linux/intel-ipu3.h
index 694ef0c8d7a79d..35cd6e628b1cc9 100644
--- a/include/uapi/linux/intel-ipu3.h
+++ b/include/uapi/linux/intel-ipu3.h
@@ -1,100 +1,187 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2018 Intel Corporation */
+/* Copyright (C) 2017 - 2018 Intel Corporation */
#ifndef __IPU3_UAPI_H
#define __IPU3_UAPI_H
#include <linux/types.h>
-#define IPU3_UAPI_ISP_VEC_ELEMS 64
-
-#define IMGU_ABI_PAD __aligned(IPU3_UAPI_ISP_WORD_BYTES)
-#define IPU3_ALIGN __attribute__((aligned(IPU3_UAPI_ISP_WORD_BYTES)))
+/********************* Key Acronyms *************************/
+/*
+ * ACC - Accelerator cluster
+ * ANR - Adaptive noise reduction
+ * AWB_FR- Auto white balance filter response statistics
+ * BNR - Bayer noise reduction parameters
+ * BDS - Bayer downscaler parameters
+ * CCM - Color correction matrix coefficients
+ * CDS - Chroma down sample
+ * CHNR - Chroma noise reduction
+ * CSC - Color space conversion
+ * DM - De-mosaic
+ * IEFd - Image enhancement filter directed
+ * Obgrid - Optical black level compensation
+ * OSYS - Output system configuration
+ * ROI - Region of interest
+ * SHD - Lens shading correction table
+ * TCC - Total color correction
+ * YDS - Y down sampling
+ * YTM - Y-tone mapping
+ */
-#define IPU3_UAPI_ISP_WORD_BYTES 32
-#define IPU3_UAPI_MAX_STRIPES 2
+/*
+ * IPU3 DMA operations require buffers to be aligned at
+ * 32-byte boundaries.
+ */
/******************* ipu3_uapi_stats_3a *******************/
+#define IPU3_UAPI_MAX_STRIPES 2
#define IPU3_UAPI_MAX_BUBBLE_SIZE 10
-#define IPU3_UAPI_AE_COLORS 4
-#define IPU3_UAPI_AE_BINS 256
+#define IPU3_UAPI_GRID_START_MASK ((1 << 12) - 1)
+#define IPU3_UAPI_GRID_Y_START_EN (1 << 15)
-#define IPU3_UAPI_AWB_MD_ITEM_SIZE 8
-#define IPU3_UAPI_AWB_MAX_SETS 60
-#define IPU3_UAPI_AWB_SET_SIZE 0x500
-#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
- (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
- IPU3_UAPI_AWB_MD_ITEM_SIZE)
-#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
- (IPU3_UAPI_AWB_MAX_SETS * \
- (IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
-
-#define IPU3_UAPI_AF_MAX_SETS 24
-#define IPU3_UAPI_AF_MD_ITEM_SIZE 4
-#define IPU3_UAPI_AF_SPARE_FOR_BUBBLES \
- (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
- IPU3_UAPI_AF_MD_ITEM_SIZE)
-#define IPU3_UAPI_AF_Y_TABLE_SET_SIZE 0x80
-#define IPU3_UAPI_AF_Y_TABLE_MAX_SIZE \
- (IPU3_UAPI_AF_MAX_SETS * \
- (IPU3_UAPI_AF_Y_TABLE_SET_SIZE + IPU3_UAPI_AF_SPARE_FOR_BUBBLES) * \
- IPU3_UAPI_MAX_STRIPES)
-
-#define IPU3_UAPI_AWB_FR_MAX_SETS 24
-#define IPU3_UAPI_AWB_FR_MD_ITEM_SIZE 8
-#define IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE 0x100
-#define IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES \
- (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
- IPU3_UAPI_AWB_FR_MD_ITEM_SIZE)
-#define IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE \
- (IPU3_UAPI_AWB_FR_MAX_SETS * \
- (IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE + \
- IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES)
+/* controls generation of meta_data (like FF enable/disable) */
+#define IPU3_UAPI_AWB_RGBS_THR_B_EN (1 << 14)
+#define IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT (1 << 15)
+/**
+ * struct ipu3_uapi_grid_config - Grid plane config
+ *
+ * @width: Grid horizontal dimensions, in number of grid blocks (cells).
+ * @height: Grid vertical dimensions, in number of grid cells.
+ * @block_width_log2: Log2 of the width of each cell in pixels;
+ * for cell widths of 2^3 to 2^7, valid values are [3, 7].
+ * @block_height_log2: Log2 of the height of each cell in pixels;
+ * for cell heights of 2^3 to 2^7, valid values are [3, 7].
+ * @height_per_slice: The number of blocks in vertical axis per slice.
+ * Default 2.
+ * @x_start: X value of top left corner of Region of Interest (ROI).
+ * @y_start: Y value of top left corner of ROI.
+ * @x_end: X value of bottom right corner of ROI.
+ * @y_end: Y value of bottom right corner of ROI.
+ *
+ * Due to the size of total amount of collected data, most statistics
+ * create a grid-based output, and the data is then divided into "slices".
+ */
struct ipu3_uapi_grid_config {
- __u8 width; /* 6 or 7 (rgbs_grd_cfg) bits */
+ __u8 width;
__u8 height;
__u16 block_width_log2:3;
__u16 block_height_log2:3;
- __u16 height_per_slice:8; /* default value 1 */
- __u16 x_start; /* 12 bits */
+ __u16 height_per_slice:8;
+ __u16 x_start;
__u16 y_start;
-#define IPU3_UAPI_GRID_START_MASK ((1 << 12) - 1)
-#define IPU3_UAPI_GRID_Y_START_EN (1 << 15)
- __u16 x_end; /* 12 bits */
+ __u16 x_end;
__u16 y_end;
} __packed;
+/*
+ * The grid-based data is divided into "slices", called sets. Each setX
+ * slice covers ipu3_uapi_grid_config.width * height_per_slice grid cells.
+ */
+#define IPU3_UAPI_AWB_MAX_SETS 60
+/* Based on grid size 80 * 60 and cell size 16 x 16 */
+#define IPU3_UAPI_AWB_SET_SIZE 1280
+#define IPU3_UAPI_AWB_MD_ITEM_SIZE 8
+#define IPU3_UAPI_AWB_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AWB_MD_ITEM_SIZE)
+#define IPU3_UAPI_AWB_MAX_BUFFER_SIZE \
+ (IPU3_UAPI_AWB_MAX_SETS * \
+ (IPU3_UAPI_AWB_SET_SIZE + IPU3_UAPI_AWB_SPARE_FOR_BUBBLES))
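One consistent reading of the 1280-byte set size, inferred from the defaults above rather than stated in the header:

/*
 * 80 cells per row (grid width) * height_per_slice (default 2) rows
 * * IPU3_UAPI_AWB_MD_ITEM_SIZE (8 bytes) = 1280 bytes per set.
 */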
+/**
+ * struct ipu3_uapi_awb_meta_data - AWB meta data
+ *
+ * @meta_data_buffer: Average values for each color channel
+ */
struct ipu3_uapi_awb_meta_data {
__u8 meta_data_buffer[IPU3_UAPI_AWB_MAX_BUFFER_SIZE];
} __packed;
+/**
+ * struct ipu3_uapi_awb_raw_buffer - AWB raw buffer
+ *
+ * @meta_data: buffer to hold auto white balance meta data.
+ */
struct ipu3_uapi_awb_raw_buffer {
struct ipu3_uapi_awb_meta_data meta_data;
} __packed;
-struct IPU3_ALIGN ipu3_uapi_awb_config_s {
+/**
+ * struct ipu3_uapi_awb_config_s - AWB config
+ *
+ * @rgbs_thr_gr: gr threshold value.
+ * @rgbs_thr_r: Red threshold value.
+ * @rgbs_thr_gb: gb threshold value.
+ * @rgbs_thr_b: Blue threshold value.
+ * @grid: &ipu3_uapi_grid_config, the default grid resolution is 16x16 cells.
+ *
+ * The threshold is a saturation measure in the range [0, 8191]; 8191 is the
+ * default. Values over the threshold may optionally be rejected for averaging.
+ */
+struct ipu3_uapi_awb_config_s {
__u16 rgbs_thr_gr;
__u16 rgbs_thr_r;
__u16 rgbs_thr_gb;
__u16 rgbs_thr_b;
-/* controls generation of meta_data (like FF enable/disable) */
-#define IPU3_UAPI_AWB_RGBS_THR_B_EN (1 << 14)
-#define IPU3_UAPI_AWB_RGBS_THR_B_INCL_SAT (1 << 15)
-
struct ipu3_uapi_grid_config grid;
+} __attribute__((aligned(32))) __packed;
+
+/**
+ * struct ipu3_uapi_awb_config - AWB config wrapper
+ *
+ * @config: config for auto white balance as defined by &ipu3_uapi_awb_config_s
+ */
+struct ipu3_uapi_awb_config {
+ struct ipu3_uapi_awb_config_s config __attribute__((aligned(32)));
} __packed;
+#define IPU3_UAPI_AE_COLORS 4 /* R, G, B, Y */
+#define IPU3_UAPI_AE_BINS 256
+#define IPU3_UAPI_AE_WEIGHTS 96
+
+/**
+ * struct ipu3_uapi_ae_raw_buffer - AE global weighted histogram
+ *
+ * @vals: Histogram bins; IPU3_UAPI_AE_BINS entries for each of the
+ *	  IPU3_UAPI_AE_COLORS color channels
+ *
+ * Each histogram contains IPU3_UAPI_AE_BINS bins. Each bin holds a 24-bit
+ * unsigned counter of the number of pixels.
+ */
struct ipu3_uapi_ae_raw_buffer {
__u32 vals[IPU3_UAPI_AE_BINS * IPU3_UAPI_AE_COLORS];
} __packed;
+/**
+ * struct ipu3_uapi_ae_raw_buffer_aligned - AE raw buffer
+ *
+ * @buff: &ipu3_uapi_ae_raw_buffer to hold full frame meta data.
+ */
struct ipu3_uapi_ae_raw_buffer_aligned {
- struct ipu3_uapi_ae_raw_buffer buff IPU3_ALIGN;
+ struct ipu3_uapi_ae_raw_buffer buff __attribute__((aligned(32)));
} __packed;
+/**
+ * struct ipu3_uapi_ae_grid_config - AE weight grid
+ *
+ * @width: Grid horizontal dimensions. Value: [16, 32], default 16.
+ * @height: Grid vertical dimensions. Value: [16, 24], default 16.
+ * @block_width_log2: Log2 of the width of the grid cell, e.g. 2^4 = 16.
+ * @block_height_log2: Log2 of the height of the grid cell, e.g. 2^4 = 16.
+ * @__reserved0: reserved
+ * @ae_en: 0: does not write to meta-data array, 1: write normally.
+ * @rst_hist_array: write 1 to trigger histogram array reset.
+ * @done_rst_hist_array: flag for histogram array reset done.
+ * @x_start: X value of top left corner of ROI, default 0.
+ * @y_start: Y value of top left corner of ROI, default 0.
+ * @x_end: X value of bottom right corner of ROI
+ * @y_end: Y value of bottom right corner of ROI
+ *
+ * The AE block accumulates 4 global weighted histograms (R, G, B, Y) over
+ * a defined ROI within the frame. The contribution of each pixel into the
+ * histogram, defined by &ipu3_uapi_ae_weight_elem LUT, is indexed by a grid.
+ */
struct ipu3_uapi_ae_grid_config {
__u8 width;
__u8 height;
@@ -104,12 +191,145 @@ struct ipu3_uapi_ae_grid_config {
__u8 ae_en:1;
__u8 rst_hist_array:1;
__u8 done_rst_hist_array:1;
- __u16 x_start; /* 12 bits */
+ __u16 x_start;
__u16 y_start;
__u16 x_end;
__u16 y_end;
} __packed;
+/**
+ * struct ipu3_uapi_ae_weight_elem - AE weights LUT
+ *
+ * @cell0: weighted histogram grid value.
+ * @cell1: weighted histogram grid value.
+ * @cell2: weighted histogram grid value.
+ * @cell3: weighted histogram grid value.
+ * @cell4: weighted histogram grid value.
+ * @cell5: weighted histogram grid value.
+ * @cell6: weighted histogram grid value.
+ * @cell7: weighted histogram grid value.
+ *
+ * Use weighted grid value to give a different contribution factor to each cell.
+ * Precision u4, range [0, 15].
+ */
+struct ipu3_uapi_ae_weight_elem {
+ __u32 cell0:4;
+ __u32 cell1:4;
+ __u32 cell2:4;
+ __u32 cell3:4;
+ __u32 cell4:4;
+ __u32 cell5:4;
+ __u32 cell6:4;
+ __u32 cell7:4;
+} __packed;
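
A small helper sketch (illustrative, not part of the UAPI) packing eight u4
weights into one element:

	static void ae_set_weights(struct ipu3_uapi_ae_weight_elem *e,
				   const __u8 w[8])
	{
		/* Each cell is u4, range [0, 15], per the comment above. */
		e->cell0 = w[0]; e->cell1 = w[1];
		e->cell2 = w[2]; e->cell3 = w[3];
		e->cell4 = w[4]; e->cell5 = w[5];
		e->cell6 = w[6]; e->cell7 = w[7];
	}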
+
+/**
+ * struct ipu3_uapi_ae_ccm - AE coefficients for WB and CCM
+ *
+ * @gain_gr: WB gain factor for the gr channels. Default 256.
+ * @gain_r: WB gain factor for the r channel. Default 256.
+ * @gain_b: WB gain factor for the b channel. Default 256.
+ * @gain_gb: WB gain factor for the gb channels. Default 256.
+ * @mat: 4x4 matrix that transforms Bayer quad output from WB to RGB+Y.
+ *
+ * Default:
+ * 128, 0, 0, 0,
+ * 0, 128, 0, 0,
+ * 0, 0, 128, 0,
+ * 0, 0, 0, 128,
+ *
+ * As part of the raw frame pre-processing stage, white balance and color
+ * conversion are applied so that the statistics reflect these gain operations.
+ */
+struct ipu3_uapi_ae_ccm {
+ __u16 gain_gr;
+ __u16 gain_r;
+ __u16 gain_b;
+ __u16 gain_gb;
+ __s16 mat[16];
+} __packed;
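
For illustration, the documented defaults written out as an initializer
(a sketch, assuming this header is included):

	static const struct ipu3_uapi_ae_ccm ae_ccm_defaults = {
		.gain_gr = 256, .gain_r = 256, .gain_b = 256, .gain_gb = 256,
		.mat = {
			128, 0, 0, 0,
			0, 128, 0, 0,
			0, 0, 128, 0,
			0, 0, 0, 128,
		},
	};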
+
+/**
+ * struct ipu3_uapi_ae_config - AE config
+ *
+ * @grid_cfg: config for auto exposure statistics grid. See struct
+ * &ipu3_uapi_ae_grid_config
+ * @weights: IPU3_UAPI_AE_WEIGHTS weight values based on 32x24 blocks in the
+ *	     grid. Each grid cell has a corresponding value in the weights LUT
+ *	     called the grid value; the global histogram is updated based on
+ *	     the grid value and the pixel value.
+ * @ae_ccm: Color convert matrix pre-processing block.
+ *
+ * Calculate AE grid from image resolution, resample ae weights.
+ */
+struct ipu3_uapi_ae_config {
+ struct ipu3_uapi_ae_grid_config grid_cfg __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_weight_elem weights[
+ IPU3_UAPI_AE_WEIGHTS] __attribute__((aligned(32)));
+ struct ipu3_uapi_ae_ccm ae_ccm __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_af_filter_config - AF 2D filter for contrast measurements
+ *
+ * @y1_coeff_0: filter Y1, structure: 3x11, supports both symmetric and
+ *		anti-symmetric types. A12 is the center, A1-A11 are neighbours.
+ *		Used for analyzing low frequency content and to calculate the
+ *		sum of gradients in the x direction.
+ * @y1_coeff_0.a1: filter1 coefficients A1, u8, default 0.
+ * @y1_coeff_0.a2: filter1 coefficients A2, u8, default 0.
+ * @y1_coeff_0.a3: filter1 coefficients A3, u8, default 0.
+ * @y1_coeff_0.a4: filter1 coefficients A4, u8, default 0.
+ * @y1_coeff_1: Struct
+ * @y1_coeff_1.a5: filter1 coefficients A5, u8, default 0.
+ * @y1_coeff_1.a6: filter1 coefficients A6, u8, default 0.
+ * @y1_coeff_1.a7: filter1 coefficients A7, u8, default 0.
+ * @y1_coeff_1.a8: filter1 coefficients A8, u8, default 0.
+ * @y1_coeff_2: Struct
+ * @y1_coeff_2.a9: filter1 coefficients A9, u8, default 0.
+ * @y1_coeff_2.a10: filter1 coefficients A10, u8, default 0.
+ * @y1_coeff_2.a11: filter1 coefficients A11, u8, default 0.
+ * @y1_coeff_2.a12: filter1 coefficients A12, u8, default 128.
+ * @y1_sign_vec: Each bit corresponds to one coefficient sign bit,
+ * 0: positive, 1: negative, default 0.
+ * @y2_coeff_0: Y2, same structure as Y1. For analyzing high frequency content.
+ * @y2_coeff_0.a1: filter2 coefficients A1, u8, default 0.
+ * @y2_coeff_0.a2: filter2 coefficients A2, u8, default 0.
+ * @y2_coeff_0.a3: filter2 coefficients A3, u8, default 0.
+ * @y2_coeff_0.a4: filter2 coefficients A4, u8, default 0.
+ * @y2_coeff_1: Struct
+ * @y2_coeff_1.a5: filter2 coefficients A5, u8, default 0.
+ * @y2_coeff_1.a6: filter2 coefficients A6, u8, default 0.
+ * @y2_coeff_1.a7: filter2 coefficients A7, u8, default 0.
+ * @y2_coeff_1.a8: filter2 coefficients A8, u8, default 0.
+ * @y2_coeff_2: Struct
+ * @y2_coeff_2.a9: filter2 coefficients A9, u8, default 0.
+ * @y2_coeff_2.a10: filter2 coefficients A10, u8, default 0.
+ * @y2_coeff_2.a11: filter2 coefficients A11, u8, default 0.
+ * @y2_coeff_2.a12: filter2 coefficients A12, u8, default 128.
+ * @y2_sign_vec: Each bit corresponds to one coefficient sign bit,
+ * 0: positive, 1: negative, default 0.
+ * @y_calc: Pre-processing that converts Bayer quad to RGB+Y values to be
+ * used for building histogram. Range [0, 32], default 8.
+ * Rule:
+ * y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb = 32
+ *	   A single Y is calculated from the sum of Gr/R/B/Gb according to
+ *	   their contribution ratios.
+ * @y_calc.y_gen_rate_gr: Contribution ratio Gr for Y
+ * @y_calc.y_gen_rate_r: Contribution ratio R for Y
+ * @y_calc.y_gen_rate_b: Contribution ratio B for Y
+ * @y_calc.y_gen_rate_gb: Contribution ratio Gb for Y
+ * @nf: The shift right value that should be applied during the Y1/Y2 filter to
+ * make sure the total memory needed is 2 bytes per grid cell.
+ * @nf.__reserved0: reserved
+ * @nf.y1_nf: Normalization factor for the convolution coeffs of y1,
+ * should be log2 of the sum of the abs values of the filter
+ * coeffs, default 7 (2^7 = 128).
+ * @nf.__reserved1: reserved
+ * @nf.y2_nf: Normalization factor for y2, should be log2 of the sum of the
+ * abs values of the filter coeffs.
+ * @nf.__reserved2: reserved
+ */
struct ipu3_uapi_af_filter_config {
struct {
__u8 a1;
@@ -154,7 +374,7 @@ struct ipu3_uapi_af_filter_config {
__u32 y2_sign_vec;
struct {
- __u8 y_gen_rate_gr; /* 6 bits */
+ __u8 y_gen_rate_gr;
__u8 y_gen_rate_r;
__u8 y_gen_rate_b;
__u8 y_gen_rate_gb;
@@ -169,47 +389,158 @@ struct ipu3_uapi_af_filter_config {
} nf;
} __packed;
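
A sketch of the documented y_calc rule; the equal split is one arbitrary
choice, any split summing to 32 satisfies the rule:

	struct ipu3_uapi_af_filter_config af = { 0 };

	/* y_gen_rate_gr + y_gen_rate_r + y_gen_rate_b + y_gen_rate_gb == 32 */
	af.y_calc.y_gen_rate_gr = 8;
	af.y_calc.y_gen_rate_r = 8;
	af.y_calc.y_gen_rate_b = 8;
	af.y_calc.y_gen_rate_gb = 8;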
+#define IPU3_UAPI_AF_MAX_SETS 24
+#define IPU3_UAPI_AF_MD_ITEM_SIZE 4
+#define IPU3_UAPI_AF_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AF_MD_ITEM_SIZE)
+#define IPU3_UAPI_AF_Y_TABLE_SET_SIZE 128
+#define IPU3_UAPI_AF_Y_TABLE_MAX_SIZE \
+ (IPU3_UAPI_AF_MAX_SETS * \
+ (IPU3_UAPI_AF_Y_TABLE_SET_SIZE + IPU3_UAPI_AF_SPARE_FOR_BUBBLES) * \
+ IPU3_UAPI_MAX_STRIPES)
+
+/**
+ * struct ipu3_uapi_af_meta_data - AF meta data
+ *
+ * @y_table: Each color component will be convolved separately with filter1
+ *	     and filter2 and the result will be summed and averaged for
+ * each cell.
+ */
struct ipu3_uapi_af_meta_data {
- __u8 y_table[IPU3_UAPI_AF_Y_TABLE_MAX_SIZE] IPU3_ALIGN;
+ __u8 y_table[IPU3_UAPI_AF_Y_TABLE_MAX_SIZE] __attribute__((aligned(32)));
} __packed;
+/**
+ * struct ipu3_uapi_af_raw_buffer - AF raw buffer
+ *
+ * @meta_data: raw buffer &ipu3_uapi_af_meta_data for auto focus meta data.
+ */
struct ipu3_uapi_af_raw_buffer {
- struct ipu3_uapi_af_meta_data meta_data IPU3_ALIGN;
+ struct ipu3_uapi_af_meta_data meta_data __attribute__((aligned(32)));
} __packed;
+/**
+ * struct ipu3_uapi_af_config_s - AF config
+ *
+ * @filter_config: AF uses Y1 and Y2 filters as configured in
+ * &ipu3_uapi_af_filter_config
+ * @padding: paddings
+ * @grid_cfg: See &ipu3_uapi_grid_config, default resolution 16x16. Use large
+ *	      grid sizes for large images and vice versa.
+ */
struct ipu3_uapi_af_config_s {
- struct ipu3_uapi_af_filter_config filter_config IPU3_ALIGN;
+ struct ipu3_uapi_af_filter_config filter_config __attribute__((aligned(32)));
__u8 padding[4];
- struct ipu3_uapi_grid_config grid_cfg IPU3_ALIGN;
+ struct ipu3_uapi_grid_config grid_cfg __attribute__((aligned(32)));
+} __packed;
+
+/**
+ * struct ipu3_uapi_af_config - AF config wrapper
+ *
+ * @config: config for auto focus as defined by &ipu3_uapi_af_config_s
+ */
+struct ipu3_uapi_af_config {
+ struct ipu3_uapi_af_config_s config;
} __packed;
+#define IPU3_UAPI_AWB_FR_MAX_SETS 24
+#define IPU3_UAPI_AWB_FR_MD_ITEM_SIZE 8
+#define IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE 256
+#define IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES \
+ (IPU3_UAPI_MAX_BUBBLE_SIZE * IPU3_UAPI_MAX_STRIPES * \
+ IPU3_UAPI_AWB_FR_MD_ITEM_SIZE)
+#define IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE \
+ (IPU3_UAPI_AWB_FR_MAX_SETS * \
+ (IPU3_UAPI_AWB_FR_BAYER_TBL_SIZE + \
+ IPU3_UAPI_AWB_FR_SPARE_FOR_BUBBLES) * IPU3_UAPI_MAX_STRIPES)
+
+/**
+ * struct ipu3_uapi_awb_fr_meta_data - AWB filter response meta data
+ *
+ * @bayer_table: Statistics output on the grid after convolving with 1D filter.
+ */
struct ipu3_uapi_awb_fr_meta_data {
- __u8 bayer_table[IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE] IPU3_ALIGN;
+ __u8 bayer_table[IPU3_UAPI_AWB_FR_BAYER_TABLE_MAX_SIZE] __attribute__((aligned(32)));
} __packed;
+/**
+ * struct ipu3_uapi_awb_fr_raw_buffer - AWB filter response raw buffer
+ *
+ * @meta_data: See &ipu3_uapi_awb_fr_meta_data.
+ */
struct ipu3_uapi_awb_fr_raw_buffer {
struct ipu3_uapi_awb_fr_meta_data meta_data;
} __packed;
-struct IPU3_ALIGN ipu3_uapi_awb_fr_config_s {
+/**
+ * struct ipu3_uapi_awb_fr_config_s - AWB filter response config
+ *
+ * @grid_cfg: grid config, default 16x16.
+ * @bayer_coeff: 1D Filter 1x11 center symmetry/anti-symmetry.
+ *		 coefficients default to { 0, 0, 0, 0, 0, 128 }.
+ * Applied on whole image for each Bayer channel separately
+ * by a weighted sum of its 11x1 neighbors.
+ * @__reserved1: reserved
+ * @bayer_sign: sign of filter coefficients, default 0.
+ * @bayer_nf: normalization factor for the convolution coeffs, to make sure
+ * total memory needed is within pre-determined range.
+ * NF should be the log2 of the sum of the abs values of the
+ * filter coeffs, range [7, 14], default 7.
+ * @__reserved2: reserved
+ */
+struct ipu3_uapi_awb_fr_config_s {
struct ipu3_uapi_grid_config grid_cfg;
__u8 bayer_coeff[6];
__u16 __reserved1;
- __u32 bayer_sign; /* 11 bits */
- __u8 bayer_nf; /* 4 bits */
+ __u32 bayer_sign;
+ __u8 bayer_nf;
__u8 __reserved2[3];
+} __attribute__((aligned(32))) __packed;
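
A worked reading of bayer_nf (an interpretation, not from the header): with
the default coefficients { 0, 0, 0, 0, 0, 128 } the sum of absolute filter
values is 128, since the mirrored outer taps are all zero, and log2(128) = 7
matches the documented default:

	struct ipu3_uapi_awb_fr_config_s awb_fr = { 0 };

	awb_fr.bayer_nf = 7;	/* log2 of the sum of |coeffs| = log2(128) */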
+
+/**
+ * struct ipu3_uapi_awb_fr_config - AWB filter response config wrapper
+ *
+ * @config: See &ipu3_uapi_awb_fr_config_s.
+ */
+struct ipu3_uapi_awb_fr_config {
+ struct ipu3_uapi_awb_fr_config_s config;
} __packed;
+/**
+ * struct ipu3_uapi_4a_config - 4A config
+ *
+ * @awb_config: &ipu3_uapi_awb_config_s, default resolution 16x16
+ * @ae_grd_config: auto exposure statistics &ipu3_uapi_ae_grid_config
+ * @padding: paddings
+ * @af_config: auto focus config &ipu3_uapi_af_config_s
+ * @awb_fr_config: &ipu3_uapi_awb_fr_config_s, default resolution 16x16
+ */
struct ipu3_uapi_4a_config {
- struct ipu3_uapi_awb_config_s awb_config IPU3_ALIGN;
+ struct ipu3_uapi_awb_config_s awb_config __attribute__((aligned(32)));
struct ipu3_uapi_ae_grid_config ae_grd_config;
__u8 padding[20];
struct ipu3_uapi_af_config_s af_config;
struct ipu3_uapi_awb_fr_config_s awb_fr_config;
} __packed;
+/**
+ * struct ipu3_uapi_bubble_info - Bubble info for host side debugging
+ *
+ * @num_of_stripes: A single frame is divided into several parts called stripes
+ *		   due to a limitation of the line buffer memory.
+ * The separation between the stripes is vertical. Each such
+ * stripe is processed as a single frame by the ISP pipe.
+ * @padding: padding bytes.
+ * @num_sets: number of sets.
+ * @padding1: padding bytes.
+ * @size_of_set: set size.
+ * @padding2: padding bytes.
+ * @bubble_size: the amount of padding in the bubble, expressed in "sets".
+ * @padding3: padding bytes.
+ */
struct ipu3_uapi_bubble_info {
- __u32 num_of_stripes IPU3_ALIGN;
+ __u32 num_of_stripes __attribute__((aligned(32)));
__u8 padding[28];
__u32 num_sets;
__u8 padding1[28];
@@ -219,14 +550,29 @@ struct ipu3_uapi_bubble_info {
__u8 padding3[28];
} __packed;
+/**
+ * struct ipu3_uapi_stats_3a_bubble_info_per_stripe - Per-stripe bubble info
+ *
+ * @awb: bubble info for auto white balance, per stripe
+ * @af: bubble info for auto focus, per stripe
+ * @awb_fr: bubble info for AWB filter response, per stripe
+ */
struct ipu3_uapi_stats_3a_bubble_info_per_stripe {
struct ipu3_uapi_bubble_info awb[IPU3_UAPI_MAX_STRIPES];
struct ipu3_uapi_bubble_info af[IPU3_UAPI_MAX_STRIPES];
struct ipu3_uapi_bubble_info awb_fr[IPU3_UAPI_MAX_STRIPES];
} __packed;
+/**
+ * struct ipu3_uapi_ff_status - Enable bits for each 3A fixed function
+ *
+ * @awb_en: auto white balance enable
+ * @padding: padding bytes
+ * @ae_en: auto exposure enable
+ * @padding1: padding bytes
+ * @af_en: auto focus enable
+ * @padding2: padding bytes
+ * @awb_fr_en: awb filter response enable bit
+ * @padding3: padding bytes
+ */
struct ipu3_uapi_ff_status {
- __u32 awb_en IPU3_ALIGN;
+ __u32 awb_en __attribute__((aligned(32)));
__u8 padding[28];
__u32 ae_en;
__u8 padding1[28];
@@ -236,8 +582,21 @@ struct ipu3_uapi_ff_status {
__u8 padding3[28];
} __packed;
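
A sketch with assumed values: enabling only the AWB and AE fixed functions.

	struct ipu3_uapi_ff_status ff = {
		.awb_en = 1,
		.ae_en = 1,
		.af_en = 0,
		.awb_fr_en = 0,
	};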
+/**
+ * struct ipu3_uapi_stats_3a - 3A statistics
+ *
+ * @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer
+ * @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned
+ * @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data
+ * @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer
+ * @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config.
+ * @ae_join_buffers: 1 to use ae_raw_buffer.
+ * @padding: padding bytes
+ * @stats_3a_bubble_per_stripe: a &ipu3_uapi_stats_3a_bubble_info_per_stripe
+ * @stats_3a_status: 3a statistics status set in &ipu3_uapi_ff_status
+ */
struct ipu3_uapi_stats_3a {
- struct ipu3_uapi_awb_raw_buffer awb_raw_buffer IPU3_ALIGN;
+ struct ipu3_uapi_awb_raw_buffer awb_raw_buffer __attribute__((aligned(32)));
struct ipu3_uapi_ae_raw_buffer_aligned
ae_raw_buffer[IPU3_UAPI_MAX_STRIPES];
struct ipu3_uapi_af_raw_buffer af_raw_buffer;
@@ -252,30 +611,44 @@ struct ipu3_uapi_stats_3a {
/******************* ipu3_uapi_acc_param *******************/
+#define IPU3_UAPI_ISP_VEC_ELEMS 64
+#define IPU3_UAPI_ISP_TNR3_VMEM_LEN 9
+
#define IPU3_UAPI_BNR_LUT_SIZE 32
/* number of elements in gamma correction LUT */
#define IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES 256
+/* largest grid is 73x56, for grid_height_per_slice of 2, 73x2 = 146 */
#define IPU3_UAPI_SHD_MAX_CELLS_PER_SET 146
-/* largest grid is 73x56 */
#define IPU3_UAPI_SHD_MAX_CFG_SETS 28
+/* Normalization shift aka nf */
+#define IPU3_UAPI_SHD_BLGR_NF_SHIFT 13
+#define IPU3_UAPI_SHD_BLGR_NF_MASK 7
-#define IPU3_UAPI_YUVP2_YTM_LUT_ENTRIES 256
#define IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS 16
#define IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS 14
#define IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS 258
#define IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS 24
-#define IPU3_UAPI_BDS_SAMPLE_PATTERN_ARRAY_SIZE 8
-#define IPU3_UAPI_BDS_PHASE_COEFFS_ARRAY_SIZE 32
-
#define IPU3_UAPI_ANR_LUT_SIZE 26
#define IPU3_UAPI_ANR_PYRAMID_SIZE 22
-#define IPU3_UAPI_AE_WEIGHTS 96
+#define IPU3_UAPI_LIN_LUT_SIZE 64
/* Bayer Noise Reduction related structs */
+
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_config - White balance gains
+ *
+ * @gr: white balance gain for Gr channel.
+ * @r: white balance gain for R channel.
+ * @b: white balance gain for B channel.
+ * @gb: white balance gain for Gb channel.
+ *
+ * Precision u3.13, range [0, 8]. White balance correction is done by applying
+ * a multiplicative gain to each color channel prior to BNR.
+ */
struct ipu3_uapi_bnr_static_config_wb_gains_config {
__u16 gr;
__u16 r;
@@ -283,6 +656,18 @@ struct ipu3_uapi_bnr_static_config_wb_gains_config {
__u16 gb;
} __packed;
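
In the documented u3.13 format, 1.0 is encoded as 1 << 13 = 8192. A sketch
applying an assumed 1.5x red gain and unity gains elsewhere:

	struct ipu3_uapi_bnr_static_config_wb_gains_config wb = {
		.gr = 8192,	/* 1.0 in u3.13 */
		.r = 12288,	/* 1.5 in u3.13 */
		.b = 8192,
		.gb = 8192,
	};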
+/**
+ * struct ipu3_uapi_bnr_static_config_wb_gains_thr_config - Threshold config
+ *
+ * @gr: white balance threshold gain for Gr channel.
+ * @r: white balance threshold gain for R channel.
+ * @b: white balance threshold gain for B channel.
+ * @gb: white balance threshold gain for Gb channel.
+ *
+ * Defines the threshold that specifies how different a defect pixel can be from
+ * its neighbors (used by the dynamic defect pixel correction sub-block).
+ * Precision u4.4, range [0, 8].
+ */
struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
__u8 gr;
__u8 r;
@@ -290,6 +675,26 @@ struct ipu3_uapi_bnr_static_config_wb_gains_thr_config {
__u8 gb;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_coeffs_config - Noise model
+ * coefficients that controls noise threshold
+ *
+ * @cf: Free coefficient for threshold calculation, range [0, 8191], default 0.
+ * @__reserved0: reserved
+ * @cg: Gain coefficient for threshold calculation, [0, 31], default 8.
+ * @ci: Intensity coefficient for threshold calculation. range [0, 0x1f]
+ * default 6.
+ * format: u3.2 (3 most significant bits represent whole number,
+ * 2 least significant bits represent the fractional part
+ * with each count representing 0.25)
+ *	 e.g. 6 in binary format is 00110, which translates to 1.5
+ * @__reserved1: reserved
+ * @r_nf: Normalization shift value for r^2 calculation, range [12, 20]
+ *	  where r is the radius of pixel [row, col] from the center of the
+ *	  sensor. Default 14.
+ *
+ * Threshold used to distinguish between noise and details.
+ */
struct ipu3_uapi_bnr_static_config_thr_coeffs_config {
__u32 cf:13;
__u32 __reserved0:3;
@@ -299,6 +704,17 @@ struct ipu3_uapi_bnr_static_config_thr_coeffs_config {
__u32 r_nf:5;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config - Shading config
+ *
+ * @gr: Coefficient defines lens shading gain approximation for gr channel
+ * @r: Coefficient defines lens shading gain approximation for r channel
+ * @b: Coefficient defines lens shading gain approximation for b channel
+ * @gb: Coefficient defines lens shading gain approximation for gb channel
+ *
+ * Parameters for noise model (NM) adaptation of BNR due to shading correction.
+ * All of the above have precision u3.3 and default to 0.
+ */
struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config {
__u8 gr;
__u8 r;
@@ -306,6 +722,17 @@ struct ipu3_uapi_bnr_static_config_thr_ctrl_shd_config {
__u8 gb;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_config - Optical center config
+ *
+ * @x_reset: Reset value of X (col start - X center). Precision s12.0.
+ * @__reserved0: reserved
+ * @y_reset: Reset value of Y (row start - Y center). Precision s12.0.
+ * @__reserved2: reserved
+ *
+ * Distance from corner to optical center for NM adaptation due to shading
+ * correction (should be calculated based on shading tables)
+ */
struct ipu3_uapi_bnr_static_config_opt_center_config {
__s32 x_reset:13;
__u32 __reserved0:3;
@@ -313,10 +740,39 @@ struct ipu3_uapi_bnr_static_config_opt_center_config {
__u32 __reserved2:3;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_lut_config - BNR square root lookup table
+ *
+ * @values: pre-calculated values of square root function.
+ *
+ * LUT implementation of square root operation.
+ */
struct ipu3_uapi_bnr_static_config_lut_config {
__u8 values[IPU3_UAPI_BNR_LUT_SIZE];
} __packed;
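
A sketch of filling the LUT; the input/output scaling is an assumption, as
the header only says the values are pre-calculated square roots:

	#include <math.h>

	static void bnr_fill_sqrt_lut(struct ipu3_uapi_bnr_static_config_lut_config *lut)
	{
		int i;

		for (i = 0; i < IPU3_UAPI_BNR_LUT_SIZE; i++)
			lut->values[i] = (__u8)(255.0 *
				sqrt(i / (double)(IPU3_UAPI_BNR_LUT_SIZE - 1)));
	}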
+/**
+ * struct ipu3_uapi_bnr_static_config_bp_ctrl_config - Detect bad pixels (bp)
+ *
+ * @bp_thr_gain: Defines the threshold that specifies how different a
+ * defect pixel can be from its neighbors. Threshold is
+ * dependent on de-noise threshold calculated by algorithm.
+ * Range [4, 31], default 4.
+ * @__reserved0: reserved
+ * @defect_mode: Mode of addressed defect pixels,
+ * 0 – single defect pixel is expected,
+ *		  0 - single defect pixel is expected,
+ * @bp_gain: Defines how 2nd derivation that passes through a defect pixel
+ * is different from 2nd derivations that pass through
+ * neighbor pixels. u4.2, range [0, 256], default 8.
+ * @__reserved1: reserved
+ * @w0_coeff: Blending coefficient of defect pixel correction.
+ * Precision u4, range [0, 8], default 8.
+ * @__reserved2: reserved
+ * @w1_coeff: Enable influence of incorrect defect pixel correction to be
+ * avoided. Precision u4, range [1, 8], default 8.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_bnr_static_config_bp_ctrl_config {
__u32 bp_thr_gain:5;
__u32 __reserved0:2;
@@ -329,6 +785,30 @@ struct ipu3_uapi_bnr_static_config_bp_ctrl_config {
__u32 __reserved3:20;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config - Denoising config
+ *
+ * @alpha: Weight of central element of smoothing filter.
+ * @beta: Weight of peripheral elements of smoothing filter, default 4.
+ * @gamma: Weight of diagonal elements of smoothing filter, default 4.
+ *
+ * The beta and gamma parameters define the strength of the noise removal
+ * filter. All of the above have precision u0.4, range [0, 0xf],
+ * format: u0.4 (no / zero bits represent the whole number,
+ * 4 bits represent the fractional part,
+ * with each count representing 0.0625),
+ * e.g. 0xf translates to 0.0625 x 15 = 0.9375.
+ *
+ * @__reserved0: reserved
+ * @max_inf: Maximum increase of peripheral or diagonal element influence
+ *	     relative to the pre-defined value. Range: [0x5, 0xa]
+ * @__reserved1: reserved
+ * @gd_enable: Green disparity enable control, 0 - disable, 1 - enable.
+ * @bpc_enable: Bad pixel correction enable control, 0 - disable, 1 - enable.
+ * @bnr_enable: Bayer noise removal enable control, 0 - disable, 1 - enable.
+ * @ff_enable: Fixed function enable, 0 - disable, 1 - enable.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
__u32 alpha:4;
__u32 beta:4;
@@ -336,7 +816,6 @@ struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
__u32 __reserved0:4;
__u32 max_inf:4;
__u32 __reserved1:7;
- /* aka 'green disparity enable' */
__u32 gd_enable:1;
__u32 bpc_enable:1;
__u32 bnr_enable:1;
@@ -344,11 +823,47 @@ struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config {
__u32 __reserved2:1;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_opt_center_sqr_config - BNR optical square
+ *
+ * @x_sqr_reset: Reset value of X^2.
+ * @y_sqr_reset: Reset value of Y^2.
+ *
+ * Please note:
+ *
+ * #. X and Y refer to
+ *    &ipu3_uapi_bnr_static_config_opt_center_config.
+ * #. Both structs are used in the threshold formula to calculate r^2, where
+ *    r is the radius of pixel [row, col] from the center of the sensor.
+ */
struct ipu3_uapi_bnr_static_config_opt_center_sqr_config {
__u32 x_sqr_reset;
__u32 y_sqr_reset;
} __packed;
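
A sketch tying this struct to the reset coordinates above; the center values
are assumed, the header only says both feed the r^2 threshold formula:

	struct ipu3_uapi_bnr_static_config_opt_center_config oc = {
		.x_reset = -640,	/* assumed: col start - X center */
		.y_reset = -480,	/* assumed: row start - Y center */
	};
	struct ipu3_uapi_bnr_static_config_opt_center_sqr_config oc_sqr = {
		.x_sqr_reset = 640 * 640,	/* x_reset squared */
		.y_sqr_reset = 480 * 480,	/* y_reset squared */
	};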
+/**
+ * struct ipu3_uapi_bnr_static_config - BNR static config
+ *
+ * @wb_gains: white balance gains &ipu3_uapi_bnr_static_config_wb_gains_config
+ * @wb_gains_thr: white balance gains threshold as defined by
+ * &ipu3_uapi_bnr_static_config_wb_gains_thr_config
+ * @thr_coeffs: coefficients of threshold
+ * &ipu3_uapi_bnr_static_config_thr_coeffs_config
+ * @thr_ctrl_shd: control of shading threshold
+ * &ipu3_uapi_bnr_static_config_thr_ctrl_shd_config
+ * @opt_center: optical center &ipu3_uapi_bnr_static_config_opt_center_config
+ *
+ * Above parameters and opt_center_sqr are used for white balance and shading.
+ *
+ * @lut: lookup table &ipu3_uapi_bnr_static_config_lut_config
+ * @bp_ctrl: detect and remove bad pixels as defined in struct
+ * &ipu3_uapi_bnr_static_config_bp_ctrl_config
+ * @dn_detect_ctrl: detect and remove noise.
+ * &ipu3_uapi_bnr_static_config_dn_detect_ctrl_config
+ * @column_size: The number of pixels in column.
+ * @opt_center_sqr: Reset value of r^2 to optical center, see
+ * &ipu3_uapi_bnr_static_config_opt_center_sqr_config.
+ */
struct ipu3_uapi_bnr_static_config {
struct ipu3_uapi_bnr_static_config_wb_gains_config wb_gains;
struct ipu3_uapi_bnr_static_config_wb_gains_thr_config wb_gains_thr;
@@ -358,10 +873,39 @@ struct ipu3_uapi_bnr_static_config {
struct ipu3_uapi_bnr_static_config_lut_config lut;
struct ipu3_uapi_bnr_static_config_bp_ctrl_config bp_ctrl;
struct ipu3_uapi_bnr_static_config_dn_detect_ctrl_config dn_detect_ctrl;
- __u32 column_size; /* 0x44 */
+ __u32 column_size;
struct ipu3_uapi_bnr_static_config_opt_center_sqr_config opt_center_sqr;
} __packed;
+/**
+ * struct ipu3_uapi_bnr_static_config_green_disparity - Correct green disparity
+ *
+ * @gd_red: Shading gain coeff for gr disparity level in bright red region.
+ *	    Precision u0.6, default 4 (0.0625).
+ * @__reserved0: reserved
+ * @gd_green: Shading gain coeff for gr disparity level in bright green
+ *	      region. Precision u0.6, default 4 (0.0625).
+ * @__reserved1: reserved
+ * @gd_blue: Shading gain coeff for gr disparity level in bright blue region.
+ *	     Precision u0.6, default 4 (0.0625).
+ * @__reserved2: reserved
+ * @gd_black: Maximal green disparity level in dark region (stronger disparity
+ * assumed to be image detail). Precision u14, default 80.
+ * @__reserved3: reserved
+ * @gd_shading: Change maximal green disparity level according to square
+ * distance from image center.
+ * @__reserved4: reserved
+ * @gd_support: Lower bound for the number of second green color pixels in
+ * current pixel neighborhood with less than threshold difference
+ * from it.
+ *
+ * The shading gain coeff of red, green, blue and black are used to calculate
+ * threshold given a pixel's color value and its coordinates in the image.
+ *
+ * @__reserved5: reserved
+ * @gd_clip: Turn green disparity clip on/off, [0, 1], default 1.
+ * @gd_central_weight: Central pixel weight in 9 pixels weighted sum.
+ */
struct ipu3_uapi_bnr_static_config_green_disparity {
__u32 gd_red:6;
__u32 __reserved0:2;
@@ -375,19 +919,47 @@ struct ipu3_uapi_bnr_static_config_green_disparity {
__u32 __reserved4:1;
__u32 gd_support:2;
__u32 __reserved5:1;
- __u32 gd_clip:1; /* central weights variables */
+ __u32 gd_clip:1;
__u32 gd_central_weight:4;
} __packed;
+/**
+ * struct ipu3_uapi_dm_config - De-mosaic parameters
+ *
+ * @dm_en: de-mosaic enable.
+ * @ch_ar_en: Checker artifacts removal enable flag. Default 0.
+ * @fcc_en: False color correction (FCC) enable flag. Default 0.
+ * @__reserved0: reserved
+ * @frame_width: do not care
+ * @gamma_sc: Sharpening coefficient (coefficient of 2-d derivation of
+ * complementary color in Hamilton-Adams interpolation).
+ * u5, range [0, 31], default 8.
+ * @__reserved1: reserved
+ * @lc_ctrl: Parameter that controls weights of Chroma Homogeneity metric
+ * in calculation of final homogeneity metric.
+ * u5, range [0, 31], default 7.
+ * @__reserved2: reserved
+ * @cr_param1: First parameter that defines Checker artifact removal
+ *		feature gain. Precision u5, range [0, 31], default 8.
+ * @__reserved3: reserved
+ * @cr_param2: Second parameter that defines Checker artifact removal
+ * feature gain. Precision u5, range [0, 31], default 8.
+ * @__reserved4: reserved
+ * @coring_param: Defines power of false color correction operation.
+ * low for preserving edge colors, high for preserving gray
+ *		   edge artifacts. u1.4, range [0, 1.9375], default 4 (0.25).
+ * @__reserved5: reserved
+ *
+ * The demosaic fixed function block is responsible for converting Bayer
+ * (mosaiced) images into color images based on a demosaicing algorithm.
+ */
struct ipu3_uapi_dm_config {
- /* DWORD0 */
__u32 dm_en:1;
__u32 ch_ar_en:1;
__u32 fcc_en:1;
__u32 __reserved0:13;
__u32 frame_width:16;
- /* DWORD1 */
__u32 gamma_sc:5;
__u32 __reserved1:3;
__u32 lc_ctrl:5;
@@ -397,12 +969,34 @@ struct ipu3_uapi_dm_config {
__u32 cr_param2:5;
__u32 __reserved4:3;
- /* DWORD2 */
__u32 coring_param:5;
__u32 __reserved5:27;
} __packed;
-/* Color Conversion Matrix */
+/**
+ * struct ipu3_uapi_ccm_mat_config - Color correction matrix
+ *
+ * @coeff_m11: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m12: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m13: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_r: Bias 3x1 coefficient, range [-8191, 8191]
+ * @coeff_m21: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m22: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m23: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_g: Bias 3x1 coefficient, range [-8191, 8191]
+ * @coeff_m31: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_m32: CCM 3x3 coefficient, range [-8192, 8191]
+ * @coeff_m33: CCM 3x3 coefficient, range [-32768, 32767]
+ * @coeff_o_b: Bias 3x1 coefficient, range [-8191, 8191]
+ *
+ * Transform sensor specific color space to standard sRGB by applying 3x3 matrix
+ * and adding a bias vector O. The transformation is basically a rotation and
+ * translation in the 3-dimensional color space. Here are the defaults:
+ *
+ * 9775, -2671, 1087, 0
+ * -1071, 8303, 815, 0
+ * -23, -7887, 16103, 0
+ */
struct ipu3_uapi_ccm_mat_config {
__s16 coeff_m11;
__s16 coeff_m12;
@@ -418,22 +1012,62 @@ struct ipu3_uapi_ccm_mat_config {
__s16 coeff_o_b;
} __packed;
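
The documented default matrix written out as an initializer sketch (bias
column zero per the defaults above):

	static const struct ipu3_uapi_ccm_mat_config ccm_defaults = {
		.coeff_m11 = 9775, .coeff_m12 = -2671, .coeff_m13 = 1087,
		.coeff_o_r = 0,
		.coeff_m21 = -1071, .coeff_m22 = 8303, .coeff_m23 = 815,
		.coeff_o_g = 0,
		.coeff_m31 = -23, .coeff_m32 = -7887, .coeff_m33 = 16103,
		.coeff_o_b = 0,
	};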
-/* Gamma correction */
+/**
+ * struct ipu3_uapi_gamma_corr_ctrl - Gamma correction
+ *
+ * @enable: gamma correction enable.
+ * @__reserved: reserved
+ */
struct ipu3_uapi_gamma_corr_ctrl {
__u32 enable:1;
__u32 __reserved:31;
} __packed;
+/**
+ * struct ipu3_uapi_gamma_corr_lut - Per-pixel tone mapping implemented as LUT.
+ *
+ * @lut: 256 tabulated values of the gamma function. LUT[1].. LUT[256]
+ * format u13.0, range [0, 8191].
+ *
+ * The tone mapping operation is done by a piecewise linear graph
+ * that is implemented as a lookup table (LUT). The pixel component input
+ * intensity is the X-axis of the graph and is used as the table index.
+ */
struct ipu3_uapi_gamma_corr_lut {
__u16 lut[IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES];
} __packed;
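
A sketch populating the LUT with a gamma 1/2.2 curve scaled to the documented
[0, 8191] range; the curve choice is an assumption:

	#include <math.h>

	static void fill_gamma_lut(struct ipu3_uapi_gamma_corr_lut *g)
	{
		int i;

		for (i = 0; i < IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES; i++)
			g->lut[i] = (__u16)(8191.0 * pow(i /
				(double)(IPU3_UAPI_GAMMA_CORR_LUT_ENTRIES - 1),
				1.0 / 2.2));
	}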
+/**
+ * struct ipu3_uapi_gamma_config - Gamma config
+ *
+ * @gc_ctrl: control of gamma correction &ipu3_uapi_gamma_corr_ctrl
+ * @gc_lut: lookup table of gamma correction &ipu3_uapi_gamma_corr_lut
+ */
struct ipu3_uapi_gamma_config {
- struct ipu3_uapi_gamma_corr_ctrl gc_ctrl IPU3_ALIGN;
- struct ipu3_uapi_gamma_corr_lut gc_lut IPU3_ALIGN;
+ struct ipu3_uapi_gamma_corr_ctrl gc_ctrl __attribute__((aligned(32)));
+ struct ipu3_uapi_gamma_corr_lut gc_lut __attribute__((aligned(32)));
} __packed;
-/* Color Space Conversion */
+/**
+ * struct ipu3_uapi_csc_mat_config - Color space conversion matrix config
+ *
+ * @coeff_c11: Conversion matrix value, format s0.14, range [-1, 1], default 1.
+ * @coeff_c12: Conversion matrix value, format s0.14, range [-1, 1], default 0.
+ * @coeff_c13: Conversion matrix value, format s0.14, range [-1, 1], default 0.
+ * @coeff_b1: Bias 3x1 coefficient, s13.0, range [-8191, 8191], default 0.
+ * @coeff_c21: Conversion matrix value, format s0.14, range [-1, 1], default 0.
+ * @coeff_c22: Conversion matrix value, format s0.14, range [-1, 1], default 1.
+ * @coeff_c23: Conversion matrix value, format s0.14, range [-1, 1], default 0.
+ * @coeff_b2: Bias 3x1 coefficient, s13.0, range [-8191, 8191], default 0.
+ * @coeff_c31: Conversion matrix value, format s0.14, range [-1, 1], default 0.
+ * @coeff_c32: Conversion matrix value, format s0.14, range [-1, 1], default 0.
+ * @coeff_c33: Conversion matrix value, format s0.14, range [-1, 1], default 1.
+ * @coeff_b3: Bias 3x1 coefficient, s13.0, range [-8191, 8191], default 0.
+ *
+ * Transforms each pixel from RGB to YUV (Y - brightness/luminance,
+ * UV - chroma) by multiplying the pixel's values by a 3x3 matrix and adding
+ * an optional 3x1 bias vector.
+ */
struct ipu3_uapi_csc_mat_config {
__s16 coeff_c11;
__s16 coeff_c12;
@@ -449,7 +1083,29 @@ struct ipu3_uapi_csc_mat_config {
__s16 coeff_b3;
} __packed;
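
A sketch quantizing standard BT.601 RGB->YUV coefficients to the documented
s0.14 format (coefficient * 2^14, rounded); the choice of BT.601 is an
assumption:

	struct ipu3_uapi_csc_mat_config csc = {
		.coeff_c11 = 4899, .coeff_c12 = 9617, .coeff_c13 = 1868,
		.coeff_b1 = 0,
		.coeff_c21 = -2764, .coeff_c22 = -5427, .coeff_c23 = 8192,
		.coeff_b2 = 0,
		.coeff_c31 = 8192, .coeff_c32 = -6860, .coeff_c33 = -1332,
		.coeff_b3 = 0,
	};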
-/* Chroma Down Sample */
+/**
+ * struct ipu3_uapi_cds_params - Chroma down-scaling
+ *
+ * @ds_c00: range [0, 3]
+ * @ds_c01: range [0, 3]
+ * @ds_c02: range [0, 3]
+ * @ds_c03: range [0, 3]
+ * @ds_c10: range [0, 3]
+ * @ds_c11: range [0, 3]
+ * @ds_c12: range [0, 3]
+ * @ds_c13: range [0, 3]
+ *
+ * If the user does not provide values, the above 4x2 filter uses the
+ * following defaults:
+ * 1, 3, 3, 1,
+ * 1, 3, 3, 1,
+ *
+ * @ds_nf: Normalization factor for the chroma output downscaling filter,
+ *	   range [0, 4], default 2.
+ * @__reserved0: reserved
+ * @csc_en: Color space conversion enable
+ * @uv_bin_output: 0: output YUV 4.2.0, 1: output YUV 4.2.2 (default).
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_cds_params {
__u32 ds_c00:2;
__u32 ds_c01:2;
@@ -466,8 +1122,24 @@ struct ipu3_uapi_cds_params {
__u32 __reserved1:6;
} __packed;
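
The documented defaults as an initializer sketch (csc_en is an assumed
choice):

	struct ipu3_uapi_cds_params cds = {
		.ds_c00 = 1, .ds_c01 = 3, .ds_c02 = 3, .ds_c03 = 1,
		.ds_c10 = 1, .ds_c11 = 3, .ds_c12 = 3, .ds_c13 = 1,
		.ds_nf = 2,		/* documented default */
		.csc_en = 1,		/* assumed */
		.uv_bin_output = 1,	/* documented default: YUV 4.2.2 */
	};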
-/* Bayer shading correction */
-
+/**
+ * struct ipu3_uapi_shd_grid_config - Bayer shading (darkening) correction
+ *
+ * @width: Grid horizontal dimensions, u8, [8, 128], default 73
+ * @height: Grid vertical dimensions, u8, [8, 128], default 56
+ * @block_width_log2: Log2 of the width of the grid cell in pixel count
+ * u4, [0, 15], default value 5.
+ * @__reserved0: reserved
+ * @block_height_log2: Log2 of the height of the grid cell in pixel count
+ * u4, [0, 15], default value 6.
+ * @__reserved1: reserved
+ * @grid_height_per_slice: SHD_MAX_CELLS_PER_SET/width.
+ * (with SHD_MAX_CELLS_PER_SET = 146).
+ * @x_start: X value of the top left corner of the sensor relative to the ROI,
+ *	     s12, range [-4096, 0], default 0 (only negative values).
+ * @y_start: Y value of the top left corner of the sensor relative to the ROI,
+ *	     s12, range [-4096, 0], default 0 (only negative values).
+ */
struct ipu3_uapi_shd_grid_config {
/* reg 0 */
__u8 width;
@@ -478,34 +1150,76 @@ struct ipu3_uapi_shd_grid_config {
__u8 __reserved1:1;
__u8 grid_height_per_slice;
/* reg 1 */
- __s16 x_start; /* 13 bits */
+ __s16 x_start;
__s16 y_start;
} __packed;
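
A worked reading of grid_height_per_slice for the default 73x56 grid (an
interpretation of the formula above): 146 / 73 = 2.

	struct ipu3_uapi_shd_grid_config shd_grid = {
		.width = 73,	/* documented default */
		.height = 56,
	};

	shd_grid.grid_height_per_slice =
		IPU3_UAPI_SHD_MAX_CELLS_PER_SET / shd_grid.width;	/* == 2 */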
+/**
+ * struct ipu3_uapi_shd_general_config - Shading general config
+ *
+ * @init_set_vrt_offst_ul: set vertical offset,
+ * y_start >> block_height_log2 % grid_height_per_slice.
+ * @shd_enable: shading enable.
+ * @gain_factor: Gain factor. Shift calculated anti shading value. Precision u2.
+ * 0x0 - gain factor [1, 5], means no shift interpolated value.
+ * 0x1 - gain factor [1, 9], means shift interpolated by 1.
+ * 0x2 - gain factor [1, 17], means shift interpolated by 2.
+ * @__reserved: reserved
+ *
+ * Correction is performed by multiplying a gain factor for each of the 4 Bayer
+ * channels as a function of the pixel location in the sensor.
+ */
struct ipu3_uapi_shd_general_config {
__u32 init_set_vrt_offst_ul:8;
__u32 shd_enable:1;
- /* aka 'gf' */
__u32 gain_factor:2;
__u32 __reserved:21;
} __packed;
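
A sketch of the documented init_set_vrt_offst_ul derivation; the
parenthesization (shift before modulo) and the 'gen'/'shd_grid' variables are
assumptions:

	struct ipu3_uapi_shd_general_config gen = { .shd_enable = 1 };

	gen.init_set_vrt_offst_ul =
		(shd_grid.y_start >> shd_grid.block_height_log2) %
		shd_grid.grid_height_per_slice;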
+/**
+ * struct ipu3_uapi_shd_black_level_config - Black level correction
+ *
+ * @bl_r: Bias value for red. s11 range [-2048, 2047].
+ * @bl_gr: Bias value for green red. s11 range [-2048, 2047].
+ * @bl_gb: Bias value for green blue. s11 range [-2048, 2047].
+ * @bl_b: Bias value for blue. s11 range [-2048, 2047].
+ */
struct ipu3_uapi_shd_black_level_config {
- __s16 bl_r; /* 12 bits */
+ __s16 bl_r;
__s16 bl_gr;
-#define IPU3_UAPI_SHD_BLGR_NF_SHIFT 13 /* Normalization shift aka nf */
-#define IPU3_UAPI_SHD_BLGR_NF_MASK 0x7
- __s16 bl_gb; /* 12 bits */
+ __s16 bl_gb;
__s16 bl_b;
} __packed;
+/**
+ * struct ipu3_uapi_shd_config_static - Shading config static
+ *
+ * @grid: shading grid config &ipu3_uapi_shd_grid_config
+ * @general: shading general config &ipu3_uapi_shd_general_config
+ * @black_level: black level config for shading correction as defined by
+ * &ipu3_uapi_shd_black_level_config
+ */
struct ipu3_uapi_shd_config_static {
- /* B0: Fixed order: one transfer to GAC */
struct ipu3_uapi_shd_grid_config grid;
struct ipu3_uapi_shd_general_config general;
struct ipu3_uapi_shd_black_level_config black_level;
} __packed;
+/**
+ * struct ipu3_uapi_shd_lut - Shading gain factor lookup table.
+ *
+ * @sets: array of lookup table sets
+ * @sets.r_and_gr: Red and GreenR Lookup table.
+ * @sets.r_and_gr.r: Red shading factor.
+ * @sets.r_and_gr.gr: GreenR shading factor.
+ * @sets.__reserved1: reserved
+ * @sets.gb_and_b: GreenB and Blue Lookup table.
+ * @sets.gb_and_b.gb: GreenB shading factor.
+ * @sets.gb_and_b.b: Blue shading factor.
+ * @sets.__reserved2: reserved
+ *
+ * Map to shading correction LUT register set.
+ */
struct ipu3_uapi_shd_lut {
struct {
struct {
@@ -521,20 +1235,68 @@ struct ipu3_uapi_shd_lut {
} sets[IPU3_UAPI_SHD_MAX_CFG_SETS];
} __packed;
+/**
+ * struct ipu3_uapi_shd_config - Shading config
+ *
+ * @shd: shading static config, see &ipu3_uapi_shd_config_static
+ * @shd_lut: shading lookup table &ipu3_uapi_shd_lut
+ */
struct ipu3_uapi_shd_config {
- struct ipu3_uapi_shd_config_static shd IPU3_ALIGN;
- struct ipu3_uapi_shd_lut shd_lut IPU3_ALIGN;
+ struct ipu3_uapi_shd_config_static shd __attribute__((aligned(32)));
+ struct ipu3_uapi_shd_lut shd_lut __attribute__((aligned(32)));
} __packed;
-/* Image Enhancement Filter and Denoise */
+/* Image Enhancement Filter directed */
+/**
+ * struct ipu3_uapi_iefd_cux2 - IEFd Config Unit 2 parameters
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @b01: Always 0.
+ *
+ * Calculate weight for blending directed and non-directed denoise elements
+ *
+ * Note:
+ * Each instance of a Config Unit needs the X coordinates of n points and
+ * the slope A factors between points, calculated by the driver based on
+ * calibration parameters.
+ */
struct ipu3_uapi_iefd_cux2 {
__u32 x0:9;
__u32 x1:9;
__u32 a01:9;
- __u32 b01:5; /* NOTE: hardcoded to zero */
+ __u32 b01:5; /* NOTE: hardcoded to zero */
} __packed;
+/**
+ * struct ipu3_uapi_iefd_cux6_ed - Calculate power of non-directed sharpening
+ * element, Config Unit 6 for edge detail (ED).
+ *
+ * @x0: X coordinate of point 0, u9.0, default 0.
+ * @x1: X coordinate of point 1, u9.0, default 0.
+ * @x2: X coordinate of point 2, u9.0, default 0.
+ * @__reserved0: reserved
+ * @x3: X coordinate of point 3, u9.0, default 0.
+ * @x4: X coordinate of point 4, u9.0, default 0.
+ * @x5: X coordinate of point 5, u9.0, default 0.
+ * @__reserved1: reserved
+ * @a01: slope A points 01, s4.4, default 0.
+ * @a12: slope A points 12, s4.4, default 0.
+ * @a23: slope A points 23, s4.4, default 0.
+ * @__reserved2: reserved
+ * @a34: slope A points 34, s4.4, default 0.
+ * @a45: slope A points 45, s4.4, default 0.
+ * @__reserved3: reserved
+ * @b01: slope B points 01, s4.4, default 0.
+ * @b12: slope B points 12, s4.4, default 0.
+ * @b23: slope B points 23, s4.4, default 0.
+ * @__reserved4: reserved
+ * @b34: slope B points 34, s4.4, default 0.
+ * @b45: slope B points 45, s4.4, default 0.
+ * @__reserved5: reserved
+ */
struct ipu3_uapi_iefd_cux6_ed {
__u32 x0:9;
__u32 x1:9;
@@ -565,6 +1327,16 @@ struct ipu3_uapi_iefd_cux6_ed {
__u32 __reserved5:14;
} __packed;
+/**
+ * struct ipu3_uapi_iefd_cux2_1 - Calculate power of non-directed denoise
+ * element apply.
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A of Config Unit, s4.4, default 0.
+ * @__reserved1: reserved
+ * @b01: offset B0 of Config Unit, u7.0, default 0.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_iefd_cux2_1 {
__u32 x0:9;
__u32 x1:9;
@@ -575,6 +1347,25 @@ struct ipu3_uapi_iefd_cux2_1 {
__u32 __reserved2:24;
} __packed;
+/**
+ * struct ipu3_uapi_iefd_cux4 - Calculate power of non-directed sharpening
+ * element.
+ *
+ * @x0: X0 point of Config Unit, u9.0, default 0.
+ * @x1: X1 point of Config Unit, u9.0, default 0.
+ * @x2: X2 point of Config Unit, u9.0, default 0.
+ * @__reserved0: reserved
+ * @x3: X3 point of Config Unit, u9.0, default 0.
+ * @a01: Slope A0 of Config Unit, s4.4, default 0.
+ * @a12: Slope A1 of Config Unit, s4.4, default 0.
+ * @__reserved1: reserved
+ * @a23: Slope A2 of Config Unit, s4.4, default 0.
+ * @b01: Offset B0 of Config Unit, s7.0, default 0.
+ * @b12: Offset B1 of Config Unit, s7.0, default 0.
+ * @__reserved2: reserved
+ * @b23: Offset B2 of Config Unit, s7.0, default 0.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_iefd_cux4 {
__u32 x0:9;
__u32 x1:9;
@@ -595,6 +1386,30 @@ struct ipu3_uapi_iefd_cux4 {
__u32 __reserved3:24;
} __packed;
+/**
+ * struct ipu3_uapi_iefd_cux6_rad - Radial Config Unit (CU)
+ *
+ * @x0: x0 points of Config Unit radial, u8.0
+ * @x1: x1 points of Config Unit radial, u8.0
+ * @x2: x2 points of Config Unit radial, u8.0
+ * @x3: x3 points of Config Unit radial, u8.0
+ * @x4: x4 points of Config Unit radial, u8.0
+ * @x5: x5 points of Config Unit radial, u8.0
+ * @__reserved1: reserved
+ * @a01: Slope A of Config Unit radial, s7.8
+ * @a12: Slope A of Config Unit radial, s7.8
+ * @a23: Slope A of Config Unit radial, s7.8
+ * @a34: Slope A of Config Unit radial, s7.8
+ * @a45: Slope A of Config Unit radial, s7.8
+ * @__reserved2: reserved
+ * @b01: Slope B of Config Unit radial, s9.0
+ * @b12: Slope B of Config Unit radial, s9.0
+ * @b23: Slope B of Config Unit radial, s9.0
+ * @__reserved4: reserved
+ * @b34: Slope B of Config Unit radial, s9.0
+ * @b45: Slope B of Config Unit radial, s9.0
+ * @__reserved5: reserved
+ */
struct ipu3_uapi_iefd_cux6_rad {
__u32 x0:8;
__u32 x1:8;
@@ -624,8 +1439,25 @@ struct ipu3_uapi_iefd_cux6_rad {
__u32 __reserved5:12;
} __packed;
-/* YUV processing */
-
+/**
+ * struct ipu3_uapi_yuvp1_iefd_cfg_units - IEFd Config Units parameters
+ *
+ * @cu_1: calculate weight for blending directed and
+ * non-directed denoise elements. See &ipu3_uapi_iefd_cux2
+ * @cu_ed: calculate power of non-directed sharpening element, see
+ * &ipu3_uapi_iefd_cux6_ed
+ * @cu_3: calculate weight for blending directed and
+ * non-directed denoise elements. A &ipu3_uapi_iefd_cux2
+ * @cu_5: calculate power of non-directed denoise element apply, use
+ * &ipu3_uapi_iefd_cux2_1
+ * @cu_6: calculate power of non-directed sharpening element. See
+ * &ipu3_uapi_iefd_cux4
+ * @cu_7: calculate weight for blending directed and
+ * non-directed denoise elements. Use &ipu3_uapi_iefd_cux2
+ * @cu_unsharp: Config Unit of unsharp &ipu3_uapi_iefd_cux4
+ * @cu_radial: Config Unit of radial &ipu3_uapi_iefd_cux6_rad
+ * @cu_vssnlm: Config Unit of vssnlm &ipu3_uapi_iefd_cux2
+ */
struct ipu3_uapi_yuvp1_iefd_cfg_units {
struct ipu3_uapi_iefd_cux2 cu_1;
struct ipu3_uapi_iefd_cux6_ed cu_ed;
@@ -638,26 +1470,66 @@ struct ipu3_uapi_yuvp1_iefd_cfg_units {
struct ipu3_uapi_iefd_cux2 cu_vssnlm;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_iefd_config_s - IEFd config
+ *
+ * @horver_diag_coeff: Gradient compensation, coefficient that compensates for
+ *			the different distances for vertical/horizontal and
+ *			diagonal gradient calculation (~1/sqrt(2)).
+ * @__reserved0: reserved
+ * @clamp_stitch: Slope to stitch between clamped and unclamped edge values
+ * @__reserved1: reserved
+ * @direct_metric_update: Update coeff for direction metric
+ * @__reserved2: reserved
+ * @ed_horver_diag_coeff: Radial Coefficient that compensates for
+ * different distance for vertical/horizontal and
+ * diagonal gradient calculation (~1/sqrt(2))
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_yuvp1_iefd_config_s {
- __u32 horver_diag_coeff:7; /* Gradiant compensation */
+ __u32 horver_diag_coeff:7;
__u32 __reserved0:1;
- __u32 clamp_stitch:6; /* Slope to stitch edge */
+ __u32 clamp_stitch:6;
__u32 __reserved1:2;
- __u32 direct_metric_update:5; /* Update coeff for direction metric */
+ __u32 direct_metric_update:5;
__u32 __reserved2:3;
__u32 ed_horver_diag_coeff:7;
__u32 __reserved3:1;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_iefd_control - IEFd control
+ *
+ * @iefd_en: Enable IEFd
+ * @denoise_en: Enable denoise
+ * @direct_smooth_en: Enable directional smooth
+ * @rad_en: Enable radial update
+ * @vssnlm_en: Enable VSSNLM output filter
+ * @__reserved: reserved
+ */
struct ipu3_uapi_yuvp1_iefd_control {
- __u32 iefd_en:1; /* Enable IEFD */
- __u32 denoise_en:1; /* Enable denoise */
- __u32 direct_smooth_en:1; /* Enable directional smooth */
- __u32 rad_en:1; /* Enable radial update */
- __u32 vssnlm_en:1; /* Enable VSSNLM output filter */
+ __u32 iefd_en:1;
+ __u32 denoise_en:1;
+ __u32 direct_smooth_en:1;
+ __u32 rad_en:1;
+ __u32 vssnlm_en:1;
__u32 __reserved:27;
} __packed;
+/**
+ * struct ipu3_uapi_sharp_cfg - Sharpening config
+ *
+ * @nega_lmt_txt: Sharpening limit for negative overshoots for texture.
+ * @__reserved0: reserved
+ * @posi_lmt_txt: Sharpening limit for positive overshoots for texture.
+ * @__reserved1: reserved
+ * @nega_lmt_dir: Sharpening limit for negative overshoots for direction (edge).
+ * @__reserved2: reserved
+ * @posi_lmt_dir: Sharpening limit for positive overshoots for direction (edge).
+ * @__reserved3: reserved
+ *
+ * Fixed point type u13.0, range [0, 8191].
+ */
struct ipu3_uapi_sharp_cfg {
__u32 nega_lmt_txt:13;
__u32 __reserved0:19;
@@ -669,6 +1541,17 @@ struct ipu3_uapi_sharp_cfg {
__u32 __reserved3:19;
} __packed;
+/**
+ * struct ipu3_uapi_far_w - Sharpening config for far sub-group
+ *
+ * @dir_shrp: Weight of wide direct sharpening, u1.6, range [0, 64], default 64.
+ * @__reserved0: reserved
+ * @dir_dns: Weight of wide direct denoising, u1.6, range [0, 64], default 0.
+ * @__reserved1: reserved
+ * @ndir_dns_powr: Power of non-direct denoising,
+ * Precision u1.6, range [0, 64], default 64.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_far_w {
__u32 dir_shrp:7;
__u32 __reserved0:1;
@@ -678,6 +1561,16 @@ struct ipu3_uapi_far_w {
__u32 __reserved2:9;
} __packed;
+/**
+ * struct ipu3_uapi_unsharp_cfg - Unsharp config
+ *
+ * @unsharp_weight: Unsharp mask blending weight.
+ * u1.6, range [0, 64], default 16.
+ *		    0 - disabled, 64 - use only unsharp.
+ * @__reserved0: reserved
+ * @unsharp_amount: Unsharp mask amount, u4.5, range [0, 511], default 0.
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_unsharp_cfg {
__u32 unsharp_weight:7;
__u32 __reserved0:1;
@@ -685,31 +1578,73 @@ struct ipu3_uapi_unsharp_cfg {
__u32 __reserved1:15;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_iefd_shrp_cfg - IEFd sharpness config
+ *
+ * @cfg: sharpness config &ipu3_uapi_sharp_cfg
+ * @far_w: wide range config, value as specified by &ipu3_uapi_far_w:
+ * The 5x5 environment is separated into 2 sub-groups, the 3x3 nearest
+ * neighbors (8 pixels called Near), and the second order neighborhood
+ * around them (16 pixels called Far).
+ * @unshrp_cfg: unsharpness config. &ipu3_uapi_unsharp_cfg
+ */
struct ipu3_uapi_yuvp1_iefd_shrp_cfg {
struct ipu3_uapi_sharp_cfg cfg;
struct ipu3_uapi_far_w far_w;
struct ipu3_uapi_unsharp_cfg unshrp_cfg;
} __packed;
+/**
+ * struct ipu3_uapi_unsharp_coef0 - Unsharp mask coefficients
+ *
+ * @c00: Coeff11, s0.8, range [-255, 255], default 1.
+ * @c01: Coeff12, s0.8, range [-255, 255], default 5.
+ * @c02: Coeff13, s0.8, range [-255, 255], default 9.
+ * @__reserved: reserved
+ *
+ * Configurable registers for common sharpening support.
+ */
struct ipu3_uapi_unsharp_coef0 {
- __u32 c00:9; /* Coeff11 */
- __u32 c01:9; /* Coeff12 */
- __u32 c02:9; /* Coeff13 */
+ __u32 c00:9;
+ __u32 c01:9;
+ __u32 c02:9;
__u32 __reserved:5;
} __packed;
+/**
+ * struct ipu3_uapi_unsharp_coef1 - Unsharp mask coefficients
+ *
+ * @c11: Coeff22, s0.8, range [-255, 255], default 29.
+ * @c12: Coeff23, s0.8, range [-255, 255], default 55.
+ * @c22: Coeff33, s0.8, range [-255, 255], default 96.
+ * @__reserved: reserved
+ */
struct ipu3_uapi_unsharp_coef1 {
- __u32 c11:9; /* Coeff22 */
- __u32 c12:9; /* Coeff23 */
- __u32 c22:9; /* Coeff33 */
+ __u32 c11:9;
+ __u32 c12:9;
+ __u32 c22:9;
__u32 __reserved:5;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_iefd_unshrp_cfg - Unsharp mask config
+ *
+ * @unsharp_coef0: unsharp coefficient 0 config. See &ipu3_uapi_unsharp_coef0
+ * @unsharp_coef1: unsharp coefficient 1 config. See &ipu3_uapi_unsharp_coef1
+ */
struct ipu3_uapi_yuvp1_iefd_unshrp_cfg {
struct ipu3_uapi_unsharp_coef0 unsharp_coef0;
struct ipu3_uapi_unsharp_coef1 unsharp_coef1;
} __packed;
+/**
+ * struct ipu3_uapi_radial_reset_xy - Radial coordinate reset
+ *
+ * @x: Radial reset of x coordinate. Precision s12, [-4095, 4095], default 0.
+ * @__reserved0: reserved
+ * @y: Radial reset of y coordinate. Precision s12, [-4095, 4095], default 0.
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_radial_reset_xy {
__s32 x:13;
__u32 __reserved0:3;
@@ -717,16 +1652,36 @@ struct ipu3_uapi_radial_reset_xy {
__u32 __reserved1:3;
} __packed;
+/**
+ * struct ipu3_uapi_radial_reset_x2 - Radial X^2 reset
+ *
+ * @x2: Radial reset of x^2 coordinate. Precision u24, default 0.
+ * @__reserved: reserved
+ */
struct ipu3_uapi_radial_reset_x2 {
__u32 x2:24;
__u32 __reserved:8;
} __packed;
+/**
+ * struct ipu3_uapi_radial_reset_y2 - Radial Y^2 reset
+ *
+ * @y2: Radial reset of y^2 coordinate. Precision u24, default 0.
+ * @__reserved: reserved
+ */
struct ipu3_uapi_radial_reset_y2 {
__u32 y2:24;
__u32 __reserved:8;
} __packed;
+/**
+ * struct ipu3_uapi_radial_cfg - Radial config
+ *
+ * @rad_nf: Radial R^2 normalization factor, scaled down by 2^-(15 + rad_nf).
+ * @__reserved0: reserved
+ * @rad_inv_r2: Radial R^-2, normalized to (0.5, 1). Precision u7, range
+ *		[0, 127].
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_radial_cfg {
__u32 rad_nf:4;
__u32 __reserved0:4;
@@ -734,6 +1689,17 @@ struct ipu3_uapi_radial_cfg {
__u32 __reserved1:17;
} __packed;
+/**
+ * struct ipu3_uapi_rad_far_w - Radial FAR sub-group
+ *
+ * @rad_dir_far_sharp_w: Weight of wide direct sharpening, u1.6, range [0, 64],
+ * default 64.
+ * @rad_dir_far_dns_w: Weight of wide direct denoising, u1.6, range [0, 64],
+ * default 0.
+ * @rad_ndir_far_dns_power: Power of non-direct denoising, u1.6, range [0, 64],
+ *			 default 0.
+ * @__reserved: reserved
+ */
struct ipu3_uapi_rad_far_w {
__u32 rad_dir_far_sharp_w:8;
__u32 rad_dir_far_dns_w:8;
@@ -741,6 +1707,18 @@ struct ipu3_uapi_rad_far_w {
__u32 __reserved:8;
} __packed;
+/**
+ * struct ipu3_uapi_cu_cfg0 - Radius Config Unit cfg0 register
+ *
+ * @cu6_pow: Power of CU6. Power of non-direct sharpening, u3.4.
+ * @__reserved0: reserved
+ * @cu_unsharp_pow: Power of unsharp mask, u2.4.
+ * @__reserved1: reserved
+ * @rad_cu6_pow: Radial/corner CU6. Directed sharpening power, u3.4.
+ * @__reserved2: reserved
+ * @rad_cu_unsharp_pow: Radial power of unsharp mask, u2.4.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_cu_cfg0 {
__u32 cu6_pow:7;
__u32 __reserved0:1;
@@ -752,6 +1730,15 @@ struct ipu3_uapi_cu_cfg0 {
__u32 __reserved3:2;
} __packed;
+/**
+ * struct ipu3_uapi_cu_cfg1 - Radius Config Unit cfg1 register
+ *
+ * @rad_cu6_x1: X1 point of Config Unit 6, precision u9.0.
+ * @__reserved0: reserved
+ * @rad_cu_unsharp_x1: X1 point for Config Unit unsharp for radial/corner point
+ * precision u9.0.
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_cu_cfg1 {
__u32 rad_cu6_x1:9;
__u32 __reserved0:1;
@@ -759,6 +1746,20 @@ struct ipu3_uapi_cu_cfg1 {
__u32 __reserved1:13;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_iefd_rad_cfg - IEFd parameters changed radially over
+ *					 the picture plane.
+ *
+ * @reset_xy: reset xy value in radial calculation. &ipu3_uapi_radial_reset_xy
+ * @reset_x2: reset x square value in radial calculation. See struct
+ * &ipu3_uapi_radial_reset_x2
+ * @reset_y2: reset y square value in radial calculation. See struct
+ * &ipu3_uapi_radial_reset_y2
+ * @cfg: radial config defined in &ipu3_uapi_radial_cfg
+ * @rad_far_w: weight for wide range radial. &ipu3_uapi_rad_far_w
+ * @cu_cfg0: configuration unit 0. See &ipu3_uapi_cu_cfg0
+ * @cu_cfg1: configuration unit 1. See &ipu3_uapi_cu_cfg1
+ */
struct ipu3_uapi_yuvp1_iefd_rad_cfg {
struct ipu3_uapi_radial_reset_xy reset_xy;
struct ipu3_uapi_radial_reset_x2 reset_x2;
@@ -769,6 +1770,16 @@ struct ipu3_uapi_yuvp1_iefd_rad_cfg {
struct ipu3_uapi_cu_cfg1 cu_cfg1;
} __packed;
+/* Vssnlm - Very small scale non-local mean algorithm */
+
+/**
+ * struct ipu3_uapi_vss_lut_x - Vssnlm LUT x0/x1/x2
+ *
+ * @vs_x0: Vssnlm LUT x0, precision u8, range [0, 255], default 16.
+ * @vs_x1: Vssnlm LUT x1, precision u8, range [0, 255], default 32.
+ * @vs_x2: Vssnlm LUT x2, precision u8, range [0, 255], default 64.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_vss_lut_x {
__u32 vs_x0:8;
__u32 vs_x1:8;
@@ -776,6 +1787,16 @@ struct ipu3_uapi_vss_lut_x {
__u32 __reserved2:8;
} __packed;
+/**
+ * struct ipu3_uapi_vss_lut_y - Vssnlm LUT y0/y1/y2
+ *
+ * @vs_y1: Vssnlm LUT y1, precision u4, range [0, 8], default 1.
+ * @__reserved0: reserved
+ * @vs_y2: Vssnlm LUT y2, precision u4, range [0, 8], default 3.
+ * @__reserved1: reserved
+ * @vs_y3: Vssnlm LUT y3, precision u4, range [0, 8], default 8.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_vss_lut_y {
__u32 vs_y1:4;
__u32 __reserved0:4;
@@ -785,11 +1806,28 @@ struct ipu3_uapi_vss_lut_y {
__u32 __reserved2:12;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg - IEFd Vssnlm lookup table
+ *
+ * @vss_lut_x: vss lookup table. See &ipu3_uapi_vss_lut_x description
+ * @vss_lut_y: vss lookup table. See &ipu3_uapi_vss_lut_y description
+ */
struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg {
struct ipu3_uapi_vss_lut_x vss_lut_x;
struct ipu3_uapi_vss_lut_y vss_lut_y;
} __packed;
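
The documented VSSNLM LUT defaults as an initializer sketch:

	struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg vss = {
		.vss_lut_x = { .vs_x0 = 16, .vs_x1 = 32, .vs_x2 = 64 },
		.vss_lut_y = { .vs_y1 = 1, .vs_y2 = 3, .vs_y3 = 8 },
	};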
+/**
+ * struct ipu3_uapi_yuvp1_iefd_config - IEFd config
+ *
+ * @units: configuration unit setting, &ipu3_uapi_yuvp1_iefd_cfg_units
+ * @config: configuration, as defined by &ipu3_uapi_yuvp1_iefd_config_s
+ * @control: control setting, as defined by &ipu3_uapi_yuvp1_iefd_control
+ * @sharp: sharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_shrp_cfg
+ * @unsharp: unsharpness setting, as defined by &ipu3_uapi_yuvp1_iefd_unshrp_cfg
+ * @rad: radial setting, as defined by &ipu3_uapi_yuvp1_iefd_rad_cfg
+ * @vsslnm: vsslnm setting, as defined by &ipu3_uapi_yuvp1_iefd_vssnlm_cfg
+ */
struct ipu3_uapi_yuvp1_iefd_config {
struct ipu3_uapi_yuvp1_iefd_cfg_units units;
struct ipu3_uapi_yuvp1_iefd_config_s config;
@@ -800,6 +1838,31 @@ struct ipu3_uapi_yuvp1_iefd_config {
struct ipu3_uapi_yuvp1_iefd_vssnlm_cfg vsslnm;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_yds_config - Y Down-Sampling config
+ *
+ * @c00: range [0, 3], default 0x0
+ * @c01: range [0, 3], default 0x1
+ * @c02: range [0, 3], default 0x1
+ * @c03: range [0, 3], default 0x0
+ * @c10: range [0, 3], default 0x0
+ * @c11: range [0, 3], default 0x1
+ * @c12: range [0, 3], default 0x1
+ * @c13: range [0, 3], default 0x0
+ *
+ * Above are 4x2 filter coefficients for chroma output downscaling.
+ *
+ * @norm_factor: Normalization factor, range [0, 4], default 2
+ * 0 - divide by 1
+ * 1 - divide by 2
+ * 2 - divide by 4
+ * 3 - divide by 8
+ * 4 - divide by 16
+ * @__reserved0: reserved
+ * @bin_output: Down sampling on the Luma channel, two optional modes:
+ *		0 - bin output 4.2.0 (default), 1 - output 4.2.2.
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_yuvp1_yds_config {
__u32 c00:2;
__u32 c01:2;
@@ -815,6 +1878,17 @@ struct ipu3_uapi_yuvp1_yds_config {
__u32 __reserved1:6;
} __packed;
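
The documented Y down-sampling defaults as an initializer sketch:

	struct ipu3_uapi_yuvp1_yds_config yds = {
		.c00 = 0, .c01 = 1, .c02 = 1, .c03 = 0,
		.c10 = 0, .c11 = 1, .c12 = 1, .c13 = 0,
		.norm_factor = 2,	/* divide by 4 */
		.bin_output = 0,	/* 4.2.0, documented default */
	};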
+/* Chroma Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_chnr_enable_config - Chroma noise reduction enable
+ *
+ * @enable: enable/disable chroma noise reduction
+ * @yuv_mode: 0 - YUV420, 1 - YUV422
+ * @__reserved0: reserved
+ * @col_size: number of columns in the frame, max width is 2560
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_yuvp1_chnr_enable_config {
__u32 enable:1;
__u32 yuv_mode:1;
@@ -823,6 +1897,14 @@ struct ipu3_uapi_yuvp1_chnr_enable_config {
__u32 __reserved1:4;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_chnr_coring_config - Coring thresholds for UV
+ *
+ * @u: U coring level, u0.13, range [0.0, 1.0], default 0.0
+ * @__reserved0: reserved
+ * @v: V coring level, u0.13, range [0.0, 1.0], default 0.0
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_yuvp1_chnr_coring_config {
__u32 u:13;
__u32 __reserved0:3;
@@ -830,6 +1912,20 @@ struct ipu3_uapi_yuvp1_chnr_coring_config {
__u32 __reserved1:3;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_chnr_sense_gain_config - Chroma noise reduction gains
+ *
+ * All sensitivity gain parameters have precision u13.0, range [0, 8191].
+ *
+ * @vy: Sensitivity of horizontal edge of Y, default 100
+ * @vu: Sensitivity of horizontal edge of U, default 100
+ * @vv: Sensitivity of horizontal edge of V, default 100
+ * @__reserved0: reserved
+ * @hy: Sensitivity of vertical edge of Y, default 50
+ * @hu: Sensitivity of vertical edge of U, default 50
+ * @hv: Sensitivity of vertical edge of V, default 50
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_yuvp1_chnr_sense_gain_config {
__u32 vy:8;
__u32 vu:8;
@@ -842,6 +1938,17 @@ struct ipu3_uapi_yuvp1_chnr_sense_gain_config {
__u32 __reserved1:8;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_chnr_iir_fir_config - Chroma IIR/FIR filter config
+ *
+ * @fir_0h: Value of center tap in horizontal FIR, range [0, 32], default 8.
+ * @__reserved0: reserved
+ * @fir_1h: Value of distance 1 in horizontal FIR, range [0, 32], default 12.
+ * @__reserved1: reserved
+ * @fir_2h: Value of distance 2 tap in horizontal FIR, range [0, 32], default 0.
+ * @dalpha_clip_val: weight for previous row in IIR, range [1, 256], default 0.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_yuvp1_chnr_iir_fir_config {
__u32 fir_0h:6;
__u32 __reserved0:2;
@@ -852,6 +1959,18 @@ struct ipu3_uapi_yuvp1_chnr_iir_fir_config {
__u32 __reserved2:1;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_chnr_config - Chroma noise reduction config
+ *
+ * @enable: chroma noise reduction enable, see
+ * &ipu3_uapi_yuvp1_chnr_enable_config
+ * @coring: coring config for chroma noise reduction, see
+ * &ipu3_uapi_yuvp1_chnr_coring_config
+ * @sense_gain: sensitivity config for chroma noise reduction, see
+ * &ipu3_uapi_yuvp1_chnr_sense_gain_config
+ * @iir_fir: iir and fir config for chroma noise reduction, see
+ * &ipu3_uapi_yuvp1_chnr_iir_fir_config
+ */
struct ipu3_uapi_yuvp1_chnr_config {
struct ipu3_uapi_yuvp1_chnr_enable_config enable;
struct ipu3_uapi_yuvp1_chnr_coring_config coring;
@@ -859,6 +1978,20 @@ struct ipu3_uapi_yuvp1_chnr_config {
struct ipu3_uapi_yuvp1_chnr_iir_fir_config iir_fir;
} __packed;
+/* Edge Enhancement and Noise Reduction */
+
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config - Luma(Y) edge enhancement low-pass
+ * filter coefficients
+ *
+ * @a_diag: Smoothing diagonal coefficient, u5.0.
+ * @__reserved0: reserved
+ * @a_periph: Image smoothing peripheral coefficient, u5.0.
+ * @__reserved1: reserved
+ * @a_cent: Image smoothing center coefficient, u5.0.
+ * @__reserved2: reserved
+ * @enable: 0: Y_EE_NR disabled, output = input; 1: Y_EE_NR enabled.
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config {
__u32 a_diag:5;
__u32 __reserved0:3;
@@ -869,6 +2002,21 @@ struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config {
__u32 enable:1;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_sense_config - Luma(Y) edge enhancement
+ * noise reduction sensitivity gains
+ *
+ * @edge_sense_0: Sensitivity of edge in dark area. u13.0, default 8191.
+ * @__reserved0: reserved
+ * @delta_edge_sense: Difference in the sensitivity of edges between
+ * the bright and dark areas. u13.0, default 0.
+ * @__reserved1: reserved
+ * @corner_sense_0: Sensitivity of corner in dark area. u13.0, default 0.
+ * @__reserved2: reserved
+ * @delta_corner_sense: Difference in the sensitivity of corners between
+ * the bright and dark areas. u13.0, default 8191.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_sense_config {
__u32 edge_sense_0:13;
__u32 __reserved0:3;
@@ -880,6 +2028,21 @@ struct ipu3_uapi_yuvp1_y_ee_nr_sense_config {
__u32 __reserved3:3;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_gain_config - Luma(Y) edge enhancement
+ * noise reduction gain config
+ *
+ * @gain_pos_0: Gain for positive edge in dark area. u5.0, [0, 16], default 2.
+ * @__reserved0: reserved
+ * @delta_gain_posi: Difference in the gain of edges between the bright and
+ * dark areas for positive edges. u5.0, [0, 16], default 0.
+ * @__reserved1: reserved
+ * @gain_neg_0: Gain for negative edge in dark area. u5.0, [0, 16], default 8.
+ * @__reserved2: reserved
+ * @delta_gain_neg: Difference in the gain of edges between the bright and
+ * dark areas for negative edges. u5.0, [0, 16], default 0.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_gain_config {
__u32 gain_pos_0:5;
__u32 __reserved0:3;
@@ -891,6 +2054,25 @@ struct ipu3_uapi_yuvp1_y_ee_nr_gain_config {
__u32 __reserved3:3;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_clip_config - Luma(Y) edge enhancement
+ * noise reduction clipping config
+ *
+ * @clip_pos_0: Limit of positive edge in dark area
+ * u5, value [0, 16], default 8.
+ * @__reserved0: reserved
+ * @delta_clip_posi: Difference in the limit of edges between the bright
+ * and dark areas for positive edges.
+ * u5, value [0, 16], default 8.
+ * @__reserved1: reserved
+ * @clip_neg_0: Limit of negative edge in dark area
+ * u5, value [0, 16], default 8.
+ * @__reserved2: reserved
+ * @delta_clip_neg: Difference in the limit of edges between the bright
+ * and dark areas for negative edges.
+ * u5, value [0, 16], default 8.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_clip_config {
__u32 clip_pos_0:5;
__u32 __reserved0:3;
@@ -902,6 +2084,22 @@ struct ipu3_uapi_yuvp1_y_ee_nr_clip_config {
__u32 __reserved3:3;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_frng_config - Luma(Y) edge enhancement
+ * noise reduction fringe config
+ *
+ * @gain_exp: Common exponent of gains, u4, [0, 8], default 2.
+ * @__reserved0: reserved
+ * @min_edge: Threshold for edge and smooth stitching, u13.
+ * @__reserved1: reserved
+ * @lin_seg_param: Power of LinSeg, u4.
+ * @__reserved2: reserved
+ * @t1: Parameter for enabling/disabling the edge enhancement, u1.0, [0, 1],
+ * default 1.
+ * @t2: Parameter for enabling/disabling the smoothing, u1.0, [0, 1],
+ * default 1.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_frng_config {
__u32 gain_exp:4;
__u32 __reserved0:28;
@@ -914,6 +2112,24 @@ struct ipu3_uapi_yuvp1_y_ee_nr_frng_config {
__u32 __reserved3:6;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_diag_config - Luma(Y) edge enhancement
+ * noise reduction diagonal config
+ *
+ * @diag_disc_g: Coefficient that prioritizes the diagonal edge direction
+ * over horizontal or vertical for the final enhancement.
+ * u4.0, [1, 15], default 1.
+ * @__reserved0: reserved
+ * @hvw_hor: Weight of horizontal/vertical edge enhancement for hv edge.
+ * u2.2, [1, 15], default 4.
+ * @dw_hor: Weight of diagonal edge enhancement for hv edge.
+ * u2.2, [1, 15], default 1.
+ * @hvw_diag: Weight of horizontal/vertical edge enhancement for diagonal edge.
+ * u2.2, [1, 15], default 1.
+ * @dw_diag: Weight of diagonal edge enhancement for diagonal edge.
+ * u2.2, [1, 15], default 4.
+ * @__reserved1: reserved
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_diag_config {
__u32 diag_disc_g:4;
__u32 __reserved0:4;
@@ -924,6 +2140,23 @@ struct ipu3_uapi_yuvp1_y_ee_nr_diag_config {
__u32 __reserved1:8;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config - Luma(Y) edge enhancement
+ * noise reduction false color correction (FCC) coring config
+ *
+ * @pos_0: Gain for positive edge in dark area, u13.0, [0, 16], default 0.
+ * @__reserved0: reserved
+ * @pos_delta: Gain for positive edge in bright area, constraint:
+ * pos_0 + pos_delta <= 16, u13.0, default 0.
+ * @__reserved1: reserved
+ * @neg_0: Gain for negative edge in dark area, u13.0, range [0, 16], default 0.
+ * @__reserved2: reserved
+ * @neg_delta: Gain for negative edge in bright area, constraint:
+ * neg_0 + neg_delta <= 16, u13.0, default 0.
+ * @__reserved3: reserved
+ *
+ * Coring is a simple soft thresholding technique.
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config {
__u32 pos_0:13;
__u32 __reserved0:3;
@@ -935,6 +2168,18 @@ struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config {
__u32 __reserved3:3;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp1_y_ee_nr_config - Edge enhancement and noise reduction
+ *
+ * @lpf: low-pass filter config. See &ipu3_uapi_yuvp1_y_ee_nr_lpf_config
+ * @sense: sensitivity config. See &ipu3_uapi_yuvp1_y_ee_nr_sense_config
+ * @gain: gain config as defined in &ipu3_uapi_yuvp1_y_ee_nr_gain_config
+ * @clip: clip config as defined in &ipu3_uapi_yuvp1_y_ee_nr_clip_config
+ * @frng: fringe config as defined in &ipu3_uapi_yuvp1_y_ee_nr_frng_config
+ * @diag: diagonal edge config. See &ipu3_uapi_yuvp1_y_ee_nr_diag_config
+ * @fc_coring: coring config for fringe control. See
+ * &ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config
+ */
struct ipu3_uapi_yuvp1_y_ee_nr_config {
struct ipu3_uapi_yuvp1_y_ee_nr_lpf_config lpf;
struct ipu3_uapi_yuvp1_y_ee_nr_sense_config sense;
@@ -945,13 +2190,22 @@ struct ipu3_uapi_yuvp1_y_ee_nr_config {
struct ipu3_uapi_yuvp1_y_ee_nr_fc_coring_config fc_coring;
} __packed;
-/* Y-tone Mapping */
-struct ipu3_uapi_yuvp2_y_tm_lut_static_config {
- __u16 entries[IPU3_UAPI_YUVP2_YTM_LUT_ENTRIES]; /* 13 significand bits*/
- __u32 enable;
-} __packed;
-
/* Total Color Correction */
+
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gen_control_static_config - Total color correction
+ * general control config
+ *
+ * @en: 0: TCC disabled, output = input; 1: TCC enabled.
+ * @blend_shift: blend shift, range [3, 4], default NA.
+ * @gain_according_to_y_only: 0: Gain is calculated according to YUV,
+ * 1: Gain is calculated according to Y only
+ * @__reserved0: reserved
+ * @gamma: Final blending coefficients. Values [-16, 16], default NA.
+ * @__reserved1: reserved
+ * @delta: Final blending coefficients. Values [-16, 16], default NA.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_yuvp2_tcc_gen_control_static_config {
__u32 en:1;
__u32 blend_shift:3;
@@ -963,6 +2217,19 @@ struct ipu3_uapi_yuvp2_tcc_gen_control_static_config {
__u32 __reserved2:3;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config - Total color correction
+ * multi-axis color control (MACC) config
+ *
+ * @a: a coefficient for 2x2 MACC conversion matrix.
+ * @__reserved0: reserved
+ * @b: b coefficient for 2x2 MACC conversion matrix.
+ * @__reserved1: reserved
+ * @c: c coefficient for 2x2 MACC conversion matrix.
+ * @__reserved2: reserved
+ * @d: d coefficient for 2x2 MACC conversion matrix.
+ * @__reserved3: reserved
+ */
struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config {
__s32 a:12;
__u32 __reserved0:4;
@@ -974,23 +2241,60 @@ struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config {
__u32 __reserved3:4;
} __packed;
+/**
+ * struct ipu3_uapi_yuvp2_tcc_macc_table_static_config - Total color correction
+ * multi-axis color control (MACC) table array
+ *
+ * @entries: config for multi axis color correction, as specified by
+ * &ipu3_uapi_yuvp2_tcc_macc_elem_static_config
+ */
struct ipu3_uapi_yuvp2_tcc_macc_table_static_config {
struct ipu3_uapi_yuvp2_tcc_macc_elem_static_config
entries[IPU3_UAPI_YUVP2_TCC_MACC_TABLE_ELEMENTS];
} __packed;
+/**
+ * struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config - Total color correction
+ * inverse y lookup table
+ *
+ * @entries: lookup table for inverse y estimation, used to estimate the
+ * ratio between luma and chroma. Chroma is approximated by the
+ * absolute value of the radius on the chroma plane
+ * (R = sqrt(u^2 + v^2)) and luma is approximated by 1/Y.
+ */
struct ipu3_uapi_yuvp2_tcc_inv_y_lut_static_config {
- __u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS]; /* 10 bits */
+ __u16 entries[IPU3_UAPI_YUVP2_TCC_INV_Y_LUT_ELEMENTS];
} __packed;
+/**
+ * struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config - Total color
+ * correction lookup table for PCWL
+ *
+ * @entries: lookup table for gain piecewise linear transformation (PCWL)
+ */
struct ipu3_uapi_yuvp2_tcc_gain_pcwl_lut_static_config {
- __u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS];/* 12 bits */
+ __u16 entries[IPU3_UAPI_YUVP2_TCC_GAIN_PCWL_LUT_ELEMENTS];
} __packed;
+/**
+ * struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config - Total color correction
+ * lookup table for r square root
+ *
+ * @entries: lookup table for r square root estimation
+ */
struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config {
- __s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS]; /* 11 bits */
+ __s16 entries[IPU3_UAPI_YUVP2_TCC_R_SQR_LUT_ELEMENTS];
} __packed;
+/**
+ * struct ipu3_uapi_yuvp2_tcc_static_config - Total color correction static config
+ *
+ * @gen_control: general config for Total Color Correction
+ * @macc_table: config for multi axis color correction
+ * @inv_y_lut: lookup table for inverse y estimation
+ * @gain_pcwl: lookup table for gain PCWL
+ * @r_sqr_lut: lookup table for r square root estimation.
+ */
struct ipu3_uapi_yuvp2_tcc_static_config {
struct ipu3_uapi_yuvp2_tcc_gen_control_static_config gen_control;
struct ipu3_uapi_yuvp2_tcc_macc_table_static_config macc_table;
@@ -999,102 +2303,16 @@ struct ipu3_uapi_yuvp2_tcc_static_config {
struct ipu3_uapi_yuvp2_tcc_r_sqr_lut_static_config r_sqr_lut;
} __packed;
-/* Bayer Down-Scaler */
-
-struct ipu3_uapi_bds_hor_ctrl0 {
- __u32 sample_patrn_length:9;
- __u32 __reserved0:3;
- __u32 hor_ds_en:1;
- __u32 min_clip_val:1;
- __u32 max_clip_val:2;
- __u32 out_frame_width:13;
- __u32 __reserved1:3;
-} __packed;
-
-struct ipu3_uapi_bds_ptrn_arr {
- __u32 elems[IPU3_UAPI_BDS_SAMPLE_PATTERN_ARRAY_SIZE];
-} __packed;
-
-struct ipu3_uapi_bds_phase_entry {
- __s8 coeff_min2;
- __s8 coeff_min1;
- __s8 coeff_0;
- __s8 nf;
- __s8 coeff_pls1;
- __s8 coeff_pls2;
- __s8 coeff_pls3;
- __u8 __reserved;
-} __packed;
-
-struct ipu3_uapi_bds_phase_arr {
- struct ipu3_uapi_bds_phase_entry
- even[IPU3_UAPI_BDS_PHASE_COEFFS_ARRAY_SIZE];
- struct ipu3_uapi_bds_phase_entry
- odd[IPU3_UAPI_BDS_PHASE_COEFFS_ARRAY_SIZE];
-} __packed;
-
-struct ipu3_uapi_bds_hor_ctrl1 {
- __u32 hor_crop_start:13;
- __u32 __reserved0:3;
- __u32 hor_crop_end:13;
- __u32 __reserved1:1;
- __u32 hor_crop_en:1;
- __u32 __reserved2:1;
-} __packed;
-
-struct ipu3_uapi_bds_hor_ctrl2 {
- __u32 input_frame_height:13;
- __u32 __reserved0:19;
-} __packed;
-
-struct ipu3_uapi_bds_hor {
- struct ipu3_uapi_bds_hor_ctrl0 hor_ctrl0;
- struct ipu3_uapi_bds_ptrn_arr hor_ptrn_arr;
- struct ipu3_uapi_bds_phase_arr hor_phase_arr;
- struct ipu3_uapi_bds_hor_ctrl1 hor_ctrl1;
- struct ipu3_uapi_bds_hor_ctrl2 hor_ctrl2;
-} __packed;
-
-struct ipu3_uapi_bds_ver_ctrl0 {
- __u32 sample_patrn_length:9;
- __u32 __reserved0:3;
- __u32 ver_ds_en:1;
- __u32 min_clip_val:1;
- __u32 max_clip_val:2;
- __u32 __reserved1:16;
-} __packed;
-
-struct ipu3_uapi_bds_ver_ctrl1 {
- __u32 out_frame_width:13;
- __u32 __reserved0:3;
- __u32 out_frame_height:13;
- __u32 __reserved1:3;
-} __packed;
-
-struct ipu3_uapi_bds_ver {
- struct ipu3_uapi_bds_ver_ctrl0 ver_ctrl0;
- struct ipu3_uapi_bds_ptrn_arr ver_ptrn_arr;
- struct ipu3_uapi_bds_phase_arr ver_phase_arr;
- struct ipu3_uapi_bds_ver_ctrl1 ver_ctrl1;
-
-} __packed;
-
-struct ipu3_uapi_bds_config {
- struct ipu3_uapi_bds_hor hor IPU3_ALIGN;
- struct ipu3_uapi_bds_ver ver IPU3_ALIGN;
- __u32 enabled;
-} __packed;
-
/* Advanced Noise Reduction related structs */
-struct ipu3_uapi_anr_search_config {
- __u32 enable;
- __u16 frame_width;
- __u16 frame_height;
-} __packed;
-
+/*
+ * struct ipu3_uapi_anr_alpha - Advanced noise reduction alpha
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
struct ipu3_uapi_anr_alpha {
- __u16 gr; /* 9 bits */
+ __u16 gr;
__u16 r;
__u16 b;
__u16 gb;
@@ -1104,26 +2322,75 @@ struct ipu3_uapi_anr_alpha {
__u16 dc_gb;
} __packed;
+/*
+ * struct ipu3_uapi_anr_beta - Advanced noise reduction beta
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
struct ipu3_uapi_anr_beta {
- __u16 beta_gr; /* 11 bits */
+ __u16 beta_gr;
__u16 beta_r;
__u16 beta_b;
__u16 beta_gb;
} __packed;
+/*
+ * struct ipu3_uapi_anr_plain_color - Advanced noise reduction plain color with
+ * 4x4 matrix
+ *
+ * Tunable parameters that are subject to modification according to the
+ * total gain used.
+ */
struct ipu3_uapi_anr_plain_color {
- __u16 reg_w_gr[16]; /* 12 bits */
+ __u16 reg_w_gr[16];
__u16 reg_w_r[16];
__u16 reg_w_b[16];
__u16 reg_w_gb[16];
} __packed;
+/**
+ * struct ipu3_uapi_anr_transform_config - Advanced noise reduction transform
+ *
+ * @enable: advanced noise reduction enabled.
+ * @adaptive_treshhold_en: On IPU3, adaptive threshold is always enabled.
+ * @__reserved1: reserved
+ * @__reserved2: reserved
+ * @alpha: uses the following defaults:
+ * 13, 13, 13, 13, 0, 0, 0, 0
+ * 11, 11, 11, 11, 0, 0, 0, 0
+ * 14, 14, 14, 14, 0, 0, 0, 0
+ * @beta: uses the following defaults:
+ * 24, 24, 24, 24
+ * 21, 20, 20, 21
+ * 25, 25, 25, 25
+ * @color: uses the defaults defined in drivers/media/pci/intel/ipu3-tables.c
+ * @sqrt_lut: 11 bits per element, values =
+ * [724 768 810 849 887
+ * 923 958 991 1024 1056
+ * 1086 1116 1145 1173 1201
+ * 1228 1254 1280 1305 1330
+ * 1355 1379 1402 1425 1448]
+ * @xreset: Reset value of X for r^2 calculation. Value: col_start - X_center.
+ * Constraint: Xreset + FrameWidth <= 4095, Xreset >= -4095, default -1632.
+ * @__reserved3: reserved
+ * @yreset: Reset value of Y for r^2 calculation. Value: row_start - Y_center.
+ * Constraint: Yreset + FrameHeight <= 4095, Yreset >= -4095, default -1224.
+ * @__reserved4: reserved
+ * @x_sqr_reset: Reset value of X^2 for r^2 calculation Value = (Xreset)^2
+ * @r_normfactor: Normalization factor for R. Default 14.
+ * @__reserved5: reserved
+ * @y_sqr_reset: Reset value of Y^2 for r^2 calculation Value = (Yreset)^2
+ * @gain_scale: Parameter describing shading gain as a function of distance
+ * from the image center.
+ * A single value per frame, loaded by the driver. Default 115.
+ */
struct ipu3_uapi_anr_transform_config {
__u32 enable:1; /* 0 or 1, disabled or enabled */
__u32 adaptive_treshhold_en:1; /* On IPU3, always enabled */
__u32 __reserved1:30;
- __u8 __reserved2[40 + 4];
+ __u8 __reserved2[44];
struct ipu3_uapi_anr_alpha alpha[3];
struct ipu3_uapi_anr_beta beta[3];
@@ -1132,7 +2399,6 @@ struct ipu3_uapi_anr_transform_config {
__u16 sqrt_lut[IPU3_UAPI_ANR_LUT_SIZE]; /* 11 bits per element */
__s16 xreset:13;
-#define IPU3_UAPI_ANR_MAX_XRESET ((1 << 12) - 1)
__u16 __reserved3:3;
__s16 yreset:13;
__u16 __reserved4:3;
@@ -1145,6 +2411,14 @@ struct ipu3_uapi_anr_transform_config {
__u32 gain_scale:8;
} __packed;
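
/*
 * Hedged sketch: @x_sqr_reset and @y_sqr_reset are documented as the
 * squares of @xreset and @yreset, so a caller can derive them instead of
 * tuning them independently. The helper name is illustrative only.
 */
static void ipu3_anr_set_reset(struct ipu3_uapi_anr_transform_config *cfg,
			       int xreset, int yreset)
{
	cfg->xreset = xreset;		/* col_start - X_center */
	cfg->yreset = yreset;		/* row_start - Y_center */
	cfg->x_sqr_reset = xreset * xreset;
	cfg->y_sqr_reset = yreset * yreset;
}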
+/**
+ * struct ipu3_uapi_anr_stitch_pyramid - ANR stitch pyramid
+ *
+ * @entry0: pyramid LUT entry0, range [0x0, 0x3f]
+ * @entry1: pyramid LUT entry1, range [0x0, 0x3f]
+ * @entry2: pyramid LUT entry2, range [0x0, 0x3f]
+ * @__reserved: reserved
+ */
struct ipu3_uapi_anr_stitch_pyramid {
__u32 entry0:6;
__u32 entry1:6;
@@ -1152,85 +2426,92 @@ struct ipu3_uapi_anr_stitch_pyramid {
__u32 __reserved:14;
} __packed;
+/**
+ * struct ipu3_uapi_anr_stitch_config - ANR stitch config
+ *
+ * @anr_stitch_en: enable stitch. Enabled with 1.
+ * @__reserved: reserved
+ * @pyramid: pyramid table as defined by &ipu3_uapi_anr_stitch_pyramid
+ * default values:
+ * { 1, 3, 5 }, { 7, 7, 5 }, { 3, 1, 3 },
+ * { 9, 15, 21 }, { 21, 15, 9 }, { 3, 5, 15 },
+ * { 25, 35, 35 }, { 25, 15, 5 }, { 7, 21, 35 },
+ * { 49, 49, 35 }, { 21, 7, 7 }, { 21, 35, 49 },
+ * { 49, 35, 21 }, { 7, 5, 15 }, { 25, 35, 35 },
+ * { 25, 15, 5 }, { 3, 9, 15 }, { 21, 21, 15 },
+ * { 9, 3, 1 }, { 3, 5, 7 }, { 7, 5, 3}, { 1 }
+ */
struct ipu3_uapi_anr_stitch_config {
__u32 anr_stitch_en;
- __u16 frame_width;
- __u16 frame_height;
- __u8 __reserved[40];
+ __u8 __reserved[44];
struct ipu3_uapi_anr_stitch_pyramid pyramid[IPU3_UAPI_ANR_PYRAMID_SIZE];
} __packed;
-struct ipu3_uapi_anr_tile2strm_config {
- __u32 enable;
- __u16 frame_width;
- __u16 frame_height;
-} __packed;
-
+/**
+ * struct ipu3_uapi_anr_config - ANR config
+ *
+ * @transform: advanced noise reduction transform config as specified by
+ * &ipu3_uapi_anr_transform_config
+ * @stitch: create 4x4 patch from 4 surrounding 8x8 patches.
+ */
struct ipu3_uapi_anr_config {
- struct ipu3_uapi_anr_search_config search IPU3_ALIGN;
- struct ipu3_uapi_anr_transform_config transform IPU3_ALIGN;
- struct ipu3_uapi_anr_stitch_config stitch IPU3_ALIGN;
- struct ipu3_uapi_anr_tile2strm_config tile2strm IPU3_ALIGN;
-} __packed;
-
-struct ipu3_uapi_awb_fr_config {
- struct ipu3_uapi_awb_fr_config_s config;
-} __packed;
-
-struct ipu3_uapi_ae_weight_elem {
- __u32 cell0:4;
- __u32 cell1:4;
- __u32 cell2:4;
- __u32 cell3:4;
- __u32 cell4:4;
- __u32 cell5:4;
- __u32 cell6:4;
- __u32 cell7:4;
-} __packed;
-
-struct ipu3_uapi_ae_ccm {
- __u16 gain_gr; /* 11 bits */
- __u16 gain_r;
- __u16 gain_b;
- __u16 gain_gb;
- __s16 mat[16];
-} __packed;
-
-struct ipu3_uapi_ae_config {
- struct ipu3_uapi_ae_grid_config grid_cfg IPU3_ALIGN;
- struct ipu3_uapi_ae_weight_elem weights[IPU3_UAPI_AE_WEIGHTS]
- IPU3_ALIGN;
- struct ipu3_uapi_ae_ccm ae_ccm IPU3_ALIGN;
-} __packed;
-
-struct ipu3_uapi_af_config {
- struct ipu3_uapi_af_config_s config;
-} __packed;
-
-struct ipu3_uapi_awb_config {
- struct ipu3_uapi_awb_config_s config IPU3_ALIGN;
+ struct ipu3_uapi_anr_transform_config transform __attribute__((aligned(32)));
+ struct ipu3_uapi_anr_stitch_config stitch __attribute__((aligned(32)));
} __packed;
+/**
+ * struct ipu3_uapi_acc_param - Accelerator cluster parameters
+ *
+ * ACC refers to the HW cluster containing all Fixed Functions (FFs). Each FF
+ * implements a specific algorithm.
+ *
+ * @bnr: parameters for bayer noise reduction static config. See
+ * &ipu3_uapi_bnr_static_config
+ * @green_disparity: disparity static config between gr and gb channel.
+ * See &ipu3_uapi_bnr_static_config_green_disparity
+ * @dm: de-mosaic config. See &ipu3_uapi_dm_config
+ * @ccm: color correction matrix. See &ipu3_uapi_ccm_mat_config
+ * @gamma: gamma correction config. See &ipu3_uapi_gamma_config
+ * @csc: color space conversion matrix. See &ipu3_uapi_csc_mat_config
+ * @cds: color down sample config. See &ipu3_uapi_cds_params
+ * @shd: lens shading correction config. See &ipu3_uapi_shd_config
+ * @iefd: Image enhancement filter and denoise config.
+ * &ipu3_uapi_yuvp1_iefd_config
+ * @yds_c0: y down scaler config. &ipu3_uapi_yuvp1_yds_config
+ * @chnr_c0: chroma noise reduction config. &ipu3_uapi_yuvp1_chnr_config
+ * @y_ee_nr: y edge enhancement and noise reduction config.
+ * &ipu3_uapi_yuvp1_y_ee_nr_config
+ * @yds: y down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @chnr: chroma noise reduction config. See &ipu3_uapi_yuvp1_chnr_config
+ * @__reserved1: reserved
+ * @yds2: y channel down scaler config. See &ipu3_uapi_yuvp1_yds_config
+ * @tcc: total color correction config as defined in struct
+ * &ipu3_uapi_yuvp2_tcc_static_config
+ * @__reserved2: reserved
+ * @anr: advanced noise reduction config. See &ipu3_uapi_anr_config
+ * @awb_fr: AWB filter response config. See &ipu3_uapi_awb_fr_config
+ * @ae: auto exposure config. As specified by &ipu3_uapi_ae_config
+ * @af: auto focus config. As specified by &ipu3_uapi_af_config
+ * @awb: auto white balance config. As specified by &ipu3_uapi_awb_config
+ */
struct ipu3_uapi_acc_param {
struct ipu3_uapi_bnr_static_config bnr;
- struct ipu3_uapi_bnr_static_config_green_disparity green_disparity
- IPU3_ALIGN;
- struct ipu3_uapi_dm_config dm IPU3_ALIGN;
- struct ipu3_uapi_ccm_mat_config ccm IPU3_ALIGN;
- struct ipu3_uapi_gamma_config gamma IPU3_ALIGN;
- struct ipu3_uapi_csc_mat_config csc IPU3_ALIGN;
- struct ipu3_uapi_cds_params cds IPU3_ALIGN;
- struct ipu3_uapi_shd_config shd IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_iefd_config iefd IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_yds_config yds_c0 IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_chnr_config chnr_c0 IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_yds_config yds IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_chnr_config chnr IPU3_ALIGN;
- struct ipu3_uapi_yuvp2_y_tm_lut_static_config ytm IPU3_ALIGN;
- struct ipu3_uapi_yuvp1_yds_config yds2 IPU3_ALIGN;
- struct ipu3_uapi_yuvp2_tcc_static_config tcc IPU3_ALIGN;
- struct ipu3_uapi_bds_config bds;
+ struct ipu3_uapi_bnr_static_config_green_disparity
+ green_disparity __attribute__((aligned(32)));
+ struct ipu3_uapi_dm_config dm __attribute__((aligned(32)));
+ struct ipu3_uapi_ccm_mat_config ccm __attribute__((aligned(32)));
+ struct ipu3_uapi_gamma_config gamma __attribute__((aligned(32)));
+ struct ipu3_uapi_csc_mat_config csc __attribute__((aligned(32)));
+ struct ipu3_uapi_cds_params cds __attribute__((aligned(32)));
+ struct ipu3_uapi_shd_config shd __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_iefd_config iefd __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds_c0 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_chnr_config chnr_c0 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_y_ee_nr_config y_ee_nr __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_chnr_config chnr __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
+ struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
struct ipu3_uapi_anr_config anr;
struct ipu3_uapi_awb_fr_config awb_fr;
struct ipu3_uapi_ae_config ae;
@@ -1238,10 +2519,20 @@ struct ipu3_uapi_acc_param {
struct ipu3_uapi_awb_config awb;
} __packed;
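
/*
 * Compile-time sanity sketch: the aligned(32) attributes above guarantee
 * that each accelerator sub-struct starts on a 32-byte boundary. Checks
 * like these (C11 _Static_assert, <stddef.h> for offsetof) make that
 * explicit; the two members were picked arbitrarily for illustration.
 */
_Static_assert(offsetof(struct ipu3_uapi_acc_param, dm) % 32 == 0,
	       "dm must be 32-byte aligned");
_Static_assert(offsetof(struct ipu3_uapi_acc_param, tcc) % 32 == 0,
	       "tcc must be 32-byte aligned");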
-/* Linearization parameters */
-
-#define IPU3_UAPI_LIN_LUT_SIZE 64
-
+/**
+ * struct ipu3_uapi_isp_lin_vmem_params - Linearization parameters
+ *
+ * @lin_lutlow_gr: linearization look-up table for GR channel interpolation.
+ * @lin_lutlow_r: linearization look-up table for R channel interpolation.
+ * @lin_lutlow_b: linearization look-up table for B channel interpolation.
+ * @lin_lutlow_gb: linearization look-up table for GB channel interpolation.
+ * lin_lutlow_gr / lin_lutlow_r / lin_lutlow_b /
+ * lin_lutlow_gb <= LIN_MAX_VALUE - 1.
+ * @lin_lutdif_gr: lin_lutlow_gr[i+1] - lin_lutlow_gr[i].
+ * @lin_lutdif_r: lin_lutlow_r[i+1] - lin_lutlow_r[i].
+ * @lin_lutdif_b: lin_lutlow_b[i+1] - lin_lutlow_b[i].
+ * @lin_lutdif_gb: lin_lutlow_gb[i+1] - lin_lutlow_gb[i].
+ */
struct ipu3_uapi_isp_lin_vmem_params {
__s16 lin_lutlow_gr[IPU3_UAPI_LIN_LUT_SIZE];
__s16 lin_lutlow_r[IPU3_UAPI_LIN_LUT_SIZE];
@@ -1253,10 +2544,18 @@ struct ipu3_uapi_isp_lin_vmem_params {
__s16 lin_lutdif_gb[IPU3_UAPI_LIN_LUT_SIZE];
} __packed;
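
/*
 * Sketch of the presumed evaluation: @lin_lutdif_* holds the per-segment
 * slope (lutlow[i+1] - lutlow[i]), so the table pair supports piecewise
 * linear interpolation. The 8-bit intra-segment fraction below is an
 * assumption made only for illustration.
 */
static int ipu3_lin_eval_gr(const struct ipu3_uapi_isp_lin_vmem_params *p,
			    unsigned int idx, unsigned int frac)
{
	/* idx selects the segment, frac the position inside it, [0, 256) */
	return p->lin_lutlow_gr[idx] +
	       ((p->lin_lutdif_gr[idx] * (int)frac) >> 8);
}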
-/* Temporal Noise Reduction VMEM parameters */
-
-#define IPU3_UAPI_ISP_TNR3_VMEM_LEN 9
+/* Temporal Noise Reduction */
+/**
+ * struct ipu3_uapi_isp_tnr3_vmem_params - Temporal noise reduction vector
+ * memory parameters
+ *
+ * @slope: slope setting in interpolation curve for temporal noise reduction.
+ * @__reserved1: reserved
+ * @sigma: knee point setting in interpolation curve for temporal
+ * noise reduction.
+ * @__reserved2: reserved
+ */
struct ipu3_uapi_isp_tnr3_vmem_params {
__u16 slope[IPU3_UAPI_ISP_TNR3_VMEM_LEN];
__u16 __reserved1[IPU3_UAPI_ISP_VEC_ELEMS
@@ -1266,17 +2565,21 @@ struct ipu3_uapi_isp_tnr3_vmem_params {
- IPU3_UAPI_ISP_TNR3_VMEM_LEN];
} __packed;
-/* XNR3 VMEM parameters */
-
-struct ipu3_uapi_isp_xnr3_vmem_params {
- __u16 x[IPU3_UAPI_ISP_VEC_ELEMS];
- __u16 a[IPU3_UAPI_ISP_VEC_ELEMS];
- __u16 b[IPU3_UAPI_ISP_VEC_ELEMS];
- __u16 c[IPU3_UAPI_ISP_VEC_ELEMS];
-} __packed;
-
-/* TNR3 DMEM parameters */
-
+/**
+ * struct ipu3_uapi_isp_tnr3_params - Temporal noise reduction v3 parameters
+ *
+ * @knee_y1: Knee point; TNR3 assumes the standard deviations of Y, U and
+ * V at Y1 are TnrY1_Sigma_Y, U and V.
+ * @knee_y2: Knee point; TNR3 assumes the standard deviations of Y, U and
+ * V at Y2 are TnrY2_Sigma_Y, U and V.
+ * @maxfb_y: Max feedback gain for Y
+ * @maxfb_u: Max feedback gain for U
+ * @maxfb_v: Max feedback gain for V
+ * @round_adj_y: rounding adjustment for Y
+ * @round_adj_u: rounding adjustment for U
+ * @round_adj_v: rounding adjustment for V
+ * @ref_buf_select: selection of the reference frame buffer to be used.
+ */
struct ipu3_uapi_isp_tnr3_params {
__u32 knee_y1;
__u32 knee_y2;
@@ -1289,8 +2592,35 @@ struct ipu3_uapi_isp_tnr3_params {
__u32 ref_buf_select;
} __packed;
-/* XNR3 DMEM parameters */
+/* Extreme Noise Reduction version 3 */
+/**
+ * struct ipu3_uapi_isp_xnr3_vmem_params - Extreme noise reduction v3
+ * vector memory parameters
+ *
+ * @x: xnr3 parameters.
+ * @a: xnr3 parameters.
+ * @b: xnr3 parameters.
+ * @c: xnr3 parameters.
+ */
+struct ipu3_uapi_isp_xnr3_vmem_params {
+ __u16 x[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 a[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 b[IPU3_UAPI_ISP_VEC_ELEMS];
+ __u16 c[IPU3_UAPI_ISP_VEC_ELEMS];
+} __packed;
+
+/**
+ * struct ipu3_uapi_xnr3_alpha_params - Extreme noise reduction v3
+ * alpha tuning parameters
+ *
+ * @y0: Sigma for Y range similarity in dark area.
+ * @u0: Sigma for U range similarity in dark area.
+ * @v0: Sigma for V range similarity in dark area.
+ * @ydiff: Sigma difference for Y between bright area and dark area.
+ * @udiff: Sigma difference for U between bright area and dark area.
+ * @vdiff: Sigma difference for V between bright area and dark area.
+ */
struct ipu3_uapi_xnr3_alpha_params {
__u32 y0;
__u32 u0;
@@ -1300,6 +2630,15 @@ struct ipu3_uapi_xnr3_alpha_params {
__u32 vdiff;
} __packed;
+/**
+ * struct ipu3_uapi_xnr3_coring_params - Extreme noise reduction v3
+ * coring parameters
+ *
+ * @u0: Coring Threshold of U channel in dark area.
+ * @v0: Coring Threshold of V channel in dark area.
+ * @udiff: Threshold difference of U channel between bright and dark area.
+ * @vdiff: Threshold difference of V channel between bright and dark area.
+ */
struct ipu3_uapi_xnr3_coring_params {
__u32 u0;
__u32 v0;
@@ -1307,10 +2646,23 @@ struct ipu3_uapi_xnr3_coring_params {
__u32 vdiff;
} __packed;
+/**
+ * struct ipu3_uapi_xnr3_blending_params - Blending factor
+ *
+ * @strength: The factor for blending output with input. This is a tuning
+ * parameter; higher values lead to more aggressive XNR operation.
+ */
struct ipu3_uapi_xnr3_blending_params {
__u32 strength;
} __packed;
+/**
+ * struct ipu3_uapi_isp_xnr3_params - Extreme noise reduction v3 parameters
+ *
+ * @alpha: parameters for xnr3 alpha. See &ipu3_uapi_xnr3_alpha_params
+ * @coring: parameters for xnr3 coring. See &ipu3_uapi_xnr3_coring_params
+ * @blending: parameters for xnr3 blending. See &ipu3_uapi_xnr3_blending_params
+ */
struct ipu3_uapi_isp_xnr3_params {
struct ipu3_uapi_xnr3_alpha_params alpha;
struct ipu3_uapi_xnr3_coring_params coring;
@@ -1319,6 +2671,17 @@ struct ipu3_uapi_isp_xnr3_params {
/***** Obgrid (optical black level compensation) table entry *****/
+/**
+ * struct ipu3_uapi_obgrid_param - Optical black level compensation parameters
+ *
+ * @gr: Grid table values for color GR
+ * @r: Grid table values for color R
+ * @b: Grid table values for color B
+ * @gb: Grid table values for color GB
+ *
+ * Black level is different for red, green, and blue channels. So black level
+ * compensation is different per channel.
+ */
struct ipu3_uapi_obgrid_param {
__u16 gr;
__u16 r;
@@ -1328,20 +2691,53 @@ struct ipu3_uapi_obgrid_param {
/******************* V4L2_META_FMT_IPU3_PARAMS *******************/
-/*
- * The video queue "parameters" is of format V4L2_META_FMT_IPU3_PARAMS.
- * This is a "single plane" v4l2_meta_format using V4L2_BUF_TYPE_META_OUTPUT.
+/**
+ * struct ipu3_uapi_flags - bits to indicate which pipeline blocks need an update
*
- * struct ipu3_uapi_params as defined below contains a lot of parameters and
- * ipu3_uapi_flags selects which parameters to apply.
+ * @gdc: 0 = no update, 1 = update.
+ * @obgrid: 0 = no update, 1 = update.
+ * @__reserved1: Not used.
+ * @acc_bnr: 0 = no update, 1 = update.
+ * @acc_green_disparity: 0 = no update, 1 = update.
+ * @acc_dm: 0 = no update, 1 = update.
+ * @acc_ccm: 0 = no update, 1 = update.
+ * @acc_gamma: 0 = no update, 1 = update.
+ * @acc_csc: 0 = no update, 1 = update.
+ * @acc_cds: 0 = no update, 1 = update.
+ * @acc_shd: 0 = no update, 1 = update.
+ * @__reserved2: Not used.
+ * @acc_iefd: 0 = no update, 1 = update.
+ * @acc_yds_c0: 0 = no update, 1 = update.
+ * @acc_chnr_c0: 0 = no update, 1 = update.
+ * @acc_y_ee_nr: 0 = no update, 1 = update.
+ * @acc_yds: 0 = no update, 1 = update.
+ * @acc_chnr: 0 = no update, 1 = update.
+ * @acc_ytm: 0 = no update, 1 = update.
+ * @acc_yds2: 0 = no update, 1 = update.
+ * @acc_tcc: 0 = no update, 1 = update.
+ * @acc_dpc: 0 = no update, 1 = update.
+ * @acc_bds: 0 = no update, 1 = update.
+ * @acc_anr: 0 = no update, 1 = update.
+ * @acc_awb_fr: 0 = no update, 1 = update.
+ * @acc_ae: 0 = no update, 1 = update.
+ * @acc_af: 0 = no update, 1 = update.
+ * @acc_awb: 0 = no update, 1 = update.
+ * @__acc_osys: 0 = no update, 1 = update.
+ * @__reserved3: Not used.
+ * @lin_vmem_params: 0 = no update, 1 = update.
+ * @tnr3_vmem_params: 0 = no update, 1 = update.
+ * @xnr3_vmem_params: 0 = no update, 1 = update.
+ * @tnr3_dmem_params: 0 = no update, 1 = update.
+ * @xnr3_dmem_params: 0 = no update, 1 = update.
+ * @__reserved4: Not used.
+ * @obgrid_param: 0 = no update, 1 = update.
+ * @__reserved5: Not used.
*/
struct ipu3_uapi_flags {
- __u32 gdc:1; /* Geometric Distortion Correction */
- __u32 obgrid:1; /* Obgrid planes */
+ __u32 gdc:1;
+ __u32 obgrid:1;
__u32 __reserved1:30;
- __u32 __acc_stripe:1; /* Fields beginning with 2 underscore */
- __u32 __acc_input_feeder:1; /* are reserved and must be disabled */
__u32 acc_bnr:1;
__u32 acc_green_disparity:1;
__u32 acc_dm:1;
@@ -1350,8 +2746,7 @@ struct ipu3_uapi_flags {
__u32 acc_csc:1;
__u32 acc_cds:1;
__u32 acc_shd:1;
- __u32 acc_dvs_stat:1;
- __u32 acc_lace_stat:1; /* Local Adpative Contract Enhancement */
+ __u32 __reserved2:2;
__u32 acc_iefd:1;
__u32 acc_yds_c0:1;
__u32 acc_chnr_c0:1;
@@ -1368,36 +2763,65 @@ struct ipu3_uapi_flags {
__u32 acc_ae:1;
__u32 acc_af:1;
__u32 acc_awb:1;
- __u32 __acc_osys:1;
- __u32 __reserved2:3;
+ __u32 __reserved3:4;
__u32 lin_vmem_params:1;
__u32 tnr3_vmem_params:1;
__u32 xnr3_vmem_params:1;
__u32 tnr3_dmem_params:1;
__u32 xnr3_dmem_params:1;
- __u32 __rgbir_dmem_params:1;
+ __u32 __reserved4:1;
__u32 obgrid_param:1;
- __u32 __reserved3:25;
+ __u32 __reserved5:25;
} __packed;
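
/*
 * Usage sketch: a caller zeroes the flags and raises only the bits for
 * the blocks it actually filled in; everything else keeps its previous
 * hardware programming. Illustrative helper, assuming <string.h>.
 */
static void ipu3_flags_select_gamma(struct ipu3_uapi_flags *use)
{
	memset(use, 0, sizeof(*use));
	use->acc_gamma = 1;		/* all other blocks: no update */
}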
+/**
+ * struct ipu3_uapi_params - V4L2_META_FMT_IPU3_PARAMS
+ *
+ * @use: select which parameters to apply, see &ipu3_uapi_flags
+ * @acc_param: ACC parameters, as specified by &ipu3_uapi_acc_param
+ * @lin_vmem_params: linearization VMEM, as specified by
+ * &ipu3_uapi_isp_lin_vmem_params
+ * @tnr3_vmem_params: tnr3 VMEM as specified by
+ * &ipu3_uapi_isp_tnr3_vmem_params
+ * @xnr3_vmem_params: xnr3 VMEM as specified by
+ * &ipu3_uapi_isp_xnr3_vmem_params
+ * @tnr3_dmem_params: tnr3 DMEM as specified by &ipu3_uapi_isp_tnr3_params
+ * @xnr3_dmem_params: xnr3 DMEM as specified by &ipu3_uapi_isp_xnr3_params
+ * @obgrid_param: obgrid parameters as specified by
+ * &ipu3_uapi_obgrid_param
+ *
+ * The video queue "parameters" is of format V4L2_META_FMT_IPU3_PARAMS.
+ * This is a "single plane" v4l2_meta_format using V4L2_BUF_TYPE_META_OUTPUT.
+ *
+ * struct ipu3_uapi_params as defined below contains a lot of parameters and
+ * ipu3_uapi_flags selects which parameters to apply.
+ */
struct ipu3_uapi_params {
/* Flags which of the settings below are to be applied */
- struct ipu3_uapi_flags use IPU3_ALIGN;
+ struct ipu3_uapi_flags use __attribute__((aligned(32)));
/* Accelerator cluster parameters */
struct ipu3_uapi_acc_param acc_param;
- /* VMEM parameters */
+ /* ISP vector address space parameters */
struct ipu3_uapi_isp_lin_vmem_params lin_vmem_params;
struct ipu3_uapi_isp_tnr3_vmem_params tnr3_vmem_params;
struct ipu3_uapi_isp_xnr3_vmem_params xnr3_vmem_params;
- /* DMEM parameters */
+ /* ISP data memory (DMEM) parameters */
struct ipu3_uapi_isp_tnr3_params tnr3_dmem_params;
struct ipu3_uapi_isp_xnr3_params xnr3_dmem_params;
+ /* Optical black level compensation */
struct ipu3_uapi_obgrid_param obgrid_param;
} __packed;
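
/*
 * Userspace sketch (illustrative, error handling omitted): one buffer of
 * this layout is queued per frame on the parameters video node, assumed
 * to be set up already with VIDIOC_REQBUFS and mmap(). Assumes
 * <string.h>, <sys/ioctl.h> and <linux/videodev2.h>.
 */
static int ipu3_queue_params(int fd, struct ipu3_uapi_params *params)
{
	struct v4l2_buffer buf = { 0 };

	memset(params, 0, sizeof(*params));
	params->use.acc_gamma = 1;	/* example: update gamma only */

	buf.type = V4L2_BUF_TYPE_META_OUTPUT;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = 0;			/* the previously mmap()ed buffer */
	return ioctl(fd, VIDIOC_QBUF, &buf);
}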
+/* custom ctrl to set pipe mode */
+#define V4L2_CID_INTEL_IPU3_BASE (V4L2_CID_USER_BASE + 0x10a0)
+#define V4L2_CID_INTEL_IPU3_MODE (V4L2_CID_INTEL_IPU3_BASE + 1)
+enum ipu3_running_mode {
+ IPU3_RUNNING_MODE_VIDEO = 0,
+ IPU3_RUNNING_MODE_STILL = 1,
+};
#endif
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index f9f50b7b8c2bfc..d860cdaba238cb 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2396,7 +2396,7 @@ enum nl80211_attrs {
#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
-#define NL80211_WIPHY_NAME_MAXLEN 128
+#define NL80211_WIPHY_NAME_MAXLEN 64
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index eb3c786afa7079..723dc1ecd93bb8 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -670,7 +670,10 @@
#define PCI_EXT_CAP_ID_SECPCI 0x19 /* Secondary PCIe Capability */
#define PCI_EXT_CAP_ID_PMUX 0x1A /* Protocol Multiplexing */
#define PCI_EXT_CAP_ID_PASID 0x1B /* Process Address Space ID */
-#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PASID
+#define PCI_EXT_CAP_ID_DPC 0x1D /* Downstream Port Containment */
+#define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */
+#define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */
+#define PCI_EXT_CAP_ID_MAX PCI_EXT_CAP_ID_PTM
#define PCI_EXT_CAP_DSN_SIZEOF 12
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
@@ -946,4 +949,19 @@
#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+/* L1 PM Substates */
+#define PCI_L1SS_CAP 4 /* capability register */
+#define PCI_L1SS_CAP_PCIPM_L1_2 1 /* PCI PM L1.2 Support */
+#define PCI_L1SS_CAP_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
+#define PCI_L1SS_CAP_ASPM_L1_2 4 /* ASPM L1.2 Support */
+#define PCI_L1SS_CAP_ASPM_L1_1 8 /* ASPM L1.1 Support */
+#define PCI_L1SS_CAP_L1_PM_SS 16 /* L1 PM Substates Support */
+#define PCI_L1SS_CTL1 8 /* Control Register 1 */
+#define PCI_L1SS_CTL1_PCIPM_L1_2 1 /* PCI PM L1.2 Enable */
+#define PCI_L1SS_CTL1_PCIPM_L1_1 2 /* PCI PM L1.1 Support */
+#define PCI_L1SS_CTL1_ASPM_L1_2 4 /* ASPM L1.2 Support */
+#define PCI_L1SS_CTL1_ASPM_L1_1 8 /* ASPM L1.1 Support */
+#define PCI_L1SS_CTL1_L1SS_MASK 0x0000000F
+#define PCI_L1SS_CTL2 0xC /* Control Register 2 */
+
#endif /* LINUX_PCI_REGS_H */
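
/*
 * In-kernel usage sketch for the new L1SS definitions: locate the
 * extended capability, then read its registers. Illustrative only,
 * assuming <linux/pci.h>.
 */
static u32 read_l1ss_cap(struct pci_dev *dev)
{
	int pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
	u32 cap = 0;

	if (pos)
		pci_read_config_dword(dev, pos + PCI_L1SS_CAP, &cap);
	return cap;	/* test e.g. cap & PCI_L1SS_CAP_ASPM_L1_2 */
}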
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index c0cb2416b02e3c..8c801a43c38a89 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -200,6 +200,18 @@ struct prctl_mm_map {
# define PR_CAP_AMBIENT_LOWER 3
# define PR_CAP_AMBIENT_CLEAR_ALL 4
+/* Per task speculation control */
+#define PR_GET_SPECULATION_CTRL 52
+#define PR_SET_SPECULATION_CTRL 53
+/* Speculation control variants */
+# define PR_SPEC_STORE_BYPASS 0
+/* Return and control values for PR_SET/GET_SPECULATION_CTRL */
+# define PR_SPEC_NOT_AFFECTED 0
+# define PR_SPEC_PRCTL (1UL << 0)
+# define PR_SPEC_ENABLE (1UL << 1)
+# define PR_SPEC_DISABLE (1UL << 2)
+# define PR_SPEC_FORCE_DISABLE (1UL << 3)
+
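
/*
 * Usage sketch: a task opts itself out of speculative store bypass with
 * prctl(2); PR_SPEC_FORCE_DISABLE makes the choice irrevocable across
 * execve. Illustrative only, assuming <sys/prctl.h>.
 */
static int disable_ssb(void)
{
	return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		     PR_SPEC_FORCE_DISABLE, 0, 0);
}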
/* Sets the timerslack for arbitrary threads
* arg2 slack value, 0 means "use default"
* arg3 pid of the thread whose timer slack needs to be set
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 0f238a43ff1e7e..e4acb615792bd5 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -15,7 +15,9 @@
#define SECCOMP_SET_MODE_FILTER 1
/* Valid flags for SECCOMP_SET_MODE_FILTER */
-#define SECCOMP_FILTER_FLAG_TSYNC 1
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+/* In v4.14+ SECCOMP_FILTER_FLAG_LOG is (1UL << 1) */
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
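
/*
 * Usage sketch: seccomp(2) often has no libc wrapper, so the new flag is
 * passed through a raw syscall. Illustrative only, assuming
 * <sys/syscall.h>, <unistd.h> and <linux/filter.h>.
 */
static int load_filter_spec_allow(const struct sock_fprog *prog)
{
	return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		       SECCOMP_FILTER_FLAG_SPEC_ALLOW, prog);
}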
/*
* All BPF programs must return a 32-bit value.
diff --git a/include/uapi/linux/signalfd.h b/include/uapi/linux/signalfd.h
index 492c6def340da6..cdc8e1be35fcf0 100644
--- a/include/uapi/linux/signalfd.h
+++ b/include/uapi/linux/signalfd.h
@@ -34,6 +34,10 @@ struct signalfd_siginfo {
__u64 ssi_stime;
__u64 ssi_addr;
__u16 ssi_addr_lsb;
+ __u16 __pad2;
+ __s32 ssi_syscall;
+ __u64 ssi_call_addr;
+ __u32 ssi_arch;
/*
* Pad strcture to 128 bytes. Remember to update the
@@ -44,7 +48,7 @@ struct signalfd_siginfo {
* comes out of a read(2) and we really don't want to have
* a compat on read(2).
*/
- __u8 __pad[46];
+ __u8 __pad[28];
};
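
/*
 * Reader sketch: the new fields surface the SIGSYS details (syscall
 * number, call address, audit arch) that seccomp attaches to the signal.
 * Illustrative only, assuming <unistd.h>, <signal.h>, <stdio.h> and
 * <sys/signalfd.h>.
 */
static void dump_sigsys(int sfd)
{
	struct signalfd_siginfo si;

	if (read(sfd, &si, sizeof(si)) == sizeof(si) &&
	    si.ssi_signo == SIGSYS)
		printf("blocked syscall %d (arch 0x%x)\n",
		       (int)si.ssi_syscall, (unsigned int)si.ssi_arch);
}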
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 25a9ad8bcef124..9de808ebce05f1 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -55,6 +55,7 @@ enum
IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
__IPSTATS_MIB_MAX
};
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 6f6d93f6c4063c..05169f08913a97 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -284,9 +284,22 @@ static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor
static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- &desc->baSourceID[desc->bNrInPins + 4] :
- &desc->baSourceID[desc->bNrInPins + 6];
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return &desc->baSourceID[desc->bNrInPins + 4];
+ case UAC_VERSION_2:
+ return &desc->baSourceID[desc->bNrInPins + 6];
+ case UAC_VERSION_3:
+ return &desc->baSourceID[desc->bNrInPins + 2];
+ default:
+ return NULL;
+ }
+}
+
+static inline __u16 uac3_mixer_unit_wClusterDescrID(struct uac_mixer_unit_descriptor *desc)
+{
+ return (desc->baSourceID[desc->bNrInPins + 1] << 8) |
+ desc->baSourceID[desc->bNrInPins];
}
static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc)
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index d4582196255fe1..9ed8763f45a94c 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -583,13 +583,30 @@ enum v4l2_vp8_golden_frame_sel {
#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
-#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR (V4L2_CID_MPEG_BASE+512)
+#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
+enum v4l2_mpeg_video_vp8_profile {
+ V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3,
+};
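
/*
 * Usage sketch, assuming the enum above backs a menu control (the usual
 * V4L2 convention for enum controls): supported profiles can then be
 * probed with VIDIOC_QUERYMENU. Illustrative only, assuming <stdio.h>,
 * <sys/ioctl.h> and <linux/videodev2.h>.
 */
static void list_vp8_profiles(int fd)
{
	struct v4l2_querymenu qm = { .id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE };

	for (qm.index = V4L2_MPEG_VIDEO_VP8_PROFILE_0;
	     qm.index <= V4L2_MPEG_VIDEO_VP8_PROFILE_3; qm.index++)
		if (!ioctl(fd, VIDIOC_QUERYMENU, &qm))
			printf("supported: %s\n", (const char *)qm.name);
}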
+/* Deprecated alias for compatibility reasons. */
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
+#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
+enum v4l2_mpeg_video_vp9_profile {
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
+};
+
+/* Control IDs not existing in upstream */
+#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HDR (V4L2_CID_MPEG_BASE+590)
-#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR (V4L2_CID_MPEG_BASE+513)
-#define V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM (V4L2_CID_MPEG_BASE+514)
-#define V4L2_CID_MPEG_VIDEO_VP9_ENTROPY (V4L2_CID_MPEG_BASE+515)
+#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_HDR (V4L2_CID_MPEG_BASE+591)
+#define V4L2_CID_MPEG_VIDEO_VP9_DECODE_PARAM (V4L2_CID_MPEG_BASE+592)
+#define V4L2_CID_MPEG_VIDEO_VP9_ENTROPY (V4L2_CID_MPEG_BASE+593)
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
@@ -882,6 +899,9 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_JPEG_ACTIVE_MARKER_DQT (1 << 17)
#define V4L2_JPEG_ACTIVE_MARKER_DHT (1 << 18)
+#define V4L2_CID_JPEG_LUMA_QUANTIZATION (V4L2_CID_JPEG_CLASS_BASE + 5)
+#define V4L2_CID_JPEG_CHROMA_QUANTIZATION (V4L2_CID_JPEG_CLASS_BASE + 6)
+
/* Image source controls */
#define V4L2_CID_IMAGE_SOURCE_CLASS_BASE (V4L2_CTRL_CLASS_IMAGE_SOURCE | 0x900)
@@ -904,7 +924,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_LINK_FREQ (V4L2_CID_IMAGE_PROC_CLASS_BASE + 1)
#define V4L2_CID_PIXEL_RATE (V4L2_CID_IMAGE_PROC_CLASS_BASE + 2)
#define V4L2_CID_TEST_PATTERN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 3)
-
+#define V4L2_CID_DIGITAL_GAIN (V4L2_CID_IMAGE_PROC_CLASS_BASE + 5)
/* DV-class control IDs defined by V4L2 */
#define V4L2_CID_DV_CLASS_BASE (V4L2_CTRL_CLASS_DV | 0x900)
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 0a8b5f2fa1abb3..d1dc9163446377 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -588,6 +588,7 @@ struct v4l2_pix_format {
/* compressed formats */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
#define V4L2_PIX_FMT_JPEG v4l2_fourcc('J', 'P', 'E', 'G') /* JFIF JPEG */
+#define V4L2_PIX_FMT_JPEG_RAW v4l2_fourcc('J', 'P', 'G', 'R') /* JFIF JPEG RAW without headers */
#define V4L2_PIX_FMT_DV v4l2_fourcc('d', 'v', 's', 'd') /* 1394 */
#define V4L2_PIX_FMT_MPEG v4l2_fourcc('M', 'P', 'E', 'G') /* MPEG-1/2/4 Multiplexed */
#define V4L2_PIX_FMT_H264 v4l2_fourcc('H', '2', '6', '4') /* H264 with start codes */
@@ -1613,7 +1614,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
-#define V4L2_CTRL_FLAG_CAN_STORE 0x0400
+#define V4L2_CTRL_FLAG_CAN_STORE 0x8000
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa54ba4bd..2ad9a6d37ff44b 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -87,7 +87,7 @@ struct dlfb_data {
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
#define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
/* remove these once align.h patch is taken into kernel */
diff --git a/init/Kconfig b/init/Kconfig
index 3b2168dec804b7..f9bf7c0064e5e3 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -26,6 +26,16 @@ config IRQ_WORK
config BUILDTIME_EXTABLE_SORT
bool
+config THREAD_INFO_IN_TASK
+ bool
+ help
+ Select this to move thread_info off the stack into task_struct. To
+ make this work, an arch will need to remove all thread_info fields
+ except flags and fix any runtime bugs.
+
+ One subtle change that will be needed is to use try_get_task_stack()
+ and put_task_stack() in save_thread_stack_tsk() and get_wchan().
+
menu "General setup"
config BROKEN
@@ -1380,6 +1390,17 @@ source "usr/Kconfig"
endif
+choice
+ prompt "Compiler optimization level"
+ default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
+
+config CC_OPTIMIZE_FOR_PERFORMANCE
+ bool "Optimize for performance"
+ help
+ This is the default optimization level for the kernel, building
+ with the "-O2" compiler flag for best performance and most
+ helpful compile-time warnings.
+
config CC_OPTIMIZE_FOR_SIZE
bool "Optimize for size"
help
@@ -1388,6 +1409,8 @@ config CC_OPTIMIZE_FOR_SIZE
If unsure, say N.
+endchoice
+
config SYSCTL
bool
@@ -1798,6 +1821,7 @@ choice
config SLAB
bool "SLAB"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
well in all environments. It organizes cache hot objects in
@@ -1805,6 +1829,7 @@ config SLAB
config SLUB
bool "SLUB (Unqueued Allocator)"
+ select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
SLUB is a slab allocator that minimizes cache line usage
instead of managing queues of cached objects (SLAB approach).
diff --git a/init/init_task.c b/init/init_task.c
index 0585e5a4f209c6..43a7366696ac52 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -24,5 +24,8 @@ EXPORT_SYMBOL(init_task);
* Initial thread structure. Alignment of this is handled by a special
* linker map entry.
*/
-union thread_union init_thread_union __init_task_data =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data = {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+ INIT_THREAD_INFO(init_task)
+#endif
+};
diff --git a/init/main.c b/init/main.c
index 49926d95442f83..db6b6cbb846bf7 100644
--- a/init/main.c
+++ b/init/main.c
@@ -94,9 +94,6 @@ static int kernel_init(void *);
extern void init_IRQ(void);
extern void fork_init(void);
extern void radix_tree_init(void);
-#ifndef CONFIG_DEBUG_RODATA
-static inline void mark_rodata_ro(void) { }
-#endif
/*
* Debug helper: via this flag we know that we are in 'early bootup code'
@@ -472,7 +469,7 @@ void __init __weak smp_setup_processor_id(void)
}
# if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
{
}
#endif
@@ -650,7 +647,7 @@ asmlinkage __visible void __init start_kernel(void)
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
#endif
- thread_info_cache_init();
+ thread_stack_cache_init();
cred_init();
fork_init();
proc_caches_init();
@@ -931,6 +928,28 @@ static int try_to_run_init_process(const char *init_filename)
static noinline void __init kernel_init_freeable(void);
+#ifdef CONFIG_DEBUG_RODATA
+static bool rodata_enabled = true;
+static int __init set_debug_rodata(char *str)
+{
+ return strtobool(str, &rodata_enabled);
+}
+__setup("rodata=", set_debug_rodata);
+
+static void mark_readonly(void)
+{
+ if (rodata_enabled)
+ mark_rodata_ro();
+ else
+ pr_info("Kernel memory protection disabled.\n");
+}
+#else
+static inline void mark_readonly(void)
+{
+ pr_warn("This architecture does not have kernel memory protection.\n");
+}
+#endif
+
static int __ref kernel_init(void *unused)
{
int ret;
@@ -939,7 +958,7 @@ static int __ref kernel_init(void *unused)
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
free_initmem();
- mark_rodata_ro();
+ mark_readonly();
system_state = SYSTEM_RUNNING;
numa_default_policy();
diff --git a/kernel/Makefile b/kernel/Makefile
index aecd98129c0c2b..2229530ebfb0e5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -62,7 +62,6 @@ endif
obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SIG) += module_signing.o
-obj-$(CONFIG_TEST_MODULE) += test_module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
obj-$(CONFIG_KEXEC_CORE) += kexec_core.o
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index a162661c9d60d1..f45a9a5d3e47a3 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -419,6 +419,13 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
struct path parent_path;
int h, ret = 0;
+ /*
+ * When we will be calling audit_add_to_parent, krule->watch might have
+ * been updated and watch might have been freed.
+ * So we need to keep a reference of watch.
+ */
+ audit_get_watch(watch);
+
mutex_unlock(&audit_filter_mutex);
/* Avoid calling path_lookup under audit_filter_mutex. */
@@ -427,8 +434,10 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
/* caller expects mutex locked */
mutex_lock(&audit_filter_mutex);
- if (ret)
+ if (ret) {
+ audit_put_watch(watch);
return ret;
+ }
/* either find an old parent or attach a new one */
parent = audit_find_parent(d_backing_inode(parent_path.dentry));
@@ -446,6 +455,7 @@ int audit_add_watch(struct audit_krule *krule, struct list_head **list)
*list = &audit_inode_hash[h];
error:
path_put(&parent_path);
+ audit_put_watch(watch);
return ret;
}
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index b8ff9e19375361..b57f929f1b4683 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -406,7 +406,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
return -EINVAL;
break;
case AUDIT_EXE:
- if (f->op != Audit_equal)
+ if (f->op != Audit_not_equal && f->op != Audit_equal)
return -EINVAL;
if (entry->rule.listnr != AUDIT_FILTER_EXIT)
return -EINVAL;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 7444f95f3ee92b..0fe8b337291a39 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -470,6 +470,8 @@ static int audit_filter_rules(struct task_struct *tsk,
break;
case AUDIT_EXE:
result = audit_exe_compare(tsk, rule->exe);
+ if (f->op == Audit_not_equal)
+ result = !result;
break;
case AUDIT_UID:
result = audit_uid_comparator(cred->uid, f->op, f->uid);
diff --git a/kernel/bounds.c b/kernel/bounds.c
index e1d1d1952bfa37..c37f68d758db03 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -12,7 +12,7 @@
#include <linux/log2.h>
#include <linux/spinlock_types.h>
-void foo(void)
+int main(void)
{
/* The enum constants to put into include/generated/bounds.h */
DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
@@ -22,4 +22,6 @@ void foo(void)
#endif
DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
/* End of constants */
+
+ return 0;
}
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 0eb11b4ac4c7ac..a797f3ea8a360f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -172,7 +172,7 @@ static const struct bpf_map_ops array_ops = {
.map_delete_elem = array_map_delete_elem,
};
-static struct bpf_map_type_list array_type __read_mostly = {
+static struct bpf_map_type_list array_type __ro_after_init = {
.ops = &array_ops,
.type = BPF_MAP_TYPE_ARRAY,
};
@@ -270,9 +270,7 @@ static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd)
static void prog_fd_array_put_ptr(void *ptr)
{
- struct bpf_prog *prog = ptr;
-
- bpf_prog_put_rcu(prog);
+ bpf_prog_put(ptr);
}
/* decrement refcnt of all bpf_progs that are stored in this map */
@@ -296,7 +294,7 @@ static const struct bpf_map_ops prog_array_ops = {
.map_fd_put_ptr = prog_fd_array_put_ptr,
};
-static struct bpf_map_type_list prog_array_type __read_mostly = {
+static struct bpf_map_type_list prog_array_type __ro_after_init = {
.ops = &prog_array_ops,
.type = BPF_MAP_TYPE_PROG_ARRAY,
};
@@ -362,7 +360,7 @@ static const struct bpf_map_ops perf_event_array_ops = {
.map_fd_put_ptr = perf_event_fd_array_put_ptr,
};
-static struct bpf_map_type_list perf_event_array_type __read_mostly = {
+static struct bpf_map_type_list perf_event_array_type __ro_after_init = {
.ops = &perf_event_array_ops,
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
};
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a35abe0482390a..003585c7c5f980 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -375,7 +375,7 @@ static const struct bpf_map_ops htab_ops = {
.map_delete_elem = htab_map_delete_elem,
};
-static struct bpf_map_type_list htab_type __read_mostly = {
+static struct bpf_map_type_list htab_type __ro_after_init = {
.ops = &htab_ops,
.type = BPF_MAP_TYPE_HASH,
};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4b9bbfe764e82a..04fc1022ad9fbd 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -487,7 +487,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
free_uid(user);
}
-static void __prog_put_common(struct rcu_head *rcu)
+static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
@@ -496,17 +496,10 @@ static void __prog_put_common(struct rcu_head *rcu)
bpf_prog_free(aux->prog);
}
-/* version of bpf_prog_put() that is called after a grace period */
-void bpf_prog_put_rcu(struct bpf_prog *prog)
-{
- if (atomic_dec_and_test(&prog->aux->refcnt))
- call_rcu(&prog->aux->rcu, __prog_put_common);
-}
-
void bpf_prog_put(struct bpf_prog *prog)
{
if (atomic_dec_and_test(&prog->aux->refcnt))
- __prog_put_common(&prog->aux->rcu);
+ call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
@@ -514,7 +507,7 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
{
struct bpf_prog *prog = filp->private_data;
- bpf_prog_put_rcu(prog);
+ bpf_prog_put(prog);
return 0;
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 79e3c21a35d07e..c43ca985747981 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -191,6 +191,7 @@ struct bpf_insn_aux_data {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
+ int sanitize_stack_off; /* stack slot to be cleared */
bool seen; /* this insn was processed by the verifier */
};
@@ -569,10 +570,11 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
/* check_stack_read/write functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
-static int check_stack_write(struct verifier_state *state, int off, int size,
- int value_regno)
+static int check_stack_write(struct verifier_env *env,
+ struct verifier_state *state, int off,
+ int size, int value_regno, int insn_idx)
{
- int i;
+ int i, spi = (MAX_BPF_STACK + off) / BPF_REG_SIZE;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
* so it's aligned access and [off, off + size) are within stack limits
*/
@@ -587,15 +589,37 @@ static int check_stack_write(struct verifier_state *state, int off, int size,
}
/* save register state */
- state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
- state->regs[value_regno];
-
- for (i = 0; i < BPF_REG_SIZE; i++)
+ state->spilled_regs[spi] = state->regs[value_regno];
+
+ for (i = 0; i < BPF_REG_SIZE; i++) {
+ if (state->stack_slot_type[MAX_BPF_STACK + off + i] == STACK_MISC &&
+ !env->allow_ptr_leaks) {
+ int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off;
+ int soff = (-spi - 1) * BPF_REG_SIZE;
+
+ /* detected reuse of an integer stack slot with a
+ * pointer, which means either llvm is reusing the
+ * stack slot or an attacker is trying to exploit
+ * CVE-2018-3639 (speculative store bypass).
+ * We have to sanitize that slot with a preemptive
+ * store of zero.
+ */
+ if (*poff && *poff != soff) {
+ /* disallow programs where a single insn stores
+ * into two different stack slots, since the verifier
+ * cannot sanitize them
+ */
+ verbose("insn %d cannot access two stack slots fp%d and fp%d",
+ insn_idx, *poff, soff);
+ return -EINVAL;
+ }
+ *poff = soff;
+ }
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL;
+ }
} else {
/* regular write of data into stack */
- state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] =
- (struct reg_state) {};
+ state->spilled_regs[spi] = (struct reg_state) {};
for (i = 0; i < size; i++)
state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC;
@@ -696,7 +720,7 @@ static bool is_ctx_reg(struct verifier_env *env, int regno)
* if t==write && value_regno==-1, some unknown value is stored into memory
* if t==read && value_regno==-1, don't care what we read from memory
*/
-static int check_mem_access(struct verifier_env *env, u32 regno, int off,
+static int check_mem_access(struct verifier_env *env, int insn_idx, u32 regno, int off,
int bpf_size, enum bpf_access_type t,
int value_regno)
{
@@ -748,7 +772,8 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
verbose("attempt to corrupt spilled pointer on stack\n");
return -EACCES;
}
- err = check_stack_write(state, off, size, value_regno);
+ err = check_stack_write(env, state, off, size,
+ value_regno, insn_idx);
} else {
err = check_stack_read(state, off, size, value_regno);
}
@@ -760,7 +785,7 @@ static int check_mem_access(struct verifier_env *env, u32 regno, int off,
return err;
}
-static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
+static int check_xadd(struct verifier_env *env, int insn_idx, struct bpf_insn *insn)
{
struct reg_state *regs = env->cur_state.regs;
int err;
@@ -793,13 +818,13 @@ static int check_xadd(struct verifier_env *env, struct bpf_insn *insn)
}
/* check whether atomic_add can read the memory */
- err = check_mem_access(env, insn->dst_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
if (err)
return err;
/* check whether atomic_add can write into the same memory */
- return check_mem_access(env, insn->dst_reg, insn->off,
+ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE, -1);
}
@@ -1838,13 +1863,14 @@ static int do_check(struct verifier_env *env)
/* check that memory (src_reg + off) is readable,
* the state of dst_reg will be updated by this func
*/
- err = check_mem_access(env, insn->src_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ,
insn->dst_reg);
if (err)
return err;
- if (BPF_SIZE(insn->code) != BPF_W) {
+ if (BPF_SIZE(insn->code) != BPF_W &&
+ BPF_SIZE(insn->code) != BPF_DW) {
insn_idx++;
continue;
}
@@ -1876,7 +1902,7 @@ static int do_check(struct verifier_env *env)
enum bpf_reg_type *prev_dst_type, dst_reg_type;
if (BPF_MODE(insn->code) == BPF_XADD) {
- err = check_xadd(env, insn);
+ err = check_xadd(env, insn_idx, insn);
if (err)
return err;
insn_idx++;
@@ -1895,7 +1921,7 @@ static int do_check(struct verifier_env *env)
dst_reg_type = regs[insn->dst_reg].type;
/* check that memory (dst_reg + off) is writeable */
- err = check_mem_access(env, insn->dst_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
insn->src_reg);
if (err)
@@ -1930,7 +1956,7 @@ static int do_check(struct verifier_env *env)
}
/* check that memory (dst_reg + off) is writeable */
- err = check_mem_access(env, insn->dst_reg, insn->off,
+ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_WRITE,
-1);
if (err)
@@ -2101,7 +2127,7 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
/* hold the map. If the program is rejected by verifier,
* the map will be released by release_maps() or it
* will be used by the valid program until it's unloaded
- * and all maps are released in free_bpf_prog_info()
+ * and all maps are released in free_used_maps()
*/
map = bpf_map_inc(map, false);
if (IS_ERR(map)) {
@@ -2220,13 +2246,43 @@ static int convert_ctx_accesses(struct verifier_env *env)
for (i = 0; i < insn_cnt; i++, insn++) {
u32 cnt;
- if (insn->code == (BPF_LDX | BPF_MEM | BPF_W))
+ if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
- else if (insn->code == (BPF_STX | BPF_MEM | BPF_W))
+ else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
continue;
+ if (type == BPF_WRITE &&
+ env->insn_aux_data[i + delta].sanitize_stack_off) {
+ struct bpf_insn patch[] = {
+ /* Sanitize suspicious stack slot with zero.
+ * There are no memory dependencies for this store,
+ * since it's only using frame pointer and immediate
+ * constant of zero
+ */
+ BPF_ST_MEM(BPF_DW, BPF_REG_FP,
+ env->insn_aux_data[i + delta].sanitize_stack_off,
+ 0),
+ /* the original STX instruction will immediately
+ * overwrite the same stack slot with appropriate value
+ */
+ *insn,
+ };
+
+ cnt = ARRAY_SIZE(patch);
+ new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt);
+ if (!new_prog)
+ return -ENOMEM;
+
+ delta += cnt - 1;
+ env->prog = new_prog;
+ insn = new_prog->insnsi + i + delta;
+ continue;
+ }
+
if (env->insn_aux_data[i + delta].ptr_type != PTR_TO_CTX)
continue;
@@ -2487,7 +2543,7 @@ free_log_buf:
vfree(log_buf);
if (!env->prog->aux->used_maps)
/* if we didn't copy map pointers into bpf_prog_info, release
- * them now. Otherwise free_bpf_prog_info() will release them.
+ * them now. Otherwise free_used_maps() will release them.
*/
release_maps(env);
*prog = env->prog;
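
The sanitization pass above splices a zero-store in front of each flagged STX. As a rough illustration (not the kernel's own helper), the two-instruction patch can be written out with hand-coded constants; sanitize_patch and stack_off are invented names for this sketch:

#include <linux/bpf.h>	/* struct bpf_insn */

static void sanitize_patch(struct bpf_insn patch[2], struct bpf_insn orig,
			   short stack_off)
{
	/* *(u64 *)(r10 + stack_off) = 0 -- scrub the suspicious slot */
	patch[0] = (struct bpf_insn){
		.code	 = 0x7a,	/* BPF_ST | BPF_MEM | BPF_DW */
		.dst_reg = 10,		/* frame pointer, BPF_REG_FP */
		.off	 = stack_off,
		.imm	 = 0,
	};
	/* the original STX immediately overwrites the slot with real data */
	patch[1] = orig;
}
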
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 4eef29dc76d976..3df3d067753da7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4169,7 +4169,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
*/
do {
css_task_iter_start(&from->self, &it);
- task = css_task_iter_next(&it);
+
+ do {
+ task = css_task_iter_next(&it);
+ } while (task && (task->flags & PF_EXITING));
+
if (task)
get_task_struct(task);
css_task_iter_end(&it);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 62b784dc201b92..7cfe20bde69cd9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -185,10 +185,17 @@ void cpu_hotplug_disable(void)
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
+static void __cpu_hotplug_enable(void)
+{
+ if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
+ return;
+ cpu_hotplug_disabled--;
+}
+
void cpu_hotplug_enable(void)
{
cpu_maps_update_begin();
- WARN_ON(--cpu_hotplug_disabled < 0);
+ __cpu_hotplug_enable();
cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
@@ -630,7 +637,7 @@ void enable_nonboot_cpus(void)
/* Allow everyone to use the CPU hotplug again */
cpu_maps_update_begin();
- WARN_ON(--cpu_hotplug_disabled < 0);
+ __cpu_hotplug_enable();
if (cpumask_empty(frozen_cpus))
goto out;
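
Both enable paths now share one helper so an unbalanced decrement warns once instead of driving the counter negative. A minimal userspace sketch of the same guard, with hotplug_disabled and hotplug_enable as stand-in names:

#include <stdio.h>

static int hotplug_disabled;	/* stand-in for cpu_hotplug_disabled */

/* Guarded decrement mirroring __cpu_hotplug_enable(): warn and bail
 * instead of letting the counter go negative. */
static void hotplug_enable(void)
{
	if (hotplug_disabled == 0) {
		fprintf(stderr, "Unbalanced hotplug enable\n");
		return;
	}
	hotplug_disabled--;
}
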
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index e1dbf4a2c69e4c..90ff129c88a27c 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -153,13 +153,11 @@ static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
} else {
kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
__func__, bp->bp_addr);
-#ifdef CONFIG_DEBUG_RODATA
if (!bp->bp_type) {
kdb_printf("Software breakpoints are unavailable.\n"
- " Change the kernel CONFIG_DEBUG_RODATA=n\n"
+ " Boot the kernel with rodata=off\n"
" OR use hw breaks: help bph\n");
}
-#endif
return 1;
}
return 0;
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 3990c1f73e451b..d3c5b15c86c1f3 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -215,7 +215,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
int count;
int i;
int diag, dtab_count;
- int key;
+ int key, buf_size, ret;
static int last_crlf;
diag = kdbgetintenv("DTABCOUNT", &dtab_count);
@@ -343,9 +343,8 @@ poll_again:
else
p_tmp = tmpbuffer;
len = strlen(p_tmp);
- count = kallsyms_symbol_complete(p_tmp,
- sizeof(tmpbuffer) -
- (p_tmp - tmpbuffer));
+ buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer);
+ count = kallsyms_symbol_complete(p_tmp, buf_size);
if (tab == 2 && count > 0) {
kdb_printf("\n%d symbols are found.", count);
if (count > dtab_count) {
@@ -357,9 +356,13 @@ poll_again:
}
kdb_printf("\n");
for (i = 0; i < count; i++) {
- if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
+ ret = kallsyms_symbol_next(p_tmp, i, buf_size);
+ if (WARN_ON(!ret))
break;
- kdb_printf("%s ", p_tmp);
+ if (ret != -E2BIG)
+ kdb_printf("%s ", p_tmp);
+ else
+ kdb_printf("%s... ", p_tmp);
*(p_tmp + len) = '\0';
}
if (i >= dtab_count)
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 75014d7f45681b..533e04e75a9c49 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -83,7 +83,7 @@ typedef struct __ksymtab {
unsigned long sym_start;
unsigned long sym_end;
} kdb_symtab_t;
-extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size);
extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
/* Exported Symbols for kernel loadable modules to use. */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index d35cc2d3a4cc08..61cd704a21c8dd 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -129,13 +129,13 @@ int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
}
if (i >= ARRAY_SIZE(kdb_name_table)) {
debug_kfree(kdb_name_table[0]);
- memcpy(kdb_name_table, kdb_name_table+1,
+ memmove(kdb_name_table, kdb_name_table+1,
sizeof(kdb_name_table[0]) *
(ARRAY_SIZE(kdb_name_table)-1));
} else {
debug_kfree(knt1);
knt1 = kdb_name_table[i];
- memcpy(kdb_name_table+i, kdb_name_table+i+1,
+ memmove(kdb_name_table+i, kdb_name_table+i+1,
sizeof(kdb_name_table[0]) *
(ARRAY_SIZE(kdb_name_table)-i-1));
}
@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len)
* Parameters:
* prefix_name prefix of a symbol name to lookup
* flag 0 means search from the head, 1 means continue search.
+ * buf_size maximum length that can be written to prefix_name
+ * buffer
* Returns:
* 1 if a symbol matches the given prefix.
* 0 if no string found
*/
-int kallsyms_symbol_next(char *prefix_name, int flag)
+int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size)
{
int prefix_len = strlen(prefix_name);
static loff_t pos;
@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag)
pos = 0;
while ((name = kdb_walk_kallsyms(&pos))) {
- if (strncmp(name, prefix_name, prefix_len) == 0) {
- strncpy(prefix_name, name, strlen(name)+1);
- return 1;
- }
+ if (!strncmp(name, prefix_name, prefix_len))
+ return strscpy(prefix_name, name, buf_size);
}
return 0;
}
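
strscpy() differs from the replaced strncpy() in the two ways that matter here: it always NUL-terminates and it reports truncation with -E2BIG instead of silently chopping. A minimal userspace approximation, assuming errno-style negative returns:

#include <errno.h>
#include <stddef.h>
#include <string.h>

/* Bounded copy that always terminates and flags truncation. */
static long bounded_copy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -E2BIG;
	len = strnlen(src, size);
	if (len == size) {		/* src does not fit: truncate */
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);	/* copies the NUL too */
	return (long)len;
}
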
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 98e68282b5d3fb..ee30dafc4e46ec 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7030,6 +7030,8 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
goto unlock;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
+ if (event->cpu != smp_processor_id())
+ continue;
if (event->attr.type != PERF_TYPE_TRACEPOINT)
continue;
if (event->attr.config != entry->type)
@@ -7151,7 +7153,7 @@ static void perf_event_free_bpf_prog(struct perf_event *event)
prog = event->tp_event->prog;
if (prog && event->tp_event->bpf_prog_owner == event) {
event->tp_event->prog = NULL;
- bpf_prog_put_rcu(prog);
+ bpf_prog_put(prog);
}
}
@@ -8545,6 +8547,7 @@ SYSCALL_DEFINE5(perf_event_open,
f_flags);
if (IS_ERR(event_file)) {
err = PTR_ERR(event_file);
+ event_file = NULL;
goto err_context;
}
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 58013ef228a109..7324d83d6bd84c 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -289,6 +289,13 @@ void *perf_aux_output_begin(struct perf_output_handle *handle,
goto err;
/*
+ * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
+ * the aux buffer is in perf_mmap_close(), about to get freed.
+ */
+ if (!atomic_read(&rb->aux_mmap_count))
+ goto err;
+
+ /*
* Nesting is not supported for AUX area, make sure nested
* writers are caught early
*/
@@ -468,6 +475,25 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
__free_page(page);
}
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+ int pg;
+
+ if (rb->aux_priv) {
+ rb->free_aux(rb->aux_priv);
+ rb->free_aux = NULL;
+ rb->aux_priv = NULL;
+ }
+
+ if (rb->aux_nr_pages) {
+ for (pg = 0; pg < rb->aux_nr_pages; pg++)
+ rb_free_aux_page(rb, pg);
+
+ kfree(rb->aux_pages);
+ rb->aux_nr_pages = 0;
+ }
+}
+
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
@@ -556,30 +582,11 @@ out:
if (!ret)
rb->aux_pgoff = pgoff;
else
- rb_free_aux(rb);
+ __rb_free_aux(rb);
return ret;
}
-static void __rb_free_aux(struct ring_buffer *rb)
-{
- int pg;
-
- if (rb->aux_priv) {
- rb->free_aux(rb->aux_priv);
- rb->free_aux = NULL;
- rb->aux_priv = NULL;
- }
-
- if (rb->aux_nr_pages) {
- for (pg = 0; pg < rb->aux_nr_pages; pg++)
- rb_free_aux_page(rb, pg);
-
- kfree(rb->aux_pages);
- rb->aux_nr_pages = 0;
- }
-}
-
void rb_free_aux(struct ring_buffer *rb)
{
if (atomic_dec_and_test(&rb->aux_refcount))
@@ -637,6 +644,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
size = sizeof(struct ring_buffer);
size += nr_pages * sizeof(void *);
+ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+ goto fail;
+
rb = kzalloc(size, GFP_KERNEL);
if (!rb)
goto fail;
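
The new guard in rb_alloc() rejects page counts whose bookkeeping allocation could never be satisfied by the buddy allocator. A sketch of the same check with assumed values for PAGE_SHIFT and MAX_ORDER (both are per-architecture in the kernel):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SHIFT 12	/* assumed: 4K pages */
#define MAX_ORDER  11	/* assumed: largest buddy order + 1 */

/* ceil(log2(n)) for n >= 1, like the kernel's order_base_2() */
static unsigned int order_base_2(size_t n)
{
	unsigned int bits = 0;
	size_t v = n - 1;

	while (v) {
		bits++;
		v >>= 1;
	}
	return bits;
}

/* Refuse requests the page allocator could never satisfy, instead of
 * letting kzalloc() warn and fail. */
static bool alloc_size_ok(size_t size)
{
	return order_base_2(size) < PAGE_SHIFT + MAX_ORDER;
}
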
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index da0c09ff6112ba..aad43c88a66851 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -299,7 +299,7 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
retry:
/* Read the page with vaddr into memory */
- ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
+ ret = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page, &vma);
if (ret <= 0)
return ret;
@@ -606,7 +606,7 @@ static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
BUG_ON((uprobe->offset & ~PAGE_MASK) +
UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
- smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
+ smp_wmb(); /* pairs with the smp_rmb() in handle_swbp() */
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
out:
@@ -1700,7 +1700,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
if (likely(result == 0))
goto out;
- result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
+ result = get_user_pages(NULL, mm, vaddr, 1, FOLL_FORCE, &page, NULL);
if (result < 0)
return result;
@@ -1892,10 +1892,18 @@ static void handle_swbp(struct pt_regs *regs)
* After we hit the bp, _unregister + _register can install the
* new and not-yet-analyzed uprobe at the same address, restart.
*/
- smp_rmb(); /* pairs with wmb() in install_breakpoint() */
if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
goto out;
+ /*
+ * Pairs with the smp_wmb() in prepare_uprobe().
+ *
+ * Guarantees that if we see the UPROBE_COPY_INSN bit set, then
+ * we must also see the stores to &uprobe->arch performed by the
+ * prepare_uprobe() call.
+ */
+ smp_rmb();
+
/* Tracing handlers use ->utask to communicate with fetch methods */
if (!get_utask())
goto out;
diff --git a/kernel/exit.c b/kernel/exit.c
index f5f702efb9c46f..a1f7c244ef3afe 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -453,12 +453,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p)
return NULL;
}
-static struct task_struct *find_child_reaper(struct task_struct *father)
+static struct task_struct *find_child_reaper(struct task_struct *father,
+ struct list_head *dead)
__releases(&tasklist_lock)
__acquires(&tasklist_lock)
{
struct pid_namespace *pid_ns = task_active_pid_ns(father);
struct task_struct *reaper = pid_ns->child_reaper;
+ struct task_struct *p, *n;
if (likely(reaper != father))
return reaper;
@@ -474,6 +476,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father)
panic("Attempted to kill init! exitcode=0x%08x\n",
father->signal->group_exit_code ?: father->exit_code);
}
+
+ list_for_each_entry_safe(p, n, dead, ptrace_entry) {
+ list_del_init(&p->ptrace_entry);
+ release_task(p);
+ }
+
zap_pid_ns_processes(pid_ns);
write_lock_irq(&tasklist_lock);
@@ -560,7 +568,7 @@ static void forget_original_parent(struct task_struct *father,
exit_ptrace(father, dead);
/* Can drop and reacquire tasklist_lock */
- reaper = find_child_reaper(father);
+ reaper = find_child_reaper(father, dead);
if (list_empty(&father->children))
return;
diff --git a/kernel/fork.c b/kernel/fork.c
index d92d4c64723848..a823e1648e7643 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -149,18 +149,18 @@ static inline void free_task_struct(struct task_struct *tsk)
}
#endif
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
{
}
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
*/
# if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -169,30 +169,31 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
return page ? page_address(page) : NULL;
}
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
{
- kaiser_unmap_thread_stack(ti);
- free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+ struct page *page = virt_to_page(stack);
+ kaiser_unmap_thread_stack(stack);
+ __free_kmem_pages(page, THREAD_SIZE_ORDER);
}
# else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
- return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+ return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
{
- kmem_cache_free(thread_info_cache, ti);
+ kmem_cache_free(thread_stack_cache, stack);
}
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
+ BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
@@ -215,9 +216,9 @@ struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
{
- struct zone *zone = page_zone(virt_to_page(ti));
+ struct zone *zone = page_zone(virt_to_page(stack));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
@@ -225,8 +226,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
- arch_release_thread_info(tsk->stack);
- free_thread_info(tsk->stack);
+ arch_release_thread_stack(tsk->stack);
+ free_thread_stack(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
@@ -339,7 +340,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
- struct thread_info *ti;
+ unsigned long *stack;
int err;
if (node == NUMA_NO_NODE)
@@ -348,19 +349,19 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
if (!tsk)
return NULL;
- ti = alloc_thread_info_node(tsk, node);
- if (!ti)
+ stack = alloc_thread_stack_node(tsk, node);
+ if (!stack)
goto free_tsk;
err = arch_dup_task_struct(tsk, orig);
if (err)
- goto free_ti;
+ goto free_stack;
- tsk->stack = ti;
+ tsk->stack = stack;
err = kaiser_map_thread_stack(tsk->stack);
if (err)
- goto free_ti;
+ goto free_stack;
#ifdef CONFIG_SECCOMP
/*
* We must handle setting up seccomp filters once we're under
@@ -392,14 +393,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
- account_kernel_stack(ti, 1);
+ account_kernel_stack(stack, 1);
kcov_task_init(tsk);
return tsk;
-free_ti:
- free_thread_info(ti);
+free_stack:
+ free_thread_stack(stack);
free_tsk:
free_task_struct(tsk);
return NULL;
@@ -466,7 +467,12 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
if (retval)
goto fail_nomem_policy;
tmp->vm_mm = mm;
- if (anon_vma_fork(tmp, mpnt))
+ if (tmp->vm_flags & VM_WIPEONFORK) {
+ /* VM_WIPEONFORK gets a clean slate in the child. */
+ tmp->anon_vma = NULL;
+ if (anon_vma_prepare(tmp))
+ goto fail_nomem_anon_vma_fork;
+ } else if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
tmp->vm_flags &=
~(VM_LOCKED|VM_LOCKONFAULT|VM_UFFD_MISSING|VM_UFFD_WP);
@@ -512,7 +518,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
rb_parent = &tmp->vm_rb;
mm->map_count++;
- retval = copy_page_range(mm, oldmm, mpnt);
+ if (!(tmp->vm_flags & VM_WIPEONFORK))
+ retval = copy_page_range(mm, oldmm, mpnt);
if (tmp->vm_ops && tmp->vm_ops->open)
tmp->vm_ops->open(tmp);
@@ -1114,7 +1121,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
return -ENOMEM;
atomic_set(&sig->count, 1);
+ spin_lock_irq(&current->sighand->siglock);
memcpy(sig->action, current->sighand->action, sizeof(sig->action));
+ spin_unlock_irq(&current->sighand->siglock);
return 0;
}
@@ -1340,6 +1349,18 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (!p)
goto fork_out;
+ /*
+ * This _must_ happen before we call free_task(), i.e. before we jump
+ * to any of the bad_fork_* labels. This is to avoid freeing
+ * p->set_child_tid which is (ab)used as a kthread's data pointer for
+ * kernel threads (PF_KTHREAD).
+ */
+ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+ /*
+ * Clear TID on mm_release()?
+ */
+ p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
ftrace_graph_init_task(p);
rt_mutex_init_task(p);
@@ -1402,8 +1423,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
posix_cpu_timers_init(p);
- p->start_time = ktime_get_ns();
- p->real_start_time = ktime_get_boot_ns();
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1501,11 +1520,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
}
- p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
- /*
- * Clear TID on mm_release()?
- */
- p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
p->plug = NULL;
#endif
@@ -1569,6 +1583,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
goto bad_fork_free_pid;
/*
+ * From this point on we must avoid any synchronous user-space
+ * communication until we take the tasklist-lock. In particular, we do
+ * not want user-space to be able to predict the process start-time by
+ * stalling fork(2) after we recorded the start_time but before it is
+ * visible to the system.
+ */
+
+ p->start_time = ktime_get_ns();
+ p->real_start_time = ktime_get_boot_ns();
+
+ /*
* Make it visible to the rest of the system, but don't wake it up yet.
* Need tasklist lock for parent etc handling!
*/
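
The VM_WIPEONFORK handling in dup_mmap() is what backs the userspace madvise(MADV_WIPEONFORK) flag: the child gets a fresh anonymous mapping instead of a copy, so copy_page_range() is skipped. A small demo, assuming a kernel that supports the flag:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "secret");
	if (madvise(p, 4096, MADV_WIPEONFORK) != 0)
		return 1;	/* kernel without wipe-on-fork support */

	if (fork() == 0) {
		printf("child sees: \"%s\"\n", p);	/* wiped: empty */
		_exit(0);
	}
	wait(NULL);
	printf("parent sees: \"%s\"\n", p);	/* still "secret" */
	return 0;
}
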
diff --git a/kernel/futex.c b/kernel/futex.c
index 912b2533a5181d..8041fe3a18dcb0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2923,10 +2923,13 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
- ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
- debug_rt_mutex_free_waiter(&rt_waiter);
+ ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
spin_lock(q.lock_ptr);
+ if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
+ ret = 0;
+
+ debug_rt_mutex_free_waiter(&rt_waiter);
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
@@ -3065,6 +3068,10 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
{
u32 uval, uninitialized_var(nval), mval;
+ /* Futex address must be 32bit aligned */
+ if ((((unsigned long)uaddr) % sizeof(*uaddr)) != 0)
+ return -1;
+
retry:
if (get_user(uval, uaddr))
return -1;
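
The added check in handle_futex_death() rejects futex words that are not 4-byte aligned before any atomic access is attempted; a misaligned address would otherwise reach cmpxchg paths that assume natural alignment. The same test in isolation:

#include <stdbool.h>
#include <stdint.h>

/* A futex word must be naturally aligned. */
static bool futex_aligned(const uint32_t *uaddr)
{
	return ((uintptr_t)uaddr % sizeof(*uaddr)) == 0;
}
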
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 73a31d99d28bb4..e86abd8f578094 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -30,7 +30,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
* is disabled during the critical section. It also controls the size of
* the RCU grace period. So it needs to be upper-bound.
*/
-#define HUNG_TASK_BATCHING 1024
+#define HUNG_TASK_LOCK_BREAK (HZ / 10)
/*
* Zero means infinite timeout - no checking done:
@@ -160,7 +160,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
int max_count = sysctl_hung_task_check_count;
- int batch_count = HUNG_TASK_BATCHING;
+ unsigned long last_break = jiffies;
struct task_struct *g, *t;
/*
@@ -174,10 +174,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
for_each_process_thread(g, t) {
if (!max_count--)
goto unlock;
- if (!--batch_count) {
- batch_count = HUNG_TASK_BATCHING;
+ if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
if (!rcu_lock_break(g, t))
goto unlock;
+ last_break = jiffies;
}
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
if (t->state == TASK_UNINTERRUPTIBLE)
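
The change swaps a fixed per-1024-tasks lock break for a time-based one, so huge task counts can no longer stretch the RCU critical section unboundedly. A userspace sketch of the jiffies/time_after() pattern using CLOCK_MONOTONIC; should_break is an invented helper name:

#include <stdbool.h>
#include <time.h>

/* Break the lock after ~interval_ms of wall time, not every N items. */
static bool should_break(struct timespec *last, long interval_ms)
{
	struct timespec now;
	long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &now);
	elapsed_ms = (now.tv_sec - last->tv_sec) * 1000 +
		     (now.tv_nsec - last->tv_nsec) / 1000000;
	if (elapsed_ms < interval_ms)
		return false;
	*last = now;
	return true;
}
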
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e4453d9f788c21..3c74e13a95dc4a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -338,7 +338,6 @@ void handle_nested_irq(unsigned int irq)
raw_spin_lock_irq(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(desc);
action = desc->action;
if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
@@ -346,6 +345,7 @@ void handle_nested_irq(unsigned int irq)
goto out_unlock;
}
+ kstat_incr_irqs_this_cpu(desc);
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock_irq(&desc->lock);
@@ -412,13 +412,13 @@ void handle_simple_irq(struct irq_desc *desc)
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(desc);
if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
desc->istate |= IRQS_PENDING;
goto out_unlock;
}
+ kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
out_unlock:
@@ -462,7 +462,6 @@ void handle_level_irq(struct irq_desc *desc)
goto out_unlock;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(desc);
/*
* If its disabled or no action available
@@ -473,6 +472,7 @@ void handle_level_irq(struct irq_desc *desc)
goto out_unlock;
}
+ kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
cond_unmask_irq(desc);
@@ -532,7 +532,6 @@ void handle_fasteoi_irq(struct irq_desc *desc)
goto out;
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
- kstat_incr_irqs_this_cpu(desc);
/*
* If its disabled or no action available
@@ -544,6 +543,7 @@ void handle_fasteoi_irq(struct irq_desc *desc)
goto out;
}
+ kstat_incr_irqs_this_cpu(desc);
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 5f55a8bf526405..83cea913983c55 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -864,6 +864,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
irq_finalize_oneshot(desc, action);
local_bh_enable();
return ret;
@@ -880,6 +883,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
irqreturn_t ret;
ret = action->thread_fn(action->irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
irq_finalize_oneshot(desc, action);
return ret;
}
@@ -957,8 +963,6 @@ static int irq_thread(void *data)
irq_thread_check_affinity(desc, action);
action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
- atomic_inc(&desc->threads_handled);
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
@@ -1012,6 +1016,13 @@ static int irq_setup_forced_threading(struct irqaction *new)
if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
return 0;
+ /*
+ * No further action required for interrupts which are requested as
+ * threaded interrupts already
+ */
+ if (new->handler == irq_default_primary_handler)
+ return 0;
+
new->flags |= IRQF_ONESHOT;
/*
@@ -1019,7 +1030,7 @@ static int irq_setup_forced_threading(struct irqaction *new)
* thread handler. We force thread them as well by creating a
* secondary action.
*/
- if (new->handler != irq_default_primary_handler && new->thread_fn) {
+ if (new->handler && new->thread_fn) {
/* Allocate the secondary action */
new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!new->secondary)
diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
index 6030efd4a188ae..cd4952d7e2f247 100644
--- a/kernel/kexec_file.c
+++ b/kernel/kexec_file.c
@@ -906,22 +906,8 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min,
if (kexec_purgatory_size <= 0)
return -EINVAL;
- if (kexec_purgatory_size < sizeof(Elf_Ehdr))
- return -ENOEXEC;
-
pi->ehdr = (Elf_Ehdr *)kexec_purgatory;
- if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0
- || pi->ehdr->e_type != ET_REL
- || !elf_check_arch(pi->ehdr)
- || pi->ehdr->e_shentsize != sizeof(Elf_Shdr))
- return -ENOEXEC;
-
- if (pi->ehdr->e_shoff >= kexec_purgatory_size
- || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) >
- kexec_purgatory_size - pi->ehdr->e_shoff))
- return -ENOEXEC;
-
ret = __kexec_load_purgatory(image, min, max, top_down);
if (ret)
return ret;
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 0277d1216f80ae..3a338c6fd54dc0 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -528,7 +528,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
goto out;
INIT_WORK(&sub_info->work, call_usermodehelper_exec_work);
+
+#ifdef CONFIG_STATIC_USERMODEHELPER
+ sub_info->path = CONFIG_STATIC_USERMODEHELPER_PATH;
+#else
sub_info->path = path;
+#endif
sub_info->argv = argv;
sub_info->envp = envp;
@@ -566,6 +571,15 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
retval = -EBUSY;
goto out;
}
+
+ /*
+ * If there is no binary for us to call, then just return and get out of
+ * here. This allows us to set STATIC_USERMODEHELPER_PATH to "" and
+ * disable all call_usermodehelper() calls.
+ */
+ if (strlen(sub_info->path) == 0)
+ goto out;
+
/*
* Set the completion pointer only if there is a waiter.
* This makes it possible to use umh_complete to free
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index bbe9dd0886bdfe..d8daf6c55d2bf3 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -665,9 +665,10 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
}
/* Cancel unoptimizing for reusing */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
{
struct optimized_kprobe *op;
+ int ret;
BUG_ON(!kprobe_unused(ap));
/*
@@ -681,8 +682,12 @@ static void reuse_unused_kprobe(struct kprobe *ap)
/* Enable the probe again */
ap->flags &= ~KPROBE_FLAG_DISABLED;
/* Optimize it again (remove from op->list) */
- BUG_ON(!kprobe_optready(ap));
+ ret = kprobe_optready(ap);
+ if (ret)
+ return ret;
+
optimize_kprobe(ap);
+ return 0;
}
/* Remove optimized instructions */
@@ -894,11 +899,16 @@ static void __disarm_kprobe(struct kprobe *p, bool reopt)
#define kprobe_disarmed(p) kprobe_disabled(p)
#define wait_for_kprobe_optimizer() do {} while (0)
-/* There should be no unused kprobes can be reused without optimization */
-static void reuse_unused_kprobe(struct kprobe *ap)
+static int reuse_unused_kprobe(struct kprobe *ap)
{
+ /*
+ * If the optimized kprobe is NOT supported, the aggr kprobe is
+ * released at the same time that the last aggregated kprobe is
+ * unregistered.
+ * Thus there should be no chance to reuse an unused kprobe.
+ */
printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
- BUG_ON(kprobe_unused(ap));
+ return -EINVAL;
}
static void free_aggr_kprobe(struct kprobe *p)
@@ -1276,9 +1286,12 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
goto out;
}
init_aggr_kprobe(ap, orig_p);
- } else if (kprobe_unused(ap))
+ } else if (kprobe_unused(ap)) {
/* This probe is going to die. Rescue it */
- reuse_unused_kprobe(ap);
+ ret = reuse_unused_kprobe(ap);
+ if (ret)
+ goto out;
+ }
if (kprobe_gone(ap)) {
/*
@@ -2441,7 +2454,7 @@ static int __init debugfs_kprobe_init(void)
if (!dir)
return -ENOMEM;
- file = debugfs_create_file("list", 0444, dir, NULL,
+ file = debugfs_create_file("list", 0400, dir, NULL,
&debugfs_kprobes_operations);
if (!file)
goto error;
@@ -2451,7 +2464,7 @@ static int __init debugfs_kprobe_init(void)
if (!file)
goto error;
- file = debugfs_create_file("blacklist", 0444, dir, NULL,
+ file = debugfs_create_file("blacklist", 0400, dir, NULL,
&debugfs_kprobe_blacklist_ops);
if (!file)
goto error;
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index e83b264640615c..e636d2e015a6a1 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -176,7 +176,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute notes_attr = {
+static struct bin_attribute notes_attr __ro_after_init = {
.attr = {
.name = "notes",
.mode = S_IRUGO,
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 850b255649a217..d43f329f312ae8 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -65,7 +65,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
static struct kthread *to_live_kthread(struct task_struct *k)
{
struct completion *vfork = ACCESS_ONCE(k->vfork_done);
- if (likely(vfork))
+ if (likely(vfork) && try_get_task_stack(k))
return __to_kthread(vfork);
return NULL;
}
@@ -313,10 +313,16 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
task = create->result;
if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 };
+ char name[TASK_COMM_LEN];
va_list args;
va_start(args, namefmt);
- vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+ /*
+ * task is already visible to other tasks, so updating
+ * COMM must be protected.
+ */
+ vsnprintf(name, sizeof(name), namefmt, args);
+ set_task_comm(task, name);
va_end(args);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
@@ -427,8 +433,10 @@ void kthread_unpark(struct task_struct *k)
{
struct kthread *kthread = to_live_kthread(k);
- if (kthread)
+ if (kthread) {
__kthread_unpark(k, kthread);
+ put_task_stack(k);
+ }
}
EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -457,6 +465,7 @@ int kthread_park(struct task_struct *k)
wait_for_completion(&kthread->parked);
}
}
+ put_task_stack(k);
ret = 0;
}
return ret;
@@ -492,6 +501,7 @@ int kthread_stop(struct task_struct *k)
__kthread_unpark(k, kthread);
wake_up_process(k);
wait_for_completion(&kthread->exited);
+ put_task_stack(k);
}
ret = k->exit_code;
put_task_struct(k);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0e2c4911ba6128..a49c565529a02e 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1264,11 +1264,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.parent = NULL;
this.class = class;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return ret;
}
@@ -1291,11 +1291,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.parent = NULL;
this.class = class;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return ret;
}
@@ -3314,6 +3314,9 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
unsigned int depth;
int i;
+ if (unlikely(!debug_locks))
+ return 0;
+
depth = curr->lockdep_depth;
/*
* This function is about (re)setting the class of a held lock,
@@ -3826,7 +3829,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat))
+ if (unlikely(!lock_stat || !debug_locks))
return;
if (unlikely(current->lockdep_recursion))
@@ -3846,7 +3849,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
{
unsigned long flags;
- if (unlikely(!lock_stat))
+ if (unlikely(!lock_stat || !debug_locks))
return;
if (unlikely(current->lockdep_recursion))
@@ -4123,7 +4126,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
if (unlikely(!debug_locks))
return;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -4134,7 +4137,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 05a37857ab5516..8d7047ecef4e16 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -104,6 +104,19 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
+
+ /*
+ * osq_lock() unqueue
+ *
+ * node->prev = prev osq_wait_next()
+ * WMB MB
+ * prev->next = node next->prev = prev // unqueue-C
+ *
+ * Here 'node->prev' and 'next->prev' are the same variable and we need
+ * to ensure these stores happen in-order to avoid corrupting the list.
+ */
+ smp_wmb();
+
WRITE_ONCE(prev->next, node);
/*
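
In C11 terms the added smp_wmb() is roughly a release fence between the node->prev store and the prev->next publication. This is an approximate mapping (smp_wmb() is only a store-store barrier, weaker than a full release fence), shown here with invented node/publish names:

#include <stdatomic.h>

struct node {
	_Atomic(struct node *) next;
	struct node *prev;
};

/* node->prev must be visible before prev->next publishes the node. */
static void publish(struct node *node, struct node *prev)
{
	node->prev = prev;			/* plain store */
	atomic_thread_fence(memory_order_release); /* plays smp_wmb() */
	atomic_store_explicit(&prev->next, node,   /* publish */
			      memory_order_relaxed);
}
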
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b066724d7a5beb..dd173df9ee5e57 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1712,21 +1712,23 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
}
/**
- * rt_mutex_finish_proxy_lock() - Complete lock acquisition
+ * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
* @lock: the rt_mutex we were woken on
* @to: the timeout, null if none. hrtimer should already have
* been started.
* @waiter: the pre-initialized rt_mutex_waiter
*
- * Complete the lock acquisition started our behalf by another thread.
+ * Wait for the lock acquisition started on our behalf by
+ * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
+ * rt_mutex_cleanup_proxy_lock().
*
* Returns:
* 0 - success
* <0 - error, one of -EINTR, -ETIMEDOUT
*
- * Special API call for PI-futex requeue support
+ * Special API call for PI-futex support
*/
-int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
+int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -1739,9 +1741,6 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
/* sleep on the mutex */
ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
- if (unlikely(ret))
- remove_waiter(lock, waiter);
-
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
@@ -1752,3 +1751,42 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
return ret;
}
+
+/**
+ * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
+ * @lock: the rt_mutex we were woken on
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+ * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ *
+ * Unless we acquired the lock, we're still enqueued on the wait-list and can
+ * in fact still be granted ownership until we're removed. Therefore we can
+ * find we are in fact the owner and must disregard the
+ * rt_mutex_wait_proxy_lock() failure.
+ *
+ * Returns:
+ * true - we did the cleanup and are done.
+ * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
+ * so the caller should disregard its return value.
+ *
+ * Special API call for PI-futex support
+ */
+bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter)
+{
+ bool cleanup = false;
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ /*
+ * Unless we're the owner, we're still enqueued on the wait_list.
+ * So check if we became the owner; if not, take us off the wait_list.
+ */
+ if (rt_mutex_owner(lock) != current) {
+ remove_waiter(lock, waiter);
+ fixup_rt_mutex_waiters(lock);
+ cleanup = true;
+ }
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return cleanup;
+}
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index e317e1cbb3eba8..6f8f68edb700c3 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -106,9 +106,11 @@ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *to,
- struct rt_mutex_waiter *waiter);
+extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *to,
+ struct rt_mutex_waiter *waiter);
+extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter);
extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
struct wake_q_head *wqh);
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index a4d4de05b2d167..1be33caf157d26 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -511,6 +511,33 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
unsigned long flags;
/*
+ * __rwsem_down_write_failed_common(sem)
+ * rwsem_optimistic_spin(sem)
+ * osq_unlock(sem->osq)
+ * ...
+ * atomic_long_add_return(&sem->count)
+ *
+ * - VS -
+ *
+ * __up_write()
+ * if (atomic_long_sub_return_release(&sem->count) < 0)
+ * rwsem_wake(sem)
+ * osq_is_locked(&sem->osq)
+ *
+ * And __up_write() must observe !osq_is_locked() when it observes the
+ * atomic_long_add_return() in order to not miss a wakeup.
+ *
+ * This boils down to:
+ *
+ * [S.rel] X = 1 [RmW] r0 = (Y += 0)
+ * MB RMB
+ * [RmW] Y += 1 [L] r1 = X
+ *
+ * exists (r0=1 /\ r1=0)
+ */
+ smp_rmb();
+
+ /*
* If a spinner is present, it is not necessary to do the wakeup.
* Try to do wakeup only if the trylock succeeds to minimize
* spinlock contention which may introduce too much delay in the
diff --git a/kernel/memremap.c b/kernel/memremap.c
index d1199615a7997b..756818f1d11828 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -181,15 +181,12 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
struct page_map *page_map;
int error, nid;
- if (is_ram == REGION_MIXED) {
- WARN_ONCE(1, "%s attempted on mixed region %pr\n",
- __func__, res);
+ if (is_ram != REGION_DISJOINT) {
+ WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
+ is_ram == REGION_MIXED ? "mixed" : "ram", res);
return ERR_PTR(-ENXIO);
}
- if (is_ram == REGION_INTERSECTS)
- return __va(res->start);
-
page_map = devres_alloc_node(devm_memremap_pages_release,
sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
if (!page_map)
@@ -212,5 +209,5 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
devres_add(dev, page_map);
return __va(res->start);
}
-EXPORT_SYMBOL(devm_memremap_pages);
+EXPORT_SYMBOL_GPL(devm_memremap_pages);
#endif /* CONFIG_ZONE_DEVICE */
diff --git a/kernel/module.c b/kernel/module.c
index aa81f41f2b19ce..bcc78f4c15e9e9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3860,7 +3860,7 @@ static unsigned long mod_find_symname(struct module *mod, const char *name)
for (i = 0; i < kallsyms->num_symtab; i++)
if (strcmp(name, symname(kallsyms, i)) == 0 &&
- kallsyms->symtab[i].st_info != 'U')
+ kallsyms->symtab[i].st_shndx != SHN_UNDEF)
return kallsyms->symtab[i].st_value;
return 0;
}
@@ -3906,6 +3906,10 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < kallsyms->num_symtab; i++) {
+
+ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ continue;
+
ret = fn(data, symname(kallsyms, i),
mod, kallsyms->symtab[i].st_value);
if (ret != 0)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 6d6f63be1f9b48..4335e7d1c39159 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -115,6 +115,7 @@ config PM_SLEEP
def_bool y
depends on SUSPEND || HIBERNATE_CALLBACKS
select PM
+ select SRCU
config PM_SLEEP_SMP
def_bool y
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 12cd989dadf639..160e1006640d58 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -37,6 +37,14 @@
#define HIBERNATE_SIG "S1SUSPEND"
/*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages to be cleaned before
+ * they can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
+/*
* The swap map is a data structure used for keeping track of each page
* written to a swap partition. It consists of many swap_map_page
* structures that contain each an array of MAP_PAGE_ENTRIES swap entries.
@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
if (bio_data_dir(bio) == WRITE)
put_page(page);
+ else if (clean_pages_on_read)
+ flush_icache_range((unsigned long)page_address(page),
+ (unsigned long)page_address(page) + PAGE_SIZE);
if (bio->bi_error && !hb->error)
hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
hib_init_batch(&hb);
+ clean_pages_on_read = true;
printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
nr_to_read);
m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
d->unc_len = LZO_UNC_SIZE;
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
d->unc, &d->unc_len);
+ if (clean_pages_on_decompress)
+ flush_icache_range((unsigned long)d->unc,
+ (unsigned long)d->unc + d->unc_len);
+
atomic_set(&d->stop, 1);
wake_up(&d->done);
}
@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
}
memset(crc, 0, offsetof(struct crc_data, go));
+ clean_pages_on_decompress = true;
+
/*
* Start the decompression threads.
*/
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 526e8911460a0b..f83c1876b39c0d 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -184,6 +184,11 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
res = PAGE_SIZE - pg_offp;
}
+ if (!data_of(data->handle)) {
+ res = -EINVAL;
+ goto unlock;
+ }
+
res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
buf, count);
if (res > 0)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 1a698158face22..62ffd417622c98 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -885,7 +885,12 @@ static void __init log_buf_len_update(unsigned size)
/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
- unsigned size = memparse(str, &str);
+ unsigned int size;
+
+ if (!str)
+ return -EINVAL;
+
+ size = memparse(str, &str);
log_buf_len_update(size);
@@ -3176,9 +3181,8 @@ void show_regs_print_info(const char *log_lvl)
{
dump_stack_print_info(log_lvl);
- printk("%stask: %p ti: %p task.ti: %p\n",
- log_lvl, current, current_thread_info(),
- task_thread_info(current));
+ printk("%stask: %p task.stack: %p\n",
+ log_lvl, current, task_stack_page(current));
}
#endif
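
log_buf_len_setup() now rejects a NULL option string before handing it to memparse(). A userspace sketch of that pattern, with a simplified K/M/G suffix parser standing in for memparse() (which also handles T/P/E):

#include <stdlib.h>

static unsigned long long parse_size(const char *str)
{
	char *end;
	unsigned long long v;

	if (!str)
		return 0;		/* the added NULL guard */
	v = strtoull(str, &end, 0);
	switch (*end) {
	case 'G': case 'g': v <<= 10;	/* fall through */
	case 'M': case 'm': v <<= 10;	/* fall through */
	case 'K': case 'k': v <<= 10;
	}
	return v;
}
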
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 8a62cbfe1f2ff8..082aedefe29c42 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1611,15 +1611,23 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
}
/*
- * Awaken the grace-period kthread for the specified flavor of RCU.
- * Don't do a self-awaken, and don't bother awakening when there is
- * nothing for the grace-period kthread to do (as in several CPUs
- * raced to awaken, and we lost), and finally don't try to awaken
- * a kthread that has not yet been created.
+ * Awaken the grace-period kthread. Don't do a self-awaken (unless in
+ * an interrupt or softirq handler), and don't bother awakening when there
+ * is nothing for the grace-period kthread to do (as in several CPUs raced
+ * to awaken, and we lost), and finally don't try to awaken a kthread that
+ * has not yet been created. If all those checks are passed, track some
+ * debug information and awaken.
+ *
+ * So why do the self-wakeup when in an interrupt or softirq handler
+ * in the grace-period kthread's context? Because the kthread might have
+ * been interrupted just as it was going to sleep, and just after the final
+ * pre-sleep check of the awaken condition. In this case, a wakeup really
+ * is required, and is therefore supplied.
*/
static void rcu_gp_kthread_wake(struct rcu_state *rsp)
{
- if (current == rsp->gp_kthread ||
+ if ((current == rsp->gp_kthread &&
+ !in_interrupt() && !in_serving_softirq()) ||
!READ_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread)
return;
@@ -3817,7 +3825,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
continue;
rdp = per_cpu_ptr(rsp->rda, cpu);
pr_cont(" %d-%c%c%c", cpu,
- "O."[cpu_online(cpu)],
+ "O."[!!cpu_online(cpu)],
"o."[!!(rdp->grpmask & rnp->expmaskinit)],
"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
}
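
The '!!' added to the expedited-stall printout matters because the string literal has only two valid indices; if the online test ever yielded a raw nonzero mask rather than 0/1, indexing would walk past the literal. A tiny demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x4;	/* any nonzero flag value */

	/* "O."[mask] would read past the 3-byte literal; collapsing
	 * the value with !! keeps the index in {0, 1}. */
	printf("%c\n", "O."[!!mask]);	/* prints '.' */
	return 0;
}
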
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 05070811c25f97..1391c7e9451bfd 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3959,8 +3959,8 @@ static int __sched_setscheduler(struct task_struct *p,
struct rq *rq;
int reset_on_fork;
- /* may grab non-irq protected spin_locks */
- BUG_ON(in_interrupt());
+ /* The pi code expects interrupts enabled */
+ BUG_ON(pi && in_interrupt());
recheck:
/* double check policy once rq lock held */
if (policy < 0) {
@@ -8151,11 +8151,9 @@ void sched_destroy_group(struct task_group *tg)
void sched_offline_group(struct task_group *tg)
{
unsigned long flags;
- int i;
/* end participation in shares distribution */
- for_each_possible_cpu(i)
- unregister_fair_sched_group(tg, i);
+ unregister_fair_sched_group(tg);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6afcfd2cfbc625..ec1d35928f7e9e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3664,9 +3664,13 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
/*
* Add to the _head_ of the list, so that an already-started
- * distribute_cfs_runtime will not see us
+ * distribute_cfs_runtime will not see us. If distribute_cfs_runtime is
+ * not running, add to the tail so that later runqueues don't get starved.
*/
- list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ if (cfs_b->distribute_running)
+ list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ else
+ list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
/*
* If we're the first throttled task, make sure the bandwidth
@@ -3809,14 +3813,16 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
* in us over-using our runtime if it is all used during this loop, but
* only by limited amounts in that extreme case.
*/
- while (throttled && cfs_b->runtime > 0) {
+ while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
runtime = cfs_b->runtime;
+ cfs_b->distribute_running = 1;
raw_spin_unlock(&cfs_b->lock);
/* we can't nest cfs_b->lock while distributing bandwidth */
runtime = distribute_cfs_runtime(cfs_b, runtime,
runtime_expires);
raw_spin_lock(&cfs_b->lock);
+ cfs_b->distribute_running = 0;
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
cfs_b->runtime -= min(runtime, cfs_b->runtime);
@@ -3927,6 +3933,11 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
/* confirm we're still not at a refresh boundary */
raw_spin_lock(&cfs_b->lock);
+ if (cfs_b->distribute_running) {
+ raw_spin_unlock(&cfs_b->lock);
+ return;
+ }
+
if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
raw_spin_unlock(&cfs_b->lock);
return;
@@ -3936,6 +3947,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
runtime = cfs_b->runtime;
expires = cfs_b->runtime_expires;
+ if (runtime)
+ cfs_b->distribute_running = 1;
+
raw_spin_unlock(&cfs_b->lock);
if (!runtime)
@@ -3946,6 +3960,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
raw_spin_lock(&cfs_b->lock);
if (expires == cfs_b->runtime_expires)
cfs_b->runtime -= min(runtime, cfs_b->runtime);
+ cfs_b->distribute_running = 0;
raw_spin_unlock(&cfs_b->lock);
}
@@ -4057,6 +4072,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
cfs_b->period_timer.function = sched_cfs_period_timer;
hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cfs_b->slack_timer.function = sched_cfs_slack_timer;
+ cfs_b->distribute_running = 0;
}
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
@@ -9294,11 +9310,8 @@ void free_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
- if (tg->se) {
- if (tg->se[i])
- remove_entity_load_avg(tg->se[i]);
+ if (tg->se)
kfree(tg->se[i]);
- }
}
kfree(tg->cfs_rq);
@@ -9346,21 +9359,29 @@ err:
return 0;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
{
- struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ struct rq *rq;
+ int cpu;
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- return;
+ for_each_possible_cpu(cpu) {
+ if (tg->se[cpu])
+ remove_entity_load_avg(tg->se[cpu]);
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ /*
+ * Only empty task groups can be destroyed, so we can speculatively
+ * check on_list without danger of it being re-added.
+ */
+ if (!tg->cfs_rq[cpu]->on_list)
+ continue;
+
+ rq = cpu_rq(cpu);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -9442,7 +9463,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
return 1;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
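
distribute_running is a simple in-progress flag, checked and set under cfs_b->lock, so the period and slack timers never distribute runtime concurrently while the lock is dropped. The shape of that pattern as a pthread sketch, with invented names:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool distribute_running;

/* Mark distribution in progress while the lock is dropped, so a
 * second timer backs off instead of handing out runtime twice. */
static void distribute_once(void (*distribute)(void))
{
	pthread_mutex_lock(&lock);
	if (distribute_running) {
		pthread_mutex_unlock(&lock);
		return;		/* someone else is distributing */
	}
	distribute_running = true;
	pthread_mutex_unlock(&lock);

	distribute();		/* runs without the lock held */

	pthread_mutex_lock(&lock);
	distribute_running = false;
	pthread_mutex_unlock(&lock);
}
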
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 917c94abf5bbae..cc0975f94a8ded 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -133,7 +133,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
*/
static void cpuidle_idle_call(void)
{
- struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+ struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index daf7fa728f1869..92e0049027e0c8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -233,6 +233,8 @@ struct cfs_bandwidth {
/* statistics */
int nr_periods, nr_throttled;
u64 throttled_time;
+
+ bool distribute_running;
#endif
};
@@ -308,7 +310,7 @@ extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
@@ -1023,7 +1025,11 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
* per-task data have been completed by this moment.
*/
smp_wmb();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ p->cpu = cpu;
+#else
task_thread_info(p)->cpu = cpu;
+#endif
p->wake_cpu = cpu;
#endif
}
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index f613491b4ffd09..3ad40c7115bf23 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -17,6 +17,8 @@
#include <linux/audit.h>
#include <linux/compat.h>
#include <linux/coredump.h>
+#include <linux/nospec.h>
+#include <linux/prctl.h>
#include <linux/sched.h>
#include <linux/seccomp.h>
#include <linux/slab.h>
@@ -215,8 +217,11 @@ static inline bool seccomp_may_assign_mode(unsigned long seccomp_mode)
return true;
}
+void __weak arch_seccomp_spec_mitigate(struct task_struct *task) { }
+
static inline void seccomp_assign_mode(struct task_struct *task,
- unsigned long seccomp_mode)
+ unsigned long seccomp_mode,
+ unsigned long flags)
{
assert_spin_locked(&task->sighand->siglock);
@@ -226,6 +231,9 @@ static inline void seccomp_assign_mode(struct task_struct *task,
* filter) is set.
*/
smp_mb__before_atomic();
+ /* Assume default seccomp processes want spec flaw mitigation. */
+ if ((flags & SECCOMP_FILTER_FLAG_SPEC_ALLOW) == 0)
+ arch_seccomp_spec_mitigate(task);
set_tsk_thread_flag(task, TIF_SECCOMP);
}
@@ -293,7 +301,7 @@ static inline pid_t seccomp_can_sync_threads(void)
* without dropping the locks.
*
*/
-static inline void seccomp_sync_threads(void)
+static inline void seccomp_sync_threads(unsigned long flags)
{
struct task_struct *thread, *caller;
@@ -334,7 +342,8 @@ static inline void seccomp_sync_threads(void)
* allow one thread to transition the other.
*/
if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
- seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
+ seccomp_assign_mode(thread, SECCOMP_MODE_FILTER,
+ flags);
}
}
@@ -453,7 +462,7 @@ static long seccomp_attach_filter(unsigned int flags,
/* Now that the new filter is in place, synchronize to all threads. */
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
- seccomp_sync_threads();
+ seccomp_sync_threads(flags);
return 0;
}
@@ -763,7 +772,7 @@ static long seccomp_set_mode_strict(void)
#ifdef TIF_NOTSC
disable_TSC();
#endif
- seccomp_assign_mode(current, seccomp_mode);
+ seccomp_assign_mode(current, seccomp_mode, 0);
ret = 0;
out:
@@ -821,7 +830,7 @@ static long seccomp_set_mode_filter(unsigned int flags,
/* Do not free the successfully attached filter. */
prepared = NULL;
- seccomp_assign_mode(current, seccomp_mode);
+ seccomp_assign_mode(current, seccomp_mode, flags);
out:
spin_unlock_irq(&current->sighand->siglock);
if (flags & SECCOMP_FILTER_FLAG_TSYNC)
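SECCOMP_FILTER_FLAG_SPEC_ALLOW, checked in seccomp_assign_mode() above, lets a filter opt out of the otherwise-implicit arch_seccomp_spec_mitigate() call. A hedged userspace sketch of passing the flag; the fallback #define matches the uapi value used by this series, but verify it against your own headers:

#define _GNU_SOURCE
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif

int main(void)
{
	/* Allow-everything filter: one BPF instruction returning ALLOW. */
	struct sock_filter insn = BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW);
	struct sock_fprog prog = { .len = 1, .filter = &insn };

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;
	/* The flag asks the kernel NOT to force the speculation mitigation. */
	if (syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
		    SECCOMP_FILTER_FLAG_SPEC_ALLOW, &prog))
		return 1;
	puts("filter installed, speculation control left to the task");
	return 0;
}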
diff --git a/kernel/signal.c b/kernel/signal.c
index 8bfbc47f0a2307..96e8c3cbfa38a6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -696,6 +696,48 @@ static inline bool si_fromuser(const struct siginfo *info)
(!is_si_special(info) && SI_FROMUSER(info));
}
+static int dequeue_synchronous_signal(siginfo_t *info)
+{
+ struct task_struct *tsk = current;
+ struct sigpending *pending = &tsk->pending;
+ struct sigqueue *q, *sync = NULL;
+
+ /*
+ * Might a synchronous signal be in the queue?
+ */
+ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+ return 0;
+
+ /*
+ * Return the first synchronous signal in the queue.
+ */
+ list_for_each_entry(q, &pending->list, list) {
+ /* Synchronous signals have a positive si_code */
+ if ((q->info.si_code > SI_USER) &&
+ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+ sync = q;
+ goto next;
+ }
+ }
+ return 0;
+next:
+ /*
+ * Check if there is another siginfo for the same signal.
+ */
+ list_for_each_entry_continue(q, &pending->list, list) {
+ if (q->info.si_signo == sync->info.si_signo)
+ goto still_pending;
+ }
+
+ sigdelset(&pending->signal, sync->info.si_signo);
+ recalc_sigpending();
+still_pending:
+ list_del_init(&sync->list);
+ copy_siginfo(info, &sync->info);
+ __sigqueue_free(sync);
+ return info->si_signo;
+}
+
/*
* called with RCU read lock from check_kill_permission()
*/
@@ -991,7 +1033,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
result = TRACE_SIGNAL_IGNORED;
if (!prepare_signal(sig, t,
- from_ancestor_ns || (info == SEND_SIG_FORCED)))
+ from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
goto ret;
pending = group ? &t->signal->shared_pending : &t->pending;
@@ -2198,6 +2240,14 @@ relock:
goto relock;
}
+ /* Has this task already been marked for death? */
+ if (signal_group_exit(signal)) {
+ ksig->info.si_signo = signr = SIGKILL;
+ sigdelset(&current->pending.signal, SIGKILL);
+ recalc_sigpending();
+ goto fatal;
+ }
+
for (;;) {
struct k_sigaction *ka;
@@ -2211,7 +2261,15 @@ relock:
goto relock;
}
- signr = dequeue_signal(current, &current->blocked, &ksig->info);
+ /*
+ * Signals generated by the execution of an instruction
+ * need to be delivered before any other pending signals
+ * so that the instruction pointer in the signal stack
+ * frame points to the faulting instruction.
+ */
+ signr = dequeue_synchronous_signal(&ksig->info);
+ if (!signr)
+ signr = dequeue_signal(current, &current->blocked, &ksig->info);
if (!signr)
break; /* will return 0 */
@@ -2293,6 +2351,7 @@ relock:
continue;
}
+ fatal:
spin_unlock_irq(&sighand->siglock);
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index 064874ec58fc37..c7ef4c7a940920 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1147,18 +1147,19 @@ static int override_release(char __user *release, size_t len)
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
- int errno = 0;
+ struct new_utsname tmp;
down_read(&uts_sem);
- if (copy_to_user(name, utsname(), sizeof *name))
- errno = -EFAULT;
+ memcpy(&tmp, utsname(), sizeof(tmp));
up_read(&uts_sem);
+ if (copy_to_user(name, &tmp, sizeof(tmp)))
+ return -EFAULT;
- if (!errno && override_release(name->release, sizeof(name->release)))
- errno = -EFAULT;
- if (!errno && override_architecture(name))
- errno = -EFAULT;
- return errno;
+ if (override_release(name->release, sizeof(name->release)))
+ return -EFAULT;
+ if (override_architecture(name))
+ return -EFAULT;
+ return 0;
}
#ifdef __ARCH_WANT_SYS_OLD_UNAME
@@ -1167,55 +1168,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
*/
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
- int error = 0;
+ struct old_utsname tmp;
if (!name)
return -EFAULT;
down_read(&uts_sem);
- if (copy_to_user(name, utsname(), sizeof(*name)))
- error = -EFAULT;
+ memcpy(&tmp, utsname(), sizeof(tmp));
up_read(&uts_sem);
+ if (copy_to_user(name, &tmp, sizeof(tmp)))
+ return -EFAULT;
- if (!error && override_release(name->release, sizeof(name->release)))
- error = -EFAULT;
- if (!error && override_architecture(name))
- error = -EFAULT;
- return error;
+ if (override_release(name->release, sizeof(name->release)))
+ return -EFAULT;
+ if (override_architecture(name))
+ return -EFAULT;
+ return 0;
}
SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
- int error;
+ struct oldold_utsname tmp = {};
if (!name)
return -EFAULT;
- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
- return -EFAULT;
down_read(&uts_sem);
- error = __copy_to_user(&name->sysname, &utsname()->sysname,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->release, &utsname()->release,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->release + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->version, &utsname()->version,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->version + __OLD_UTS_LEN);
- error |= __copy_to_user(&name->machine, &utsname()->machine,
- __OLD_UTS_LEN);
- error |= __put_user(0, name->machine + __OLD_UTS_LEN);
+ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
+ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
+ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
up_read(&uts_sem);
+ if (copy_to_user(name, &tmp, sizeof(tmp)))
+ return -EFAULT;
- if (!error && override_architecture(name))
- error = -EFAULT;
- if (!error && override_release(name->release, sizeof(name->release)))
- error = -EFAULT;
- return error ? -EFAULT : 0;
+ if (override_architecture(name))
+ return -EFAULT;
+ if (override_release(name->release, sizeof(name->release)))
+ return -EFAULT;
+ return 0;
}
#endif
@@ -1229,17 +1221,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
- down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- struct new_utsname *u = utsname();
+ struct new_utsname *u;
+ down_write(&uts_sem);
+ u = utsname();
memcpy(u->nodename, tmp, len);
memset(u->nodename + len, 0, sizeof(u->nodename) - len);
errno = 0;
uts_proc_notify(UTS_PROC_HOSTNAME);
+ up_write(&uts_sem);
}
- up_write(&uts_sem);
return errno;
}
@@ -1247,8 +1240,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
- int i, errno;
+ int i;
struct new_utsname *u;
+ char tmp[__NEW_UTS_LEN + 1];
if (len < 0)
return -EINVAL;
@@ -1257,11 +1251,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
i = 1 + strlen(u->nodename);
if (i > len)
i = len;
- errno = 0;
- if (copy_to_user(name, u->nodename, i))
- errno = -EFAULT;
+ memcpy(tmp, u->nodename, i);
up_read(&uts_sem);
- return errno;
+ if (copy_to_user(name, tmp, i))
+ return -EFAULT;
+ return 0;
}
#endif
@@ -1280,17 +1274,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
- down_write(&uts_sem);
errno = -EFAULT;
if (!copy_from_user(tmp, name, len)) {
- struct new_utsname *u = utsname();
+ struct new_utsname *u;
+ down_write(&uts_sem);
+ u = utsname();
memcpy(u->domainname, tmp, len);
memset(u->domainname + len, 0, sizeof(u->domainname) - len);
errno = 0;
uts_proc_notify(UTS_PROC_DOMAINNAME);
+ up_write(&uts_sem);
}
- up_write(&uts_sem);
return errno;
}
@@ -2227,6 +2222,17 @@ static int prctl_set_vma(unsigned long opt, unsigned long start,
}
#endif
+int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
+{
+ return -EINVAL;
+}
+
+int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
+ unsigned long ctrl)
+{
+ return -EINVAL;
+}
+
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
@@ -2448,6 +2454,16 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
case PR_GET_FP_MODE:
error = GET_FP_MODE(me);
break;
+ case PR_GET_SPECULATION_CTRL:
+ if (arg3 || arg4 || arg5)
+ return -EINVAL;
+ error = arch_prctl_spec_ctrl_get(me, arg2);
+ break;
+ case PR_SET_SPECULATION_CTRL:
+ if (arg4 || arg5)
+ return -EINVAL;
+ error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
+ break;
case PR_SET_VMA:
error = prctl_set_vma(arg2, arg3, arg4, arg5);
break;
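The two new prctl cases route to the weak arch_prctl_spec_ctrl_{get,set}() hooks defined above. A sketch of how a task queries and tightens its own speculation state; the fallback constants are the uapi values for this series and should be treated as assumptions when system headers already provide them:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_SPECULATION_CTRL
#define PR_GET_SPECULATION_CTRL 52
#define PR_SET_SPECULATION_CTRL 53
#define PR_SPEC_STORE_BYPASS    0
#define PR_SPEC_ENABLE          (1UL << 1)
#define PR_SPEC_DISABLE         (1UL << 2)
#endif

int main(void)
{
	/* Query the current speculative-store-bypass state for this task. */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	printf("SSB state: %ld\n", state);

	/* Ask the kernel to disable speculative store bypass for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");
	return 0;
}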
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4c6d2b62836181..bfa018abe4799c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -404,7 +404,8 @@ static struct ctl_table kern_table[] = {
.data = &sysctl_sched_time_avg,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
},
{
.procname = "sched_shares_window_ns",
@@ -1849,6 +1850,24 @@ static struct ctl_table fs_table[] = {
.extra2 = &one,
},
{
+ .procname = "protected_fifos",
+ .data = &sysctl_protected_fifos,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
+ {
+ .procname = "protected_regular",
+ .data = &sysctl_protected_regular,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &two,
+ },
+ {
.procname = "suid_dumpable",
.data = &suid_dumpable,
.maxlen = sizeof(int),
@@ -2421,7 +2440,16 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,
{
struct do_proc_dointvec_minmax_conv_param *param = data;
if (write) {
- int val = *negp ? -*lvalp : *lvalp;
+ int val;
+ if (*negp) {
+ if (*lvalp > (unsigned long) INT_MAX + 1)
+ return -EINVAL;
+ val = -*lvalp;
+ } else {
+ if (*lvalp > (unsigned long) INT_MAX)
+ return -EINVAL;
+ val = *lvalp;
+ }
if ((param->min && *param->min > val) ||
(param->max && *param->max < val))
return -EINVAL;
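The rewritten conversion above rejects magnitudes that cannot be negated into an int instead of letting `val = -*lvalp` wrap. The asymmetric bounds fall out of two's complement: INT_MIN has magnitude INT_MAX + 1. A standalone illustration of the same check:

#include <limits.h>
#include <stdio.h>

/* Mirrors the range check above: a negative write of magnitude
 * INT_MAX + 1 is still representable (it is INT_MIN); anything
 * larger is rejected instead of silently wrapping. */
static int parse_int(int negate, unsigned long magnitude, int *out)
{
	if (negate) {
		if (magnitude > (unsigned long)INT_MAX + 1)
			return -1;
		*out = (int)-magnitude;   /* -(INT_MAX + 1) == INT_MIN */
	} else {
		if (magnitude > (unsigned long)INT_MAX)
			return -1;
		*out = (int)magnitude;
	}
	return 0;
}

int main(void)
{
	int v;

	printf("%d\n", parse_int(1, (unsigned long)INT_MAX + 1, &v)); /* 0: INT_MIN fits */
	printf("%d\n", parse_int(0, (unsigned long)INT_MAX + 1, &v)); /* -1: rejected */
	return 0;
}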
diff --git a/kernel/test_module.c b/kernel/test_module.c
deleted file mode 100644
index f825589315d7dc..00000000000000
--- a/kernel/test_module.c
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * "hello world" kernel module
- */
-
-#define pr_fmt(fmt) "test_module: " fmt
-
-#include <linux/module.h>
-
-static int __init test_module_init(void)
-{
- pr_info("Hello, world\n");
-
- return 0;
-}
-
-module_init(test_module_init);
-
-static void __exit test_module_exit(void)
-{
- pr_info("Goodbye\n");
-}
-
-module_exit(test_module_exit);
-
-MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
-MODULE_LICENSE("GPL");
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8f4c9f933270d3..14f1193035307f 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -789,7 +789,8 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now = alarm_bases[type].gettime();
- exp = ktime_add(now, exp);
+
+ exp = ktime_add_safe(now, exp);
}
if (alarmtimer_do_nsleep(&alarm, exp))
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 80016b329d9442..8fc68e60c7951d 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -103,7 +103,7 @@ static void bump_cpu_timer(struct k_itimer *timer,
continue;
timer->it.cpu.expires += incr;
- timer->it_overrun += 1 << i;
+ timer->it_overrun += 1LL << i;
delta -= incr;
}
}
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index c215f995dc4b5d..f6f410b09058e1 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -355,6 +355,17 @@ static __init int init_posix_timers(void)
__initcall(init_posix_timers);
+/*
+ * The siginfo si_overrun field and the return value of timer_getoverrun(2)
+ * are of type int. Clamp the overrun value to INT_MAX
+ */
+static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
+{
+ s64 sum = timr->it_overrun_last + (s64)baseval;
+
+ return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
+}
+
static void schedule_next_timer(struct k_itimer *timr)
{
struct hrtimer *timer = &timr->it.real.timer;
@@ -362,12 +373,11 @@ static void schedule_next_timer(struct k_itimer *timr)
if (timr->it.real.interval.tv64 == 0)
return;
- timr->it_overrun += (unsigned int) hrtimer_forward(timer,
- timer->base->get_time(),
- timr->it.real.interval);
+ timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
+ timr->it.real.interval);
timr->it_overrun_last = timr->it_overrun;
- timr->it_overrun = -1;
+ timr->it_overrun = -1LL;
++timr->it_requeue_pending;
hrtimer_restart(timer);
}
@@ -396,7 +406,7 @@ void do_schedule_next_timer(struct siginfo *info)
else
schedule_next_timer(timr);
- info->si_overrun += timr->it_overrun_last;
+ info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
}
if (timr)
@@ -491,8 +501,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun += (unsigned int)
- hrtimer_forward(timer, now,
+ timr->it_overrun += hrtimer_forward(timer, now,
timr->it.real.interval);
ret = HRTIMER_RESTART;
++timr->it_requeue_pending;
@@ -633,7 +642,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
it_id_set = IT_ID_SET;
new_timer->it_id = (timer_t) new_timer_id;
new_timer->it_clock = which_clock;
- new_timer->it_overrun = -1;
+ new_timer->it_overrun = -1LL;
if (timer_event_spec) {
if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
@@ -762,7 +771,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
*/
if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
timr->it_sigev_notify == SIGEV_NONE))
- timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
+ timr->it_overrun += hrtimer_forward(timer, now, iv);
remaining = __hrtimer_expires_remaining_adjusted(timer, now);
/* Return 0 only, when the timer is expired and not pending */
@@ -824,7 +833,7 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
if (!timr)
return -EINVAL;
- overrun = timr->it_overrun_last;
+ overrun = timer_overrun_to_int(timr, 0);
unlock_timer(timr, flags);
return overrun;
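it_overrun is widened to 64 bits in the hunks above, and timer_overrun_to_int() clamps what leaks back out through the int-sized si_overrun/timer_getoverrun() interface. The same clamp in isolation:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as timer_overrun_to_int(): accumulate in 64 bits,
 * report at most INT_MAX through the int-sized interface. */
static int overrun_to_int(int64_t overrun_last, int base)
{
	int64_t sum = overrun_last + (int64_t)base;

	return sum > (int64_t)INT_MAX ? INT_MAX : (int)sum;
}

int main(void)
{
	printf("%d\n", overrun_to_int(5, 3));             /* 8 */
	printf("%d\n", overrun_to_int(INT64_MAX / 2, 1)); /* clamped to INT_MAX */
	return 0;
}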
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e5d228f7224c91..5ad2e852e9f6ef 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -570,7 +570,7 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
static inline bool local_timer_softirq_pending(void)
{
- return local_softirq_pending() & TIMER_SOFTIRQ;
+ return local_softirq_pending() & BIT(TIMER_SOFTIRQ);
}
static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
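The one-liner above is a real bug fix: TIMER_SOFTIRQ is a softirq *number* (1), not a mask, so the old test actually probed HI_SOFTIRQ's bit. A small demonstration of index versus mask, with a trimmed stand-in for the softirq enum:

#include <stdio.h>

#define BIT(n) (1UL << (n))

enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ }; /* TIMER_SOFTIRQ == 1 */

int main(void)
{
	unsigned long pending = BIT(TIMER_SOFTIRQ); /* timer softirq raised */

	/* Old test: & with the enum value 1 probes bit 0 (HI_SOFTIRQ)
	 * and wrongly reports the pending timer softirq as absent. */
	printf("index mask: %lu\n", pending & TIMER_SOFTIRQ);      /* 0 */

	/* Fixed test: & with the bit that the raise path actually set. */
	printf("BIT() mask: %lu\n", pending & BIT(TIMER_SOFTIRQ)); /* 2 */
	return 0;
}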
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 54e1edeeb66e91..f5337329a859ea 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -28,6 +28,7 @@
*/
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
@@ -259,9 +260,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
- return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+ return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+ HZ_TO_MSEC_SHR32;
# else
- return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+ return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
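Both branches above switch the jiffies-to-msecs conversion from truncation to rounding up, so a nonzero number of jiffies can no longer report as 0 ms. Using the raw ratio for HZ=1024 (illustrative values; the kernel uses precomputed timeconst factors):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* With HZ=1024, one jiffy is ~0.977 ms; truncating division
	 * reported 0 ms, the rounded-up form reports 1 ms. */
	unsigned long j = 1, num = 1000, den = 1024;

	printf("truncated: %lu ms\n", (j * num) / den);            /* 0 */
	printf("rounded:   %lu ms\n", DIV_ROUND_UP(j * num, den)); /* 1 */
	return 0;
}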
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b5e6934db39363..a9237aa87e209b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -39,7 +39,9 @@
static struct {
seqcount_t seq;
struct timekeeper timekeeper;
-} tk_core ____cacheline_aligned;
+} tk_core ____cacheline_aligned = {
+ .seq = SEQCNT_ZERO(tk_core.seq),
+};
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index ef4f16e81283df..1407ed20ea9395 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -399,7 +399,7 @@ static int __init init_timer_list_procfs(void)
{
struct proc_dir_entry *pe;
- pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
if (!pe)
return -ENOMEM;
return 0;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 6107b59f8e0140..2dc97df8a7c6cc 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1710,6 +1710,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&bdev->bd_mutex);
if (attr == &dev_attr_enable) {
+ if (!!value == !!q->blk_trace) {
+ ret = 0;
+ goto out_unlock_bdev;
+ }
if (value)
ret = blk_trace_setup_queue(q, bdev);
else
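The added check above turns "enable while already enabled" into a no-op instead of leaking the existing blk_trace. The `!!` idiom normalizes an integer and a pointer to 0/1 so the two states can be compared directly:

#include <stdio.h>

int main(void)
{
	void *trace = (void *)0x1; /* non-NULL: tracing already set up */
	int value = 5;             /* user wrote a nonzero value */

	/* !! collapses both operands to 0 or 1, so enabling an enabled
	 * queue (or disabling a disabled one) is detected up front. */
	if (!!value == !!trace)
		puts("no state change needed");
	return 0;
}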
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 4228fd3682c3d4..38fe8e476658b4 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -119,11 +119,13 @@ static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
i++;
} else if (fmt[i] == 'p' || fmt[i] == 's') {
mod[fmt_cnt]++;
- i++;
- if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
+ /* disallow any further format extensions */
+ if (fmt[i + 1] != 0 &&
+ !isspace(fmt[i + 1]) &&
+ !ispunct(fmt[i + 1]))
return -EINVAL;
fmt_cnt++;
- if (fmt[i - 1] == 's') {
+ if (fmt[i] == 's') {
if (str_seen)
/* allow only one '%s' per fmt string */
return -EINVAL;
@@ -321,7 +323,7 @@ static struct bpf_verifier_ops kprobe_prog_ops = {
.is_valid_access = kprobe_prog_is_valid_access,
};
-static struct bpf_prog_type_list kprobe_tl = {
+static struct bpf_prog_type_list kprobe_tl __ro_after_init = {
.ops = &kprobe_prog_ops,
.type = BPF_PROG_TYPE_KPROBE,
};
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ac758a53fcea55..d90b42b3990816 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4767,6 +4767,7 @@ void ftrace_destroy_filter_files(struct ftrace_ops *ops)
if (ops->flags & FTRACE_OPS_FL_ENABLED)
ftrace_shutdown(ops, 0);
ops->flags |= FTRACE_OPS_FL_DELETED;
+ ftrace_free_filter(ops);
mutex_unlock(&ftrace_lock);
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9cd6191760b14..74b20e3ab8c6c6 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1513,6 +1513,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
@@ -3142,6 +3144,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
}
/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to see if write is set enabled
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+ return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
+/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
* @buffer: The ring buffer to stop writes to.
* @cpu: The CPU buffer to stop
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 68a2eb3212cad1..a771abd08d70ee 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1088,6 +1088,12 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&tr->max_lock);
+ /* Inherit the recordable setting from trace_buffer */
+ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+ ring_buffer_record_on(tr->max_buffer.buffer);
+ else
+ ring_buffer_record_off(tr->max_buffer.buffer);
+
buf = tr->trace_buffer.buffer;
tr->trace_buffer.buffer = tr->max_buffer.buffer;
tr->max_buffer.buffer = buf;
@@ -1796,7 +1802,17 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
{
__buffer_unlock_commit(buffer, event);
- ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
+ /*
+ * If regs is not set, then skip the following callers:
+ * trace_buffer_unlock_commit_regs
+ * event_trigger_unlock_commit
+ * trace_event_buffer_commit
+ * trace_event_raw_event_sched_switch
+ * Note, we can still get here via blktrace, wakeup tracer
+ * and mmiotrace, but that's ok if they lose a function or
+ * two. They are not that meaningful.
+ */
+ ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
@@ -1855,6 +1871,13 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
trace.skip = skip;
/*
+ * Add two, for this function and the call to save_stack_trace()
+ * If regs is set, then these functions will not be in the way.
+ */
+ if (!regs)
+ trace.skip += 2;
+
+ /*
* Since events can happen in NMIs there's no safe way to
* use the per cpu ftrace_stacks. We reserve it and if an interrupt
* or NMI comes in, it will just have to use the default
@@ -2225,6 +2248,7 @@ out:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
+__printf(3, 0)
static int
__trace_array_vprintk(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, va_list args)
@@ -2275,12 +2299,14 @@ __trace_array_vprintk(struct ring_buffer *buffer,
return len;
}
+__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}
+__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
@@ -2296,6 +2322,7 @@ int trace_array_printk(struct trace_array *tr,
return ret;
}
+__printf(3, 4)
int trace_array_printk_buf(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, ...)
{
@@ -2311,6 +2338,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
return ret;
}
+__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(&global_trace, ip, fmt, args);
@@ -4769,7 +4797,6 @@ out:
return ret;
fail:
- kfree(iter->trace);
kfree(iter);
__trace_array_put(tr);
mutex_unlock(&trace_types_lock);
@@ -6636,7 +6663,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
if (buffer) {
mutex_lock(&trace_types_lock);
- if (val) {
+ if (!!val == tracer_tracing_is_on(tr)) {
+ val = 0; /* do nothing */
+ } else if (val) {
tracer_tracing_on(tr);
if (tr->current_trace->start)
tr->current_trace->start(tr);
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 42a4009fd75ada..8be66a2b0cacfd 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -469,9 +469,10 @@ clear_event_triggers(struct trace_array *tr)
struct trace_event_file *file;
list_for_each_entry(file, &tr->events, list) {
- struct event_trigger_data *data;
- list_for_each_entry_rcu(data, &file->triggers, list) {
+ struct event_trigger_data *data, *n;
+ list_for_each_entry_safe(data, n, &file->triggers, list) {
trace_event_trigger_enable_disable(file, 0);
+ list_del_rcu(&data->list);
if (data->ops->free)
data->ops->free(data->ops, data);
}
@@ -662,6 +663,8 @@ event_trigger_callback(struct event_command *cmd_ops,
goto out_free;
out_reg:
+ /* Up the trigger_data count to make sure reg doesn't free it on failure */
+ event_trigger_init(trigger_ops, trigger_data);
ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
/*
* The above returns on success the # of functions enabled,
@@ -669,11 +672,13 @@ event_trigger_callback(struct event_command *cmd_ops,
* Consider no functions a failure too.
*/
if (!ret) {
+ cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
ret = -ENOENT;
- goto out_free;
- } else if (ret < 0)
- goto out_free;
- ret = 0;
+ } else if (ret > 0)
+ ret = 0;
+
+ /* Down the counter of trigger_data or free it if not used anymore */
+ event_trigger_free(trigger_ops, trigger_data);
out:
return ret;
@@ -722,8 +727,10 @@ static int set_trigger_filter(char *filter_str,
/* The filter is for the 'trigger' event, not the triggered event */
ret = create_event_filter(file->event_call, filter_str, false, &filter);
- if (ret)
- goto out;
+ /*
+ * If create_event_filter() fails, the filter still needs to be freed;
+ * the calling code does that through data->filter.
+ */
assign:
tmp = rcu_access_pointer(data->filter);
@@ -1226,6 +1233,9 @@ event_enable_trigger_func(struct event_command *cmd_ops,
goto out;
}
+ /* Up the trigger_data count to make sure nothing frees it on failure */
+ event_trigger_init(trigger_ops, trigger_data);
+
if (trigger) {
number = strsep(&trigger, ":");
@@ -1276,6 +1286,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
goto out_disable;
/* Just return zero, not the number of enabled functions */
ret = 0;
+ event_trigger_free(trigger_ops, trigger_data);
out:
return ret;
@@ -1286,7 +1297,7 @@ event_enable_trigger_func(struct event_command *cmd_ops,
out_free:
if (cmd_ops->set_filter)
cmd_ops->set_filter(NULL, trigger_data, NULL);
- kfree(trigger_data);
+ event_trigger_free(trigger_ops, trigger_data);
kfree(enable_data);
goto out;
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a804ee1b3ec683..55002f20950f87 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -774,6 +774,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
+ int cpu = iter->cpu;
int i;
graph_ret = &ret_entry->ret;
@@ -782,7 +783,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
if (data) {
struct fgraph_cpu_data *cpu_data;
- int cpu = iter->cpu;
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
@@ -812,6 +812,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
trace_seq_printf(s, "%ps();\n", (void *)call->func);
+ print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
+ cpu, iter->ent->pid, flags);
+
return trace_handle_return(s);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f2682799c2151c..f0ee722be520ec 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -349,11 +349,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
+ struct event_file_link *link = NULL;
int ret = 0;
if (file) {
- struct event_file_link *link;
-
link = kmalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
ret = -ENOMEM;
@@ -373,6 +372,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
else
ret = enable_kprobe(&tk->rp.kp);
}
+
+ if (ret) {
+ if (file) {
+ /* Note: the branch runs only when WARN_ON_ONCE() did not fire */
+ if (!WARN_ON_ONCE(!link))
+ list_del_rcu(&link->list);
+ kfree(link);
+ tk->tp.flags &= ~TP_FLAG_TRACE;
+ } else {
+ tk->tp.flags &= ~TP_FLAG_PROFILE;
+ }
+ }
out:
return ret;
}
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 68bb89ad9d2811..518e62a398d28d 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -150,7 +150,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
ret = strncpy_from_user(dst, src, maxlen);
if (ret == maxlen)
- dst[--ret] = '\0';
+ dst[ret - 1] = '\0';
+ else if (ret >= 0)
+ /*
+ * Include the terminating null byte. In this case it
+ * was copied by strncpy_from_user but not accounted
+ * for in ret.
+ */
+ ret++;
if (ret < 0) { /* Failed to fetch string */
((u8 *)get_rloc_data(dest))[0] = '\0';
@@ -969,7 +976,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
list_del_rcu(&link->list);
/* synchronize with u{,ret}probe_trace_func */
- synchronize_sched();
+ synchronize_rcu();
kfree(link);
if (!list_empty(&tu->tp.files))
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d3eb3b87e4d528..a4d69899e24787 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -602,9 +602,26 @@ static ssize_t map_write(struct file *file, const char __user *buf,
struct uid_gid_map new_map;
unsigned idx;
struct uid_gid_extent *extent = NULL;
- unsigned long page = 0;
+ unsigned long page;
char *kbuf, *pos, *next_line;
- ssize_t ret = -EINVAL;
+ ssize_t ret;
+
+ /* Only allow < page size writes at the beginning of the file */
+ if ((*ppos != 0) || (count >= PAGE_SIZE))
+ return -EINVAL;
+
+ /* Get a buffer */
+ page = __get_free_page(GFP_TEMPORARY);
+ kbuf = (char *) page;
+ if (!page)
+ return -ENOMEM;
+
+ /* Slurp in the user data */
+ if (copy_from_user(kbuf, buf, count)) {
+ free_page(page);
+ return -EFAULT;
+ }
+ kbuf[count] = '\0';
/*
* The userns_state_mutex serializes all writes to any given map.
@@ -638,24 +655,6 @@ static ssize_t map_write(struct file *file, const char __user *buf,
if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
goto out;
- /* Get a buffer */
- ret = -ENOMEM;
- page = __get_free_page(GFP_TEMPORARY);
- kbuf = (char *) page;
- if (!page)
- goto out;
-
- /* Only allow < page size writes at the beginning of the file */
- ret = -EINVAL;
- if ((*ppos != 0) || (count >= PAGE_SIZE))
- goto out;
-
- /* Slurp in the user data */
- ret = -EFAULT;
- if (copy_from_user(kbuf, buf, count))
- goto out;
- kbuf[count] = '\0';
-
/* Parse the user data */
ret = -EINVAL;
pos = kbuf;
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c8eac43267e90d..d2b3b2973456e8 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -17,7 +17,7 @@
#ifdef CONFIG_PROC_SYSCTL
-static void *get_uts(struct ctl_table *table, int write)
+static void *get_uts(struct ctl_table *table)
{
char *which = table->data;
struct uts_namespace *uts_ns;
@@ -25,21 +25,9 @@ static void *get_uts(struct ctl_table *table, int write)
uts_ns = current->nsproxy->uts_ns;
which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
- if (!write)
- down_read(&uts_sem);
- else
- down_write(&uts_sem);
return which;
}
-static void put_uts(struct ctl_table *table, int write, void *which)
-{
- if (!write)
- up_read(&uts_sem);
- else
- up_write(&uts_sem);
-}
-
/*
* Special case of dostring for the UTS structure. This has locks
* to observe. Should this be in kernel/sys.c ????
@@ -49,13 +37,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
{
struct ctl_table uts_table;
int r;
+ char tmp_data[__NEW_UTS_LEN + 1];
+
memcpy(&uts_table, table, sizeof(uts_table));
- uts_table.data = get_uts(table, write);
+ uts_table.data = tmp_data;
+
+ /*
+ * Buffer the value in tmp_data so that proc_dostring() can be called
+ * without holding any locks.
+ * We also need to read the original value in the write==1 case to
+ * support partial writes.
+ */
+ down_read(&uts_sem);
+ memcpy(tmp_data, get_uts(table), sizeof(tmp_data));
+ up_read(&uts_sem);
r = proc_dostring(&uts_table, write, buffer, lenp, ppos);
- put_uts(table, write, uts_table.data);
- if (write)
+ if (write) {
+ /*
+ * Write back the new value.
+ * Note that, since we dropped uts_sem, the result can
+ * theoretically be incorrect if there are two parallel writes
+ * at non-zero offsets to the same sysctl.
+ */
+ down_write(&uts_sem);
+ memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
+ up_write(&uts_sem);
proc_sys_poll_notify(table->poll);
+ }
return r;
}
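The reworked proc_do_uts_string() snapshots the string under a short read lock and runs proc_dostring() on the copy with no lock held, since user accesses may fault and sleep. The same shape in userspace terms, with a pthread rwlock standing in for uts_sem:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define UTS_LEN 64

static pthread_rwlock_t uts_sem = PTHREAD_RWLOCK_INITIALIZER;
static char hostname[UTS_LEN + 1] = "localhost";

/* Snapshot under the read lock, then work on the copy unlocked
 * (in the kernel, the unlocked step is the one that may sleep). */
static void read_hostname(char *out)
{
	char tmp[UTS_LEN + 1];

	pthread_rwlock_rdlock(&uts_sem);
	memcpy(tmp, hostname, sizeof(tmp));
	pthread_rwlock_unlock(&uts_sem);

	strcpy(out, tmp); /* stands in for copy_to_user() */
}

int main(void)
{
	char buf[UTS_LEN + 1];

	read_hostname(buf);
	printf("%s\n", buf);
	return 0;
}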
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 82e5668e8c03f7..d8fe141c743638 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1892,14 +1892,6 @@ config TEST_STATIC_KEYS
If unsure, say N.
-config TEST_MODULE
- tristate "Test module loading with 'hello world' module"
- depends on m
- help
- This builds the "test_module" module that emits "Hello, world"
- on printk when loaded. It is designed to be used for basic
- evaluation of the module loading subsystem.
-
source "samples/Kconfig"
source "lib/Kconfig.kgdb"
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 5cd093589c5af2..3b46c5433b7acd 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -781,9 +781,11 @@ all_leaves_cluster_together:
new_s0->index_key[i] =
ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE);
- blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
- pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
- new_s0->index_key[keylen - 1] &= ~blank;
+ if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) {
+ blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
+ pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank);
+ new_s0->index_key[keylen - 1] &= ~blank;
+ }
/* This now reduces to a node splitting exercise for which we'll need
* to regenerate the disparity table.
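The guard added above matters because when level is an exact multiple of the chunk size the shift amount is 0, `ULONG_MAX << 0` is all ones, and the `&= ~blank` step cleared the entire final index_key chunk. Reduced to its arithmetic:

#include <stdio.h>

#define KEY_CHUNK_MASK 63 /* stands in for ASSOC_ARRAY_KEY_CHUNK_MASK */

int main(void)
{
	int level = 128;                    /* multiple of the chunk size */
	int shift = level & KEY_CHUNK_MASK; /* == 0 */

	/* Unconditional form: ~blank is 0, wiping the whole chunk. */
	unsigned long blank = ~0UL << shift;

	printf("~blank = %#lx\n", ~blank);  /* 0x0: the bug */

	/* Patched form only masks when unused bits actually exist. */
	if (shift) {
		blank = ~0UL << shift;
		/* index_key[keylen - 1] &= ~blank; */
	}
	return 0;
}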
diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c
index 83c33a5bcffb1a..de67fea3cf46cc 100644
--- a/lib/atomic64_test.c
+++ b/lib/atomic64_test.c
@@ -16,6 +16,10 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
+#ifdef CONFIG_X86
+#include <asm/cpufeature.h> /* for boot_cpu_has below */
+#endif
+
#define TEST(bit, op, c_op, val) \
do { \
atomic##bit##_set(&v, v0); \
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 814814397cce39..ef54c0f13fe06f 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -12,9 +12,9 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
+#include <linux/uaccess.h>
#include <asm/page.h>
-#include <asm/uaccess.h>
/*
* bitmaps provide an array of bits, implemented using an
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 96c4c633d95e68..124fdf238b3d95 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -37,7 +37,7 @@ EXPORT_SYMBOL_GPL(debug_locks_silent);
*/
int debug_locks_off(void)
{
- if (__debug_locks_off()) {
+ if (debug_locks && __debug_locks_off()) {
if (!debug_locks_silent) {
console_verbose();
return 1;
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 547f7f923dbcbd..bb37541cd44148 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -295,9 +295,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
limit++;
if (is_on_stack)
- pr_warn("object is on stack, but not annotated\n");
+ pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
+ task_stack_page(current));
else
- pr_warn("object is not on stack, but annotated\n");
+ pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
+ task_stack_page(current));
+
WARN_ON(1);
}
@@ -1085,7 +1088,8 @@ void __init debug_objects_mem_init(void)
obj_cache = kmem_cache_create("debug_objects_cache",
sizeof (struct debug_obj), 0,
- SLAB_DEBUG_OBJECTS, NULL);
+ SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
+ NULL);
if (!obj_cache || debug_objects_replace_static_objects()) {
debug_objects_enabled = 0;
diff --git a/lib/int_sqrt.c b/lib/int_sqrt.c
index 1ef4cc344977cd..1afb545a37c5e3 100644
--- a/lib/int_sqrt.c
+++ b/lib/int_sqrt.c
@@ -22,6 +22,9 @@ unsigned long int_sqrt(unsigned long x)
return x;
m = 1UL << (BITS_PER_LONG - 2);
+ while (m > x)
+ m >>= 2;
+
while (m != 0) {
b = y + m;
y >>= 1;
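The two added lines let int_sqrt() skip straight down to the first bit pair at or below x rather than scanning from the top of the word on every call. The full routine, lifted into a standalone program for reference:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* Integer square root as patched above; the leading loop is the
 * new fast-forward step. */
static unsigned long int_sqrt(unsigned long x)
{
	unsigned long b, m, y = 0;

	if (x <= 1)
		return x;

	m = 1UL << (BITS_PER_LONG - 2);
	while (m > x) /* the added lines */
		m >>= 2;

	while (m != 0) {
		b = y + m;
		y >>= 1;
		if (x >= b) {
			x -= b;
			y += m;
		}
		m >>= 2;
	}
	return y;
}

int main(void)
{
	printf("%lu %lu %lu\n", int_sqrt(0), int_sqrt(26), int_sqrt(1UL << 40));
	/* 0 5 1048576 */
	return 0;
}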
diff --git a/lib/interval_tree_test.c b/lib/interval_tree_test.c
index 245900b98c8e43..222c8010bda017 100644
--- a/lib/interval_tree_test.c
+++ b/lib/interval_tree_test.c
@@ -1,27 +1,38 @@
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/interval_tree.h>
#include <linux/random.h>
+#include <linux/slab.h>
#include <asm/timex.h>
-#define NODES 100
-#define PERF_LOOPS 100000
-#define SEARCHES 100
-#define SEARCH_LOOPS 10000
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the interval tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the tree");
+
+__param(int, nsearches, 100, "Number of searches to the interval tree");
+__param(int, search_loops, 1000, "Number of iterations searching the tree");
+__param(bool, search_all, false, "Searches will iterate all nodes in the tree");
+
+__param(uint, max_endpoint, ~0, "Largest value for the interval's endpoint");
static struct rb_root root = RB_ROOT;
-static struct interval_tree_node nodes[NODES];
-static u32 queries[SEARCHES];
+static struct interval_tree_node *nodes = NULL;
+static u32 *queries = NULL;
static struct rnd_state rnd;
static inline unsigned long
-search(unsigned long query, struct rb_root *root)
+search(struct rb_root *root, unsigned long start, unsigned long last)
{
struct interval_tree_node *node;
unsigned long results = 0;
- for (node = interval_tree_iter_first(root, query, query); node;
- node = interval_tree_iter_next(node, query, query))
+ for (node = interval_tree_iter_first(root, start, last); node;
+ node = interval_tree_iter_next(node, start, last))
results++;
return results;
}
@@ -29,19 +40,22 @@ search(unsigned long query, struct rb_root *root)
static void init(void)
{
int i;
- for (i = 0; i < NODES; i++) {
- u32 a = prandom_u32_state(&rnd);
- u32 b = prandom_u32_state(&rnd);
- if (a <= b) {
- nodes[i].start = a;
- nodes[i].last = b;
- } else {
- nodes[i].start = b;
- nodes[i].last = a;
- }
+
+ for (i = 0; i < nnodes; i++) {
+ u32 b = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
+ u32 a = (prandom_u32_state(&rnd) >> 4) % b;
+
+ nodes[i].start = a;
+ nodes[i].last = b;
}
- for (i = 0; i < SEARCHES; i++)
- queries[i] = prandom_u32_state(&rnd);
+
+ /*
+ * Limit the search scope to what the user defined.
+ * Otherwise we are merely measuring empty walks,
+ * which is pointless.
+ */
+ for (i = 0; i < nsearches; i++)
+ queries[i] = (prandom_u32_state(&rnd) >> 4) % max_endpoint;
}
static int interval_tree_test_init(void)
@@ -50,6 +64,16 @@ static int interval_tree_test_init(void)
unsigned long results;
cycles_t time1, time2, time;
+ nodes = kmalloc(nnodes * sizeof(struct interval_tree_node), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
+
+ queries = kmalloc(nsearches * sizeof(int), GFP_KERNEL);
+ if (!queries) {
+ kfree(nodes);
+ return -ENOMEM;
+ }
+
printk(KERN_ALERT "interval tree insert/remove");
prandom_seed_state(&rnd, 3141592653589793238ULL);
@@ -57,39 +81,46 @@ static int interval_tree_test_init(void)
time1 = get_cycles();
- for (i = 0; i < PERF_LOOPS; i++) {
- for (j = 0; j < NODES; j++)
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
interval_tree_insert(nodes + j, &root);
- for (j = 0; j < NODES; j++)
+ for (j = 0; j < nnodes; j++)
interval_tree_remove(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
- time = div_u64(time, PERF_LOOPS);
+ time = div_u64(time, perf_loops);
printk(" -> %llu cycles\n", (unsigned long long)time);
printk(KERN_ALERT "interval tree search");
- for (j = 0; j < NODES; j++)
+ for (j = 0; j < nnodes; j++)
interval_tree_insert(nodes + j, &root);
time1 = get_cycles();
results = 0;
- for (i = 0; i < SEARCH_LOOPS; i++)
- for (j = 0; j < SEARCHES; j++)
- results += search(queries[j], &root);
+ for (i = 0; i < search_loops; i++)
+ for (j = 0; j < nsearches; j++) {
+ unsigned long start = search_all ? 0 : queries[j];
+ unsigned long last = search_all ? max_endpoint : queries[j];
+
+ results += search(&root, start, last);
+ }
time2 = get_cycles();
time = time2 - time1;
- time = div_u64(time, SEARCH_LOOPS);
- results = div_u64(results, SEARCH_LOOPS);
+ time = div_u64(time, search_loops);
+ results = div_u64(results, search_loops);
printk(" -> %llu cycles (%lu results)\n",
(unsigned long long)time, results);
+ kfree(queries);
+ kfree(nodes);
+
return -EAGAIN; /* Fail will directly unload the module */
}
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 5323b59ca3936d..b9462037868d0d 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -84,7 +84,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
if (ioremap_pmd_enabled() &&
((next - addr) == PMD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
- pmd_free_pte_page(pmd)) {
+ pmd_free_pte_page(pmd, addr)) {
if (pmd_set_huge(pmd, phys_addr + addr, prot))
continue;
}
@@ -111,7 +111,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
if (ioremap_pud_enabled() &&
((next - addr) == PUD_SIZE) &&
IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
- pud_free_pmd_page(pud)) {
+ pud_free_pmd_page(pud, addr)) {
if (pud_set_huge(pud, phys_addr + addr, prot))
continue;
}
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5d84c534..f6b547812fe3de 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@ struct klist_node *klist_prev(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@ struct klist_node *klist_prev(struct klist_iter *i)
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@ struct klist_node *klist_next(struct klist_iter *i)
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@ struct klist_node *klist_next(struct klist_iter *i)
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/kobject.c b/lib/kobject.c
index 895edb63fba4a3..35d490b02cddd1 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -127,7 +127,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
- strncpy(path + length, kobject_name(parent), cur);
+ memcpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 236eb21167b5db..fa8d4ff38531b3 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -20,7 +20,7 @@
static noinline size_t
lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
unsigned char *out, size_t *out_len,
- size_t ti, void *wrkmem)
+ size_t ti, void *wrkmem, signed char *state_offset)
{
const unsigned char *ip;
unsigned char *op;
@@ -38,30 +38,87 @@ lzo1x_1_do_compress(const unsigned char *in, size_t in_len,
const unsigned char *m_pos;
size_t t, m_len, m_off;
u32 dv;
+ u32 run_length = 0;
literal:
ip += 1 + ((ip - ii) >> 5);
next:
if (unlikely(ip >= ip_end))
break;
dv = get_unaligned_le32(ip);
- t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
- m_pos = in + dict[t];
- dict[t] = (lzo_dict_t) (ip - in);
- if (unlikely(dv != get_unaligned_le32(m_pos)))
- goto literal;
+
+ if (dv == 0) {
+ const unsigned char *ir = ip + 4;
+ const unsigned char *limit = ip_end
+ < (ip + MAX_ZERO_RUN_LENGTH + 1)
+ ? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+ defined(LZO_FAST_64BIT_MEMORY_ACCESS)
+ u64 dv64;
+
+ for (; (ir + 32) <= limit; ir += 32) {
+ dv64 = get_unaligned((u64 *)ir);
+ dv64 |= get_unaligned((u64 *)ir + 1);
+ dv64 |= get_unaligned((u64 *)ir + 2);
+ dv64 |= get_unaligned((u64 *)ir + 3);
+ if (dv64)
+ break;
+ }
+ for (; (ir + 8) <= limit; ir += 8) {
+ dv64 = get_unaligned((u64 *)ir);
+ if (dv64) {
+# if defined(__LITTLE_ENDIAN)
+ ir += __builtin_ctzll(dv64) >> 3;
+# elif defined(__BIG_ENDIAN)
+ ir += __builtin_clzll(dv64) >> 3;
+# else
+# error "missing endian definition"
+# endif
+ break;
+ }
+ }
+#else
+ while ((ir < (const unsigned char *)
+ ALIGN((uintptr_t)ir, 4)) &&
+ (ir < limit) && (*ir == 0))
+ ir++;
+ for (; (ir + 4) <= limit; ir += 4) {
+ dv = *((u32 *)ir);
+ if (dv) {
+# if defined(__LITTLE_ENDIAN)
+ ir += __builtin_ctz(dv) >> 3;
+# elif defined(__BIG_ENDIAN)
+ ir += __builtin_clz(dv) >> 3;
+# else
+# error "missing endian definition"
+# endif
+ break;
+ }
+ }
+#endif
+ while (likely(ir < limit) && unlikely(*ir == 0))
+ ir++;
+ run_length = ir - ip;
+ if (run_length > MAX_ZERO_RUN_LENGTH)
+ run_length = MAX_ZERO_RUN_LENGTH;
+ } else {
+ t = ((dv * 0x1824429d) >> (32 - D_BITS)) & D_MASK;
+ m_pos = in + dict[t];
+ dict[t] = (lzo_dict_t) (ip - in);
+ if (unlikely(dv != get_unaligned_le32(m_pos)))
+ goto literal;
+ }
ii -= ti;
ti = 0;
t = ip - ii;
if (t != 0) {
if (t <= 3) {
- op[-2] |= t;
+ op[*state_offset] |= t;
COPY4(op, ii);
op += t;
} else if (t <= 16) {
*op++ = (t - 3);
- COPY8(op, ii);
- COPY8(op + 8, ii + 8);
+ COPY16(op, ii);
op += t;
} else {
if (t <= 18) {
@@ -76,8 +133,7 @@ next:
*op++ = tt;
}
do {
- COPY8(op, ii);
- COPY8(op + 8, ii + 8);
+ COPY16(op, ii);
op += 16;
ii += 16;
t -= 16;
@@ -88,6 +144,17 @@ next:
}
}
+ if (unlikely(run_length)) {
+ ip += run_length;
+ run_length -= MIN_ZERO_RUN_LENGTH;
+ put_unaligned_le32((run_length << 21) | 0xfffc18
+ | (run_length & 0x7), op);
+ op += 4;
+ run_length = 0;
+ *state_offset = -3;
+ goto finished_writing_instruction;
+ }
+
m_len = 4;
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(LZO_USE_CTZ64)
@@ -170,7 +237,6 @@ m_len_done:
m_off = ip - m_pos;
ip += m_len;
- ii = ip;
if (m_len <= M2_MAX_LEN && m_off <= M2_MAX_OFFSET) {
m_off -= 1;
*op++ = (((m_len - 1) << 5) | ((m_off & 7) << 2));
@@ -207,6 +273,9 @@ m_len_done:
*op++ = (m_off << 2);
*op++ = (m_off >> 6);
}
+ *state_offset = -2;
+finished_writing_instruction:
+ ii = ip;
goto next;
}
*out_len = op - out;
@@ -221,6 +290,12 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
unsigned char *op = out;
size_t l = in_len;
size_t t = 0;
+ signed char state_offset = -2;
+
+ // LZO v0 will never write 17 as first byte,
+ // so this is used to version the bitstream
+ *op++ = 17;
+ *op++ = LZO_VERSION;
while (l > 20) {
size_t ll = l <= (M4_MAX_OFFSET + 1) ? l : (M4_MAX_OFFSET + 1);
@@ -229,7 +304,8 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
break;
BUILD_BUG_ON(D_SIZE * sizeof(lzo_dict_t) > LZO1X_1_MEM_COMPRESS);
memset(wrkmem, 0, D_SIZE * sizeof(lzo_dict_t));
- t = lzo1x_1_do_compress(ip, ll, op, out_len, t, wrkmem);
+ t = lzo1x_1_do_compress(ip, ll, op, out_len,
+ t, wrkmem, &state_offset);
ip += ll;
op += *out_len;
l -= ll;
@@ -242,7 +318,7 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
if (op == out && t <= 238) {
*op++ = (17 + t);
} else if (t <= 3) {
- op[-2] |= t;
+ op[state_offset] |= t;
} else if (t <= 18) {
*op++ = (t - 3);
} else {
@@ -255,8 +331,7 @@ int lzo1x_1_compress(const unsigned char *in, size_t in_len,
*op++ = tt;
}
if (t >= 16) do {
- COPY8(op, ii);
- COPY8(op + 8, ii + 8);
+ COPY16(op, ii);
op += 16;
ii += 16;
t -= 16;
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index a1c387f6afba24..b8f88d5ea3ff63 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -46,11 +46,23 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
const unsigned char * const ip_end = in + in_len;
unsigned char * const op_end = out + *out_len;
+ unsigned char bitstream_version;
+
op = out;
ip = in;
if (unlikely(in_len < 3))
goto input_overrun;
+
+ if (likely(*ip == 17)) {
+ bitstream_version = ip[1];
+ ip += 2;
+ if (unlikely(in_len < 5))
+ goto input_overrun;
+ } else {
+ bitstream_version = 0;
+ }
+
if (*ip > 17) {
t = *ip++ - 17;
if (t < 4) {
@@ -86,12 +98,9 @@ copy_literal_run:
const unsigned char *ie = ip + t;
unsigned char *oe = op + t;
do {
- COPY8(op, ip);
- op += 8;
- ip += 8;
- COPY8(op, ip);
- op += 8;
- ip += 8;
+ COPY16(op, ip);
+ op += 16;
+ ip += 16;
} while (ip < ie);
ip = ie;
op = oe;
@@ -154,32 +163,49 @@ copy_literal_run:
m_pos -= next >> 2;
next &= 3;
} else {
- m_pos = op;
- m_pos -= (t & 8) << 11;
- t = (t & 7) + (3 - 1);
- if (unlikely(t == 2)) {
- size_t offset;
- const unsigned char *ip_last = ip;
+ NEED_IP(2);
+ next = get_unaligned_le16(ip);
+ if (((next & 0xfffc) == 0xfffc) &&
+ ((t & 0xf8) == 0x18) &&
+ likely(bitstream_version)) {
+ NEED_IP(3);
+ t &= 7;
+ t |= ip[2] << 3;
+ t += MIN_ZERO_RUN_LENGTH;
+ NEED_OP(t);
+ memset(op, 0, t);
+ op += t;
+ next &= 3;
+ ip += 3;
+ goto match_next;
+ } else {
+ m_pos = op;
+ m_pos -= (t & 8) << 11;
+ t = (t & 7) + (3 - 1);
+ if (unlikely(t == 2)) {
+ size_t offset;
+ const unsigned char *ip_last = ip;
- while (unlikely(*ip == 0)) {
- ip++;
- NEED_IP(1);
- }
- offset = ip - ip_last;
- if (unlikely(offset > MAX_255_COUNT))
- return LZO_E_ERROR;
+ while (unlikely(*ip == 0)) {
+ ip++;
+ NEED_IP(1);
+ }
+ offset = ip - ip_last;
+ if (unlikely(offset > MAX_255_COUNT))
+ return LZO_E_ERROR;
- offset = (offset << 8) - offset;
- t += offset + 7 + *ip++;
- NEED_IP(2);
+ offset = (offset << 8) - offset;
+ t += offset + 7 + *ip++;
+ NEED_IP(2);
+ next = get_unaligned_le16(ip);
+ }
+ ip += 2;
+ m_pos -= next >> 2;
+ next &= 3;
+ if (m_pos == op)
+ goto eof_found;
+ m_pos -= 0x4000;
}
- next = get_unaligned_le16(ip);
- ip += 2;
- m_pos -= next >> 2;
- next &= 3;
- if (m_pos == op)
- goto eof_found;
- m_pos -= 0x4000;
}
TEST_LB(m_pos);
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
@@ -187,12 +213,9 @@ copy_literal_run:
unsigned char *oe = op + t;
if (likely(HAVE_OP(t + 15))) {
do {
- COPY8(op, m_pos);
- op += 8;
- m_pos += 8;
- COPY8(op, m_pos);
- op += 8;
- m_pos += 8;
+ COPY16(op, m_pos);
+ op += 16;
+ m_pos += 16;
} while (op < oe);
op = oe;
if (HAVE_IP(6)) {
diff --git a/lib/lzo/lzodefs.h b/lib/lzo/lzodefs.h
index 6710b83ce72ed0..4f1c44b6fa0177 100644
--- a/lib/lzo/lzodefs.h
+++ b/lib/lzo/lzodefs.h
@@ -12,9 +12,15 @@
*/
+/* Version
+ * 0: original lzo version
+ * 1: lzo with support for RLE
+ */
+#define LZO_VERSION 1
+
#define COPY4(dst, src) \
put_unaligned(get_unaligned((const u32 *)(src)), (u32 *)(dst))
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__aarch64__)
#define COPY8(dst, src) \
put_unaligned(get_unaligned((const u64 *)(src)), (u64 *)(dst))
#else
@@ -22,21 +28,30 @@
COPY4(dst, src); COPY4((dst) + 4, (src) + 4)
#endif
+#define COPY16(dst, src) \
+ do { COPY8(dst, src); COPY8((dst) + 8, (src) + 8); } while (0)
+
#if defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)
#error "conflicting endian definitions"
-#elif defined(__x86_64__)
+#elif defined(__x86_64__) || defined(__aarch64__)
#define LZO_USE_CTZ64 1
#define LZO_USE_CTZ32 1
+#define LZO_FAST_64BIT_MEMORY_ACCESS
#elif defined(__i386__) || defined(__powerpc__)
#define LZO_USE_CTZ32 1
-#elif defined(__arm__) && (__LINUX_ARM_ARCH__ >= 5)
+#elif defined(__arm__)
+#if (__LINUX_ARM_ARCH__ >= 5)
#define LZO_USE_CTZ32 1
#endif
+#if (__LINUX_ARM_ARCH__ >= 6) && defined(CONFIG_THUMB2_KERNEL)
+#define LZO_USE_CTZ64 1
+#endif
+#endif
#define M1_MAX_OFFSET 0x0400
#define M2_MAX_OFFSET 0x0800
#define M3_MAX_OFFSET 0x4000
-#define M4_MAX_OFFSET 0xbfff
+#define M4_MAX_OFFSET 0xbffe
#define M1_MIN_LEN 2
#define M1_MAX_LEN 2
@@ -52,6 +67,9 @@
#define M3_MARKER 32
#define M4_MARKER 16
+#define MIN_ZERO_RUN_LENGTH 4
+#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH)
+
#define lzo_dict_t unsigned short
#define D_BITS 13
#define D_SIZE (1u << D_BITS)
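The new RLE path emits a 4-byte instruction for zero runs: the low 3 bits of the biased run length share the 0x18-marked opcode byte, the middle two bytes read 0xfffc as a marker, and the high 8 bits ride in the fourth byte. A round-trip sketch of that packing; the constants are copied from lzodefs.h above, while the helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MIN_ZERO_RUN_LENGTH 4
#define MAX_ZERO_RUN_LENGTH (2047 + MIN_ZERO_RUN_LENGTH)

/* Pack a zero run of 'run' bytes as the compressor does: little-endian
 * (r << 21) | 0xfffc18 | (r & 7), with r = run - MIN_ZERO_RUN_LENGTH. */
static uint32_t encode_zero_run(unsigned run)
{
	uint32_t r = run - MIN_ZERO_RUN_LENGTH;

	return (r << 21) | 0xfffc18 | (r & 7);
}

/* Recover the run length the way lzo1x_decompress_safe() does. */
static unsigned decode_zero_run(const uint8_t b[4])
{
	unsigned t = b[0] & 7; /* low 3 bits live in the opcode byte */

	t |= b[3] << 3;        /* high 8 bits live in the 4th byte */
	return t + MIN_ZERO_RUN_LENGTH;
}

int main(void)
{
	uint32_t insn = encode_zero_run(100);
	uint8_t b[4] = { insn, insn >> 8, insn >> 16, insn >> 24 };

	/* Marker checks mirror the decompressor: (b0 & 0xf8) == 0x18
	 * and the next 16 bits read 0xfffc. */
	printf("opcode %#x marker %#x run %u\n",
	       b[0], b[1] | (b[2] << 8), decode_zero_run(b));
	return 0;
}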
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6b79e9026e2489..57080b6f8002c1 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1152,102 +1152,6 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
-#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
-#include <linux/sched.h> /* for cond_resched() */
-
-/*
- * This linear search is at present only useful to shmem_unuse_inode().
- */
-static unsigned long __locate(struct radix_tree_node *slot, void *item,
- unsigned long index, unsigned long *found_index)
-{
- unsigned int shift, height;
- unsigned long i;
-
- height = slot->path & RADIX_TREE_HEIGHT_MASK;
- shift = (height-1) * RADIX_TREE_MAP_SHIFT;
-
- for ( ; height > 1; height--) {
- i = (index >> shift) & RADIX_TREE_MAP_MASK;
- for (;;) {
- if (slot->slots[i] != NULL)
- break;
- index &= ~((1UL << shift) - 1);
- index += 1UL << shift;
- if (index == 0)
- goto out; /* 32-bit wraparound */
- i++;
- if (i == RADIX_TREE_MAP_SIZE)
- goto out;
- }
-
- shift -= RADIX_TREE_MAP_SHIFT;
- slot = rcu_dereference_raw(slot->slots[i]);
- if (slot == NULL)
- goto out;
- }
-
- /* Bottom level: check items */
- for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
- if (slot->slots[i] == item) {
- *found_index = index + i;
- index = 0;
- goto out;
- }
- }
- index += RADIX_TREE_MAP_SIZE;
-out:
- return index;
-}
-
-/**
- * radix_tree_locate_item - search through radix tree for item
- * @root: radix tree root
- * @item: item to be found
- *
- * Returns index where item was found, or -1 if not found.
- * Caller must hold no lock (since this time-consuming function needs
- * to be preemptible), and must check afterwards if item is still there.
- */
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
-{
- struct radix_tree_node *node;
- unsigned long max_index;
- unsigned long cur_index = 0;
- unsigned long found_index = -1;
-
- do {
- rcu_read_lock();
- node = rcu_dereference_raw(root->rnode);
- if (!radix_tree_is_indirect_ptr(node)) {
- rcu_read_unlock();
- if (node == item)
- found_index = 0;
- break;
- }
-
- node = indirect_to_ptr(node);
- max_index = radix_tree_maxindex(node->path &
- RADIX_TREE_HEIGHT_MASK);
- if (cur_index > max_index) {
- rcu_read_unlock();
- break;
- }
-
- cur_index = __locate(node, item, cur_index, &found_index);
- rcu_read_unlock();
- cond_resched();
- } while (cur_index != 0 && cur_index <= max_index);
-
- return found_index;
-}
-#else
-unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
-{
- return -1;
-}
-#endif /* CONFIG_SHMEM && CONFIG_SWAP */
-
/**
* radix_tree_shrink - shrink height of a radix tree to minimal
* @root radix tree root
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 29090f3db677b7..28c089cb13f167 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -26,7 +26,7 @@ ifeq ($(ARCH),arm)
CFLAGS += -I../../../arch/arm/include -mfpu=neon
HAS_NEON = yes
endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
CFLAGS += -I../../../arch/arm64/include
HAS_NEON = yes
endif
@@ -37,7 +37,7 @@ ifeq ($(IS_X86),yes)
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)
else ifeq ($(HAS_NEON),yes)
- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
diff --git a/lib/rbtree_test.c b/lib/rbtree_test.c
index 8b3c9dc882628f..2cd7ac5a58c43b 100644
--- a/lib/rbtree_test.c
+++ b/lib/rbtree_test.c
@@ -1,11 +1,18 @@
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/rbtree_augmented.h>
#include <linux/random.h>
+#include <linux/slab.h>
#include <asm/timex.h>
-#define NODES 100
-#define PERF_LOOPS 100000
-#define CHECK_LOOPS 100
+#define __param(type, name, init, msg) \
+ static type name = init; \
+ module_param(name, type, 0444); \
+ MODULE_PARM_DESC(name, msg);
+
+__param(int, nnodes, 100, "Number of nodes in the rb-tree");
+__param(int, perf_loops, 1000, "Number of iterations modifying the rb-tree");
+__param(int, check_loops, 100, "Number of iterations modifying and verifying the rb-tree");
struct test_node {
u32 key;
@@ -17,7 +24,7 @@ struct test_node {
};
static struct rb_root root = RB_ROOT;
-static struct test_node nodes[NODES];
+static struct test_node *nodes = NULL;
static struct rnd_state rnd;
@@ -95,7 +102,7 @@ static void erase_augmented(struct test_node *node, struct rb_root *root)
static void init(void)
{
int i;
- for (i = 0; i < NODES; i++) {
+ for (i = 0; i < nnodes; i++) {
nodes[i].key = prandom_u32_state(&rnd);
nodes[i].val = prandom_u32_state(&rnd);
}
@@ -176,6 +183,11 @@ static int __init rbtree_test_init(void)
{
int i, j;
cycles_t time1, time2, time;
+ struct rb_node *node;
+
+ nodes = kmalloc(nnodes * sizeof(*nodes), GFP_KERNEL);
+ if (!nodes)
+ return -ENOMEM;
printk(KERN_ALERT "rbtree testing");
@@ -184,27 +196,47 @@ static int __init rbtree_test_init(void)
time1 = get_cycles();
- for (i = 0; i < PERF_LOOPS; i++) {
- for (j = 0; j < NODES; j++)
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
insert(nodes + j, &root);
- for (j = 0; j < NODES; j++)
+ for (j = 0; j < nnodes; j++)
erase(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
- time = div_u64(time, PERF_LOOPS);
- printk(" -> %llu cycles\n", (unsigned long long)time);
+ time = div_u64(time, perf_loops);
+ printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);
+
+ for (i = 0; i < nnodes; i++)
+ insert(nodes + i, &root);
+
+ time1 = get_cycles();
+
+ for (i = 0; i < perf_loops; i++) {
+ for (node = rb_first(&root); node; node = rb_next(node))
+ ;
+ }
- for (i = 0; i < CHECK_LOOPS; i++) {
+ time2 = get_cycles();
+ time = time2 - time1;
+
+ time = div_u64(time, perf_loops);
+ printk(" -> test 2 (latency of inorder traversal): %llu cycles\n", (unsigned long long)time);
+
+ for (i = 0; i < nnodes; i++)
+ erase(nodes + i, &root);
+
+ /* run checks */
+ for (i = 0; i < check_loops; i++) {
init();
- for (j = 0; j < NODES; j++) {
+ for (j = 0; j < nnodes; j++) {
check(j);
insert(nodes + j, &root);
}
- for (j = 0; j < NODES; j++) {
- check(NODES - j);
+ for (j = 0; j < nnodes; j++) {
+ check(nnodes - j);
erase(nodes + j, &root);
}
check(0);
@@ -216,32 +248,34 @@ static int __init rbtree_test_init(void)
time1 = get_cycles();
- for (i = 0; i < PERF_LOOPS; i++) {
- for (j = 0; j < NODES; j++)
+ for (i = 0; i < perf_loops; i++) {
+ for (j = 0; j < nnodes; j++)
insert_augmented(nodes + j, &root);
- for (j = 0; j < NODES; j++)
+ for (j = 0; j < nnodes; j++)
erase_augmented(nodes + j, &root);
}
time2 = get_cycles();
time = time2 - time1;
- time = div_u64(time, PERF_LOOPS);
- printk(" -> %llu cycles\n", (unsigned long long)time);
+ time = div_u64(time, perf_loops);
+ printk(" -> test 1 (latency of nnodes insert+delete): %llu cycles\n", (unsigned long long)time);
- for (i = 0; i < CHECK_LOOPS; i++) {
+ for (i = 0; i < check_loops; i++) {
init();
- for (j = 0; j < NODES; j++) {
+ for (j = 0; j < nnodes; j++) {
check_augmented(j);
insert_augmented(nodes + j, &root);
}
- for (j = 0; j < NODES; j++) {
- check_augmented(NODES - j);
+ for (j = 0; j < nnodes; j++) {
+ check_augmented(nnodes - j);
erase_augmented(nodes + j, &root);
}
check_augmented(0);
}
+ kfree(nodes);
+
return -EAGAIN; /* Fail will directly unload the module */
}
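
Review note: each __param() line above expands to a static variable plus the module_param()/MODULE_PARM_DESC boilerplate, turning the old compile-time NODES/PERF_LOOPS/CHECK_LOOPS constants into load-time knobs. Expanded by hand, the first invocation is roughly:

/* __param(int, nnodes, 100, "Number of nodes in the rb-tree") becomes: */
static int nnodes = 100;
module_param(nnodes, int, 0444);        /* world-readable, not writable */
MODULE_PARM_DESC(nnodes, "Number of nodes in the rb-tree");

So a run like `insmod rbtree_test.ko nnodes=1000 perf_loops=100000` restores the old workload without a rebuild; note the perf_loops default also drops from 100000 to 1000.
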
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 51282f57976065..7bb8649429bffc 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -250,8 +250,10 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
if (!new_tbl)
return 0;
- for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+ for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
rhashtable_rehash_chain(ht, old_hash);
+ cond_resched();
+ }
/* Publish the new table pointer. */
rcu_assign_pointer(ht->tbl, new_tbl);
@@ -441,7 +443,8 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);
struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
const void *key,
struct rhash_head *obj,
- struct bucket_table *tbl)
+ struct bucket_table *tbl,
+ void **data)
{
struct rhash_head *head;
unsigned int hash;
@@ -452,8 +455,11 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht,
spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);
err = -EEXIST;
- if (key && rhashtable_lookup_fast(ht, key, ht->p))
- goto exit;
+ if (key) {
+ *data = rhashtable_lookup_fast(ht, key, ht->p);
+ if (*data)
+ goto exit;
+ }
err = -E2BIG;
if (unlikely(rht_grow_above_max(ht, tbl)))
@@ -670,8 +676,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
- return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
- (unsigned long)params->min_size);
+ size_t retsize;
+
+ if (params->nelem_hint)
+ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ (unsigned long)params->min_size);
+ else
+ retsize = max(HASH_DEFAULT_SIZE,
+ (unsigned long)params->min_size);
+
+ return retsize;
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -728,8 +742,6 @@ int rhashtable_init(struct rhashtable *ht,
struct bucket_table *tbl;
size_t size;
- size = HASH_DEFAULT_SIZE;
-
if ((!params->key_len && !params->obj_hashfn) ||
(params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
@@ -756,8 +768,7 @@ int rhashtable_init(struct rhashtable *ht,
ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
- if (params->nelem_hint)
- size = rounded_hashtable_size(&ht->p);
+ size = rounded_hashtable_size(&ht->p);
/* The maximum (not average) chain length grows with the
* size of the hash table, at a rate of (log N)/(log log N).
@@ -833,6 +844,7 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
+ cond_resched();
for (pos = rht_dereference(tbl->buckets[i], ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
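
Review note: rounded_hashtable_size() now owns the no-hint fallback instead of rhashtable_init() special-casing it. A standalone userspace model of the sizing rule, assuming HASH_DEFAULT_SIZE is 64 as in this tree:

#include <stdio.h>

static unsigned long roundup_pow_of_two(unsigned long n)
{
        unsigned long p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

/* Model of rounded_hashtable_size(): the hint is scaled for a 3/4
 * load factor, else HASH_DEFAULT_SIZE (64); never below min_size. */
static unsigned long rounded_size(unsigned long hint, unsigned long min_size)
{
        unsigned long size = hint ? roundup_pow_of_two(hint * 4 / 3) : 64;

        return size > min_size ? size : min_size;
}

int main(void)
{
        printf("%lu\n", rounded_size(100, 16)); /* 256: 133 rounded up to pow2 */
        printf("%lu\n", rounded_size(0, 16));   /* 64: default size wins */
        return 0;
}
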
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 5c94e1012a91f9..cbef5ee4c45910 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -143,9 +143,13 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
WARN_ON(s->size == 0);
+ /* Add 1 to len for the trailing null byte which must be there */
+ len += 1;
+
if (seq_buf_can_fit(s, len)) {
memcpy(s->buffer + s->len, str, len);
- s->len += len;
+ /* Don't count the trailing null byte against the capacity */
+ s->len += len - 1;
return 0;
}
seq_buf_set_overflow(s);
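
Review note: the fix makes seq_buf_puts() reserve room for the terminating NUL when checking capacity while accounting only the visible characters as used, so the buffer stays terminated and the next write overwrites the terminator rather than appending after it. Worked through for an 8-byte buffer:

/*
 * seq_buf_puts(s, "hello") with s->size = 8, s->len = 0:
 *
 *   len = strlen("hello") + 1 = 6
 *   seq_buf_can_fit(s, 6)        -> 0 + 6 <= 8, ok
 *   memcpy of 6 bytes            -> buffer holds "hello\0"
 *   s->len += 6 - 1              -> len = 5, NUL not counted
 *
 * A following seq_buf_puts() starts at offset 5, replacing the old
 * NUL and laying down its own.
 */
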
diff --git a/lib/string.c b/lib/string.c
index ed83562a53ae5c..ccabe1651b2c96 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -952,3 +952,10 @@ char *strreplace(char *s, char old, char new)
return s;
}
EXPORT_SYMBOL(strreplace);
+
+void fortify_panic(const char *name)
+{
+ pr_emerg("detected buffer overflow in %s\n", name);
+ BUG();
+}
+EXPORT_SYMBOL(fortify_panic);
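
Review note: fortify_panic() is the shared runtime trap for the CONFIG_FORTIFY_SOURCE string/memory wrappers being backported here. A minimal sketch of the calling pattern; the wrapper name and structure below are illustrative, not the exact include/linux/string.h code:

/* Sketch: how a fortified helper would land in fortify_panic().
 * __builtin_object_size() yields (size_t)-1 when the size is unknown. */
static inline void *memcpy_checked(void *p, const void *q, size_t size)
{
        size_t p_size = __builtin_object_size(p, 0);

        if (p_size != (size_t)-1 && p_size < size)
                fortify_panic(__func__);        /* logs pr_emerg(), then BUG() */
        return __builtin_memcpy(p, q, size);
}
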
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index e0af6ff73d146c..5a003a2ebd967c 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -39,8 +39,8 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
unsigned long c, data;
/* Fall back to byte-at-a-time if we get a page fault */
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
- break;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), byte_at_a_time);
+
*(unsigned long *)(dst+res) = c;
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
@@ -55,8 +55,7 @@ byte_at_a_time:
while (max) {
char c;
- if (unlikely(__get_user(c,src+res)))
- return -EFAULT;
+ unsafe_get_user(c,src+res, efault);
dst[res] = c;
if (!c)
return res;
@@ -75,6 +74,7 @@ byte_at_a_time:
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's an EFAULT.
*/
+efault:
return -EFAULT;
}
@@ -107,7 +107,12 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
src_addr = (unsigned long)src;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
- return do_strncpy_from_user(dst, src, count, max);
+ long retval;
+
+ user_access_begin();
+ retval = do_strncpy_from_user(dst, src, count, max);
+ user_access_end();
+ return retval;
}
return -EFAULT;
}
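
Review note: unsafe_get_user() is the label-taking variant of __get_user(): on a fault it branches to a caller-supplied label instead of returning a value to test, which lets both loops here funnel into the single byte_at_a_time/efault exit. It must run between user_access_begin() and user_access_end(), hence the new bracketing in strncpy_from_user(). The generic fallback is approximately:

/* Approximate generic fallback (arches with STAC/CLAC sections provide
 * their own): fault handling becomes a goto rather than a return value. */
#define unsafe_get_user(x, ptr, err_label)              \
do {                                                    \
        if (unlikely(__get_user(x, ptr)))               \
                goto err_label;                         \
} while (0)
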
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 3a5f2b366d84ed..8e105ed4df12bb 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -45,8 +45,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
src -= align;
max += align;
- if (unlikely(__get_user(c,(unsigned long __user *)src)))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)src, efault);
c |= aligned_byte_mask(align);
for (;;) {
@@ -61,8 +60,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
if (unlikely(max <= sizeof(unsigned long)))
break;
max -= sizeof(unsigned long);
- if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
- return 0;
+ unsafe_get_user(c, (unsigned long __user *)(src+res), efault);
}
res -= align;
@@ -77,6 +75,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
* Nope: we hit the address space limit, and we still had more
* characters the caller would have wanted. That's 0.
*/
+efault:
return 0;
}
@@ -112,7 +111,12 @@ long strnlen_user(const char __user *str, long count)
src_addr = (unsigned long)str;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
- return do_strnlen_user(str, count, max);
+ long retval;
+
+ user_access_begin();
+ retval = do_strnlen_user(str, count, max);
+ user_access_end();
+ return retval;
}
return 0;
}
@@ -141,7 +145,12 @@ long strlen_user(const char __user *str)
src_addr = (unsigned long)str;
if (likely(src_addr < max_addr)) {
unsigned long max = max_addr - src_addr;
- return do_strnlen_user(str, ~0ul, max);
+ long retval;
+
+ user_access_begin();
+ retval = do_strnlen_user(str, ~0ul, max);
+ user_access_end();
+ return retval;
}
return 0;
}
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 771234d050c795..6bc452b33b76fe 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -17,6 +17,8 @@
* 08/12/11 beckyb Add highmem support
*/
+#define pr_fmt(fmt) "software IO TLB: " fmt
+
#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
@@ -143,20 +145,16 @@ static bool no_iotlb_memory;
void swiotlb_print_info(void)
{
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
- unsigned char *vstart, *vend;
if (no_iotlb_memory) {
- pr_warn("software IO TLB: No low mem\n");
+ pr_warn("No low mem\n");
return;
}
- vstart = phys_to_virt(io_tlb_start);
- vend = phys_to_virt(io_tlb_end);
-
- printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
+ pr_info("mapped [mem %#010llx-%#010llx] (%luMB)\n",
(unsigned long long)io_tlb_start,
(unsigned long long)io_tlb_end,
- bytes >> 20, vstart, vend - 1);
+ bytes >> 20);
}
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
@@ -230,7 +228,7 @@ swiotlb_init(int verbose)
if (io_tlb_start)
memblock_free_early(io_tlb_start,
PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
- pr_warn("Cannot allocate SWIOTLB buffer");
+ pr_warn("Cannot allocate buffer");
no_iotlb_memory = true;
}
@@ -272,8 +270,8 @@ swiotlb_late_init_with_default_size(size_t default_size)
return -ENOMEM;
}
if (order != get_order(bytes)) {
- printk(KERN_WARNING "Warning: only able to allocate %ld MB "
- "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
+ pr_warn("only able to allocate %ld MB\n",
+ (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
}
rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
@@ -680,7 +678,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
return ret;
err_warn:
- pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
+ pr_warn("coherent allocation failed for device %s size=%zu\n",
dev_name(hwdev), size);
dump_stack();
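
Review note: the pr_fmt() define at the top of the file is what lets the literal "software IO TLB: " prefixes be dropped from the individual messages, since printk.h routes every pr_*() format string through pr_fmt() before printing. For instance:

/* With the pr_fmt() above in effect: */
pr_warn("Cannot allocate buffer");
/* expands via printk.h to
 *   printk(KERN_WARNING "software IO TLB: " "Cannot allocate buffer")
 * so every message in the file picks up the prefix automatically. */
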
diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c
index 5241df36eedf92..dadcabe5098899 100644
--- a/lib/test-hexdump.c
+++ b/lib/test-hexdump.c
@@ -81,7 +81,7 @@ static void __init test_hexdump(size_t len, int rowsize, int groupsize,
const char *q = *result++;
size_t amount = strlen(q);
- strncpy(p, q, amount);
+ memcpy(p, q, amount);
p += amount + 1;
}
if (i)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index f9cee8e1233c0f..646009db41980f 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1345,9 +1345,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
return string(buf, end, NULL, spec);
switch (fmt[1]) {
- case 'r':
- return number(buf, end, clk_get_rate(clk), spec);
-
case 'n':
default:
#ifdef CONFIG_COMMON_CLK
diff --git a/mm/Kconfig b/mm/Kconfig
index 6768f28820f821..adaf395d5c78c1 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -697,3 +697,15 @@ config LOW_MEM_NOTIFY
is meant to be used in systems with no or very little swap space. In
the presence of large swap space, the system is likely to become
unusable before the OOM killer is triggered.
+
+config MM_METRICS
+ bool "Collect additional memory statistics"
+ help
+ Collect swap refault distances (seconds), swap read latencies and direct
+ reclaim latencies (nanoseconds). They are provided to userspace via
+ /sys/kernel/debug/mm_metrics/{swap_refault,swap_latency,reclaim_latency}
+ as histograms.
+ default n
+
+config ARCH_USES_HIGH_VMA_FLAGS
+ bool
diff --git a/mm/Makefile b/mm/Makefile
index d475ea0cbe7d16..7d9eae3edad81f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -21,6 +21,9 @@ KCOV_INSTRUMENT_memcontrol.o := n
KCOV_INSTRUMENT_mmzone.o := n
KCOV_INSTRUMENT_vmstat.o := n
+# Since __builtin_frame_address does work as used, disable the warning.
+CFLAGS_usercopy.o += $(call cc-disable-warning, frame-address)
+
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -98,3 +101,5 @@ obj-$(CONFIG_USERFAULTFD) += userfaultfd.o
obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o
obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_LOW_MEM_NOTIFY) += low-mem-notify.o
+obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
+obj-$(CONFIG_MM_METRICS) += metrics.o
diff --git a/mm/debug.c b/mm/debug.c
index 668aa35191ca12..689b6e911cae78 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -168,7 +168,7 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %p mmap %p seqnum %d task_size %lu\n"
+ pr_emerg("mm %p mmap %p seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %p\n"
#endif
@@ -198,7 +198,7 @@ void dump_mm(const struct mm_struct *mm)
#endif
"%s", /* This is here to hold the comma */
- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
diff --git a/mm/fadvise.c b/mm/fadvise.c
index b8a5bc66b0c09b..001877e32f0c57 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -68,8 +68,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
goto out;
}
- /* Careful about overflows. Len == 0 means "as much as possible" */
- endbyte = offset + len;
+ /*
+ * Careful about overflows. Len == 0 means "as much as possible". Use
+ * unsigned math because signed overflows are undefined and UBSan
+ * complains.
+ */
+ endbyte = (u64)offset + (u64)len;
if (!len || endbyte < len)
endbyte = -1;
else
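
Review note: loff_t is signed, so the old offset + len could overflow into undefined behaviour for hostile arguments; the u64 casts make the wraparound well-defined, and the existing endbyte < len test still catches it. A worked case:

/*
 * offset = LLONG_MAX, len = 2:
 *
 *   signed:   offset + len overflows loff_t -> undefined behaviour
 *   unsigned: (u64)offset + (u64)len = 0x8000000000000001, defined
 *
 * If the sum wraps past 2^64 it comes out smaller than len, so
 * "endbyte < len" fires and endbyte is pinned to -1, the existing
 * "as much as possible" convention.
 */
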
diff --git a/mm/filemap.c b/mm/filemap.c
index 21e750b6e810ff..7d1dba22c84fbc 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1979,8 +1979,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
} else if (!page) {
/* No page in the page cache at all */
do_sync_mmap_readahead(vma, ra, file, offset);
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ count_vm_event(PGMAJFAULT_F);
+ mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT_F);
ret = VM_FAULT_MAJOR;
retry_find:
page = find_get_page(mapping, offset);
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 7cf2b7163222b1..c1e7926a41c4de 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -11,10 +11,7 @@
* get_vaddr_frames() - map virtual addresses to pfns
* @start: starting user address
* @nr_frames: number of pages / pfns from start to map
- * @write: whether pages will be written to by the caller
- * @force: whether to force write access even if user mapping is
- * readonly. See description of the same argument of
- get_user_pages().
+ * @gup_flags: flags modifying lookup behaviour
* @vec: structure which receives pages / pfns of the addresses mapped.
* It should have space for at least nr_frames entries.
*
@@ -34,7 +31,7 @@
* This function takes care of grabbing mmap_sem as necessary.
*/
int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
- bool write, bool force, struct frame_vector *vec)
+ unsigned int gup_flags, struct frame_vector *vec)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
@@ -59,7 +56,7 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
vec->got_ref = true;
vec->is_pfns = false;
ret = get_user_pages_locked(current, mm, start, nr_frames,
- write, force, (struct page **)(vec->ptrs), &locked);
+ gup_flags, (struct page **)(vec->ptrs), &locked);
goto out;
}
diff --git a/mm/gup.c b/mm/gup.c
index 618451ad79486d..6976ed71627e0f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -368,6 +368,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;
+ if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+ return -EFAULT;
+
if (gup_flags & FOLL_WRITE) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
@@ -381,10 +384,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
* Anon pages in shared mappings are surprising: now
* just reject it.
*/
- if (!is_cow_mapping(vm_flags)) {
- WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+ if (!is_cow_mapping(vm_flags))
return -EFAULT;
- }
}
} else if (!(vm_flags & VM_READ)) {
if (!(gup_flags & FOLL_FORCE))
@@ -648,7 +649,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
struct mm_struct *mm,
unsigned long start,
unsigned long nr_pages,
- int write, int force,
struct page **pages,
struct vm_area_struct **vmas,
int *locked, bool notify_drop,
@@ -666,10 +666,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
if (pages)
flags |= FOLL_GET;
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
pages_done = 0;
lock_dropped = false;
@@ -763,11 +759,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
*/
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
int *locked)
{
- return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, NULL, locked, true, FOLL_TOUCH);
+ return __get_user_pages_locked(tsk, mm, start, nr_pages,
+ pages, NULL, locked, true,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_locked);
@@ -783,14 +780,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
*/
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
int locked = 1;
+
down_read(&mm->mmap_sem);
- ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, NULL, &locked, false, gup_flags);
+ ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+ &locked, false, gup_flags);
if (locked)
up_read(&mm->mmap_sem);
return ret;
@@ -816,10 +813,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
*/
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
+ struct page **pages, unsigned int gup_flags)
{
- return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
- force, pages, FOLL_TOUCH);
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+ pages, gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -879,11 +876,13 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
* FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long start, unsigned long nr_pages, int write,
- int force, struct page **pages, struct vm_area_struct **vmas)
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas)
{
- return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
- pages, vmas, NULL, false, FOLL_TOUCH);
+ return __get_user_pages_locked(tsk, mm, start, nr_pages,
+ pages, vmas, NULL, false,
+ gup_flags | FOLL_TOUCH);
}
EXPORT_SYMBOL(get_user_pages);
@@ -961,8 +960,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
int locked = 0;
long ret = 0;
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(len != PAGE_ALIGN(len));
end = start + len;
for (nstart = start; nstart < end; nstart = nend) {
@@ -1434,7 +1431,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
pages += nr;
ret = get_user_pages_unlocked(current, mm, start,
- nr_pages - nr, write, 0, pages);
+ nr_pages - nr, pages,
+ write ? FOLL_WRITE : 0);
/* Have to be a bit careful with return values */
if (nr > 0) {
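
Review note: this hunk set retires the long-standing write/force int pair throughout the gup API in favour of an explicit gup_flags argument. The mechanical mapping for callers is:

/* Before: boolean pair */
get_user_pages(tsk, mm, start, 1, /* write */ 1, /* force */ 0, &page, &vma);

/* After: flags, OR-ing FOLL_WRITE / FOLL_FORCE as needed */
get_user_pages(tsk, mm, start, 1, FOLL_WRITE, &page, &vma);
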
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 0127b788272f1f..465786cd6490ef 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1393,12 +1393,12 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
+ page_nid = -1;
if (!get_page_unless_zero(page))
goto out_unlock;
spin_unlock(ptl);
wait_on_page_locked(page);
put_page(page);
- page_nid = -1;
goto out;
}
@@ -1511,7 +1511,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
spinlock_t *old_ptl, *new_ptl;
int ret = 0;
pmd_t pmd;
-
+ bool force_flush = false;
struct mm_struct *mm = vma->vm_mm;
if ((old_addr & ~HPAGE_PMD_MASK) ||
@@ -1539,6 +1539,8 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
+ if (pmd_present(pmd))
+ force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
@@ -1547,6 +1549,8 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
}
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
+ if (force_flush)
+ flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
spin_unlock(old_ptl);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7294301d8495bc..324b2953e57e92 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2038,6 +2038,7 @@ static void __init gather_bootmem_prealloc(void)
*/
if (hstate_is_gigantic(h))
adjust_managed_page_count(page, 1 << h->order);
+ cond_resched();
}
}
@@ -3102,7 +3103,7 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
- pte_t *src_pte, *dst_pte, entry;
+ pte_t *src_pte, *dst_pte, entry, dst_entry;
struct page *ptepage;
unsigned long addr;
int cow;
@@ -3130,15 +3131,30 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
break;
}
- /* If the pagetables are shared don't copy or take references */
- if (dst_pte == src_pte)
+ /*
+ * If the pagetables are shared don't copy or take references.
+ * dst_pte == src_pte is the common case of src/dest sharing.
+ *
+ * However, src could have 'unshared' and dst shares with
+ * another vma. If dst_pte !none, this implies sharing.
+ * Check here before taking page table lock, and once again
+ * after taking the lock below.
+ */
+ dst_entry = huge_ptep_get(dst_pte);
+ if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
continue;
dst_ptl = huge_pte_lock(h, dst, dst_pte);
src_ptl = huge_pte_lockptr(h, src, src_pte);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
entry = huge_ptep_get(src_pte);
- if (huge_pte_none(entry)) { /* skip none entry */
+ dst_entry = huge_ptep_get(dst_pte);
+ if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
+ /*
+ * Skip if src entry none. Also, skip in the
+ * unlikely case dst entry !none as this implies
+ * sharing with another vma.
+ */
;
} else if (unlikely(is_hugetlb_entry_migration(entry) ||
is_hugetlb_entry_hwpoisoned(entry))) {
@@ -3456,7 +3472,6 @@ retry_avoidcopy:
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
- set_page_huge_active(new_page);
mmun_start = address & huge_page_mask(h);
mmun_end = mmun_start + huge_page_size(h);
@@ -3478,6 +3493,7 @@ retry_avoidcopy:
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
+ set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
@@ -3536,6 +3552,12 @@ int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
return err;
ClearPagePrivate(page);
+ /*
+ * set page dirty so that it will not be removed from cache/file
+ * by non-hugetlbfs specific code paths.
+ */
+ set_page_dirty(page);
+
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
@@ -3553,6 +3575,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page;
pte_t new_pte;
spinlock_t *ptl;
+ bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -3586,7 +3609,7 @@ retry:
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
- set_page_huge_active(page);
+ new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3658,6 +3681,15 @@ retry:
}
spin_unlock(ptl);
+
+ /*
+ * Only make newly allocated pages active. Existing pages found
+ * in the pagecache could be !page_huge_active() if they have been
+ * isolated for migration.
+ */
+ if (new_page)
+ set_page_huge_active(page);
+
unlock_page(page);
out:
return ret;
@@ -4031,6 +4063,14 @@ int hugetlb_reserve_pages(struct inode *inode,
struct resv_map *resv_map;
long gbl_reserve;
+ /* This should never happen */
+ if (from > to) {
+#ifdef CONFIG_DEBUG_VM
+ WARN(1, "%s called with a negative range\n", __func__);
+#endif
+ return -EINVAL;
+ }
+
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
@@ -4120,7 +4160,9 @@ int hugetlb_reserve_pages(struct inode *inode,
return 0;
out_err:
if (!vma || vma->vm_flags & VM_MAYSHARE)
- region_abort(resv_map, from, to);
+ /* Don't call region_abort if region_chg failed */
+ if (chg >= 0)
+ region_abort(resv_map, from, to);
if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
kref_put(&resv_map->refs, resv_map_release);
return ret;
@@ -4194,13 +4236,41 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
/*
* check on proper vm_flags and page table alignment
*/
- if (vma->vm_flags & VM_MAYSHARE &&
- vma->vm_start <= base && end <= vma->vm_end)
+ if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
return true;
return false;
}
/*
+ * Determine if start,end range within vma could be mapped by shared pmd.
+ * If yes, adjust start and end to cover range associated with possible
+ * shared pmd mappings.
+ */
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ unsigned long check_addr = *start;
+
+ if (!(vma->vm_flags & VM_MAYSHARE))
+ return;
+
+ for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
+ unsigned long a_start = check_addr & PUD_MASK;
+ unsigned long a_end = a_start + PUD_SIZE;
+
+ /*
+ * If sharing is possible, adjust start/end if necessary.
+ */
+ if (range_in_vma(vma, a_start, a_end)) {
+ if (a_start < *start)
+ *start = a_start;
+ if (a_end > *end)
+ *end = a_end;
+ }
+ }
+}
+
+/*
* Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
* and returns the corresponding pte. While this is not necessary for the
* !shared pmd case because we can allocate the pmd later as well, it makes the
@@ -4296,6 +4366,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
return 0;
}
+
+void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
#define want_pmd_share() (0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 3349b60da591fe..9c5e4d5bdcb556 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -650,12 +650,13 @@ void kasan_kfree_large(const void *ptr)
int kasan_module_alloc(void *addr, size_t size)
{
void *ret;
+ size_t scaled_size;
size_t shadow_size;
unsigned long shadow_start;
shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
- shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
- PAGE_SIZE);
+ scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+ shadow_size = round_up(scaled_size, PAGE_SIZE);
if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
return -EINVAL;
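
Review note: shifting before rounding drops the sub-granule remainder, so a module whose size is not a multiple of the shadow granule could get a shadow mapping one page short. With KASAN_SHADOW_SCALE_SHIFT = 3 (one shadow byte per 8 bytes) and 4K pages:

/*
 * size = 8 * PAGE_SIZE + 1  ->  needs PAGE_SIZE + 1 shadow bytes
 *
 *   old: round_up(size >> 3, PAGE_SIZE)
 *      = round_up(PAGE_SIZE, PAGE_SIZE)      = PAGE_SIZE      (short!)
 *   new: round_up((size + 7) >> 3, PAGE_SIZE)
 *      = round_up(PAGE_SIZE + 1, PAGE_SIZE)  = 2 * PAGE_SIZE  (correct)
 */
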
diff --git a/mm/low-mem-notify.c b/mm/low-mem-notify.c
index 0a7b07162314cc..ae0be3de408e02 100644
--- a/mm/low-mem-notify.c
+++ b/mm/low-mem-notify.c
@@ -29,21 +29,28 @@
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/ctype.h>
+
+#define MB (1 << 20)
static DECLARE_WAIT_QUEUE_HEAD(low_mem_wait);
static atomic_t low_mem_state = ATOMIC_INIT(0);
-unsigned low_mem_margin_mb = 50;
+/* This is a list of thresholds in pages and should be in ascending order. */
+unsigned long low_mem_thresholds[LOW_MEM_THRESHOLD_MAX] = {
+ 50 * MB / PAGE_SIZE };
+unsigned int low_mem_threshold_count = 1;
+
+/* last observed threshold */
+unsigned int low_mem_threshold_last = UINT_MAX;
bool low_mem_margin_enabled = true;
-unsigned long low_mem_minfree;
unsigned int low_mem_ram_vs_swap_weight = 4;
-/*
- * We're interested in worst-case anon memory usage when the low-memory
- * notification fires. To contain logging, we limit our interest to
- * non-trivial steps.
- */
+/* Limit logging low memory to once per second. */
+DEFINE_RATELIMIT_STATE(low_mem_logging_ratelimit, 1 * HZ, 1);
+
unsigned long low_mem_lowest_seen_anon_mem;
-const unsigned long low_mem_anon_mem_delta = 10 * 1024 * 1024 / PAGE_SIZE;
+const unsigned long low_mem_anon_mem_delta = 10 * MB / PAGE_SIZE;
+static struct kernfs_node *low_mem_available_dirent;
struct low_mem_notify_file_info {
unsigned long unused;
@@ -82,7 +89,7 @@ static unsigned int low_mem_notify_poll(struct file *file, poll_table *wait)
unsigned int ret = 0;
/* Update state to reflect any recent freeing. */
- atomic_set(&low_mem_state, is_low_mem_situation());
+ atomic_set(&low_mem_state, low_mem_check());
poll_wait(file, &low_mem_wait, wait);
@@ -109,23 +116,29 @@ EXPORT_SYMBOL(low_mem_notify_fops);
static ssize_t low_mem_margin_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- if (low_mem_margin_enabled)
- return sprintf(buf, "%u\n", low_mem_margin_mb);
- else
+ if (low_mem_margin_enabled && low_mem_threshold_count) {
+ int i;
+ ssize_t written = 0;
+
+ for (i = 0; i < low_mem_threshold_count; i++)
+ written += sprintf(buf + written, "%lu ",
+ low_mem_thresholds[i] * PAGE_SIZE / MB);
+ written += sprintf(buf + written, "\n");
+ return written;
+ } else
return sprintf(buf, "off\n");
}
-static unsigned low_mem_margin_to_minfree(unsigned margin_mb)
-{
- return margin_mb * (1024 * 1024 / PAGE_SIZE);
-}
-
static ssize_t low_mem_margin_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- int err;
- unsigned long margin;
+ int i = 0, consumed = 0;
+ const char *start = buf;
+ char *endp;
+ unsigned long thresholds[LOW_MEM_THRESHOLD_MAX];
+
+ memset(thresholds, 0, sizeof(thresholds));
/*
* Even though the API does not say anything about this, the string in
* buf is zero-terminated (as long as count < PAGE_SIZE) because buf is
@@ -142,19 +155,51 @@ static ssize_t low_mem_margin_store(struct kobject *kobj,
low_mem_margin_enabled = true;
return count;
}
+ /*
+ * This takes a space separated list of thresholds in ascending order,
+ * and a trailing newline is optional.
+ */
+ while (consumed < count) {
+ if (i >= LOW_MEM_THRESHOLD_MAX) {
+ pr_warn("low-mem: too many thresholds");
+ return -EINVAL;
+ }
+ /* special case for trailing newline */
+ if (*start == '\n')
+ break;
+
+ thresholds[i] = simple_strtoul(start, &endp, 0);
+ if ((endp == start) && *endp != '\n')
+ return -EINVAL;
+
+ /* make sure each is larger than the last one */
+ if (i && thresholds[i] <= thresholds[i - 1]) {
+ pr_warn("low-mem: thresholds not in increasing order: %lu then %lu\n",
+ thresholds[i - 1], thresholds[i]);
+ return -EINVAL;
+ }
+
+ if (thresholds[i] * (MB / PAGE_SIZE) > totalram_pages) {
+ pr_warn("low-mem: threshold too high\n");
+ return -EINVAL;
+ }
+
+ consumed += endp - start + 1;
+ start = endp + 1;
+ i++;
+ }
+
+ low_mem_threshold_count = i;
+ low_mem_margin_enabled = !!low_mem_threshold_count;
- err = kstrtoul(buf, 10, &margin);
- if (err)
- return -EINVAL;
- if (margin * ((1024 * 1024) / PAGE_SIZE) > totalram_pages)
- return -EINVAL;
- /* Notify when the "free" memory is below margin megabytes. */
- low_mem_margin_enabled = true;
- low_mem_margin_mb = (unsigned int) margin;
/* Convert to pages outside the allocator fast path. */
- low_mem_minfree = low_mem_margin_to_minfree(low_mem_margin_mb);
- pr_info("low_mem: setting minfree to %lu kB\n",
- low_mem_minfree * (PAGE_SIZE / 1024));
+ for (i = 0; i < low_mem_threshold_count; i++) {
+ low_mem_thresholds[i] =
+ thresholds[i] * (MB / PAGE_SIZE);
+ pr_info("low_mem: threshold[%d] %lu MB\n", i,
+ low_mem_thresholds[i] * PAGE_SIZE / MB);
+ }
+
return count;
}
LOW_MEM_ATTR(margin);
@@ -189,11 +234,10 @@ static ssize_t low_mem_available_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
- const int lru_base = NR_LRU_BASE - LRU_BASE;
- unsigned long available_mem = get_available_mem_adj(lru_base);
+ unsigned long available_mem = get_available_mem_adj();
return sprintf(buf, "%lu\n",
- available_mem / (1024 * 1024 / PAGE_SIZE));
+ available_mem / (MB / PAGE_SIZE));
}
static ssize_t low_mem_available_store(struct kobject *kobj,
@@ -216,12 +260,29 @@ static struct attribute_group low_mem_attr_group = {
.name = "chromeos-low_mem",
};
+void low_mem_threshold_notify(void)
+{
+ if (low_mem_available_dirent)
+ sysfs_notify_dirent(low_mem_available_dirent);
+}
+
static int __init low_mem_init(void)
{
+ struct kernfs_node *low_mem_node;
int err = sysfs_create_group(mm_kobj, &low_mem_attr_group);
if (err)
pr_err("low_mem: register sysfs failed\n");
- low_mem_minfree = low_mem_margin_to_minfree(low_mem_margin_mb);
+
+ low_mem_node = sysfs_get_dirent(mm_kobj->sd, "chromeos-low_mem");
+ if (low_mem_node) {
+ low_mem_available_dirent =
+ sysfs_get_dirent(low_mem_node, "available");
+ sysfs_put(low_mem_node);
+ }
+
+ if (!low_mem_available_dirent)
+ pr_warn("unable to find dirent for \"available\" attribute\n");
+
low_mem_lowest_seen_anon_mem = totalram_pages;
return err;
}
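
Review note: the margin file moves from a single megabyte value to a space-separated ascending list. A sketch of the resulting interface, as read off low_mem_margin_store()/low_mem_margin_show() above:

/*
 * Interface sketch for the new multi-threshold margin file:
 *
 *   echo "50 100 200" > /sys/kernel/mm/chromeos-low_mem/margin
 *
 * Values are in MB and must be strictly ascending, at most
 * LOW_MEM_THRESHOLD_MAX of them; reading the file back prints the
 * configured list, or "off" when the margin is disabled.
 */
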
diff --git a/mm/madvise.c b/mm/madvise.c
index b04f2d26cdb8be..a303d46537be1c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -72,11 +72,22 @@ static long madvise_behavior(struct vm_area_struct *vma,
}
new_flags &= ~VM_DONTCOPY;
break;
+ case MADV_WIPEONFORK:
+ /* MADV_WIPEONFORK is only supported on anonymous memory. */
+ if (vma->vm_file || vma->vm_flags & VM_SHARED) {
+ error = -EINVAL;
+ goto out;
+ }
+ new_flags |= VM_WIPEONFORK;
+ break;
+ case MADV_KEEPONFORK:
+ new_flags &= ~VM_WIPEONFORK;
+ break;
case MADV_DONTDUMP:
new_flags |= VM_DONTDUMP;
break;
case MADV_DODUMP:
- if (new_flags & VM_SPECIAL) {
+ if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
error = -EINVAL;
goto out;
}
@@ -406,6 +417,8 @@ madvise_behavior_valid(int behavior)
#endif
case MADV_DONTDUMP:
case MADV_DODUMP:
+ case MADV_WIPEONFORK:
+ case MADV_KEEPONFORK:
return true;
default:
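
Review note: a userspace sketch of the new flags, assuming the uapi headers in use expose MADV_WIPEONFORK — the child observes the marked anonymous region zeroed while the parent's copy is untouched:

#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>
#include <assert.h>

int main(void)
{
        char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        p[0] = 42;
        madvise(p, 4096, MADV_WIPEONFORK);
        if (fork() == 0) {
                assert(p[0] == 0);      /* wiped: child sees zero-fill */
                _exit(0);
        }
        wait(NULL);
        assert(p[0] == 42);             /* parent data untouched */
        return 0;
}

MADV_KEEPONFORK clears the flag again, and per the check above, file-backed or shared mappings are rejected with -EINVAL.
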
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 55a9facb8e8ddd..c1dcef116aaf4f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -102,6 +102,9 @@ static const char * const mem_cgroup_events_names[] = {
"pgpgout",
"pgfault",
"pgmajfault",
+ "pgmajfault_s",
+ "pgmajfault_a",
+ "pgmajfault_f",
};
static const char * const mem_cgroup_lru_names[] = {
@@ -996,7 +999,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
int nid, zid;
int i;
- while ((memcg = parent_mem_cgroup(memcg))) {
+ for (; memcg; memcg = parent_mem_cgroup(memcg)) {
for_each_node(nid) {
for (zid = 0; zid < MAX_NR_ZONES; zid++) {
mz = &memcg->nodeinfo[nid]->zoneinfo[zid];
diff --git a/mm/memory.c b/mm/memory.c
index ad7fc7d56df8bc..6647c5df12f0b7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -62,6 +62,7 @@
#include <linux/dma-debug.h>
#include <linux/debugfs.h>
#include <linux/userfaultfd_k.h>
+#include <linux/mm_metrics.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
@@ -361,15 +362,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
- /*
- * When there's less then two users of this mm there cannot be a
- * concurrent page-table walk.
- */
- if (atomic_read(&tlb->mm->mm_users) < 2) {
- __tlb_remove_table(table);
- return;
- }
-
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
@@ -1605,8 +1597,29 @@ out:
int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
+ return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers
+ * to override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings. In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot)
+{
int ret;
- pgprot_t pgprot = vma->vm_page_prot;
/*
* Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like
@@ -1624,19 +1637,29 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
if (track_pfn_insert(vma, &pgprot, pfn))
return -EINVAL;
+ if (!pfn_modify_allowed(pfn, pgprot))
+ return -EACCES;
+
ret = insert_pfn(vma, addr, pfn, pgprot);
return ret;
}
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
+ pgprot_t pgprot = vma->vm_page_prot;
+
BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
+ if (track_pfn_insert(vma, &pgprot, pfn))
+ return -EINVAL;
+
+ if (!pfn_modify_allowed(pfn, pgprot))
+ return -EACCES;
/*
* If we don't have pte special, then we have to use the pfn_valid()
@@ -1649,9 +1672,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
struct page *page;
page = pfn_to_page(pfn);
- return insert_page(vma, addr, page, vma->vm_page_prot);
+ return insert_page(vma, addr, page, pgprot);
}
- return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+ return insert_pfn(vma, addr, pfn, pgprot);
}
EXPORT_SYMBOL(vm_insert_mixed);
@@ -1666,6 +1689,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
{
pte_t *pte;
spinlock_t *ptl;
+ int err = 0;
pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
if (!pte)
@@ -1673,12 +1697,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
arch_enter_lazy_mmu_mode();
do {
BUG_ON(!pte_none(*pte));
+ if (!pfn_modify_allowed(pfn, prot)) {
+ err = -EACCES;
+ break;
+ }
set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
pfn++;
} while (pte++, addr += PAGE_SIZE, addr != end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(pte - 1, ptl);
- return 0;
+ return err;
}
static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1687,6 +1715,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
{
pmd_t *pmd;
unsigned long next;
+ int err;
pfn -= addr >> PAGE_SHIFT;
pmd = pmd_alloc(mm, pud, addr);
@@ -1695,9 +1724,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
VM_BUG_ON(pmd_trans_huge(*pmd));
do {
next = pmd_addr_end(addr, end);
- if (remap_pte_range(mm, pmd, addr, next,
- pfn + (addr >> PAGE_SHIFT), prot))
- return -ENOMEM;
+ err = remap_pte_range(mm, pmd, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot);
+ if (err)
+ return err;
} while (pmd++, addr = next, addr != end);
return 0;
}
@@ -1708,6 +1738,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
{
pud_t *pud;
unsigned long next;
+ int err;
pfn -= addr >> PAGE_SHIFT;
pud = pud_alloc(mm, pgd, addr);
@@ -1715,9 +1746,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
return -ENOMEM;
do {
next = pud_addr_end(addr, end);
- if (remap_pmd_range(mm, pud, addr, next,
- pfn + (addr >> PAGE_SHIFT), prot))
- return -ENOMEM;
+ err = remap_pmd_range(mm, pud, addr, next,
+ pfn + (addr >> PAGE_SHIFT), prot);
+ if (err)
+ return err;
} while (pud++, addr = next, addr != end);
return 0;
}
@@ -2284,11 +2316,6 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
page_cache_get(old_page);
- /*
- * Only catch write-faults on shared writable pages,
- * read-only shared pages can get COWed by
- * get_user_pages(.write=1, .force=1).
- */
if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
int tmp;
@@ -2511,6 +2538,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
int locked;
int exclusive = 0;
int ret = 0;
+ u64 start = 0;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
goto out;
@@ -2528,8 +2556,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out;
}
delayacct_set_flag(DELAYACCT_PF_SWAPIN);
+ mm_metrics_swapin(entry);
page = lookup_swap_cache(entry);
if (!page) {
+ start = mm_metrics_swapin_start();
page = swapin_readahead(entry,
GFP_HIGHUSER_MOVABLE, vma, address);
if (!page) {
@@ -2546,8 +2576,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Had to read the page from swap area: Major fault */
ret = VM_FAULT_MAJOR;
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(mm, PGMAJFAULT);
+ count_vm_event(PGMAJFAULT_A);
+ mem_cgroup_count_vm_event(mm, PGMAJFAULT_A);
} else if (PageHWPoison(page)) {
/*
* hwpoisoned dirty swapcache pages are kept for killing
@@ -2562,6 +2592,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swapcache = page;
locked = lock_page_or_retry(page, mm, flags);
+ mm_metrics_swapin_end(start);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
if (!locked) {
ret |= VM_FAULT_RETRY;
@@ -2574,7 +2605,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
* test below, are not enough to exclude that. Even if it is still
* swapcache, we need to check that the page's swap has not changed.
*/
- if (unlikely(!PageSwapCache(page) || page_private(page) != entry.val))
+ if (unlikely(!PageSwapCache(page) || !swp_page_same(entry, page)))
goto out_page;
page = ksm_might_need_to_copy(page, vma, address);
@@ -3661,6 +3692,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
return -EINVAL;
maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+ if (!maddr)
+ return -ENOMEM;
+
if (write)
memcpy_toio(maddr + offset, buf, len);
else
@@ -3677,10 +3711,11 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
* given task for page fault accounting.
*/
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, int write)
+ unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
void *old_buf = buf;
+ int write = gup_flags & FOLL_WRITE;
down_read(&mm->mmap_sem);
/* ignore errors, just check how much was successfully transferred */
@@ -3690,7 +3725,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct page *page = NULL;
ret = get_user_pages(tsk, mm, addr, 1,
- write, 1, &page, &vma);
+ gup_flags, &page, &vma);
if (ret <= 0) {
#ifndef CONFIG_HAVE_IOREMAP_PROT
break;
@@ -3742,14 +3777,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
- * @write: whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}
/*
@@ -3762,12 +3797,17 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr,
{
struct mm_struct *mm;
int ret;
+ unsigned int flags = FOLL_FORCE;
mm = get_task_mm(tsk);
if (!mm)
return 0;
- ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+ if (write)
+ flags |= FOLL_WRITE;
+
+ ret = __access_remote_vm(tsk, mm, addr, buf, len, flags);
+
mmput(mm);
return ret;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a18923e4359d94..804cbfe9132dd0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -32,6 +32,7 @@
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
+#include <linux/rmap.h>
#include <asm/tlbflush.h>
@@ -1357,7 +1358,8 @@ static struct page *next_active_pageblock(struct page *page)
int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
struct page *page = pfn_to_page(start_pfn);
- struct page *end_page = page + nr_pages;
+ unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
+ struct page *end_page = pfn_to_page(end_pfn);
/* Check the starting page of each pageblock within the range */
for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1397,6 +1399,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
i++;
if (i == MAX_ORDER_NR_PAGES)
continue;
+ /* Check if we got outside of the zone */
+ if (zone && !zone_spans_pfn(zone, pfn + i))
+ return 0;
page = pfn_to_page(pfn + i);
if (zone && page_zone(page) != zone)
return 0;
@@ -1471,6 +1476,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
continue;
}
+ /*
+ * HWPoison pages have elevated reference counts so the migration would
+ * fail on them. It also doesn't make any sense to migrate them in the
+ * first place. Still try to unmap such a page in case it is still mapped
+ * (e.g. current hwpoison implementation doesn't unmap KSM pages but keep
+ * the unmap as the catch all safety net).
+ */
+ if (PageHWPoison(page)) {
+ if (WARN_ON(PageLRU(page)))
+ isolate_lru_page(page);
+ if (page_mapped(page))
+ try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+ continue;
+ }
+
if (!get_page_unless_zero(page))
continue;
/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 878fba5c58eb89..e90f515f621e64 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -819,7 +819,7 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
struct page *p;
int err;
- err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, 0, &p, NULL);
+ err = get_user_pages(current, mm, addr & PAGE_MASK, 1, 0, &p, NULL);
if (err >= 0) {
err = page_to_nid(p);
put_page(p);
@@ -1296,7 +1296,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
@@ -1457,7 +1457,7 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
int uninitialized_var(pval);
nodemask_t nodes;
- if (nmask != NULL && maxnode < MAX_NUMNODES)
+ if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1486,7 +1486,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask)
@@ -2011,8 +2011,36 @@ retry_cpuset:
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(hpage_node, *nmask)) {
mpol_cond_put(pol);
- page = __alloc_pages_node(hpage_node,
- gfp | __GFP_THISNODE, order);
+ /*
+ * We cannot invoke reclaim if __GFP_THISNODE
+ * is set. Invoking reclaim with
+ * __GFP_THISNODE set, would cause THP
+ * allocations to trigger heavy swapping
+ * despite there may be tons of free memory
+ * (including potentially plenty of THP
+ * already available in the buddy) on all the
+ * other NUMA nodes.
+ *
+ * At most we could invoke compaction when
+ * __GFP_THISNODE is set (but we would need to
+ * refrain from invoking reclaim even if
+ * compaction returned COMPACT_SKIPPED because
+ * there wasn't enough memory to succeed
+ * compaction). For now just avoid
+ * __GFP_THISNODE instead of limiting the
+ * allocation path to a strict and single
+ * compaction invocation.
+ *
+ * Supposedly if direct reclaim was enabled by
+ * the caller, the app prefers THP regardless
+ * of the node it comes from so this would be
+ * more desirable behavior than only
+ * providing THP originated from the local
+ * node in such case.
+ */
+ if (!(gfp & __GFP_DIRECT_RECLAIM))
+ gfp |= __GFP_THISNODE;
+ page = __alloc_pages_node(hpage_node, gfp, order);
goto out;
}
}
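
Review note: the net effect of the new branch on the THP allocation gfp for the preferred node, as a decision table:

/*
 *   caller allows direct reclaim (madvised/defrag): gfp unchanged
 *      -> may fall back to other nodes rather than swap heavily on
 *         the local one
 *   caller forbids direct reclaim:                  gfp |= __GFP_THISNODE
 *      -> stays local and fails fast, preserving the old behaviour
 *         for light allocation attempts
 */
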
diff --git a/mm/metrics.c b/mm/metrics.c
new file mode 100644
index 00000000000000..81f3d9f41f4348
--- /dev/null
+++ b/mm/metrics.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/swapfile.h>
+#include <linux/debugfs.h>
+#include <linux/mm_metrics.h>
+
+/* make sure swapout timestamp won't wrap around within a year */
+#define SECONDS_PER_YEAR (60 * 60 * 24 * 365)
+/* max number of buckets for histogram */
+#define MAX_HISTOGRAM_SIZE 100
+/* max number of digits in decimal for threshold plus one space */
+#define MAX_CHARS_PER_THRESHOLD (20 + 1)
+
+bool swap_refault_enabled __read_mostly;
+struct histogram __rcu *mm_metrics_files[NR_MM_METRICS];
+
+static const char *const mm_metrics_names[] = {
+ "swap_refault",
+ "swap_latency",
+ "reclaim_latency",
+};
+
+static DEFINE_SPINLOCK(histogram_lock);
+
+static struct histogram *histogram_alloc(const u64 *thresholds,
+ unsigned int size)
+{
+ int i;
+ int len;
+ struct histogram *hist;
+
+ VM_BUG_ON(!size || size > MAX_HISTOGRAM_SIZE);
+
+ len = sizeof(struct histogram) + size * sizeof(*hist->thresholds);
+ hist = kmalloc(len, GFP_ATOMIC);
+ if (!hist)
+ return ERR_PTR(-ENOMEM);
+
+ len = size * sizeof(*hist->buckets);
+ hist->buckets = __alloc_percpu_gfp(len, __alignof__(*hist->buckets),
+ GFP_ATOMIC);
+ if (!hist->buckets) {
+ kfree(hist);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ hist->size = size;
+ for (i = 0; i < size; i++) {
+ VM_BUG_ON(i && thresholds[i - 1] >= thresholds[i]);
+
+ hist->thresholds[i] = thresholds[i];
+ }
+ VM_BUG_ON(thresholds[i - 1] != U64_MAX);
+
+ return hist;
+}
+
+static struct histogram *histogram_create(char *buf)
+{
+ int i;
+ unsigned int size;
+ u64 *thresholds;
+ struct histogram *hist;
+
+ if (!*buf)
+ return ERR_PTR(-EINVAL);
+
+ thresholds = kmalloc_array(MAX_HISTOGRAM_SIZE, sizeof(*thresholds),
+ GFP_KERNEL);
+ if (!thresholds)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < MAX_HISTOGRAM_SIZE; i++) {
+ thresholds[i] = simple_strtoull(buf, &buf, 0);
+ if (!*buf)
+ break;
+
+ if (!isspace(*buf)) {
+ hist = ERR_PTR(-EINVAL);
+ goto failed;
+ }
+
+ while (isspace(*buf))
+ buf++;
+ }
+
+ if (i == MAX_HISTOGRAM_SIZE) {
+ hist = ERR_PTR(-E2BIG);
+ goto failed;
+ }
+
+ /* the last threshold must be U64_MAX, add it if missing */

+ if (thresholds[i++] != U64_MAX) {
+ if (i == MAX_HISTOGRAM_SIZE) {
+ hist = ERR_PTR(-E2BIG);
+ goto failed;
+ }
+ thresholds[i++] = U64_MAX;
+ }
+
+ size = i;
+
+ for (i = 1; i < size; i++) {
+ if (thresholds[i - 1] >= thresholds[i]) {
+ hist = ERR_PTR(-EINVAL);
+ goto failed;
+ }
+ }
+
+ hist = histogram_alloc(thresholds, size);
+failed:
+ kfree(thresholds);
+
+ return hist;
+}
+
+static void histogram_free(struct rcu_head *rcu)
+{
+ struct histogram *hist = container_of(rcu, struct histogram, rcu);
+
+ VM_BUG_ON(!hist->size || hist->size > MAX_HISTOGRAM_SIZE);
+
+ free_percpu(hist->buckets);
+ kfree(hist);
+}
+
+static int mm_metrics_read(struct seq_file *sf, void *v)
+{
+ int i;
+ int cpu;
+ u64 *buckets;
+ struct histogram *hist;
+ int rc = 0;
+ unsigned int type = (unsigned long)sf->private;
+
+ VM_BUG_ON(type >= NR_MM_METRICS);
+
+ rcu_read_lock();
+
+ hist = rcu_dereference(mm_metrics_files[type]);
+ if (!hist) {
+ seq_puts(sf, "disabled\n");
+ goto unlock;
+ }
+
+ VM_BUG_ON(!hist->size || hist->size > MAX_HISTOGRAM_SIZE);
+
+ buckets = kmalloc_array(hist->size, sizeof(*buckets), GFP_NOWAIT);
+ if (!buckets) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
+ memset(buckets, 0, hist->size * sizeof(*buckets));
+
+ for_each_possible_cpu(cpu) {
+ for (i = 0; i < hist->size; i++)
+ buckets[i] += per_cpu(hist->buckets[i], cpu);
+ }
+
+ for (i = 0; i < hist->size; i++) {
+ u64 lower = i ? hist->thresholds[i - 1] + 1 : 0;
+ u64 upper = hist->thresholds[i];
+
+ VM_BUG_ON(lower > upper);
+
+ seq_printf(sf, "%llu-%llu %llu\n", lower, upper, buckets[i]);
+ }
+ VM_BUG_ON(hist->thresholds[i - 1] != U64_MAX);
+
+ kfree(buckets);
+unlock:
+ rcu_read_unlock();
+
+ return rc;
+}
+
+static ssize_t mm_metrics_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ char *raw, *trimmed;
+ struct histogram *old, *new = NULL;
+ unsigned int type = (unsigned long)file_inode(file)->i_private;
+
+ VM_BUG_ON(type >= NR_MM_METRICS);
+
+ if (len > MAX_HISTOGRAM_SIZE * MAX_CHARS_PER_THRESHOLD)
+ return -E2BIG;
+
+ raw = memdup_user_nul(buf, len);
+ if (IS_ERR(raw))
+ return PTR_ERR(raw);
+
+ trimmed = strim(raw);
+ if (!strcmp(trimmed, "clear")) {
+ rcu_read_lock();
+ old = rcu_dereference(mm_metrics_files[type]);
+ if (old)
+ new = histogram_alloc(old->thresholds, old->size);
+ rcu_read_unlock();
+ } else if (strcmp(trimmed, "disable"))
+ new = histogram_create(trimmed);
+
+ kfree(raw);
+
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ spin_lock(&histogram_lock);
+ old = rcu_dereference_protected(mm_metrics_files[type],
+ lockdep_is_held(&histogram_lock));
+ rcu_assign_pointer(mm_metrics_files[type], new);
+ spin_unlock(&histogram_lock);
+ if (old)
+ call_rcu(&old->rcu, histogram_free);
+
+ return len;
+}
+
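The update path above is the standard RCU replace idiom: publish the new histogram under histogram_lock, then retire the displaced one after a grace period so lockless readers never dereference freed memory. A stripped-down sketch of the idiom, using a hypothetical struct cfg rather than the histogram itself:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cfg {
	struct rcu_head rcu;
	int value;
};

static struct cfg __rcu *active_cfg;
static DEFINE_SPINLOCK(cfg_lock);

static void cfg_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct cfg, rcu));
}

static void cfg_replace(struct cfg *new)
{
	struct cfg *old;

	spin_lock(&cfg_lock);
	old = rcu_dereference_protected(active_cfg,
					lockdep_is_held(&cfg_lock));
	rcu_assign_pointer(active_cfg, new);	/* publish the new copy */
	spin_unlock(&cfg_lock);
	if (old)
		call_rcu(&old->rcu, cfg_free);	/* free after readers drain */
}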
+static int mm_metrics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mm_metrics_read, inode->i_private);
+}
+
+static const struct file_operations mm_metrics_ops = {
+ .open = mm_metrics_open,
+ .write = mm_metrics_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init mm_metrics_init(void)
+{
+ int i;
+ struct dentry *dent;
+#ifdef CONFIG_SWAP
+ unsigned long now = ktime_get_seconds();
+ unsigned long size = max_swapfile_size();
+
+ if (SWP_TM_OFF_BITS > FIELD_SIZEOF(swp_entry_t, val) * BITS_PER_BYTE)
+ pr_err("swap refault metrics disabled: 32-bit CPU\n");
+ else if (size < GENMASK_ULL(SWP_TM_OFF_BITS - 1, 0) + 1)
+ pr_err("swap refault metrics disabled: size %ld\n", size);
+ else if (now + SECONDS_PER_YEAR > BIT_ULL(SWP_TIME_BITS))
+ pr_err("swap refault metrics disabled: time %ld\n", now);
+ else
+ swap_refault_enabled = true;
+#endif
+
+ BUILD_BUG_ON(ARRAY_SIZE(mm_metrics_names) != NR_MM_METRICS);
+
+ if (!debugfs_initialized())
+ return -ENODEV;
+
+ dent = debugfs_create_dir("mm_metrics", NULL);
+ if (!dent)
+ return -ENODEV;
+
+ for (i = 0; i < NR_MM_METRICS; i++) {
+ struct dentry *fent;
+
+ if (i == MM_SWAP_REFAULT && !swap_refault_enabled)
+ continue;
+
+ fent = debugfs_create_file(mm_metrics_names[i], 0644, dent,
+ (void *)(long)i, &mm_metrics_ops);
+ if (IS_ERR_OR_NULL(fent)) {
+ debugfs_remove_recursive(dent);
+
+ return -ENODEV;
+ }
+ }
+
+ pr_info("memory metrics initialized\n");
+
+ return 0;
+}
+subsys_initcall(mm_metrics_init);
+
+void mm_metrics_record(unsigned int type, u64 val, u64 count)
+{
+ int lower, upper;
+ struct histogram *hist;
+
+ VM_BUG_ON(type >= NR_MM_METRICS);
+
+ rcu_read_lock();
+
+ hist = rcu_dereference(mm_metrics_files[type]);
+ if (!hist)
+ goto unlock;
+
+ VM_BUG_ON(!hist->size || hist->size > MAX_HISTOGRAM_SIZE);
+
+ lower = 0;
+ upper = hist->size - 1;
+ while (lower < upper) {
+ int i = (lower + upper) >> 1;
+
+ if (val <= hist->thresholds[i])
+ upper = i;
+ else
+ lower = i + 1;
+ }
+
+ this_cpu_add(hist->buckets[upper], count);
+unlock:
+ rcu_read_unlock();
+}
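The lookup above is a lower-bound binary search over the ascending thresholds; because histogram_alloc() guarantees the last threshold is U64_MAX, every value lands in some bucket. The same search as a standalone, testable sketch:

#include <assert.h>
#include <stdint.h>

/* Return the first index whose threshold is >= val. */
static int bucket_index(const uint64_t *thresholds, int size, uint64_t val)
{
	int lower = 0, upper = size - 1;

	while (lower < upper) {
		int mid = lower + (upper - lower) / 2;

		if (val <= thresholds[mid])
			upper = mid;
		else
			lower = mid + 1;
	}
	return upper;
}

int main(void)
{
	const uint64_t t[] = { 100, 1000, UINT64_MAX };

	assert(bucket_index(t, 3, 0) == 0);
	assert(bucket_index(t, 3, 101) == 1);
	assert(bucket_index(t, 3, 5000) == 2);
	return 0;
}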
diff --git a/mm/migrate.c b/mm/migrate.c
index afedcfab60e232..73da75d5e5b225 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -936,6 +936,7 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
int rc = MIGRATEPAGE_SUCCESS;
int *result = NULL;
struct page *newpage;
+ bool is_lru = !isolated_balloon_page(page);
newpage = get_new_page(page, private, &result);
if (!newpage)
@@ -983,11 +984,13 @@ out:
/*
* If migration was not successful and there's a freeing callback, use
* it. Otherwise, putback_lru_page() will drop the reference grabbed
- * during isolation.
+ * during isolation. Use the old state of the isolated source page to
+ * determine if we migrated a LRU page. newpage was already unlocked
+ * and possibly modified by its owner - don't rely on the page state.
*/
if (put_new_page)
put_new_page(newpage, private);
- else if (unlikely(__is_movable_balloon_page(newpage))) {
+ else if (rc == MIGRATEPAGE_SUCCESS && unlikely(!is_lru)) {
/* drop our reference, page already in the balloon */
put_page(newpage);
} else
@@ -1053,6 +1056,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
+ /*
+ * Check for pages which are in the process of being freed. Without
+ * page_mapping() set, the hugetlbfs-specific move-page routine will
+ * not be called and we could leak usage counts for subpools.
+ */
+ if (page_private(hpage) && !page_mapping(hpage)) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
@@ -1083,6 +1096,7 @@ put_anon:
put_new_page = NULL;
}
+out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)
diff --git a/mm/mlock.c b/mm/mlock.c
index e180c4e3dba65a..966dbdcfc32260 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -90,7 +90,6 @@ void mlock_vma_page(struct page *page)
putback_lru_page(page);
}
}
-EXPORT_SYMBOL_GPL(mlock_vma_page);
/*
* Isolate a page from LRU with optional get_page() pin.
@@ -205,7 +204,6 @@ unlock_out:
out:
return nr_pages - 1;
}
-EXPORT_SYMBOL_GPL(munlock_vma_page);
/*
* convert get_user_pages() return value to posix mlock() error
@@ -506,6 +504,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
int nr_pages;
int ret = 0;
int lock = !!(newflags & VM_LOCKED);
+ vm_flags_t old_flags = vma->vm_flags;
if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
@@ -540,6 +539,8 @@ success:
nr_pages = (end - start) >> PAGE_SHIFT;
if (!lock)
nr_pages = -nr_pages;
+ else if (old_flags & VM_LOCKED)
+ nr_pages = 0;
mm->locked_vm += nr_pages;
/*
diff --git a/mm/mmap.c b/mm/mmap.c
index 4c79de941b1f90..adcc0259080ddf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -89,7 +89,7 @@ static void unmap_region(struct mm_struct *mm,
* x: (no) no x: (no) yes x: (no) yes x: (yes) yes
*
*/
-pgprot_t protection_map[16] = {
+pgprot_t protection_map[16] __ro_after_init = {
__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
@@ -1297,6 +1297,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
return 0;
}
+static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
+{
+ if (S_ISREG(inode->i_mode))
+ return MAX_LFS_FILESIZE;
+
+ if (S_ISBLK(inode->i_mode))
+ return MAX_LFS_FILESIZE;
+
+ /* Special "we do even unsigned file positions" case */
+ if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+ return 0;
+
+ /* Yes, random drivers might want more. But I'm tired of buggy drivers */
+ return ULONG_MAX;
+}
+
+static inline bool file_mmap_ok(struct file *file, struct inode *inode,
+ unsigned long pgoff, unsigned long len)
+{
+ u64 maxsize = file_mmap_size_max(file, inode);
+
+ if (maxsize && len > maxsize)
+ return false;
+ maxsize -= len;
+ if (pgoff > maxsize >> PAGE_SHIFT)
+ return false;
+ return true;
+}
+
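file_mmap_ok() rejects mappings whose last byte would land past the per-file-type limit. A standalone sketch of the arithmetic; note that maxsize == 0 (the FMODE_UNSIGNED_OFFSET case) skips the length check and, through the unsigned wrap-around of maxsize - len, effectively disables the offset check too:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT_SKETCH 12	/* assumes 4 KiB pages */

static bool file_mmap_ok_sketch(uint64_t maxsize, uint64_t pgoff, uint64_t len)
{
	if (maxsize && len > maxsize)
		return false;		/* longer than the file-type limit */
	maxsize -= len;			/* room left for the start offset */
	return pgoff <= (maxsize >> PAGE_SHIFT_SKETCH);
}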
/*
* The caller must hold down_write(&current->mm->mmap_sem).
*/
@@ -1362,6 +1391,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
if (file) {
struct inode *inode = file_inode(file);
+ if (!file_mmap_ok(file, inode, pgoff, len))
+ return -EOVERFLOW;
+
switch (flags & MAP_TYPE) {
case MAP_SHARED:
if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
@@ -2285,12 +2317,11 @@ int expand_downwards(struct vm_area_struct *vma,
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
unsigned long gap_addr;
- int error;
+ int error = 0;
address &= PAGE_MASK;
- error = security_mmap_addr(address);
- if (error)
- return error;
+ if (address < mmap_min_addr)
+ return -EPERM;
/* Enforce stack_guard_gap */
gap_addr = address - stack_guard_gap;
@@ -2809,10 +2840,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
- len = PAGE_ALIGN(len);
- if (!len)
- return addr;
-
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2880,12 +2907,19 @@ out:
return addr;
}
-unsigned long vm_brk(unsigned long addr, unsigned long len)
+unsigned long vm_brk(unsigned long addr, unsigned long request)
{
struct mm_struct *mm = current->mm;
+ unsigned long len;
unsigned long ret;
bool populate;
+ len = PAGE_ALIGN(request);
+ if (len < request)
+ return -ENOMEM;
+ if (!len)
+ return addr;
+
down_write(&mm->mmap_sem);
ret = do_brk(addr, len);
populate = ((mm->def_flags & VM_LOCKED) != 0);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index b8849a3930a08d..fcd678c3bd24ee 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -255,6 +255,42 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
return pages;
}
+static int prot_none_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+ 0 : -EACCES;
+}
+
+static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask,
+ unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ?
+ 0 : -EACCES;
+}
+
+static int prot_none_test(unsigned long addr, unsigned long next,
+ struct mm_walk *walk)
+{
+ return 0;
+}
+
+static int prot_none_walk(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, unsigned long newflags)
+{
+ pgprot_t new_pgprot = vm_get_page_prot(newflags);
+ struct mm_walk prot_none_walk = {
+ .pte_entry = prot_none_pte_entry,
+ .hugetlb_entry = prot_none_hugetlb_entry,
+ .test_walk = prot_none_test,
+ .mm = current->mm,
+ .private = &new_pgprot,
+ };
+
+ return walk_page_range(start, end, &prot_none_walk);
+}
+
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
unsigned long start, unsigned long end, unsigned long newflags)
@@ -273,6 +309,19 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
}
/*
+ * Do PROT_NONE PFN permission checks here when we can still
+ * bail out without undoing a lot of state. This is a rather
+ * uncommon case, so doesn't need to be very optimized.
+ */
+ if (arch_has_pfn_modify_check() &&
+ (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+ (newflags & (VM_READ|VM_WRITE|VM_EXEC)) == 0) {
+ error = prot_none_walk(vma, start, end, newflags);
+ if (error)
+ return error;
+ }
+
+ /*
* If we make a private mapping writable we increase our commit;
* but (without finer accounting) cannot reduce our commit if we
* make it unwritable again. hugetlb mapping were accounted for
diff --git a/mm/mremap.c b/mm/mremap.c
index ee9c6c8392f096..c70d8578b17de5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -96,6 +96,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
spinlock_t *old_ptl, *new_ptl;
+ bool force_flush = false;
+ unsigned long len = old_end - old_addr;
/*
* When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -143,12 +145,26 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
if (pte_none(*old_pte))
continue;
pte = ptep_get_and_clear(mm, old_addr, old_pte);
+ /*
+ * If we are remapping a valid PTE, make sure
+ * to flush TLB before we drop the PTL for the PTE.
+ *
+ * NOTE! Both old and new PTL matter: the old one
+ * for racing with page_mkclean(), the new one to
+ * make sure the physical page stays valid until
+ * the TLB entry for the old mapping has been
+ * flushed.
+ */
+ if (pte_present(pte))
+ force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
set_pte_at(mm, new_addr, new_pte, pte);
}
arch_leave_lazy_mmu_mode();
+ if (force_flush)
+ flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
@@ -168,7 +184,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
- bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -207,7 +222,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
anon_vma_unlock_write(vma->anon_vma);
}
if (err > 0) {
- need_flush = true;
continue;
} else if (!err) {
split_huge_page_pmd(vma, old_addr, old_pmd);
@@ -224,10 +238,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
extent = LATENCY_LIMIT;
move_ptes(vma, old_pmd, old_addr, old_addr + extent,
new_vma, new_pmd, new_addr, need_rmap_locks);
- need_flush = true;
}
- if (likely(need_flush))
- flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/mm/nommu.c b/mm/nommu.c
index c9df8c67293dfc..518c38c890985a 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -184,40 +184,32 @@ finish_or_fault:
*/
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas)
{
- int flags = 0;
-
- if (write)
- flags |= FOLL_WRITE;
- if (force)
- flags |= FOLL_FORCE;
-
- return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
- NULL);
+ return __get_user_pages(tsk, mm, start, nr_pages,
+ gup_flags, pages, vmas, NULL);
}
EXPORT_SYMBOL(get_user_pages);
long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
+ unsigned int gup_flags, struct page **pages,
int *locked)
{
- return get_user_pages(tsk, mm, start, nr_pages, write, force,
+ return get_user_pages(tsk, mm, start, nr_pages, gup_flags,
pages, NULL);
}
EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages,
- unsigned int gup_flags)
+ struct page **pages, unsigned int gup_flags)
{
long ret;
down_read(&mm->mmap_sem);
- ret = get_user_pages(tsk, mm, start, nr_pages, write, force,
- pages, NULL);
+ ret = __get_user_pages(tsk, mm, start, nr_pages, gup_flags, pages,
+ NULL, NULL);
up_read(&mm->mmap_sem);
return ret;
}
@@ -225,10 +217,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
- int write, int force, struct page **pages)
+ struct page **pages, unsigned int gup_flags)
{
- return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
- force, pages, 0);
+ return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+ pages, gup_flags);
}
EXPORT_SYMBOL(get_user_pages_unlocked);
@@ -1942,9 +1934,10 @@ void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf)
EXPORT_SYMBOL(filemap_map_pages);
static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
- unsigned long addr, void *buf, int len, int write)
+ unsigned long addr, void *buf, int len, unsigned int gup_flags)
{
struct vm_area_struct *vma;
+ int write = gup_flags & FOLL_WRITE;
down_read(&mm->mmap_sem);
@@ -1979,14 +1972,14 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
* @addr: start address to access
* @buf: source or destination buffer
* @len: number of bytes to transfer
- * @write: whether the access is a write
+ * @gup_flags: flags modifying lookup behaviour
*
* The caller must hold a reference on @mm.
*/
int access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, int write)
+ void *buf, int len, unsigned int gup_flags)
{
- return __access_remote_vm(NULL, mm, addr, buf, len, write);
+ return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
}
/*
@@ -2004,7 +1997,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (!mm)
return 0;
- len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+ len = __access_remote_vm(tsk, mm, addr, buf, len,
+ write ? FOLL_WRITE : 0);
mmput(mm);
return len;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4d7123a2c9317d..013e193c87f90c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -582,6 +582,13 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
* still freeing memory.
*/
read_lock(&tasklist_lock);
+
+ /*
+ * The task 'p' might have already exited before reaching here, in
+ * which case put_task_struct() would free task_struct 'p' while the
+ * loop below still accesses its fields, so take an extra reference.
+ */
+ get_task_struct(p);
for_each_thread(p, t) {
list_for_each_entry(child, &t->children, sibling) {
unsigned int child_points;
@@ -610,6 +617,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
}
}
}
+ put_task_struct(p);
read_unlock(&tasklist_lock);
p = find_lock_task_mm(victim);
@@ -632,6 +640,7 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
*/
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
mark_oom_victim(victim);
+ count_vm_event(OOM_KILL);
pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3309dbda7ffae3..0bc7fa21db8540 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2151,6 +2151,7 @@ int write_cache_pages(struct address_space *mapping,
{
int ret = 0;
int done = 0;
+ int error;
struct pagevec pvec;
int nr_pages;
pgoff_t uninitialized_var(writeback_index);
@@ -2247,25 +2248,31 @@ continue_unlock:
goto continue_unlock;
trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
- ret = (*writepage)(page, wbc, data);
- if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ error = (*writepage)(page, wbc, data);
+ if (unlikely(error)) {
+ /*
+ * Handle errors according to the type of
+ * writeback. There's no need to continue for
+ * background writeback. Just push done_index
+ * past this page so media errors won't choke
+ * writeout for the entire file. For integrity
+ * writeback, we must process the entire dirty
+ * set regardless of errors because the fs may
+ * still have state to clear for each page. In
+ * that case we continue processing and return
+ * the first error.
+ */
+ if (error == AOP_WRITEPAGE_ACTIVATE) {
unlock_page(page);
- ret = 0;
- } else {
- /*
- * done_index is set past this page,
- * so media errors will not choke
- * background writeout for the entire
- * file. This has consequences for
- * range_cyclic semantics (ie. it may
- * not be suitable for data integrity
- * writeout).
- */
+ error = 0;
+ } else if (wbc->sync_mode != WB_SYNC_ALL) {
+ ret = error;
done_index = page->index + 1;
done = 1;
break;
}
+ if (!ret)
+ ret = error;
}
/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1872c647dc2b11..18fb6ca85421ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -63,6 +63,7 @@
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/low-mem-notify.h>
+#include <linux/mm_metrics.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -3126,8 +3127,6 @@ retry:
* the allocation is high priority and these type of
* allocations are system rather than user orientated
*/
- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
-
page = __alloc_pages_high_priority(gfp_mask, order, ac);
if (page) {
@@ -3309,15 +3308,14 @@ retry_cpuset:
goto out;
ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
-#ifdef CONFIG_LOW_MEM_NOTIFY
- if (is_low_mem_situation())
- low_mem_notify();
-#endif
+ low_mem_check();
/* First allocation attempt */
alloc_mask = gfp_mask|__GFP_HARDWALL;
page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
if (unlikely(!page)) {
+ u64 start = 0;
+
/*
* Runtime PM, block IO and its error handling path
* can deadlock because I/O on the device might not
@@ -3326,7 +3324,11 @@ retry_cpuset:
alloc_mask = memalloc_noio_flags(gfp_mask);
ac.spread_dirty_pages = false;
+ if (order < MAX_ORDER && (gfp_mask & __GFP_DIRECT_RECLAIM) &&
+ !(current->flags & PF_MEMALLOC))
+ start = mm_metrics_reclaim_start();
page = __alloc_pages_slowpath(alloc_mask, order, &ac);
+ mm_metrics_reclaim_end(start);
}
if (kmemcheck_enabled && page)
diff --git a/mm/percpu.c b/mm/percpu.c
index 1c784df3bdfee7..7d1b8a419f7ddc 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -120,35 +120,35 @@ struct pcpu_chunk {
unsigned long populated[]; /* populated bitmap */
};
-static int pcpu_unit_pages __read_mostly;
-static int pcpu_unit_size __read_mostly;
-static int pcpu_nr_units __read_mostly;
-static int pcpu_atom_size __read_mostly;
-static int pcpu_nr_slots __read_mostly;
-static size_t pcpu_chunk_struct_size __read_mostly;
+static int pcpu_unit_pages __ro_after_init;
+static int pcpu_unit_size __ro_after_init;
+static int pcpu_nr_units __ro_after_init;
+static int pcpu_atom_size __ro_after_init;
+static int pcpu_nr_slots __ro_after_init;
+static size_t pcpu_chunk_struct_size __ro_after_init;
/* cpus with the lowest and highest unit addresses */
-static unsigned int pcpu_low_unit_cpu __read_mostly;
-static unsigned int pcpu_high_unit_cpu __read_mostly;
+static unsigned int pcpu_low_unit_cpu __ro_after_init;
+static unsigned int pcpu_high_unit_cpu __ro_after_init;
/* the address of the first chunk which starts with the kernel static area */
-void *pcpu_base_addr __read_mostly;
+void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);
-static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
-const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
+static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
+const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
/* group information, used for vm allocation */
-static int pcpu_nr_groups __read_mostly;
-static const unsigned long *pcpu_group_offsets __read_mostly;
-static const size_t *pcpu_group_sizes __read_mostly;
+static int pcpu_nr_groups __ro_after_init;
+static const unsigned long *pcpu_group_offsets __ro_after_init;
+static const size_t *pcpu_group_sizes __ro_after_init;
/*
* The first chunk which always exists. Note that unlike other
* chunks, this one can be allocated and mapped in several different
* ways and thus often doesn't live in the vmalloc area.
*/
-static struct pcpu_chunk *pcpu_first_chunk;
+static struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
/*
* Optional reserved chunk. This chunk reserves part of the first
@@ -157,13 +157,13 @@ static struct pcpu_chunk *pcpu_first_chunk;
* area doesn't exist, the following variables contain NULL and 0
* respectively.
*/
-static struct pcpu_chunk *pcpu_reserved_chunk;
-static int pcpu_reserved_chunk_limit;
+static struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
+static int pcpu_reserved_chunk_limit __ro_after_init;
static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
-static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+static struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 5d453e58ddbf75..1b5a6104c5fcdb 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -88,19 +88,23 @@ static int process_vm_rw_single_vec(unsigned long addr,
ssize_t rc = 0;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *);
+ unsigned int flags = 0;
/* Work out address and page range required */
if (len == 0)
return 0;
nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
+ if (vm_write)
+ flags |= FOLL_WRITE;
+
while (!rc && nr_pages && iov_iter_count(iter)) {
int pages = min(nr_pages, max_pages_per_loop);
size_t bytes;
/* Get the pages we're interested in */
pages = get_user_pages_unlocked(task, mm, pa, pages,
- vm_write, 0, process_pages);
+ process_pages, flags);
if (pages <= 0)
return -EFAULT;
diff --git a/mm/rmap.c b/mm/rmap.c
index 1bceb49aa21422..cf733fab230f54 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -408,7 +408,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
- BUG_ON(anon_vma->degree);
+ VM_WARN_ON(anon_vma->degree);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
@@ -1324,12 +1324,41 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
pte_t pteval;
spinlock_t *ptl;
int ret = SWAP_AGAIN;
+ unsigned long sh_address;
+ bool pmd_sharing_possible = false;
+ unsigned long spmd_start, spmd_end;
enum ttu_flags flags = (enum ttu_flags)arg;
/* munlock has nothing to gain from examining un-locked vmas */
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
goto out;
+ /*
+ * Only use the range_start/end mmu notifiers if huge pmd sharing
+ * is possible. In the normal case, mmu_notifier_invalidate_page
+ * is sufficient as we only unmap a page. However, if we unshare
+ * a pmd, we will unmap a PUD_SIZE range.
+ */
+ if (PageHuge(page)) {
+ spmd_start = address;
+ spmd_end = spmd_start + vma_mmu_pagesize(vma);
+
+ /*
+ * Check if pmd sharing is possible. If possible, we could
+ * unmap a PUD_SIZE range. spmd_start/spmd_end will be
+ * modified if sharing is possible.
+ */
+ adjust_range_if_pmd_sharing_possible(vma, &spmd_start,
+ &spmd_end);
+ if (spmd_end - spmd_start != vma_mmu_pagesize(vma)) {
+ sh_address = address;
+
+ pmd_sharing_possible = true;
+ mmu_notifier_invalidate_range_start(vma->vm_mm,
+ spmd_start, spmd_end);
+ }
+ }
+
pte = page_check_address(page, mm, address, &ptl, 0);
if (!pte)
goto out;
@@ -1356,6 +1385,30 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
}
}
+ /*
+ * Call huge_pmd_unshare to potentially unshare a huge pmd. Pass
+ * sh_address as it will be modified if unsharing is successful.
+ */
+ if (PageHuge(page) && huge_pmd_unshare(mm, &sh_address, pte)) {
+ /*
+ * huge_pmd_unshare unmapped an entire PMD page. There is
+ * no way of knowing exactly which PMDs may be cached for
+ * this mm, so flush them all. spmd_start/spmd_end cover
+ * this PUD_SIZE range.
+ */
+ flush_cache_range(vma, spmd_start, spmd_end);
+ flush_tlb_range(vma, spmd_start, spmd_end);
+
+ /*
+ * The ref count of the PMD page was dropped which is part
+ * of the way map counting is done for shared PMDs. When
+ * there is no other sharing, huge_pmd_unshare returns false
+ * and we will unmap the actual page and drop map count
+ * to zero.
+ */
+ goto out_unmap;
+ }
+
/* Nuke the page table entry. */
flush_cache_page(vma, address, page_to_pfn(page));
if (should_defer_flush(mm, flags)) {
@@ -1450,6 +1503,9 @@ out_unmap:
if (ret != SWAP_FAIL && ret != SWAP_MLOCK && !(flags & TTU_MUNLOCK))
mmu_notifier_invalidate_page(mm, address);
out:
+ if (pmd_sharing_possible)
+ mmu_notifier_invalidate_range_end(vma->vm_mm,
+ spmd_start, spmd_end);
return ret;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 01baa7fe284060..c8c348fd2b5236 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -68,6 +68,7 @@ static struct vfsmount *shm_mnt;
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
+#include <linux/mm_metrics.h>
#include <uapi/linux/memfd.h>
#include <asm/uaccess.h>
@@ -255,7 +256,7 @@ static void shmem_recalc_inode(struct inode *inode)
static int shmem_radix_tree_replace(struct address_space *mapping,
pgoff_t index, void *expected, void *replacement)
{
- void **pslot;
+ void __rcu **pslot;
void *item;
VM_BUG_ON(!expected);
@@ -285,7 +286,7 @@ static bool shmem_confirm_swap(struct address_space *mapping,
rcu_read_lock();
item = radix_tree_lookup(&mapping->page_tree, index);
rcu_read_unlock();
- return item == swp_to_radix_entry(swap);
+ return swp_radix_same(swap, item);
}
/*
@@ -383,7 +384,7 @@ void shmem_unlock_mapping(struct address_space *mapping)
break;
index = indices[pvec.nr - 1] + 1;
pagevec_remove_exceptionals(&pvec);
- check_move_unevictable_pages(pvec.pages, pvec.nr);
+ check_move_unevictable_pages(&pvec);
pagevec_release(&pvec);
cond_resched();
}
@@ -628,6 +629,41 @@ static void shmem_evict_inode(struct inode *inode)
clear_inode(inode);
}
+static unsigned long find_swap_entry(struct radix_tree_root *root,
+ swp_entry_t swap, void **item)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+ unsigned long found = -1;
+ unsigned int checked = 0;
+ pgoff_t start = 0;
+
+ rcu_read_lock();
+restart:
+ radix_tree_for_each_slot(slot, root, &iter, start) {
+ void *entry = radix_tree_deref_slot(slot);
+
+ if (radix_tree_deref_retry(entry)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ if (swp_radix_same(swap, entry)) {
+ *item = entry;
+ found = iter.index;
+ break;
+ }
+ checked++;
+ if ((checked % 4096) != 0)
+ continue;
+ cond_resched_rcu();
+ start = iter.index + 1;
+ goto restart;
+ }
+
+ rcu_read_unlock();
+ return found;
+}
+
/*
* If swap found in inode, free it and move page from swapcache to filecache.
*/
@@ -640,8 +676,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
gfp_t gfp;
int error = 0;
- radswap = swp_to_radix_entry(swap);
- index = radix_tree_locate_item(&mapping->page_tree, radswap);
+ index = find_swap_entry(&mapping->page_tree, swap, &radswap);
if (index == -1)
return -EAGAIN; /* tell shmem_unuse we found nothing */
@@ -720,7 +755,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
* There's a faint possibility that swap page was replaced before
* caller locked it: caller will come back later with the right page.
*/
- if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
+ if (unlikely(!PageSwapCache(page) || !swp_page_same(swap, page)))
goto out;
/*
@@ -848,6 +883,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
spin_unlock(&info->lock);
swap_shmem_alloc(swap);
+ mm_metrics_swapout(&swap);
shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
mutex_unlock(&shmem_swaplist_mutex);
@@ -1104,9 +1140,13 @@ repeat:
sbinfo = SHMEM_SB(inode->i_sb);
if (swap.val) {
+ u64 start = 0;
+
+ mm_metrics_swapin(swap);
/* Look it up and read it in.. */
page = lookup_swap_cache(swap);
if (!page) {
+ start = mm_metrics_swapin_start();
/* here we actually do the io */
if (fault_type)
*fault_type |= VM_FAULT_MAJOR;
@@ -1119,7 +1159,7 @@ repeat:
/* We have to do this with page locked to prevent races */
lock_page(page);
- if (!PageSwapCache(page) || page_private(page) != swap.val ||
+ if (!PageSwapCache(page) || !swp_page_same(swap, page) ||
!shmem_confirm_swap(mapping, index, swap)) {
error = -EEXIST; /* try again */
goto unlock;
@@ -1128,6 +1168,7 @@ repeat:
error = -EIO;
goto failed;
}
+ mm_metrics_swapin_end(start);
wait_on_page_writeback(page);
if (shmem_should_replace_page(page, gfp)) {
@@ -1355,8 +1396,8 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
if (ret & VM_FAULT_MAJOR) {
- count_vm_event(PGMAJFAULT);
- mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+ count_vm_event(PGMAJFAULT_S);
+ mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT_S);
}
return ret;
}
@@ -1464,6 +1505,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
@@ -1816,9 +1859,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
mutex_lock(&inode->i_mutex);
/* We're holding i_mutex so we can access i_size directly */
- if (offset < 0)
- offset = -EINVAL;
- else if (offset >= inode->i_size)
+ if (offset < 0 || offset >= inode->i_size)
offset = -ENXIO;
else {
start = offset >> PAGE_CACHE_SHIFT;
@@ -1851,7 +1892,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
static void shmem_tag_pins(struct address_space *mapping)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
pgoff_t start;
struct page *page;
@@ -1893,7 +1934,7 @@ restart:
static int shmem_wait_for_pins(struct address_space *mapping)
{
struct radix_tree_iter iter;
- void **slot;
+ void __rcu **slot;
pgoff_t start;
struct page *page;
int error, scan;
@@ -2293,16 +2334,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = d_inode(old_dentry);
- int ret;
+ int ret = 0;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
+ * But if an O_TMPFILE file is linked into the tmpfs, the
+ * first link must skip that, to get the accounting right.
*/
- ret = shmem_reserve_inode(inode->i_sb);
- if (ret)
- goto out;
+ if (inode->i_nlink) {
+ ret = shmem_reserve_inode(inode->i_sb);
+ if (ret)
+ goto out;
+ }
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
diff --git a/mm/slab.c b/mm/slab.c
index 91790a913bb6ba..a64b71e2f2eafb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -806,8 +806,10 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
struct alien_cache *alc = NULL;
alc = kmalloc_node(memsize, gfp, node);
- init_arraycache(&alc->ac, entries, batch);
- spin_lock_init(&alc->lock);
+ if (alc) {
+ init_arraycache(&alc->ac, entries, batch);
+ spin_lock_init(&alc->lock);
+ }
return alc;
}
@@ -4255,6 +4257,36 @@ static int __init slab_proc_init(void)
module_init(slab_proc_init);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *cachep;
+ unsigned int objnr;
+ unsigned long offset;
+
+ /* Find and validate object. */
+ cachep = page->slab_cache;
+ objnr = obj_to_index(cachep, page, (void *)ptr);
+ BUG_ON(objnr >= cachep->num);
+
+ /* Find offset within object. */
+ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep);
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
+ return NULL;
+
+ return cachep->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
/**
* ksize - get the actual amount of memory allocated for a given object
* @objp: Pointer to the object
diff --git a/mm/slab_common.c b/mm/slab_common.c
index fb04a8e89d14f6..4d3683d438edeb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -9,6 +9,7 @@
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
+#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
@@ -800,11 +801,11 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
return s;
}
-struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
+struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
EXPORT_SYMBOL(kmalloc_caches);
#ifdef CONFIG_ZONE_DMA
-struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
+struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1] __ro_after_init;
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif
@@ -814,7 +815,7 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
* of two cache sizes there. The size of larger slabs can be determined using
* fls.
*/
-static s8 size_index[24] = {
+static s8 size_index[24] __ro_after_init = {
3, /* 8 */
4, /* 16 */
5, /* 24 */
diff --git a/mm/slub.c b/mm/slub.c
index 82a4185c5ba314..a5b337ef845222 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -124,6 +124,14 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
#endif
}
+static inline void *fixup_red_left(struct kmem_cache *s, void *p)
+{
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
+ p += s->red_left_pad;
+
+ return p;
+}
+
static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -224,24 +232,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
* Core slab cache functions
*******************************************************************/
-/* Verify that a pointer has an address that is valid within a slab page */
-static inline int check_valid_pointer(struct kmem_cache *s,
- struct page *page, const void *object)
-{
- void *base;
-
- if (!object)
- return 1;
-
- base = page_address(page);
- if (object < base || object >= base + page->objects * s->size ||
- (object - base) % s->size) {
- return 0;
- }
-
- return 1;
-}
-
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
@@ -271,12 +261,14 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
- for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
- __p += (__s)->size)
+ for (__p = fixup_red_left(__s, __addr); \
+ __p < (__addr) + (__objects) * (__s)->size; \
+ __p += (__s)->size)
#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
- for (__p = (__addr), __idx = 1; __idx <= __objects;\
- __p += (__s)->size, __idx++)
+ for (__p = fixup_red_left(__s, __addr), __idx = 1; \
+ __idx <= __objects; \
+ __p += (__s)->size, __idx++)
/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
@@ -432,6 +424,22 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
set_bit(slab_index(p, s, addr), map);
}
+static inline int size_from_object(struct kmem_cache *s)
+{
+ if (s->flags & SLAB_RED_ZONE)
+ return s->size - s->red_left_pad;
+
+ return s->size;
+}
+
+static inline void *restore_red_left(struct kmem_cache *s, void *p)
+{
+ if (s->flags & SLAB_RED_ZONE)
+ p -= s->red_left_pad;
+
+ return p;
+}
+
/*
* Debug settings:
*/
@@ -465,6 +473,26 @@ static inline void metadata_access_disable(void)
/*
* Object debugging
*/
+
+/* Verify that a pointer has an address that is valid within a slab page */
+static inline int check_valid_pointer(struct kmem_cache *s,
+ struct page *page, void *object)
+{
+ void *base;
+
+ if (!object)
+ return 1;
+
+ base = page_address(page);
+ object = restore_red_left(s, object);
+ if (object < base || object >= base + page->objects * s->size ||
+ (object - base) % s->size) {
+ return 0;
+ }
+
+ return 1;
+}
+
static void print_section(char *text, u8 *addr, unsigned int length)
{
metadata_access_enable();
@@ -604,7 +632,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
p, p - addr, get_freepointer(s, p));
- if (p > addr + 16)
+ if (s->flags & SLAB_RED_ZONE)
+ print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+ else if (p > addr + 16)
print_section("Bytes b4 ", p - 16, 16);
print_section("Object ", p, min_t(unsigned long, s->object_size,
@@ -621,9 +651,9 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (s->flags & SLAB_STORE_USER)
off += 2 * sizeof(struct track);
- if (off != s->size)
+ if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section("Padding ", p + off, s->size - off);
+ print_section("Padding ", p + off, size_from_object(s) - off);
dump_stack();
}
@@ -635,7 +665,7 @@ void object_err(struct kmem_cache *s, struct page *page,
print_trailer(s, page, object);
}
-static void slab_err(struct kmem_cache *s, struct page *page,
+static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page,
const char *fmt, ...)
{
va_list args;
@@ -653,6 +683,9 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
{
u8 *p = object;
+ if (s->flags & SLAB_RED_ZONE)
+ memset(p - s->red_left_pad, val, s->red_left_pad);
+
if (s->flags & __OBJECT_POISON) {
memset(p, POISON_FREE, s->object_size - 1);
p[s->object_size - 1] = POISON_END;
@@ -745,11 +778,11 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
/* We also have user information there */
off += 2 * sizeof(struct track);
- if (s->size == off)
+ if (size_from_object(s) == off)
return 1;
return check_bytes_and_report(s, page, p, "Object padding",
- p + off, POISON_INUSE, s->size - off);
+ p + off, POISON_INUSE, size_from_object(s) - off);
}
/* Check the pad bytes at the end of a slab page */
@@ -794,6 +827,10 @@ static int check_object(struct kmem_cache *s, struct page *page,
if (s->flags & SLAB_RED_ZONE) {
if (!check_bytes_and_report(s, page, object, "Redzone",
+ object - s->red_left_pad, val, s->red_left_pad))
+ return 0;
+
+ if (!check_bytes_and_report(s, page, object, "Redzone",
endobject, val, s->inuse - s->object_size))
return 0;
} else {
@@ -1414,7 +1451,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
set_freepointer(s, p, NULL);
}
- page->freelist = start;
+ page->freelist = fixup_red_left(s, start);
page->inuse = page->objects;
page->frozen = 1;
@@ -1607,7 +1644,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
{
struct page *page, *page2;
void *object = NULL;
- int available = 0;
+ unsigned int available = 0;
int objects;
/*
@@ -3230,7 +3267,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
*/
size += 2 * sizeof(struct track);
- if (flags & SLAB_RED_ZONE)
+ if (flags & SLAB_RED_ZONE) {
/*
* Add some empty padding so that we can catch
* overwrites from earlier objects rather than let
@@ -3239,6 +3276,11 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
* of the object.
*/
size += sizeof(void *);
+
+ s->red_left_pad = sizeof(void *);
+ s->red_left_pad = ALIGN(s->red_left_pad, s->align);
+ size += s->red_left_pad;
+ }
#endif
/*
@@ -3532,6 +3574,46 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(__kmalloc_node);
#endif
+#ifdef CONFIG_HARDENED_USERCOPY
+/*
+ * Rejects objects that are incorrectly sized.
+ *
+ * Returns NULL if check passes, otherwise const char * to name of cache
+ * to indicate an error.
+ */
+const char *__check_heap_object(const void *ptr, unsigned long n,
+ struct page *page)
+{
+ struct kmem_cache *s;
+ unsigned long offset;
+ size_t object_size;
+
+ /* Find object and usable object size. */
+ s = page->slab_cache;
+ object_size = slab_ksize(s);
+
+ /* Reject impossible pointers. */
+ if (ptr < page_address(page))
+ return s->name;
+
+ /* Find offset within object. */
+ offset = (ptr - page_address(page)) % s->size;
+
+ /* Adjust for redzone and reject if within the redzone. */
+ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
+ if (offset < s->red_left_pad)
+ return s->name;
+ offset -= s->red_left_pad;
+ }
+
+ /* Allow address range falling entirely within object size. */
+ if (offset <= object_size && n <= object_size - offset)
+ return NULL;
+
+ return s->name;
+}
+#endif /* CONFIG_HARDENED_USERCOPY */
+
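In the SLUB checker above, objects are laid out every s->size bytes from the page base, so the byte offset inside an object is a simple modulo; with SLAB_RED_ZONE the first red_left_pad bytes of each slot belong to the left redzone and must never be copied. A standalone sketch of that arithmetic:

#include <stdbool.h>
#include <stddef.h>

/* ptr_off: byte offset of the pointer from the slab page base. */
static bool heap_range_ok(size_t ptr_off, size_t slot_size,
			  size_t red_left_pad, size_t object_size, size_t n)
{
	size_t offset = ptr_off % slot_size;	/* offset within this slot */

	if (offset < red_left_pad)
		return false;			/* points into the left redzone */
	offset -= red_left_pad;

	/* Allow only ranges falling entirely within the usable object. */
	return offset <= object_size && n <= object_size - offset;
}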
static size_t __ksize(const void *object)
{
struct page *page;
@@ -4621,10 +4703,10 @@ static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- unsigned long objects;
+ unsigned int objects;
int err;
- err = kstrtoul(buf, 10, &objects);
+ err = kstrtouint(buf, 10, &objects);
if (err)
return err;
if (objects && !kmem_cache_has_cpu_partial(s))
diff --git a/mm/swap_state.c b/mm/swap_state.c
index d504adb7fa5f08..3c91a0e9ae1d2d 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -17,6 +17,7 @@
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
+#include <linux/mm_metrics.h>
#include <asm/pgtable.h>
@@ -187,6 +188,7 @@ int add_to_swap(struct page *page, struct list_head *list)
/*
* Add it to the swap cache and mark it dirty
*/
+ mm_metrics_swapout(&entry);
err = add_to_swap_cache(page, entry,
__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 34f7baaf0f33e3..fd822c6ece592e 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1117,19 +1117,9 @@ unsigned int count_swap_pages(int type, int free)
}
#endif /* CONFIG_HIBERNATION */
-static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+static inline int pte_same_as_swp(pte_t pte, swp_entry_t swp)
{
-#ifdef CONFIG_MEM_SOFT_DIRTY
- /*
- * When pte keeps soft dirty bit the pte generated
- * from swap entry does not has it, still it's same
- * pte from logical point of view.
- */
- pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
- return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
-#else
- return pte_same(pte, swp_pte);
-#endif
+ return is_swap_pte(pte) && swp_entry_same(pte_to_swp_entry(pte), swp);
}
/*
@@ -1157,7 +1147,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
+ if (unlikely(!pte_same_as_swp(*pte, entry))) {
mem_cgroup_cancel_charge(page, memcg);
ret = 0;
goto out;
@@ -1196,7 +1186,6 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
swp_entry_t entry, struct page *page)
{
- pte_t swp_pte = swp_entry_to_pte(entry);
pte_t *pte;
int ret = 0;
@@ -1215,7 +1204,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
- if (unlikely(maybe_same_pte(*pte, swp_pte))) {
+ if (unlikely(pte_same_as_swp(*pte, entry))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
@@ -1571,7 +1560,7 @@ int try_to_unuse(unsigned int type, bool frontswap,
* delete, since it may not have been written out to swap yet.
*/
if (PageSwapCache(page) &&
- likely(page_private(page) == entry.val))
+ likely(swp_page_same(entry, page)))
delete_from_swap_cache(page);
/*
@@ -2234,6 +2223,35 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode,
return 0;
}
+
+/*
+ * Find out how many pages are allowed for a single swap device. There
+ * are two limiting factors:
+ * 1) the number of bits for the swap offset in the swp_entry_t type, and
+ * 2) the number of bits in the swap pte, as defined by the different
+ * architectures.
+ *
+ * In order to find the largest possible bit mask, a swap entry with
+ * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
+ * decoded to a swp_entry_t again, and finally the swap offset is
+ * extracted.
+ *
+ * This will mask all the bits from the initial ~0UL mask that can't
+ * be encoded in either the swp_entry_t or the architecture definition
+ * of a swap pte.
+ */
+unsigned long generic_max_swapfile_size(void)
+{
+ return swp_offset(pte_to_swp_entry(
+ swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+}
+
+/* Can be overridden by an architecture for additional checks. */
+__weak unsigned long max_swapfile_size(void)
+{
+ return generic_max_swapfile_size();
+}
+
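The round trip in generic_max_swapfile_size() works because encoding and decoding act as a bit mask: only the offset bits representable in both swp_entry_t and the arch swap pte survive. A toy illustration, with a hypothetical 50-bit offset field standing in for the per-architecture encoding:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_BITS	50
#define OFFSET_MASK	((UINT64_C(1) << OFFSET_BITS) - 1)

static uint64_t toy_encode(uint64_t type, uint64_t offset)
{
	return (type << OFFSET_BITS) | (offset & OFFSET_MASK);
}

static uint64_t toy_decode_offset(uint64_t pte)
{
	return pte & OFFSET_MASK;
}

int main(void)
{
	/* ~0 has every bit set; the round trip keeps only encodable ones. */
	uint64_t max_pages = toy_decode_offset(toy_encode(0, ~UINT64_C(0))) + 1;

	printf("max swapfile pages: %llu\n", (unsigned long long)max_pages);
	return 0;
}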
static unsigned long read_swap_header(struct swap_info_struct *p,
union swap_header *swap_header,
struct inode *inode)
@@ -2269,22 +2287,7 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
p->cluster_next = 1;
p->cluster_nr = 0;
- /*
- * Find out how many pages are allowed for a single swap
- * device. There are two limiting factors: 1) the number
- * of bits for the swap offset in the swp_entry_t type, and
- * 2) the number of bits in the swap pte as defined by the
- * different architectures. In order to find the
- * largest possible bit mask, a swap entry with swap type 0
- * and swap offset ~0UL is created, encoded to a swap pte,
- * decoded to a swp_entry_t again, and finally the swap
- * offset is extracted. This will mask all the bits from
- * the initial ~0UL mask that can't be encoded in either
- * the swp_entry_t or the architecture definition of a
- * swap pte.
- */
- maxpages = swp_offset(pte_to_swp_entry(
- swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
+ maxpages = max_swapfile_size();
last_page = swap_header->info.last_page;
if (!last_page) {
pr_warn("Empty swap-file\n");
diff --git a/mm/truncate.c b/mm/truncate.c
index f4c8270f7b84b0..aeb1adba329e57 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -432,9 +432,13 @@ void truncate_inode_pages_final(struct address_space *mapping)
*/
spin_lock_irq(&mapping->tree_lock);
spin_unlock_irq(&mapping->tree_lock);
-
- truncate_inode_pages(mapping, 0);
}
+
+ /*
+ * Cleancache needs notification even if there are no pages or shadow
+ * entries.
+ */
+ truncate_inode_pages(mapping, 0);
}
EXPORT_SYMBOL(truncate_inode_pages_final);
diff --git a/mm/usercopy.c b/mm/usercopy.c
new file mode 100644
index 00000000000000..8345299e3e3b08
--- /dev/null
+++ b/mm/usercopy.c
@@ -0,0 +1,280 @@
+/*
+ * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
+ * which are designed to protect kernel memory from needless exposure
+ * and overwrite under many unintended conditions. This code is based
+ * on PAX_USERCOPY, which is:
+ *
+ * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
+ * Security Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/sections.h>
+
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
+/*
+ * Checks if a given pointer and length is contained by the current
+ * stack frame (if possible).
+ *
+ * Returns:
+ * NOT_STACK: not at all on the stack
+ * GOOD_FRAME: fully within a valid stack frame
+ * GOOD_STACK: fully on the stack (when can't do frame-checking)
+ * BAD_STACK: error condition (invalid stack position or bad stack frame)
+ */
+static noinline int check_stack_object(const void *obj, unsigned long len)
+{
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
+ int ret;
+
+ /* Object is not on the stack at all. */
+ if (obj + len <= stack || stackend <= obj)
+ return NOT_STACK;
+
+ /*
+ * Reject: object partially overlaps the stack (passing the
+ * check above means at least one end is within the stack,
+ * so if this check fails, the other end is outside the stack).
+ */
+ if (obj < stack || stackend < obj + len)
+ return BAD_STACK;
+
+ /* Check if object is safely within a valid frame. */
+ ret = arch_within_stack_frames(stack, stackend, obj, len);
+ if (ret)
+ return ret;
+
+ return GOOD_STACK;
+}
+
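The classification above distinguishes three cases: the object misses the stack entirely, straddles one of its boundaries (the dangerous case), or sits fully inside, with arch_within_stack_frames() optionally tightening the last case to a single frame. A user-space sketch of the containment logic:

#include <stdint.h>
#include <stdio.h>

enum { BAD_RANGE = -1, NOT_INSIDE = 0, INSIDE = 1 };

static int classify(uintptr_t lo, uintptr_t hi, uintptr_t obj,
		    unsigned long len)
{
	if (obj + len <= lo || hi <= obj)
		return NOT_INSIDE;	/* no overlap at all */
	if (obj < lo || hi < obj + len)
		return BAD_RANGE;	/* partially overlaps: reject */
	return INSIDE;			/* fully contained */
}

int main(void)
{
	char stack[256];
	uintptr_t lo = (uintptr_t)stack, hi = lo + sizeof(stack);

	printf("%d\n", classify(lo, hi, lo + 16, 32));	/* 1: inside */
	printf("%d\n", classify(lo, hi, lo + 240, 32));	/* -1: straddles end */
	return 0;
}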
+static void report_usercopy(const void *ptr, unsigned long len,
+ bool to_user, const char *type)
+{
+ pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
+ to_user ? "exposure" : "overwrite",
+ to_user ? "from" : "to", ptr, type ? : "unknown", len);
+ /*
+ * For greater effect, it would be nice to do do_group_exit(),
+ * but BUG() actually hooks all the lock-breaking and per-arch
+ * Oops code, so that is used here instead.
+ */
+ BUG();
+}
+
+/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
+static bool overlaps(const void *ptr, unsigned long n, unsigned long low,
+ unsigned long high)
+{
+ unsigned long check_low = (uintptr_t)ptr;
+ unsigned long check_high = check_low + n;
+
+ /* Does not overlap if entirely above or entirely below. */
+ if (check_low >= high || check_high <= low)
+ return false;
+
+ return true;
+}
+
+/* Is this address range in the kernel text area? */
+static inline const char *check_kernel_text_object(const void *ptr,
+ unsigned long n)
+{
+ unsigned long textlow = (unsigned long)_stext;
+ unsigned long texthigh = (unsigned long)_etext;
+ unsigned long textlow_linear, texthigh_linear;
+
+ if (overlaps(ptr, n, textlow, texthigh))
+ return "<kernel text>";
+
+ /*
+ * Some architectures have virtual memory mappings with a secondary
+ * mapping of the kernel text, i.e. there is more than one virtual
+ * kernel address that points to the kernel image. This is usually
+ * the case when there is a separate linear physical memory mapping,
+ * where __pa() is not just the reverse of __va(). This can be detected
+ * and checked:
+ */
+ textlow_linear = (unsigned long)lm_alias(textlow);
+ /* No different mapping: we're done. */
+ if (textlow_linear == textlow)
+ return NULL;
+
+ /* Check the secondary mapping... */
+ texthigh_linear = (unsigned long)lm_alias(texthigh);
+ if (overlaps(ptr, n, textlow_linear, texthigh_linear))
+ return "<linear kernel text>";
+
+ return NULL;
+}
+
+static inline const char *check_bogus_address(const void *ptr, unsigned long n)
+{
+ /* Reject if object wraps past end of memory. */
+ if ((unsigned long)ptr + n < (unsigned long)ptr)
+ return "<wrapped address>";
+
+ /* Reject if NULL or ZERO-allocation. */
+ if (ZERO_OR_NULL_PTR(ptr))
+ return "<null>";
+
+ return NULL;
+}
+
+/* Checks for allocs that are marked in some way as spanning multiple pages. */
+static inline const char *check_page_span(const void *ptr, unsigned long n,
+ struct page *page, bool to_user)
+{
+#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
+ const void *end = ptr + n - 1;
+ struct page *endpage;
+ bool is_reserved, is_cma;
+
+ /*
+ * Sometimes the kernel data regions are not marked Reserved (see
+ * check below). And sometimes [_sdata,_edata) does not cover
+ * rodata and/or bss, so check each range explicitly.
+ */
+
+ /* Allow reads of kernel rodata region (if not marked as Reserved). */
+ if (ptr >= (const void *)__start_rodata &&
+ end <= (const void *)__end_rodata) {
+ if (!to_user)
+ return "<rodata>";
+ return NULL;
+ }
+
+ /* Allow kernel data region (if not marked as Reserved). */
+ if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
+ return NULL;
+
+ /* Allow kernel bss region (if not marked as Reserved). */
+ if (ptr >= (const void *)__bss_start &&
+ end <= (const void *)__bss_stop)
+ return NULL;
+
+ /* Is the object wholly within one base page? */
+ if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
+ ((unsigned long)end & (unsigned long)PAGE_MASK)))
+ return NULL;
+
+ /* Allow if fully inside the same compound (__GFP_COMP) page. */
+ endpage = virt_to_head_page(end);
+ if (likely(endpage == page))
+ return NULL;
+
+ /*
+ * Allow the range only if it is entirely either Reserved (i.e.
+ * special or device memory) or CMA, which the loop below verifies.
+ * Otherwise, reject since the object spans several independently
+ * allocated pages.
+ */
+ is_reserved = PageReserved(page);
+ is_cma = is_migrate_cma_page(page);
+ if (!is_reserved && !is_cma)
+ return "<spans multiple pages>";
+
+ for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
+ page = virt_to_head_page(ptr);
+ if (is_reserved && !PageReserved(page))
+ return "<spans Reserved and non-Reserved pages>";
+ if (is_cma && !is_migrate_cma_page(page))
+ return "<spans CMA and non-CMA pages>";
+ }
+#endif
+
+ return NULL;
+}
+
+static inline const char *check_heap_object(const void *ptr, unsigned long n,
+ bool to_user)
+{
+ struct page *page;
+
+ /*
+ * Some architectures (arm64) return true for virt_addr_valid() on
+ * vmalloced addresses. Work around this by checking for vmalloc
+ * first.
+ *
+ * We also need to check for module addresses explicitly since we
+ * may copy static data from modules to userspace.
+ */
+ if (is_vmalloc_or_module_addr(ptr))
+ return NULL;
+
+ if (!virt_addr_valid(ptr))
+ return NULL;
+
+ page = virt_to_head_page(ptr);
+
+ /* Check slab allocator for flags and size. */
+ if (PageSlab(page))
+ return __check_heap_object(ptr, n, page);
+
+ /* Verify object does not incorrectly span multiple pages. */
+ return check_page_span(ptr, n, page, to_user);
+}
+
+/*
+ * Validates that the given object is:
+ * - not bogus address
+ * - known-safe heap or stack object
+ * - not in kernel text
+ */
+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
+{
+ const char *err;
+
+ /* Skip all tests if size is zero. */
+ if (!n)
+ return;
+
+ /* Check for invalid addresses. */
+ err = check_bogus_address(ptr, n);
+ if (err)
+ goto report;
+
+ /* Check for bad heap object. */
+ err = check_heap_object(ptr, n, to_user);
+ if (err)
+ goto report;
+
+ /* Check for bad stack object. */
+ switch (check_stack_object(ptr, n)) {
+ case NOT_STACK:
+ /* Object is not touching the current process stack. */
+ break;
+ case GOOD_FRAME:
+ case GOOD_STACK:
+ /*
+ * Object is either in the correct frame (when it
+ * is possible to check) or just generally on the
+ * process stack (when frame checking not available).
+ */
+ return;
+ default:
+ err = "<process stack>";
+ goto report;
+ }
+
+ /* Check for object in kernel to avoid text exposure. */
+ err = check_kernel_text_object(ptr, n);
+ if (!err)
+ return;
+
+report:
+ report_usercopy(ptr, n, to_user, err);
+}
+EXPORT_SYMBOL(__check_object_size);
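__check_object_size() is reached from the copy_to_user()/copy_from_user() paths when CONFIG_HARDENED_USERCOPY is enabled. A hypothetical sketch of how an architecture's copy routine could wire it in; the real call sites live in each arch's uaccess code, typically via the check_object_size() wrapper:

#include <linux/thread_info.h>
#include <linux/uaccess.h>

static inline unsigned long
hardened_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/* Validate the kernel-side object before exposing it to user space. */
	check_object_size(from, n, true);
	return __copy_to_user(to, from, n);
}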
diff --git a/mm/util.c b/mm/util.c
index 9df49530a8b58f..42532c3a848b9d 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -309,7 +309,7 @@ int __weak get_user_pages_fast(unsigned long start,
{
struct mm_struct *mm = current->mm;
return get_user_pages_unlocked(current, mm, start, nr_pages,
- write, 0, pages);
+ pages, write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
diff --git a/mm/vmacache.c b/mm/vmacache.c
index fd09dc9c6812bb..9c8ff3d4eda92c 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -6,44 +6,6 @@
#include <linux/vmacache.h>
/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
-/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
* task's vmacache pertains to a different mm (ie, its own). There is
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86295bfec93ba6..16be696b37a71d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1487,7 +1487,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
addr))
return;
- area = remove_vm_area(addr);
+ area = find_vmap_area((unsigned long)addr)->vm;
if (unlikely(!area)) {
WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
addr);
@@ -1497,6 +1497,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
debug_check_no_locks_freed(addr, get_vm_area_size(area));
debug_check_no_obj_freed(addr, get_vm_area_size(area));
+ remove_vm_area(addr);
if (deallocate_pages) {
int i;
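The two __vunmap() hunks reorder teardown so the debug checks run while the area is still registered; remove_vm_area() unlinks it only afterwards. The resulting order, as a comment sketch:

    /*
     * __vunmap() ordering after this change (sketch):
     *   1. area = find_vmap_area(addr)->vm;   look up without unlinking
     *   2. debug_check_no_locks_freed(...);   area still registered here
     *   3. debug_check_no_obj_freed(...);
     *   4. remove_vm_area(addr);              unlink only after the checks
     */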
@@ -2202,7 +2203,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
if (!(area->flags & VM_USERMAP))
return -EINVAL;
- if (kaddr + size > area->addr + area->size)
+ if (kaddr + size > area->addr + get_vm_area_size(area))
return -EINVAL;
do {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 019c0de6f24110..0cc627cbc5e621 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -44,6 +44,7 @@
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
+#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/printk.h>
@@ -1342,7 +1343,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
return ret;
mapping = page_mapping(page);
- migrate_dirty = mapping && mapping->a_ops->migratepage;
+ migrate_dirty = !mapping || mapping->a_ops->migratepage;
unlock_page(page);
if (!migrate_dirty)
return ret;
@@ -1971,9 +1972,6 @@ static int file_is_low(struct lruvec *lruvec)
struct zone *zone = lruvec_zone(lruvec);
u64 pages_min = min_filelist_kbytes >> (PAGE_SHIFT - 10);
- if (!mem_cgroup_disabled())
- return false;
-
pages_min *= zone->managed_pages;
do_div(pages_min, totalram_pages);
@@ -2049,9 +2047,10 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
/*
* Do not scan file pages when swap is allowed by __GFP_IO and
- * file page count is low.
+ * it's global reclaim and file page count is low.
*/
- if ((sc->gfp_mask & __GFP_IO) && file_is_low(lruvec)) {
+ if ((sc->gfp_mask & __GFP_IO) && global_reclaim(sc) &&
+ file_is_low(lruvec)) {
scan_balance = SCAN_ANON;
goto out;
}
@@ -3891,17 +3890,16 @@ int page_evictable(struct page *page)
return ret;
}
-#ifdef CONFIG_SHMEM
/**
- * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
- * @pages: array of pages to check
- * @nr_pages: number of pages to check
- *
- * Checks pages for evictability and moves them to the appropriate lru list.
+ * check_move_unevictable_pages - check pages for evictability and move to
+ * appropriate zone lru list
+ * @pvec: pagevec with lru pages to check
*
- * This function is only used for SysV IPC SHM_UNLOCK.
+ * Checks pages for evictability, if an evictable page is in the unevictable
+ * lru list, moves it to the appropriate evictable lru list. This function
+ * should be only used for lru pages.
*/
-void check_move_unevictable_pages(struct page **pages, int nr_pages)
+void check_move_unevictable_pages(struct pagevec *pvec)
{
struct lruvec *lruvec;
struct zone *zone = NULL;
@@ -3909,8 +3907,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
int pgrescued = 0;
int i;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pages[i];
+ for (i = 0; i < pvec->nr; i++) {
+ struct page *page = pvec->pages[i];
struct zone *pagezone;
pgscanned++;
@@ -3943,4 +3941,4 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
spin_unlock_irq(&zone->lru_lock);
}
}
-#endif /* CONFIG_SHMEM */
+EXPORT_SYMBOL_GPL(check_move_unevictable_pages);
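With the pagevec-based signature exported, callers batch pages before handing them over. A hypothetical caller, assuming the 4.4-era pagevec API where pagevec_init() takes a cold argument and pagevec_add() returns 0 once the vector is full:

    /* Hypothetical batching caller for the new interface. */
    static void rescue_unevictable(struct page **pages, int nr)
    {
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec, 0);
        for (i = 0; i < nr; i++) {
            if (!pagevec_add(&pvec, pages[i])) {
                check_move_unevictable_pages(&pvec);
                pagevec_reinit(&pvec);
            }
        }
        if (pagevec_count(&pvec))
            check_move_unevictable_pages(&pvec);
    }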
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 5c9e6239018ac0..6467d83bb9cca7 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -784,6 +784,9 @@ const char * const vmstat_text[] = {
"pgfault",
"pgmajfault",
+ "pgmajfault_s",
+ "pgmajfault_a",
+ "pgmajfault_f",
TEXTS_FOR_ZONES("pgrefill")
TEXTS_FOR_ZONES("pgsteal_kswapd")
@@ -807,6 +810,7 @@ const char * const vmstat_text[] = {
"drop_pagecache",
"drop_slab",
+ "oom_kill",
#ifdef CONFIG_NUMA_BALANCING
"numa_pte_updates",
@@ -860,6 +864,9 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
+#else
+ "", /* nr_tlb_remote_flush */
+ "", /* nr_tlb_remote_flush_received */
#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
@@ -868,7 +875,6 @@ const char * const vmstat_text[] = {
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
- "vmacache_full_flushes",
#endif
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
@@ -1336,6 +1342,8 @@ static void *vmstat_start(struct seq_file *m, loff_t *pos)
all_vm_events(v);
v[PGPGIN] /= 2; /* sectors -> kbytes */
v[PGPGOUT] /= 2;
+ /* Add up page faults */
+ v[PGMAJFAULT] = v[PGMAJFAULT_S] + v[PGMAJFAULT_A] + v[PGMAJFAULT_F];
#endif
return (unsigned long *)m->private + *pos;
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 568015e2fe7a13..87a8491909ee02 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1018,6 +1018,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
ret = -ENOMEM;
goto reject;
}
+
+ /* A second zswap_is_full() check after zswap_shrink(), since
+ * one eviction may not have brought the pool back under
+ * max_pool_percent.
+ */
+ if (zswap_is_full()) {
+ ret = -ENOMEM;
+ goto reject;
+ }
}
/* allocate entry */
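Condensed, the store path's limit handling now reads as sketched below; the re-check matters because zswap_shrink() evicts a single entry and may not bring the pool back under max_pool_percent:

    /* Control-flow sketch of zswap_frontswap_store() limit handling. */
    if (zswap_is_full()) {
        zswap_pool_limit_hit++;
        if (zswap_shrink()) {
            zswap_reject_reclaim_fail++;
            ret = -ENOMEM;
            goto reject;
        }
        /* One eviction may not be enough; re-check the limit. */
        if (zswap_is_full()) {
            ret = -ENOMEM;
            goto reject;
        }
    }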
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 346b5c1a91851e..c40eb04dd856af 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -569,6 +569,7 @@ int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
hdr.hop_limit, &hdr.daddr);
skb_push(skb, sizeof(hdr));
+ skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
diff --git a/net/9p/client.c b/net/9p/client.c
index 3ff26eb1ea20d1..443db202db093b 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -156,6 +156,12 @@ static int parse_opts(char *opts, struct p9_client *clnt)
ret = r;
continue;
}
+ if (option < 4096) {
+ p9_debug(P9_DEBUG_ERROR,
+ "msize should be at least 4k\n");
+ ret = -EINVAL;
+ continue;
+ }
clnt->msize = option;
break;
case Opt_trans:
@@ -931,7 +937,7 @@ static int p9_client_version(struct p9_client *c)
{
int err = 0;
struct p9_req_t *req;
- char *version;
+ char *version = NULL;
int msize;
p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
@@ -972,10 +978,18 @@ static int p9_client_version(struct p9_client *c)
else if (!strncmp(version, "9P2000", 6))
c->proto_version = p9_proto_legacy;
else {
+ p9_debug(P9_DEBUG_ERROR,
+ "server returned an unknown version: %s\n", version);
err = -EREMOTEIO;
goto error;
}
+ if (msize < 4096) {
+ p9_debug(P9_DEBUG_ERROR,
+ "server returned a msize < 4096: %d\n", msize);
+ err = -EREMOTEIO;
+ goto error;
+ }
if (msize < c->msize)
c->msize = msize;
@@ -1040,6 +1054,13 @@ struct p9_client *p9_client_create(const char *dev_name, char *options)
if (clnt->msize > clnt->trans_mod->maxsize)
clnt->msize = clnt->trans_mod->maxsize;
+ if (clnt->msize < 4096) {
+ p9_debug(P9_DEBUG_ERROR,
+ "Please specify a msize of at least 4k\n");
+ err = -EINVAL;
+ goto close_trans;
+ }
+
err = p9_client_version(clnt);
if (err)
goto close_trans;
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 16d28756598789..145f80518064ad 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -46,10 +46,15 @@ p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
void p9stat_free(struct p9_wstat *stbuf)
{
kfree(stbuf->name);
+ stbuf->name = NULL;
kfree(stbuf->uid);
+ stbuf->uid = NULL;
kfree(stbuf->gid);
+ stbuf->gid = NULL;
kfree(stbuf->muid);
+ stbuf->muid = NULL;
kfree(stbuf->extension);
+ stbuf->extension = NULL;
}
EXPORT_SYMBOL(p9stat_free);
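Clearing each pointer after kfree() makes p9stat_free() idempotent, since kfree(NULL) is a no-op, so a second call on the same p9_wstat is harmless. The same idiom as a hypothetical helper macro:

    /* Hypothetical helper expressing the free-and-clear idiom. */
    #define kfree_and_null(p)    \
        do {                     \
            kfree(p);            \
            (p) = NULL;          \
        } while (0)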
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index bced8c074c1280..2f68ffda3715b4 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m)
spin_lock_irqsave(&p9_poll_lock, flags);
list_del_init(&m->poll_pending_link);
spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+ flush_work(&p9_poll_work);
}
/**
@@ -933,7 +935,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
if (err < 0)
return err;
- if (valid_ipaddr4(addr) < 0)
+ if (addr == NULL || valid_ipaddr4(addr) < 0)
return -EINVAL;
csocket = NULL;
@@ -981,6 +983,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
csocket = NULL;
+ if (addr == NULL)
+ return -EINVAL;
+
if (strlen(addr) >= UNIX_PATH_MAX) {
pr_err("%s (%d): address too long: %s\n",
__func__, task_pid_nr(current), addr);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 52b4a2f993f2c9..f42550dd356007 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -644,6 +644,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
struct ib_qp_init_attr qp_attr;
struct ib_cq_init_attr cq_attr = {};
+ if (addr == NULL)
+ return -EINVAL;
+
/* Parse the transport specific mount options */
err = parse_opts(args, &opts);
if (err < 0)
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 2ddeecca5b12aa..2a15b6aa9cddb7 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -192,7 +192,7 @@ static int pack_sg_list(struct scatterlist *sg, int start,
s = rest_of_page(data);
if (s > count)
s = count;
- BUG_ON(index > limit);
+ BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_buf(&sg[index++], data, s);
@@ -237,6 +237,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit,
s = PAGE_SIZE - data_off;
if (s > count)
s = count;
+ BUG_ON(index >= limit);
/* Make sure we don't terminate early. */
sg_unmark_end(&sg[index]);
sg_set_page(&sg[index++], pdata[i++], s, data_off);
@@ -409,6 +410,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
p9_debug(P9_DEBUG_TRANS, "virtio request\n");
if (uodata) {
+ __le32 sz;
int n = p9_get_mapped_pages(chan, &out_pages, uodata,
outlen, &offs, &need_drop);
if (n < 0)
@@ -419,6 +421,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4);
outlen = n;
}
+ /* The size field of the message must include the length of the
+ * header and the length of the data. We didn't actually know
+ * the length of the data until this point so add it in now.
+ */
+ sz = cpu_to_le32(req->tc->size + outlen);
+ memcpy(&req->tc->sdata[0], &sz, sizeof(sz));
} else if (uidata) {
int n = p9_get_mapped_pages(chan, &in_pages, uidata,
inlen, &offs, &need_drop);
@@ -566,7 +574,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
if (IS_ERR(chan->vq)) {
err = PTR_ERR(chan->vq);
- goto out_free_vq;
+ goto out_free_chan;
}
chan->vq->vdev->priv = chan;
spin_lock_init(&chan->lock);
@@ -619,6 +627,7 @@ out_free_tag:
kfree(tag);
out_free_vq:
vdev->config->del_vqs(vdev);
+out_free_chan:
kfree(chan);
fail:
return err;
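The new out_free_chan label fixes the probe unwind: a failure in virtio_find_single_vq() previously jumped to out_free_vq and ran del_vqs() on vqs that were never created. A condensed sketch of the corrected ladder, with do_remaining_setup() standing in for the elided steps:

    static int probe_sketch(struct virtio_device *vdev)
    {
        struct virtio_chan *chan;
        int err;

        chan = kzalloc(sizeof(*chan), GFP_KERNEL);
        if (!chan)
            return -ENOMEM;

        chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
        if (IS_ERR(chan->vq)) {
            err = PTR_ERR(chan->vq);
            goto out_free_chan;          /* no vqs to delete yet */
        }

        err = do_remaining_setup(chan);  /* hypothetical later steps */
        if (err)
            goto out_free_vq;
        return 0;

    out_free_vq:
        vdev->config->del_vqs(vdev);     /* only once vqs exist */
    out_free_chan:
        kfree(chan);
        return err;
    }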
@@ -646,6 +655,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
int ret = -ENOENT;
int found = 0;
+ if (devname == NULL)
+ return -EINVAL;
+
mutex_lock(&virtio_9p_lock);
list_for_each_entry(chan, &virtio_chan_list, chan_list) {
if (!strncmp(devname, chan->tag, chan->tag_len) &&
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 2fdebabbfacd14..2772f6a13fcb4f 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -654,15 +654,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
}
- dev = dev_get_by_name(&init_net, devname);
+ rtnl_lock();
+ dev = __dev_get_by_name(&init_net, devname);
if (!dev) {
+ rtnl_unlock();
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ if (!ax25->ax25_dev) {
+ rtnl_unlock();
+ res = -ENODEV;
+ break;
+ }
ax25_fillin_cb(ax25, ax25->ax25_dev);
- dev_put(dev);
+ rtnl_unlock();
break;
default:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 3d106767b27252..5faca5db638568 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 2fa3be96510161..cd9a24e5b97a09 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
+ ax25_route_lock_use();
route = ax25_get_route(dst, NULL);
if (route) {
digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
ax25_queue_xmit(skb, dev);
put:
- if (route)
- ax25_put_route(route);
+ ax25_route_lock_unuse();
return NETDEV_TX_OK;
}
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index d39097737e387a..149f82bd83fdba 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -40,7 +40,7 @@
#include <linux/export.h>
static ax25_route *ax25_route_list;
-static DEFINE_RWLOCK(ax25_route_lock);
+DEFINE_RWLOCK(ax25_route_lock);
void ax25_rt_device_down(struct net_device *dev)
{
@@ -349,6 +349,7 @@ const struct file_operations ax25_route_fops = {
* Find AX.25 route
*
* Only routes with a reference count of zero can be destroyed.
+ * Must be called with ax25_route_lock read locked.
*/
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
{
@@ -356,7 +357,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
ax25_route *ax25_def_rt = NULL;
ax25_route *ax25_rt;
- read_lock(&ax25_route_lock);
/*
* Bind to the physical interface we heard them on, or the default
* route if none is found;
@@ -379,11 +379,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
if (ax25_spe_rt != NULL)
ax25_rt = ax25_spe_rt;
- if (ax25_rt != NULL)
- ax25_hold_route(ax25_rt);
-
- read_unlock(&ax25_route_lock);
-
return ax25_rt;
}
@@ -414,9 +409,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
ax25_route *ax25_rt;
int err = 0;
- if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
+ ax25_route_lock_use();
+ ax25_rt = ax25_get_route(addr, NULL);
+ if (!ax25_rt) {
+ ax25_route_lock_unuse();
return -EHOSTUNREACH;
-
+ }
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
err = -EHOSTUNREACH;
goto put;
@@ -451,8 +449,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
put:
- ax25_put_route(ax25_rt);
-
+ ax25_route_lock_unuse();
return err;
}
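With per-route refcounting gone, ax25_get_route() relies on the caller holding ax25_route_lock; ax25_route_lock_use() and ax25_route_lock_unuse() are thin read-lock wrappers around it. The caller pattern, sketched with a hypothetical consumer:

    ax25_route_lock_use();
    ax25_rt = ax25_get_route(addr, NULL);
    if (ax25_rt) {
        /* ax25_rt cannot be freed while the read lock is held. */
        use_route(ax25_rt);              /* hypothetical consumer */
    }
    ax25_route_lock_unuse();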
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 5d2f9d4879b20a..d50c3b003dc93a 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -266,7 +266,7 @@ batadv_frag_merge_packets(struct hlist_head *chain)
kfree(entry);
packet = (struct batadv_frag_packet *)skb_out->data;
- size = ntohs(packet->total_size);
+ size = ntohs(packet->total_size) + hdr_size;
/* Make room for the rest of the fragments. */
if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index f11345e163d7f5..3c8d8142e8c63a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -18,7 +18,6 @@
#include "hard-interface.h"
#include "main.h"
-#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -104,8 +103,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
/* recurse over the parent device */
parent_dev = __dev_get_by_index(&init_net, dev_get_iflink(net_dev));
/* if we got a NULL parent_dev there is something broken.. */
- if (WARN(!parent_dev, "Cannot find parent device"))
+ if (!parent_dev) {
+ pr_err("Cannot find parent device\n");
return false;
+ }
ret = batadv_is_on_batman_iface(parent_dev);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 9f1fe6169befa6..4812123e0a2c27 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -209,10 +209,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
soft_iface->trans_start = jiffies;
vid = batadv_get_vid(skb, 0);
+
+ skb_reset_mac_header(skb);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, sizeof(*vhdr)))
+ goto dropped;
vhdr = vlan_eth_hdr(skb);
if (vhdr->h_vlan_encapsulated_proto != ethertype) {
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 59344c7701c044..b69829da172bb7 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -123,6 +123,21 @@ config BT_EVE_HACKS
this option will be on for eve and off for every other device
for now. Yes, this is beyond ugly. Accepting patches :)
+config BT_ENFORCE_CLASSIC_SECURITY
+ bool "Enforces security requirements for Bluetooth classic"
+ depends on BT
+ default y
+ help
+ Enforces Bluetooth classic security requirements by disallowing use
+ of insecure Bluetooth chips, i.e. those that do not support the Read
+ Encryption Key Size command, to prevent BT classic connections with
+ very short encryption keys.
+config BT_HCI_LE_SPLITTER
+ bool "Split LE out for separate userspace stack"
+ depends on BT
+ help
+ Creates a char device to allow a userspace stack to handle LE
+ comms. BlueZ will only see an EDR chip and will act accordingly.
source "drivers/bluetooth/Kconfig"
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index b3ff12eb9b6dcd..5578bf13846836 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -20,5 +20,6 @@ bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
bluetooth-$(CONFIG_BT_LEDS) += leds.o
bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
+bluetooth-$(CONFIG_BT_HCI_LE_SPLITTER) += hci_le_splitter.o
subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 4b325250236fd8..ba9803cc8496d8 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -30,6 +30,7 @@
#include <asm/ioctls.h>
#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_le_splitter.h>
#include <linux/proc_fs.h>
#include "leds.h"
@@ -774,6 +775,10 @@ static int __init bt_init(void)
BT_INFO("HCI device and connection manager initialized");
+ err = hci_le_splitter_sysfs_init();
+ if (err < 0)
+ goto error;
+
err = hci_sock_init();
if (err < 0)
goto error;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index b0495f5273e792..403b4c6b29e489 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -749,18 +749,31 @@ static bool conn_use_rpa(struct hci_conn *conn)
}
static void hci_req_add_le_create_conn(struct hci_request *req,
- struct hci_conn *conn)
+ struct hci_conn *conn,
+ bdaddr_t *direct_rpa)
{
struct hci_cp_le_create_conn cp;
struct hci_dev *hdev = conn->hdev;
u8 own_addr_type;
- /* Update random address, but set require_privacy to false so
- * that we never connect with an non-resolvable address.
+ /* If direct address was provided we use it instead of current
+ * address.
*/
- if (hci_update_random_address(req, false, conn_use_rpa(conn),
- &own_addr_type))
- return;
+ if (direct_rpa) {
+ if (bacmp(&req->hdev->random_addr, direct_rpa))
+ hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+ direct_rpa);
+
+ /* direct address is always RPA */
+ own_addr_type = ADDR_LE_DEV_RANDOM;
+ } else {
+ /* Update random address, but set require_privacy to false so
+ * that we never connect with an non-resolvable address.
+ */
+ if (hci_update_random_address(req, false, conn_use_rpa(conn),
+ &own_addr_type))
+ return;
+ }
memset(&cp, 0, sizeof(cp));
@@ -825,7 +838,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
u8 dst_type, u8 sec_level, u16 conn_timeout,
- u8 role)
+ u8 role, bdaddr_t *direct_rpa)
{
struct hci_conn_params *params;
struct hci_conn *conn;
@@ -941,7 +954,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
}
- hci_req_add_le_create_conn(&req, conn);
+ hci_req_add_le_create_conn(&req, conn, direct_rpa);
create_conn:
err = hci_req_run(&req, create_le_conn_complete);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 233d148c517c78..99b648b16a16fe 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -34,6 +34,7 @@
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_le_splitter.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>
@@ -1336,6 +1337,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
goto done;
}
+ hci_le_splitter_init_start(hdev);
+
set_bit(HCI_RUNNING, &hdev->flags);
hci_sock_dev_event(hdev, HCI_DEV_OPEN);
@@ -1402,7 +1405,20 @@ static int hci_dev_do_open(struct hci_dev *hdev)
clear_bit(HCI_INIT, &hdev->flags);
+#ifdef CONFIG_BT_ENFORCE_CLASSIC_SECURITY
+ /* Don't allow usage of Bluetooth if the chip doesn't support
+ * the Read Encryption Key Size command (byte 20 bit 4). */
+ if (!ret && !(hdev->commands[20] & 0x10)) {
+ WARN(1, "Disabling Bluetooth due to unsupported HCI Read Encryption Key Size command");
+ ret = -EIO;
+ }
+#endif
+
+ if (!ret)
+ ret = hci_le_splitter_init_done(hdev);
+
if (!ret) {
+
hci_dev_hold(hdev);
hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
set_bit(HCI_UP, &hdev->flags);
@@ -1418,6 +1434,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
mgmt_power_on(hdev, ret);
}
} else {
+ hci_le_splitter_init_fail(hdev);
+
/* Init failed, cleanup */
flush_work(&hdev->tx_work);
flush_work(&hdev->cmd_work);
@@ -1526,6 +1544,8 @@ int hci_dev_do_close(struct hci_dev *hdev)
BT_DBG("%s %p", hdev->name, hdev);
+ hci_le_splitter_deinit(hdev);
+
if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
test_bit(HCI_UP, &hdev->flags)) {
@@ -1570,6 +1590,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
hci_dev_lock(hdev);
+ /* Clear the HCI_LE_SCAN_CHANGE_IN_PROGRESS flag if it is still set
+ * (e.g. after an exception) so host and controller state stay in sync.
+ */
+ if (hci_dev_test_flag(hdev, HCI_LE_SCAN_CHANGE_IN_PROGRESS)) {
+ hci_dev_clear_flag(hdev, HCI_LE_SCAN_CHANGE_IN_PROGRESS);
+ hdev->count_scan_change_in_progress = 0;
+ }
+
hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
@@ -2484,13 +2512,24 @@ static void hci_cmd_timeout(struct work_struct *work)
struct hci_dev *hdev = container_of(work, struct hci_dev,
cmd_timer.work);
+ hdev->timeout_cnt++;
if (hdev->sent_cmd) {
struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
u16 opcode = __le16_to_cpu(sent->opcode);
- BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
+ BT_ERR("%s command 0x%4.4x tx timeout (cnt = %u)",
+ hdev->name, opcode, hdev->timeout_cnt);
} else {
- BT_ERR("%s command tx timeout", hdev->name);
+ BT_ERR("%s command tx timeout (cnt = %u)", hdev->name,
+ hdev->timeout_cnt);
+ }
+
+ if (test_bit(HCI_QUIRK_HW_RESET_ON_TIMEOUT, &hdev->quirks) &&
+ hdev->timeout_cnt >= 5) {
+ hdev->timeout_cnt = 0;
+ if (hdev->hw_reset)
+ hdev->hw_reset(hdev);
+ return;
}
atomic_set(&hdev->cmd_cnt, 1);
@@ -3130,6 +3169,10 @@ int hci_register_dev(struct hci_dev *hdev)
hci_sock_dev_event(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
+ // Don't try to power on if LE splitter is not yet set up.
+ if (hci_le_splitter_get_enabled_state() == SPLITTER_STATE_NOT_SET)
+ return id;
+
queue_work(hdev->req_workqueue, &hdev->power_on);
return id;
@@ -3364,6 +3407,11 @@ static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
+ if (!hci_le_splitter_should_allow_bluez_tx(hdev, skb)) {
+ kfree_skb(skb);
+ return;
+ }
+
err = hdev->send(hdev, skb);
if (err < 0) {
BT_ERR("%s sending frame failed (%d)", hdev->name, err);
@@ -4193,6 +4241,9 @@ static void hci_rx_work(struct work_struct *work)
continue;
}
+ if (!hci_le_splitter_should_allow_bluez_rx(hdev, skb))
+ continue;
+
if (test_bit(HCI_INIT, &hdev->flags)) {
/* Don't process data packets in this states. */
switch (hci_skb_pkt_type(skb)) {
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
index 63df63ebfb249a..2fffc5a052438c 100644
--- a/net/bluetooth/hci_debugfs.c
+++ b/net/bluetooth/hci_debugfs.c
@@ -88,17 +88,7 @@ static int __name ## _show(struct seq_file *f, void *ptr) \
return 0; \
} \
\
-static int __name ## _open(struct inode *inode, struct file *file) \
-{ \
- return single_open(file, __name ## _show, inode->i_private); \
-} \
- \
-static const struct file_operations __name ## _fops = { \
- .open = __name ## _open, \
- .read = seq_read, \
- .llseek = seq_lseek, \
- .release = single_release, \
-} \
+DEFINE_SHOW_ATTRIBUTE(__name)
static int features_show(struct seq_file *f, void *ptr)
{
@@ -126,17 +116,7 @@ static int features_show(struct seq_file *f, void *ptr)
return 0;
}
-static int features_open(struct inode *inode, struct file *file)
-{
- return single_open(file, features_show, inode->i_private);
-}
-
-static const struct file_operations features_fops = {
- .open = features_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(features);
static int device_id_show(struct seq_file *f, void *ptr)
{
@@ -150,17 +130,7 @@ static int device_id_show(struct seq_file *f, void *ptr)
return 0;
}
-static int device_id_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_id_show, inode->i_private);
-}
-
-static const struct file_operations device_id_fops = {
- .open = device_id_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_id);
static int device_list_show(struct seq_file *f, void *ptr)
{
@@ -180,17 +150,7 @@ static int device_list_show(struct seq_file *f, void *ptr)
return 0;
}
-static int device_list_open(struct inode *inode, struct file *file)
-{
- return single_open(file, device_list_show, inode->i_private);
-}
-
-static const struct file_operations device_list_fops = {
- .open = device_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(device_list);
static int blacklist_show(struct seq_file *f, void *p)
{
@@ -205,17 +165,7 @@ static int blacklist_show(struct seq_file *f, void *p)
return 0;
}
-static int blacklist_open(struct inode *inode, struct file *file)
-{
- return single_open(file, blacklist_show, inode->i_private);
-}
-
-static const struct file_operations blacklist_fops = {
- .open = blacklist_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(blacklist);
static int uuids_show(struct seq_file *f, void *p)
{
@@ -240,17 +190,7 @@ static int uuids_show(struct seq_file *f, void *p)
return 0;
}
-static int uuids_open(struct inode *inode, struct file *file)
-{
- return single_open(file, uuids_show, inode->i_private);
-}
-
-static const struct file_operations uuids_fops = {
- .open = uuids_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(uuids);
static int remote_oob_show(struct seq_file *f, void *ptr)
{
@@ -269,17 +209,7 @@ static int remote_oob_show(struct seq_file *f, void *ptr)
return 0;
}
-static int remote_oob_open(struct inode *inode, struct file *file)
-{
- return single_open(file, remote_oob_show, inode->i_private);
-}
-
-static const struct file_operations remote_oob_fops = {
- .open = remote_oob_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(remote_oob);
static int conn_info_min_age_set(void *data, u64 val)
{
@@ -443,17 +373,7 @@ static int inquiry_cache_show(struct seq_file *f, void *p)
return 0;
}
-static int inquiry_cache_open(struct inode *inode, struct file *file)
-{
- return single_open(file, inquiry_cache_show, inode->i_private);
-}
-
-static const struct file_operations inquiry_cache_fops = {
- .open = inquiry_cache_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(inquiry_cache);
static int link_keys_show(struct seq_file *f, void *ptr)
{
@@ -469,17 +389,7 @@ static int link_keys_show(struct seq_file *f, void *ptr)
return 0;
}
-static int link_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, link_keys_show, inode->i_private);
-}
-
-static const struct file_operations link_keys_fops = {
- .open = link_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(link_keys);
static int dev_class_show(struct seq_file *f, void *ptr)
{
@@ -493,17 +403,7 @@ static int dev_class_show(struct seq_file *f, void *ptr)
return 0;
}
-static int dev_class_open(struct inode *inode, struct file *file)
-{
- return single_open(file, dev_class_show, inode->i_private);
-}
-
-static const struct file_operations dev_class_fops = {
- .open = dev_class_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dev_class);
static int voice_setting_get(void *data, u64 *val)
{
@@ -692,17 +592,7 @@ static int identity_show(struct seq_file *f, void *p)
return 0;
}
-static int identity_open(struct inode *inode, struct file *file)
-{
- return single_open(file, identity_show, inode->i_private);
-}
-
-static const struct file_operations identity_fops = {
- .open = identity_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity);
static int rpa_timeout_set(void *data, u64 val)
{
@@ -746,17 +636,7 @@ static int random_address_show(struct seq_file *f, void *p)
return 0;
}
-static int random_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, random_address_show, inode->i_private);
-}
-
-static const struct file_operations random_address_fops = {
- .open = random_address_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(random_address);
static int static_address_show(struct seq_file *f, void *p)
{
@@ -769,17 +649,7 @@ static int static_address_show(struct seq_file *f, void *p)
return 0;
}
-static int static_address_open(struct inode *inode, struct file *file)
-{
- return single_open(file, static_address_show, inode->i_private);
-}
-
-static const struct file_operations static_address_fops = {
- .open = static_address_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(static_address);
static ssize_t force_static_address_read(struct file *file,
char __user *user_buf,
@@ -841,17 +711,7 @@ static int white_list_show(struct seq_file *f, void *ptr)
return 0;
}
-static int white_list_open(struct inode *inode, struct file *file)
-{
- return single_open(file, white_list_show, inode->i_private);
-}
-
-static const struct file_operations white_list_fops = {
- .open = white_list_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(white_list);
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
@@ -869,18 +729,7 @@ static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
return 0;
}
-static int identity_resolving_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, identity_resolving_keys_show,
- inode->i_private);
-}
-
-static const struct file_operations identity_resolving_keys_fops = {
- .open = identity_resolving_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys);
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
@@ -898,17 +747,7 @@ static int long_term_keys_show(struct seq_file *f, void *ptr)
return 0;
}
-static int long_term_keys_open(struct inode *inode, struct file *file)
-{
- return single_open(file, long_term_keys_show, inode->i_private);
-}
-
-static const struct file_operations long_term_keys_fops = {
- .open = long_term_keys_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(long_term_keys);
static int conn_min_interval_set(void *data, u64 val)
{
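Each deleted open/fops pair above is generated by DEFINE_SHOW_ATTRIBUTE() from linux/seq_file.h, which expands to approximately:

    #define DEFINE_SHOW_ATTRIBUTE(__name)                                  \
    static int __name ## _open(struct inode *inode, struct file *file)     \
    {                                                                      \
        return single_open(file, __name ## _show, inode->i_private);       \
    }                                                                      \
                                                                           \
    static const struct file_operations __name ## _fops = {               \
        .owner   = THIS_MODULE,                                            \
        .open    = __name ## _open,                                        \
        .read    = seq_read,                                               \
        .llseek  = seq_lseek,                                              \
        .release = single_release,                                         \
    }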
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d1a4ea7fc5f80f..fc6156153f563d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -42,6 +42,9 @@
/* Intel manufacturer ID and specific events */
#define MAUFACTURER_ID_INTEL 0x0002
+/* Minimum encryption key length, value adopted from BLE (7 bytes) */
+#define MIN_ENC_KEY_LEN 7
+
/* Handle HCI Event packets */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
@@ -1433,6 +1436,37 @@ unlock:
hci_dev_unlock(hdev);
}
+static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u8 status = *((u8 *)skb->data);
+ u8 *events;
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
+ if (status) {
+ BT_ERR("Set Event mask failed! status %d", status);
+ return;
+ }
+
+ hci_dev_lock(hdev);
+ events = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_MASK);
+ if (events)
+ memcpy(hdev->event_mask, events, sizeof(hdev->event_mask));
+ else
+ BT_ERR("Set Event mask failed! events is NULL");
+
+ BT_DBG("Event mask byte 0: 0x%02x byte 1: 0x%02x",
+ hdev->event_mask[0], hdev->event_mask[1]);
+ BT_DBG("Event mask byte 2: 0x%02x byte 3: 0x%02x",
+ hdev->event_mask[2], hdev->event_mask[3]);
+ BT_DBG("Event mask byte 4: 0x%02x byte 5: 0x%02x",
+ hdev->event_mask[4], hdev->event_mask[5]);
+ BT_DBG("Event mask byte 6: 0x%02x byte 7: 0x%02x",
+ hdev->event_mask[6], hdev->event_mask[7]);
+
+ hci_dev_unlock(hdev);
+}
+
static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
u8 status = *((u8 *) skb->data);
@@ -2576,18 +2610,31 @@ static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
if (!conn)
goto unlock;
- /* If we fail to read the encryption key size, assume maximum
- * (which is the same we do also when this HCI command isn't
- * supported.
+ /* If we fail to read the encryption key size, abort the connection
+ * since the encryption key entropy is not guaranteed to be large
+ * enough.
*/
if (rp->status) {
BT_ERR("%s failed to read key size for handle %u", hdev->name,
handle);
conn->enc_key_size = HCI_LINK_KEY_SIZE;
+#ifdef CONFIG_BT_ENFORCE_CLASSIC_SECURITY
+ WARN(1, "Read Encryption Key Size command failed, chip may not support this");
+ hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
+ hci_conn_drop(conn);
+ goto unlock;
+#endif
} else {
conn->enc_key_size = rp->key_size;
}
+ if (conn->enc_key_size < MIN_ENC_KEY_LEN) {
+ WARN(1, "Dropping connection with weak encryption key length");
+ hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
+ hci_conn_drop(conn);
+ goto unlock;
+ }
+
if (conn->state == BT_CONFIG) {
conn->state = BT_CONNECTED;
hci_connect_cfm(conn, 0);
@@ -2693,7 +2740,14 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
BT_ERR("Sending HCI Read Encryption Key Size failed");
conn->enc_key_size = HCI_LINK_KEY_SIZE;
+#ifdef CONFIG_BT_ENFORCE_CLASSIC_SECURITY
+ WARN(1, "Failed sending HCI Read Encryption Key Size, chip may not support this");
+ hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
+ hci_conn_drop(conn);
+ goto unlock;
+#else
goto notify;
+#endif
}
goto unlock;
@@ -3066,6 +3120,10 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
hci_cc_write_ssp_debug_mode(hdev, skb);
break;
+ case HCI_OP_SET_EVENT_MASK:
+ hci_cc_set_event_mask(hdev, skb);
+ break;
+
default:
BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
break;
@@ -3376,13 +3434,13 @@ static void hci_vendor_evt(struct hci_dev *hdev, struct sk_buff *skb)
case HCI_EV_INTEL_FATAL_EXCEPTION:
case HCI_EV_INTEL_DEBUG_EXCEPTION:
if (skb->len < 1) {
- BT_INFO("Evt ID:%02X", evt_id);
+ BT_WARN("Evt ID:%02X", evt_id);
return;
}
b = (u8 *)skb->data;
for (i = 0; i < skb->len && i < HCI_MAX_EVENT_SIZE; ++i)
sprintf(line + strlen(line), " %02X", b[i]);
- BT_INFO("Evt ID: %02X data:%s", evt_id, line);
+ BT_WARN("Evt ID: %02X data:%s", evt_id, line);
break;
default:
if (skb->len < 1) {
@@ -4711,7 +4769,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
/* This function requires the caller holds hdev->lock */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
bdaddr_t *addr,
- u8 addr_type, u8 adv_type)
+ u8 addr_type, u8 adv_type,
+ bdaddr_t *direct_rpa)
{
struct hci_conn *conn;
struct hci_conn_params *params;
@@ -4762,7 +4821,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
}
conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
- HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
+ HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
+ direct_rpa);
if (!IS_ERR(conn)) {
/* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
* by higher layer that tried to connect, if no then
@@ -4872,8 +4932,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
bdaddr_type = irk->addr_type;
}
- /* Check if we have been requested to connect to this device */
- conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+ /* Check if we have been requested to connect to this device.
+ *
+ * direct_addr is set only for directed advertising reports (it is NULL
+ * for advertising reports) and is already verified to be RPA above.
+ */
+ conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
+ direct_addr);
if (conn && type == LE_ADV_IND) {
/* Store report for later inclusion by
* mgmt_device_connected
@@ -5270,6 +5335,12 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
return true;
}
+ /* Check if request ended in Command Status - no way to retrieve
+ * any extra parameters in this case.
+ */
+ if (hdr->evt == HCI_EV_CMD_STATUS)
+ return false;
+
if (hdr->evt != HCI_EV_CMD_COMPLETE) {
BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
return false;
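For reference, a Command Status event carries only a status/opcode pair and no return parameters, which is why the code above bails out early; its layout in include/net/bluetooth/hci.h:

    struct hci_ev_cmd_status {
        __u8    status;
        __u8    ncmd;
        __le16  opcode;
    } __packed;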
diff --git a/net/bluetooth/hci_le_splitter.c b/net/bluetooth/hci_le_splitter.c
new file mode 100644
index 00000000000000..4c38ca9165fdb2
--- /dev/null
+++ b/net/bluetooth/hci_le_splitter.c
@@ -0,0 +1,1095 @@
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci_le_splitter.h>
+#include <linux/miscdevice.h>
+#include <linux/semaphore.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <asm/atomic.h>
+#include <asm/ioctls.h>
+
+
+/* RXed bytes we'll queue before giving up on userspace; picked arbitrarily */
+#define MAX_RX_QUEUE_SZ 524288
+
+
+#undef pr_fmt
+#define pr_fmt(fmt) "le_splitter " fmt
+
+
+struct hci_le_splitter_le_conn {
+ u16 conn_id;
+ size_t tx_in_flight;
+};
+
+
+/* This mutex protects the below (essentially splitter internal state) */
+static DEFINE_MUTEX(hci_state_lock);
+static struct hci_dev *cur_dev;
+static u16 le_waiting_on_opcode;
+static int local_lmp_ver;
+static struct hci_le_splitter_le_conn tracked_conns[HCI_LE_SPLIT_MAX_LE_CONNS];
+static u32 max_pkts_in_flight;
+static u32 cur_pkts_in_flight;
+static u8 splitter_enable_state = SPLITTER_STATE_NOT_SET;
+
+/* protects command sequencing */
+static DEFINE_SEMAPHORE(cmd_sem);
+
+/* "is userspace connected?" */
+static atomic_t usr_connected = ATOMIC_INIT(0);
+
+/* "is chip in state to talk to second stack?" */
+static atomic_t chip_ready_for_second_stack = ATOMIC_INIT(0);
+
+/* protects messages waiting to be read */
+static DEFINE_MUTEX(usr_msg_q_lock);
+static DECLARE_WAIT_QUEUE_HEAD(usr_msg_wait_q);
+static struct sk_buff_head usr_msg_q;
+static size_t usr_msg_q_len; /* in bytes of actual data */
+
+
+static DECLARE_WAIT_QUEUE_HEAD(tx_has_room_wait_q);
+
+
+
+/* event masks for things we need */
+/* disconnect, encr_change, encr_refresh */
+static const uint64_t evtsNeeded = 0x0000800000000090ULL;
+/* LE meta event */
+static const uint64_t evtsLE = 0x2000000000000000ULL;
+
+#define INVALID_CONN_ID 0xffff
+
+/* missing hci defs */
+#define HCI_OGF_LE 0x08
+#define BT_HCI_VERSION_3 5
+#define BT_LMP_VERSION_3 5
+#define HCI_OP_READ_LE_HOST_SUPPORTED 0x0c6c
+
+struct hci_rp_read_le_host_supported {
+ u8 status;
+ u8 le;
+ u8 simul;
+} __packed;
+
+
+
+static void hci_le_splitter_usr_queue_reset_message(bool allow_commands);
+static struct hci_le_splitter_le_conn *cid_find_le_conn(u16 cid);
+static struct device_attribute sysfs_attr;
+static struct miscdevice mdev;
+
+
+/* always called with usr_msg_q_lock held */
+static void hci_le_splitter_usr_purge_rx_q(void)
+{
+ struct sk_buff *skb;
+ while (!skb_queue_empty(&usr_msg_q)) {
+ skb = skb_dequeue(&usr_msg_q);
+ if (skb)
+ kfree_skb(skb);
+ }
+ usr_msg_q_len = 0;
+}
+
+/* always called with hci_state_lock held */
+static void reset_tracked_le_conns(void)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tracked_conns); ++i) {
+ tracked_conns[i].conn_id = INVALID_CONN_ID;
+ tracked_conns[i].tx_in_flight = 0;
+ }
+ cur_pkts_in_flight = 0;
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_set_our_dev(struct hci_dev *hdev)
+{
+ if (!cur_dev) {
+
+ cur_dev = hdev;
+
+ local_lmp_ver = -1;
+ max_pkts_in_flight = 0;
+ le_waiting_on_opcode = 0;
+ reset_tracked_le_conns();
+
+ return true;
+ }
+
+ return false;
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_is_our_dev(struct hci_dev *hdev)
+{
+ return cur_dev == hdev;
+}
+
+void hci_le_splitter_init_start(struct hci_dev *hdev)
+{
+ mutex_lock(&hci_state_lock);
+
+ if (hci_le_splitter_set_our_dev(hdev))
+ pr_info("HCI splitter inited\n");
+ else
+ pr_info("HCI splitter ignoring dev\n");
+
+ mutex_unlock(&hci_state_lock);
+}
+
+int hci_le_splitter_init_done(struct hci_dev *hdev)
+{
+ /* nothing to do for now */
+
+ return 0;
+}
+
+static ssize_t hci_le_splitter_read(struct file *file, char __user *userbuf,
+ size_t bytes, loff_t *off)
+{
+ struct sk_buff *skb;
+ u8 packet_typ;
+ ssize_t ret;
+
+
+ mutex_lock(&usr_msg_q_lock);
+
+ do {
+ while (!usr_msg_q_len) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ goto bailout;
+ }
+ mutex_unlock(&usr_msg_q_lock);
+ if (wait_event_interruptible(usr_msg_wait_q,
+ usr_msg_q_len)) {
+ ret = -EINTR;
+ goto bailout_q_unlocked;
+ }
+ mutex_lock(&usr_msg_q_lock);
+ }
+
+ skb = skb_dequeue(&usr_msg_q);
+
+ } while (!skb);
+
+ /* one byte for hci packet type */
+ packet_typ = hci_skb_pkt_type(skb);
+
+ if (skb->len + sizeof(packet_typ) > bytes) {
+ ret = -ENOMEM;
+ } else if (put_user(packet_typ, userbuf) ||
+ copy_to_user(userbuf + sizeof(packet_typ),
+ skb->data, skb->len)) {
+ ret = -EFAULT;
+ } else {
+ usr_msg_q_len -= skb->len;
+ ret = (ssize_t)skb->len + 1;
+ kfree_skb(skb);
+ }
+
+ if (ret < 0)
+ skb_queue_head(&usr_msg_q, skb);
+
+bailout:
+ mutex_unlock(&usr_msg_q_lock);
+
+bailout_q_unlocked:
+ return ret;
+}
+
+static ssize_t hci_le_splitter_write(struct file *file,
+ const char __user *userbuf, size_t bytes,
+ loff_t *off)
+{
+ struct hci_acl_hdr acl_hdr;
+ struct sk_buff *skb;
+ u16 cmd_val = 0;
+ ssize_t ret;
+ u8 pkt_typ;
+
+ if (bytes < 1)
+ return -EINVAL;
+
+ if (copy_from_user(&pkt_typ, userbuf, sizeof(pkt_typ)))
+ return -EFAULT;
+
+ bytes -= sizeof(pkt_typ);
+ userbuf += sizeof(pkt_typ);
+
+ /* figure out the size and do some sanity checks */
+ if (pkt_typ == HCI_ACLDATA_PKT) {
+
+
+ if (bytes < HCI_ACL_HDR_SIZE)
+ return -EFAULT;
+
+ if (copy_from_user(&acl_hdr, userbuf, HCI_ACL_HDR_SIZE))
+ return -EFAULT;
+
+ /* verify length */
+ if (bytes - __le16_to_cpu(acl_hdr.dlen) - HCI_ACL_HDR_SIZE)
+ return -EINVAL;
+
+ } else if (pkt_typ == HCI_COMMAND_PKT) {
+
+ struct hci_command_hdr cmd_hdr;
+
+ if (bytes < HCI_COMMAND_HDR_SIZE)
+ return -EFAULT;
+
+ if (copy_from_user(&cmd_hdr, userbuf, HCI_COMMAND_HDR_SIZE))
+ return -EFAULT;
+
+ /* verify length */
+ if (bytes - cmd_hdr.plen - HCI_COMMAND_HDR_SIZE)
+ return -EINVAL;
+
+ cmd_val = __le16_to_cpu(cmd_hdr.opcode);
+
+ } else {
+ return -EINVAL;
+ }
+
+ skb = bt_skb_alloc(bytes, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hci_skb_pkt_type(skb) = pkt_typ;
+ if (copy_from_user(skb_put(skb, bytes), userbuf, bytes)) {
+ kfree_skb(skb);
+ return -EFAULT;
+ }
+ __net_timestamp(skb);
+
+ /* perform appropriate actions-before-sending */
+ if (pkt_typ == HCI_COMMAND_PKT) {
+
+ /* commands require the semaphore */
+ down(&cmd_sem);
+ mutex_lock(&hci_state_lock);
+
+ } else {
+ /* ACL data is not allowed without credits
+ * (stack should be keeping track of this)
+ */
+ u16 acl_hndl = hci_handle(__le16_to_cpu(acl_hdr.handle));
+ struct hci_le_splitter_le_conn *conn;
+
+ mutex_lock(&hci_state_lock);
+ if (max_pkts_in_flight == cur_pkts_in_flight) {
+ kfree_skb(skb);
+ ret = -ENOSPC;
+ goto out_unlock;
+ }
+
+ /* find conn & increment its inflight packet counter */
+ conn = cid_find_le_conn(acl_hndl);
+ if (!conn) {
+ kfree_skb(skb);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ conn->tx_in_flight++;
+ cur_pkts_in_flight++;
+ }
+
+ /* perform the actual transmission */
+ if (pkt_typ == HCI_COMMAND_PKT)
+ le_waiting_on_opcode = cmd_val;
+
+ hci_send_to_monitor(cur_dev, skb);
+ skb_orphan(skb);
+ if (cur_dev->send(cur_dev, skb) < 0) {
+ kfree_skb(skb);
+
+ if (pkt_typ == HCI_COMMAND_PKT)
+ up(&cmd_sem);
+
+ ret = -EIO;
+ goto out_unlock;
+ }
+
+ ret = bytes + sizeof(pkt_typ);
+
+out_unlock:
+ mutex_unlock(&hci_state_lock);
+ return ret;
+}
+
+static unsigned int hci_le_splitter_poll(struct file *file,
+ struct poll_table_struct *wait)
+{
+ int ret = 0;
+
+ poll_wait(file, &usr_msg_wait_q, wait);
+
+ mutex_lock(&usr_msg_q_lock);
+ if (usr_msg_q_len)
+ ret |= (POLLIN | POLLRDNORM);
+
+ /* poll for POLLOUT only indicates data TX ability.
+ * commands can always be sent and will block!
+ */
+ poll_wait(file, &tx_has_room_wait_q, wait);
+ if (max_pkts_in_flight > cur_pkts_in_flight)
+ ret |= (POLLOUT | POLLWRNORM);
+
+ mutex_unlock(&usr_msg_q_lock);
+ return ret;
+}
+
+static int hci_le_splitter_open(struct inode *inode, struct file *file)
+{
+ int ret = 0;
+
+ if (atomic_cmpxchg(&usr_connected, 0, 1))
+ return -EBUSY;
+
+ mutex_lock(&usr_msg_q_lock);
+ hci_le_splitter_usr_purge_rx_q();
+ mutex_unlock(&usr_msg_q_lock);
+ hci_le_splitter_usr_queue_reset_message(atomic_read(&chip_ready_for_second_stack) != 0);
+
+ return ret;
+}
+
+static int hci_le_splitter_release(struct inode *inode, struct file *file)
+{
+ int32_t dev_id = -1;
+
+ mutex_lock(&usr_msg_q_lock);
+ hci_le_splitter_usr_purge_rx_q();
+
+ if (atomic_cmpxchg(&usr_connected, 1, 0)) {
+ /* file close while chip was being used - we must reset it */
+ if (cur_dev)
+ dev_id = cur_dev->id;
+ pr_info("reset queued\n");
+ }
+
+ atomic_set(&chip_ready_for_second_stack, 0);
+ mutex_unlock(&usr_msg_q_lock);
+
+ if (dev_id >= 0) {
+ hci_dev_reset(dev_id);
+ /* none of this matters - we must restart bluetoothd to regain ability to run */
+ }
+
+ return 0;
+}
+
+static long hci_le_splitter_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct sk_buff *skb;
+ int readable_sz;
+
+ switch (cmd) {
+ case FIONREAD: /* if we had multiple readers, this would be bad */
+ mutex_lock(&usr_msg_q_lock);
+ skb = skb_peek(&usr_msg_q);
+ readable_sz = skb ? skb->len + 1 : 0;
+ mutex_unlock(&usr_msg_q_lock);
+ return put_user(readable_sz, (int __user *)arg);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t hci_le_splitter_sysfs_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret = 0;
+
+ mutex_lock(&hci_state_lock);
+ switch (splitter_enable_state) {
+ case SPLITTER_STATE_NOT_SET:
+ ret = sprintf(buf, "%s\n", "NOT SET");
+ break;
+ case SPLITTER_STATE_DISABLED:
+ ret = sprintf(buf, "%s\n", "OFF");
+ break;
+ case SPLITTER_STATE_ENABLED:
+ ret = sprintf(buf, "%s\n", "ON");
+ break;
+ }
+ mutex_unlock(&hci_state_lock);
+
+ return ret;
+}
+
+static ssize_t hci_le_splitter_sysfs_enabled_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ ssize_t ret = strlen(buf);
+ bool set;
+
+ if (strtobool(buf, &set) < 0)
+ return -EINVAL;
+
+ mutex_lock(&hci_state_lock);
+ if (splitter_enable_state == SPLITTER_STATE_NOT_SET)
+ splitter_enable_state =
+ set ? SPLITTER_STATE_ENABLED : SPLITTER_STATE_DISABLED;
+ else
+ ret = -EPERM;
+ mutex_unlock(&hci_state_lock);
+
+ return ret;
+}
+
+static const struct file_operations hci_le_splitter_fops = {
+
+ .read = hci_le_splitter_read,
+ .write = hci_le_splitter_write,
+ .open = hci_le_splitter_open,
+ .poll = hci_le_splitter_poll,
+ .release = hci_le_splitter_release,
+ .unlocked_ioctl = hci_le_splitter_ioctl,
+};
+
+static struct miscdevice mdev = {
+
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hci_le",
+ .fops = &hci_le_splitter_fops
+};
+
+static struct device_attribute sysfs_attr =
+ __ATTR(le_splitter_enabled, /* file name */
+ 0660, /* file permissions */
+ hci_le_splitter_sysfs_enabled_show, /* file read fn */
+ hci_le_splitter_sysfs_enabled_store); /* file write fn */
+
+
+void hci_le_splitter_deinit(struct hci_dev *hdev)
+{
+ mutex_lock(&hci_state_lock);
+ if (hci_le_splitter_is_our_dev(hdev)) {
+ mutex_lock(&usr_msg_q_lock);
+ cur_dev = NULL;
+ pr_info("HCI splitter unregistered\n");
+
+ hci_le_splitter_usr_purge_rx_q();
+ wake_up_interruptible(&usr_msg_wait_q);
+ wake_up_interruptible(&tx_has_room_wait_q);
+ atomic_set(&chip_ready_for_second_stack, 0);
+ mutex_unlock(&usr_msg_q_lock);
+ hci_le_splitter_usr_queue_reset_message(false);
+ }
+ mutex_unlock(&hci_state_lock);
+}
+
+void hci_le_splitter_init_fail(struct hci_dev *hdev)
+{
+ (void)hci_le_splitter_deinit(hdev);
+}
+
+bool hci_le_splitter_should_allow_bluez_tx(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ bool ret = true, skipsem = true;
+ u16 opcode = 0;
+
+ mutex_lock(&hci_state_lock);
+
+ if (hci_le_splitter_is_our_dev(hdev) &&
+ bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+
+ void *cmdParams = ((u8 *)skb->data) + HCI_COMMAND_HDR_SIZE;
+ opcode = hci_skb_opcode(skb);
+
+ skipsem = false;
+
+ if (splitter_enable_state == SPLITTER_STATE_NOT_SET) {
+ /* if state is not set, drop all packets */
+ pr_warn("LE splitter not initialized - chip TX denied!\n");
+ ret = false;
+ } else if (splitter_enable_state == SPLITTER_STATE_DISABLED) {
+ /* if disabled - allow all packets */
+ ret = true;
+ skipsem = true;
+ } else if (opcode == HCI_OP_RESET) {
+
+ static bool first = true;
+ if (!first)
+ pr_warn("unexpected chip reset\n");
+ first = false;
+ /* notify other stack of reset, do not allow commands */
+ hci_le_splitter_usr_queue_reset_message(false);
+ reset_tracked_le_conns();
+ skipsem = true;
+
+ } else if (opcode == HCI_OP_WRITE_LE_HOST_SUPPORTED) {
+
+ struct hci_cp_write_le_host_supported *params =
+ cmdParams;
+
+ if (params->le)
+ pr_warn("EDR stack enabling LE -> bad\n");
+ if (params->simul)
+ pr_warn("EDR stack enabling simul -> bad\n");
+
+ params->le = 1;
+ params->simul = 1;
+
+ } else if (opcode == HCI_OP_SET_EVENT_MASK) {
+
+ struct hci_cp_le_set_event_mask *params =
+ cmdParams;
+ __le64 *mask_loc;
+ uint64_t mask, oldmask;
+
+ mask_loc = (__le64 *)params->mask;
+ oldmask = mask = __le64_to_cpup(mask_loc);
+
+ if ((mask & evtsNeeded) != evtsNeeded) {
+ pr_warn("EDR stack blocked some vital events - BAD - fixing\n");
+ mask |= evtsNeeded;
+ }
+ if (mask & evtsLE)
+ pr_info("EDR stack unmasked some events unexpectedly - OK, just weird\n");
+ mask |= evtsLE;
+ *mask_loc = __cpu_to_le64(mask);
+ pr_info("modified event mask 0x%016llX -> 0x%016llX\n",
+ (unsigned long long)oldmask,
+ (unsigned long long)mask);
+
+ /* The chip is now ready for the second stack; queue the reset message only on the first not-ready to ready transition */
+ if (!atomic_cmpxchg(&chip_ready_for_second_stack, 0, 1))
+ hci_le_splitter_usr_queue_reset_message(true);
+
+ } else if (hci_opcode_ogf(opcode) == HCI_OGF_LE) {
+
+ pr_warn("LE command (0x%x) from EDR stack!\n",
+ hci_opcode_ocf(opcode));
+ ret = false;
+ }
+ }
+
+ mutex_unlock(&hci_state_lock);
+
+ if (ret && !skipsem && bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+ if (-ETIMEDOUT == down_timeout(&cmd_sem, msecs_to_jiffies(1000)))
+ pr_err("Breaking semaphore for out-of-order bluez command 0x%x"
+ " write due to sem timeout\n", opcode);
+ }
+ return ret;
+}
+
+/* always called with hci_state_lock held */
+static struct hci_le_splitter_le_conn *cid_find_le_conn(u16 cid)
+{
+ size_t i;
+ for (i = 0; i < ARRAY_SIZE(tracked_conns); i++)
+ if (cid == tracked_conns[i].conn_id)
+ return tracked_conns + i;
+ return NULL;
+}
+
+/* always called with hci_state_lock held */
+static bool cid_is_le_conn(u16 cid)
+{
+ return !!cid_find_le_conn(cid);
+}
+
+/* always takes ownership of skb */
+static void hci_le_splitter_enq_packet(struct sk_buff *skb)
+{
+ mutex_lock(&usr_msg_q_lock);
+ if (MAX_RX_QUEUE_SZ - usr_msg_q_len < skb->len) {
+
+ pr_err("enqueue failed - not enough space to enqueue %u bytes over %zu\n",
+ skb->len, usr_msg_q_len);
+ kfree_skb(skb);
+ } else {
+ usr_msg_q_len += skb->len;
+ skb_queue_tail(&usr_msg_q, skb);
+ }
+ mutex_unlock(&usr_msg_q_lock);
+
+ /* wake up userspace in either case as we have data */
+ wake_up_interruptible(&usr_msg_wait_q);
+}
+
+/* used to indicate to userspace when it is and is not ok to send commands */
+static void hci_le_splitter_usr_queue_reset_message(bool allow_commands)
+{
+ struct sk_buff *skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE +
+ sizeof(struct hci_ev_cmd_complete),
+ GFP_KERNEL);
+ struct hci_ev_cmd_complete *cc;
+ struct hci_event_hdr *ev;
+
+ if (!skb)
+ return;
+
+ ev = (struct hci_event_hdr *)skb_put(skb, HCI_EVENT_HDR_SIZE);
+ cc = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(struct hci_ev_cmd_complete));
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+ ev->evt = HCI_EV_CMD_COMPLETE;
+ ev->plen = sizeof(struct hci_ev_cmd_complete);
+
+ cc->ncmd = allow_commands ? 1 : 0;
+ cc->opcode = cpu_to_le16(HCI_OP_RESET);
+
+ hci_le_splitter_enq_packet(skb);
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_filter_le_meta(const struct hci_ev_le_meta *meta)
+{
+ const struct hci_ev_le_conn_complete *evt;
+
+ /* We always pass LE meta events to the LE half of things. This
+ * filter's main job is to look for successful LE connection completed
+ * events and record the connection ID in the tracked_conns
+ * array used for filtering subsequent messages.
+ */
+ if (HCI_EV_LE_CONN_COMPLETE != meta->subevent)
+ return true;
+
+ evt = (struct hci_ev_le_conn_complete *)(meta + 1);
+ if (!evt->status) {
+ u16 cid = __le16_to_cpu(evt->handle);
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(tracked_conns); i++) {
+ struct hci_le_splitter_le_conn *c = tracked_conns + i;
+
+ if (c->conn_id == INVALID_CONN_ID) {
+ c->conn_id = cid;
+ BUG_ON(c->tx_in_flight);
+ break;
+ }
+ }
+
+ /* At the point where we are receiving a connection completed
+ * event, we should always have room in our tracked array. If
+ * we don't it can only be because of a bookkeeping bug.
+ */
+ BUG_ON(i >= ARRAY_SIZE(tracked_conns));
+ }
+ return true;
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_filter_disconn(const struct hci_ev_disconn_complete *evt)
+{
+ u16 cid = __le16_to_cpu(evt->handle);
+ struct hci_le_splitter_le_conn *c = cid_find_le_conn(cid);
+
+ if (!c)
+ return false;
+
+ BUG_ON(cur_pkts_in_flight < c->tx_in_flight);
+
+ c->conn_id = INVALID_CONN_ID;
+ cur_pkts_in_flight -= c->tx_in_flight;
+ c->tx_in_flight = 0;
+
+ return true;
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_is_our_reply(u16 opcode)
+{
+ if (!le_waiting_on_opcode || le_waiting_on_opcode != opcode)
+ return false;
+
+ le_waiting_on_opcode = 0;
+ return true;
+}
+
+static void hci_le_splitter_filter_ftr_page_0(u8 *features)
+{
+ if (features[4] & BIT(6)) {
+ pr_debug("features[0]: clearing 4,6 - LE supported\n");
+ features[4] &= ~BIT(6);
+ }
+ if (features[6] & BIT(1)) {
+ pr_debug("features[0]: clearing 6,1 - EDR/LE simult\n");
+ features[6] &= ~BIT(1);
+ }
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_filter_cmd_complete(const struct hci_ev_cmd_complete *evt)
+{
+ u16 opcode = __le16_to_cpu(evt->opcode);
+ bool ours = hci_le_splitter_is_our_reply(opcode);
+ int i;
+
+ if (opcode == HCI_OP_LE_READ_BUFFER_SIZE) {
+
+ struct hci_rp_le_read_buffer_size *le_buf_sz =
+ (struct hci_rp_le_read_buffer_size *)(evt + 1);
+
+ if (!le_buf_sz->status && le_buf_sz->le_max_pkt &&
+ le_buf_sz->le_mtu)
+ max_pkts_in_flight = le_buf_sz->le_max_pkt;
+ } else if (opcode == HCI_OP_RESET) {
+ /* Any and all command credits we have/had go out the
+ * window after a reset, and we instead have one. This
+ * code assures our semaphore acts that way too.
+ */
+ while (!down_trylock(&cmd_sem))
+ ;
+ up(&cmd_sem);
+ } else if (opcode == HCI_OP_READ_LE_HOST_SUPPORTED && !ours) {
+ struct hci_rp_read_le_host_supported *repl =
+ (struct hci_rp_read_le_host_supported *)(evt + 1);
+
+ if (repl->le)
+ pr_debug("host_support: clearing le\n");
+ if (repl->simul)
+ pr_debug("host_support: clearing simul\n");
+ repl->le = 0;
+ repl->simul = 0;
+ } else if (opcode == HCI_OP_READ_LOCAL_VERSION && !ours) {
+ struct hci_rp_read_local_version *repl =
+ (struct hci_rp_read_local_version *)(evt + 1);
+
+ if (!repl->status) {
+
+ local_lmp_ver = repl->lmp_ver;
+
+ if (repl->hci_ver > BT_HCI_VERSION_3) {
+ pr_debug("Downgrading BT HCI version from %d\n",
+ repl->hci_ver);
+ repl->hci_ver = BT_HCI_VERSION_3;
+ }
+ if (repl->lmp_ver > BT_LMP_VERSION_3) {
+ pr_debug("Downgrading BT LMP version from %d\n",
+ repl->lmp_ver);
+ repl->lmp_ver = BT_LMP_VERSION_3;
+ }
+ }
+ } else if (opcode == HCI_OP_READ_LOCAL_COMMANDS && !ours) {
+ struct hci_rp_read_local_commands *repl =
+ (struct hci_rp_read_local_commands *)(evt + 1);
+
+ if (repl->commands[24] & 0x60) {
+ pr_debug("supported commands: ~LE in byte 24\n");
+ repl->commands[24] &= ~0x60;
+ }
+ /* these are LE commands */
+ for (i = 25; i <= 28; i++) {
+ if (repl->commands[i])
+ pr_debug("supported commands: clearing byte %d\n",
+ i);
+ repl->commands[i] = 0;
+ }
+ } else if (opcode == HCI_OP_READ_LOCAL_FEATURES && !ours) {
+ struct hci_rp_read_local_features *repl =
+ (struct hci_rp_read_local_features *)(evt + 1);
+
+ hci_le_splitter_filter_ftr_page_0(repl->features);
+ } else if (opcode == HCI_OP_READ_LOCAL_EXT_FEATURES && !ours) {
+ struct hci_rp_read_local_ext_features *repl =
+ (struct hci_rp_read_local_ext_features *)(evt + 1);
+
+ if (repl->page == 0) {
+ hci_le_splitter_filter_ftr_page_0(repl->features);
+ } else if (repl->page == 1) {
+ if (repl->features[0] & 0x02) {
+ pr_debug("features[1]: clr 0,1 - LE supported\n");
+ repl->features[0] &= ~0x02;
+ }
+ if (repl->features[0] & 0x04) {
+ pr_debug("features[1]: clr 0,2 - EDR/LE simult\n");
+ repl->features[0] &= ~0x04;
+ }
+ }
+ } else if (opcode == HCI_OP_READ_BUFFER_SIZE) {
+ struct hci_rp_read_buffer_size *repl =
+ (struct hci_rp_read_buffer_size *)(evt + 1);
+ u16 pkts, edr_pkts, le_pkts, reported_pkts;
+
+ pkts = __le16_to_cpu(repl->acl_max_pkt);
+ /* If we cannot hit our target number of payloads and
+ * leave at least that many payloads for the EDR
+ * stack, then give 1/2 rounded up to the normal stack,
+ * and 1/2 rounded down to LE.
+ */
+ if (pkts < (2 * HCI_LE_SPLITTER_BUFFER_TARGET))
+ le_pkts = pkts >> 1;
+ else
+ le_pkts = HCI_LE_SPLITTER_BUFFER_TARGET;
+ edr_pkts = pkts - le_pkts;
+ reported_pkts = ours ? le_pkts : edr_pkts;
+ pr_info("Chip has %hu bufs, telling %s: '%hu bufs'.\n", pkts,
+ ours ? "LE" : "EDR", reported_pkts);
+ repl->acl_max_pkt = __cpu_to_le16(reported_pkts);
+
+ if (!max_pkts_in_flight)
+ max_pkts_in_flight = le_pkts;
+ } else if (hci_opcode_ogf(opcode) == HCI_OGF_LE && !ours) {
+
+ pr_warn("unexpected LE cmd complete");
+ }
+
+ return ours;
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_filter_cmd_status(const struct hci_ev_cmd_status *evt)
+{
+ u16 opcode = __le16_to_cpu(evt->opcode);
+ bool ours = hci_le_splitter_is_our_reply(opcode);
+
+ if (hci_opcode_ogf(opcode) == HCI_OGF_LE && !ours)
+ pr_warn("unexpected LE status");
+
+ return ours;
+}
+
+/* always called with hci_state_lock held */
+static bool hci_le_splitter_filter_num_comp_pkt(struct hci_ev_num_comp_pkts *evt,
+ int *len_chng)
+{
+ int le_nonzero_conns = 0, edr_nonzero_conns = 0, pkts, i, j, hndl;
+ int le_pkts = 0, edr_pkts = 0, le_conns = 0, edr_conns = 0;
+ struct hci_le_splitter_le_conn *le_conn;
+
+ /* first see how many of each packets there are (ours and bluez's) */
+ for (i = 0; i < evt->num_hndl; i++) {
+
+ hndl = __le16_to_cpu(evt->handles[i].handle);
+ pkts = __le16_to_cpu(evt->handles[i].count);
+ le_conn = cid_find_le_conn(hndl);
+
+ if (le_conn) {
+ le_conns++;
+ le_pkts += pkts;
+ if (pkts) {
+ le_nonzero_conns++;
+ BUG_ON(le_conn->tx_in_flight < pkts);
+ BUG_ON(cur_pkts_in_flight < pkts);
+ le_conn->tx_in_flight -= pkts;
+ cur_pkts_in_flight -= pkts;
+ }
+ } else {
+ edr_conns++;
+ edr_pkts += pkts;
+ if (pkts)
+ edr_nonzero_conns++;
+ }
+ }
+
+ /* if we have le packets, send an event to LE stack */
+ if (le_nonzero_conns) {
+
+ u32 plen = sizeof(struct hci_ev_num_comp_pkts) +
+ sizeof(struct hci_comp_pkts_info) *
+ le_nonzero_conns;
+ struct sk_buff *le_evt = bt_skb_alloc(HCI_EVENT_HDR_SIZE +
+ plen, GFP_KERNEL);
+ if (le_evt) { /* if this fails, you have bigger problems */
+
+ struct hci_event_hdr *new_hdr =
+ (struct hci_event_hdr *)skb_put(le_evt, HCI_EVENT_HDR_SIZE);
+ struct hci_ev_num_comp_pkts *new_evt =
+ (struct hci_ev_num_comp_pkts *)skb_put(le_evt, plen);
+
+ hci_skb_pkt_type(le_evt) = HCI_EVENT_PKT;
+ new_hdr->evt = HCI_EV_NUM_COMP_PKTS;
+
+ new_evt->num_hndl = 0;
+ for (i = 0; i < evt->num_hndl; i++) {
+
+ pkts = __le16_to_cpu(evt->handles[i].count);
+ hndl = __le16_to_cpu(evt->handles[i].handle);
+
+ if (pkts && cid_is_le_conn(hndl)) {
+
+ struct hci_comp_pkts_info *inf =
+ &new_evt->handles[new_evt->num_hndl++];
+
+ inf->handle = evt->handles[i].handle;
+ inf->count = evt->handles[i].count;
+ }
+ }
+
+ new_hdr->plen = plen;
+ hci_le_splitter_enq_packet(le_evt);
+ }
+ }
+
+ /* if we had le conns at all, shrink the packet to remove them */
+ if (le_conns) {
+
+ for (i = 0, j = 0; i < evt->num_hndl; i++) {
+ hndl = __le16_to_cpu(evt->handles[i].handle);
+
+ if (cid_is_le_conn(hndl))
+ continue;
+ evt->handles[j].handle = evt->handles[i].handle;
+ evt->handles[j].count = evt->handles[i].count;
+ j++;
+ }
+
+ /* record the (negative) change in event length from dropped handles */
+ *len_chng = (j - i) * sizeof(struct hci_comp_pkts_info);
+
+ /* adjust counter */
+ evt->num_hndl = j;
+ }
+
+ /* if any LE buffers got freed, signal user */
+ if (le_pkts)
+ wake_up_interruptible(&tx_has_room_wait_q);
+
+ /* if any EDR handles remain in the event, it is not ours to claim */
+ if (evt->num_hndl)
+ return false;
+
+ /* no EDR handles left, so the event is entirely ours to consume */
+ return true;
+}
+
+/* Called with lock held. Return true to let bluez have the event; if we return false, WE must free the packet. */
+static bool hci_le_splitter_should_allow_bluez_rx_evt(struct sk_buff *skb)
+{
+ struct hci_event_hdr *hdr = (void *) skb->data;
+ void *evt_data = ((u8 *) skb->data) + HCI_EVENT_HDR_SIZE;
+ struct hci_ev_le_meta *le_meta = evt_data;
+ struct hci_ev_cmd_status *cmd_status = evt_data;
+ struct hci_ev_cmd_complete *cmd_complete = evt_data;
+ struct hci_ev_encrypt_change *encr_chg = evt_data;
+ struct hci_ev_disconn_complete *disconn = evt_data;
+ struct hci_ev_num_comp_pkts *num_comp_pkts = evt_data;
+ struct hci_ev_key_refresh_complete *key_refr = evt_data;
+ bool isours = false, enq_if_ours = true;
+ int len_chng = 0;
+
+ switch (hdr->evt) {
+ case HCI_EV_DISCONN_COMPLETE:
+ isours = hci_le_splitter_filter_disconn(disconn);
+ break;
+ case HCI_EV_ENCRYPT_CHANGE:
+ isours = cid_is_le_conn(__le16_to_cpu(encr_chg->handle));
+ break;
+ case HCI_EV_CMD_COMPLETE:
+ if (cmd_complete->ncmd)
+ up(&cmd_sem);
+ isours = hci_le_splitter_filter_cmd_complete(cmd_complete);
+ break;
+ case HCI_EV_CMD_STATUS:
+ if (cmd_status->ncmd)
+ up(&cmd_sem);
+ isours = hci_le_splitter_filter_cmd_status(cmd_status);
+ break;
+ case HCI_EV_NUM_COMP_PKTS:
+ isours = hci_le_splitter_filter_num_comp_pkt(num_comp_pkts,
+ &len_chng);
+ enq_if_ours = false;
+ break;
+ case HCI_EV_KEY_REFRESH_COMPLETE:
+ isours = cid_is_le_conn(__le16_to_cpu(key_refr->handle));
+ break;
+ case HCI_EV_LE_META:
+ isours = hci_le_splitter_filter_le_meta(le_meta);
+ break;
+ case HCI_EV_VENDOR:
+ /* always ours */
+ isours = true;
+ break;
+ }
+
+ skb->len += len_chng;
+
+ if (isours && enq_if_ours)
+ hci_le_splitter_enq_packet(skb);
+ else if (isours) /* we still own it, so free it */
+ kfree_skb(skb);
+
+ return !isours;
+}
+
+/* Return true to let bluez have the packet; if we return false, WE must free it. */
+bool hci_le_splitter_should_allow_bluez_rx(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ u16 acl_handle;
+ bool ret = true;
+
+ mutex_lock(&hci_state_lock);
+ if (hci_le_splitter_is_our_dev(hdev)) {
+
+ if (splitter_enable_state == SPLITTER_STATE_NOT_SET) {
+ /* if state is not set, drop all packets */
+ pr_warn("LE splitter not initialized - chip RX denied!\n");
+ kfree_skb(skb);
+ ret = false;
+ } else if (splitter_enable_state == SPLITTER_STATE_DISABLED) {
+ /* if disabled - allow all packets */
+ ret = true;
+ } else switch (hci_skb_pkt_type(skb)) {
+ case HCI_EVENT_PKT:
+ /* invalid (too small) packet? let bluez handle it */
+ if (HCI_EVENT_HDR_SIZE > skb->len)
+ break;
+
+ ret = hci_le_splitter_should_allow_bluez_rx_evt(skb);
+ break;
+
+ case HCI_ACLDATA_PKT:
+ /* data - could be ours. look into it */
+
+ /* invalid (too small) packet? let bluez handle it */
+ if (HCI_ACL_HDR_SIZE > skb->len)
+ break;
+
+ acl_handle = __le16_to_cpu(((struct hci_acl_hdr *)skb->data)->handle);
+ acl_handle = hci_handle(acl_handle);
+
+ if (cid_is_le_conn(acl_handle)) {
+ hci_le_splitter_enq_packet(skb);
+ ret = false;
+ }
+ break;
+
+ case HCI_SCODATA_PKT:
+ /* SCO data is never for LE */
+ break;
+
+ default:
+ /* let weird things go to bluez */
+ break;
+ }
+ }
+ mutex_unlock(&hci_state_lock);
+
+ return ret;
+}
+
+int hci_le_splitter_sysfs_init(void)
+{
+ int err;
+
+ BT_INFO("Initializing LE splitter sysfs");
+ skb_queue_head_init(&usr_msg_q);
+ err = misc_register(&mdev);
+ if (err) {
+ pr_err("Cannot register misc device (%d) - off by default\n", err);
+ splitter_enable_state = SPLITTER_STATE_DISABLED;
+ return err;
+ }
+
+ err = device_create_file(mdev.this_device, &sysfs_attr);
+ if (err) {
+ pr_err("Cannot create sysfs file (%d) - off by default\n", err);
+ splitter_enable_state = SPLITTER_STATE_DISABLED;
+ return err;
+ }
+ return 0;
+}
+
+int hci_le_splitter_get_enabled_state(void)
+{
+ return splitter_enable_state;
+}
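
For reference, a stand-alone C sketch of the ACL buffer split performed in hci_le_splitter_filter_cmd_complete() above. The HCI_LE_SPLITTER_BUFFER_TARGET value here is an assumption for illustration; the real constant is defined outside this hunk.

    #include <stdio.h>

    #define HCI_LE_SPLITTER_BUFFER_TARGET 8 /* assumed value, not from this patch */

    /* Split the controller's ACL buffer count between the LE and EDR stacks:
     * LE gets its full target when the chip can cover both targets, otherwise
     * the pool is halved, with LE taking the rounded-down half.
     */
    static void split_acl_buffers(unsigned int pkts, unsigned int *le,
                                  unsigned int *edr)
    {
            if (pkts < 2 * HCI_LE_SPLITTER_BUFFER_TARGET)
                    *le = pkts / 2;
            else
                    *le = HCI_LE_SPLITTER_BUFFER_TARGET;
            *edr = pkts - *le;
    }

    int main(void)
    {
            unsigned int le, edr;

            split_acl_buffers(7, &le, &edr);        /* 3 LE, 4 EDR */
            printf("7 -> LE %u, EDR %u\n", le, edr);
            split_acl_buffers(20, &le, &edr);       /* 8 LE, 12 EDR */
            printf("20 -> LE %u, EDR %u\n", le, edr);
            return 0;
    }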
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e2f06f1174ea73..ecfb9ff2988169 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -664,6 +664,15 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
cp.enable = LE_SCAN_DISABLE;
BT_DBG("BT_DBG_DG: set scan enable tx: hci_req_add_le_scan_disable (ena=%d)\n", cp.enable);
hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+
+ /* It is possible that the only HCI command built into the command queue
+ * is HCI_OP_LE_SET_SCAN_ENABLE. If the only command is skipped
+ * at run time in hci_core.c:hci_cmd_work(), the corresponding event
+ * would never be received. This would accidentally cause HCI command
+ * timeout. Hence, it is important to add this dummy HCI command to
+ * invoke the hci_req_sync_complete() callback.
+ */
+ hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
}
static void add_to_white_list(struct hci_request *req,
@@ -1988,13 +1997,6 @@ unlock:
hci_dev_unlock(hdev);
}
-static void disable_advertising(struct hci_request *req)
-{
- u8 enable = 0x00;
-
- hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
-}
-
static int active_scan(struct hci_request *req, unsigned long opt)
{
uint16_t interval = opt;
@@ -2006,24 +2008,6 @@ static int active_scan(struct hci_request *req, unsigned long opt)
BT_DBG("%s", hdev->name);
- if (hci_dev_test_flag(hdev, HCI_LE_ADV) || hci_dev_test_flag(hdev, HCI_LE_ADV_CHANGE_IN_PROGRESS)) {
-
- hci_dev_lock(hdev);
-
- /* Don't let discovery abort an outgoing connection attempt
- * that's using directed advertising.
- */
- if (hci_lookup_le_connect(hdev)) {
- hci_dev_unlock(hdev);
- return -EBUSY;
- }
-
- cancel_adv_timeout(hdev);
- hci_dev_unlock(hdev);
-
- disable_advertising(req);
- }
-
/* If controller is scanning, it means the background scanning is
* running. Thus, we should temporarily stop it in order to set the
* discovery scanning parameters.
@@ -2114,7 +2098,10 @@ static void start_discovery(struct hci_dev *hdev, u8 *status)
break;
case DISCOV_TYPE_LE:
timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
- hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
+ /* Reduce the LE active scan duty cycle to 50% by increasing
+ * the scan interval from 11.25ms to 22.5ms
+ */
+ hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT * 2,
HCI_CMD_TIMEOUT, status);
break;
default:
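
The doubled scan interval works because the controller scans for one window per interval, so the duty cycle is window/interval. A small sketch of the arithmetic; the 0x12-slot window and interval encodings are assumed from the usual DISCOV_LE_SCAN_* defaults and are not shown in this hunk.

    #include <stdio.h>

    #define SLOT_MS  0.625            /* one HCI time slot */
    #define SCAN_WIN 0x12             /* assumed: 18 slots = 11.25 ms */
    #define SCAN_INT (0x12 * 2)       /* interval doubled by this patch */

    int main(void)
    {
            double win = SCAN_WIN * SLOT_MS;
            double ivl = SCAN_INT * SLOT_MS;

            /* 11.25 / 22.5 = 50% radio-on time while discovering */
            printf("window %.2f ms, interval %.2f ms, duty %.0f%%\n",
                   win, ivl, 100.0 * win / ivl);
            return 0;
    }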
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 1fc076420d1e91..552e00b07196e3 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -431,8 +431,8 @@ static void hidp_del_timer(struct hidp_session *session)
del_timer(&session->timer);
}
-static void hidp_process_report(struct hidp_session *session,
- int type, const u8 *data, int len, int intr)
+static void hidp_process_report(struct hidp_session *session, int type,
+ const u8 *data, unsigned int len, int intr)
{
if (len > HID_MAX_BUFFER_SIZE)
len = HID_MAX_BUFFER_SIZE;
@@ -774,7 +774,7 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->version = req->version;
hid->country = req->country;
- strncpy(hid->name, req->name, sizeof(req->name) - 1);
+ strncpy(hid->name, req->name, sizeof(hid->name));
snprintf(hid->phys, sizeof(hid->phys), "%pMR",
&l2cap_pi(session->ctrl_sock->sk)->chan->src);
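
The hidp change above sizes the copy by the destination buffer instead of the source, which is what guards against overflowing hid->name when the source array is the larger of the two. A user-space sketch with invented buffer sizes; note that strncpy() still leaves the destination unterminated when the source fills it, so a terminator is forced explicitly here.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char src[16] = "far-too-long-nm";  /* 15 chars + NUL */
            char dst[8];

            strncpy(dst, src, sizeof(dst));    /* bounded by the DESTINATION */
            dst[sizeof(dst) - 1] = '\0';       /* strncpy does not terminate here */
            printf("%s\n", dst);               /* prints "far-too" (truncated) */
            return 0;
    }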
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 3fdd886ad61519..933c6640e391ac 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -3325,16 +3325,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
+ if (len < 0)
+ break;
hint = type & L2CAP_CONF_HINT;
type &= L2CAP_CONF_MASK;
switch (type) {
case L2CAP_CONF_MTU:
+ if (olen != 2)
+ break;
mtu = val;
break;
case L2CAP_CONF_FLUSH_TO:
+ if (olen != 2)
+ break;
chan->flush_to = val;
break;
@@ -3342,26 +3348,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
break;
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *) val, olen);
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *) val, olen);
break;
case L2CAP_CONF_FCS:
+ if (olen != 1)
+ break;
if (val == L2CAP_FCS_NONE)
set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs)) {
- remote_efs = 1;
- memcpy(&efs, (void *) val, olen);
- }
+ if (olen != sizeof(efs))
+ break;
+ remote_efs = 1;
+ memcpy(&efs, (void *) val, olen);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
return -ECONNREFUSED;
-
set_bit(FLAG_EXT_CTRL, &chan->flags);
set_bit(CONF_EWS_RECV, &chan->conf_state);
chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
@@ -3371,7 +3381,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
default:
if (hint)
break;
-
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
break;
@@ -3536,58 +3545,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
switch (type) {
case L2CAP_CONF_MTU:
+ if (olen != 2)
+ break;
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
+ endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
+ if (olen != 2)
+ break;
chan->flush_to = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
+ chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
-
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
rfc.mode != chan->mode)
return -ECONNREFUSED;
-
chan->fcs = 0;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
- if (olen == sizeof(efs)) {
- memcpy(&efs, (void *)val, olen);
-
- if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != L2CAP_SERV_NOTRAFIC &&
- efs.stype != chan->local_stype)
- return -ECONNREFUSED;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs, endptr - ptr);
- }
+ if (olen != sizeof(efs))
+ break;
+ memcpy(&efs, (void *)val, olen);
+ if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != L2CAP_SERV_NOTRAFIC &&
+ efs.stype != chan->local_stype)
+ return -ECONNREFUSED;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
+ if (olen != 1)
+ break;
if (*result == L2CAP_CONF_PENDING)
if (val == L2CAP_FCS_NONE)
set_bit(CONF_RECV_NO_FCS,
@@ -3716,13 +3732,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
switch (type) {
case L2CAP_CONF_RFC:
- if (olen == sizeof(rfc))
- memcpy(&rfc, (void *)val, olen);
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
break;
case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
txwin_ext = val;
break;
}
@@ -7140,7 +7161,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
hcon = hci_connect_le(hdev, dst, dst_type,
chan->sec_level,
HCI_LE_CONN_TIMEOUT,
- HCI_ROLE_SLAVE);
+ HCI_ROLE_SLAVE, NULL);
else
hcon = hci_connect_le_scan(hdev, dst, dst_type,
chan->sec_level,
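
The l2cap_parse_conf_* hunks above all follow one pattern: validate each option's length against the size expected for its type before consuming the value, and stop when the remaining length goes negative. A hypothetical TLV walker showing the same discipline (names and option codes are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct tlv { uint8_t type; uint8_t len; uint8_t val[]; };

    static int parse_opts(const uint8_t *buf, size_t buflen, uint16_t *mtu)
    {
            while (buflen >= sizeof(struct tlv)) {
                    const struct tlv *opt = (const struct tlv *)buf;
                    size_t total = sizeof(struct tlv) + opt->len;

                    if (total > buflen)     /* truncated option: stop, don't overread */
                            return -1;

                    switch (opt->type) {
                    case 0x01:              /* "MTU": value must be exactly 2 bytes */
                            if (opt->len != 2)
                                    break;  /* skip malformed option */
                            memcpy(mtu, opt->val, sizeof(*mtu));
                            break;
                    default:                /* unknown options are skipped */
                            break;
                    }
                    buf += total;
                    buflen -= total;
            }
            return 0;
    }

    int main(void)
    {
            const uint8_t buf[] = { 0x01, 0x02, 0xa0, 0x02 };
            uint16_t mtu = 0;

            parse_opts(buf, sizeof(buf), &mtu);  /* host-endian in this sketch */
            printf("mtu 0x%04x\n", mtu);
            return 0;
    }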
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index b2a647fe36aebd..132eebe8d957c8 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -107,6 +107,7 @@ static const u16 mgmt_commands[] = {
MGMT_OP_READ_EXT_INFO,
MGMT_OP_SET_APPEARANCE,
MGMT_OP_SET_ADVERTISING_INTERVALS,
+ MGMT_OP_SET_EVENT_MASK,
};
static const u16 mgmt_events[] = {
@@ -2317,9 +2318,8 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
/* LE address type */
addr_type = le_addr_type(cp->addr.type);
- hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
-
- err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
+ /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
+ err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp,
@@ -2333,8 +2333,6 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto done;
}
- /* Abort any ongoing SMP pairing */
- smp_cancel_pairing(conn);
/* Defer clearing up the connection parameters until closing to
* give a chance of keeping them if a repairing happens.
@@ -4347,6 +4345,72 @@ static int set_advertising_intervals(struct sock *sk, struct hci_dev *hdev,
return err;
}
+static void set_event_mask_complete(struct hci_dev *hdev, u8 status, u16 opcode)
+{
+ struct mgmt_pending_cmd *cmd;
+
+ BT_DBG("status 0x%02x", status);
+
+ hci_dev_lock(hdev);
+
+ /* Saving of the new event mask is done elsewhere; here we only complete the pending command. */
+ cmd = pending_find_data(MGMT_OP_SET_EVENT_MASK, hdev, NULL);
+ if (!cmd)
+ goto unlock;
+
+ cmd->cmd_complete(cmd, mgmt_status(status));
+ mgmt_pending_remove(cmd);
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
+static int set_event_mask(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_event_mask *cp = data;
+ struct mgmt_pending_cmd *cmd;
+ struct hci_request req;
+ u8 new_events[8], i;
+ int err;
+
+ BT_DBG("request for %s", hdev->name);
+
+ hci_dev_lock(hdev);
+
+ if (pending_find(MGMT_OP_SET_EVENT_MASK, hdev)) {
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EVENT_MASK,
+ MGMT_STATUS_BUSY, NULL, 0);
+ goto failed;
+ }
+
+ cmd = mgmt_pending_add(sk, MGMT_OP_SET_EVENT_MASK, hdev, data, len);
+ if (!cmd) {
+ err = -ENOMEM;
+ goto failed;
+ }
+
+ cmd->cmd_complete = generic_cmd_complete;
+
+ hci_req_init(&req, hdev);
+ for (i = 0; i < HCI_SET_EVENT_MASK_SIZE; i++) {
+ /* Modify only bits that are requested by the stack */
+ new_events[i] = hdev->event_mask[i];
+ new_events[i] &= ~cp->mask[i];
+ new_events[i] |= cp->mask[i] & cp->events[i];
+ }
+
+ hci_req_add(&req, HCI_OP_SET_EVENT_MASK, sizeof(new_events),
+ new_events);
+ err = hci_req_run(&req, set_event_mask_complete);
+ if (err < 0)
+ mgmt_pending_remove(cmd);
+
+failed:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
struct mgmt_pending_cmd *cmd;
@@ -6656,6 +6720,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
HCI_MGMT_UNTRUSTED },
{ set_appearance, MGMT_SET_APPEARANCE_SIZE },
{ set_advertising_intervals, MGMT_SET_ADVERTISING_INTERVALS_SIZE },
+ { set_event_mask, MGMT_SET_EVENT_MASK_CP_SIZE },
};
void mgmt_index_added(struct hci_dev *hdev)
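
The bit-merge in set_event_mask() can be read as: bits selected by cp->mask take their value from cp->events, all other bits keep the device's current setting. A one-byte sketch of the rule:

    #include <stdint.h>
    #include <stdio.h>

    /* new = (current with masked bits cleared) | (requested values of masked bits) */
    static uint8_t merge_mask_byte(uint8_t cur, uint8_t mask, uint8_t events)
    {
            return (uint8_t)((cur & ~mask) | (events & mask));
    }

    int main(void)
    {
            /* current 0xF0; caller controls bits 0x0F and wants 0x05 of them set */
            printf("0x%02x\n", merge_mask_byte(0xF0, 0x0F, 0x05)); /* -> 0xf5 */
            return 0;
    }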
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 3125ce670c2f24..95fd7a837dc5c3 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -392,7 +392,8 @@ static void sco_sock_cleanup_listen(struct sock *parent)
*/
static void sco_sock_kill(struct sock *sk)
{
- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
+ sock_flag(sk, SOCK_DEAD))
return;
BT_DBG("sk %p state %d", sk, sk->sk_state);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index ead4d1baeaa6a7..1abfbcd8090a56 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2353,30 +2353,51 @@ unlock:
return ret;
}
-void smp_cancel_pairing(struct hci_conn *hcon)
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type)
{
- struct l2cap_conn *conn = hcon->l2cap_data;
+ struct hci_conn *hcon;
+ struct l2cap_conn *conn;
struct l2cap_chan *chan;
struct smp_chan *smp;
+ int err;
+
+ err = hci_remove_ltk(hdev, bdaddr, addr_type);
+ hci_remove_irk(hdev, bdaddr, addr_type);
+
+ hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
+ if (!hcon)
+ goto done;
+ conn = hcon->l2cap_data;
if (!conn)
- return;
+ goto done;
chan = conn->smp;
if (!chan)
- return;
+ goto done;
l2cap_chan_lock(chan);
smp = chan->data;
if (smp) {
+ /* Set keys to NULL to make sure smp_failure() does not try to
+ * remove and free already invalidated rcu list entries. */
+ smp->ltk = NULL;
+ smp->slave_ltk = NULL;
+ smp->remote_irk = NULL;
+
if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
smp_failure(conn, 0);
else
smp_failure(conn, SMP_UNSPECIFIED);
+ err = 0;
}
l2cap_chan_unlock(chan);
+
+done:
+ return err;
}
static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h
index ffcc70b6b1997d..993cbd7bcfe780 100644
--- a/net/bluetooth/smp.h
+++ b/net/bluetooth/smp.h
@@ -180,7 +180,8 @@ enum smp_key_pref {
};
/* SMP Commands */
-void smp_cancel_pairing(struct hci_conn *hcon);
+int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr,
+ u8 addr_type);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index fcdb86dd5a239c..c21209aada8c83 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -39,10 +39,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
goto drop;
- skb_push(skb, ETH_HLEN);
br_drop_fake_rtable(skb);
skb_sender_cpu_clear(skb);
@@ -88,12 +88,11 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
skb->dev = to->dev;
if (unlikely(netpoll_tx_running(to->br->dev))) {
+ skb_push(skb, ETH_HLEN);
if (!is_skb_forwardable(skb->dev, skb))
kfree_skb(skb);
- else {
- skb_push(skb, ETH_HLEN);
+ else
br_netpoll_send_skb(to, skb);
- }
return;
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 3400b1e4766880..50e84e634dfe5e 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -511,8 +511,11 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br_fdb_insert(br, p, dev->dev_addr, 0))
netdev_err(dev, "failed insert local address bridge forwarding table\n");
- if (nbp_vlan_init(p))
+ err = nbp_vlan_init(p);
+ if (err) {
netdev_err(dev, "failed to initialize vlan filtering on this port\n");
+ goto err6;
+ }
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
@@ -533,6 +536,12 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
return 0;
+err6:
+ list_del_rcu(&p->list);
+ br_fdb_delete_by_port(br, p, 0, 1);
+ nbp_update_port_count(br);
+ netdev_upper_dev_unlink(dev, br->dev);
+
err5:
dev->priv_flags &= ~IFF_BRIDGE_PORT;
netdev_rx_handler_unregister(dev);
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index 55dcb2b20b596d..6def85d75b1d55 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -267,7 +267,7 @@ int br_nf_pre_routing_finish_bridge(struct net *net, struct sock *sk, struct sk_
struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
int ret;
- if (neigh->hh.hh_len) {
+ if ((neigh->nud_state & NUD_CONNECTED) && neigh->hh.hh_len) {
neigh_hh_bridge(&neigh->hh, skb);
skb->dev = nf_bridge->physindev;
ret = br_handle_frame_finish(net, sk, skb);
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index d61f56efc8dc3a..69dfd212e50db6 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
IPSTATS_MIB_INDISCARDS);
goto drop;
}
+ hdr = ipv6_hdr(skb);
}
if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
goto drop;
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 070cf134a22f98..f2660c1b29e47d 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -67,6 +67,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
+ if (ebt_invalid_target(info->target))
+ return -EINVAL;
+
return 0;
}
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 51eab9b5baa1ab..f13402d407e4ce 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -404,6 +404,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
if (IS_ERR(watcher))
return PTR_ERR(watcher);
+
+ if (watcher->family != NFPROTO_BRIDGE) {
+ module_put(watcher->me);
+ return -ENOENT;
+ }
+
w->u.watcher = watcher;
par->target = watcher;
@@ -701,6 +707,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
}
i = 0;
+ memset(&mtpar, 0, sizeof(mtpar));
+ memset(&tgpar, 0, sizeof(tgpar));
mtpar.net = tgpar.net = net;
mtpar.table = tgpar.table = name;
mtpar.entryinfo = tgpar.entryinfo = e;
@@ -722,6 +730,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
goto cleanup_watchers;
}
+ /* Reject UNSPEC, xtables verdicts/return values are incompatible */
+ if (target->family != NFPROTO_BRIDGE) {
+ module_put(target->me);
+ ret = -ENOENT;
+ goto cleanup_watchers;
+ }
+
t->u.target = target;
if (t->u.target == &ebt_standard_target) {
if (gap < sizeof(struct ebt_standard_target)) {
@@ -1513,6 +1528,8 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
+ tmp.name[sizeof(tmp.name) - 1] = '\0';
+
t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
if (!t)
return ret;
@@ -1912,7 +1929,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
int off, pad = 0;
unsigned int size_kern, match_size = mwt->match_size;
- strlcpy(name, mwt->u.name, sizeof(name));
+ if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
+ return -EINVAL;
if (state->buf_kern_start)
dst = state->buf_kern_start + state->buf_kern_offset;
@@ -2352,6 +2370,8 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
+ tmp.name[sizeof(tmp.name) - 1] = '\0';
+
t = find_table_lock(net, tmp.name, &ret, &ebt_mutex);
if (!t)
return ret;
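
The switch to a checked strscpy() in compat_mtw_from_user() above matters because strscpy() reports truncation: it returns the copied length, or -E2BIG when the source does not fit, which the patch maps to -EINVAL rather than silently using a truncated match/watcher name. A user-space stand-in with the same contract:

    #include <errno.h>
    #include <string.h>

    static long my_strscpy(char *dst, const char *src, size_t size)
    {
            size_t len = strnlen(src, size);

            if (len == size) {                   /* source does not fit */
                    if (size) {
                            memcpy(dst, src, size - 1);
                            dst[size - 1] = '\0';
                    }
                    return -E2BIG;
            }
            memcpy(dst, src, len + 1);           /* fits, NUL included */
            return (long)len;
    }

    int main(void)
    {
            char buf[8];

            return my_strscpy(buf, "this name is too long", sizeof(buf))
                    == -E2BIG ? 0 : 1;
    }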
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index fdba3d9fbff3ba..6e48aa69fa24db 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -192,6 +192,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
return false;
+ ip6h = ipv6_hdr(skb);
thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
return false;
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index d730a0f68f46b4..a0443d40d677cf 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
caifd = caif_get(skb->dev);
WARN_ON(caifd == NULL);
- if (caifd == NULL)
+ if (!caifd) {
+ rcu_read_unlock();
return;
+ }
caifd_hold(caifd);
rcu_read_unlock();
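
The caif fix above is the classic "unlock on every exit path" bug: the WARN path returned with the RCU read lock still held. A user-space analogue using a rwlock in place of RCU (toy table, hypothetical names):

    #include <pthread.h>
    #include <stddef.h>

    struct entry { int key; int refcnt; };

    static pthread_rwlock_t tbl_lock = PTHREAD_RWLOCK_INITIALIZER;
    static struct entry table[4];

    static struct entry *lookup(int key)
    {
            for (size_t i = 0; i < 4; i++)
                    if (table[i].key == key)
                            return &table[i];
            return NULL;
    }

    static struct entry *lookup_and_hold(int key)
    {
            struct entry *e;

            pthread_rwlock_rdlock(&tbl_lock);
            e = lookup(key);
            if (!e) {
                    /* the unlock the original error path was missing */
                    pthread_rwlock_unlock(&tbl_lock);
                    return NULL;
            }
            e->refcnt++;   /* take a reference before dropping the lock */
            pthread_rwlock_unlock(&tbl_lock);
            return e;
    }

    int main(void)
    {
            table[0].key = 42;
            return lookup_and_hold(42) ? 0 : 1;
    }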
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 4ccfd356baedef..1f15622d3c6571 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -67,6 +67,9 @@
*/
#define MAX_NFRAMES 256
+/* limit timers to 400 days for sending/timeouts */
+#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
+
/* use of last_frames[index].can_dlc */
#define RX_RECV 0x40 /* received data for this element */
#define RX_THR 0x80 /* element not been sent due to throttle feature */
@@ -136,6 +139,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
}
+/* check limitations for timeval provided by user */
+static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
+{
+ if ((msg_head->ival1.tv_sec < 0) ||
+ (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
+ (msg_head->ival1.tv_usec < 0) ||
+ (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
+ (msg_head->ival2.tv_sec < 0) ||
+ (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
+ (msg_head->ival2.tv_usec < 0) ||
+ (msg_head->ival2.tv_usec >= USEC_PER_SEC))
+ return true;
+
+ return false;
+}
+
#define CFSIZ sizeof(struct can_frame)
#define OPSIZ sizeof(struct bcm_op)
#define MHSIZ sizeof(struct bcm_msg_head)
@@ -855,6 +874,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
return -EINVAL;
+ /* check timeval limitations */
+ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+ return -EINVAL;
+
/* check the given can_id */
op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
@@ -1020,6 +1043,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
(!(msg_head->can_id & CAN_RTR_FLAG))))
return -EINVAL;
+ /* check timeval limitations */
+ if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
+ return -EINVAL;
+
/* check the given can_id */
op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
if (op) {
diff --git a/net/can/gw.c b/net/can/gw.c
index 77c8af4047ef6f..81650affa3faad 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -418,13 +418,29 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
- /* check for checksum updates when the CAN frame has been modified */
+ /* Has the CAN frame been modified? */
if (modidx) {
- if (gwj->mod.csumfunc.crc8)
+ /* get available space for the processed CAN frame type */
+ int max_len = nskb->len - offsetof(struct can_frame, data);
+
+ /* dlc may have changed, make sure it fits to the CAN frame */
+ if (cf->can_dlc > max_len)
+ goto out_delete;
+
+ /* check for checksum updates in classic CAN length only */
+ if (gwj->mod.csumfunc.crc8) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
+
(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
+ }
+
+ if (gwj->mod.csumfunc.xor) {
+ if (cf->can_dlc > 8)
+ goto out_delete;
- if (gwj->mod.csumfunc.xor)
(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
+ }
}
/* clear the skb timestamp if not configured the other way */
@@ -436,6 +452,14 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
gwj->dropped_frames++;
else
gwj->handled_frames++;
+
+ return;
+
+ out_delete:
+ /* delete frame due to misconfiguration */
+ gwj->deleted_frames++;
+ kfree_skb(nskb);
+ return;
}
static inline int cgw_register_filter(struct cgw_job *gwj)
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index ad3c9e96a27523..3ed2796d008bbe 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2049,15 +2049,19 @@ static int process_connect(struct ceph_connection *con)
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
if (con->auth_reply_buf) {
+ int len = le32_to_cpu(con->in_reply.authorizer_len);
+
/*
* Any connection that defines ->get_authorizer()
* should also define ->verify_authorizer_reply().
* See get_connect_authorizer().
*/
- ret = con->ops->verify_authorizer_reply(con, 0);
- if (ret < 0) {
- con->error_msg = "bad authorize reply";
- return ret;
+ if (len) {
+ ret = con->ops->verify_authorizer_reply(con, 0);
+ if (ret < 0) {
+ con->error_msg = "bad authorize reply";
+ return ret;
+ }
}
}
@@ -3181,9 +3185,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
dout("con_keepalive %p\n", con);
mutex_lock(&con->mutex);
clear_standby(con);
+ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
mutex_unlock(&con->mutex);
- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+
+ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index d4f5f220a8e55e..28453d698d86fd 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -26,7 +26,7 @@ struct page **ceph_get_direct_page_vector(const void __user *data,
while (got < num_pages) {
rc = get_user_pages_unlocked(current, current->mm,
(unsigned long)data + ((unsigned long)got * PAGE_SIZE),
- num_pages - got, write_page, 0, pages + got);
+ num_pages - got, pages + got, write_page ? FOLL_WRITE : 0);
if (rc < 0)
break;
BUG_ON(rc == 0);
diff --git a/net/compat.c b/net/compat.c
index 17e97b106458c6..d676840104556d 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -443,12 +443,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
+
if (tv.tv_sec == -1)
return err;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
err = 0;
if (put_user(tv.tv_sec, &ctv->tv_sec) ||
@@ -471,12 +473,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
err = -ENOENT;
if (!sock_flag(sk, SOCK_TIMESTAMP))
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return err;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- ts = ktime_to_timespec(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ ts = ktime_to_timespec(kt);
}
err = 0;
if (put_user(ts.tv_sec, &ctv->tv_sec) ||
diff --git a/net/core/dev.c b/net/core/dev.c
index bc4ac80d4e83f4..0a60ad43775c26 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1660,6 +1660,28 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+ struct net_device *dev, u32 arg)
+{
+ struct netdev_notifier_info_ext info = {
+ .info.dev = dev,
+ .ext.mtu = arg,
+ };
+
+ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+ return call_netdevice_notifiers_info(val, dev, &info.info);
+}
+
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;
@@ -4459,6 +4481,10 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->vlan_tci = 0;
skb->dev = napi->dev;
skb->skb_iif = 0;
+
+ /* eth_type_trans() assumes pkt_type is PACKET_HOST */
+ skb->pkt_type = PACKET_HOST;
+
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
@@ -6135,14 +6161,16 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
err = __dev_set_mtu(dev, new_mtu);
if (!err) {
- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__dev_set_mtu(dev, orig_mtu);
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ new_mtu);
}
}
return err;
@@ -6394,7 +6422,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(upper->wanted_features & feature)
&& (features & feature)) {
@@ -6414,7 +6442,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(features & feature) && (lower->features & feature)) {
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
@@ -7421,7 +7449,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- if (dev_get_valid_name(net, dev, pat) < 0)
+ err = dev_get_valid_name(net, dev, pat);
+ if (err < 0)
goto out;
}
@@ -7433,7 +7462,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
dev_close(dev);
/* And unlink it from device chain */
- err = -ENODEV;
unlist_netdevice(dev);
synchronize_net();
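
call_netdevice_notifiers_mtu() relies on struct embedding: because the base info struct is the first member of the _ext struct, a pointer to the base can be handed to every notifier, and MTU-aware ones can cast back to reach the extra field; the BUILD_BUG_ON pins that layout at build time. A compact user-space sketch of the pattern, with a runtime assert standing in for the build-time check:

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    struct notifier_info     { void *dev; };
    struct notifier_info_ext { struct notifier_info info; unsigned int mtu; };

    static void mtu_aware_callback(struct notifier_info *base)
    {
            /* valid only because info is the first member of the ext struct */
            struct notifier_info_ext *ext = (struct notifier_info_ext *)base;

            printf("new mtu: %u\n", ext->mtu);
    }

    int main(void)
    {
            struct notifier_info_ext info = { .info = { .dev = NULL }, .mtu = 1500 };

            assert(offsetof(struct notifier_info_ext, info) == 0);
            mtu_aware_callback(&info.info);
            return 0;
    }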
diff --git a/net/core/filter.c b/net/core/filter.c
index 1a9ded6af1382d..39cf5ff862dcca 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1901,17 +1901,17 @@ static const struct bpf_verifier_ops tc_cls_act_ops = {
.convert_ctx_access = bpf_net_convert_ctx_access,
};
-static struct bpf_prog_type_list sk_filter_type __read_mostly = {
+static struct bpf_prog_type_list sk_filter_type __ro_after_init = {
.ops = &sk_filter_ops,
.type = BPF_PROG_TYPE_SOCKET_FILTER,
};
-static struct bpf_prog_type_list sched_cls_type __read_mostly = {
+static struct bpf_prog_type_list sched_cls_type __ro_after_init = {
.ops = &tc_cls_act_ops,
.type = BPF_PROG_TYPE_SCHED_CLS,
};
-static struct bpf_prog_type_list sched_act_type __read_mostly = {
+static struct bpf_prog_type_list sched_act_type __ro_after_init = {
.ops = &tc_cls_act_ops,
.type = BPF_PROG_TYPE_SCHED_ACT,
};
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 4d14908afaec48..697c4212129a10 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -480,8 +480,8 @@ ip_proto_again:
break;
}
- if (dissector_uses_key(flow_dissector,
- FLOW_DISSECTOR_KEY_PORTS)) {
+ if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_PORTS) &&
+ !(key_control->flags & FLOW_DIS_IS_FRAGMENT)) {
key_ports = skb_flow_dissector_target(flow_dissector,
FLOW_DISSECTOR_KEY_PORTS,
target_container);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f60b93627876d8..78dc184072e8f6 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1140,6 +1140,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha;
}
+ /* Update confirmed timestamp for neighbour entry after we
+ * received ARP packet even if it doesn't change IP to MAC binding.
+ */
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1163,15 +1169,12 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
}
}
- /* Update timestamps only once we know we will make a change to the
+ /* Update timestamp only once we know we will make a change to the
* neighbour entry. Otherwise we risk to move the locktime window with
* noop updates and ignore relevant ARP updates.
*/
- if (new != old || lladdr != neigh->ha) {
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
+ if (new != old || lladdr != neigh->ha)
neigh->updated = jiffies;
- }
if (new != old) {
neigh_del_timer(neigh);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f88a62ab019d25..267c5b2d761924 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -486,7 +486,7 @@ static ssize_t phys_switch_id_show(struct device *dev,
}
static DEVICE_ATTR_RO(phys_switch_id);
-static struct attribute *net_class_attrs[] = {
+static struct attribute *net_class_attrs[] __ro_after_init = {
&dev_attr_netdev_group.attr,
&dev_attr_type.attr,
&dev_attr_dev_id.attr,
@@ -574,7 +574,7 @@ NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
-static struct attribute *netstat_attrs[] = {
+static struct attribute *netstat_attrs[] __ro_after_init = {
&dev_attr_rx_packets.attr,
&dev_attr_tx_packets.attr,
&dev_attr_rx_bytes.attr,
@@ -1112,9 +1112,9 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue,
return len;
}
-static struct netdev_queue_attribute bql_hold_time_attribute =
- __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
- bql_set_hold_time);
+static struct netdev_queue_attribute bql_hold_time_attribute __ro_after_init
+ = __ATTR(hold_time, S_IRUGO | S_IWUSR,
+ bql_show_hold_time, bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
struct netdev_queue_attribute *attr,
@@ -1125,7 +1125,7 @@ static ssize_t bql_show_inflight(struct netdev_queue *queue,
return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}
-static struct netdev_queue_attribute bql_inflight_attribute =
+static struct netdev_queue_attribute bql_inflight_attribute __ro_after_init =
__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD) \
@@ -1143,15 +1143,15 @@ static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
return bql_set(buf, len, &queue->dql.FIELD); \
} \
\
-static struct netdev_queue_attribute bql_ ## NAME ## _attribute = \
- __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME, \
- bql_set_ ## NAME);
+static struct netdev_queue_attribute bql_ ## NAME ## _attribute __ro_after_init \
+ = __ATTR(NAME, S_IRUGO | S_IWUSR, \
+ bql_show_ ## NAME, bql_set_ ## NAME)
-BQL_ATTR(limit, limit)
-BQL_ATTR(limit_max, max_limit)
-BQL_ATTR(limit_min, min_limit)
+BQL_ATTR(limit, limit);
+BQL_ATTR(limit_max, max_limit);
+BQL_ATTR(limit_min, min_limit);
-static struct attribute *dql_attrs[] = {
+static struct attribute *dql_attrs[] __ro_after_init = {
&bql_limit_attribute.attr,
&bql_limit_max_attribute.attr,
&bql_limit_min_attribute.attr,
@@ -1361,6 +1361,9 @@ static int register_queue_kobjects(struct net_device *dev)
error:
netdev_queue_update_kobjects(dev, txq, 0);
net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+ kset_unregister(dev->queues_kset);
+#endif
return error;
}
@@ -1457,7 +1460,7 @@ static const void *net_namespace(struct device *d)
return dev_net(dev);
}
-static struct class net_class = {
+static struct class net_class __ro_after_init = {
.name = "net",
.dev_release = netdev_release,
.dev_groups = net_class_groups,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 707c95ac778bbf..194239143b5716 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1693,6 +1693,10 @@ static int do_setlink(const struct sk_buff *skb,
const struct net_device_ops *ops = dev->netdev_ops;
int err;
+ err = validate_linkmsg(dev, tb);
+ if (err < 0)
+ return err;
+
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
struct net *net = rtnl_link_get_net(dev_net(dev), tb);
if (IS_ERR(net)) {
@@ -1984,10 +1988,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
goto errout;
}
- err = validate_linkmsg(dev, tb);
- if (err < 0)
- goto errout;
-
err = do_setlink(skb, dev, ifm, tb, ifname, 0);
errout:
return err;
@@ -2089,9 +2089,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
return err;
}
- dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
- __dev_notify_flags(dev, old_flags, ~0U);
+ if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+ __dev_notify_flags(dev, old_flags, 0U);
+ } else {
+ dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+ __dev_notify_flags(dev, old_flags, ~0U);
+ }
return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
@@ -2115,6 +2118,12 @@ struct net_device *rtnl_create_link(struct net *net,
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
+ if (num_tx_queues < 1 || num_tx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
+ if (num_rx_queues < 1 || num_rx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
err = -ENOMEM;
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
@@ -2727,6 +2736,11 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
}
+ if (dev->type != ARPHRD_ETHER) {
+ pr_info("PF_BRIDGE: FDB add only supported for Ethernet devices");
+ return -EINVAL;
+ }
+
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid);
@@ -2829,6 +2843,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EINVAL;
}
+ if (dev->type != ARPHRD_ETHER) {
+ pr_info("PF_BRIDGE: FDB delete only supported for Ethernet devices");
+ return -EINVAL;
+ }
+
addr = nla_data(tb[NDA_LLADDR]);
err = fdb_vid_parse(tb[NDA_VLAN], &vid);
@@ -2914,6 +2933,9 @@ int ndo_dflt_fdb_dump(struct sk_buff *skb,
{
int err;
+ if (dev->type != ARPHRD_ETHER)
+ return -EINVAL;
+
netif_addr_lock_bh(dev);
err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
if (err)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fa02c680eebc03..2f63a90065e630 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -374,6 +374,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -387,6 +389,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
void *napi_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(napi_alloc_frag);
@@ -828,6 +832,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
n->cloned = 1;
n->nohdr = 0;
n->peeked = 0;
+ C(pfmemalloc);
n->destructor = NULL;
C(tail);
C(end);
@@ -1501,6 +1506,21 @@ done:
}
EXPORT_SYMBOL(___pskb_trim);
+/* Note : use pskb_trim_rcsum() instead of calling this directly
+ */
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ int delta = skb->len - len;
+
+ skb->csum = csum_block_sub(skb->csum,
+ skb_checksum(skb, len, delta, 0),
+ len);
+ }
+ return __pskb_trim(skb, len);
+}
+EXPORT_SYMBOL(pskb_trim_rcsum_slow);
+
/**
* __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
@@ -2377,6 +2397,32 @@ void skb_queue_purge(struct sk_buff_head *list)
EXPORT_SYMBOL(skb_queue_purge);
/**
+ * skb_rbtree_purge - empty a skb rbtree
+ * @root: root of the rbtree to empty
+ * Return value: the sum of truesizes of all purged skbs.
+ *
+ * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
+ * the list and one reference dropped. This function does not take
+ * any lock. Synchronization should be handled by the caller (e.g., TCP
+ * out-of-order queue is protected by the socket lock).
+ */
+unsigned int skb_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ sum += skb->truesize;
+ kfree_skb(skb);
+ }
+ return sum;
+}
+
+/**
* skb_queue_head - queue a buffer at the list head
* @list: list to use
* @newsk: buffer to queue
diff --git a/net/core/sock.c b/net/core/sock.c
index e68cb2efe9c141..67afb3ca2ad28d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -768,6 +768,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
break;
case SO_DONTROUTE:
sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+ sk_dst_reset(sk);
break;
case SO_BROADCAST:
sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
@@ -2464,6 +2465,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp = ktime_set(-1L, 0);
+#if BITS_PER_LONG==32
+ seqlock_init(&sk->sk_stamp_seq);
+#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
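
The seqlock_init() above exists because on 32-bit a 64-bit sk_stamp store is two word writes, so a lock-free reader can observe a torn value; the sequence counter (odd while a write is in flight) lets readers detect a concurrent write and retry. A simplified user-space model of the read and write sides; sock_read_timestamp()/sock_write_timestamp(), used in the compat.c hunk earlier, are not shown in this diff and these names are stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    struct stamp {
            volatile unsigned int seq;   /* odd while a write is in flight */
            int64_t val;
    };

    static void stamp_write(struct stamp *s, int64_t v)
    {
            s->seq++;
            __sync_synchronize();
            s->val = v;
            __sync_synchronize();
            s->seq++;
    }

    static int64_t stamp_read(struct stamp *s)
    {
            unsigned int seq;
            int64_t v;

            do {
                    seq = s->seq;
                    __sync_synchronize();
                    v = s->val;
                    __sync_synchronize();
            } while ((seq & 1) || seq != s->seq);   /* retry torn reads */
            return v;
    }

    int main(void)
    {
            struct stamp s = { 0, 0 };

            stamp_write(&s, 1234567890123LL);
            printf("%lld\n", (long long)stamp_read(&s));
            return 0;
    }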
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 4f6c1862dfd252..6fe2b615518c78 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1763,7 +1763,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
if (itr->app.selector == app->selector &&
itr->app.protocol == app->protocol &&
itr->ifindex == ifindex &&
- (!prio || itr->app.priority == prio))
+ ((prio == -1) || itr->app.priority == prio))
return itr;
}
@@ -1798,7 +1798,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio = itr->app.priority;
spin_unlock_bh(&dcb_lock);
@@ -1826,7 +1827,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
spin_lock_bh(&dcb_lock);
/* Search for existing match and replace */
- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
+ itr = dcb_app_lookup(new, dev->ifindex, -1);
+ if (itr) {
if (new->priority)
itr->app.priority = new->priority;
else {
@@ -1859,7 +1861,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
u8 prio = 0;
spin_lock_bh(&dcb_lock);
- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
+ itr = dcb_app_lookup(app, dev->ifindex, -1);
+ if (itr)
prio |= 1 << itr->app.priority;
spin_unlock_bh(&dcb_lock);
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a47b5c42..baaaeb2b2c4230 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
- if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+ if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
}
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
- if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+ if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
}
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 86a2ed0fb219c1..161dfcf8612687 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -228,14 +228,16 @@ static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
u32 cwnd = hc->tx_cwnd, restart_cwnd,
iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
+ s32 delta = now - hc->tx_lsndtime;
hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
/* don't reduce cwnd below the initial window (IW) */
restart_cwnd = min(cwnd, iwnd);
- cwnd >>= (now - hc->tx_lsndtime) / hc->tx_rto;
- hc->tx_cwnd = max(cwnd, restart_cwnd);
+ while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
+ cwnd >>= 1;
+ hc->tx_cwnd = max(cwnd, restart_cwnd);
hc->tx_cwnd_stamp = now;
hc->tx_cwnd_used = 0;
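
The rewritten restart path halves cwnd once per fully elapsed RTO instead of computing one big shift, whose count could reach or exceed the width of cwnd and which mishandled a negative delta. A small demonstration with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cwnd_restart(uint32_t cwnd, uint32_t restart_cwnd,
                                 int32_t delta, int32_t rto)
    {
            /* one halving per elapsed RTO, never below the restart window */
            while ((delta -= rto) >= 0 && cwnd > restart_cwnd)
                    cwnd >>= 1;
            return cwnd > restart_cwnd ? cwnd : restart_cwnd;
    }

    int main(void)
    {
            /* cwnd 64, restart window 4, five RTOs idle: 64 -> 4 */
            printf("%u\n", cwnd_restart(64, 4, 5 * 100, 100));
            return 0;
    }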
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 119c04317d48ee..03fcf3ee15346c 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -599,7 +599,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
- ktime_t now = ktime_get_real();
+ ktime_t now = ktime_get();
s64 delta = 0;
switch (fbtype) {
@@ -624,15 +624,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
case CCID3_FBACK_PERIODIC:
delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
if (delta <= 0)
- DCCP_BUG("delta (%ld) <= 0", (long)delta);
- else
- hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+ delta = 1;
+ hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
break;
default:
return;
}
- ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+ ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
hc->rx_x_recv, hc->rx_pinv);
hc->rx_tstamp_last_feedback = now;
@@ -679,7 +678,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
static u32 ccid3_first_li(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
- u32 x_recv, p, delta;
+ u32 x_recv, p;
+ s64 delta;
u64 fval;
if (hc->rx_rtt == 0) {
@@ -687,7 +687,9 @@ static u32 ccid3_first_li(struct sock *sk)
hc->rx_rtt = DCCP_FALLBACK_RTT;
}
- delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+ delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+ if (delta <= 0)
+ delta = 1;
x_recv = scaled_div32(hc->rx_bytes_recv, delta);
if (x_recv == 0) { /* would also trigger divide-by-zero */
DCCP_WARN("X_recv==0\n");
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 7b4488235df937..729a167c4aa588 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -427,8 +427,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
- newnp->mcast_oif = inet6_iif(skb);
- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+ newnp->mcast_oif = inet_iif(skb);
+ newnp->mcast_hops = ip_hdr(skb)->ttl;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ff3b058cf58ca5..936dab12f99f2e 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -280,9 +280,7 @@ int dccp_disconnect(struct sock *sk, int flags)
dccp_clear_xmit_timers(sk);
ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
dp->dccps_hc_rx_ccid = NULL;
- dp->dccps_hc_tx_ccid = NULL;
__skb_queue_purge(&sk->sk_receive_queue);
__skb_queue_purge(&sk->sk_write_queue);
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index e26df2764e836c..1689c7bdf1c974 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -87,35 +87,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
opt++;
kdebug("options: '%s'", opt);
do {
+ int opt_len, opt_nlen;
const char *eq;
- int opt_len, opt_nlen, opt_vlen, tmp;
+ char optval[128];
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
- if (opt_len <= 0 || opt_len > 128) {
+ if (opt_len <= 0 || opt_len > sizeof(optval)) {
pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
opt_len);
return -EINVAL;
}
- eq = memchr(opt, '=', opt_len) ?: end;
- opt_nlen = eq - opt;
- eq++;
- opt_vlen = next_opt - eq; /* will be -1 if no value */
+ eq = memchr(opt, '=', opt_len);
+ if (eq) {
+ opt_nlen = eq - opt;
+ eq++;
+ memcpy(optval, eq, next_opt - eq);
+ optval[next_opt - eq] = '\0';
+ } else {
+ opt_nlen = opt_len;
+ optval[0] = '\0';
+ }
- tmp = opt_vlen >= 0 ? opt_vlen : 0;
- kdebug("option '%*.*s' val '%*.*s'",
- opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+ kdebug("option '%*.*s' val '%s'",
+ opt_nlen, opt_nlen, opt, optval);
/* see if it's an error number representing a DNS error
* that's to be recorded as the result in this key */
if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
kdebug("dns error number option");
- if (opt_vlen <= 0)
- goto bad_option_value;
- ret = kstrtoul(eq, 10, &derrno);
+ ret = kstrtoul(optval, 10, &derrno);
if (ret < 0)
goto bad_option_value;
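[Editor's note: the new parsing copies each option's value into a fixed, NUL-terminated buffer before handing it to kstrtoul(), instead of pointing the parser at unterminated key payload. A minimal userspace sketch of the same split; split_option() is a hypothetical helper, not the kernel function.]

#include <stdio.h>
#include <string.h>

/* Split "name=value" out of [opt, next_opt); the value lands
 * NUL-terminated in optval so numeric parsers can run safely. */
static int split_option(const char *opt, const char *next_opt,
			char optval[128], int *opt_nlen)
{
	const char *eq;
	int opt_len = next_opt - opt;

	if (opt_len <= 0 || opt_len > 127)
		return -1;
	eq = memchr(opt, '=', opt_len);
	if (eq) {
		*opt_nlen = eq - opt;
		eq++;
		memcpy(optval, eq, next_opt - eq);
		optval[next_opt - eq] = '\0';
	} else {
		*opt_nlen = opt_len;
		optval[0] = '\0';
	}
	return 0;
}

int main(void)
{
	char val[128];
	int nlen;
	const char *opt = "dnserror=6";

	if (!split_option(opt, opt + strlen(opt), val, &nlen))
		printf("name='%.*s' val='%s'\n", nlen, opt, val);
	return 0;
}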
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 554c2a961ad566..4256ac95a141f4 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -157,10 +157,14 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
struct dsa_slave_priv *p = netdev_priv(dev);
struct net_device *master = p->parent->dst->master_netdev;
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
- dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(master,
+ dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(master,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+ }
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -1099,6 +1103,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ if (!netif_running(slave_dev))
+ return 0;
+
netif_device_detach(slave_dev);
if (p->phy) {
@@ -1116,6 +1123,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ if (!netif_running(slave_dev))
+ return 0;
+
netif_device_attach(slave_dev);
if (p->phy) {
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index c7d1adca30d891..943378d6e4c311 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -93,9 +93,8 @@ static void hsr_check_announce(struct net_device *hsr_dev,
if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
/* Went up */
hsr->announce_count = 0;
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
- add_timer(&hsr->announce_timer);
+ mod_timer(&hsr->announce_timer,
+ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -323,6 +322,7 @@ static void hsr_announce(unsigned long data)
{
struct hsr_priv *hsr;
struct hsr_port *master;
+ unsigned long interval;
hsr = (struct hsr_priv *) data;
@@ -337,14 +337,12 @@ static void hsr_announce(unsigned long data)
}
if (hsr->announce_count < 3)
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
else
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
if (is_admin_up(master->dev))
- add_timer(&hsr->announce_timer);
+ mod_timer(&hsr->announce_timer, jiffies + interval);
rcu_read_unlock();
}
@@ -477,7 +475,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
if (res)
- return res;
+ goto err_add_port;
res = register_netdevice(hsr_dev);
if (res)
@@ -498,6 +496,8 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
fail:
hsr_for_each_port(hsr, port)
hsr_del_port(port);
+err_add_port:
+ hsr_del_node(&hsr->self_node_db);
return res;
}
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index bace124d14ef57..46415839e67e9b 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@ int hsr_create_self_node(struct list_head *self_node_db,
return 0;
}
+void hsr_del_node(struct list_head *self_node_db)
+{
+ struct hsr_node *node;
+
+ rcu_read_lock();
+ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+ rcu_read_unlock();
+ if (node) {
+ list_del_rcu(&node->mac_list);
+ kfree(node);
+ }
+}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
* seq_out is used to initialize filtering of outgoing duplicate frames
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 438b40f98f5a98..7a8f4e98f5151d 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
struct hsr_node;
+void hsr_del_node(struct list_head *self_node_db);
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index b4e17a7c0df083..fdbebe51446f78 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -16,37 +16,19 @@ typedef unsigned __bitwise__ lowpan_rx_result;
#define LOWPAN_DISPATCH_FRAG1 0xc0
#define LOWPAN_DISPATCH_FRAGN 0xe0
-struct lowpan_create_arg {
+struct frag_lowpan_compare_key {
u16 tag;
u16 d_size;
- const struct ieee802154_addr *src;
- const struct ieee802154_addr *dst;
+ struct ieee802154_addr src;
+ struct ieee802154_addr dst;
};
-/* Equivalent of ipv4 struct ip
+/* Equivalent of ipv4 struct ipq
*/
struct lowpan_frag_queue {
struct inet_frag_queue q;
-
- u16 tag;
- u16 d_size;
- struct ieee802154_addr saddr;
- struct ieee802154_addr daddr;
};
-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
-{
- switch (a->mode) {
- case IEEE802154_ADDR_LONG:
- return (((__force u64)a->extended_addr) >> 32) ^
- (((__force u64)a->extended_addr) & 0xffffffff);
- case IEEE802154_ADDR_SHORT:
- return (__force u32)(a->short_addr);
- default:
- return 0;
- }
-}
-
/* private device info */
struct lowpan_dev_info {
struct net_device *wdev; /* wpan device ptr */
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index 12e8cf4bda9ff9..6183730d38db36 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -37,47 +37,15 @@ static struct inet_frags lowpan_frags;
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
struct sk_buff *prev, struct net_device *ldev);
-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
- const struct ieee802154_addr *saddr,
- const struct ieee802154_addr *daddr)
-{
- net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
- return jhash_3words(ieee802154_addr_hash(saddr),
- ieee802154_addr_hash(daddr),
- (__force u32)(tag + (d_size << 16)),
- lowpan_frags.rnd);
-}
-
-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
-{
- const struct lowpan_frag_queue *fq;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
- return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
-}
-
-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct lowpan_frag_queue *fq;
- const struct lowpan_create_arg *arg = a;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
- return fq->tag == arg->tag && fq->d_size == arg->d_size &&
- ieee802154_addr_equal(&fq->saddr, arg->src) &&
- ieee802154_addr_equal(&fq->daddr, arg->dst);
-}
-
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
- const struct lowpan_create_arg *arg = a;
+ const struct frag_lowpan_compare_key *key = a;
struct lowpan_frag_queue *fq;
fq = container_of(q, struct lowpan_frag_queue, q);
- fq->tag = arg->tag;
- fq->d_size = arg->d_size;
- fq->saddr = *arg->src;
- fq->daddr = *arg->dst;
+ BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
+ memcpy(&q->key, key, sizeof(*key));
}
static void lowpan_frag_expire(unsigned long data)
@@ -93,10 +61,10 @@ static void lowpan_frag_expire(unsigned long data)
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
- inet_frag_kill(&fq->q, &lowpan_frags);
+ inet_frag_kill(&fq->q);
out:
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &lowpan_frags);
+ inet_frag_put(&fq->q);
}
static inline struct lowpan_frag_queue *
@@ -104,25 +72,20 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb,
const struct ieee802154_addr *src,
const struct ieee802154_addr *dst)
{
- struct inet_frag_queue *q;
- struct lowpan_create_arg arg;
- unsigned int hash;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ struct frag_lowpan_compare_key key = {};
+ struct inet_frag_queue *q;
- arg.tag = cb->d_tag;
- arg.d_size = cb->d_size;
- arg.src = src;
- arg.dst = dst;
-
- hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
+ key.tag = cb->d_tag;
+ key.d_size = cb->d_size;
+ key.src = *src;
+ key.dst = *dst;
- q = inet_frag_find(&ieee802154_lowpan->frags,
- &lowpan_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct lowpan_frag_queue, q);
}
@@ -229,7 +192,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
struct sk_buff *fp, *head = fq->q.fragments;
int sum_truesize;
- inet_frag_kill(&fq->q, &lowpan_frags);
+ inet_frag_kill(&fq->q);
/* Make the one we just received the head. */
if (prev) {
@@ -408,7 +371,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
struct lowpan_frag_queue *fq;
struct net *net = dev_net(skb->dev);
struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
- struct ieee802154_hdr hdr;
+ struct ieee802154_hdr hdr = {};
int err;
if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
@@ -437,7 +400,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
ret = lowpan_frag_queue(fq, skb, frag_type);
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &lowpan_frags);
+ inet_frag_put(&fq->q);
return ret;
}
@@ -447,24 +410,22 @@ err:
}
#ifdef CONFIG_SYSCTL
-static int zero;
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
{
.procname = "6lowpanfrag_high_thresh",
.data = &init_net.ieee802154_lowpan.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
},
{
.procname = "6lowpanfrag_low_thresh",
.data = &init_net.ieee802154_lowpan.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
},
{
@@ -580,14 +541,20 @@ static int __net_init lowpan_frags_init_net(struct net *net)
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+ ieee802154_lowpan->frags.f = &lowpan_frags;
- inet_frags_init_net(&ieee802154_lowpan->frags);
-
- return lowpan_frags_ns_sysctl_register(net);
+ res = inet_frags_init_net(&ieee802154_lowpan->frags);
+ if (res < 0)
+ return res;
+ res = lowpan_frags_ns_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&ieee802154_lowpan->frags);
+ return res;
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
@@ -596,7 +563,7 @@ static void __net_exit lowpan_frags_exit_net(struct net *net)
net_ieee802154_lowpan(net);
lowpan_frags_ns_sysctl_unregister(net);
- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
+ inet_frags_exit_net(&ieee802154_lowpan->frags);
}
static struct pernet_operations lowpan_frags_ops = {
@@ -604,33 +571,64 @@ static struct pernet_operations lowpan_frags_ops = {
.exit = lowpan_frags_exit_net,
};
-int __init lowpan_net_frag_init(void)
+static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
- int ret;
+ return jhash2(data,
+ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
- ret = lowpan_frags_sysctl_register();
- if (ret)
- return ret;
+static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
- ret = register_pernet_subsys(&lowpan_frags_ops);
- if (ret)
- goto err_pernet;
+ return jhash2((const u32 *)&fq->key,
+ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
+
+static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_lowpan_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params lowpan_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = lowpan_key_hashfn,
+ .obj_hashfn = lowpan_obj_hashfn,
+ .obj_cmpfn = lowpan_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
+int __init lowpan_net_frag_init(void)
+{
+ int ret;
- lowpan_frags.hashfn = lowpan_hashfn;
lowpan_frags.constructor = lowpan_frag_init;
lowpan_frags.destructor = NULL;
lowpan_frags.skb_free = NULL;
lowpan_frags.qsize = sizeof(struct frag_queue);
- lowpan_frags.match = lowpan_frag_match;
lowpan_frags.frag_expire = lowpan_frag_expire;
lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+ lowpan_frags.rhash_params = lowpan_rhash_params;
ret = inet_frags_init(&lowpan_frags);
if (ret)
- goto err_pernet;
+ goto out;
+ ret = lowpan_frags_sysctl_register();
+ if (ret)
+ goto err_sysctl;
+
+ ret = register_pernet_subsys(&lowpan_frags_ops);
+ if (ret)
+ goto err_pernet;
+out:
return ret;
err_pernet:
lowpan_frags_sysctl_unregister();
+err_sysctl:
+ inet_frags_fini(&lowpan_frags);
return ret;
}
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index d4353faced35cf..df32134da924ee 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -55,6 +55,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
const u8 *daddr = _daddr;
struct lowpan_addr_info *info;
+ if (!daddr)
+ return -EINVAL;
+
/* TODO:
 * if this packet isn't an ipv6 one, where should it be routed?
*/
@@ -265,9 +268,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
/* We must take a copy of the skb before we modify/replace the ipv6
* header as the header could be used elsewhere
*/
- skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb)
- return NET_XMIT_DROP;
+ if (unlikely(skb_headroom(skb) < ldev->needed_headroom ||
+ skb_tailroom(skb) < ldev->needed_tailroom)) {
+ struct sk_buff *nskb;
+
+ nskb = skb_copy_expand(skb, ldev->needed_headroom,
+ ldev->needed_tailroom, GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+ } else {
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_XMIT_DROP;
+ }
ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
if (ret < 0) {
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 154e7423cdeb94..395d82754626f4 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -354,6 +354,7 @@ config INET_ESP
select CRYPTO_CBC
select CRYPTO_SHA1
select CRYPTO_DES
+ select CRYPTO_ECHAINIV
---help---
Support for IPsec ESP.
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 82f4aeb3307ad0..0acf89d53fb1e1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1324,6 +1324,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
if (encap)
skb_reset_inner_headers(skb);
skb->network_header = (u8 *)iph - skb->head;
+ skb_reset_mac_len(skb);
} while ((skb = skb->next));
out:
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 4ac29cd57c0c3e..4869033041ebcf 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -167,7 +167,8 @@ static int cipso_v4_bitmap_walk(const unsigned char *bitmap,
(state == 0 && (byte & bitmask) == 0))
return bit_spot;
- bit_spot++;
+ if (++bit_spot >= bitmap_len)
+ return -1;
bitmask >>= 1;
if (bitmask == 0) {
byte = bitmap[++byte_offset];
@@ -737,7 +738,8 @@ static int cipso_v4_map_lvl_valid(const struct cipso_v4_doi *doi_def, u8 level)
case CIPSO_V4_MAP_PASS:
return 0;
case CIPSO_V4_MAP_TRANS:
- if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+ if ((level < doi_def->map.std->lvl.cipso_size) &&
+ (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
return 0;
break;
}
@@ -1582,7 +1584,7 @@ static int cipso_v4_parsetag_loc(const struct cipso_v4_doi *doi_def,
*
* Description:
* Parse the packet's IP header looking for a CIPSO option. Returns a pointer
- * to the start of the CIPSO option on success, NULL if one if not found.
+ * to the start of the CIPSO option on success, NULL if one is not found.
*
*/
unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
@@ -1592,10 +1594,21 @@ unsigned char *cipso_v4_optptr(const struct sk_buff *skb)
int optlen;
int taglen;
- for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 0; ) {
+ for (optlen = iph->ihl*4 - sizeof(struct iphdr); optlen > 1; ) {
+ switch (optptr[0]) {
+ case IPOPT_END:
+ return NULL;
+ case IPOPT_NOOP:
+ taglen = 1;
+ break;
+ default:
+ taglen = optptr[1];
+ }
+ if (!taglen || taglen > optlen)
+ return NULL;
if (optptr[0] == IPOPT_CIPSO)
return optptr;
- taglen = optptr[1];
+
optlen -= taglen;
optptr += taglen;
}
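[Editor's note: the hunk above turns the option scan into a proper IP-options walk: IPOPT_END terminates, IPOPT_NOOP is a single byte, and a zero or oversized length byte aborts instead of letting the cursor run past the option area. A userspace sketch of the hardened walk; the IPOPT_* values are the standard ones, but this is an illustration only.]

#include <stddef.h>

#define IPOPT_END	0
#define IPOPT_NOOP	1
#define IPOPT_CIPSO	134

static const unsigned char *find_cipso(const unsigned char *optptr, int optlen)
{
	while (optlen > 1) {
		int taglen;

		switch (optptr[0]) {
		case IPOPT_END:
			return NULL;
		case IPOPT_NOOP:
			taglen = 1;
			break;
		default:
			taglen = optptr[1];
		}
		if (taglen == 0 || taglen > optlen)
			return NULL;	/* malformed: stop walking */
		if (optptr[0] == IPOPT_CIPSO)
			return optptr;
		optlen -= taglen;
		optptr += taglen;
	}
	return NULL;
}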
@@ -1794,13 +1807,26 @@ validate_return:
*/
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
+ unsigned char optbuf[sizeof(struct ip_options) + 40];
+ struct ip_options *opt = (struct ip_options *)optbuf;
+
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
+ /*
+ * We might be called above the IP layer,
+ * so we cannot use icmp_send and IPCB here.
+ */
+
+ memset(opt, 0, sizeof(struct ip_options));
+ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+ if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+ return;
+
if (gateway)
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
else
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
}
/**
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 7dc9f0680bf6cf..8dc9073d4a76d3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -187,7 +187,7 @@ static void fib_flush(struct net *net)
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
- flushed += fib_table_flush(tb);
+ flushed += fib_table_flush(tb, false);
}
if (flushed)
@@ -289,18 +289,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
return ip_hdr(skb)->daddr;
in_dev = __in_dev_get_rcu(dev);
- BUG_ON(!in_dev);
net = dev_net(dev);
scope = RT_SCOPE_UNIVERSE;
if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
+ bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
struct flowi4 fl4 = {
.flowi4_iif = LOOPBACK_IFINDEX,
+ .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
.flowi4_scope = scope,
- .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
+ .flowi4_mark = vmark ? skb->mark : 0,
};
if (!fib_lookup(net, &fl4, &res, 0))
return FIB_RES_PREFSRC(net, res);
@@ -1170,7 +1171,8 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *upper_info = ptr;
+ struct netdev_notifier_info_ext *info_ext = ptr;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
@@ -1205,16 +1207,19 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
- /* fall through */
+ rt_cache_flush(net);
+ break;
case NETDEV_CHANGEMTU:
+ fib_sync_mtu(dev, info_ext->ext.mtu);
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
- info = ptr;
+ upper_info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ if (upper_info->upper_dev &&
+ netif_is_l3_master(upper_info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
@@ -1273,7 +1278,7 @@ static void ip_fib_net_exit(struct net *net)
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
- fib_table_flush(tb);
+ fib_table_flush(tb, true);
fib_free_table(tb);
}
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 44abc52bae1360..3109b9bb95d20e 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -979,6 +979,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
if (val == TCP_CA_UNSPEC)
return -EINVAL;
} else {
+ if (nla_len(nla) != sizeof(u32))
+ return -EINVAL;
val = nla_get_u32(nla);
}
if (type == RTAX_ADVMSS && val > 65535 - 40)
@@ -1371,6 +1373,56 @@ int fib_sync_down_addr(struct net *net, __be32 local)
return ret;
}
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ * larger MTUs on the path. With that limit raised, we can now
+ * discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+ struct fnhe_hash_bucket *bucket;
+ int i;
+
+ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+ fnhe;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+ if (fnhe->fnhe_mtu_locked) {
+ if (new <= fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ fnhe->fnhe_mtu_locked = false;
+ }
+ } else if (new < fnhe->fnhe_pmtu ||
+ orig == fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ }
+ }
+ }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+ struct hlist_head *head = &fib_info_devhash[hash];
+ struct fib_nh *nh;
+
+ hlist_for_each_entry(nh, head, nh_hash) {
+ if (nh->nh_dev == dev)
+ nh_update_mtu(nh, dev->mtu, orig_mtu);
+ }
+}
+
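[Editor's note: the rules in the comment above nh_update_mtu() reduce to a small per-exception state update. A pure-function sketch follows; the struct and field names are stand-ins for the fnhe_* members, not kernel definitions.]

#include <stdbool.h>
#include <stdint.h>

struct pmtu_exc {
	uint32_t pmtu;
	bool locked;
};

static void exc_update_mtu(struct pmtu_exc *e, uint32_t new, uint32_t orig)
{
	if (e->locked) {
		/* Locked exceptions only shrink, and then unlock. */
		if (new <= e->pmtu) {
			e->pmtu = new;
			e->locked = false;
		}
	} else if (new < e->pmtu || orig == e->pmtu) {
		/* Take the new MTU if it is smaller, or if the old
		 * first-hop MTU was what capped PMTU discovery. */
		e->pmtu = new;
	}
}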
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 5c598f99a500a9..fdaa905dccdd0d 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1806,7 +1806,7 @@ void fib_table_flush_external(struct fib_table *tb)
}
/* Caller must hold RTNL. */
-int fib_table_flush(struct fib_table *tb)
+int fib_table_flush(struct fib_table *tb, bool flush_all)
{
struct trie *t = (struct trie *)tb->tb_data;
struct key_vector *pn = t->kv;
@@ -1850,7 +1850,17 @@ int fib_table_flush(struct fib_table *tb)
hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
struct fib_info *fi = fa->fa_info;
- if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
+ if (!fi ||
+ (!(fi->fib_flags & RTNH_F_DEAD) &&
+ !fib_props[fa->fa_type].error)) {
+ slen = fa->fa_slen;
+ continue;
+ }
+
+ /* Do not flush error routes if network namespace is
+ * not being dismantled
+ */
+ if (!flush_all && fib_props[fa->fa_type].error) {
slen = fa->fa_slen;
continue;
}
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 08d8ee12453801..d83888bc33d32d 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -195,6 +195,14 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
u8 proto = NAPI_GRO_CB(skb)->proto;
const struct net_offload **offloads;
+ /* We can clear the encap_mark for FOU as we are essentially doing
+ * one of two possible things. We are either adding an L4 tunnel
+ * header to the outer L3 tunnel header, or we are simply
+ * treating the GRE tunnel header as though it is a UDP protocol
+ * specific header such as VXLAN or GENEVE.
+ */
+ NAPI_GRO_CB(skb)->encap_mark = 0;
+
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[proto]);
@@ -354,6 +362,14 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
}
}
+ /* We can clear the encap_mark for GUE as we are essentially doing
+ * one of two possible things. We are either adding an L4 tunnel
+ * header to the outer L3 tunnel header, or we are simply
+ * treating the GRE tunnel header as though it is a UDP protocol
+ * specific header such as VXLAN or GENEVE.
+ */
+ NAPI_GRO_CB(skb)->encap_mark = 0;
+
rcu_read_lock();
offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
ops = rcu_dereference(offloads[guehdr->proto_ctype]);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ef2d4322aba7f7..a51f0dd6a49ef2 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -567,7 +567,8 @@ relookup_failed:
* MUST reply to only the first fragment.
*/
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt)
{
struct iphdr *iph;
int room;
@@ -681,7 +682,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
- if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in))
+ if (__ip_options_echo(&icmp_param->replyopts.opt.opt, skb_in, opt))
goto out_unlock;
@@ -733,7 +734,7 @@ out_free:
kfree(icmp_param);
out:;
}
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 51512742f21f1c..9ec181c077b643 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -787,7 +787,6 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
tcp_sk(child)->fastopen_rsk = NULL;
}
inet_csk_destroy_sock(child);
- reqsk_put(req);
}
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
@@ -858,6 +857,7 @@ void inet_csk_listen_stop(struct sock *sk)
sock_hold(child);
inet_child_forget(sk, req, child);
+ reqsk_put(req);
bh_unlock_sock(child);
local_bh_enable();
sock_put(child);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index b34fa1bb278f0d..c03e5f5859e14f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,12 +25,6 @@
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
-#define INETFRAGS_EVICT_BUCKETS 128
-#define INETFRAGS_EVICT_MAX 512
-
-/* don't rebuild inetfrag table with new secret more often than this */
-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
-
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
* 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -52,157 +46,8 @@ const u8 ip_frag_ecn_table[16] = {
};
EXPORT_SYMBOL(ip_frag_ecn_table);
-static unsigned int
-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
-{
- return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
-}
-
-static bool inet_frag_may_rebuild(struct inet_frags *f)
-{
- return time_after(jiffies,
- f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
-}
-
-static void inet_frag_secret_rebuild(struct inet_frags *f)
-{
- int i;
-
- write_seqlock_bh(&f->rnd_seqlock);
-
- if (!inet_frag_may_rebuild(f))
- goto out;
-
- get_random_bytes(&f->rnd, sizeof(u32));
-
- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
- struct inet_frag_bucket *hb;
- struct inet_frag_queue *q;
- struct hlist_node *n;
-
- hb = &f->hash[i];
- spin_lock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(q, n, &hb->chain, list) {
- unsigned int hval = inet_frag_hashfn(f, q);
-
- if (hval != i) {
- struct inet_frag_bucket *hb_dest;
-
- hlist_del(&q->list);
-
- /* Relink to new hash chain. */
- hb_dest = &f->hash[hval];
-
- /* This is the only place where we take
- * another chain_lock while already holding
- * one. As this will not run concurrently,
- * we cannot deadlock on hb_dest lock below, if its
- * already locked it will be released soon since
- * other caller cannot be waiting for hb lock
- * that we've taken above.
- */
- spin_lock_nested(&hb_dest->chain_lock,
- SINGLE_DEPTH_NESTING);
- hlist_add_head(&q->list, &hb_dest->chain);
- spin_unlock(&hb_dest->chain_lock);
- }
- }
- spin_unlock(&hb->chain_lock);
- }
-
- f->rebuild = false;
- f->last_rebuild_jiffies = jiffies;
-out:
- write_sequnlock_bh(&f->rnd_seqlock);
-}
-
-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
-{
- if (!hlist_unhashed(&q->list_evictor))
- return false;
-
- return q->net->low_thresh == 0 ||
- frag_mem_limit(q->net) >= q->net->low_thresh;
-}
-
-static unsigned int
-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
-{
- struct inet_frag_queue *fq;
- struct hlist_node *n;
- unsigned int evicted = 0;
- HLIST_HEAD(expired);
-
- spin_lock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
- if (!inet_fragq_should_evict(fq))
- continue;
-
- if (!del_timer(&fq->timer))
- continue;
-
- hlist_add_head(&fq->list_evictor, &expired);
- ++evicted;
- }
-
- spin_unlock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
- f->frag_expire((unsigned long) fq);
-
- return evicted;
-}
-
-static void inet_frag_worker(struct work_struct *work)
-{
- unsigned int budget = INETFRAGS_EVICT_BUCKETS;
- unsigned int i, evicted = 0;
- struct inet_frags *f;
-
- f = container_of(work, struct inet_frags, frags_work);
-
- BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
-
- local_bh_disable();
-
- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
- evicted += inet_evict_bucket(f, &f->hash[i]);
- i = (i + 1) & (INETFRAGS_HASHSZ - 1);
- if (evicted > INETFRAGS_EVICT_MAX)
- break;
- }
-
- f->next_bucket = i;
-
- local_bh_enable();
-
- if (f->rebuild && inet_frag_may_rebuild(f))
- inet_frag_secret_rebuild(f);
-}
-
-static void inet_frag_schedule_worker(struct inet_frags *f)
-{
- if (unlikely(!work_pending(&f->frags_work)))
- schedule_work(&f->frags_work);
-}
-
int inet_frags_init(struct inet_frags *f)
{
- int i;
-
- INIT_WORK(&f->frags_work, inet_frag_worker);
-
- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
- struct inet_frag_bucket *hb = &f->hash[i];
-
- spin_lock_init(&hb->chain_lock);
- INIT_HLIST_HEAD(&hb->chain);
- }
-
- seqlock_init(&f->rnd_seqlock);
- f->last_rebuild_jiffies = 0;
f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
NULL);
if (!f->frags_cachep)
@@ -214,73 +59,53 @@ EXPORT_SYMBOL(inet_frags_init);
void inet_frags_fini(struct inet_frags *f)
{
- cancel_work_sync(&f->frags_work);
+ /* We must wait until all inet_frag_destroy_rcu() calls have completed. */
+ rcu_barrier();
+
kmem_cache_destroy(f->frags_cachep);
+ f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
-{
- unsigned int seq;
- int i;
-
- nf->low_thresh = 0;
-
-evict_again:
- local_bh_disable();
- seq = read_seqbegin(&f->rnd_seqlock);
-
- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
- inet_evict_bucket(f, &f->hash[i]);
-
- local_bh_enable();
- cond_resched();
-
- if (read_seqretry(&f->rnd_seqlock, seq) ||
- sum_frag_mem_limit(nf))
- goto evict_again;
-}
-EXPORT_SYMBOL(inet_frags_exit_net);
-
-static struct inet_frag_bucket *
-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
-__acquires(hb->chain_lock)
+static void inet_frags_free_cb(void *ptr, void *arg)
{
- struct inet_frag_bucket *hb;
- unsigned int seq, hash;
-
- restart:
- seq = read_seqbegin(&f->rnd_seqlock);
+ struct inet_frag_queue *fq = ptr;
- hash = inet_frag_hashfn(f, fq);
- hb = &f->hash[hash];
+ /* If we cannot cancel the timer, it means this frag_queue
+ * is already disappearing, and we have nothing to do.
+ * Otherwise, we own a refcount until the end of this function.
+ */
+ if (!del_timer(&fq->timer))
+ return;
- spin_lock(&hb->chain_lock);
- if (read_seqretry(&f->rnd_seqlock, seq)) {
- spin_unlock(&hb->chain_lock);
- goto restart;
+ spin_lock_bh(&fq->lock);
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+ fq->flags |= INET_FRAG_COMPLETE;
+ atomic_dec(&fq->refcnt);
}
+ spin_unlock_bh(&fq->lock);
- return hb;
+ inet_frag_put(fq);
}
-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+void inet_frags_exit_net(struct netns_frags *nf)
{
- struct inet_frag_bucket *hb;
+ nf->high_thresh = 0; /* prevent creation of new frags */
- hb = get_frag_bucket_locked(fq, f);
- hlist_del(&fq->list);
- fq->flags |= INET_FRAG_COMPLETE;
- spin_unlock(&hb->chain_lock);
+ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
}
+EXPORT_SYMBOL(inet_frags_exit_net);
-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+void inet_frag_kill(struct inet_frag_queue *fq)
{
if (del_timer(&fq->timer))
atomic_dec(&fq->refcnt);
if (!(fq->flags & INET_FRAG_COMPLETE)) {
- fq_unlink(fq, f);
+ struct netns_frags *nf = fq->net;
+
+ fq->flags |= INET_FRAG_COMPLETE;
+ rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
atomic_dec(&fq->refcnt);
}
}
@@ -294,11 +119,23 @@ static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
kfree_skb(skb);
}
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+static void inet_frag_destroy_rcu(struct rcu_head *head)
+{
+ struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
+ rcu);
+ struct inet_frags *f = q->net->f;
+
+ if (f->destructor)
+ f->destructor(q);
+ kmem_cache_free(f->frags_cachep, q);
+}
+
+void inet_frag_destroy(struct inet_frag_queue *q)
{
struct sk_buff *fp;
struct netns_frags *nf;
unsigned int sum, sum_truesize = 0;
+ struct inet_frags *f;
WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
WARN_ON(del_timer(&q->timer) != 0);
@@ -306,68 +143,34 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
/* Release all fragment data. */
fp = q->fragments;
nf = q->net;
- while (fp) {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- frag_kfree_skb(nf, f, fp);
- fp = xp;
+ f = nf->f;
+ if (fp) {
+ do {
+ struct sk_buff *xp = fp->next;
+
+ sum_truesize += fp->truesize;
+ frag_kfree_skb(nf, f, fp);
+ fp = xp;
+ } while (fp);
+ } else {
+ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
}
sum = sum_truesize + f->qsize;
- if (f->destructor)
- f->destructor(q);
- kmem_cache_free(f->frags_cachep, q);
+ call_rcu(&q->rcu, inet_frag_destroy_rcu);
sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
- struct inet_frag_queue *qp_in,
- struct inet_frags *f,
- void *arg)
-{
- struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
- struct inet_frag_queue *qp;
-
-#ifdef CONFIG_SMP
- /* With SMP race we have to recheck hash table, because
- * such entry could have been created on other cpu before
- * we acquired hash bucket lock.
- */
- hlist_for_each_entry(qp, &hb->chain, list) {
- if (qp->net == nf && f->match(qp, arg)) {
- atomic_inc(&qp->refcnt);
- spin_unlock(&hb->chain_lock);
- qp_in->flags |= INET_FRAG_COMPLETE;
- inet_frag_put(qp_in, f);
- return qp;
- }
- }
-#endif
- qp = qp_in;
- if (!mod_timer(&qp->timer, jiffies + nf->timeout))
- atomic_inc(&qp->refcnt);
-
- atomic_inc(&qp->refcnt);
- hlist_add_head(&qp->list, &hb->chain);
-
- spin_unlock(&hb->chain_lock);
-
- return qp;
-}
-
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
struct inet_frags *f,
void *arg)
{
struct inet_frag_queue *q;
- if (frag_mem_limit(nf) > nf->high_thresh) {
- inet_frag_schedule_worker(f);
+ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
return NULL;
- }
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
if (!q)
@@ -379,70 +182,52 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
- atomic_set(&q->refcnt, 1);
+ atomic_set(&q->refcnt, 3);
return q;
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
- struct inet_frags *f,
- void *arg)
+ void *arg,
+ struct inet_frag_queue **prev)
{
+ struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
q = inet_frag_alloc(nf, f, arg);
- if (!q)
+ if (!q) {
+ *prev = ERR_PTR(-ENOMEM);
return NULL;
-
- return inet_frag_intern(nf, q, f, arg);
-}
-
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key,
- unsigned int hash)
-{
- struct inet_frag_bucket *hb;
- struct inet_frag_queue *q;
- int depth = 0;
-
- if (frag_mem_limit(nf) > nf->low_thresh)
- inet_frag_schedule_worker(f);
-
- hash &= (INETFRAGS_HASHSZ - 1);
- hb = &f->hash[hash];
-
- spin_lock(&hb->chain_lock);
- hlist_for_each_entry(q, &hb->chain, list) {
- if (q->net == nf && f->match(q, key)) {
- atomic_inc(&q->refcnt);
- spin_unlock(&hb->chain_lock);
- return q;
- }
- depth++;
}
- spin_unlock(&hb->chain_lock);
-
- if (depth <= INETFRAGS_MAXDEPTH)
- return inet_frag_create(nf, f, key);
-
- if (inet_frag_may_rebuild(f)) {
- if (!f->rebuild)
- f->rebuild = true;
- inet_frag_schedule_worker(f);
+ mod_timer(&q->timer, jiffies + nf->timeout);
+
+ *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+ &q->node, f->rhash_params);
+ if (*prev) {
+ q->flags |= INET_FRAG_COMPLETE;
+ inet_frag_kill(q);
+ inet_frag_destroy(q);
+ return NULL;
}
-
- return ERR_PTR(-ENOBUFS);
+ return q;
}
-EXPORT_SYMBOL(inet_frag_find);
+EXPORT_SYMBOL(inet_frag_create);
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix)
+/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
{
- static const char msg[] = "inet_frag_find: Fragment hash bucket"
- " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
- ". Dropping fragment.\n";
+ struct inet_frag_queue *fq = NULL, *prev;
- if (PTR_ERR(q) == -ENOBUFS)
- net_dbg_ratelimited("%s%s", prefix, msg);
+ rcu_read_lock();
+ prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+ if (!prev)
+ fq = inet_frag_create(nf, key, &prev);
+ if (prev && !IS_ERR(prev)) {
+ fq = prev;
+ if (!atomic_inc_not_zero(&fq->refcnt))
+ fq = NULL;
+ }
+ rcu_read_unlock();
+ return fq;
}
-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
+EXPORT_SYMBOL(inet_frag_find);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 86fa4580954060..0c5862914f05d5 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -448,6 +448,7 @@ relookup:
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
+ p->n_redirects = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
* calculation of tokens is at its maximum.
*/
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 7057a1b09b5eb0..9b09a9b5a4fe45 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -58,27 +58,64 @@
static int sysctl_ipfrag_max_dist __read_mostly = 64;
static const char ip_frag_cache_name[] = "ip4-frags";
-struct ipfrag_skb_cb
-{
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
struct inet_skb_parm h;
- int offset;
+ struct sk_buff *next_frag;
+ int frag_run_len;
};
-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void ip4_frag_init_run(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ ip4_frag_init_run(skb);
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
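[Editor's note: a toy model of the "run" invariants stated above: only a run's head is linked into the rb-tree, the head accumulates the run's total length, and the tail's next_frag is NULL. Plain structs stand in for sk_buffs; this mirrors the shape of the helpers, not the kernel types.]

#include <stddef.h>

struct frag {
	struct frag *next_frag;	/* NULL at the tail of a run */
	int len;
	int frag_run_len;	/* total run length; valid on the head only */
};

/* Start a new run with f as its head (mirrors ip4_frag_init_run()). */
static void run_init(struct frag *f)
{
	f->next_frag = NULL;
	f->frag_run_len = f->len;
}

/* Append f to the run given by head/tail (mirrors
 * ip4_frag_append_to_last_run(), minus the rb-tree side). */
static void run_append(struct frag *head, struct frag *tail, struct frag *f)
{
	f->next_frag = NULL;
	tail->next_frag = f;
	head->frag_run_len += f->len;
}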
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
- u32 user;
- __be32 saddr;
- __be32 daddr;
- __be16 id;
- u8 protocol;
u8 ecn; /* RFC3168 support */
u16 max_df_size; /* largest frag with DF set seen */
int iif;
- int vif; /* L3 master device index */
unsigned int rid;
struct inet_peer *peer;
};
@@ -90,49 +127,9 @@ static u8 ip4_frag_ecn(u8 tos)
static struct inet_frags ip4_frags;
-int ip_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv4.frags);
-}
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev);
-
-struct ip4_create_arg {
- struct iphdr *iph;
- u32 user;
- int vif;
-};
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
-{
- net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
- return jhash_3words((__force u32)id << 16 | prot,
- (__force u32)saddr, (__force u32)daddr,
- ip4_frags.rnd);
-}
-
-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
-{
- const struct ipq *ipq;
-
- ipq = container_of(q, struct ipq, q);
- return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
-}
-
-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct ipq *qp;
- const struct ip4_create_arg *arg = a;
-
- qp = container_of(q, struct ipq, q);
- return qp->id == arg->iph->id &&
- qp->saddr == arg->iph->saddr &&
- qp->daddr == arg->iph->daddr &&
- qp->protocol == arg->iph->protocol &&
- qp->user == arg->user &&
- qp->vif == arg->vif;
-}
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
@@ -141,17 +138,12 @@ static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
frags);
struct net *net = container_of(ipv4, struct net, ipv4);
- const struct ip4_create_arg *arg = a;
+ const struct frag_v4_compare_key *key = a;
- qp->protocol = arg->iph->protocol;
- qp->id = arg->iph->id;
- qp->ecn = ip4_frag_ecn(arg->iph->tos);
- qp->saddr = arg->iph->saddr;
- qp->daddr = arg->iph->daddr;
- qp->vif = arg->vif;
- qp->user = arg->user;
+ q->key.v4 = *key;
+ qp->ecn = 0;
qp->peer = sysctl_ipfrag_max_dist ?
- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+ inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
NULL;
}
@@ -169,7 +161,7 @@ static void ip4_frag_free(struct inet_frag_queue *q)
static void ipq_put(struct ipq *ipq)
{
- inet_frag_put(&ipq->q, &ip4_frags);
+ inet_frag_put(&ipq->q);
}
/* Kill ipq entry. It is not destroyed immediately,
@@ -177,7 +169,7 @@ static void ipq_put(struct ipq *ipq)
*/
static void ipq_kill(struct ipq *ipq)
{
- inet_frag_kill(&ipq->q, &ip4_frags);
+ inet_frag_kill(&ipq->q);
}
static bool frag_expire_skip_icmp(u32 user)
@@ -194,8 +186,11 @@ static bool frag_expire_skip_icmp(u32 user)
*/
static void ip_expire(unsigned long arg)
{
- struct ipq *qp;
+ const struct iphdr *iph;
+ struct sk_buff *head = NULL;
struct net *net;
+ struct ipq *qp;
+ int err;
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);
@@ -208,51 +203,65 @@ static void ip_expire(unsigned long arg)
ipq_kill(qp);
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
- if (!inet_frag_evicting(&qp->q)) {
- struct sk_buff *clone, *head = qp->q.fragments;
- const struct iphdr *iph;
- int err;
-
- IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+ /* sk_buff::dev and sk_buff::rbnode are unionized. So we
+ * pull the head out of the tree in order to be able to
+ * deal with head->dev.
+ */
+ if (qp->q.fragments) {
+ head = qp->q.fragments;
+ qp->q.fragments = head->next;
+ } else {
+ head = skb_rb_first(&qp->q.rb_fragments);
+ if (!head)
goto out;
+ if (FRAG_CB(head)->next_frag)
+ rb_replace_node(&head->rbnode,
+ &FRAG_CB(head)->next_frag->rbnode,
+ &qp->q.rb_fragments);
+ else
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
+ }
+ if (head == qp->q.fragments_tail)
+ qp->q.fragments_tail = NULL;
- head->dev = dev_get_by_index_rcu(net, qp->iif);
- if (!head->dev)
- goto out;
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (!head->dev)
+ goto out;
- /* skb has no dst, perform route lookup again */
- iph = ip_hdr(head);
- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
iph->tos, head->dev);
- if (err)
- goto out;
+ if (err)
+ goto out;
- /* Only an end host needs to send an ICMP
- * "Fragment Reassembly Timeout" message, per RFC792.
- */
- if (frag_expire_skip_icmp(qp->user) &&
- (skb_rtable(head)->rt_type != RTN_LOCAL))
- goto out;
+ /* Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out;
- clone = skb_clone(head, GFP_ATOMIC);
+ spin_unlock(&qp->q.lock);
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+ goto out_rcu_unlock;
- /* Send an ICMP "Fragment Reassembly Timeout" message. */
- if (clone) {
- spin_unlock(&qp->q.lock);
- icmp_send(clone, ICMP_TIME_EXCEEDED,
- ICMP_EXC_FRAGTIME, 0);
- consume_skb(clone);
- goto out_rcu_unlock;
- }
- }
out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
+ if (head)
+ kfree_skb(head);
ipq_put(qp);
}
@@ -262,21 +271,20 @@ out_rcu_unlock:
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
u32 user, int vif)
{
+ struct frag_v4_compare_key key = {
+ .saddr = iph->saddr,
+ .daddr = iph->daddr,
+ .user = user,
+ .vif = vif,
+ .id = iph->id,
+ .protocol = iph->protocol,
+ };
struct inet_frag_queue *q;
- struct ip4_create_arg arg;
- unsigned int hash;
-
- arg.iph = iph;
- arg.user = user;
- arg.vif = vif;
- hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
-
- q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&net->ipv4.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct ipq, q);
}
@@ -296,7 +304,7 @@ static int ip_frag_too_far(struct ipq *qp)
end = atomic_inc_return(&peer->rid);
qp->rid = end;
- rc = qp->q.fragments && (end - start) > max;
+ rc = qp->q.fragments_tail && (end - start) > max;
if (rc) {
struct net *net;
@@ -310,7 +318,6 @@ static int ip_frag_too_far(struct ipq *qp)
static int ip_frag_reinit(struct ipq *qp)
{
- struct sk_buff *fp;
unsigned int sum_truesize = 0;
if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
@@ -318,21 +325,16 @@ static int ip_frag_reinit(struct ipq *qp)
return -ETIMEDOUT;
}
- fp = qp->q.fragments;
- do {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
- } while (fp);
+ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
sub_frag_mem_limit(qp->q.net, sum_truesize);
qp->q.flags = 0;
qp->q.len = 0;
qp->q.meat = 0;
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
qp->iif = 0;
qp->ecn = 0;
@@ -342,11 +344,13 @@ static int ip_frag_reinit(struct ipq *qp)
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
- struct sk_buff *prev, *next;
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct rb_node **rbn, *parent;
+ struct sk_buff *skb1, *prev_tail;
+ int ihl, end, skb1_run_end;
struct net_device *dev;
unsigned int fragsize;
int flags, offset;
- int ihl, end;
int err = -ENOENT;
u8 ecn;
@@ -405,94 +409,68 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
if (err)
goto err;
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = qp->q.fragments_tail;
- if (!prev || FRAG_CB(prev)->offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = qp->q.fragments; next != NULL; next = next->next) {
- if (FRAG_CB(next)->offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
+ /* Note : skb->rbnode and skb->dev share the same location. */
+ dev = skb->dev;
+ /* Make sure the compiler won't do silly aliasing games */
+ barrier();
+
+ /* RFC5722, Section 4, amended by Errata ID : 3089
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * We do the same here for IPv4 (and increment an snmp counter) but
+ * we do not want to drop the whole queue in response to a duplicate
+ * fragment.
*/
- if (prev) {
- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
- if (i > 0) {
- offset += i;
- err = -EINVAL;
- if (end <= offset)
- goto err;
- err = -ENOMEM;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
-
- err = -ENOMEM;
-
- while (next && FRAG_CB(next)->offset < end) {
- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- FRAG_CB(next)->offset += i;
- qp->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
+ err = -EINVAL;
+ /* Find out where to put this fragment. */
+ prev_tail = qp->q.fragments_tail;
+ if (!prev_tail)
+ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
+ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
+ goto discard_qp;
+ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
+ ip4_frag_append_to_last_run(&qp->q, skb);
+ else
+ ip4_frag_create_run(&qp->q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ rbn = &qp->q.rb_fragments.rb_node;
+ do {
+ parent = *rbn;
+ skb1 = rb_to_skb(parent);
+ skb1_run_end = skb1->ip_defrag_offset +
+ FRAG_CB(skb1)->frag_run_len;
+ if (end <= skb1->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= skb1_run_end)
+ rbn = &parent->rb_right;
+ else if (offset >= skb1->ip_defrag_offset &&
+ end <= skb1_run_end)
+ goto err; /* No new data, potential duplicate */
else
- qp->q.fragments = next;
-
- qp->q.meat -= free_it->len;
- sub_frag_mem_limit(qp->q.net, free_it->truesize);
- kfree_skb(free_it);
- }
+ goto discard_qp; /* Found an overlap */
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ ip4_frag_init_run(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
}
- FRAG_CB(skb)->offset = offset;
-
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- qp->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- qp->q.fragments = skb;
-
- dev = skb->dev;
- if (dev) {
+ if (dev)
qp->iif = dev->ifindex;
- skb->dev = NULL;
- }
+ skb->ip_defrag_offset = offset;
+
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
qp->ecn |= ecn;
@@ -514,7 +492,7 @@ found:
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- err = ip_frag_reasm(qp, prev, dev);
+ err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
return err;
}
@@ -522,20 +500,23 @@ found:
skb_dst_drop(skb);
return -EINPROGRESS;
+discard_qp:
+ inet_frag_kill(&qp->q);
+ IP_INC_STATS_BH(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
return err;
}
-
/* Build a new IP datagram from all its fragments. */
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = qp->q.fragments;
+ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
+ struct sk_buff **nextp; /* To build frag_list. */
+ struct rb_node *rbn;
int len;
int ihlen;
int err;
@@ -549,26 +530,27 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
goto out_fail;
}
/* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
if (!fp)
goto out_nomem;
-
- fp->next = head->next;
- if (!fp->next)
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(prev_tail)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &qp->q.rb_fragments);
+ if (qp->q.fragments_tail == skb)
qp->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, qp->q.fragments);
- head->next = qp->q.fragments->next;
-
- consume_skb(qp->q.fragments);
- qp->q.fragments = head;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &qp->q.rb_fragments);
+ consume_skb(head);
+ head = skb;
}
- WARN_ON(!head);
- WARN_ON(FRAG_CB(head)->offset != 0);
+ WARN_ON(head->ip_defrag_offset != 0);
/* Allocate a new buffer for the datagram. */
ihlen = ip_hdrlen(head);
@@ -592,35 +574,61 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
goto out_nomem;
- clone->next = head->next;
- head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
+ head->truesize += clone->truesize;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(qp->q.net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
}
- skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &qp->q.rb_fragments);
+ rbn = rbnext;
+ }
}
sub_frag_mem_limit(qp->q.net, head->truesize);
+ *nextp = NULL;
head->next = NULL;
+ head->prev = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
@@ -648,7 +656,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
return 0;
out_nomem:
@@ -656,7 +666,7 @@ out_nomem:
err = -ENOMEM;
goto out_fail;
out_oversize:
- net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
return err;
@@ -716,10 +726,14 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
- if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
- return skb;
- if (pskb_trim_rcsum(skb, netoff + len))
- return skb;
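+ /* On failure, free the skb and return NULL instead of
+ * handing a bad packet back to the caller.
+ */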
+ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ if (pskb_trim_rcsum(skb, netoff + len)) {
+ kfree_skb(skb);
+ return NULL;
+ }
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(net, skb, user))
return NULL;
@@ -730,25 +744,46 @@ struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
}
EXPORT_SYMBOL(ip_check_defrag);
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
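+ /* Each tree node heads a "run" of consecutive fragments
+ * chained via FRAG_CB(skb)->next_frag; free the whole run
+ * before moving to the next node.
+ */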
+ while (skb) {
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+ sum += skb->truesize;
+ kfree_skb(skb);
+ skb = next;
+ }
+ }
+ return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
#ifdef CONFIG_SYSCTL
-static int zero;
+static int dist_min;
static struct ctl_table ip4_frags_ns_ctl_table[] = {
{
.procname = "ipfrag_high_thresh",
.data = &init_net.ipv4.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ipv4.frags.low_thresh
},
{
.procname = "ipfrag_low_thresh",
.data = &init_net.ipv4.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ipv4.frags.high_thresh
},
{
@@ -777,7 +812,7 @@ static struct ctl_table ip4_frags_ctl_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero
+ .extra1 = &dist_min,
},
{ }
};
@@ -849,6 +884,8 @@ static void __init ip4_frags_ctl_register(void)
static int __net_init ipv4_frags_init_net(struct net *net)
{
+ int res;
+
/* Fragment cache limits.
*
* The fragment memory accounting code, (tries to) account for
@@ -872,15 +909,21 @@ static int __net_init ipv4_frags_init_net(struct net *net)
*/
net->ipv4.frags.timeout = IP_FRAG_TIME;
- inet_frags_init_net(&net->ipv4.frags);
+ net->ipv4.frags.f = &ip4_frags;
- return ip4_frags_ns_ctl_register(net);
+ res = inet_frags_init_net(&net->ipv4.frags);
+ if (res < 0)
+ return res;
+ res = ip4_frags_ns_ctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&net->ipv4.frags);
+ return res;
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
ip4_frags_ns_ctl_unregister(net);
- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
+ inet_frags_exit_net(&net->ipv4.frags);
}
static struct pernet_operations ip4_frags_ops = {
@@ -888,18 +931,50 @@ static struct pernet_operations ip4_frags_ops = {
.exit = ipv4_frags_exit_net,
};
+
+static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v4,
+ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
+}
+
+static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v4_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
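+ /* rhashtable convention: return 0 on a match, non-zero otherwise */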
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params ip4_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .key_offset = offsetof(struct inet_frag_queue, key),
+ .key_len = sizeof(struct frag_v4_compare_key),
+ .hashfn = ip4_key_hashfn,
+ .obj_hashfn = ip4_obj_hashfn,
+ .obj_cmpfn = ip4_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
void __init ipfrag_init(void)
{
- ip4_frags_ctl_register();
- register_pernet_subsys(&ip4_frags_ops);
- ip4_frags.hashfn = ip4_hashfn;
ip4_frags.constructor = ip4_frag_init;
ip4_frags.destructor = ip4_frag_free;
ip4_frags.skb_free = NULL;
ip4_frags.qsize = sizeof(struct ipq);
- ip4_frags.match = ip4_frag_match;
ip4_frags.frag_expire = ip_expire;
ip4_frags.frags_cache_name = ip_frag_cache_name;
+ ip4_frags.rhash_params = ip4_rhash_params;
if (inet_frags_init(&ip4_frags))
panic("IP: failed to allocate ip4_frags cache\n");
+ ip4_frags_ctl_register();
+ register_pernet_subsys(&ip4_frags_ops);
}
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index b1209b63381f6f..eb1834f2682ff4 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -444,6 +444,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
goto drop;
}
+ iph = ip_hdr(skb);
skb->transport_header = skb->network_header + iph->ihl*4;
/* Remove any debris in the socket control block */
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index bd246792360b4b..d3922a93e4c1d7 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -254,8 +254,9 @@ static void spec_dst_fill(__be32 *spec_dst, struct sk_buff *skb)
* If opt == NULL, then skb->data should point to IP header.
*/
-int ip_options_compile(struct net *net,
- struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb,
+ __be32 *info)
{
__be32 spec_dst = htonl(INADDR_ANY);
unsigned char *pp_ptr = NULL;
@@ -472,11 +473,22 @@ eol:
return 0;
error:
- if (skb) {
- icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
- }
+ if (info)
+ *info = htonl((pp_ptr-iph)<<24);
return -EINVAL;
}
+
+int ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb)
+{
+ int ret;
+ __be32 info;
+
+ ret = __ip_options_compile(net, opt, skb, &info);
+ if (ret != 0 && skb)
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+ return ret;
+}
EXPORT_SYMBOL(ip_options_compile);
/*
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index c1d7dc4339760a..ac2966f02d0788 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -480,6 +480,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->dev = from->dev;
to->mark = from->mark;
+ skb_copy_hash(to, from);
+
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 611b5939c965be..274ce9e2674d5e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -133,20 +133,20 @@ static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
+ __be16 _ports[2], *ports;
struct sockaddr_in sin;
- const struct iphdr *iph = ip_hdr(skb);
- __be16 *ports = (__be16 *)skb_transport_header(skb);
-
- if (skb_transport_offset(skb) + 4 > skb->len)
- return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (!ports)
+ return;
sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = iph->daddr;
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
@@ -493,8 +493,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
int err;
int copied;
- WARN_ON_ONCE(sk->sk_family == AF_INET6);
-
err = -EAGAIN;
skb = sock_dequeue_err_skb(sk);
if (!skb)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 3d62feb65932cb..91ae061d46ac2a 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -261,8 +261,8 @@ static struct net_device *__ip_tunnel_create(struct net *net,
} else {
if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
- strlcpy(name, ops->kind, IFNAMSIZ);
- strncat(name, "%d", 2);
+ strcpy(name, ops->kind);
+ strcat(name, "%d");
}
ASSERT_RTNL();
@@ -597,6 +597,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -607,6 +608,14 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
int err;
bool connected;
+ /* ensure we can access the inner net header, for several users below */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_nhdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_nhdr_len = sizeof(struct ipv6hdr);
+ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+ goto tx_error;
+
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index dbda0565781cfe..4916d1857b75a1 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -71,7 +71,7 @@ int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
iph->version = 4;
iph->ihl = sizeof(struct iphdr) >> 2;
- iph->frag_off = df;
+ iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
iph->protocol = proto;
iph->tos = tos;
iph->daddr = dst;
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 4b7c81f88abf26..fcf327ebd13456 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -75,6 +75,33 @@ drop:
return 0;
}
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
+{
+ struct ip_tunnel *tunnel;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
+ if (tunnel) {
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto drop;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+ skb->dev = tunnel->dev;
+
+ return xfrm_input(skb, nexthdr, spi, encap_type);
+ }
+
+ return -EINVAL;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
static int vti_rcv(struct sk_buff *skb)
{
XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -83,6 +110,14 @@ static int vti_rcv(struct sk_buff *skb)
return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
}
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
unsigned short family;
@@ -409,6 +444,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
.priority = 100,
};
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+ .handler = vti_rcv_ipip,
+ .err_handler = vti4_err,
+ .priority = 0,
+};
+
static int __net_init vti_init_net(struct net *net)
{
int err;
@@ -592,6 +633,13 @@ static int __init vti_init(void)
if (err < 0)
goto xfrm_proto_comp_failed;
+ msg = "ipip tunnel";
+ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+ if (err < 0) {
+ pr_info("%s: cant't register tunnel\n",__func__);
+ goto xfrm_tunnel_failed;
+ }
+
msg = "netlink interface";
err = rtnl_link_register(&vti_link_ops);
if (err < 0)
@@ -601,6 +649,8 @@ static int __init vti_init(void)
rtnl_link_failed:
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 9d6b9c4c5f822f..60f564db25a3d5 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -790,6 +790,11 @@ static void __init ic_bootp_init_ext(u8 *e)
*/
static inline void __init ic_bootp_init(void)
{
+ /* Re-initialise all name servers to NONE, in case any were set via the
+ * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses
+ * specified there will already have been decoded but are no longer
+ * needed
+ */
ic_nameservers_predef();
dev_add_pack(&bootp_packet_type);
@@ -1423,6 +1428,13 @@ static int __init ip_auto_config(void)
int err;
unsigned int i;
+ /* Initialise all name servers to NONE (but only if the "ip=" or
+ * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise
+ * we'll overwrite the IP addresses specified there)
+ */
+ if (ic_set_manually == 0)
+ ic_nameservers_predef();
+
#ifdef CONFIG_PROC_FS
proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops);
#endif /* CONFIG_PROC_FS */
@@ -1640,6 +1652,7 @@ static int __init ip_auto_config_setup(char *addrs)
return 1;
}
+ /* Initialise all name servers to NONE */
ic_nameservers_predef();
/* Parse string for static IP assignment. */
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8e77786549c614..1cb865fcc91b49 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -66,6 +66,7 @@
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
+#include <linux/nospec.h>
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM 1
@@ -1574,6 +1575,7 @@ int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
+ vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index f51b32ed353c91..cbe630aab44ac2 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -983,6 +983,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
sizeof(struct arpt_get_entries) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
if (!IS_ERR_OR_NULL(t)) {
@@ -1557,6 +1558,7 @@ static int compat_get_entries(struct net *net,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(NFPROTO_ARP);
t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index dac62b5e7fe32a..53d664a7774c48 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -663,6 +663,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
return -ENOMEM;
j = 0;
+ memset(&mtpar, 0, sizeof(mtpar));
mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ip;
@@ -1170,6 +1171,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET, get.name);
if (!IS_ERR_OR_NULL(t)) {
@@ -1798,6 +1800,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(AF_INET);
t = xt_find_table_lock(net, AF_INET, get.name);
@@ -2071,6 +2074,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
.checkentry = icmp_checkentry,
.proto = IPPROTO_ICMP,
.family = NFPROTO_IPV4,
+ .me = THIS_MODULE,
},
};
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 3abd9d7a3adf32..b001ad6681084e 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -52,7 +52,6 @@
static int sockstat_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
- unsigned int frag_mem;
int orphans, sockets;
local_bh_disable();
@@ -72,8 +71,9 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
sock_prot_inuse_get(net, &udplite_prot));
seq_printf(seq, "RAW: inuse %d\n",
sock_prot_inuse_get(net, &raw_prot));
- frag_mem = ip_frag_mem(net);
- seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
+ seq_printf(seq, "FRAG: inuse %u memory %lu\n",
+ atomic_read(&net->ipv4.frags.rhashtable.nelems),
+ frag_mem_limit(&net->ipv4.frags));
return 0;
}
@@ -132,6 +132,7 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 75406603fa1e42..dd59b4deb27aff 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -880,13 +880,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
peer->rate_tokens = 0;
+ peer->n_redirects = 0;
+ }
/* Too many ignored redirects; do not send anything
* set dst.rate_last to the last seen redirected packet.
*/
- if (peer->rate_tokens >= ip_rt_redirect_number) {
+ if (peer->n_redirects >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
goto out_put_peer;
}
@@ -903,6 +905,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->rate_tokens;
+ ++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)
@@ -1607,6 +1610,10 @@ static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
if (fnhe->fnhe_daddr == daddr) {
rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+ /* set fnhe_daddr to 0 to ensure it won't bind with
+ * new dsts in rt_bind_exception().
+ */
+ fnhe->fnhe_daddr = 0;
fnhe_flush_routes(fnhe);
kfree_rcu(fnhe, rcu);
break;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 8ed2e7d325d9a7..a48dd55bb0c9bb 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -228,7 +228,12 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
if (child) {
atomic_set(&req->rsk_refcnt, 1);
sock_rps_save_rxhash(child, skb);
- inet_csk_reqsk_queue_add(sk, req, child);
+ if (!inet_csk_reqsk_queue_add(sk, req, child)) {
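+ /* Could not add the child to the accept queue: unlock and
+ * release it, and drop the request to avoid a leak.
+ */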
+ bh_unlock_sock(child);
+ sock_put(child);
+ child = NULL;
+ reqsk_put(req);
+ }
} else {
reqsk_free(req);
}
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b137c43c223144..fe0f54f55f17fe 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -141,8 +141,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
if (write && ret == 0) {
low = make_kgid(user_ns, urange[0]);
high = make_kgid(user_ns, urange[1]);
- if (!gid_valid(low) || !gid_valid(high) ||
- (urange[1] < urange[0]) || gid_lt(high, low)) {
+ if (!gid_valid(low) || !gid_valid(high))
+ return -EINVAL;
+ if (urange[1] < urange[0] || gid_lt(high, low)) {
low = make_kgid(&init_user_ns, 1);
high = make_kgid(&init_user_ns, 0);
}
@@ -228,8 +229,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
{
struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
struct tcp_fastopen_context *ctxt;
- int ret;
u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
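+ /* key[] mirrors the context key as little-endian words;
+ * user_key[] holds the host-order values that the sysctl
+ * string displays and parses.
+ */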
+ __le32 key[4];
+ int ret, i;
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
if (!tbl.data)
@@ -238,11 +240,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
rcu_read_lock();
ctxt = rcu_dereference(tcp_fastopen_ctx);
if (ctxt)
- memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+ memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
else
- memset(user_key, 0, sizeof(user_key));
+ memset(key, 0, sizeof(key));
rcu_read_unlock();
+ for (i = 0; i < ARRAY_SIZE(key); i++)
+ user_key[i] = le32_to_cpu(key[i]);
+
snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
user_key[0], user_key[1], user_key[2], user_key[3]);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -258,12 +263,16 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
* first invocation of tcp_fastopen_cookie_gen
*/
tcp_fastopen_init_key_once(false);
- tcp_fastopen_reset_cipher(user_key, TCP_FASTOPEN_KEY_LENGTH);
+
+ for (i = 0; i < ARRAY_SIZE(user_key); i++)
+ key[i] = cpu_to_le32(user_key[i]);
+
+ tcp_fastopen_reset_cipher(key, TCP_FASTOPEN_KEY_LENGTH);
}
bad_key:
pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
- user_key[0], user_key[1], user_key[2], user_key[3],
+ user_key[0], user_key[1], user_key[2], user_key[3],
(char *)tbl.data, ret);
kfree(tbl.data);
return ret;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0b556abd53a3da..119c452b0bfdce 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -384,7 +384,7 @@ void tcp_init_sock(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- __skb_queue_head_init(&tp->out_of_order_queue);
+ tp->out_of_order_queue = RB_ROOT;
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
INIT_LIST_HEAD(&tp->tsq_node);
@@ -1679,7 +1679,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
* shouldn't happen.
*/
if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
- "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+ "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
*seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
flags))
break;
@@ -1692,7 +1692,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK),
- "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+ "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
*seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
}
@@ -2260,7 +2260,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
tcp_write_queue_purge(sk);
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
inet->inet_dport = 0;
@@ -2273,7 +2273,6 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
- icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->packets_out = 0;
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 55d7da1d2ce991..62f90f6b7a9d70 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -55,7 +55,6 @@ struct dctcp {
u32 dctcp_alpha;
u32 next_seq;
u32 ce_state;
- u32 delayed_ack_reserved;
u32 loss_cwnd;
};
@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
- ca->delayed_ack_reserved = 0;
ca->loss_cwnd = 0;
ca->ce_state = 0;
@@ -131,23 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- /* State has changed from CE=0 to CE=1 and delayed
- * ACK has not sent yet.
- */
- if (!ca->ce_state && ca->delayed_ack_reserved) {
- u32 tmp_rcv_nxt;
-
- /* Save current rcv_nxt. */
- tmp_rcv_nxt = tp->rcv_nxt;
-
- /* Generate previous ack with CE=0. */
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
- tp->rcv_nxt = ca->prior_rcv_nxt;
-
- tcp_send_ack(sk);
-
- /* Recover current rcv_nxt. */
- tp->rcv_nxt = tmp_rcv_nxt;
+ if (!ca->ce_state) {
+ /* State has changed from CE=0 to CE=1, force an immediate
+ * ACK to reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+ tcp_enter_quickack_mode(sk, 1);
}
ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- /* State has changed from CE=1 to CE=0 and delayed
- * ACK has not sent yet.
- */
- if (ca->ce_state && ca->delayed_ack_reserved) {
- u32 tmp_rcv_nxt;
-
- /* Save current rcv_nxt. */
- tmp_rcv_nxt = tp->rcv_nxt;
-
- /* Generate previous ack with CE=1. */
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
- tp->rcv_nxt = ca->prior_rcv_nxt;
-
- tcp_send_ack(sk);
-
- /* Recover current rcv_nxt. */
- tp->rcv_nxt = tmp_rcv_nxt;
+ if (ca->ce_state) {
+ /* State has changed from CE=1 to CE=0, force an immediate
+ * ACK to reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+ tcp_enter_quickack_mode(sk, 1);
}
ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -248,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
}
}
-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
-{
- struct dctcp *ca = inet_csk_ca(sk);
-
- switch (ev) {
- case CA_EVENT_DELAYED_ACK:
- if (!ca->delayed_ack_reserved)
- ca->delayed_ack_reserved = 1;
- break;
- case CA_EVENT_NON_DELAYED_ACK:
- if (ca->delayed_ack_reserved)
- ca->delayed_ack_reserved = 0;
- break;
- default:
- /* Don't care for the rest. */
- break;
- }
-}
-
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
switch (ev) {
@@ -276,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
case CA_EVENT_ECN_NO_CE:
dctcp_ce_state_1_to_0(sk);
break;
- case CA_EVENT_DELAYED_ACK:
- case CA_EVENT_NON_DELAYED_ACK:
- dctcp_update_ack_reserved(sk, ev);
- break;
default:
/* Don't care for the rest. */
break;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8cd3e3f3d32dac..004bfeab80e589 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -176,24 +176,27 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
}
}
-static void tcp_incr_quickack(struct sock *sk)
+static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
{
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
if (quickacks == 0)
quickacks = 2;
+ quickacks = min(quickacks, max_quickacks);
if (quickacks > icsk->icsk_ack.quick)
- icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
+ icsk->icsk_ack.quick = quickacks;
}
-static void tcp_enter_quickack_mode(struct sock *sk)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- tcp_incr_quickack(sk);
+
+ tcp_incr_quickack(sk, max_quickacks);
icsk->icsk_ack.pingpong = 0;
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
/* Send ACKs quickly, if "quick" count is not exhausted
* and the session is not interactive.
@@ -225,8 +228,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
-static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
case INET_ECN_NOT_ECT:
/* Funny extension: if ECT is not set on a segment,
@@ -234,31 +239,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
* it is probably a retransmit.
*/
if (tp->ecn_flags & TCP_ECN_SEEN)
- tcp_enter_quickack_mode((struct sock *)tp);
+ tcp_enter_quickack_mode(sk, 2);
break;
case INET_ECN_CE:
- if (tcp_ca_needs_ecn((struct sock *)tp))
- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
+ if (tcp_ca_needs_ecn(sk))
+ tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
/* Better not delay acks, sender can have a very low cwnd */
- tcp_enter_quickack_mode((struct sock *)tp);
+ tcp_enter_quickack_mode(sk, 2);
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
}
tp->ecn_flags |= TCP_ECN_SEEN;
break;
default:
- if (tcp_ca_needs_ecn((struct sock *)tp))
- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
+ if (tcp_ca_needs_ecn(sk))
+ tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
tp->ecn_flags |= TCP_ECN_SEEN;
break;
}
}
-static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
+static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
{
- if (tp->ecn_flags & TCP_ECN_OK)
- __tcp_ecn_check_ce(tp, skb);
+ if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
+ __tcp_ecn_check_ce(sk, skb);
}
static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
@@ -557,8 +562,8 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
void tcp_rcv_space_adjust(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ u32 copied;
int time;
- int copied;
time = tcp_time_stamp - tp->rcvq_space.time;
if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
@@ -580,12 +585,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
if (sysctl_tcp_moderate_rcvbuf &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
- int rcvwin, rcvmem, rcvbuf;
+ int rcvmem, rcvbuf;
+ u64 rcvwin;
/* minimal window to cope with packet losses, assuming
* steady state. Add some cushion because of small variations.
*/
- rcvwin = (copied << 1) + 16 * tp->advmss;
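+ /* Do the math in 64 bits: (copied << 1) can overflow a
+ * 32-bit int with large receive buffers.
+ */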
+ rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
/* If rate increased by 25%,
* assume slow start, rcvwin = 3 * copied
@@ -605,12 +611,13 @@ void tcp_rcv_space_adjust(struct sock *sk)
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
- rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]);
+ do_div(rcvwin, tp->advmss);
+ rcvbuf = min_t(u64, rcvwin * rcvmem, sysctl_tcp_rmem[2]);
if (rcvbuf > sk->sk_rcvbuf) {
sk->sk_rcvbuf = rcvbuf;
/* Make the window clamp follow along. */
- tp->window_clamp = rcvwin;
+ tp->window_clamp = tcp_win_from_space(rcvbuf);
}
}
tp->rcvq_space.space = copied;
@@ -648,7 +655,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
/* The _first_ data packet received, initialize
* delayed ACK engine.
*/
- tcp_incr_quickack(sk);
+ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
icsk->icsk_ack.ato = TCP_ATO_MIN;
} else {
int m = now - icsk->icsk_ack.lrcvtime;
@@ -664,13 +671,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
/* Too long gap. Apparently sender failed to
* restart window, so that we send ACKs quickly.
*/
- tcp_incr_quickack(sk);
+ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
sk_mem_reclaim(sk);
}
}
icsk->icsk_ack.lrcvtime = now;
- tcp_ecn_check_ce(tp, skb);
+ tcp_ecn_check_ce(sk, skb);
if (skb->len >= 128)
tcp_grow_window(sk, skb);
@@ -3216,6 +3223,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
+
+ /* If any of the cumulatively ACKed segments was
+ * retransmitted, non-SACK case cannot confirm that
+ * progress was due to original transmission due to
+ * lack of TCPCB_SACKED_ACKED bits even if some of
+ * the packets may have been never retransmitted.
+ */
+ if (flag & FLAG_RETRANS_DATA_ACKED)
+ flag &= ~FLAG_ORIG_SACK_ACKED;
} else {
int delta;
@@ -4057,7 +4073,7 @@ static void tcp_fin(struct sock *sk)
/* It _is_ possible, that we have something out-of-order _after_ FIN.
* Probably, we should reset in this case. For now drop them.
*/
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
@@ -4124,7 +4140,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
- tcp_enter_quickack_mode(sk);
+ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
@@ -4217,7 +4233,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
int this_sack;
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
- if (skb_queue_empty(&tp->out_of_order_queue)) {
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
return;
}
@@ -4280,6 +4296,29 @@ static bool tcp_try_coalesce(struct sock *sk,
return true;
}
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+ struct sk_buff *to,
+ struct sk_buff *from,
+ bool *fragstolen)
+{
+ bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+ /* In case tcp_drop() is called later, update to->gso_segs */
+ if (res) {
+ u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+ max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+ skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+ }
+ return res;
+}
+
+static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ __kfree_skb(skb);
+}
+
/* This one checks to see if we can put data from the
* out_of_order queue into the receive_queue.
*/
@@ -4287,10 +4326,13 @@ static void tcp_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
+ bool fin, fragstolen, eaten;
struct sk_buff *skb, *tail;
- bool fragstolen, eaten;
+ struct rb_node *p;
- while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
+ p = rb_first(&tp->out_of_order_queue);
+ while (p) {
+ skb = rb_entry(p, struct sk_buff, rbnode);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
@@ -4300,11 +4342,12 @@ static void tcp_ofo_queue(struct sock *sk)
dsack_high = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, &tp->out_of_order_queue);
- __skb_unlink(skb, &tp->out_of_order_queue);
- if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
+ if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
continue;
}
SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
@@ -4314,12 +4357,19 @@ static void tcp_ofo_queue(struct sock *sk)
tail = skb_peek_tail(&sk->sk_receive_queue);
eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
+ fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
if (!eaten)
__skb_queue_tail(&sk->sk_receive_queue, skb);
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
- tcp_fin(sk);
- if (eaten)
+ else
kfree_skb_partial(skb, fragstolen);
+
+ if (unlikely(fin)) {
+ tcp_fin(sk);
+ /* tcp_fin() purges tp->out_of_order_queue,
+ * so we must end this loop right now.
+ */
+ break;
+ }
}
}
@@ -4349,14 +4399,16 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct rb_node **p, *q, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
+ bool fragstolen;
- tcp_ecn_check_ce(tp, skb);
+ tcp_ecn_check_ce(sk, skb);
if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return;
}
@@ -4365,89 +4417,89 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
inet_csk_schedule_ack(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+ seq = TCP_SKB_CB(skb)->seq;
+ end_seq = TCP_SKB_CB(skb)->end_seq;
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
- tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
+ tp->rcv_nxt, seq, end_seq);
- skb1 = skb_peek_tail(&tp->out_of_order_queue);
- if (!skb1) {
+ p = &tp->out_of_order_queue.rb_node;
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
- tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
- tp->selective_acks[0].end_seq =
- TCP_SKB_CB(skb)->end_seq;
+ tp->selective_acks[0].start_seq = seq;
+ tp->selective_acks[0].end_seq = end_seq;
}
- __skb_queue_head(&tp->out_of_order_queue, skb);
+ rb_link_node(&skb->rbnode, NULL, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+ tp->ooo_last_skb = skb;
goto end;
}
- seq = TCP_SKB_CB(skb)->seq;
- end_seq = TCP_SKB_CB(skb)->end_seq;
-
- if (seq == TCP_SKB_CB(skb1)->end_seq) {
- bool fragstolen;
-
- if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
- } else {
- tcp_grow_window(sk, skb);
- kfree_skb_partial(skb, fragstolen);
- skb = NULL;
+ /* In the typical case, we are adding an skb to the end of the list.
+ * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
+ */
+ if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+ skb, &fragstolen)) {
+coalesce_done:
+ tcp_grow_window(sk, skb);
+ kfree_skb_partial(skb, fragstolen);
+ skb = NULL;
+ goto add_sack;
+ }
+
+ /* Find place to insert this segment. Handle overlaps on the way. */
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(seq, TCP_SKB_CB(skb1)->seq)) {
+ p = &parent->rb_left;
+ continue;
}
- if (!tp->rx_opt.num_sacks ||
- tp->selective_acks[0].end_seq != seq)
- goto add_sack;
-
- /* Common case: data arrive in order after hole. */
- tp->selective_acks[0].end_seq = end_seq;
- goto end;
- }
-
- /* Find place to insert this segment. */
- while (1) {
- if (!after(TCP_SKB_CB(skb1)->seq, seq))
- break;
- if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
- skb1 = NULL;
- break;
- }
- skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
- }
-
- /* Do skb overlap to previous one? */
- if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
- if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
- /* All the bits are present. Drop. */
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb);
- skb = NULL;
- tcp_dsack_set(sk, seq, end_seq);
- goto add_sack;
- }
- if (after(seq, TCP_SKB_CB(skb1)->seq)) {
- /* Partial overlap. */
- tcp_dsack_set(sk, seq,
- TCP_SKB_CB(skb1)->end_seq);
- } else {
- if (skb_queue_is_first(&tp->out_of_order_queue,
- skb1))
- skb1 = NULL;
- else
- skb1 = skb_queue_prev(
- &tp->out_of_order_queue,
- skb1);
+ if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
+ if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
+ /* All the bits are present. Drop. */
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ tcp_drop(sk, skb);
+ skb = NULL;
+ tcp_dsack_set(sk, seq, end_seq);
+ goto add_sack;
+ }
+ if (after(seq, TCP_SKB_CB(skb1)->seq)) {
+ /* Partial overlap. */
+ tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
+ } else {
+ /* skb's seq == skb1's seq and skb covers skb1.
+ * Replace skb1 with skb.
+ */
+ rb_replace_node(&skb1->rbnode, &skb->rbnode,
+ &tp->out_of_order_queue);
+ tcp_dsack_extend(sk,
+ TCP_SKB_CB(skb1)->seq,
+ TCP_SKB_CB(skb1)->end_seq);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPOFOMERGE);
+ tcp_drop(sk, skb1);
+ goto merge_right;
+ }
+ } else if (tcp_ooo_try_coalesce(sk, skb1,
+ skb, &fragstolen)) {
+ goto coalesce_done;
}
+ p = &parent->rb_right;
}
- if (!skb1)
- __skb_queue_head(&tp->out_of_order_queue, skb);
- else
- __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
- /* And clean segments covered by new one as whole. */
- while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
- skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
+ /* Insert segment into RB tree. */
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
+merge_right:
+ /* Remove other segments covered by skb. */
+ while ((q = rb_next(&skb->rbnode)) != NULL) {
+ skb1 = rb_entry(q, struct sk_buff, rbnode);
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4455,12 +4507,15 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
end_seq);
break;
}
- __skb_unlink(skb1, &tp->out_of_order_queue);
+ rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb1);
+ tcp_drop(sk, skb1);
}
+ /* If there is no skb after us, we are the last_skb ! */
+ if (!q)
+ tp->ooo_last_skb = skb;
add_sack:
if (tcp_is_sack(tp))
@@ -4542,12 +4597,13 @@ err:
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- int eaten = -1;
bool fragstolen = false;
+ int eaten = -1;
- if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
- goto drop;
-
+ if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
+ __kfree_skb(skb);
+ return;
+ }
skb_dst_drop(skb);
__skb_pull(skb, tcp_hdr(skb)->doff * 4);
@@ -4598,13 +4654,13 @@ queue_and_out:
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
tcp_fin(sk);
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
+ if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
/* RFC2581. 4.2. SHOULD send immediate ACK, when
* gap in queue is filled.
*/
- if (skb_queue_empty(&tp->out_of_order_queue))
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
inet_csk(sk)->icsk_ack.pingpong = 0;
}
@@ -4626,10 +4682,10 @@ queue_and_out:
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window:
- tcp_enter_quickack_mode(sk);
+ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
inet_csk_schedule_ack(sk);
drop:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return;
}
@@ -4637,8 +4693,6 @@ drop:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
goto out_of_window;
- tcp_enter_quickack_mode(sk);
-
if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
/* Partial packet, seq < rcv_next < end_seq */
SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
@@ -4658,48 +4712,76 @@ drop:
tcp_data_queue_ofo(sk, skb);
}
+static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
+{
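+ /* list is the linear receive queue; a NULL list means the
+ * caller is walking the out-of-order rbtree instead.
+ */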
+ if (list)
+ return !skb_queue_is_last(list, skb) ? skb->next : NULL;
+
+ return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+}
+
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
- struct sk_buff_head *list)
+ struct sk_buff_head *list,
+ struct rb_root *root)
{
- struct sk_buff *next = NULL;
+ struct sk_buff *next = tcp_skb_next(skb, list);
- if (!skb_queue_is_last(list, skb))
- next = skb_queue_next(list, skb);
+ if (list)
+ __skb_unlink(skb, list);
+ else
+ rb_erase(&skb->rbnode, root);
- __skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
return next;
}
+/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
+static void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct sk_buff *skb1;
+
+ while (*p) {
+ parent = *p;
+ skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, root);
+}
+
/* Collapse contiguous sequence of skbs head..tail with
* sequence numbers start..end.
*
- * If tail is NULL, this means until the end of the list.
+ * If tail is NULL, this means until the end of the queue.
*
* Segments with FIN/SYN are not collapsed (only because this
* simplifies code)
*/
static void
-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
- struct sk_buff *head, struct sk_buff *tail,
- u32 start, u32 end)
+tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
+ struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
{
- struct sk_buff *skb, *n;
+ struct sk_buff *skb = head, *n;
+ struct sk_buff_head tmp;
bool end_of_skbs;
/* First, check that queue is collapsible and find
- * the point where collapsing can be useful. */
- skb = head;
+ * the point where collapsing can be useful.
+ */
restart:
- end_of_skbs = true;
- skb_queue_walk_from_safe(list, skb, n) {
- if (skb == tail)
- break;
+ for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
+ n = tcp_skb_next(skb, list);
+
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb)
break;
goto restart;
@@ -4717,13 +4799,10 @@ restart:
break;
}
- if (!skb_queue_is_last(list, skb)) {
- struct sk_buff *next = skb_queue_next(list, skb);
- if (next != tail &&
- TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
- end_of_skbs = false;
- break;
- }
+ if (n && n != tail &&
+ TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
+ end_of_skbs = false;
+ break;
}
/* Decided to skip this, advance start seq. */
@@ -4733,17 +4812,22 @@ restart:
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
return;
+ __skb_queue_head_init(&tmp);
+
while (before(start, end)) {
int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
struct sk_buff *nskb;
nskb = alloc_skb(copy, GFP_ATOMIC);
if (!nskb)
- return;
+ break;
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
- __skb_queue_before(list, skb, nskb);
+ if (list)
+ __skb_queue_before(list, skb, nskb);
+ else
+ __skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
skb_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
@@ -4761,14 +4845,17 @@ restart:
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
- skb = tcp_collapse_one(sk, skb, list);
+ skb = tcp_collapse_one(sk, skb, list, root);
if (!skb ||
skb == tail ||
(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)))
- return;
+ goto end;
}
}
}
+end:
+ skb_queue_walk_safe(&tmp, skb, n)
+ tcp_rbtree_insert(root, skb);
}
/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
@@ -4777,70 +4864,101 @@ restart:
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
- struct sk_buff *head;
+ u32 range_truesize, sum_tiny = 0;
+ struct sk_buff *skb, *head;
+ struct rb_node *p;
u32 start, end;
- if (!skb)
+ p = rb_first(&tp->out_of_order_queue);
+ skb = rb_entry_safe(p, struct sk_buff, rbnode);
+new_range:
+ if (!skb) {
+ p = rb_last(&tp->out_of_order_queue);
+ /* Note: it is possible that p is NULL here. We do not
+ * use rb_entry_safe(), as ooo_last_skb is valid only
+ * if rbtree is not empty.
+ */
+ tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
return;
-
+ }
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
- head = skb;
-
- for (;;) {
- struct sk_buff *next = NULL;
+ range_truesize = skb->truesize;
- if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
- next = skb_queue_next(&tp->out_of_order_queue, skb);
- skb = next;
+ for (head = skb;;) {
+ skb = tcp_skb_next(skb, NULL);
- /* Segment is terminated when we see gap or when
- * we are at the end of all the queue. */
+ /* Range is terminated when we see a gap or when
+ * we are at the queue end.
+ */
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, &tp->out_of_order_queue,
- head, skb, start, end);
- head = skb;
- if (!skb)
- break;
- /* Start new segment */
+ /* Do not attempt collapsing tiny skbs */
+ if (range_truesize != head->truesize ||
+ end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+ head, skb, start, end);
+ } else {
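+ /* Give up once tiny ranges account for more than 1/8
+ * (12.5%) of the receive buffer.
+ */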
+ sum_tiny += range_truesize;
+ if (sum_tiny > sk->sk_rcvbuf >> 3)
+ return;
+ }
+
+ goto new_range;
+ }
+
+ range_truesize += skb->truesize;
+ if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
+ if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
- } else {
- if (before(TCP_SKB_CB(skb)->seq, start))
- start = TCP_SKB_CB(skb)->seq;
- if (after(TCP_SKB_CB(skb)->end_seq, end))
- end = TCP_SKB_CB(skb)->end_seq;
- }
}
}
/*
* Purge the out-of-order queue.
+ * Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
* Return true if queue was pruned.
*/
static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
- bool res = false;
+ struct rb_node *node, *prev;
+ int goal;
- if (!skb_queue_empty(&tp->out_of_order_queue)) {
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
- __skb_queue_purge(&tp->out_of_order_queue);
+ if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
+ return false;
- /* Reset SACK state. A conforming SACK implementation will
- * do the same at a timeout based retransmit. When a connection
- * is in a sad state like this, we care only about integrity
- * of the connection not performance.
- */
- if (tp->rx_opt.sack_ok)
- tcp_sack_reset(&tp->rx_opt);
- sk_mem_reclaim(sk);
- res = true;
- }
- return res;
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
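+ /* Free at least 1/8 (12.5%) of sk_rcvbuf per pass */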
+ goal = sk->sk_rcvbuf >> 3;
+ node = &tp->ooo_last_skb->rbnode;
+ do {
+ prev = rb_prev(node);
+ rb_erase(node, &tp->out_of_order_queue);
+ goal -= rb_to_skb(node)->truesize;
+ __kfree_skb(rb_to_skb(node));
+ if (!prev || goal <= 0) {
+ sk_mem_reclaim(sk);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ !tcp_under_memory_pressure(sk))
+ break;
+ goal = sk->sk_rcvbuf >> 3;
+ }
+
+ node = prev;
+ } while (node);
+ tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+
+ /* Reset SACK state. A conforming SACK implementation will
+ * do the same at a timeout based retransmit. When a connection
+ * is in a sad state like this, we care only about integrity
+ * of the connection not performance.
+ */
+ if (tp->rx_opt.sack_ok)
+ tcp_sack_reset(&tp->rx_opt);
+
+ return true;
}
/* Reduce allocated memory if we can, trying to get
@@ -4863,9 +4981,12 @@ static int tcp_prune_queue(struct sock *sk)
else if (tcp_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+ return 0;
+
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
- tcp_collapse(sk, &sk->sk_receive_queue,
+ tcp_collapse(sk, &sk->sk_receive_queue, NULL,
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
@@ -4970,7 +5091,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
- (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
+ (ofo_possible && !RB_EMPTY_ROOT(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
@@ -5206,7 +5327,7 @@ syn_challenge:
return true;
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return false;
}
@@ -5424,7 +5545,7 @@ csum_error:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
}
EXPORT_SYMBOL(tcp_rcv_established);
@@ -5649,12 +5770,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
- tcp_enter_quickack_mode(sk);
+ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
return 0;
} else {
tcp_send_ack(sk);
@@ -6011,7 +6132,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
if (!queued) {
discard:
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
}
return 0;
}
@@ -6282,7 +6403,13 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
af_ops->send_synack(fastopen_sk, dst, &fl, req,
&foc, false);
/* Add the child socket directly into the accept queue */
- inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+ reqsk_fastopen_remove(fastopen_sk, req, false);
+ bh_unlock_sock(fastopen_sk);
+ sock_put(fastopen_sk);
+ reqsk_put(req);
+ goto drop;
+ }
sk->sk_data_ready(sk);
bh_unlock_sock(fastopen_sk);
sock_put(fastopen_sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 1d804b9200f724..70ba2cc039016c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -471,14 +471,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (sock_owned_by_user(sk))
break;
+ skb = tcp_write_queue_head(sk);
+ if (WARN_ON_ONCE(!skb))
+ break;
+
icsk->icsk_backoff--;
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT;
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
- skb = tcp_write_queue_head(sk);
- BUG_ON(!skb);
-
remaining = icsk->icsk_rto -
min(icsk->icsk_rto,
tcp_time_stamp - tcp_skb_timestamp(skb));
@@ -1632,6 +1633,10 @@ process:
reqsk_put(req);
goto discard_it;
}
+ if (tcp_checksum_complete(skb)) {
+ reqsk_put(req);
+ goto csum_error;
+ }
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
@@ -1717,6 +1722,7 @@ discard_it:
return 0;
discard_and_relse:
+ sk_drops_add(sk, skb);
sock_put(sk);
goto discard_it;
@@ -1830,7 +1836,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
tcp_write_queue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
- __skb_queue_purge(&tp->out_of_order_queue);
+ skb_rbtree_purge(&tp->out_of_order_queue);
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9f2ada8fe5de0d..4dd23f10d5c5f0 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -200,8 +200,9 @@ kill:
inet_twsk_deschedule_put(tw);
return TCP_TW_SUCCESS;
}
+ } else {
+ inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
}
- inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
if (tmp_opt.saw_tstamp) {
tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
@@ -495,7 +496,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->snd_cwnd_cnt = 0;
tcp_init_xmit_timers(newsk);
- __skb_queue_head_init(&newtp->out_of_order_queue);
newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
newtp->rx_opt.saw_tstamp = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8e0ae6374ae305..0a4a258c8efdfa 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -177,8 +177,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+ u32 rcv_nxt)
{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (unlikely(rcv_nxt != tp->rcv_nxt))
+ return; /* Special ACK sent by DCTCP to reflect ECN */
tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -909,8 +914,8 @@ out:
* We are working here with either a clone of the original
* SKB, or a fresh unique copy made by the retransmit engine.
*/
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
- gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet;
@@ -970,7 +975,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->source = inet->inet_sport;
th->dest = inet->inet_dport;
th->seq = htonl(tcb->seq);
- th->ack_seq = htonl(tp->rcv_nxt);
+ th->ack_seq = htonl(rcv_nxt);
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
tcb->tcp_flags);
@@ -1013,7 +1018,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
- tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+ tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
if (skb->len != tcp_header_size)
tcp_event_data_sent(tp, sk);
@@ -1044,6 +1049,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
return net_xmit_eval(err);
}
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ gfp_t gfp_mask)
+{
+ return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+ tcp_sk(sk)->rcv_nxt);
+}
+
/* This routine just queues the buffer for sending.
*
* NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -2271,14 +2283,18 @@ void tcp_send_loss_probe(struct sock *sk)
skb = tcp_write_queue_tail(sk);
}
+ if (unlikely(!skb)) {
+ WARN_ONCE(tp->packets_out,
+ "invalid inflight: %u state %u cwnd %u mss %d\n",
+ tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+ inet_csk(sk)->icsk_pending = 0;
+ return;
+ }
+
/* At most one outstanding TLP retransmission. */
if (tp->tlp_high_seq)
goto rearm_timer;
- /* Retransmit last segment. */
- if (WARN_ON(!skb))
- goto rearm_timer;
-
if (skb_still_in_host_queue(sk, skb))
goto rearm_timer;
@@ -3309,8 +3325,6 @@ void tcp_send_delayed_ack(struct sock *sk)
int ato = icsk->icsk_ack.ato;
unsigned long timeout;
- tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
if (ato > TCP_DELACK_MIN) {
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
@@ -3359,7 +3373,7 @@ void tcp_send_delayed_ack(struct sock *sk)
}
/* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
{
struct sk_buff *buff;
@@ -3367,8 +3381,6 @@ void tcp_send_ack(struct sock *sk)
if (sk->sk_state == TCP_CLOSE)
return;
- tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
/* We are not putting this on the write queue, so
* tcp_transmit_skb() will set the ownership to this
* sock.
@@ -3396,9 +3408,14 @@ void tcp_send_ack(struct sock *sk)
/* Send it off, this clears delayed acks for us. */
skb_mstamp_get(&buff->skb_mstamp);
- tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
+ __tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC), rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+ __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
}
-EXPORT_SYMBOL_GPL(tcp_send_ack);
/* This routine sends a packet with an out of date sequence
* number. It assumes the other end will try to ack it.
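
The tcp_output.c changes above thread an explicit rcv_nxt down through __tcp_transmit_skb() and __tcp_send_ack() so that a congestion-control module can emit an ACK carrying an older acknowledgment number (DCTCP uses this to reflect ECN transitions). tcp_event_ack_sent() then skips its delayed-ACK bookkeeping whenever the passed rcv_nxt differs from the current tp->rcv_nxt, and the old tcp_send_ack()/tcp_transmit_skb() names become thin wrappers that pass the current value. A sketch of that wrapper-with-default idiom, with hypothetical names:

    #include <stdio.h>

    struct conn { unsigned int rcv_nxt; };

    static void send_ack_seq(struct conn *c, unsigned int ack_seq)
    {
        if (ack_seq != c->rcv_nxt) {
            /* special ACK (e.g. ECN reflection): skip timer bookkeeping */
            printf("old ack %u (current %u)\n", ack_seq, c->rcv_nxt);
            return;
        }
        printf("normal ack %u\n", ack_seq);
    }

    static void send_ack(struct conn *c)
    {
        send_ack_seq(c, c->rcv_nxt);    /* default: acknowledge rcv_nxt */
    }

    int main(void)
    {
        struct conn c = { .rcv_nxt = 1000 };

        send_ack(&c);
        send_ack_seq(&c, 900);
        return 0;
    }
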
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4c61b3ecfb6e91..1fa06f8972282f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1464,7 +1464,7 @@ static void udp_v4_rehash(struct sock *sk)
udp_lib_rehash(sk, new_hash);
}
-static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index 7e0fe4bdd96702..feb50a16398dfa 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
int flags);
-int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udp_destroy_sock(struct sock *sk);
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 3b3efbda48e139..78766b32b78bd1 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -50,7 +50,7 @@ struct proto udplite_prot = {
.sendmsg = udp_sendmsg,
.recvmsg = udp_recvmsg,
.sendpage = udp_sendpage,
- .backlog_rcv = udp_queue_rcv_skb,
+ .backlog_rcv = __udp_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v4_get_port,
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 851d5c9e3eccfb..0f50248bad1712 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -69,6 +69,7 @@ config INET6_ESP
select CRYPTO_CBC
select CRYPTO_SHA1
select CRYPTO_DES
+ select CRYPTO_ECHAINIV
---help---
Support for IPsec ESP.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index bbc6abf17f9119..0ffd555151be66 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1070,7 +1070,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa == ifp)
continue;
- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+ if (ifa->prefix_len != ifp->prefix_len ||
+ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
ifp->prefix_len))
continue;
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
@@ -3841,7 +3842,6 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
p++;
continue;
}
- state->offset++;
return ifa;
}
@@ -3865,13 +3865,12 @@ static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
return ifa;
}
+ state->offset = 0;
while (++state->bucket < IN6_ADDR_HSIZE) {
- state->offset = 0;
hlist_for_each_entry_rcu_bh(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
- state->offset++;
return ifa;
}
}
@@ -4496,8 +4495,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
/* unicast address incl. temp addr */
list_for_each_entry(ifa, &idev->addr_list, if_list) {
- if (++ip_idx < s_ip_idx)
- continue;
+ if (ip_idx < s_ip_idx)
+ goto next;
err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
@@ -4506,6 +4505,8 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
if (err < 0)
break;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+next:
+ ip_idx++;
}
break;
}
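
The two addrconf.c iterator hunks above fix cursor bookkeeping: in the /proc walker, state->offset is reset once when moving to the next hash bucket and advanced only after an entry is actually returned, and in the netlink dump ip_idx is advanced via the next: label even for skipped entries, so a resumed dump neither repeats nor loses addresses. A minimal two-level (bucket, offset) cursor in the same shape, illustrative only:

    #include <stdio.h>

    #define NBUCKETS 3
    static const int bucket_len[NBUCKETS] = { 2, 0, 3 };

    int main(void)
    {
        int bucket = 0, offset = 0;

        while (bucket < NBUCKETS) {
            if (offset < bucket_len[bucket]) {
                printf("visit bucket %d entry %d\n", bucket, offset);
                offset++;           /* advance only after a visit */
                continue;
            }
            offset = 0;             /* reset once per bucket change */
            bucket++;
        }
        return 0;
    }
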
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6bd2de481abc6c..4812644de90ef1 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -296,6 +296,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* Check if the address belongs to the host. */
if (addr_type == IPV6_ADDR_MAPPED) {
+ struct net_device *dev = NULL;
int chk_addr_ret;
/* Binding to v4-mapped address on a v6-only socket
@@ -306,9 +307,20 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
+ rcu_read_lock();
+ if (sk->sk_bound_dev_if) {
+ dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
+ if (!dev) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+ }
+
/* Reproduce AF_INET checks to make the bindings consistent */
v4addr = addr->sin6_addr.s6_addr32[3];
- chk_addr_ret = inet_addr_type(net, v4addr);
+ chk_addr_ret = inet_addr_type_dev_table(net, dev, v4addr);
+ rcu_read_unlock();
+
if (!net->ipv4.sysctl_ip_nonlocal_bind &&
!(inet->freebind || inet->transparent) &&
v4addr != htonl(INADDR_ANY) &&
@@ -337,6 +349,9 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = -EINVAL;
goto out_unlock;
}
+ }
+
+ if (sk->sk_bound_dev_if) {
dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
if (!dev) {
err = -ENODEV;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 56528e9f3e0175..54fc094095324a 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -291,6 +291,7 @@ void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
iph->daddr = fl6->daddr;
+ ip6_flow_hdr(iph, 0, 0);
serr = SKB_EXT_ERR(skb);
serr->ee.ee_errno = err;
@@ -658,14 +659,15 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
- __be16 *ports = (__be16 *) skb_transport_header(skb);
+ __be16 _ports[2], *ports;
- if (skb_transport_offset(skb) + 4 <= skb->len) {
+ ports = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_ports), &_ports);
+ if (ports) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
-
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
sin6.sin6_port = ports[1];
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 9e2ea4ae840d75..244b9fec9d4d7b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -118,6 +118,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
skb->network_header = (u8 *)ipv6h - skb->head;
+ skb_reset_mac_len(skb);
if (udpfrag) {
int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 74786783834b71..f8cca81d66f27e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -169,39 +169,37 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
+ unsigned int head_room;
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
int hlimit = -1;
u32 mtu;
- if (opt) {
- unsigned int head_room;
+ head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+ if (opt)
+ head_room += opt->opt_nflen + opt->opt_flen;
- /* First: exthdrs may take lots of space (~8K for now)
- MAX_HEADER is not enough.
- */
- head_room = opt->opt_nflen + opt->opt_flen;
- seg_len += head_room;
- head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
-
- if (skb_headroom(skb) < head_room) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
- if (!skb2) {
- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTDISCARDS);
- kfree_skb(skb);
- return -ENOBUFS;
- }
- consume_skb(skb);
- skb = skb2;
- /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
- * it is safe to call in our context (socket lock not held)
- */
- skb_set_owner_w(skb, (struct sock *)sk);
+ if (unlikely(skb_headroom(skb) < head_room)) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
+ if (!skb2) {
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTDISCARDS);
+ kfree_skb(skb);
+ return -ENOBUFS;
}
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ consume_skb(skb);
+ skb = skb2;
+ }
+
+ if (opt) {
+ seg_len += opt->opt_nflen + opt->opt_flen;
+
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
+
if (opt->opt_nflen)
ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
}
@@ -559,6 +557,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->dev = from->dev;
to->mark = from->mark;
+ skb_copy_hash(to, from);
+
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
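
The rewritten ip6_xmit() prologue above computes the required headroom unconditionally (IPv6 header plus link-layer reserve, plus extension-header space when opt is set), so even the no-options path reallocates when the caller left too little room, and ownership is transferred to skb->sk only when the skb actually has an owner. A sketch of the headroom arithmetic with placeholder values; LL_RESERVED_SPACE() is a per-device quantity in the kernel, the 16 below is just an example:

    #include <stdio.h>

    int main(void)
    {
        unsigned int ipv6hdr_len = 40;  /* sizeof(struct ipv6hdr) */
        unsigned int ll_reserved = 16;  /* placeholder link-layer reserve */
        unsigned int opt_nflen = 8, opt_flen = 0;  /* example ext hdrs */
        int have_opt = 1;

        unsigned int head_room = ipv6hdr_len + ll_reserved;
        if (have_opt)
            head_room += opt_nflen + opt_flen;

        printf("need %u bytes of headroom\n", head_room);
        return 0;
    }
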
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3c2468bd0b7cb4..8d55abb1a6899f 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1096,7 +1096,7 @@ static inline int
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
@@ -1104,6 +1104,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ /* ensure we can access the full inner ip header */
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return -1;
+
+ iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
tproto = ACCESS_ONCE(t->parms.proto);
@@ -1142,7 +1147,7 @@ static inline int
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
@@ -1151,6 +1156,10 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ return -1;
+
+ ipv6h = ipv6_hdr(skb);
tproto = ACCESS_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index 14dacf1df529d9..30b03d8e321a0c 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -15,7 +15,7 @@
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
struct socket **sockp)
{
- struct sockaddr_in6 udp6_addr;
+ struct sockaddr_in6 udp6_addr = {};
int err;
struct socket *sock = NULL;
@@ -42,6 +42,7 @@ int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
goto error;
if (cfg->peer_udp_port) {
+ memset(&udp6_addr, 0, sizeof(udp6_addr));
udp6_addr.sin6_family = AF_INET6;
memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
sizeof(udp6_addr.sin6_addr));
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index b8bf123f7f79b3..5dd544c5cfe212 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -469,12 +469,8 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
goto tx_err_dst_release;
}
- skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
- skb_dst_set(skb, dst);
- skb->dev = skb_dst(skb)->dev;
-
mtu = dst_mtu(dst);
- if (!skb->ignore_df && skb->len > mtu) {
+ if (skb->len > mtu) {
skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
if (skb->protocol == htons(ETH_P_IPV6)) {
@@ -487,9 +483,14 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
htonl(mtu));
}
- return -EMSGSIZE;
+ err = -EMSGSIZE;
+ goto tx_err_dst_release;
}
+ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+ skb_dst_set(skb, dst);
+ skb->dev = skb_dst(skb)->dev;
+
err = dst_output(t->net, skb->sk, skb);
if (net_xmit_eval(err) == 0) {
struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -1140,6 +1141,33 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
.priority = 100,
};
+static bool is_vti6_tunnel(const struct net_device *dev)
+{
+ return dev->netdev_ops == &vti6_netdev_ops;
+}
+
+static int vti6_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ip6_tnl *t = netdev_priv(dev);
+
+ if (!is_vti6_tunnel(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_DOWN:
+ if (!net_eq(t->net, dev_net(dev)))
+ xfrm_garbage_collect(t->net);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block vti6_notifier_block __read_mostly = {
+ .notifier_call = vti6_device_event,
+};
+
/**
* vti6_tunnel_init - register protocol and reserve needed resources
*
@@ -1150,6 +1178,8 @@ static int __init vti6_tunnel_init(void)
const char *msg;
int err;
+ register_netdevice_notifier(&vti6_notifier_block);
+
msg = "tunnel device";
err = register_pernet_device(&vti6_net_ops);
if (err < 0)
@@ -1182,6 +1212,7 @@ xfrm_proto_ah_failed:
xfrm_proto_esp_failed:
unregister_pernet_device(&vti6_net_ops);
pernet_dev_failed:
+ unregister_netdevice_notifier(&vti6_notifier_block);
pr_err("vti6 init: failed to register %s\n", msg);
return err;
}
@@ -1196,6 +1227,7 @@ static void __exit vti6_tunnel_cleanup(void)
xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
unregister_pernet_device(&vti6_net_ops);
+ unregister_netdevice_notifier(&vti6_notifier_block);
}
module_init(vti6_tunnel_init);
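
The vti6 hunks above register a netdevice notifier so that NETDEV_DOWN on a tunnel whose xfrm state lives in another netns triggers xfrm_garbage_collect() there, and they keep init and teardown symmetric: the notifier is registered before any step that can fail and unregistered both in the error unwind and in module exit. A compact sketch of such an init ladder with reverse-order unwind, using hypothetical step names:

    #include <stdio.h>

    static int step(const char *name, int fail)
    {
        printf("init %s\n", name);
        return fail ? -1 : 0;
    }

    static void undo(const char *name) { printf("undo %s\n", name); }

    int main(void)
    {
        if (step("notifier", 0))
            return 1;
        if (step("pernet", 0))
            goto undo_notifier;
        if (step("protocols", 1))   /* simulate a late failure */
            goto undo_pernet;
        return 0;

    undo_pernet:
        undo("pernet");
    undo_notifier:
        undo("notifier");
        return 1;
    }
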
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e5846d1f9b558d..e348a140e540c6 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -72,6 +72,8 @@ struct mr6_table {
#endif
};
+#include <linux/nospec.h>
+
struct ip6mr_rule {
struct fib_rule common;
};
@@ -1787,7 +1789,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
ret = 0;
if (!ip6mr_new_table(net, v))
ret = -ENOMEM;
- raw6_sk(sk)->ip6mr_table = v;
+ else
+ raw6_sk(sk)->ip6mr_table = v;
rtnl_unlock();
return ret;
}
@@ -1870,6 +1873,7 @@ int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
+ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
@@ -1944,6 +1948,7 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
return -EFAULT;
if (vr.mifi >= mrt->maxvif)
return -EINVAL;
+ vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
read_lock(&mrt_lock);
vif = &mrt->vif6_table[vr.mifi];
if (MIF_EXISTS(mrt, vr.mifi)) {
@@ -1985,10 +1990,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTFORWDATAGRAMS);
- IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTOCTETS, skb->len);
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTOCTETS, skb->len);
return dst_output(net, sk, skb);
}
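
The ip6mr ioctl hunks above clamp vr.mifi with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index vif6_table out of range (Spectre v1); the setsockopt hunk also stops storing a table id when the table was never created. A rough userspace illustration of post-check index clamping follows; the kernel helper is stronger, this only shows the intent:

    #include <stdio.h>
    #include <stddef.h>

    static size_t clamp_index(size_t idx, size_t size)
    {
        /* all-ones mask when idx < size, else zero (branch-free idea) */
        size_t mask = (size_t)0 - (size_t)(idx < size);
        return idx & mask;
    }

    int main(void)
    {
        int table[4] = { 10, 20, 30, 40 };
        size_t i = 2;               /* imagine this came from userspace */

        if (i >= 4)
            return 1;
        i = clamp_index(i, 4);      /* neutralize speculative overshoot */
        printf("%d\n", table[i]);
        return 0;
    }
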
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 06640685ff4332..a5ec9a0cbb80a4 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2061,7 +2061,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
mld_send_initial_cr(idev);
idev->mc_dad_count--;
if (idev->mc_dad_count)
- mld_dad_start_timer(idev, idev->mc_maxdelay);
+ mld_dad_start_timer(idev,
+ unsolicited_report_interval(idev));
}
}
@@ -2073,7 +2074,8 @@ static void mld_dad_timer_expire(unsigned long data)
if (idev->mc_dad_count) {
idev->mc_dad_count--;
if (idev->mc_dad_count)
- mld_dad_start_timer(idev, idev->mc_maxdelay);
+ mld_dad_start_timer(idev,
+ unsolicited_report_interval(idev));
}
in6_dev_put(idev);
}
@@ -2388,17 +2390,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
{
int err;
- /* callers have the socket lock and rtnl lock
- * so no other readers or writers of iml or its sflist
- */
+ write_lock_bh(&iml->sflock);
if (!iml->sflist) {
/* any-source empty exclude case */
- return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
+ } else {
+ err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
+ iml->sflist->sl_count, iml->sflist->sl_addr, 0);
+ sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
+ iml->sflist = NULL;
}
- err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
- iml->sflist->sl_count, iml->sflist->sl_addr, 0);
- sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
- iml->sflist = NULL;
+ write_unlock_bh(&iml->sflock);
return err;
}
@@ -2431,7 +2433,8 @@ static void mld_ifc_timer_expire(unsigned long data)
if (idev->mc_ifc_count) {
idev->mc_ifc_count--;
if (idev->mc_ifc_count)
- mld_ifc_start_timer(idev, idev->mc_maxdelay);
+ mld_ifc_start_timer(idev,
+ unsolicited_report_interval(idev));
}
in6_dev_put(idev);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index dfe55e7ef07d04..e03bb5a6b8d4c5 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1651,10 +1651,9 @@ int ndisc_rcv(struct sk_buff *skb)
return 0;
}
- memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
-
switch (msg->icmph.icmp6_type) {
case NDISC_NEIGHBOUR_SOLICITATION:
+ memset(NEIGH_CB(skb), 0, sizeof(struct neighbour_cb));
ndisc_recv_ns(skb);
break;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 795c343347ecee..f563cf3fcc4c3f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -676,6 +676,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
return -ENOMEM;
j = 0;
+ memset(&mtpar, 0, sizeof(mtpar));
mtpar.net = net;
mtpar.table = name;
mtpar.entryinfo = &e->ipv6;
@@ -1181,6 +1182,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
t = xt_find_table_lock(net, AF_INET6, get.name);
if (!IS_ERR_OR_NULL(t)) {
@@ -1799,6 +1801,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
*len, sizeof(get) + get.size);
return -EINVAL;
}
+ get.name[sizeof(get.name) - 1] = '\0';
xt_compat_lock(AF_INET6);
t = xt_find_table_lock(net, AF_INET6, get.name);
@@ -2072,6 +2075,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
.checkentry = icmp6_checkentry,
.proto = IPPROTO_ICMPV6,
.family = NFPROTO_IPV6,
+ .me = THIS_MODULE,
},
};
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index eb2dc39f7066f2..664c84e47bab7a 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -64,7 +64,6 @@ struct nf_ct_frag6_skb_cb
static struct inet_frags nf_frags;
#ifdef CONFIG_SYSCTL
-static int zero;
static struct ctl_table nf_ct_frag6_sysctl_table[] = {
{
@@ -77,18 +76,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
{
.procname = "nf_conntrack_frag6_low_thresh",
.data = &init_net.nf_frag.frags.low_thresh,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.nf_frag.frags.high_thresh
},
{
.procname = "nf_conntrack_frag6_high_thresh",
.data = &init_net.nf_frag.frags.high_thresh,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.nf_frag.frags.low_thresh
},
{ }
@@ -118,7 +116,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
if (hdr == NULL)
goto err_reg;
- net->nf_frag.sysctl.frags_hdr = hdr;
+ net->nf_frag_frags_hdr = hdr;
return 0;
err_reg:
@@ -132,8 +130,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
struct ctl_table *table;
- table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
- unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+ table = net->nf_frag_frags_hdr->ctl_table_arg;
+ unregister_net_sysctl_table(net->nf_frag_frags_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
@@ -153,23 +151,6 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr)
-{
- net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
- (__force u32)id, nf_frags.rnd);
-}
-
-
-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
-{
- const struct frag_queue *nq;
-
- nq = container_of(q, struct frag_queue, q);
- return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
-}
-
static void nf_skb_free(struct sk_buff *skb)
{
if (NFCT_FRAG6_CB(skb)->orig)
@@ -184,34 +165,26 @@ static void nf_ct_frag6_expire(unsigned long data)
fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
net = container_of(fq->q.net, struct net, nf_frag.frags);
- ip6_expire_frag_queue(net, fq, &nf_frags);
+ ip6_expire_frag_queue(net, fq);
}
/* Creation primitives. */
-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
- u32 user, struct in6_addr *src,
- struct in6_addr *dst, int iif, u8 ecn)
+static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+ const struct ipv6hdr *hdr, int iif)
{
+ struct frag_v6_compare_key key = {
+ .id = id,
+ .saddr = hdr->saddr,
+ .daddr = hdr->daddr,
+ .user = user,
+ .iif = iif,
+ };
struct inet_frag_queue *q;
- struct ip6_create_arg arg;
- unsigned int hash;
-
- arg.id = id;
- arg.user = user;
- arg.src = src;
- arg.dst = dst;
- arg.iif = iif;
- arg.ecn = ecn;
-
- local_bh_disable();
- hash = nf_hash_frag(id, src, dst);
-
- q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
- local_bh_enable();
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+
+ q = inet_frag_find(&net->nf_frag.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct frag_queue, q);
}
@@ -362,7 +335,7 @@ found:
return 0;
discard_fq:
- inet_frag_kill(&fq->q, &nf_frags);
+ inet_frag_kill(&fq->q);
err:
return -1;
}
@@ -383,7 +356,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
int payload_len;
u8 ecn;
- inet_frag_kill(&fq->q, &nf_frags);
+ inet_frag_kill(&fq->q);
WARN_ON(head == NULL);
WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -454,6 +427,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
+ fp->sk = NULL;
}
sub_frag_mem_limit(fq->q.net, head->truesize);
@@ -472,6 +446,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
head->csum);
fq->q.fragments = NULL;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
@@ -601,8 +576,13 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
hdr = ipv6_hdr(clone);
fhdr = (struct frag_hdr *)skb_transport_header(clone);
- fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (clone->len - skb_network_offset(clone) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ goto ret_orig;
+
+ skb_orphan(skb);
+ fq = fq_find(net, fhdr->identification, user, hdr,
+ skb->dev ? skb->dev->ifindex : 0);
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
goto ret_orig;
@@ -613,7 +593,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
spin_unlock_bh(&fq->q.lock);
pr_debug("Can't insert skb to queue\n");
- inet_frag_put(&fq->q, &nf_frags);
+ inet_frag_put(&fq->q);
goto ret_orig;
}
@@ -625,7 +605,7 @@ struct sk_buff *nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 use
}
spin_unlock_bh(&fq->q.lock);
- inet_frag_put(&fq->q, &nf_frags);
+ inet_frag_put(&fq->q);
return ret_skb;
ret_orig:
@@ -649,18 +629,26 @@ EXPORT_SYMBOL_GPL(nf_ct_frag6_consume_orig);
static int nf_ct_net_init(struct net *net)
{
+ int res;
+
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
- inet_frags_init_net(&net->nf_frag.frags);
-
- return nf_ct_frag6_sysctl_register(net);
+ net->nf_frag.frags.f = &nf_frags;
+
+ res = inet_frags_init_net(&net->nf_frag.frags);
+ if (res < 0)
+ return res;
+ res = nf_ct_frag6_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&net->nf_frag.frags);
+ return res;
}
static void nf_ct_net_exit(struct net *net)
{
nf_ct_frags6_sysctl_unregister(net);
- inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
+ inet_frags_exit_net(&net->nf_frag.frags);
}
static struct pernet_operations nf_ct_net_ops = {
@@ -672,14 +660,13 @@ int nf_ct_frag6_init(void)
{
int ret = 0;
- nf_frags.hashfn = nf_hashfn;
nf_frags.constructor = ip6_frag_init;
nf_frags.destructor = NULL;
nf_frags.skb_free = nf_skb_free;
nf_frags.qsize = sizeof(struct frag_queue);
- nf_frags.match = ip6_frag_match;
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.frags_cache_name = nf_frags_cache_name;
+ nf_frags.rhash_params = ip6_rhash_params;
ret = inet_frags_init(&nf_frags);
if (ret)
goto out;
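
Among the nf_conntrack_reasm.c changes above, fragments are rejected up front when a non-final fragment (MF set) carries less than IPV6_MIN_MTU bytes, the per-netns init now propagates failures and unwinds inet_frags_init_net() when sysctl registration fails, and lookup moves to the shared rhashtable machinery. A sketch of the runt-fragment check with simplified, host-order fields; the kernel compares frag_off against htons(IP6_MF):

    #include <stdio.h>

    #define IPV6_MIN_MTU 1280
    #define IP6_MF 0x0001           /* more-fragments flag, host order here */

    static int frag_acceptable(unsigned int pkt_len, unsigned int frag_off)
    {
        if (pkt_len < IPV6_MIN_MTU && (frag_off & IP6_MF))
            return 0;               /* runt non-final fragment: drop */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", frag_acceptable(512, IP6_MF));   /* 0: drop */
        printf("%d\n", frag_acceptable(512, 0));        /* 1: final frag ok */
        return 0;
    }
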
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 679253d0af8427..73e766e7bc37b9 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -33,7 +33,6 @@
static int sockstat6_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
- unsigned int frag_mem = ip6_frag_mem(net);
seq_printf(seq, "TCP6: inuse %d\n",
sock_prot_inuse_get(net, &tcpv6_prot));
@@ -43,7 +42,9 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
sock_prot_inuse_get(net, &udplitev6_prot));
seq_printf(seq, "RAW6: inuse %d\n",
sock_prot_inuse_get(net, &rawv6_prot));
- seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
+ seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
+ atomic_read(&net->ipv6.frags.rhashtable.nelems),
+ frag_mem_limit(&net->ipv6.frags));
return 0;
}
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 58f2139ebb5e5c..ec917f58d10506 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -79,94 +79,58 @@ static struct inet_frags ip6_frags;
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
struct net_device *dev);
-/*
- * callers should be careful not to use the hash value outside the ipfrag_lock
- * as doing so could race with ipfrag_hash_rnd being recalculated.
- */
-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr)
-{
- net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
- (__force u32)id, ip6_frags.rnd);
-}
-
-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
-{
- const struct frag_queue *fq;
-
- fq = container_of(q, struct frag_queue, q);
- return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
-}
-
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct frag_queue *fq;
- const struct ip6_create_arg *arg = a;
-
- fq = container_of(q, struct frag_queue, q);
- return fq->id == arg->id &&
- fq->user == arg->user &&
- ipv6_addr_equal(&fq->saddr, arg->src) &&
- ipv6_addr_equal(&fq->daddr, arg->dst) &&
- (arg->iif == fq->iif ||
- !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
- IPV6_ADDR_LINKLOCAL)));
-}
-EXPORT_SYMBOL(ip6_frag_match);
-
void ip6_frag_init(struct inet_frag_queue *q, const void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
- const struct ip6_create_arg *arg = a;
+ const struct frag_v6_compare_key *key = a;
- fq->id = arg->id;
- fq->user = arg->user;
- fq->saddr = *arg->src;
- fq->daddr = *arg->dst;
- fq->ecn = arg->ecn;
+ q->key.v6 = *key;
+ fq->ecn = 0;
}
EXPORT_SYMBOL(ip6_frag_init);
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
- struct inet_frags *frags)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
struct net_device *dev = NULL;
+ struct sk_buff *head;
+ rcu_read_lock();
spin_lock(&fq->q.lock);
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
- inet_frag_kill(&fq->q, frags);
+ inet_frag_kill(&fq->q);
- rcu_read_lock();
dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
- goto out_rcu_unlock;
+ goto out;
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
-
- if (inet_frag_evicting(&fq->q))
- goto out_rcu_unlock;
-
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
/* Don't send error if the first segment did not arrive. */
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
- goto out_rcu_unlock;
+ head = fq->q.fragments;
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ goto out;
/* But use as source device on which LAST ARRIVED
* segment was received. And do not use fq->dev
* pointer directly; the device might have already disappeared.
*/
- fq->q.fragments->dev = dev;
- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
-out_rcu_unlock:
- rcu_read_unlock();
+ head->dev = dev;
+ skb_get(head);
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
out:
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, frags);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);
@@ -178,31 +142,29 @@ static void ip6_frag_expire(unsigned long data)
fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
net = container_of(fq->q.net, struct net, ipv6.frags);
- ip6_expire_frag_queue(net, fq, &ip6_frags);
+ ip6_expire_frag_queue(net, fq);
}
static struct frag_queue *
-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
- const struct in6_addr *dst, int iif, u8 ecn)
+fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
{
+ struct frag_v6_compare_key key = {
+ .id = id,
+ .saddr = hdr->saddr,
+ .daddr = hdr->daddr,
+ .user = IP6_DEFRAG_LOCAL_DELIVER,
+ .iif = iif,
+ };
struct inet_frag_queue *q;
- struct ip6_create_arg arg;
- unsigned int hash;
- arg.id = id;
- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
- arg.src = src;
- arg.dst = dst;
- arg.iif = iif;
- arg.ecn = ecn;
+ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
+ IPV6_ADDR_LINKLOCAL)))
+ key.iif = 0;
- hash = inet6_hash_frag(id, src, dst);
-
- q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&net->ipv6.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct frag_queue, q);
}
@@ -359,7 +321,7 @@ found:
return -1;
discard_fq:
- inet_frag_kill(&fq->q, &ip6_frags);
+ inet_frag_kill(&fq->q);
err:
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
@@ -386,7 +348,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
int sum_truesize;
u8 ecn;
- inet_frag_kill(&fq->q, &ip6_frags);
+ inet_frag_kill(&fq->q);
ecn = ip_frag_ecn_table[fq->ecn];
if (unlikely(ecn == 0xff))
@@ -503,6 +465,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
fq->q.fragments = NULL;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
return 1;
@@ -524,6 +487,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
struct frag_queue *fq;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct net *net = dev_net(skb_dst(skb)->dev);
+ int iif;
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
goto fail_hdr;
@@ -552,17 +516,22 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
return 1;
}
- fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ goto fail_hdr;
+
+ iif = skb->dev ? skb->dev->ifindex : 0;
+ fq = fq_find(net, fhdr->identification, hdr, iif);
if (fq) {
int ret;
spin_lock(&fq->q.lock);
+ fq->iif = iif;
ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &ip6_frags);
+ inet_frag_put(&fq->q);
return ret;
}
@@ -583,24 +552,22 @@ static const struct inet6_protocol frag_protocol = {
};
#ifdef CONFIG_SYSCTL
-static int zero;
static struct ctl_table ip6_frags_ns_ctl_table[] = {
{
.procname = "ip6frag_high_thresh",
.data = &init_net.ipv6.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ipv6.frags.low_thresh
},
{
.procname = "ip6frag_low_thresh",
.data = &init_net.ipv6.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ipv6.frags.high_thresh
},
{
@@ -708,19 +675,27 @@ static void ip6_frags_sysctl_unregister(void)
static int __net_init ipv6_frags_init_net(struct net *net)
{
+ int res;
+
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+ net->ipv6.frags.f = &ip6_frags;
- inet_frags_init_net(&net->ipv6.frags);
+ res = inet_frags_init_net(&net->ipv6.frags);
+ if (res < 0)
+ return res;
- return ip6_frags_ns_sysctl_register(net);
+ res = ip6_frags_ns_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&net->ipv6.frags);
+ return res;
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
{
ip6_frags_ns_sysctl_unregister(net);
- inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
+ inet_frags_exit_net(&net->ipv6.frags);
}
static struct pernet_operations ip6_frags_ops = {
@@ -728,14 +703,55 @@ static struct pernet_operations ip6_frags_ops = {
.exit = ipv6_frags_exit_net,
};
+static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+const struct rhashtable_params ip6_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = ip6_key_hashfn,
+ .obj_hashfn = ip6_obj_hashfn,
+ .obj_cmpfn = ip6_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+EXPORT_SYMBOL(ip6_rhash_params);
+
int __init ipv6_frag_init(void)
{
int ret;
- ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+ ip6_frags.constructor = ip6_frag_init;
+ ip6_frags.destructor = NULL;
+ ip6_frags.qsize = sizeof(struct frag_queue);
+ ip6_frags.frag_expire = ip6_frag_expire;
+ ip6_frags.frags_cache_name = ip6_frag_cache_name;
+ ip6_frags.rhash_params = ip6_rhash_params;
+ ret = inet_frags_init(&ip6_frags);
if (ret)
goto out;
+ ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+ if (ret)
+ goto err_protocol;
+
ret = ip6_frags_sysctl_register();
if (ret)
goto err_sysctl;
@@ -744,17 +760,6 @@ int __init ipv6_frag_init(void)
if (ret)
goto err_pernet;
- ip6_frags.hashfn = ip6_hashfn;
- ip6_frags.constructor = ip6_frag_init;
- ip6_frags.destructor = NULL;
- ip6_frags.skb_free = NULL;
- ip6_frags.qsize = sizeof(struct frag_queue);
- ip6_frags.match = ip6_frag_match;
- ip6_frags.frag_expire = ip6_frag_expire;
- ip6_frags.frags_cache_name = ip6_frag_cache_name;
- ret = inet_frags_init(&ip6_frags);
- if (ret)
- goto err_pernet;
out:
return ret;
@@ -762,6 +767,8 @@ err_pernet:
ip6_frags_sysctl_unregister();
err_sysctl:
inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+err_protocol:
+ inet_frags_fini(&ip6_frags);
goto out;
}
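
The reassembly.c rewrite above replaces the hand-seeded global jhash buckets with a per-netns rhashtable keyed by the whole frag_v6_compare_key; jhash2() hashes that key as an array of u32 words, which is why the key struct must be padding-free and a multiple of 4 bytes. A userspace sketch of hashing a compare key word-by-word; the word hash below is FNV-1a, purely a stand-in for jhash2:

    #include <stdio.h>
    #include <stdint.h>

    struct cmp_key {
        uint32_t id;
        uint32_t saddr[4];
        uint32_t daddr[4];
        uint32_t user;
        uint32_t iif;
    };                              /* 11 u32 words, no padding */

    static uint32_t hash_words(const uint32_t *w, size_t n, uint32_t seed)
    {
        uint32_t h = 2166136261u ^ seed;

        while (n--) {
            h ^= *w++;
            h *= 16777619u;
        }
        return h;
    }

    int main(void)
    {
        struct cmp_key k = { .id = 0xdeadbeef, .user = 1, .iif = 2 };

        /* hash the struct as raw words, as jhash2() does over the key */
        printf("%08x\n",
               hash_words((const uint32_t *)&k,
                          sizeof(k) / sizeof(uint32_t), 42));
        return 0;
    }
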
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0a7403b9d572e6..9de10cecc91951 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1417,8 +1417,12 @@ EXPORT_SYMBOL_GPL(ip6_update_pmtu);
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
- ip6_update_pmtu(skb, sock_net(sk), mtu,
- sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
+ int oif = sk->sk_bound_dev_if;
+
+ if (!oif && skb->dev)
+ oif = l3mdev_master_ifindex(skb->dev);
+
+ ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
@@ -3083,7 +3087,7 @@ static int rt6_fill_node(struct net *net,
table = rt->rt6i_table->tb6_id;
else
table = RT6_TABLE_UNSPEC;
- rtm->rtm_table = table;
+ rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table))
goto nla_put_failure;
if (rt->rt6i_flags & RTF_REJECT) {
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index dec4e7bda5f372..96582ec9c80700 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -577,7 +577,7 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
goto out;
err = 0;
- if (!ipip6_err_gen_icmpv6_unreach(skb))
+ if (__in6_dev_get(skb->dev) && !ipip6_err_gen_icmpv6_unreach(skb))
goto out;
if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
@@ -692,7 +692,6 @@ static int ipip6_rcv(struct sk_buff *skb)
if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
goto out;
- iph = ip_hdr(skb);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
@@ -773,8 +772,9 @@ static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
pbw0 = tunnel->ip6rd.prefixlen >> 5;
pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
- tunnel->ip6rd.relay_prefixlen;
+ d = tunnel->ip6rd.relay_prefixlen < 32 ?
+ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+ tunnel->ip6rd.relay_prefixlen : 0;
pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
if (pbi1 > 0)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 2fc2b2c074c79e..6fa69233e8b483 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1049,11 +1049,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
- newnp->mcast_oif = tcp_v6_iif(skb);
- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+ newnp->mcast_oif = inet_iif(skb);
+ newnp->mcast_hops = ip_hdr(skb)->ttl;
+ newnp->rcv_flowinfo = 0;
if (np->repflow)
- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+ newnp->flow_label = 0;
/*
* No need to charge this sock to the relevant IPv6 refcnt debug socks count
@@ -1421,6 +1421,10 @@ process:
reqsk_put(req);
goto discard_it;
}
+ if (tcp_checksum_complete(skb)) {
+ reqsk_put(req);
+ goto csum_error;
+ }
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
@@ -1507,6 +1511,7 @@ discard_it:
return 0;
discard_and_relse:
+ sk_drops_add(sk, skb);
sock_put(sk);
goto discard_it;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 3b0e22d20e8384..0f2a36ead33904 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -585,7 +585,7 @@ out:
sock_put(sk);
}
-static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index 0682c031ccdc77..3c1dbc9f74cf79 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
int flags, int *addr_len);
-int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
void udpv6_destroy_sock(struct sock *sk);
void udp_v6_clear_sk(struct sock *sk, int size);
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 9cf097e206e931..d1eaeeaa34d2c0 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -45,7 +45,7 @@ struct proto udplitev6_prot = {
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
- .backlog_rcv = udpv6_queue_rcv_skb,
+ .backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.get_port = udp_v6_get_port,
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 4d09ce6fa90e66..64862c5084ee6a 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -165,9 +165,11 @@ static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
if (toobig && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
+ kfree_skb(skb);
return -EMSGSIZE;
} else if (!skb->ignore_df && toobig && skb->sk) {
xfrm_local_error(skb, mtu);
+ kfree_skb(skb);
return -EMSGSIZE;
}
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 1a8608cc104cb1..4d0c7115f78e0a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,7 +124,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
- u16 offset = sizeof(*hdr);
+ u32 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
u16 nhoff = IP6CB(skb)->nhoff;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 5743044cd660b8..56b72cada346ff 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -144,6 +144,9 @@ static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
+
+ if (spi == XFRM6_TUNNEL_SPI_MAX)
+ break;
}
for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 4a116d766c159f..7cc9db38e1b629 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -774,6 +774,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
lock_sock(sk);
+
+ /* Ensure that the socket is not already bound */
+ if (self->ias_obj) {
+ err = -EINVAL;
+ goto out;
+ }
+
#ifdef CONFIG_IRDA_ULTRA
/* Special care for Ultra sockets */
if ((sk->sk_type == SOCK_DGRAM) &&
@@ -2020,7 +2027,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
err = -EINVAL;
goto out;
}
- irias_insert_object(ias_obj);
+
+ /* Only insert newly allocated objects */
+ if (free_ias)
+ irias_insert_object(ias_obj);
+
kfree(ias_opt);
break;
case IRLMP_IAS_DEL:
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index aeffb65181f50a..5984cc35d50882 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -705,10 +705,8 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
char uid[9];
/* Verify the input sockaddr */
- if (!addr || addr->sa_family != AF_IUCV)
- return -EINVAL;
-
- if (addr_len < sizeof(struct sockaddr_iucv))
+ if (addr_len < sizeof(struct sockaddr_iucv) ||
+ addr->sa_family != AF_IUCV)
return -EINVAL;
lock_sock(sk);
@@ -852,7 +850,7 @@ static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
struct iucv_sock *iucv = iucv_sk(sk);
int err;
- if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+ if (alen < sizeof(struct sockaddr_iucv) || addr->sa_family != AF_IUCV)
return -EINVAL;
if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 15150b412930bb..3ba903ff2bb083 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -437,6 +437,24 @@ static int verify_address_len(const void *p)
return 0;
}
+static inline int sadb_key_len(const struct sadb_key *key)
+{
+ int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);
+
+ return DIV_ROUND_UP(sizeof(struct sadb_key) + key_bytes,
+ sizeof(uint64_t));
+}
+
+static int verify_key_len(const void *p)
+{
+ const struct sadb_key *key = p;
+
+ if (sadb_key_len(key) > key->sadb_key_len)
+ return -EINVAL;
+
+ return 0;
+}
+
static inline int pfkey_sec_ctx_len(const struct sadb_x_sec_ctx *sec_ctx)
{
return DIV_ROUND_UP(sizeof(struct sadb_x_sec_ctx) +
@@ -533,16 +551,25 @@ static int parse_exthdrs(struct sk_buff *skb, const struct sadb_msg *hdr, void *
return -EINVAL;
if (ext_hdrs[ext_type-1] != NULL)
return -EINVAL;
- if (ext_type == SADB_EXT_ADDRESS_SRC ||
- ext_type == SADB_EXT_ADDRESS_DST ||
- ext_type == SADB_EXT_ADDRESS_PROXY ||
- ext_type == SADB_X_EXT_NAT_T_OA) {
+ switch (ext_type) {
+ case SADB_EXT_ADDRESS_SRC:
+ case SADB_EXT_ADDRESS_DST:
+ case SADB_EXT_ADDRESS_PROXY:
+ case SADB_X_EXT_NAT_T_OA:
if (verify_address_len(p))
return -EINVAL;
- }
- if (ext_type == SADB_X_EXT_SEC_CTX) {
+ break;
+ case SADB_X_EXT_SEC_CTX:
if (verify_sec_ctx_len(p))
return -EINVAL;
+ break;
+ case SADB_EXT_KEY_AUTH:
+ case SADB_EXT_KEY_ENCRYPT:
+ if (verify_key_len(p))
+ return -EINVAL;
+ break;
+ default:
+ break;
}
ext_hdrs[ext_type-1] = (void *) p;
}
@@ -1111,14 +1138,12 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
if (key != NULL &&
sa->sadb_sa_auth != SADB_X_AALG_NULL &&
- ((key->sadb_key_bits+7) / 8 == 0 ||
- (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+ key->sadb_key_bits == 0)
return ERR_PTR(-EINVAL);
key = ext_hdrs[SADB_EXT_KEY_ENCRYPT-1];
if (key != NULL &&
sa->sadb_sa_encrypt != SADB_EALG_NULL &&
- ((key->sadb_key_bits+7) / 8 == 0 ||
- (key->sadb_key_bits+7) / 8 > key->sadb_key_len * sizeof(uint64_t)))
+ key->sadb_key_bits == 0)
return ERR_PTR(-EINVAL);
x = xfrm_state_alloc(net);
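
The af_key.c hunks above centralize key-extension validation: verify_key_len() recomputes the minimum extension size in 64-bit units from the declared bit count and rejects any sadb_key whose sadb_key_len is smaller, which lets the per-caller checks shrink to a simple sadb_key_bits != 0 test. A worked example of the arithmetic with a simplified header struct; the real struct sadb_key is 8 bytes, which the calculation below hardcodes:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    struct sadb_key_hdr {           /* simplified stand-in */
        uint16_t sadb_key_len;      /* length in 64-bit units */
        uint16_t sadb_key_bits;     /* key length in bits */
    };

    static int key_len_units(const struct sadb_key_hdr *key)
    {
        int key_bytes = DIV_ROUND_UP(key->sadb_key_bits, 8);

        /* a 256-bit key: 32 bytes + 8-byte header = 40 bytes = 5 units */
        return DIV_ROUND_UP(8 + key_bytes, (int)sizeof(uint64_t));
    }

    int main(void)
    {
        struct sadb_key_hdr k = { .sadb_key_len = 5, .sadb_key_bits = 256 };

        printf("need %d units, declared %u: %s\n",
               key_len_units(&k), k.sadb_key_len,
               key_len_units(&k) > k.sadb_key_len ? "reject" : "ok");
        return 0;
    }
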
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 92df832a18965a..429dbb06424001 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -83,8 +83,7 @@
#define L2TP_SLFLAG_S 0x40000000
#define L2TP_SL_SEQ_MASK 0x00ffffff
-#define L2TP_HDR_SIZE_SEQ 10
-#define L2TP_HDR_SIZE_NOSEQ 6
+#define L2TP_HDR_SIZE_MAX 14
/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0
@@ -705,11 +704,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
"%s: recv data ns=%u, session nr=%u\n",
session->name, ns, session->nr);
}
+ ptr += 4;
}
- /* Advance past L2-specific header, if present */
- ptr += session->l2specific_len;
-
if (L2TP_SKB_CB(skb)->has_seq) {
/* Received a packet with sequence numbers. If we're the LNS,
* check if we are sending sequence numbers and if not,
@@ -860,7 +857,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
__skb_pull(skb, sizeof(struct udphdr));
/* Short packet? */
- if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
+ if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: recv short packet (len=%d)\n",
tunnel->name, skb->len);
@@ -933,6 +930,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
goto error;
}
+ if (tunnel->version == L2TP_HDR_VER_3 &&
+ l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto error;
+
l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
return 0;
@@ -1031,21 +1032,20 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
memcpy(bufp, &session->cookie[0], session->cookie_len);
bufp += session->cookie_len;
}
- if (session->l2specific_len) {
- if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
- u32 l2h = 0;
- if (session->send_seq) {
- l2h = 0x40000000 | session->ns;
- session->ns++;
- session->ns &= 0xffffff;
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: updated ns to %u\n",
- session->name, session->ns);
- }
+ if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
+ u32 l2h = 0;
- *((__be32 *) bufp) = htonl(l2h);
+ if (session->send_seq) {
+ l2h = 0x40000000 | session->ns;
+ session->ns++;
+ session->ns &= 0xffffff;
+ l2tp_dbg(session, L2TP_MSG_SEQ,
+ "%s: updated ns to %u\n",
+ session->name, session->ns);
}
- bufp += session->l2specific_len;
+
+ *((__be32 *)bufp) = htonl(l2h);
+ bufp += 4;
}
if (session->offset)
bufp += session->offset;
@@ -1145,7 +1145,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
+ skb_dst_set(skb, sk_dst_check(sk, 0));
inet = inet_sk(sk);
fl = &inet->cork.fl;
@@ -1724,7 +1724,7 @@ int l2tp_session_delete(struct l2tp_session *session)
EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
- * l2specific_len parameters are set.
+ * l2specific_type parameters are set.
*/
void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
@@ -1733,7 +1733,8 @@ void l2tp_session_set_header_len(struct l2tp_session *session, int version)
if (session->send_seq)
session->hdr_len += 4;
} else {
- session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
+ session->hdr_len = 4 + session->cookie_len + session->offset;
+ session->hdr_len += l2tp_get_l2specific_len(session);
if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
session->hdr_len += 4;
}
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 9cf546846edbb9..fad47e9d74bccc 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -313,6 +313,37 @@ do { \
#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
#endif
+static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
+{
+ switch (session->l2specific_type) {
+ case L2TP_L2SPECTYPE_DEFAULT:
+ return 4;
+ case L2TP_L2SPECTYPE_NONE:
+ default:
+ return 0;
+ }
+}
+
+static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
+ unsigned char **ptr, unsigned char **optr)
+{
+ int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);
+
+ if (opt_len > 0) {
+ int off = *ptr - *optr;
+
+ if (!pskb_may_pull(skb, off + opt_len))
+ return -1;
+
+ if (skb->data != *optr) {
+ *optr = skb->data;
+ *ptr = skb->data + off;
+ }
+ }
+
+ return 0;
+}
+
#define l2tp_printk(ptr, type, func, fmt, ...) \
do { \
if (((ptr)->debug) & (type)) \
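
l2tp_v3_ensure_opt_in_linear() above pulls the optional L2TPv3 cookie and L2-specific sublayer into the skb's linear area before they are parsed; since pskb_may_pull() can reallocate skb->data, the caller's optr/ptr cursors are rebuilt from the saved offset instead of being reused. A userspace analogue of that fix-up-after-realloc pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>
    #include <string.h>

    int main(void)
    {
        char *buf = malloc(16);
        if (!buf)
            return 1;
        strcpy(buf, "hdr:payload");

        char *ptr = buf + 4;            /* parse cursor inside buf */
        ptrdiff_t off = ptr - buf;      /* save the offset first */

        char *nbuf = realloc(buf, 4096);  /* may move the block */
        if (!nbuf) {
            free(buf);
            return 1;
        }
        buf = nbuf;
        ptr = buf + off;                /* fix up, never reuse the old ptr */

        printf("%s\n", ptr);            /* prints "payload" */
        free(buf);
        return 0;
    }
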
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 48ab93842322ed..7efb3cadc152be 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -163,6 +163,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard;
+
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
return 0;
@@ -177,21 +180,23 @@ pass_up:
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
read_lock_bh(&l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(net, iph->daddr, 0, tunnel_id);
+ if (!sk) {
+ read_unlock_bh(&l2tp_ip_lock);
+ goto discard;
+ }
+
+ sock_hold(sk);
read_unlock_bh(&l2tp_ip_lock);
}
- if (sk == NULL)
- goto discard;
-
- sock_hold(sk);
-
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index c8f483cd2ca9c1..c125478981acf2 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -174,6 +174,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}
+ if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
+ goto discard;
+
l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
tunnel->recv_payload_hook);
return 0;
@@ -188,22 +191,24 @@ pass_up:
tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
- if (tunnel != NULL)
+ if (tunnel) {
sk = tunnel->sock;
- else {
+ sock_hold(sk);
+ } else {
struct ipv6hdr *iph = ipv6_hdr(skb);
read_lock_bh(&l2tp_ip6_lock);
sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
0, tunnel_id);
+ if (!sk) {
+ read_unlock_bh(&l2tp_ip6_lock);
+ goto discard;
+ }
+
+ sock_hold(sk);
read_unlock_bh(&l2tp_ip6_lock);
}
- if (sk == NULL)
- goto discard;
-
- sock_hold(sk);
-
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
@@ -662,9 +667,6 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
if (flags & MSG_OOB)
goto out;
- if (addr_len)
- *addr_len = sizeof(*lsa);
-
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
@@ -694,6 +696,7 @@ static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
lsa->l2tp_conn_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = inet6_iif(skb);
+ *addr_len = sizeof(*lsa);
}
if (np->rxopt.all)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 83e8a295c80623..c153fc2883a863 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -726,7 +726,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct sk_buff *skb = NULL;
struct sock *sk = sock->sk;
struct llc_sock *llc = llc_sk(sk);
- unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
u32 *seq, skb_len;
@@ -851,9 +850,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
goto copy_uaddr;
if (!(flags & MSG_PEEK)) {
- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb);
- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
+ skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
*seq = 0;
}
@@ -874,9 +872,8 @@ copy_uaddr:
llc_cmsg_rcv(msg, skb);
if (!(flags & MSG_PEEK)) {
- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
- sk_eat_skb(sk, skb);
- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
+ skb_unlink(skb, &sk->sk_receive_queue);
+ kfree_skb(skb);
*seq = 0;
}
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 842851cef6987c..e896a2c53b1202 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -73,8 +73,8 @@ struct llc_sap *llc_sap_find(unsigned char sap_value)
rcu_read_lock_bh();
sap = __llc_sap_find(sap_value);
- if (sap)
- llc_sap_hold(sap);
+ if (!sap || !llc_sap_hold_safe(sap))
+ sap = NULL;
rcu_read_unlock_bh();
return sap;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c436c946097f50..2182b5d39ee976 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -305,7 +305,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
/* Keys without a station are used for TX only */
- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
break;
case NL80211_IFTYPE_ADHOC:
@@ -373,7 +373,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
goto out_unlock;
}
- ieee80211_key_free(key, true);
+ ieee80211_key_free(key, sdata->vif.type == NL80211_IFTYPE_STATION);
ret = 0;
out_unlock:
@@ -1306,6 +1306,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
+ if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
err = sta_apply_parameters(local, sta, params);
if (err) {
sta_info_free(local, sta);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index b65d1ce8af1ee4..77f95866e63b3f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -948,8 +948,8 @@ static void ieee80211_rx_mgmt_deauth_ibss(struct ieee80211_sub_if_data *sdata,
if (len < IEEE80211_DEAUTH_FRAME_LEN)
return;
- ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, reason);
+ ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
sta_info_destroy_addr(sdata, mgmt->sa);
}
@@ -967,9 +967,9 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- ibss_dbg(sdata,
- "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+ ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+ mgmt->bssid, auth_transaction);
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
@@ -1172,10 +1172,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rx_timestamp = drv_get_tsf(local, sdata);
}
- ibss_dbg(sdata,
- "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+ ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
mgmt->sa, mgmt->bssid,
- (unsigned long long)rx_timestamp,
+ (unsigned long long)rx_timestamp);
+ ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
(unsigned long long)beacon_timestamp,
(unsigned long long)(rx_timestamp - beacon_timestamp),
jiffies);
@@ -1534,9 +1534,9 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
tx_last_beacon = drv_tx_last_beacon(local);
- ibss_dbg(sdata,
- "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+ ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+ mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8c5b8b4c6cd7c7..19936289653700 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -999,6 +999,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
if (local->open_count == 0)
ieee80211_clear_tx_pending(local);
+ sdata->vif.bss_conf.beacon_int = 0;
+
/*
* If the interface goes down while suspended, presumably because
* the device was unplugged and that happens before our resume,
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 4a72c0d1e56f9a..91a4e606edcdb4 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -647,11 +647,15 @@ int ieee80211_key_link(struct ieee80211_key *key,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
- int idx, ret;
- bool pairwise;
-
- pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
- idx = key->conf.keyidx;
+ int idx = key->conf.keyidx;
+ bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE;
+ /*
+ * We want to delay tailroom updates only for station - in that
+ * case it helps roaming speed, but in other cases it hurts and
+ * can cause warnings to appear.
+ */
+ bool delay_tailroom = sdata->vif.type == NL80211_IFTYPE_STATION;
+ int ret;
mutex_lock(&sdata->local->key_mtx);
@@ -679,14 +683,14 @@ int ieee80211_key_link(struct ieee80211_key *key,
increment_tailroom_need_count(sdata);
ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
- ieee80211_key_destroy(old_key, true);
+ ieee80211_key_destroy(old_key, delay_tailroom);
ieee80211_debugfs_key_add(key);
if (!local->wowlan) {
ret = ieee80211_key_enable_hw_accel(key);
if (ret)
- ieee80211_key_free(key, true);
+ ieee80211_key_free(key, delay_tailroom);
} else {
ret = 0;
}
@@ -874,7 +878,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
ieee80211_key_replace(key->sdata, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
- __ieee80211_key_destroy(key, true);
+ __ieee80211_key_destroy(key, key->sdata->vif.type ==
+ NL80211_IFTYPE_STATION);
}
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
@@ -884,7 +889,8 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
ieee80211_key_replace(key->sdata, key->sta,
key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
key, NULL);
- __ieee80211_key_destroy(key, true);
+ __ieee80211_key_destroy(key, key->sdata->vif.type ==
+ NL80211_IFTYPE_STATION);
}
mutex_unlock(&local->key_mtx);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 71d27d9382ecb1..0da2205c0f0690 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -253,8 +253,27 @@ static void ieee80211_restart_work(struct work_struct *work)
"%s called with hardware scan in progress\n", __func__);
rtnl_lock();
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ /*
+ * XXX: there may be more work for other vif types and even
+ * for station mode: a good thing would be to run most of
+ * the iface type's dependent _stop (ieee80211_mg_stop,
+ * ieee80211_ibss_stop) etc...
+ * For now, fix only the specific bug that was seen: race
+ * between csa_connection_drop_work and us.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /*
+ * This worker is scheduled from the iface worker that
+ * runs on mac80211's workqueue, so we can't be
+ * scheduling this worker after the cancel right here.
+ * The exception is ieee80211_chswitch_done.
+ * Then we can have a race...
+ */
+ cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+ }
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ }
ieee80211_scan_cancel(local);
ieee80211_reconfig(local);
rtnl_unlock();
@@ -460,10 +479,7 @@ static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_RXSTBC_MASK |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index f55cddcd556c88..466922f09d0484 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -552,6 +552,10 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
forward = false;
reply = true;
target_metric = 0;
+
+ if (SN_GT(target_sn, ifmsh->sn))
+ ifmsh->sn = target_sn;
+
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ec97afd9eed388..ff9206ef052b7e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -988,6 +988,10 @@ static void ieee80211_chswitch_work(struct work_struct *work)
*/
if (sdata->reserved_chanctx) {
+ struct ieee80211_supported_band *sband = NULL;
+ struct sta_info *mgd_sta = NULL;
+ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
/*
* with multi-vif csa driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
@@ -996,6 +1000,48 @@ static void ieee80211_chswitch_work(struct work_struct *work)
if (sdata->reserved_ready)
goto out;
+ if (sdata->vif.bss_conf.chandef.width !=
+ sdata->csa_chandef.width) {
+ /*
+ * For managed interface, we need to also update the AP
+ * station bandwidth and align the rate scale algorithm
+ * on the bandwidth change. Here we only consider the
+ * bandwidth of the new channel definition (as channel
+ * switch flow does not have the full HT/VHT/HE
+ * information), assuming that if additional changes are
+ * required they would be done as part of the processing
+ * of the next beacon from the AP.
+ */
+ switch (sdata->csa_chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ }
+
+ mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+ sband =
+ local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+ }
+
+ if (sdata->vif.bss_conf.chandef.width >
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
ret = ieee80211_vif_use_reserved_context(sdata);
if (ret) {
sdata_info(sdata,
@@ -1006,6 +1052,13 @@ static void ieee80211_chswitch_work(struct work_struct *work)
goto out;
}
+ if (sdata->vif.bss_conf.chandef.width <
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
goto out;
}
@@ -1800,7 +1853,8 @@ static bool ieee80211_sta_wmm_params(struct ieee80211_local *local,
params[ac].acm = acm;
params[ac].uapsd = uapsd;
- if (params[ac].cw_min > params[ac].cw_max) {
+ if (params[ac].cw_min == 0 ||
+ params[ac].cw_min > params[ac].cw_max) {
sdata_info(sdata,
"AP has invalid WMM params (CWmin/max=%d/%d for ACI %d), using defaults\n",
params[ac].cw_min, params[ac].cw_max, aci);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 67a57cc671b3fa..7e3ac43bf6d790 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -149,6 +149,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
/* allocate extra bitmaps */
if (status->chains)
len += 4 * hweight8(status->chains);
+ /* vendor presence bitmap */
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
+ len += 4;
if (ieee80211_have_rx_timestamp(status)) {
len = ALIGN(len, 8);
@@ -185,8 +188,6 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
- /* vendor presence bitmap */
- len += 4;
/* alignment for fixed 6-byte vendor data header */
len = ALIGN(len, 2);
/* vendor data header */
@@ -1129,6 +1130,7 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
if (ieee80211_is_ctl(hdr->frame_control) ||
+ ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control) ||
is_multicast_ether_addr(hdr->addr1))
return RX_CONTINUE;
@@ -2358,7 +2360,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
skb_set_queue_mapping(skb, q);
if (!--mesh_hdr->ttl) {
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+ if (!is_multicast_ether_addr(hdr->addr1))
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+ dropped_frames_ttl);
goto out;
}
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index e9f2844af5b9b0..10650085eaa9c5 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -466,11 +466,6 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
if (!skb)
return;
- if (dropped) {
- dev_kfree_skb_any(skb);
- return;
- }
-
if (info->flags & IEEE80211_TX_INTFL_NL80211_FRAME_TX) {
u64 cookie = IEEE80211_SKB_CB(skb)->ack.cookie;
struct ieee80211_sub_if_data *sdata;
@@ -492,6 +487,8 @@ static void ieee80211_report_ack_skb(struct ieee80211_local *local,
rcu_read_unlock();
dev_kfree_skb_any(skb);
+ } else if (dropped) {
+ dev_kfree_skb_any(skb);
} else {
/* consumes skb */
skb_complete_wifi_ack(skb, acked);
@@ -651,6 +648,8 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
/* Track when last TDLS packet was ACKed */
if (test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH))
sta->status_stats.last_tdls_pkt_time = jiffies;
+ } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ return;
} else {
ieee80211_lost_packet(sta, info);
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index a8b2219971d715..022de905002602 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -432,8 +432,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (ieee80211_hw_check(&tx->local->hw, QUEUE_CONTROL))
info->hw_queue = tx->sdata->vif.cab_queue;
- /* no stations in PS mode */
- if (!atomic_read(&ps->num_sta_ps))
+ /* no stations in PS mode and no buffered packets */
+ if (!atomic_read(&ps->num_sta_ps) && skb_queue_empty(&ps->bc_buf))
return TX_CONTINUE;
info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
@@ -1600,9 +1600,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
int head_need, bool may_encrypt)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_hdr *hdr;
+ bool enc_tailroom;
int tail_need = 0;
- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ enc_tailroom = may_encrypt &&
+ (sdata->crypto_tx_tailroom_needed_cnt ||
+ ieee80211_is_mgmt(hdr->frame_control));
+
+ if (enc_tailroom) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);
@@ -1610,8 +1617,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
if (skb_cloned(skb) &&
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
- !skb_clone_writable(skb, ETH_HLEN) ||
- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
+ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 996a2d09c704ed..3de1883640abed 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2055,7 +2055,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (!sta->uploaded)
continue;
- if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+ if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
continue;
for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 3827f359b336de..9e1ff9d4cf2de2 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -72,8 +72,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb)
int ret;
if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) {
- u16 crc = crc_ccitt(0, skb->data, skb->len);
+ struct sk_buff *nskb;
+ u16 crc;
+
+ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) {
+ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN,
+ GFP_ATOMIC);
+ if (likely(nskb)) {
+ consume_skb(skb);
+ skb = nskb;
+ } else {
+ goto err_tx;
+ }
+ }
+ crc = crc_ccitt(0, skb->data, skb->len);
put_unaligned_le16(crc, skb_put(skb, 2));
}
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
index 9a14c237830f4b..b259a5814965fd 100644
--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
@@ -213,13 +213,13 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -492,13 +492,13 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index dd1649caa2b233..ac212542a21788 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1809,13 +1809,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
/* the destination server is not available */
- if (sysctl_expire_nodest_conn(ipvs)) {
+ __u32 flags = cp->flags;
+
+ /* when timer already started, silently drop the packet.*/
+ if (timer_pending(&cp->timer))
+ __ip_vs_conn_put(cp);
+ else
+ ip_vs_conn_put(cp);
+
+ if (sysctl_expire_nodest_conn(ipvs) &&
+ !(flags & IP_VS_CONN_F_ONE_PACKET)) {
/* try to expire the connection immediately */
ip_vs_conn_expire_now(cp);
}
- /* don't restart its timer, and silently
- drop the packet. */
- __ip_vs_conn_put(cp);
+
return NF_DROP;
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c0656510c4dc80..56c62b65923f18 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2218,6 +2218,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
u->udp_timeout);
#ifdef CONFIG_IP_VS_PROTO_TCP
+ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
+ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
+ return -EINVAL;
+ }
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
+ return -EINVAL;
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
@@ -2349,8 +2361,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
struct ipvs_sync_daemon_cfg cfg;
memset(&cfg, 0, sizeof(cfg));
- strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
- sizeof(cfg.mcast_ifn));
+ ret = -EINVAL;
+ if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
+ sizeof(cfg.mcast_ifn)) <= 0)
+ goto out_dec;
cfg.syncid = dm->syncid;
ret = start_sync_thread(ipvs, &cfg, dm->state);
} else {
@@ -2388,12 +2402,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
}
}
+ if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
+ strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
+ IP_VS_SCHEDNAME_MAXLEN) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
usvc.protocol != IPPROTO_SCTP) {
- pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
+ pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
usvc.protocol, &usvc.addr.ip,
- ntohs(usvc.port), usvc.sched_name);
+ ntohs(usvc.port));
ret = -EFAULT;
goto out_unlock;
}
@@ -2822,7 +2843,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
[IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
- .len = IP_VS_IFNAME_MAXLEN },
+ .len = IP_VS_IFNAME_MAXLEN - 1 },
[IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
[IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
[IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
@@ -2840,7 +2861,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
[IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
[IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
[IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
- .len = IP_VS_SCHEDNAME_MAXLEN },
+ .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
[IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
.len = IP_VS_PENAME_MAXLEN },
[IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index fce1b1cca32d65..99d0e9261a642f 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -244,14 +244,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
[CT_DCCP_ROLE_SERVER] = {
@@ -372,14 +372,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
};
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 278f3b9356efdc..7cc1d9c22a9fa7 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -410,6 +410,8 @@ static void tcp_options(const struct sk_buff *skb,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize=*ptr++;
if (opsize < 2) /* "silly options" */
return;
@@ -470,6 +472,8 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
length--;
continue;
default:
+ if (length < 2)
+ return;
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 2c89f90cd7bc95..f94a2e1172f04a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -422,14 +422,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
+ struct ctl_table tmp = *table;
+
+ tmp.data = buf;
mutex_lock(&nf_log_mutex);
logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
if (!logger)
- table->data = "NONE";
+ strlcpy(buf, "NONE", sizeof(buf));
else
- table->data = logger->name;
- r = proc_dostring(table, write, buffer, lenp, ppos);
+ strlcpy(buf, logger->name, sizeof(buf));
mutex_unlock(&nf_log_mutex);
+ r = proc_dostring(&tmp, write, buffer, lenp, ppos);
}
return r;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 120e9ae04db340..a7967af0da82d1 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3452,6 +3452,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
dreg = nft_type_to_reg(set->dtype);
list_for_each_entry(binding, &set->bindings, list) {
struct nft_ctx bind_ctx = {
+ .net = ctx->net,
.afi = ctx->afi,
.table = ctx->table,
.chain = (struct nft_chain *)binding->chain,
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index f3695a4974086f..99bc2f87a97422 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -167,7 +167,8 @@ next_rule:
switch (regs.verdict.code) {
case NFT_JUMP:
- BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
+ if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
+ return NF_DROP;
jumpstack[stackptr].chain = chain;
jumpstack[stackptr].rule = rule;
jumpstack[stackptr].rulenum = rulenum;
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index fefbf5f0b28d2f..088e8da06b00b9 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -243,6 +243,9 @@ nfacct_filter_alloc(const struct nlattr * const attr)
if (err < 0)
return ERR_PTR(err);
+ if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
+ return ERR_PTR(-EINVAL);
+
filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
if (!filter)
return ERR_PTR(-ENOMEM);
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index dea467647c90d6..86a1956c036a56 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -895,7 +895,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
goto out_put;
default:
ret = -ENOTSUPP;
- break;
+ goto out_put;
}
} else if (!inst) {
ret = -ENODEV;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 7edcfda288c4c4..54cde78c271839 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1106,6 +1106,9 @@ nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
[NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
[NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
+ [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+ [NFQA_CFG_MASK] = { .type = NLA_U32 },
+ [NFQA_CFG_FLAGS] = { .type = NLA_U32 },
};
static const struct nf_queue_handler nfqh = {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 3c9936bd2fce9a..134939a0fae9d6 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -877,7 +877,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0)
return ERR_PTR(-EFAULT);
- strlcpy(info->name, compat_tmp.name, sizeof(info->name));
+ memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1);
info->num_counters = compat_tmp.num_counters;
user += sizeof(compat_tmp);
} else
@@ -890,9 +890,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
if (copy_from_user(info, user, sizeof(*info)) != 0)
return ERR_PTR(-EFAULT);
- info->name[sizeof(info->name) - 1] = '\0';
user += sizeof(*info);
}
+ info->name[sizeof(info->name) - 1] = '\0';
size = sizeof(struct xt_counters);
size *= info->num_counters;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 1f29faae09344d..b3bd48a9482513 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -276,6 +276,22 @@ static int idletimer_resume(struct notifier_block *notifier,
return NOTIFY_DONE;
}
+static int idletimer_check_sysfs_name(const char *name, unsigned int size)
+{
+ int ret;
+
+ ret = xt_check_proc_name(name, size);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(name, "power") ||
+ !strcmp(name, "subsystem") ||
+ !strcmp(name, "uevent"))
+ return -EINVAL;
+
+ return 0;
+}
+
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
@@ -286,6 +302,10 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
goto out;
}
+ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+ if (ret < 0)
+ goto out_free_timer;
+
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 3f33ec44bd2836..9f4ec16abfcffe 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -787,7 +787,8 @@ static int netlbl_unlabel_addrinfo_get(struct genl_info *info,
{
u32 addr_len;
- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
if (addr_len != sizeof(struct in_addr) &&
addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 818400fddc9ba6..bf292010760a6d 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
#include <asm/cacheflush.h>
#include <linux/hash.h>
#include <linux/genetlink.h>
+#include <linux/nospec.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -654,6 +655,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
if (protocol < 0 || protocol >= MAX_LINKS)
return -EPROTONOSUPPORT;
+ protocol = array_index_nospec(protocol, MAX_LINKS);
netlink_lock_table();
#ifdef CONFIG_MODULES
@@ -984,6 +986,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
return err;
}
+ if (nlk->ngroups == 0)
+ groups = 0;
+ else if (nlk->ngroups < 8*sizeof(groups))
+ groups &= (1UL << nlk->ngroups) - 1;
+
bound = nlk->bound;
if (bound) {
/* Ensure nlk->portid is up-to-date. */
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ed212ffc1d9d31..046ae1caecea88 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
s = NULL;
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
- bh_unlock_sock(sk);
+ sock_put(sk);
}
id++;
@@ -918,6 +918,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
}
if (sk != NULL) {
+ bh_lock_sock(sk);
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
@@ -927,6 +928,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
+ sock_put(sk);
return ret;
}
@@ -958,10 +960,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
- bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
+ bh_lock_sock(sk);
+
window = skb->data[20];
skb->sk = make;
@@ -1014,6 +1018,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
nr_insert_socket(make);
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index 94d05806a9a243..f0ecaec1ff3dae 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -53,21 +53,21 @@ void nr_start_t1timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);
- mod_timer(&nr->t1timer, jiffies + nr->t1);
+ sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1);
}
void nr_start_t2timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);
- mod_timer(&nr->t2timer, jiffies + nr->t2);
+ sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2);
}
void nr_start_t4timer(struct sock *sk)
{
struct nr_sock *nr = nr_sk(sk);
- mod_timer(&nr->t4timer, jiffies + nr->t4);
+ sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4);
}
void nr_start_idletimer(struct sock *sk)
@@ -75,37 +75,37 @@ void nr_start_idletimer(struct sock *sk)
struct nr_sock *nr = nr_sk(sk);
if (nr->idle > 0)
- mod_timer(&nr->idletimer, jiffies + nr->idle);
+ sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle);
}
void nr_start_heartbeat(struct sock *sk)
{
- mod_timer(&sk->sk_timer, jiffies + 5 * HZ);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ);
}
void nr_stop_t1timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t1timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t1timer);
}
void nr_stop_t2timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t2timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t2timer);
}
void nr_stop_t4timer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->t4timer);
+ sk_stop_timer(sk, &nr_sk(sk)->t4timer);
}
void nr_stop_idletimer(struct sock *sk)
{
- del_timer(&nr_sk(sk)->idletimer);
+ sk_stop_timer(sk, &nr_sk(sk)->idletimer);
}
void nr_stop_heartbeat(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
int nr_t1timer_running(struct sock *sk)
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 2b0f0ac498d2c0..5a58f9f380958f 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index d25212b135ea7b..96277ac37dac1f 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -419,6 +419,10 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
sock->service_name,
sock->service_name_len,
&service_name_tlv_length);
+ if (!service_name_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += service_name_tlv_length;
}
@@ -429,9 +433,17 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -486,9 +498,17 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
@@ -754,11 +774,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);
- pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+ pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
frag_len + LLCP_HEADER_SIZE, &err);
if (pdu == NULL) {
- pr_err("Could not allocate PDU\n");
- continue;
+ pr_err("Could not allocate PDU (error=%d)\n", err);
+ len -= remaining_len;
+ if (len == 0)
+ len = err;
+ break;
}
pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index 98876274a1eea2..c1334b826dd52e 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -532,10 +532,10 @@ static u8 nfc_llcp_reserve_sdp_ssap(struct nfc_llcp_local *local)
static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
{
- u8 *gb_cur, *version_tlv, version, version_length;
- u8 *lto_tlv, lto_length;
- u8 *wks_tlv, wks_length;
- u8 *miux_tlv, miux_length;
+ u8 *gb_cur, version, version_length;
+ u8 lto_length, wks_length, miux_length;
+ u8 *version_tlv = NULL, *lto_tlv = NULL,
+ *wks_tlv = NULL, *miux_tlv = NULL;
__be16 wks = cpu_to_be16(local->local_wks);
u8 gb_len = 0;
int ret = 0;
@@ -543,17 +543,33 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
version = LLCP_VERSION_11;
version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
1, &version_length);
+ if (!version_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += version_length;
lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, &lto_length);
+ if (!lto_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += lto_length;
pr_debug("Local wks 0x%lx\n", local->local_wks);
wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+ if (!wks_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += wks_length;
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
&miux_length);
+ if (!miux_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += miux_length;
gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 624c4719e40450..537917dfa83a08 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -409,7 +409,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
return -EINVAL;
}
- if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
attrs |= 1 << type;
a[type] = nla;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 392d4e2c0a2460..7d93228ba1e135 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2511,8 +2511,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out_put;
}
err = -ENXIO;
@@ -2678,8 +2680,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
+ if (addr && dev && saddr->sll_halen < dev->addr_len)
+ goto out_unlock;
}
err = -ENXIO;
@@ -2779,7 +2783,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (unlikely(offset < 0))
goto out_free;
} else if (reserve) {
- skb_push(skb, reserve);
+ skb_reserve(skb, -reserve);
+ if (len < reserve)
+ skb_reset_network_header(skb);
}
/* Returns -EFAULT on error */
@@ -3149,7 +3155,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
}
mutex_lock(&net->packet.sklist_lock);
- sk_add_node_rcu(sk, &net->packet.sklist);
+ sk_add_node_tail_rcu(sk, &net->packet.sklist);
mutex_unlock(&net->packet.sklist_lock);
preempt_disable();
@@ -4124,7 +4130,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
struct pgv *pg_vec;
int i;
- pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+ pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!pg_vec))
goto out;
@@ -4174,6 +4180,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
}
if (req->tp_block_nr) {
+ unsigned int min_frame_size;
+
/* Sanity tests and some calculations */
err = -EBUSY;
if (unlikely(rb->pg_vec))
@@ -4196,12 +4204,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
goto out;
if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
goto out;
+ min_frame_size = po->tp_hdrlen + po->tp_reserve;
if (po->tp_version >= TPACKET_V3 &&
- req->tp_block_size <=
- BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
+ req->tp_block_size <
+ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
goto out;
- if (unlikely(req->tp_frame_size < po->tp_hdrlen +
- po->tp_reserve))
+ if (unlikely(req->tp_frame_size < min_frame_size))
goto out;
if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
goto out;
@@ -4209,7 +4217,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 850a86cde0b3f6..f6aa532bcbf647 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -131,7 +131,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code,
ph->utid = 0;
ph->message_id = id;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = code;
+ ph->error_code = code;
return pn_skb_send(sk, skb, NULL);
}
@@ -152,7 +152,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
ph->utid = id; /* whatever */
ph->message_id = id;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = code;
+ ph->error_code = code;
return pn_skb_send(sk, skb, NULL);
}
@@ -207,7 +207,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
struct pnpipehdr *ph;
struct sockaddr_pn dst;
u8 data[4] = {
- oph->data[0], /* PEP type */
+ oph->pep_type, /* PEP type */
code, /* error code, at an unusual offset */
PAD, PAD,
};
@@ -220,7 +220,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
ph->utid = oph->utid;
ph->message_id = PNS_PEP_CTRL_RESP;
ph->pipe_handle = oph->pipe_handle;
- ph->data[0] = oph->data[1]; /* CTRL id */
+ ph->data0 = oph->data[0]; /* CTRL id */
pn_skb_get_src_sockaddr(oskb, &dst);
return pn_skb_send(sk, skb, &dst);
@@ -271,17 +271,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
return -EINVAL;
hdr = pnp_hdr(skb);
- if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
+ if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
- (unsigned int)hdr->data[0]);
+ (unsigned int)hdr->pep_type);
return -EOPNOTSUPP;
}
- switch (hdr->data[1]) {
+ switch (hdr->data[0]) {
case PN_PEP_IND_FLOW_CONTROL:
switch (pn->tx_fc) {
case PN_LEGACY_FLOW_CONTROL:
- switch (hdr->data[4]) {
+ switch (hdr->data[3]) {
case PEP_IND_BUSY:
atomic_set(&pn->tx_credits, 0);
break;
@@ -291,7 +291,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
}
break;
case PN_ONE_CREDIT_FLOW_CONTROL:
- if (hdr->data[4] == PEP_IND_READY)
+ if (hdr->data[3] == PEP_IND_READY)
atomic_set(&pn->tx_credits, wake = 1);
break;
}
@@ -300,12 +300,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
break;
- atomic_add(wake = hdr->data[4], &pn->tx_credits);
+ atomic_add(wake = hdr->data[3], &pn->tx_credits);
break;
default:
net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
- (unsigned int)hdr->data[1]);
+ (unsigned int)hdr->data[0]);
return -EOPNOTSUPP;
}
if (wake)
@@ -317,7 +317,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
{
struct pep_sock *pn = pep_sk(sk);
struct pnpipehdr *hdr = pnp_hdr(skb);
- u8 n_sb = hdr->data[0];
+ u8 n_sb = hdr->data0;
pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
__skb_pull(skb, sizeof(*hdr));
@@ -505,7 +505,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
return -ECONNREFUSED;
/* Parse sub-blocks */
- n_sb = hdr->data[4];
+ n_sb = hdr->data[3];
while (n_sb > 0) {
u8 type, buf[6], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -738,7 +738,7 @@ static int pipe_do_remove(struct sock *sk)
ph->utid = 0;
ph->message_id = PNS_PIPE_REMOVE_REQ;
ph->pipe_handle = pn->pipe_handle;
- ph->data[0] = PAD;
+ ph->data0 = PAD;
return pn_skb_send(sk, skb, NULL);
}
@@ -815,7 +815,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
peer_type = hdr->other_pep_type << 8;
/* Parse sub-blocks (options) */
- n_sb = hdr->data[4];
+ n_sb = hdr->data[3];
while (n_sb > 0) {
u8 type, buf[1], len = sizeof(buf);
const u8 *data = pep_get_sb(skb, &type, &len, buf);
@@ -1106,7 +1106,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
ph->utid = 0;
if (pn->aligned) {
ph->message_id = PNS_PIPE_ALIGNED_DATA;
- ph->data[0] = 0; /* padding */
+ ph->data0 = 0; /* padding */
} else
ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 6b12b68541ae96..05cab8c5a37913 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -191,4 +191,5 @@ struct rds_transport rds_loop_transport = {
.inc_copy_to_user = rds_message_inc_copy_to_user,
.inc_free = rds_loop_inc_free,
.t_name = "loopback",
+ .t_type = RDS_TRANS_LOOP,
};
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 4588860f4c3bb4..254f1345cf7ec5 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -401,6 +401,11 @@ struct rds_notifier {
int n_status;
};
+/* Available as part of RDS core, so doesn't need to participate
+ * in get_preferred transport etc
+ */
+#define RDS_TRANS_LOOP 3
+
/**
* struct rds_transport - transport specific behavioural hooks
*
diff --git a/net/rds/recv.c b/net/rds/recv.c
index 0514af3ab378ff..6275de19689c29 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -76,6 +76,11 @@ static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
return;
rs->rs_rcv_bytes += delta;
+
+ /* loop transport doesn't send/recv congestion updates */
+ if (rs->rs_transport->t_type == RDS_TRANS_LOOP)
+ return;
+
now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs);
rdsdebug("rs %p (%pI4:%u) recv bytes %d buf %d "
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index 0fc76d84510388..9f704a7f2a282c 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -848,6 +848,7 @@ void rose_link_device_down(struct net_device *dev)
/*
* Route a frame to an appropriate AX.25 connection.
+ * A NULL ax25_cb indicates an internally generated frame.
*/
int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
{
@@ -865,6 +866,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
if (skb->len < ROSE_MIN_LEN)
return res;
+
+ if (!ax25)
+ return rose_loopback_queue(skb, NULL);
+
frametype = skb->data[2];
lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
if (frametype == ROSE_CALL_REQUEST &&
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 7ca57741b2fbbb..7849f286bb9331 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
struct sk_buff *skb;
unsigned char *dptr;
unsigned char lci1, lci2;
- char buffer[100];
- int len, faclen = 0;
+ int maxfaclen = 0;
+ int len, faclen;
+ int reserve;
- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+ len = ROSE_MIN_LEN;
switch (frametype) {
case ROSE_CALL_REQUEST:
len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
- faclen = rose_create_facilities(buffer, rose);
- len += faclen;
+ maxfaclen = 256;
break;
case ROSE_CALL_ACCEPTED:
case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
break;
}
- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+ if (!skb)
return;
/*
* Space for AX.25 header and PID.
*/
- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+ skb_reserve(skb, reserve);
- dptr = skb_put(skb, skb_tailroom(skb));
+ dptr = skb_put(skb, len);
lci1 = (rose->lci >> 8) & 0x0F;
lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
dptr += ROSE_ADDR_LEN;
memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
dptr += ROSE_ADDR_LEN;
- memcpy(dptr, buffer, faclen);
+ faclen = rose_create_facilities(dptr, rose);
+ skb_put(skb, faclen);
dptr += faclen;
break;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 403746b202637f..040d853f48b985 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -382,22 +382,20 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
tcf_bind_filter(tp, &cr.res, base);
}
- if (old_r)
- tcf_exts_change(tp, &r->exts, &e);
- else
- tcf_exts_change(tp, &cr.exts, &e);
-
if (old_r && old_r != r)
tcindex_filter_result_init(old_r);
oldp = p;
r->res = cr.res;
+ tcf_exts_change(tp, &r->exts, &e);
+
rcu_assign_pointer(tp->root, cp);
if (r == &new_filter_result) {
struct tcindex_filter *nfp;
struct tcindex_filter __rcu **fp;
+ f->result.res = r->res;
tcf_exts_change(tp, &f->result.exts, &r->exts);
fp = cp->h + (handle % cp->hash);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 6d340cd6e2a7ce..b379c330a33886 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1823,7 +1823,6 @@ done:
int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res, bool compat_mode)
{
- __be16 protocol = tc_skb_protocol(skb);
#ifdef CONFIG_NET_CLS_ACT
const struct tcf_proto *old_tp = tp;
int limit = 0;
@@ -1831,6 +1830,7 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
reclassify:
#endif
for (; tp; tp = rcu_dereference_bh(tp->next)) {
+ __be16 protocol = tc_skb_protocol(skb);
int err;
if (tp->protocol != protocol &&
@@ -1857,7 +1857,6 @@ reset:
}
tp = old_tp;
- protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index 3fee70d9814f91..562edd50fa94bc 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -20,7 +20,7 @@
static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
qdisc_drop(skb, sch);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index f9e8deeeac9684..a5745cb2d01453 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -444,7 +444,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
if (tb[TCA_GRED_LIMIT] != NULL)
sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
- return gred_change_table_def(sch, opt);
+ return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}
if (tb[TCA_GRED_PARMS] == NULL ||
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 45d4b2f22f62ee..aff2a1b46f7f38 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -501,6 +501,9 @@ static void hhf_destroy(struct Qdisc *sch)
hhf_free(q->hhf_valid_bits[i]);
}
+ if (!q->hh_flows)
+ return;
+
for (i = 0; i < HH_FLOWS_CNT; i++) {
struct hh_flow_state *flow, *next;
struct list_head *head = &q->hh_flows[i];
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 87b02ed3d5f212..daa01d5604c292 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1025,6 +1025,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
int err;
int i;
+ qdisc_watchdog_init(&q->watchdog, sch);
+ INIT_WORK(&q->work, htb_work_func);
+
if (!opt)
return -EINVAL;
@@ -1045,8 +1048,6 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < TC_HTB_NUMPRIO; i++)
INIT_LIST_HEAD(q->drops + i);
- qdisc_watchdog_init(&q->watchdog, sch);
- INIT_WORK(&q->work, htb_work_func);
__skb_queue_head_init(&q->direct_queue);
if (tb[TCA_HTB_DIRECT_QLEN])
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index bcdd54bb101cc8..cef36ad691dd5c 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -254,7 +254,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
{
struct multiq_sched_data *q = qdisc_priv(sch);
- int i, err;
+ int i;
q->queues = NULL;
@@ -269,12 +269,7 @@ static int multiq_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < q->max_bands; i++)
q->queues[i] = &noop_qdisc;
- err = multiq_tune(sch, opt);
-
- if (err)
- kfree(q->queues);
-
- return err;
+ return multiq_tune(sch, opt);
}
static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index b7c29d5b6f04e5..7acf1f2b8dfc39 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -432,6 +432,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int count = 1;
int rc = NET_XMIT_SUCCESS;
+ /* Do not fool qdisc_drop_all() */
+ skb->prev = NULL;
+
/* Random duplication */
if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
++count;
@@ -943,11 +946,11 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt)
struct netem_sched_data *q = qdisc_priv(sch);
int ret;
+ qdisc_watchdog_init(&q->watchdog, sch);
+
if (!opt)
return -EINVAL;
- qdisc_watchdog_init(&q->watchdog, sch);
-
q->loss_model = CLG_RANDOM;
ret = netem_change(sch, opt);
if (ret)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 0505b8408c8ba4..4bf2b599ef98af 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, child);
if (likely(ret == NET_XMIT_SUCCESS)) {
+ qdisc_qstats_backlog_inc(sch, skb);
sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++;
@@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch)
skb = child->dequeue(child);
if (skb) {
qdisc_bstats_update(sch, skb);
+ qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--;
} else {
if (!red_is_idling(&q->vars))
@@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch)
if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++;
qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= len;
sch->q.qlen--;
return len;
}
@@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch)
struct red_sched_data *q = qdisc_priv(sch);
qdisc_reset(q->qdisc);
+ sch->qstats.backlog = 0;
sch->q.qlen = 0;
red_restart(&q->vars);
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index c2fbde742f3734..a06c9d6bfc9cd6 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -432,12 +432,13 @@ static int tbf_init(struct Qdisc *sch, struct nlattr *opt)
{
struct tbf_sched_data *q = qdisc_priv(sch);
+ qdisc_watchdog_init(&q->watchdog, sch);
+ q->qdisc = &noop_qdisc;
+
if (opt == NULL)
return -EINVAL;
q->t_c = ktime_get_ns();
- qdisc_watchdog_init(&q->watchdog, sch);
- q->qdisc = &noop_qdisc;
return tbf_change(sch, opt);
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a40b8b0ef0d578..f085b01b660391 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -486,8 +486,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc,
void sctp_assoc_rm_peer(struct sctp_association *asoc,
struct sctp_transport *peer)
{
- struct list_head *pos;
- struct sctp_transport *transport;
+ struct sctp_transport *transport;
+ struct list_head *pos;
+ struct sctp_chunk *ch;
pr_debug("%s: association:%p addr:%pISpc\n",
__func__, asoc, &peer->ipaddr.sa);
@@ -543,7 +544,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
*/
if (!list_empty(&peer->transmitted)) {
struct sctp_transport *active = asoc->peer.active_path;
- struct sctp_chunk *ch;
/* Reset the transport of each chunk on this list */
list_for_each_entry(ch, &peer->transmitted,
@@ -565,6 +565,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
sctp_transport_hold(active);
}
+ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list)
+ if (ch->transport == peer)
+ ch->transport = NULL;
+
asoc->peer.transport_count--;
sctp_transport_free(peer);
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 5ca8309ea7b129..9fa0b0dc386839 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -97,10 +97,9 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifa->addr;
addr->a.v6.sin6_scope_id = ifa->idev->dev->ifindex;
addr->valid = 1;
@@ -411,7 +410,6 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v6.sin6_family = AF_INET6;
- addr->a.v6.sin6_port = 0;
addr->a.v6.sin6_addr = ifp->addr;
addr->a.v6.sin6_scope_id = dev->ifindex;
addr->valid = 1;
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index dc030efa4447e0..9f2f3c48b7b62c 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -151,7 +151,6 @@ static void sctp_v4_copy_addrlist(struct list_head *addrlist,
addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
INIT_LIST_HEAD(&addr->list);
@@ -775,10 +774,9 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev,
switch (ev) {
case NETDEV_UP:
- addr = kmalloc(sizeof(struct sctp_sockaddr_entry), GFP_ATOMIC);
+ addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
if (addr) {
addr->a.v4.sin_family = AF_INET;
- addr->a.v4.sin_port = 0;
addr->a.v4.sin_addr.s_addr = ifa->ifa_local;
addr->valid = 1;
spin_lock_bh(&net->sctp.local_addr_lock);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 13c7f42b7040ed..53f1b33bca4e25 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -248,11 +248,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
+ if (asoc && (asoc->base.sk != sk || asoc->base.dead))
+ asoc = NULL;
spin_unlock_bh(&sctp_assocs_id_lock);
- if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
- return NULL;
-
return asoc;
}
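
The sctp_id2assoc() hunk moves the ownership and liveness checks inside the spinlocked region, so the association cannot be unlinked or reused between the idr lookup and the checks. A hedged pthread sketch of the same lookup-then-validate-under-the-lock shape, with an invented table (a real caller would additionally take a reference before unlocking if it uses the object afterwards):

/* Hedged sketch: validate a looked-up object while still holding the
 * lock that protects the table, as the sctp_id2assoc() change does.
 * Table, item and field names are invented for illustration. */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct item {
	int owner;
	bool dead;
};

#define TABLE_SIZE 16
static struct item *table[TABLE_SIZE];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return the item only if it is live and owned by 'me'. The checks run
 * under table_lock, so a concurrent remover cannot invalidate the item
 * between the lookup and the checks. */
static struct item *lookup_checked(int id, int me)
{
	struct item *it = NULL;

	pthread_mutex_lock(&table_lock);
	if (id >= 0 && id < TABLE_SIZE)
		it = table[id];
	if (it && (it->owner != me || it->dead))
		it = NULL;	/* reject while still locked */
	pthread_mutex_unlock(&table_lock);
	return it;
}

int main(void)
{
	static struct item a = { .owner = 1, .dead = false };

	table[3] = &a;
	return lookup_checked(3, 1) == &a ? 0 : 1;
}
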
diff --git a/net/socket.c b/net/socket.c
index e8a1382f25c0e7..ee88c86b207a62 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -89,6 +89,7 @@
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/xattr.h>
+#include <linux/nospec.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -469,27 +470,15 @@ static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
static ssize_t sockfs_getxattr(struct dentry *dentry,
const char *name, void *value, size_t size)
{
- const char *proto_name;
- size_t proto_size;
- int error;
-
- error = -ENODATA;
- if (!strncmp(name, XATTR_NAME_SOCKPROTONAME, XATTR_NAME_SOCKPROTONAME_LEN)) {
- proto_name = dentry->d_name.name;
- proto_size = strlen(proto_name);
-
+ if (!strcmp(name, XATTR_NAME_SOCKPROTONAME)) {
if (value) {
- error = -ERANGE;
- if (proto_size + 1 > size)
- goto out;
-
- strncpy(value, proto_name, proto_size + 1);
+ if (dentry->d_name.len + 1 > size)
+ return -ERANGE;
+ memcpy(value, dentry->d_name.name, dentry->d_name.len + 1);
}
- error = proto_size + 1;
+ return dentry->d_name.len + 1;
}
-
-out:
- return error;
+ return -EOPNOTSUPP;
}
static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
@@ -527,7 +516,10 @@ static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
- sock->sk->sk_uid = iattr->ia_uid;
+ if (sock->sk)
+ sock->sk->sk_uid = iattr->ia_uid;
+ else
+ err = -ENOENT;
}
return err;
@@ -578,12 +570,17 @@ static struct socket *sock_alloc(void)
* an inode not a file.
*/
-void sock_release(struct socket *sock)
+static void __sock_release(struct socket *sock, struct inode *inode)
{
if (sock->ops) {
struct module *owner = sock->ops->owner;
+ if (inode)
+ inode_lock(inode);
sock->ops->release(sock);
+ sock->sk = NULL;
+ if (inode)
+ inode_unlock(inode);
sock->ops = NULL;
module_put(owner);
}
@@ -598,6 +595,11 @@ void sock_release(struct socket *sock)
}
sock->file = NULL;
}
+
+void sock_release(struct socket *sock)
+{
+ __sock_release(sock, NULL);
+}
EXPORT_SYMBOL(sock_release);
void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags)
@@ -1034,7 +1036,7 @@ static int sock_mmap(struct file *file, struct vm_area_struct *vma)
static int sock_close(struct inode *inode, struct file *filp)
{
- sock_release(SOCKET_I(inode));
+ __sock_release(SOCKET_I(inode), inode);
return 0;
}
@@ -2338,6 +2340,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
if (call < 1 || call > SYS_SENDMMSG)
return -EINVAL;
+ call = array_index_nospec(call, SYS_SENDMMSG + 1);
len = nargs[call];
if (len > sizeof(a))
@@ -2772,9 +2775,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32)
copy_in_user(&rxnfc->fs.ring_cookie,
&compat_rxnfc->fs.ring_cookie,
(void __user *)(&rxnfc->fs.location + 1) -
- (void __user *)&rxnfc->fs.ring_cookie) ||
- copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt,
- sizeof(rxnfc->rule_cnt)))
+ (void __user *)&rxnfc->fs.ring_cookie))
+ return -EFAULT;
+ if (ethcmd == ETHTOOL_GRXCLSRLALL) {
+ if (put_user(rule_cnt, &rxnfc->rule_cnt))
+ return -EFAULT;
+ } else if (copy_in_user(&rxnfc->rule_cnt,
+ &compat_rxnfc->rule_cnt,
+ sizeof(rxnfc->rule_cnt)))
return -EFAULT;
}
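
The socketcall() hunk clamps the user-controlled call number with array_index_nospec() before it indexes nargs[], so a mispredicted bounds check cannot be used to read out of bounds speculatively (Spectre v1). A userspace reconstruction of the branchless mask; the real kernel helper is arch-specific and wrapped in compiler barriers, so this shows only the arithmetic:

/* Hedged sketch: branchless index clamping in the style of
 * array_index_nospec(). The mask is all-ones when index < size and
 * zero otherwise, computed without a branch the CPU could mispredict. */
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(long))

static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

static unsigned long index_nospec(unsigned long index, unsigned long size)
{
	return index & index_mask_nospec(index, size);
}

int main(void)
{
	static const unsigned char nargs[8] = { 0, 3, 3, 3, 2, 3, 3, 4 };
	unsigned long call = 5;			/* pretend user-controlled */

	if (call >= sizeof(nargs))		/* architectural bounds check */
		return 1;
	call = index_nospec(call, sizeof(nargs)); /* clamp speculation too */
	printf("nargs[%lu] = %u\n", call, nargs[call]);
	return 0;
}
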
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index 41248b1820c778..cc27e38392ea16 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -272,13 +272,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred)
{
struct auth_cred *acred = &container_of(cred, struct generic_cred,
gc_base)->acred;
- bool ret;
-
- get_rpccred(cred);
- ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
- put_rpccred(cred);
-
- return ret;
+ return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags);
}
static const struct rpc_credops generic_credops = {
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 1f0687d8e3d770..62fca77bf3c705 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1722,6 +1722,7 @@ priv_release_snd_buf(struct rpc_rqst *rqstp)
for (i=0; i < rqstp->rq_enc_pages_num; i++)
__free_page(rqstp->rq_enc_pages[i]);
kfree(rqstp->rq_enc_pages);
+ rqstp->rq_release_snd_buf = NULL;
}
static int
@@ -1730,6 +1731,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
int first, last, i;
+ if (rqstp->rq_release_snd_buf)
+ rqstp->rq_release_snd_buf(rqstp);
+
if (snd_buf->page_len == 0) {
rqstp->rq_enc_pages_num = 0;
return 0;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 036bbf2b44c161..b5291ea54a3d80 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1105,7 +1105,7 @@ static int svcauth_gss_legacy_init(struct svc_rqst *rqstp,
struct kvec *resv = &rqstp->rq_res.head[0];
struct rsi *rsip, rsikey;
int ret;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
memset(&rsikey, 0, sizeof(rsikey));
ret = gss_read_verf(gc, argv, authp,
@@ -1216,7 +1216,7 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
uint64_t handle;
int status;
int ret;
- struct net *net = rqstp->rq_xprt->xpt_net;
+ struct net *net = SVC_NET(rqstp);
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
memset(&ud, 0, sizeof(ud));
@@ -1406,7 +1406,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
__be32 *rpcstart;
__be32 *reject_stat = resv->iov_base + resv->iov_len;
int ret;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
dprintk("RPC: svcauth_gss: argv->iov_len = %zd\n",
argv->iov_len);
@@ -1694,7 +1694,7 @@ svcauth_gss_release(struct svc_rqst *rqstp)
struct rpc_gss_wire_cred *gc = &gsd->clcred;
struct xdr_buf *resbuf = &rqstp->rq_res;
int stat = -EINVAL;
- struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
+ struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
if (gc->gc_proc != RPC_GSS_PROC_DATA)
goto out;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 63fb5ee212cf8d..af17b00145e1c5 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -54,6 +54,11 @@ static void cache_init(struct cache_head *h, struct cache_detail *detail)
h->last_refresh = now;
}
+static void cache_fresh_locked(struct cache_head *head, time_t expiry,
+ struct cache_detail *detail);
+static void cache_fresh_unlocked(struct cache_head *head,
+ struct cache_detail *detail);
+
struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
struct cache_head *key, int hash)
{
@@ -95,6 +100,7 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
if (cache_is_expired(detail, tmp)) {
hlist_del_init(&tmp->cache_list);
detail->entries --;
+ cache_fresh_locked(tmp, 0, detail);
freeme = tmp;
break;
}
@@ -110,8 +116,10 @@ struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail,
cache_get(new);
write_unlock(&detail->hash_lock);
- if (freeme)
+ if (freeme) {
+ cache_fresh_unlocked(freeme, detail);
cache_put(freeme, detail);
+ }
return new;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index cf5770d8f49af9..c89626b2afffb7 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -772,6 +772,12 @@ void rpcb_getport_async(struct rpc_task *task)
case RPCBVERS_3:
map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID];
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_ATOMIC);
+ if (!map->r_addr) {
+ status = -ENOMEM;
+ dprintk("RPC: %5u %s: no memory available\n",
+ task->tk_pid, __func__);
+ goto bailout_free_args;
+ }
map->r_owner = "";
break;
case RPCBVERS_2:
@@ -794,6 +800,8 @@ void rpcb_getport_async(struct rpc_task *task)
rpc_put_task(child);
return;
+bailout_free_args:
+ kfree(map);
bailout_release_client:
rpc_release_client(rpcb_clnt);
bailout_nofree:
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c5b0cb4f4056c4..41f6e964fe91fe 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1062,6 +1062,8 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
+extern void svc_tcp_prep_reply_hdr(struct svc_rqst *);
+
/*
* Common routine for processing the RPC request.
*/
@@ -1091,7 +1093,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Setup reply header */
- rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);
+ if (rqstp->rq_prot == IPPROTO_TCP)
+ svc_tcp_prep_reply_hdr(rqstp);
svc_putu32(resv, rqstp->rq_xid);
@@ -1138,7 +1141,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
case SVC_DENIED:
goto err_bad_auth;
case SVC_CLOSE:
- if (test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
+ if (rqstp->rq_xprt &&
+ test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
svc_close_xprt(rqstp->rq_xprt);
case SVC_DROP:
goto dropit;
@@ -1360,10 +1364,10 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
dprintk("svc: %s(%p)\n", __func__, req);
/* Build the svc_rqst used by the common processing routine */
- rqstp->rq_xprt = serv->sv_bc_xprt;
rqstp->rq_xid = req->rq_xid;
rqstp->rq_prot = req->rq_xprt->prot;
rqstp->rq_server = serv;
+ rqstp->rq_bc_net = req->rq_xprt->xprt_net;
rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index a6cbb2104667d2..2b8e80c721db1a 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -454,10 +454,11 @@ out:
*/
void svc_reserve(struct svc_rqst *rqstp, int space)
{
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+
space += rqstp->rq_res.head[0].iov_len;
- if (space < rqstp->rq_reserved) {
- struct svc_xprt *xprt = rqstp->rq_xprt;
+ if (xprt && space < rqstp->rq_reserved) {
atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved);
rqstp->rq_reserved = space;
@@ -945,7 +946,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
spin_lock(&xprt->xpt_lock);
while (!list_empty(&xprt->xpt_users)) {
u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list);
- list_del(&u->list);
+ list_del_init(&u->list);
u->callback(u);
}
spin_unlock(&xprt->xpt_lock);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index bfe0a06530f6aa..8b20b27c119038 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -613,7 +613,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
/* Don't enable netstamp; sunrpc doesn't
need that much accuracy */
}
- svsk->sk_sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
len = skb->len - sizeof(struct udphdr);
@@ -1239,7 +1239,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
/*
* Setup response header. TCP has a 4B record length field.
*/
-static void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
+void svc_tcp_prep_reply_hdr(struct svc_rqst *rqstp)
{
struct kvec *resv = &rqstp->rq_res.head[0];
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 4439ac4c1b53fc..ed9bbd383f7d3e 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -512,7 +512,7 @@ EXPORT_SYMBOL_GPL(xdr_commit_encode);
static __be32 *xdr_get_next_encode_buffer(struct xdr_stream *xdr,
size_t nbytes)
{
- static __be32 *p;
+ __be32 *p;
int space_left;
int frag1bytes, frag2bytes;
@@ -639,11 +639,10 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
WARN_ON_ONCE(xdr->iov);
return;
}
- if (fraglen) {
+ if (fraglen)
xdr->end = head->iov_base + head->iov_len;
- xdr->page_ptr--;
- }
/* (otherwise assume xdr->end is already set) */
+ xdr->page_ptr--;
head->iov_len = len;
buf->len = len;
xdr->p = head->iov_base + head->iov_len;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 2e98f4a243e57d..112c191b833630 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -758,8 +758,15 @@ void xprt_connect(struct rpc_task *task)
return;
if (xprt_test_and_set_connecting(xprt))
return;
- xprt->stat.connect_start = jiffies;
- xprt->ops->connect(xprt, task);
+ /* Race breaker */
+ if (!xprt_connected(xprt)) {
+ xprt->stat.connect_start = jiffies;
+ xprt->ops->connect(xprt, task);
+ } else {
+ xprt_clear_connecting(xprt);
+ task->tk_status = 0;
+ rpc_wake_up_queued_task(&xprt->pending, task);
+ }
}
xprt_release_write(xprt, task);
}
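
The xprt_connect() hunk re-checks xprt_connected() after winning the connecting bit, so a connection that completed between the first check and the flag acquisition is not torn down and redialed. A hedged C11 sketch of that race-breaker shape with invented names; in the kernel the flag is cleared on the completion paths rather than synchronously as here:

/* Hedged sketch: test-and-set a claim flag, then re-check the condition
 * before acting ("race breaker"), as in the xprt_connect() change. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag connecting = ATOMIC_FLAG_INIT;
static atomic_bool connected = false;

static void do_connect(void)		/* stand-in for ops->connect() */
{
	atomic_store(&connected, true);
	puts("dialed");
}

static void maybe_connect(void)
{
	if (atomic_load(&connected))
		return;				/* fast path */
	if (atomic_flag_test_and_set(&connecting))
		return;				/* somebody else is on it */
	/* Race breaker: the connection may have completed between the
	 * first check and winning the flag; do not dial again if so. */
	if (!atomic_load(&connected))
		do_connect();
	atomic_flag_clear(&connecting);
}

int main(void)
{
	maybe_connect();	/* dials */
	maybe_connect();	/* no-op: already connected */
	return atomic_load(&connected) ? 0 : 1;
}
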
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index f86c6555a539ef..e9653c42cdd16a 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -87,6 +87,11 @@ static int tipc_skb_tailroom(struct sk_buff *skb)
return limit;
}
+static inline int TLV_GET_DATA_LEN(struct tlv_desc *tlv)
+{
+ return TLV_GET_LEN(tlv) - TLV_SPACE(0);
+}
+
static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
{
struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
@@ -166,6 +171,11 @@ static struct sk_buff *tipc_get_err_tlv(char *str)
return buf;
}
+static inline bool string_is_valid(char *s, int len)
+{
+ return memchr(s, '\0', len) ? true : false;
+}
+
static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
struct tipc_nl_compat_msg *msg,
struct sk_buff *arg)
@@ -364,6 +374,7 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_bearer_config *b;
+ int len;
b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
@@ -371,6 +382,10 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(b->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
return -EMSGSIZE;
@@ -396,6 +411,7 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *bearer;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -403,6 +419,10 @@ static int tipc_nl_compat_bearer_disable(struct tipc_nl_compat_cmd_doit *cmd,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
return -EMSGSIZE;
@@ -462,6 +482,7 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
+ int len;
nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
@@ -472,6 +493,11 @@ static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
NULL);
name = (char *)TLV_DATA(msg->req);
+
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
return 0;
@@ -605,6 +631,7 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *media;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -612,6 +639,10 @@ static int tipc_nl_compat_media_set(struct sk_buff *skb,
if (!media)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_MEDIA_NAME, lc->name))
return -EMSGSIZE;
@@ -632,6 +663,7 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
struct nlattr *prop;
struct nlattr *bearer;
struct tipc_link_config *lc;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
@@ -639,6 +671,10 @@ static int tipc_nl_compat_bearer_set(struct sk_buff *skb,
if (!bearer)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_MEDIA_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, lc->name))
return -EMSGSIZE;
@@ -687,9 +723,14 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
struct tipc_link_config *lc;
struct tipc_bearer *bearer;
struct tipc_media *media;
+ int len;
lc = (struct tipc_link_config *)TLV_DATA(msg->req);
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(lc->name, len))
+ return -EINVAL;
+
media = tipc_media_find(lc->name);
if (media) {
cmd->doit = &tipc_nl_media_set;
@@ -711,6 +752,7 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
{
char *name;
struct nlattr *link;
+ int len;
name = (char *)TLV_DATA(msg->req);
@@ -718,6 +760,10 @@ static int tipc_nl_compat_link_reset_stats(struct tipc_nl_compat_cmd_doit *cmd,
if (!link)
return -EMSGSIZE;
+ len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+ if (!string_is_valid(name, len))
+ return -EINVAL;
+
if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
return -EMSGSIZE;
@@ -739,6 +785,8 @@ static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
};
ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
+ if (TLV_GET_DATA_LEN(msg->req) < sizeof(struct tipc_name_table_query))
+ return -EINVAL;
depth = ntohl(ntq->depth);
@@ -1117,7 +1165,7 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
}
len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (len && !TLV_OK(msg.req, len)) {
+ if (!len || !TLV_OK(msg.req, len)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
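
The added string_is_valid()/TLV_GET_DATA_LEN() checks reject TLV payload strings that are not NUL-terminated within the received length before they reach nla_put_string() or strcmp(). The heart of the check is a single memchr(); a standalone sketch with invented buffers (the TLV framing is omitted):

/* Hedged sketch: refuse to treat untrusted bytes as a C string unless a
 * terminator exists within the stated length, as the tipc compat fixes
 * do via string_is_valid(). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool string_is_valid(const char *s, size_t len)
{
	return memchr(s, '\0', len) != NULL;
}

int main(void)
{
	char good[8] = "eth0";			/* NUL inside the buffer */
	char bad[4] = { 'e', 't', 'h', '0' };	/* no terminator at all */

	printf("good: %svalid\n", string_is_valid(good, sizeof(good)) ? "" : "in");
	printf("bad:  %svalid\n", string_is_valid(bad, sizeof(bad)) ? "" : "in");
	/* Only after the check is it safe to hand the data to %s/strcmp. */
	return 0;
}
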
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index f9ff73a8d8154f..500c9e614a0631 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -337,7 +337,7 @@ int tipc_topsrv_start(struct net *net)
topsrv->tipc_conn_new = tipc_subscrb_connect_cb;
topsrv->tipc_conn_shutdown = tipc_subscrb_shutdown_cb;
- strncpy(topsrv->name, name, strlen(name) + 1);
+ strscpy(topsrv->name, name, sizeof(topsrv->name));
tn->topsrv = topsrv;
atomic_set(&tn->subscription_count, 0);
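
The subscr.c hunk replaces strncpy(dst, src, strlen(src) + 1), which sizes the copy by the source and can overrun topsrv->name, with strscpy() sized by the destination. Userspace has no strscpy(); a minimal stand-in written under that assumption (return values simplified; this is not the kernel implementation):

/* Hedged sketch: copy bounded by the destination size, always
 * NUL-terminate, and report truncation, in the spirit of strscpy(). */
#include <stdio.h>
#include <string.h>

static long my_strscpy(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -1;		/* the kernel returns -E2BIG */
	len = strlen(src);
	if (len >= size) {
		memcpy(dst, src, size - 1);
		dst[size - 1] = '\0';
		return -1;		/* truncated, but terminated */
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char name[8];

	my_strscpy(name, "TIPC topology server", sizeof(name));
	printf("%s\n", name);		/* "TIPC to": clipped, no overrun */
	return 0;
}
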
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c246a859f0c9c5..e3064c89ef34b9 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -890,7 +890,7 @@ retry:
addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
spin_unlock(&unix_table_lock);
err = 0;
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
err = 0;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(list, sk);
out_unlock:
@@ -1331,15 +1331,29 @@ restart:
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
- /* copy address information from listening to new sock*/
- if (otheru->addr) {
- atomic_inc(&otheru->addr->refcnt);
- newu->addr = otheru->addr;
- }
+ /* copy address information from listening to new sock
+ *
+ * The contents of *(otheru->addr) and otheru->path
+ * are seen fully set up here, since we have found
+ * otheru in hash under unix_table_lock. Insertion
+ * into the hash chain we'd found it in had been done
+ * in an earlier critical section protected by unix_table_lock,
+ * the same one where we'd set *(otheru->addr) contents,
+ * as well as otheru->path and otheru->addr itself.
+ *
+ * Using smp_store_release() here to set newu->addr
+ * is enough to make those stores, as well as stores
+ * to newu->path visible to anyone who gets newu->addr
+ * by smp_load_acquire(). IOW, the same guarantees
+ * as for unix_sock instances bound in unix_bind() or
+ * in unix_autobind().
+ */
if (otheru->path.dentry) {
path_get(&otheru->path);
newu->path = otheru->path;
}
+ atomic_inc(&otheru->addr->refcnt);
+ smp_store_release(&newu->addr, otheru->addr);
/* Set credentials */
copy_peercred(sk, other);
@@ -1452,7 +1466,7 @@ out:
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
- struct unix_sock *u;
+ struct unix_address *addr;
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
@@ -1467,19 +1481,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_
sock_hold(sk);
}
- u = unix_sk(sk);
- unix_state_lock(sk);
- if (!u->addr) {
+ addr = smp_load_acquire(&unix_sk(sk)->addr);
+ if (!addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
*uaddr_len = sizeof(short);
} else {
- struct unix_address *addr = u->addr;
-
*uaddr_len = addr->len;
memcpy(sunaddr, addr->name, *uaddr_len);
}
- unix_state_unlock(sk);
sock_put(sk);
out:
return err;
@@ -1531,7 +1541,6 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
unsigned char max_level = 0;
- int unix_sock_count = 0;
if (too_many_unix_fds(current))
return -ETOOMANYREFS;
@@ -1539,11 +1548,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
for (i = scm->fp->count - 1; i >= 0; i--) {
struct sock *sk = unix_get_socket(scm->fp->fp[i]);
- if (sk) {
- unix_sock_count++;
+ if (sk)
max_level = max(max_level,
unix_sk(sk)->recursion_level);
- }
}
if (unlikely(max_level > MAX_RECURSION_LEVEL))
return -ETOOMANYREFS;
@@ -2096,11 +2103,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
- struct unix_sock *u = unix_sk(sk);
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
- if (u->addr) {
- msg->msg_namelen = u->addr->len;
- memcpy(msg->msg_name, u->addr->name, u->addr->len);
+ if (addr) {
+ msg->msg_namelen = addr->len;
+ memcpy(msg->msg_name, addr->name, addr->len);
}
}
@@ -2823,7 +2830,7 @@ static int unix_seq_show(struct seq_file *seq, void *v)
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
- if (u->addr) {
- if (u->addr) { /* under unix_table_lock here */
int i, len;
seq_putc(seq, ' ');
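
The af_unix changes publish u->addr with smp_store_release() and read it with smp_load_acquire(), so any reader that observes the pointer also observes the fully initialized address behind it. A hedged C11-atomics sketch of the same publish/consume pairing, with invented thread and struct names (build with -pthread):

/* Hedged sketch: release-store a pointer after initializing what it
 * points to; acquire-load it before dereferencing. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct addr {
	int len;
	char name[16];
};

static _Atomic(struct addr *) published;

static void *writer(void *arg)
{
	static struct addr a;

	(void)arg;
	a.len = 5;
	snprintf(a.name, sizeof(a.name), "/sock");
	/* release: every store to 'a' above is visible to a reader that
	 * acquire-loads the pointer below */
	atomic_store_explicit(&published, &a, memory_order_release);
	return NULL;
}

static void *reader(void *arg)
{
	struct addr *p;

	(void)arg;
	p = atomic_load_explicit(&published, memory_order_acquire);
	if (p)	/* seeing the pointer implies seeing its contents */
		printf("len=%d name=%s\n", p->len, p->name);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}
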
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e83462e5..3183d9b8ab3323 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
- struct unix_address *addr = unix_sk(sk)->addr;
+ /* might or might not hold unix_table_lock */
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
return 0;
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9e335db7baa777..81abe3a96dc56e 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -440,14 +440,14 @@ static int vsock_send_shutdown(struct sock *sk, int mode)
return transport->shutdown(vsock_sk(sk), mode);
}
-void vsock_pending_work(struct work_struct *work)
+static void vsock_pending_work(struct work_struct *work)
{
struct sock *sk;
struct sock *listener;
struct vsock_sock *vsk;
bool cleanup;
- vsk = container_of(work, struct vsock_sock, dwork.work);
+ vsk = container_of(work, struct vsock_sock, pending_work.work);
sk = sk_vsock(vsk);
listener = vsk->listener;
cleanup = true;
@@ -487,7 +487,6 @@ out:
sock_put(sk);
sock_put(listener);
}
-EXPORT_SYMBOL_GPL(vsock_pending_work);
/**** SOCKET OPERATIONS ****/
@@ -586,6 +585,8 @@ static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
return retval;
}
+static void vsock_connect_timeout(struct work_struct *work);
+
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
@@ -628,6 +629,8 @@ struct sock *__vsock_create(struct net *net,
vsk->sent_request = false;
vsk->ignore_connecting_rst = false;
vsk->peer_shutdown = 0;
+ INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
+ INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);
psk = parent ? vsock_sk(parent) : NULL;
if (parent) {
@@ -1098,7 +1101,7 @@ static void vsock_connect_timeout(struct work_struct *work)
struct sock *sk;
struct vsock_sock *vsk;
- vsk = container_of(work, struct vsock_sock, dwork.work);
+ vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);
lock_sock(sk);
@@ -1199,9 +1202,7 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
* timeout fires.
*/
sock_hold(sk);
- INIT_DELAYED_WORK(&vsk->dwork,
- vsock_connect_timeout);
- schedule_delayed_work(&vsk->dwork, timeout);
+ schedule_delayed_work(&vsk->connect_work, timeout);
/* Skip ahead to preserve error code set above. */
goto out_wait;
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 5f8c99eb104c53..b2abdc07ac799f 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -273,6 +273,31 @@ vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
}
static int
+vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
+ struct sockaddr_vm *dst,
+ enum vmci_transport_packet_type type,
+ u64 size,
+ u64 mode,
+ struct vmci_transport_waiting_info *wait,
+ u16 proto,
+ struct vmci_handle handle)
+{
+ struct vmci_transport_packet *pkt;
+ int err;
+
+ pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
+ if (!pkt)
+ return -ENOMEM;
+
+ err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
+ mode, wait, proto, handle,
+ true);
+ kfree(pkt);
+
+ return err;
+}
+
+static int
vmci_transport_send_control_pkt(struct sock *sk,
enum vmci_transport_packet_type type,
u64 size,
@@ -281,9 +306,7 @@ vmci_transport_send_control_pkt(struct sock *sk,
u16 proto,
struct vmci_handle handle)
{
- struct vmci_transport_packet *pkt;
struct vsock_sock *vsk;
- int err;
vsk = vsock_sk(sk);
@@ -293,17 +316,10 @@ vmci_transport_send_control_pkt(struct sock *sk,
if (!vsock_addr_bound(&vsk->remote_addr))
return -EINVAL;
- pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
- return -ENOMEM;
-
- err = __vmci_transport_send_control_pkt(pkt, &vsk->local_addr,
- &vsk->remote_addr, type, size,
- mode, wait, proto, handle,
- true);
- kfree(pkt);
-
- return err;
+ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
+ &vsk->remote_addr,
+ type, size, mode,
+ wait, proto, handle);
}
static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
@@ -321,12 +337,29 @@ static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
static int vmci_transport_send_reset(struct sock *sk,
struct vmci_transport_packet *pkt)
{
+ struct sockaddr_vm *dst_ptr;
+ struct sockaddr_vm dst;
+ struct vsock_sock *vsk;
+
if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
return 0;
- return vmci_transport_send_control_pkt(sk,
- VMCI_TRANSPORT_PACKET_TYPE_RST,
- 0, 0, NULL, VSOCK_PROTO_INVALID,
- VMCI_INVALID_HANDLE);
+
+ vsk = vsock_sk(sk);
+
+ if (!vsock_addr_bound(&vsk->local_addr))
+ return -EINVAL;
+
+ if (vsock_addr_bound(&vsk->remote_addr)) {
+ dst_ptr = &vsk->remote_addr;
+ } else {
+ vsock_addr_init(&dst, pkt->dg.src.context,
+ pkt->src_port);
+ dst_ptr = &dst;
+ }
+ return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
+ VMCI_TRANSPORT_PACKET_TYPE_RST,
+ 0, 0, NULL, VSOCK_PROTO_INVALID,
+ VMCI_INVALID_HANDLE);
}
static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
@@ -1099,8 +1132,7 @@ static int vmci_transport_recv_listen(struct sock *sk,
vpending->listener = sk;
sock_hold(sk);
sock_hold(pending);
- INIT_DELAYED_WORK(&vpending->dwork, vsock_pending_work);
- schedule_delayed_work(&vpending->dwork, HZ);
+ schedule_delayed_work(&vpending->pending_work, HZ);
out:
return err;
@@ -1624,6 +1656,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
static void vmci_transport_destruct(struct vsock_sock *vsk)
{
+ /* transport can be NULL if we hit a failure at init() time */
+ if (!vmci_trans(vsk))
+ return;
+
/* Ensure that the detach callback doesn't use the sk/vsk
* we are about to destruct.
*/
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a08567db87c35c..28a52bd8436932 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3725,6 +3725,7 @@ static int parse_station_flags(struct genl_info *info,
params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
BIT(NL80211_STA_FLAG_MFP) |
BIT(NL80211_STA_FLAG_AUTHORIZED);
+ break;
default:
return -EINVAL;
}
@@ -10960,6 +10961,7 @@ static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
if (!info->attrs[NL80211_ATTR_MDID] ||
+ !info->attrs[NL80211_ATTR_IE] ||
!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -12786,7 +12788,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -12938,7 +12940,7 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -12978,7 +12980,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -13016,7 +13018,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
if (!msg)
return;
@@ -13093,7 +13095,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
trace_cfg80211_notify_new_peer_candidate(dev, addr);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + ie_len, gfp);
if (!msg)
return;
@@ -13464,7 +13466,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return -ENOMEM;
@@ -13508,7 +13510,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -14317,7 +14319,7 @@ void cfg80211_ft_event(struct net_device *netdev,
if (!ft_event->target_ap)
return;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
if (!msg)
return;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 0075cb680b6f3d..9bbdf1881c3dfb 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1219,7 +1219,7 @@ static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
* definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
* however it is safe for now to assume that a frequency rule should not be
* part of a frequency's band if the start freq or end freq are off by more
- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
* 60 GHz band.
* This resolution can be lowered and should be considered as we add
* regulatory rule support for other "bands".
@@ -1234,7 +1234,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
* with the Channel starting frequency above 45 GHz.
*/
u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
@@ -2689,11 +2689,12 @@ static void reg_process_hint(struct regulatory_request *reg_request)
{
struct wiphy *wiphy = NULL;
enum reg_request_treatment treatment;
+ enum nl80211_reg_initiator initiator = reg_request->initiator;
if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
- switch (reg_request->initiator) {
+ switch (initiator) {
case NL80211_REGDOM_SET_BY_CORE:
treatment = reg_process_hint_core(reg_request);
break;
@@ -2711,7 +2712,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
treatment = reg_process_hint_country_ie(wiphy, reg_request);
break;
default:
- WARN(1, "invalid initiator %d\n", reg_request->initiator);
+ WARN(1, "invalid initiator %d\n", initiator);
goto out_free;
}
@@ -2726,7 +2727,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
*/
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
- wiphy_update_regulatory(wiphy, reg_request->initiator);
+ wiphy_update_regulatory(wiphy, initiator);
wiphy_all_share_dfs_chan_state(wiphy);
reg_check_channels();
}
@@ -2903,6 +2904,7 @@ static int regulatory_hint_core(const char *alpha2)
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
request->initiator = NL80211_REGDOM_SET_BY_CORE;
+ request->wiphy_idx = WIPHY_IDX_INVALID;
queue_regulatory_request(request);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 9428bead5b9e42..178c5aaceb5f6a 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1124,8 +1124,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
u32 bitrate;
int idx;
- if (WARN_ON_ONCE(rate->mcs > 9))
- return 0;
+ if (rate->mcs > 9)
+ goto warn;
switch (rate->bw) {
case RATE_INFO_BW_160:
@@ -1140,8 +1140,7 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
case RATE_INFO_BW_5:
case RATE_INFO_BW_10:
default:
- WARN_ON(1);
- /* fall through */
+ goto warn;
case RATE_INFO_BW_20:
idx = 0;
}
@@ -1154,6 +1153,10 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
/* do NOT round down here */
return (bitrate + 50000) / 100000;
+ warn:
+ WARN_ONCE(1, "invalid rate bw=%d, mcs=%d, nss=%d\n",
+ rate->bw, rate->mcs, rate->nss);
+ return 0;
}
u32 cfg80211_calculate_bitrate(struct rate_info *rate)
@@ -1356,7 +1359,7 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
u8 *op_class)
{
u8 vht_opclass;
- u16 freq = chandef->center_freq1;
+ u32 freq = chandef->center_freq1;
if (freq >= 2412 && freq <= 2472) {
if (chandef->width > NL80211_CHAN_WIDTH_40)
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index c6ab4da4b8e214..5dca42dbc73758 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
unsigned int lci = 1;
struct sock *sk;
- read_lock_bh(&x25_list_lock);
-
- while ((sk = __x25_find_socket(lci, nb)) != NULL) {
+ while ((sk = x25_find_socket(lci, nb)) != NULL) {
sock_put(sk);
if (++lci == 4096) {
lci = 0;
break;
}
+ cond_resched();
}
- read_unlock_bh(&x25_list_lock);
return lci;
}
@@ -680,8 +678,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
- if (!sock_flag(sk, SOCK_ZAPPED) ||
- addr_len != sizeof(struct sockaddr_x25) ||
+ if (addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25) {
rc = -EINVAL;
goto out;
@@ -696,9 +693,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
}
lock_sock(sk);
- x25_sk(sk)->source_addr = addr->sx25_addr;
- x25_insert_socket(sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ x25_sk(sk)->source_addr = addr->sx25_addr;
+ x25_insert_socket(sk);
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ } else {
+ rc = -EINVAL;
+ }
release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
@@ -814,8 +815,13 @@ static int x25_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTED;
rc = 0;
out_put_neigh:
- if (rc)
+ if (rc) {
+ read_lock_bh(&x25_list_lock);
x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ x25->state = X25_STATE_0;
+ }
out_put_route:
x25_route_put(rt);
out:
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 922b9daaa819d7..633b12c73c4990 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -626,6 +626,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
/* re-insert all policies by order of creation */
list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
+ if (policy->walk.dead ||
+ xfrm_policy_id2dir(policy->index) >= XFRM_POLICY_MAX) {
+ /* skip socket policies */
+ continue;
+ }
newpos = NULL;
chain = policy_hash_bysel(net, &policy->selector,
policy->family,
@@ -1841,7 +1846,10 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
/* Try to instantiate a bundle */
err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
if (err <= 0) {
- if (err != 0 && err != -EAGAIN)
+ if (err == 0)
+ return NULL;
+
+ if (err != -EAGAIN)
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
return ERR_PTR(err);
}
@@ -2322,6 +2330,9 @@ struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
return make_blackhole(net, dst_orig->ops->family, dst_orig);
+ if (IS_ERR(dst))
+ dst_release(dst_orig);
+
return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 0ae127841883d4..be480ae049b340 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -615,7 +615,7 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
spin_lock_bh(&net->xfrm.xfrm_state_lock);
si->sadcnt = net->xfrm.state_num;
- si->sadhcnt = net->xfrm.state_hmask;
+ si->sadhcnt = net->xfrm.state_hmask + 1;
si->sadhmcnt = xfrm_state_hashmax;
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
@@ -1840,11 +1840,6 @@ int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
-#ifdef CONFIG_COMPAT
- if (is_compat_task())
- return -EOPNOTSUPP;
-#endif
-
if (!optval && !optlen) {
xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05a6e3d9c258c0..235de9dd9ab3f9 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,9 +54,13 @@ int __net_init xfrm_sysctl_init(struct net *net)
table[2].data = &net->xfrm.sysctl_larval_drop;
table[3].data = &net->xfrm.sysctl_acq_expires;
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
+ /* Only export xfrm_acq_expires to unprivileged users. This is required
+ * by the Android IPsec stack to pass CTS.
+ */
+ if (net->user_ns != &init_user_ns) {
+ table[0] = table[3];
+ table[1].procname = NULL;
+ }
net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
if (!net->xfrm.sysctl_hdr)
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 93878e7f5e7a51..f6bb1f30db775c 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -151,10 +151,16 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
err = -EINVAL;
switch (p->family) {
case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ goto out;
+
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ goto out;
+
break;
#else
err = -EAFNOSUPPORT;
@@ -988,10 +994,12 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
{
struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
- if (nlsk)
- return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
- else
- return -1;
+ if (!nlsk) {
+ kfree_skb(skb);
+ return -EPIPE;
+ }
+
+ return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
static inline size_t xfrm_spdinfo_msgsize(void)
@@ -1318,10 +1326,16 @@ static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
switch (p->sel.family) {
case AF_INET:
+ if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
+ return -EINVAL;
+
break;
case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
+ if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
+ return -EINVAL;
+
break;
#else
return -EAFNOSUPPORT;
@@ -1398,8 +1412,16 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
if (!ut[i].family)
ut[i].family = family;
- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
- (ut[i].family != prev_family))
+ switch (ut[i].mode) {
+ case XFRM_MODE_TUNNEL:
+ case XFRM_MODE_BEET:
+ break;
+ default:
+ if (ut[i].family != prev_family)
+ return -EINVAL;
+ break;
+ }
+ if (ut[i].mode >= XFRM_MODE_MAX)
return -EINVAL;
prev_family = ut[i].family;
@@ -1632,9 +1654,11 @@ static inline size_t userpolicy_type_attrsize(void)
#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
- struct xfrm_userpolicy_type upt = {
- .type = type,
- };
+ struct xfrm_userpolicy_type upt;
+
+ /* Sadly there are two holes in struct xfrm_userpolicy_type */
+ memset(&upt, 0, sizeof(upt));
+ upt.type = type;
return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}
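
copy_to_user_policy_type() switches from a designated initializer to memset() because an initializer only zeroes the named members; the padding holes in struct xfrm_userpolicy_type keep whatever was on the stack, and those bytes would leak to userspace through the netlink attribute. A sketch with an invented struct that has the same kind of hole (compilers often happen to zero padding here, but the language does not guarantee it):

/* Hedged sketch: struct padding is not covered by an initializer, so
 * memset() the whole object before exporting its raw bytes. */
#include <stdio.h>
#include <string.h>

struct upt_like {
	unsigned char type;	/* 1 byte... */
	/* ...typically 3 bytes of padding here... */
	unsigned int reserved;	/* 4 bytes */
};

static void dump(const char *tag, const struct upt_like *p)
{
	const unsigned char *b = (const unsigned char *)p;

	printf("%s:", tag);
	for (size_t i = 0; i < sizeof(*p); i++)
		printf(" %02x", b[i]);
	putchar('\n');
}

int main(void)
{
	struct upt_like a = { .type = 7 };	/* padding: unspecified */
	struct upt_like b;

	memset(&b, 0, sizeof(b));		/* clears padding too */
	b.type = 7;

	dump("initializer", &a);
	dump("memset     ", &b);		/* every byte is defined */
	return 0;
}
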
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 1d163d0ec1d08d..52aa801354264e 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -7,9 +7,7 @@ quote := "
squote := '
empty :=
space := $(empty) $(empty)
-space_escape := _-_SPACE_-_
-right_paren := )
-left_paren := (
+pound := \#
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -82,66 +80,6 @@ cc-cross-prefix = \
echo $(c); \
fi)))
-# Tools for caching Makefile variables that are "expensive" to compute.
-#
-# Here we want to help deal with variables that take a long time to compute
-# by making it easy to store these variables in a cache.
-#
-# The canonical example here is testing for compiler flags. On a simple system
-# each call to the compiler takes 10 ms, but on a system with a compiler that's
-# called through various wrappers it can take upwards of 100 ms. If we have
-# 100 calls to the compiler this can take 1 second (on a simple system) or 10
-# seconds (on a complicated system).
-#
-# The "cache" will be in Makefile syntax and can be directly included.
-# Any time we try to reference a variable that's not in the cache we'll
-# calculate it and store it in the cache for next time.
-
-# Include values from last time
-make-cache := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/,$(if $(obj),$(obj)/)).cache.mk
-ifeq ($(wildcard $(dir $(make-cache))),)
-$(shell mkdir -p '$(dir $(make-cache))')
-endif
-$(make-cache): ;
--include $(make-cache)
-
-# If cache exceeds 1000 lines, shrink it down to 500.
-ifneq ($(word 1000,$(filter __cached_%, $(.VARIABLES))),)
-$(shell tail -n 500 $(make-cache) > $(make-cache).tmp; \
- mv $(make-cache).tmp $(make-cache))
-endif
-
-# Usage: $(call __sanitize-opt,Hello=Hola$(comma)Goodbye Adios)
-#
-# Convert all '$', ')', '(', '\', '=', ' ', ',', ':' to '_'
-__sanitize-opt = $(subst $$,_,$(subst $(right_paren),_,$(subst $(left_paren),_,$(subst \,_,$(subst =,_,$(subst $(space),_,$(subst $(comma),_,$(subst :,_,$(1)))))))))
-
-# Usage: $(call shell-cached,shell_command)
-# Example: $(call shell-cached,md5sum /usr/bin/gcc)
-#
-# If we've already seen a call to this exact shell command (even in a
-# previous invocation of make!) we'll return the value. If not, we'll
-# compute it and store the result for future runs.
-#
-# This is a bit of voodoo, but basic explanation is that if the variable
-# was undefined then we'll evaluate the shell command and store the result
-# into the variable. We'll then store that value in the cache and finally
-# output the value.
-#
-# NOTE: The $$(2) here isn't actually a parameter to __run-and-store. We
-# happen to know that the caller will have their shell command in $(2) so the
-# result of "call"ing this will produce a reference to that $(2). The reason
-# for this strangeness is to avoid an extra level of eval (and escaping) of
-# $(2).
-define __run-and-store
-ifeq ($(origin $(1)),undefined)
- $$(eval $(1) := $$(shell $$(2)))
- $$(shell echo '$(1) := $$($(1))' >> $(make-cache))
-endif
-endef
-__shell-cached = $(eval $(call __run-and-store,$(1)))$($(1))
-shell-cached = $(call __shell-cached,__cached_$(call __sanitize-opt,$(1)),$(1))
-
# output directory for tests below
TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
@@ -149,36 +87,30 @@ TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
# Exit code chooses option. "$$TMP" is can be used as temporary file and
# is automatically cleaned up.
-__try-run = set -e; \
+try-run = $(shell set -e; \
TMP="$(TMPOUT).$$$$.tmp"; \
TMPO="$(TMPOUT).$$$$.o"; \
if ($(1)) >/dev/null 2>&1; \
then echo "$(2)"; \
else echo "$(3)"; \
fi; \
- rm -f "$$TMP" "$$TMPO"
-
-try-run = $(shell $(__try-run))
-
-# try-run-cached
-# This works like try-run, but the result is cached.
-try-run-cached = $(call shell-cached,$(__try-run))
+ rm -f "$$TMP" "$$TMPO")
# as-option
# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)
-as-option = $(call try-run-cached,\
+as-option = $(call try-run,\
$(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))
# as-instr
# Usage: cflags-y += $(call as-instr,instr,option1,option2)
-as-instr = $(call try-run-cached,\
+as-instr = $(call try-run,\
printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
# __cc-option
# Usage: MY_CFLAGS += $(call __cc-option,$(CC),$(MY_CFLAGS),-march=winchip-c6,-march=i586)
-__cc-option = $(call try-run-cached,\
+__cc-option = $(call try-run,\
$(1) -Werror $(2) $(3) -c -x c /dev/null -o "$$TMP",$(3),$(4))
# Do not attempt to build with gcc plugins during cc-option tests.
@@ -198,7 +130,7 @@ hostcc-option = $(call __cc-option, $(HOSTCC),\
# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
-cc-option-yn = $(call try-run-cached,\
+cc-option-yn = $(call try-run,\
$(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)
# cc-option-align
@@ -208,18 +140,18 @@ cc-option-align = $(subst -functions=0,,\
# cc-disable-warning
# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
-cc-disable-warning = $(call try-run-cached,\
+cc-disable-warning = $(call try-run,\
$(CC) -Werror $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
# cc-name
# Expands to either gcc or clang
-cc-name = $(call shell-cached,$(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
+cc-name = $(shell $(CC) -v 2>&1 | grep -q "clang version" && echo clang || echo gcc)
# cc-version
-cc-version = $(call shell-cached,$(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
+cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
# cc-fullversion
-cc-fullversion = $(call shell-cached,$(CONFIG_SHELL) \
+cc-fullversion = $(shell $(CONFIG_SHELL) \
$(srctree)/scripts/gcc-version.sh -p $(CC))
# cc-ifversion
@@ -228,22 +160,22 @@ cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
# cc-ldoption
# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
-cc-ldoption = $(call try-run-cached,\
+cc-ldoption = $(call try-run,\
$(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
# ld-option
# Usage: LDFLAGS += $(call ld-option, -X)
-ld-option = $(call try-run-cached,\
+ld-option = $(call try-run,\
$(CC) -x c /dev/null -c -o "$$TMPO" ; $(LD) $(1) "$$TMPO" -o "$$TMP",$(1),$(2))
# ar-option
# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
# Important: no spaces around options
-ar-option = $(call try-run-cached, $(AR) rc$(1) "$$TMP",$(1),$(2))
+ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
# ld-version
# Note this is mainly for HJ Lu's 3 number binutil versions
-ld-version = $(call shell-cached,$(LD) --version | $(srctree)/scripts/ld-version.sh)
+ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
# ld-ifversion
# Usage: $(call ld-ifversion, -ge, 22252, y)
@@ -319,11 +251,11 @@ endif
# Replace >$< with >$$< to preserve $ when reloading the .cmd file
# (needed for make)
-# Replace >#< with >\#< to avoid starting a comment in the .cmd file
+# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
# (needed for make)
# Replace >'< with >'\''< to be able to enclose the whole string in '...'
# (needed for the shell)
-make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
+make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
# Find any prerequisites that is newer than target or that does not exist.
# PHONY targets skipped in both cases.
@@ -441,3 +373,6 @@ endif
endef
#
###############################################################################
+
+# delete partially updated (i.e. corrupted) files on error
+.DELETE_ON_ERROR:
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index abe5f47b1ab0a4..93e23a73b232f2 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -10,6 +10,8 @@
# are not supported by all versions of the compiler
# ==========================================================================
+KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
+
ifeq ("$(origin W)", "command line")
export KBUILD_ENABLE_EXTRA_GCC_CHECKS := $(W)
endif
@@ -25,6 +27,7 @@ warning-1 += -Wold-style-definition
warning-1 += $(call cc-option, -Wmissing-include-dirs)
warning-1 += $(call cc-option, -Wunused-but-set-variable)
warning-1 += $(call cc-option, -Wunused-const-variable)
+warning-1 += $(call cc-option, -Wpacked-not-aligned)
warning-1 += $(call cc-disable-warning, missing-field-initializers)
warning-2 := -Waggregate-return
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index dd8397894d5c77..12a6940741fe5b 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -46,8 +46,8 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
if ($arch eq 'aarch64') {
- #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp,#-80]!
- $re = qr/^.*stp.*sp,\#-([0-9]{1,8})\]\!/o;
+ #ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
+ $re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
} elsif ($arch eq 'arm') {
#c0008ffc: e24dd064 sub sp, sp, #100 ; 0x64
$re = qr/.*sub.*sp, sp, #(([0-9]{2}|[3-9])[0-9]{2})/o;
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 00d6d53c2681da..ffc46c7c3afbb2 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -64,7 +64,7 @@ parse_symbol() {
fi
# Strip out the base of the path
- code=${code//$basepath/""}
+ code=${code//^$basepath/""}
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
index 122599b1c13b7d..baedaef53ca055 100755
--- a/scripts/depmod.sh
+++ b/scripts/depmod.sh
@@ -10,7 +10,13 @@ DEPMOD=$1
KERNELRELEASE=$2
SYMBOL_PREFIX=$3
-if ! test -r System.map -a -x "$DEPMOD"; then
+if ! test -r System.map ; then
+ exit 0
+fi
+
+if [ -z $(command -v $DEPMOD) ]; then
+ echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
+ echo "This is probably in the kmod package." >&2
exit 0
fi
diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py
index 862a4ae24d4996..fc2265c87d9670 100644
--- a/scripts/gdb/linux/tasks.py
+++ b/scripts/gdb/linux/tasks.py
@@ -96,6 +96,8 @@ def get_thread_info(task):
thread_info_addr = task.address + ia64_task_size
thread_info = thread_info_addr.cast(thread_info_ptr_type)
else:
+ if task.type.fields()[0].type == thread_info_type.get_type():
+ return task['thread_info']
thread_info = task['stack'].cast(thread_info_ptr_type)
return thread_info.dereference()
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index 6c204318bc942d..58887eaea708c4 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -19,7 +19,6 @@
static void conf(struct menu *menu);
static void check_conf(struct menu *menu);
-static void xfgets(char *str, int size, FILE *in);
enum input_mode {
oldaskconfig,
@@ -38,7 +37,6 @@ enum input_mode {
static int indent = 1;
static int tty_stdio;
-static int valid_stdin = 1;
static int sync_kconfig;
static int conf_cnt;
static char line[128];
@@ -71,14 +69,11 @@ static void strip(char *str)
*p-- = 0;
}
-static void check_stdin(void)
+/* Helper function to facilitate fgets() by Jean Sacren. */
+static void xfgets(char *str, int size, FILE *in)
{
- if (!valid_stdin) {
- printf(_("aborted!\n\n"));
- printf(_("Console input/output is redirected. "));
- printf(_("Run 'make oldconfig' to update configuration.\n\n"));
- exit(1);
- }
+ if (!fgets(str, size, in))
+ fprintf(stderr, "\nError in reading or end of file.\n");
}
static int conf_askvalue(struct symbol *sym, const char *def)
@@ -105,7 +100,6 @@ static int conf_askvalue(struct symbol *sym, const char *def)
printf("%s\n", def);
return 0;
}
- check_stdin();
/* fall through */
case oldaskconfig:
fflush(stdout);
@@ -307,7 +301,6 @@ static int conf_choice(struct menu *menu)
printf("%d\n", cnt);
break;
}
- check_stdin();
/* fall through */
case oldaskconfig:
fflush(stdout);
@@ -641,7 +634,6 @@ int main(int ac, char **av)
return 1;
}
}
- valid_stdin = tty_stdio;
}
switch (input_mode) {
@@ -711,12 +703,3 @@ int main(int ac, char **av)
}
return 0;
}
-
-/*
- * Helper function to facilitate fgets() by Jean Sacren.
- */
-void xfgets(char *str, int size, FILE *in)
-{
- if (fgets(str, size, in) == NULL)
- fprintf(stderr, "\nError in reading or end of file.\n");
-}
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index dd243d2abd875b..138d7f100f7e8c 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -743,7 +743,7 @@ int conf_write(const char *name)
struct menu *menu;
const char *basename;
const char *str;
- char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
+ char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
char *env;
dirname[0] = 0;
diff --git a/scripts/kconfig/zconf.l b/scripts/kconfig/zconf.l
index c410d257da0602..0c7800112ff529 100644
--- a/scripts/kconfig/zconf.l
+++ b/scripts/kconfig/zconf.l
@@ -71,7 +71,7 @@ static void warn_ignored_character(char chr)
{
fprintf(stderr,
"%s:%d:warning: ignoring unsupported character '%c'\n",
- zconf_curname(), zconf_lineno(), chr);
+ current_file->name, yylineno, chr);
}
%}
@@ -191,6 +191,8 @@ n [A-Za-z0-9_-]
}
<<EOF>> {
BEGIN(INITIAL);
+ yylval.string = text;
+ return T_WORD_QUOTE;
}
}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index bd5151915e5a11..81b1c02a76fada 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -649,7 +649,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
break;
if (symname[0] == '.') {
- char *munged = strdup(symname);
+ char *munged = NOFAIL(strdup(symname));
munged[0] = '_';
munged[1] = toupper(munged[1]);
symname = munged;
@@ -1197,6 +1197,30 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
return 1;
}
+static inline int is_arm_mapping_symbol(const char *str)
+{
+ return str[0] == '$' && strchr("axtd", str[1])
+ && (str[2] == '\0' || str[2] == '.');
+}
+
+/*
+ * If there's no name there, ignore it; likewise, ignore it if it's
+ * one of the magic symbols emitted by current ARM tools.
+ *
+ * Otherwise if find_symbols_between() returns those symbols, they'll
+ * fail the whitelist tests and cause lots of false alarms ... fixable
+ * only by merging __exit and __init sections into __text, bloating
+ * the kernel (which is especially evil on embedded platforms).
+ */
+static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
+{
+ const char *name = elf->strtab + sym->st_name;
+
+ if (!name || !strlen(name))
+ return 0;
+ return !is_arm_mapping_symbol(name);
+}
+
/**
* Find symbol based on relocation record info.
* In some cases the symbol supplied is a valid symbol so
@@ -1222,6 +1246,8 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
continue;
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
+ if (!is_valid_name(elf, sym))
+ continue;
if (sym->st_value == addr)
return sym;
/* Find a symbol nearby - addr are maybe negative */
@@ -1240,30 +1266,6 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
return NULL;
}
-static inline int is_arm_mapping_symbol(const char *str)
-{
- return str[0] == '$' && strchr("axtd", str[1])
- && (str[2] == '\0' || str[2] == '.');
-}
-
-/*
- * If there's no name there, ignore it; likewise, ignore it if it's
- * one of the magic symbols emitted used by current ARM tools.
- *
- * Otherwise if find_symbols_between() returns those symbols, they'll
- * fail the whitelist tests and cause lots of false alarms ... fixable
- * only by merging __exit and __init sections into __text, bloating
- * the kernel (which is especially evil on embedded platforms).
- */
-static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
-{
- const char *name = elf->strtab + sym->st_name;
-
- if (!name || !strlen(name))
- return 0;
- return !is_arm_mapping_symbol(name);
-}
-
/*
* Find symbols before or equal addr and after addr - in the section sec.
* If we find two symbols with equal offset prefer one with a valid name.
@@ -1311,7 +1313,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
static char *sec2annotation(const char *s)
{
if (match(s, init_exit_sections)) {
- char *p = malloc(20);
+ char *p = NOFAIL(malloc(20));
char *r = p;
*p++ = '_';
@@ -1331,7 +1333,7 @@ static char *sec2annotation(const char *s)
strcat(p, " ");
return r;
} else {
- return strdup("");
+ return NOFAIL(strdup(""));
}
}
@@ -2032,7 +2034,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
{
if (buf->size - buf->pos < len) {
buf->size += len + SZ;
- buf->p = realloc(buf->p, buf->size);
+ buf->p = NOFAIL(realloc(buf->p, buf->size));
}
strncpy(buf->p + buf->pos, s, len);
buf->pos += len;
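
Annotation: several hunks above route allocation results through NOFAIL(). A self-contained sketch of the wrapper, modelled on modpost's do_nofail() (the main() usage is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Abort with a message instead of returning NULL, so callers never
 * dereference a failed allocation. */
static void *do_nofail(void *ptr, const char *expr)
{
	if (!ptr) {
		fprintf(stderr, "modpost: Memory allocation failure: %s.\n",
			expr);
		exit(1);
	}
	return ptr;
}

#define NOFAIL(ptr)	do_nofail((ptr), #ptr)

int main(void)
{
	char *munged = NOFAIL(strdup(".symbol"));

	munged[0] = '_';	/* safe: munged is guaranteed non-NULL */
	printf("%s\n", munged);
	free(munged);
	return 0;
}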
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 9fc7d52dbe7553..d0d0c480fc667d 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -149,7 +149,7 @@ fi
if test -e include/config/auto.conf; then
. include/config/auto.conf
else
- echo "Error: kernelrelease not valid - run 'make prepare' to update it"
+ echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
exit 1
fi
diff --git a/scripts/unifdef.c b/scripts/unifdef.c
index 7493c0ee51cc93..db00e3e30a59d7 100644
--- a/scripts/unifdef.c
+++ b/scripts/unifdef.c
@@ -395,7 +395,7 @@ usage(void)
* When we have processed a group that starts off with a known-false
* #if/#elif sequence (which has therefore been deleted) followed by a
* #elif that we don't understand and therefore must keep, we edit the
- * latter into a #if to keep the nesting correct. We use strncpy() to
+ * latter into a #if to keep the nesting correct. We use memcpy() to
 * overwrite the 4 byte token "elif" with "if  " without a '\0' byte.
*
* When we find a true #elif in a group, the following block will
@@ -450,7 +450,7 @@ static void Idrop (void) { Fdrop(); ignoreon(); }
static void Itrue (void) { Ftrue(); ignoreon(); }
static void Ifalse(void) { Ffalse(); ignoreon(); }
/* modify this line */
-static void Mpass (void) { strncpy(keyword, "if  ", 4); Pelif(); }
+static void Mpass (void) { memcpy(keyword, "if  ", 4); Pelif(); }
static void Mtrue (void) { keywordedit("else"); state(IS_TRUE_MIDDLE); }
static void Melif (void) { keywordedit("endif"); state(IS_FALSE_TRAILER); }
static void Melse (void) { keywordedit("endif"); state(IS_FALSE_ELSE); }
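
Annotation: the memcpy() swap above deliberately copies exactly four bytes and no terminating NUL. A runnable sketch of the in-place token edit (buffer contents are illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char keyword[] = "elif FOO";

	/* Overwrite the 4-byte token "elif" with "if  " (two trailing
	 * spaces, no '\0'), as Mpass() does. strncpy() with a bound equal
	 * to the source length draws truncation warnings from GCC 8+;
	 * memcpy() states the fixed-size intent directly. */
	memcpy(keyword, "if  ", 4);
	printf("%s\n", keyword);	/* prints "if   FOO" */
	return 0;
}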
diff --git a/security/Kconfig b/security/Kconfig
index 405d098f1a6479..aba5f4296118c0 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -137,6 +137,87 @@ config LSM_MMAP_MIN_ADDR
this low address space will need the permission specific to the
systems running LSM.
+config HAVE_HARDENED_USERCOPY_ALLOCATOR
+ bool
+ help
+ The heap allocator implements __check_heap_object() for
+ validating memory ranges against heap object sizes in
+ support of CONFIG_HARDENED_USERCOPY.
+
+config HAVE_ARCH_HARDENED_USERCOPY
+ bool
+ help
+ The architecture supports CONFIG_HARDENED_USERCOPY by
+ calling check_object_size() just before performing the
+ userspace copies in the low level implementation of
+ copy_to_user() and copy_from_user().
+
+config HARDENED_USERCOPY
+ bool "Harden memory copies between kernel and userspace"
+ depends on HAVE_ARCH_HARDENED_USERCOPY
+ select BUG
+ help
+ This option checks for obviously wrong memory regions when
+ copying memory to/from the kernel (via copy_to_user() and
+ copy_from_user() functions) by rejecting memory ranges that
+ are larger than the specified heap object, span multiple
+ separately allocated pages, are not on the process stack,
+ or are part of the kernel text. This kills entire classes
+ of heap overflow exploits and similar kernel memory exposures.
+
+config HARDENED_USERCOPY_PAGESPAN
+ bool "Refuse to copy allocations that span multiple pages"
+ depends on HARDENED_USERCOPY
+ depends on !COMPILE_TEST
+ help
+ When a multi-page allocation is done without __GFP_COMP,
+ hardened usercopy will reject attempts to copy it. There are,
+ however, several cases of this in the kernel that have not all
+ been removed. This config is intended to be used only while
+ trying to find such users.
+
+config FORTIFY_SOURCE
+ bool "Harden common str/mem functions against buffer overflows"
+ depends on ARCH_HAS_FORTIFY_SOURCE
+ help
+ Detect overflows of buffers in common string and memory functions
+ where the compiler can determine and validate the buffer sizes.
+
+config STATIC_USERMODEHELPER
+ bool "Force all usermode helper calls through a single binary"
+ help
+ By default, the kernel can call many different userspace
+ binary programs through the "usermode helper" kernel
+ interface. Some of these binaries are statically defined
+ either in the kernel code itself, or as a kernel configuration
+ option. However, some of these are dynamically created at
+ runtime, or can be modified after the kernel has started up.
+ To provide an additional layer of security, route all of these
+ calls through a single executable that can not have its name
+ changed.
+
+ Note, it is up to this single binary to then call the relevant
+ "real" usermode helper binary, based on the first argument
+ passed to it. If desired, this program can filter and pick
+ and choose what real programs are called.
+
+ If you wish for all usermode helper programs to be
+ disabled, choose this option and then set
+ STATIC_USERMODEHELPER_PATH to an empty string.
+
+config STATIC_USERMODEHELPER_PATH
+ string "Path to the static usermode helper binary"
+ depends on STATIC_USERMODEHELPER
+ default "/sbin/usermode-helper"
+ help
+ The binary called by the kernel when any usermode helper
+ program is to be run. The "real" application's name will
+ be in the first argument passed to this program on the command
+ line.
+
+ If you wish for all usermode helper programs to be disabled,
+ specify an empty string here (i.e. "").
+
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
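
Annotation: CONFIG_FORTIFY_SOURCE above is the kernel analogue of glibc's _FORTIFY_SOURCE: where the compiler can see a buffer's size (via __builtin_object_size()), the common str/mem helpers check against it. A userspace sketch of the same idea, using glibc's machinery rather than the kernel's (build with: gcc -O2 -D_FORTIFY_SOURCE=2):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[8];

	/* The compiler knows sizeof(dst); with fortification enabled,
	 * strcpy() becomes __strcpy_chk() and an oversized copy aborts
	 * at run time (GCC often warns at compile time as well). */
	strcpy(dst, "fits");
	printf("%s\n", dst);
	return 0;
}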
diff --git a/security/Makefile b/security/Makefile
index 2f9df6c043a5c0..5693a90d8a347f 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -17,13 +17,13 @@ obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o
obj-$(CONFIG_SECURITYFS) += inode.o
+obj-$(CONFIG_SECURITY_CHROMIUMOS) += chromiumos/
obj-$(CONFIG_SECURITY_SELINUX) += selinux/
obj-$(CONFIG_SECURITY_SMACK) += smack/
obj-$(CONFIG_AUDIT) += lsm_audit.o
obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
obj-$(CONFIG_SECURITY_YAMA) += yama/
-obj-$(CONFIG_SECURITY_CHROMIUMOS) += chromiumos/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
diff --git a/security/chromiumos/Kconfig b/security/chromiumos/Kconfig
index a4ffa9303ef4b1..caaa98e1125178 100644
--- a/security/chromiumos/Kconfig
+++ b/security/chromiumos/Kconfig
@@ -1,6 +1,8 @@
config SECURITY_CHROMIUMOS
bool "Chromium OS Security Module"
depends on SECURITY
+ depends on BLOCK
+ depends on X86_64 || ARM64
help
The purpose of the Chromium OS security module is to reduce the attack
surface by preventing access to general purpose access modes not
@@ -38,6 +40,7 @@ config SECURITY_CHROMIUMOS_DEVICE_JAIL
config ALT_SYSCALL_CHROMIUMOS
bool "Chromium OS Alt-Syscall Tables"
depends on ALT_SYSCALL
+ depends on X86_64 || ARM64
help
Register restricted, alternate syscall tables used by Chromium OS
using the alt-syscall infrastructure. Alternate syscall tables
diff --git a/security/chromiumos/alt-syscall.c b/security/chromiumos/alt-syscall.c
index de511bae39748c..580ffaae8ca6d8 100644
--- a/security/chromiumos/alt-syscall.c
+++ b/security/chromiumos/alt-syscall.c
@@ -21,50 +21,11 @@
#include <asm/unistd.h>
-static int allow_devmode_syscalls;
-
-#ifdef CONFIG_SYSCTL
-static int zero;
-static int one = 1;
-
-static struct ctl_path chromiumos_sysctl_path[] = {
- { .procname = "kernel", },
- { .procname = "chromiumos", },
- { .procname = "alt_syscall", },
- { }
-};
-
-static struct ctl_table chromiumos_sysctl_table[] = {
- {
- .procname = "allow_devmode_syscalls",
- .data = &allow_devmode_syscalls,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
- },
- { }
-};
-#endif
-
-struct syscall_whitelist_entry {
- unsigned int nr;
- sys_call_ptr_t alt;
-};
-
-struct syscall_whitelist {
- const char *name;
- const struct syscall_whitelist_entry *whitelist;
- unsigned int nr_whitelist;
-#ifdef CONFIG_COMPAT
- const struct syscall_whitelist_entry *compat_whitelist;
- unsigned int nr_compat_whitelist;
-#endif
- bool permissive;
-};
-
-static struct alt_sys_call_table default_table;
+#include "alt-syscall.h"
+#include "android_whitelists.h"
+#include "complete_whitelists.h"
+#include "read_write_test_whitelists.h"
+#include "third_party_whitelists.h"
/* Intercept and log blocked syscalls. */
static asmlinkage long block_syscall(void)
@@ -124,12 +85,8 @@ static asmlinkage long warn_compat_syscall(void)
return do_syscall(fn);
}
-#endif
+#endif /* CONFIG_COMPAT */
-/*
- * If an alt_syscall table allows prctl(), override it to prevent a process
- * from changing its syscall table.
- */
static asmlinkage long alt_sys_prctl(int option, unsigned long arg2,
unsigned long arg3, unsigned long arg4,
unsigned long arg5)
@@ -141,322 +98,6 @@ static asmlinkage long alt_sys_prctl(int option, unsigned long arg2,
return sys_prctl(option, arg2, arg3, arg4, arg5);
}
-#ifdef CONFIG_COMPAT
-#define SYSCALL_WHITELIST_COMPAT(x) \
- .compat_whitelist = x ## _compat_whitelist, \
- .nr_compat_whitelist = ARRAY_SIZE(x ## _compat_whitelist),
-#else
-#define SYSCALL_WHITELIST_COMPAT(x)
-#endif
-
-#define SYSCALL_WHITELIST(x) \
- { \
- .name = #x, \
- .whitelist = x ## _whitelist, \
- .nr_whitelist = ARRAY_SIZE(x ## _whitelist), \
- SYSCALL_WHITELIST_COMPAT(x) \
- }
-
-#define PERMISSIVE_SYSCALL_WHITELIST(x) \
- { \
- .name = #x "_permissive", \
- .permissive = true, \
- .whitelist = x ## _whitelist, \
- .nr_whitelist = ARRAY_SIZE(x ## _whitelist), \
- SYSCALL_WHITELIST_COMPAT(x) \
- }
-
-#ifdef CONFIG_COMPAT
-#ifdef CONFIG_X86_64
-#define __NR_compat_access __NR_ia32_access
-#define __NR_compat_adjtimex __NR_ia32_adjtimex
-#define __NR_compat_brk __NR_ia32_brk
-#define __NR_compat_capget __NR_ia32_capget
-#define __NR_compat_capset __NR_ia32_capset
-#define __NR_compat_chdir __NR_ia32_chdir
-#define __NR_compat_chmod __NR_ia32_chmod
-#define __NR_compat_clock_adjtime __NR_ia32_clock_adjtime
-#define __NR_compat_clock_getres __NR_ia32_clock_getres
-#define __NR_compat_clock_gettime __NR_ia32_clock_gettime
-#define __NR_compat_clock_nanosleep __NR_ia32_clock_nanosleep
-#define __NR_compat_clock_settime __NR_ia32_clock_settime
-#define __NR_compat_clone __NR_ia32_clone
-#define __NR_compat_close __NR_ia32_close
-#define __NR_compat_creat __NR_ia32_creat
-#define __NR_compat_dup __NR_ia32_dup
-#define __NR_compat_dup2 __NR_ia32_dup2
-#define __NR_compat_dup3 __NR_ia32_dup3
-#define __NR_compat_epoll_create __NR_ia32_epoll_create
-#define __NR_compat_epoll_create1 __NR_ia32_epoll_create1
-#define __NR_compat_epoll_ctl __NR_ia32_epoll_ctl
-#define __NR_compat_epoll_wait __NR_ia32_epoll_wait
-#define __NR_compat_epoll_pwait __NR_ia32_epoll_pwait
-#define __NR_compat_eventfd __NR_ia32_eventfd
-#define __NR_compat_eventfd2 __NR_ia32_eventfd2
-#define __NR_compat_execve __NR_ia32_execve
-#define __NR_compat_exit __NR_ia32_exit
-#define __NR_compat_exit_group __NR_ia32_exit_group
-#define __NR_compat_faccessat __NR_ia32_faccessat
-#define __NR_compat_fallocate __NR_ia32_fallocate
-#define __NR_compat_fchdir __NR_ia32_fchdir
-#define __NR_compat_fchmod __NR_ia32_fchmod
-#define __NR_compat_fchmodat __NR_ia32_fchmodat
-#define __NR_compat_fchown __NR_ia32_fchown
-#define __NR_compat_fchownat __NR_ia32_fchownat
-#define __NR_compat_fcntl __NR_ia32_fcntl
-#define __NR_compat_fdatasync __NR_ia32_fdatasync
-#define __NR_compat_fgetxattr __NR_ia32_fgetxattr
-#define __NR_compat_flistxattr __NR_ia32_flistxattr
-#define __NR_compat_flock __NR_ia32_flock
-#define __NR_compat_fork __NR_ia32_fork
-#define __NR_compat_fremovexattr __NR_ia32_fremovexattr
-#define __NR_compat_fsetxattr __NR_ia32_fsetxattr
-#define __NR_compat_fstat __NR_ia32_fstat
-#define __NR_compat_fstatfs __NR_ia32_fstatfs
-#define __NR_compat_fsync __NR_ia32_fsync
-#define __NR_compat_ftruncate __NR_ia32_ftruncate
-#define __NR_compat_futex __NR_ia32_futex
-#define __NR_compat_futimesat __NR_ia32_futimesat
-#define __NR_compat_getcpu __NR_ia32_getcpu
-#define __NR_compat_getcwd __NR_ia32_getcwd
-#define __NR_compat_getdents __NR_ia32_getdents
-#define __NR_compat_getdents64 __NR_ia32_getdents64
-#define __NR_compat_getegid __NR_ia32_getegid
-#define __NR_compat_geteuid __NR_ia32_geteuid
-#define __NR_compat_getgid __NR_ia32_getgid
-#define __NR_compat_getgroups32 __NR_ia32_getgroups32
-#define __NR_compat_getpgid __NR_ia32_getpgid
-#define __NR_compat_getpgrp __NR_ia32_getpgrp
-#define __NR_compat_getpid __NR_ia32_getpid
-#define __NR_compat_getppid __NR_ia32_getppid
-#define __NR_compat_getpriority __NR_ia32_getpriority
-#define __NR_compat_getresgid __NR_ia32_getresgid
-#define __NR_compat_getrandom __NR_ia32_getrandom
-#define __NR_compat_getresuid __NR_ia32_getresuid
-#define __NR_compat_getrlimit __NR_ia32_getrlimit
-#define __NR_compat_getrusage __NR_ia32_getrusage
-#define __NR_compat_getsid __NR_ia32_getsid
-#define __NR_compat_gettid __NR_ia32_gettid
-#define __NR_compat_gettimeofday __NR_ia32_gettimeofday
-#define __NR_compat_getuid __NR_ia32_getuid
-#define __NR_compat_getxattr __NR_ia32_getxattr
-#define __NR_compat_inotify_add_watch __NR_ia32_inotify_add_watch
-#define __NR_compat_inotify_init __NR_ia32_inotify_init
-#define __NR_compat_inotify_init1 __NR_ia32_inotify_init1
-#define __NR_compat_inotify_rm_watch __NR_ia32_inotify_rm_watch
-#define __NR_compat_ioctl __NR_ia32_ioctl
-#define __NR_compat_ioprio_set __NR_ia32_ioprio_set
-#define __NR_compat_kill __NR_ia32_kill
-#define __NR_compat_lgetxattr __NR_ia32_lgetxattr
-#define __NR_compat_link __NR_ia32_link
-#define __NR_compat_linkat __NR_ia32_linkat
-#define __NR_compat_listxattr __NR_ia32_listxattr
-#define __NR_compat_llistxattr __NR_ia32_llistxattr
-#define __NR_compat_lremovexattr __NR_ia32_lremovexattr
-#define __NR_compat_lseek __NR_ia32_lseek
-#define __NR_compat_lsetxattr __NR_ia32_lsetxattr
-#define __NR_compat_lstat __NR_ia32_lstat
-#define __NR_compat_madvise __NR_ia32_madvise
-#define __NR_compat_memfd_create __NR_ia32_memfd_create
-#define __NR_compat_mincore __NR_ia32_mincore
-#define __NR_compat_mkdir __NR_ia32_mkdir
-#define __NR_compat_mkdirat __NR_ia32_mkdirat
-#define __NR_compat_mknod __NR_ia32_mknod
-#define __NR_compat_mknodat __NR_ia32_mknodat
-#define __NR_compat_mlock __NR_ia32_mlock
-#define __NR_compat_munlock __NR_ia32_munlock
-#define __NR_compat_mlockall __NR_ia32_mlockall
-#define __NR_compat_munlockall __NR_ia32_munlockall
-#define __NR_compat_modify_ldt __NR_ia32_modify_ldt
-#define __NR_compat_mount __NR_ia32_mount
-#define __NR_compat_mprotect __NR_ia32_mprotect
-#define __NR_compat_mremap __NR_ia32_mremap
-#define __NR_compat_msync __NR_ia32_msync
-#define __NR_compat_munmap __NR_ia32_munmap
-#define __NR_compat_name_to_handle_at __NR_ia32_name_to_handle_at
-#define __NR_compat_nanosleep __NR_ia32_nanosleep
-#define __NR_compat_open __NR_ia32_open
-#define __NR_compat_open_by_handle_at __NR_ia32_open_by_handle_at
-#define __NR_compat_openat __NR_ia32_openat
-#define __NR_compat_perf_event_open __NR_ia32_perf_event_open
-#define __NR_compat_personality __NR_ia32_personality
-#define __NR_compat_pipe __NR_ia32_pipe
-#define __NR_compat_pipe2 __NR_ia32_pipe2
-#define __NR_compat_poll __NR_ia32_poll
-#define __NR_compat_ppoll __NR_ia32_ppoll
-#define __NR_compat_prctl __NR_ia32_prctl
-#define __NR_compat_pread64 __NR_ia32_pread64
-#define __NR_compat_preadv __NR_ia32_preadv
-#define __NR_compat_prlimit64 __NR_ia32_prlimit64
-#define __NR_compat_process_vm_readv __NR_ia32_process_vm_readv
-#define __NR_compat_process_vm_writev __NR_ia32_process_vm_writev
-#define __NR_compat_pselect6 __NR_ia32_pselect6
-#define __NR_compat_ptrace __NR_ia32_ptrace
-#define __NR_compat_pwrite64 __NR_ia32_pwrite64
-#define __NR_compat_pwritev __NR_ia32_pwritev
-#define __NR_compat_read __NR_ia32_read
-#define __NR_compat_readahead __NR_ia32_readahead
-#define __NR_compat_readv __NR_ia32_readv
-#define __NR_compat_readlink __NR_ia32_readlink
-#define __NR_compat_readlinkat __NR_ia32_readlinkat
-#define __NR_compat_recvmmsg __NR_ia32_recvmmsg
-#define __NR_compat_remap_file_pages __NR_ia32_remap_file_pages
-#define __NR_compat_removexattr __NR_ia32_removexattr
-#define __NR_compat_rename __NR_ia32_rename
-#define __NR_compat_renameat __NR_ia32_renameat
-#define __NR_compat_restart_syscall __NR_ia32_restart_syscall
-#define __NR_compat_rmdir __NR_ia32_rmdir
-#define __NR_compat_rt_sigaction __NR_ia32_rt_sigaction
-#define __NR_compat_rt_sigpending __NR_ia32_rt_sigpending
-#define __NR_compat_rt_sigprocmask __NR_ia32_rt_sigprocmask
-#define __NR_compat_rt_sigqueueinfo __NR_ia32_rt_sigqueueinfo
-#define __NR_compat_rt_sigreturn __NR_ia32_rt_sigreturn
-#define __NR_compat_rt_sigsuspend __NR_ia32_rt_sigsuspend
-#define __NR_compat_rt_sigtimedwait __NR_ia32_rt_sigtimedwait
-#define __NR_compat_rt_tgsigqueueinfo __NR_ia32_rt_tgsigqueueinfo
-#define __NR_compat_sched_get_priority_max __NR_ia32_sched_get_priority_max
-#define __NR_compat_sched_get_priority_min __NR_ia32_sched_get_priority_min
-#define __NR_compat_sched_getaffinity __NR_ia32_sched_getaffinity
-#define __NR_compat_sched_getparam __NR_ia32_sched_getparam
-#define __NR_compat_sched_getscheduler __NR_ia32_sched_getscheduler
-#define __NR_compat_sched_setaffinity __NR_ia32_sched_setaffinity
-#define __NR_compat_sched_setparam __NR_ia32_sched_setparam
-#define __NR_compat_sched_setscheduler __NR_ia32_sched_setscheduler
-#define __NR_compat_sched_yield __NR_ia32_sched_yield
-#define __NR_compat_seccomp __NR_ia32_seccomp
-#define __NR_compat_sendfile __NR_ia32_sendfile
-#define __NR_compat_sendfile64 __NR_ia32_sendfile64
-#define __NR_compat_sendmmsg __NR_ia32_sendmmsg
-#define __NR_compat_set_robust_list __NR_ia32_set_robust_list
-#define __NR_compat_set_tid_address __NR_ia32_set_tid_address
-#define __NR_compat_set_thread_area __NR_ia32_set_thread_area
-#define __NR_compat_setdomainname __NR_ia32_setdomainname
-#define __NR_compat_setgid __NR_ia32_setgid
-#define __NR_compat_setgroups __NR_ia32_setgroups
-#define __NR_compat_setitimer __NR_ia32_setitimer
-#define __NR_compat_setns __NR_ia32_setns
-#define __NR_compat_setpgid __NR_ia32_setpgid
-#define __NR_compat_setpriority __NR_ia32_setpriority
-#define __NR_compat_setregid __NR_ia32_setregid
-#define __NR_compat_setresgid __NR_ia32_setresgid
-#define __NR_compat_setresuid __NR_ia32_setresuid
-#define __NR_compat_setrlimit __NR_ia32_setrlimit
-#define __NR_compat_setsid __NR_ia32_setsid
-#define __NR_compat_settimeofday __NR_ia32_settimeofday
-#define __NR_compat_setuid __NR_ia32_setuid
-#define __NR_compat_setxattr __NR_ia32_setxattr
-#define __NR_compat_signalfd4 __NR_ia32_signalfd4
-#define __NR_compat_sigaltstack __NR_ia32_sigaltstack
-#define __NR_compat_socketcall __NR_ia32_socketcall
-#define __NR_compat_splice __NR_ia32_splice
-#define __NR_compat_stat __NR_ia32_stat
-#define __NR_compat_statfs __NR_ia32_statfs
-#define __NR_compat_symlink __NR_ia32_symlink
-#define __NR_compat_symlinkat __NR_ia32_symlinkat
-#define __NR_compat_sync_file_range __NR_ia32_sync_file_range
-#define __NR_compat_syncfs __NR_ia32_syncfs
-#define __NR_compat_sysinfo __NR_ia32_sysinfo
-#define __NR_compat_syslog __NR_ia32_syslog
-#define __NR_compat_tee __NR_ia32_tee
-#define __NR_compat_tgkill __NR_ia32_tgkill
-#define __NR_compat_tkill __NR_ia32_tkill
-#define __NR_compat_time __NR_ia32_time
-#define __NR_compat_timer_create __NR_ia32_timer_create
-#define __NR_compat_timer_delete __NR_ia32_timer_delete
-#define __NR_compat_timer_getoverrun __NR_ia32_timer_getoverrun
-#define __NR_compat_timer_gettime __NR_ia32_timer_gettime
-#define __NR_compat_timer_settime __NR_ia32_timer_settime
-#define __NR_compat_timerfd_create __NR_ia32_timerfd_create
-#define __NR_compat_timerfd_gettime __NR_ia32_timerfd_gettime
-#define __NR_compat_timerfd_settime __NR_ia32_timerfd_settime
-#define __NR_compat_times __NR_ia32_times
-#define __NR_compat_truncate __NR_ia32_truncate
-#define __NR_compat_umask __NR_ia32_umask
-#define __NR_compat_umount2 __NR_ia32_umount2
-#define __NR_compat_uname __NR_ia32_uname
-#define __NR_compat_unlink __NR_ia32_unlink
-#define __NR_compat_unlinkat __NR_ia32_unlinkat
-#define __NR_compat_unshare __NR_ia32_unshare
-#define __NR_compat_ustat __NR_ia32_ustat
-#define __NR_compat_utimensat __NR_ia32_utimensat
-#define __NR_compat_utimes __NR_ia32_utimes
-#define __NR_compat_vfork __NR_ia32_vfork
-#define __NR_compat_vmsplice __NR_ia32_vmsplice
-#define __NR_compat_wait4 __NR_ia32_wait4
-#define __NR_compat_waitid __NR_ia32_waitid
-#define __NR_compat_waitpid __NR_ia32_waitpid
-#define __NR_compat_write __NR_ia32_write
-#define __NR_compat_writev __NR_ia32_writev
-#define __NR_compat_chown32 __NR_ia32_chown32
-#define __NR_compat_fadvise64 __NR_ia32_fadvise64
-#define __NR_compat_fadvise64_64 __NR_ia32_fadvise64_64
-#define __NR_compat_fchown32 __NR_ia32_fchown32
-#define __NR_compat_fcntl64 __NR_ia32_fcntl64
-#define __NR_compat_fstat64 __NR_ia32_fstat64
-#define __NR_compat_fstatat64 __NR_ia32_fstatat64
-#define __NR_compat_fstatfs64 __NR_ia32_fstatfs64
-#define __NR_compat_ftruncate64 __NR_ia32_ftruncate64
-#define __NR_compat_getegid32 __NR_ia32_getegid32
-#define __NR_compat_geteuid32 __NR_ia32_geteuid32
-#define __NR_compat_getgid32 __NR_ia32_getgid32
-#define __NR_compat_getresgid32 __NR_ia32_getresgid32
-#define __NR_compat_getresuid32 __NR_ia32_getresuid32
-#define __NR_compat_getuid32 __NR_ia32_getuid32
-#define __NR_compat_lchown32 __NR_ia32_lchown32
-#define __NR_compat_lstat64 __NR_ia32_lstat64
-#define __NR_compat_mmap2 __NR_ia32_mmap2
-#define __NR_compat__newselect __NR_ia32__newselect
-#define __NR_compat__llseek __NR_ia32__llseek
-#define __NR_compat_sigaction __NR_ia32_sigaction
-#define __NR_compat_sigpending __NR_ia32_sigpending
-#define __NR_compat_sigprocmask __NR_ia32_sigprocmask
-#define __NR_compat_sigreturn __NR_ia32_sigreturn
-#define __NR_compat_sigsuspend __NR_ia32_sigsuspend
-#define __NR_compat_setgid32 __NR_ia32_setgid32
-#define __NR_compat_setgroups32 __NR_ia32_setgroups32
-#define __NR_compat_setregid32 __NR_ia32_setregid32
-#define __NR_compat_setresgid32 __NR_ia32_setresgid32
-#define __NR_compat_setresuid32 __NR_ia32_setresuid32
-#define __NR_compat_setreuid32 __NR_ia32_setreuid32
-#define __NR_compat_setuid32 __NR_ia32_setuid32
-#define __NR_compat_stat64 __NR_ia32_stat64
-#define __NR_compat_statfs64 __NR_ia32_statfs64
-#define __NR_compat_truncate64 __NR_ia32_truncate64
-#define __NR_compat_ugetrlimit __NR_ia32_ugetrlimit
-#endif
-#endif
-
-#define SYSCALL_ENTRY_ALT(name, func) \
- { \
- .nr = __NR_ ## name, \
- .alt = (sys_call_ptr_t)func, \
- }
-#define SYSCALL_ENTRY(name) SYSCALL_ENTRY_ALT(name, NULL)
-#define COMPAT_SYSCALL_ENTRY_ALT(name, func) \
- { \
- .nr = __NR_compat_ ## name, \
- .alt = (sys_call_ptr_t)func, \
- }
-#define COMPAT_SYSCALL_ENTRY(name) COMPAT_SYSCALL_ENTRY_ALT(name, NULL)
-
-static struct syscall_whitelist_entry read_write_test_whitelist[] = {
- SYSCALL_ENTRY(exit),
- SYSCALL_ENTRY(openat),
- SYSCALL_ENTRY(close),
- SYSCALL_ENTRY(read),
- SYSCALL_ENTRY(write),
- SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
-
- /* open(2) is deprecated and not wired up on ARM64. */
-#ifndef CONFIG_ARM64
- SYSCALL_ENTRY(open),
-#endif
-};
-
-/*
- * Syscall overrides for android.
- */
-
/* Thread priority used by Android. */
#define ANDROID_PRIORITY_FOREGROUND -2
#define ANDROID_PRIORITY_DISPLAY -4
@@ -474,9 +115,8 @@ static struct syscall_whitelist_entry read_write_test_whitelist[] = {
#define CONTAINER_PRIORITY_HIGHEST -10
/*
- * Reflect the priority adjustment done by android_setpriority.
- * Note that the prio returned by getpriority has been offset by 20.
- * (returns 40..1 instead of -20..19)
+ * TODO(mortonm): Move the implementation of these Android-specific
+ * alt-syscalls (starting with android_*) to their own .c file.
*/
static asmlinkage long android_getpriority(int which, int who)
{
@@ -511,7 +151,14 @@ static asmlinkage long android_getpriority(int which, int who)
return -nice + 20;
}
-/* Make sure nothing sets a nice value more favorable than -10. */
+static asmlinkage long android_keyctl(int cmd, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5)
+{
+ return -EACCES;
+}
+
+
static asmlinkage long android_setpriority(int which, int who, int niceval)
{
if (niceval < 0) {
@@ -581,9 +228,9 @@ android_sched_setscheduler(pid_t pid, int policy,
#define SETPARAM_POLICY -1
static asmlinkage long android_sched_setparam(pid_t pid,
- struct sched_param __user *param)
+ struct sched_param __user *param)
{
- return do_android_sched_setscheduler(pid, SETPARAM_POLICY, param);
+ return do_android_sched_setscheduler(pid, SETPARAM_POLICY, param);
}
static asmlinkage int __maybe_unused
@@ -638,485 +285,7 @@ static asmlinkage long android_getcpu(unsigned __user *cpu,
return sys_getcpu(cpu, node, tcache);
}
-static struct syscall_whitelist_entry android_whitelist[] = {
- SYSCALL_ENTRY_ALT(adjtimex, android_adjtimex),
- SYSCALL_ENTRY(brk),
- SYSCALL_ENTRY(capget),
- SYSCALL_ENTRY(capset),
- SYSCALL_ENTRY(chdir),
- SYSCALL_ENTRY_ALT(clock_adjtime, android_clock_adjtime),
- SYSCALL_ENTRY(clock_getres),
- SYSCALL_ENTRY(clock_gettime),
- SYSCALL_ENTRY(clock_nanosleep),
- SYSCALL_ENTRY(clock_settime),
- SYSCALL_ENTRY(clone),
- SYSCALL_ENTRY(close),
- SYSCALL_ENTRY(dup),
- SYSCALL_ENTRY(dup3),
- SYSCALL_ENTRY(epoll_create1),
- SYSCALL_ENTRY(epoll_ctl),
- SYSCALL_ENTRY(epoll_pwait),
- SYSCALL_ENTRY(eventfd2),
- SYSCALL_ENTRY(execve),
- SYSCALL_ENTRY(exit),
- SYSCALL_ENTRY(exit_group),
- SYSCALL_ENTRY(faccessat),
- SYSCALL_ENTRY(fallocate),
- SYSCALL_ENTRY(fchdir),
- SYSCALL_ENTRY(fchmod),
- SYSCALL_ENTRY(fchmodat),
- SYSCALL_ENTRY(fchownat),
- SYSCALL_ENTRY(fcntl),
- SYSCALL_ENTRY(fdatasync),
- SYSCALL_ENTRY(fgetxattr),
- SYSCALL_ENTRY(flistxattr),
- SYSCALL_ENTRY(flock),
- SYSCALL_ENTRY(fremovexattr),
- SYSCALL_ENTRY(fsetxattr),
- SYSCALL_ENTRY(fstat),
- SYSCALL_ENTRY(fstatfs),
- SYSCALL_ENTRY(fsync),
- SYSCALL_ENTRY(ftruncate),
- SYSCALL_ENTRY(futex),
- SYSCALL_ENTRY_ALT(getcpu, android_getcpu),
- SYSCALL_ENTRY(getcwd),
- SYSCALL_ENTRY(getdents64),
- SYSCALL_ENTRY(getpgid),
- SYSCALL_ENTRY(getpid),
- SYSCALL_ENTRY(getppid),
- SYSCALL_ENTRY_ALT(getpriority, android_getpriority),
- SYSCALL_ENTRY(getrandom),
- SYSCALL_ENTRY(getrlimit),
- SYSCALL_ENTRY(getrusage),
- SYSCALL_ENTRY(getsid),
- SYSCALL_ENTRY(gettid),
- SYSCALL_ENTRY(gettimeofday),
- SYSCALL_ENTRY(getxattr),
- SYSCALL_ENTRY(inotify_add_watch),
- SYSCALL_ENTRY(inotify_init1),
- SYSCALL_ENTRY(inotify_rm_watch),
- SYSCALL_ENTRY(ioctl),
- SYSCALL_ENTRY(ioprio_set),
- SYSCALL_ENTRY(kill),
- SYSCALL_ENTRY(lgetxattr),
- SYSCALL_ENTRY(linkat),
- SYSCALL_ENTRY(listxattr),
- SYSCALL_ENTRY(llistxattr),
- SYSCALL_ENTRY(lremovexattr),
- SYSCALL_ENTRY(lseek),
- SYSCALL_ENTRY(lsetxattr),
- SYSCALL_ENTRY(madvise),
- SYSCALL_ENTRY(memfd_create),
- SYSCALL_ENTRY(mincore),
- SYSCALL_ENTRY(mkdirat),
- SYSCALL_ENTRY(mknodat),
- SYSCALL_ENTRY(mlock),
- SYSCALL_ENTRY(mlockall),
- SYSCALL_ENTRY(munlock),
- SYSCALL_ENTRY(munlockall),
- SYSCALL_ENTRY(mount),
- SYSCALL_ENTRY(mprotect),
- SYSCALL_ENTRY(mremap),
- SYSCALL_ENTRY(msync),
- SYSCALL_ENTRY(munmap),
- SYSCALL_ENTRY(name_to_handle_at),
- SYSCALL_ENTRY(nanosleep),
- SYSCALL_ENTRY(open_by_handle_at),
- SYSCALL_ENTRY(openat),
- SYSCALL_ENTRY_ALT(perf_event_open, android_perf_event_open),
- SYSCALL_ENTRY(personality),
- SYSCALL_ENTRY(pipe2),
- SYSCALL_ENTRY(ppoll),
- SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
- SYSCALL_ENTRY(pread64),
- SYSCALL_ENTRY(preadv),
- SYSCALL_ENTRY(prlimit64),
- SYSCALL_ENTRY(process_vm_readv),
- SYSCALL_ENTRY(process_vm_writev),
- SYSCALL_ENTRY(pselect6),
- SYSCALL_ENTRY(ptrace),
- SYSCALL_ENTRY(pwrite64),
- SYSCALL_ENTRY(pwritev),
- SYSCALL_ENTRY(read),
- SYSCALL_ENTRY(readahead),
- SYSCALL_ENTRY(readv),
- SYSCALL_ENTRY(readlinkat),
- SYSCALL_ENTRY(recvmmsg),
- SYSCALL_ENTRY(remap_file_pages),
- SYSCALL_ENTRY(removexattr),
- SYSCALL_ENTRY(renameat),
- SYSCALL_ENTRY(restart_syscall),
- SYSCALL_ENTRY(rt_sigaction),
- SYSCALL_ENTRY(rt_sigpending),
- SYSCALL_ENTRY(rt_sigprocmask),
- SYSCALL_ENTRY(rt_sigqueueinfo),
- SYSCALL_ENTRY(rt_sigreturn),
- SYSCALL_ENTRY(rt_sigsuspend),
- SYSCALL_ENTRY(rt_sigtimedwait),
- SYSCALL_ENTRY(rt_tgsigqueueinfo),
- SYSCALL_ENTRY(sched_get_priority_max),
- SYSCALL_ENTRY(sched_get_priority_min),
- SYSCALL_ENTRY(sched_getaffinity),
- SYSCALL_ENTRY(sched_getparam),
- SYSCALL_ENTRY(sched_getscheduler),
- SYSCALL_ENTRY(sched_setaffinity),
- SYSCALL_ENTRY_ALT(sched_setparam, android_sched_setparam),
- SYSCALL_ENTRY_ALT(sched_setscheduler, android_sched_setscheduler),
- SYSCALL_ENTRY(sched_yield),
- SYSCALL_ENTRY(seccomp),
- SYSCALL_ENTRY(sendfile),
- SYSCALL_ENTRY(sendmmsg),
- SYSCALL_ENTRY(set_robust_list),
- SYSCALL_ENTRY(set_tid_address),
- SYSCALL_ENTRY(setdomainname),
- SYSCALL_ENTRY(setitimer),
- SYSCALL_ENTRY(setns),
- SYSCALL_ENTRY(setpgid),
- SYSCALL_ENTRY_ALT(setpriority, android_setpriority),
- SYSCALL_ENTRY(setrlimit),
- SYSCALL_ENTRY(setsid),
- SYSCALL_ENTRY(settimeofday),
- SYSCALL_ENTRY(setxattr),
- SYSCALL_ENTRY(signalfd4),
- SYSCALL_ENTRY(sigaltstack),
- SYSCALL_ENTRY(splice),
- SYSCALL_ENTRY(statfs),
- SYSCALL_ENTRY(symlinkat),
- SYSCALL_ENTRY(sysinfo),
- SYSCALL_ENTRY(syslog),
- SYSCALL_ENTRY(syncfs),
- SYSCALL_ENTRY(tee),
- SYSCALL_ENTRY(tgkill),
- SYSCALL_ENTRY(tkill),
- SYSCALL_ENTRY(timer_create),
- SYSCALL_ENTRY(timer_delete),
- SYSCALL_ENTRY(timer_gettime),
- SYSCALL_ENTRY(timer_getoverrun),
- SYSCALL_ENTRY(timer_settime),
- SYSCALL_ENTRY(timerfd_create),
- SYSCALL_ENTRY(timerfd_gettime),
- SYSCALL_ENTRY(timerfd_settime),
- SYSCALL_ENTRY(times),
- SYSCALL_ENTRY(truncate),
- SYSCALL_ENTRY(umask),
- SYSCALL_ENTRY(umount2),
- SYSCALL_ENTRY(uname),
- SYSCALL_ENTRY(unlinkat),
- SYSCALL_ENTRY(unshare),
- SYSCALL_ENTRY(utimensat),
- SYSCALL_ENTRY(vmsplice),
- SYSCALL_ENTRY(wait4),
- SYSCALL_ENTRY(waitid),
- SYSCALL_ENTRY(write),
- SYSCALL_ENTRY(writev),
-
- /*
- * Deprecated syscalls which are not wired up on new architectures
- * such as ARM64.
- */
-#ifndef CONFIG_ARM64
- SYSCALL_ENTRY(access),
- SYSCALL_ENTRY(chmod),
- SYSCALL_ENTRY(open),
- SYSCALL_ENTRY(creat),
- SYSCALL_ENTRY(dup2),
- SYSCALL_ENTRY(epoll_create),
- SYSCALL_ENTRY(epoll_wait),
- SYSCALL_ENTRY(eventfd),
- SYSCALL_ENTRY(fork),
- SYSCALL_ENTRY(futimesat),
- SYSCALL_ENTRY(getdents),
- SYSCALL_ENTRY(getpgrp),
- SYSCALL_ENTRY(inotify_init),
- SYSCALL_ENTRY(link),
- SYSCALL_ENTRY(lstat),
- SYSCALL_ENTRY(mkdir),
- SYSCALL_ENTRY(mknod),
- SYSCALL_ENTRY(pipe),
- SYSCALL_ENTRY(poll),
- SYSCALL_ENTRY(readlink),
- SYSCALL_ENTRY(rename),
- SYSCALL_ENTRY(rmdir),
- SYSCALL_ENTRY(stat),
- SYSCALL_ENTRY(symlink),
- SYSCALL_ENTRY(time),
- SYSCALL_ENTRY(unlink),
- SYSCALL_ENTRY(ustat),
- SYSCALL_ENTRY(utimes),
- SYSCALL_ENTRY(vfork),
-#endif
-
- /*
- * waitpid(2) is deprecated on most architectures, but still exists
- * on IA32.
- */
-#ifdef CONFIG_X86_32
- SYSCALL_ENTRY(waitpid),
-#endif
-
- /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
-#ifdef CONFIG_X86_32
- SYSCALL_ENTRY(socketcall),
-#else
- SYSCALL_ENTRY(accept),
- SYSCALL_ENTRY(accept4),
- SYSCALL_ENTRY(bind),
- SYSCALL_ENTRY(connect),
- SYSCALL_ENTRY(getpeername),
- SYSCALL_ENTRY(getsockname),
- SYSCALL_ENTRY(getsockopt),
- SYSCALL_ENTRY(listen),
- SYSCALL_ENTRY(recvfrom),
- SYSCALL_ENTRY(recvmsg),
- SYSCALL_ENTRY(sendmsg),
- SYSCALL_ENTRY(sendto),
- SYSCALL_ENTRY(setsockopt),
- SYSCALL_ENTRY(shutdown),
- SYSCALL_ENTRY_ALT(socket, android_socket),
- SYSCALL_ENTRY(socketpair),
- /*
- * recv(2)/send(2) are officially deprecated, but their entry-points
- * still exist on ARM.
- */
-#ifdef CONFIG_ARM
- SYSCALL_ENTRY(recv),
- SYSCALL_ENTRY(send),
-#endif
-#endif
-
- /*
- * posix_fadvise(2) and sync_file_range(2) have ARM-specific wrappers
- * to deal with register alignment.
- */
-#ifdef CONFIG_ARM
- SYSCALL_ENTRY(arm_fadvise64_64),
- SYSCALL_ENTRY(sync_file_range2),
-#else
-#ifdef CONFIG_X86_32
- SYSCALL_ENTRY(fadvise64_64),
-#endif
- SYSCALL_ENTRY(fadvise64),
- SYSCALL_ENTRY(sync_file_range),
-#endif
-
- /* 64-bit only syscalls. */
-#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
- SYSCALL_ENTRY(fchown),
- SYSCALL_ENTRY(getegid),
- SYSCALL_ENTRY(geteuid),
- SYSCALL_ENTRY(getgid),
- SYSCALL_ENTRY(getgroups),
- SYSCALL_ENTRY(getresgid),
- SYSCALL_ENTRY(getresuid),
- SYSCALL_ENTRY(getuid),
- SYSCALL_ENTRY(newfstatat),
- SYSCALL_ENTRY(mmap),
- SYSCALL_ENTRY(setgid),
- SYSCALL_ENTRY(setgroups),
- SYSCALL_ENTRY(setregid),
- SYSCALL_ENTRY(setresgid),
- SYSCALL_ENTRY(setresuid),
- SYSCALL_ENTRY(setreuid),
- SYSCALL_ENTRY(setuid),
- /*
- * chown(2), lchown(2), and select(2) are deprecated and not wired up
- * on ARM64.
- */
-#ifndef CONFIG_ARM64
- SYSCALL_ENTRY(chown),
- SYSCALL_ENTRY(lchown),
- SYSCALL_ENTRY(select),
-#endif
-#endif
-
- /* 32-bit only syscalls. */
-#if defined(CONFIG_ARM) || defined(CONFIG_X86_32)
- SYSCALL_ENTRY(chown32),
- SYSCALL_ENTRY(fchown32),
- SYSCALL_ENTRY(fcntl64),
- SYSCALL_ENTRY(fstat64),
- SYSCALL_ENTRY(fstatat64),
- SYSCALL_ENTRY(fstatfs64),
- SYSCALL_ENTRY(ftruncate64),
- SYSCALL_ENTRY(getegid32),
- SYSCALL_ENTRY(geteuid32),
- SYSCALL_ENTRY(getgid32),
- SYSCALL_ENTRY(getgroups32),
- SYSCALL_ENTRY(getresgid32),
- SYSCALL_ENTRY(getresuid32),
- SYSCALL_ENTRY(getuid32),
- SYSCALL_ENTRY(lchown32),
- SYSCALL_ENTRY(lstat64),
- SYSCALL_ENTRY(mmap2),
- SYSCALL_ENTRY(_newselect),
- SYSCALL_ENTRY(_llseek),
- SYSCALL_ENTRY(sigaction),
- SYSCALL_ENTRY(sigpending),
- SYSCALL_ENTRY(sigprocmask),
- SYSCALL_ENTRY(sigreturn),
- SYSCALL_ENTRY(sigsuspend),
- SYSCALL_ENTRY(sendfile64),
- SYSCALL_ENTRY(setgid32),
- SYSCALL_ENTRY(setgroups32),
- SYSCALL_ENTRY(setregid32),
- SYSCALL_ENTRY(setresgid32),
- SYSCALL_ENTRY(setresuid32),
- SYSCALL_ENTRY(setreuid32),
- SYSCALL_ENTRY(setuid32),
- SYSCALL_ENTRY(stat64),
- SYSCALL_ENTRY(statfs64),
- SYSCALL_ENTRY(truncate64),
- SYSCALL_ENTRY(ugetrlimit),
-#endif
-
- /* X86-specific syscalls. */
-#ifdef CONFIG_X86
- SYSCALL_ENTRY(modify_ldt),
- SYSCALL_ENTRY(set_thread_area),
-#endif
-
-#ifdef CONFIG_X86_64
- SYSCALL_ENTRY(arch_prctl),
-#endif
-
-}; /* end android whitelist */
-
-static struct syscall_whitelist_entry third_party_whitelist[] = {
- SYSCALL_ENTRY(brk),
- SYSCALL_ENTRY(chdir),
- SYSCALL_ENTRY(clock_gettime),
- SYSCALL_ENTRY(clone),
- SYSCALL_ENTRY(close),
- SYSCALL_ENTRY(dup),
- SYSCALL_ENTRY(execve),
- SYSCALL_ENTRY(exit),
- SYSCALL_ENTRY(exit_group),
- SYSCALL_ENTRY(fcntl),
- SYSCALL_ENTRY(fstat),
- SYSCALL_ENTRY(futex),
- SYSCALL_ENTRY(getcwd),
- SYSCALL_ENTRY(getdents64),
- SYSCALL_ENTRY(getpid),
- SYSCALL_ENTRY(getpgid),
- SYSCALL_ENTRY(getppid),
- SYSCALL_ENTRY(getpriority),
- SYSCALL_ENTRY(getrlimit),
- SYSCALL_ENTRY(getsid),
- SYSCALL_ENTRY(gettimeofday),
- SYSCALL_ENTRY(ioctl),
- SYSCALL_ENTRY(lseek),
- SYSCALL_ENTRY(madvise),
- SYSCALL_ENTRY(memfd_create),
- SYSCALL_ENTRY(mprotect),
- SYSCALL_ENTRY(munmap),
- SYSCALL_ENTRY(nanosleep),
- SYSCALL_ENTRY(openat),
- SYSCALL_ENTRY(prlimit64),
- SYSCALL_ENTRY(read),
- SYSCALL_ENTRY(rt_sigaction),
- SYSCALL_ENTRY(rt_sigprocmask),
- SYSCALL_ENTRY(rt_sigreturn),
- SYSCALL_ENTRY(sendfile),
- SYSCALL_ENTRY(set_robust_list),
- SYSCALL_ENTRY(set_tid_address),
- SYSCALL_ENTRY(setpgid),
- SYSCALL_ENTRY(setpriority),
- SYSCALL_ENTRY(setsid),
- SYSCALL_ENTRY(syslog),
- SYSCALL_ENTRY(statfs),
- SYSCALL_ENTRY(umask),
- SYSCALL_ENTRY(uname),
- SYSCALL_ENTRY(wait4),
- SYSCALL_ENTRY(write),
- SYSCALL_ENTRY(writev),
-
- /*
- * Deprecated syscalls which are not wired up on new architectures
- * such as ARM64.
- */
-#ifndef CONFIG_ARM64
- SYSCALL_ENTRY(access),
- SYSCALL_ENTRY(creat),
- SYSCALL_ENTRY(dup2),
- SYSCALL_ENTRY(getdents),
- SYSCALL_ENTRY(getpgrp),
- SYSCALL_ENTRY(lstat),
- SYSCALL_ENTRY(mkdir),
- SYSCALL_ENTRY(open),
- SYSCALL_ENTRY(pipe),
- SYSCALL_ENTRY(poll),
- SYSCALL_ENTRY(readlink),
- SYSCALL_ENTRY(stat),
- SYSCALL_ENTRY(unlink),
-#endif
-
- /* 32-bit only syscalls. */
-#if defined(CONFIG_ARM) || defined(CONFIG_X86_32)
- SYSCALL_ENTRY(fcntl64),
- SYSCALL_ENTRY(fstat64),
- SYSCALL_ENTRY(geteuid32),
- SYSCALL_ENTRY(getuid32),
- SYSCALL_ENTRY(_llseek),
- SYSCALL_ENTRY(lstat64),
- SYSCALL_ENTRY(_newselect),
- SYSCALL_ENTRY(mmap2),
- SYSCALL_ENTRY(stat64),
- SYSCALL_ENTRY(ugetrlimit),
-#endif
-
-
- /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
-#ifdef CONFIG_X86_32
- SYSCALL_ENTRY(socketcall),
-#else
- SYSCALL_ENTRY(accept),
- SYSCALL_ENTRY(bind),
- SYSCALL_ENTRY(connect),
- SYSCALL_ENTRY(listen),
- SYSCALL_ENTRY(recvfrom),
- SYSCALL_ENTRY(recvmsg),
- SYSCALL_ENTRY(sendmsg),
- SYSCALL_ENTRY(sendto),
- SYSCALL_ENTRY(setsockopt),
- SYSCALL_ENTRY(socket),
- SYSCALL_ENTRY(socketpair),
-#endif
-
- /* 64-bit only syscalls. */
-#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
- SYSCALL_ENTRY(getegid),
- SYSCALL_ENTRY(geteuid),
- SYSCALL_ENTRY(getgid),
- SYSCALL_ENTRY(getuid),
- SYSCALL_ENTRY(mmap),
- SYSCALL_ENTRY(setgid),
- SYSCALL_ENTRY(setuid),
- /*
- * chown(2), lchown(2), and select(2) are deprecated and not wired up
- * on ARM64.
- */
-#ifndef CONFIG_ARM64
- SYSCALL_ENTRY(select),
-#endif
-#endif
-
- /* X86_64-specific syscalls. */
-#ifdef CONFIG_X86_64
- SYSCALL_ENTRY(arch_prctl),
-#endif
-};
-
-
#ifdef CONFIG_COMPAT
-static struct syscall_whitelist_entry read_write_test_compat_whitelist[] = {
- COMPAT_SYSCALL_ENTRY(exit),
- COMPAT_SYSCALL_ENTRY(open),
- COMPAT_SYSCALL_ENTRY(close),
- COMPAT_SYSCALL_ENTRY(read),
- COMPAT_SYSCALL_ENTRY(write),
- COMPAT_SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
-};
-
static asmlinkage long android_compat_adjtimex(struct compat_timex __user *buf)
{
struct compat_timex kbuf;
@@ -1142,419 +311,16 @@ android_compat_clock_adjtime(const clockid_t which_clock,
return -EPERM;
return compat_sys_clock_adjtime(which_clock, buf);
}
-
-static struct syscall_whitelist_entry android_compat_whitelist[] = {
- COMPAT_SYSCALL_ENTRY(access),
- COMPAT_SYSCALL_ENTRY_ALT(adjtimex, android_compat_adjtimex),
- COMPAT_SYSCALL_ENTRY(brk),
- COMPAT_SYSCALL_ENTRY(capget),
- COMPAT_SYSCALL_ENTRY(capset),
- COMPAT_SYSCALL_ENTRY(chdir),
- COMPAT_SYSCALL_ENTRY(chmod),
- COMPAT_SYSCALL_ENTRY_ALT(clock_adjtime, android_compat_clock_adjtime),
- COMPAT_SYSCALL_ENTRY(clock_getres),
- COMPAT_SYSCALL_ENTRY(clock_gettime),
- COMPAT_SYSCALL_ENTRY(clock_nanosleep),
- COMPAT_SYSCALL_ENTRY(clock_settime),
- COMPAT_SYSCALL_ENTRY(clone),
- COMPAT_SYSCALL_ENTRY(close),
- COMPAT_SYSCALL_ENTRY(creat),
- COMPAT_SYSCALL_ENTRY(dup),
- COMPAT_SYSCALL_ENTRY(dup2),
- COMPAT_SYSCALL_ENTRY(dup3),
- COMPAT_SYSCALL_ENTRY(epoll_create),
- COMPAT_SYSCALL_ENTRY(epoll_create1),
- COMPAT_SYSCALL_ENTRY(epoll_ctl),
- COMPAT_SYSCALL_ENTRY(epoll_wait),
- COMPAT_SYSCALL_ENTRY(epoll_pwait),
- COMPAT_SYSCALL_ENTRY(eventfd),
- COMPAT_SYSCALL_ENTRY(eventfd2),
- COMPAT_SYSCALL_ENTRY(execve),
- COMPAT_SYSCALL_ENTRY(exit),
- COMPAT_SYSCALL_ENTRY(exit_group),
- COMPAT_SYSCALL_ENTRY(faccessat),
- COMPAT_SYSCALL_ENTRY(fallocate),
- COMPAT_SYSCALL_ENTRY(fchdir),
- COMPAT_SYSCALL_ENTRY(fchmod),
- COMPAT_SYSCALL_ENTRY(fchmodat),
- COMPAT_SYSCALL_ENTRY(fchownat),
- COMPAT_SYSCALL_ENTRY(fcntl),
- COMPAT_SYSCALL_ENTRY(fdatasync),
- COMPAT_SYSCALL_ENTRY(fgetxattr),
- COMPAT_SYSCALL_ENTRY(flistxattr),
- COMPAT_SYSCALL_ENTRY(flock),
- COMPAT_SYSCALL_ENTRY(fork),
- COMPAT_SYSCALL_ENTRY(fremovexattr),
- COMPAT_SYSCALL_ENTRY(fsetxattr),
- COMPAT_SYSCALL_ENTRY(fstat),
- COMPAT_SYSCALL_ENTRY(fstatfs),
- COMPAT_SYSCALL_ENTRY(fsync),
- COMPAT_SYSCALL_ENTRY(ftruncate),
- COMPAT_SYSCALL_ENTRY(futex),
- COMPAT_SYSCALL_ENTRY(futimesat),
- COMPAT_SYSCALL_ENTRY_ALT(getcpu, android_getcpu),
- COMPAT_SYSCALL_ENTRY(getcwd),
- COMPAT_SYSCALL_ENTRY(getdents),
- COMPAT_SYSCALL_ENTRY(getdents64),
- COMPAT_SYSCALL_ENTRY(getpgid),
- COMPAT_SYSCALL_ENTRY(getpgrp),
- COMPAT_SYSCALL_ENTRY(getpid),
- COMPAT_SYSCALL_ENTRY(getppid),
- COMPAT_SYSCALL_ENTRY_ALT(getpriority, android_getpriority),
- COMPAT_SYSCALL_ENTRY(getrandom),
- COMPAT_SYSCALL_ENTRY(getrusage),
- COMPAT_SYSCALL_ENTRY(getsid),
- COMPAT_SYSCALL_ENTRY(gettid),
- COMPAT_SYSCALL_ENTRY(gettimeofday),
- COMPAT_SYSCALL_ENTRY(getxattr),
- COMPAT_SYSCALL_ENTRY(inotify_add_watch),
- COMPAT_SYSCALL_ENTRY(inotify_init),
- COMPAT_SYSCALL_ENTRY(inotify_init1),
- COMPAT_SYSCALL_ENTRY(inotify_rm_watch),
- COMPAT_SYSCALL_ENTRY(ioctl),
- COMPAT_SYSCALL_ENTRY(ioprio_set),
- COMPAT_SYSCALL_ENTRY(kill),
- COMPAT_SYSCALL_ENTRY(lgetxattr),
- COMPAT_SYSCALL_ENTRY(link),
- COMPAT_SYSCALL_ENTRY(linkat),
- COMPAT_SYSCALL_ENTRY(listxattr),
- COMPAT_SYSCALL_ENTRY(llistxattr),
- COMPAT_SYSCALL_ENTRY(lremovexattr),
- COMPAT_SYSCALL_ENTRY(lseek),
- COMPAT_SYSCALL_ENTRY(lsetxattr),
- COMPAT_SYSCALL_ENTRY(lstat),
- COMPAT_SYSCALL_ENTRY(madvise),
- COMPAT_SYSCALL_ENTRY(memfd_create),
- COMPAT_SYSCALL_ENTRY(mincore),
- COMPAT_SYSCALL_ENTRY(mkdir),
- COMPAT_SYSCALL_ENTRY(mkdirat),
- COMPAT_SYSCALL_ENTRY(mknod),
- COMPAT_SYSCALL_ENTRY(mknodat),
- COMPAT_SYSCALL_ENTRY(mlock),
- COMPAT_SYSCALL_ENTRY(mlockall),
- COMPAT_SYSCALL_ENTRY(munlock),
- COMPAT_SYSCALL_ENTRY(munlockall),
- COMPAT_SYSCALL_ENTRY(mount),
- COMPAT_SYSCALL_ENTRY(mprotect),
- COMPAT_SYSCALL_ENTRY(mremap),
- COMPAT_SYSCALL_ENTRY(msync),
- COMPAT_SYSCALL_ENTRY(munmap),
- COMPAT_SYSCALL_ENTRY(name_to_handle_at),
- COMPAT_SYSCALL_ENTRY(nanosleep),
- COMPAT_SYSCALL_ENTRY(open),
- COMPAT_SYSCALL_ENTRY(open_by_handle_at),
- COMPAT_SYSCALL_ENTRY(openat),
- COMPAT_SYSCALL_ENTRY_ALT(perf_event_open, android_perf_event_open),
- COMPAT_SYSCALL_ENTRY(personality),
- COMPAT_SYSCALL_ENTRY(pipe),
- COMPAT_SYSCALL_ENTRY(pipe2),
- COMPAT_SYSCALL_ENTRY(poll),
- COMPAT_SYSCALL_ENTRY(ppoll),
- COMPAT_SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
- COMPAT_SYSCALL_ENTRY(pread64),
- COMPAT_SYSCALL_ENTRY(preadv),
- COMPAT_SYSCALL_ENTRY(prlimit64),
- COMPAT_SYSCALL_ENTRY(process_vm_readv),
- COMPAT_SYSCALL_ENTRY(process_vm_writev),
- COMPAT_SYSCALL_ENTRY(pselect6),
- COMPAT_SYSCALL_ENTRY(ptrace),
- COMPAT_SYSCALL_ENTRY(pwrite64),
- COMPAT_SYSCALL_ENTRY(pwritev),
- COMPAT_SYSCALL_ENTRY(read),
- COMPAT_SYSCALL_ENTRY(readahead),
- COMPAT_SYSCALL_ENTRY(readv),
- COMPAT_SYSCALL_ENTRY(readlink),
- COMPAT_SYSCALL_ENTRY(readlinkat),
- COMPAT_SYSCALL_ENTRY(recvmmsg),
- COMPAT_SYSCALL_ENTRY(remap_file_pages),
- COMPAT_SYSCALL_ENTRY(removexattr),
- COMPAT_SYSCALL_ENTRY(rename),
- COMPAT_SYSCALL_ENTRY(renameat),
- COMPAT_SYSCALL_ENTRY(restart_syscall),
- COMPAT_SYSCALL_ENTRY(rmdir),
- COMPAT_SYSCALL_ENTRY(rt_sigaction),
- COMPAT_SYSCALL_ENTRY(rt_sigpending),
- COMPAT_SYSCALL_ENTRY(rt_sigprocmask),
- COMPAT_SYSCALL_ENTRY(rt_sigqueueinfo),
- COMPAT_SYSCALL_ENTRY(rt_sigreturn),
- COMPAT_SYSCALL_ENTRY(rt_sigsuspend),
- COMPAT_SYSCALL_ENTRY(rt_sigtimedwait),
- COMPAT_SYSCALL_ENTRY(rt_tgsigqueueinfo),
- COMPAT_SYSCALL_ENTRY(sched_get_priority_max),
- COMPAT_SYSCALL_ENTRY(sched_get_priority_min),
- COMPAT_SYSCALL_ENTRY(sched_getaffinity),
- COMPAT_SYSCALL_ENTRY(sched_getparam),
- COMPAT_SYSCALL_ENTRY(sched_getscheduler),
- COMPAT_SYSCALL_ENTRY(sched_setaffinity),
- COMPAT_SYSCALL_ENTRY_ALT(sched_setparam,
- android_sched_setparam),
- COMPAT_SYSCALL_ENTRY_ALT(sched_setscheduler,
- android_sched_setscheduler),
- COMPAT_SYSCALL_ENTRY(sched_yield),
- COMPAT_SYSCALL_ENTRY(seccomp),
- COMPAT_SYSCALL_ENTRY(sendfile),
- COMPAT_SYSCALL_ENTRY(sendfile64),
- COMPAT_SYSCALL_ENTRY(sendmmsg),
- COMPAT_SYSCALL_ENTRY(set_robust_list),
- COMPAT_SYSCALL_ENTRY(set_tid_address),
- COMPAT_SYSCALL_ENTRY(setitimer),
- COMPAT_SYSCALL_ENTRY(setns),
- COMPAT_SYSCALL_ENTRY(setpgid),
- COMPAT_SYSCALL_ENTRY_ALT(setpriority, android_setpriority),
- COMPAT_SYSCALL_ENTRY(setrlimit),
- COMPAT_SYSCALL_ENTRY(setsid),
- COMPAT_SYSCALL_ENTRY(settimeofday),
- COMPAT_SYSCALL_ENTRY(setxattr),
- COMPAT_SYSCALL_ENTRY(signalfd4),
- COMPAT_SYSCALL_ENTRY(sigaltstack),
- COMPAT_SYSCALL_ENTRY(splice),
- COMPAT_SYSCALL_ENTRY(stat),
- COMPAT_SYSCALL_ENTRY(statfs),
- COMPAT_SYSCALL_ENTRY(symlink),
- COMPAT_SYSCALL_ENTRY(symlinkat),
- COMPAT_SYSCALL_ENTRY(sysinfo),
- COMPAT_SYSCALL_ENTRY(syslog),
- COMPAT_SYSCALL_ENTRY(tgkill),
- COMPAT_SYSCALL_ENTRY(tee),
- COMPAT_SYSCALL_ENTRY(tkill),
- COMPAT_SYSCALL_ENTRY(timer_create),
- COMPAT_SYSCALL_ENTRY(timer_delete),
- COMPAT_SYSCALL_ENTRY(timer_gettime),
- COMPAT_SYSCALL_ENTRY(timer_getoverrun),
- COMPAT_SYSCALL_ENTRY(timer_settime),
- COMPAT_SYSCALL_ENTRY(timerfd_create),
- COMPAT_SYSCALL_ENTRY(timerfd_gettime),
- COMPAT_SYSCALL_ENTRY(timerfd_settime),
- COMPAT_SYSCALL_ENTRY(times),
- COMPAT_SYSCALL_ENTRY(truncate),
- COMPAT_SYSCALL_ENTRY(umask),
- COMPAT_SYSCALL_ENTRY(umount2),
- COMPAT_SYSCALL_ENTRY(uname),
- COMPAT_SYSCALL_ENTRY(unlink),
- COMPAT_SYSCALL_ENTRY(unlinkat),
- COMPAT_SYSCALL_ENTRY(unshare),
- COMPAT_SYSCALL_ENTRY(ustat),
- COMPAT_SYSCALL_ENTRY(utimensat),
- COMPAT_SYSCALL_ENTRY(utimes),
- COMPAT_SYSCALL_ENTRY(vfork),
- COMPAT_SYSCALL_ENTRY(vmsplice),
- COMPAT_SYSCALL_ENTRY(wait4),
- COMPAT_SYSCALL_ENTRY(waitid),
- COMPAT_SYSCALL_ENTRY(write),
- COMPAT_SYSCALL_ENTRY(writev),
- COMPAT_SYSCALL_ENTRY(chown32),
- COMPAT_SYSCALL_ENTRY(fchown32),
- COMPAT_SYSCALL_ENTRY(fcntl64),
- COMPAT_SYSCALL_ENTRY(fstat64),
- COMPAT_SYSCALL_ENTRY(fstatat64),
- COMPAT_SYSCALL_ENTRY(fstatfs64),
- COMPAT_SYSCALL_ENTRY(ftruncate64),
- COMPAT_SYSCALL_ENTRY(getegid),
- COMPAT_SYSCALL_ENTRY(getegid32),
- COMPAT_SYSCALL_ENTRY(geteuid),
- COMPAT_SYSCALL_ENTRY(geteuid32),
- COMPAT_SYSCALL_ENTRY(getgid),
- COMPAT_SYSCALL_ENTRY(getgid32),
- COMPAT_SYSCALL_ENTRY(getgroups32),
- COMPAT_SYSCALL_ENTRY(getresgid32),
- COMPAT_SYSCALL_ENTRY(getresuid32),
- COMPAT_SYSCALL_ENTRY(getuid),
- COMPAT_SYSCALL_ENTRY(getuid32),
- COMPAT_SYSCALL_ENTRY(lchown32),
- COMPAT_SYSCALL_ENTRY(lstat64),
- COMPAT_SYSCALL_ENTRY(mmap2),
- COMPAT_SYSCALL_ENTRY(_newselect),
- COMPAT_SYSCALL_ENTRY(_llseek),
- COMPAT_SYSCALL_ENTRY(setdomainname),
- COMPAT_SYSCALL_ENTRY(sigaction),
- COMPAT_SYSCALL_ENTRY(sigpending),
- COMPAT_SYSCALL_ENTRY(sigprocmask),
- COMPAT_SYSCALL_ENTRY(sigreturn),
- COMPAT_SYSCALL_ENTRY(sigsuspend),
- COMPAT_SYSCALL_ENTRY(setgid32),
- COMPAT_SYSCALL_ENTRY(setgroups32),
- COMPAT_SYSCALL_ENTRY(setregid32),
- COMPAT_SYSCALL_ENTRY(setresgid32),
- COMPAT_SYSCALL_ENTRY(setresuid32),
- COMPAT_SYSCALL_ENTRY(setreuid32),
- COMPAT_SYSCALL_ENTRY(setuid32),
- COMPAT_SYSCALL_ENTRY(stat64),
- COMPAT_SYSCALL_ENTRY(statfs64),
- COMPAT_SYSCALL_ENTRY(syncfs),
- COMPAT_SYSCALL_ENTRY(truncate64),
- COMPAT_SYSCALL_ENTRY(ugetrlimit),
-
- /*
- * waitpid(2) is deprecated on most architectures, but still exists
- * on IA32.
- */
-#ifdef CONFIG_X86
- COMPAT_SYSCALL_ENTRY(waitpid),
-#endif
-
- /*
- * posix_fadvise(2) and sync_file_range(2) have ARM-specific wrappers
- * to deal with register alignment.
- */
-#ifdef CONFIG_ARM64
- COMPAT_SYSCALL_ENTRY(arm_fadvise64_64),
- COMPAT_SYSCALL_ENTRY(sync_file_range2),
-#else
- COMPAT_SYSCALL_ENTRY(fadvise64_64),
- COMPAT_SYSCALL_ENTRY(fadvise64),
- COMPAT_SYSCALL_ENTRY(sync_file_range),
-#endif
-
- /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
-#ifdef CONFIG_X86
- COMPAT_SYSCALL_ENTRY(socketcall),
-#else
- COMPAT_SYSCALL_ENTRY(accept),
- COMPAT_SYSCALL_ENTRY(accept4),
- COMPAT_SYSCALL_ENTRY(bind),
- COMPAT_SYSCALL_ENTRY(connect),
- COMPAT_SYSCALL_ENTRY(getpeername),
- COMPAT_SYSCALL_ENTRY(getsockname),
- COMPAT_SYSCALL_ENTRY(getsockopt),
- COMPAT_SYSCALL_ENTRY(listen),
- COMPAT_SYSCALL_ENTRY(recvfrom),
- COMPAT_SYSCALL_ENTRY(recvmsg),
- COMPAT_SYSCALL_ENTRY(sendmsg),
- COMPAT_SYSCALL_ENTRY(sendto),
- COMPAT_SYSCALL_ENTRY(setsockopt),
- COMPAT_SYSCALL_ENTRY(shutdown),
- COMPAT_SYSCALL_ENTRY(socket),
- COMPAT_SYSCALL_ENTRY(socketpair),
- COMPAT_SYSCALL_ENTRY(recv),
- COMPAT_SYSCALL_ENTRY(send),
-#endif
-
- /*
- * getrlimit(2) and time(2) are deprecated and not wired in the ARM
- * compat table on ARM64.
- */
-#ifndef CONFIG_ARM64
- COMPAT_SYSCALL_ENTRY(getrlimit),
- COMPAT_SYSCALL_ENTRY(time),
-#endif
-
- /* x86-specific syscalls. */
-#ifdef CONFIG_X86
- COMPAT_SYSCALL_ENTRY(modify_ldt),
- COMPAT_SYSCALL_ENTRY(set_thread_area),
-#endif
-};
-
-static struct syscall_whitelist_entry third_party_compat_whitelist[] = {
- COMPAT_SYSCALL_ENTRY(access),
- COMPAT_SYSCALL_ENTRY(brk),
- COMPAT_SYSCALL_ENTRY(chdir),
- COMPAT_SYSCALL_ENTRY(clock_gettime),
- COMPAT_SYSCALL_ENTRY(clone),
- COMPAT_SYSCALL_ENTRY(close),
- COMPAT_SYSCALL_ENTRY(creat),
- COMPAT_SYSCALL_ENTRY(dup),
- COMPAT_SYSCALL_ENTRY(dup2),
- COMPAT_SYSCALL_ENTRY(execve),
- COMPAT_SYSCALL_ENTRY(exit),
- COMPAT_SYSCALL_ENTRY(exit_group),
- COMPAT_SYSCALL_ENTRY(fcntl),
- COMPAT_SYSCALL_ENTRY(fcntl64),
- COMPAT_SYSCALL_ENTRY(fstat),
- COMPAT_SYSCALL_ENTRY(fstat64),
- COMPAT_SYSCALL_ENTRY(futex),
- COMPAT_SYSCALL_ENTRY(getcwd),
- COMPAT_SYSCALL_ENTRY(getdents),
- COMPAT_SYSCALL_ENTRY(getdents64),
- COMPAT_SYSCALL_ENTRY(getegid),
- COMPAT_SYSCALL_ENTRY(geteuid),
- COMPAT_SYSCALL_ENTRY(geteuid32),
- COMPAT_SYSCALL_ENTRY(getgid),
- COMPAT_SYSCALL_ENTRY(getpgid),
- COMPAT_SYSCALL_ENTRY(getpgrp),
- COMPAT_SYSCALL_ENTRY(getpid),
- COMPAT_SYSCALL_ENTRY(getpriority),
- COMPAT_SYSCALL_ENTRY(getppid),
- COMPAT_SYSCALL_ENTRY(getsid),
- COMPAT_SYSCALL_ENTRY(gettimeofday),
- COMPAT_SYSCALL_ENTRY(getuid),
- COMPAT_SYSCALL_ENTRY(getuid32),
- COMPAT_SYSCALL_ENTRY(ioctl),
- COMPAT_SYSCALL_ENTRY(_llseek),
- COMPAT_SYSCALL_ENTRY(lseek),
- COMPAT_SYSCALL_ENTRY(lstat),
- COMPAT_SYSCALL_ENTRY(lstat64),
- COMPAT_SYSCALL_ENTRY(madvise),
- COMPAT_SYSCALL_ENTRY(memfd_create),
- COMPAT_SYSCALL_ENTRY(mkdir),
- COMPAT_SYSCALL_ENTRY(mmap2),
- COMPAT_SYSCALL_ENTRY(mprotect),
- COMPAT_SYSCALL_ENTRY(munmap),
- COMPAT_SYSCALL_ENTRY(nanosleep),
- COMPAT_SYSCALL_ENTRY(_newselect),
- COMPAT_SYSCALL_ENTRY(open),
- COMPAT_SYSCALL_ENTRY(openat),
- COMPAT_SYSCALL_ENTRY(pipe),
- COMPAT_SYSCALL_ENTRY(poll),
- COMPAT_SYSCALL_ENTRY(prlimit64),
- COMPAT_SYSCALL_ENTRY(read),
- COMPAT_SYSCALL_ENTRY(readlink),
- COMPAT_SYSCALL_ENTRY(rt_sigaction),
- COMPAT_SYSCALL_ENTRY(rt_sigprocmask),
- COMPAT_SYSCALL_ENTRY(rt_sigreturn),
- COMPAT_SYSCALL_ENTRY(sendfile),
- COMPAT_SYSCALL_ENTRY(set_robust_list),
- COMPAT_SYSCALL_ENTRY(set_tid_address),
- COMPAT_SYSCALL_ENTRY(setgid32),
- COMPAT_SYSCALL_ENTRY(setuid32),
- COMPAT_SYSCALL_ENTRY(setpgid),
- COMPAT_SYSCALL_ENTRY(setpriority),
- COMPAT_SYSCALL_ENTRY(setsid),
- COMPAT_SYSCALL_ENTRY(stat),
- COMPAT_SYSCALL_ENTRY(stat64),
- COMPAT_SYSCALL_ENTRY(statfs),
- COMPAT_SYSCALL_ENTRY(syslog),
- COMPAT_SYSCALL_ENTRY(ugetrlimit),
- COMPAT_SYSCALL_ENTRY(umask),
- COMPAT_SYSCALL_ENTRY(uname),
- COMPAT_SYSCALL_ENTRY(unlink),
- COMPAT_SYSCALL_ENTRY(wait4),
- COMPAT_SYSCALL_ENTRY(write),
- COMPAT_SYSCALL_ENTRY(writev),
-
- /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
-#ifdef CONFIG_X86
- COMPAT_SYSCALL_ENTRY(socketcall),
-#else
- COMPAT_SYSCALL_ENTRY(accept),
- COMPAT_SYSCALL_ENTRY(bind),
- COMPAT_SYSCALL_ENTRY(connect),
- COMPAT_SYSCALL_ENTRY(listen),
- COMPAT_SYSCALL_ENTRY(recvfrom),
- COMPAT_SYSCALL_ENTRY(recvmsg),
- COMPAT_SYSCALL_ENTRY(sendmsg),
- COMPAT_SYSCALL_ENTRY(sendto),
- COMPAT_SYSCALL_ENTRY(setsockopt),
- COMPAT_SYSCALL_ENTRY(socket),
- COMPAT_SYSCALL_ENTRY(socketpair),
-#endif
-
- /*
- * getrlimit(2) is deprecated and not wired in the ARM compat table
- * on ARM64.
- */
-#ifndef CONFIG_ARM64
- COMPAT_SYSCALL_ENTRY(getrlimit),
-#endif
-};
-#endif
+#endif /* CONFIG_COMPAT */
static struct syscall_whitelist whitelists[] = {
SYSCALL_WHITELIST(read_write_test),
SYSCALL_WHITELIST(android),
PERMISSIVE_SYSCALL_WHITELIST(android),
SYSCALL_WHITELIST(third_party),
- PERMISSIVE_SYSCALL_WHITELIST(third_party)
+ PERMISSIVE_SYSCALL_WHITELIST(third_party),
+ SYSCALL_WHITELIST(complete),
+ PERMISSIVE_SYSCALL_WHITELIST(complete)
};
static int alt_syscall_apply_whitelist(const struct syscall_whitelist *wl,
@@ -1621,7 +387,7 @@ alt_syscall_apply_compat_whitelist(const struct syscall_whitelist *wl,
{
return 0;
}
-#endif
+#endif /* CONFIG_COMPAT */
static int alt_syscall_init_one(const struct syscall_whitelist *wl)
{
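
Annotation: the whitelist tables removed above (and re-homed in the new *_whitelists.h headers) all follow one shape: a syscall number plus an optional override handler, with NULL meaning "keep the stock entry". A userspace sketch of that table-driven dispatch (the numbers and names here are illustrative, not real syscall numbers):

#include <errno.h>
#include <stdio.h>

typedef long (*sys_call_ptr_t)(long);

struct whitelist_entry {
	unsigned int nr;
	sys_call_ptr_t alt;	/* NULL: keep the default handler */
};

/* Deny outright, like the android_keyctl() stub added above. */
static long deny(long arg)
{
	(void)arg;
	return -EACCES;
}

#define ENTRY(nr)		{ (nr), NULL }
#define ENTRY_ALT(nr, fn)	{ (nr), (fn) }

static const struct whitelist_entry wl[] = {
	ENTRY(0),		/* allowed with the default handler */
	ENTRY_ALT(250, deny),	/* intercepted */
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(wl) / sizeof(wl[0]); i++)
		printf("nr %u -> %s\n", wl[i].nr,
		       wl[i].alt ? "override" : "default");
	return 0;
}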
diff --git a/security/chromiumos/alt-syscall.h b/security/chromiumos/alt-syscall.h
new file mode 100644
index 00000000000000..ed6c3e65883e04
--- /dev/null
+++ b/security/chromiumos/alt-syscall.h
@@ -0,0 +1,386 @@
+/*
+ * Linux Security Module for Chromium OS
+ *
+ * Copyright 2018 Google LLC. All Rights Reserved
+ *
+ * Authors:
+ * Micah Morton <mortonm@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ALT_SYSCALL_H
+#define ALT_SYSCALL_H
+
+/*
+ * NOTE: this file uses the 'static' keyword for variable and function
+ * definitions because alt-syscall.c is the only .c file that is expected to
+ * include this header. Definitions were pulled out from alt-syscall.c into
+ * this header and the *_whitelists.h headers for the sake of readability.
+ */
+
+static int allow_devmode_syscalls;
+
+#ifdef CONFIG_SYSCTL
+static int zero;
+static int one = 1;
+
+static struct ctl_path chromiumos_sysctl_path[] = {
+ { .procname = "kernel", },
+ { .procname = "chromiumos", },
+ { .procname = "alt_syscall", },
+ { }
+};
+
+static struct ctl_table chromiumos_sysctl_table[] = {
+ {
+ .procname = "allow_devmode_syscalls",
+ .data = &allow_devmode_syscalls,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+ { }
+};
+#endif
+
+struct syscall_whitelist_entry {
+ unsigned int nr;
+ sys_call_ptr_t alt;
+};
+
+struct syscall_whitelist {
+ const char *name;
+ const struct syscall_whitelist_entry *whitelist;
+ unsigned int nr_whitelist;
+#ifdef CONFIG_COMPAT
+ const struct syscall_whitelist_entry *compat_whitelist;
+ unsigned int nr_compat_whitelist;
+#endif
+ bool permissive;
+};
+
+static struct alt_sys_call_table default_table;
+
+#define SYSCALL_ENTRY_ALT(name, func) \
+ { \
+ .nr = __NR_ ## name, \
+ .alt = (sys_call_ptr_t)func, \
+ }
+#define SYSCALL_ENTRY(name) SYSCALL_ENTRY_ALT(name, NULL)
+#define COMPAT_SYSCALL_ENTRY_ALT(name, func) \
+ { \
+ .nr = __NR_compat_ ## name, \
+ .alt = (sys_call_ptr_t)func, \
+ }
+#define COMPAT_SYSCALL_ENTRY(name) COMPAT_SYSCALL_ENTRY_ALT(name, NULL)
+
+/*
+ * If an alt_syscall table allows prctl(), override it to prevent a process
+ * from changing its syscall table.
+ */
+static asmlinkage long alt_sys_prctl(int option, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5);
+
+#ifdef CONFIG_COMPAT
+#define SYSCALL_WHITELIST_COMPAT(x) \
+ .compat_whitelist = x ## _compat_whitelist, \
+ .nr_compat_whitelist = ARRAY_SIZE(x ## _compat_whitelist),
+#else
+#define SYSCALL_WHITELIST_COMPAT(x)
+#endif
+
+#define SYSCALL_WHITELIST(x) \
+ { \
+ .name = #x, \
+ .whitelist = x ## _whitelist, \
+ .nr_whitelist = ARRAY_SIZE(x ## _whitelist), \
+ SYSCALL_WHITELIST_COMPAT(x) \
+ }
+
+#define PERMISSIVE_SYSCALL_WHITELIST(x) \
+ { \
+ .name = #x "_permissive", \
+ .permissive = true, \
+ .whitelist = x ## _whitelist, \
+ .nr_whitelist = ARRAY_SIZE(x ## _whitelist), \
+ SYSCALL_WHITELIST_COMPAT(x) \
+ }
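
On a CONFIG_COMPAT kernel, SYSCALL_WHITELIST(android) therefore expands to the following initializer (with .permissive left at its default of false):

	{
		.name = "android",
		.whitelist = android_whitelist,
		.nr_whitelist = ARRAY_SIZE(android_whitelist),
		.compat_whitelist = android_compat_whitelist,
		.nr_compat_whitelist = ARRAY_SIZE(android_compat_whitelist),
	}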
+
+#ifdef CONFIG_COMPAT
+#ifdef CONFIG_X86_64
+#define __NR_compat_access __NR_ia32_access
+#define __NR_compat_adjtimex __NR_ia32_adjtimex
+#define __NR_compat_brk __NR_ia32_brk
+#define __NR_compat_capget __NR_ia32_capget
+#define __NR_compat_capset __NR_ia32_capset
+#define __NR_compat_chdir __NR_ia32_chdir
+#define __NR_compat_chmod __NR_ia32_chmod
+#define __NR_compat_clock_adjtime __NR_ia32_clock_adjtime
+#define __NR_compat_clock_getres __NR_ia32_clock_getres
+#define __NR_compat_clock_gettime __NR_ia32_clock_gettime
+#define __NR_compat_clock_nanosleep __NR_ia32_clock_nanosleep
+#define __NR_compat_clock_settime __NR_ia32_clock_settime
+#define __NR_compat_clone __NR_ia32_clone
+#define __NR_compat_close __NR_ia32_close
+#define __NR_compat_creat __NR_ia32_creat
+#define __NR_compat_dup __NR_ia32_dup
+#define __NR_compat_dup2 __NR_ia32_dup2
+#define __NR_compat_dup3 __NR_ia32_dup3
+#define __NR_compat_epoll_create __NR_ia32_epoll_create
+#define __NR_compat_epoll_create1 __NR_ia32_epoll_create1
+#define __NR_compat_epoll_ctl __NR_ia32_epoll_ctl
+#define __NR_compat_epoll_wait __NR_ia32_epoll_wait
+#define __NR_compat_epoll_pwait __NR_ia32_epoll_pwait
+#define __NR_compat_eventfd __NR_ia32_eventfd
+#define __NR_compat_eventfd2 __NR_ia32_eventfd2
+#define __NR_compat_execve __NR_ia32_execve
+#define __NR_compat_exit __NR_ia32_exit
+#define __NR_compat_exit_group __NR_ia32_exit_group
+#define __NR_compat_faccessat __NR_ia32_faccessat
+#define __NR_compat_fallocate __NR_ia32_fallocate
+#define __NR_compat_fchdir __NR_ia32_fchdir
+#define __NR_compat_fchmod __NR_ia32_fchmod
+#define __NR_compat_fchmodat __NR_ia32_fchmodat
+#define __NR_compat_fchown __NR_ia32_fchown
+#define __NR_compat_fchownat __NR_ia32_fchownat
+#define __NR_compat_fcntl __NR_ia32_fcntl
+#define __NR_compat_fdatasync __NR_ia32_fdatasync
+#define __NR_compat_fgetxattr __NR_ia32_fgetxattr
+#define __NR_compat_flistxattr __NR_ia32_flistxattr
+#define __NR_compat_flock __NR_ia32_flock
+#define __NR_compat_fork __NR_ia32_fork
+#define __NR_compat_fremovexattr __NR_ia32_fremovexattr
+#define __NR_compat_fsetxattr __NR_ia32_fsetxattr
+#define __NR_compat_fstat __NR_ia32_fstat
+#define __NR_compat_fstatfs __NR_ia32_fstatfs
+#define __NR_compat_fsync __NR_ia32_fsync
+#define __NR_compat_ftruncate __NR_ia32_ftruncate
+#define __NR_compat_futex __NR_ia32_futex
+#define __NR_compat_futimesat __NR_ia32_futimesat
+#define __NR_compat_getcpu __NR_ia32_getcpu
+#define __NR_compat_getcwd __NR_ia32_getcwd
+#define __NR_compat_getdents __NR_ia32_getdents
+#define __NR_compat_getdents64 __NR_ia32_getdents64
+#define __NR_compat_getegid __NR_ia32_getegid
+#define __NR_compat_geteuid __NR_ia32_geteuid
+#define __NR_compat_getgid __NR_ia32_getgid
+#define __NR_compat_getgroups32 __NR_ia32_getgroups32
+#define __NR_compat_getpgid __NR_ia32_getpgid
+#define __NR_compat_getpgrp __NR_ia32_getpgrp
+#define __NR_compat_getpid __NR_ia32_getpid
+#define __NR_compat_getppid __NR_ia32_getppid
+#define __NR_compat_getpriority __NR_ia32_getpriority
+#define __NR_compat_getrandom __NR_ia32_getrandom
+#define __NR_compat_getresgid __NR_ia32_getresgid
+#define __NR_compat_getresuid __NR_ia32_getresuid
+#define __NR_compat_getrlimit __NR_ia32_getrlimit
+#define __NR_compat_getrusage __NR_ia32_getrusage
+#define __NR_compat_getsid __NR_ia32_getsid
+#define __NR_compat_gettid __NR_ia32_gettid
+#define __NR_compat_gettimeofday __NR_ia32_gettimeofday
+#define __NR_compat_getuid __NR_ia32_getuid
+#define __NR_compat_getxattr __NR_ia32_getxattr
+#define __NR_compat_inotify_add_watch __NR_ia32_inotify_add_watch
+#define __NR_compat_inotify_init __NR_ia32_inotify_init
+#define __NR_compat_inotify_init1 __NR_ia32_inotify_init1
+#define __NR_compat_inotify_rm_watch __NR_ia32_inotify_rm_watch
+#define __NR_compat_ioctl __NR_ia32_ioctl
+#define __NR_compat_io_destroy __NR_ia32_io_destroy
+#define __NR_compat_io_getevents __NR_ia32_io_getevents
+#define __NR_compat_io_setup __NR_ia32_io_setup
+#define __NR_compat_io_submit __NR_ia32_io_submit
+#define __NR_compat_ioprio_set __NR_ia32_ioprio_set
+#define __NR_compat_keyctl __NR_ia32_keyctl
+#define __NR_compat_kill __NR_ia32_kill
+#define __NR_compat_lgetxattr __NR_ia32_lgetxattr
+#define __NR_compat_link __NR_ia32_link
+#define __NR_compat_linkat __NR_ia32_linkat
+#define __NR_compat_listxattr __NR_ia32_listxattr
+#define __NR_compat_llistxattr __NR_ia32_llistxattr
+#define __NR_compat_lremovexattr __NR_ia32_lremovexattr
+#define __NR_compat_lseek __NR_ia32_lseek
+#define __NR_compat_lsetxattr __NR_ia32_lsetxattr
+#define __NR_compat_lstat __NR_ia32_lstat
+#define __NR_compat_madvise __NR_ia32_madvise
+#define __NR_compat_memfd_create __NR_ia32_memfd_create
+#define __NR_compat_mincore __NR_ia32_mincore
+#define __NR_compat_mkdir __NR_ia32_mkdir
+#define __NR_compat_mkdirat __NR_ia32_mkdirat
+#define __NR_compat_mknod __NR_ia32_mknod
+#define __NR_compat_mknodat __NR_ia32_mknodat
+#define __NR_compat_mlock __NR_ia32_mlock
+#define __NR_compat_munlock __NR_ia32_munlock
+#define __NR_compat_mlockall __NR_ia32_mlockall
+#define __NR_compat_munlockall __NR_ia32_munlockall
+#define __NR_compat_modify_ldt __NR_ia32_modify_ldt
+#define __NR_compat_mount __NR_ia32_mount
+#define __NR_compat_mprotect __NR_ia32_mprotect
+#define __NR_compat_mremap __NR_ia32_mremap
+#define __NR_compat_msync __NR_ia32_msync
+#define __NR_compat_munmap __NR_ia32_munmap
+#define __NR_compat_name_to_handle_at __NR_ia32_name_to_handle_at
+#define __NR_compat_nanosleep __NR_ia32_nanosleep
+#define __NR_compat_open __NR_ia32_open
+#define __NR_compat_open_by_handle_at __NR_ia32_open_by_handle_at
+#define __NR_compat_openat __NR_ia32_openat
+#define __NR_compat_perf_event_open __NR_ia32_perf_event_open
+#define __NR_compat_personality __NR_ia32_personality
+#define __NR_compat_pipe __NR_ia32_pipe
+#define __NR_compat_pipe2 __NR_ia32_pipe2
+#define __NR_compat_poll __NR_ia32_poll
+#define __NR_compat_ppoll __NR_ia32_ppoll
+#define __NR_compat_prctl __NR_ia32_prctl
+#define __NR_compat_pread64 __NR_ia32_pread64
+#define __NR_compat_preadv __NR_ia32_preadv
+#define __NR_compat_prlimit64 __NR_ia32_prlimit64
+#define __NR_compat_process_vm_readv __NR_ia32_process_vm_readv
+#define __NR_compat_process_vm_writev __NR_ia32_process_vm_writev
+#define __NR_compat_pselect6 __NR_ia32_pselect6
+#define __NR_compat_ptrace __NR_ia32_ptrace
+#define __NR_compat_pwrite64 __NR_ia32_pwrite64
+#define __NR_compat_pwritev __NR_ia32_pwritev
+#define __NR_compat_read __NR_ia32_read
+#define __NR_compat_readahead __NR_ia32_readahead
+#define __NR_compat_readv __NR_ia32_readv
+#define __NR_compat_readlink __NR_ia32_readlink
+#define __NR_compat_readlinkat __NR_ia32_readlinkat
+#define __NR_compat_recvmmsg __NR_ia32_recvmmsg
+#define __NR_compat_remap_file_pages __NR_ia32_remap_file_pages
+#define __NR_compat_removexattr __NR_ia32_removexattr
+#define __NR_compat_rename __NR_ia32_rename
+#define __NR_compat_renameat __NR_ia32_renameat
+#define __NR_compat_restart_syscall __NR_ia32_restart_syscall
+#define __NR_compat_rmdir __NR_ia32_rmdir
+#define __NR_compat_rt_sigaction __NR_ia32_rt_sigaction
+#define __NR_compat_rt_sigpending __NR_ia32_rt_sigpending
+#define __NR_compat_rt_sigprocmask __NR_ia32_rt_sigprocmask
+#define __NR_compat_rt_sigqueueinfo __NR_ia32_rt_sigqueueinfo
+#define __NR_compat_rt_sigreturn __NR_ia32_rt_sigreturn
+#define __NR_compat_rt_sigsuspend __NR_ia32_rt_sigsuspend
+#define __NR_compat_rt_sigtimedwait __NR_ia32_rt_sigtimedwait
+#define __NR_compat_rt_tgsigqueueinfo __NR_ia32_rt_tgsigqueueinfo
+#define __NR_compat_sched_get_priority_max __NR_ia32_sched_get_priority_max
+#define __NR_compat_sched_get_priority_min __NR_ia32_sched_get_priority_min
+#define __NR_compat_sched_getaffinity __NR_ia32_sched_getaffinity
+#define __NR_compat_sched_getparam __NR_ia32_sched_getparam
+#define __NR_compat_sched_getscheduler __NR_ia32_sched_getscheduler
+#define __NR_compat_sched_setaffinity __NR_ia32_sched_setaffinity
+#define __NR_compat_sched_setparam __NR_ia32_sched_setparam
+#define __NR_compat_sched_setscheduler __NR_ia32_sched_setscheduler
+#define __NR_compat_sched_yield __NR_ia32_sched_yield
+#define __NR_compat_seccomp __NR_ia32_seccomp
+#define __NR_compat_sendfile __NR_ia32_sendfile
+#define __NR_compat_sendfile64 __NR_ia32_sendfile64
+#define __NR_compat_sendmmsg __NR_ia32_sendmmsg
+#define __NR_compat_setdomainname __NR_ia32_setdomainname
+#define __NR_compat_set_robust_list __NR_ia32_set_robust_list
+#define __NR_compat_set_tid_address __NR_ia32_set_tid_address
+#define __NR_compat_set_thread_area __NR_ia32_set_thread_area
+#define __NR_compat_setgid __NR_ia32_setgid
+#define __NR_compat_setgroups __NR_ia32_setgroups
+#define __NR_compat_setitimer __NR_ia32_setitimer
+#define __NR_compat_setns __NR_ia32_setns
+#define __NR_compat_setpgid __NR_ia32_setpgid
+#define __NR_compat_setpriority __NR_ia32_setpriority
+#define __NR_compat_setregid __NR_ia32_setregid
+#define __NR_compat_setresgid __NR_ia32_setresgid
+#define __NR_compat_setresuid __NR_ia32_setresuid
+#define __NR_compat_setrlimit __NR_ia32_setrlimit
+#define __NR_compat_setsid __NR_ia32_setsid
+#define __NR_compat_settimeofday __NR_ia32_settimeofday
+#define __NR_compat_setuid __NR_ia32_setuid
+#define __NR_compat_setxattr __NR_ia32_setxattr
+#define __NR_compat_signalfd4 __NR_ia32_signalfd4
+#define __NR_compat_sigaltstack __NR_ia32_sigaltstack
+#define __NR_compat_socketcall __NR_ia32_socketcall
+#define __NR_compat_splice __NR_ia32_splice
+#define __NR_compat_stat __NR_ia32_stat
+#define __NR_compat_statfs __NR_ia32_statfs
+#define __NR_compat_symlink __NR_ia32_symlink
+#define __NR_compat_symlinkat __NR_ia32_symlinkat
+#define __NR_compat_sync __NR_ia32_sync
+#define __NR_compat_syncfs __NR_ia32_syncfs
+#define __NR_compat_sync_file_range __NR_ia32_sync_file_range
+#define __NR_compat_sysinfo __NR_ia32_sysinfo
+#define __NR_compat_syslog __NR_ia32_syslog
+#define __NR_compat_tee __NR_ia32_tee
+#define __NR_compat_tgkill __NR_ia32_tgkill
+#define __NR_compat_tkill __NR_ia32_tkill
+#define __NR_compat_time __NR_ia32_time
+#define __NR_compat_timer_create __NR_ia32_timer_create
+#define __NR_compat_timer_delete __NR_ia32_timer_delete
+#define __NR_compat_timer_getoverrun __NR_ia32_timer_getoverrun
+#define __NR_compat_timer_gettime __NR_ia32_timer_gettime
+#define __NR_compat_timer_settime __NR_ia32_timer_settime
+#define __NR_compat_timerfd_create __NR_ia32_timerfd_create
+#define __NR_compat_timerfd_gettime __NR_ia32_timerfd_gettime
+#define __NR_compat_timerfd_settime __NR_ia32_timerfd_settime
+#define __NR_compat_times __NR_ia32_times
+#define __NR_compat_truncate __NR_ia32_truncate
+#define __NR_compat_umask __NR_ia32_umask
+#define __NR_compat_umount2 __NR_ia32_umount2
+#define __NR_compat_uname __NR_ia32_uname
+#define __NR_compat_unlink __NR_ia32_unlink
+#define __NR_compat_unlinkat __NR_ia32_unlinkat
+#define __NR_compat_unshare __NR_ia32_unshare
+#define __NR_compat_ustat __NR_ia32_ustat
+#define __NR_compat_utimensat __NR_ia32_utimensat
+#define __NR_compat_utimes __NR_ia32_utimes
+#define __NR_compat_vfork __NR_ia32_vfork
+#define __NR_compat_vmsplice __NR_ia32_vmsplice
+#define __NR_compat_wait4 __NR_ia32_wait4
+#define __NR_compat_waitid __NR_ia32_waitid
+#define __NR_compat_waitpid __NR_ia32_waitpid
+#define __NR_compat_write __NR_ia32_write
+#define __NR_compat_writev __NR_ia32_writev
+#define __NR_compat_chown32 __NR_ia32_chown32
+#define __NR_compat_fadvise64 __NR_ia32_fadvise64
+#define __NR_compat_fadvise64_64 __NR_ia32_fadvise64_64
+#define __NR_compat_fchown32 __NR_ia32_fchown32
+#define __NR_compat_fcntl64 __NR_ia32_fcntl64
+#define __NR_compat_fstat64 __NR_ia32_fstat64
+#define __NR_compat_fstatat64 __NR_ia32_fstatat64
+#define __NR_compat_fstatfs64 __NR_ia32_fstatfs64
+#define __NR_compat_ftruncate64 __NR_ia32_ftruncate64
+#define __NR_compat_getegid32 __NR_ia32_getegid32
+#define __NR_compat_geteuid32 __NR_ia32_geteuid32
+#define __NR_compat_getgid32 __NR_ia32_getgid32
+#define __NR_compat_getresgid32 __NR_ia32_getresgid32
+#define __NR_compat_getresuid32 __NR_ia32_getresuid32
+#define __NR_compat_getuid32 __NR_ia32_getuid32
+#define __NR_compat_lchown32 __NR_ia32_lchown32
+#define __NR_compat_lstat64 __NR_ia32_lstat64
+#define __NR_compat_mmap2 __NR_ia32_mmap2
+#define __NR_compat__newselect __NR_ia32__newselect
+#define __NR_compat__llseek __NR_ia32__llseek
+#define __NR_compat_sigaction __NR_ia32_sigaction
+#define __NR_compat_sigpending __NR_ia32_sigpending
+#define __NR_compat_sigprocmask __NR_ia32_sigprocmask
+#define __NR_compat_sigreturn __NR_ia32_sigreturn
+#define __NR_compat_sigsuspend __NR_ia32_sigsuspend
+#define __NR_compat_setgid32 __NR_ia32_setgid32
+#define __NR_compat_setgroups32 __NR_ia32_setgroups32
+#define __NR_compat_setregid32 __NR_ia32_setregid32
+#define __NR_compat_setresgid32 __NR_ia32_setresgid32
+#define __NR_compat_setresuid32 __NR_ia32_setresuid32
+#define __NR_compat_setreuid32 __NR_ia32_setreuid32
+#define __NR_compat_setuid32 __NR_ia32_setuid32
+#define __NR_compat_stat64 __NR_ia32_stat64
+#define __NR_compat_statfs64 __NR_ia32_statfs64
+#define __NR_compat_truncate64 __NR_ia32_truncate64
+#define __NR_compat_ugetrlimit __NR_ia32_ugetrlimit
+#endif /* CONFIG_X86_64 */
+#endif /* CONFIG_COMPAT */
+
+#endif /* ALT_SYSCALL_H */
diff --git a/security/chromiumos/android_whitelists.h b/security/chromiumos/android_whitelists.h
new file mode 100644
index 00000000000000..978c0366f1e291
--- /dev/null
+++ b/security/chromiumos/android_whitelists.h
@@ -0,0 +1,727 @@
+/*
+ * Linux Security Module for Chromium OS
+ *
+ * Copyright 2018 Google LLC. All Rights Reserved
+ *
+ * Authors:
+ * Micah Morton <mortonm@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ANDROID_WHITELISTS_H
+#define ANDROID_WHITELISTS_H
+
+/*
+ * NOTE: this header exists only to pull the whitelist array definitions out
+ * of alt-syscall.c for the sake of readability. It should not be
+ * included in other .c files.
+ */
+
+#include "alt-syscall.h"
+
+/*
+ * Syscall overrides for android.
+ */
+
+/*
+ * Reflect the priority adjustment done by android_setpriority.
+ * Note that the prio returned by getpriority has been offset by 20.
+ * (returns 40..1 instead of -20..19)
+ */
+static asmlinkage long android_getpriority(int which, int who);
+/* Android does not get to call keyctl. */
+static asmlinkage long android_keyctl(int cmd, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5);
+/* Make sure nothing sets a nice value more favorable than -10. */
+static asmlinkage long android_setpriority(int which, int who, int niceval);
+static asmlinkage long
+android_sched_setscheduler(pid_t pid, int policy,
+ struct sched_param __user *param);
+static asmlinkage long android_sched_setparam(pid_t pid,
+ struct sched_param __user *param);
+static asmlinkage int __maybe_unused
+android_socket(int domain, int type, int protocol);
+static asmlinkage long
+android_perf_event_open(struct perf_event_attr __user *attr_uptr,
+ pid_t pid, int cpu, int group_fd, unsigned long flags);
+static asmlinkage long android_adjtimex(struct timex __user *buf);
+static asmlinkage long android_clock_adjtime(const clockid_t which_clock,
+ struct timex __user *buf);
+static asmlinkage long android_getcpu(unsigned __user *cpu,
+ unsigned __user *node,
+ struct getcpu_cache __user *tcache);
+#ifdef CONFIG_COMPAT
+static asmlinkage long android_compat_adjtimex(struct compat_timex __user *buf);
+static asmlinkage long
+android_compat_clock_adjtime(const clockid_t which_clock,
+ struct compat_timex __user *buf);
+#endif /* CONFIG_COMPAT */
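
None of these override bodies appear in this hunk; they are defined in alt-syscall.c. As one hedged example of their shape, the -10 clamp described above for android_setpriority() could look like this (a sketch only, not the verbatim implementation):

	static asmlinkage long android_setpriority(int which, int who, int niceval)
	{
		/* Never allow a nice value more favorable than -10. */
		if (niceval < -10)
			niceval = -10;
		return sys_setpriority(which, who, niceval);
	}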
+
+static struct syscall_whitelist_entry android_whitelist[] = {
+ SYSCALL_ENTRY_ALT(adjtimex, android_adjtimex),
+ SYSCALL_ENTRY(brk),
+ SYSCALL_ENTRY(capget),
+ SYSCALL_ENTRY(capset),
+ SYSCALL_ENTRY(chdir),
+ SYSCALL_ENTRY_ALT(clock_adjtime, android_clock_adjtime),
+ SYSCALL_ENTRY(clock_getres),
+ SYSCALL_ENTRY(clock_gettime),
+ SYSCALL_ENTRY(clock_nanosleep),
+ SYSCALL_ENTRY(clock_settime),
+ SYSCALL_ENTRY(clone),
+ SYSCALL_ENTRY(close),
+ SYSCALL_ENTRY(dup),
+ SYSCALL_ENTRY(dup3),
+ SYSCALL_ENTRY(epoll_create1),
+ SYSCALL_ENTRY(epoll_ctl),
+ SYSCALL_ENTRY(epoll_pwait),
+ SYSCALL_ENTRY(eventfd2),
+ SYSCALL_ENTRY(execve),
+ SYSCALL_ENTRY(exit),
+ SYSCALL_ENTRY(exit_group),
+ SYSCALL_ENTRY(faccessat),
+ SYSCALL_ENTRY(fallocate),
+ SYSCALL_ENTRY(fchdir),
+ SYSCALL_ENTRY(fchmod),
+ SYSCALL_ENTRY(fchmodat),
+ SYSCALL_ENTRY(fchownat),
+ SYSCALL_ENTRY(fcntl),
+ SYSCALL_ENTRY(fdatasync),
+ SYSCALL_ENTRY(fgetxattr),
+ SYSCALL_ENTRY(flistxattr),
+ SYSCALL_ENTRY(flock),
+ SYSCALL_ENTRY(fremovexattr),
+ SYSCALL_ENTRY(fsetxattr),
+ SYSCALL_ENTRY(fstat),
+ SYSCALL_ENTRY(fstatfs),
+ SYSCALL_ENTRY(fsync),
+ SYSCALL_ENTRY(ftruncate),
+ SYSCALL_ENTRY(futex),
+ SYSCALL_ENTRY_ALT(getcpu, android_getcpu),
+ SYSCALL_ENTRY(getcwd),
+ SYSCALL_ENTRY(getdents64),
+ SYSCALL_ENTRY(getpgid),
+ SYSCALL_ENTRY(getpid),
+ SYSCALL_ENTRY(getppid),
+ SYSCALL_ENTRY_ALT(getpriority, android_getpriority),
+ SYSCALL_ENTRY(getrandom),
+ SYSCALL_ENTRY(getrlimit),
+ SYSCALL_ENTRY(getrusage),
+ SYSCALL_ENTRY(getsid),
+ SYSCALL_ENTRY(gettid),
+ SYSCALL_ENTRY(gettimeofday),
+ SYSCALL_ENTRY(getxattr),
+ SYSCALL_ENTRY(inotify_add_watch),
+ SYSCALL_ENTRY(inotify_init1),
+ SYSCALL_ENTRY(inotify_rm_watch),
+ SYSCALL_ENTRY(ioctl),
+ SYSCALL_ENTRY(io_destroy),
+ SYSCALL_ENTRY(io_getevents),
+ SYSCALL_ENTRY(io_setup),
+ SYSCALL_ENTRY(io_submit),
+ SYSCALL_ENTRY(ioprio_set),
+ SYSCALL_ENTRY_ALT(keyctl, android_keyctl),
+ SYSCALL_ENTRY(kill),
+ SYSCALL_ENTRY(lgetxattr),
+ SYSCALL_ENTRY(linkat),
+ SYSCALL_ENTRY(listxattr),
+ SYSCALL_ENTRY(llistxattr),
+ SYSCALL_ENTRY(lremovexattr),
+ SYSCALL_ENTRY(lseek),
+ SYSCALL_ENTRY(lsetxattr),
+ SYSCALL_ENTRY(madvise),
+ SYSCALL_ENTRY(memfd_create),
+ SYSCALL_ENTRY(mincore),
+ SYSCALL_ENTRY(mkdirat),
+ SYSCALL_ENTRY(mknodat),
+ SYSCALL_ENTRY(mlock),
+ SYSCALL_ENTRY(mlockall),
+ SYSCALL_ENTRY(munlock),
+ SYSCALL_ENTRY(munlockall),
+ SYSCALL_ENTRY(mount),
+ SYSCALL_ENTRY(mprotect),
+ SYSCALL_ENTRY(mremap),
+ SYSCALL_ENTRY(msync),
+ SYSCALL_ENTRY(munmap),
+ SYSCALL_ENTRY(name_to_handle_at),
+ SYSCALL_ENTRY(nanosleep),
+ SYSCALL_ENTRY(open_by_handle_at),
+ SYSCALL_ENTRY(openat),
+ SYSCALL_ENTRY_ALT(perf_event_open, android_perf_event_open),
+ SYSCALL_ENTRY(personality),
+ SYSCALL_ENTRY(pipe2),
+ SYSCALL_ENTRY(ppoll),
+ SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
+ SYSCALL_ENTRY(pread64),
+ SYSCALL_ENTRY(preadv),
+ SYSCALL_ENTRY(prlimit64),
+ SYSCALL_ENTRY(process_vm_readv),
+ SYSCALL_ENTRY(process_vm_writev),
+ SYSCALL_ENTRY(pselect6),
+ SYSCALL_ENTRY(ptrace),
+ SYSCALL_ENTRY(pwrite64),
+ SYSCALL_ENTRY(pwritev),
+ SYSCALL_ENTRY(read),
+ SYSCALL_ENTRY(readahead),
+ SYSCALL_ENTRY(readv),
+ SYSCALL_ENTRY(readlinkat),
+ SYSCALL_ENTRY(recvmmsg),
+ SYSCALL_ENTRY(remap_file_pages),
+ SYSCALL_ENTRY(removexattr),
+ SYSCALL_ENTRY(renameat),
+ SYSCALL_ENTRY(restart_syscall),
+ SYSCALL_ENTRY(rt_sigaction),
+ SYSCALL_ENTRY(rt_sigpending),
+ SYSCALL_ENTRY(rt_sigprocmask),
+ SYSCALL_ENTRY(rt_sigqueueinfo),
+ SYSCALL_ENTRY(rt_sigreturn),
+ SYSCALL_ENTRY(rt_sigsuspend),
+ SYSCALL_ENTRY(rt_sigtimedwait),
+ SYSCALL_ENTRY(rt_tgsigqueueinfo),
+ SYSCALL_ENTRY(sched_get_priority_max),
+ SYSCALL_ENTRY(sched_get_priority_min),
+ SYSCALL_ENTRY(sched_getaffinity),
+ SYSCALL_ENTRY(sched_getparam),
+ SYSCALL_ENTRY(sched_getscheduler),
+ SYSCALL_ENTRY(sched_setaffinity),
+ SYSCALL_ENTRY_ALT(sched_setparam, android_sched_setparam),
+ SYSCALL_ENTRY_ALT(sched_setscheduler, android_sched_setscheduler),
+ SYSCALL_ENTRY(sched_yield),
+ SYSCALL_ENTRY(seccomp),
+ SYSCALL_ENTRY(sendfile),
+ SYSCALL_ENTRY(sendmmsg),
+ SYSCALL_ENTRY(setdomainname),
+ SYSCALL_ENTRY(set_robust_list),
+ SYSCALL_ENTRY(set_tid_address),
+ SYSCALL_ENTRY(setitimer),
+ SYSCALL_ENTRY(setns),
+ SYSCALL_ENTRY(setpgid),
+ SYSCALL_ENTRY_ALT(setpriority, android_setpriority),
+ SYSCALL_ENTRY(setrlimit),
+ SYSCALL_ENTRY(setsid),
+ SYSCALL_ENTRY(settimeofday),
+ SYSCALL_ENTRY(setxattr),
+ SYSCALL_ENTRY(signalfd4),
+ SYSCALL_ENTRY(sigaltstack),
+ SYSCALL_ENTRY(splice),
+ SYSCALL_ENTRY(statfs),
+ SYSCALL_ENTRY(symlinkat),
+ SYSCALL_ENTRY(sync),
+ SYSCALL_ENTRY(syncfs),
+ SYSCALL_ENTRY(sysinfo),
+ SYSCALL_ENTRY(syslog),
+ SYSCALL_ENTRY(tee),
+ SYSCALL_ENTRY(tgkill),
+ SYSCALL_ENTRY(tkill),
+ SYSCALL_ENTRY(timer_create),
+ SYSCALL_ENTRY(timer_delete),
+ SYSCALL_ENTRY(timer_gettime),
+ SYSCALL_ENTRY(timer_getoverrun),
+ SYSCALL_ENTRY(timer_settime),
+ SYSCALL_ENTRY(timerfd_create),
+ SYSCALL_ENTRY(timerfd_gettime),
+ SYSCALL_ENTRY(timerfd_settime),
+ SYSCALL_ENTRY(times),
+ SYSCALL_ENTRY(truncate),
+ SYSCALL_ENTRY(umask),
+ SYSCALL_ENTRY(umount2),
+ SYSCALL_ENTRY(uname),
+ SYSCALL_ENTRY(unlinkat),
+ SYSCALL_ENTRY(unshare),
+ SYSCALL_ENTRY(utimensat),
+ SYSCALL_ENTRY(vmsplice),
+ SYSCALL_ENTRY(wait4),
+ SYSCALL_ENTRY(waitid),
+ SYSCALL_ENTRY(write),
+ SYSCALL_ENTRY(writev),
+
+ /*
+ * Deprecated syscalls which are not wired up on new architectures
+ * such as ARM64.
+ */
+#ifndef CONFIG_ARM64
+ SYSCALL_ENTRY(access),
+ SYSCALL_ENTRY(chmod),
+ SYSCALL_ENTRY(open),
+ SYSCALL_ENTRY(creat),
+ SYSCALL_ENTRY(dup2),
+ SYSCALL_ENTRY(epoll_create),
+ SYSCALL_ENTRY(epoll_wait),
+ SYSCALL_ENTRY(eventfd),
+ SYSCALL_ENTRY(fork),
+ SYSCALL_ENTRY(futimesat),
+ SYSCALL_ENTRY(getdents),
+ SYSCALL_ENTRY(getpgrp),
+ SYSCALL_ENTRY(inotify_init),
+ SYSCALL_ENTRY(link),
+ SYSCALL_ENTRY(lstat),
+ SYSCALL_ENTRY(mkdir),
+ SYSCALL_ENTRY(mknod),
+ SYSCALL_ENTRY(pipe),
+ SYSCALL_ENTRY(poll),
+ SYSCALL_ENTRY(readlink),
+ SYSCALL_ENTRY(rename),
+ SYSCALL_ENTRY(rmdir),
+ SYSCALL_ENTRY(stat),
+ SYSCALL_ENTRY(symlink),
+ SYSCALL_ENTRY(time),
+ SYSCALL_ENTRY(unlink),
+ SYSCALL_ENTRY(ustat),
+ SYSCALL_ENTRY(utimes),
+ SYSCALL_ENTRY(vfork),
+#endif
+
+ /*
+ * waitpid(2) is deprecated on most architectures, but still exists
+ * on IA32.
+ */
+#ifdef CONFIG_X86_32
+ SYSCALL_ENTRY(waitpid),
+#endif
+
+ /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
+#ifdef CONFIG_X86_32
+ SYSCALL_ENTRY(socketcall),
+#else
+ SYSCALL_ENTRY(accept),
+ SYSCALL_ENTRY(accept4),
+ SYSCALL_ENTRY(bind),
+ SYSCALL_ENTRY(connect),
+ SYSCALL_ENTRY(getpeername),
+ SYSCALL_ENTRY(getsockname),
+ SYSCALL_ENTRY(getsockopt),
+ SYSCALL_ENTRY(listen),
+ SYSCALL_ENTRY(recvfrom),
+ SYSCALL_ENTRY(recvmsg),
+ SYSCALL_ENTRY(sendmsg),
+ SYSCALL_ENTRY(sendto),
+ SYSCALL_ENTRY(setsockopt),
+ SYSCALL_ENTRY(shutdown),
+ SYSCALL_ENTRY_ALT(socket, android_socket),
+ SYSCALL_ENTRY(socketpair),
+ /*
+ * recv(2)/send(2) are officially deprecated, but their entry-points
+ * still exist on ARM.
+ */
+#ifdef CONFIG_ARM
+ SYSCALL_ENTRY(recv),
+ SYSCALL_ENTRY(send),
+#endif
+#endif
+
+ /*
+ * posix_fadvise(2) and sync_file_range(2) have ARM-specific wrappers
+ * to deal with register alignment.
+ */
+#ifdef CONFIG_ARM
+ SYSCALL_ENTRY(arm_fadvise64_64),
+ SYSCALL_ENTRY(sync_file_range2),
+#else
+#ifdef CONFIG_X86_32
+ SYSCALL_ENTRY(fadvise64_64),
+#endif
+ SYSCALL_ENTRY(fadvise64),
+ SYSCALL_ENTRY(sync_file_range),
+#endif
+
+ /* 64-bit only syscalls. */
+#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
+ SYSCALL_ENTRY(fchown),
+ SYSCALL_ENTRY(getegid),
+ SYSCALL_ENTRY(geteuid),
+ SYSCALL_ENTRY(getgid),
+ SYSCALL_ENTRY(getgroups),
+ SYSCALL_ENTRY(getresgid),
+ SYSCALL_ENTRY(getresuid),
+ SYSCALL_ENTRY(getuid),
+ SYSCALL_ENTRY(newfstatat),
+ SYSCALL_ENTRY(mmap),
+ SYSCALL_ENTRY(setgid),
+ SYSCALL_ENTRY(setgroups),
+ SYSCALL_ENTRY(setregid),
+ SYSCALL_ENTRY(setresgid),
+ SYSCALL_ENTRY(setresuid),
+ SYSCALL_ENTRY(setreuid),
+ SYSCALL_ENTRY(setuid),
+ /*
+ * chown(2), lchown(2), and select(2) are deprecated and not wired up
+ * on ARM64.
+ */
+#ifndef CONFIG_ARM64
+ SYSCALL_ENTRY(chown),
+ SYSCALL_ENTRY(lchown),
+ SYSCALL_ENTRY(select),
+#endif
+#endif
+
+ /* 32-bit only syscalls. */
+#if defined(CONFIG_ARM) || defined(CONFIG_X86_32)
+ SYSCALL_ENTRY(chown32),
+ SYSCALL_ENTRY(fchown32),
+ SYSCALL_ENTRY(fcntl64),
+ SYSCALL_ENTRY(fstat64),
+ SYSCALL_ENTRY(fstatat64),
+ SYSCALL_ENTRY(fstatfs64),
+ SYSCALL_ENTRY(ftruncate64),
+ SYSCALL_ENTRY(getegid32),
+ SYSCALL_ENTRY(geteuid32),
+ SYSCALL_ENTRY(getgid32),
+ SYSCALL_ENTRY(getgroups32),
+ SYSCALL_ENTRY(getresgid32),
+ SYSCALL_ENTRY(getresuid32),
+ SYSCALL_ENTRY(getuid32),
+ SYSCALL_ENTRY(lchown32),
+ SYSCALL_ENTRY(lstat64),
+ SYSCALL_ENTRY(mmap2),
+ SYSCALL_ENTRY(_newselect),
+ SYSCALL_ENTRY(_llseek),
+ SYSCALL_ENTRY(sigaction),
+ SYSCALL_ENTRY(sigpending),
+ SYSCALL_ENTRY(sigprocmask),
+ SYSCALL_ENTRY(sigreturn),
+ SYSCALL_ENTRY(sigsuspend),
+ SYSCALL_ENTRY(sendfile64),
+ SYSCALL_ENTRY(setgid32),
+ SYSCALL_ENTRY(setgroups32),
+ SYSCALL_ENTRY(setregid32),
+ SYSCALL_ENTRY(setresgid32),
+ SYSCALL_ENTRY(setresuid32),
+ SYSCALL_ENTRY(setreuid32),
+ SYSCALL_ENTRY(setuid32),
+ SYSCALL_ENTRY(stat64),
+ SYSCALL_ENTRY(statfs64),
+ SYSCALL_ENTRY(truncate64),
+ SYSCALL_ENTRY(ugetrlimit),
+#endif
+
+	/* x86-specific syscalls. */
+#ifdef CONFIG_X86
+ SYSCALL_ENTRY(modify_ldt),
+ SYSCALL_ENTRY(set_thread_area),
+#endif
+
+#ifdef CONFIG_X86_64
+ SYSCALL_ENTRY(arch_prctl),
+#endif
+}; /* end android_whitelist */
+
+#ifdef CONFIG_COMPAT
+static struct syscall_whitelist_entry android_compat_whitelist[] = {
+ COMPAT_SYSCALL_ENTRY(access),
+ COMPAT_SYSCALL_ENTRY_ALT(adjtimex, android_compat_adjtimex),
+ COMPAT_SYSCALL_ENTRY(brk),
+ COMPAT_SYSCALL_ENTRY(capget),
+ COMPAT_SYSCALL_ENTRY(capset),
+ COMPAT_SYSCALL_ENTRY(chdir),
+ COMPAT_SYSCALL_ENTRY(chmod),
+ COMPAT_SYSCALL_ENTRY_ALT(clock_adjtime, android_compat_clock_adjtime),
+ COMPAT_SYSCALL_ENTRY(clock_getres),
+ COMPAT_SYSCALL_ENTRY(clock_gettime),
+ COMPAT_SYSCALL_ENTRY(clock_nanosleep),
+ COMPAT_SYSCALL_ENTRY(clock_settime),
+ COMPAT_SYSCALL_ENTRY(clone),
+ COMPAT_SYSCALL_ENTRY(close),
+ COMPAT_SYSCALL_ENTRY(creat),
+ COMPAT_SYSCALL_ENTRY(dup),
+ COMPAT_SYSCALL_ENTRY(dup2),
+ COMPAT_SYSCALL_ENTRY(dup3),
+ COMPAT_SYSCALL_ENTRY(epoll_create),
+ COMPAT_SYSCALL_ENTRY(epoll_create1),
+ COMPAT_SYSCALL_ENTRY(epoll_ctl),
+ COMPAT_SYSCALL_ENTRY(epoll_wait),
+ COMPAT_SYSCALL_ENTRY(epoll_pwait),
+ COMPAT_SYSCALL_ENTRY(eventfd),
+ COMPAT_SYSCALL_ENTRY(eventfd2),
+ COMPAT_SYSCALL_ENTRY(execve),
+ COMPAT_SYSCALL_ENTRY(exit),
+ COMPAT_SYSCALL_ENTRY(exit_group),
+ COMPAT_SYSCALL_ENTRY(faccessat),
+ COMPAT_SYSCALL_ENTRY(fallocate),
+ COMPAT_SYSCALL_ENTRY(fchdir),
+ COMPAT_SYSCALL_ENTRY(fchmod),
+ COMPAT_SYSCALL_ENTRY(fchmodat),
+ COMPAT_SYSCALL_ENTRY(fchownat),
+ COMPAT_SYSCALL_ENTRY(fcntl),
+ COMPAT_SYSCALL_ENTRY(fdatasync),
+ COMPAT_SYSCALL_ENTRY(fgetxattr),
+ COMPAT_SYSCALL_ENTRY(flistxattr),
+ COMPAT_SYSCALL_ENTRY(flock),
+ COMPAT_SYSCALL_ENTRY(fork),
+ COMPAT_SYSCALL_ENTRY(fremovexattr),
+ COMPAT_SYSCALL_ENTRY(fsetxattr),
+ COMPAT_SYSCALL_ENTRY(fstat),
+ COMPAT_SYSCALL_ENTRY(fstatfs),
+ COMPAT_SYSCALL_ENTRY(fsync),
+ COMPAT_SYSCALL_ENTRY(ftruncate),
+ COMPAT_SYSCALL_ENTRY(futex),
+ COMPAT_SYSCALL_ENTRY(futimesat),
+ COMPAT_SYSCALL_ENTRY_ALT(getcpu, android_getcpu),
+ COMPAT_SYSCALL_ENTRY(getcwd),
+ COMPAT_SYSCALL_ENTRY(getdents),
+ COMPAT_SYSCALL_ENTRY(getdents64),
+ COMPAT_SYSCALL_ENTRY(getpgid),
+ COMPAT_SYSCALL_ENTRY(getpgrp),
+ COMPAT_SYSCALL_ENTRY(getpid),
+ COMPAT_SYSCALL_ENTRY(getppid),
+ COMPAT_SYSCALL_ENTRY_ALT(getpriority, android_getpriority),
+ COMPAT_SYSCALL_ENTRY(getrandom),
+ COMPAT_SYSCALL_ENTRY(getrusage),
+ COMPAT_SYSCALL_ENTRY(getsid),
+ COMPAT_SYSCALL_ENTRY(gettid),
+ COMPAT_SYSCALL_ENTRY(gettimeofday),
+ COMPAT_SYSCALL_ENTRY(getxattr),
+ COMPAT_SYSCALL_ENTRY(inotify_add_watch),
+ COMPAT_SYSCALL_ENTRY(inotify_init),
+ COMPAT_SYSCALL_ENTRY(inotify_init1),
+ COMPAT_SYSCALL_ENTRY(inotify_rm_watch),
+ COMPAT_SYSCALL_ENTRY(ioctl),
+ COMPAT_SYSCALL_ENTRY(io_destroy),
+ COMPAT_SYSCALL_ENTRY(io_getevents),
+ COMPAT_SYSCALL_ENTRY(io_setup),
+ COMPAT_SYSCALL_ENTRY(io_submit),
+ COMPAT_SYSCALL_ENTRY(ioprio_set),
+ COMPAT_SYSCALL_ENTRY_ALT(keyctl, android_keyctl),
+ COMPAT_SYSCALL_ENTRY(kill),
+ COMPAT_SYSCALL_ENTRY(lgetxattr),
+ COMPAT_SYSCALL_ENTRY(link),
+ COMPAT_SYSCALL_ENTRY(linkat),
+ COMPAT_SYSCALL_ENTRY(listxattr),
+ COMPAT_SYSCALL_ENTRY(llistxattr),
+ COMPAT_SYSCALL_ENTRY(lremovexattr),
+ COMPAT_SYSCALL_ENTRY(lseek),
+ COMPAT_SYSCALL_ENTRY(lsetxattr),
+ COMPAT_SYSCALL_ENTRY(lstat),
+ COMPAT_SYSCALL_ENTRY(madvise),
+ COMPAT_SYSCALL_ENTRY(memfd_create),
+ COMPAT_SYSCALL_ENTRY(mincore),
+ COMPAT_SYSCALL_ENTRY(mkdir),
+ COMPAT_SYSCALL_ENTRY(mkdirat),
+ COMPAT_SYSCALL_ENTRY(mknod),
+ COMPAT_SYSCALL_ENTRY(mknodat),
+ COMPAT_SYSCALL_ENTRY(mlock),
+ COMPAT_SYSCALL_ENTRY(mlockall),
+ COMPAT_SYSCALL_ENTRY(munlock),
+ COMPAT_SYSCALL_ENTRY(munlockall),
+ COMPAT_SYSCALL_ENTRY(mount),
+ COMPAT_SYSCALL_ENTRY(mprotect),
+ COMPAT_SYSCALL_ENTRY(mremap),
+ COMPAT_SYSCALL_ENTRY(msync),
+ COMPAT_SYSCALL_ENTRY(munmap),
+ COMPAT_SYSCALL_ENTRY(name_to_handle_at),
+ COMPAT_SYSCALL_ENTRY(nanosleep),
+ COMPAT_SYSCALL_ENTRY(open),
+ COMPAT_SYSCALL_ENTRY(open_by_handle_at),
+ COMPAT_SYSCALL_ENTRY(openat),
+ COMPAT_SYSCALL_ENTRY_ALT(perf_event_open, android_perf_event_open),
+ COMPAT_SYSCALL_ENTRY(personality),
+ COMPAT_SYSCALL_ENTRY(pipe),
+ COMPAT_SYSCALL_ENTRY(pipe2),
+ COMPAT_SYSCALL_ENTRY(poll),
+ COMPAT_SYSCALL_ENTRY(ppoll),
+ COMPAT_SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
+ COMPAT_SYSCALL_ENTRY(pread64),
+ COMPAT_SYSCALL_ENTRY(preadv),
+ COMPAT_SYSCALL_ENTRY(prlimit64),
+ COMPAT_SYSCALL_ENTRY(process_vm_readv),
+ COMPAT_SYSCALL_ENTRY(process_vm_writev),
+ COMPAT_SYSCALL_ENTRY(pselect6),
+ COMPAT_SYSCALL_ENTRY(ptrace),
+ COMPAT_SYSCALL_ENTRY(pwrite64),
+ COMPAT_SYSCALL_ENTRY(pwritev),
+ COMPAT_SYSCALL_ENTRY(read),
+ COMPAT_SYSCALL_ENTRY(readahead),
+ COMPAT_SYSCALL_ENTRY(readv),
+ COMPAT_SYSCALL_ENTRY(readlink),
+ COMPAT_SYSCALL_ENTRY(readlinkat),
+ COMPAT_SYSCALL_ENTRY(recvmmsg),
+ COMPAT_SYSCALL_ENTRY(remap_file_pages),
+ COMPAT_SYSCALL_ENTRY(removexattr),
+ COMPAT_SYSCALL_ENTRY(rename),
+ COMPAT_SYSCALL_ENTRY(renameat),
+ COMPAT_SYSCALL_ENTRY(restart_syscall),
+ COMPAT_SYSCALL_ENTRY(rmdir),
+ COMPAT_SYSCALL_ENTRY(rt_sigaction),
+ COMPAT_SYSCALL_ENTRY(rt_sigpending),
+ COMPAT_SYSCALL_ENTRY(rt_sigprocmask),
+ COMPAT_SYSCALL_ENTRY(rt_sigqueueinfo),
+ COMPAT_SYSCALL_ENTRY(rt_sigreturn),
+ COMPAT_SYSCALL_ENTRY(rt_sigsuspend),
+ COMPAT_SYSCALL_ENTRY(rt_sigtimedwait),
+ COMPAT_SYSCALL_ENTRY(rt_tgsigqueueinfo),
+ COMPAT_SYSCALL_ENTRY(sched_get_priority_max),
+ COMPAT_SYSCALL_ENTRY(sched_get_priority_min),
+ COMPAT_SYSCALL_ENTRY(sched_getaffinity),
+ COMPAT_SYSCALL_ENTRY(sched_getparam),
+ COMPAT_SYSCALL_ENTRY(sched_getscheduler),
+ COMPAT_SYSCALL_ENTRY(sched_setaffinity),
+ COMPAT_SYSCALL_ENTRY_ALT(sched_setparam,
+ android_sched_setparam),
+ COMPAT_SYSCALL_ENTRY_ALT(sched_setscheduler,
+ android_sched_setscheduler),
+ COMPAT_SYSCALL_ENTRY(sched_yield),
+ COMPAT_SYSCALL_ENTRY(seccomp),
+ COMPAT_SYSCALL_ENTRY(sendfile),
+ COMPAT_SYSCALL_ENTRY(sendfile64),
+ COMPAT_SYSCALL_ENTRY(sendmmsg),
+ COMPAT_SYSCALL_ENTRY(setdomainname),
+ COMPAT_SYSCALL_ENTRY(set_robust_list),
+ COMPAT_SYSCALL_ENTRY(set_tid_address),
+ COMPAT_SYSCALL_ENTRY(setitimer),
+ COMPAT_SYSCALL_ENTRY(setns),
+ COMPAT_SYSCALL_ENTRY(setpgid),
+ COMPAT_SYSCALL_ENTRY_ALT(setpriority, android_setpriority),
+ COMPAT_SYSCALL_ENTRY(setrlimit),
+ COMPAT_SYSCALL_ENTRY(setsid),
+ COMPAT_SYSCALL_ENTRY(settimeofday),
+ COMPAT_SYSCALL_ENTRY(setxattr),
+ COMPAT_SYSCALL_ENTRY(signalfd4),
+ COMPAT_SYSCALL_ENTRY(sigaltstack),
+ COMPAT_SYSCALL_ENTRY(splice),
+ COMPAT_SYSCALL_ENTRY(stat),
+ COMPAT_SYSCALL_ENTRY(statfs),
+ COMPAT_SYSCALL_ENTRY(symlink),
+ COMPAT_SYSCALL_ENTRY(symlinkat),
+ COMPAT_SYSCALL_ENTRY(sync),
+ COMPAT_SYSCALL_ENTRY(syncfs),
+ COMPAT_SYSCALL_ENTRY(sysinfo),
+ COMPAT_SYSCALL_ENTRY(syslog),
+ COMPAT_SYSCALL_ENTRY(tgkill),
+ COMPAT_SYSCALL_ENTRY(tee),
+ COMPAT_SYSCALL_ENTRY(tkill),
+ COMPAT_SYSCALL_ENTRY(timer_create),
+ COMPAT_SYSCALL_ENTRY(timer_delete),
+ COMPAT_SYSCALL_ENTRY(timer_gettime),
+ COMPAT_SYSCALL_ENTRY(timer_getoverrun),
+ COMPAT_SYSCALL_ENTRY(timer_settime),
+ COMPAT_SYSCALL_ENTRY(timerfd_create),
+ COMPAT_SYSCALL_ENTRY(timerfd_gettime),
+ COMPAT_SYSCALL_ENTRY(timerfd_settime),
+ COMPAT_SYSCALL_ENTRY(times),
+ COMPAT_SYSCALL_ENTRY(truncate),
+ COMPAT_SYSCALL_ENTRY(umask),
+ COMPAT_SYSCALL_ENTRY(umount2),
+ COMPAT_SYSCALL_ENTRY(uname),
+ COMPAT_SYSCALL_ENTRY(unlink),
+ COMPAT_SYSCALL_ENTRY(unlinkat),
+ COMPAT_SYSCALL_ENTRY(unshare),
+ COMPAT_SYSCALL_ENTRY(ustat),
+ COMPAT_SYSCALL_ENTRY(utimensat),
+ COMPAT_SYSCALL_ENTRY(utimes),
+ COMPAT_SYSCALL_ENTRY(vfork),
+ COMPAT_SYSCALL_ENTRY(vmsplice),
+ COMPAT_SYSCALL_ENTRY(wait4),
+ COMPAT_SYSCALL_ENTRY(waitid),
+ COMPAT_SYSCALL_ENTRY(write),
+ COMPAT_SYSCALL_ENTRY(writev),
+ COMPAT_SYSCALL_ENTRY(chown32),
+ COMPAT_SYSCALL_ENTRY(fchown32),
+ COMPAT_SYSCALL_ENTRY(fcntl64),
+ COMPAT_SYSCALL_ENTRY(fstat64),
+ COMPAT_SYSCALL_ENTRY(fstatat64),
+ COMPAT_SYSCALL_ENTRY(fstatfs64),
+ COMPAT_SYSCALL_ENTRY(ftruncate64),
+ COMPAT_SYSCALL_ENTRY(getegid),
+ COMPAT_SYSCALL_ENTRY(getegid32),
+ COMPAT_SYSCALL_ENTRY(geteuid),
+ COMPAT_SYSCALL_ENTRY(geteuid32),
+ COMPAT_SYSCALL_ENTRY(getgid),
+ COMPAT_SYSCALL_ENTRY(getgid32),
+ COMPAT_SYSCALL_ENTRY(getgroups32),
+ COMPAT_SYSCALL_ENTRY(getresgid32),
+ COMPAT_SYSCALL_ENTRY(getresuid32),
+ COMPAT_SYSCALL_ENTRY(getuid),
+ COMPAT_SYSCALL_ENTRY(getuid32),
+ COMPAT_SYSCALL_ENTRY(lchown32),
+ COMPAT_SYSCALL_ENTRY(lstat64),
+ COMPAT_SYSCALL_ENTRY(mmap2),
+ COMPAT_SYSCALL_ENTRY(_newselect),
+ COMPAT_SYSCALL_ENTRY(_llseek),
+ COMPAT_SYSCALL_ENTRY(sigaction),
+ COMPAT_SYSCALL_ENTRY(sigpending),
+ COMPAT_SYSCALL_ENTRY(sigprocmask),
+ COMPAT_SYSCALL_ENTRY(sigreturn),
+ COMPAT_SYSCALL_ENTRY(sigsuspend),
+ COMPAT_SYSCALL_ENTRY(setgid32),
+ COMPAT_SYSCALL_ENTRY(setgroups32),
+ COMPAT_SYSCALL_ENTRY(setregid32),
+ COMPAT_SYSCALL_ENTRY(setresgid32),
+ COMPAT_SYSCALL_ENTRY(setresuid32),
+ COMPAT_SYSCALL_ENTRY(setreuid32),
+ COMPAT_SYSCALL_ENTRY(setuid32),
+ COMPAT_SYSCALL_ENTRY(stat64),
+ COMPAT_SYSCALL_ENTRY(statfs64),
+ COMPAT_SYSCALL_ENTRY(truncate64),
+ COMPAT_SYSCALL_ENTRY(ugetrlimit),
+
+ /*
+ * waitpid(2) is deprecated on most architectures, but still exists
+ * on IA32.
+ */
+#ifdef CONFIG_X86
+ COMPAT_SYSCALL_ENTRY(waitpid),
+#endif
+
+ /*
+ * posix_fadvise(2) and sync_file_range(2) have ARM-specific wrappers
+ * to deal with register alignment.
+ */
+#ifdef CONFIG_ARM64
+ COMPAT_SYSCALL_ENTRY(arm_fadvise64_64),
+ COMPAT_SYSCALL_ENTRY(sync_file_range2),
+#else
+ COMPAT_SYSCALL_ENTRY(fadvise64_64),
+ COMPAT_SYSCALL_ENTRY(fadvise64),
+ COMPAT_SYSCALL_ENTRY(sync_file_range),
+#endif
+
+ /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
+#ifdef CONFIG_X86
+ COMPAT_SYSCALL_ENTRY(socketcall),
+#else
+ COMPAT_SYSCALL_ENTRY(accept),
+ COMPAT_SYSCALL_ENTRY(accept4),
+ COMPAT_SYSCALL_ENTRY(bind),
+ COMPAT_SYSCALL_ENTRY(connect),
+ COMPAT_SYSCALL_ENTRY(getpeername),
+ COMPAT_SYSCALL_ENTRY(getsockname),
+ COMPAT_SYSCALL_ENTRY(getsockopt),
+ COMPAT_SYSCALL_ENTRY(listen),
+ COMPAT_SYSCALL_ENTRY(recvfrom),
+ COMPAT_SYSCALL_ENTRY(recvmsg),
+ COMPAT_SYSCALL_ENTRY(sendmsg),
+ COMPAT_SYSCALL_ENTRY(sendto),
+ COMPAT_SYSCALL_ENTRY(setsockopt),
+ COMPAT_SYSCALL_ENTRY(shutdown),
+ COMPAT_SYSCALL_ENTRY(socket),
+ COMPAT_SYSCALL_ENTRY(socketpair),
+ COMPAT_SYSCALL_ENTRY(recv),
+ COMPAT_SYSCALL_ENTRY(send),
+#endif
+
+ /*
+	 * getrlimit(2) and time(2) are deprecated and not wired up in the ARM
+ * compat table on ARM64.
+ */
+#ifndef CONFIG_ARM64
+ COMPAT_SYSCALL_ENTRY(getrlimit),
+ COMPAT_SYSCALL_ENTRY(time),
+#endif
+
+ /* x86-specific syscalls. */
+#ifdef CONFIG_X86
+ COMPAT_SYSCALL_ENTRY(modify_ldt),
+ COMPAT_SYSCALL_ENTRY(set_thread_area),
+#endif
+}; /* end android_compat_whitelist */
+#endif /* CONFIG_COMPAT */
+
+#endif /* ANDROID_WHITELISTS_H */
diff --git a/security/chromiumos/complete_whitelists.h b/security/chromiumos/complete_whitelists.h
new file mode 100644
index 00000000000000..d1167afdba219a
--- /dev/null
+++ b/security/chromiumos/complete_whitelists.h
@@ -0,0 +1,402 @@
+/*
+ * Linux Security Module for Chromium OS
+ *
+ * Copyright 2018 Google LLC. All Rights Reserved
+ *
+ * Authors:
+ * Micah Morton <mortonm@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef COMPLETE_WHITELISTS_H
+#define COMPLETE_WHITELISTS_H
+
+/*
+ * NOTE: this header exists only to pull the whitelist array definitions out
+ * of alt-syscall.c for the sake of readability. It should not be
+ * included in other .c files.
+ */
+
+#include "alt-syscall.h"
+
+static struct syscall_whitelist_entry complete_whitelist[] = {
+ /* Syscalls wired up on ARM32/ARM64 and x86_64. */
+ SYSCALL_ENTRY(accept),
+ SYSCALL_ENTRY(accept4),
+ SYSCALL_ENTRY(acct),
+ SYSCALL_ENTRY(add_key),
+ SYSCALL_ENTRY(adjtimex),
+ SYSCALL_ENTRY(bind),
+ SYSCALL_ENTRY(brk),
+ SYSCALL_ENTRY(capget),
+ SYSCALL_ENTRY(capset),
+ SYSCALL_ENTRY(chdir),
+ SYSCALL_ENTRY(chroot),
+ SYSCALL_ENTRY(clock_adjtime),
+ SYSCALL_ENTRY(clock_getres),
+ SYSCALL_ENTRY(clock_gettime),
+ SYSCALL_ENTRY(clock_nanosleep),
+ SYSCALL_ENTRY(clock_settime),
+ SYSCALL_ENTRY(clone),
+ SYSCALL_ENTRY(close),
+ SYSCALL_ENTRY(connect),
+ SYSCALL_ENTRY(delete_module),
+ SYSCALL_ENTRY(dup),
+ SYSCALL_ENTRY(dup3),
+ SYSCALL_ENTRY(epoll_create1),
+ SYSCALL_ENTRY(epoll_ctl),
+ SYSCALL_ENTRY(epoll_pwait),
+ SYSCALL_ENTRY(eventfd2),
+ SYSCALL_ENTRY(execve),
+ SYSCALL_ENTRY(execveat),
+ SYSCALL_ENTRY(exit),
+ SYSCALL_ENTRY(exit_group),
+ SYSCALL_ENTRY(faccessat),
+ SYSCALL_ENTRY(fallocate),
+ SYSCALL_ENTRY(fanotify_init),
+ SYSCALL_ENTRY(fanotify_mark),
+ SYSCALL_ENTRY(fchdir),
+ SYSCALL_ENTRY(fchmod),
+ SYSCALL_ENTRY(fchmodat),
+ SYSCALL_ENTRY(fchown),
+ SYSCALL_ENTRY(fchownat),
+ SYSCALL_ENTRY(fcntl),
+ SYSCALL_ENTRY(fdatasync),
+ SYSCALL_ENTRY(fgetxattr),
+ SYSCALL_ENTRY(finit_module),
+ SYSCALL_ENTRY(flistxattr),
+ SYSCALL_ENTRY(flock),
+ SYSCALL_ENTRY(fremovexattr),
+ SYSCALL_ENTRY(fsetxattr),
+ SYSCALL_ENTRY(fstatfs),
+ SYSCALL_ENTRY(fsync),
+ SYSCALL_ENTRY(ftruncate),
+ SYSCALL_ENTRY(futex),
+ SYSCALL_ENTRY(getcpu),
+ SYSCALL_ENTRY(getcwd),
+ SYSCALL_ENTRY(getdents64),
+ SYSCALL_ENTRY(getegid),
+ SYSCALL_ENTRY(geteuid),
+ SYSCALL_ENTRY(getgid),
+ SYSCALL_ENTRY(getgroups),
+ SYSCALL_ENTRY(getitimer),
+ SYSCALL_ENTRY(get_mempolicy),
+ SYSCALL_ENTRY(getpeername),
+ SYSCALL_ENTRY(getpgid),
+ SYSCALL_ENTRY(getpid),
+ SYSCALL_ENTRY(getppid),
+ SYSCALL_ENTRY(getpriority),
+ SYSCALL_ENTRY(getrandom),
+ SYSCALL_ENTRY(getresgid),
+ SYSCALL_ENTRY(getresuid),
+ SYSCALL_ENTRY(getrlimit),
+ SYSCALL_ENTRY(get_robust_list),
+ SYSCALL_ENTRY(getrusage),
+ SYSCALL_ENTRY(getsid),
+ SYSCALL_ENTRY(getsockname),
+ SYSCALL_ENTRY(getsockopt),
+ SYSCALL_ENTRY(gettid),
+ SYSCALL_ENTRY(gettimeofday),
+ SYSCALL_ENTRY(getuid),
+ SYSCALL_ENTRY(getxattr),
+ SYSCALL_ENTRY(init_module),
+ SYSCALL_ENTRY(inotify_add_watch),
+ SYSCALL_ENTRY(inotify_init1),
+ SYSCALL_ENTRY(inotify_rm_watch),
+ SYSCALL_ENTRY(io_cancel),
+ SYSCALL_ENTRY(ioctl),
+ SYSCALL_ENTRY(io_destroy),
+ SYSCALL_ENTRY(io_getevents),
+ SYSCALL_ENTRY(ioprio_get),
+ SYSCALL_ENTRY(ioprio_set),
+ SYSCALL_ENTRY(io_setup),
+ SYSCALL_ENTRY(io_submit),
+ SYSCALL_ENTRY(kcmp),
+ SYSCALL_ENTRY(kexec_load),
+ SYSCALL_ENTRY(keyctl),
+ SYSCALL_ENTRY(kill),
+ SYSCALL_ENTRY(lgetxattr),
+ SYSCALL_ENTRY(linkat),
+ SYSCALL_ENTRY(listen),
+ SYSCALL_ENTRY(listxattr),
+ SYSCALL_ENTRY(llistxattr),
+ SYSCALL_ENTRY(lookup_dcookie),
+ SYSCALL_ENTRY(lremovexattr),
+ SYSCALL_ENTRY(lseek),
+ SYSCALL_ENTRY(lsetxattr),
+ SYSCALL_ENTRY(madvise),
+ SYSCALL_ENTRY(mbind),
+ SYSCALL_ENTRY(memfd_create),
+ SYSCALL_ENTRY(mincore),
+ SYSCALL_ENTRY(mkdirat),
+ SYSCALL_ENTRY(mknodat),
+ SYSCALL_ENTRY(mlock),
+ SYSCALL_ENTRY(mlock2),
+ SYSCALL_ENTRY(mlockall),
+ SYSCALL_ENTRY(mount),
+ SYSCALL_ENTRY(move_pages),
+ SYSCALL_ENTRY(mprotect),
+ SYSCALL_ENTRY(mq_getsetattr),
+ SYSCALL_ENTRY(mq_notify),
+ SYSCALL_ENTRY(mq_open),
+ SYSCALL_ENTRY(mq_timedreceive),
+ SYSCALL_ENTRY(mq_timedsend),
+ SYSCALL_ENTRY(mq_unlink),
+ SYSCALL_ENTRY(mremap),
+ SYSCALL_ENTRY(msgctl),
+ SYSCALL_ENTRY(msgget),
+ SYSCALL_ENTRY(msgrcv),
+ SYSCALL_ENTRY(msgsnd),
+ SYSCALL_ENTRY(msync),
+ SYSCALL_ENTRY(munlock),
+ SYSCALL_ENTRY(munlockall),
+ SYSCALL_ENTRY(munmap),
+ SYSCALL_ENTRY(name_to_handle_at),
+ SYSCALL_ENTRY(nanosleep),
+ SYSCALL_ENTRY(openat),
+ SYSCALL_ENTRY(open_by_handle_at),
+ SYSCALL_ENTRY(perf_event_open),
+ SYSCALL_ENTRY(personality),
+ SYSCALL_ENTRY(pipe2),
+ SYSCALL_ENTRY(pivot_root),
+ SYSCALL_ENTRY(ppoll),
+ SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
+ SYSCALL_ENTRY(pread64),
+ SYSCALL_ENTRY(preadv),
+ SYSCALL_ENTRY(prlimit64),
+ SYSCALL_ENTRY(process_vm_readv),
+ SYSCALL_ENTRY(process_vm_writev),
+ SYSCALL_ENTRY(pselect6),
+ SYSCALL_ENTRY(ptrace),
+ SYSCALL_ENTRY(pwrite64),
+ SYSCALL_ENTRY(pwritev),
+ SYSCALL_ENTRY(quotactl),
+ SYSCALL_ENTRY(read),
+ SYSCALL_ENTRY(readahead),
+ SYSCALL_ENTRY(readlinkat),
+ SYSCALL_ENTRY(readv),
+ SYSCALL_ENTRY(reboot),
+ SYSCALL_ENTRY(recvfrom),
+ SYSCALL_ENTRY(recvmmsg),
+ SYSCALL_ENTRY(recvmsg),
+ SYSCALL_ENTRY(remap_file_pages),
+ SYSCALL_ENTRY(removexattr),
+ SYSCALL_ENTRY(renameat),
+ SYSCALL_ENTRY(request_key),
+ SYSCALL_ENTRY(restart_syscall),
+ SYSCALL_ENTRY(rt_sigaction),
+ SYSCALL_ENTRY(rt_sigpending),
+ SYSCALL_ENTRY(rt_sigprocmask),
+ SYSCALL_ENTRY(rt_sigqueueinfo),
+ SYSCALL_ENTRY(rt_sigsuspend),
+ SYSCALL_ENTRY(rt_sigtimedwait),
+ SYSCALL_ENTRY(rt_tgsigqueueinfo),
+ SYSCALL_ENTRY(sched_getaffinity),
+ SYSCALL_ENTRY(sched_getattr),
+ SYSCALL_ENTRY(sched_getparam),
+ SYSCALL_ENTRY(sched_get_priority_max),
+ SYSCALL_ENTRY(sched_get_priority_min),
+ SYSCALL_ENTRY(sched_getscheduler),
+ SYSCALL_ENTRY(sched_rr_get_interval),
+ SYSCALL_ENTRY(sched_setaffinity),
+ SYSCALL_ENTRY(sched_setattr),
+ SYSCALL_ENTRY(sched_setparam),
+ SYSCALL_ENTRY(sched_setscheduler),
+ SYSCALL_ENTRY(sched_yield),
+ SYSCALL_ENTRY(seccomp),
+ SYSCALL_ENTRY(semctl),
+ SYSCALL_ENTRY(semget),
+ SYSCALL_ENTRY(semop),
+ SYSCALL_ENTRY(semtimedop),
+ SYSCALL_ENTRY(sendfile),
+ SYSCALL_ENTRY(sendmmsg),
+ SYSCALL_ENTRY(sendmsg),
+ SYSCALL_ENTRY(sendto),
+ SYSCALL_ENTRY(setdomainname),
+ SYSCALL_ENTRY(setfsgid),
+ SYSCALL_ENTRY(setfsuid),
+ SYSCALL_ENTRY(setgid),
+ SYSCALL_ENTRY(setgroups),
+ SYSCALL_ENTRY(sethostname),
+ SYSCALL_ENTRY(setitimer),
+ SYSCALL_ENTRY(set_mempolicy),
+ SYSCALL_ENTRY(setns),
+ SYSCALL_ENTRY(setpgid),
+ SYSCALL_ENTRY(setpriority),
+ SYSCALL_ENTRY(setregid),
+ SYSCALL_ENTRY(setresgid),
+ SYSCALL_ENTRY(setresuid),
+ SYSCALL_ENTRY(setreuid),
+ SYSCALL_ENTRY(setrlimit),
+ SYSCALL_ENTRY(set_robust_list),
+ SYSCALL_ENTRY(setsid),
+ SYSCALL_ENTRY(setsockopt),
+ SYSCALL_ENTRY(set_tid_address),
+ SYSCALL_ENTRY(settimeofday),
+ SYSCALL_ENTRY(setuid),
+ SYSCALL_ENTRY(setxattr),
+ SYSCALL_ENTRY(shmat),
+ SYSCALL_ENTRY(shmctl),
+ SYSCALL_ENTRY(shmdt),
+ SYSCALL_ENTRY(shmget),
+ SYSCALL_ENTRY(shutdown),
+ SYSCALL_ENTRY(sigaltstack),
+ SYSCALL_ENTRY(signalfd4),
+ SYSCALL_ENTRY(socket),
+ SYSCALL_ENTRY(socketpair),
+ SYSCALL_ENTRY(splice),
+ SYSCALL_ENTRY(statfs),
+ SYSCALL_ENTRY(swapoff),
+ SYSCALL_ENTRY(swapon),
+ SYSCALL_ENTRY(symlinkat),
+ SYSCALL_ENTRY(sync),
+ SYSCALL_ENTRY(syncfs),
+ SYSCALL_ENTRY(sysinfo),
+ SYSCALL_ENTRY(syslog),
+ SYSCALL_ENTRY(tee),
+ SYSCALL_ENTRY(tgkill),
+ SYSCALL_ENTRY(timer_create),
+ SYSCALL_ENTRY(timer_delete),
+ SYSCALL_ENTRY(timerfd_create),
+ SYSCALL_ENTRY(timerfd_gettime),
+ SYSCALL_ENTRY(timerfd_settime),
+ SYSCALL_ENTRY(timer_getoverrun),
+ SYSCALL_ENTRY(timer_gettime),
+ SYSCALL_ENTRY(timer_settime),
+ SYSCALL_ENTRY(times),
+ SYSCALL_ENTRY(tkill),
+ SYSCALL_ENTRY(truncate),
+ SYSCALL_ENTRY(umask),
+ SYSCALL_ENTRY(unlinkat),
+ SYSCALL_ENTRY(unshare),
+ SYSCALL_ENTRY(userfaultfd),
+ SYSCALL_ENTRY(utimensat),
+ SYSCALL_ENTRY(vhangup),
+ SYSCALL_ENTRY(vmsplice),
+ SYSCALL_ENTRY(wait4),
+ SYSCALL_ENTRY(waitid),
+ SYSCALL_ENTRY(write),
+ SYSCALL_ENTRY(writev),
+
+ /* Exist for x86_64 and ARM32 but not ARM64. */
+#ifndef CONFIG_ARM64
+ SYSCALL_ENTRY(access),
+ SYSCALL_ENTRY(alarm),
+ SYSCALL_ENTRY(chmod),
+ SYSCALL_ENTRY(chown),
+ SYSCALL_ENTRY(creat),
+ SYSCALL_ENTRY(dup2),
+ SYSCALL_ENTRY(epoll_create),
+ SYSCALL_ENTRY(epoll_wait),
+ SYSCALL_ENTRY(eventfd),
+ SYSCALL_ENTRY(fork),
+ SYSCALL_ENTRY(futimesat),
+ SYSCALL_ENTRY(getdents),
+ SYSCALL_ENTRY(getpgrp),
+ SYSCALL_ENTRY(inotify_init),
+ SYSCALL_ENTRY(lchown),
+ SYSCALL_ENTRY(link),
+ SYSCALL_ENTRY(mkdir),
+ SYSCALL_ENTRY(mknod),
+ SYSCALL_ENTRY(open),
+ SYSCALL_ENTRY(pause),
+ SYSCALL_ENTRY(pipe),
+ SYSCALL_ENTRY(poll),
+ SYSCALL_ENTRY(readlink),
+ SYSCALL_ENTRY(rename),
+ SYSCALL_ENTRY(rmdir),
+ SYSCALL_ENTRY(select),
+ SYSCALL_ENTRY(signalfd),
+ SYSCALL_ENTRY(symlink),
+ SYSCALL_ENTRY(sysfs),
+ SYSCALL_ENTRY(time),
+ SYSCALL_ENTRY(unlink),
+ SYSCALL_ENTRY(ustat),
+ SYSCALL_ENTRY(utime),
+ SYSCALL_ENTRY(utimes),
+ SYSCALL_ENTRY(vfork),
+#endif
+
+	/* Exist for x86_64 and ARM64 but not ARM32. */
+#if !defined(CONFIG_ARM) && (defined(CONFIG_ARM64) || defined(CONFIG_X86_64))
+ SYSCALL_ENTRY(fadvise64),
+ SYSCALL_ENTRY(fstat),
+ SYSCALL_ENTRY(migrate_pages),
+ SYSCALL_ENTRY(mmap),
+ SYSCALL_ENTRY(rt_sigreturn),
+ SYSCALL_ENTRY(sync_file_range),
+ SYSCALL_ENTRY(umount2),
+ SYSCALL_ENTRY(uname),
+#endif
+
+ /* Unique to ARM32. */
+#if defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
+ SYSCALL_ENTRY(arm_fadvise64_64),
+ SYSCALL_ENTRY(bdflush),
+ SYSCALL_ENTRY(fcntl64),
+ SYSCALL_ENTRY(fstat64),
+ SYSCALL_ENTRY(fstatat64),
+ SYSCALL_ENTRY(ftruncate64),
+ SYSCALL_ENTRY(ipc),
+ SYSCALL_ENTRY(lstat64),
+ SYSCALL_ENTRY(mmap2),
+ SYSCALL_ENTRY(nice),
+ SYSCALL_ENTRY(pciconfig_iobase),
+ SYSCALL_ENTRY(pciconfig_read),
+ SYSCALL_ENTRY(pciconfig_write),
+ SYSCALL_ENTRY(recv),
+ SYSCALL_ENTRY(send),
+ SYSCALL_ENTRY(sendfile64),
+ SYSCALL_ENTRY(sigaction),
+ SYSCALL_ENTRY(sigpending),
+ SYSCALL_ENTRY(sigprocmask),
+ SYSCALL_ENTRY(sigsuspend),
+ SYSCALL_ENTRY(socketcall),
+ SYSCALL_ENTRY(stat64),
+ SYSCALL_ENTRY(stime),
+ SYSCALL_ENTRY(syscall),
+ SYSCALL_ENTRY(truncate64),
+ SYSCALL_ENTRY(umount),
+ SYSCALL_ENTRY(uselib),
+#endif
+
+ /* Unique to x86_64. */
+#ifdef CONFIG_X86_64
+ SYSCALL_ENTRY(arch_prctl),
+ SYSCALL_ENTRY(ioperm),
+ SYSCALL_ENTRY(iopl),
+ SYSCALL_ENTRY(kexec_file_load),
+ SYSCALL_ENTRY(lstat),
+ SYSCALL_ENTRY(modify_ldt),
+ SYSCALL_ENTRY(newfstatat),
+ SYSCALL_ENTRY(stat),
+ SYSCALL_ENTRY(_sysctl),
+#endif
+
+ /* Unique to ARM64. */
+#if defined(CONFIG_ARM64) && !defined(CONFIG_ARM)
+ SYSCALL_ENTRY(nfsservctl),
+ SYSCALL_ENTRY(renameat2),
+#endif
+}; /* end complete_whitelist */
+
+#ifdef CONFIG_COMPAT
+/*
+ * For now we do not provide a 32-bit compat version of the complete
+ * whitelist. Since no compat syscalls are whitelisted here, any call into
+ * the compat section of this "complete" alt syscall table is redirected to
+ * block_syscall() (or to warn_compat_syscall() when the permissive variant
+ * is used).
+ */
+static struct syscall_whitelist_entry complete_compat_whitelist[] = {};
+#endif /* CONFIG_COMPAT */
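
block_syscall() and warn_compat_syscall() are defined in alt-syscall.c rather than in this hunk. The assumed blocking behavior is simply to fail as if the syscall did not exist, roughly:

	static asmlinkage long block_syscall(void)
	{
		/* Assumed: report the syscall as unimplemented. */
		return -ENOSYS;
	}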
+
+#endif /* COMPLETE_WHITELISTS_H */
diff --git a/security/chromiumos/inode_mark.c b/security/chromiumos/inode_mark.c
index db9a5ed93f10a7..502e07f38b097b 100644
--- a/security/chromiumos/inode_mark.c
+++ b/security/chromiumos/inode_mark.c
@@ -305,7 +305,7 @@ int chromiumos_flush_inode_security_policies(struct super_block *sb)
}
enum chromiumos_inode_security_policy chromiumos_get_inode_security_policy(
- struct dentry *dentry,
+ struct dentry *dentry, struct inode *inode,
enum chromiumos_inode_security_policy_type type)
{
struct chromiumos_super_block_mark *sbm;
@@ -318,11 +318,10 @@ enum chromiumos_inode_security_policy chromiumos_get_inode_security_policy(
enum chromiumos_inode_security_policy policy =
CHROMIUMOS_INODE_POLICY_INHERIT;
- if (!dentry || !dentry->d_inode ||
- type >= CHROMIUMOS_NUMBER_OF_POLICIES)
+ if (!dentry || !inode || type >= CHROMIUMOS_NUMBER_OF_POLICIES)
return policy;
- sbm = chromiumos_super_block_lookup(dentry->d_inode->i_sb);
+ sbm = chromiumos_super_block_lookup(inode->i_sb);
if (!sbm)
return policy;
@@ -330,7 +329,7 @@ enum chromiumos_inode_security_policy chromiumos_get_inode_security_policy(
rcu_read_lock();
while (1) {
struct fsnotify_mark *mark = fsnotify_find_inode_mark(
- sbm->fsn_group, dentry->d_inode);
+ sbm->fsn_group, inode);
if (mark) {
struct chromiumos_inode_mark *inode_mark =
chromiumos_to_inode_mark(mark);
@@ -343,7 +342,12 @@ enum chromiumos_inode_security_policy chromiumos_get_inode_security_policy(
if (IS_ROOT(dentry))
break;
- dentry = dentry->d_parent;
+ dentry = READ_ONCE(dentry->d_parent);
+ if (!dentry)
+ break;
+ inode = d_inode_rcu(dentry);
+ if (!inode)
+ break;
}
rcu_read_unlock();
diff --git a/security/chromiumos/inode_mark.h b/security/chromiumos/inode_mark.h
index 168bed9984a8e6..ec00bb483d2922 100644
--- a/security/chromiumos/inode_mark.h
+++ b/security/chromiumos/inode_mark.h
@@ -43,5 +43,5 @@ int chromiumos_flush_inode_security_policies(struct super_block *sb);
extern enum chromiumos_inode_security_policy
chromiumos_get_inode_security_policy(
- struct dentry *dentry,
+ struct dentry *dentry, struct inode *inode,
enum chromiumos_inode_security_policy_type type);
diff --git a/security/chromiumos/lsm.c b/security/chromiumos/lsm.c
index 9505f6572327db..3b00d7e2b774a0 100644
--- a/security/chromiumos/lsm.c
+++ b/security/chromiumos/lsm.c
@@ -19,19 +19,65 @@
#define pr_fmt(fmt) "Chromium OS LSM: " fmt
-#include <linux/module.h>
-#include <linux/security.h>
-#include <linux/lsm_hooks.h>
-#include <linux/sched.h> /* current and other task related stuff */
-#include <linux/namei.h> /* for nameidata_get_total_link_count */
+#include <asm/syscall.h>
#include <linux/fs.h>
#include <linux/fs_struct.h>
+#include <linux/hashtable.h>
+#include <linux/lsm_hooks.h>
+#include <linux/module.h>
#include <linux/mount.h>
+#include <linux/namei.h> /* for nameidata_get_total_link_count */
#include <linux/path.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h> /* current and other task related stuff */
+#include <linux/security.h>
#include "inode_mark.h"
+#include "process_management.h"
#include "utils.h"
+#define NUM_BITS 8 /* 256 buckets in each hash table */
+
+static DEFINE_HASHTABLE(process_setuid_policy_hashtable, NUM_BITS);
+
+/*
+ * Whether to disable fixups for process-management-related routines in the
+ * kernel (setuid, setgid, kill). Defaults to false; can be overridden with
+ * the 'disable_process_management_policies' kernel command-line flag. As a
+ * static variable it lives in BSS and is therefore implicitly initialized
+ * to false.
+ */
+static bool disable_process_management_policies;
+
+/* Disable process management policies if flag passed */
+static int set_disable_process_management_policies(char *str)
+{
+ disable_process_management_policies = true;
+ return 1;
+}
+__setup("disable_process_management_policies=",
+ set_disable_process_management_policies);
+
+/*
+ * Hash table entry recording that user 'parent' may use user 'child' for
+ * process management (for now this only means 'parent' may set*uid() to
+ * 'child'). Exceptions for set*gid() and kill() will be added in the
+ * future.
+ */
+struct entry {
+ struct hlist_node next;
+ struct hlist_node dlist; /* for deletion cleanup */
+ uint64_t parent_kuid;
+ uint64_t child_kuid;
+};
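
The hooks that consult process_setuid_policy_hashtable come later in the file and are not all visible in this hunk. A lookup keyed on the parent's kuid would mirror the sb_nosymfollow helper below; the helper name here is hypothetical:

	static bool chromiumos_check_setuid_policy_hashtable(uint64_t parent_kuid,
							     uint64_t child_kuid)
	{
		struct entry *e;
		bool found = false;

		rcu_read_lock();
		hash_for_each_possible_rcu(process_setuid_policy_hashtable,
					   e, next, parent_kuid) {
			if (e->parent_kuid == parent_kuid &&
			    e->child_kuid == child_kuid) {
				found = true;
				break;
			}
		}
		rcu_read_unlock();
		return found;
	}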
+
+static DEFINE_HASHTABLE(sb_nosymfollow_hashtable, NUM_BITS);
+
+struct sb_entry {
+ struct hlist_node next;
+ struct hlist_node dlist; /* for deletion cleanup */
+ uintptr_t sb;
+};
+
static void report(const char *origin, struct path *path, char *operation)
{
char *alloced = NULL, *cmdline;
@@ -79,20 +125,32 @@ int chromiumos_security_sb_mount(const char *dev_name, struct path *path,
#endif
#ifdef CONFIG_SECURITY_CHROMIUMOS_NO_UNPRIVILEGED_UNSAFE_MOUNTS
- if (!(flags & (MS_BIND | MS_MOVE | MS_SHARED | MS_PRIVATE | MS_SLAVE |
- MS_UNBINDABLE)) &&
+ if ((!(flags & (MS_BIND | MS_MOVE | MS_SHARED | MS_PRIVATE | MS_SLAVE |
+ MS_UNBINDABLE)) ||
+ ((flags & MS_REMOUNT) && (flags & MS_BIND))) &&
!capable(CAP_SYS_ADMIN)) {
+ int required_mnt_flags = MNT_NOEXEC | MNT_NOSUID | MNT_NODEV;
+
+ if (flags & MS_REMOUNT) {
+ /*
+ * If this is a remount, we only require that the
+ * requested flags are a superset of the original mount
+ * flags.
+ */
+ required_mnt_flags &= path->mnt->mnt_flags;
+ }
/*
* The three flags we are interested in disallowing in
* unprivileged user namespaces (MS_NOEXEC, MS_NOSUID, MS_NODEV)
- * cannot be modified when doing a remount/bind. The kernel
+ * cannot be modified when doing a bind-mount. The kernel
* attempts to dispatch calls to do_mount() within
* fs/namespace.c in the following order:
*
* * If the MS_REMOUNT flag is present, it calls do_remount().
- * When MS_BIND is also present, it only allows to set/unset
- * MS_RDONLY. Otherwise it bails in the absence of the
- * CAP_SYS_ADMIN in the init ns.
+		 * When MS_BIND is also present, it only allows modifying the
+		 * per-mount flags, which are copied into
+		 * |required_mnt_flags|. Otherwise it bails in the absence of
+		 * CAP_SYS_ADMIN in the init ns.
* * If the MS_BIND flag is present, the only other flag checked
* is MS_REC.
* * If any of the mount propagation flags are present
@@ -101,21 +159,22 @@ int chromiumos_security_sb_mount(const char *dev_name, struct path *path,
* flags.
* * If MS_MOVE flag is present, all other flags are ignored.
*/
- if (!(flags & MS_NOEXEC)) {
+ if ((required_mnt_flags & MNT_NOEXEC) && !(flags & MS_NOEXEC)) {
report("sb_mount", path,
"Mounting a filesystem with 'exec' flag requires CAP_SYS_ADMIN in init ns");
pr_notice("sb_mount dev=%s type=%s flags=%#lx\n",
dev_name, type, flags);
return -EPERM;
}
- if (!(flags & MS_NOSUID)) {
+ if ((required_mnt_flags & MNT_NOSUID) && !(flags & MS_NOSUID)) {
report("sb_mount", path,
"Mounting a filesystem with 'suid' flag requires CAP_SYS_ADMIN in init ns");
pr_notice("sb_mount dev=%s type=%s flags=%#lx\n",
dev_name, type, flags);
return -EPERM;
}
- if (!(flags & MS_NODEV) && strcmp(type, "devpts")) {
+ if ((required_mnt_flags & MNT_NODEV) && !(flags & MS_NODEV) &&
+ strcmp(type, "devpts")) {
report("sb_mount", path,
"Mounting a filesystem with 'dev' flag requires CAP_SYS_ADMIN in init ns");
pr_notice("sb_mount dev=%s type=%s flags=%#lx\n",
@@ -131,6 +190,8 @@ int chromiumos_security_sb_mount(const char *dev_name, struct path *path,
static int module_locking = 1;
static struct super_block *locked_root;
static DEFINE_SPINLOCK(locked_root_spinlock);
+static DEFINE_SPINLOCK(process_setuid_policy_hashtable_spinlock);
+static DEFINE_SPINLOCK(sb_nosymfollow_hashtable_spinlock);
#ifdef CONFIG_SYSCTL
static int zero;
@@ -195,6 +256,106 @@ static void check_locking_enforcement(struct super_block *mnt_sb)
static void check_locking_enforcement(void) { }
#endif
+/* Check for entry in hash table. */
+static bool chromiumos_check_sb_nosymfollow_hashtable(struct super_block *sb)
+{
+ struct sb_entry *entry;
+ uintptr_t sb_pointer = (uintptr_t)sb;
+ bool found = false;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(sb_nosymfollow_hashtable,
+ entry, next, sb_pointer) {
+ if (entry->sb == sb_pointer) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+	 * It's possible that a policy gets added between the check above and
+	 * the point where we return false here. Such a race should not
+	 * affect this check, however, since it would only be relevant if
+ * userspace tried to traverse a symlink on a filesystem before that
+ * filesystem was done being mounted (or potentially while it was being
+ * remounted with new mount flags).
+ */
+ return found;
+}
+
+/* Add entry to hash table. */
+static int chromiumos_add_sb_nosymfollow_hashtable(struct super_block *sb)
+{
+ struct sb_entry *new;
+ uintptr_t sb_pointer = (uintptr_t)sb;
+
+ /* Return if entry already exists */
+ if (chromiumos_check_sb_nosymfollow_hashtable(sb))
+ return 0;
+
+ new = kzalloc(sizeof(struct sb_entry), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->sb = sb_pointer;
+ spin_lock(&sb_nosymfollow_hashtable_spinlock);
+ hash_add_rcu(sb_nosymfollow_hashtable, &new->next, sb_pointer);
+ spin_unlock(&sb_nosymfollow_hashtable_spinlock);
+ return 0;
+}
+
+/* Flush all entries from hash table. */
+void chromiumos_flush_sb_nosymfollow_hashtable(void)
+{
+ struct sb_entry *entry;
+ struct hlist_node *hlist_node;
+ unsigned int bkt_loop_cursor;
+ HLIST_HEAD(free_list);
+
+ /*
+ * Could probably use hash_for_each_rcu here instead, but this should
+ * be fine as well.
+ */
+ spin_lock(&sb_nosymfollow_hashtable_spinlock);
+ hash_for_each_safe(sb_nosymfollow_hashtable, bkt_loop_cursor,
+ hlist_node, entry, next) {
+ hash_del_rcu(&entry->next);
+ hlist_add_head(&entry->dlist, &free_list);
+ }
+ spin_unlock(&sb_nosymfollow_hashtable_spinlock);
+ synchronize_rcu();
+ hlist_for_each_entry_safe(entry, hlist_node, &free_list, dlist)
+ kfree(entry);
+}
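
One detail worth calling out in the flush path above and the removal path
below:

	/*
	 * Entries are unlinked from the table under the spinlock, but only
	 * freed after synchronize_rcu(), so a concurrent lockless reader in
	 * chromiumos_check_sb_nosymfollow_hashtable() can never dereference
	 * freed memory.
	 */
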
+
+/* Remove entry from hash table. */
+static void chromiumos_remove_sb_nosymfollow_hashtable(struct super_block *sb)
+{
+ struct sb_entry *entry;
+ struct hlist_node *hlist_node;
+ uintptr_t sb_pointer = (uintptr_t)sb;
+ bool free_entry = false;
+
+ /*
+ * Could probably use hash_for_each_rcu here instead, but this should
+ * be fine as well.
+ */
+ spin_lock(&sb_nosymfollow_hashtable_spinlock);
+ hash_for_each_possible_safe(sb_nosymfollow_hashtable, entry,
+ hlist_node, next, sb_pointer) {
+ if (entry->sb == sb_pointer) {
+ hash_del_rcu(&entry->next);
+ free_entry = true;
+ break;
+ }
+ }
+ spin_unlock(&sb_nosymfollow_hashtable_spinlock);
+ if (free_entry) {
+ synchronize_rcu();
+ kfree(entry);
+ }
+}
+
void chromiumos_security_sb_free(struct super_block *sb)
{
/*
@@ -208,6 +369,14 @@ void chromiumos_security_sb_free(struct super_block *sb)
}
}
+int chromiumos_security_sb_umount(struct vfsmount *mnt, int flags)
+{
+ /* If mnt->mnt_sb is in nosymfollow hashtable, remove it. */
+ chromiumos_remove_sb_nosymfollow_hashtable(mnt->mnt_sb);
+
+ return 0;
+}
+
static int check_pinning(const char *origin, struct file *file)
{
struct vfsmount *module_root;
@@ -268,22 +437,32 @@ int chromiumos_security_load_firmware(struct file *file, char *buf, size_t size)
return check_pinning("request_firmware", file);
}
+/*
+ * NOTE: The WARN() calls will emit a warning in cases of blocked symlink
+ * traversal attempts. These will show up in kernel warning reports
+ * collected by the crash reporter, so we have some insight into spurious
+ * failures that need addressing.
+ */
static int chromiumos_security_inode_follow_link(struct dentry *dentry,
struct inode *inode, bool rcu)
{
static char accessed_path[PATH_MAX];
enum chromiumos_inode_security_policy policy;
+ /* Deny if symlinks have been disabled on this superblock. */
+ if (chromiumos_check_sb_nosymfollow_hashtable(dentry->d_sb)) {
+ WARN(1,
+ "Blocked symlink traversal for path %x:%x:%s (symlinks were disabled on this FS through the 'nosymfollow' mount option)\n",
+ MAJOR(dentry->d_sb->s_dev),
+ MINOR(dentry->d_sb->s_dev),
+ dentry_path(dentry, accessed_path, PATH_MAX));
+ return -EACCES;
+ }
+
policy = chromiumos_get_inode_security_policy(
- dentry,
+ dentry, inode,
CHROMIUMOS_SYMLINK_TRAVERSAL);
- /*
- * Emit a warning in cases of blocked symlink traversal attempts. These
- * will show up in kernel warning reports collected by the crash
- * reporter, so we have some insight on spurious failures that need
- * addressing.
- */
WARN(policy == CHROMIUMOS_INODE_POLICY_BLOCK,
"Blocked symlink traversal for path %x:%x:%s (see https://goo.gl/8xICW6 for context and rationale)\n",
MAJOR(dentry->d_sb->s_dev), MINOR(dentry->d_sb->s_dev),
@@ -305,7 +484,7 @@ static int chromiumos_security_file_open(
return 0;
policy = chromiumos_get_inode_security_policy(
- dentry,
+ dentry, dentry->d_inode,
CHROMIUMOS_FIFO_ACCESS);
/*
@@ -321,6 +500,329 @@ static int chromiumos_security_file_open(
return policy == CHROMIUMOS_INODE_POLICY_BLOCK ? -EACCES : 0;
}
+bool chromiumos_check_setuid_policy_hashtable_key(kuid_t parent)
+{
+ struct entry *entry;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(process_setuid_policy_hashtable,
+ entry, next, __kuid_val(parent)) {
+ if (entry->parent_kuid == __kuid_val(parent)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+	 * Using RCU, it's possible that a policy gets added between the time
+ * we check above and when we return false here. This is fine, since
+ * policy updates only happen during system startup, well before
+ * sandboxed system services start running and the policies need to be
+ * queried.
+ */
+ return false;
+}
+
+bool chromiumos_check_setuid_policy_hashtable_key_value(kuid_t parent,
+ kuid_t child)
+{
+ struct entry *entry;
+
+ rcu_read_lock();
+ hash_for_each_possible_rcu(process_setuid_policy_hashtable,
+ entry, next, __kuid_val(parent)) {
+ if (entry->parent_kuid == __kuid_val(parent) &&
+ entry->child_kuid == __kuid_val(child)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ /*
+	 * Using RCU, it's possible that a policy gets added between the time
+ * we check above and when we return false here. This is fine, since
+ * policy updates only happen during system startup, well before
+ * sandboxed system services start running and the policies need to be
+ * queried.
+ */
+ return false;
+}
+
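
Similarly, a sketch of the setuid-policy entry type used above and below. The
real definition lives earlier in this file, so the field types here are
guesses from usage (parent_kuid is compared against __kuid_val(), which
yields a plain integer UID):

	struct entry {				/* sketch, not the verbatim definition */
		struct hlist_node next;		/* hashtable linkage */
		struct hlist_node dlist;	/* free-list linkage during flush */
		uint64_t parent_kuid;		/* __kuid_val() of the restricted parent */
		uint64_t child_kuid;		/* __kuid_val() of the allowed child */
	};
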
+bool setuid_syscall(int num)
+{
+#ifdef CONFIG_X86_64
+ if (!(num == __NR_setreuid ||
+ num == __NR_setuid ||
+ num == __NR_setresuid ||
+ num == __NR_setfsuid))
+ return false;
+#elif defined CONFIG_ARM64
+ if (!(num == __NR_setuid ||
+ num == __NR_setreuid ||
+ num == __NR_setfsuid ||
+ num == __NR_setresuid ||
+ num == __NR_compat_setuid ||
+ num == __NR_compat_setreuid ||
+ num == __NR_compat_setfsuid ||
+ num == __NR_compat_setresuid ||
+ num == __NR_compat_setreuid32 ||
+ num == __NR_compat_setresuid32 ||
+ num == __NR_compat_setuid32 ||
+ num == __NR_compat_setfsuid32))
+ return false;
+#else /* CONFIG_ARM */
+ if (!(num == __NR_setreuid32 ||
+ num == __NR_setuid32 ||
+ num == __NR_setresuid32 ||
+ num == __NR_setfsuid32))
+ return false;
+#endif
+ return true;
+}
+
+int chromiumos_security_capable(const struct cred *cred,
+ struct user_namespace *ns,
+ int cap,
+ int audit)
+{
+ /* The current->mm check will fail if this is a kernel thread. */
+ if (!disable_process_management_policies &&
+ cap == CAP_SETUID &&
+ current->mm &&
+ chromiumos_check_setuid_policy_hashtable_key(cred->uid)) {
+ // syscall_get_nr can theoretically return 0 or -1, but that
+ // would signify that the syscall is being aborted due to a
+ // signal, so we don't need to check for this case here.
+ if (!(setuid_syscall(syscall_get_nr(current,
+ current_pt_regs())))) {
+ // Deny if we're not in a set*uid() syscall to avoid
+ // giving powers gated by CAP_SETUID that are related
+ // to functionality other than calling set*uid() (e.g.
+ // allowing user to set up userns uid mappings).
+ WARN(1,
+ "Operation requires CAP_SETUID, which is not available to UID %u for operations besides approved set*uid transitions\n",
+ __kuid_val(cred->uid));
+			return -EPERM;
+ }
+ }
+ return 0;
+}
+
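
To make the distinction above concrete (the UIDs are hypothetical): a
restricted UID 1000 calling setuid(2000) reaches this hook from inside a
set*uid syscall, so setuid_syscall() passes and the decision is deferred to
the task_fix_setuid hook below; the same UID writing a uid_map for a new user
namespace triggers a CAP_SETUID check from within write(2), so
setuid_syscall() fails and the operation is denied here.
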
+/*
+ * This hook inspects the string pointed to by the first parameter, looking for
+ * the "nosymfollow" mount option. The second parameter points to an empty
+ * page-sized buffer that is used for holding LSM-specific mount options that
+ * are grabbed (after this function executes, in security_sb_copy_data) from
+ * the mount string in the first parameter. Since the chromiumos LSM is stacked
+ * ahead of SELinux for ChromeOS, the page-sized buffer is empty when this
+ * function is called. If the "nosymfollow" mount option is encountered in this
+ * function, we write "nosymflw" to the empty page-sized buffer, which lets us
+ * transmit information that will be visible in chromiumos_sb_kern_mount,
+ * signifying that symlinks should be disabled for the sb. We store this token
+ * at a spot in the buffer that is at a greater offset than the bytes needed to
+ * record the rest of the LSM-specific mount options (e.g. those for SELinux).
+ * The "nosymfollow" option will be stripped from the mount string if it is
+ * encountered.
+ */
+int chromiumos_sb_copy_data(char *orig, char *copy)
+{
+ char *orig_copy;
+ char *orig_copy_cur;
+ char *option;
+ size_t offset = 0;
+ bool found = false;
+
+ if (!orig || *orig == 0)
+ return 0;
+
+ orig_copy = alloc_secdata();
+ if (!orig_copy)
+ return -ENOMEM;
+ strncpy(orig_copy, orig, PAGE_SIZE);
+
+ memset(orig, 0, strlen(orig));
+
+ orig_copy_cur = orig_copy;
+ while (orig_copy_cur) {
+ option = strsep(&orig_copy_cur, ",");
+ if (strcmp(option, "nosymfollow") == 0) {
+			if (found) { /* Found multiple times. */
+				free_secdata(orig_copy);
+				return -EINVAL;
+			}
+ found = true;
+ } else {
+ if (offset > 0) {
+ orig[offset] = ',';
+ offset++;
+ }
+ strcpy(orig + offset, option);
+ offset += strlen(option);
+ }
+ }
+
+ if (found)
+ strcpy(copy + offset + 1, "nosymflw");
+
+ free_secdata(orig_copy);
+ return 0;
+}
+
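
For illustration, a minimal user-space model of the option-stripping walk in
chromiumos_sb_copy_data() above. The option string and the
context=/defcontext= names are made-up examples, and this uses glibc's
strsep(3) rather than the kernel's strsep():

	#define _GNU_SOURCE
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char work[] = "context=foo,nosymfollow,defcontext=bar";
		char out[sizeof(work)] = "";
		char *cur = work;
		size_t offset = 0;
		bool found = false;

		while (cur) {
			char *option = strsep(&cur, ",");

			if (strcmp(option, "nosymfollow") == 0) {
				found = true;	/* strip it from the output */
				continue;
			}
			if (offset > 0)
				out[offset++] = ',';
			strcpy(out + offset, option);
			offset += strlen(option);
		}
		/* Prints: context=foo,defcontext=bar (found=1); in the kernel
		 * hook the "nosymflw" token would additionally be written past
		 * the end of the rewritten string in the copy page. */
		printf("%s (found=%d)\n", out, found);
		return 0;
	}
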
+/*
+ * Emit a warning when no entry is found in the whitelist. These will show
+ * up in kernel warning reports collected by the crash reporter, so we have
+ * some insight into failures that need addressing.
+ */
+void chromiumos_setuid_policy_warning(kuid_t parent, kuid_t child)
+{
+ WARN(1,
+ "UID %u is restricted to using certain whitelisted UIDs for process management, and %u is not in the whitelist.\n",
+ __kuid_val(parent),
+ __kuid_val(child));
+}
+
+int chromiumos_check_uid_transition(kuid_t parent, kuid_t child)
+{
+ if (chromiumos_check_setuid_policy_hashtable_key_value(parent, child))
+ return 0;
+ chromiumos_setuid_policy_warning(parent, child);
+	return -EPERM;
+}
+
+/*
+ * Check whether there is either an exception for user under old cred struct to
+ * use user under new cred struct, or the UID transition is allowed (by Linux
+ * set*uid rules) even without CAP_SETUID.
+ */
+int chromiumos_security_task_fix_setuid(struct cred *new,
+ const struct cred *old, int flags)
+{
+ /*
+	 * Do nothing if the feature is turned off by a kernel compile flag
+	 * or there are no setuid restrictions for this UID.
+ */
+ if (disable_process_management_policies ||
+ !chromiumos_check_setuid_policy_hashtable_key(old->uid))
+ return 0;
+
+ switch (flags) {
+ case LSM_SETID_RE:
+ /*
+ * Users for which setuid restrictions exist can only set the
+ * real UID to the real UID or the effective UID, unless an
+ * explicit whitelist policy allows the transition.
+ */
+ if (!uid_eq(old->uid, new->uid) &&
+ !uid_eq(old->euid, new->uid)) {
+ return chromiumos_check_uid_transition(old->uid,
+ new->uid);
+ }
+ /*
+ * Users for which setuid restrictions exist can only set the
+ * effective UID to the real UID, the effective UID, or the
+ * saved set-UID, unless an explicit whitelist policy allows
+ * the transition.
+ */
+ if (!uid_eq(old->uid, new->euid) &&
+ !uid_eq(old->euid, new->euid) &&
+ !uid_eq(old->suid, new->euid)) {
+ return chromiumos_check_uid_transition(old->euid,
+ new->euid);
+ }
+ break;
+ case LSM_SETID_ID:
+ /*
+ * Users for which setuid restrictions exist cannot change the
+ * real UID or saved set-UID unless an explicit whitelist
+ * policy allows the transition.
+ */
+ if (!uid_eq(old->uid, new->uid)) {
+ return chromiumos_check_uid_transition(old->uid,
+ new->uid);
+ }
+ if (!uid_eq(old->suid, new->suid)) {
+ return chromiumos_check_uid_transition(old->suid,
+ new->suid);
+ }
+ break;
+ case LSM_SETID_RES:
+ /*
+ * Users for which setuid restrictions exist cannot change the
+ * real UID, effective UID, or saved set-UID to anything but
+ * one of: the current real UID, the current effective UID or
+ * the current saved set-user-ID unless an explicit whitelist
+ * policy allows the transition.
+ */
+ if (!uid_eq(new->uid, old->uid) &&
+ !uid_eq(new->uid, old->euid) &&
+ !uid_eq(new->uid, old->suid)) {
+ return chromiumos_check_uid_transition(old->uid,
+ new->uid);
+ }
+ if (!uid_eq(new->euid, old->uid) &&
+ !uid_eq(new->euid, old->euid) &&
+ !uid_eq(new->euid, old->suid)) {
+ return chromiumos_check_uid_transition(old->euid,
+ new->euid);
+ }
+ if (!uid_eq(new->suid, old->uid) &&
+ !uid_eq(new->suid, old->euid) &&
+ !uid_eq(new->suid, old->suid)) {
+ return chromiumos_check_uid_transition(old->suid,
+ new->suid);
+ }
+ break;
+ case LSM_SETID_FS:
+ /*
+ * Users for which setuid restrictions exist cannot change the
+ * filesystem UID to anything but one of: the current real UID,
+ * the current effective UID or the current saved set-UID
+ * unless an explicit whitelist policy allows the transition.
+ */
+ if (!uid_eq(new->fsuid, old->uid) &&
+ !uid_eq(new->fsuid, old->euid) &&
+ !uid_eq(new->fsuid, old->suid) &&
+ !uid_eq(new->fsuid, old->fsuid)) {
+ return chromiumos_check_uid_transition(old->fsuid,
+ new->fsuid);
+ }
+ break;
+ }
+ return 0;
+}
+
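
A worked example of the LSM_SETID_RES branch above, with hypothetical UIDs:
suppose UID 1000 has a single whitelist entry 1000 -> 2000. Then
setresuid(2000, 2000, 2000) sets new->uid to a value matching none of the old
{uid, euid, suid}, so chromiumos_check_uid_transition(1000, 2000) runs, finds
the entry, and the call is allowed; setresuid(3000, -1, -1) instead proposes
a new->uid that is neither an old ID nor whitelisted, so the transition is
denied with a WARN from chromiumos_setuid_policy_warning().
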
+/* Unfortunately the kernel doesn't implement a memmem() function. */
+static void *search_buffer(void *haystack, size_t haystacklen,
+ const void *needle, size_t needlelen)
+{
+ if (!needlelen)
+ return (void *)haystack;
+ while (haystacklen >= needlelen) {
+ haystacklen--;
+ if (!memcmp(haystack, needle, needlelen))
+ return (void *)haystack;
+ haystack++;
+ }
+ return NULL;
+}
+
+int chromiumos_sb_kern_mount(struct super_block *sb, int flags, void *data)
+{
+ int ret;
+ char search_str[10] = "\0nosymflw";
+
+ if (!data)
+ return 0;
+
+ if (search_buffer(data, PAGE_SIZE, search_str, 10)) {
+ ret = chromiumos_add_sb_nosymfollow_hashtable(sb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static struct security_hook_list chromiumos_security_hooks[] = {
LSM_HOOK_INIT(sb_mount, chromiumos_security_sb_mount),
LSM_HOOK_INIT(sb_free_security, chromiumos_security_sb_free),
@@ -328,8 +830,61 @@ static struct security_hook_list chromiumos_security_hooks[] = {
LSM_HOOK_INIT(kernel_fw_from_file, chromiumos_security_load_firmware),
LSM_HOOK_INIT(inode_follow_link, chromiumos_security_inode_follow_link),
LSM_HOOK_INIT(file_open, chromiumos_security_file_open),
+ LSM_HOOK_INIT(capable, chromiumos_security_capable),
+ LSM_HOOK_INIT(task_fix_setuid, chromiumos_security_task_fix_setuid),
+ LSM_HOOK_INIT(sb_copy_data, chromiumos_sb_copy_data),
+ LSM_HOOK_INIT(sb_kern_mount, chromiumos_sb_kern_mount),
+ LSM_HOOK_INIT(sb_umount, chromiumos_security_sb_umount)
};
+/* Add process management policy to hash table */
+int chromiumos_add_process_management_entry(kuid_t parent, kuid_t child)
+{
+ struct entry *new;
+
+ /* Return if entry already exists */
+ if (chromiumos_check_setuid_policy_hashtable_key_value(parent,
+ child))
+ return 0;
+
+ new = kzalloc(sizeof(struct entry), GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->parent_kuid = __kuid_val(parent);
+ new->child_kuid = __kuid_val(child);
+ spin_lock(&process_setuid_policy_hashtable_spinlock);
+ hash_add_rcu(process_setuid_policy_hashtable,
+ &new->next,
+ __kuid_val(parent));
+ spin_unlock(&process_setuid_policy_hashtable_spinlock);
+ return 0;
+}
+
+void chromiumos_flush_process_management_entries(void)
+{
+ struct entry *entry;
+ struct hlist_node *hlist_node;
+ unsigned int bkt_loop_cursor;
+ HLIST_HEAD(free_list);
+
+ /*
+ * Could probably use hash_for_each_rcu here instead, but this should
+ * be fine as well.
+ */
+ spin_lock(&process_setuid_policy_hashtable_spinlock);
+ hash_for_each_safe(process_setuid_policy_hashtable, bkt_loop_cursor,
+ hlist_node, entry, next) {
+ hash_del_rcu(&entry->next);
+ hlist_add_head(&entry->dlist, &free_list);
+ }
+ spin_unlock(&process_setuid_policy_hashtable_spinlock);
+ synchronize_rcu();
+ hlist_for_each_entry_safe(entry, hlist_node, &free_list, dlist) {
+ hlist_del(&entry->dlist);
+ kfree(entry);
+ }
+}
+
static int __init chromiumos_security_init(void)
{
security_add_hooks(
diff --git a/security/chromiumos/process_management.h b/security/chromiumos/process_management.h
new file mode 100644
index 00000000000000..85538fc2d83ede
--- /dev/null
+++ b/security/chromiumos/process_management.h
@@ -0,0 +1,38 @@
+/*
+ * Linux Security Module for Chromium OS
+ *
+ * Copyright 2018 Google LLC. All Rights Reserved
+ *
+ * Author:
+ * Micah Morton <mortonm@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SECURITY_PROCESS_MANAGEMENT_H
+#define _SECURITY_PROCESS_MANAGEMENT_H
+
+#include <linux/types.h>
+
+/* Function type. */
+enum chromiumos_process_management_file_write_type {
+ CHROMIUMOS_PROCESS_MANAGEMENT_ADD, /* Add whitelist policy. */
+ CHROMIUMOS_PROCESS_MANAGEMENT_FLUSH, /* Flush whitelist policies. */
+};
+
+/*
+ * Add entry to chromiumos process management policies to allow user 'parent'
+ * to use user 'child' for process management.
+ */
+int chromiumos_add_process_management_entry(kuid_t parent, kuid_t child);
+
+void chromiumos_flush_process_management_entries(void);
+
+#endif /* _SECURITY_PROCESS_MANAGEMENT_H */
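
For context, a minimal user-space sketch of feeding this API through the
securityfs files created in securityfs.c below. The securityfs mount point is
the conventional /sys/kernel/security, and the UID values are made-up
examples:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "<parent UID>:<child UID>", as parsed by
		 * chromiumos_parse_process_management_policy(). */
		static const char policy[] = "1000:2000";
		int fd = open("/sys/kernel/security/chromiumos/"
			      "process_management_policies/add_whitelist_policy",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, policy, strlen(policy)) != (ssize_t)strlen(policy)) {
			perror("write");
			close(fd);
			return 1;
		}
		close(fd);
		return 0;
	}
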
diff --git a/security/chromiumos/read_write_test_whitelists.h b/security/chromiumos/read_write_test_whitelists.h
new file mode 100644
index 00000000000000..5aa7370af4fc3f
--- /dev/null
+++ b/security/chromiumos/read_write_test_whitelists.h
@@ -0,0 +1,56 @@
+/*
+ * Linux Security Module for Chromium OS
+ *
+ * Copyright 2018 Google LLC. All Rights Reserved
+ *
+ * Authors:
+ * Micah Morton <mortonm@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef READ_WRITE_TESTS_WHITELISTS_H
+#define READ_WRITE_TESTS_WHITELISTS_H
+
+/*
+ * NOTE: the purpose of this header is only to pull out the definition of this
+ * array from alt-syscall.c for the purposes of readability. It should not be
+ * included in other .c files.
+ */
+
+#include "alt-syscall.h"
+
+static struct syscall_whitelist_entry read_write_test_whitelist[] = {
+ SYSCALL_ENTRY(exit),
+ SYSCALL_ENTRY(openat),
+ SYSCALL_ENTRY(close),
+ SYSCALL_ENTRY(read),
+ SYSCALL_ENTRY(write),
+ SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
+
+ /* open(2) is deprecated and not wired up on ARM64. */
+#ifndef CONFIG_ARM64
+ SYSCALL_ENTRY(open),
+#endif
+}; /* end read_write_test_whitelist */
+
+#ifdef CONFIG_COMPAT
+static struct syscall_whitelist_entry read_write_test_compat_whitelist[] = {
+ COMPAT_SYSCALL_ENTRY(exit),
+ COMPAT_SYSCALL_ENTRY(open),
+ COMPAT_SYSCALL_ENTRY(openat),
+ COMPAT_SYSCALL_ENTRY(close),
+ COMPAT_SYSCALL_ENTRY(read),
+ COMPAT_SYSCALL_ENTRY(write),
+ COMPAT_SYSCALL_ENTRY_ALT(prctl, alt_sys_prctl),
+}; /* end read_write_test_compat_whitelist */
+#endif /* CONFIG_COMPAT */
+
+#endif /* READ_WRITE_TESTS_WHITELISTS_H */
diff --git a/security/chromiumos/securityfs.c b/security/chromiumos/securityfs.c
index 4bd566f073440d..39a6e78678cdcb 100644
--- a/security/chromiumos/securityfs.c
+++ b/security/chromiumos/securityfs.c
@@ -20,14 +20,17 @@
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/namei.h>
+#include <linux/sched.h>
#include <linux/security.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include "inode_mark.h"
+#include "process_management.h"
static struct dentry *chromiumos_dir;
static struct dentry *chromiumos_inode_policy_dir;
+static struct dentry *chromiumos_process_management_policy_dir;
struct chromiumos_inode_policy_file_entry {
const char *name;
@@ -38,6 +41,12 @@ struct chromiumos_inode_policy_file_entry {
struct dentry *dentry;
};
+struct chromiumos_process_management_file_entry {
+ const char *name;
+ enum chromiumos_process_management_file_write_type type;
+ struct dentry *dentry;
+};
+
static int chromiumos_inode_policy_file_write(
struct chromiumos_inode_policy_file_entry *file_entry,
struct dentry *dentry)
@@ -87,6 +96,14 @@ static struct chromiumos_inode_policy_file_entry
.handle_write = &chromiumos_inode_policy_file_flush_write},
};
+static struct chromiumos_process_management_file_entry
+ chromiumos_process_management_files[] = {
+ {.name = "add_whitelist_policy",
+ .type = CHROMIUMOS_PROCESS_MANAGEMENT_ADD},
+ {.name = "flush_whitelist_policies",
+ .type = CHROMIUMOS_PROCESS_MANAGEMENT_FLUSH},
+};
+
static int chromiumos_resolve_path(const char __user *buf, size_t len,
struct path *path)
{
@@ -161,7 +178,7 @@ static ssize_t chromiumos_inode_file_write(
struct path path = {};
int ret;
- if (!capable(CAP_SYS_ADMIN))
+ if (!ns_capable(current_cred()->user_ns, CAP_SYS_ADMIN))
return -EPERM;
if (*ppos != 0)
@@ -176,10 +193,118 @@ static ssize_t chromiumos_inode_file_write(
return ret < 0 ? ret : len;
}
+/*
+ * If the input buffer contains one or more invalid UIDs, the kuid_t
+ * variables pointed to by 'parent' and 'child' will get updated but this
+ * function will return an error.
+ */
+static int chromiumos_parse_process_management_policy(const char __user *buf,
+ size_t len,
+ kuid_t *parent,
+ kuid_t *child)
+{
+ char *kern_buf;
+ char *parent_buf;
+ char *child_buf;
+ const char separator[] = ":";
+ int ret;
+ size_t first_substring_length;
+ long parsed_parent;
+ long parsed_child;
+
+ /* Duplicate string from user memory and NULL-terminate */
+ kern_buf = memdup_user_nul(buf, len);
+ if (IS_ERR(kern_buf))
+ return PTR_ERR(kern_buf);
+
+ /*
+ * Format of |buf| string should be <UID>:<UID>.
+ * Find location of ":" in kern_buf (copied from |buf|).
+ */
+ first_substring_length = strcspn(kern_buf, separator);
+ if (first_substring_length == 0 || first_substring_length == len) {
+ ret = -EINVAL;
+ goto free_kern;
+ }
+
+ parent_buf = kmemdup_nul(kern_buf, first_substring_length, GFP_KERNEL);
+ if (!parent_buf) {
+ ret = -ENOMEM;
+ goto free_kern;
+ }
+
+ ret = kstrtol(parent_buf, 0, &parsed_parent);
+ if (ret)
+ goto free_both;
+
+ child_buf = kern_buf + first_substring_length + 1;
+ ret = kstrtol(child_buf, 0, &parsed_child);
+ if (ret)
+ goto free_both;
+
+ *parent = make_kuid(current_user_ns(), parsed_parent);
+ if (!uid_valid(*parent)) {
+ ret = -EINVAL;
+ goto free_both;
+ }
+
+ *child = make_kuid(current_user_ns(), parsed_child);
+ if (!uid_valid(*child)) {
+ ret = -EINVAL;
+ goto free_both;
+ }
+
+free_both:
+ kfree(parent_buf);
+free_kern:
+ kfree(kern_buf);
+ return ret;
+}
+
+static ssize_t chromiumos_process_management_file_write(struct file *file,
+ const char __user *buf,
+ size_t len,
+ loff_t *ppos)
+{
+ struct chromiumos_process_management_file_entry *file_entry =
+ file->f_inode->i_private;
+ kuid_t parent;
+ kuid_t child;
+ int ret;
+
+ if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (file_entry->type == CHROMIUMOS_PROCESS_MANAGEMENT_FLUSH) {
+ chromiumos_flush_process_management_entries();
+ return len;
+ }
+
+ /* file_entry->type must equal CHROMIUMOS_PROCESS_MANAGEMENT_ADD */
+ ret = chromiumos_parse_process_management_policy(buf, len, &parent,
+ &child);
+ if (ret)
+ return ret;
+
+ ret = chromiumos_add_process_management_entry(parent, child);
+ if (ret)
+ return ret;
+
+ /* Return len on success so caller won't keep trying to write */
+ return len;
+}
+
static const struct file_operations chromiumos_inode_policy_file_fops = {
.write = chromiumos_inode_file_write,
};
+static const struct file_operations chromiumos_process_management_file_fops = {
+ .write = chromiumos_process_management_file_write,
+};
+
static void chromiumos_shutdown_securityfs(void)
{
int i;
@@ -191,9 +316,19 @@ static void chromiumos_shutdown_securityfs(void)
entry->dentry = NULL;
}
+ for (i = 0; i < ARRAY_SIZE(chromiumos_process_management_files); ++i) {
+ struct chromiumos_process_management_file_entry *entry =
+ &chromiumos_process_management_files[i];
+ securityfs_remove(entry->dentry);
+ entry->dentry = NULL;
+ }
+
securityfs_remove(chromiumos_inode_policy_dir);
chromiumos_inode_policy_dir = NULL;
+ securityfs_remove(chromiumos_process_management_policy_dir);
+ chromiumos_process_management_policy_dir = NULL;
+
securityfs_remove(chromiumos_dir);
chromiumos_dir = NULL;
}
@@ -230,6 +365,29 @@ static int chromiumos_init_securityfs(void)
}
}
+ chromiumos_process_management_policy_dir =
+ securityfs_create_dir(
+ "process_management_policies",
+ chromiumos_dir);
+	if (IS_ERR(chromiumos_process_management_policy_dir)) {
+ ret = PTR_ERR(chromiumos_process_management_policy_dir);
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(chromiumos_process_management_files); ++i) {
+ struct chromiumos_process_management_file_entry *entry =
+ &chromiumos_process_management_files[i];
+ entry->dentry = securityfs_create_file(
+ entry->name,
+ 0200,
+ chromiumos_process_management_policy_dir,
+ entry, &chromiumos_process_management_file_fops);
+ if (IS_ERR(entry->dentry)) {
+ ret = PTR_ERR(entry->dentry);
+ goto error;
+ }
+ }
+
return 0;
error:
diff --git a/security/chromiumos/third_party_whitelists.h b/security/chromiumos/third_party_whitelists.h
new file mode 100644
index 00000000000000..68df2c284449c4
--- /dev/null
+++ b/security/chromiumos/third_party_whitelists.h
@@ -0,0 +1,261 @@
+/*
+ * Linux Security Module for Chromium OS
+ *
+ * Copyright 2018 Google LLC. All Rights Reserved
+ *
+ * Authors:
+ * Micah Morton <mortonm@chromium.org>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef THIRD_PARTY_WHITELISTS_H
+#define THIRD_PARTY_WHITELISTS_H
+
+/*
+ * NOTE: the purpose of this header is only to pull out the definition of this
+ * array from alt-syscall.c for the purposes of readability. It should not be
+ * included in other .c files.
+ */
+
+#include "alt-syscall.h"
+
+static struct syscall_whitelist_entry third_party_whitelist[] = {
+ SYSCALL_ENTRY(brk),
+ SYSCALL_ENTRY(chdir),
+ SYSCALL_ENTRY(clock_gettime),
+ SYSCALL_ENTRY(clone),
+ SYSCALL_ENTRY(close),
+ SYSCALL_ENTRY(dup),
+ SYSCALL_ENTRY(execve),
+ SYSCALL_ENTRY(exit),
+ SYSCALL_ENTRY(exit_group),
+ SYSCALL_ENTRY(fcntl),
+ SYSCALL_ENTRY(fstat),
+ SYSCALL_ENTRY(futex),
+ SYSCALL_ENTRY(getcwd),
+ SYSCALL_ENTRY(getdents64),
+ SYSCALL_ENTRY(getpid),
+ SYSCALL_ENTRY(getpgid),
+ SYSCALL_ENTRY(getppid),
+ SYSCALL_ENTRY(getpriority),
+ SYSCALL_ENTRY(getrlimit),
+ SYSCALL_ENTRY(getsid),
+ SYSCALL_ENTRY(gettimeofday),
+ SYSCALL_ENTRY(ioctl),
+ SYSCALL_ENTRY(lseek),
+ SYSCALL_ENTRY(madvise),
+ SYSCALL_ENTRY(memfd_create),
+ SYSCALL_ENTRY(mprotect),
+ SYSCALL_ENTRY(munmap),
+ SYSCALL_ENTRY(nanosleep),
+ SYSCALL_ENTRY(openat),
+ SYSCALL_ENTRY(prlimit64),
+ SYSCALL_ENTRY(read),
+ SYSCALL_ENTRY(rt_sigaction),
+ SYSCALL_ENTRY(rt_sigprocmask),
+ SYSCALL_ENTRY(rt_sigreturn),
+ SYSCALL_ENTRY(sendfile),
+ SYSCALL_ENTRY(set_robust_list),
+ SYSCALL_ENTRY(set_tid_address),
+ SYSCALL_ENTRY(setpgid),
+ SYSCALL_ENTRY(setpriority),
+ SYSCALL_ENTRY(setsid),
+ SYSCALL_ENTRY(syslog),
+ SYSCALL_ENTRY(statfs),
+ SYSCALL_ENTRY(umask),
+ SYSCALL_ENTRY(uname),
+ SYSCALL_ENTRY(wait4),
+ SYSCALL_ENTRY(write),
+ SYSCALL_ENTRY(writev),
+
+ /*
+ * Deprecated syscalls which are not wired up on new architectures
+ * such as ARM64.
+ */
+#ifndef CONFIG_ARM64
+ SYSCALL_ENTRY(access),
+ SYSCALL_ENTRY(creat),
+ SYSCALL_ENTRY(dup2),
+ SYSCALL_ENTRY(getdents),
+ SYSCALL_ENTRY(getpgrp),
+ SYSCALL_ENTRY(lstat),
+ SYSCALL_ENTRY(mkdir),
+ SYSCALL_ENTRY(open),
+ SYSCALL_ENTRY(pipe),
+ SYSCALL_ENTRY(poll),
+ SYSCALL_ENTRY(readlink),
+ SYSCALL_ENTRY(stat),
+ SYSCALL_ENTRY(unlink),
+#endif
+
+ /* 32-bit only syscalls. */
+#if defined(CONFIG_ARM) || defined(CONFIG_X86_32)
+ SYSCALL_ENTRY(fcntl64),
+ SYSCALL_ENTRY(fstat64),
+ SYSCALL_ENTRY(geteuid32),
+ SYSCALL_ENTRY(getuid32),
+ SYSCALL_ENTRY(_llseek),
+ SYSCALL_ENTRY(lstat64),
+ SYSCALL_ENTRY(_newselect),
+ SYSCALL_ENTRY(mmap2),
+ SYSCALL_ENTRY(stat64),
+ SYSCALL_ENTRY(ugetrlimit),
+#endif
+
+
+ /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
+#ifdef CONFIG_X86_32
+ SYSCALL_ENTRY(socketcall),
+#else
+ SYSCALL_ENTRY(accept),
+ SYSCALL_ENTRY(bind),
+ SYSCALL_ENTRY(connect),
+ SYSCALL_ENTRY(listen),
+ SYSCALL_ENTRY(recvfrom),
+ SYSCALL_ENTRY(recvmsg),
+ SYSCALL_ENTRY(sendmsg),
+ SYSCALL_ENTRY(sendto),
+ SYSCALL_ENTRY(setsockopt),
+ SYSCALL_ENTRY(socket),
+ SYSCALL_ENTRY(socketpair),
+#endif
+
+ /* 64-bit only syscalls. */
+#if defined(CONFIG_X86_64) || defined(CONFIG_ARM64)
+ SYSCALL_ENTRY(getegid),
+ SYSCALL_ENTRY(geteuid),
+ SYSCALL_ENTRY(getgid),
+ SYSCALL_ENTRY(getuid),
+ SYSCALL_ENTRY(mmap),
+ SYSCALL_ENTRY(setgid),
+ SYSCALL_ENTRY(setuid),
+ /*
+ * chown(2), lchown(2), and select(2) are deprecated and not wired up
+ * on ARM64.
+ */
+#ifndef CONFIG_ARM64
+ SYSCALL_ENTRY(select),
+#endif
+#endif
+
+ /* X86_64-specific syscalls. */
+#ifdef CONFIG_X86_64
+ SYSCALL_ENTRY(arch_prctl),
+#endif
+}; /* end third_party_whitelist */
+
+#ifdef CONFIG_COMPAT
+static struct syscall_whitelist_entry third_party_compat_whitelist[] = {
+ COMPAT_SYSCALL_ENTRY(access),
+ COMPAT_SYSCALL_ENTRY(brk),
+ COMPAT_SYSCALL_ENTRY(chdir),
+ COMPAT_SYSCALL_ENTRY(clock_gettime),
+ COMPAT_SYSCALL_ENTRY(clone),
+ COMPAT_SYSCALL_ENTRY(close),
+ COMPAT_SYSCALL_ENTRY(creat),
+ COMPAT_SYSCALL_ENTRY(dup),
+ COMPAT_SYSCALL_ENTRY(dup2),
+ COMPAT_SYSCALL_ENTRY(execve),
+ COMPAT_SYSCALL_ENTRY(exit),
+ COMPAT_SYSCALL_ENTRY(exit_group),
+ COMPAT_SYSCALL_ENTRY(fcntl),
+ COMPAT_SYSCALL_ENTRY(fcntl64),
+ COMPAT_SYSCALL_ENTRY(fstat),
+ COMPAT_SYSCALL_ENTRY(fstat64),
+ COMPAT_SYSCALL_ENTRY(futex),
+ COMPAT_SYSCALL_ENTRY(getcwd),
+ COMPAT_SYSCALL_ENTRY(getdents),
+ COMPAT_SYSCALL_ENTRY(getdents64),
+ COMPAT_SYSCALL_ENTRY(getegid),
+ COMPAT_SYSCALL_ENTRY(geteuid),
+ COMPAT_SYSCALL_ENTRY(geteuid32),
+ COMPAT_SYSCALL_ENTRY(getgid),
+ COMPAT_SYSCALL_ENTRY(getpgid),
+ COMPAT_SYSCALL_ENTRY(getpgrp),
+ COMPAT_SYSCALL_ENTRY(getpid),
+ COMPAT_SYSCALL_ENTRY(getpriority),
+ COMPAT_SYSCALL_ENTRY(getppid),
+ COMPAT_SYSCALL_ENTRY(getsid),
+ COMPAT_SYSCALL_ENTRY(gettimeofday),
+ COMPAT_SYSCALL_ENTRY(getuid),
+ COMPAT_SYSCALL_ENTRY(getuid32),
+ COMPAT_SYSCALL_ENTRY(ioctl),
+ COMPAT_SYSCALL_ENTRY(_llseek),
+ COMPAT_SYSCALL_ENTRY(lseek),
+ COMPAT_SYSCALL_ENTRY(lstat),
+ COMPAT_SYSCALL_ENTRY(lstat64),
+ COMPAT_SYSCALL_ENTRY(madvise),
+ COMPAT_SYSCALL_ENTRY(memfd_create),
+ COMPAT_SYSCALL_ENTRY(mkdir),
+ COMPAT_SYSCALL_ENTRY(mmap2),
+ COMPAT_SYSCALL_ENTRY(mprotect),
+ COMPAT_SYSCALL_ENTRY(munmap),
+ COMPAT_SYSCALL_ENTRY(nanosleep),
+ COMPAT_SYSCALL_ENTRY(_newselect),
+ COMPAT_SYSCALL_ENTRY(open),
+ COMPAT_SYSCALL_ENTRY(openat),
+ COMPAT_SYSCALL_ENTRY(pipe),
+ COMPAT_SYSCALL_ENTRY(poll),
+ COMPAT_SYSCALL_ENTRY(prlimit64),
+ COMPAT_SYSCALL_ENTRY(read),
+ COMPAT_SYSCALL_ENTRY(readlink),
+ COMPAT_SYSCALL_ENTRY(rt_sigaction),
+ COMPAT_SYSCALL_ENTRY(rt_sigprocmask),
+ COMPAT_SYSCALL_ENTRY(rt_sigreturn),
+ COMPAT_SYSCALL_ENTRY(sendfile),
+ COMPAT_SYSCALL_ENTRY(set_robust_list),
+ COMPAT_SYSCALL_ENTRY(set_tid_address),
+ COMPAT_SYSCALL_ENTRY(setgid32),
+ COMPAT_SYSCALL_ENTRY(setuid32),
+ COMPAT_SYSCALL_ENTRY(setpgid),
+ COMPAT_SYSCALL_ENTRY(setpriority),
+ COMPAT_SYSCALL_ENTRY(setsid),
+ COMPAT_SYSCALL_ENTRY(stat),
+ COMPAT_SYSCALL_ENTRY(stat64),
+ COMPAT_SYSCALL_ENTRY(statfs),
+ COMPAT_SYSCALL_ENTRY(syslog),
+ COMPAT_SYSCALL_ENTRY(ugetrlimit),
+ COMPAT_SYSCALL_ENTRY(umask),
+ COMPAT_SYSCALL_ENTRY(uname),
+ COMPAT_SYSCALL_ENTRY(unlink),
+ COMPAT_SYSCALL_ENTRY(wait4),
+ COMPAT_SYSCALL_ENTRY(write),
+ COMPAT_SYSCALL_ENTRY(writev),
+
+ /* IA32 uses the common socketcall(2) entrypoint for socket calls. */
+#ifdef CONFIG_X86
+ COMPAT_SYSCALL_ENTRY(socketcall),
+#else
+ COMPAT_SYSCALL_ENTRY(accept),
+ COMPAT_SYSCALL_ENTRY(bind),
+ COMPAT_SYSCALL_ENTRY(connect),
+ COMPAT_SYSCALL_ENTRY(listen),
+ COMPAT_SYSCALL_ENTRY(recvfrom),
+ COMPAT_SYSCALL_ENTRY(recvmsg),
+ COMPAT_SYSCALL_ENTRY(sendmsg),
+ COMPAT_SYSCALL_ENTRY(sendto),
+ COMPAT_SYSCALL_ENTRY(setsockopt),
+ COMPAT_SYSCALL_ENTRY(socket),
+ COMPAT_SYSCALL_ENTRY(socketpair),
+#endif
+
+ /*
+ * getrlimit(2) is deprecated and not wired in the ARM compat table
+ * on ARM64.
+ */
+#ifndef CONFIG_ARM64
+ COMPAT_SYSCALL_ENTRY(getrlimit),
+#endif
+
+}; /* end third_party_compat_whitelist */
+#endif /* CONFIG_COMPAT */
+
+#endif /* THIRD_PARTY_WHITELISTS_H */
diff --git a/security/chromiumos/utils.c b/security/chromiumos/utils.c
index c3b935eea6e71a..45c0e186493815 100644
--- a/security/chromiumos/utils.c
+++ b/security/chromiumos/utils.c
@@ -142,8 +142,7 @@ char *printable_cmdline(struct task_struct *task)
}
/* Make sure the buffer is always NULL-terminated. */
- len = max_t(int, PAGE_SIZE-1, res);
- buffer[len] = 0;
+ buffer[PAGE_SIZE-1] = 0;
/* Make sure result is printable. */
sanitized = printable(buffer, res);
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 8da7c91b725d3c..c36b98b07d6be4 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -383,14 +383,10 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
result = ima_protect_xattr(dentry, xattr_name, xattr_value,
xattr_value_len);
if (result == 1) {
- bool digsig;
-
if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
return -EINVAL;
- digsig = (xvalue->type == EVM_IMA_XATTR_DIGSIG);
- if (!digsig && (ima_appraise & IMA_APPRAISE_ENFORCE))
- return -EPERM;
- ima_reset_appraise_flags(d_backing_inode(dentry), digsig);
+ ima_reset_appraise_flags(d_backing_inode(dentry),
+ (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
result = 0;
}
return result;
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 816d175da79aa9..30aced99bc55c7 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -26,14 +26,14 @@
#include "ima.h"
static int valid_policy = 1;
-#define TMPBUFLEN 12
+
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
loff_t *ppos, atomic_long_t *val)
{
- char tmpbuf[TMPBUFLEN];
+ char tmpbuf[32]; /* greater than largest 'long' string value */
ssize_t len;
- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
}
diff --git a/security/keys/key.c b/security/keys/key.c
index 4d971bf88ac337..03160f1f1aa279 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -260,8 +260,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
spin_lock(&user->lock);
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
- if (user->qnkeys + 1 >= maxkeys ||
- user->qnbytes + quotalen >= maxbytes ||
+ if (user->qnkeys + 1 > maxkeys ||
+ user->qnbytes + quotalen > maxbytes ||
user->qnbytes + quotalen < user->qnbytes)
goto no_quota;
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d5264f950ce11c..737e60b3d4bd5e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -628,9 +628,6 @@ static bool search_nested_keyrings(struct key *keyring,
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
- if (ctx->index_key.description)
- ctx->index_key.desc_len = strlen(ctx->index_key.description);
-
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
@@ -888,6 +885,7 @@ key_ref_t keyring_search(key_ref_t keyring,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 0361286824638d..f2c7e090a66d7e 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -186,9 +186,8 @@ static int proc_keys_show(struct seq_file *m, void *v)
int rc;
struct keyring_search_context ctx = {
- .index_key.type = key->type,
- .index_key.description = key->description,
- .cred = current_cred(),
+ .index_key = key->index_key,
+ .cred = m->file->f_cred,
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
@@ -208,11 +207,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
}
}
- /* check whether the current task is allowed to view the key (assuming
- * non-possession)
- * - the caller holds a spinlock, and thus the RCU read lock, making our
- * access to __current_cred() safe
- */
+ /* check whether the current task is allowed to view the key */
rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
if (rc < 0)
return 0;
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index ac1d5b2b1626c0..a7095372701e44 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -808,15 +808,14 @@ long join_session_keyring(const char *name)
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
- key_put(keyring);
ret = 0;
- goto error2;
+ goto error3;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
- goto error2;
+ goto error3;
commit_creds(new);
mutex_unlock(&key_session_mutex);
@@ -826,6 +825,8 @@ long join_session_keyring(const char *name)
okay:
return ret;
+error3:
+ key_put(keyring);
error2:
mutex_unlock(&key_session_mutex);
error:
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 3ae3acf473c8f6..88172c163953f4 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -544,6 +544,7 @@ struct key *request_key_and_link(struct key_type *type,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 217775fcd0f3ee..8882b729924dd4 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -254,7 +254,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
struct key *authkey;
key_ref_t authkey_ref;
- sprintf(description, "%x", target_id);
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
authkey_ref = search_process_keyrings(&ctx);
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index cccbf3068cdca8..331fd3bd0f39bd 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -308,6 +308,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
if (a->u.net->sk) {
struct sock *sk = a->u.net->sk;
struct unix_sock *u;
+ struct unix_address *addr;
int len = 0;
char *p = NULL;
@@ -338,14 +339,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
#endif
case AF_UNIX:
u = unix_sk(sk);
+ addr = smp_load_acquire(&u->addr);
+ if (!addr)
+ break;
if (u->path.dentry) {
audit_log_d_path(ab, " path=", &u->path);
break;
}
- if (!u->addr)
- break;
- len = u->addr->len-sizeof(short);
- p = &u->addr->name->sun_path[0];
+ len = addr->len-sizeof(short);
+ p = &addr->name->sun_path[0];
audit_log_format(ab, " path=");
if (*p)
audit_log_untrustedstring(ab, p);
diff --git a/security/security.c b/security/security.c
index ae05ab153c5a2c..42c4cb0cb122e4 100644
--- a/security/security.c
+++ b/security/security.c
@@ -862,6 +862,13 @@ int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
void security_cred_free(struct cred *cred)
{
+ /*
+ * There is a failure case in prepare_creds() that
+ * may result in a call here with ->security being NULL.
+ */
+ if (unlikely(cred->security == NULL))
+ return;
+
call_void_hook(cred_free, cred);
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index b9fbdf808317cc..7d08976f1523a5 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -348,27 +348,26 @@ static struct avc_xperms_decision_node
struct avc_xperms_decision_node *xpd_node;
struct extended_perms_decision *xpd;
- xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ xpd_node = kmem_cache_zalloc(avc_xperms_decision_cachep, GFP_NOWAIT);
if (!xpd_node)
return NULL;
xpd = &xpd_node->xpd;
if (which & XPERMS_ALLOWED) {
xpd->allowed = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ GFP_NOWAIT);
if (!xpd->allowed)
goto error;
}
if (which & XPERMS_AUDITALLOW) {
xpd->auditallow = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ GFP_NOWAIT);
if (!xpd->auditallow)
goto error;
}
if (which & XPERMS_DONTAUDIT) {
xpd->dontaudit = kmem_cache_zalloc(avc_xperms_data_cachep,
- GFP_ATOMIC | __GFP_NOMEMALLOC);
+ GFP_NOWAIT);
if (!xpd->dontaudit)
goto error;
}
@@ -396,8 +395,7 @@ static struct avc_xperms_node *avc_xperms_alloc(void)
{
struct avc_xperms_node *xp_node;
- xp_node = kmem_cache_zalloc(avc_xperms_cachep,
- GFP_ATOMIC|__GFP_NOMEMALLOC);
+ xp_node = kmem_cache_zalloc(avc_xperms_cachep, GFP_NOWAIT);
if (!xp_node)
return xp_node;
INIT_LIST_HEAD(&xp_node->xpd_head);
@@ -550,7 +548,7 @@ static struct avc_node *avc_alloc_node(void)
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_NOWAIT);
if (!node)
goto out;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 992a3153082587..965a55eacaba6e 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -726,7 +726,8 @@ static int sens_destroy(void *key, void *datum, void *p)
kfree(key);
if (datum) {
levdatum = datum;
- ebitmap_destroy(&levdatum->level->cat);
+ if (levdatum->level)
+ ebitmap_destroy(&levdatum->level->cat);
kfree(levdatum->level);
}
kfree(datum);
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 23dca68ffe25e8..0a258c0602d13d 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1441,7 +1441,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
scontext_len, &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
- context.len = scontext_len;
+ context.len = strlen(str) + 1;
str = NULL;
} else if (rc)
goto out_unlock;
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 735a1a9386d64d..9db7c80a74aa56 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2260,6 +2260,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
struct smack_known *skp = smk_of_task_struct(p);
isp->smk_inode = skp;
+ isp->smk_flags |= SMK_INODE_INSTANT;
}
/*
@@ -4310,6 +4311,12 @@ static int smack_key_permission(key_ref_t key_ref,
int request = 0;
int rc;
+ /*
+ * Validate requested permissions
+ */
+ if (perm & ~KEY_NEED_ALL)
+ return -EINVAL;
+
keyp = key_ref_to_ptr(key_ref);
if (keyp == NULL)
return -EINVAL;
@@ -4329,10 +4336,10 @@ static int smack_key_permission(key_ref_t key_ref,
ad.a.u.key_struct.key = keyp->serial;
ad.a.u.key_struct.key_desc = keyp->description;
#endif
- if (perm & KEY_NEED_READ)
- request = MAY_READ;
+ if (perm & (KEY_NEED_READ | KEY_NEED_SEARCH | KEY_NEED_VIEW))
+ request |= MAY_READ;
if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
- request = MAY_WRITE;
+ request |= MAY_WRITE;
rc = smk_access(tkp, keyp->security, request, &ad);
rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
return rc;
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 38651454ed08a5..6f388e77999c3c 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -874,7 +874,8 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
}
/* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
#ifdef CONFIG_MMU
- if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
+ if (get_user_pages(current, bprm->mm, pos, 1,
+ FOLL_FORCE, &page, NULL) <= 0)
return false;
#else
page = bprm->page[pos / PAGE_SIZE];
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index cb6ed10816d49a..0a8808954bd851 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -288,7 +288,9 @@ static int yama_ptrace_access_check(struct task_struct *child,
break;
case YAMA_SCOPE_RELATIONAL:
rcu_read_lock();
- if (!task_is_descendant(current, child) &&
+ if (!pid_alive(child))
+ rc = -EPERM;
+ if (!rc && !task_is_descendant(current, child) &&
!ptracer_exception_found(current, child) &&
!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
rc = -EPERM;
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index f34153962d07f1..585b594bd8381a 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -88,8 +88,10 @@ static struct device_node *get_gpio(char *name,
}
reg = of_get_property(np, "reg", NULL);
- if (!reg)
+ if (!reg) {
+ of_node_put(np);
return NULL;
+ }
*gpioptr = *reg;
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 84b5305aea0659..73be756982a747 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -38,6 +38,7 @@
#include <linux/uio.h>
#include <linux/uaccess.h>
#include <linux/module.h>
+#include <linux/compat.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/info.h>
@@ -501,7 +502,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > INT_MAX / params->buffer.fragment_size)
+ params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+ params->buffer.fragments == 0)
return -EINVAL;
/* now codec parameters */
@@ -859,6 +861,15 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return retval;
}
+/* support of 32bit userspace on 64bit platforms */
+#ifdef CONFIG_COMPAT
+static long snd_compr_ioctl_compat(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return snd_compr_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
static const struct file_operations snd_compr_file_ops = {
.owner = THIS_MODULE,
.open = snd_compr_open,
@@ -866,6 +877,9 @@ static const struct file_operations snd_compr_file_ops = {
.write = snd_compr_write,
.read = snd_compr_read,
.unlocked_ioctl = snd_compr_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = snd_compr_ioctl_compat,
+#endif
.mmap = snd_compr_mmap,
.poll = snd_compr_poll,
};
diff --git a/sound/core/control.c b/sound/core/control.c
index bd01d492f46aa6..2be860a446a2fc 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -346,6 +346,40 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
return 0;
}
+/* add a new kcontrol object; call with card->controls_rwsem locked */
+static int __snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
+{
+ struct snd_ctl_elem_id id;
+ unsigned int idx;
+ unsigned int count;
+
+ id = kcontrol->id;
+ if (id.index > UINT_MAX - kcontrol->count)
+ return -EINVAL;
+
+ if (snd_ctl_find_id(card, &id)) {
+ dev_err(card->dev,
+ "control %i:%i:%i:%s:%i is already present\n",
+ id.iface, id.device, id.subdevice, id.name, id.index);
+ return -EBUSY;
+ }
+
+ if (snd_ctl_find_hole(card, kcontrol->count) < 0)
+ return -ENOMEM;
+
+ list_add_tail(&kcontrol->list, &card->controls);
+ card->controls_count += kcontrol->count;
+ kcontrol->id.numid = card->last_numid + 1;
+ card->last_numid += kcontrol->count;
+
+ id = kcontrol->id;
+ count = kcontrol->count;
+ for (idx = 0; idx < count; idx++, id.index++, id.numid++)
+ snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+
+ return 0;
+}
+
/**
* snd_ctl_add - add the control instance to the card
* @card: the card instance
@@ -362,45 +396,18 @@ static int snd_ctl_find_hole(struct snd_card *card, unsigned int count)
*/
int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
{
- struct snd_ctl_elem_id id;
- unsigned int idx;
- unsigned int count;
int err = -EINVAL;
if (! kcontrol)
return err;
if (snd_BUG_ON(!card || !kcontrol->info))
goto error;
- id = kcontrol->id;
- if (id.index > UINT_MAX - kcontrol->count)
- goto error;
down_write(&card->controls_rwsem);
- if (snd_ctl_find_id(card, &id)) {
- up_write(&card->controls_rwsem);
- dev_err(card->dev, "control %i:%i:%i:%s:%i is already present\n",
- id.iface,
- id.device,
- id.subdevice,
- id.name,
- id.index);
- err = -EBUSY;
- goto error;
- }
- if (snd_ctl_find_hole(card, kcontrol->count) < 0) {
- up_write(&card->controls_rwsem);
- err = -ENOMEM;
- goto error;
- }
- list_add_tail(&kcontrol->list, &card->controls);
- card->controls_count += kcontrol->count;
- kcontrol->id.numid = card->last_numid + 1;
- card->last_numid += kcontrol->count;
- id = kcontrol->id;
- count = kcontrol->count;
+ err = __snd_ctl_add(card, kcontrol);
up_write(&card->controls_rwsem);
- for (idx = 0; idx < count; idx++, id.index++, id.numid++)
- snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
+ if (err < 0)
+ goto error;
return 0;
error:
@@ -1322,9 +1329,12 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
kctl->tlv.c = snd_ctl_elem_user_tlv;
/* This function manage to free the instance on failure. */
- err = snd_ctl_add(card, kctl);
- if (err < 0)
- return err;
+ down_write(&card->controls_rwsem);
+ err = __snd_ctl_add(card, kctl);
+ if (err < 0) {
+ snd_ctl_free_one(kctl);
+ goto unlock;
+ }
offset = snd_ctl_get_ioff(kctl, &info->id);
snd_ctl_build_ioff(&info->id, kctl, offset);
/*
@@ -1335,10 +1345,10 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
* which locks the element.
*/
- down_write(&card->controls_rwsem);
card->user_ctl_count++;
- up_write(&card->controls_rwsem);
+ unlock:
+ up_write(&card->controls_rwsem);
return 0;
}
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index f05cb6a8cbe02e..78ffe445d7757c 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -239,16 +239,12 @@ int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
int err;
while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
- size_t aligned_size;
if (err != -ENOMEM)
return err;
if (size <= PAGE_SIZE)
return -ENOMEM;
- aligned_size = PAGE_SIZE << get_order(size);
- if (size != aligned_size)
- size = aligned_size;
- else
- size >>= 1;
+ size >>= 1;
+ size = PAGE_SIZE << get_order(size);
}
if (! dmab->area)
return -ENOMEM;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 07feb35f19350a..443bb8ce82559f 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -950,6 +950,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
params_channels(params) / 8;
+ err = snd_pcm_oss_period_size(substream, params, sparams);
+ if (err < 0)
+ goto failure;
+
+ n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+ if (err < 0)
+ goto failure;
+
+ err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+ runtime->oss.periods, NULL);
+ if (err < 0)
+ goto failure;
+
+ snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+ err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+ if (err < 0) {
+ pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+ goto failure;
+ }
+
#ifdef CONFIG_SND_PCM_OSS_PLUGINS
snd_pcm_oss_plugin_clear(substream);
if (!direct) {
@@ -984,27 +1006,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
}
#endif
- err = snd_pcm_oss_period_size(substream, params, sparams);
- if (err < 0)
- goto failure;
-
- n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
- if (err < 0)
- goto failure;
-
- err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
- runtime->oss.periods, NULL);
- if (err < 0)
- goto failure;
-
- snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
- if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
- pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
- goto failure;
- }
-
if (runtime->oss.trigger) {
sw_params->start_threshold = 1;
} else {
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 6bda8f6c5f8410..cdff5f97648086 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -25,6 +25,7 @@
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
@@ -125,6 +126,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
return -EFAULT;
if (stream < 0 || stream > 1)
return -EINVAL;
+ stream = array_index_nospec(stream, 2);
if (get_user(subdevice, &info->subdevice))
return -EFAULT;
mutex_lock(&register_mutex);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 4c145d6bccd4dd..3ce2b877176239 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -648,27 +648,33 @@ EXPORT_SYMBOL(snd_interval_refine);
static int snd_interval_refine_first(struct snd_interval *i)
{
+ const unsigned int last_max = i->max;
+
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (snd_interval_single(i))
return 0;
i->max = i->min;
- i->openmax = i->openmin;
- if (i->openmax)
+ if (i->openmin)
i->max++;
+ /* only exclude max value if also excluded before refine */
+ i->openmax = (i->openmax && i->max >= last_max);
return 1;
}
static int snd_interval_refine_last(struct snd_interval *i)
{
+ const unsigned int last_min = i->min;
+
if (snd_BUG_ON(snd_interval_empty(i)))
return -EINVAL;
if (snd_interval_single(i))
return 0;
i->min = i->max;
- i->openmin = i->openmax;
- if (i->openmin)
+ if (i->openmax)
i->min--;
+ /* only exclude min value if also excluded before refine */
+ i->openmin = (i->openmin && i->min <= last_min);
return 1;
}
@@ -1843,8 +1849,6 @@ int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg)
{
switch (cmd) {
- case SNDRV_PCM_IOCTL1_INFO:
- return 0;
case SNDRV_PCM_IOCTL1_RESET:
return snd_pcm_lib_ioctl_reset(substream, arg);
case SNDRV_PCM_IOCTL1_CHANNEL_INFO:
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 70fa5806351b34..1296865ebc95d0 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -35,6 +35,7 @@
#include <sound/timer.h>
#include <sound/minors.h>
#include <linux/uio.h>
+#include <linux/delay.h>
/*
* Compatibility
@@ -78,12 +79,12 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
* and this may lead to a deadlock when the code path takes read sem
* twice (e.g. one in snd_pcm_action_nonatomic() and another in
* snd_pcm_stream_lock()). As a (suboptimal) workaround, let writer to
- * spin until it gets the lock.
+ * sleep until all the readers are completed without blocking by writer.
*/
-static inline void down_write_nonblock(struct rw_semaphore *lock)
+static inline void down_write_nonfifo(struct rw_semaphore *lock)
{
while (!down_write_trylock(lock))
- cond_resched();
+ msleep(1);
}
/**
@@ -213,11 +214,7 @@ int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info)
info->subdevices_avail = pstr->substream_count - pstr->substream_opened;
strlcpy(info->subname, substream->name, sizeof(info->subname));
runtime = substream->runtime;
- /* AB: FIXME!!! This is definitely nonsense */
- if (runtime) {
- info->sync = runtime->sync;
- substream->ops->ioctl(substream, SNDRV_PCM_IOCTL1_INFO, info);
- }
+
return 0;
}
@@ -1257,8 +1254,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+ switch (runtime->status->state) {
+ case SNDRV_PCM_STATE_SUSPENDED:
+ return -EBUSY;
+ /* unresumable PCM state; return -EBUSY for skipping suspend */
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_DISCONNECTED:
return -EBUSY;
+ }
runtime->trigger_master = substream;
return 0;
}
@@ -1825,7 +1829,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
res = -ENOMEM;
goto _nolock;
}
- down_write_nonblock(&snd_pcm_link_rwsem);
+ down_write_nonfifo(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1872,7 +1876,7 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
struct snd_pcm_substream *s;
int res = 0;
- down_write_nonblock(&snd_pcm_link_rwsem);
+ down_write_nonfifo(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
@@ -2224,7 +2228,8 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
static void pcm_release_private(struct snd_pcm_substream *substream)
{
- snd_pcm_unlink(substream);
+ if (snd_pcm_stream_linked(substream))
+ snd_pcm_unlink(substream);
}
void snd_pcm_release_substream(struct snd_pcm_substream *substream)
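The rename from down_write_nonblock() to down_write_nonfifo() reflects what the helper is really for: rwsems queue writers fairly, and a queued writer blocks later readers, which can deadlock here because the PCM code may take the read side twice on one path. A sketch of the scenario, with msleep(1) letting the writer yield the CPU instead of busy-spinning:

#include <linux/delay.h>
#include <linux/rwsem.h>

/* With a fair (FIFO) rwsem:
 *   task A: down_read(&sem);    outer read lock
 *   task B: down_write(&sem);   queues, and blocks *new* readers
 *   task A: down_read(&sem);    waits behind B -> A and B deadlock
 * Never queueing avoids this: A's nested down_read() proceeds, and
 * B acquires the lock once all readers are gone.
 */
static inline void down_write_nonfifo(struct rw_semaphore *lock)
{
	while (!down_write_trylock(lock))
		msleep(1);	/* sleep, don't spin, while readers drain */
}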
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 16f8124b11500c..c8b2309352d73e 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/nospec.h>
#include <sound/rawmidi.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -591,6 +592,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
return -ENXIO;
if (info->stream < 0 || info->stream > 1)
return -EINVAL;
+ info->stream = array_index_nospec(info->stream, 2);
pstr = &rmidi->streams[info->stream];
if (pstr->substream_count == 0)
return -ENOENT;
@@ -635,7 +637,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
- char *newbuf;
+ char *newbuf, *oldbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
if (substream->append && substream->use_count > 1)
@@ -648,13 +650,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
- newbuf = krealloc(runtime->buffer, params->buffer_size,
- GFP_KERNEL);
+ newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
+ spin_lock_irq(&runtime->lock);
+ oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
runtime->avail = runtime->buffer_size;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
+ spin_unlock_irq(&runtime->lock);
+ kfree(oldbuf);
}
runtime->avail_min = params->avail_min;
substream->active_sensing = !params->no_active_sensing;
@@ -665,7 +671,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
- char *newbuf;
+ char *newbuf, *oldbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
snd_rawmidi_drain_input(substream);
@@ -676,12 +682,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
- newbuf = krealloc(runtime->buffer, params->buffer_size,
- GFP_KERNEL);
+ newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
+ spin_lock_irq(&runtime->lock);
+ oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
+ spin_unlock_irq(&runtime->lock);
+ kfree(oldbuf);
}
runtime->avail_min = params->avail_min;
return 0;
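The krealloc() removal above fixes a use-after-free: krealloc() can free the old buffer while the interrupt handler is still reading it through runtime->buffer. The safe sequence is allocate first, swap the pointer and reset the read/write offsets under the runtime spinlock, then free the old buffer after the lock is dropped. A generic sketch; struct my_runtime and its fields are hypothetical:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_runtime {
	spinlock_t lock;	/* also taken by the IRQ handler */
	char *buffer;
	size_t size, appl_ptr, hw_ptr;
};

static int resize_buffer(struct my_runtime *rt, size_t new_size)
{
	char *newbuf, *oldbuf;

	newbuf = kmalloc(new_size, GFP_KERNEL);	/* allocate outside the lock */
	if (!newbuf)
		return -ENOMEM;
	spin_lock_irq(&rt->lock);
	oldbuf = rt->buffer;
	rt->buffer = newbuf;		/* concurrent users now see newbuf */
	rt->size = new_size;
	rt->appl_ptr = rt->hw_ptr = 0;	/* offsets into oldbuf are invalid */
	spin_unlock_irq(&rt->lock);
	kfree(oldbuf);			/* safe: nothing references it now */
	return 0;
}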
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index ea545f9291b404..df5b984bb33f7a 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -617,13 +617,14 @@ int
snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
- if (dev < 0 || dev >= dp->max_synthdev)
+ if (!info)
return -ENXIO;
- if (dp->synths[dev].is_midi) {
+ if (info->is_midi) {
struct midi_info minf;
- snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+ snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
inf->synth_type = SYNTH_TYPE_MIDI;
inf->synth_subtype = 0;
inf->nr_voices = 16;
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index ef494ffc13697d..975a7c939d2fc9 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -163,6 +163,7 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
int count, res;
unsigned char buf[32], *pbuf;
unsigned long flags;
+ bool check_resched = !in_atomic();
if (up) {
vmidi->trigger = 1;
@@ -200,6 +201,15 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
}
+ if (!check_resched)
+ continue;
+ /* temporarily unlock and call cond_resched() to avoid a
+ * CPU soft lockup, which may happen when writing from
+ * a huge rawmidi buffer
+ */
+ spin_unlock_irqrestore(&substream->runtime->lock, flags);
+ cond_resched();
+ spin_lock_irqsave(&substream->runtime->lock, flags);
}
out:
spin_unlock_irqrestore(&substream->runtime->lock, flags);
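The virmidi change above addresses a soft lockup: draining a huge rawmidi buffer inside spin_lock_irqsave() can monopolize a CPU. The fix periodically drops the lock and calls cond_resched(), but only when the trigger was not invoked from atomic context, which is tested once via in_atomic() before the loop. A sketch of the pattern, reusing the hypothetical struct my_runtime from the sketch above; work_left() and process_chunk() are stand-ins:

#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

extern bool work_left(struct my_runtime *rt);		/* stand-in */
extern void process_chunk(struct my_runtime *rt);	/* stand-in */

static void drain_all(struct my_runtime *rt)
{
	unsigned long flags;
	bool can_resched = !in_atomic();	/* decide once, up front */

	spin_lock_irqsave(&rt->lock, flags);
	while (work_left(rt)) {
		process_chunk(rt);
		if (!can_resched)
			continue;
		/* briefly let other tasks run, then re-take the lock */
		spin_unlock_irqrestore(&rt->lock, flags);
		cond_resched();
		spin_lock_irqsave(&rt->lock, flags);
	}
	spin_unlock_irqrestore(&rt->lock, flags);
}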
diff --git a/sound/core/timer.c b/sound/core/timer.c
index ef850a99d64a81..f989adb98a229a 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -35,6 +35,9 @@
#include <sound/initval.h>
#include <linux/kmod.h>
+/* internal flags */
+#define SNDRV_TIMER_IFLG_PAUSED 0x00010000
+
#if IS_ENABLED(CONFIG_SND_HRTIMER)
#define DEFAULT_TIMER_LIMIT 4
#elif IS_ENABLED(CONFIG_SND_RTCTIMER)
@@ -547,6 +550,10 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
}
}
timeri->flags &= ~(SNDRV_TIMER_IFLG_RUNNING | SNDRV_TIMER_IFLG_START);
+ if (stop)
+ timeri->flags &= ~SNDRV_TIMER_IFLG_PAUSED;
+ else
+ timeri->flags |= SNDRV_TIMER_IFLG_PAUSED;
snd_timer_notify1(timeri, stop ? SNDRV_TIMER_EVENT_STOP :
SNDRV_TIMER_EVENT_PAUSE);
unlock:
@@ -608,6 +615,10 @@ int snd_timer_stop(struct snd_timer_instance *timeri)
*/
int snd_timer_continue(struct snd_timer_instance *timeri)
{
+ /* timer can continue only after pause */
+ if (!(timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+ return -EINVAL;
+
if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE)
return snd_timer_start_slave(timeri, false);
else
@@ -1837,6 +1848,9 @@ static int snd_timer_user_continue(struct file *file)
tu = file->private_data;
if (!tu->timeri)
return -EBADFD;
+ /* start the timer instead of continuing if it was never paused */
+ if (!(tu->timeri->flags & SNDRV_TIMER_IFLG_PAUSED))
+ return snd_timer_user_start(file);
tu->timeri->lost = 0;
return (err = snd_timer_continue(tu->timeri)) < 0 ? err : 0;
}
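The new internal SNDRV_TIMER_IFLG_PAUSED flag distinguishes "paused" from "stopped", so SNDRV_TIMER_IOCTL_CONTINUE can no longer resume a timer instance that was never started. The resulting bookkeeping, in brief:

/* pause/continue state handling introduced above (sketch):
 *
 *   start:     flags |=  RUNNING;            flags &= ~PAUSED;
 *   pause:     flags &= ~RUNNING;            flags |=  PAUSED;
 *   stop:      flags &= ~(RUNNING | PAUSED);
 *   continue:  allowed only if (flags & PAUSED), else -EINVAL;
 *              the user-space ioctl falls back to a plain start.
 */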
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 091290d1f3ea0a..3a03614585974f 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -382,7 +382,7 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Apogee Electronics, DA/AD/DD-16X (X-FireWire card) */
SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00010048, &spec_normal),
/* Apogee Electronics, Ensemble */
- SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x00001eee, &spec_normal),
+ SND_BEBOB_DEV_ENTRY(VEN_APOGEE, 0x01eeee, &spec_normal),
/* ESI, Quatafire610 */
SND_BEBOB_DEV_ENTRY(VEN_ESI, 0x00010064, &spec_normal),
/* AcousticReality, eARMasterOne */
@@ -422,7 +422,19 @@ static const struct ieee1394_device_id bebob_id_table[] = {
/* Focusrite, SaffirePro 26 I/O */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
/* Focusrite, SaffirePro 10 I/O */
- SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
+ {
+ // The combination of vendor_id and model_id is the same as
+ // the one of Liquid Saffire 56.
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = VEN_FOCUSRITE,
+ .model_id = 0x000006,
+ .specifier_id = 0x00a02d,
+ .version = 0x010001,
+ .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
+ },
/* Focusrite, Saffire(no label and LE) */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
&saffire_spec),
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 07e5abdbceb59c..0a576ccca3dc8d 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __le32 cues[3] = {
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
- };
+ __le32 *cues;
/* check date of software used to build */
err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
&date, sizeof(u64));
if (err < 0)
- goto end;
+ return err;
/*
* firmware version 5058 or later has date later than "20070401", but
* 'date' is not null-terminated.
@@ -114,20 +110,28 @@ int snd_bebob_maudio_load_firmware(struct fw_unit *unit)
if (date < 0x3230303730343031LL) {
dev_err(&unit->device,
"Use firmware version 5058 or later\n");
- err = -ENOSYS;
- goto end;
+ return -ENXIO;
}
+ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+ if (!cues)
+ return -ENOMEM;
+
+ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
device->node_id, device->generation,
device->max_speed, BEBOB_ADDR_REG_REQ,
- cues, sizeof(cues));
+ cues, 3 * sizeof(*cues));
+ kfree(cues);
if (rcode != RCODE_COMPLETE) {
dev_err(&unit->device,
"Failed to send a cue to load firmware\n");
err = -EIO;
}
-end:
+
return err;
}
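Moving the bootloader cues off the stack matters because fw_run_transaction() hands the payload to the FireWire core, which may DMA from it; with CONFIG_VMAP_STACK the stack lives in vmalloc space and is not DMA-addressable. The rule of thumb, as a sketch; send_block() is a stand-in for any API that may DMA from its buffer, and the cue values are hypothetical:

#include <linux/device.h>
#include <linux/slab.h>

extern int send_block(struct device *dev, void *buf, size_t len); /* stand-in */

static int send_cues(struct device *dev)
{
	__le32 *cues;
	int err;

	/* "u32 cues[3];" on the stack would not be DMA-safe here */
	cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
	if (!cues)
		return -ENOMEM;
	cues[0] = cpu_to_le32(0x01);	/* fixed endianness for the wire */
	cues[1] = cpu_to_le32(0x02);
	cues[2] = cpu_to_le32(0x03);
	err = send_block(dev, cues, 3 * sizeof(*cues));
	kfree(cues);
	return err;
}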
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 9fee464e5d4977..fca40ee916c1b9 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@ static void azx_clear_corbrp(struct hdac_bus *bus)
*/
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
+ WARN_ON_ONCE(!bus->rb.area);
+
spin_lock_irq(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
@@ -394,13 +396,15 @@ bool snd_hdac_bus_init_chip(struct hdac_bus *bus, bool full_reset)
/* reset controller */
azx_reset(bus, full_reset);
- /* initialize interrupts */
+ /* clear interrupts */
azx_int_clear(bus);
- azx_int_enable(bus);
/* initialize the codec command I/O */
snd_hdac_bus_init_cmd_io(bus);
+ /* enable interrupts after CORB/RIRB buffers are initialized above */
+ azx_int_enable(bus);
+
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
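The reordering above closes a window in which a stray interrupt could fire while the CORB/RIRB rings were not yet programmed, and the added WARN_ON_ONCE documents that the ring-buffer memory must already exist. The required bring-up order:

/* controller bring-up order (sketch of the fixed sequence):
 *   1. azx_int_clear(bus);            clear stale interrupt status
 *   2. snd_hdac_bus_init_cmd_io(bus); CORB/RIRB rings programmed
 *   3. azx_int_enable(bus);           only now may an IRQ be delivered
 */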
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index a31ea6c22d1919..2d7379dec1f03c 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -82,10 +82,10 @@
static void set_default_audio_parameters(struct snd_msnd *chip)
{
- chip->play_sample_size = DEFSAMPLESIZE;
+ chip->play_sample_size = snd_pcm_format_width(DEFSAMPLESIZE);
chip->play_sample_rate = DEFSAMPLERATE;
chip->play_channels = DEFCHANNELS;
- chip->capture_sample_size = DEFSAMPLESIZE;
+ chip->capture_sample_size = snd_pcm_format_width(DEFSAMPLESIZE);
chip->capture_sample_rate = DEFSAMPLERATE;
chip->capture_channels = DEFCHANNELS;
}
diff --git a/sound/isa/wavefront/wavefront_synth.c b/sound/isa/wavefront/wavefront_synth.c
index 69f76ff5693d4e..718d5e3b7806f0 100644
--- a/sound/isa/wavefront/wavefront_synth.c
+++ b/sound/isa/wavefront/wavefront_synth.c
@@ -785,6 +785,9 @@ wavefront_send_patch (snd_wavefront_t *dev, wavefront_patch_info *header)
DPRINT (WF_DEBUG_LOAD_PATCH, "downloading patch %d\n",
header->number);
+ if (header->number >= ARRAY_SIZE(dev->patch_status))
+ return -EINVAL;
+
dev->patch_status[header->number] |= WF_SLOT_FILLED;
bptr = buf;
@@ -809,6 +812,9 @@ wavefront_send_program (snd_wavefront_t *dev, wavefront_patch_info *header)
DPRINT (WF_DEBUG_LOAD_PATCH, "downloading program %d\n",
header->number);
+ if (header->number >= ARRAY_SIZE(dev->prog_status))
+ return -EINVAL;
+
dev->prog_status[header->number] = WF_SLOT_USED;
/* XXX need to zero existing SLOT_USED bit for program_status[i]
@@ -898,6 +904,9 @@ wavefront_send_sample (snd_wavefront_t *dev,
header->number = x;
}
+ if (header->number >= WF_MAX_SAMPLE)
+ return -EINVAL;
+
if (header->size) {
/* XXX it's a debatable point whether or not RDONLY semantics
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 913b731d2236a5..f40330ddb9b2ce 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -1531,7 +1531,6 @@ static int snd_wss_playback_open(struct snd_pcm_substream *substream)
if (err < 0) {
if (chip->release_dma)
chip->release_dma(chip, chip->dma_private_data, chip->dma1);
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
return err;
}
chip->playback_substream = substream;
@@ -1572,7 +1571,6 @@ static int snd_wss_capture_open(struct snd_pcm_substream *substream)
if (err < 0) {
if (chip->release_dma)
chip->release_dma(chip, chip->dma_private_data, chip->dma2);
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
return err;
}
chip->capture_substream = substream;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 82259ca61e6442..c4840fda44b408 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -824,7 +824,7 @@ static int snd_ac97_put_spsa(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_
{
struct snd_ac97 *ac97 = snd_kcontrol_chip(kcontrol);
int reg = kcontrol->private_value & 0xff;
- int shift = (kcontrol->private_value >> 8) & 0xff;
+ int shift = (kcontrol->private_value >> 8) & 0x0f;
int mask = (kcontrol->private_value >> 16) & 0xff;
// int invert = (kcontrol->private_value >> 24) & 0xff;
unsigned short value, old, new;
diff --git a/sound/pci/ca0106/ca0106.h b/sound/pci/ca0106/ca0106.h
index 04402c14cb2392..9847b669cf3cf0 100644
--- a/sound/pci/ca0106/ca0106.h
+++ b/sound/pci/ca0106/ca0106.h
@@ -582,7 +582,7 @@
#define SPI_PL_BIT_R_R (2<<7) /* right channel = right */
#define SPI_PL_BIT_R_C (3<<7) /* right channel = (L+R)/2 */
#define SPI_IZD_REG 2
-#define SPI_IZD_BIT (1<<4) /* infinite zero detect */
+#define SPI_IZD_BIT (0<<4) /* infinite zero detect */
#define SPI_FMT_REG 3
#define SPI_FMT_BIT_RJ (0<<0) /* right justified mode */
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index d2951ed4bf7194..1984291ebd0763 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -899,6 +899,9 @@ int cs46xx_dsp_proc_done (struct snd_cs46xx *chip)
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
int i;
+ if (!ins)
+ return 0;
+
snd_info_free_entry(ins->proc_sym_info_entry);
ins->proc_sym_info_entry = NULL;
diff --git a/sound/pci/cs5535audio/cs5535audio.h b/sound/pci/cs5535audio/cs5535audio.h
index 0579daa6221571..425d1b6640293d 100644
--- a/sound/pci/cs5535audio/cs5535audio.h
+++ b/sound/pci/cs5535audio/cs5535audio.h
@@ -66,9 +66,9 @@ struct cs5535audio_dma_ops {
};
struct cs5535audio_dma_desc {
- u32 addr;
- u16 size;
- u16 ctlreserved;
+ __le32 addr;
+ __le16 size;
+ __le16 ctlreserved;
};
struct cs5535audio_dma {
diff --git a/sound/pci/cs5535audio/cs5535audio_pcm.c b/sound/pci/cs5535audio/cs5535audio_pcm.c
index 9c2dc911d8d7f0..709f1c584d3eec 100644
--- a/sound/pci/cs5535audio/cs5535audio_pcm.c
+++ b/sound/pci/cs5535audio/cs5535audio_pcm.c
@@ -158,8 +158,8 @@ static int cs5535audio_build_dma_packets(struct cs5535audio *cs5535au,
lastdesc->addr = cpu_to_le32((u32) dma->desc_buf.addr);
lastdesc->size = 0;
lastdesc->ctlreserved = cpu_to_le16(PRD_JMP);
- jmpprd_addr = cpu_to_le32(lastdesc->addr +
- (sizeof(struct cs5535audio_dma_desc)*periods));
+ jmpprd_addr = (u32)dma->desc_buf.addr +
+ sizeof(struct cs5535audio_dma_desc) * periods;
dma->substream = substream;
dma->period_bytes = period_bytes;
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 56fc47bd6dbab3..5d422d65e62bda 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/tlv.h>
@@ -1000,6 +1001,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
if (ipcm->channels > 32)
return -EINVAL;
pcm = &emu->fx8010.pcm[ipcm->substream];
@@ -1046,6 +1049,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
pcm = &emu->fx8010.pcm[ipcm->substream];
mutex_lock(&emu->fx8010.lock);
spin_lock_irq(&emu->reg_lock);
@@ -2520,7 +2525,7 @@ static int snd_emu10k1_fx8010_ioctl(struct snd_hwdep * hw, struct file *file, un
emu->support_tlv = 1;
return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
case SNDRV_EMU10K1_IOCTL_INFO:
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
snd_emu10k1_fx8010_info(emu, info);
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 14a305bd8a9857..72e442d86bb13c 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1850,7 +1850,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device)
if (!kctl)
return -ENOMEM;
kctl->id.device = device;
- snd_ctl_add(emu->card, kctl);
+ err = snd_ctl_add(emu->card, kctl);
+ if (err < 0)
+ return err;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
index 4f1f69be18651b..8c778fa3303173 100644
--- a/sound/pci/emu10k1/memory.c
+++ b/sound/pci/emu10k1/memory.c
@@ -237,13 +237,13 @@ __found_pages:
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
if (addr & ~emu->dma_mask) {
- dev_err(emu->card->dev,
+ dev_err_ratelimited(emu->card->dev,
"max memory size is 0x%lx (addr = 0x%lx)!!\n",
emu->dma_mask, (unsigned long)addr);
return 0;
}
if (addr & (EMUPAGESIZE-1)) {
- dev_err(emu->card->dev, "page is not aligned\n");
+ dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
return 0;
}
return 1;
@@ -334,7 +334,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
else
addr = snd_pcm_sgbuf_get_addr(substream, ofs);
if (! is_valid_page(emu, addr)) {
- dev_err(emu->card->dev,
+ dev_err_ratelimited(emu->card->dev,
"emu: failure page = %d\n", idx);
mutex_unlock(&hdr->block_mutex);
return NULL;
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 1fdd92b6f18f37..d6e89a6d0bb91f 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1050,11 +1050,19 @@ static int snd_fm801_mixer(struct fm801 *chip)
if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0)
return err;
}
- for (i = 0; i < FM801_CONTROLS; i++)
- snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip));
+ for (i = 0; i < FM801_CONTROLS; i++) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&snd_fm801_controls[i], chip));
+ if (err < 0)
+ return err;
+ }
if (chip->multichannel) {
- for (i = 0; i < FM801_CONTROLS_MULTI; i++)
- snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
+ for (i = 0; i < FM801_CONTROLS_MULTI; i++) {
+ err = snd_ctl_add(chip->card,
+ snd_ctl_new1(&snd_fm801_controls_multi[i], chip));
+ if (err < 0)
+ return err;
+ }
}
return 0;
}
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index 6efadbfb3fe351..7ea201c05e5da6 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -109,7 +109,8 @@ static int hda_codec_driver_probe(struct device *dev)
err = snd_hda_codec_build_controls(codec);
if (err < 0)
goto error_module;
- if (codec->card->registered) {
+ /* only register after the bus probe finished; otherwise it's racy */
+ if (!codec->bus->bus_probing && codec->card->registered) {
err = snd_card_register(codec->card);
if (err < 0)
goto error_module;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index a2d943430aa0f3..6e34ffbc2c3331 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2978,6 +2978,7 @@ static void hda_call_codec_resume(struct hda_codec *codec)
hda_jackpoll_work(&codec->jackpoll_work.work);
else
snd_hda_jack_report_sync(codec);
+ codec->core.dev.power.power_state = PMSG_ON;
atomic_dec(&codec->core.in_pm);
}
@@ -3010,10 +3011,62 @@ static int hda_codec_runtime_resume(struct device *dev)
}
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_SLEEP
+static int hda_codec_force_resume(struct device *dev)
+{
+ int ret;
+
+ /* The get/put pair below enforces the runtime resume even if the
+ * device hasn't been used at suspend time. This trick is needed to
+ * pick up any jack state change that happened during sleep.
+ */
+ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_force_resume(dev);
+ pm_runtime_put(dev);
+ return ret;
+}
+
+static int hda_codec_pm_suspend(struct device *dev)
+{
+ dev->power.power_state = PMSG_SUSPEND;
+ return pm_runtime_force_suspend(dev);
+}
+
+static int hda_codec_pm_resume(struct device *dev)
+{
+ dev->power.power_state = PMSG_RESUME;
+ return hda_codec_force_resume(dev);
+}
+
+static int hda_codec_pm_freeze(struct device *dev)
+{
+ dev->power.power_state = PMSG_FREEZE;
+ return pm_runtime_force_suspend(dev);
+}
+
+static int hda_codec_pm_thaw(struct device *dev)
+{
+ dev->power.power_state = PMSG_THAW;
+ return hda_codec_force_resume(dev);
+}
+
+static int hda_codec_pm_restore(struct device *dev)
+{
+ dev->power.power_state = PMSG_RESTORE;
+ return hda_codec_force_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
/* referred in hda_bind.c */
const struct dev_pm_ops hda_codec_driver_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = hda_codec_pm_suspend,
+ .resume = hda_codec_pm_resume,
+ .freeze = hda_codec_pm_freeze,
+ .thaw = hda_codec_pm_thaw,
+ .poweroff = hda_codec_pm_suspend,
+ .restore = hda_codec_pm_restore,
+#endif /* CONFIG_PM_SLEEP */
SET_RUNTIME_PM_OPS(hda_codec_runtime_suspend, hda_codec_runtime_resume,
NULL)
};
@@ -3993,7 +4046,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
list_for_each_codec(codec, bus) {
/* FIXME: maybe a better way needed for forced reset */
- cancel_delayed_work_sync(&codec->jackpoll_work);
+ if (current_work() != &codec->jackpoll_work.work)
+ cancel_delayed_work_sync(&codec->jackpoll_work);
#ifdef CONFIG_PM
if (hda_codec_is_power_on(codec)) {
hda_call_codec_suspend(codec);
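The split PM callbacks record the exact sleep phase in dev->power.power_state, and all resume paths go through hda_codec_force_resume() so even a runtime-suspended, unused codec is woken once to refresh jack state. The refcount dance, annotated; this restates the helper above rather than introducing new API:

#include <linux/pm_runtime.h>

static int force_resume(struct device *dev)
{
	int ret;

	pm_runtime_get_noresume(dev);	/* take a usage ref, don't resume yet */
	ret = pm_runtime_force_resume(dev); /* resume synchronously ourselves */
	pm_runtime_put(dev);		/* drop the ref; the device may
					 * runtime-suspend again when idle */
	return ret;
}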
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 776dffa88aee41..171e11be938d98 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -68,6 +68,7 @@ struct hda_bus {
unsigned int response_reset:1; /* controller was reset */
unsigned int in_reset:1; /* during reset operation */
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+ unsigned int bus_probing:1; /* during probing process */
int primary_dig_out_type; /* primary digital out PCM type */
unsigned int mixer_assigned; /* codec addr for mixer name */
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 863da5276b8a51..979c92834de6be 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -547,8 +547,10 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
return err;
strlcpy(pcm->name, cpcm->name, sizeof(pcm->name));
apcm = kzalloc(sizeof(*apcm), GFP_KERNEL);
- if (apcm == NULL)
+ if (apcm == NULL) {
+ snd_device_free(chip->card, pcm);
return -ENOMEM;
+ }
apcm->chip = chip;
apcm->pcm = pcm;
apcm->codec = codec;
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index b69552c19032a1..97a6652c2cca66 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -151,6 +151,7 @@ struct azx {
unsigned int msi:1;
unsigned int probing:1; /* codec probing phase */
unsigned int snoop:1;
+ unsigned int uc_buffer:1; /* non-cached pages for stream buffers */
unsigned int align_buffer_size:1;
unsigned int region_requested:1;
unsigned int disabled:1; /* disabled by vga_switcheroo */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index e84e38bd145426..b2e44de21dceb5 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -407,7 +407,7 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
#ifdef CONFIG_SND_DMA_SGBUF
if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
struct snd_sg_buf *sgbuf = dmab->private_data;
- if (chip->driver_type == AZX_DRIVER_CMEDIA)
+ if (!chip->uc_buffer)
return; /* deal with only CORB/RIRB buffers */
if (on)
set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
@@ -1490,6 +1490,7 @@ static void azx_check_snoop_available(struct azx *chip)
dev_info(chip->card->dev, "Force to %s mode by module option\n",
snoop ? "snoop" : "non-snoop");
chip->snoop = snoop;
+ chip->uc_buffer = !snoop;
return;
}
@@ -1510,8 +1511,12 @@ static void azx_check_snoop_available(struct azx *chip)
snoop = false;
chip->snoop = snoop;
- if (!snoop)
+ if (!snoop) {
dev_info(chip->card->dev, "Force to non-snoop mode\n");
+ /* C-Media requires non-cached pages only for CORB/RIRB */
+ if (chip->driver_type != AZX_DRIVER_CMEDIA)
+ chip->uc_buffer = true;
+ }
}
static void azx_probe_work(struct work_struct *work)
@@ -1897,7 +1902,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
#ifdef CONFIG_X86
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
- if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
+ if (chip->uc_buffer)
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
}
@@ -2008,7 +2013,7 @@ out_free:
*/
static struct snd_pci_quirk power_save_blacklist[] = {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
- SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
+ SND_PCI_QUIRK(0x1849, 0xc892, "Asrock B85M-ITX", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
@@ -2034,6 +2039,7 @@ static int azx_probe_continue(struct azx *chip)
int val;
int err;
+ to_hda_bus(bus)->bus_probing = 1;
hda->probe_continued = 1;
/* Request display power well for the HDA controller or codec. For
@@ -2134,6 +2140,7 @@ i915_power_fail:
if (err < 0)
hda->init_failed = 1;
complete_all(&hda->probe_wait);
+ to_hda_bus(bus)->bus_probing = 0;
return err;
}
@@ -2294,9 +2301,14 @@ static const struct pci_device_id azx_ids[] = {
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD Stoney */
+ { PCI_DEVICE(0x1022, 0x157a),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+ AZX_DCAPS_PM_RUNTIME },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+ AZX_DCAPS_PM_RUNTIME },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x1308),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
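The new uc_buffer bit gives the driver a single answer to "are stream buffers non-cached?": it is set whenever snooping is off, except on C-Media where only CORB/RIRB need the treatment. Both consumers must agree, the page-attribute side (__mark_pages_wc) and the userspace mapping, otherwise the same memory would be mapped cached in one place and write-combined in another. The mmap side in isolation, as a sketch:

#include <linux/mm.h>

/* export the stream buffer with cacheability matching the driver's */
static void mmap_prepare(struct vm_area_struct *area, bool uc_buffer)
{
#ifdef CONFIG_X86
	if (uc_buffer)	/* driver accesses the buffer write-combined */
		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
}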
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 58c0aad3728421..d04e293b53f499 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -249,10 +249,12 @@ static int hda_tegra_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+ struct hdac_bus *bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_stop_chip(chip);
+ synchronize_irq(bus->irq);
azx_enter_link_reset(chip);
hda_tegra_disable_clocks(hda);
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index f540659e7ed10b..def6453d274b56 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -38,6 +38,10 @@
/* Enable this to see controls for tuning purpose. */
/*#define ENABLE_TUNING_CONTROLS*/
+#ifdef ENABLE_TUNING_CONTROLS
+#include <sound/tlv.h>
+#endif
+
#define FLOAT_ZERO 0x00000000
#define FLOAT_ONE 0x3f800000
#define FLOAT_TWO 0x40000000
@@ -3067,8 +3071,8 @@ static int equalizer_ctl_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
-static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0);
+static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0);
static int add_tuning_control(struct hda_codec *codec,
hda_nid_t pnid, hda_nid_t nid,
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 9fae1d2483185f..40dd4655645267 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -205,6 +205,7 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
struct conexant_spec *spec = codec->spec;
switch (codec->core.vendor_id) {
+ case 0x14f12008: /* CX8200 */
case 0x14f150f2: /* CX20722 */
case 0x14f150f4: /* CX20724 */
break;
@@ -212,13 +213,14 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
return;
}
- /* Turn the CX20722 codec into D3 to avoid spurious noises
+ /* Put the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
snd_hda_codec_write(codec, codec->core.afg, 0,
AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ msleep(10);
}
static void cx_auto_free(struct hda_codec *codec)
@@ -851,6 +853,10 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
@@ -863,6 +869,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
+ SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 580b8943b965c3..5d8ac2d798df39 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2447,6 +2447,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+ SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4791,6 +4792,13 @@ static void alc280_fixup_hp_9480m(struct hda_codec *codec,
}
}
+static void alc_fixup_disable_mic_vref(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ snd_hda_codec_set_pin_target(codec, 0x19, PIN_VREFHIZ);
+}
+
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
@@ -4890,6 +4898,7 @@ enum {
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DISABLE_MIC_VREF,
ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_FIXUP_DISABLE_DAC3,
ALC280_FIXUP_HP_HEADSET_MIC,
@@ -5545,6 +5554,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
},
+ [ALC225_FIXUP_DISABLE_MIC_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_mic_vref,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
[ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -5554,7 +5569,7 @@ static const struct hda_fixup alc269_fixups[] = {
{}
},
.chained = true,
- .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ .chain_id = ALC225_FIXUP_DISABLE_MIC_VREF
},
[ALC280_FIXUP_HP_HEADSET_MIC] = {
.type = HDA_FIXUP_FUNC,
@@ -5641,6 +5656,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 7c8941b8b2defd..dd6c9e6a1d53c4 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -30,6 +30,7 @@
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4065,15 +4066,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
- int mapped_channel;
+ unsigned int channel = info->channel;
- if (snd_BUG_ON(info->channel >= hdsp->max_channels))
+ if (snd_BUG_ON(channel >= hdsp->max_channels))
return -EINVAL;
+ channel = array_index_nospec(channel, hdsp->max_channels);
- if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
+ if (hdsp->channel_map[channel] < 0)
return -EINVAL;
- info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
+ info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
info->first = 0;
info->step = 32;
return 0;
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index cedf13b64803ac..2f18b1cdc2cd4d 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -123,7 +123,7 @@ static int snd_trident_probe(struct pci_dev *pci,
} else {
strcpy(card->shortname, "Trident ");
}
- strcat(card->shortname, card->driver);
+ strcat(card->shortname, str);
sprintf(card->longname, "%s PCI Audio at 0x%lx, irq %d",
card->shortname, trident->port, trident->irq);
diff --git a/sound/pci/vx222/vx222_ops.c b/sound/pci/vx222/vx222_ops.c
index 8e457ea27f8918..1997bb048d8b9a 100644
--- a/sound/pci/vx222/vx222_ops.c
+++ b/sound/pci/vx222/vx222_ops.c
@@ -275,7 +275,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--) {
- outl(cpu_to_le32(*addr), port);
+ outl(*addr, port);
addr++;
}
addr = (u32 *)runtime->dma_area;
@@ -285,7 +285,7 @@ static void vx2_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; count > 0; count--) {
- outl(cpu_to_le32(*addr), port);
+ outl(*addr, port);
addr++;
}
@@ -313,7 +313,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--)
- *addr++ = le32_to_cpu(inl(port));
+ *addr++ = inl(port);
addr = (u32 *)runtime->dma_area;
pipe->hw_ptr = 0;
}
@@ -321,7 +321,7 @@ static void vx2_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 2; /* in 32bit words */
/* Transfer using pseudo-dma. */
for (; count > 0; count--)
- *addr++ = le32_to_cpu(inl(port));
+ *addr++ = inl(port);
vx2_release_pseudo_dma(chip);
}
diff --git a/sound/pcmcia/vx/vxp_ops.c b/sound/pcmcia/vx/vxp_ops.c
index 56aa1ba73ccc1c..49a883341efffd 100644
--- a/sound/pcmcia/vx/vxp_ops.c
+++ b/sound/pcmcia/vx/vxp_ops.c
@@ -375,7 +375,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--) {
- outw(cpu_to_le16(*addr), port);
+ outw(*addr, port);
addr++;
}
addr = (unsigned short *)runtime->dma_area;
@@ -385,7 +385,7 @@ static void vxp_dma_write(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; count > 0; count--) {
- outw(cpu_to_le16(*addr), port);
+ outw(*addr, port);
addr++;
}
vx_release_pseudo_dma(chip);
@@ -417,7 +417,7 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
length >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; length > 0; length--)
- *addr++ = le16_to_cpu(inw(port));
+ *addr++ = inw(port);
addr = (unsigned short *)runtime->dma_area;
pipe->hw_ptr = 0;
}
@@ -425,12 +425,12 @@ static void vxp_dma_read(struct vx_core *chip, struct snd_pcm_runtime *runtime,
count >>= 1; /* in 16bit words */
/* Transfer using pseudo-dma. */
for (; count > 1; count--)
- *addr++ = le16_to_cpu(inw(port));
+ *addr++ = inw(port);
/* Disable DMA */
pchip->regDIALOG &= ~VXP_DLG_DMAREAD_SEL_MASK;
vx_outb(chip, DIALOG, pchip->regDIALOG);
/* Read the last word (16 bits) */
- *addr = le16_to_cpu(inw(port));
+ *addr = inw(port);
/* Disable 16-bit accesses */
pchip->regDIALOG &= ~VXP_DLG_DMA16_SEL_MASK;
vx_outb(chip, DIALOG, pchip->regDIALOG);
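The vx222/vxp changes above revert a double byte swap: outl()/outw() take CPU-endian values and perform the bus-endian conversion internally, so wrapping the argument in cpu_to_le32()/cpu_to_le16() corrupted data on big-endian systems (and mixed __le32 with u32 as far as sparse is concerned). In short:

#include <asm/io.h>

static void write_sample(unsigned long port, u32 val)
{
	outl(val, port);	/* correct: outl() converts to bus order */
	/* outl(cpu_to_le32(val), port) would swap twice on big-endian */
}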
diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
index 85962657aabe02..517963ef484726 100644
--- a/sound/soc/cirrus/edb93xx.c
+++ b/sound/soc/cirrus/edb93xx.c
@@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = {
.cpu_dai_name = "ep93xx-i2s",
.codec_name = "spi0.0",
.codec_dai_name = "cs4271-hifi",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &edb93xx_ops,
};
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 934f8aefdd90b8..0dc3852c46219f 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -51,7 +51,9 @@
#define EP93XX_I2S_WRDLEN_24 (1 << 0)
#define EP93XX_I2S_WRDLEN_32 (2 << 0)
-#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */
+#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
+
+#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
#define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
#define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
@@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
- unsigned int clk_cfg, lin_ctrl;
+ unsigned int clk_cfg;
+ unsigned int txlin_ctrl = 0;
+ unsigned int rxlin_ctrl = 0;
clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
- lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
clk_cfg |= EP93XX_I2S_CLKCFG_REL;
- lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
break;
case SND_SOC_DAIFMT_LEFT_J:
clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
- lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
break;
case SND_SOC_DAIFMT_RIGHT_J:
clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
- lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST;
+ rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
+ txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
break;
default:
@@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
/* Negative bit clock, lrclk low on left word */
- clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL);
+ clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
break;
case SND_SOC_DAIFMT_NB_IF:
/* Negative bit clock, lrclk low on right word */
clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
- clk_cfg |= EP93XX_I2S_CLKCFG_REL;
+ clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
break;
case SND_SOC_DAIFMT_IB_NF:
/* Positive bit clock, lrclk low on left word */
clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
- clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
+ clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
break;
case SND_SOC_DAIFMT_IB_IF:
/* Positive bit clock, lrclk low on right word */
- clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL;
+ clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
break;
}
/* Write new register values */
ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
- ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl);
- ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl);
+ ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
+ ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
return 0;
}
diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
index 98089df08df62e..c6737a573bc086 100644
--- a/sound/soc/cirrus/snappercl15.c
+++ b/sound/soc/cirrus/snappercl15.c
@@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = {
.codec_dai_name = "tlv320aic23-hifi",
.codec_name = "tlv320aic23-codec.0-001a",
.platform_name = "ep93xx-i2s",
- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.ops = &snappercl15_ops,
};
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index dff3586e294afc..34ecf920955aaa 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -80,6 +80,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MAX98925 if I2C
select SND_SOC_MAX98926 if I2C
select SND_SOC_MAX98927 if I2C
+ select SND_SOC_MAX98373 if I2C
select SND_SOC_MAX9850 if I2C
select SND_SOC_MAX9768 if I2C
select SND_SOC_MAX9877 if I2C
@@ -524,6 +525,10 @@ config SND_SOC_MAX98927
tristate "Maxim Integrated MAX98927 Speaker Amplifier"
depends on I2C
+config SND_SOC_MAX98373
+ tristate "Maxim Integrated MAX98373 Speaker Amplifier"
+ depends on I2C
+
config SND_SOC_MAX9850
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 993cadeaad6974..9c3a491e09d110 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -75,6 +75,7 @@ snd-soc-max98357a-objs := max98357a.o
snd-soc-max98925-objs := max98925.o
snd-soc-max98926-objs := max98926.o
snd-soc-max98927-objs := max98927.o
+snd-soc-max98373-objs := max98373.o
snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
@@ -279,6 +280,7 @@ obj-$(CONFIG_SND_SOC_MAX98357A) += snd-soc-max98357a.o
obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
obj-$(CONFIG_SND_SOC_MAX98927) += snd-soc-max98927.o
+obj-$(CONFIG_SND_SOC_MAX98373) += snd-soc-max98373.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
diff --git a/sound/soc/codecs/ak4613.c b/sound/soc/codecs/ak4613.c
index 07a266460ec39c..b4b36cc92ffe2b 100644
--- a/sound/soc/codecs/ak4613.c
+++ b/sound/soc/codecs/ak4613.c
@@ -143,6 +143,7 @@ static const struct regmap_config ak4613_regmap_cfg = {
.max_register = 0x16,
.reg_defaults = ak4613_reg,
.num_reg_defaults = ARRAY_SIZE(ak4613_reg),
+ .cache_type = REGCACHE_RBTREE,
};
static const struct of_device_id ak4613_of_match[] = {
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 55db19ddc5ff34..93b02be3a90ed8 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", 0,
- 1, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+ 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c
index 1d1d10dd92ae23..f4a647aa5db752 100644
--- a/sound/soc/codecs/da7219-aad.c
+++ b/sound/soc/codecs/da7219-aad.c
@@ -59,6 +59,7 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
container_of(work, struct da7219_aad_priv, btn_det_work);
struct snd_soc_codec *codec = da7219_aad->codec;
struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+ struct da7219_priv *da7219 = snd_soc_codec_get_drvdata(codec);
u8 statusa, micbias_ctrl;
bool micbias_up = false;
int retries = 0;
@@ -86,6 +87,8 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
if (retries >= DA7219_AAD_MICBIAS_CHK_RETRIES)
dev_warn(codec->dev, "Mic bias status check timed out");
+ da7219->micbias_on_event = true;
+
/*
* Mic bias pulse required to enable mic, must be done before enabling
* button detection to prevent erroneous button readings.
@@ -439,6 +442,8 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
snd_soc_update_bits(codec, DA7219_ACCDET_CONFIG_1,
DA7219_BUTTON_CONFIG_MASK, 0);
+ da7219->micbias_on_event = false;
+
/* Disable mic bias */
snd_soc_dapm_disable_pin(dapm, "Mic Bias");
snd_soc_dapm_sync(dapm);
diff --git a/sound/soc/codecs/da7219.c b/sound/soc/codecs/da7219.c
index 4d9ad6f6d35f3c..662b233107fbea 100644
--- a/sound/soc/codecs/da7219.c
+++ b/sound/soc/codecs/da7219.c
@@ -766,6 +766,30 @@ static const struct snd_kcontrol_new da7219_st_out_filtr_mix_controls[] = {
* DAPM Events
*/
+static int da7219_mic_pga_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct da7219_priv *da7219 = snd_soc_component_get_drvdata(component);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (da7219->micbias_on_event) {
+ /*
+ * Delay only for first capture after bias enabled to
+ * avoid possible DC offset related noise.
+ */
+ da7219->micbias_on_event = false;
+ msleep(da7219->mic_pga_delay);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int da7219_dai_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -917,12 +941,12 @@ static const struct snd_soc_dapm_widget da7219_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("MIC"),
/* Input PGAs */
- SND_SOC_DAPM_PGA("Mic PGA", DA7219_MIC_1_CTRL,
- DA7219_MIC_1_AMP_EN_SHIFT, DA7219_NO_INVERT,
- NULL, 0),
- SND_SOC_DAPM_PGA("Mixin PGA", DA7219_MIXIN_L_CTRL,
- DA7219_MIXIN_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
- NULL, 0),
+ SND_SOC_DAPM_PGA_E("Mic PGA", DA7219_MIC_1_CTRL,
+ DA7219_MIC_1_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ NULL, 0, da7219_mic_pga_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_E("Mixin PGA", DA7219_MIXIN_L_CTRL,
+ DA7219_MIXIN_L_AMP_EN_SHIFT, DA7219_NO_INVERT,
+ NULL, 0, da7219_settling_event, SND_SOC_DAPM_POST_PMU),
/* Input Filters */
SND_SOC_DAPM_ADC("ADC", NULL, DA7219_ADC_L_CTRL, DA7219_ADC_L_EN_SHIFT,
@@ -1737,6 +1761,14 @@ static void da7219_handle_pdata(struct snd_soc_codec *codec)
snd_soc_write(codec, DA7219_MICBIAS_CTRL, micbias_lvl);
+ /*
+ * Calculate delay required to compensate for DC offset in
+ * Mic PGA, based on Mic Bias voltage.
+ */
+ da7219->mic_pga_delay = DA7219_MIC_PGA_BASE_DELAY +
+ (pdata->micbias_lvl *
+ DA7219_MIC_PGA_OFFSET_DELAY);
+
/* Mic */
switch (pdata->mic_amp_in_sel) {
case DA7219_MIC_AMP_IN_SEL_DIFF:
diff --git a/sound/soc/codecs/da7219.h b/sound/soc/codecs/da7219.h
index 8d6c3c8c802639..94abd3502e0772 100644
--- a/sound/soc/codecs/da7219.h
+++ b/sound/soc/codecs/da7219.h
@@ -778,8 +778,10 @@
#define DA7219_SYS_STAT_CHECK_DELAY 50
/* Power up/down Delays */
-#define DA7219_SETTLING_DELAY 40
-#define DA7219_MIN_GAIN_DELAY 30
+#define DA7219_SETTLING_DELAY 40
+#define DA7219_MIN_GAIN_DELAY 30
+#define DA7219_MIC_PGA_BASE_DELAY 100
+#define DA7219_MIC_PGA_OFFSET_DELAY 40
enum da7219_clk_src {
DA7219_CLKSRC_MCLK = 0,
@@ -819,6 +821,8 @@ struct da7219_priv {
bool master;
bool alc_en;
+ bool micbias_on_event;
+ unsigned int mic_pga_delay;
u8 gain_ramp_ctrl;
};
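With the constants above, the Mic PGA delay computed in da7219_handle_pdata() scales linearly with the configured mic-bias level index: for example, an index of 2 gives 100 + 2 * 40 = 180 ms. As a worked expression:

/* worked example of the formula above (milliseconds) */
static unsigned int mic_pga_delay(unsigned int micbias_lvl)
{
	return DA7219_MIC_PGA_BASE_DELAY +		  /* 100	 */
	       micbias_lvl * DA7219_MIC_PGA_OFFSET_DELAY; /* + lvl * 40 */
}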
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index d06ef6c7aa0763..86c5718f91d01b 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -30,9 +30,36 @@
#include <sound/soc.h>
#include <sound/soc-dapm.h>
+#define MAX_MODESWITCH_DELAY 70
+static int modeswitch_delay;
+module_param(modeswitch_delay, uint, 0644);
+
struct dmic {
struct gpio_desc *gpio_en;
int wakeup_delay;
+ /* Delay after DMIC mode switch */
+ int modeswitch_delay;
+};
+
+int dmic_daiops_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct dmic *dmic = snd_soc_codec_get_drvdata(codec);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_STOP:
+ if (dmic->modeswitch_delay)
+ mdelay(dmic->modeswitch_delay);
+
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops dmic_dai_ops = {
+ .trigger = dmic_daiops_trigger,
};
static int dmic_aif_event(struct snd_soc_dapm_widget *w,
@@ -68,6 +95,7 @@ static struct snd_soc_dai_driver dmic_dai = {
| SNDRV_PCM_FMTBIT_S24_LE
| SNDRV_PCM_FMTBIT_S16_LE,
},
+ .ops = &dmic_dai_ops,
};
static int dmic_codec_probe(struct snd_soc_codec *codec)
@@ -85,6 +113,13 @@ static int dmic_codec_probe(struct snd_soc_codec *codec)
device_property_read_u32(codec->dev, "wakeup-delay-ms",
&dmic->wakeup_delay);
+ device_property_read_u32(codec->dev, "modeswitch-delay-ms",
+ &dmic->modeswitch_delay);
+ if (modeswitch_delay)
+ dmic->modeswitch_delay = modeswitch_delay;
+
+ if (dmic->modeswitch_delay > MAX_MODESWITCH_DELAY)
+ dmic->modeswitch_delay = MAX_MODESWITCH_DELAY;
snd_soc_codec_set_drvdata(codec, dmic);
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
new file mode 100644
index 00000000000000..b718c5ec06c4db
--- /dev/null
+++ b/sound/soc/codecs/max98373.c
@@ -0,0 +1,980 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017, Maxim Integrated
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <sound/tlv.h>
+#include "max98373.h"
+
+static struct reg_default max98373_reg[] = {
+ {MAX98373_R2000_SW_RESET, 0x00},
+ {MAX98373_R2001_INT_RAW1, 0x00},
+ {MAX98373_R2002_INT_RAW2, 0x00},
+ {MAX98373_R2003_INT_RAW3, 0x00},
+ {MAX98373_R2004_INT_STATE1, 0x00},
+ {MAX98373_R2005_INT_STATE2, 0x00},
+ {MAX98373_R2006_INT_STATE3, 0x00},
+ {MAX98373_R2007_INT_FLAG1, 0x00},
+ {MAX98373_R2008_INT_FLAG2, 0x00},
+ {MAX98373_R2009_INT_FLAG3, 0x00},
+ {MAX98373_R200A_INT_EN1, 0x00},
+ {MAX98373_R200B_INT_EN2, 0x00},
+ {MAX98373_R200C_INT_EN3, 0x00},
+ {MAX98373_R200D_INT_FLAG_CLR1, 0x00},
+ {MAX98373_R200E_INT_FLAG_CLR2, 0x00},
+ {MAX98373_R200F_INT_FLAG_CLR3, 0x00},
+ {MAX98373_R2010_IRQ_CTRL, 0x00},
+ {MAX98373_R2014_THERM_WARN_THRESH, 0x10},
+ {MAX98373_R2015_THERM_SHDN_THRESH, 0x27},
+ {MAX98373_R2016_THERM_HYSTERESIS, 0x01},
+ {MAX98373_R2017_THERM_FOLDBACK_SET, 0xC0},
+ {MAX98373_R2018_THERM_FOLDBACK_EN, 0x00},
+ {MAX98373_R201E_PIN_DRIVE_STRENGTH, 0x55},
+ {MAX98373_R2020_PCM_TX_HIZ_EN_1, 0xFE},
+ {MAX98373_R2021_PCM_TX_HIZ_EN_2, 0xFF},
+ {MAX98373_R2022_PCM_TX_SRC_1, 0x00},
+ {MAX98373_R2023_PCM_TX_SRC_2, 0x00},
+ {MAX98373_R2024_PCM_DATA_FMT_CFG, 0xC0},
+ {MAX98373_R2025_AUDIO_IF_MODE, 0x00},
+ {MAX98373_R2026_PCM_CLOCK_RATIO, 0x04},
+ {MAX98373_R2027_PCM_SR_SETUP_1, 0x08},
+ {MAX98373_R2028_PCM_SR_SETUP_2, 0x88},
+ {MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1, 0x00},
+ {MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2, 0x00},
+ {MAX98373_R202B_PCM_RX_EN, 0x00},
+ {MAX98373_R202C_PCM_TX_EN, 0x00},
+ {MAX98373_R202E_ICC_RX_CH_EN_1, 0x00},
+ {MAX98373_R202F_ICC_RX_CH_EN_2, 0x00},
+ {MAX98373_R2030_ICC_TX_HIZ_EN_1, 0xFF},
+ {MAX98373_R2031_ICC_TX_HIZ_EN_2, 0xFF},
+ {MAX98373_R2032_ICC_LINK_EN_CFG, 0x30},
+ {MAX98373_R2034_ICC_TX_CNTL, 0x00},
+ {MAX98373_R2035_ICC_TX_EN, 0x00},
+ {MAX98373_R2036_SOUNDWIRE_CTRL, 0x05},
+ {MAX98373_R203D_AMP_DIG_VOL_CTRL, 0x00},
+ {MAX98373_R203E_AMP_PATH_GAIN, 0x08},
+ {MAX98373_R203F_AMP_DSP_CFG, 0x02},
+ {MAX98373_R2040_TONE_GEN_CFG, 0x00},
+ {MAX98373_R2041_AMP_CFG, 0x03},
+ {MAX98373_R2042_AMP_EDGE_RATE_CFG, 0x00},
+ {MAX98373_R2043_AMP_EN, 0x00},
+ {MAX98373_R2046_IV_SENSE_ADC_DSP_CFG, 0x04},
+ {MAX98373_R2047_IV_SENSE_ADC_EN, 0x00},
+ {MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0x00},
+ {MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG, 0x00},
+ {MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG, 0x00},
+ {MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0x00},
+ {MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0x00},
+ {MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0x00},
+ {MAX98373_R2090_BDE_LVL_HOLD, 0x00},
+ {MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0x00},
+ {MAX98373_R2092_BDE_CLIPPER_MODE, 0x00},
+ {MAX98373_R2097_BDE_L1_THRESH, 0x00},
+ {MAX98373_R2098_BDE_L2_THRESH, 0x00},
+ {MAX98373_R2099_BDE_L3_THRESH, 0x00},
+ {MAX98373_R209A_BDE_L4_THRESH, 0x00},
+ {MAX98373_R209B_BDE_THRESH_HYST, 0x00},
+ {MAX98373_R20A8_BDE_L1_CFG_1, 0x00},
+ {MAX98373_R20A9_BDE_L1_CFG_2, 0x00},
+ {MAX98373_R20AA_BDE_L1_CFG_3, 0x00},
+ {MAX98373_R20AB_BDE_L2_CFG_1, 0x00},
+ {MAX98373_R20AC_BDE_L2_CFG_2, 0x00},
+ {MAX98373_R20AD_BDE_L2_CFG_3, 0x00},
+ {MAX98373_R20AE_BDE_L3_CFG_1, 0x00},
+ {MAX98373_R20AF_BDE_L3_CFG_2, 0x00},
+ {MAX98373_R20B0_BDE_L3_CFG_3, 0x00},
+ {MAX98373_R20B1_BDE_L4_CFG_1, 0x00},
+ {MAX98373_R20B2_BDE_L4_CFG_2, 0x00},
+ {MAX98373_R20B3_BDE_L4_CFG_3, 0x00},
+ {MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE, 0x00},
+ {MAX98373_R20B5_BDE_EN, 0x00},
+ {MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0x00},
+ {MAX98373_R20D1_DHT_CFG, 0x01},
+ {MAX98373_R20D2_DHT_ATTACK_CFG, 0x02},
+ {MAX98373_R20D3_DHT_RELEASE_CFG, 0x03},
+ {MAX98373_R20D4_DHT_EN, 0x00},
+ {MAX98373_R20E0_LIMITER_THRESH_CFG, 0x00},
+ {MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0x00},
+ {MAX98373_R20E2_LIMITER_EN, 0x00},
+ {MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG, 0x00},
+ {MAX98373_R20FF_GLOBAL_SHDN, 0x00},
+ {MAX98373_R21FF_REV_ID, 0x42},
+};
+
+static int max98373_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ unsigned int format = 0;
+ unsigned int invert = 0;
+
+ dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert = MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE;
+ break;
+ default:
+ dev_err(codec->dev, "DAI invert mode unsupported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE,
+ invert);
+
+ /* interface format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ format = MAX98373_PCM_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ format = MAX98373_PCM_FORMAT_LJ;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ format = MAX98373_PCM_FORMAT_TDM_MODE1;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ format = MAX98373_PCM_FORMAT_TDM_MODE0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_FORMAT_MASK,
+ format << MAX98373_PCM_MODE_CFG_FORMAT_SHIFT);
+
+ return 0;
+}
+
+/* BCLKs per LRCLK */
+static const int bclk_sel_table[] = {
+ 32, 48, 64, 96, 128, 192, 256, 384, 512, 320,
+};
+
+static int max98373_get_bclk_sel(int bclk)
+{
+ int i;
+ /* match BCLKs per LRCLK */
+ for (i = 0; i < ARRAY_SIZE(bclk_sel_table); i++) {
+ if (bclk_sel_table[i] == bclk)
+ return i + 2;
+ }
+ return 0;
+}
+
+static int max98373_set_clock(struct snd_soc_codec *codec,
+ struct snd_pcm_hw_params *params)
+{
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ /* BCLK/LRCLK ratio calculation */
+ int blr_clk_ratio = params_channels(params) * max98373->ch_size;
+ int value;
+
+ if (!max98373->tdm_mode) {
+ /* BCLK configuration */
+ value = max98373_get_bclk_sel(blr_clk_ratio);
+ if (!value) {
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+ value);
+ }
+ return 0;
+}
+
+static int max98373_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ unsigned int sampling_rate = 0;
+ unsigned int chan_sz = 0;
+
+ /* pcm mode configuration */
+ switch (snd_pcm_format_width(params_format(params))) {
+ case 16:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ params_format(params));
+ goto err;
+ }
+
+ max98373->ch_size = snd_pcm_format_width(params_format(params));
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ dev_dbg(codec->dev, "format supported %d",
+ params_format(params));
+
+ /* sampling rate configuration */
+ switch (params_rate(params)) {
+ case 8000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_8000;
+ break;
+ case 11025:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_11025;
+ break;
+ case 12000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_12000;
+ break;
+ case 16000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_16000;
+ break;
+ case 22050:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_22050;
+ break;
+ case 24000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_24000;
+ break;
+ case 32000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_32000;
+ break;
+ case 44100:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_44100;
+ break;
+ case 48000:
+ sampling_rate = MAX98373_PCM_SR_SET1_SR_48000;
+ break;
+ default:
+ dev_err(codec->dev, "rate %d not supported\n",
+ params_rate(params));
+ goto err;
+ }
+
+ /* set DAI_SR to correct LRCLK frequency */
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2027_PCM_SR_SETUP_1,
+ MAX98373_PCM_SR_SET1_SR_MASK,
+ sampling_rate);
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_SR_MASK,
+ sampling_rate << MAX98373_PCM_SR_SET2_SR_SHIFT);
+
+	/* set the IV ADC sampling rate */
+ if (max98373->interleave_mode &&
+ sampling_rate > MAX98373_PCM_SR_SET1_SR_16000)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+ sampling_rate - 3);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2028_PCM_SR_SETUP_2,
+ MAX98373_PCM_SR_SET2_IVADC_SR_MASK,
+ sampling_rate);
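+	/*
+	 * Editorial note: in the SR_SET1 enum each rate's half lies three
+	 * entries earlier (48000 -> 24000, 44100 -> 22050, 32000 -> 16000),
+	 * so 'sampling_rate - 3' halves the IV ADC rate when interleave
+	 * mode packs V and I samples into a single slot.
+	 */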
+
+ return max98373_set_clock(codec, params);
+err:
+ return -EINVAL;
+}
+
+static int max98373_dai_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+ int bsel = 0;
+ unsigned int chan_sz = 0;
+ unsigned int mask;
+ int x, slot_found;
+
+ if (!tx_mask && !rx_mask && !slots && !slot_width)
+ max98373->tdm_mode = false;
+ else
+ max98373->tdm_mode = true;
+
+ /* BCLK configuration */
+ bsel = max98373_get_bclk_sel(slots * slot_width);
+ if (bsel == 0) {
+ dev_err(codec->dev, "BCLK %d not supported\n",
+ slots * slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2026_PCM_CLOCK_RATIO,
+ MAX98373_PCM_CLK_SETUP_BSEL_MASK,
+ bsel);
+
+ /* Channel size configuration */
+ switch (slot_width) {
+ case 16:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_16;
+ break;
+ case 24:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_24;
+ break;
+ case 32:
+ chan_sz = MAX98373_PCM_MODE_CFG_CHANSZ_32;
+ break;
+ default:
+ dev_err(codec->dev, "format unsupported %d\n",
+ slot_width);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_MODE_CFG_CHANSZ_MASK, chan_sz);
+
+ /* Rx slot configuration */
+ slot_found = 0;
+ mask = rx_mask;
+ for (x = 0 ; x < 16 ; x++, mask >>= 1) {
+ if (mask & 0x1) {
+ if (slot_found == 0)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ MAX98373_PCM_TO_SPK_CH0_SRC_MASK, x);
+ else
+ regmap_write(max98373->regmap,
+ MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+ x);
+ slot_found++;
+ if (slot_found > 1)
+ break;
+ }
+ }
+
+ /* Tx slot Hi-Z configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ ~tx_mask & 0xFF);
+ regmap_write(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ (~tx_mask & 0xFF00) >> 8);
+
+ return 0;
+}
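+
+/*
+ * Illustrative example (editorial): a call such as
+ * snd_soc_dai_set_tdm_slot(dai, 0x30, 3, 8, 16) requests an 8-slot,
+ * 16-bit frame (128 BCLKs per LRCLK for the BSEL lookup); tx_mask 0x30
+ * keeps slots 4 and 5 driven, while the complement written to the two
+ * PCM_TX_HIZ_EN registers tri-states every other TX slot, per the bit
+ * layout this driver assumes.
+ */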
+
+#define MAX98373_RATES SNDRV_PCM_RATE_8000_96000
+
+#define MAX98373_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops max98373_dai_ops = {
+ .set_fmt = max98373_dai_set_fmt,
+ .hw_params = max98373_dai_hw_params,
+ .set_tdm_slot = max98373_dai_tdm_slot,
+};
+
+static int max98373_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R20FF_GLOBAL_SHDN,
+ MAX98373_GLOBAL_EN_MASK, 1);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R20FF_GLOBAL_SHDN,
+ MAX98373_GLOBAL_EN_MASK, 0);
+		max98373->tdm_mode = false;
+ break;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
+static const char * const max98373_switch_text[] = {
+ "Left", "Right", "LeftRight"};
+
+static const struct soc_enum dai_sel_enum =
+ SOC_ENUM_SINGLE(MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT,
+ 3, max98373_switch_text);
+
+static const struct snd_kcontrol_new max98373_dai_controls =
+ SOC_DAPM_ENUM("DAI Sel", dai_sel_enum);
+
+static const struct snd_kcontrol_new max98373_vi_control =
+ SOC_DAPM_SINGLE("Switch", MAX98373_R202C_PCM_TX_EN, 0, 1, 0);
+
+static const struct snd_kcontrol_new max98373_spkfb_control =
+ SOC_DAPM_SINGLE("Switch", MAX98373_R2043_AMP_EN, 1, 1, 0);
+
+static const struct snd_soc_dapm_widget max98373_dapm_widgets[] = {
+SND_SOC_DAPM_DAC_E("Amp Enable", "HiFi Playback",
+ MAX98373_R202B_PCM_RX_EN, 0, 0, max98373_dac_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_MUX("DAI Sel Mux", SND_SOC_NOPM, 0, 0,
+ &max98373_dai_controls),
+SND_SOC_DAPM_OUTPUT("BE_OUT"),
+SND_SOC_DAPM_AIF_OUT("Voltage Sense", "HiFi Capture", 0,
+ MAX98373_R2047_IV_SENSE_ADC_EN, 0, 0),
+SND_SOC_DAPM_AIF_OUT("Current Sense", "HiFi Capture", 0,
+ MAX98373_R2047_IV_SENSE_ADC_EN, 1, 0),
+SND_SOC_DAPM_AIF_OUT("Speaker FB Sense", "HiFi Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_SWITCH("VI Sense", SND_SOC_NOPM, 0, 0,
+ &max98373_vi_control),
+SND_SOC_DAPM_SWITCH("SpkFB Sense", SND_SOC_NOPM, 0, 0,
+ &max98373_spkfb_control),
+SND_SOC_DAPM_SIGGEN("VMON"),
+SND_SOC_DAPM_SIGGEN("IMON"),
+SND_SOC_DAPM_SIGGEN("FBMON"),
+};
+
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, -6350, 50, 1);
+static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
+ 0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
+ 9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_spkgain_max_tlv,
+ 0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_step_size_tlv,
+ 0, 1, TLV_DB_SCALE_ITEM(25, 25, 0),
+ 2, 4, TLV_DB_SCALE_ITEM(100, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
+ 0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
+ 0, 1, TLV_DB_SCALE_ITEM(-3000, 500, 0),
+ 2, 4, TLV_DB_SCALE_ITEM(-2200, 200, 0),
+ 5, 6, TLV_DB_SCALE_ITEM(-1500, 300, 0),
+ 7, 9, TLV_DB_SCALE_ITEM(-1000, 200, 0),
+ 10, 13, TLV_DB_SCALE_ITEM(-500, 100, 0),
+ 14, 15, TLV_DB_SCALE_ITEM(-100, 50, 0),
+);
+static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
+ 0, 15, TLV_DB_SCALE_ITEM(-1500, 100, 0),
+);
+
+static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
+ 0, 60, TLV_DB_SCALE_ITEM(-1500, 25, 0),
+);
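+
+/*
+ * Editorial note: TLV values above are in centibels. max98373_digital_tlv,
+ * for instance, spans -63.5 dB to 0 dB in 0.5 dB steps with the minimum
+ * treated as mute (the trailing 1), and backs the inverted 0..0x7F
+ * "Digital Volume" control defined below.
+ */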
+
+static bool max98373_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98373_R2000_SW_RESET:
+ case MAX98373_R2001_INT_RAW1 ... MAX98373_R200C_INT_EN3:
+ case MAX98373_R2010_IRQ_CTRL:
+ case MAX98373_R2014_THERM_WARN_THRESH
+ ... MAX98373_R2018_THERM_FOLDBACK_EN:
+ case MAX98373_R201E_PIN_DRIVE_STRENGTH
+ ... MAX98373_R2036_SOUNDWIRE_CTRL:
+ case MAX98373_R203D_AMP_DIG_VOL_CTRL ... MAX98373_R2043_AMP_EN:
+ case MAX98373_R2046_IV_SENSE_ADC_DSP_CFG
+ ... MAX98373_R2047_IV_SENSE_ADC_EN:
+ case MAX98373_R2051_MEAS_ADC_SAMPLING_RATE
+ ... MAX98373_R2056_MEAS_ADC_PVDD_CH_EN:
+ case MAX98373_R2090_BDE_LVL_HOLD ... MAX98373_R2092_BDE_CLIPPER_MODE:
+ case MAX98373_R2097_BDE_L1_THRESH
+ ... MAX98373_R209B_BDE_THRESH_HYST:
+ case MAX98373_R20A8_BDE_L1_CFG_1 ... MAX98373_R20B3_BDE_L4_CFG_3:
+ case MAX98373_R20B5_BDE_EN ... MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R20D1_DHT_CFG ... MAX98373_R20D4_DHT_EN:
+ case MAX98373_R20E0_LIMITER_THRESH_CFG ... MAX98373_R20E2_LIMITER_EN:
+ case MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG
+ ... MAX98373_R20FF_GLOBAL_SHDN:
+ case MAX98373_R21FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max98373_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX98373_R2000_SW_RESET ... MAX98373_R2009_INT_FLAG3:
+ case MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK:
+ case MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK:
+ case MAX98373_R20B6_BDE_CUR_STATE_READBACK:
+ case MAX98373_R21FF_REV_ID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const char * const max98373_output_voltage_lvl_text[] = {
+ "5.43V", "6.09V", "6.83V", "7.67V", "8.60V",
+ "9.65V", "10.83V", "12.15V", "13.63V", "15.29V"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_out_volt_enum,
+ MAX98373_R203E_AMP_PATH_GAIN, 0,
+ max98373_output_voltage_lvl_text);
+
+static const char * const max98373_dht_attack_rate_text[] = {
+ "17.5us", "35us", "70us", "140us",
+ "280us", "560us", "1120us", "2240us"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_attack_rate_enum,
+ MAX98373_R20D2_DHT_ATTACK_CFG, 0,
+ max98373_dht_attack_rate_text);
+
+static const char * const max98373_dht_release_rate_text[] = {
+ "45ms", "225ms", "450ms", "1150ms",
+ "2250ms", "3100ms", "4500ms", "6750ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_dht_release_rate_enum,
+ MAX98373_R20D3_DHT_RELEASE_CFG, 0,
+ max98373_dht_release_rate_text);
+
+static const char * const max98373_limiter_attack_rate_text[] = {
+ "10us", "20us", "40us", "80us",
+ "160us", "320us", "640us", "1.28ms",
+ "2.56ms", "5.12ms", "10.24ms", "20.48ms",
+ "40.96ms", "81.92ms", "16.384ms", "32.768ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_attack_rate_enum,
+ MAX98373_R20E1_LIMITER_ATK_REL_RATES, 4,
+ max98373_limiter_attack_rate_text);
+
+static const char * const max98373_limiter_release_rate_text[] = {
+ "40us", "80us", "160us", "320us",
+ "640us", "1.28ms", "2.56ms", "5.120ms",
+ "10.24ms", "20.48ms", "40.96ms", "81.92ms",
+ "163.84ms", "327.68ms", "655.36ms", "1310.72ms"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_limiter_release_rate_enum,
+ MAX98373_R20E1_LIMITER_ATK_REL_RATES, 0,
+ max98373_limiter_release_rate_text);
+
+static const char * const max98373_ADC_samplerate_text[] = {
+ "333kHz", "192kHz", "64kHz", "48kHz"
+};
+
+static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
+ MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
+ max98373_ADC_samplerate_text);
+
+static const struct snd_kcontrol_new max98373_snd_controls[] = {
+SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Volume Location Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Up Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT, 1, 0),
+SOC_SINGLE("Ramp Down Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT, 1, 0),
+SOC_SINGLE("CLK Monitor Switch", MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG,
+ MAX98373_CLOCK_MON_SHIFT, 1, 0),
+SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_DITH_SHIFT, 1, 0),
+SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
+ MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
+ 0, 0x7F, 1, max98373_digital_tlv),
+SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
+ MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
+SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
+ MAX98373_FS_GAIN_MAX_SHIFT, 9, 0, max98373_spkgain_max_tlv),
+SOC_ENUM("Output Voltage", max98373_out_volt_enum),
+/* Dynamic Headroom Tracking */
+SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
+ MAX98373_DHT_EN_SHIFT, 1, 0),
+SOC_SINGLE_TLV("DHT Min Volume", MAX98373_R20D1_DHT_CFG,
+ MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
+SOC_SINGLE_TLV("DHT Rot Pnt Volume", MAX98373_R20D1_DHT_CFG,
+ MAX98373_DHT_ROT_PNT_SHIFT, 15, 1, max98373_dht_rotation_point_tlv),
+SOC_SINGLE_TLV("DHT Attack Step Volume", MAX98373_R20D2_DHT_ATTACK_CFG,
+ MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_SINGLE_TLV("DHT Release Step Volume", MAX98373_R20D3_DHT_RELEASE_CFG,
+ MAX98373_DHT_RELEASE_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
+SOC_ENUM("DHT Attack Rate", max98373_dht_attack_rate_enum),
+SOC_ENUM("DHT Release Rate", max98373_dht_release_rate_enum),
+/* ADC configuration */
+SOC_SINGLE("ADC PVDD CH Switch", MAX98373_R2056_MEAS_ADC_PVDD_CH_EN, 0, 1, 0),
+SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+ MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+ MAX98373_FLT_EN_SHIFT, 1, 0),
+SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
+ 0, 0x3, 0),
+SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
+ 0, 0x3, 0),
+SOC_ENUM("ADC SampleRate", max98373_adc_samplerate_enum),
+/* Brownout Detection Engine */
+SOC_SINGLE("BDE Switch", MAX98373_R20B5_BDE_EN, MAX98373_BDE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Mute Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+ MAX98373_LVL4_MUTE_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL4 Hold Switch", MAX98373_R20B2_BDE_L4_CFG_2,
+ MAX98373_LVL4_HOLD_EN_SHIFT, 1, 0),
+SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
+SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
+SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
+SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
+SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
+SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
+SOC_SINGLE_TLV("BDE LVL1 Clip Thresh Volume", MAX98373_R20A9_BDE_L1_CFG_2,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Thresh Volume", MAX98373_R20AC_BDE_L2_CFG_2,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Thresh Volume", MAX98373_R20AF_BDE_L3_CFG_2,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Thresh Volume", MAX98373_R20B2_BDE_L4_CFG_2,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Clip Reduction Volume", MAX98373_R20AA_BDE_L1_CFG_3,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Clip Reduction Volume", MAX98373_R20AD_BDE_L2_CFG_3,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Clip Reduction Volume", MAX98373_R20B0_BDE_L3_CFG_3,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Clip Reduction Volume", MAX98373_R20B3_BDE_L4_CFG_3,
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
+SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh Volume", MAX98373_R20A8_BDE_L1_CFG_1,
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh Volume", MAX98373_R20AB_BDE_L2_CFG_1,
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh Volume", MAX98373_R20AE_BDE_L3_CFG_1,
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
+SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh Volume", MAX98373_R20B1_BDE_L4_CFG_1,
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
+/* Limiter */
+SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
+ MAX98373_LIMITER_EN_SHIFT, 1, 0),
+SOC_SINGLE("Limiter Src Switch", MAX98373_R20E0_LIMITER_THRESH_CFG,
+ MAX98373_LIMITER_THRESH_SRC_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Limiter Thresh Volume", MAX98373_R20E0_LIMITER_THRESH_CFG,
+ MAX98373_LIMITER_THRESH_SHIFT, 15, 0, max98373_limiter_thresh_tlv),
+SOC_ENUM("Limiter Attack Rate", max98373_limiter_attack_rate_enum),
+SOC_ENUM("Limiter Release Rate", max98373_limiter_release_rate_enum),
+};
+
+static const struct snd_soc_dapm_route max98373_audio_map[] = {
+	/* Playback */
+ {"DAI Sel Mux", "Left", "Amp Enable"},
+ {"DAI Sel Mux", "Right", "Amp Enable"},
+ {"DAI Sel Mux", "LeftRight", "Amp Enable"},
+ {"BE_OUT", NULL, "DAI Sel Mux"},
+ /* Capture */
+ { "VI Sense", "Switch", "VMON" },
+ { "VI Sense", "Switch", "IMON" },
+ { "SpkFB Sense", "Switch", "FBMON" },
+ { "Voltage Sense", NULL, "VI Sense" },
+ { "Current Sense", NULL, "VI Sense" },
+ { "Speaker FB Sense", NULL, "SpkFB Sense" },
+};
+
+static struct snd_soc_dai_driver max98373_dai[] = {
+ {
+ .name = "max98373-aif1",
+ .playback = {
+ .stream_name = "HiFi Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98373_RATES,
+ .formats = MAX98373_FORMATS,
+ },
+ .capture = {
+ .stream_name = "HiFi Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = MAX98373_RATES,
+ .formats = MAX98373_FORMATS,
+ },
+ .ops = &max98373_dai_ops,
+ }
+};
+
+static int max98373_probe(struct snd_soc_codec *codec)
+{
+ struct max98373_priv *max98373 = snd_soc_codec_get_drvdata(codec);
+
+ codec->control_data = max98373->regmap;
+
+ /* Software Reset */
+ regmap_write(max98373->regmap,
+ MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
+
+ /* IV default slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 0xFF);
+ regmap_write(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 0xFF);
+ /* L/R mix configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1,
+ 0x80);
+ regmap_write(max98373->regmap,
+ MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2,
+ 0x1);
+	/* Set initial volume (0 dB) */
+ regmap_write(max98373->regmap,
+ MAX98373_R203D_AMP_DIG_VOL_CTRL,
+ 0x00);
+ regmap_write(max98373->regmap,
+ MAX98373_R203E_AMP_PATH_GAIN,
+ 0x00);
+ /* Enable DC blocker */
+ regmap_write(max98373->regmap,
+ MAX98373_R203F_AMP_DSP_CFG,
+ 0x3);
+ /* Enable IMON VMON DC blocker */
+ regmap_write(max98373->regmap,
+ MAX98373_R2046_IV_SENSE_ADC_DSP_CFG,
+ 0x7);
+ /* voltage, current slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2022_PCM_TX_SRC_1,
+ (max98373->i_slot << MAX98373_PCM_TX_CH_SRC_A_I_SHIFT |
+ max98373->v_slot) & 0xFF);
+ if (max98373->v_slot < 8)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 1 << max98373->v_slot, 0);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 1 << (max98373->v_slot - 8), 0);
+
+ if (max98373->i_slot < 8)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2020_PCM_TX_HIZ_EN_1,
+ 1 << max98373->i_slot, 0);
+ else
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2021_PCM_TX_HIZ_EN_2,
+ 1 << (max98373->i_slot - 8), 0);
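+	/*
+	 * Editorial example: with the default v_slot = 0 and i_slot = 1,
+	 * the updates above clear bits 0 and 1 of PCM_TX_HIZ_EN_1, taking
+	 * it from the 0xFF written earlier to 0xFC, so only the V and I
+	 * sense slots drive the bus.
+	 */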
+
+ /* speaker feedback slot configuration */
+ regmap_write(max98373->regmap,
+ MAX98373_R2023_PCM_TX_SRC_2,
+ max98373->spkfb_slot & 0xFF);
+
+ /* Set interleave mode */
+ if (max98373->interleave_mode)
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2024_PCM_DATA_FMT_CFG,
+ MAX98373_PCM_TX_CH_INTERLEAVE_MASK,
+ MAX98373_PCM_TX_CH_INTERLEAVE_MASK);
+
+ /* Speaker enable */
+ regmap_update_bits(max98373->regmap,
+ MAX98373_R2043_AMP_EN,
+ MAX98373_SPK_EN_MASK, 1);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int max98373_suspend(struct device *dev)
+{
+ struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
+ regcache_cache_only(max98373->regmap, true);
+ regcache_mark_dirty(max98373->regmap);
+ return 0;
+}
+static int max98373_resume(struct device *dev)
+{
+ struct max98373_priv *max98373 = dev_get_drvdata(dev);
+
+ regmap_write(max98373->regmap,
+ MAX98373_R2000_SW_RESET, MAX98373_SOFT_RESET);
+ usleep_range(10000, 11000);
+ regcache_cache_only(max98373->regmap, false);
+ regcache_sync(max98373->regmap);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops max98373_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(max98373_suspend, max98373_resume)
+};
+
+static const struct snd_soc_codec_driver soc_codec_dev_max98373 = {
+ .probe = max98373_probe,
+ .component_driver = {
+ .controls = max98373_snd_controls,
+ .num_controls = ARRAY_SIZE(max98373_snd_controls),
+ .dapm_widgets = max98373_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max98373_dapm_widgets),
+ .dapm_routes = max98373_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(max98373_audio_map),
+ },
+};
+
+static const struct regmap_config max98373_regmap = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MAX98373_R21FF_REV_ID,
+ .reg_defaults = max98373_reg,
+ .num_reg_defaults = ARRAY_SIZE(max98373_reg),
+ .readable_reg = max98373_readable_register,
+ .volatile_reg = max98373_volatile_reg,
+ .cache_type = REGCACHE_RBTREE,
+};
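+
+/*
+ * Editorial note: REGCACHE_RBTREE together with the reg_defaults table
+ * lets the suspend handler above mark the cache dirty and the resume
+ * handler replay it with regcache_sync() after the soft reset, instead
+ * of rewriting every register by hand.
+ */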
+
+static void max98373_slot_config(struct i2c_client *i2c,
+ struct max98373_priv *max98373)
+{
+ int value;
+ struct device *dev = &i2c->dev;
+
+ if (!device_property_read_u32(dev, "maxim,vmon-slot-no", &value))
+ max98373->v_slot = value & 0xF;
+ else
+ max98373->v_slot = 0;
+
+ if (!device_property_read_u32(dev, "maxim,imon-slot-no", &value))
+ max98373->i_slot = value & 0xF;
+ else
+ max98373->i_slot = 1;
+
+ if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
+ max98373->spkfb_slot = value & 0xF;
+ else
+ max98373->spkfb_slot = 2;
+}
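+
+/*
+ * Editorial note: a hypothetical firmware fragment exercising the three
+ * properties read above (node name, address and values are examples only):
+ *
+ *	max98373: codec@31 {
+ *		compatible = "maxim,max98373";
+ *		reg = <0x31>;
+ *		maxim,vmon-slot-no = <0>;
+ *		maxim,imon-slot-no = <1>;
+ *		maxim,spkfb-slot-no = <2>;
+ *	};
+ */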
+
+static int max98373_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ int ret = 0;
+ int reg = 0;
+ struct max98373_priv *max98373 = NULL;
+
+ max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
+
+	if (!max98373)
+		return -ENOMEM;
+ i2c_set_clientdata(i2c, max98373);
+
+	/* update interleave mode info */
+	max98373->interleave_mode =
+		device_property_read_bool(&i2c->dev, "maxim,interleave_mode");
+
+ /* regmap initialization */
+	max98373->regmap = devm_regmap_init_i2c(i2c, &max98373_regmap);
+ if (IS_ERR(max98373->regmap)) {
+ ret = PTR_ERR(max98373->regmap);
+ dev_err(&i2c->dev,
+ "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ /* Check Revision ID */
+ ret = regmap_read(max98373->regmap,
+ MAX98373_R21FF_REV_ID, &reg);
+ if (ret < 0) {
+ dev_err(&i2c->dev,
+ "Failed to read: 0x%02X\n", MAX98373_R21FF_REV_ID);
+ return ret;
+ }
+ dev_info(&i2c->dev, "MAX98373 revisionID: 0x%02X\n", reg);
+
+ /* voltage/current slot configuration */
+ max98373_slot_config(i2c, max98373);
+
+	/* codec registration */
+ ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98373,
+ max98373_dai, ARRAY_SIZE(max98373_dai));
+ if (ret < 0)
+ dev_err(&i2c->dev, "Failed to register codec: %d\n", ret);
+
+ return ret;
+}
+
+static int max98373_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static const struct i2c_device_id max98373_i2c_id[] = {
+ { "max98373", 0},
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, max98373_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id max98373_of_match[] = {
+ { .compatible = "maxim,max98373", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98373_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max98373_acpi_match[] = {
+ { "MX98373", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, max98373_acpi_match);
+#endif
+
+static struct i2c_driver max98373_i2c_driver = {
+ .driver = {
+ .name = "max98373",
+ .of_match_table = of_match_ptr(max98373_of_match),
+ .acpi_match_table = ACPI_PTR(max98373_acpi_match),
+ .pm = &max98373_pm,
+ },
+ .probe = max98373_i2c_probe,
+ .remove = max98373_i2c_remove,
+ .id_table = max98373_i2c_id,
+};
+
+module_i2c_driver(max98373_i2c_driver);
+
+MODULE_DESCRIPTION("ALSA SoC MAX98373 driver");
+MODULE_AUTHOR("Ryan Lee <ryans.lee@maximintegrated.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/max98373.h b/sound/soc/codecs/max98373.h
new file mode 100644
index 00000000000000..f6a37aa02f2665
--- /dev/null
+++ b/sound/soc/codecs/max98373.h
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2017, Maxim Integrated
+
+#ifndef _MAX98373_H
+#define _MAX98373_H
+
+#define MAX98373_R2000_SW_RESET 0x2000
+#define MAX98373_R2001_INT_RAW1 0x2001
+#define MAX98373_R2002_INT_RAW2 0x2002
+#define MAX98373_R2003_INT_RAW3 0x2003
+#define MAX98373_R2004_INT_STATE1 0x2004
+#define MAX98373_R2005_INT_STATE2 0x2005
+#define MAX98373_R2006_INT_STATE3 0x2006
+#define MAX98373_R2007_INT_FLAG1 0x2007
+#define MAX98373_R2008_INT_FLAG2 0x2008
+#define MAX98373_R2009_INT_FLAG3 0x2009
+#define MAX98373_R200A_INT_EN1 0x200A
+#define MAX98373_R200B_INT_EN2 0x200B
+#define MAX98373_R200C_INT_EN3 0x200C
+#define MAX98373_R200D_INT_FLAG_CLR1 0x200D
+#define MAX98373_R200E_INT_FLAG_CLR2 0x200E
+#define MAX98373_R200F_INT_FLAG_CLR3 0x200F
+#define MAX98373_R2010_IRQ_CTRL 0x2010
+#define MAX98373_R2014_THERM_WARN_THRESH 0x2014
+#define MAX98373_R2015_THERM_SHDN_THRESH 0x2015
+#define MAX98373_R2016_THERM_HYSTERESIS 0x2016
+#define MAX98373_R2017_THERM_FOLDBACK_SET 0x2017
+#define MAX98373_R2018_THERM_FOLDBACK_EN 0x2018
+#define MAX98373_R201E_PIN_DRIVE_STRENGTH 0x201E
+#define MAX98373_R2020_PCM_TX_HIZ_EN_1 0x2020
+#define MAX98373_R2021_PCM_TX_HIZ_EN_2 0x2021
+#define MAX98373_R2022_PCM_TX_SRC_1 0x2022
+#define MAX98373_R2023_PCM_TX_SRC_2 0x2023
+#define MAX98373_R2024_PCM_DATA_FMT_CFG 0x2024
+#define MAX98373_R2025_AUDIO_IF_MODE 0x2025
+#define MAX98373_R2026_PCM_CLOCK_RATIO 0x2026
+#define MAX98373_R2027_PCM_SR_SETUP_1 0x2027
+#define MAX98373_R2028_PCM_SR_SETUP_2 0x2028
+#define MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 0x2029
+#define MAX98373_R202A_PCM_TO_SPK_MONO_MIX_2 0x202A
+#define MAX98373_R202B_PCM_RX_EN 0x202B
+#define MAX98373_R202C_PCM_TX_EN 0x202C
+#define MAX98373_R202E_ICC_RX_CH_EN_1 0x202E
+#define MAX98373_R202F_ICC_RX_CH_EN_2 0x202F
+#define MAX98373_R2030_ICC_TX_HIZ_EN_1 0x2030
+#define MAX98373_R2031_ICC_TX_HIZ_EN_2 0x2031
+#define MAX98373_R2032_ICC_LINK_EN_CFG 0x2032
+#define MAX98373_R2034_ICC_TX_CNTL 0x2034
+#define MAX98373_R2035_ICC_TX_EN 0x2035
+#define MAX98373_R2036_SOUNDWIRE_CTRL 0x2036
+#define MAX98373_R203D_AMP_DIG_VOL_CTRL 0x203D
+#define MAX98373_R203E_AMP_PATH_GAIN 0x203E
+#define MAX98373_R203F_AMP_DSP_CFG 0x203F
+#define MAX98373_R2040_TONE_GEN_CFG 0x2040
+#define MAX98373_R2041_AMP_CFG 0x2041
+#define MAX98373_R2042_AMP_EDGE_RATE_CFG 0x2042
+#define MAX98373_R2043_AMP_EN 0x2043
+#define MAX98373_R2046_IV_SENSE_ADC_DSP_CFG 0x2046
+#define MAX98373_R2047_IV_SENSE_ADC_EN 0x2047
+#define MAX98373_R2051_MEAS_ADC_SAMPLING_RATE 0x2051
+#define MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG 0x2052
+#define MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG 0x2053
+#define MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK 0x2054
+#define MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK 0x2055
+#define MAX98373_R2056_MEAS_ADC_PVDD_CH_EN 0x2056
+#define MAX98373_R2090_BDE_LVL_HOLD 0x2090
+#define MAX98373_R2091_BDE_GAIN_ATK_REL_RATE 0x2091
+#define MAX98373_R2092_BDE_CLIPPER_MODE 0x2092
+#define MAX98373_R2097_BDE_L1_THRESH 0x2097
+#define MAX98373_R2098_BDE_L2_THRESH 0x2098
+#define MAX98373_R2099_BDE_L3_THRESH 0x2099
+#define MAX98373_R209A_BDE_L4_THRESH 0x209A
+#define MAX98373_R209B_BDE_THRESH_HYST 0x209B
+#define MAX98373_R20A8_BDE_L1_CFG_1 0x20A8
+#define MAX98373_R20A9_BDE_L1_CFG_2 0x20A9
+#define MAX98373_R20AA_BDE_L1_CFG_3 0x20AA
+#define MAX98373_R20AB_BDE_L2_CFG_1 0x20AB
+#define MAX98373_R20AC_BDE_L2_CFG_2 0x20AC
+#define MAX98373_R20AD_BDE_L2_CFG_3 0x20AD
+#define MAX98373_R20AE_BDE_L3_CFG_1 0x20AE
+#define MAX98373_R20AF_BDE_L3_CFG_2 0x20AF
+#define MAX98373_R20B0_BDE_L3_CFG_3 0x20B0
+#define MAX98373_R20B1_BDE_L4_CFG_1 0x20B1
+#define MAX98373_R20B2_BDE_L4_CFG_2 0x20B2
+#define MAX98373_R20B3_BDE_L4_CFG_3 0x20B3
+#define MAX98373_R20B4_BDE_INFINITE_HOLD_RELEASE 0x20B4
+#define MAX98373_R20B5_BDE_EN 0x20B5
+#define MAX98373_R20B6_BDE_CUR_STATE_READBACK 0x20B6
+#define MAX98373_R20D1_DHT_CFG 0x20D1
+#define MAX98373_R20D2_DHT_ATTACK_CFG 0x20D2
+#define MAX98373_R20D3_DHT_RELEASE_CFG 0x20D3
+#define MAX98373_R20D4_DHT_EN 0x20D4
+#define MAX98373_R20E0_LIMITER_THRESH_CFG 0x20E0
+#define MAX98373_R20E1_LIMITER_ATK_REL_RATES 0x20E1
+#define MAX98373_R20E2_LIMITER_EN 0x20E2
+#define MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG 0x20FE
+#define MAX98373_R20FF_GLOBAL_SHDN 0x20FF
+#define MAX98373_R21FF_REV_ID 0x21FF
+
+/* MAX98373_R2022_PCM_TX_SRC_1 */
+#define MAX98373_PCM_TX_CH_SRC_A_V_SHIFT (0)
+#define MAX98373_PCM_TX_CH_SRC_A_I_SHIFT (4)
+
+/* MAX98373_R2024_PCM_DATA_FMT_CFG */
+#define MAX98373_PCM_MODE_CFG_FORMAT_MASK (0x7 << 3)
+#define MAX98373_PCM_MODE_CFG_FORMAT_SHIFT (3)
+#define MAX98373_PCM_TX_CH_INTERLEAVE_MASK (0x1 << 2)
+#define MAX98373_PCM_FORMAT_I2S (0x0 << 0)
+#define MAX98373_PCM_FORMAT_LJ (0x1 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE0 (0x3 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE1 (0x4 << 0)
+#define MAX98373_PCM_FORMAT_TDM_MODE2 (0x5 << 0)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_MASK (0x3 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_16 (0x1 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_24 (0x2 << 6)
+#define MAX98373_PCM_MODE_CFG_CHANSZ_32 (0x3 << 6)
+
+/* MAX98373_R2026_PCM_CLOCK_RATIO */
+#define MAX98373_PCM_MODE_CFG_PCM_BCLKEDGE (0x1 << 4)
+#define MAX98373_PCM_CLK_SETUP_BSEL_MASK (0xF << 0)
+
+/* MAX98373_R2027_PCM_SR_SETUP_1 */
+#define MAX98373_PCM_SR_SET1_SR_MASK (0xF << 0)
+#define MAX98373_PCM_SR_SET1_SR_8000 (0x0 << 0)
+#define MAX98373_PCM_SR_SET1_SR_11025 (0x1 << 0)
+#define MAX98373_PCM_SR_SET1_SR_12000 (0x2 << 0)
+#define MAX98373_PCM_SR_SET1_SR_16000 (0x3 << 0)
+#define MAX98373_PCM_SR_SET1_SR_22050 (0x4 << 0)
+#define MAX98373_PCM_SR_SET1_SR_24000 (0x5 << 0)
+#define MAX98373_PCM_SR_SET1_SR_32000 (0x6 << 0)
+#define MAX98373_PCM_SR_SET1_SR_44100 (0x7 << 0)
+#define MAX98373_PCM_SR_SET1_SR_48000 (0x8 << 0)
+
+/* MAX98373_R2028_PCM_SR_SETUP_2 */
+#define MAX98373_PCM_SR_SET2_SR_MASK (0xF << 4)
+#define MAX98373_PCM_SR_SET2_SR_SHIFT (4)
+#define MAX98373_PCM_SR_SET2_IVADC_SR_MASK (0xF << 0)
+
+/* MAX98373_R2029_PCM_TO_SPK_MONO_MIX_1 */
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_MASK (0x3 << 6)
+#define MAX98373_PCM_TO_SPK_MONOMIX_CFG_SHIFT (6)
+#define MAX98373_PCM_TO_SPK_CH0_SRC_MASK (0xF << 0)
+
+/* MAX98373_R203E_AMP_PATH_GAIN */
+#define MAX98373_SPK_DIGI_GAIN_MASK (0xF << 4)
+#define MAX98373_SPK_DIGI_GAIN_SHIFT (4)
+#define MAX98373_FS_GAIN_MAX_MASK (0xF << 0)
+#define MAX98373_FS_GAIN_MAX_SHIFT (0)
+
+/* MAX98373_R203F_AMP_DSP_CFG */
+#define MAX98373_AMP_DSP_CFG_DCBLK_SHIFT (0)
+#define MAX98373_AMP_DSP_CFG_DITH_SHIFT (1)
+#define MAX98373_AMP_DSP_CFG_RMP_UP_SHIFT (2)
+#define MAX98373_AMP_DSP_CFG_RMP_DN_SHIFT (3)
+#define MAX98373_AMP_DSP_CFG_DAC_INV_SHIFT (5)
+#define MAX98373_AMP_VOL_SEL_SHIFT (7)
+
+/* MAX98373_R2043_AMP_EN */
+#define MAX98373_SPKFB_EN_MASK (0x1 << 1)
+#define MAX98373_SPK_EN_MASK (0x1 << 0)
+#define MAX98373_SPKFB_EN_SHIFT (1)
+
+/* MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG */
+#define MAX98373_FLT_EN_SHIFT (4)
+
+/* MAX98373_R20B2_BDE_L4_CFG_2 */
+#define MAX98373_LVL4_MUTE_EN_SHIFT (7)
+#define MAX98373_LVL4_HOLD_EN_SHIFT (6)
+
+/* MAX98373_R20B5_BDE_EN */
+#define MAX98373_BDE_EN_SHIFT (0)
+
+/* MAX98373_R20D1_DHT_CFG */
+#define MAX98373_DHT_SPK_GAIN_MIN_SHIFT (4)
+#define MAX98373_DHT_ROT_PNT_SHIFT (0)
+
+/* MAX98373_R20D2_DHT_ATTACK_CFG */
+#define MAX98373_DHT_ATTACK_STEP_SHIFT (3)
+#define MAX98373_DHT_ATTACK_RATE_SHIFT (0)
+
+/* MAX98373_R20D3_DHT_RELEASE_CFG */
+#define MAX98373_DHT_RELEASE_STEP_SHIFT (3)
+#define MAX98373_DHT_RELEASE_RATE_SHIFT (0)
+
+/* MAX98373_R20D4_DHT_EN */
+#define MAX98373_DHT_EN_SHIFT (0)
+
+/* MAX98373_R20E0_LIMITER_THRESH_CFG */
+#define MAX98373_LIMITER_THRESH_SHIFT (2)
+#define MAX98373_LIMITER_THRESH_SRC_SHIFT (0)
+
+/* MAX98373_R20E2_LIMITER_EN */
+#define MAX98373_LIMITER_EN_SHIFT (0)
+
+/* MAX98373_R20FE_DEVICE_AUTO_RESTART_CFG */
+#define MAX98373_CLOCK_MON_SHIFT (0)
+
+/* MAX98373_R20FF_GLOBAL_SHDN */
+#define MAX98373_GLOBAL_EN_MASK (0x1 << 0)
+
+/* MAX98373_R2000_SW_RESET */
+#define MAX98373_SOFT_RESET (0x1 << 0)
+
+struct max98373_priv {
+ struct regmap *regmap;
+ unsigned int v_slot;
+ unsigned int i_slot;
+ unsigned int spkfb_slot;
+ bool interleave_mode;
+ unsigned int ch_size;
+ bool tdm_mode;
+};
+#endif
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
index 12f92dd37897be..d149fc5260ed7a 100644
--- a/sound/soc/codecs/rt5514.c
+++ b/sound/soc/codecs/rt5514.c
@@ -338,7 +338,8 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
rt5514->dsp_enabled = ucontrol->value.integer.value[0];
if (rt5514->dsp_enabled) {
- if (!IS_ERR(rt5514->dsp_calib_clk)) {
+ if (rt5514->pdata.dsp_calib_clk_name &&
+ !IS_ERR(rt5514->dsp_calib_clk)) {
if (clk_set_rate(rt5514->dsp_calib_clk,
rt5514->pdata.dsp_calib_clk_rate))
dev_err(codec->dev,
@@ -431,7 +432,8 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol,
regmap_write(rt5514->i2c_regmap, 0x18002f00,
0x00055148);
- if (!IS_ERR(rt5514->dsp_calib_clk)) {
+ if (rt5514->pdata.dsp_calib_clk_name &&
+ !IS_ERR(rt5514->dsp_calib_clk)) {
msleep(20);
regmap_write(rt5514->i2c_regmap, 0x1800211c,
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index acc70e8837a20e..525072b253fc61 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -61,6 +61,7 @@ struct rt5663_priv {
static const struct reg_sequence rt5663_patch_list[] = {
{ 0x002a, 0x8020 },
{ 0x0086, 0x0028 },
+ { 0x0100, 0xa020 },
};
static const struct reg_default rt5663_v2_reg[] = {
@@ -567,7 +568,7 @@ static const struct reg_default rt5663_reg[] = {
{ 0x00fd, 0x0001 },
{ 0x00fe, 0x10ec },
{ 0x00ff, 0x6406 },
- { 0x0100, 0xa0a0 },
+ { 0x0100, 0xa020 },
{ 0x0108, 0x4444 },
{ 0x0109, 0x4444 },
{ 0x010a, 0xaaaa },
@@ -2127,6 +2128,8 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
0x8000);
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000,
0x3000);
+ snd_soc_update_bits(codec,
+ RT5663_DIG_VOL_ZCD, 0x00c0, 0x0080);
}
break;
@@ -2139,6 +2142,8 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, RT5663_DEPOP_1, 0x3000, 0x0);
snd_soc_update_bits(codec, RT5663_HP_CHARGE_PUMP_1,
RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_EN);
+ snd_soc_update_bits(codec,
+ RT5663_DIG_VOL_ZCD, 0x00c0, 0);
}
break;
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680ac78e42d..6df158669420db 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@ static int sigmadsp_ctrl_write(struct sigmadsp *sigmadsp,
struct sigmadsp_control *ctrl, void *data)
{
/* safeload loads up to 20 bytes in a atomic operation */
- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
- sigmadsp->ops->safeload)
+ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
ctrl->num_bytes);
else
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c2c5bad5..79541960f45d94 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include "wm8804.h"
@@ -40,17 +41,29 @@ static const struct i2c_device_id wm8804_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+#if defined(CONFIG_OF)
static const struct of_device_id wm8804_of_match[] = {
{ .compatible = "wlf,wm8804", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
static struct i2c_driver wm8804_i2c_driver = {
.driver = {
.name = "wm8804",
.pm = &wm8804_pm,
- .of_match_table = wm8804_of_match,
+ .of_match_table = of_match_ptr(wm8804_of_match),
+ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
},
.probe = wm8804_i2c_probe,
.remove = wm8804_i2c_remove,
diff --git a/sound/soc/codecs/wm8940.c b/sound/soc/codecs/wm8940.c
index f6f9395ea38ef8..1c600819f7689b 100644
--- a/sound/soc/codecs/wm8940.c
+++ b/sound/soc/codecs/wm8940.c
@@ -743,6 +743,7 @@ static const struct regmap_config wm8940_regmap = {
.max_register = WM8940_MONOMIX,
.reg_defaults = wm8940_reg_defaults,
.num_reg_defaults = ARRAY_SIZE(wm8940_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
.readable_reg = wm8940_readable_register,
.volatile_reg = wm8940_volatile_register,
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index a18aecb4993590..2b770d3f05d4da 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2431,6 +2431,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2,
WM8994_OPCLK_ENA, 0);
}
+ break;
default:
return -EINVAL;
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 14dfdee05fd5ae..3066e068aae5c9 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -219,7 +219,7 @@ config SND_SOC_PHYCORE_AC97
config SND_SOC_EUKREA_TLV320
tristate "Eukrea TLV320"
- depends on ARCH_MXC && I2C
+ depends on ARCH_MXC && !ARM64 && I2C
select SND_SOC_TLV320AIC23_I2C
select SND_SOC_IMX_AUDMUX
select SND_SOC_IMX_SSI
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index e8adead8be00f5..a87836d4de15b4 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -394,7 +394,8 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
break;
case SND_SOC_DAIFMT_RIGHT_J:
/* Data on rising edge of bclk, frame high, right aligned */
- xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCR_xWA;
+ xccr |= ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP;
+ xcr |= ESAI_xCR_xWA;
break;
case SND_SOC_DAIFMT_DSP_A:
/* Data on rising edge of bclk, frame high, 1clk before data */
@@ -451,12 +452,12 @@ static int fsl_esai_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
- mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR;
+ mask = ESAI_xCR_xFSL | ESAI_xCR_xFSR | ESAI_xCR_xWA;
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCR, mask, xcr);
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCR, mask, xcr);
mask = ESAI_xCCR_xCKP | ESAI_xCCR_xHCKP | ESAI_xCCR_xFSP |
- ESAI_xCCR_xFSD | ESAI_xCCR_xCKD | ESAI_xCR_xWA;
+ ESAI_xCCR_xFSD | ESAI_xCCR_xCKD;
regmap_update_bits(esai_priv->regmap, REG_ESAI_TCCR, mask, xccr);
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR, mask, xccr);
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index fc57da341d6106..136df38c4536c6 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+ ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
pdcr, ptcr);
if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS output from %s, ",
audmux_port_string((ptcr >> 27) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk output from %s",
audmux_port_string((ptcr >> 22) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk input");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"Port is symmetric");
} else {
if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS output from %s, ",
audmux_port_string((ptcr >> 17) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk output from %s",
audmux_port_string((ptcr >> 12) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk input");
}
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"\nData received from %s\n",
audmux_port_string((pdcr >> 13) & 0x7));
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 2d4b536e55e469..7482d4c18c8681 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -224,6 +224,36 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
Say Y if you have such a device.
If unsure select "N".
+config SND_SOC_INTEL_KBL_DA7219_MAX98373_MACH
+ tristate "ASoC Audio driver for KBL with DA7219 and MAX98373 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98373
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+	  This adds support for the ASoC onboard codec I2S machine driver.
+	  It creates an ALSA sound card for the DA7219 + MAX98373 I2S codecs.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
+ tristate "ASoC Audio driver for KBL with DA7219 and MAX98927 in I2S Mode"
+ depends on X86_INTEL_LPSS && I2C
+ select SND_SOC_INTEL_SST
+ select SND_SOC_INTEL_SKYLAKE
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98927
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+	  This adds support for the ASoC onboard codec I2S machine driver.
+	  It creates an ALSA sound card for the DA7219 + MAX98927 I2S codecs.
+ Say Y if you have such a device.
+ If unsure select "N".
+
config SND_SOC_INTEL_SKYLAKE_SSP_CLK
tristate
help
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 239f93186391df..eb6b9984a96380 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -398,7 +398,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ int ret;
+
+	ret = snd_pcm_lib_malloc_pages(substream,
+				       params_buffer_bytes(params));
+	if (ret < 0)
+		return ret;
memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
return 0;
}
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index 33917146d9c445..054b1d514e8abf 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -354,14 +354,14 @@ static int sst_request_fw(struct intel_sst_drv *sst)
const struct firmware *fw;
retval = request_firmware(&fw, sst->firmware_name, sst->dev);
- if (fw == NULL) {
- dev_err(sst->dev, "fw is returning as null\n");
- return -EINVAL;
- }
if (retval) {
dev_err(sst->dev, "request fw failed %d\n", retval);
return retval;
}
+ if (fw == NULL) {
+ dev_err(sst->dev, "fw is returning as null\n");
+ return -EINVAL;
+ }
mutex_lock(&sst->sst_lock);
retval = sst_cache_and_parse_fw(sst, fw);
mutex_unlock(&sst->sst_lock);
diff --git a/sound/soc/intel/baytrail/sst-baytrail-pcm.c b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
index 9a6b18c90f95a9..6e820be99da7f0 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-pcm.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-pcm.c
@@ -421,7 +421,8 @@ static int sst_byt_pcm_dev_suspend_late(struct device *dev)
return ret;
}
- priv_data->restore_stream = true;
+ if (priv_data)
+ priv_data->restore_stream = true;
return ret;
}
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 5211860b74554d..26f10763bb29f8 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -9,6 +9,8 @@ snd-soc-sst-cht-bsw-rt5672-objs := cht_bsw_rt5672.o
snd-soc-sst-cht-bsw-rt5645-objs := cht_bsw_rt5645.o
snd-soc-sst-cht-bsw-max98090_ti-objs := cht_bsw_max98090_ti.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
+snd-soc-kbl_da7219_max98373-objs := kbl_da7219_max98373.o
+snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-skl_rt286-objs := skl_rt286.o
@@ -26,6 +28,8 @@ obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5672_MACH) += snd-soc-sst-cht-bsw-rt5672.o
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_RT5645_MACH) += snd-soc-sst-cht-bsw-rt5645.o
obj-$(CONFIG_SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH) += snd-soc-sst-cht-bsw-max98090_ti.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH) += snd-soc-kbl_da7219_max98357a.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98373_MACH) += snd-soc-kbl_da7219_max98373.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH) += snd-soc-kbl_da7219_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH) += snd-soc-kbl_rt5663_rt5514_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7486a0022fdea1..993d2c105ae14c 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -191,7 +191,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
- .dynamic = 0,
+ .dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index cedced0d626b91..a7c5ac29533ca7 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -111,12 +111,33 @@ static struct notifier_block cht_jack_nb = {
static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
{
+ int ret;
+ int jack_type;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(runtime->card);
+ struct snd_soc_jack *jack = &ctx->jack;
+
+ if (ctx->ts3a227e_present) {
+ /*
+ * The jack has already been created in the
+ * cht_max98090_headset_init() function.
+ */
+ snd_soc_jack_notifier_register(jack, &cht_jack_nb);
+ return 0;
+ }
+
+ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
+
+ ret = snd_soc_card_jack_new(runtime->card, "Headset Jack",
+ jack_type, jack, NULL, 0);
+ if (ret) {
+ dev_err(runtime->dev, "Headset Jack creation failed %d\n", ret);
+ return ret;
+ }
if (ctx->ts3a227e_present)
- snd_soc_jack_notifier_register(&ctx->jack, &cht_jack_nb);
+ snd_soc_jack_notifier_register(jack, &cht_jack_nb);
- return 0;
+ return ret;
}
static int cht_codec_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -167,28 +188,25 @@ static int cht_max98090_headset_init(struct snd_soc_component *component)
int jack_type;
int ret;
- /**
- * TI supports 4 butons headset detection
- * KEY_MEDIA
- * KEY_VOICECOMMAND
- * KEY_VOLUMEUP
- * KEY_VOLUMEDOWN
- */
- if (ctx->ts3a227e_present)
- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
- SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3;
- else
- jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE;
-
- ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type, jack,
- NULL, 0);
+ /*
+	 * TI supports 4-button headset detection:
+ * KEY_MEDIA
+ * KEY_VOICECOMMAND
+ * KEY_VOLUMEUP
+ * KEY_VOLUMEDOWN
+ */
+ jack_type = SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3;
+
+ ret = snd_soc_card_jack_new(card, "Headset Jack", jack_type,
+ jack, NULL, 0);
if (ret) {
dev_err(card->dev, "Headset Jack creation failed %d\n", ret);
return ret;
}
- return ts3a227e_enable_jack_detect(component, jack);
+ return ts3a227e_enable_jack_detect(component, &ctx->jack);
}
static struct snd_soc_ops cht_aif1_ops = {
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index 863f1d5e2a2c97..11d0cc2b0e3902 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -145,7 +145,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
- .dynamic = 0,
+ .dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
index 796a913ff5408b..72eea26ec6bb8a 100644
--- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
+++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
@@ -65,14 +65,6 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
return -EIO;
}
- /* Configure sysclk for codec */
- ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 24576000,
- SND_SOC_CLOCK_IN);
- if (ret) {
- dev_err(card->dev, "can't set codec sysclk configuration\n");
- return ret;
- }
-
if (SND_SOC_DAPM_EVENT_OFF(event)) {
ret = snd_soc_dai_set_pll(codec_dai, 0,
DA7219_SYSCLK_MCLK, 0, 0);
@@ -168,10 +160,19 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
{
struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_jack *jack;
int ret;
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(rtd->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
/*
* Headset buttons map to the google Reference headset.
* These can be configured by userspace.
@@ -187,7 +188,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
jack = &ctx->kabylake_headset;
- snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
@@ -400,6 +401,7 @@ static struct snd_soc_dai_link kabylake_dais[] = {
.trigger = {
SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
.dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
},
[KBL_DPCM_AUDIO_REF_CP] = {
.name = "Kbl Audio Reference cap",
diff --git a/sound/soc/intel/boards/kbl_da7219_max98373.c b/sound/soc/intel/boards/kbl_da7219_max98373.c
new file mode 100644
index 00000000000000..206a360545c255
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_da7219_max98373.c
@@ -0,0 +1,960 @@
+/*
+ * SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+ * Copyright(c) 2017-18 Intel Corporation.
+ *
+ * Intel Kabylake I2S Machine Driver with MAX98373 & DA7219 Codecs
+ *
+ * Modified from:
+ * Intel Kabylake I2S Machine driver supporting MAX98357 and
+ * DA7219 codecs
+ */
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/da7219.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "../../codecs/da7219-aad.h"
+
+#define KBL_DIALOG_CODEC_DAI "da7219-hifi"
+#define MAX98373_CODEC_DAI "max98373-aif1"
+#define MAXIM_DEV0_NAME "i2c-MX98373:00"
+#define MAXIM_DEV1_NAME "i2c-MX98373:01"
+#define DUAL_CHANNEL 2
+#define QUAD_CHANNEL 4
+
+static struct snd_soc_card *kabylake_audio_card;
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_codec_private {
+ struct snd_soc_jack kabylake_headset;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_ECHO_REF_CP,
+ KBL_DPCM_AUDIO_REF_CP,
+ KBL_DPCM_AUDIO_DMIC_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+ KBL_DPCM_AUDIO_HDMI3_PB,
+ KBL_DPCM_AUDIO_HS_PB,
+ KBL_DPCM_AUDIO_CP,
+};
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret = 0;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, KBL_DIALOG_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
+ return -EIO;
+ }
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(card->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret)
+ dev_err(card->dev, "failed to stop PLL: %d\n", ret);
+ } else if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM,
+ 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret)
+ dev_err(card->dev, "failed to start PLL: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new kabylake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("DP", NULL),
+ SND_SOC_DAPM_SPK("HDMI", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route kabylake_map[] = {
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+
+ /* other jacks */
+ { "DMic", NULL, "SoC DMIC" },
+
+ { "HDMI", NULL, "hif5 Output" },
+ { "DP", NULL, "hif6 Output" },
+
+ /* CODEC BE connections */
+ { "Left HiFi Playback", NULL, "ssp0 Tx" },
+ { "Right HiFi Playback", NULL, "ssp0 Tx" },
+ { "ssp0 Tx", NULL, "spk_out" },
+
+ /* IV feedback path */
+ { "codec0_fb_in", NULL, "ssp0 Rx"},
+ { "ssp0 Rx", NULL, "Left HiFi Capture" },
+ { "ssp0 Rx", NULL, "Right HiFi Capture" },
+
+ /* AEC capture path */
+ { "echo_ref_out", NULL, "ssp0 Rx" },
+
+ /* DMIC */
+ { "dmic01_hifi", NULL, "DMIC01 Rx" },
+ { "DMIC01 Rx", NULL, "DMIC AIF" },
+
+ { "hifi1", NULL, "iDisp1 Tx" },
+ { "iDisp1 Tx", NULL, "iDisp1_out" },
+ { "hifi2", NULL, "iDisp2 Tx" },
+ { "iDisp2 Tx", NULL, "iDisp2_out" },
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+};
+
+static const struct snd_soc_dapm_route kabylake_ssp1_map[] = {
+ { "Headphone Jack", NULL, "HPL" },
+ { "Headphone Jack", NULL, "HPR" },
+
+ /* other jacks */
+ { "MIC", NULL, "Headset Mic" },
+
+ /* CODEC BE connections */
+ { "Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec1_out" },
+
+ { "hs_in", NULL, "ssp1 Rx" },
+ { "ssp1 Rx", NULL, "Capture" },
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+};
+
+static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = substream->private_data;
+ int ret = 0, j;
+
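+	/*
+	 * Both amps read the shared stereo playback slots (mask 0x3) of
+	 * the 8 x 16-bit TDM frame, while each transmits its IV-sense
+	 * feedback on its own slot pair: 0x30 (slots 4-5) for DEV0 and
+	 * 0xC0 (slots 6-7) for DEV1.
+	 */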
+ for (j = 0; j < runtime->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops kabylake_ssp0_ops = {
+ .hw_params = kabylake_ssp0_hw_params,
+};
+
+static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
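+	/*
+	 * The hw_params block is embedded in the snd_soc_dpcm runtime,
+	 * so container_of() recovers which FE/BE pair this fixup is
+	 * running for.
+	 */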
+ struct snd_soc_dpcm *dpcm = container_of(
+ params, struct snd_soc_dpcm, hw_params);
+ struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+ struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
+
+ /*
+ * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ */
+ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ }
+
+	/*
+	 * The speaker amps on SSP0 support S16_LE, not S24_LE, so
+	 * override the format mask here.
+	 */
+ if (!strcmp(be_dai_link->name, "SSP0-Codec"))
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = rtd->card;
+	int ret;
+
+	ret = snd_soc_dapm_add_routes(&card->dapm,
+					kabylake_ssp1_map,
+					ARRAY_SIZE(kabylake_ssp1_map));
+	if (ret)
+		dev_err(rtd->dev, "SSP1 route addition failed: %d\n", ret);
+
+	/*
+	 * Headset buttons map to the Google reference headset.
+	 * These can be reconfigured by userspace.
+	 */
+ ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->kabylake_headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->kabylake_headset;
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ da7219_aad_jack_det(codec, &ctx->kabylake_headset);
+
+ return 0;
+}
+
+static int kabylake_dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+	int ret;
+
+	ret = snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+ if (ret)
+ dev_err(rtd->dev, "SoC DMIC - Ignore suspend failed %d\n", ret);
+
+ return ret;
+}
+
+static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = device;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI1_PB);
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI2_PB);
+}
+
+static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI3_PB);
+}
+
+static int kabylake_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static unsigned int channels_quad[] = {
+ QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+ .count = ARRAY_SIZE(channels_quad),
+ .list = channels_quad,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+	/*
+	 * On this platform the PCM front end supports
+	 * 48 kHz, stereo, 16-bit audio.
+	 */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_da7219_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	/*
+	 * Constrain the BE channel count to match the FE channels
+	 * requested by userspace.
+	 */
+
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
+static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
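+	/* DMIC capture on this board is fixed at four channels, 48 kHz */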
+ runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels_quad);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops kabylake_dmic_ops = {
+ .startup = kabylake_dmic_startup,
+};
+
+static const unsigned int rates_16000[] = {
+ 16000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static const unsigned int ch_mono[] = {
+ 1,
+};
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
+static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
+{
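+	/* The wake-on-voice reference stream is mono, 16 kHz only */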
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+}
+
+static struct snd_soc_ops skylake_refcap_ops = {
+ .startup = kabylake_refcap_startup,
+};
+
+static struct snd_soc_codec_conf max98373_codec_conf[] = {
+	{
+		.dev_name = MAXIM_DEV0_NAME,
+		.name_prefix = "Right",
+	},
+	{
+		.dev_name = MAXIM_DEV1_NAME,
+		.name_prefix = "Left",
+	},
+};
+
+static struct snd_soc_dai_link_component ssp0_codec_components[] = {
+	{ /* Right */
+		.name = MAXIM_DEV0_NAME,
+		.dai_name = MAX98373_CODEC_DAI,
+	},
+	{ /* Left */
+		.name = MAXIM_DEV1_NAME,
+		.dai_name = MAX98373_CODEC_DAI,
+	},
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+		.ops = &skylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HS_PB] = {
+ .name = "Kbl Audio Headset Playback",
+ .stream_name = "Headset Audio",
+ .cpu_dai_name = "System Pin2",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+		.ops = &kabylake_da7219_fe_ops,
+	},
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = "i2c-DLGS7219:00",
+ .codec_dai_name = KBL_DIALOG_CODEC_DAI,
+ .init = kabylake_da7219_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .init = kabylake_dmic_init,
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_max98373_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &skylaye_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ .name = "dmic01",
+ .id = 1,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .init = kabylake_dmic_init,
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 2,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 3,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 4,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+	int err;
+
+	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+		err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device);
+		if (err < 0)
+			return err;
+	}
+
+ return 0;
+}
+
+/* kabylake audio machine driver for DA7219 + MAX98373 */
+static struct snd_soc_card kbl_audio_card_da7219_m98373 = {
+ .name = "kblda7219m98373",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98373_codec_conf,
+ .num_configs = ARRAY_SIZE(max98373_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+/* kabylake audio machine driver for MAX98373 */
+static struct snd_soc_card kbl_audio_card_max98373 = {
+ .name = "kblmax98373",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_max98373_dais,
+ .num_links = ARRAY_SIZE(kabylake_max98373_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98373_codec_conf,
+ .num_configs = ARRAY_SIZE(max98373_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_codec_private *ctx;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card =
+ (struct snd_soc_card *)pdev->id_entry->driver_data;
+
+ kabylake_audio_card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ {
+ .name = "kbl_da7219_max98373",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_da7219_m98373,
+ },
+ {
+ .name = "kbl_max98373",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_max98373,
+ },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_da7219_max98373",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98373 & DA7219");
+MODULE_AUTHOR("Sathya Prakash M R <sathya.prakash.m.r@intel.com>");
+MODULE_AUTHOR("Sathyanarayana Nujella <sathyanarayana.nujella@intel.com>");
+MODULE_AUTHOR("Naveen Manohar <naveen.m@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_da7219_max98373");
+MODULE_ALIAS("platform:kbl_max98373");
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
new file mode 100644
index 00000000000000..1ea1331fb1b1e5
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -0,0 +1,1026 @@
+/*
+ * SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+ * Copyright(c) 2017-18 Intel Corporation.
+ *
+ * Intel Kabylake I2S Machine Driver with MAX98927 & DA7219 Codecs
+ *
+ * Modified from:
+ * Intel Kabylake I2S Machine driver supporting MAX98927 and
+ * DA7219 codecs
+ */
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/da7219.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "../../codecs/da7219-aad.h"
+
+#define KBL_DIALOG_CODEC_DAI "da7219-hifi"
+#define MAX98927_CODEC_DAI "max98927-aif1"
+#define MAXIM_DEV0_NAME "i2c-MX98927:00"
+#define MAXIM_DEV1_NAME "i2c-MX98927:01"
+#define DUAL_CHANNEL 2
+#define QUAD_CHANNEL 4
+
+static struct snd_soc_card *kabylake_audio_card;
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_codec_private {
+ struct snd_soc_jack kabylake_headset;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_CP,
+ KBL_DPCM_AUDIO_ECHO_REF_CP,
+ KBL_DPCM_AUDIO_REF_CP,
+ KBL_DPCM_AUDIO_DMIC_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+ KBL_DPCM_AUDIO_HDMI3_PB,
+ KBL_DPCM_AUDIO_HS_PB,
+};
+
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret = 0;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, KBL_DIALOG_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
+ return -EIO;
+ }
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(card->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
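+	/*
+	 * On DAPM power-down, bypass the PLL and run the codec straight
+	 * from MCLK; on power-up, enable the PLL in SRM mode so the
+	 * 24.576 MHz MCLK is multiplied to 98.304 MHz for the DA7219.
+	 */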
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret)
+ dev_err(card->dev, "failed to stop PLL: %d\n", ret);
+ } else if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM,
+ 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret)
+ dev_err(card->dev, "failed to start PLL: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new kabylake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("DP", NULL),
+ SND_SOC_DAPM_SPK("HDMI", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route kabylake_map[] = {
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+
+ /* other jacks */
+ { "DMic", NULL, "SoC DMIC" },
+
+ { "HDMI", NULL, "hif5 Output" },
+ { "DP", NULL, "hif6 Output" },
+
+ /* CODEC BE connections */
+ { "Left HiFi Playback", NULL, "ssp0 Tx" },
+ { "Right HiFi Playback", NULL, "ssp0 Tx" },
+ { "ssp0 Tx", NULL, "spk_out" },
+
+ /* IV feedback path */
+ { "codec0_fb_in", NULL, "ssp0 Rx"},
+ { "ssp0 Rx", NULL, "Left HiFi Capture" },
+ { "ssp0 Rx", NULL, "Right HiFi Capture" },
+
+ /* AEC capture path */
+ { "echo_ref_out", NULL, "ssp0 Rx" },
+
+ /* DMIC */
+ { "dmic01_hifi", NULL, "DMIC01 Rx" },
+ { "DMIC01 Rx", NULL, "DMIC AIF" },
+
+ { "hifi1", NULL, "iDisp1 Tx" },
+ { "iDisp1 Tx", NULL, "iDisp1_out" },
+ { "hifi2", NULL, "iDisp2 Tx" },
+ { "iDisp2 Tx", NULL, "iDisp2_out" },
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+};
+
+static const struct snd_soc_dapm_route kabylake_ssp1_map[] = {
+ { "Headphone Jack", NULL, "HPL" },
+ { "Headphone Jack", NULL, "HPR" },
+
+ /* other jacks */
+ { "MIC", NULL, "Headset Mic" },
+
+ /* CODEC BE connections */
+ { "Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec1_out" },
+
+ { "hs_in", NULL, "ssp1 Rx" },
+ { "ssp1 Rx", NULL, "Capture" },
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+};
+
+static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = substream->private_data;
+ int ret = 0, j;
+
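+	/*
+	 * Both MAX98927 amps read the shared stereo playback slots
+	 * (mask 0x3) of the 8 x 16-bit TDM frame, while each transmits
+	 * its IV-sense feedback on its own slot pair: 0x30 (slots 4-5)
+	 * for DEV0 and 0xC0 (slots 6-7) for DEV1.
+	 */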
+ for (j = 0; j < runtime->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int kabylake_ssp0_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ int j, ret;
+
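+	/*
+	 * Toggle each amp's "<prefix> Spk" DAPM pin together with the
+	 * stream so the speakers are only powered while SSP0 is running.
+	 */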
+ for (j = 0; j < rtd->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
+ const char *name = codec_dai->component->name;
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_codec_get_dapm(codec);
+ char pin_name[20];
+
+ if (strcmp(name, MAXIM_DEV0_NAME) &&
+ strcmp(name, MAXIM_DEV1_NAME))
+ continue;
+
+ snprintf(pin_name, ARRAY_SIZE(pin_name), "%s Spk",
+ codec_dai->component->name_prefix);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ret = snd_soc_dapm_enable_pin(dapm, pin_name);
+ if (ret) {
+ dev_err(rtd->dev, "failed to enable %s: %d\n",
+ pin_name, ret);
+ return ret;
+ }
+ snd_soc_dapm_sync(dapm);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ret = snd_soc_dapm_disable_pin(dapm, pin_name);
+ if (ret) {
+ dev_err(rtd->dev, "failed to disable %s: %d\n",
+ pin_name, ret);
+ return ret;
+ }
+ snd_soc_dapm_sync(dapm);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops kabylake_ssp0_ops = {
+ .hw_params = kabylake_ssp0_hw_params,
+ .trigger = kabylake_ssp0_trigger,
+};
+
+static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
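+	/*
+	 * The hw_params block is embedded in the snd_soc_dpcm runtime,
+	 * so container_of() recovers which FE/BE pair this fixup is
+	 * running for.
+	 */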
+ struct snd_soc_dpcm *dpcm = container_of(
+ params, struct snd_soc_dpcm, hw_params);
+ struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+ struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
+
+ /*
+ * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ */
+ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ }
+
+	/*
+	 * The speaker amps on SSP0 support S16_LE, not S24_LE, so
+	 * override the format mask here.
+	 */
+ if (!strcmp(be_dai_link->name, "SSP0-Codec"))
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
+static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_codec *codec = rtd->codec;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = rtd->card;
+	int ret;
+
+	ret = snd_soc_dapm_add_routes(&card->dapm,
+					kabylake_ssp1_map,
+					ARRAY_SIZE(kabylake_ssp1_map));
+	if (ret)
+		dev_err(rtd->dev, "SSP1 route addition failed: %d\n", ret);
+
+	/*
+	 * Headset buttons map to the Google reference headset.
+	 * These can be reconfigured by userspace.
+	 */
+ ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->kabylake_headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->kabylake_headset;
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
+
+ da7219_aad_jack_det(codec, &ctx->kabylake_headset);
+
+ ret = snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+ if (ret)
+ dev_err(rtd->dev, "SoC DMIC - Ignore suspend failed %d\n", ret);
+
+ return ret;
+}
+
+static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = device;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI1_PB);
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI2_PB);
+}
+
+static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI3_PB);
+}
+
+static int kabylake_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static unsigned int channels_quad[] = {
+ QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+ .count = ARRAY_SIZE(channels_quad),
+ .list = channels_quad,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+	/*
+	 * On this platform the PCM front end supports
+	 * 48 kHz, stereo, 16-bit audio.
+	 */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_da7219_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	/*
+	 * Constrain the BE channel count to match the FE channels
+	 * requested by userspace.
+	 */
+
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
+static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
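+	/* DMIC capture on this board is fixed at four channels, 48 kHz */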
+ runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels_quad);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops kabylake_dmic_ops = {
+ .startup = kabylake_dmic_startup,
+};
+
+static const unsigned int rates_16000[] = {
+ 16000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static const unsigned int ch_mono[] = {
+ 1,
+};
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
+static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
+{
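+	/* The wake-on-voice reference stream is mono, 16 kHz only */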
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+}
+
+static struct snd_soc_ops skylake_refcap_ops = {
+ .startup = kabylake_refcap_startup,
+};
+
+static struct snd_soc_codec_conf max98927_codec_conf[] = {
+	{
+		.dev_name = MAXIM_DEV0_NAME,
+		.name_prefix = "Right",
+	},
+	{
+		.dev_name = MAXIM_DEV1_NAME,
+		.name_prefix = "Left",
+	},
+};
+
+static struct snd_soc_dai_link_component ssp0_codec_components[] = {
+	{ /* Right */
+		.name = MAXIM_DEV0_NAME,
+		.dai_name = MAX98927_CODEC_DAI,
+	},
+	{ /* Left */
+		.name = MAXIM_DEV1_NAME,
+		.dai_name = MAX98927_CODEC_DAI,
+	},
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+		.ops = &skylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HS_PB] = {
+ .name = "Kbl Audio Headset Playback",
+ .stream_name = "Headset Audio",
+ .cpu_dai_name = "System Pin2",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+		.ops = &kabylake_da7219_fe_ops,
+	},
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = "i2c-DLGS7219:00",
+ .codec_dai_name = KBL_DIALOG_CODEC_DAI,
+ .init = kabylake_da7219_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_max98927_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+		.ops = &skylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ .name = "dmic01",
+ .id = 1,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 2,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 3,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 4,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+ struct snd_soc_dapm_context *dapm = &card->dapm;
+	int err;
+
+	list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+		err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device);
+		if (err < 0)
+			return err;
+	}
+
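+	/*
+	 * Leave both speaker pins disabled at probe; the SSP0 trigger
+	 * callback enables them only while the speaker stream runs.
+	 */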
+ err = snd_soc_dapm_disable_pin(dapm, "Left Spk");
+ if (err) {
+ dev_err(card->dev, "failed to disable Left Spk: %d\n", err);
+ return err;
+ }
+
+ err = snd_soc_dapm_disable_pin(dapm, "Right Spk");
+ if (err) {
+ dev_err(card->dev, "failed to disable Right Spk: %d\n", err);
+ return err;
+ }
+
+ return snd_soc_dapm_sync(dapm);
+}
+
+/* kabylake audio machine driver for DA7219 + MAX98927 */
+static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
+ .name = "kblda7219m98927",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+/* kabylake audio machine driver for MAX98927 */
+static struct snd_soc_card kbl_audio_card_max98927 = {
+ .name = "kblmax98927",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_max98927_dais,
+ .num_links = ARRAY_SIZE(kabylake_max98927_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_codec_private *ctx;
+
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card =
+ (struct snd_soc_card *)pdev->id_entry->driver_data;
+
+ kabylake_audio_card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ {
+ .name = "kbl_da7219_max98927",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_da7219_m98927,
+ },
+ {
+ .name = "kbl_max98927",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_max98927,
+ },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_da7219_max98927",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio)
+
+/* Module information */
+MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927 & DA7219");
+MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_da7219_max98927");
+MODULE_ALIAS("platform:kbl_max98927");
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index ef4881e7753ab2..d4f6c222dda77d 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -274,7 +274,6 @@ int sst_dma_new(struct sst_dsp *sst)
struct sst_pdata *sst_pdata = sst->pdata;
struct sst_dma *dma;
struct resource mem;
- const char *dma_dev_name;
int ret = 0;
if (sst->pdata->resindex_dma_base == -1)
@@ -285,7 +284,6 @@ int sst_dma_new(struct sst_dsp *sst)
* is attached to the ADSP IP. */
switch (sst->pdata->dma_engine) {
case SST_DMA_TYPE_DW:
- dma_dev_name = "dw_dmac";
break;
default:
dev_err(sst->dev, "error: invalid DMA engine %d\n",
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 22701205deeecd..bfed6559b6f9a0 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -1024,8 +1024,18 @@ static struct sst_codecs kbl_5663_5514_codecs = {
};
static struct sst_codecs kbl_7219_98357_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98357A"}
+};
+
+static struct sst_codecs kbl_7219_98373_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98373"}
+};
+
+static struct sst_codecs kbl_7219_98927_codecs = {
.num_codecs = 1,
- .codecs = {"MX98357A"}
+ .codecs = {"MX98927"}
};
static struct sst_acpi_mach sst_skl_devdata[] = {
@@ -1117,8 +1127,30 @@ static struct sst_acpi_mach sst_kbl_devdata[] = {
.fw_filename = "intel/dsp_fw_kbl.bin",
.machine_quirk = sst_acpi_codec_list,
.quirk_data = &kbl_7219_98357_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "DLGS7219",
+ .drv_name = "kbl_da7219_max98373",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &kbl_7219_98373_codecs,
.pdata = &skl_dmic_data
},
+ {
+ .id = "DLGS7219",
+ .drv_name = "kbl_da7219_max98927",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = sst_acpi_codec_list,
+ .quirk_data = &kbl_7219_98927_codecs,
+ .pdata = &skl_dmic_data
+ },
+ {
+ .id = "MX98373",
+ .drv_name = "kbl_max98373",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .pdata = &skl_dmic_data
+ },
{}
};
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 09db2aec12a301..776e809a8aab07 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -48,6 +48,8 @@ struct omap_dmic {
struct device *dev;
void __iomem *io_base;
struct clk *fclk;
+ struct pm_qos_request pm_qos_req;
+ int latency;
int fclk_freq;
int out_freq;
int clk_div;
@@ -124,6 +126,8 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
mutex_lock(&dmic->mutex);
+ pm_qos_remove_request(&dmic->pm_qos_req);
+
if (!dai->active)
dmic->active = 0;
@@ -226,6 +230,8 @@ static int omap_dmic_dai_hw_params(struct snd_pcm_substream *substream,
/* packet size is threshold * channels */
dma_data = snd_soc_dai_get_dma_data(dai, substream);
dma_data->maxburst = dmic->threshold * channels;
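+	/*
+	 * Roughly the time (in usec) the FIFO headroom above the
+	 * threshold takes to fill at the stream rate; the DMA must
+	 * respond within this window to avoid an overrun.
+	 */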
+ dmic->latency = (OMAP_DMIC_THRES_MAX - dmic->threshold) * USEC_PER_SEC /
+ params_rate(params);
return 0;
}
@@ -236,6 +242,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
u32 ctrl;
+ if (pm_qos_request_active(&dmic->pm_qos_req))
+ pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
+
/* Configure uplink threshold */
omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 8d0d45d330e762..8eb2d12b6a3496 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -54,6 +54,8 @@ struct omap_mcpdm {
unsigned long phys_base;
void __iomem *io_base;
int irq;
+ struct pm_qos_request pm_qos_req;
+ int latency[2];
struct mutex mutex;
@@ -273,6 +275,9 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock(&mcpdm->mutex);
@@ -285,6 +290,14 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
}
}
+ if (mcpdm->latency[stream2])
+ pm_qos_update_request(&mcpdm->pm_qos_req,
+ mcpdm->latency[stream2]);
+ else if (mcpdm->latency[stream1])
+ pm_qos_remove_request(&mcpdm->pm_qos_req);
+
+ mcpdm->latency[stream1] = 0;
+
mutex_unlock(&mcpdm->mutex);
}
@@ -296,7 +309,7 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
int stream = substream->stream;
struct snd_dmaengine_dai_dma_data *dma_data;
u32 threshold;
- int channels;
+ int channels, latency;
int link_mask = 0;
channels = params_channels(params);
@@ -336,14 +349,25 @@ static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
dma_data->maxburst =
(MCPDM_DN_THRES_MAX - threshold) * channels;
+ latency = threshold;
} else {
/* If playback is not running assume a stereo stream to come */
if (!mcpdm->config[!stream].link_mask)
mcpdm->config[!stream].link_mask = (0x3 << 3);
dma_data->maxburst = threshold * channels;
+ latency = (MCPDM_DN_THRES_MAX - threshold);
}
+	/*
+	 * The DMA must service a request within this latency window
+	 * (in usec) to avoid an under- or overflow.
+	 */
+ mcpdm->latency[stream] = latency * USEC_PER_SEC / params_rate(params);
+
+ if (!mcpdm->latency[stream])
+ mcpdm->latency[stream] = 10;
+
/* Check if we need to restart McPDM with this stream */
if (mcpdm->config[stream].link_mask &&
mcpdm->config[stream].link_mask != link_mask)
@@ -358,6 +382,20 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
+ struct pm_qos_request *pm_qos_req = &mcpdm->pm_qos_req;
+ int tx = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ int stream1 = tx ? SNDRV_PCM_STREAM_PLAYBACK : SNDRV_PCM_STREAM_CAPTURE;
+ int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
+ int latency = mcpdm->latency[stream2];
+
+	/* Use the tighter latency so the OMAP hardware cannot idle between FIFO fills */
+ if (!latency || mcpdm->latency[stream1] < latency)
+ latency = mcpdm->latency[stream1];
+
+ if (pm_qos_request_active(pm_qos_req))
+ pm_qos_update_request(pm_qos_req, latency);
+ else if (latency)
+ pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
if (!omap_mcpdm_active(mcpdm)) {
omap_mcpdm_start(mcpdm);
@@ -419,6 +457,9 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
free_irq(mcpdm->irq, (void *)mcpdm);
pm_runtime_disable(mcpdm->dev);
+ if (pm_qos_request_active(&mcpdm->pm_qos_req))
+ pm_qos_remove_request(&mcpdm->pm_qos_req);
+
return 0;
}
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index 6147e86e9b0fc2..55ca9c9364b818 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -136,3 +136,4 @@ module_platform_driver(mmp_driver);
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_DESCRIPTION("ALSA SoC Brownstone");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:brownstone-audio");
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 5c8f9db50a47a0..d1661fa6ee0818 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -207,3 +207,4 @@ module_platform_driver(mioa701_wm9713_driver);
MODULE_AUTHOR("Robert Jarzmik (rjarzmik@free.fr)");
MODULE_DESCRIPTION("ALSA SoC WM9713 MIO A701");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mioa701-wm9713");
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 51e790d006f5cb..96df9b2d8fc47f 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -248,3 +248,4 @@ module_platform_driver(mmp_pcm_driver);
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_DESCRIPTION("MMP Soc Audio DMA module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mmp-pcm-audio");
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index eca60c29791a33..ca8b23f8c525e7 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -482,3 +482,4 @@ module_platform_driver(asoc_mmp_sspa_driver);
MODULE_AUTHOR("Leo Yan <leoy@marvell.com>");
MODULE_DESCRIPTION("MMP SSPA SoC Interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mmp-sspa-dai");
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 4e74d9573f032e..bcc81e920a67cb 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -161,3 +161,4 @@ module_platform_driver(palm27x_wm9712_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("ALSA SoC Palm T|X, T5 and LifeDrive");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:palm27x-asoc");
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index da03fad1b9cda1..3cad990dad2cd3 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -833,3 +833,4 @@ module_platform_driver(asoc_ssp_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("PXA SSP/PCM SoC Interface");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa-ssp-dai");
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index f3de615aacd77f..9615e6de1306b1 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -287,3 +287,4 @@ module_platform_driver(pxa2xx_ac97_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("AC97 driver for the Intel PXA2xx chip");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-ac97");
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index 9f390398d51804..410d48b93031c0 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -117,3 +117,4 @@ module_platform_driver(pxa_pcm_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa-pcm-audio");
diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
index 73fbfc4c075830..2d91fff013a3d4 100644
--- a/sound/soc/rockchip/rk3399_gru_sound.c
+++ b/sound/soc/rockchip/rk3399_gru_sound.c
@@ -389,7 +389,8 @@ static const struct snd_soc_dai_link rockchip_dais[] = {
[DAILINK_RT5514_DSP] = {
.name = "RT5514 DSP",
.stream_name = "Wake on Voice",
- .codec_dai_name = "rt5514-dsp-cpu-dai",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
},
};
@@ -550,7 +551,18 @@ static int rockchip_sound_of_parse_dais(struct device *dev,
if (index < 0)
continue;
- np_cpu = (index == DAILINK_CDNDP) ? np_cpu1 : np_cpu0;
+ switch (index) {
+ case DAILINK_CDNDP:
+ np_cpu = np_cpu1;
+ break;
+ case DAILINK_RT5514_DSP:
+ np_cpu = np_codec;
+ break;
+ default:
+ np_cpu = np_cpu0;
+ break;
+ }
+
if (!np_cpu) {
dev_err(dev, "Missing 'rockchip,cpu' for %s\n",
rockchip_dais[index].name);
@@ -560,7 +572,8 @@ static int rockchip_sound_of_parse_dais(struct device *dev,
dai = &card->dai_link[card->num_links++];
*dai = rockchip_dais[index];
- dai->codec_of_node = np_codec;
+ if (!dai->codec_name)
+ dai->codec_of_node = np_codec;
dai->platform_of_node = np_cpu;
dai->cpu_of_node = np_cpu;
diff --git a/sound/soc/sirf/sirf-usp.c b/sound/soc/sirf/sirf-usp.c
index 45fc06c0e0e551..6b504f407079bb 100644
--- a/sound/soc/sirf/sirf-usp.c
+++ b/sound/soc/sirf/sirf-usp.c
@@ -367,10 +367,9 @@ static int sirf_usp_pcm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, usp);
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap(&pdev->dev, mem_res->start,
- resource_size(mem_res));
- if (base == NULL)
- return -ENOMEM;
+ base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
usp->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&sirf_usp_regmap_config);
if (IS_ERR(usp->regmap))
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 2589d3a723f3e5..aa8e216d49d2c2 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2062,6 +2062,7 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
card->instantiated = 1;
+ dapm_mark_endpoints_dirty(card);
snd_soc_dapm_sync(&card->dapm);
mutex_unlock(&card->mutex);
mutex_unlock(&client_mutex);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7d5d3c4d9c590a..5be4975454bdde 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -425,6 +425,8 @@ err_data:
static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
+
+ list_del(&data->paths);
kfree(data->wlist);
kfree(data);
}
@@ -1984,19 +1986,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
out = is_connected_output_ep(w, NULL, NULL);
}
- ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
+ ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
w->name, w->power ? "On" : "Off",
w->force ? " (forced)" : "", in, out);
if (w->reg >= 0)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" - R%d(0x%x) mask 0x%x",
w->reg, w->reg, w->mask << w->shift);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (w->sname)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
w->sname,
w->active ? "active" : "inactive");
@@ -2009,7 +2011,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (!p->connect)
continue;
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" %s \"%s\" \"%s\"\n",
(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
p->name ? p->name : "static",
@@ -3921,6 +3923,13 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
continue;
}
+ /* let users know there is no DAI to link */
+ if (!dai_w->priv) {
+ dev_dbg(card->dev, "dai widget %s has no DAI\n",
+ dai_w->name);
+ continue;
+ }
+
dai = dai_w->priv;
/* ...find all widgets with the same stream and link them */
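
The snprintf-to-scnprintf conversion above matters because snprintf() returns the length that would have been written, not what was actually written; chaining "ret += snprintf(buf + ret, PAGE_SIZE - ret, ...)" can therefore push ret past the buffer and wrap the remaining size. A minimal userspace sketch of the clamping behaviour the kernel's scnprintf() provides (an illustration, not the kernel implementation):

    #include <stdio.h>
    #include <stddef.h>

    static int scnprintf_sketch(char *buf, size_t size, const char *s)
    {
            int n;

            if (size == 0)
                    return 0;
            n = snprintf(buf, size, "%s", s);
            if (n < 0)
                    return 0;
            /* report only what was actually stored, as scnprintf() does */
            return (size_t)n >= size ? (int)(size - 1) : n;
    }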
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 80088c98ce27dd..b111ecda6439d1 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1621,6 +1621,14 @@ static u64 dpcm_runtime_base_format(struct snd_pcm_substream *substream)
int i;
for (i = 0; i < be->num_codecs; i++) {
+ /*
+ * Skip CODECs which don't support the current stream
+ * type. See soc_pcm_init_runtime_hw() for more details
+ */
+ if (!snd_soc_dai_stream_valid(be->codec_dais[i],
+ stream))
+ continue;
+
codec_dai_drv = be->codec_dais[i]->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
@@ -1793,8 +1801,10 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
continue;
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN))
- continue;
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) {
+ soc_pcm_hw_free(be_substream);
+ be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
+ }
dev_dbg(be->dev, "ASoC: close BE %s\n",
be->dai_link->name);
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 26c80b72eeca04..4efe30a66ecc70 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2425,6 +2425,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id)
{
struct soc_tplg tplg;
+ int ret;
/* setup parsing context */
memset(&tplg, 0, sizeof(tplg));
@@ -2438,7 +2439,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp,
tplg.bytes_ext_ops = ops->bytes_ext_ops;
tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count;
- return soc_tplg_load(&tplg);
+ ret = soc_tplg_load(&tplg);
+	/* free the created components if loading the topology fails */
+ if (ret)
+ snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load);
diff --git a/sound/soc/spear/spdif_in.c b/sound/soc/spear/spdif_in.c
index 977a078eb92f23..7f32527fc3c8d8 100644
--- a/sound/soc/spear/spdif_in.c
+++ b/sound/soc/spear/spdif_in.c
@@ -223,8 +223,10 @@ static int spdif_in_probe(struct platform_device *pdev)
host->io_base = io_base;
host->irq = platform_get_irq(pdev, 0);
- if (host->irq < 0)
- return -EINVAL;
+ if (host->irq < 0) {
+ dev_warn(&pdev->dev, "failed to get IRQ: %d\n", host->irq);
+ return host->irq;
+ }
host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk))
diff --git a/sound/sparc/cs4231.c b/sound/sparc/cs4231.c
index 30bdc971883b4c..017e241b0ec9f5 100644
--- a/sound/sparc/cs4231.c
+++ b/sound/sparc/cs4231.c
@@ -1146,10 +1146,8 @@ static int snd_cs4231_playback_open(struct snd_pcm_substream *substream)
runtime->hw = snd_cs4231_playback;
err = snd_cs4231_open(chip, CS4231_MODE_PLAY);
- if (err < 0) {
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+ if (err < 0)
return err;
- }
chip->playback_substream = substream;
chip->p_periods_sent = 0;
snd_pcm_set_sync(substream);
@@ -1167,10 +1165,8 @@ static int snd_cs4231_capture_open(struct snd_pcm_substream *substream)
runtime->hw = snd_cs4231_capture;
err = snd_cs4231_open(chip, CS4231_MODE_RECORD);
- if (err < 0) {
- snd_free_pages(runtime->dma_area, runtime->dma_bytes);
+ if (err < 0)
return err;
- }
chip->capture_substream = substream;
chip->c_periods_sent = 0;
snd_pcm_set_sync(substream);
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index e557946718a9ed..d9fcae071b477d 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -22,9 +22,9 @@
#include <sound/core.h>
#include <sound/hwdep.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "emux_voice.h"
-
#define TMP_CLIENT_ID 0x1001
/*
@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
return -EFAULT;
if (info.mode < 0 || info.mode >= EMUX_MD_END)
return -EINVAL;
+ info.mode = array_index_nospec(info.mode, EMUX_MD_END);
if (info.port < 0) {
for (i = 0; i < emu->num_ports; i++)
emu->portptrs[i]->ctrls[info.mode] = info.value;
} else {
- if (info.port < emu->num_ports)
+ if (info.port < emu->num_ports) {
+ info.port = array_index_nospec(info.port, emu->num_ports);
emu->portptrs[info.port]->ctrls[info.mode] = info.value;
+ }
}
return 0;
}
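
The array_index_nospec() calls added above apply the usual Spectre-v1 hardening: after the bounds check passes architecturally, the index is clamped so that a mispredicted branch cannot speculatively index out of range. A simplified sketch of the idea (the kernel helper builds the mask with carefully chosen branch-free arithmetic; the comparison below is only for illustration):

    #include <stddef.h>

    static inline size_t index_nospec_sketch(size_t idx, size_t sz)
    {
            size_t mask = 0 - (size_t)(idx < sz); /* all-ones iff idx < sz */

            return idx & mask; /* out-of-range indexes collapse to 0 */
    }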
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 8a49e08a7e9f79..94e16d6931f5d7 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -592,9 +592,12 @@ static int usb_audio_probe(struct usb_interface *intf,
__error:
if (chip) {
+ /* chip->active is inside the chip->card object,
+		 * so decrement it before the memory is possibly freed.
+ */
+ atomic_dec(&chip->active);
if (!chip->num_interfaces)
snd_card_free(chip->card);
- atomic_dec(&chip->active);
}
mutex_unlock(&register_mutex);
return err;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 08e63e01a24892..788e80521d984d 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -451,7 +451,8 @@ int snd_usb_get_cur_mix_value(struct usb_mixer_elem_info *cval,
cval->control, channel, err);
return err;
}
- cval->cached |= 1 << channel;
+ if (!cval->cache_disabled)
+ cval->cached |= 1 << channel;
cval->cache_val[index] = *value;
return 0;
}
@@ -542,7 +543,8 @@ int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
value);
if (err < 0)
return err;
- cval->cached |= 1 << channel;
+ if (!cval->cache_disabled)
+ cval->cached |= 1 << channel;
cval->cache_val[index] = value;
return 0;
}
@@ -715,6 +717,66 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
}
/*
+ * Get logical cluster information for UAC3 devices.
+ */
+static int get_cluster_channels_v3(struct mixer_build *state, unsigned int cluster_id)
+{
+ struct uac3_cluster_header_descriptor c_header;
+ int err;
+
+ err = snd_usb_ctl_msg(state->chip->dev,
+ usb_rcvctrlpipe(state->chip->dev, 0),
+ UAC3_CS_REQ_HIGH_CAPABILITY_DESCRIPTOR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ cluster_id,
+ snd_usb_ctrl_intf(state->chip),
+ &c_header, sizeof(c_header));
+ if (err < 0)
+ goto error;
+ if (err != sizeof(c_header)) {
+ err = -EIO;
+ goto error;
+ }
+
+ return c_header.bNrChannels;
+
+error:
+ usb_audio_err(state->chip, "cannot request logical cluster ID: %d (err: %d)\n", cluster_id, err);
+ return err;
+}
+
+/*
+ * Get number of channels for a Mixer Unit.
+ */
+static int uac_mixer_unit_get_channels(struct mixer_build *state,
+ struct uac_mixer_unit_descriptor *desc)
+{
+ int mu_channels;
+
+ if (desc->bLength < 11)
+ return -EINVAL;
+ if (!desc->bNrInPins)
+ return -EINVAL;
+
+ switch (state->mixer->protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ mu_channels = uac_mixer_unit_bNrChannels(desc);
+ break;
+ case UAC_VERSION_3:
+ mu_channels = get_cluster_channels_v3(state,
+ uac3_mixer_unit_wClusterDescrID(desc));
+ break;
+ }
+
+ if (!mu_channels)
+ return -EINVAL;
+
+ return mu_channels;
+}
+
+/*
* parse the source unit recursively until it reaches to a terminal
* or a branched unit.
*/
@@ -861,6 +923,18 @@ static int check_input_term(struct mixer_build *state, int id,
term->name = le16_to_cpu(d->wClockSourceStr);
return 0;
}
+ case UAC3_MIXER_UNIT: {
+ struct uac_mixer_unit_descriptor *d = p1;
+
+ err = uac_mixer_unit_get_channels(state, d);
+ if (err < 0)
+ return err;
+
+ term->channels = err;
+ term->type = d->bDescriptorSubtype << 16; /* virtual type */
+
+ return 0;
+ }
default:
return -ENODEV;
}
@@ -1019,6 +1093,15 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
cval->res = 384;
}
break;
+
+ case USB_ID(0x0b0e, 0x0412): /* Jabra Speakerphone 410 */
+ case USB_ID(0x0b0e, 0x0420): /* Jabra Speakerphone 510 */
+ if (strstr(kctl->id.name, "PCM Playback Volume") != NULL) {
+ cval->min = -8765; /* -36 dB + 1 step (0xdc00 - 451) */
+ cval->max = 1597; /* +8 dB - 1 step (0x0800 - 451) */
+ cval->res = 451; /* 25 steps between -36 and +8 dB */
+ }
+ break;
}
}
@@ -1218,7 +1301,7 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
return filter_error(cval, err);
val = ucontrol->value.integer.value[cnt];
val = get_abs_value(cval, val);
- if (oval != val) {
+ if (cval->cache_disabled || (oval != val)) {
snd_usb_set_cur_mix_value(cval, c + 1, cnt, val);
changed = 1;
}
@@ -1231,7 +1314,7 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
return filter_error(cval, err);
val = ucontrol->value.integer.value[0];
val = get_abs_value(cval, val);
- if (val != oval) {
+ if (cval->cache_disabled || (val != oval)) {
snd_usb_set_cur_mix_value(cval, 0, 0, val);
changed = 1;
}
@@ -1254,6 +1337,51 @@ static int mixer_ctl_master_bool_get(struct snd_kcontrol *kcontrol,
return 0;
}
+/* get the connectors status and report it as boolean type */
+static int mixer_ctl_connector_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *cval = kcontrol->private_data;
+ struct snd_usb_audio *chip = cval->head.mixer->chip;
+ int idx = 0, validx, ret, val;
+
+ validx = cval->control << 8 | 0;
+
+ ret = snd_usb_lock_shutdown(chip) ? -EIO : 0;
+ if (ret)
+ goto error;
+
+ idx = snd_usb_ctrl_intf(chip) | (cval->head.id << 8);
+ if (cval->head.mixer->protocol == UAC_VERSION_2) {
+ struct uac2_connectors_ctl_blk uac2_conn;
+
+ ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), UAC2_CS_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ validx, idx, &uac2_conn, sizeof(uac2_conn));
+ val = !!uac2_conn.bNrChannels;
+ } else { /* UAC_VERSION_3 */
+ struct uac3_insertion_ctl_blk uac3_conn;
+
+ ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), UAC2_CS_CUR,
+ USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
+ validx, idx, &uac3_conn, sizeof(uac3_conn));
+ val = !!uac3_conn.bmConInserted;
+ }
+
+ snd_usb_unlock_shutdown(chip);
+
+ if (ret < 0) {
+error:
+ usb_audio_err(chip,
+ "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
+ UAC_GET_CUR, validx, idx, cval->val_type);
+ return ret;
+ }
+
+ ucontrol->value.integer.value[0] = val;
+ return 0;
+}
+
static struct snd_kcontrol_new usb_feature_unit_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later manually */
@@ -1284,6 +1412,15 @@ static struct snd_kcontrol_new usb_bool_master_control_ctl_ro = {
.put = NULL,
};
+static const struct snd_kcontrol_new usb_connector_ctl_ro = {
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .name = "", /* will be filled later manually */
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = snd_ctl_boolean_mono_info,
+ .get = mixer_ctl_connector_get,
+ .put = NULL,
+};
+
/*
* This symbol is exported in order to allow the mixer quirks to
* hook up to the standard feature unit control mechanism
@@ -1530,17 +1667,25 @@ static void build_connector_control(struct mixer_build *state,
return;
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, term->id);
/*
- * The first byte from reading the UAC2_TE_CONNECTOR control returns the
- * number of channels connected. This boolean ctl will simply report
- * if any channels are connected or not.
- * (Audio20_final.pdf Table 5-10: Connector Control CUR Parameter Block)
+ * UAC2: The first byte from reading the UAC2_TE_CONNECTOR control returns the
+ * number of channels connected.
+ *
+ * UAC3: The first byte specifies size of bitmap for the inserted controls. The
+ * following byte(s) specifies which connectors are inserted.
+ *
+ * This boolean ctl will simply report if any channels are connected
+ * or not.
*/
- cval->control = UAC2_TE_CONNECTOR;
+ if (state->mixer->protocol == UAC_VERSION_2)
+ cval->control = UAC2_TE_CONNECTOR;
+ else /* UAC_VERSION_3 */
+ cval->control = UAC3_TE_INSERTION;
+
cval->val_type = USB_MIXER_BOOLEAN;
cval->channels = 1; /* report true if any channel is connected */
cval->min = 0;
cval->max = 1;
- kctl = snd_ctl_new1(&usb_bool_master_control_ctl_ro, cval);
+ kctl = snd_ctl_new1(&usb_connector_ctl_ro, cval);
if (!kctl) {
usb_audio_err(state->chip, "cannot malloc kcontrol\n");
kfree(cval);
@@ -1802,11 +1947,10 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
*/
static void build_mixer_unit_ctl(struct mixer_build *state,
struct uac_mixer_unit_descriptor *desc,
- int in_pin, int in_ch, int unitid,
- struct usb_audio_term *iterm)
+ int in_pin, int in_ch, int num_outs,
+ int unitid, struct usb_audio_term *iterm)
{
struct usb_mixer_elem_info *cval;
- unsigned int num_outs = uac_mixer_unit_bNrChannels(desc);
unsigned int i, len;
struct snd_kcontrol *kctl;
const struct usbmix_name_map *map;
@@ -1859,16 +2003,28 @@ static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
void *raw_desc)
{
struct usb_audio_term iterm;
- struct uac2_input_terminal_descriptor *d = raw_desc;
+ unsigned int control, bmctls, term_id;
- check_input_term(state, d->bTerminalID, &iterm);
if (state->mixer->protocol == UAC_VERSION_2) {
- /* Check for jack detection. */
- if (uac_v2v3_control_is_readable(le16_to_cpu(d->bmControls),
- UAC2_TE_CONNECTOR)) {
- build_connector_control(state, &iterm, true);
- }
+ struct uac2_input_terminal_descriptor *d_v2 = raw_desc;
+ control = UAC2_TE_CONNECTOR;
+ term_id = d_v2->bTerminalID;
+ bmctls = le16_to_cpu(d_v2->bmControls);
+ } else if (state->mixer->protocol == UAC_VERSION_3) {
+ struct uac3_input_terminal_descriptor *d_v3 = raw_desc;
+ control = UAC3_TE_INSERTION;
+ term_id = d_v3->bTerminalID;
+ bmctls = le32_to_cpu(d_v3->bmControls);
+ } else {
+ return 0; /* UAC1. No Insertion control */
}
+
+ check_input_term(state, term_id, &iterm);
+
+ /* Check for jack detection. */
+ if (uac_v2v3_control_is_readable(bmctls, control))
+ build_connector_control(state, &iterm, true);
+
return 0;
}
@@ -1883,14 +2039,17 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
int input_pins, num_ins, num_outs;
int pin, ich, err;
- if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
- !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
+ err = uac_mixer_unit_get_channels(state, desc);
+ if (err < 0) {
usb_audio_err(state->chip,
"invalid MIXER UNIT descriptor %d\n",
unitid);
- return -EINVAL;
+ return err;
}
+ num_outs = err;
+ input_pins = desc->bNrInPins;
+
num_ins = 0;
ich = 0;
for (pin = 0; pin < input_pins; pin++) {
@@ -1917,7 +2076,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
}
}
if (ich_has_controls)
- build_mixer_unit_ctl(state, desc, pin, ich,
+ build_mixer_unit_ctl(state, desc, pin, ich, num_outs,
unitid, &iterm);
}
}
@@ -2072,7 +2231,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
char *name)
{
struct uac_processing_unit_descriptor *desc = raw_desc;
- int num_ins = desc->bNrInPins;
+ int num_ins;
struct usb_mixer_elem_info *cval;
struct snd_kcontrol *kctl;
int i, err, nameid, type, len;
@@ -2087,7 +2246,13 @@ static int build_audio_procunit(struct mixer_build *state, int unitid,
0, NULL, default_value_info
};
- if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
+ if (desc->bLength < 13) {
+ usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
+ return -EINVAL;
+ }
+
+ num_ins = desc->bNrInPins;
+ if (desc->bLength < 13 + num_ins ||
desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
usb_audio_err(state->chip, "invalid %s descriptor (id %d)\n", name, unitid);
return -EINVAL;
@@ -2454,7 +2619,7 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
} else { /* UAC_VERSION_3 */
switch (p1[2]) {
case UAC_INPUT_TERMINAL:
- return 0; /* NOP */
+ return parse_audio_input_terminal(state, unitid, p1);
case UAC3_MIXER_UNIT:
return parse_audio_mixer_unit(state, unitid, p1);
case UAC3_CLOCK_SOURCE:
@@ -2592,6 +2757,12 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
err = parse_audio_unit(&state, desc->bCSourceID);
if (err < 0 && err != -EINVAL)
return err;
+
+ if (uac_v2v3_control_is_readable(le32_to_cpu(desc->bmControls),
+ UAC3_TE_INSERTION)) {
+ build_connector_control(&state, &state.oterm,
+ false);
+ }
}
}
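
The Jabra volume quirk values above can be sanity-checked against the USB audio convention that volumes are encoded in units of 1/256 dB (a small standalone check; the raw endpoints come from the comments in the hunk):

    #include <stdio.h>

    int main(void)
    {
            int raw_min = (short)0xdc00;   /* -9216 -> -36 dB */
            int raw_max = 0x0800;          /*  2048 ->  +8 dB */
            int res = 451;

            printf("step %.2f dB, %.1f steps across -36..+8 dB\n",
                   res / 256.0, (double)(raw_max - raw_min) / res);
            return 0;
    }

This prints a step of about 1.76 dB and roughly 25 steps, matching the "25 steps between -36 and +8 dB" comment in the quirk.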
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 545d99b09706b3..9369160415b84d 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -64,6 +64,7 @@ struct usb_mixer_elem_info {
int min, max, res;
int dBmin, dBmax;
int cached;
+ int cache_disabled;
int cache_val[MAX_CHANNELS];
u8 initialized;
u8 min_mute;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 5d2fc5f58bfe58..b756000b6780ed 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1720,6 +1720,24 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+static int snd_jabra_controls_tweak(struct usb_mixer_interface *mixer)
+{
+ struct usb_mixer_elem_list *list;
+ struct usb_mixer_elem_info *cval;
+ int unitids[] = { 2 /* Playback */, 5 /* Capture */ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(unitids); i++)
+ for (list = mixer->id_elems[unitids[i]]; list;
+ list = list->next_id_elem) {
+ cval = (struct usb_mixer_elem_info *)list;
+ cval->cache_disabled = 1;
+ cval->cached = 0; /* invalidate current cached value */
+ }
+
+ return 0;
+}
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -1773,6 +1791,11 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
err = snd_mbox1_create_sync_switch(mixer);
break;
+ case USB_ID(0x0b0e, 0x0412): /* Jabra Speakerphone 410 */
+ case USB_ID(0x0b0e, 0x0420): /* Jabra Speakerphone 510 */
+ err = snd_jabra_controls_tweak(mixer);
+ break;
+
case USB_ID(0x17cc, 0x1011): /* Traktor Audio 6 */
err = snd_nativeinstruments_create_mixer(mixer,
snd_nativeinstruments_ta6_mixers,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 8e8db4ddf36542..1ea1384bc23697 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -313,6 +313,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
return 0;
}
+/* Set up an implicit feedback endpoint from a quirk. Returns 0 if no quirk
+ * applies. Returns 1 if a quirk was found.
+ */
static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
struct usb_device *dev,
struct usb_interface_descriptor *altsd,
@@ -381,7 +384,7 @@ add_sync_ep:
subs->data_endpoint->sync_master = subs->sync_endpoint;
- return 0;
+ return 1;
}
static int set_sync_endpoint(struct snd_usb_substream *subs,
@@ -420,6 +423,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
if (err < 0)
return err;
+ /* endpoint set by quirk */
+ if (err > 0)
+ return 0;
+
if (altsd->bNumEndpoints < 2)
return 0;
@@ -1300,7 +1307,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs,
if (bytes % (runtime->sample_bits >> 3) != 0) {
int oldbytes = bytes;
bytes = frames * stride;
- dev_warn(&subs->dev->dev,
+ dev_warn_ratelimited(&subs->dev->dev,
"Corrected urb data len. %d->%d\n",
oldbytes, bytes);
}
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 69bf5cf1e91ef1..d32727c74a1687 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2875,7 +2875,8 @@ YAMAHA_DEVICE(0x7010, "UB99"),
*/
#define AU0828_DEVICE(vid, pid, vname, pname) { \
- USB_DEVICE_VENDOR_SPEC(vid, pid), \
+ .idVendor = vid, \
+ .idProduct = pid, \
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
USB_DEVICE_ID_MATCH_INT_CLASS | \
USB_DEVICE_ID_MATCH_INT_SUBCLASS, \
@@ -3320,6 +3321,9 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
}
}
},
+ {
+ .ifnum = -1
+ },
}
}
},
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
new file mode 100644
index 00000000000000..a5fa3195a230f5
--- /dev/null
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -0,0 +1,336 @@
+#ifndef _ASM_X86_CPUFEATURES_H
+#define _ASM_X86_CPUFEATURES_H
+
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#include <asm/required-features.h>
+#endif
+
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
+/*
+ * Defines x86 CPU feature bits
+ */
+#define NCAPINTS 19 /* N 32-bit words worth of info */
+#define NBUGINTS 1 /* N 32-bit bug flags */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name. If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */
+ /* (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */
+/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+/* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */
+#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT (6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+
+#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_INVPCID_SINGLE ( 7*32+ 4) /* Effectively INVPCID && CR4.PCIDE=1 */
+
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+
+#define X86_FEATURE_RETPOLINE ( 7*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_AMD ( 7*32+13) /* "" AMD Retpoline mitigation for Spectre variant 2 */
+
+#define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+
+/* Because the ALTERNATIVE scheme is for members of the X86_FEATURE club... */
+#define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */
+
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled*/
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation */
+
+#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+
+/* Virtualization flags: Linux defined, word 8 */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer vmmcall to vmcall */
+#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT ( 9*32+22) /* PCOMMIT instruction */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+
+/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+#define X86_FEATURE_CLZERO (13*32+0) /* CLZERO instruction */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
+#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
+#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x) (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+
+#endif /* _ASM_X86_CPUFEATURES_H */
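
The ( word*32 + bit ) encoding used throughout this header packs every flag into one of NCAPINTS 32-bit capability words. A sketch of the indexing a feature test performs (the kernel's cpu_has()/boot_cpu_has() machinery adds required/disabled-mask short-circuits on top of this):

    #include <stdint.h>

    #define NCAPINTS 19

    static inline int has_feature(const uint32_t caps[NCAPINTS],
                                  unsigned int feature)
    {
            return (caps[feature / 32] >> (feature % 32)) & 1;
    }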
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
new file mode 100644
index 00000000000000..1f8cca459c6c8c
--- /dev/null
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -0,0 +1,65 @@
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#define _ASM_X86_DISABLED_FEATURES_H
+
+/* These features, although they might be available in a CPU,
+ * will not be used because the compile options to support
+ * them are not present.
+ *
+ * This code allows them to be checked and disabled at
+ * compile time without an explicit #ifdef. Use
+ * cpu_feature_enabled().
+ */
+
+#ifdef CONFIG_X86_INTEL_MPX
+# define DISABLE_MPX 0
+#else
+# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
+#endif
+
+#ifdef CONFIG_X86_64
+# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
+# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
+# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
+# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
+# define DISABLE_PCID 0
+#else
+# define DISABLE_VME 0
+# define DISABLE_K6_MTRR 0
+# define DISABLE_CYRIX_ARR 0
+# define DISABLE_CENTAUR_MCR 0
+# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
+#endif /* CONFIG_X86_64 */
+
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+# define DISABLE_PKU 0
+# define DISABLE_OSPKE 0
+#else
+# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
+# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
+#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */
+
+/*
+ * Make sure to add features to the correct mask
+ */
+#define DISABLED_MASK0 (DISABLE_VME)
+#define DISABLED_MASK1 0
+#define DISABLED_MASK2 0
+#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
+#define DISABLED_MASK4 (DISABLE_PCID)
+#define DISABLED_MASK5 0
+#define DISABLED_MASK6 0
+#define DISABLED_MASK7 0
+#define DISABLED_MASK8 0
+#define DISABLED_MASK9 (DISABLE_MPX)
+#define DISABLED_MASK10 0
+#define DISABLED_MASK11 0
+#define DISABLED_MASK12 0
+#define DISABLED_MASK13 0
+#define DISABLED_MASK14 0
+#define DISABLED_MASK15 0
+#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)
+#define DISABLED_MASK17 0
+#define DISABLED_MASK18 0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+
+#endif /* _ASM_X86_DISABLED_FEATURES_H */
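
A sketch of the compile-time short-circuit these DISABLED_MASKn words enable (the macro names here are illustrative, not the kernel's): when a feature's bit is set in its word's mask, the whole test constant-folds to false and no runtime check is emitted.

    #define FEATURE_WORD(f)  ((f) / 32)
    #define FEATURE_BIT(f)   (1u << ((f) % 32))

    /* disabled[] stands in for the DISABLED_MASK0..18 table above */
    #define FEATURE_ENABLED(f, disabled, runtime_test) \
            (((disabled)[FEATURE_WORD(f)] & FEATURE_BIT(f)) ? 0 : (runtime_test))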
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h
new file mode 100644
index 00000000000000..6847d85400a8b7
--- /dev/null
+++ b/tools/arch/x86/include/asm/required-features.h
@@ -0,0 +1,106 @@
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#define _ASM_X86_REQUIRED_FEATURES_H
+
+/* Define minimum CPUID feature set for the kernel. These bits are checked
+ really early to actually display a visible error message before the
+ kernel dies. Make sure to assign features to the proper mask!
+
+ Some requirements that are not in CPUID yet are also in the
+ CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.
+
+ The real information is in arch/x86/Kconfig.cpu, this just converts
+ the CONFIGs into a bitmask */
+
+#ifndef CONFIG_MATH_EMULATION
+# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
+#else
+# define NEED_FPU 0
+#endif
+
+#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
+# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
+#else
+# define NEED_PAE 0
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
+#else
+# define NEED_CX8 0
+#endif
+
+#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
+# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
+#else
+# define NEED_CMOV 0
+#endif
+
+#ifdef CONFIG_X86_USE_3DNOW
+# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
+#else
+# define NEED_3DNOW 0
+#endif
+
+#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
+# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
+#else
+# define NEED_NOPL 0
+#endif
+
+#ifdef CONFIG_MATOM
+# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
+#else
+# define NEED_MOVBE 0
+#endif
+
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_PARAVIRT
+/* Paravirtualized systems may not have PSE or PGE available */
+#define NEED_PSE 0
+#define NEED_PGE 0
+#else
+#define NEED_PSE	(1<<(X86_FEATURE_PSE & 31))
+#define NEED_PGE	(1<<(X86_FEATURE_PGE & 31))
+#endif
+#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
+#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
+#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
+#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
+#define NEED_LM (1<<(X86_FEATURE_LM & 31))
+#else
+#define NEED_PSE 0
+#define NEED_MSR 0
+#define NEED_PGE 0
+#define NEED_FXSR 0
+#define NEED_XMM 0
+#define NEED_XMM2 0
+#define NEED_LM 0
+#endif
+
+#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
+ NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
+ NEED_XMM|NEED_XMM2)
+#define SSE_MASK (NEED_XMM|NEED_XMM2)
+
+#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)
+
+#define REQUIRED_MASK2 0
+#define REQUIRED_MASK3 (NEED_NOPL)
+#define REQUIRED_MASK4 (NEED_MOVBE)
+#define REQUIRED_MASK5 0
+#define REQUIRED_MASK6 0
+#define REQUIRED_MASK7 0
+#define REQUIRED_MASK8 0
+#define REQUIRED_MASK9 0
+#define REQUIRED_MASK10 0
+#define REQUIRED_MASK11 0
+#define REQUIRED_MASK12 0
+#define REQUIRED_MASK13 0
+#define REQUIRED_MASK14 0
+#define REQUIRED_MASK15 0
+#define REQUIRED_MASK16 0
+#define REQUIRED_MASK17 0
+#define REQUIRED_MASK18 0
+#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19)
+
+#endif /* _ASM_X86_REQUIRED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/unistd_32.h b/tools/arch/x86/include/asm/unistd_32.h
new file mode 100644
index 00000000000000..cf33ab09273df6
--- /dev/null
+++ b/tools/arch/x86/include/asm/unistd_32.h
@@ -0,0 +1,9 @@
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 336
+#endif
+#ifndef __NR_futex
+# define __NR_futex 240
+#endif
+#ifndef __NR_gettid
+# define __NR_gettid 224
+#endif
diff --git a/tools/arch/x86/include/asm/unistd_64.h b/tools/arch/x86/include/asm/unistd_64.h
new file mode 100644
index 00000000000000..2c9835695b56e0
--- /dev/null
+++ b/tools/arch/x86/include/asm/unistd_64.h
@@ -0,0 +1,9 @@
+#ifndef __NR_perf_event_open
+# define __NR_perf_event_open 298
+#endif
+#ifndef __NR_futex
+# define __NR_futex 202
+#endif
+#ifndef __NR_gettid
+# define __NR_gettid 186
+#endif
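
These headers only supply fallback syscall numbers; a sketch of how a tool then invokes the call directly when libc lacks a wrapper (gettid_compat is a hypothetical helper name):

    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>

    #ifndef __NR_gettid
    # define __NR_gettid 186   /* x86_64 value, matching the header above */
    #endif

    static pid_t gettid_compat(void)
    {
            return (pid_t)syscall(__NR_gettid);
    }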
diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
new file mode 100644
index 00000000000000..a0de849435ad69
--- /dev/null
+++ b/tools/arch/x86/lib/memcpy_64.S
@@ -0,0 +1,179 @@
+/* Copyright 2002 Andi Kleen */
+
+#include <linux/linkage.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+
+/*
+ * We build a jump to memcpy_orig by default which gets NOPped out on
+ * the majority of x86 CPUs which set REP_GOOD. On CPUs which have the
+ * enhanced REP MOVSB/STOSB feature (ERMS), those NOPs are changed to a
+ * jmp to memcpy_erms, which does the REP; MOVSB mem copy.
+ */
+
+.weak memcpy
+
+/*
+ * memcpy - Copy a memory block.
+ *
+ * Input:
+ * rdi destination
+ * rsi source
+ * rdx count
+ *
+ * Output:
+ * rax original destination
+ */
+ENTRY(__memcpy)
+ENTRY(memcpy)
+ ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
+ "jmp memcpy_erms", X86_FEATURE_ERMS
+
+ movq %rdi, %rax
+ movq %rdx, %rcx
+ shrq $3, %rcx
+ andl $7, %edx
+ rep movsq
+ movl %edx, %ecx
+ rep movsb
+ ret
+ENDPROC(memcpy)
+ENDPROC(__memcpy)
+
+/*
+ * memcpy_erms() - enhanced fast string memcpy. This is faster and
+ * simpler than memcpy. Use memcpy_erms when possible.
+ */
+ENTRY(memcpy_erms)
+ movq %rdi, %rax
+ movq %rdx, %rcx
+ rep movsb
+ ret
+ENDPROC(memcpy_erms)
+
+ENTRY(memcpy_orig)
+ movq %rdi, %rax
+
+ cmpq $0x20, %rdx
+ jb .Lhandle_tail
+
+ /*
+ * We check whether memory false dependence could occur,
+ * then jump to corresponding copy mode.
+ */
+ cmp %dil, %sil
+ jl .Lcopy_backward
+ subq $0x20, %rdx
+.Lcopy_forward_loop:
+ subq $0x20, %rdx
+
+ /*
+ * Move in blocks of 4x8 bytes:
+ */
+ movq 0*8(%rsi), %r8
+ movq 1*8(%rsi), %r9
+ movq 2*8(%rsi), %r10
+ movq 3*8(%rsi), %r11
+ leaq 4*8(%rsi), %rsi
+
+ movq %r8, 0*8(%rdi)
+ movq %r9, 1*8(%rdi)
+ movq %r10, 2*8(%rdi)
+ movq %r11, 3*8(%rdi)
+ leaq 4*8(%rdi), %rdi
+ jae .Lcopy_forward_loop
+ addl $0x20, %edx
+ jmp .Lhandle_tail
+
+.Lcopy_backward:
+ /*
+ * Calculate copy position to tail.
+ */
+ addq %rdx, %rsi
+ addq %rdx, %rdi
+ subq $0x20, %rdx
+ /*
+	 * At most 3 ALU operations can issue in one cycle, so pad with
+	 * NOPs within the same 16-byte chunk.
+ */
+ .p2align 4
+.Lcopy_backward_loop:
+ subq $0x20, %rdx
+ movq -1*8(%rsi), %r8
+ movq -2*8(%rsi), %r9
+ movq -3*8(%rsi), %r10
+ movq -4*8(%rsi), %r11
+ leaq -4*8(%rsi), %rsi
+ movq %r8, -1*8(%rdi)
+ movq %r9, -2*8(%rdi)
+ movq %r10, -3*8(%rdi)
+ movq %r11, -4*8(%rdi)
+ leaq -4*8(%rdi), %rdi
+ jae .Lcopy_backward_loop
+
+ /*
+ * Calculate copy position to head.
+ */
+ addl $0x20, %edx
+ subq %rdx, %rsi
+ subq %rdx, %rdi
+.Lhandle_tail:
+ cmpl $16, %edx
+ jb .Lless_16bytes
+
+ /*
+ * Move data from 16 bytes to 31 bytes.
+ */
+ movq 0*8(%rsi), %r8
+ movq 1*8(%rsi), %r9
+ movq -2*8(%rsi, %rdx), %r10
+ movq -1*8(%rsi, %rdx), %r11
+ movq %r8, 0*8(%rdi)
+ movq %r9, 1*8(%rdi)
+ movq %r10, -2*8(%rdi, %rdx)
+ movq %r11, -1*8(%rdi, %rdx)
+ retq
+ .p2align 4
+.Lless_16bytes:
+ cmpl $8, %edx
+ jb .Lless_8bytes
+ /*
+ * Move data from 8 bytes to 15 bytes.
+ */
+ movq 0*8(%rsi), %r8
+ movq -1*8(%rsi, %rdx), %r9
+ movq %r8, 0*8(%rdi)
+ movq %r9, -1*8(%rdi, %rdx)
+ retq
+ .p2align 4
+.Lless_8bytes:
+ cmpl $4, %edx
+ jb .Lless_3bytes
+
+ /*
+ * Move data from 4 bytes to 7 bytes.
+ */
+ movl (%rsi), %ecx
+ movl -4(%rsi, %rdx), %r8d
+ movl %ecx, (%rdi)
+ movl %r8d, -4(%rdi, %rdx)
+ retq
+ .p2align 4
+.Lless_3bytes:
+ subl $1, %edx
+ jb .Lend
+ /*
+ * Move data from 1 bytes to 3 bytes.
+	 * Move data from 1 byte to 3 bytes.
+ movzbl (%rsi), %ecx
+ jz .Lstore_1byte
+ movzbq 1(%rsi), %r8
+ movzbq (%rsi, %rdx), %r9
+ movb %r8b, 1(%rdi)
+ movb %r9b, (%rdi, %rdx)
+.Lstore_1byte:
+ movb %cl, (%rdi)
+
+.Lend:
+ retq
+ENDPROC(memcpy_orig)
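
The ALTERNATIVE_2 line at the top of memcpy patches the jump once at boot according to CPU feature bits, so every later call runs the chosen variant with no dispatch cost. The closest userspace analogue is resolving a function pointer once (a sketch; the *_impl names are hypothetical stand-ins for the three variants above):

    #include <stddef.h>

    typedef void *(*memcpy_fn)(void *dst, const void *src, size_t len);

    /* hypothetical implementations corresponding to memcpy_erms,
     * the REP_GOOD rep-movsq body, and memcpy_orig */
    extern void *memcpy_erms_impl(void *, const void *, size_t);
    extern void *memcpy_rep_impl(void *, const void *, size_t);
    extern void *memcpy_orig_impl(void *, const void *, size_t);

    static memcpy_fn resolve_memcpy(int has_erms, int has_rep_good)
    {
            if (has_erms)
                    return memcpy_erms_impl;
            if (has_rep_good)
                    return memcpy_rep_impl;
            return memcpy_orig_impl;
    }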
diff --git a/tools/arch/x86/lib/memset_64.S b/tools/arch/x86/lib/memset_64.S
new file mode 100644
index 00000000000000..c9c81227ea37d1
--- /dev/null
+++ b/tools/arch/x86/lib/memset_64.S
@@ -0,0 +1,138 @@
+/* Copyright 2002 Andi Kleen, SuSE Labs */
+
+#include <linux/linkage.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative-asm.h>
+
+.weak memset
+
+/*
+ * ISO C memset - set a memory block to a byte value. This function uses fast
+ * string instructions to get better performance than the original function.
+ * The code is simpler and shorter than the original function as well.
+ *
+ * rdi destination
+ * rsi value (char)
+ * rdx count (bytes)
+ *
+ * rax original destination
+ */
+ENTRY(memset)
+ENTRY(__memset)
+ /*
+	 * Some CPUs support the enhanced REP MOVSB/STOSB feature (ERMS). It is recommended
+ * to use it when possible. If not available, use fast string instructions.
+ *
+ * Otherwise, use original memset function.
+ */
+ ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
+ "jmp memset_erms", X86_FEATURE_ERMS
+
+ movq %rdi,%r9
+ movq %rdx,%rcx
+ andl $7,%edx
+ shrq $3,%rcx
+ /* expand byte value */
+ movzbl %sil,%esi
+ movabs $0x0101010101010101,%rax
+ imulq %rsi,%rax
+ rep stosq
+ movl %edx,%ecx
+ rep stosb
+ movq %r9,%rax
+ ret
+ENDPROC(memset)
+ENDPROC(__memset)
+
+/*
+ * ISO C memset - set a memory block to a byte value. This function uses
+ * enhanced rep stosb to override the fast string function.
+ * The code is simpler and shorter than the fast string function as well.
+ *
+ * rdi destination
+ * rsi value (char)
+ * rdx count (bytes)
+ *
+ * rax original destination
+ */
+ENTRY(memset_erms)
+ movq %rdi,%r9
+ movb %sil,%al
+ movq %rdx,%rcx
+ rep stosb
+ movq %r9,%rax
+ ret
+ENDPROC(memset_erms)
+
+ENTRY(memset_orig)
+ movq %rdi,%r10
+
+ /* expand byte value */
+ movzbl %sil,%ecx
+ movabs $0x0101010101010101,%rax
+ imulq %rcx,%rax
+
+ /* align dst */
+ movl %edi,%r9d
+ andl $7,%r9d
+ jnz .Lbad_alignment
+.Lafter_bad_alignment:
+
+ movq %rdx,%rcx
+ shrq $6,%rcx
+ jz .Lhandle_tail
+
+ .p2align 4
+.Lloop_64:
+ decq %rcx
+ movq %rax,(%rdi)
+ movq %rax,8(%rdi)
+ movq %rax,16(%rdi)
+ movq %rax,24(%rdi)
+ movq %rax,32(%rdi)
+ movq %rax,40(%rdi)
+ movq %rax,48(%rdi)
+ movq %rax,56(%rdi)
+ leaq 64(%rdi),%rdi
+ jnz .Lloop_64
+
+	/* Handle tail in loops. The loops should be faster than
+	   hard-to-predict jump tables. */
+ .p2align 4
+.Lhandle_tail:
+ movl %edx,%ecx
+ andl $63&(~7),%ecx
+ jz .Lhandle_7
+ shrl $3,%ecx
+ .p2align 4
+.Lloop_8:
+ decl %ecx
+ movq %rax,(%rdi)
+ leaq 8(%rdi),%rdi
+ jnz .Lloop_8
+
+.Lhandle_7:
+ andl $7,%edx
+ jz .Lende
+ .p2align 4
+.Lloop_1:
+ decl %edx
+ movb %al,(%rdi)
+ leaq 1(%rdi),%rdi
+ jnz .Lloop_1
+
+.Lende:
+ movq %r10,%rax
+ ret
+
+.Lbad_alignment:
+ cmpq $7,%rdx
+ jbe .Lhandle_7
+ movq %rax,(%rdi) /* unaligned store */
+ movq $8,%r8
+ subq %r9,%r8
+ addq %r8,%rdi
+ subq %r8,%rdx
+ jmp .Lafter_bad_alignment
+.Lfinal:
+ENDPROC(memset_orig)
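
Both memset variants above expand the fill byte with a single multiply: a byte times 0x0101010101010101 replicates it into all eight lanes of a 64-bit word. A small C check of that step (a sketch, not from the patch):

#include <assert.h>
#include <stdint.h>

/* Replicate one byte into all eight byte lanes of a 64-bit word,
 * matching the movzbl/movabs/imulq sequence in memset above. */
static uint64_t expand_byte(unsigned char c)
{
	return (uint64_t)c * 0x0101010101010101ULL;
}

int main(void)
{
	assert(expand_byte(0x00) == 0);
	assert(expand_byte(0xab) == 0xabababababababababULL);
	return 0;
}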
diff --git a/tools/build/Build.include b/tools/build/Build.include
index 4d000bc959b495..0340d8a51daba8 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -12,6 +12,7 @@
# Convenient variables
comma := ,
squote := '
+pound := \#
###
# Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
@@ -43,11 +44,11 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
###
# Replace >$< with >$$< to preserve $ when reloading the .cmd file
# (needed for make)
-# Replace >#< with >\#< to avoid starting a comment in the .cmd file
+# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
# (needed for make)
# Replace >'< with >'\''< to be able to enclose the whole string in '...'
# (needed for the shell)
-make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
+make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
###
# Find any prerequisites that is newer than target or that does not exist.
@@ -62,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
$(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
rm -f $(depfile); \
mv -f $(dot-target).tmp $(dot-target).cmd, \
- printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
- printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
+ printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+ printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \
cat $(depfile) >> $(dot-target).cmd; \
printf '%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 60a94b3e532e4d..17748006681686 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -286,7 +286,7 @@ static int kvp_key_delete(int pool, const __u8 *key, int key_size)
* Found a match; just move the remaining
* entries up.
*/
- if (i == num_records) {
+ if (i == (num_records - 1)) {
kvp_file_info[pool].num_records--;
kvp_update_file(pool);
return 0;
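
The hunk above fixes an off-by-one: i is a zero-based index, so the last record is num_records - 1, and comparing against num_records could never match. A standalone sketch of the corrected delete-by-shift logic (hypothetical types, for illustration only):

#include <string.h>

struct record { char key[64]; char value[64]; };

/* Remove entry i from a packed array of n records; returns the new
 * count. The last entry (i == n - 1) needs no shifting, only the
 * decrement: note the comparison is against n - 1, never n. */
static int delete_record(struct record *recs, int n, int i)
{
	if (i < n - 1)
		memmove(&recs[i], &recs[i + 1],
			(size_t)(n - i - 1) * sizeof(*recs));
	return n - 1;
}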
diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/include/asm/alternative-asm.h
index 3a3a0f16456ae3..2a4d1bfa298848 100644
--- a/tools/perf/util/include/asm/alternative-asm.h
+++ b/tools/include/asm/alternative-asm.h
@@ -1,5 +1,5 @@
-#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
-#define _PERF_ASM_ALTERNATIVE_ASM_H
+#ifndef _TOOLS_ASM_ALTERNATIVE_ASM_H
+#define _TOOLS_ASM_ALTERNATIVE_ASM_H
/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 39c38cb45b00f8..358b810057d6dc 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -11,6 +11,11 @@ tools/arch/sparc/include/asm/barrier_32.h
tools/arch/sparc/include/asm/barrier_64.h
tools/arch/tile/include/asm/barrier.h
tools/arch/x86/include/asm/barrier.h
+tools/arch/x86/include/asm/cpufeatures.h
+tools/arch/x86/include/asm/disabled-features.h
+tools/arch/x86/include/asm/required-features.h
+tools/arch/x86/lib/memcpy_64.S
+tools/arch/x86/lib/memset_64.S
tools/arch/xtensa/include/asm/barrier.h
tools/scripts
tools/build
@@ -25,6 +30,7 @@ tools/lib/rbtree.c
tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
+tools/include/asm/alternative-asm.h
tools/include/asm/atomic.h
tools/include/asm/barrier.h
tools/include/asm/bug.h
@@ -65,8 +71,6 @@ include/linux/swab.h
arch/*/include/asm/unistd*.h
arch/*/include/uapi/asm/unistd*.h
arch/*/include/uapi/asm/perf_regs.h
-arch/*/lib/memcpy*.S
-arch/*/lib/memset*.S
include/linux/poison.h
include/linux/hw_breakpoint.h
include/uapi/linux/perf_event.h
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index dcd9a70c7193b4..55933b2eb93248 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -69,10 +69,10 @@ all tags TAGS:
$(make)
#
-# The clean target is not really parallel, don't print the jobs info:
+# Explicitly disable parallelism for the clean target.
#
clean:
- $(make)
+ $(make) -j1
#
# The build-test target is not really parallel, don't print the jobs info:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index b67e006d56cc83..7e0837579f4086 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -310,6 +310,21 @@ export srctree OUTPUT RM CC LD AR CFLAGS V BISON FLEX AWK
include $(srctree)/tools/build/Makefile.include
$(PERF_IN): prepare FORCE
+ @(test -f ../../arch/x86/include/asm/disabled-features.h && ( \
+ (diff -B ../arch/x86/include/asm/disabled-features.h ../../arch/x86/include/asm/disabled-features.h >/dev/null) \
+ || echo "Warning: tools/arch/x86/include/asm/disabled-features.h differs from kernel" >&2 )) || true
+ @(test -f ../../arch/x86/include/asm/required-features.h && ( \
+ (diff -B ../arch/x86/include/asm/required-features.h ../../arch/x86/include/asm/required-features.h >/dev/null) \
+ || echo "Warning: tools/arch/x86/include/asm/required-features.h differs from kernel" >&2 )) || true
+ @(test -f ../../arch/x86/include/asm/cpufeatures.h && ( \
+ (diff -B ../arch/x86/include/asm/cpufeatures.h ../../arch/x86/include/asm/cpufeatures.h >/dev/null) \
+ || echo "Warning: tools/arch/x86/include/asm/cpufeatures.h differs from kernel" >&2 )) || true
+ @(test -f ../../arch/x86/lib/memcpy_64.S && ( \
+ (diff -B ../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memcpy_64.S >/dev/null) \
+ || echo "Warning: tools/arch/x86/lib/memcpy_64.S differs from kernel" >&2 )) || true
+ @(test -f ../../arch/x86/lib/memset_64.S && ( \
+ (diff -B ../arch/x86/lib/memset_64.S ../../arch/x86/lib/memset_64.S >/dev/null) \
+ || echo "Warning: tools/arch/x86/lib/memset_64.S differs from kernel" >&2 )) || true
$(Q)$(MAKE) $(build)=perf
$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 0c370f81e00280..9a53f6e9ef43e8 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -58,9 +58,13 @@ static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
}
/*
- * Check if return address is on the stack.
+ * Check if return address is on the stack. If return address
+ * is in a register (typically R0), it is yet to be saved on
+ * the stack.
*/
- if (nops != 0 || ops != NULL)
+ if ((nops != 0 || ops != NULL) &&
+ !(nops == 1 && ops[0].atom == DW_OP_regx &&
+ ops[0].number2 == 0 && ops[0].offset == 0))
return 0;
/*
@@ -243,10 +247,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
u64 ip;
u64 skip_slot = -1;
- if (chain->nr < 3)
+ if (!chain || chain->nr < 3)
return skip_slot;
- ip = chain->ips[2];
+ ip = chain->ips[1];
thread__find_addr_location(thread, PERF_RECORD_MISC_USER,
MAP__FUNCTION, ip, &al);
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index bbc1a50768dd5d..873f19f1a771e9 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -27,15 +27,16 @@ void arch__elf_sym_adjust(GElf_Sym *sym)
#endif
#endif
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
int arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
char *sym = syma->name;
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Skip over any initial dot */
if (*sym == '.')
sym++;
+#endif
/* Avoid "SyS" kernel syscall aliases */
if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -46,6 +47,7 @@ int arch__choose_best_symbol(struct symbol *syma,
return SYMBOL_A;
}
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Allow matching against dot variants */
int arch__compare_symbol_names(const char *namea, const char *nameb)
{
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index c53f787675685a..df21da796fa721 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -471,10 +471,21 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
struct perf_evsel *evsel)
{
int err;
+ char c;
if (!evsel)
return 0;
+ /*
+	 * If supported, force the pass-through config term (pt=1) even if the
+	 * user sets pt=0, which avoids senseless kernel errors.
+ */
+ if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+ !(evsel->attr.config & 1)) {
+ pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
+ evsel->attr.config |= 1;
+ }
+
err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
"cyc_thresh", "caps/psb_cyc",
evsel->attr.config);
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index 14e4e668fad733..f97696a418ccdb 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -146,7 +146,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
if (strstr(cpuid, "Intel")) {
kvm->exit_reasons = vmx_exit_reasons;
kvm->exit_reasons_isa = "VMX";
- } else if (strstr(cpuid, "AMD")) {
+ } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
kvm->exit_reasons = svm_exit_reasons;
kvm->exit_reasons_isa = "SVM";
} else
diff --git a/tools/perf/bench/mem-memcpy-x86-64-asm.S b/tools/perf/bench/mem-memcpy-x86-64-asm.S
index e4c2c30143b951..9d82c44a6d714b 100644
--- a/tools/perf/bench/mem-memcpy-x86-64-asm.S
+++ b/tools/perf/bench/mem-memcpy-x86-64-asm.S
@@ -1,7 +1,7 @@
#define memcpy MEMCPY /* don't hide glibc's memcpy() */
#define altinstr_replacement text
#define globl p2align 4; .globl
-#include "../../../arch/x86/lib/memcpy_64.S"
+#include "../../arch/x86/lib/memcpy_64.S"
/*
* We need to provide note.GNU-stack section, saying that we want
* NOT executable stack. Otherwise the final linking will assume that
diff --git a/tools/perf/bench/mem-memset-x86-64-asm.S b/tools/perf/bench/mem-memset-x86-64-asm.S
index de278784c866a3..58407aa24c1bfd 100644
--- a/tools/perf/bench/mem-memset-x86-64-asm.S
+++ b/tools/perf/bench/mem-memset-x86-64-asm.S
@@ -1,7 +1,7 @@
#define memset MEMSET /* don't hide glibc's memset() */
#define altinstr_replacement text
#define globl p2align 4; .globl
-#include "../../../arch/x86/lib/memset_64.S"
+#include "../../arch/x86/lib/memset_64.S"
/*
* We need to provide note.GNU-stack section, saying that we want
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index de89ec57436171..b92c952b01ef44 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -200,6 +200,7 @@ CFLAGS += -I$(src-perf)/arch/$(ARCH)/include
CFLAGS += -I$(srctree)/tools/include/
CFLAGS += -I$(srctree)/arch/$(ARCH)/include/uapi
CFLAGS += -I$(srctree)/arch/$(ARCH)/include
+CFLAGS += -I$(srctree)/tools/arch/$(ARCH)/include
CFLAGS += -I$(srctree)/include/uapi
CFLAGS += -I$(srctree)/include
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 83a25cef82fdd2..5cee8a3d0455ce 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -11,29 +11,11 @@
#if defined(__i386__)
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 336
-#endif
-#ifndef __NR_futex
-# define __NR_futex 240
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 224
-#endif
#endif
#if defined(__x86_64__)
#define cpu_relax() asm volatile("rep; nop" ::: "memory");
#define CPUINFO_PROC {"model name"}
-#ifndef __NR_perf_event_open
-# define __NR_perf_event_open 298
-#endif
-#ifndef __NR_futex
-# define __NR_futex 202
-#endif
-#ifndef __NR_gettid
-# define __NR_gettid 186
-#endif
#endif
#ifdef __powerpc__
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 90129accffbe82..4341ed267d4e37 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -29,7 +29,9 @@ static inline unsigned long long rdclock(void)
return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}
+#ifndef MAX_NR_CPUS
#define MAX_NR_CPUS 1024
+#endif
extern const char *input_name;
extern bool perf_host, perf_guest;
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 1b02cdc0cab69b..84cb5913b05a44 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -205,14 +205,23 @@ from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 790e413d9a1f39..da474d743b6a2e 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -16,7 +16,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return -1;
}
- is_signed = !!(field->flags | FIELD_IS_SIGNED);
+ is_signed = !!(field->flags & FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
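
The one-character fix above is worth spelling out: flags | FIELD_IS_SIGNED is non-zero for every input, so the signedness check always reported "signed". Only & isolates the bit. A compilable demonstration (the flag value is illustrative):

#include <assert.h>

#define FIELD_IS_SIGNED 0x01	/* illustrative value */

int main(void)
{
	unsigned int flags = 0;	/* the flag is NOT set */

	/* Buggy test: | merges the bit in, so this is always true. */
	assert(!!(flags | FIELD_IS_SIGNED) == 1);

	/* Fixed test: & extracts the bit, correctly yielding 0 here. */
	assert(!!(flags & FIELD_IS_SIGNED) == 0);
	return 0;
}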
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index f5bb096c3bd970..bf67343c779575 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -42,6 +42,7 @@ static int session_write_header(char *path)
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_NRCPUS);
+ perf_header__set_feat(&session->header, HEADER_ARCH);
session->header.data_size += DATA_SIZE;
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index 7f10430af39c3a..4b898b15643dea 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -186,6 +186,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
for (i = 0; i < queues->nr_queues; i++) {
list_splice_tail(&queues->queue_array[i].head,
&queue_array[i].head);
+ queue_array[i].tid = queues->queue_array[i].tid;
+ queue_array[i].cpu = queues->queue_array[i].cpu;
+ queue_array[i].set = queues->queue_array[i].set;
queue_array[i].priv = queues->queue_array[i].priv;
}
@@ -1223,9 +1226,9 @@ static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
}
/* padding must be written by fn() e.g. record__process_auxtrace() */
- padding = size & 7;
+ padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);
if (padding)
- padding = 8 - padding;
+ padding = PERF_AUXTRACE_RECORD_ALIGNMENT - padding;
memset(&ev, 0, sizeof(ev));
ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
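
The padding change above replaces the hard-coded 8 with the named alignment constant; the mask trick works only because the alignment is a power of two. A small sketch of the calculation:

#include <assert.h>
#include <stddef.h>

#define PERF_AUXTRACE_RECORD_ALIGNMENT 8	/* must be a power of two */

/* Bytes of padding needed to round size up to the record alignment,
 * mirroring the two-step computation in __auxtrace_mmap__read(). */
static size_t auxtrace_padding(size_t size)
{
	size_t padding = size & (PERF_AUXTRACE_RECORD_ALIGNMENT - 1);

	return padding ? PERF_AUXTRACE_RECORD_ALIGNMENT - padding : 0;
}

int main(void)
{
	assert(auxtrace_padding(16) == 0);
	assert(auxtrace_padding(17) == 7);
	assert(auxtrace_padding(23) == 1);
	return 0;
}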
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index b86f90db1352a6..b6d6ccf630d9e3 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -37,6 +37,9 @@ struct record_opts;
struct auxtrace_info_event;
struct events_stats;
+/* Auxtrace records must have the same alignment as perf event records */
+#define PERF_AUXTRACE_RECORD_ALIGNMENT 8
+
enum auxtrace_type {
PERF_AUXTRACE_UNKNOWN,
PERF_AUXTRACE_INTEL_PT,
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 10af1e7524fbd2..f1aae86f7f6c87 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -124,7 +124,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
if (!cpu_list)
return cpu_map__read_all_cpu_map();
- if (!isdigit(*cpu_list))
+ /*
+ * must handle the case of empty cpumap to cover
+	 * Must handle the case of an empty cpumap to cover the
+	 * TOPOLOGY header for NUMA nodes with no CPUs
+	 * (e.g., because of CPU hotplug).
+ if (!isdigit(*cpu_list) && *cpu_list != '\0')
goto out;
while (isdigit(*cpu_list)) {
@@ -171,8 +176,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
+ else if (*cpu_list != '\0')
cpus = cpu_map__default_new();
+ else
+ cpus = cpu_map__dummy_new();
invalid:
free(tmp_cpus);
out:
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index 425df5c86c9c9a..42559718667735 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -249,6 +249,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
(strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
(strncmp(name, "[vdso]", 6) == 0) ||
+ (strncmp(name, "[vdso32]", 8) == 0) ||
+ (strncmp(name, "[vdsox32]", 9) == 0) ||
(strncmp(name, "[vsyscall]", 10) == 0)) {
m->kmod = false;
diff --git a/tools/perf/util/include/asm/unistd_32.h b/tools/perf/util/include/asm/unistd_32.h
deleted file mode 100644
index 8b137891791fe9..00000000000000
--- a/tools/perf/util/include/asm/unistd_32.h
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/perf/util/include/asm/unistd_64.h b/tools/perf/util/include/asm/unistd_64.h
deleted file mode 100644
index 8b137891791fe9..00000000000000
--- a/tools/perf/util/include/asm/unistd_64.h
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 0b540b84f8b799..62b38f2ff60d28 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -26,6 +26,7 @@
#include "../cache.h"
#include "../util.h"
+#include "../auxtrace.h"
#include "intel-pt-insn-decoder.h"
#include "intel-pt-pkt-decoder.h"
@@ -111,6 +112,7 @@ struct intel_pt_decoder {
bool have_cyc;
bool fixup_last_mtc;
bool have_last_ip;
+ enum intel_pt_param_flags flags;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@@ -213,6 +215,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
decoder->data = params->data;
decoder->return_compression = params->return_compression;
+ decoder->flags = params->flags;
+
decoder->period = params->period;
decoder->period_type = params->period_type;
@@ -234,19 +238,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
decoder->tsc_ctc_ratio_d;
-
- /*
- * Allow for timestamps appearing to backwards because a TSC
- * packet has slipped past a MTC packet, so allow 2 MTC ticks
- * or ...
- */
- decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
- decoder->tsc_ctc_ratio_n,
- decoder->tsc_ctc_ratio_d);
}
- /* ... or 0x100 paranoia */
- if (decoder->tsc_slip < 0x100)
- decoder->tsc_slip = 0x100;
+
+ /*
+ * A TSC packet can slip past MTC packets so that the timestamp appears
+	 * to go backwards. One estimate is that it can be up to about 40 CPU
+ * cycles, which is certainly less than 0x1000 TSC ticks, but accept
+ * slippage an order of magnitude more to be on the safe side.
+ */
+ decoder->tsc_slip = 0x10000;
intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
@@ -1010,6 +1010,15 @@ out_no_progress:
return err;
}
+static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
+ struct intel_pt_insn *intel_pt_insn,
+ uint64_t ip, int err)
+{
+ return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
+ intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
+ ip == decoder->ip + intel_pt_insn->length;
+}
+
static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
{
struct intel_pt_insn intel_pt_insn;
@@ -1022,7 +1031,8 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
if (err == INTEL_PT_RETURN)
return 0;
- if (err == -EAGAIN) {
+ if (err == -EAGAIN ||
+ intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
if (decoder->set_fup_tx_flags) {
decoder->set_fup_tx_flags = false;
decoder->tx_flags = decoder->fup_tx_flags;
@@ -1032,7 +1042,7 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
decoder->state.flags = decoder->fup_tx_flags;
return 0;
}
- return err;
+ return -EAGAIN;
}
decoder->set_fup_tx_flags = false;
if (err)
@@ -1268,8 +1278,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
{
intel_pt_log("ERROR: Buffer overflow\n");
intel_pt_clear_tx_flags(decoder);
- decoder->have_tma = false;
- decoder->cbr = 0;
decoder->timestamp_insn_cnt = 0;
decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
decoder->overflow = true;
@@ -1487,7 +1495,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
case INTEL_PT_PSB:
case INTEL_PT_TSC:
case INTEL_PT_TMA:
- case INTEL_PT_CBR:
case INTEL_PT_MODE_TSX:
case INTEL_PT_BAD:
case INTEL_PT_PSBEND:
@@ -1496,6 +1503,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
decoder->pkt_step = 0;
return -ENOENT;
+ case INTEL_PT_CBR:
+ intel_pt_calc_cbr(decoder);
+ break;
+
case INTEL_PT_OVF:
return intel_pt_overflow(decoder);
@@ -2306,6 +2317,34 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
}
}
+#define MAX_PADDING (PERF_AUXTRACE_RECORD_ALIGNMENT - 1)
+
+/**
+ * adj_for_padding - adjust overlap to account for padding.
+ * @buf_b: second buffer
+ * @buf_a: first buffer
+ * @len_a: size of first buffer
+ *
+ * @buf_a might have up to 7 bytes of padding appended. Adjust the overlap
+ * accordingly.
+ *
+ * Return: A pointer into @buf_b from where non-overlapped data starts
+ */
+static unsigned char *adj_for_padding(unsigned char *buf_b,
+ unsigned char *buf_a, size_t len_a)
+{
+ unsigned char *p = buf_b - MAX_PADDING;
+ unsigned char *q = buf_a + len_a - MAX_PADDING;
+ int i;
+
+ for (i = MAX_PADDING; i; i--, p++, q++) {
+ if (*p != *q)
+ break;
+ }
+
+ return p;
+}
+
/**
* intel_pt_find_overlap_tsc - determine start of non-overlapped trace data
* using TSC.
@@ -2356,8 +2395,11 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
/* Same TSC, so buffers are consecutive */
if (!cmp && rem_b >= rem_a) {
+ unsigned char *start;
+
*consecutive = true;
- return buf_b + len_b - (rem_b - rem_a);
+ start = buf_b + len_b - (rem_b - rem_a);
+ return adj_for_padding(start, buf_a, len_a);
}
if (cmp < 0)
return buf_b; /* tsc_a < tsc_b => no overlap */
@@ -2420,7 +2462,7 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
found = memmem(buf_a, len_a, buf_b, len_a);
if (found) {
*consecutive = true;
- return buf_b + len_a;
+ return adj_for_padding(buf_b + len_a, buf_a, len_a);
}
/* Try again at next PSB in buffer 'a' */
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
index 89a3eda6a3183c..e420bd3be15946 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
@@ -53,6 +53,14 @@ enum {
INTEL_PT_ERR_MAX,
};
+enum intel_pt_param_flags {
+ /*
+ * FUP packet can contain next linear instruction pointer instead of
+ * current linear instruction pointer.
+ */
+ INTEL_PT_FUP_WITH_NLIP = 1 << 0,
+};
+
struct intel_pt_state {
enum intel_pt_sample_type type;
int err;
@@ -91,6 +99,7 @@ struct intel_pt_params {
unsigned int mtc_period;
uint32_t tsc_ctc_ratio_n;
uint32_t tsc_ctc_ratio_d;
+ enum intel_pt_param_flags flags;
};
struct intel_pt_decoder;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index 7528ae4f7e28e1..e5c6caf913f3ea 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -281,7 +281,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
if (len < offs)
return INTEL_PT_NEED_MORE_BYTES;
byte = buf[offs++];
- payload |= (byte >> 1) << shift;
+ payload |= ((uint64_t)byte >> 1) << shift;
}
packet->type = INTEL_PT_CYC;
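
The cast above matters because byte is a 32-bit unsigned int and a long CYC payload accumulates a shift of 32 or more; shifting a 32-bit value that far is undefined behaviour and in practice dropped the high payload bits. Promoting to 64 bits first keeps them:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	unsigned int byte = 0x82;	/* sample CYC continuation byte */
	unsigned int shift = 34;	/* accumulated shift beyond 31  */
	uint64_t payload = 0;

	/* Fixed form: widen before shifting so no payload bits are
	 * lost (a 32-bit shift by >= 32 is undefined behaviour). */
	payload |= ((uint64_t)byte >> 1) << shift;
	assert(payload == (uint64_t)0x41 << 34);
	return 0;
}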
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 3693cb26ec661f..c8f2d084a8ce3d 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -676,6 +676,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
unsigned int queue_nr)
{
struct intel_pt_params params = { .get_trace = 0, };
+ struct perf_env *env = pt->machine->env;
struct intel_pt_queue *ptq;
ptq = zalloc(sizeof(struct intel_pt_queue));
@@ -753,6 +754,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
}
}
+ if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
+ params.flags |= INTEL_PT_FUP_WITH_NLIP;
+
ptq->decoder = intel_pt_decoder_new(&params);
if (!ptq->decoder)
goto out_free;
@@ -1246,6 +1250,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
switch (ptq->switch_state) {
+ case INTEL_PT_SS_NOT_TRACING:
case INTEL_PT_SS_UNKNOWN:
case INTEL_PT_SS_EXPECTING_SWITCH_IP:
err = intel_pt_next_tid(pt, ptq);
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 00724d496d38c4..62f6d7dc2ddae2 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -254,16 +254,16 @@ static const char *kinc_fetch_script =
"#!/usr/bin/env sh\n"
"if ! test -d \"$KBUILD_DIR\"\n"
"then\n"
-" exit -1\n"
+" exit 1\n"
"fi\n"
"if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
"then\n"
-" exit -1\n"
+" exit 1\n"
"fi\n"
"TMPDIR=`mktemp -d`\n"
"if test -z \"$TMPDIR\"\n"
"then\n"
-" exit -1\n"
+" exit 1\n"
"fi\n"
"cat << EOF > $TMPDIR/Makefile\n"
"obj-y := dummy.o\n"
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index e81dfb2e239cce..9351738df70394 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1903,7 +1903,7 @@ restart:
if (!name_only && strlen(syms->alias))
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
else
- strncpy(name, syms->symbol, MAX_NAME_LEN);
+ strlcpy(name, syms->symbol, MAX_NAME_LEN);
evt_list[evt_i] = strdup(name);
if (evt_list[evt_i] == NULL)
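
strncpy() leaves the destination unterminated when the source fills it, which is what the replacement avoids: strlcpy() always NUL-terminates and returns the source length. A minimal sketch of those semantics (the patch uses the tools-tree implementation, not this one):

#include <string.h>

/* BSD-style strlcpy: copy at most size - 1 bytes and always
 * NUL-terminate. The return value is strlen(src), so callers can
 * detect truncation by comparing it against size. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;
}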
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 593066c68e3da3..4f650ebd564a86 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -100,7 +100,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
char path[PATH_MAX];
const char *lc;
- snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -147,7 +147,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
ssize_t sret;
int fd;
- snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -177,7 +177,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -195,7 +195,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
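
The snprintf-to-scnprintf swaps above change the return-value contract: snprintf() returns the length the output would have had, which can overflow a buffer cursor on truncation, while scnprintf() returns what was actually written. A sketch of scnprintf() over vsnprintf() (the tools tree carries its own implementation):

#include <stdarg.h>
#include <stdio.h>

/* scnprintf in the kernel style: never returns more than size - 1,
 * so "buf += scnprintf(buf, end - buf, ...)" cannot run past end. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i < 0)
		return 0;
	if ((size_t)i < size)
		return i;
	return size ? (int)size - 1 : 0;
}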
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index eec6c1149f4475..132878d4847a87 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -333,7 +333,7 @@ static char *cpu_model(void)
if (file) {
while (fgets(buf, 255, file)) {
if (strstr(buf, "model name")) {
- strncpy(cpu_m, &buf[13], 255);
+ strlcpy(cpu_m, &buf[13], 255);
break;
}
}
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 7c97ecaeae484c..2070c02de3af5a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -74,6 +74,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
return GELF_ST_TYPE(sym->st_info);
}
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+ return GELF_ST_VISIBILITY(sym->st_other);
+}
+
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
@@ -98,7 +103,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
return elf_sym__type(sym) == STT_NOTYPE &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
+ sym->st_shndx != SHN_ABS &&
+ elf_sym__visibility(sym) != STV_HIDDEN &&
+ elf_sym__visibility(sym) != STV_INTERNAL;
}
static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type)
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index d995743cb673e7..58ce62088a3920 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -507,12 +507,14 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
"/tmp/perf-XXXXXX");
if (!mkstemp(tdata->temp_file)) {
pr_debug("Can't make temp file");
+ free(tdata);
return NULL;
}
temp_fd = open(tdata->temp_file, O_RDWR);
if (temp_fd < 0) {
pr_debug("Can't read '%s'", tdata->temp_file);
+ free(tdata);
return NULL;
}
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index b67a0ccf5ab949..23baee7b786aa6 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -334,9 +334,12 @@ static int read_event_files(struct pevent *pevent)
for (x=0; x < count; x++) {
size = read8(pevent);
ret = read_event_file(pevent, sys, size);
- if (ret)
+ if (ret) {
+ free(sys);
return ret;
+ }
}
+ free(sys);
}
return 0;
}
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 60edec383281f0..bf5ee8906fb270 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -41,13 +41,13 @@ static int __report_module(struct addr_location *al, u64 ip,
Dwarf_Addr s;
dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
- if (s != al->map->start)
+ if (s != al->map->start - al->map->pgoff)
mod = 0;
}
if (!mod)
mod = dwfl_report_elf(ui->dwfl, dso->short_name,
- dso->long_name, -1, al->map->start,
+ (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
false);
return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 64bf71fb6181df..bd71b7b1e5f9b6 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -734,9 +734,7 @@ void format_all_counters(struct thread_data *t, struct core_data *c, struct pkg_
if (!printed || !summary_only)
print_header();
- if (topo.num_cpus > 1)
- format_counters(&average.threads, &average.cores,
- &average.packages);
+ format_counters(&average.threads, &average.cores, &average.packages);
printed = 1;
@@ -3193,7 +3191,9 @@ void process_cpuid()
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
stepping = fms & 0xf;
- if (family == 6 || family == 0xf)
+ if (family == 0xf)
+ family += (fms >> 20) & 0xff;
+ if (family >= 6)
model += ((fms >> 16) & 0xf) << 4;
if (debug) {
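
The turbostat change implements the documented CPUID encoding: the extended family field is added only when the base family is 0xf, while the extended model applies from family 6 upward (the old == test broke on families above 6). A standalone decode with a worked check:

#include <assert.h>

/* Decode family/model from CPUID.1:EAX ("fms") with the extended
 * fields, matching the fixed logic above. */
static void decode_fms(unsigned int fms, unsigned int *family,
		       unsigned int *model)
{
	*family = (fms >> 8) & 0xf;
	*model = (fms >> 4) & 0xf;
	if (*family == 0xf)
		*family += (fms >> 20) & 0xff;
	if (*family >= 6)
		*model += ((fms >> 16) & 0xf) << 4;
}

int main(void)
{
	unsigned int family, model;

	decode_fms(0x000506e3, &family, &model);	/* Skylake client */
	assert(family == 6 && model == 0x5e);
	return 0;
}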
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 19edc1a7a23226..7ea4438b801ddb 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -92,3 +92,5 @@ ifneq ($(silent),1)
QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
endif
endif
+
+pound := \#
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644
index 00000000000000..4e151f1005b2b2
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/config
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 00000000000000..3b1f45e13a2e7f
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since trace buffer is empty, snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure tracing stays off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure tracing stays on"
+test `cat tracing_on` -eq 1
+
+exit 0
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
new file mode 100644
index 00000000000000..88e6c3f4300662
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-syntax.tc
@@ -0,0 +1,80 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test synthetic_events syntax parser
+
+do_reset() {
+ reset_trigger
+ echo > set_event
+ clear_trace
+}
+
+fail() { #msg
+ do_reset
+ echo $1
+ exit_fail
+}
+
+if [ ! -f set_event ]; then
+ echo "event tracing is not supported"
+ exit_unsupported
+fi
+
+if [ ! -f synthetic_events ]; then
+ echo "synthetic event is not supported"
+ exit_unsupported
+fi
+
+reset_tracer
+do_reset
+
+echo "Test synthetic_events syntax parser"
+
+echo > synthetic_events
+
+# synthetic event must have a field
+! echo "myevent" >> synthetic_events
+echo "myevent u64 var1" >> synthetic_events
+
+# synthetic event must be found in synthetic_events
+grep "myevent[[:space:]]u64 var1" synthetic_events
+
+# it is not possible to add an event with the same name
+! echo "myevent u64 var2" >> synthetic_events
+
+# Non-append open will clean up all events and add a new one
+echo "myevent u64 var2" > synthetic_events
+
+# multiple fields with different spaces
+echo "myevent u64 var1; u64 var2;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ; u64 var2 ;" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+echo "myevent u64 var1 ;u64 var2" > synthetic_events
+grep "myevent[[:space:]]u64 var1; u64 var2" synthetic_events
+
+# test field types
+echo "myevent u32 var" > synthetic_events
+echo "myevent u16 var" > synthetic_events
+echo "myevent u8 var" > synthetic_events
+echo "myevent s64 var" > synthetic_events
+echo "myevent s32 var" > synthetic_events
+echo "myevent s16 var" > synthetic_events
+echo "myevent s8 var" > synthetic_events
+
+echo "myevent char var" > synthetic_events
+echo "myevent int var" > synthetic_events
+echo "myevent long var" > synthetic_events
+echo "myevent pid_t var" > synthetic_events
+
+echo "myevent unsigned char var" > synthetic_events
+echo "myevent unsigned int var" > synthetic_events
+echo "myevent unsigned long var" > synthetic_events
+grep "myevent[[:space:]]unsigned long var" synthetic_events
+
+# test string type
+echo "myevent char var[10]" > synthetic_events
+grep "myevent[[:space:]]char\[10\] var" synthetic_events
+
+do_reset
+
+exit 0
diff --git a/Documentation/networking/timestamping/.gitignore b/tools/testing/selftests/networking/timestamping/.gitignore
index 9e69e982fb382b..9e69e982fb382b 100644
--- a/Documentation/networking/timestamping/.gitignore
+++ b/tools/testing/selftests/networking/timestamping/.gitignore
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile
new file mode 100644
index 00000000000000..ccbb9edbbbb9a6
--- /dev/null
+++ b/tools/testing/selftests/networking/timestamping/Makefile
@@ -0,0 +1,8 @@
+TEST_PROGS := hwtstamp_config timestamping txtimestamp
+
+all: $(TEST_PROGS)
+
+include ../../lib.mk
+
+clean:
+ rm -fr $(TEST_PROGS)
diff --git a/Documentation/networking/timestamping/hwtstamp_config.c b/tools/testing/selftests/networking/timestamping/hwtstamp_config.c
index e8b685a7f15fc3..e8b685a7f15fc3 100644
--- a/Documentation/networking/timestamping/hwtstamp_config.c
+++ b/tools/testing/selftests/networking/timestamping/hwtstamp_config.c
diff --git a/Documentation/networking/timestamping/timestamping.c b/tools/testing/selftests/networking/timestamping/timestamping.c
index 5cdfd743447b72..5cdfd743447b72 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/tools/testing/selftests/networking/timestamping/timestamping.c
diff --git a/Documentation/networking/timestamping/txtimestamp.c b/tools/testing/selftests/networking/timestamping/txtimestamp.c
index 5df07047ca8660..5df07047ca8660 100644
--- a/Documentation/networking/timestamping/txtimestamp.c
+++ b/tools/testing/selftests/networking/timestamping/txtimestamp.c
diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
index f45cee80c58bcc..af2b1e66e35e6d 100644
--- a/tools/testing/selftests/powerpc/harness.c
+++ b/tools/testing/selftests/powerpc/harness.c
@@ -85,13 +85,13 @@ wait:
return status;
}
-static void alarm_handler(int signum)
+static void sig_handler(int signum)
{
- /* Jut wake us up from waitpid */
+ /* Just wake us up from waitpid */
}
-static struct sigaction alarm_action = {
- .sa_handler = alarm_handler,
+static struct sigaction sig_action = {
+ .sa_handler = sig_handler,
};
int test_harness(int (test_function)(void), char *name)
@@ -101,8 +101,14 @@ int test_harness(int (test_function)(void), char *name)
test_start(name);
test_set_git_version(GIT_VERSION);
- if (sigaction(SIGALRM, &alarm_action, NULL)) {
- perror("sigaction");
+ if (sigaction(SIGINT, &sig_action, NULL)) {
+ perror("sigaction (sigint)");
+ test_error(name);
+ return 1;
+ }
+
+ if (sigaction(SIGALRM, &sig_action, NULL)) {
+ perror("sigaction (sigalrm)");
test_error(name);
return 1;
}
diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests
index 6ccb154cb4aa4f..22f8df1ad7d484 100755
--- a/tools/testing/selftests/pstore/pstore_post_reboot_tests
+++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests
@@ -7,13 +7,16 @@
#
# Released under the terms of the GPL v2.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
. ./common_tests
if [ -e $REBOOT_FLAG ]; then
rm $REBOOT_FLAG
else
prlog "pstore_crash_test has not been executed yet. we skip further tests."
- exit 0
+ exit $ksft_skip
fi
prlog -n "Mounting pstore filesystem ... "
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 882fe83a355442..b3f345433ec727 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -1476,15 +1476,19 @@ TEST_F(TRACE_syscall, syscall_dropped)
#define SECCOMP_SET_MODE_FILTER 1
#endif
-#ifndef SECCOMP_FLAG_FILTER_TSYNC
-#define SECCOMP_FLAG_FILTER_TSYNC 1
+#ifndef SECCOMP_FILTER_FLAG_TSYNC
+#define SECCOMP_FILTER_FLAG_TSYNC (1UL << 0)
+#endif
+
+#ifndef SECCOMP_FILTER_FLAG_SPEC_ALLOW
+#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#endif
#ifndef seccomp
-int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter)
+int seccomp(unsigned int op, unsigned int flags, void *args)
{
errno = 0;
- return syscall(__NR_seccomp, op, flags, filter);
+ return syscall(__NR_seccomp, op, flags, args);
}
#endif
@@ -1576,6 +1580,78 @@ TEST(seccomp_syscall_mode_lock)
}
}
+/*
+ * Test detection of known and unknown filter flags. Userspace needs to be able
+ * to check if a filter flag is supported by the current kernel and a good way
+ * of doing that is by attempting to enter filter mode, with the flag bit in
+ * question set, and a NULL pointer for the _args_ parameter. EFAULT indicates
+ * that the flag is valid and EINVAL indicates that the flag is invalid.
+ */
+TEST(detect_seccomp_filter_flags)
+{
+ unsigned int flags[] = { SECCOMP_FILTER_FLAG_TSYNC,
+ SECCOMP_FILTER_FLAG_SPEC_ALLOW };
+ unsigned int flag, all_flags;
+ int i;
+ long ret;
+
+ /* Test detection of known-good filter flags */
+ for (i = 0, all_flags = 0; i < ARRAY_SIZE(flags); i++) {
+ int bits = 0;
+
+ flag = flags[i];
+ /* Make sure the flag is a single bit! */
+ while (flag) {
+ if (flag & 0x1)
+			bits++;
+ flag >>= 1;
+ }
+ ASSERT_EQ(1, bits);
+ flag = flags[i];
+
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+ ASSERT_NE(ENOSYS, errno) {
+ TH_LOG("Kernel does not support seccomp syscall!");
+ }
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EFAULT, errno) {
+ TH_LOG("Failed to detect that a known-good filter flag (0x%X) is supported!",
+ flag);
+ }
+
+ all_flags |= flag;
+ }
+
+ /* Test detection of all known-good filter flags */
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, all_flags, NULL);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EFAULT, errno) {
+ TH_LOG("Failed to detect that all known-good filter flags (0x%X) are supported!",
+ all_flags);
+ }
+
+ /* Test detection of an unknown filter flag */
+ flag = -1;
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EINVAL, errno) {
+ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported!",
+ flag);
+ }
+
+ /*
+ * Test detection of an unknown filter flag that may simply need to be
+ * added to this test
+ */
+ flag = flags[ARRAY_SIZE(flags) - 1] << 1;
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, flag, NULL);
+ EXPECT_EQ(-1, ret);
+ EXPECT_EQ(EINVAL, errno) {
+ TH_LOG("Failed to detect that an unknown filter flag (0x%X) is unsupported! Does a new flag need to be added to this test?",
+ flag);
+ }
+}
+
TEST(TSYNC_first)
{
struct sock_filter filter[] = {
@@ -1592,7 +1668,7 @@ TEST(TSYNC_first)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
@@ -1810,7 +1886,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor)
self->sibling_count++;
}
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(0, ret) {
TH_LOG("Could install filter on all threads!");
@@ -1871,7 +1947,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter)
TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!");
}
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_NE(ENOSYS, errno) {
TH_LOG("Kernel does not support seccomp syscall!");
@@ -1919,7 +1995,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence)
self->sibling_count++;
}
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(self->sibling[0].system_tid, ret) {
TH_LOG("Did not fail on diverged sibling.");
@@ -1971,7 +2047,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!");
}
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(ret, self->sibling[0].system_tid) {
TH_LOG("Did not fail on diverged sibling.");
@@ -2000,7 +2076,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
/* Switch to the remaining sibling */
sib = !sib;
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(0, ret) {
TH_LOG("Expected the remaining sibling to sync");
@@ -2023,7 +2099,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter)
while (!kill(self->sibling[sib].system_tid, 0))
sleep(0.1);
- ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC,
+ ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC,
&self->apply_prog);
ASSERT_EQ(0, ret); /* just us chickens */
}
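
The probing technique the new test relies on can be used directly by applications: pass the candidate flag with a NULL filter pointer, then distinguish EFAULT (flag accepted, kernel went on to read the filter) from EINVAL (flag unknown). A minimal sketch, assuming __NR_seccomp is provided by the system headers:

#include <errno.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef SECCOMP_SET_MODE_FILTER
#define SECCOMP_SET_MODE_FILTER 1
#endif

/* Returns 1 if the running kernel recognizes the given seccomp filter
 * flag: EFAULT means the flag parsed and the kernel tried to read the
 * (NULL) filter; EINVAL means the flag itself was rejected. */
static int seccomp_flag_supported(unsigned long flag)
{
	errno = 0;
	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, flag, NULL);
	return errno == EFAULT;
}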
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
index 1261e3fa1e3a85..5bba7796fb34ce 100755
--- a/tools/testing/selftests/static_keys/test_static_keys.sh
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -1,6 +1,19 @@
#!/bin/sh
# Runs static keys kernel module tests
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+ echo "static_key: module test_static_key_base is not found [SKIP]"
+ exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+ echo "static_key: module test_static_keys is not found [SKIP]"
+ exit $ksft_skip
+fi
+
if /sbin/modprobe -q test_static_key_base; then
if /sbin/modprobe -q test_static_keys; then
echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644
index 00000000000000..1ab7e8130db246
--- /dev/null
+++ b/tools/testing/selftests/sync/config
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index 30906bfd9c1b58..0ab937a17ebb32 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -146,6 +146,11 @@ int main(int argv, char **argc)
printf(" %lld.%i(act)", ppm/1000, abs((int)(ppm%1000)));
if (llabs(eppm - ppm) > 1000) {
+ if (tx1.offset || tx2.offset ||
+ tx1.freq != tx2.freq || tx1.tick != tx2.tick) {
+ printf(" [SKIP]\n");
+ return ksft_exit_skip("The clock was adjusted externally. Shutdown NTPd or other time sync daemons\n");
+ }
printf(" [FAILED]\n");
return ksft_exit_fail();
}
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
index 350107f40c1d52..0409270f998c1e 100755
--- a/tools/testing/selftests/user/test_user_copy.sh
+++ b/tools/testing/selftests/user/test_user_copy.sh
@@ -1,6 +1,13 @@
#!/bin/sh
# Runs copy_to/from_user infrastructure using test_user_copy kernel module
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+ echo "user: module test_user_copy is not found [SKIP]"
+ exit $ksft_skip
+fi
if /sbin/modprobe -q test_user_copy; then
/sbin/modprobe -q -r test_user_copy
echo "user_copy: ok"
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index b5aa1bab741639..97ad2d40324a3e 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -456,19 +456,38 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
greg_t req = requested_regs[i], res = resulting_regs[i];
if (i == REG_TRAPNO || i == REG_IP)
continue; /* don't care */
- if (i == REG_SP) {
- printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
- (unsigned long long)res);
+ if (i == REG_SP) {
/*
- * In many circumstances, the high 32 bits of rsp
- * are zeroed. For example, we could be a real
- * 32-bit program, or we could hit any of a number
- * of poorly-documented IRET or segmented ESP
- * oddities. If this happens, it's okay.
+ * If we were using a 16-bit stack segment, then
+ * the kernel is a bit stuck: IRET only restores
+ * the low 16 bits of ESP/RSP if SS is 16-bit.
+ * The kernel uses a hack to restore bits 31:16,
+ * but that hack doesn't help with bits 63:32.
+ * On Intel CPUs, bits 63:32 end up zeroed, and, on
+ * AMD CPUs, they leak the high bits of the kernel
+ * espfix64 stack pointer. There's very little that
+ * the kernel can do about it.
+ *
+ * Similarly, if we are returning to a 32-bit context,
+ * the CPU will often lose the high 32 bits of RSP.
*/
- if (res == (req & 0xFFFFFFFF))
- continue; /* OK; not expected to work */
+
+ if (res == req)
+ continue;
+
+ if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+ printf("[NOTE]\tSP: %llx -> %llx\n",
+ (unsigned long long)req,
+ (unsigned long long)res);
+ continue;
+ }
+
+ printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+ (unsigned long long)requested_regs[i],
+ (unsigned long long)resulting_regs[i]);
+ nerrs++;
+ continue;
}
bool ignore_reg = false;
@@ -507,13 +526,6 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
}
if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
- /*
- * SP is particularly interesting here. The
- * usual cause of failures is that we hit the
- * nasty IRET case of returning to a 16-bit SS,
- * in which case bits 16:31 of the *kernel*
- * stack pointer persist in ESP.
- */
printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
i, (unsigned long long)requested_regs[i],
(unsigned long long)resulting_regs[i]);
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 683a292e329015..9399c4aeaa265a 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -1,6 +1,9 @@
#!/bin/bash
TCID="zram.sh"
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
. ./zram_lib.sh
run_zram () {
@@ -23,5 +26,5 @@ elif [ -b /dev/zram0 ]; then
else
echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
echo "$TCID : CONFIG_ZRAM is not set"
- exit 1
+ exit $ksft_skip
fi
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index f6a9c73e7a442e..9e73a4fb9b0aa9 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -18,6 +18,9 @@ MODULE=0
dev_makeswap=-1
dev_mounted=-1
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
trap INT
check_prereqs()
@@ -27,7 +30,7 @@ check_prereqs()
if [ $uid -ne 0 ]; then
echo $msg must be run as root >&2
- exit 0
+ exit $ksft_skip
fi
}
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 88d5e71be0449f..47dfa0b0fcd714 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -44,12 +44,25 @@
/******************** Little Endian Handling ********************************/
-#define cpu_to_le16(x) htole16(x)
-#define cpu_to_le32(x) htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed. To solve this, we define cpu_to_le16/32 as
+ * constant expressions, which remain usable in such initializers.
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x) (x)
+#define cpu_to_le32(x) (x)
+#else
+#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x) \
+ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
+ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
+#endif
+
#define le32_to_cpu(x) le32toh(x)
#define le16_to_cpu(x) le16toh(x)
-
/******************** Messages and Errors ***********************************/
static const char argv0[] = "ffs-test";
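
With the macros rewritten as constant expressions they become legal in static initializers, which was the point of the change (htole16/htole32 may be functions on some libcs). A compile-time usage sketch:

#include <endian.h>
#include <stdint.h>

#if __BYTE_ORDER == __LITTLE_ENDIAN
#define cpu_to_le16(x) (x)
#else
#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
#endif

/* A constant expression is required here: a static initializer cannot
 * contain a function call, but it can contain the macro above. */
struct desc {
	uint16_t wLength;
};

static const struct desc d = {
	.wLength = cpu_to_le16(64),
};

int main(void)
{
	return d.wLength ? 0 : 1;
}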
diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c
index 9db9d21bb2ecee..6a8db858caa5f3 100644
--- a/tools/usb/usbip/src/usbip_detach.c
+++ b/tools/usb/usbip/src/usbip_detach.c
@@ -43,7 +43,7 @@ void usbip_detach_usage(void)
static int detach_port(char *port)
{
- int ret;
+ int ret = 0;
uint8_t portnum;
char path[PATH_MAX+1];
@@ -73,9 +73,12 @@ static int detach_port(char *port)
}
ret = usbip_vhci_detach_device(portnum);
- if (ret < 0)
- return -1;
+ if (ret < 0) {
+ ret = -1;
+ goto call_driver_close;
+ }
+call_driver_close:
usbip_vhci_driver_close();
return ret;
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 5a6016224bb9c9..c7fcc84fc0c09d 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -150,12 +150,6 @@ static const char * const page_flag_names[] = {
};
-static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug",
- "/debug",
- 0,
-};
-
/*
* data structures
*/
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index 499b8819d4c6ab..5173a191cd03c6 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -29,8 +29,8 @@ struct slabinfo {
int alias;
int refs;
int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
- int hwcache_align, object_size, objs_per_slab;
- int sanity_checks, slab_size, store_user, trace;
+ unsigned int hwcache_align, object_size, objs_per_slab;
+ unsigned int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
unsigned long partial, objects, slabs, objects_partial, objects_total;
unsigned long alloc_fastpath, alloc_slowpath;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 5d10f104f3ebd0..964df643509dd2 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -821,7 +821,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
struct vgic_io_device *iodev = container_of(this,
struct vgic_io_device, dev);
- struct kvm_run *run = vcpu->run;
const struct vgic_io_range *range;
struct kvm_exit_mmio mmio;
bool updated_state;
@@ -850,12 +849,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
updated_state = false;
}
spin_unlock(&dist->lock);
- run->mmio.is_write = is_write;
- run->mmio.len = len;
- run->mmio.phys_addr = addr;
- memcpy(run->mmio.data, val, len);
-
- kvm_handle_mmio_return(vcpu, run);
if (updated_state)
vgic_kick_vcpus(vcpu->kvm);
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 4f70d12e392d3d..eddce59986ee58 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -80,7 +80,7 @@ static void async_pf_execute(struct work_struct *work)
might_sleep();
- get_user_pages_unlocked(NULL, mm, addr, 1, 1, 0, NULL);
+ get_user_pages_unlocked(NULL, mm, addr, 1, NULL, FOLL_WRITE);
kvm_async_page_present_sync(vcpu, apf);
spin_lock(&vcpu->async_pf.lock);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 49001fa84ead8d..f509cfd37db55b 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
{
struct kvm_kernel_irqfd *irqfd =
container_of(work, struct kvm_kernel_irqfd, shutdown);
+ struct kvm *kvm = irqfd->kvm;
u64 cnt;
+	/* Make sure irqfd has been initialized in the assign path. */
+ synchronize_srcu(&kvm->irq_srcu);
+
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
* further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
idx = srcu_read_lock(&kvm->irq_srcu);
irqfd_update(kvm, irqfd);
- srcu_read_unlock(&kvm->irq_srcu, idx);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
if (events & POLLIN)
schedule_work(&irqfd->inject);
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the POLLHUP
- */
- fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
irqfd->consumer.token = (void *)irqfd->eventfd;
irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
@@ -419,6 +417,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
irqfd->consumer.token, ret);
#endif
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the POLLHUP
+ */
+ fdput(f);
return 0;
fail:
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 66c3ab86cff922..3e61292bfc1932 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1352,10 +1352,15 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
npages = get_user_page_nowait(current, current->mm,
addr, write_fault, page);
up_read(&current->mm->mmap_sem);
- } else
+ } else {
+ unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON;
+
+ if (write_fault)
+ flags |= FOLL_WRITE;
+
npages = __get_user_pages_unlocked(current, current->mm, addr, 1,
- write_fault, 0, page,
- FOLL_TOUCH|FOLL_HWPOISON);
+ page, flags);
+ }
if (npages != 1)
return npages;
@@ -2655,6 +2660,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
{
struct kvm_device *dev = filp->private_data;
+ if (dev->kvm->mm != current->mm)
+ return -EIO;
+
switch (ioctl) {
case KVM_SET_DEVICE_ATTR:
return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
@@ -2755,14 +2763,15 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
return ret;
}
+ kvm_get_kvm(kvm);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
+ kvm_put_kvm(kvm);
ops->destroy(dev);
return ret;
}
list_add(&dev->vm_node, &kvm->devices);
- kvm_get_kvm(kvm);
cd->fd = ret;
return 0;
}
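
The final hunk reorders reference counting around fd creation: once anon_inode_getfd() succeeds, userspace owns an fd it can close immediately, and the release path drops a reference, so the reference must already be held before the fd is published. A generic userspace sketch of that ordering (publish_fd() is a hypothetical stand-in for anon_inode_getfd()):

#include <stdatomic.h>

struct obj {
	atomic_int refcount;
};

/* Hypothetical: publishes a handle through which another thread may
 * drop a reference at any time after this returns. */
extern int publish_fd(struct obj *o);

static int attach_handle(struct obj *o)
{
	int fd;

	atomic_fetch_add(&o->refcount, 1);	/* take the ref first */
	fd = publish_fd(o);
	if (fd < 0)
		atomic_fetch_sub(&o->refcount, 1);	/* undo on failure */
	return fd;
}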